git.karo-electronics.de Git - mv-sheeva.git/commitdiff
Merge git://git.infradead.org/mtd-2.6
author     Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 1 May 2008 18:15:28 +0000 (11:15 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 1 May 2008 18:15:28 +0000 (11:15 -0700)
* git://git.infradead.org/mtd-2.6:
  [MTD][NOR] Add physical address to point() method
  [JFFS2] Track parent inode for directories (for NFS export)
  [JFFS2] Invert last argument of jffs2_gc_fetch_inode(), make it boolean.
  [JFFS2] Quiet lockdep false positive.
  [JFFS2] Clean up jffs2_alloc_inode() and jffs2_i_init_once()
  [MTD] Delete long-unused jedec.h header file.
  [MTD] [NAND] at91_nand: use at91_nand_{en,dis}able consistently.
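
The first item in the shortlog extends the MTD point() method, which lets callers map NOR flash directly instead of copying it through read(). A rough C sketch of that interface change follows; the parameter names and exact types are best-effort assumptions about the mtd_info hook around this release, not a verbatim copy of include/linux/mtd/mtd.h:

    /* Older hook: returns only a kernel-virtual window onto the flash. */
    int (*point)(struct mtd_info *mtd, loff_t from, size_t len,
                 size_t *retlen, u_char **mtdbuf);

    /* After "[MTD][NOR] Add physical address to point() method" the hook
     * also reports the physical address of the mapped window (sketch): */
    int (*point)(struct mtd_info *mtd, loff_t from, size_t len,
                 size_t *retlen, void **virt, resource_size_t *phys);

Callers that only need the virtual mapping can ignore the extra output, while XIP-style users can read the physical address to set up their own mappings.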

2837 files changed:
.gitignore
.mailmap
Documentation/ABI/testing/sysfs-class-bdi [new file with mode: 0644]
Documentation/DMA-API.txt
Documentation/DMA-attributes.txt [new file with mode: 0644]
Documentation/DMA-mapping.txt
Documentation/DocBook/Makefile
Documentation/DocBook/debugobjects.tmpl [new file with mode: 0644]
Documentation/DocBook/kernel-api.tmpl
Documentation/DocBook/rapidio.tmpl
Documentation/braille-console.txt [new file with mode: 0644]
Documentation/cgroups.txt
Documentation/controllers/devices.txt [new file with mode: 0644]
Documentation/controllers/resource_counter.txt [new file with mode: 0644]
Documentation/cpu-freq/user-guide.txt
Documentation/cpusets.txt
Documentation/dontdiff
Documentation/fb/gxfb.txt [new file with mode: 0644]
Documentation/fb/intelfb.txt
Documentation/fb/lxfb.txt [new file with mode: 0644]
Documentation/fb/metronomefb.txt
Documentation/fb/modedb.txt
Documentation/feature-removal-schedule.txt
Documentation/filesystems/Locking
Documentation/filesystems/proc.txt
Documentation/filesystems/tmpfs.txt
Documentation/filesystems/vfat.txt
Documentation/gpio.txt
Documentation/hwmon/w83l785ts
Documentation/i2c/writing-clients
Documentation/i386/boot.txt
Documentation/ia64/kvm.txt [new file with mode: 0644]
Documentation/ide/ide-tape.txt
Documentation/ide/ide.txt
Documentation/ioctl-number.txt
Documentation/kbuild/kconfig-language.txt
Documentation/kdump/kdump.txt
Documentation/kernel-parameters.txt
Documentation/keys-request-key.txt
Documentation/keys.txt
Documentation/kprobes.txt
Documentation/laptops/thinkpad-acpi.txt
Documentation/md.txt
Documentation/mips/AU1xxx_IDE.README
Documentation/oops-tracing.txt
Documentation/powerpc/booting-without-of.txt
Documentation/powerpc/kvm_440.txt [new file with mode: 0644]
Documentation/powerpc/mpc52xx-device-tree-bindings.txt
Documentation/s390/kvm.txt [new file with mode: 0644]
Documentation/sound/alsa/ALSA-Configuration.txt
Documentation/spi/spidev
Documentation/spi/spidev_fdx.c [new file with mode: 0644]
Documentation/sysrq.txt
Documentation/thermal/sysfs-api.txt
Documentation/video4linux/CARDLIST.saa7134
Documentation/video4linux/cx18.txt [new file with mode: 0644]
Documentation/vm/numa_memory_policy.txt
Documentation/vm/slabinfo.c
Kbuild
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/kernel/asm-offsets.c
arch/alpha/kernel/core_marvel.c
arch/alpha/kernel/core_t2.c
arch/alpha/kernel/core_titan.c
arch/alpha/kernel/core_tsunami.c
arch/alpha/kernel/module.c
arch/alpha/kernel/pci.c
arch/alpha/kernel/pci_iommu.c
arch/alpha/kernel/smp.c
arch/alpha/kernel/srm_env.c
arch/alpha/kernel/sys_alcor.c
arch/alpha/kernel/sys_marvel.c
arch/alpha/kernel/sys_sable.c
arch/alpha/kernel/sys_sio.c
arch/alpha/kernel/traps.c
arch/arm/Kconfig
arch/arm/configs/am200epdkit_defconfig
arch/arm/kernel/asm-offsets.c
arch/arm/kernel/atags.c
arch/arm/kernel/ecard.c
arch/arm/kernel/kprobes-decode.c
arch/arm/kernel/kprobes.c
arch/arm/mach-at91/at91cap9_devices.c
arch/arm/mach-at91/at91sam9261_devices.c
arch/arm/mach-at91/at91sam9263_devices.c
arch/arm/mach-at91/at91sam9rl_devices.c
arch/arm/mach-at91/board-csb337.c
arch/arm/mach-at91/board-dk.c
arch/arm/mach-at91/board-eb9200.c
arch/arm/mach-at91/pm.c
arch/arm/mach-davinci/clock.c
arch/arm/mach-iop32x/em7210.c
arch/arm/mach-iop32x/glantank.c
arch/arm/mach-iop32x/n2100.c
arch/arm/mach-ixp4xx/dsmg600-setup.c
arch/arm/mach-ixp4xx/ixp4xx_npe.c
arch/arm/mach-ixp4xx/ixp4xx_qmgr.c
arch/arm/mach-ixp4xx/nas100d-setup.c
arch/arm/mach-ixp4xx/nslu2-setup.c
arch/arm/mach-omap1/board-h2.c
arch/arm/mach-omap1/board-h3.c
arch/arm/mach-omap1/board-osk.c
arch/arm/mach-orion5x/addr-map.c
arch/arm/mach-orion5x/common.c
arch/arm/mach-orion5x/common.h
arch/arm/mach-orion5x/db88f5281-setup.c
arch/arm/mach-orion5x/dns323-setup.c
arch/arm/mach-orion5x/kurobox_pro-setup.c
arch/arm/mach-orion5x/pci.c
arch/arm/mach-orion5x/rd88f5182-setup.c
arch/arm/mach-orion5x/ts209-setup.c
arch/arm/mach-pxa/Makefile
arch/arm/mach-pxa/generic.c
arch/arm/mach-pxa/gumstix.c
arch/arm/mach-pxa/littleton.c
arch/arm/mach-pxa/lubbock.c
arch/arm/mach-pxa/magician.c
arch/arm/mach-pxa/mainstone.c
arch/arm/mach-pxa/pcm990-baseboard.c
arch/arm/mach-pxa/pm.c
arch/arm/mach-pxa/pxa3xx.c
arch/arm/mach-pxa/zylonite.c
arch/arm/mm/Kconfig
arch/arm/mm/Makefile
arch/arm/mm/copypage-feroceon.S [new file with mode: 0644]
arch/arm/mm/iomap.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-feroceon.S
arch/arm/oprofile/op_model_mpcore.c
arch/avr32/kernel/asm-offsets.c
arch/avr32/kernel/setup.c
arch/avr32/mm/tlb.c
arch/blackfin/kernel/asm-offsets.c
arch/blackfin/kernel/signal.c
arch/blackfin/mach-bf533/boards/stamp.c
arch/blackfin/mach-bf537/boards/stamp.c
arch/blackfin/mach-bf548/boards/ezkit.c
arch/cris/kernel/profile.c
arch/cris/mm/init.c
arch/frv/kernel/asm-offsets.c
arch/frv/kernel/pm.c
arch/frv/kernel/signal.c
arch/frv/kernel/traps.c
arch/frv/mb93090-mb00/pci-iomap.c
arch/frv/mm/Makefile
arch/frv/mm/unaligned.c [deleted file]
arch/h8300/kernel/asm-offsets.c
arch/ia64/Kconfig
arch/ia64/Makefile
arch/ia64/hp/common/hwsw_iommu.c
arch/ia64/hp/common/sba_iommu.c
arch/ia64/hp/sim/simserial.c
arch/ia64/kernel/asm-offsets.c
arch/ia64/kernel/perfmon.c
arch/ia64/kernel/process.c
arch/ia64/kernel/salinfo.c
arch/ia64/kernel/smpboot.c
arch/ia64/kernel/topology.c
arch/ia64/kernel/uncached.c
arch/ia64/kvm/Kconfig [new file with mode: 0644]
arch/ia64/kvm/Makefile [new file with mode: 0644]
arch/ia64/kvm/asm-offsets.c [new file with mode: 0644]
arch/ia64/kvm/kvm-ia64.c [new file with mode: 0644]
arch/ia64/kvm/kvm_fw.c [new file with mode: 0644]
arch/ia64/kvm/kvm_minstate.h [new file with mode: 0644]
arch/ia64/kvm/lapic.h [new file with mode: 0644]
arch/ia64/kvm/misc.h [new file with mode: 0644]
arch/ia64/kvm/mmio.c [new file with mode: 0644]
arch/ia64/kvm/optvfault.S [new file with mode: 0644]
arch/ia64/kvm/process.c [new file with mode: 0644]
arch/ia64/kvm/trampoline.S [new file with mode: 0644]
arch/ia64/kvm/vcpu.c [new file with mode: 0644]
arch/ia64/kvm/vcpu.h [new file with mode: 0644]
arch/ia64/kvm/vmm.c [new file with mode: 0644]
arch/ia64/kvm/vmm_ivt.S [new file with mode: 0644]
arch/ia64/kvm/vti.h [new file with mode: 0644]
arch/ia64/kvm/vtlb.c [new file with mode: 0644]
arch/ia64/mm/init.c
arch/ia64/mm/tlb.c
arch/ia64/sn/kernel/sn2/sn2_smp.c
arch/ia64/sn/kernel/sn2/sn_proc_fs.c
arch/ia64/sn/pci/pci_dma.c
arch/m68k/kernel/asm-offsets.c
arch/m68k/kernel/ints.c
arch/m68k/mac/iop.c
arch/m68k/mac/oss.c
arch/m68k/mm/init.c
arch/m68k/q40/q40ints.c
arch/m68knommu/kernel/asm-offsets.c
arch/m68knommu/kernel/entry.S
arch/m68knommu/kernel/setup.c
arch/m68knommu/kernel/signal.c
arch/m68knommu/kernel/traps.c
arch/m68knommu/kernel/vmlinux.lds.S
arch/m68knommu/platform/5206e/config.c
arch/m68knommu/platform/5272/config.c
arch/m68knommu/platform/528x/config.c
arch/m68knommu/platform/5307/config.c
arch/m68knommu/platform/coldfire/entry.S
arch/mips/Kconfig
arch/mips/Kconfig.debug
arch/mips/Makefile
arch/mips/au1000/common/cputable.c
arch/mips/au1000/common/dbdma.c
arch/mips/au1000/common/dbg_io.c
arch/mips/au1000/common/dma.c
arch/mips/au1000/common/gpio.c
arch/mips/au1000/common/irq.c
arch/mips/au1000/common/pci.c
arch/mips/au1000/common/platform.c
arch/mips/au1000/common/power.c
arch/mips/au1000/common/prom.c
arch/mips/au1000/common/puts.c
arch/mips/au1000/common/reset.c
arch/mips/au1000/common/setup.c
arch/mips/au1000/common/sleeper.S
arch/mips/au1000/common/time.c
arch/mips/au1000/db1x00/board_setup.c
arch/mips/au1000/db1x00/init.c
arch/mips/au1000/db1x00/irqmap.c
arch/mips/au1000/mtx-1/board_setup.c
arch/mips/au1000/mtx-1/init.c
arch/mips/au1000/mtx-1/irqmap.c
arch/mips/au1000/mtx-1/platform.c
arch/mips/au1000/pb1000/board_setup.c
arch/mips/au1000/pb1000/init.c
arch/mips/au1000/pb1000/irqmap.c
arch/mips/au1000/pb1100/board_setup.c
arch/mips/au1000/pb1100/init.c
arch/mips/au1000/pb1100/irqmap.c
arch/mips/au1000/pb1200/Makefile
arch/mips/au1000/pb1200/board_setup.c
arch/mips/au1000/pb1200/init.c
arch/mips/au1000/pb1200/irqmap.c
arch/mips/au1000/pb1200/platform.c [new file with mode: 0644]
arch/mips/au1000/pb1500/board_setup.c
arch/mips/au1000/pb1500/init.c
arch/mips/au1000/pb1500/irqmap.c
arch/mips/au1000/pb1550/board_setup.c
arch/mips/au1000/pb1550/init.c
arch/mips/au1000/pb1550/irqmap.c
arch/mips/au1000/xxs1500/board_setup.c
arch/mips/au1000/xxs1500/init.c
arch/mips/au1000/xxs1500/irqmap.c
arch/mips/basler/excite/excite_procfs.c
arch/mips/configs/mipssim_defconfig
arch/mips/configs/pnx8550-jbs_defconfig
arch/mips/configs/pnx8550-stb810_defconfig
arch/mips/dec/time.c
arch/mips/jmr3927/rbhma3100/setup.c
arch/mips/kernel/Makefile
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/binfmt_elfn32.c
arch/mips/kernel/binfmt_elfo32.c
arch/mips/kernel/cevt-ds1287.c [new file with mode: 0644]
arch/mips/kernel/cevt-gt641xx.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/csrc-ioasic.c [new file with mode: 0644]
arch/mips/kernel/gpio_txx9.c [new file with mode: 0644]
arch/mips/kernel/irq-gic.c [new file with mode: 0644]
arch/mips/kernel/irq-msc01.c
arch/mips/kernel/signal-common.h
arch/mips/kernel/smp-cmp.c [new file with mode: 0644]
arch/mips/kernel/smp-mt.c
arch/mips/kernel/smp.c
arch/mips/kernel/smtc.c
arch/mips/kernel/spram.c [new file with mode: 0644]
arch/mips/kernel/sync-r4k.c [new file with mode: 0644]
arch/mips/kernel/time.c
arch/mips/kernel/traps.c
arch/mips/lib/iomap-pci.c
arch/mips/math-emu/ieee754dp.h
arch/mips/math-emu/ieee754sp.h
arch/mips/mips-boards/generic/Makefile
arch/mips/mips-boards/generic/amon.c [new file with mode: 0644]
arch/mips/mips-boards/generic/init.c
arch/mips/mips-boards/generic/memory.c
arch/mips/mips-boards/generic/time.c
arch/mips/mips-boards/malta/Makefile
arch/mips/mips-boards/malta/malta_int.c
arch/mips/mips-boards/malta/malta_setup.c
arch/mips/mipssim/sim_setup.c
arch/mips/mm/Makefile
arch/mips/mm/c-r4k.c
arch/mips/mm/cache.c
arch/mips/mm/init.c
arch/mips/mm/page.c [new file with mode: 0644]
arch/mips/mm/pg-r4k.c [deleted file]
arch/mips/mm/pg-sb1.c [deleted file]
arch/mips/mm/pgtable.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/uasm.c
arch/mips/mm/uasm.h
arch/mips/nxp/pnx8550/common/Makefile [moved from arch/mips/philips/pnx8550/common/Makefile with 100% similarity]
arch/mips/nxp/pnx8550/common/gdb_hook.c [moved from arch/mips/philips/pnx8550/common/gdb_hook.c with 100% similarity]
arch/mips/nxp/pnx8550/common/int.c [moved from arch/mips/philips/pnx8550/common/int.c with 100% similarity]
arch/mips/nxp/pnx8550/common/pci.c [moved from arch/mips/philips/pnx8550/common/pci.c with 100% similarity]
arch/mips/nxp/pnx8550/common/platform.c [moved from arch/mips/philips/pnx8550/common/platform.c with 98% similarity]
arch/mips/nxp/pnx8550/common/proc.c [moved from arch/mips/philips/pnx8550/common/proc.c with 100% similarity]
arch/mips/nxp/pnx8550/common/prom.c [moved from arch/mips/philips/pnx8550/common/prom.c with 100% similarity]
arch/mips/nxp/pnx8550/common/reset.c [moved from arch/mips/philips/pnx8550/common/reset.c with 100% similarity]
arch/mips/nxp/pnx8550/common/setup.c [moved from arch/mips/philips/pnx8550/common/setup.c with 100% similarity]
arch/mips/nxp/pnx8550/common/time.c [moved from arch/mips/philips/pnx8550/common/time.c with 100% similarity]
arch/mips/nxp/pnx8550/jbs/Makefile [moved from arch/mips/philips/pnx8550/jbs/Makefile with 51% similarity]
arch/mips/nxp/pnx8550/jbs/board_setup.c [moved from arch/mips/philips/pnx8550/jbs/board_setup.c with 100% similarity]
arch/mips/nxp/pnx8550/jbs/init.c [moved from arch/mips/philips/pnx8550/jbs/init.c with 98% similarity]
arch/mips/nxp/pnx8550/jbs/irqmap.c [moved from arch/mips/philips/pnx8550/jbs/irqmap.c with 98% similarity]
arch/mips/nxp/pnx8550/stb810/Makefile [moved from arch/mips/philips/pnx8550/stb810/Makefile with 52% similarity]
arch/mips/nxp/pnx8550/stb810/board_setup.c [moved from arch/mips/philips/pnx8550/stb810/board_setup.c with 95% similarity]
arch/mips/nxp/pnx8550/stb810/irqmap.c [moved from arch/mips/philips/pnx8550/stb810/irqmap.c with 94% similarity]
arch/mips/nxp/pnx8550/stb810/prom_init.c [moved from arch/mips/philips/pnx8550/stb810/prom_init.c with 96% similarity]
arch/mips/oprofile/common.c
arch/mips/oprofile/op_impl.h
arch/mips/oprofile/op_model_mipsxx.c
arch/mips/pci/fixup-au1000.c
arch/mips/pci/ops-pnx8550.c
arch/mips/pmc-sierra/yosemite/setup.c
arch/mips/sgi-ip32/ip32-reset.c
arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_irq.c
arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c
arch/mips/tx4938/common/dbgio.c
arch/mips/tx4938/common/prom.c
arch/mips/tx4938/toshiba_rbtx4938/irq.c
arch/mips/tx4938/toshiba_rbtx4938/setup.c
arch/mips/vr41xx/common/init.c
arch/mips/vr41xx/common/siu.c
arch/mn10300/kernel/asm-offsets.c
arch/mn10300/unit-asb2305/pci-iomap.c
arch/parisc/kernel/asm-offsets.c
arch/parisc/kernel/pci-dma.c
arch/parisc/lib/iomap.c
arch/parisc/mm/init.c
arch/powerpc/Kconfig
arch/powerpc/Kconfig.debug
arch/powerpc/Makefile
arch/powerpc/boot/dts/cm5200.dts
arch/powerpc/boot/dts/lite5200.dts
arch/powerpc/boot/dts/lite5200b.dts
arch/powerpc/boot/dts/motionpro.dts
arch/powerpc/boot/dts/mpc8610_hpcd.dts
arch/powerpc/boot/dts/mpc8641_hpcn.dts
arch/powerpc/boot/dts/pcm030.dts [new file with mode: 0644]
arch/powerpc/boot/dts/tqm5200.dts
arch/powerpc/configs/52xx/cm5200_defconfig [new file with mode: 0644]
arch/powerpc/configs/52xx/lite5200b_defconfig [new file with mode: 0644]
arch/powerpc/configs/52xx/motionpro_defconfig [new file with mode: 0644]
arch/powerpc/configs/52xx/pcm030_defconfig [new file with mode: 0644]
arch/powerpc/configs/52xx/tqm5200_defconfig [new file with mode: 0644]
arch/powerpc/configs/g5_defconfig
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/head_64.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/lparcfg.c
arch/powerpc/kernel/machine_kexec.c
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/proc_ppc64.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/rio.c [deleted file]
arch/powerpc/kernel/rtas-proc.c
arch/powerpc/kernel/rtas_flash.c
arch/powerpc/kernel/setup_32.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/time.c
arch/powerpc/kvm/44x_tlb.c [new file with mode: 0644]
arch/powerpc/kvm/44x_tlb.h [new file with mode: 0644]
arch/powerpc/kvm/Kconfig [new file with mode: 0644]
arch/powerpc/kvm/Makefile [new file with mode: 0644]
arch/powerpc/kvm/booke_guest.c [new file with mode: 0644]
arch/powerpc/kvm/booke_host.c [new file with mode: 0644]
arch/powerpc/kvm/booke_interrupts.S [new file with mode: 0644]
arch/powerpc/kvm/emulate.c [new file with mode: 0644]
arch/powerpc/kvm/powerpc.c [new file with mode: 0644]
arch/powerpc/mm/mem.c
arch/powerpc/platforms/52xx/Kconfig
arch/powerpc/platforms/52xx/Makefile
arch/powerpc/platforms/52xx/mpc5200_simple.c
arch/powerpc/platforms/52xx/mpc52xx_gpio.c [new file with mode: 0644]
arch/powerpc/platforms/52xx/mpc52xx_pic.c
arch/powerpc/platforms/86xx/Kconfig
arch/powerpc/platforms/86xx/mpc8610_hpcd.c
arch/powerpc/platforms/86xx/mpc86xx_hpcn.c
arch/powerpc/platforms/cell/celleb_scc_pciex.c
arch/powerpc/platforms/cell/spufs/.gitignore [new file with mode: 0644]
arch/powerpc/platforms/cell/spufs/context.c
arch/powerpc/platforms/cell/spufs/file.c
arch/powerpc/platforms/cell/spufs/run.c
arch/powerpc/platforms/cell/spufs/sched.c
arch/powerpc/platforms/cell/spufs/spufs.h
arch/powerpc/platforms/cell/spufs/sputrace.c
arch/powerpc/platforms/iseries/lpevents.c
arch/powerpc/platforms/iseries/mf.c
arch/powerpc/platforms/iseries/proc.c
arch/powerpc/platforms/iseries/viopath.c
arch/powerpc/platforms/powermac/Makefile
arch/powerpc/platforms/powermac/setup.c
arch/powerpc/platforms/pseries/Makefile
arch/powerpc/platforms/pseries/eeh.c
arch/powerpc/platforms/pseries/hotplug-memory.c [new file with mode: 0644]
arch/powerpc/platforms/pseries/reconfig.c
arch/powerpc/platforms/pseries/rtasd.c
arch/powerpc/sysdev/axonram.c
arch/powerpc/sysdev/fsl_rio.c
arch/powerpc/sysdev/fsl_rio.h [deleted file]
arch/powerpc/sysdev/fsl_soc.c
arch/powerpc/sysdev/fsl_soc.h
arch/ppc/kernel/asm-offsets.c
arch/ppc/kernel/pci.c
arch/ppc/kernel/ppc_ksyms.c
arch/ppc/platforms/sbc82xx.c
arch/s390/Kconfig
arch/s390/Makefile
arch/s390/defconfig
arch/s390/kernel/Makefile
arch/s390/kernel/asm-offsets.c
arch/s390/kernel/early.c
arch/s390/kernel/entry.S
arch/s390/kernel/entry64.S
arch/s390/kernel/head31.S
arch/s390/kernel/head64.S
arch/s390/kernel/irq.c
arch/s390/kernel/process.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/topology.c
arch/s390/kernel/traps.c
arch/s390/kernel/vtime.c
arch/s390/kvm/Kconfig [new file with mode: 0644]
arch/s390/kvm/Makefile [new file with mode: 0644]
arch/s390/kvm/diag.c [new file with mode: 0644]
arch/s390/kvm/gaccess.h [new file with mode: 0644]
arch/s390/kvm/intercept.c [new file with mode: 0644]
arch/s390/kvm/interrupt.c [new file with mode: 0644]
arch/s390/kvm/kvm-s390.c [new file with mode: 0644]
arch/s390/kvm/kvm-s390.h [new file with mode: 0644]
arch/s390/kvm/priv.c [new file with mode: 0644]
arch/s390/kvm/sie64a.S [new file with mode: 0644]
arch/s390/kvm/sigp.c [new file with mode: 0644]
arch/s390/lib/Makefile
arch/s390/lib/uaccess_mvcos.c
arch/s390/math-emu/Makefile
arch/s390/mm/Makefile
arch/s390/mm/extmem.c
arch/s390/mm/fault.c
arch/s390/mm/hugetlbpage.c [new file with mode: 0644]
arch/s390/mm/init.c
arch/s390/mm/pgtable.c
arch/s390/mm/vmem.c
arch/sh/boards/renesas/migor/setup.c
arch/sh/boards/renesas/r7780rp/setup.c
arch/sh/drivers/pci/pci.c
arch/sh/kernel/asm-offsets.c
arch/sh/kernel/irq.c
arch/sh/mm/init.c
arch/sparc/Kconfig
arch/sparc/defconfig
arch/sparc/kernel/Makefile
arch/sparc/kernel/asm-offsets.c
arch/sparc/kernel/entry.S
arch/sparc/kernel/head.S
arch/sparc/kernel/kgdb.c [new file with mode: 0644]
arch/sparc/kernel/process.c
arch/sparc/kernel/signal.c
arch/sparc/kernel/sparc-stub.c [deleted file]
arch/sparc/kernel/sparc_ksyms.c
arch/sparc/kernel/sun4d_smp.c
arch/sparc/kernel/sun4m_smp.c
arch/sparc/kernel/sys_sparc.c
arch/sparc/lib/iomap.c
arch/sparc64/Kconfig
arch/sparc64/defconfig
arch/sparc64/kernel/Makefile
arch/sparc64/kernel/audit.c
arch/sparc64/kernel/cherrs.S [new file with mode: 0644]
arch/sparc64/kernel/entry.S [deleted file]
arch/sparc64/kernel/fpu_traps.S [new file with mode: 0644]
arch/sparc64/kernel/getsetcc.S [new file with mode: 0644]
arch/sparc64/kernel/head.S
arch/sparc64/kernel/helpers.S [new file with mode: 0644]
arch/sparc64/kernel/hvcalls.S [new file with mode: 0644]
arch/sparc64/kernel/irq.c
arch/sparc64/kernel/isa.c [deleted file]
arch/sparc64/kernel/ivec.S [new file with mode: 0644]
arch/sparc64/kernel/kgdb.c [new file with mode: 0644]
arch/sparc64/kernel/misctrap.S [new file with mode: 0644]
arch/sparc64/kernel/of_device.c
arch/sparc64/kernel/pci.c
arch/sparc64/kernel/process.c
arch/sparc64/kernel/signal.c
arch/sparc64/kernel/signal32.c
arch/sparc64/kernel/smp.c
arch/sparc64/kernel/sparc64_ksyms.c
arch/sparc64/kernel/spiterrs.S [new file with mode: 0644]
arch/sparc64/kernel/sys_sparc32.c
arch/sparc64/kernel/syscalls.S [new file with mode: 0644]
arch/sparc64/kernel/ttable.S
arch/sparc64/kernel/utrap.S [new file with mode: 0644]
arch/sparc64/lib/iomap.c
arch/sparc64/mm/init.c
arch/sparc64/mm/ultra.S
arch/um/Kconfig.x86_64
arch/um/drivers/chan_kern.c
arch/um/drivers/line.c
arch/um/drivers/mcast_kern.c
arch/um/drivers/mconsole_user.c
arch/um/drivers/net_kern.c
arch/um/drivers/port_user.c
arch/um/drivers/slip_kern.c
arch/um/drivers/stdio_console.c
arch/um/drivers/ubd_kern.c
arch/um/include/chan_kern.h
arch/um/kernel/exitcode.c
arch/um/kernel/process.c
arch/um/kernel/time.c
arch/um/kernel/um_arch.c
arch/um/os-Linux/helper.c
arch/um/os-Linux/start_up.c
arch/um/os-Linux/sys-i386/task_size.c
arch/um/sys-i386/Makefile
arch/um/sys-x86_64/Makefile
arch/v850/kernel/asm-offsets.c
arch/v850/kernel/rte_mb_a_pci.c
arch/x86/Kconfig
arch/x86/Kconfig.cpu
arch/x86/Kconfig.debug
arch/x86/boot/edd.c
arch/x86/boot/header.S
arch/x86/ia32/ia32_signal.c
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/mtrr/if.c
arch/x86/kernel/crash.c
arch/x86/kernel/e820_64.c
arch/x86/kernel/genapic_64.c
arch/x86/kernel/head64.c
arch/x86/kernel/head_32.S
arch/x86/kernel/hpet.c
arch/x86/kernel/io_apic_32.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/kdebugfs.c
arch/x86/kernel/kvm.c [new file with mode: 0644]
arch/x86/kernel/kvmclock.c [new file with mode: 0644]
arch/x86/kernel/mfgpt_32.c
arch/x86/kernel/mmconf-fam10h_64.c [new file with mode: 0644]
arch/x86/kernel/olpc.c [new file with mode: 0644]
arch/x86/kernel/pci-dma.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/reboot.c
arch/x86/kernel/setup_32.c
arch/x86/kernel/setup_64.c
arch/x86/kernel/signal_32.c
arch/x86/kernel/signal_64.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/time_32.c
arch/x86/kernel/vmlinux_64.lds.S
arch/x86/kernel/vsmp_64.c
arch/x86/kvm/Kconfig
arch/x86/kvm/Makefile
arch/x86/kvm/i8254.c [new file with mode: 0644]
arch/x86/kvm/i8254.h [new file with mode: 0644]
arch/x86/kvm/irq.c
arch/x86/kvm/irq.h
arch/x86/kvm/kvm_svm.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/segment_descriptor.h [deleted file]
arch/x86/kvm/svm.c
arch/x86/kvm/svm.h
arch/x86/kvm/tss.h [new file with mode: 0644]
arch/x86/kvm/vmx.c
arch/x86/kvm/vmx.h
arch/x86/kvm/x86.c
arch/x86/kvm/x86_emulate.c
arch/x86/lib/Makefile
arch/x86/lib/bitops_32.c [deleted file]
arch/x86/lib/bitops_64.c [deleted file]
arch/x86/mach-voyager/voyager_cat.c
arch/x86/mm/highmem_32.c
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/ioremap.c
arch/x86/mm/k8topology_64.c
arch/x86/mm/numa_64.c
arch/x86/mm/pageattr.c
arch/x86/mm/pat.c
arch/x86/pci/Makefile_32
arch/x86/pci/Makefile_64
arch/x86/pci/acpi.c
arch/x86/pci/common.c
arch/x86/pci/direct.c
arch/x86/pci/fixup.c
arch/x86/pci/i386.c
arch/x86/pci/init.c
arch/x86/pci/irq.c
arch/x86/pci/k8-bus_64.c
arch/x86/pci/legacy.c
arch/x86/pci/mmconfig-shared.c
arch/x86/pci/mmconfig_32.c
arch/x86/pci/mmconfig_64.c
arch/x86/pci/mp_bus_to_node.c [new file with mode: 0644]
arch/x86/pci/olpc.c [new file with mode: 0644]
arch/x86/pci/pci.h
arch/x86/vdso/vdso.S
arch/x86/vdso/vdso32-setup.c
arch/x86/xen/mmu.c
arch/xtensa/kernel/asm-offsets.c
block/blk-barrier.c
block/blk-core.c
block/blk-map.c
block/blk-merge.c
block/blk-settings.c
block/blk-sysfs.c
block/blk-tag.c
block/blk.h
block/bsg.c
block/elevator.c
block/genhd.c
block/scsi_ioctl.c
crypto/authenc.c
crypto/cryptd.c
crypto/eseqiv.c
drivers/Kconfig
drivers/Makefile
drivers/accessibility/Kconfig [new file with mode: 0644]
drivers/accessibility/Makefile [new file with mode: 0644]
drivers/accessibility/braille/Makefile [new file with mode: 0644]
drivers/accessibility/braille/braille_console.c [new file with mode: 0644]
drivers/acpi/Kconfig
drivers/acpi/ac.c
drivers/acpi/battery.c
drivers/acpi/bay.c
drivers/acpi/bus.c
drivers/acpi/button.c
drivers/acpi/dispatcher/dsfield.c
drivers/acpi/dispatcher/dsinit.c
drivers/acpi/dispatcher/dsmethod.c
drivers/acpi/dispatcher/dsmthdat.c
drivers/acpi/dispatcher/dsobject.c
drivers/acpi/dispatcher/dsopcode.c
drivers/acpi/dispatcher/dsutils.c
drivers/acpi/dispatcher/dswexec.c
drivers/acpi/dispatcher/dswload.c
drivers/acpi/dispatcher/dswscope.c
drivers/acpi/dispatcher/dswstate.c
drivers/acpi/ec.c
drivers/acpi/event.c
drivers/acpi/events/evevent.c
drivers/acpi/events/evgpe.c
drivers/acpi/events/evgpeblk.c
drivers/acpi/events/evmisc.c
drivers/acpi/events/evregion.c
drivers/acpi/events/evrgnini.c
drivers/acpi/events/evsci.c
drivers/acpi/events/evxface.c
drivers/acpi/events/evxfevnt.c
drivers/acpi/events/evxfregn.c
drivers/acpi/executer/exconfig.c
drivers/acpi/executer/exconvrt.c
drivers/acpi/executer/excreate.c
drivers/acpi/executer/exdump.c
drivers/acpi/executer/exfield.c
drivers/acpi/executer/exfldio.c
drivers/acpi/executer/exmisc.c
drivers/acpi/executer/exmutex.c
drivers/acpi/executer/exnames.c
drivers/acpi/executer/exoparg1.c
drivers/acpi/executer/exoparg2.c
drivers/acpi/executer/exoparg3.c
drivers/acpi/executer/exoparg6.c
drivers/acpi/executer/exprep.c
drivers/acpi/executer/exregion.c
drivers/acpi/executer/exresnte.c
drivers/acpi/executer/exresolv.c
drivers/acpi/executer/exresop.c
drivers/acpi/executer/exstore.c
drivers/acpi/executer/exstoren.c
drivers/acpi/executer/exstorob.c
drivers/acpi/executer/exsystem.c
drivers/acpi/executer/exutils.c
drivers/acpi/fan.c
drivers/acpi/glue.c
drivers/acpi/hardware/hwacpi.c
drivers/acpi/hardware/hwgpe.c
drivers/acpi/hardware/hwregs.c
drivers/acpi/hardware/hwsleep.c
drivers/acpi/hardware/hwtimer.c
drivers/acpi/namespace/nsaccess.c
drivers/acpi/namespace/nsalloc.c
drivers/acpi/namespace/nsdump.c
drivers/acpi/namespace/nsdumpdv.c
drivers/acpi/namespace/nseval.c
drivers/acpi/namespace/nsinit.c
drivers/acpi/namespace/nsload.c
drivers/acpi/namespace/nsnames.c
drivers/acpi/namespace/nsobject.c
drivers/acpi/namespace/nsparse.c
drivers/acpi/namespace/nssearch.c
drivers/acpi/namespace/nsutils.c
drivers/acpi/namespace/nswalk.c
drivers/acpi/namespace/nsxfeval.c
drivers/acpi/namespace/nsxfname.c
drivers/acpi/namespace/nsxfobj.c
drivers/acpi/osl.c
drivers/acpi/parser/psargs.c
drivers/acpi/parser/psloop.c
drivers/acpi/parser/psopcode.c
drivers/acpi/parser/psparse.c
drivers/acpi/parser/psscope.c
drivers/acpi/parser/pstree.c
drivers/acpi/parser/psutils.c
drivers/acpi/parser/pswalk.c
drivers/acpi/parser/psxface.c
drivers/acpi/power.c
drivers/acpi/processor_core.c
drivers/acpi/processor_idle.c
drivers/acpi/processor_perflib.c
drivers/acpi/processor_thermal.c
drivers/acpi/processor_throttling.c
drivers/acpi/resources/rsaddr.c
drivers/acpi/resources/rscalc.c
drivers/acpi/resources/rscreate.c
drivers/acpi/resources/rsdump.c
drivers/acpi/resources/rsinfo.c
drivers/acpi/resources/rsio.c
drivers/acpi/resources/rsirq.c
drivers/acpi/resources/rslist.c
drivers/acpi/resources/rsmemory.c
drivers/acpi/resources/rsmisc.c
drivers/acpi/resources/rsutils.c
drivers/acpi/resources/rsxface.c
drivers/acpi/sbs.c
drivers/acpi/scan.c
drivers/acpi/sleep/main.c
drivers/acpi/sleep/proc.c
drivers/acpi/system.c
drivers/acpi/tables/tbfadt.c
drivers/acpi/tables/tbfind.c
drivers/acpi/tables/tbinstal.c
drivers/acpi/tables/tbutils.c
drivers/acpi/tables/tbxface.c
drivers/acpi/tables/tbxfroot.c
drivers/acpi/thermal.c
drivers/acpi/utilities/utalloc.c
drivers/acpi/utilities/utcache.c
drivers/acpi/utilities/utcopy.c
drivers/acpi/utilities/utdebug.c
drivers/acpi/utilities/utdelete.c
drivers/acpi/utilities/uteval.c
drivers/acpi/utilities/utglobal.c
drivers/acpi/utilities/utinit.c
drivers/acpi/utilities/utmath.c
drivers/acpi/utilities/utmisc.c
drivers/acpi/utilities/utmutex.c
drivers/acpi/utilities/utobject.c
drivers/acpi/utilities/utresrc.c
drivers/acpi/utilities/utstate.c
drivers/acpi/utilities/utxface.c
drivers/acpi/utils.c
drivers/acpi/video.c
drivers/ata/Kconfig
drivers/ata/Makefile
drivers/ata/ahci.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/ata/libata.h
drivers/ata/pata_atiixp.c
drivers/ata/pata_bf54x.c
drivers/ata/pata_rb532_cf.c [moved from drivers/ata/pata_rb500_cf.c with 72% similarity]
drivers/ata/pata_via.c
drivers/ata/sata_fsl.c
drivers/ata/sata_mv.c
drivers/atm/ambassador.c
drivers/atm/ambassador.h
drivers/atm/nicstar.c
drivers/base/base.h
drivers/base/class.c
drivers/base/core.c
drivers/base/cpu.c
drivers/base/driver.c
drivers/base/firmware_class.c
drivers/base/node.c
drivers/block/aoe/aoe.h
drivers/block/aoe/aoecmd.c
drivers/block/aoe/aoedev.c
drivers/block/aoe/aoenet.c
drivers/block/brd.c
drivers/block/cciss.c
drivers/block/cpqarray.c
drivers/block/floppy.c
drivers/block/loop.c
drivers/block/nbd.c
drivers/block/paride/pd.c
drivers/block/pktcdvd.c
drivers/block/ps3disk.c
drivers/block/ub.c
drivers/block/xen-blkfront.c
drivers/bluetooth/hci_ldisc.c
drivers/cdrom/cdrom.c
drivers/cdrom/viocd.c
drivers/char/Kconfig
drivers/char/agp/agp.h
drivers/char/amiserial.c
drivers/char/apm-emulation.c
drivers/char/applicom.c
drivers/char/consolemap.c
drivers/char/cs5535_gpio.c
drivers/char/cyclades.c
drivers/char/drm/drmP.h
drivers/char/drm/drm_sysfs.c
drivers/char/drm/i830_dma.c
drivers/char/drm/i830_drv.h
drivers/char/drm/i830_irq.c
drivers/char/drm/i915_dma.c
drivers/char/drm/i915_drv.h
drivers/char/drm/r128_cce.c
drivers/char/drm/radeon_cp.c
drivers/char/ds1286.c
drivers/char/epca.c
drivers/char/esp.c
drivers/char/generic_serial.c
drivers/char/hpet.c
drivers/char/hvsi.c
drivers/char/i8k.c
drivers/char/ip2/i2ellis.c
drivers/char/ip2/i2ellis.h
drivers/char/ip2/i2hw.h
drivers/char/ip2/i2lib.c
drivers/char/ip2/i2os.h [deleted file]
drivers/char/ip2/ip2main.c
drivers/char/ipmi/Makefile
drivers/char/ipmi/ipmi_bt_sm.c
drivers/char/ipmi/ipmi_kcs_sm.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_poweroff.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_si_sm.h
drivers/char/ipmi/ipmi_smic_sm.c
drivers/char/ipmi/ipmi_watchdog.c
drivers/char/isicom.c
drivers/char/istallion.c
drivers/char/keyboard.c
drivers/char/mem.c
drivers/char/misc.c
drivers/char/mmtimer.c
drivers/char/moxa.c
drivers/char/moxa.h [new file with mode: 0644]
drivers/char/mspec.c
drivers/char/mxser.c
drivers/char/mxser.h
drivers/char/n_hdlc.c
drivers/char/n_r3964.c
drivers/char/n_tty.c
drivers/char/nozomi.c
drivers/char/pcmcia/cm4000_cs.c
drivers/char/pcmcia/cm4040_cs.c
drivers/char/pcmcia/ipwireless/hardware.c
drivers/char/pcmcia/ipwireless/hardware.h
drivers/char/pcmcia/ipwireless/network.c
drivers/char/pcmcia/ipwireless/network.h
drivers/char/pcmcia/synclink_cs.c
drivers/char/pty.c
drivers/char/random.c
drivers/char/rio/cirrus.h
drivers/char/rio/rio_linux.c
drivers/char/rio/rio_linux.h
drivers/char/rio/riocmd.c
drivers/char/rio/rioctrl.c
drivers/char/rio/riointr.c
drivers/char/rio/rioparam.c
drivers/char/rio/rioroute.c
drivers/char/rio/riotty.c
drivers/char/riscom8.c
drivers/char/rocket.c
drivers/char/rocket_int.h
drivers/char/rtc.c
drivers/char/serial167.c
drivers/char/snsc.c
drivers/char/snsc_event.c
drivers/char/sonypi.c
drivers/char/specialix.c
drivers/char/stallion.c
drivers/char/sx.c
drivers/char/synclink.c
drivers/char/synclink_gt.c
drivers/char/synclinkmp.c
drivers/char/sysrq.c
drivers/char/toshiba.c
drivers/char/tpm/Kconfig
drivers/char/tpm/tpm_nsc.c
drivers/char/tty_audit.c
drivers/char/tty_io.c
drivers/char/tty_ioctl.c
drivers/char/viocons.c
drivers/char/viotape.c
drivers/char/vt.c
drivers/char/vt_ioctl.c
drivers/cpufreq/Kconfig
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_powersave.c
drivers/cpufreq/cpufreq_stats.c
drivers/edac/Kconfig
drivers/edac/amd76x_edac.c
drivers/edac/e752x_edac.c
drivers/edac/e7xxx_edac.c
drivers/edac/edac_device.c
drivers/edac/edac_mc.c
drivers/edac/edac_module.h
drivers/edac/edac_pci.c
drivers/edac/edac_pci_sysfs.c
drivers/edac/i3000_edac.c
drivers/edac/i5000_edac.c
drivers/edac/i82443bxgx_edac.c
drivers/edac/i82860_edac.c
drivers/edac/i82875p_edac.c
drivers/edac/i82975x_edac.c
drivers/edac/pasemi_edac.c
drivers/edac/r82600_edac.c
drivers/firmware/Kconfig
drivers/firmware/dcdbas.c
drivers/firmware/dell_rbu.c
drivers/firmware/iscsi_ibft_find.c
drivers/gpio/gpiolib.c
drivers/gpio/mcp23s08.c
drivers/gpio/pca953x.c
drivers/gpio/pcf857x.c
drivers/hid/hid-core.c
drivers/hid/usbhid/hid-core.c
drivers/hwmon/ads7828.c
drivers/hwmon/adt7473.c
drivers/hwmon/asb100.c
drivers/hwmon/f75375s.c
drivers/hwmon/lm75.c
drivers/hwmon/smsc47b397.c
drivers/hwmon/w83793.c
drivers/hwmon/w83l785ts.c
drivers/i2c/busses/i2c-amd756-s4882.c
drivers/i2c/busses/i2c-piix4.c
drivers/i2c/busses/i2c-sis5595.c
drivers/i2c/busses/i2c-sis630.c
drivers/i2c/busses/i2c-stub.c
drivers/i2c/busses/i2c-taos-evm.c
drivers/i2c/chips/ds1682.c
drivers/i2c/chips/menelaus.c
drivers/i2c/chips/tps65010.c
drivers/i2c/chips/tsl2550.c
drivers/i2c/i2c-core.c
drivers/ide/Kconfig
drivers/ide/Makefile
drivers/ide/arm/bast-ide.c
drivers/ide/arm/icside.c
drivers/ide/arm/ide_arm.c
drivers/ide/arm/palm_bk3710.c
drivers/ide/arm/rapide.c
drivers/ide/cris/Makefile [deleted file]
drivers/ide/cris/ide-cris.c [deleted file]
drivers/ide/h8300/ide-h8300.c
drivers/ide/ide-acpi.c
drivers/ide/ide-cd.c
drivers/ide/ide-cd_verbose.c
drivers/ide/ide-dma.c
drivers/ide/ide-floppy.c
drivers/ide/ide-generic.c
drivers/ide/ide-io.c
drivers/ide/ide-iops.c
drivers/ide/ide-lib.c
drivers/ide/ide-pnp.c
drivers/ide/ide-probe.c
drivers/ide/ide-proc.c
drivers/ide/ide-scan-pci.c
drivers/ide/ide-tape.c
drivers/ide/ide-taskfile.c
drivers/ide/ide.c
drivers/ide/legacy/ali14xx.c
drivers/ide/legacy/buddha.c
drivers/ide/legacy/dtc2278.c
drivers/ide/legacy/falconide.c
drivers/ide/legacy/gayle.c
drivers/ide/legacy/ht6560b.c
drivers/ide/legacy/ide-4drives.c
drivers/ide/legacy/ide-cs.c
drivers/ide/legacy/ide_platform.c
drivers/ide/legacy/macide.c
drivers/ide/legacy/q40ide.c
drivers/ide/legacy/qd65xx.c
drivers/ide/legacy/umc8672.c
drivers/ide/mips/au1xxx-ide.c
drivers/ide/mips/swarm.c
drivers/ide/pci/aec62xx.c
drivers/ide/pci/alim15x3.c
drivers/ide/pci/amd74xx.c
drivers/ide/pci/atiixp.c
drivers/ide/pci/cmd640.c
drivers/ide/pci/cmd64x.c
drivers/ide/pci/cs5520.c
drivers/ide/pci/cs5530.c
drivers/ide/pci/cs5535.c
drivers/ide/pci/cy82c693.c
drivers/ide/pci/delkin_cb.c
drivers/ide/pci/hpt34x.c
drivers/ide/pci/hpt366.c
drivers/ide/pci/it8213.c
drivers/ide/pci/it821x.c
drivers/ide/pci/jmicron.c
drivers/ide/pci/ns87415.c
drivers/ide/pci/opti621.c
drivers/ide/pci/pdc202xx_new.c
drivers/ide/pci/pdc202xx_old.c
drivers/ide/pci/piix.c
drivers/ide/pci/sc1200.c
drivers/ide/pci/scc_pata.c
drivers/ide/pci/serverworks.c
drivers/ide/pci/sgiioc4.c
drivers/ide/pci/siimage.c
drivers/ide/pci/sis5513.c
drivers/ide/pci/sl82c105.c
drivers/ide/pci/slc90e66.c
drivers/ide/pci/tc86c001.c
drivers/ide/pci/triflex.c
drivers/ide/pci/trm290.c
drivers/ide/pci/via82cxxx.c
drivers/ide/ppc/mpc8xx.c
drivers/ide/ppc/pmac.c
drivers/ide/setup-pci.c
drivers/infiniband/core/umem.c
drivers/infiniband/hw/amso1100/c2_provider.c
drivers/infiniband/hw/cxgb3/cxio_hal.c
drivers/infiniband/hw/cxgb3/cxio_hal.h
drivers/infiniband/hw/cxgb3/cxio_wr.h
drivers/infiniband/hw/cxgb3/iwch.c
drivers/infiniband/hw/cxgb3/iwch.h
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb3/iwch_cm.h
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb3/iwch_provider.h
drivers/infiniband/hw/cxgb3/iwch_qp.c
drivers/infiniband/hw/ehca/ehca_classes.h
drivers/infiniband/hw/ehca/ehca_cq.c
drivers/infiniband/hw/ehca/ehca_eq.c
drivers/infiniband/hw/ehca/ehca_irq.c
drivers/infiniband/hw/ehca/ehca_main.c
drivers/infiniband/hw/ehca/ehca_mrmw.c
drivers/infiniband/hw/ehca/ehca_qp.c
drivers/infiniband/hw/ehca/ehca_reqs.c
drivers/infiniband/hw/ehca/ehca_uverbs.c
drivers/infiniband/hw/ehca/hcp_if.c
drivers/infiniband/hw/ipath/ipath_mr.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/doorbell.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx4/srq.c
drivers/infiniband/hw/mthca/mthca_mr.c
drivers/infiniband/hw/mthca/mthca_provider.c
drivers/infiniband/hw/mthca/mthca_provider.h
drivers/infiniband/hw/mthca/mthca_user.h
drivers/infiniband/hw/nes/Kconfig
drivers/infiniband/hw/nes/nes.c
drivers/infiniband/hw/nes/nes.h
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_hw.h
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/hw/nes/nes_utils.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_memory.c
drivers/input/input.c
drivers/input/joystick/iforce/iforce-usb.c
drivers/input/joystick/xpad.c
drivers/input/misc/sparcspkr.c
drivers/input/serio/serport.c
drivers/input/tablet/aiptek.c
drivers/input/tablet/gtco.c
drivers/input/tablet/kbtab.c
drivers/isdn/capi/capi.c
drivers/isdn/capi/capidrv.c
drivers/isdn/capi/capifs.c
drivers/isdn/capi/capilib.c
drivers/isdn/capi/capiutil.c
drivers/isdn/capi/kcapi.c
drivers/isdn/capi/kcapi.h
drivers/isdn/capi/kcapi_proc.c
drivers/isdn/divert/divert_procfs.c
drivers/isdn/gigaset/ser-gigaset.c
drivers/isdn/hardware/avm/b1.c
drivers/isdn/hardware/avm/b1dma.c
drivers/isdn/hardware/avm/b1isa.c
drivers/isdn/hardware/avm/b1pci.c
drivers/isdn/hardware/avm/b1pcmcia.c
drivers/isdn/hardware/avm/c4.c
drivers/isdn/hardware/avm/t1isa.c
drivers/isdn/hardware/avm/t1pci.c
drivers/isdn/hardware/eicon/divasmain.c
drivers/isdn/hardware/eicon/divasproc.c
drivers/isdn/hardware/eicon/message.c
drivers/isdn/hisax/asuscom.c
drivers/isdn/hisax/avm_pci.c
drivers/isdn/hisax/diva.c
drivers/isdn/hisax/elsa.c
drivers/isdn/hisax/hfc_sx.c
drivers/isdn/hisax/hfc_usb.c
drivers/isdn/hisax/hfcscard.c
drivers/isdn/hisax/hisax_debug.h
drivers/isdn/hisax/hisax_fcpcipnp.c
drivers/isdn/hisax/ix1_micro.c
drivers/isdn/hisax/niccy.c
drivers/isdn/hisax/sedlbauer.c
drivers/isdn/hisax/st5481.h
drivers/isdn/hisax/st5481_usb.c
drivers/isdn/hisax/teles3.c
drivers/isdn/hysdn/hysdn_procconf.c
drivers/isdn/hysdn/hysdn_proclog.c
drivers/isdn/i4l/isdn_common.c
drivers/isdn/i4l/isdn_net.h
drivers/isdn/i4l/isdn_ppp.c
drivers/isdn/i4l/isdn_tty.c
drivers/leds/led-class.c
drivers/macintosh/Kconfig
drivers/macintosh/Makefile
drivers/macintosh/windfarm_lm75_sensor.c
drivers/macintosh/windfarm_max6690_sensor.c
drivers/macintosh/windfarm_pm121.c [new file with mode: 0644]
drivers/macintosh/windfarm_smu_controls.c
drivers/mca/mca-legacy.c
drivers/mca/mca-proc.c
drivers/md/dm-emc.c
drivers/md/dm-mpath-hp-sw.c
drivers/md/dm-mpath-rdac.c
drivers/md/dm-table.c
drivers/md/dm-uevent.c
drivers/md/md.c
drivers/md/multipath.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/md/raid6algos.c
drivers/media/Kconfig
drivers/media/Makefile
drivers/media/common/Makefile
drivers/media/common/tuners/Kconfig [new file with mode: 0644]
drivers/media/common/tuners/Makefile [new file with mode: 0644]
drivers/media/common/tuners/mt2060.c [moved from drivers/media/dvb/frontends/mt2060.c with 100% similarity]
drivers/media/common/tuners/mt2060.h [moved from drivers/media/dvb/frontends/mt2060.h with 90% similarity]
drivers/media/common/tuners/mt2060_priv.h [moved from drivers/media/dvb/frontends/mt2060_priv.h with 100% similarity]
drivers/media/common/tuners/mt20xx.c [moved from drivers/media/video/mt20xx.c with 100% similarity]
drivers/media/common/tuners/mt20xx.h [moved from drivers/media/video/mt20xx.h with 91% similarity]
drivers/media/common/tuners/mt2131.c [moved from drivers/media/dvb/frontends/mt2131.c with 100% similarity]
drivers/media/common/tuners/mt2131.h [moved from drivers/media/dvb/frontends/mt2131.h with 91% similarity]
drivers/media/common/tuners/mt2131_priv.h [moved from drivers/media/dvb/frontends/mt2131_priv.h with 100% similarity]
drivers/media/common/tuners/mt2266.c [moved from drivers/media/dvb/frontends/mt2266.c with 100% similarity]
drivers/media/common/tuners/mt2266.h [moved from drivers/media/dvb/frontends/mt2266.h with 88% similarity]
drivers/media/common/tuners/qt1010.c [moved from drivers/media/dvb/frontends/qt1010.c with 100% similarity]
drivers/media/common/tuners/qt1010.h [moved from drivers/media/dvb/frontends/qt1010.h with 91% similarity]
drivers/media/common/tuners/qt1010_priv.h [moved from drivers/media/dvb/frontends/qt1010_priv.h with 100% similarity]
drivers/media/common/tuners/tda18271-common.c [moved from drivers/media/dvb/frontends/tda18271-common.c with 100% similarity]
drivers/media/common/tuners/tda18271-fe.c [moved from drivers/media/dvb/frontends/tda18271-fe.c with 100% similarity]
drivers/media/common/tuners/tda18271-maps.c [moved from drivers/media/dvb/frontends/tda18271-tables.c with 100% similarity]
drivers/media/common/tuners/tda18271-priv.h [moved from drivers/media/dvb/frontends/tda18271-priv.h with 100% similarity]
drivers/media/common/tuners/tda18271.h [moved from drivers/media/dvb/frontends/tda18271.h with 96% similarity]
drivers/media/common/tuners/tda827x.c [moved from drivers/media/dvb/frontends/tda827x.c with 100% similarity]
drivers/media/common/tuners/tda827x.h [moved from drivers/media/dvb/frontends/tda827x.h with 93% similarity]
drivers/media/common/tuners/tda8290.c [moved from drivers/media/video/tda8290.c with 99% similarity]
drivers/media/common/tuners/tda8290.h [moved from drivers/media/video/tda8290.h with 94% similarity]
drivers/media/common/tuners/tda9887.c [moved from drivers/media/video/tda9887.c with 100% similarity]
drivers/media/common/tuners/tda9887.h [moved from drivers/media/video/tda9887.h with 92% similarity]
drivers/media/common/tuners/tea5761.c [moved from drivers/media/video/tea5761.c with 100% similarity]
drivers/media/common/tuners/tea5761.h [moved from drivers/media/video/tea5761.h with 93% similarity]
drivers/media/common/tuners/tea5767.c [moved from drivers/media/video/tea5767.c with 100% similarity]
drivers/media/common/tuners/tea5767.h [moved from drivers/media/video/tea5767.h with 94% similarity]
drivers/media/common/tuners/tuner-i2c.h [moved from drivers/media/video/tuner-i2c.h with 100% similarity]
drivers/media/common/tuners/tuner-simple.c [moved from drivers/media/video/tuner-simple.c with 100% similarity]
drivers/media/common/tuners/tuner-simple.h [moved from drivers/media/video/tuner-simple.h with 92% similarity]
drivers/media/common/tuners/tuner-types.c [moved from drivers/media/video/tuner-types.c with 100% similarity]
drivers/media/common/tuners/tuner-xc2028-types.h [moved from drivers/media/video/tuner-xc2028-types.h with 100% similarity]
drivers/media/common/tuners/tuner-xc2028.c [moved from drivers/media/video/tuner-xc2028.c with 100% similarity]
drivers/media/common/tuners/tuner-xc2028.h [moved from drivers/media/video/tuner-xc2028.h with 93% similarity]
drivers/media/common/tuners/xc5000.c [moved from drivers/media/dvb/frontends/xc5000.c with 100% similarity]
drivers/media/common/tuners/xc5000.h [moved from drivers/media/dvb/frontends/xc5000.h with 92% similarity]
drivers/media/common/tuners/xc5000_priv.h [moved from drivers/media/dvb/frontends/xc5000_priv.h with 100% similarity]
drivers/media/dvb/Kconfig
drivers/media/dvb/b2c2/Kconfig
drivers/media/dvb/b2c2/Makefile
drivers/media/dvb/bt8xx/Kconfig
drivers/media/dvb/bt8xx/Makefile
drivers/media/dvb/bt8xx/dst.c
drivers/media/dvb/dvb-core/Kconfig [deleted file]
drivers/media/dvb/dvb-core/dvb_frontend.c
drivers/media/dvb/dvb-core/dvbdev.h
drivers/media/dvb/dvb-usb/Kconfig
drivers/media/dvb/dvb-usb/Makefile
drivers/media/dvb/frontends/Kconfig
drivers/media/dvb/frontends/Makefile
drivers/media/dvb/frontends/s5h1420.c
drivers/media/video/Kconfig
drivers/media/video/Makefile
drivers/media/video/au0828/Kconfig
drivers/media/video/au0828/Makefile
drivers/media/video/au0828/au0828-dvb.c
drivers/media/video/bt8xx/Kconfig
drivers/media/video/bt8xx/Makefile
drivers/media/video/bt8xx/bttvp.h
drivers/media/video/cs5345.c
drivers/media/video/cs53l32a.c
drivers/media/video/cx18/Kconfig [new file with mode: 0644]
drivers/media/video/cx18/Makefile [new file with mode: 0644]
drivers/media/video/cx18/cx18-audio.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-audio.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-av-audio.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-av-core.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-av-core.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-av-firmware.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-av-vbi.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-cards.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-cards.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-controls.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-controls.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-driver.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-driver.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-dvb.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-dvb.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-fileops.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-fileops.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-firmware.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-firmware.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-gpio.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-gpio.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-i2c.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-i2c.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-ioctl.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-ioctl.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-irq.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-irq.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-mailbox.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-mailbox.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-queue.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-queue.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-scb.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-scb.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-streams.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-streams.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-vbi.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-vbi.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-version.h [new file with mode: 0644]
drivers/media/video/cx18/cx18-video.c [new file with mode: 0644]
drivers/media/video/cx18/cx18-video.h [new file with mode: 0644]
drivers/media/video/cx18/cx23418.h [new file with mode: 0644]
drivers/media/video/cx23885/Kconfig
drivers/media/video/cx23885/Makefile
drivers/media/video/cx25840/cx25840-core.c
drivers/media/video/cx88/Kconfig
drivers/media/video/cx88/Makefile
drivers/media/video/cx88/cx88-cards.c
drivers/media/video/cx88/cx88-i2c.c
drivers/media/video/em28xx/Kconfig
drivers/media/video/em28xx/Makefile
drivers/media/video/ivtv/Kconfig
drivers/media/video/ivtv/Makefile
drivers/media/video/ivtv/ivtv-cards.c
drivers/media/video/ivtv/ivtv-cards.h
drivers/media/video/ivtv/ivtv-driver.c
drivers/media/video/ivtv/ivtv-fileops.c
drivers/media/video/ivtv/ivtv-gpio.c
drivers/media/video/ivtv/ivtv-i2c.c
drivers/media/video/ivtv/ivtv-ioctl.c
drivers/media/video/ivtv/ivtv-irq.c
drivers/media/video/ivtv/ivtv-version.h
drivers/media/video/ivtv/ivtv-yuv.c
drivers/media/video/ivtv/ivtvfb.c
drivers/media/video/m52790.c
drivers/media/video/msp3400-driver.c
drivers/media/video/mt9m001.c
drivers/media/video/mt9v022.c
drivers/media/video/pvrusb2/Kconfig
drivers/media/video/pvrusb2/Makefile
drivers/media/video/pvrusb2/pvrusb2-audio.c
drivers/media/video/pvrusb2/pvrusb2-context.c
drivers/media/video/pvrusb2/pvrusb2-cx2584x-v4l.c
drivers/media/video/pvrusb2/pvrusb2-debug.h
drivers/media/video/pvrusb2/pvrusb2-devattr.c
drivers/media/video/pvrusb2/pvrusb2-dvb.c
drivers/media/video/pvrusb2/pvrusb2-v4l2.c
drivers/media/video/pvrusb2/pvrusb2-video-v4l.c
drivers/media/video/saa7115.c
drivers/media/video/saa7127.c
drivers/media/video/saa7134/Kconfig
drivers/media/video/saa7134/Makefile
drivers/media/video/saa7134/saa7134-cards.c
drivers/media/video/saa7134/saa7134-i2c.c
drivers/media/video/saa7134/saa7134-input.c
drivers/media/video/saa7134/saa7134.h
drivers/media/video/saa717x.c
drivers/media/video/tcm825x.c
drivers/media/video/tlv320aic23b.c
drivers/media/video/tuner-core.c
drivers/media/video/tvaudio.c
drivers/media/video/upd64031a.c
drivers/media/video/upd64083.c
drivers/media/video/usbvideo/vicam.c
drivers/media/video/usbvision/Kconfig
drivers/media/video/usbvision/Makefile
drivers/media/video/v4l2-common.c
drivers/media/video/videobuf-core.c
drivers/media/video/vino.c
drivers/media/video/vp27smpx.c
drivers/media/video/wm8739.c
drivers/media/video/wm8775.c
drivers/media/video/zoran_procfs.c
drivers/message/i2o/i2o_block.c
drivers/message/i2o/i2o_proc.c
drivers/mfd/asic3.c
drivers/mfd/htc-pasic3.c
drivers/mfd/sm501.c
drivers/mfd/ucb1x00-ts.c
drivers/misc/Kconfig
drivers/misc/Makefile
drivers/misc/eeepc-laptop.c [new file with mode: 0644]
drivers/misc/enclosure.c
drivers/misc/hdpuftrs/hdpu_cpustate.c
drivers/misc/hdpuftrs/hdpu_nexus.c
drivers/misc/ibmasm/command.c
drivers/misc/ibmasm/heartbeat.c
drivers/misc/intel_menlow.c
drivers/misc/ioc4.c
drivers/misc/kgdbts.c
drivers/misc/phantom.c
drivers/misc/sgi-xp/xpc_partition.c
drivers/misc/sony-laptop.c
drivers/misc/thinkpad_acpi.c
drivers/mmc/host/mmc_spi.c
drivers/mmc/host/mmci.c
drivers/mtd/maps/plat-ram.c
drivers/net/3c505.c
drivers/net/3c505.h
drivers/net/3c509.c
drivers/net/3c515.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/arm/Kconfig
drivers/net/arm/Makefile
drivers/net/arm/am79c961a.c
drivers/net/arm/ixp4xx_eth.c [new file with mode: 0644]
drivers/net/bfin_mac.c
drivers/net/bfin_mac.h
drivers/net/bonding/bond_main.c
drivers/net/cxgb3/version.h
drivers/net/e100.c
drivers/net/eepro.c
drivers/net/fec.c
drivers/net/fec.h
drivers/net/fec_mpc52xx.c
drivers/net/gianfar.c
drivers/net/gianfar.h
drivers/net/gianfar_mii.c
drivers/net/gianfar_mii.h
drivers/net/hamachi.c
drivers/net/hamradio/6pack.c
drivers/net/hamradio/mkiss.c
drivers/net/ibmveth.c
drivers/net/irda/irtty-sir.c
drivers/net/irda/mcs7780.c
drivers/net/irda/stir4200.c
drivers/net/irda/vlsi_ir.c
drivers/net/mlx4/alloc.c
drivers/net/mlx4/cq.c
drivers/net/mlx4/main.c
drivers/net/mlx4/mlx4.h
drivers/net/mlx4/mr.c
drivers/net/mlx4/qp.c
drivers/net/myri10ge/myri10ge.c
drivers/net/phy/Kconfig
drivers/net/phy/phy_device.c
drivers/net/phy/smsc.c
drivers/net/ppp_async.c
drivers/net/ppp_synctty.c
drivers/net/pppoe.c
drivers/net/pppol2tp.c
drivers/net/r8169.c
drivers/net/rionet.c
drivers/net/s2io.c
drivers/net/s2io.h
drivers/net/sfc/Kconfig [new file with mode: 0644]
drivers/net/sfc/Makefile [new file with mode: 0644]
drivers/net/sfc/bitfield.h [new file with mode: 0644]
drivers/net/sfc/boards.c [new file with mode: 0644]
drivers/net/sfc/boards.h [new file with mode: 0644]
drivers/net/sfc/efx.c [new file with mode: 0644]
drivers/net/sfc/efx.h [new file with mode: 0644]
drivers/net/sfc/enum.h [new file with mode: 0644]
drivers/net/sfc/ethtool.c [new file with mode: 0644]
drivers/net/sfc/ethtool.h [new file with mode: 0644]
drivers/net/sfc/falcon.c [new file with mode: 0644]
drivers/net/sfc/falcon.h [new file with mode: 0644]
drivers/net/sfc/falcon_hwdefs.h [new file with mode: 0644]
drivers/net/sfc/falcon_io.h [new file with mode: 0644]
drivers/net/sfc/falcon_xmac.c [new file with mode: 0644]
drivers/net/sfc/gmii.h [new file with mode: 0644]
drivers/net/sfc/i2c-direct.c [new file with mode: 0644]
drivers/net/sfc/i2c-direct.h [new file with mode: 0644]
drivers/net/sfc/mac.h [new file with mode: 0644]
drivers/net/sfc/mdio_10g.c [new file with mode: 0644]
drivers/net/sfc/mdio_10g.h [new file with mode: 0644]
drivers/net/sfc/net_driver.h [new file with mode: 0644]
drivers/net/sfc/phy.h [new file with mode: 0644]
drivers/net/sfc/rx.c [new file with mode: 0644]
drivers/net/sfc/rx.h [new file with mode: 0644]
drivers/net/sfc/sfe4001.c [new file with mode: 0644]
drivers/net/sfc/spi.h [new file with mode: 0644]
drivers/net/sfc/tenxpress.c [new file with mode: 0644]
drivers/net/sfc/tx.c [new file with mode: 0644]
drivers/net/sfc/tx.h [new file with mode: 0644]
drivers/net/sfc/workarounds.h [new file with mode: 0644]
drivers/net/sfc/xenpack.h [new file with mode: 0644]
drivers/net/sfc/xfp_phy.c [new file with mode: 0644]
drivers/net/sis190.c
drivers/net/slip.c
drivers/net/tehuti.c
drivers/net/tg3.c
drivers/net/tulip/de4x5.c
drivers/net/tulip/de4x5.h
drivers/net/tulip/tulip.h
drivers/net/tulip/tulip_core.c
drivers/net/wan/pc300_tty.c
drivers/net/wan/x25_asy.c
drivers/net/wireless/Makefile
drivers/net/wireless/airo.c
drivers/net/wireless/ath5k/base.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43legacy/main.c
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/iwl-3945.c
drivers/net/wireless/libertas/scan.c
drivers/net/wireless/strip.c
drivers/net/wireless/zd1211rw/zd_usb.c
drivers/net/yellowfin.c
drivers/nubus/proc.c
drivers/oprofile/buffer_sync.c
drivers/oprofile/cpu_buffer.c
drivers/oprofile/cpu_buffer.h
drivers/oprofile/oprofile_stats.c
drivers/parisc/ccio-dma.c
drivers/parisc/sba_iommu.c
drivers/parport/ieee1284.c
drivers/parport/parport_gsc.c
drivers/parport/parport_pc.c
drivers/pci/hotplug/pciehp.h
drivers/pci/hotplug/pciehp_core.c
drivers/pci/hotplug/pciehp_ctrl.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/hotplug/shpchp_core.c
drivers/pci/msi.c
drivers/pci/pci-driver.c
drivers/pci/pcie/Kconfig
drivers/pci/pcie/aer/aerdrv_acpi.c
drivers/pci/probe.c
drivers/pci/proc.c
drivers/pcmcia/Kconfig
drivers/pcmcia/au1000_db1x00.c
drivers/pcmcia/au1000_generic.c
drivers/pcmcia/au1000_pb1x00.c
drivers/pcmcia/au1000_xxs1500.c
drivers/pcmcia/cardbus.c
drivers/pcmcia/cistpl.c
drivers/pcmcia/cs.c
drivers/pcmcia/cs_internal.h
drivers/pcmcia/ds.c
drivers/pcmcia/i82092.c
drivers/pcmcia/omap_cf.c
drivers/pcmcia/pcmcia_ioctl.c
drivers/pcmcia/pd6729.c
drivers/pcmcia/pxa2xx_lubbock.c
drivers/pcmcia/pxa2xx_mainstone.c
drivers/pcmcia/rsrc_nonstatic.c
drivers/pcmcia/sa1100_assabet.c
drivers/pcmcia/sa1100_badge4.c
drivers/pcmcia/sa1100_cerf.c
drivers/pcmcia/sa1100_jornada720.c
drivers/pcmcia/sa1100_neponset.c
drivers/pcmcia/sa1100_shannon.c
drivers/pcmcia/sa1100_simpad.c
drivers/pcmcia/soc_common.c
drivers/pcmcia/soc_common.h
drivers/pcmcia/socket_sysfs.c
drivers/pnp/base.h
drivers/pnp/card.c
drivers/pnp/core.c
drivers/pnp/driver.c
drivers/pnp/interface.c
drivers/pnp/isapnp/Makefile
drivers/pnp/isapnp/core.c
drivers/pnp/isapnp/proc.c
drivers/pnp/manager.c
drivers/pnp/pnpacpi/Makefile
drivers/pnp/pnpacpi/core.c
drivers/pnp/pnpacpi/pnpacpi.h
drivers/pnp/pnpacpi/rsparser.c
drivers/pnp/pnpbios/Makefile
drivers/pnp/pnpbios/bioscalls.c
drivers/pnp/pnpbios/core.c
drivers/pnp/pnpbios/pnpbios.h
drivers/pnp/pnpbios/proc.c
drivers/pnp/pnpbios/rsparser.c
drivers/pnp/quirks.c
drivers/pnp/resource.c
drivers/pnp/support.c
drivers/pnp/system.c
drivers/power/ds2760_battery.c
drivers/power/olpc_battery.c
drivers/power/power_supply_core.c
drivers/power/power_supply_leds.c
drivers/rapidio/Kconfig
drivers/rapidio/rio-access.c
drivers/rapidio/rio-scan.c
drivers/rapidio/rio-sysfs.c
drivers/rapidio/rio.c
drivers/rapidio/rio.h
drivers/rtc/Kconfig
drivers/rtc/rtc-at91rm9200.c
drivers/rtc/rtc-at91sam9.c
drivers/rtc/rtc-bfin.c
drivers/rtc/rtc-cmos.c
drivers/rtc/rtc-ds1302.c
drivers/rtc/rtc-ds1307.c
drivers/rtc/rtc-ds1374.c
drivers/rtc/rtc-ds1511.c
drivers/rtc/rtc-ds1672.c
drivers/rtc/rtc-isl1208.c
drivers/rtc/rtc-m41t80.c
drivers/rtc/rtc-max6900.c
drivers/rtc/rtc-max6902.c
drivers/rtc/rtc-pcf8563.c
drivers/rtc/rtc-pcf8583.c
drivers/rtc/rtc-proc.c
drivers/rtc/rtc-rs5c313.c
drivers/rtc/rtc-rs5c372.c
drivers/rtc/rtc-s35390a.c
drivers/rtc/rtc-s3c.c
drivers/rtc/rtc-sh.c
drivers/rtc/rtc-sysfs.c
drivers/rtc/rtc-test.c
drivers/rtc/rtc-v3020.c
drivers/rtc/rtc-x1205.c
drivers/s390/Makefile
drivers/s390/block/dasd_proc.c
drivers/s390/block/dcssblk.c
drivers/s390/char/con3215.c
drivers/s390/char/sclp_config.c
drivers/s390/char/sclp_tty.c
drivers/s390/char/sclp_vt220.c
drivers/s390/char/tape_proc.c
drivers/s390/char/tty3270.c
drivers/s390/cio/blacklist.c
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/cio.c
drivers/s390/cio/cio.h
drivers/s390/cio/cmf.c
drivers/s390/cio/css.c
drivers/s390/cio/device.c
drivers/s390/cio/device_fsm.c
drivers/s390/cio/device_ops.c
drivers/s390/cio/qdio.c
drivers/s390/kvm/Makefile [new file with mode: 0644]
drivers/s390/kvm/kvm_virtio.c [new file with mode: 0644]
drivers/s390/net/cu3088.c
drivers/s390/net/lcs.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3.h
drivers/s390/net/qeth_l3_main.c
drivers/s390/scsi/zfcp_fsf.c
drivers/s390/scsi/zfcp_fsf.h
drivers/s390/scsi/zfcp_scsi.c
drivers/sbus/char/cpwatchdog.c
drivers/sbus/char/uctrl.c
drivers/scsi/FlashPoint.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/aha152x.c
drivers/scsi/aha1542.c
drivers/scsi/aic7xxx/aic7770_osm.c
drivers/scsi/aic7xxx/aic79xx.h
drivers/scsi/aic7xxx/aic79xx.reg
drivers/scsi/aic7xxx/aic79xx_core.c
drivers/scsi/aic7xxx/aic79xx_inline.h
drivers/scsi/aic7xxx/aic79xx_osm.c
drivers/scsi/aic7xxx/aic79xx_osm.h
drivers/scsi/aic7xxx/aic79xx_osm_pci.c
drivers/scsi/aic7xxx/aic79xx_pci.c
drivers/scsi/aic7xxx/aic79xx_proc.c
drivers/scsi/aic7xxx/aic79xx_reg.h_shipped
drivers/scsi/aic7xxx/aic79xx_reg_print.c_shipped
drivers/scsi/aic7xxx/aic79xx_seq.h_shipped
drivers/scsi/aic7xxx/aic7xxx.h
drivers/scsi/aic7xxx/aic7xxx.reg
drivers/scsi/aic7xxx/aic7xxx_93cx6.c
drivers/scsi/aic7xxx/aic7xxx_core.c
drivers/scsi/aic7xxx/aic7xxx_inline.h
drivers/scsi/aic7xxx/aic7xxx_osm.c
drivers/scsi/aic7xxx/aic7xxx_osm.h
drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
drivers/scsi/aic7xxx/aic7xxx_pci.c
drivers/scsi/aic7xxx/aic7xxx_proc.c
drivers/scsi/aic7xxx/aic7xxx_reg_print.c_shipped
drivers/scsi/aic7xxx/aic7xxx_seq.h_shipped
drivers/scsi/aic7xxx/aicasm/aicasm.c
drivers/scsi/aic7xxx/aicasm/aicasm_gram.y
drivers/scsi/aic7xxx/aicasm/aicasm_scan.l
drivers/scsi/aic7xxx/aicasm/aicasm_symbol.c
drivers/scsi/aic7xxx/aicasm/aicasm_symbol.h
drivers/scsi/dpt_i2o.c
drivers/scsi/eata.c
drivers/scsi/esp_scsi.c
drivers/scsi/esp_scsi.h
drivers/scsi/fdomain.c
drivers/scsi/hosts.c
drivers/scsi/ide-scsi.c
drivers/scsi/jazz_esp.c
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/mac_esp.c [new file with mode: 0644]
drivers/scsi/megaraid.c
drivers/scsi/mvsas.c
drivers/scsi/ncr53c8xx.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_fw.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_priv.h
drivers/scsi/scsi_proc.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/scsi_transport_sas.c
drivers/scsi/scsi_transport_spi.c
drivers/scsi/sd.c
drivers/scsi/sg.c
drivers/scsi/sgiwd93.c
drivers/scsi/sni_53c710.c
drivers/scsi/st.c
drivers/scsi/sun3x_esp.c
drivers/scsi/sym53c8xx_2/sym_hipd.c
drivers/scsi/u14-34f.c
drivers/scsi/ultrastor.c
drivers/serial/68328serial.c
drivers/serial/68360serial.c
drivers/serial/8250.c
drivers/serial/8250_au1x00.c [deleted file]
drivers/serial/8250_early.c
drivers/serial/8250_pci.c
drivers/serial/Kconfig
drivers/serial/Makefile
drivers/serial/atmel_serial.c
drivers/serial/bfin_5xx.c
drivers/serial/bfin_sport_uart.c [new file with mode: 0644]
drivers/serial/bfin_sport_uart.h [new file with mode: 0644]
drivers/serial/cpm_uart/cpm_uart_core.c
drivers/serial/crisv10.c
drivers/serial/dz.c
drivers/serial/ioc3_serial.c
drivers/serial/ioc4_serial.c
drivers/serial/jsm/jsm.h
drivers/serial/jsm/jsm_driver.c
drivers/serial/kgdboc.c
drivers/serial/mcfserial.c
drivers/serial/mpc52xx_uart.c
drivers/serial/netx-serial.c
drivers/serial/s3c2410.c
drivers/serial/sa1100.c
drivers/serial/serial_core.c
drivers/serial/sh-sci.c
drivers/serial/sn_console.c
drivers/serial/sunzilog.c
drivers/serial/uartlite.c
drivers/serial/ucc_uart.c
drivers/serial/vr41xx_siu.c
drivers/spi/Kconfig
drivers/spi/atmel_spi.c
drivers/spi/omap_uwire.c
drivers/spi/pxa2xx_spi.c
drivers/spi/spi_bfin5xx.c
drivers/spi/spi_bitbang.c
drivers/spi/spi_imx.c
drivers/spi/spi_mpc83xx.c
drivers/spi/spi_s3c24xx.c
drivers/spi/xilinx_spi.c
drivers/thermal/Kconfig
drivers/thermal/Makefile
drivers/thermal/thermal_sys.c [moved from drivers/thermal/thermal.c with 81% similarity]
drivers/usb/atm/cxacru.c
drivers/usb/atm/ueagle-atm.c
drivers/usb/class/cdc-acm.c
drivers/usb/core/inode.c
drivers/usb/gadget/amd5536udc.c
drivers/usb/gadget/at91_udc.c
drivers/usb/gadget/dummy_hcd.c
drivers/usb/gadget/gmidi.c
drivers/usb/gadget/goku_udc.c
drivers/usb/gadget/m66592-udc.h
drivers/usb/gadget/omap_udc.c
drivers/usb/gadget/rndis.c
drivers/usb/gadget/serial.c
drivers/usb/gadget/usbstring.c
drivers/usb/host/ehci-dbg.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-q.c
drivers/usb/host/ohci-hub.c
drivers/usb/host/r8a66597-hcd.c
drivers/usb/host/sl811-hcd.c
drivers/usb/serial/aircable.c
drivers/usb/serial/cypress_m8.c
drivers/usb/serial/digi_acceleport.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/io_edgeport.c
drivers/usb/serial/kl5kusb105.c
drivers/usb/serial/oti6858.c
drivers/usb/serial/spcp8x5.c
drivers/usb/serial/usb-serial.c
drivers/usb/serial/whiteheat.c
drivers/video/Kconfig
drivers/video/Makefile
drivers/video/am200epd.c [new file with mode: 0644]
drivers/video/amifb.c
drivers/video/arkfb.c
drivers/video/atafb.c
drivers/video/atmel_lcdfb.c
drivers/video/aty/aty128fb.c
drivers/video/aty/atyfb_base.c
drivers/video/aty/mach64_ct.c
drivers/video/aty/radeon_base.c
drivers/video/aty/radeon_i2c.c
drivers/video/aty/radeon_monitor.c
drivers/video/aty/radeonfb.h
drivers/video/bf54x-lq043fb.c
drivers/video/bw2.c
drivers/video/cfbcopyarea.c
drivers/video/cfbfillrect.c
drivers/video/cfbimgblt.c
drivers/video/cg14.c
drivers/video/cg3.c
drivers/video/cg6.c
drivers/video/cirrusfb.c
drivers/video/clps711xfb.c
drivers/video/console/fbcon.c
drivers/video/console/fbcon.h
drivers/video/console/mdacon.c
drivers/video/console/sticon.c
drivers/video/console/vgacon.c
drivers/video/fb_draw.h
drivers/video/fbmem.c
drivers/video/ffb.c
drivers/video/fsl-diu-fb.c [new file with mode: 0644]
drivers/video/fsl-diu-fb.h [new file with mode: 0644]
drivers/video/geode/Kconfig
drivers/video/geode/Makefile
drivers/video/geode/display_gx.c
drivers/video/geode/display_gx.h [deleted file]
drivers/video/geode/gxfb.h [new file with mode: 0644]
drivers/video/geode/gxfb_core.c
drivers/video/geode/lxfb.h
drivers/video/geode/lxfb_core.c
drivers/video/geode/lxfb_ops.c
drivers/video/geode/suspend_gx.c [new file with mode: 0644]
drivers/video/geode/video_gx.c
drivers/video/geode/video_gx.h [deleted file]
drivers/video/gxt4500.c
drivers/video/hecubafb.c
drivers/video/imsttfb.c
drivers/video/imxfb.c
drivers/video/intelfb/intelfb.h
drivers/video/intelfb/intelfb_i2c.c
drivers/video/intelfb/intelfbdrv.c
drivers/video/intelfb/intelfbhw.c
drivers/video/leo.c
drivers/video/matrox/matroxfb_DAC1064.c
drivers/video/matrox/matroxfb_Ti3026.c
drivers/video/matrox/matroxfb_accel.c
drivers/video/matrox/matroxfb_base.c
drivers/video/matrox/matroxfb_crtc2.c
drivers/video/matrox/matroxfb_maven.c
drivers/video/matrox/matroxfb_misc.c
drivers/video/metronomefb.c
drivers/video/modedb.c
drivers/video/n411.c [new file with mode: 0644]
drivers/video/nvidia/nv_hw.c
drivers/video/nvidia/nv_setup.c
drivers/video/nvidia/nvidia.c
drivers/video/offb.c
drivers/video/p9100.c
drivers/video/pm2fb.c
drivers/video/pm3fb.c
drivers/video/pxafb.c
drivers/video/pxafb.h
drivers/video/riva/fbdev.c
drivers/video/riva/nv_driver.c
drivers/video/riva/riva_hw.c
drivers/video/s3c2410fb.c
drivers/video/s3fb.c
drivers/video/sa1100fb.h
drivers/video/savage/savagefb-i2c.c
drivers/video/sis/sis.h
drivers/video/sstfb.c
drivers/video/stifb.c
drivers/video/syscopyarea.c
drivers/video/sysfillrect.c
drivers/video/sysimgblt.c
drivers/video/tcx.c
drivers/video/tdfxfb.c
drivers/video/tridentfb.c
drivers/video/uvesafb.c
drivers/video/vermilion/vermilion.c
drivers/video/vt8623fb.c
drivers/video/w100fb.c
drivers/w1/w1_log.h
drivers/zorro/proc.c
fs/Kconfig
fs/Kconfig.binfmt
fs/adfs/adfs.h
fs/adfs/dir_f.c
fs/affs/file.c
fs/affs/super.c
fs/afs/afs_cm.h
fs/afs/cell.c
fs/afs/cmservice.c
fs/afs/dir.c
fs/afs/internal.h
fs/afs/proc.c
fs/aio.c
fs/autofs4/autofs_i.h
fs/autofs4/expire.c
fs/autofs4/root.c
fs/autofs4/waitq.c
fs/befs/linuxvfs.c
fs/bfs/bfs.h
fs/binfmt_aout.c
fs/binfmt_elf.c
fs/binfmt_elf_fdpic.c
fs/binfmt_em86.c
fs/binfmt_flat.c
fs/binfmt_misc.c
fs/binfmt_script.c
fs/bio.c
fs/buffer.c
fs/char_dev.c
fs/cifs/cifs_debug.c
fs/coda/coda_linux.c
fs/coda/dir.c
fs/compat.c
fs/compat_ioctl.c
fs/configfs/file.c
fs/configfs/inode.c
fs/configfs/mount.c
fs/configfs/symlink.c
fs/debugfs/file.c
fs/devpts/inode.c
fs/dlm/lockspace.c
fs/dnotify.c
fs/dquot.c
fs/drop_caches.c
fs/ecryptfs/Makefile
fs/ecryptfs/crypto.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/file.c
fs/ecryptfs/inode.c
fs/ecryptfs/keystore.c
fs/ecryptfs/main.c
fs/ecryptfs/messaging.c
fs/ecryptfs/miscdev.c [new file with mode: 0644]
fs/ecryptfs/mmap.c
fs/ecryptfs/netlink.c
fs/ecryptfs/read_write.c
fs/eventpoll.c
fs/exec.c
fs/exportfs/expfs.c
fs/ext2/balloc.c
fs/ext2/dir.c
fs/ext2/ialloc.c
fs/ext2/inode.c
fs/ext2/super.c
fs/ext2/xattr.c
fs/ext2/xip.c
fs/ext2/xip.h
fs/ext3/balloc.c
fs/ext3/ext3_jbd.c
fs/ext3/fsync.c
fs/ext3/ialloc.c
fs/ext3/inode.c
fs/ext3/namei.c
fs/ext3/resize.c
fs/ext3/super.c
fs/ext3/xattr.c
fs/ext3/xattr.h
fs/ext4/acl.c
fs/ext4/balloc.c
fs/ext4/bitmap.c
fs/ext4/dir.c
fs/ext4/ext4.h [moved from include/linux/ext4_fs.h with 99% similarity]
fs/ext4/ext4_extents.h [moved from include/linux/ext4_fs_extents.h with 98% similarity]
fs/ext4/ext4_i.h [moved from include/linux/ext4_fs_i.h with 97% similarity]
fs/ext4/ext4_jbd2.c
fs/ext4/ext4_jbd2.h [moved from include/linux/ext4_jbd2.h with 98% similarity]
fs/ext4/ext4_sb.h [moved from include/linux/ext4_fs_sb.h with 97% similarity]
fs/ext4/extents.c
fs/ext4/file.c
fs/ext4/fsync.c
fs/ext4/hash.c
fs/ext4/ialloc.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/mballoc.c
fs/ext4/mballoc.h [new file with mode: 0644]
fs/ext4/migrate.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/symlink.c
fs/ext4/xattr.c
fs/ext4/xattr.h
fs/ext4/xattr_security.c
fs/ext4/xattr_trusted.c
fs/ext4/xattr_user.c
fs/fat/cache.c
fs/fat/dir.c
fs/fat/fatent.c
fs/fat/file.c
fs/fat/inode.c
fs/freevxfs/vxfs_extern.h
fs/freevxfs/vxfs_immed.c
fs/freevxfs/vxfs_inode.c
fs/fs-writeback.c
fs/fuse/control.c
fs/fuse/dev.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/fuse_i.h
fs/fuse/inode.c
fs/gfs2/locking/dlm/sysfs.c
fs/gfs2/ops_address.c
fs/gfs2/util.h
fs/hfs/btree.c
fs/hfs/mdb.c
fs/hfs/super.c
fs/hfsplus/btree.c
fs/hfsplus/hfsplus_fs.h
fs/hfsplus/inode.c
fs/hfsplus/options.c
fs/hfsplus/super.c
fs/hfsplus/wrapper.c
fs/hugetlbfs/inode.c
fs/inode.c
fs/inotify_user.c
fs/ioctl.c
fs/isofs/dir.c
fs/isofs/isofs.h
fs/isofs/namei.c
fs/jbd/commit.c
fs/jbd/journal.c
fs/jbd/revoke.c
fs/jbd/transaction.c
fs/jbd2/commit.c
fs/jbd2/journal.c
fs/jbd2/revoke.c
fs/jbd2/transaction.c
fs/jffs2/debug.h
fs/jffs2/xattr.c
fs/jfs/jfs_debug.c
fs/lockd/clntproc.c
fs/lockd/svclock.c
fs/msdos/namei.c
fs/namei.c
fs/namespace.c
fs/ncpfs/inode.c
fs/ncpfs/ioctl.c
fs/ncpfs/ncplib_kernel.c
fs/ncpfs/ncpsign_kernel.c
fs/nfs/client.c
fs/nfs/super.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfsctl.c
fs/ntfs/debug.h
fs/ntfs/mft.c
fs/ocfs2/dlm/dlmfs.c
fs/open.c
fs/partitions/ldm.c
fs/partitions/msdos.c
fs/proc/array.c
fs/proc/base.c
fs/proc/generic.c
fs/proc/inode.c
fs/proc/internal.h
fs/proc/nommu.c
fs/proc/proc_misc.c
fs/proc/proc_sysctl.c
fs/proc/proc_tty.c
fs/proc/root.c
fs/proc/task_mmu.c
fs/proc/task_nommu.c
fs/quota.c
fs/quota_v1.c
fs/quota_v2.c
fs/ramfs/file-mmu.c
fs/ramfs/inode.c
fs/ramfs/internal.h
fs/reiserfs/bitmap.c
fs/reiserfs/do_balan.c
fs/reiserfs/ioctl.c
fs/reiserfs/journal.c
fs/reiserfs/namei.c
fs/reiserfs/objectid.c
fs/reiserfs/procfs.c
fs/reiserfs/stree.c
fs/reiserfs/super.c
fs/select.c
fs/smbfs/smb_debug.h
fs/splice.c
fs/super.c
fs/sync.c
fs/sysfs/file.c
fs/sysfs/group.c
fs/sysfs/inode.c
fs/sysfs/mount.c
fs/sysfs/sysfs.h
fs/sysv/sysv.h
fs/timerfd.c
fs/udf/namei.c
fs/udf/super.c
fs/ufs/balloc.c
fs/ufs/dir.c
fs/ufs/inode.c
fs/ufs/swab.h
fs/ufs/ufs.h
fs/utimes.c
fs/vfat/namei.c
fs/xattr.c
fs/xfs/Kconfig
fs/xfs/linux-2.6/mrlock.h
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_export.c
fs/xfs/linux-2.6/xfs_file.c
fs/xfs/linux-2.6/xfs_ioctl.c
fs/xfs/linux-2.6/xfs_iops.c
fs/xfs/linux-2.6/xfs_linux.h
fs/xfs/linux-2.6/xfs_lrw.c
fs/xfs/linux-2.6/xfs_lrw.h
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_vnode.h
fs/xfs/quota/xfs_dquot.c
fs/xfs/quota/xfs_qm.c
fs/xfs/quota/xfs_qm_syscalls.c
fs/xfs/quota/xfs_quota_priv.h
fs/xfs/quota/xfs_trans_dquot.c
fs/xfs/support/debug.h
fs/xfs/xfs.h
fs/xfs/xfs_acl.c
fs/xfs/xfs_attr.c
fs/xfs/xfs_attr.h
fs/xfs/xfs_bmap.c
fs/xfs/xfs_dfrag.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_ialloc.c
fs/xfs/xfs_iget.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_itable.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_rename.c
fs/xfs/xfs_trans_inode.c
fs/xfs/xfs_utils.c
fs/xfs/xfs_utils.h
fs/xfs/xfs_vfsops.c
fs/xfs/xfs_vnodeops.c
fs/xfs/xfs_vnodeops.h
include/acpi/acconfig.h
include/acpi/acdebug.h
include/acpi/acdisasm.h
include/acpi/acdispat.h
include/acpi/acevents.h
include/acpi/acexcep.h
include/acpi/acglobal.h
include/acpi/achware.h
include/acpi/acinterp.h
include/acpi/aclocal.h
include/acpi/acmacros.h
include/acpi/acnames.h
include/acpi/acnamesp.h
include/acpi/acobject.h
include/acpi/acopcode.h
include/acpi/acoutput.h
include/acpi/acparser.h
include/acpi/acpi.h
include/acpi/acpiosxf.h
include/acpi/acpixf.h
include/acpi/acresrc.h
include/acpi/acstruct.h
include/acpi/actables.h
include/acpi/actbl.h
include/acpi/actbl1.h
include/acpi/actypes.h
include/acpi/acutils.h
include/acpi/amlcode.h
include/acpi/amlresrc.h
include/acpi/platform/acenv.h
include/acpi/platform/acgcc.h
include/acpi/platform/aclinux.h
include/asm-alpha/bitops.h
include/asm-alpha/bug.h
include/asm-alpha/byteorder.h
include/asm-alpha/pgtable.h
include/asm-alpha/unaligned.h
include/asm-arm/arch-orion5x/io.h
include/asm-arm/arch-pxa/irqs.h
include/asm-arm/arch-pxa/magician.h
include/asm-arm/arch-pxa/pxa-regs.h
include/asm-arm/arch-pxa/pxafb.h
include/asm-arm/arch-pxa/regs-lcd.h [new file with mode: 0644]
include/asm-arm/arch-pxa/system.h
include/asm-arm/arch-sa1100/ide.h
include/asm-arm/div64.h
include/asm-arm/page.h
include/asm-arm/pgtable.h
include/asm-arm/unaligned.h
include/asm-avr32/pgtable.h
include/asm-avr32/unaligned.h
include/asm-blackfin/unaligned.h
include/asm-cris/arch-v10/ide.h
include/asm-cris/pgtable.h
include/asm-cris/unaligned.h
include/asm-frv/pgtable.h
include/asm-frv/unaligned.h
include/asm-generic/bitops.h
include/asm-generic/bitops/__fls.h [new file with mode: 0644]
include/asm-generic/bitops/find.h
include/asm-generic/bitops/fls64.h
include/asm-generic/div64.h
include/asm-generic/futex.h
include/asm-generic/gpio.h
include/asm-generic/ioctl.h
include/asm-generic/memory_model.h
include/asm-generic/page.h
include/asm-generic/rtc.h
include/asm-generic/unaligned.h [deleted file]
include/asm-h8300/unaligned.h
include/asm-ia64/bitops.h
include/asm-ia64/dma-mapping.h
include/asm-ia64/dmi.h
include/asm-ia64/futex.h
include/asm-ia64/gcc_intrin.h
include/asm-ia64/hugetlb.h [new file with mode: 0644]
include/asm-ia64/io.h
include/asm-ia64/kvm.h
include/asm-ia64/kvm_host.h [new file with mode: 0644]
include/asm-ia64/kvm_para.h [new file with mode: 0644]
include/asm-ia64/machvec.h
include/asm-ia64/machvec_hpzx1.h
include/asm-ia64/machvec_hpzx1_swiotlb.h
include/asm-ia64/machvec_sn2.h
include/asm-ia64/page.h
include/asm-ia64/pgtable.h
include/asm-ia64/processor.h
include/asm-ia64/system.h
include/asm-ia64/thread_info.h
include/asm-ia64/topology.h
include/asm-ia64/unaligned.h
include/asm-ia64/uncached.h
include/asm-m32r/pgtable.h
include/asm-m32r/unaligned.h
include/asm-m68k/div64.h
include/asm-m68k/motorola_pgtable.h
include/asm-m68k/sun3_pgtable.h
include/asm-m68k/unaligned.h
include/asm-m68knommu/dma.h
include/asm-m68knommu/param.h
include/asm-m68knommu/unaligned.h
include/asm-mips/bitops.h
include/asm-mips/cmp.h [new file with mode: 0644]
include/asm-mips/cpu.h
include/asm-mips/dec/ioasic.h
include/asm-mips/div64.h
include/asm-mips/ds1287.h [new file with mode: 0644]
include/asm-mips/futex.h
include/asm-mips/gcmpregs.h [new file with mode: 0644]
include/asm-mips/gic.h [new file with mode: 0644]
include/asm-mips/io.h
include/asm-mips/jmr3927/jmr3927.h
include/asm-mips/jmr3927/tx3927.h
include/asm-mips/jmr3927/txx927.h
include/asm-mips/mach-au1x00/au1000.h
include/asm-mips/mach-au1x00/au1xxx_ide.h
include/asm-mips/mach-db1x00/db1200.h
include/asm-mips/mach-generic/gpio.h
include/asm-mips/mach-ip27/topology.h
include/asm-mips/mach-pb1x00/pb1200.h
include/asm-mips/mips-boards/generic.h
include/asm-mips/mips-boards/launch.h [new file with mode: 0644]
include/asm-mips/mips-boards/malta.h
include/asm-mips/mips-boards/maltaint.h
include/asm-mips/mips-boards/maltasmp.h [new file with mode: 0644]
include/asm-mips/mipsmtregs.h
include/asm-mips/pgtable-32.h
include/asm-mips/pgtable-bits.h
include/asm-mips/pgtable.h
include/asm-mips/r4k-timer.h [new file with mode: 0644]
include/asm-mips/smp-ops.h
include/asm-mips/smtc.h
include/asm-mips/smvp.h [new file with mode: 0644]
include/asm-mips/traps.h
include/asm-mips/tx4938/rbtx4938.h
include/asm-mips/tx4938/tx4938.h
include/asm-mips/tx4938/tx4938_mips.h [deleted file]
include/asm-mips/txx9pio.h [new file with mode: 0644]
include/asm-mips/unaligned.h
include/asm-mips/vr41xx/siu.h
include/asm-mips/vr41xx/vr41xx.h
include/asm-mn10300/div64.h
include/asm-mn10300/pgtable.h
include/asm-mn10300/unaligned.h
include/asm-parisc/bitops.h
include/asm-parisc/futex.h
include/asm-parisc/pgtable.h
include/asm-parisc/unaligned.h
include/asm-powerpc/bitops.h
include/asm-powerpc/futex.h
include/asm-powerpc/hugetlb.h [new file with mode: 0644]
include/asm-powerpc/irq.h
include/asm-powerpc/kvm.h
include/asm-powerpc/kvm_asm.h [new file with mode: 0644]
include/asm-powerpc/kvm_host.h [new file with mode: 0644]
include/asm-powerpc/kvm_para.h [new file with mode: 0644]
include/asm-powerpc/kvm_ppc.h [new file with mode: 0644]
include/asm-powerpc/mmu-44x.h
include/asm-powerpc/page_64.h
include/asm-powerpc/pgtable-ppc32.h
include/asm-powerpc/pgtable-ppc64.h
include/asm-powerpc/processor.h
include/asm-powerpc/rio.h [moved from include/asm-ppc/rio.h with 100% similarity]
include/asm-powerpc/system.h
include/asm-powerpc/unaligned.h
include/asm-ppc/pgtable.h
include/asm-s390/Kbuild
include/asm-s390/bitops.h
include/asm-s390/ccwgroup.h
include/asm-s390/hugetlb.h [new file with mode: 0644]
include/asm-s390/kvm.h
include/asm-s390/kvm_host.h [new file with mode: 0644]
include/asm-s390/kvm_para.h [new file with mode: 0644]
include/asm-s390/kvm_virtio.h [new file with mode: 0644]
include/asm-s390/lowcore.h
include/asm-s390/mmu.h
include/asm-s390/mmu_context.h
include/asm-s390/page.h
include/asm-s390/pgtable.h
include/asm-s390/processor.h
include/asm-s390/ptrace.h
include/asm-s390/qdio.h
include/asm-s390/setup.h
include/asm-s390/smp.h
include/asm-s390/sparsemem.h [new file with mode: 0644]
include/asm-s390/sysinfo.h
include/asm-s390/system.h
include/asm-s390/thread_info.h
include/asm-s390/tlbflush.h
include/asm-s390/topology.h
include/asm-s390/unaligned.h
include/asm-sh/bitops.h
include/asm-sh/futex.h
include/asm-sh/hugetlb.h [new file with mode: 0644]
include/asm-sh/pgtable_32.h
include/asm-sh/pgtable_64.h
include/asm-sh/unaligned.h
include/asm-sparc/head.h
include/asm-sparc/kgdb.h
include/asm-sparc/pgtable.h
include/asm-sparc/processor.h
include/asm-sparc/smp.h
include/asm-sparc/system.h
include/asm-sparc/unaligned.h
include/asm-sparc64/bitops.h
include/asm-sparc64/floppy.h
include/asm-sparc64/futex.h
include/asm-sparc64/hugetlb.h [new file with mode: 0644]
include/asm-sparc64/isa.h [deleted file]
include/asm-sparc64/kgdb.h [new file with mode: 0644]
include/asm-sparc64/page.h
include/asm-sparc64/pgtable.h
include/asm-sparc64/pil.h
include/asm-sparc64/ptrace.h
include/asm-sparc64/system.h
include/asm-sparc64/thread_info.h
include/asm-sparc64/ttable.h
include/asm-sparc64/unaligned.h
include/asm-um/div64.h
include/asm-um/pgtable.h
include/asm-um/unaligned.h
include/asm-v850/unaligned.h
include/asm-x86/bitops.h
include/asm-x86/bitops_32.h [deleted file]
include/asm-x86/bitops_64.h [deleted file]
include/asm-x86/bootparam.h
include/asm-x86/div64.h
include/asm-x86/dmi.h
include/asm-x86/e820_64.h
include/asm-x86/futex.h
include/asm-x86/geode.h
include/asm-x86/hugetlb.h [new file with mode: 0644]
include/asm-x86/io_32.h
include/asm-x86/kvm.h
include/asm-x86/kvm_host.h
include/asm-x86/kvm_para.h
include/asm-x86/mach-default/mach_apic.h
include/asm-x86/olpc.h [new file with mode: 0644]
include/asm-x86/pci.h
include/asm-x86/pgtable.h
include/asm-x86/processor.h
include/asm-x86/proto.h
include/asm-x86/reboot.h
include/asm-x86/thread_info_32.h
include/asm-x86/thread_info_64.h
include/asm-x86/time.h
include/asm-x86/topology.h
include/asm-x86/tsc.h
include/asm-x86/unaligned.h
include/asm-xtensa/pgtable.h
include/asm-xtensa/unaligned.h
include/crypto/scatterwalk.h
include/linux/Kbuild
include/linux/acpi.h
include/linux/agp_backend.h
include/linux/aio.h
include/linux/audit.h
include/linux/backing-dev.h
include/linux/binfmts.h
include/linux/bio.h
include/linux/bitmap.h
include/linux/bitops.h
include/linux/blkdev.h
include/linux/bootmem.h
include/linux/bsg.h
include/linux/buffer_head.h
include/linux/byteorder/Kbuild
include/linux/byteorder/big_endian.h
include/linux/byteorder/generic.h
include/linux/byteorder/little_endian.h
include/linux/cache.h
include/linux/calc64.h [deleted file]
include/linux/capability.h
include/linux/cdev.h
include/linux/cgroup.h
include/linux/cgroup_subsys.h
include/linux/clocksource.h
include/linux/coda_fs_i.h
include/linux/coda_linux.h
include/linux/compat.h
include/linux/compiler-gcc.h
include/linux/concap.h
include/linux/configfs.h
include/linux/console.h
include/linux/console_struct.h
include/linux/cpu.h
include/linux/cpufreq.h
include/linux/cpumask.h
include/linux/cpuset.h
include/linux/crc-ccitt.h
include/linux/dcache.h
include/linux/debugobjects.h [new file with mode: 0644]
include/linux/device-mapper.h
include/linux/device.h
include/linux/device_cgroup.h [new file with mode: 0644]
include/linux/devpts_fs.h
include/linux/dma-attrs.h [new file with mode: 0644]
include/linux/dma-mapping.h
include/linux/dmi.h
include/linux/edac.h
include/linux/elf.h
include/linux/eventfd.h
include/linux/fb.h
include/linux/fs.h
include/linux/fsl_devices.h
include/linux/fsnotify.h
include/linux/generic_serial.h
include/linux/gfp.h
include/linux/hid.h
include/linux/hrtimer.h
include/linux/hugetlb.h
include/linux/hw_random.h
include/linux/i2c-id.h
include/linux/i2c.h
include/linux/i2o.h
include/linux/ide.h
include/linux/idr.h
include/linux/if_macvlan.h
include/linux/inet.h
include/linux/init.h
include/linux/init_task.h
include/linux/interrupt.h
include/linux/io.h
include/linux/ipc_namespace.h
include/linux/ipmi.h
include/linux/ipmi_smi.h
include/linux/isapnp.h
include/linux/isicom.h
include/linux/jiffies.h
include/linux/kbuild.h [new file with mode: 0644]
include/linux/kernel.h
include/linux/key.h
include/linux/keyctl.h
include/linux/kfifo.h
include/linux/klist.h
include/linux/kobj_map.h
include/linux/kobject.h
include/linux/kprobes.h
include/linux/kref.h
include/linux/kvm.h
include/linux/kvm_host.h
include/linux/kvm_para.h
include/linux/kvm_types.h
include/linux/libata.h
include/linux/list.h
include/linux/lmb.h
include/linux/math64.h [new file with mode: 0644]
include/linux/mca-legacy.h
include/linux/memcontrol.h
include/linux/memory.h
include/linux/memory_hotplug.h
include/linux/mempolicy.h
include/linux/mlx4/device.h
include/linux/mlx4/qp.h
include/linux/mm.h
include/linux/mm_types.h
include/linux/mmzone.h
include/linux/mod_devicetable.h
include/linux/module.h
include/linux/mount.h
include/linux/msdos_fs.h
include/linux/msg.h
include/linux/msi.h
include/linux/nbd.h
include/linux/ncp_fs.h
include/linux/ncp_fs_i.h
include/linux/netlink.h
include/linux/nodemask.h
include/linux/notifier.h
include/linux/of_device.h
include/linux/oom.h
include/linux/page-flags.h
include/linux/pci.h
include/linux/pci_ids.h
include/linux/percpu.h
include/linux/personality.h
include/linux/phantom.h
include/linux/pid.h
include/linux/pid_namespace.h
include/linux/pm.h
include/linux/pnp.h
include/linux/pnpbios.h [deleted file]
include/linux/poison.h
include/linux/prctl.h
include/linux/proc_fs.h
include/linux/profile.h
include/linux/proportions.h
include/linux/ptrace.h
include/linux/quota.h
include/linux/quotaops.h
include/linux/raid/md.h
include/linux/raid/md_k.h
include/linux/raid/raid5.h
include/linux/rcuclassic.h
include/linux/rcupdate.h
include/linux/rcupreempt.h
include/linux/rcupreempt_trace.h
include/linux/reiserfs_fs.h
include/linux/reiserfs_fs_sb.h
include/linux/res_counter.h
include/linux/resource.h
include/linux/rio.h
include/linux/rio_drv.h
include/linux/rwsem.h
include/linux/sched.h
include/linux/securebits.h
include/linux/security.h
include/linux/seq_file.h
include/linux/serial_8250.h
include/linux/serial_core.h
include/linux/shmem_fs.h
include/linux/signal.h
include/linux/slab.h
include/linux/slub_def.h
include/linux/smb.h
include/linux/smb_fs_i.h
include/linux/smb_fs_sb.h
include/linux/string.h
include/linux/suspend.h
include/linux/svga.h
include/linux/swap.h
include/linux/synclink.h
include/linux/syscalls.h
include/linux/sysctl.h
include/linux/sysfs.h
include/linux/sysv_fs.h
include/linux/textsearch.h
include/linux/thermal.h
include/linux/thread_info.h
include/linux/timer.h
include/linux/timex.h
include/linux/tty.h
include/linux/tty_driver.h
include/linux/unaligned/access_ok.h [new file with mode: 0644]
include/linux/unaligned/be_byteshift.h [new file with mode: 0644]
include/linux/unaligned/be_memmove.h [new file with mode: 0644]
include/linux/unaligned/be_struct.h [new file with mode: 0644]
include/linux/unaligned/generic.h [new file with mode: 0644]
include/linux/unaligned/le_byteshift.h [new file with mode: 0644]
include/linux/unaligned/le_memmove.h [new file with mode: 0644]
include/linux/unaligned/le_struct.h [new file with mode: 0644]
include/linux/unaligned/memmove.h [new file with mode: 0644]
include/linux/unaligned/packed_struct.h [new file with mode: 0644]
include/linux/vmalloc.h
include/linux/vmstat.h
include/linux/writeback.h
include/linux/xattr.h
include/media/v4l2-chip-ident.h
include/media/v4l2-common.h
include/media/v4l2-i2c-drv-legacy.h
include/media/v4l2-i2c-drv.h
include/net/compat.h
include/net/ip_vs.h
include/net/netfilter/nf_conntrack_tuple.h
include/net/netlabel.h
include/net/route.h
include/net/xfrm.h
include/rdma/ib_umem.h
include/rdma/ib_verbs.h
include/scsi/libiscsi.h
include/scsi/scsi_device.h
include/sound/mpu401.h
include/video/atmel_lcdc.h
include/video/hecubafb.h [new file with mode: 0644]
include/video/metronomefb.h [new file with mode: 0644]
init/Kconfig
init/do_mounts_md.c
init/do_mounts_rd.c
init/initramfs.c
init/main.c
ipc/Makefile
ipc/ipc_sysctl.c
ipc/ipcns_notifier.c [new file with mode: 0644]
ipc/msg.c
ipc/namespace.c
ipc/sem.c
ipc/shm.c
ipc/util.c
ipc/util.h
kernel/Makefile
kernel/audit.c
kernel/audit.h
kernel/auditfilter.c
kernel/auditsc.c
kernel/bounds.c [new file with mode: 0644]
kernel/cgroup.c
kernel/cgroup_debug.c
kernel/compat.c
kernel/configs.c
kernel/cpu.c
kernel/cpuset.c
kernel/dma.c
kernel/exit.c
kernel/fork.c
kernel/futex.c
kernel/hrtimer.c
kernel/irq/devres.c
kernel/irq/manage.c
kernel/kallsyms.c
kernel/kexec.c
kernel/kprobes.c
kernel/kthread.c
kernel/latencytop.c
kernel/lockdep_proc.c
kernel/marker.c
kernel/module.c
kernel/notifier.c
kernel/ns_cgroup.c
kernel/nsproxy.c
kernel/panic.c
kernel/pid.c
kernel/pid_namespace.c
kernel/posix-cpu-timers.c
kernel/posix-timers.c
kernel/power/Kconfig
kernel/power/Makefile
kernel/power/console.c
kernel/power/pm.c [deleted file]
kernel/printk.c
kernel/profile.c
kernel/ptrace.c
kernel/rcutorture.c
kernel/relay.c
kernel/res_counter.c
kernel/resource.c
kernel/sched.c
kernel/sched_debug.c
kernel/signal.c
kernel/softirq.c
kernel/sys.c
kernel/sysctl.c
kernel/taskstats.c
kernel/time.c
kernel/time/ntp.c
kernel/time/timekeeping.c
kernel/time/timer_list.c
kernel/time/timer_stats.c
kernel/timer.c
kernel/user.c
kernel/user_namespace.c
kernel/utsname.c
kernel/workqueue.c
lib/Kconfig
lib/Kconfig.debug
lib/Makefile
lib/bitmap.c
lib/debugobjects.c [new file with mode: 0644]
lib/devres.c
lib/div64.c
lib/find_next_bit.c
lib/idr.c
lib/inflate.c
lib/iomap.c
lib/klist.c
lib/kobject.c
lib/kobject_uevent.c
lib/lmb.c
lib/percpu_counter.c
lib/proportions.c
lib/radix-tree.c
lib/ratelimit.c [new file with mode: 0644]
lib/string.c
lib/swiotlb.c
mm/Kconfig
mm/backing-dev.c
mm/bootmem.c
mm/dmapool.c
mm/fadvise.c
mm/filemap.c
mm/filemap_xip.c
mm/hugetlb.c
mm/internal.h
mm/madvise.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/migrate.c
mm/mincore.c
mm/mmap.c
mm/mmzone.c
mm/nommu.c
mm/oom_kill.c
mm/page-writeback.c
mm/page_alloc.c
mm/pagewalk.c
mm/readahead.c
mm/rmap.c
mm/shmem.c
mm/slab.c
mm/slob.c
mm/slub.c
mm/sparse.c
mm/swap.c
mm/swap_state.c
mm/swapfile.c
mm/truncate.c
mm/vmalloc.c
mm/vmscan.c
mm/vmstat.c
net/bridge/br.c
net/bridge/br_if.c
net/can/raw.c
net/compat.c
net/core/dev.c
net/ipv4/af_inet.c
net/ipv4/icmp.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ipconfig.c
net/ipv4/ipvs/ip_vs_proto.c
net/ipv4/ipvs/ip_vs_proto_ah.c
net/ipv4/ipvs/ip_vs_proto_esp.c
net/ipv4/ipvs/ip_vs_proto_tcp.c
net/ipv4/ipvs/ip_vs_proto_udp.c
net/ipv4/ipvs/ip_vs_sync.c
net/ipv4/netfilter/ip_queue.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/route.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_input.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_veno.c
net/ipv6/Kconfig
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/netfilter/ip6_queue.c
net/irda/ircomm/ircomm_tty.c
net/irda/irnet/irnet_irda.c
net/irda/irnet/irnet_irda.h
net/key/af_key.c
net/mac80211/Kconfig
net/mac80211/mesh.h
net/mac80211/mesh_hwmp.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_queue.c
net/netfilter/nfnetlink_queue.c
net/netfilter/x_tables.c
net/netfilter/xt_TCPOPTSTRIP.c
net/netfilter/xt_connbytes.c
net/netlabel/netlabel_unlabeled.c
net/netlabel/netlabel_user.c
net/netlabel/netlabel_user.h
net/netlink/af_netlink.c
net/sched/sch_sfq.c
net/sunrpc/xprt.c
net/tipc/msg.h
net/xfrm/xfrm_algo.c
net/xfrm/xfrm_output.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_state.c
net/xfrm/xfrm_user.c
samples/markers/marker-example.c
scripts/Lindent
scripts/Makefile.modpost
scripts/checkpatch.pl
scripts/gen_initramfs_list.sh
scripts/kallsyms.c
scripts/kconfig/lex.zconf.c_shipped
scripts/kconfig/lkc_proto.h
scripts/kconfig/lxdialog/inputbox.c
scripts/kconfig/menu.c
scripts/kconfig/symbol.c
scripts/kconfig/zconf.tab.c_shipped
scripts/kconfig/zconf.y
scripts/kernel-doc
scripts/mod/file2alias.c
scripts/mod/modpost.c
security/Makefile
security/capability.c
security/commoncap.c
security/device_cgroup.c [new file with mode: 0644]
security/dummy.c
security/keys/Makefile
security/keys/compat.c
security/keys/internal.h
security/keys/key.c
security/keys/keyctl.c
security/keys/keyring.c
security/keys/proc.c
security/keys/process_keys.c
security/keys/request_key.c
security/keys/request_key_auth.c
security/keys/sysctl.c [new file with mode: 0644]
security/root_plug.c
security/security.c
security/selinux/avc.c
security/selinux/hooks.c
security/selinux/include/avc_ss.h
security/selinux/include/netlabel.h
security/selinux/include/objsec.h
security/selinux/include/security.h
security/selinux/netnode.c
security/selinux/netport.c
security/selinux/ss/conditional.h
security/selinux/ss/context.h
security/selinux/ss/hashtab.h
security/selinux/ss/mls.h
security/selinux/ss/mls_types.h
security/selinux/ss/policydb.h
security/selinux/ss/services.c
security/smack/smack_lsm.c
security/smack/smackfs.c
sound/core/info.c
sound/core/memalloc.c
sound/drivers/Kconfig
sound/drivers/mpu401/mpu401_uart.c
sound/pci/hda/patch_realtek.c
sound/pci/ice1712/ice1724.c
sound/sh/aica.c
sound/soc/Kconfig
sound/soc/Makefile
sound/soc/codecs/wm8753.c
sound/soc/codecs/wm9712.c
sound/soc/omap/Kconfig [new file with mode: 0644]
sound/soc/omap/Makefile [new file with mode: 0644]
sound/soc/omap/n810.c [new file with mode: 0644]
sound/soc/omap/omap-mcbsp.c [new file with mode: 0644]
sound/soc/omap/omap-mcbsp.h [new file with mode: 0644]
sound/soc/omap/omap-pcm.c [new file with mode: 0644]
sound/soc/omap/omap-pcm.h [new file with mode: 0644]
sound/soc/s3c24xx/ln2440sbc_alc650.c
sound/soc/s3c24xx/neo1973_wm8753.c
sound/soc/s3c24xx/s3c2443-ac97.c
sound/soc/s3c24xx/s3c24xx-pcm.c
virt/kvm/kvm_main.c
virt/kvm/kvm_trace.c [new file with mode: 0644]

index 3016ed30526d4296e219d4ea9dc3ef7e23fe9f23..090b293b87792a815cf51aeccb590ec37c90cfa3 100644 (file)
@@ -41,6 +41,7 @@ include/linux/autoconf.h
 include/linux/compile.h
 include/linux/version.h
 include/linux/utsrelease.h
+include/linux/bounds.h
 
 # stgit generated dirs
 patches-*
index ebf9bf84da0aab5ed944264a5db2a65fe3a3e883..726084286d33e626ddc2bf69db078cdc972c0e4e 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -88,6 +88,7 @@ Rudolf Marek <R.Marek@sh.cvut.cz>
 Rui Saraiva <rmps@joel.ist.utl.pt>
 Sachin P Sant <ssant@in.ibm.com>
 Sam Ravnborg <sam@mars.ravnborg.org>
+S.Çağlar Onur <caglar@pardus.org.tr>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
diff --git a/Documentation/ABI/testing/sysfs-class-bdi b/Documentation/ABI/testing/sysfs-class-bdi
new file mode 100644 (file)
index 0000000..5ac1e01
--- /dev/null
@@ -0,0 +1,46 @@
+What:          /sys/class/bdi/<bdi>/
+Date:          January 2008
+Contact:       Peter Zijlstra <a.p.zijlstra@chello.nl>
+Description:
+
+Provide a place in sysfs for the backing_dev_info object.  This allows
+setting and retrieving various BDI specific variables.
+
+The <bdi> identifier can be either of the following:
+
+MAJOR:MINOR
+
+       Device number for block devices, or value of st_dev on
+       non-block filesystems which provide their own BDI, such as NFS
+       and FUSE.
+
+default
+
+       The default backing dev, used for non-block device backed
+       filesystems which do not provide their own BDI.
+
+Files under /sys/class/bdi/<bdi>/
+---------------------------------
+
+read_ahead_kb (read-write)
+
+       Size of the read-ahead window in kilobytes
+
+min_ratio (read-write)
+
+       Under normal circumstances each device is given a part of the
+       total write-back cache that relates to its current average
+       writeout speed in relation to the other devices.
+
+       The 'min_ratio' parameter allows assigning a minimum
+       percentage of the write-back cache to a particular device.
+       For example, this is useful for providing a minimum QoS.
+
+max_ratio (read-write)
+
+       Allows limiting a particular device to use not more than the
+       given percentage of the write-back cache.  This is useful in
+       situations where we want to avoid one device taking all or
+       most of the write-back cache.  For example in case of an NFS
+       mount that is prone to get stuck, or a FUSE mount which cannot
+       be trusted to play fair.
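The attributes above are ordinary sysfs files, so they can be read and
written with plain file I/O.  A minimal user-space sketch (the BDI name
"default", the 512 KB read-ahead window and the 10% minimum share are
illustrative values only, not recommendations):

#include <stdio.h>

/* Sketch: cap the default BDI's read-ahead window and reserve it a
 * minimum share of the write-back cache.  Error handling is minimal. */
int main(void)
{
        FILE *f;

        f = fopen("/sys/class/bdi/default/read_ahead_kb", "w");
        if (!f)
                return 1;
        fprintf(f, "512\n");
        fclose(f);

        f = fopen("/sys/class/bdi/default/min_ratio", "w");
        if (!f)
                return 1;
        fprintf(f, "10\n");
        fclose(f);

        return 0;
}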
index b939ebb62871ec6b743b02cd78be1ca6d230baae..80d150458c80c5ac7f7f5f75b3e6ff8a602a19ca 100644 (file)
@@ -145,7 +145,7 @@ Part Ic - DMA addressing limitations
 int
 dma_supported(struct device *dev, u64 mask)
 int
-pci_dma_supported(struct device *dev, u64 mask)
+pci_dma_supported(struct pci_dev *hwdev, u64 mask)
 
 Checks to see if the device can support DMA to the memory described by
 mask.
@@ -189,7 +189,7 @@ dma_addr_t
 dma_map_single(struct device *dev, void *cpu_addr, size_t size,
                      enum dma_data_direction direction)
 dma_addr_t
-pci_map_single(struct device *dev, void *cpu_addr, size_t size,
+pci_map_single(struct pci_dev *hwdev, void *cpu_addr, size_t size,
                      int direction)
 
 Maps a piece of processor virtual memory so it can be accessed by the
@@ -395,6 +395,71 @@ Notes:  You must do this:
 
 See also dma_map_single().
 
+dma_addr_t
+dma_map_single_attrs(struct device *dev, void *cpu_addr, size_t size,
+                    enum dma_data_direction dir,
+                    struct dma_attrs *attrs)
+
+void
+dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
+                      size_t size, enum dma_data_direction dir,
+                      struct dma_attrs *attrs)
+
+int
+dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+                int nents, enum dma_data_direction dir,
+                struct dma_attrs *attrs)
+
+void
+dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
+                  int nents, enum dma_data_direction dir,
+                  struct dma_attrs *attrs)
+
+The four functions above are just like the counterpart functions
+without the _attrs suffixes, except that they pass an optional
+struct dma_attrs*.
+
+struct dma_attrs encapsulates a set of "dma attributes". For the
+definition of struct dma_attrs see linux/dma-attrs.h.
+
+The interpretation of dma attributes is architecture-specific, and
+each attribute should be documented in Documentation/DMA-attributes.txt.
+
+If struct dma_attrs* is NULL, the semantics of each of these
+functions are identical to those of the corresponding function
+without the _attrs suffix. As a result dma_map_single_attrs()
+can generally replace dma_map_single(), etc.
+
+As an example of the use of the *_attrs functions, here's how
+you could pass an attribute DMA_ATTR_FOO when mapping memory
+for DMA:
+
+#include <linux/dma-attrs.h>
+/* DMA_ATTR_FOO should be defined in linux/dma-attrs.h and
+ * documented in Documentation/DMA-attributes.txt */
+...
+
+       DEFINE_DMA_ATTRS(attrs);
+       dma_set_attr(DMA_ATTR_FOO, &attrs);
+       ....
+       n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, &attrs);
+       ....
+
+Architectures that care about DMA_ATTR_FOO would check for its
+presence in their implementations of the mapping and unmapping
+routines, e.g.:
+
+void whizco_dma_map_sg_attrs(struct device *dev, dma_addr_t dma_addr,
+                            size_t size, enum dma_data_direction dir,
+                            struct dma_attrs *attrs)
+{
+       ....
+       int foo = dma_get_attr(DMA_ATTR_FOO, attrs);
+       ....
+       if (foo)
+               /* twizzle the frobnozzle */
+       ....
+}
 
 Part II - Advanced dma_ usage
 -----------------------------
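For illustration, a minimal sketch of the map/unmap round trip with the
_attrs variants documented above; the device, buffer and "whizco" names
are invented, and DMA_ATTR_WRITE_BARRIER is used simply because it is an
attribute that linux/dma-attrs.h already defines:

#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>
#include <linux/errno.h>

/* Sketch: map a receive buffer with an attribute set, start the DMA,
 * then unmap it with the same attributes once the transfer is done. */
static int whizco_map_rx(struct device *dev, void *buf, size_t len)
{
        DEFINE_DMA_ATTRS(attrs);
        dma_addr_t handle;

        dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

        handle = dma_map_single_attrs(dev, buf, len, DMA_FROM_DEVICE, &attrs);
        if (dma_mapping_error(handle))
                return -ENOMEM;

        /* ... program the device and wait for the transfer ... */

        dma_unmap_single_attrs(dev, handle, len, DMA_FROM_DEVICE, &attrs);
        return 0;
}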
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
new file mode 100644 (file)
index 0000000..6d772f8
--- /dev/null
@@ -0,0 +1,24 @@
+                       DMA attributes
+                       ==============
+
+This document describes the semantics of the DMA attributes that are
+defined in linux/dma-attrs.h.
+
+DMA_ATTR_WRITE_BARRIER
+----------------------
+
+DMA_ATTR_WRITE_BARRIER is a (write) barrier attribute for DMA.  DMA
+to a memory region with the DMA_ATTR_WRITE_BARRIER attribute forces
+all pending DMA writes to complete, and thus provides a mechanism to
+strictly order DMA from a device across all intervening busses and
+bridges.  This barrier is not specific to a particular type of
+interconnect, it applies to the system as a whole, and so its
+implementation must account for the idiosyncrasies of the system all
+the way from the DMA device to memory.
+
+As an example of a situation where DMA_ATTR_WRITE_BARRIER would be
+useful, suppose that a device does a DMA write to indicate that data is
+ready and available in memory.  The DMA of the "completion indication"
+could race with data DMA.  Mapping the memory used for completion
+indications with DMA_ATTR_WRITE_BARRIER would prevent the race.
+
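To make the race-avoidance scenario concrete, a hedged sketch (all names
are invented; only DMA_ATTR_WRITE_BARRIER and the mapping calls come from
the text): the bulk data is mapped normally, while the word the device
writes last is mapped with the barrier attribute.

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/dma-attrs.h>

struct whizco_rx_area {
        void *data;             /* bulk payload the device fills first */
        size_t data_len;
        u32 *done;              /* device writes this word last */
        dma_addr_t data_handle;
        dma_addr_t done_handle;
};

/* Sketch: because "done" is mapped with DMA_ATTR_WRITE_BARRIER, the
 * device's completion store cannot become visible before the payload
 * writes it announces. */
static void whizco_map_rx_area(struct device *dev, struct whizco_rx_area *a)
{
        DEFINE_DMA_ATTRS(attrs);

        dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

        a->data_handle = dma_map_single(dev, a->data, a->data_len,
                                        DMA_FROM_DEVICE);
        a->done_handle = dma_map_single_attrs(dev, a->done, sizeof(*a->done),
                                              DMA_FROM_DEVICE, &attrs);
}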
index d84f89dbf921401ac0e7afbe2d291b4dc304ef97..b463ecd0c7cebf36f53104015afcbeeccf4b934d 100644 (file)
@@ -315,11 +315,11 @@ you should do:
 
        dma_addr_t dma_handle;
 
-       cpu_addr = pci_alloc_consistent(dev, size, &dma_handle);
+       cpu_addr = pci_alloc_consistent(pdev, size, &dma_handle);
 
-where dev is a struct pci_dev *. You should pass NULL for PCI like buses
-where devices don't have struct pci_dev (like ISA, EISA).  This may be
-called in interrupt context. 
+where pdev is a struct pci_dev *. This may be called in interrupt context.
+You should use dma_alloc_coherent (see DMA-API.txt) for buses
+where devices don't have struct pci_dev (like ISA, EISA).
 
 This argument is needed because the DMA translations may be bus
 specific (and often is private to the bus which the device is attached
@@ -332,7 +332,7 @@ __get_free_pages (but takes size instead of a page order).  If your
 driver needs regions sized smaller than a page, you may prefer using
 the pci_pool interface, described below.
 
-The consistent DMA mapping interfaces, for non-NULL dev, will by
+The consistent DMA mapping interfaces, for non-NULL pdev, will by
 default return a DMA address which is SAC (Single Address Cycle)
 addressable.  Even if the device indicates (via PCI dma mask) that it
 may address the upper 32-bits and thus perform DAC cycles, consistent
@@ -354,9 +354,9 @@ buffer you receive will not cross a 64K boundary.
 
 To unmap and free such a DMA region, you call:
 
-       pci_free_consistent(dev, size, cpu_addr, dma_handle);
+       pci_free_consistent(pdev, size, cpu_addr, dma_handle);
 
-where dev, size are the same as in the above call and cpu_addr and
+where pdev, size are the same as in the above call and cpu_addr and
 dma_handle are the values pci_alloc_consistent returned to you.
 This function may not be called in interrupt context.
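A minimal sketch of the allocate/free pairing described here (the ring
size, the private structure and the "whizco" names are invented; only the
pci_alloc_consistent()/pci_free_consistent() calls come from the text):

#include <linux/pci.h>
#include <linux/errno.h>

#define WHIZCO_RING_BYTES 4096          /* illustrative size only */

struct whizco_priv {
        void *ring;
        dma_addr_t ring_dma;
};

/* Sketch: allocate a consistent descriptor ring at probe time ... */
static int whizco_setup_ring(struct pci_dev *pdev, struct whizco_priv *priv)
{
        priv->ring = pci_alloc_consistent(pdev, WHIZCO_RING_BYTES,
                                          &priv->ring_dma);
        if (!priv->ring)
                return -ENOMEM;
        return 0;
}

/* ... and release it on the teardown path (not from interrupt context). */
static void whizco_free_ring(struct pci_dev *pdev, struct whizco_priv *priv)
{
        pci_free_consistent(pdev, WHIZCO_RING_BYTES, priv->ring,
                            priv->ring_dma);
}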
 
@@ -371,9 +371,9 @@ Create a pci_pool like this:
 
        struct pci_pool *pool;
 
-       pool = pci_pool_create(name, dev, size, align, alloc);
+       pool = pci_pool_create(name, pdev, size, align, alloc);
 
-The "name" is for diagnostics (like a kmem_cache name); dev and size
+The "name" is for diagnostics (like a kmem_cache name); pdev and size
 are as above.  The device's hardware alignment requirement for this
 type of data is "align" (which is expressed in bytes, and must be a
 power of two).  If your device has no boundary crossing restrictions,
@@ -472,11 +472,11 @@ To map a single region, you do:
        void *addr = buffer->ptr;
        size_t size = buffer->len;
 
-       dma_handle = pci_map_single(dev, addr, size, direction);
+       dma_handle = pci_map_single(pdev, addr, size, direction);
 
 and to unmap it:
 
-       pci_unmap_single(dev, dma_handle, size, direction);
+       pci_unmap_single(pdev, dma_handle, size, direction);
 
 You should call pci_unmap_single when the DMA activity is finished, e.g.
 from the interrupt which told you that the DMA transfer is done.
@@ -493,17 +493,17 @@ Specifically:
        unsigned long offset = buffer->offset;
        size_t size = buffer->len;
 
-       dma_handle = pci_map_page(dev, page, offset, size, direction);
+       dma_handle = pci_map_page(pdev, page, offset, size, direction);
 
        ...
 
-       pci_unmap_page(dev, dma_handle, size, direction);
+       pci_unmap_page(pdev, dma_handle, size, direction);
 
 Here, "offset" means byte offset within the given page.
 
 With scatterlists, you map a region gathered from several regions by:
 
-       int i, count = pci_map_sg(dev, sglist, nents, direction);
+       int i, count = pci_map_sg(pdev, sglist, nents, direction);
        struct scatterlist *sg;
 
        for_each_sg(sglist, sg, count, i) {
@@ -527,7 +527,7 @@ accessed sg->address and sg->length as shown above.
 
 To unmap a scatterlist, just call:
 
-       pci_unmap_sg(dev, sglist, nents, direction);
+       pci_unmap_sg(pdev, sglist, nents, direction);
 
 Again, make sure DMA activity has already finished.
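Putting the scatterlist calls together, a hedged sketch
(whizco_program_desc() is an invented hardware-programming helper; the
scatterlist is assumed to have been filled in by the caller):

#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Sketch: map a pre-built scatterlist, feed the dma_address/length pairs
 * to the hardware, and unmap once the DMA has completed. */
static int whizco_map_and_start(struct pci_dev *pdev,
                                struct scatterlist *sglist, int nents)
{
        struct scatterlist *sg;
        int i, count;

        count = pci_map_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
        if (count == 0)
                return -EIO;

        for_each_sg(sglist, sg, count, i)
                whizco_program_desc(i, sg_dma_address(sg), sg_dma_len(sg));

        /* ... later, once the hardware signals completion: */
        pci_unmap_sg(pdev, sglist, nents, PCI_DMA_TODEVICE);
        return 0;
}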
 
@@ -550,11 +550,11 @@ correct copy of the DMA buffer.
 So, firstly, just map it with pci_map_{single,sg}, and after each DMA
 transfer call either:
 
-       pci_dma_sync_single_for_cpu(dev, dma_handle, size, direction);
+       pci_dma_sync_single_for_cpu(pdev, dma_handle, size, direction);
 
 or:
 
-       pci_dma_sync_sg_for_cpu(dev, sglist, nents, direction);
+       pci_dma_sync_sg_for_cpu(pdev, sglist, nents, direction);
 
 as appropriate.
 
@@ -562,7 +562,7 @@ Then, if you wish to let the device get at the DMA area again,
 finish accessing the data with the cpu, and then before actually
 giving the buffer to the hardware call either:
 
-       pci_dma_sync_single_for_device(dev, dma_handle, size, direction);
+       pci_dma_sync_single_for_device(pdev, dma_handle, size, direction);
 
 or:
 
@@ -739,7 +739,7 @@ failure can be determined by:
 
        dma_addr_t dma_handle;
 
-       dma_handle = pci_map_single(dev, addr, size, direction);
+       dma_handle = pci_map_single(pdev, addr, size, direction);
        if (pci_dma_mapping_error(dma_handle)) {
                /*
                 * reduce current DMA mapping usage,
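Tying the streaming-mapping pieces of this file together, a hedged sketch
of a full map / sync / unmap cycle for one receive buffer (the whizco
helpers are invented; the error test is the pci_dma_mapping_error() check
shown in the hunk above):

#include <linux/pci.h>
#include <linux/errno.h>

/* Sketch: map a buffer for the device to fill, sync it back for the CPU
 * before reading it, then unmap it. */
static int whizco_rx_one(struct pci_dev *pdev, void *buf, size_t len)
{
        dma_addr_t handle;

        handle = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(handle))
                return -ENOMEM;         /* reduce mapping usage, retry later */

        whizco_start_rx(pdev, handle, len);     /* invented helper */
        /* ... wait for the completion interrupt ... */

        pci_dma_sync_single_for_cpu(pdev, handle, len, PCI_DMA_FROMDEVICE);
        /* the CPU may now look at buf */

        pci_unmap_single(pdev, handle, len, PCI_DMA_FROMDEVICE);
        return 0;
}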
index 83966e94cc32308579b7b6f041c4299fad94fbd5..0eb0d027eb32e139f442eb30e3b92a7560112bed 100644 (file)
@@ -12,7 +12,7 @@ DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml videobook.xml \
            kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
            gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
            genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
-           mac80211.xml
+           mac80211.xml debugobjects.xml
 
 ###
 # The build process is as follows (targets):
diff --git a/Documentation/DocBook/debugobjects.tmpl b/Documentation/DocBook/debugobjects.tmpl
new file mode 100644 (file)
index 0000000..7f5f218
--- /dev/null
@@ -0,0 +1,391 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
+       "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
+
+<book id="debug-objects-guide">
+ <bookinfo>
+  <title>Debug objects life time</title>
+
+  <authorgroup>
+   <author>
+    <firstname>Thomas</firstname>
+    <surname>Gleixner</surname>
+    <affiliation>
+     <address>
+      <email>tglx@linutronix.de</email>
+     </address>
+    </affiliation>
+   </author>
+  </authorgroup>
+
+  <copyright>
+   <year>2008</year>
+   <holder>Thomas Gleixner</holder>
+  </copyright>
+
+  <legalnotice>
+   <para>
+     This documentation is free software; you can redistribute
+     it and/or modify it under the terms of the GNU General Public
+     License version 2 as published by the Free Software Foundation.
+   </para>
+
+   <para>
+     This program is distributed in the hope that it will be
+     useful, but WITHOUT ANY WARRANTY; without even the implied
+     warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+     See the GNU General Public License for more details.
+   </para>
+
+   <para>
+     You should have received a copy of the GNU General Public
+     License along with this program; if not, write to the Free
+     Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+     MA 02111-1307 USA
+   </para>
+
+   <para>
+     For more details see the file COPYING in the source
+     distribution of Linux.
+   </para>
+  </legalnotice>
+ </bookinfo>
+
+<toc></toc>
+
+  <chapter id="intro">
+    <title>Introduction</title>
+    <para>
+      debugobjects is a generic infrastructure to track the lifetime
+      of kernel objects and validate the operations performed on them.
+    </para>
+    <para>
+      debugobjects is useful to check for the following error patterns:
+       <itemizedlist>
+         <listitem><para>Activation of uninitialized objects</para></listitem>
+         <listitem><para>Initialization of active objects</para></listitem>
+         <listitem><para>Usage of freed/destroyed objects</para></listitem>
+       </itemizedlist>
+    </para>
+    <para>
+      debugobjects does not change the data structure of the real
+      object, so it can be compiled in with minimal runtime impact
+      and enabled on demand with a kernel command line option.
+    </para>
+  </chapter>
+
+  <chapter id="howto">
+    <title>Howto use debugobjects</title>
+    <para>
+      A kernel subsystem needs to provide a data structure which
+      describes the object type and add calls into the debug code at
+      appropriate places. The data structure to describe the object
+      type needs at minimum the name of the object type. Optional
+      functions can and should be provided to fix up detected problems
+      so the kernel can continue to work and the debug information can
+      be retrieved from a live system instead of hard core debugging
+      with serial consoles and stack trace transcripts from the
+      monitor.
+    </para>
+    <para>
+      The debug calls provided by debugobjects are:
+      <itemizedlist>
+       <listitem><para>debug_object_init</para></listitem>
+       <listitem><para>debug_object_init_on_stack</para></listitem>
+       <listitem><para>debug_object_activate</para></listitem>
+       <listitem><para>debug_object_deactivate</para></listitem>
+       <listitem><para>debug_object_destroy</para></listitem>
+       <listitem><para>debug_object_free</para></listitem>
+      </itemizedlist>
+      Each of these functions takes the address of the real object and
+      a pointer to the object type specific debug description
+      structure.
+    </para>
+    <para>
+      Each detected error is reported in the statistics and a limited
+      number of errors are printk'ed including a full stack trace.
+    </para>
+    <para>
+      The statistics are available via debugfs/debug_objects/stats.
+      They provide information about the number of warnings and the
+      number of successful fixups along with information about the
+      usage of the internal tracking objects and the state of the
+      internal tracking objects pool.
+    </para>
+  </chapter>
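To make the calling convention concrete, a minimal sketch of how a
subsystem might wire these calls into an invented object type
("whizco_widget" and its helpers are placeholders; only
struct debug_obj_descr and the debug_object_* calls come from the text
above):

#include <linux/debugobjects.h>

struct whizco_widget {
        int state;
        /* ... real object members ... */
};

/* Object type description passed to every debug_object_* call below;
 * the optional fixup_* handlers are discussed in the fixup chapter. */
static struct debug_obj_descr whizco_debug_descr = {
        .name = "whizco_widget",
};

static void whizco_widget_init(struct whizco_widget *w)
{
        debug_object_init(w, &whizco_debug_descr);
        w->state = 0;
}

static void whizco_widget_start(struct whizco_widget *w)
{
        debug_object_activate(w, &whizco_debug_descr);
        /* ... make the object live ... */
}

static void whizco_widget_stop(struct whizco_widget *w)
{
        /* ... quiesce the object ... */
        debug_object_deactivate(w, &whizco_debug_descr);
}

static void whizco_widget_release(struct whizco_widget *w)
{
        debug_object_free(w, &whizco_debug_descr);
        /* ... actually free the memory ... */
}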
+  <chapter id="debugfunctions">
+    <title>Debug functions</title>
+    <sect1 id="prototypes">
+      <title>Debug object function reference</title>
+!Elib/debugobjects.c
+    </sect1>
+    <sect1 id="debug_object_init">
+      <title>debug_object_init</title>
+      <para>
+       This function is called whenever the initialization function
+       of a real object is called.
+      </para>
+      <para>
+       When the real object is already tracked by debugobjects it is
+       checked whether the object can be initialized.  Initializing
+       is not allowed for active and destroyed objects. When
+       debugobjects detects an error, then it calls the fixup_init
+       function of the object type description structure if provided
+       by the caller. The fixup function can correct the problem
+       before the real initialization of the object happens. E.g. it
+       can deactivate an active object in order to prevent damage to
+       the subsystem.
+      </para>
+      <para>
+       When the real object is not yet tracked by debugobjects,
+       debugobjects allocates a tracker object for the real object
+       and sets the tracker object state to ODEBUG_STATE_INIT. It
+       verifies that the object is not on the caller's stack. If it is
+       on the caller's stack, then a limited number of warnings
+       including a full stack trace are printk'ed. The calling code
+       must use debug_object_init_on_stack() and remove the object
+       before leaving the function which allocated it. See next
+       section.
+      </para>
+    </sect1>
+
+    <sect1 id="debug_object_init_on_stack">
+      <title>debug_object_init_on_stack</title>
+      <para>
+       This function is called whenever the initialization function
+       of a real object which resides on the stack is called.
+      </para>
+      <para>
+       When the real object is already tracked by debugobjects it is
+       checked whether the object can be initialized. Initializing
+       is not allowed for active and destroyed objects. When
+       debugobjects detects an error, then it calls the fixup_init
+       function of the object type description structure if provided
+       by the caller. The fixup function can correct the problem
+       before the real initialization of the object happens. E.g. it
+       can deactivate an active object in order to prevent damage to
+       the subsystem.
+      </para>
+      <para>
+       When the real object is not yet tracked by debugobjects
+       debugobjects allocates a tracker object for the real object
+       and sets the tracker object state to ODEBUG_STATE_INIT. It
+       verifies that the object is on the caller's stack.
+      </para>
+      <para>
+       An object which is on the stack must be removed from the
+       tracker by calling debug_object_free() before the function
+       which allocates the object returns. Otherwise we keep track of
+       stale objects.
+      </para>
+    </sect1>
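Continuing the whizco_widget sketch, an on-stack object would be handled
roughly like this (again, only the debug_object_* calls come from the
text; everything else is invented):

/* Sketch: objects living on the stack use debug_object_init_on_stack()
 * and must be removed from the tracker before the frame goes away. */
static void whizco_do_stack_work(void)
{
        struct whizco_widget w;

        debug_object_init_on_stack(&w, &whizco_debug_descr);
        debug_object_activate(&w, &whizco_debug_descr);

        /* ... use the widget ... */

        debug_object_deactivate(&w, &whizco_debug_descr);
        debug_object_free(&w, &whizco_debug_descr);
}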
+
+    <sect1 id="debug_object_activate">
+      <title>debug_object_activate</title>
+      <para>
+       This function is called whenever the activation function of a
+       real object is called.
+      </para>
+      <para>
+       When the real object is already tracked by debugobjects it is
+       checked whether the object can be activated.  Activating is
+       not allowed for active and destroyed objects. When
+       debugobjects detects an error, then it calls the
+       fixup_activate function of the object type description
+       structure if provided by the caller. The fixup function can
+       correct the problem before the real activation of the object
+       happens. E.g. it can deactivate an active object in order to
+       prevent damage to the subsystem.
+      </para>
+      <para>
+       When the real object is not yet tracked by debugobjects then
+       the fixup_activate function is called if available. This is
+       necessary to allow the legitimate activation of statically
+       allocated and initialized objects. The fixup function checks
+       whether the object is valid and calls the debug_object_init()
+       function to initialize the tracking of this object.
+      </para>
+      <para>
+       When the activation is legitimate, then the state of the
+       associated tracker object is set to ODEBUG_STATE_ACTIVE.
+      </para>
+    </sect1>
+
+    <sect1 id="debug_object_deactivate">
+      <title>debug_object_deactivate</title>
+      <para>
+       This function is called whenever the deactivation function of
+       a real object is called.
+      </para>
+      <para>
+       When the real object is tracked by debugobjects, it is checked
+       whether the object can be deactivated. Deactivating is not
+       allowed for untracked or destroyed objects.
+      </para>
+      <para>
+       When the deactivation is legitimate, then the state of the
+       associated tracker object is set to ODEBUG_STATE_INACTIVE.
+      </para>
+    </sect1>
+
+    <sect1 id="debug_object_destroy">
+      <title>debug_object_destroy</title>
+      <para>
+       This function is called to mark an object destroyed. This is
+       useful to prevent the usage of invalid objects, which are
+       still available in memory: either statically allocated objects
+       or objects which are freed later.
+      </para>
+      <para>
+       When the real object is tracked by debugobjects, it is checked
+       whether the object can be destroyed. Destruction is not
+       allowed for active and destroyed objects. When debugobjects
+       detects an error, then it calls the fixup_destroy function of
+       the object type description structure if provided by the
+       caller. The fixup function can correct the problem before the
+       real destruction of the object happens. E.g. it can deactivate
+       an active object in order to prevent damage to the subsystem.
+      </para>
+      <para>
+       When the destruction is legitimate, then the state of the
+       associated tracker object is set to ODEBUG_STATE_DESTROYED.
+      </para>
+    </sect1>
+
+    <sect1 id="debug_object_free">
+      <title>debug_object_free</title>
+      <para>
+       This function is called before an object is freed.
+      </para>
+      <para>
+       When the real object is tracked by debugobjects, it is checked
+       whether the object can be freed. Freeing is not allowed for
+       active objects. When debugobjects detects an error, then it
+       calls the fixup_free function of the object type description
+       structure if provided by the caller. The fixup function can
+       correct the problem before the real free of the object
+       happens. E.g. it can deactivate an active object in order to
+       prevent damage to the subsystem.
+      </para>
+      <para>
+       Note that debug_object_free removes the object from the
+       tracker. Later usage of the object is detected by the other
+       debug checks.
+      </para>
+    </sect1>
+  </chapter>
+  <chapter id="fixupfunctions">
+    <title>Fixup functions</title>
+    <sect1 id="debug_obj_descr">
+      <title>Debug object type description structure</title>
+!Iinclude/linux/debugobjects.h
+    </sect1>
+    <sect1 id="fixup_init">
+      <title>fixup_init</title>
+      <para>
+       This function is called from the debug code whenever a problem
+       in debug_object_init is detected. The function takes the
+       address of the object and the state which is currently
+       recorded in the tracker.
+      </para>
+      <para>
+       Called from debug_object_init when the object state is:
+       <itemizedlist>
+         <listitem><para>ODEBUG_STATE_ACTIVE</para></listitem>
+       </itemizedlist>
+      </para>
+      <para>
+       The function returns 1 when the fixup was successful,
+       otherwise 0. The return value is used to update the
+       statistics.
+      </para>
+      <para>
+       Note that the function needs to call the debug_object_init()
+       function again after the damage has been repaired, in order to
+       keep the state consistent.
+      </para>
+    </sect1>
+
+    <sect1 id="fixup_activate">
+      <title>fixup_activate</title>
+      <para>
+       This function is called from the debug code whenever a problem
+       in debug_object_activate is detected.
+      </para>
+      <para>
+       Called from debug_object_activate when the object state is:
+       <itemizedlist>
+         <listitem><para>ODEBUG_STATE_NOTAVAILABLE</para></listitem>
+         <listitem><para>ODEBUG_STATE_ACTIVE</para></listitem>
+       </itemizedlist>
+      </para>
+      <para>
+       The function returns 1 when the fixup was successful,
+       otherwise 0. The return value is used to update the
+       statistics.
+      </para>
+      <para>
+       Note that the function needs to call the debug_object_activate()
+       function again after the damage has been repaired in order to
+       keep the state consistent.
+      </para>
+      <para>
+       The activation of statically initialized objects is a special
+       case. When debug_object_activate() has no tracked object for
+       this object address then fixup_activate() is called with
+       object state ODEBUG_STATE_NOTAVAILABLE. The fixup function
+       needs to check whether this is a legitimate case of a
+       statically initialized object or not. If it is, it calls
+       debug_object_init() and debug_object_activate() to make the
+       object known to the tracker and mark it active. In this case
+       the function should return 0, because this is not a real fixup.
+      </para>
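+      <para>
+       A minimal sketch of such a fixup_activate callback is shown
+       below. The object type, its magic field and the descriptor are
+       hypothetical and only illustrate the flow described above:
+      </para>
+      <programlisting>
+static int my_obj_fixup_activate(void *addr, enum debug_obj_state state)
+{
+       struct my_obj *obj = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_NOTAVAILABLE:
+               /*
+                * Assume statically initialized objects carry a known
+                * magic value. Make them known to the tracker and
+                * activate them.
+                */
+               if (obj->magic == MY_OBJ_STATIC_MAGIC) {
+                       debug_object_init(obj, &amp;my_obj_debug_descr);
+                       debug_object_activate(obj, &amp;my_obj_debug_descr);
+               }
+               /* Not a real fixup, so return 0. */
+               return 0;
+
+       case ODEBUG_STATE_ACTIVE:
+               /* Activation of an already active object: nothing fixed. */
+               return 0;
+
+       default:
+               return 0;
+       }
+}
+      </programlisting>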
+    </sect1>
+
+    <sect1 id="fixup_destroy">
+      <title>fixup_destroy</title>
+      <para>
+       This function is called from the debug code whenever a problem
+       in debug_object_destroy is detected.
+      </para>
+      <para>
+       Called from debug_object_destroy when the object state is:
+       <itemizedlist>
+         <listitem><para>ODEBUG_STATE_ACTIVE</para></listitem>
+       </itemizedlist>
+      </para>
+      <para>
+       The function returns 1 when the fixup was successful,
+       otherwise 0. The return value is used to update the
+       statistics.
+      </para>
+    </sect1>
+    <sect1 id="fixup_free">
+      <title>fixup_free</title>
+      <para>
+       This function is called from the debug code whenever a problem
+       in debug_object_free is detected. Furthermore, it can be called
+       from the debug checks in kfree/vfree, when an active object is
+       detected by the debug_check_no_obj_freed() sanity checks.
+      </para>
+      <para>
+       Called from debug_object_free() or debug_check_no_obj_freed()
+       when the object state is:
+       <itemizedlist>
+         <listitem><para>ODEBUG_STATE_ACTIVE</para></listitem>
+       </itemizedlist>
+      </para>
+      <para>
+       The function returns 1 when the fixup was successful,
+       otherwise 0. The return value is used to update the
+       statistics.
+      </para>
+    </sect1>
+  </chapter>
+  <chapter id="bugs">
+    <title>Known Bugs And Assumptions</title>
+    <para>
+       None (knock on wood).
+    </para>
+  </chapter>
+</book>
index 488dd4a4945b84205f66fbdec3ba4b3cbe5af003..b7b1482f6e04ebda24e665d0734fb4786bfc197a 100644 (file)
@@ -119,7 +119,7 @@ X!Ilib/string.c
 !Elib/string.c
      </sect1>
      <sect1><title>Bit Operations</title>
-!Iinclude/asm-x86/bitops_32.h
+!Iinclude/asm-x86/bitops.h
      </sect1>
   </chapter>
 
@@ -645,4 +645,58 @@ X!Idrivers/video/console/fonts.c
 !Edrivers/i2c/i2c-core.c
   </chapter>
 
+  <chapter id="clk">
+     <title>Clock Framework</title>
+
+     <para>
+       The clock framework defines programming interfaces to support
+       software management of the system clock tree.
+       This framework is widely used with System-On-Chip (SOC) platforms
+       to support power management and various devices which may need
+       custom clock rates.
+       Note that these "clocks" don't relate to timekeeping or real
+       time clocks (RTCs), each of which has a separate framework.
+       These <structname>struct clk</structname> instances may be used
+       to manage, for example, a 96 MHz signal that is used to shift
+       bits into and out of peripherals or busses, or otherwise trigger
+       synchronous state machine transitions in system hardware.
+     </para>
+
+     <para>
+       Power management is supported by explicit software clock gating:
+       unused clocks are disabled, so the system doesn't waste power
+       changing the state of transistors that aren't in active use.
+       On some systems this may be backed by hardware clock gating,
+       where clocks are gated without being disabled in software.
+       Sections of chips that are powered but not clocked may be able
+       to retain their last state.
+       This low power state is often called a <emphasis>retention
+       mode</emphasis>.
+       This mode still incurs leakage currents, especially with finer
+       circuit geometries, but for CMOS circuits power is mostly used
+       by clocked state changes.
+     </para>
+
+     <para>
+       Power-aware drivers only enable their clocks when the device
+       they manage is in active use.  Also, system sleep states often
+       differ according to which clock domains are active:  while a
+       "standby" state may allow wakeup from several active domains, a
+       "mem" (suspend-to-RAM) state may require a more wholesale shutdown
+       of clocks derived from higher speed PLLs and oscillators, limiting
+       the number of possible wakeup event sources.  A driver's suspend
+       method may need to be aware of system-specific clock constraints
+       on the target sleep state.
+     </para>
+
+     <para>
+        Some platforms support programmable clock generators.  These
+       can be used by external chips of various kinds, such as other
+       CPUs, multimedia codecs, and devices with strict requirements
+       for interface clocking.
+     </para>
+
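+     <para>
+       As a rough sketch (not tied to any particular driver, and the
+       clock name "fck" is only an assumption), a power-aware driver
+       typically brackets device activity with clk_enable() and
+       clk_disable() calls on a clock obtained via clk_get():
+     </para>
+     <programlisting>
+struct clk *clk;
+
+clk = clk_get(dev, "fck");      /* dev is the driver's struct device */
+if (IS_ERR(clk))
+       return PTR_ERR(clk);
+
+clk_enable(clk);                /* ungate the clock while the device is in use */
+/* ... program the hardware ... */
+clk_disable(clk);               /* gate it again to save power */
+
+clk_put(clk);
+     </programlisting>
+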
+!Iinclude/linux/clk.h
+  </chapter>
+
 </book>
index b9e143e28c641ebec67d7e422b8ff81e97cda2a4..54eb26b573720e21a85a4181000b603de4678fcb 100644 (file)
 !Idrivers/rapidio/rio-sysfs.c
      </sect1>
      <sect1 id="PPC32_support"><title>PPC32 support</title>
-!Iarch/powerpc/kernel/rio.c
 !Earch/powerpc/sysdev/fsl_rio.c
 !Iarch/powerpc/sysdev/fsl_rio.c
      </sect1>
diff --git a/Documentation/braille-console.txt b/Documentation/braille-console.txt
new file mode 100644 (file)
index 0000000..000b0fb
--- /dev/null
@@ -0,0 +1,34 @@
+                       Linux Braille Console
+
+To get early boot messages on a braille device (before userspace screen
+readers can start), you first need to compile in support for the usual serial
+console (see serial-console.txt) and for the braille device (in Device
+Drivers - Accessibility).
+
+Then you need to specify a console=brl,... option on the kernel command line;
+the format is:
+
+       console=brl,serial_options...
+
+where serial_options... are the same as described in serial-console.txt.
+
+So, for instance, you can use console=brl,ttyS0 if the braille device is
+connected to the first serial port, and console=brl,ttyS0,115200 to override
+the baud rate to 115200, etc.
+
+By default, the braille device will just show the last kernel message (console
+mode).  To review previous messages, press the Insert key to switch to the VT
+review mode.  In review mode, the arrow keys let you browse through the VT
+content, the Page Up/Down keys jump to the top/bottom of the screen, and the
+Home key goes back to the cursor, providing a very basic screen reviewing
+facility.
+
+Sound feedback can be obtained by adding the braille_console.sound=1 kernel
+parameter.
+
+For simplicity, only one braille console can be enabled; any further uses of
+console=brl,... will be discarded.  Also note that it does not interfere with
+the console selection mechanism described in serial-console.txt.
+
+For now, only the VisioBraille device is supported.
+
+Samuel Thibault <samuel.thibault@ens-lyon.org>
index 31d12e21ff8a9a780cbdeef130421d419338e310..c298a6690e0d67700b08b933a65a3eb4fd97cd96 100644 (file)
@@ -500,8 +500,7 @@ post-attachment activity that requires memory allocations or blocking.
 
 void fork(struct cgroup_subsy *ss, struct task_struct *task)
 
-Called when a task is forked into a cgroup. Also called during
-registration for all existing tasks.
+Called when a task is forked into a cgroup.
 
 void exit(struct cgroup_subsys *ss, struct task_struct *task)
 
diff --git a/Documentation/controllers/devices.txt b/Documentation/controllers/devices.txt
new file mode 100644 (file)
index 0000000..4dcea42
--- /dev/null
@@ -0,0 +1,48 @@
+Device Whitelist Controller
+
+1. Description:
+
+Implement a cgroup to track and enforce open and mknod restrictions
+on device files.  The device cgroup associates a device access
+whitelist with each cgroup.  A whitelist entry has 4 fields:
+'type' is a (all), c (char), or b (block); 'all' means it applies
+to all types and all major and minor numbers.  Major and minor are
+either an integer or * for all.  Access is a composition of r
+(read), w (write), and m (mknod).
+
+The root device cgroup starts with rwm to 'all'.  A child device
+cgroup gets a copy of the parent.  Administrators can then remove
+devices from the whitelist or add new entries.  A child cgroup can
+never receive a device access which is denied by its parent.  However,
+when a device access is removed from a parent, it is not automatically
+removed from the child(ren).
+
+2. User Interface
+
+An entry is added using devices.allow, and removed using
+devices.deny.  For instance
+
+       echo 'c 1:3 mr' > /cgroups/1/devices.allow
+
+allows cgroup 1 to read and mknod the device usually known as
+/dev/null.  Doing
+
+       echo a > /cgroups/1/devices.deny
+
+will remove the default 'a *:* mrw' entry.
+
+3. Security
+
+Any task can move itself between cgroups.  This clearly won't
+suffice, but we can decide the best way to adequately restrict
+movement as people get some experience with this.  We may just want
+to require CAP_SYS_ADMIN, which at least is a separate bit from
+CAP_MKNOD.  We may want to just refuse moving to a cgroup which
+isn't a descendant of the current one.  Or we may want to use
+CAP_MAC_ADMIN, since we really are trying to lock down root.
+
+CAP_SYS_ADMIN is needed to modify the whitelist or move another
+task to a new cgroup.  (Again we'll probably want to change that).
+
+A cgroup may not be granted more permissions than the cgroup's
+parent has.
diff --git a/Documentation/controllers/resource_counter.txt b/Documentation/controllers/resource_counter.txt
new file mode 100644 (file)
index 0000000..f196ac1
--- /dev/null
@@ -0,0 +1,181 @@
+
+               The Resource Counter
+
+The resource counter, declared at include/linux/res_counter.h,
+is supposed to facilitate resource management by controllers
+by providing common stuff for accounting.
+
+This "stuff" includes the res_counter structure and routines
+to work with it.
+
+
+
+1. Crucial parts of the res_counter structure
+
+ a. unsigned long long usage
+
+       The usage value shows the amount of a resource that is consumed
+       by a group at a given time. The units of measurement should be
+       determined by the controller that uses this counter. E.g. it can
+       be bytes, items or any other unit the controller operates on.
+
+ b. unsigned long long max_usage
+
+       The maximal value of the usage over time.
+
+       This value is useful when gathering statistical information about
+       the particular group, as it shows the actual resource requirements
+       for a particular group, not just some usage snapshot.
+
+ c. unsigned long long limit
+
+       The maximal amount of resource the group is allowed to consume. If
+       the group requests more resources, so that the usage value
+       would exceed the limit, the resource allocation is rejected (see
+       the next section).
+
+ d. unsigned long long failcnt
+
+       The failcnt stands for "failures counter". This is the number of
+       resource allocation attempts that failed.
+
+ e. spinlock_t lock
+
+       Protects changes of the above values.
+
+
+
+2. Basic accounting routines
+
+ a. void res_counter_init(struct res_counter *rc)
+
+       Initializes the resource counter. As usual, should be the first
+       routine called for a new counter.
+
+ b. int res_counter_charge[_locked]
+                       (struct res_counter *rc, unsigned long val)
+
+       When a resource is about to be allocated it has to be accounted
+       with the appropriate resource counter (controller should determine
+       which one to use on its own). This operation is called "charging".
+
+       It is not very important which operation - resource allocation
+       or charging - is performed first, but
+         * if the allocation is performed first, this may create a
+           temporary resource over-usage by the time resource counter is
+           charged;
+         * if the charging is performed first, then it should be uncharged
+           on the error path (if one is taken).
+
+ c. void res_counter_uncharge[_locked]
+                       (struct res_counter *rc, unsigned long val)
+
+       When a resource is released (freed) it should be de-accounted
+       from the resource counter it was accounted to.  This is called
+       "uncharging".
+
+    The _locked routines imply that the res_counter->lock is taken.
+
+
+ 2.1 Other accounting routines
+
+    There are more routines that may help you with common needs, like
+    checking whether the limit is reached or resetting the max_usage
+    value. They are all declared in include/linux/res_counter.h.
+
+
+
+3. Analyzing the resource counter registrations
+
+ a. If the failcnt value constantly grows, this means that the counter's
+    limit is too tight. Either the group is misbehaving and consumes too
+    many resources, or the configuration is not suitable for the group
+    and the limit should be increased.
+
+ b. The max_usage value can be used to quickly tune the group. One may
+    set the limits to maximal values and either load the container with
+    a common pattern or leave it running for a while. After this, the max_usage
+    value shows the amount of memory the container would require during
+    its common activity.
+
+    Setting the limit a bit above this value gives a pretty good
+    configuration that works in most cases.
+
+ c. If the max_usage is much less than the limit, but the failcnt value
+    is growing, then the group tries to allocate a big chunk of resource
+    at once.
+
+ d. If the max_usage is much less than the limit, but the failcnt value
+    is 0, then this group is given a higher limit than it actually
+    requires. It is better to lower the limit a bit, leaving more
+    resource for other groups.
+
+
+
+4. Communication with the control groups subsystem (cgroups)
+
+All the resource controllers that are using cgroups and resource counters
+should provide files (in the cgroup filesystem) to work with the resource
+counter fields. They are recommended to adhere to the following rules:
+
+ a. File names
+
+       Field name      File name
+       ---------------------------------------------------
+       usage           usage_in_<unit_of_measurement>
+       max_usage       max_usage_in_<unit_of_measurement>
+       limit           limit_in_<unit_of_measurement>
+       failcnt         failcnt
+       lock            no file :)
+
+ b. Reading from file should show the corresponding field value in the
+    appropriate format.
+
+ c. Writing to file
+
+       Field           Expected behavior
+       ----------------------------------
+       usage           prohibited
+       max_usage       reset to usage
+       limit           set the limit
+       failcnt         reset to zero
+
+
+
+5. Usage example
+
+ a. Declare a task group (take a look at cgroups subsystem for this) and
+    fold a res_counter into it
+
+       struct my_group {
+               struct res_counter res;
+
+               <other fields>
+       };
+
+ b. Put hooks in resource allocation/release paths
+
+       int alloc_something(...)
+       {
+               if (res_counter_charge(res_counter_ptr, amount) < 0)
+                       return -ENOMEM;
+
+               <allocate the resource and return to the caller>
+       }
+
+       void release_something(...)
+       {
+               res_counter_uncharge(res_counter_ptr, amount);
+
+               <release the resource>
+       }
+
+    In order to keep the usage value self-consistent, both the
+    "res_counter_ptr" and the "amount" in release_something() should be
+    the same as they were in alloc_something() when the resource being
+    released was allocated.
+
+ c. Provide a way to read res_counter values and set them (cgroups
+    can still help with this).
+
+ d. Compile and run :)
index af3b925ece08945e6ca5ed37147022cfb45c0989..6c442d8426b5928569918ebd149f4818c5657ab1 100644 (file)
@@ -154,6 +154,11 @@ scaling_governor,          and by "echoing" the name of another
                                that some governors won't load - they only
                                work on some specific architectures or
                                processors.
+
+cpuinfo_cur_freq :             Current speed of the CPU, in kHz.
+
+scaling_available_frequencies : List of available frequencies, in kHz.
+
 scaling_min_freq and
 scaling_max_freq               show the current "policy limits" (in
                                kHz). By echoing new values into these
@@ -162,6 +167,15 @@ scaling_max_freq           show the current "policy limits" (in
                                first set scaling_max_freq, then
                                scaling_min_freq.
 
+affected_cpus :                        List of CPUs that require software coordination
+                               of frequency.
+
+related_cpus :                 List of CPUs that need some sort of frequency
+                               coordination, whether software or hardware.
+
+scaling_driver :               Hardware driver for cpufreq.
+
+scaling_cur_freq :             Current frequency of the CPU, in kHz.
 
 If you have selected the "userspace" governor which allows you to
 set the CPU operating frequency to a specific value, you can read out
index aa854b9b18cda8de6fae047540a529b55ca81405..fb7b361e6eea66c573af07d86c415ad4371dcb64 100644 (file)
@@ -171,6 +171,7 @@ files describing that cpuset:
  - memory_migrate flag: if set, move pages to cpusets nodes
  - cpu_exclusive flag: is cpu placement exclusive?
  - mem_exclusive flag: is memory placement exclusive?
+ - mem_hardwall flag:  is memory allocation hardwalled?
  - memory_pressure: measure of how much paging pressure in cpuset
 
 In addition, the root cpuset only has the following file:
@@ -222,17 +223,18 @@ If a cpuset is cpu or mem exclusive, no other cpuset, other than
 a direct ancestor or descendent, may share any of the same CPUs or
 Memory Nodes.
 
-A cpuset that is mem_exclusive restricts kernel allocations for
-page, buffer and other data commonly shared by the kernel across
-multiple users.  All cpusets, whether mem_exclusive or not, restrict
-allocations of memory for user space.  This enables configuring a
-system so that several independent jobs can share common kernel data,
-such as file system pages, while isolating each jobs user allocation in
-its own cpuset.  To do this, construct a large mem_exclusive cpuset to
-hold all the jobs, and construct child, non-mem_exclusive cpusets for
-each individual job.  Only a small amount of typical kernel memory,
-such as requests from interrupt handlers, is allowed to be taken
-outside even a mem_exclusive cpuset.
+A cpuset that is mem_exclusive *or* mem_hardwall is "hardwalled",
+i.e. it restricts kernel allocations for page, buffer and other data
+commonly shared by the kernel across multiple users.  All cpusets,
+whether hardwalled or not, restrict allocations of memory for user
+space.  This enables configuring a system so that several independent
+jobs can share common kernel data, such as file system pages, while
+isolating each job's user allocation in its own cpuset.  To do this,
+construct a large mem_exclusive cpuset to hold all the jobs, and
+construct child, non-mem_exclusive cpusets for each individual job.
+Only a small amount of typical kernel memory, such as requests from
+interrupt handlers, is allowed to be taken outside even a
+mem_exclusive cpuset.
 
 
 1.5 What is memory_pressure ?
@@ -707,7 +709,7 @@ Now you want to do something with this cpuset.
 
 In this directory you can find several files:
 # ls
-cpus  cpu_exclusive  mems  mem_exclusive  tasks
+cpus  cpu_exclusive  mems  mem_exclusive  mem_hardwall  tasks
 
 Reading them will give you information about the state of this cpuset:
 the CPUs and Memory Nodes it can use, the processes that are using
index 354aec047c0e93662894f5043db6aa4153cddc15..881e6dd03aea411bb9a1b0b11aa3b4fb3a90761e 100644 (file)
@@ -141,6 +141,7 @@ mkprep
 mktables
 mktree
 modpost
+modules.order
 modversions.h*
 offset.h
 offsets.h
@@ -171,6 +172,7 @@ sm_tbl*
 split-include
 tags
 tftpboot.img
+timeconst.h
 times.h*
 tkparse
 trix_boot.h
diff --git a/Documentation/fb/gxfb.txt b/Documentation/fb/gxfb.txt
new file mode 100644 (file)
index 0000000..2f64090
--- /dev/null
@@ -0,0 +1,52 @@
+[This file is cloned from VesaFB/aty128fb]
+
+What is gxfb?
+=================
+
+This is a graphics framebuffer driver for AMD Geode GX2 based processors.
+
+Advantages:
+
+ * No need to use AMD's VSA code (or other VESA emulation layer) in the
+   BIOS.
+ * It provides a nice large console (128 cols + 48 lines with 1024x768)
+   without using tiny, unreadable fonts.
+ * You can run XF68_FBDev on top of /dev/fb0
+ * Most important: boot logo :-)
+
+Disadvantages:
+
+ * graphic mode is slower than text mode...
+
+
+How to use it?
+==============
+
+Switching modes is done using the gxfb.mode_option=<resolution>... boot
+parameter or using the `fbset' program.
+
+See Documentation/fb/modedb.txt for more information on modedb
+resolutions.
+
+
+X11
+===
+
+XF68_FBDev should generally work fine, but it is non-accelerated.
+
+
+Configuration
+=============
+
+You can pass kernel command line options to gxfb with gxfb.<option>.
+For example, gxfb.mode_option=800x600@75.
+Accepted options:
+
+mode_option    - specify the video mode.  Of the form
+                 <x>x<y>[-<bpp>][@<refresh>]
+vram           - size of video ram (normally auto-detected)
+vt_switch      - enable vt switching during suspend/resume.  The vt
+                 switch is slow, but harmless.
+
+--
+Andres Salomon <dilinger@debian.org>
index da5ee74219e8c55016dda1f8d1a6fb4820e5e717..27a3160650a468bed78a1483d485e1ccbf51bd6d 100644 (file)
@@ -14,6 +14,8 @@ graphics devices.  These would include:
        Intel 915GM
        Intel 945G
        Intel 945GM
+       Intel 965G
+       Intel 965GM
 
 B.  List of available options
 
diff --git a/Documentation/fb/lxfb.txt b/Documentation/fb/lxfb.txt
new file mode 100644 (file)
index 0000000..38b3ca6
--- /dev/null
@@ -0,0 +1,52 @@
+[This file is cloned from VesaFB/aty128fb]
+
+What is lxfb?
+=================
+
+This is a graphics framebuffer driver for AMD Geode LX based processors.
+
+Advantages:
+
+ * No need to use AMD's VSA code (or other VESA emulation layer) in the
+   BIOS.
+ * It provides a nice large console (128 cols + 48 lines with 1024x768)
+   without using tiny, unreadable fonts.
+ * You can run XF68_FBDev on top of /dev/fb0
+ * Most important: boot logo :-)
+
+Disadvantages:
+
+ * graphic mode is slower than text mode...
+
+
+How to use it?
+==============
+
+Switching modes is done using the lxfb.mode_option=<resolution>... boot
+parameter or using the `fbset' program.
+
+See Documentation/fb/modedb.txt for more information on modedb
+resolutions.
+
+
+X11
+===
+
+XF68_FBDev should generally work fine, but it is non-accelerated.
+
+
+Configuration
+=============
+
+You can pass kernel command line options to lxfb with lxfb.<option>.
+For example, lxfb.mode_option=800x600@75.
+Accepted options:
+
+mode_option    - specify the video mode.  Of the form
+                 <x>x<y>[-<bpp>][@<refresh>]
+vram           - size of video ram (normally auto-detected)
+vt_switch      - enable vt switching during suspend/resume.  The vt
+                 switch is slow, but harmless.
+
+--
+Andres Salomon <dilinger@debian.org>
index b9a2e7b7e83879802ea0001ecaacd45b6acdcd1a..237ca412582d424de7468316d4054d9004c48384 100644 (file)
@@ -1,7 +1,7 @@
                        Metronomefb
                        -----------
 Maintained by Jaya Kumar <jayakumar.lkml.gmail.com>
-Last revised: Nov 20, 2007
+Last revised: Mar 10, 2008
 
 Metronomefb is a driver for the Metronome display controller. The controller
 is from E-Ink Corporation. It is intended to be used to drive the E-Ink
@@ -11,20 +11,18 @@ display media here http://www.e-ink.com/products/matrix/metronome.html .
 Metronome is interfaced to the host CPU through the AMLCD interface. The
 host CPU generates the control information and the image in a framebuffer
 which is then delivered to the AMLCD interface by a host specific method.
-Currently, that's implemented for the PXA's LCDC controller. The display and
-error status are each pulled through individual GPIOs.
+The display and error status are each pulled through individual GPIOs.
 
-Metronomefb was written for the PXA255/gumstix/lyre combination and
-therefore currently has board set specific code in it. If other boards based on
-other architectures are available, then the host specific code can be separated
-and abstracted out.
+Metronomefb is platform independent and depends on a board specific driver
+to do all physical IO work. Currently, an example is implemented for the
+PXA board used in the AM-200 EPD devkit. This example is am200epd.c.
 
 Metronomefb requires waveform information which is delivered via the AMLCD
 interface to the metronome controller. The waveform information is expected to
 be delivered from userspace via the firmware class interface. The waveform file
 can be compressed as long as your udev or hotplug script is aware of the need
-to uncompress it before delivering it. metronomefb will ask for waveform.wbf
-which would typically go into /lib/firmware/waveform.wbf depending on your
+to uncompress it before delivering it. metronomefb will ask for metronome.wbf
+which would typically go into /lib/firmware/metronome.wbf depending on your
 udev/hotplug setup. I have only tested with a single waveform file which was
 originally labeled 23P01201_60_WT0107_MTC. I do not know what it stands for.
 Caution should be exercised when manipulating the waveform as there may be
index 4fcdb4cf4cca922c91d7ff699154c5ecae39778d..ec4dee75a35450376ad13442ea926eaf76de76aa 100644 (file)
@@ -125,8 +125,12 @@ There may be more modes.
     amifb      - Amiga chipset frame buffer
     aty128fb   - ATI Rage128 / Pro frame buffer
     atyfb      - ATI Mach64 frame buffer
+    pm2fb      - Permedia 2/2V frame buffer
+    pm3fb      - Permedia 3 frame buffer
+    sstfb      - Voodoo 1/2 (SST1) chipset frame buffer
     tdfxfb     - 3D Fx frame buffer
     tridentfb  - Trident (Cyber)blade chipset frame buffer
+    vt8623fb   - VIA 8623 frame buffer
 
 BTW, only a few drivers use this at the moment. Others are to follow
 (feel free to send patches).
index 448729fcaeb1d0b9557333d54cef7ef7f1628fe5..3c35d452b1a968439cdbb2b270bcaa75b26b762d 100644 (file)
@@ -128,15 +128,6 @@ Who:       Arjan van de Ven <arjan@linux.intel.com>
 
 ---------------------------
 
-What:  vm_ops.nopage
-When:  Soon, provided in-kernel callers have been converted
-Why:   This interface is replaced by vm_ops.fault, but it has been around
-       forever, is used by a lot of drivers, and doesn't cost much to
-       maintain.
-Who:   Nick Piggin <npiggin@suse.de>
-
----------------------------
-
 What:  PHYSDEVPATH, PHYSDEVBUS, PHYSDEVDRIVER in the uevent environment
 When:  October 2008
 Why:   The stacking of class devices makes these values misleading and
@@ -147,6 +138,24 @@ Who:       Kay Sievers <kay.sievers@suse.de>
 
 ---------------------------
 
+What:  find_task_by_pid
+When:  2.6.26
+Why:   With pid namespaces, calling this function will return the
+       wrong task when called from inside a namespace.
+
+       The best way to save a task pid and find a task by this
+       pid later is to find this task's struct pid pointer (or get
+       it directly from the task) and call pid_task() later.
+
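+       A rough sketch (error handling omitted; the variable names are
+       illustrative only):
+
+               struct pid *saved = get_pid(task_pid(task));
+               ...
+               struct task_struct *t = pid_task(saved, PIDTYPE_PID);
+               put_pid(saved);
+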
+       If someone really needs to get a task by its pid_t, then
+       they most likely need find_task_by_vpid() to get the
+       task from the same namespace as the current task, but
+       this may not be the case in general.
+
+Who:   Pavel Emelyanov <xemul@openvz.org>
+
+---------------------------
+
 What:  ACPI procfs interface
 When:  July 2008
 Why:   ACPI sysfs conversion should be finished by January 2008.
index 42d4b30b10459966c9ea0db1ae111272e0af1482..c2992bc54f2f49f0f7caee130acf19205df7511a 100644 (file)
@@ -511,7 +511,6 @@ prototypes:
        void (*open)(struct vm_area_struct*);
        void (*close)(struct vm_area_struct*);
        int (*fault)(struct vm_area_struct*, struct vm_fault *);
-       struct page *(*nopage)(struct vm_area_struct*, unsigned long, int *);
        int (*page_mkwrite)(struct vm_area_struct *, struct page *);
 
 locking rules:
@@ -519,7 +518,6 @@ locking rules:
 open:          no      yes
 close:         no      yes
 fault:         no      yes
-nopage:                no      yes
 page_mkwrite:  no      yes             no
 
        ->page_mkwrite() is called when a previously read-only page is
@@ -537,4 +535,3 @@ NULL.
 
 ipc/shm.c::shm_delete() - may need BKL.
 ->read() and ->write() in many drivers are (probably) missing BKL.
-drivers/sgi/char/graphics.c::sgi_graphics_nopage() - may need BKL.
index 2a99116edc47335f6ab6f50859de0a8284750cdd..dbc3c6a3650f5c29b24000d69e1c9cb7973b4988 100644 (file)
@@ -463,11 +463,17 @@ SwapTotal:           0 kB
 SwapFree:            0 kB
 Dirty:             968 kB
 Writeback:           0 kB
+AnonPages:      861800 kB
 Mapped:         280372 kB
-Slab:           684068 kB
+Slab:           284364 kB
+SReclaimable:   159856 kB
+SUnreclaim:     124508 kB
+PageTables:      24448 kB
+NFS_Unstable:        0 kB
+Bounce:              0 kB
+WritebackTmp:        0 kB
 CommitLimit:   7669796 kB
 Committed_AS:   100056 kB
-PageTables:      24448 kB
 VmallocTotal:   112216 kB
 VmallocUsed:       428 kB
 VmallocChunk:   111088 kB
@@ -503,8 +509,17 @@ VmallocChunk:   111088 kB
               on the disk
        Dirty: Memory which is waiting to get written back to the disk
    Writeback: Memory which is actively being written back to the disk
+   AnonPages: Non-file backed pages mapped into userspace page tables
       Mapped: files which have been mmaped, such as libraries
         Slab: in-kernel data structures cache
+SReclaimable: Part of Slab that might be reclaimed, such as caches
+  SUnreclaim: Part of Slab that cannot be reclaimed under memory pressure
+  PageTables: amount of memory dedicated to the lowest level of page
+              tables.
+NFS_Unstable: NFS pages sent to the server, but not yet committed to stable
+             storage
+      Bounce: Memory used for block device "bounce buffers"
+WritebackTmp: Memory used by FUSE for temporary writeback buffers
  CommitLimit: Based on the overcommit ratio ('vm.overcommit_ratio'),
               this is the total amount of  memory currently available to
               be allocated on the system. This limit is only adhered to
@@ -531,8 +546,6 @@ Committed_AS: The amount of memory presently allocated on the system.
               above) will not be permitted. This is useful if one needs
               to guarantee that processes will not fail due to lack of
               memory once that memory has been successfully allocated.
-  PageTables: amount of memory dedicated to the lowest level of page
-              tables.
 VmallocTotal: total size of vmalloc memory area
  VmallocUsed: amount of vmalloc area which is used
 VmallocChunk: largest contigious block of vmalloc area which is free
index 145e44086358653e4b8b6b17302f3b7652246194..222437efd75a8231751c83a9d68195364b22ad62 100644 (file)
@@ -92,6 +92,18 @@ NodeList format is a comma-separated list of decimal numbers and ranges,
 a range being two hyphen-separated decimal numbers, the smallest and
 largest node numbers in the range.  For example, mpol=bind:0-3,5,7,9-15
 
+NUMA memory allocation policies have optional flags that can be used in
+conjunction with their modes.  These optional flags can be specified
+when tmpfs is mounted by appending them to the mode before the NodeList.
+See Documentation/vm/numa_memory_policy.txt for a list of all available
+memory allocation policy mode flags.
+
+       =static         is equivalent to        MPOL_F_STATIC_NODES
+       =relative       is equivalent to        MPOL_F_RELATIVE_NODES
+
+For example, mpol=bind=static:NodeList is the equivalent of an
+allocation policy of MPOL_BIND | MPOL_F_STATIC_NODES.
+
 Note that trying to mount a tmpfs with an mpol option will fail if the
 running kernel does not support NUMA; and will fail if its nodelist
 specifies a node which is not online.  If your system relies on that
index fcc123ffa2523d2dc3d932ddb63c240cf9052840..2d5e1e582e13272bfaef2fbc494a156d829cc289 100644 (file)
@@ -17,6 +17,21 @@ dmask=###     -- The permission mask for the directory.
 fmask=###     -- The permission mask for files.
                  The default is the umask of current process.
 
+allow_utime=### -- This option controls the permission check of mtime/atime.
+
+                  20 - If current process is in group of file's group ID,
+                       you can change timestamp.
+                   2 - Other users can change timestamp.
+
+                 The default is set from `dmask' option. (If the directory is
+                 writable, utime(2) is also allowed. I.e. ~dmask & 022)
+
+                 Normally utime(2) checks that the current process is the
+                 owner of the file, or that it has the CAP_FOWNER
+                 capability.  But the FAT filesystem doesn't store uid/gid
+                 on disk, so the normal check is too inflexible.  With
+                 this option you can relax it.
+
 codepage=###  -- Sets the codepage number for converting to shortname
                 characters on FAT filesystem.
                 By default, FAT_DEFAULT_CODEPAGE setting is used.
index 54630095aa3c8f841c097e723eef518f660a30a7..c35ca9e40d4ca8ae0cbf7c06c639d8531ae8d9dd 100644 (file)
@@ -107,6 +107,16 @@ type of GPIO controller, and on one particular board 80-95 with an FPGA.
 The numbers need not be contiguous; either of those platforms could also
 use numbers 2000-2063 to identify GPIOs in a bank of I2C GPIO expanders.
 
+If you want to initialize a structure with an invalid GPIO number, use
+some negative number (perhaps "-EINVAL"); that will never be valid.  To
+test if a number could reference a GPIO, you may use this predicate:
+
+       int gpio_is_valid(int number);
+
+A number that's not valid will be rejected by calls which may request
+or free GPIOs (see below).  Other numbers may also be rejected; for
+example, a number might be valid but unused on a given board.
+
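+A rough sketch (the platform data structure and the "reset" label below
+are illustrative assumptions, not part of the GPIO API):
+
+       struct my_platform_data {
+               int reset_gpio;         /* set to -EINVAL when not wired up */
+       };
+
+       if (gpio_is_valid(pdata->reset_gpio)) {
+               int err = gpio_request(pdata->reset_gpio, "reset");
+
+               /* ... check err, then use the GPIO ... */
+       }
+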
 Whether a platform supports multiple GPIO controllers is currently a
 platform-specific implementation issue.
 
index 1841cedc25b27d02d96d7dfac7b0763da909c8a9..bd1fa9d4468d9ba238c05c62cd562807c122e1ad 100644 (file)
@@ -33,7 +33,8 @@ Known Issues
 ------------
 
 On some systems (Asus), the BIOS is known to interfere with the driver
-and cause read errors. The driver will retry a given number of times
+and cause read errors. Or maybe the W83L785TS-S chip is simply unreliable;
+we don't really know. The driver will retry a given number of times
 (5 by default) and then give up, returning the old value (or 0 if
 there is no old value). It seems to work well enough so that you should
 not notice anything. Thanks to James Bolt for helping test this feature.
index bfb0a5520817c5bbe770d35af48c0381b5b6dc6c..ee75cbace28d529cb690e573ec4c2cb67f75bd0f 100644 (file)
@@ -164,7 +164,8 @@ I2C device drivers using this binding model work just like any other
 kind of driver in Linux:  they provide a probe() method to bind to
 those devices, and a remove() method to unbind.
 
-       static int foo_probe(struct i2c_client *client);
+       static int foo_probe(struct i2c_client *client,
+                            const struct i2c_device_id *id);
        static int foo_remove(struct i2c_client *client);
 
 Remember that the i2c_driver does not create those client handles.  The
index 2eb16100bb3fef7658799f5f7f4d259be644b537..95ad15c3b01f3256b8f06efb6a1a5f3905d02952 100644 (file)
@@ -40,8 +40,18 @@ Protocol 2.05:       (Kernel 2.6.20) Make protected mode kernel relocatable.
                Introduce relocatable_kernel and kernel_alignment fields.
 
 Protocol 2.06: (Kernel 2.6.22) Added a field that contains the size of
-               the boot command line
+               the boot command line.
 
+Protocol 2.07: (Kernel 2.6.24) Added paravirtualised boot protocol.
+               Introduced hardware_subarch and hardware_subarch_data
+               and KEEP_SEGMENTS flag in load_flags.
+
+Protocol 2.08: (Kernel 2.6.26) Added crc32 checksum and ELF format
+               payload. Introduced payload_offset and payload length
+               fields to aid in locating the payload.
+
+Protocol 2.09: (Kernel 2.6.26) Added a field of 64-bit physical
+               pointer to a singly linked list of struct setup_data.
 
 **** MEMORY LAYOUT
 
@@ -172,6 +182,8 @@ Offset      Proto   Name            Meaning
 0240/8 2.07+   hardware_subarch_data Subarchitecture-specific data
 0248/4 2.08+   payload_offset  Offset of kernel payload
 024C/4 2.08+   payload_length  Length of kernel payload
+0250/8 2.09+   setup_data      64-bit physical pointer to linked list
+                               of struct setup_data
 
 (1) For backwards compatibility, if the setup_sects field contains 0, the
     real value is 4.
@@ -572,6 +584,28 @@ command line is entered using the following protocol:
        covered by setup_move_size, so you may need to adjust this
        field.
 
+Field name:    setup_data
+Type:          write (obligatory)
+Offset/size:   0x250/8
+Protocol:      2.09+
+
+  The 64-bit physical pointer to a NULL-terminated singly linked list
+  of struct setup_data. This is used to define a more extensible boot
+  parameter passing mechanism. The definition of struct setup_data is
+  as follows:
+
+  struct setup_data {
+         u64 next;
+         u32 type;
+         u32 len;
+         u8  data[0];
+  };
+
+  Here, next is a 64-bit physical pointer to the next node of the
+  linked list (the next field of the last node is 0); type is used
+  to identify the contents of data; len is the length of the data
+  field; and data holds the real payload.
 
 **** MEMORY LAYOUT OF THE REAL-MODE CODE
 
diff --git a/Documentation/ia64/kvm.txt b/Documentation/ia64/kvm.txt
new file mode 100644 (file)
index 0000000..bec9d81
--- /dev/null
@@ -0,0 +1,82 @@
+Currently, the kvm module is in the EXPERIMENTAL stage on IA64. This means
+that interfaces are not stable enough to use. So please do not run critical
+applications in a virtual machine. We will try our best to make it
+stronger in future versions!
+
+                               Guide: How to boot up guests on kvm/ia64
+
+This guide is to describe how to enable kvm support for IA-64 systems.
+
+1. Get the kvm source from git.kernel.org.
+       Userspace source:
+               git clone git://git.kernel.org/pub/scm/virt/kvm/kvm-userspace.git
+       Kernel Source:
+               git clone git://git.kernel.org/pub/scm/linux/kernel/git/xiantao/kvm-ia64.git
+
+2. Compile the source code.
+       2.1 Compile userspace code:
+               (1)cd ./kvm-userspace
+               (2)./configure
+               (3)cd kernel
+               (4)make sync LINUX=$kernel_dir (kernel_dir is the directory of the kernel source.)
+               (5)cd ..
+               (6)make qemu
+               (7)cd qemu; make install
+
+       2.2 Compile kernel source code:
+               (1) cd ./$kernel_dir
+               (2) make menuconfig
+               (3) Enter the virtualization option, and choose kvm.
+               (4) make
+               (5) Once (4) is done, make modules_install
+               (6) Make an initrd, and use the new kernel to reboot the host machine.
+               (7) Once (6) is done, cd $kernel_dir/arch/ia64/kvm
+               (8) insmod kvm.ko; insmod kvm-intel.ko
+
+Note: For step 2, please make sure that the host page size == TARGET_PAGE_SIZE of qemu; otherwise, it may fail.
+
+3. Get the guest firmware named Flash.fd, and put it in the right place:
+       (1) If you have the guest firmware (binary) released by Intel Corp for Xen, use it directly.
+
+       (2) If you have no firmware at hand, please download its source from
+               hg clone http://xenbits.xensource.com/ext/efi-vfirmware.hg
+           You can get the firmware binary in the directory efi-vfirmware.hg/binaries.
+
+       (3) Rename the firmware you obtained to Flash.fd, and copy it to /usr/local/share/qemu
+
+4. Boot up Linux or Windows guests:
+       4.1 Create or install an image for the guest to boot. If you have xen experience, it should be easy.
+
+       4.2 Boot up guests using the following command.
+               /usr/local/bin/qemu-system-ia64 -smp xx -m 512 -hda $your_image
+               (xx is the number of virtual processors for the guest, now the maximum value is 4)
+
+5. Known possible issues on some platforms with old firmware.
+
+If you meet strange host crash issues, try to solve them in either of the following ways:
+
+(1): Upgrade your firmware to the latest version.
+
+(2): Apply the patch below to the kernel source.
+diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S
+index 0b53344..f02b0f7 100644
+--- a/arch/ia64/kernel/pal.S
++++ b/arch/ia64/kernel/pal.S
+@@ -84,7 +84,8 @@ GLOBAL_ENTRY(ia64_pal_call_static)
+       mov ar.pfs = loc1
+       mov rp = loc0
+       ;;
+-      srlz.d                          // seralize restoration of psr.l
++      srlz.i                  // seralize restoration of psr.l
++      ;;
+       br.ret.sptk.many b0
+ END(ia64_pal_call_static)
+
+6. Bug report:
+       If you find any issues when using kvm/ia64, please post the bug info to the kvm-ia64-devel mailing list.
+       https://lists.sourceforge.net/lists/listinfo/kvm-ia64-devel/
+
+Thanks for your interest! Let's work together, and make kvm/ia64 stronger and stronger!
+
+
+                                                               Xiantao Zhang <xiantao.zhang@intel.com>
+                                                                                       2008.3.10
index 658f271a373fca8c57dc50a32dc04ad494250fcf..3f348a0b21d83e4f9354d90d47ca61b221543fad 100644 (file)
-/*
- * IDE ATAPI streaming tape driver.
- *
- * This driver is a part of the Linux ide driver.
- *
- * The driver, in co-operation with ide.c, basically traverses the
- * request-list for the block device interface. The character device
- * interface, on the other hand, creates new requests, adds them
- * to the request-list of the block device, and waits for their completion.
- *
- * Pipelined operation mode is now supported on both reads and writes.
- *
- * The block device major and minor numbers are determined from the
- * tape's relative position in the ide interfaces, as explained in ide.c.
- *
- * The character device interface consists of the following devices:
- *
- * ht0         major 37, minor 0       first  IDE tape, rewind on close.
- * ht1         major 37, minor 1       second IDE tape, rewind on close.
- * ...
- * nht0                major 37, minor 128     first  IDE tape, no rewind on close.
- * nht1                major 37, minor 129     second IDE tape, no rewind on close.
- * ...
- *
- * The general magnetic tape commands compatible interface, as defined by
- * include/linux/mtio.h, is accessible through the character device.
- *
- * General ide driver configuration options, such as the interrupt-unmask
- * flag, can be configured by issuing an ioctl to the block device interface,
- * as any other ide device.
- *
- * Our own ide-tape ioctl's can be issued to either the block device or
- * the character device interface.
- *
- * Maximal throughput with minimal bus load will usually be achieved in the
- * following scenario:
- *
- *     1.      ide-tape is operating in the pipelined operation mode.
- *     2.      No buffering is performed by the user backup program.
- *
- * Testing was done with a 2 GB CONNER CTMA 4000 IDE ATAPI Streaming Tape Drive.
- *
- * Here are some words from the first releases of hd.c, which are quoted
- * in ide.c and apply here as well:
- *
- * | Special care is recommended.  Have Fun!
- *
- *
- * An overview of the pipelined operation mode.
- *
- * In the pipelined write mode, we will usually just add requests to our
- * pipeline and return immediately, before we even start to service them. The
- * user program will then have enough time to prepare the next request while
- * we are still busy servicing previous requests. In the pipelined read mode,
- * the situation is similar - we add read-ahead requests into the pipeline,
- * before the user even requested them.
- *
- * The pipeline can be viewed as a "safety net" which will be activated when
- * the system load is high and prevents the user backup program from keeping up
- * with the current tape speed. At this point, the pipeline will get
- * shorter and shorter but the tape will still be streaming at the same speed.
- * Assuming we have enough pipeline stages, the system load will hopefully
- * decrease before the pipeline is completely empty, and the backup program
- * will be able to "catch up" and refill the pipeline again.
- *
- * When using the pipelined mode, it would be best to disable any type of
- * buffering done by the user program, as ide-tape already provides all the
- * benefits in the kernel, where it can be done in a more efficient way.
- * As we will usually not block the user program on a request, the most
- * efficient user code will then be a simple read-write-read-... cycle.
- * Any additional logic will usually just slow down the backup process.
- *
- * Using the pipelined mode, I get a constant over 400 KBps throughput,
- * which seems to be the maximum throughput supported by my tape.
- *
- * However, there are some downfalls:
- *
- *     1.      We use memory (for data buffers) in proportional to the number
- *             of pipeline stages (each stage is about 26 KB with my tape).
- *     2.      In the pipelined write mode, we cheat and postpone error codes
- *             to the user task. In read mode, the actual tape position
- *             will be a bit further than the last requested block.
- *
- * Concerning (1):
- *
- *     1.      We allocate stages dynamically only when we need them. When
- *             we don't need them, we don't consume additional memory. In
- *             case we can't allocate stages, we just manage without them
- *             (at the expense of decreased throughput) so when Linux is
- *             tight in memory, we will not pose additional difficulties.
- *
- *     2.      The maximum number of stages (which is, in fact, the maximum
- *             amount of memory) which we allocate is limited by the compile
- *             time parameter IDETAPE_MAX_PIPELINE_STAGES.
- *
- *     3.      The maximum number of stages is a controlled parameter - We
- *             don't start from the user defined maximum number of stages
- *             but from the lower IDETAPE_MIN_PIPELINE_STAGES (again, we
- *             will not even allocate this amount of stages if the user
- *             program can't handle the speed). We then implement a feedback
- *             loop which checks if the pipeline is empty, and if it is, we
- *             increase the maximum number of stages as necessary until we
- *             reach the optimum value which just manages to keep the tape
- *             busy with minimum allocated memory or until we reach
- *             IDETAPE_MAX_PIPELINE_STAGES.
- *
- * Concerning (2):
- *
- *     In pipelined write mode, ide-tape can not return accurate error codes
- *     to the user program since we usually just add the request to the
- *      pipeline without waiting for it to be serviced. In case an error
- *      occurs, I will report it on the next user request.
- *
- *     In the pipelined read mode, subsequent read requests or forward
- *     filemark spacing will perform correctly, as we preserve all blocks
- *     and filemarks which we encountered during our excess read-ahead.
- *
- *     For accurate tape positioning and error reporting, disabling
- *     pipelined mode might be the best option.
- *
- * You can enable/disable/tune the pipelined operation mode by adjusting
- * the compile time parameters below.
- *
- *
- *     Possible improvements.
- *
- *     1.      Support for the ATAPI overlap protocol.
- *
- *             In order to maximize bus throughput, we currently use the DSC
- *             overlap method which enables ide.c to service requests from the
- *             other device while the tape is busy executing a command. The
- *             DSC overlap method involves polling the tape's status register
- *             for the DSC bit, and servicing the other device while the tape
- *             isn't ready.
- *
- *             In the current QIC development standard (December 1995),
- *             it is recommended that new tape drives will *in addition*
- *             implement the ATAPI overlap protocol, which is used for the
- *             same purpose - efficient use of the IDE bus, but is interrupt
- *             driven and thus has much less CPU overhead.
- *
- *             ATAPI overlap is likely to be supported in most new ATAPI
- *             devices, including new ATAPI cdroms, and thus provides us
- *             a method by which we can achieve higher throughput when
- *             sharing a (fast) ATA-2 disk with any (slow) new ATAPI device.
- */
+IDE ATAPI streaming tape driver.
+
+This driver is a part of the Linux ide driver.
+
+The driver, in co-operation with ide.c, basically traverses the
+request-list for the block device interface. The character device
+interface, on the other hand, creates new requests, adds them
+to the request-list of the block device, and waits for their completion.
+
+The block device major and minor numbers are determined from the
+tape's relative position in the ide interfaces, as explained in ide.c.
+
+The character device interface consists of the following devices:
+
+ht0            major 37, minor 0       first  IDE tape, rewind on close.
+ht1            major 37, minor 1       second IDE tape, rewind on close.
+...
+nht0           major 37, minor 128     first  IDE tape, no rewind on close.
+nht1           major 37, minor 129     second IDE tape, no rewind on close.
+...
+
+The general magnetic tape commands compatible interface, as defined by
+include/linux/mtio.h, is accessible through the character device.
+
+General ide driver configuration options, such as the interrupt-unmask
+flag, can be configured by issuing an ioctl to the block device interface,
+as any other ide device.
+
+Our own ide-tape ioctl's can be issued to either the block device or
+the character device interface.
+
+Maximal throughput with minimal bus load will usually be achieved in the
+following scenario:
+
+     1.        ide-tape is operating in the pipelined operation mode.
+     2.        No buffering is performed by the user backup program.
+
+Testing was done with a 2 GB CONNER CTMA 4000 IDE ATAPI Streaming Tape Drive.
+
+Here are some words from the first releases of hd.c, which are quoted
+in ide.c and apply here as well:
+
+| Special care is recommended.  Have Fun!
+
+Possible improvements:
+
+1. Support for the ATAPI overlap protocol.
+
+In order to maximize bus throughput, we currently use the DSC
+overlap method which enables ide.c to service requests from the
+other device while the tape is busy executing a command. The
+DSC overlap method involves polling the tape's status register
+for the DSC bit, and servicing the other device while the tape
+isn't ready.
+
+In the current QIC development standard (December 1995),
+it is recommended that new tape drives will *in addition*
+implement the ATAPI overlap protocol, which is used for the
+same purpose - efficient use of the IDE bus, but is interrupt
+driven and thus has much less CPU overhead.
+
+ATAPI overlap is likely to be supported in most new ATAPI
+devices, including new ATAPI cdroms, and thus provides us
+a method by which we can achieve higher throughput when
+sharing a (fast) ATA-2 disk with any (slow) new ATAPI device.
index 486c699f4aea4722bb822a7af7ad12053f5c321c..0c78f4b1d9d9f1df5298e9147c058a32aee90353 100644 (file)
@@ -82,27 +82,26 @@ Drives are normally found by auto-probing and/or examining the CMOS/BIOS data.
 For really weird situations, the apparent (fdisk) geometry can also be specified
 on the kernel "command line" using LILO.  The format of such lines is:
 
-       hdx=cyls,heads,sects
-or     hdx=cdrom
+       ide_core.chs=[interface_number.device_number]:cyls,heads,sects
+or     ide_core.cdrom=[interface_number.device_number]
 
-where hdx can be any of hda through hdh, Three values are required
-(cyls,heads,sects).  For example:
+For example:
 
-       hdc=1050,32,64  hdd=cdrom
+       ide_core.chs=1.0:1050,32,64  ide_core.cdrom=1.1
 
-either {hda,hdb} or {hdc,hdd}.  The results of successful auto-probing may
-override the physical geometry/irq specified, though the "original" geometry
-may be retained as the "logical" geometry for partitioning purposes (fdisk).
+The results of successful auto-probing may override the physical geometry/irq
+specified, though the "original" geometry may be retained as the "logical"
+geometry for partitioning purposes (fdisk).
 
 If the auto-probing during boot time confuses a drive (ie. the drive works
 with hd.c but not with ide.c), then an command line option may be specified
 for each drive for which you'd like the drive to skip the hardware
 probe/identification sequence.  For example:
 
-       hdb=noprobe
+       ide_core.noprobe=0.1
 or
-       hdc=768,16,32
-       hdc=noprobe
+       ide_core.chs=1.0:768,16,32
+       ide_core.noprobe=1.0
 
 Note that when only one IDE device is attached to an interface, it should be
 jumpered as "single" or "master", *not* "slave".  Many folks have had
@@ -118,9 +117,9 @@ If for some reason your cdrom drive is *not* found at boot time, you can force
 the probe to look harder by supplying a kernel command line parameter
 via LILO, such as:
 
-       hdc=cdrom       /* hdc = "master" on second interface */
+       ide_core.cdrom=1.0      /* "master" on second interface (hdc) */
 or
-       hdd=cdrom       /* hdd = "slave" on second interface */
+       ide_core.cdrom=1.1      /* "slave" on second interface (hdd) */
 
 For example, a GW2000 system might have a hard drive on the primary
 interface (/dev/hda) and an IDE cdrom drive on the secondary interface
@@ -174,9 +173,7 @@ to /etc/modprobe.conf.
 
 When ide.c is used as a module, you can pass command line parameters to the
 driver using the "options=" keyword to insmod, while replacing any ',' with
-';'.  For example:
-
-       insmod ide.o options="hda=nodma hdb=nodma"
+';'.
 
 
 ================================================================================
@@ -184,57 +181,6 @@ driver using the "options=" keyword to insmod, while replacing any ',' with
 Summary of ide driver parameters for kernel command line
 --------------------------------------------------------
 
- "hdx="  is recognized for all "x" from "a" to "u", such as "hdc".
-
- "idex=" is recognized for all "x" from "0" to "9", such as "ide1".
-
- "hdx=noprobe"         : drive may be present, but do not probe for it
-
- "hdx=none"            : drive is NOT present, ignore cmos and do not probe
-
- "hdx=nowerr"          : ignore the WRERR_STAT bit on this drive
-
- "hdx=cdrom"           : drive is present, and is a cdrom drive
-
- "hdx=cyl,head,sect"   : disk drive is present, with specified geometry
-
- "hdx=autotune"                : driver will attempt to tune interface speed
-                         to the fastest PIO mode supported,
-                         if possible for this drive only.
-                         Not fully supported by all chipset types,
-                         and quite likely to cause trouble with
-                         older/odd IDE drives.
-
- "hdx=nodma"           : disallow DMA
-
- "idebus=xx"           : inform IDE driver of VESA/PCI bus speed in MHz,
-                         where "xx" is between 20 and 66 inclusive,
-                         used when tuning chipset PIO modes.
-                         For PCI bus, 25 is correct for a P75 system,
-                         30 is correct for P90,P120,P180 systems,
-                         and 33 is used for P100,P133,P166 systems.
-                         If in doubt, use idebus=33 for PCI.
-                         As for VLB, it is safest to not specify it.
-                         Bigger values are safer than smaller ones.
-
- "idex=serialize"      : do not overlap operations on idex. Please note
-                         that you will have to specify this option for
-                         both the respective primary and secondary channel
-                         to take effect.
-
- "idex=reset"          : reset interface after probe
-
- "idex=ata66"          : informs the interface that it has an 80c cable
-                         for chipsets that are ATA-66 capable, but the
-                         ability to bit test for detection is currently
-                         unknown.
-
- "ide=doubler"         : probe/support IDE doublers on Amiga
-
-There may be more options than shown -- use the source, Luke!
-
-Everything else is rejected with a "BAD OPTION" message.
-
 For legacy IDE VLB host drivers (ali14xx/dtc2278/ht6560b/qd65xx/umc8672)
 you need to explicitly enable probing by using "probe" kernel parameter,
 i.e. to enable probing for ALI M14xx chipsets (ali14xx host driver) use:
@@ -251,6 +197,33 @@ are detected automatically).
 You also need to use "probe" kernel parameter for ide-4drives driver
 (support for IDE generic chipset with four drives on one port).
 
+To enable support for IDE doublers on Amiga, use the "doubler" kernel parameter
+for the gayle host driver (i.e. "gayle.doubler" if the driver is built-in).
+
+To force ignoring cable detection (this should be needed only if you're using
+a short 40-wire cable which cannot be automatically detected - if this is not
+the case please report it as a bug instead) use the "ignore_cable" kernel parameter:
+
+* "ide_core.ignore_cable=[interface_number]" boot option if IDE is built-in
+  (i.e. "ide_core.ignore_cable=1" to force ignoring cable for "ide1")
+
+* "ignore_cable=[interface_number]" module parameter (for ide_core module)
+  if IDE is compiled as module
+
+Other kernel parameters for ide_core are:
+
+* "nodma=[interface_number.device_number]" to disallow DMA for a device
+
+* "noflush=[interface_number.device_number]" to disable flush requests
+
+* "noprobe=[interface_number.device_number]" to skip probing
+
+* "nowerr=[interface_number.device_number]" to ignore the WRERR_STAT bit
+
+* "cdrom=[interface_number.device_number]" to force device as a CD-ROM
+
+* "chs=[interface_number.device_number]" to force device as a disk (using CHS)
+
 ================================================================================
 
 Some Terminology
index c18363bd8d11b1f8b3c725c7c12639568033a115..240ce7a56c40c9edfd141468ef2f2f1cf40dfab5 100644 (file)
@@ -183,6 +183,8 @@ Code        Seq#    Include File            Comments
 0xAC   00-1F   linux/raw.h
 0xAD   00      Netfilter device        in development:
                                        <mailto:rusty@rustcorp.com.au>  
+0xAE   all     linux/kvm.h             Kernel-based Virtual Machine
+                                       <mailto:kvm-devel@lists.sourceforge.net>
 0xB0   all     RATIO devices           in development:
                                        <mailto:vgo@ratio.de>
 0xB1   00-1F   PPPoX                   <mailto:mostrows@styx.uwaterloo.ca>
index 649cb87998900e235afe6b83c5e9f10077be2085..00b950d1c1931a7b20c19c2595eb2fbf062eb424 100644 (file)
@@ -104,14 +104,15 @@ applicable everywhere (see syntax).
   Reverse dependencies can only be used with boolean or tristate
   symbols.
   Note:
-       select is evil.... select will by brute force set a symbol
-       equal to 'y' without visiting the dependencies. So abusing
-       select you are able to select a symbol FOO even if FOO depends
-       on BAR that is not set. In general use select only for
-       non-visible symbols (no prompts anywhere) and for symbols with
-       no dependencies. That will limit the usefulness but on the
-       other hand avoid the illegal configurations all over. kconfig
-       should one day warn about such things.
+       select should be used with care. select will force
+       a symbol to a value without visiting the dependencies.
+       By abusing select you are able to select a symbol FOO even
+       if FOO depends on BAR that is not set.
+       In general use select only for non-visible symbols
+       (no prompts anywhere) and for symbols with no dependencies.
+       That limits its usefulness, but on the other hand avoids
+       illegal configurations all over the place.
+       kconfig should one day warn about such things.
 
 - numerical ranges: "range" <symbol> <symbol> ["if" <expr>]
   This allows to limit the range of possible input values for int
index d0ac72cc19ff8e29667e7a2c58e21d2d0cfafcb5..b8e52c0355d3bab81760adbf62d0c8a06a9ba52a 100644 (file)
@@ -245,6 +245,8 @@ The syntax is:
     crashkernel=<range1>:<size1>[,<range2>:<size2>,...][@offset]
     range=start-[end]
 
+    'start' is inclusive and 'end' is exclusive.
+
 For example:
 
     crashkernel=512M-2G:64M,2G-:128M
@@ -253,10 +255,11 @@ This would mean:
 
     1) if the RAM is smaller than 512M, then don't reserve anything
        (this is the "rescue" case)
-    2) if the RAM size is between 512M and 2G, then reserve 64M
+    2) if the RAM size is between 512M and 2G (exclusive), then reserve 64M
     3) if the RAM size is larger than 2G, then reserve 128M
 
 
+
 Boot into System Kernel
 =======================
 
index bf6303ec0bde4784b71370457eb8510c6198ed05..a3c35446e755c7c5bedaacf47be5f6050aad2145 100644 (file)
@@ -496,6 +496,11 @@ and is between 256 and 4096 characters. It is defined in the file
                        switching to the matching ttyS device later.  The
                        options are the same as for ttyS, above.
 
+                If the device connected to the port is not a TTY but a braille
+                device, prepend "brl," before the device type, for instance
+                       console=brl,ttyS0
+               For now, only VisioBraille is supported.
+
        earlycon=       [KNL] Output early console device and options.
                uart[8250],io,<addr>[,options]
                uart[8250],mmio,<addr>[,options]
@@ -556,6 +561,8 @@ and is between 256 and 4096 characters. It is defined in the file
                        1 will print _a lot_ more information - normally
                        only useful to kernel developers.
 
+       debug_objects   [KNL] Enable object debugging
+
        decnet.addr=    [HW,NET]
                        Format: <area>[,<node>]
                        See also Documentation/networking/decnet.txt.
@@ -627,8 +634,7 @@ and is between 256 and 4096 characters. It is defined in the file
        eata=           [HW,SCSI]
 
        edd=            [EDD]
-                       Format: {"of[f]" | "sk[ipmbr]"}
-                       See comment in arch/i386/boot/edd.S
+                       Format: {"off" | "on" | "skip[mbr]"}
 
        eisa_irq_edge=  [PARISC,HW]
                        See header of drivers/parisc/eisa.c.
@@ -772,10 +778,6 @@ and is between 256 and 4096 characters. It is defined in the file
                        Format: ide=nodma or ide=doubler
                        See Documentation/ide/ide.txt.
 
-       ide?=           [HW] (E)IDE subsystem
-                       Format: ide?=ata66 or chipset specific parameters.
-                       See Documentation/ide/ide.txt.
-
        idebus=         [HW] (E)IDE subsystem - VLB/PCI bus speed
                        See Documentation/ide/ide.txt.
 
@@ -1393,6 +1395,13 @@ and is between 256 and 4096 characters. It is defined in the file
 
        nr_uarts=       [SERIAL] maximum number of UARTs to be registered.
 
+       olpc_ec_timeout= [OLPC] ms delay when issuing EC commands
+                       Rather than timing out after 20 ms if an EC
+                       command is not properly ACKed, override the length
+                       of the timeout.  We have interrupts disabled while
+                       waiting for the ACK, so if this is set too high
+                       interrupts *may* be lost!
+
        opl3=           [HW,OSS]
                        Format: <io>
 
index 266955d23ee6eb9bff2e98ad1bc6d2bc916d9369..09b55e4617404437fb2806c5cabccf58f1c3aa40 100644 (file)
@@ -11,26 +11,29 @@ request_key*():
 
        struct key *request_key(const struct key_type *type,
                                const char *description,
-                               const char *callout_string);
+                               const char *callout_info);
 
 or:
 
        struct key *request_key_with_auxdata(const struct key_type *type,
                                             const char *description,
-                                            const char *callout_string,
+                                            const char *callout_info,
+                                            size_t callout_len,
                                             void *aux);
 
 or:
 
        struct key *request_key_async(const struct key_type *type,
                                      const char *description,
-                                     const char *callout_string);
+                                     const char *callout_info,
+                                     size_t callout_len);
 
 or:
 
        struct key *request_key_async_with_auxdata(const struct key_type *type,
                                                   const char *description,
-                                                  const char *callout_string,
+                                                  const char *callout_info,
+                                                  size_t callout_len,
                                                   void *aux);
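+
+For illustration only, a minimal in-kernel call matching the first prototype
+might look like this (the "user" key type and the description string are
+arbitrary choices for this sketch):
+
+        #include <linux/key.h>
+        #include <linux/err.h>
+        #include <keys/user-type.h>
+
+        struct key *key;
+
+        /* request a "user" type key by description; no callout data supplied */
+        key = request_key(&key_type_user, "example:session-token", NULL);
+        if (IS_ERR(key))
+                return PTR_ERR(key);
+        /* ... use the key, then drop the reference ... */
+        key_put(key);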
 
 Or by userspace invoking the request_key system call:
index 51652d39e61c3d0d25bba568699f249f6e1fa0f4..d5c7a57d17007fa0f43558b270b0e4c302d5f8aa 100644 (file)
@@ -170,7 +170,8 @@ The key service provides a number of features besides keys:
      amount of description and payload space that can be consumed.
 
      The user can view information on this and other statistics through procfs
-     files.
+     files.  The root user may also alter the quota limits through sysctl files
+     (see the section "New procfs files").
 
      Process-specific and thread-specific keyrings are not counted towards a
      user's quota.
@@ -329,6 +330,27 @@ about the status of the key service:
        <bytes>/<max>           Key size quota
 
 
+Four new sysctl files have also been added for the purpose of controlling the
+quota limits on keys:
+
+ (*) /proc/sys/kernel/keys/root_maxkeys
+     /proc/sys/kernel/keys/root_maxbytes
+
+     These files hold the maximum number of keys that root may have and the
+     maximum total number of bytes of data that root may have stored in those
+     keys.
+
+ (*) /proc/sys/kernel/keys/maxkeys
+     /proc/sys/kernel/keys/maxbytes
+
+     These files hold the maximum number of keys that each non-root user may
+     have and the maximum total number of bytes of data that each of those
+     users may have stored in their keys.
+
+Root may alter these by writing each new limit as a decimal number string to
+the appropriate file.
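+
+For example (the figure 2000 is an arbitrary illustration):
+
+        echo 2000 >/proc/sys/kernel/keys/maxkeys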
+
+
 ===============================
 USERSPACE SYSTEM CALL INTERFACE
 ===============================
@@ -711,6 +733,27 @@ The keyctl syscall functions are:
      The assumed authoritative key is inherited across fork and exec.
 
 
+ (*) Get the LSM security context attached to a key.
+
+       long keyctl(KEYCTL_GET_SECURITY, key_serial_t key, char *buffer,
+                   size_t buflen)
+
+     This function returns a string that represents the LSM security context
+     attached to a key in the buffer provided.
+
+     Unless there's an error, it always returns the amount of data it could
+     produce, even if that's too big for the buffer, but it won't copy more
+     than requested to userspace. If the buffer pointer is NULL then no copy
+     will take place.
+
+     A NUL character is included at the end of the string if the buffer is
+     sufficiently big.  This is included in the returned count.  If no LSM is
+     in force then an empty string will be returned.
+
+     A process must have view permission on the key for this function to be
+     successful.
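+
+     As a sketch of how userspace might use this (assuming the keyctl()
+     wrapper provided by the keyutils library, and using the session keyring
+     purely as an example target):
+
+        #include <stdio.h>
+        #include <keyutils.h>
+
+        char buf[256];
+        long n;
+
+        n = keyctl(KEYCTL_GET_SECURITY, KEY_SPEC_SESSION_KEYRING,
+                   buf, sizeof(buf));
+        if (n < 0)
+                perror("KEYCTL_GET_SECURITY");
+        else if (n <= sizeof(buf))
+                printf("context: %s\n", buf);
+        /* if n > sizeof(buf), the buffer was too small for the full context */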
+
+
 ===============
 KERNEL SERVICES
 ===============
@@ -771,7 +814,7 @@ payload contents" for more information.
 
        struct key *request_key(const struct key_type *type,
                                const char *description,
-                               const char *callout_string);
+                               const char *callout_info);
 
     This is used to request a key or keyring with a description that matches
     the description specified according to the key type's match function. This
@@ -793,24 +836,28 @@ payload contents" for more information.
 
        struct key *request_key_with_auxdata(const struct key_type *type,
                                             const char *description,
-                                            const char *callout_string,
+                                            const void *callout_info,
+                                            size_t callout_len,
                                             void *aux);
 
     This is identical to request_key(), except that the auxiliary data is
-    passed to the key_type->request_key() op if it exists.
+    passed to the key_type->request_key() op if it exists, and the callout_info
+    is a blob of length callout_len, if given (the length may be 0).
 
 
 (*) A key can be requested asynchronously by calling one of:
 
        struct key *request_key_async(const struct key_type *type,
                                      const char *description,
-                                     const char *callout_string);
+                                     const void *callout_info,
+                                     size_t callout_len);
 
     or:
 
        struct key *request_key_async_with_auxdata(const struct key_type *type,
                                                   const char *description,
-                                                  const char *callout_string,
+                                                  const char *callout_info,
+                                                  size_t callout_len,
                                                   void *aux);
 
     which are asynchronous equivalents of request_key() and
index be89f393274fbd086a4f14b4e56bb82c0a29c76d..6877e71871132b92d60a603fe444f93af53f0aa1 100644 (file)
@@ -37,6 +37,11 @@ registration function such as register_kprobe() specifies where
 the probe is to be inserted and what handler is to be called when
 the probe is hit.
 
+There are also register_/unregister_*probes() functions for batch
+registration/unregistration of a group of *probes. These functions
+can speed up the unregistration process when you have to unregister
+a lot of probes at once.
+
 The next three subsections explain how the different types of
 probes work.  They explain certain things that you'll need to
 know in order to make the best use of Kprobes -- e.g., the
@@ -190,10 +195,11 @@ code mapping.
 4. API Reference
 
 The Kprobes API includes a "register" function and an "unregister"
-function for each type of probe.  Here are terse, mini-man-page
-specifications for these functions and the associated probe handlers
-that you'll write.  See the files in the samples/kprobes/ sub-directory
-for examples.
+function for each type of probe. The API also includes "register_*probes"
+and "unregister_*probes" functions for (un)registering arrays of probes.
+Here are terse, mini-man-page specifications for these functions and
+the associated probe handlers that you'll write. See the files in the
+samples/kprobes/ sub-directory for examples.
 
 4.1 register_kprobe
 
@@ -319,6 +325,43 @@ void unregister_kretprobe(struct kretprobe *rp);
 Removes the specified probe.  The unregister function can be called
 at any time after the probe has been registered.
 
+NOTE:
+If the functions find an incorrect probe (e.g. an unregistered probe),
+they clear the addr field of the probe.
+
+4.5 register_*probes
+
+#include <linux/kprobes.h>
+int register_kprobes(struct kprobe **kps, int num);
+int register_kretprobes(struct kretprobe **rps, int num);
+int register_jprobes(struct jprobe **jps, int num);
+
+Registers each of the num probes in the specified array.  If any
+error occurs during registration, all probes in the array, up to
+the bad probe, are safely unregistered before the register_*probes
+function returns.
+- kps/rps/jps: an array of pointers to *probe data structures
+- num: the number of array entries.
+
+NOTE:
+You have to allocate (or define) an array of pointers and set all
+of the array entries before using these functions.
+
+4.6 unregister_*probes
+
+#include <linux/kprobes.h>
+void unregister_kprobes(struct kprobe **kps, int num);
+void unregister_kretprobes(struct kretprobe **rps, int num);
+void unregister_jprobes(struct jprobe **jps, int num);
+
+Removes each of the num probes in the specified array at once.
+
+NOTE:
+If the functions find some incorrect probes (e.g. unregistered
+probes) in the specified array, they clear the addr field of those
+incorrect probes. However, other probes in the array are
+unregistered correctly.
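+
+For illustration, a minimal batch registration sketch (the probed symbol names
+and the empty pre-handler are arbitrary choices for this example):
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+
+static int pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+        return 0;       /* do nothing, let the probed instruction execute */
+}
+
+static struct kprobe kp1 = { .symbol_name = "do_fork",   .pre_handler = pre_handler };
+static struct kprobe kp2 = { .symbol_name = "vfs_write", .pre_handler = pre_handler };
+static struct kprobe *kps[] = { &kp1, &kp2 };
+
+static int __init batch_init(void)
+{
+        /* every array entry must be set up before this call */
+        return register_kprobes(kps, ARRAY_SIZE(kps));
+}
+
+static void __exit batch_exit(void)
+{
+        unregister_kprobes(kps, ARRAY_SIZE(kps));
+}
+
+module_init(batch_init);
+module_exit(batch_exit);
+MODULE_LICENSE("GPL");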
+
 5. Kprobes Features and Limitations
 
 Kprobes allows multiple probes at the same address.  Currently,
index 76cb428435daf4c1dfe9d611fd6a155483e447a2..01c6c3d8a7e3fe61ee007182b9c45841e6857973 100644 (file)
@@ -1,7 +1,7 @@
                     ThinkPad ACPI Extras Driver
 
-                            Version 0.19
-                         January 06th, 2008
+                            Version 0.20
+                          April 09th, 2008
 
                Borislav Deianov <borislav@users.sf.net>
              Henrique de Moraes Holschuh <hmh@hmh.eng.br>
@@ -18,6 +18,11 @@ This driver used to be named ibm-acpi until kernel 2.6.21 and release
 moved to the drivers/misc tree and renamed to thinkpad-acpi for kernel
 2.6.22, and release 0.14.
 
+The driver is named "thinkpad-acpi".  In some places, like module
+names, "thinkpad_acpi" is used because of userspace issues.
+
+"tpacpi" is used as a shorthand where "thinkpad-acpi" would be too
+long due to length limitations on some Linux kernel versions.
 
 Status
 ------
@@ -571,6 +576,47 @@ netlink interface and the input layer interface, and don't bother at all
 with hotkey_report_mode.
 
 
+Brightness hotkey notes:
+
+These are the current sane choices for brightness key mapping in
+thinkpad-acpi:
+
+For IBM and Lenovo models *without* ACPI backlight control (the ones on
+which thinkpad-acpi will autoload its backlight interface by default,
+and on which ACPI video does not export a backlight interface):
+
+1. Don't enable or map the brightness hotkeys in thinkpad-acpi, as
+   these older firmware versions unfortunately won't respect the hotkey
+   mask for brightness keys anyway, and always reacts to them.  This
+   usually work fine, unless X.org drivers are doing something to block
+   the BIOS.  In that case, use (3) below.  This is the default mode of
+   operation.
+
+2. Enable the hotkeys, but map them to something else that is NOT
+   KEY_BRIGHTNESS_UP/DOWN or any other keycode that would cause
+   userspace to try to change the backlight level, and use that as an
+   on-screen-display hint.
+
+3. IF AND ONLY IF X.org drivers find a way to block the firmware from
+   automatically changing the brightness, enable the hotkeys and map
+   them to KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN, and feed that to
+   something that calls xbacklight.  thinkpad-acpi will not be able to
+   change brightness in that case either, so you should disable its
+   backlight interface.
+
+For Lenovo models *with* ACPI backlight control:
+
+1. Load up ACPI video and use that.  ACPI video will report ACPI
+   events for brightness change keys.  Do not mess with thinkpad-acpi
+   defaults in this case.  thinkpad-acpi should not have anything to do
+   with backlight events in a scenario where ACPI video is loaded:
+   brightness hotkeys must be disabled, and the backlight interface is
+   to be kept disabled as well.  This is the default mode of operation.
+
+2. Do *NOT* load up ACPI video, enable the hotkeys in thinkpad-acpi,
+   and map them to KEY_BRIGHTNESS_UP and KEY_BRIGHTNESS_DOWN.  Process
+   these keys on userspace somehow (e.g. by calling xbacklight).
+
 Bluetooth
 ---------
 
@@ -647,16 +693,31 @@ while others are still having problems. For more information:
 
 https://bugs.freedesktop.org/show_bug.cgi?id=2000
 
-ThinkLight control -- /proc/acpi/ibm/light
-------------------------------------------
+ThinkLight control
+------------------
+
+procfs: /proc/acpi/ibm/light
+sysfs attributes: as per LED class, for the "tpacpi::thinklight" LED
 
-The current status of the ThinkLight can be found in this file. A few
-models which do not make the status available will show it as
-"unknown". The available commands are:
+procfs notes:
+
+The ThinkLight status can be read and set through the procfs interface.  A
+few models which do not make the status available will show the ThinkLight
+status as "unknown". The available commands are:
 
        echo on  > /proc/acpi/ibm/light
        echo off > /proc/acpi/ibm/light
 
+sysfs notes:
+
+The ThinkLight sysfs interface is documented by the LED class
+documentation, in Documentation/leds-class.txt.  The ThinkLight LED name
+is "tpacpi::thinklight".
+
+Due to limitations in the sysfs LED class, if the status of the thinklight
+cannot be read or if it is unknown, thinkpad-acpi will report it as "off".
+It is impossible to know if the status returned through sysfs is valid.
+
 Docking / undocking -- /proc/acpi/ibm/dock
 ------------------------------------------
 
@@ -815,28 +876,63 @@ The cmos command interface is prone to firmware split-brain problems, as
 in newer ThinkPads it is just a compatibility layer.  Do not use it, it is
 exported just as a debug tool.
 
-LED control -- /proc/acpi/ibm/led
----------------------------------
+LED control
+-----------
+
+procfs: /proc/acpi/ibm/led
+sysfs attributes: as per LED class, see below for names
+
+Some of the LED indicators can be controlled through this feature.  On
+some older ThinkPad models, it is possible to query the status of the
+LED indicators as well.  Newer ThinkPads cannot query the real status
+of the LED indicators.
 
-Some of the LED indicators can be controlled through this feature. The
-available commands are:
+procfs notes:
+
+The available commands are:
 
-       echo '<led number> on' >/proc/acpi/ibm/led
-       echo '<led number> off' >/proc/acpi/ibm/led
-       echo '<led number> blink' >/proc/acpi/ibm/led
+       echo '<LED number> on' >/proc/acpi/ibm/led
+       echo '<LED number> off' >/proc/acpi/ibm/led
+       echo '<LED number> blink' >/proc/acpi/ibm/led
 
-The <led number> range is 0 to 7. The set of LEDs that can be
-controlled varies from model to model. Here is the mapping on the X40:
+The <LED number> range is 0 to 7. The set of LEDs that can be
+controlled varies from model to model. Here is the common ThinkPad
+mapping:
 
        0 - power
        1 - battery (orange)
        2 - battery (green)
-       3 - UltraBase
+       3 - UltraBase/dock
        4 - UltraBay
+       5 - UltraBase battery slot
+       6 - (unknown)
        7 - standby
 
 All of the above can be turned on and off and can be made to blink.
 
+sysfs notes:
+
+The ThinkPad LED sysfs interface is described in detail by the LED class
+documentation, in Documentation/leds-class.txt.
+
+The LEDs are named (in LED ID order, from 0 to 7):
+"tpacpi::power", "tpacpi:orange:batt", "tpacpi:green:batt",
+"tpacpi::dock_active", "tpacpi::bay_active", "tpacpi::dock_batt",
+"tpacpi::unknown_led", "tpacpi::standby".
+
+Due to limitations in the sysfs LED class, if the status of the LED
+indicators cannot be read due to an error, thinkpad-acpi will report it as
+a brightness of zero (same as LED off).
+
+If the thinkpad firmware doesn't support reading the current status,
+trying to read the current LED brightness will just return whatever
+brightness was last written to that attribute.
+
+These LEDs can blink using hardware acceleration.  To request that a
+ThinkPad indicator LED should blink in hardware accelerated mode, use the
+"timer" trigger, and leave the delay_on and delay_off parameters set to
+zero (to request hardware acceleration autodetection).
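+
+For example, assuming the power LED is exposed by the LED class under
+/sys/class/leds as "tpacpi::power", hardware accelerated blinking could be
+requested with:
+
+       echo timer > /sys/class/leds/tpacpi::power/trigger
+       echo 0 > /sys/class/leds/tpacpi::power/delay_on
+       echo 0 > /sys/class/leds/tpacpi::power/delay_off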
+
 ACPI sounds -- /proc/acpi/ibm/beep
 ----------------------------------
 
@@ -1090,6 +1186,15 @@ it there will be the following attributes:
                dim the display.
 
 
+WARNING:
+
+    Whatever you do, do NOT ever use the thinkpad-acpi backlight-level change
+    interface and the ACPI-based backlight-level change interface
+    (available on newer BIOSes, and driven by the Linux ACPI video driver)
+    at the same time.  The two will interact in bad ways, do funny things,
+    and may reduce the life of the backlight lamps by needlessly kicking
+    the brightness level up and down at every change.
+
 Volume control -- /proc/acpi/ibm/volume
 ---------------------------------------
 
index 396cdd982c26505ee39a577a64a7fb2c7472985e..a8b430627473aa243995ab6f6e173b9cf1ff819e 100644 (file)
@@ -450,3 +450,9 @@ These currently include
       there are upper and lower limits (32768, 16).  Default is 128.
   strip_cache_active (currently raid5 only)
       number of active entries in the stripe cache
+  preread_bypass_threshold (currently raid5 only)
+      number of times a stripe requiring preread will be bypassed by
+      a stripe that does not require preread.  For fairness defaults
+      to 1.  Setting this to 0 disables bypass accounting and
+      requires preread stripes to wait until all full-width stripe-
+      writes are complete.  Valid values are 0 to stripe_cache_size.
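+      For example, bypass accounting could be disabled on an (assumed)
+      array md0 with
+         echo 0 > /sys/block/md0/md/preread_bypass_threshold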
index 5c8334123f4f7b6f29d41c6129ecd511a7e03d10..25a6ed1aaa5b1e3e7aa2e774a7beab34f78001f9 100644 (file)
@@ -46,8 +46,6 @@ Two files are introduced:
 
   a) 'include/asm-mips/mach-au1x00/au1xxx_ide.h'
      containes : struct _auide_hwif
-                 struct drive_list_entry dma_white_list
-                 struct drive_list_entry dma_black_list
                  timing parameters for PIO mode 0/1/2/3/4
                  timing parameters for MWDMA 0/1/2
 
@@ -63,12 +61,6 @@ Four configs variables are introduced:
   CONFIG_BLK_DEV_IDE_AU1XXX_SEQTS_PER_RQ - maximum transfer size
                                            per descriptor
 
-If MWDMA is enabled and the connected hard disc is not on the white list, the
-kernel switches to a "safe mwdma mode" at boot time. In this mode the IDE
-performance is substantial slower then in full speed mwdma. In this case
-please add your hard disc to the white list (follow instruction from 'ADD NEW
-HARD DISC TO WHITE OR BLACK LIST' section).
-
 
 SUPPORTED IDE MODES
 -------------------
@@ -120,44 +112,6 @@ CONFIG_IDEDMA_AUTO=y
 Also undefine 'IDE_AU1XXX_BURSTMODE' in 'drivers/ide/mips/au1xxx-ide.c' to
 disable the burst support on DBDMA controller.
 
-ADD NEW HARD DISC TO WHITE OR BLACK LIST
-----------------------------------------
-
-Step 1 : detect the model name of your hard disc
-
-  a) connect your hard disc to the AU1XXX
-
-  b) boot your kernel and get the hard disc model.
-
-     Example boot log:
-
-     --snipped--
-     Uniform Multi-Platform E-IDE driver Revision: 7.00alpha2
-     ide: Assuming 50MHz system bus speed for PIO modes; override with idebus=xx
-     Au1xxx IDE(builtin) configured for MWDMA2
-     Probing IDE interface ide0...
-     hda: Maxtor 6E040L0, ATA DISK drive
-     ide0 at 0xac800000-0xac800007,0xac8001c0 on irq 64
-     hda: max request size: 64KiB
-     hda: 80293248 sectors (41110 MB) w/2048KiB Cache, CHS=65535/16/63, (U)DMA
-     --snipped--
-
-     In this example 'Maxtor 6E040L0'.
-
-Step  2 : edit 'include/asm-mips/mach-au1x00/au1xxx_ide.h'
-
-  Add your hard disc to the dma_white_list or dma_black_list structur.
-
-Step 3 : Recompile the kernel
-
-  Enable MWDMA support in the kernel configuration. Recompile the kernel and
-  reboot.
-
-Step 4 : Tests
-
-  If you have add a hard disc to the white list, please run some stress tests
-  for verification.
-
 
 ACKNOWLEDGMENTS
 ---------------
index 7f60dfe642caf9a65421f49b3cf9c342b79705dd..b152e81da5923e5150de6ba6a67d6ec37219ca43 100644 (file)
@@ -253,6 +253,10 @@ characters, each representing a particular tainted value.
 
   8: 'D' if the kernel has died recently, i.e. there was an OOPS or BUG.
 
+  9: 'A' if the ACPI table has been overridden.
+
+ 10: 'W' if a warning has previously been issued by the kernel.
+
 The primary reason for the 'Tainted: ' string is to tell kernel
 debuggers if this is a clean kernel or if anything unusual has
 occurred.  Tainting is permanent: even if an offending module is
index cf89e8cfd5bfe1ce59632dad01f374a78c9d6c88..1d2a772506cfa46e23acda9c3660369b72e1e85f 100644 (file)
@@ -2836,6 +2836,39 @@ platforms are moved over to use the flattened-device-tree model.
                   big-endian;
           };
 
+    r) Freescale Display Interface Unit
+
+    The Freescale DIU is an LCD controller; with the proper hardware, it can
+    also drive DVI monitors.
+
+    Required properties:
+    - compatible : should be "fsl-diu".
+    - reg : should contain at least address and length of the DIU register
+      set.
+    - interrupts : one DIU interrupt should be described here.
+
+    Example (MPC8610HPCD)
+       display@2c000 {
+               compatible = "fsl,diu";
+               reg = <0x2c000 100>;
+               interrupts = <72 2>;
+               interrupt-parent = <&mpic>;
+       };
+
+    s) Freescale on board FPGA
+
+    These are the memory-mapped registers for the on-board FPGA.
+
+    Required properties:
+    - compatible : should be "fsl,fpga-pixis".
+    - reg : should contain the address and the length of the FPGA register
+      set.
+
+    Example (MPC8610HPCD)
+       board-control@e8000000 {
+               compatible = "fsl,fpga-pixis";
+               reg = <0xe8000000 32>;
+       };
 
 VII - Marvell Discovery mv64[345]6x System Controller chips
 ===========================================================
diff --git a/Documentation/powerpc/kvm_440.txt b/Documentation/powerpc/kvm_440.txt
new file mode 100644 (file)
index 0000000..c02a003
--- /dev/null
@@ -0,0 +1,41 @@
+Hollis Blanchard <hollisb@us.ibm.com>
+15 Apr 2008
+
+Various notes on the implementation of KVM for PowerPC 440:
+
+To enforce isolation, host userspace, guest kernel, and guest userspace all
+run at user privilege level. Only the host kernel runs in supervisor mode.
+Executing privileged instructions in the guest traps into KVM (in the host
+kernel), where we decode and emulate them. Through this technique, unmodified
+440 Linux kernels can be run (slowly) as guests. Future performance work will
+focus on reducing the overhead and frequency of these traps.
+
+The usual code flow starts with userspace invoking a "run" ioctl, which
+causes KVM to switch into guest context. We use IVPR to hijack the host
+interrupt vectors while running the guest, which allows us to direct all
+interrupts to kvmppc_handle_interrupt(). At this point, we could either
+- handle the interrupt completely (e.g. emulate "mtspr SPRG0"), or
+- let the host interrupt handler run (e.g. when the decrementer fires), or
+- return to host userspace (e.g. when the guest performs device MMIO)
+
+Address spaces: We take advantage of the fact that Linux doesn't use the AS=1
+address space (in host or guest), which gives us virtual address space to use
+for guest mappings. While the guest is running, the host kernel remains mapped
+in AS=0, but the guest can only use AS=1 mappings.
+
+TLB entries: The TLB entries covering the host linear mapping remain
+present while running the guest. This reduces the overhead of lightweight
+exits, which are handled by KVM running in the host kernel. We keep three
+copies of the TLB:
+ - guest TLB: contents of the TLB as the guest sees it
+ - shadow TLB: the TLB that is actually in hardware while guest is running
+ - host TLB: to restore TLB state when context switching guest -> host
+When a TLB miss occurs because a mapping was not present in the shadow TLB,
+but was present in the guest TLB, KVM handles the fault without invoking the
+guest. Large guest pages are backed by multiple 4KB shadow pages through this
+mechanism.
+
+IO: MMIO and DCR accesses are emulated by userspace. We use virtio for network
+and block IO, so those drivers must be enabled in the guest. It's possible
+that some qemu device emulation (e.g. e1000 or rtl8139) may also work with
+little effort.
index 5e03610e186f986c5cc3a21a5e4f9bbac3ee4332..cda7a7dffa6d6376fb0b0e83f4e096a9c448ffed 100644 (file)
@@ -186,6 +186,12 @@ Recommended soc5200 child nodes; populate as needed for your board
 name           device_type     compatible        Description
 ----           -----------     ----------        -----------
 gpt@<addr>     gpt             fsl,mpc5200-gpt   General purpose timers
+gpt@<addr>     gpt             fsl,mpc5200-gpt-gpio    General purpose
+                                                       timers in GPIO mode
+gpio@<addr>                    fsl,mpc5200-gpio        MPC5200 simple gpio
+                                                       controller
+gpio@<addr>                    fsl,mpc5200-gpio-wkup   MPC5200 wakeup gpio
+                                                       controller
 rtc@<addr>     rtc             mpc5200-rtc       Real time clock
 mscan@<addr>   mscan           mpc5200-mscan     CAN bus controller
 pci@<addr>     pci             mpc5200-pci       PCI bridge
@@ -225,6 +231,12 @@ PSC in i2s mode:  The mpc5200 and mpc5200b PSCs are not compatible when in
 i2s mode.  An 'mpc5200b-psc-i2s' node cannot include 'mpc5200-psc-i2s' in the
 compatible field.
 
+7) GPIO controller nodes
+Each GPIO controller node should have the empty property gpio-controller and
+#gpio-cells set to 2. The first cell is the GPIO number, which is interpreted
+according to the bit numbers in the GPIO control registers. The second cell
+is for flags, which are currently unused.
+
 IV - Extra Notes
 ================
 
diff --git a/Documentation/s390/kvm.txt b/Documentation/s390/kvm.txt
new file mode 100644 (file)
index 0000000..6f5ceb0
--- /dev/null
@@ -0,0 +1,125 @@
+*** BIG FAT WARNING ***
+The kvm module is currently in EXPERIMENTAL state for s390. This means that
+the interface to the module is not yet considered to remain stable. Thus, be
+prepared that we keep breaking your userspace application and guest
+compatibility over and over again until we feel happy with the result. Make sure
+your guest kernel, your host kernel, and your userspace launcher are in a
+consistent state.
+
+This Documentation describes the unique ioctl calls to /dev/kvm, the resulting
+kvm-vm file descriptors, and the kvm-vcpu file descriptors that differ from x86.
+
+1. ioctl calls to /dev/kvm
+KVM does support the following ioctls on s390 that are common with other
+architectures and do behave the same:
+KVM_GET_API_VERSION
+KVM_CREATE_VM          (*) see note
+KVM_CHECK_EXTENSION
+KVM_GET_VCPU_MMAP_SIZE
+
+Notes:
+* KVM_CREATE_VM may fail on s390, if the calling process has multiple
+threads and has not called KVM_S390_ENABLE_SIE before.
+
+In addition, on s390 the following architecture specific ioctls are supported:
+ioctl:         KVM_S390_ENABLE_SIE
+args:          none
+see also:      include/linux/kvm.h
+This call causes the kernel to switch on PGSTE in the user page table. This
+operation is needed in order to run a virtual machine, and it requires the
+calling process to be single-threaded. Note that the first call to KVM_CREATE_VM
+will implicitly try to switch on PGSTE if the user process has not called
+KVM_S390_ENABLE_SIE before. User processes that want to launch multiple threads
+before creating a virtual machine have to call KVM_S390_ENABLE_SIE, or will
+observe an error calling KVM_CREATE_VM. Switching on PGSTE is a one-time
+operation, is not reversible, and will persist over the entire lifetime of
+the calling process. It does not have any user-visible effect other than a small
+performance penalty.
+
+2. ioctl calls to the kvm-vm file descriptor
+KVM does support the following ioctls on s390 that are common with other
+architectures and do behave the same:
+KVM_CREATE_VCPU
+KVM_SET_USER_MEMORY_REGION      (*) see note
+KVM_GET_DIRTY_LOG              (**) see note
+
+Notes:
+*  kvm allows only exactly one memory slot on s390, which has to start
+   at guest absolute address zero and at a user address that is aligned on a
+   page boundary. This hardware "limitation" allows us to have a few unique
+   optimizations. The memory slot doesn't actually have to be filled with
+   memory; it may contain sparse holes. That said, with a different user
+   memory layout this still allows a large degree of flexibility when
+   doing the guest memory setup.
+** KVM_GET_DIRTY_LOG doesn't work properly yet. The user will receive an empty
+log. This ioctl call is only needed for guest migration, and we intend to
+implement this one in the future.
+
+In addition, on s390 the following architecture specific ioctls for the kvm-vm
+file descriptor are supported:
+ioctl:         KVM_S390_INTERRUPT
+args:          struct kvm_s390_interrupt *
+see also:      include/linux/kvm.h
+This ioctl is used to submit a floating interrupt for a virtual machine.
+Floating interrupts may be delivered to any virtual cpu in the configuration.
+Only some interrupt types defined in include/linux/kvm.h make sense when
+submitted as floating interrupts. The following interrupts are not considered
+to be useful as floating interrupts, and a call to inject them will result in
+-EINVAL error code: program interrupts and interprocessor signals. Valid
+floating interrupts are:
+KVM_S390_INT_VIRTIO
+KVM_S390_INT_SERVICE
+
+3. ioctl calls to the kvm-vcpu file descriptor
+KVM does support the following ioctls on s390 that are common with other
+architectures and do behave the same:
+KVM_RUN
+KVM_GET_REGS
+KVM_SET_REGS
+KVM_GET_SREGS
+KVM_SET_SREGS
+KVM_GET_FPU
+KVM_SET_FPU
+
+In addition, on s390 the following architecture specific ioctls for the
+kvm-vcpu file descriptor are supported:
+ioctl:         KVM_S390_INTERRUPT
+args:          struct kvm_s390_interrupt *
+see also:      include/linux/kvm.h
+This ioctl is used to submit an interrupt for a specific virtual cpu.
+Only some interrupt types defined in include/linux/kvm.h make sense when
+submitted for a specific cpu. The following interrupts are not considered
+to be useful, and a call to inject them will result in -EINVAL error code:
+service processor calls and virtio interrupts. Valid interrupt types are:
+KVM_S390_PROGRAM_INT
+KVM_S390_SIGP_STOP
+KVM_S390_RESTART
+KVM_S390_SIGP_SET_PREFIX
+KVM_S390_INT_EMERGENCY
+
+ioctl:         KVM_S390_STORE_STATUS
+args:          unsigned long
+see also:      include/linux/kvm.h
+This ioctl stores the state of the cpu at the guest real address given as
+argument, unless one of the following values defined in include/linux/kvm.h
+is given as argument:
+KVM_S390_STORE_STATUS_NOADDR - the CPU stores its status to the save area in
+absolute lowcore as defined by the principles of operation
+KVM_S390_STORE_STATUS_PREFIXED - the CPU stores its status to the save area in
+its prefix page just like the dump tool that comes with zipl. This is useful
+to create a system dump for use with lkcdutils or crash.
+
+ioctl:         KVM_S390_SET_INITIAL_PSW
+args:          struct kvm_s390_psw *
+see also:      include/linux/kvm.h
+This ioctl can be used to set the processor status word (psw) of a stopped cpu
+prior to running it with KVM_RUN. Note that this call is not required to modify
+the psw during sie intercepts that fall back to userspace because struct kvm_run
+does contain the psw, and this value is evaluated during reentry of KVM_RUN
+after the intercept exit was recognized.
+
+ioctl:         KVM_S390_INITIAL_RESET
+args:          none
+see also:      include/linux/kvm.h
+This ioctl can be used to perform an initial cpu reset as defined by the
+principles of operation. The target cpu has to be in stopped state.
index fd4c32a031c9f5e5381b440379ea70ca9e85ff7e..0bbee38acd263f6e88605ac71e3bd12c8ca3bf70 100644 (file)
@@ -795,6 +795,7 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
          lg-lw         LG LW20/LW25 laptop
          tcl           TCL S700
          clevo         Clevo laptops (m520G, m665n)
+         medion        Medion Rim 2150
          test          for testing/debugging purpose, almost all controls can be
                        adjusted.  Appearing only when compiled with
                        $CONFIG_SND_DEBUG=y
index 5c8e1b988a0825708e85b1153239d6fb431b05e2..ed2da5e5b28a4490a3b03787b02df66d083692be 100644 (file)
@@ -126,8 +126,8 @@ NOTES:
 FULL DUPLEX CHARACTER DEVICE API
 ================================
 
-See the sample program below for one example showing the use of the full
-duplex programming interface.  (Although it doesn't perform a full duplex
+See the spidev_fdx.c sample program for one example showing the use of the
+full duplex programming interface.  (Although it doesn't perform a full duplex
 transfer.)  The model is the same as that used in the kernel spi_sync()
 request; the individual transfers offer the same capabilities as are
 available to kernel drivers (except that it's not asynchronous).
@@ -141,167 +141,3 @@ and bitrate for each transfer segment.)
 
 To make a full duplex request, provide both rx_buf and tx_buf for the
 same transfer.  It's even OK if those are the same buffer.
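+
+A minimal full-duplex sketch (assuming fd is an open spidev device and that
+tx and rx are equally sized arrays) might be:
+
+        struct spi_ioc_transfer xfer;
+
+        memset(&xfer, 0, sizeof xfer);
+        xfer.tx_buf = (__u64) tx;
+        xfer.rx_buf = (__u64) rx;
+        xfer.len = sizeof tx;
+
+        status = ioctl(fd, SPI_IOC_MESSAGE(1), &xfer);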
-
-
-SAMPLE PROGRAM
-==============
-
---------------------------------       CUT HERE
-#include <stdio.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <fcntl.h>
-#include <string.h>
-
-#include <sys/ioctl.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-
-#include <linux/types.h>
-#include <linux/spi/spidev.h>
-
-
-static int verbose;
-
-static void do_read(int fd, int len)
-{
-       unsigned char   buf[32], *bp;
-       int             status;
-
-       /* read at least 2 bytes, no more than 32 */
-       if (len < 2)
-               len = 2;
-       else if (len > sizeof(buf))
-               len = sizeof(buf);
-       memset(buf, 0, sizeof buf);
-
-       status = read(fd, buf, len);
-       if (status < 0) {
-               perror("read");
-               return;
-       }
-       if (status != len) {
-               fprintf(stderr, "short read\n");
-               return;
-       }
-
-       printf("read(%2d, %2d): %02x %02x,", len, status,
-               buf[0], buf[1]);
-       status -= 2;
-       bp = buf + 2;
-       while (status-- > 0)
-               printf(" %02x", *bp++);
-       printf("\n");
-}
-
-static void do_msg(int fd, int len)
-{
-       struct spi_ioc_transfer xfer[2];
-       unsigned char           buf[32], *bp;
-       int                     status;
-
-       memset(xfer, 0, sizeof xfer);
-       memset(buf, 0, sizeof buf);
-
-       if (len > sizeof buf)
-               len = sizeof buf;
-
-       buf[0] = 0xaa;
-       xfer[0].tx_buf = (__u64) buf;
-       xfer[0].len = 1;
-
-       xfer[1].rx_buf = (__u64) buf;
-       xfer[1].len = len;
-
-       status = ioctl(fd, SPI_IOC_MESSAGE(2), xfer);
-       if (status < 0) {
-               perror("SPI_IOC_MESSAGE");
-               return;
-       }
-
-       printf("response(%2d, %2d): ", len, status);
-       for (bp = buf; len; len--)
-               printf(" %02x", *bp++);
-       printf("\n");
-}
-
-static void dumpstat(const char *name, int fd)
-{
-       __u8    mode, lsb, bits;
-       __u32   speed;
-
-       if (ioctl(fd, SPI_IOC_RD_MODE, &mode) < 0) {
-               perror("SPI rd_mode");
-               return;
-       }
-       if (ioctl(fd, SPI_IOC_RD_LSB_FIRST, &lsb) < 0) {
-               perror("SPI rd_lsb_fist");
-               return;
-       }
-       if (ioctl(fd, SPI_IOC_RD_BITS_PER_WORD, &bits) < 0) {
-               perror("SPI bits_per_word");
-               return;
-       }
-       if (ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &speed) < 0) {
-               perror("SPI max_speed_hz");
-               return;
-       }
-
-       printf("%s: spi mode %d, %d bits %sper word, %d Hz max\n",
-               name, mode, bits, lsb ? "(lsb first) " : "", speed);
-}
-
-int main(int argc, char **argv)
-{
-       int             c;
-       int             readcount = 0;
-       int             msglen = 0;
-       int             fd;
-       const char      *name;
-
-       while ((c = getopt(argc, argv, "hm:r:v")) != EOF) {
-               switch (c) {
-               case 'm':
-                       msglen = atoi(optarg);
-                       if (msglen < 0)
-                               goto usage;
-                       continue;
-               case 'r':
-                       readcount = atoi(optarg);
-                       if (readcount < 0)
-                               goto usage;
-                       continue;
-               case 'v':
-                       verbose++;
-                       continue;
-               case 'h':
-               case '?':
-usage:
-                       fprintf(stderr,
-                               "usage: %s [-h] [-m N] [-r N] /dev/spidevB.D\n",
-                               argv[0]);
-                       return 1;
-               }
-       }
-
-       if ((optind + 1) != argc)
-               goto usage;
-       name = argv[optind];
-
-       fd = open(name, O_RDWR);
-       if (fd < 0) {
-               perror("open");
-               return 1;
-       }
-
-       dumpstat(name, fd);
-
-       if (msglen)
-               do_msg(fd, msglen);
-
-       if (readcount)
-               do_read(fd, readcount);
-
-       close(fd);
-       return 0;
-}
diff --git a/Documentation/spi/spidev_fdx.c b/Documentation/spi/spidev_fdx.c
new file mode 100644 (file)
index 0000000..fc354f7
--- /dev/null
@@ -0,0 +1,158 @@
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+
+#include <sys/ioctl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <linux/types.h>
+#include <linux/spi/spidev.h>
+
+
+static int verbose;
+
+static void do_read(int fd, int len)
+{
+       unsigned char   buf[32], *bp;
+       int             status;
+
+       /* read at least 2 bytes, no more than 32 */
+       if (len < 2)
+               len = 2;
+       else if (len > sizeof(buf))
+               len = sizeof(buf);
+       memset(buf, 0, sizeof buf);
+
+       status = read(fd, buf, len);
+       if (status < 0) {
+               perror("read");
+               return;
+       }
+       if (status != len) {
+               fprintf(stderr, "short read\n");
+               return;
+       }
+
+       printf("read(%2d, %2d): %02x %02x,", len, status,
+               buf[0], buf[1]);
+       status -= 2;
+       bp = buf + 2;
+       while (status-- > 0)
+               printf(" %02x", *bp++);
+       printf("\n");
+}
+
+static void do_msg(int fd, int len)
+{
+       struct spi_ioc_transfer xfer[2];
+       unsigned char           buf[32], *bp;
+       int                     status;
+
+       memset(xfer, 0, sizeof xfer);
+       memset(buf, 0, sizeof buf);
+
+       if (len > sizeof buf)
+               len = sizeof buf;
+
+       buf[0] = 0xaa;
+       xfer[0].tx_buf = (__u64) buf;
+       xfer[0].len = 1;
+
+       xfer[1].rx_buf = (__u64) buf;
+       xfer[1].len = len;
+
+       status = ioctl(fd, SPI_IOC_MESSAGE(2), xfer);
+       if (status < 0) {
+               perror("SPI_IOC_MESSAGE");
+               return;
+       }
+
+       printf("response(%2d, %2d): ", len, status);
+       for (bp = buf; len; len--)
+               printf(" %02x", *bp++);
+       printf("\n");
+}
+
+static void dumpstat(const char *name, int fd)
+{
+       __u8    mode, lsb, bits;
+       __u32   speed;
+
+       if (ioctl(fd, SPI_IOC_RD_MODE, &mode) < 0) {
+               perror("SPI rd_mode");
+               return;
+       }
+       if (ioctl(fd, SPI_IOC_RD_LSB_FIRST, &lsb) < 0) {
+               perror("SPI rd_lsb_fist");
+               return;
+       }
+       if (ioctl(fd, SPI_IOC_RD_BITS_PER_WORD, &bits) < 0) {
+               perror("SPI bits_per_word");
+               return;
+       }
+       if (ioctl(fd, SPI_IOC_RD_MAX_SPEED_HZ, &speed) < 0) {
+               perror("SPI max_speed_hz");
+               return;
+       }
+
+       printf("%s: spi mode %d, %d bits %sper word, %d Hz max\n",
+               name, mode, bits, lsb ? "(lsb first) " : "", speed);
+}
+
+int main(int argc, char **argv)
+{
+       int             c;
+       int             readcount = 0;
+       int             msglen = 0;
+       int             fd;
+       const char      *name;
+
+       while ((c = getopt(argc, argv, "hm:r:v")) != EOF) {
+               switch (c) {
+               case 'm':
+                       msglen = atoi(optarg);
+                       if (msglen < 0)
+                               goto usage;
+                       continue;
+               case 'r':
+                       readcount = atoi(optarg);
+                       if (readcount < 0)
+                               goto usage;
+                       continue;
+               case 'v':
+                       verbose++;
+                       continue;
+               case 'h':
+               case '?':
+usage:
+                       fprintf(stderr,
+                               "usage: %s [-h] [-m N] [-r N] /dev/spidevB.D\n",
+                               argv[0]);
+                       return 1;
+               }
+       }
+
+       if ((optind + 1) != argc)
+               goto usage;
+       name = argv[optind];
+
+       fd = open(name, O_RDWR);
+       if (fd < 0) {
+               perror("open");
+               return 1;
+       }
+
+       dumpstat(name, fd);
+
+       if (msglen)
+               do_msg(fd, msglen);
+
+       if (readcount)
+               do_read(fd, readcount);
+
+       close(fd);
+       return 0;
+}
index 10c8f6922ef40d7a0a49b85ae6367be1a0edfd91..5ce0952aa06546096fc165d9562f35565ed17025 100644 (file)
@@ -85,6 +85,8 @@ On all -  write a character to /proc/sysrq-trigger.  e.g.:
 'k'     - Secure Access Key (SAK) Kills all programs on the current virtual
           console. NOTE: See important comments below in SAK section.
 
+'l'     - Shows a stack backtrace for all active CPUs.
+
 'm'     - Will dump current memory info to your console.
 
 'n'    - Used to make RT tasks nice-able
index d9f28be75403cea53c07cfe8e965074a71037619..70d68ce8640a0f1490f3e126f6526ab58a62a581 100644 (file)
@@ -108,10 +108,12 @@ and throttle appropriate devices.
 RO     read only value
 RW     read/write value
 
-All thermal sysfs attributes will be represented under /sys/class/thermal
+Thermal sysfs attributes will be represented under /sys/class/thermal.
+Hwmon sysfs I/F extension is also available under /sys/class/hwmon
+if hwmon is compiled in or built as a module.
 
 Thermal zone device sys I/F, created once it's registered:
-|thermal_zone[0-*]:
+/sys/class/thermal/thermal_zone[0-*]:
        |-----type:                     Type of the thermal zone
        |-----temp:                     Current temperature
        |-----mode:                     Working mode of the thermal zone
@@ -119,7 +121,7 @@ Thermal zone device sys I/F, created once it's registered:
        |-----trip_point_[0-*]_type:    Trip point type
 
 Thermal cooling device sys I/F, created once it's registered:
-|cooling_device[0-*]:
+/sys/class/thermal/cooling_device[0-*]:
        |-----type :                    Type of the cooling device(processor/fan/...)
        |-----max_state:                Maximum cooling state of the cooling device
        |-----cur_state:                Current cooling state of the cooling device
@@ -130,10 +132,19 @@ They represent the relationship between a thermal zone and its associated coolin
 They are created/removed for each
 thermal_zone_bind_cooling_device/thermal_zone_unbind_cooling_device successful execution.
 
-|thermal_zone[0-*]
+/sys/class/thermal/thermal_zone[0-*]
        |-----cdev[0-*]:                The [0-*]th cooling device in the current thermal zone
        |-----cdev[0-*]_trip_point:     Trip point that cdev[0-*] is associated with
 
+Besides the thermal zone device sysfs I/F and cooling device sysfs I/F,
+the generic thermal driver also creates a hwmon sysfs I/F for each _type_ of
+thermal zone device. E.g. the generic thermal driver registers one hwmon class device
+and builds the associated hwmon sysfs I/F for all the registered ACPI thermal zones.
+/sys/class/hwmon/hwmon[0-*]:
+       |-----name:                     The type of the thermal zone devices.
+       |-----temp[1-*]_input:          The current temperature of thermal zone [1-*].
+       |-----temp[1-*]_crit:           The critical trip point of thermal zone [1-*].
+Please read Documentation/hwmon/sysfs-interface for additional information.
 
 ***************************
 * Thermal zone attributes *
@@ -141,7 +152,10 @@ thermal_zone_bind_cooling_device/thermal_zone_unbind_cooling_device successful e
 
 type                           Strings which represent the thermal zone type.
                                This is given by thermal zone driver as part of registration.
-                               Eg: "ACPI thermal zone" indicates it's a ACPI thermal device
+                               Eg: "acpitz" indicates it's an ACPI thermal device.
+                               In order to keep it consistent with the hwmon
+                               sysfs attribute, this should be a short,
+                               lowercase string containing neither spaces nor dashes.
                                RO
                                Required
 
@@ -218,7 +232,7 @@ the sys I/F structure will be built like this:
 /sys/class/thermal:
 
 |thermal_zone1:
-       |-----type:                     ACPI thermal zone
+       |-----type:                     acpitz
        |-----temp:                     37000
        |-----mode:                     kernel
        |-----trip_point_0_temp:        100000
@@ -243,3 +257,10 @@ the sys I/F structure will be built like this:
        |-----type:                     Fan
        |-----max_state:                2
        |-----cur_state:                0
+
+/sys/class/hwmon:
+
+|hwmon0:
+       |-----name:                     acpitz
+       |-----temp1_input:              37000
+       |-----temp1_crit:               100000
index 44d84dd15ad672d55b4a93db652f34d74cb3a664..67937df1e974b0e787199c04d996ab084786844d 100644 (file)
 127 -> Beholder BeholdTV 507 FM/RDS / BeholdTV 509 FM [0000:5071,0000:507B,5ace:5070,5ace:5090]
 128 -> Beholder BeholdTV Columbus TVFM          [0000:5201]
 129 -> Beholder BeholdTV 607 / BeholdTV 609     [5ace:6070,5ace:6071,5ace:6072,5ace:6073,5ace:6090,5ace:6091,5ace:6092,5ace:6093]
-130 -> Beholder BeholdTV M6 / BeholdTV M6 Extra [5ace:6190,5ace:6193]
+130 -> Beholder BeholdTV M6 / BeholdTV M6 Extra [5ace:6190,5ace:6193,5ace:6191]
 131 -> Twinhan Hybrid DTV-DVB 3056 PCI          [1822:0022]
 132 -> Genius TVGO AM11MCE
 133 -> NXP Snake DVB-S reference design
 139 -> Compro VideoMate T750                    [185b:c900]
 140 -> Avermedia DVB-S Pro A700                 [1461:a7a1]
 141 -> Avermedia DVB-S Hybrid+FM A700           [1461:a7a2]
+142 -> Beholder BeholdTV H6                     [5ace:6290]
diff --git a/Documentation/video4linux/cx18.txt b/Documentation/video4linux/cx18.txt
new file mode 100644 (file)
index 0000000..077d56e
--- /dev/null
@@ -0,0 +1,34 @@
+Some notes regarding the cx18 driver for the Conexant CX23418 MPEG
+encoder chip:
+
+1) The only hardware currently supported is the Hauppauge HVR-1600.
+
+2) Some people have problems getting the i2c bus to work. Cause unknown.
+   The symptom is that the eeprom cannot be read and the card is
+   unusable.
+
+3) The audio from the analog tuner is mono only. Probably caused by
+   incorrect audio register information in the datasheet. We are
+   waiting for updated information from Conexant.
+
+4) VBI (raw or sliced) has not yet been implemented.
+
+5) MPEG indexing is not yet implemented.
+
+6) The driver is still a bit rough around the edges; this should
+   improve over time.
+
+
+Firmware:
+
+The firmware needs to be extracted from the Windows Hauppauge HVR-1600
+driver, available here:
+
+http://hauppauge.lightpath.net/software/install_cd/hauppauge_cd_3.4d1.zip
+
+Unzip, then copy the following files to the firmware directory
+and rename them as follows:
+
+Drivers/Driver18/hcw18apu.rom -> v4l-cx23418-apu.fw
+Drivers/Driver18/hcw18enc.rom -> v4l-cx23418-cpu.fw
+Drivers/Driver18/hcw18mlC.rom -> v4l-cx23418-dig.fw
index dd49864979964f10ee097fd46779294e4b2cbe21..bad16d3f6a473afe913b0be76e783bcb946bc937 100644 (file)
@@ -135,77 +135,58 @@ most general to most specific:
 
 Components of Memory Policies
 
-    A Linux memory policy is a tuple consisting of a "mode" and an optional set
-    of nodes.  The mode determine the behavior of the policy, while the
-    optional set of nodes can be viewed as the arguments to the behavior.
+    A Linux memory policy consists of a "mode", optional mode flags, and an
+    optional set of nodes.  The mode determines the behavior of the policy,
+    the optional mode flags determine the behavior of the mode, and the
+    optional set of nodes can be viewed as the arguments to the policy
+    behavior.
 
    Internally, memory policies are implemented by a reference counted
    structure, struct mempolicy.  Details of this structure will be discussed
    in context, below, as required to explain the behavior.
 
-       Note:  in some functions AND in the struct mempolicy itself, the mode
-       is called "policy".  However, to avoid confusion with the policy tuple,
-       this document will continue to use the term "mode".
-
    Linux memory policy supports the following 4 behavioral modes:
 
-       Default Mode--MPOL_DEFAULT:  The behavior specified by this mode is
-       context or scope dependent.
-
-           As mentioned in the Policy Scope section above, during normal
-           system operation, the System Default Policy is hard coded to
-           contain the Default mode.
-
-           In this context, default mode means "local" allocation--that is
-           attempt to allocate the page from the node associated with the cpu
-           where the fault occurs.  If the "local" node has no memory, or the
-           node's memory can be exhausted [no free pages available], local
-           allocation will "fallback to"--attempt to allocate pages from--
-           "nearby" nodes, in order of increasing "distance".
+       Default Mode--MPOL_DEFAULT:  This mode is only used in the memory
+       policy APIs.  Internally, MPOL_DEFAULT is converted to the NULL
+       memory policy in all policy scopes.  Any existing non-default policy
+       will simply be removed when MPOL_DEFAULT is specified.  As a result,
+       MPOL_DEFAULT means "fall back to the next most specific policy scope."
 
-               Implementation detail -- subject to change:  "Fallback" uses
-               a per node list of sibling nodes--called zonelists--built at
-               boot time, or when nodes or memory are added or removed from
-               the system [memory hotplug].  These per node zonelist are
-               constructed with nodes in order of increasing distance based
-               on information provided by the platform firmware.
+           For example, a NULL or default task policy will fall back to the
+           system default policy.  A NULL or default vma policy will fall
+           back to the task policy.
 
-           When a task/process policy or a shared policy contains the Default
-           mode, this also means "local allocation", as described above.
+           When specified in one of the memory policy APIs, the Default mode
+           does not use the optional set of nodes.
 
-           In the context of a VMA, Default mode means "fall back to task
-           policy"--which may or may not specify Default mode.  Thus, Default
-           mode can not be counted on to mean local allocation when used
-           on a non-shared region of the address space.  However, see
-           MPOL_PREFERRED below.
-
-           The Default mode does not use the optional set of nodes.
+           It is an error for the set of nodes specified for this policy to
+           be non-empty.
 
        MPOL_BIND:  This mode specifies that memory must come from the
-       set of nodes specified by the policy.
-
-           The memory policy APIs do not specify an order in which the nodes
-           will be searched.  However, unlike "local allocation", the Bind
-           policy does not consider the distance between the nodes.  Rather,
-           allocations will fallback to the nodes specified by the policy in
-           order of numeric node id.  Like everything in Linux, this is subject
-           to change.
+       set of nodes specified by the policy.  Memory will be allocated from
+       the node in the set with sufficient free memory that is closest to
+       the node where the allocation takes place.
 
        MPOL_PREFERRED:  This mode specifies that the allocation should be
        attempted from the single node specified in the policy.  If that
-       allocation fails, the kernel will search other nodes, exactly as
-       it would for a local allocation that started at the preferred node
-       in increasing distance from the preferred node.  "Local" allocation
-       policy can be viewed as a Preferred policy that starts at the node
+       allocation fails, the kernel will search other nodes, in order of
+       increasing distance from the preferred node based on information
+       provided by the platform firmware.
        containing the cpu where the allocation takes place.
 
            Internally, the Preferred policy uses a single node--the
-           preferred_node member of struct mempolicy.  A "distinguished
-           value of this preferred_node, currently '-1', is interpreted
-           as "the node containing the cpu where the allocation takes
-           place"--local allocation.  This is the way to specify
-           local allocation for a specific range of addresses--i.e. for
-           VMA policies.
+           preferred_node member of struct mempolicy.  When the internal
+           mode flag MPOL_F_LOCAL is set, the preferred_node is ignored and
+           the policy is interpreted as local allocation.  "Local" allocation
+           policy can be viewed as a Preferred policy that starts at the node
+           containing the cpu where the allocation takes place.
+
+           It is possible for the user to specify that local allocation is
+           always preferred by passing an empty nodemask with this mode.
+           If an empty nodemask is passed, the policy cannot use the
+           MPOL_F_STATIC_NODES or MPOL_F_RELATIVE_NODES flags described
+           below.
 
        MPOL_INTERLEAVED:  This mode specifies that page allocations be
        interleaved, on a page granularity, across the nodes specified in
@@ -231,6 +212,154 @@ Components of Memory Policies
            the temporary interleaved system default policy works in this
            mode.
 
+   Linux memory policy supports the following optional mode flags:
+
+       MPOL_F_STATIC_NODES:  This flag specifies that the nodemask passed by
+       the user should not be remapped if the task or VMA's set of allowed
+       nodes changes after the memory policy has been defined.
+
+           Without this flag, anytime a mempolicy is rebound because of a
+           change in the set of allowed nodes, the node (Preferred) or
+           nodemask (Bind, Interleave) is remapped to the new set of
+           allowed nodes.  This may result in nodes being used that were
+           previously undesired.
+
+           With this flag, if the user-specified nodes overlap with the
+           nodes allowed by the task's cpuset, then the memory policy is
+           applied to their intersection.  If the two sets of nodes do not
+           overlap, the Default policy is used.
+
+           For example, consider a task that is attached to a cpuset with
+           mems 1-3 that sets an Interleave policy over the same set.  If
+           the cpuset's mems change to 3-5, the Interleave will now occur
+           over nodes 3, 4, and 5.  With this flag, however, since only node
+           3 is allowed from the user's nodemask, the "interleave" only
+           occurs over that node.  If no nodes from the user's nodemask are
+           now allowed, the Default behavior is used.
+
+           MPOL_F_STATIC_NODES cannot be combined with the
+           MPOL_F_RELATIVE_NODES flag.  It also cannot be used for
+           MPOL_PREFERRED policies that were created with an empty nodemask
+           (local allocation).
+
+       MPOL_F_RELATIVE_NODES:  This flag specifies that the nodemask passed
+       by the user will be mapped relative to the set of the task or VMA's
+       by the user will be mapped relative to the task's or VMA's
+       and if the allowed nodes changes, then that original nodemask will
+       and if the allowed nodes change, then that original nodemask will
+
+           Without this flag (and without MPOL_F_STATIC_NODES), anytime a
+           mempolicy is rebound because of a change in the set of allowed
+           nodes, the node (Preferred) or nodemask (Bind, Interleave) is
+           remapped to the new set of allowed nodes.  That remap may not
+           preserve the relative nature of the user's passed nodemask to its
+           set of allowed nodes upon successive rebinds: a nodemask of
+           1,3,5 may be remapped to 7-9 and then to 1-3 if the set of
+           allowed nodes is restored to its original state.
+
+           With this flag, the remap is done so that the node numbers from
+           the user's passed nodemask are relative to the set of allowed
+           nodes.  In other words, if nodes 0, 2, and 4 are set in the user's
+           nodemask, the policy will be effected over the first (and in the
+           Bind or Interleave case, the third and fifth) nodes in the set of
+           allowed nodes.  The nodemask passed by the user represents nodes
+           relative to the task's or VMA's set of allowed nodes.
+
+           If the user's nodemask includes nodes that are outside the range
+           of the new set of allowed nodes (for example, node 5 is set in
+           the user's nodemask when the set of allowed nodes is only 0-3),
+           then the remap wraps around to the beginning of the nodemask and,
+           if not already set, sets the node in the mempolicy nodemask.
+
+           For example, consider a task that is attached to a cpuset with
+           mems 2-5 that sets an Interleave policy over the same set with
+           MPOL_F_RELATIVE_NODES.  If the cpuset's mems change to 3-7, the
+           interleave now occurs over nodes 3,5-7.  If the cpuset's mems
+           then change to 0,2-3,5, then the interleave occurs over nodes
+           0,2-3,5.
+
+           Thanks to the consistent remapping, applications preparing
+           nodemasks to specify memory policies using this flag should
+           disregard their current, actual cpuset imposed memory placement
+           and prepare the nodemask as if they were always located on
+           memory nodes 0 to N-1, where N is the number of memory nodes the
+           policy is intended to manage.  Let the kernel then remap to the
+           set of memory nodes allowed by the task's cpuset, as that may
+           change over time.
+
+           MPOL_F_RELATIVE_NODES cannot be combined with the
+           MPOL_F_STATIC_NODES flag.  It also cannot be used for
+           MPOL_PREFERRED policies that were created with an empty nodemask
+           (local allocation).
+
+MEMORY POLICY REFERENCE COUNTING
+
+To resolve use/free races, struct mempolicy contains an atomic reference
+count field.  Internal interfaces, mpol_get()/mpol_put() increment and
+decrement this reference count, respectively.  mpol_put() will only free
+the structure back to the mempolicy kmem cache when the reference count
+goes to zero.
+
+When a new memory policy is allocated, its reference count is initialized
+to '1', representing the reference held by the task that is installing the
+new policy.  When a pointer to a memory policy structure is stored in another
+structure, another reference is added, as the task's reference will be dropped
+on completion of the policy installation.
+
+During run-time "usage" of the policy, we attempt to minimize atomic operations
+on the reference count, as this can lead to cache lines bouncing between cpus
+and NUMA nodes.  "Usage" here means one of the following:
+
+1) querying of the policy, either by the task itself [using the get_mempolicy()
+   API discussed below] or by another task using the /proc/<pid>/numa_maps
+   interface.
+
+2) examination of the policy to determine the policy mode and associated node
+   or node lists, if any, for page allocation.  This is considered a "hot
+   path".  Note that for MPOL_BIND, the "usage" extends across the entire
+   allocation process, which may sleep during page reclamation, because the
+   BIND policy nodemask is used, by reference, to filter ineligible nodes.
+
+We can avoid taking an extra reference during the usages listed above as
+follows:
+
+1) we never need to get/free the system default policy as this is never
+   changed nor freed, once the system is up and running.
+
+2) for querying the policy, we do not need to take an extra reference on the
+   target task's task policy nor vma policies because we always acquire the
+   task's mm's mmap_sem for read during the query.  The set_mempolicy() and
+   mbind() APIs [see below] always acquire the mmap_sem for write when
+   installing or replacing task or vma policies.  Thus, there is no possibility
+   of a task or thread freeing a policy while another task or thread is
+   querying it.
+
+3) Page allocation usage of task or vma policy occurs in the fault path where
+   we hold the mmap_sem for read.  Again, because replacing the task or vma
+   policy requires that the mmap_sem be held for write, the policy can't be
+   freed out from under us while we're using it for page allocation.
+
+4) Shared policies require special consideration.  One task can replace a
+   shared memory policy while another task, with a distinct mmap_sem, is
+   querying or allocating a page based on the policy.  To resolve this
+   potential race, the shared policy infrastructure adds an extra reference
+   to the shared policy during lookup while holding a spin lock on the shared
+   policy management structure.  This requires that we drop this extra
+   reference when we're finished "using" the policy.  We must drop the
+   extra reference on shared policies in the same query/allocation paths
+   used for non-shared policies.  For this reason, shared policies are marked
+   as such, and the extra reference is dropped "conditionally"--i.e., only
+   for shared policies.
+
+   Because of this extra reference counting, and because we must look up
+   shared policies in a tree structure under spinlock, shared policies are
+   more expensive to use in the page allocation path.  This is especially
+   true for shared policies on shared memory regions shared by tasks running
+   on different NUMA nodes.  This extra overhead can be avoided by always
+   falling back to task or system default policy for shared memory regions,
+   or by prefaulting the entire shared memory region into memory and locking
+   it down.  However, this might not be appropriate for all applications.
+
 MEMORY POLICY APIs
 
 Linux supports 3 system calls for controlling memory policy.  These APIs
@@ -251,7 +380,9 @@ Set [Task] Memory Policy:
        Sets the calling task's "task/process memory policy" to the mode
        specified by the 'mode' argument and the set of nodes defined
        by 'nmask'.  'nmask' points to a bit mask of node ids containing
-       at least 'maxnode' ids.
+       at least 'maxnode' ids.  Optional mode flags may be passed by
+       combining the 'mode' argument with the flag (for example:
+       MPOL_INTERLEAVE | MPOL_F_STATIC_NODES).
 
        See the set_mempolicy(2) man page for more details
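
As an illustration of how the optional mode flags combine with a mode at the
API level, here is a minimal user-space sketch; it is not part of the patch.
It assumes set_mempolicy() and the MPOL_* mode and flag constants are visible
through <numaif.h> (libnuma, link with -lnuma); on installations whose headers
predate the new flags, the definitions would have to be taken from the
kernel's linux/mempolicy.h instead.  The interleave set 0-2 is an arbitrary
example.

#include <numaif.h>
#include <stdio.h>

int main(void)
{
        /* Nodes 0-2; the nodemask is a bit array indexed by node id. */
        unsigned long nodemask = 0x7;

        /* Combine the mode with an optional mode flag, as described above. */
        if (set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
                          &nodemask, 8 * sizeof(nodemask)) != 0) {
                perror("set_mempolicy");
                return 1;
        }

        /* From here on, page allocations for this task are interleaved over
         * nodes 0-2, and the nodemask is not remapped when the task's set of
         * allowed nodes changes (MPOL_F_STATIC_NODES). */
        return 0;
}
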
 
@@ -303,29 +434,19 @@ MEMORY POLICIES AND CPUSETS
 Memory policies work within cpusets as described above.  For memory policies
 that require a node or set of nodes, the nodes are restricted to the set of
 nodes whose memories are allowed by the cpuset constraints.  If the nodemask
-specified for the policy contains nodes that are not allowed by the cpuset, or
-the intersection of the set of nodes specified for the policy and the set of
-nodes with memory is the empty set, the policy is considered invalid
-and cannot be installed.
-
-The interaction of memory policies and cpusets can be problematic for a
-couple of reasons:
-
-1) the memory policy APIs take physical node id's as arguments.  As mentioned
-   above, it is illegal to specify nodes that are not allowed in the cpuset.
-   The application must query the allowed nodes using the get_mempolicy()
-   API with the MPOL_F_MEMS_ALLOWED flag to determine the allowed nodes and
-   restrict itself to those nodes.  However, the resources available to a
-   cpuset can be changed by the system administrator, or a workload manager
-   application, at any time.  So, a task may still get errors attempting to
-   specify policy nodes, and must query the allowed memories again.
-
-2) when tasks in two cpusets share access to a memory region, such as shared
-   memory segments created by shmget() of mmap() with the MAP_ANONYMOUS and
-   MAP_SHARED flags, and any of the tasks install shared policy on the region,
-   only nodes whose memories are allowed in both cpusets may be used in the
-   policies.  Obtaining this information requires "stepping outside" the
-   memory policy APIs to use the cpuset information and requires that one
-   know in what cpusets other task might be attaching to the shared region.
-   Furthermore, if the cpusets' allowed memory sets are disjoint, "local"
-   allocation is the only valid policy.
+specified for the policy contains nodes that are not allowed by the cpuset and
+MPOL_F_RELATIVE_NODES is not used, the intersection of the set of nodes
+specified for the policy and the set of nodes with memory is used.  If the
+result is the empty set, the policy is considered invalid and cannot be
+installed.  If MPOL_F_RELATIVE_NODES is used, the policy's nodes are mapped
+onto and folded into the task's set of allowed nodes as previously described.
+
+The interaction of memory policies and cpusets can be problematic when tasks
+in two cpusets share access to a memory region, such as shared memory segments
+created by shmget() or mmap() with the MAP_ANONYMOUS and MAP_SHARED flags, and
+any of the tasks installs shared policy on the region: only nodes whose
+memories are allowed in both cpusets may be used in the policies.  Obtaining
+this information requires "stepping outside" the memory policy APIs to use the
+cpuset information and requires that one know in what cpusets other tasks might
+be attaching to the shared region.  Furthermore, if the cpusets' allowed
+memory sets are disjoint, "local" allocation is the only valid policy.
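
The "mapped onto and folded into" remap that MPOL_F_RELATIVE_NODES performs,
as described in the flag documentation above, can be pictured with a small
sketch.  This is only an illustration of the stated rule, using plain
unsigned long bitmaps in place of the kernel's nodemask_t; it is not the
kernel's implementation.  The node numbers in main() are the ones from the
worked example above.

#include <stdio.h>

/* Bit i of 'relative' selects the i-th set bit of 'allowed'; bits beyond
 * the number of allowed nodes wrap around (fold) to the beginning. */
static unsigned long remap_relative(unsigned long relative, unsigned long allowed)
{
        int nr_allowed = __builtin_popcountl(allowed);
        unsigned long folded = 0, result = 0;
        int bit, idx;

        if (!nr_allowed)
                return 0;

        /* Fold: bit i of the user's mask becomes bit (i % nr_allowed). */
        for (bit = 0; bit < (int)(8 * sizeof(unsigned long)); bit++)
                if (relative & (1UL << bit))
                        folded |= 1UL << (bit % nr_allowed);

        /* Onto: bit n of the folded mask selects the n-th allowed node. */
        for (bit = 0, idx = 0; bit < (int)(8 * sizeof(unsigned long)); bit++) {
                if (!(allowed & (1UL << bit)))
                        continue;
                if (folded & (1UL << idx))
                        result |= 1UL << bit;
                idx++;
        }
        return result;
}

int main(void)
{
        unsigned long user = 0x3c;      /* relative nodes 2-5 */
        unsigned long allowed = 0xf8;   /* cpuset mems 3-7 */

        /* Prints 0xe8, i.e. nodes 3,5-7, matching the example above. */
        printf("effective nodemask: 0x%lx\n", remap_relative(user, allowed));
        return 0;
}
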
index 22d7e3e4d60ce06df281d77d2b5638b76ded932b..d3ce295bffac59ce6ca9a4be032194210e5ac41f 100644 (file)
@@ -31,7 +31,7 @@ struct slabinfo {
        int hwcache_align, object_size, objs_per_slab;
        int sanity_checks, slab_size, store_user, trace;
        int order, poison, reclaim_account, red_zone;
-       unsigned long partial, objects, slabs;
+       unsigned long partial, objects, slabs, objects_partial, objects_total;
        unsigned long alloc_fastpath, alloc_slowpath;
        unsigned long free_fastpath, free_slowpath;
        unsigned long free_frozen, free_add_partial, free_remove_partial;
@@ -540,7 +540,8 @@ void slabcache(struct slabinfo *s)
                return;
 
        store_size(size_str, slab_size(s));
-       snprintf(dist_str, 40, "%lu/%lu/%d", s->slabs, s->partial, s->cpu_slabs);
+       snprintf(dist_str, 40, "%lu/%lu/%d", s->slabs - s->cpu_slabs,
+                                               s->partial, s->cpu_slabs);
 
        if (!line++)
                first_line();
@@ -776,7 +777,6 @@ void totals(void)
                unsigned long used;
                unsigned long long wasted;
                unsigned long long objwaste;
-               long long objects_in_partial_slabs;
                unsigned long percentage_partial_slabs;
                unsigned long percentage_partial_objs;
 
@@ -790,18 +790,11 @@ void totals(void)
                wasted = size - used;
                objwaste = s->slab_size - s->object_size;
 
-               objects_in_partial_slabs = s->objects -
-                       (s->slabs - s->partial - s ->cpu_slabs) *
-                       s->objs_per_slab;
-
-               if (objects_in_partial_slabs < 0)
-                       objects_in_partial_slabs = 0;
-
                percentage_partial_slabs = s->partial * 100 / s->slabs;
                if (percentage_partial_slabs > 100)
                        percentage_partial_slabs = 100;
 
-               percentage_partial_objs = objects_in_partial_slabs * 100
+               percentage_partial_objs = s->objects_partial * 100
                                                        / s->objects;
 
                if (percentage_partial_objs > 100)
@@ -823,8 +816,8 @@ void totals(void)
                        min_objects = s->objects;
                if (used < min_used)
                        min_used = used;
-               if (objects_in_partial_slabs < min_partobj)
-                       min_partobj = objects_in_partial_slabs;
+               if (s->objects_partial < min_partobj)
+                       min_partobj = s->objects_partial;
                if (percentage_partial_slabs < min_ppart)
                        min_ppart = percentage_partial_slabs;
                if (percentage_partial_objs < min_ppartobj)
@@ -848,8 +841,8 @@ void totals(void)
                        max_objects = s->objects;
                if (used > max_used)
                        max_used = used;
-               if (objects_in_partial_slabs > max_partobj)
-                       max_partobj = objects_in_partial_slabs;
+               if (s->objects_partial > max_partobj)
+                       max_partobj = s->objects_partial;
                if (percentage_partial_slabs > max_ppart)
                        max_ppart = percentage_partial_slabs;
                if (percentage_partial_objs > max_ppartobj)
@@ -864,7 +857,7 @@ void totals(void)
 
                total_objects += s->objects;
                total_used += used;
-               total_partobj += objects_in_partial_slabs;
+               total_partobj += s->objects_partial;
                total_ppart += percentage_partial_slabs;
                total_ppartobj += percentage_partial_objs;
 
@@ -1160,6 +1153,8 @@ void read_slab_dir(void)
                        slab->hwcache_align = get_obj("hwcache_align");
                        slab->object_size = get_obj("object_size");
                        slab->objects = get_obj("objects");
+                       slab->objects_partial = get_obj("objects_partial");
+                       slab->objects_total = get_obj("objects_total");
                        slab->objs_per_slab = get_obj("objs_per_slab");
                        slab->order = get_obj("order");
                        slab->partial = get_obj("partial");
diff --git a/Kbuild b/Kbuild
index 1570d248ad9237e4fa6e4d079336b9da62d9ba32..32f19c5c9bb07e106ee06de49e7f72c1ead9dd3b 100644 (file)
--- a/Kbuild
+++ b/Kbuild
@@ -1,26 +1,61 @@
 #
 # Kbuild for top-level directory of the kernel
 # This file takes care of the following:
-# 1) Generate asm-offsets.h
-# 2) Check for missing system calls
+# 1) Generate bounds.h
+# 2) Generate asm-offsets.h (may need bounds.h)
+# 3) Check for missing system calls
 
 #####
-# 1) Generate asm-offsets.h
+# 1) Generate bounds.h
+
+bounds-file := include/linux/bounds.h
+
+always  := $(bounds-file)
+targets := $(bounds-file) kernel/bounds.s
+
+quiet_cmd_bounds = GEN     $@
+define cmd_bounds
+       (set -e; \
+        echo "#ifndef __LINUX_BOUNDS_H__"; \
+        echo "#define __LINUX_BOUNDS_H__"; \
+        echo "/*"; \
+        echo " * DO NOT MODIFY."; \
+        echo " *"; \
+        echo " * This file was generated by Kbuild"; \
+        echo " *"; \
+        echo " */"; \
+        echo ""; \
+        sed -ne $(sed-y) $<; \
+        echo ""; \
+        echo "#endif" ) > $@
+endef
+
+# We use internal kbuild rules to avoid the "is up to date" message from make
+kernel/bounds.s: kernel/bounds.c FORCE
+       $(Q)mkdir -p $(dir $@)
+       $(call if_changed_dep,cc_s_c)
+
+$(obj)/$(bounds-file): kernel/bounds.s Kbuild
+       $(Q)mkdir -p $(dir $@)
+       $(call cmd,bounds)
+
+#####
+# 2) Generate asm-offsets.h
 #
 
 offsets-file := include/asm-$(SRCARCH)/asm-offsets.h
 
-always  := $(offsets-file)
-targets := $(offsets-file)
+always  += $(offsets-file)
+targets += $(offsets-file)
 targets += arch/$(SRCARCH)/kernel/asm-offsets.s
-clean-files := $(addprefix $(objtree)/,$(targets))
+
 
 # Default sed regexp - multiline due to syntax constraints
 define sed-y
-       "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
+       "/^->/{s:->#\(.*\):/* \1 */:; \
+       s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
+       s:->::; p;}"
 endef
-# Override default regexp for specific architectures
-sed-$(CONFIG_MIPS) := "/^@@@/{s/^@@@//; s/ \#.*\$$//; p;}"
 
 quiet_cmd_offsets = GEN     $@
 define cmd_offsets
@@ -40,7 +75,8 @@ define cmd_offsets
 endef
 
 # We use internal kbuild rules to avoid the "is up to date" message from make
-arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c FORCE
+arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \
+                                      $(obj)/$(bounds-file) FORCE
        $(Q)mkdir -p $(dir $@)
        $(call if_changed_dep,cc_s_c)
 
@@ -49,7 +85,7 @@ $(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s Kbuild
        $(call cmd,offsets)
 
 #####
-# 2) Check for missing system calls
+# 3) Check for missing system calls
 #
 
 quiet_cmd_syscalls = CALL    $<
@@ -58,3 +94,7 @@ quiet_cmd_syscalls = CALL    $<
 PHONY += missing-syscalls
 missing-syscalls: scripts/checksyscalls.sh FORCE
        $(call cmd,syscalls)
+
+# Delete all targets during make clean
+clean-files := $(addprefix $(objtree)/,$(targets))
+
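
For context on the rules above: bounds.s and asm-offsets.s are produced by
compiling (never assembling) a C file whose only content is DEFINE() markers.
The marker macro, now shared through include/linux/kbuild.h (see the
asm-offsets.c hunks later in this diff), emits "->NAME value" annotations
into the assembly output, and the sed-y expression rewrites each annotation
into a #define in the generated header.  A minimal sketch follows; the
constant names are illustrative, not a claim about what kernel/bounds.c
actually exports.

/* Illustrative sketch of the DEFINE() mechanism driven by the Kbuild rules
 * above.  The file is only ever compiled to assembly (cc -S); the .s output
 * is not assembled, it is just scanned by the sed-y expression. */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

enum example_zone { EXAMPLE_DMA, EXAMPLE_NORMAL, EXAMPLE_HIGH, MAX_EXAMPLE_ZONES };

void foo(void)
{
        /* Leaves a line such as "->MAX_EXAMPLE_ZONES $3 MAX_EXAMPLE_ZONES"
         * in the assembly output; the sed rule above turns it into
         * "#define MAX_EXAMPLE_ZONES 3" plus a trailing comment in the
         * generated bounds.h-style header. */
        DEFINE(MAX_EXAMPLE_ZONES, MAX_EXAMPLE_ZONES);
}
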
index a942f385249901d21e679a5d77478bd3e4f49174..cae9001a670d84fbbac271924a4d23d6efed1e3b 100644 (file)
@@ -752,11 +752,13 @@ W:        http://atmelwlandriver.sourceforge.net/
 S:     Maintained
 
 AUDIT SUBSYSTEM
-P:     David Woodhouse
-M:     dwmw2@infradead.org
+P:     Al Viro
+M:     viro@zeniv.linux.org.uk
+P:     Eric Paris
+M:     eparis@redhat.com
 L:     linux-audit@redhat.com (subscribers-only)
 W:     http://people.redhat.com/sgrubb/audit/
-T:     git kernel.org:/pub/scm/linux/kernel/git/dwmw2/audit-2.6.git
+T:     git git.kernel.org/pub/scm/linux/kernel/git/viro/audit-current.git
 S:     Maintained
 
 AUXILIARY DISPLAY DRIVERS
@@ -1037,7 +1039,7 @@ P:        Urs Thuermann
 M:     urs.thuermann@volkswagen.de
 P:     Oliver Hartkopp
 M:     oliver.hartkopp@volkswagen.de
-L:     socketcan-core@lists.berlios.de
+L:     socketcan-core@lists.berlios.de (subscribers-only)
 W:     http://developer.berlios.de/projects/socketcan/
 S:     Maintained
 
@@ -1194,9 +1196,9 @@ S:        Maintained
 
 CPUSETS
 P:     Paul Jackson
-P:     Simon Derr
+P:     Paul Menage
 M:     pj@sgi.com
-M:     simon.derr@bull.net
+M:     menage@google.com
 L:     linux-kernel@vger.kernel.org
 W:     http://www.bullopensource.org/cpuset/
 S:     Supported
@@ -1531,6 +1533,13 @@ L:       bluesmoke-devel@lists.sourceforge.net
 W:     bluesmoke.sourceforge.net
 S:     Maintained
 
+EEEPC LAPTOP EXTRAS DRIVER
+P:     Corentin Chary
+M:     corentincj@iksaif.net
+L:     acpi4asus-user@lists.sourceforge.net
+W:     http://sourceforge.net/projects/acpi4asus
+S:     Maintained
+
 EEPRO100 NETWORK DRIVER
 P:     Andrey V. Savochkin
 M:     saw@saw.sw.com.sg
@@ -1548,6 +1557,14 @@ M:       raisch@de.ibm.com
 L:     general@lists.openfabrics.org
 S:     Supported
 
+EMBEDDED LINUX
+P:     Paul Gortmaker
+M:     paul.gortmaker@windriver.com
+P:     David Woodhouse
+M:     dwmw2@infradead.org
+L:     linux-embedded@vger.kernel.org
+S:     Maintained
+
 EMULEX LPFC FC SCSI DRIVER
 P:     James Smart
 M:     james.smart@emulex.com
@@ -2329,6 +2346,13 @@ L:       kvm-devel@lists.sourceforge.net
 W:     kvm.sourceforge.net
 S:     Supported
 
+KERNEL VIRTUAL MACHINE (KVM) FOR POWERPC
+P:     Hollis Blanchard
+M:     hollisb@us.ibm.com
+L:     kvm-ppc-devel@lists.sourceforge.net
+W:     kvm.sourceforge.net
+S:     Supported
+
 KERNEL VIRTUAL MACHINE For Itanium(KVM/IA64)
 P:     Anthony Xu
 M:     anthony.xu@intel.com
@@ -2338,6 +2362,16 @@ L:       kvm-ia64-devel@lists.sourceforge.net
 W:     kvm.sourceforge.net
 S:     Supported
 
+KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
+P:     Carsten Otte
+M:     cotte@de.ibm.com
+P:     Christian Borntraeger
+M:     borntraeger@de.ibm.com
+M:     linux390@de.ibm.com
+L:     linux-s390@vger.kernel.org
+W:     http://www.ibm.com/developerworks/linux/linux390/
+S:     Supported
+
 KEXEC
 P:     Eric Biederman
 M:     ebiederm@xmission.com
@@ -2677,7 +2711,7 @@ P:        David Howells
 M:     dhowells@redhat.com
 P:     Koichi Yasutake
 M:     yasutake.koichi@jp.panasonic.com
-L:     linux-am33-list@redhat.com
+L:     linux-am33-list@redhat.com (moderated for non-subscribers)
 W:     ftp://ftp.redhat.com/pub/redhat/gnupro/AM33/
 S:     Maintained
 
@@ -2740,7 +2774,7 @@ M:        rubini@ipvvis.unipv.it
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 
-MOXA SMARTIO/INDUSTIO SERIAL CARD (MXSER 2.0)
+MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
 P:     Jiri Slaby
 M:     jirislaby@gmail.com
 L:     linux-kernel@vger.kernel.org
@@ -3097,6 +3131,7 @@ P:        Jesse Barnes
 M:     jbarnes@virtuousgeek.org
 L:     linux-kernel@vger.kernel.org
 L:     linux-pci@atrey.karlin.mff.cuni.cz
+T:     git kernel.org:/pub/scm/linux/kernel/git/jbarnes/pci-2.6.git
 S:     Supported
 
 PCI HOTPLUG CORE
@@ -3557,6 +3592,13 @@ M:       pfg@sgi.com
 L:     linux-ia64@vger.kernel.org
 S:     Supported
 
+SFC NETWORK DRIVER
+P:     Steve Hodgson
+P:     Ben Hutchings
+P:     Robert Stonehouse
+M:     linux-net-drivers@solarflare.com
+S:     Supported
+
 SGI VISUAL WORKSTATION 320 AND 540
 P:     Andrey Panin
 M:     pazke@donpac.ru
@@ -3723,42 +3765,6 @@ M:       chrisw@sous-sol.org
 L:     stable@kernel.org
 S:     Maintained
 
-TPM DEVICE DRIVER
-P:     Kylene Hall
-M:     tpmdd-devel@lists.sourceforge.net
-W:     http://tpmdd.sourceforge.net
-P:     Marcel Selhorst
-M:     tpm@selhorst.net
-W:     http://www.prosec.rub.de/tpm/
-L:     tpmdd-devel@lists.sourceforge.net
-S:     Maintained
-
-Telecom Clock Driver for MCPL0010
-P:     Mark Gross
-M:     mark.gross@intel.com
-S:     Supported
-
-TENSILICA XTENSA PORT (xtensa):
-P:     Chris Zankel
-M:     chris@zankel.net
-S:     Maintained
-
-THINKPAD ACPI EXTRAS DRIVER
-P:     Henrique de Moraes Holschuh
-M:     ibm-acpi@hmh.eng.br
-L:     ibm-acpi-devel@lists.sourceforge.net
-W:     http://ibm-acpi.sourceforge.net
-W:     http://thinkwiki.org/wiki/Ibm-acpi
-T:     git repo.or.cz/linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git
-S:     Maintained
-
-UltraSPARC (sparc64):
-P:     David S. Miller
-M:     davem@davemloft.net
-L:     sparclinux@vger.kernel.org
-T:     git kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6.git
-S:     Maintained
-
 SHARP LH SUPPORT (LH7952X & LH7A40X)
 P:     Marc Singer
 M:     elf@buici.com
@@ -3855,6 +3861,12 @@ P:       Christoph Hellwig
 M:     hch@infradead.org
 S:     Maintained
 
+TASKSTATS STATISTICS INTERFACE
+P:     Shailabh Nagar
+M:     nagar@watson.ibm.com
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+
 TC CLASSIFIER
 P:     Jamal Hadi Salim
 M:     hadi@cyberus.ca
@@ -3877,6 +3889,25 @@ M:       andy@greyhouse.net
 L:     netdev@vger.kernel.org
 S:     Supported
 
+Telecom Clock Driver for MCPL0010
+P:     Mark Gross
+M:     mark.gross@intel.com
+S:     Supported
+
+TENSILICA XTENSA PORT (xtensa):
+P:     Chris Zankel
+M:     chris@zankel.net
+S:     Maintained
+
+THINKPAD ACPI EXTRAS DRIVER
+P:     Henrique de Moraes Holschuh
+M:     ibm-acpi@hmh.eng.br
+L:     ibm-acpi-devel@lists.sourceforge.net
+W:     http://ibm-acpi.sourceforge.net
+W:     http://thinkwiki.org/wiki/Ibm-acpi
+T:     git repo.or.cz/linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git
+S:     Maintained
+
 TI FLASH MEDIA INTERFACE DRIVER
 P:      Alex Dubov
 M:      oakad@yahoo.com
@@ -3894,12 +3925,6 @@ P:       Deepak Saxena
 M:     dsaxena@plexity.net
 S:     Maintained
 
-TASKSTATS STATISTICS INTERFACE
-P:     Shailabh Nagar
-M:     nagar@watson.ibm.com
-L:     linux-kernel@vger.kernel.org
-S:     Maintained
-
 TIPC NETWORK LAYER
 P:     Per Liden
 M:     per.liden@ericsson.com
@@ -3933,6 +3958,16 @@ L:       tlinux-users@tce.toshiba-dme.co.jp
 W:     http://www.buzzard.org.uk/toshiba/
 S:     Maintained
 
+TPM DEVICE DRIVER
+P:     Kylene Hall
+M:     tpmdd-devel@lists.sourceforge.net
+W:     http://tpmdd.sourceforge.net
+P:     Marcel Selhorst
+M:     tpm@selhorst.net
+W:     http://www.prosec.rub.de/tpm/
+L:     tpmdd-devel@lists.sourceforge.net
+S:     Maintained
+
 TRIDENT 4DWAVE/SIS 7018 PCI AUDIO CORE
 P:     Muli Ben-Yehuda
 M:     mulix@mulix.org
@@ -3945,6 +3980,12 @@ M:       trivial@kernel.org
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 
+TTY LAYER
+P:     Alan Cox
+M:     alan@lxorguk.ukuu.org.uk
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+
 TULIP NETWORK DRIVERS
 P:     Grant Grundler
 M:     grundler@parisc-linux.org
@@ -4113,6 +4154,20 @@ L:      linux-usb@vger.kernel.org
 W:     http://www.chello.nl/~j.vreeken/se401/
 S:     Maintained
 
+USB SERIAL BELKIN F5U103 DRIVER
+P:     William Greathouse
+M:     wgreathouse@smva.com
+L:      linux-usb@vger.kernel.org
+S:     Maintained
+
+USB SERIAL CYPRESS M8 DRIVER
+P:     Lonnie Mendez
+M:     dignome@gmail.com
+L:      linux-usb@vger.kernel.org
+S:     Maintained
+W:     http://geocities.com/i0xox0i
+W:     http://firstlight.net/cvs
+
 USB SERIAL CYBERJACK DRIVER
 P:     Matthias Bruestle and Harald Welte
 M:     support@reiner-sct.com
@@ -4132,20 +4187,6 @@ M:       gregkh@suse.de
 L:      linux-usb@vger.kernel.org
 S:     Supported
 
-USB SERIAL BELKIN F5U103 DRIVER
-P:     William Greathouse
-M:     wgreathouse@smva.com
-L:      linux-usb@vger.kernel.org
-S:     Maintained
-
-USB SERIAL CYPRESS M8 DRIVER
-P:     Lonnie Mendez
-M:     dignome@gmail.com
-L:      linux-usb@vger.kernel.org
-S:     Maintained
-W:     http://geocities.com/i0xox0i
-W:     http://firstlight.net/cvs
-
 USB SERIAL EMPEG EMPEG-CAR MARK I/II DRIVER
 P:     Gary Brubaker
 M:     xavyer@ix.netcom.com
@@ -4248,7 +4289,7 @@ M:        gregkh@suse.de
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 
-FAT/VFAT/MSDOS FILESYSTEM:
+VFAT/FAT/MSDOS FILESYSTEM:
 P:     OGAWA Hirofumi
 M:     hirofumi@mail.parknet.co.jp
 L:     linux-kernel@vger.kernel.org
@@ -4293,6 +4334,13 @@ M:       dushistov@mail.ru
 L:     linux-kernel@vger.kernel.org
 S:     Maintained
 
+UltraSPARC (sparc64):
+P:     David S. Miller
+M:     davem@davemloft.net
+L:     sparclinux@vger.kernel.org
+T:     git kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6.git
+S:     Maintained
+
 USB DIAMOND RIO500 DRIVER
 P:     Cesar Miquel
 M:     miquel@df.uba.ar
index fc3411e6f071fb34be17e7bd937e38634ab78cfd..d3634cd6fe35bde2a8ef73835e9f6f2559087dab 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1174,8 +1174,10 @@ rpm: include/config/kernel.release FORCE
 # Brief documentation of the typical targets used
 # ---------------------------------------------------------------------------
 
-boards := $(wildcard $(srctree)/arch/$(ARCH)/configs/*_defconfig)
+boards := $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*_defconfig)
 boards := $(notdir $(boards))
+board-dirs := $(dir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*/*_defconfig))
+board-dirs := $(sort $(notdir $(board-dirs:/=)))
 
 help:
        @echo  'Cleaning targets:'
@@ -1221,14 +1223,19 @@ help:
        @echo  'Documentation targets:'
        @$(MAKE) -f $(srctree)/Documentation/DocBook/Makefile dochelp
        @echo  ''
-       @echo  'Architecture specific targets ($(ARCH)):'
+       @echo  'Architecture specific targets ($(SRCARCH)):'
        @$(if $(archhelp),$(archhelp),\
-               echo '  No architecture specific help defined for $(ARCH)')
+               echo '  No architecture specific help defined for $(SRCARCH)')
        @echo  ''
        @$(if $(boards), \
                $(foreach b, $(boards), \
                printf "  %-24s - Build for %s\\n" $(b) $(subst _defconfig,,$(b));) \
                echo '')
+       @$(if $(board-dirs), \
+               $(foreach b, $(board-dirs), \
+               printf "  %-16s - Show %s-specific targets\\n" help-$(b) $(b);) \
+               printf "  %-16s - Show all of the above\\n" help-boards; \
+               echo '')
 
        @echo  '  make V=0|1 [targets] 0 => quiet build (default), 1 => verbose build'
        @echo  '  make V=2   [targets] 2 => give reason for rebuild of target'
@@ -1240,6 +1247,20 @@ help:
        @echo  'For further info see the ./README file'
 
 
+help-board-dirs := $(addprefix help-,$(board-dirs))
+
+help-boards: $(help-board-dirs)
+
+boards-per-dir = $(notdir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/$*/*_defconfig))
+
+$(help-board-dirs): help-%:
+       @echo  'Architecture specific targets ($(SRCARCH) $*):'
+       @$(if $(boards-per-dir), \
+               $(foreach b, $(boards-per-dir), \
+               printf "  %-24s - Build for %s\\n" $*/$(b) $(subst _defconfig,,$(b));) \
+               echo '')
+
+
 # Documentation targets
 # ---------------------------------------------------------------------------
 %docs: scripts_basic FORCE
index 694c9af520bbed3e9a36a6c16aa693f541b192b8..3ea332b009e511f785349c725e716052d42877ef 100644 (file)
@@ -36,3 +36,6 @@ config HAVE_KPROBES
 
 config HAVE_KRETPROBES
        def_bool n
+
+config HAVE_DMA_ATTRS
+       def_bool n
index 6c56c754a0b53c2947f662c0e3adde986ebdc513..4b18cd94d59d5060acfac02ad276101579ca481a 100644 (file)
@@ -8,13 +8,9 @@
 #include <linux/stddef.h>
 #include <linux/sched.h>
 #include <linux/ptrace.h>
+#include <linux/kbuild.h>
 #include <asm/io.h>
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 void foo(void)
 {
        DEFINE(TI_TASK, offsetof(struct thread_info, task));
index f10d2eddd2c309a807d58df12c48f01156f99efc..b04f1feb1dda80cd6c9868473b9c8789f1db4906 100644 (file)
@@ -994,7 +994,7 @@ marvel_agp_configure(alpha_agp_info *agp)
                 * rate, but warn the user.
                 */
                printk("%s: unknown PLL setting RNGB=%lx (PLL6_CTL=%016lx)\n",
-                      __FUNCTION__, IO7_PLL_RNGB(agp_pll), agp_pll);
+                      __func__, IO7_PLL_RNGB(agp_pll), agp_pll);
                break;
        }
 
@@ -1044,13 +1044,13 @@ marvel_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
 
        if (addr < agp->aperture.bus_base ||
            addr >= agp->aperture.bus_base + agp->aperture.size) {
-               printk("%s: addr out of range\n", __FUNCTION__);
+               printk("%s: addr out of range\n", __func__);
                return -EINVAL;
        }
 
        pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
        if (!(pte & 1)) {
-               printk("%s: pte not valid\n", __FUNCTION__);
+               printk("%s: pte not valid\n", __func__);
                return -EINVAL;
        } 
        return (pte >> 1) << PAGE_SHIFT;
index f5ca5255eb060ad822fff8358b4557ced9e11d2e..c0750291b44ad54a885fd3395f0811dc254ea44e 100644 (file)
@@ -336,10 +336,7 @@ t2_direct_map_window1(unsigned long base, unsigned long length)
 
 #if DEBUG_PRINT_FINAL_SETTINGS
        printk("%s: setting WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n",
-              __FUNCTION__,
-              *(vulp)T2_WBASE1,
-              *(vulp)T2_WMASK1,
-              *(vulp)T2_TBASE1);
+              __func__, *(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
 #endif
 }
 
@@ -366,10 +363,7 @@ t2_sg_map_window2(struct pci_controller *hose,
 
 #if DEBUG_PRINT_FINAL_SETTINGS
        printk("%s: setting WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n",
-              __FUNCTION__,
-              *(vulp)T2_WBASE2,
-              *(vulp)T2_WMASK2,
-              *(vulp)T2_TBASE2);
+              __func__, *(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
 #endif
 }
 
@@ -377,15 +371,15 @@ static void __init
 t2_save_configuration(void)
 {
 #if DEBUG_PRINT_INITIAL_SETTINGS
-       printk("%s: HAE_1 was 0x%lx\n", __FUNCTION__, srm_hae); /* HW is 0 */
-       printk("%s: HAE_2 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_2);
-       printk("%s: HAE_3 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_3);
-       printk("%s: HAE_4 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_4);
-       printk("%s: HBASE was 0x%lx\n", __FUNCTION__, *(vulp)T2_HBASE);
+       printk("%s: HAE_1 was 0x%lx\n", __func__, srm_hae); /* HW is 0 */
+       printk("%s: HAE_2 was 0x%lx\n", __func__, *(vulp)T2_HAE_2);
+       printk("%s: HAE_3 was 0x%lx\n", __func__, *(vulp)T2_HAE_3);
+       printk("%s: HAE_4 was 0x%lx\n", __func__, *(vulp)T2_HAE_4);
+       printk("%s: HBASE was 0x%lx\n", __func__, *(vulp)T2_HBASE);
 
-       printk("%s: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __FUNCTION__, 
+       printk("%s: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __func__,
               *(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
-       printk("%s: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __FUNCTION__, 
+       printk("%s: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __func__,
               *(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
 #endif
 
index 819326627b966c3f166f76b38a11614ad51e11ec..319fcb74611e57c9990aaba2e691079e8f1aa038 100644 (file)
@@ -365,21 +365,21 @@ void __init
 titan_init_arch(void)
 {
 #if 0
-       printk("%s: titan_init_arch()\n", __FUNCTION__);
-       printk("%s: CChip registers:\n", __FUNCTION__);
-       printk("%s: CSR_CSC 0x%lx\n", __FUNCTION__, TITAN_cchip->csc.csr);
-       printk("%s: CSR_MTR 0x%lx\n", __FUNCTION__, TITAN_cchip->mtr.csr);
-       printk("%s: CSR_MISC 0x%lx\n", __FUNCTION__, TITAN_cchip->misc.csr);
-       printk("%s: CSR_DIM0 0x%lx\n", __FUNCTION__, TITAN_cchip->dim0.csr);
-       printk("%s: CSR_DIM1 0x%lx\n", __FUNCTION__, TITAN_cchip->dim1.csr);
-       printk("%s: CSR_DIR0 0x%lx\n", __FUNCTION__, TITAN_cchip->dir0.csr);
-       printk("%s: CSR_DIR1 0x%lx\n", __FUNCTION__, TITAN_cchip->dir1.csr);
-       printk("%s: CSR_DRIR 0x%lx\n", __FUNCTION__, TITAN_cchip->drir.csr);
-
-       printk("%s: DChip registers:\n", __FUNCTION__);
-       printk("%s: CSR_DSC 0x%lx\n", __FUNCTION__, TITAN_dchip->dsc.csr);
-       printk("%s: CSR_STR 0x%lx\n", __FUNCTION__, TITAN_dchip->str.csr);
-       printk("%s: CSR_DREV 0x%lx\n", __FUNCTION__, TITAN_dchip->drev.csr);
+       printk("%s: titan_init_arch()\n", __func__);
+       printk("%s: CChip registers:\n", __func__);
+       printk("%s: CSR_CSC 0x%lx\n", __func__, TITAN_cchip->csc.csr);
+       printk("%s: CSR_MTR 0x%lx\n", __func__, TITAN_cchip->mtr.csr);
+       printk("%s: CSR_MISC 0x%lx\n", __func__, TITAN_cchip->misc.csr);
+       printk("%s: CSR_DIM0 0x%lx\n", __func__, TITAN_cchip->dim0.csr);
+       printk("%s: CSR_DIM1 0x%lx\n", __func__, TITAN_cchip->dim1.csr);
+       printk("%s: CSR_DIR0 0x%lx\n", __func__, TITAN_cchip->dir0.csr);
+       printk("%s: CSR_DIR1 0x%lx\n", __func__, TITAN_cchip->dir1.csr);
+       printk("%s: CSR_DRIR 0x%lx\n", __func__, TITAN_cchip->drir.csr);
+
+       printk("%s: DChip registers:\n", __func__);
+       printk("%s: CSR_DSC 0x%lx\n", __func__, TITAN_dchip->dsc.csr);
+       printk("%s: CSR_STR 0x%lx\n", __func__, TITAN_dchip->str.csr);
+       printk("%s: CSR_DREV 0x%lx\n", __func__, TITAN_dchip->drev.csr);
 #endif
 
        boot_cpuid = __hard_smp_processor_id();
@@ -700,13 +700,13 @@ titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
 
        if (addr < agp->aperture.bus_base ||
            addr >= agp->aperture.bus_base + agp->aperture.size) {
-               printk("%s: addr out of range\n", __FUNCTION__);
+               printk("%s: addr out of range\n", __func__);
                return -EINVAL;
        }
 
        pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
        if (!(pte & 1)) {
-               printk("%s: pte not valid\n", __FUNCTION__);
+               printk("%s: pte not valid\n", __func__);
                return -EINVAL;
        }
 
index ef91e09590d4d1aefe76772e81b29c83d9f73b78..5e7c28f92f19f29dde28d0e636ceb696f1750457 100644 (file)
@@ -241,8 +241,6 @@ tsunami_probe_write(volatile unsigned long *vaddr)
 #define tsunami_probe_read(ADDR) 1
 #endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */
 
-#define FN __FUNCTION__
-
 static void __init
 tsunami_init_one_pchip(tsunami_pchip *pchip, int index)
 {
@@ -383,27 +381,27 @@ tsunami_init_arch(void)
        /* NXMs just don't matter to Tsunami--unless they make it
           choke completely. */
        tmp = (unsigned long)(TSUNAMI_cchip - 1);
-       printk("%s: probing bogus address:  0x%016lx\n", FN, bogus_addr);
+       printk("%s: probing bogus address:  0x%016lx\n", __func__, bogus_addr);
        printk("\tprobe %s\n",
               tsunami_probe_write((unsigned long *)bogus_addr)
               ? "succeeded" : "failed");
 #endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */
 
 #if 0
-       printk("%s: CChip registers:\n", FN);
-       printk("%s: CSR_CSC 0x%lx\n", FN, TSUNAMI_cchip->csc.csr);
-       printk("%s: CSR_MTR 0x%lx\n", FN, TSUNAMI_cchip.mtr.csr);
-       printk("%s: CSR_MISC 0x%lx\n", FN, TSUNAMI_cchip->misc.csr);
-       printk("%s: CSR_DIM0 0x%lx\n", FN, TSUNAMI_cchip->dim0.csr);
-       printk("%s: CSR_DIM1 0x%lx\n", FN, TSUNAMI_cchip->dim1.csr);
-       printk("%s: CSR_DIR0 0x%lx\n", FN, TSUNAMI_cchip->dir0.csr);
-       printk("%s: CSR_DIR1 0x%lx\n", FN, TSUNAMI_cchip->dir1.csr);
-       printk("%s: CSR_DRIR 0x%lx\n", FN, TSUNAMI_cchip->drir.csr);
+       printk("%s: CChip registers:\n", __func__);
+       printk("%s: CSR_CSC 0x%lx\n", __func__, TSUNAMI_cchip->csc.csr);
+       printk("%s: CSR_MTR 0x%lx\n", __func__, TSUNAMI_cchip.mtr.csr);
+       printk("%s: CSR_MISC 0x%lx\n", __func__, TSUNAMI_cchip->misc.csr);
+       printk("%s: CSR_DIM0 0x%lx\n", __func__, TSUNAMI_cchip->dim0.csr);
+       printk("%s: CSR_DIM1 0x%lx\n", __func__, TSUNAMI_cchip->dim1.csr);
+       printk("%s: CSR_DIR0 0x%lx\n", __func__, TSUNAMI_cchip->dir0.csr);
+       printk("%s: CSR_DIR1 0x%lx\n", __func__, TSUNAMI_cchip->dir1.csr);
+       printk("%s: CSR_DRIR 0x%lx\n", __func__, TSUNAMI_cchip->drir.csr);
 
        printk("%s: DChip registers:\n");
-       printk("%s: CSR_DSC 0x%lx\n", FN, TSUNAMI_dchip->dsc.csr);
-       printk("%s: CSR_STR 0x%lx\n", FN, TSUNAMI_dchip->str.csr);
-       printk("%s: CSR_DREV 0x%lx\n", FN, TSUNAMI_dchip->drev.csr);
+       printk("%s: CSR_DSC 0x%lx\n", __func__, TSUNAMI_dchip->dsc.csr);
+       printk("%s: CSR_STR 0x%lx\n", __func__, TSUNAMI_dchip->str.csr);
+       printk("%s: CSR_DREV 0x%lx\n", __func__, TSUNAMI_dchip->drev.csr);
 #endif
        /* With multiple PCI busses, we play with I/O as physical addrs.  */
        ioport_resource.end = ~0UL;
index 026ba9af6d6ae3e70a9ea71f6af4c014620176c7..ebc3c894b5a2cc0d5a871e0e30ac99249d599bd7 100644 (file)
@@ -120,6 +120,12 @@ module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs,
 
        nsyms = symtab->sh_size / sizeof(Elf64_Sym);
        chains = kcalloc(nsyms, sizeof(struct got_entry), GFP_KERNEL);
+       if (!chains) {
+               printk(KERN_ERR
+                      "module %s: no memory for symbol chain buffer\n",
+                      me->name);
+               return -ENOMEM;
+       }
 
        got->sh_size = 0;
        got->sh_addralign = 8;
index 78357798b6fd100d3964f20232fa974fc241b26f..36ab22a7ea124920f7f0cf8617deacf6906623a9 100644 (file)
@@ -208,7 +208,7 @@ pdev_save_srm_config(struct pci_dev *dev)
 
        tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
        if (!tmp) {
-               printk(KERN_ERR "%s: kmalloc() failed!\n", __FUNCTION__);
+               printk(KERN_ERR "%s: kmalloc() failed!\n", __func__);
                return;
        }
        tmp->next = srm_saved_configs;
@@ -514,8 +514,8 @@ sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-       unsigned long start = pci_resource_start(dev, bar);
-       unsigned long len = pci_resource_len(dev, bar);
+       resource_size_t start = pci_resource_start(dev, bar);
+       resource_size_t len = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);
 
        if (!len || !start)
index dd6e334ab9e127d61338f27bb28de710689aa6ab..2179c602032a6e5a7a4d55ec59bca02e2a745334 100644 (file)
@@ -79,25 +79,21 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
 
 #ifdef CONFIG_DISCONTIGMEM
 
-        if (!NODE_DATA(nid) ||
-            (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
-                                                 sizeof(*arena))))) {
-                printk("%s: couldn't allocate arena from node %d\n"
-                       "    falling back to system-wide allocation\n",
-                       __FUNCTION__, nid);
-                arena = alloc_bootmem(sizeof(*arena));
-        }
-
-        if (!NODE_DATA(nid) ||
-            (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
-                                                         mem_size,
-                                                         align,
-                                                         0)))) {
-                printk("%s: couldn't allocate arena ptes from node %d\n"
-                       "    falling back to system-wide allocation\n",
-                       __FUNCTION__, nid);
-                arena->ptes = __alloc_bootmem(mem_size, align, 0);
-        }
+       arena = alloc_bootmem_node(NODE_DATA(nid), sizeof(*arena));
+       if (!NODE_DATA(nid) || !arena) {
+               printk("%s: couldn't allocate arena from node %d\n"
+                      "    falling back to system-wide allocation\n",
+                      __func__, nid);
+               arena = alloc_bootmem(sizeof(*arena));
+       }
+
+       arena->ptes = __alloc_bootmem_node(NODE_DATA(nid), mem_size, align, 0);
+       if (!NODE_DATA(nid) || !arena->ptes) {
+               printk("%s: couldn't allocate arena ptes from node %d\n"
+                      "    falling back to system-wide allocation\n",
+                      __func__, nid);
+               arena->ptes = __alloc_bootmem(mem_size, align, 0);
+       }
 
 #else /* CONFIG_DISCONTIGMEM */
 
index 63c2073401ee5ce9bf75cbe90f28bffb88cc8375..2525692db0abd2608f1acfc78ac351668a57c85e 100644 (file)
@@ -755,7 +755,7 @@ smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
        if (atomic_read(&data.unstarted_count) > 0) {
                long start_time = jiffies;
                printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
-                      __FUNCTION__);
+                      __func__);
                timeout = jiffies + 30 * HZ;
                while (atomic_read(&data.unstarted_count) > 0
                       && time_before(jiffies, timeout))
@@ -764,7 +764,7 @@ smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
                        long delta = jiffies - start_time;
                        printk(KERN_ERR 
                               "%s: response %ld.%ld seconds into long wait\n",
-                              __FUNCTION__, delta / HZ,
+                              __func__, delta / HZ,
                               (100 * (delta - ((delta / HZ) * HZ))) / HZ);
                }
        }
index f7dd081d57ffa4283bd08917d6965e6181e9ada6..78ad7cd1bbd60326d38bee8f00c8beb6a55f03eb 100644 (file)
@@ -199,7 +199,7 @@ srm_env_init(void)
                printk(KERN_INFO "%s: This Alpha system doesn't "
                                "know about SRM (or you've booted "
                                "SRM->MILO->Linux, which gets "
-                               "misdetected)...\n", __FUNCTION__);
+                               "misdetected)...\n", __func__);
                return -ENODEV;
        }
 
index d187d01d2a17b72dc603241c26c85b6cf426d1ad..e53a1e1c2f217233abfc19ff40249f7bcde269b8 100644 (file)
@@ -259,7 +259,7 @@ alcor_init_pci(void)
        if (dev && dev->devfn == PCI_DEVFN(6,0)) {
                alpha_mv.sys.cia.gru_int_req_bits = XLT_GRU_INT_REQ_BITS; 
                printk(KERN_INFO "%s: Detected AS500 or XLT motherboard.\n",
-                      __FUNCTION__);
+                      __func__);
        }
        pci_dev_put(dev);
 }
index 922143ea1cdbb6d4b6df166f26e22482ef2f871c..828449cd263687c18bc278358a8e8e2c8c8a01be 100644 (file)
@@ -80,7 +80,7 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
        if (!(io7 = marvel_find_io7(pid))) {
                printk(KERN_ERR 
                       "%s for nonexistent io7 -- vec %x, pid %d\n",
-                      __FUNCTION__, irq, pid);
+                      __func__, irq, pid);
                return NULL;
        }
 
@@ -90,7 +90,7 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
        if (irq >= 0x180) {
                printk(KERN_ERR 
                       "%s for invalid irq -- pid %d adjusted irq %x\n",
-                      __FUNCTION__, pid, irq);
+                      __func__, pid, irq);
                return NULL;
        }
 
@@ -110,8 +110,8 @@ io7_enable_irq(unsigned int irq)
 
        ctl = io7_get_irq_ctl(irq, &io7);
        if (!ctl || !io7) {
-               printk(KERN_ERR "%s: get_ctl failed for irq %x\n", 
-                      __FUNCTION__, irq);
+               printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
+                      __func__, irq);
                return;
        }
                
@@ -130,8 +130,8 @@ io7_disable_irq(unsigned int irq)
 
        ctl = io7_get_irq_ctl(irq, &io7);
        if (!ctl || !io7) {
-               printk(KERN_ERR "%s: get_ctl failed for irq %x\n", 
-                      __FUNCTION__, irq);
+               printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
+                      __func__, irq);
                return;
        }
                
index 906019cfa6815ff8d6c00d6a299313090398b7bc..99a7f19da13aae935ae8b25ca9a590e8d5e02272 100644 (file)
@@ -454,7 +454,7 @@ sable_lynx_enable_irq(unsigned int irq)
        spin_unlock(&sable_lynx_irq_lock);
 #if 0
        printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n",
-              __FUNCTION__, mask, bit, irq);
+              __func__, mask, bit, irq);
 #endif
 }
 
@@ -470,7 +470,7 @@ sable_lynx_disable_irq(unsigned int irq)
        spin_unlock(&sable_lynx_irq_lock);
 #if 0
        printk("%s: mask 0x%lx bit 0x%x irq 0x%x\n",
-              __FUNCTION__, mask, bit, irq);
+              __func__, mask, bit, irq);
 #endif
 }
 
@@ -524,7 +524,7 @@ sable_lynx_srm_device_interrupt(unsigned long vector)
        irq = sable_lynx_irq_swizzle->mask_to_irq[bit];
 #if 0
        printk("%s: vector 0x%lx bit 0x%x irq 0x%x\n",
-              __FUNCTION__, vector, bit, irq);
+              __func__, vector, bit, irq);
 #endif
        handle_irq(irq);
 }
index ee7b9009ebb4f45c19d50fbf93d0a762769e0154..d4327e461c22156543e044eff59abc1ba628e0ad 100644 (file)
@@ -89,7 +89,7 @@ sio_pci_route(void)
        /* First, ALWAYS read and print the original setting. */
        pci_bus_read_config_dword(pci_isa_hose->bus, PCI_DEVFN(7, 0), 0x60,
                                  &orig_route_tab);
-       printk("%s: PIRQ original 0x%x new 0x%x\n", __FUNCTION__,
+       printk("%s: PIRQ original 0x%x new 0x%x\n", __func__,
               orig_route_tab, alpha_mv.sys.sio.route_tab);
 
 #if defined(ALPHA_RESTORE_SRM_SETUP)
index 2dc7f9fed213704749e0e2b7d58e161b909fad7e..dc57790250d2a26484bd25e2be0dca496e3217e8 100644 (file)
@@ -8,6 +8,7 @@
  * This file initializes the trap entry points
  */
 
+#include <linux/jiffies.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
 #include <linux/tty.h>
@@ -770,7 +771,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
              unsigned long reg, struct pt_regs *regs)
 {
        static int cnt = 0;
-       static long last_time = 0;
+       static unsigned long last_time;
 
        unsigned long tmp1, tmp2, tmp3, tmp4;
        unsigned long fake_reg, *reg_addr = &fake_reg;
@@ -781,7 +782,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
           with the unaligned access.  */
 
        if (!test_thread_flag (TIF_UAC_NOPRINT)) {
-               if (cnt >= 5 && jiffies - last_time > 5*HZ) {
+               if (cnt >= 5 && time_after(jiffies, last_time + 5 * HZ)) {
                        cnt = 0;
                }
                if (++cnt < 5) {
index d8d253285a94a33c8cc80607b8e811403563e78b..b786e68914d4a20b12b743ee886b9acdbed623d7 100644 (file)
@@ -8,6 +8,7 @@ mainmenu "Linux Kernel Configuration"
 config ARM
        bool
        default y
+       select HAVE_IDE
        select RTC_LIB
        select SYS_SUPPORTS_APM_EMULATION
        select HAVE_OPROFILE
@@ -223,7 +224,6 @@ config ARCH_CLPS7500
        select TIMER_ACORN
        select ISA
        select NO_IOPORT
-       select HAVE_IDE
        help
          Support for the Cirrus Logic PS7500FE system-on-a-chip.
 
@@ -236,7 +236,6 @@ config ARCH_CO285
        bool "Co-EBSA285"
        select FOOTBRIDGE
        select FOOTBRIDGE_ADDIN
-       select HAVE_IDE
        help
          Support for Intel's EBSA285 companion chip.
 
@@ -262,7 +261,6 @@ config ARCH_EP93XX
 config ARCH_FOOTBRIDGE
        bool "FootBridge"
        select FOOTBRIDGE
-       select HAVE_IDE
        help
          Support for systems based on the DC21285 companion chip
          ("FootBridge"), such as the Simtec CATS and the Rebel NetWinder.
@@ -301,7 +299,6 @@ config ARCH_IOP32X
        depends on MMU
        select PLAT_IOP
        select PCI
-       select HAVE_IDE
        help
          Support for Intel's 80219 and IOP32X (XScale) family of
          processors.
@@ -311,14 +308,12 @@ config ARCH_IOP33X
        depends on MMU
        select PLAT_IOP
        select PCI
-       select HAVE_IDE
        help
          Support for Intel's IOP33X (XScale) family of processors.
 
 config ARCH_IXP23XX
        bool "IXP23XX-based"
        depends on MMU
-       select HAVE_IDE
        select PCI
        help
          Support for Intel's IXP23xx (XScale) family of processors.
@@ -336,14 +331,12 @@ config ARCH_IXP4XX
        select GENERIC_GPIO
        select GENERIC_TIME
        select GENERIC_CLOCKEVENTS
-       select HAVE_IDE
        help
          Support for Intel's IXP4XX (XScale) family of processors.
 
 config ARCH_L7200
        bool "LinkUp-L7200"
        select FIQ
-       select HAVE_IDE
        help
          Say Y here if you intend to run this kernel on a LinkUp Systems
          L7200 Software Development Board which uses an ARM720T processor.
@@ -400,7 +393,6 @@ config ARCH_PXA
        depends on MMU
        select ARCH_MTD_XIP
        select GENERIC_GPIO
-       select HAVE_IDE
        select HAVE_GPIO_LIB
        select GENERIC_TIME
        select GENERIC_CLOCKEVENTS
@@ -416,7 +408,6 @@ config ARCH_RPC
        select ARCH_MAY_HAVE_PC_FDC
        select ISA_DMA_API
        select NO_IOPORT
-       select HAVE_IDE
        help
          On the Acorn Risc-PC, Linux can support the internal IDE disk and
          CD-ROM interface, serial and parallel port, and the floppy drive.
@@ -432,7 +423,6 @@ config ARCH_SA1100
        select GENERIC_TIME
        select GENERIC_CLOCKEVENTS
        select TICK_ONESHOT
-       select HAVE_IDE
        select HAVE_GPIO_LIB
        help
          Support for StrongARM 11x0 based boards.
@@ -440,7 +430,6 @@ config ARCH_SA1100
 config ARCH_S3C2410
        bool "Samsung S3C2410, S3C2412, S3C2413, S3C2440, S3C2442, S3C2443"
        select GENERIC_GPIO
-       select HAVE_IDE
        help
          Samsung S3C2410X CPU based systems, such as the Simtec Electronics
          BAST (<http://www.simtec.co.uk/products/EB110ITX/>), the IPAQ 1940 or
@@ -448,7 +437,6 @@ config ARCH_S3C2410
 
 config ARCH_SHARK
        bool "Shark"
-       select HAVE_IDE
        select ISA
        select ISA_DMA
        select PCI
@@ -458,7 +446,6 @@ config ARCH_SHARK
 
 config ARCH_LH7A40X
        bool "Sharp LH7A40X"
-       select HAVE_IDE
        help
          Say Y here for systems based on one of the Sharp LH7A40X
          System on a Chip processors.  These CPUs include an ARM922T
index dc030cfe5009640c1a7891f2ce632fea9cf3f816..5e68420f468046c306386e1f05275d0902ae3aea 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25-rc3
-# Sun Mar  9 06:33:33 2008
+# Linux kernel version: 2.6.25
+# Sun Apr 20 00:29:49 2008
 #
 CONFIG_ARM=y
 CONFIG_SYS_SUPPORTS_APM_EMULATION=y
@@ -51,7 +51,8 @@ CONFIG_FAIR_GROUP_SCHED=y
 # CONFIG_RT_GROUP_SCHED is not set
 CONFIG_USER_SCHED=y
 # CONFIG_CGROUP_SCHED is not set
-# CONFIG_SYSFS_DEPRECATED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
 # CONFIG_RELAY is not set
 # CONFIG_NAMESPACES is not set
 # CONFIG_BLK_DEV_INITRD is not set
@@ -85,6 +86,7 @@ CONFIG_SLAB=y
 CONFIG_HAVE_OPROFILE=y
 # CONFIG_KPROBES is not set
 CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
 CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
@@ -115,7 +117,6 @@ CONFIG_IOSCHED_NOOP=y
 CONFIG_DEFAULT_NOOP=y
 CONFIG_DEFAULT_IOSCHED="noop"
 CONFIG_CLASSIC_RCU=y
-# CONFIG_PREEMPT_RCU is not set
 
 #
 # System Type
@@ -320,8 +321,6 @@ CONFIG_TCP_CONG_CUBIC=y
 CONFIG_DEFAULT_TCP_CONG="cubic"
 # CONFIG_TCP_MD5SIG is not set
 # CONFIG_IPV6 is not set
-# CONFIG_INET6_XFRM_TUNNEL is not set
-# CONFIG_INET6_TUNNEL is not set
 # CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set
 # CONFIG_IP_DCCP is not set
@@ -383,7 +382,6 @@ CONFIG_IEEE80211=m
 CONFIG_IEEE80211_CRYPT_WEP=m
 # CONFIG_IEEE80211_CRYPT_CCMP is not set
 # CONFIG_IEEE80211_CRYPT_TKIP is not set
-# CONFIG_IEEE80211_SOFTMAC is not set
 # CONFIG_RFKILL is not set
 # CONFIG_NET_9P is not set
 
@@ -503,7 +501,7 @@ CONFIG_IDE_MAX_HWIFS=2
 CONFIG_BLK_DEV_IDE=m
 
 #
-# Please see Documentation/ide.txt for help/info on IDE drives
+# Please see Documentation/ide/ide.txt for help/info on IDE drives
 #
 # CONFIG_BLK_DEV_IDE_SATA is not set
 CONFIG_BLK_DEV_IDEDISK=m
@@ -518,10 +516,9 @@ CONFIG_IDE_PROC_FS=y
 #
 # IDE chipset support/bugfixes
 #
-CONFIG_IDE_GENERIC=m
 # CONFIG_BLK_DEV_PLATFORM is not set
 # CONFIG_BLK_DEV_IDEDMA is not set
-CONFIG_IDE_ARCH_OBSOLETE_INIT=y
+# CONFIG_BLK_DEV_HD_ONLY is not set
 # CONFIG_BLK_DEV_HD is not set
 
 #
@@ -562,6 +559,7 @@ CONFIG_NETDEV_10000=y
 #
 # CONFIG_WLAN_PRE80211 is not set
 # CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI_LEDS is not set
 # CONFIG_NET_PCMCIA is not set
 # CONFIG_WAN is not set
 # CONFIG_PPP is not set
@@ -707,6 +705,8 @@ CONFIG_SSB_POSSIBLE=y
 #
 # CONFIG_MFD_SM501 is not set
 # CONFIG_MFD_ASIC3 is not set
+# CONFIG_HTC_EGPIO is not set
+# CONFIG_HTC_PASIC3 is not set
 
 #
 # Multimedia devices
@@ -745,6 +745,7 @@ CONFIG_FB_TILEBLITTING=y
 CONFIG_FB_PXA=y
 CONFIG_FB_PXA_PARAMETERS=y
 CONFIG_FB_MBX=m
+# CONFIG_FB_METRONOME is not set
 CONFIG_FB_VIRTUAL=m
 # CONFIG_BACKLIGHT_LCD_SUPPORT is not set
 
@@ -891,7 +892,6 @@ CONFIG_RTC_LIB=y
 # CONFIG_JFS_FS is not set
 # CONFIG_FS_POSIX_ACL is not set
 # CONFIG_XFS_FS is not set
-# CONFIG_GFS2_FS is not set
 # CONFIG_OCFS2_FS is not set
 # CONFIG_DNOTIFY is not set
 CONFIG_INOTIFY=y
index 0a0d2479274b6c749c905aa5efcf4c2beb01b407..4a881258bb171f7c14828d12fdc4ef9bf91b0186 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/procinfo.h>
+#include <linux/kbuild.h>
 
 /*
  * Make sure that the compiler and target are compatible.
 #error    Known good compilers: 3.3
 #endif
 
-/* Use marker if you need to separate the values later */
-
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 int main(void)
 {
   DEFINE(TSK_ACTIVE_MM,                offsetof(struct task_struct, active_mm));
index e2e934c38080240ef5843b30273f114f2a213347..64c420805e6f4e2b7945cef4bcff38e3c4e2f576 100644 (file)
@@ -35,7 +35,7 @@ create_proc_entries(void)
 {
        struct proc_dir_entry* tags_entry;
 
-       tags_entry = create_proc_read_entry("atags", 0400, &proc_root, read_buffer, &tags_buffer);
+       tags_entry = create_proc_read_entry("atags", 0400, NULL, read_buffer, &tags_buffer);
        if (!tags_entry)
                return -ENOMEM;
 
index f56d48c451ea8dc4438a92e0539de21a7b3fc100..a53c0aba5c142b53295ca2563785bde904dc72b1 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/mutex.h>
@@ -723,17 +724,14 @@ unsigned int __ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed)
        return address;
 }
 
-static int ecard_prints(char *buffer, ecard_t *ec)
+static int ecard_prints(struct seq_file *m, ecard_t *ec)
 {
-       char *start = buffer;
-
-       buffer += sprintf(buffer, "  %d: %s ", ec->slot_no,
-                         ec->easi ? "EASI" : "    ");
+       seq_printf(m, "  %d: %s ", ec->slot_no, ec->easi ? "EASI" : "    ");
 
        if (ec->cid.id == 0) {
                struct in_chunk_dir incd;
 
-               buffer += sprintf(buffer, "[%04X:%04X] ",
+               seq_printf(m, "[%04X:%04X] ",
                        ec->cid.manufacturer, ec->cid.product);
 
                if (!ec->card_desc && ec->cid.cd &&
@@ -744,43 +742,43 @@ static int ecard_prints(char *buffer, ecard_t *ec)
                                strcpy((char *)ec->card_desc, incd.d.string);
                }
 
-               buffer += sprintf(buffer, "%s\n", ec->card_desc ? ec->card_desc : "*unknown*");
+               seq_printf(m, "%s\n", ec->card_desc ? ec->card_desc : "*unknown*");
        } else
-               buffer += sprintf(buffer, "Simple card %d\n", ec->cid.id);
+               seq_printf(m, "Simple card %d\n", ec->cid.id);
 
-       return buffer - start;
+       return 0;
 }
 
-static int get_ecard_dev_info(char *buf, char **start, off_t pos, int count)
+static int ecard_devices_proc_show(struct seq_file *m, void *v)
 {
        ecard_t *ec = cards;
-       off_t at = 0;
-       int len, cnt;
-
-       cnt = 0;
-       while (ec && count > cnt) {
-               len = ecard_prints(buf, ec);
-               at += len;
-               if (at >= pos) {
-                       if (!*start) {
-                               *start = buf + (pos - (at - len));
-                               cnt = at - pos;
-                       } else
-                               cnt += len;
-                       buf += len;
-               }
+
+       while (ec) {
+               ecard_prints(m, ec);
                ec = ec->next;
        }
-       return (count > cnt) ? cnt : count;
+       return 0;
 }
 
+static int ecard_devices_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, ecard_devices_proc_show, NULL);
+}
+
+static const struct file_operations bus_ecard_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = ecard_devices_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 static struct proc_dir_entry *proc_bus_ecard_dir = NULL;
 
 static void ecard_proc_init(void)
 {
-       proc_bus_ecard_dir = proc_mkdir("ecard", proc_bus);
-       create_proc_info_entry("devices", 0, proc_bus_ecard_dir,
-               get_ecard_dev_info);
+       proc_bus_ecard_dir = proc_mkdir("bus/ecard", NULL);
+       proc_create("devices", 0, proc_bus_ecard_dir, &bus_ecard_proc_fops);
 }
 
 #define ec_set_resource(ec,nr,st,sz)                           \
index d51bc8b60557243cda3bef93c1e5d29a2c198eca..b4565bb133c1ebeadd6c3a8d3b3a1afdd9015a7e 100644 (file)
@@ -1176,7 +1176,7 @@ space_cccc_001x(kprobe_opcode_t insn, struct arch_specific_insn *asi)
         * *S (bit 20) updates condition codes
         * ADC/SBC/RSC reads the C flag
         */
-       insn &= 0xfff00ff0;     /* Rn = r0, Rd = r0 */
+       insn &= 0xfff00fff;     /* Rn = r0, Rd = r0 */
        asi->insn[0] = insn;
        asi->insn_handler = (insn & (1 << 20)) ?  /* S-bit */
                        emulate_alu_imm_rwflags : emulate_alu_imm_rflags;
index 13e371aad8792686df6fee77d0642fa3030f8a69..5593dd207216bd0d2d1d8319eb8ab06e2bc13714 100644 (file)
@@ -66,7 +66,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
                        return -ENOMEM;
                for (is = 0; is < MAX_INSN_SIZE; ++is)
                        p->ainsn.insn[is] = tmp_insn[is];
-               flush_insns(&p->ainsn.insn, MAX_INSN_SIZE);
+               flush_insns(p->ainsn.insn, MAX_INSN_SIZE);
                break;
 
        case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */
index f1a80d74a4b659bde31a4402453f0bfa0232c19b..be526746e01e496d46a3e49db1b06494c7f9b1bf 100644 (file)
@@ -246,7 +246,7 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
                }
 
                mmc0_data = *data;
-               at91_clock_associate("mci0_clk", &at91cap9_mmc1_device.dev, "mci_clk");
+               at91_clock_associate("mci0_clk", &at91cap9_mmc0_device.dev, "mci_clk");
                platform_device_register(&at91cap9_mmc0_device);
        } else {                        /* MCI1 */
                /* CLK */
index 37cd547855b1baa5dbd894b3942c35d5cebbd5cd..728bb8f394415fc683d3e8b4620cdaec984992a8 100644 (file)
@@ -539,6 +539,17 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
        at91_set_B_periph(AT91_PIN_PB28, 0);    /* LCDD23 */
 #endif
 
+       if (ARRAY_SIZE(lcdc_resources) > 2) {
+               void __iomem *fb;
+               struct resource *fb_res = &lcdc_resources[2];
+               size_t fb_len = fb_res->end - fb_res->start + 1;
+
+               fb = ioremap_writecombine(fb_res->start, fb_len);
+               if (fb) {
+                       memset(fb, 0, fb_len);
+                       iounmap(fb);
+               }
+       }
        lcdc_data = *data;
        platform_device_register(&at91_lcdc_device);
 }
index b6454c52596227a640c77b15ec3699d05fcd1b3e..719667e25c981f01f8261e37746d3f5cdbb22d65 100644 (file)
@@ -308,7 +308,7 @@ void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
                }
 
                mmc0_data = *data;
-               at91_clock_associate("mci0_clk", &at91sam9263_mmc1_device.dev, "mci_clk");
+               at91_clock_associate("mci0_clk", &at91sam9263_mmc0_device.dev, "mci_clk");
                platform_device_register(&at91sam9263_mmc0_device);
        } else {                        /* MCI1 */
                /* CLK */
index dbb9a5fc2090250855daa02b7d6f65a9dfce7fe3..054689804e77571213012852f4dc771f5f70501e 100644 (file)
@@ -381,6 +381,20 @@ void __init at91_add_device_lcdc(struct atmel_lcdfb_info *data)
        at91_set_B_periph(AT91_PIN_PC24, 0);    /* LCDD22 */
        at91_set_B_periph(AT91_PIN_PC25, 0);    /* LCDD23 */
 
+#ifdef CONFIG_FB_INTSRAM
+       {
+               void __iomem *fb;
+               struct resource *fb_res = &lcdc_resources[2];
+               size_t fb_len = fb_res->end - fb_res->start + 1;
+
+               fb = ioremap_writecombine(fb_res->start, fb_len);
+               if (fb) {
+                       memset(fb, 0, fb_len);
+                       iounmap(fb);
+               }
+       }
+#endif
+
        lcdc_data = *data;
        platform_device_register(&at91_lcdc_device);
 }
index 26fea4dcc3a013edbd14f41c58c88b3b8b098e2d..81f1ebb4e964ed3c3228bb4c1b2a370b982ca39b 100644 (file)
@@ -79,8 +79,7 @@ static struct at91_udc_data __initdata csb337_udc_data = {
 
 static struct i2c_board_info __initdata csb337_i2c_devices[] = {
        {
-               I2C_BOARD_INFO("rtc-ds1307", 0x68),
-               .type   = "ds1307",
+               I2C_BOARD_INFO("ds1307", 0x68),
        },
 };
 
index 0a897efeba8e5321dc8fdfec5da831aa7a554f73..c1a813c7169b63f86f932b53bd3945efc1535204 100644 (file)
@@ -132,8 +132,7 @@ static struct i2c_board_info __initdata dk_i2c_devices[] = {
                I2C_BOARD_INFO("x9429", 0x28),
        },
        {
-               I2C_BOARD_INFO("at24c", 0x50),
-               .type   = "24c1024",
+               I2C_BOARD_INFO("24c1024", 0x50),
        }
 };
 
index b7b79bb9d6c4b09bd464225adb1e15a415ff16d8..af1a1d8ecc303a31d59841536f9c896acdd82b34 100644 (file)
@@ -93,8 +93,7 @@ static struct at91_mmc_data __initdata eb9200_mmc_data = {
 
 static struct i2c_board_info __initdata eb9200_i2c_devices[] = {
        {
-               I2C_BOARD_INFO("at24c", 0x50),
-               .type   = "24c512",
+               I2C_BOARD_INFO("24c512", 0x50),
        },
 };
 
index 39733b6992aa3126ee5e715dbaa0ef97faceefd3..aa863c1577087fa1bb0ba8ea5d26d8b37572c96b 100644 (file)
@@ -61,6 +61,15 @@ static inline void sdram_selfrefresh_enable(void)
 #else
 #include <asm/arch/at91sam9_sdramc.h>
 
+#ifdef CONFIG_ARCH_AT91SAM9263
+/*
+ * FIXME either or both the SDRAM controllers (EB0, EB1) might be in use;
+ * handle those cases both here and in the Suspend-To-RAM support.
+ */
+#define        AT91_SDRAMC     AT91_SDRAMC0
+#warning Assuming EB1 SDRAM controller is *NOT* used
+#endif
+
 static u32 saved_lpr;
 
 static inline void sdram_selfrefresh_enable(void)
@@ -75,11 +84,6 @@ static inline void sdram_selfrefresh_enable(void)
 
 #define sdram_selfrefresh_disable()    at91_sys_write(AT91_SDRAMC_LPR, saved_lpr)
 
-/*
- * FIXME: The AT91SAM9263 has a second EBI controller which may have
- *        additional SDRAM.  pm_slowclock.S will require a similar fix.
- */
-
 #endif
 
 
index 4143828a9684615226e33de4fae0bbad831694fd..c6b94f60e0b2d242f3d5021bd6ed81a71c18bc7b 100644 (file)
@@ -311,11 +311,7 @@ static const struct file_operations proc_davinci_ck_operations = {
 
 static int __init davinci_ck_proc_init(void)
 {
-       struct proc_dir_entry *entry;
-
-       entry = create_proc_entry("davinci_clocks", 0, NULL);
-       if (entry)
-               entry->proc_fops = &proc_davinci_ck_operations;
+       proc_create("davinci_clocks", 0, NULL, &proc_davinci_ck_operations);
        return 0;
 
 }
index c947152f9a3c7baaf0ff5f46cd64d324eaa7298c..4877597c8758df1b1394571d7ec6a9b2b95d00e8 100644 (file)
@@ -50,8 +50,7 @@ static struct sys_timer em7210_timer = {
  */
 static struct i2c_board_info __initdata em7210_i2c_devices[] = {
        {
-               I2C_BOARD_INFO("rtc-rs5c372", 0x32),
-               .type = "rs5c372a",
+               I2C_BOARD_INFO("rs5c372a", 0x32),
        },
 };
 
index d2a7b04f1cb0764c51f7238d19385615b992bc29..d4fca75ce5426e88e23c88d5b0836fe817601c0e 100644 (file)
@@ -176,12 +176,10 @@ static struct f75375s_platform_data glantank_f75375s = {
 
 static struct i2c_board_info __initdata glantank_i2c_devices[] = {
        {
-               I2C_BOARD_INFO("rtc-rs5c372", 0x32),
-               .type = "rs5c372a",
+               I2C_BOARD_INFO("rs5c372a", 0x32),
        },
        {
                I2C_BOARD_INFO("f75375", 0x2e),
-               .type = "f75375",
                .platform_data = &glantank_f75375s,
        },
 };
index bc91d6e66bc4ea05e067d3b322b3ed8f1f64ac07..2741063bf361e34abca66f96f648c1f6a7843ea8 100644 (file)
@@ -208,12 +208,10 @@ static struct f75375s_platform_data n2100_f75375s = {
 
 static struct i2c_board_info __initdata n2100_i2c_devices[] = {
        {
-               I2C_BOARD_INFO("rtc-rs5c372", 0x32),
-               .type = "rs5c372b",
+               I2C_BOARD_INFO("rs5c372b", 0x32),
        },
        {
                I2C_BOARD_INFO("f75375", 0x2e),
-               .type = "f75375",
                .platform_data = &n2100_f75375s,
        },
 };
index 8cb07437a807f81d92eed0bbd1a7d82b5118ab5d..a51bfa6978b6a619190987d5ffb3e45a74fc5001 100644 (file)
@@ -65,7 +65,7 @@ static struct platform_device dsmg600_i2c_gpio = {
 
 static struct i2c_board_info __initdata dsmg600_i2c_board_info [] = {
        {
-               I2C_BOARD_INFO("rtc-pcf8563", 0x51),
+               I2C_BOARD_INFO("pcf8563", 0x51),
        },
 };
 
index 83c137ec582ce63441556ed4f2e78c1451fa600d..63a23fa4aab4c21b9d03d44ce08fa5023621badf 100644 (file)
@@ -448,7 +448,9 @@ int npe_send_message(struct npe *npe, const void *msg, const char *what)
                return -ETIMEDOUT;
        }
 
+#if DEBUG_MSG > 1
        debug_msg(npe, "Sending a message took %i cycles\n", cycles);
+#endif
        return 0;
 }
 
@@ -484,7 +486,9 @@ int npe_recv_message(struct npe *npe, void *msg, const char *what)
                return -ETIMEDOUT;
        }
 
+#if DEBUG_MSG > 1
        debug_msg(npe, "Receiving a message took %i cycles\n", cycles);
+#endif
        return 0;
 }
 
index e8330132530175fe43363983eecee4d90ede5dc9..fab94eaecee7abb797b1645a121229f41d7eaf91 100644 (file)
@@ -184,6 +184,8 @@ void qmgr_release_queue(unsigned int queue)
        case 3: mask[0] = 0xFF; break;
        }
 
+       mask[1] = mask[2] = mask[3] = 0;
+
        while (addr--)
                shift_mask(mask);
 
index 159e1c4f1edaa789deb76336aa1616fd38c02c1c..84b5e62a9c0ab8a6f9defe0d9c59f0c9e8f4bc6b 100644 (file)
@@ -54,7 +54,7 @@ static struct platform_device nas100d_flash = {
 
 static struct i2c_board_info __initdata nas100d_i2c_board_info [] = {
        {
-               I2C_BOARD_INFO("rtc-pcf8563", 0x51),
+               I2C_BOARD_INFO("pcf8563", 0x51),
        },
 };
 
index d9a182895a0f998bee850213b0c607dbc13eceb4..a48a6655b887ef7fe535db996f08617a8b27e9af 100644 (file)
@@ -57,7 +57,7 @@ static struct i2c_gpio_platform_data nslu2_i2c_gpio_data = {
 
 static struct i2c_board_info __initdata nslu2_i2c_board_info [] = {
        {
-               I2C_BOARD_INFO("rtc-x1205", 0x6f),
+               I2C_BOARD_INFO("x1205", 0x6f),
        },
 };
 
index 5079877200154364cf5d5d5921898909f53b2c2a..4b444fdaafea04b430234f61574471168931155f 100644 (file)
@@ -351,11 +351,9 @@ static void __init h2_init_smc91x(void)
 static struct i2c_board_info __initdata h2_i2c_board_info[] = {
        {
                I2C_BOARD_INFO("tps65010", 0x48),
-               .type           = "tps65010",
                .irq            = OMAP_GPIO_IRQ(58),
        }, {
                I2C_BOARD_INFO("isp1301_omap", 0x2d),
-               .type           = "isp1301_omap",
                .irq            = OMAP_GPIO_IRQ(2),
        },
 };
index c3ef1ee5f77bcd4a01fb9a5ecac297493c850651..7fbaa8d648cd8b8eea971c2b3168a9b1bdd204a4 100644 (file)
@@ -473,8 +473,7 @@ static struct omap_board_config_kernel h3_config[] __initdata = {
 
 static struct i2c_board_info __initdata h3_i2c_board_info[] = {
        {
-               I2C_BOARD_INFO("tps65010", 0x48),
-               .type           = "tps65013",
+               I2C_BOARD_INFO("tps65013", 0x48),
                /* .irq         = OMAP_GPIO_IRQ(??), */
        },
 };
index 4f9baba7d893e2c1b263019b868768876c78ec52..a66505f58b1591f21e25634ac16ee26bf27e043d 100644 (file)
@@ -254,7 +254,6 @@ static struct tps65010_board tps_board = {
 static struct i2c_board_info __initdata osk_i2c_board_info[] = {
        {
                I2C_BOARD_INFO("tps65010", 0x48),
-               .type           = "tps65010",
                .irq            = OMAP_GPIO_IRQ(OMAP_MPUIO(1)),
                .platform_data  = &tps_board,
 
index 6b179371e0a238b119c5f25931536549877264be..9608503d67f5475eda40eaea8efc252f5c123a9d 100644 (file)
 
 /*
  * The Orion has a fully programmable address map. There's a separate address
- * map for each of the device _master_ interfaces, e.g. CPU, PCI, PCIE, USB,
+ * map for each of the device _master_ interfaces, e.g. CPU, PCI, PCIe, USB,
  * Gigabit Ethernet, DMA/XOR engines, etc. Each interface has its own
  * address decode windows that allow it to access any of the Orion resources.
  *
  * CPU address decoding --
  * Linux assumes that it is the boot loader that has already set up the access to
  * DDR and internal registers.
- * Setup access to PCI and PCI-E IO/MEM space is issued by this file.
+ * Setup access to PCI and PCIe IO/MEM space is issued by this file.
  * Setup access to various devices located on the device bus interface (e.g.
  * flashes, RTC, etc) should be issued by machine-setup.c according to
  * specific board population (by using orion5x_setup_*_win()).
index 439c7784af02ddfbb1c7afc10755c18f8a81e0eb..968deb58be01f7c9dd42905c486897feb5634928 100644 (file)
@@ -132,7 +132,7 @@ static struct platform_device orion5x_uart = {
 static struct resource orion5x_ehci0_resources[] = {
        {
                .start  = ORION5X_USB0_PHYS_BASE,
-               .end    = ORION5X_USB0_PHYS_BASE + SZ_4K,
+               .end    = ORION5X_USB0_PHYS_BASE + SZ_4K - 1,
                .flags  = IORESOURCE_MEM,
        },
        {
@@ -145,7 +145,7 @@ static struct resource orion5x_ehci0_resources[] = {
 static struct resource orion5x_ehci1_resources[] = {
        {
                .start  = ORION5X_USB1_PHYS_BASE,
-               .end    = ORION5X_USB1_PHYS_BASE + SZ_4K,
+               .end    = ORION5X_USB1_PHYS_BASE + SZ_4K - 1,
                .flags  = IORESOURCE_MEM,
        },
        {
@@ -317,7 +317,7 @@ struct sys_timer orion5x_timer = {
  ****************************************************************************/
 
 /*
- * Identify device ID and rev from PCIE configuration header space '0'.
+ * Identify device ID and rev from PCIe configuration header space '0'.
  */
 static void __init orion5x_id(u32 *dev, u32 *rev, char **dev_name)
 {
index f4c4c9a72a7c772ba26eb6091b553fff548d661e..14adf8d1a54ac8d96f33e23a8a92b98369faab72 100644 (file)
@@ -33,10 +33,9 @@ struct pci_sys_data;
 struct pci_bus;
 
 void orion5x_pcie_id(u32 *dev, u32 *rev);
-int orion5x_pcie_local_bus_nr(void);
-int orion5x_pci_local_bus_nr(void);
 int orion5x_pci_sys_setup(int nr, struct pci_sys_data *sys);
 struct pci_bus *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys);
+int orion5x_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin);
 
 /*
  * Valid GPIO pins according to MPP setup, used by machine-setup.
index 872aed372327de33c6aa481bf74f753b20328ba1..44c64342dacb9cc92bdec165c930307122fc6d83 100644 (file)
@@ -241,14 +241,17 @@ void __init db88f5281_pci_preinit(void)
 
 static int __init db88f5281_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 {
+       int irq;
+
        /*
-        * PCIE IRQ is connected internally (not GPIO)
+        * Check for devices with hard-wired IRQs.
         */
-       if (dev->bus->number == orion5x_pcie_local_bus_nr())
-               return IRQ_ORION5X_PCIE0_INT;
+       irq = orion5x_pci_map_irq(dev, slot, pin);
+       if (irq != -1)
+               return irq;
 
        /*
-        * PCI IRQs are connected via GPIOs
+        * PCI IRQs are connected via GPIOs.
         */
        switch (slot - DB88F5281_PCI_SLOT0_OFFS) {
        case 0:
@@ -292,9 +295,7 @@ static struct mv643xx_eth_platform_data db88f5281_eth_data = {
  * RTC DS1339 on I2C bus
  ****************************************************************************/
 static struct i2c_board_info __initdata db88f5281_i2c_rtc = {
-       .driver_name    = "rtc-ds1307",
-       .type           = "ds1339",
-       .addr           = 0x68,
+       I2C_BOARD_INFO("ds1339", 0x68),
 };
 
 /*****************************************************************************
index d67790ef236e13863c20c53b8b24412a0555c2dd..f9430f5ca9a8891d01ea4a5f2ea57894af0e923e 100644 (file)
 
 static int __init dns323_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 {
-       /* PCI-E */
-       if (dev->bus->number == orion5x_pcie_local_bus_nr())
-               return IRQ_ORION5X_PCIE0_INT;
+       int irq;
 
-       pr_err("%s: requested mapping for unknown bus\n", __func__);
+       /*
+        * Check for devices with hard-wired IRQs.
+        */
+       irq = orion5x_pci_map_irq(dev, slot, pin);
+       if (irq != -1)
+               return irq;
+
+       pr_err("%s: requested mapping for unknown device\n", __func__);
 
        return -1;
 }
@@ -220,19 +225,16 @@ static struct platform_device *dns323_plat_devices[] __initdata = {
 static struct i2c_board_info __initdata dns323_i2c_devices[] = {
        {
                I2C_BOARD_INFO("g760a", 0x3e),
-               .type = "g760a",
        },
 #if 0
        /* this entry requires the new-style driver model lm75 driver,
         * for the meantime "insmod lm75.ko force_lm75=0,0x48" is needed */
        {
-               I2C_BOARD_INFO("lm75", 0x48),
-               .type = "g751",
+               I2C_BOARD_INFO("g751", 0x48),
        },
 #endif
        {
-               I2C_BOARD_INFO("rtc-m41t80", 0x68),
-               .type = "m41t80",
+               I2C_BOARD_INFO("m41t80", 0x68),
        }
 };
 
@@ -253,9 +255,9 @@ static void __init dns323_init(void)
         */
        orion5x_setup_dev_boot_win(DNS323_NOR_BOOT_BASE, DNS323_NOR_BOOT_SIZE);
 
-       /* DNS-323 has a Marvell 88X7042 SATA controller attached via PCIE
+       /* DNS-323 has a Marvell 88X7042 SATA controller attached via PCIe
         *
-        * Open a special address decode windows for the PCIE WA.
+        * Open a special address decode windows for the PCIe WA.
         */
        orion5x_setup_pcie_wa_win(ORION5X_PCIE_WA_PHYS_BASE,
                                ORION5X_PCIE_WA_SIZE);
index 91413455beba0ca35f3f22d3ddad4b5d3b6f31ac..88410862feefa6d7b3c2d75b546ad80de62c485e 100644 (file)
@@ -120,13 +120,19 @@ static struct platform_device kurobox_pro_nor_flash = {
 
 static int __init kurobox_pro_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 {
+       int irq;
+
+       /*
+        * Check for devices with hard-wired IRQs.
+        */
+       irq = orion5x_pci_map_irq(dev, slot, pin);
+       if (irq != -1)
+               return irq;
+
        /*
         * PCI isn't used on the Kuro
         */
-       if (dev->bus->number == orion5x_pcie_local_bus_nr())
-               return IRQ_ORION5X_PCIE0_INT;
-       else
-               printk(KERN_ERR "kurobox_pro_pci_map_irq failed, unknown bus\n");
+       printk(KERN_ERR "kurobox_pro_pci_map_irq failed, unknown bus\n");
 
        return -1;
 }
@@ -162,9 +168,7 @@ static struct mv643xx_eth_platform_data kurobox_pro_eth_data = {
  * RTC 5C372a on I2C bus
  ****************************************************************************/
 static struct i2c_board_info __initdata kurobox_pro_i2c_rtc = {
-       .driver_name    = "rtc-rs5c372",
-       .type           = "rs5c372a",
-       .addr           = 0x32,
+       I2C_BOARD_INFO("rs5c372a", 0x32),
 };
 
 /*****************************************************************************
@@ -193,7 +197,7 @@ static void __init kurobox_pro_init(void)
        orion5x_setup_dev0_win(KUROBOX_PRO_NAND_BASE, KUROBOX_PRO_NAND_SIZE);
 
        /*
-        * Open a special address decode windows for the PCIE WA.
+        * Open a special address decode windows for the PCIe WA.
         */
        orion5x_setup_pcie_wa_win(ORION5X_PCIE_WA_PHYS_BASE,
                                ORION5X_PCIE_WA_SIZE);
index fdf99fca85b30081e1d9b5bede09ac0bd5d32d71..9d5d39fa19c36f7490ef3ad0700f2a133b4b8ccb 100644 (file)
@@ -41,11 +41,6 @@ void __init orion5x_pcie_id(u32 *dev, u32 *rev)
        *rev = orion_pcie_rev(PCIE_BASE);
 }
 
-int __init orion5x_pcie_local_bus_nr(void)
-{
-       return orion_pcie_get_local_bus_nr(PCIE_BASE);
-}
-
 static int pcie_valid_config(int bus, int dev)
 {
        /*
@@ -269,7 +264,7 @@ static int __init pcie_setup(struct pci_sys_data *sys)
  */
 static DEFINE_SPINLOCK(orion5x_pci_lock);
 
-int orion5x_pci_local_bus_nr(void)
+static int orion5x_pci_local_bus_nr(void)
 {
        u32 conf = orion5x_read(PCI_P2P_CONF);
        return((conf & PCI_P2P_BUS_MASK) >> PCI_P2P_BUS_OFFS);
@@ -557,3 +552,16 @@ struct pci_bus __init *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys
 
        return bus;
 }
+
+int __init orion5x_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+       int bus = dev->bus->number;
+
+       /*
+        * PCIe endpoint?
+        */
+       if (bus < orion5x_pci_local_bus_nr())
+               return IRQ_ORION5X_PCIE0_INT;
+
+       return -1;
+}
index 37e8b2dc3ed57eaef7e601464adaad03ec6ea10d..81abc1003aaef01d01081fd7f9bc7c392bba6db5 100644 (file)
@@ -172,11 +172,14 @@ void __init rd88f5182_pci_preinit(void)
 
 static int __init rd88f5182_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 {
+       int irq;
+
        /*
-        * PCI-E isn't used on the RD2
+        * Check for devices with hard-wired IRQs.
         */
-       if (dev->bus->number == orion5x_pcie_local_bus_nr())
-               return IRQ_ORION5X_PCIE0_INT;
+       irq = orion5x_pci_map_irq(dev, slot, pin);
+       if (irq != -1)
+               return irq;
 
        /*
         * PCI IRQs are connected via GPIOs
@@ -224,9 +227,7 @@ static struct mv643xx_eth_platform_data rd88f5182_eth_data = {
  * RTC DS1338 on I2C bus
  ****************************************************************************/
 static struct i2c_board_info __initdata rd88f5182_i2c_rtc = {
-       .driver_name    = "rtc-ds1307",
-       .type           = "ds1338",
-       .addr           = 0x68,
+       I2C_BOARD_INFO("ds1338", 0x68),
 };
 
 /*****************************************************************************
@@ -259,7 +260,7 @@ static void __init rd88f5182_init(void)
        orion5x_setup_dev1_win(RD88F5182_NOR_BASE, RD88F5182_NOR_SIZE);
 
        /*
-        * Open a special address decode windows for the PCIE WA.
+        * Open a special address decode windows for the PCIe WA.
         */
        orion5x_setup_pcie_wa_win(ORION5X_PCIE_WA_PHYS_BASE,
                                ORION5X_PCIE_WA_SIZE);
index fd43863a86f6dbc0479bb22264db087a12c4ba42..9afb41ee6e078caf22f08913f05225505f820b4f 100644 (file)
@@ -141,14 +141,17 @@ void __init qnap_ts209_pci_preinit(void)
 
 static int __init qnap_ts209_pci_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
 {
+       int irq;
+
        /*
-        * PCIE IRQ is connected internally (not GPIO)
+        * Check for devices with hard-wired IRQs.
         */
-       if (dev->bus->number == orion5x_pcie_local_bus_nr())
-               return IRQ_ORION5X_PCIE0_INT;
+       irq = orion5x_pci_map_irq(dev, slot, pin);
+       if (irq != -1)
+               return irq;
 
        /*
-        * PCI IRQs are connected via GPIOs
+        * PCI IRQs are connected via GPIOs.
         */
        switch (slot - QNAP_TS209_PCI_SLOT0_OFFS) {
        case 0:
@@ -276,8 +279,7 @@ static void __init ts209_find_mac_addr(void)
 #define TS209_RTC_GPIO 3
 
 static struct i2c_board_info __initdata qnap_ts209_i2c_rtc = {
-       .driver_name = "rtc-s35390a",
-       .addr        = 0x30,
+       I2C_BOARD_INFO("s35390a", 0x30),
        .irq         = 0,
 };
 
@@ -373,7 +375,7 @@ static void __init qnap_ts209_init(void)
                            QNAP_TS209_NOR_BOOT_SIZE);
 
        /*
-        * Open a special address decode windows for the PCIE WA.
+        * Open a special address decode windows for the PCIe WA.
         */
        orion5x_setup_pcie_wa_win(ORION5X_PCIE_WA_PHYS_BASE,
                                ORION5X_PCIE_WA_SIZE);
index 7cdcb459ea9dd266d1bb9c076bb19127144ec276..6a830853aa6a610e532c33040f0ab96544dc8c3e 100644 (file)
@@ -5,9 +5,9 @@
 # Common support (must be linked before board specific support)
 obj-y                          += clock.o devices.o generic.o irq.o dma.o \
                                   time.o gpio.o
-obj-$(CONFIG_PXA25x)           += pxa25x.o mfp-pxa2xx.o
-obj-$(CONFIG_PXA27x)           += pxa27x.o mfp-pxa2xx.o
-obj-$(CONFIG_PXA3xx)           += pxa3xx.o mfp-pxa3xx.o smemc.o
+obj-$(CONFIG_PXA25x)           += mfp-pxa2xx.o pxa25x.o
+obj-$(CONFIG_PXA27x)           += mfp-pxa2xx.o pxa27x.o
+obj-$(CONFIG_PXA3xx)           += mfp-pxa3xx.o pxa3xx.o smemc.o
 obj-$(CONFIG_CPU_PXA300)       += pxa300.o
 obj-$(CONFIG_CPU_PXA320)       += pxa320.o
 
index 331f29b2d0cd5e6e2cefeeb30d92fbed9fe4969a..44617938f3f10ff044083b027077f54e0cadc21e 100644 (file)
@@ -90,11 +90,6 @@ static struct map_desc standard_io_desc[] __initdata = {
                .pfn            = __phys_to_pfn(0x40000000),
                .length         = 0x02000000,
                .type           = MT_DEVICE
-       }, {    /* LCD */
-               .virtual        =  0xf4000000,
-               .pfn            = __phys_to_pfn(0x44000000),
-               .length         = 0x00100000,
-               .type           = MT_DEVICE
        }, {    /* Mem Ctl */
                .virtual        =  0xf6000000,
                .pfn            = __phys_to_pfn(0x48000000),
index f01d18544133e435be54466468b3ead8eab08006..bdf2397540373e8d9f2047cc1cb45bb81c5a1cae 100644 (file)
@@ -40,6 +40,7 @@
 
 #include <asm/arch/pxa-regs.h>
 #include <asm/arch/pxa2xx-regs.h>
+#include <asm/arch/pxa2xx-gpio.h>
 
 #include "generic.h"
 
index 03396063b561b0c1a9ef71cc09f57d73c22a2e92..530654474bb23bda983e2909b6bbf802748d44d3 100644 (file)
@@ -301,8 +301,7 @@ static struct pxafb_mode_info tpo_tdo24mtea1_modes[] = {
 static struct pxafb_mach_info littleton_lcd_info = {
        .modes                  = tpo_tdo24mtea1_modes,
        .num_modes              = 2,
-       .lccr0                  = LCCR0_Act,
-       .lccr3                  = LCCR3_HSP | LCCR3_VSP,
+       .lcd_conn               = LCD_COLOR_TFT_16BPP,
        .pxafb_lcd_power        = littleton_lcd_power,
 };
 
index ca209c443f34c3c90fce9b8979ce04ac86702c4e..0993f4d1a0bc71bba362a86a2c92aba54f56ee4f 100644 (file)
@@ -395,8 +395,8 @@ static struct pxafb_mach_info sharp_lm8v31 = {
        .num_modes      = 1,
        .cmap_inverse   = 0,
        .cmap_static    = 0,
-       .lccr0          = LCCR0_SDS,
-       .lccr3          = LCCR3_PCP | LCCR3_Acb(255),
+       .lcd_conn       = LCD_COLOR_DSTN_16BPP | LCD_PCLK_EDGE_FALL |
+                         LCD_AC_BIAS_FREQ(255),
 };
 
 #define        MMC_POLL_RATE           msecs_to_jiffies(1000)
index d70be75bd199552d63cbc7d851d9fbd39f87d4c4..badba064dc0400326e74a3e874d9366aa6519c52 100644 (file)
@@ -114,6 +114,14 @@ static unsigned long magician_pin_config[] = {
        GPIO82_CIF_DD_5,
        GPIO84_CIF_FV,
        GPIO85_CIF_LV,
+
+       /* Magician specific input GPIOs */
+       GPIO9_GPIO,     /* unknown */
+       GPIO10_GPIO,    /* GSM_IRQ */
+       GPIO13_GPIO,    /* CPLD_IRQ */
+       GPIO107_GPIO,   /* DS1WM_IRQ */
+       GPIO108_GPIO,   /* GSM_READY */
+       GPIO115_GPIO,   /* nPEN_IRQ */
 };
 
 /*
@@ -438,7 +446,7 @@ static struct pasic3_led pasic3_leds[] = {
 
 static struct platform_device pasic3;
 
-static struct pasic3_leds_machinfo __devinit pasic3_leds_info = {
+static struct pasic3_leds_machinfo pasic3_leds_info = {
        .num_leds   = ARRAY_SIZE(pasic3_leds),
        .power_gpio = EGPIO_MAGICIAN_LED_POWER,
        .leds       = pasic3_leds,
@@ -543,9 +551,28 @@ static struct platform_device power_supply = {
 static int magician_mci_init(struct device *dev,
                                irq_handler_t detect_irq, void *data)
 {
-       return request_irq(IRQ_MAGICIAN_SD, detect_irq,
+       int err;
+
+       err = request_irq(IRQ_MAGICIAN_SD, detect_irq,
                                IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
                                "MMC card detect", data);
+       if (err)
+               goto err_request_irq;
+       err = gpio_request(EGPIO_MAGICIAN_SD_POWER, "SD_POWER");
+       if (err)
+               goto err_request_power;
+       err = gpio_request(EGPIO_MAGICIAN_nSD_READONLY, "nSD_READONLY");
+       if (err)
+               goto err_request_readonly;
+
+       return 0;
+
+err_request_readonly:
+       gpio_free(EGPIO_MAGICIAN_SD_POWER);
+err_request_power:
+       free_irq(IRQ_MAGICIAN_SD, data);
+err_request_irq:
+       return err;
 }
 
 static void magician_mci_setpower(struct device *dev, unsigned int vdd)
@@ -562,6 +589,8 @@ static int magician_mci_get_ro(struct device *dev)
 
 static void magician_mci_exit(struct device *dev, void *data)
 {
+       gpio_free(EGPIO_MAGICIAN_nSD_READONLY);
+       gpio_free(EGPIO_MAGICIAN_SD_POWER);
        free_irq(IRQ_MAGICIAN_SD, data);
 }
 
@@ -643,28 +672,42 @@ static void __init magician_init(void)
 {
        void __iomem *cpld;
        int lcd_select;
+       int err;
+
+       gpio_request(GPIO13_MAGICIAN_CPLD_IRQ, "CPLD_IRQ");
+       gpio_request(GPIO107_MAGICIAN_DS1WM_IRQ, "DS1WM_IRQ");
 
        pxa2xx_mfp_config(ARRAY_AND_SIZE(magician_pin_config));
 
        platform_add_devices(devices, ARRAY_SIZE(devices));
+
+       err = gpio_request(GPIO83_MAGICIAN_nIR_EN, "nIR_EN");
+       if (!err) {
+               gpio_direction_output(GPIO83_MAGICIAN_nIR_EN, 1);
+               pxa_set_ficp_info(&magician_ficp_info);
+       }
        pxa_set_i2c_info(NULL);
        pxa_set_mci_info(&magician_mci_info);
        pxa_set_ohci_info(&magician_ohci_info);
-       pxa_set_ficp_info(&magician_ficp_info);
 
        /* Check LCD type we have */
        cpld = ioremap_nocache(PXA_CS3_PHYS, 0x1000);
        if (cpld) {
                u8 board_id = __raw_readb(cpld+0x14);
+               iounmap(cpld);
                system_rev = board_id & 0x7;
                lcd_select = board_id & 0x8;
-               iounmap(cpld);
                pr_info("LCD type: %s\n", lcd_select ? "Samsung" : "Toppoly");
-               if (lcd_select && (system_rev < 3))
-                       pxa_gpio_mode(GPIO75_MAGICIAN_SAMSUNG_POWER_MD);
-               pxa_gpio_mode(GPIO104_MAGICIAN_LCD_POWER_1_MD);
-               pxa_gpio_mode(GPIO105_MAGICIAN_LCD_POWER_2_MD);
-               pxa_gpio_mode(GPIO106_MAGICIAN_LCD_POWER_3_MD);
+               if (lcd_select && (system_rev < 3)) {
+                       gpio_request(GPIO75_MAGICIAN_SAMSUNG_POWER, "SAMSUNG_POWER");
+                       gpio_direction_output(GPIO75_MAGICIAN_SAMSUNG_POWER, 0);
+               }
+               gpio_request(GPIO104_MAGICIAN_LCD_POWER_1, "LCD_POWER_1");
+               gpio_request(GPIO105_MAGICIAN_LCD_POWER_2, "LCD_POWER_2");
+               gpio_request(GPIO106_MAGICIAN_LCD_POWER_3, "LCD_POWER_3");
+               gpio_direction_output(GPIO104_MAGICIAN_LCD_POWER_1, 0);
+               gpio_direction_output(GPIO105_MAGICIAN_LCD_POWER_2, 0);
+               gpio_direction_output(GPIO106_MAGICIAN_LCD_POWER_3, 0);
                set_pxa_fb_info(lcd_select ? &samsung_info : &toppoly_info);
        } else
                pr_err("LCD detection: CPLD mapping failed\n");
index 18d47cfa2a188884fa48dd9886409d905c389fde..7399fb34da4e8edc3edfa72532276ca05f20005c 100644 (file)
@@ -434,8 +434,7 @@ static struct pxafb_mode_info toshiba_ltm035a776c_mode = {
 
 static struct pxafb_mach_info mainstone_pxafb_info = {
        .num_modes              = 1,
-       .lccr0                  = LCCR0_Act,
-       .lccr3                  = LCCR3_PCP,
+       .lcd_conn               = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
 };
 
 static int mainstone_mci_init(struct device *dev, irq_handler_t mstone_detect_int, void *data)
index e6be9d0aeccf2d9e94c4a1ac11306529d5f1bf2f..49d951db0f3d9dca15d2f2aeaeacc11c3b666754 100644 (file)
@@ -320,16 +320,13 @@ static struct soc_camera_link iclink[] = {
 static struct i2c_board_info __initdata pcm990_i2c_devices[] = {
        {
                /* Must initialize before the camera(s) */
-               I2C_BOARD_INFO("pca953x", 0x41),
-               .type = "pca9536",
+               I2C_BOARD_INFO("pca9536", 0x41),
                .platform_data = &pca9536_data,
        }, {
                I2C_BOARD_INFO("mt9v022", 0x48),
-               .type = "mt9v022",
                .platform_data = &iclink[0], /* With extender */
        }, {
                I2C_BOARD_INFO("mt9m001", 0x5d),
-               .type = "mt9m001",
                .platform_data = &iclink[0], /* With extender */
        },
 };
index 039194cbe477fd90eaacaa613dd87e1417040d65..ec1bbf333a3ad5a74c8c1cb06c2b0c6613fa277b 100644 (file)
@@ -46,8 +46,8 @@ int pxa_pm_enter(suspend_state_t state)
                        sleep_save_checksum += sleep_save[i];
        }
 
-       /* Clear sleep reset status */
-       RCSR = RCSR_SMR;
+       /* Clear reset status */
+       RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR;
 
        /* *** go zzz *** */
        pxa_cpu_pm_fns->enter(state);
index dde355e88fa1574d3760a778a4cb7bf8297d166a..b6a6f5fcc77ad56158bbd01b061c973ca4700285 100644 (file)
@@ -486,6 +486,8 @@ static int pxa3xx_set_wake(unsigned int irq, unsigned int on)
        case IRQ_MMC3:
                mask = ADXER_MFP_GEN12;
                break;
+       default:
+               return -EINVAL;
        }
 
        local_irq_save(flags);
index dbb546216be19d3220504fbce21bac09ab0780b5..4a0028087ea65740d5b253c16dc9f839ab94892a 100644 (file)
@@ -97,8 +97,7 @@ static struct pxafb_mode_info toshiba_ltm04c380k_mode = {
 
 static struct pxafb_mach_info zylonite_toshiba_lcd_info = {
        .num_modes              = 1,
-       .lccr0                  = LCCR0_Act,
-       .lccr3                  = LCCR3_PCP,
+       .lcd_conn               = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
        .pxafb_backlight_power  = zylonite_backlight_power,
 };
 
@@ -134,8 +133,7 @@ static struct pxafb_mode_info sharp_ls037_modes[] = {
 static struct pxafb_mach_info zylonite_sharp_lcd_info = {
        .modes                  = sharp_ls037_modes,
        .num_modes              = 2,
-       .lccr0                  = LCCR0_Act,
-       .lccr3                  = LCCR3_PCP | LCCR3_HSP | LCCR3_VSP,
+       .lcd_conn               = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
        .pxafb_backlight_power  = zylonite_backlight_power,
 };
 
index 1b8229d9c9d59e17890f916b79cf527a9b3a9c58..33ed048502a361b2204445e95ffc5e670dd8f645 100644 (file)
@@ -372,7 +372,7 @@ config CPU_FEROCEON
        select CPU_PABRT_NOIFAR
        select CPU_CACHE_VIVT
        select CPU_CP15_MMU
-       select CPU_COPY_V4WB if MMU
+       select CPU_COPY_FEROCEON if MMU
        select CPU_TLB_V4WBI if MMU
 
 config CPU_FEROCEON_OLD_ID
@@ -523,6 +523,9 @@ config CPU_COPY_V4WT
 config CPU_COPY_V4WB
        bool
 
+config CPU_COPY_FEROCEON
+       bool
+
 config CPU_COPY_V6
        bool
 
@@ -658,7 +661,7 @@ config CPU_DCACHE_SIZE
 
 config CPU_DCACHE_WRITETHROUGH
        bool "Force write through D-cache"
-       depends on (CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_FEROCEON) && !CPU_DCACHE_DISABLE
+       depends on (CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020) && !CPU_DCACHE_DISABLE
        default y if CPU_ARM925T
        help
          Say Y here to use the data cache in writethrough mode. Unless you
index 44536a0b995a5b2d1ec129c133ac1463a466fdd6..32b2d2d213a62e1913fd80bacb2c80b1565d32e7 100644 (file)
@@ -36,6 +36,7 @@ obj-$(CONFIG_CPU_CACHE_V7)    += cache-v7.o
 obj-$(CONFIG_CPU_COPY_V3)      += copypage-v3.o
 obj-$(CONFIG_CPU_COPY_V4WT)    += copypage-v4wt.o
 obj-$(CONFIG_CPU_COPY_V4WB)    += copypage-v4wb.o
+obj-$(CONFIG_CPU_COPY_FEROCEON)        += copypage-feroceon.o
 obj-$(CONFIG_CPU_COPY_V6)      += copypage-v6.o context.o
 obj-$(CONFIG_CPU_SA1100)       += copypage-v4mc.o
 obj-$(CONFIG_CPU_XSCALE)       += copypage-xscale.o
diff --git a/arch/arm/mm/copypage-feroceon.S b/arch/arm/mm/copypage-feroceon.S
new file mode 100644 (file)
index 0000000..7eb0d32
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ *  linux/arch/arm/lib/copypage-feroceon.S
+ *
+ *  Copyright (C) 2008 Marvell Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This handles copy_user_page and clear_user_page on Feroceon
+ * more optimally than the generic implementations.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/asm-offsets.h>
+
+       .text
+       .align  5
+
+ENTRY(feroceon_copy_user_page)
+       stmfd   sp!, {r4-r9, lr}
+       mov     ip, #PAGE_SZ
+1:     mov     lr, r1
+       ldmia   r1!, {r2 - r9}
+       pld     [lr, #32]
+       pld     [lr, #64]
+       pld     [lr, #96]
+       pld     [lr, #128]
+       pld     [lr, #160]
+       pld     [lr, #192]
+       pld     [lr, #224]
+       stmia   r0, {r2 - r9}
+       ldmia   r1!, {r2 - r9}
+       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line
+       add     r0, r0, #32
+       stmia   r0, {r2 - r9}
+       ldmia   r1!, {r2 - r9}
+       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line
+       add     r0, r0, #32
+       stmia   r0, {r2 - r9}
+       ldmia   r1!, {r2 - r9}
+       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line
+       add     r0, r0, #32
+       stmia   r0, {r2 - r9}
+       ldmia   r1!, {r2 - r9}
+       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line
+       add     r0, r0, #32
+       stmia   r0, {r2 - r9}
+       ldmia   r1!, {r2 - r9}
+       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line
+       add     r0, r0, #32
+       stmia   r0, {r2 - r9}
+       ldmia   r1!, {r2 - r9}
+       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line
+       add     r0, r0, #32
+       stmia   r0, {r2 - r9}
+       ldmia   r1!, {r2 - r9}
+       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line
+       add     r0, r0, #32
+       stmia   r0, {r2 - r9}
+       subs    ip, ip, #(32 * 8)
+       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line
+       add     r0, r0, #32
+       bne     1b
+       mcr     p15, 0, ip, c7, c10, 4          @ drain WB
+       ldmfd   sp!, {r4-r9, pc}
+
+       .align  5
+
+ENTRY(feroceon_clear_user_page)
+       stmfd   sp!, {r4-r7, lr}
+       mov     r1, #PAGE_SZ/32
+       mov     r2, #0
+       mov     r3, #0
+       mov     r4, #0
+       mov     r5, #0
+       mov     r6, #0
+       mov     r7, #0
+       mov     ip, #0
+       mov     lr, #0
+1:     stmia   r0, {r2-r7, ip, lr}
+       subs    r1, r1, #1
+       mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D line
+       add     r0, r0, #32
+       bne     1b
+       mcr     p15, 0, r1, c7, c10, 4          @ drain WB
+       ldmfd   sp!, {r4-r7, pc}
+
+       __INITDATA
+
+       .type   feroceon_user_fns, #object
+ENTRY(feroceon_user_fns)
+       .long   feroceon_clear_user_page
+       .long   feroceon_copy_user_page
+       .size   feroceon_user_fns, . - feroceon_user_fns
index 62066f3020c801353e3ea281d2ac57879e3e076c..7429f8c01015ffaff33372c67057f78aeba25e1d 100644 (file)
@@ -26,8 +26,8 @@ EXPORT_SYMBOL(ioport_unmap);
 #ifdef CONFIG_PCI
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-       unsigned long start = pci_resource_start(dev, bar);
-       unsigned long len   = pci_resource_len(dev, bar);
+       resource_size_t start = pci_resource_start(dev, bar);
+       resource_size_t len   = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);
 
        if (!len || !start)
index d41a75ed3dce27431fd11c78f7cf27553bbbe61c..2d6d682c206a814e5b7c41d7e8ce851f68721b18 100644 (file)
@@ -35,6 +35,7 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  * zero-initialized data and COW.
  */
 struct page *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
 
 /*
  * The pmd table for the upper-most set of pages.
index 90e7594e29b182123f1831d329418bd16228909a..a02c1712b52de690d0410bf3ce3fe2a5a90a131c 100644 (file)
@@ -93,7 +93,7 @@ ENTRY(cpu_feroceon_reset)
  *
  * Called with IRQs disabled
  */
-       .align  10
+       .align  5
 ENTRY(cpu_feroceon_do_idle)
        mov     r0, #0
        mcr     p15, 0, r0, c7, c10, 4          @ Drain write buffer
@@ -106,6 +106,7 @@ ENTRY(cpu_feroceon_do_idle)
  *     Clean and invalidate all cache entries in a particular
  *     address space.
  */
+       .align  5
 ENTRY(feroceon_flush_user_cache_all)
        /* FALLTHROUGH */
 
@@ -118,12 +119,8 @@ ENTRY(feroceon_flush_kern_cache_all)
        mov     r2, #VM_EXEC
        mov     ip, #0
 __flush_whole_cache:
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-       mcr     p15, 0, ip, c7, c6, 0           @ invalidate D cache
-#else
 1:     mrc     p15, 0, r15, c7, c14, 3         @ test,clean,invalidate
        bne     1b
-#endif
        tst     r2, #VM_EXEC
        mcrne   p15, 0, ip, c7, c5, 0           @ invalidate I cache
        mcrne   p15, 0, ip, c7, c10, 4          @ drain WB
@@ -139,27 +136,19 @@ __flush_whole_cache:
  *     - end   - end address (exclusive)
  *     - flags - vm_flags describing address space
  */
+       .align  5
 ENTRY(feroceon_flush_user_cache_range)
        mov     ip, #0
        sub     r3, r1, r0                      @ calculate total size
        cmp     r3, #CACHE_DLIMIT
        bgt     __flush_whole_cache
 1:     tst     r2, #VM_EXEC
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-       mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
-       mcrne   p15, 0, r0, c7, c5, 1           @ invalidate I entry
-       add     r0, r0, #CACHE_DLINESIZE
-       mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
-       mcrne   p15, 0, r0, c7, c5, 1           @ invalidate I entry
-       add     r0, r0, #CACHE_DLINESIZE
-#else
        mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D entry
        mcrne   p15, 0, r0, c7, c5, 1           @ invalidate I entry
        add     r0, r0, #CACHE_DLINESIZE
        mcr     p15, 0, r0, c7, c14, 1          @ clean and invalidate D entry
        mcrne   p15, 0, r0, c7, c5, 1           @ invalidate I entry
        add     r0, r0, #CACHE_DLINESIZE
-#endif
        cmp     r0, r1
        blo     1b
        tst     r2, #VM_EXEC
@@ -176,6 +165,7 @@ ENTRY(feroceon_flush_user_cache_range)
  *     - start - virtual start address
  *     - end   - virtual end address
  */
+       .align  5
 ENTRY(feroceon_coherent_kern_range)
        /* FALLTHROUGH */
 
@@ -207,6 +197,7 @@ ENTRY(feroceon_coherent_user_range)
  *
  *     - addr  - page aligned address
  */
+       .align  5
 ENTRY(feroceon_flush_kern_dcache_page)
        add     r1, r0, #PAGE_SZ
 1:     mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
@@ -231,13 +222,12 @@ ENTRY(feroceon_flush_kern_dcache_page)
  *
  * (same as v4wb)
  */
+       .align  5
 ENTRY(feroceon_dma_inv_range)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        tst     r0, #CACHE_DLINESIZE - 1
        mcrne   p15, 0, r0, c7, c10, 1          @ clean D entry
        tst     r1, #CACHE_DLINESIZE - 1
        mcrne   p15, 0, r1, c7, c10, 1          @ clean D entry
-#endif
        bic     r0, r0, #CACHE_DLINESIZE - 1
 1:     mcr     p15, 0, r0, c7, c6, 1           @ invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
@@ -256,14 +246,13 @@ ENTRY(feroceon_dma_inv_range)
  *
  * (same as v4wb)
  */
+       .align  5
 ENTRY(feroceon_dma_clean_range)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        bic     r0, r0, #CACHE_DLINESIZE - 1
 1:     mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
-#endif
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
        mov     pc, lr
 
@@ -275,14 +264,10 @@ ENTRY(feroceon_dma_clean_range)
  *     - start - virtual start address
  *     - end   - virtual end address
  */
+       .align  5
 ENTRY(feroceon_dma_flush_range)
        bic     r0, r0, #CACHE_DLINESIZE - 1
-1:
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
-       mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
-#else
-       mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
-#endif
+1:     mcr     p15, 0, r0, c7, c14, 1          @ clean+invalidate D entry
        add     r0, r0, #CACHE_DLINESIZE
        cmp     r0, r1
        blo     1b
@@ -300,13 +285,12 @@ ENTRY(feroceon_cache_fns)
        .long   feroceon_dma_clean_range
        .long   feroceon_dma_flush_range
 
+       .align  5
 ENTRY(cpu_feroceon_dcache_clean_area)
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
 1:     mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
        add     r0, r0, #CACHE_DLINESIZE
        subs    r1, r1, #CACHE_DLINESIZE
        bhi     1b
-#endif
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
        mov     pc, lr
 
@@ -323,13 +307,9 @@ ENTRY(cpu_feroceon_dcache_clean_area)
 ENTRY(cpu_feroceon_switch_mm)
 #ifdef CONFIG_MMU
        mov     ip, #0
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-       mcr     p15, 0, ip, c7, c6, 0           @ invalidate D cache
-#else
 @ && 'Clean & Invalidate whole DCache'
 1:     mrc     p15, 0, r15, c7, c14, 3         @ test,clean,invalidate
        bne     1b
-#endif
        mcr     p15, 0, ip, c7, c5, 0           @ invalidate I cache
        mcr     p15, 0, ip, c7, c10, 4          @ drain WB
        mcr     p15, 0, r0, c2, c0, 0           @ load page table pointer
@@ -362,16 +342,9 @@ ENTRY(cpu_feroceon_set_pte_ext)
        tst     r1, #L_PTE_PRESENT | L_PTE_YOUNG        @ Present and Young?
        movne   r2, #0
 
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-       eor     r3, r2, #0x0a                   @ C & small page?
-       tst     r3, #0x0b
-       biceq   r2, r2, #4
-#endif
        str     r2, [r0]                        @ hardware version
        mov     r0, r0
-#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
        mcr     p15, 0, r0, c7, c10, 1          @ clean D entry
-#endif
        mcr     p15, 0, r0, c7, c10, 4          @ drain WB
 #endif
        mov     pc, lr
@@ -387,20 +360,11 @@ __feroceon_setup:
        mcr     p15, 0, r0, c8, c7              @ invalidate I,D TLBs on v4
 #endif
 
-
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-       mov     r0, #4                          @ disable write-back on caches explicitly
-       mcr     p15, 7, r0, c15, c0, 0
-#endif
-
        adr     r5, feroceon_crval
        ldmia   r5, {r5, r6}
        mrc     p15, 0, r0, c1, c0              @ get control register v4
        bic     r0, r0, r5
        orr     r0, r0, r6
-#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
-       orr     r0, r0, #0x4000                 @ .1.. .... .... ....
-#endif
        mov     pc, lr
        .size   __feroceon_setup, . - __feroceon_setup
 
@@ -476,7 +440,7 @@ __feroceon_old_id_proc_info:
        .long   cpu_feroceon_name
        .long   feroceon_processor_functions
        .long   v4wbi_tlb_fns
-       .long   v4wb_user_fns
+       .long   feroceon_user_fns
        .long   feroceon_cache_fns
        .size   __feroceon_old_id_proc_info, . - __feroceon_old_id_proc_info
 #endif
@@ -502,6 +466,6 @@ __feroceon_proc_info:
        .long   cpu_feroceon_name
        .long   feroceon_processor_functions
        .long   v4wbi_tlb_fns
-       .long   v4wb_user_fns
+       .long   feroceon_user_fns
        .long   feroceon_cache_fns
        .size   __feroceon_proc_info, . - __feroceon_proc_info
index 75bae067922d1c55fdef728f67e8f952062009be..74fae6045650409bdd4dc44a343d60887b540443 100644 (file)
@@ -51,7 +51,7 @@
 /*
  * MPCore SCU event monitor support
  */
-#define SCU_EVENTMONITORS_VA_BASE __io_address(REALVIEW_MPCORE_SCU_BASE + 0x10)
+#define SCU_EVENTMONITORS_VA_BASE __io_address(REALVIEW_EB11MP_SCU_BASE + 0x10)
 
 /*
  * Bitmask of used SCU counters
@@ -80,7 +80,7 @@ static irqreturn_t scu_em_interrupt(int irq, void *arg)
        struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
        unsigned int cnt;
 
-       cnt = irq - IRQ_PMU_SCU0;
+       cnt = irq - IRQ_EB11MP_PMU_SCU0;
        oprofile_add_sample(get_irq_regs(), SCU_COUNTER(cnt));
        scu_reset_counter(emc, cnt);
 
@@ -119,10 +119,10 @@ static int scu_start(void)
         */
        for (i = 0; i < NUM_SCU_COUNTERS; i++) {
                if (scu_em_used & (1 << i)) {
-                       ret = request_irq(IRQ_PMU_SCU0 + i, scu_em_interrupt, IRQF_DISABLED, "SCU PMU", NULL);
+                       ret = request_irq(IRQ_EB11MP_PMU_SCU0 + i, scu_em_interrupt, IRQF_DISABLED, "SCU PMU", NULL);
                        if (ret) {
                                printk(KERN_ERR "oprofile: unable to request IRQ%u for SCU Event Monitor\n",
-                                      IRQ_PMU_SCU0 + i);
+                                      IRQ_EB11MP_PMU_SCU0 + i);
                                goto err_free_scu;
                        }
                }
@@ -153,7 +153,7 @@ static int scu_start(void)
 
  err_free_scu:
        while (i--)
-               free_irq(IRQ_PMU_SCU0 + i, NULL);
+               free_irq(IRQ_EB11MP_PMU_SCU0 + i, NULL);
        return ret;
 }
 
@@ -175,7 +175,7 @@ static void scu_stop(void)
        for (i = 0; i < NUM_SCU_COUNTERS; i++) {
                if (scu_em_used & (1 << i)) {
                        scu_reset_counter(emc, i);
-                       free_irq(IRQ_PMU_SCU0 + i, NULL);
+                       free_irq(IRQ_EB11MP_PMU_SCU0 + i, NULL);
                }
        }
 }
@@ -225,10 +225,10 @@ static int em_setup_ctrs(void)
 }
 
 static int arm11_irqs[] = {
-       [0]     = IRQ_PMU_CPU0,
-       [1]     = IRQ_PMU_CPU1,
-       [2]     = IRQ_PMU_CPU2,
-       [3]     = IRQ_PMU_CPU3
+       [0]     = IRQ_EB11MP_PMU_CPU0,
+       [1]     = IRQ_EB11MP_PMU_CPU1,
+       [2]     = IRQ_EB11MP_PMU_CPU2,
+       [3]     = IRQ_EB11MP_PMU_CPU3
 };
 
 static int em_start(void)
@@ -273,22 +273,22 @@ static int em_setup(void)
        /*
         * Send SCU PMU interrupts to the "owner" CPU.
         */
-       em_route_irq(IRQ_PMU_SCU0, 0);
-       em_route_irq(IRQ_PMU_SCU1, 0);
-       em_route_irq(IRQ_PMU_SCU2, 1);
-       em_route_irq(IRQ_PMU_SCU3, 1);
-       em_route_irq(IRQ_PMU_SCU4, 2);
-       em_route_irq(IRQ_PMU_SCU5, 2);
-       em_route_irq(IRQ_PMU_SCU6, 3);
-       em_route_irq(IRQ_PMU_SCU7, 3);
+       em_route_irq(IRQ_EB11MP_PMU_SCU0, 0);
+       em_route_irq(IRQ_EB11MP_PMU_SCU1, 0);
+       em_route_irq(IRQ_EB11MP_PMU_SCU2, 1);
+       em_route_irq(IRQ_EB11MP_PMU_SCU3, 1);
+       em_route_irq(IRQ_EB11MP_PMU_SCU4, 2);
+       em_route_irq(IRQ_EB11MP_PMU_SCU5, 2);
+       em_route_irq(IRQ_EB11MP_PMU_SCU6, 3);
+       em_route_irq(IRQ_EB11MP_PMU_SCU7, 3);
 
        /*
         * Send CP15 PMU interrupts to the owner CPU.
         */
-       em_route_irq(IRQ_PMU_CPU0, 0);
-       em_route_irq(IRQ_PMU_CPU1, 1);
-       em_route_irq(IRQ_PMU_CPU2, 2);
-       em_route_irq(IRQ_PMU_CPU3, 3);
+       em_route_irq(IRQ_EB11MP_PMU_CPU0, 0);
+       em_route_irq(IRQ_EB11MP_PMU_CPU1, 1);
+       em_route_irq(IRQ_EB11MP_PMU_CPU2, 2);
+       em_route_irq(IRQ_EB11MP_PMU_CPU3, 3);
 
        return 0;
 }
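
Note on the em_setup() hunks above: they rename the RealView PMU IRQ constants to their EB11MP-specific names, and the explicit routing table pairs SCU event-monitor interrupts two per CPU while each CP15 PMU interrupt goes to its own CPU. A hedged sketch of the same pattern as loops, assuming the IRQ numbers are consecutive (which scu_start()'s use of IRQ_EB11MP_PMU_SCU0 + i already relies on):

    /* Hedged sketch, not the kernel's code: the routing table expressed as
     * loops.  Assumes IRQ_EB11MP_PMU_SCU0..7 and IRQ_EB11MP_PMU_CPU0..3 are
     * consecutively numbered. */
    static void route_pmu_irqs(void)
    {
            int i;

            for (i = 0; i < 8; i++)                 /* two SCU counters per CPU */
                    em_route_irq(IRQ_EB11MP_PMU_SCU0 + i, i / 2);
            for (i = 0; i < 4; i++)                 /* one CP15 PMU per CPU */
                    em_route_irq(IRQ_EB11MP_PMU_CPU0 + i, i);
    }
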
index 078cd33f467be29256d3d81f39da48c9df6fb510..e4796c67a831aa930208c736de6722b1a460891b 100644 (file)
@@ -5,14 +5,7 @@
  */
 
 #include <linux/thread_info.h>
-
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-#define OFFSET(sym, str, mem) \
-        DEFINE(sym, offsetof(struct str, mem));
+#include <linux/kbuild.h>
 
 void foo(void)
 {
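
Several asm-offsets.c files in this merge drop their private DEFINE/BLANK/OFFSET copies in favour of the shared <linux/kbuild.h>. A sketch of what that header is expected to provide, reconstructed from the lines removed above (treat the exact header contents as an assumption):

    #include <linux/stddef.h>   /* offsetof() */

    /* Reconstructed from the per-arch macros deleted above; <linux/kbuild.h>
     * centralises these so every architecture emits offsets the same way. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    #define BLANK() asm volatile("\n->" : : )

    #define OFFSET(sym, str, mem) \
            DEFINE(sym, offsetof(struct str, mem))

    /* Example use: DEFINE(TI_flags, offsetof(struct thread_info, flags));
     * the "->TI_flags ..." marker left in the generated .s file later becomes
     * a #define in asm-offsets.h. */
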
index 2687b730e2d006a638492b303429cbcb767a313a..ce48c14f4349cfd3f18a1544397fd37588c6b888 100644 (file)
@@ -274,6 +274,8 @@ static int __init early_parse_fbmem(char *p)
                        printk(KERN_WARNING
                               "Failed to allocate framebuffer memory\n");
                        fbmem_size = 0;
+               } else {
+                       memset(__va(fbmem_start), 0, fbmem_size);
                }
        }
 
index b835257a8fa39d88d794ad2667ef9f46a4e6028c..cd12edbea9f26c403bdc5a3c85f034e76766fc9a 100644 (file)
@@ -369,11 +369,7 @@ static const struct file_operations proc_tlb_operations = {
 
 static int __init proctlb_init(void)
 {
-       struct proc_dir_entry *entry;
-
-       entry = create_proc_entry("tlb", 0, NULL);
-       if (entry)
-               entry->proc_fops = &proc_tlb_operations;
+       proc_create("tlb", 0, NULL, &proc_tlb_operations);
        return 0;
 }
 late_initcall(proctlb_init);
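
This hunk is one instance of a conversion repeated throughout the merge: create_proc_entry() followed by a separate ->proc_fops assignment becomes a single proc_create() call, so the entry never exists without its file operations. A minimal hedged sketch of the new idiom (the error handling is illustrative, not taken from the file):

    #include <linux/init.h>
    #include <linux/proc_fs.h>

    /* Hedged sketch of the proc_create() idiom used above; proc_tlb_operations
     * is the fops table from the surrounding file. */
    static int __init example_proctlb_init(void)
    {
            if (!proc_create("tlb", 0, NULL, &proc_tlb_operations))
                    return -ENOMEM;     /* illustrative; the original returns 0 regardless */
            return 0;
    }
    late_initcall(example_proctlb_init);
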
index b56b2741cdea6746c0a445408c5d40f40786fec5..721f15f3cebf1915d67f2b83ab46574697517155 100644 (file)
@@ -34,8 +34,7 @@
 #include <linux/hardirq.h>
 #include <linux/irq.h>
 #include <linux/thread_info.h>
-
-#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+#include <linux/kbuild.h>
 
 int main(void)
 {
index d1fa24401dc6995d92410efb3948989049682e52..cb9d883d493c4b3ad30dbefed9766de9980faf0e 100644 (file)
@@ -212,7 +212,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info,
 
        /* Set up registers for signal handler */
        wrusp((unsigned long)frame);
-       if (get_personality & FDPIC_FUNCPTRS) {
+       if (current->personality & FDPIC_FUNCPTRS) {
                struct fdpic_func_descriptor __user *funcptr =
                        (struct fdpic_func_descriptor *) ka->sa.sa_handler;
                __get_user(regs->pc, &funcptr->text);
index fddce32901a2249e31af17a17bacb3afa768667d..024f418ae5430b36951c67072ac312e5c139c58c 100644 (file)
@@ -499,20 +499,17 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
 #if defined(CONFIG_JOYSTICK_AD7142) || defined(CONFIG_JOYSTICK_AD7142_MODULE)
        {
                I2C_BOARD_INFO("ad7142_joystick", 0x2C),
-               .type = "ad7142_joystick",
                .irq = 39,
        },
 #endif
 #if defined(CONFIG_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE)
        {
                I2C_BOARD_INFO("pcf8574_lcd", 0x22),
-               .type = "pcf8574_lcd",
        },
 #endif
 #if defined(CONFIG_TWI_KEYPAD) || defined(CONFIG_TWI_KEYPAD_MODULE)
        {
                I2C_BOARD_INFO("pcf8574_keypad", 0x27),
-               .type = "pcf8574_keypad",
                .irq = 39,
        },
 #endif
index 0cec14b1ef5c0ee45345dcdcd05303e30857e9ea..d3727b7c2d7d94da933ec76834c2a2378764b407 100644 (file)
@@ -751,20 +751,17 @@ static struct i2c_board_info __initdata bfin_i2c_board_info[] = {
 #if defined(CONFIG_JOYSTICK_AD7142) || defined(CONFIG_JOYSTICK_AD7142_MODULE)
        {
                I2C_BOARD_INFO("ad7142_joystick", 0x2C),
-               .type = "ad7142_joystick",
                .irq = 55,
        },
 #endif
 #if defined(CONFIG_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE)
        {
                I2C_BOARD_INFO("pcf8574_lcd", 0x22),
-               .type = "pcf8574_lcd",
        },
 #endif
 #if defined(CONFIG_TWI_KEYPAD) || defined(CONFIG_TWI_KEYPAD_MODULE)
        {
                I2C_BOARD_INFO("pcf8574_keypad", 0x27),
-               .type = "pcf8574_keypad",
                .irq = 72,
        },
 #endif
index 231dfbd3bc1ffcefc1a9c11b94ad885ab1d376d3..b00f68ac6bc991e972a765fe670bfff67bcc5ea8 100644 (file)
@@ -641,13 +641,11 @@ static struct i2c_board_info __initdata bfin_i2c_board_info1[] = {
 #if defined(CONFIG_TWI_LCD) || defined(CONFIG_TWI_LCD_MODULE)
        {
                I2C_BOARD_INFO("pcf8574_lcd", 0x22),
-               .type = "pcf8574_lcd",
        },
 #endif
 #if defined(CONFIG_TWI_KEYPAD) || defined(CONFIG_TWI_KEYPAD_MODULE)
        {
                I2C_BOARD_INFO("pcf8574_keypad", 0x27),
-               .type = "pcf8574_keypad",
                .irq = 212,
        },
 #endif
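
The Blackfin board hunks above delete the explicit .type initialisers because I2C_BOARD_INFO() already sets the type from its first argument, so the extra member was redundant. A hedged sketch of the resulting entry (the macro expansion quoted in the comment is my recollection of <linux/i2c.h>, not taken from this diff):

    #include <linux/init.h>
    #include <linux/i2c.h>

    /* I2C_BOARD_INFO(name, addr) is assumed to expand to roughly
     *     .type = (name), .addr = (addr)
     * so a following ".type =" initialiser only repeats what the macro set. */
    static struct i2c_board_info __initdata example_i2c_board_info[] = {
            {
                    I2C_BOARD_INFO("pcf8574_keypad", 0x27),
                    .irq = 39,          /* board specific, as in the hunks above */
            },
    };
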
index aad0a9e5991a3e358685726cb5fed98afa4de8ed..44f7b4f794760dcaebf8cae38f114a39739e4d6f 100644 (file)
@@ -75,9 +75,9 @@ __init init_cris_profile(void)
 
        sample_buffer_pos = sample_buffer;
 
-       entry = create_proc_entry("system_profile", S_IWUSR | S_IRUGO, NULL);
+       entry = proc_create("system_profile", S_IWUSR | S_IRUGO, NULL,
+                           &cris_proc_profile_operations);
        if (entry) {
-               entry->proc_fops = &cris_proc_profile_operations;
                entry->size = SAMPLE_BUFFER_SIZE;
        }
        prof_running = 1;
index 4207a2b527504d33cd4c72f28fc7f1d3d31effd9..5b06ffa15e347868c8ddfa5c796744b42fa4db3e 100644 (file)
@@ -27,7 +27,6 @@ show_mem(void)
 
        printk("\nMem-info:\n");
        show_free_areas();
-       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        i = max_mapnr;
        while (i-- > 0) {
                total++;
index fbb19fc1af407e83fc3d52e7422a7b370eb25f5c..9de96843a27808d0f98e0221057f6d2b53a02f06 100644 (file)
@@ -7,15 +7,13 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
+#include <linux/kbuild.h>
 #include <asm/registers.h>
 #include <asm/ucontext.h>
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 #include <asm/gdb-stub.h>
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
 #define DEF_PTREG(sym, reg) \
         asm volatile("\n->" #sym " %0 offsetof(struct pt_regs, " #reg ")" \
                     : : "i" (offsetof(struct pt_regs, reg)))
         asm volatile("\n->" #sym " %0 offsetof(struct frv_frame0, " #reg ")" \
                     : : "i" (offsetof(struct frv_frame0, reg)))
 
-#define BLANK() asm volatile("\n->" : : )
-
-#define OFFSET(sym, str, mem) \
-       DEFINE(sym, offsetof(struct str, mem));
-
 void foo(void)
 {
        /* offsets into the thread_info structure */
index c57ce3f1f2e281b2c38a7de860d390cbc08e5fd0..73f3aeefd2034ac0b0f35d8268ad7adac631d314 100644 (file)
@@ -163,14 +163,11 @@ static int sysctl_pm_do_suspend(ctl_table *ctl, int write, struct file *filp,
        if ((mode != 1) && (mode != 5))
                return -EINVAL;
 
-       retval = pm_send_all(PM_SUSPEND, (void *)3);
-
        if (retval == 0) {
                if (mode == 5)
                    retval = pm_do_bus_sleep();
                else
                    retval = pm_do_suspend();
-               pm_send_all(PM_RESUME, (void *)0);
        }
 
        return retval;
@@ -183,9 +180,6 @@ static int try_set_cmode(int new_cmode)
        if (!(clock_cmodes_permitted & (1<<new_cmode)))
                return -EINVAL;
 
-       /* tell all the drivers we're suspending */
-       pm_send_all(PM_SUSPEND, (void *)3);
-
        /* now change cmode */
        local_irq_disable();
        frv_dma_pause_all();
@@ -201,8 +195,6 @@ static int try_set_cmode(int new_cmode)
        frv_dma_resume_all();
        local_irq_enable();
 
-       /* tell all the drivers we're resuming */
-       pm_send_all(PM_RESUME, (void *)0);
        return 0;
 }
 
index d64bcaff54cd441ebd76c735c9ad878109c76223..3bdb368292a8c8a53f9c00b3aa39dc169420b095 100644 (file)
@@ -297,7 +297,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set)
        __frame->lr   = (unsigned long) &frame->retcode;
        __frame->gr8  = sig;
 
-       if (get_personality & FDPIC_FUNCPTRS) {
+       if (current->personality & FDPIC_FUNCPTRS) {
                struct fdpic_func_descriptor __user *funcptr =
                        (struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
                __get_user(__frame->pc, &funcptr->text);
@@ -396,7 +396,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        __frame->gr8 = sig;
        __frame->gr9 = (unsigned long) &frame->info;
 
-       if (get_personality & FDPIC_FUNCPTRS) {
+       if (current->personality & FDPIC_FUNCPTRS) {
                struct fdpic_func_descriptor __user *funcptr =
                        (struct fdpic_func_descriptor __user *) ka->sa.sa_handler;
                __get_user(__frame->pc, &funcptr->text);
index 7089c2428b3f14507ffbf1e356bc59f5da81aa75..1d2dfe67d4426b7805e595f75293b5b46e5f6a8f 100644 (file)
@@ -49,7 +49,7 @@ asmlinkage void insn_access_error(unsigned long esfr1, unsigned long epcr0, unsi
        info.si_signo   = SIGSEGV;
        info.si_code    = SEGV_ACCERR;
        info.si_errno   = 0;
-       info.si_addr    = (void *) ((epcr0 & EPCR0_V) ? (epcr0 & EPCR0_PC) : __frame->pc);
+       info.si_addr    = (void __user *) ((epcr0 & EPCR0_V) ? (epcr0 & EPCR0_PC) : __frame->pc);
 
        force_sig_info(info.si_signo, &info, current);
 } /* end insn_access_error() */
@@ -73,7 +73,7 @@ asmlinkage void illegal_instruction(unsigned long esfr1, unsigned long epcr0, un
                      epcr0, esr0, esfr1);
 
        info.si_errno   = 0;
-       info.si_addr    = (void *) ((epcr0 & EPCR0_V) ? (epcr0 & EPCR0_PC) : __frame->pc);
+       info.si_addr    = (void __user *) ((epcr0 & EPCR0_V) ? (epcr0 & EPCR0_PC) : __frame->pc);
 
        switch (__frame->tbr & TBR_TT) {
        case TBR_TT_ILLEGAL_INSTR:
@@ -111,7 +111,8 @@ asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
                                 unsigned long esr0)
 {
        static DEFINE_SPINLOCK(atomic_op_lock);
-       unsigned long x, y, z, *p;
+       unsigned long x, y, z;
+       unsigned long __user *p;
        mm_segment_t oldfs;
        siginfo_t info;
        int ret;
@@ -128,7 +129,7 @@ asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
                 * u32 __atomic_user_cmpxchg32(u32 *ptr, u32 test, u32 new)
                 */
        case TBR_TT_ATOMIC_CMPXCHG32:
-               p = (unsigned long *) __frame->gr8;
+               p = (unsigned long __user *) __frame->gr8;
                x = __frame->gr9;
                y = __frame->gr10;
 
@@ -158,7 +159,7 @@ asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
                 * u32 __atomic_kernel_xchg32(void *v, u32 new)
                 */
        case TBR_TT_ATOMIC_XCHG32:
-               p = (unsigned long *) __frame->gr8;
+               p = (unsigned long __user *) __frame->gr8;
                y = __frame->gr9;
 
                for (;;) {
@@ -181,7 +182,7 @@ asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
                 * ulong __atomic_kernel_XOR_return(ulong i, ulong *v)
                 */
        case TBR_TT_ATOMIC_XOR:
-               p = (unsigned long *) __frame->gr8;
+               p = (unsigned long __user *) __frame->gr8;
                x = __frame->gr9;
 
                for (;;) {
@@ -205,7 +206,7 @@ asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
                 * ulong __atomic_kernel_OR_return(ulong i, ulong *v)
                 */
        case TBR_TT_ATOMIC_OR:
-               p = (unsigned long *) __frame->gr8;
+               p = (unsigned long __user *) __frame->gr8;
                x = __frame->gr9;
 
                for (;;) {
@@ -229,7 +230,7 @@ asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
                 * ulong __atomic_kernel_AND_return(ulong i, ulong *v)
                 */
        case TBR_TT_ATOMIC_AND:
-               p = (unsigned long *) __frame->gr8;
+               p = (unsigned long __user *) __frame->gr8;
                x = __frame->gr9;
 
                for (;;) {
@@ -253,7 +254,7 @@ asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
                 * int __atomic_user_sub_return(atomic_t *v, int i)
                 */
        case TBR_TT_ATOMIC_SUB:
-               p = (unsigned long *) __frame->gr8;
+               p = (unsigned long __user *) __frame->gr8;
                x = __frame->gr9;
 
                for (;;) {
@@ -277,7 +278,7 @@ asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
                 * int __atomic_user_add_return(atomic_t *v, int i)
                 */
        case TBR_TT_ATOMIC_ADD:
-               p = (unsigned long *) __frame->gr8;
+               p = (unsigned long __user *) __frame->gr8;
                x = __frame->gr9;
 
                for (;;) {
@@ -322,7 +323,7 @@ error:
        info.si_signo   = SIGSEGV;
        info.si_code    = SEGV_ACCERR;
        info.si_errno   = 0;
-       info.si_addr    = (void *) __frame->pc;
+       info.si_addr    = (void __user *) __frame->pc;
 
        force_sig_info(info.si_signo, &info, current);
 }
@@ -343,7 +344,7 @@ asmlinkage void media_exception(unsigned long msr0, unsigned long msr1)
        info.si_signo   = SIGFPE;
        info.si_code    = FPE_MDAOVF;
        info.si_errno   = 0;
-       info.si_addr    = (void *) __frame->pc;
+       info.si_addr    = (void __user *) __frame->pc;
 
        force_sig_info(info.si_signo, &info, current);
 } /* end media_exception() */
@@ -361,11 +362,8 @@ asmlinkage void memory_access_exception(unsigned long esr0,
 #ifdef CONFIG_MMU
        unsigned long fixup;
 
-       if ((esr0 & ESRx_EC) == ESRx_EC_DATA_ACCESS)
-               if (handle_misalignment(esr0, ear0, epcr0) == 0)
-                       return;
-
-       if ((fixup = search_exception_table(__frame->pc)) != 0) {
+       fixup = search_exception_table(__frame->pc);
+       if (fixup) {
                __frame->pc = fixup;
                return;
        }
@@ -383,7 +381,7 @@ asmlinkage void memory_access_exception(unsigned long esr0,
        info.si_addr    = NULL;
 
        if ((esr0 & (ESRx_VALID | ESR0_EAV)) == (ESRx_VALID | ESR0_EAV))
-               info.si_addr = (void *) ear0;
+               info.si_addr = (void __user *) ear0;
 
        force_sig_info(info.si_signo, &info, current);
 
@@ -412,7 +410,7 @@ asmlinkage void data_access_error(unsigned long esfr1, unsigned long esr15, unsi
        info.si_signo   = SIGSEGV;
        info.si_code    = SEGV_ACCERR;
        info.si_errno   = 0;
-       info.si_addr    = (void *)
+       info.si_addr    = (void __user *)
                (((esr15 & (ESRx_VALID|ESR15_EAV)) == (ESRx_VALID|ESR15_EAV)) ? ear15 : 0);
 
        force_sig_info(info.si_signo, &info, current);
@@ -446,7 +444,7 @@ asmlinkage void division_exception(unsigned long esfr1, unsigned long esr0, unsi
        info.si_signo   = SIGFPE;
        info.si_code    = FPE_INTDIV;
        info.si_errno   = 0;
-       info.si_addr    = (void *) __frame->pc;
+       info.si_addr    = (void __user *) __frame->pc;
 
        force_sig_info(info.si_signo, &info, current);
 } /* end division_exception() */
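
The frv traps.c hunks above add __user annotations so that sparse can verify fault addresses and trapped operand pointers are handled as userspace pointers. A small hedged example of the annotation in use (the helper name and shape are mine, not from the file):

    #include <linux/uaccess.h>

    /* Hedged sketch: with "make C=1" (sparse), mixing plain and __user pointers
     * without an explicit cast is flagged as an address-space violation. */
    static int read_user_word(unsigned long uaddr, unsigned long *out)
    {
            unsigned long __user *p = (unsigned long __user *)uaddr;

            return get_user(*out, p);   /* 0 on success, -EFAULT on bad access */
    }
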
index 068fa04bd52752764b4d6d3838609d7b64042f45..35f6df28351ec103b59570f01e79dd66b49167d5 100644 (file)
@@ -13,8 +13,8 @@
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-       unsigned long start = pci_resource_start(dev, bar);
-       unsigned long len = pci_resource_len(dev, bar);
+       resource_size_t start = pci_resource_start(dev, bar);
+       resource_size_t len = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);
 
        if (!len || !start)
index fb8b1d860f46b1f71e60684534c38aa16f2812c6..1bca5ab8a6ab085aa35fdb1c14574b7dc2adb7a7 100644 (file)
@@ -6,4 +6,4 @@ obj-y := init.o kmap.o
 
 obj-$(CONFIG_MMU) += \
        pgalloc.o highmem.o fault.o extable.o cache-page.o tlb-flush.o tlb-miss.o \
-       mmu-context.o dma-alloc.o unaligned.o elf-fdpic.o
+       mmu-context.o dma-alloc.o elf-fdpic.o
diff --git a/arch/frv/mm/unaligned.c b/arch/frv/mm/unaligned.c
deleted file mode 100644 (file)
index 8f0375f..0000000
+++ /dev/null
@@ -1,217 +0,0 @@
-/* unaligned.c: unalignment fixup handler for CPUs on which it is supported (FR451 only)
- *
- * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/sched.h>
-#include <linux/signal.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/types.h>
-#include <linux/user.h>
-#include <linux/string.h>
-#include <linux/linkage.h>
-#include <linux/init.h>
-
-#include <asm/setup.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#if 0
-#define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ )
-#else
-#define kdebug(fmt, ...) do {} while(0)
-#endif
-
-#define _MA_SIGNED     0x01
-#define _MA_HALF       0x02
-#define _MA_WORD       0x04
-#define _MA_DWORD      0x08
-#define _MA_SZ_MASK    0x0e
-#define _MA_LOAD       0x10
-#define _MA_STORE      0x20
-#define _MA_UPDATE     0x40
-#define _MA_IMM                0x80
-
-#define _MA_LDxU       _MA_LOAD | _MA_UPDATE
-#define _MA_LDxI       _MA_LOAD | _MA_IMM
-#define _MA_STxU       _MA_STORE | _MA_UPDATE
-#define _MA_STxI       _MA_STORE | _MA_IMM
-
-static const uint8_t tbl_LDGRk_reg[0x40] = {
-       [0x02] = _MA_LOAD | _MA_HALF | _MA_SIGNED,      /* LDSH  @(GRi,GRj),GRk */
-       [0x03] = _MA_LOAD | _MA_HALF,                   /* LDUH  @(GRi,GRj),GRk */
-       [0x04] = _MA_LOAD | _MA_WORD,                   /* LD    @(GRi,GRj),GRk */
-       [0x05] = _MA_LOAD | _MA_DWORD,                  /* LDD   @(GRi,GRj),GRk */
-       [0x12] = _MA_LDxU | _MA_HALF | _MA_SIGNED,      /* LDSHU @(GRi,GRj),GRk */
-       [0x13] = _MA_LDxU | _MA_HALF,                   /* LDUHU @(GRi,GRj),GRk */
-       [0x14] = _MA_LDxU | _MA_WORD,                   /* LDU   @(GRi,GRj),GRk */
-       [0x15] = _MA_LDxU | _MA_DWORD,                  /* LDDU  @(GRi,GRj),GRk */
-};
-
-static const uint8_t tbl_STGRk_reg[0x40] = {
-       [0x01] = _MA_STORE | _MA_HALF,                  /* STH   @(GRi,GRj),GRk */
-       [0x02] = _MA_STORE | _MA_WORD,                  /* ST    @(GRi,GRj),GRk */
-       [0x03] = _MA_STORE | _MA_DWORD,                 /* STD   @(GRi,GRj),GRk */
-       [0x11] = _MA_STxU  | _MA_HALF,                  /* STHU  @(GRi,GRj),GRk */
-       [0x12] = _MA_STxU  | _MA_WORD,                  /* STU   @(GRi,GRj),GRk */
-       [0x13] = _MA_STxU  | _MA_DWORD,                 /* STDU  @(GRi,GRj),GRk */
-};
-
-static const uint8_t tbl_LDSTGRk_imm[0x80] = {
-       [0x31] = _MA_LDxI | _MA_HALF | _MA_SIGNED,      /* LDSHI @(GRi,d12),GRk */
-       [0x32] = _MA_LDxI | _MA_WORD,                   /* LDI   @(GRi,d12),GRk */
-       [0x33] = _MA_LDxI | _MA_DWORD,                  /* LDDI  @(GRi,d12),GRk */
-       [0x36] = _MA_LDxI | _MA_HALF,                   /* LDUHI @(GRi,d12),GRk */
-       [0x51] = _MA_STxI | _MA_HALF,                   /* STHI  @(GRi,d12),GRk */
-       [0x52] = _MA_STxI | _MA_WORD,                   /* STI   @(GRi,d12),GRk */
-       [0x53] = _MA_STxI | _MA_DWORD,                  /* STDI  @(GRi,d12),GRk */
-};
-
-
-/*****************************************************************************/
-/*
- * see if we can handle the exception by fixing up a misaligned memory access
- */
-int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0)
-{
-       unsigned long insn, addr, *greg;
-       int GRi, GRj, GRk, D12, op;
-
-       union {
-               uint64_t _64;
-               uint32_t _32[2];
-               uint16_t _16;
-               uint8_t _8[8];
-       } x;
-
-       if (!(esr0 & ESR0_EAV) || !(epcr0 & EPCR0_V) || !(ear0 & 7))
-               return -EAGAIN;
-
-       epcr0 &= EPCR0_PC;
-
-       if (__frame->pc != epcr0) {
-               kdebug("MISALIGN: Execution not halted on excepting instruction\n");
-               BUG();
-       }
-
-       if (__get_user(insn, (unsigned long *) epcr0) < 0)
-               return -EFAULT;
-
-       /* determine the instruction type first */
-       switch ((insn >> 18) & 0x7f) {
-       case 0x2:
-               /* LDx @(GRi,GRj),GRk */
-               op = tbl_LDGRk_reg[(insn >> 6) & 0x3f];
-               break;
-
-       case 0x3:
-               /* STx GRk,@(GRi,GRj) */
-               op = tbl_STGRk_reg[(insn >> 6) & 0x3f];
-               break;
-
-       default:
-               op = tbl_LDSTGRk_imm[(insn >> 18) & 0x7f];
-               break;
-       }
-
-       if (!op)
-               return -EAGAIN;
-
-       kdebug("MISALIGN: pc=%08lx insn=%08lx ad=%08lx op=%02x\n", epcr0, insn, ear0, op);
-
-       memset(&x, 0xba, 8);
-
-       /* validate the instruction parameters */
-       greg = (unsigned long *) &__frame->tbr;
-
-       GRi = (insn >> 12) & 0x3f;
-       GRk = (insn >> 25) & 0x3f;
-
-       if (GRi > 31 || GRk > 31)
-               return -ENOENT;
-
-       if (op & _MA_DWORD && GRk & 1)
-               return -EINVAL;
-
-       if (op & _MA_IMM) {
-               D12 = insn & 0xfff;
-               asm ("slli %0,#20,%0 ! srai %0,#20,%0" : "=r"(D12) : "0"(D12)); /* sign extend */
-               addr = (GRi ? greg[GRi] : 0) + D12;
-       }
-       else {
-               GRj = (insn >>  0) & 0x3f;
-               if (GRj > 31)
-                       return -ENOENT;
-               addr = (GRi ? greg[GRi] : 0) + (GRj ? greg[GRj] : 0);
-       }
-
-       if (addr != ear0) {
-               kdebug("MISALIGN: Calculated addr (%08lx) does not match EAR0 (%08lx)\n",
-                      addr, ear0);
-               return -EFAULT;
-       }
-
-       /* check the address is okay */
-       if (user_mode(__frame) && ___range_ok(ear0, 8) < 0)
-               return -EFAULT;
-
-       /* perform the memory op */
-       if (op & _MA_STORE) {
-               /* perform a store */
-               x._32[0] = 0;
-               if (GRk != 0) {
-                       if (op & _MA_HALF) {
-                               x._16 = greg[GRk];
-                       }
-                       else {
-                               x._32[0] = greg[GRk];
-                       }
-               }
-               if (op & _MA_DWORD)
-                       x._32[1] = greg[GRk + 1];
-
-               kdebug("MISALIGN: Store GR%d { %08x:%08x } -> %08lx (%dB)\n",
-                      GRk, x._32[1], x._32[0], addr, op & _MA_SZ_MASK);
-
-               if (__memcpy_user((void *) addr, &x, op & _MA_SZ_MASK) != 0)
-                       return -EFAULT;
-       }
-       else {
-               /* perform a load */
-               if (__memcpy_user(&x, (void *) addr, op & _MA_SZ_MASK) != 0)
-                       return -EFAULT;
-
-               if (op & _MA_HALF) {
-                       if (op & _MA_SIGNED)
-                               asm ("slli %0,#16,%0 ! srai %0,#16,%0"
-                                    : "=r"(x._32[0]) : "0"(x._16));
-                       else
-                               asm ("sethi #0,%0"
-                                    : "=r"(x._32[0]) : "0"(x._16));
-               }
-
-               kdebug("MISALIGN: Load %08lx (%dB) -> GR%d, { %08x:%08x }\n",
-                      addr, op & _MA_SZ_MASK, GRk, x._32[1], x._32[0]);
-
-               if (GRk != 0)
-                       greg[GRk] = x._32[0];
-               if (op & _MA_DWORD)
-                       greg[GRk + 1] = x._32[1];
-       }
-
-       /* update the base pointer if required */
-       if (op & _MA_UPDATE)
-               greg[GRi] = addr;
-
-       /* well... we've done that insn */
-       __frame->pc = __frame->pc + 4;
-
-       return 0;
-} /* end handle_misalignment() */
index fc30b4fd0914fcb607d4449b48cea1ff86772f36..2042552e08714ea60db579e3933a39227efd7018 100644 (file)
 #include <linux/kernel_stat.h>
 #include <linux/ptrace.h>
 #include <linux/hardirq.h>
+#include <linux/kbuild.h>
 #include <asm/bootinfo.h>
 #include <asm/irq.h>
 #include <asm/ptrace.h>
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 int main(void)
 {
        /* offsets into the task struct */
index cd13e138bd03800edd9ab5cc935d8e8c5418bb0e..0df5f6f75edf7a016d7033a85519e778d4cfcc7c 100644 (file)
@@ -19,6 +19,8 @@ config IA64
        select HAVE_OPROFILE
        select HAVE_KPROBES
        select HAVE_KRETPROBES
+       select HAVE_DMA_ATTRS
+       select HAVE_KVM
        default y
        help
          The Itanium Processor Family is Intel's 64-bit successor to
@@ -46,6 +48,9 @@ config MMU
 config SWIOTLB
        bool
 
+config IOMMU_HELPER
+       bool
+
 config GENERIC_LOCKBREAK
        bool
        default y
@@ -589,6 +594,8 @@ config MSPEC
 
 source "fs/Kconfig"
 
+source "arch/ia64/kvm/Kconfig"
+
 source "lib/Kconfig"
 
 #
@@ -612,7 +619,7 @@ config IRQ_PER_CPU
        default y
 
 config IOMMU_HELPER
-       def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC)
+       def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB)
 
 source "arch/ia64/hp/sim/Kconfig"
 
index f1645c4f70393c5294ecc9148300c8368178d5f5..ec4cca477f491c31cf74b79f184e1fce812cf4dc 100644 (file)
@@ -57,6 +57,7 @@ core-$(CONFIG_IA64_GENERIC)   += arch/ia64/dig/
 core-$(CONFIG_IA64_HP_ZX1)     += arch/ia64/dig/
 core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
 core-$(CONFIG_IA64_SGI_SN2)    += arch/ia64/sn/
+core-$(CONFIG_KVM)             += arch/ia64/kvm/
 
 drivers-$(CONFIG_PCI)          += arch/ia64/pci/
 drivers-$(CONFIG_IA64_HP_SIM)  += arch/ia64/hp/sim/
index 8f6bcfe1dadaa0fab0ec16fd2734ddf87d916450..1c44ec2a1d58e2495cb21f41759dcee218471ffc 100644 (file)
 extern int swiotlb_late_init_with_default_size (size_t size);
 extern ia64_mv_dma_alloc_coherent      swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent       swiotlb_free_coherent;
-extern ia64_mv_dma_map_single          swiotlb_map_single;
-extern ia64_mv_dma_unmap_single                swiotlb_unmap_single;
-extern ia64_mv_dma_map_sg              swiotlb_map_sg;
-extern ia64_mv_dma_unmap_sg            swiotlb_unmap_sg;
+extern ia64_mv_dma_map_single_attrs    swiotlb_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs  swiotlb_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs                swiotlb_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs      swiotlb_unmap_sg_attrs;
 extern ia64_mv_dma_supported           swiotlb_dma_supported;
 extern ia64_mv_dma_mapping_error       swiotlb_dma_mapping_error;
 
@@ -31,19 +31,19 @@ extern ia64_mv_dma_mapping_error    swiotlb_dma_mapping_error;
 
 extern ia64_mv_dma_alloc_coherent      sba_alloc_coherent;
 extern ia64_mv_dma_free_coherent       sba_free_coherent;
-extern ia64_mv_dma_map_single          sba_map_single;
-extern ia64_mv_dma_unmap_single                sba_unmap_single;
-extern ia64_mv_dma_map_sg              sba_map_sg;
-extern ia64_mv_dma_unmap_sg            sba_unmap_sg;
+extern ia64_mv_dma_map_single_attrs    sba_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs  sba_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs                sba_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs      sba_unmap_sg_attrs;
 extern ia64_mv_dma_supported           sba_dma_supported;
 extern ia64_mv_dma_mapping_error       sba_dma_mapping_error;
 
 #define hwiommu_alloc_coherent         sba_alloc_coherent
 #define hwiommu_free_coherent          sba_free_coherent
-#define hwiommu_map_single             sba_map_single
-#define hwiommu_unmap_single           sba_unmap_single
-#define hwiommu_map_sg                 sba_map_sg
-#define hwiommu_unmap_sg               sba_unmap_sg
+#define hwiommu_map_single_attrs       sba_map_single_attrs
+#define hwiommu_unmap_single_attrs     sba_unmap_single_attrs
+#define hwiommu_map_sg_attrs           sba_map_sg_attrs
+#define hwiommu_unmap_sg_attrs         sba_unmap_sg_attrs
 #define hwiommu_dma_supported          sba_dma_supported
 #define hwiommu_dma_mapping_error      sba_dma_mapping_error
 #define hwiommu_sync_single_for_cpu    machvec_dma_sync_single
@@ -98,41 +98,48 @@ hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma
 }
 
 dma_addr_t
-hwsw_map_single (struct device *dev, void *addr, size_t size, int dir)
+hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
+                      struct dma_attrs *attrs)
 {
        if (use_swiotlb(dev))
-               return swiotlb_map_single(dev, addr, size, dir);
+               return swiotlb_map_single_attrs(dev, addr, size, dir, attrs);
        else
-               return hwiommu_map_single(dev, addr, size, dir);
+               return hwiommu_map_single_attrs(dev, addr, size, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_map_single_attrs);
 
 void
-hwsw_unmap_single (struct device *dev, dma_addr_t iova, size_t size, int dir)
+hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+                        int dir, struct dma_attrs *attrs)
 {
        if (use_swiotlb(dev))
-               return swiotlb_unmap_single(dev, iova, size, dir);
+               return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs);
        else
-               return hwiommu_unmap_single(dev, iova, size, dir);
+               return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs);
 }
-
+EXPORT_SYMBOL(hwsw_unmap_single_attrs);
 
 int
-hwsw_map_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+                  int dir, struct dma_attrs *attrs)
 {
        if (use_swiotlb(dev))
-               return swiotlb_map_sg(dev, sglist, nents, dir);
+               return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs);
        else
-               return hwiommu_map_sg(dev, sglist, nents, dir);
+               return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_map_sg_attrs);
 
 void
-hwsw_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+                    int dir, struct dma_attrs *attrs)
 {
        if (use_swiotlb(dev))
-               return swiotlb_unmap_sg(dev, sglist, nents, dir);
+               return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
        else
-               return hwiommu_unmap_sg(dev, sglist, nents, dir);
+               return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
 }
+EXPORT_SYMBOL(hwsw_unmap_sg_attrs);
 
 void
 hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir)
@@ -185,10 +192,6 @@ hwsw_dma_mapping_error (dma_addr_t dma_addr)
 }
 
 EXPORT_SYMBOL(hwsw_dma_mapping_error);
-EXPORT_SYMBOL(hwsw_map_single);
-EXPORT_SYMBOL(hwsw_unmap_single);
-EXPORT_SYMBOL(hwsw_map_sg);
-EXPORT_SYMBOL(hwsw_unmap_sg);
 EXPORT_SYMBOL(hwsw_dma_supported);
 EXPORT_SYMBOL(hwsw_alloc_coherent);
 EXPORT_SYMBOL(hwsw_free_coherent);
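
The hwsw machine-vector hunks thread a struct dma_attrs argument through every mapping wrapper so that attributes requested by a driver reach whichever backend (swiotlb or the SBA IOMMU) is chosen at run time. A hedged caller-side sketch; the helper and flag names follow the new Documentation/DMA-attributes.txt as I understand it and should be treated as assumptions:

    #include <linux/dma-attrs.h>
    #include <linux/dma-mapping.h>

    /* Hedged sketch: build an attribute set and pass it through the *_attrs
     * API added in this merge.  DMA_ATTR_WRITE_BARRIER and init_dma_attrs()
     * are assumed names, not quoted from this diff. */
    static dma_addr_t map_with_write_barrier(struct device *dev, void *buf, size_t len)
    {
            struct dma_attrs attrs;

            init_dma_attrs(&attrs);
            dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
            return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, &attrs);
    }
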
index 9409de5c94412c986442c881a9e31258f34b99a9..34421aed1e2ab95ff0d4f4d4e458003b409fd1a2 100644 (file)
@@ -899,16 +899,18 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
 }
 
 /**
- * sba_map_single - map one buffer and return IOVA for DMA
+ * sba_map_single_attrs - map one buffer and return IOVA for DMA
  * @dev: instance of PCI owned by the driver that's asking.
  * @addr:  driver buffer to map.
  * @size:  number of bytes to map in driver buffer.
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
 dma_addr_t
-sba_map_single(struct device *dev, void *addr, size_t size, int dir)
+sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir,
+                    struct dma_attrs *attrs)
 {
        struct ioc *ioc;
        dma_addr_t iovp;
@@ -932,7 +934,8 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
                ** Device is bit capable of DMA'ing to the buffer...
                ** just return the PCI address of ptr
                */
-               DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
+               DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
+                          "0x%lx/0x%lx\n",
                           to_pci_dev(dev)->dma_mask, pci_addr);
                return pci_addr;
        }
@@ -953,7 +956,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
 
 #ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
-       if (sba_check_pdir(ioc,"Check before sba_map_single()"))
+       if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
                panic("Sanity check failed");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -982,11 +985,12 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir)
        /* form complete address */
 #ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
-       sba_check_pdir(ioc,"Check after sba_map_single()");
+       sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
        return SBA_IOVA(ioc, iovp, offset);
 }
+EXPORT_SYMBOL(sba_map_single_attrs);
 
 #ifdef ENABLE_MARK_CLEAN
 static SBA_INLINE void
@@ -1013,15 +1017,17 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
 #endif
 
 /**
- * sba_unmap_single - unmap one IOVA and free resources
+ * sba_unmap_single_attrs - unmap one IOVA and free resources
  * @dev: instance of PCI owned by the driver that's asking.
  * @iova:  IOVA of driver buffer previously mapped.
  * @size:  number of bytes mapped in driver buffer.
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
+void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
+                           int dir, struct dma_attrs *attrs)
 {
        struct ioc *ioc;
 #if DELAYED_RESOURCE_CNT > 0
@@ -1038,7 +1044,8 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
                /*
                ** Address does not fall w/in IOVA, must be bypassing
                */
-               DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);
+               DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
+                          iova);
 
 #ifdef ENABLE_MARK_CLEAN
                if (dir == DMA_FROM_DEVICE) {
@@ -1087,7 +1094,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif /* DELAYED_RESOURCE_CNT == 0 */
 }
-
+EXPORT_SYMBOL(sba_unmap_single_attrs);
 
 /**
  * sba_alloc_coherent - allocate/map shared mem for DMA
@@ -1144,7 +1151,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
         * If device can't bypass or bypass is disabled, pass the 32bit fake
         * device to map single to get an iova mapping.
         */
-       *dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);
+       *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
+                                          size, 0, NULL);
 
        return addr;
 }
@@ -1161,7 +1169,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp
  */
 void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
 {
-       sba_unmap_single(dev, dma_handle, size, 0);
+       sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
        free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -1410,10 +1418,12 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
  * @sglist:  array of buffer/length pairs
  * @nents:  number of entries in list
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
+int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents,
+                    int dir, struct dma_attrs *attrs)
 {
        struct ioc *ioc;
        int coalesced, filled = 0;
@@ -1441,16 +1451,16 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
        /* Fast path single entry scatterlists. */
        if (nents == 1) {
                sglist->dma_length = sglist->length;
-               sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir);
+               sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
                return 1;
        }
 
 #ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
-       if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
+       if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
        {
                sba_dump_sg(ioc, sglist, nents);
-               panic("Check before sba_map_sg()");
+               panic("Check before sba_map_sg_attrs()");
        }
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -1479,10 +1489,10 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 
 #ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
-       if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
+       if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
        {
                sba_dump_sg(ioc, sglist, nents);
-               panic("Check after sba_map_sg()\n");
+               panic("Check after sba_map_sg_attrs()\n");
        }
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
@@ -1492,18 +1502,20 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di
 
        return filled;
 }
-
+EXPORT_SYMBOL(sba_map_sg_attrs);
 
 /**
- * sba_unmap_sg - unmap Scatter/Gather list
+ * sba_unmap_sg_attrs - unmap Scatter/Gather list
  * @dev: instance of PCI owned by the driver that's asking.
  * @sglist:  array of buffer/length pairs
  * @nents:  number of entries in list
  * @dir:  R/W or both.
+ * @attrs: optional dma attributes
  *
  * See Documentation/DMA-mapping.txt
  */
-void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
+void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+                       int nents, int dir, struct dma_attrs *attrs)
 {
 #ifdef ASSERT_PDIR_SANITY
        struct ioc *ioc;
@@ -1518,13 +1530,14 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
        ASSERT(ioc);
 
        spin_lock_irqsave(&ioc->res_lock, flags);
-       sba_check_pdir(ioc,"Check before sba_unmap_sg()");
+       sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
        while (nents && sglist->dma_length) {
 
-               sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
+               sba_unmap_single_attrs(dev, sglist->dma_address,
+                                      sglist->dma_length, dir, attrs);
                sglist = sg_next(sglist);
                nents--;
        }
@@ -1533,11 +1546,12 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in
 
 #ifdef ASSERT_PDIR_SANITY
        spin_lock_irqsave(&ioc->res_lock, flags);
-       sba_check_pdir(ioc,"Check after sba_unmap_sg()");
+       sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
        spin_unlock_irqrestore(&ioc->res_lock, flags);
 #endif
 
 }
+EXPORT_SYMBOL(sba_unmap_sg_attrs);
 
 /**************************************************************
 *
@@ -1918,15 +1932,13 @@ static const struct file_operations ioc_fops = {
 static void __init
 ioc_proc_init(void)
 {
-       struct proc_dir_entry *dir, *entry;
+       struct proc_dir_entry *dir;
 
        dir = proc_mkdir("bus/mckinley", NULL);
        if (!dir)
                return;
 
-       entry = create_proc_entry(ioc_list->name, 0, dir);
-       if (entry)
-               entry->proc_fops = &ioc_fops;
+       proc_create(ioc_list->name, 0, dir, &ioc_fops);
 }
 #endif
 
@@ -2166,10 +2178,6 @@ sba_page_override(char *str)
 __setup("sbapagesize=",sba_page_override);
 
 EXPORT_SYMBOL(sba_dma_mapping_error);
-EXPORT_SYMBOL(sba_map_single);
-EXPORT_SYMBOL(sba_unmap_single);
-EXPORT_SYMBOL(sba_map_sg);
-EXPORT_SYMBOL(sba_unmap_sg);
 EXPORT_SYMBOL(sba_dma_supported);
 EXPORT_SYMBOL(sba_alloc_coherent);
 EXPORT_SYMBOL(sba_free_coherent);
index eb0c32a85fd737c3154a016a609cd99b13216266..23cafc80d2a455478197ea21350ee69faa468ae9 100644 (file)
@@ -210,21 +210,23 @@ static void do_softint(struct work_struct *private_)
        printk(KERN_ERR "simserial: do_softint called\n");
 }
 
-static void rs_put_char(struct tty_struct *tty, unsigned char ch)
+static int rs_put_char(struct tty_struct *tty, unsigned char ch)
 {
        struct async_struct *info = (struct async_struct *)tty->driver_data;
        unsigned long flags;
 
-       if (!tty || !info->xmit.buf) return;
+       if (!tty || !info->xmit.buf)
+               return 0;
 
        local_irq_save(flags);
        if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) {
                local_irq_restore(flags);
-               return;
+               return 0;
        }
        info->xmit.buf[info->xmit.head] = ch;
        info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1);
        local_irq_restore(flags);
+       return 1;
 }
 
 static void transmit_chars(struct async_struct *info, int *intr_done)
@@ -621,7 +623,8 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
         * the line discipline to only process XON/XOFF characters.
         */
        shutdown(info);
-       if (tty->driver->flush_buffer) tty->driver->flush_buffer(tty);
+       if (tty->ops->flush_buffer)
+               tty->ops->flush_buffer(tty);
        if (tty->ldisc.flush_buffer) tty->ldisc.flush_buffer(tty);
        info->event = 0;
        info->tty = NULL;
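
The simserial hunks adapt to the tty core changes in this merge: put_char() now reports whether the character was actually queued, and driver methods are reached through tty->ops. A hedged sketch of how a caller can use the new return value (the helper is illustrative):

    #include <linux/errno.h>
    #include <linux/tty.h>
    #include <linux/tty_driver.h>

    /* Hedged sketch: ->put_char() returns 1 when the byte was buffered and 0
     * when it was dropped (no transmit buffer, or the circular buffer is full). */
    static int queue_one_byte(struct tty_struct *tty, unsigned char ch)
    {
            if (!tty->ops->put_char(tty, ch))
                    return -EAGAIN;     /* caller may flush and retry */
            return 0;
    }
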
index 230a6f92367fc3a966d362e2f8126419396b1a55..c64a55af9b95b43651dd2c4e0cf281e8422c99d2 100644 (file)
@@ -9,7 +9,7 @@
 #include <linux/sched.h>
 #include <linux/pid.h>
 #include <linux/clocksource.h>
-
+#include <linux/kbuild.h>
 #include <asm-ia64/processor.h>
 #include <asm-ia64/ptrace.h>
 #include <asm-ia64/siginfo.h>
 #include "../kernel/sigframe.h"
 #include "../kernel/fsyscall_gtod_data.h"
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 void foo(void)
 {
        DEFINE(IA64_TASK_SIZE, sizeof (struct task_struct));
index c8e403752a0c54d5c519b6ada5f3df1b68508bac..7fbb51e10bbe04e70c2cc66570400f3569f3804c 100644 (file)
@@ -6695,16 +6695,12 @@ pfm_init(void)
        /*
         * create /proc/perfmon (mostly for debugging purposes)
         */
-       perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL);
+       perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops);
        if (perfmon_dir == NULL) {
                printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n");
                pmu_conf = NULL;
                return -1;
        }
-       /*
-        * install customized file operations for /proc/perfmon entry
-        */
-       perfmon_dir->proc_fops = &pfm_proc_fops;
 
        /*
         * create /proc/sys/kernel/perfmon (for debugging purposes)
index a5ea817cbcbf0ad19ec512c4d74a9348d14c0b21..58dcfac5ea882ec5529ffdec98d3219cbb58b27d 100644 (file)
@@ -183,7 +183,7 @@ do_notify_resume_user (sigset_t *unused, struct sigscratch *scr, long in_syscall
 #endif
 
        /* deal with pending signal delivery */
-       if (test_thread_flag(TIF_SIGPENDING)||test_thread_flag(TIF_RESTORE_SIGMASK))
+       if (test_thread_flag(TIF_SIGPENDING))
                ia64_do_signal(scr, in_syscall);
 
        /* copy user rbs to kernel rbs */
index b11bb50a197a68ff8aa5f8661ef7552618940c56..ecb9eb78d6877685de4d677d52e4b21292de4ddc 100644 (file)
@@ -648,18 +648,16 @@ salinfo_init(void)
                if (!dir)
                        continue;
 
-               entry = create_proc_entry("event", S_IRUSR, dir);
+               entry = proc_create_data("event", S_IRUSR, dir,
+                                        &salinfo_event_fops, data);
                if (!entry)
                        continue;
-               entry->data = data;
-               entry->proc_fops = &salinfo_event_fops;
                *sdir++ = entry;
 
-               entry = create_proc_entry("data", S_IRUSR | S_IWUSR, dir);
+               entry = proc_create_data("data", S_IRUSR | S_IWUSR, dir,
+                                        &salinfo_data_fops, data);
                if (!entry)
                        continue;
-               entry->data = data;
-               entry->proc_fops = &salinfo_data_fops;
                *sdir++ = entry;
 
                /* we missed any events before now */
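
Here the conversion goes one step further than proc_create(): proc_create_data() attaches the per-entry cookie at creation time, replacing the separate ->data assignments removed above. A hedged sketch of how the file operations retrieve that cookie (names beyond PDE() are illustrative):

    #include <linux/fs.h>
    #include <linux/proc_fs.h>

    /* Hedged sketch: inside the fops, the pointer passed to proc_create_data()
     * comes back via the proc_dir_entry attached to the inode. */
    static int example_event_open(struct inode *inode, struct file *file)
    {
            struct salinfo_data *data = PDE(inode)->data;   /* set by proc_create_data() */

            file->private_data = data;
            return 0;
    }
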
index 16483be18c0b8c6c33834fdb5e87ffe1da52bf70..d7ad42b77d41a2c5d4d105bcfe6dbf42c4984d7c 100644 (file)
@@ -873,7 +873,8 @@ identify_siblings(struct cpuinfo_ia64 *c)
        u16 pltid;
        pal_logical_to_physical_t info;
 
-       if ((status = ia64_pal_logical_to_phys(-1, &info)) != PAL_STATUS_SUCCESS) {
+       status = ia64_pal_logical_to_phys(-1, &info);
+       if (status != PAL_STATUS_SUCCESS) {
                if (status != PAL_STATUS_UNIMPLEMENTED) {
                        printk(KERN_ERR
                                "ia64_pal_logical_to_phys failed with %ld\n",
@@ -885,8 +886,13 @@ identify_siblings(struct cpuinfo_ia64 *c)
                info.overview_cpp  = 1;
                info.overview_tpc  = 1;
        }
-       if ((status = ia64_sal_physical_id_info(&pltid)) != PAL_STATUS_SUCCESS) {
-               printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status);
+
+       status = ia64_sal_physical_id_info(&pltid);
+       if (status != PAL_STATUS_SUCCESS) {
+               if (status != PAL_STATUS_UNIMPLEMENTED)
+                       printk(KERN_ERR
+                               "ia64_sal_pltid failed with %ld\n",
+                               status);
                return;
        }
 
index a2484fc1a06c43482e575888ca1991d45faf43e7..abb17a613b172f2feac86f38981c255d226864ac 100644 (file)
 
 static struct ia64_cpu *sysfs_cpus;
 
+void arch_fix_phys_package_id(int num, u32 slot)
+{
+#ifdef CONFIG_SMP
+       if (cpu_data(num)->socket_id == -1)
+               cpu_data(num)->socket_id = slot;
+#endif
+}
+EXPORT_SYMBOL_GPL(arch_fix_phys_package_id);
+
 int arch_register_cpu(int num)
 {
 #if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
index 2a90c32024f4419fa5cc8f7d918e536c2d934d33..e77995a6e3ed35dc50d6674aba025666f0f648bb 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2001-2006 Silicon Graphics, Inc.  All rights reserved.
+ * Copyright (C) 2001-2008 Silicon Graphics, Inc.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License
@@ -177,12 +177,13 @@ failed:
  * uncached_alloc_page
  *
  * @starting_nid: node id of node to start with, or -1
+ * @n_pages: number of contiguous pages to allocate
  *
- * Allocate 1 uncached page. Allocates on the requested node. If no
- * uncached pages are available on the requested node, roundrobin starting
- * with the next higher node.
+ * Allocate the specified number of contiguous uncached pages on the
+ * requested node. If not enough contiguous uncached pages are available
+ * on the requested node, roundrobin starting with the next higher node.
  */
-unsigned long uncached_alloc_page(int starting_nid)
+unsigned long uncached_alloc_page(int starting_nid, int n_pages)
 {
        unsigned long uc_addr;
        struct uncached_pool *uc_pool;
@@ -202,7 +203,8 @@ unsigned long uncached_alloc_page(int starting_nid)
                if (uc_pool->pool == NULL)
                        continue;
                do {
-                       uc_addr = gen_pool_alloc(uc_pool->pool, PAGE_SIZE);
+                       uc_addr = gen_pool_alloc(uc_pool->pool,
+                                                n_pages * PAGE_SIZE);
                        if (uc_addr != 0)
                                return uc_addr;
                } while (uncached_add_chunk(uc_pool, nid) == 0);
@@ -217,11 +219,12 @@ EXPORT_SYMBOL(uncached_alloc_page);
 /*
  * uncached_free_page
  *
- * @uc_addr: uncached address of page to free
+ * @uc_addr: uncached address of first page to free
+ * @n_pages: number of contiguous pages to free
  *
- * Free a single uncached page.
+ * Free the specified number of uncached pages.
  */
-void uncached_free_page(unsigned long uc_addr)
+void uncached_free_page(unsigned long uc_addr, int n_pages)
 {
        int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
        struct gen_pool *pool = uncached_pools[nid].pool;
@@ -232,7 +235,7 @@ void uncached_free_page(unsigned long uc_addr)
        if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
                panic("uncached_free_page invalid address %lx\n", uc_addr);
 
-       gen_pool_free(pool, uc_addr, PAGE_SIZE);
+       gen_pool_free(pool, uc_addr, n_pages * PAGE_SIZE);
 }
 EXPORT_SYMBOL(uncached_free_page);
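
With the n_pages parameter, one genalloc request hands back a physically contiguous run of uncached pages, and the caller frees the same count. A hedged usage sketch (the header location and the four-page size are assumptions):

    #include <linux/errno.h>
    #include <asm/uncached.h>   /* assumed location of the prototypes */

    /* Hedged sketch: allocate four contiguous uncached pages on any node (-1),
     * use them, then free exactly the same number. */
    static int exercise_uncached_pool(void)
    {
            unsigned long uc_addr = uncached_alloc_page(-1, 4);

            if (!uc_addr)
                    return -ENOMEM;     /* no node could satisfy the request */
            /* ... use the uncached region ... */
            uncached_free_page(uc_addr, 4);     /* free the same number of pages */
            return 0;
    }
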
 
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
new file mode 100644 (file)
index 0000000..7914e48
--- /dev/null
@@ -0,0 +1,49 @@
+#
+# KVM configuration
+#
+config HAVE_KVM
+       bool
+
+menuconfig VIRTUALIZATION
+       bool "Virtualization"
+       depends on HAVE_KVM || IA64
+       default y
+       ---help---
+         Say Y here to get to see options for using your Linux host to run other
+         operating systems inside virtual machines (guests).
+         This option alone does not add any kernel code.
+
+         If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+       tristate "Kernel-based Virtual Machine (KVM) support"
+       depends on HAVE_KVM && EXPERIMENTAL
+       select PREEMPT_NOTIFIERS
+       select ANON_INODES
+       ---help---
+         Support hosting fully virtualized guest machines using hardware
+         virtualization extensions.  You will need a fairly recent
+         processor equipped with virtualization extensions. You will also
+         need to select one or more of the processor modules below.
+
+         This module provides access to the hardware capabilities through
+         a character device node named /dev/kvm.
+
+         To compile this as a module, choose M here: the module
+         will be called kvm.
+
+         If unsure, say N.
+
+config KVM_INTEL
+       tristate "KVM for Intel Itanium 2 processors support"
+       depends on KVM && m
+       ---help---
+         Provides support for KVM on Itanium 2 processors equipped with the VT
+         extensions.
+
+config KVM_TRACE
+       bool
+
+endif # VIRTUALIZATION
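The /dev/kvm character device mentioned in the help text is driven from user space with a small ioctl sequence; the sketch below is an illustrative use of the generic KVM API, not code from this patch, and omits most error handling for brevity:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Open the KVM control device, then create one VM and one vcpu. */
    static int create_vm_and_vcpu(int *vm_fd, int *vcpu_fd)
    {
            int kvm_fd = open("/dev/kvm", O_RDWR);

            if (kvm_fd < 0)
                    return -1;
            *vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);
            *vcpu_fd = ioctl(*vm_fd, KVM_CREATE_VCPU, 0);
            return (*vm_fd < 0 || *vcpu_fd < 0) ? -1 : 0;
    }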
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
new file mode 100644 (file)
index 0000000..5235339
--- /dev/null
@@ -0,0 +1,58 @@
+# This Makefile is used to generate asm-offsets.h and to build the sources.
+#
+
+# Generate asm-offsets.h for the vmm module build
+offsets-file := asm-offsets.h
+
+always  := $(offsets-file)
+targets := $(offsets-file)
+targets += arch/ia64/kvm/asm-offsets.s
+clean-files := $(addprefix $(objtree)/,$(targets) $(obj)/memcpy.S $(obj)/memset.S)
+
+# Default sed regexp - multiline due to syntax constraints
+define sed-y
+       "/^->/{s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; s:->::; p;}"
+endef
+
+quiet_cmd_offsets = GEN     $@
+define cmd_offsets
+       (set -e; \
+        echo "#ifndef __ASM_KVM_OFFSETS_H__"; \
+        echo "#define __ASM_KVM_OFFSETS_H__"; \
+        echo "/*"; \
+        echo " * DO NOT MODIFY."; \
+        echo " *"; \
+        echo " * This file was generated by Makefile"; \
+        echo " *"; \
+        echo " */"; \
+        echo ""; \
+        sed -ne $(sed-y) $<; \
+        echo ""; \
+        echo "#endif" ) > $@
+endef
+# We use internal rules to avoid the "is up to date" message from make
+arch/ia64/kvm/asm-offsets.s: arch/ia64/kvm/asm-offsets.c
+       $(call if_changed_dep,cc_s_c)
+
+$(obj)/$(offsets-file): arch/ia64/kvm/asm-offsets.s
+       $(call cmd,offsets)
+
+#
+# Makefile for Kernel-based Virtual Machine module
+#
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
+EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
+
+kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
+obj-$(CONFIG_KVM) += kvm.o
+
+FORCE : $(obj)/$(offsets-file)
+EXTRA_CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
+kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
+       vtlb.o process.o
+# Link in memcpy and memset to avoid possible structure assignment errors
+kvm-intel-objs += ../lib/memset.o ../lib/memcpy.o
+obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
new file mode 100644 (file)
index 0000000..4e3dc13
--- /dev/null
@@ -0,0 +1,251 @@
+/*
+ * asm-offsets.c: Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ *
+ * Anthony Xu    <anthony.xu@intel.com>
+ * Xiantao Zhang <xiantao.zhang@intel.com>
+ * Copyright (c) 2007 Intel Corporation  KVM support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <linux/autoconf.h>
+#include <linux/kvm_host.h>
+
+#include "vcpu.h"
+
+#define task_struct kvm_vcpu
+
+#define DEFINE(sym, val) \
+       asm volatile("\n->" #sym " (%0) " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : :)
+
+#define OFFSET(_sym, _str, _mem) \
+    DEFINE(_sym, offsetof(_str, _mem));
+
+void foo(void)
+{
+       DEFINE(VMM_TASK_SIZE, sizeof(struct kvm_vcpu));
+       DEFINE(VMM_PT_REGS_SIZE, sizeof(struct kvm_pt_regs));
+
+       BLANK();
+
+       DEFINE(VMM_VCPU_META_RR0_OFFSET,
+                       offsetof(struct kvm_vcpu, arch.metaphysical_rr0));
+       DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
+                       offsetof(struct kvm_vcpu,
+                               arch.metaphysical_saved_rr0));
+       DEFINE(VMM_VCPU_VRR0_OFFSET,
+                       offsetof(struct kvm_vcpu, arch.vrr[0]));
+       DEFINE(VMM_VPD_IRR0_OFFSET,
+                       offsetof(struct vpd, irr[0]));
+       DEFINE(VMM_VCPU_ITC_CHECK_OFFSET,
+                       offsetof(struct kvm_vcpu, arch.itc_check));
+       DEFINE(VMM_VCPU_IRQ_CHECK_OFFSET,
+                       offsetof(struct kvm_vcpu, arch.irq_check));
+       DEFINE(VMM_VPD_VHPI_OFFSET,
+                       offsetof(struct vpd, vhpi));
+       DEFINE(VMM_VCPU_VSA_BASE_OFFSET,
+                       offsetof(struct kvm_vcpu, arch.vsa_base));
+       DEFINE(VMM_VCPU_VPD_OFFSET,
+                       offsetof(struct kvm_vcpu, arch.vpd));
+       DEFINE(VMM_VCPU_IRQ_CHECK,
+                       offsetof(struct kvm_vcpu, arch.irq_check));
+       DEFINE(VMM_VCPU_TIMER_PENDING,
+                       offsetof(struct kvm_vcpu, arch.timer_pending));
+       DEFINE(VMM_VCPU_META_SAVED_RR0_OFFSET,
+                       offsetof(struct kvm_vcpu, arch.metaphysical_saved_rr0));
+       DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
+                       offsetof(struct kvm_vcpu, arch.mode_flags));
+       DEFINE(VMM_VCPU_ITC_OFS_OFFSET,
+                       offsetof(struct kvm_vcpu, arch.itc_offset));
+       DEFINE(VMM_VCPU_LAST_ITC_OFFSET,
+                       offsetof(struct kvm_vcpu, arch.last_itc));
+       DEFINE(VMM_VCPU_SAVED_GP_OFFSET,
+                       offsetof(struct kvm_vcpu, arch.saved_gp));
+
+       BLANK();
+
+       DEFINE(VMM_PT_REGS_B6_OFFSET,
+                               offsetof(struct kvm_pt_regs, b6));
+       DEFINE(VMM_PT_REGS_B7_OFFSET,
+                               offsetof(struct kvm_pt_regs, b7));
+       DEFINE(VMM_PT_REGS_AR_CSD_OFFSET,
+                               offsetof(struct kvm_pt_regs, ar_csd));
+       DEFINE(VMM_PT_REGS_AR_SSD_OFFSET,
+                               offsetof(struct kvm_pt_regs, ar_ssd));
+       DEFINE(VMM_PT_REGS_R8_OFFSET,
+                               offsetof(struct kvm_pt_regs, r8));
+       DEFINE(VMM_PT_REGS_R9_OFFSET,
+                               offsetof(struct kvm_pt_regs, r9));
+       DEFINE(VMM_PT_REGS_R10_OFFSET,
+                               offsetof(struct kvm_pt_regs, r10));
+       DEFINE(VMM_PT_REGS_R11_OFFSET,
+                               offsetof(struct kvm_pt_regs, r11));
+       DEFINE(VMM_PT_REGS_CR_IPSR_OFFSET,
+                               offsetof(struct kvm_pt_regs, cr_ipsr));
+       DEFINE(VMM_PT_REGS_CR_IIP_OFFSET,
+                               offsetof(struct kvm_pt_regs, cr_iip));
+       DEFINE(VMM_PT_REGS_CR_IFS_OFFSET,
+                               offsetof(struct kvm_pt_regs, cr_ifs));
+       DEFINE(VMM_PT_REGS_AR_UNAT_OFFSET,
+                               offsetof(struct kvm_pt_regs, ar_unat));
+       DEFINE(VMM_PT_REGS_AR_PFS_OFFSET,
+                               offsetof(struct kvm_pt_regs, ar_pfs));
+       DEFINE(VMM_PT_REGS_AR_RSC_OFFSET,
+                               offsetof(struct kvm_pt_regs, ar_rsc));
+       DEFINE(VMM_PT_REGS_AR_RNAT_OFFSET,
+                               offsetof(struct kvm_pt_regs, ar_rnat));
+
+       DEFINE(VMM_PT_REGS_AR_BSPSTORE_OFFSET,
+                               offsetof(struct kvm_pt_regs, ar_bspstore));
+       DEFINE(VMM_PT_REGS_PR_OFFSET,
+                               offsetof(struct kvm_pt_regs, pr));
+       DEFINE(VMM_PT_REGS_B0_OFFSET,
+                               offsetof(struct kvm_pt_regs, b0));
+       DEFINE(VMM_PT_REGS_LOADRS_OFFSET,
+                               offsetof(struct kvm_pt_regs, loadrs));
+       DEFINE(VMM_PT_REGS_R1_OFFSET,
+                               offsetof(struct kvm_pt_regs, r1));
+       DEFINE(VMM_PT_REGS_R12_OFFSET,
+                               offsetof(struct kvm_pt_regs, r12));
+       DEFINE(VMM_PT_REGS_R13_OFFSET,
+                               offsetof(struct kvm_pt_regs, r13));
+       DEFINE(VMM_PT_REGS_AR_FPSR_OFFSET,
+                               offsetof(struct kvm_pt_regs, ar_fpsr));
+       DEFINE(VMM_PT_REGS_R15_OFFSET,
+                               offsetof(struct kvm_pt_regs, r15));
+       DEFINE(VMM_PT_REGS_R14_OFFSET,
+                               offsetof(struct kvm_pt_regs, r14));
+       DEFINE(VMM_PT_REGS_R2_OFFSET,
+                               offsetof(struct kvm_pt_regs, r2));
+       DEFINE(VMM_PT_REGS_R3_OFFSET,
+                               offsetof(struct kvm_pt_regs, r3));
+       DEFINE(VMM_PT_REGS_R16_OFFSET,
+                               offsetof(struct kvm_pt_regs, r16));
+       DEFINE(VMM_PT_REGS_R17_OFFSET,
+                               offsetof(struct kvm_pt_regs, r17));
+       DEFINE(VMM_PT_REGS_R18_OFFSET,
+                               offsetof(struct kvm_pt_regs, r18));
+       DEFINE(VMM_PT_REGS_R19_OFFSET,
+                               offsetof(struct kvm_pt_regs, r19));
+       DEFINE(VMM_PT_REGS_R20_OFFSET,
+                               offsetof(struct kvm_pt_regs, r20));
+       DEFINE(VMM_PT_REGS_R21_OFFSET,
+                               offsetof(struct kvm_pt_regs, r21));
+       DEFINE(VMM_PT_REGS_R22_OFFSET,
+                               offsetof(struct kvm_pt_regs, r22));
+       DEFINE(VMM_PT_REGS_R23_OFFSET,
+                               offsetof(struct kvm_pt_regs, r23));
+       DEFINE(VMM_PT_REGS_R24_OFFSET,
+                               offsetof(struct kvm_pt_regs, r24));
+       DEFINE(VMM_PT_REGS_R25_OFFSET,
+                               offsetof(struct kvm_pt_regs, r25));
+       DEFINE(VMM_PT_REGS_R26_OFFSET,
+                               offsetof(struct kvm_pt_regs, r26));
+       DEFINE(VMM_PT_REGS_R27_OFFSET,
+                               offsetof(struct kvm_pt_regs, r27));
+       DEFINE(VMM_PT_REGS_R28_OFFSET,
+                               offsetof(struct kvm_pt_regs, r28));
+       DEFINE(VMM_PT_REGS_R29_OFFSET,
+                               offsetof(struct kvm_pt_regs, r29));
+       DEFINE(VMM_PT_REGS_R30_OFFSET,
+                               offsetof(struct kvm_pt_regs, r30));
+       DEFINE(VMM_PT_REGS_R31_OFFSET,
+                               offsetof(struct kvm_pt_regs, r31));
+       DEFINE(VMM_PT_REGS_AR_CCV_OFFSET,
+                               offsetof(struct kvm_pt_regs, ar_ccv));
+       DEFINE(VMM_PT_REGS_F6_OFFSET,
+                               offsetof(struct kvm_pt_regs, f6));
+       DEFINE(VMM_PT_REGS_F7_OFFSET,
+                               offsetof(struct kvm_pt_regs, f7));
+       DEFINE(VMM_PT_REGS_F8_OFFSET,
+                               offsetof(struct kvm_pt_regs, f8));
+       DEFINE(VMM_PT_REGS_F9_OFFSET,
+                               offsetof(struct kvm_pt_regs, f9));
+       DEFINE(VMM_PT_REGS_F10_OFFSET,
+                               offsetof(struct kvm_pt_regs, f10));
+       DEFINE(VMM_PT_REGS_F11_OFFSET,
+                               offsetof(struct kvm_pt_regs, f11));
+       DEFINE(VMM_PT_REGS_R4_OFFSET,
+                               offsetof(struct kvm_pt_regs, r4));
+       DEFINE(VMM_PT_REGS_R5_OFFSET,
+                               offsetof(struct kvm_pt_regs, r5));
+       DEFINE(VMM_PT_REGS_R6_OFFSET,
+                               offsetof(struct kvm_pt_regs, r6));
+       DEFINE(VMM_PT_REGS_R7_OFFSET,
+                               offsetof(struct kvm_pt_regs, r7));
+       DEFINE(VMM_PT_REGS_EML_UNAT_OFFSET,
+                               offsetof(struct kvm_pt_regs, eml_unat));
+       DEFINE(VMM_VCPU_IIPA_OFFSET,
+                               offsetof(struct kvm_vcpu, arch.cr_iipa));
+       DEFINE(VMM_VCPU_OPCODE_OFFSET,
+                               offsetof(struct kvm_vcpu, arch.opcode));
+       DEFINE(VMM_VCPU_CAUSE_OFFSET, offsetof(struct kvm_vcpu, arch.cause));
+       DEFINE(VMM_VCPU_ISR_OFFSET,
+                               offsetof(struct kvm_vcpu, arch.cr_isr));
+       DEFINE(VMM_PT_REGS_R16_SLOT,
+                               (((offsetof(struct kvm_pt_regs, r16)
+                               - sizeof(struct kvm_pt_regs)) >> 3) & 0x3f));
+       DEFINE(VMM_VCPU_MODE_FLAGS_OFFSET,
+                               offsetof(struct kvm_vcpu, arch.mode_flags));
+       DEFINE(VMM_VCPU_GP_OFFSET, offsetof(struct kvm_vcpu, arch.__gp));
+       BLANK();
+
+       DEFINE(VMM_VPD_BASE_OFFSET, offsetof(struct kvm_vcpu, arch.vpd));
+       DEFINE(VMM_VPD_VIFS_OFFSET, offsetof(struct vpd, ifs));
+       DEFINE(VMM_VLSAPIC_INSVC_BASE_OFFSET,
+                       offsetof(struct kvm_vcpu, arch.insvc[0]));
+       DEFINE(VMM_VPD_VPTA_OFFSET, offsetof(struct vpd, pta));
+       DEFINE(VMM_VPD_VPSR_OFFSET, offsetof(struct vpd, vpsr));
+
+       DEFINE(VMM_CTX_R4_OFFSET, offsetof(union context, gr[4]));
+       DEFINE(VMM_CTX_R5_OFFSET, offsetof(union context, gr[5]));
+       DEFINE(VMM_CTX_R12_OFFSET, offsetof(union context, gr[12]));
+       DEFINE(VMM_CTX_R13_OFFSET, offsetof(union context, gr[13]));
+       DEFINE(VMM_CTX_KR0_OFFSET, offsetof(union context, ar[0]));
+       DEFINE(VMM_CTX_KR1_OFFSET, offsetof(union context, ar[1]));
+       DEFINE(VMM_CTX_B0_OFFSET, offsetof(union context, br[0]));
+       DEFINE(VMM_CTX_B1_OFFSET, offsetof(union context, br[1]));
+       DEFINE(VMM_CTX_B2_OFFSET, offsetof(union context, br[2]));
+       DEFINE(VMM_CTX_RR0_OFFSET, offsetof(union context, rr[0]));
+       DEFINE(VMM_CTX_RSC_OFFSET, offsetof(union context, ar[16]));
+       DEFINE(VMM_CTX_BSPSTORE_OFFSET, offsetof(union context, ar[18]));
+       DEFINE(VMM_CTX_RNAT_OFFSET, offsetof(union context, ar[19]));
+       DEFINE(VMM_CTX_FCR_OFFSET, offsetof(union context, ar[21]));
+       DEFINE(VMM_CTX_EFLAG_OFFSET, offsetof(union context, ar[24]));
+       DEFINE(VMM_CTX_CFLG_OFFSET, offsetof(union context, ar[27]));
+       DEFINE(VMM_CTX_FSR_OFFSET, offsetof(union context, ar[28]));
+       DEFINE(VMM_CTX_FIR_OFFSET, offsetof(union context, ar[29]));
+       DEFINE(VMM_CTX_FDR_OFFSET, offsetof(union context, ar[30]));
+       DEFINE(VMM_CTX_UNAT_OFFSET, offsetof(union context, ar[36]));
+       DEFINE(VMM_CTX_FPSR_OFFSET, offsetof(union context, ar[40]));
+       DEFINE(VMM_CTX_PFS_OFFSET, offsetof(union context, ar[64]));
+       DEFINE(VMM_CTX_LC_OFFSET, offsetof(union context, ar[65]));
+       DEFINE(VMM_CTX_DCR_OFFSET, offsetof(union context, cr[0]));
+       DEFINE(VMM_CTX_IVA_OFFSET, offsetof(union context, cr[2]));
+       DEFINE(VMM_CTX_PTA_OFFSET, offsetof(union context, cr[8]));
+       DEFINE(VMM_CTX_IBR0_OFFSET, offsetof(union context, ibr[0]));
+       DEFINE(VMM_CTX_DBR0_OFFSET, offsetof(union context, dbr[0]));
+       DEFINE(VMM_CTX_F2_OFFSET, offsetof(union context, fr[2]));
+       DEFINE(VMM_CTX_F3_OFFSET, offsetof(union context, fr[3]));
+       DEFINE(VMM_CTX_F32_OFFSET, offsetof(union context, fr[32]));
+       DEFINE(VMM_CTX_F33_OFFSET, offsetof(union context, fr[33]));
+       DEFINE(VMM_CTX_PKR0_OFFSET, offsetof(union context, pkr[0]));
+       DEFINE(VMM_CTX_PSR_OFFSET, offsetof(union context, psr));
+       BLANK();
+}
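As an aside, the asm-offsets trick used by this file can be shown in a stripped-down, self-contained form (my illustration, not part of the patch): the "i" constraint makes the compiler print the compile-time constant into the generated .s file, and the sed rule in the Makefile above turns every line starting with "->" into a #define in asm-offsets.h.

    #include <stddef.h>

    struct demo {
            long a;
            long b;
    };

    /* Same technique as the DEFINE() macro above. */
    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " (%0) " #val : : "i" (val))

    void demo_offsets(void)
    {
            /* Compiling with "gcc -S" leaves a marker line such as
             * "->DEMO_B_OFFSET (8) offsetof(struct demo, b)" in the .s output
             * (exact operand syntax depends on the target assembler), which
             * the sed-y rule rewrites into a #define. */
            DEFINE(DEMO_B_OFFSET, offsetof(struct demo, b));
    }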
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
new file mode 100644 (file)
index 0000000..318b811
--- /dev/null
@@ -0,0 +1,1805 @@
+/*
+ * kvm_ia64.c: Basic KVM support on Itanium series processors
+ *
+ *
+ *     Copyright (C) 2007, Intel Corporation.
+ *     Xiantao Zhang  (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/gfp.h>
+#include <linux/fs.h>
+#include <linux/smp.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+#include <linux/bitops.h>
+#include <linux/hrtimer.h>
+#include <linux/uaccess.h>
+
+#include <asm/pgtable.h>
+#include <asm/gcc_intrin.h>
+#include <asm/pal.h>
+#include <asm/cacheflush.h>
+#include <asm/div64.h>
+#include <asm/tlb.h>
+
+#include "misc.h"
+#include "vti.h"
+#include "iodev.h"
+#include "ioapic.h"
+#include "lapic.h"
+
+static unsigned long kvm_vmm_base;
+static unsigned long kvm_vsa_base;
+static unsigned long kvm_vm_buffer;
+static unsigned long kvm_vm_buffer_size;
+unsigned long kvm_vmm_gp;
+
+static long vp_env_info;
+
+static struct kvm_vmm_info *kvm_vmm_info;
+
+static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu);
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+       { NULL }
+};
+
+
+struct fdesc {
+       unsigned long ip;
+       unsigned long gp;
+};
+
+static void kvm_flush_icache(unsigned long start, unsigned long len)
+{
+       int l;
+
+       for (l = 0; l < (len + 32); l += 32)
+               ia64_fc(start + l);
+
+       ia64_sync_i();
+       ia64_srlz_i();
+}
+
+static void kvm_flush_tlb_all(void)
+{
+       unsigned long i, j, count0, count1, stride0, stride1, addr;
+       long flags;
+
+       addr    = local_cpu_data->ptce_base;
+       count0  = local_cpu_data->ptce_count[0];
+       count1  = local_cpu_data->ptce_count[1];
+       stride0 = local_cpu_data->ptce_stride[0];
+       stride1 = local_cpu_data->ptce_stride[1];
+
+       local_irq_save(flags);
+       for (i = 0; i < count0; ++i) {
+               for (j = 0; j < count1; ++j) {
+                       ia64_ptce(addr);
+                       addr += stride1;
+               }
+               addr += stride0;
+       }
+       local_irq_restore(flags);
+       ia64_srlz_i();                  /* srlz.i implies srlz.d */
+}
+
+long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
+{
+       struct ia64_pal_retval iprv;
+
+       PAL_CALL_STK(iprv, PAL_VP_CREATE, (u64)vpd, (u64)host_iva,
+                       (u64)opt_handler);
+
+       return iprv.status;
+}
+
+static  DEFINE_SPINLOCK(vp_lock);
+
+void kvm_arch_hardware_enable(void *garbage)
+{
+       long  status;
+       long  tmp_base;
+       unsigned long pte;
+       unsigned long saved_psr;
+       int slot;
+
+       pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
+                               PAGE_KERNEL));
+       local_irq_save(saved_psr);
+       slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
+       if (slot < 0) {
+               local_irq_restore(saved_psr);
+               return;
+       }
+       local_irq_restore(saved_psr);
+
+       spin_lock(&vp_lock);
+       status = ia64_pal_vp_init_env(kvm_vsa_base ?
+                               VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
+                       __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
+       if (status != 0) {
+               printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
+               spin_unlock(&vp_lock);
+               return;
+       }
+
+       if (!kvm_vsa_base) {
+               kvm_vsa_base = tmp_base;
+               printk(KERN_INFO"kvm: kvm_vsa_base:0x%lx\n", kvm_vsa_base);
+       }
+       spin_unlock(&vp_lock);
+       ia64_ptr_entry(0x3, slot);
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+
+       long status;
+       int slot;
+       unsigned long pte;
+       unsigned long saved_psr;
+       unsigned long host_iva = ia64_getreg(_IA64_REG_CR_IVA);
+
+       pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
+                               PAGE_KERNEL));
+
+       local_irq_save(saved_psr);
+       slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
+       if (slot < 0) {
+               local_irq_restore(saved_psr);
+               return;
+       }
+       local_irq_restore(saved_psr);
+
+       status = ia64_pal_vp_exit_env(host_iva);
+       if (status)
+               printk(KERN_DEBUG"kvm: Failed to disable VT support! :%ld\n",
+                               status);
+       ia64_ptr_entry(0x3, slot);
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+       *(int *)rtn = 0;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+
+       int r;
+
+       switch (ext) {
+       case KVM_CAP_IRQCHIP:
+       case KVM_CAP_USER_MEMORY:
+
+               r = 1;
+               break;
+       default:
+               r = 0;
+       }
+       return r;
+
+}
+
+static struct kvm_io_device *vcpu_find_mmio_dev(struct kvm_vcpu *vcpu,
+                                       gpa_t addr)
+{
+       struct kvm_io_device *dev;
+
+       dev = kvm_io_bus_find_dev(&vcpu->kvm->mmio_bus, addr);
+
+       return dev;
+}
+
+static int handle_vm_error(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+       kvm_run->hw.hardware_exit_reason = 1;
+       return 0;
+}
+
+static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       struct kvm_mmio_req *p;
+       struct kvm_io_device *mmio_dev;
+
+       p = kvm_get_vcpu_ioreq(vcpu);
+
+       if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
+               goto mmio;
+       vcpu->mmio_needed = 1;
+       vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
+       vcpu->mmio_size = kvm_run->mmio.len = p->size;
+       vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
+
+       if (vcpu->mmio_is_write)
+               memcpy(vcpu->mmio_data, &p->data, p->size);
+       memcpy(kvm_run->mmio.data, &p->data, p->size);
+       kvm_run->exit_reason = KVM_EXIT_MMIO;
+       return 0;
+mmio:
+       mmio_dev = vcpu_find_mmio_dev(vcpu, p->addr);
+       if (mmio_dev) {
+               if (!p->dir)
+                       kvm_iodevice_write(mmio_dev, p->addr, p->size,
+                                               &p->data);
+               else
+                       kvm_iodevice_read(mmio_dev, p->addr, p->size,
+                                               &p->data);
+
+       } else
+               printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
+       p->state = STATE_IORESP_READY;
+
+       return 1;
+}
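The function above defers the access to user space by filling in kvm_run->mmio and exiting with KVM_EXIT_MMIO; a rough sketch of the matching user-space handling (illustrative, using the generic /dev/kvm run interface rather than anything specific to this patch) looks like:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* vcpu_fd is an open vcpu descriptor and run points to its mmap()ed
     * struct kvm_run.  A real VMM would dispatch to an emulated device;
     * here reads are simply satisfied with zeroes. */
    static int run_once(int vcpu_fd, struct kvm_run *run)
    {
            if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                    return -1;

            if (run->exit_reason == KVM_EXIT_MMIO && !run->mmio.is_write)
                    memset(run->mmio.data, 0, run->mmio.len);

            return 0;
    }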
+
+static int handle_pal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       struct exit_ctl_data *p;
+
+       p = kvm_get_exit_data(vcpu);
+
+       if (p->exit_reason == EXIT_REASON_PAL_CALL)
+               return kvm_pal_emul(vcpu, kvm_run);
+       else {
+               kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+               kvm_run->hw.hardware_exit_reason = 2;
+               return 0;
+       }
+}
+
+static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       struct exit_ctl_data *p;
+
+       p = kvm_get_exit_data(vcpu);
+
+       if (p->exit_reason == EXIT_REASON_SAL_CALL) {
+               kvm_sal_emul(vcpu);
+               return 1;
+       } else {
+               kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+               kvm_run->hw.hardware_exit_reason = 3;
+               return 0;
+       }
+
+}
+
+/*
+ *  dm:     delivery mode of the IPI.
+ *  vector: vector to deliver.
+ */
+static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
+                               uint64_t vector)
+{
+       switch (dm) {
+       case SAPIC_FIXED:
+               kvm_apic_set_irq(vcpu, vector, 0);
+               break;
+       case SAPIC_NMI:
+               kvm_apic_set_irq(vcpu, 2, 0);
+               break;
+       case SAPIC_EXTINT:
+               kvm_apic_set_irq(vcpu, 0, 0);
+               break;
+       case SAPIC_INIT:
+       case SAPIC_PMI:
+       default:
+               printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
+               break;
+       }
+}
+
+static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
+                       unsigned long eid)
+{
+       union ia64_lid lid;
+       int i;
+
+       for (i = 0; i < KVM_MAX_VCPUS; i++) {
+               if (kvm->vcpus[i]) {
+                       lid.val = VCPU_LID(kvm->vcpus[i]);
+                       if (lid.id == id && lid.eid == eid)
+                               return kvm->vcpus[i];
+               }
+       }
+
+       return NULL;
+}
+
+static int handle_ipi(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
+       struct kvm_vcpu *target_vcpu;
+       struct kvm_pt_regs *regs;
+       union ia64_ipi_a addr = p->u.ipi_data.addr;
+       union ia64_ipi_d data = p->u.ipi_data.data;
+
+       target_vcpu = lid_to_vcpu(vcpu->kvm, addr.id, addr.eid);
+       if (!target_vcpu)
+               return handle_vm_error(vcpu, kvm_run);
+
+       if (!target_vcpu->arch.launched) {
+               regs = vcpu_regs(target_vcpu);
+
+               regs->cr_iip = vcpu->kvm->arch.rdv_sal_data.boot_ip;
+               regs->r1 = vcpu->kvm->arch.rdv_sal_data.boot_gp;
+
+               target_vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+               if (waitqueue_active(&target_vcpu->wq))
+                       wake_up_interruptible(&target_vcpu->wq);
+       } else {
+               vcpu_deliver_ipi(target_vcpu, data.dm, data.vector);
+               if (target_vcpu != vcpu)
+                       kvm_vcpu_kick(target_vcpu);
+       }
+
+       return 1;
+}
+
+struct call_data {
+       struct kvm_ptc_g ptc_g_data;
+       struct kvm_vcpu *vcpu;
+};
+
+static void vcpu_global_purge(void *info)
+{
+       struct call_data *p = (struct call_data *)info;
+       struct kvm_vcpu *vcpu = p->vcpu;
+
+       if (test_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
+               return;
+
+       set_bit(KVM_REQ_PTC_G, &vcpu->requests);
+       if (vcpu->arch.ptc_g_count < MAX_PTC_G_NUM) {
+               vcpu->arch.ptc_g_data[vcpu->arch.ptc_g_count++] =
+                                                       p->ptc_g_data;
+       } else {
+               clear_bit(KVM_REQ_PTC_G, &vcpu->requests);
+               vcpu->arch.ptc_g_count = 0;
+               set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
+       }
+}
+
+static int handle_global_purge(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       struct exit_ctl_data *p = kvm_get_exit_data(vcpu);
+       struct kvm *kvm = vcpu->kvm;
+       struct call_data call_data;
+       int i;
+       call_data.ptc_g_data = p->u.ptc_g_data;
+
+       for (i = 0; i < KVM_MAX_VCPUS; i++) {
+               if (!kvm->vcpus[i] || kvm->vcpus[i]->arch.mp_state ==
+                                               KVM_MP_STATE_UNINITIALIZED ||
+                                       vcpu == kvm->vcpus[i])
+                       continue;
+
+               if (waitqueue_active(&kvm->vcpus[i]->wq))
+                       wake_up_interruptible(&kvm->vcpus[i]->wq);
+
+               if (kvm->vcpus[i]->cpu != -1) {
+                       call_data.vcpu = kvm->vcpus[i];
+                       smp_call_function_single(kvm->vcpus[i]->cpu,
+                                       vcpu_global_purge, &call_data, 0, 1);
+               } else
+                       printk(KERN_WARNING"kvm: Uninit vcpu received ipi!\n");
+
+       }
+       return 1;
+}
+
+static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       return 1;
+}
+
+int kvm_emulate_halt(struct kvm_vcpu *vcpu)
+{
+
+       ktime_t kt;
+       long itc_diff;
+       unsigned long vcpu_now_itc;
+
+       unsigned long expires;
+       struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
+       unsigned long cyc_per_usec = local_cpu_data->cyc_per_usec;
+       struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+       vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
+
+       if (time_after(vcpu_now_itc, vpd->itm)) {
+               vcpu->arch.timer_check = 1;
+               return 1;
+       }
+       itc_diff = vpd->itm - vcpu_now_itc;
+       if (itc_diff < 0)
+               itc_diff = -itc_diff;
+
+       expires = div64_u64(itc_diff, cyc_per_usec);
+       kt = ktime_set(0, 1000 * expires);
+       vcpu->arch.ht_active = 1;
+       hrtimer_start(p_ht, kt, HRTIMER_MODE_ABS);
+
+       if (irqchip_in_kernel(vcpu->kvm)) {
+               vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+               kvm_vcpu_block(vcpu);
+               hrtimer_cancel(p_ht);
+               vcpu->arch.ht_active = 0;
+
+               if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
+                       return -EINTR;
+               return 1;
+       } else {
+               printk(KERN_ERR"kvm: Unsupported userspace halt!");
+               return 0;
+       }
+}
+
+static int handle_vm_shutdown(struct kvm_vcpu *vcpu,
+               struct kvm_run *kvm_run)
+{
+       kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
+       return 0;
+}
+
+static int handle_external_interrupt(struct kvm_vcpu *vcpu,
+               struct kvm_run *kvm_run)
+{
+       return 1;
+}
+
+static int (*kvm_vti_exit_handlers[])(struct kvm_vcpu *vcpu,
+               struct kvm_run *kvm_run) = {
+       [EXIT_REASON_VM_PANIC]              = handle_vm_error,
+       [EXIT_REASON_MMIO_INSTRUCTION]      = handle_mmio,
+       [EXIT_REASON_PAL_CALL]              = handle_pal_call,
+       [EXIT_REASON_SAL_CALL]              = handle_sal_call,
+       [EXIT_REASON_SWITCH_RR6]            = handle_switch_rr6,
+       [EXIT_REASON_VM_DESTROY]            = handle_vm_shutdown,
+       [EXIT_REASON_EXTERNAL_INTERRUPT]    = handle_external_interrupt,
+       [EXIT_REASON_IPI]                   = handle_ipi,
+       [EXIT_REASON_PTC_G]                 = handle_global_purge,
+
+};
+
+static const int kvm_vti_max_exit_handlers =
+               sizeof(kvm_vti_exit_handlers)/sizeof(*kvm_vti_exit_handlers);
+
+static void kvm_prepare_guest_switch(struct kvm_vcpu *vcpu)
+{
+}
+
+static uint32_t kvm_get_exit_reason(struct kvm_vcpu *vcpu)
+{
+       struct exit_ctl_data *p_exit_data;
+
+       p_exit_data = kvm_get_exit_data(vcpu);
+       return p_exit_data->exit_reason;
+}
+
+/*
+ * The guest has exited.  See if we can fix it or if we need userspace
+ * assistance.
+ */
+static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+{
+       u32 exit_reason = kvm_get_exit_reason(vcpu);
+       vcpu->arch.last_exit = exit_reason;
+
+       if (exit_reason < kvm_vti_max_exit_handlers
+                       && kvm_vti_exit_handlers[exit_reason])
+               return kvm_vti_exit_handlers[exit_reason](vcpu, kvm_run);
+       else {
+               kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
+               kvm_run->hw.hardware_exit_reason = exit_reason;
+       }
+       return 0;
+}
+
+static inline void vti_set_rr6(unsigned long rr6)
+{
+       ia64_set_rr(RR6, rr6);
+       ia64_srlz_i();
+}
+
+static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
+{
+       unsigned long pte;
+       struct kvm *kvm = vcpu->kvm;
+       int r;
+
+       /*Insert a pair of tr to map vmm*/
+       pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
+       r = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
+       if (r < 0)
+               goto out;
+       vcpu->arch.vmm_tr_slot = r;
+       /* Insert a pair of tr to map the vm's data */
+       pte = pte_val(mk_pte_phys(__pa(kvm->arch.vm_base), PAGE_KERNEL));
+       r = ia64_itr_entry(0x3, KVM_VM_DATA_BASE,
+                                       pte, KVM_VM_DATA_SHIFT);
+       if (r < 0)
+               goto out;
+       vcpu->arch.vm_tr_slot = r;
+       r = 0;
+out:
+       return r;
+
+}
+
+static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
+{
+
+       ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
+       ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
+
+}
+
+static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
+{
+       int cpu = smp_processor_id();
+
+       if (vcpu->arch.last_run_cpu != cpu ||
+                       per_cpu(last_vcpu, cpu) != vcpu) {
+               per_cpu(last_vcpu, cpu) = vcpu;
+               vcpu->arch.last_run_cpu = cpu;
+               kvm_flush_tlb_all();
+       }
+
+       vcpu->arch.host_rr6 = ia64_get_rr(RR6);
+       vti_set_rr6(vcpu->arch.vmm_rr);
+       return kvm_insert_vmm_mapping(vcpu);
+}
+static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
+{
+       kvm_purge_vmm_mapping(vcpu);
+       vti_set_rr6(vcpu->arch.host_rr6);
+}
+
+static int  vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       union context *host_ctx, *guest_ctx;
+       int r;
+
+       /*Get host and guest context with guest address space.*/
+       host_ctx = kvm_get_host_context(vcpu);
+       guest_ctx = kvm_get_guest_context(vcpu);
+
+       r = kvm_vcpu_pre_transition(vcpu);
+       if (r < 0)
+               goto out;
+       kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
+       kvm_vcpu_post_transition(vcpu);
+       r = 0;
+out:
+       return r;
+}
+
+static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       int r;
+
+again:
+       preempt_disable();
+
+       kvm_prepare_guest_switch(vcpu);
+       local_irq_disable();
+
+       if (signal_pending(current)) {
+               local_irq_enable();
+               preempt_enable();
+               r = -EINTR;
+               kvm_run->exit_reason = KVM_EXIT_INTR;
+               goto out;
+       }
+
+       vcpu->guest_mode = 1;
+       kvm_guest_enter();
+
+       r = vti_vcpu_run(vcpu, kvm_run);
+       if (r < 0) {
+               local_irq_enable();
+               preempt_enable();
+               kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+               goto out;
+       }
+
+       vcpu->arch.launched = 1;
+       vcpu->guest_mode = 0;
+       local_irq_enable();
+
+       /*
+        * We must have an instruction between local_irq_enable() and
+        * kvm_guest_exit(), so the timer interrupt isn't delayed by
+        * the interrupt shadow.  The stat.exits increment will do nicely.
+        * But we need to prevent reordering, hence this barrier():
+        */
+       barrier();
+
+       kvm_guest_exit();
+
+       preempt_enable();
+
+       r = kvm_handle_exit(kvm_run, vcpu);
+
+       if (r > 0) {
+               if (!need_resched())
+                       goto again;
+       }
+
+out:
+       if (r > 0) {
+               kvm_resched(vcpu);
+               goto again;
+       }
+
+       return r;
+}
+
+static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
+
+       if (!vcpu->mmio_is_write)
+               memcpy(&p->data, vcpu->mmio_data, 8);
+       p->state = STATE_IORESP_READY;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       int r;
+       sigset_t sigsaved;
+
+       vcpu_load(vcpu);
+
+       if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
+               kvm_vcpu_block(vcpu);
+               vcpu_put(vcpu);
+               return -EAGAIN;
+       }
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+       if (vcpu->mmio_needed) {
+               memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+               kvm_set_mmio_data(vcpu);
+               vcpu->mmio_read_completed = 1;
+               vcpu->mmio_needed = 0;
+       }
+       r = __vcpu_run(vcpu, kvm_run);
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+       vcpu_put(vcpu);
+       return r;
+}
+
+/*
+ * Allocate 16M of memory for every vm to hold its specific data.
+ * Its memory map is defined in kvm_host.h.
+ */
+static struct kvm *kvm_alloc_kvm(void)
+{
+
+       struct kvm *kvm;
+       uint64_t  vm_base;
+
+       vm_base = __get_free_pages(GFP_KERNEL, get_order(KVM_VM_DATA_SIZE));
+
+       if (!vm_base)
+               return ERR_PTR(-ENOMEM);
+       printk(KERN_DEBUG"kvm: VM data's base Address:0x%lx\n", vm_base);
+
+       /* Zero all pages before use! */
+       memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
+
+       kvm = (struct kvm *)(vm_base + KVM_VM_OFS);
+       kvm->arch.vm_base = vm_base;
+
+       return kvm;
+}
+
+struct kvm_io_range {
+       unsigned long start;
+       unsigned long size;
+       unsigned long type;
+};
+
+static const struct kvm_io_range io_ranges[] = {
+       {VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
+       {MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
+       {LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
+       {IO_SAPIC_START, IO_SAPIC_SIZE, GPFN_IOSAPIC},
+       {PIB_START, PIB_SIZE, GPFN_PIB},
+};
+
+static void kvm_build_io_pmt(struct kvm *kvm)
+{
+       unsigned long i, j;
+
+       /* Mark I/O ranges */
+       for (i = 0; i < (sizeof(io_ranges) / sizeof(struct kvm_io_range));
+                                                       i++) {
+               for (j = io_ranges[i].start;
+                               j < io_ranges[i].start + io_ranges[i].size;
+                               j += PAGE_SIZE)
+                       kvm_set_pmt_entry(kvm, j >> PAGE_SHIFT,
+                                       io_ranges[i].type, 0);
+       }
+
+}
+
+/*Use unused rids to virtualize guest rid.*/
+#define GUEST_PHYSICAL_RR0     0x1739
+#define GUEST_PHYSICAL_RR4     0x2739
+#define VMM_INIT_RR            0x1660
+
+static void kvm_init_vm(struct kvm *kvm)
+{
+       long vm_base;
+
+       BUG_ON(!kvm);
+
+       kvm->arch.metaphysical_rr0 = GUEST_PHYSICAL_RR0;
+       kvm->arch.metaphysical_rr4 = GUEST_PHYSICAL_RR4;
+       kvm->arch.vmm_init_rr = VMM_INIT_RR;
+
+       vm_base = kvm->arch.vm_base;
+       if (vm_base) {
+               kvm->arch.vhpt_base = vm_base + KVM_VHPT_OFS;
+               kvm->arch.vtlb_base = vm_base + KVM_VTLB_OFS;
+               kvm->arch.vpd_base  = vm_base + KVM_VPD_OFS;
+       }
+
+       /*
+        *Fill P2M entries for MMIO/IO ranges
+        */
+       kvm_build_io_pmt(kvm);
+
+}
+
+struct  kvm *kvm_arch_create_vm(void)
+{
+       struct kvm *kvm = kvm_alloc_kvm();
+
+       if (IS_ERR(kvm))
+               return ERR_PTR(-ENOMEM);
+       kvm_init_vm(kvm);
+
+       return kvm;
+
+}
+
+static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
+                                       struct kvm_irqchip *chip)
+{
+       int r;
+
+       r = 0;
+       switch (chip->chip_id) {
+       case KVM_IRQCHIP_IOAPIC:
+               memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
+                               sizeof(struct kvm_ioapic_state));
+               break;
+       default:
+               r = -EINVAL;
+               break;
+       }
+       return r;
+}
+
+static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
+{
+       int r;
+
+       r = 0;
+       switch (chip->chip_id) {
+       case KVM_IRQCHIP_IOAPIC:
+               memcpy(ioapic_irqchip(kvm),
+                               &chip->chip.ioapic,
+                               sizeof(struct kvm_ioapic_state));
+               break;
+       default:
+               r = -EINVAL;
+               break;
+       }
+       return r;
+}
+
+#define RESTORE_REGS(_x) vcpu->arch._x = regs->_x
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+       struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+       int r;
+
+       vcpu_load(vcpu);
+
+       for (i = 0; i < 16; i++) {
+               vpd->vgr[i] = regs->vpd.vgr[i];
+               vpd->vbgr[i] = regs->vpd.vbgr[i];
+       }
+       for (i = 0; i < 128; i++)
+               vpd->vcr[i] = regs->vpd.vcr[i];
+       vpd->vhpi = regs->vpd.vhpi;
+       vpd->vnat = regs->vpd.vnat;
+       vpd->vbnat = regs->vpd.vbnat;
+       vpd->vpsr = regs->vpd.vpsr;
+
+       vpd->vpr = regs->vpd.vpr;
+
+       r = -EFAULT;
+       r = copy_from_user(&vcpu->arch.guest, regs->saved_guest,
+                                               sizeof(union context));
+       if (r)
+               goto out;
+       r = copy_from_user(vcpu + 1, regs->saved_stack +
+                       sizeof(struct kvm_vcpu),
+                       IA64_STK_OFFSET - sizeof(struct kvm_vcpu));
+       if (r)
+               goto out;
+       vcpu->arch.exit_data =
+               ((struct kvm_vcpu *)(regs->saved_stack))->arch.exit_data;
+
+       RESTORE_REGS(mp_state);
+       RESTORE_REGS(vmm_rr);
+       memcpy(vcpu->arch.itrs, regs->itrs, sizeof(struct thash_data) * NITRS);
+       memcpy(vcpu->arch.dtrs, regs->dtrs, sizeof(struct thash_data) * NDTRS);
+       RESTORE_REGS(itr_regions);
+       RESTORE_REGS(dtr_regions);
+       RESTORE_REGS(tc_regions);
+       RESTORE_REGS(irq_check);
+       RESTORE_REGS(itc_check);
+       RESTORE_REGS(timer_check);
+       RESTORE_REGS(timer_pending);
+       RESTORE_REGS(last_itc);
+       for (i = 0; i < 8; i++) {
+               vcpu->arch.vrr[i] = regs->vrr[i];
+               vcpu->arch.ibr[i] = regs->ibr[i];
+               vcpu->arch.dbr[i] = regs->dbr[i];
+       }
+       for (i = 0; i < 4; i++)
+               vcpu->arch.insvc[i] = regs->insvc[i];
+       RESTORE_REGS(xtp);
+       RESTORE_REGS(metaphysical_rr0);
+       RESTORE_REGS(metaphysical_rr4);
+       RESTORE_REGS(metaphysical_saved_rr0);
+       RESTORE_REGS(metaphysical_saved_rr4);
+       RESTORE_REGS(fp_psr);
+       RESTORE_REGS(saved_gp);
+
+       vcpu->arch.irq_new_pending = 1;
+       vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
+       set_bit(KVM_REQ_RESUME, &vcpu->requests);
+
+       vcpu_put(vcpu);
+       r = 0;
+out:
+       return r;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+               unsigned int ioctl, unsigned long arg)
+{
+       struct kvm *kvm = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       int r = -EINVAL;
+
+       switch (ioctl) {
+       case KVM_SET_MEMORY_REGION: {
+               struct kvm_memory_region kvm_mem;
+               struct kvm_userspace_memory_region kvm_userspace_mem;
+
+               r = -EFAULT;
+               if (copy_from_user(&kvm_mem, argp, sizeof kvm_mem))
+                       goto out;
+               kvm_userspace_mem.slot = kvm_mem.slot;
+               kvm_userspace_mem.flags = kvm_mem.flags;
+               kvm_userspace_mem.guest_phys_addr =
+                                       kvm_mem.guest_phys_addr;
+               kvm_userspace_mem.memory_size = kvm_mem.memory_size;
+               r = kvm_vm_ioctl_set_memory_region(kvm,
+                                       &kvm_userspace_mem, 0);
+               if (r)
+                       goto out;
+               break;
+               }
+       case KVM_CREATE_IRQCHIP:
+               r = -EFAULT;
+               r = kvm_ioapic_init(kvm);
+               if (r)
+                       goto out;
+               break;
+       case KVM_IRQ_LINE: {
+               struct kvm_irq_level irq_event;
+
+               r = -EFAULT;
+               if (copy_from_user(&irq_event, argp, sizeof irq_event))
+                       goto out;
+               if (irqchip_in_kernel(kvm)) {
+                       mutex_lock(&kvm->lock);
+                       kvm_ioapic_set_irq(kvm->arch.vioapic,
+                                               irq_event.irq,
+                                               irq_event.level);
+                       mutex_unlock(&kvm->lock);
+                       r = 0;
+               }
+               break;
+               }
+       case KVM_GET_IRQCHIP: {
+               /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
+               struct kvm_irqchip chip;
+
+               r = -EFAULT;
+               if (copy_from_user(&chip, argp, sizeof chip))
+                               goto out;
+               r = -ENXIO;
+               if (!irqchip_in_kernel(kvm))
+                       goto out;
+               r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
+               if (r)
+                       goto out;
+               r = -EFAULT;
+               if (copy_to_user(argp, &chip, sizeof chip))
+                               goto out;
+               r = 0;
+               break;
+               }
+       case KVM_SET_IRQCHIP: {
+               /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
+               struct kvm_irqchip chip;
+
+               r = -EFAULT;
+               if (copy_from_user(&chip, argp, sizeof chip))
+                               goto out;
+               r = -ENXIO;
+               if (!irqchip_in_kernel(kvm))
+                       goto out;
+               r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
+               if (r)
+                       goto out;
+               r = 0;
+               break;
+               }
+       default:
+               ;
+       }
+out:
+       return r;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+               struct kvm_sregs *sregs)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+               struct kvm_sregs *sregs)
+{
+       return -EINVAL;
+
+}
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+               struct kvm_translation *tr)
+{
+
+       return -EINVAL;
+}
+
+static int kvm_alloc_vmm_area(void)
+{
+       if (!kvm_vmm_base && (kvm_vm_buffer_size < KVM_VM_BUFFER_SIZE)) {
+               kvm_vmm_base = __get_free_pages(GFP_KERNEL,
+                               get_order(KVM_VMM_SIZE));
+               if (!kvm_vmm_base)
+                       return -ENOMEM;
+
+               memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
+               kvm_vm_buffer = kvm_vmm_base + VMM_SIZE;
+
+               printk(KERN_DEBUG"kvm:VMM's Base Addr:0x%lx, vm_buffer:0x%lx\n",
+                               kvm_vmm_base, kvm_vm_buffer);
+       }
+
+       return 0;
+}
+
+static void kvm_free_vmm_area(void)
+{
+       if (kvm_vmm_base) {
+               /* Zero this area before freeing it to avoid leaking bits */
+               memset((void *)kvm_vmm_base, 0, KVM_VMM_SIZE);
+               free_pages(kvm_vmm_base, get_order(KVM_VMM_SIZE));
+               kvm_vmm_base  = 0;
+               kvm_vm_buffer = 0;
+               kvm_vsa_base = 0;
+       }
+}
+
+/*
+ * Make sure that a cpu that is being hot-unplugged does not have any vcpus
+ * cached on it. This is a no-op on IA64.
+ */
+void decache_vcpus_on_cpu(int cpu)
+{
+}
+
+static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
+static int vti_init_vpd(struct kvm_vcpu *vcpu)
+{
+       int i;
+       union cpuid3_t cpuid3;
+       struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+       if (IS_ERR(vpd))
+               return PTR_ERR(vpd);
+
+       /* CPUID init */
+       for (i = 0; i < 5; i++)
+               vpd->vcpuid[i] = ia64_get_cpuid(i);
+
+       /* Limit the CPUID number to 5 */
+       cpuid3.value = vpd->vcpuid[3];
+       cpuid3.number = 4;      /* 5 - 1 */
+       vpd->vcpuid[3] = cpuid3.value;
+
+       /*Set vac and vdc fields*/
+       vpd->vac.a_from_int_cr = 1;
+       vpd->vac.a_to_int_cr = 1;
+       vpd->vac.a_from_psr = 1;
+       vpd->vac.a_from_cpuid = 1;
+       vpd->vac.a_cover = 1;
+       vpd->vac.a_bsw = 1;
+       vpd->vac.a_int = 1;
+       vpd->vdc.d_vmsw = 1;
+
+       /*Set virtual buffer*/
+       vpd->virt_env_vaddr = KVM_VM_BUFFER_BASE;
+
+       return 0;
+}
+
+static int vti_create_vp(struct kvm_vcpu *vcpu)
+{
+       long ret;
+       struct vpd *vpd = vcpu->arch.vpd;
+       unsigned long  vmm_ivt;
+
+       vmm_ivt = kvm_vmm_info->vmm_ivt;
+
+       printk(KERN_DEBUG "kvm: vcpu:%p,ivt: 0x%lx\n", vcpu, vmm_ivt);
+
+       ret = ia64_pal_vp_create((u64 *)vpd, (u64 *)vmm_ivt, 0);
+
+       if (ret) {
+               printk(KERN_ERR"kvm: ia64_pal_vp_create failed!\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void init_ptce_info(struct kvm_vcpu *vcpu)
+{
+       ia64_ptce_info_t ptce = {0};
+
+       ia64_get_ptce(&ptce);
+       vcpu->arch.ptce_base = ptce.base;
+       vcpu->arch.ptce_count[0] = ptce.count[0];
+       vcpu->arch.ptce_count[1] = ptce.count[1];
+       vcpu->arch.ptce_stride[0] = ptce.stride[0];
+       vcpu->arch.ptce_stride[1] = ptce.stride[1];
+}
+
+static void kvm_migrate_hlt_timer(struct kvm_vcpu *vcpu)
+{
+       struct hrtimer *p_ht = &vcpu->arch.hlt_timer;
+
+       if (hrtimer_cancel(p_ht))
+               hrtimer_start(p_ht, p_ht->expires, HRTIMER_MODE_ABS);
+}
+
+static enum hrtimer_restart hlt_timer_fn(struct hrtimer *data)
+{
+       struct kvm_vcpu *vcpu;
+       wait_queue_head_t *q;
+
+       vcpu  = container_of(data, struct kvm_vcpu, arch.hlt_timer);
+       if (vcpu->arch.mp_state != KVM_MP_STATE_HALTED)
+               goto out;
+
+       q = &vcpu->wq;
+       if (waitqueue_active(q)) {
+               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+               wake_up_interruptible(q);
+       }
+out:
+       vcpu->arch.timer_check = 1;
+       return HRTIMER_NORESTART;
+}
+
+#define PALE_RESET_ENTRY    0x80000000ffffffb0UL
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       struct kvm_vcpu *v;
+       int r;
+       int i;
+       long itc_offset;
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+       union context *p_ctx = &vcpu->arch.guest;
+       struct kvm_vcpu *vmm_vcpu = to_guest(vcpu->kvm, vcpu);
+
+       /*Init vcpu context for first run.*/
+       if (IS_ERR(vmm_vcpu))
+               return PTR_ERR(vmm_vcpu);
+
+       if (vcpu->vcpu_id == 0) {
+               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+
+               /*Set entry address for first run.*/
+               regs->cr_iip = PALE_RESET_ENTRY;
+
+               /* Initialize the itc offset for all vcpus */
+               itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
+               for (i = 0; i < MAX_VCPU_NUM; i++) {
+                       v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
+                       v->arch.itc_offset = itc_offset;
+                       v->arch.last_itc = 0;
+               }
+       } else
+               vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
+
+       r = -ENOMEM;
+       vcpu->arch.apic = kzalloc(sizeof(struct kvm_lapic), GFP_KERNEL);
+       if (!vcpu->arch.apic)
+               goto out;
+       vcpu->arch.apic->vcpu = vcpu;
+
+       p_ctx->gr[1] = 0;
+       p_ctx->gr[12] = (unsigned long)((char *)vmm_vcpu + IA64_STK_OFFSET);
+       p_ctx->gr[13] = (unsigned long)vmm_vcpu;
+       p_ctx->psr = 0x1008522000UL;
+       p_ctx->ar[40] = FPSR_DEFAULT; /*fpsr*/
+       p_ctx->caller_unat = 0;
+       p_ctx->pr = 0x0;
+       p_ctx->ar[36] = 0x0; /*unat*/
+       p_ctx->ar[19] = 0x0; /*rnat*/
+       p_ctx->ar[18] = (unsigned long)vmm_vcpu +
+                               ((sizeof(struct kvm_vcpu)+15) & ~15);
+       p_ctx->ar[64] = 0x0; /*pfs*/
+       p_ctx->cr[0] = 0x7e04UL;
+       p_ctx->cr[2] = (unsigned long)kvm_vmm_info->vmm_ivt;
+       p_ctx->cr[8] = 0x3c;
+
+       /* Initialize region registers */
+       p_ctx->rr[0] = 0x30;
+       p_ctx->rr[1] = 0x30;
+       p_ctx->rr[2] = 0x30;
+       p_ctx->rr[3] = 0x30;
+       p_ctx->rr[4] = 0x30;
+       p_ctx->rr[5] = 0x30;
+       p_ctx->rr[7] = 0x30;
+
+       /* Initialize branch register 0 */
+       p_ctx->br[0] = *(unsigned long *)kvm_vmm_info->vmm_entry;
+
+       vcpu->arch.vmm_rr = kvm->arch.vmm_init_rr;
+       vcpu->arch.metaphysical_rr0 = kvm->arch.metaphysical_rr0;
+       vcpu->arch.metaphysical_rr4 = kvm->arch.metaphysical_rr4;
+
+       hrtimer_init(&vcpu->arch.hlt_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+       vcpu->arch.hlt_timer.function = hlt_timer_fn;
+
+       vcpu->arch.last_run_cpu = -1;
+       vcpu->arch.vpd = (struct vpd *)VPD_ADDR(vcpu->vcpu_id);
+       vcpu->arch.vsa_base = kvm_vsa_base;
+       vcpu->arch.__gp = kvm_vmm_gp;
+       vcpu->arch.dirty_log_lock_pa = __pa(&kvm->arch.dirty_log_lock);
+       vcpu->arch.vhpt.hash = (struct thash_data *)VHPT_ADDR(vcpu->vcpu_id);
+       vcpu->arch.vtlb.hash = (struct thash_data *)VTLB_ADDR(vcpu->vcpu_id);
+       init_ptce_info(vcpu);
+
+       r = 0;
+out:
+       return r;
+}
+
+static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
+{
+       unsigned long psr;
+       int r;
+
+       local_irq_save(psr);
+       r = kvm_insert_vmm_mapping(vcpu);
+       if (r)
+               goto fail;
+       r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
+       if (r)
+               goto fail;
+
+       r = vti_init_vpd(vcpu);
+       if (r) {
+               printk(KERN_DEBUG"kvm: vpd init error!!\n");
+               goto uninit;
+       }
+
+       r = vti_create_vp(vcpu);
+       if (r)
+               goto uninit;
+
+       kvm_purge_vmm_mapping(vcpu);
+       local_irq_restore(psr);
+
+       return 0;
+uninit:
+       kvm_vcpu_uninit(vcpu);
+fail:
+       return r;
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+               unsigned int id)
+{
+       struct kvm_vcpu *vcpu;
+       unsigned long vm_base = kvm->arch.vm_base;
+       int r;
+       int cpu;
+
+       r = -ENOMEM;
+       if (!vm_base) {
+               printk(KERN_ERR"kvm: Create vcpu[%d] error!\n", id);
+               goto fail;
+       }
+       vcpu = (struct kvm_vcpu *)(vm_base + KVM_VCPU_OFS + VCPU_SIZE * id);
+       vcpu->kvm = kvm;
+
+       cpu = get_cpu();
+       vti_vcpu_load(vcpu, cpu);
+       r = vti_vcpu_setup(vcpu, id);
+       put_cpu();
+
+       if (r) {
+               printk(KERN_DEBUG"kvm: vcpu_setup error!!\n");
+               goto fail;
+       }
+
+       return vcpu;
+fail:
+       return ERR_PTR(r);
+}
+
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+               struct kvm_debug_guest *dbg)
+{
+       return -EINVAL;
+}
+
+static void free_kvm(struct kvm *kvm)
+{
+       unsigned long vm_base = kvm->arch.vm_base;
+
+       if (vm_base) {
+               memset((void *)vm_base, 0, KVM_VM_DATA_SIZE);
+               free_pages(vm_base, get_order(KVM_VM_DATA_SIZE));
+       }
+
+}
+
+static void kvm_release_vm_pages(struct kvm *kvm)
+{
+       struct kvm_memory_slot *memslot;
+       int i, j;
+       unsigned long base_gfn;
+
+       for (i = 0; i < kvm->nmemslots; i++) {
+               memslot = &kvm->memslots[i];
+               base_gfn = memslot->base_gfn;
+
+               for (j = 0; j < memslot->npages; j++) {
+                       if (memslot->rmap[j])
+                               put_page((struct page *)memslot->rmap[j]);
+               }
+       }
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+       kfree(kvm->arch.vioapic);
+       kvm_release_vm_pages(kvm);
+       kvm_free_physmem(kvm);
+       free_kvm(kvm);
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+       if (cpu != vcpu->cpu) {
+               vcpu->cpu = cpu;
+               if (vcpu->arch.ht_active)
+                       kvm_migrate_hlt_timer(vcpu);
+       }
+}
+
+#define SAVE_REGS(_x)  regs->_x = vcpu->arch._x
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+       int r;
+       struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+       vcpu_load(vcpu);
+
+       for (i = 0; i < 16; i++) {
+               regs->vpd.vgr[i] = vpd->vgr[i];
+               regs->vpd.vbgr[i] = vpd->vbgr[i];
+       }
+       for (i = 0; i < 128; i++)
+               regs->vpd.vcr[i] = vpd->vcr[i];
+       regs->vpd.vhpi = vpd->vhpi;
+       regs->vpd.vnat = vpd->vnat;
+       regs->vpd.vbnat = vpd->vbnat;
+       regs->vpd.vpsr = vpd->vpsr;
+       regs->vpd.vpr = vpd->vpr;
+
+       r = -EFAULT;
+       if (copy_to_user(regs->saved_guest, &vcpu->arch.guest,
+                                       sizeof(union context)))
+               goto out;
+       if (copy_to_user(regs->saved_stack, (void *)vcpu, IA64_STK_OFFSET))
+               goto out;
+       SAVE_REGS(mp_state);
+       SAVE_REGS(vmm_rr);
+       memcpy(regs->itrs, vcpu->arch.itrs, sizeof(struct thash_data) * NITRS);
+       memcpy(regs->dtrs, vcpu->arch.dtrs, sizeof(struct thash_data) * NDTRS);
+       SAVE_REGS(itr_regions);
+       SAVE_REGS(dtr_regions);
+       SAVE_REGS(tc_regions);
+       SAVE_REGS(irq_check);
+       SAVE_REGS(itc_check);
+       SAVE_REGS(timer_check);
+       SAVE_REGS(timer_pending);
+       SAVE_REGS(last_itc);
+       for (i = 0; i < 8; i++) {
+               regs->vrr[i] = vcpu->arch.vrr[i];
+               regs->ibr[i] = vcpu->arch.ibr[i];
+               regs->dbr[i] = vcpu->arch.dbr[i];
+       }
+       for (i = 0; i < 4; i++)
+               regs->insvc[i] = vcpu->arch.insvc[i];
+       regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);
+       SAVE_REGS(xtp);
+       SAVE_REGS(metaphysical_rr0);
+       SAVE_REGS(metaphysical_rr4);
+       SAVE_REGS(metaphysical_saved_rr0);
+       SAVE_REGS(metaphysical_saved_rr4);
+       SAVE_REGS(fp_psr);
+       SAVE_REGS(saved_gp);
+       vcpu_put(vcpu);
+       r = 0;
+out:
+       return r;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+
+       hrtimer_cancel(&vcpu->arch.hlt_timer);
+       kfree(vcpu->arch.apic);
+}
+
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+               unsigned int ioctl, unsigned long arg)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_set_memory_region(struct kvm *kvm,
+               struct kvm_userspace_memory_region *mem,
+               struct kvm_memory_slot old,
+               int user_alloc)
+{
+       unsigned long i;
+       struct page *page;
+       int npages = mem->memory_size >> PAGE_SHIFT;
+       struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+       unsigned long base_gfn = memslot->base_gfn;
+
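+       /*
+        * Pin each guest page and enter its host physical address into
+        * the P2M table; remember the page in rmap so it can be released
+        * when the VM is destroyed.
+        */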
+       for (i = 0; i < npages; i++) {
+               page = gfn_to_page(kvm, base_gfn + i);
+               kvm_set_pmt_entry(kvm, base_gfn + i,
+                               page_to_pfn(page) << PAGE_SHIFT,
+                               _PAGE_AR_RWX|_PAGE_MA_WB);
+               memslot->rmap[i] = (unsigned long)page;
+       }
+
+       return 0;
+}
+
+
+long kvm_arch_dev_ioctl(struct file *filp,
+               unsigned int ioctl, unsigned long arg)
+{
+       return -EINVAL;
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_vcpu_uninit(vcpu);
+}
+
+static int vti_cpu_has_kvm_support(void)
+{
+       long  avail = 1, status = 1, control = 1;
+       long ret;
+
+       ret = ia64_pal_proc_get_features(&avail, &status, &control, 0);
+       if (ret)
+               goto out;
+
+       if (!(avail & PAL_PROC_VM_BIT))
+               goto out;
+
+       printk(KERN_DEBUG"kvm: Hardware Supports VT\n");
+
+       ret = ia64_pal_vp_env_info(&kvm_vm_buffer_size, &vp_env_info);
+       if (ret)
+               goto out;
+       printk(KERN_DEBUG"kvm: VM Buffer Size:0x%lx\n", kvm_vm_buffer_size);
+
+       if (!(vp_env_info & VP_OPCODE)) {
+               printk(KERN_WARNING"kvm: No opcode ability on hardware, "
+                               "vm_env_info:0x%lx\n", vp_env_info);
+       }
+
+       return 1;
+out:
+       return 0;
+}
+
+static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
+                                               struct module *module)
+{
+       unsigned long module_base;
+       unsigned long vmm_size;
+
+       unsigned long vmm_offset, func_offset, fdesc_offset;
+       struct fdesc *p_fdesc;
+
+       BUG_ON(!module);
+
+       if (!kvm_vmm_base) {
+               printk("kvm: kvm area hasn't been initialized yet!!\n");
+               return -EFAULT;
+       }
+
+       /*Calculate new position of relocated vmm module.*/
+       module_base = (unsigned long)module->module_core;
+       vmm_size = module->core_size;
+       if (unlikely(vmm_size > KVM_VMM_SIZE))
+               return -EFAULT;
+
+       memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
+       kvm_flush_icache(kvm_vmm_base, vmm_size);
+
+       /*Recalculate kvm_vmm_info based on new VMM*/
+       vmm_offset = vmm_info->vmm_ivt - module_base;
+       kvm_vmm_info->vmm_ivt = KVM_VMM_BASE + vmm_offset;
+       printk(KERN_DEBUG"kvm: Relocated VMM's IVT Base Addr:%lx\n",
+                       kvm_vmm_info->vmm_ivt);
+
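+       /*
+        * On ia64 a function pointer is a function descriptor (ip, gp).
+        * Patch both fields of the copied descriptors so the entry and
+        * trampoline point into the relocated VMM area.
+        */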
+       fdesc_offset = (unsigned long)vmm_info->vmm_entry - module_base;
+       kvm_vmm_info->vmm_entry = (kvm_vmm_entry *)(KVM_VMM_BASE +
+                                                       fdesc_offset);
+       func_offset = *(unsigned long *)vmm_info->vmm_entry - module_base;
+       p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
+       p_fdesc->ip = KVM_VMM_BASE + func_offset;
+       p_fdesc->gp = KVM_VMM_BASE+(p_fdesc->gp - module_base);
+
+       printk(KERN_DEBUG"kvm: Relocated VMM's Init Entry Addr:%lx\n",
+                       KVM_VMM_BASE+func_offset);
+
+       fdesc_offset = (unsigned long)vmm_info->tramp_entry - module_base;
+       kvm_vmm_info->tramp_entry = (kvm_tramp_entry *)(KVM_VMM_BASE +
+                       fdesc_offset);
+       func_offset = *(unsigned long *)vmm_info->tramp_entry - module_base;
+       p_fdesc = (struct fdesc *)(kvm_vmm_base + fdesc_offset);
+       p_fdesc->ip = KVM_VMM_BASE + func_offset;
+       p_fdesc->gp = KVM_VMM_BASE + (p_fdesc->gp - module_base);
+
+       kvm_vmm_gp = p_fdesc->gp;
+
+       printk(KERN_DEBUG"kvm: Relocated VMM's Entry IP:%p\n",
+                                               kvm_vmm_info->vmm_entry);
+       printk(KERN_DEBUG"kvm: Relocated VMM's Trampoline Entry IP:0x%lx\n",
+                                               KVM_VMM_BASE + func_offset);
+
+       return 0;
+}
+
+int kvm_arch_init(void *opaque)
+{
+       int r;
+       struct kvm_vmm_info *vmm_info = (struct kvm_vmm_info *)opaque;
+
+       if (!vti_cpu_has_kvm_support()) {
+               printk(KERN_ERR "kvm: No Hardware Virtualization Support!\n");
+               r = -EOPNOTSUPP;
+               goto out;
+       }
+
+       if (kvm_vmm_info) {
+               printk(KERN_ERR "kvm: Already loaded VMM module!\n");
+               r = -EEXIST;
+               goto out;
+       }
+
+       r = -ENOMEM;
+       kvm_vmm_info = kzalloc(sizeof(struct kvm_vmm_info), GFP_KERNEL);
+       if (!kvm_vmm_info)
+               goto out;
+
+       if (kvm_alloc_vmm_area())
+               goto out_free0;
+
+       r = kvm_relocate_vmm(vmm_info, vmm_info->module);
+       if (r)
+               goto out_free1;
+
+       return 0;
+
+out_free1:
+       kvm_free_vmm_area();
+out_free0:
+       kfree(kvm_vmm_info);
+out:
+       return r;
+}
+
+void kvm_arch_exit(void)
+{
+       kvm_free_vmm_area();
+       kfree(kvm_vmm_info);
+       kvm_vmm_info = NULL;
+}
+
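+/*
+ * Copy the dirty-page bitmap maintained by the VMM (in the VM data area)
+ * into the memslot's dirty_bitmap and clear the VMM-side copy.
+ */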
+static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
+               struct kvm_dirty_log *log)
+{
+       struct kvm_memory_slot *memslot;
+       int r, i;
+       long n, base;
+       unsigned long *dirty_bitmap = (unsigned long *)((void *)kvm - KVM_VM_OFS
+                                       + KVM_MEM_DIRTY_LOG_OFS);
+
+       r = -EINVAL;
+       if (log->slot >= KVM_MEMORY_SLOTS)
+               goto out;
+
+       memslot = &kvm->memslots[log->slot];
+       r = -ENOENT;
+       if (!memslot->dirty_bitmap)
+               goto out;
+
+       n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+       base = memslot->base_gfn / BITS_PER_LONG;
+
+       for (i = 0; i < n/sizeof(long); ++i) {
+               memslot->dirty_bitmap[i] = dirty_bitmap[base + i];
+               dirty_bitmap[base + i] = 0;
+       }
+       r = 0;
+out:
+       return r;
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+               struct kvm_dirty_log *log)
+{
+       int r;
+       int n;
+       struct kvm_memory_slot *memslot;
+       int is_dirty = 0;
+
+       spin_lock(&kvm->arch.dirty_log_lock);
+
+       r = kvm_ia64_sync_dirty_log(kvm, log);
+       if (r)
+               goto out;
+
+       r = kvm_get_dirty_log(kvm, log, &is_dirty);
+       if (r)
+               goto out;
+
+       /* If nothing is dirty, don't bother messing with page tables. */
+       if (is_dirty) {
+               kvm_flush_remote_tlbs(kvm);
+               memslot = &kvm->memslots[log->slot];
+               n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+               memset(memslot->dirty_bitmap, 0, n);
+       }
+       r = 0;
+out:
+       spin_unlock(&kvm->arch.dirty_log_lock);
+       return r;
+}
+
+int kvm_arch_hardware_setup(void)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+static void vcpu_kick_intr(void *info)
+{
+#ifdef DEBUG
+       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
+       printk(KERN_DEBUG"vcpu_kick_intr %p\n", vcpu);
+#endif
+}
+
+void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+       int ipi_pcpu = vcpu->cpu;
+
+       if (waitqueue_active(&vcpu->wq))
+               wake_up_interruptible(&vcpu->wq);
+
+       if (vcpu->guest_mode)
+               smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
+}
+
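+/*
+ * Post vector 'vec' in the guest's VPD IRR.  If it was not already
+ * pending, wake a halted vcpu or kick a running one so the interrupt
+ * is noticed.
+ */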
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
+{
+
+       struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+       if (!test_and_set_bit(vec, &vpd->irr[0])) {
+               vcpu->arch.irq_new_pending = 1;
+               if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
+                       kvm_vcpu_kick(vcpu);
+               else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
+                       vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+                       if (waitqueue_active(&vcpu->wq))
+                               wake_up_interruptible(&vcpu->wq);
+               }
+               return 1;
+       }
+       return 0;
+}
+
+int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
+{
+       return apic->vcpu->vcpu_id == dest;
+}
+
+int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
+{
+       return 0;
+}
+
+struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
+                                      unsigned long bitmap)
+{
+       struct kvm_vcpu *lvcpu = kvm->vcpus[0];
+       int i;
+
+       for (i = 1; i < KVM_MAX_VCPUS; i++) {
+               if (!kvm->vcpus[i])
+                       continue;
+               if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
+                       lvcpu = kvm->vcpus[i];
+       }
+
+       return lvcpu;
+}
+
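+/*
+ * Scan a 256-bit bitmap, stored as eight 32-bit words, from the top word
+ * down; return the index of the highest set bit, or -1 if none is set.
+ */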
+static int find_highest_bits(int *dat)
+{
+       u32  bits, bitnum;
+       int i;
+
+       /* loop for all 256 bits */
+       for (i = 7; i >= 0 ; i--) {
+               bits = dat[i];
+               if (bits) {
+                       bitnum = fls(bits);
+                       return i * 32 + bitnum - 1;
+               }
+       }
+
+       return -1;
+}
+
+int kvm_highest_pending_irq(struct kvm_vcpu *vcpu)
+{
+       struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
+
+       if (vpd->irr[0] & (1UL << NMI_VECTOR))
+               return NMI_VECTOR;
+       if (vpd->irr[0] & (1UL << ExtINT_VECTOR))
+               return ExtINT_VECTOR;
+
+       return find_highest_bits((int *)&vpd->irr[0]);
+}
+
+int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+{
+       if (kvm_highest_pending_irq(vcpu) != -1)
+               return 1;
+       return 0;
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+       return gfn;
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE;
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       return -EINVAL;
+}
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
new file mode 100644 (file)
index 0000000..091f936
--- /dev/null
@@ -0,0 +1,500 @@
+/*
+ * PAL/SAL call delegation
+ *
+ * Copyright (c) 2004 Li Susie <susie.li@intel.com>
+ * Copyright (c) 2005 Yu Ke <ke.yu@intel.com>
+ * Copyright (c) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/smp.h>
+
+#include "vti.h"
+#include "misc.h"
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+#include <asm/tlb.h>
+
+/*
+ * Handy macros to make sure that the PAL return values start out
+ * as something meaningful.
+ */
+#define INIT_PAL_STATUS_UNIMPLEMENTED(x)               \
+       {                                               \
+               x.status = PAL_STATUS_UNIMPLEMENTED;    \
+               x.v0 = 0;                               \
+               x.v1 = 0;                               \
+               x.v2 = 0;                               \
+       }
+
+#define INIT_PAL_STATUS_SUCCESS(x)                     \
+       {                                               \
+               x.status = PAL_STATUS_SUCCESS;          \
+               x.v0 = 0;                               \
+               x.v1 = 0;                               \
+               x.v2 = 0;                               \
+       }
+
+static void kvm_get_pal_call_data(struct kvm_vcpu *vcpu,
+               u64 *gr28, u64 *gr29, u64 *gr30, u64 *gr31) {
+       struct exit_ctl_data *p;
+
+       if (vcpu) {
+               p = &vcpu->arch.exit_data;
+               if (p->exit_reason == EXIT_REASON_PAL_CALL) {
+                       *gr28 = p->u.pal_data.gr28;
+                       *gr29 = p->u.pal_data.gr29;
+                       *gr30 = p->u.pal_data.gr30;
+                       *gr31 = p->u.pal_data.gr31;
+                       return ;
+               }
+       }
+       printk(KERN_DEBUG"Failed to get vcpu pal data!!!\n");
+}
+
+static void set_pal_result(struct kvm_vcpu *vcpu,
+               struct ia64_pal_retval result) {
+
+       struct exit_ctl_data *p;
+
+       p = kvm_get_exit_data(vcpu);
+       if (p && p->exit_reason == EXIT_REASON_PAL_CALL) {
+               p->u.pal_data.ret = result;
+               return ;
+       }
+       INIT_PAL_STATUS_UNIMPLEMENTED(p->u.pal_data.ret);
+}
+
+static void set_sal_result(struct kvm_vcpu *vcpu,
+               struct sal_ret_values result) {
+       struct exit_ctl_data *p;
+
+       p = kvm_get_exit_data(vcpu);
+       if (p && p->exit_reason == EXIT_REASON_SAL_CALL) {
+               p->u.sal_data.ret = result;
+               return ;
+       }
+       printk(KERN_WARNING"Failed to set sal result!!\n");
+}
+
+struct cache_flush_args {
+       u64 cache_type;
+       u64 operation;
+       u64 progress;
+       long status;
+};
+
+cpumask_t cpu_cache_coherent_map;
+
+static void remote_pal_cache_flush(void *data)
+{
+       struct cache_flush_args *args = data;
+       long status;
+       u64 progress = args->progress;
+
+       status = ia64_pal_cache_flush(args->cache_type, args->operation,
+                                       &progress, NULL);
+       if (status != 0)
+               args->status = status;
+}
+
+static struct ia64_pal_retval pal_cache_flush(struct kvm_vcpu *vcpu)
+{
+       u64 gr28, gr29, gr30, gr31;
+       struct ia64_pal_retval result = {0, 0, 0, 0};
+       struct cache_flush_args args = {0, 0, 0, 0};
+       long psr;
+
+       gr28 = gr29 = gr30 = gr31 = 0;
+       kvm_get_pal_call_data(vcpu, &gr28, &gr29, &gr30, &gr31);
+
+       if (gr31 != 0)
+               printk(KERN_ERR"vcpu:%p called cache_flush error!\n", vcpu);
+
+       /* Always call Host Pal in int=1 */
+       gr30 &= ~PAL_CACHE_FLUSH_CHK_INTRS;
+       args.cache_type = gr29;
+       args.operation = gr30;
+       smp_call_function(remote_pal_cache_flush,
+                               (void *)&args, 1, 1);
+       if (args.status != 0)
+               printk(KERN_ERR"pal_cache_flush error, "
+                               "status:0x%lx\n", args.status);
+       /*
+        * Call host PAL cache flush.
+        * Clear psr.ic when calling PAL_CACHE_FLUSH.
+        */
+       local_irq_save(psr);
+       result.status = ia64_pal_cache_flush(gr29, gr30, &result.v1,
+                                               &result.v0);
+       local_irq_restore(psr);
+       if (result.status != 0)
+               printk(KERN_ERR"vcpu:%p crashed due to cache_flush err:%ld, "
+                               "in1:%lx, in2:%lx\n",
+                               vcpu, result.status, gr29, gr30);
+
+#if 0
+       if (gr29 == PAL_CACHE_TYPE_COHERENT) {
+               cpus_setall(vcpu->arch.cache_coherent_map);
+               cpu_clear(vcpu->cpu, vcpu->arch.cache_coherent_map);
+               cpus_setall(cpu_cache_coherent_map);
+               cpu_clear(vcpu->cpu, cpu_cache_coherent_map);
+       }
+#endif
+       return result;
+}
+
+struct ia64_pal_retval pal_cache_summary(struct kvm_vcpu *vcpu)
+{
+
+       struct ia64_pal_retval result;
+
+       PAL_CALL(result, PAL_CACHE_SUMMARY, 0, 0, 0);
+       return result;
+}
+
+static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
+{
+
+       struct ia64_pal_retval result;
+
+       PAL_CALL(result, PAL_FREQ_BASE, 0, 0, 0);
+
+       /*
+        * PAL_FREQ_BASE may not be implemented in some platforms,
+        * call SAL instead.
+        */
+       if (result.v0 == 0) {
+               result.status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
+                                                       &result.v0,
+                                                       &result.v1);
+               result.v2 = 0;
+       }
+
+       return result;
+}
+
+static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
+{
+
+       struct ia64_pal_retval result;
+
+       PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
+       return result;
+}
+
+static struct ia64_pal_retval pal_logical_to_physica(struct kvm_vcpu *vcpu)
+{
+       struct ia64_pal_retval result;
+
+       INIT_PAL_STATUS_UNIMPLEMENTED(result);
+       return result;
+}
+
+static struct ia64_pal_retval pal_platform_addr(struct kvm_vcpu *vcpu)
+{
+
+       struct ia64_pal_retval result;
+
+       INIT_PAL_STATUS_SUCCESS(result);
+       return result;
+}
+
+static struct ia64_pal_retval pal_proc_get_features(struct kvm_vcpu *vcpu)
+{
+
+       struct ia64_pal_retval result = {0, 0, 0, 0};
+       u64 in0, in1, in2, in3;
+
+       kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+       result.status = ia64_pal_proc_get_features(&result.v0, &result.v1,
+                       &result.v2, in2);
+
+       return result;
+}
+
+static struct ia64_pal_retval pal_cache_info(struct kvm_vcpu *vcpu)
+{
+
+       pal_cache_config_info_t ci;
+       long status;
+       unsigned long in0, in1, in2, in3, r9, r10;
+
+       kvm_get_pal_call_data(vcpu, &in0, &in1, &in2, &in3);
+       status = ia64_pal_cache_config_info(in1, in2, &ci);
+       r9 = ci.pcci_info_1.pcci1_data;
+       r10 = ci.pcci_info_2.pcci2_data;
+       return ((struct ia64_pal_retval){status, r9, r10, 0});
+}
+
+#define GUEST_IMPL_VA_MSB      59
+#define GUEST_RID_BITS         18
+
+static struct ia64_pal_retval pal_vm_summary(struct kvm_vcpu *vcpu)
+{
+
+       pal_vm_info_1_u_t vminfo1;
+       pal_vm_info_2_u_t vminfo2;
+       struct ia64_pal_retval result;
+
+       PAL_CALL(result, PAL_VM_SUMMARY, 0, 0, 0);
+       if (!result.status) {
+               vminfo1.pvi1_val = result.v0;
+               vminfo1.pal_vm_info_1_s.max_itr_entry = 8;
+               vminfo1.pal_vm_info_1_s.max_dtr_entry = 8;
+               result.v0 = vminfo1.pvi1_val;
+               vminfo2.pal_vm_info_2_s.impl_va_msb = GUEST_IMPL_VA_MSB;
+               vminfo2.pal_vm_info_2_s.rid_size = GUEST_RID_BITS;
+               result.v1 = vminfo2.pvi2_val;
+       }
+
+       return result;
+}
+
+static struct ia64_pal_retval pal_vm_info(struct kvm_vcpu *vcpu)
+{
+       struct ia64_pal_retval result;
+
+       INIT_PAL_STATUS_UNIMPLEMENTED(result);
+
+       return result;
+}
+
+static  u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
+{
+       u64 index = 0;
+       struct exit_ctl_data *p;
+
+       p = kvm_get_exit_data(vcpu);
+       if (p && (p->exit_reason == EXIT_REASON_PAL_CALL))
+               index = p->u.pal_data.gr28;
+
+       return index;
+}
+
+int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+
+       u64 gr28;
+       struct ia64_pal_retval result;
+       int ret = 1;
+
+       gr28 = kvm_get_pal_call_index(vcpu);
+       /*printk("pal_call index:%lx\n",gr28);*/
+       switch (gr28) {
+       case PAL_CACHE_FLUSH:
+               result = pal_cache_flush(vcpu);
+               break;
+       case PAL_CACHE_SUMMARY:
+               result = pal_cache_summary(vcpu);
+               break;
+       case PAL_HALT_LIGHT:
+       {
+               vcpu->arch.timer_pending = 1;
+               INIT_PAL_STATUS_SUCCESS(result);
+               if (kvm_highest_pending_irq(vcpu) == -1)
+                       ret = kvm_emulate_halt(vcpu);
+
+       }
+               break;
+
+       case PAL_FREQ_RATIOS:
+               result = pal_freq_ratios(vcpu);
+               break;
+
+       case PAL_FREQ_BASE:
+               result = pal_freq_base(vcpu);
+               break;
+
+       case PAL_LOGICAL_TO_PHYSICAL :
+               result = pal_logical_to_physica(vcpu);
+               break;
+
+       case PAL_VM_SUMMARY :
+               result = pal_vm_summary(vcpu);
+               break;
+
+       case PAL_VM_INFO :
+               result = pal_vm_info(vcpu);
+               break;
+       case PAL_PLATFORM_ADDR :
+               result = pal_platform_addr(vcpu);
+               break;
+       case PAL_CACHE_INFO:
+               result = pal_cache_info(vcpu);
+               break;
+       case PAL_PTCE_INFO:
+               INIT_PAL_STATUS_SUCCESS(result);
+               result.v1 = (1L << 32) | 1L;
+               break;
+       case PAL_VM_PAGE_SIZE:
+               result.status = ia64_pal_vm_page_size(&result.v0,
+                                                       &result.v1);
+               break;
+       case PAL_RSE_INFO:
+               result.status = ia64_pal_rse_info(&result.v0,
+                                       (pal_hints_u_t *)&result.v1);
+               break;
+       case PAL_PROC_GET_FEATURES:
+               result = pal_proc_get_features(vcpu);
+               break;
+       case PAL_DEBUG_INFO:
+               result.status = ia64_pal_debug_info(&result.v0,
+                                                       &result.v1);
+               break;
+       case PAL_VERSION:
+               result.status = ia64_pal_version(
+                               (pal_version_u_t *)&result.v0,
+                               (pal_version_u_t *)&result.v1);
+
+               break;
+       case PAL_FIXED_ADDR:
+               result.status = PAL_STATUS_SUCCESS;
+               result.v0 = vcpu->vcpu_id;
+               break;
+       default:
+               INIT_PAL_STATUS_UNIMPLEMENTED(result);
+               printk(KERN_WARNING"kvm: Unsupported pal call,"
+                                       " index:0x%lx\n", gr28);
+       }
+       set_pal_result(vcpu, result);
+       return ret;
+}
+
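+/*
+ * Emulate guest SAL calls: a few services are passed through to the host
+ * SAL, the rest are answered with dummy values or simply logged.
+ */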
+static struct sal_ret_values sal_emulator(struct kvm *kvm,
+                               long index, unsigned long in1,
+                               unsigned long in2, unsigned long in3,
+                               unsigned long in4, unsigned long in5,
+                               unsigned long in6, unsigned long in7)
+{
+       unsigned long r9  = 0;
+       unsigned long r10 = 0;
+       long r11 = 0;
+       long status;
+
+       status = 0;
+       switch (index) {
+       case SAL_FREQ_BASE:
+               status = ia64_sal_freq_base(in1, &r9, &r10);
+               break;
+       case SAL_PCI_CONFIG_READ:
+               printk(KERN_WARNING"kvm: Not allowed to call here!"
+                       " SAL_PCI_CONFIG_READ\n");
+               break;
+       case SAL_PCI_CONFIG_WRITE:
+               printk(KERN_WARNING"kvm: Not allowed to call here!"
+                       " SAL_PCI_CONFIG_WRITE\n");
+               break;
+       case SAL_SET_VECTORS:
+               if (in1 == SAL_VECTOR_OS_BOOT_RENDEZ) {
+                       if (in4 != 0 || in5 != 0 || in6 != 0 || in7 != 0) {
+                               status = -2;
+                       } else {
+                               kvm->arch.rdv_sal_data.boot_ip = in2;
+                               kvm->arch.rdv_sal_data.boot_gp = in3;
+                       }
+                       printk("Rendezvous called! iip:%lx\n\n", in2);
+               } else
+                       printk(KERN_WARNING"kvm: called SAL_SET_VECTORS %lu,"
+                                                       " ignored...\n", in1);
+               break;
+       case SAL_GET_STATE_INFO:
+               /* No more info.  */
+               status = -5;
+               r9 = 0;
+               break;
+       case SAL_GET_STATE_INFO_SIZE:
+               /* Return a dummy size.  */
+               status = 0;
+               r9 = 128;
+               break;
+       case SAL_CLEAR_STATE_INFO:
+               /* Noop.  */
+               break;
+       case SAL_MC_RENDEZ:
+               printk(KERN_WARNING
+                       "kvm: called SAL_MC_RENDEZ. ignored...\n");
+               break;
+       case SAL_MC_SET_PARAMS:
+               printk(KERN_WARNING
+                       "kvm: called SAL_MC_SET_PARAMS. ignored...\n");
+               break;
+       case SAL_CACHE_FLUSH:
+               if (1) {
+                       /*
+                        * Flush using SAL.  This method is faster but
+                        * has a side effect on other vcpus running on
+                        * this cpu.
+                        */
+                       status = ia64_sal_cache_flush(in1);
+               } else {
+                       /*
+                        * Maybe a method without the side effect
+                        * needs to be implemented.
+                        */
+                       status = 0;
+               }
+               break;
+       case SAL_CACHE_INIT:
+               printk(KERN_WARNING
+                       "kvm: called SAL_CACHE_INIT.  ignored...\n");
+               break;
+       case SAL_UPDATE_PAL:
+               printk(KERN_WARNING
+                       "kvm: CALLED SAL_UPDATE_PAL.  ignored...\n");
+               break;
+       default:
+               printk(KERN_WARNING"kvm: called SAL_CALL with unknown index."
+                                               " index:%ld\n", index);
+               status = -1;
+               break;
+       }
+       return ((struct sal_ret_values) {status, r9, r10, r11});
+}
+
+static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1,
+               u64 *in2, u64 *in3, u64 *in4, u64 *in5, u64 *in6, u64 *in7){
+
+       struct exit_ctl_data *p;
+
+       p = kvm_get_exit_data(vcpu);
+
+       if (p) {
+               if (p->exit_reason == EXIT_REASON_SAL_CALL) {
+                       *in0 = p->u.sal_data.in0;
+                       *in1 = p->u.sal_data.in1;
+                       *in2 = p->u.sal_data.in2;
+                       *in3 = p->u.sal_data.in3;
+                       *in4 = p->u.sal_data.in4;
+                       *in5 = p->u.sal_data.in5;
+                       *in6 = p->u.sal_data.in6;
+                       *in7 = p->u.sal_data.in7;
+                       return ;
+               }
+       }
+       *in0 = 0;
+}
+
+void kvm_sal_emul(struct kvm_vcpu *vcpu)
+{
+
+       struct sal_ret_values result;
+       u64 index, in1, in2, in3, in4, in5, in6, in7;
+
+       kvm_get_sal_call_data(vcpu, &index, &in1, &in2,
+                       &in3, &in4, &in5, &in6, &in7);
+       result = sal_emulator(vcpu->kvm, index, in1, in2, in3,
+                                       in4, in5, in6, in7);
+       set_sal_result(vcpu, result);
+}
diff --git a/arch/ia64/kvm/kvm_minstate.h b/arch/ia64/kvm/kvm_minstate.h
new file mode 100644 (file)
index 0000000..13980d9
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ *  kvm_minstate.h: min save macros
+ *  Copyright (c) 2007, Intel Corporation.
+ *
+ *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ *  Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+
+#include <asm/asmmacro.h>
+#include <asm/types.h>
+#include <asm/kregs.h>
+#include "asm-offsets.h"
+
+#define KVM_MINSTATE_START_SAVE_MIN                                            \
+       mov ar.rsc = 0;/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
+       ;;                                                                      \
+       mov.m r28 = ar.rnat;                                                    \
+       addl r22 = VMM_RBS_OFFSET,r1;            /* compute base of RBS */      \
+       ;;                                                                      \
+       lfetch.fault.excl.nt1 [r22];                                            \
+       addl r1 = IA64_STK_OFFSET-VMM_PT_REGS_SIZE,r1;  /* compute base of memory stack */  \
+       mov r23 = ar.bspstore;                  /* save ar.bspstore */          \
+       ;;                                                                      \
+       mov ar.bspstore = r22;                          /* switch to kernel RBS */\
+       ;;                                                                      \
+       mov r18 = ar.bsp;                                                       \
+       mov ar.rsc = 0x3;     /* set eager mode, pl 0, little-endian, loadrs=0 */
+
+
+
+#define KVM_MINSTATE_END_SAVE_MIN                                              \
+       bsw.1;          /* switch back to bank 1 (must be last in insn group) */\
+       ;;
+
+
+#define PAL_VSA_SYNC_READ                                              \
+       /* begin to call pal vps sync_read */                           \
+       add r25 = VMM_VPD_BASE_OFFSET, r21;                             \
+       adds r20 = VMM_VCPU_VSA_BASE_OFFSET, r21;  /* entry point */    \
+       ;;                                                              \
+       ld8 r25 = [r25];      /* read vpd base */                       \
+       ld8 r20 = [r20];                                                \
+       ;;                                                              \
+       add r20 = PAL_VPS_SYNC_READ,r20;                                \
+       ;;                                                              \
+{ .mii;                                                                        \
+       nop 0x0;                                                        \
+       mov r24 = ip;                                                   \
+       mov b0 = r20;                                                   \
+       ;;                                                              \
+};                                                                     \
+{ .mmb;                                                                        \
+       add r24 = 0x20, r24;                                            \
+       nop 0x0;                                                        \
+       br.cond.sptk b0;        /*  call the service */                 \
+       ;;                                                              \
+};
+
+
+
+#define KVM_MINSTATE_GET_CURRENT(reg)   mov reg=r21
+
+/*
+ * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
+ * the minimum state necessary that allows us to turn psr.ic back
+ * on.
+ *
+ * Assumed state upon entry:
+ *  psr.ic: off
+ *  r31:       contains saved predicates (pr)
+ *
+ * Upon exit, the state is as follows:
+ *  psr.ic: off
+ *   r2 = points to &pt_regs.r16
+ *   r8 = contents of ar.ccv
+ *   r9 = contents of ar.csd
+ *  r10 = contents of ar.ssd
+ *  r11 = FPSR_DEFAULT
+ *  r12 = kernel sp (kernel virtual address)
+ *  r13 = points to current task_struct (kernel virtual address)
+ *  p15 = TRUE if psr.i is set in cr.ipsr
+ *  predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
+ *       preserved
+ *
+ * Note that psr.ic is NOT turned on by this macro.  This is so that
+ * we can pass interruption state as arguments to a handler.
+ */
+
+
+#define PT(f) (VMM_PT_REGS_##f##_OFFSET)
+
+#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)                  \
+       KVM_MINSTATE_GET_CURRENT(r16);  /* M (or M;;I) */       \
+       mov r27 = ar.rsc;         /* M */                       \
+       mov r20 = r1;         /* A */                           \
+       mov r25 = ar.unat;        /* M */                       \
+       mov r29 = cr.ipsr;        /* M */                       \
+       mov r26 = ar.pfs;         /* I */                       \
+       mov r18 = cr.isr;                                       \
+       COVER;              /* B;; (or nothing) */              \
+       ;;                                                      \
+       tbit.z p0,p15 = r29,IA64_PSR_I_BIT;                     \
+       mov r1 = r16;                                           \
+/*     mov r21=r16;    */                                      \
+       /* switch from user to kernel RBS: */                   \
+       ;;                                                      \
+       invala;             /* M */                             \
+       SAVE_IFS;                                               \
+       ;;                                                      \
+       KVM_MINSTATE_START_SAVE_MIN                             \
+       adds r17 = 2*L1_CACHE_BYTES,r1;/* cache-line size */    \
+       adds r16 = PT(CR_IPSR),r1;                              \
+       ;;                                                      \
+       lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;             \
+       st8 [r16] = r29;      /* save cr.ipsr */                \
+       ;;                                                      \
+       lfetch.fault.excl.nt1 [r17];                            \
+       tbit.nz p15,p0 = r29,IA64_PSR_I_BIT;                    \
+       mov r29 = b0                                            \
+       ;;                                                      \
+       adds r16 = PT(R8),r1; /* initialize first base pointer */\
+       adds r17 = PT(R9),r1; /* initialize second base pointer */\
+       ;;                                                      \
+.mem.offset 0,0; st8.spill [r16] = r8,16;                      \
+.mem.offset 8,0; st8.spill [r17] = r9,16;                      \
+       ;;                                                      \
+.mem.offset 0,0; st8.spill [r16] = r10,24;                     \
+.mem.offset 8,0; st8.spill [r17] = r11,24;                     \
+       ;;                                                      \
+       mov r9 = cr.iip;         /* M */                        \
+       mov r10 = ar.fpsr;        /* M */                       \
+       ;;                                                      \
+       st8 [r16] = r9,16;    /* save cr.iip */                 \
+       st8 [r17] = r30,16;   /* save cr.ifs */                 \
+       sub r18 = r18,r22;    /* r18=RSE.ndirty*8 */            \
+       ;;                                                      \
+       st8 [r16] = r25,16;   /* save ar.unat */                \
+       st8 [r17] = r26,16;    /* save ar.pfs */                \
+       shl r18 = r18,16;     /* calc ar.rsc used for "loadrs" */\
+       ;;                                                      \
+       st8 [r16] = r27,16;   /* save ar.rsc */                 \
+       st8 [r17] = r28,16;   /* save ar.rnat */                \
+       ;;          /* avoid RAW on r16 & r17 */                \
+       st8 [r16] = r23,16;   /* save ar.bspstore */            \
+       st8 [r17] = r31,16;   /* save predicates */             \
+       ;;                                                      \
+       st8 [r16] = r29,16;   /* save b0 */                     \
+       st8 [r17] = r18,16;   /* save ar.rsc value for "loadrs" */\
+       ;;                                                      \
+.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */  \
+.mem.offset 8,0; st8.spill [r17] = r12,16;                     \
+       adds r12 = -16,r1;    /* switch to kernel memory stack */  \
+       ;;                                                      \
+.mem.offset 0,0; st8.spill [r16] = r13,16;                     \
+.mem.offset 8,0; st8.spill [r17] = r10,16;     /* save ar.fpsr */\
+       mov r13 = r21;   /* establish `current' */              \
+       ;;                                                      \
+.mem.offset 0,0; st8.spill [r16] = r15,16;                     \
+.mem.offset 8,0; st8.spill [r17] = r14,16;                     \
+       ;;                                                      \
+.mem.offset 0,0; st8.spill [r16] = r2,16;                      \
+.mem.offset 8,0; st8.spill [r17] = r3,16;                      \
+       adds r2 = VMM_PT_REGS_R16_OFFSET,r1;                    \
+        ;;                                                     \
+       adds r16 = VMM_VCPU_IIPA_OFFSET,r13;                    \
+       adds r17 = VMM_VCPU_ISR_OFFSET,r13;                     \
+       mov r26 = cr.iipa;                                      \
+       mov r27 = cr.isr;                                       \
+       ;;                                                      \
+       st8 [r16] = r26;                                        \
+       st8 [r17] = r27;                                        \
+       ;;                                                      \
+       EXTRA;                                                  \
+       mov r8 = ar.ccv;                                        \
+       mov r9 = ar.csd;                                        \
+       mov r10 = ar.ssd;                                       \
+       movl r11 = FPSR_DEFAULT;   /* L-unit */                 \
+       adds r17 = VMM_VCPU_GP_OFFSET,r13;                      \
+       ;;                                                      \
+       ld8 r1 = [r17];/* establish kernel global pointer */    \
+       ;;                                                      \
+       PAL_VSA_SYNC_READ                                       \
+       KVM_MINSTATE_END_SAVE_MIN
+
+/*
+ * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
+ *
+ * Assumed state upon entry:
+ *  psr.ic: on
+ *  r2: points to &pt_regs.f6
+ *  r3: points to &pt_regs.f7
+ *  r8: contents of ar.ccv
+ *  r9: contents of ar.csd
+ *  r10:       contents of ar.ssd
+ *  r11:       FPSR_DEFAULT
+ *
+ * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
+ */
+#define KVM_SAVE_REST                          \
+.mem.offset 0,0; st8.spill [r2] = r16,16;      \
+.mem.offset 8,0; st8.spill [r3] = r17,16;      \
+       ;;                              \
+.mem.offset 0,0; st8.spill [r2] = r18,16;      \
+.mem.offset 8,0; st8.spill [r3] = r19,16;      \
+       ;;                              \
+.mem.offset 0,0; st8.spill [r2] = r20,16;      \
+.mem.offset 8,0; st8.spill [r3] = r21,16;      \
+       mov r18=b6;                     \
+       ;;                              \
+.mem.offset 0,0; st8.spill [r2] = r22,16;      \
+.mem.offset 8,0; st8.spill [r3] = r23,16;      \
+       mov r19 = b7;                           \
+       ;;                                      \
+.mem.offset 0,0; st8.spill [r2] = r24,16;      \
+.mem.offset 8,0; st8.spill [r3] = r25,16;      \
+       ;;                                      \
+.mem.offset 0,0; st8.spill [r2] = r26,16;      \
+.mem.offset 8,0; st8.spill [r3] = r27,16;      \
+       ;;                                      \
+.mem.offset 0,0; st8.spill [r2] = r28,16;      \
+.mem.offset 8,0; st8.spill [r3] = r29,16;      \
+       ;;                                      \
+.mem.offset 0,0; st8.spill [r2] = r30,16;      \
+.mem.offset 8,0; st8.spill [r3] = r31,32;      \
+       ;;                                      \
+       mov ar.fpsr = r11;                      \
+       st8 [r2] = r8,8;                        \
+       adds r24 = PT(B6)-PT(F7),r3;            \
+       adds r25 = PT(B7)-PT(F7),r3;            \
+       ;;                                      \
+       st8 [r24] = r18,16;       /* b6 */      \
+       st8 [r25] = r19,16;       /* b7 */      \
+       adds r2 = PT(R4)-PT(F6),r2;             \
+       adds r3 = PT(R5)-PT(F7),r3;             \
+       ;;                                      \
+       st8 [r24] = r9; /* ar.csd */            \
+       st8 [r25] = r10;        /* ar.ssd */    \
+       ;;                                      \
+       mov r18 = ar.unat;                      \
+       adds r19 = PT(EML_UNAT)-PT(R4),r2;      \
+       ;;                                      \
+       st8 [r19] = r18; /* eml_unat */         \
+
+
+#define KVM_SAVE_EXTRA                         \
+.mem.offset 0,0; st8.spill [r2] = r4,16;       \
+.mem.offset 8,0; st8.spill [r3] = r5,16;       \
+       ;;                                      \
+.mem.offset 0,0; st8.spill [r2] = r6,16;       \
+.mem.offset 8,0; st8.spill [r3] = r7;          \
+       ;;                                      \
+       mov r26 = ar.unat;                      \
+       ;;                                      \
+       st8 [r2] = r26;/* eml_unat */           \
+
+#define KVM_SAVE_MIN_WITH_COVER                KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
+#define KVM_SAVE_MIN_WITH_COVER_R19    KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
+#define KVM_SAVE_MIN                   KVM_DO_SAVE_MIN(     , mov r30 = r0, )
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
new file mode 100644 (file)
index 0000000..6d6cbcb
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef __KVM_IA64_LAPIC_H
+#define __KVM_IA64_LAPIC_H
+
+#include <linux/kvm_host.h>
+
+/*
+ * vlsapic
+ */
+struct kvm_lapic{
+       struct kvm_vcpu *vcpu;
+       uint64_t insvc[4];
+       uint64_t vhpi;
+       uint8_t xtp;
+       uint8_t pal_init_pending;
+       uint8_t pad[2];
+};
+
+int kvm_create_lapic(struct kvm_vcpu *vcpu);
+void kvm_free_lapic(struct kvm_vcpu *vcpu);
+
+int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
+int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
+int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
+
+#endif
diff --git a/arch/ia64/kvm/misc.h b/arch/ia64/kvm/misc.h
new file mode 100644 (file)
index 0000000..e585c46
--- /dev/null
@@ -0,0 +1,93 @@
+#ifndef __KVM_IA64_MISC_H
+#define __KVM_IA64_MISC_H
+
+#include <linux/kvm_host.h>
+/*
+ * misc.h
+ *     Copyright (C) 2007, Intel Corporation.
+ *     Xiantao Zhang  (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+/*
+ * Return the p2m base address on the host side.
+ */
+static inline uint64_t *kvm_host_get_pmt(struct kvm *kvm)
+{
+       return (uint64_t *)(kvm->arch.vm_base + KVM_P2M_OFS);
+}
+
+static inline void kvm_set_pmt_entry(struct kvm *kvm, gfn_t gfn,
+               u64 paddr, u64 mem_flags)
+{
+       uint64_t *pmt_base = kvm_host_get_pmt(kvm);
+       unsigned long pte;
+
+       pte = PAGE_ALIGN(paddr) | mem_flags;
+       pmt_base[gfn] = pte;
+}
+
+/*Function for translating host address to guest address*/
+
+static inline void *to_guest(struct kvm *kvm, void *addr)
+{
+       return (void *)((unsigned long)(addr) - kvm->arch.vm_base +
+                       KVM_VM_DATA_BASE);
+}
+
+/*Function for translating guest address to host address*/
+
+static inline void *to_host(struct kvm *kvm, void *addr)
+{
+       return (void *)((unsigned long)addr - KVM_VM_DATA_BASE
+                       + kvm->arch.vm_base);
+}
+
+/* Get host context of the vcpu */
+static inline union context *kvm_get_host_context(struct kvm_vcpu *vcpu)
+{
+       union context *ctx = &vcpu->arch.host;
+       return to_guest(vcpu->kvm, ctx);
+}
+
+/* Get guest context of the vcpu */
+static inline union context *kvm_get_guest_context(struct kvm_vcpu *vcpu)
+{
+       union context *ctx = &vcpu->arch.guest;
+       return  to_guest(vcpu->kvm, ctx);
+}
+
+/* Get the vcpu's exit data from the GVMM. */
+static inline struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu)
+{
+       return &vcpu->arch.exit_data;
+}
+
+/* Get the vcpu's MMIO request for the kvm module. */
+static inline struct kvm_mmio_req *kvm_get_vcpu_ioreq(struct kvm_vcpu *vcpu)
+{
+       struct exit_ctl_data *p_ctl_data;
+
+       if (vcpu) {
+               p_ctl_data = kvm_get_exit_data(vcpu);
+               if (p_ctl_data->exit_reason == EXIT_REASON_MMIO_INSTRUCTION)
+                       return &p_ctl_data->u.ioreq;
+       }
+
+       return NULL;
+}
+
+#endif
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
new file mode 100644 (file)
index 0000000..351bf70
--- /dev/null
@@ -0,0 +1,341 @@
+/*
+ * mmio.c: MMIO emulation components.
+ * Copyright (c) 2004, Intel Corporation.
+ *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
+ *  Kun Tian (Kevin Tian) (Kevin.tian@intel.com)
+ *
+ * Copyright (c) 2007 Intel Corporation  KVM support.
+ * Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
+ * Xiantao Zhang  (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <linux/kvm_host.h>
+
+#include "vcpu.h"
+
+static void vlsapic_write_xtp(struct kvm_vcpu *v, uint8_t val)
+{
+       VLSAPIC_XTP(v) = val;
+}
+
+/*
+ * LSAPIC OFFSET
+ */
+#define PIB_LOW_HALF(ofst)     !(ofst & (1 << 20))
+#define PIB_OFST_INTA          0x1E0000
+#define PIB_OFST_XTP           0x1E0008
+
+/*
+ * execute write IPI op.
+ */
+static void vlsapic_write_ipi(struct kvm_vcpu *vcpu,
+                                       uint64_t addr, uint64_t data)
+{
+       struct exit_ctl_data *p = &current_vcpu->arch.exit_data;
+       unsigned long psr;
+
+       local_irq_save(psr);
+
+       p->exit_reason = EXIT_REASON_IPI;
+       p->u.ipi_data.addr.val = addr;
+       p->u.ipi_data.data.val = data;
+       vmm_transition(current_vcpu);
+
+       local_irq_restore(psr);
+
+}
+
+void lsapic_write(struct kvm_vcpu *v, unsigned long addr,
+                       unsigned long length, unsigned long val)
+{
+       addr &= (PIB_SIZE - 1);
+
+       switch (addr) {
+       case PIB_OFST_INTA:
+               /*panic_domain(NULL, "Undefined write on PIB INTA\n");*/
+               panic_vm(v);
+               break;
+       case PIB_OFST_XTP:
+               if (length == 1) {
+                       vlsapic_write_xtp(v, val);
+               } else {
+                       /*panic_domain(NULL,
+                       "Undefined write on PIB XTP\n");*/
+                       panic_vm(v);
+               }
+               break;
+       default:
+               if (PIB_LOW_HALF(addr)) {
+                       /*lower half */
+                       if (length != 8)
+                               /*panic_domain(NULL,
+                               "Can't LHF write with size %ld!\n",
+                               length);*/
+                               panic_vm(v);
+                       else
+                               vlsapic_write_ipi(v, addr, val);
+               } else {
+                       /* upper half */
+                       /* printk("IPI-UHF write %lx\n", addr); */
+                       panic_vm(v);
+               }
+               break;
+       }
+}
+
+unsigned long lsapic_read(struct kvm_vcpu *v, unsigned long addr,
+               unsigned long length)
+{
+       uint64_t result = 0;
+
+       addr &= (PIB_SIZE - 1);
+
+       switch (addr) {
+       case PIB_OFST_INTA:
+               if (length == 1) /* 1 byte load */
+                       ; /* There is no i8259, there is no INTA access*/
+               else
+                       /*panic_domain(NULL,"Undefined read on PIB INTA\n"); */
+                       panic_vm(v);
+
+               break;
+       case PIB_OFST_XTP:
+               if (length == 1) {
+                       result = VLSAPIC_XTP(v);
+                       /* printk("read xtp %lx\n", result); */
+               } else {
+                       /*panic_domain(NULL,
+                       "Undefined read on PIB XTP\n");*/
+                       panic_vm(v);
+               }
+               break;
+       default:
+               panic_vm(v);
+               break;
+       }
+       return result;
+}
+
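+/*
+ * Perform one guest MMIO access: PIB-range accesses are emulated by the
+ * in-kernel vLSAPIC code above; anything else is described in exit_data
+ * and handed over via vmm_transition() for external handling.
+ */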
+static void mmio_access(struct kvm_vcpu *vcpu, u64 src_pa, u64 *dest,
+                                       u16 s, int ma, int dir)
+{
+       unsigned long iot;
+       struct exit_ctl_data *p = &vcpu->arch.exit_data;
+       unsigned long psr;
+
+       iot = __gpfn_is_io(src_pa >> PAGE_SHIFT);
+
+       local_irq_save(psr);
+
+       /* Intercept accesses to the PIB range */
+       if (iot == GPFN_PIB) {
+               if (!dir)
+                       lsapic_write(vcpu, src_pa, s, *dest);
+               else
+                       *dest = lsapic_read(vcpu, src_pa, s);
+               goto out;
+       }
+       p->exit_reason = EXIT_REASON_MMIO_INSTRUCTION;
+       p->u.ioreq.addr = src_pa;
+       p->u.ioreq.size = s;
+       p->u.ioreq.dir = dir;
+       if (dir == IOREQ_WRITE)
+               p->u.ioreq.data = *dest;
+       p->u.ioreq.state = STATE_IOREQ_READY;
+       vmm_transition(vcpu);
+
+       if (p->u.ioreq.state == STATE_IORESP_READY) {
+               if (dir == IOREQ_READ)
+                       *dest = p->u.ioreq.data;
+       } else
+               panic_vm(vcpu);
+out:
+       local_irq_restore(psr);
+       return ;
+}
+
+/*
+   dir 1: read 0:write
+   inst_type 0:integer 1:floating point
+ */
+#define SL_INTEGER     0       /* store/load integer */
+#define SL_FLOATING    1       /* store/load floating point */
+
+void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
+{
+       struct kvm_pt_regs *regs;
+       IA64_BUNDLE bundle;
+       int slot, dir = 0;
+       int inst_type = -1;
+       u16 size = 0;
+       u64 data, slot1a, slot1b, temp, update_reg;
+       s32 imm;
+       INST64 inst;
+
+       regs = vcpu_regs(vcpu);
+
+       if (fetch_code(vcpu, regs->cr_iip, &bundle)) {
+               /* if fetching the code fails, return and try again */
+               return;
+       }
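+       /* Select the bundle slot indicated by cr.ipsr.ri and decode it. */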
+       slot = ((struct ia64_psr *)&(regs->cr_ipsr))->ri;
+       if (!slot)
+               inst.inst = bundle.slot0;
+       else if (slot == 1) {
+               slot1a = bundle.slot1a;
+               slot1b = bundle.slot1b;
+               inst.inst = slot1a + (slot1b << 18);
+       } else if (slot == 2)
+               inst.inst = bundle.slot2;
+
+       /* Integer Load/Store */
+       if (inst.M1.major == 4 && inst.M1.m == 0 && inst.M1.x == 0) {
+               inst_type = SL_INTEGER;
+               size = (inst.M1.x6 & 0x3);
+               if ((inst.M1.x6 >> 2) > 0xb) {
+                       /*write*/
+                       dir = IOREQ_WRITE;
+                       data = vcpu_get_gr(vcpu, inst.M4.r2);
+               } else if ((inst.M1.x6 >> 2) < 0xb) {
+                       /*read*/
+                       dir = IOREQ_READ;
+               }
+       } else if (inst.M2.major == 4 && inst.M2.m == 1 && inst.M2.x == 0) {
+               /* Integer Load + Reg update */
+               inst_type = SL_INTEGER;
+               dir = IOREQ_READ;
+               size = (inst.M2.x6 & 0x3);
+               temp = vcpu_get_gr(vcpu, inst.M2.r3);
+               update_reg = vcpu_get_gr(vcpu, inst.M2.r2);
+               temp += update_reg;
+               vcpu_set_gr(vcpu, inst.M2.r3, temp, 0);
+       } else if (inst.M3.major == 5) {
+               /*Integer Load/Store + Imm update*/
+               inst_type = SL_INTEGER;
+               size = (inst.M3.x6&0x3);
+               if ((inst.M5.x6 >> 2) > 0xb) {
+                       /*write*/
+                       dir = IOREQ_WRITE;
+                       data = vcpu_get_gr(vcpu, inst.M5.r2);
+                       temp = vcpu_get_gr(vcpu, inst.M5.r3);
+                       imm = (inst.M5.s << 31) | (inst.M5.i << 30) |
+                               (inst.M5.imm7 << 23);
+                       temp += imm >> 23;
+                       vcpu_set_gr(vcpu, inst.M5.r3, temp, 0);
+
+               } else if ((inst.M3.x6 >> 2) < 0xb) {
+                       /*read*/
+                       dir = IOREQ_READ;
+                       temp = vcpu_get_gr(vcpu, inst.M3.r3);
+                       imm = (inst.M3.s << 31) | (inst.M3.i << 30) |
+                               (inst.M3.imm7 << 23);
+                       temp += imm >> 23;
+                       vcpu_set_gr(vcpu, inst.M3.r3, temp, 0);
+
+               }
+       } else if (inst.M9.major == 6 && inst.M9.x6 == 0x3B
+                               && inst.M9.m == 0 && inst.M9.x == 0) {
+               /* Floating-point spill*/
+               struct ia64_fpreg v;
+
+               inst_type = SL_FLOATING;
+               dir = IOREQ_WRITE;
+               vcpu_get_fpreg(vcpu, inst.M9.f2, &v);
+               /* Write high word. FIXME: this is a kludge!  */
+               v.u.bits[1] &= 0x3ffff;
+               mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
+               data = v.u.bits[0];
+               size = 3;
+       } else if (inst.M10.major == 7 && inst.M10.x6 == 0x3B) {
+               /* Floating-point spill + Imm update */
+               struct ia64_fpreg v;
+
+               inst_type = SL_FLOATING;
+               dir = IOREQ_WRITE;
+               vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
+               temp = vcpu_get_gr(vcpu, inst.M10.r3);
+               imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
+                       (inst.M10.imm7 << 23);
+               temp += imm >> 23;
+               vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
+
+               /* Write high word.FIXME: this is a kludge!  */
+               v.u.bits[1] &= 0x3ffff;
+               mmio_access(vcpu, padr + 8, &v.u.bits[1], 8, ma, IOREQ_WRITE);
+               data = v.u.bits[0];
+               size = 3;
+       } else if (inst.M10.major == 7 && inst.M10.x6 == 0x31) {
+               /* Floating-point stf8 + Imm update */
+               struct ia64_fpreg v;
+               inst_type = SL_FLOATING;
+               dir = IOREQ_WRITE;
+               size = 3;
+               vcpu_get_fpreg(vcpu, inst.M10.f2, &v);
+               data = v.u.bits[0]; /* Significand.  */
+               temp = vcpu_get_gr(vcpu, inst.M10.r3);
+               imm = (inst.M10.s << 31) | (inst.M10.i << 30) |
+                       (inst.M10.imm7 << 23);
+               temp += imm >> 23;
+               vcpu_set_gr(vcpu, inst.M10.r3, temp, 0);
+       } else if (inst.M15.major == 7 && inst.M15.x6 >= 0x2c
+                       && inst.M15.x6 <= 0x2f) {
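+               /* lfetch + Imm update: no data access to emulate, just update r3 and advance */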
+               temp = vcpu_get_gr(vcpu, inst.M15.r3);
+               imm = (inst.M15.s << 31) | (inst.M15.i << 30) |
+                       (inst.M15.imm7 << 23);
+               temp += imm >> 23;
+               vcpu_set_gr(vcpu, inst.M15.r3, temp, 0);
+
+               vcpu_increment_iip(vcpu);
+               return;
+       } else if (inst.M12.major == 6 && inst.M12.m == 1
+                       && inst.M12.x == 1 && inst.M12.x6 == 1) {
+               /* Floating-point Load Pair + Imm ldfp8 (M12) */
+               struct ia64_fpreg v;
+
+               inst_type = SL_FLOATING;
+               dir = IOREQ_READ;
+               size = 8;     /*ldfd*/
+               mmio_access(vcpu, padr, &data, size, ma, dir);
+               v.u.bits[0] = data;
+               v.u.bits[1] = 0x1003E;
+               vcpu_set_fpreg(vcpu, inst.M12.f1, &v);
+               padr += 8;
+               mmio_access(vcpu, padr, &data, size, ma, dir);
+               v.u.bits[0] = data;
+               v.u.bits[1] = 0x1003E;
+               vcpu_set_fpreg(vcpu, inst.M12.f2, &v);
+               padr += 8;
+               vcpu_set_gr(vcpu, inst.M12.r3, padr, 0);
+               vcpu_increment_iip(vcpu);
+               return;
+       } else {
+               inst_type = -1;
+               panic_vm(vcpu);
+       }
+
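+       /* size holds log2 of the access width here; do the MMIO access and,
+        * for integer loads, copy the result into the target register */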
+       size = 1 << size;
+       if (dir == IOREQ_WRITE) {
+               mmio_access(vcpu, padr, &data, size, ma, dir);
+       } else {
+               mmio_access(vcpu, padr, &data, size, ma, dir);
+               if (inst_type == SL_INTEGER)
+                       vcpu_set_gr(vcpu, inst.M1.r1, data, 0);
+               else
+                       panic_vm(vcpu);
+
+       }
+       vcpu_increment_iip(vcpu);
+}
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
new file mode 100644 (file)
index 0000000..e4f15d6
--- /dev/null
@@ -0,0 +1,918 @@
+/*
+ * arch/ia64/kvm/optvfault.S
+ * optimized virtualization fault handlers
+ *
+ * Copyright (C) 2006 Intel Co
+ *     Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ */
+
+#include <asm/asmmacro.h>
+#include <asm/processor.h>
+
+#include "vti.h"
+#include "asm-offsets.h"
+
+#define ACCE_MOV_FROM_AR
+#define ACCE_MOV_FROM_RR
+#define ACCE_MOV_TO_RR
+#define ACCE_RSM
+#define ACCE_SSM
+#define ACCE_MOV_TO_PSR
+#define ACCE_THASH
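+// Each ACCE_* define enables the corresponding accelerated handler below;
+// when one is not defined, that handler branches straight back to
+// kvm_virtualization_fault_back.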
+
+//mov r1=ar3
+GLOBAL_ENTRY(kvm_asm_mov_from_ar)
+#ifndef ACCE_MOV_FROM_AR
+       br.many kvm_virtualization_fault_back
+#endif
+       add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
+       add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
+       extr.u r17=r25,6,7
+       ;;
+       ld8 r18=[r18]
+       mov r19=ar.itc
+       mov r24=b0
+       ;;
+       add r19=r19,r18
+       addl r20=@gprel(asm_mov_to_reg),gp
+       ;;
+       st8 [r16] = r19
+       adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
+       shladd r17=r17,4,r20
+       ;;
+       mov b0=r17
+       br.sptk.few b0
+       ;;
+END(kvm_asm_mov_from_ar)
+
+
+// mov r1=rr[r3]
+GLOBAL_ENTRY(kvm_asm_mov_from_rr)
+#ifndef ACCE_MOV_FROM_RR
+       br.many kvm_virtualization_fault_back
+#endif
+       extr.u r16=r25,20,7
+       extr.u r17=r25,6,7
+       addl r20=@gprel(asm_mov_from_reg),gp
+       ;;
+       adds r30=kvm_asm_mov_from_rr_back_1-asm_mov_from_reg,r20
+       shladd r16=r16,4,r20
+       mov r24=b0
+       ;;
+       add r27=VMM_VCPU_VRR0_OFFSET,r21
+       mov b0=r16
+       br.many b0
+       ;;
+kvm_asm_mov_from_rr_back_1:
+       adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
+       adds r22=asm_mov_to_reg-asm_mov_from_reg,r20
+       shr.u r26=r19,61
+       ;;
+       shladd r17=r17,4,r22
+       shladd r27=r26,3,r27
+       ;;
+       ld8 r19=[r27]
+       mov b0=r17
+       br.many b0
+END(kvm_asm_mov_from_rr)
+
+
+// mov rr[r3]=r2
+GLOBAL_ENTRY(kvm_asm_mov_to_rr)
+#ifndef ACCE_MOV_TO_RR
+       br.many kvm_virtualization_fault_back
+#endif
+       extr.u r16=r25,20,7
+       extr.u r17=r25,13,7
+       addl r20=@gprel(asm_mov_from_reg),gp
+       ;;
+       adds r30=kvm_asm_mov_to_rr_back_1-asm_mov_from_reg,r20
+       shladd r16=r16,4,r20
+       mov r22=b0
+       ;;
+       add r27=VMM_VCPU_VRR0_OFFSET,r21
+       mov b0=r16
+       br.many b0
+       ;;
+kvm_asm_mov_to_rr_back_1:
+       adds r30=kvm_asm_mov_to_rr_back_2-asm_mov_from_reg,r20
+       shr.u r23=r19,61
+       shladd r17=r17,4,r20
+       ;;
+       //if rr6, go back
+       cmp.eq p6,p0=6,r23
+       mov b0=r22
+       (p6) br.cond.dpnt.many kvm_virtualization_fault_back
+       ;;
+       mov r28=r19
+       mov b0=r17
+       br.many b0
+kvm_asm_mov_to_rr_back_2:
+       adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
+       shladd r27=r23,3,r27
+       ;; // vrr.rid<<4 |0xe
+       st8 [r27]=r19
+       mov b0=r30
+       ;;
+       extr.u r16=r19,8,26
+       extr.u r18 =r19,2,6
+       mov r17 =0xe
+       ;;
+       shladd r16 = r16, 4, r17
+       extr.u r19 =r19,0,8
+       ;;
+       shl r16 = r16,8
+       ;;
+       add r19 = r19, r16
+       ;; //set ve 1
+       dep r19=-1,r19,0,1
+       cmp.lt p6,p0=14,r18
+       ;;
+       (p6) mov r18=14
+       ;;
+       (p6) dep r19=r18,r19,2,6
+       ;;
+       cmp.eq p6,p0=0,r23
+       ;;
+       cmp.eq.or p6,p0=4,r23
+       ;;
+       adds r16=VMM_VCPU_MODE_FLAGS_OFFSET,r21
+       (p6) adds r17=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
+       ;;
+       ld4 r16=[r16]
+       cmp.eq p7,p0=r0,r0
+       (p6) shladd r17=r23,1,r17
+       ;;
+       (p6) st8 [r17]=r19
+       (p6) tbit.nz p6,p7=r16,0
+       ;;
+       (p7) mov rr[r28]=r19
+       mov r24=r22
+       br.many b0
+END(kvm_asm_mov_to_rr)
+
+
+//rsm
+GLOBAL_ENTRY(kvm_asm_rsm)
+#ifndef ACCE_RSM
+       br.many kvm_virtualization_fault_back
+#endif
+       add r16=VMM_VPD_BASE_OFFSET,r21
+       extr.u r26=r25,6,21
+       extr.u r27=r25,31,2
+       ;;
+       ld8 r16=[r16]
+       extr.u r28=r25,36,1
+       dep r26=r27,r26,21,2
+       ;;
+       add r17=VPD_VPSR_START_OFFSET,r16
+       add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
+       //r26 is imm24
+       dep r26=r28,r26,23,1
+       ;;
+       ld8 r18=[r17]
+       movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI
+       ld4 r23=[r22]
+       sub r27=-1,r26
+       mov r24=b0
+       ;;
+       mov r20=cr.ipsr
+       or r28=r27,r28
+       and r19=r18,r27
+       ;;
+       st8 [r17]=r19
+       and r20=r20,r28
+       /* Commented out due to lack of FP lazy algorithm support
+       adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
+       ;;
+       ld8 r27=[r27]
+       ;;
+       tbit.nz p8,p0= r27,IA64_PSR_DFH_BIT
+       ;;
+       (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
+       */
+       ;;
+       mov cr.ipsr=r20
+       tbit.nz p6,p0=r23,0
+       ;;
+       tbit.z.or p6,p0=r26,IA64_PSR_DT_BIT
+       (p6) br.dptk kvm_resume_to_guest
+       ;;
+       add r26=VMM_VCPU_META_RR0_OFFSET,r21
+       add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
+       dep r23=-1,r23,0,1
+       ;;
+       ld8 r26=[r26]
+       ld8 r27=[r27]
+       st4 [r22]=r23
+       dep.z r28=4,61,3
+       ;;
+       mov rr[r0]=r26
+       ;;
+       mov rr[r28]=r27
+       ;;
+       srlz.d
+       br.many kvm_resume_to_guest
+END(kvm_asm_rsm)
+
+
+//ssm
+GLOBAL_ENTRY(kvm_asm_ssm)
+#ifndef ACCE_SSM
+       br.many kvm_virtualization_fault_back
+#endif
+       add r16=VMM_VPD_BASE_OFFSET,r21
+       extr.u r26=r25,6,21
+       extr.u r27=r25,31,2
+       ;;
+       ld8 r16=[r16]
+       extr.u r28=r25,36,1
+       dep r26=r27,r26,21,2
+       ;;  //r26 is imm24
+       add r27=VPD_VPSR_START_OFFSET,r16
+       dep r26=r28,r26,23,1
+       ;;  //r19 vpsr
+       ld8 r29=[r27]
+       mov r24=b0
+       ;;
+       add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
+       mov r20=cr.ipsr
+       or r19=r29,r26
+       ;;
+       ld4 r23=[r22]
+       st8 [r27]=r19
+       or r20=r20,r26
+       ;;
+       mov cr.ipsr=r20
+       movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
+       ;;
+       and r19=r28,r19
+       tbit.z p6,p0=r23,0
+       ;;
+       cmp.ne.or p6,p0=r28,r19
+       (p6) br.dptk kvm_asm_ssm_1
+       ;;
+       add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
+       add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
+       dep r23=0,r23,0,1
+       ;;
+       ld8 r26=[r26]
+       ld8 r27=[r27]
+       st4 [r22]=r23
+       dep.z r28=4,61,3
+       ;;
+       mov rr[r0]=r26
+       ;;
+       mov rr[r28]=r27
+       ;;
+       srlz.d
+       ;;
+kvm_asm_ssm_1:
+       tbit.nz p6,p0=r29,IA64_PSR_I_BIT
+       ;;
+       tbit.z.or p6,p0=r19,IA64_PSR_I_BIT
+       (p6) br.dptk kvm_resume_to_guest
+       ;;
+       add r29=VPD_VTPR_START_OFFSET,r16
+       add r30=VPD_VHPI_START_OFFSET,r16
+       ;;
+       ld8 r29=[r29]
+       ld8 r30=[r30]
+       ;;
+       extr.u r17=r29,4,4
+       extr.u r18=r29,16,1
+       ;;
+       dep r17=r18,r17,4,1
+       ;;
+       cmp.gt p6,p0=r30,r17
+       (p6) br.dpnt.few kvm_asm_dispatch_vexirq
+       br.many kvm_resume_to_guest
+END(kvm_asm_ssm)
+
+
+//mov psr.l=r2
+GLOBAL_ENTRY(kvm_asm_mov_to_psr)
+#ifndef ACCE_MOV_TO_PSR
+       br.many kvm_virtualization_fault_back
+#endif
+       add r16=VMM_VPD_BASE_OFFSET,r21
+       extr.u r26=r25,13,7 //r2
+       ;;
+       ld8 r16=[r16]
+       addl r20=@gprel(asm_mov_from_reg),gp
+       ;;
+       adds r30=kvm_asm_mov_to_psr_back-asm_mov_from_reg,r20
+       shladd r26=r26,4,r20
+       mov r24=b0
+       ;;
+       add r27=VPD_VPSR_START_OFFSET,r16
+       mov b0=r26
+       br.many b0
+       ;;
+kvm_asm_mov_to_psr_back:
+       ld8 r17=[r27]
+       add r22=VMM_VCPU_MODE_FLAGS_OFFSET,r21
+       dep r19=0,r19,32,32
+       ;;
+       ld4 r23=[r22]
+       dep r18=0,r17,0,32
+       ;;
+       add r30=r18,r19
+       movl r28=IA64_PSR_DT+IA64_PSR_RT+IA64_PSR_IT
+       ;;
+       st8 [r27]=r30
+       and r27=r28,r30
+       and r29=r28,r17
+       ;;
+       cmp.eq p5,p0=r29,r27
+       cmp.eq p6,p7=r28,r27
+       (p5) br.many kvm_asm_mov_to_psr_1
+       ;;
+       //virtual to physical
+       (p7) add r26=VMM_VCPU_META_RR0_OFFSET,r21
+       (p7) add r27=VMM_VCPU_META_RR0_OFFSET+8,r21
+       (p7) dep r23=-1,r23,0,1
+       ;;
+       //physical to virtual
+       (p6) add r26=VMM_VCPU_META_SAVED_RR0_OFFSET,r21
+       (p6) add r27=VMM_VCPU_META_SAVED_RR0_OFFSET+8,r21
+       (p6) dep r23=0,r23,0,1
+       ;;
+       ld8 r26=[r26]
+       ld8 r27=[r27]
+       st4 [r22]=r23
+       dep.z r28=4,61,3
+       ;;
+       mov rr[r0]=r26
+       ;;
+       mov rr[r28]=r27
+       ;;
+       srlz.d
+       ;;
+kvm_asm_mov_to_psr_1:
+       mov r20=cr.ipsr
+       movl r28=IA64_PSR_IC+IA64_PSR_I+IA64_PSR_DT+IA64_PSR_SI+IA64_PSR_RT
+       ;;
+       or r19=r19,r28
+       dep r20=0,r20,0,32
+       ;;
+       add r20=r19,r20
+       mov b0=r24
+       ;;
+       /* Commented out due to lack of FP lazy algorithm support
+       adds r27=IA64_VCPU_FP_PSR_OFFSET,r21
+       ;;
+       ld8 r27=[r27]
+       ;;
+       tbit.nz p8,p0=r27,IA64_PSR_DFH_BIT
+       ;;
+       (p8) dep r20=-1,r20,IA64_PSR_DFH_BIT,1
+       ;;
+       */
+       mov cr.ipsr=r20
+       cmp.ne p6,p0=r0,r0
+       ;;
+       tbit.nz.or p6,p0=r17,IA64_PSR_I_BIT
+       tbit.z.or p6,p0=r30,IA64_PSR_I_BIT
+       (p6) br.dpnt.few kvm_resume_to_guest
+       ;;
+       add r29=VPD_VTPR_START_OFFSET,r16
+       add r30=VPD_VHPI_START_OFFSET,r16
+       ;;
+       ld8 r29=[r29]
+       ld8 r30=[r30]
+       ;;
+       extr.u r17=r29,4,4
+       extr.u r18=r29,16,1
+       ;;
+       dep r17=r18,r17,4,1
+       ;;
+       cmp.gt p6,p0=r30,r17
+       (p6) br.dpnt.few kvm_asm_dispatch_vexirq
+       br.many kvm_resume_to_guest
+END(kvm_asm_mov_to_psr)
+
+
+ENTRY(kvm_asm_dispatch_vexirq)
+//increment iip
+       mov r16=cr.ipsr
+       ;;
+       extr.u r17=r16,IA64_PSR_RI_BIT,2
+       tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
+       ;;
+       (p6) mov r18=cr.iip
+       (p6) mov r17=r0
+       (p7) add r17=1,r17
+       ;;
+       (p6) add r18=0x10,r18
+       dep r16=r17,r16,IA64_PSR_RI_BIT,2
+       ;;
+       (p6) mov cr.iip=r18
+       mov cr.ipsr=r16
+       mov r30 =1
+       br.many kvm_dispatch_vexirq
+END(kvm_asm_dispatch_vexirq)
+
+// thash
+// TODO: add support when pta.vf = 1
+GLOBAL_ENTRY(kvm_asm_thash)
+#ifndef ACCE_THASH
+       br.many kvm_virtualization_fault_back
+#endif
+       extr.u r17=r25,20,7             // get r3 from opcode in r25
+       extr.u r18=r25,6,7              // get r1 from opcode in r25
+       addl r20=@gprel(asm_mov_from_reg),gp
+       ;;
+       adds r30=kvm_asm_thash_back1-asm_mov_from_reg,r20
+       shladd r17=r17,4,r20    // get addr of MOVE_FROM_REG(r17)
+       adds r16=VMM_VPD_BASE_OFFSET,r21        // get vcpu.arch.priveregs
+       ;;
+       mov r24=b0
+       ;;
+       ld8 r16=[r16]           // get VPD addr
+       mov b0=r17
+       br.many b0                      // r19 return value
+       ;;
+kvm_asm_thash_back1:
+       shr.u r23=r19,61                // get RR number
+       adds r25=VMM_VCPU_VRR0_OFFSET,r21       // get vcpu->arch.vrr[0]'s addr
+       adds r16=VMM_VPD_VPTA_OFFSET,r16        // get vpta
+       ;;
+       shladd r27=r23,3,r25    // get vcpu->arch.vrr[r23]'s addr
+       ld8 r17=[r16]           // get PTA
+       mov r26=1
+       ;;
+       extr.u r29=r17,2,6              // get pta.size
+       ld8 r25=[r27]           // get vcpu->arch.vrr[r23]'s value
+       ;;
+       extr.u r25=r25,2,6              // get rr.ps
+       shl r22=r26,r29         // 1UL << pta.size
+       ;;
+       shr.u r23=r19,r25               // vaddr >> rr.ps
+       adds r26=3,r29          // pta.size + 3
+       shl r27=r17,3           // pta << 3
+       ;;
+       shl r23=r23,3           // (vaddr >> rr.ps) << 3
+       shr.u r27=r27,r26               // (pta << 3) >> (pta.size+3)
+       movl r16=7<<61
+       ;;
+       adds r22=-1,r22         // (1UL << pta.size) - 1
+       shl r27=r27,r29         // ((pta<<3)>>(pta.size+3))<<pta.size
+       and r19=r19,r16         // vaddr & VRN_MASK
+       ;;
+       and r22=r22,r23         // vhpt_offset
+       or r19=r19,r27 // (vadr&VRN_MASK)|(((pta<<3)>>(pta.size + 3))<<pta.size)
+       adds r26=asm_mov_to_reg-asm_mov_from_reg,r20
+       ;;
+       or r19=r19,r22          // calc pval
+       shladd r17=r18,4,r26
+       adds r30=kvm_resume_to_guest-asm_mov_from_reg,r20
+       ;;
+       mov b0=r17
+       br.many b0
+END(kvm_asm_thash)
+
+#define MOV_TO_REG0    \
+{;                     \
+       nop.b 0x0;              \
+       nop.b 0x0;              \
+       nop.b 0x0;              \
+       ;;                      \
+};
+
+
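+// Each MOV_TO_REG/MOV_FROM_REG entry below expands to a single 16-byte bundle,
+// so the asm_mov_{to,from}_reg tables are indexed by (register number << 4),
+// matching the shladd ...,4,... computations in the handlers above.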
+#define MOV_TO_REG(n)  \
+{;                     \
+       mov r##n##=r19; \
+       mov b0=r30;     \
+       br.sptk.many b0;        \
+       ;;                      \
+};
+
+
+#define MOV_FROM_REG(n)        \
+{;                             \
+       mov r19=r##n##;         \
+       mov b0=r30;             \
+       br.sptk.many b0;                \
+       ;;                              \
+};
+
+
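+// Banked registers r16-r31 live in bank 1 while these handlers run in bank 0;
+// the bank0 helpers below switch banks with bsw.1/bsw.0 and stage the value
+// through r2 (temporarily saved in r26).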
+#define MOV_TO_BANK0_REG(n)                    \
+ENTRY_MIN_ALIGN(asm_mov_to_bank0_reg##n##);    \
+{;                                             \
+       mov r26=r2;                             \
+       mov r2=r19;                             \
+       bsw.1;                                  \
+       ;;                                              \
+};                                             \
+{;                                             \
+       mov r##n##=r2;                          \
+       nop.b 0x0;                                      \
+       bsw.0;                                  \
+       ;;                                              \
+};                                             \
+{;                                             \
+       mov r2=r26;                             \
+       mov b0=r30;                             \
+       br.sptk.many b0;                                \
+       ;;                                              \
+};                                             \
+END(asm_mov_to_bank0_reg##n##)
+
+
+#define MOV_FROM_BANK0_REG(n)                  \
+ENTRY_MIN_ALIGN(asm_mov_from_bank0_reg##n##);  \
+{;                                             \
+       mov r26=r2;                             \
+       nop.b 0x0;                                      \
+       bsw.1;                                  \
+       ;;                                              \
+};                                             \
+{;                                             \
+       mov r2=r##n##;                          \
+       nop.b 0x0;                                      \
+       bsw.0;                                  \
+       ;;                                              \
+};                                             \
+{;                                             \
+       mov r19=r2;                             \
+       mov r2=r26;                             \
+       mov b0=r30;                             \
+};                                             \
+{;                                             \
+       nop.b 0x0;                                      \
+       nop.b 0x0;                                      \
+       br.sptk.many b0;                                \
+       ;;                                              \
+};                                             \
+END(asm_mov_from_bank0_reg##n##)
+
+
+#define JMP_TO_MOV_TO_BANK0_REG(n)             \
+{;                                             \
+       nop.b 0x0;                                      \
+       nop.b 0x0;                                      \
+       br.sptk.many asm_mov_to_bank0_reg##n##; \
+       ;;                                              \
+}
+
+
+#define JMP_TO_MOV_FROM_BANK0_REG(n)           \
+{;                                             \
+       nop.b 0x0;                                      \
+       nop.b 0x0;                                      \
+       br.sptk.many asm_mov_from_bank0_reg##n##;       \
+       ;;                                              \
+}
+
+
+MOV_FROM_BANK0_REG(16)
+MOV_FROM_BANK0_REG(17)
+MOV_FROM_BANK0_REG(18)
+MOV_FROM_BANK0_REG(19)
+MOV_FROM_BANK0_REG(20)
+MOV_FROM_BANK0_REG(21)
+MOV_FROM_BANK0_REG(22)
+MOV_FROM_BANK0_REG(23)
+MOV_FROM_BANK0_REG(24)
+MOV_FROM_BANK0_REG(25)
+MOV_FROM_BANK0_REG(26)
+MOV_FROM_BANK0_REG(27)
+MOV_FROM_BANK0_REG(28)
+MOV_FROM_BANK0_REG(29)
+MOV_FROM_BANK0_REG(30)
+MOV_FROM_BANK0_REG(31)
+
+
+// mov from reg table
+ENTRY(asm_mov_from_reg)
+       MOV_FROM_REG(0)
+       MOV_FROM_REG(1)
+       MOV_FROM_REG(2)
+       MOV_FROM_REG(3)
+       MOV_FROM_REG(4)
+       MOV_FROM_REG(5)
+       MOV_FROM_REG(6)
+       MOV_FROM_REG(7)
+       MOV_FROM_REG(8)
+       MOV_FROM_REG(9)
+       MOV_FROM_REG(10)
+       MOV_FROM_REG(11)
+       MOV_FROM_REG(12)
+       MOV_FROM_REG(13)
+       MOV_FROM_REG(14)
+       MOV_FROM_REG(15)
+       JMP_TO_MOV_FROM_BANK0_REG(16)
+       JMP_TO_MOV_FROM_BANK0_REG(17)
+       JMP_TO_MOV_FROM_BANK0_REG(18)
+       JMP_TO_MOV_FROM_BANK0_REG(19)
+       JMP_TO_MOV_FROM_BANK0_REG(20)
+       JMP_TO_MOV_FROM_BANK0_REG(21)
+       JMP_TO_MOV_FROM_BANK0_REG(22)
+       JMP_TO_MOV_FROM_BANK0_REG(23)
+       JMP_TO_MOV_FROM_BANK0_REG(24)
+       JMP_TO_MOV_FROM_BANK0_REG(25)
+       JMP_TO_MOV_FROM_BANK0_REG(26)
+       JMP_TO_MOV_FROM_BANK0_REG(27)
+       JMP_TO_MOV_FROM_BANK0_REG(28)
+       JMP_TO_MOV_FROM_BANK0_REG(29)
+       JMP_TO_MOV_FROM_BANK0_REG(30)
+       JMP_TO_MOV_FROM_BANK0_REG(31)
+       MOV_FROM_REG(32)
+       MOV_FROM_REG(33)
+       MOV_FROM_REG(34)
+       MOV_FROM_REG(35)
+       MOV_FROM_REG(36)
+       MOV_FROM_REG(37)
+       MOV_FROM_REG(38)
+       MOV_FROM_REG(39)
+       MOV_FROM_REG(40)
+       MOV_FROM_REG(41)
+       MOV_FROM_REG(42)
+       MOV_FROM_REG(43)
+       MOV_FROM_REG(44)
+       MOV_FROM_REG(45)
+       MOV_FROM_REG(46)
+       MOV_FROM_REG(47)
+       MOV_FROM_REG(48)
+       MOV_FROM_REG(49)
+       MOV_FROM_REG(50)
+       MOV_FROM_REG(51)
+       MOV_FROM_REG(52)
+       MOV_FROM_REG(53)
+       MOV_FROM_REG(54)
+       MOV_FROM_REG(55)
+       MOV_FROM_REG(56)
+       MOV_FROM_REG(57)
+       MOV_FROM_REG(58)
+       MOV_FROM_REG(59)
+       MOV_FROM_REG(60)
+       MOV_FROM_REG(61)
+       MOV_FROM_REG(62)
+       MOV_FROM_REG(63)
+       MOV_FROM_REG(64)
+       MOV_FROM_REG(65)
+       MOV_FROM_REG(66)
+       MOV_FROM_REG(67)
+       MOV_FROM_REG(68)
+       MOV_FROM_REG(69)
+       MOV_FROM_REG(70)
+       MOV_FROM_REG(71)
+       MOV_FROM_REG(72)
+       MOV_FROM_REG(73)
+       MOV_FROM_REG(74)
+       MOV_FROM_REG(75)
+       MOV_FROM_REG(76)
+       MOV_FROM_REG(77)
+       MOV_FROM_REG(78)
+       MOV_FROM_REG(79)
+       MOV_FROM_REG(80)
+       MOV_FROM_REG(81)
+       MOV_FROM_REG(82)
+       MOV_FROM_REG(83)
+       MOV_FROM_REG(84)
+       MOV_FROM_REG(85)
+       MOV_FROM_REG(86)
+       MOV_FROM_REG(87)
+       MOV_FROM_REG(88)
+       MOV_FROM_REG(89)
+       MOV_FROM_REG(90)
+       MOV_FROM_REG(91)
+       MOV_FROM_REG(92)
+       MOV_FROM_REG(93)
+       MOV_FROM_REG(94)
+       MOV_FROM_REG(95)
+       MOV_FROM_REG(96)
+       MOV_FROM_REG(97)
+       MOV_FROM_REG(98)
+       MOV_FROM_REG(99)
+       MOV_FROM_REG(100)
+       MOV_FROM_REG(101)
+       MOV_FROM_REG(102)
+       MOV_FROM_REG(103)
+       MOV_FROM_REG(104)
+       MOV_FROM_REG(105)
+       MOV_FROM_REG(106)
+       MOV_FROM_REG(107)
+       MOV_FROM_REG(108)
+       MOV_FROM_REG(109)
+       MOV_FROM_REG(110)
+       MOV_FROM_REG(111)
+       MOV_FROM_REG(112)
+       MOV_FROM_REG(113)
+       MOV_FROM_REG(114)
+       MOV_FROM_REG(115)
+       MOV_FROM_REG(116)
+       MOV_FROM_REG(117)
+       MOV_FROM_REG(118)
+       MOV_FROM_REG(119)
+       MOV_FROM_REG(120)
+       MOV_FROM_REG(121)
+       MOV_FROM_REG(122)
+       MOV_FROM_REG(123)
+       MOV_FROM_REG(124)
+       MOV_FROM_REG(125)
+       MOV_FROM_REG(126)
+       MOV_FROM_REG(127)
+END(asm_mov_from_reg)
+
+
+/* Must be in bank 0.
+ * Parameters:
+ * r31: pr
+ * r24: b0
+ */
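+/* Advances the guest IIP past the trapping instruction, then resumes the
+ * guest via PAL VPS: RESUME_HANDLER when vpsr.ic is clear, RESUME_NORMAL
+ * otherwise. */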
+ENTRY(kvm_resume_to_guest)
+       adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
+       ;;
+       ld8 r1 =[r16]
+       adds r20 = VMM_VCPU_VSA_BASE_OFFSET,r21
+       ;;
+       mov r16=cr.ipsr
+       ;;
+       ld8 r20 = [r20]
+       adds r19=VMM_VPD_BASE_OFFSET,r21
+       ;;
+       ld8 r25=[r19]
+       extr.u r17=r16,IA64_PSR_RI_BIT,2
+       tbit.nz p6,p7=r16,IA64_PSR_RI_BIT+1
+       ;;
+       (p6) mov r18=cr.iip
+       (p6) mov r17=r0
+       ;;
+       (p6) add r18=0x10,r18
+       (p7) add r17=1,r17
+       ;;
+       (p6) mov cr.iip=r18
+       dep r16=r17,r16,IA64_PSR_RI_BIT,2
+       ;;
+       mov cr.ipsr=r16
+       adds r19= VPD_VPSR_START_OFFSET,r25
+       add r28=PAL_VPS_RESUME_NORMAL,r20
+       add r29=PAL_VPS_RESUME_HANDLER,r20
+       ;;
+       ld8 r19=[r19]
+       mov b0=r29
+       cmp.ne p6,p7 = r0,r0
+       ;;
+       tbit.z p6,p7 = r19,IA64_PSR_IC_BIT              // p1=vpsr.ic
+       ;;
+       (p6) ld8 r26=[r25]
+       (p7) mov b0=r28
+       mov pr=r31,-2
+       br.sptk.many b0             // call pal service
+       ;;
+END(kvm_resume_to_guest)
+
+
+MOV_TO_BANK0_REG(16)
+MOV_TO_BANK0_REG(17)
+MOV_TO_BANK0_REG(18)
+MOV_TO_BANK0_REG(19)
+MOV_TO_BANK0_REG(20)
+MOV_TO_BANK0_REG(21)
+MOV_TO_BANK0_REG(22)
+MOV_TO_BANK0_REG(23)
+MOV_TO_BANK0_REG(24)
+MOV_TO_BANK0_REG(25)
+MOV_TO_BANK0_REG(26)
+MOV_TO_BANK0_REG(27)
+MOV_TO_BANK0_REG(28)
+MOV_TO_BANK0_REG(29)
+MOV_TO_BANK0_REG(30)
+MOV_TO_BANK0_REG(31)
+
+
+// mov to reg table
+ENTRY(asm_mov_to_reg)
+       MOV_TO_REG0
+       MOV_TO_REG(1)
+       MOV_TO_REG(2)
+       MOV_TO_REG(3)
+       MOV_TO_REG(4)
+       MOV_TO_REG(5)
+       MOV_TO_REG(6)
+       MOV_TO_REG(7)
+       MOV_TO_REG(8)
+       MOV_TO_REG(9)
+       MOV_TO_REG(10)
+       MOV_TO_REG(11)
+       MOV_TO_REG(12)
+       MOV_TO_REG(13)
+       MOV_TO_REG(14)
+       MOV_TO_REG(15)
+       JMP_TO_MOV_TO_BANK0_REG(16)
+       JMP_TO_MOV_TO_BANK0_REG(17)
+       JMP_TO_MOV_TO_BANK0_REG(18)
+       JMP_TO_MOV_TO_BANK0_REG(19)
+       JMP_TO_MOV_TO_BANK0_REG(20)
+       JMP_TO_MOV_TO_BANK0_REG(21)
+       JMP_TO_MOV_TO_BANK0_REG(22)
+       JMP_TO_MOV_TO_BANK0_REG(23)
+       JMP_TO_MOV_TO_BANK0_REG(24)
+       JMP_TO_MOV_TO_BANK0_REG(25)
+       JMP_TO_MOV_TO_BANK0_REG(26)
+       JMP_TO_MOV_TO_BANK0_REG(27)
+       JMP_TO_MOV_TO_BANK0_REG(28)
+       JMP_TO_MOV_TO_BANK0_REG(29)
+       JMP_TO_MOV_TO_BANK0_REG(30)
+       JMP_TO_MOV_TO_BANK0_REG(31)
+       MOV_TO_REG(32)
+       MOV_TO_REG(33)
+       MOV_TO_REG(34)
+       MOV_TO_REG(35)
+       MOV_TO_REG(36)
+       MOV_TO_REG(37)
+       MOV_TO_REG(38)
+       MOV_TO_REG(39)
+       MOV_TO_REG(40)
+       MOV_TO_REG(41)
+       MOV_TO_REG(42)
+       MOV_TO_REG(43)
+       MOV_TO_REG(44)
+       MOV_TO_REG(45)
+       MOV_TO_REG(46)
+       MOV_TO_REG(47)
+       MOV_TO_REG(48)
+       MOV_TO_REG(49)
+       MOV_TO_REG(50)
+       MOV_TO_REG(51)
+       MOV_TO_REG(52)
+       MOV_TO_REG(53)
+       MOV_TO_REG(54)
+       MOV_TO_REG(55)
+       MOV_TO_REG(56)
+       MOV_TO_REG(57)
+       MOV_TO_REG(58)
+       MOV_TO_REG(59)
+       MOV_TO_REG(60)
+       MOV_TO_REG(61)
+       MOV_TO_REG(62)
+       MOV_TO_REG(63)
+       MOV_TO_REG(64)
+       MOV_TO_REG(65)
+       MOV_TO_REG(66)
+       MOV_TO_REG(67)
+       MOV_TO_REG(68)
+       MOV_TO_REG(69)
+       MOV_TO_REG(70)
+       MOV_TO_REG(71)
+       MOV_TO_REG(72)
+       MOV_TO_REG(73)
+       MOV_TO_REG(74)
+       MOV_TO_REG(75)
+       MOV_TO_REG(76)
+       MOV_TO_REG(77)
+       MOV_TO_REG(78)
+       MOV_TO_REG(79)
+       MOV_TO_REG(80)
+       MOV_TO_REG(81)
+       MOV_TO_REG(82)
+       MOV_TO_REG(83)
+       MOV_TO_REG(84)
+       MOV_TO_REG(85)
+       MOV_TO_REG(86)
+       MOV_TO_REG(87)
+       MOV_TO_REG(88)
+       MOV_TO_REG(89)
+       MOV_TO_REG(90)
+       MOV_TO_REG(91)
+       MOV_TO_REG(92)
+       MOV_TO_REG(93)
+       MOV_TO_REG(94)
+       MOV_TO_REG(95)
+       MOV_TO_REG(96)
+       MOV_TO_REG(97)
+       MOV_TO_REG(98)
+       MOV_TO_REG(99)
+       MOV_TO_REG(100)
+       MOV_TO_REG(101)
+       MOV_TO_REG(102)
+       MOV_TO_REG(103)
+       MOV_TO_REG(104)
+       MOV_TO_REG(105)
+       MOV_TO_REG(106)
+       MOV_TO_REG(107)
+       MOV_TO_REG(108)
+       MOV_TO_REG(109)
+       MOV_TO_REG(110)
+       MOV_TO_REG(111)
+       MOV_TO_REG(112)
+       MOV_TO_REG(113)
+       MOV_TO_REG(114)
+       MOV_TO_REG(115)
+       MOV_TO_REG(116)
+       MOV_TO_REG(117)
+       MOV_TO_REG(118)
+       MOV_TO_REG(119)
+       MOV_TO_REG(120)
+       MOV_TO_REG(121)
+       MOV_TO_REG(122)
+       MOV_TO_REG(123)
+       MOV_TO_REG(124)
+       MOV_TO_REG(125)
+       MOV_TO_REG(126)
+       MOV_TO_REG(127)
+END(asm_mov_to_reg)
diff --git a/arch/ia64/kvm/process.c b/arch/ia64/kvm/process.c
new file mode 100644 (file)
index 0000000..5a33f7e
--- /dev/null
@@ -0,0 +1,970 @@
+/*
+ * process.c: handle interruption injection for guests.
+ * Copyright (c) 2005, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ *     Shaofan Li (Susue Li) <susie.li@intel.com>
+ *     Xiaoyan Feng (Fleming Feng)  <fleming.feng@intel.com>
+ *     Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ *     Xiantao Zhang (xiantao.zhang@intel.com)
+ */
+#include "vcpu.h"
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+#include <asm/fpswa.h>
+#include <asm/kregs.h>
+#include <asm/tlb.h>
+
+fpswa_interface_t *vmm_fpswa_interface;
+
+#define IA64_VHPT_TRANS_VECTOR                 0x0000
+#define IA64_INST_TLB_VECTOR                   0x0400
+#define IA64_DATA_TLB_VECTOR                   0x0800
+#define IA64_ALT_INST_TLB_VECTOR               0x0c00
+#define IA64_ALT_DATA_TLB_VECTOR               0x1000
+#define IA64_DATA_NESTED_TLB_VECTOR            0x1400
+#define IA64_INST_KEY_MISS_VECTOR              0x1800
+#define IA64_DATA_KEY_MISS_VECTOR              0x1c00
+#define IA64_DIRTY_BIT_VECTOR                  0x2000
+#define IA64_INST_ACCESS_BIT_VECTOR            0x2400
+#define IA64_DATA_ACCESS_BIT_VECTOR            0x2800
+#define IA64_BREAK_VECTOR                      0x2c00
+#define IA64_EXTINT_VECTOR                     0x3000
+#define IA64_PAGE_NOT_PRESENT_VECTOR           0x5000
+#define IA64_KEY_PERMISSION_VECTOR             0x5100
+#define IA64_INST_ACCESS_RIGHTS_VECTOR         0x5200
+#define IA64_DATA_ACCESS_RIGHTS_VECTOR         0x5300
+#define IA64_GENEX_VECTOR                      0x5400
+#define IA64_DISABLED_FPREG_VECTOR             0x5500
+#define IA64_NAT_CONSUMPTION_VECTOR            0x5600
+#define IA64_SPECULATION_VECTOR                0x5700 /* UNUSED */
+#define IA64_DEBUG_VECTOR                      0x5900
+#define IA64_UNALIGNED_REF_VECTOR              0x5a00
+#define IA64_UNSUPPORTED_DATA_REF_VECTOR       0x5b00
+#define IA64_FP_FAULT_VECTOR                   0x5c00
+#define IA64_FP_TRAP_VECTOR                    0x5d00
+#define IA64_LOWERPRIV_TRANSFER_TRAP_VECTOR    0x5e00
+#define IA64_TAKEN_BRANCH_TRAP_VECTOR          0x5f00
+#define IA64_SINGLE_STEP_TRAP_VECTOR           0x6000
+
+/* SDM vol2 5.5 - IVA based interruption handling */
+#define INITIAL_PSR_VALUE_AT_INTERRUPTION (IA64_PSR_UP | IA64_PSR_MFL |\
+                       IA64_PSR_MFH | IA64_PSR_PK | IA64_PSR_DT |      \
+                       IA64_PSR_RT | IA64_PSR_MC|IA64_PSR_IT)
+
+#define DOMN_PAL_REQUEST    0x110000
+#define DOMN_SAL_REQUEST    0x110001
+
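+/* Offsets of the interruption handlers within the guest IVT, indexed by
+ * vector number (SDM Vol2, section 5.5). */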
+static u64 vec2off[68] = {0x0, 0x400, 0x800, 0xc00, 0x1000, 0x1400, 0x1800,
+       0x1c00, 0x2000, 0x2400, 0x2800, 0x2c00, 0x3000, 0x3400, 0x3800, 0x3c00,
+       0x4000, 0x4400, 0x4800, 0x4c00, 0x5000, 0x5100, 0x5200, 0x5300, 0x5400,
+       0x5500, 0x5600, 0x5700, 0x5800, 0x5900, 0x5a00, 0x5b00, 0x5c00, 0x5d00,
+       0x5e00, 0x5f00, 0x6000, 0x6100, 0x6200, 0x6300, 0x6400, 0x6500, 0x6600,
+       0x6700, 0x6800, 0x6900, 0x6a00, 0x6b00, 0x6c00, 0x6d00, 0x6e00, 0x6f00,
+       0x7000, 0x7100, 0x7200, 0x7300, 0x7400, 0x7500, 0x7600, 0x7700, 0x7800,
+       0x7900, 0x7a00, 0x7b00, 0x7c00, 0x7d00, 0x7e00, 0x7f00
+};
+
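+/* Save interruption state into the VPD before injection: switch the guest to
+ * bank 0, mirror the relevant IPSR bits and IIP/IFS/IIPA when vpsr.ic is set,
+ * and derive the new guest PSR from DCR. */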
+static void collect_interruption(struct kvm_vcpu *vcpu)
+{
+       u64 ipsr;
+       u64 vdcr;
+       u64 vifs;
+       unsigned long vpsr;
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+       vpsr = vcpu_get_psr(vcpu);
+       vcpu_bsw0(vcpu);
+       if (vpsr & IA64_PSR_IC) {
+
+               /* Sync mpsr id/da/dd/ss/ed bits to vipsr,
+                * since after the guest does rfi we still want these
+                * bits set in mpsr
+                */
+
+               ipsr = regs->cr_ipsr;
+               vpsr = vpsr | (ipsr & (IA64_PSR_ID | IA64_PSR_DA
+                                       | IA64_PSR_DD | IA64_PSR_SS
+                                       | IA64_PSR_ED));
+               vcpu_set_ipsr(vcpu, vpsr);
+
+               /* Currently, for traps, we do not advance IIP to the next
+                * instruction, because we assume the caller has already
+                * set up IIP correctly.
+                */
+
+               vcpu_set_iip(vcpu, regs->cr_iip);
+
+               /* set vifs.v to zero */
+               vifs = VCPU(vcpu, ifs);
+               vifs &= ~IA64_IFS_V;
+               vcpu_set_ifs(vcpu, vifs);
+
+               vcpu_set_iipa(vcpu, VMX(vcpu, cr_iipa));
+       }
+
+       vdcr = VCPU(vcpu, dcr);
+
+       /* Set guest psr
+        * up/mfl/mfh/pk/dt/rt/mc/it remain unchanged
+        * be: set to the value of dcr.be
+        * pp: set to the value of dcr.pp
+        */
+       vpsr &= INITIAL_PSR_VALUE_AT_INTERRUPTION;
+       vpsr |= (vdcr & IA64_DCR_BE);
+
+       /* VDCR pp bit position is different from VPSR pp bit */
+       if (vdcr & IA64_DCR_PP) {
+               vpsr |= IA64_PSR_PP;
+       } else {
+               vpsr &= ~IA64_PSR_PP;
+       }
+
+       vcpu_set_psr(vcpu, vpsr);
+
+}
+
+void inject_guest_interruption(struct kvm_vcpu *vcpu, u64 vec)
+{
+       u64 viva;
+       struct kvm_pt_regs *regs;
+       union ia64_isr pt_isr;
+
+       regs = vcpu_regs(vcpu);
+
+       /* clear cr.isr.ir (incomplete register frame) */
+       pt_isr.val = VMX(vcpu, cr_isr);
+       pt_isr.ir = 0;
+       VMX(vcpu, cr_isr) = pt_isr.val;
+
+       collect_interruption(vcpu);
+
+       viva = vcpu_get_iva(vcpu);
+       regs->cr_iip = viva + vec;
+}
+
+static u64 vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, u64 ifa)
+{
+       union ia64_rr rr, rr1;
+
+       rr.val = vcpu_get_rr(vcpu, ifa);
+       rr1.val = 0;
+       rr1.ps = rr.ps;
+       rr1.rid = rr.rid;
+       return (rr1.val);
+}
+
+
+/*
+ * Set vIFA & vITIR & vIHA, when vPSR.ic =1
+ * Parameter:
+ *  set_ifa: if true, set vIFA
+ *  set_itir: if true, set vITIR
+ *  set_iha: if true, set vIHA
+ */
+void set_ifa_itir_iha(struct kvm_vcpu *vcpu, u64 vadr,
+               int set_ifa, int set_itir, int set_iha)
+{
+       long vpsr;
+       u64 value;
+
+       vpsr = VCPU(vcpu, vpsr);
+       /* Vol2, Table 8-1 */
+       if (vpsr & IA64_PSR_IC) {
+               if (set_ifa)
+                       vcpu_set_ifa(vcpu, vadr);
+               if (set_itir) {
+                       value = vcpu_get_itir_on_fault(vcpu, vadr);
+                       vcpu_set_itir(vcpu, value);
+               }
+
+               if (set_iha) {
+                       value = vcpu_thash(vcpu, vadr);
+                       vcpu_set_iha(vcpu, value);
+               }
+       }
+}
+
+/*
+ * Data TLB Fault
+ *  @ Data TLB vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void dtlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       /* If vPSR.ic, IFA, ITIR, IHA */
+       set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
+       inject_guest_interruption(vcpu, IA64_DATA_TLB_VECTOR);
+}
+
+/*
+ * Instruction TLB Fault
+ *  @ Instruction TLB vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void itlb_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       /* If vPSR.ic, IFA, ITIR, IHA */
+       set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
+       inject_guest_interruption(vcpu, IA64_INST_TLB_VECTOR);
+}
+
+
+
+/*
+ * Data Nested TLB Fault
+ *  @ Data Nested TLB Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void nested_dtlb(struct kvm_vcpu *vcpu)
+{
+       inject_guest_interruption(vcpu, IA64_DATA_NESTED_TLB_VECTOR);
+}
+
+/*
+ * Alternate Data TLB Fault
+ *  @ Alternate Data TLB vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
+       inject_guest_interruption(vcpu, IA64_ALT_DATA_TLB_VECTOR);
+}
+
+
+/*
+ * Alternate Instruction TLB Fault
+ *  @ Alternate Instruction TLB vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void alt_itlb(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
+       inject_guest_interruption(vcpu, IA64_ALT_INST_TLB_VECTOR);
+}
+
+/* Deal with:
+ *  VHPT Translation Vector
+ */
+static void _vhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       /* If vPSR.ic, IFA, ITIR, IHA */
+       set_ifa_itir_iha(vcpu, vadr, 1, 1, 1);
+       inject_guest_interruption(vcpu, IA64_VHPT_TRANS_VECTOR);
+}
+
+/*
+ * VHPT Instruction Fault
+ *  @ VHPT Translation vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void ivhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       _vhpt_fault(vcpu, vadr);
+}
+
+
+/*
+ * VHPT Data Fault
+ *  @ VHPT Translation vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       _vhpt_fault(vcpu, vadr);
+}
+
+
+
+/*
+ * Deal with:
+ *  General Exception vector
+ */
+void _general_exception(struct kvm_vcpu *vcpu)
+{
+       inject_guest_interruption(vcpu, IA64_GENEX_VECTOR);
+}
+
+
+/*
+ * Illegal Operation Fault
+ *  @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void illegal_op(struct kvm_vcpu *vcpu)
+{
+       _general_exception(vcpu);
+}
+
+/*
+ * Illegal Dependency Fault
+ *  @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void illegal_dep(struct kvm_vcpu *vcpu)
+{
+       _general_exception(vcpu);
+}
+
+/*
+ * Reserved Register/Field Fault
+ *  @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void rsv_reg_field(struct kvm_vcpu *vcpu)
+{
+       _general_exception(vcpu);
+}
+/*
+ * Privileged Operation Fault
+ *  @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+
+void privilege_op(struct kvm_vcpu *vcpu)
+{
+       _general_exception(vcpu);
+}
+
+/*
+ * Unimplemented Data Address Fault
+ *  @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void unimpl_daddr(struct kvm_vcpu *vcpu)
+{
+       _general_exception(vcpu);
+}
+
+/*
+ * Privileged Register Fault
+ *  @ General Exception Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void privilege_reg(struct kvm_vcpu *vcpu)
+{
+       _general_exception(vcpu);
+}
+
+/* Deal with
+ *  Nat consumption vector
+ * Parameter:
+ *  vadr: optional; unused when t == REGISTER
+ */
+static void _nat_consumption_fault(struct kvm_vcpu *vcpu, u64 vadr,
+                                               enum tlb_miss_type t)
+{
+       /* If vPSR.ic && t == DATA/INST, IFA */
+       if (t == DATA || t == INSTRUCTION) {
+               /* IFA */
+               set_ifa_itir_iha(vcpu, vadr, 1, 0, 0);
+       }
+
+       inject_guest_interruption(vcpu, IA64_NAT_CONSUMPTION_VECTOR);
+}
+
+/*
+ * Instruction Nat Page Consumption Fault
+ *  @ Nat Consumption Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void inat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       _nat_consumption_fault(vcpu, vadr, INSTRUCTION);
+}
+
+/*
+ * Register Nat Consumption Fault
+ *  @ Nat Consumption Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void rnat_consumption(struct kvm_vcpu *vcpu)
+{
+       _nat_consumption_fault(vcpu, 0, REGISTER);
+}
+
+/*
+ * Data Nat Page Consumption Fault
+ *  @ Nat Consumption Vector
+ * Refer to SDM Vol2 Table 5-6 & 8-1
+ */
+void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       _nat_consumption_fault(vcpu, vadr, DATA);
+}
+
+/* Deal with
+ *  Page not present vector
+ */
+static void __page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       /* If vPSR.ic, IFA, ITIR */
+       set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
+       inject_guest_interruption(vcpu, IA64_PAGE_NOT_PRESENT_VECTOR);
+}
+
+
+void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       __page_not_present(vcpu, vadr);
+}
+
+
+void inst_page_not_present(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       __page_not_present(vcpu, vadr);
+}
+
+
+/* Deal with
+ *  Data access rights vector
+ */
+void data_access_rights(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       /* If vPSR.ic, IFA, ITIR */
+       set_ifa_itir_iha(vcpu, vadr, 1, 1, 0);
+       inject_guest_interruption(vcpu, IA64_DATA_ACCESS_RIGHTS_VECTOR);
+}
+
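+/* Emulate a floating-point assist via the host FPSWA handler; region 7 is
+ * temporarily switched to the host mapping around the call. */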
+fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
+               unsigned long *fpsr, unsigned long *isr, unsigned long *pr,
+               unsigned long *ifs, struct kvm_pt_regs *regs)
+{
+       fp_state_t fp_state;
+       fpswa_ret_t ret;
+       struct kvm_vcpu *vcpu = current_vcpu;
+
+       uint64_t old_rr7 = ia64_get_rr(7UL<<61);
+
+       if (!vmm_fpswa_interface)
+               return (fpswa_ret_t) {-1, 0, 0, 0};
+
+       /*
+        * Just let the fpswa driver use the hardware fp registers;
+        * no fp register state is passed in memory.
+        */
+       memset(&fp_state, 0, sizeof(fp_state_t));
+
+       /*
+        * unsigned long (*EFI_FPSWA) (
+        *      unsigned long    trap_type,
+        *      void             *Bundle,
+        *      unsigned long    *pipsr,
+        *      unsigned long    *pfsr,
+        *      unsigned long    *pisr,
+        *      unsigned long    *ppreds,
+        *      unsigned long    *pifs,
+        *      void             *fp_state);
+        */
+       /* Call the host fpswa interface directly to service the
+        * guest's fpswa request.
+        */
+       ia64_set_rr(7UL << 61, vcpu->arch.host.rr[7]);
+       ia64_srlz_d();
+
+       ret = (*vmm_fpswa_interface->fpswa) (fp_fault, bundle,
+                       ipsr, fpsr, isr, pr, ifs, &fp_state);
+       ia64_set_rr(7UL << 61, old_rr7);
+       ia64_srlz_d();
+       return ret;
+}
+
+/*
+ * Handle floating-point assist faults and traps for domain.
+ */
+unsigned long vmm_handle_fpu_swa(int fp_fault, struct kvm_pt_regs *regs,
+                                       unsigned long isr)
+{
+       struct kvm_vcpu *v = current_vcpu;
+       IA64_BUNDLE bundle;
+       unsigned long fault_ip;
+       fpswa_ret_t ret;
+
+       fault_ip = regs->cr_iip;
+       /*
+        * When the FP trap occurs, the trapping instruction is completed.
+        * If ipsr.ri == 0, the trapping instruction is in the previous
+        * bundle.
+        */
+       if (!fp_fault && (ia64_psr(regs)->ri == 0))
+               fault_ip -= 16;
+
+       if (fetch_code(v, fault_ip, &bundle))
+               return -EAGAIN;
+
+       if (!bundle.i64[0] && !bundle.i64[1])
+               return -EACCES;
+
+       ret = vmm_fp_emulate(fp_fault, &bundle, &regs->cr_ipsr, &regs->ar_fpsr,
+                       &isr, &regs->pr, &regs->cr_ifs, regs);
+       return ret.status;
+}
+
+void reflect_interruption(u64 ifa, u64 isr, u64 iim,
+               u64 vec, struct kvm_pt_regs *regs)
+{
+       u64 vector;
+       int status;
+       struct kvm_vcpu *vcpu = current_vcpu;
+       u64 vpsr = VCPU(vcpu, vpsr);
+
+       vector = vec2off[vec];
+
+       if (!(vpsr & IA64_PSR_IC) && (vector != IA64_DATA_NESTED_TLB_VECTOR)) {
+               panic_vm(vcpu);
+               return;
+       }
+
+       switch (vec) {
+       case 32:        /*IA64_FP_FAULT_VECTOR*/
+               status = vmm_handle_fpu_swa(1, regs, isr);
+               if (!status) {
+                       vcpu_increment_iip(vcpu);
+                       return;
+               } else if (-EAGAIN == status)
+                       return;
+               break;
+       case 33:        /*IA64_FP_TRAP_VECTOR*/
+               status = vmm_handle_fpu_swa(0, regs, isr);
+               if (!status)
+                       return;
+               else if (-EAGAIN == status) {
+                       vcpu_decrement_iip(vcpu);
+                       return;
+               }
+               break;
+       }
+
+       VCPU(vcpu, isr) = isr;
+       VCPU(vcpu, iipa) = regs->cr_iip;
+       if (vector == IA64_BREAK_VECTOR || vector == IA64_SPECULATION_VECTOR)
+               VCPU(vcpu, iim) = iim;
+       else
+               set_ifa_itir_iha(vcpu, ifa, 1, 1, 1);
+
+       inject_guest_interruption(vcpu, vector);
+}
+
+static void set_pal_call_data(struct kvm_vcpu *vcpu)
+{
+       struct exit_ctl_data *p = &vcpu->arch.exit_data;
+
+       /* FIXME: For both the static and stacked conventions, firmware
+        * has put the parameters in gr28-gr31 before the
+        * break into the vmm. */
+
+       p->u.pal_data.gr28 = vcpu_get_gr(vcpu, 28);
+       p->u.pal_data.gr29 = vcpu_get_gr(vcpu, 29);
+       p->u.pal_data.gr30 = vcpu_get_gr(vcpu, 30);
+       p->u.pal_data.gr31 = vcpu_get_gr(vcpu, 31);
+       p->exit_reason = EXIT_REASON_PAL_CALL;
+}
+
+static void set_pal_call_result(struct kvm_vcpu *vcpu)
+{
+       struct exit_ctl_data *p = &vcpu->arch.exit_data;
+
+       if (p->exit_reason == EXIT_REASON_PAL_CALL) {
+               vcpu_set_gr(vcpu, 8, p->u.pal_data.ret.status, 0);
+               vcpu_set_gr(vcpu, 9, p->u.pal_data.ret.v0, 0);
+               vcpu_set_gr(vcpu, 10, p->u.pal_data.ret.v1, 0);
+               vcpu_set_gr(vcpu, 11, p->u.pal_data.ret.v2, 0);
+       } else
+               panic_vm(vcpu);
+}
+
+static void set_sal_call_data(struct kvm_vcpu *vcpu)
+{
+       struct exit_ctl_data *p = &vcpu->arch.exit_data;
+
+       p->u.sal_data.in0 = vcpu_get_gr(vcpu, 32);
+       p->u.sal_data.in1 = vcpu_get_gr(vcpu, 33);
+       p->u.sal_data.in2 = vcpu_get_gr(vcpu, 34);
+       p->u.sal_data.in3 = vcpu_get_gr(vcpu, 35);
+       p->u.sal_data.in4 = vcpu_get_gr(vcpu, 36);
+       p->u.sal_data.in5 = vcpu_get_gr(vcpu, 37);
+       p->u.sal_data.in6 = vcpu_get_gr(vcpu, 38);
+       p->u.sal_data.in7 = vcpu_get_gr(vcpu, 39);
+       p->exit_reason = EXIT_REASON_SAL_CALL;
+}
+
+static void set_sal_call_result(struct kvm_vcpu *vcpu)
+{
+       struct exit_ctl_data *p = &vcpu->arch.exit_data;
+
+       if (p->exit_reason == EXIT_REASON_SAL_CALL) {
+               vcpu_set_gr(vcpu, 8, p->u.sal_data.ret.r8, 0);
+               vcpu_set_gr(vcpu, 9, p->u.sal_data.ret.r9, 0);
+               vcpu_set_gr(vcpu, 10, p->u.sal_data.ret.r10, 0);
+               vcpu_set_gr(vcpu, 11, p->u.sal_data.ret.r11, 0);
+       } else
+               panic_vm(vcpu);
+}
+
+void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
+               unsigned long isr, unsigned long iim)
+{
+       struct kvm_vcpu *v = current_vcpu;
+
+       if (ia64_psr(regs)->cpl == 0) {
+               /* Allow hypercalls only when cpl = 0.  */
+               if (iim == DOMN_PAL_REQUEST) {
+                       set_pal_call_data(v);
+                       vmm_transition(v);
+                       set_pal_call_result(v);
+                       vcpu_increment_iip(v);
+                       return;
+               } else if (iim == DOMN_SAL_REQUEST) {
+                       set_sal_call_data(v);
+                       vmm_transition(v);
+                       set_sal_call_result(v);
+                       vcpu_increment_iip(v);
+                       return;
+               }
+       }
+       reflect_interruption(ifa, isr, iim, 11, regs);
+}
+
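+/* Decide how to deliver the highest pending external interrupt: inject it
+ * when vpsr.i and vtpr allow, otherwise leave it latched in VHPI. */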
+void check_pending_irq(struct kvm_vcpu *vcpu)
+{
+       int  mask, h_pending, h_inservice;
+       u64 isr;
+       unsigned long  vpsr;
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+       h_pending = highest_pending_irq(vcpu);
+       if (h_pending == NULL_VECTOR) {
+               update_vhpi(vcpu, NULL_VECTOR);
+               return;
+       }
+       h_inservice = highest_inservice_irq(vcpu);
+
+       vpsr = VCPU(vcpu, vpsr);
+       mask = irq_masked(vcpu, h_pending, h_inservice);
+       if ((vpsr & IA64_PSR_I) && IRQ_NO_MASKED == mask) {
+               isr = vpsr & IA64_PSR_RI;
+               update_vhpi(vcpu, h_pending);
+               reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
+       } else if (mask == IRQ_MASKED_BY_INSVC) {
+               if (VCPU(vcpu, vhpi))
+                       update_vhpi(vcpu, NULL_VECTOR);
+       } else {
+               /* masked by vpsr.i or vtpr. */
+               update_vhpi(vcpu, h_pending);
+       }
+}
+
+static void generate_exirq(struct kvm_vcpu *vcpu)
+{
+       unsigned  vpsr;
+       uint64_t isr;
+
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+       vpsr = VCPU(vcpu, vpsr);
+       isr = vpsr & IA64_PSR_RI;
+       if (!(vpsr & IA64_PSR_IC))
+               panic_vm(vcpu);
+       reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
+}
+
+void vhpi_detection(struct kvm_vcpu *vcpu)
+{
+       uint64_t    threshold, vhpi;
+       union ia64_tpr       vtpr;
+       struct ia64_psr vpsr;
+
+       vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+       vtpr.val = VCPU(vcpu, tpr);
+
+       threshold = ((!vpsr.i) << 5) | (vtpr.mmi << 4) | vtpr.mic;
+       vhpi = VCPU(vcpu, vhpi);
+       if (vhpi > threshold) {
+               /* interrupt activated */
+               generate_exirq(vcpu);
+       }
+}
+
+
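+/* Last VMM work before returning to the guest: fire the guest timer if it
+ * has expired and deliver any newly pending interrupts. */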
+void leave_hypervisor_tail(void)
+{
+       struct kvm_vcpu *v = current_vcpu;
+
+       if (VMX(v, timer_check)) {
+               VMX(v, timer_check) = 0;
+               if (VMX(v, itc_check)) {
+                       if (vcpu_get_itc(v) > VCPU(v, itm)) {
+                               if (!(VCPU(v, itv) & (1 << 16))) {
+                                       vcpu_pend_interrupt(v, VCPU(v, itv)
+                                                       & 0xff);
+                                       VMX(v, itc_check) = 0;
+                               } else {
+                                       v->arch.timer_pending = 1;
+                               }
+                               VMX(v, last_itc) = VCPU(v, itm) + 1;
+                       }
+               }
+       }
+
+       rmb();
+       if (v->arch.irq_new_pending) {
+               v->arch.irq_new_pending = 0;
+               VMX(v, irq_check) = 0;
+               check_pending_irq(v);
+               return;
+       }
+       if (VMX(v, irq_check)) {
+               VMX(v, irq_check) = 0;
+               vhpi_detection(v);
+       }
+}
+
+
+static inline void handle_lds(struct kvm_pt_regs *regs)
+{
+       regs->cr_ipsr |= IA64_PSR_ED;
+}
+
+void physical_tlb_miss(struct kvm_vcpu *vcpu, unsigned long vadr, int type)
+{
+       unsigned long pte;
+       union ia64_rr rr;
+
+       rr.val = ia64_get_rr(vadr);
+       pte =  vadr & _PAGE_PPN_MASK;
+       pte = pte | PHY_PAGE_WB;
+       thash_vhpt_insert(vcpu, pte, (u64)(rr.ps << 2), vadr, type);
+       return;
+}
+
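+/* TLB-miss entry point: handle guest-physical-mode misses directly,
+ * otherwise look up the vTLB, walk the guest VHPT, and inject the
+ * appropriate fault when no translation is found. */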
+void kvm_page_fault(u64 vadr, u64 vec, struct kvm_pt_regs *regs)
+{
+       unsigned long vpsr;
+       int type;
+
+       u64 vhpt_adr, gppa, pteval, rr, itir;
+       union ia64_isr misr;
+       union ia64_pta vpta;
+       struct thash_data *data;
+       struct kvm_vcpu *v = current_vcpu;
+
+       vpsr = VCPU(v, vpsr);
+       misr.val = VMX(v, cr_isr);
+
+       type = vec;
+
+       if (is_physical_mode(v) && (!(vadr << 1 >> 62))) {
+               if (vec == 2) {
+                       if (__gpfn_is_io((vadr << 1) >> (PAGE_SHIFT + 1))) {
+                               emulate_io_inst(v, ((vadr << 1) >> 1), 4);
+                               return;
+                       }
+               }
+               physical_tlb_miss(v, vadr, type);
+               return;
+       }
+       data = vtlb_lookup(v, vadr, type);
+       if (data != 0) {
+               if (type == D_TLB) {
+                       gppa = (vadr & ((1UL << data->ps) - 1))
+                               + (data->ppn >> (data->ps - 12) << data->ps);
+                       if (__gpfn_is_io(gppa >> PAGE_SHIFT)) {
+                               if (data->pl >= ((regs->cr_ipsr >>
+                                               IA64_PSR_CPL0_BIT) & 3))
+                                       emulate_io_inst(v, gppa, data->ma);
+                               else {
+                                       vcpu_set_isr(v, misr.val);
+                                       data_access_rights(v, vadr);
+                               }
+                               return;
+                       }
+               }
+               thash_vhpt_insert(v, data->page_flags, data->itir, vadr, type);
+
+       } else if (type == D_TLB) {
+               if (misr.sp) {
+                       handle_lds(regs);
+                       return;
+               }
+
+               rr = vcpu_get_rr(v, vadr);
+               itir = rr & (RR_RID_MASK | RR_PS_MASK);
+
+               if (!vhpt_enabled(v, vadr, misr.rs ? RSE_REF : DATA_REF)) {
+                       if (vpsr & IA64_PSR_IC) {
+                               vcpu_set_isr(v, misr.val);
+                               alt_dtlb(v, vadr);
+                       } else {
+                               nested_dtlb(v);
+                       }
+                       return;
+               }
+
+               vpta.val = vcpu_get_pta(v);
+               /* avoid recursively walking (short format) VHPT */
+
+               vhpt_adr = vcpu_thash(v, vadr);
+               if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
+                       /* VHPT successfully read.  */
+                       if (!(pteval & _PAGE_P)) {
+                               if (vpsr & IA64_PSR_IC) {
+                                       vcpu_set_isr(v, misr.val);
+                                       dtlb_fault(v, vadr);
+                               } else {
+                                       nested_dtlb(v);
+                               }
+                       } else if ((pteval & _PAGE_MA_MASK) != _PAGE_MA_ST) {
+                               thash_purge_and_insert(v, pteval, itir,
+                                                               vadr, D_TLB);
+                       } else if (vpsr & IA64_PSR_IC) {
+                               vcpu_set_isr(v, misr.val);
+                               dtlb_fault(v, vadr);
+                       } else {
+                               nested_dtlb(v);
+                       }
+               } else {
+                       /* Can't read VHPT.  */
+                       if (vpsr & IA64_PSR_IC) {
+                               vcpu_set_isr(v, misr.val);
+                               dvhpt_fault(v, vadr);
+                       } else {
+                               nested_dtlb(v);
+                       }
+               }
+       } else if (type == I_TLB) {
+               if (!(vpsr & IA64_PSR_IC))
+                       misr.ni = 1;
+               if (!vhpt_enabled(v, vadr, INST_REF)) {
+                       vcpu_set_isr(v, misr.val);
+                       alt_itlb(v, vadr);
+                       return;
+               }
+
+               vpta.val = vcpu_get_pta(v);
+
+               vhpt_adr = vcpu_thash(v, vadr);
+               if (!guest_vhpt_lookup(vhpt_adr, &pteval)) {
+                       /* VHPT successfully read.  */
+                       if (pteval & _PAGE_P) {
+                               if ((pteval & _PAGE_MA_MASK) == _PAGE_MA_ST) {
+                                       vcpu_set_isr(v, misr.val);
+                                       itlb_fault(v, vadr);
+                                       return;
+                               }
+                               rr = vcpu_get_rr(v, vadr);
+                               itir = rr & (RR_RID_MASK | RR_PS_MASK);
+                               thash_purge_and_insert(v, pteval, itir,
+                                                       vadr, I_TLB);
+                       } else {
+                               vcpu_set_isr(v, misr.val);
+                               inst_page_not_present(v, vadr);
+                       }
+               } else {
+                       vcpu_set_isr(v, misr.val);
+                       ivhpt_fault(v, vadr);
+               }
+       }
+}
+
+void kvm_vexirq(struct kvm_vcpu *vcpu)
+{
+       u64 vpsr, isr;
+       struct kvm_pt_regs *regs;
+
+       regs = vcpu_regs(vcpu);
+       vpsr = VCPU(vcpu, vpsr);
+       isr = vpsr & IA64_PSR_RI;
+       reflect_interruption(0, isr, 0, 12, regs); /* EXT IRQ */
+}
+
+void kvm_ia64_handle_irq(struct kvm_vcpu *v)
+{
+       struct exit_ctl_data *p = &v->arch.exit_data;
+       long psr;
+
+       local_irq_save(psr);
+       p->exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
+       vmm_transition(v);
+       local_irq_restore(psr);
+
+       VMX(v, timer_check) = 1;
+
+}
+
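+/* Service a remote ptc.ga request: temporarily install the requester's rr[0]
+ * and page-size bits, purge the matching translations, then restore the
+ * original state. */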
+static void ptc_ga_remote_func(struct kvm_vcpu *v, int pos)
+{
+       u64 oldrid, moldrid, oldpsbits, vaddr;
+       struct kvm_ptc_g *p = &v->arch.ptc_g_data[pos];
+       vaddr = p->vaddr;
+
+       oldrid = VMX(v, vrr[0]);
+       VMX(v, vrr[0]) = p->rr;
+       oldpsbits = VMX(v, psbits[0]);
+       VMX(v, psbits[0]) = VMX(v, psbits[REGION_NUMBER(vaddr)]);
+       moldrid = ia64_get_rr(0x0);
+       ia64_set_rr(0x0, vrrtomrr(p->rr));
+       ia64_srlz_d();
+
+       vaddr = PAGEALIGN(vaddr, p->ps);
+       thash_purge_entries_remote(v, vaddr, p->ps);
+
+       VMX(v, vrr[0]) = oldrid;
+       VMX(v, psbits[0]) = oldpsbits;
+       ia64_set_rr(0x0, moldrid);
+       ia64_dv_serialize_data();
+}
+
+static void vcpu_do_resume(struct kvm_vcpu *vcpu)
+{
+       /* Re-init VHPT and VTLB once on resume */
+       vcpu->arch.vhpt.num = VHPT_NUM_ENTRIES;
+       thash_init(&vcpu->arch.vhpt, VHPT_SHIFT);
+       vcpu->arch.vtlb.num = VTLB_NUM_ENTRIES;
+       thash_init(&vcpu->arch.vtlb, VTLB_SHIFT);
+
+       ia64_set_pta(vcpu->arch.vhpt.pta.val);
+}
+
+static void kvm_do_resume_op(struct kvm_vcpu *vcpu)
+{
+       if (test_and_clear_bit(KVM_REQ_RESUME, &vcpu->requests)) {
+               vcpu_do_resume(vcpu);
+               return;
+       }
+
+       if (unlikely(test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))) {
+               thash_purge_all(vcpu);
+               return;
+       }
+
+       if (test_and_clear_bit(KVM_REQ_PTC_G, &vcpu->requests)) {
+               while (vcpu->arch.ptc_g_count > 0)
+                       ptc_ga_remote_func(vcpu, --vcpu->arch.ptc_g_count);
+       }
+}
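kvm_do_resume_op() consumes request bits on vcpu->requests in priority order: a full resume re-initializes the VHPT/VTLB, a TLB-flush request purges everything, and pending global purges are drained one by one. The setters for these bits live elsewhere in the series; other KVM code of this period posts requests with set_bit() on vcpu->requests, so a hedged producer-side sketch looks like:

#include <linux/kvm_host.h>

/* Sketch only: mirrors how contemporary KVM code posts requests; the
 * real producers for KVM_REQ_RESUME etc. are in other files of this series. */
static void post_resume_request_sketch(struct kvm_vcpu *vcpu)
{
        set_bit(KVM_REQ_RESUME, &vcpu->requests);   /* consumed by kvm_do_resume_op() */
}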
+
+void vmm_transition(struct kvm_vcpu *vcpu)
+{
+       ia64_call_vsa(PAL_VPS_SAVE, (unsigned long)vcpu->arch.vpd,
+                       0, 0, 0, 0, 0, 0);
+       vmm_trampoline(&vcpu->arch.guest, &vcpu->arch.host);
+       ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)vcpu->arch.vpd,
+                                               0, 0, 0, 0, 0, 0);
+       kvm_do_resume_op(vcpu);
+}
diff --git a/arch/ia64/kvm/trampoline.S b/arch/ia64/kvm/trampoline.S
new file mode 100644 (file)
index 0000000..30897d4
--- /dev/null
@@ -0,0 +1,1038 @@
+/* Save all processor states
+ *
+ * Copyright (c) 2007 Fleming Feng <fleming.feng@intel.com>
+ * Copyright (c) 2007 Anthony Xu   <anthony.xu@intel.com>
+ */
+
+#include <asm/asmmacro.h>
+#include "asm-offsets.h"
+
+
+#define CTX(name)    VMM_CTX_##name##_OFFSET
+
+       /*
+        *      r32:            context_t base address
+        */
+#define        SAVE_BRANCH_REGS                        \
+       add     r2 = CTX(B0),r32;               \
+       add     r3 = CTX(B1),r32;               \
+       mov     r16 = b0;                       \
+       mov     r17 = b1;                       \
+       ;;                                      \
+       st8     [r2]=r16,16;                    \
+       st8     [r3]=r17,16;                    \
+       ;;                                      \
+       mov     r16 = b2;                       \
+       mov     r17 = b3;                       \
+       ;;                                      \
+       st8     [r2]=r16,16;                    \
+       st8     [r3]=r17,16;                    \
+       ;;                                      \
+       mov     r16 = b4;                       \
+       mov     r17 = b5;                       \
+       ;;                                      \
+       st8     [r2]=r16;                       \
+       st8     [r3]=r17;                       \
+       ;;
+
+       /*
+        *      r33:            context_t base address
+        */
+#define        RESTORE_BRANCH_REGS                     \
+       add     r2 = CTX(B0),r33;               \
+       add     r3 = CTX(B1),r33;               \
+       ;;                                      \
+       ld8     r16=[r2],16;                    \
+       ld8     r17=[r3],16;                    \
+       ;;                                      \
+       mov     b0 = r16;                       \
+       mov     b1 = r17;                       \
+       ;;                                      \
+       ld8     r16=[r2],16;                    \
+       ld8     r17=[r3],16;                    \
+       ;;                                      \
+       mov     b2 = r16;                       \
+       mov     b3 = r17;                       \
+       ;;                                      \
+       ld8     r16=[r2];                       \
+       ld8     r17=[r3];                       \
+       ;;                                      \
+       mov     b4=r16;                         \
+       mov     b5=r17;                         \
+       ;;
+
+
+       /*
+        *      r32: context_t base address
+        *      bsw == 1
+        *      Save all bank1 general registers, r4 ~ r7
+        */
+#define        SAVE_GENERAL_REGS                       \
+       add     r2=CTX(R4),r32;                 \
+       add     r3=CTX(R5),r32;                 \
+       ;;                                      \
+.mem.offset 0,0;                               \
+       st8.spill       [r2]=r4,16;             \
+.mem.offset 8,0;                               \
+       st8.spill       [r3]=r5,16;             \
+       ;;                                      \
+.mem.offset 0,0;                               \
+       st8.spill       [r2]=r6,48;             \
+.mem.offset 8,0;                               \
+       st8.spill       [r3]=r7,48;             \
+       ;;                                      \
+.mem.offset 0,0;                               \
+    st8.spill    [r2]=r12;                     \
+.mem.offset 8,0;                               \
+    st8.spill    [r3]=r13;                     \
+    ;;
+
+       /*
+        *      r33: context_t base address
+        *      bsw == 1
+        */
+#define        RESTORE_GENERAL_REGS                    \
+       add     r2=CTX(R4),r33;                 \
+       add     r3=CTX(R5),r33;                 \
+       ;;                                      \
+       ld8.fill        r4=[r2],16;             \
+       ld8.fill        r5=[r3],16;             \
+       ;;                                      \
+       ld8.fill        r6=[r2],48;             \
+       ld8.fill        r7=[r3],48;             \
+       ;;                                      \
+       ld8.fill    r12=[r2];                   \
+       ld8.fill    r13 =[r3];                  \
+       ;;
+
+
+
+
+       /*
+        *      r32:            context_t base address
+        */
+#define        SAVE_KERNEL_REGS                        \
+       add     r2 = CTX(KR0),r32;              \
+       add     r3 = CTX(KR1),r32;              \
+       mov     r16 = ar.k0;                    \
+       mov     r17 = ar.k1;                    \
+       ;;                                      \
+       st8     [r2] = r16,16;                  \
+       st8     [r3] = r17,16;                  \
+       ;;                                      \
+       mov     r16 = ar.k2;                    \
+       mov     r17 = ar.k3;                    \
+       ;;                                      \
+       st8     [r2] = r16,16;                  \
+       st8     [r3] = r17,16;                  \
+       ;;                                      \
+       mov     r16 = ar.k4;                    \
+       mov     r17 = ar.k5;                    \
+       ;;                                      \
+       st8     [r2] = r16,16;                  \
+       st8     [r3] = r17,16;                  \
+       ;;                                      \
+       mov     r16 = ar.k6;                    \
+       mov     r17 = ar.k7;                    \
+       ;;                                      \
+       st8     [r2] = r16;                     \
+       st8     [r3] = r17;                     \
+       ;;
+
+
+
+       /*
+        *      r33:            context_t base address
+        */
+#define        RESTORE_KERNEL_REGS                     \
+       add     r2 = CTX(KR0),r33;              \
+       add     r3 = CTX(KR1),r33;              \
+       ;;                                      \
+       ld8     r16=[r2],16;                    \
+       ld8     r17=[r3],16;                    \
+       ;;                                      \
+       mov     ar.k0=r16;                      \
+       mov     ar.k1=r17;                      \
+       ;;                                      \
+       ld8     r16=[r2],16;                    \
+       ld8     r17=[r3],16;                    \
+       ;;                                      \
+       mov     ar.k2=r16;                      \
+       mov     ar.k3=r17;                      \
+       ;;                                      \
+       ld8     r16=[r2],16;                    \
+       ld8     r17=[r3],16;                    \
+       ;;                                      \
+       mov     ar.k4=r16;                      \
+       mov     ar.k5=r17;                      \
+       ;;                                      \
+       ld8     r16=[r2],16;                    \
+       ld8     r17=[r3],16;                    \
+       ;;                                      \
+       mov     ar.k6=r16;                      \
+       mov     ar.k7=r17;                      \
+       ;;
+
+
+
+       /*
+        *      r32:            context_t base address
+        */
+#define        SAVE_APP_REGS                           \
+       add  r2 = CTX(BSPSTORE),r32;            \
+       mov  r16 = ar.bspstore;                 \
+       ;;                                      \
+       st8  [r2] = r16,CTX(RNAT)-CTX(BSPSTORE);\
+       mov  r16 = ar.rnat;                     \
+       ;;                                      \
+       st8  [r2] = r16,CTX(FCR)-CTX(RNAT);     \
+       mov  r16 = ar.fcr;                      \
+       ;;                                      \
+       st8  [r2] = r16,CTX(EFLAG)-CTX(FCR);    \
+       mov  r16 = ar.eflag;                    \
+       ;;                                      \
+       st8  [r2] = r16,CTX(CFLG)-CTX(EFLAG);   \
+       mov  r16 = ar.cflg;                     \
+       ;;                                      \
+       st8  [r2] = r16,CTX(FSR)-CTX(CFLG);     \
+       mov  r16 = ar.fsr;                      \
+       ;;                                      \
+       st8  [r2] = r16,CTX(FIR)-CTX(FSR);      \
+       mov  r16 = ar.fir;                      \
+       ;;                                      \
+       st8  [r2] = r16,CTX(FDR)-CTX(FIR);      \
+       mov  r16 = ar.fdr;                      \
+       ;;                                      \
+       st8  [r2] = r16,CTX(UNAT)-CTX(FDR);     \
+       mov  r16 = ar.unat;                     \
+       ;;                                      \
+       st8  [r2] = r16,CTX(FPSR)-CTX(UNAT);    \
+       mov  r16 = ar.fpsr;                     \
+       ;;                                      \
+       st8  [r2] = r16,CTX(PFS)-CTX(FPSR);     \
+       mov  r16 = ar.pfs;                      \
+       ;;                                      \
+       st8  [r2] = r16,CTX(LC)-CTX(PFS);       \
+       mov  r16 = ar.lc;                       \
+       ;;                                      \
+       st8  [r2] = r16;                        \
+       ;;
+
+       /*
+        *      r33:            context_t base address
+        */
+#define        RESTORE_APP_REGS                        \
+       add  r2=CTX(BSPSTORE),r33;              \
+       ;;                                      \
+       ld8  r16=[r2],CTX(RNAT)-CTX(BSPSTORE);  \
+       ;;                                      \
+       mov  ar.bspstore=r16;                   \
+       ld8  r16=[r2],CTX(FCR)-CTX(RNAT);       \
+       ;;                                      \
+       mov  ar.rnat=r16;                       \
+       ld8  r16=[r2],CTX(EFLAG)-CTX(FCR);      \
+       ;;                                      \
+       mov  ar.fcr=r16;                        \
+       ld8  r16=[r2],CTX(CFLG)-CTX(EFLAG);     \
+       ;;                                      \
+       mov  ar.eflag=r16;                      \
+       ld8  r16=[r2],CTX(FSR)-CTX(CFLG);       \
+       ;;                                      \
+       mov  ar.cflg=r16;                       \
+       ld8  r16=[r2],CTX(FIR)-CTX(FSR);        \
+       ;;                                      \
+       mov  ar.fsr=r16;                        \
+       ld8  r16=[r2],CTX(FDR)-CTX(FIR);        \
+       ;;                                      \
+       mov  ar.fir=r16;                        \
+       ld8  r16=[r2],CTX(UNAT)-CTX(FDR);       \
+       ;;                                      \
+       mov  ar.fdr=r16;                        \
+       ld8  r16=[r2],CTX(FPSR)-CTX(UNAT);      \
+       ;;                                      \
+       mov  ar.unat=r16;                       \
+       ld8  r16=[r2],CTX(PFS)-CTX(FPSR);       \
+       ;;                                      \
+       mov  ar.fpsr=r16;                       \
+       ld8  r16=[r2],CTX(LC)-CTX(PFS);         \
+       ;;                                      \
+       mov  ar.pfs=r16;                        \
+       ld8  r16=[r2];                          \
+       ;;                                      \
+       mov  ar.lc=r16;                         \
+       ;;
+
+       /*
+        *      r32:            context_t base address
+        */
+#define        SAVE_CTL_REGS                           \
+       add     r2 = CTX(DCR),r32;              \
+       mov     r16 = cr.dcr;                   \
+       ;;                                      \
+       st8     [r2] = r16,CTX(IVA)-CTX(DCR);   \
+       ;;                                      \
+       mov     r16 = cr.iva;                   \
+       ;;                                      \
+       st8     [r2] = r16,CTX(PTA)-CTX(IVA);   \
+       ;;                                      \
+       mov r16 = cr.pta;                       \
+       ;;                                      \
+       st8 [r2] = r16 ;                        \
+       ;;
+
+       /*
+        *      r33:            context_t base address
+        */
+#define        RESTORE_CTL_REGS                                \
+       add     r2 = CTX(DCR),r33;                      \
+       ;;                                              \
+       ld8     r16 = [r2],CTX(IVA)-CTX(DCR);           \
+       ;;                                              \
+       mov     cr.dcr = r16;                           \
+       dv_serialize_data;                              \
+       ;;                                              \
+       ld8     r16 = [r2],CTX(PTA)-CTX(IVA);           \
+       ;;                                              \
+       mov     cr.iva = r16;                           \
+       dv_serialize_data;                              \
+       ;;                                              \
+       ld8 r16 = [r2];                                 \
+       ;;                                              \
+       mov cr.pta = r16;                               \
+       dv_serialize_data;                              \
+       ;;
+
+
+       /*
+        *      r32:            context_t base address
+        */
+#define        SAVE_REGION_REGS                        \
+       add     r2=CTX(RR0),r32;                \
+       mov     r16=rr[r0];                     \
+       dep.z   r18=1,61,3;                     \
+       ;;                                      \
+       st8     [r2]=r16,8;                     \
+       mov     r17=rr[r18];                    \
+       dep.z   r18=2,61,3;                     \
+       ;;                                      \
+       st8     [r2]=r17,8;                     \
+       mov     r16=rr[r18];                    \
+       dep.z   r18=3,61,3;                     \
+       ;;                                      \
+       st8     [r2]=r16,8;                     \
+       mov     r17=rr[r18];                    \
+       dep.z   r18=4,61,3;                     \
+       ;;                                      \
+       st8     [r2]=r17,8;                     \
+       mov     r16=rr[r18];                    \
+       dep.z   r18=5,61,3;                     \
+       ;;                                      \
+       st8     [r2]=r16,8;                     \
+       mov     r17=rr[r18];                    \
+       dep.z   r18=7,61,3;                     \
+       ;;                                      \
+       st8     [r2]=r17,16;                    \
+       mov     r16=rr[r18];                    \
+       ;;                                      \
+       st8     [r2]=r16,8;                     \
+       ;;
+
+       /*
+        *      r33:context_t base address
+        */
+#define        RESTORE_REGION_REGS     \
+       add     r2=CTX(RR0),r33;\
+       mov r18=r0;             \
+       ;;                      \
+       ld8     r20=[r2],8;     \
+       ;;      /* rr0 */       \
+       ld8     r21=[r2],8;     \
+       ;;      /* rr1 */       \
+       ld8     r22=[r2],8;     \
+       ;;      /* rr2 */       \
+       ld8     r23=[r2],8;     \
+       ;;      /* rr3 */       \
+       ld8     r24=[r2],8;     \
+       ;;      /* rr4 */       \
+       ld8     r25=[r2],16;    \
+       ;;      /* rr5 */       \
+       ld8     r27=[r2];       \
+       ;;      /* rr7 */       \
+       mov rr[r18]=r20;        \
+       dep.z   r18=1,61,3;     \
+       ;;  /* rr1 */           \
+       mov rr[r18]=r21;        \
+       dep.z   r18=2,61,3;     \
+       ;;  /* rr2 */           \
+       mov rr[r18]=r22;        \
+       dep.z   r18=3,61,3;     \
+       ;;  /* rr3 */           \
+       mov rr[r18]=r23;        \
+       dep.z   r18=4,61,3;     \
+       ;;  /* rr4 */           \
+       mov rr[r18]=r24;        \
+       dep.z   r18=5,61,3;     \
+       ;;  /* rr5 */           \
+       mov rr[r18]=r25;        \
+       dep.z   r18=7,61,3;     \
+       ;;  /* rr7 */           \
+       mov rr[r18]=r27;        \
+       ;;                      \
+       srlz.i;                 \
+       ;;
+
+
+
+       /*
+        *      r32:    context_t base address
+        *      r2, r3, r16~r18: scratch registers
+        */
+#define        SAVE_DEBUG_REGS                         \
+       add     r2=CTX(IBR0),r32;               \
+       add     r3=CTX(DBR0),r32;               \
+       mov     r16=ibr[r0];                    \
+       mov     r17=dbr[r0];                    \
+       ;;                                      \
+       st8     [r2]=r16,8;                     \
+       st8     [r3]=r17,8;                     \
+       add     r18=1,r0;                       \
+       ;;                                      \
+       mov     r16=ibr[r18];                   \
+       mov     r17=dbr[r18];                   \
+       ;;                                      \
+       st8     [r2]=r16,8;                     \
+       st8     [r3]=r17,8;                     \
+       add     r18=2,r0;                       \
+       ;;                                      \
+       mov     r16=ibr[r18];                   \
+       mov     r17=dbr[r18];                   \
+       ;;                                      \
+       st8     [r2]=r16,8;                     \
+       st8     [r3]=r17,8;                     \
+       add     r18=3,r0;                       \
+       ;;                                      \
+       mov     r16=ibr[r18];                   \
+       mov     r17=dbr[r18];                   \
+       ;;                                      \
+       st8     [r2]=r16,8;                     \
+       st8     [r3]=r17,8;                     \
+       add     r18=4,r0;                       \
+       ;;                                      \
+       mov     r16=ibr[r18];                   \
+       mov     r17=dbr[r18];                   \
+       ;;                                      \
+       st8     [r2]=r16,8;                     \
+       st8     [r3]=r17,8;                     \
+       add     r18=5,r0;                       \
+       ;;                                      \
+       mov     r16=ibr[r18];                   \
+       mov     r17=dbr[r18];                   \
+       ;;                                      \
+       st8     [r2]=r16,8;                     \
+       st8     [r3]=r17,8;                     \
+       add     r18=6,r0;                       \
+       ;;                                      \
+       mov     r16=ibr[r18];                   \
+       mov     r17=dbr[r18];                   \
+       ;;                                      \
+       st8     [r2]=r16,8;                     \
+       st8     [r3]=r17,8;                     \
+       add     r18=7,r0;                       \
+       ;;                                      \
+       mov     r16=ibr[r18];                   \
+       mov     r17=dbr[r18];                   \
+       ;;                                      \
+       st8     [r2]=r16,8;                     \
+       st8     [r3]=r17,8;                     \
+       ;;
+
+
+/*
+ *      r33:    points to the context_t structure
+ *      ar.lc is clobbered.
+ */
+#define RESTORE_DEBUG_REGS                     \
+       add     r2=CTX(IBR0),r33;               \
+       add     r3=CTX(DBR0),r33;               \
+       mov r16=7;                              \
+       mov r17=r0;                             \
+       ;;                                      \
+       mov ar.lc = r16;                        \
+       ;;                                      \
+1:                                             \
+       ld8 r18=[r2],8;                         \
+       ld8 r19=[r3],8;                         \
+       ;;                                      \
+       mov ibr[r17]=r18;                       \
+       mov dbr[r17]=r19;                       \
+       ;;                                      \
+       srlz.i;                                 \
+       ;;                                      \
+       add r17=1,r17;                          \
+       br.cloop.sptk 1b;                       \
+       ;;
+
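RESTORE_DEBUG_REGS above (and SAVE/RESTORE_PTK_REGS further down) use the ia64 counted-loop idiom: ar.lc is preloaded with N-1, and br.cloop decrements it and branches back while it is non-zero, so the body executes N times. A C-level rendering of the eight-iteration loop, with hypothetical set_ibr()/set_dbr() helpers standing in for the privileged mov-to-ibr/dbr instructions:

/* Hypothetical helpers standing in for "mov ibr[i] = val" / "mov dbr[i] = val". */
void set_ibr(int i, unsigned long val);
void set_dbr(int i, unsigned long val);

/* C equivalent of the ar.lc/br.cloop loop: ar.lc = 7 means 8 iterations (0..7). */
void restore_debug_regs_sketch(const unsigned long *ibr, const unsigned long *dbr)
{
        for (int i = 0; i < 8; i++) {
                set_ibr(i, ibr[i]);
                set_dbr(i, dbr[i]);
        }
}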
+
+       /*
+        *      r32:            context_t base address
+        */
+#define        SAVE_FPU_LOW                            \
+       add     r2=CTX(F2),r32;                 \
+       add     r3=CTX(F3),r32;                 \
+       ;;                                      \
+       stf.spill.nta   [r2]=f2,32;             \
+       stf.spill.nta   [r3]=f3,32;             \
+       ;;                                      \
+       stf.spill.nta   [r2]=f4,32;             \
+       stf.spill.nta   [r3]=f5,32;             \
+       ;;                                      \
+       stf.spill.nta   [r2]=f6,32;             \
+       stf.spill.nta   [r3]=f7,32;             \
+       ;;                                      \
+       stf.spill.nta   [r2]=f8,32;             \
+       stf.spill.nta   [r3]=f9,32;             \
+       ;;                                      \
+       stf.spill.nta   [r2]=f10,32;            \
+       stf.spill.nta   [r3]=f11,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f12,32;            \
+       stf.spill.nta   [r3]=f13,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f14,32;            \
+       stf.spill.nta   [r3]=f15,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f16,32;            \
+       stf.spill.nta   [r3]=f17,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f18,32;            \
+       stf.spill.nta   [r3]=f19,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f20,32;            \
+       stf.spill.nta   [r3]=f21,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f22,32;            \
+       stf.spill.nta   [r3]=f23,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f24,32;            \
+       stf.spill.nta   [r3]=f25,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f26,32;            \
+       stf.spill.nta   [r3]=f27,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f28,32;            \
+       stf.spill.nta   [r3]=f29,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f30;               \
+       stf.spill.nta   [r3]=f31;               \
+       ;;
+
+       /*
+        *      r32:            context_t base address
+        */
+#define        SAVE_FPU_HIGH                           \
+       add     r2=CTX(F32),r32;                \
+       add     r3=CTX(F33),r32;                \
+       ;;                                      \
+       stf.spill.nta   [r2]=f32,32;            \
+       stf.spill.nta   [r3]=f33,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f34,32;            \
+       stf.spill.nta   [r3]=f35,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f36,32;            \
+       stf.spill.nta   [r3]=f37,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f38,32;            \
+       stf.spill.nta   [r3]=f39,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f40,32;            \
+       stf.spill.nta   [r3]=f41,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f42,32;            \
+       stf.spill.nta   [r3]=f43,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f44,32;            \
+       stf.spill.nta   [r3]=f45,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f46,32;            \
+       stf.spill.nta   [r3]=f47,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f48,32;            \
+       stf.spill.nta   [r3]=f49,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f50,32;            \
+       stf.spill.nta   [r3]=f51,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f52,32;            \
+       stf.spill.nta   [r3]=f53,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f54,32;            \
+       stf.spill.nta   [r3]=f55,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f56,32;            \
+       stf.spill.nta   [r3]=f57,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f58,32;            \
+       stf.spill.nta   [r3]=f59,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f60,32;            \
+       stf.spill.nta   [r3]=f61,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f62,32;            \
+       stf.spill.nta   [r3]=f63,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f64,32;            \
+       stf.spill.nta   [r3]=f65,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f66,32;            \
+       stf.spill.nta   [r3]=f67,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f68,32;            \
+       stf.spill.nta   [r3]=f69,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f70,32;            \
+       stf.spill.nta   [r3]=f71,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f72,32;            \
+       stf.spill.nta   [r3]=f73,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f74,32;            \
+       stf.spill.nta   [r3]=f75,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f76,32;            \
+       stf.spill.nta   [r3]=f77,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f78,32;            \
+       stf.spill.nta   [r3]=f79,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f80,32;            \
+       stf.spill.nta   [r3]=f81,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f82,32;            \
+       stf.spill.nta   [r3]=f83,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f84,32;            \
+       stf.spill.nta   [r3]=f85,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f86,32;            \
+       stf.spill.nta   [r3]=f87,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f88,32;            \
+       stf.spill.nta   [r3]=f89,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f90,32;            \
+       stf.spill.nta   [r3]=f91,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f92,32;            \
+       stf.spill.nta   [r3]=f93,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f94,32;            \
+       stf.spill.nta   [r3]=f95,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f96,32;            \
+       stf.spill.nta   [r3]=f97,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f98,32;            \
+       stf.spill.nta   [r3]=f99,32;            \
+       ;;                                      \
+       stf.spill.nta   [r2]=f100,32;           \
+       stf.spill.nta   [r3]=f101,32;           \
+       ;;                                      \
+       stf.spill.nta   [r2]=f102,32;           \
+       stf.spill.nta   [r3]=f103,32;           \
+       ;;                                      \
+       stf.spill.nta   [r2]=f104,32;           \
+       stf.spill.nta   [r3]=f105,32;           \
+       ;;                                      \
+       stf.spill.nta   [r2]=f106,32;           \
+       stf.spill.nta   [r3]=f107,32;           \
+       ;;                                      \
+       stf.spill.nta   [r2]=f108,32;           \
+       stf.spill.nta   [r3]=f109,32;           \
+       ;;                                      \
+       stf.spill.nta   [r2]=f110,32;           \
+       stf.spill.nta   [r3]=f111,32;           \
+       ;;                                      \
+       stf.spill.nta   [r2]=f112,32;           \
+       stf.spill.nta   [r3]=f113,32;           \
+       ;;                                      \
+       stf.spill.nta   [r2]=f114,32;           \
+       stf.spill.nta   [r3]=f115,32;           \
+       ;;                                      \
+       stf.spill.nta   [r2]=f116,32;           \
+       stf.spill.nta   [r3]=f117,32;           \
+       ;;                                      \
+       stf.spill.nta   [r2]=f118,32;           \
+       stf.spill.nta   [r3]=f119,32;           \
+       ;;                                      \
+       stf.spill.nta   [r2]=f120,32;           \
+       stf.spill.nta   [r3]=f121,32;           \
+       ;;                                      \
+       stf.spill.nta   [r2]=f122,32;           \
+       stf.spill.nta   [r3]=f123,32;           \
+       ;;                                      \
+       stf.spill.nta   [r2]=f124,32;           \
+       stf.spill.nta   [r3]=f125,32;           \
+       ;;                                      \
+       stf.spill.nta   [r2]=f126;              \
+       stf.spill.nta   [r3]=f127;              \
+       ;;
+
+     /*
+      *      r33:    point to context_t structure
+      */
+#define        RESTORE_FPU_LOW                         \
+    add     r2 = CTX(F2), r33;                 \
+    add     r3 = CTX(F3), r33;                 \
+    ;;                                         \
+    ldf.fill.nta f2 = [r2], 32;                        \
+    ldf.fill.nta f3 = [r3], 32;                        \
+    ;;                                         \
+    ldf.fill.nta f4 = [r2], 32;                        \
+    ldf.fill.nta f5 = [r3], 32;                        \
+    ;;                                         \
+    ldf.fill.nta f6 = [r2], 32;                        \
+    ldf.fill.nta f7 = [r3], 32;                        \
+    ;;                                         \
+    ldf.fill.nta f8 = [r2], 32;                        \
+    ldf.fill.nta f9 = [r3], 32;                        \
+    ;;                                         \
+    ldf.fill.nta f10 = [r2], 32;               \
+    ldf.fill.nta f11 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f12 = [r2], 32;               \
+    ldf.fill.nta f13 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f14 = [r2], 32;               \
+    ldf.fill.nta f15 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f16 = [r2], 32;               \
+    ldf.fill.nta f17 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f18 = [r2], 32;               \
+    ldf.fill.nta f19 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f20 = [r2], 32;               \
+    ldf.fill.nta f21 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f22 = [r2], 32;               \
+    ldf.fill.nta f23 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f24 = [r2], 32;               \
+    ldf.fill.nta f25 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f26 = [r2], 32;               \
+    ldf.fill.nta f27 = [r3], 32;               \
+       ;;                                      \
+    ldf.fill.nta f28 = [r2], 32;               \
+    ldf.fill.nta f29 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f30 = [r2], 32;               \
+    ldf.fill.nta f31 = [r3], 32;               \
+    ;;
+
+
+
+    /*
+     *      r33:    point to context_t structure
+     */
+#define        RESTORE_FPU_HIGH                        \
+    add     r2 = CTX(F32), r33;                        \
+    add     r3 = CTX(F33), r33;                        \
+    ;;                                         \
+    ldf.fill.nta f32 = [r2], 32;               \
+    ldf.fill.nta f33 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f34 = [r2], 32;               \
+    ldf.fill.nta f35 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f36 = [r2], 32;               \
+    ldf.fill.nta f37 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f38 = [r2], 32;               \
+    ldf.fill.nta f39 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f40 = [r2], 32;               \
+    ldf.fill.nta f41 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f42 = [r2], 32;               \
+    ldf.fill.nta f43 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f44 = [r2], 32;               \
+    ldf.fill.nta f45 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f46 = [r2], 32;               \
+    ldf.fill.nta f47 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f48 = [r2], 32;               \
+    ldf.fill.nta f49 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f50 = [r2], 32;               \
+    ldf.fill.nta f51 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f52 = [r2], 32;               \
+    ldf.fill.nta f53 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f54 = [r2], 32;               \
+    ldf.fill.nta f55 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f56 = [r2], 32;               \
+    ldf.fill.nta f57 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f58 = [r2], 32;               \
+    ldf.fill.nta f59 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f60 = [r2], 32;               \
+    ldf.fill.nta f61 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f62 = [r2], 32;               \
+    ldf.fill.nta f63 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f64 = [r2], 32;               \
+    ldf.fill.nta f65 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f66 = [r2], 32;               \
+    ldf.fill.nta f67 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f68 = [r2], 32;               \
+    ldf.fill.nta f69 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f70 = [r2], 32;               \
+    ldf.fill.nta f71 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f72 = [r2], 32;               \
+    ldf.fill.nta f73 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f74 = [r2], 32;               \
+    ldf.fill.nta f75 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f76 = [r2], 32;               \
+    ldf.fill.nta f77 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f78 = [r2], 32;               \
+    ldf.fill.nta f79 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f80 = [r2], 32;               \
+    ldf.fill.nta f81 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f82 = [r2], 32;               \
+    ldf.fill.nta f83 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f84 = [r2], 32;               \
+    ldf.fill.nta f85 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f86 = [r2], 32;               \
+    ldf.fill.nta f87 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f88 = [r2], 32;               \
+    ldf.fill.nta f89 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f90 = [r2], 32;               \
+    ldf.fill.nta f91 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f92 = [r2], 32;               \
+    ldf.fill.nta f93 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f94 = [r2], 32;               \
+    ldf.fill.nta f95 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f96 = [r2], 32;               \
+    ldf.fill.nta f97 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f98 = [r2], 32;               \
+    ldf.fill.nta f99 = [r3], 32;               \
+    ;;                                         \
+    ldf.fill.nta f100 = [r2], 32;              \
+    ldf.fill.nta f101 = [r3], 32;              \
+    ;;                                         \
+    ldf.fill.nta f102 = [r2], 32;              \
+    ldf.fill.nta f103 = [r3], 32;              \
+    ;;                                         \
+    ldf.fill.nta f104 = [r2], 32;              \
+    ldf.fill.nta f105 = [r3], 32;              \
+    ;;                                         \
+    ldf.fill.nta f106 = [r2], 32;              \
+    ldf.fill.nta f107 = [r3], 32;              \
+    ;;                                         \
+    ldf.fill.nta f108 = [r2], 32;              \
+    ldf.fill.nta f109 = [r3], 32;              \
+    ;;                                         \
+    ldf.fill.nta f110 = [r2], 32;              \
+    ldf.fill.nta f111 = [r3], 32;              \
+    ;;                                         \
+    ldf.fill.nta f112 = [r2], 32;              \
+    ldf.fill.nta f113 = [r3], 32;              \
+    ;;                                         \
+    ldf.fill.nta f114 = [r2], 32;              \
+    ldf.fill.nta f115 = [r3], 32;              \
+    ;;                                         \
+    ldf.fill.nta f116 = [r2], 32;              \
+    ldf.fill.nta f117 = [r3], 32;              \
+    ;;                                         \
+    ldf.fill.nta f118 = [r2], 32;              \
+    ldf.fill.nta f119 = [r3], 32;              \
+    ;;                                         \
+    ldf.fill.nta f120 = [r2], 32;              \
+    ldf.fill.nta f121 = [r3], 32;              \
+    ;;                                         \
+    ldf.fill.nta f122 = [r2], 32;              \
+    ldf.fill.nta f123 = [r3], 32;              \
+    ;;                                         \
+    ldf.fill.nta f124 = [r2], 32;              \
+    ldf.fill.nta f125 = [r3], 32;              \
+    ;;                                         \
+    ldf.fill.nta f126 = [r2], 32;              \
+    ldf.fill.nta f127 = [r3], 32;              \
+    ;;
+
+       /*
+        *      r32:            context_t base address
+        */
+#define        SAVE_PTK_REGS                           \
+    add r2=CTX(PKR0), r32;                     \
+    mov r16=7;                                 \
+    ;;                                                 \
+    mov ar.lc=r16;                             \
+    mov r17=r0;                                        \
+    ;;                                         \
+1:                                             \
+    mov r18=pkr[r17];                          \
+    ;;                                         \
+    srlz.i;                                    \
+    ;;                                                 \
+    st8 [r2]=r18, 8;                           \
+    ;;                                         \
+    add r17 =1,r17;                            \
+    ;;                                         \
+    br.cloop.sptk 1b;                          \
+    ;;
+
+/*
+ *      r33:    points to the context_t structure
+ *      ar.lc is clobbered.
+ */
+#define RESTORE_PTK_REGS                       \
+    add r2=CTX(PKR0), r33;                     \
+    mov r16=7;                                 \
+    ;;                                                 \
+    mov ar.lc=r16;                             \
+    mov r17=r0;                                        \
+    ;;                                         \
+1:                                             \
+    ld8 r18=[r2], 8;                           \
+    ;;                                         \
+    mov pkr[r17]=r18;                          \
+    ;;                                         \
+    srlz.i;                                    \
+    ;;                                                 \
+    add r17 =1,r17;                            \
+    ;;                                         \
+    br.cloop.sptk 1b;                          \
+    ;;
+
+
+/*
+ * void vmm_trampoline( context_t * from,
+ *                     context_t * to)
+ *
+ *     from:   r32
+ *     to:     r33
+ *  note: interrupts must be disabled before calling this function.
+ */
+GLOBAL_ENTRY(vmm_trampoline)
+    mov r16 = psr
+    adds r2 = CTX(PSR), r32
+    ;;
+    st8 [r2] = r16, 8       // psr
+    mov r17 = pr
+    ;;
+    st8 [r2] = r17, 8       // pr
+    mov r18 = ar.unat
+    ;;
+    st8 [r2] = r18
+    mov r17 = ar.rsc
+    ;;
+    adds r2 = CTX(RSC),r32
+    ;;
+    st8 [r2]= r17
+    mov ar.rsc =0
+    flushrs
+    ;;
+    SAVE_GENERAL_REGS
+    ;;
+    SAVE_KERNEL_REGS
+    ;;
+    SAVE_APP_REGS
+    ;;
+    SAVE_BRANCH_REGS
+    ;;
+    SAVE_CTL_REGS
+    ;;
+    SAVE_REGION_REGS
+    ;;
+    //SAVE_DEBUG_REGS
+    ;;
+    rsm  psr.dfl
+    ;;
+    srlz.d
+    ;;
+    SAVE_FPU_LOW
+    ;;
+    rsm  psr.dfh
+    ;;
+    srlz.d
+    ;;
+    SAVE_FPU_HIGH
+    ;;
+    SAVE_PTK_REGS
+    ;;
+    RESTORE_PTK_REGS
+    ;;
+    RESTORE_FPU_HIGH
+    ;;
+    RESTORE_FPU_LOW
+    ;;
+    //RESTORE_DEBUG_REGS
+    ;;
+    RESTORE_REGION_REGS
+    ;;
+    RESTORE_CTL_REGS
+    ;;
+    RESTORE_BRANCH_REGS
+    ;;
+    RESTORE_APP_REGS
+    ;;
+    RESTORE_KERNEL_REGS
+    ;;
+    RESTORE_GENERAL_REGS
+    ;;
+    adds r2=CTX(PSR), r33
+    ;;
+    ld8 r16=[r2], 8       // psr
+    ;;
+    mov psr.l=r16
+    ;;
+    srlz.d
+    ;;
+    ld8 r16=[r2], 8       // pr
+    ;;
+    mov pr =r16,-1
+    ld8 r16=[r2]       // unat
+    ;;
+    mov ar.unat=r16
+    ;;
+    adds r2=CTX(RSC),r33
+    ;;
+    ld8 r16 =[r2]
+    ;;
+    mov ar.rsc = r16
+    ;;
+    br.ret.sptk.few b0
+END(vmm_trampoline)
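The trampoline follows the normal ia64 software conventions, so the from pointer documented above arrives in r32 and to in r33, with interrupts already disabled by the caller. A minimal sketch of the C-side contract, with context_t taken on faith from the headers added elsewhere in this series:

/* Assumed prototype only; the real context_t layout is generated via asm-offsets.h. */
typedef struct context context_t;                 /* opaque in this sketch */

extern void vmm_trampoline(context_t *from,       /* state saved here   (r32) */
                           context_t *to);        /* state loaded from  (r33) */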
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
new file mode 100644 (file)
index 0000000..e44027c
--- /dev/null
@@ -0,0 +1,2163 @@
+/*
+ * vcpu.c: handling all virtual cpu related things.
+ * Copyright (c) 2005, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ *  Shaofan Li (Susue Li) <susie.li@intel.com>
+ *  Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
+ *  Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ *  Xiantao Zhang <xiantao.zhang@intel.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/types.h>
+
+#include <asm/processor.h>
+#include <asm/ia64regs.h>
+#include <asm/gcc_intrin.h>
+#include <asm/kregs.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+
+#include "asm-offsets.h"
+#include "vcpu.h"
+
+/*
+ * Special notes:
+ * - Index by it/dt/rt sequence
+ * - Only existing mode transitions are allowed in this table
+ * - RSE is placed at lazy mode when emulating guest partial mode
+ * - If the gva falls in region 0 or 4, the only allowed case is identity
+ *   mapping (gva = gpa); otherwise panic
+ */
+int mm_switch_table[8][8] = {
+       /*  2004/09/12(Kevin): Allow switch to self */
+       /*
+        *  (it,dt,rt): (0,0,0) -> (1,1,1)
+        *  This kind of transition usually occurs in the very early
+        *  stage of Linux boot up procedure. Another case is in efi
+        *  and pal calls. (see "arch/ia64/kernel/head.S")
+        *
+        *  (it,dt,rt): (0,0,0) -> (0,1,1)
+        *  This kind of transition is found when OSYa exits efi boot
+        *  service. Due to gva = gpa in this case (Same region),
+        *  service. Since gva = gpa in this case (same region), data
+        *  accesses can still be satisfied even though the itlb entry
+        *  for physical emulation is hit.
+       {SW_SELF, 0,  0,  SW_NOP, 0,  0,  0,  SW_P2V},
+       {0,  0,  0,  0,  0,  0,  0,  0},
+       {0,  0,  0,  0,  0,  0,  0,  0},
+       /*
+        *  (it,dt,rt): (0,1,1) -> (1,1,1)
+        *  This kind of transition is found in OSYa.
+        *
+        *  (it,dt,rt): (0,1,1) -> (0,0,0)
+        *  This kind of transition is found in OSYa
+        */
+       {SW_NOP, 0,  0,  SW_SELF, 0,  0,  0,  SW_P2V},
+       /* (1,0,0)->(1,1,1) */
+       {0,  0,  0,  0,  0,  0,  0,  SW_P2V},
+       /*
+        *  (it,dt,rt): (1,0,1) -> (1,1,1)
+        *  This kind of transition usually occurs when Linux returns
+        *  from the low level TLB miss handlers.
+        *  (see "arch/ia64/kernel/ivt.S")
+        */
+       {0,  0,  0,  0,  0,  SW_SELF, 0,  SW_P2V},
+       {0,  0,  0,  0,  0,  0,  0,  0},
+       /*
+        *  (it,dt,rt): (1,1,1) -> (1,0,1)
+        *  This kind of transition usually occurs in Linux low level
+        *  TLB miss handler. (see "arch/ia64/kernel/ivt.S")
+        *
+        *  (it,dt,rt): (1,1,1) -> (0,0,0)
+        *  This kind of transition usually occurs in pal and efi calls,
+        *  which requires running in physical mode.
+        *  (see "arch/ia64/kernel/head.S")
+        *  (1,1,1)->(1,0,0)
+        */
+
+       {SW_V2P, 0,  0,  0,  SW_V2P, SW_V2P, 0,  SW_SELF},
+};
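mm_switch_table is indexed by the three (it, dt, rt) translation bits of the old and new PSR, packed into a value from 0 to 7; mm_switch_action() below does exactly that through MODE_IND(). The macro itself lives in headers added elsewhere in this series, so the packing shown here is an assumption, but it matches the table layout:

/* Assumed shape of MODE_IND(): pack psr.it/psr.dt/psr.rt into a 0..7 index. */
#define MODE_IND_SKETCH(psr)  ((((psr).it) << 2) + (((psr).dt) << 1) + ((psr).rt))

/* Example: old (it,dt,rt) = (1,1,1) -> 7, new = (0,0,0) -> 0, so
 * mm_switch_table[7][0] == SW_V2P, i.e. leave virtual mode for physical mode;
 * the reverse transition, [0][7], is SW_P2V. */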
+
+void physical_mode_init(struct kvm_vcpu  *vcpu)
+{
+       vcpu->arch.mode_flags = GUEST_IN_PHY;
+}
+
+void switch_to_physical_rid(struct kvm_vcpu *vcpu)
+{
+       unsigned long psr;
+
+       /* Save original virtual mode rr[0] and rr[4] */
+       psr = ia64_clear_ic();
+       ia64_set_rr(VRN0<<VRN_SHIFT, vcpu->arch.metaphysical_rr0);
+       ia64_srlz_d();
+       ia64_set_rr(VRN4<<VRN_SHIFT, vcpu->arch.metaphysical_rr4);
+       ia64_srlz_d();
+
+       ia64_set_psr(psr);
+       return;
+}
+
+
+void switch_to_virtual_rid(struct kvm_vcpu *vcpu)
+{
+       unsigned long psr;
+
+       psr = ia64_clear_ic();
+       ia64_set_rr(VRN0 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr0);
+       ia64_srlz_d();
+       ia64_set_rr(VRN4 << VRN_SHIFT, vcpu->arch.metaphysical_saved_rr4);
+       ia64_srlz_d();
+       ia64_set_psr(psr);
+       return;
+}
+
+static int mm_switch_action(struct ia64_psr opsr, struct ia64_psr npsr)
+{
+       return mm_switch_table[MODE_IND(opsr)][MODE_IND(npsr)];
+}
+
+void switch_mm_mode(struct kvm_vcpu *vcpu, struct ia64_psr old_psr,
+                                       struct ia64_psr new_psr)
+{
+       int act;
+       act = mm_switch_action(old_psr, new_psr);
+       switch (act) {
+       case SW_V2P:
+               /*printk("V -> P mode transition: (0x%lx -> 0x%lx)\n",
+               old_psr.val, new_psr.val);*/
+               switch_to_physical_rid(vcpu);
+               /*
+                * Set rse to enforced lazy, to prevent active rse
+                * save/restore while in guest physical mode.
+                */
+               vcpu->arch.mode_flags |= GUEST_IN_PHY;
+               break;
+       case SW_P2V:
+               switch_to_virtual_rid(vcpu);
+               /*
+                * Recover the old mode, which was saved when entering
+                * guest physical mode.
+                */
+               vcpu->arch.mode_flags &= ~GUEST_IN_PHY;
+               break;
+       case SW_SELF:
+               break;
+       case SW_NOP:
+               break;
+       default:
+               /* Sanity check */
+               break;
+       }
+       return;
+}
+
+
+
+/*
+ * In physical mode, tc/tr insertions for regions 0 and 4 use
+ * RID[0] and RID[4], which are reserved for physical mode emulation.
+ * However, the inserted tc/tr entries need the rid for virtual
+ * mode, so the original virtual rid has to be restored before
+ * the insert.
+ *
+ * Operations which require such a switch include:
+ *  - insertions (itc.*, itr.*)
+ *  - purges (ptc.* and ptr.*)
+ *  - tpa
+ *  - tak
+ *  - thash?, ttag?
+ * All of the above need the actual virtual rid for the destination entry.
+ */
+
+void check_mm_mode_switch(struct kvm_vcpu *vcpu,  struct ia64_psr old_psr,
+                                       struct ia64_psr new_psr)
+{
+
+       if ((old_psr.dt != new_psr.dt)
+                       || (old_psr.it != new_psr.it)
+                       || (old_psr.rt != new_psr.rt))
+               switch_mm_mode(vcpu, old_psr, new_psr);
+
+       return;
+}
+
+
+void prepare_if_physical_mode(struct kvm_vcpu *vcpu)
+{
+       if (is_physical_mode(vcpu)) {
+               vcpu->arch.mode_flags |= GUEST_PHY_EMUL;
+               switch_to_virtual_rid(vcpu);
+       }
+       return;
+}
+
+/* Recover always follows prepare */
+void recover_if_physical_mode(struct kvm_vcpu *vcpu)
+{
+       if (is_physical_mode(vcpu))
+               switch_to_physical_rid(vcpu);
+       vcpu->arch.mode_flags &= ~GUEST_PHY_EMUL;
+       return;
+}
+
+#define RPT(x) ((u16) &((struct kvm_pt_regs *)0)->x)
+
+static u16 gr_info[32] = {
+       0,      /* r0 is read-only : WE SHOULD NEVER GET THIS */
+       RPT(r1), RPT(r2), RPT(r3),
+       RPT(r4), RPT(r5), RPT(r6), RPT(r7),
+       RPT(r8), RPT(r9), RPT(r10), RPT(r11),
+       RPT(r12), RPT(r13), RPT(r14), RPT(r15),
+       RPT(r16), RPT(r17), RPT(r18), RPT(r19),
+       RPT(r20), RPT(r21), RPT(r22), RPT(r23),
+       RPT(r24), RPT(r25), RPT(r26), RPT(r27),
+       RPT(r28), RPT(r29), RPT(r30), RPT(r31)
+};
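RPT(x) is the classic offsetof() idiom: taking the address of field x in a struct kvm_pt_regs pretended to live at address 0 yields the field's byte offset, truncated to u16. gr_info[] therefore maps a static register number to the offset of its saved image, which getreg()/setreg() below add to the regs base address. An equivalent sketch with the standard macro and a made-up stand-in struct:

#include <stddef.h>

struct pt_regs_sketch {                 /* hypothetical stand-in for kvm_pt_regs */
        unsigned long r1, r2, r3;
};

#define RPT_SKETCH(x)  ((unsigned short) offsetof(struct pt_regs_sketch, x))

/* RPT_SKETCH(r2) == 8 on a 64-bit target, which is exactly what
 * (u16) &((struct pt_regs_sketch *)0)->r2 evaluates to. */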
+
+#define IA64_FIRST_STACKED_GR   32
+#define IA64_FIRST_ROTATING_FR  32
+
+static inline unsigned long
+rotate_reg(unsigned long sor, unsigned long rrb, unsigned long reg)
+{
+       reg += rrb;
+       if (reg >= sor)
+               reg -= sor;
+       return reg;
+}
+
+/*
+ * Return the (rotated) index for floating point register
+ * REGNUM (REGNUM must range from 32 to 127; the result is
+ * in the range 0 to 95).
+ */
+static inline unsigned long fph_index(struct kvm_pt_regs *regs,
+                                               long regnum)
+{
+       unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
+       return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
+}
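rotate_reg() applies the register-rename base: the effective index is (reg + rrb) modulo the size of the rotating partition, computed with a single conditional subtraction rather than a full modulo because the caller already passes values below sor. For fph_index() the partition is the 96 rotating FP registers (f32-f127). A small self-contained check of that arithmetic:

#include <assert.h>

/* Same arithmetic as rotate_reg() above, reproduced for a standalone check. */
static unsigned long rotate_sketch(unsigned long sor, unsigned long rrb,
                                   unsigned long reg)
{
        reg += rrb;
        return (reg >= sor) ? reg - sor : reg;
}

int main(void)
{
        assert(rotate_sketch(96, 60, 40 - 32) == 68);   /* f40 with rrb.fr = 60 */
        assert(rotate_sketch(96, 90, 40 - 32) ==  2);   /* wraps past f127 */
        return 0;
}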
+
+
+/*
+ * Given an RSE backing store address and a (signed) number of
+ * registers, return the address that many slots away, accounting
+ * for the intervening RNAT collection slots.
+ */
+static inline unsigned long *kvm_rse_skip_regs(unsigned long *addr,
+                                                       long num_regs)
+{
+       long delta = ia64_rse_slot_num(addr) + num_regs;
+       int i = 0;
+
+       if (num_regs < 0)
+               delta -= 0x3e;
+       if (delta < 0) {
+               while (delta <= -0x3f) {
+                       i--;
+                       delta += 0x3f;
+               }
+       } else {
+               while (delta >= 0x3f) {
+                       i++;
+                       delta -= 0x3f;
+               }
+       }
+
+       return addr + num_regs + i;
+}
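kvm_rse_skip_regs() moves a backing-store pointer by num_regs 8-byte slots while accounting for the NaT-collection slot the RSE interleaves every 64 slots (hence the 0x3f constants): ia64_rse_slot_num() gives the position within the current 64-slot group, and i counts how many RNAT slots are crossed. The same arithmetic on plain integers, as a standalone sketch:

/* Sketch of the slot arithmetic with plain longs instead of pointers;
 * slot is the starting position within a 64-slot group. */
static long skip_regs_sketch(long slot, long num_regs)
{
        long delta = slot + num_regs, i = 0;

        if (num_regs < 0)
                delta -= 0x3e;
        if (delta < 0) {
                while (delta <= -0x3f) { i--; delta += 0x3f; }
        } else {
                while (delta >= 0x3f) { i++; delta -= 0x3f; }
        }
        return num_regs + i;            /* net movement in 8-byte slots */
}

/* skip_regs_sketch(10, 60) == 61: one RNAT slot crossed going forward;
 * skip_regs_sketch(10, -20) == -21: one crossed going backward. */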
+
+static void get_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
+                                       unsigned long *val, int *nat)
+{
+       unsigned long *bsp, *addr, *rnat_addr, *bspstore;
+       unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
+       unsigned long nat_mask;
+       unsigned long old_rsc, new_rsc;
+       long sof = (regs->cr_ifs) & 0x7f;
+       long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
+       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+       long ridx = r1 - 32;
+
+       if (ridx < sor)
+               ridx = rotate_reg(sor, rrb_gr, ridx);
+
+       old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
+       new_rsc = old_rsc&(~(0x3));
+       ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
+
+       bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
+       bsp = kbs + (regs->loadrs >> 19);
+
+       addr = kvm_rse_skip_regs(bsp, -sof + ridx);
+       nat_mask = 1UL << ia64_rse_slot_num(addr);
+       rnat_addr = ia64_rse_rnat_addr(addr);
+
+       if (addr >= bspstore) {
+               ia64_flushrs();
+               ia64_mf();
+               bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
+       }
+       *val = *addr;
+       if (nat) {
+               if (bspstore < rnat_addr)
+                       *nat = (int)!!(ia64_getreg(_IA64_REG_AR_RNAT)
+                                                       & nat_mask);
+               else
+                       *nat = (int)!!((*rnat_addr) & nat_mask);
+               ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
+       }
+}
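The regs->loadrs >> 19 step above (and the "16 + 3" note in set_rse_reg() below) reads as: loadrs holds the dirty-partition byte count shifted into the ar.rsc.loadrs position at bit 16, so shifting right by 16 + 3 yields the count in 8-byte slots. That interpretation is inferred from the constants rather than spelled out in the patch; a worked instance under that assumption:

/* Assumed meaning of regs->loadrs: dirty bytes << 16 (the ar.rsc.loadrs field). */
static inline unsigned long loadrs_to_slots_sketch(unsigned long loadrs)
{
        return loadrs >> 19;                    /* == (loadrs >> 16) / 8 */
}

/* e.g. 5 dirty stacked registers: loadrs = (5 * 8) << 16, and the result is 5. */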
+
+void set_rse_reg(struct kvm_pt_regs *regs, unsigned long r1,
+                               unsigned long val, unsigned long nat)
+{
+       unsigned long *bsp, *bspstore, *addr, *rnat_addr;
+       unsigned long *kbs = (void *) current_vcpu + VMM_RBS_OFFSET;
+       unsigned long nat_mask;
+       unsigned long old_rsc, new_rsc, psr;
+       unsigned long rnat;
+       long sof = (regs->cr_ifs) & 0x7f;
+       long sor = (((regs->cr_ifs >> 14) & 0xf) << 3);
+       long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
+       long ridx = r1 - 32;
+
+       if (ridx < sor)
+               ridx = rotate_reg(sor, rrb_gr, ridx);
+
+       old_rsc = ia64_getreg(_IA64_REG_AR_RSC);
+       /* put RSC into enforced lazy mode and clear the loadrs field */
+       new_rsc = old_rsc & (~0x3fff0003);
+       ia64_setreg(_IA64_REG_AR_RSC, new_rsc);
+       bsp = kbs + (regs->loadrs >> 19); /* loadrs is at bit 16; >> (16 + 3) gives 8-byte slots */
+
+       addr = kvm_rse_skip_regs(bsp, -sof + ridx);
+       nat_mask = 1UL << ia64_rse_slot_num(addr);
+       rnat_addr = ia64_rse_rnat_addr(addr);
+
+       local_irq_save(psr);
+       bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
+       if (addr >= bspstore) {
+
+               ia64_flushrs();
+               ia64_mf();
+               *addr = val;
+               bspstore = (unsigned long *)ia64_getreg(_IA64_REG_AR_BSPSTORE);
+               rnat = ia64_getreg(_IA64_REG_AR_RNAT);
+               if (bspstore < rnat_addr)
+                       rnat = rnat & (~nat_mask);
+               else
+                       *rnat_addr = (*rnat_addr) & (~nat_mask);
+
+               ia64_mf();
+               ia64_loadrs();
+               ia64_setreg(_IA64_REG_AR_RNAT, rnat);
+       } else {
+               rnat = ia64_getreg(_IA64_REG_AR_RNAT);
+               *addr = val;
+               if (bspstore < rnat_addr)
+                       rnat = rnat & (~nat_mask);
+               else
+                       *rnat_addr = (*rnat_addr) & (~nat_mask);
+
+               ia64_setreg(_IA64_REG_AR_BSPSTORE, bspstore);
+               ia64_setreg(_IA64_REG_AR_RNAT, rnat);
+       }
+       local_irq_restore(psr);
+       ia64_setreg(_IA64_REG_AR_RSC, old_rsc);
+}
+
+void getreg(unsigned long regnum, unsigned long *val,
+                               int *nat, struct kvm_pt_regs *regs)
+{
+       unsigned long addr, *unat;
+       if (regnum >= IA64_FIRST_STACKED_GR) {
+               get_rse_reg(regs, regnum, val, nat);
+               return;
+       }
+
+       /*
+        * Now look at registers in [0-31] range and init correct UNAT
+        */
+       addr = (unsigned long)regs;
+       unat = &regs->eml_unat;
+
+       addr += gr_info[regnum];
+
+       *val  = *(unsigned long *)addr;
+       /*
+        * do it only when requested
+        */
+       if (nat)
+               *nat  = (*unat >> ((addr >> 3) & 0x3f)) & 0x1UL;
+}
+
+void setreg(unsigned long regnum, unsigned long val,
+                       int nat, struct kvm_pt_regs *regs)
+{
+       unsigned long addr;
+       unsigned long bitmask;
+       unsigned long *unat;
+
+       /*
+        * First takes care of stacked registers
+        */
+       if (regnum >= IA64_FIRST_STACKED_GR) {
+               set_rse_reg(regs, regnum, val, nat);
+               return;
+       }
+
+       /*
+        * Now look at registers in [0-31] range and init correct UNAT
+        */
+       addr = (unsigned long)regs;
+       unat = &regs->eml_unat;
+       /*
+        * add offset from base of struct
+        * and do it !
+        */
+       addr += gr_info[regnum];
+
+       *(unsigned long *)addr = val;
+
+       /*
+        * We need to clear the corresponding UNAT bit to fully emulate the load
+        * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
+        */
+       bitmask   = 1UL << ((addr >> 3) & 0x3f);
+       if (nat)
+               *unat |= bitmask;
+       else
+               *unat &= ~bitmask;
+
+}
+
+u64 vcpu_get_gr(struct kvm_vcpu *vcpu, unsigned long reg)
+{
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+       u64 val;
+
+       if (!reg)
+               return 0;
+       getreg(reg, &val, 0, regs);
+       return val;
+}
+
+void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 value, int nat)
+{
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+       long sof = (regs->cr_ifs) & 0x7f;
+
+       if (!reg)
+               return;
+       if (reg >= sof + 32)
+               return;
+       setreg(reg, value, nat, regs);  /* FIXME: handle NATs later */
+}
+
+void getfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
+                               struct kvm_pt_regs *regs)
+{
+       /* Take floating register rotation into consideration */
+       if (regnum >= IA64_FIRST_ROTATING_FR)
+               regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
+#define CASE_FIXED_FP(reg)                     \
+       case  (reg) :                           \
+               ia64_stf_spill(fpval, reg);     \
+       break
+
+       switch (regnum) {
+               CASE_FIXED_FP(0);
+               CASE_FIXED_FP(1);
+               CASE_FIXED_FP(2);
+               CASE_FIXED_FP(3);
+               CASE_FIXED_FP(4);
+               CASE_FIXED_FP(5);
+
+               CASE_FIXED_FP(6);
+               CASE_FIXED_FP(7);
+               CASE_FIXED_FP(8);
+               CASE_FIXED_FP(9);
+               CASE_FIXED_FP(10);
+               CASE_FIXED_FP(11);
+
+               CASE_FIXED_FP(12);
+               CASE_FIXED_FP(13);
+               CASE_FIXED_FP(14);
+               CASE_FIXED_FP(15);
+               CASE_FIXED_FP(16);
+               CASE_FIXED_FP(17);
+               CASE_FIXED_FP(18);
+               CASE_FIXED_FP(19);
+               CASE_FIXED_FP(20);
+               CASE_FIXED_FP(21);
+               CASE_FIXED_FP(22);
+               CASE_FIXED_FP(23);
+               CASE_FIXED_FP(24);
+               CASE_FIXED_FP(25);
+               CASE_FIXED_FP(26);
+               CASE_FIXED_FP(27);
+               CASE_FIXED_FP(28);
+               CASE_FIXED_FP(29);
+               CASE_FIXED_FP(30);
+               CASE_FIXED_FP(31);
+               CASE_FIXED_FP(32);
+               CASE_FIXED_FP(33);
+               CASE_FIXED_FP(34);
+               CASE_FIXED_FP(35);
+               CASE_FIXED_FP(36);
+               CASE_FIXED_FP(37);
+               CASE_FIXED_FP(38);
+               CASE_FIXED_FP(39);
+               CASE_FIXED_FP(40);
+               CASE_FIXED_FP(41);
+               CASE_FIXED_FP(42);
+               CASE_FIXED_FP(43);
+               CASE_FIXED_FP(44);
+               CASE_FIXED_FP(45);
+               CASE_FIXED_FP(46);
+               CASE_FIXED_FP(47);
+               CASE_FIXED_FP(48);
+               CASE_FIXED_FP(49);
+               CASE_FIXED_FP(50);
+               CASE_FIXED_FP(51);
+               CASE_FIXED_FP(52);
+               CASE_FIXED_FP(53);
+               CASE_FIXED_FP(54);
+               CASE_FIXED_FP(55);
+               CASE_FIXED_FP(56);
+               CASE_FIXED_FP(57);
+               CASE_FIXED_FP(58);
+               CASE_FIXED_FP(59);
+               CASE_FIXED_FP(60);
+               CASE_FIXED_FP(61);
+               CASE_FIXED_FP(62);
+               CASE_FIXED_FP(63);
+               CASE_FIXED_FP(64);
+               CASE_FIXED_FP(65);
+               CASE_FIXED_FP(66);
+               CASE_FIXED_FP(67);
+               CASE_FIXED_FP(68);
+               CASE_FIXED_FP(69);
+               CASE_FIXED_FP(70);
+               CASE_FIXED_FP(71);
+               CASE_FIXED_FP(72);
+               CASE_FIXED_FP(73);
+               CASE_FIXED_FP(74);
+               CASE_FIXED_FP(75);
+               CASE_FIXED_FP(76);
+               CASE_FIXED_FP(77);
+               CASE_FIXED_FP(78);
+               CASE_FIXED_FP(79);
+               CASE_FIXED_FP(80);
+               CASE_FIXED_FP(81);
+               CASE_FIXED_FP(82);
+               CASE_FIXED_FP(83);
+               CASE_FIXED_FP(84);
+               CASE_FIXED_FP(85);
+               CASE_FIXED_FP(86);
+               CASE_FIXED_FP(87);
+               CASE_FIXED_FP(88);
+               CASE_FIXED_FP(89);
+               CASE_FIXED_FP(90);
+               CASE_FIXED_FP(91);
+               CASE_FIXED_FP(92);
+               CASE_FIXED_FP(93);
+               CASE_FIXED_FP(94);
+               CASE_FIXED_FP(95);
+               CASE_FIXED_FP(96);
+               CASE_FIXED_FP(97);
+               CASE_FIXED_FP(98);
+               CASE_FIXED_FP(99);
+               CASE_FIXED_FP(100);
+               CASE_FIXED_FP(101);
+               CASE_FIXED_FP(102);
+               CASE_FIXED_FP(103);
+               CASE_FIXED_FP(104);
+               CASE_FIXED_FP(105);
+               CASE_FIXED_FP(106);
+               CASE_FIXED_FP(107);
+               CASE_FIXED_FP(108);
+               CASE_FIXED_FP(109);
+               CASE_FIXED_FP(110);
+               CASE_FIXED_FP(111);
+               CASE_FIXED_FP(112);
+               CASE_FIXED_FP(113);
+               CASE_FIXED_FP(114);
+               CASE_FIXED_FP(115);
+               CASE_FIXED_FP(116);
+               CASE_FIXED_FP(117);
+               CASE_FIXED_FP(118);
+               CASE_FIXED_FP(119);
+               CASE_FIXED_FP(120);
+               CASE_FIXED_FP(121);
+               CASE_FIXED_FP(122);
+               CASE_FIXED_FP(123);
+               CASE_FIXED_FP(124);
+               CASE_FIXED_FP(125);
+               CASE_FIXED_FP(126);
+               CASE_FIXED_FP(127);
+       }
+#undef CASE_FIXED_FP
+}
+
+void setfpreg(unsigned long regnum, struct ia64_fpreg *fpval,
+                                       struct kvm_pt_regs *regs)
+{
+       /* Take floating register rotation into consideration */
+       if (regnum >= IA64_FIRST_ROTATING_FR)
+               regnum = IA64_FIRST_ROTATING_FR + fph_index(regs, regnum);
+
+#define CASE_FIXED_FP(reg)                     \
+       case (reg) :                            \
+               ia64_ldf_fill(reg, fpval);      \
+       break
+
+       switch (regnum) {
+               CASE_FIXED_FP(2);
+               CASE_FIXED_FP(3);
+               CASE_FIXED_FP(4);
+               CASE_FIXED_FP(5);
+
+               CASE_FIXED_FP(6);
+               CASE_FIXED_FP(7);
+               CASE_FIXED_FP(8);
+               CASE_FIXED_FP(9);
+               CASE_FIXED_FP(10);
+               CASE_FIXED_FP(11);
+
+               CASE_FIXED_FP(12);
+               CASE_FIXED_FP(13);
+               CASE_FIXED_FP(14);
+               CASE_FIXED_FP(15);
+               CASE_FIXED_FP(16);
+               CASE_FIXED_FP(17);
+               CASE_FIXED_FP(18);
+               CASE_FIXED_FP(19);
+               CASE_FIXED_FP(20);
+               CASE_FIXED_FP(21);
+               CASE_FIXED_FP(22);
+               CASE_FIXED_FP(23);
+               CASE_FIXED_FP(24);
+               CASE_FIXED_FP(25);
+               CASE_FIXED_FP(26);
+               CASE_FIXED_FP(27);
+               CASE_FIXED_FP(28);
+               CASE_FIXED_FP(29);
+               CASE_FIXED_FP(30);
+               CASE_FIXED_FP(31);
+               CASE_FIXED_FP(32);
+               CASE_FIXED_FP(33);
+               CASE_FIXED_FP(34);
+               CASE_FIXED_FP(35);
+               CASE_FIXED_FP(36);
+               CASE_FIXED_FP(37);
+               CASE_FIXED_FP(38);
+               CASE_FIXED_FP(39);
+               CASE_FIXED_FP(40);
+               CASE_FIXED_FP(41);
+               CASE_FIXED_FP(42);
+               CASE_FIXED_FP(43);
+               CASE_FIXED_FP(44);
+               CASE_FIXED_FP(45);
+               CASE_FIXED_FP(46);
+               CASE_FIXED_FP(47);
+               CASE_FIXED_FP(48);
+               CASE_FIXED_FP(49);
+               CASE_FIXED_FP(50);
+               CASE_FIXED_FP(51);
+               CASE_FIXED_FP(52);
+               CASE_FIXED_FP(53);
+               CASE_FIXED_FP(54);
+               CASE_FIXED_FP(55);
+               CASE_FIXED_FP(56);
+               CASE_FIXED_FP(57);
+               CASE_FIXED_FP(58);
+               CASE_FIXED_FP(59);
+               CASE_FIXED_FP(60);
+               CASE_FIXED_FP(61);
+               CASE_FIXED_FP(62);
+               CASE_FIXED_FP(63);
+               CASE_FIXED_FP(64);
+               CASE_FIXED_FP(65);
+               CASE_FIXED_FP(66);
+               CASE_FIXED_FP(67);
+               CASE_FIXED_FP(68);
+               CASE_FIXED_FP(69);
+               CASE_FIXED_FP(70);
+               CASE_FIXED_FP(71);
+               CASE_FIXED_FP(72);
+               CASE_FIXED_FP(73);
+               CASE_FIXED_FP(74);
+               CASE_FIXED_FP(75);
+               CASE_FIXED_FP(76);
+               CASE_FIXED_FP(77);
+               CASE_FIXED_FP(78);
+               CASE_FIXED_FP(79);
+               CASE_FIXED_FP(80);
+               CASE_FIXED_FP(81);
+               CASE_FIXED_FP(82);
+               CASE_FIXED_FP(83);
+               CASE_FIXED_FP(84);
+               CASE_FIXED_FP(85);
+               CASE_FIXED_FP(86);
+               CASE_FIXED_FP(87);
+               CASE_FIXED_FP(88);
+               CASE_FIXED_FP(89);
+               CASE_FIXED_FP(90);
+               CASE_FIXED_FP(91);
+               CASE_FIXED_FP(92);
+               CASE_FIXED_FP(93);
+               CASE_FIXED_FP(94);
+               CASE_FIXED_FP(95);
+               CASE_FIXED_FP(96);
+               CASE_FIXED_FP(97);
+               CASE_FIXED_FP(98);
+               CASE_FIXED_FP(99);
+               CASE_FIXED_FP(100);
+               CASE_FIXED_FP(101);
+               CASE_FIXED_FP(102);
+               CASE_FIXED_FP(103);
+               CASE_FIXED_FP(104);
+               CASE_FIXED_FP(105);
+               CASE_FIXED_FP(106);
+               CASE_FIXED_FP(107);
+               CASE_FIXED_FP(108);
+               CASE_FIXED_FP(109);
+               CASE_FIXED_FP(110);
+               CASE_FIXED_FP(111);
+               CASE_FIXED_FP(112);
+               CASE_FIXED_FP(113);
+               CASE_FIXED_FP(114);
+               CASE_FIXED_FP(115);
+               CASE_FIXED_FP(116);
+               CASE_FIXED_FP(117);
+               CASE_FIXED_FP(118);
+               CASE_FIXED_FP(119);
+               CASE_FIXED_FP(120);
+               CASE_FIXED_FP(121);
+               CASE_FIXED_FP(122);
+               CASE_FIXED_FP(123);
+               CASE_FIXED_FP(124);
+               CASE_FIXED_FP(125);
+               CASE_FIXED_FP(126);
+               CASE_FIXED_FP(127);
+       }
+}
+
+void vcpu_get_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
+                                               struct ia64_fpreg *val)
+{
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+       getfpreg(reg, val, regs);   /* FIXME: handle NATs later */
+}
+
+void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
+                                               struct ia64_fpreg *val)
+{
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+       if (reg > 1)
+               setfpreg(reg, val, regs);   /* FIXME: handle NATs later */
+}
+
+/************************************************************************
+ * lsapic timer
+ ***********************************************************************/
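+/*
+ * Guest ITC = host ITC + per-vcpu offset; last_itc keeps the value
+ * read by the guest monotonically non-decreasing.
+ */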
+u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
+{
+       unsigned long guest_itc;
+       guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
+
+       if (guest_itc >= VMX(vcpu, last_itc)) {
+               VMX(vcpu, last_itc) = guest_itc;
+               return  guest_itc;
+       } else
+               return VMX(vcpu, last_itc);
+}
+
+static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val);
+static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
+{
+       struct kvm_vcpu *v;
+       int i;
+       long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
+       unsigned long vitv = VCPU(vcpu, itv);
+
+       if (vcpu->vcpu_id == 0) {
+               for (i = 0; i < MAX_VCPU_NUM; i++) {
+                       v = (struct kvm_vcpu *)((char *)vcpu + VCPU_SIZE * i);
+                       VMX(v, itc_offset) = itc_offset;
+                       VMX(v, last_itc) = 0;
+               }
+       }
+       VMX(vcpu, last_itc) = 0;
+       if (VCPU(vcpu, itm) <= val) {
+               VMX(vcpu, itc_check) = 0;
+               vcpu_unpend_interrupt(vcpu, vitv);
+       } else {
+               VMX(vcpu, itc_check) = 1;
+               vcpu_set_itm(vcpu, VCPU(vcpu, itm));
+       }
+
+}
+
+static inline u64 vcpu_get_itm(struct kvm_vcpu *vcpu)
+{
+       return ((u64)VCPU(vcpu, itm));
+}
+
+static inline void vcpu_set_itm(struct kvm_vcpu *vcpu, u64 val)
+{
+       unsigned long vitv = VCPU(vcpu, itv);
+       VCPU(vcpu, itm) = val;
+
+       if (val > vcpu_get_itc(vcpu)) {
+               VMX(vcpu, itc_check) = 1;
+               vcpu_unpend_interrupt(vcpu, vitv);
+               VMX(vcpu, timer_pending) = 0;
+       } else
+               VMX(vcpu, itc_check) = 0;
+}
+
+#define  ITV_VECTOR(itv)    ((itv) & 0xff)
+#define  ITV_IRQ_MASK(itv)  ((itv) & (1 << 16))
+
+static inline void vcpu_set_itv(struct kvm_vcpu *vcpu, u64 val)
+{
+       VCPU(vcpu, itv) = val;
+       if (!ITV_IRQ_MASK(val) && vcpu->arch.timer_pending) {
+               vcpu_pend_interrupt(vcpu, ITV_VECTOR(val));
+               vcpu->arch.timer_pending = 0;
+       }
+}
+
+static inline void vcpu_set_eoi(struct kvm_vcpu *vcpu, u64 val)
+{
+       int vec;
+
+       vec = highest_inservice_irq(vcpu);
+       if (vec == NULL_VECTOR)
+               return;
+       VMX(vcpu, insvc[vec >> 6]) &= ~(1UL << (vec & 63));
+       VCPU(vcpu, eoi) = 0;
+       vcpu->arch.irq_new_pending = 1;
+
+}
+
+/* See Table 5-8 in SDM vol2 for the definition */
+int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice)
+{
+       union ia64_tpr vtpr;
+
+       vtpr.val = VCPU(vcpu, tpr);
+
+       if (h_inservice == NMI_VECTOR)
+               return IRQ_MASKED_BY_INSVC;
+
+       if (h_pending == NMI_VECTOR) {
+               /* Non Maskable Interrupt */
+               return IRQ_NO_MASKED;
+       }
+
+       if (h_inservice == ExtINT_VECTOR)
+               return IRQ_MASKED_BY_INSVC;
+
+       if (h_pending == ExtINT_VECTOR) {
+               if (vtpr.mmi) {
+                       /* mask all external IRQ */
+                       return IRQ_MASKED_BY_VTPR;
+               } else
+                       return IRQ_NO_MASKED;
+       }
+
+       if (is_higher_irq(h_pending, h_inservice)) {
+               if (is_higher_class(h_pending, vtpr.mic + (vtpr.mmi << 4)))
+                       return IRQ_NO_MASKED;
+               else
+                       return IRQ_MASKED_BY_VTPR;
+       } else {
+               return IRQ_MASKED_BY_INSVC;
+       }
+}
+
+void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
+{
+       long spsr;
+       int ret;
+
+       local_irq_save(spsr);
+       ret = test_and_set_bit(vec, &VCPU(vcpu, irr[0]));
+       local_irq_restore(spsr);
+
+       vcpu->arch.irq_new_pending = 1;
+}
+
+void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec)
+{
+       long spsr;
+       int ret;
+
+       local_irq_save(spsr);
+       ret = test_and_clear_bit(vec, &VCPU(vcpu, irr[0]));
+       local_irq_restore(spsr);
+       if (ret) {
+               vcpu->arch.irq_new_pending = 1;
+               wmb();
+       }
+}
+
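+/*
+ * Record the virtual highest-priority pending interrupt; if the
+ * vac.a_int acceleration bit is set, also report it through the
+ * PAL_VPS_SET_PENDING_INTERRUPT service.
+ */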
+void update_vhpi(struct kvm_vcpu *vcpu, int vec)
+{
+       u64 vhpi;
+
+       if (vec == NULL_VECTOR)
+               vhpi = 0;
+       else if (vec == NMI_VECTOR)
+               vhpi = 32;
+       else if (vec == ExtINT_VECTOR)
+               vhpi = 16;
+       else
+               vhpi = vec >> 4;
+
+       VCPU(vcpu, vhpi) = vhpi;
+       if (VCPU(vcpu, vac).a_int)
+               ia64_call_vsa(PAL_VPS_SET_PENDING_INTERRUPT,
+                               (u64)vcpu->arch.vpd, 0, 0, 0, 0, 0, 0);
+}
+
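+/*
+ * Deliver the highest-priority pending interrupt: return the spurious
+ * vector if nothing is deliverable, otherwise mark the vector as
+ * in-service and clear it from the pending IRR.
+ */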
+u64 vcpu_get_ivr(struct kvm_vcpu *vcpu)
+{
+       int vec, h_inservice, mask;
+
+       vec = highest_pending_irq(vcpu);
+       h_inservice = highest_inservice_irq(vcpu);
+       mask = irq_masked(vcpu, vec, h_inservice);
+       if (vec == NULL_VECTOR || mask == IRQ_MASKED_BY_INSVC) {
+               if (VCPU(vcpu, vhpi))
+                       update_vhpi(vcpu, NULL_VECTOR);
+               return IA64_SPURIOUS_INT_VECTOR;
+       }
+       if (mask == IRQ_MASKED_BY_VTPR) {
+               update_vhpi(vcpu, vec);
+               return IA64_SPURIOUS_INT_VECTOR;
+       }
+       VMX(vcpu, insvc[vec >> 6]) |= (1UL << (vec & 63));
+       vcpu_unpend_interrupt(vcpu, vec);
+       return  (u64)vec;
+}
+
+/**************************************************************************
+  Privileged operation emulation routines
+ **************************************************************************/
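+/*
+ * thash: compute the VHPT address for VADR.  With the long-format
+ * VHPT (pta.vf set) the hash is computed by the PAL_VPS_THASH
+ * service; otherwise the short-format address is formed from the
+ * region bits, the VHPT base in pta and the page-indexed offset.
+ */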
+u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       union ia64_pta vpta;
+       union ia64_rr vrr;
+       u64 pval;
+       u64 vhpt_offset;
+
+       vpta.val = vcpu_get_pta(vcpu);
+       vrr.val = vcpu_get_rr(vcpu, vadr);
+       vhpt_offset = ((vadr >> vrr.ps) << 3) & ((1UL << (vpta.size)) - 1);
+       if (vpta.vf) {
+               pval = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr.val,
+                               vpta.val, 0, 0, 0, 0);
+       } else {
+               pval = (vadr & VRN_MASK) | vhpt_offset |
+                       (vpta.val << 3 >> (vpta.size + 3) << (vpta.size));
+       }
+       return  pval;
+}
+
+u64 vcpu_ttag(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       union ia64_rr vrr;
+       union ia64_pta vpta;
+       u64 pval;
+
+       vpta.val = vcpu_get_pta(vcpu);
+       vrr.val = vcpu_get_rr(vcpu, vadr);
+       if (vpta.vf) {
+               pval = ia64_call_vsa(PAL_VPS_TTAG, vadr, vrr.val,
+                                               0, 0, 0, 0, 0);
+       } else
+               pval = 1;
+
+       return  pval;
+}
+
+u64 vcpu_tak(struct kvm_vcpu *vcpu, u64 vadr)
+{
+       struct thash_data *data;
+       union ia64_pta vpta;
+       u64 key;
+
+       vpta.val = vcpu_get_pta(vcpu);
+       if (vpta.vf == 0) {
+               key = 1;
+               return key;
+       }
+       data = vtlb_lookup(vcpu, vadr, D_TLB);
+       if (!data || !data->p)
+               key = 1;
+       else
+               key = data->key;
+
+       return key;
+}
+
+
+
+void kvm_thash(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long thash, vadr;
+
+       vadr = vcpu_get_gr(vcpu, inst.M46.r3);
+       thash = vcpu_thash(vcpu, vadr);
+       vcpu_set_gr(vcpu, inst.M46.r1, thash, 0);
+}
+
+
+void kvm_ttag(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long tag, vadr;
+
+       vadr = vcpu_get_gr(vcpu, inst.M46.r3);
+       tag = vcpu_ttag(vcpu, vadr);
+       vcpu_set_gr(vcpu, inst.M46.r1, tag, 0);
+}
+
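+/*
+ * tpa: translate guest virtual address VADR to guest physical PADR
+ * via the VHPT and then the VTLB; on a miss the appropriate data
+ * fault is injected into the guest instead.
+ */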
+int vcpu_tpa(struct kvm_vcpu *vcpu, u64 vadr, u64 *padr)
+{
+       struct thash_data *data;
+       union ia64_isr visr, pt_isr;
+       struct kvm_pt_regs *regs;
+       struct ia64_psr vpsr;
+
+       regs = vcpu_regs(vcpu);
+       pt_isr.val = VMX(vcpu, cr_isr);
+       visr.val = 0;
+       visr.ei = pt_isr.ei;
+       visr.ir = pt_isr.ir;
+       vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+       visr.na = 1;
+
+       data = vhpt_lookup(vadr);
+       if (data) {
+               if (data->p == 0) {
+                       vcpu_set_isr(vcpu, visr.val);
+                       data_page_not_present(vcpu, vadr);
+                       return IA64_FAULT;
+               } else if (data->ma == VA_MATTR_NATPAGE) {
+                       vcpu_set_isr(vcpu, visr.val);
+                       dnat_page_consumption(vcpu, vadr);
+                       return IA64_FAULT;
+               } else {
+                       *padr = (data->gpaddr >> data->ps << data->ps) |
+                               (vadr & (PSIZE(data->ps) - 1));
+                       return IA64_NO_FAULT;
+               }
+       }
+
+       data = vtlb_lookup(vcpu, vadr, D_TLB);
+       if (data) {
+               if (data->p == 0) {
+                       vcpu_set_isr(vcpu, visr.val);
+                       data_page_not_present(vcpu, vadr);
+                       return IA64_FAULT;
+               } else if (data->ma == VA_MATTR_NATPAGE) {
+                       vcpu_set_isr(vcpu, visr.val);
+                       dnat_page_consumption(vcpu, vadr);
+                       return IA64_FAULT;
+               } else {
+                       *padr = ((data->ppn >> (data->ps - 12)) << data->ps)
+                               | (vadr & (PSIZE(data->ps) - 1));
+                       return IA64_NO_FAULT;
+               }
+       }
+       if (!vhpt_enabled(vcpu, vadr, NA_REF)) {
+               if (vpsr.ic) {
+                       vcpu_set_isr(vcpu, visr.val);
+                       alt_dtlb(vcpu, vadr);
+                       return IA64_FAULT;
+               } else {
+                       nested_dtlb(vcpu);
+                       return IA64_FAULT;
+               }
+       } else {
+               if (vpsr.ic) {
+                       vcpu_set_isr(vcpu, visr.val);
+                       dvhpt_fault(vcpu, vadr);
+                       return IA64_FAULT;
+               } else {
+                       nested_dtlb(vcpu);
+                       return IA64_FAULT;
+               }
+       }
+
+       return IA64_NO_FAULT;
+}
+
+
+int kvm_tpa(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long r1, r3;
+
+       r3 = vcpu_get_gr(vcpu, inst.M46.r3);
+
+       if (vcpu_tpa(vcpu, r3, &r1))
+               return IA64_FAULT;
+
+       vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
+       return IA64_NO_FAULT;
+}
+
+void kvm_tak(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long r1, r3;
+
+       r3 = vcpu_get_gr(vcpu, inst.M46.r3);
+       r1 = vcpu_tak(vcpu, r3);
+       vcpu_set_gr(vcpu, inst.M46.r1, r1, 0);
+}
+
+
+/************************************
+ * Insert/Purge translation register/cache
+ ************************************/
+void vcpu_itc_i(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
+{
+       thash_purge_and_insert(vcpu, pte, itir, ifa, I_TLB);
+}
+
+void vcpu_itc_d(struct kvm_vcpu *vcpu, u64 pte, u64 itir, u64 ifa)
+{
+       thash_purge_and_insert(vcpu, pte, itir, ifa, D_TLB);
+}
+
+void vcpu_itr_i(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
+{
+       u64 ps, va, rid;
+       struct thash_data *p_itr;
+
+       ps = itir_ps(itir);
+       va = PAGEALIGN(ifa, ps);
+       pte &= ~PAGE_FLAGS_RV_MASK;
+       rid = vcpu_get_rr(vcpu, ifa);
+       rid = rid & RR_RID_MASK;
+       p_itr = (struct thash_data *)&vcpu->arch.itrs[slot];
+       vcpu_set_tr(p_itr, pte, itir, va, rid);
+       vcpu_quick_region_set(VMX(vcpu, itr_regions), va);
+}
+
+
+void vcpu_itr_d(struct kvm_vcpu *vcpu, u64 slot, u64 pte, u64 itir, u64 ifa)
+{
+       u64 gpfn;
+       u64 ps, va, rid;
+       struct thash_data *p_dtr;
+
+       ps = itir_ps(itir);
+       va = PAGEALIGN(ifa, ps);
+       pte &= ~PAGE_FLAGS_RV_MASK;
+
+       if (ps != _PAGE_SIZE_16M)
+               thash_purge_entries(vcpu, va, ps);
+       gpfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
+       if (__gpfn_is_io(gpfn))
+               pte |= VTLB_PTE_IO;
+       rid = vcpu_get_rr(vcpu, va);
+       rid = rid & RR_RID_MASK;
+       p_dtr = (struct thash_data *)&vcpu->arch.dtrs[slot];
+       vcpu_set_tr(p_dtr, pte, itir, va, rid);
+       vcpu_quick_region_set(VMX(vcpu, dtr_regions), va);
+}
+
+void vcpu_ptr_d(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
+{
+       int index;
+       u64 va;
+
+       va = PAGEALIGN(ifa, ps);
+       while ((index = vtr_find_overlap(vcpu, va, ps, D_TLB)) >= 0)
+               vcpu->arch.dtrs[index].page_flags = 0;
+
+       thash_purge_entries(vcpu, va, ps);
+}
+
+void vcpu_ptr_i(struct kvm_vcpu *vcpu, u64 ifa, u64 ps)
+{
+       int index;
+       u64 va;
+
+       va = PAGEALIGN(ifa, ps);
+       while ((index = vtr_find_overlap(vcpu, va, ps, I_TLB)) >= 0)
+               vcpu->arch.itrs[index].page_flags = 0;
+
+       thash_purge_entries(vcpu, va, ps);
+}
+
+void vcpu_ptc_l(struct kvm_vcpu *vcpu, u64 va, u64 ps)
+{
+       va = PAGEALIGN(va, ps);
+       thash_purge_entries(vcpu, va, ps);
+}
+
+void vcpu_ptc_e(struct kvm_vcpu *vcpu, u64 va)
+{
+       thash_purge_all(vcpu);
+}
+
+void vcpu_ptc_ga(struct kvm_vcpu *vcpu, u64 va, u64 ps)
+{
+       struct exit_ctl_data *p = &vcpu->arch.exit_data;
+       long psr;
+       local_irq_save(psr);
+       p->exit_reason = EXIT_REASON_PTC_G;
+
+       p->u.ptc_g_data.rr = vcpu_get_rr(vcpu, va);
+       p->u.ptc_g_data.vaddr = va;
+       p->u.ptc_g_data.ps = ps;
+       vmm_transition(vcpu);
+       /* Do Local Purge Here*/
+       vcpu_ptc_l(vcpu, va, ps);
+       local_irq_restore(psr);
+}
+
+
+void vcpu_ptc_g(struct kvm_vcpu *vcpu, u64 va, u64 ps)
+{
+       vcpu_ptc_ga(vcpu, va, ps);
+}
+
+void kvm_ptc_e(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long ifa;
+
+       ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+       vcpu_ptc_e(vcpu, ifa);
+}
+
+void kvm_ptc_g(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long ifa, itir;
+
+       ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+       itir = vcpu_get_gr(vcpu, inst.M45.r2);
+       vcpu_ptc_g(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_ptc_ga(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long ifa, itir;
+
+       ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+       itir = vcpu_get_gr(vcpu, inst.M45.r2);
+       vcpu_ptc_ga(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_ptc_l(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long ifa, itir;
+
+       ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+       itir = vcpu_get_gr(vcpu, inst.M45.r2);
+       vcpu_ptc_l(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_ptr_d(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long ifa, itir;
+
+       ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+       itir = vcpu_get_gr(vcpu, inst.M45.r2);
+       vcpu_ptr_d(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_ptr_i(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long ifa, itir;
+
+       ifa = vcpu_get_gr(vcpu, inst.M45.r3);
+       itir = vcpu_get_gr(vcpu, inst.M45.r2);
+       vcpu_ptr_i(vcpu, ifa, itir_ps(itir));
+}
+
+void kvm_itr_d(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long itir, ifa, pte, slot;
+
+       slot = vcpu_get_gr(vcpu, inst.M45.r3);
+       pte = vcpu_get_gr(vcpu, inst.M45.r2);
+       itir = vcpu_get_itir(vcpu);
+       ifa = vcpu_get_ifa(vcpu);
+       vcpu_itr_d(vcpu, slot, pte, itir, ifa);
+}
+
+
+
+void kvm_itr_i(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long itir, ifa, pte, slot;
+
+       slot = vcpu_get_gr(vcpu, inst.M45.r3);
+       pte = vcpu_get_gr(vcpu, inst.M45.r2);
+       itir = vcpu_get_itir(vcpu);
+       ifa = vcpu_get_ifa(vcpu);
+       vcpu_itr_i(vcpu, slot, pte, itir, ifa);
+}
+
+void kvm_itc_d(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long itir, ifa, pte;
+
+       itir = vcpu_get_itir(vcpu);
+       ifa = vcpu_get_ifa(vcpu);
+       pte = vcpu_get_gr(vcpu, inst.M45.r2);
+       vcpu_itc_d(vcpu, pte, itir, ifa);
+}
+
+void kvm_itc_i(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long itir, ifa, pte;
+
+       itir = vcpu_get_itir(vcpu);
+       ifa = vcpu_get_ifa(vcpu);
+       pte = vcpu_get_gr(vcpu, inst.M45.r2);
+       vcpu_itc_i(vcpu, pte, itir, ifa);
+}
+
+/*************************************
+ * Moves to semi-privileged registers
+ *************************************/
+
+void kvm_mov_to_ar_imm(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long imm;
+
+       if (inst.M30.s)
+               imm = -inst.M30.imm;
+       else
+               imm = inst.M30.imm;
+
+       vcpu_set_itc(vcpu, imm);
+}
+
+void kvm_mov_to_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long r2;
+
+       r2 = vcpu_get_gr(vcpu, inst.M29.r2);
+       vcpu_set_itc(vcpu, r2);
+}
+
+
+void kvm_mov_from_ar_reg(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long r1;
+
+       r1 = vcpu_get_itc(vcpu);
+       vcpu_set_gr(vcpu, inst.M31.r1, r1, 0);
+}
+/**************************************************************************
+  struct kvm_vcpu protection key register access routines
+ **************************************************************************/
+
+unsigned long vcpu_get_pkr(struct kvm_vcpu *vcpu, unsigned long reg)
+{
+       return ((unsigned long)ia64_get_pkr(reg));
+}
+
+void vcpu_set_pkr(struct kvm_vcpu *vcpu, unsigned long reg, unsigned long val)
+{
+       ia64_set_pkr(reg, val);
+}
+
+
+unsigned long vcpu_get_itir_on_fault(struct kvm_vcpu *vcpu, unsigned long ifa)
+{
+       union ia64_rr rr, rr1;
+
+       rr.val = vcpu_get_rr(vcpu, ifa);
+       rr1.val = 0;
+       rr1.ps = rr.ps;
+       rr1.rid = rr.rid;
+       return (rr1.val);
+}
+
+
+
+/********************************
+ * Moves to privileged registers
+ ********************************/
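+/*
+ * Writing a region register: rr6 is handled by transitioning back to
+ * the host (EXIT_REASON_SWITCH_RR6); rr0 and rr4 are only loaded into
+ * hardware while the guest is not in physical (metaphysical) mode.
+ */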
+unsigned long vcpu_set_rr(struct kvm_vcpu *vcpu, unsigned long reg,
+                                       unsigned long val)
+{
+       union ia64_rr oldrr, newrr;
+       unsigned long rrval;
+       struct exit_ctl_data *p = &vcpu->arch.exit_data;
+       unsigned long psr;
+
+       oldrr.val = vcpu_get_rr(vcpu, reg);
+       newrr.val = val;
+       vcpu->arch.vrr[reg >> VRN_SHIFT] = val;
+
+       switch ((unsigned long)(reg >> VRN_SHIFT)) {
+       case VRN6:
+               vcpu->arch.vmm_rr = vrrtomrr(val);
+               local_irq_save(psr);
+               p->exit_reason = EXIT_REASON_SWITCH_RR6;
+               vmm_transition(vcpu);
+               local_irq_restore(psr);
+               break;
+       case VRN4:
+               rrval = vrrtomrr(val);
+               vcpu->arch.metaphysical_saved_rr4 = rrval;
+               if (!is_physical_mode(vcpu))
+                       ia64_set_rr(reg, rrval);
+               break;
+       case VRN0:
+               rrval = vrrtomrr(val);
+               vcpu->arch.metaphysical_saved_rr0 = rrval;
+               if (!is_physical_mode(vcpu))
+                       ia64_set_rr(reg, rrval);
+               break;
+       default:
+               ia64_set_rr(reg, vrrtomrr(val));
+               break;
+       }
+
+       return (IA64_NO_FAULT);
+}
+
+
+
+void kvm_mov_to_rr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long r3, r2;
+
+       r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+       r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+       vcpu_set_rr(vcpu, r3, r2);
+}
+
+void kvm_mov_to_dbr(struct kvm_vcpu *vcpu, INST64 inst)
+{
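+       /* writes to the debug breakpoint registers are ignored here */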
+}
+
+void kvm_mov_to_ibr(struct kvm_vcpu *vcpu, INST64 inst)
+{
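+       /* writes to the instruction breakpoint registers are ignored here */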
+}
+
+void kvm_mov_to_pmc(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long r3, r2;
+
+       r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+       r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+       vcpu_set_pmc(vcpu, r3, r2);
+}
+
+void kvm_mov_to_pmd(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long r3, r2;
+
+       r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+       r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+       vcpu_set_pmd(vcpu, r3, r2);
+}
+
+void kvm_mov_to_pkr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       u64 r3, r2;
+
+       r3 = vcpu_get_gr(vcpu, inst.M42.r3);
+       r2 = vcpu_get_gr(vcpu, inst.M42.r2);
+       vcpu_set_pkr(vcpu, r3, r2);
+}
+
+
+
+void kvm_mov_from_rr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long r3, r1;
+
+       r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+       r1 = vcpu_get_rr(vcpu, r3);
+       vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void kvm_mov_from_pkr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long r3, r1;
+
+       r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+       r1 = vcpu_get_pkr(vcpu, r3);
+       vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void kvm_mov_from_dbr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long r3, r1;
+
+       r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+       r1 = vcpu_get_dbr(vcpu, r3);
+       vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void kvm_mov_from_ibr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long r3, r1;
+
+       r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+       r1 = vcpu_get_ibr(vcpu, r3);
+       vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void kvm_mov_from_pmc(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long r3, r1;
+
+       r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+       r1 = vcpu_get_pmc(vcpu, r3);
+       vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+
+unsigned long vcpu_get_cpuid(struct kvm_vcpu *vcpu, unsigned long reg)
+{
+       /* FIXME: This could get called as a result of a rsvd-reg fault */
+       if (reg > (ia64_get_cpuid(3) & 0xff))
+               return 0;
+       else
+               return ia64_get_cpuid(reg);
+}
+
+void kvm_mov_from_cpuid(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long r3, r1;
+
+       r3 = vcpu_get_gr(vcpu, inst.M43.r3);
+       r1 = vcpu_get_cpuid(vcpu, r3);
+       vcpu_set_gr(vcpu, inst.M43.r1, r1, 0);
+}
+
+void vcpu_set_tpr(struct kvm_vcpu *vcpu, unsigned long val)
+{
+       VCPU(vcpu, tpr) = val;
+       vcpu->arch.irq_check = 1;
+}
+
+unsigned long kvm_mov_to_cr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long r2;
+
+       r2 = vcpu_get_gr(vcpu, inst.M32.r2);
+       VCPU(vcpu, vcr[inst.M32.cr3]) = r2;
+
+       switch (inst.M32.cr3) {
+       case 0:
+               vcpu_set_dcr(vcpu, r2);
+               break;
+       case 1:
+               vcpu_set_itm(vcpu, r2);
+               break;
+       case 66:
+               vcpu_set_tpr(vcpu, r2);
+               break;
+       case 67:
+               vcpu_set_eoi(vcpu, r2);
+               break;
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+
+unsigned long kvm_mov_from_cr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long tgt = inst.M33.r1;
+       unsigned long val;
+
+       switch (inst.M33.cr3) {
+       case 65:
+               val = vcpu_get_ivr(vcpu);
+               vcpu_set_gr(vcpu, tgt, val, 0);
+               break;
+
+       case 67:
+               vcpu_set_gr(vcpu, tgt, 0L, 0);
+               break;
+       default:
+               val = VCPU(vcpu, vcr[inst.M33.cr3]);
+               vcpu_set_gr(vcpu, tgt, val, 0);
+               break;
+       }
+
+       return 0;
+}
+
+
+
+void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
+{
+
+       unsigned long mask;
+       struct kvm_pt_regs *regs;
+       struct ia64_psr old_psr, new_psr;
+
+       old_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+
+       regs = vcpu_regs(vcpu);
+       /* We only support guests with:
+        *  vpsr.pk = 0
+        *  vpsr.is = 0
+        *  vpsr.vm = 0
+        * Otherwise panic the VM.
+        */
+       if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
+               panic_vm(vcpu);
+
+       /*
+        * The IA64_PSR bits id/da/dd/ss/ed/ia become 0 after each
+        * instruction completes successfully, so they are not kept in
+        * the virtual PSR; they go into the machine PSR below instead.
+        */
+       VCPU(vcpu, vpsr) = val
+               & (~(IA64_PSR_ID | IA64_PSR_DA | IA64_PSR_DD |
+                       IA64_PSR_SS | IA64_PSR_ED | IA64_PSR_IA));
+
+       if (!old_psr.i && (val & IA64_PSR_I)) {
+               /* vpsr.i 0->1 */
+               vcpu->arch.irq_check = 1;
+       }
+       new_psr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+
+       /*
+        * All vIA64_PSR bits shall go to mPSR (v->tf->tf_special.psr),
+        * except for the following bits:
+        *  ic/i/dt/si/rt/mc/it/bn/vm
+        */
+       mask =  IA64_PSR_IC | IA64_PSR_I | IA64_PSR_DT | IA64_PSR_SI |
+               IA64_PSR_RT | IA64_PSR_MC | IA64_PSR_IT | IA64_PSR_BN |
+               IA64_PSR_VM;
+
+       regs->cr_ipsr = (regs->cr_ipsr & mask) | (val & (~mask));
+
+       check_mm_mode_switch(vcpu, old_psr, new_psr);
+
+       return;
+}
+
+unsigned long vcpu_cover(struct kvm_vcpu *vcpu)
+{
+       struct ia64_psr vpsr;
+
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+       vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+
+       if (!vpsr.ic)
+               VCPU(vcpu, ifs) = regs->cr_ifs;
+       regs->cr_ifs = IA64_IFS_V;
+       return (IA64_NO_FAULT);
+}
+
+
+
+/**************************************************************************
+  VCPU banked general register access routines
+ **************************************************************************/
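+/*
+ * bsw0/bsw1 swap r16-r31 between pt_regs and the saved bank-0/bank-1
+ * register areas; the _unat helpers below swap the matching NaT bits
+ * between pt_regs->eml_unat and the banked UNAT words.
+ */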
+#define vcpu_bsw0_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
+       do {                                                            \
+               __asm__ __volatile__ (                                  \
+                               ";;extr.u %0 = %3,%6,16;;\n"            \
+                               "dep %1 = %0, %1, 0, 16;;\n"            \
+                               "st8 [%4] = %1\n"                       \
+                               "extr.u %0 = %2, 16, 16;;\n"            \
+                               "dep %3 = %0, %3, %6, 16;;\n"           \
+                               "st8 [%5] = %3\n"                       \
+                               ::"r"(i), "r"(*b1unat), "r"(*b0unat),   \
+                               "r"(*runat), "r"(b1unat), "r"(runat),   \
+                               "i"(VMM_PT_REGS_R16_SLOT) : "memory");  \
+       } while (0)
+
+void vcpu_bsw0(struct kvm_vcpu *vcpu)
+{
+       unsigned long i;
+
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+       unsigned long *r = &regs->r16;
+       unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
+       unsigned long *b1 = &VCPU(vcpu, vgr[0]);
+       unsigned long *runat = &regs->eml_unat;
+       unsigned long *b0unat = &VCPU(vcpu, vbnat);
+       unsigned long *b1unat = &VCPU(vcpu, vnat);
+
+
+       if (VCPU(vcpu, vpsr) & IA64_PSR_BN) {
+               for (i = 0; i < 16; i++) {
+                       *b1++ = *r;
+                       *r++ = *b0++;
+               }
+               vcpu_bsw0_unat(i, b0unat, b1unat, runat,
+                               VMM_PT_REGS_R16_SLOT);
+               VCPU(vcpu, vpsr) &= ~IA64_PSR_BN;
+       }
+}
+
+#define vcpu_bsw1_unat(i, b0unat, b1unat, runat, VMM_PT_REGS_R16_SLOT) \
+       do {                                                            \
+               __asm__ __volatile__ (";;extr.u %0 = %3, %6, 16;;\n"    \
+                               "dep %1 = %0, %1, 16, 16;;\n"           \
+                               "st8 [%4] = %1\n"                       \
+                               "extr.u %0 = %2, 0, 16;;\n"             \
+                               "dep %3 = %0, %3, %6, 16;;\n"           \
+                               "st8 [%5] = %3\n"                       \
+                               ::"r"(i), "r"(*b0unat), "r"(*b1unat),   \
+                               "r"(*runat), "r"(b0unat), "r"(runat),   \
+                               "i"(VMM_PT_REGS_R16_SLOT) : "memory");  \
+       } while (0)
+
+void vcpu_bsw1(struct kvm_vcpu *vcpu)
+{
+       unsigned long i;
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+       unsigned long *r = &regs->r16;
+       unsigned long *b0 = &VCPU(vcpu, vbgr[0]);
+       unsigned long *b1 = &VCPU(vcpu, vgr[0]);
+       unsigned long *runat = &regs->eml_unat;
+       unsigned long *b0unat = &VCPU(vcpu, vbnat);
+       unsigned long *b1unat = &VCPU(vcpu, vnat);
+
+       if (!(VCPU(vcpu, vpsr) & IA64_PSR_BN)) {
+               for (i = 0; i < 16; i++) {
+                       *b0++ = *r;
+                       *r++ = *b1++;
+               }
+               vcpu_bsw1_unat(i, b0unat, b1unat, runat,
+                               VMM_PT_REGS_R16_SLOT);
+               VCPU(vcpu, vpsr) |= IA64_PSR_BN;
+       }
+}
+
+
+
+
+void vcpu_rfi(struct kvm_vcpu *vcpu)
+{
+       unsigned long ifs, psr;
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+       psr = VCPU(vcpu, ipsr);
+       if (psr & IA64_PSR_BN)
+               vcpu_bsw1(vcpu);
+       else
+               vcpu_bsw0(vcpu);
+       vcpu_set_psr(vcpu, psr);
+       ifs = VCPU(vcpu, ifs);
+       if (ifs >> 63)
+               regs->cr_ifs = ifs;
+       regs->cr_iip = VCPU(vcpu, iip);
+}
+
+
+/*
+ * The virtual PSR cannot track the bits masked below, so this
+ * function reconstructs the full guest PSR from the virtual PSR
+ * and the machine cr.ipsr.
+ */
+
+unsigned long vcpu_get_psr(struct kvm_vcpu *vcpu)
+{
+       unsigned long mask;
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+
+       mask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL |
+               IA64_PSR_MFH | IA64_PSR_CPL | IA64_PSR_RI;
+       return (VCPU(vcpu, vpsr) & ~mask) | (regs->cr_ipsr & mask);
+}
+
+void kvm_rsm(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long vpsr;
+       unsigned long imm24 = (inst.M44.i<<23) | (inst.M44.i2<<21)
+                                       | inst.M44.imm;
+
+       vpsr = vcpu_get_psr(vcpu);
+       vpsr &= (~imm24);
+       vcpu_set_psr(vcpu, vpsr);
+}
+
+void kvm_ssm(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long vpsr;
+       unsigned long imm24 = (inst.M44.i << 23) | (inst.M44.i2 << 21)
+                               | inst.M44.imm;
+
+       vpsr = vcpu_get_psr(vcpu);
+       vpsr |= imm24;
+       vcpu_set_psr(vcpu, vpsr);
+}
+
+/* Generate Mask
+ * Parameter:
+ *  bit -- starting bit
+ *  len -- how many bits
+ */
+#define MASK(bit,len)                                  \
+({                                                     \
+               __u64   ret;                            \
+                                                       \
+               __asm __volatile("dep %0=-1, r0, %1, %2"\
+                               : "=r" (ret):           \
+                 "M" (bit),                            \
+                 "M" (len));                           \
+               ret;                                    \
+})
+
+void vcpu_set_psr_l(struct kvm_vcpu *vcpu, unsigned long val)
+{
+       val = (val & MASK(0, 32)) | (vcpu_get_psr(vcpu) & MASK(32, 32));
+       vcpu_set_psr(vcpu, val);
+}
+
+void kvm_mov_to_psr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long val;
+
+       val = vcpu_get_gr(vcpu, inst.M35.r2);
+       vcpu_set_psr_l(vcpu, val);
+}
+
+void kvm_mov_from_psr(struct kvm_vcpu *vcpu, INST64 inst)
+{
+       unsigned long val;
+
+       val = vcpu_get_psr(vcpu);
+       val = (val & MASK(0, 32)) | (val & MASK(35, 2));
+       vcpu_set_gr(vcpu, inst.M33.r1, val, 0);
+}
+
+void vcpu_increment_iip(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+       struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
+       if (ipsr->ri == 2) {
+               ipsr->ri = 0;
+               regs->cr_iip += 16;
+       } else
+               ipsr->ri++;
+}
+
+void vcpu_decrement_iip(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pt_regs *regs = vcpu_regs(vcpu);
+       struct ia64_psr *ipsr = (struct ia64_psr *)&regs->cr_ipsr;
+
+       if (ipsr->ri == 0) {
+               ipsr->ri = 2;
+               regs->cr_iip -= 16;
+       } else
+               ipsr->ri--;
+}
+
+/*
+ * Emulate a privileged operation.
+ *
+ * @vcpu: the virtual cpu
+ * @regs: the guest registers at the time of the fault
+ *
+ * The cause of the virtualization fault and the opcode of the faulting
+ * instruction are taken from VMX(vcpu, cause) and VMX(vcpu, opcode).
+ */
+
+void kvm_emulate(struct kvm_vcpu *vcpu, struct kvm_pt_regs *regs)
+{
+       unsigned long status, cause, opcode;
+       INST64 inst;
+
+       status = IA64_NO_FAULT;
+       cause = VMX(vcpu, cause);
+       opcode = VMX(vcpu, opcode);
+       inst.inst = opcode;
+       /*
+        * Switch to actual virtual rid in rr0 and rr4,
+        * which is required by some tlb related instructions.
+        */
+       prepare_if_physical_mode(vcpu);
+
+       switch (cause) {
+       case EVENT_RSM:
+               kvm_rsm(vcpu, inst);
+               break;
+       case EVENT_SSM:
+               kvm_ssm(vcpu, inst);
+               break;
+       case EVENT_MOV_TO_PSR:
+               kvm_mov_to_psr(vcpu, inst);
+               break;
+       case EVENT_MOV_FROM_PSR:
+               kvm_mov_from_psr(vcpu, inst);
+               break;
+       case EVENT_MOV_FROM_CR:
+               kvm_mov_from_cr(vcpu, inst);
+               break;
+       case EVENT_MOV_TO_CR:
+               kvm_mov_to_cr(vcpu, inst);
+               break;
+       case EVENT_BSW_0:
+               vcpu_bsw0(vcpu);
+               break;
+       case EVENT_BSW_1:
+               vcpu_bsw1(vcpu);
+               break;
+       case EVENT_COVER:
+               vcpu_cover(vcpu);
+               break;
+       case EVENT_RFI:
+               vcpu_rfi(vcpu);
+               break;
+       case EVENT_ITR_D:
+               kvm_itr_d(vcpu, inst);
+               break;
+       case EVENT_ITR_I:
+               kvm_itr_i(vcpu, inst);
+               break;
+       case EVENT_PTR_D:
+               kvm_ptr_d(vcpu, inst);
+               break;
+       case EVENT_PTR_I:
+               kvm_ptr_i(vcpu, inst);
+               break;
+       case EVENT_ITC_D:
+               kvm_itc_d(vcpu, inst);
+               break;
+       case EVENT_ITC_I:
+               kvm_itc_i(vcpu, inst);
+               break;
+       case EVENT_PTC_L:
+               kvm_ptc_l(vcpu, inst);
+               break;
+       case EVENT_PTC_G:
+               kvm_ptc_g(vcpu, inst);
+               break;
+       case EVENT_PTC_GA:
+               kvm_ptc_ga(vcpu, inst);
+               break;
+       case EVENT_PTC_E:
+               kvm_ptc_e(vcpu, inst);
+               break;
+       case EVENT_MOV_TO_RR:
+               kvm_mov_to_rr(vcpu, inst);
+               break;
+       case EVENT_MOV_FROM_RR:
+               kvm_mov_from_rr(vcpu, inst);
+               break;
+       case EVENT_THASH:
+               kvm_thash(vcpu, inst);
+               break;
+       case EVENT_TTAG:
+               kvm_ttag(vcpu, inst);
+               break;
+       case EVENT_TPA:
+               status = kvm_tpa(vcpu, inst);
+               break;
+       case EVENT_TAK:
+               kvm_tak(vcpu, inst);
+               break;
+       case EVENT_MOV_TO_AR_IMM:
+               kvm_mov_to_ar_imm(vcpu, inst);
+               break;
+       case EVENT_MOV_TO_AR:
+               kvm_mov_to_ar_reg(vcpu, inst);
+               break;
+       case EVENT_MOV_FROM_AR:
+               kvm_mov_from_ar_reg(vcpu, inst);
+               break;
+       case EVENT_MOV_TO_DBR:
+               kvm_mov_to_dbr(vcpu, inst);
+               break;
+       case EVENT_MOV_TO_IBR:
+               kvm_mov_to_ibr(vcpu, inst);
+               break;
+       case EVENT_MOV_TO_PMC:
+               kvm_mov_to_pmc(vcpu, inst);
+               break;
+       case EVENT_MOV_TO_PMD:
+               kvm_mov_to_pmd(vcpu, inst);
+               break;
+       case EVENT_MOV_TO_PKR:
+               kvm_mov_to_pkr(vcpu, inst);
+               break;
+       case EVENT_MOV_FROM_DBR:
+               kvm_mov_from_dbr(vcpu, inst);
+               break;
+       case EVENT_MOV_FROM_IBR:
+               kvm_mov_from_ibr(vcpu, inst);
+               break;
+       case EVENT_MOV_FROM_PMC:
+               kvm_mov_from_pmc(vcpu, inst);
+               break;
+       case EVENT_MOV_FROM_PKR:
+               kvm_mov_from_pkr(vcpu, inst);
+               break;
+       case EVENT_MOV_FROM_CPUID:
+               kvm_mov_from_cpuid(vcpu, inst);
+               break;
+       case EVENT_VMSW:
+               status = IA64_FAULT;
+               break;
+       default:
+               break;
+       }
+       /* Assume all status is NO_FAULT? */
+       if (status == IA64_NO_FAULT && cause != EVENT_RFI)
+               vcpu_increment_iip(vcpu);
+
+       recover_if_physical_mode(vcpu);
+}
+
+void init_vcpu(struct kvm_vcpu *vcpu)
+{
+       int i;
+
+       vcpu->arch.mode_flags = GUEST_IN_PHY;
+       VMX(vcpu, vrr[0]) = 0x38;
+       VMX(vcpu, vrr[1]) = 0x38;
+       VMX(vcpu, vrr[2]) = 0x38;
+       VMX(vcpu, vrr[3]) = 0x38;
+       VMX(vcpu, vrr[4]) = 0x38;
+       VMX(vcpu, vrr[5]) = 0x38;
+       VMX(vcpu, vrr[6]) = 0x38;
+       VMX(vcpu, vrr[7]) = 0x38;
+       VCPU(vcpu, vpsr) = IA64_PSR_BN;
+       VCPU(vcpu, dcr) = 0;
+       /* pta.size must not be 0.  The minimum is 15 (32k) */
+       VCPU(vcpu, pta) = 15 << 2;
+       VCPU(vcpu, itv) = 0x10000;
+       VCPU(vcpu, itm) = 0;
+       VMX(vcpu, last_itc) = 0;
+
+       VCPU(vcpu, lid) = VCPU_LID(vcpu);
+       VCPU(vcpu, ivr) = 0;
+       VCPU(vcpu, tpr) = 0x10000;
+       VCPU(vcpu, eoi) = 0;
+       VCPU(vcpu, irr[0]) = 0;
+       VCPU(vcpu, irr[1]) = 0;
+       VCPU(vcpu, irr[2]) = 0;
+       VCPU(vcpu, irr[3]) = 0;
+       VCPU(vcpu, pmv) = 0x10000;
+       VCPU(vcpu, cmcv) = 0x10000;
+       VCPU(vcpu, lrr0) = 0x10000;   /* default reset value? */
+       VCPU(vcpu, lrr1) = 0x10000;   /* default reset value? */
+       update_vhpi(vcpu, NULL_VECTOR);
+       VLSAPIC_XTP(vcpu) = 0x80;       /* disabled */
+
+       for (i = 0; i < 4; i++)
+               VLSAPIC_INSVC(vcpu, i) = 0;
+}
+
+void kvm_init_all_rr(struct kvm_vcpu *vcpu)
+{
+       unsigned long psr;
+
+       local_irq_save(psr);
+
+       /* WARNING: virtual mode and physical mode must not coexist
+        * in the same region
+        */
+
+       vcpu->arch.metaphysical_saved_rr0 = vrrtomrr(VMX(vcpu, vrr[VRN0]));
+       vcpu->arch.metaphysical_saved_rr4 = vrrtomrr(VMX(vcpu, vrr[VRN4]));
+
+       if (is_physical_mode(vcpu)) {
+               if (vcpu->arch.mode_flags & GUEST_PHY_EMUL)
+                       panic_vm(vcpu);
+
+               ia64_set_rr((VRN0 << VRN_SHIFT), vcpu->arch.metaphysical_rr0);
+               ia64_dv_serialize_data();
+               ia64_set_rr((VRN4 << VRN_SHIFT), vcpu->arch.metaphysical_rr4);
+               ia64_dv_serialize_data();
+       } else {
+               ia64_set_rr((VRN0 << VRN_SHIFT),
+                               vcpu->arch.metaphysical_saved_rr0);
+               ia64_dv_serialize_data();
+               ia64_set_rr((VRN4 << VRN_SHIFT),
+                               vcpu->arch.metaphysical_saved_rr4);
+               ia64_dv_serialize_data();
+       }
+       ia64_set_rr((VRN1 << VRN_SHIFT),
+                       vrrtomrr(VMX(vcpu, vrr[VRN1])));
+       ia64_dv_serialize_data();
+       ia64_set_rr((VRN2 << VRN_SHIFT),
+                       vrrtomrr(VMX(vcpu, vrr[VRN2])));
+       ia64_dv_serialize_data();
+       ia64_set_rr((VRN3 << VRN_SHIFT),
+                       vrrtomrr(VMX(vcpu, vrr[VRN3])));
+       ia64_dv_serialize_data();
+       ia64_set_rr((VRN5 << VRN_SHIFT),
+                       vrrtomrr(VMX(vcpu, vrr[VRN5])));
+       ia64_dv_serialize_data();
+       ia64_set_rr((VRN7 << VRN_SHIFT),
+                       vrrtomrr(VMX(vcpu, vrr[VRN7])));
+       ia64_dv_serialize_data();
+       ia64_srlz_d();
+       ia64_set_psr(psr);
+}
+
+int vmm_entry(void)
+{
+       struct kvm_vcpu *v;
+       v = current_vcpu;
+
+       ia64_call_vsa(PAL_VPS_RESTORE, (unsigned long)v->arch.vpd,
+                                               0, 0, 0, 0, 0, 0);
+       kvm_init_vtlb(v);
+       kvm_init_vhpt(v);
+       init_vcpu(v);
+       kvm_init_all_rr(v);
+       vmm_reset_entry();
+
+       return 0;
+}
+
+void panic_vm(struct kvm_vcpu *v)
+{
+       struct exit_ctl_data *p = &v->arch.exit_data;
+
+       p->exit_reason = EXIT_REASON_VM_PANIC;
+       vmm_transition(v);
+       /* never returns */
+       while (1);
+}
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
new file mode 100644 (file)
index 0000000..b0fcfb6
--- /dev/null
@@ -0,0 +1,740 @@
+/*
+ *  vcpu.h: vcpu routines
+ *     Copyright (c) 2005, Intel Corporation.
+ *     Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ *     Yaozu Dong (Eddie Dong) (Eddie.dong@intel.com)
+ *
+ *     Copyright (c) 2007, Intel Corporation.
+ *     Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
+ *     Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+
+#ifndef __KVM_VCPU_H__
+#define __KVM_VCPU_H__
+
+#include <asm/types.h>
+#include <asm/fpu.h>
+#include <asm/processor.h>
+
+#ifndef __ASSEMBLY__
+#include "vti.h"
+
+#include <linux/kvm_host.h>
+#include <linux/spinlock.h>
+
+typedef unsigned long IA64_INST;
+
+typedef union U_IA64_BUNDLE {
+       unsigned long i64[2];
+       struct { unsigned long template:5, slot0:41, slot1a:18,
+               slot1b:23, slot2:41; };
+       /* NOTE: following doesn't work because bitfields can't cross natural
+          size boundaries
+          struct { unsigned long template:5, slot0:41, slot1:41, slot2:41; }; */
+} IA64_BUNDLE;
+
+typedef union U_INST64_A5 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, r1:7, imm7b:7, r3:2, imm5c:5,
+               imm9d:9, s:1, major:4; };
+} INST64_A5;
+
+typedef union U_INST64_B4 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, btype:3, un3:3, p:1, b2:3, un11:11, x6:6,
+               wh:2, d:1, un1:1, major:4; };
+} INST64_B4;
+
+typedef union U_INST64_B8 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, un21:21, x6:6, un4:4, major:4; };
+} INST64_B8;
+
+typedef union U_INST64_B9 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, imm20:20, :1, x6:6, :3, i:1, major:4; };
+} INST64_B9;
+
+typedef union U_INST64_I19 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, imm20:20, :1, x6:6, x3:3, i:1, major:4; };
+} INST64_I19;
+
+typedef union U_INST64_I26 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
+} INST64_I26;
+
+typedef union U_INST64_I27 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, :7, imm:7, ar3:7, x6:6, x3:3, s:1, major:4; };
+} INST64_I27;
+
+typedef union U_INST64_I28 { /* not privileged (mov from AR) */
+       IA64_INST inst;
+       struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
+} INST64_I28;
+
+typedef union U_INST64_M28 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, :14, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M28;
+
+typedef union U_INST64_M29 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, :7, r2:7, ar3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M29;
+
+typedef union U_INST64_M30 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, :7, imm:7, ar3:7, x4:4, x2:2,
+               x3:3, s:1, major:4; };
+} INST64_M30;
+
+typedef union U_INST64_M31 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, r1:7, :7, ar3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M31;
+
+typedef union U_INST64_M32 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, :7, r2:7, cr3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M32;
+
+typedef union U_INST64_M33 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, r1:7, :7, cr3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M33;
+
+typedef union U_INST64_M35 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
+
+} INST64_M35;
+
+typedef union U_INST64_M36 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, r1:7, :14, x6:6, x3:3, :1, major:4; };
+} INST64_M36;
+
+typedef union U_INST64_M37 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, imm20a:20, :1, x4:4, x2:2, x3:3,
+               i:1, major:4; };
+} INST64_M37;
+
+typedef union U_INST64_M41 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, :7, r2:7, :7, x6:6, x3:3, :1, major:4; };
+} INST64_M41;
+
+typedef union U_INST64_M42 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M42;
+
+typedef union U_INST64_M43 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, r1:7, :7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M43;
+
+typedef union U_INST64_M44 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, imm:21, x4:4, i2:2, x3:3, i:1, major:4; };
+} INST64_M44;
+
+typedef union U_INST64_M45 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, :7, r2:7, r3:7, x6:6, x3:3, :1, major:4; };
+} INST64_M45;
+
+typedef union U_INST64_M46 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, r1:7, un7:7, r3:7, x6:6,
+               x3:3, un1:1, major:4; };
+} INST64_M46;
+
+typedef union U_INST64_M47 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, un14:14, r3:7, x6:6, x3:3, un1:1, major:4; };
+} INST64_M47;
+
+typedef union U_INST64_M1{
+       IA64_INST inst;
+       struct { unsigned long qp:6, r1:7, un7:7, r3:7, x:1, hint:2,
+               x6:6, m:1, major:4; };
+} INST64_M1;
+
+typedef union U_INST64_M2{
+       IA64_INST inst;
+       struct { unsigned long qp:6, r1:7, r2:7, r3:7, x:1, hint:2,
+               x6:6, m:1, major:4; };
+} INST64_M2;
+
+typedef union U_INST64_M3{
+       IA64_INST inst;
+       struct { unsigned long qp:6, r1:7, imm7:7, r3:7, i:1, hint:2,
+               x6:6, s:1, major:4; };
+} INST64_M3;
+
+typedef union U_INST64_M4 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, un7:7, r2:7, r3:7, x:1, hint:2,
+               x6:6, m:1, major:4; };
+} INST64_M4;
+
+typedef union U_INST64_M5 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, imm7:7, r2:7, r3:7, i:1, hint:2,
+               x6:6, s:1, major:4; };
+} INST64_M5;
+
+typedef union U_INST64_M6 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, f1:7, un7:7, r3:7, x:1, hint:2,
+               x6:6, m:1, major:4; };
+} INST64_M6;
+
+typedef union U_INST64_M9 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, :7, f2:7, r3:7, x:1, hint:2,
+               x6:6, m:1, major:4; };
+} INST64_M9;
+
+typedef union U_INST64_M10 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, imm7:7, f2:7, r3:7, i:1, hint:2,
+               x6:6, s:1, major:4; };
+} INST64_M10;
+
+typedef union U_INST64_M12 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, f1:7, f2:7, r3:7, x:1, hint:2,
+               x6:6, m:1, major:4; };
+} INST64_M12;
+
+typedef union U_INST64_M15 {
+       IA64_INST inst;
+       struct { unsigned long qp:6, :7, imm7:7, r3:7, i:1, hint:2,
+               x6:6, s:1, major:4; };
+} INST64_M15;
+
+typedef union U_INST64 {
+       IA64_INST inst;
+       struct { unsigned long :37, major:4; } generic;
+       INST64_A5 A5;   /* used in build_hypercall_bundle only */
+       INST64_B4 B4;   /* used in build_hypercall_bundle only */
+       INST64_B8 B8;   /* rfi, bsw.[01] */
+       INST64_B9 B9;   /* break.b */
+       INST64_I19 I19; /* used in build_hypercall_bundle only */
+       INST64_I26 I26; /* mov register to ar (I unit) */
+       INST64_I27 I27; /* mov immediate to ar (I unit) */
+       INST64_I28 I28; /* mov from ar (I unit) */
+       INST64_M1  M1;  /* ld integer */
+       INST64_M2  M2;
+       INST64_M3  M3;
+       INST64_M4  M4;  /* st integer */
+       INST64_M5  M5;
+       INST64_M6  M6;  /* ldfd floating point                  */
+       INST64_M9  M9;  /* stfd floating point                  */
+       INST64_M10 M10; /* stfd floating point                  */
+       INST64_M12 M12; /* ldfd pair floating point             */
+       INST64_M15 M15; /* lfetch + imm update                  */
+       INST64_M28 M28; /* purge translation cache entry        */
+       INST64_M29 M29; /* mov register to ar (M unit)          */
+       INST64_M30 M30; /* mov immediate to ar (M unit)         */
+       INST64_M31 M31; /* mov from ar (M unit)                 */
+       INST64_M32 M32; /* mov reg to cr                        */
+       INST64_M33 M33; /* mov from cr                          */
+       INST64_M35 M35; /* mov to psr                           */
+       INST64_M36 M36; /* mov from psr                         */
+       INST64_M37 M37; /* break.m                              */
+       INST64_M41 M41; /* translation cache insert             */
+       INST64_M42 M42; /* mov to indirect reg/translation reg insert*/
+       INST64_M43 M43; /* mov from indirect reg                */
+       INST64_M44 M44; /* set/reset system mask                */
+       INST64_M45 M45; /* translation purge                    */
+       INST64_M46 M46; /* translation access (tpa,tak)         */
+       INST64_M47 M47; /* purge translation entry              */
+} INST64;
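+
+/*
+ * Decoding sketch (illustrative, not part of the original code): an
+ * emulator copies the faulting 41-bit instruction slot into an INST64
+ * and then dispatches on generic.major plus the x3/x6/x4 extension
+ * fields to pick the matching per-format view above.
+ */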
+
+#define MASK_41 ((unsigned long)0x1ffffffffff)
+
+/* Virtual address memory attributes encoding */
+#define VA_MATTR_WB         0x0
+#define VA_MATTR_UC         0x4
+#define VA_MATTR_UCE        0x5
+#define VA_MATTR_WC         0x6
+#define VA_MATTR_NATPAGE    0x7
+
+#define PMASK(size)         (~((size) - 1))
+#define PSIZE(size)         (1UL<<(size))
+#define CLEARLSB(ppn, nbits)    (((ppn) >> (nbits)) << (nbits))
+#define PAGEALIGN(va, ps)      CLEARLSB(va, ps)
+#define PAGE_FLAGS_RV_MASK   (0x2|(0x3UL<<50)|(((1UL<<11)-1)<<53))
+#define _PAGE_MA_ST     (0x1 <<  2) /* is reserved for software use */
+
+#define ARCH_PAGE_SHIFT   12
+
+#define INVALID_TI_TAG (1UL << 63)
+
+#define VTLB_PTE_P_BIT      0
+#define VTLB_PTE_IO_BIT     60
+#define VTLB_PTE_IO         (1UL<<VTLB_PTE_IO_BIT)
+#define VTLB_PTE_P          (1UL<<VTLB_PTE_P_BIT)
+
+#define vcpu_quick_region_check(_tr_regions,_ifa)              \
+       (_tr_regions & (1 << ((unsigned long)_ifa >> 61)))
+
+#define vcpu_quick_region_set(_tr_regions,_ifa)             \
+       do {_tr_regions |= (1 << ((unsigned long)_ifa >> 61)); } while (0)
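+
+/*
+ * Illustration (not from the original code): a virtual address's region
+ * number is its top three bits, so an address in region 5 sets or tests
+ * bit 5 of the _tr_regions bitmap in the two macros above.
+ */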
+
+static inline void vcpu_set_tr(struct thash_data *trp, u64 pte, u64 itir,
+               u64 va, u64 rid)
+{
+       trp->page_flags = pte;
+       trp->itir = itir;
+       trp->vadr = va;
+       trp->rid = rid;
+}
+
+extern u64 kvm_lookup_mpa(u64 gpfn);
+extern u64 kvm_gpa_to_mpa(u64 gpa);
+
+/* Return the I/O type if the gpfn refers to an I/O page */
+#define __gpfn_is_io(gpfn)                     \
+       ({                                              \
+        u64 pte, ret = 0;                      \
+        pte = kvm_lookup_mpa(gpfn);            \
+        if (!(pte & GPFN_INV_MASK))            \
+        ret = pte & GPFN_IO_MASK;      \
+        ret;                                   \
+        })
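+
+/*
+ * Usage sketch (illustrative only): callers treat a non-zero result as
+ * "this guest frame is MMIO", e.g.
+ *
+ *     if (__gpfn_is_io(gpa >> PAGE_SHIFT))
+ *             emulate_io_inst(vcpu, gpa, ma);
+ */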
+
+#endif
+
+#define IA64_NO_FAULT  0
+#define IA64_FAULT     1
+
+#define VMM_RBS_OFFSET  ((VMM_TASK_SIZE + 15) & ~15)
+
+#define SW_BAD  0   /* Bad mode transition */
+#define SW_V2P  1   /* Physical emulation is activated */
+#define SW_P2V  2   /* Exit physical mode emulation */
+#define SW_SELF 3   /* No mode transition */
+#define SW_NOP  4   /* Mode transition, but without action required */
+
+#define GUEST_IN_PHY    0x1
+#define GUEST_PHY_EMUL  0x2
+
+#define current_vcpu ((struct kvm_vcpu *) ia64_getreg(_IA64_REG_TP))
+
+#define VRN_SHIFT      61
+#define VRN_MASK       0xe000000000000000
+#define VRN0           0x0UL
+#define VRN1           0x1UL
+#define VRN2           0x2UL
+#define VRN3           0x3UL
+#define VRN4           0x4UL
+#define VRN5           0x5UL
+#define VRN6           0x6UL
+#define VRN7           0x7UL
+
+#define IRQ_NO_MASKED         0
+#define IRQ_MASKED_BY_VTPR    1
+#define IRQ_MASKED_BY_INSVC   2   /* masked by inservice IRQ */
+
+#define PTA_BASE_SHIFT      15
+
+#define IA64_PSR_VM_BIT     46
+#define IA64_PSR_VM (__IA64_UL(1) << IA64_PSR_VM_BIT)
+
+/* Interruption Function State */
+#define IA64_IFS_V_BIT      63
+#define IA64_IFS_V  (__IA64_UL(1) << IA64_IFS_V_BIT)
+
+#define PHY_PAGE_UC (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_UC|_PAGE_AR_RWX)
+#define PHY_PAGE_WB (_PAGE_A|_PAGE_D|_PAGE_P|_PAGE_MA_WB|_PAGE_AR_RWX)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/gcc_intrin.h>
+
+#define is_physical_mode(v)            \
+       ((v->arch.mode_flags) & GUEST_IN_PHY)
+
+#define is_virtual_mode(v)     \
+       (!is_physical_mode(v))
+
+#define MODE_IND(psr)  \
+       (((psr).it << 2) + ((psr).dt << 1) + (psr).rt)
+
+#define _vmm_raw_spin_lock(x)                                          \
+       do {                                                            \
+               __u32 *ia64_spinlock_ptr = (__u32 *) (x);               \
+               __u64 ia64_spinlock_val;                                \
+               ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
+               if (unlikely(ia64_spinlock_val)) {                      \
+                       do {                                            \
+                               while (*ia64_spinlock_ptr)              \
+                               ia64_barrier();                         \
+                               ia64_spinlock_val =                     \
+                               ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0);\
+                       } while (ia64_spinlock_val);                    \
+               }                                                       \
+       } while (0)
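+/*
+ * Note: the macro above is a test-and-test-and-set loop; cmpxchg4.acq
+ * is only retried after a plain read of the lock word has seen it drop
+ * back to 0, which keeps the cache line shared while other CPUs spin.
+ */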
+
+#define _vmm_raw_spin_unlock(x)                                \
+       do { barrier();                         \
+               ((spinlock_t *)x)->raw_lock.lock = 0; } \
+while (0)
+
+void vmm_spin_lock(spinlock_t *lock);
+void vmm_spin_unlock(spinlock_t *lock);
+enum {
+       I_TLB = 1,
+       D_TLB = 2
+};
+
+union kvm_va {
+       struct {
+               unsigned long off : 60;         /* intra-region offset */
+               unsigned long reg :  4;         /* region number */
+       } f;
+       unsigned long l;
+       void *p;
+};
+
+#define __kvm_pa(x)     ({union kvm_va _v; _v.l = (long) (x);          \
+                                               _v.f.reg = 0; _v.l; })
+#define __kvm_va(x)     ({union kvm_va _v; _v.l = (long) (x);          \
+                               _v.f.reg = -1; _v.p; })
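+
+/*
+ * Illustration (not from the original code): __kvm_pa() clears the top
+ * four "region" bits of an address and __kvm_va() forces them to all
+ * ones, so the two macros convert between an offset and the matching
+ * address in the topmost region.
+ */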
+
+#define _REGION_ID(x)           ({union ia64_rr _v; _v.val = (long)(x); \
+                                               _v.rid; })
+#define _REGION_PAGE_SIZE(x)    ({union ia64_rr _v; _v.val = (long)(x); \
+                                               _v.ps; })
+#define _REGION_HW_WALKER(x)    ({union ia64_rr _v; _v.val = (long)(x);        \
+                                               _v.ve; })
+
+enum vhpt_ref { DATA_REF, NA_REF, INST_REF, RSE_REF };
+enum tlb_miss_type { INSTRUCTION, DATA, REGISTER };
+
+#define VCPU(_v, _x) ((_v)->arch.vpd->_x)
+#define VMX(_v, _x)  ((_v)->arch._x)
+
+#define VLSAPIC_INSVC(vcpu, i) ((vcpu)->arch.insvc[i])
+#define VLSAPIC_XTP(_v)        VMX(_v, xtp)
+
+static inline unsigned long itir_ps(unsigned long itir)
+{
+       return ((itir >> 2) & 0x3f);
+}
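+
+/*
+ * Example (illustrative): itir.ps is the log2 page size, so a return
+ * value of 14 means a 16KB page and 16 means a 64KB page.
+ */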
+
+
+/**************************************************************************
+  VCPU control register access routines
+ **************************************************************************/
+
+static inline u64 vcpu_get_itir(struct kvm_vcpu *vcpu)
+{
+       return ((u64)VCPU(vcpu, itir));
+}
+
+static inline void vcpu_set_itir(struct kvm_vcpu *vcpu, u64 val)
+{
+       VCPU(vcpu, itir) = val;
+}
+
+static inline u64 vcpu_get_ifa(struct kvm_vcpu *vcpu)
+{
+       return ((u64)VCPU(vcpu, ifa));
+}
+
+static inline void vcpu_set_ifa(struct kvm_vcpu *vcpu, u64 val)
+{
+       VCPU(vcpu, ifa) = val;
+}
+
+static inline u64 vcpu_get_iva(struct kvm_vcpu *vcpu)
+{
+       return ((u64)VCPU(vcpu, iva));
+}
+
+static inline u64 vcpu_get_pta(struct kvm_vcpu *vcpu)
+{
+       return ((u64)VCPU(vcpu, pta));
+}
+
+static inline u64 vcpu_get_lid(struct kvm_vcpu *vcpu)
+{
+       return ((u64)VCPU(vcpu, lid));
+}
+
+static inline u64 vcpu_get_tpr(struct kvm_vcpu *vcpu)
+{
+       return ((u64)VCPU(vcpu, tpr));
+}
+
+static inline u64 vcpu_get_eoi(struct kvm_vcpu *vcpu)
+{
+       return (0UL);           /*reads of eoi always return 0 */
+}
+
+static inline u64 vcpu_get_irr0(struct kvm_vcpu *vcpu)
+{
+       return ((u64)VCPU(vcpu, irr[0]));
+}
+
+static inline u64 vcpu_get_irr1(struct kvm_vcpu *vcpu)
+{
+       return ((u64)VCPU(vcpu, irr[1]));
+}
+
+static inline u64 vcpu_get_irr2(struct kvm_vcpu *vcpu)
+{
+       return ((u64)VCPU(vcpu, irr[2]));
+}
+
+static inline u64 vcpu_get_irr3(struct kvm_vcpu *vcpu)
+{
+       return ((u64)VCPU(vcpu, irr[3]));
+}
+
+static inline void vcpu_set_dcr(struct kvm_vcpu *vcpu, u64 val)
+{
+       ia64_setreg(_IA64_REG_CR_DCR, val);
+}
+
+static inline void vcpu_set_isr(struct kvm_vcpu *vcpu, u64 val)
+{
+       VCPU(vcpu, isr) = val;
+}
+
+static inline void vcpu_set_lid(struct kvm_vcpu *vcpu, u64 val)
+{
+       VCPU(vcpu, lid) = val;
+}
+
+static inline void vcpu_set_ipsr(struct kvm_vcpu *vcpu, u64 val)
+{
+       VCPU(vcpu, ipsr) = val;
+}
+
+static inline void vcpu_set_iip(struct kvm_vcpu *vcpu, u64 val)
+{
+       VCPU(vcpu, iip) = val;
+}
+
+static inline void vcpu_set_ifs(struct kvm_vcpu *vcpu, u64 val)
+{
+       VCPU(vcpu, ifs) = val;
+}
+
+static inline void vcpu_set_iipa(struct kvm_vcpu *vcpu, u64 val)
+{
+       VCPU(vcpu, iipa) = val;
+}
+
+static inline void vcpu_set_iha(struct kvm_vcpu *vcpu, u64 val)
+{
+       VCPU(vcpu, iha) = val;
+}
+
+
+static inline u64 vcpu_get_rr(struct kvm_vcpu *vcpu, u64 reg)
+{
+       return vcpu->arch.vrr[reg>>61];
+}
+
+/**************************************************************************
+  VCPU debug breakpoint register access routines
+ **************************************************************************/
+
+static inline void vcpu_set_dbr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
+{
+       __ia64_set_dbr(reg, val);
+}
+
+static inline void vcpu_set_ibr(struct kvm_vcpu *vcpu, u64 reg, u64 val)
+{
+       ia64_set_ibr(reg, val);
+}
+
+static inline u64 vcpu_get_dbr(struct kvm_vcpu *vcpu, u64 reg)
+{
+       return ((u64)__ia64_get_dbr(reg));
+}
+
+static inline u64 vcpu_get_ibr(struct kvm_vcpu *vcpu, u64 reg)
+{
+       return ((u64)ia64_get_ibr(reg));
+}
+
+/**************************************************************************
+  VCPU performance monitor register access routines
+ **************************************************************************/
+static inline void vcpu_set_pmc(struct kvm_vcpu *vcpu, u64 reg, u64 val)
+{
+       /* NOTE: Writes to unimplemented PMC registers are discarded */
+       ia64_set_pmc(reg, val);
+}
+
+static inline void vcpu_set_pmd(struct kvm_vcpu *vcpu, u64 reg, u64 val)
+{
+       /* NOTE: Writes to unimplemented PMD registers are discarded */
+       ia64_set_pmd(reg, val);
+}
+
+static inline u64 vcpu_get_pmc(struct kvm_vcpu *vcpu, u64 reg)
+{
+       /* NOTE: Reads from unimplemented PMC registers return zero */
+       return ((u64)ia64_get_pmc(reg));
+}
+
+static inline u64 vcpu_get_pmd(struct kvm_vcpu *vcpu, u64 reg)
+{
+       /* NOTE: Reads from unimplemented PMD registers return zero */
+       return ((u64)ia64_get_pmd(reg));
+}
+
+static inline unsigned long vrrtomrr(unsigned long val)
+{
+       union ia64_rr rr;
+       rr.val = val;
+       rr.rid = (rr.rid << 4) | 0xe;
+       if (rr.ps > PAGE_SHIFT)
+               rr.ps = PAGE_SHIFT;
+       rr.ve = 1;
+       return rr.val;
+}
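+
+/*
+ * Example (illustrative): a virtual rr value with rid 0x10 becomes a
+ * machine rr with rid 0x10e (guest rid shifted left by 4, low nibble
+ * 0xe), ps capped at the host PAGE_SHIFT, and the VHPT walker enabled.
+ */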
+
+
+static inline int highest_bits(int *dat)
+{
+       u32  bits, bitnum;
+       int i;
+
+       /* loop for all 256 bits */
+       for (i = 7; i >= 0 ; i--) {
+               bits = dat[i];
+               if (bits) {
+                       bitnum = fls(bits);
+                       return i * 32 + bitnum - 1;
+               }
+       }
+       return NULL_VECTOR;
+}
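+
+/*
+ * Example (illustrative): if only vector 65 is pending, dat[2] has bit 1
+ * set, fls() returns 2, and highest_bits() returns 2 * 32 + 2 - 1 = 65.
+ */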
+
+/*
+ * Return true if the pending irq has higher priority than the
+ * in-service one.
+ */
+static inline int is_higher_irq(int pending, int inservice)
+{
+       return ((pending > inservice)
+                       || ((pending != NULL_VECTOR)
+                               && (inservice == NULL_VECTOR)));
+}
+
+static inline int is_higher_class(int pending, int mic)
+{
+       return ((pending >> 4) > mic);
+}
+
+/*
+ * Return 0-255 for the highest pending irq,
+ * or NULL_VECTOR when nothing is pending.
+ */
+static inline int highest_pending_irq(struct kvm_vcpu *vcpu)
+{
+       if (VCPU(vcpu, irr[0]) & (1UL<<NMI_VECTOR))
+               return NMI_VECTOR;
+       if (VCPU(vcpu, irr[0]) & (1UL<<ExtINT_VECTOR))
+               return ExtINT_VECTOR;
+
+       return highest_bits((int *)&VCPU(vcpu, irr[0]));
+}
+
+static inline int highest_inservice_irq(struct kvm_vcpu *vcpu)
+{
+       if (VMX(vcpu, insvc[0]) & (1UL<<NMI_VECTOR))
+               return NMI_VECTOR;
+       if (VMX(vcpu, insvc[0]) & (1UL<<ExtINT_VECTOR))
+               return ExtINT_VECTOR;
+
+       return highest_bits((int *)&(VMX(vcpu, insvc[0])));
+}
+
+extern void vcpu_get_fpreg(struct kvm_vcpu *vcpu, u64 reg,
+                                       struct ia64_fpreg *val);
+extern void vcpu_set_fpreg(struct kvm_vcpu *vcpu, u64 reg,
+                                       struct ia64_fpreg *val);
+extern u64 vcpu_get_gr(struct kvm_vcpu *vcpu, u64 reg);
+extern void vcpu_set_gr(struct kvm_vcpu *vcpu, u64 reg, u64 val, int nat);
+extern u64 vcpu_get_psr(struct kvm_vcpu *vcpu);
+extern void vcpu_set_psr(struct kvm_vcpu *vcpu, u64 val);
+extern u64 vcpu_thash(struct kvm_vcpu *vcpu, u64 vadr);
+extern void vcpu_bsw0(struct kvm_vcpu *vcpu);
+extern void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte,
+                                       u64 itir, u64 va, int type);
+extern struct thash_data *vhpt_lookup(u64 va);
+extern u64 guest_vhpt_lookup(u64 iha, u64 *pte);
+extern void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps);
+extern void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps);
+extern u64 translate_phy_pte(u64 *pte, u64 itir, u64 va);
+extern int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte,
+               u64 itir, u64 ifa, int type);
+extern void thash_purge_all(struct kvm_vcpu *v);
+extern struct thash_data *vtlb_lookup(struct kvm_vcpu *v,
+                                               u64 va, int is_data);
+extern int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va,
+                                               u64 ps, int is_data);
+
+extern void vcpu_increment_iip(struct kvm_vcpu *v);
+extern void vcpu_decrement_iip(struct kvm_vcpu *vcpu);
+extern void vcpu_pend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
+extern void vcpu_unpend_interrupt(struct kvm_vcpu *vcpu, u8 vec);
+extern void data_page_not_present(struct kvm_vcpu *vcpu, u64 vadr);
+extern void dnat_page_consumption(struct kvm_vcpu *vcpu, u64 vadr);
+extern void alt_dtlb(struct kvm_vcpu *vcpu, u64 vadr);
+extern void nested_dtlb(struct kvm_vcpu *vcpu);
+extern void dvhpt_fault(struct kvm_vcpu *vcpu, u64 vadr);
+extern int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref);
+
+extern void update_vhpi(struct kvm_vcpu *vcpu, int vec);
+extern int irq_masked(struct kvm_vcpu *vcpu, int h_pending, int h_inservice);
+
+extern int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle);
+extern void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma);
+extern void vmm_transition(struct kvm_vcpu *vcpu);
+extern void vmm_trampoline(union context *from, union context *to);
+extern int vmm_entry(void);
+extern  u64 vcpu_get_itc(struct kvm_vcpu *vcpu);
+
+extern void vmm_reset_entry(void);
+void kvm_init_vtlb(struct kvm_vcpu *v);
+void kvm_init_vhpt(struct kvm_vcpu *v);
+void thash_init(struct thash_cb *hcb, u64 sz);
+
+void panic_vm(struct kvm_vcpu *v);
+
+extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2, u64 arg3,
+               u64 arg4, u64 arg5, u64 arg6, u64 arg7);
+#endif
+#endif /* __VCPU_H__ */
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
new file mode 100644 (file)
index 0000000..2275bf4
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * vmm.c: vmm module interface with kvm module
+ *
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ *  Xiantao Zhang (xiantao.zhang@intel.com)
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+
+#include <linux/module.h>
+#include <asm/fpswa.h>
+
+#include "vcpu.h"
+
+MODULE_AUTHOR("Intel");
+MODULE_LICENSE("GPL");
+
+extern char kvm_ia64_ivt;
+extern fpswa_interface_t *vmm_fpswa_interface;
+
+struct kvm_vmm_info vmm_info = {
+       .module      = THIS_MODULE,
+       .vmm_entry   = vmm_entry,
+       .tramp_entry = vmm_trampoline,
+       .vmm_ivt     = (unsigned long)&kvm_ia64_ivt,
+};
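+
+/*
+ * Note: this table hands the VMM's entry point, trampoline and
+ * interruption vector table address over to the kvm module through
+ * kvm_init() below.
+ */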
+
+static int __init  kvm_vmm_init(void)
+{
+
+       vmm_fpswa_interface = fpswa_interface;
+
+       /* Register the vmm data with the kvm side */
+       return kvm_init(&vmm_info, 1024, THIS_MODULE);
+}
+
+static void __exit kvm_vmm_exit(void)
+{
+       kvm_exit();
+       return;
+}
+
+void vmm_spin_lock(spinlock_t *lock)
+{
+       _vmm_raw_spin_lock(lock);
+}
+
+void vmm_spin_unlock(spinlock_t *lock)
+{
+       _vmm_raw_spin_unlock(lock);
+}
+module_init(kvm_vmm_init)
+module_exit(kvm_vmm_exit)
diff --git a/arch/ia64/kvm/vmm_ivt.S b/arch/ia64/kvm/vmm_ivt.S
new file mode 100644 (file)
index 0000000..3ee5f48
--- /dev/null
@@ -0,0 +1,1424 @@
+/*
+ * arch/ia64/kvm/vmm_ivt.S
+ *
+ * Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
+ *      Stephane Eranian <eranian@hpl.hp.com>
+ *      David Mosberger <davidm@hpl.hp.com>
+ * Copyright (C) 2000, 2002-2003 Intel Co
+ *      Asit Mallick <asit.k.mallick@intel.com>
+ *      Suresh Siddha <suresh.b.siddha@intel.com>
+ *      Kenneth Chen <kenneth.w.chen@intel.com>
+ *      Fenghua Yu <fenghua.yu@intel.com>
+ *
+ *
+ * 00/08/23 Asit Mallick <asit.k.mallick@intel.com> TLB handling
+ * for SMP
+ * 00/12/20 David Mosberger-Tang <davidm@hpl.hp.com> DTLB/ITLB
+ * handler now uses virtual PT.
+ *
+ * 07/6/20 Xuefei Xu  (Anthony Xu) (anthony.xu@intel.com)
+ *              Supporting Intel virtualization architecture
+ *
+ */
+
+/*
+ * This file defines the interruption vector table used by the CPU.
+ * It does not include one entry per possible cause of interruption.
+ *
+ * The first 20 entries of the table contain 64 bundles each while the
+ * remaining 48 entries contain only 16 bundles each.
+ *
+ * The 64 bundles are used to allow inlining the whole handler for
+ * critical interruptions like TLB misses.
+ *
+ *  For each entry, the comment is as follows:
+ *
+ *              // 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
+ *  entry offset ----/     /         /                  /          /
+ *  entry number ---------/         /                  /          /
+ *  size of the entry -------------/                  /          /
+ *  vector name -------------------------------------/          /
+ *  interruptions triggering this vector ----------------------/
+ *
+ * The table is 32KB in size and must be aligned on a 32KB
+ * boundary (the CPU ignores the 15 lower bits of the address).
+ *
+ * Table is based upon EAS2.6 (Oct 1999)
+ */
+
+
+#include <asm/asmmacro.h>
+#include <asm/cache.h>
+#include <asm/pgtable.h>
+
+#include "asm-offsets.h"
+#include "vcpu.h"
+#include "kvm_minstate.h"
+#include "vti.h"
+
+#if 1
+# define PSR_DEFAULT_BITS   psr.ac
+#else
+# define PSR_DEFAULT_BITS   0
+#endif
+
+
+#define KVM_FAULT(n)    \
+    kvm_fault_##n:;          \
+    mov r19=n;;          \
+    br.sptk.many kvm_fault_##n;         \
+    ;;                  \
+
+
+#define KVM_REFLECT(n)    \
+    mov r31=pr;           \
+    mov r19=n;       /* prepare to save predicates */ \
+    mov r29=cr.ipsr;      \
+    ;;      \
+    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;       \
+(p7)br.sptk.many kvm_dispatch_reflection;        \
+    br.sptk.many kvm_panic;      \
+
+
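+// KVM_FAULT(n) parks an unhandled vector on a self-branch with r19 = n;
+// KVM_REFLECT(n) tests psr.vm and either reflects the interruption to the
+// guest via kvm_dispatch_reflection or branches to kvm_panic below, which
+// likewise spins forever.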
+GLOBAL_ENTRY(kvm_panic)
+    br.sptk.many kvm_panic
+    ;;
+END(kvm_panic)
+
+
+
+
+
+    .section .text.ivt,"ax"
+
+    .align 32768    // align on 32KB boundary
+    .global kvm_ia64_ivt
+kvm_ia64_ivt:
+///////////////////////////////////////////////////////////////
+// 0x0000 Entry 0 (size 64 bundles) VHPT Translation (8,20,47)
+ENTRY(kvm_vhpt_miss)
+    KVM_FAULT(0)
+END(kvm_vhpt_miss)
+
+
+    .org kvm_ia64_ivt+0x400
+////////////////////////////////////////////////////////////////
+// 0x0400 Entry 1 (size 64 bundles) ITLB (21)
+ENTRY(kvm_itlb_miss)
+    mov r31 = pr
+    mov r29=cr.ipsr;
+    ;;
+    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
+    (p6) br.sptk kvm_alt_itlb_miss
+    mov r19 = 1
+    br.sptk kvm_itlb_miss_dispatch
+    KVM_FAULT(1);
+END(kvm_itlb_miss)
+
+    .org kvm_ia64_ivt+0x0800
+//////////////////////////////////////////////////////////////////
+// 0x0800 Entry 2 (size 64 bundles) DTLB (9,48)
+ENTRY(kvm_dtlb_miss)
+    mov r31 = pr
+    mov r29=cr.ipsr;
+    ;;
+    tbit.z p6,p7=r29,IA64_PSR_VM_BIT;
+(p6)br.sptk kvm_alt_dtlb_miss
+    br.sptk kvm_dtlb_miss_dispatch
+END(kvm_dtlb_miss)
+
+     .org kvm_ia64_ivt+0x0c00
+////////////////////////////////////////////////////////////////////
+// 0x0c00 Entry 3 (size 64 bundles) Alt ITLB (19)
+ENTRY(kvm_alt_itlb_miss)
+    mov r16=cr.ifa    // get address that caused the TLB miss
+    ;;
+    movl r17=PAGE_KERNEL
+    mov r24=cr.ipsr
+    movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+    ;;
+    and r19=r19,r16     // clear ed, reserved bits, and PTE control bits
+    ;;
+    or r19=r17,r19      // insert PTE control bits into r19
+    ;;
+    movl r20=IA64_GRANULE_SHIFT<<2
+    ;;
+    mov cr.itir=r20
+    ;;
+    itc.i r19          // insert the TLB entry
+    mov pr=r31,-1
+    rfi
+END(kvm_alt_itlb_miss)
+
+    .org kvm_ia64_ivt+0x1000
+/////////////////////////////////////////////////////////////////////
+// 0x1000 Entry 4 (size 64 bundles) Alt DTLB (7,46)
+ENTRY(kvm_alt_dtlb_miss)
+    mov r16=cr.ifa             // get address that caused the TLB miss
+    ;;
+    movl r17=PAGE_KERNEL
+    movl r19=(((1 << IA64_MAX_PHYS_BITS) - 1) & ~0xfff)
+    mov r24=cr.ipsr
+    ;;
+    and r19=r19,r16     // clear ed, reserved bits, and PTE control bits
+    ;;
+    or r19=r19,r17     // insert PTE control bits into r19
+    ;;
+    movl r20=IA64_GRANULE_SHIFT<<2
+    ;;
+    mov cr.itir=r20
+    ;;
+    itc.d r19          // insert the TLB entry
+    mov pr=r31,-1
+    rfi
+END(kvm_alt_dtlb_miss)
+
+    .org kvm_ia64_ivt+0x1400
+//////////////////////////////////////////////////////////////////////
+// 0x1400 Entry 5 (size 64 bundles) Data nested TLB (6,45)
+ENTRY(kvm_nested_dtlb_miss)
+    KVM_FAULT(5)
+END(kvm_nested_dtlb_miss)
+
+    .org kvm_ia64_ivt+0x1800
+/////////////////////////////////////////////////////////////////////
+// 0x1800 Entry 6 (size 64 bundles) Instruction Key Miss (24)
+ENTRY(kvm_ikey_miss)
+    KVM_REFLECT(6)
+END(kvm_ikey_miss)
+
+    .org kvm_ia64_ivt+0x1c00
+/////////////////////////////////////////////////////////////////////
+// 0x1c00 Entry 7 (size 64 bundles) Data Key Miss (12,51)
+ENTRY(kvm_dkey_miss)
+    KVM_REFLECT(7)
+END(kvm_dkey_miss)
+
+    .org kvm_ia64_ivt+0x2000
+////////////////////////////////////////////////////////////////////
+// 0x2000 Entry 8 (size 64 bundles) Dirty-bit (54)
+ENTRY(kvm_dirty_bit)
+    KVM_REFLECT(8)
+END(kvm_dirty_bit)
+
+    .org kvm_ia64_ivt+0x2400
+////////////////////////////////////////////////////////////////////
+// 0x2400 Entry 9 (size 64 bundles) Instruction Access-bit (27)
+ENTRY(kvm_iaccess_bit)
+    KVM_REFLECT(9)
+END(kvm_iaccess_bit)
+
+    .org kvm_ia64_ivt+0x2800
+///////////////////////////////////////////////////////////////////
+// 0x2800 Entry 10 (size 64 bundles) Data Access-bit (15,55)
+ENTRY(kvm_daccess_bit)
+    KVM_REFLECT(10)
+END(kvm_daccess_bit)
+
+    .org kvm_ia64_ivt+0x2c00
+/////////////////////////////////////////////////////////////////
+// 0x2c00 Entry 11 (size 64 bundles) Break instruction (33)
+ENTRY(kvm_break_fault)
+    mov r31=pr
+    mov r19=11
+    mov r29=cr.ipsr
+    ;;
+    KVM_SAVE_MIN_WITH_COVER_R19
+    ;;
+    alloc r14=ar.pfs,0,0,4,0 // now it's safe (must be first in insn group!)
+    mov out0=cr.ifa
+    mov out2=cr.isr     // FIXME: pity to make this slow access twice
+    mov out3=cr.iim     // FIXME: pity to make this slow access twice
+    adds r3=8,r2                // set up second base pointer
+    ;;
+    ssm psr.ic
+    ;;
+    srlz.i                  // guarantee that interruption collection is on
+    ;;
+    //(p15)ssm psr.i               // restore psr.i
+    addl r14=@gprel(ia64_leave_hypervisor),gp
+    ;;
+    KVM_SAVE_REST
+    mov rp=r14
+    ;;
+    adds out1=16,sp
+    br.call.sptk.many b6=kvm_ia64_handle_break
+    ;;
+END(kvm_break_fault)
+
+    .org kvm_ia64_ivt+0x3000
+/////////////////////////////////////////////////////////////////
+// 0x3000 Entry 12 (size 64 bundles) External Interrupt (4)
+ENTRY(kvm_interrupt)
+    mov r31=pr         // prepare to save predicates
+    mov r19=12
+    mov r29=cr.ipsr
+    ;;
+    tbit.z p6,p7=r29,IA64_PSR_VM_BIT
+    tbit.z p0,p15=r29,IA64_PSR_I_BIT
+    ;;
+(p7) br.sptk kvm_dispatch_interrupt
+    ;;
+    mov r27=ar.rsc             /* M */
+    mov r20=r1                 /* A */
+    mov r25=ar.unat            /* M */
+    mov r26=ar.pfs             /* I */
+    mov r28=cr.iip             /* M */
+    cover                      /* B (or nothing) */
+    ;;
+    mov r1=sp
+    ;;
+    invala                     /* M */
+    mov r30=cr.ifs
+    ;;
+    addl r1=-VMM_PT_REGS_SIZE,r1
+    ;;
+    adds r17=2*L1_CACHE_BYTES,r1       /* really: biggest cache-line size */
+    adds r16=PT(CR_IPSR),r1
+    ;;
+    lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES
+    st8 [r16]=r29                      /* save cr.ipsr */
+    ;;
+    lfetch.fault.excl.nt1 [r17]
+    mov r29=b0
+    ;;
+    adds r16=PT(R8),r1         /* initialize first base pointer */
+    adds r17=PT(R9),r1         /* initialize second base pointer */
+    mov r18=r0                 /* make sure r18 isn't NaT */
+    ;;
+.mem.offset 0,0; st8.spill [r16]=r8,16
+.mem.offset 8,0; st8.spill [r17]=r9,16
+        ;;
+.mem.offset 0,0; st8.spill [r16]=r10,24
+.mem.offset 8,0; st8.spill [r17]=r11,24
+        ;;
+    st8 [r16]=r28,16           /* save cr.iip */
+    st8 [r17]=r30,16           /* save cr.ifs */
+    mov r8=ar.fpsr             /* M */
+    mov r9=ar.csd
+    mov r10=ar.ssd
+    movl r11=FPSR_DEFAULT      /* L-unit */
+    ;;
+    st8 [r16]=r25,16           /* save ar.unat */
+    st8 [r17]=r26,16           /* save ar.pfs */
+    shl r18=r18,16             /* compute ar.rsc to be used for "loadrs" */
+    ;;
+    st8 [r16]=r27,16           /* save ar.rsc */
+    adds r17=16,r17            /* skip over ar_rnat field */
+    ;;
+    st8 [r17]=r31,16           /* save predicates */
+    adds r16=16,r16            /* skip over ar_bspstore field */
+    ;;
+    st8 [r16]=r29,16           /* save b0 */
+    st8 [r17]=r18,16           /* save ar.rsc value for "loadrs" */
+    ;;
+.mem.offset 0,0; st8.spill [r16]=r20,16    /* save original r1 */
+.mem.offset 8,0; st8.spill [r17]=r12,16
+    adds r12=-16,r1
+    /* switch to kernel memory stack (with 16 bytes of scratch) */
+    ;;
+.mem.offset 0,0; st8.spill [r16]=r13,16
+.mem.offset 8,0; st8.spill [r17]=r8,16 /* save ar.fpsr */
+    ;;
+.mem.offset 0,0; st8.spill [r16]=r15,16
+.mem.offset 8,0; st8.spill [r17]=r14,16
+    dep r14=-1,r0,60,4
+    ;;
+.mem.offset 0,0; st8.spill [r16]=r2,16
+.mem.offset 8,0; st8.spill [r17]=r3,16
+    adds r2=VMM_PT_REGS_R16_OFFSET,r1
+    adds r14 = VMM_VCPU_GP_OFFSET,r13
+    ;;
+    mov r8=ar.ccv
+    ld8 r14 = [r14]
+    ;;
+    mov r1=r14       /* establish kernel global pointer */
+    ;;
+    bsw.1
+    ;;
+    alloc r14=ar.pfs,0,0,1,0   // must be first in an insn group
+    mov out0=r13
+    ;;
+    ssm psr.ic
+    ;;
+    srlz.i
+    ;;
+    //(p15) ssm psr.i
+    adds r3=8,r2               // set up second base pointer for SAVE_REST
+    srlz.i                     // ensure everybody knows psr.ic is back on
+    ;;
+.mem.offset 0,0; st8.spill [r2]=r16,16
+.mem.offset 8,0; st8.spill [r3]=r17,16
+    ;;
+.mem.offset 0,0; st8.spill [r2]=r18,16
+.mem.offset 8,0; st8.spill [r3]=r19,16
+    ;;
+.mem.offset 0,0; st8.spill [r2]=r20,16
+.mem.offset 8,0; st8.spill [r3]=r21,16
+    mov r18=b6
+    ;;
+.mem.offset 0,0; st8.spill [r2]=r22,16
+.mem.offset 8,0; st8.spill [r3]=r23,16
+    mov r19=b7
+    ;;
+.mem.offset 0,0; st8.spill [r2]=r24,16
+.mem.offset 8,0; st8.spill [r3]=r25,16
+    ;;
+.mem.offset 0,0; st8.spill [r2]=r26,16
+.mem.offset 8,0; st8.spill [r3]=r27,16
+    ;;
+.mem.offset 0,0; st8.spill [r2]=r28,16
+.mem.offset 8,0; st8.spill [r3]=r29,16
+    ;;
+.mem.offset 0,0; st8.spill [r2]=r30,16
+.mem.offset 8,0; st8.spill [r3]=r31,32
+    ;;
+    mov ar.fpsr=r11       /* M-unit */
+    st8 [r2]=r8,8         /* ar.ccv */
+    adds r24=PT(B6)-PT(F7),r3
+    ;;
+    stf.spill [r2]=f6,32
+    stf.spill [r3]=f7,32
+    ;;
+    stf.spill [r2]=f8,32
+    stf.spill [r3]=f9,32
+    ;;
+    stf.spill [r2]=f10
+    stf.spill [r3]=f11
+    adds r25=PT(B7)-PT(F11),r3
+    ;;
+    st8 [r24]=r18,16       /* b6 */
+    st8 [r25]=r19,16       /* b7 */
+    ;;
+    st8 [r24]=r9           /* ar.csd */
+    st8 [r25]=r10          /* ar.ssd */
+    ;;
+    srlz.d             // make sure we see the effect of cr.ivr
+    addl r14=@gprel(ia64_leave_nested),gp
+    ;;
+    mov rp=r14
+    br.call.sptk.many b6=kvm_ia64_handle_irq
+    ;;
+END(kvm_interrupt)
+
+    .global kvm_dispatch_vexirq
+    .org kvm_ia64_ivt+0x3400
+//////////////////////////////////////////////////////////////////////
+// 0x3400 Entry 13 (size 64 bundles) Reserved
+ENTRY(kvm_virtual_exirq)
+    mov r31=pr
+    mov r19=13
+    mov r30 =r0
+    ;;
+kvm_dispatch_vexirq:
+    cmp.eq p6,p0 = 1,r30
+    ;;
+(p6)add r29 = VMM_VCPU_SAVED_GP_OFFSET,r21
+    ;;
+(p6)ld8 r1 = [r29]
+    ;;
+    KVM_SAVE_MIN_WITH_COVER_R19
+    alloc r14=ar.pfs,0,0,1,0
+    mov out0=r13
+
+    ssm psr.ic
+    ;;
+    srlz.i                  // guarantee that interruption collection is on
+    ;;
+    //(p15) ssm psr.i               // restore psr.i
+    adds r3=8,r2                // set up second base pointer
+    ;;
+    KVM_SAVE_REST
+    addl r14=@gprel(ia64_leave_hypervisor),gp
+    ;;
+    mov rp=r14
+    br.call.sptk.many b6=kvm_vexirq
+END(kvm_virtual_exirq)
+
+    .org kvm_ia64_ivt+0x3800
+/////////////////////////////////////////////////////////////////////
+// 0x3800 Entry 14 (size 64 bundles) Reserved
+    KVM_FAULT(14)
+    // this code segment is from 2.6.16.13
+
+
+    .org kvm_ia64_ivt+0x3c00
+///////////////////////////////////////////////////////////////////////
+// 0x3c00 Entry 15 (size 64 bundles) Reserved
+    KVM_FAULT(15)
+
+
+    .org kvm_ia64_ivt+0x4000
+///////////////////////////////////////////////////////////////////////
+// 0x4000 Entry 16 (size 64 bundles) Reserved
+    KVM_FAULT(16)
+
+    .org kvm_ia64_ivt+0x4400
+//////////////////////////////////////////////////////////////////////
+// 0x4400 Entry 17 (size 64 bundles) Reserved
+    KVM_FAULT(17)
+
+    .org kvm_ia64_ivt+0x4800
+//////////////////////////////////////////////////////////////////////
+// 0x4800 Entry 18 (size 64 bundles) Reserved
+    KVM_FAULT(18)
+
+    .org kvm_ia64_ivt+0x4c00
+//////////////////////////////////////////////////////////////////////
+// 0x4c00 Entry 19 (size 64 bundles) Reserved
+    KVM_FAULT(19)
+
+    .org kvm_ia64_ivt+0x5000
+//////////////////////////////////////////////////////////////////////
+// 0x5000 Entry 20 (size 16 bundles) Page Not Present
+ENTRY(kvm_page_not_present)
+    KVM_REFLECT(20)
+END(kvm_page_not_present)
+
+    .org kvm_ia64_ivt+0x5100
+///////////////////////////////////////////////////////////////////////
+// 0x5100 Entry 21 (size 16 bundles) Key Permission vector
+ENTRY(kvm_key_permission)
+    KVM_REFLECT(21)
+END(kvm_key_permission)
+
+    .org kvm_ia64_ivt+0x5200
+//////////////////////////////////////////////////////////////////////
+// 0x5200 Entry 22 (size 16 bundles) Instruction Access Rights (26)
+ENTRY(kvm_iaccess_rights)
+    KVM_REFLECT(22)
+END(kvm_iaccess_rights)
+
+    .org kvm_ia64_ivt+0x5300
+//////////////////////////////////////////////////////////////////////
+// 0x5300 Entry 23 (size 16 bundles) Data Access Rights (14,53)
+ENTRY(kvm_daccess_rights)
+    KVM_REFLECT(23)
+END(kvm_daccess_rights)
+
+    .org kvm_ia64_ivt+0x5400
+/////////////////////////////////////////////////////////////////////
+// 0x5400 Entry 24 (size 16 bundles) General Exception (5,32,34,36,38,39)
+ENTRY(kvm_general_exception)
+   KVM_REFLECT(24)
+   KVM_FAULT(24)
+END(kvm_general_exception)
+
+    .org kvm_ia64_ivt+0x5500
+//////////////////////////////////////////////////////////////////////
+// 0x5500 Entry 25 (size 16 bundles) Disabled FP-Register (35)
+ENTRY(kvm_disabled_fp_reg)
+    KVM_REFLECT(25)
+END(kvm_disabled_fp_reg)
+
+    .org kvm_ia64_ivt+0x5600
+////////////////////////////////////////////////////////////////////
+// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
+ENTRY(kvm_nat_consumption)
+    KVM_REFLECT(26)
+END(kvm_nat_consumption)
+
+    .org kvm_ia64_ivt+0x5700
+/////////////////////////////////////////////////////////////////////
+// 0x5700 Entry 27 (size 16 bundles) Speculation (40)
+ENTRY(kvm_speculation_vector)
+    KVM_REFLECT(27)
+END(kvm_speculation_vector)
+
+    .org kvm_ia64_ivt+0x5800
+/////////////////////////////////////////////////////////////////////
+// 0x5800 Entry 28 (size 16 bundles) Reserved
+    KVM_FAULT(28)
+
+    .org kvm_ia64_ivt+0x5900
+///////////////////////////////////////////////////////////////////
+// 0x5900 Entry 29 (size 16 bundles) Debug (16,28,56)
+ENTRY(kvm_debug_vector)
+    KVM_FAULT(29)
+END(kvm_debug_vector)
+
+    .org kvm_ia64_ivt+0x5a00
+///////////////////////////////////////////////////////////////
+// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
+ENTRY(kvm_unaligned_access)
+    KVM_REFLECT(30)
+END(kvm_unaligned_access)
+
+    .org kvm_ia64_ivt+0x5b00
+//////////////////////////////////////////////////////////////////////
+// 0x5b00 Entry 31 (size 16 bundles) Unsupported Data Reference (57)
+ENTRY(kvm_unsupported_data_reference)
+    KVM_REFLECT(31)
+END(kvm_unsupported_data_reference)
+
+    .org kvm_ia64_ivt+0x5c00
+////////////////////////////////////////////////////////////////////
+// 0x5c00 Entry 32 (size 16 bundles) Floating Point FAULT (65)
+ENTRY(kvm_floating_point_fault)
+    KVM_REFLECT(32)
+END(kvm_floating_point_fault)
+
+    .org kvm_ia64_ivt+0x5d00
+/////////////////////////////////////////////////////////////////////
+// 0x5d00 Entry 33 (size 16 bundles) Floating Point Trap (66)
+ENTRY(kvm_floating_point_trap)
+    KVM_REFLECT(33)
+END(kvm_floating_point_trap)
+
+    .org kvm_ia64_ivt+0x5e00
+//////////////////////////////////////////////////////////////////////
+// 0x5e00 Entry 34 (size 16 bundles) Lower Privilege Transfer Trap (66)
+ENTRY(kvm_lower_privilege_trap)
+    KVM_REFLECT(34)
+END(kvm_lower_privilege_trap)
+
+    .org kvm_ia64_ivt+0x5f00
+//////////////////////////////////////////////////////////////////////
+// 0x5f00 Entry 35 (size 16 bundles) Taken Branch Trap (68)
+ENTRY(kvm_taken_branch_trap)
+    KVM_REFLECT(35)
+END(kvm_taken_branch_trap)
+
+    .org kvm_ia64_ivt+0x6000
+////////////////////////////////////////////////////////////////////
+// 0x6000 Entry 36 (size 16 bundles) Single Step Trap (69)
+ENTRY(kvm_single_step_trap)
+    KVM_REFLECT(36)
+END(kvm_single_step_trap)
+    .global kvm_virtualization_fault_back
+    .org kvm_ia64_ivt+0x6100
+/////////////////////////////////////////////////////////////////////
+// 0x6100 Entry 37 (size 16 bundles) Virtualization Fault
+ENTRY(kvm_virtualization_fault)
+    mov r31=pr
+    adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
+    ;;
+    st8 [r16] = r1
+    adds r17 = VMM_VCPU_GP_OFFSET, r21
+    ;;
+    ld8 r1 = [r17]
+    cmp.eq p6,p0=EVENT_MOV_FROM_AR,r24
+    cmp.eq p7,p0=EVENT_MOV_FROM_RR,r24
+    cmp.eq p8,p0=EVENT_MOV_TO_RR,r24
+    cmp.eq p9,p0=EVENT_RSM,r24
+    cmp.eq p10,p0=EVENT_SSM,r24
+    cmp.eq p11,p0=EVENT_MOV_TO_PSR,r24
+    cmp.eq p12,p0=EVENT_THASH,r24
+    (p6) br.dptk.many kvm_asm_mov_from_ar
+    (p7) br.dptk.many kvm_asm_mov_from_rr
+    (p8) br.dptk.many kvm_asm_mov_to_rr
+    (p9) br.dptk.many kvm_asm_rsm
+    (p10) br.dptk.many kvm_asm_ssm
+    (p11) br.dptk.many kvm_asm_mov_to_psr
+    (p12) br.dptk.many kvm_asm_thash
+    ;;
+kvm_virtualization_fault_back:
+    adds r16 = VMM_VCPU_SAVED_GP_OFFSET,r21
+    ;;
+    ld8 r1 = [r16]
+    ;;
+    mov r19=37
+    adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
+    adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
+    ;;
+    st8 [r16] = r24
+    st8 [r17] = r25
+    ;;
+    cmp.ne p6,p0=EVENT_RFI, r24
+    (p6) br.sptk kvm_dispatch_virtualization_fault
+    ;;
+    adds r18=VMM_VPD_BASE_OFFSET,r21
+    ;;
+    ld8 r18=[r18]
+    ;;
+    adds r18=VMM_VPD_VIFS_OFFSET,r18
+    ;;
+    ld8 r18=[r18]
+    ;;
+    tbit.z p6,p0=r18,63
+    (p6) br.sptk kvm_dispatch_virtualization_fault
+    ;;
+    // if vifs.v=1, discard the current register frame
+    alloc r18=ar.pfs,0,0,0,0
+    br.sptk kvm_dispatch_virtualization_fault
+END(kvm_virtualization_fault)
+
+    .org kvm_ia64_ivt+0x6200
+//////////////////////////////////////////////////////////////
+// 0x6200 Entry 38 (size 16 bundles) Reserved
+    KVM_FAULT(38)
+
+    .org kvm_ia64_ivt+0x6300
+/////////////////////////////////////////////////////////////////
+// 0x6300 Entry 39 (size 16 bundles) Reserved
+    KVM_FAULT(39)
+
+    .org kvm_ia64_ivt+0x6400
+/////////////////////////////////////////////////////////////////
+// 0x6400 Entry 40 (size 16 bundles) Reserved
+    KVM_FAULT(40)
+
+    .org kvm_ia64_ivt+0x6500
+//////////////////////////////////////////////////////////////////
+// 0x6500 Entry 41 (size 16 bundles) Reserved
+    KVM_FAULT(41)
+
+    .org kvm_ia64_ivt+0x6600
+//////////////////////////////////////////////////////////////////
+// 0x6600 Entry 42 (size 16 bundles) Reserved
+    KVM_FAULT(42)
+
+    .org kvm_ia64_ivt+0x6700
+//////////////////////////////////////////////////////////////////
+// 0x6700 Entry 43 (size 16 bundles) Reserved
+    KVM_FAULT(43)
+
+    .org kvm_ia64_ivt+0x6800
+//////////////////////////////////////////////////////////////////
+// 0x6800 Entry 44 (size 16 bundles) Reserved
+    KVM_FAULT(44)
+
+    .org kvm_ia64_ivt+0x6900
+///////////////////////////////////////////////////////////////////
+// 0x6900 Entry 45 (size 16 bundles) IA-32 Exception
+//(17,18,29,41,42,43,44,58,60,61,62,72,73,75,76,77)
+ENTRY(kvm_ia32_exception)
+    KVM_FAULT(45)
+END(kvm_ia32_exception)
+
+    .org kvm_ia64_ivt+0x6a00
+////////////////////////////////////////////////////////////////////
+// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept  (30,31,59,70,71)
+ENTRY(kvm_ia32_intercept)
+    KVM_FAULT(47)
+END(kvm_ia32_intercept)
+
+    .org kvm_ia64_ivt+0x6c00
+/////////////////////////////////////////////////////////////////////
+// 0x6c00 Entry 48 (size 16 bundles) Reserved
+    KVM_FAULT(48)
+
+    .org kvm_ia64_ivt+0x6d00
+//////////////////////////////////////////////////////////////////////
+// 0x6d00 Entry 49 (size 16 bundles) Reserved
+    KVM_FAULT(49)
+
+    .org kvm_ia64_ivt+0x6e00
+//////////////////////////////////////////////////////////////////////
+// 0x6e00 Entry 50 (size 16 bundles) Reserved
+    KVM_FAULT(50)
+
+    .org kvm_ia64_ivt+0x6f00
+/////////////////////////////////////////////////////////////////////
+// 0x6f00 Entry 51 (size 16 bundles) Reserved
+    KVM_FAULT(52)
+
+    .org kvm_ia64_ivt+0x7100
+////////////////////////////////////////////////////////////////////
+// 0x7100 Entry 53 (size 16 bundles) Reserved
+    KVM_FAULT(53)
+
+    .org kvm_ia64_ivt+0x7200
+/////////////////////////////////////////////////////////////////////
+// 0x7200 Entry 54 (size 16 bundles) Reserved
+    KVM_FAULT(54)
+
+    .org kvm_ia64_ivt+0x7300
+////////////////////////////////////////////////////////////////////
+// 0x7300 Entry 55 (size 16 bundles) Reserved
+    KVM_FAULT(55)
+
+    .org kvm_ia64_ivt+0x7400
+////////////////////////////////////////////////////////////////////
+// 0x7400 Entry 56 (size 16 bundles) Reserved
+    KVM_FAULT(56)
+
+    .org kvm_ia64_ivt+0x7500
+/////////////////////////////////////////////////////////////////////
+// 0x7500 Entry 57 (size 16 bundles) Reserved
+    KVM_FAULT(57)
+
+    .org kvm_ia64_ivt+0x7600
+/////////////////////////////////////////////////////////////////////
+// 0x7600 Entry 58 (size 16 bundles) Reserved
+    KVM_FAULT(58)
+
+    .org kvm_ia64_ivt+0x7700
+////////////////////////////////////////////////////////////////////
+// 0x7700 Entry 59 (size 16 bundles) Reserved
+    KVM_FAULT(59)
+
+    .org kvm_ia64_ivt+0x7800
+////////////////////////////////////////////////////////////////////
+// 0x7800 Entry 60 (size 16 bundles) Reserved
+    KVM_FAULT(60)
+
+    .org kvm_ia64_ivt+0x7900
+/////////////////////////////////////////////////////////////////////
+// 0x7900 Entry 61 (size 16 bundles) Reserved
+    KVM_FAULT(61)
+
+    .org kvm_ia64_ivt+0x7a00
+/////////////////////////////////////////////////////////////////////
+// 0x7a00 Entry 62 (size 16 bundles) Reserved
+    KVM_FAULT(62)
+
+    .org kvm_ia64_ivt+0x7b00
+/////////////////////////////////////////////////////////////////////
+// 0x7b00 Entry 63 (size 16 bundles) Reserved
+    KVM_FAULT(63)
+
+    .org kvm_ia64_ivt+0x7c00
+////////////////////////////////////////////////////////////////////
+// 0x7c00 Entry 64 (size 16 bundles) Reserved
+    KVM_FAULT(64)
+
+    .org kvm_ia64_ivt+0x7d00
+/////////////////////////////////////////////////////////////////////
+// 0x7d00 Entry 65 (size 16 bundles) Reserved
+    KVM_FAULT(65)
+
+    .org kvm_ia64_ivt+0x7e00
+/////////////////////////////////////////////////////////////////////
+// 0x7e00 Entry 66 (size 16 bundles) Reserved
+    KVM_FAULT(66)
+
+    .org kvm_ia64_ivt+0x7f00
+////////////////////////////////////////////////////////////////////
+// 0x7f00 Entry 67 (size 16 bundles) Reserved
+    KVM_FAULT(67)
+
+    .org kvm_ia64_ivt+0x8000
+// There is no particular reason for this code to be here, other than that
+// there happens to be space here that would go unused otherwise.  If this
+// fault ever gets "unreserved", simply move the following code to a more
+// suitable spot...
+
+
+ENTRY(kvm_dtlb_miss_dispatch)
+    mov r19 = 2
+    KVM_SAVE_MIN_WITH_COVER_R19
+    alloc r14=ar.pfs,0,0,3,0
+    mov out0=cr.ifa
+    mov out1=r15
+    adds r3=8,r2                // set up second base pointer
+    ;;
+    ssm psr.ic
+    ;;
+    srlz.i                  // guarantee that interruption collection is on
+    ;;
+    //(p15) ssm psr.i               // restore psr.i
+    addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
+    ;;
+    KVM_SAVE_REST
+    KVM_SAVE_EXTRA
+    mov rp=r14
+    ;;
+    adds out2=16,r12
+    br.call.sptk.many b6=kvm_page_fault
+END(kvm_dtlb_miss_dispatch)
+
+ENTRY(kvm_itlb_miss_dispatch)
+
+    KVM_SAVE_MIN_WITH_COVER_R19
+    alloc r14=ar.pfs,0,0,3,0
+    mov out0=cr.ifa
+    mov out1=r15
+    adds r3=8,r2                // set up second base pointer
+    ;;
+    ssm psr.ic
+    ;;
+    srlz.i                  // guarantee that interruption collection is on
+    ;;
+    //(p15) ssm psr.i               // restore psr.i
+    addl r14=@gprel(ia64_leave_hypervisor),gp
+    ;;
+    KVM_SAVE_REST
+    mov rp=r14
+    ;;
+    adds out2=16,r12
+    br.call.sptk.many b6=kvm_page_fault
+END(kvm_itlb_miss_dispatch)
+
+ENTRY(kvm_dispatch_reflection)
+    /*
+     * Input:
+     *  psr.ic: off
+     *  r19:    intr type (offset into ivt, see ia64_int.h)
+     *  r31:    contains saved predicates (pr)
+     */
+    KVM_SAVE_MIN_WITH_COVER_R19
+    alloc r14=ar.pfs,0,0,5,0
+    mov out0=cr.ifa
+    mov out1=cr.isr
+    mov out2=cr.iim
+    mov out3=r15
+    adds r3=8,r2                // set up second base pointer
+    ;;
+    ssm psr.ic
+    ;;
+    srlz.i                  // guarantee that interruption collection is on
+    ;;
+    //(p15) ssm psr.i               // restore psr.i
+    addl r14=@gprel(ia64_leave_hypervisor),gp
+    ;;
+    KVM_SAVE_REST
+    mov rp=r14
+    ;;
+    adds out4=16,r12
+    br.call.sptk.many b6=reflect_interruption
+END(kvm_dispatch_reflection)
+
+ENTRY(kvm_dispatch_virtualization_fault)
+    adds r16 = VMM_VCPU_CAUSE_OFFSET,r21
+    adds r17 = VMM_VCPU_OPCODE_OFFSET,r21
+    ;;
+    st8 [r16] = r24
+    st8 [r17] = r25
+    ;;
+    KVM_SAVE_MIN_WITH_COVER_R19
+    ;;
+    alloc r14=ar.pfs,0,0,2,0 // now it's safe (must be first in insn group!)
+    mov out0=r13        //vcpu
+    adds r3=8,r2                // set up second base pointer
+    ;;
+    ssm psr.ic
+    ;;
+    srlz.i                  // guarantee that interruption collection is on
+    ;;
+    //(p15) ssm psr.i               // restore psr.i
+    addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
+    ;;
+    KVM_SAVE_REST
+    KVM_SAVE_EXTRA
+    mov rp=r14
+    ;;
+    adds out1=16,sp         //regs
+    br.call.sptk.many b6=kvm_emulate
+END(kvm_dispatch_virtualization_fault)
+
+
+ENTRY(kvm_dispatch_interrupt)
+    KVM_SAVE_MIN_WITH_COVER_R19        // uses r31; defines r2 and r3
+    ;;
+    alloc r14=ar.pfs,0,0,1,0 // must be first in an insn group
+    //mov out0=cr.ivr          // pass cr.ivr as first arg
+    adds r3=8,r2               // set up second base pointer for SAVE_REST
+    ;;
+    ssm psr.ic
+    ;;
+    srlz.i
+    ;;
+    //(p15) ssm psr.i
+    addl r14=@gprel(ia64_leave_hypervisor),gp
+    ;;
+    KVM_SAVE_REST
+    mov rp=r14
+    ;;
+    mov out0=r13               // pass pointer to pt_regs as second arg
+    br.call.sptk.many b6=kvm_ia64_handle_irq
+END(kvm_dispatch_interrupt)
+
+
+
+
+GLOBAL_ENTRY(ia64_leave_nested)
+       rsm psr.i
+       ;;
+       adds r21=PT(PR)+16,r12
+       ;;
+       lfetch [r21],PT(CR_IPSR)-PT(PR)
+       adds r2=PT(B6)+16,r12
+       adds r3=PT(R16)+16,r12
+       ;;
+       lfetch [r21]
+       ld8 r28=[r2],8          // load b6
+       adds r29=PT(R24)+16,r12
+
+       ld8.fill r16=[r3]
+       adds r3=PT(AR_CSD)-PT(R16),r3
+       adds r30=PT(AR_CCV)+16,r12
+       ;;
+       ld8.fill r24=[r29]
+       ld8 r15=[r30]           // load ar.ccv
+       ;;
+       ld8 r29=[r2],16         // load b7
+       ld8 r30=[r3],16         // load ar.csd
+       ;;
+       ld8 r31=[r2],16         // load ar.ssd
+       ld8.fill r8=[r3],16
+       ;;
+       ld8.fill r9=[r2],16
+       ld8.fill r10=[r3],PT(R17)-PT(R10)
+       ;;
+       ld8.fill r11=[r2],PT(R18)-PT(R11)
+       ld8.fill r17=[r3],16
+       ;;
+       ld8.fill r18=[r2],16
+       ld8.fill r19=[r3],16
+       ;;
+       ld8.fill r20=[r2],16
+       ld8.fill r21=[r3],16
+       mov ar.csd=r30
+       mov ar.ssd=r31
+       ;;
+       rsm psr.i | psr.ic
+       // initiate turning off of interrupt and interruption collection
+       invala                  // invalidate ALAT
+       ;;
+       srlz.i
+       ;;
+       ld8.fill r22=[r2],24
+       ld8.fill r23=[r3],24
+       mov b6=r28
+       ;;
+       ld8.fill r25=[r2],16
+       ld8.fill r26=[r3],16
+       mov b7=r29
+       ;;
+       ld8.fill r27=[r2],16
+       ld8.fill r28=[r3],16
+       ;;
+       ld8.fill r29=[r2],16
+       ld8.fill r30=[r3],24
+       ;;
+       ld8.fill r31=[r2],PT(F9)-PT(R31)
+       adds r3=PT(F10)-PT(F6),r3
+       ;;
+       ldf.fill f9=[r2],PT(F6)-PT(F9)
+       ldf.fill f10=[r3],PT(F8)-PT(F10)
+       ;;
+       ldf.fill f6=[r2],PT(F7)-PT(F6)
+       ;;
+       ldf.fill f7=[r2],PT(F11)-PT(F7)
+       ldf.fill f8=[r3],32
+       ;;
+       srlz.i                  // ensure interruption collection is off
+       mov ar.ccv=r15
+       ;;
+       bsw.0   // switch back to bank 0 (no stop bit required beforehand...)
+       ;;
+       ldf.fill f11=[r2]
+//     mov r18=r13
+//    mov r21=r13
+       adds r16=PT(CR_IPSR)+16,r12
+       adds r17=PT(CR_IIP)+16,r12
+       ;;
+       ld8 r29=[r16],16        // load cr.ipsr
+       ld8 r28=[r17],16        // load cr.iip
+       ;;
+       ld8 r30=[r16],16        // load cr.ifs
+       ld8 r25=[r17],16        // load ar.unat
+       ;;
+       ld8 r26=[r16],16        // load ar.pfs
+       ld8 r27=[r17],16        // load ar.rsc
+       cmp.eq p9,p0=r0,r0
+       // set p9 to indicate that we should restore cr.ifs
+       ;;
+       ld8 r24=[r16],16        // load ar.rnat (may be garbage)
+       ld8 r23=[r17],16// load ar.bspstore (may be garbage)
+       ;;
+       ld8 r31=[r16],16        // load predicates
+       ld8 r22=[r17],16        // load b0
+       ;;
+       ld8 r19=[r16],16        // load ar.rsc value for "loadrs"
+       ld8.fill r1=[r17],16    // load r1
+       ;;
+       ld8.fill r12=[r16],16
+       ld8.fill r13=[r17],16
+       ;;
+       ld8 r20=[r16],16        // ar.fpsr
+       ld8.fill r15=[r17],16
+       ;;
+       ld8.fill r14=[r16],16
+       ld8.fill r2=[r17]
+       ;;
+       ld8.fill r3=[r16]
+       ;;
+       mov r16=ar.bsp          // get existing backing store pointer
+       ;;
+       mov b0=r22
+       mov ar.pfs=r26
+       mov cr.ifs=r30
+       mov cr.ipsr=r29
+       mov ar.fpsr=r20
+       mov cr.iip=r28
+       ;;
+       mov ar.rsc=r27
+       mov ar.unat=r25
+       mov pr=r31,-1
+       rfi
+END(ia64_leave_nested)
+
+
+
+GLOBAL_ENTRY(ia64_leave_hypervisor_prepare)
+    /*
+     * work.need_resched etc. mustn't get changed by this CPU before it
+     * returns to user- or fsys-mode, hence we disable interrupts early on.
+     */
+    adds r2 = PT(R4)+16,r12
+    adds r3 = PT(R5)+16,r12
+    adds r8 = PT(EML_UNAT)+16,r12
+    ;;
+    ld8 r8 = [r8]
+    ;;
+    mov ar.unat=r8
+    ;;
+    ld8.fill r4=[r2],16    //load r4
+    ld8.fill r5=[r3],16    //load r5
+    ;;
+    ld8.fill r6=[r2]    //load r6
+    ld8.fill r7=[r3]    //load r7
+    ;;
+END(ia64_leave_hypervisor_prepare)
+//fall through
+GLOBAL_ENTRY(ia64_leave_hypervisor)
+    rsm psr.i
+    ;;
+    br.call.sptk.many b0=leave_hypervisor_tail
+    ;;
+    adds r20=PT(PR)+16,r12
+    adds r8=PT(EML_UNAT)+16,r12
+    ;;
+    ld8 r8=[r8]
+    ;;
+    mov ar.unat=r8
+    ;;
+    lfetch [r20],PT(CR_IPSR)-PT(PR)
+    adds r2 = PT(B6)+16,r12
+    adds r3 = PT(B7)+16,r12
+    ;;
+    lfetch [r20]
+    ;;
+    ld8 r24=[r2],16        /* B6 */
+    ld8 r25=[r3],16        /* B7 */
+    ;;
+    ld8 r26=[r2],16        /* ar_csd */
+    ld8 r27=[r3],16        /* ar_ssd */
+    mov b6 = r24
+    ;;
+    ld8.fill r8=[r2],16
+    ld8.fill r9=[r3],16
+    mov b7 = r25
+    ;;
+    mov ar.csd = r26
+    mov ar.ssd = r27
+    ;;
+    ld8.fill r10=[r2],PT(R15)-PT(R10)
+    ld8.fill r11=[r3],PT(R14)-PT(R11)
+    ;;
+    ld8.fill r15=[r2],PT(R16)-PT(R15)
+    ld8.fill r14=[r3],PT(R17)-PT(R14)
+    ;;
+    ld8.fill r16=[r2],16
+    ld8.fill r17=[r3],16
+    ;;
+    ld8.fill r18=[r2],16
+    ld8.fill r19=[r3],16
+    ;;
+    ld8.fill r20=[r2],16
+    ld8.fill r21=[r3],16
+    ;;
+    ld8.fill r22=[r2],16
+    ld8.fill r23=[r3],16
+    ;;
+    ld8.fill r24=[r2],16
+    ld8.fill r25=[r3],16
+    ;;
+    ld8.fill r26=[r2],16
+    ld8.fill r27=[r3],16
+    ;;
+    ld8.fill r28=[r2],16
+    ld8.fill r29=[r3],16
+    ;;
+    ld8.fill r30=[r2],PT(F6)-PT(R30)
+    ld8.fill r31=[r3],PT(F7)-PT(R31)
+    ;;
+    rsm psr.i | psr.ic
+    // initiate turning off of interrupt and interruption collection
+    invala          // invalidate ALAT
+    ;;
+    srlz.i          // ensure interruption collection is off
+    ;;
+    bsw.0
+    ;;
+    adds r16 = PT(CR_IPSR)+16,r12
+    adds r17 = PT(CR_IIP)+16,r12
+    mov r21=r13                // get current
+    ;;
+    ld8 r31=[r16],16    // load cr.ipsr
+    ld8 r30=[r17],16    // load cr.iip
+    ;;
+    ld8 r29=[r16],16    // load cr.ifs
+    ld8 r28=[r17],16    // load ar.unat
+    ;;
+    ld8 r27=[r16],16    // load ar.pfs
+    ld8 r26=[r17],16    // load ar.rsc
+    ;;
+    ld8 r25=[r16],16    // load ar.rnat
+    ld8 r24=[r17],16    // load ar.bspstore
+    ;;
+    ld8 r23=[r16],16    // load predicates
+    ld8 r22=[r17],16    // load b0
+    ;;
+    ld8 r20=[r16],16    // load ar.rsc value for "loadrs"
+    ld8.fill r1=[r17],16    //load r1
+    ;;
+    ld8.fill r12=[r16],16    //load r12
+    ld8.fill r13=[r17],PT(R2)-PT(R13)    //load r13
+    ;;
+    ld8 r19=[r16],PT(R3)-PT(AR_FPSR)    //load ar_fpsr
+    ld8.fill r2=[r17],PT(AR_CCV)-PT(R2)    //load r2
+    ;;
+    ld8.fill r3=[r16]  //load r3
+    ld8 r18=[r17]      //load ar_ccv
+    ;;
+    mov ar.fpsr=r19
+    mov ar.ccv=r18
+    shr.u r18=r20,16
+    ;;
+kvm_rbs_switch:
+    mov r19=96
+
+kvm_dont_preserve_current_frame:
+/*
+    * To prevent leaking bits between the hypervisor and guest domain,
+    * we must clear the stacked registers in the "invalid" partition here.
+    * Not pretty, but at least it's fast (3.34 registers/cycle on Itanium,
+    * 5 registers/cycle on McKinley).
+    */
+#   define pRecurse    p6
+#   define pReturn     p7
+#   define Nregs       14
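+// Each pass through kvm_rse_clear_invalid below allocates Nregs-2 locals and
+// two outputs, zeroes the locals, and recurses while more than Nregs*8 bytes
+// of invalid backing store remain; the chain then unwinds via br.ret.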
+
+    alloc loc0=ar.pfs,2,Nregs-2,2,0
+    shr.u loc1=r18,9           // RNaTslots <= floor(dirtySize / (64*8))
+    sub r19=r19,r18            // r19 = (physStackedSize + 8) - dirtySize
+    ;;
+    mov ar.rsc=r20             // load ar.rsc to be used for "loadrs"
+    shladd in0=loc1,3,r19
+    mov in1=0
+    ;;
+    TEXT_ALIGN(32)
+kvm_rse_clear_invalid:
+    alloc loc0=ar.pfs,2,Nregs-2,2,0
+    cmp.lt pRecurse,p0=Nregs*8,in0
+    // if more than Nregs regs left to clear, (re)curse
+    add out0=-Nregs*8,in0
+    add out1=1,in1             // increment recursion count
+    mov loc1=0
+    mov loc2=0
+    ;;
+    mov loc3=0
+    mov loc4=0
+    mov loc5=0
+    mov loc6=0
+    mov loc7=0
+(pRecurse) br.call.dptk.few b0=kvm_rse_clear_invalid
+    ;;
+    mov loc8=0
+    mov loc9=0
+    cmp.ne pReturn,p0=r0,in1
+    // if recursion count != 0, we need to do a br.ret
+    mov loc10=0
+    mov loc11=0
+(pReturn) br.ret.dptk.many b0
+
+#      undef pRecurse
+#      undef pReturn
+
+// loadrs has already been shifted
+    alloc r16=ar.pfs,0,0,0,0    // drop current register frame
+    ;;
+    loadrs
+    ;;
+    mov ar.bspstore=r24
+    ;;
+    mov ar.unat=r28
+    mov ar.rnat=r25
+    mov ar.rsc=r26
+    ;;
+    mov cr.ipsr=r31
+    mov cr.iip=r30
+    mov cr.ifs=r29
+    mov ar.pfs=r27
+    adds r18=VMM_VPD_BASE_OFFSET,r21
+    ;;
+    ld8 r18=[r18]   //vpd
+    adds r17=VMM_VCPU_ISR_OFFSET,r21
+    ;;
+    ld8 r17=[r17]
+    adds r19=VMM_VPD_VPSR_OFFSET,r18
+    ;;
+    ld8 r19=[r19]        //vpsr
+    adds r20=VMM_VCPU_VSA_BASE_OFFSET,r21
+    ;;
+    ld8 r20=[r20]
+    ;;
+//vsa_sync_write_start
+    mov r25=r18
+    adds r16= VMM_VCPU_GP_OFFSET,r21
+    ;;
+    ld8 r16= [r16]              // load gp
+    movl r24=@gprel(ia64_vmm_entry)  // calculate return address
+    ;;
+    add  r24=r24,r16
+    ;;
+    add r16=PAL_VPS_SYNC_WRITE,r20
+    ;;
+    mov b0=r16
+    br.cond.sptk b0         // call the service
+    ;;
+END(ia64_leave_hypervisor)
+// fall through
+GLOBAL_ENTRY(ia64_vmm_entry)
+/*
+ *  must be at bank 0
+ *  parameter:
+ *  r17:cr.isr
+ *  r18:vpd
+ *  r19:vpsr
+ *  r20:__vsa_base
+ *  r22:b0
+ *  r23:predicate
+ */
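+// The checks below pick the PAL virtual service used to re-enter the guest:
+// PAL_VPS_RESUME_NORMAL when vpsr.ic (or, failing that, cr.isr.ir) is set,
+// PAL_VPS_RESUME_HANDLER otherwise.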
+    mov r24=r22
+    mov r25=r18
+    tbit.nz p1,p2 = r19,IA64_PSR_IC_BIT        // p1=vpsr.ic
+    ;;
+    (p1) add r29=PAL_VPS_RESUME_NORMAL,r20
+    (p1) br.sptk.many ia64_vmm_entry_out
+    ;;
+    tbit.nz p1,p2 = r17,IA64_ISR_IR_BIT                //p1=cr.isr.ir
+    ;;
+    (p1) add r29=PAL_VPS_RESUME_NORMAL,r20
+    (p2) add r29=PAL_VPS_RESUME_HANDLER,r20
+    (p2) ld8 r26=[r25]
+    ;;
+ia64_vmm_entry_out:
+    mov pr=r23,-2
+    mov b0=r29
+    ;;
+    br.cond.sptk b0             // call pal service
+END(ia64_vmm_entry)
+
+
+
+/*
+ * extern u64 ia64_call_vsa(u64 proc, u64 arg1, u64 arg2,
+ *                  u64 arg3, u64 arg4, u64 arg5,
+ *                  u64 arg6, u64 arg7);
+ *
+ * XXX: The currently defined services use only 4 args at the max. The
+ *  rest are not consumed.
+ */
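+/*
+ * For illustration only (a sketch; the argument meanings depend on the
+ * service), a THASH request through the VSA would look something like:
+ *
+ *     ret = ia64_call_vsa(PAL_VPS_THASH, vadr, vrr, pta, 0, 0, 0, 0);
+ */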
+GLOBAL_ENTRY(ia64_call_vsa)
+    .regstk 4,4,0,0
+
+rpsave  =   loc0
+pfssave =   loc1
+psrsave =   loc2
+entry   =   loc3
+hostret =   r24
+
+    alloc   pfssave=ar.pfs,4,4,0,0
+    mov rpsave=rp
+    adds entry=VMM_VCPU_VSA_BASE_OFFSET, r13
+    ;;
+    ld8 entry=[entry]
+1:  mov hostret=ip
+    mov r25=in1         // copy arguments
+    mov r26=in2
+    mov r27=in3
+    mov psrsave=psr
+    ;;
+    tbit.nz p6,p0=psrsave,14    // IA64_PSR_I
+    tbit.nz p7,p0=psrsave,13    // IA64_PSR_IC
+    ;;
+    add hostret=2f-1b,hostret   // calculate return address
+    add entry=entry,in0
+    ;;
+    rsm psr.i | psr.ic
+    ;;
+    srlz.i
+    mov b6=entry
+    br.cond.sptk b6         // call the service
+2:
+    // Architectural sequence for enabling interrupts if necessary
+(p7)    ssm psr.ic
+    ;;
+(p7)    srlz.i
+    ;;
+//(p6)    ssm psr.i
+    ;;
+    mov rp=rpsave
+    mov ar.pfs=pfssave
+    mov r8=r31
+    ;;
+    srlz.d
+    br.ret.sptk rp
+
+END(ia64_call_vsa)
+
+#define  INIT_BSPSTORE  ((4<<30)-(12<<20)-0x100)
+
+GLOBAL_ENTRY(vmm_reset_entry)
+    //set up ipsr, iip, vpd.vpsr, dcr
+    // For IPSR: it/dt/rt=1, i/ic=1, si=1, vm/bn=1
+    // For DCR: all bits 0
+    adds r14=-VMM_PT_REGS_SIZE, r12
+    ;;
+    movl r6=0x501008826000      // IPSR dt/rt/it:1;i/ic:1, si:1, vm/bn:1
+    movl r10=0x8000000000000000
+    adds r16=PT(CR_IIP), r14
+    adds r20=PT(R1), r14
+    ;;
+    rsm psr.ic | psr.i
+    ;;
+    srlz.i
+    ;;
+    bsw.0
+    ;;
+    mov r21=r13
+    ;;
+    bsw.1
+    ;;
+    mov ar.rsc = 0
+    ;;
+    flushrs
+    ;;
+    mov ar.bspstore = 0
+    // clear BSPSTORE
+    ;;
+    mov cr.ipsr=r6
+    mov cr.ifs=r10
+    ld8 r4 = [r16] // Set init iip for first run.
+    ld8 r1 = [r20]
+    ;;
+    mov cr.iip=r4
+    ;;
+    adds r16=VMM_VPD_BASE_OFFSET,r13
+    adds r20=VMM_VCPU_VSA_BASE_OFFSET,r13
+    ;;
+    ld8 r18=[r16]
+    ld8 r20=[r20]
+    ;;
+    adds r19=VMM_VPD_VPSR_OFFSET,r18
+    ;;
+    ld8 r19=[r19]
+    mov r17=r0
+    mov r22=r0
+    mov r23=r0
+    br.cond.sptk ia64_vmm_entry
+    br.ret.sptk  b0
+END(vmm_reset_entry)
diff --git a/arch/ia64/kvm/vti.h b/arch/ia64/kvm/vti.h
new file mode 100644 (file)
index 0000000..f6c5617
--- /dev/null
@@ -0,0 +1,290 @@
+/*
+ * vti.h: prototypes for the general VT related interfaces
+ *     Copyright (c) 2004, Intel Corporation.
+ *
+ *     Xuefei Xu (Anthony Xu) (anthony.xu@intel.com)
+ *     Fred Yang (fred.yang@intel.com)
+ *     Kun Tian (Kevin Tian) (kevin.tian@intel.com)
+ *
+ *     Copyright (c) 2007, Intel Corporation.
+ *     Zhang xiantao <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ */
+#ifndef _KVM_VT_I_H
+#define _KVM_VT_I_H
+
+#ifndef __ASSEMBLY__
+#include <asm/page.h>
+
+#include <linux/kvm_host.h>
+
+/* define itr.i and itr.d  in ia64_itr function */
+#define        ITR     0x01
+#define        DTR     0x02
+#define        IaDTR   0x03
+
+#define IA64_TR_VMM       6 /*itr6, dtr6 : maps vmm code, vmbuffer*/
+#define IA64_TR_VM_DATA   7 /*dtr7       : maps current vm data*/
+
+#define RR6 (6UL<<61)
+#define RR7 (7UL<<61)
+
+
+/* config_options in pal_vp_init_env */
+#define        VP_INITIALIZE   1UL
+#define        VP_FR_PMC       (1UL << 1)
+#define        VP_OPCODE       (1UL << 8)
+#define        VP_CAUSE        (1UL << 9)
+#define        VP_FW_ACC       (1UL << 63)
+
+/* init vp env with initializing vm_buffer */
+#define        VP_INIT_ENV_INITALIZE  (VP_INITIALIZE | VP_FR_PMC |\
+       VP_OPCODE | VP_CAUSE | VP_FW_ACC)
+/* init vp env without initializing vm_buffer */
+#define        VP_INIT_ENV  (VP_FR_PMC | VP_OPCODE | VP_CAUSE | VP_FW_ACC)
+
+#define                PAL_VP_CREATE   265
+/* Stacked Virt. Initializes a new VPD for the operation of
+ * a new virtual processor in the virtual environment.
+ */
+#define                PAL_VP_ENV_INFO 266
+/*Stacked Virt. Returns the parameters needed to enter a virtual environment.*/
+#define                PAL_VP_EXIT_ENV 267
+/*Stacked Virt. Allows a logical processor to exit a virtual environment.*/
+#define                PAL_VP_INIT_ENV 268
+/*Stacked Virt. Allows a logical processor to enter a virtual environment.*/
+#define                PAL_VP_REGISTER 269
+/*Stacked Virt. Register a different host IVT for the virtual processor.*/
+#define                PAL_VP_RESUME   270
+/* Renamed from PAL_VP_RESUME */
+#define                PAL_VP_RESTORE  270
+/*Stacked Virt. Resumes virtual processor operation on the logical processor.*/
+#define                PAL_VP_SUSPEND  271
+/* Renamed from PAL_VP_SUSPEND */
+#define                PAL_VP_SAVE     271
+/* Stacked Virt. Suspends operation for the specified virtual processor on
+ * the logical processor.
+ */
+#define                PAL_VP_TERMINATE 272
+/* Stacked Virt. Terminates operation for the specified virtual processor.*/
+
+union vac {
+       unsigned long value;
+       struct {
+               int a_int:1;
+               int a_from_int_cr:1;
+               int a_to_int_cr:1;
+               int a_from_psr:1;
+               int a_from_cpuid:1;
+               int a_cover:1;
+               int a_bsw:1;
+               long reserved:57;
+       };
+};
+
+union vdc {
+       unsigned long value;
+       struct {
+               int d_vmsw:1;
+               int d_extint:1;
+               int d_ibr_dbr:1;
+               int d_pmc:1;
+               int d_to_pmd:1;
+               int d_itm:1;
+               long reserved:58;
+       };
+};
+
+struct vpd {
+       union vac   vac;
+       union vdc   vdc;
+       unsigned long  virt_env_vaddr;
+       unsigned long  reserved1[29];
+       unsigned long  vhpi;
+       unsigned long  reserved2[95];
+       unsigned long  vgr[16];
+       unsigned long  vbgr[16];
+       unsigned long  vnat;
+       unsigned long  vbnat;
+       unsigned long  vcpuid[5];
+       unsigned long  reserved3[11];
+       unsigned long  vpsr;
+       unsigned long  vpr;
+       unsigned long  reserved4[76];
+       union {
+               unsigned long  vcr[128];
+               struct {
+                       unsigned long dcr;
+                       unsigned long itm;
+                       unsigned long iva;
+                       unsigned long rsv1[5];
+                       unsigned long pta;
+                       unsigned long rsv2[7];
+                       unsigned long ipsr;
+                       unsigned long isr;
+                       unsigned long rsv3;
+                       unsigned long iip;
+                       unsigned long ifa;
+                       unsigned long itir;
+                       unsigned long iipa;
+                       unsigned long ifs;
+                       unsigned long iim;
+                       unsigned long iha;
+                       unsigned long rsv4[38];
+                       unsigned long lid;
+                       unsigned long ivr;
+                       unsigned long tpr;
+                       unsigned long eoi;
+                       unsigned long irr[4];
+                       unsigned long itv;
+                       unsigned long pmv;
+                       unsigned long cmcv;
+                       unsigned long rsv5[5];
+                       unsigned long lrr0;
+                       unsigned long lrr1;
+                       unsigned long rsv6[46];
+               };
+       };
+       unsigned long  reserved5[128];
+       unsigned long  reserved6[3456];
+       unsigned long  vmm_avail[128];
+       unsigned long  reserved7[4096];
+};
+
+#define PAL_PROC_VM_BIT                (1UL << 40)
+#define PAL_PROC_VMSW_BIT      (1UL << 54)
+
+static inline s64 ia64_pal_vp_env_info(u64 *buffer_size,
+               u64 *vp_env_info)
+{
+       struct ia64_pal_retval iprv;
+       PAL_CALL_STK(iprv, PAL_VP_ENV_INFO, 0, 0, 0);
+       *buffer_size = iprv.v0;
+       *vp_env_info = iprv.v1;
+       return iprv.status;
+}
+
+static inline s64 ia64_pal_vp_exit_env(u64 iva)
+{
+       struct ia64_pal_retval iprv;
+
+       PAL_CALL_STK(iprv, PAL_VP_EXIT_ENV, (u64)iva, 0, 0);
+       return iprv.status;
+}
+
+static inline s64 ia64_pal_vp_init_env(u64 config_options, u64 pbase_addr,
+                       u64 vbase_addr, u64 *vsa_base)
+{
+       struct ia64_pal_retval iprv;
+
+       PAL_CALL_STK(iprv, PAL_VP_INIT_ENV, config_options, pbase_addr,
+                       vbase_addr);
+       *vsa_base = iprv.v0;
+
+       return iprv.status;
+}
+
+static inline s64 ia64_pal_vp_restore(u64 *vpd, u64 pal_proc_vector)
+{
+       struct ia64_pal_retval iprv;
+
+       PAL_CALL_STK(iprv, PAL_VP_RESTORE, (u64)vpd, pal_proc_vector, 0);
+
+       return iprv.status;
+}
+
+static inline s64 ia64_pal_vp_save(u64 *vpd, u64 pal_proc_vector)
+{
+       struct ia64_pal_retval iprv;
+
+       PAL_CALL_STK(iprv, PAL_VP_SAVE, (u64)vpd, pal_proc_vector, 0);
+
+       return iprv.status;
+}
+
+#endif
+
+/* VPD field offsets */
+#define VPD_VAC_START_OFFSET           0
+#define VPD_VDC_START_OFFSET           8
+#define VPD_VHPI_START_OFFSET          256
+#define VPD_VGR_START_OFFSET           1024
+#define VPD_VBGR_START_OFFSET          1152
+#define VPD_VNAT_START_OFFSET          1280
+#define VPD_VBNAT_START_OFFSET         1288
+#define VPD_VCPUID_START_OFFSET                1296
+#define VPD_VPSR_START_OFFSET          1424
+#define VPD_VPR_START_OFFSET           1432
+#define VPD_VRSE_CFLE_START_OFFSET     1440
+#define VPD_VCR_START_OFFSET           2048
+#define VPD_VTPR_START_OFFSET          2576
+#define VPD_VRR_START_OFFSET           3072
+#define VPD_VMM_VAIL_START_OFFSET      31744
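+
+/*
+ * These offsets are meant to mirror the struct vpd layout above; a
+ * compile-time check along these lines would catch any drift:
+ *
+ *     BUILD_BUG_ON(offsetof(struct vpd, vpsr) != VPD_VPSR_START_OFFSET);
+ *     BUILD_BUG_ON(offsetof(struct vpd, vcr) != VPD_VCR_START_OFFSET);
+ */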
+
+/*Virtualization faults*/
+
+#define EVENT_MOV_TO_AR                         1
+#define EVENT_MOV_TO_AR_IMM             2
+#define EVENT_MOV_FROM_AR               3
+#define EVENT_MOV_TO_CR                         4
+#define EVENT_MOV_FROM_CR               5
+#define EVENT_MOV_TO_PSR                6
+#define EVENT_MOV_FROM_PSR              7
+#define EVENT_ITC_D                     8
+#define EVENT_ITC_I                     9
+#define EVENT_MOV_TO_RR                         10
+#define EVENT_MOV_TO_DBR                11
+#define EVENT_MOV_TO_IBR                12
+#define EVENT_MOV_TO_PKR                13
+#define EVENT_MOV_TO_PMC                14
+#define EVENT_MOV_TO_PMD                15
+#define EVENT_ITR_D                     16
+#define EVENT_ITR_I                     17
+#define EVENT_MOV_FROM_RR               18
+#define EVENT_MOV_FROM_DBR              19
+#define EVENT_MOV_FROM_IBR              20
+#define EVENT_MOV_FROM_PKR              21
+#define EVENT_MOV_FROM_PMC              22
+#define EVENT_MOV_FROM_CPUID            23
+#define EVENT_SSM                       24
+#define EVENT_RSM                       25
+#define EVENT_PTC_L                     26
+#define EVENT_PTC_G                     27
+#define EVENT_PTC_GA                    28
+#define EVENT_PTR_D                     29
+#define EVENT_PTR_I                     30
+#define EVENT_THASH                     31
+#define EVENT_TTAG                      32
+#define EVENT_TPA                       33
+#define EVENT_TAK                       34
+#define EVENT_PTC_E                     35
+#define EVENT_COVER                     36
+#define EVENT_RFI                       37
+#define EVENT_BSW_0                     38
+#define EVENT_BSW_1                     39
+#define EVENT_VMSW                      40
+
+/* PAL virtual services offsets */
+#define PAL_VPS_RESUME_NORMAL           0x0000
+#define PAL_VPS_RESUME_HANDLER          0x0400
+#define PAL_VPS_SYNC_READ               0x0800
+#define PAL_VPS_SYNC_WRITE              0x0c00
+#define PAL_VPS_SET_PENDING_INTERRUPT   0x1000
+#define PAL_VPS_THASH                   0x1400
+#define PAL_VPS_TTAG                    0x1800
+#define PAL_VPS_RESTORE                 0x1c00
+#define PAL_VPS_SAVE                    0x2000
+
+#endif /* _KVM_VT_I_H */
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
new file mode 100644 (file)
index 0000000..def4576
--- /dev/null
@@ -0,0 +1,636 @@
+/*
+ * vtlb.c: guest virtual tlb handling module.
+ * Copyright (c) 2004, Intel Corporation.
+ *  Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com>
+ *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ *
+ * Copyright (c) 2007, Intel Corporation.
+ *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
+ *  Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include "vcpu.h"
+
+#include <linux/rwsem.h>
+
+#include <asm/tlb.h>
+
+/*
+ * Check to see if the address rid:va is translated by the TLB
+ */
+
+static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va)
+{
+       return ((trp->p) && (trp->rid == rid)
+                               && ((va-trp->vadr) < PSIZE(trp->ps)));
+}
+
+/*
+ * Only for GUEST TR format.
+ */
+static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva)
+{
+       u64 sa1, ea1;
+
+       if (!trp->p || trp->rid != rid)
+               return 0;
+
+       sa1 = trp->vadr;
+       ea1 = sa1 + PSIZE(trp->ps) - 1;
+       eva -= 1;
+       if ((sva > ea1) || (sa1 > eva))
+               return 0;
+       else
+               return 1;
+
+}
+
+void machine_tlb_purge(u64 va, u64 ps)
+{
+       ia64_ptcl(va, ps << 2);
+}
+
+void local_flush_tlb_all(void)
+{
+       int i, j;
+       unsigned long flags, count0, count1;
+       unsigned long stride0, stride1, addr;
+
+       addr    = current_vcpu->arch.ptce_base;
+       count0  = current_vcpu->arch.ptce_count[0];
+       count1  = current_vcpu->arch.ptce_count[1];
+       stride0 = current_vcpu->arch.ptce_stride[0];
+       stride1 = current_vcpu->arch.ptce_stride[1];
+
+       local_irq_save(flags);
+       for (i = 0; i < count0; ++i) {
+               for (j = 0; j < count1; ++j) {
+                       ia64_ptce(addr);
+                       addr += stride1;
+               }
+               addr += stride0;
+       }
+       local_irq_restore(flags);
+       ia64_srlz_i();          /* srlz.i implies srlz.d */
+}
+
+int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref)
+{
+       union ia64_rr    vrr;
+       union ia64_pta   vpta;
+       struct  ia64_psr   vpsr;
+
+       vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
+       vrr.val = vcpu_get_rr(vcpu, vadr);
+       vpta.val = vcpu_get_pta(vcpu);
+
+       if (vrr.ve & vpta.ve) {
+               switch (ref) {
+               case DATA_REF:
+               case NA_REF:
+                       return vpsr.dt;
+               case INST_REF:
+                       return vpsr.dt && vpsr.it && vpsr.ic;
+               case RSE_REF:
+                       return vpsr.dt && vpsr.rt;
+
+               }
+       }
+       return 0;
+}
+
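+/*
+ * vsa_thash() computes the bucket and tag for the software hash: each
+ * thash_data entry is 32 bytes (hence index << 5); the index combines the
+ * low 8 bits of the RID with the low pfn bits, and the tag keeps RID bits
+ * 8..23 together with the remaining pfn bits, so entries that hash to the
+ * same bucket can still be told apart.
+ */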
+struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag)
+{
+       u64 index, pfn, rid, pfn_bits;
+
+       pfn_bits = vpta.size - 5 - 8;
+       pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
+       rid = _REGION_ID(vrr);
+       index = ((rid & 0xff) << pfn_bits)|(pfn & ((1UL << pfn_bits) - 1));
+       *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);
+
+       return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) +
+                               (index << 5));
+}
+
+struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
+{
+
+       struct thash_data *trp;
+       int  i;
+       u64 rid;
+
+       rid = vcpu_get_rr(vcpu, va);
+       rid = rid & RR_RID_MASK;
+       if (type == D_TLB) {
+               if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
+                       for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
+                                               i < NDTRS; i++, trp++) {
+                               if (__is_tr_translated(trp, rid, va))
+                                       return trp;
+                       }
+               }
+       } else {
+               if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
+                       for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
+                                       i < NITRS; i++, trp++) {
+                               if (__is_tr_translated(trp, rid, va))
+                                       return trp;
+                       }
+               }
+       }
+
+       return NULL;
+}
+
+static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
+{
+       union ia64_rr rr;
+       struct thash_data *head;
+       unsigned long ps, gpaddr;
+
+       ps = itir_ps(itir);
+
+       gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
+               (ifa & ((1UL << ps) - 1));
+
+       rr.val = ia64_get_rr(ifa);
+       head = (struct thash_data *)ia64_thash(ifa);
+       head->etag = INVALID_TI_TAG;
+       ia64_mf();
+       head->page_flags = pte & ~PAGE_FLAGS_RV_MASK;
+       head->itir = rr.ps << 2;
+       head->etag = ia64_ttag(ifa);
+       head->gpaddr = gpaddr;
+}
+
+void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
+{
+       u64 i, dirty_pages = 1;
+       u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
+       spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+       void *dirty_bitmap = (void *)v - (KVM_VCPU_OFS + v->vcpu_id * VCPU_SIZE)
+                                               + KVM_MEM_DIRTY_LOG_OFS;
+       dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
+
+       vmm_spin_lock(lock);
+       for (i = 0; i < dirty_pages; i++) {
+               /* avoid RMW */
+               if (!test_bit(base_gfn + i, dirty_bitmap))
+                       set_bit(base_gfn + i , dirty_bitmap);
+       }
+       vmm_spin_unlock(lock);
+}
+
+void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
+{
+       u64 phy_pte, psr;
+       union ia64_rr mrr;
+
+       mrr.val = ia64_get_rr(va);
+       phy_pte = translate_phy_pte(&pte, itir, va);
+
+       if (itir_ps(itir) >= mrr.ps) {
+               vhpt_insert(phy_pte, itir, va, pte);
+       } else {
+               phy_pte  &= ~PAGE_FLAGS_RV_MASK;
+               psr = ia64_clear_ic();
+               ia64_itc(type, va, phy_pte, itir_ps(itir));
+               ia64_set_psr(psr);
+       }
+
+       if (!(pte&VTLB_PTE_IO))
+               mark_pages_dirty(v, pte, itir_ps(itir));
+}
+
+/*
+ *   vhpt lookup
+ */
+struct thash_data *vhpt_lookup(u64 va)
+{
+       struct thash_data *head;
+       u64 tag;
+
+       head = (struct thash_data *)ia64_thash(va);
+       tag = ia64_ttag(va);
+       if (head->etag == tag)
+               return head;
+       return NULL;
+}
+
+u64 guest_vhpt_lookup(u64 iha, u64 *pte)
+{
+       u64 ret;
+       struct thash_data *data;
+
+       data = __vtr_lookup(current_vcpu, iha, D_TLB);
+       if (data != NULL)
+               thash_vhpt_insert(current_vcpu, data->page_flags,
+                       data->itir, iha, D_TLB);
+
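+       /*
+        * With interruption collection off, speculatively load the guest
+        * VHPT entry at iha (ld8.s) and test the NaT bit: a NaT means the
+        * load faulted, so return 1; otherwise store the entry (bits 0..52)
+        * through *pte and return 0.
+        */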
+       asm volatile ("rsm psr.ic|psr.i;;"
+                       "srlz.d;;"
+                       "ld8.s r9=[%1];;"
+                       "tnat.nz p6,p7=r9;;"
+                       "(p6) mov %0=1;"
+                       "(p6) mov r9=r0;"
+                       "(p7) extr.u r9=r9,0,53;;"
+                       "(p7) mov %0=r0;"
+                       "(p7) st8 [%2]=r9;;"
+                       "ssm psr.ic;;"
+                       "srlz.d;;"
+                       /* "ssm psr.i;;" once interrupts are enabled in the vmm, this needs fixing */
+                       : "=r"(ret) : "r"(iha), "r"(pte):"memory");
+
+       return ret;
+}
+
+/*
+ *  purge software guest tlb
+ */
+
+static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps)
+{
+       struct thash_data *cur;
+       u64 start, curadr, size, psbits, tag, rr_ps, num;
+       union ia64_rr vrr;
+       struct thash_cb *hcb = &v->arch.vtlb;
+
+       vrr.val = vcpu_get_rr(v, va);
+       psbits = VMX(v, psbits[(va >> 61)]);
+       start = va & ~((1UL << ps) - 1);
+       while (psbits) {
+               curadr = start;
+               rr_ps = __ffs(psbits);
+               psbits &= ~(1UL << rr_ps);
+               num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
+               size = PSIZE(rr_ps);
+               vrr.ps = rr_ps;
+               while (num) {
+                       cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag);
+                       if (cur->etag == tag && cur->ps == rr_ps)
+                               cur->etag = INVALID_TI_TAG;
+                       curadr += size;
+                       num--;
+               }
+       }
+}
+
+
+/*
+ *  purge VHPT and machine TLB
+ */
+static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps)
+{
+       struct thash_data *cur;
+       u64 start, size, tag, num;
+       union ia64_rr rr;
+
+       start = va & ~((1UL << ps) - 1);
+       rr.val = ia64_get_rr(va);
+       size = PSIZE(rr.ps);
+       num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
+       while (num) {
+               cur = (struct thash_data *)ia64_thash(start);
+               tag = ia64_ttag(start);
+               if (cur->etag == tag)
+                       cur->etag = INVALID_TI_TAG;
+               start += size;
+               num--;
+       }
+       machine_tlb_purge(va, ps);
+}
+
+/*
+ * Insert an entry into hash TLB or VHPT.
+ * NOTES:
+ *  1: When inserting a VHPT entry into the hash, "va" must be an address
+ *     covered by the inserted machine VHPT entry.
+ *  2: The entry format is always the TLB format.
+ *  3: The caller needs to make sure the new entry will not overlap
+ *     with any existing entry.
+ */
+void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va)
+{
+       struct thash_data *head;
+       union ia64_rr vrr;
+       u64 tag;
+       struct thash_cb *hcb = &v->arch.vtlb;
+
+       vrr.val = vcpu_get_rr(v, va);
+       vrr.ps = itir_ps(itir);
+       VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
+       head = vsa_thash(hcb->pta, va, vrr.val, &tag);
+       head->page_flags = pte;
+       head->itir = itir;
+       head->etag = tag;
+}
+
+int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type)
+{
+       struct thash_data  *trp;
+       int  i;
+       u64 end, rid;
+
+       rid = vcpu_get_rr(vcpu, va);
+       rid = rid & RR_RID_MASK;
+       end = va + PSIZE(ps);
+       if (type == D_TLB) {
+               if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
+                       for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
+                                       i < NDTRS; i++, trp++) {
+                               if (__is_tr_overlap(trp, rid, va, end))
+                                       return i;
+                       }
+               }
+       } else {
+               if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
+                       for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
+                                       i < NITRS; i++, trp++) {
+                               if (__is_tr_overlap(trp, rid, va, end))
+                                       return i;
+                       }
+               }
+       }
+       return -1;
+}
+
+/*
+ * Purge entries in VTLB and VHPT
+ */
+void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps)
+{
+       if (vcpu_quick_region_check(v->arch.tc_regions, va))
+               vtlb_purge(v, va, ps);
+       vhpt_purge(v, va, ps);
+}
+
+void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
+{
+       u64 old_va = va;
+       va = REGION_OFFSET(va);
+       if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
+               vtlb_purge(v, va, ps);
+       vhpt_purge(v, va, ps);
+}
+
+u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
+{
+       u64 ps, ps_mask, paddr, maddr;
+       union pte_flags phy_pte;
+
+       ps = itir_ps(itir);
+       ps_mask = ~((1UL << ps) - 1);
+       phy_pte.val = *pte;
+       paddr = *pte;
+       paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
+       maddr = kvm_lookup_mpa(paddr >> PAGE_SHIFT);
+       if (maddr & GPFN_IO_MASK) {
+               *pte |= VTLB_PTE_IO;
+               return -1;
+       }
+       maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) |
+                                       (paddr & ~PAGE_MASK);
+       phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
+       return phy_pte.val;
+}
+
+/*
+ * Purge overlapping TCs and then insert the new entry to emulate itc ops.
+ *    Note: only TC entries can be purged and inserted this way.
+ *    A return value of 1 indicates the mapping is MMIO.
+ */
+int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
+                                               u64 ifa, int type)
+{
+       u64 ps;
+       u64 phy_pte;
+       union ia64_rr vrr, mrr;
+       int ret = 0;
+
+       ps = itir_ps(itir);
+       vrr.val = vcpu_get_rr(v, ifa);
+       mrr.val = ia64_get_rr(ifa);
+
+       phy_pte = translate_phy_pte(&pte, itir, ifa);
+
+       /* Ensure WB attribute if pte is related to a normal mem page,
+        * which is required by vga acceleration since qemu maps shared
+        * vram buffer with WB.
+        */
+       if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT)) {
+               pte &= ~_PAGE_MA_MASK;
+               phy_pte &= ~_PAGE_MA_MASK;
+       }
+
+       if (pte & VTLB_PTE_IO)
+               ret = 1;
+
+       vtlb_purge(v, ifa, ps);
+       vhpt_purge(v, ifa, ps);
+
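+       /*
+        * Three cases, depending on how the guest page size ps compares
+        * with the machine page size mrr.ps for this region:
+        *   ps == mrr.ps: insert into the machine VHPT (VTLB only for I/O);
+        *   ps >  mrr.ps: track the entry in the VTLB and also seed the VHPT;
+        *   ps <  mrr.ps: install it directly into the machine TLB with itc.
+        */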
+       if (ps == mrr.ps) {
+               if (!(pte&VTLB_PTE_IO)) {
+                       vhpt_insert(phy_pte, itir, ifa, pte);
+               } else {
+                       vtlb_insert(v, pte, itir, ifa);
+                       vcpu_quick_region_set(VMX(v, tc_regions), ifa);
+               }
+       } else if (ps > mrr.ps) {
+               vtlb_insert(v, pte, itir, ifa);
+               vcpu_quick_region_set(VMX(v, tc_regions), ifa);
+               if (!(pte&VTLB_PTE_IO))
+                       vhpt_insert(phy_pte, itir, ifa, pte);
+       } else {
+               u64 psr;
+               phy_pte  &= ~PAGE_FLAGS_RV_MASK;
+               psr = ia64_clear_ic();
+               ia64_itc(type, ifa, phy_pte, ps);
+               ia64_set_psr(psr);
+       }
+       if (!(pte&VTLB_PTE_IO))
+               mark_pages_dirty(v, pte, ps);
+
+       return ret;
+}
+
+/*
+ * Purge all TC and VHPT entries, including those in the hash table.
+ *
+ */
+
+void thash_purge_all(struct kvm_vcpu *v)
+{
+       int i;
+       struct thash_data *head;
+       struct thash_cb  *vtlb, *vhpt;
+       vtlb = &v->arch.vtlb;
+       vhpt = &v->arch.vhpt;
+
+       for (i = 0; i < 8; i++)
+               VMX(v, psbits[i]) = 0;
+
+       head = vtlb->hash;
+       for (i = 0; i < vtlb->num; i++) {
+               head->page_flags = 0;
+               head->etag = INVALID_TI_TAG;
+               head->itir = 0;
+               head->next = 0;
+               head++;
+       }
+
+       head = vhpt->hash;
+       for (i = 0; i < vhpt->num; i++) {
+               head->page_flags = 0;
+               head->etag = INVALID_TI_TAG;
+               head->itir = 0;
+               head->next = 0;
+               head++;
+       }
+
+       local_flush_tlb_all();
+}
+
+
+/*
+ * Look up the hash table and its collision chain to find an entry
+ * covering the address rid:va.
+ *
+ * INPUT:
+ *  Entries are in TLB format, for both the VHPT and the TLB.
+ */
+
+struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
+{
+       struct thash_data  *cch;
+       u64    psbits, ps, tag;
+       union ia64_rr vrr;
+
+       struct thash_cb *hcb = &v->arch.vtlb;
+
+       cch = __vtr_lookup(v, va, is_data);
+       if (cch)
+               return cch;
+
+       if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
+               return NULL;
+
+       psbits = VMX(v, psbits[(va >> 61)]);
+       vrr.val = vcpu_get_rr(v, va);
+       while (psbits) {
+               ps = __ffs(psbits);
+               psbits &= ~(1UL << ps);
+               vrr.ps = ps;
+               cch = vsa_thash(hcb->pta, va, vrr.val, &tag);
+               if (cch->etag == tag && cch->ps == ps)
+                       return cch;
+       }
+
+       return NULL;
+}
+
+
+/*
+ * Initialize internal control data before service.
+ */
+void thash_init(struct thash_cb *hcb, u64 sz)
+{
+       int i;
+       struct thash_data *head;
+
+       hcb->pta.val = (unsigned long)hcb->hash;
+       hcb->pta.vf = 1;
+       hcb->pta.ve = 1;
+       hcb->pta.size = sz;
+       head = hcb->hash;
+       for (i = 0; i < hcb->num; i++) {
+               head->page_flags = 0;
+               head->itir = 0;
+               head->etag = INVALID_TI_TAG;
+               head->next = 0;
+               head++;
+       }
+}
+
+u64 kvm_lookup_mpa(u64 gpfn)
+{
+       u64 *base = (u64 *) KVM_P2M_BASE;
+       return *(base + gpfn);
+}
+
+u64 kvm_gpa_to_mpa(u64 gpa)
+{
+       u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);
+       return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
+}
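+/*
+ * kvm_gpa_to_mpa() above composes the machine address from the P2M entry's
+ * page frame and the page offset of the guest physical address; a caller
+ * could, for example, test for MMIO first with something like
+ * (kvm_lookup_mpa(gpa >> PAGE_SHIFT) & GPFN_IO_MASK).
+ */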
+
+
+/*
+ * Fetch guest bundle code.
+ * INPUT:
+ *  gip: guest ip
+ *  pbundle: used to return fetched bundle.
+ */
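+/*
+ * The guest IP is translated via the guest TRs/VTLB first; if that fails,
+ * the machine VHPT is consulted, and on a complete miss the stale machine
+ * TLB entry for gip is purged and IA64_FAULT is returned.
+ */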
+int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
+{
+       u64     gpip = 0;   /* guest physical IP*/
+       u64     *vpa;
+       struct thash_data    *tlb;
+       u64     maddr;
+
+       if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
+               /* I-side physical mode */
+               gpip = gip;
+       } else {
+               tlb = vtlb_lookup(vcpu, gip, I_TLB);
+               if (tlb)
+                       gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
+                               (gip & (PSIZE(tlb->ps) - 1));
+       }
+       if (gpip) {
+               maddr = kvm_gpa_to_mpa(gpip);
+       } else {
+               tlb = vhpt_lookup(gip);
+               if (tlb == NULL) {
+                       ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
+                       return IA64_FAULT;
+               }
+               maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
+                                       | (gip & (PSIZE(tlb->ps) - 1));
+       }
+       vpa = (u64 *)__kvm_va(maddr);
+
+       pbundle->i64[0] = *vpa++;
+       pbundle->i64[1] = *vpa;
+
+       return IA64_NO_FAULT;
+}
+
+
+void kvm_init_vhpt(struct kvm_vcpu *v)
+{
+       v->arch.vhpt.num = VHPT_NUM_ENTRIES;
+       thash_init(&v->arch.vhpt, VHPT_SHIFT);
+       ia64_set_pta(v->arch.vhpt.pta.val);
+       /* Enable VHPT here? */
+}
+
+void kvm_init_vtlb(struct kvm_vcpu *v)
+{
+       v->arch.vtlb.num = VTLB_NUM_ENTRIES;
+       thash_init(&v->arch.vtlb, VTLB_SHIFT);
+}
index 5c1de53c8c1c07e02aa4add71f749aa3dd6458f2..fc6c6636ffdac444b280e1cf4bc29fa140483ba5 100644 (file)
@@ -682,15 +682,6 @@ mem_init (void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-void online_page(struct page *page)
-{
-       ClearPageReserved(page);
-       init_page_count(page);
-       __free_page(page);
-       totalram_pages++;
-       num_physpages++;
-}
-
 int arch_add_memory(int nid, u64 start, u64 size)
 {
        pg_data_t *pgdat;
index d52ec4e83409126647984095dd52bced8856d314..8caf42471f0d1e72d3b2adc007a9d8e8dcc76bf0 100644 (file)
@@ -168,7 +168,10 @@ setup_ptcg_sem(int max_purges, int nptcg_from)
        static int firstcpu = 1;
 
        if (toolatetochangeptcgsem) {
-               BUG_ON(max_purges < nptcg);
+               if (nptcg_from == NPTCG_FROM_PAL && max_purges == 0)
+                       BUG_ON(1 < nptcg);
+               else
+                       BUG_ON(max_purges < nptcg);
                return;
        }
 
index dfc6bf1c7b412084a6a3038a6b2bb35fb0c1da7d..49d3120415eb740154a5469be144e51598775d04 100644 (file)
@@ -550,11 +550,12 @@ static int __init sn2_ptc_init(void)
        if (!ia64_platform_is("sn2"))
                return 0;
 
-       if (!(proc_sn2_ptc = create_proc_entry(PTC_BASENAME, 0444, NULL))) {
+       proc_sn2_ptc = proc_create(PTC_BASENAME, 0444,
+                                  NULL, &proc_sn2_ptc_operations);
+       if (!proc_sn2_ptc) {
                printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME);
                return -EINVAL;
        }
-       proc_sn2_ptc->proc_fops = &proc_sn2_ptc_operations;
        spin_lock_init(&sn2_global_ptc_lock);
        return 0;
 }
index 62b3e9a496acc7bd0d716c74931cd6e58db78bf5..2526e5c783a42514fe6111b78c8c70c0233c4838 100644 (file)
@@ -139,30 +139,21 @@ static const struct file_operations proc_sn_topo_fops = {
 void register_sn_procfs(void)
 {
        static struct proc_dir_entry *sgi_proc_dir = NULL;
-       struct proc_dir_entry *pde;
 
        BUG_ON(sgi_proc_dir != NULL);
        if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL)))
                return;
 
-       pde = create_proc_entry("partition_id", 0444, sgi_proc_dir);
-       if (pde)
-               pde->proc_fops = &proc_partition_id_fops;
-       pde = create_proc_entry("system_serial_number", 0444, sgi_proc_dir);
-       if (pde)
-               pde->proc_fops = &proc_system_sn_fops;
-       pde = create_proc_entry("licenseID", 0444, sgi_proc_dir);
-       if (pde)
-               pde->proc_fops = &proc_license_id_fops;
-       pde = create_proc_entry("sn_force_interrupt", 0644, sgi_proc_dir);
-       if (pde)
-               pde->proc_fops = &proc_sn_force_intr_fops;
-       pde = create_proc_entry("coherence_id", 0444, sgi_proc_dir);
-       if (pde)
-               pde->proc_fops = &proc_coherence_id_fops;
-       pde = create_proc_entry("sn_topology", 0444, sgi_proc_dir);
-       if (pde)
-               pde->proc_fops = &proc_sn_topo_fops;
+       proc_create("partition_id", 0444, sgi_proc_dir,
+                   &proc_partition_id_fops);
+       proc_create("system_serial_number", 0444, sgi_proc_dir,
+                   &proc_system_sn_fops);
+       proc_create("licenseID", 0444, sgi_proc_dir, &proc_license_id_fops);
+       proc_create("sn_force_interrupt", 0644, sgi_proc_dir,
+                   &proc_sn_force_intr_fops);
+       proc_create("coherence_id", 0444, sgi_proc_dir,
+                   &proc_coherence_id_fops);
+       proc_create("sn_topology", 0444, sgi_proc_dir, &proc_sn_topo_fops);
 }
 
 #endif /* CONFIG_PROC_FS */
index 18b94b792d5491bad4ccf3f650ce0feecfaed073..52175af299a0424516fc3299c8509487581956dc 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/dma-attrs.h>
 #include <asm/dma.h>
 #include <asm/sn/intr.h>
 #include <asm/sn/pcibus_provider_defs.h>
@@ -149,11 +150,12 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
 EXPORT_SYMBOL(sn_dma_free_coherent);
 
 /**
- * sn_dma_map_single - map a single page for DMA
+ * sn_dma_map_single_attrs - map a single page for DMA
  * @dev: device to map for
  * @cpu_addr: kernel virtual address of the region to map
  * @size: size of the region
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * Map the region pointed to by @cpu_addr for DMA and return the
  * DMA address.
@@ -163,42 +165,59 @@ EXPORT_SYMBOL(sn_dma_free_coherent);
  * no way of saving the dmamap handle from the alloc to later free
  * (which is pretty much unacceptable).
  *
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
+ *
  * TODO: simplify our interface;
  *       figure out how to save dmamap handle so can use two step.
  */
-dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
-                            int direction)
+dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr,
+                                  size_t size, int direction,
+                                  struct dma_attrs *attrs)
 {
        dma_addr_t dma_addr;
        unsigned long phys_addr;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
+       int dmabarr;
+
+       dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
        BUG_ON(dev->bus != &pci_bus_type);
 
        phys_addr = __pa(cpu_addr);
-       dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
+       if (dmabarr)
+               dma_addr = provider->dma_map_consistent(pdev, phys_addr,
+                                                       size, SN_DMA_ADDR_PHYS);
+       else
+               dma_addr = provider->dma_map(pdev, phys_addr, size,
+                                            SN_DMA_ADDR_PHYS);
+
        if (!dma_addr) {
                printk(KERN_ERR "%s: out of ATEs\n", __func__);
                return 0;
        }
        return dma_addr;
 }
-EXPORT_SYMBOL(sn_dma_map_single);
+EXPORT_SYMBOL(sn_dma_map_single_attrs);
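+
+/*
+ * A sketch of how a driver might request the write-barrier behaviour
+ * described above, assuming the generic dma_*_attrs() wrappers route to
+ * this provider:
+ *
+ *     struct dma_attrs attrs;
+ *
+ *     init_dma_attrs(&attrs);
+ *     dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
+ *     dma_handle = dma_map_single_attrs(dev, buf, size,
+ *                                       DMA_TO_DEVICE, &attrs);
+ */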
 
 /**
- * sn_dma_unmap_single - unamp a DMA mapped page
+ * sn_dma_unmap_single_attrs - unmap a DMA mapped page
  * @dev: device to sync
  * @dma_addr: DMA address to sync
  * @size: size of region
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * This routine is supposed to sync the DMA region specified
  * by @dma_handle into the coherence domain.  On SN, we're always cache
  * coherent, so we just need to free any ATEs associated with this mapping.
  */
-void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-                        int direction)
+void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr,
+                              size_t size, int direction,
+                              struct dma_attrs *attrs)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
@@ -207,19 +226,21 @@ void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 
        provider->dma_unmap(pdev, dma_addr, direction);
 }
-EXPORT_SYMBOL(sn_dma_unmap_single);
+EXPORT_SYMBOL(sn_dma_unmap_single_attrs);
 
 /**
- * sn_dma_unmap_sg - unmap a DMA scatterlist
+ * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist
  * @dev: device to unmap
  * @sg: scatterlist to unmap
  * @nhwentries: number of scatterlist entries
  * @direction: DMA direction
+ * @attrs: optional dma attributes
  *
  * Unmap a set of streaming mode DMA translations.
  */
-void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
-                    int nhwentries, int direction)
+void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
+                          int nhwentries, int direction,
+                          struct dma_attrs *attrs)
 {
        int i;
        struct pci_dev *pdev = to_pci_dev(dev);
@@ -234,25 +255,34 @@ void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
                sg->dma_length = 0;
        }
 }
-EXPORT_SYMBOL(sn_dma_unmap_sg);
+EXPORT_SYMBOL(sn_dma_unmap_sg_attrs);
 
 /**
- * sn_dma_map_sg - map a scatterlist for DMA
+ * sn_dma_map_sg_attrs - map a scatterlist for DMA
  * @dev: device to map for
  * @sg: scatterlist to map
  * @nhwentries: number of entries
  * @direction: direction of the DMA transaction
+ * @attrs: optional dma attributes
+ *
+ * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with
+ * dma_map_consistent() so that writes force a flush of pending DMA.
+ * (See "SGI Altix Architecture Considerations for Linux Device Drivers",
+ * Document Number: 007-4763-001)
  *
  * Maps each entry of @sg for DMA.
  */
-int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
-                 int direction)
+int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+                       int nhwentries, int direction, struct dma_attrs *attrs)
 {
        unsigned long phys_addr;
        struct scatterlist *saved_sg = sgl, *sg;
        struct pci_dev *pdev = to_pci_dev(dev);
        struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);
        int i;
+       int dmabarr;
+
+       dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs);
 
        BUG_ON(dev->bus != &pci_bus_type);
 
@@ -260,11 +290,19 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
         * Setup a DMA address for each entry in the scatterlist.
         */
        for_each_sg(sgl, sg, nhwentries, i) {
+               dma_addr_t dma_addr;
                phys_addr = SG_ENT_PHYS_ADDRESS(sg);
-               sg->dma_address = provider->dma_map(pdev,
-                                                   phys_addr, sg->length,
-                                                   SN_DMA_ADDR_PHYS);
+               if (dmabarr)
+                       dma_addr = provider->dma_map_consistent(pdev,
+                                                               phys_addr,
+                                                               sg->length,
+                                                               SN_DMA_ADDR_PHYS);
+               else
+                       dma_addr = provider->dma_map(pdev, phys_addr,
+                                                    sg->length,
+                                                    SN_DMA_ADDR_PHYS);
 
+               sg->dma_address = dma_addr;
                if (!sg->dma_address) {
                        printk(KERN_ERR "%s: out of ATEs\n", __func__);
 
@@ -272,7 +310,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
                         * Free any successfully allocated entries.
                         */
                        if (i > 0)
-                               sn_dma_unmap_sg(dev, saved_sg, i, direction);
+                               sn_dma_unmap_sg_attrs(dev, saved_sg, i,
+                                                     direction, attrs);
                        return 0;
                }
 
@@ -281,7 +320,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries,
 
        return nhwentries;
 }
-EXPORT_SYMBOL(sn_dma_map_sg);
+EXPORT_SYMBOL(sn_dma_map_sg_attrs);
 
 void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
                                size_t size, int direction)
index 246a8820c2236b814e636985078b93cf78dd6c3b..b1f012f6c4931636551dcb832c4044754691565e 100644 (file)
 #include <linux/stddef.h>
 #include <linux/sched.h>
 #include <linux/kernel_stat.h>
+#include <linux/kbuild.h>
 #include <asm/bootinfo.h>
 #include <asm/irq.h>
 #include <asm/amigahw.h>
 #include <linux/font.h>
 
-#define DEFINE(sym, val) \
-       asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
 int main(void)
 {
        /* offsets into the task struct */
index 2b412454cb419fb91d898df93fcd0ffe507b6c32..ded7dd2f67b2803688965a49f0abb72faf1e0463 100644 (file)
@@ -186,7 +186,7 @@ int setup_irq(unsigned int irq, struct irq_node *node)
 
        if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
                printk("%s: Incorrect IRQ %d from %s\n",
-                      __FUNCTION__, irq, node->devname);
+                      __func__, irq, node->devname);
                return -ENXIO;
        }
 
@@ -249,7 +249,7 @@ void free_irq(unsigned int irq, void *dev_id)
        unsigned long flags;
 
        if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
-               printk("%s: Incorrect IRQ %d\n", __FUNCTION__, irq);
+               printk("%s: Incorrect IRQ %d\n", __func__, irq);
                return;
        }
 
@@ -267,7 +267,7 @@ void free_irq(unsigned int irq, void *dev_id)
                node->handler = NULL;
        } else
                printk("%s: Removing probably wrong IRQ %d\n",
-                      __FUNCTION__, irq);
+                      __func__, irq);
 
        if (!irq_list[irq]) {
                if (contr->shutdown)
@@ -288,7 +288,7 @@ void enable_irq(unsigned int irq)
 
        if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
                printk("%s: Incorrect IRQ %d\n",
-                      __FUNCTION__, irq);
+                      __func__, irq);
                return;
        }
 
@@ -312,7 +312,7 @@ void disable_irq(unsigned int irq)
 
        if (irq >= NR_IRQS || !(contr = irq_controller[irq])) {
                printk("%s: Incorrect IRQ %d\n",
-                      __FUNCTION__, irq);
+                      __func__, irq);
                return;
        }
 
index 5b2799eb96a68f930d6d93e30873f6c796487c25..326fb9978094961221c14ce4a53a596ee3934816 100644 (file)
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/init.h>
-#include <linux/proc_fs.h>
 #include <linux/interrupt.h>
 
 #include <asm/bootinfo.h>
 
 int iop_scc_present,iop_ism_present;
 
-#ifdef CONFIG_PROC_FS
-static int iop_get_proc_info(char *, char **, off_t, int);
-#endif /* CONFIG_PROC_FS */
-
 /* structure for tracking channel listeners */
 
 struct listener {
@@ -299,12 +294,6 @@ void __init iop_init(void)
                iop_listeners[IOP_NUM_ISM][i].devname = NULL;
                iop_listeners[IOP_NUM_ISM][i].handler = NULL;
        }
-
-#if 0  /* Crashing in 2.4 now, not yet sure why.   --jmt */
-#ifdef CONFIG_PROC_FS
-       create_proc_info_entry("mac_iop", 0, &proc_root, iop_get_proc_info);
-#endif
-#endif
 }
 
 /*
@@ -637,77 +626,3 @@ irqreturn_t iop_ism_irq(int irq, void *dev_id)
        }
        return IRQ_HANDLED;
 }
-
-#ifdef CONFIG_PROC_FS
-
-char *iop_chan_state(int state)
-{
-       switch(state) {
-               case IOP_MSG_IDLE       : return "idle      ";
-               case IOP_MSG_NEW        : return "new       ";
-               case IOP_MSG_RCVD       : return "received  ";
-               case IOP_MSG_COMPLETE   : return "completed ";
-               default                 : return "unknown   ";
-       }
-}
-
-int iop_dump_one_iop(char *buf, int iop_num, char *iop_name)
-{
-       int i,len = 0;
-       volatile struct mac_iop *iop = iop_base[iop_num];
-
-       len += sprintf(buf+len, "%s IOP channel states:\n\n", iop_name);
-       len += sprintf(buf+len, "##  send_state  recv_state  device\n");
-       len += sprintf(buf+len, "------------------------------------------------\n");
-       for (i = 0 ; i < NUM_IOP_CHAN ; i++) {
-               len += sprintf(buf+len, "%2d  %10s  %10s  %s\n", i,
-                       iop_chan_state(iop_readb(iop, IOP_ADDR_SEND_STATE+i)),
-                       iop_chan_state(iop_readb(iop, IOP_ADDR_RECV_STATE+i)),
-                       iop_listeners[iop_num][i].handler?
-                                     iop_listeners[iop_num][i].devname : "");
-
-       }
-       len += sprintf(buf+len, "\n");
-       return len;
-}
-
-static int iop_get_proc_info(char *buf, char **start, off_t pos, int count)
-{
-       int len, cnt;
-
-       cnt = 0;
-       len =  sprintf(buf, "IOPs detected:\n\n");
-
-       if (iop_scc_present) {
-               len += sprintf(buf+len, "SCC IOP (%p): status %02X\n",
-                               iop_base[IOP_NUM_SCC],
-                               (uint) iop_base[IOP_NUM_SCC]->status_ctrl);
-       }
-       if (iop_ism_present) {
-               len += sprintf(buf+len, "ISM IOP (%p): status %02X\n\n",
-                               iop_base[IOP_NUM_ISM],
-                               (uint) iop_base[IOP_NUM_ISM]->status_ctrl);
-       }
-
-       if (iop_scc_present) {
-               len += iop_dump_one_iop(buf+len, IOP_NUM_SCC, "SCC");
-
-       }
-
-       if (iop_ism_present) {
-               len += iop_dump_one_iop(buf+len, IOP_NUM_ISM, "ISM");
-
-       }
-
-       if (len >= pos) {
-               if (!*start) {
-                       *start = buf + pos;
-                       cnt = len - pos;
-               } else {
-                       cnt += len;
-               }
-       }
-       return (count > cnt) ? cnt : count;
-}
-
-#endif /* CONFIG_PROC_FS */
index 50603d3dce843798b488f249b95bc53d984cdb76..3c943d2ec570b07f8e05c90701172b28bc964a85 100644 (file)
@@ -190,7 +190,7 @@ void oss_irq_enable(int irq) {
                        break;
 #ifdef DEBUG_IRQUSE
                default:
-                       printk("%s unknown irq %d\n",__FUNCTION__, irq);
+                       printk("%s unknown irq %d\n", __func__, irq);
                        break;
 #endif
        }
@@ -230,7 +230,7 @@ void oss_irq_disable(int irq) {
                        break;
 #ifdef DEBUG_IRQUSE
                default:
-                       printk("%s unknown irq %d\n", __FUNCTION__, irq);
+                       printk("%s unknown irq %d\n", __func__, irq);
                        break;
 #endif
        }
index f42caa79e4e89f9fadf73041d33ab81077e7d11f..d8fb9c5303ccf931bca250fa61e410849e886492 100644 (file)
@@ -69,6 +69,7 @@ void __init m68k_setup_node(int node)
  */
 
 void *empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
 
 void show_mem(void)
 {
@@ -79,7 +80,6 @@ void show_mem(void)
 
        printk("\nMem-info:\n");
        show_free_areas();
-       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_online_pgdat(pgdat) {
                for (i = 0; i < pgdat->node_spanned_pages; i++) {
                        struct page *page = pgdat->node_mem_map + i;
index 46161cef08b95520733b048b6cd40e53b7ad2716..9f0e3d59bf923e240bbe648a1d2e304659f38a23 100644 (file)
@@ -47,7 +47,7 @@ static int q40_irq_startup(unsigned int irq)
        switch (irq) {
        case 1: case 2: case 8: case 9:
        case 11: case 12: case 13:
-               printk("%s: ISA IRQ %d not implemented by HW\n", __FUNCTION__, irq);
+               printk("%s: ISA IRQ %d not implemented by HW\n", __func__, irq);
                return -ENXIO;
        }
        return 0;
index d97b89bae53cf10c824422308254148e55c6e325..c785d07c02cc249ce8e537f13210b9b3edccdb35 100644 (file)
 #include <linux/kernel_stat.h>
 #include <linux/ptrace.h>
 #include <linux/hardirq.h>
+#include <linux/kbuild.h>
 #include <asm/bootinfo.h>
 #include <asm/irq.h>
 #include <asm/thread_info.h>
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 int main(void)
 {
        /* offsets into the task struct */
@@ -91,6 +87,7 @@ int main(void)
        DEFINE(TI_TASK, offsetof(struct thread_info, task));
        DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain));
        DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+       DEFINE(TI_PREEMPTCOUNT, offsetof(struct thread_info, preempt_count));
        DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
 
        return 0;
index 1e7ea6a3e1a146c9a092c33eba6073751f3c0d38..f4782d2dce8f7369fbdaeddd916dc61c57a2afa9 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/segment.h>
 #include <asm/asm-offsets.h>
 #include <asm/entry.h>
+#include <asm/unistd.h>
 
 .text
 
@@ -140,3 +141,11 @@ ENTRY(sys_rt_sigreturn)
        RESTORE_SWITCH_STACK
        rts
 
+ENTRY(ret_from_user_signal)
+       moveq #__NR_sigreturn,%d0
+       trap #0
+
+ENTRY(ret_from_user_rt_signal)
+       move #__NR_rt_sigreturn,%d0
+       trap #0
+
index d6f0200316febaa7f8f1fb76c09c0b96e91b0ad1..03f4fe6a2fc0e20e6e28f233cb8718a4799dd9b6 100644 (file)
@@ -162,7 +162,7 @@ void __init setup_arch(char **cmdline_p)
        printk(KERN_INFO "DragonEngine II board support by Georges Menie\n");
 #endif
 #ifdef CONFIG_M5235EVB
-       printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)");
+       printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
 #endif
 
 #ifdef DEBUG
index 70371378db868de2ce8cabf544993e44d4ef8aa2..bbfcae9e52b4b56ff76932e004e58916c8e12d64 100644 (file)
@@ -51,6 +51,8 @@
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
+void ret_from_user_signal(void);
+void ret_from_user_rt_signal(void);
 asmlinkage int do_signal(sigset_t *oldset, struct pt_regs *regs);
 
 /*
@@ -539,10 +541,6 @@ static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
        return err;
 }
 
-static inline void push_cache (unsigned long vaddr)
-{
-}
-
 static inline void *
 get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
 {
@@ -586,16 +584,11 @@ static void setup_frame (int sig, struct k_sigaction *ka,
        err |= copy_to_user (&frame->sc, &context, sizeof(context));
 
        /* Set up to return from userspace.  */
-       err |= __put_user(frame->retcode, &frame->pretcode);
-       /* moveq #,d0; trap #0 */
-       err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
-                         (long *)(frame->retcode));
+       err |= __put_user((void *) ret_from_user_signal, &frame->pretcode);
 
        if (err)
                goto give_sigsegv;
 
-       push_cache ((unsigned long) &frame->retcode);
-
        /* Set up registers for signal handler */
        wrusp ((unsigned long) frame);
        regs->pc = (unsigned long) ka->sa.sa_handler;
@@ -655,17 +648,11 @@ static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
        err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
 
        /* Set up to return from userspace.  */
-       err |= __put_user(frame->retcode, &frame->pretcode);
-       /* moveq #,d0; notb d0; trap #0 */
-       err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
-                         (long *)(frame->retcode + 0));
-       err |= __put_user(0x4e40, (short *)(frame->retcode + 4));
+       err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode);
 
        if (err)
                goto give_sigsegv;
 
-       push_cache ((unsigned long) &frame->retcode);
-
        /* Set up registers for signal handler */
        wrusp ((unsigned long) frame);
        regs->pc = (unsigned long) ka->sa.sa_handler;
index 437a061d8b94a74745f0e7d55cc96c14d9276e08..ec9aea652e79d0665adcb36fe5e9cd49d9625a51 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <linux/ptrace.h>
+#include <linux/kallsyms.h>
 
 #include <asm/setup.h>
 #include <asm/fpu.h>
@@ -102,56 +103,47 @@ asmlinkage void buserr_c(struct frame *fp)
        force_sig(SIGSEGV, current);
 }
 
-
 int kstack_depth_to_print = 48;
 
-void show_stack(struct task_struct *task, unsigned long *stack)
+static void __show_stack(struct task_struct *task, unsigned long *stack)
 {
        unsigned long *endstack, addr;
-       extern char _start, _etext;
+       unsigned long *last_stack;
        int i;
 
-       if (!stack) {
-               if (task)
-                       stack = (unsigned long *)task->thread.ksp;
-               else
-                       stack = (unsigned long *)&stack;
-       }
+       if (!stack)
+               stack = (unsigned long *)task->thread.ksp;
 
        addr = (unsigned long) stack;
        endstack = (unsigned long *) PAGE_ALIGN(addr);
 
        printk(KERN_EMERG "Stack from %08lx:", (unsigned long)stack);
        for (i = 0; i < kstack_depth_to_print; i++) {
-               if (stack + 1 > endstack)
+               if (stack + 1 + i > endstack)
                        break;
                if (i % 8 == 0)
                        printk("\n" KERN_EMERG "       ");
-               printk(" %08lx", *stack++);
+               printk(" %08lx", *(stack + i));
        }
        printk("\n");
 
-       printk(KERN_EMERG "Call Trace:");
-       i = 0;
-       while (stack + 1 <= endstack) {
-               addr = *stack++;
-               /*
-                * If the address is either in the text segment of the
-                * kernel, or in the region which contains vmalloc'ed
-                * memory, it *may* be the address of a calling
-                * routine; if so, print it so that someone tracing
-                * down the cause of the crash will be able to figure
-                * out the call path that was taken.
-                */
-               if (((addr >= (unsigned long) &_start) &&
-                    (addr <= (unsigned long) &_etext))) {
-                       if (i % 4 == 0)
-                               printk("\n" KERN_EMERG "       ");
-                       printk(" [<%08lx>]", addr);
-                       i++;
-               }
+#ifdef CONFIG_FRAME_POINTER
+       printk(KERN_EMERG "Call Trace:\n");
+
+       last_stack = stack - 1;
+       while (stack <= endstack && stack > last_stack) {
+
+               addr = *(stack + 1);
+               printk(KERN_EMERG " [%08lx] ", addr);
+               print_symbol(KERN_CONT "%s\n", addr);
+
+               last_stack = stack;
+               stack = (unsigned long *)*stack;
        }
        printk("\n");
+#else
+       printk(KERN_EMERG "CONFIG_FRAME_POINTER disabled, no symbolic call trace\n");
+#endif
 }
 
 void bad_super_trap(struct frame *fp)
@@ -298,19 +290,47 @@ asmlinkage void set_esp0(unsigned long ssp)
        current->thread.esp0 = ssp;
 }
 
-
 /*
  * The architecture-independent backtrace generator
  */
 void dump_stack(void)
 {
-       unsigned long stack;
+       /*
+        * We need frame pointers for this little trick, which works as follows:
+        *
+        * +------------+ 0x00
+        * | Next SP    |       -> 0x0c
+        * +------------+ 0x04
+        * | Caller     |
+        * +------------+ 0x08
+        * | Local vars |       -> our stack var
+        * +------------+ 0x0c
+        * | Next SP    |       -> 0x18, that is what we pass to show_stack()
+        * +------------+ 0x10
+        * | Caller     |
+        * +------------+ 0x14
+        * | Local vars |
+        * +------------+ 0x18
+        * | ...        |
+        * +------------+
+        */
 
-       show_stack(current, &stack);
-}
+       unsigned long *stack;
 
+       stack = (unsigned long *)&stack;
+       stack++;
+       __show_stack(current, stack);
+}
 EXPORT_SYMBOL(dump_stack);
 
+void show_stack(struct task_struct *task, unsigned long *stack)
+{
+       if (!stack && !task)
+               dump_stack();
+       else
+               __show_stack(task, stack);
+}
+
 #ifdef CONFIG_M68KFPU_EMU
 asmlinkage void fpemu_signal(int signal, int code, void *addr)
 {
index b44edb08e21276f3dce89e8b938c4e6a5252596c..5592e0bf951f555f9efb18c55838215b80cc9db8 100644 (file)
@@ -64,6 +64,7 @@ SECTIONS {
                _stext = . ;
                TEXT_TEXT
                SCHED_TEXT
+               LOCK_TEXT
                *(.text.lock)
 
                . = ALIGN(16);          /* Exception table              */
@@ -73,6 +74,7 @@ SECTIONS {
 
                *(.rodata) *(.rodata.*)
                *(__vermagic)           /* Kernel version magic */
+               *(__markers_strings)
                *(.rodata1)
                *(.rodata.str1.1)
 
@@ -182,6 +184,7 @@ SECTIONS {
                *(COMMON)
                . = ALIGN(4) ;
                _ebss = . ;
+               _end = . ;
        } > BSS
 
 }
index a6692e958f6b93265380bfe7631b57d2fae5fe01..d01a5d2b75579190df5babe0f162a9b1eb947041 100644 (file)
@@ -48,7 +48,7 @@ static struct platform_device *m5206e_devices[] __initdata = {
 
 /***************************************************************************/
 
-static void __init m5206_uart_init_line(int line, int irq)
+static void __init m5206e_uart_init_line(int line, int irq)
 {
        if (line == 0) {
                writel(MCFSIM_ICR_LEVEL6 | MCFSIM_ICR_PRI1, MCF_MBAR + MCFSIM_UART1ICR);
index 2aca599a1ca714ae2b70ec3d6d89f2ce74dc631f..230bae691a7f4be9060b01397efd7cc20a045ace 100644 (file)
@@ -139,10 +139,6 @@ void __init config_BSP(char *commandp, int size)
        /* Copy command line from FLASH to local buffer... */
        memcpy(commandp, (char *) 0xf0004000, size);
        commandp[size-1] = 0;
-#elif defined(CONFIG_MTD_KeyTechnology)
-       /* Copy command line from FLASH to local buffer... */
-       memcpy(commandp, (char *) 0xffe06000, size);
-       commandp[size-1] = 0;
 #elif defined(CONFIG_CANCam)
        /* Copy command line from FLASH to local buffer... */
        memcpy(commandp, (char *) 0xf0010000, size);
index 036e1b73d94400b8619db75d71e271a5ce2592e3..dfdb5c2ed8e6d35a3ce45b0827c03b61833c2eaf 100644 (file)
 #include <asm/mcfuart.h>
 #include <asm/mcfqspi.h>
 
+#ifdef CONFIG_MTD_PARTITIONS
+#include <linux/mtd/partitions.h>
+#endif
+
 /***************************************************************************/
 
 void coldfire_reset(void);
+static void coldfire_qspi_cs_control(u8 cs, u8 command);
+
+/***************************************************************************/
+
+#if defined(CONFIG_SPI)
+
+#if defined(CONFIG_WILDFIRE)
+#define SPI_NUM_CHIPSELECTS    0x02
+#define SPI_PAR_VAL            0x07  /* Enable DIN, DOUT, CLK */
+#define SPI_CS_MASK            0x18
+
+#define FLASH_BLOCKSIZE                (1024*64)
+#define FLASH_NUMBLOCKS                16
+#define FLASH_TYPE             "m25p80"
+
+#define M25P80_CS              0
+#define MMC_CS                 1
+
+#ifdef CONFIG_MTD_PARTITIONS
+static struct mtd_partition stm25p_partitions[] = {
+       /* sflash */
+       [0] = {
+               .name = "stm25p80",
+               .offset = 0x00000000,
+               .size = FLASH_BLOCKSIZE * FLASH_NUMBLOCKS,
+               .mask_flags = 0
+       }
+};
+
+#endif
+
+#elif defined(CONFIG_WILDFIREMOD)
+
+#define SPI_NUM_CHIPSELECTS    0x08
+#define SPI_PAR_VAL            0x07  /* Enable DIN, DOUT, CLK */
+#define SPI_CS_MASK            0x78
+
+#define FLASH_BLOCKSIZE                (1024*64)
+#define FLASH_NUMBLOCKS                64
+#define FLASH_TYPE             "m25p32"
+/* Reserve 1M for the kernel partition */
+#define FLASH_KERNEL_SIZE   (1024 * 1024)
+
+#define M25P80_CS              5
+#define MMC_CS                 6
+
+#ifdef CONFIG_MTD_PARTITIONS
+static struct mtd_partition stm25p_partitions[] = {
+       /* sflash */
+       [0] = {
+               .name = "kernel",
+               .offset = FLASH_BLOCKSIZE * FLASH_NUMBLOCKS - FLASH_KERNEL_SIZE,
+               .size = FLASH_KERNEL_SIZE,
+               .mask_flags = 0
+       },
+       [1] = {
+               .name = "image",
+               .offset = 0x00000000,
+               .size = FLASH_BLOCKSIZE * FLASH_NUMBLOCKS - FLASH_KERNEL_SIZE,
+               .mask_flags = 0
+       },
+       [2] = {
+               .name = "all",
+               .offset = 0x00000000,
+               .size = FLASH_BLOCKSIZE * FLASH_NUMBLOCKS,
+               .mask_flags = 0
+       }
+};
+#endif
+
+#else
+#define SPI_NUM_CHIPSELECTS    0x04
+#define SPI_PAR_VAL            0x7F  /* Enable DIN, DOUT, CLK, CS0 - CS4 */
+#endif
+
+#ifdef MMC_CS
+static struct coldfire_spi_chip flash_chip_info = {
+       .mode = SPI_MODE_0,
+       .bits_per_word = 16,
+       .del_cs_to_clk = 17,
+       .del_after_trans = 1,
+       .void_write_data = 0
+};
+
+static struct coldfire_spi_chip mmc_chip_info = {
+       .mode = SPI_MODE_0,
+       .bits_per_word = 16,
+       .del_cs_to_clk = 17,
+       .del_after_trans = 1,
+       .void_write_data = 0xFFFF
+};
+#endif
+
+#ifdef M25P80_CS
+static struct flash_platform_data stm25p80_platform_data = {
+       .name = "ST M25P80 SPI Flash chip",
+#ifdef CONFIG_MTD_PARTITIONS
+       .parts = stm25p_partitions,
+       .nr_parts = sizeof(stm25p_partitions) / sizeof(*stm25p_partitions),
+#endif
+       .type = FLASH_TYPE
+};
+#endif
+
+static struct spi_board_info spi_board_info[] __initdata = {
+#ifdef M25P80_CS
+       {
+               .modalias = "m25p80",
+               .max_speed_hz = 16000000,
+               .bus_num = 1,
+               .chip_select = M25P80_CS,
+               .platform_data = &stm25p80_platform_data,
+               .controller_data = &flash_chip_info
+       },
+#endif
+#ifdef MMC_CS
+       {
+               .modalias = "mmc_spi",
+               .max_speed_hz = 16000000,
+               .bus_num = 1,
+               .chip_select = MMC_CS,
+               .controller_data = &mmc_chip_info
+       }
+#endif
+};
+
+static struct coldfire_spi_master coldfire_master_info = {
+       .bus_num = 1,
+       .num_chipselect = SPI_NUM_CHIPSELECTS,
+       .irq_source = MCF5282_QSPI_IRQ_SOURCE,
+       .irq_vector = MCF5282_QSPI_IRQ_VECTOR,
+       .irq_mask = ((0x01 << MCF5282_QSPI_IRQ_SOURCE) | 0x01),
+       .irq_lp = 0x2B,  /* Level 5 and Priority 3 */
+       .par_val = SPI_PAR_VAL,
+       .cs_control = coldfire_qspi_cs_control,
+};
+
+static struct resource coldfire_spi_resources[] = {
+       [0] = {
+               .name = "qspi-par",
+               .start = MCF5282_QSPI_PAR,
+               .end = MCF5282_QSPI_PAR,
+               .flags = IORESOURCE_MEM
+       },
+
+       [1] = {
+               .name = "qspi-module",
+               .start = MCF5282_QSPI_QMR,
+               .end = MCF5282_QSPI_QMR + 0x18,
+               .flags = IORESOURCE_MEM
+       },
+
+       [2] = {
+               .name = "qspi-int-level",
+               .start = MCF5282_INTC0 + MCFINTC_ICR0 + MCF5282_QSPI_IRQ_SOURCE,
+               .end = MCF5282_INTC0 + MCFINTC_ICR0 + MCF5282_QSPI_IRQ_SOURCE,
+               .flags = IORESOURCE_MEM
+       },
+
+       [3] = {
+               .name = "qspi-int-mask",
+               .start = MCF5282_INTC0 + MCFINTC_IMRL,
+               .end = MCF5282_INTC0 + MCFINTC_IMRL,
+               .flags = IORESOURCE_MEM
+       }
+};
+
+static struct platform_device coldfire_spi = {
+       .name = "spi_coldfire",
+       .id = -1,
+       .resource = coldfire_spi_resources,
+       .num_resources = ARRAY_SIZE(coldfire_spi_resources),
+       .dev = {
+               .platform_data = &coldfire_master_info,
+       }
+};
+
+static void coldfire_qspi_cs_control(u8 cs, u8 command)
+{
+       u8 cs_bit = ((0x01 << cs) << 3) & SPI_CS_MASK;
+
+#if defined(CONFIG_WILDFIRE)
+       u8 cs_mask = ~(((0x01 << cs) << 3) & SPI_CS_MASK);
+#endif
+#if defined(CONFIG_WILDFIREMOD)
+       u8 cs_mask = (cs << 3) & SPI_CS_MASK;
+#endif
+
+       /*
+        * Don't do anything if the chip select is not
+        * one of the port qs pins.
+        */
+       if (command & QSPI_CS_INIT) {
+#if defined(CONFIG_WILDFIRE)
+               MCF5282_GPIO_DDRQS  |= cs_bit;
+               MCF5282_GPIO_PQSPAR &= ~cs_bit;
+#endif
+
+#if defined(CONFIG_WILDFIREMOD)
+               MCF5282_GPIO_DDRQS  |= SPI_CS_MASK;
+               MCF5282_GPIO_PQSPAR &= ~SPI_CS_MASK;
+#endif
+       }
+
+       if (command & QSPI_CS_ASSERT) {
+               MCF5282_GPIO_PORTQS &= ~SPI_CS_MASK;
+               MCF5282_GPIO_PORTQS |= cs_mask;
+       } else if (command & QSPI_CS_DROP) {
+               MCF5282_GPIO_PORTQS |= SPI_CS_MASK;
+       }
+}
+
+static int __init spi_dev_init(void)
+{
+       int retval;
+
+       retval = platform_device_register(&coldfire_spi);
+       if (retval < 0)
+               return retval;
+
+       if (ARRAY_SIZE(spi_board_info))
+               retval = spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
+
+       return retval;
+}
+
+#endif /* CONFIG_SPI */
 
 /***************************************************************************/
 
@@ -111,10 +342,43 @@ void mcf_autovector(unsigned int vec)
 
 /***************************************************************************/
 
+#ifdef CONFIG_WILDFIRE
+void wildfire_halt(void)
+{
+       writeb(0, 0x30000007);
+       writeb(0x2, 0x30000007);
+}
+#endif
+
+#ifdef CONFIG_WILDFIREMOD
+void wildfiremod_halt(void)
+{
+       printk(KERN_INFO "WildFireMod hibernating...\n");
+
+       /* Set portE.5 to Digital IO */
+       MCF5282_GPIO_PEPAR &= ~(1 << (5 * 2));
+
+       /* Make portE.5 an output */
+       MCF5282_GPIO_DDRE |= (1 << 5);
+
+       /* Now toggle portE.5 from low to high */
+       MCF5282_GPIO_PORTE &= ~(1 << 5);
+       MCF5282_GPIO_PORTE |= (1 << 5);
+
+       printk(KERN_EMERG "Failed to hibernate. Halting!\n");
+}
+#endif
+
 void __init config_BSP(char *commandp, int size)
 {
        mcf_disableall();
-       mach_reset = coldfire_reset;
+
+#ifdef CONFIG_WILDFIRE
+       mach_halt = wildfire_halt;
+#endif
+#ifdef CONFIG_WILDFIREMOD
+       mach_halt = wildfiremod_halt;
+#endif
 }
 
 /***************************************************************************/
index 92dc862fa826edb5432875d585a99d266f2d80ca..11cff6625dcce8ae12b5e6d77abc2b17680edda5 100644 (file)
@@ -124,8 +124,7 @@ void __init config_BSP(char *commandp, int size)
        mcf_setimr(MCFSIM_IMR_MASKALL);
 
 #if defined(CONFIG_NETtel) || defined(CONFIG_eLIA) || \
-      defined(CONFIG_DISKtel) || defined(CONFIG_SECUREEDGEMP3) || \
-      defined(CONFIG_CLEOPATRA)
+    defined(CONFIG_SECUREEDGEMP3) || defined(CONFIG_CLEOPATRA)
        /* Copy command line from FLASH to local buffer... */
        memcpy(commandp, (char *) 0xf0004000, size);
        commandp[size-1] = 0;
index 111b66dc737ba5f15e0d2af56b7a85e2db676050..1e3c0dcbd7acec7a1ac9070704d3109bd6d2980e 100644 (file)
@@ -103,9 +103,26 @@ ret_from_signal:
        addql   #4,%sp
 
 ret_from_exception:
+       move    #0x2700,%sr             /* disable intrs */
        btst    #5,%sp@(PT_SR)          /* check if returning to kernel */
        jeq     Luser_return            /* if so, skip resched, signals */
 
+#ifdef CONFIG_PREEMPT
+       movel   %sp,%d1                 /* get thread_info pointer */
+       andl    #-THREAD_SIZE,%d1       /* at base of kernel stack */
+       movel   %d1,%a0
+       movel   %a0@(TI_FLAGS),%d1      /* get thread_info->flags */
+       andl    #_TIF_NEED_RESCHED,%d1
+       jeq     Lkernel_return
+
+       movel   %a0@(TI_PREEMPTCOUNT),%d1
+       cmpl    #0,%d1
+       jne     Lkernel_return
+
+       pea     Lkernel_return
+       jmp     preempt_schedule_irq    /* preempt the kernel */
+#endif
+
 Lkernel_return:
        moveml  %sp@,%d1-%d5/%a0-%a2
        lea     %sp@(32),%sp            /* space for 8 regs */
@@ -140,6 +157,7 @@ Lreturn:
 
 Lwork_to_do:
        movel   %a0@(TI_FLAGS),%d1      /* get thread_info->flags */
+       move    #0x2000,%sr             /* enable intrs again */
        btst    #TIF_NEED_RESCHED,%d1
        jne     reschedule
 
index 8724ed3298d31f9de2dad9521126a75bb8d51298..e5a7c5d96364f73f5cf700a9ac70d9ee67d3bda8 100644 (file)
@@ -81,7 +81,9 @@ config MIPS_COBALT
 config MACH_DECSTATION
        bool "DECstations"
        select BOOT_ELF32
+       select CEVT_DS1287
        select CEVT_R4K
+       select CSRC_IOASIC
        select CSRC_R4K
        select CPU_DADDI_WORKAROUNDS if 64BIT
        select CPU_R4000_WORKAROUNDS if 64BIT
@@ -221,6 +223,7 @@ config MIPS_MALTA
        select DMA_NONCOHERENT
        select GENERIC_ISA_DMA
        select IRQ_CPU
+       select IRQ_GIC
        select HW_HAS_PCI
        select I8253
        select I8259
@@ -309,12 +312,12 @@ config MACH_VR41XX
        select GENERIC_HARDIRQS_NO__DO_IRQ
 
 config PNX8550_JBS
-       bool "Philips PNX8550 based JBS board"
+       bool "NXP PNX8550 based JBS board"
        select PNX8550
        select SYS_SUPPORTS_LITTLE_ENDIAN
 
 config PNX8550_STB810
-       bool "Philips PNX8550 based STB810 board"
+       bool "NXP PNX8550 based STB810 board"
        select PNX8550
        select SYS_SUPPORTS_LITTLE_ENDIAN
 
@@ -612,6 +615,7 @@ config TOSHIBA_JMR3927
        select SYS_SUPPORTS_LITTLE_ENDIAN
        select SYS_SUPPORTS_BIG_ENDIAN
        select GENERIC_HARDIRQS_NO__DO_IRQ
+       select GPIO_TXX9
 
 config TOSHIBA_RBTX4927
        bool "Toshiba RBTX49[23]7 board"
@@ -653,7 +657,7 @@ config TOSHIBA_RBTX4938
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_KGDB
        select GENERIC_HARDIRQS_NO__DO_IRQ
-       select GENERIC_GPIO
+       select GPIO_TXX9
        help
          This Toshiba board is based on the TX4938 processor. Say Y here to
          support this machine type
@@ -767,6 +771,9 @@ config BOOT_RAW
 config CEVT_BCM1480
        bool
 
+config CEVT_DS1287
+       bool
+
 config CEVT_GT641XX
        bool
 
@@ -782,12 +789,20 @@ config CEVT_TXX9
 config CSRC_BCM1480
        bool
 
+config CSRC_IOASIC
+       bool
+
 config CSRC_R4K
        bool
 
 config CSRC_SB1250
        bool
 
+config GPIO_TXX9
+       select GENERIC_GPIO
+       select HAVE_GPIO_LIB
+       bool
+
 config CFE
        bool
 
@@ -840,6 +855,9 @@ config MIPS_NILE4
 config MIPS_DISABLE_OBSOLETE_IDE
        bool
 
+config SYNC_R4K
+       bool
+
 config NO_IOPORT
        def_bool n
 
@@ -909,6 +927,9 @@ config IRQ_TXX9
 config IRQ_GT641XX
        bool
 
+config IRQ_GIC
+       bool
+
 config MIPS_BOARDS_GEN
        bool
 
@@ -1811,6 +1832,17 @@ config NR_CPUS
          performance should round up your number of processors to the next
          power of two.
 
+config MIPS_CMP
+       bool "MIPS CMP framework support"
+       depends on SMP
+       select SYNC_R4K
+       select SYS_SUPPORTS_SCHED_SMT
+       select WEAK_ORDERING
+       default n
+       help
+         This is a placeholder option for the GCMP work. It will need to
+         be handled differently...
+
 source "kernel/time/Kconfig"
 
 #
index fd7124c1b75a01b74091cb51112c22501adc2f6a..f18cf92650e350d746a299081dfd58dbeb468b34 100644 (file)
@@ -73,14 +73,4 @@ config RUNTIME_DEBUG
          include/asm-mips/debug.h for debuging macros.
          If unsure, say N.
 
-config MIPS_UNCACHED
-       bool "Run uncached"
-       depends on DEBUG_KERNEL && !SMP && !SGI_IP27
-       help
-         If you say Y here there kernel will disable all CPU caches.  This will
-         reduce the system's performance dramatically but can help finding
-         otherwise hard to track bugs.  It can also useful if you're doing
-         hardware debugging with a logic analyzer and need to see all traffic
-         on the bus.
-
 endmenu
index 1c62381f5c230c6b42bcb7ef95ff6148df553a4e..69648d01acc0a49e195259efbf039417793b9c36 100644 (file)
@@ -410,21 +410,21 @@ load-$(CONFIG_CASIO_E55)  += 0xffffffff80004000
 load-$(CONFIG_TANBAC_TB022X)   += 0xffffffff80000000
 
 #
-# Common Philips PNX8550
+# Common NXP PNX8550
 #
-core-$(CONFIG_SOC_PNX8550)     += arch/mips/philips/pnx8550/common/
+core-$(CONFIG_SOC_PNX8550)     += arch/mips/nxp/pnx8550/common/
 cflags-$(CONFIG_SOC_PNX8550)   += -Iinclude/asm-mips/mach-pnx8550
 
 #
-# Philips PNX8550 JBS board
+# NXP PNX8550 JBS board
 #
-libs-$(CONFIG_PNX8550_JBS)     += arch/mips/philips/pnx8550/jbs/
+libs-$(CONFIG_PNX8550_JBS)     += arch/mips/nxp/pnx8550/jbs/
 #cflags-$(CONFIG_PNX8550_JBS)  += -Iinclude/asm-mips/mach-pnx8550
 load-$(CONFIG_PNX8550_JBS)     += 0xffffffff80060000
 
-# Philips PNX8550 STB810 board
+# NXP PNX8550 STB810 board
 #
-libs-$(CONFIG_PNX8550_STB810)  += arch/mips/philips/pnx8550/stb810/
+libs-$(CONFIG_PNX8550_STB810)  += arch/mips/nxp/pnx8550/stb810/
 load-$(CONFIG_PNX8550_STB810)  += 0xffffffff80060000
 
 # NEC EMMA2RH boards
index 5c0d35d6e22a7758605a85c85f17af545459f766..8c93a05d73826e804ff82781ae5f50d28ed35029 100644 (file)
  *  as published by the Free Software Foundation; either version
  *  2 of the License, or (at your option) any later version.
  */
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/threads.h>
-#include <linux/init.h>
+
 #include <asm/mach-au1x00/au1000.h>
 
 struct cpu_spec* cur_cpu_spec[NR_CPUS];
index 57f17b41098dfa8134a9b7b500ea1f66547abb8b..53377dfc0640da33d85db3b7f47db242872ca5b1 100644 (file)
  */
 
 #include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-au1x00/au1xxx_dbdma.h>
-#include <asm/system.h>
-
 
 #if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
 
index 79e0b0a51acea821e1df5b6e18991412b1fa4221..eae1bb2ca26e72ab115c35e97030fbe52a5d877a 100644 (file)
@@ -1,5 +1,4 @@
 
-#include <asm/io.h>
 #include <asm/mach-au1x00/au1000.h>
 
 #ifdef CONFIG_KGDB
@@ -55,8 +54,7 @@ typedef         unsigned int  uint32;
 #define UART16550_READ(y)    (au_readl(DEBUG_BASE + y) & 0xff)
 #define UART16550_WRITE(y, z) (au_writel(z&0xff, DEBUG_BASE + y))
 
-extern unsigned long get_au1x00_uart_baud_base(void);
-extern unsigned long cal_r4koff(void);
+extern unsigned long calc_clock(void);
 
 void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
 {
@@ -64,7 +62,7 @@ void debugInit(uint32 baud, uint8 data, uint8 parity, uint8 stop)
        if (UART16550_READ(UART_MOD_CNTRL) != 0x3) {
                UART16550_WRITE(UART_MOD_CNTRL, 3);
        }
-       cal_r4koff();
+       calc_clock();
 
        /* disable interrupts */
        UART16550_WRITE(UART_IER, 0);
index c78260d4e837532e3e19b28459f2ced292a06119..95f69ea146e90a49d5ffdca4605af5fbb64df99e 100644 (file)
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
-#include <linux/sched.h>
 #include <linux/spinlock.h>
-#include <linux/string.h>
-#include <linux/delay.h>
 #include <linux/interrupt.h>
-#include <asm/system.h>
+
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-au1x00/au1000_dma.h>
 
index 0b658f1db4cedde3021e4eea3916091b36e4ab87..52545258997157c5bd849f637ca7acef02a8a4fd 100644 (file)
  *     others have a second one : GPIO2
  */
 
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/types.h>
 #include <linux/module.h>
 
-#include <asm/addrspace.h>
-
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/gpio.h>
 
index 3c7714f057acf7b01b9d7b4c23db7cff4f6ca1fc..f0626992fd75fce7b4f735d91c9db199ccdaba55 100644 (file)
@@ -1,7 +1,6 @@
 /*
- * Copyright 2001 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- *             ppopov@mvista.com or source@mvista.com
+ * Copyright 2001, 2007-2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
  *
  * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
  *
@@ -27,7 +26,6 @@
  */
 #include <linux/bitops.h>
 #include <linux/init.h>
-#include <linux/io.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 
@@ -591,7 +589,7 @@ void __init arch_init_irq(void)
                imp++;
        }
 
-       set_c0_status(ALLINTS);
+       set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4);
 
        /* Board specific IRQ initialization.
        */
index ce771487567d5e3ee29fc4bd01a9211e23bd8554..7e966b31e3e1e2a383eaff451ba6b572aa2e2592 100644 (file)
@@ -30,7 +30,7 @@
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
-#include <linux/types.h>
+
 #include <linux/pci.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
index 39d68126529770701c6668017edc19611ab59dbb..31d2a2270878832354a02e4cd7f6b4b6eaf03516 100644 (file)
@@ -3,18 +3,65 @@
  *
  * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
  *
+ * (C) Copyright Embedded Alley Solutions, Inc 2005
+ * Author: Pantelis Antoniou <pantelis@embeddedalley.com>
+ *
  * This file is licensed under the terms of the GNU General Public
  * License version 2.  This program is licensed "as is" without any
  * warranty of any kind, whether express or implied.
  */
-#include <linux/device.h>
+
 #include <linux/platform_device.h>
-#include <linux/kernel.h>
+#include <linux/serial_8250.h>
 #include <linux/init.h>
-#include <linux/resource.h>
 
 #include <asm/mach-au1x00/au1xxx.h>
 
+#define PORT(_base, _irq)                              \
+       {                                               \
+               .iobase         = _base,                \
+               .membase        = (void __iomem *)_base,\
+               .mapbase        = CPHYSADDR(_base),     \
+               .irq            = _irq,                 \
+               .regshift       = 2,                    \
+               .iotype         = UPIO_AU,              \
+               .flags          = UPF_SKIP_TEST         \
+       }
+
+static struct plat_serial8250_port au1x00_uart_data[] = {
+#if defined(CONFIG_SERIAL_8250_AU1X00)
+#if defined(CONFIG_SOC_AU1000)
+       PORT(UART0_ADDR, AU1000_UART0_INT),
+       PORT(UART1_ADDR, AU1000_UART1_INT),
+       PORT(UART2_ADDR, AU1000_UART2_INT),
+       PORT(UART3_ADDR, AU1000_UART3_INT),
+#elif defined(CONFIG_SOC_AU1500)
+       PORT(UART0_ADDR, AU1500_UART0_INT),
+       PORT(UART3_ADDR, AU1500_UART3_INT),
+#elif defined(CONFIG_SOC_AU1100)
+       PORT(UART0_ADDR, AU1100_UART0_INT),
+       PORT(UART1_ADDR, AU1100_UART1_INT),
+       PORT(UART3_ADDR, AU1100_UART3_INT),
+#elif defined(CONFIG_SOC_AU1550)
+       PORT(UART0_ADDR, AU1550_UART0_INT),
+       PORT(UART1_ADDR, AU1550_UART1_INT),
+       PORT(UART3_ADDR, AU1550_UART3_INT),
+#elif defined(CONFIG_SOC_AU1200)
+       PORT(UART0_ADDR, AU1200_UART0_INT),
+       PORT(UART1_ADDR, AU1200_UART1_INT),
+#endif
+#endif /* CONFIG_SERIAL_8250_AU1X00 */
+       { },
+};
+
+static struct platform_device au1xx0_uart_device = {
+       .name                   = "serial8250",
+       .id                     = PLAT8250_DEV_AU1X00,
+       .dev                    = {
+               .platform_data  = au1x00_uart_data,
+       },
+};
+
 /* OHCI (USB full speed host controller) */
 static struct resource au1xxx_usb_ohci_resources[] = {
        [0] = {
@@ -186,19 +233,6 @@ static struct resource au1200_lcd_resources[] = {
        }
 };
 
-static struct resource au1200_ide0_resources[] = {
-       [0] = {
-               .start          = AU1XXX_ATA_PHYS_ADDR,
-               .end            = AU1XXX_ATA_PHYS_ADDR + AU1XXX_ATA_PHYS_LEN - 1,
-               .flags          = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start          = AU1XXX_ATA_INT,
-               .end            = AU1XXX_ATA_INT,
-               .flags          = IORESOURCE_IRQ,
-       }
-};
-
 static u64 au1200_lcd_dmamask = ~(u32)0;
 
 static struct platform_device au1200_lcd_device = {
@@ -212,20 +246,6 @@ static struct platform_device au1200_lcd_device = {
        .resource       = au1200_lcd_resources,
 };
 
-
-static u64 ide0_dmamask = ~(u32)0;
-
-static struct platform_device au1200_ide0_device = {
-       .name           = "au1200-ide",
-       .id             = 0,
-       .dev = {
-               .dma_mask               = &ide0_dmamask,
-               .coherent_dma_mask      = 0xffffffff,
-       },
-       .num_resources = ARRAY_SIZE(au1200_ide0_resources),
-       .resource       = au1200_ide0_resources,
-};
-
 static u64 au1xxx_mmc_dmamask =  ~(u32)0;
 
 static struct platform_device au1xxx_mmc_device = {
@@ -245,31 +265,6 @@ static struct platform_device au1x00_pcmcia_device = {
        .id             = 0,
 };
 
-#ifdef CONFIG_MIPS_DB1200
-
-static struct resource smc91x_resources[] = {
-       [0] = {
-               .name   = "smc91x-regs",
-               .start  = AU1XXX_SMC91111_PHYS_ADDR,
-               .end    = AU1XXX_SMC91111_PHYS_ADDR + 0xfffff,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = AU1XXX_SMC91111_IRQ,
-               .end    = AU1XXX_SMC91111_IRQ,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
-static struct platform_device smc91x_device = {
-       .name           = "smc91x",
-       .id             = -1,
-       .num_resources  = ARRAY_SIZE(smc91x_resources),
-       .resource       = smc91x_resources,
-};
-
-#endif
-
 /* All Alchemy demoboards with I2C have this #define in their headers */
 #ifdef SMBUS_PSC_BASE
 static struct resource pbdb_smbus_resources[] = {
@@ -289,6 +284,7 @@ static struct platform_device pbdb_smbus_device = {
 #endif
 
 static struct platform_device *au1xxx_platform_devices[] __initdata = {
+       &au1xx0_uart_device,
        &au1xxx_usb_ohci_device,
        &au1x00_pcmcia_device,
 #ifdef CONFIG_FB_AU1100
@@ -299,12 +295,8 @@ static struct platform_device *au1xxx_platform_devices[] __initdata = {
        &au1xxx_usb_gdt_device,
        &au1xxx_usb_otg_device,
        &au1200_lcd_device,
-       &au1200_ide0_device,
        &au1xxx_mmc_device,
 #endif
-#ifdef CONFIG_MIPS_DB1200
-       &smc91x_device,
-#endif
 #ifdef SMBUS_PSC_BASE
        &pbdb_smbus_device,
 #endif
@@ -312,6 +304,13 @@ static struct platform_device *au1xxx_platform_devices[] __initdata = {
 
 int __init au1xxx_platform_init(void)
 {
+       unsigned int uartclk = get_au1x00_uart_baud_base() * 16;
+       int i;
+
+       /* Fill up uartclk. */
+       for (i = 0; au1x00_uart_data[i].flags ; i++)
+               au1x00_uart_data[i].uartclk = uartclk;
+
        return platform_add_devices(au1xxx_platform_devices, ARRAY_SIZE(au1xxx_platform_devices));
 }
 
index 54047d69b820c98195dfa534748019414d8e76ea..a8cd2c1b9e1b59f313ae98a0c6e8579e0afc6dca 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
 #include <linux/pm.h>
 #include <linux/pm_legacy.h>
-#include <linux/slab.h>
 #include <linux/sysctl.h>
 #include <linux/jiffies.h>
 
-#include <asm/string.h>
 #include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/system.h>
 #include <asm/cacheflush.h>
 #include <asm/mach-au1x00/au1000.h>
 
 
 #define DEBUG 1
 #ifdef DEBUG
-#  define DPRINTK(fmt, args...)        printk("%s: " fmt, __FUNCTION__ , ## args)
+#  define DPRINTK(fmt, args...)        printk("%s: " fmt, __func__, ## args)
 #else
 #  define DPRINTK(fmt, args...)
 #endif
 
 static void au1000_calibrate_delay(void);
 
-extern void set_au1x00_speed(unsigned int new_freq);
-extern unsigned int get_au1x00_speed(void);
-extern unsigned long get_au1x00_uart_baud_base(void);
-extern void set_au1x00_uart_baud_base(unsigned long new_baud_base);
 extern unsigned long save_local_and_disable(int controller);
 extern void restore_local_and_enable(int controller, unsigned long mask);
 extern void local_enable_irq(unsigned int irq_nr);
@@ -258,7 +251,6 @@ int au_sleep(void)
 static int pm_do_sleep(ctl_table * ctl, int write, struct file *file,
                       void __user *buffer, size_t * len, loff_t *ppos)
 {
-       int retval = 0;
 #ifdef SLEEP_TEST_TIMEOUT
 #define TMPBUFLEN2 16
        char buf[TMPBUFLEN2], *p;
@@ -278,36 +270,12 @@ static int pm_do_sleep(ctl_table * ctl, int write, struct file *file,
                p = buf;
                sleep_ticks = simple_strtoul(p, &p, 0);
 #endif
-               retval = pm_send_all(PM_SUSPEND, (void *) 2);
-
-               if (retval)
-                       return retval;
 
                au_sleep();
-               retval = pm_send_all(PM_RESUME, (void *) 0);
-       }
-       return retval;
-}
-
-static int pm_do_suspend(ctl_table * ctl, int write, struct file *file,
-                        void __user *buffer, size_t * len, loff_t *ppos)
-{
-       int retval = 0;
-
-       if (!write) {
-               *len = 0;
-       } else {
-               retval = pm_send_all(PM_SUSPEND, (void *) 2);
-               if (retval)
-                       return retval;
-               suspend_mode = 1;
-
-               retval = pm_send_all(PM_RESUME, (void *) 0);
        }
-       return retval;
+       return 0;
 }
 
-
 static int pm_do_freq(ctl_table * ctl, int write, struct file *file,
                      void __user *buffer, size_t * len, loff_t *ppos)
 {
@@ -420,14 +388,6 @@ static int pm_do_freq(ctl_table * ctl, int write, struct file *file,
 
 
 static struct ctl_table pm_table[] = {
-       {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "suspend",
-               .data           = NULL,
-               .maxlen         = 0,
-               .mode           = 0600,
-               .proc_handler   = &pm_do_suspend
-       },
        {
                .ctl_name       = CTL_UNNUMBERED,
                .procname       = "sleep",
index 90d70695aa60fc0c7c0f30e7fc0eb46def5483ca..f10af829e4ec02b1292df103f6c98fca511ab258 100644 (file)
@@ -33,8 +33,8 @@
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/module.h>
-#include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/string.h>
 
index 2705829cd466f35085a38daaef3b4a0ab8a67f42..e34c67e892937527fe42f96bb9364c6d59f54475 100644 (file)
@@ -28,7 +28,6 @@
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-#include <linux/types.h>
 #include <asm/mach-au1x00/au1000.h>
 
 #define SERIAL_BASE   UART_BASE
index b8638d293cf9c9f3ab53e6c08e260cf88894f0e8..60cec537c7450cd4bb6d129f8dc83a16591f1044 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/reboot.h>
-#include <asm/system.h>
+
 #include <asm/mach-au1x00/au1000.h>
 
 extern int au_sleep(void);
index 9e4ab80caab6b46fb800acc0271a861e39ecc990..0e86f7a6b4a7f5ad60fa1a24cab5b14d7c815508 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
-#include <linux/sched.h>
 #include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/pm.h>
 
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
 #include <asm/mipsregs.h>
 #include <asm/reboot.h>
-#include <asm/pgtable.h>
 #include <asm/time.h>
 
 #include <au1000.h>
@@ -49,8 +42,6 @@ extern void __init board_setup(void);
 extern void au1000_restart(char *);
 extern void au1000_halt(void);
 extern void au1000_power_off(void);
-extern void au1x_time_init(void);
-extern void au1x_timer_setup(struct irqaction *irq);
 extern void set_cpuspec(void);
 
 void __init plat_mem_setup(void)
index 683d9da84b6695832193c8c5fe0e2b61496ed1fb..4b3cf021a4545adaf280e1add901f05297af06c1 100644 (file)
@@ -9,9 +9,9 @@
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  */
+
 #include <asm/asm.h>
 #include <asm/mipsregs.h>
-#include <asm/addrspace.h>
 #include <asm/regdef.h>
 #include <asm/stackframe.h>
 
index e122bbc6cd88ff640d957df1d164a472ea288137..bdb6d73b26fb19876ddff6da2da037946f144e10 100644 (file)
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright (C) 2001 MontaVista Software, ppopov@mvista.com
+ * Copyright (C) 2001, 2006, 2008 MontaVista Software, <source@mvista.com>
  * Copied and modified Carsten Langgaard's time.c
  *
  * Carsten Langgaard, carstenl@mips.com
 
 #include <linux/types.h>
 #include <linux/init.h>
-#include <linux/kernel_stat.h>
-#include <linux/sched.h>
 #include <linux/spinlock.h>
-#include <linux/hardirq.h>
 
-#include <asm/compiler.h>
 #include <asm/mipsregs.h>
 #include <asm/time.h>
-#include <asm/div64.h>
 #include <asm/mach-au1x00/au1000.h>
 
-#include <linux/mc146818rtc.h>
-#include <linux/timex.h>
-
-static unsigned long r4k_offset; /* Amount to increment compare reg each time */
-static unsigned long r4k_cur;    /* What counter should be at next timer irq */
-int    no_au1xxx_32khz;
+static int no_au1xxx_32khz;
 extern int allow_au1k_wait; /* default off for CP0 Counter */
 
 #ifdef CONFIG_PM
@@ -184,7 +174,7 @@ wakeup_counter0_set(int ticks)
  * "wait" is enabled, and we need to detect if the 32KHz isn't present
  * but requested......got it? :-)              -- Dan
  */
-unsigned long cal_r4koff(void)
+unsigned long calc_clock(void)
 {
        unsigned long cpu_speed;
        unsigned long flags;
@@ -229,19 +219,13 @@ unsigned long cal_r4koff(void)
        // Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16)
        set_au1x00_uart_baud_base(cpu_speed / (2 * ((int)(au_readl(SYS_POWERCTRL)&0x03) + 2) * 16));
        spin_unlock_irqrestore(&time_lock, flags);
-       return (cpu_speed / HZ);
+       return cpu_speed;
 }
 
 void __init plat_time_init(void)
 {
-       unsigned int est_freq;
-
-       printk("calculating r4koff... ");
-       r4k_offset = cal_r4koff();
-       printk("%08lx(%d)\n", r4k_offset, (int) r4k_offset);
+       unsigned int est_freq = calc_clock();
 
-       //est_freq = 2*r4k_offset*HZ;
-       est_freq = r4k_offset*HZ;
        est_freq += 5000;    /* round */
        est_freq -= est_freq%10000;
        printk("CPU frequency %d.%02d MHz\n", est_freq/1000000,
@@ -249,9 +233,6 @@ void __init plat_time_init(void)
        set_au1x00_speed(est_freq);
        set_au1x00_lcd_clock(); // program the LCD clock
 
-       r4k_cur = (read_c0_count() + r4k_offset);
-       write_c0_compare(r4k_cur);
-
 #ifdef CONFIG_PM
        /*
         * setup counter 0, since it keeps ticking after a
@@ -265,12 +246,8 @@ void __init plat_time_init(void)
         * Check to ensure we really have a 32KHz oscillator before
         * we do this.
         */
-       if (no_au1xxx_32khz) {
+       if (no_au1xxx_32khz)
                printk("WARNING: no 32KHz clock found.\n");
-
-               /* Ensure we get CPO_COUNTER interrupts.  */
-               set_c0_status(IE_IRQ5);
-       }
        else {
                while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S);
                au_writel(0, SYS_TOYWRITE);
index 99eafeada518d28cf42c098b52880bdb8439a177..b7dcbad5c5861070b267393f3f44e1fce1c036ff 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/console.h>
-#include <linux/mc146818rtc.h>
-#include <linux/delay.h>
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
+
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-db1x00/db1x00.h>
 
index e822c123eab82e8e41a0b94a60496f77a1ae9b08..d3b967caf70c10729a7b1bb5a2681890c696a0a9 100644 (file)
  */
 
 #include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <linux/string.h>
 #include <linux/kernel.h>
 
-#include <asm/addrspace.h>
 #include <asm/bootinfo.h>
 
 #include <prom.h>
index 09cea03411b0d9c972cb96e5d689ee51335b1e05..eaa50c7b6341cdcf5407fbd58a69827accac7ef4 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
-#include <linux/errno.h>
+
 #include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
 
-#include <asm/bootinfo.h>
-#include <asm/io.h>
-#include <asm/mipsregs.h>
-#include <asm/system.h>
 #include <asm/mach-au1x00/au1000.h>
 
 #ifdef CONFIG_MIPS_DB1500
index 310d5dff89fca6347f34ce38e112dbf2116a87f6..5736354829c6a2d4356cba3a978f29694981c720 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/console.h>
-#include <linux/delay.h>
 
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
 #include <asm/mach-au1x00/au1000.h>
 
 extern int (*board_pci_idsel)(unsigned int devsel, int assert);
index e700fd312a24f0f853f2aa9564c512135622c2ff..c015cbce1cca4b0c7b5063b39ffcc3362dd8bc4b 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
-#include <linux/string.h>
+
 #include <linux/kernel.h>
-#include <linux/sched.h>
 #include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/bootmem.h>
 
-#include <asm/addrspace.h>
 #include <asm/bootinfo.h>
 
 #include <prom.h>
index 49c612aeddcfbc294af2e78bef6efa789a41d90f..78d70c42c9dbc8265814f164bd51db83fb6741df 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
-#include <linux/errno.h>
+
 #include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
 
-#include <asm/bootinfo.h>
-#include <asm/io.h>
-#include <asm/mipsregs.h>
-#include <asm/system.h>
 #include <asm/mach-au1x00/au1000.h>
 
 char irq_tab_alchemy[][5] __initdata = {
index ce8637b3afa9ea95e7fe16a8d7c987ef145d043b..a7edbf0829ac3b3ff13495421a738c26ef8fc9d7 100644 (file)
@@ -19,7 +19,6 @@
  */
 
 #include <linux/init.h>
-#include <linux/types.h>
 #include <linux/platform_device.h>
 #include <linux/leds.h>
 #include <linux/gpio_keys.h>
index 5198c4f98b43e872a568ce8d5f40633680f27a5e..33f15acc1b17677bb6ae6b2191142aaec6267e01 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/console.h>
 #include <linux/delay.h>
 
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-pb1x00/pb1000.h>
 
index 2515b9fb24af68b0d703b978d70d994d24db4207..549447df71d6f66d0c118a9860c4af7e8bb48857 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <linux/string.h>
 #include <linux/kernel.h>
 
-#include <asm/addrspace.h>
 #include <asm/bootinfo.h>
 
 #include <prom.h>
index 88e354508204f7c3f52b4a1b08bc962d5d8180c8..b3d56b0af321bf3ff8350a1035a6d73859d3b022 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
-#include <linux/errno.h>
+
 #include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/types.h>
 #include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
 
-#include <asm/bootinfo.h>
-#include <asm/io.h>
-#include <asm/mipsregs.h>
-#include <asm/system.h>
 #include <asm/mach-au1x00/au1000.h>
 
 struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
index 42874a6b31d174e6621a00777d980f387b0b53b6..656164c8e9cafb4b4219c15b42fb0d63a8a9a174 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/console.h>
 #include <linux/delay.h>
 
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-pb1x00/pb1100.h>
 
index 490c3801c275813ace198893e4b2d3b3f38f051e..c91344648ed317f8fe5f160395a62c83fd1322b4 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <linux/string.h>
 #include <linux/kernel.h>
 
-#include <asm/addrspace.h>
 #include <asm/bootinfo.h>
 
 #include <prom.h>
index 880456bf8c11facce55221cd5b041efb190d934b..b5021e3d477fcabd3b7db8222487380b14ee97f3 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
-#include <linux/errno.h>
+
 #include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
 
-#include <asm/bootinfo.h>
-#include <asm/io.h>
-#include <asm/mipsregs.h>
-#include <asm/system.h>
 #include <asm/mach-au1x00/au1000.h>
 
 struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
index 970b1b1d5cda63cf06702cbc1ed60ca4f8454ab2..4fe02ea65a60e13b20e5706532e95260492c044b 100644 (file)
@@ -3,5 +3,6 @@
 #
 
 lib-y := init.o board_setup.o irqmap.o
+obj-y += platform.o
 
 EXTRA_CFLAGS += -Werror
index b98bebfa87c6acd7c50e835b98c74f48ba8a3548..4493a792cc4c5a79d83b058c2066b3aac18a1437 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
 #include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/console.h>
-#include <linux/mc146818rtc.h>
-#include <linux/delay.h>
-
-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX)
-#include <linux/ide.h>
-#endif
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
 
 #include <au1000.h>
-#include <au1xxx_dbdma.h>
 #include <prom.h>
 
 #ifdef CONFIG_MIPS_PB1200
@@ -52,8 +36,6 @@
 
 #ifdef CONFIG_MIPS_DB1200
 #include <asm/mach-db1x00/db1200.h>
-#define PB1200_ETH_INT DB1200_ETH_INT
-#define PB1200_IDE_INT DB1200_IDE_INT
 #endif
 
 extern void _board_init_irq(void);
index 069ed45f04f2e504e7f9522d0cc9c07aff0e6008..72af5500660b0517c9f0fec4253da6fdabeaacaf 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <linux/string.h>
 #include <linux/kernel.h>
 
-#include <asm/addrspace.h>
 #include <asm/bootinfo.h>
 
 #include <prom.h>
index 8fcd0df86f93248e4e7a0fdf1eff33fd70647bae..e61eb8e0b76bd96cccc2196a95ff9e86d83f9511 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
-#include <linux/errno.h>
+
 #include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/types.h>
 #include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
-
-#include <asm/bootinfo.h>
-#include <asm/io.h>
-#include <asm/mipsregs.h>
-#include <asm/system.h>
+
 #include <asm/mach-au1x00/au1000.h>
 
 #ifdef CONFIG_MIPS_PB1200
diff --git a/arch/mips/au1000/pb1200/platform.c b/arch/mips/au1000/pb1200/platform.c
new file mode 100644 (file)
index 0000000..5930110
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Pb1200/DBAu1200 board platform device registration
+ *
+ * Copyright (C) 2008 MontaVista Software Inc. <source@mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <asm/mach-au1x00/au1xxx.h>
+
+static struct resource ide_resources[] = {
+       [0] = {
+               .start  = IDE_PHYS_ADDR,
+               .end    = IDE_PHYS_ADDR + IDE_PHYS_LEN - 1,
+               .flags  = IORESOURCE_MEM
+       },
+       [1] = {
+               .start  = IDE_INT,
+               .end    = IDE_INT,
+               .flags  = IORESOURCE_IRQ
+       }
+};
+
+static u64 ide_dmamask = ~(u32)0;
+
+static struct platform_device ide_device = {
+       .name           = "au1200-ide",
+       .id             = 0,
+       .dev = {
+               .dma_mask               = &ide_dmamask,
+               .coherent_dma_mask      = 0xffffffff,
+       },
+       .num_resources  = ARRAY_SIZE(ide_resources),
+       .resource       = ide_resources
+};
+
+static struct resource smc91c111_resources[] = {
+       [0] = {
+               .name   = "smc91x-regs",
+               .start  = SMC91C111_PHYS_ADDR,
+               .end    = SMC91C111_PHYS_ADDR + 0xf,
+               .flags  = IORESOURCE_MEM
+       },
+       [1] = {
+               .start  = SMC91C111_INT,
+               .end    = SMC91C111_INT,
+               .flags  = IORESOURCE_IRQ
+       },
+};
+
+static struct platform_device smc91c111_device = {
+       .name           = "smc91x",
+       .id             = -1,
+       .num_resources  = ARRAY_SIZE(smc91c111_resources),
+       .resource       = smc91c111_resources
+};
+
+static struct platform_device *board_platform_devices[] __initdata = {
+       &ide_device,
+       &smc91c111_device
+};
+
+static int __init board_register_devices(void)
+{
+       return platform_add_devices(board_platform_devices,
+                                   ARRAY_SIZE(board_platform_devices));
+}
+
+arch_initcall(board_register_devices);
index 5446836869d66a506253f6464edabdb48055a5a1..24c652e8ec4b7f542b264430ed3e6b4ef4b61fab 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/console.h>
 #include <linux/delay.h>
 
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-pb1x00/pb1500.h>
 
index db558c967048e766379e7fca31122dfe16ebb59e..488507c07db929e40e5b99dd5d328c88b574b845 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <linux/string.h>
 #include <linux/kernel.h>
 
-#include <asm/addrspace.h>
 #include <asm/bootinfo.h>
 
 #include <prom.h>
index 810f695e24bbadd5cd215a54edcdedfa431e6103..4817ab44d07f660e7970703d2e37c0dca4389903 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
-#include <linux/errno.h>
+
 #include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
 
-#include <asm/bootinfo.h>
-#include <asm/io.h>
-#include <asm/mipsregs.h>
-#include <asm/system.h>
 #include <asm/mach-au1x00/au1000.h>
 
 char irq_tab_alchemy[][5] __initdata = {
index e3cfb0d73180e52c765412d41c471a85f27fc416..45d60872b5655b49905c3e300365c30cd2729ea5 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/console.h>
-#include <linux/mc146818rtc.h>
-#include <linux/delay.h>
 
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
 #include <asm/mach-au1x00/au1000.h>
 #include <asm/mach-pb1x00/pb1550.h>
 
index b716363ea56443a4d3ce1ddf74bb88b7db05310b..f6b2fc587980102a5f561609143ffb55b7651090 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <linux/string.h>
 #include <linux/kernel.h>
 
-#include <asm/addrspace.h>
 #include <asm/bootinfo.h>
 
 #include <prom.h>
index 56becab28e5dc4a94a443f5135e7e296bbf4f388..e1dac37af08ac3ec426211b382c342e5bfd2afe7 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
-#include <linux/errno.h>
+
 #include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
 
-#include <asm/bootinfo.h>
-#include <asm/io.h>
-#include <asm/mipsregs.h>
-#include <asm/system.h>
 #include <asm/mach-au1x00/au1000.h>
 
 char irq_tab_alchemy[][5] __initdata = {
index b2e413e597a808dddf85178ef05ed7841a7689b7..79d1798621bfede35ca23fbaa5bb80532222cfee 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/ioport.h>
-#include <linux/mm.h>
-#include <linux/console.h>
 #include <linux/delay.h>
 
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/reboot.h>
-#include <asm/pgtable.h>
 #include <asm/mach-au1x00/au1000.h>
 
 void board_reset(void)
index 7e6878c1b0a58aa0b1fcb9031ecfa585b7dc0f20..24fc6e132dc0276f7916856ee550321c8294e614 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
 #include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-#include <linux/string.h>
 #include <linux/kernel.h>
 
-#include <asm/addrspace.h>
 #include <asm/bootinfo.h>
 
 #include <prom.h>
index a343da13433481a2120511a1f9ef606666d62245..dd6e3d1eb4d4da4cf09d531d420de4b4532ca9e2 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
-#include <linux/errno.h>
+
 #include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/kernel_stat.h>
-#include <linux/module.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/types.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/timex.h>
-#include <linux/slab.h>
-#include <linux/random.h>
-#include <linux/delay.h>
-#include <linux/bitops.h>
 
-#include <asm/bootinfo.h>
-#include <asm/io.h>
-#include <asm/mipsregs.h>
-#include <asm/system.h>
 #include <asm/mach-au1x00/au1000.h>
 
 struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
index 9ee67a95f6b96b57f17e34891c0c61366d48b18a..08923e6825b5421adac39ad3e880f8203c11e6d3 100644 (file)
@@ -18,8 +18,9 @@
  *  along with this program; if not, write to the Free Software
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
-
+#include <linux/module.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/stat.h>
 #include <asm/page.h>
 #include <asm/io.h>
 
 #include <excite.h>
 
-static int excite_get_unit_id(char *buf, char **addr, off_t offs, int size)
+static int excite_unit_id_proc_show(struct seq_file *m, void *v)
 {
-       const int len = snprintf(buf, PAGE_SIZE, "%06x", unit_id);
-       const int w = len - offs;
-       *addr = buf + offs;
-       return w < size ? w : size;
+       seq_printf(m, "%06x", unit_id);
+       return 0;
 }
 
+static int excite_unit_id_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, excite_unit_id_proc_show, NULL);
+}
+
+static const struct file_operations excite_unit_id_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = excite_unit_id_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 static int
 excite_bootrom_read(char *page, char **start, off_t off, int count,
                  int *eof, void *data)
@@ -65,12 +77,12 @@ excite_bootrom_read(char *page, char **start, off_t off, int count,
 void excite_procfs_init(void)
 {
        /* Create & populate /proc/excite */
-       struct proc_dir_entry * const pdir = proc_mkdir("excite", &proc_root);
+       struct proc_dir_entry * const pdir = proc_mkdir("excite", NULL);
        if (pdir) {
                struct proc_dir_entry * e;
 
-               e = create_proc_info_entry("unit_id", S_IRUGO, pdir,
-                                          excite_get_unit_id);
+               e = proc_create("unit_id", S_IRUGO, pdir,
+                               &excite_unit_id_proc_fops);
                if (e) e->size = 6;
 
                e = create_proc_read_entry("bootrom", S_IRUGO, pdir,
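
The hunk above converts a read_proc-style handler to the seq_file single_open() pattern; the same recipe applies to any small, one-shot /proc file. A minimal self-contained sketch (the name "foo" and the printed value are illustrative):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int foo_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", 42);	/* emit the whole file in one go */
	return 0;
}

static int foo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_proc_show, NULL);
}

static const struct file_operations foo_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= foo_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* registration: proc_create("foo", S_IRUGO, NULL, &foo_proc_fops); */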
index 6db0bdaefb27f72ea11bd4b920819a7448a1ac36..4f6bce99d5cfe4a5bd3052f4fe0475446c106178 100644 (file)
@@ -641,7 +641,6 @@ CONFIG_CROSSCOMPILE=y
 CONFIG_CMDLINE="nfsroot=192.168.192.169:/u1/mipsel,timeo=20 ip=dhcp"
 # CONFIG_DEBUG_STACK_USAGE is not set
 # CONFIG_RUNTIME_DEBUG is not set
-# CONFIG_MIPS_UNCACHED is not set
 
 #
 # Security options
index 518a60892b78e890aadd2173e8492008834d0659..780c7fc24b824fc6b97ce6dcf070ee5825b6fa04 100644 (file)
@@ -1223,7 +1223,6 @@ CONFIG_CMDLINE="console=ttyS1,38400n8 kgdb=ttyS0 root=/dev/nfs ip=bootp"
 # CONFIG_KGDB is not set
 CONFIG_SYS_SUPPORTS_KGDB=y
 # CONFIG_RUNTIME_DEBUG is not set
-# CONFIG_MIPS_UNCACHED is not set
 
 #
 # Security options
index 68351eb81bc88bbe78e208dc69a9e2ac8c4b213e..267f21ed1d0f423dacf60c63e1c50f385d89d399 100644 (file)
@@ -1213,7 +1213,6 @@ CONFIG_CMDLINE="console=ttyS1,38400n8 kgdb=ttyS0 root=/dev/nfs ip=bootp"
 # CONFIG_KGDB is not set
 CONFIG_SYS_SUPPORTS_KGDB=y
 # CONFIG_RUNTIME_DEBUG is not set
-# CONFIG_MIPS_UNCACHED is not set
 
 #
 # Security options
index 60349062595a9f5c0d420a75bab4faf3a9ace8d8..3965fda94a89afd60fdc861f9520340cc9d246cd 100644 (file)
@@ -9,30 +9,15 @@
  *
  */
 #include <linux/bcd.h>
-#include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
 #include <linux/mc146818rtc.h>
-#include <linux/mm.h>
-#include <linux/module.h>
 #include <linux/param.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/time.h>
-#include <linux/types.h>
-
-#include <asm/bootinfo.h>
-#include <asm/cpu.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/mipsregs.h>
-#include <asm/sections.h>
-#include <asm/time.h>
 
+#include <asm/cpu-features.h>
+#include <asm/ds1287.h>
+#include <asm/time.h>
 #include <asm/dec/interrupts.h>
 #include <asm/dec/ioasic.h>
-#include <asm/dec/ioasic_addrs.h>
 #include <asm/dec/machtype.h>
 
 unsigned long read_persistent_clock(void)
@@ -139,42 +124,32 @@ int rtc_mips_set_mmss(unsigned long nowtime)
        return retval;
 }
 
-static int dec_timer_state(void)
+void __init plat_time_init(void)
 {
-       return (CMOS_READ(RTC_REG_C) & RTC_PF) != 0;
-}
+       u32 start, end;
+       int i = HZ / 10;
 
-static void dec_timer_ack(void)
-{
-       CMOS_READ(RTC_REG_C);                   /* Ack the RTC interrupt.  */
-}
-
-static cycle_t dec_ioasic_hpt_read(void)
-{
-       /*
-        * The free-running counter is 32-bit which is good for about
-        * 2 minutes, 50 seconds at possible count rates of up to 25MHz.
-        */
-       return ioasic_read(IO_REG_FCTR);
-}
+       /* Set up the rate of periodic DS1287 interrupts. */
+       ds1287_set_base_clock(HZ);
 
+       if (cpu_has_counter) {
+               while (!ds1287_timer_state())
+                       ;
 
-void __init plat_time_init(void)
-{
-       mips_timer_ack = dec_timer_ack;
+               start = read_c0_count();
 
-       if (!cpu_has_counter && IOASIC)
-               /* For pre-R4k systems we use the I/O ASIC's counter.  */
-               clocksource_mips.read = dec_ioasic_hpt_read;
+               while (i--)
+                       while (!ds1287_timer_state())
+                               ;
 
-       /* Set up the rate of periodic DS1287 interrupts.  */
-       CMOS_WRITE(RTC_REF_CLCK_32KHZ | (16 - __ffs(HZ)), RTC_REG_A);
-}
+               end = read_c0_count();
 
-void __init plat_timer_setup(struct irqaction *irq)
-{
-       setup_irq(dec_interrupt[DEC_IRQ_RTC], irq);
+               mips_hpt_frequency = (end - start) * 10;
+               printk(KERN_INFO "MIPS counter frequency %dHz\n",
+                       mips_hpt_frequency);
+       } else if (IOASIC)
+               /* For pre-R4k systems we use the I/O ASIC's counter.  */
+               dec_ioasic_clocksource_init();
 
-       /* Enable periodic DS1287 interrupts.  */
-       CMOS_WRITE(CMOS_READ(RTC_REG_B) | RTC_PIE, RTC_REG_B);
+       ds1287_clockevent_init(dec_interrupt[DEC_IRQ_RTC]);
 }
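
The rewritten plat_time_init() above calibrates the CP0 count register against the DS1287's periodic tick: wait for a tick edge, sample the counter, let HZ/10 further ticks (0.1 s) elapse, sample again, and multiply the delta by 10 to get counts per second. A condensed restatement of that loop, for illustration only (the hunk above is the authoritative version):

static u32 calibrate_c0_against_rtc(void)
{
	u32 start, end;
	int ticks = HZ / 10;		/* measure over 0.1 s of RTC ticks */

	while (!ds1287_timer_state())
		;			/* align to a tick edge */
	start = read_c0_count();

	while (ticks--)
		while (!ds1287_timer_state())
			;
	end = read_c0_count();

	return (end - start) * 10;	/* e.g. 10,000,000 * 10 = 100 MHz */
}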
index c886d804d3037d964db8ae277b3c5da3ca2ebeda..f39c444e42d4156ceabcdf10a30a26e3cf4d3fb8 100644 (file)
 #include <linux/pm.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/gpio.h>
 #ifdef CONFIG_SERIAL_TXX9
 #include <linux/serial_core.h>
 #endif
 
 #include <asm/txx9tmr.h>
+#include <asm/txx9pio.h>
 #include <asm/reboot.h>
 #include <asm/jmr3927/jmr3927.h>
 #include <asm/mipsregs.h>
@@ -340,9 +342,12 @@ static void __init tx3927_setup(void)
 
        /* PIO */
        /* PIO[15:12] connected to LEDs */
-       tx3927_pioptr->dir = 0x0000f000;
-       tx3927_pioptr->maskcpu = 0;
-       tx3927_pioptr->maskext = 0;
+       __raw_writel(0x0000f000, &tx3927_pioptr->dir);
+       __raw_writel(0, &tx3927_pioptr->maskcpu);
+       __raw_writel(0, &tx3927_pioptr->maskext);
+       txx9_gpio_init(TX3927_PIO_REG, 0, 16);
+       gpio_request(11, "dipsw1");
+       gpio_request(10, "dipsw2");
        {
                unsigned int conf;
 
index 6fcdb6fda2e2e1f3ef72b167810d4cd62adb35f2..45545be3eb86ce015cabdc65059a32189d89595a 100644 (file)
@@ -10,12 +10,15 @@ obj-y               += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 
 obj-$(CONFIG_CEVT_BCM1480)     += cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K)         += cevt-r4k.o
+obj-$(CONFIG_CEVT_DS1287)      += cevt-ds1287.o
 obj-$(CONFIG_CEVT_GT641XX)     += cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250)      += cevt-sb1250.o
 obj-$(CONFIG_CEVT_TXX9)                += cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)     += csrc-bcm1480.o
+obj-$(CONFIG_CSRC_IOASIC)      += csrc-ioasic.o
 obj-$(CONFIG_CSRC_R4K)         += csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)      += csrc-sb1250.o
+obj-$(CONFIG_SYNC_R4K)         += sync-r4k.o
 
 binfmt_irix-objs       := irixelf.o irixinv.o irixioctl.o irixsig.o    \
                           irix5sys.o sysirix.o
@@ -50,6 +53,8 @@ obj-$(CONFIG_MIPS_MT)         += mips-mt.o
 obj-$(CONFIG_MIPS_MT_FPAFF)    += mips-mt-fpaff.o
 obj-$(CONFIG_MIPS_MT_SMTC)     += smtc.o smtc-asm.o smtc-proc.o
 obj-$(CONFIG_MIPS_MT_SMP)      += smp-mt.o
+obj-$(CONFIG_MIPS_CMP)         += smp-cmp.o
+obj-$(CONFIG_CPU_MIPSR2)       += spram.o
 
 obj-$(CONFIG_MIPS_APSP_KSPD)   += kspd.o
 obj-$(CONFIG_MIPS_VPE_LOADER)  += vpe.o
@@ -62,6 +67,7 @@ obj-$(CONFIG_IRQ_CPU_RM9K)    += irq-rm9000.o
 obj-$(CONFIG_MIPS_BOARDS_GEN)  += irq-msc01.o
 obj-$(CONFIG_IRQ_TXX9)         += irq_txx9.o
 obj-$(CONFIG_IRQ_GT641XX)      += irq-gt641xx.o
+obj-$(CONFIG_IRQ_GIC)          += irq-gic.o
 
 obj-$(CONFIG_32BIT)            += scall32-o32.o
 obj-$(CONFIG_64BIT)            += scall64-64.o
@@ -77,6 +83,8 @@ obj-$(CONFIG_64BIT)           += cpu-bugs64.o
 
 obj-$(CONFIG_I8253)            += i8253.o
 
+obj-$(CONFIG_GPIO_TXX9)                += gpio_txx9.o
+
 obj-$(CONFIG_KEXEC)            += machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_EARLY_PRINTK)     += early_printk.o
 
index ca136298acdccf800a3706fb1817dd3e5e52b735..72942226fcdd5b285cc8eed3fc301dc9c96b507d 100644 (file)
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
-
+#include <linux/kbuild.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 
-#define text(t) __asm__("\n@@@" t)
-#define _offset(type, member) (&(((type *)NULL)->member))
-#define offset(string, ptr, member) \
-       __asm__("\n@@@" string "%0" : : "i" (_offset(ptr, member)))
-#define constant(string, member) \
-       __asm__("\n@@@" string "%X0" : : "ri" (member))
-#define size(string, size) \
-       __asm__("\n@@@" string "%0" : : "i" (sizeof(size)))
-#define linefeed text("")
-
 void output_ptreg_defines(void)
 {
-       text("/* MIPS pt_regs offsets. */");
-       offset("#define PT_R0     ", struct pt_regs, regs[0]);
-       offset("#define PT_R1     ", struct pt_regs, regs[1]);
-       offset("#define PT_R2     ", struct pt_regs, regs[2]);
-       offset("#define PT_R3     ", struct pt_regs, regs[3]);
-       offset("#define PT_R4     ", struct pt_regs, regs[4]);
-       offset("#define PT_R5     ", struct pt_regs, regs[5]);
-       offset("#define PT_R6     ", struct pt_regs, regs[6]);
-       offset("#define PT_R7     ", struct pt_regs, regs[7]);
-       offset("#define PT_R8     ", struct pt_regs, regs[8]);
-       offset("#define PT_R9     ", struct pt_regs, regs[9]);
-       offset("#define PT_R10    ", struct pt_regs, regs[10]);
-       offset("#define PT_R11    ", struct pt_regs, regs[11]);
-       offset("#define PT_R12    ", struct pt_regs, regs[12]);
-       offset("#define PT_R13    ", struct pt_regs, regs[13]);
-       offset("#define PT_R14    ", struct pt_regs, regs[14]);
-       offset("#define PT_R15    ", struct pt_regs, regs[15]);
-       offset("#define PT_R16    ", struct pt_regs, regs[16]);
-       offset("#define PT_R17    ", struct pt_regs, regs[17]);
-       offset("#define PT_R18    ", struct pt_regs, regs[18]);
-       offset("#define PT_R19    ", struct pt_regs, regs[19]);
-       offset("#define PT_R20    ", struct pt_regs, regs[20]);
-       offset("#define PT_R21    ", struct pt_regs, regs[21]);
-       offset("#define PT_R22    ", struct pt_regs, regs[22]);
-       offset("#define PT_R23    ", struct pt_regs, regs[23]);
-       offset("#define PT_R24    ", struct pt_regs, regs[24]);
-       offset("#define PT_R25    ", struct pt_regs, regs[25]);
-       offset("#define PT_R26    ", struct pt_regs, regs[26]);
-       offset("#define PT_R27    ", struct pt_regs, regs[27]);
-       offset("#define PT_R28    ", struct pt_regs, regs[28]);
-       offset("#define PT_R29    ", struct pt_regs, regs[29]);
-       offset("#define PT_R30    ", struct pt_regs, regs[30]);
-       offset("#define PT_R31    ", struct pt_regs, regs[31]);
-       offset("#define PT_LO     ", struct pt_regs, lo);
-       offset("#define PT_HI     ", struct pt_regs, hi);
+       COMMENT("MIPS pt_regs offsets.");
+       OFFSET(PT_R0, pt_regs, regs[0]);
+       OFFSET(PT_R1, pt_regs, regs[1]);
+       OFFSET(PT_R2, pt_regs, regs[2]);
+       OFFSET(PT_R3, pt_regs, regs[3]);
+       OFFSET(PT_R4, pt_regs, regs[4]);
+       OFFSET(PT_R5, pt_regs, regs[5]);
+       OFFSET(PT_R6, pt_regs, regs[6]);
+       OFFSET(PT_R7, pt_regs, regs[7]);
+       OFFSET(PT_R8, pt_regs, regs[8]);
+       OFFSET(PT_R9, pt_regs, regs[9]);
+       OFFSET(PT_R10, pt_regs, regs[10]);
+       OFFSET(PT_R11, pt_regs, regs[11]);
+       OFFSET(PT_R12, pt_regs, regs[12]);
+       OFFSET(PT_R13, pt_regs, regs[13]);
+       OFFSET(PT_R14, pt_regs, regs[14]);
+       OFFSET(PT_R15, pt_regs, regs[15]);
+       OFFSET(PT_R16, pt_regs, regs[16]);
+       OFFSET(PT_R17, pt_regs, regs[17]);
+       OFFSET(PT_R18, pt_regs, regs[18]);
+       OFFSET(PT_R19, pt_regs, regs[19]);
+       OFFSET(PT_R20, pt_regs, regs[20]);
+       OFFSET(PT_R21, pt_regs, regs[21]);
+       OFFSET(PT_R22, pt_regs, regs[22]);
+       OFFSET(PT_R23, pt_regs, regs[23]);
+       OFFSET(PT_R24, pt_regs, regs[24]);
+       OFFSET(PT_R25, pt_regs, regs[25]);
+       OFFSET(PT_R26, pt_regs, regs[26]);
+       OFFSET(PT_R27, pt_regs, regs[27]);
+       OFFSET(PT_R28, pt_regs, regs[28]);
+       OFFSET(PT_R29, pt_regs, regs[29]);
+       OFFSET(PT_R30, pt_regs, regs[30]);
+       OFFSET(PT_R31, pt_regs, regs[31]);
+       OFFSET(PT_LO, pt_regs, lo);
+       OFFSET(PT_HI, pt_regs, hi);
 #ifdef CONFIG_CPU_HAS_SMARTMIPS
-       offset("#define PT_ACX    ", struct pt_regs, acx);
+       OFFSET(PT_ACX, pt_regs, acx);
 #endif
-       offset("#define PT_EPC    ", struct pt_regs, cp0_epc);
-       offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr);
-       offset("#define PT_STATUS ", struct pt_regs, cp0_status);
-       offset("#define PT_CAUSE  ", struct pt_regs, cp0_cause);
+       OFFSET(PT_EPC, pt_regs, cp0_epc);
+       OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
+       OFFSET(PT_STATUS, pt_regs, cp0_status);
+       OFFSET(PT_CAUSE, pt_regs, cp0_cause);
 #ifdef CONFIG_MIPS_MT_SMTC
-       offset("#define PT_TCSTATUS  ", struct pt_regs, cp0_tcstatus);
+       OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus);
 #endif /* CONFIG_MIPS_MT_SMTC */
-       size("#define PT_SIZE   ", struct pt_regs);
-       linefeed;
+       DEFINE(PT_SIZE, sizeof(struct pt_regs));
+       BLANK();
 }
 
 void output_task_defines(void)
 {
-       text("/* MIPS task_struct offsets. */");
-       offset("#define TASK_STATE         ", struct task_struct, state);
-       offset("#define TASK_THREAD_INFO   ", struct task_struct, stack);
-       offset("#define TASK_FLAGS         ", struct task_struct, flags);
-       offset("#define TASK_MM            ", struct task_struct, mm);
-       offset("#define TASK_PID           ", struct task_struct, pid);
-       size(  "#define TASK_STRUCT_SIZE   ", struct task_struct);
-       linefeed;
+       COMMENT("MIPS task_struct offsets.");
+       OFFSET(TASK_STATE, task_struct, state);
+       OFFSET(TASK_THREAD_INFO, task_struct, stack);
+       OFFSET(TASK_FLAGS, task_struct, flags);
+       OFFSET(TASK_MM, task_struct, mm);
+       OFFSET(TASK_PID, task_struct, pid);
+       DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
+       BLANK();
 }
 
 void output_thread_info_defines(void)
 {
-       text("/* MIPS thread_info offsets. */");
-       offset("#define TI_TASK            ", struct thread_info, task);
-       offset("#define TI_EXEC_DOMAIN     ", struct thread_info, exec_domain);
-       offset("#define TI_FLAGS           ", struct thread_info, flags);
-       offset("#define TI_TP_VALUE        ", struct thread_info, tp_value);
-       offset("#define TI_CPU             ", struct thread_info, cpu);
-       offset("#define TI_PRE_COUNT       ", struct thread_info, preempt_count);
-       offset("#define TI_ADDR_LIMIT      ", struct thread_info, addr_limit);
-       offset("#define TI_RESTART_BLOCK   ", struct thread_info, restart_block);
-       offset("#define TI_REGS            ", struct thread_info, regs);
-       constant("#define _THREAD_SIZE       ", THREAD_SIZE);
-       constant("#define _THREAD_MASK       ", THREAD_MASK);
-       linefeed;
+       COMMENT("MIPS thread_info offsets.");
+       OFFSET(TI_TASK, thread_info, task);
+       OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain);
+       OFFSET(TI_FLAGS, thread_info, flags);
+       OFFSET(TI_TP_VALUE, thread_info, tp_value);
+       OFFSET(TI_CPU, thread_info, cpu);
+       OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+       OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
+       OFFSET(TI_RESTART_BLOCK, thread_info, restart_block);
+       OFFSET(TI_REGS, thread_info, regs);
+       DEFINE(_THREAD_SIZE, THREAD_SIZE);
+       DEFINE(_THREAD_MASK, THREAD_MASK);
+       BLANK();
 }
 
 void output_thread_defines(void)
 {
-       text("/* MIPS specific thread_struct offsets. */");
-       offset("#define THREAD_REG16   ", struct task_struct, thread.reg16);
-       offset("#define THREAD_REG17   ", struct task_struct, thread.reg17);
-       offset("#define THREAD_REG18   ", struct task_struct, thread.reg18);
-       offset("#define THREAD_REG19   ", struct task_struct, thread.reg19);
-       offset("#define THREAD_REG20   ", struct task_struct, thread.reg20);
-       offset("#define THREAD_REG21   ", struct task_struct, thread.reg21);
-       offset("#define THREAD_REG22   ", struct task_struct, thread.reg22);
-       offset("#define THREAD_REG23   ", struct task_struct, thread.reg23);
-       offset("#define THREAD_REG29   ", struct task_struct, thread.reg29);
-       offset("#define THREAD_REG30   ", struct task_struct, thread.reg30);
-       offset("#define THREAD_REG31   ", struct task_struct, thread.reg31);
-       offset("#define THREAD_STATUS  ", struct task_struct,
+       COMMENT("MIPS specific thread_struct offsets.");
+       OFFSET(THREAD_REG16, task_struct, thread.reg16);
+       OFFSET(THREAD_REG17, task_struct, thread.reg17);
+       OFFSET(THREAD_REG18, task_struct, thread.reg18);
+       OFFSET(THREAD_REG19, task_struct, thread.reg19);
+       OFFSET(THREAD_REG20, task_struct, thread.reg20);
+       OFFSET(THREAD_REG21, task_struct, thread.reg21);
+       OFFSET(THREAD_REG22, task_struct, thread.reg22);
+       OFFSET(THREAD_REG23, task_struct, thread.reg23);
+       OFFSET(THREAD_REG29, task_struct, thread.reg29);
+       OFFSET(THREAD_REG30, task_struct, thread.reg30);
+       OFFSET(THREAD_REG31, task_struct, thread.reg31);
+       OFFSET(THREAD_STATUS, task_struct,
               thread.cp0_status);
-       offset("#define THREAD_FPU     ", struct task_struct, thread.fpu);
+       OFFSET(THREAD_FPU, task_struct, thread.fpu);
 
-       offset("#define THREAD_BVADDR  ", struct task_struct, \
+       OFFSET(THREAD_BVADDR, task_struct, \
               thread.cp0_badvaddr);
-       offset("#define THREAD_BUADDR  ", struct task_struct, \
+       OFFSET(THREAD_BUADDR, task_struct, \
               thread.cp0_baduaddr);
-       offset("#define THREAD_ECODE   ", struct task_struct, \
+       OFFSET(THREAD_ECODE, task_struct, \
               thread.error_code);
-       offset("#define THREAD_TRAPNO  ", struct task_struct, thread.trap_no);
-       offset("#define THREAD_TRAMP   ", struct task_struct, \
+       OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no);
+       OFFSET(THREAD_TRAMP, task_struct, \
               thread.irix_trampoline);
-       offset("#define THREAD_OLDCTX  ", struct task_struct, \
+       OFFSET(THREAD_OLDCTX, task_struct, \
               thread.irix_oldctx);
-       linefeed;
+       BLANK();
 }
 
 void output_thread_fpu_defines(void)
 {
-       offset("#define THREAD_FPR0    ",
-              struct task_struct, thread.fpu.fpr[0]);
-       offset("#define THREAD_FPR1    ",
-              struct task_struct, thread.fpu.fpr[1]);
-       offset("#define THREAD_FPR2    ",
-              struct task_struct, thread.fpu.fpr[2]);
-       offset("#define THREAD_FPR3    ",
-              struct task_struct, thread.fpu.fpr[3]);
-       offset("#define THREAD_FPR4    ",
-              struct task_struct, thread.fpu.fpr[4]);
-       offset("#define THREAD_FPR5    ",
-              struct task_struct, thread.fpu.fpr[5]);
-       offset("#define THREAD_FPR6    ",
-              struct task_struct, thread.fpu.fpr[6]);
-       offset("#define THREAD_FPR7    ",
-              struct task_struct, thread.fpu.fpr[7]);
-       offset("#define THREAD_FPR8    ",
-              struct task_struct, thread.fpu.fpr[8]);
-       offset("#define THREAD_FPR9    ",
-              struct task_struct, thread.fpu.fpr[9]);
-       offset("#define THREAD_FPR10   ",
-              struct task_struct, thread.fpu.fpr[10]);
-       offset("#define THREAD_FPR11   ",
-              struct task_struct, thread.fpu.fpr[11]);
-       offset("#define THREAD_FPR12   ",
-              struct task_struct, thread.fpu.fpr[12]);
-       offset("#define THREAD_FPR13   ",
-              struct task_struct, thread.fpu.fpr[13]);
-       offset("#define THREAD_FPR14   ",
-              struct task_struct, thread.fpu.fpr[14]);
-       offset("#define THREAD_FPR15   ",
-              struct task_struct, thread.fpu.fpr[15]);
-       offset("#define THREAD_FPR16   ",
-              struct task_struct, thread.fpu.fpr[16]);
-       offset("#define THREAD_FPR17   ",
-              struct task_struct, thread.fpu.fpr[17]);
-       offset("#define THREAD_FPR18   ",
-              struct task_struct, thread.fpu.fpr[18]);
-       offset("#define THREAD_FPR19   ",
-              struct task_struct, thread.fpu.fpr[19]);
-       offset("#define THREAD_FPR20   ",
-              struct task_struct, thread.fpu.fpr[20]);
-       offset("#define THREAD_FPR21   ",
-              struct task_struct, thread.fpu.fpr[21]);
-       offset("#define THREAD_FPR22   ",
-              struct task_struct, thread.fpu.fpr[22]);
-       offset("#define THREAD_FPR23   ",
-              struct task_struct, thread.fpu.fpr[23]);
-       offset("#define THREAD_FPR24   ",
-              struct task_struct, thread.fpu.fpr[24]);
-       offset("#define THREAD_FPR25   ",
-              struct task_struct, thread.fpu.fpr[25]);
-       offset("#define THREAD_FPR26   ",
-              struct task_struct, thread.fpu.fpr[26]);
-       offset("#define THREAD_FPR27   ",
-              struct task_struct, thread.fpu.fpr[27]);
-       offset("#define THREAD_FPR28   ",
-              struct task_struct, thread.fpu.fpr[28]);
-       offset("#define THREAD_FPR29   ",
-              struct task_struct, thread.fpu.fpr[29]);
-       offset("#define THREAD_FPR30   ",
-              struct task_struct, thread.fpu.fpr[30]);
-       offset("#define THREAD_FPR31   ",
-              struct task_struct, thread.fpu.fpr[31]);
+       OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]);
+       OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]);
+       OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]);
+       OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]);
+       OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]);
+       OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]);
+       OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]);
+       OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]);
+       OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]);
+       OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]);
+       OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]);
+       OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]);
+       OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]);
+       OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]);
+       OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]);
+       OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]);
+       OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]);
+       OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]);
+       OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]);
+       OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]);
+       OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]);
+       OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]);
+       OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]);
+       OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]);
+       OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]);
+       OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]);
+       OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]);
+       OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]);
+       OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]);
+       OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]);
+       OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
+       OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
 
-       offset("#define THREAD_FCR31   ",
-              struct task_struct, thread.fpu.fcr31);
-       linefeed;
+       OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
+       BLANK();
 }
 
 void output_mm_defines(void)
 {
-       text("/* Size of struct page  */");
-       size("#define STRUCT_PAGE_SIZE   ", struct page);
-       linefeed;
-       text("/* Linux mm_struct offsets. */");
-       offset("#define MM_USERS      ", struct mm_struct, mm_users);
-       offset("#define MM_PGD        ", struct mm_struct, pgd);
-       offset("#define MM_CONTEXT    ", struct mm_struct, context);
-       linefeed;
-       constant("#define _PAGE_SIZE     ", PAGE_SIZE);
-       constant("#define _PAGE_SHIFT    ", PAGE_SHIFT);
-       linefeed;
-       constant("#define _PGD_T_SIZE    ", sizeof(pgd_t));
-       constant("#define _PMD_T_SIZE    ", sizeof(pmd_t));
-       constant("#define _PTE_T_SIZE    ", sizeof(pte_t));
-       linefeed;
-       constant("#define _PGD_T_LOG2    ", PGD_T_LOG2);
-       constant("#define _PMD_T_LOG2    ", PMD_T_LOG2);
-       constant("#define _PTE_T_LOG2    ", PTE_T_LOG2);
-       linefeed;
-       constant("#define _PGD_ORDER     ", PGD_ORDER);
-       constant("#define _PMD_ORDER     ", PMD_ORDER);
-       constant("#define _PTE_ORDER     ", PTE_ORDER);
-       linefeed;
-       constant("#define _PMD_SHIFT     ", PMD_SHIFT);
-       constant("#define _PGDIR_SHIFT   ", PGDIR_SHIFT);
-       linefeed;
-       constant("#define _PTRS_PER_PGD  ", PTRS_PER_PGD);
-       constant("#define _PTRS_PER_PMD  ", PTRS_PER_PMD);
-       constant("#define _PTRS_PER_PTE  ", PTRS_PER_PTE);
-       linefeed;
+       COMMENT("Size of struct page");
+       DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
+       BLANK();
+       COMMENT("Linux mm_struct offsets.");
+       OFFSET(MM_USERS, mm_struct, mm_users);
+       OFFSET(MM_PGD, mm_struct, pgd);
+       OFFSET(MM_CONTEXT, mm_struct, context);
+       BLANK();
+       DEFINE(_PAGE_SIZE, PAGE_SIZE);
+       DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
+       BLANK();
+       DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
+       DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
+       DEFINE(_PTE_T_SIZE, sizeof(pte_t));
+       BLANK();
+       DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
+       DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
+       DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
+       BLANK();
+       DEFINE(_PGD_ORDER, PGD_ORDER);
+       DEFINE(_PMD_ORDER, PMD_ORDER);
+       DEFINE(_PTE_ORDER, PTE_ORDER);
+       BLANK();
+       DEFINE(_PMD_SHIFT, PMD_SHIFT);
+       DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
+       BLANK();
+       DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
+       DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
+       DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
+       BLANK();
 }
 
 #ifdef CONFIG_32BIT
 void output_sc_defines(void)
 {
-       text("/* Linux sigcontext offsets. */");
-       offset("#define SC_REGS       ", struct sigcontext, sc_regs);
-       offset("#define SC_FPREGS     ", struct sigcontext, sc_fpregs);
-       offset("#define SC_ACX        ", struct sigcontext, sc_acx);
-       offset("#define SC_MDHI       ", struct sigcontext, sc_mdhi);
-       offset("#define SC_MDLO       ", struct sigcontext, sc_mdlo);
-       offset("#define SC_PC         ", struct sigcontext, sc_pc);
-       offset("#define SC_FPC_CSR    ", struct sigcontext, sc_fpc_csr);
-       offset("#define SC_FPC_EIR    ", struct sigcontext, sc_fpc_eir);
-       offset("#define SC_HI1        ", struct sigcontext, sc_hi1);
-       offset("#define SC_LO1        ", struct sigcontext, sc_lo1);
-       offset("#define SC_HI2        ", struct sigcontext, sc_hi2);
-       offset("#define SC_LO2        ", struct sigcontext, sc_lo2);
-       offset("#define SC_HI3        ", struct sigcontext, sc_hi3);
-       offset("#define SC_LO3        ", struct sigcontext, sc_lo3);
-       linefeed;
+       COMMENT("Linux sigcontext offsets.");
+       OFFSET(SC_REGS, sigcontext, sc_regs);
+       OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
+       OFFSET(SC_ACX, sigcontext, sc_acx);
+       OFFSET(SC_MDHI, sigcontext, sc_mdhi);
+       OFFSET(SC_MDLO, sigcontext, sc_mdlo);
+       OFFSET(SC_PC, sigcontext, sc_pc);
+       OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+       OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir);
+       OFFSET(SC_HI1, sigcontext, sc_hi1);
+       OFFSET(SC_LO1, sigcontext, sc_lo1);
+       OFFSET(SC_HI2, sigcontext, sc_hi2);
+       OFFSET(SC_LO2, sigcontext, sc_lo2);
+       OFFSET(SC_HI3, sigcontext, sc_hi3);
+       OFFSET(SC_LO3, sigcontext, sc_lo3);
+       BLANK();
 }
 #endif
 
 #ifdef CONFIG_64BIT
 void output_sc_defines(void)
 {
-       text("/* Linux sigcontext offsets. */");
-       offset("#define SC_REGS       ", struct sigcontext, sc_regs);
-       offset("#define SC_FPREGS     ", struct sigcontext, sc_fpregs);
-       offset("#define SC_MDHI       ", struct sigcontext, sc_mdhi);
-       offset("#define SC_MDLO       ", struct sigcontext, sc_mdlo);
-       offset("#define SC_PC         ", struct sigcontext, sc_pc);
-       offset("#define SC_FPC_CSR    ", struct sigcontext, sc_fpc_csr);
-       linefeed;
+       COMMENT("Linux sigcontext offsets.");
+       OFFSET(SC_REGS, sigcontext, sc_regs);
+       OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
+       OFFSET(SC_MDHI, sigcontext, sc_mdhi);
+       OFFSET(SC_MDLO, sigcontext, sc_mdlo);
+       OFFSET(SC_PC, sigcontext, sc_pc);
+       OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+       BLANK();
 }
 #endif
 
 #ifdef CONFIG_MIPS32_COMPAT
 void output_sc32_defines(void)
 {
-       text("/* Linux 32-bit sigcontext offsets. */");
-       offset("#define SC32_FPREGS     ", struct sigcontext32, sc_fpregs);
-       offset("#define SC32_FPC_CSR    ", struct sigcontext32, sc_fpc_csr);
-       offset("#define SC32_FPC_EIR    ", struct sigcontext32, sc_fpc_eir);
-       linefeed;
+       COMMENT("Linux 32-bit sigcontext offsets.");
+       OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
+       OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
+       OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
+       BLANK();
 }
 #endif
 
 void output_signal_defined(void)
 {
-       text("/* Linux signal numbers. */");
-       constant("#define _SIGHUP     ", SIGHUP);
-       constant("#define _SIGINT     ", SIGINT);
-       constant("#define _SIGQUIT    ", SIGQUIT);
-       constant("#define _SIGILL     ", SIGILL);
-       constant("#define _SIGTRAP    ", SIGTRAP);
-       constant("#define _SIGIOT     ", SIGIOT);
-       constant("#define _SIGABRT    ", SIGABRT);
-       constant("#define _SIGEMT     ", SIGEMT);
-       constant("#define _SIGFPE     ", SIGFPE);
-       constant("#define _SIGKILL    ", SIGKILL);
-       constant("#define _SIGBUS     ", SIGBUS);
-       constant("#define _SIGSEGV    ", SIGSEGV);
-       constant("#define _SIGSYS     ", SIGSYS);
-       constant("#define _SIGPIPE    ", SIGPIPE);
-       constant("#define _SIGALRM    ", SIGALRM);
-       constant("#define _SIGTERM    ", SIGTERM);
-       constant("#define _SIGUSR1    ", SIGUSR1);
-       constant("#define _SIGUSR2    ", SIGUSR2);
-       constant("#define _SIGCHLD    ", SIGCHLD);
-       constant("#define _SIGPWR     ", SIGPWR);
-       constant("#define _SIGWINCH   ", SIGWINCH);
-       constant("#define _SIGURG     ", SIGURG);
-       constant("#define _SIGIO      ", SIGIO);
-       constant("#define _SIGSTOP    ", SIGSTOP);
-       constant("#define _SIGTSTP    ", SIGTSTP);
-       constant("#define _SIGCONT    ", SIGCONT);
-       constant("#define _SIGTTIN    ", SIGTTIN);
-       constant("#define _SIGTTOU    ", SIGTTOU);
-       constant("#define _SIGVTALRM  ", SIGVTALRM);
-       constant("#define _SIGPROF    ", SIGPROF);
-       constant("#define _SIGXCPU    ", SIGXCPU);
-       constant("#define _SIGXFSZ    ", SIGXFSZ);
-       linefeed;
+       COMMENT("Linux signal numbers.");
+       DEFINE(_SIGHUP, SIGHUP);
+       DEFINE(_SIGINT, SIGINT);
+       DEFINE(_SIGQUIT, SIGQUIT);
+       DEFINE(_SIGILL, SIGILL);
+       DEFINE(_SIGTRAP, SIGTRAP);
+       DEFINE(_SIGIOT, SIGIOT);
+       DEFINE(_SIGABRT, SIGABRT);
+       DEFINE(_SIGEMT, SIGEMT);
+       DEFINE(_SIGFPE, SIGFPE);
+       DEFINE(_SIGKILL, SIGKILL);
+       DEFINE(_SIGBUS, SIGBUS);
+       DEFINE(_SIGSEGV, SIGSEGV);
+       DEFINE(_SIGSYS, SIGSYS);
+       DEFINE(_SIGPIPE, SIGPIPE);
+       DEFINE(_SIGALRM, SIGALRM);
+       DEFINE(_SIGTERM, SIGTERM);
+       DEFINE(_SIGUSR1, SIGUSR1);
+       DEFINE(_SIGUSR2, SIGUSR2);
+       DEFINE(_SIGCHLD, SIGCHLD);
+       DEFINE(_SIGPWR, SIGPWR);
+       DEFINE(_SIGWINCH, SIGWINCH);
+       DEFINE(_SIGURG, SIGURG);
+       DEFINE(_SIGIO, SIGIO);
+       DEFINE(_SIGSTOP, SIGSTOP);
+       DEFINE(_SIGTSTP, SIGTSTP);
+       DEFINE(_SIGCONT, SIGCONT);
+       DEFINE(_SIGTTIN, SIGTTIN);
+       DEFINE(_SIGTTOU, SIGTTOU);
+       DEFINE(_SIGVTALRM, SIGVTALRM);
+       DEFINE(_SIGPROF, SIGPROF);
+       DEFINE(_SIGXCPU, SIGXCPU);
+       DEFINE(_SIGXFSZ, SIGXFSZ);
+       BLANK();
 }
 
 void output_irq_cpustat_t_defines(void)
 {
-       text("/* Linux irq_cpustat_t offsets. */");
-       offset("#define IC_SOFTIRQ_PENDING ", irq_cpustat_t, __softirq_pending);
-       size("#define IC_IRQ_CPUSTAT_T   ", irq_cpustat_t);
-       linefeed;
+       COMMENT("Linux irq_cpustat_t offsets.");
+       DEFINE(IC_SOFTIRQ_PENDING,
+                       offsetof(irq_cpustat_t, __softirq_pending));
+       DEFINE(IC_IRQ_CPUSTAT_T, sizeof(irq_cpustat_t));
+       BLANK();
 }
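
The asm-offsets conversion above swaps this file's private text()/offset()/constant()/size() macros for the shared COMMENT()/OFFSET()/DEFINE()/BLANK() helpers from <linux/kbuild.h>. They all work the same way: emit a magic marker into the compiler's assembly output, which the build then rewrites into a generated header of #defines consumed by assembly code. Roughly (paraphrased from kbuild.h, shown only to illustrate the mechanism):

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define BLANK() asm volatile("\n->" : : )

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

#define COMMENT(x) asm volatile("\n->#" x)

/* The "->NAME value" lines in the .s output become "#define NAME value"
 * in the generated asm-offsets.h. */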
index 77db3473deabfc20c97acb99d89dbd77c96b9170..9fdd8bcdd21ef1785c90cc40b0579b6acb4dc1a0 100644 (file)
@@ -54,6 +54,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 #include <linux/module.h>
 #include <linux/elfcore.h>
 #include <linux/compat.h>
+#include <linux/math64.h>
 
 #define elf_prstatus elf_prstatus32
 struct elf_prstatus32
@@ -102,8 +103,8 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
         * one divide.
         */
        u64 nsec = (u64)jiffies * TICK_NSEC;
-       long rem;
-       value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+       u32 rem;
+       value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
        value->tv_usec = rem / NSEC_PER_USEC;
 }
 
index 08f4cd781ee33cc5ac695fed9705a33e7e507e65..e1333d7319e275a065546490baa8401b87c9bf47 100644 (file)
@@ -56,6 +56,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
 #include <linux/module.h>
 #include <linux/elfcore.h>
 #include <linux/compat.h>
+#include <linux/math64.h>
 
 #define elf_prstatus elf_prstatus32
 struct elf_prstatus32
@@ -104,8 +105,8 @@ jiffies_to_compat_timeval(unsigned long jiffies, struct compat_timeval *value)
         * one divide.
         */
        u64 nsec = (u64)jiffies * TICK_NSEC;
-       long rem;
-       value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+       u32 rem;
+       value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
        value->tv_usec = rem / NSEC_PER_USEC;
 }
 
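
Both hunks above swap the old div_long_long_rem() helper for div_u64_rem() from <linux/math64.h>, which divides a u64 by a u32 divisor and returns the 32-bit remainder through a pointer. A minimal sketch of the same jiffies-to-timeval conversion outside the ELF compat code (the function name is illustrative):

#include <linux/jiffies.h>
#include <linux/math64.h>
#include <linux/time.h>

static void example_jiffies_to_timeval(unsigned long j, struct timeval *tv)
{
	u64 nsec = (u64)j * TICK_NSEC;	/* total nanoseconds */
	u32 rem;

	tv->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
	tv->tv_usec = rem / NSEC_PER_USEC;
}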
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
new file mode 100644 (file)
index 0000000..df4acb6
--- /dev/null
@@ -0,0 +1,129 @@
+/*
+ *  DS1287 clockevent driver
+ *
+ *  Copyright (C) 2008  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mc146818rtc.h>
+
+#include <asm/time.h>
+
+int ds1287_timer_state(void)
+{
+       return (CMOS_READ(RTC_REG_C) & RTC_PF) != 0;
+}
+
+int ds1287_set_base_clock(unsigned int hz)
+{
+       u8 rate;
+
+       switch (hz) {
+       case 128:
+               rate = 0x9;
+               break;
+       case 256:
+               rate = 0x8;
+               break;
+       case 1024:
+               rate = 0x6;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       CMOS_WRITE(RTC_REF_CLCK_32KHZ | rate, RTC_REG_A);
+
+       return 0;
+}
+
+static int ds1287_set_next_event(unsigned long delta,
+                                struct clock_event_device *evt)
+{
+       return -EINVAL;
+}
+
+static void ds1287_set_mode(enum clock_event_mode mode,
+                           struct clock_event_device *evt)
+{
+       u8 val;
+
+       spin_lock(&rtc_lock);
+
+       val = CMOS_READ(RTC_REG_B);
+
+       switch (mode) {
+       case CLOCK_EVT_MODE_PERIODIC:
+               val |= RTC_PIE;
+               break;
+       default:
+               val &= ~RTC_PIE;
+               break;
+       }
+
+       CMOS_WRITE(val, RTC_REG_B);
+
+       spin_unlock(&rtc_lock);
+}
+
+static void ds1287_event_handler(struct clock_event_device *dev)
+{
+}
+
+static struct clock_event_device ds1287_clockevent = {
+       .name           = "ds1287",
+       .features       = CLOCK_EVT_FEAT_PERIODIC,
+       .cpumask        = CPU_MASK_CPU0,
+       .set_next_event = ds1287_set_next_event,
+       .set_mode       = ds1287_set_mode,
+       .event_handler  = ds1287_event_handler,
+};
+
+static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *cd = &ds1287_clockevent;
+
+       /* Ack the RTC interrupt. */
+       CMOS_READ(RTC_REG_C);
+
+       cd->event_handler(cd);
+
+       return IRQ_HANDLED;
+}
+
+static struct irqaction ds1287_irqaction = {
+       .handler        = ds1287_interrupt,
+       .flags          = IRQF_DISABLED | IRQF_PERCPU,
+       .name           = "ds1287",
+};
+
+int __init ds1287_clockevent_init(int irq)
+{
+       struct clock_event_device *cd;
+
+       cd = &ds1287_clockevent;
+       cd->rating = 100;
+       cd->irq = irq;
+       clockevent_set_clock(cd, 32768);
+       cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+       cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+
+       clockevents_register_device(&ds1287_clockevent);
+
+       return setup_irq(irq, &ds1287_irqaction);
+}
index c36772631fe0e7b9524fbfe2ad6ed4b6da9435f4..6e2f58520afbdba949fb839ebfd66df78e4df364 100644 (file)
@@ -25,8 +25,6 @@
 #include <asm/gt64120.h>
 #include <asm/time.h>
 
-#include <irq.h>
-
 static DEFINE_SPINLOCK(gt641xx_timer_lock);
 static unsigned int gt641xx_base_clock;
 
index 89c3304cb93c391903f0178de2bd796a48e24c5d..335a6ae3d594044fa0ca99e44ee189631238b609 100644 (file)
@@ -169,6 +169,7 @@ static inline void check_wait(void)
 
        case CPU_24K:
        case CPU_34K:
+       case CPU_1004K:
                cpu_wait = r4k_wait;
                if (read_c0_config7() & MIPS_CONF7_WII)
                        cpu_wait = r4k_wait_irqoff;
@@ -675,6 +676,12 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c)
                return;
 }
 
+#ifdef CONFIG_CPU_MIPSR2
+extern void spram_config(void);
+#else
+static inline void spram_config(void) {}
+#endif
+
 static inline void cpu_probe_mips(struct cpuinfo_mips *c)
 {
        decode_configs(c);
@@ -711,7 +718,12 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c)
        case PRID_IMP_74K:
                c->cputype = CPU_74K;
                break;
+       case PRID_IMP_1004K:
+               c->cputype = CPU_1004K;
+               break;
        }
+
+       spram_config();
 }
 
 static inline void cpu_probe_alchemy(struct cpuinfo_mips *c)
@@ -778,7 +790,7 @@ static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c)
        }
 }
 
-static inline void cpu_probe_philips(struct cpuinfo_mips *c)
+static inline void cpu_probe_nxp(struct cpuinfo_mips *c)
 {
        decode_configs(c);
        switch (c->processor_id & 0xff00) {
@@ -787,7 +799,7 @@ static inline void cpu_probe_philips(struct cpuinfo_mips *c)
                c->isa_level = MIPS_CPU_ISA_M32R1;
                break;
        default:
-               panic("Unknown Philips Core!"); /* REVISIT: die? */
+               panic("Unknown NXP Core!"); /* REVISIT: die? */
                break;
        }
 }
@@ -876,6 +888,7 @@ static __cpuinit const char *cpu_to_name(struct cpuinfo_mips *c)
        case CPU_24K:           name = "MIPS 24K"; break;
        case CPU_25KF:          name = "MIPS 25Kf"; break;
        case CPU_34K:           name = "MIPS 34K"; break;
+       case CPU_1004K:         name = "MIPS 1004K"; break;
        case CPU_74K:           name = "MIPS 74K"; break;
        case CPU_VR4111:        name = "NEC VR4111"; break;
        case CPU_VR4121:        name = "NEC VR4121"; break;
@@ -925,8 +938,8 @@ __cpuinit void cpu_probe(void)
        case PRID_COMP_SANDCRAFT:
                cpu_probe_sandcraft(c);
                break;
-       case PRID_COMP_PHILIPS:
-               cpu_probe_philips(c);
+       case PRID_COMP_NXP:
+               cpu_probe_nxp(c);
                break;
        default:
                c->cputype = CPU_UNKNOWN;
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
new file mode 100644 (file)
index 0000000..1d5f63c
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ *  DEC I/O ASIC's counter clocksource
+ *
+ *  Copyright (C) 2008  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+
+#include <asm/ds1287.h>
+#include <asm/time.h>
+#include <asm/dec/ioasic.h>
+#include <asm/dec/ioasic_addrs.h>
+
+static cycle_t dec_ioasic_hpt_read(void)
+{
+       return ioasic_read(IO_REG_FCTR);
+}
+
+static struct clocksource clocksource_dec = {
+       .name           = "dec-ioasic",
+       .read           = dec_ioasic_hpt_read,
+       .mask           = CLOCKSOURCE_MASK(32),
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+void __init dec_ioasic_clocksource_init(void)
+{
+       unsigned int freq;
+       u32 start, end;
+       int i = HZ / 10;
+
+
+       while (!ds1287_timer_state())
+               ;
+
+       start = dec_ioasic_hpt_read();
+
+       while (i--)
+               while (!ds1287_timer_state())
+                       ;
+
+       end = dec_ioasic_hpt_read();
+
+       freq = (end - start) * 10;
+       printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
+
+       clocksource_dec.rating = 200 + freq / 10000000;
+       clocksource_set_clock(&clocksource_dec, freq);
+
+       clocksource_register(&clocksource_dec);
+}
diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c
new file mode 100644 (file)
index 0000000..b1436a8
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+ * A gpio chip driver for TXx9 SoCs
+ *
+ * Copyright (C) 2008 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/gpio.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <asm/txx9pio.h>
+
+static DEFINE_SPINLOCK(txx9_gpio_lock);
+
+static struct txx9_pio_reg __iomem *txx9_pioptr;
+
+static int txx9_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+       return __raw_readl(&txx9_pioptr->din) & (1 << offset);
+}
+
+static void txx9_gpio_set_raw(unsigned int offset, int value)
+{
+       u32 val;
+       val = __raw_readl(&txx9_pioptr->dout);
+       if (value)
+               val |= 1 << offset;
+       else
+               val &= ~(1 << offset);
+       __raw_writel(val, &txx9_pioptr->dout);
+}
+
+static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset,
+                         int value)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&txx9_gpio_lock, flags);
+       txx9_gpio_set_raw(offset, value);
+       mmiowb();
+       spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+}
+
+static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
+{
+       spin_lock_irq(&txx9_gpio_lock);
+       __raw_writel(__raw_readl(&txx9_pioptr->dir) & ~(1 << offset),
+                    &txx9_pioptr->dir);
+       mmiowb();
+       spin_unlock_irq(&txx9_gpio_lock);
+       return 0;
+}
+
+static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset,
+                            int value)
+{
+       spin_lock_irq(&txx9_gpio_lock);
+       txx9_gpio_set_raw(offset, value);
+       __raw_writel(__raw_readl(&txx9_pioptr->dir) | (1 << offset),
+                    &txx9_pioptr->dir);
+       mmiowb();
+       spin_unlock_irq(&txx9_gpio_lock);
+       return 0;
+}
+
+static struct gpio_chip txx9_gpio_chip = {
+       .get = txx9_gpio_get,
+       .set = txx9_gpio_set,
+       .direction_input = txx9_gpio_dir_in,
+       .direction_output = txx9_gpio_dir_out,
+       .label = "TXx9",
+};
+
+int __init txx9_gpio_init(unsigned long baseaddr,
+                         unsigned int base, unsigned int num)
+{
+       txx9_pioptr = ioremap(baseaddr, sizeof(struct txx9_pio_reg));
+       if (!txx9_pioptr)
+               return -ENODEV;
+       txx9_gpio_chip.base = base;
+       txx9_gpio_chip.ngpio = num;
+       return gpiochip_add(&txx9_gpio_chip);
+}
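
Once txx9_gpio_init() has registered the chip with gpiolib, board code can use the generic GPIO calls against the numbers in [base, base + num); the jmr3927 hunk earlier does exactly this with gpio_request(). A sketch of typical consumer usage (the pin number and label are illustrative):

#include <linux/gpio.h>
#include <linux/kernel.h>

static int __init example_gpio_user(void)
{
	int err;

	err = gpio_request(11, "dipsw1");	/* claim GPIO 11 */
	if (err)
		return err;

	err = gpio_direction_input(11);		/* routes to txx9_gpio_dir_in() */
	if (err) {
		gpio_free(11);
		return err;
	}

	printk(KERN_INFO "dipsw1 = %d\n", gpio_get_value(11) ? 1 : 0);
	return 0;
}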
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
new file mode 100644 (file)
index 0000000..f0a4bb1
--- /dev/null
@@ -0,0 +1,295 @@
+#undef DEBUG
+
+#include <linux/bitmap.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/gic.h>
+#include <asm/gcmpregs.h>
+#include <asm/mips-boards/maltaint.h>
+#include <asm/irq.h>
+#include <linux/hardirq.h>
+#include <asm-generic/bitops/find.h>
+
+
+static unsigned long _gic_base;
+static unsigned int _irqbase, _mapsize, numvpes, numintrs;
+static struct gic_intr_map *_intrmap;
+
+static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
+static struct gic_pending_regs pending_regs[NR_CPUS];
+static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
+
+#define gic_wedgeb2bok 0       /*
+                                * Can GIC handle b2b writes to wedge register?
+                                */
+#if gic_wedgeb2bok == 0
+static DEFINE_SPINLOCK(gic_wedgeb2b_lock);
+#endif
+
+void gic_send_ipi(unsigned int intr)
+{
+#if gic_wedgeb2bok == 0
+       unsigned long flags;
+#endif
+       pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__,
+                read_c0_status());
+       if (!gic_wedgeb2bok)
+               spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
+       GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
+       if (!gic_wedgeb2bok) {
+               (void) GIC_REG(SHARED, GIC_SH_CONFIG);
+               spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
+       }
+}
+
+/* This is Malta specific and needs to be exported */
+static void vpe_local_setup(unsigned int numvpes)
+{
+       int i;
+       unsigned long timer_interrupt = 5, perf_interrupt = 5;
+       unsigned int vpe_ctl;
+
+       /*
+        * Setup the default performance counter timer interrupts
+        * for all VPEs
+        */
+       for (i = 0; i < numvpes; i++) {
+               GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
+
+               /* Are Interrupts locally routable? */
+               GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
+               if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
+                       GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
+                                GIC_MAP_TO_PIN_MSK | timer_interrupt);
+
+               if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
+                       GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
+                                GIC_MAP_TO_PIN_MSK | perf_interrupt);
+       }
+}
+
+unsigned int gic_get_int(void)
+{
+       unsigned int i;
+       unsigned long *pending, *intrmask, *pcpu_mask;
+       unsigned long *pending_abs, *intrmask_abs;
+
+       /* Get per-cpu bitmaps */
+       pending = pending_regs[smp_processor_id()].pending;
+       intrmask = intrmask_regs[smp_processor_id()].intrmask;
+       pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
+
+       pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
+                                                        GIC_SH_PEND_31_0_OFS);
+       intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
+                                                         GIC_SH_MASK_31_0_OFS);
+
+       for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
+               GICREAD(*pending_abs, pending[i]);
+               GICREAD(*intrmask_abs, intrmask[i]);
+               pending_abs++;
+               intrmask_abs++;
+       }
+
+       bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
+       bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);
+
+       i = find_first_bit(pending, GIC_NUM_INTRS);
+
+       pr_debug("CPU%d: %s pend=%d\n", smp_processor_id(), __func__, i);
+
+       return i;
+}
+
+static unsigned int gic_irq_startup(unsigned int irq)
+{
+       pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+       irq -= _irqbase;
+       /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
+       GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
+                1 << (irq % 32));
+       return 0;
+}
+
+static void gic_irq_ack(unsigned int irq)
+{
+#if gic_wedgeb2bok == 0
+       unsigned long flags;
+#endif
+       pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+       irq -= _irqbase;
+       GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
+                1 << (irq % 32));
+
+       if (_intrmap[irq].trigtype == GIC_TRIG_EDGE) {
+               if (!gic_wedgeb2bok)
+                       spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
+               GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
+               if (!gic_wedgeb2bok) {
+                       (void) GIC_REG(SHARED, GIC_SH_CONFIG);
+                       spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
+               }
+       }
+}
+
+static void gic_mask_irq(unsigned int irq)
+{
+       pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+       irq -= _irqbase;
+       /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
+       GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
+                1 << (irq % 32));
+}
+
+static void gic_unmask_irq(unsigned int irq)
+{
+       pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
+       irq -= _irqbase;
+       /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
+       GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
+                1 << (irq % 32));
+}
+
+#ifdef CONFIG_SMP
+
+static DEFINE_SPINLOCK(gic_lock);
+
+static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
+{
+       cpumask_t       tmp = CPU_MASK_NONE;
+       unsigned long   flags;
+       int             i;
+
+       pr_debug("%s called\n", __func__);
+       irq -= _irqbase;
+
+       cpus_and(tmp, cpumask, cpu_online_map);
+       if (cpus_empty(tmp))
+               return;
+
+       /* Assumption : cpumask refers to a single CPU */
+       spin_lock_irqsave(&gic_lock, flags);
+
+       /* Re-route this IRQ */
+       GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
+
+       /*
+        * FIXME: assumption that _intrmap is ordered and has no holes
+        */
+
+       /* Update the intr_map */
+       _intrmap[irq].cpunum = first_cpu(tmp);
+
+       /* Update the pcpu_masks */
+       for (i = 0; i < NR_CPUS; i++)
+               clear_bit(irq, pcpu_masks[i].pcpu_mask);
+       set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+
+       irq_desc[irq].affinity = cpumask;
+       spin_unlock_irqrestore(&gic_lock, flags);
+
+}
+#endif
+
+static struct irq_chip gic_irq_controller = {
+       .name           =       "MIPS GIC",
+       .startup        =       gic_irq_startup,
+       .ack            =       gic_irq_ack,
+       .mask           =       gic_mask_irq,
+       .mask_ack       =       gic_mask_irq,
+       .unmask         =       gic_unmask_irq,
+       .eoi            =       gic_unmask_irq,
+#ifdef CONFIG_SMP
+       .set_affinity   =       gic_set_affinity,
+#endif
+};
+
+static void __init setup_intr(unsigned int intr, unsigned int cpu,
+       unsigned int pin, unsigned int polarity, unsigned int trigtype)
+{
+       /* Setup Intr to Pin mapping */
+       if (pin & GIC_MAP_TO_NMI_MSK) {
+               GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
+               /* FIXME: hack to route NMI to all cpu's */
+               for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
+                       GICWRITE(GIC_REG_ADDR(SHARED,
+                                         GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
+                                0xffffffff);
+               }
+       } else {
+               GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
+                        GIC_MAP_TO_PIN_MSK | pin);
+               /* Setup Intr to CPU mapping */
+               GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
+       }
+
+       /* Setup Intr Polarity */
+       GIC_SET_POLARITY(intr, polarity);
+
+       /* Setup Intr Trigger Type */
+       GIC_SET_TRIGGER(intr, trigtype);
+
+       /* Init Intr Masks */
+       GIC_SET_INTR_MASK(intr, 0);
+}
+
+static void __init gic_basic_init(void)
+{
+       unsigned int i, cpu;
+
+       /* Setup defaults */
+       for (i = 0; i < GIC_NUM_INTRS; i++) {
+               GIC_SET_POLARITY(i, GIC_POL_POS);
+               GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
+               GIC_SET_INTR_MASK(i, 0);
+       }
+
+       /* Setup specifics */
+       for (i = 0; i < _mapsize; i++) {
+               cpu = _intrmap[i].cpunum;
+               if (cpu == X)
+                       continue;
+
+               setup_intr(_intrmap[i].intrnum,
+                               _intrmap[i].cpunum,
+                               _intrmap[i].pin,
+                               _intrmap[i].polarity,
+                               _intrmap[i].trigtype);
+               /* Initialise per-cpu Interrupt software masks */
+               if (_intrmap[i].ipiflag)
+                       set_bit(_intrmap[i].intrnum, pcpu_masks[cpu].pcpu_mask);
+       }
+
+       vpe_local_setup(numvpes);
+
+       for (i = _irqbase; i < (_irqbase + numintrs); i++)
+               set_irq_chip(i, &gic_irq_controller);
+}
+
+void __init gic_init(unsigned long gic_base_addr,
+                    unsigned long gic_addrspace_size,
+                    struct gic_intr_map *intr_map, unsigned int intr_map_size,
+                    unsigned int irqbase)
+{
+       unsigned int gicconfig;
+
+       _gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
+                                                   gic_addrspace_size);
+       _irqbase = irqbase;
+       _intrmap = intr_map;
+       _mapsize = intr_map_size;
+
+       GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
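+       /* The NUMINTRS field encodes (number of shared interrupts / 8) - 1, hence the decode below */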
+       numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
+                  GIC_SH_CONFIG_NUMINTRS_SHF;
+       numintrs = ((numintrs + 1) * 8);
+
+       numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
+                 GIC_SH_CONFIG_NUMVPES_SHF;
+
+       pr_debug("%s called\n", __func__);
+
+       gic_basic_init();
+}
index 4edc7e451d91ce5ffce4f6a04c88dba1f835e2b4..963c16d266aba30da2d14d95813979b7170f3f75 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/msc01_ic.h>
+#include <asm/traps.h>
 
 static unsigned long _icctrl_msc;
 #define MSC01_IC_REG_BASE      _icctrl_msc
@@ -98,14 +99,13 @@ void ll_msc_irq(void)
        }
 }
 
-void
-msc_bind_eic_interrupt(unsigned int irq, unsigned int set)
+static void msc_bind_eic_interrupt(int irq, int set)
 {
        MSCIC_WRITE(MSC01_IC_RAMW,
                    (irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
 }
 
-struct irq_chip msc_levelirq_type = {
+static struct irq_chip msc_levelirq_type = {
        .name = "SOC-it-Level",
        .ack = level_mask_and_ack_msc_irq,
        .mask = mask_msc_irq,
@@ -115,7 +115,7 @@ struct irq_chip msc_levelirq_type = {
        .end = end_msc_irq,
 };
 
-struct irq_chip msc_edgeirq_type = {
+static struct irq_chip msc_edgeirq_type = {
        .name = "SOC-it-Edge",
        .ack = edge_mask_and_ack_msc_irq,
        .mask = mask_msc_irq,
@@ -128,8 +128,6 @@ struct irq_chip msc_edgeirq_type = {
 
 void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqmap_t *imp, int nirq)
 {
-       extern void (*board_bind_eic_interrupt)(unsigned int irq, unsigned int regset);
-
        _icctrl_msc = (unsigned long) ioremap(icubase, 0x40000);
 
        /* Reset interrupt controller - initialises all registers to 0 */
index c0faabd520103dec45ddce73aefaf8e27f775d35..6c8e8c4246f754439602790815f88eaa1bb6280a 100644 (file)
@@ -14,7 +14,7 @@
 /* #define DEBUG_SIG */
 
 #ifdef DEBUG_SIG
-#  define DEBUGP(fmt, args...) printk("%s: " fmt, __FUNCTION__ , ##args)
+#  define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
 #else
 #  define DEBUGP(fmt, args...)
 #endif
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
new file mode 100644 (file)
index 0000000..ca476c4
--- /dev/null
@@ -0,0 +1,265 @@
+/*
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ *    Chris Dearman (chris@mips.com)
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+
+#include <asm/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+
+/*
+ * Crude manipulation of the CPU masks to control which
+ * CPUs are brought online during initialisation
+ *
+ * Beware... this needs to be called after CPU discovery
+ * but before CPU bringup
+ */
+static int __init allowcpus(char *str)
+{
+       cpumask_t cpu_allow_map;
+       char buf[256];
+       int len;
+
+       cpus_clear(cpu_allow_map);
+       if (cpulist_parse(str, cpu_allow_map) == 0) {
+               cpu_set(0, cpu_allow_map);
+               cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map);
+               len = cpulist_scnprintf(buf, sizeof(buf)-1, cpu_possible_map);
+               buf[len] = '\0';
+               pr_debug("Allowable CPUs: %s\n", buf);
+               return 1;
+       } else
+               return 0;
+}
+__setup("allowcpus=", allowcpus);
+
+static void ipi_call_function(unsigned int cpu)
+{
+       unsigned int action = 0;
+
+       pr_debug("CPU%d: %s cpu %d status %08x\n",
+                smp_processor_id(), __func__, cpu, read_c0_status());
+
+       switch (cpu) {
+       case 0:
+               action = GIC_IPI_EXT_INTR_CALLFNC_VPE0;
+               break;
+       case 1:
+               action = GIC_IPI_EXT_INTR_CALLFNC_VPE1;
+               break;
+       case 2:
+               action = GIC_IPI_EXT_INTR_CALLFNC_VPE2;
+               break;
+       case 3:
+               action = GIC_IPI_EXT_INTR_CALLFNC_VPE3;
+               break;
+       }
+       gic_send_ipi(action);
+}
+
+
+static void ipi_resched(unsigned int cpu)
+{
+       unsigned int action = 0;
+
+       pr_debug("CPU%d: %s cpu %d status %08x\n",
+                smp_processor_id(), __func__, cpu, read_c0_status());
+
+       switch (cpu) {
+       case 0:
+               action = GIC_IPI_EXT_INTR_RESCHED_VPE0;
+               break;
+       case 1:
+               action = GIC_IPI_EXT_INTR_RESCHED_VPE1;
+               break;
+       case 2:
+               action = GIC_IPI_EXT_INTR_RESCHED_VPE2;
+               break;
+       case 3:
+               action = GIC_IPI_EXT_INTR_RESCHED_VPE3;
+               break;
+       }
+       gic_send_ipi(action);
+}
+
+/*
+ * FIXME: This isn't restricted to CMP
+ * The SMVP kernel could use GIC interrupts if available
+ */
+void cmp_send_ipi_single(int cpu, unsigned int action)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       switch (action) {
+       case SMP_CALL_FUNCTION:
+               ipi_call_function(cpu);
+               break;
+
+       case SMP_RESCHEDULE_YOURSELF:
+               ipi_resched(cpu);
+               break;
+       }
+
+       local_irq_restore(flags);
+}
+
+static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+       unsigned int i;
+
+       for_each_cpu_mask(i, mask)
+               cmp_send_ipi_single(i, action);
+}
+
+static void cmp_init_secondary(void)
+{
+       struct cpuinfo_mips *c = &current_cpu_data;
+
+       /* Assume GIC is present */
+       change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
+                                STATUSF_IP7);
+
+       /* Enable per-cpu interrupts: platform specific */
+
+       c->core = (read_c0_ebase() >> 1) & 0xff;
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+       c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
+#endif
+#ifdef CONFIG_MIPS_MT_SMTC
+       c->tc_id  = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC;
+#endif
+}
+
+static void cmp_smp_finish(void)
+{
+       pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+
+       /* CDFIXME: remove this? */
+       write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+       /* If we have an FPU, enroll ourselves in the FPU-full mask */
+       if (cpu_has_fpu)
+               cpu_set(smp_processor_id(), mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+       local_irq_enable();
+}
+
+static void cmp_cpus_done(void)
+{
+       pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+}
+
+/*
+ * Setup the PC, SP, and GP of a secondary processor and start it running
+ * smp_bootstrap is the place to resume from
+ * __KSTK_TOS(idle) is apparently the stack pointer
+ * (unsigned long)idle->thread_info is the gp
+ */
+static void cmp_boot_secondary(int cpu, struct task_struct *idle)
+{
+       struct thread_info *gp = task_thread_info(idle);
+       unsigned long sp = __KSTK_TOS(idle);
+       unsigned long pc = (unsigned long)&smp_bootstrap;
+       unsigned long a0 = 0;
+
+       pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(),
+               __func__, cpu);
+
+#if 0
+       /* Needed? */
+       flush_icache_range((unsigned long)gp,
+                          (unsigned long)(gp + sizeof(struct thread_info)));
+#endif
+
+       amon_cpu_start(cpu, pc, sp, gp, a0);
+}
+
+/*
+ * Common setup before any secondaries are started
+ */
+void __init cmp_smp_setup(void)
+{
+       int i;
+       int ncpu = 0;
+
+       pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+       /* If we have an FPU, enroll ourselves in the FPU-full mask */
+       if (cpu_has_fpu)
+               cpu_set(0, mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+       for (i = 1; i < NR_CPUS; i++) {
+               if (amon_cpu_avail(i)) {
+                       cpu_set(i, phys_cpu_present_map);
+                       __cpu_number_map[i]     = ++ncpu;
+                       __cpu_logical_map[ncpu] = i;
+               }
+       }
+
+       if (cpu_has_mipsmt) {
+               unsigned int nvpe, mvpconf0 = read_c0_mvpconf0();
+
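+               /* MVPConf0.PTC holds the highest TC number, so this core has PTC + 1 hardware threads */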
+               nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+               smp_num_siblings = nvpe;
+       }
+       pr_info("Detected %i available secondary CPU(s)\n", ncpu);
+}
+
+void __init cmp_prepare_cpus(unsigned int max_cpus)
+{
+       pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
+                smp_processor_id(), __func__, max_cpus);
+
+       /*
+        * FIXME: some of these options are per-system, some per-core and
+        * some per-cpu
+        */
+       mips_mt_set_cpuoptions();
+}
+
+struct plat_smp_ops cmp_smp_ops = {
+       .send_ipi_single        = cmp_send_ipi_single,
+       .send_ipi_mask          = cmp_send_ipi_mask,
+       .init_secondary         = cmp_init_secondary,
+       .smp_finish             = cmp_smp_finish,
+       .cpus_done              = cmp_cpus_done,
+       .boot_secondary         = cmp_boot_secondary,
+       .smp_setup              = cmp_smp_setup,
+       .prepare_cpus           = cmp_prepare_cpus,
+};
index 89e6f6aa5166f310c20ae91fafe0c829def00c94..87a1816c1f4589cdb90c88896830b16b77b84f58 100644 (file)
 #include <asm/mipsmtregs.h>
 #include <asm/mips_mt.h>
 
-#define MIPS_CPU_IPI_RESCHED_IRQ 0
-#define MIPS_CPU_IPI_CALL_IRQ 1
-
-static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
-
-#if 0
-static void dump_mtregisters(int vpe, int tc)
-{
-       printk("vpe %d tc %d\n", vpe, tc);
-
-       settc(tc);
-
-       printk("  c0 status  0x%lx\n", read_vpe_c0_status());
-       printk("  vpecontrol 0x%lx\n", read_vpe_c0_vpecontrol());
-       printk("  vpeconf0    0x%lx\n", read_vpe_c0_vpeconf0());
-       printk("  tcstatus 0x%lx\n", read_tc_c0_tcstatus());
-       printk("  tcrestart 0x%lx\n", read_tc_c0_tcrestart());
-       printk("  tcbind 0x%lx\n", read_tc_c0_tcbind());
-       printk("  tchalt 0x%lx\n", read_tc_c0_tchalt());
-}
-#endif
-
-void __init sanitize_tlb_entries(void)
-{
-       int i, tlbsiz;
-       unsigned long mvpconf0, ncpu;
-
-       if (!cpu_has_mipsmt)
-               return;
-
-       /* Enable VPC */
-       set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-       back_to_back_c0_hazard();
-
-       /* Disable TLB sharing */
-       clear_c0_mvpcontrol(MVPCONTROL_STLB);
-
-       mvpconf0 = read_c0_mvpconf0();
-
-       printk(KERN_INFO "MVPConf0 0x%lx TLBS %lx PTLBE %ld\n", mvpconf0,
-                  (mvpconf0 & MVPCONF0_TLBS) >> MVPCONF0_TLBS_SHIFT,
-                          (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT);
-
-       tlbsiz = (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT;
-       ncpu = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-
-       printk(" tlbsiz %d ncpu %ld\n", tlbsiz, ncpu);
-
-       if (tlbsiz > 0) {
-               /* share them out across the vpe's */
-               tlbsiz /= ncpu;
-
-               printk(KERN_INFO "setting Config1.MMU_size to %d\n", tlbsiz);
-
-               for (i = 0; i < ncpu; i++) {
-                       settc(i);
-
-                       if (i == 0)
-                               write_c0_config1((read_c0_config1() & ~(0x3f << 25)) | (tlbsiz << 25));
-                       else
-                               write_vpe_c0_config1((read_vpe_c0_config1() & ~(0x3f << 25)) |
-                                                  (tlbsiz << 25));
-               }
-       }
-
-       clear_c0_mvpcontrol(MVPCONTROL_VPC);
-}
-
-static void ipi_resched_dispatch(void)
-{
-       do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
-}
-
-static void ipi_call_dispatch(void)
-{
-       do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
-}
-
-static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
-{
-       return IRQ_HANDLED;
-}
-
-static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
-{
-       smp_call_function_interrupt();
-
-       return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
-       .handler        = ipi_resched_interrupt,
-       .flags          = IRQF_DISABLED|IRQF_PERCPU,
-       .name           = "IPI_resched"
-};
-
-static struct irqaction irq_call = {
-       .handler        = ipi_call_interrupt,
-       .flags          = IRQF_DISABLED|IRQF_PERCPU,
-       .name           = "IPI_call"
-};
-
-static void __init smp_copy_vpe_config(void)
+static void __init smvp_copy_vpe_config(void)
 {
        write_vpe_c0_status(
                (read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
@@ -156,7 +53,7 @@ static void __init smp_copy_vpe_config(void)
        write_vpe_c0_count(read_c0_count());
 }
 
-static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
+static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
        unsigned int ncpu)
 {
        if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
@@ -182,12 +79,12 @@ static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
        write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
 
        if (tc != 0)
-               smp_copy_vpe_config();
+               smvp_copy_vpe_config();
 
        return ncpu;
 }
 
-static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
+static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
 {
        unsigned long tmp;
 
@@ -254,15 +151,20 @@ static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
 
 static void __cpuinit vsmp_init_secondary(void)
 {
-       /* Enable per-cpu interrupts */
+       extern int gic_present;
 
        /* This is Malta specific: IPI, performance and timer interrupts */
-       write_c0_status((read_c0_status() & ~ST0_IM ) |
-                       (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
+       if (gic_present)
+               change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
+                                        STATUSF_IP6 | STATUSF_IP7);
+       else
+               change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
+                                        STATUSF_IP6 | STATUSF_IP7);
 }
 
 static void __cpuinit vsmp_smp_finish(void)
 {
+       /* CDFIXME: remove this? */
        write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
 
 #ifdef CONFIG_MIPS_MT_FPAFF
@@ -323,7 +225,7 @@ static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
 /*
  * Common setup before any secondaries are started
  * Make sure all CPU's are in a sensible state before we boot any of the
- * secondarys
+ * secondaries
  */
 static void __init vsmp_smp_setup(void)
 {
@@ -356,8 +258,8 @@ static void __init vsmp_smp_setup(void)
        for (tc = 0; tc <= ntc; tc++) {
                settc(tc);
 
-               smp_tc_init(tc, mvpconf0);
-               ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
+               smvp_tc_init(tc, mvpconf0);
+               ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
        }
 
        /* Release config state */
@@ -371,21 +273,6 @@ static void __init vsmp_smp_setup(void)
 static void __init vsmp_prepare_cpus(unsigned int max_cpus)
 {
        mips_mt_set_cpuoptions();
-
-       /* set up ipi interrupts */
-       if (cpu_has_vint) {
-               set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
-               set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
-       }
-
-       cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
-       cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
-
-       setup_irq(cpu_ipi_resched_irq, &irq_resched);
-       setup_irq(cpu_ipi_call_irq, &irq_call);
-
-       set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
-       set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
 }
 
 struct plat_smp_ops vsmp_smp_ops = {
index 9d41dab90a809732e1c2eab6ed234e989d5637cd..33780cc61ce902e80a289410d46bded866e582f2 100644 (file)
@@ -35,6 +35,7 @@
 #include <asm/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/r4k-timer.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/time.h>
@@ -125,6 +126,8 @@ asmlinkage __cpuinit void start_secondary(void)
 
        cpu_set(cpu, cpu_callin_map);
 
+       synchronise_count_slave();
+
        cpu_idle();
 }
 
@@ -287,6 +290,7 @@ void smp_send_stop(void)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
        mp_ops->cpus_done();
+       synchronise_count_master();
 }
 
 /* called from main before smp_init() */
index b42e71c711199a4f169f717e8467240b8ebbae57..3e863186cd222944fcbf9b441dc7e10f140fcb18 100644 (file)
@@ -174,14 +174,6 @@ static int clock_hang_reported[NR_CPUS];
 
 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
 
-/* Initialize shared TLB - the should probably migrate to smtc_setup_cpus() */
-
-void __init sanitize_tlb_entries(void)
-{
-       printk("Deprecated sanitize_tlb_entries() invoked\n");
-}
-
-
 /*
  * Configure shared TLB - VPC configuration bit must be set by caller
  */
@@ -339,7 +331,8 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
        /* In general, all TCs should have the same cpu_data indications */
        memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
        /* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
-       if (cpu_data[0].cputype == CPU_34K)
+       if (cpu_data[0].cputype == CPU_34K ||
+           cpu_data[0].cputype == CPU_1004K)
                cpu_data[cpu].options &= ~MIPS_CPU_FPU;
        cpu_data[cpu].vpe_id = vpe;
        cpu_data[cpu].tc_id = tc;
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
new file mode 100644 (file)
index 0000000..6ddb507
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * MIPS SPRAM support
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Copyright (C) 2007, 2008 MIPS Technologies, Inc.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/stddef.h>
+
+#include <asm/cpu.h>
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/system.h>
+#include <asm/r4kcache.h>
+#include <asm/hazards.h>
+
+/*
+ * These definitions are correct for the 24K/34K/74K SPRAM sample
+ * implementation. The 4KS interpreted the tags differently...
+ */
+#define SPRAM_TAG0_ENABLE      0x00000080
+#define SPRAM_TAG0_PA_MASK     0xfffff000
+#define SPRAM_TAG1_SIZE_MASK   0xfffff000
+
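+/* Each SPRAM bank has a pair of tags (tag0/tag1) spaced SPRAM_TAG_STRIDE bytes apart */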
+#define SPRAM_TAG_STRIDE       8
+
+#define ERRCTL_SPRAM           (1 << 28)
+
+/* errctl access */
+#define read_c0_errctl(x) read_c0_ecc(x)
+#define write_c0_errctl(x) write_c0_ecc(x)
+
+/*
+ * Different semantics to the set_c0_* function built by __BUILD_SET_C0
+ */
+static __cpuinit unsigned int bis_c0_errctl(unsigned int set)
+{
+       unsigned int res;
+       res = read_c0_errctl();
+       write_c0_errctl(res | set);
+       return res;
+}
+
+static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data)
+{
+       unsigned int errctl;
+
+       /* enable SPRAM tag access */
+       errctl = bis_c0_errctl(ERRCTL_SPRAM);
+       ehb();
+
+       write_c0_taglo(data);
+       ehb();
+
+       cache_op(Index_Store_Tag_I, CKSEG0|offset);
+       ehb();
+
+       write_c0_errctl(errctl);
+       ehb();
+}
+
+
+static __cpuinit unsigned int ispram_load_tag(unsigned int offset)
+{
+       unsigned int data;
+       unsigned int errctl;
+
+       /* enable SPRAM tag access */
+       errctl = bis_c0_errctl(ERRCTL_SPRAM);
+       ehb();
+       cache_op(Index_Load_Tag_I, CKSEG0 | offset);
+       ehb();
+       data = read_c0_taglo();
+       ehb();
+       write_c0_errctl(errctl);
+       ehb();
+
+       return data;
+}
+
+static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data)
+{
+       unsigned int errctl;
+
+       /* enable SPRAM tag access */
+       errctl = bis_c0_errctl(ERRCTL_SPRAM);
+       ehb();
+       write_c0_dtaglo(data);
+       ehb();
+       cache_op(Index_Store_Tag_D, CKSEG0 | offset);
+       ehb();
+       write_c0_errctl(errctl);
+       ehb();
+}
+
+
+static __cpuinit unsigned int dspram_load_tag(unsigned int offset)
+{
+       unsigned int data;
+       unsigned int errctl;
+
+       errctl = bis_c0_errctl(ERRCTL_SPRAM);
+       ehb();
+       cache_op(Index_Load_Tag_D, CKSEG0 | offset);
+       ehb();
+       data = read_c0_dtaglo();
+       ehb();
+       write_c0_errctl(errctl);
+       ehb();
+
+       return data;
+}
+
+static __cpuinit void probe_spram(char *type,
+           unsigned int base,
+           unsigned int (*read)(unsigned int),
+           void (*write)(unsigned int, unsigned int))
+{
+       unsigned int firstsize = 0, lastsize = 0;
+       unsigned int firstpa = 0, lastpa = 0, pa = 0;
+       unsigned int offset = 0;
+       unsigned int size, tag0, tag1;
+       unsigned int enabled;
+       int i;
+
+       /*
+        * The limit is arbitrary but avoids the loop running away if
+        * the SPRAM tags are implemented differently
+        */
+
+       for (i = 0; i < 8; i++) {
+               tag0 = read(offset);
+               tag1 = read(offset+SPRAM_TAG_STRIDE);
+               pr_debug("DBG %s%d: tag0=%08x tag1=%08x\n",
+                        type, i, tag0, tag1);
+
+               size = tag1 & SPRAM_TAG1_SIZE_MASK;
+
+               if (size == 0)
+                       break;
+
+               if (i != 0) {
+                       /* tags may repeat... */
+                       if ((pa == firstpa && size == firstsize) ||
+                           (pa == lastpa && size == lastsize))
+                               break;
+               }
+
+               /* Align base with size */
+               base = (base + size - 1) & ~(size-1);
+
+               /* reprogram the base address and enable */
+               tag0 = (base & SPRAM_TAG0_PA_MASK) | SPRAM_TAG0_ENABLE;
+               write(offset, tag0);
+
+               base += size;
+
+               /* reread the tag */
+               tag0 = read(offset);
+               pa = tag0 & SPRAM_TAG0_PA_MASK;
+               enabled = tag0 & SPRAM_TAG0_ENABLE;
+
+               if (i == 0) {
+                       firstpa = pa;
+                       firstsize = size;
+               }
+
+               lastpa = pa;
+               lastsize = size;
+
+               if (strcmp(type, "DSPRAM") == 0) {
+                       unsigned int *vp = (unsigned int *)(CKSEG1 | pa);
+                       unsigned int v;
+#define TDAT   0x5a5aa5a5
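+                       /* Write/readback smoke test of the DSPRAM through its uncached KSEG1 mapping */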
+                       vp[0] = TDAT;
+                       vp[1] = ~TDAT;
+
+                       mb();
+
+                       v = vp[0];
+                       if (v != TDAT)
+                               printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
+                                      vp, TDAT, v);
+                       v = vp[1];
+                       if (v != ~TDAT)
+                               printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
+                                      vp+1, ~TDAT, v);
+               }
+
+               pr_info("%s%d: PA=%08x,Size=%08x%s\n",
+                       type, i, pa, size, enabled ? ",enabled" : "");
+               offset += 2 * SPRAM_TAG_STRIDE;
+       }
+}
+
+__cpuinit void spram_config(void)
+{
+       struct cpuinfo_mips *c = &current_cpu_data;
+       unsigned int config0;
+
+       switch (c->cputype) {
+       case CPU_24K:
+       case CPU_34K:
+       case CPU_74K:
+               config0 = read_c0_config();
+               /* FIXME: addresses are Malta specific */
+               if (config0 & (1<<24)) {
+                       probe_spram("ISPRAM", 0x1c000000,
+                                   &ispram_load_tag, &ispram_store_tag);
+               }
+               if (config0 & (1<<23))
+                       probe_spram("DSPRAM", 0x1c100000,
+                                   &dspram_load_tag, &dspram_store_tag);
+       }
+}
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
new file mode 100644 (file)
index 0000000..9021108
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+ * Count register synchronisation.
+ *
+ * All CPUs will have their count registers synchronised to the CPU0 expirelo
+ * value. This can cause a small timewarp for CPU0. All other CPU's should
+ * not have done anything significant (but they may have had interrupts
+ * enabled briefly - prom_smp_finish() should not be responsible for enabling
+ * interrupts...)
+ *
+ * FIXME: broken for SMTC
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/r4k-timer.h>
+
+#include <asm/atomic.h>
+#include <asm/barrier.h>
+#include <asm/cpumask.h>
+#include <asm/mipsregs.h>
+
+static atomic_t __initdata count_start_flag = ATOMIC_INIT(0);
+static atomic_t __initdata count_count_start = ATOMIC_INIT(0);
+static atomic_t __initdata count_count_stop = ATOMIC_INIT(0);
+
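+/* COUNTON: how far ahead of Count to re-arm Compare once the sync is done */
+/* NR_LOOPS: passes through the sync loop; Count is only written on the last pass */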
+#define COUNTON        100
+#define NR_LOOPS 5
+
+void __init synchronise_count_master(void)
+{
+       int i;
+       unsigned long flags;
+       unsigned int initcount;
+       int nslaves;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC needs to synchronise per VPE, not per CPU
+        * ignore for now
+        */
+       return;
+#endif
+
+       pr_info("Checking COUNT synchronization across %u CPUs: ",
+               num_online_cpus());
+
+       local_irq_save(flags);
+
+       /*
+        * Notify the slaves that it's time to start
+        */
+       atomic_set(&count_start_flag, 1);
+       smp_wmb();
+
+       /* Count will be initialised to expirelo for all CPU's */
+       initcount = expirelo;
+
+       /*
+        * We loop a few times to get a primed instruction cache,
+        * then the last pass is more or less synchronised and
+        * the master and slaves each set their cycle counters to a known
+        * value all at once. This reduces the chance of having random offsets
+        * between the processors, and guarantees that the maximum
+        * delay between the cycle counters is never bigger than
+        * the latency of information-passing (cachelines) between
+        * two CPUs.
+        */
+
+       nslaves = num_online_cpus()-1;
+       for (i = 0; i < NR_LOOPS; i++) {
+               /* slaves loop on '!= ncpus' */
+               while (atomic_read(&count_count_start) != nslaves)
+                       mb();
+               atomic_set(&count_count_stop, 0);
+               smp_wmb();
+
+               /* this lets the slaves write their count register */
+               atomic_inc(&count_count_start);
+
+               /*
+                * Everyone initialises count in the last loop:
+                */
+               if (i == NR_LOOPS-1)
+                       write_c0_count(initcount);
+
+               /*
+                * Wait for all slaves to leave the synchronization point:
+                */
+               while (atomic_read(&count_count_stop) != nslaves)
+                       mb();
+               atomic_set(&count_count_start, 0);
+               smp_wmb();
+               atomic_inc(&count_count_stop);
+       }
+       /* Arrange for an interrupt in a short while */
+       write_c0_compare(read_c0_count() + COUNTON);
+
+       local_irq_restore(flags);
+
+       /*
+        * i386 code reported the skew here, but the
+        * count registers were almost certainly out of sync
+        * so no point in alarming people
+        */
+       printk("done.\n");
+}
+
+void __init synchronise_count_slave(void)
+{
+       int i;
+       unsigned long flags;
+       unsigned int initcount;
+       int ncpus;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * SMTC needs to synchronise per VPE, not per CPU
+        * ignore for now
+        */
+       return;
+#endif
+
+       local_irq_save(flags);
+
+       /*
+        * Not every cpu is online at the time this gets called,
+        * so we first wait for the master to say everyone is ready
+        */
+
+       while (!atomic_read(&count_start_flag))
+               mb();
+
+       /* Count will be initialised to expirelo for all CPU's */
+       initcount = expirelo;
+
+       ncpus = num_online_cpus();
+       for (i = 0; i < NR_LOOPS; i++) {
+               atomic_inc(&count_count_start);
+               while (atomic_read(&count_count_start) != ncpus)
+                       mb();
+
+               /*
+                * Everyone initialises count in the last loop:
+                */
+               if (i == NR_LOOPS-1)
+                       write_c0_count(initcount);
+
+               atomic_inc(&count_count_stop);
+               while (atomic_read(&count_count_stop) != ncpus)
+                       mb();
+       }
+       /* Arrange for an interrupt in a short while */
+       write_c0_compare(read_c0_count() + COUNTON);
+
+       local_irq_restore(flags);
+}
+#undef NR_LOOPS
+#endif
index b45a7093ca2d07b65eb57251147fda2d88678591..1f467d5346426d5af02bfffa7bbec5b3c6f689ba 100644 (file)
@@ -38,7 +38,6 @@ int __weak rtc_mips_set_time(unsigned long sec)
 {
        return 0;
 }
-EXPORT_SYMBOL(rtc_mips_set_time);
 
 int __weak rtc_mips_set_mmss(unsigned long nowtime)
 {
@@ -50,13 +49,11 @@ int update_persistent_clock(struct timespec now)
        return rtc_mips_set_mmss(now.tv_sec);
 }
 
-int null_perf_irq(void)
+static int null_perf_irq(void)
 {
        return 0;
 }
 
-EXPORT_SYMBOL(null_perf_irq);
-
 int (*perf_irq)(void) = null_perf_irq;
 
 EXPORT_SYMBOL(perf_irq);
index 984c0d0a7b4d72d2f0ff49c6dcf450220ba061c4..cb8b0e2c79547ba1c67a577cc852daf51f558dab 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/kallsyms.h>
 #include <linux/bootmem.h>
 #include <linux/interrupt.h>
+#include <linux/ptrace.h>
 
 #include <asm/bootinfo.h>
 #include <asm/branch.h>
@@ -80,19 +81,22 @@ void (*board_bind_eic_interrupt)(int irq, int regset);
 
 static void show_raw_backtrace(unsigned long reg29)
 {
-       unsigned long *sp = (unsigned long *)reg29;
+       unsigned long *sp = (unsigned long *)(reg29 & ~3);
        unsigned long addr;
 
        printk("Call Trace:");
 #ifdef CONFIG_KALLSYMS
        printk("\n");
 #endif
-       while (!kstack_end(sp)) {
-               addr = *sp++;
-               if (__kernel_text_address(addr))
-                       print_ip_sym(addr);
+#define IS_KVA01(a) ((((unsigned int)a) & 0xc0000000) == 0x80000000)
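+       /* Only walk the stack if the stack pointer lies in KSEG0/KSEG1 */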
+       if (IS_KVA01(sp)) {
+               while (!kstack_end(sp)) {
+                       addr = *sp++;
+                       if (__kernel_text_address(addr))
+                               print_ip_sym(addr);
+               }
+               printk("\n");
        }
-       printk("\n");
 }
 
 #ifdef CONFIG_KALLSYMS
@@ -192,16 +196,19 @@ EXPORT_SYMBOL(dump_stack);
 static void show_code(unsigned int __user *pc)
 {
        long i;
+       unsigned short __user *pc16 = NULL;
 
        printk("\nCode:");
 
+       if ((unsigned long)pc & 1)
+               pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
        for(i = -3 ; i < 6 ; i++) {
                unsigned int insn;
-               if (__get_user(insn, pc + i)) {
+               if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
                        printk(" (Bad address in epc)\n");
                        break;
                }
-               printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
+               printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
        }
 }
 
@@ -311,10 +318,21 @@ void show_regs(struct pt_regs *regs)
 
 void show_registers(const struct pt_regs *regs)
 {
+       const int field = 2 * sizeof(unsigned long);
+
        __show_regs(regs);
        print_modules();
-       printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
-               current->comm, task_pid_nr(current), current_thread_info(), current);
+       printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
+              current->comm, current->pid, current_thread_info(), current,
+             field, current_thread_info()->tp_value);
+       if (cpu_has_userlocal) {
+               unsigned long tls;
+
+               tls = read_c0_userlocal();
+               if (tls != current_thread_info()->tp_value)
+                       printk("*HwTLS: %0*lx\n", field, tls);
+       }
+
        show_stacktrace(current, regs);
        show_code((unsigned int __user *) regs->cp0_epc);
        printk("\n");
@@ -657,35 +675,24 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
        force_sig_info(SIGFPE, &info, current);
 }
 
-asmlinkage void do_bp(struct pt_regs *regs)
+static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+       const char *str)
 {
-       unsigned int opcode, bcode;
        siginfo_t info;
-
-       if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
-               goto out_sigsegv;
-
-       /*
-        * There is the ancient bug in the MIPS assemblers that the break
-        * code starts left to bit 16 instead to bit 6 in the opcode.
-        * Gas is bug-compatible, but not always, grrr...
-        * We handle both cases with a simple heuristics.  --macro
-        */
-       bcode = ((opcode >> 6) & ((1 << 20) - 1));
-       if (bcode < (1 << 10))
-               bcode <<= 10;
+       char b[40];
 
        /*
-        * (A short test says that IRIX 5.3 sends SIGTRAP for all break
-        * insns, even for break codes that indicate arithmetic failures.
-        * Weird ...)
+        * A short test says that IRIX 5.3 sends SIGTRAP for all trap
+        * insns, even for trap and break codes that indicate arithmetic
+        * failures.  Weird ...
         * But should we continue the brokenness???  --macro
         */
-       switch (bcode) {
-       case BRK_OVERFLOW << 10:
-       case BRK_DIVZERO << 10:
-               die_if_kernel("Break instruction in kernel code", regs);
-               if (bcode == (BRK_DIVZERO << 10))
+       switch (code) {
+       case BRK_OVERFLOW:
+       case BRK_DIVZERO:
+               scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
+               die_if_kernel(b, regs);
+               if (code == BRK_DIVZERO)
                        info.si_code = FPE_INTDIV;
                else
                        info.si_code = FPE_INTOVF;
@@ -695,12 +702,34 @@ asmlinkage void do_bp(struct pt_regs *regs)
                force_sig_info(SIGFPE, &info, current);
                break;
        case BRK_BUG:
-               die("Kernel bug detected", regs);
+               die_if_kernel("Kernel bug detected", regs);
+               force_sig(SIGTRAP, current);
                break;
        default:
-               die_if_kernel("Break instruction in kernel code", regs);
+               scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
+               die_if_kernel(b, regs);
                force_sig(SIGTRAP, current);
        }
+}
+
+asmlinkage void do_bp(struct pt_regs *regs)
+{
+       unsigned int opcode, bcode;
+
+       if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
+               goto out_sigsegv;
+
+       /*
+        * There is the ancient bug in the MIPS assemblers that the break
+        * code starts left to bit 16 instead to bit 6 in the opcode.
+        * Gas is bug-compatible, but not always, grrr...
+        * We handle both cases with a simple heuristics.  --macro
+        */
+       bcode = ((opcode >> 6) & ((1 << 20) - 1));
+       if (bcode >= (1 << 10))
+               bcode >>= 10;
+
+       do_trap_or_bp(regs, bcode, "Break");
        return;
 
 out_sigsegv:
@@ -710,7 +739,6 @@ out_sigsegv:
 asmlinkage void do_tr(struct pt_regs *regs)
 {
        unsigned int opcode, tcode = 0;
-       siginfo_t info;
 
        if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
                goto out_sigsegv;
@@ -719,32 +747,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
        if (!(opcode & OPCODE))
                tcode = ((opcode >> 6) & ((1 << 10) - 1));
 
-       /*
-        * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
-        * insns, even for trap codes that indicate arithmetic failures.
-        * Weird ...)
-        * But should we continue the brokenness???  --macro
-        */
-       switch (tcode) {
-       case BRK_OVERFLOW:
-       case BRK_DIVZERO:
-               die_if_kernel("Trap instruction in kernel code", regs);
-               if (tcode == BRK_DIVZERO)
-                       info.si_code = FPE_INTDIV;
-               else
-                       info.si_code = FPE_INTOVF;
-               info.si_signo = SIGFPE;
-               info.si_errno = 0;
-               info.si_addr = (void __user *) regs->cp0_epc;
-               force_sig_info(SIGFPE, &info, current);
-               break;
-       case BRK_BUG:
-               die("Kernel bug detected", regs);
-               break;
-       default:
-               die_if_kernel("Trap instruction in kernel code", regs);
-               force_sig(SIGTRAP, current);
-       }
+       do_trap_or_bp(regs, tcode, "Trap");
        return;
 
 out_sigsegv:
@@ -985,6 +988,21 @@ asmlinkage void do_reserved(struct pt_regs *regs)
              (regs->cp0_cause & 0x7f) >> 2);
 }
 
+static int __initdata l1parity = 1;
+static int __init nol1parity(char *s)
+{
+       l1parity = 0;
+       return 1;
+}
+__setup("nol1par", nol1parity);
+static int __initdata l2parity = 1;
+static int __init nol2parity(char *s)
+{
+       l2parity = 0;
+       return 1;
+}
+__setup("nol2par", nol2parity);
+
 /*
  * Some MIPS CPUs can enable/disable cache parity detection, but they do
  * it in different ways.
@@ -994,6 +1012,62 @@ static inline void parity_protection_init(void)
        switch (current_cpu_type()) {
        case CPU_24K:
        case CPU_34K:
+       case CPU_74K:
+       case CPU_1004K:
+               {
+#define ERRCTL_PE      0x80000000
+#define ERRCTL_L2P     0x00800000
+                       unsigned long errctl;
+                       unsigned int l1parity_present, l2parity_present;
+
+                       errctl = read_c0_ecc();
+                       errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
+
+                       /* probe L1 parity support */
+                       write_c0_ecc(errctl | ERRCTL_PE);
+                       back_to_back_c0_hazard();
+                       l1parity_present = (read_c0_ecc() & ERRCTL_PE);
+
+                       /* probe L2 parity support */
+                       write_c0_ecc(errctl|ERRCTL_L2P);
+                       back_to_back_c0_hazard();
+                       l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
+
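+                       /* Note: with PE set, L2P appears to act as an L2 parity disable, hence the XOR logic below */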
+                       if (l1parity_present && l2parity_present) {
+                               if (l1parity)
+                                       errctl |= ERRCTL_PE;
+                               if (l1parity ^ l2parity)
+                                       errctl |= ERRCTL_L2P;
+                       } else if (l1parity_present) {
+                               if (l1parity)
+                                       errctl |= ERRCTL_PE;
+                       } else if (l2parity_present) {
+                               if (l2parity)
+                                       errctl |= ERRCTL_L2P;
+                       } else {
+                               /* No parity available */
+                       }
+
+                       printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
+
+                       write_c0_ecc(errctl);
+                       back_to_back_c0_hazard();
+                       errctl = read_c0_ecc();
+                       printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
+
+                       if (l1parity_present)
+                               printk(KERN_INFO "Cache parity protection %sabled\n",
+                                      (errctl & ERRCTL_PE) ? "en" : "dis");
+
+                       if (l2parity_present) {
+                               if (l1parity_present && l1parity)
+                                       errctl ^= ERRCTL_L2P;
+                               printk(KERN_INFO "L2 cache parity protection %sabled\n",
+                                      (errctl & ERRCTL_L2P) ? "en" : "dis");
+                       }
+               }
+               break;
+
        case CPU_5KC:
                write_c0_ecc(0x80000000);
                back_to_back_c0_hazard();
@@ -1306,6 +1380,17 @@ int cp0_compare_irq;
 int cp0_perfcount_irq;
 EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
 
+static int __cpuinitdata noulri;
+
+static int __init ulri_disable(char *s)
+{
+       pr_info("Disabling ulri\n");
+       noulri = 1;
+
+       return 1;
+}
+__setup("noulri", ulri_disable);
+
 void __cpuinit per_cpu_trap_init(void)
 {
        unsigned int cpu = smp_processor_id();
@@ -1342,16 +1427,14 @@ void __cpuinit per_cpu_trap_init(void)
        change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
                         status_set);
 
-#ifdef CONFIG_CPU_MIPSR2
        if (cpu_has_mips_r2) {
                unsigned int enable = 0x0000000f;
 
-               if (cpu_has_userlocal)
+               if (!noulri && cpu_has_userlocal)
                        enable |= (1 << 29);
 
                write_c0_hwrena(enable);
        }
-#endif
 
 #ifdef CONFIG_MIPS_MT_SMTC
        if (!secondaryTC) {
index c11b2494bb6ec8d97b1be94f4eef82474885a54e..2ab899c4b4ce1ac38a7b235f5c340af49dcc923c 100644 (file)
@@ -45,8 +45,8 @@ static void __iomem *ioport_map_pci(struct pci_dev *dev,
  */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-       unsigned long start = pci_resource_start(dev, bar);
-       unsigned long len = pci_resource_len(dev, bar);
+       resource_size_t start = pci_resource_start(dev, bar);
+       resource_size_t len = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);
 
        if (!len || !start)
index 8977eb585a3716f348dc25d0612ec528fd34fba2..762786538449c8a652183fb68b6bfa8f8d064ffc 100644 (file)
@@ -46,7 +46,7 @@
 #define DPDNORMX       DPDNORMx(xm, xe)
 #define DPDNORMY       DPDNORMx(ym, ye)
 
-static __inline ieee754dp builddp(int s, int bx, u64 m)
+static inline ieee754dp builddp(int s, int bx, u64 m)
 {
        ieee754dp r;
 
index 9917c1e4d947ade47260cb48b9abb6d64f9059f7..d9e3586b5bcee6ee47dbd25bd29e7f50a934fdd1 100644 (file)
@@ -51,7 +51,7 @@
 #define SPDNORMX       SPDNORMx(xm, xe)
 #define SPDNORMY       SPDNORMx(ym, ye)
 
-static __inline ieee754sp buildsp(int s, int bx, unsigned m)
+static inline ieee754sp buildsp(int s, int bx, unsigned m)
 {
        ieee754sp r;
 
index b31d8dfed1bec138967d8caf011d85a249094c3f..f7f87fc09d1e7b221a8efaabf278740a75d9f6b3 100644 (file)
@@ -20,6 +20,7 @@
 
 obj-y                          := reset.o display.o init.o memory.o \
                                   cmdline.o time.o
+obj-y                          += amon.o
 
 obj-$(CONFIG_EARLY_PRINTK)     += console.o
 obj-$(CONFIG_PCI)              += pci.o
diff --git a/arch/mips/mips-boards/generic/amon.c b/arch/mips/mips-boards/generic/amon.c
new file mode 100644 (file)
index 0000000..b7633fd
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2007  MIPS Technologies, Inc.
+ *     All rights reserved.
+
+ *  This program is free software; you can distribute it and/or modify it
+ *  under the terms of the GNU General Public License (Version 2) as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with this program; if not, write to the Free Software Foundation, Inc.,
+ *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Arbitrary Monitor interface
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/smp.h>
+
+#include <asm-mips/addrspace.h>
+#include <asm-mips/mips-boards/launch.h>
+#include <asm-mips/mipsmtregs.h>
+
+int amon_cpu_avail(int cpu)
+{
+       struct cpulaunch *launch = (struct cpulaunch *)KSEG0ADDR(CPULAUNCH);
+
+       if (cpu < 0 || cpu >= NCPULAUNCH) {
+               pr_debug("avail: cpu%d is out of range\n", cpu);
+               return 0;
+       }
+
+       launch += cpu;
+       if (!(launch->flags & LAUNCH_FREADY)) {
+               pr_debug("avail: cpu%d is not ready\n", cpu);
+               return 0;
+       }
+       if (launch->flags & (LAUNCH_FGO|LAUNCH_FGONE)) {
+               pr_debug("avail: too late.. cpu%d is already gone\n", cpu);
+               return 0;
+       }
+
+       return 1;
+}
+
+void amon_cpu_start(int cpu,
+                   unsigned long pc, unsigned long sp,
+                   unsigned long gp, unsigned long a0)
+{
+       volatile struct cpulaunch *launch =
+               (struct cpulaunch  *)KSEG0ADDR(CPULAUNCH);
+
+       if (!amon_cpu_avail(cpu))
+               return;
+       if (cpu == smp_processor_id()) {
+               pr_debug("launch: I am cpu%d!\n", cpu);
+               return;
+       }
+       launch += cpu;
+
+       pr_debug("launch: starting cpu%d\n", cpu);
+
+       launch->pc = pc;
+       launch->gp = gp;
+       launch->sp = sp;
+       launch->a0 = a0;
+
+       /* Make sure target sees parameters before the go bit */
+       smp_mb();
+
+       launch->flags |= LAUNCH_FGO;
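+       /* Busy-wait for the target CPU to acknowledge by setting LAUNCH_FGONE */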
+       while ((launch->flags & LAUNCH_FGONE) == 0)
+               ;
+       pr_debug("launch: cpu%d gone!\n", cpu);
+}
index 1695dca5506bbe7e008221ca400836ff97446703..83b9dc739203af55474ed5288c0c94f25b097b7e 100644 (file)
@@ -226,7 +226,7 @@ void __init kgdb_config(void)
 }
 #endif
 
-void __init mips_nmi_setup(void)
+static void __init mips_nmi_setup(void)
 {
        void *base;
        extern char except_vec_nmi;
@@ -238,7 +238,7 @@ void __init mips_nmi_setup(void)
        flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
 }
 
-void __init mips_ejtag_setup(void)
+static void __init mips_ejtag_setup(void)
 {
        void *base;
        extern char except_vec_ejtag_debug;
@@ -295,15 +295,21 @@ void __init prom_init(void)
                        break;
                case MIPS_REVISION_CORID_CORE_MSC:
                case MIPS_REVISION_CORID_CORE_FPGA2:
-               case MIPS_REVISION_CORID_CORE_FPGA3:
-               case MIPS_REVISION_CORID_CORE_FPGA4:
                case MIPS_REVISION_CORID_CORE_24K:
-               case MIPS_REVISION_CORID_CORE_EMUL_MSC:
+                       /*
+                        * SOCit/ROCit support is essentially identical
+                        * but make an attempt to distinguish them
+                        */
                        mips_revision_sconid = MIPS_REVISION_SCON_SOCIT;
                        break;
+               case MIPS_REVISION_CORID_CORE_FPGA3:
+               case MIPS_REVISION_CORID_CORE_FPGA4:
+               case MIPS_REVISION_CORID_CORE_FPGA5:
+               case MIPS_REVISION_CORID_CORE_EMUL_MSC:
                default:
-                       mips_display_message("CC Error");
-                       while (1);   /* We die here... */
+                       /* See above */
+                       mips_revision_sconid = MIPS_REVISION_SCON_ROCIT;
+                       break;
                }
        }
 
@@ -418,6 +424,9 @@ void __init prom_init(void)
 #ifdef CONFIG_SERIAL_8250_CONSOLE
        console_config();
 #endif
+#ifdef CONFIG_MIPS_CMP
+       register_smp_ops(&cmp_smp_ops);
+#endif
 #ifdef CONFIG_MIPS_MT_SMP
        register_smp_ops(&vsmp_smp_ops);
 #endif
index dc272c1882337167a0350d87afc978702b1c0f0a..5e443bba5662e4ce1fbc0d5a7531f4b530af0417 100644 (file)
@@ -37,7 +37,7 @@ enum yamon_memtypes {
        yamon_prom,
        yamon_free,
 };
-struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
+static struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS];
 
 #ifdef DEBUG
 static char *mtypes[3] = {
@@ -50,7 +50,7 @@ static char *mtypes[3] = {
 /* determined physical memory size, not overridden by command line args  */
 unsigned long physical_memsize = 0L;
 
-struct prom_pmemblock * __init prom_getmdesc(void)
+static struct prom_pmemblock * __init prom_getmdesc(void)
 {
        char *memsize_str;
        unsigned int memsize;
index b50e0fc406ac9d04fa50b64ad87e8f6666d800a0..008fd82b584041013082a87c127dc70f669679f8 100644 (file)
 unsigned long cpu_khz;
 
 static int mips_cpu_timer_irq;
+static int mips_cpu_perf_irq;
 extern int cp0_perfcount_irq;
 
+DEFINE_PER_CPU(unsigned int, tickcount);
+#define tickcount_this_cpu __get_cpu_var(tickcount)
+static unsigned long ledbitmask;
+
 static void mips_timer_dispatch(void)
 {
+#if defined(CONFIG_MIPS_MALTA) || defined(CONFIG_MIPS_ATLAS)
+       /*
+        * Yes, this is very tacky, won't work as expected with SMTC and
+        * dyntick will break it,
+        * but it gives me a nice warm feeling during debug
+        */
+#define LEDBAR 0xbf000408
+       if (tickcount_this_cpu++ >= HZ) {
+               tickcount_this_cpu = 0;
+               change_bit(smp_processor_id(), &ledbitmask);
+               smp_wmb(); /* Make sure everyone else sees the change */
+               /* This will pick up any recent changes made by other CPU's */
+               *(unsigned int *)LEDBAR = ledbitmask;
+       }
+#endif
        do_IRQ(mips_cpu_timer_irq);
 }
 
 static void mips_perf_dispatch(void)
 {
-       do_IRQ(cp0_perfcount_irq);
+       do_IRQ(mips_cpu_perf_irq);
 }
 
 /*
@@ -127,21 +147,20 @@ unsigned long read_persistent_clock(void)
        return mc146818_get_cmos_time();
 }
 
-void __init plat_perf_setup(void)
+static void __init plat_perf_setup(void)
 {
-       cp0_perfcount_irq = -1;
-
 #ifdef MSC01E_INT_BASE
        if (cpu_has_veic) {
                set_vi_handler(MSC01E_INT_PERFCTR, mips_perf_dispatch);
-               cp0_perfcount_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
+               mips_cpu_perf_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
        } else
 #endif
        if (cp0_perfcount_irq >= 0) {
                if (cpu_has_vint)
                        set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch);
+               mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
 #ifdef CONFIG_SMP
-               set_irq_handler(cp0_perfcount_irq, handle_percpu_irq);
+               set_irq_handler(mips_cpu_perf_irq, handle_percpu_irq);
 #endif
        }
 }
index 931ca4600a63ec97ca507d4e1d969d7ee070cc86..8dc6e2ac4c0386e80867ca018e1f9b72ac5c5758 100644 (file)
@@ -22,6 +22,7 @@
 obj-y := malta_int.o malta_platform.o malta_setup.o
 
 obj-$(CONFIG_MTD) += malta_mtd.o
+# FIXME FIXME FIXME
 obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o
 
 EXTRA_CFLAGS += -Werror
index dbe60eb55e2962c4e40d5ae7f8322514c63c21f7..8c495104b321c54078bee3dd18bfefc02f63eb05 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/kernel.h>
 #include <linux/random.h>
 
+#include <asm/traps.h>
 #include <asm/i8259.h>
 #include <asm/irq_cpu.h>
 #include <asm/irq_regs.h>
 #include <asm/mips-boards/generic.h>
 #include <asm/mips-boards/msc01_pci.h>
 #include <asm/msc01_ic.h>
+#include <asm/gic.h>
+#include <asm/gcmpregs.h>
+
+int gcmp_present = -1;
+int gic_present;
+static unsigned long _msc01_biu_base;
+static unsigned long _gcmp_base;
+static unsigned int ipi_map[NR_CPUS];
 
 static DEFINE_SPINLOCK(mips_irq_lock);
 
@@ -121,6 +130,17 @@ static void malta_hw0_irqdispatch(void)
        do_IRQ(MALTA_INT_BASE + irq);
 }
 
+static void malta_ipi_irqdispatch(void)
+{
+       int irq;
+
+       irq = gic_get_int();
+       if (irq < 0)
+               return;  /* interrupt has already been cleared */
+
+       do_IRQ(MIPS_GIC_IRQ_BASE + irq);
+}
+
 static void corehi_irqdispatch(void)
 {
        unsigned int intedge, intsteer, pcicmd, pcibadaddr;
@@ -257,12 +277,61 @@ asmlinkage void plat_irq_dispatch(void)
 
        if (irq == MIPSCPU_INT_I8259A)
                malta_hw0_irqdispatch();
+       else if (gic_present && ((1 << irq) & ipi_map[smp_processor_id()]))
+               malta_ipi_irqdispatch();
        else if (irq >= 0)
                do_IRQ(MIPS_CPU_IRQ_BASE + irq);
        else
                spurious_interrupt();
 }
 
+#ifdef CONFIG_MIPS_MT_SMP
+
+
+#define GIC_MIPS_CPU_IPI_RESCHED_IRQ   3
+#define GIC_MIPS_CPU_IPI_CALL_IRQ      4
+
+#define MIPS_CPU_IPI_RESCHED_IRQ 0     /* SW int 0 for resched */
+#define C_RESCHED C_SW0
+#define MIPS_CPU_IPI_CALL_IRQ 1                /* SW int 1 for call */
+#define C_CALL C_SW1
+static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
+
+static void ipi_resched_dispatch(void)
+{
+       do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
+}
+
+static void ipi_call_dispatch(void)
+{
+       do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
+}
+
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+       smp_call_function_interrupt();
+
+       return IRQ_HANDLED;
+}
+
+static struct irqaction irq_resched = {
+       .handler        = ipi_resched_interrupt,
+       .flags          = IRQF_DISABLED|IRQF_PERCPU,
+       .name           = "IPI_resched"
+};
+
+static struct irqaction irq_call = {
+       .handler        = ipi_call_interrupt,
+       .flags          = IRQF_DISABLED|IRQF_PERCPU,
+       .name           = "IPI_call"
+};
+#endif /* CONFIG_MIPS_MT_SMP */
+
 static struct irqaction i8259irq = {
        .handler = no_action,
        .name = "XT-PIC cascade"
@@ -273,13 +342,13 @@ static struct irqaction corehi_irqaction = {
        .name = "CoreHi"
 };
 
-msc_irqmap_t __initdata msc_irqmap[] = {
+static msc_irqmap_t __initdata msc_irqmap[] = {
        {MSC01C_INT_TMR,                MSC01_IRQ_EDGE, 0},
        {MSC01C_INT_PCI,                MSC01_IRQ_LEVEL, 0},
 };
-int __initdata msc_nr_irqs = ARRAY_SIZE(msc_irqmap);
+static int __initdata msc_nr_irqs = ARRAY_SIZE(msc_irqmap);
 
-msc_irqmap_t __initdata msc_eicirqmap[] = {
+static msc_irqmap_t __initdata msc_eicirqmap[] = {
        {MSC01E_INT_SW0,                MSC01_IRQ_LEVEL, 0},
        {MSC01E_INT_SW1,                MSC01_IRQ_LEVEL, 0},
        {MSC01E_INT_I8259A,             MSC01_IRQ_LEVEL, 0},
@@ -291,15 +360,90 @@ msc_irqmap_t __initdata msc_eicirqmap[] = {
        {MSC01E_INT_PERFCTR,            MSC01_IRQ_LEVEL, 0},
        {MSC01E_INT_CPUCTR,             MSC01_IRQ_LEVEL, 0}
 };
-int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
+
+static int __initdata msc_nr_eicirqs = ARRAY_SIZE(msc_eicirqmap);
+
+/*
+ * This GIC-specific table defines the association between external
+ * interrupts and CPUs/core interrupts.  The polarity and trigger type
+ * of each external interrupt are also defined here.
+ */
+static struct gic_intr_map gic_intr_map[] = {
+       { GIC_EXT_INTR(0),      X,      X,              X,              X,              0 },
+       { GIC_EXT_INTR(1),      X,      X,              X,              X,              0 },
+       { GIC_EXT_INTR(2),      X,      X,              X,              X,              0 },
+       { GIC_EXT_INTR(3),      0,      GIC_CPU_INT0,   GIC_POL_POS,    GIC_TRIG_LEVEL, 0 },
+       { GIC_EXT_INTR(4),      0,      GIC_CPU_INT1,   GIC_POL_POS,    GIC_TRIG_LEVEL, 0 },
+       { GIC_EXT_INTR(5),      0,      GIC_CPU_INT2,   GIC_POL_POS,    GIC_TRIG_LEVEL, 0 },
+       { GIC_EXT_INTR(6),      0,      GIC_CPU_INT3,   GIC_POL_POS,    GIC_TRIG_LEVEL, 0 },
+       { GIC_EXT_INTR(7),      0,      GIC_CPU_INT4,   GIC_POL_POS,    GIC_TRIG_LEVEL, 0 },
+       { GIC_EXT_INTR(8),      0,      GIC_CPU_INT3,   GIC_POL_POS,    GIC_TRIG_LEVEL, 0 },
+       { GIC_EXT_INTR(9),      0,      GIC_CPU_INT3,   GIC_POL_POS,    GIC_TRIG_LEVEL, 0 },
+       { GIC_EXT_INTR(10),     X,      X,              X,              X,              0 },
+       { GIC_EXT_INTR(11),     X,      X,              X,              X,              0 },
+       { GIC_EXT_INTR(12),     0,      GIC_CPU_INT3,   GIC_POL_POS,    GIC_TRIG_LEVEL, 0 },
+       { GIC_EXT_INTR(13),     0,      GIC_MAP_TO_NMI_MSK,     GIC_POL_POS, GIC_TRIG_LEVEL,    0 },
+       { GIC_EXT_INTR(14),     0,      GIC_MAP_TO_NMI_MSK,     GIC_POL_POS, GIC_TRIG_LEVEL,    0 },
+       { GIC_EXT_INTR(15),     X,      X,              X,              X,              0 },
+       { GIC_EXT_INTR(16),     0,      GIC_CPU_INT1,   GIC_POL_POS, GIC_TRIG_EDGE,     1 },
+       { GIC_EXT_INTR(17),     0,      GIC_CPU_INT2,   GIC_POL_POS, GIC_TRIG_EDGE,     1 },
+       { GIC_EXT_INTR(18),     1,      GIC_CPU_INT1,   GIC_POL_POS, GIC_TRIG_EDGE,     1 },
+       { GIC_EXT_INTR(19),     1,      GIC_CPU_INT2,   GIC_POL_POS, GIC_TRIG_EDGE,     1 },
+       { GIC_EXT_INTR(20),     2,      GIC_CPU_INT1,   GIC_POL_POS, GIC_TRIG_EDGE,     1 },
+       { GIC_EXT_INTR(21),     2,      GIC_CPU_INT2,   GIC_POL_POS, GIC_TRIG_EDGE,     1 },
+       { GIC_EXT_INTR(22),     3,      GIC_CPU_INT1,   GIC_POL_POS, GIC_TRIG_EDGE,     1 },
+       { GIC_EXT_INTR(23),     3,      GIC_CPU_INT2,   GIC_POL_POS, GIC_TRIG_EDGE,     1 },
+};
+
+/*
+ * GCMP needs to be detected before any SMP initialisation
+ */
+int __init gcmp_probe(unsigned long addr, unsigned long size)
+{
+       if (gcmp_present >= 0)
+               return gcmp_present;
+
+       _gcmp_base = (unsigned long) ioremap_nocache(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ);
+       _msc01_biu_base = (unsigned long) ioremap_nocache(MSC01_BIU_REG_BASE, MSC01_BIU_ADDRSPACE_SZ);
+       gcmp_present = (GCMPGCB(GCMPB) & GCMP_GCB_GCMPB_GCMPBASE_MSK) == GCMP_BASE_ADDR;
+
+       if (gcmp_present)
+               printk(KERN_DEBUG "GCMP present\n");
+       return gcmp_present;
+}
+
+void __init fill_ipi_map(void)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(gic_intr_map); i++) {
+               if (gic_intr_map[i].ipiflag && (gic_intr_map[i].cpunum != X))
+                       ipi_map[gic_intr_map[i].cpunum] |=
+                               (1 << (gic_intr_map[i].pin + 2));
+       }
+}
 
 void __init arch_init_irq(void)
 {
        init_i8259_irqs();
 
        if (!cpu_has_veic)
                mips_cpu_irq_init();
 
+       gcmp_present = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ);
+       if (gcmp_present)  {
+               GCMPGCB(GICBA) = GIC_BASE_ADDR | GCMP_GCB_GICBA_EN_MSK;
+               gic_present = 1;
+       } else {
+               _msc01_biu_base = (unsigned long) ioremap_nocache(MSC01_BIU_REG_BASE, MSC01_BIU_ADDRSPACE_SZ);
+               gic_present = (REG(_msc01_biu_base, MSC01_SC_CFG) &
+               MSC01_SC_CFG_GICPRES_MSK) >> MSC01_SC_CFG_GICPRES_SHF;
+       }
+       if (gic_present)
+               printk(KERN_DEBUG "GIC present\n");
+
        switch (mips_revision_sconid) {
        case MIPS_REVISION_SCON_SOCIT:
        case MIPS_REVISION_SCON_ROCIT:
@@ -360,4 +504,206 @@ void __init arch_init_irq(void)
                setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
                                                &corehi_irqaction);
        }
+
+#if defined(CONFIG_MIPS_MT_SMP)
+       if (gic_present) {
+               /* FIXME */
+               int i;
+               struct {
+                       unsigned int resched;
+                       unsigned int call;
+               } ipiirq[] = {
+                       {
+                               .resched = GIC_IPI_EXT_INTR_RESCHED_VPE0,
+                               .call =  GIC_IPI_EXT_INTR_CALLFNC_VPE0},
+                       {
+                               .resched = GIC_IPI_EXT_INTR_RESCHED_VPE1,
+                               .call =  GIC_IPI_EXT_INTR_CALLFNC_VPE1
+                       }, {
+                               .resched = GIC_IPI_EXT_INTR_RESCHED_VPE2,
+                               .call =  GIC_IPI_EXT_INTR_CALLFNC_VPE2
+                       }, {
+                               .resched = GIC_IPI_EXT_INTR_RESCHED_VPE3,
+                               .call =  GIC_IPI_EXT_INTR_CALLFNC_VPE3
+                       }
+               };
+#define NIPI (sizeof(ipiirq)/sizeof(ipiirq[0]))
+               fill_ipi_map();
+               gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map, ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE);
+               if (!gcmp_present) {
+                       /* Enable the GIC */
+                       i = REG(_msc01_biu_base, MSC01_SC_CFG);
+                       REG(_msc01_biu_base, MSC01_SC_CFG) =
+                               (i | (0x1 << MSC01_SC_CFG_GICENA_SHF));
+                       pr_debug("GIC Enabled\n");
+               }
+
+               /* set up ipi interrupts */
+               if (cpu_has_vint) {
+                       set_vi_handler(MIPSCPU_INT_IPI0, malta_ipi_irqdispatch);
+                       set_vi_handler(MIPSCPU_INT_IPI1, malta_ipi_irqdispatch);
+               }
+               /* Argh.. this really needs sorting out.. */
+               printk("CPU%d: status register was %08x\n", smp_processor_id(), read_c0_status());
+               write_c0_status(read_c0_status() | STATUSF_IP3 | STATUSF_IP4);
+               printk("CPU%d: status register now %08x\n", smp_processor_id(), read_c0_status());
+               write_c0_status(0x1100dc00);
+               printk("CPU%d: status register frc %08x\n", smp_processor_id(), read_c0_status());
+               for (i = 0; i < NIPI; i++) {
+                       setup_irq(MIPS_GIC_IRQ_BASE + ipiirq[i].resched, &irq_resched);
+                       setup_irq(MIPS_GIC_IRQ_BASE + ipiirq[i].call, &irq_call);
+
+                       set_irq_handler(MIPS_GIC_IRQ_BASE + ipiirq[i].resched, handle_percpu_irq);
+                       set_irq_handler(MIPS_GIC_IRQ_BASE + ipiirq[i].call, handle_percpu_irq);
+               }
+       } else {
+               /* set up ipi interrupts */
+               if (cpu_has_veic) {
+                       set_vi_handler (MSC01E_INT_SW0, ipi_resched_dispatch);
+                       set_vi_handler (MSC01E_INT_SW1, ipi_call_dispatch);
+                       cpu_ipi_resched_irq = MSC01E_INT_SW0;
+                       cpu_ipi_call_irq = MSC01E_INT_SW1;
+               } else {
+                       if (cpu_has_vint) {
+                               set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
+                               set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
+                       }
+                       cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
+                       cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
+               }
+
+               setup_irq(cpu_ipi_resched_irq, &irq_resched);
+               setup_irq(cpu_ipi_call_irq, &irq_call);
+
+               set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
+               set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
+       }
+#endif
+}
+
+void malta_be_init(void)
+{
+       if (gcmp_present) {
+               /* Could change CM error mask register */
+       }
+}
+
+
+static char *tr[8] = {
+       "mem",  "gcr",  "gic",  "mmio",
+       "0x04", "0x05", "0x06", "0x07"
+};
+
+static char *mcmd[32] = {
+       [0x00] = "0x00",
+       [0x01] = "Legacy Write",
+       [0x02] = "Legacy Read",
+       [0x03] = "0x03",
+       [0x04] = "0x04",
+       [0x05] = "0x05",
+       [0x06] = "0x06",
+       [0x07] = "0x07",
+       [0x08] = "Coherent Read Own",
+       [0x09] = "Coherent Read Share",
+       [0x0a] = "Coherent Read Discard",
+       [0x0b] = "Coherent Read Share Always",
+       [0x0c] = "Coherent Upgrade",
+       [0x0d] = "Coherent Writeback",
+       [0x0e] = "0x0e",
+       [0x0f] = "0x0f",
+       [0x10] = "Coherent Copyback",
+       [0x11] = "Coherent Copyback Invalidate",
+       [0x12] = "Coherent Invalidate",
+       [0x13] = "Coherent Write Invalidate",
+       [0x14] = "Coherent Completion Sync",
+       [0x15] = "0x15",
+       [0x16] = "0x16",
+       [0x17] = "0x17",
+       [0x18] = "0x18",
+       [0x19] = "0x19",
+       [0x1a] = "0x1a",
+       [0x1b] = "0x1b",
+       [0x1c] = "0x1c",
+       [0x1d] = "0x1d",
+       [0x1e] = "0x1e",
+       [0x1f] = "0x1f"
+};
+
+static char *core[8] = {
+       "Invalid/OK",   "Invalid/Data",
+       "Shared/OK",    "Shared/Data",
+       "Modified/OK",  "Modified/Data",
+       "Exclusive/OK", "Exclusive/Data"
+};
+
+static char *causes[32] = {
+       "None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR",
+       "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
+       "0x08", "0x09", "0x0a", "0x0b",
+       "0x0c", "0x0d", "0x0e", "0x0f",
+       "0x10", "0x11", "0x12", "0x13",
+       "0x14", "0x15", "0x16", "INTVN_WR_ERR",
+       "INTVN_RD_ERR", "0x19", "0x1a", "0x1b",
+       "0x1c", "0x1d", "0x1e", "0x1f"
+};
+
+int malta_be_handler(struct pt_regs *regs, int is_fixup)
+{
+       /* This duplicates the handling in do_be which seems wrong */
+       int retval = is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
+
+       if (gcmp_present) {
+               unsigned long cm_error = GCMPGCB(GCMEC);
+               unsigned long cm_addr = GCMPGCB(GCMEA);
+               unsigned long cm_other = GCMPGCB(GCMEO);
+               unsigned long cause, ocause;
+               char buf[256];
+
+               cause = (cm_error & GCMP_GCB_GMEC_ERROR_TYPE_MSK);
+               if (cause != 0) {
+                       cause >>= GCMP_GCB_GMEC_ERROR_TYPE_SHF;
+                       if (cause < 16) {
+                               unsigned long cca_bits = (cm_error >> 15) & 7;
+                               unsigned long tr_bits = (cm_error >> 12) & 7;
+                               unsigned long mcmd_bits = (cm_error >> 7) & 0x1f;
+                               unsigned long stag_bits = (cm_error >> 3) & 15;
+                               unsigned long sport_bits = (cm_error >> 0) & 7;
+
+                               snprintf(buf, sizeof(buf),
+                                        "CCA=%lu TR=%s MCmd=%s STag=%lu "
+                                        "SPort=%lu\n",
+                                        cca_bits, tr[tr_bits], mcmd[mcmd_bits],
+                                        stag_bits, sport_bits);
+                       } else {
+                               /* glob state & sresp together */
+                               unsigned long c3_bits = (cm_error >> 18) & 7;
+                               unsigned long c2_bits = (cm_error >> 15) & 7;
+                               unsigned long c1_bits = (cm_error >> 12) & 7;
+                               unsigned long c0_bits = (cm_error >> 9) & 7;
+                               unsigned long sc_bit = (cm_error >> 8) & 1;
+                               unsigned long mcmd_bits = (cm_error >> 3) & 0x1f;
+                               unsigned long sport_bits = (cm_error >> 0) & 7;
+                               snprintf(buf, sizeof(buf),
+                                        "C3=%s C2=%s C1=%s C0=%s SC=%s "
+                                        "MCmd=%s SPort=%lu\n",
+                                        core[c3_bits], core[c2_bits],
+                                        core[c1_bits], core[c0_bits],
+                                        sc_bit ? "True" : "False",
+                                        mcmd[mcmd_bits], sport_bits);
+                       }
+
+                       ocause = (cm_other & GCMP_GCB_GMEO_ERROR_2ND_MSK) >>
+                                GCMP_GCB_GMEO_ERROR_2ND_SHF;
+
+                       printk("CM_ERROR=%08lx %s <%s>\n", cm_error,
+                              causes[cause], buf);
+                       printk("CM_ADDR =%08lx\n", cm_addr);
+                       printk("CM_OTHER=%08lx %s\n", cm_other, causes[ocause]);
+
+                       /* reprime cause register */
+                       GCMPGCB(GCMEC) = 0;
+               }
+       }
+
+       return retval;
 }
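
For illustration, here is a minimal stand-alone C sketch of the bit-field decode that malta_be_handler() performs for cause values below 16. The shift amounts and masks are taken from the handler above; tr_demo[] is an abbreviated copy of the tr[] table, and the sample register value is made up.

    #include <stdio.h>

    static const char *tr_demo[8] = {
            "mem", "gcr", "gic", "mmio", "0x04", "0x05", "0x06", "0x07"
    };

    /* Decode the low-numbered (cause < 16) CM_ERROR field layout. */
    static void decode_cm_error(unsigned long cm_error)
    {
            unsigned long cca   = (cm_error >> 15) & 7;
            unsigned long tr    = (cm_error >> 12) & 7;
            unsigned long mcmd  = (cm_error >> 7) & 0x1f;
            unsigned long stag  = (cm_error >> 3) & 15;
            unsigned long sport = (cm_error >> 0) & 7;

            printf("CCA=%lu TR=%s MCmd=%#lx STag=%lu SPort=%lu\n",
                   cca, tr_demo[tr], mcmd, stag, sport);
    }

    int main(void)
    {
            decode_cm_error(0x00019a8aUL);  /* made-up register value */
            return 0;
    }
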
index 2cd8f5734b3699cba9be4d1fab0860893b56475b..e7cad54936ca3ce9164219ae7043dbe16a9776ae 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Carsten Langgaard, carstenl@mips.com
  * Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
- * Copyright (C) Dmitri Vorobiev
+ * Copyright (C) 2008 Dmitri Vorobiev
  *
  *  This program is free software; you can distribute it and/or modify it
  *  under the terms of the GNU General Public License (Version 2) as
 #include <linux/console.h>
 #endif
 
-struct resource standard_io_resources[] = {
+extern void malta_be_init(void);
+extern int malta_be_handler(struct pt_regs *regs, int is_fixup);
+
+static struct resource standard_io_resources[] = {
        {
                .name = "dma1",
                .start = 0x00,
@@ -220,4 +223,7 @@ void __init plat_mem_setup(void)
        screen_info_setup();
 #endif
        mips_reboot_setup();
+
+       board_be_init = malta_be_init;
+       board_be_handler = malta_be_handler;
 }
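
The hook-up at the end of plat_mem_setup() relies on the generic MIPS trap code dispatching bus errors through optional board_be_init/board_be_handler function pointers, which a platform fills in at setup time. The stand-alone sketch below models that pattern; the pointers and MIPS_BE_* values here are local stand-ins mirroring the kernel names, and the demo handlers are hypothetical.

    #include <stdio.h>

    #define MIPS_BE_FIXUP 1
    #define MIPS_BE_FATAL 2

    struct pt_regs;                         /* opaque in this sketch */

    /* Hooks the generic bus-error path calls if a board has set them. */
    static void (*board_be_init)(void);
    static int (*board_be_handler)(struct pt_regs *regs, int is_fixup);

    static void demo_be_init(void)
    {
            printf("board bus-error reporting initialised\n");
    }

    static int demo_be_handler(struct pt_regs *regs, int is_fixup)
    {
            printf("board handler invoked, is_fixup=%d\n", is_fixup);
            return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
    }

    /* Mirrors what plat_mem_setup() does in the patch above. */
    static void plat_mem_setup_demo(void)
    {
            board_be_init = demo_be_init;
            board_be_handler = demo_be_handler;
    }

    int main(void)
    {
            plat_mem_setup_demo();
            if (board_be_init)
                    board_be_init();
            if (board_be_handler)
                    return board_be_handler(NULL, 1) == MIPS_BE_FIXUP ? 0 : 1;
            return 0;
    }
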
index d49fe73426b74e017a788c82cbb70fdc9156e4fb..7c7148ef26463f5d159ef7ceaf538ecceb8b0dde 100644 (file)
@@ -39,9 +39,6 @@
 static void __init serial_init(void);
 unsigned int _isbonito = 0;
 
-extern void __init sanitize_tlb_entries(void);
-
-
 const char *get_system_type(void)
 {
        return "MIPSsim";
@@ -55,9 +52,6 @@ void __init plat_mem_setup(void)
 
        pr_info("Linux started...\n");
 
-#ifdef CONFIG_MIPS_MT_SMP
-       sanitize_tlb_entries();
-#endif
 }
 
 extern struct plat_smp_ops ssmtc_smp_ops;
index c6f832e0f41ca6f1cffac106e193cff2503ed912..48731020ca0e9bb61fc31cef45f80c4bee2ca289 100644 (file)
@@ -4,30 +4,29 @@
 
 obj-y                          += cache.o dma-default.o extable.o fault.o \
                                   init.o pgtable.o tlbex.o tlbex-fault.o \
-                                  uasm.o
+                                  uasm.o page.o
 
 obj-$(CONFIG_32BIT)            += ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)            += pgtable-64.o
 obj-$(CONFIG_HIGHMEM)          += highmem.o
 
-obj-$(CONFIG_CPU_LOONGSON2)    += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
-obj-$(CONFIG_CPU_MIPS32)       += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
-obj-$(CONFIG_CPU_MIPS64)       += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
-obj-$(CONFIG_CPU_NEVADA)       += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
-obj-$(CONFIG_CPU_R10000)       += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
-obj-$(CONFIG_CPU_R3000)                += c-r3k.o tlb-r3k.o pg-r4k.o
-obj-$(CONFIG_CPU_R4300)                += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
-obj-$(CONFIG_CPU_R4X00)                += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
-obj-$(CONFIG_CPU_R5000)                += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
-obj-$(CONFIG_CPU_R5432)                += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
-obj-$(CONFIG_CPU_R8000)                += c-r4k.o cex-gen.o pg-r4k.o tlb-r8k.o
-obj-$(CONFIG_CPU_RM7000)       += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
-obj-$(CONFIG_CPU_RM9000)       += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
-obj-$(CONFIG_CPU_SB1)          += c-r4k.o cerr-sb1.o cex-sb1.o pg-sb1.o \
-                                  tlb-r4k.o
-obj-$(CONFIG_CPU_TX39XX)       += c-tx39.o pg-r4k.o tlb-r3k.o
-obj-$(CONFIG_CPU_TX49XX)       += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
-obj-$(CONFIG_CPU_VR41XX)       += c-r4k.o cex-gen.o pg-r4k.o tlb-r4k.o
+obj-$(CONFIG_CPU_LOONGSON2)    += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_MIPS32)       += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_MIPS64)       += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_NEVADA)       += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R10000)       += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R3000)                += c-r3k.o tlb-r3k.o
+obj-$(CONFIG_CPU_R4300)                += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R4X00)                += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R5000)                += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R5432)                += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_R8000)                += c-r4k.o cex-gen.o tlb-r8k.o
+obj-$(CONFIG_CPU_RM7000)       += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_RM9000)       += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_SB1)          += c-r4k.o cerr-sb1.o cex-sb1.o tlb-r4k.o
+obj-$(CONFIG_CPU_TX39XX)       += c-tx39.o tlb-r3k.o
+obj-$(CONFIG_CPU_TX49XX)       += c-r4k.o cex-gen.o tlb-r4k.o
+obj-$(CONFIG_CPU_VR41XX)       += c-r4k.o cex-gen.o tlb-r4k.o
 
 obj-$(CONFIG_IP22_CPU_SCACHE)  += sc-ip22.o
 obj-$(CONFIG_R5000_CPU_SCACHE)  += sc-r5k.o
index 77aefb4ebedd45944d0923cd5ff41810bc31fb1f..643c8bcffff313d2f9d48a6a9daae396d2263bba 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/linkage.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/bitops.h>
 
 #include <asm/bcache.h>
@@ -53,6 +54,12 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
        preempt_enable();
 }
 
+#if defined(CONFIG_MIPS_CMP)
+#define cpu_has_safe_index_cacheops 0
+#else
+#define cpu_has_safe_index_cacheops 1
+#endif
+
 /*
  * Must die.
  */
@@ -481,6 +488,8 @@ static inline void local_r4k_flush_cache_page(void *args)
 
        if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
                r4k_blast_dcache_page(addr);
+               if (exec && !cpu_icache_snoops_remote_store)
+                       r4k_blast_scache_page(addr);
        }
        if (exec) {
                if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
@@ -583,7 +592,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
         * subset property so we have to flush the primary caches
         * explicitly
         */
-       if (size >= dcache_size) {
+       if (cpu_has_safe_index_cacheops && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
@@ -606,7 +615,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
                return;
        }
 
-       if (size >= dcache_size) {
+       if (cpu_has_safe_index_cacheops && size >= dcache_size) {
                r4k_blast_dcache();
        } else {
                R4600_HIT_CACHEOP_WAR_IMPL;
@@ -968,6 +977,7 @@ static void __cpuinit probe_pcache(void)
        case CPU_24K:
        case CPU_34K:
        case CPU_74K:
+       case CPU_1004K:
                if ((read_c0_config7() & (1 << 16))) {
                        /* effectively physically indexed dcache,
                           thus no virtual aliases. */
@@ -1216,9 +1226,25 @@ void au1x00_fixup_config_od(void)
        }
 }
 
+static int __cpuinitdata cca = -1;
+
+static int __init cca_setup(char *str)
+{
+       get_option(&str, &cca);
+
+       return 1;
+}
+
+__setup("cca=", cca_setup);
+
 static void __cpuinit coherency_setup(void)
 {
-       change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
+       if (cca < 0 || cca > 7)
+               cca = read_c0_config() & CONF_CM_CMASK;
+       _page_cachable_default = cca << _CACHE_SHIFT;
+
+       pr_debug("Using cache attribute %d\n", cca);
+       change_c0_config(CONF_CM_CMASK, cca);
 
        /*
         * c0_status.cu=0 specifies that updates by the sc instruction use
@@ -1248,6 +1274,20 @@ static void __cpuinit coherency_setup(void)
        }
 }
 
+#if defined(CONFIG_DMA_NONCOHERENT)
+
+static int __cpuinitdata coherentio;
+
+static int __init setcoherentio(char *str)
+{
+       coherentio = 1;
+
+       return 1;
+}
+
+__setup("coherentio", setcoherentio);
+#endif
+
 void __cpuinit r4k_cache_init(void)
 {
        extern void build_clear_page(void);
@@ -1307,14 +1347,22 @@ void __cpuinit r4k_cache_init(void)
        flush_data_cache_page   = r4k_flush_data_cache_page;
        flush_icache_range      = r4k_flush_icache_range;
 
-#ifdef CONFIG_DMA_NONCOHERENT
-       _dma_cache_wback_inv    = r4k_dma_cache_wback_inv;
-       _dma_cache_wback        = r4k_dma_cache_wback_inv;
-       _dma_cache_inv          = r4k_dma_cache_inv;
+#if defined(CONFIG_DMA_NONCOHERENT)
+       if (coherentio) {
+               _dma_cache_wback_inv    = (void *)cache_noop;
+               _dma_cache_wback        = (void *)cache_noop;
+               _dma_cache_inv          = (void *)cache_noop;
+       } else {
+               _dma_cache_wback_inv    = r4k_dma_cache_wback_inv;
+               _dma_cache_wback        = r4k_dma_cache_wback_inv;
+               _dma_cache_inv          = r4k_dma_cache_inv;
+       }
 #endif
 
        build_clear_page();
        build_copy_page();
+#if !defined(CONFIG_MIPS_CMP)
        local_r4k___flush_cache_all(NULL);
+#endif
        coherency_setup();
 }
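
The cca= and coherentio handling added above uses the kernel's __setup() mechanism to parse early boot parameters. The stand-alone sketch below models only the cca= parsing and range check; get_option_demo() is a crude stand-in for the kernel's get_option(), and the boot string is made up.

    #include <stdio.h>
    #include <stdlib.h>

    static int cca = -1;            /* -1 means "keep the CPU's current CCA" */

    /* Crude stand-in for the kernel's get_option(&str, &val). */
    static void get_option_demo(const char *str, int *val)
    {
            *val = atoi(str);
    }

    static void cca_setup_demo(const char *str)
    {
            get_option_demo(str, &cca);
    }

    int main(void)
    {
            cca_setup_demo("3");            /* models booting with cca=3 */
            if (cca < 0 || cca > 7)
                    printf("invalid CCA, falling back to the current Config.K0\n");
            else
                    printf("using cache coherency attribute %d\n", cca);
            return 0;
    }
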
index f5903679ee6af552e9f50a743ef8fa8fd462286d..034e8506f6ea712a9554d9f13de28f622d8db1d4 100644 (file)
@@ -130,8 +130,28 @@ void __update_cache(struct vm_area_struct *vma, unsigned long address,
        }
 }
 
-static char cache_panic[] __cpuinitdata =
-       "Yeee, unsupported cache architecture.";
+unsigned long _page_cachable_default;
+EXPORT_SYMBOL_GPL(_page_cachable_default);
+
+static inline void setup_protection_map(void)
+{
+       protection_map[0] = PAGE_NONE;
+       protection_map[1] = PAGE_READONLY;
+       protection_map[2] = PAGE_COPY;
+       protection_map[3] = PAGE_COPY;
+       protection_map[4] = PAGE_READONLY;
+       protection_map[5] = PAGE_READONLY;
+       protection_map[6] = PAGE_COPY;
+       protection_map[7] = PAGE_COPY;
+       protection_map[8] = PAGE_NONE;
+       protection_map[9] = PAGE_READONLY;
+       protection_map[10] = PAGE_SHARED;
+       protection_map[11] = PAGE_SHARED;
+       protection_map[12] = PAGE_READONLY;
+       protection_map[13] = PAGE_READONLY;
+       protection_map[14] = PAGE_SHARED;
+       protection_map[15] = PAGE_SHARED;
+}
 
 void __devinit cpu_cache_init(void)
 {
@@ -139,34 +159,29 @@ void __devinit cpu_cache_init(void)
                extern void __weak r3k_cache_init(void);
 
                r3k_cache_init();
-               return;
        }
        if (cpu_has_6k_cache) {
                extern void __weak r6k_cache_init(void);
 
                r6k_cache_init();
-               return;
        }
        if (cpu_has_4k_cache) {
                extern void __weak r4k_cache_init(void);
 
                r4k_cache_init();
-               return;
        }
        if (cpu_has_8k_cache) {
                extern void __weak r8k_cache_init(void);
 
                r8k_cache_init();
-               return;
        }
        if (cpu_has_tx39_cache) {
                extern void __weak tx39_cache_init(void);
 
                tx39_cache_init();
-               return;
        }
 
-       panic(cache_panic);
+       setup_protection_map();
 }
 
 int __weak __uncached_access(struct file *file, unsigned long addr)
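
The 16-entry protection_map filled in by setup_protection_map() above is indexed by the low vm_flags bits (read, write, exec, shared). The stand-alone sketch below models that lookup with string names in place of pgprot_t values; the flag values mirror VM_READ/VM_WRITE/VM_EXEC/VM_SHARED, but everything here is illustrative rather than kernel code.

    #include <stdio.h>

    /* Stand-in flag values matching the kernel's VM_READ/WRITE/EXEC/SHARED. */
    #define VM_READ   0x1
    #define VM_WRITE  0x2
    #define VM_EXEC   0x4
    #define VM_SHARED 0x8

    /* Names only; the real table holds pgprot_t values such as PAGE_COPY. */
    static const char *protection_map_demo[16] = {
            "NONE",     "READONLY", "COPY",   "COPY",
            "READONLY", "READONLY", "COPY",   "COPY",
            "NONE",     "READONLY", "SHARED", "SHARED",
            "READONLY", "READONLY", "SHARED", "SHARED"
    };

    int main(void)
    {
            unsigned long vm_flags = VM_READ | VM_WRITE | VM_SHARED;

            /* Mirrors protection_map[vm_flags & 0xf] in the kernel. */
            printf("protection: PAGE_%s\n",
                   protection_map_demo[vm_flags & 0xf]);
            return 0;
    }
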
index c7aed133d11d586894b14bd3a349ec2ba088baad..ecd562d2c348908f4e2fade35c72733d58909ef5 100644 (file)
@@ -142,7 +142,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
 #endif
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, PAGE_KERNEL);
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
        entrylo = pte.pte_high;
 #else
        entrylo = pte_val(pte) >> 6;
@@ -221,7 +221,7 @@ void copy_user_highpage(struct page *to, struct page *from,
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom, KM_USER0);
        }
-       if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
+       if ((!cpu_has_ic_fills_f_dc) ||
            pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                flush_data_cache_page((unsigned long)vto);
        kunmap_atomic(vto, KM_USER1);
@@ -229,8 +229,6 @@ void copy_user_highpage(struct page *to, struct page *from,
        smp_wmb();
 }
 
-EXPORT_SYMBOL(copy_user_highpage);
-
 void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
@@ -249,8 +247,6 @@ void copy_to_user_page(struct vm_area_struct *vma,
                flush_cache_page(vma, vaddr, page_to_pfn(page));
 }
 
-EXPORT_SYMBOL(copy_to_user_page);
-
 void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len)
@@ -267,9 +263,6 @@ void copy_from_user_page(struct vm_area_struct *vma,
        }
 }
 
-EXPORT_SYMBOL(copy_from_user_page);
-
-
 #ifdef CONFIG_HIGHMEM
 unsigned long highstart_pfn, highend_pfn;
 
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
new file mode 100644 (file)
index 0000000..d827d61
--- /dev/null
@@ -0,0 +1,684 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007  Maciej W. Rozycki
+ * Copyright (C) 2008  Thiemo Seufer
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+
+#include <asm/bugs.h>
+#include <asm/cacheops.h>
+#include <asm/inst.h>
+#include <asm/io.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/prefetch.h>
+#include <asm/system.h>
+#include <asm/bootinfo.h>
+#include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
+#include <asm/cpu.h>
+#include <asm/war.h>
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_dma.h>
+#endif
+
+#include "uasm.h"
+
+/* Registers used in the assembled routines. */
+#define ZERO 0
+#define AT 2
+#define A0 4
+#define A1 5
+#define A2 6
+#define T0 8
+#define T1 9
+#define T2 10
+#define T3 11
+#define T9 25
+#define RA 31
+
+/* Handle labels (which must be positive integers). */
+enum label_id {
+       label_clear_nopref = 1,
+       label_clear_pref,
+       label_copy_nopref,
+       label_copy_pref_both,
+       label_copy_pref_store,
+};
+
+UASM_L_LA(_clear_nopref)
+UASM_L_LA(_clear_pref)
+UASM_L_LA(_copy_nopref)
+UASM_L_LA(_copy_pref_both)
+UASM_L_LA(_copy_pref_store)
+
+/* We need one branch and therefore one relocation per target label. */
+static struct uasm_label __cpuinitdata labels[5];
+static struct uasm_reloc __cpuinitdata relocs[5];
+
+#define cpu_is_r4600_v1_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002010)
+#define cpu_is_r4600_v2_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002020)
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache:            0x058 bytes
+ * R4600 v1.7:                         0x05c bytes
+ * R4600 v2.0:                         0x060 bytes
+ * With prefetching, 16 word strides   0x120 bytes
+ */
+
+static u32 clear_page_array[0x120 / 4];
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+void clear_page_cpu(void *page) __attribute__((alias("clear_page_array")));
+#else
+void clear_page(void *page) __attribute__((alias("clear_page_array")));
+#endif
+
+EXPORT_SYMBOL(clear_page);
+
+/*
+ * Maximum sizes:
+ *
+ * R4000 128 bytes S-cache:            0x11c bytes
+ * R4600 v1.7:                         0x080 bytes
+ * R4600 v2.0:                         0x07c bytes
+ * With prefetching, 16 word strides   0x540 bytes
+ */
+static u32 copy_page_array[0x540 / 4];
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+void
+copy_page_cpu(void *to, void *from) __attribute__((alias("copy_page_array")));
+#else
+void copy_page(void *to, void *from) __attribute__((alias("copy_page_array")));
+#endif
+
+EXPORT_SYMBOL(copy_page);
+
+
+static int pref_bias_clear_store __cpuinitdata;
+static int pref_bias_copy_load __cpuinitdata;
+static int pref_bias_copy_store __cpuinitdata;
+
+static u32 pref_src_mode __cpuinitdata;
+static u32 pref_dst_mode __cpuinitdata;
+
+static int clear_word_size __cpuinitdata;
+static int copy_word_size __cpuinitdata;
+
+static int half_clear_loop_size __cpuinitdata;
+static int half_copy_loop_size __cpuinitdata;
+
+static int cache_line_size __cpuinitdata;
+#define cache_line_mask() (cache_line_size - 1)
+
+static inline void __cpuinit
+pg_addiu(u32 **buf, unsigned int reg1, unsigned int reg2, unsigned int off)
+{
+       if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
+               if (off > 0x7fff) {
+                       uasm_i_lui(buf, T9, uasm_rel_hi(off));
+                       uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
+               } else
+                       uasm_i_addiu(buf, T9, ZERO, off);
+               uasm_i_daddu(buf, reg1, reg2, T9);
+       } else {
+               if (off > 0x7fff) {
+                       uasm_i_lui(buf, T9, uasm_rel_hi(off));
+                       uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
+                       UASM_i_ADDU(buf, reg1, reg2, T9);
+               } else
+                       UASM_i_ADDIU(buf, reg1, reg2, off);
+       }
+}
+
+static void __cpuinit set_prefetch_parameters(void)
+{
+       if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg)
+               clear_word_size = 8;
+       else
+               clear_word_size = 4;
+
+       if (cpu_has_64bit_gp_regs)
+               copy_word_size = 8;
+       else
+               copy_word_size = 4;
+
+       /*
+        * The pref instructions used here use "streaming" hints, which cause
+        * the copied data to be evicted from the cache sooner.  A page copy
+        * often copies far more data than is subsequently used, so this should
+        * reduce cache pollution, though there is no real performance data to
+        * back this up.
+        */
+       if (cpu_has_prefetch) {
+               /*
+                * XXX: Most prefetch bias values in here are based on
+                * guesswork.
+                */
+               cache_line_size = cpu_dcache_line_size();
+               switch (current_cpu_type()) {
+               case CPU_TX49XX:
+                       /* TX49 supports only Pref_Load */
+                       pref_bias_copy_load = 256;
+                       break;
+
+               case CPU_RM9000:
+                       /*
+                        * As a workaround for erratum G105, which makes the
+                        * PrepareForStore hint unusable, we fall back to
+                        * StoreRetained on the RM9000.  Once it is known which
+                        * versions of the RM9000 are affected, we will be able
+                        * to make this conditional.
+                        */
+
+               case CPU_R10000:
+               case CPU_R12000:
+               case CPU_R14000:
+                       /*
+                        * Those values have been experimentally tuned for an
+                        * Origin 200.
+                        */
+                       pref_bias_clear_store = 512;
+                       pref_bias_copy_load = 256;
+                       pref_bias_copy_store = 256;
+                       pref_src_mode = Pref_LoadStreamed;
+                       pref_dst_mode = Pref_StoreStreamed;
+                       break;
+
+               case CPU_SB1:
+               case CPU_SB1A:
+                       pref_bias_clear_store = 128;
+                       pref_bias_copy_load = 128;
+                       pref_bias_copy_store = 128;
+                       /*
+                        * SB1 pass1 Pref_LoadStreamed/Pref_StoreStreamed
+                        * hints are broken.
+                        */
+                       if (current_cpu_type() == CPU_SB1 &&
+                           (current_cpu_data.processor_id & 0xff) < 0x02) {
+                               pref_src_mode = Pref_Load;
+                               pref_dst_mode = Pref_Store;
+                       } else {
+                               pref_src_mode = Pref_LoadStreamed;
+                               pref_dst_mode = Pref_StoreStreamed;
+                       }
+                       break;
+
+               default:
+                       pref_bias_clear_store = 128;
+                       pref_bias_copy_load = 256;
+                       pref_bias_copy_store = 128;
+                       pref_src_mode = Pref_LoadStreamed;
+                       pref_dst_mode = Pref_PrepareForStore;
+                       break;
+               }
+       } else {
+               if (cpu_has_cache_cdex_s)
+                       cache_line_size = cpu_scache_line_size();
+               else if (cpu_has_cache_cdex_p)
+                       cache_line_size = cpu_dcache_line_size();
+       }
+       /*
+        * Too much unrolling will overflow the available space in
+        * clear_page_array / copy_page_array.  8 words sounds generous,
+        * but an R4000 with a 128-byte L2 line length can exceed even that.
+        */
+       half_clear_loop_size = min(8 * clear_word_size,
+                                  max(cache_line_size >> 1,
+                                      4 * clear_word_size));
+       half_copy_loop_size = min(8 * copy_word_size,
+                                 max(cache_line_size >> 1,
+                                     4 * copy_word_size));
+}
+
+static void __cpuinit build_clear_store(u32 **buf, int off)
+{
+       if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) {
+               uasm_i_sd(buf, ZERO, off, A0);
+       } else {
+               uasm_i_sw(buf, ZERO, off, A0);
+       }
+}
+
+static inline void __cpuinit build_clear_pref(u32 **buf, int off)
+{
+       if (off & cache_line_mask())
+               return;
+
+       if (pref_bias_clear_store) {
+               uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
+                           A0);
+       } else if (cpu_has_cache_cdex_s) {
+               uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+       } else if (cpu_has_cache_cdex_p) {
+               if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+                       uasm_i_nop(buf);
+                       uasm_i_nop(buf);
+                       uasm_i_nop(buf);
+                       uasm_i_nop(buf);
+               }
+
+               if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+                       uasm_i_lw(buf, ZERO, ZERO, AT);
+
+               uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+       }
+}
+
+void __cpuinit build_clear_page(void)
+{
+       int off;
+       u32 *buf = (u32 *)&clear_page_array;
+       struct uasm_label *l = labels;
+       struct uasm_reloc *r = relocs;
+       int i;
+
+       memset(labels, 0, sizeof(labels));
+       memset(relocs, 0, sizeof(relocs));
+
+       set_prefetch_parameters();
+
+       /*
+        * This algorithm makes the following assumptions:
+        *   - The prefetch bias is a multiple of 2 words.
+        *   - The prefetch bias is less than one page.
+        */
+       BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
+       BUG_ON(PAGE_SIZE < pref_bias_clear_store);
+
+       off = PAGE_SIZE - pref_bias_clear_store;
+       if (off > 0xffff || !pref_bias_clear_store)
+               pg_addiu(&buf, A2, A0, off);
+       else
+               uasm_i_ori(&buf, A2, A0, off);
+
+       if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+               uasm_i_lui(&buf, AT, 0xa000);
+
+       off = min(8, pref_bias_clear_store / cache_line_size) *
+             cache_line_size;
+       while (off) {
+               build_clear_pref(&buf, -off);
+               off -= cache_line_size;
+       }
+       uasm_l_clear_pref(&l, buf);
+       do {
+               build_clear_pref(&buf, off);
+               build_clear_store(&buf, off);
+               off += clear_word_size;
+       } while (off < half_clear_loop_size);
+       pg_addiu(&buf, A0, A0, 2 * off);
+       off = -off;
+       do {
+               build_clear_pref(&buf, off);
+               if (off == -clear_word_size)
+                       uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
+               build_clear_store(&buf, off);
+               off += clear_word_size;
+       } while (off < 0);
+
+       if (pref_bias_clear_store) {
+               pg_addiu(&buf, A2, A0, pref_bias_clear_store);
+               uasm_l_clear_nopref(&l, buf);
+               off = 0;
+               do {
+                       build_clear_store(&buf, off);
+                       off += clear_word_size;
+               } while (off < half_clear_loop_size);
+               pg_addiu(&buf, A0, A0, 2 * off);
+               off = -off;
+               do {
+                       if (off == -clear_word_size)
+                               uasm_il_bne(&buf, &r, A0, A2,
+                                           label_clear_nopref);
+                       build_clear_store(&buf, off);
+                       off += clear_word_size;
+               } while (off < 0);
+       }
+
+       uasm_i_jr(&buf, RA);
+       uasm_i_nop(&buf);
+
+       BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array));
+
+       uasm_resolve_relocs(relocs, labels);
+
+       pr_debug("Synthesized clear page handler (%u instructions).\n",
+                (u32)(buf - clear_page_array));
+
+       pr_debug("\t.set push\n");
+       pr_debug("\t.set noreorder\n");
+       for (i = 0; i < (buf - clear_page_array); i++)
+               pr_debug("\t.word 0x%08x\n", clear_page_array[i]);
+       pr_debug("\t.set pop\n");
+}
+
+static void __cpuinit build_copy_load(u32 **buf, int reg, int off)
+{
+       if (cpu_has_64bit_gp_regs) {
+               uasm_i_ld(buf, reg, off, A1);
+       } else {
+               uasm_i_lw(buf, reg, off, A1);
+       }
+}
+
+static void __cpuinit build_copy_store(u32 **buf, int reg, int off)
+{
+       if (cpu_has_64bit_gp_regs) {
+               uasm_i_sd(buf, reg, off, A0);
+       } else {
+               uasm_i_sw(buf, reg, off, A0);
+       }
+}
+
+static inline void build_copy_load_pref(u32 **buf, int off)
+{
+       if (off & cache_line_mask())
+               return;
+
+       if (pref_bias_copy_load)
+               uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
+}
+
+static inline void build_copy_store_pref(u32 **buf, int off)
+{
+       if (off & cache_line_mask())
+               return;
+
+       if (pref_bias_copy_store) {
+               uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
+                           A0);
+       } else if (cpu_has_cache_cdex_s) {
+               uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+       } else if (cpu_has_cache_cdex_p) {
+               if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+                       uasm_i_nop(buf);
+                       uasm_i_nop(buf);
+                       uasm_i_nop(buf);
+                       uasm_i_nop(buf);
+               }
+
+               if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+                       uasm_i_lw(buf, ZERO, ZERO, AT);
+
+               uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+       }
+}
+
+void __cpuinit build_copy_page(void)
+{
+       int off;
+       u32 *buf = (u32 *)&copy_page_array;
+       struct uasm_label *l = labels;
+       struct uasm_reloc *r = relocs;
+       int i;
+
+       memset(labels, 0, sizeof(labels));
+       memset(relocs, 0, sizeof(relocs));
+
+       set_prefetch_parameters();
+
+       /*
+        * This algorithm makes the following assumptions:
+        *   - All prefetch biases are multiples of 8 words.
+        *   - The prefetch biases are less than one page.
+        *   - The store prefetch bias isn't greater than the load
+        *     prefetch bias.
+        */
+       BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
+       BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
+       BUG_ON(PAGE_SIZE < pref_bias_copy_load);
+       BUG_ON(pref_bias_copy_store > pref_bias_copy_load);
+
+       off = PAGE_SIZE - pref_bias_copy_load;
+       if (off > 0xffff || !pref_bias_copy_load)
+               pg_addiu(&buf, A2, A0, off);
+       else
+               uasm_i_ori(&buf, A2, A0, off);
+
+       if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+               uasm_i_lui(&buf, AT, 0xa000);
+
+       off = min(8, pref_bias_copy_load / cache_line_size) * cache_line_size;
+       while (off) {
+               build_copy_load_pref(&buf, -off);
+               off -= cache_line_size;
+       }
+       off = min(8, pref_bias_copy_store / cache_line_size) * cache_line_size;
+       while (off) {
+               build_copy_store_pref(&buf, -off);
+               off -= cache_line_size;
+       }
+       uasm_l_copy_pref_both(&l, buf);
+       do {
+               build_copy_load_pref(&buf, off);
+               build_copy_load(&buf, T0, off);
+               build_copy_load_pref(&buf, off + copy_word_size);
+               build_copy_load(&buf, T1, off + copy_word_size);
+               build_copy_load_pref(&buf, off + 2 * copy_word_size);
+               build_copy_load(&buf, T2, off + 2 * copy_word_size);
+               build_copy_load_pref(&buf, off + 3 * copy_word_size);
+               build_copy_load(&buf, T3, off + 3 * copy_word_size);
+               build_copy_store_pref(&buf, off);
+               build_copy_store(&buf, T0, off);
+               build_copy_store_pref(&buf, off + copy_word_size);
+               build_copy_store(&buf, T1, off + copy_word_size);
+               build_copy_store_pref(&buf, off + 2 * copy_word_size);
+               build_copy_store(&buf, T2, off + 2 * copy_word_size);
+               build_copy_store_pref(&buf, off + 3 * copy_word_size);
+               build_copy_store(&buf, T3, off + 3 * copy_word_size);
+               off += 4 * copy_word_size;
+       } while (off < half_copy_loop_size);
+       pg_addiu(&buf, A1, A1, 2 * off);
+       pg_addiu(&buf, A0, A0, 2 * off);
+       off = -off;
+       do {
+               build_copy_load_pref(&buf, off);
+               build_copy_load(&buf, T0, off);
+               build_copy_load_pref(&buf, off + copy_word_size);
+               build_copy_load(&buf, T1, off + copy_word_size);
+               build_copy_load_pref(&buf, off + 2 * copy_word_size);
+               build_copy_load(&buf, T2, off + 2 * copy_word_size);
+               build_copy_load_pref(&buf, off + 3 * copy_word_size);
+               build_copy_load(&buf, T3, off + 3 * copy_word_size);
+               build_copy_store_pref(&buf, off);
+               build_copy_store(&buf, T0, off);
+               build_copy_store_pref(&buf, off + copy_word_size);
+               build_copy_store(&buf, T1, off + copy_word_size);
+               build_copy_store_pref(&buf, off + 2 * copy_word_size);
+               build_copy_store(&buf, T2, off + 2 * copy_word_size);
+               build_copy_store_pref(&buf, off + 3 * copy_word_size);
+               if (off == -(4 * copy_word_size))
+                       uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
+               build_copy_store(&buf, T3, off + 3 * copy_word_size);
+               off += 4 * copy_word_size;
+       } while (off < 0);
+
+       if (pref_bias_copy_load - pref_bias_copy_store) {
+               pg_addiu(&buf, A2, A0,
+                        pref_bias_copy_load - pref_bias_copy_store);
+               uasm_l_copy_pref_store(&l, buf);
+               off = 0;
+               do {
+                       build_copy_load(&buf, T0, off);
+                       build_copy_load(&buf, T1, off + copy_word_size);
+                       build_copy_load(&buf, T2, off + 2 * copy_word_size);
+                       build_copy_load(&buf, T3, off + 3 * copy_word_size);
+                       build_copy_store_pref(&buf, off);
+                       build_copy_store(&buf, T0, off);
+                       build_copy_store_pref(&buf, off + copy_word_size);
+                       build_copy_store(&buf, T1, off + copy_word_size);
+                       build_copy_store_pref(&buf, off + 2 * copy_word_size);
+                       build_copy_store(&buf, T2, off + 2 * copy_word_size);
+                       build_copy_store_pref(&buf, off + 3 * copy_word_size);
+                       build_copy_store(&buf, T3, off + 3 * copy_word_size);
+                       off += 4 * copy_word_size;
+               } while (off < half_copy_loop_size);
+               pg_addiu(&buf, A1, A1, 2 * off);
+               pg_addiu(&buf, A0, A0, 2 * off);
+               off = -off;
+               do {
+                       build_copy_load(&buf, T0, off);
+                       build_copy_load(&buf, T1, off + copy_word_size);
+                       build_copy_load(&buf, T2, off + 2 * copy_word_size);
+                       build_copy_load(&buf, T3, off + 3 * copy_word_size);
+                       build_copy_store_pref(&buf, off);
+                       build_copy_store(&buf, T0, off);
+                       build_copy_store_pref(&buf, off + copy_word_size);
+                       build_copy_store(&buf, T1, off + copy_word_size);
+                       build_copy_store_pref(&buf, off + 2 * copy_word_size);
+                       build_copy_store(&buf, T2, off + 2 * copy_word_size);
+                       build_copy_store_pref(&buf, off + 3 * copy_word_size);
+                       if (off == -(4 * copy_word_size))
+                               uasm_il_bne(&buf, &r, A2, A0,
+                                           label_copy_pref_store);
+                       build_copy_store(&buf, T3, off + 3 * copy_word_size);
+                       off += 4 * copy_word_size;
+               } while (off < 0);
+       }
+
+       if (pref_bias_copy_store) {
+               pg_addiu(&buf, A2, A0, pref_bias_copy_store);
+               uasm_l_copy_nopref(&l, buf);
+               off = 0;
+               do {
+                       build_copy_load(&buf, T0, off);
+                       build_copy_load(&buf, T1, off + copy_word_size);
+                       build_copy_load(&buf, T2, off + 2 * copy_word_size);
+                       build_copy_load(&buf, T3, off + 3 * copy_word_size);
+                       build_copy_store(&buf, T0, off);
+                       build_copy_store(&buf, T1, off + copy_word_size);
+                       build_copy_store(&buf, T2, off + 2 * copy_word_size);
+                       build_copy_store(&buf, T3, off + 3 * copy_word_size);
+                       off += 4 * copy_word_size;
+               } while (off < half_copy_loop_size);
+               pg_addiu(&buf, A1, A1, 2 * off);
+               pg_addiu(&buf, A0, A0, 2 * off);
+               off = -off;
+               do {
+                       build_copy_load(&buf, T0, off);
+                       build_copy_load(&buf, T1, off + copy_word_size);
+                       build_copy_load(&buf, T2, off + 2 * copy_word_size);
+                       build_copy_load(&buf, T3, off + 3 * copy_word_size);
+                       build_copy_store(&buf, T0, off);
+                       build_copy_store(&buf, T1, off + copy_word_size);
+                       build_copy_store(&buf, T2, off + 2 * copy_word_size);
+                       if (off == -(4 * copy_word_size))
+                               uasm_il_bne(&buf, &r, A2, A0,
+                                           label_copy_nopref);
+                       build_copy_store(&buf, T3, off + 3 * copy_word_size);
+                       off += 4 * copy_word_size;
+               } while (off < 0);
+       }
+
+       uasm_i_jr(&buf, RA);
+       uasm_i_nop(&buf);
+
+       BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array));
+
+       uasm_resolve_relocs(relocs, labels);
+
+       pr_debug("Synthesized copy page handler (%u instructions).\n",
+                (u32)(buf - copy_page_array));
+
+       pr_debug("\t.set push\n");
+       pr_debug("\t.set noreorder\n");
+       for (i = 0; i < (buf - copy_page_array); i++)
+               pr_debug("\t.word 0x%08x\n", copy_page_array[i]);
+       pr_debug("\t.set pop\n");
+}
+
+#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
+
+/*
+ * Pad descriptors to cacheline, since each is exclusively owned by a
+ * particular CPU.
+ */
+struct dmadscr {
+       u64 dscr_a;
+       u64 dscr_b;
+       u64 pad_a;
+       u64 pad_b;
+} ____cacheline_aligned_in_smp page_descr[DM_NUM_CHANNELS];
+
+void sb1_dma_init(void)
+{
+       int i;
+
+       for (i = 0; i < DM_NUM_CHANNELS; i++) {
+               const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
+                                    V_DM_DSCR_BASE_RINGSZ(1);
+               void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));
+
+               __raw_writeq(base_val, base_reg);
+               __raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
+               __raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
+       }
+}
+
+void clear_page(void *page)
+{
+       u64 to_phys = CPHYSADDR((unsigned long)page);
+       unsigned int cpu = smp_processor_id();
+
+       /* if the page is not in KSEG0, use old way */
+       if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
+               return clear_page_cpu(page);
+
+       page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
+                                M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
+       page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
+       __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
+
+       /*
+        * Don't really want to do it this way, but there's no
+        * reliable way to delay completion detection.
+        */
+       while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
+                & M_DM_DSCR_BASE_INTERRUPT))
+               ;
+       __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
+}
+
+void copy_page(void *to, void *from)
+{
+       u64 from_phys = CPHYSADDR((unsigned long)from);
+       u64 to_phys = CPHYSADDR((unsigned long)to);
+       unsigned int cpu = smp_processor_id();
+
+       /* if any page is not in KSEG0, use old way */
+       if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
+           || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
+               return copy_page_cpu(to, from);
+
+       page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
+                                M_DM_DSCRA_INTERRUPT;
+       page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
+       __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
+
+       /*
+        * Don't really want to do it this way, but there's no
+        * reliable way to delay completion detection.
+        */
+       while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
+                & M_DM_DSCR_BASE_INTERRUPT))
+               ;
+       __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
+}
+
+#endif /* CONFIG_SIBYTE_DMA_PAGEOPS */
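
Setting aside the prefetch tuning and CPU workarounds, the routine synthesized by build_clear_page() above amounts to a loop of word-sized zero stores over one page, with prefetch hints running ahead of the stores. The stand-alone sketch below is a plain C equivalent under that simplification; PAGE_SIZE_DEMO, the 512-byte prefetch distance and clear_page_demo() are illustrative, not kernel code.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define PAGE_SIZE_DEMO 4096

    /* Roughly what the generated clear_page boils down to on a 64-bit CPU:
     * word-sized zero stores over one page, with a write prefetch hint
     * running ahead of the store pointer. */
    static void clear_page_demo(void *page)
    {
            uint64_t *p = page;

            for (size_t off = 0; off < PAGE_SIZE_DEMO; off += sizeof(*p)) {
                    if (off + 512 < PAGE_SIZE_DEMO)
                            __builtin_prefetch((char *)page + off + 512, 1, 0);
                    *p++ = 0;
            }
    }

    int main(void)
    {
            static uint64_t page[PAGE_SIZE_DEMO / sizeof(uint64_t)];

            memset(page, 0xff, sizeof(page));
            clear_page_demo(page);
            printf("first/last words: %llu %llu\n",
                   (unsigned long long)page[0],
                   (unsigned long long)page[PAGE_SIZE_DEMO / 8 - 1]);
            return 0;
    }
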
diff --git a/arch/mips/mm/pg-r4k.c b/arch/mips/mm/pg-r4k.c
deleted file mode 100644 (file)
index 455dedb..0000000
+++ /dev/null
@@ -1,534 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2003, 04, 05 Ralf Baechle (ralf@linux-mips.org)
- * Copyright (C) 2007  Maciej W. Rozycki
- */
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/proc_fs.h>
-
-#include <asm/bugs.h>
-#include <asm/cacheops.h>
-#include <asm/inst.h>
-#include <asm/io.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/prefetch.h>
-#include <asm/system.h>
-#include <asm/bootinfo.h>
-#include <asm/mipsregs.h>
-#include <asm/mmu_context.h>
-#include <asm/cpu.h>
-#include <asm/war.h>
-
-#define half_scache_line_size()        (cpu_scache_line_size() >> 1)
-#define cpu_is_r4600_v1_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002010)
-#define cpu_is_r4600_v2_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002020)
-
-
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache:            0x58 bytes
- * R4600 v1.7:                         0x5c bytes
- * R4600 v2.0:                         0x60 bytes
- * With prefetching, 16 byte strides   0xa0 bytes
- */
-
-static unsigned int clear_page_array[0x130 / 4];
-
-void clear_page(void * page) __attribute__((alias("clear_page_array")));
-
-EXPORT_SYMBOL(clear_page);
-
-/*
- * Maximum sizes:
- *
- * R4000 128 bytes S-cache:            0x11c bytes
- * R4600 v1.7:                         0x080 bytes
- * R4600 v2.0:                         0x07c bytes
- * With prefetching, 16 byte strides   0x0b8 bytes
- */
-static unsigned int copy_page_array[0x148 / 4];
-
-void copy_page(void *to, void *from) __attribute__((alias("copy_page_array")));
-
-EXPORT_SYMBOL(copy_page);
-
-/*
- * This is suboptimal for 32-bit kernels; we assume that R10000 is only used
- * with 64-bit kernels.  The prefetch offsets have been experimentally tuned
- * an Origin 200.
- */
-static int pref_offset_clear __cpuinitdata = 512;
-static int pref_offset_copy  __cpuinitdata = 256;
-
-static unsigned int pref_src_mode __cpuinitdata;
-static unsigned int pref_dst_mode __cpuinitdata;
-
-static int load_offset __cpuinitdata;
-static int store_offset __cpuinitdata;
-
-static unsigned int __cpuinitdata *dest, *epc;
-
-static unsigned int instruction_pending;
-static union mips_instruction delayed_mi;
-
-static void __cpuinit emit_instruction(union mips_instruction mi)
-{
-       if (instruction_pending)
-               *epc++ = delayed_mi.word;
-
-       instruction_pending = 1;
-       delayed_mi = mi;
-}
-
-static inline void flush_delay_slot_or_nop(void)
-{
-       if (instruction_pending) {
-               *epc++ = delayed_mi.word;
-               instruction_pending = 0;
-               return;
-       }
-
-       *epc++ = 0;
-}
-
-static inline unsigned int *label(void)
-{
-       if (instruction_pending) {
-               *epc++ = delayed_mi.word;
-               instruction_pending = 0;
-       }
-
-       return epc;
-}
-
-static inline void build_insn_word(unsigned int word)
-{
-       union mips_instruction mi;
-
-       mi.word          = word;
-
-       emit_instruction(mi);
-}
-
-static inline void build_nop(void)
-{
-       build_insn_word(0);                     /* nop */
-}
-
-static inline void build_src_pref(int advance)
-{
-       if (!(load_offset & (cpu_dcache_line_size() - 1)) && advance) {
-               union mips_instruction mi;
-
-               mi.i_format.opcode     = pref_op;
-               mi.i_format.rs         = 5;             /* $a1 */
-               mi.i_format.rt         = pref_src_mode;
-               mi.i_format.simmediate = load_offset + advance;
-
-               emit_instruction(mi);
-       }
-}
-
-static inline void __build_load_reg(int reg)
-{
-       union mips_instruction mi;
-       unsigned int width;
-
-       if (cpu_has_64bit_gp_regs) {
-               mi.i_format.opcode     = ld_op;
-               width = 8;
-       } else {
-               mi.i_format.opcode     = lw_op;
-               width = 4;
-       }
-       mi.i_format.rs         = 5;             /* $a1 */
-       mi.i_format.rt         = reg;           /* $reg */
-       mi.i_format.simmediate = load_offset;
-
-       load_offset += width;
-       emit_instruction(mi);
-}
-
-static inline void build_load_reg(int reg)
-{
-       if (cpu_has_prefetch)
-               build_src_pref(pref_offset_copy);
-
-       __build_load_reg(reg);
-}
-
-static inline void build_dst_pref(int advance)
-{
-       if (!(store_offset & (cpu_dcache_line_size() - 1)) && advance) {
-               union mips_instruction mi;
-
-               mi.i_format.opcode     = pref_op;
-               mi.i_format.rs         = 4;             /* $a0 */
-               mi.i_format.rt         = pref_dst_mode;
-               mi.i_format.simmediate = store_offset + advance;
-
-               emit_instruction(mi);
-       }
-}
-
-static inline void build_cdex_s(void)
-{
-       union mips_instruction mi;
-
-       if ((store_offset & (cpu_scache_line_size() - 1)))
-               return;
-
-       mi.c_format.opcode     = cache_op;
-       mi.c_format.rs         = 4;             /* $a0 */
-       mi.c_format.c_op       = 3;             /* Create Dirty Exclusive */
-       mi.c_format.cache      = 3;             /* Secondary Data Cache */
-       mi.c_format.simmediate = store_offset;
-
-       emit_instruction(mi);
-}
-
-static inline void build_cdex_p(void)
-{
-       union mips_instruction mi;
-
-       if (store_offset & (cpu_dcache_line_size() - 1))
-               return;
-
-       if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
-               build_nop();
-               build_nop();
-               build_nop();
-               build_nop();
-       }
-
-       if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-               build_insn_word(0x8c200000);    /* lw      $zero, ($at) */
-
-       mi.c_format.opcode     = cache_op;
-       mi.c_format.rs         = 4;             /* $a0 */
-       mi.c_format.c_op       = 3;             /* Create Dirty Exclusive */
-       mi.c_format.cache      = 1;             /* Data Cache */
-       mi.c_format.simmediate = store_offset;
-
-       emit_instruction(mi);
-}
-
-static void __cpuinit __build_store_reg(int reg)
-{
-       union mips_instruction mi;
-       unsigned int width;
-
-       if (cpu_has_64bit_gp_regs ||
-           (cpu_has_64bit_zero_reg && reg == 0)) {
-               mi.i_format.opcode     = sd_op;
-               width = 8;
-       } else {
-               mi.i_format.opcode     = sw_op;
-               width = 4;
-       }
-       mi.i_format.rs         = 4;             /* $a0 */
-       mi.i_format.rt         = reg;           /* $reg */
-       mi.i_format.simmediate = store_offset;
-
-       store_offset += width;
-       emit_instruction(mi);
-}
-
-static inline void build_store_reg(int reg)
-{
-       int pref_off = cpu_has_prefetch ?
-               (reg ? pref_offset_copy : pref_offset_clear) : 0;
-       if (pref_off)
-               build_dst_pref(pref_off);
-       else if (cpu_has_cache_cdex_s)
-               build_cdex_s();
-       else if (cpu_has_cache_cdex_p)
-               build_cdex_p();
-
-       __build_store_reg(reg);
-}
-
-static inline void build_addiu_rt_rs(unsigned int rt, unsigned int rs,
-                                    unsigned long offset)
-{
-       union mips_instruction mi;
-
-       BUG_ON(offset > 0x7fff);
-
-       if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
-               mi.i_format.opcode     = addiu_op;
-               mi.i_format.rs         = 0;     /* $zero */
-               mi.i_format.rt         = 25;    /* $t9 */
-               mi.i_format.simmediate = offset;
-               emit_instruction(mi);
-
-               mi.r_format.opcode     = spec_op;
-               mi.r_format.rs         = rs;
-               mi.r_format.rt         = 25;    /* $t9 */
-               mi.r_format.rd         = rt;
-               mi.r_format.re         = 0;
-               mi.r_format.func       = daddu_op;
-       } else {
-               mi.i_format.opcode     = cpu_has_64bit_gp_regs ?
-                                        daddiu_op : addiu_op;
-               mi.i_format.rs         = rs;
-               mi.i_format.rt         = rt;
-               mi.i_format.simmediate = offset;
-       }
-       emit_instruction(mi);
-}
-
-static inline void build_addiu_a2_a0(unsigned long offset)
-{
-       build_addiu_rt_rs(6, 4, offset);        /* $a2, $a0, offset */
-}
-
-static inline void build_addiu_a2(unsigned long offset)
-{
-       build_addiu_rt_rs(6, 6, offset);        /* $a2, $a2, offset */
-}
-
-static inline void build_addiu_a1(unsigned long offset)
-{
-       build_addiu_rt_rs(5, 5, offset);        /* $a1, $a1, offset */
-
-       load_offset -= offset;
-}
-
-static inline void build_addiu_a0(unsigned long offset)
-{
-       build_addiu_rt_rs(4, 4, offset);        /* $a0, $a0, offset */
-
-       store_offset -= offset;
-}
-
-static inline void build_bne(unsigned int *dest)
-{
-       union mips_instruction mi;
-
-       mi.i_format.opcode = bne_op;
-       mi.i_format.rs     = 6;                 /* $a2 */
-       mi.i_format.rt     = 4;                 /* $a0 */
-       mi.i_format.simmediate = dest - epc - 1;
-
-       *epc++ = mi.word;
-       flush_delay_slot_or_nop();
-}
-
-static inline void build_jr_ra(void)
-{
-       union mips_instruction mi;
-
-       mi.r_format.opcode = spec_op;
-       mi.r_format.rs     = 31;
-       mi.r_format.rt     = 0;
-       mi.r_format.rd     = 0;
-       mi.r_format.re     = 0;
-       mi.r_format.func   = jr_op;
-
-       *epc++ = mi.word;
-       flush_delay_slot_or_nop();
-}
-
-void __cpuinit build_clear_page(void)
-{
-       unsigned int loop_start;
-       unsigned long off;
-       int i;
-
-       epc = (unsigned int *) &clear_page_array;
-       instruction_pending = 0;
-       store_offset = 0;
-
-       if (cpu_has_prefetch) {
-               switch (current_cpu_type()) {
-               case CPU_TX49XX:
-                       /* TX49 supports only Pref_Load */
-                       pref_offset_clear = 0;
-                       pref_offset_copy = 0;
-                       break;
-
-               case CPU_RM9000:
-                       /*
-                        * As a workaround for erratum G105, which makes the
-                        * PrepareForStore hint unusable, we fall back to
-                        * StoreRetained on the RM9000.  Once it is known which
-                        * versions of the RM9000 are affected we'll be able to
-                        * conditionalize this.
-                        */
-
-               case CPU_R10000:
-               case CPU_R12000:
-               case CPU_R14000:
-                       pref_src_mode = Pref_LoadStreamed;
-                       pref_dst_mode = Pref_StoreStreamed;
-                       break;
-
-               default:
-                       pref_src_mode = Pref_LoadStreamed;
-                       pref_dst_mode = Pref_PrepareForStore;
-                       break;
-               }
-       }
-
-        off = PAGE_SIZE - (cpu_has_prefetch ? pref_offset_clear : 0);
-       if (off > 0x7fff) {
-               build_addiu_a2_a0(off >> 1);
-               build_addiu_a2(off >> 1);
-       } else
-               build_addiu_a2_a0(off);
-
-       if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-               build_insn_word(0x3c01a000);    /* lui     $at, 0xa000  */
-
-dest = label();
-       do {
-               build_store_reg(0);
-               build_store_reg(0);
-               build_store_reg(0);
-               build_store_reg(0);
-       } while (store_offset < half_scache_line_size());
-       build_addiu_a0(2 * store_offset);
-       loop_start = store_offset;
-       do {
-               build_store_reg(0);
-               build_store_reg(0);
-               build_store_reg(0);
-               build_store_reg(0);
-       } while ((store_offset - loop_start) < half_scache_line_size());
-       build_bne(dest);
-
-       if (cpu_has_prefetch && pref_offset_clear) {
-               build_addiu_a2_a0(pref_offset_clear);
-       dest = label();
-               loop_start = store_offset;
-               do {
-                       __build_store_reg(0);
-                       __build_store_reg(0);
-                       __build_store_reg(0);
-                       __build_store_reg(0);
-               } while ((store_offset - loop_start) < half_scache_line_size());
-               build_addiu_a0(2 * store_offset);
-               loop_start = store_offset;
-               do {
-                       __build_store_reg(0);
-                       __build_store_reg(0);
-                       __build_store_reg(0);
-                       __build_store_reg(0);
-               } while ((store_offset - loop_start) < half_scache_line_size());
-               build_bne(dest);
-       }
-
-       build_jr_ra();
-
-       BUG_ON(epc > clear_page_array + ARRAY_SIZE(clear_page_array));
-
-       pr_info("Synthesized clear page handler (%u instructions).\n",
-               (unsigned int)(epc - clear_page_array));
-
-       pr_debug("\t.set push\n");
-       pr_debug("\t.set noreorder\n");
-       for (i = 0; i < (epc - clear_page_array); i++)
-               pr_debug("\t.word 0x%08x\n", clear_page_array[i]);
-       pr_debug("\t.set pop\n");
-}
-
-void __cpuinit build_copy_page(void)
-{
-       unsigned int loop_start;
-       unsigned long off;
-       int i;
-
-       epc = (unsigned int *) &copy_page_array;
-       store_offset = load_offset = 0;
-       instruction_pending = 0;
-
-       off = PAGE_SIZE - (cpu_has_prefetch ? pref_offset_copy : 0);
-       if (off > 0x7fff) {
-               build_addiu_a2_a0(off >> 1);
-               build_addiu_a2(off >> 1);
-       } else
-               build_addiu_a2_a0(off);
-
-       if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-               build_insn_word(0x3c01a000);    /* lui     $at, 0xa000  */
-
-dest = label();
-       loop_start = store_offset;
-       do {
-               build_load_reg( 8);
-               build_load_reg( 9);
-               build_load_reg(10);
-               build_load_reg(11);
-               build_store_reg( 8);
-               build_store_reg( 9);
-               build_store_reg(10);
-               build_store_reg(11);
-       } while ((store_offset - loop_start) < half_scache_line_size());
-       build_addiu_a0(2 * store_offset);
-       build_addiu_a1(2 * load_offset);
-       loop_start = store_offset;
-       do {
-               build_load_reg( 8);
-               build_load_reg( 9);
-               build_load_reg(10);
-               build_load_reg(11);
-               build_store_reg( 8);
-               build_store_reg( 9);
-               build_store_reg(10);
-               build_store_reg(11);
-       } while ((store_offset - loop_start) < half_scache_line_size());
-       build_bne(dest);
-
-       if (cpu_has_prefetch && pref_offset_copy) {
-               build_addiu_a2_a0(pref_offset_copy);
-       dest = label();
-               loop_start = store_offset;
-               do {
-                       __build_load_reg( 8);
-                       __build_load_reg( 9);
-                       __build_load_reg(10);
-                       __build_load_reg(11);
-                       __build_store_reg( 8);
-                       __build_store_reg( 9);
-                       __build_store_reg(10);
-                       __build_store_reg(11);
-               } while ((store_offset - loop_start) < half_scache_line_size());
-               build_addiu_a0(2 * store_offset);
-               build_addiu_a1(2 * load_offset);
-               loop_start = store_offset;
-               do {
-                       __build_load_reg( 8);
-                       __build_load_reg( 9);
-                       __build_load_reg(10);
-                       __build_load_reg(11);
-                       __build_store_reg( 8);
-                       __build_store_reg( 9);
-                       __build_store_reg(10);
-                       __build_store_reg(11);
-               } while ((store_offset - loop_start) < half_scache_line_size());
-               build_bne(dest);
-       }
-
-       build_jr_ra();
-
-       BUG_ON(epc > copy_page_array + ARRAY_SIZE(copy_page_array));
-
-       pr_info("Synthesized copy page handler (%u instructions).\n",
-               (unsigned int)(epc - copy_page_array));
-
-       pr_debug("\t.set push\n");
-       pr_debug("\t.set noreorder\n");
-       for (i = 0; i < (epc - copy_page_array); i++)
-               pr_debug("\t.word 0x%08x\n", copy_page_array[i]);
-       pr_debug("\t.set pop\n");
-}
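Two details of the deleted generator are worth spelling out. emit_instruction() holds each instruction back one slot so that build_bne() and build_jr_ra() can pull the pending word into the branch delay slot via flush_delay_slot_or_nop(), and build_bne() encodes its target as dest - epc - 1 because MIPS branch displacements are counted in instruction words starting from the delay slot that follows the branch. A minimal illustration of that offset arithmetic (hypothetical helper name):

/*
 * Illustration only: how build_bne() above derives the 16-bit simmediate
 * field.  'target' plays the role of 'dest'; 'branch' is the emit pointer
 * 'epc' while it still points at the branch word itself.
 */
static inline int mips_branch_words(unsigned int *target, unsigned int *branch)
{
        /* counted in 32-bit words from the delay slot following the branch */
        return target - branch - 1;
}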
diff --git a/arch/mips/mm/pg-sb1.c b/arch/mips/mm/pg-sb1.c
deleted file mode 100644 (file)
index 49e289d..0000000
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
- * Copyright (C) 1997, 2001 Ralf Baechle (ralf@gnu.org)
- * Copyright (C) 2000 SiByte, Inc.
- * Copyright (C) 2005 Thiemo Seufer
- *
- * Written by Justin Carlson of SiByte, Inc.
- *         and Kip Walker of Broadcom Corp.
- *
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
- */
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <linux/smp.h>
-
-#include <asm/io.h>
-#include <asm/sibyte/sb1250.h>
-#include <asm/sibyte/sb1250_regs.h>
-#include <asm/sibyte/sb1250_dma.h>
-
-#ifdef CONFIG_SB1_PASS_1_WORKAROUNDS
-#define SB1_PREF_LOAD_STREAMED_HINT "0"
-#define SB1_PREF_STORE_STREAMED_HINT "1"
-#else
-#define SB1_PREF_LOAD_STREAMED_HINT "4"
-#define SB1_PREF_STORE_STREAMED_HINT "5"
-#endif
-
-static inline void clear_page_cpu(void *page)
-{
-       unsigned char *addr = (unsigned char *) page;
-       unsigned char *end = addr + PAGE_SIZE;
-
-       /*
-        * JDCXXX - This should be bottlenecked by the write buffer, but these
-        * things tend to be mildly unpredictable...should check this on the
-        * performance model
-        *
-        * We prefetch 4 lines ahead.  We're also "cheating" slightly here...
-        * since we know we're on an SB1, we force the assembler to take
-        * 64-bit operands to speed things up
-        */
-       __asm__ __volatile__(
-       "       .set    push            \n"
-       "       .set    mips4           \n"
-       "       .set    noreorder       \n"
-#ifdef CONFIG_CPU_HAS_PREFETCH
-       "       daddiu  %0, %0, 128     \n"
-       "       pref    " SB1_PREF_STORE_STREAMED_HINT ", -128(%0)  \n"
-                                            /* Prefetch the first 4 lines */
-       "       pref    " SB1_PREF_STORE_STREAMED_HINT ",  -96(%0)  \n"
-       "       pref    " SB1_PREF_STORE_STREAMED_HINT ",  -64(%0)  \n"
-       "       pref    " SB1_PREF_STORE_STREAMED_HINT ",  -32(%0)  \n"
-       "1:     sd      $0, -128(%0)    \n"  /* Throw out a cacheline of 0's */
-       "       sd      $0, -120(%0)    \n"
-       "       sd      $0, -112(%0)    \n"
-       "       sd      $0, -104(%0)    \n"
-       "       daddiu  %0, %0, 32      \n"
-       "       bnel    %0, %1, 1b      \n"
-       "        pref   " SB1_PREF_STORE_STREAMED_HINT ",  -32(%0)  \n"
-       "       daddiu  %0, %0, -128    \n"
-#endif
-       "       sd      $0, 0(%0)       \n"  /* Throw out a cacheline of 0's */
-       "1:     sd      $0, 8(%0)       \n"
-       "       sd      $0, 16(%0)      \n"
-       "       sd      $0, 24(%0)      \n"
-       "       daddiu  %0, %0, 32      \n"
-       "       bnel    %0, %1, 1b      \n"
-       "        sd     $0, 0(%0)       \n"
-       "       .set    pop             \n"
-       : "+r" (addr)
-       : "r" (end)
-       : "memory");
-}
-
-static inline void copy_page_cpu(void *to, void *from)
-{
-       unsigned char *src = (unsigned char *)from;
-       unsigned char *dst = (unsigned char *)to;
-       unsigned char *end = src + PAGE_SIZE;
-
-       /*
-        * The pref's used here are using "streaming" hints, which cause the
-        * copied data to be kicked out of the cache sooner.  A page copy often
-        * ends up copying a lot more data than is commonly used, so this seems
-        * to make sense in terms of reducing cache pollution, but I've no real
-        * performance data to back this up
-        */
-       __asm__ __volatile__(
-       "       .set    push            \n"
-       "       .set    mips4           \n"
-       "       .set    noreorder       \n"
-#ifdef CONFIG_CPU_HAS_PREFETCH
-       "       daddiu  %0, %0, 128     \n"
-       "       daddiu  %1, %1, 128     \n"
-       "       pref    " SB1_PREF_LOAD_STREAMED_HINT  ", -128(%0)\n"
-                                            /* Prefetch the first 4 lines */
-       "       pref    " SB1_PREF_STORE_STREAMED_HINT ", -128(%1)\n"
-       "       pref    " SB1_PREF_LOAD_STREAMED_HINT  ",  -96(%0)\n"
-       "       pref    " SB1_PREF_STORE_STREAMED_HINT ",  -96(%1)\n"
-       "       pref    " SB1_PREF_LOAD_STREAMED_HINT  ",  -64(%0)\n"
-       "       pref    " SB1_PREF_STORE_STREAMED_HINT ",  -64(%1)\n"
-       "       pref    " SB1_PREF_LOAD_STREAMED_HINT  ",  -32(%0)\n"
-       "1:     pref    " SB1_PREF_STORE_STREAMED_HINT ",  -32(%1)\n"
-# ifdef CONFIG_64BIT
-       "       ld      $8, -128(%0)    \n"  /* Block copy a cacheline */
-       "       ld      $9, -120(%0)    \n"
-       "       ld      $10, -112(%0)   \n"
-       "       ld      $11, -104(%0)   \n"
-       "       sd      $8, -128(%1)    \n"
-       "       sd      $9, -120(%1)    \n"
-       "       sd      $10, -112(%1)   \n"
-       "       sd      $11, -104(%1)   \n"
-# else
-       "       lw      $2, -128(%0)    \n"  /* Block copy a cacheline */
-       "       lw      $3, -124(%0)    \n"
-       "       lw      $6, -120(%0)    \n"
-       "       lw      $7, -116(%0)    \n"
-       "       lw      $8, -112(%0)    \n"
-       "       lw      $9, -108(%0)    \n"
-       "       lw      $10, -104(%0)   \n"
-       "       lw      $11, -100(%0)   \n"
-       "       sw      $2, -128(%1)    \n"
-       "       sw      $3, -124(%1)    \n"
-       "       sw      $6, -120(%1)    \n"
-       "       sw      $7, -116(%1)    \n"
-       "       sw      $8, -112(%1)    \n"
-       "       sw      $9, -108(%1)    \n"
-       "       sw      $10, -104(%1)   \n"
-       "       sw      $11, -100(%1)   \n"
-# endif
-       "       daddiu  %0, %0, 32      \n"
-       "       daddiu  %1, %1, 32      \n"
-       "       bnel    %0, %2, 1b      \n"
-       "        pref   " SB1_PREF_LOAD_STREAMED_HINT  ",  -32(%0)\n"
-       "       daddiu  %0, %0, -128    \n"
-       "       daddiu  %1, %1, -128    \n"
-#endif
-#ifdef CONFIG_64BIT
-       "       ld      $8, 0(%0)       \n"  /* Block copy a cacheline */
-       "1:     ld      $9, 8(%0)       \n"
-       "       ld      $10, 16(%0)     \n"
-       "       ld      $11, 24(%0)     \n"
-       "       sd      $8, 0(%1)       \n"
-       "       sd      $9, 8(%1)       \n"
-       "       sd      $10, 16(%1)     \n"
-       "       sd      $11, 24(%1)     \n"
-#else
-       "       lw      $2, 0(%0)       \n"  /* Block copy a cacheline */
-       "1:     lw      $3, 4(%0)       \n"
-       "       lw      $6, 8(%0)       \n"
-       "       lw      $7, 12(%0)      \n"
-       "       lw      $8, 16(%0)      \n"
-       "       lw      $9, 20(%0)      \n"
-       "       lw      $10, 24(%0)     \n"
-       "       lw      $11, 28(%0)     \n"
-       "       sw      $2, 0(%1)       \n"
-       "       sw      $3, 4(%1)       \n"
-       "       sw      $6, 8(%1)       \n"
-       "       sw      $7, 12(%1)      \n"
-       "       sw      $8, 16(%1)      \n"
-       "       sw      $9, 20(%1)      \n"
-       "       sw      $10, 24(%1)     \n"
-       "       sw      $11, 28(%1)     \n"
-#endif
-       "       daddiu  %0, %0, 32      \n"
-       "       daddiu  %1, %1, 32      \n"
-       "       bnel    %0, %2, 1b      \n"
-#ifdef CONFIG_64BIT
-       "        ld     $8, 0(%0)       \n"
-#else
-       "        lw     $2, 0(%0)       \n"
-#endif
-       "       .set    pop             \n"
-       : "+r" (src), "+r" (dst)
-       : "r" (end)
-#ifdef CONFIG_64BIT
-       : "$8", "$9", "$10", "$11", "memory");
-#else
-       : "$2", "$3", "$6", "$7", "$8", "$9", "$10", "$11", "memory");
-#endif
-}
-
-
-#ifdef CONFIG_SIBYTE_DMA_PAGEOPS
-
-/*
- * Pad descriptors to cacheline, since each is exclusively owned by a
- * particular CPU.
- */
-typedef struct dmadscr_s {
-       u64 dscr_a;
-       u64 dscr_b;
-       u64 pad_a;
-       u64 pad_b;
-} dmadscr_t;
-
-static dmadscr_t page_descr[DM_NUM_CHANNELS]
-       __attribute__((aligned(SMP_CACHE_BYTES)));
-
-void sb1_dma_init(void)
-{
-       int i;
-
-       for (i = 0; i < DM_NUM_CHANNELS; i++) {
-               const u64 base_val = CPHYSADDR((unsigned long)&page_descr[i]) |
-                                    V_DM_DSCR_BASE_RINGSZ(1);
-               void *base_reg = IOADDR(A_DM_REGISTER(i, R_DM_DSCR_BASE));
-
-               __raw_writeq(base_val, base_reg);
-               __raw_writeq(base_val | M_DM_DSCR_BASE_RESET, base_reg);
-               __raw_writeq(base_val | M_DM_DSCR_BASE_ENABL, base_reg);
-       }
-}
-
-void clear_page(void *page)
-{
-       u64 to_phys = CPHYSADDR((unsigned long)page);
-       unsigned int cpu = smp_processor_id();
-
-       /* if the page is not in KSEG0, use old way */
-       if ((long)KSEGX((unsigned long)page) != (long)CKSEG0)
-               return clear_page_cpu(page);
-
-       page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_ZERO_MEM |
-                                M_DM_DSCRA_L2C_DEST | M_DM_DSCRA_INTERRUPT;
-       page_descr[cpu].dscr_b = V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
-       __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
-
-       /*
-        * Don't really want to do it this way, but there's no
-        * reliable way to delay completion detection.
-        */
-       while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
-                & M_DM_DSCR_BASE_INTERRUPT))
-               ;
-       __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
-}
-
-void copy_page(void *to, void *from)
-{
-       u64 from_phys = CPHYSADDR((unsigned long)from);
-       u64 to_phys = CPHYSADDR((unsigned long)to);
-       unsigned int cpu = smp_processor_id();
-
-       /* if any page is not in KSEG0, use old way */
-       if ((long)KSEGX((unsigned long)to) != (long)CKSEG0
-           || (long)KSEGX((unsigned long)from) != (long)CKSEG0)
-               return copy_page_cpu(to, from);
-
-       page_descr[cpu].dscr_a = to_phys | M_DM_DSCRA_L2C_DEST |
-                                M_DM_DSCRA_INTERRUPT;
-       page_descr[cpu].dscr_b = from_phys | V_DM_DSCRB_SRC_LENGTH(PAGE_SIZE);
-       __raw_writeq(1, IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_COUNT)));
-
-       /*
-        * Don't really want to do it this way, but there's no
-        * reliable way to delay completion detection.
-        */
-       while (!(__raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE_DEBUG)))
-                & M_DM_DSCR_BASE_INTERRUPT))
-               ;
-       __raw_readq(IOADDR(A_DM_REGISTER(cpu, R_DM_DSCR_BASE)));
-}
-
-#else /* !CONFIG_SIBYTE_DMA_PAGEOPS */
-
-void clear_page(void *page)
-{
-       return clear_page_cpu(page);
-}
-
-void copy_page(void *to, void *from)
-{
-       return copy_page_cpu(to, from);
-}
-
-#endif /* !CONFIG_SIBYTE_DMA_PAGEOPS */
-
-EXPORT_SYMBOL(clear_page);
-EXPORT_SYMBOL(copy_page);
-
-void __cpuinit build_clear_page(void)
-{
-}
-
-void __cpuinit build_copy_page(void)
-{
-}
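Both the deleted pg-sb1.c and its replacement at the top of this patch keep one DMA descriptor per CPU and make sure each descriptor occupies its own cache line so the CPUs never false-share; the old code padded the struct by hand and aligned the array explicitly, while the new code relies on the kernel annotation. A side-by-side sketch with illustrative struct names:

/* Hand-padded form, as in the deleted pg-sb1.c: two u64 pads bring the
 * descriptor up to a full 32-byte SB1 cache line. */
struct dm_descr_padded {
        u64 dscr_a;
        u64 dscr_b;
        u64 pad_a;
        u64 pad_b;
};

/* Annotated form, as in the replacement code above: on SMP the compiler
 * aligns (and therefore pads) each array element to the cache-line size. */
struct dm_descr_annotated {
        u64 dscr_a;
        u64 dscr_b;
} ____cacheline_aligned_in_smp;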
index 57df1c38e3031a65ae1ace1a5e524476f55d6fae..7dfa579ab24c61761db8446c11fcb63e2ab6c5b8 100644 (file)
@@ -12,7 +12,6 @@ void show_mem(void)
 
        printk("Mem-info:\n");
        show_free_areas();
-       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        pfn = max_mapnr;
        while (pfn-- > 0) {
                if (!pfn_valid(pfn))
index 63065d6e806397202e93628db4dc0142e1ce642a..5ce2fa74562644d49adccb673d0d9dd950b96d96 100644 (file)
@@ -299,7 +299,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
        idx = read_c0_index();
        ptep = pte_offset_map(pmdp, address);
 
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
        write_c0_entrylo0(ptep->pte_high);
        ptep++;
        write_c0_entrylo1(ptep->pte_high);
index 1a6f7704cc894276dda1b6ce6dcc3d6edb225382..1655aa69e133699f9eb0ce6c95255d4888dcbc65 100644 (file)
@@ -58,13 +58,13 @@ enum opcode {
        insn_invalid,
        insn_addu, insn_addiu, insn_and, insn_andi, insn_beq,
        insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
-       insn_bne, insn_daddu, insn_daddiu, insn_dmfc0, insn_dmtc0,
-       insn_dsll, insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32,
-       insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr, insn_ld,
-       insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0, insn_mtc0,
-       insn_ori, insn_rfe, insn_sc, insn_scd, insn_sd, insn_sll,
-       insn_sra, insn_srl, insn_subu, insn_sw, insn_tlbp, insn_tlbwi,
-       insn_tlbwr, insn_xor, insn_xori
+       insn_bne, insn_cache, insn_daddu, insn_daddiu, insn_dmfc0,
+       insn_dmtc0, insn_dsll, insn_dsll32, insn_dsra, insn_dsrl,
+       insn_dsrl32, insn_dsubu, insn_eret, insn_j, insn_jal, insn_jr,
+       insn_ld, insn_ll, insn_lld, insn_lui, insn_lw, insn_mfc0,
+       insn_mtc0, insn_ori, insn_pref, insn_rfe, insn_sc, insn_scd,
+       insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw,
+       insn_tlbp, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori
 };
 
 struct insn {
@@ -94,6 +94,7 @@ static struct insn insn_table[] __cpuinitdata = {
        { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
        { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
        { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+       { insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
        { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
@@ -116,6 +117,7 @@ static struct insn insn_table[] __cpuinitdata = {
        { insn_mfc0,  M(cop0_op, mfc_op, 0, 0, 0, 0),  RT | RD | SET},
        { insn_mtc0,  M(cop0_op, mtc_op, 0, 0, 0, 0),  RT | RD | SET},
        { insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
+       { insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
        { insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_scd,  M(scd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
@@ -337,6 +339,7 @@ I_u1s2(_bgezl)
 I_u1s2(_bltz)
 I_u1s2(_bltzl)
 I_u1u2s3(_bne)
+I_u2s3u1(_cache)
 I_u1u2u3(_dmfc0)
 I_u1u2u3(_dmtc0)
 I_u2u1s3(_daddiu)
@@ -359,6 +362,7 @@ I_u2s3u1(_lw)
 I_u1u2u3(_mfc0)
 I_u1u2u3(_mtc0)
 I_u2u1u3(_ori)
+I_u2s3u1(_pref)
 I_0(_rfe)
 I_u2s3u1(_sc)
 I_u2s3u1(_scd)
@@ -554,6 +558,14 @@ uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
        uasm_i_beqzl(p, reg, 0);
 }
 
+void __cpuinit
+uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+       unsigned int reg2, int lid)
+{
+       uasm_r_mips_pc16(r, *p, lid);
+       uasm_i_bne(p, reg1, reg2, 0);
+}
+
 void __cpuinit
 uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
 {
index fe0574f6e77ddcec84847b6e4028540022982646..0d6a66f32030e0de7bf33cf63546a312c3ee38b5 100644 (file)
@@ -55,6 +55,7 @@ Ip_u1s2(_bgezl);
 Ip_u1s2(_bltz);
 Ip_u1s2(_bltzl);
 Ip_u1u2s3(_bne);
+Ip_u2s3u1(_cache);
 Ip_u1u2u3(_dmfc0);
 Ip_u1u2u3(_dmtc0);
 Ip_u2u1s3(_daddiu);
@@ -77,6 +78,7 @@ Ip_u2s3u1(_lw);
 Ip_u1u2u3(_mfc0);
 Ip_u1u2u3(_mtc0);
 Ip_u2u1u3(_ori);
+Ip_u2s3u1(_pref);
 Ip_0(_rfe);
 Ip_u2s3u1(_sc);
 Ip_u2s3u1(_scd);
@@ -177,6 +179,8 @@ void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
 void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid);
 void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
 void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
+void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
+                unsigned int reg2, int lid);
 void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
 void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
 void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
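uasm gains three things here: builders for the cache and pref instructions, and a branch-with-relocation helper, uasm_il_bne(), which records a PC16 relocation and emits the bne with a zero displacement to be patched once the target label is known. A rough usage sketch follows; it assumes the existing uasm label/relocation helpers (uasm_build_label(), uasm_resolve_relocs()) plus <asm/cacheops.h> and <asm/prefetch.h> for the operation codes, and the label id, register numbers and buffer are placeholders:

/* Sketch only: emit a small loop body and fix up the backward bne. */
enum { label_loop = 1 };                /* arbitrary nonzero label id */

static struct uasm_label labels[4];     /* zero-filled, so properly terminated */
static struct uasm_reloc relocs[4];

static void __cpuinit build_example(u32 *buf)
{
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        u32 *p = buf;

        uasm_build_label(&l, p, label_loop);            /* loop head */
        uasm_i_cache(&p, Create_Dirty_Excl_D, 0, 4);    /* cache op on 0($a0) */
        uasm_i_pref(&p, Pref_PrepareForStore, 32, 4);   /* pref 32($a0) */
        uasm_il_bne(&p, &r, 4, 6, label_loop);          /* loop while $a0 != $a2 */
        uasm_i_nop(&p);                                 /* branch delay slot */

        uasm_resolve_relocs(relocs, labels);            /* patch the bne offset */
}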
similarity index 98%
rename from arch/mips/philips/pnx8550/common/platform.c
rename to arch/mips/nxp/pnx8550/common/platform.c
index c839436bd012915aa787eadec4097143a5391f70..c7c763dbe58843c2bf2276ef2925f8bde54027ac 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Platform device support for Philips PNX8550 SoCs
+ * Platform device support for NXP PNX8550 SoCs
  *
  * Copyright 2005, Embedded Alley Solutions, Inc
  *
similarity index 51%
rename from arch/mips/philips/pnx8550/jbs/Makefile
rename to arch/mips/nxp/pnx8550/jbs/Makefile
index e8228dbca8f653ee30e7c2e1090a6b11e9c5ea79..ad6a8ca7d8ce1420bd23ff538f1314cadf9aaad7 100644 (file)
@@ -1,4 +1,4 @@
 
-# Makefile for the Philips JBS Board.
+# Makefile for the NXP JBS Board.
 
 lib-y := init.o board_setup.o irqmap.o
similarity index 98%
rename from arch/mips/philips/pnx8550/jbs/init.c
rename to arch/mips/nxp/pnx8550/jbs/init.c
index 90b4d35f3eceb2b4cde916d43f6a03e04f7b143c..d59b4a4e5e8b2065b88b36447b4dca8563855c61 100644 (file)
@@ -40,7 +40,7 @@ extern char *prom_getenv(char *envname);
 
 const char *get_system_type(void)
 {
-       return "Philips PNX8550/JBS";
+       return "NXP PNX8550/JBS";
 }
 
 void __init prom_init(void)
similarity index 98%
rename from arch/mips/philips/pnx8550/jbs/irqmap.c
rename to arch/mips/nxp/pnx8550/jbs/irqmap.c
index 98c3429e6e50327ce94d45c080c29c019eca604f..7fc89842002c39f33386fdc2a4a02310de666724 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  Philips JBS board irqmap.
+ *  NXP JBS board irqmap.
  *
  *  Copyright 2005 Embedded Alley Solutions, Inc
  *  source@embeddealley.com
@@ -33,4 +33,3 @@ char pnx8550_irq_tab[][5] __initdata = {
        [9]     = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
        [17]    = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
 };
-
similarity index 52%
rename from arch/mips/philips/pnx8550/stb810/Makefile
rename to arch/mips/nxp/pnx8550/stb810/Makefile
index f14b592af398a4de93927e23c190419018ef71f6..ab91d72c5664556515b13f3e2cd0f5ecbc901e0d 100644 (file)
@@ -1,4 +1,4 @@
 
-# Makefile for the Philips STB810 Board.
+# Makefile for the NXP STB810 Board.
 
 lib-y := prom_init.o board_setup.o irqmap.o
similarity index 95%
rename from arch/mips/philips/pnx8550/stb810/board_setup.c
rename to arch/mips/nxp/pnx8550/stb810/board_setup.c
index 345d71e53cf2587cddf8e2cf828408ec875775e3..1282c27cfcb7fe1e2255f675fb32e8aa518657f1 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  STB810 specific board startup routines.
  *
- *  Based on the arch/mips/philips/pnx8550/jbs/board_setup.c
+ *  Based on the arch/mips/nxp/pnx8550/jbs/board_setup.c
  *
  *  Author: MontaVista Software, Inc.
  *          source@mvista.com
similarity index 94%
rename from arch/mips/philips/pnx8550/stb810/irqmap.c
rename to arch/mips/nxp/pnx8550/stb810/irqmap.c
index 5ee11e19975ef6cb400524ff7cab3f5acdec2e4b..8c034963ddcd778ef800a7f5b43298cb0f20b8b5 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  Philips STB810 board irqmap.
+ *  NXP STB810 board irqmap.
  *
  *  Author: MontaVista Software, Inc.
  *          source@mvista.com
@@ -20,4 +20,3 @@ char pnx8550_irq_tab[][5] __initdata = {
        [9]     = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
        [10]    = { -1, PNX8550_INT_PCI_INTA, 0xff, 0xff, 0xff},
 };
-
similarity index 96%
rename from arch/mips/philips/pnx8550/stb810/prom_init.c
rename to arch/mips/nxp/pnx8550/stb810/prom_init.c
index 832dd60b0a7a80113a5cd824b3db8954b52ee0d8..ca7f4ada064079a3df6ff4c672621a0aa726c138 100644 (file)
@@ -28,7 +28,7 @@ extern char *prom_getenv(char *envname);
 
 const char *get_system_type(void)
 {
-       return "Philips PNX8550/STB810";
+       return "NXP PNX8550/STB810";
 }
 
 void __init prom_init(void)
index aa52aa146cea301e83ac6a2e65dd1970be651aba..b5f6f71b27bc8b837ed83b2ca4945d1bf1aab3ac 100644 (file)
@@ -80,6 +80,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
        case CPU_24K:
        case CPU_25KF:
        case CPU_34K:
+       case CPU_1004K:
        case CPU_74K:
        case CPU_SB1:
        case CPU_SB1A:
index fa6b4aae75233f764d1979f7b1f0e5fd9944545e..2bfc17c3010613d7a8a4ea87e92fac9112810675 100644 (file)
@@ -10,7 +10,6 @@
 #ifndef OP_IMPL_H
 #define OP_IMPL_H 1
 
-extern int null_perf_irq(void);
 extern int (*perf_irq)(void);
 
 /* Per-counter configuration as set via oprofilefs.  */
index ccbea229a0e6f0da88557840e414653bd486aad5..da8cbb6899dcde8cb439fd53766f0281dad78c1d 100644 (file)
 
 #define M_COUNTER_OVERFLOW             (1UL      << 31)
 
+static int (*save_perf_irq)(void);
+
 #ifdef CONFIG_MIPS_MT_SMP
-#define WHAT           (M_TC_EN_VPE | M_PERFCTL_VPEID(smp_processor_id()))
-#define vpe_id()       smp_processor_id()
+static int cpu_has_mipsmt_pertccounters;
+#define WHAT           (M_TC_EN_VPE | \
+                        M_PERFCTL_VPEID(cpu_data[smp_processor_id()].vpe_id))
+#define vpe_id()       (cpu_has_mipsmt_pertccounters ? \
+                       0 : cpu_data[smp_processor_id()].vpe_id)
 
 /*
  * The number of bits to shift to convert between counters per core and
@@ -243,11 +248,11 @@ static inline int __n_counters(void)
 {
        if (!(read_c0_config1() & M_CONFIG1_PC))
                return 0;
-       if (!(r_c0_perfctrl0() & M_PERFCTL_MORE))
+       if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
                return 1;
-       if (!(r_c0_perfctrl1() & M_PERFCTL_MORE))
+       if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
                return 2;
-       if (!(r_c0_perfctrl2() & M_PERFCTL_MORE))
+       if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
                return 3;
 
        return 4;
@@ -274,8 +279,9 @@ static inline int n_counters(void)
        return counters;
 }
 
-static inline void reset_counters(int counters)
+static void reset_counters(void *arg)
 {
+       int counters = (int)arg;
        switch (counters) {
        case 4:
                w_c0_perfctrl3(0);
@@ -302,9 +308,12 @@ static int __init mipsxx_init(void)
                return -ENODEV;
        }
 
-       reset_counters(counters);
-
-       counters = counters_total_to_per_cpu(counters);
+#ifdef CONFIG_MIPS_MT_SMP
+       cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
+       if (!cpu_has_mipsmt_pertccounters)
+               counters = counters_total_to_per_cpu(counters);
+#endif
+       on_each_cpu(reset_counters, (void *)counters, 0, 1);
 
        op_model_mipsxx_ops.num_counters = counters;
        switch (current_cpu_type()) {
@@ -320,6 +329,13 @@ static int __init mipsxx_init(void)
                op_model_mipsxx_ops.cpu_type = "mips/25K";
                break;
 
+       case CPU_1004K:
+#if 0
+               /* FIXME: report as 34K for now */
+               op_model_mipsxx_ops.cpu_type = "mips/1004K";
+               break;
+#endif
+
        case CPU_34K:
                op_model_mipsxx_ops.cpu_type = "mips/34K";
                break;
@@ -355,6 +371,7 @@ static int __init mipsxx_init(void)
                return -ENODEV;
        }
 
+       save_perf_irq = perf_irq;
        perf_irq = mipsxx_perfcount_handler;
 
        return 0;
@@ -365,9 +382,9 @@ static void mipsxx_exit(void)
        int counters = op_model_mipsxx_ops.num_counters;
 
        counters = counters_per_cpu_to_total(counters);
-       reset_counters(counters);
+       on_each_cpu(reset_counters, (void *)counters, 0, 1);
 
-       perf_irq = null_perf_irq;
+       perf_irq = save_perf_irq;
 }
 
 struct op_mips_model op_model_mipsxx_ops = {
index ca0276c8070aeeb94f738d59a3edddcae4bd6d68..00c36c9dbe0e9ccd3ddc2429d22fe5e69701ed40 100644 (file)
  *  with this program; if not, write  to the Free Software Foundation, Inc.,
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
-#include <linux/types.h>
+
 #include <linux/pci.h>
-#include <linux/kernel.h>
 #include <linux/init.h>
 
-#include <asm/mach-au1x00/au1000.h>
-
 extern char irq_tab_alchemy[][5];
 
 int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
index d6106465249830907fdff82239f25dc388b9ba8e..0e160d9f07c39ae5aafd5df33dd605b6365118f7 100644 (file)
@@ -90,14 +90,14 @@ config_access(unsigned int pci_cmd, struct pci_bus *bus, unsigned int devfn, int
 
                loops--;
                if (loops == 0) {
-                       printk("%s : Arbiter Locked.\n", __FUNCTION__);
+                       printk("%s : Arbiter Locked.\n", __func__);
                }
        }
 
        clear_status();
        if ((pci_cmd == PCI_CMD_IOR) || (pci_cmd == PCI_CMD_IOW)) {
                printk("%s timeout (GPPM_CTRL=%X) ioaddr %lX pci_cmd %X\n",
-                      __FUNCTION__, inl(PCI_BASE | PCI_GPPM_CTRL), ioaddr,
+                      __func__, inl(PCI_BASE | PCI_GPPM_CTRL), ioaddr,
                       pci_cmd);
        }
 
index 855977ca51cd3d5e05fd09bec366a40ce687a89b..6537d90a25bbcbd1cc0430a41e2e0d6b46950616 100644 (file)
@@ -143,9 +143,6 @@ void __init plat_time_init(void)
 mips_hpt_frequency = 33000000 * 3 * 5;
 }
 
-/* No other usable initialization hook than this ...  */
-extern void (*late_time_init)(void);
-
 unsigned long ocd_base;
 
 EXPORT_SYMBOL(ocd_base);
index 624bbdbff2a8bf40d47ae412d5361b43369b9b36..b6cab089561e117cc4a4fc785cdd10d5b0f736bf 100644 (file)
@@ -142,7 +142,7 @@ static irqreturn_t ip32_rtc_int(int irq, void *dev_id)
        reg_c = CMOS_READ(RTC_INTR_FLAGS);
        if (!(reg_c & RTC_IRQF)) {
                printk(KERN_WARNING
-                       "%s: RTC IRQ without RTC_IRQF\n", __FUNCTION__);
+                       "%s: RTC IRQ without RTC_IRQF\n", __func__);
        }
        /* Wait until interrupt goes away */
        disable_irq(MACEISA_RTC_IRQ);
index 3f808b6292429248dbfd6be1055fcf493e6a9d83..6d31f2a98abf3a068388a1fecf3496adc7d399e1 100644 (file)
@@ -173,7 +173,7 @@ static const u32 toshiba_rbtx4927_irq_debug_flag =
         { \
            char tmp[100]; \
            sprintf( tmp, str ); \
-           printk( "%s(%s:%u)::%s", __FUNCTION__, __FILE__, __LINE__, tmp ); \
+           printk( "%s(%s:%u)::%s", __func__, __FILE__, __LINE__, tmp ); \
         }
 #else
 #define TOSHIBA_RBTX4927_IRQ_DPRINTK(flag, str...)
index e466e5e711d8919c69b49f2033b60e9407a202f4..2203c77b2ce25e21f383dbb46447e9478753bbbe 100644 (file)
@@ -93,7 +93,7 @@ static const u32 toshiba_rbtx4927_setup_debug_flag =
         { \
            char tmp[100]; \
            sprintf( tmp, str ); \
-           printk( "%s(%s:%u)::%s", __FUNCTION__, __FILE__, __LINE__, tmp ); \
+           printk( "%s(%s:%u)::%s", __func__, __FILE__, __LINE__, tmp ); \
         }
 #else
 #define TOSHIBA_RBTX4927_SETUP_DPRINTK(flag, str...)
index bea59ff1842afab13f242eea7b264c1f8311b501..33b9c672a322a8d055b58cd7d6e0eba2c0d1aa9c 100644 (file)
@@ -31,9 +31,7 @@
  * Support for TX4938 in 2.6 - Hiroshi DOYU <Hiroshi_DOYU@montavista.co.jp>
  */
 
-#include <asm/mipsregs.h>
-#include <asm/system.h>
-#include <asm/tx4938/tx4938_mips.h>
+#include <linux/types.h>
 
 extern u8 txx9_sio_kdbg_rd(void);
 extern int txx9_sio_kdbg_wr( u8 ch );
index 3189a65f7d7e7e8320f244386eec4357e04c117e..20baeaeba4cd0f85f156abe717a6bd45aea3de94 100644 (file)
  */
 
 #include <linux/init.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-#include <linux/bootmem.h>
-
-#include <asm/addrspace.h>
-#include <asm/bootinfo.h>
-#include <asm/tx4938/tx4938.h>
+#include <linux/types.h>
+#include <linux/io.h>
 
 static unsigned int __init
 tx4938_process_sdccr(u64 * addr)
@@ -35,7 +30,7 @@ tx4938_process_sdccr(u64 * addr)
        unsigned int bc = 4;
        unsigned int msize = 0;
 
-       val = (*((vu64 *) (addr)));
+       val = ____raw_readq((void __iomem *)addr);
 
        /* MVMCP -- need #defs for these bits masks */
        sdccr_ce = ((val & (1 << 10)) >> 10);
index f00185017e80bea947267998b1ecdf15579e1e27..4d6a8dc46c76eb963e7c5a0df6acd0d882976180 100644 (file)
@@ -67,24 +67,7 @@ IRQ  Device
 63 RBTX4938-IOC/07 SWINT
 */
 #include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/swap.h>
-#include <linux/ioport.h>
-#include <linux/sched.h>
 #include <linux/interrupt.h>
-#include <linux/pci.h>
-#include <linux/timex.h>
-#include <asm/bootinfo.h>
-#include <asm/page.h>
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/processor.h>
-#include <asm/reboot.h>
-#include <asm/time.h>
-#include <asm/wbflush.h>
-#include <linux/bootmem.h>
 #include <asm/tx4938/rbtx4938.h>
 
 static void toshiba_rbtx4938_irq_ioc_enable(unsigned int irq);
@@ -99,21 +82,16 @@ static struct irq_chip toshiba_rbtx4938_irq_ioc_type = {
        .unmask = toshiba_rbtx4938_irq_ioc_enable,
 };
 
-#define TOSHIBA_RBTX4938_IOC_INTR_ENAB 0xb7f02000
-#define TOSHIBA_RBTX4938_IOC_INTR_STAT 0xb7f0200a
-
 int
 toshiba_rbtx4938_irq_nested(int sw_irq)
 {
        u8 level3;
 
-       level3 = reg_rd08(TOSHIBA_RBTX4938_IOC_INTR_STAT) & 0xff;
-       if (level3) {
+       level3 = readb(rbtx4938_imstat_addr);
+       if (level3)
                /* must use fls so onboard ATA has priority */
                sw_irq = TOSHIBA_RBTX4938_IRQ_IOC_BEG + fls(level3) - 1;
-       }
 
-       wbflush();
        return sw_irq;
 }
 
@@ -144,25 +122,23 @@ toshiba_rbtx4938_irq_ioc_init(void)
 static void
 toshiba_rbtx4938_irq_ioc_enable(unsigned int irq)
 {
-       volatile unsigned char v;
+       unsigned char v;
 
-       v = TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
+       v = readb(rbtx4938_imask_addr);
        v |= (1 << (irq - TOSHIBA_RBTX4938_IRQ_IOC_BEG));
-       TX4938_WR08(TOSHIBA_RBTX4938_IOC_INTR_ENAB, v);
+       writeb(v, rbtx4938_imask_addr);
        mmiowb();
-       TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
 }
 
 static void
 toshiba_rbtx4938_irq_ioc_disable(unsigned int irq)
 {
-       volatile unsigned char v;
+       unsigned char v;
 
-       v = TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
+       v = readb(rbtx4938_imask_addr);
        v &= ~(1 << (irq - TOSHIBA_RBTX4938_IRQ_IOC_BEG));
-       TX4938_WR08(TOSHIBA_RBTX4938_IOC_INTR_ENAB, v);
+       writeb(v, rbtx4938_imask_addr);
        mmiowb();
-       TX4938_RD08(TOSHIBA_RBTX4938_IOC_INTR_ENAB);
 }
 
 void __init arch_init_irq(void)
@@ -174,14 +150,12 @@ void __init arch_init_irq(void)
        /* all IRC interrupt mode are Low Active. */
 
        /* mask all IOC interrupts */
-       *rbtx4938_imask_ptr = 0;
+       writeb(0, rbtx4938_imask_addr);
 
        /* clear SoftInt interrupts */
-       *rbtx4938_softint_ptr = 0;
+       writeb(0, rbtx4938_softint_addr);
        tx4938_irq_init();
        toshiba_rbtx4938_irq_ioc_init();
        /* Onboard 10M Ether: High Active */
        set_irq_type(RBTX4938_IRQ_ETHER, IRQF_TRIGGER_HIGH);
-
-       wbflush();
 }
index 61249f049cd6d3c1c348d9ea7c149eb3b1d4ce7a..3a3659e8633aa642a79ac3988127632b9cf38ff3 100644 (file)
@@ -21,8 +21,8 @@
 #include <linux/pm.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
+#include <linux/gpio.h>
 
-#include <asm/wbflush.h>
 #include <asm/reboot.h>
 #include <asm/time.h>
 #include <asm/txx9tmr.h>
@@ -34,7 +34,7 @@
 #endif
 #include <linux/spi/spi.h>
 #include <asm/tx4938/spi.h>
-#include <asm/gpio.h>
+#include <asm/txx9pio.h>
 
 extern char * __init prom_getcmdline(void);
 static inline void tx4938_report_pcic_status1(struct tx4938_pcic_reg *pcicptr);
@@ -90,12 +90,11 @@ void rbtx4938_machine_restart(char *command)
        local_irq_disable();
 
        printk("Rebooting...");
-       *rbtx4938_softresetlock_ptr = 1;
-       *rbtx4938_sfvol_ptr = 1;
-       *rbtx4938_softreset_ptr = 1;
-       wbflush();
-
-       while(1);
+       writeb(1, rbtx4938_softresetlock_addr);
+       writeb(1, rbtx4938_sfvol_addr);
+       writeb(1, rbtx4938_softreset_addr);
+       while(1)
+               ;
 }
 
 void __init
@@ -487,7 +486,7 @@ static int __init tx4938_pcibios_init(void)
        }
 
        /* Reset PCI Bus */
-       *rbtx4938_pcireset_ptr = 0;
+       writeb(0, rbtx4938_pcireset_addr);
        /* Reset PCIC */
        tx4938_ccfgptr->clkctr |= TX4938_CLKCTR_PCIRST;
        if (txboard_pci66_mode > 0)
@@ -495,8 +494,8 @@ static int __init tx4938_pcibios_init(void)
        mdelay(10);
        /* clear PCIC reset */
        tx4938_ccfgptr->clkctr &= ~TX4938_CLKCTR_PCIRST;
-       *rbtx4938_pcireset_ptr = 1;
-       wbflush();
+       writeb(1, rbtx4938_pcireset_addr);
+       mmiowb();
        tx4938_report_pcic_status1(tx4938_pcicptr);
 
        tx4938_report_pciclk();
@@ -504,15 +503,15 @@ static int __init tx4938_pcibios_init(void)
        if (txboard_pci66_mode == 0 &&
            txboard_pci66_check(&tx4938_pci_controller[0], 0, 0)) {
                /* Reset PCI Bus */
-               *rbtx4938_pcireset_ptr = 0;
+               writeb(0, rbtx4938_pcireset_addr);
                /* Reset PCIC */
                tx4938_ccfgptr->clkctr |= TX4938_CLKCTR_PCIRST;
                tx4938_pciclk66_setup();
                mdelay(10);
                /* clear PCIC reset */
                tx4938_ccfgptr->clkctr &= ~TX4938_CLKCTR_PCIRST;
-               *rbtx4938_pcireset_ptr = 1;
-               wbflush();
+               writeb(1, rbtx4938_pcireset_addr);
+               mmiowb();
                /* Reinitialize PCIC */
                tx4938_report_pciclk();
                tx4938_pcic_setup(tx4938_pcicptr, &tx4938_pci_controller[0], io_base[0], extarb);
@@ -615,9 +614,6 @@ static void __init rbtx4938_spi_setup(void)
 {
        /* set SPI_SEL */
        tx4938_ccfgptr->pcfg |= TX4938_PCFG_SPI_SEL;
-       /* chip selects for SPI devices */
-       tx4938_pioptr->dout |= (1 << SEEPROM1_CS);
-       tx4938_pioptr->dir |= (1 << SEEPROM1_CS);
 }
 
 static struct resource rbtx4938_fpga_resource;
@@ -776,12 +772,13 @@ void __init tx4938_board_setup(void)
                txx9_tmr_init(TX4938_TMR_REG(i) & 0xfffffffffULL);
 
        /* enable DMA */
-       TX4938_WR64(0xff1fb150, TX4938_DMA_MCR_MSTEN);
-       TX4938_WR64(0xff1fb950, TX4938_DMA_MCR_MSTEN);
+       for (i = 0; i < 2; i++)
+               ____raw_writeq(TX4938_DMA_MCR_MSTEN,
+                              (void __iomem *)(TX4938_DMA_REG(i) + 0x50));
 
        /* PIO */
-       tx4938_pioptr->maskcpu = 0;
-       tx4938_pioptr->maskext = 0;
+       __raw_writel(0, &tx4938_pioptr->maskcpu);
+       __raw_writel(0, &tx4938_pioptr->maskext);
 
        /* TX4938 internal registers */
        if (request_resource(&iomem_resource, &tx4938_reg_resource))
@@ -863,10 +860,6 @@ void __init plat_mem_setup(void)
        if (txx9_master_clock == 0)
                txx9_master_clock = 25000000; /* 25MHz */
        tx4938_board_setup();
-       /* setup serial stuff */
-       TX4938_WR(0xff1ff314, 0x00000000);      /* h/w flow control off */
-       TX4938_WR(0xff1ff414, 0x00000000);      /* h/w flow control off */
-
 #ifndef CONFIG_PCI
        set_io_port_base(RBTX4938_ETHER_BASE);
 #endif
@@ -932,16 +925,16 @@ void __init plat_mem_setup(void)
        pcfg = tx4938_ccfgptr->pcfg;    /* updated */
        /* fixup piosel */
        if ((pcfg & (TX4938_PCFG_ATA_SEL | TX4938_PCFG_NDF_SEL)) ==
-           TX4938_PCFG_ATA_SEL) {
-               *rbtx4938_piosel_ptr = (*rbtx4938_piosel_ptr & 0x03) | 0x04;
-       }
+           TX4938_PCFG_ATA_SEL)
+               writeb((readb(rbtx4938_piosel_addr) & 0x03) | 0x04,
+                      rbtx4938_piosel_addr);
        else if ((pcfg & (TX4938_PCFG_ATA_SEL | TX4938_PCFG_NDF_SEL)) ==
-           TX4938_PCFG_NDF_SEL) {
-               *rbtx4938_piosel_ptr = (*rbtx4938_piosel_ptr & 0x03) | 0x08;
-       }
-       else {
-               *rbtx4938_piosel_ptr &= ~(0x08 | 0x04);
-       }
+                TX4938_PCFG_NDF_SEL)
+               writeb((readb(rbtx4938_piosel_addr) & 0x03) | 0x08,
+                      rbtx4938_piosel_addr);
+       else
+               writeb(readb(rbtx4938_piosel_addr) & ~(0x08 | 0x04),
+                      rbtx4938_piosel_addr);
 
        rbtx4938_fpga_resource.name = "FPGA Registers";
        rbtx4938_fpga_resource.start = CPHYSADDR(RBTX4938_FPGA_REG_ADDR);
@@ -950,17 +943,14 @@ void __init plat_mem_setup(void)
        if (request_resource(&iomem_resource, &rbtx4938_fpga_resource))
                printk("request resource for fpga failed\n");
 
-       /* disable all OnBoard I/O interrupts */
-       *rbtx4938_imask_ptr = 0;
-
        _machine_restart = rbtx4938_machine_restart;
        _machine_halt = rbtx4938_machine_halt;
        pm_power_off = rbtx4938_machine_power_off;
 
-       *rbtx4938_led_ptr = 0xff;
-       printk("RBTX4938 --- FPGA(Rev %02x)", *rbtx4938_fpga_rev_ptr);
-       printk(" DIPSW:%02x,%02x\n",
-              *rbtx4938_dipsw_ptr, *rbtx4938_bdipsw_ptr);
+       writeb(0xff, rbtx4938_led_addr);
+       printk(KERN_INFO "RBTX4938 --- FPGA(Rev %02x) DIPSW:%02x,%02x\n",
+              readb(rbtx4938_fpga_rev_addr),
+              readb(rbtx4938_dipsw_addr), readb(rbtx4938_bdipsw_addr));
 }
 
 static int __init rbtx4938_ne_init(void)
@@ -984,106 +974,48 @@ device_initcall(rbtx4938_ne_init);
 
 /* GPIO support */
 
-static DEFINE_SPINLOCK(rbtx4938_spi_gpio_lock);
-
-static void rbtx4938_spi_gpio_set(unsigned gpio, int value)
+int gpio_to_irq(unsigned gpio)
 {
-       u8 val;
-       unsigned long flags;
-       gpio -= 16;
-       spin_lock_irqsave(&rbtx4938_spi_gpio_lock, flags);
-       val = *rbtx4938_spics_ptr;
-       if (value)
-               val |= 1 << gpio;
-       else
-               val &= ~(1 << gpio);
-       *rbtx4938_spics_ptr = val;
-       mmiowb();
-       spin_unlock_irqrestore(&rbtx4938_spi_gpio_lock, flags);
+       return -EINVAL;
 }
 
-static int rbtx4938_spi_gpio_dir_out(unsigned gpio, int value)
+int irq_to_gpio(unsigned irq)
 {
-       rbtx4938_spi_gpio_set(gpio, value);
-       return 0;
+       return -EINVAL;
 }
 
-static DEFINE_SPINLOCK(tx4938_gpio_lock);
-
-static int tx4938_gpio_get(unsigned gpio)
-{
-       return tx4938_pioptr->din & (1 << gpio);
-}
+static DEFINE_SPINLOCK(rbtx4938_spi_gpio_lock);
 
-static void tx4938_gpio_set_raw(unsigned gpio, int value)
+static void rbtx4938_spi_gpio_set(struct gpio_chip *chip, unsigned int offset,
+                                 int value)
 {
-       u32 val;
-       val = tx4938_pioptr->dout;
+       u8 val;
+       unsigned long flags;
+       spin_lock_irqsave(&rbtx4938_spi_gpio_lock, flags);
+       val = readb(rbtx4938_spics_addr);
        if (value)
-               val |= 1 << gpio;
+               val |= 1 << offset;
        else
-               val &= ~(1 << gpio);
-       tx4938_pioptr->dout = val;
-}
-
-static void tx4938_gpio_set(unsigned gpio, int value)
-{
-       unsigned long flags;
-       spin_lock_irqsave(&tx4938_gpio_lock, flags);
-       tx4938_gpio_set_raw(gpio, value);
-       mmiowb();
-       spin_unlock_irqrestore(&tx4938_gpio_lock, flags);
-}
-
-static int tx4938_gpio_dir_in(unsigned gpio)
-{
-       spin_lock_irq(&tx4938_gpio_lock);
-       tx4938_pioptr->dir &= ~(1 << gpio);
+               val &= ~(1 << offset);
+       writeb(val, rbtx4938_spics_addr);
        mmiowb();
-       spin_unlock_irq(&tx4938_gpio_lock);
-       return 0;
-}
-
-static int tx4938_gpio_dir_out(unsigned int gpio, int value)
-{
-       spin_lock_irq(&tx4938_gpio_lock);
-       tx4938_gpio_set_raw(gpio, value);
-       tx4938_pioptr->dir |= 1 << gpio;
-       mmiowb();
-       spin_unlock_irq(&tx4938_gpio_lock);
-       return 0;
-}
-
-int gpio_direction_input(unsigned gpio)
-{
-       if (gpio < 16)
-               return tx4938_gpio_dir_in(gpio);
-       return -EINVAL;
-}
-
-int gpio_direction_output(unsigned gpio, int value)
-{
-       if (gpio < 16)
-               return tx4938_gpio_dir_out(gpio, value);
-       if (gpio < 16 + 3)
-               return rbtx4938_spi_gpio_dir_out(gpio, value);
-       return -EINVAL;
+       spin_unlock_irqrestore(&rbtx4938_spi_gpio_lock, flags);
 }
 
-int gpio_get_value(unsigned gpio)
+static int rbtx4938_spi_gpio_dir_out(struct gpio_chip *chip,
+                                    unsigned int offset, int value)
 {
-       if (gpio < 16)
-               return tx4938_gpio_get(gpio);
+       rbtx4938_spi_gpio_set(chip, offset, value);
        return 0;
 }
 
-void gpio_set_value(unsigned gpio, int value)
-{
-       if (gpio < 16)
-               tx4938_gpio_set(gpio, value);
-       else
-               rbtx4938_spi_gpio_set(gpio, value);
-}
+static struct gpio_chip rbtx4938_spi_gpio_chip = {
+       .set = rbtx4938_spi_gpio_set,
+       .direction_output = rbtx4938_spi_gpio_dir_out,
+       .label = "RBTX4938-SPICS",
+       .base = 16,
+       .ngpio = 3,
+};
 
 /* SPI support */
 
@@ -1094,7 +1026,6 @@ static void __init txx9_spi_init(unsigned long base, int irq)
                        .start  = base,
                        .end    = base + 0x20 - 1,
                        .flags  = IORESOURCE_MEM,
-                       .parent = &tx4938_reg_resource,
                }, {
                        .start  = irq,
                        .flags  = IORESOURCE_IRQ,
@@ -1118,10 +1049,25 @@ static int __init rbtx4938_spi_init(void)
        spi_eeprom_register(SEEPROM1_CS);
        spi_eeprom_register(16 + SEEPROM2_CS);
        spi_eeprom_register(16 + SEEPROM3_CS);
+       gpio_request(16 + SRTC_CS, "rtc-rs5c348");
+       gpio_direction_output(16 + SRTC_CS, 0);
+       gpio_request(SEEPROM1_CS, "seeprom1");
+       gpio_direction_output(SEEPROM1_CS, 1);
+       gpio_request(16 + SEEPROM2_CS, "seeprom2");
+       gpio_direction_output(16 + SEEPROM2_CS, 1);
+       gpio_request(16 + SEEPROM3_CS, "seeprom3");
+       gpio_direction_output(16 + SEEPROM3_CS, 1);
        txx9_spi_init(TX4938_SPI_REG & 0xfffffffffULL, RBTX4938_IRQ_IRC_SPI);
        return 0;
 }
-arch_initcall(rbtx4938_spi_init);
+
+static int __init rbtx4938_arch_init(void)
+{
+       txx9_gpio_init(TX4938_PIO_REG & 0xfffffffffULL, 0, 16);
+       gpiochip_add(&rbtx4938_spi_gpio_chip);
+       return rbtx4938_spi_init();
+}
+arch_initcall(rbtx4938_arch_init);
 
 /* Watchdog support */
 
@@ -1131,7 +1077,6 @@ static int __init txx9_wdt_init(unsigned long base)
                .start  = base,
                .end    = base + 0x100 - 1,
                .flags  = IORESOURCE_MEM,
-               .parent = &tx4938_reg_resource,
        };
        struct platform_device *dev =
                platform_device_register_simple("txx9wdt", -1, &res, 1);
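
The setup.c hunk above drops the board's private gpio_direction_output()/gpio_set_value() helpers in favour of a gpiolib provider: a struct gpio_chip with .set and .direction_output callbacks, registered through gpiochip_add() from an arch_initcall. As a minimal sketch of that pattern (names are invented for illustration and the register access is left abstract, so this is not the patch's code):

#include <linux/init.h>
#include <linux/gpio.h>		/* gpiolib; some older trees use <asm/gpio.h> */

/* Hypothetical 3-line, output-only GPIO bank, shaped like the
 * RBTX4938-SPICS chip added above. */
static void demo_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	/* write 'value' into bit 'offset' of the bank's output register */
}

static int demo_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
{
	demo_gpio_set(chip, offset, value);
	return 0;
}

static struct gpio_chip demo_gpio_chip = {
	.label			= "demo-gpio",		/* hypothetical name */
	.set			= demo_gpio_set,
	.direction_output	= demo_gpio_dir_out,
	.base			= 16,			/* first GPIO number served */
	.ngpio			= 3,
};

static int __init demo_gpio_init(void)
{
	return gpiochip_add(&demo_gpio_chip);
}
arch_initcall(demo_gpio_init);

Once such a chip is registered, consumers claim the lines with gpio_request() and gpio_direction_output(), as rbtx4938_spi_init() now does for the RTC and EEPROM chip selects.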
index 76d4b5ed3fc0127d9814af755769df7dc86fce9f..c64995342ba82f12922267674f46936fbf2b9571 100644
@@ -1,7 +1,7 @@
 /*
  *  init.c, Common initialization routines for NEC VR4100 series.
  *
- *  Copyright (C) 2003-2005  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *  Copyright (C) 2003-2008  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -53,6 +53,8 @@ void __init plat_time_init(void)
 void __init plat_mem_setup(void)
 {
        iomem_resource_init();
+
+       vr41xx_siu_setup();
 }
 
 void __init prom_init(void)
index b735f45b25f0f1144aaf64d1b96be60476c4c543..654dee6208be2258ce85ef608c0cc7a639696ad4 100644
@@ -1,7 +1,7 @@
 /*
  *  NEC VR4100 series SIU platform device.
  *
- *  Copyright (C) 2007  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *  Copyright (C) 2007-2008  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -118,3 +118,37 @@ err_free_device:
        return retval;
 }
 device_initcall(vr41xx_siu_add);
+
+void __init vr41xx_siu_setup(void)
+{
+       struct uart_port port;
+       struct resource *res;
+       unsigned int *type;
+       int i;
+
+       switch (current_cpu_type()) {
+       case CPU_VR4111:
+       case CPU_VR4121:
+               type = siu_type1_ports;
+               res = siu_type1_resource;
+               break;
+       case CPU_VR4122:
+       case CPU_VR4131:
+       case CPU_VR4133:
+               type = siu_type2_ports;
+               res = siu_type2_resource;
+               break;
+       default:
+               return;
+       }
+
+       for (i = 0; i < SIU_PORTS_MAX; i++) {
+               port.line = i;
+               port.type = type[i];
+               if (port.type == PORT_UNKNOWN)
+                       break;
+               port.mapbase = res[i].start;
+               port.membase = (unsigned char __iomem *)KSEG1ADDR(res[i].start);
+               vr41xx_siu_early_setup(&port);
+       }
+}
index ee2d9f8af5ad53f3d76b49e0ff88b2e80d364e45..2646fcbd7d89d7c09ee72ce7997c808760e9dcdf 100644
@@ -7,6 +7,7 @@
 #include <linux/sched.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
+#include <linux/kbuild.h>
 #include <asm/ucontext.h>
 #include <asm/processor.h>
 #include <asm/thread_info.h>
 #include "sigframe.h"
 #include "mn10300-serial.h"
 
-#define DEFINE(sym, val) \
-       asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->")
-
-#define OFFSET(sym, str, mem) \
-       DEFINE(sym, offsetof(struct str, mem));
-
 void foo(void)
 {
        OFFSET(SIGCONTEXT_d0, sigcontext, d0);
index dbceae4307dacffd940c035c204a8be972439bf7..c1a8d8f941fdb7d6b11c67007adeb38357ec63e2 100644
@@ -16,8 +16,8 @@
  */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-       unsigned long start = pci_resource_start(dev, bar);
-       unsigned long len = pci_resource_len(dev, bar);
+       resource_size_t start = pci_resource_start(dev, bar);
+       resource_size_t len = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);
 
        if (!len || !start)
index eaa79bc14d9463fcce66b5acc96014d15f9433f4..3efc0b73e4ff9ce71efd7bf93550af12f1710394 100644
@@ -32,6 +32,7 @@
 #include <linux/thread_info.h>
 #include <linux/ptrace.h>
 #include <linux/hardirq.h>
+#include <linux/kbuild.h>
 
 #include <asm/pgtable.h>
 #include <asm/ptrace.h>
 #include <asm/pdc.h>
 #include <asm/uaccess.h>
 
-#define DEFINE(sym, val) \
-       asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 #ifdef CONFIG_64BIT
 #define FRAME_SIZE     128
 #else
index 9448d4e91142a5e62f56afa639de5539d5d28173..ccd61b9567a6e8f837426961ec1c4de231c517f2 100644
@@ -397,10 +397,9 @@ pcxl_dma_init(void)
                        "pcxl_dma_init: Unable to create gsc /proc dir entry\n");
        else {
                struct proc_dir_entry* ent;
-               ent = create_proc_entry("pcxl_dma", 0, proc_gsc_root);
-               if (ent)
-                       ent->proc_fops = &proc_pcxl_dma_ops;
-               else
+               ent = proc_create("pcxl_dma", 0, proc_gsc_root,
+                                 &proc_pcxl_dma_ops);
+               if (!ent)
                        printk(KERN_WARNING
                                "pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
        }
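
The pcxl_dma hunk above is one instance of the tree-wide switch from create_proc_entry() plus a manual ent->proc_fops assignment to proc_create(), which publishes the entry only after its file_operations are in place. A rough sketch of the new-style registration, using invented names and a seq_file single_open() backend for brevity (not code from this merge):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Hypothetical read-only /proc/demo_stats entry registered via proc_create(). */
static int demo_stats_show(struct seq_file *m, void *v)
{
	seq_printf(m, "demo: nothing to report\n");
	return 0;
}

static int demo_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_stats_show, NULL);
}

static const struct file_operations demo_stats_fops = {
	.owner		= THIS_MODULE,
	.open		= demo_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init demo_stats_init(void)
{
	if (!proc_create("demo_stats", 0, NULL, &demo_stats_fops))
		return -ENOMEM;
	return 0;
}
module_init(demo_stats_init);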
index f4a811690ab30e1dc4aa9b077c657a8b5ec01faf..9abed07db7fc0ded2b7e041c5e73f65e1911b211 100644
@@ -438,8 +438,8 @@ void ioport_unmap(void __iomem *addr)
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-       unsigned long start = pci_resource_start(dev, bar);
-       unsigned long len = pci_resource_len(dev, bar);
+       resource_size_t start = pci_resource_start(dev, bar);
+       resource_size_t len = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);
 
        if (!len || !start)
index eb80f5e33d7dad11b2cb11b9e8a2b9136c0863dc..1f012843150fbd83b801c11b8a6a58ddcaa099b5 100644
@@ -603,15 +603,18 @@ void show_mem(void)
 #ifdef CONFIG_DISCONTIGMEM
        {
                struct zonelist *zl;
-               int i, j, k;
+               int i, j;
 
                for (i = 0; i < npmem_ranges; i++) {
+                       zl = node_zonelist(i);
                        for (j = 0; j < MAX_NR_ZONES; j++) {
-                               zl = NODE_DATA(i)->node_zonelists + j;
+                               struct zoneref *z;
+                               struct zone *zone;
 
                                printk("Zone list for zone %d on node %d: ", j, i);
-                               for (k = 0; zl->zones[k] != NULL; k++) 
-                                       printk("[%d/%s] ", zone_to_nid(zl->zones[k]), zl->zones[k]->name);
+                               for_each_zone_zonelist(zone, z, zl, j)
+                                       printk("[%d/%s] ", zone_to_nid(zone),
+                                                               zone->name);
                                printk("\n");
                        }
                }
index 20f45a8b87e3e56c7e12615610f3706497e26003..3934e2659407b2769aaf81fba5c08a3873ed7fe0 100644
@@ -608,6 +608,19 @@ source "drivers/pcmcia/Kconfig"
 
 source "drivers/pci/hotplug/Kconfig"
 
+config HAS_RAPIDIO
+       bool
+       default n
+
+config RAPIDIO
+       bool "RapidIO support"
+       depends on HAS_RAPIDIO
+       help
+         If you say Y here, the kernel will include drivers and
+         infrastructure code to support RapidIO interconnect devices.
+
+source "drivers/rapidio/Kconfig"
+
 endmenu
 
 menu "Advanced setup"
@@ -803,3 +816,4 @@ config PPC_CLOCK
 config PPC_LIB_RHEAP
        bool
 
+source "arch/powerpc/kvm/Kconfig"
index a86d8d853214a37c6e807c3e5515c7cf21ef36c6..a7d24e692bab12e2ae18db1048718dfdd27647b5 100644
@@ -118,7 +118,6 @@ config XMON_DISASSEMBLY
 
 config IRQSTACKS
        bool "Use separate kernel stacks when processing interrupts"
-       depends on PPC64
        help
          If you say Y here the kernel will use separate kernel stacks
          for handling hard and soft interrupts.  This can help avoid
@@ -151,6 +150,9 @@ config BOOTX_TEXT
 
 config PPC_EARLY_DEBUG
        bool "Early debugging (dangerous)"
+       # PPC_EARLY_DEBUG on 440 leaves AS=1 mappings above the TLB high water
+       # mark, which doesn't work with current 440 KVM.
+       depends on !KVM
        help
          Say Y to enable some early debugging facilities that may be available
          for your processor/board combination. Those facilities are hacks
index e2ec4a91ccefee1b68b22a4b2c834ebc578983f7..9dcdc036cdf7ff13a6e09ad02d61b01c888e7d6f 100644
@@ -145,6 +145,7 @@ core-y                              += arch/powerpc/kernel/ \
                                   arch/powerpc/platforms/
 core-$(CONFIG_MATH_EMULATION)  += arch/powerpc/math-emu/
 core-$(CONFIG_XMON)            += arch/powerpc/xmon/
+core-$(CONFIG_KVM)             += arch/powerpc/kvm/
 
 drivers-$(CONFIG_OPROFILE)     += arch/powerpc/oprofile/
 
index c6ca6319e4f7f2a2adeb8b98066d38fdf4f0edac..2f74cc4e093e16564548179cd035b1df304112a9 100644
  * option) any later version.
  */
 
-/*
- * WARNING: Do not depend on this tree layout remaining static just yet.
- * The MPC5200 device tree conventions are still in flux
- * Keep an eye on the linuxppc-dev mailing list for more details
- */
+/dts-v1/;
 
 / {
        model = "schindler,cm5200";
                PowerPC,5200@0 {
                        device_type = "cpu";
                        reg = <0>;
-                       d-cache-line-size = <20>;
-                       i-cache-line-size = <20>;
-                       d-cache-size = <4000>;          // L1, 16K
-                       i-cache-size = <4000>;          // L1, 16K
+                       d-cache-line-size = <32>;
+                       i-cache-line-size = <32>;
+                       d-cache-size = <0x4000>;                // L1, 16K
+                       i-cache-size = <0x4000>;                // L1, 16K
                        timebase-frequency = <0>;       // from bootloader
                        bus-frequency = <0>;            // from bootloader
                        clock-frequency = <0>;          // from bootloader
 
        memory {
                device_type = "memory";
-               reg = <00000000 04000000>;      // 64MB
+               reg = <0x00000000 0x04000000>;  // 64MB
        };
 
        soc5200@f0000000 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "fsl,mpc5200b-immr";
-               ranges = <0 f0000000 0000c000>;
-               reg = <f0000000 00000100>;
+               ranges = <0 0xf0000000 0x0000c000>;
+               reg = <0xf0000000 0x00000100>;
                bus-frequency = <0>;            // from bootloader
                system-frequency = <0>;         // from bootloader
 
                cdm@200 {
                        compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
-                       reg = <200 38>;
+                       reg = <0x200 0x38>;
                };
 
-               mpc5200_pic: pic@500 {
+               mpc5200_pic: interrupt-controller@500 {
                        // 5200 interrupts are encoded into two levels;
                        interrupt-controller;
                        #interrupt-cells = <3>;
                        compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
-                       reg = <500 80>;
+                       reg = <0x500 0x80>;
                };
 
                timer@600 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-                       reg = <600 10>;
+                       reg = <0x600 0x10>;
                        interrupts = <1 9 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        fsl,has-wdt;
 
                timer@610 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-                       reg = <610 10>;
-                       interrupts = <1 a 0>;
+                       reg = <0x610 0x10>;
+                       interrupts = <1 10 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@620 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-                       reg = <620 10>;
-                       interrupts = <1 b 0>;
+                       reg = <0x620 0x10>;
+                       interrupts = <1 11 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@630 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-                       reg = <630 10>;
-                       interrupts = <1 c 0>;
+                       reg = <0x630 0x10>;
+                       interrupts = <1 12 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@640 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-                       reg = <640 10>;
-                       interrupts = <1 d 0>;
+                       reg = <0x640 0x10>;
+                       interrupts = <1 13 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@650 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-                       reg = <650 10>;
-                       interrupts = <1 e 0>;
+                       reg = <0x650 0x10>;
+                       interrupts = <1 14 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@660 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-                       reg = <660 10>;
-                       interrupts = <1 f 0>;
+                       reg = <0x660 0x10>;
+                       interrupts = <1 15 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@670 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-                       reg = <670 10>;
-                       interrupts = <1 10 0>;
+                       reg = <0x670 0x10>;
+                       interrupts = <1 16 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                rtc@800 {       // Real time clock
                        compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
-                       reg = <800 100>;
+                       reg = <0x800 0x100>;
                        interrupts = <1 5 0 1 6 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                gpio@b00 {
                        compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
-                       reg = <b00 40>;
+                       reg = <0xb00 0x40>;
                        interrupts = <1 7 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                gpio@c00 {
                        compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
-                       reg = <c00 40>;
+                       reg = <0xc00 0x40>;
                        interrupts = <1 8 0 0 3 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                spi@f00 {
                        compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
-                       reg = <f00 20>;
-                       interrupts = <2 d 0 2 e 0>;
+                       reg = <0xf00 0x20>;
+                       interrupts = <2 13 0 2 14 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                usb@1000 {
                        compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
-                       reg = <1000 ff>;
+                       reg = <0x1000 0xff>;
                        interrupts = <2 6 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                dma-controller@1200 {
                        compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
-                       reg = <1200 80>;
+                       reg = <0x1200 0x80>;
                        interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
                                      3 4 0  3 5 0  3 6 0  3 7 0
-                                     3 8 0  3 9 0  3 a 0  3 b 0
-                                     3 c 0  3 d 0  3 e 0  3 f 0>;
+                                     3 8 0  3 9 0  3 10 0  3 11 0
+                                     3 12 0  3 13 0  3 14 0  3 15 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                xlb@1f00 {
                        compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
-                       reg = <1f00 100>;
+                       reg = <0x1f00 0x100>;
                };
 
                serial@2000 {           // PSC1
                        device_type = "serial";
                        compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
                        port-number = <0>;  // Logical port assignment
-                       reg = <2000 100>;
+                       reg = <0x2000 0x100>;
                        interrupts = <2 1 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                        device_type = "serial";
                        compatible = "fsl,mpc5200-psc-uart";
                        port-number = <1>;  // Logical port assignment
-                       reg = <2200 100>;
+                       reg = <0x2200 0x100>;
                        interrupts = <2 2 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                        device_type = "serial";
                        compatible = "fsl,mpc5200-psc-uart";
                        port-number = <2>;  // Logical port assignment
-                       reg = <2400 100>;
+                       reg = <0x2400 0x100>;
                        interrupts = <2 3 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                        device_type = "serial";
                        compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
                        port-number = <5>;  // Logical port assignment
-                       reg = <2c00 100>;
+                       reg = <0x2c00 0x100>;
                        interrupts = <2 4 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                ethernet@3000 {
                        device_type = "network";
                        compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
-                       reg = <3000 400>;
+                       reg = <0x3000 0x400>;
                        local-mac-address = [ 00 00 00 00 00 00 ];
                        interrupts = <2 5 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio";
-                       reg = <3000 400>;       // fec range, since we need to setup fec interrupts
+                       reg = <0x3000 0x400>;       // fec range, since we need to setup fec interrupts
                        interrupts = <2 5 0>;   // these are for "mii command finished", not link changes & co.
                        interrupt-parent = <&mpc5200_pic>;
 
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-                       reg = <3d40 40>;
-                       interrupts = <2 10 0>;
+                       reg = <0x3d40 0x40>;
+                       interrupts = <2 16 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        fsl5200-clocking;
                };
 
                sram@8000 {
                        compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram";
-                       reg = <8000 4000>;
+                       reg = <0x8000 0x4000>;
                };
        };
 
                compatible = "fsl,lpb";
                #address-cells = <2>;
                #size-cells = <1>;
-               ranges = <0 0 fc000000 2000000>;
+               ranges = <0 0 0xfc000000 0x2000000>;
 
                // 16-bit flash device at LocalPlus Bus CS0
                flash@0,0 {
                        compatible = "cfi-flash";
-                       reg = <0 0 2000000>;
+                       reg = <0 0 0x2000000>;
                        bank-width = <2>;
                        device-width = <2>;
                        #size-cells = <1>;
index 09b4e16154d6021938c0f74b2e69d370d45f39e2..2cf9a8768f4482218fe71eb1c2f368aeb3f9220f 100644
@@ -10,6 +10,8 @@
  * option) any later version.
  */
 
+/dts-v1/;
+
 / {
        model = "fsl,lite5200";
        compatible = "fsl,lite5200";
                PowerPC,5200@0 {
                        device_type = "cpu";
                        reg = <0>;
-                       d-cache-line-size = <20>;
-                       i-cache-line-size = <20>;
-                       d-cache-size = <4000>;          // L1, 16K
-                       i-cache-size = <4000>;          // L1, 16K
+                       d-cache-line-size = <32>;
+                       i-cache-line-size = <32>;
+                       d-cache-size = <0x4000>;        // L1, 16K
+                       i-cache-size = <0x4000>;        // L1, 16K
                        timebase-frequency = <0>;       // from bootloader
                        bus-frequency = <0>;            // from bootloader
                        clock-frequency = <0>;          // from bootloader
 
        memory {
                device_type = "memory";
-               reg = <00000000 04000000>;      // 64MB
+               reg = <0x00000000 0x04000000>;  // 64MB
        };
 
        soc5200@f0000000 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "fsl,mpc5200-immr";
-               ranges = <0 f0000000 0000c000>;
-               reg = <f0000000 00000100>;
+               ranges = <0 0xf0000000 0x0000c000>;
+               reg = <0xf0000000 0x00000100>;
                bus-frequency = <0>;            // from bootloader
                system-frequency = <0>;         // from bootloader
 
                cdm@200 {
                        compatible = "fsl,mpc5200-cdm";
-                       reg = <200 38>;
+                       reg = <0x200 0x38>;
                };
 
                mpc5200_pic: interrupt-controller@500 {
                        #interrupt-cells = <3>;
                        device_type = "interrupt-controller";
                        compatible = "fsl,mpc5200-pic";
-                       reg = <500 80>;
+                       reg = <0x500 0x80>;
                };
 
                timer@600 {     // General Purpose Timer
                        compatible = "fsl,mpc5200-gpt";
                        cell-index = <0>;
-                       reg = <600 10>;
+                       reg = <0x600 0x10>;
                        interrupts = <1 9 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        fsl,has-wdt;
                timer@610 {     // General Purpose Timer
                        compatible = "fsl,mpc5200-gpt";
                        cell-index = <1>;
-                       reg = <610 10>;
-                       interrupts = <1 a 0>;
+                       reg = <0x610 0x10>;
+                       interrupts = <1 10 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@620 {     // General Purpose Timer
                        compatible = "fsl,mpc5200-gpt";
                        cell-index = <2>;
-                       reg = <620 10>;
-                       interrupts = <1 b 0>;
+                       reg = <0x620 0x10>;
+                       interrupts = <1 11 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@630 {     // General Purpose Timer
                        compatible = "fsl,mpc5200-gpt";
                        cell-index = <3>;
-                       reg = <630 10>;
-                       interrupts = <1 c 0>;
+                       reg = <0x630 0x10>;
+                       interrupts = <1 12 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@640 {     // General Purpose Timer
                        compatible = "fsl,mpc5200-gpt";
                        cell-index = <4>;
-                       reg = <640 10>;
-                       interrupts = <1 d 0>;
+                       reg = <0x640 0x10>;
+                       interrupts = <1 13 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@650 {     // General Purpose Timer
                        compatible = "fsl,mpc5200-gpt";
                        cell-index = <5>;
-                       reg = <650 10>;
-                       interrupts = <1 e 0>;
+                       reg = <0x650 0x10>;
+                       interrupts = <1 14 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@660 {     // General Purpose Timer
                        compatible = "fsl,mpc5200-gpt";
                        cell-index = <6>;
-                       reg = <660 10>;
-                       interrupts = <1 f 0>;
+                       reg = <0x660 0x10>;
+                       interrupts = <1 15 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@670 {     // General Purpose Timer
                        compatible = "fsl,mpc5200-gpt";
                        cell-index = <7>;
-                       reg = <670 10>;
-                       interrupts = <1 10 0>;
+                       reg = <0x670 0x10>;
+                       interrupts = <1 16 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                rtc@800 {       // Real time clock
                        compatible = "fsl,mpc5200-rtc";
                        device_type = "rtc";
-                       reg = <800 100>;
+                       reg = <0x800 0x100>;
                        interrupts = <1 5 0 1 6 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                can@900 {
                        compatible = "fsl,mpc5200-mscan";
                        cell-index = <0>;
-                       interrupts = <2 11 0>;
+                       interrupts = <2 17 0>;
                        interrupt-parent = <&mpc5200_pic>;
-                       reg = <900 80>;
+                       reg = <0x900 0x80>;
                };
 
                can@980 {
                        compatible = "fsl,mpc5200-mscan";
                        cell-index = <1>;
-                       interrupts = <2 12 0>;
+                       interrupts = <2 18 0>;
                        interrupt-parent = <&mpc5200_pic>;
-                       reg = <980 80>;
+                       reg = <0x980 0x80>;
                };
 
                gpio@b00 {
                        compatible = "fsl,mpc5200-gpio";
-                       reg = <b00 40>;
+                       reg = <0xb00 0x40>;
                        interrupts = <1 7 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                gpio@c00 {
                        compatible = "fsl,mpc5200-gpio-wkup";
-                       reg = <c00 40>;
+                       reg = <0xc00 0x40>;
                        interrupts = <1 8 0 0 3 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                spi@f00 {
                        compatible = "fsl,mpc5200-spi";
-                       reg = <f00 20>;
-                       interrupts = <2 d 0 2 e 0>;
+                       reg = <0xf00 0x20>;
+                       interrupts = <2 13 0 2 14 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                usb@1000 {
                        compatible = "fsl,mpc5200-ohci","ohci-be";
-                       reg = <1000 ff>;
+                       reg = <0x1000 0xff>;
                        interrupts = <2 6 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                dma-controller@1200 {
                        device_type = "dma-controller";
                        compatible = "fsl,mpc5200-bestcomm";
-                       reg = <1200 80>;
+                       reg = <0x1200 0x80>;
                        interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
                                      3 4 0  3 5 0  3 6 0  3 7 0
-                                     3 8 0  3 9 0  3 a 0  3 b 0
-                                     3 c 0  3 d 0  3 e 0  3 f 0>;
+                                     3 8 0  3 9 0  3 10 0  3 11 0
+                                     3 12 0  3 13 0  3 14 0  3 15 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                xlb@1f00 {
                        compatible = "fsl,mpc5200-xlb";
-                       reg = <1f00 100>;
+                       reg = <0x1f00 0x100>;
                };
 
                serial@2000 {           // PSC1
                        compatible = "fsl,mpc5200-psc-uart";
                        port-number = <0>;  // Logical port assignment
                        cell-index = <0>;
-                       reg = <2000 100>;
+                       reg = <0x2000 0x100>;
                        interrupts = <2 1 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                //ac97@2200 {           // PSC2
                //      compatible = "fsl,mpc5200-psc-ac97";
                //      cell-index = <1>;
-               //      reg = <2200 100>;
+               //      reg = <0x2200 0x100>;
                //      interrupts = <2 2 0>;
                //      interrupt-parent = <&mpc5200_pic>;
                //};
                //i2s@2400 {            // PSC3
                //      compatible = "fsl,mpc5200-psc-i2s";
                //      cell-index = <2>;
-               //      reg = <2400 100>;
+               //      reg = <0x2400 0x100>;
                //      interrupts = <2 3 0>;
                //      interrupt-parent = <&mpc5200_pic>;
                //};
                //      device_type = "serial";
                //      compatible = "fsl,mpc5200-psc-uart";
                //      cell-index = <3>;
-               //      reg = <2600 100>;
-               //      interrupts = <2 b 0>;
+               //      reg = <0x2600 0x100>;
+               //      interrupts = <2 11 0>;
                //      interrupt-parent = <&mpc5200_pic>;
                //};
 
                //      device_type = "serial";
                //      compatible = "fsl,mpc5200-psc-uart";
                //      cell-index = <4>;
-               //      reg = <2800 100>;
-               //      interrupts = <2 c 0>;
+               //      reg = <0x2800 0x100>;
+               //      interrupts = <2 12 0>;
                //      interrupt-parent = <&mpc5200_pic>;
                //};
 
                //spi@2c00 {            // PSC6
                //      compatible = "fsl,mpc5200-psc-spi";
                //      cell-index = <5>;
-               //      reg = <2c00 100>;
+               //      reg = <0x2c00 0x100>;
                //      interrupts = <2 4 0>;
                //      interrupt-parent = <&mpc5200_pic>;
                //};
                ethernet@3000 {
                        device_type = "network";
                        compatible = "fsl,mpc5200-fec";
-                       reg = <3000 800>;
+                       reg = <0x3000 0x400>;
                        local-mac-address = [ 00 00 00 00 00 00 ];
                        interrupts = <2 5 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "fsl,mpc5200-mdio";
-                       reg = <3000 400>;       // fec range, since we need to setup fec interrupts
+                       reg = <0x3000 0x400>;   // fec range, since we need to setup fec interrupts
                        interrupts = <2 5 0>;   // these are for "mii command finished", not link changes & co.
                        interrupt-parent = <&mpc5200_pic>;
 
-                       phy0:ethernet-phy@1 {
+                       phy0: ethernet-phy@1 {
                                device_type = "ethernet-phy";
                                reg = <1>;
                        };
                ata@3a00 {
                        device_type = "ata";
                        compatible = "fsl,mpc5200-ata";
-                       reg = <3a00 100>;
+                       reg = <0x3a00 0x100>;
                        interrupts = <2 7 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                        #size-cells = <0>;
                        compatible = "fsl,mpc5200-i2c","fsl-i2c";
                        cell-index = <0>;
-                       reg = <3d00 40>;
-                       interrupts = <2 f 0>;
+                       reg = <0x3d00 0x40>;
+                       interrupts = <2 15 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        fsl5200-clocking;
                };
                        #size-cells = <0>;
                        compatible = "fsl,mpc5200-i2c","fsl-i2c";
                        cell-index = <1>;
-                       reg = <3d40 40>;
-                       interrupts = <2 10 0>;
+                       reg = <0x3d40 0x40>;
+                       interrupts = <2 16 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        fsl5200-clocking;
                };
                sram@8000 {
                        compatible = "fsl,mpc5200-sram","sram";
-                       reg = <8000 4000>;
+                       reg = <0x8000 0x4000>;
                };
        };
 
                #address-cells = <3>;
                device_type = "pci";
                compatible = "fsl,mpc5200-pci";
-               reg = <f0000d00 100>;
-               interrupt-map-mask = <f800 0 0 7>;
-               interrupt-map = <c000 0 0 1 &mpc5200_pic 0 0 3
-                                c000 0 0 2 &mpc5200_pic 0 0 3
-                                c000 0 0 3 &mpc5200_pic 0 0 3
-                                c000 0 0 4 &mpc5200_pic 0 0 3>;
+               reg = <0xf0000d00 0x100>;
+               interrupt-map-mask = <0xf800 0 0 7>;
+               interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3
+                                0xc000 0 0 2 &mpc5200_pic 0 0 3
+                                0xc000 0 0 3 &mpc5200_pic 0 0 3
+                                0xc000 0 0 4 &mpc5200_pic 0 0 3>;
                clock-frequency = <0>; // From boot loader
-               interrupts = <2 8 0 2 9 0 2 a 0>;
+               interrupts = <2 8 0 2 9 0 2 10 0>;
                interrupt-parent = <&mpc5200_pic>;
                bus-range = <0 0>;
-               ranges = <42000000 0 80000000 80000000 0 20000000
-                         02000000 0 a0000000 a0000000 0 10000000
-                         01000000 0 00000000 b0000000 0 01000000>;
+               ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
+                         0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
+                         0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
        };
 };
index 2e9bc397ae9a1eb40dadbb7e7ab2559d7155b36b..7bd5b9c399b8fe2933c171cb7c7790b1f0cb54a3 100644
  * option) any later version.
  */
 
-/*
- * WARNING: Do not depend on this tree layout remaining static just yet.
- * The MPC5200 device tree conventions are still in flux
- * Keep an eye on the linuxppc-dev mailing list for more details
- */
+/dts-v1/;
 
 / {
        model = "fsl,lite5200b";
                PowerPC,5200@0 {
                        device_type = "cpu";
                        reg = <0>;
-                       d-cache-line-size = <20>;
-                       i-cache-line-size = <20>;
-                       d-cache-size = <4000>;          // L1, 16K
-                       i-cache-size = <4000>;          // L1, 16K
+                       d-cache-line-size = <32>;
+                       i-cache-line-size = <32>;
+                       d-cache-size = <0x4000>;        // L1, 16K
+                       i-cache-size = <0x4000>;        // L1, 16K
                        timebase-frequency = <0>;       // from bootloader
                        bus-frequency = <0>;            // from bootloader
                        clock-frequency = <0>;          // from bootloader
 
        memory {
                device_type = "memory";
-               reg = <00000000 10000000>;      // 256MB
+               reg = <0x00000000 0x10000000>;  // 256MB
        };
 
        soc5200@f0000000 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "fsl,mpc5200b-immr";
-               ranges = <0 f0000000 0000c000>;
-               reg = <f0000000 00000100>;
+               ranges = <0 0xf0000000 0x0000c000>;
+               reg = <0xf0000000 0x00000100>;
                bus-frequency = <0>;            // from bootloader
                system-frequency = <0>;         // from bootloader
 
                cdm@200 {
                        compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
-                       reg = <200 38>;
+                       reg = <0x200 0x38>;
                };
 
                mpc5200_pic: interrupt-controller@500 {
                        #interrupt-cells = <3>;
                        device_type = "interrupt-controller";
                        compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
-                       reg = <500 80>;
+                       reg = <0x500 0x80>;
                };
 
                timer@600 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
                        cell-index = <0>;
-                       reg = <600 10>;
+                       reg = <0x600 0x10>;
                        interrupts = <1 9 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        fsl,has-wdt;
                timer@610 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
                        cell-index = <1>;
-                       reg = <610 10>;
-                       interrupts = <1 a 0>;
+                       reg = <0x610 0x10>;
+                       interrupts = <1 10 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@620 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
                        cell-index = <2>;
-                       reg = <620 10>;
-                       interrupts = <1 b 0>;
+                       reg = <0x620 0x10>;
+                       interrupts = <1 11 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@630 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
                        cell-index = <3>;
-                       reg = <630 10>;
-                       interrupts = <1 c 0>;
+                       reg = <0x630 0x10>;
+                       interrupts = <1 12 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@640 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
                        cell-index = <4>;
-                       reg = <640 10>;
-                       interrupts = <1 d 0>;
+                       reg = <0x640 0x10>;
+                       interrupts = <1 13 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@650 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
                        cell-index = <5>;
-                       reg = <650 10>;
-                       interrupts = <1 e 0>;
+                       reg = <0x650 0x10>;
+                       interrupts = <1 14 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@660 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
                        cell-index = <6>;
-                       reg = <660 10>;
-                       interrupts = <1 f 0>;
+                       reg = <0x660 0x10>;
+                       interrupts = <1 15 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@670 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
                        cell-index = <7>;
-                       reg = <670 10>;
-                       interrupts = <1 10 0>;
+                       reg = <0x670 0x10>;
+                       interrupts = <1 16 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                rtc@800 {       // Real time clock
                        compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
                        device_type = "rtc";
-                       reg = <800 100>;
+                       reg = <0x800 0x100>;
                        interrupts = <1 5 0 1 6 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                can@900 {
                        compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
                        cell-index = <0>;
-                       interrupts = <2 11 0>;
+                       interrupts = <2 17 0>;
                        interrupt-parent = <&mpc5200_pic>;
-                       reg = <900 80>;
+                       reg = <0x900 0x80>;
                };
 
                can@980 {
                        compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
                        cell-index = <1>;
-                       interrupts = <2 12 0>;
+                       interrupts = <2 18 0>;
                        interrupt-parent = <&mpc5200_pic>;
-                       reg = <980 80>;
+                       reg = <0x980 0x80>;
                };
 
                gpio@b00 {
                        compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
-                       reg = <b00 40>;
+                       reg = <0xb00 0x40>;
                        interrupts = <1 7 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                gpio@c00 {
                        compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
-                       reg = <c00 40>;
+                       reg = <0xc00 0x40>;
                        interrupts = <1 8 0 0 3 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                spi@f00 {
                        compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
-                       reg = <f00 20>;
-                       interrupts = <2 d 0 2 e 0>;
+                       reg = <0xf00 0x20>;
+                       interrupts = <2 13 0 2 14 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                usb@1000 {
                        compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
-                       reg = <1000 ff>;
+                       reg = <0x1000 0xff>;
                        interrupts = <2 6 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                dma-controller@1200 {
                        device_type = "dma-controller";
                        compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
-                       reg = <1200 80>;
+                       reg = <0x1200 0x80>;
                        interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
                                      3 4 0  3 5 0  3 6 0  3 7 0
-                                     3 8 0  3 9 0  3 a 0  3 b 0
-                                     3 c 0  3 d 0  3 e 0  3 f 0>;
+                                     3 8 0  3 9 0  3 10 0  3 11 0
+                                     3 12 0  3 13 0  3 14 0  3 15 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                xlb@1f00 {
                        compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
-                       reg = <1f00 100>;
+                       reg = <0x1f00 0x100>;
                };
 
                serial@2000 {           // PSC1
                        compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
                        port-number = <0>;  // Logical port assignment
                        cell-index = <0>;
-                       reg = <2000 100>;
+                       reg = <0x2000 0x100>;
                        interrupts = <2 1 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                //ac97@2200 {           // PSC2
                //      compatible = "fsl,mpc5200b-psc-ac97","fsl,mpc5200-psc-ac97";
                //      cell-index = <1>;
-               //      reg = <2200 100>;
+               //      reg = <0x2200 0x100>;
                //      interrupts = <2 2 0>;
                //      interrupt-parent = <&mpc5200_pic>;
                //};
                //i2s@2400 {            // PSC3
                //      compatible = "fsl,mpc5200b-psc-i2s"; //not 5200 compatible
                //      cell-index = <2>;
-               //      reg = <2400 100>;
+               //      reg = <0x2400 0x100>;
                //      interrupts = <2 3 0>;
                //      interrupt-parent = <&mpc5200_pic>;
                //};
                //      device_type = "serial";
                //      compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
                //      cell-index = <3>;
-               //      reg = <2600 100>;
-               //      interrupts = <2 b 0>;
+               //      reg = <0x2600 0x100>;
+               //      interrupts = <2 11 0>;
                //      interrupt-parent = <&mpc5200_pic>;
                //};
 
                //      device_type = "serial";
                //      compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
                //      cell-index = <4>;
-               //      reg = <2800 100>;
-               //      interrupts = <2 c 0>;
+               //      reg = <0x2800 0x100>;
+               //      interrupts = <2 12 0>;
                //      interrupt-parent = <&mpc5200_pic>;
                //};
 
                //spi@2c00 {            // PSC6
                //      compatible = "fsl,mpc5200b-psc-spi","fsl,mpc5200-psc-spi";
                //      cell-index = <5>;
-               //      reg = <2c00 100>;
+               //      reg = <0x2c00 0x100>;
                //      interrupts = <2 4 0>;
                //      interrupt-parent = <&mpc5200_pic>;
                //};
                ethernet@3000 {
                        device_type = "network";
                        compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
-                       reg = <3000 400>;
+                       reg = <0x3000 0x400>;
                        local-mac-address = [ 00 00 00 00 00 00 ];
                        interrupts = <2 5 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "fsl,mpc5200b-mdio", "fsl,mpc5200-mdio";
-                       reg = <3000 400>;       // fec range, since we need to setup fec interrupts
+                       reg = <0x3000 0x400>;   // fec range, since we need to setup fec interrupts
                        interrupts = <2 5 0>;   // these are for "mii command finished", not link changes & co.
                        interrupt-parent = <&mpc5200_pic>;
 
-                       phy0:ethernet-phy@0 {
+                       phy0: ethernet-phy@0 {
                                device_type = "ethernet-phy";
                                reg = <0>;
                        };
                ata@3a00 {
                        device_type = "ata";
                        compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata";
-                       reg = <3a00 100>;
+                       reg = <0x3a00 0x100>;
                        interrupts = <2 7 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                        #size-cells = <0>;
                        compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
                        cell-index = <0>;
-                       reg = <3d00 40>;
-                       interrupts = <2 f 0>;
+                       reg = <0x3d00 0x40>;
+                       interrupts = <2 15 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        fsl5200-clocking;
                };
                        #size-cells = <0>;
                        compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
                        cell-index = <1>;
-                       reg = <3d40 40>;
-                       interrupts = <2 10 0>;
+                       reg = <0x3d40 0x40>;
+                       interrupts = <2 16 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        fsl5200-clocking;
                };
                sram@8000 {
                        compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram","sram";
-                       reg = <8000 4000>;
+                       reg = <0x8000 0x4000>;
                };
        };
 
                #address-cells = <3>;
                device_type = "pci";
                compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci";
-               reg = <f0000d00 100>;
-               interrupt-map-mask = <f800 0 0 7>;
-               interrupt-map = <c000 0 0 1 &mpc5200_pic 0 0 3 // 1st slot
-                                c000 0 0 2 &mpc5200_pic 1 1 3
-                                c000 0 0 3 &mpc5200_pic 1 2 3
-                                c000 0 0 4 &mpc5200_pic 1 3 3
-
-                                c800 0 0 1 &mpc5200_pic 1 1 3 // 2nd slot
-                                c800 0 0 2 &mpc5200_pic 1 2 3
-                                c800 0 0 3 &mpc5200_pic 1 3 3
-                                c800 0 0 4 &mpc5200_pic 0 0 3>;
+               reg = <0xf0000d00 0x100>;
+               interrupt-map-mask = <0xf800 0 0 7>;
+               interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3 // 1st slot
+                                0xc000 0 0 2 &mpc5200_pic 1 1 3
+                                0xc000 0 0 3 &mpc5200_pic 1 2 3
+                                0xc000 0 0 4 &mpc5200_pic 1 3 3
+
+                                0xc800 0 0 1 &mpc5200_pic 1 1 3 // 2nd slot
+                                0xc800 0 0 2 &mpc5200_pic 1 2 3
+                                0xc800 0 0 3 &mpc5200_pic 1 3 3
+                                0xc800 0 0 4 &mpc5200_pic 0 0 3>;
                clock-frequency = <0>; // From boot loader
-               interrupts = <2 8 0 2 9 0 2 a 0>;
+               interrupts = <2 8 0 2 9 0 2 10 0>;
                interrupt-parent = <&mpc5200_pic>;
                bus-range = <0 0>;
-               ranges = <42000000 0 80000000 80000000 0 20000000
-                         02000000 0 a0000000 a0000000 0 10000000
-                         01000000 0 00000000 b0000000 0 01000000>;
+               ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000
+                         0x02000000 0 0xa0000000 0xa0000000 0 0x10000000
+                         0x01000000 0 0x00000000 0xb0000000 0 0x01000000>;
        };
 };
index 2b0dde058f8e14d3e2b788e02f2a51d36464c90a..9e3c921be16473b79934384b6145c97b17a3e186 100644
@@ -10,6 +10,8 @@
  * option) any later version.
  */
 
+/dts-v1/;
+
 / {
        model = "promess,motionpro";
        compatible = "promess,motionpro";
                PowerPC,5200@0 {
                        device_type = "cpu";
                        reg = <0>;
-                       d-cache-line-size = <20>;
-                       i-cache-line-size = <20>;
-                       d-cache-size = <4000>;          // L1, 16K
-                       i-cache-size = <4000>;          // L1, 16K
+                       d-cache-line-size = <32>;
+                       i-cache-line-size = <32>;
+                       d-cache-size = <0x4000>;        // L1, 16K
+                       i-cache-size = <0x4000>;        // L1, 16K
                        timebase-frequency = <0>;       // from bootloader
                        bus-frequency = <0>;            // from bootloader
                        clock-frequency = <0>;          // from bootloader
 
        memory {
                device_type = "memory";
-               reg = <00000000 04000000>;      // 64MB
+               reg = <0x00000000 0x04000000>;  // 64MB
        };
 
        soc5200@f0000000 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "fsl,mpc5200b-immr";
-               ranges = <0 f0000000 0000c000>;
-               reg = <f0000000 00000100>;
+               ranges = <0 0xf0000000 0x0000c000>;
+               reg = <0xf0000000 0x00000100>;
                bus-frequency = <0>;            // from bootloader
                system-frequency = <0>;         // from bootloader
 
                cdm@200 {
                        compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
-                       reg = <200 38>;
+                       reg = <0x200 0x38>;
                };
 
                mpc5200_pic: interrupt-controller@500 {
                        interrupt-controller;
                        #interrupt-cells = <3>;
                        compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
-                       reg = <500 80>;
+                       reg = <0x500 0x80>;
                };
 
                timer@600 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-                       reg = <600 10>;
+                       reg = <0x600 0x10>;
                        interrupts = <1 9 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        fsl,has-wdt;
 
                timer@610 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-                       reg = <610 10>;
-                       interrupts = <1 a 0>;
+                       reg = <0x610 0x10>;
+                       interrupts = <1 10 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@620 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-                       reg = <620 10>;
-                       interrupts = <1 b 0>;
+                       reg = <0x620 0x10>;
+                       interrupts = <1 11 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@630 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-                       reg = <630 10>;
-                       interrupts = <1 c 0>;
+                       reg = <0x630 0x10>;
+                       interrupts = <1 12 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@640 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-                       reg = <640 10>;
-                       interrupts = <1 d 0>;
+                       reg = <0x640 0x10>;
+                       interrupts = <1 13 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                timer@650 {     // General Purpose Timer
                        compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
-                       reg = <650 10>;
-                       interrupts = <1 e 0>;
+                       reg = <0x650 0x10>;
+                       interrupts = <1 14 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                motionpro-led@660 {     // Motion-PRO status LED
                        compatible = "promess,motionpro-led";
                        label = "motionpro-statusled";
-                       reg = <660 10>;
-                       interrupts = <1 f 0>;
+                       reg = <0x660 0x10>;
+                       interrupts = <1 15 0>;
                        interrupt-parent = <&mpc5200_pic>;
-                       blink-delay = <64>; // 100 msec
+                       blink-delay = <100>; // 100 msec
                };
 
                motionpro-led@670 {     // Motion-PRO ready LED
                        compatible = "promess,motionpro-led";
                        label = "motionpro-readyled";
-                       reg = <670 10>;
-                       interrupts = <1 10 0>;
+                       reg = <0x670 0x10>;
+                       interrupts = <1 16 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                rtc@800 {       // Real time clock
                        compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
-                       reg = <800 100>;
+                       reg = <0x800 0x100>;
                        interrupts = <1 5 0 1 6 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
-               mscan@980 {
+               can@980 {
                        compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
-                       interrupts = <2 12 0>;
+                       interrupts = <2 18 0>;
                        interrupt-parent = <&mpc5200_pic>;
-                       reg = <980 80>;
+                       reg = <0x980 0x80>;
                };
 
                gpio@b00 {
                        compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
-                       reg = <b00 40>;
+                       reg = <0xb00 0x40>;
                        interrupts = <1 7 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                gpio@c00 {
                        compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
-                       reg = <c00 40>;
+                       reg = <0xc00 0x40>;
                        interrupts = <1 8 0 0 3 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                spi@f00 {
                        compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
-                       reg = <f00 20>;
-                       interrupts = <2 d 0 2 e 0>;
+                       reg = <0xf00 0x20>;
+                       interrupts = <2 13 0 2 14 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                usb@1000 {
                        compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
-                       reg = <1000 ff>;
+                       reg = <0x1000 0xff>;
                        interrupts = <2 6 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                dma-controller@1200 {
                        compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
-                       reg = <1200 80>;
+                       reg = <0x1200 0x80>;
                        interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
                                      3 4 0  3 5 0  3 6 0  3 7 0
-                                     3 8 0  3 9 0  3 a 0  3 b 0
-                                     3 c 0  3 d 0  3 e 0  3 f 0>;
+                                     3 8 0  3 9 0  3 10 0  3 11 0
+                                     3 12 0  3 13 0  3 14 0  3 15 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                xlb@1f00 {
                        compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
-                       reg = <1f00 100>;
+                       reg = <0x1f00 0x100>;
                };
 
                serial@2000 {           // PSC1
                        device_type = "serial";
                        compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
                        port-number = <0>;  // Logical port assignment
-                       reg = <2000 100>;
+                       reg = <0x2000 0x100>;
                        interrupts = <2 1 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                spi@2200 {              // PSC2
                        compatible = "fsl,mpc5200b-psc-spi","fsl,mpc5200-psc-spi";
                        cell-index = <1>;
-                       reg = <2200 100>;
+                       reg = <0x2200 0x100>;
                        interrupts = <2 2 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                        device_type = "serial";
                        compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
                        port-number = <4>;  // Logical port assignment
-                       reg = <2800 100>;
-                       interrupts = <2 c 0>;
+                       reg = <0x2800 0x100>;
+                       interrupts = <2 12 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                ethernet@3000 {
                        device_type = "network";
                        compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
-                       reg = <3000 400>;
+                       reg = <0x3000 0x400>;
                        local-mac-address = [ 00 00 00 00 00 00 ];
                        interrupts = <2 5 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio";
-                       reg = <3000 400>;       // fec range, since we need to setup fec interrupts
+                       reg = <0x3000 0x400>;       // fec range, since we need to setup fec interrupts
                        interrupts = <2 5 0>;   // these are for "mii command finished", not link changes & co.
                        interrupt-parent = <&mpc5200_pic>;
 
 
                ata@3a00 {
                        compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata";
-                       reg = <3a00 100>;
+                       reg = <0x3a00 0x100>;
                        interrupts = <2 7 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
-                       reg = <3d40 40>;
-                       interrupts = <2 10 0>;
+                       reg = <0x3d40 0x40>;
+                       interrupts = <2 16 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        fsl5200-clocking;
 
                        rtc@68 {
                                device_type = "rtc";
                                compatible = "dallas,ds1339";
-                               reg = <68>;
+                               reg = <0x68>;
                        };
                };
 
                sram@8000 {
                        compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram";
-                       reg = <8000 4000>;
+                       reg = <0x8000 0x4000>;
                };
        };
 
                compatible = "fsl,lpb";
                #address-cells = <2>;
                #size-cells = <1>;
-               ranges = <0 0 ff000000 01000000
-                         1 0 50000000 00010000
-                         2 0 50010000 00010000
-                         3 0 50020000 00010000>;
+               ranges = <0 0 0xff000000 0x01000000
+                         1 0 0x50000000 0x00010000
+                         2 0 0x50010000 0x00010000
+                         3 0 0x50020000 0x00010000>;
 
                // 8-bit DualPort SRAM on LocalPlus Bus CS1
                kollmorgen@1,0 {
                        compatible = "promess,motionpro-kollmorgen";
-                       reg = <1 0 10000>;
+                       reg = <1 0 0x10000>;
                        interrupts = <1 1 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                // 8-bit board CPLD on LocalPlus Bus CS2
                cpld@2,0 {
                        compatible = "promess,motionpro-cpld";
-                       reg = <2 0 10000>;
+                       reg = <2 0 0x10000>;
                };
 
                // 8-bit custom Anybus Module on LocalPlus Bus CS3
                anybus@3,0 {
                        compatible = "promess,motionpro-anybus";
-                       reg = <3 0 10000>;
+                       reg = <3 0 0x10000>;
                };
                pro_module_general@3,0 {
                        compatible = "promess,pro_module_general";
                };
                pro_module_dio@3,800 {
                        compatible = "promess,pro_module_dio";
-                       reg = <3 800 2>;
+                       reg = <3 0x800 2>;
                };
 
                // 16-bit flash device at LocalPlus Bus CS0
                flash@0,0 {
                        compatible = "cfi-flash";
-                       reg = <0 0 01000000>;
+                       reg = <0 0 0x01000000>;
                        bank-width = <2>;
                        device-width = <2>;
                        #size-cells = <1>;
index 16c947b8a7217f17b9dc5bd99644403ba74c95e1..1f2f1e0a55714ffbbaea58c1b1216b6cab67adb1 100644
                reg = <0x00000000 0x20000000>;  // 512M at 0x0
        };
 
+       board-control@e8000000 {
+               compatible = "fsl,fpga-pixis";
+               reg = <0xe8000000 32>;          // pixis at 0xe8000000
+       };
+
        soc@e0000000 {
                #address-cells = <1>;
                #size-cells = <1>;
                        interrupt-parent = <&mpic>;
                };
 
+               display@2c000 {
+                       compatible = "fsl,diu";
+                       reg = <0x2c000 100>;
+                       interrupts = <72 2>;
+                       interrupt-parent = <&mpic>;
+               };
+
                mpic: interrupt-controller@40000 {
                        clock-frequency = <0>;
                        interrupt-controller;
index 7f9b999843ce2e25a81c1c038c2a6ed05c3842e7..1e4bfe9cadb99093f58532aed1eea73a4cdf3c3e 100644
@@ -26,6 +26,7 @@
                serial1 = &serial1;
                pci0 = &pci0;
                pci1 = &pci1;
+               rapidio0 = &rapidio0;
        };
 
        cpus {
                                  0x0 0x00100000>;
                };
        };
+       rapidio0: rapidio@f80c0000 {
+               #address-cells = <2>;
+               #size-cells = <2>;
+               compatible = "fsl,rapidio-delta";
+               reg = <0xf80c0000 0x20000>;
+               ranges = <0 0 0xc0000000 0 0x20000000>;
+               interrupt-parent = <&mpic>;
+               /* err_irq bell_outb_irq bell_inb_irq
+                       msg1_tx_irq msg1_rx_irq msg2_tx_irq msg2_rx_irq */
+               interrupts = <48 2 49 2 50 2 53 2 54 2 55 2 56 2>;
+       };
 };
diff --git a/arch/powerpc/boot/dts/pcm030.dts b/arch/powerpc/boot/dts/pcm030.dts
new file mode 100644
index 0000000..7c1bb95
--- /dev/null
@@ -0,0 +1,363 @@
+/*
+ * phyCORE-MPC5200B-tiny (pcm030) board Device Tree Source
+ *
+ * Copyright 2006 Pengutronix
+ * Sascha Hauer <s.hauer@pengutronix.de>
+ * Copyright 2007 Pengutronix
+ * Juergen Beisert <j.beisert@pengutronix.de>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+/dts-v1/;
+
+/ {
+       model = "phytec,pcm030";
+       compatible = "phytec,pcm030";
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               PowerPC,5200@0 {
+                       device_type = "cpu";
+                       reg = <0>;
+                       d-cache-line-size = <32>;
+                       i-cache-line-size = <32>;
+                       d-cache-size = <0x4000>;        /* L1, 16K          */
+                       i-cache-size = <0x4000>;        /* L1, 16K          */
+                       timebase-frequency = <0>;       /* From Bootloader  */
+                       bus-frequency = <0>;            /* From Bootloader  */
+                       clock-frequency = <0>;          /* From Bootloader  */
+               };
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <0x00000000 0x04000000>;  /* 64MB */
+       };
+
+       soc5200@f0000000 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "fsl,mpc5200b-immr";
+               ranges = <0x0 0xf0000000 0x0000c000>;
+               bus-frequency = <0>;            /* From bootloader */
+               system-frequency = <0>;         /* From bootloader */
+
+               cdm@200 {
+                       compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm";
+                       reg = <0x200 0x38>;
+               };
+
+               mpc5200_pic: interrupt-controller@500 {
+                       /* 5200 interrupts are encoded into two levels; */
+                       interrupt-controller;
+                       #interrupt-cells = <3>;
+                       device_type = "interrupt-controller";
+                       compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic";
+                       reg = <0x500 0x80>;
+               };
+
+               timer@600 {     /* General Purpose Timer */
+                       compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+                       cell-index = <0>;
+                       reg = <0x600 0x10>;
+                       interrupts = <0x1 0x9 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+                       fsl,has-wdt;
+               };
+
+               timer@610 {     /* General Purpose Timer */
+                       compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt";
+                       cell-index = <1>;
+                       reg = <0x610 0x10>;
+                       interrupts = <0x1 0xa 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+               };
+
+               gpt2: timer@620 { /* General Purpose Timer in GPIO mode */
+                       compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
+                       cell-index = <2>;
+                       reg = <0x620 0x10>;
+                       interrupts = <0x1 0xb 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+               };
+
+               gpt3: timer@630 { /* General Purpose Timer in GPIO mode */
+                       compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
+                       cell-index = <3>;
+                       reg = <0x630 0x10>;
+                       interrupts = <0x1 0xc 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+               };
+
+               gpt4: timer@640 { /* General Purpose Timer in GPIO mode */
+                       compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
+                       cell-index = <4>;
+                       reg = <0x640 0x10>;
+                       interrupts = <0x1 0xd 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+               };
+
+               gpt5: timer@650 { /* General Purpose Timer in GPIO mode */
+                       compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
+                       cell-index = <5>;
+                       reg = <0x650 0x10>;
+                       interrupts = <0x1 0xe 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+               };
+
+               gpt6: timer@660 { /* General Purpose Timer in GPIO mode */
+                       compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
+                       cell-index = <6>;
+                       reg = <0x660 0x10>;
+                       interrupts = <0x1 0xf 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+               };
+
+               gpt7: timer@670 { /* General Purpose Timer in GPIO mode */
+                       compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
+                       cell-index = <7>;
+                       reg = <0x670 0x10>;
+                       interrupts = <0x1 0x10 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+               };
+
+               rtc@800 {       // Real time clock
+                       compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc";
+                       device_type = "rtc";
+                       reg = <0x800 0x100>;
+                       interrupts = <0x1 0x5 0x0 0x1 0x6 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+               };
+
+               can@900 {
+                       compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
+                       cell-index = <0>;
+                       interrupts = <0x2 0x11 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+                       reg = <0x900 0x80>;
+               };
+
+               can@980 {
+                       compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan";
+                       cell-index = <1>;
+                       interrupts = <0x2 0x12 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+                       reg = <0x980 0x80>;
+               };
+
+               gpio_simple: gpio@b00 {
+                       compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio";
+                       reg = <0xb00 0x40>;
+                       interrupts = <0x1 0x7 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+               };
+
+               gpio_wkup: gpio-wkup@c00 {
+                       compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup";
+                       reg = <0xc00 0x40>;
+                       interrupts = <0x1 0x8 0x0 0x0 0x3 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+                       gpio-controller;
+                       #gpio-cells = <2>;
+               };
+
+               spi@f00 {
+                       compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi";
+                       reg = <0xf00 0x20>;
+                       interrupts = <0x2 0xd 0x0 0x2 0xe 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+               };
+
+               usb@1000 {
+                       compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be";
+                       reg = <0x1000 0xff>;
+                       interrupts = <0x2 0x6 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+               };
+
+               dma-controller@1200 {
+                       device_type = "dma-controller";
+                       compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm";
+                       reg = <0x1200 0x80>;
+                       interrupts = <0x3 0x0 0x0  0x3 0x1 0x0  0x3 0x2 0x0  0x3 0x3 0x0
+                                     0x3 0x4 0x0  0x3 0x5 0x0  0x3 0x6 0x0  0x3 0x7 0x0
+                                     0x3 0x8 0x0  0x3 0x9 0x0  0x3 0xa 0x0  0x3 0xb 0x0
+                                     0x3 0xc 0x0  0x3 0xd 0x0  0x3 0xe 0x0  0x3 0xf 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+               };
+
+               xlb@1f00 {
+                       compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb";
+                       reg = <0x1f00 0x100>;
+               };
+
+               ac97@2000 { /* PSC1 in ac97 mode */
+                       device_type = "sound";
+                       compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97";
+                       cell-index = <0>;
+                       reg = <0x2000 0x100>;
+                       interrupts = <0x2 0x2 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+               };
+
+               /* PSC2 port is used by CAN1/2 */
+
+               serial@2400 { /* PSC3 in UART mode */
+                       device_type = "serial";
+                       compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
+                       port-number = <0>;
+                       cell-index = <2>;
+                       reg = <0x2400 0x100>;
+                       interrupts = <0x2 0x3 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+               };
+
+               /* PSC4 is ??? */
+
+               /* PSC5 is ??? */
+
+               serial@2c00 { /* PSC6 in UART mode */
+                       device_type = "serial";
+                       compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
+                       port-number = <1>;
+                       cell-index = <5>;
+                       reg = <0x2c00 0x100>;
+                       interrupts = <0x2 0x4 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+               };
+
+               ethernet@3000 {
+                       device_type = "network";
+                       compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec";
+                       reg = <0x3000 0x400>;
+                       local-mac-address = [00 00 00 00 00 00];
+                       interrupts = <0x2 0x5 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+                       phy-handle = <&phy0>;
+               };
+
+               mdio@3000 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       compatible = "fsl,mpc5200b-mdio", "fsl,mpc5200-mdio";
+                       reg = <0x3000 0x400>;   /* fec range, since we need to setup fec interrupts */
+                       interrupts = <0x2 0x5 0x0>;     /* these are for "mii command finished", not link changes & co. */
+                       interrupt-parent = <&mpc5200_pic>;
+
+                       phy0:ethernet-phy@0 {
+                               device_type = "ethernet-phy";
+                               reg = <0x0>;
+                       };
+               };
+
+               ata@3a00 {
+                       device_type = "ata";
+                       compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata";
+                       reg = <0x3a00 0x100>;
+                       interrupts = <0x2 0x7 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+               };
+
+               i2c@3d00 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
+                       cell-index = <0>;
+                       reg = <0x3d00 0x40>;
+                       interrupts = <0x2 0xf 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+                       fsl5200-clocking;
+               };
+
+               i2c@3d40 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+                       compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c";
+                       cell-index = <1>;
+                       reg = <0x3d40 0x40>;
+                       interrupts = <0x2 0x10 0x0>;
+                       interrupt-parent = <&mpc5200_pic>;
+                       fsl5200-clocking;
+                       rtc@51 {
+                               device_type = "rtc";
+                               compatible = "nxp,pcf8563";
+                               reg = <0x51>;
+                       };
+                       /* FIXME: EEPROM */
+               };
+
+               sram@8000 {
+                       compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram","sram";
+                       reg = <0x8000 0x4000>;
+               };
+
+               /* This is only an example device to show the usage of gpios. It maps all available
+                * gpios to the "gpio-provider" device.
+                */
+               gpio {
+                       compatible = "gpio-provider";
+
+                                                   /* mpc52xx          exp.con         patchfield */
+                       gpios = <&gpio_wkup     0 0 /* GPIO_WKUP_7      11d             jp13-3     */
+                                &gpio_wkup     1 0 /* GPIO_WKUP_6      14c                        */
+                                &gpio_wkup     6 0 /* PSC2_4           43c             x5-11      */
+                                &gpio_simple   2 0 /* IRDA_1           24c             x7-6    set GPS_PORT_CONFIG[IRDA] = 0 */
+                                &gpio_simple   3 0 /* IRDA_0                           x8-5    set GPS_PORT_CONFIG[IRDA] = 0 */
+                                &gpt2          0 0 /* timer2           12d             x4-4       */
+                                &gpt3          0 0 /* timer3           13d             x6-4       */
+                                &gpt4          0 0 /* timer4           61c             x2-16      */
+                                &gpt5          0 0 /* timer5           44c             x7-11      */
+                                &gpt6          0 0 /* timer6           60c             x8-15      */
+                                &gpt7          0 0 /* timer7           36a             x17-9      */
+                                >;
+               };
+       };
+
+       pci@f0000d00 {
+               #interrupt-cells = <1>;
+               #size-cells = <2>;
+               #address-cells = <3>;
+               device_type = "pci";
+               compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci";
+               reg = <0xf0000d00 0x100>;
+               interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
+               interrupt-map = <0xc000 0x0 0x0 0x1 &mpc5200_pic 0x0 0x0 0x3 /* 1st slot */
+                                0xc000 0x0 0x0 0x2 &mpc5200_pic 0x1 0x1 0x3
+                                0xc000 0x0 0x0 0x3 &mpc5200_pic 0x1 0x2 0x3
+                                0xc000 0x0 0x0 0x4 &mpc5200_pic 0x1 0x3 0x3
+
+                                0xc800 0x0 0x0 0x1 &mpc5200_pic 0x1 0x1 0x3 /* 2nd slot */
+                                0xc800 0x0 0x0 0x2 &mpc5200_pic 0x1 0x2 0x3
+                                0xc800 0x0 0x0 0x3 &mpc5200_pic 0x1 0x3 0x3
+                                0xc800 0x0 0x0 0x4 &mpc5200_pic 0x0 0x0 0x3>;
+               clock-frequency = <0>; // From boot loader
+               interrupts = <0x2 0x8 0x0 0x2 0x9 0x0 0x2 0xa 0x0>;
+               interrupt-parent = <&mpc5200_pic>;
+               bus-range = <0 0>;
+               ranges = <0x42000000 0x0 0x80000000 0x80000000 0x0 0x20000000
+                         0x02000000 0x0 0xa0000000 0xa0000000 0x0 0x10000000
+                         0x01000000 0x0 0x00000000 0xb0000000 0x0 0x01000000>;
+       };
+};
index 65bcea6a0173ab9b1a475e4b2f2210d297fc746b..773a68e0005813de3918fa8031538ae50721a186 100644
@@ -10,6 +10,8 @@
  * option) any later version.
  */
 
+/dts-v1/;
+
 / {
        model = "tqc,tqm5200";
        compatible = "tqc,tqm5200";
                PowerPC,5200@0 {
                        device_type = "cpu";
                        reg = <0>;
-                       d-cache-line-size = <20>;
-                       i-cache-line-size = <20>;
-                       d-cache-size = <4000>;          // L1, 16K
-                       i-cache-size = <4000>;          // L1, 16K
+                       d-cache-line-size = <32>;
+                       i-cache-line-size = <32>;
+                       d-cache-size = <0x4000>;        // L1, 16K
+                       i-cache-size = <0x4000>;        // L1, 16K
                        timebase-frequency = <0>;       // from bootloader
                        bus-frequency = <0>;            // from bootloader
                        clock-frequency = <0>;          // from bootloader
 
        memory {
                device_type = "memory";
-               reg = <00000000 04000000>;      // 64MB
+               reg = <0x00000000 0x04000000>;  // 64MB
        };
 
        soc5200@f0000000 {
                #address-cells = <1>;
                #size-cells = <1>;
                compatible = "fsl,mpc5200-immr";
-               ranges = <0 f0000000 0000c000>;
-               reg = <f0000000 00000100>;
+               ranges = <0 0xf0000000 0x0000c000>;
+               reg = <0xf0000000 0x00000100>;
                bus-frequency = <0>;            // from bootloader
                system-frequency = <0>;         // from bootloader
 
                cdm@200 {
                        compatible = "fsl,mpc5200-cdm";
-                       reg = <200 38>;
+                       reg = <0x200 0x38>;
                };
 
                mpc5200_pic: interrupt-controller@500 {
                        interrupt-controller;
                        #interrupt-cells = <3>;
                        compatible = "fsl,mpc5200-pic";
-                       reg = <500 80>;
+                       reg = <0x500 0x80>;
                };
 
                timer@600 {     // General Purpose Timer
                        compatible = "fsl,mpc5200-gpt";
-                       reg = <600 10>;
+                       reg = <0x600 0x10>;
                        interrupts = <1 9 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        fsl,has-wdt;
 
                gpio@b00 {
                        compatible = "fsl,mpc5200-gpio";
-                       reg = <b00 40>;
+                       reg = <0xb00 0x40>;
                        interrupts = <1 7 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                usb@1000 {
                        compatible = "fsl,mpc5200-ohci","ohci-be";
-                       reg = <1000 ff>;
+                       reg = <0x1000 0xff>;
                        interrupts = <2 6 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                dma-controller@1200 {
                        compatible = "fsl,mpc5200-bestcomm";
-                       reg = <1200 80>;
+                       reg = <0x1200 0x80>;
                        interrupts = <3 0 0  3 1 0  3 2 0  3 3 0
                                      3 4 0  3 5 0  3 6 0  3 7 0
-                                     3 8 0  3 9 0  3 a 0  3 b 0
-                                     3 c 0  3 d 0  3 e 0  3 f 0>;
+                                     3 8 0  3 9 0  3 10 0  3 11 0
+                                     3 12 0  3 13 0  3 14 0  3 15 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
 
                xlb@1f00 {
                        compatible = "fsl,mpc5200-xlb";
-                       reg = <1f00 100>;
+                       reg = <0x1f00 0x100>;
                };
 
                serial@2000 {           // PSC1
                        device_type = "serial";
                        compatible = "fsl,mpc5200-psc-uart";
                        port-number = <0>;  // Logical port assignment
-                       reg = <2000 100>;
+                       reg = <0x2000 0x100>;
                        interrupts = <2 1 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                        device_type = "serial";
                        compatible = "fsl,mpc5200-psc-uart";
                        port-number = <1>;  // Logical port assignment
-                       reg = <2200 100>;
+                       reg = <0x2200 0x100>;
                        interrupts = <2 2 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                        device_type = "serial";
                        compatible = "fsl,mpc5200-psc-uart";
                        port-number = <2>;  // Logical port assignment
-                       reg = <2400 100>;
+                       reg = <0x2400 0x100>;
                        interrupts = <2 3 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                ethernet@3000 {
                        device_type = "network";
                        compatible = "fsl,mpc5200-fec";
-                       reg = <3000 400>;
+                       reg = <0x3000 0x400>;
                        local-mac-address = [ 00 00 00 00 00 00 ];
                        interrupts = <2 5 0>;
                        interrupt-parent = <&mpc5200_pic>;
                mdio@3000 {
                        #address-cells = <1>;
                        #size-cells = <0>;
-                       compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio";
-                       reg = <3000 400>;       // fec range, since we need to setup fec interrupts
+                       compatible = "fsl,mpc5200-mdio";
+                       reg = <0x3000 0x400>;       // fec range, since we need to setup fec interrupts
                        interrupts = <2 5 0>;   // these are for "mii command finished", not link changes & co.
                        interrupt-parent = <&mpc5200_pic>;
 
 
                ata@3a00 {
                        compatible = "fsl,mpc5200-ata";
-                       reg = <3a00 100>;
+                       reg = <0x3a00 0x100>;
                        interrupts = <2 7 0>;
                        interrupt-parent = <&mpc5200_pic>;
                };
                        #address-cells = <1>;
                        #size-cells = <0>;
                        compatible = "fsl,mpc5200-i2c","fsl-i2c";
-                       reg = <3d40 40>;
-                       interrupts = <2 10 0>;
+                       reg = <0x3d40 0x40>;
+                       interrupts = <2 16 0>;
                        interrupt-parent = <&mpc5200_pic>;
                        fsl5200-clocking;
 
                         rtc@68 {
                                device_type = "rtc";
                                compatible = "dallas,ds1307";
-                               reg = <68>;
+                               reg = <0x68>;
                        };
                };
 
                sram@8000 {
                        compatible = "fsl,mpc5200-sram";
-                       reg = <8000 4000>;
+                       reg = <0x8000 0x4000>;
                };
        };
 
                compatible = "fsl,lpb";
                #address-cells = <2>;
                #size-cells = <1>;
-               ranges = <0 0 fc000000 02000000>;
+               ranges = <0 0 0xfc000000 0x02000000>;
 
                flash@0,0 {
                        compatible = "cfi-flash";
-                       reg = <0 0 02000000>;
+                       reg = <0 0 0x02000000>;
                        bank-width = <4>;
                        device-width = <2>;
                        #size-cells = <1>;
                #address-cells = <3>;
                device_type = "pci";
                compatible = "fsl,mpc5200-pci";
-               reg = <f0000d00 100>;
-               interrupt-map-mask = <f800 0 0 7>;
-               interrupt-map = <c000 0 0 1 &mpc5200_pic 0 0 3
-                                c000 0 0 2 &mpc5200_pic 0 0 3
-                                c000 0 0 3 &mpc5200_pic 0 0 3
-                                c000 0 0 4 &mpc5200_pic 0 0 3>;
+               reg = <0xf0000d00 0x100>;
+               interrupt-map-mask = <0xf800 0 0 7>;
+               interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3
+                                0xc000 0 0 2 &mpc5200_pic 0 0 3
+                                0xc000 0 0 3 &mpc5200_pic 0 0 3
+                                0xc000 0 0 4 &mpc5200_pic 0 0 3>;
                clock-frequency = <0>; // From boot loader
-               interrupts = <2 8 0 2 9 0 2 a 0>;
+               interrupts = <2 8 0 2 9 0 2 10 0>;
                interrupt-parent = <&mpc5200_pic>;
                bus-range = <0 0>;
-               ranges = <42000000 0 80000000 80000000 0 10000000
-                         02000000 0 90000000 90000000 0 10000000
-                         01000000 0 00000000 a0000000 0 01000000>;
+               ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000
+                         0x02000000 0 0x90000000 0x90000000 0 0x10000000
+                         0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
        };
 };
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
new file mode 100644
index 0000000..c10f739
--- /dev/null
@@ -0,0 +1,1099 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.25
+# Tue Apr 29 07:11:37 2008
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+CONFIG_6xx=y
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_E200 is not set
+CONFIG_PPC_FPU=y
+# CONFIG_ALTIVEC is not set
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_PPC_MM_SLICES is not set
+# CONFIG_SMP is not set
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
+CONFIG_IRQ_PER_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+# CONFIG_PPC_UDBG_16550 is not set
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFAULT_UIMAGE=y
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_KALLSYMS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+# CONFIG_EPOLL is not set
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_MODULES is not set
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
+
+#
+# Platform support
+#
+CONFIG_PPC_MULTIPLATFORM=y
+# CONFIG_PPC_82xx is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_86xx is not set
+CONFIG_CLASSIC32=y
+# CONFIG_PPC_CHRP is not set
+# CONFIG_PPC_MPC512x is not set
+# CONFIG_PPC_MPC5121 is not set
+# CONFIG_MPC5121_ADS is not set
+CONFIG_PPC_MPC52xx=y
+CONFIG_PPC_MPC5200_SIMPLE=y
+# CONFIG_PPC_EFIKA is not set
+# CONFIG_PPC_LITE5200 is not set
+# CONFIG_PPC_MPC5200_BUGFIX is not set
+# CONFIG_PPC_MPC5200_GPIO is not set
+# CONFIG_PPC_PMAC is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PQ2ADS is not set
+# CONFIG_EMBEDDED6xx is not set
+# CONFIG_IPIC is not set
+# CONFIG_MPIC is not set
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_TAU is not set
+# CONFIG_FSL_ULI1575 is not set
+CONFIG_PPC_BESTCOMM=y
+# CONFIG_PPC_BESTCOMM_ATA is not set
+CONFIG_PPC_BESTCOMM_FEC=y
+# CONFIG_PPC_BESTCOMM_GEN_BD is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_TICK_ONESHOT is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_IOMMU_HELPER is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_HAS_WALK_MEMORY=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+# CONFIG_KEXEC is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_PM=y
+# CONFIG_PM_LEGACY is not set
+# CONFIG_PM_DEBUG is not set
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_FSL_SOC=y
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+# CONFIG_HAS_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_PAGE_OFFSET=0xc0000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_PHYSICAL_START=0x00000000
+CONFIG_TASK_SIZE=0xc0000000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_OF_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+CONFIG_MTD_PHYSMAP_OF=y
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+CONFIG_OF_DEVICE=y
+CONFIG_OF_I2C=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_ATA is not set
+# CONFIG_MD is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+CONFIG_LXT_PHY=y
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_B44 is not set
+CONFIG_FEC_MPC52xx=y
+CONFIG_FEC_MPC52xx_MDIO=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_IWLWIFI_LEDS is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_MPC52xx=y
+CONFIG_SERIAL_MPC52xx_CONSOLE=y
+CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=57600
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_GEN_RTC is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_MPC5200_WDT is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PPC_SOC=y
+CONFIG_USB_OHCI_HCD_PPC_OF=y
+CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
+# CONFIG_USB_OHCI_HCD_PPC_OF_LE is not set
+CONFIG_USB_OHCI_BIG_ENDIAN_DESC=y
+CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
+# CONFIG_USB_OHCI_LITTLE_ENDIAN is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USB_MON is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_GADGET is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_BIND34 is not set
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SAMPLES is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUGGER is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=y
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+CONFIG_PPC_CLOCK=y
+CONFIG_PPC_LIB_RHEAP=y
+# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
new file mode 100644 (file)
index 0000000..1a8a250
--- /dev/null
@@ -0,0 +1,1049 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.25
+# Tue Apr 29 07:12:56 2008
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+CONFIG_6xx=y
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_E200 is not set
+CONFIG_PPC_FPU=y
+# CONFIG_ALTIVEC is not set
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_PPC_MM_SLICES is not set
+# CONFIG_SMP is not set
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
+CONFIG_IRQ_PER_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+# CONFIG_PPC_UDBG_16550 is not set
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFAULT_UIMAGE=y
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_GROUP_SCHED=y
+# CONFIG_FAIR_GROUP_SCHED is not set
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_KALLSYMS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+# CONFIG_EPOLL is not set
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_KMOD is not set
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
+
+#
+# Platform support
+#
+CONFIG_PPC_MULTIPLATFORM=y
+# CONFIG_PPC_82xx is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_86xx is not set
+CONFIG_CLASSIC32=y
+# CONFIG_PPC_CHRP is not set
+# CONFIG_PPC_MPC512x is not set
+# CONFIG_PPC_MPC5121 is not set
+# CONFIG_MPC5121_ADS is not set
+CONFIG_PPC_MPC52xx=y
+CONFIG_PPC_MPC5200_SIMPLE=y
+# CONFIG_PPC_EFIKA is not set
+CONFIG_PPC_LITE5200=y
+# CONFIG_PPC_MPC5200_BUGFIX is not set
+# CONFIG_PPC_MPC5200_GPIO is not set
+# CONFIG_PPC_PMAC is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PQ2ADS is not set
+# CONFIG_EMBEDDED6xx is not set
+# CONFIG_IPIC is not set
+# CONFIG_MPIC is not set
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_TAU is not set
+# CONFIG_FSL_ULI1575 is not set
+CONFIG_PPC_BESTCOMM=y
+CONFIG_PPC_BESTCOMM_ATA=y
+CONFIG_PPC_BESTCOMM_FEC=y
+CONFIG_PPC_BESTCOMM_GEN_BD=y
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_IOMMU_HELPER is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_HAS_WALK_MEMORY=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+# CONFIG_KEXEC is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_PM=y
+# CONFIG_PM_LEGACY is not set
+# CONFIG_PM_DEBUG is not set
+CONFIG_PM_SLEEP=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_PPC_INDIRECT_PCI is not set
+CONFIG_FSL_SOC=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_SYSCALL=y
+# CONFIG_PCIEPORTBUS is not set
+CONFIG_ARCH_SUPPORTS_MSI=y
+# CONFIG_PCI_MSI is not set
+CONFIG_PCI_LEGACY=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+# CONFIG_HAS_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_PAGE_OFFSET=0xc0000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_PHYSICAL_START=0x00000000
+CONFIG_TASK_SIZE=0xc0000000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=m
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+# CONFIG_MTD is not set
+CONFIG_OF_DEVICE=y
+CONFIG_OF_I2C=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_PHANTOM is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+# CONFIG_BLK_DEV_SD is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_ACARD is not set
+# CONFIG_SCSI_AACRAID is not set
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_MVSAS is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_SRP is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_SATA_PMP=y
+# CONFIG_SATA_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+# CONFIG_SATA_FSL is not set
+CONFIG_ATA_SFF=y
+# CONFIG_SATA_SVW is not set
+# CONFIG_ATA_PIIX is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
+CONFIG_PATA_MPC52xx=y
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+# CONFIG_PATA_PLATFORM is not set
+# CONFIG_MD is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_ARCNET is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+CONFIG_LXT_PHY=y
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_NET_PCI is not set
+# CONFIG_B44 is not set
+CONFIG_FEC_MPC52xx=y
+CONFIG_FEC_MPC52xx_MDIO=y
+CONFIG_NETDEV_1000=y
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_E1000E is not set
+# CONFIG_E1000E_ENABLED is not set
+# CONFIG_IP1000 is not set
+# CONFIG_IGB is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_GIANFAR is not set
+# CONFIG_MV643XX_ETH is not set
+# CONFIG_QLA3XXX is not set
+# CONFIG_ATL1 is not set
+CONFIG_NETDEV_10000=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+# CONFIG_IXGBE is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+# CONFIG_MYRI10GE is not set
+# CONFIG_NETXEN_NIC is not set
+# CONFIG_NIU is not set
+# CONFIG_MLX4_CORE is not set
+# CONFIG_TEHUTI is not set
+# CONFIG_BNX2X is not set
+# CONFIG_TR is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_IWLWIFI_LEDS is not set
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_MPC52xx=y
+CONFIG_SERIAL_MPC52xx_CONSOLE=y
+CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+CONFIG_GEN_RTC=y
+# CONFIG_GEN_RTC_X is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+CONFIG_VIDEO_OUTPUT_CONTROL=m
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+# CONFIG_USB is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+# CONFIG_USB_GADGET is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_INFINIBAND is not set
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_BIND34 is not set
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_NLS is not set
+# CONFIG_DLM is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SAMPLES is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUGGER is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+# CONFIG_CRYPTO_ECB is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+CONFIG_PPC_CLOCK=y
+CONFIG_PPC_LIB_RHEAP=y
+# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
new file mode 100644 (file)
index 0000000..8c7ba7c
--- /dev/null
@@ -0,0 +1,1107 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.25
+# Tue Apr 29 07:12:22 2008
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+CONFIG_6xx=y
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_E200 is not set
+CONFIG_PPC_FPU=y
+# CONFIG_ALTIVEC is not set
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_PPC_MM_SLICES is not set
+# CONFIG_SMP is not set
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
+CONFIG_IRQ_PER_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+# CONFIG_PPC_UDBG_16550 is not set
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFAULT_UIMAGE=y
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_KALLSYMS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+# CONFIG_EPOLL is not set
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_MODULES is not set
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
+
+#
+# Platform support
+#
+CONFIG_PPC_MULTIPLATFORM=y
+# CONFIG_PPC_82xx is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_86xx is not set
+CONFIG_CLASSIC32=y
+# CONFIG_PPC_CHRP is not set
+# CONFIG_PPC_MPC512x is not set
+# CONFIG_PPC_MPC5121 is not set
+# CONFIG_MPC5121_ADS is not set
+CONFIG_PPC_MPC52xx=y
+CONFIG_PPC_MPC5200_SIMPLE=y
+# CONFIG_PPC_EFIKA is not set
+# CONFIG_PPC_LITE5200 is not set
+# CONFIG_PPC_MPC5200_BUGFIX is not set
+# CONFIG_PPC_MPC5200_GPIO is not set
+# CONFIG_PPC_PMAC is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PQ2ADS is not set
+# CONFIG_EMBEDDED6xx is not set
+# CONFIG_IPIC is not set
+# CONFIG_MPIC is not set
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_TAU is not set
+# CONFIG_FSL_ULI1575 is not set
+CONFIG_PPC_BESTCOMM=y
+# CONFIG_PPC_BESTCOMM_ATA is not set
+CONFIG_PPC_BESTCOMM_FEC=y
+# CONFIG_PPC_BESTCOMM_GEN_BD is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_TICK_ONESHOT is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_IOMMU_HELPER is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_HAS_WALK_MEMORY=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+# CONFIG_KEXEC is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_PM=y
+# CONFIG_PM_LEGACY is not set
+# CONFIG_PM_DEBUG is not set
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_FSL_SOC=y
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+# CONFIG_HAS_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_PAGE_OFFSET=0xc0000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_PHYSICAL_START=0x00000000
+CONFIG_TASK_SIZE=0xc0000000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_OF_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+CONFIG_MTD_ROM=y
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_PHYSMAP_OF is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+CONFIG_OF_DEVICE=y
+CONFIG_OF_I2C=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_MISC_DEVICES=y
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_TGT=y
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_DEBUG is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_SATA_PMP=y
+# CONFIG_SATA_FSL is not set
+CONFIG_ATA_SFF=y
+# CONFIG_SATA_MV is not set
+CONFIG_PATA_MPC52xx=y
+# CONFIG_PATA_PLATFORM is not set
+# CONFIG_MD is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+CONFIG_MARVELL_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_BROADCOM_PHY=y
+CONFIG_ICPLUS_PHY=y
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_FIXED_PHY is not set
+CONFIG_MDIO_BITBANG=y
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_B44 is not set
+CONFIG_FEC_MPC52xx=y
+CONFIG_FEC_MPC52xx_MDIO=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_IWLWIFI_LEDS is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_MPC52xx=y
+CONFIG_SERIAL_MPC52xx_CONSOLE=y
+CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+CONFIG_SENSORS_EEPROM=y
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_MPC5200_WDT is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+CONFIG_DAB=y
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+# CONFIG_EDAC is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+CONFIG_RTC_DRV_DS1307=y
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_BIND34 is not set
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SAMPLES is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUGGER is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=y
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+CONFIG_PPC_CLOCK=y
+CONFIG_PPC_LIB_RHEAP=y
+# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
new file mode 100644 (file)
index 0000000..9c0caa4
--- /dev/null
@@ -0,0 +1,1115 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.25
+# Tue Apr 29 07:13:19 2008
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+CONFIG_6xx=y
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_E200 is not set
+CONFIG_PPC_FPU=y
+# CONFIG_ALTIVEC is not set
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_PPC_MM_SLICES is not set
+# CONFIG_SMP is not set
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
+CONFIG_IRQ_PER_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+# CONFIG_PPC_UDBG_16550 is not set
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFAULT_UIMAGE=y
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_LOCK_KERNEL=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION="trunk"
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+CONFIG_POSIX_MQUEUE=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+# CONFIG_BLK_DEV_INITRD is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_KPROBES is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_KMOD is not set
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+# CONFIG_IOSCHED_AS is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_DEFAULT_AS is not set
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+CONFIG_DEFAULT_NOOP=y
+CONFIG_DEFAULT_IOSCHED="noop"
+CONFIG_CLASSIC_RCU=y
+
+#
+# Platform support
+#
+CONFIG_PPC_MULTIPLATFORM=y
+# CONFIG_PPC_82xx is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_86xx is not set
+CONFIG_CLASSIC32=y
+# CONFIG_PPC_CHRP is not set
+# CONFIG_PPC_MPC512x is not set
+# CONFIG_PPC_MPC5121 is not set
+# CONFIG_MPC5121_ADS is not set
+CONFIG_PPC_MPC52xx=y
+CONFIG_PPC_MPC5200_SIMPLE=y
+# CONFIG_PPC_EFIKA is not set
+# CONFIG_PPC_LITE5200 is not set
+# CONFIG_PPC_MPC5200_BUGFIX is not set
+# CONFIG_PPC_MPC5200_GPIO is not set
+# CONFIG_PPC_PMAC is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PQ2ADS is not set
+# CONFIG_EMBEDDED6xx is not set
+# CONFIG_IPIC is not set
+# CONFIG_MPIC is not set
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_TAU is not set
+# CONFIG_FSL_ULI1575 is not set
+CONFIG_PPC_BESTCOMM=y
+CONFIG_PPC_BESTCOMM_ATA=y
+CONFIG_PPC_BESTCOMM_FEC=y
+CONFIG_PPC_BESTCOMM_GEN_BD=y
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+# CONFIG_SCHED_HRTICK is not set
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+# CONFIG_PREEMPT_RCU is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_IOMMU_HELPER is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_HAS_WALK_MEMORY=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+# CONFIG_KEXEC is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PM is not set
+# CONFIG_SECCOMP is not set
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_PPC_INDIRECT_PCI is not set
+CONFIG_FSL_SOC=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_SYSCALL=y
+# CONFIG_PCIEPORTBUS is not set
+CONFIG_ARCH_SUPPORTS_MSI=y
+# CONFIG_PCI_MSI is not set
+CONFIG_PCI_LEGACY=y
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+# CONFIG_HAS_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_PAGE_OFFSET=0xc0000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_PHYSICAL_START=0x00000000
+CONFIG_TASK_SIZE=0xc0000000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_OF_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x0
+CONFIG_MTD_PHYSMAP_LEN=0x0
+CONFIG_MTD_PHYSMAP_BANKWIDTH=1
+# CONFIG_MTD_PHYSMAP_OF is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+CONFIG_OF_DEVICE=y
+CONFIG_OF_I2C=y
+# CONFIG_PARPORT is not set
+# CONFIG_BLK_DEV is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=m
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=m
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_SATA_PMP=y
+# CONFIG_SATA_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+# CONFIG_SATA_FSL is not set
+CONFIG_ATA_SFF=y
+# CONFIG_SATA_SVW is not set
+# CONFIG_ATA_PIIX is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SX4 is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CS5520 is not set
+# CONFIG_PATA_CS5530 is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_ATA_GENERIC is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_MARVELL is not set
+CONFIG_PATA_MPC52xx=m
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_SC1200 is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+# CONFIG_PATA_PLATFORM is not set
+# CONFIG_MD is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_IEEE1394 is not set
+# CONFIG_I2O is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+# CONFIG_ARCNET is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_NET_PCI is not set
+# CONFIG_B44 is not set
+CONFIG_FEC_MPC52xx=y
+CONFIG_FEC_MPC52xx_MDIO=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_TR is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_IWLWIFI_LEDS is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NET_FC is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_MPC52xx=y
+CONFIG_SERIAL_MPC52xx_CONSOLE=y
+CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=9600
+# CONFIG_SERIAL_JSM is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_NVRAM is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_TINY_USB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+CONFIG_SENSORS_EEPROM=m
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_EHCI_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=m
+# CONFIG_USB_OHCI_HCD_PPC_SOC is not set
+CONFIG_USB_OHCI_HCD_PPC_OF=y
+CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
+# CONFIG_USB_OHCI_HCD_PPC_OF_LE is not set
+# CONFIG_USB_OHCI_HCD_PCI is not set
+CONFIG_USB_OHCI_BIG_ENDIAN_DESC=y
+CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
+# CONFIG_USB_OHCI_LITTLE_ENDIAN is not set
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USB_MON is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_GADGET is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_INFINIBAND is not set
+# CONFIG_EDAC is not set
+CONFIG_RTC_LIB=m
+CONFIG_RTC_CLASS=m
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+# CONFIG_RTC_DRV_DS1307 is not set
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+CONFIG_RTC_DRV_PCF8563=m
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=m
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=m
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=m
+CONFIG_FS_MBCACHE=m
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY is not set
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+# CONFIG_MSDOS_FS is not set
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_SUNRPC_BIND34 is not set
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+# CONFIG_NLS_CODEPAGE_437 is not set
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=y
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_KERNEL is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+# CONFIG_SAMPLES is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+# CONFIG_CRYPTO is not set
+CONFIG_PPC_CLOCK=y
+CONFIG_PPC_LIB_RHEAP=y
+# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
new file mode 100644 (file)
index 0000000..7672bfb
--- /dev/null
@@ -0,0 +1,1214 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.25
+# Tue Apr 29 07:12:39 2008
+#
+# CONFIG_PPC64 is not set
+
+#
+# Processor support
+#
+CONFIG_6xx=y
+# CONFIG_PPC_85xx is not set
+# CONFIG_PPC_8xx is not set
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_E200 is not set
+CONFIG_PPC_FPU=y
+# CONFIG_ALTIVEC is not set
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_PPC_MM_SLICES is not set
+# CONFIG_SMP is not set
+CONFIG_PPC32=y
+CONFIG_WORD_SIZE=32
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_HARDIRQS=y
+# CONFIG_HAVE_SETUP_PER_CPU_AREA is not set
+CONFIG_IRQ_PER_CPU=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_ARCH_HAS_ILOG2_U32=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_GENERIC_FIND_NEXT_BIT=y
+# CONFIG_ARCH_NO_VIRT_TO_BUS is not set
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_OF=y
+# CONFIG_PPC_UDBG_16550 is not set
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_AUDIT_ARCH=y
+CONFIG_GENERIC_BUG=y
+CONFIG_DEFAULT_UIMAGE=y
+# CONFIG_PPC_DCR_NATIVE is not set
+# CONFIG_PPC_DCR_MMIO is not set
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+
+#
+# General setup
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_CGROUPS is not set
+CONFIG_GROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_RT_GROUP_SCHED is not set
+CONFIG_USER_SCHED=y
+# CONFIG_CGROUP_SCHED is not set
+CONFIG_SYSFS_DEPRECATED=y
+CONFIG_SYSFS_DEPRECATED_V2=y
+# CONFIG_RELAY is not set
+# CONFIG_NAMESPACES is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SYSCTL_SYSCALL is not set
+# CONFIG_KALLSYMS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_COMPAT_BRK=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_ANON_INODES=y
+# CONFIG_EPOLL is not set
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_VM_EVENT_COUNTERS=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_SLAB is not set
+CONFIG_SLUB=y
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+# CONFIG_MARKERS is not set
+CONFIG_HAVE_OPROFILE=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_MODVERSIONS=y
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+CONFIG_BLOCK=y
+# CONFIG_LBD is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_LSF is not set
+# CONFIG_BLK_DEV_BSG is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_CLASSIC_RCU=y
+
+#
+# Platform support
+#
+CONFIG_PPC_MULTIPLATFORM=y
+# CONFIG_PPC_82xx is not set
+# CONFIG_PPC_83xx is not set
+# CONFIG_PPC_86xx is not set
+CONFIG_CLASSIC32=y
+# CONFIG_PPC_CHRP is not set
+# CONFIG_PPC_MPC512x is not set
+# CONFIG_PPC_MPC5121 is not set
+# CONFIG_MPC5121_ADS is not set
+CONFIG_PPC_MPC52xx=y
+CONFIG_PPC_MPC5200_SIMPLE=y
+# CONFIG_PPC_EFIKA is not set
+# CONFIG_PPC_LITE5200 is not set
+CONFIG_PPC_MPC5200_BUGFIX=y
+# CONFIG_PPC_MPC5200_GPIO is not set
+# CONFIG_PPC_PMAC is not set
+# CONFIG_PPC_CELL is not set
+# CONFIG_PPC_CELL_NATIVE is not set
+# CONFIG_PQ2ADS is not set
+# CONFIG_EMBEDDED6xx is not set
+# CONFIG_IPIC is not set
+# CONFIG_MPIC is not set
+# CONFIG_MPIC_WEIRD is not set
+# CONFIG_PPC_I8259 is not set
+# CONFIG_PPC_RTAS is not set
+# CONFIG_MMIO_NVRAM is not set
+# CONFIG_PPC_MPC106 is not set
+# CONFIG_PPC_970_NAP is not set
+# CONFIG_PPC_INDIRECT_IO is not set
+# CONFIG_GENERIC_IOMAP is not set
+# CONFIG_CPU_FREQ is not set
+# CONFIG_TAU is not set
+# CONFIG_FSL_ULI1575 is not set
+CONFIG_PPC_BESTCOMM=y
+# CONFIG_PPC_BESTCOMM_ATA is not set
+CONFIG_PPC_BESTCOMM_FEC=y
+# CONFIG_PPC_BESTCOMM_GEN_BD is not set
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_TICK_ONESHOT is not set
+# CONFIG_NO_HZ is not set
+# CONFIG_HIGH_RES_TIMERS is not set
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+# CONFIG_SCHED_HRTICK is not set
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_IOMMU_HELPER is not set
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_HAS_WALK_MEMORY=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+# CONFIG_KEXEC is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_ARCH_POPULATES_NODE_MAP=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+# CONFIG_RESOURCES_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_PM=y
+# CONFIG_PM_LEGACY is not set
+# CONFIG_PM_DEBUG is not set
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_ZONE_DMA=y
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_FSL_SOC=y
+# CONFIG_PCI is not set
+# CONFIG_PCI_DOMAINS is not set
+# CONFIG_PCI_SYSCALL is not set
+# CONFIG_ARCH_SUPPORTS_MSI is not set
+# CONFIG_PCCARD is not set
+# CONFIG_HAS_RAPIDIO is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_PAGE_OFFSET=0xc0000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_PHYSICAL_START=0x00000000
+CONFIG_TASK_SIZE=0xc0000000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+CONFIG_XFRM=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_XFRM_TUNNEL is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_MD5SIG is not set
+# CONFIG_IPV6 is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETFILTER is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+
+#
+# Wireless
+#
+# CONFIG_CFG80211 is not set
+# CONFIG_WIRELESS_EXT is not set
+# CONFIG_MAC80211 is not set
+# CONFIG_IEEE80211 is not set
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_CONNECTOR is not set
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_OF_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_MTD_OOPS is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+CONFIG_MTD_ROM=y
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+CONFIG_MTD_PHYSMAP_OF=y
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# UBI - Unsorted block images
+#
+# CONFIG_MTD_UBI is not set
+CONFIG_OF_DEVICE=y
+CONFIG_OF_I2C=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+# CONFIG_BLK_DEV_XIP is not set
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_MISC_DEVICES is not set
+CONFIG_HAVE_IDE=y
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_TGT is not set
+# CONFIG_SCSI_NETLINK is not set
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+# CONFIG_SCSI_SCAN_ASYNC is not set
+CONFIG_SCSI_WAIT_SCAN=m
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_DEBUG is not set
+CONFIG_ATA=y
+# CONFIG_ATA_NONSTANDARD is not set
+CONFIG_SATA_PMP=y
+# CONFIG_SATA_FSL is not set
+CONFIG_ATA_SFF=y
+# CONFIG_SATA_MV is not set
+CONFIG_PATA_MPC52xx=y
+CONFIG_PATA_PLATFORM=y
+# CONFIG_PATA_OF_PLATFORM is not set
+# CONFIG_MD is not set
+# CONFIG_MACINTOSH_DRIVERS is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NETDEVICES_MULTIQUEUE is not set
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+# CONFIG_VETH is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+CONFIG_LXT_PHY=y
+# CONFIG_CICADA_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_BROADCOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_REALTEK_PHY is not set
+# CONFIG_FIXED_PHY is not set
+# CONFIG_MDIO_BITBANG is not set
+CONFIG_NET_ETHERNET=y
+# CONFIG_MII is not set
+# CONFIG_IBM_NEW_EMAC_ZMII is not set
+# CONFIG_IBM_NEW_EMAC_RGMII is not set
+# CONFIG_IBM_NEW_EMAC_TAH is not set
+# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
+# CONFIG_B44 is not set
+CONFIG_FEC_MPC52xx=y
+CONFIG_FEC_MPC52xx_MDIO=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+
+#
+# Wireless LAN
+#
+# CONFIG_WLAN_PRE80211 is not set
+# CONFIG_WLAN_80211 is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_IWLWIFI_LEDS is not set
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+# CONFIG_ISDN is not set
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+# CONFIG_INPUT is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_SERIAL_MPC52xx=y
+CONFIG_SERIAL_MPC52xx_CONSOLE=y
+CONFIG_SERIAL_MPC52xx_CONSOLE_BAUD=115200
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+# CONFIG_IPMI_HANDLER is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_NVRAM is not set
+# CONFIG_R3964 is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_TINY_USB is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_DS1682 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_PCF8575 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+# CONFIG_SPI is not set
+# CONFIG_W1 is not set
+# CONFIG_POWER_SUPPLY is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7473 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+# CONFIG_THERMAL is not set
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+# CONFIG_MPC5200_WDT is not set
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+
+#
+# Sonics Silicon Backplane
+#
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_DVB_CORE is not set
+# CONFIG_DAB is not set
+
+#
+# Graphics support
+#
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+# CONFIG_FB is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Display device support
+#
+# CONFIG_DISPLAY_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB_ARCH_HAS_EHCI is not set
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_SUSPEND is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PPC_SOC=y
+CONFIG_USB_OHCI_HCD_PPC_OF=y
+CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
+# CONFIG_USB_OHCI_HCD_PPC_OF_LE is not set
+CONFIG_USB_OHCI_BIG_ENDIAN_DESC=y
+CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
+# CONFIG_USB_OHCI_LITTLE_ENDIAN is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_LIBUSUAL is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_BERRY_CHARGE is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGET is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_GADGET is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
+# CONFIG_EDAC is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+CONFIG_RTC_DRV_DS1307=y
+# CONFIG_RTC_DRV_DS1374 is not set
+# CONFIG_RTC_DRV_DS1672 is not set
+# CONFIG_RTC_DRV_MAX6900 is not set
+# CONFIG_RTC_DRV_RS5C372 is not set
+# CONFIG_RTC_DRV_ISL1208 is not set
+# CONFIG_RTC_DRV_X1205 is not set
+# CONFIG_RTC_DRV_PCF8563 is not set
+# CONFIG_RTC_DRV_PCF8583 is not set
+# CONFIG_RTC_DRV_M41T80 is not set
+# CONFIG_RTC_DRV_S35390A is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+# CONFIG_RTC_DRV_CMOS is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_DMADEVICES is not set
+# CONFIG_UIO is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4DEV_FS is not set
+CONFIG_JBD=y
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_HUGETLB_PAGE is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+# CONFIG_JFFS2_FS_XATTR is not set
+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+# CONFIG_JFFS2_RUBIN is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+# CONFIG_SUNRPC_BIND34 is not set
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_SYSV68_PARTITION is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+# CONFIG_NLS_CODEPAGE_850 is not set
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+# CONFIG_NLS_ISO8859_2 is not set
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_UTF8 is not set
+# CONFIG_DLM is not set
+
+#
+# Library routines
+#
+CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+# CONFIG_CRC_ITU_T is not set
+CONFIG_CRC32=y
+# CONFIG_CRC7 is not set
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_PLIST=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_HAVE_LMB=y
+
+#
+# Kernel hacking
+#
+CONFIG_PRINTK_TIME=y
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+CONFIG_DETECT_SOFTLOCKUP=y
+CONFIG_SCHED_DEBUG=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_TIMER_STATS is not set
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_SLUB_STATS is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_SAMPLES is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+# CONFIG_DEBUGGER is not set
+# CONFIG_IRQSTACKS is not set
+# CONFIG_BDI_SWITCH is not set
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_PPC_EARLY_DEBUG is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITY_FILE_CAPABILITIES is not set
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_GF128MUL is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+# CONFIG_CRYPTO_AUTHENC is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=y
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_XCBC is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_LZO is not set
+CONFIG_CRYPTO_HW=y
+CONFIG_PPC_CLOCK=y
+CONFIG_PPC_LIB_RHEAP=y
+# CONFIG_VIRTUALIZATION is not set
index a20501f89474a009cfed6420a0dbaad570ff4f4d..88338a9f5e95536d67246c04fcde09baa316c927 100644 (file)
@@ -696,6 +696,7 @@ CONFIG_WINDFARM=y
 CONFIG_WINDFARM_PM81=y
 CONFIG_WINDFARM_PM91=y
 CONFIG_WINDFARM_PM112=y
+CONFIG_WINDFARM_PM121=y
 # CONFIG_PMAC_RACKMETER is not set
 CONFIG_NETDEVICES=y
 # CONFIG_NETDEVICES_MULTIQUEUE is not set
index 9177b21b1a954a17b531fc1884e6b8bc61510cb1..d14cebf62bb03e42732c174a67d5f2134dffe13e 100644 (file)
@@ -73,7 +73,6 @@ pci64-$(CONFIG_PPC64)         += pci_dn.o isa-bridge.o
 obj-$(CONFIG_PCI)              += pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \
                                   pci-common.o
 obj-$(CONFIG_PCI_MSI)          += msi.o
-obj-$(CONFIG_RAPIDIO)          += rio.o
 obj-$(CONFIG_KEXEC)            += machine_kexec.o crash.o \
                                   machine_kexec_$(CONFIG_WORD_SIZE).o
 obj-$(CONFIG_AUDIT)            += audit.o
index adf1d09d726f9102199ca87ec0aeb01828349128..ec9228d687b08515319694001c6c1feffb577944 100644 (file)
 #include <linux/mm.h>
 #include <linux/suspend.h>
 #include <linux/hrtimer.h>
+#ifdef CONFIG_KVM
+#include <linux/kvm_host.h>
+#endif
 #ifdef CONFIG_PPC64
 #include <linux/time.h>
 #include <linux/hardirq.h>
 #endif
+#include <linux/kbuild.h>
 
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/iseries/alpaca.h>
 #endif
 
-#define DEFINE(sym, val) \
-       asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 int main(void)
 {
        DEFINE(THREAD, offsetof(struct task_struct, thread));
@@ -64,6 +63,7 @@ int main(void)
 #endif /* CONFIG_PPC64 */
 
        DEFINE(KSP, offsetof(struct thread_struct, ksp));
+       DEFINE(KSP_LIMIT, offsetof(struct thread_struct, ksp_limit));
        DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
        DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
        DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
@@ -324,5 +324,30 @@ int main(void)
 
        DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
 
+#ifdef CONFIG_KVM
+       DEFINE(TLBE_BYTES, sizeof(struct tlbe));
+
+       DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
+       DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
+       DEFINE(VCPU_HOST_TLB, offsetof(struct kvm_vcpu, arch.host_tlb));
+       DEFINE(VCPU_SHADOW_TLB, offsetof(struct kvm_vcpu, arch.shadow_tlb));
+       DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
+       DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+       DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+       DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+       DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+       DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+       DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr));
+       DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
+       DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
+       DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
+       DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7));
+       DEFINE(VCPU_PID, offsetof(struct kvm_vcpu, arch.pid));
+
+       DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+       DEFINE(VCPU_FAULT_DEAR, offsetof(struct kvm_vcpu, arch.fault_dear));
+       DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr));
+#endif
+
        return 0;
 }
index 84c8686330681761ea173c2a11d56e74f4f6848a..0c8614d9875ca8b5966c148c7ad447bbcf3a987d 100644 (file)
@@ -137,11 +137,12 @@ transfer_to_handler:
 2:     /* if from kernel, check interrupted DOZE/NAP mode and
          * check for stack overflow
          */
-       lwz     r9,THREAD_INFO-THREAD(r12)
-       cmplw   r1,r9                   /* if r1 <= current->thread_info */
+       lwz     r9,KSP_LIMIT(r12)
+       cmplw   r1,r9                   /* if r1 <= ksp_limit */
        ble-    stack_ovf               /* then the kernel stack overflowed */
 5:
 #ifdef CONFIG_6xx
+       rlwinm  r9,r1,0,0,31-THREAD_SHIFT
        tophys(r9,r9)                   /* check local flags */
        lwz     r12,TI_LOCAL_FLAGS(r9)
        mtcrf   0x01,r12
index 215973a2c8d5971fd8431505d1c58b62a4bd5bf7..024805e1747df86a9c1e58f71b581932b70ddae7 100644 (file)
@@ -239,6 +239,10 @@ instruction_access_slb_pSeries:
        .globl  system_call_pSeries
 system_call_pSeries:
        HMT_MEDIUM
+BEGIN_FTR_SECTION
+       cmpdi   r0,0x1ebe
+       beq-    1f
+END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
        mr      r9,r13
        mfmsr   r10
        mfspr   r13,SPRN_SPRG3
@@ -253,6 +257,13 @@ system_call_pSeries:
        rfid
        b       .       /* prevent speculative execution */
 
+/* Fast LE/BE switch system call */
+1:     mfspr   r12,SPRN_SRR1
+       xori    r12,r12,MSR_LE
+       mtspr   SPRN_SRR1,r12
+       rfid            /* return to userspace */
+       b       .
+
        STD_EXCEPTION_PSERIES(0xd00, single_step)
        STD_EXCEPTION_PSERIES(0xe00, trap_0e)
 
index 425616f92d18a6b94ab94aad0f22634a043b0cb6..2f73f705d56449a3c4416f14180264f0e981d0ca 100644 (file)
@@ -307,6 +307,7 @@ void do_IRQ(struct pt_regs *regs)
                if (curtp != irqtp) {
                        struct irq_desc *desc = irq_desc + irq;
                        void *handler = desc->handle_irq;
+                       unsigned long saved_sp_limit = current->thread.ksp_limit;
                        if (handler == NULL)
                                handler = &__do_IRQ;
                        irqtp->task = curtp->task;
@@ -319,7 +320,10 @@ void do_IRQ(struct pt_regs *regs)
                                (irqtp->preempt_count & ~SOFTIRQ_MASK) |
                                (curtp->preempt_count & SOFTIRQ_MASK);
 
+                       current->thread.ksp_limit = (unsigned long)irqtp +
+                               _ALIGN_UP(sizeof(struct thread_info), 16);
                        call_handle_irq(irq, desc, irqtp, handler);
+                       current->thread.ksp_limit = saved_sp_limit;
                        irqtp->task = NULL;
 
 
@@ -352,9 +356,7 @@ void __init init_IRQ(void)
 {
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();
-#ifdef CONFIG_PPC64
        irq_ctx_init();
-#endif
 }
 
 
@@ -383,11 +385,15 @@ void irq_ctx_init(void)
 static inline void do_softirq_onstack(void)
 {
        struct thread_info *curtp, *irqtp;
+       unsigned long saved_sp_limit = current->thread.ksp_limit;
 
        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
+       current->thread.ksp_limit = (unsigned long)irqtp +
+                                   _ALIGN_UP(sizeof(struct thread_info), 16);
        call_do_softirq(irqtp);
+       current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;
 }
 
index 1ffacc698ffb613b3614d666e5f41f15511743bd..1e656b43ad7fb87f45a71c393e9f96ea5e18a504 100644 (file)
@@ -591,10 +591,8 @@ int __init lparcfg_init(void)
                        !firmware_has_feature(FW_FEATURE_ISERIES))
                mode |= S_IWUSR;
 
-       ent = create_proc_entry("ppc64/lparcfg", mode, NULL);
-       if (ent) {
-               ent->proc_fops = &lparcfg_fops;
-       } else {
+       ent = proc_create("ppc64/lparcfg", mode, NULL, &lparcfg_fops);
+       if (!ent) {
                printk(KERN_ERR "Failed to create ppc64/lparcfg\n");
                return -EIO;
        }
index 2d202f274e7384722a0584a224b415420ee5ed11..29a0e039d436011b61f2f97b96e8b3237e992977 100644 (file)
@@ -74,20 +74,20 @@ void __init reserve_crashkernel(void)
        ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
                        &crash_size, &crash_base);
        if (ret == 0 && crash_size > 0) {
-               if (crash_base == 0)
-                       crash_base = KDUMP_KERNELBASE;
                crashk_res.start = crash_base;
-       } else {
-               /* handle the device tree */
-               crash_size = crashk_res.end - crashk_res.start + 1;
+               crashk_res.end = crash_base + crash_size - 1;
        }
 
-       if (crash_size == 0)
+       if (crashk_res.end == crashk_res.start) {
+               crashk_res.start = crashk_res.end = 0;
                return;
+       }
 
        /* We might have got these values via the command line or the
         * device tree, either way sanitise them now. */
 
+       crash_size = crashk_res.end - crashk_res.start + 1;
+
        if (crashk_res.start != KDUMP_KERNELBASE)
                printk("Crash kernel location must be 0x%x\n",
                                KDUMP_KERNELBASE);
index 92ccc6fcc5b03ba87f0e6b52e9dedeece2d874cc..89aaaa6f3561549c896fcd9983a04e96cba72410 100644 (file)
 
        .text
 
+#ifdef CONFIG_IRQSTACKS
+_GLOBAL(call_do_softirq)
+       mflr    r0
+       stw     r0,4(r1)
+       stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
+       mr      r1,r3
+       bl      __do_softirq
+       lwz     r1,0(r1)
+       lwz     r0,4(r1)
+       mtlr    r0
+       blr
+
+_GLOBAL(call_handle_irq)
+       mflr    r0
+       stw     r0,4(r1)
+       mtctr   r6
+       stwu    r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
+       mr      r1,r5
+       bctrl
+       lwz     r1,0(r1)
+       lwz     r0,4(r1)
+       mtlr    r0
+       blr
+#endif /* CONFIG_IRQSTACKS */
+
 /*
  * This returns the high 64 bits of the product of two 64-bit numbers.
  */
index 09fcb50c45aec30647df4eb6650b4ec380f43d2a..cf6b5a7d8b3f764668fed2dbd0f4e3015269493f 100644 (file)
@@ -133,9 +133,6 @@ EXPORT_SYMBOL(adb_try_handler_change);
 EXPORT_SYMBOL(cuda_request);
 EXPORT_SYMBOL(cuda_poll);
 #endif /* CONFIG_ADB_CUDA */
-#ifdef CONFIG_VT
-EXPORT_SYMBOL(kd_mksound);
-#endif
 EXPORT_SYMBOL(to_tm);
 
 #ifdef CONFIG_PPC32
index f78dfce1b771241256d4242c328286cbc3c92a8a..c647ddef40dc4dcf12865f834869101fa9bfb6da 100644 (file)
@@ -68,12 +68,11 @@ static int __init proc_ppc64_init(void)
 {
        struct proc_dir_entry *pde;
 
-       pde = create_proc_entry("ppc64/systemcfg", S_IFREG|S_IRUGO, NULL);
+       pde = proc_create_data("ppc64/systemcfg", S_IFREG|S_IRUGO, NULL,
+                              &page_map_fops, vdso_data);
        if (!pde)
                return 1;
-       pde->data = vdso_data;
        pde->size = PAGE_SIZE;
-       pde->proc_fops = &page_map_fops;
 
        return 0;
 }
index 6caad17ea72e6b0159461a84023c28ab454c637f..7de41c3948ec2dd4d148800c0dcd6421a3c77a2d 100644 (file)
@@ -589,6 +589,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
        kregs = (struct pt_regs *) sp;
        sp -= STACK_FRAME_OVERHEAD;
        p->thread.ksp = sp;
+       p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
+                               _ALIGN_UP(sizeof(struct thread_info), 16);
 
 #ifdef CONFIG_PPC64
        if (cpu_has_feature(CPU_FTR_SLB)) {
diff --git a/arch/powerpc/kernel/rio.c b/arch/powerpc/kernel/rio.c
deleted file mode 100644 (file)
index 29487fe..0000000
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * RapidIO PPC32 support
- *
- * Copyright 2005 MontaVista Software, Inc.
- * Matt Porter <mporter@kernel.crashing.org>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/rio.h>
-
-#include <asm/rio.h>
-
-/**
- * platform_rio_init - Do platform specific RIO init
- *
- * Any platform specific initialization of RapidIO
- * hardware is done here as well as registration
- * of any active master ports in the system.
- */
-void __attribute__ ((weak))
-    platform_rio_init(void)
-{
-       printk(KERN_WARNING "RIO: No platform_rio_init() present\n");
-}
-
-/**
- * ppc_rio_init - Do PPC32 RIO init
- *
- * Calls platform-specific RIO init code and then calls
- * rio_init_mports() to initialize any master ports that
- * have been registered with the RIO subsystem.
- */
-static int __init ppc_rio_init(void)
-{
-       printk(KERN_INFO "RIO: RapidIO init\n");
-
-       /* Platform specific initialization */
-       platform_rio_init();
-
-       /* Enumerate all registered ports */
-       rio_init_mports();
-
-       return 0;
-}
-
-subsys_initcall(ppc_rio_init);
index f2e3bc714d7695834971e627f2c89368c8798755..f9c6abc84a9483a9fce2fb496642ec863f369fa3 100644 (file)
@@ -255,8 +255,6 @@ static void check_location(struct seq_file *m, const char *c);
 
 static int __init proc_rtas_init(void)
 {
-       struct proc_dir_entry *entry;
-
        if (!machine_is(pseries))
                return -ENODEV;
 
@@ -264,35 +262,20 @@ static int __init proc_rtas_init(void)
        if (rtas_node == NULL)
                return -ENODEV;
 
-       entry = create_proc_entry("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL);
-       if (entry)
-               entry->proc_fops = &ppc_rtas_progress_operations;
-
-       entry = create_proc_entry("ppc64/rtas/clock", S_IRUGO|S_IWUSR, NULL);
-       if (entry)
-               entry->proc_fops = &ppc_rtas_clock_operations;
-
-       entry = create_proc_entry("ppc64/rtas/poweron", S_IWUSR|S_IRUGO, NULL);
-       if (entry)
-               entry->proc_fops = &ppc_rtas_poweron_operations;
-
-       entry = create_proc_entry("ppc64/rtas/sensors", S_IRUGO, NULL);
-       if (entry)
-               entry->proc_fops = &ppc_rtas_sensors_operations;
-
-       entry = create_proc_entry("ppc64/rtas/frequency", S_IWUSR|S_IRUGO,
-                                 NULL);
-       if (entry)
-               entry->proc_fops = &ppc_rtas_tone_freq_operations;
-
-       entry = create_proc_entry("ppc64/rtas/volume", S_IWUSR|S_IRUGO, NULL);
-       if (entry)
-               entry->proc_fops = &ppc_rtas_tone_volume_operations;
-
-       entry = create_proc_entry("ppc64/rtas/rmo_buffer", S_IRUSR, NULL);
-       if (entry)
-               entry->proc_fops = &ppc_rtas_rmo_buf_ops;
-
+       proc_create("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL,
+                   &ppc_rtas_progress_operations);
+       proc_create("ppc64/rtas/clock", S_IRUGO|S_IWUSR, NULL,
+                   &ppc_rtas_clock_operations);
+       proc_create("ppc64/rtas/poweron", S_IWUSR|S_IRUGO, NULL,
+                   &ppc_rtas_poweron_operations);
+       proc_create("ppc64/rtas/sensors", S_IRUGO, NULL,
+                   &ppc_rtas_sensors_operations);
+       proc_create("ppc64/rtas/frequency", S_IWUSR|S_IRUGO, NULL,
+                   &ppc_rtas_tone_freq_operations);
+       proc_create("ppc64/rtas/volume", S_IWUSR|S_IRUGO, NULL,
+                   &ppc_rtas_tone_volume_operations);
+       proc_create("ppc64/rtas/rmo_buffer", S_IRUSR, NULL,
+                   &ppc_rtas_rmo_buf_ops);
        return 0;
 }
 
index 627f126d1848b4ec1f5e0eb2e2d8c5870bda93c5..0a5e22b22729f6b70443c1b0c9fb622aca4ea26e 100644 (file)
@@ -704,18 +704,11 @@ static int initialize_flash_pde_data(const char *rtas_call_name,
 static struct proc_dir_entry *create_flash_pde(const char *filename,
                                               const struct file_operations *fops)
 {
-       struct proc_dir_entry *ent = NULL;
-
-       ent = create_proc_entry(filename, S_IRUSR | S_IWUSR, NULL);
-       if (ent != NULL) {
-               ent->proc_fops = fops;
-               ent->owner = THIS_MODULE;
-       }
-
-       return ent;
+       return proc_create(filename, S_IRUSR | S_IWUSR, NULL, fops);
 }
 
 static const struct file_operations rtas_flash_operations = {
+       .owner          = THIS_MODULE,
        .read           = rtas_flash_read,
        .write          = rtas_flash_write,
        .open           = rtas_excl_open,
@@ -723,6 +716,7 @@ static const struct file_operations rtas_flash_operations = {
 };
 
 static const struct file_operations manage_flash_operations = {
+       .owner          = THIS_MODULE,
        .read           = manage_flash_read,
        .write          = manage_flash_write,
        .open           = rtas_excl_open,
@@ -730,6 +724,7 @@ static const struct file_operations manage_flash_operations = {
 };
 
 static const struct file_operations validate_flash_operations = {
+       .owner          = THIS_MODULE,
        .read           = validate_flash_read,
        .write          = validate_flash_write,
        .open           = rtas_excl_open,
index 36f6779c88d4e3b9d61a6d6896958b8706299c58..5112a4aa801d7d6843c2202c7815147831a74d2e 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/root_dev.h>
 #include <linux/cpu.h>
 #include <linux/console.h>
+#include <linux/lmb.h>
 
 #include <asm/io.h>
 #include <asm/prom.h>
@@ -229,6 +230,24 @@ int __init ppc_init(void)
 
 arch_initcall(ppc_init);
 
+#ifdef CONFIG_IRQSTACKS
+static void __init irqstack_early_init(void)
+{
+       unsigned int i;
+
+       /* interrupt stacks must be in lowmem, we get that for free on ppc32
+        * as the lmb is limited to lowmem by LMB_REAL_LIMIT */
+       for_each_possible_cpu(i) {
+               softirq_ctx[i] = (struct thread_info *)
+                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+               hardirq_ctx[i] = (struct thread_info *)
+                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+       }
+}
+#else
+#define irqstack_early_init()
+#endif
+
 /* Warning, IO base is not yet inited */
 void __init setup_arch(char **cmdline_p)
 {
@@ -286,6 +305,8 @@ void __init setup_arch(char **cmdline_p)
        init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = klimit;
 
+       irqstack_early_init();
+
        /* set up the bootmem stuff with available memory */
        do_init_bootmem();
        if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);
index dff6308d1b5eb524f44fbaf5dfa0a0866e5e58b5..25e3fd8606ab589ebae8ee51143e39a517d32505 100644 (file)
@@ -487,9 +487,12 @@ static void __init emergency_stack_init(void)
         */
        limit = min(0x10000000UL, lmb.rmo_size);
 
-       for_each_possible_cpu(i)
-               paca[i].emergency_sp =
-               __va(lmb_alloc_base(HW_PAGE_SIZE, 128, limit)) + HW_PAGE_SIZE;
+       for_each_possible_cpu(i) {
+               unsigned long sp;
+               sp  = lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+               sp += THREAD_SIZE;
+               paca[i].emergency_sp = __va(sp);
+       }
 }
 
 /*
index 3b26fbd6bec9ee4978ff6430ce1ef27a55723089..73401e83739a09b4dcefdaecc84451c018607ca5 100644 (file)
@@ -149,7 +149,7 @@ EXPORT_SYMBOL(tb_ticks_per_sec);    /* for cputime_t conversions */
 u64 tb_to_xs;
 unsigned tb_to_us;
 
-#define TICKLEN_SCALE  TICK_LENGTH_SHIFT
+#define TICKLEN_SCALE  NTP_SCALE_SHIFT
 u64 last_tick_len;     /* units are ns / 2^TICKLEN_SCALE */
 u64 ticklen_to_xs;     /* 0.64 fraction */
 
@@ -1007,8 +1007,6 @@ void __init time_init(void)
        vdso_data->stamp_xsec = (u64) xtime.tv_sec * XSEC_PER_SEC;
        vdso_data->tb_to_xs = tb_to_xs;
 
-       time_freq = 0;
-
        write_sequnlock_irqrestore(&xtime_lock, flags);
 
        /* Register the clocksource, if we're not running on iSeries */
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
new file mode 100644 (file)
index 0000000..f5d7a5e
--- /dev/null
@@ -0,0 +1,224 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm_host.h>
+#include <linux/highmem.h>
+#include <asm/mmu-44x.h>
+#include <asm/kvm_ppc.h>
+
+#include "44x_tlb.h"
+
+#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
+#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)
+
+static unsigned int kvmppc_tlb_44x_pos;
+
+static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
+{
+       /* Mask off reserved bits. */
+       attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_ATTR_MASK;
+
+       if (!usermode) {
+               /* Guest is in supervisor mode, so we need to translate guest
+                * supervisor permissions into user permissions. */
+               attrib &= ~PPC44x_TLB_USER_PERM_MASK;
+               attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
+       }
+
+       /* Make sure host can always access this memory. */
+       attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;
+
+       return attrib;
+}
+
+/* Search the guest TLB for a matching entry. */
+int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
+                         unsigned int as)
+{
+       int i;
+
+       /* XXX Replace loop with fancy data structures. */
+       for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+               struct tlbe *tlbe = &vcpu->arch.guest_tlb[i];
+               unsigned int tid;
+
+               if (eaddr < get_tlb_eaddr(tlbe))
+                       continue;
+
+               if (eaddr > get_tlb_end(tlbe))
+                       continue;
+
+               tid = get_tlb_tid(tlbe);
+               if (tid && (tid != pid))
+                       continue;
+
+               if (!get_tlb_v(tlbe))
+                       continue;
+
+               if (get_tlb_ts(tlbe) != as)
+                       continue;
+
+               return i;
+       }
+
+       return -1;
+}
+
+struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
+{
+       unsigned int as = !!(vcpu->arch.msr & MSR_IS);
+       unsigned int index;
+
+       index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
+       if (index == -1)
+               return NULL;
+       return &vcpu->arch.guest_tlb[index];
+}
+
+struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr)
+{
+       unsigned int as = !!(vcpu->arch.msr & MSR_DS);
+       unsigned int index;
+
+       index = kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
+       if (index == -1)
+               return NULL;
+       return &vcpu->arch.guest_tlb[index];
+}
+
+static int kvmppc_44x_tlbe_is_writable(struct tlbe *tlbe)
+{
+       return tlbe->word2 & (PPC44x_TLB_SW|PPC44x_TLB_UW);
+}
+
+/* Must be called with mmap_sem locked for writing. */
+static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
+                                      unsigned int index)
+{
+       struct tlbe *stlbe = &vcpu->arch.shadow_tlb[index];
+       struct page *page = vcpu->arch.shadow_pages[index];
+
+       kunmap(vcpu->arch.shadow_pages[index]);
+
+       if (get_tlb_v(stlbe)) {
+               if (kvmppc_44x_tlbe_is_writable(stlbe))
+                       kvm_release_page_dirty(page);
+               else
+                       kvm_release_page_clean(page);
+       }
+}
+
+/* Caller must ensure that the specified guest TLB entry is safe to insert into
+ * the shadow TLB. */
+void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn, u64 asid,
+                    u32 flags)
+{
+       struct page *new_page;
+       struct tlbe *stlbe;
+       hpa_t hpaddr;
+       unsigned int victim;
+
+       /* Future optimization: don't overwrite the TLB entry containing the
+        * current PC (or stack?). */
+       victim = kvmppc_tlb_44x_pos++;
+       if (kvmppc_tlb_44x_pos > tlb_44x_hwater)
+               kvmppc_tlb_44x_pos = 0;
+       stlbe = &vcpu->arch.shadow_tlb[victim];
+
+       /* Get reference to new page. */
+       down_write(&current->mm->mmap_sem);
+       new_page = gfn_to_page(vcpu->kvm, gfn);
+       if (is_error_page(new_page)) {
+               printk(KERN_ERR "Couldn't get guest page!\n");
+               kvm_release_page_clean(new_page);
+               return;
+       }
+       hpaddr = page_to_phys(new_page);
+
+       /* Drop reference to old page. */
+       kvmppc_44x_shadow_release(vcpu, victim);
+       up_write(&current->mm->mmap_sem);
+
+       vcpu->arch.shadow_pages[victim] = new_page;
+
+       /* XXX Make sure (va, size) doesn't overlap any other
+        * entries. 440x6 user manual says the result would be
+        * "undefined." */
+
+       /* XXX what about AS? */
+
+       stlbe->tid = asid & 0xff;
+
+       /* Force TS=1 for all guest mappings. */
+       /* For now we hardcode 4KB mappings, but it will be important to
+        * use host large pages in the future. */
+       stlbe->word0 = (gvaddr & PAGE_MASK) | PPC44x_TLB_VALID | PPC44x_TLB_TS
+                      | PPC44x_TLB_4K;
+
+       stlbe->word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
+       stlbe->word2 = kvmppc_44x_tlb_shadow_attrib(flags,
+                                                   vcpu->arch.msr & MSR_PR);
+}
+
+void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid)
+{
+       unsigned int pid = asid & 0xff;
+       int i;
+
+       /* XXX Replace loop with fancy data structures. */
+       down_write(&current->mm->mmap_sem);
+       for (i = 0; i <= tlb_44x_hwater; i++) {
+               struct tlbe *stlbe = &vcpu->arch.shadow_tlb[i];
+               unsigned int tid;
+
+               if (!get_tlb_v(stlbe))
+                       continue;
+
+               if (eaddr < get_tlb_eaddr(stlbe))
+                       continue;
+
+               if (eaddr > get_tlb_end(stlbe))
+                       continue;
+
+               tid = get_tlb_tid(stlbe);
+               if (tid && (tid != pid))
+                       continue;
+
+               kvmppc_44x_shadow_release(vcpu, i);
+               stlbe->word0 = 0;
+       }
+       up_write(&current->mm->mmap_sem);
+}
+
+/* Invalidate all mappings, so that when they fault back in they will get the
+ * proper permission bits. */
+void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
+{
+       int i;
+
+       /* XXX Replace loop with fancy data structures. */
+       down_write(&current->mm->mmap_sem);
+       for (i = 0; i <= tlb_44x_hwater; i++) {
+               kvmppc_44x_shadow_release(vcpu, i);
+               vcpu->arch.shadow_tlb[i].word0 = 0;
+       }
+       up_write(&current->mm->mmap_sem);
+}
diff --git a/arch/powerpc/kvm/44x_tlb.h b/arch/powerpc/kvm/44x_tlb.h
new file mode 100644 (file)
index 0000000..2ccd46b
--- /dev/null
@@ -0,0 +1,91 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __KVM_POWERPC_TLB_H__
+#define __KVM_POWERPC_TLB_H__
+
+#include <linux/kvm_host.h>
+#include <asm/mmu-44x.h>
+
+extern int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr,
+                                unsigned int pid, unsigned int as);
+extern struct tlbe *kvmppc_44x_dtlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
+extern struct tlbe *kvmppc_44x_itlb_search(struct kvm_vcpu *vcpu, gva_t eaddr);
+
+/* TLB helper functions */
+static inline unsigned int get_tlb_size(const struct tlbe *tlbe)
+{
+       return (tlbe->word0 >> 4) & 0xf;
+}
+
+static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe)
+{
+       return tlbe->word0 & 0xfffffc00;
+}
+
+static inline gva_t get_tlb_bytes(const struct tlbe *tlbe)
+{
+       unsigned int pgsize = get_tlb_size(tlbe);
+       return 1 << 10 << (pgsize << 1);
+}
+
+static inline gva_t get_tlb_end(const struct tlbe *tlbe)
+{
+       return get_tlb_eaddr(tlbe) + get_tlb_bytes(tlbe) - 1;
+}
+
+static inline u64 get_tlb_raddr(const struct tlbe *tlbe)
+{
+       u64 word1 = tlbe->word1;
+       return ((word1 & 0xf) << 32) | (word1 & 0xfffffc00);
+}
+
+static inline unsigned int get_tlb_tid(const struct tlbe *tlbe)
+{
+       return tlbe->tid & 0xff;
+}
+
+static inline unsigned int get_tlb_ts(const struct tlbe *tlbe)
+{
+       return (tlbe->word0 >> 8) & 0x1;
+}
+
+static inline unsigned int get_tlb_v(const struct tlbe *tlbe)
+{
+       return (tlbe->word0 >> 9) & 0x1;
+}
+
+static inline unsigned int get_mmucr_stid(const struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.mmucr & 0xff;
+}
+
+static inline unsigned int get_mmucr_sts(const struct kvm_vcpu *vcpu)
+{
+       return (vcpu->arch.mmucr >> 16) & 0x1;
+}
+
+static inline gpa_t tlb_xlate(struct tlbe *tlbe, gva_t eaddr)
+{
+       unsigned int pgmask = get_tlb_bytes(tlbe) - 1;
+
+       return get_tlb_raddr(tlbe) | (eaddr & pgmask);
+}
+
+#endif /* __KVM_POWERPC_TLB_H__ */
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
new file mode 100644 (file)
index 0000000..6b07601
--- /dev/null
@@ -0,0 +1,42 @@
+#
+# KVM configuration
+#
+
+menuconfig VIRTUALIZATION
+       bool "Virtualization"
+       ---help---
+         Say Y here to get to see options for using your Linux host to run
+         other operating systems inside virtual machines (guests).
+         This option alone does not add any kernel code.
+
+         If you say N, all options in this submenu will be skipped and
+         disabled.
+
+if VIRTUALIZATION
+
+config KVM
+       bool "Kernel-based Virtual Machine (KVM) support"
+       depends on 44x && EXPERIMENTAL
+       select PREEMPT_NOTIFIERS
+       select ANON_INODES
+       # We can only run on Book E hosts so far
+       select KVM_BOOKE_HOST
+       ---help---
+         Support hosting virtualized guest machines. You will also
+         need to select one or more of the processor modules below.
+
+         This module provides access to the hardware capabilities through
+         a character device node named /dev/kvm.
+
+         If unsure, say N.
+
+config KVM_BOOKE_HOST
+       bool "KVM host support for Book E PowerPC processors"
+       depends on KVM && 44x
+       ---help---
+         Provides host support for KVM on Book E PowerPC processors. Currently
+         this works on 440 processors only.
+
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
new file mode 100644 (file)
index 0000000..d0d358d
--- /dev/null
@@ -0,0 +1,15 @@
+#
+# Makefile for Kernel-based Virtual Machine module
+#
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
+
+kvm-objs := $(common-objs) powerpc.o emulate.o booke_guest.o
+obj-$(CONFIG_KVM) += kvm.o
+
+AFLAGS_booke_interrupts.o := -I$(obj)
+
+kvm-booke-host-objs := booke_host.o booke_interrupts.o 44x_tlb.o
+obj-$(CONFIG_KVM_BOOKE_HOST) += kvm-booke-host.o
diff --git a/arch/powerpc/kvm/booke_guest.c b/arch/powerpc/kvm/booke_guest.c
new file mode 100644 (file)
index 0000000..6d9884a
--- /dev/null
@@ -0,0 +1,615 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <asm/cputable.h>
+#include <asm/uaccess.h>
+#include <asm/kvm_ppc.h>
+
+#include "44x_tlb.h"
+
+#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+       { "exits",      VCPU_STAT(sum_exits) },
+       { "mmio",       VCPU_STAT(mmio_exits) },
+       { "dcr",        VCPU_STAT(dcr_exits) },
+       { "sig",        VCPU_STAT(signal_exits) },
+       { "light",      VCPU_STAT(light_exits) },
+       { "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
+       { "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
+       { "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
+       { "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
+       { "sysc",       VCPU_STAT(syscall_exits) },
+       { "isi",        VCPU_STAT(isi_exits) },
+       { "dsi",        VCPU_STAT(dsi_exits) },
+       { "inst_emu",   VCPU_STAT(emulated_inst_exits) },
+       { "dec",        VCPU_STAT(dec_exits) },
+       { "ext_intr",   VCPU_STAT(ext_intr_exits) },
+       { NULL }
+};
+
+static const u32 interrupt_msr_mask[16] = {
+       [BOOKE_INTERRUPT_CRITICAL]      = MSR_ME,
+       [BOOKE_INTERRUPT_MACHINE_CHECK] = 0,
+       [BOOKE_INTERRUPT_DATA_STORAGE]  = MSR_CE|MSR_ME|MSR_DE,
+       [BOOKE_INTERRUPT_INST_STORAGE]  = MSR_CE|MSR_ME|MSR_DE,
+       [BOOKE_INTERRUPT_EXTERNAL]      = MSR_CE|MSR_ME|MSR_DE,
+       [BOOKE_INTERRUPT_ALIGNMENT]     = MSR_CE|MSR_ME|MSR_DE,
+       [BOOKE_INTERRUPT_PROGRAM]       = MSR_CE|MSR_ME|MSR_DE,
+       [BOOKE_INTERRUPT_FP_UNAVAIL]    = MSR_CE|MSR_ME|MSR_DE,
+       [BOOKE_INTERRUPT_SYSCALL]       = MSR_CE|MSR_ME|MSR_DE,
+       [BOOKE_INTERRUPT_AP_UNAVAIL]    = MSR_CE|MSR_ME|MSR_DE,
+       [BOOKE_INTERRUPT_DECREMENTER]   = MSR_CE|MSR_ME|MSR_DE,
+       [BOOKE_INTERRUPT_FIT]           = MSR_CE|MSR_ME|MSR_DE,
+       [BOOKE_INTERRUPT_WATCHDOG]      = MSR_ME,
+       [BOOKE_INTERRUPT_DTLB_MISS]     = MSR_CE|MSR_ME|MSR_DE,
+       [BOOKE_INTERRUPT_ITLB_MISS]     = MSR_CE|MSR_ME|MSR_DE,
+       [BOOKE_INTERRUPT_DEBUG]         = MSR_ME,
+};
+
+const unsigned char exception_priority[] = {
+       [BOOKE_INTERRUPT_DATA_STORAGE] = 0,
+       [BOOKE_INTERRUPT_INST_STORAGE] = 1,
+       [BOOKE_INTERRUPT_ALIGNMENT] = 2,
+       [BOOKE_INTERRUPT_PROGRAM] = 3,
+       [BOOKE_INTERRUPT_FP_UNAVAIL] = 4,
+       [BOOKE_INTERRUPT_SYSCALL] = 5,
+       [BOOKE_INTERRUPT_AP_UNAVAIL] = 6,
+       [BOOKE_INTERRUPT_DTLB_MISS] = 7,
+       [BOOKE_INTERRUPT_ITLB_MISS] = 8,
+       [BOOKE_INTERRUPT_MACHINE_CHECK] = 9,
+       [BOOKE_INTERRUPT_DEBUG] = 10,
+       [BOOKE_INTERRUPT_CRITICAL] = 11,
+       [BOOKE_INTERRUPT_WATCHDOG] = 12,
+       [BOOKE_INTERRUPT_EXTERNAL] = 13,
+       [BOOKE_INTERRUPT_FIT] = 14,
+       [BOOKE_INTERRUPT_DECREMENTER] = 15,
+};
+
+const unsigned char priority_exception[] = {
+       BOOKE_INTERRUPT_DATA_STORAGE,
+       BOOKE_INTERRUPT_INST_STORAGE,
+       BOOKE_INTERRUPT_ALIGNMENT,
+       BOOKE_INTERRUPT_PROGRAM,
+       BOOKE_INTERRUPT_FP_UNAVAIL,
+       BOOKE_INTERRUPT_SYSCALL,
+       BOOKE_INTERRUPT_AP_UNAVAIL,
+       BOOKE_INTERRUPT_DTLB_MISS,
+       BOOKE_INTERRUPT_ITLB_MISS,
+       BOOKE_INTERRUPT_MACHINE_CHECK,
+       BOOKE_INTERRUPT_DEBUG,
+       BOOKE_INTERRUPT_CRITICAL,
+       BOOKE_INTERRUPT_WATCHDOG,
+       BOOKE_INTERRUPT_EXTERNAL,
+       BOOKE_INTERRUPT_FIT,
+       BOOKE_INTERRUPT_DECREMENTER,
+};
+
+
+void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
+{
+       struct tlbe *tlbe;
+       int i;
+
+       printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
+       printk("| %2s | %3s | %8s | %8s | %8s |\n",
+                       "nr", "tid", "word0", "word1", "word2");
+
+       for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+               tlbe = &vcpu->arch.guest_tlb[i];
+               if (tlbe->word0 & PPC44x_TLB_VALID)
+                       printk(" G%2d |  %02X | %08X | %08X | %08X |\n",
+                              i, tlbe->tid, tlbe->word0, tlbe->word1,
+                              tlbe->word2);
+       }
+
+       for (i = 0; i < PPC44x_TLB_SIZE; i++) {
+               tlbe = &vcpu->arch.shadow_tlb[i];
+               if (tlbe->word0 & PPC44x_TLB_VALID)
+                       printk(" S%2d | %02X | %08X | %08X | %08X |\n",
+                              i, tlbe->tid, tlbe->word0, tlbe->word1,
+                              tlbe->word2);
+       }
+}
+
+/* TODO: use vcpu_printf() */
+void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
+{
+       int i;
+
+       printk("pc:   %08x msr:  %08x\n", vcpu->arch.pc, vcpu->arch.msr);
+       printk("lr:   %08x ctr:  %08x\n", vcpu->arch.lr, vcpu->arch.ctr);
+       printk("srr0: %08x srr1: %08x\n", vcpu->arch.srr0, vcpu->arch.srr1);
+
+       printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+       for (i = 0; i < 32; i += 4) {
+               printk("gpr%02d: %08x %08x %08x %08x\n", i,
+                      vcpu->arch.gpr[i],
+                      vcpu->arch.gpr[i+1],
+                      vcpu->arch.gpr[i+2],
+                      vcpu->arch.gpr[i+3]);
+       }
+}
+
+/* Check if we are ready to deliver the interrupt */
+static int kvmppc_can_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
+{
+       int r;
+
+       switch (interrupt) {
+       case BOOKE_INTERRUPT_CRITICAL:
+               r = vcpu->arch.msr & MSR_CE;
+               break;
+       case BOOKE_INTERRUPT_MACHINE_CHECK:
+               r = vcpu->arch.msr & MSR_ME;
+               break;
+       case BOOKE_INTERRUPT_EXTERNAL:
+               r = vcpu->arch.msr & MSR_EE;
+               break;
+       case BOOKE_INTERRUPT_DECREMENTER:
+               r = vcpu->arch.msr & MSR_EE;
+               break;
+       case BOOKE_INTERRUPT_FIT:
+               r = vcpu->arch.msr & MSR_EE;
+               break;
+       case BOOKE_INTERRUPT_WATCHDOG:
+               r = vcpu->arch.msr & MSR_CE;
+               break;
+       case BOOKE_INTERRUPT_DEBUG:
+               r = vcpu->arch.msr & MSR_DE;
+               break;
+       default:
+               r = 1;
+       }
+
+       return r;
+}
+
+static void kvmppc_deliver_interrupt(struct kvm_vcpu *vcpu, int interrupt)
+{
+       switch (interrupt) {
+       case BOOKE_INTERRUPT_DECREMENTER:
+               vcpu->arch.tsr |= TSR_DIS;
+               break;
+       }
+
+       vcpu->arch.srr0 = vcpu->arch.pc;
+       vcpu->arch.srr1 = vcpu->arch.msr;
+       vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[interrupt];
+       kvmppc_set_msr(vcpu, vcpu->arch.msr & interrupt_msr_mask[interrupt]);
+}
+
+/* Check pending exceptions and deliver one, if possible. */
+void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu)
+{
+       unsigned long *pending = &vcpu->arch.pending_exceptions;
+       unsigned int exception;
+       unsigned int priority;
+
+       priority = find_first_bit(pending, BITS_PER_BYTE * sizeof(*pending));
+       while (priority <= BOOKE_MAX_INTERRUPT) {
+               exception = priority_exception[priority];
+               if (kvmppc_can_deliver_interrupt(vcpu, exception)) {
+                       kvmppc_clear_exception(vcpu, exception);
+                       kvmppc_deliver_interrupt(vcpu, exception);
+                       break;
+               }
+
+               priority = find_next_bit(pending,
+                                        BITS_PER_BYTE * sizeof(*pending),
+                                        priority + 1);
+       }
+}
+
+static int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er;
+       int r;
+
+       er = kvmppc_emulate_instruction(run, vcpu);
+       switch (er) {
+       case EMULATE_DONE:
+               /* Future optimization: only reload non-volatiles if they were
+                * actually modified. */
+               r = RESUME_GUEST_NV;
+               break;
+       case EMULATE_DO_MMIO:
+               run->exit_reason = KVM_EXIT_MMIO;
+               /* We must reload nonvolatiles because "update" load/store
+                * instructions modify register state. */
+               /* Future optimization: only reload non-volatiles if they were
+                * actually modified. */
+               r = RESUME_HOST_NV;
+               break;
+       case EMULATE_FAIL:
+               /* XXX Deliver Program interrupt to guest. */
+               printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
+                      vcpu->arch.last_inst);
+               r = RESUME_HOST;
+               break;
+       default:
+               BUG();
+       }
+
+       return r;
+}
+
+/**
+ * kvmppc_handle_exit
+ *
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
+int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                       unsigned int exit_nr)
+{
+       enum emulation_result er;
+       int r = RESUME_HOST;
+
+       local_irq_enable();
+
+       run->exit_reason = KVM_EXIT_UNKNOWN;
+       run->ready_for_interrupt_injection = 1;
+
+       switch (exit_nr) {
+       case BOOKE_INTERRUPT_MACHINE_CHECK:
+               printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
+               kvmppc_dump_vcpu(vcpu);
+               r = RESUME_HOST;
+               break;
+
+       case BOOKE_INTERRUPT_EXTERNAL:
+       case BOOKE_INTERRUPT_DECREMENTER:
+               /* Since we switched IVPR back to the host's value, the host
+                * handled this interrupt the moment we enabled interrupts.
+                * Now we just offer it a chance to reschedule the guest. */
+
+               /* XXX At this point the TLB still holds our shadow TLB, so if
+                * we do reschedule the host will fault over it. Perhaps we
+                * should politely restore the host's entries to minimize
+                * misses before ceding control. */
+               if (need_resched())
+                       cond_resched();
+               if (exit_nr == BOOKE_INTERRUPT_DECREMENTER)
+                       vcpu->stat.dec_exits++;
+               else
+                       vcpu->stat.ext_intr_exits++;
+               r = RESUME_GUEST;
+               break;
+
+       case BOOKE_INTERRUPT_PROGRAM:
+               if (vcpu->arch.msr & MSR_PR) {
+                       /* Program traps generated by user-level software must be handled
+                        * by the guest kernel. */
+                       vcpu->arch.esr = vcpu->arch.fault_esr;
+                       kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
+                       r = RESUME_GUEST;
+                       break;
+               }
+
+               er = kvmppc_emulate_instruction(run, vcpu);
+               switch (er) {
+               case EMULATE_DONE:
+                       /* Future optimization: only reload non-volatiles if
+                        * they were actually modified by emulation. */
+                       vcpu->stat.emulated_inst_exits++;
+                       r = RESUME_GUEST_NV;
+                       break;
+               case EMULATE_DO_DCR:
+                       run->exit_reason = KVM_EXIT_DCR;
+                       r = RESUME_HOST;
+                       break;
+               case EMULATE_FAIL:
+                       /* XXX Deliver Program interrupt to guest. */
+                       printk(KERN_CRIT "%s: emulation at %x failed (%08x)\n",
+                              __func__, vcpu->arch.pc, vcpu->arch.last_inst);
+                       /* For debugging, encode the failing instruction and
+                        * report it to userspace. */
+                       run->hw.hardware_exit_reason = ~0ULL << 32;
+                       run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
+                       r = RESUME_HOST;
+                       break;
+               default:
+                       BUG();
+               }
+               break;
+
+       case BOOKE_INTERRUPT_DATA_STORAGE:
+               vcpu->arch.dear = vcpu->arch.fault_dear;
+               vcpu->arch.esr = vcpu->arch.fault_esr;
+               kvmppc_queue_exception(vcpu, exit_nr);
+               vcpu->stat.dsi_exits++;
+               r = RESUME_GUEST;
+               break;
+
+       case BOOKE_INTERRUPT_INST_STORAGE:
+               vcpu->arch.esr = vcpu->arch.fault_esr;
+               kvmppc_queue_exception(vcpu, exit_nr);
+               vcpu->stat.isi_exits++;
+               r = RESUME_GUEST;
+               break;
+
+       case BOOKE_INTERRUPT_SYSCALL:
+               kvmppc_queue_exception(vcpu, exit_nr);
+               vcpu->stat.syscall_exits++;
+               r = RESUME_GUEST;
+               break;
+
+       case BOOKE_INTERRUPT_DTLB_MISS: {
+               struct tlbe *gtlbe;
+               unsigned long eaddr = vcpu->arch.fault_dear;
+               gfn_t gfn;
+
+               /* Check the guest TLB. */
+               gtlbe = kvmppc_44x_dtlb_search(vcpu, eaddr);
+               if (!gtlbe) {
+                       /* The guest didn't have a mapping for it. */
+                       kvmppc_queue_exception(vcpu, exit_nr);
+                       vcpu->arch.dear = vcpu->arch.fault_dear;
+                       vcpu->arch.esr = vcpu->arch.fault_esr;
+                       vcpu->stat.dtlb_real_miss_exits++;
+                       r = RESUME_GUEST;
+                       break;
+               }
+
+               vcpu->arch.paddr_accessed = tlb_xlate(gtlbe, eaddr);
+               gfn = vcpu->arch.paddr_accessed >> PAGE_SHIFT;
+
+               if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+                       /* The guest TLB had a mapping, but the shadow TLB
+                        * didn't, and it is RAM. This could be because:
+                        * a) the entry is mapping the host kernel, or
+                        * b) the guest used a large mapping which we're faking
+                        * Either way, we need to satisfy the fault without
+                        * invoking the guest. */
+                       kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
+                                      gtlbe->word2);
+                       vcpu->stat.dtlb_virt_miss_exits++;
+                       r = RESUME_GUEST;
+               } else {
+                       /* Guest has mapped and accessed a page which is not
+                        * actually RAM. */
+                       r = kvmppc_emulate_mmio(run, vcpu);
+               }
+
+               break;
+       }
+
+       case BOOKE_INTERRUPT_ITLB_MISS: {
+               struct tlbe *gtlbe;
+               unsigned long eaddr = vcpu->arch.pc;
+               gfn_t gfn;
+
+               r = RESUME_GUEST;
+
+               /* Check the guest TLB. */
+               gtlbe = kvmppc_44x_itlb_search(vcpu, eaddr);
+               if (!gtlbe) {
+                       /* The guest didn't have a mapping for it. */
+                       kvmppc_queue_exception(vcpu, exit_nr);
+                       vcpu->stat.itlb_real_miss_exits++;
+                       break;
+               }
+
+               vcpu->stat.itlb_virt_miss_exits++;
+
+               gfn = tlb_xlate(gtlbe, eaddr) >> PAGE_SHIFT;
+
+               if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
+                       /* The guest TLB had a mapping, but the shadow TLB
+                        * didn't. This could be because:
+                        * a) the entry is mapping the host kernel, or
+                        * b) the guest used a large mapping which we're faking
+                        * Either way, we need to satisfy the fault without
+                        * invoking the guest. */
+                       kvmppc_mmu_map(vcpu, eaddr, gfn, gtlbe->tid,
+                                      gtlbe->word2);
+               } else {
+                       /* Guest mapped and leaped at non-RAM! */
+                       kvmppc_queue_exception(vcpu,
+                                              BOOKE_INTERRUPT_MACHINE_CHECK);
+               }
+
+               break;
+       }
+
+       default:
+               printk(KERN_EMERG "exit_nr %d\n", exit_nr);
+               BUG();
+       }
+
+       local_irq_disable();
+
+       kvmppc_check_and_deliver_interrupts(vcpu);
+
+       /* Do some exit accounting. */
+       vcpu->stat.sum_exits++;
+       if (!(r & RESUME_HOST)) {
+               /* To avoid clobbering exit_reason, only check for signals if
+                * we aren't already exiting to userspace for some other
+                * reason. */
+               if (signal_pending(current)) {
+                       run->exit_reason = KVM_EXIT_INTR;
+                       r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
+
+                       vcpu->stat.signal_exits++;
+               } else {
+                       vcpu->stat.light_exits++;
+               }
+       } else {
+               switch (run->exit_reason) {
+               case KVM_EXIT_MMIO:
+                       vcpu->stat.mmio_exits++;
+                       break;
+               case KVM_EXIT_DCR:
+                       vcpu->stat.dcr_exits++;
+                       break;
+               case KVM_EXIT_INTR:
+                       vcpu->stat.signal_exits++;
+                       break;
+               }
+       }
+
+       return r;
+}
+
+/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       struct tlbe *tlbe = &vcpu->arch.guest_tlb[0];
+
+       tlbe->tid = 0;
+       tlbe->word0 = PPC44x_TLB_16M | PPC44x_TLB_VALID;
+       tlbe->word1 = 0;
+       tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR;
+
+       tlbe++;
+       tlbe->tid = 0;
+       tlbe->word0 = 0xef600000 | PPC44x_TLB_4K | PPC44x_TLB_VALID;
+       tlbe->word1 = 0xef600000;
+       tlbe->word2 = PPC44x_TLB_SX | PPC44x_TLB_SW | PPC44x_TLB_SR
+                     | PPC44x_TLB_I | PPC44x_TLB_G;
+
+       vcpu->arch.pc = 0;
+       vcpu->arch.msr = 0;
+       vcpu->arch.gpr[1] = (16<<20) - 8; /* -8 for the callee-save LR slot */
+
+       /* Eye-catching number so we know if the guest takes an interrupt
+        * before it's programmed its own IVPR. */
+       vcpu->arch.ivpr = 0x55550000;
+
+       /* Since the guest can directly access the timebase, it must know the
+        * real timebase frequency. Accordingly, it must see the state of
+        * CCR1[TCS]. */
+       vcpu->arch.ccr1 = mfspr(SPRN_CCR1);
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+
+       regs->pc = vcpu->arch.pc;
+       regs->cr = vcpu->arch.cr;
+       regs->ctr = vcpu->arch.ctr;
+       regs->lr = vcpu->arch.lr;
+       regs->xer = vcpu->arch.xer;
+       regs->msr = vcpu->arch.msr;
+       regs->srr0 = vcpu->arch.srr0;
+       regs->srr1 = vcpu->arch.srr1;
+       regs->pid = vcpu->arch.pid;
+       regs->sprg0 = vcpu->arch.sprg0;
+       regs->sprg1 = vcpu->arch.sprg1;
+       regs->sprg2 = vcpu->arch.sprg2;
+       regs->sprg3 = vcpu->arch.sprg3;
+       regs->sprg4 = vcpu->arch.sprg4;
+       regs->sprg5 = vcpu->arch.sprg5;
+       regs->sprg6 = vcpu->arch.sprg6;
+       regs->sprg7 = vcpu->arch.sprg7;
+
+       for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
+               regs->gpr[i] = vcpu->arch.gpr[i];
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+
+       vcpu->arch.pc = regs->pc;
+       vcpu->arch.cr = regs->cr;
+       vcpu->arch.ctr = regs->ctr;
+       vcpu->arch.lr = regs->lr;
+       vcpu->arch.xer = regs->xer;
+       vcpu->arch.msr = regs->msr;
+       vcpu->arch.srr0 = regs->srr0;
+       vcpu->arch.srr1 = regs->srr1;
+       vcpu->arch.sprg0 = regs->sprg0;
+       vcpu->arch.sprg1 = regs->sprg1;
+       vcpu->arch.sprg2 = regs->sprg2;
+       vcpu->arch.sprg3 = regs->sprg3;
+       vcpu->arch.sprg4 = regs->sprg4;
+       vcpu->arch.sprg5 = regs->sprg5;
+       vcpu->arch.sprg6 = regs->sprg6;
+       vcpu->arch.sprg7 = regs->sprg7;
+
+       for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
+               vcpu->arch.gpr[i] = regs->gpr[i];
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+                                  struct kvm_sregs *sregs)
+{
+       return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+                                  struct kvm_sregs *sregs)
+{
+       return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -ENOTSUPP;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -ENOTSUPP;
+}
+
+/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+                                  struct kvm_translation *tr)
+{
+       struct tlbe *gtlbe;
+       int index;
+       gva_t eaddr;
+       u8 pid;
+       u8 as;
+
+       eaddr = tr->linear_address;
+       pid = (tr->linear_address >> 32) & 0xff;
+       as = (tr->linear_address >> 40) & 0x1;
+
+       index = kvmppc_44x_tlb_index(vcpu, eaddr, pid, as);
+       if (index == -1) {
+               tr->valid = 0;
+               return 0;
+       }
+
+       gtlbe = &vcpu->arch.guest_tlb[index];
+
+       tr->physical_address = tlb_xlate(gtlbe, eaddr);
+       /* XXX what do "writeable" and "usermode" even mean? */
+       tr->valid = 1;
+
+       return 0;
+}
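
The translate ioctl above packs an address-space bit, a PID, and an effective address into the single 64-bit linear_address field. As a rough userspace-side illustration (a sketch only, not part of this patch; vcpu_fd is a hypothetical open vcpu file descriptor), the encoding mirrors the kernel-side decoding: EADDR in bits 0-31, PID in bits 32-39, AS in bit 40:

    /* Sketch: build the AS|PID|EADDR encoding expected by KVM_TRANSLATE. */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int translate_guest_addr(int vcpu_fd, uint32_t eaddr, uint8_t pid,
                                    uint8_t as, struct kvm_translation *tr)
    {
            tr->linear_address = (uint64_t)eaddr
                               | ((uint64_t)pid << 32)
                               | ((uint64_t)(as & 1) << 40);
            return ioctl(vcpu_fd, KVM_TRANSLATE, tr);
    }

On success, tr->physical_address and tr->valid are filled in as shown in kvm_arch_vcpu_ioctl_translate() above.
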
diff --git a/arch/powerpc/kvm/booke_host.c b/arch/powerpc/kvm/booke_host.c
new file mode 100644 (file)
index 0000000..b480341
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <asm/cacheflush.h>
+#include <asm/kvm_ppc.h>
+
+unsigned long kvmppc_booke_handlers;
+
+static int kvmppc_booke_init(void)
+{
+       unsigned long ivor[16];
+       unsigned long max_ivor = 0;
+       int i;
+
+       /* We install our own exception handlers by hijacking IVPR. IVPR holds
+        * only the upper 16 bits of the vector base, so the handlers must sit
+        * in a 64KB-aligned block; hence the 64KB allocation. */
+       kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
+                                                VCPU_SIZE_ORDER);
+       if (!kvmppc_booke_handlers)
+               return -ENOMEM;
+
+       /* XXX make sure our handlers are smaller than Linux's */
+
+       /* Copy our interrupt handlers to match host IVORs. That way we don't
+        * have to swap the IVORs on every guest/host transition. */
+       ivor[0] = mfspr(SPRN_IVOR0);
+       ivor[1] = mfspr(SPRN_IVOR1);
+       ivor[2] = mfspr(SPRN_IVOR2);
+       ivor[3] = mfspr(SPRN_IVOR3);
+       ivor[4] = mfspr(SPRN_IVOR4);
+       ivor[5] = mfspr(SPRN_IVOR5);
+       ivor[6] = mfspr(SPRN_IVOR6);
+       ivor[7] = mfspr(SPRN_IVOR7);
+       ivor[8] = mfspr(SPRN_IVOR8);
+       ivor[9] = mfspr(SPRN_IVOR9);
+       ivor[10] = mfspr(SPRN_IVOR10);
+       ivor[11] = mfspr(SPRN_IVOR11);
+       ivor[12] = mfspr(SPRN_IVOR12);
+       ivor[13] = mfspr(SPRN_IVOR13);
+       ivor[14] = mfspr(SPRN_IVOR14);
+       ivor[15] = mfspr(SPRN_IVOR15);
+
+       for (i = 0; i < 16; i++) {
+               if (ivor[i] > max_ivor)
+                       max_ivor = ivor[i];
+
+               memcpy((void *)kvmppc_booke_handlers + ivor[i],
+                      kvmppc_handlers_start + i * kvmppc_handler_len,
+                      kvmppc_handler_len);
+       }
+       flush_icache_range(kvmppc_booke_handlers,
+                          kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
+
+       return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+}
+
+static void __exit kvmppc_booke_exit(void)
+{
+       free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
+       kvm_exit();
+}
+
+module_init(kvmppc_booke_init)
+module_exit(kvmppc_booke_exit)
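
For context on why the handlers above are copied at the host's own IVOR offsets: on Book E, the hardware forms an exception vector from IVPR (upper 16 bits) and the per-exception IVORn (low 16 bits, 16-byte aligned), so keeping the offsets identical means only IVPR has to change on guest entry and exit. A minimal sketch of that composition, based on the Book E definition rather than anything in this patch:

    /* Effective vector address = high 16 bits of IVPR | low bits of IVORn. */
    static inline unsigned long booke_vector_addr(unsigned long ivpr,
                                                  unsigned long ivor)
    {
            return (ivpr & 0xffff0000UL) | (ivor & 0x0000fff0UL);
    }

This is also why kvmppc_booke_handlers only needs to be 64KB-aligned: the low 16 bits of IVPR are never used by the hardware.
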
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
new file mode 100644 (file)
index 0000000..3b653b5
--- /dev/null
@@ -0,0 +1,436 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <asm/ppc_asm.h>
+#include <asm/kvm_asm.h>
+#include <asm/reg.h>
+#include <asm/mmu-44x.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+
+#define KVMPPC_MSR_MASK (MSR_CE|MSR_EE|MSR_PR|MSR_DE|MSR_ME|MSR_IS|MSR_DS)
+
+#define VCPU_GPR(n)     (VCPU_GPRS + (n * 4))
+
+/* The host stack layout: */
+#define HOST_R1         0 /* Implied by stwu. */
+#define HOST_CALLEE_LR  4
+#define HOST_RUN        8
+/* r2 is special: it holds 'current', and it is made nonvolatile in the
+ * kernel with the -ffixed-r2 gcc option. */
+#define HOST_R2         12
+#define HOST_NV_GPRS    16
+#define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
+#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
+#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
+#define HOST_STACK_LR   (HOST_STACK_SIZE + 4) /* In caller stack frame. */
+
+#define NEED_INST_MASK ((1<<BOOKE_INTERRUPT_PROGRAM) | \
+                        (1<<BOOKE_INTERRUPT_DTLB_MISS))
+
+#define NEED_DEAR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
+                        (1<<BOOKE_INTERRUPT_DTLB_MISS))
+
+#define NEED_ESR_MASK ((1<<BOOKE_INTERRUPT_DATA_STORAGE) | \
+                       (1<<BOOKE_INTERRUPT_INST_STORAGE) | \
+                       (1<<BOOKE_INTERRUPT_PROGRAM) | \
+                       (1<<BOOKE_INTERRUPT_DTLB_MISS))
+
+.macro KVM_HANDLER ivor_nr
+_GLOBAL(kvmppc_handler_\ivor_nr)
+       /* Get pointer to vcpu and record exit number. */
+       mtspr   SPRN_SPRG0, r4
+       mfspr   r4, SPRN_SPRG1
+       stw     r5, VCPU_GPR(r5)(r4)
+       stw     r6, VCPU_GPR(r6)(r4)
+       mfctr   r5
+       lis     r6, kvmppc_resume_host@h
+       stw     r5, VCPU_CTR(r4)
+       li      r5, \ivor_nr
+       ori     r6, r6, kvmppc_resume_host@l
+       mtctr   r6
+       bctr
+.endm
+
+_GLOBAL(kvmppc_handlers_start)
+KVM_HANDLER BOOKE_INTERRUPT_CRITICAL
+KVM_HANDLER BOOKE_INTERRUPT_MACHINE_CHECK
+KVM_HANDLER BOOKE_INTERRUPT_DATA_STORAGE
+KVM_HANDLER BOOKE_INTERRUPT_INST_STORAGE
+KVM_HANDLER BOOKE_INTERRUPT_EXTERNAL
+KVM_HANDLER BOOKE_INTERRUPT_ALIGNMENT
+KVM_HANDLER BOOKE_INTERRUPT_PROGRAM
+KVM_HANDLER BOOKE_INTERRUPT_FP_UNAVAIL
+KVM_HANDLER BOOKE_INTERRUPT_SYSCALL
+KVM_HANDLER BOOKE_INTERRUPT_AP_UNAVAIL
+KVM_HANDLER BOOKE_INTERRUPT_DECREMENTER
+KVM_HANDLER BOOKE_INTERRUPT_FIT
+KVM_HANDLER BOOKE_INTERRUPT_WATCHDOG
+KVM_HANDLER BOOKE_INTERRUPT_DTLB_MISS
+KVM_HANDLER BOOKE_INTERRUPT_ITLB_MISS
+KVM_HANDLER BOOKE_INTERRUPT_DEBUG
+
+_GLOBAL(kvmppc_handler_len)
+       .long kvmppc_handler_1 - kvmppc_handler_0
+
+
+/* Registers:
+ *  SPRG0: guest r4
+ *  r4: vcpu pointer
+ *  r5: KVM exit number
+ */
+_GLOBAL(kvmppc_resume_host)
+       stw     r3, VCPU_GPR(r3)(r4)
+       mfcr    r3
+       stw     r3, VCPU_CR(r4)
+       stw     r7, VCPU_GPR(r7)(r4)
+       stw     r8, VCPU_GPR(r8)(r4)
+       stw     r9, VCPU_GPR(r9)(r4)
+
+       li      r6, 1
+       slw     r6, r6, r5
+
+       /* Save the faulting instruction and all GPRs for emulation. */
+       andi.   r7, r6, NEED_INST_MASK
+       beq     ..skip_inst_copy
+       mfspr   r9, SPRN_SRR0
+       mfmsr   r8
+       ori     r7, r8, MSR_DS
+       mtmsr   r7
+       isync
+       lwz     r9, 0(r9)
+       mtmsr   r8
+       isync
+       stw     r9, VCPU_LAST_INST(r4)
+
+       stw     r15, VCPU_GPR(r15)(r4)
+       stw     r16, VCPU_GPR(r16)(r4)
+       stw     r17, VCPU_GPR(r17)(r4)
+       stw     r18, VCPU_GPR(r18)(r4)
+       stw     r19, VCPU_GPR(r19)(r4)
+       stw     r20, VCPU_GPR(r20)(r4)
+       stw     r21, VCPU_GPR(r21)(r4)
+       stw     r22, VCPU_GPR(r22)(r4)
+       stw     r23, VCPU_GPR(r23)(r4)
+       stw     r24, VCPU_GPR(r24)(r4)
+       stw     r25, VCPU_GPR(r25)(r4)
+       stw     r26, VCPU_GPR(r26)(r4)
+       stw     r27, VCPU_GPR(r27)(r4)
+       stw     r28, VCPU_GPR(r28)(r4)
+       stw     r29, VCPU_GPR(r29)(r4)
+       stw     r30, VCPU_GPR(r30)(r4)
+       stw     r31, VCPU_GPR(r31)(r4)
+..skip_inst_copy:
+
+       /* Also grab DEAR and ESR before the host can clobber them. */
+
+       andi.   r7, r6, NEED_DEAR_MASK
+       beq     ..skip_dear
+       mfspr   r9, SPRN_DEAR
+       stw     r9, VCPU_FAULT_DEAR(r4)
+..skip_dear:
+
+       andi.   r7, r6, NEED_ESR_MASK
+       beq     ..skip_esr
+       mfspr   r9, SPRN_ESR
+       stw     r9, VCPU_FAULT_ESR(r4)
+..skip_esr:
+
+       /* Save remaining volatile guest register state to vcpu. */
+       stw     r0, VCPU_GPR(r0)(r4)
+       stw     r1, VCPU_GPR(r1)(r4)
+       stw     r2, VCPU_GPR(r2)(r4)
+       stw     r10, VCPU_GPR(r10)(r4)
+       stw     r11, VCPU_GPR(r11)(r4)
+       stw     r12, VCPU_GPR(r12)(r4)
+       stw     r13, VCPU_GPR(r13)(r4)
+       stw     r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
+       mflr    r3
+       stw     r3, VCPU_LR(r4)
+       mfxer   r3
+       stw     r3, VCPU_XER(r4)
+       mfspr   r3, SPRN_SPRG0
+       stw     r3, VCPU_GPR(r4)(r4)
+       mfspr   r3, SPRN_SRR0
+       stw     r3, VCPU_PC(r4)
+
+       /* Restore host stack pointer and PID before IVPR, since the host
+        * exception handlers use them. */
+       lwz     r1, VCPU_HOST_STACK(r4)
+       lwz     r3, VCPU_HOST_PID(r4)
+       mtspr   SPRN_PID, r3
+
+       /* Restore host IVPR before re-enabling interrupts. We cheat and know
+        * that Linux IVPR is always 0xc0000000. */
+       lis     r3, 0xc000
+       mtspr   SPRN_IVPR, r3
+
+       /* Switch to kernel stack and jump to handler. */
+       LOAD_REG_ADDR(r3, kvmppc_handle_exit)
+       mtctr   r3
+       lwz     r3, HOST_RUN(r1)
+       lwz     r2, HOST_R2(r1)
+       mr      r14, r4 /* Save vcpu pointer. */
+
+       bctrl   /* kvmppc_handle_exit() */
+
+       /* Restore vcpu pointer and the nonvolatiles we used. */
+       mr      r4, r14
+       lwz     r14, VCPU_GPR(r14)(r4)
+
+       /* Sometimes instruction emulation must restore complete GPR state. */
+       andi.   r5, r3, RESUME_FLAG_NV
+       beq     ..skip_nv_load
+       lwz     r15, VCPU_GPR(r15)(r4)
+       lwz     r16, VCPU_GPR(r16)(r4)
+       lwz     r17, VCPU_GPR(r17)(r4)
+       lwz     r18, VCPU_GPR(r18)(r4)
+       lwz     r19, VCPU_GPR(r19)(r4)
+       lwz     r20, VCPU_GPR(r20)(r4)
+       lwz     r21, VCPU_GPR(r21)(r4)
+       lwz     r22, VCPU_GPR(r22)(r4)
+       lwz     r23, VCPU_GPR(r23)(r4)
+       lwz     r24, VCPU_GPR(r24)(r4)
+       lwz     r25, VCPU_GPR(r25)(r4)
+       lwz     r26, VCPU_GPR(r26)(r4)
+       lwz     r27, VCPU_GPR(r27)(r4)
+       lwz     r28, VCPU_GPR(r28)(r4)
+       lwz     r29, VCPU_GPR(r29)(r4)
+       lwz     r30, VCPU_GPR(r30)(r4)
+       lwz     r31, VCPU_GPR(r31)(r4)
+..skip_nv_load:
+
+       /* Should we return to the guest? */
+       andi.   r5, r3, RESUME_FLAG_HOST
+       beq     lightweight_exit
+
+       srawi   r3, r3, 2 /* Shift -ERR back down. */
+
+heavyweight_exit:
+       /* Not returning to guest. */
+
+       /* We already saved guest volatile register state; now save the
+        * non-volatiles. */
+       stw     r15, VCPU_GPR(r15)(r4)
+       stw     r16, VCPU_GPR(r16)(r4)
+       stw     r17, VCPU_GPR(r17)(r4)
+       stw     r18, VCPU_GPR(r18)(r4)
+       stw     r19, VCPU_GPR(r19)(r4)
+       stw     r20, VCPU_GPR(r20)(r4)
+       stw     r21, VCPU_GPR(r21)(r4)
+       stw     r22, VCPU_GPR(r22)(r4)
+       stw     r23, VCPU_GPR(r23)(r4)
+       stw     r24, VCPU_GPR(r24)(r4)
+       stw     r25, VCPU_GPR(r25)(r4)
+       stw     r26, VCPU_GPR(r26)(r4)
+       stw     r27, VCPU_GPR(r27)(r4)
+       stw     r28, VCPU_GPR(r28)(r4)
+       stw     r29, VCPU_GPR(r29)(r4)
+       stw     r30, VCPU_GPR(r30)(r4)
+       stw     r31, VCPU_GPR(r31)(r4)
+
+       /* Load host non-volatile register state from host stack. */
+       lwz     r14, HOST_NV_GPR(r14)(r1)
+       lwz     r15, HOST_NV_GPR(r15)(r1)
+       lwz     r16, HOST_NV_GPR(r16)(r1)
+       lwz     r17, HOST_NV_GPR(r17)(r1)
+       lwz     r18, HOST_NV_GPR(r18)(r1)
+       lwz     r19, HOST_NV_GPR(r19)(r1)
+       lwz     r20, HOST_NV_GPR(r20)(r1)
+       lwz     r21, HOST_NV_GPR(r21)(r1)
+       lwz     r22, HOST_NV_GPR(r22)(r1)
+       lwz     r23, HOST_NV_GPR(r23)(r1)
+       lwz     r24, HOST_NV_GPR(r24)(r1)
+       lwz     r25, HOST_NV_GPR(r25)(r1)
+       lwz     r26, HOST_NV_GPR(r26)(r1)
+       lwz     r27, HOST_NV_GPR(r27)(r1)
+       lwz     r28, HOST_NV_GPR(r28)(r1)
+       lwz     r29, HOST_NV_GPR(r29)(r1)
+       lwz     r30, HOST_NV_GPR(r30)(r1)
+       lwz     r31, HOST_NV_GPR(r31)(r1)
+
+       /* Return to kvm_vcpu_run(). */
+       lwz     r4, HOST_STACK_LR(r1)
+       addi    r1, r1, HOST_STACK_SIZE
+       mtlr    r4
+       /* r3 still contains the return code from kvmppc_handle_exit(). */
+       blr
+
+
+/* Registers:
+ *  r3: kvm_run pointer
+ *  r4: vcpu pointer
+ */
+_GLOBAL(__kvmppc_vcpu_run)
+       stwu    r1, -HOST_STACK_SIZE(r1)
+       stw     r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */
+
+       /* Save host state to stack. */
+       stw     r3, HOST_RUN(r1)
+       mflr    r3
+       stw     r3, HOST_STACK_LR(r1)
+
+       /* Save host non-volatile register state to stack. */
+       stw     r14, HOST_NV_GPR(r14)(r1)
+       stw     r15, HOST_NV_GPR(r15)(r1)
+       stw     r16, HOST_NV_GPR(r16)(r1)
+       stw     r17, HOST_NV_GPR(r17)(r1)
+       stw     r18, HOST_NV_GPR(r18)(r1)
+       stw     r19, HOST_NV_GPR(r19)(r1)
+       stw     r20, HOST_NV_GPR(r20)(r1)
+       stw     r21, HOST_NV_GPR(r21)(r1)
+       stw     r22, HOST_NV_GPR(r22)(r1)
+       stw     r23, HOST_NV_GPR(r23)(r1)
+       stw     r24, HOST_NV_GPR(r24)(r1)
+       stw     r25, HOST_NV_GPR(r25)(r1)
+       stw     r26, HOST_NV_GPR(r26)(r1)
+       stw     r27, HOST_NV_GPR(r27)(r1)
+       stw     r28, HOST_NV_GPR(r28)(r1)
+       stw     r29, HOST_NV_GPR(r29)(r1)
+       stw     r30, HOST_NV_GPR(r30)(r1)
+       stw     r31, HOST_NV_GPR(r31)(r1)
+
+       /* Load guest non-volatiles. */
+       lwz     r14, VCPU_GPR(r14)(r4)
+       lwz     r15, VCPU_GPR(r15)(r4)
+       lwz     r16, VCPU_GPR(r16)(r4)
+       lwz     r17, VCPU_GPR(r17)(r4)
+       lwz     r18, VCPU_GPR(r18)(r4)
+       lwz     r19, VCPU_GPR(r19)(r4)
+       lwz     r20, VCPU_GPR(r20)(r4)
+       lwz     r21, VCPU_GPR(r21)(r4)
+       lwz     r22, VCPU_GPR(r22)(r4)
+       lwz     r23, VCPU_GPR(r23)(r4)
+       lwz     r24, VCPU_GPR(r24)(r4)
+       lwz     r25, VCPU_GPR(r25)(r4)
+       lwz     r26, VCPU_GPR(r26)(r4)
+       lwz     r27, VCPU_GPR(r27)(r4)
+       lwz     r28, VCPU_GPR(r28)(r4)
+       lwz     r29, VCPU_GPR(r29)(r4)
+       lwz     r30, VCPU_GPR(r30)(r4)
+       lwz     r31, VCPU_GPR(r31)(r4)
+
+lightweight_exit:
+       stw     r2, HOST_R2(r1)
+
+       mfspr   r3, SPRN_PID
+       stw     r3, VCPU_HOST_PID(r4)
+       lwz     r3, VCPU_PID(r4)
+       mtspr   SPRN_PID, r3
+
+       /* Prevent all TLB updates. */
+       mfmsr   r5
+       lis     r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@h
+       ori     r6, r6, (MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
+       andc    r6, r5, r6
+       mtmsr   r6
+
+       /* Save the host's non-pinned TLB mappings, and load the guest mappings
+        * over them. Leave the host's "pinned" kernel mappings in place. */
+       /* XXX optimization: use generation count to avoid swapping unmodified
+        * entries. */
+       mfspr   r10, SPRN_MMUCR                 /* Save host MMUCR. */
+       lis     r8, tlb_44x_hwater@ha
+       lwz     r8, tlb_44x_hwater@l(r8)
+       addi    r3, r4, VCPU_HOST_TLB - 4
+       addi    r9, r4, VCPU_SHADOW_TLB - 4
+       li      r6, 0
+1:
+       /* Save host entry. */
+       tlbre   r7, r6, PPC44x_TLB_PAGEID
+       mfspr   r5, SPRN_MMUCR
+       stwu    r5, 4(r3)
+       stwu    r7, 4(r3)
+       tlbre   r7, r6, PPC44x_TLB_XLAT
+       stwu    r7, 4(r3)
+       tlbre   r7, r6, PPC44x_TLB_ATTRIB
+       stwu    r7, 4(r3)
+       /* Load guest entry. */
+       lwzu    r7, 4(r9)
+       mtspr   SPRN_MMUCR, r7
+       lwzu    r7, 4(r9)
+       tlbwe   r7, r6, PPC44x_TLB_PAGEID
+       lwzu    r7, 4(r9)
+       tlbwe   r7, r6, PPC44x_TLB_XLAT
+       lwzu    r7, 4(r9)
+       tlbwe   r7, r6, PPC44x_TLB_ATTRIB
+       /* Increment index. */
+       addi    r6, r6, 1
+       cmpw    r6, r8
+       blt     1b
+       mtspr   SPRN_MMUCR, r10                 /* Restore host MMUCR. */
+
+       iccci   0, 0 /* XXX hack */
+
+       /* Load some guest volatiles. */
+       lwz     r0, VCPU_GPR(r0)(r4)
+       lwz     r2, VCPU_GPR(r2)(r4)
+       lwz     r9, VCPU_GPR(r9)(r4)
+       lwz     r10, VCPU_GPR(r10)(r4)
+       lwz     r11, VCPU_GPR(r11)(r4)
+       lwz     r12, VCPU_GPR(r12)(r4)
+       lwz     r13, VCPU_GPR(r13)(r4)
+       lwz     r3, VCPU_LR(r4)
+       mtlr    r3
+       lwz     r3, VCPU_XER(r4)
+       mtxer   r3
+
+       /* Switch the IVPR. XXX If we take a TLB miss after this we're screwed,
+        * so how do we make sure vcpu won't fault? */
+       lis     r8, kvmppc_booke_handlers@ha
+       lwz     r8, kvmppc_booke_handlers@l(r8)
+       mtspr   SPRN_IVPR, r8
+
+       /* Save vcpu pointer for the exception handlers. */
+       mtspr   SPRN_SPRG1, r4
+
+       /* Can't switch the stack pointer until after IVPR is switched,
+        * because host interrupt handlers would get confused. */
+       lwz     r1, VCPU_GPR(r1)(r4)
+
+       /* XXX handle USPRG0 */
+       /* Host interrupt handlers may have clobbered these guest-readable
+        * SPRGs, so we need to reload them here with the guest's values. */
+       lwz     r3, VCPU_SPRG4(r4)
+       mtspr   SPRN_SPRG4, r3
+       lwz     r3, VCPU_SPRG5(r4)
+       mtspr   SPRN_SPRG5, r3
+       lwz     r3, VCPU_SPRG6(r4)
+       mtspr   SPRN_SPRG6, r3
+       lwz     r3, VCPU_SPRG7(r4)
+       mtspr   SPRN_SPRG7, r3
+
+       /* Finish loading guest volatiles and jump to guest. */
+       lwz     r3, VCPU_CTR(r4)
+       mtctr   r3
+       lwz     r3, VCPU_CR(r4)
+       mtcr    r3
+       lwz     r5, VCPU_GPR(r5)(r4)
+       lwz     r6, VCPU_GPR(r6)(r4)
+       lwz     r7, VCPU_GPR(r7)(r4)
+       lwz     r8, VCPU_GPR(r8)(r4)
+       lwz     r3, VCPU_PC(r4)
+       mtsrr0  r3
+       lwz     r3, VCPU_MSR(r4)
+       oris    r3, r3, KVMPPC_MSR_MASK@h
+       ori     r3, r3, KVMPPC_MSR_MASK@l
+       mtsrr1  r3
+       lwz     r3, VCPU_GPR(r3)(r4)
+       lwz     r4, VCPU_GPR(r4)(r4)
+       rfi
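
The srawi by 2 in heavyweight_exit above undoes the packing that kvmppc_handle_exit() uses for its return value: a negative errno is shifted left two bits and combined with the RESUME flag bits (RESUME_FLAG_NV in bit 0 and RESUME_FLAG_HOST in bit 1 are assumed here; the exact constants live in kvm_ppc.h, not in this hunk). A small C sketch of both directions:

    /* Pack an errno for a host exit, preserving the NV (reload nonvolatiles)
     * flag, like "r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV)"
     * in kvmppc_handle_exit(). Flag bit values are assumptions, see above. */
    static inline int resume_host_errno(int err, int prev)
    {
            return (err << 2) | RESUME_FLAG_HOST | (prev & RESUME_FLAG_NV);
    }

    /* Recover the errno; the arithmetic shift mirrors "srawi r3, r3, 2". */
    static inline int resume_unpack_errno(int r)
    {
            return r >> 2;
    }

If RESUME_FLAG_HOST is clear, the assembly above instead branches to lightweight_exit and re-enters the guest.
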
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
new file mode 100644 (file)
index 0000000..a03fe0c
--- /dev/null
@@ -0,0 +1,760 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#include <linux/jiffies.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/kvm_host.h>
+
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+#include <asm/time.h>
+#include <asm/byteorder.h>
+#include <asm/kvm_ppc.h>
+
+#include "44x_tlb.h"
+
+/* Instruction decoding */
+static inline unsigned int get_op(u32 inst)
+{
+       return inst >> 26;
+}
+
+static inline unsigned int get_xop(u32 inst)
+{
+       return (inst >> 1) & 0x3ff;
+}
+
+static inline unsigned int get_sprn(u32 inst)
+{
+       return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_dcrn(u32 inst)
+{
+       return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
+}
+
+static inline unsigned int get_rt(u32 inst)
+{
+       return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_rs(u32 inst)
+{
+       return (inst >> 21) & 0x1f;
+}
+
+static inline unsigned int get_ra(u32 inst)
+{
+       return (inst >> 16) & 0x1f;
+}
+
+static inline unsigned int get_rb(u32 inst)
+{
+       return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_rc(u32 inst)
+{
+       return inst & 0x1;
+}
+
+static inline unsigned int get_ws(u32 inst)
+{
+       return (inst >> 11) & 0x1f;
+}
+
+static inline unsigned int get_d(u32 inst)
+{
+       return inst & 0xffff;
+}
+
+static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
+                             const struct tlbe *tlbe)
+{
+       gpa_t gpa;
+
+       if (!get_tlb_v(tlbe))
+               return 0;
+
+       /* Does it match current guest AS? */
+       /* XXX what about IS != DS? */
+       if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS))
+               return 0;
+
+       gpa = get_tlb_raddr(tlbe);
+       if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
+               /* Mapping is not for RAM. */
+               return 0;
+
+       return 1;
+}
+
+static int kvmppc_emul_tlbwe(struct kvm_vcpu *vcpu, u32 inst)
+{
+       u64 eaddr;
+       u64 raddr;
+       u64 asid;
+       u32 flags;
+       struct tlbe *tlbe;
+       unsigned int ra;
+       unsigned int rs;
+       unsigned int ws;
+       unsigned int index;
+
+       ra = get_ra(inst);
+       rs = get_rs(inst);
+       ws = get_ws(inst);
+
+       index = vcpu->arch.gpr[ra];
+       if (index >= PPC44x_TLB_SIZE) {
+               printk("%s: index %d\n", __func__, index);
+               kvmppc_dump_vcpu(vcpu);
+               return EMULATE_FAIL;
+       }
+
+       tlbe = &vcpu->arch.guest_tlb[index];
+
+       /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
+       if (tlbe->word0 & PPC44x_TLB_VALID) {
+               eaddr = get_tlb_eaddr(tlbe);
+               asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
+               kvmppc_mmu_invalidate(vcpu, eaddr, asid);
+       }
+
+       switch (ws) {
+       case PPC44x_TLB_PAGEID:
+               tlbe->tid = vcpu->arch.mmucr & 0xff;
+               tlbe->word0 = vcpu->arch.gpr[rs];
+               break;
+
+       case PPC44x_TLB_XLAT:
+               tlbe->word1 = vcpu->arch.gpr[rs];
+               break;
+
+       case PPC44x_TLB_ATTRIB:
+               tlbe->word2 = vcpu->arch.gpr[rs];
+               break;
+
+       default:
+               return EMULATE_FAIL;
+       }
+
+       if (tlbe_is_host_safe(vcpu, tlbe)) {
+               eaddr = get_tlb_eaddr(tlbe);
+               raddr = get_tlb_raddr(tlbe);
+               asid = (tlbe->word0 & PPC44x_TLB_TS) | tlbe->tid;
+               flags = tlbe->word2 & 0xffff;
+
+               /* Create a 4KB mapping on the host. If the guest wanted a
+                * large page, only the first 4KB is mapped here and the rest
+                * are mapped on the fly. */
+               kvmppc_mmu_map(vcpu, eaddr, raddr >> PAGE_SHIFT, asid, flags);
+       }
+
+       return EMULATE_DONE;
+}
+
+static void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
+{
+       if (vcpu->arch.tcr & TCR_DIE) {
+               /* The decrementer ticks at the same rate as the timebase, so
+                * that's how we convert the guest DEC value to the number of
+                * host ticks. */
+               unsigned long nr_jiffies;
+
+               nr_jiffies = vcpu->arch.dec / tb_ticks_per_jiffy;
+               mod_timer(&vcpu->arch.dec_timer,
+                         get_jiffies_64() + nr_jiffies);
+       } else {
+               del_timer(&vcpu->arch.dec_timer);
+       }
+}
+
+static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.pc = vcpu->arch.srr0;
+       kvmppc_set_msr(vcpu, vcpu->arch.srr1);
+}
+
+/* XXX to do:
+ * lhax
+ * lhaux
+ * lswx
+ * lswi
+ * stswx
+ * stswi
+ * lha
+ * lhau
+ * lmw
+ * stmw
+ *
+ * XXX is_bigendian should depend on MMU mapping or MSR[LE]
+ */
+int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       u32 inst = vcpu->arch.last_inst;
+       u32 ea;
+       int ra;
+       int rb;
+       int rc;
+       int rs;
+       int rt;
+       int sprn;
+       int dcrn;
+       enum emulation_result emulated = EMULATE_DONE;
+       int advance = 1;
+
+       switch (get_op(inst)) {
+       case 3:                                                 /* trap */
+               printk("trap!\n");
+               kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_PROGRAM);
+               advance = 0;
+               break;
+
+       case 19:
+               switch (get_xop(inst)) {
+               case 50:                                        /* rfi */
+                       kvmppc_emul_rfi(vcpu);
+                       advance = 0;
+                       break;
+
+               default:
+                       emulated = EMULATE_FAIL;
+                       break;
+               }
+               break;
+
+       case 31:
+               switch (get_xop(inst)) {
+
+               case 83:                                        /* mfmsr */
+                       rt = get_rt(inst);
+                       vcpu->arch.gpr[rt] = vcpu->arch.msr;
+                       break;
+
+               case 87:                                        /* lbzx */
+                       rt = get_rt(inst);
+                       emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+                       break;
+
+               case 131:                                       /* wrtee */
+                       rs = get_rs(inst);
+                       vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+                                        | (vcpu->arch.gpr[rs] & MSR_EE);
+                       break;
+
+               case 146:                                       /* mtmsr */
+                       rs = get_rs(inst);
+                       kvmppc_set_msr(vcpu, vcpu->arch.gpr[rs]);
+                       break;
+
+               case 163:                                       /* wrteei */
+                       vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE)
+                                        | (inst & MSR_EE);
+                       break;
+
+               case 215:                                       /* stbx */
+                       rs = get_rs(inst);
+                       emulated = kvmppc_handle_store(run, vcpu,
+                                                      vcpu->arch.gpr[rs],
+                                                      1, 1);
+                       break;
+
+               case 247:                                       /* stbux */
+                       rs = get_rs(inst);
+                       ra = get_ra(inst);
+                       rb = get_rb(inst);
+
+                       ea = vcpu->arch.gpr[rb];
+                       if (ra)
+                               ea += vcpu->arch.gpr[ra];
+
+                       emulated = kvmppc_handle_store(run, vcpu,
+                                                      vcpu->arch.gpr[rs],
+                                                      1, 1);
+                       vcpu->arch.gpr[ra] = ea;
+                       break;
+
+               case 279:                                       /* lhzx */
+                       rt = get_rt(inst);
+                       emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+                       break;
+
+               case 311:                                       /* lhzux */
+                       rt = get_rt(inst);
+                       ra = get_ra(inst);
+                       rb = get_rb(inst);
+
+                       ea = vcpu->arch.gpr[rb];
+                       if (ra)
+                               ea += vcpu->arch.gpr[ra];
+
+                       emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+                       vcpu->arch.gpr[ra] = ea;
+                       break;
+
+               case 323:                                       /* mfdcr */
+                       dcrn = get_dcrn(inst);
+                       rt = get_rt(inst);
+
+                       /* The guest may access CPR0 registers to determine the timebase
+                        * frequency, and it must know the real host frequency because it
+                        * can directly access the timebase registers.
+                        *
+                        * It would be possible to emulate those accesses in userspace,
+                        * but userspace can really only figure out the end frequency.
+                        * We could decompose that into the factors that compute it, but
+                        * that's tricky math, and it's easier to just report the real
+                        * CPR0 values.
+                        */
+                       switch (dcrn) {
+                       case DCRN_CPR0_CONFIG_ADDR:
+                               vcpu->arch.gpr[rt] = vcpu->arch.cpr0_cfgaddr;
+                               break;
+                       case DCRN_CPR0_CONFIG_DATA:
+                               local_irq_disable();
+                               mtdcr(DCRN_CPR0_CONFIG_ADDR,
+                                     vcpu->arch.cpr0_cfgaddr);
+                               vcpu->arch.gpr[rt] = mfdcr(DCRN_CPR0_CONFIG_DATA);
+                               local_irq_enable();
+                               break;
+                       default:
+                               run->dcr.dcrn = dcrn;
+                               run->dcr.data = 0;
+                               run->dcr.is_write = 0;
+                               vcpu->arch.io_gpr = rt;
+                               vcpu->arch.dcr_needed = 1;
+                               emulated = EMULATE_DO_DCR;
+                       }
+
+                       break;
+
+               case 339:                                       /* mfspr */
+                       sprn = get_sprn(inst);
+                       rt = get_rt(inst);
+
+                       switch (sprn) {
+                       case SPRN_SRR0:
+                               vcpu->arch.gpr[rt] = vcpu->arch.srr0; break;
+                       case SPRN_SRR1:
+                               vcpu->arch.gpr[rt] = vcpu->arch.srr1; break;
+                       case SPRN_MMUCR:
+                               vcpu->arch.gpr[rt] = vcpu->arch.mmucr; break;
+                       case SPRN_PID:
+                               vcpu->arch.gpr[rt] = vcpu->arch.pid; break;
+                       case SPRN_IVPR:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivpr; break;
+                       case SPRN_CCR0:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ccr0; break;
+                       case SPRN_CCR1:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ccr1; break;
+                       case SPRN_PVR:
+                               vcpu->arch.gpr[rt] = vcpu->arch.pvr; break;
+                       case SPRN_DEAR:
+                               vcpu->arch.gpr[rt] = vcpu->arch.dear; break;
+                       case SPRN_ESR:
+                               vcpu->arch.gpr[rt] = vcpu->arch.esr; break;
+                       case SPRN_DBCR0:
+                               vcpu->arch.gpr[rt] = vcpu->arch.dbcr0; break;
+                       case SPRN_DBCR1:
+                               vcpu->arch.gpr[rt] = vcpu->arch.dbcr1; break;
+
+                       /* Note: mftb and TBRL/TBWL are user-accessible, so
+                        * the guest can always access the real TB anyway.
+                        * In fact, we probably will never see these traps. */
+                       case SPRN_TBWL:
+                               vcpu->arch.gpr[rt] = mftbl(); break;
+                       case SPRN_TBWU:
+                               vcpu->arch.gpr[rt] = mftbu(); break;
+
+                       case SPRN_SPRG0:
+                               vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break;
+                       case SPRN_SPRG1:
+                               vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break;
+                       case SPRN_SPRG2:
+                               vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break;
+                       case SPRN_SPRG3:
+                               vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break;
+                       /* Note: SPRG4-7 are user-readable, so we don't get
+                        * a trap. */
+
+                       case SPRN_IVOR0:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[0]; break;
+                       case SPRN_IVOR1:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[1]; break;
+                       case SPRN_IVOR2:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[2]; break;
+                       case SPRN_IVOR3:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[3]; break;
+                       case SPRN_IVOR4:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[4]; break;
+                       case SPRN_IVOR5:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[5]; break;
+                       case SPRN_IVOR6:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[6]; break;
+                       case SPRN_IVOR7:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[7]; break;
+                       case SPRN_IVOR8:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[8]; break;
+                       case SPRN_IVOR9:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[9]; break;
+                       case SPRN_IVOR10:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[10]; break;
+                       case SPRN_IVOR11:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[11]; break;
+                       case SPRN_IVOR12:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[12]; break;
+                       case SPRN_IVOR13:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[13]; break;
+                       case SPRN_IVOR14:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[14]; break;
+                       case SPRN_IVOR15:
+                               vcpu->arch.gpr[rt] = vcpu->arch.ivor[15]; break;
+
+                       default:
+                               printk("mfspr: unknown spr %x\n", sprn);
+                               vcpu->arch.gpr[rt] = 0;
+                               break;
+                       }
+                       break;
+
+               case 407:                                       /* sthx */
+                       rs = get_rs(inst);
+                       ra = get_ra(inst);
+                       rb = get_rb(inst);
+
+                       emulated = kvmppc_handle_store(run, vcpu,
+                                                      vcpu->arch.gpr[rs],
+                                                      2, 1);
+                       break;
+
+               case 439:                                       /* sthux */
+                       rs = get_rs(inst);
+                       ra = get_ra(inst);
+                       rb = get_rb(inst);
+
+                       ea = vcpu->arch.gpr[rb];
+                       if (ra)
+                               ea += vcpu->arch.gpr[ra];
+
+                       emulated = kvmppc_handle_store(run, vcpu,
+                                                      vcpu->arch.gpr[rs],
+                                                      2, 1);
+                       vcpu->arch.gpr[ra] = ea;
+                       break;
+
+               case 451:                                       /* mtdcr */
+                       dcrn = get_dcrn(inst);
+                       rs = get_rs(inst);
+
+                       /* emulate some access in kernel */
+                       switch (dcrn) {
+                       case DCRN_CPR0_CONFIG_ADDR:
+                               vcpu->arch.cpr0_cfgaddr = vcpu->arch.gpr[rs];
+                               break;
+                       default:
+                               run->dcr.dcrn = dcrn;
+                               run->dcr.data = vcpu->arch.gpr[rs];
+                               run->dcr.is_write = 1;
+                               vcpu->arch.dcr_needed = 1;
+                               emulated = EMULATE_DO_DCR;
+                       }
+
+                       break;
+
+               case 467:                                       /* mtspr */
+                       sprn = get_sprn(inst);
+                       rs = get_rs(inst);
+                       switch (sprn) {
+                       case SPRN_SRR0:
+                               vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break;
+                       case SPRN_SRR1:
+                               vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break;
+                       case SPRN_MMUCR:
+                               vcpu->arch.mmucr = vcpu->arch.gpr[rs]; break;
+                       case SPRN_PID:
+                               vcpu->arch.pid = vcpu->arch.gpr[rs]; break;
+                       case SPRN_CCR0:
+                               vcpu->arch.ccr0 = vcpu->arch.gpr[rs]; break;
+                       case SPRN_CCR1:
+                               vcpu->arch.ccr1 = vcpu->arch.gpr[rs]; break;
+                       case SPRN_DEAR:
+                               vcpu->arch.dear = vcpu->arch.gpr[rs]; break;
+                       case SPRN_ESR:
+                               vcpu->arch.esr = vcpu->arch.gpr[rs]; break;
+                       case SPRN_DBCR0:
+                               vcpu->arch.dbcr0 = vcpu->arch.gpr[rs]; break;
+                       case SPRN_DBCR1:
+                               vcpu->arch.dbcr1 = vcpu->arch.gpr[rs]; break;
+
+                       /* XXX We need to context-switch the timebase for
+                        * watchdog and FIT. */
+                       case SPRN_TBWL: break;
+                       case SPRN_TBWU: break;
+
+                       case SPRN_DEC:
+                               vcpu->arch.dec = vcpu->arch.gpr[rs];
+                               kvmppc_emulate_dec(vcpu);
+                               break;
+
+                       case SPRN_TSR:
+                               vcpu->arch.tsr &= ~vcpu->arch.gpr[rs]; break;
+
+                       case SPRN_TCR:
+                               vcpu->arch.tcr = vcpu->arch.gpr[rs];
+                               kvmppc_emulate_dec(vcpu);
+                               break;
+
+                       case SPRN_SPRG0:
+                               vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break;
+                       case SPRN_SPRG1:
+                               vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break;
+                       case SPRN_SPRG2:
+                               vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break;
+                       case SPRN_SPRG3:
+                               vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break;
+
+                       /* Note: SPRG4-7 are user-readable. These values are
+                        * loaded into the real SPRGs when resuming the
+                        * guest. */
+                       case SPRN_SPRG4:
+                               vcpu->arch.sprg4 = vcpu->arch.gpr[rs]; break;
+                       case SPRN_SPRG5:
+                               vcpu->arch.sprg5 = vcpu->arch.gpr[rs]; break;
+                       case SPRN_SPRG6:
+                               vcpu->arch.sprg6 = vcpu->arch.gpr[rs]; break;
+                       case SPRN_SPRG7:
+                               vcpu->arch.sprg7 = vcpu->arch.gpr[rs]; break;
+
+                       case SPRN_IVPR:
+                               vcpu->arch.ivpr = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR0:
+                               vcpu->arch.ivor[0] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR1:
+                               vcpu->arch.ivor[1] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR2:
+                               vcpu->arch.ivor[2] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR3:
+                               vcpu->arch.ivor[3] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR4:
+                               vcpu->arch.ivor[4] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR5:
+                               vcpu->arch.ivor[5] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR6:
+                               vcpu->arch.ivor[6] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR7:
+                               vcpu->arch.ivor[7] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR8:
+                               vcpu->arch.ivor[8] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR9:
+                               vcpu->arch.ivor[9] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR10:
+                               vcpu->arch.ivor[10] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR11:
+                               vcpu->arch.ivor[11] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR12:
+                               vcpu->arch.ivor[12] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR13:
+                               vcpu->arch.ivor[13] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR14:
+                               vcpu->arch.ivor[14] = vcpu->arch.gpr[rs]; break;
+                       case SPRN_IVOR15:
+                               vcpu->arch.ivor[15] = vcpu->arch.gpr[rs]; break;
+
+                       default:
+                               printk("mtspr: unknown spr %x\n", sprn);
+                               emulated = EMULATE_FAIL;
+                               break;
+                       }
+                       break;
+
+               case 470:                                       /* dcbi */
+                       /* Do nothing. The guest is performing dcbi because
+                        * hardware DMA is not snooped by the dcache, but
+                        * emulated DMA either goes through the dcache as
+                        * normal writes, or the host kernel has handled dcache
+                        * coherence. */
+                       break;
+
+               case 534:                                       /* lwbrx */
+                       rt = get_rt(inst);
+                       emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
+                       break;
+
+               case 566:                                       /* tlbsync */
+                       break;
+
+               case 662:                                       /* stwbrx */
+                       rs = get_rs(inst);
+                       ra = get_ra(inst);
+                       rb = get_rb(inst);
+
+                       emulated = kvmppc_handle_store(run, vcpu,
+                                                      vcpu->arch.gpr[rs],
+                                                      4, 0);
+                       break;
+
+               case 978:                                       /* tlbwe */
+                       emulated = kvmppc_emul_tlbwe(vcpu, inst);
+                       break;
+
+               case 914: {                                     /* tlbsx */
+                       int index;
+                       unsigned int as = get_mmucr_sts(vcpu);
+                       unsigned int pid = get_mmucr_stid(vcpu);
+
+                       rt = get_rt(inst);
+                       ra = get_ra(inst);
+                       rb = get_rb(inst);
+                       rc = get_rc(inst);
+
+                       ea = vcpu->arch.gpr[rb];
+                       if (ra)
+                               ea += vcpu->arch.gpr[ra];
+
+                       index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
+                       if (rc) {
+                               if (index < 0)
+                                       vcpu->arch.cr &= ~0x20000000;
+                               else
+                                       vcpu->arch.cr |= 0x20000000;
+                       }
+                       vcpu->arch.gpr[rt] = index;
+                       break;
+               }
+
+               case 790:                                       /* lhbrx */
+                       rt = get_rt(inst);
+                       emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
+                       break;
+
+               case 918:                                       /* sthbrx */
+                       rs = get_rs(inst);
+                       ra = get_ra(inst);
+                       rb = get_rb(inst);
+
+                       emulated = kvmppc_handle_store(run, vcpu,
+                                                      vcpu->arch.gpr[rs],
+                                                      2, 0);
+                       break;
+
+               case 966:                                       /* iccci */
+                       break;
+
+               default:
+                       printk("unknown: op %d xop %d\n", get_op(inst),
+                               get_xop(inst));
+                       emulated = EMULATE_FAIL;
+                       break;
+               }
+               break;
+
+       case 32:                                                /* lwz */
+               rt = get_rt(inst);
+               emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+               break;
+
+       case 33:                                                /* lwzu */
+               ra = get_ra(inst);
+               rt = get_rt(inst);
+               emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
+               vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+               break;
+
+       case 34:                                                /* lbz */
+               rt = get_rt(inst);
+               emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+               break;
+
+       case 35:                                                /* lbzu */
+               ra = get_ra(inst);
+               rt = get_rt(inst);
+               emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
+               vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+               break;
+
+       case 36:                                                /* stw */
+               rs = get_rs(inst);
+               emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+                                              4, 1);
+               break;
+
+       case 37:                                                /* stwu */
+               ra = get_ra(inst);
+               rs = get_rs(inst);
+               emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+                                              4, 1);
+               vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+               break;
+
+       case 38:                                                /* stb */
+               rs = get_rs(inst);
+               emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+                                              1, 1);
+               break;
+
+       case 39:                                                /* stbu */
+               ra = get_ra(inst);
+               rs = get_rs(inst);
+               emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+                                              1, 1);
+               vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+               break;
+
+       case 40:                                                /* lhz */
+               rt = get_rt(inst);
+               emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+               break;
+
+       case 41:                                                /* lhzu */
+               ra = get_ra(inst);
+               rt = get_rt(inst);
+               emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
+               vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+               break;
+
+       case 44:                                                /* sth */
+               rs = get_rs(inst);
+               emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+                                              2, 1);
+               break;
+
+       case 45:                                                /* sthu */
+               ra = get_ra(inst);
+               rs = get_rs(inst);
+               emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
+                                              2, 1);
+               vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
+               break;
+
+       default:
+               printk("unknown op %d\n", get_op(inst));
+               emulated = EMULATE_FAIL;
+               break;
+       }
+
+       if (advance)
+               vcpu->arch.pc += 4; /* Advance past emulated instruction. */
+
+       return emulated;
+}
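
The field accessors at the top of emulate.c slice the standard PowerPC opcode and register fields out of the 32-bit instruction word. As a quick standalone check (not part of the patch), lwz r5, 8(r4) has primary opcode 32, RT=5, RA=4, D=8, i.e. 0x80a40008, and the helpers recover exactly those fields:

    #include <assert.h>

    int main(void)
    {
            unsigned int inst = (32u << 26) | (5u << 21) | (4u << 16) | 8u;

            assert(inst == 0x80a40008);
            assert((inst >> 26) == 32);          /* get_op()  */
            assert(((inst >> 21) & 0x1f) == 5);  /* get_rt()  */
            assert(((inst >> 16) & 0x1f) == 4);  /* get_ra()  */
            assert((inst & 0xffff) == 8);        /* get_d()   */
            return 0;
    }

The same masks explain why get_rt() and get_rs() are identical: RT and RS occupy the same bit field, and which name applies depends only on whether the instruction is a load or a store.
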
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
new file mode 100644 (file)
index 0000000..bad40bd
--- /dev/null
@@ -0,0 +1,436 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <asm/cputable.h>
+#include <asm/uaccess.h>
+#include <asm/kvm_ppc.h>
+
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+       return gfn;
+}
+
+int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
+{
+       /* XXX implement me */
+       return 0;
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+{
+       return 1;
+}
+
+
+int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er;
+       int r;
+
+       er = kvmppc_emulate_instruction(run, vcpu);
+       switch (er) {
+       case EMULATE_DONE:
+               /* Future optimization: only reload non-volatiles if they were
+                * actually modified. */
+               r = RESUME_GUEST_NV;
+               break;
+       case EMULATE_DO_MMIO:
+               run->exit_reason = KVM_EXIT_MMIO;
+               /* We must reload nonvolatiles because "update" load/store
+                * instructions modify register state. */
+               /* Future optimization: only reload non-volatiles if they were
+                * actually modified. */
+               r = RESUME_HOST_NV;
+               break;
+       case EMULATE_FAIL:
+               /* XXX Deliver Program interrupt to guest. */
+               printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
+                      vcpu->arch.last_inst);
+               r = RESUME_HOST;
+               break;
+       default:
+               BUG();
+       }
+
+       return r;
+}
+
+void kvm_arch_hardware_enable(void *garbage)
+{
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+       int r;
+
+       if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
+               r = 0;
+       else
+               r = -ENOTSUPP;
+
+       *(int *)rtn = r;
+}
+
+struct kvm *kvm_arch_create_vm(void)
+{
+       struct kvm *kvm;
+
+       kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
+       if (!kvm)
+               return ERR_PTR(-ENOMEM);
+
+       return kvm;
+}
+
+static void kvmppc_free_vcpus(struct kvm *kvm)
+{
+       unsigned int i;
+
+       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+               if (kvm->vcpus[i]) {
+                       kvm_arch_vcpu_free(kvm->vcpus[i]);
+                       kvm->vcpus[i] = NULL;
+               }
+       }
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+       kvmppc_free_vcpus(kvm);
+       kvm_free_physmem(kvm);
+       kfree(kvm);
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+       int r;
+
+       switch (ext) {
+       case KVM_CAP_USER_MEMORY:
+               r = 1;
+               break;
+       default:
+               r = 0;
+               break;
+       }
+       return r;
+
+}
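
This is the kernel half of capability probing. A hedged userspace sketch of the matching query, assuming kvm_fd is an already-open /dev/kvm descriptor:

#include <linux/kvm.h>
#include <sys/ioctl.h>

int has_user_memory(int kvm_fd)
{
        /* Matches the switch above: 1 for KVM_CAP_USER_MEMORY, 0 otherwise. */
        return ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY) > 0;
}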
+
+long kvm_arch_dev_ioctl(struct file *filp,
+                        unsigned int ioctl, unsigned long arg)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_set_memory_region(struct kvm *kvm,
+                               struct kvm_userspace_memory_region *mem,
+                               struct kvm_memory_slot old,
+                               int user_alloc)
+{
+       return 0;
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+       struct kvm_vcpu *vcpu;
+       int err;
+
+       vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+       if (!vcpu) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = kvm_vcpu_init(vcpu, kvm, id);
+       if (err)
+               goto free_vcpu;
+
+       return vcpu;
+
+free_vcpu:
+       kmem_cache_free(kvm_vcpu_cache, vcpu);
+out:
+       return ERR_PTR(err);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+       kvm_vcpu_uninit(vcpu);
+       kmem_cache_free(kvm_vcpu_cache, vcpu);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_arch_vcpu_free(vcpu);
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER];
+
+       return test_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+static void kvmppc_decrementer_func(unsigned long data)
+{
+       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+       kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func,
+                   (unsigned long)vcpu);
+
+       return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+}
+
+void decache_vcpus_on_cpu(int cpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+                                    struct kvm_debug_guest *dbg)
+{
+       return -ENOTSUPP;
+}
+
+static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
+                                     struct kvm_run *run)
+{
+       u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+       *gpr = run->dcr.data;
+}
+
+static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
+                                      struct kvm_run *run)
+{
+       u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
+
+       if (run->mmio.len > sizeof(*gpr)) {
+               printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
+               return;
+       }
+
+       if (vcpu->arch.mmio_is_bigendian) {
+               switch (run->mmio.len) {
+               case 4: *gpr = *(u32 *)run->mmio.data; break;
+               case 2: *gpr = *(u16 *)run->mmio.data; break;
+               case 1: *gpr = *(u8 *)run->mmio.data; break;
+               }
+       } else {
+               /* Convert BE data from userland back to LE. */
+               switch (run->mmio.len) {
+               case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
+               case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
+               case 1: *gpr = *(u8 *)run->mmio.data; break;
+               }
+       }
+}
+
+int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                       unsigned int rt, unsigned int bytes, int is_bigendian)
+{
+       if (bytes > sizeof(run->mmio.data)) {
+               printk(KERN_ERR "%s: bad MMIO length: %u\n", __func__,
+                      bytes);
+       }
+
+       run->mmio.phys_addr = vcpu->arch.paddr_accessed;
+       run->mmio.len = bytes;
+       run->mmio.is_write = 0;
+
+       vcpu->arch.io_gpr = rt;
+       vcpu->arch.mmio_is_bigendian = is_bigendian;
+       vcpu->mmio_needed = 1;
+       vcpu->mmio_is_write = 0;
+
+       return EMULATE_DO_MMIO;
+}
+
+int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                        u32 val, unsigned int bytes, int is_bigendian)
+{
+       void *data = run->mmio.data;
+
+       if (bytes > sizeof(run->mmio.data)) {
+               printk(KERN_ERR "%s: bad MMIO length: %u\n", __func__,
+                      bytes);
+       }
+
+       run->mmio.phys_addr = vcpu->arch.paddr_accessed;
+       run->mmio.len = bytes;
+       run->mmio.is_write = 1;
+       vcpu->mmio_needed = 1;
+       vcpu->mmio_is_write = 1;
+
+       /* Store the value at the lowest bytes in 'data'. */
+       if (is_bigendian) {
+               switch (bytes) {
+               case 4: *(u32 *)data = val; break;
+               case 2: *(u16 *)data = val; break;
+               case 1: *(u8  *)data = val; break;
+               }
+       } else {
+               /* Store LE value into 'data'. */
+               switch (bytes) {
+               case 4: st_le32(data, val); break;
+               case 2: st_le16(data, val); break;
+               case 1: *(u8 *)data = val; break;
+               }
+       }
+
+       return EMULATE_DO_MMIO;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       int r;
+       sigset_t sigsaved;
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+       if (vcpu->mmio_needed) {
+               if (!vcpu->mmio_is_write)
+                       kvmppc_complete_mmio_load(vcpu, run);
+               vcpu->mmio_needed = 0;
+       } else if (vcpu->arch.dcr_needed) {
+               if (!vcpu->arch.dcr_is_write)
+                       kvmppc_complete_dcr_load(vcpu, run);
+               vcpu->arch.dcr_needed = 0;
+       }
+
+       kvmppc_check_and_deliver_interrupts(vcpu);
+
+       local_irq_disable();
+       kvm_guest_enter();
+       r = __kvmppc_vcpu_run(run, vcpu);
+       kvm_guest_exit();
+       local_irq_enable();
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+       return r;
+}
+
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
+{
+       kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                                    struct kvm_mp_state *mp_state)
+{
+       return -EINVAL;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                                    struct kvm_mp_state *mp_state)
+{
+       return -EINVAL;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+                         unsigned int ioctl, unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       long r;
+
+       switch (ioctl) {
+       case KVM_INTERRUPT: {
+               struct kvm_interrupt irq;
+               r = -EFAULT;
+               if (copy_from_user(&irq, argp, sizeof(irq)))
+                       goto out;
+               r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+               break;
+       }
+       default:
+               r = -EINVAL;
+       }
+
+out:
+       return r;
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+       return -ENOTSUPP;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+                       unsigned int ioctl, unsigned long arg)
+{
+       long r;
+
+       switch (ioctl) {
+       default:
+               r = -EINVAL;
+       }
+
+       return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+       return 0;
+}
+
+void kvm_arch_exit(void)
+{
+}
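
Taken together, kvmppc_handle_load() and kvmppc_handle_store() above fill in run->mmio and exit to userspace with KVM_EXIT_MMIO; on the next KVM_RUN, kvm_arch_vcpu_ioctl_run() completes a pending load through kvmppc_complete_mmio_load(). A hedged sketch of the userspace side of that round trip, where device_read() and device_write() are hypothetical VMM helpers and error handling is omitted:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical VMM device-model helpers, declared only for this sketch. */
void device_read(unsigned long long addr, void *buf, unsigned int len);
void device_write(unsigned long long addr, const void *buf, unsigned int len);

void vcpu_mmio_loop(int vcpu_fd, struct kvm_run *run)
{
        for (;;) {
                ioctl(vcpu_fd, KVM_RUN, 0);

                if (run->exit_reason != KVM_EXIT_MMIO)
                        break;  /* other exit reasons are outside this sketch */

                if (run->mmio.is_write)
                        /* kvmppc_handle_store() already copied the value,
                         * lowest bytes first, into run->mmio.data */
                        device_write(run->mmio.phys_addr, run->mmio.data,
                                     run->mmio.len);
                else
                        /* fill the data; on re-entry the kernel's
                         * kvmppc_complete_mmio_load() moves it into the GPR */
                        device_read(run->mmio.phys_addr, run->mmio.data,
                                    run->mmio.len);
        }
}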
index 5ccb579b81e41bd52c8ac7474c7d36067e911859..f67e118116fa919b3b64b3fa78de786fa7c926ec 100644 (file)
@@ -110,15 +110,6 @@ EXPORT_SYMBOL(phys_mem_access_prot);
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 
-void online_page(struct page *page)
-{
-       ClearPageReserved(page);
-       init_page_count(page);
-       __free_page(page);
-       totalram_pages++;
-       num_physpages++;
-}
-
 #ifdef CONFIG_NUMA
 int memory_add_physaddr_to_nid(u64 start)
 {
@@ -163,19 +154,35 @@ out:
 
 /*
  * walk_memory_resource() needs to make sure there are no holes in a given
- * memory range. On PPC64, since this range comes from /sysfs, the range
- * is guaranteed to be valid, non-overlapping and can not contain any
- * holes. By the time we get here (memory add or remove), /proc/device-tree
- * is updated and correct. Only reason we need to check against device-tree
- * would be if we allow user-land to specify a memory range through a
- * system call/ioctl etc. instead of doing offline/online through /sysfs.
+ * memory range.  PPC64 does not maintain the memory layout in /proc/iomem;
+ * instead it keeps it in lmb.memory structures.  Walk through the memory
+ * regions, skip the holes and invoke the callback for each contiguous region.
  */
 int
 walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
                        int (*func)(unsigned long, unsigned long, void *))
 {
-       return  (*func)(start_pfn, nr_pages, arg);
+       struct lmb_property res;
+       unsigned long pfn, len;
+       u64 end;
+       int ret = -1;
+
+       res.base = (u64) start_pfn << PAGE_SHIFT;
+       res.size = (u64) nr_pages << PAGE_SHIFT;
+
+       end = res.base + res.size - 1;
+       while ((res.base < end) && (lmb_find(&res) >= 0)) {
+               pfn = (unsigned long)(res.base >> PAGE_SHIFT);
+               len = (unsigned long)(res.size >> PAGE_SHIFT);
+               ret = (*func)(pfn, len, arg);
+               if (ret)
+                       break;
+               res.base += (res.size + 1);
+               res.size = (end - res.base + 1);
+       }
+       return ret;
 }
+EXPORT_SYMBOL_GPL(walk_memory_resource);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
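
To make the callback contract concrete, here is a minimal sketch of a caller, assuming only that the walk_memory_resource() declaration above is in scope; the page counter is purely illustrative:

/* Count how many pages are actually present in [start_pfn, start_pfn + nr_pages),
 * relying on walk_memory_resource() to skip the holes. */
static int count_present_cb(unsigned long pfn, unsigned long nr_pages, void *arg)
{
        unsigned long *total = arg;

        *total += nr_pages;     /* called once per contiguous region */
        return 0;               /* returning non-zero stops the walk early */
}

static unsigned long count_present_pages(unsigned long start_pfn,
                                         unsigned long nr_pages)
{
        unsigned long total = 0;

        walk_memory_resource(start_pfn, nr_pages, &total, count_present_cb);
        return total;
}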
 
index cf945d55c2762b072a07a1c8bb2128fd4716081f..acd2fc8cf492086c83fecc7f69469c62b7c03c06 100644 (file)
@@ -44,3 +44,9 @@ config PPC_MPC5200_BUGFIX
 
          It is safe to say 'Y' here
 
+config PPC_MPC5200_GPIO
+       bool "MPC5200 GPIO support"
+       depends on PPC_MPC52xx
+       select HAVE_GPIO_LIB
+       help
+         Enable gpiolib support for MPC5200-based boards
index fe1b81bb522426d5678dbbe8f127152718c4f98f..daf0e1568d6d8e16aa9bc7cc3bbecf5c1b650eb5 100644 (file)
@@ -14,3 +14,5 @@ obj-$(CONFIG_PM)              += mpc52xx_sleep.o mpc52xx_pm.o
 ifeq ($(CONFIG_PPC_LITE5200),y)
        obj-$(CONFIG_PM)        += lite5200_sleep.o lite5200_pm.o
 endif
+
+obj-$(CONFIG_PPC_MPC5200_GPIO) += mpc52xx_gpio.o
\ No newline at end of file
index c48b82bc2aaddc034b711d246dad1143017ee9cd..a3bda0b9f1ff791fcb259699dba91f82fe61cc9a 100644 (file)
@@ -51,6 +51,7 @@ static void __init mpc5200_simple_setup_arch(void)
 /* list of the supported boards */
 static char *board[] __initdata = {
        "promess,motionpro",
+       "phytec,pcm030",
        "schindler,cm5200",
        "tqc,tqm5200",
        NULL
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c
new file mode 100644 (file)
index 0000000..48da5df
--- /dev/null
@@ -0,0 +1,465 @@
+/*
+ * MPC52xx gpio driver
+ *
+ * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/of.h>
+#include <linux/kernel.h>
+#include <linux/of_gpio.h>
+#include <linux/io.h>
+#include <linux/of_platform.h>
+
+#include <asm/gpio.h>
+#include <asm/mpc52xx.h>
+#include <sysdev/fsl_soc.h>
+
+static DEFINE_SPINLOCK(gpio_lock);
+
+struct mpc52xx_gpiochip {
+       struct of_mm_gpio_chip mmchip;
+       unsigned int shadow_dvo;
+       unsigned int shadow_gpioe;
+       unsigned int shadow_ddr;
+};
+
+/*
+ * GPIO LIB API implementation for wakeup GPIOs.
+ *
+ * There's a maximum of 8 wakeup GPIOs. Which of these are available
+ * for use depends on your board setup.
+ *
+ * 0 -> GPIO_WKUP_7
+ * 1 -> GPIO_WKUP_6
+ * 2 -> PSC6_1
+ * 3 -> PSC6_0
+ * 4 -> ETH_17
+ * 5 -> PSC3_9
+ * 6 -> PSC2_4
+ * 7 -> PSC1_4
+ *
+ */
+static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+       unsigned int ret;
+
+       ret = (in_8(&regs->wkup_ival) >> (7 - gpio)) & 1;
+
+       pr_debug("%s: gpio: %d ret: %d\n", __func__, gpio, ret);
+
+       return ret;
+}
+
+static inline void
+__mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct mpc52xx_gpiochip *chip = container_of(mm_gc,
+                       struct mpc52xx_gpiochip, mmchip);
+       struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs;
+
+       if (val)
+               chip->shadow_dvo |= 1 << (7 - gpio);
+       else
+               chip->shadow_dvo &= ~(1 << (7 - gpio));
+
+       out_8(&regs->wkup_dvo, chip->shadow_dvo);
+}
+
+static void
+mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&gpio_lock, flags);
+
+       __mpc52xx_wkup_gpio_set(gc, gpio, val);
+
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
+       pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+}
+
+static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct mpc52xx_gpiochip *chip = container_of(mm_gc,
+                       struct mpc52xx_gpiochip, mmchip);
+       struct mpc52xx_gpio_wkup *regs = mm_gc->regs;
+       unsigned long flags;
+
+       spin_lock_irqsave(&gpio_lock, flags);
+
+       /* set the direction */
+       chip->shadow_ddr &= ~(1 << (7 - gpio));
+       out_8(&regs->wkup_ddr, chip->shadow_ddr);
+
+       /* and enable the pin */
+       chip->shadow_gpioe |= 1 << (7 - gpio);
+       out_8(&regs->wkup_gpioe, chip->shadow_gpioe);
+
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
+       return 0;
+}
+
+static int
+mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct mpc52xx_gpio_wkup *regs = mm_gc->regs;
+       struct mpc52xx_gpiochip *chip = container_of(mm_gc,
+                       struct mpc52xx_gpiochip, mmchip);
+       unsigned long flags;
+
+       spin_lock_irqsave(&gpio_lock, flags);
+
+       __mpc52xx_wkup_gpio_set(gc, gpio, val);
+
+       /* Then set direction */
+       chip->shadow_ddr |= 1 << (7 - gpio);
+       out_8(&regs->wkup_ddr, chip->shadow_ddr);
+
+       /* Finally enable the pin */
+       chip->shadow_gpioe |= 1 << (7 - gpio);
+       out_8(&regs->wkup_gpioe, chip->shadow_gpioe);
+
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
+       pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+
+       return 0;
+}
+
+static int __devinit mpc52xx_wkup_gpiochip_probe(struct of_device *ofdev,
+                                       const struct of_device_id *match)
+{
+       struct mpc52xx_gpiochip *chip;
+       struct mpc52xx_gpio_wkup *regs;
+       struct of_gpio_chip *ofchip;
+       int ret;
+
+       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       ofchip = &chip->mmchip.of_gc;
+
+       ofchip->gpio_cells          = 2;
+       ofchip->gc.ngpio            = 8;
+       ofchip->gc.direction_input  = mpc52xx_wkup_gpio_dir_in;
+       ofchip->gc.direction_output = mpc52xx_wkup_gpio_dir_out;
+       ofchip->gc.get              = mpc52xx_wkup_gpio_get;
+       ofchip->gc.set              = mpc52xx_wkup_gpio_set;
+
+       ret = of_mm_gpiochip_add(ofdev->node, &chip->mmchip);
+       if (ret)
+               return ret;
+
+       regs = chip->mmchip.regs;
+       chip->shadow_gpioe = in_8(&regs->wkup_gpioe);
+       chip->shadow_ddr = in_8(&regs->wkup_ddr);
+       chip->shadow_dvo = in_8(&regs->wkup_dvo);
+
+       return 0;
+}
+
+static int mpc52xx_gpiochip_remove(struct of_device *ofdev)
+{
+       return -EBUSY;
+}
+
+static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = {
+       {
+               .compatible = "fsl,mpc5200-gpio-wkup",
+       },
+       {}
+};
+
+static struct of_platform_driver mpc52xx_wkup_gpiochip_driver = {
+       .name = "gpio_wkup",
+       .match_table = mpc52xx_wkup_gpiochip_match,
+       .probe = mpc52xx_wkup_gpiochip_probe,
+       .remove = mpc52xx_gpiochip_remove,
+};
+
+/*
+ * GPIO LIB API implementation for simple GPIOs
+ *
+ * There's a maximum of 32 simple GPIOs. Which of these are available
+ * for use depends on your board setup.
+ * The numbering reflects the bit numbering in the port registers:
+ *
+ *  0..1  > reserved
+ *  2..3  > IRDA
+ *  4..7  > ETHR
+ *  8..11 > reserved
+ * 12..15 > USB
+ * 16..17 > reserved
+ * 18..23 > PSC3
+ * 24..27 > PSC2
+ * 28..31 > PSC1
+ */
+static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+       unsigned int ret;
+
+       ret = (in_be32(&regs->simple_ival) >> (31 - gpio)) & 1;
+
+       return ret;
+}
+
+static inline void
+__mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct mpc52xx_gpiochip *chip = container_of(mm_gc,
+                       struct mpc52xx_gpiochip, mmchip);
+       struct mpc52xx_gpio __iomem *regs = mm_gc->regs;
+
+       if (val)
+               chip->shadow_dvo |= 1 << (31 - gpio);
+       else
+               chip->shadow_dvo &= ~(1 << (31 - gpio));
+       out_be32(&regs->simple_dvo, chip->shadow_dvo);
+}
+
+static void
+mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&gpio_lock, flags);
+
+       __mpc52xx_simple_gpio_set(gc, gpio, val);
+
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
+       pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+}
+
+static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct mpc52xx_gpiochip *chip = container_of(mm_gc,
+                       struct mpc52xx_gpiochip, mmchip);
+       struct mpc52xx_gpio *regs = mm_gc->regs;
+       unsigned long flags;
+
+       spin_lock_irqsave(&gpio_lock, flags);
+
+       /* set the direction */
+       chip->shadow_ddr &= ~(1 << (31 - gpio));
+       out_be32(&regs->simple_ddr, chip->shadow_ddr);
+
+       /* and enable the pin */
+       chip->shadow_gpioe |= 1 << (31 - gpio);
+       out_be32(&regs->simple_gpioe, chip->shadow_gpioe);
+
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
+       return 0;
+}
+
+static int
+mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct mpc52xx_gpiochip *chip = container_of(mm_gc,
+                       struct mpc52xx_gpiochip, mmchip);
+       struct mpc52xx_gpio *regs = mm_gc->regs;
+       unsigned long flags;
+
+       spin_lock_irqsave(&gpio_lock, flags);
+
+       /* First set initial value */
+       __mpc52xx_simple_gpio_set(gc, gpio, val);
+
+       /* Then set direction */
+       chip->shadow_ddr |= 1 << (31 - gpio);
+       out_be32(&regs->simple_ddr, chip->shadow_ddr);
+
+       /* Finally enable the pin */
+       chip->shadow_gpioe |= 1 << (31 - gpio);
+       out_be32(&regs->simple_gpioe, chip->shadow_gpioe);
+
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
+       pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+
+       return 0;
+}
+
+static int __devinit mpc52xx_simple_gpiochip_probe(struct of_device *ofdev,
+                                       const struct of_device_id *match)
+{
+       struct mpc52xx_gpiochip *chip;
+       struct of_gpio_chip *ofchip;
+       struct mpc52xx_gpio *regs;
+       int ret;
+
+       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+       if (!chip)
+               return -ENOMEM;
+
+       ofchip = &chip->mmchip.of_gc;
+
+       ofchip->gpio_cells          = 2;
+       ofchip->gc.ngpio            = 32;
+       ofchip->gc.direction_input  = mpc52xx_simple_gpio_dir_in;
+       ofchip->gc.direction_output = mpc52xx_simple_gpio_dir_out;
+       ofchip->gc.get              = mpc52xx_simple_gpio_get;
+       ofchip->gc.set              = mpc52xx_simple_gpio_set;
+
+       ret = of_mm_gpiochip_add(ofdev->node, &chip->mmchip);
+       if (ret)
+               return ret;
+
+       regs = chip->mmchip.regs;
+       chip->shadow_gpioe = in_be32(&regs->simple_gpioe);
+       chip->shadow_ddr = in_be32(&regs->simple_ddr);
+       chip->shadow_dvo = in_be32(&regs->simple_dvo);
+
+       return 0;
+}
+
+static const struct of_device_id mpc52xx_simple_gpiochip_match[] = {
+       {
+               .compatible = "fsl,mpc5200-gpio",
+       },
+       {}
+};
+
+static struct of_platform_driver mpc52xx_simple_gpiochip_driver = {
+       .name = "gpio",
+       .match_table = mpc52xx_simple_gpiochip_match,
+       .probe = mpc52xx_simple_gpiochip_probe,
+       .remove = mpc52xx_gpiochip_remove,
+};
+
+/*
+ * GPIO LIB API implementation for gpt GPIOs.
+ *
+ * Each gpt only has a single GPIO.
+ */
+static int mpc52xx_gpt_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct mpc52xx_gpt __iomem *regs = mm_gc->regs;
+
+       /* The input value lives in big-endian bit 23 of the GPT status
+        * register; each GPT cell exposes exactly one GPIO. */
+       return (in_be32(&regs->status) & (1 << (31 - 23))) ? 1 : 0;
+}
+
+static void
+mpc52xx_gpt_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct mpc52xx_gpt __iomem *regs = mm_gc->regs;
+
+       if (val)
+               out_be32(&regs->mode, 0x34);
+       else
+               out_be32(&regs->mode, 0x24);
+
+       pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+}
+
+static int mpc52xx_gpt_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+       struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+       struct mpc52xx_gpt *regs = mm_gc->regs;
+
+       out_be32(&regs->mode, 0x04);
+
+       return 0;
+}
+
+static int
+mpc52xx_gpt_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+       mpc52xx_gpt_gpio_set(gc, gpio, val);
+       pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
+
+       return 0;
+}
+
+static int __devinit mpc52xx_gpt_gpiochip_probe(struct of_device *ofdev,
+                                       const struct of_device_id *match)
+{
+       struct of_mm_gpio_chip *mmchip;
+       struct of_gpio_chip *chip;
+
+       mmchip = kzalloc(sizeof(*mmchip), GFP_KERNEL);
+       if (!mmchip)
+               return -ENOMEM;
+
+       chip = &mmchip->of_gc;
+
+       chip->gpio_cells          = 2;
+       chip->gc.ngpio            = 1;
+       chip->gc.direction_input  = mpc52xx_gpt_gpio_dir_in;
+       chip->gc.direction_output = mpc52xx_gpt_gpio_dir_out;
+       chip->gc.get              = mpc52xx_gpt_gpio_get;
+       chip->gc.set              = mpc52xx_gpt_gpio_set;
+
+       return of_mm_gpiochip_add(ofdev->node, mmchip);
+}
+
+static const struct of_device_id mpc52xx_gpt_gpiochip_match[] = {
+       {
+               .compatible = "fsl,mpc5200-gpt-gpio",
+       },
+       {}
+};
+
+static struct of_platform_driver mpc52xx_gpt_gpiochip_driver = {
+       .name = "gpio_gpt",
+       .match_table = mpc52xx_gpt_gpiochip_match,
+       .probe = mpc52xx_gpt_gpiochip_probe,
+       .remove = mpc52xx_gpiochip_remove,
+};
+
+static int __init mpc52xx_gpio_init(void)
+{
+       if (of_register_platform_driver(&mpc52xx_wkup_gpiochip_driver))
+               printk(KERN_ERR "Unable to register wakeup GPIO driver\n");
+
+       if (of_register_platform_driver(&mpc52xx_simple_gpiochip_driver))
+               printk(KERN_ERR "Unable to register simple GPIO driver\n");
+
+       if (of_register_platform_driver(&mpc52xx_gpt_gpiochip_driver))
+               printk(KERN_ERR "Unable to register gpt GPIO driver\n");
+
+       return 0;
+}
+
+
+/* Make sure we get initialised before anyone else tries to use us */
+subsys_initcall(mpc52xx_gpio_init);
+
+/* No exit call at the moment as we cannot unregister OF gpio chips */
+
+MODULE_DESCRIPTION("Freescale MPC52xx gpio driver");
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de");
+MODULE_LICENSE("GPL v2");
+
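
The (7 - gpio) and (31 - gpio) conversions above exist because the MPC52xx registers number bits MSB-first; gpiolib consumers never see that. A hedged sketch of board code claiming one of these GPIOs, where the device node and the "example-led" label are invented for illustration:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <asm/gpio.h>

static int example_claim_led(struct device_node *np)
{
        int gpio = of_get_gpio(np, 0);  /* first entry in the node's "gpios" */

        if (gpio < 0)
                return gpio;
        if (gpio_request(gpio, "example-led"))
                return -EBUSY;

        /* Drive it high; the MSB-first bit shuffling stays inside the driver. */
        return gpio_direction_output(gpio, 1);
}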
index d0dead8b9a959b286cedf758b6761062a276e52f..8479394e9ab4cb884d6f329ec02cc233934c69c0 100644 (file)
@@ -18,6 +18,7 @@
 
 #undef DEBUG
 
+#include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/of.h>
 #include <asm/io.h>
@@ -109,11 +110,48 @@ static void mpc52xx_extirq_ack(unsigned int virq)
        io_be_setbit(&intr->ctrl, 27-l2irq);
 }
 
+static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type)
+{
+       u32 ctrl_reg, type;
+       int irq;
+       int l2irq;
+
+       irq = irq_map[virq].hwirq;
+       l2irq = (irq & MPC52xx_IRQ_L2_MASK) >> MPC52xx_IRQ_L2_OFFSET;
+
+       pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, irq, l2irq, flow_type);
+
+       switch (flow_type) {
+       case IRQF_TRIGGER_HIGH:
+               type = 0;
+               break;
+       case IRQF_TRIGGER_RISING:
+               type = 1;
+               break;
+       case IRQF_TRIGGER_FALLING:
+               type = 2;
+               break;
+       case IRQF_TRIGGER_LOW:
+               type = 3;
+               break;
+       default:
+               type = 0;
+       }
+
+       ctrl_reg = in_be32(&intr->ctrl);
+       ctrl_reg &= ~(0x3 << (22 - (l2irq * 2)));
+       ctrl_reg |= (type << (22 - (l2irq * 2)));
+       out_be32(&intr->ctrl, ctrl_reg);
+
+       return 0;
+}
+
 static struct irq_chip mpc52xx_extirq_irqchip = {
        .typename = " MPC52xx IRQ[0-3] ",
        .mask = mpc52xx_extirq_mask,
        .unmask = mpc52xx_extirq_unmask,
        .ack = mpc52xx_extirq_ack,
+       .set_type = mpc52xx_extirq_set_type,
 };
 
 /*
index 7442c58d44f5d233a9a3dfc059a6b5af86f51523..053f49a1dcae6ca2a366ff03f77ab5421e5cced2 100644 (file)
@@ -8,6 +8,7 @@ config MPC8641_HPCN
        select PPC_I8259
        select DEFAULT_UIMAGE
        select FSL_ULI1575
+       select HAS_RAPIDIO
        help
          This option enables support for the MPC8641 HPCN board.
 
index 18b8ebe930d5b62833261ce2fa5e491a675eed5e..5e1e8cf14e75785675f4072afe033777daab241c 100644 (file)
@@ -3,11 +3,12 @@
  *
  * Initial author: Xianghua Xiao <x.xiao@freescale.com>
  * Recode: Jason Jin <jason.jin@freescale.com>
+ *         York Sun <yorksun@freescale.com>
  *
  * Rewrite the interrupt routing. remove the 8259PIC support,
  * All the integrated device in ULI use sideband interrupt.
  *
- * Copyright 2007 Freescale Semiconductor Inc.
+ * Copyright 2008 Freescale Semiconductor Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
@@ -38,6 +39,8 @@
 #include <sysdev/fsl_pci.h>
 #include <sysdev/fsl_soc.h>
 
+static unsigned char *pixis_bdcfg0, *pixis_arch;
+
 static struct of_device_id __initdata mpc8610_ids[] = {
        { .compatible = "fsl,mpc8610-immr", },
        {}
@@ -52,8 +55,7 @@ static int __init mpc8610_declare_of_platform_devices(void)
 }
 machine_device_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices);
 
-static void __init
-mpc86xx_hpcd_init_irq(void)
+static void __init mpc86xx_hpcd_init_irq(void)
 {
        struct mpic *mpic1;
        struct device_node *np;
@@ -161,12 +163,159 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AL, 0x5229, quirk_uli5229);
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, 0x5288, final_uli5288);
 #endif /* CONFIG_PCI */
 
-static void __init
-mpc86xx_hpcd_setup_arch(void)
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+
+static u32 get_busfreq(void)
 {
-#ifdef CONFIG_PCI
-       struct device_node *np;
+       struct device_node *node;
+
+       u32 fs_busfreq = 0;
+       node = of_find_node_by_type(NULL, "cpu");
+       if (node) {
+               unsigned int size;
+               const unsigned int *prop =
+                       of_get_property(node, "bus-frequency", &size);
+               if (prop)
+                       fs_busfreq = *prop;
+               of_node_put(node);
+       }
+       return fs_busfreq;
+}
+
+unsigned int mpc8610hpcd_get_pixel_format(unsigned int bits_per_pixel,
+                                               int monitor_port)
+{
+       static const unsigned long pixelformat[][3] = {
+               {0x88882317, 0x88083218, 0x65052119},
+               {0x88883316, 0x88082219, 0x65053118},
+       };
+       unsigned int pix_fmt, arch_monitor;
+
+       /* DVI port for board version 0x01 */
+       arch_monitor = ((*pixis_arch == 0x01) && (monitor_port == 0)) ? 0 : 1;
+
+       if (bits_per_pixel == 32)
+               pix_fmt = pixelformat[arch_monitor][0];
+       else if (bits_per_pixel == 24)
+               pix_fmt = pixelformat[arch_monitor][1];
+       else if (bits_per_pixel == 16)
+               pix_fmt = pixelformat[arch_monitor][2];
+       else
+               pix_fmt = pixelformat[1][0];
+
+       return pix_fmt;
+}
+
+void mpc8610hpcd_set_gamma_table(int monitor_port, char *gamma_table_base)
+{
+       int i;
+       if (monitor_port == 2) {                /* dual link LVDS */
+               for (i = 0; i < 256*3; i++)
+                       gamma_table_base[i] = (gamma_table_base[i] << 2) |
+                                        ((gamma_table_base[i] >> 6) & 0x03);
+       }
+}
+
+void mpc8610hpcd_set_monitor_port(int monitor_port)
+{
+       static const u8 bdcfg[] = {0xBD, 0xB5, 0xA5};
+       if (monitor_port < 3)
+               *pixis_bdcfg0 = bdcfg[monitor_port];
+}
+
+void mpc8610hpcd_set_pixel_clock(unsigned int pixclock)
+{
+       u32 __iomem *clkdvdr;
+       u32 temp;
+       /* variables for pixel clock calcs */
+       ulong  bestval, bestfreq, speed_ccb, minpixclock, maxpixclock;
+       ulong pixval;
+       long err;
+       int i;
+
+       clkdvdr = ioremap(get_immrbase() + 0xe0800, sizeof(u32));
+       if (!clkdvdr) {
+               printk(KERN_ERR "Err: can't map clock divider register!\n");
+               return;
+       }
+
+       /* Pixel Clock configuration */
+       pr_debug("DIU: Bus Frequency = %d\n", get_busfreq());
+       speed_ccb = get_busfreq();
+
+       /* Calculate the pixel clock with the smallest error */
+       /* calculate the following in steps to avoid overflow */
+       pr_debug("DIU pixclock in ps - %d\n", pixclock);
+       temp = 1000000000/pixclock;
+       temp *= 1000;
+       pixclock = temp;
+       pr_debug("DIU pixclock freq - %u\n", pixclock);
+
+       temp = pixclock * 5 / 100;
+       pr_debug("deviation = %d\n", temp);
+       minpixclock = pixclock - temp;
+       maxpixclock = pixclock + temp;
+       pr_debug("DIU minpixclock - %lu\n", minpixclock);
+       pr_debug("DIU maxpixclock - %lu\n", maxpixclock);
+       pixval = speed_ccb/pixclock;
+       pr_debug("DIU pixval = %lu\n", pixval);
+
+       err = 100000000;
+       bestval = pixval;
+       pr_debug("DIU bestval = %lu\n", bestval);
+
+       bestfreq = 0;
+       for (i = -1; i <= 1; i++) {
+               temp = speed_ccb / ((pixval+i) + 1);
+               pr_debug("DIU test pixval i= %d, pixval=%lu, temp freq. = %u\n",
+                                                       i, pixval, temp);
+               if ((temp < minpixclock) || (temp > maxpixclock))
+                       pr_debug("DIU exceeds monitor range (%lu to %lu)\n",
+                               minpixclock, maxpixclock);
+               else if (abs(temp - pixclock) < err) {
+                       pr_debug("Entered the else if block %d\n", i);
+                       err = abs(temp - pixclock);
+                       bestval = pixval+i;
+                       bestfreq = temp;
+               }
+       }
+
+       pr_debug("DIU chose = %lx\n", bestval);
+       pr_debug("DIU NomPixClk error = %ld\n", err);
+       pr_debug("DIU: Best Freq = %lx\n", bestfreq);
+       /* Modify PXCLK in GUTS CLKDVDR */
+       pr_debug("DIU: Current value of CLKDVDR = 0x%08x\n", (*clkdvdr));
+       temp = (*clkdvdr) & 0x2000FFFF;
+       *clkdvdr = temp;                /* turn off clock */
+       *clkdvdr = temp | 0x80000000 | (((bestval) & 0x1F) << 16);
+       pr_debug("DIU: Modified value of CLKDVDR = 0x%08x\n", (*clkdvdr));
+       iounmap(clkdvdr);
+}
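
To make the divisor search concrete, here is a hedged, standalone rework with sample numbers: the 528 MHz platform (CCB) clock is an assumption, the 39722 ps pixclock is the common 25.175 MHz VGA dot clock, and the 5% window check is dropped because it does not change this particular result:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        unsigned long speed_ccb = 528000000;    /* assumed CCB frequency */
        unsigned long pixclock  = 39722;        /* ps, as passed in by the framebuffer layer */
        unsigned long target, pixval, best = 0, bestfreq = 0;
        long err = 100000000;
        int i;

        target = (1000000000UL / pixclock) * 1000;      /* about 25,174,000 Hz */
        pixval = speed_ccb / target;                    /* first divider guess: 20 */

        for (i = -1; i <= 1; i++) {
                unsigned long freq = speed_ccb / (pixval + i + 1);

                if (labs((long)freq - (long)target) < err) {
                        err = labs((long)freq - (long)target);
                        best = pixval + i;
                        bestfreq = freq;
                }
        }
        printf("divider field %lu -> %lu Hz (error %ld Hz)\n", best, bestfreq, err);
        /* Prints "divider field 20 -> 25142857 Hz (error 31143 Hz)": the patch
         * programs 20 into CLKDVDR, which its own formula treats as divide-by-21,
         * the closest match to the requested dot clock. */
        return 0;
}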
+
+ssize_t mpc8610hpcd_show_monitor_port(int monitor_port, char *buf)
+{
+       return snprintf(buf, PAGE_SIZE,
+                       "%c0 - DVI\n"
+                       "%c1 - Single link LVDS\n"
+                       "%c2 - Dual link LVDS\n",
+                       monitor_port == 0 ? '*' : ' ',
+                       monitor_port == 1 ? '*' : ' ',
+                       monitor_port == 2 ? '*' : ' ');
+}
+
+int mpc8610hpcd_set_sysfs_monitor_port(int val)
+{
+       return val < 3 ? val : 0;
+}
+
 #endif
+
+static void __init mpc86xx_hpcd_setup_arch(void)
+{
+       struct resource r;
+       struct device_node *np;
+       unsigned char *pixis;
+
        if (ppc_md.progress)
                ppc_md.progress("mpc86xx_hpcd_setup_arch()", 0);
 
@@ -183,6 +332,30 @@ mpc86xx_hpcd_setup_arch(void)
                }
         }
 #endif
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+       preallocate_diu_videomemory();
+       diu_ops.get_pixel_format        = mpc8610hpcd_get_pixel_format;
+       diu_ops.set_gamma_table         = mpc8610hpcd_set_gamma_table;
+       diu_ops.set_monitor_port        = mpc8610hpcd_set_monitor_port;
+       diu_ops.set_pixel_clock         = mpc8610hpcd_set_pixel_clock;
+       diu_ops.show_monitor_port       = mpc8610hpcd_show_monitor_port;
+       diu_ops.set_sysfs_monitor_port  = mpc8610hpcd_set_sysfs_monitor_port;
+#endif
+
+       np = of_find_compatible_node(NULL, NULL, "fsl,fpga-pixis");
+       if (np) {
+               of_address_to_resource(np, 0, &r);
+               of_node_put(np);
+               pixis = ioremap(r.start, 32);
+               if (!pixis) {
+                       printk(KERN_ERR "Err: can't map FPGA cfg register!\n");
+                       return;
+               }
+               pixis_bdcfg0 = pixis + 8;
+               pixis_arch = pixis + 1;
+       } else
+               printk(KERN_ERR "Err: "
+                               "can't find device node 'fsl,fpga-pixis'\n");
 
        printk("MPC86xx HPCD board from Freescale Semiconductor\n");
 }
@@ -200,8 +373,7 @@ static int __init mpc86xx_hpcd_probe(void)
        return 0;
 }
 
-static long __init
-mpc86xx_time_init(void)
+static long __init mpc86xx_time_init(void)
 {
        unsigned int temp;
 
index f947f555fd463458d17b617bee015878a2e632f2..f13704aabbea401fdd177b753f816961f0d1cffb 100644 (file)
@@ -221,6 +221,7 @@ mpc86xx_time_init(void)
 
 static __initdata struct of_device_id of_bus_ids[] = {
        { .compatible = "simple-bus", },
+       { .compatible = "fsl,rapidio-delta", },
        {},
 };
 
index ab24d94baab69bdf5716080def40613b788e6b0c..31da84c458d2cef69ef6e5acc5a812ce3d45845a 100644 (file)
@@ -36,8 +36,8 @@
 #include "celleb_scc.h"
 #include "celleb_pci.h"
 
-#define PEX_IN(base, off)      in_be32((void *)(base) + (off))
-#define PEX_OUT(base, off, data) out_be32((void *)(base) + (off), (data))
+#define PEX_IN(base, off)      in_be32((void __iomem *)(base) + (off))
+#define PEX_OUT(base, off, data) out_be32((void __iomem *)(base) + (off), (data))
 
 static void scc_pciex_io_flush(struct iowa_bus *bus)
 {
@@ -304,7 +304,7 @@ static int __init scc_pciex_iowa_init(struct iowa_bus *bus, void *data)
        ((((0x1 << (size))-1) << ((addr) & 0x3)) << PEXDCMND_BYTE_EN_SHIFT)
 #define MK_PEXDCMND(cmd, addr, size) ((cmd) | MK_PEXDCMND_BYTE_EN(addr, size))
 
-static uint32_t config_read_pciex_dev(unsigned int *base,
+static uint32_t config_read_pciex_dev(unsigned int __iomem *base,
                uint64_t bus_no, uint64_t dev_no, uint64_t func_no,
                uint64_t off, uint64_t size)
 {
@@ -320,7 +320,7 @@ static uint32_t config_read_pciex_dev(unsigned int *base,
        return ret;
 }
 
-static void config_write_pciex_dev(unsigned int *base, uint64_t bus_no,
+static void config_write_pciex_dev(unsigned int __iomem *base, uint64_t bus_no,
        uint64_t dev_no, uint64_t func_no, uint64_t off, uint64_t size,
        uint32_t data)
 {
@@ -338,7 +338,7 @@ static void config_write_pciex_dev(unsigned int *base, uint64_t bus_no,
        ((((0x1 << (len)) - 1) << ((off) & 0x3)) << PEXCADRS_BYTE_EN_SHIFT)
 #define MK_PEXCADRS(cmd, addr, size) \
        ((cmd) | MK_PEXCADRS_BYTE_EN(addr, size) | ((addr) & ~0x3))
-static uint32_t config_read_pciex_rc(unsigned int *base,
+static uint32_t config_read_pciex_rc(unsigned int __iomem *base,
                                     uint32_t where, uint32_t size)
 {
        PEX_OUT(base, PEXCADRS, MK_PEXCADRS(PEXCADRS_CMD_READ, where, size));
@@ -346,7 +346,7 @@ static uint32_t config_read_pciex_rc(unsigned int *base,
                >> ((where & (4 - size)) * 8)) & ((0x1 << (size * 8)) - 1);
 }
 
-static void config_write_pciex_rc(unsigned int *base, uint32_t where,
+static void config_write_pciex_rc(unsigned int __iomem *base, uint32_t where,
                                  uint32_t size, uint32_t val)
 {
        uint32_t data;
@@ -410,7 +410,7 @@ static struct pci_ops scc_pciex_pci_ops = {
        scc_pciex_write_config,
 };
 
-static void pciex_clear_intr_all(unsigned int *base)
+static void pciex_clear_intr_all(unsigned int __iomem *base)
 {
        PEX_OUT(base, PEXAERRSTS, 0xffffffff);
        PEX_OUT(base, PEXPRERRSTS, 0xffffffff);
@@ -427,7 +427,7 @@ static void pciex_disable_intr_all(unsigned int *base)
 }
 #endif
 
-static void pciex_enable_intr_all(unsigned int *base)
+static void pciex_enable_intr_all(unsigned int __iomem *base)
 {
        PEX_OUT(base, PEXINTMASK, 0x0000e7f1);
        PEX_OUT(base, PEXAERRMASK, 0x03ff01ff);
@@ -435,7 +435,7 @@ static void pciex_enable_intr_all(unsigned int *base)
        PEX_OUT(base, PEXVDMASK, 0x00000001);
 }
 
-static void pciex_check_status(unsigned int *base)
+static void pciex_check_status(unsigned int __iomem *base)
 {
        uint32_t err = 0;
        uint32_t intsts, aerr, prerr, rcvcp, lenerr;
diff --git a/arch/powerpc/platforms/cell/spufs/.gitignore b/arch/powerpc/platforms/cell/spufs/.gitignore
new file mode 100644 (file)
index 0000000..a09ee8d
--- /dev/null
@@ -0,0 +1,2 @@
+spu_save_dump.h
+spu_restore_dump.h
index 0ad83aeb70b114758c68de984aa45161954c1eae..177735f793179ef4aed1bea211593611728dd4eb 100644 (file)
@@ -78,6 +78,7 @@ void destroy_spu_context(struct kref *kref)
 {
        struct spu_context *ctx;
        ctx = container_of(kref, struct spu_context, kref);
+       spu_context_nospu_trace(destroy_spu_context__enter, ctx);
        mutex_lock(&ctx->state_mutex);
        spu_deactivate(ctx);
        mutex_unlock(&ctx->state_mutex);
@@ -88,6 +89,7 @@ void destroy_spu_context(struct kref *kref)
                kref_put(ctx->prof_priv_kref, ctx->prof_priv_release);
        BUG_ON(!list_empty(&ctx->rq));
        atomic_dec(&nr_spu_contexts);
+       kfree(ctx->switch_log);
        kfree(ctx);
 }
 
@@ -150,6 +152,8 @@ int spu_acquire_saved(struct spu_context *ctx)
 {
        int ret;
 
+       spu_context_nospu_trace(spu_acquire_saved__enter, ctx);
+
        ret = spu_acquire(ctx);
        if (ret)
                return ret;
index 08f44d1971ac42cd952f055c6d9f4c96779c5883..80911a37340063a84d0ffaf045802858988eac17 100644 (file)
@@ -2386,6 +2386,171 @@ static const struct file_operations spufs_stat_fops = {
        .release        = single_release,
 };
 
+static inline int spufs_switch_log_used(struct spu_context *ctx)
+{
+       return (ctx->switch_log->head - ctx->switch_log->tail) %
+               SWITCH_LOG_BUFSIZE;
+}
+
+static inline int spufs_switch_log_avail(struct spu_context *ctx)
+{
+       return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx);
+}
+
+static int spufs_switch_log_open(struct inode *inode, struct file *file)
+{
+       struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+
+       /*
+        * We (ab-)use the mapping_lock here because it serves a similar
+        * purpose for synchronizing open/close elsewhere.  Maybe it should
+        * be renamed eventually.
+        */
+       mutex_lock(&ctx->mapping_lock);
+       if (ctx->switch_log) {
+               spin_lock(&ctx->switch_log->lock);
+               ctx->switch_log->head = 0;
+               ctx->switch_log->tail = 0;
+               spin_unlock(&ctx->switch_log->lock);
+       } else {
+               /*
+                * We allocate the switch log data structures on first open.
+                * They will never be freed because we assume a context will
+                * be traced until it goes away.
+                */
+               ctx->switch_log = kzalloc(sizeof(struct switch_log) +
+                       SWITCH_LOG_BUFSIZE * sizeof(struct switch_log_entry),
+                       GFP_KERNEL);
+               if (!ctx->switch_log)
+                       goto out;
+               spin_lock_init(&ctx->switch_log->lock);
+               init_waitqueue_head(&ctx->switch_log->wait);
+       }
+       mutex_unlock(&ctx->mapping_lock);
+
+       return 0;
+ out:
+       mutex_unlock(&ctx->mapping_lock);
+       return -ENOMEM;
+}
+
+static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n)
+{
+       struct switch_log_entry *p;
+
+       p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE;
+
+       return snprintf(tbuf, n, "%u.%09u %d %u %u %llu\n",
+                       (unsigned int) p->tstamp.tv_sec,
+                       (unsigned int) p->tstamp.tv_nsec,
+                       p->spu_id,
+                       (unsigned int) p->type,
+                       (unsigned int) p->val,
+                       (unsigned long long) p->timebase);
+}
+
+static ssize_t spufs_switch_log_read(struct file *file, char __user *buf,
+                            size_t len, loff_t *ppos)
+{
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+       int error = 0, cnt = 0;
+
+       if (!buf || len < 0)
+               return -EINVAL;
+
+       while (cnt < len) {
+               char tbuf[128];
+               int width;
+
+               if (file->f_flags & O_NONBLOCK) {
+                       if (spufs_switch_log_used(ctx) <= 0)
+                               return cnt ? cnt : -EAGAIN;
+               } else {
+                       /* Wait for data in buffer */
+                       error = wait_event_interruptible(ctx->switch_log->wait,
+                                       spufs_switch_log_used(ctx) > 0);
+                       if (error)
+                               break;
+               }
+
+               spin_lock(&ctx->switch_log->lock);
+               if (ctx->switch_log->head == ctx->switch_log->tail) {
+                       /* multiple readers race? */
+                       spin_unlock(&ctx->switch_log->lock);
+                       continue;
+               }
+
+               width = switch_log_sprint(ctx, tbuf, sizeof(tbuf));
+               if (width < len) {
+                       ctx->switch_log->tail =
+                               (ctx->switch_log->tail + 1) %
+                                SWITCH_LOG_BUFSIZE;
+               }
+
+               spin_unlock(&ctx->switch_log->lock);
+
+               /*
+                * If the record does not fit in the space remaining,
+                * return the partial buffer collected so far.
+                */
+               if (width >= len)
+                       break;
+
+               error = copy_to_user(buf + cnt, tbuf, width);
+               if (error)
+                       break;
+               cnt += width;
+       }
+
+       return cnt == 0 ? error : cnt;
+}
+
+static unsigned int spufs_switch_log_poll(struct file *file, poll_table *wait)
+{
+       struct inode *inode = file->f_path.dentry->d_inode;
+       struct spu_context *ctx = SPUFS_I(inode)->i_ctx;
+       unsigned int mask = 0;
+
+       poll_wait(file, &ctx->switch_log->wait, wait);
+
+       if (spufs_switch_log_used(ctx) > 0)
+               mask |= POLLIN;
+
+       return mask;
+}
+
+static const struct file_operations spufs_switch_log_fops = {
+       .owner  = THIS_MODULE,
+       .open   = spufs_switch_log_open,
+       .read   = spufs_switch_log_read,
+       .poll   = spufs_switch_log_poll,
+};
+
+void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
+               u32 type, u32 val)
+{
+       if (!ctx->switch_log)
+               return;
+
+       spin_lock(&ctx->switch_log->lock);
+       if (spufs_switch_log_avail(ctx) > 1) {
+               struct switch_log_entry *p;
+
+               p = ctx->switch_log->log + ctx->switch_log->head;
+               ktime_get_ts(&p->tstamp);
+               p->timebase = get_tb();
+               p->spu_id = spu ? spu->number : -1;
+               p->type = type;
+               p->val = val;
+
+               ctx->switch_log->head =
+                       (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE;
+       }
+       spin_unlock(&ctx->switch_log->lock);
+
+       wake_up(&ctx->switch_log->wait);
+}
 
 struct tree_descr spufs_dir_contents[] = {
        { "capabilities", &spufs_caps_fops, 0444, },
@@ -2422,6 +2587,7 @@ struct tree_descr spufs_dir_contents[] = {
        { "proxydma_info", &spufs_proxydma_info_fops, 0444, },
        { "tid", &spufs_tid_fops, 0444, },
        { "stat", &spufs_stat_fops, 0444, },
+       { "switch_log", &spufs_switch_log_fops, 0444 },
        {},
 };
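
Because switch_log_sprint() above fixes the record format, userspace can parse the new file directly. A hedged sketch of such a reader, with the spufs mount point and context directory as placeholders:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/spu/example-ctx/switch_log", "r");
        unsigned int sec, nsec, type, val;
        int spu_id;
        unsigned long long timebase;

        if (!f)
                return 1;

        /* Blocks until an entry is available unless opened O_NONBLOCK. */
        while (fscanf(f, "%u.%u %d %u %u %llu",
                      &sec, &nsec, &spu_id, &type, &val, &timebase) == 6)
                printf("t=%u.%09u spu=%d type=%u (0=start 1=stop 2=exit) val=%u tb=%llu\n",
                       sec, nsec, spu_id, type, val, timebase);

        fclose(f);
        return 0;
}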
 
index 96bf7c2b86fcf61ca6d241e9d222853ed7ba883b..a9c35b7b719fda3bd3850dc1dfdaa74d2e5acc6b 100644 (file)
@@ -405,6 +405,8 @@ long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event)
        ret = spu_run_fini(ctx, npc, &status);
        spu_yield(ctx);
 
+       spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, status);
+
        if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
            (((status >> SPU_STOP_STATUS_SHIFT) & 0x3f00) == 0x2100))
                ctx->stats.libassist++;
index 00528ef84ad2085613703a8feab52a25b1ee9f93..7298e7db2c8365cf83f8519350f917a01a35c8c3 100644 (file)
@@ -240,6 +240,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
        spu->mfc_callback = spufs_mfc_callback;
        mb();
        spu_unmap_mappings(ctx);
+       spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
        spu_restore(&ctx->csa, spu);
        spu->timestamp = jiffies;
        spu_cpu_affinity_set(spu, raw_smp_processor_id());
@@ -419,6 +420,7 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
        spu_switch_notify(spu, NULL);
        spu_unmap_mappings(ctx);
        spu_save(&ctx->csa, spu);
+       spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);
        spu->timestamp = jiffies;
        ctx->state = SPU_STATE_SAVED;
        spu->ibox_callback = NULL;
@@ -591,7 +593,7 @@ static struct spu *find_victim(struct spu_context *ctx)
        struct spu *spu;
        int node, n;
 
-       spu_context_nospu_trace(spu_find_vitim__enter, ctx);
+       spu_context_nospu_trace(spu_find_victim__enter, ctx);
 
        /*
         * Look for a possible preemption candidate on the local node first.
@@ -1063,10 +1065,9 @@ int __init spu_sched_init(void)
 
        mod_timer(&spuloadavg_timer, 0);
 
-       entry = create_proc_entry("spu_loadavg", 0, NULL);
+       entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops);
        if (!entry)
                goto out_stop_kthread;
-       entry->proc_fops = &spu_loadavg_fops;
 
        pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
                        SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
index cdc515182f823fc71357886c7dc5205f1d97f11d..7312745b7540d4586fd72c7e4b57e9b5a9abd641 100644 (file)
@@ -47,6 +47,30 @@ enum {
        SPU_SCHED_SPU_RUN,      /* context is within spu_run */
 };
 
+enum {
+       SWITCH_LOG_BUFSIZE = 4096,
+};
+
+enum {
+       SWITCH_LOG_START,
+       SWITCH_LOG_STOP,
+       SWITCH_LOG_EXIT,
+};
+
+struct switch_log {
+       spinlock_t              lock;
+       wait_queue_head_t       wait;
+       unsigned long           head;
+       unsigned long           tail;
+       struct switch_log_entry {
+               struct timespec tstamp;
+               s32             spu_id;
+               u32             type;
+               u32             val;
+               u64             timebase;
+       } log[];
+};
+
 struct spu_context {
        struct spu *spu;                  /* pointer to a physical SPU */
        struct spu_state csa;             /* SPU context save area. */
@@ -116,6 +140,9 @@ struct spu_context {
                unsigned long long libassist;
        } stats;
 
+       /* context switch log */
+       struct switch_log *switch_log;
+
        struct list_head aff_list;
        int aff_head;
        int aff_offset;
@@ -256,6 +283,8 @@ int spu_activate(struct spu_context *ctx, unsigned long flags);
 void spu_deactivate(struct spu_context *ctx);
 void spu_yield(struct spu_context *ctx);
 void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
+void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
+               u32 type, u32 val);
 void spu_set_timeslice(struct spu_context *ctx);
 void spu_update_sched_info(struct spu_context *ctx);
 void __spu_update_sched_info(struct spu_context *ctx);
@@ -330,8 +359,8 @@ extern void spuctx_switch_state(struct spu_context *ctx,
                enum spu_utilization_state new_state);
 
 #define spu_context_trace(name, ctx, spu) \
-       trace_mark(name, "%p %p", ctx, spu);
+       trace_mark(name, "ctx %p spu %p", ctx, spu);
 #define spu_context_nospu_trace(name, ctx) \
-       trace_mark(name, "%p", ctx);
+       trace_mark(name, "ctx %p", ctx);
 
 #endif
index 79aa773f3c992abff92f38f51608bdcc5497c0a1..53202422ba727c8ec9a9f95a80fb1db95181e12c 100644 (file)
@@ -171,24 +171,24 @@ static void spu_context_nospu_event(void *probe_private, void *call_data,
 }
 
 struct spu_probe spu_probes[] = {
-       { "spu_bind_context__enter", "%p %p", spu_context_event },
-       { "spu_unbind_context__enter", "%p %p", spu_context_event },
-       { "spu_get_idle__enter", "%p", spu_context_nospu_event },
-       { "spu_get_idle__found", "%p %p", spu_context_event },
-       { "spu_get_idle__not_found", "%p", spu_context_nospu_event },
-       { "spu_find_victim__enter", "%p", spu_context_nospu_event },
-       { "spusched_tick__preempt", "%p %p", spu_context_event },
-       { "spusched_tick__newslice", "%p", spu_context_nospu_event },
-       { "spu_yield__enter", "%p", spu_context_nospu_event },
-       { "spu_deactivate__enter", "%p", spu_context_nospu_event },
-       { "__spu_deactivate__unload", "%p %p", spu_context_event },
-       { "spufs_ps_nopfn__enter", "%p", spu_context_nospu_event },
-       { "spufs_ps_nopfn__sleep", "%p", spu_context_nospu_event },
-       { "spufs_ps_nopfn__wake", "%p %p", spu_context_event },
-       { "spufs_ps_nopfn__insert", "%p %p", spu_context_event },
-       { "spu_acquire_saved__enter", "%p", spu_context_nospu_event },
-       { "destroy_spu_context__enter", "%p", spu_context_nospu_event },
-       { "spufs_stop_callback__enter", "%p %p", spu_context_event },
+       { "spu_bind_context__enter", "ctx %p spu %p", spu_context_event },
+       { "spu_unbind_context__enter", "ctx %p spu %p", spu_context_event },
+       { "spu_get_idle__enter", "ctx %p", spu_context_nospu_event },
+       { "spu_get_idle__found", "ctx %p spu %p", spu_context_event },
+       { "spu_get_idle__not_found", "ctx %p", spu_context_nospu_event },
+       { "spu_find_victim__enter", "ctx %p", spu_context_nospu_event },
+       { "spusched_tick__preempt", "ctx %p spu %p", spu_context_event },
+       { "spusched_tick__newslice", "ctx %p", spu_context_nospu_event },
+       { "spu_yield__enter", "ctx %p", spu_context_nospu_event },
+       { "spu_deactivate__enter", "ctx %p", spu_context_nospu_event },
+       { "__spu_deactivate__unload", "ctx %p spu %p", spu_context_event },
+       { "spufs_ps_nopfn__enter", "ctx %p", spu_context_nospu_event },
+       { "spufs_ps_nopfn__sleep", "ctx %p", spu_context_nospu_event },
+       { "spufs_ps_nopfn__wake", "ctx %p spu %p", spu_context_event },
+       { "spufs_ps_nopfn__insert", "ctx %p spu %p", spu_context_event },
+       { "spu_acquire_saved__enter", "ctx %p", spu_context_nospu_event },
+       { "destroy_spu_context__enter", "ctx %p", spu_context_nospu_event },
+       { "spufs_stop_callback__enter", "ctx %p spu %p", spu_context_event },
 };
 
 static int __init sputrace_init(void)
@@ -201,10 +201,9 @@ static int __init sputrace_init(void)
        if (!sputrace_log)
                goto out;
 
-       entry = create_proc_entry("sputrace", S_IRUSR, NULL);
+       entry = proc_create("sputrace", S_IRUSR, NULL, &sputrace_fops);
        if (!entry)
                goto out_free_log;
-       entry->proc_fops = &sputrace_fops;
 
        for (i = 0; i < ARRAY_SIZE(spu_probes); i++) {
                struct spu_probe *p = &spu_probes[i];
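
This hunk, and the similar ones later in this merge (iSeries lpevents, mf, titanTod, viopath, pseries eeh, ofdt and the rtas error_log), all perform the same conversion: instead of create_proc_entry() followed by a separate assignment to ->proc_fops, the new proc_create()/proc_create_data() helpers take the file_operations (and, for proc_create_data(), a ->data cookie) up front, so the entry is never visible in /proc without its operations. A minimal sketch of the new idiom; "demo" and demo_fops are made-up names:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/fs.h>
#include <linux/stat.h>

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
};

static int __init demo_proc_init(void)
{
	/*
	 * Old idiom (entry briefly visible with no operations installed):
	 *	e = create_proc_entry("demo", S_IRUGO, NULL);
	 *	if (e)
	 *		e->proc_fops = &demo_fops;
	 */
	if (!proc_create("demo", S_IRUGO, NULL, &demo_fops))
		return -ENOMEM;
	return 0;
}

static void __exit demo_proc_exit(void)
{
	remove_proc_entry("demo", NULL);
}

module_init(demo_proc_init);
module_exit(demo_proc_exit);
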
index e5b40e3e0082cfd07c1597edd26c6f5ebbac54f5..b0f8a857ec02d356d1f4a88f8e21cfbcd8b4f66f 100644 (file)
@@ -330,15 +330,11 @@ static const struct file_operations proc_lpevents_operations = {
 
 static int __init proc_lpevents_init(void)
 {
-       struct proc_dir_entry *e;
-
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return 0;
 
-       e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
-       if (e)
-               e->proc_fops = &proc_lpevents_operations;
-
+       proc_create("iSeries/lpevents", S_IFREG|S_IRUGO, NULL,
+                   &proc_lpevents_operations);
        return 0;
 }
 __initcall(proc_lpevents_init);
index c0f2433bc16e75c20532982c65e581902b0e5481..1dc7295746dac6471503b098182c9f1e128239d8 100644 (file)
@@ -1255,11 +1255,11 @@ static int __init mf_proc_init(void)
                if (i == 3)     /* no vmlinux entry for 'D' */
                        continue;
 
-               ent = create_proc_entry("vmlinux", S_IFREG|S_IWUSR, mf);
+               ent = proc_create_data("vmlinux", S_IFREG|S_IWUSR, mf,
+                                      &proc_vmlinux_operations,
+                                      (void *)(long)i);
                if (!ent)
                        return 1;
-               ent->data = (void *)(long)i;
-               ent->proc_fops = &proc_vmlinux_operations;
        }
 
        ent = create_proc_entry("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root);
index f2cde4180204c6559d0229b156f5440a97c43bb0..91f4c6cd4b9936fea9635b14e30f48b858a657c8 100644 (file)
@@ -110,15 +110,11 @@ static const struct file_operations proc_titantod_operations = {
 
 static int __init iseries_proc_init(void)
 {
-       struct proc_dir_entry *e;
-
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return 0;
 
-       e = create_proc_entry("iSeries/titanTod", S_IFREG|S_IRUGO, NULL);
-       if (e)
-               e->proc_fops = &proc_titantod_operations;
-
+       proc_create("iSeries/titanTod", S_IFREG|S_IRUGO, NULL,
+                   &proc_titantod_operations);
        return 0;
 }
 __initcall(iseries_proc_init);
index df23331eb25c2416b99fd91c137fa62675c7bbb8..49ff4dc422b7be7b0bf8565335d48cb34cc0979f 100644 (file)
@@ -180,15 +180,10 @@ static const struct file_operations proc_viopath_operations = {
 
 static int __init vio_proc_init(void)
 {
-       struct proc_dir_entry *e;
-
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return 0;
 
-       e = create_proc_entry("iSeries/config", 0, NULL);
-       if (e)
-               e->proc_fops = &proc_viopath_operations;
-
+       proc_create("iSeries/config", 0, NULL, &proc_viopath_operations);
         return 0;
 }
 __initcall(vio_proc_init);
index 78093d7f97af9af6ca4bed23814f8809b4ffb1aa..4d72c8f721598d77c60c68ec74799fd75920e931 100644 (file)
@@ -6,7 +6,10 @@ obj-y                          += pic.o setup.o time.o feature.o pci.o \
 obj-$(CONFIG_PMAC_BACKLIGHT)   += backlight.o
 obj-$(CONFIG_CPU_FREQ_PMAC)    += cpufreq_32.o
 obj-$(CONFIG_CPU_FREQ_PMAC64)  += cpufreq_64.o
-obj-$(CONFIG_NVRAM)            += nvram.o
+# CONFIG_NVRAM is an arch-independent tristate symbol; for pmac32 we really
+# need this to be a bool.  Cheat here and pretend CONFIG_NVRAM=m is really
+# CONFIG_NVRAM=y
+obj-$(CONFIG_NVRAM:m=y)                += nvram.o
 # ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff
 obj-$(CONFIG_PPC64)            += nvram.o
 obj-$(CONFIG_PPC32)            += bootx_init.o
index bf44c5441a365a0a148bdac23dd255c5a5b747b1..00bd0166d07fd78261a4230bbcb30555f313a356 100644 (file)
@@ -337,7 +337,8 @@ static void __init pmac_setup_arch(void)
        find_via_pmu();
        smu_init();
 
-#if defined(CONFIG_NVRAM) || defined(CONFIG_PPC64)
+#if defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) || \
+    defined(CONFIG_PPC64)
        pmac_nvram_init();
 #endif
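
Both powermac hunks above hinge on the same Kconfig detail (stated here as an assumption, not something the patch spells out): for a tristate symbol, CONFIG_NVRAM=y defines CONFIG_NVRAM for the preprocessor, while CONFIG_NVRAM=m defines CONFIG_NVRAM_MODULE instead, and the Makefile only ever sees the raw value y/m/n. That is why the Makefile uses the $(CONFIG_NVRAM:m=y) substitution to treat a modular nvram driver as built-in for this object, and why the #if above has to test CONFIG_NVRAM_MODULE as well. A small hypothetical helper showing the preprocessor view:

/*
 *   CONFIG_NVRAM=y  ->  CONFIG_NVRAM defined
 *   CONFIG_NVRAM=m  ->  CONFIG_NVRAM_MODULE defined, CONFIG_NVRAM not
 *   CONFIG_NVRAM=n  ->  neither defined
 */
static inline int pmac_has_nvram_driver(void)
{
#if defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE)
	return 1;	/* nvram support is built in or built as a module */
#else
	return 0;
#endif
}
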
 
index bd2593ed28dd2f3c002a617b363707295cd2f2b6..554c6e42ef2a9ff1f78340a0d5a9c49f5d46b611 100644 (file)
@@ -18,6 +18,7 @@ obj-$(CONFIG_PCI)     += pci.o pci_dlpar.o
 obj-$(CONFIG_PCI_MSI)  += msi.o
 
 obj-$(CONFIG_HOTPLUG_CPU)      += hotplug-cpu.o
+obj-$(CONFIG_MEMORY_HOTPLUG)   += hotplug-memory.o
 
 obj-$(CONFIG_HVC_CONSOLE)      += hvconsole.o
 obj-$(CONFIG_HVCS)             += hvcserver.o
index a3fd56b186e6063284306e37339a9a7cc7f8349d..6f544ba4b37feebfdd0be835b4cbf397c74783de 100644 (file)
@@ -1259,14 +1259,8 @@ static const struct file_operations proc_eeh_operations = {
 
 static int __init eeh_init_proc(void)
 {
-       struct proc_dir_entry *e;
-
-       if (machine_is(pseries)) {
-               e = create_proc_entry("ppc64/eeh", 0, NULL);
-               if (e)
-                       e->proc_fops = &proc_eeh_operations;
-       }
-
+       if (machine_is(pseries))
+               proc_create("ppc64/eeh", 0, NULL, &proc_eeh_operations);
        return 0;
 }
 __initcall(eeh_init_proc);
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
new file mode 100644 (file)
index 0000000..3c5727d
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * pseries Memory Hotplug infrastructure.
+ *
+ * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
+ *
+ *      This program is free software; you can redistribute it and/or
+ *      modify it under the terms of the GNU General Public License
+ *      as published by the Free Software Foundation; either version
+ *      2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/of.h>
+#include <linux/lmb.h>
+#include <asm/firmware.h>
+#include <asm/machdep.h>
+#include <asm/pSeries_reconfig.h>
+
+static int pseries_remove_memory(struct device_node *np)
+{
+       const char *type;
+       const unsigned int *my_index;
+       const unsigned int *regs;
+       u64 start_pfn, start;
+       struct zone *zone;
+       int ret = -EINVAL;
+
+       /*
+        * Check to see if we are actually removing memory
+        */
+       type = of_get_property(np, "device_type", NULL);
+       if (type == NULL || strcmp(type, "memory") != 0)
+               return 0;
+
+       /*
+        * Find the memory index and size of the removing section
+        */
+       my_index = of_get_property(np, "ibm,my-drc-index", NULL);
+       if (!my_index)
+               return ret;
+
+       regs = of_get_property(np, "reg", NULL);
+       if (!regs)
+               return ret;
+
+       start_pfn = section_nr_to_pfn(*my_index & 0xffff);
+       zone = page_zone(pfn_to_page(start_pfn));
+
+       /*
+        * Remove section mappings and sysfs entries for the
+        * section of the memory we are removing.
+        *
+        * NOTE: Ideally, this should be done in generic code like
+        * remove_memory(). But remove_memory() gets called by writing
+        * to sysfs "state" file and we can't remove sysfs entries
+        * while writing to it. So we have to defer it to here.
+        */
+       ret = __remove_pages(zone, start_pfn, regs[3] >> PAGE_SHIFT);
+       if (ret)
+               return ret;
+
+       /*
+        * Update memory regions for memory remove
+        */
+       lmb_remove(start_pfn << PAGE_SHIFT, regs[3]);
+
+       /*
+        * Remove htab bolted mappings for this section of memory
+        */
+       start = (unsigned long)__va(start_pfn << PAGE_SHIFT);
+       ret = remove_section_mapping(start, start + regs[3]);
+       return ret;
+}
+
+static int pseries_add_memory(struct device_node *np)
+{
+       const char *type;
+       const unsigned int *my_index;
+       const unsigned int *regs;
+       u64 start_pfn;
+       int ret = -EINVAL;
+
+       /*
+        * Check to see if we are actually adding memory
+        */
+       type = of_get_property(np, "device_type", NULL);
+       if (type == NULL || strcmp(type, "memory") != 0)
+               return 0;
+
+       /*
+        * Find the memory index and size of the added section
+        */
+       my_index = of_get_property(np, "ibm,my-drc-index", NULL);
+       if (!my_index)
+               return ret;
+
+       regs = of_get_property(np, "reg", NULL);
+       if (!regs)
+               return ret;
+
+       start_pfn = section_nr_to_pfn(*my_index & 0xffff);
+
+       /*
+        * Update memory region to represent the memory add
+        */
+       lmb_add(start_pfn << PAGE_SHIFT, regs[3]);
+       return 0;
+}
+
+static int pseries_memory_notifier(struct notifier_block *nb,
+                               unsigned long action, void *node)
+{
+       int err = NOTIFY_OK;
+
+       switch (action) {
+       case PSERIES_RECONFIG_ADD:
+               if (pseries_add_memory(node))
+                       err = NOTIFY_BAD;
+               break;
+       case PSERIES_RECONFIG_REMOVE:
+               if (pseries_remove_memory(node))
+                       err = NOTIFY_BAD;
+               break;
+       default:
+               err = NOTIFY_DONE;
+               break;
+       }
+       return err;
+}
+
+static struct notifier_block pseries_mem_nb = {
+       .notifier_call = pseries_memory_notifier,
+};
+
+static int __init pseries_memory_hotplug_init(void)
+{
+       if (firmware_has_feature(FW_FEATURE_LPAR))
+               pSeries_reconfig_notifier_register(&pseries_mem_nb);
+
+       return 0;
+}
+machine_device_initcall(pseries, pseries_memory_hotplug_init);
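
For scale, a worked example of the arithmetic used by pseries_add_memory() and pseries_remove_memory() above, under assumptions that are mine rather than the patch's (4 KiB pages, 16 MiB memory sections, and a "reg" property whose last cell, regs[3], carries the section size in bytes):

#include <linux/mm.h>

static void example_decode(void)
{
	u32 drc_index = 0x80000008;		/* hypothetical ibm,my-drc-index */
	unsigned long start_pfn = section_nr_to_pfn(drc_index & 0xffff);
	u64 start = (u64)start_pfn << PAGE_SHIFT;	/* 128 MiB with 16 MiB sections */
	u32 size = 0x01000000;			/* regs[3]: one 16 MiB section */
	unsigned long nr_pages = size >> PAGE_SHIFT;	/* 4096 pages to add/remove */

	pr_debug("section starts at 0x%llx, %lu pages\n",
		 (unsigned long long)start, nr_pages);
}
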
index ac75c10de27842a967cfc3bc4d9333840d055e2f..75769aae41d5ec7e67ee31dfc5b552971a7b1f77 100644 (file)
@@ -512,12 +512,9 @@ static int proc_ppc64_create_ofdt(void)
        if (!machine_is(pseries))
                return 0;
 
-       ent = create_proc_entry("ppc64/ofdt", S_IWUSR, NULL);
-       if (ent) {
-               ent->data = NULL;
+       ent = proc_create("ppc64/ofdt", S_IWUSR, NULL, &ofdt_fops);
+       if (ent)
                ent->size = 0;
-               ent->proc_fops = &ofdt_fops;
-       }
 
        return 0;
 }
index befadd4f9524880d197240836557369a5386a487..7d3e2b0bd4d299d5f467fde2a014dec0fb835fdb 100644 (file)
@@ -468,10 +468,9 @@ static int __init rtas_init(void)
                return -ENOMEM;
        }
 
-       entry = create_proc_entry("ppc64/rtas/error_log", S_IRUSR, NULL);
-       if (entry)
-               entry->proc_fops = &proc_rtas_log_operations;
-       else
+       entry = proc_create("ppc64/rtas/error_log", S_IRUSR, NULL,
+                           &proc_rtas_log_operations);
+       if (!entry)
                printk(KERN_ERR "Failed to create error_log proc entry\n");
 
        if (kernel_thread(rtasd, NULL, CLONE_FS) < 0)
index d359d6e929752f5cd7258a127b8845f13e3a2791..7f59188cd9a111fdb89cd36a9ae0dfafb3d8cff6 100644 (file)
@@ -143,7 +143,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
  */
 static int
 axon_ram_direct_access(struct block_device *device, sector_t sector,
-                      unsigned long *data)
+                      void **kaddr, unsigned long *pfn)
 {
        struct axon_ram_bank *bank = device->bd_disk->private_data;
        loff_t offset;
@@ -154,7 +154,8 @@ axon_ram_direct_access(struct block_device *device, sector_t sector,
                return -ERANGE;
        }
 
-       *data = bank->ph_addr + offset;
+       *kaddr = (void *)(bank->ph_addr + offset);
+       *pfn = virt_to_phys(kaddr) >> PAGE_SHIFT;
 
        return 0;
 }
index af2425e4655fbb6830d4cbb3891fb8e4ef3d9715..3d920376f58e8b2a6bfd5746196136424be7ad1c 100644 (file)
@@ -1,5 +1,8 @@
 /*
- * MPC85xx RapidIO support
+ * Freescale MPC85xx/MPC86xx RapidIO support
+ *
+ * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc.
+ * Zhang Wei <wei.zhang@freescale.com>
  *
  * Copyright 2005 MontaVista Software, Inc.
  * Matt Porter <mporter@kernel.crashing.org>
 #include <linux/interrupt.h>
 #include <linux/rio.h>
 #include <linux/rio_drv.h>
+#include <linux/of_platform.h>
+#include <linux/delay.h>
 
 #include <asm/io.h>
 
-#define RIO_REGS_BASE          (CCSRBAR + 0xc0000)
+/* RapidIO IRQ definitions, read from the OF device tree */
+#define IRQ_RIO_BELL(m)                (((struct rio_priv *)(m->priv))->bellirq)
+#define IRQ_RIO_TX(m)          (((struct rio_priv *)(m->priv))->txirq)
+#define IRQ_RIO_RX(m)          (((struct rio_priv *)(m->priv))->rxirq)
+
 #define RIO_ATMU_REGS_OFFSET   0x10c00
-#define RIO_MSG_REGS_OFFSET    0x11000
+#define RIO_P_MSG_REGS_OFFSET  0x11000
+#define RIO_S_MSG_REGS_OFFSET  0x13000
+#define RIO_ESCSR              0x158
+#define RIO_CCSR               0x15c
+#define RIO_ISR_AACR           0x10120
+#define RIO_ISR_AACR_AA                0x1     /* Accept All ID */
 #define RIO_MAINT_WIN_SIZE     0x400000
 #define RIO_DBELL_WIN_SIZE     0x1000
 
 #define DOORBELL_DSR_TE                0x00000080
 #define DOORBELL_DSR_QFI       0x00000010
 #define DOORBELL_DSR_DIQI      0x00000001
-#define DOORBELL_TID_OFFSET    0x03
-#define DOORBELL_SID_OFFSET    0x05
+#define DOORBELL_TID_OFFSET    0x02
+#define DOORBELL_SID_OFFSET    0x04
 #define DOORBELL_INFO_OFFSET   0x06
 
 #define DOORBELL_MESSAGE_SIZE  0x08
-#define DBELL_SID(x)           (*(u8 *)(x + DOORBELL_SID_OFFSET))
-#define DBELL_TID(x)           (*(u8 *)(x + DOORBELL_TID_OFFSET))
+#define DBELL_SID(x)           (*(u16 *)(x + DOORBELL_SID_OFFSET))
+#define DBELL_TID(x)           (*(u16 *)(x + DOORBELL_TID_OFFSET))
 #define DBELL_INF(x)           (*(u16 *)(x + DOORBELL_INFO_OFFSET))
 
 struct rio_atmu_regs {
        u32 rowtar;
-       u32 pad1;
+       u32 rowtear;
        u32 rowbar;
        u32 pad2;
        u32 rowar;
@@ -87,7 +101,15 @@ struct rio_msg_regs {
        u32 ifqdpar;
        u32 pad6;
        u32 ifqepar;
-       u32 pad7[250];
+       u32 pad7[226];
+       u32 odmr;
+       u32 odsr;
+       u32 res0[4];
+       u32 oddpr;
+       u32 oddatr;
+       u32 res1[3];
+       u32 odretcr;
+       u32 res2[12];
        u32 dmr;
        u32 dsr;
        u32 pad8;
@@ -112,20 +134,12 @@ struct rio_tx_desc {
        u32 res4;
 };
 
-static u32 regs_win;
-static struct rio_atmu_regs *atmu_regs;
-static struct rio_atmu_regs *maint_atmu_regs;
-static struct rio_atmu_regs *dbell_atmu_regs;
-static u32 dbell_win;
-static u32 maint_win;
-static struct rio_msg_regs *msg_regs;
-
-static struct rio_dbell_ring {
+struct rio_dbell_ring {
        void *virt;
        dma_addr_t phys;
-} dbell_ring;
+};
 
-static struct rio_msg_tx_ring {
+struct rio_msg_tx_ring {
        void *virt;
        dma_addr_t phys;
        void *virt_buffer[RIO_MAX_TX_RING_SIZE];
@@ -133,19 +147,35 @@ static struct rio_msg_tx_ring {
        int tx_slot;
        int size;
        void *dev_id;
-} msg_tx_ring;
+};
 
-static struct rio_msg_rx_ring {
+struct rio_msg_rx_ring {
        void *virt;
        dma_addr_t phys;
        void *virt_buffer[RIO_MAX_RX_RING_SIZE];
        int rx_slot;
        int size;
        void *dev_id;
-} msg_rx_ring;
+};
+
+struct rio_priv {
+       void __iomem *regs_win;
+       struct rio_atmu_regs __iomem *atmu_regs;
+       struct rio_atmu_regs __iomem *maint_atmu_regs;
+       struct rio_atmu_regs __iomem *dbell_atmu_regs;
+       void __iomem *dbell_win;
+       void __iomem *maint_win;
+       struct rio_msg_regs __iomem *msg_regs;
+       struct rio_dbell_ring dbell_ring;
+       struct rio_msg_tx_ring msg_tx_ring;
+       struct rio_msg_rx_ring msg_rx_ring;
+       int bellirq;
+       int txirq;
+       int rxirq;
+};
 
 /**
- * mpc85xx_rio_doorbell_send - Send a MPC85xx doorbell message
+ * fsl_rio_doorbell_send - Send a MPC85xx doorbell message
  * @index: ID of RapidIO interface
  * @destid: Destination ID of target device
  * @data: 16-bit info field of RapidIO doorbell message
@@ -153,18 +183,34 @@ static struct rio_msg_rx_ring {
  * Sends a MPC85xx doorbell message. Returns %0 on success or
  * %-EINVAL on failure.
  */
-static int mpc85xx_rio_doorbell_send(int index, u16 destid, u16 data)
+static int fsl_rio_doorbell_send(struct rio_mport *mport,
+                               int index, u16 destid, u16 data)
 {
-       pr_debug("mpc85xx_doorbell_send: index %d destid %4.4x data %4.4x\n",
+       struct rio_priv *priv = mport->priv;
+       pr_debug("fsl_doorbell_send: index %d destid %4.4x data %4.4x\n",
                 index, destid, data);
-       out_be32((void *)&dbell_atmu_regs->rowtar, destid << 22);
-       out_be16((void *)(dbell_win), data);
+       switch (mport->phy_type) {
+       case RIO_PHY_PARALLEL:
+               out_be32(&priv->dbell_atmu_regs->rowtar, destid << 22);
+               out_be16(priv->dbell_win, data);
+               break;
+       case RIO_PHY_SERIAL:
+               /* On serial RapidIO silicon such as the MPC8548 and MPC8641,
+                * the register sequence below is required.
+                */
+               out_be32(&priv->msg_regs->odmr, 0x00000000);
+               out_be32(&priv->msg_regs->odretcr, 0x00000004);
+               out_be32(&priv->msg_regs->oddpr, destid << 16);
+               out_be32(&priv->msg_regs->oddatr, data);
+               out_be32(&priv->msg_regs->odmr, 0x00000001);
+               break;
+       }
 
        return 0;
 }
 
 /**
- * mpc85xx_local_config_read - Generate a MPC85xx local config space read
+ * fsl_local_config_read - Generate a MPC85xx local config space read
  * @index: ID of RapdiIO interface
  * @offset: Offset into configuration space
  * @len: Length (in bytes) of the maintenance transaction
@@ -173,17 +219,19 @@ static int mpc85xx_rio_doorbell_send(int index, u16 destid, u16 data)
  * Generates a MPC85xx local configuration space read. Returns %0 on
  * success or %-EINVAL on failure.
  */
-static int mpc85xx_local_config_read(int index, u32 offset, int len, u32 * data)
+static int fsl_local_config_read(struct rio_mport *mport,
+                               int index, u32 offset, int len, u32 *data)
 {
-       pr_debug("mpc85xx_local_config_read: index %d offset %8.8x\n", index,
+       struct rio_priv *priv = mport->priv;
+       pr_debug("fsl_local_config_read: index %d offset %8.8x\n", index,
                 offset);
-       *data = in_be32((void *)(regs_win + offset));
+       *data = in_be32(priv->regs_win + offset);
 
        return 0;
 }
 
 /**
- * mpc85xx_local_config_write - Generate a MPC85xx local config space write
+ * fsl_local_config_write - Generate a MPC85xx local config space write
  * @index: ID of RapdiIO interface
  * @offset: Offset into configuration space
  * @len: Length (in bytes) of the maintenance transaction
@@ -192,18 +240,20 @@ static int mpc85xx_local_config_read(int index, u32 offset, int len, u32 * data)
  * Generates a MPC85xx local configuration space write. Returns %0 on
  * success or %-EINVAL on failure.
  */
-static int mpc85xx_local_config_write(int index, u32 offset, int len, u32 data)
+static int fsl_local_config_write(struct rio_mport *mport,
+                               int index, u32 offset, int len, u32 data)
 {
+       struct rio_priv *priv = mport->priv;
        pr_debug
-           ("mpc85xx_local_config_write: index %d offset %8.8x data %8.8x\n",
+           ("fsl_local_config_write: index %d offset %8.8x data %8.8x\n",
             index, offset, data);
-       out_be32((void *)(regs_win + offset), data);
+       out_be32(priv->regs_win + offset, data);
 
        return 0;
 }
 
 /**
- * mpc85xx_rio_config_read - Generate a MPC85xx read maintenance transaction
+ * fsl_rio_config_read - Generate a MPC85xx read maintenance transaction
  * @index: ID of RapdiIO interface
  * @destid: Destination ID of transaction
  * @hopcount: Number of hops to target device
@@ -215,18 +265,19 @@ static int mpc85xx_local_config_write(int index, u32 offset, int len, u32 data)
  * success or %-EINVAL on failure.
  */
 static int
-mpc85xx_rio_config_read(int index, u16 destid, u8 hopcount, u32 offset, int len,
-                       u32 * val)
+fsl_rio_config_read(struct rio_mport *mport, int index, u16 destid,
+                       u8 hopcount, u32 offset, int len, u32 *val)
 {
+       struct rio_priv *priv = mport->priv;
        u8 *data;
 
        pr_debug
-           ("mpc85xx_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
+           ("fsl_rio_config_read: index %d destid %d hopcount %d offset %8.8x len %d\n",
             index, destid, hopcount, offset, len);
-       out_be32((void *)&maint_atmu_regs->rowtar,
+       out_be32(&priv->maint_atmu_regs->rowtar,
                 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));
 
-       data = (u8 *) maint_win + offset;
+       data = (u8 *) priv->maint_win + offset;
        switch (len) {
        case 1:
                *val = in_8((u8 *) data);
@@ -243,7 +294,7 @@ mpc85xx_rio_config_read(int index, u16 destid, u8 hopcount, u32 offset, int len,
 }
 
 /**
- * mpc85xx_rio_config_write - Generate a MPC85xx write maintenance transaction
+ * fsl_rio_config_write - Generate a MPC85xx write maintenance transaction
  * @index: ID of RapdiIO interface
  * @destid: Destination ID of transaction
  * @hopcount: Number of hops to target device
@@ -255,17 +306,18 @@ mpc85xx_rio_config_read(int index, u16 destid, u8 hopcount, u32 offset, int len,
  * success or %-EINVAL on failure.
  */
 static int
-mpc85xx_rio_config_write(int index, u16 destid, u8 hopcount, u32 offset,
-                        int len, u32 val)
+fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
+                       u8 hopcount, u32 offset, int len, u32 val)
 {
+       struct rio_priv *priv = mport->priv;
        u8 *data;
        pr_debug
-           ("mpc85xx_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
+           ("fsl_rio_config_write: index %d destid %d hopcount %d offset %8.8x len %d val %8.8x\n",
             index, destid, hopcount, offset, len, val);
-       out_be32((void *)&maint_atmu_regs->rowtar,
+       out_be32(&priv->maint_atmu_regs->rowtar,
                 (destid << 22) | (hopcount << 12) | ((offset & ~0x3) >> 9));
 
-       data = (u8 *) maint_win + offset;
+       data = (u8 *) priv->maint_win + offset;
        switch (len) {
        case 1:
                out_8((u8 *) data, val);
@@ -296,9 +348,10 @@ int
 rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
                        void *buffer, size_t len)
 {
+       struct rio_priv *priv = mport->priv;
        u32 omr;
-       struct rio_tx_desc *desc =
-           (struct rio_tx_desc *)msg_tx_ring.virt + msg_tx_ring.tx_slot;
+       struct rio_tx_desc *desc = (struct rio_tx_desc *)priv->msg_tx_ring.virt
+                                       + priv->msg_tx_ring.tx_slot;
        int ret = 0;
 
        pr_debug
@@ -311,31 +364,43 @@ rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
        }
 
        /* Copy and clear rest of buffer */
-       memcpy(msg_tx_ring.virt_buffer[msg_tx_ring.tx_slot], buffer, len);
+       memcpy(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot], buffer,
+                       len);
        if (len < (RIO_MAX_MSG_SIZE - 4))
-               memset((void *)((u32) msg_tx_ring.
-                               virt_buffer[msg_tx_ring.tx_slot] + len), 0,
-                      RIO_MAX_MSG_SIZE - len);
+               memset(priv->msg_tx_ring.virt_buffer[priv->msg_tx_ring.tx_slot]
+                               + len, 0, RIO_MAX_MSG_SIZE - len);
 
-       /* Set mbox field for message */
-       desc->dport = mbox & 0x3;
+       switch (mport->phy_type) {
+       case RIO_PHY_PARALLEL:
+               /* Set mbox field for message */
+               desc->dport = mbox & 0x3;
 
-       /* Enable EOMI interrupt, set priority, and set destid */
-       desc->dattr = 0x28000000 | (rdev->destid << 2);
+               /* Enable EOMI interrupt, set priority, and set destid */
+               desc->dattr = 0x28000000 | (rdev->destid << 2);
+               break;
+       case RIO_PHY_SERIAL:
+               /* Set mbox field for message, and set destid */
+               desc->dport = (rdev->destid << 16) | (mbox & 0x3);
+
+               /* Enable EOMI interrupt and priority */
+               desc->dattr = 0x28000000;
+               break;
+       }
 
        /* Set transfer size aligned to next power of 2 (in double words) */
        desc->dwcnt = is_power_of_2(len) ? len : 1 << get_bitmask_order(len);
 
        /* Set snooping and source buffer address */
-       desc->saddr = 0x00000004 | msg_tx_ring.phys_buffer[msg_tx_ring.tx_slot];
+       desc->saddr = 0x00000004
+               | priv->msg_tx_ring.phys_buffer[priv->msg_tx_ring.tx_slot];
 
        /* Increment enqueue pointer */
-       omr = in_be32((void *)&msg_regs->omr);
-       out_be32((void *)&msg_regs->omr, omr | RIO_MSG_OMR_MUI);
+       omr = in_be32(&priv->msg_regs->omr);
+       out_be32(&priv->msg_regs->omr, omr | RIO_MSG_OMR_MUI);
 
        /* Go to next descriptor */
-       if (++msg_tx_ring.tx_slot == msg_tx_ring.size)
-               msg_tx_ring.tx_slot = 0;
+       if (++priv->msg_tx_ring.tx_slot == priv->msg_tx_ring.size)
+               priv->msg_tx_ring.tx_slot = 0;
 
       out:
        return ret;
@@ -344,7 +409,7 @@ rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
 EXPORT_SYMBOL_GPL(rio_hw_add_outb_message);
 
 /**
- * mpc85xx_rio_tx_handler - MPC85xx outbound message interrupt handler
+ * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
  * @irq: Linux interrupt number
  * @dev_instance: Pointer to interrupt-specific data
  *
@@ -352,32 +417,34 @@ EXPORT_SYMBOL_GPL(rio_hw_add_outb_message);
  * mailbox event handler and acks the interrupt occurrence.
  */
 static irqreturn_t
-mpc85xx_rio_tx_handler(int irq, void *dev_instance)
+fsl_rio_tx_handler(int irq, void *dev_instance)
 {
        int osr;
        struct rio_mport *port = (struct rio_mport *)dev_instance;
+       struct rio_priv *priv = port->priv;
 
-       osr = in_be32((void *)&msg_regs->osr);
+       osr = in_be32(&priv->msg_regs->osr);
 
        if (osr & RIO_MSG_OSR_TE) {
                pr_info("RIO: outbound message transmission error\n");
-               out_be32((void *)&msg_regs->osr, RIO_MSG_OSR_TE);
+               out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_TE);
                goto out;
        }
 
        if (osr & RIO_MSG_OSR_QOI) {
                pr_info("RIO: outbound message queue overflow\n");
-               out_be32((void *)&msg_regs->osr, RIO_MSG_OSR_QOI);
+               out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_QOI);
                goto out;
        }
 
        if (osr & RIO_MSG_OSR_EOMI) {
-               u32 dqp = in_be32((void *)&msg_regs->odqdpar);
-               int slot = (dqp - msg_tx_ring.phys) >> 5;
-               port->outb_msg[0].mcback(port, msg_tx_ring.dev_id, -1, slot);
+               u32 dqp = in_be32(&priv->msg_regs->odqdpar);
+               int slot = (dqp - priv->msg_tx_ring.phys) >> 5;
+               port->outb_msg[0].mcback(port, priv->msg_tx_ring.dev_id, -1,
+                               slot);
 
                /* Ack the end-of-message interrupt */
-               out_be32((void *)&msg_regs->osr, RIO_MSG_OSR_EOMI);
+               out_be32(&priv->msg_regs->osr, RIO_MSG_OSR_EOMI);
        }
 
       out:
@@ -398,6 +465,7 @@ mpc85xx_rio_tx_handler(int irq, void *dev_instance)
 int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
 {
        int i, j, rc = 0;
+       struct rio_priv *priv = mport->priv;
 
        if ((entries < RIO_MIN_TX_RING_SIZE) ||
            (entries > RIO_MAX_TX_RING_SIZE) || (!is_power_of_2(entries))) {
@@ -406,54 +474,53 @@ int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entr
        }
 
        /* Initialize shadow copy ring */
-       msg_tx_ring.dev_id = dev_id;
-       msg_tx_ring.size = entries;
-
-       for (i = 0; i < msg_tx_ring.size; i++) {
-               if (!
-                   (msg_tx_ring.virt_buffer[i] =
-                    dma_alloc_coherent(NULL, RIO_MSG_BUFFER_SIZE,
-                                       &msg_tx_ring.phys_buffer[i],
-                                       GFP_KERNEL))) {
+       priv->msg_tx_ring.dev_id = dev_id;
+       priv->msg_tx_ring.size = entries;
+
+       for (i = 0; i < priv->msg_tx_ring.size; i++) {
+               priv->msg_tx_ring.virt_buffer[i] =
+                       dma_alloc_coherent(NULL, RIO_MSG_BUFFER_SIZE,
+                               &priv->msg_tx_ring.phys_buffer[i], GFP_KERNEL);
+               if (!priv->msg_tx_ring.virt_buffer[i]) {
                        rc = -ENOMEM;
-                       for (j = 0; j < msg_tx_ring.size; j++)
-                               if (msg_tx_ring.virt_buffer[j])
+                       for (j = 0; j < priv->msg_tx_ring.size; j++)
+                               if (priv->msg_tx_ring.virt_buffer[j])
                                        dma_free_coherent(NULL,
-                                                         RIO_MSG_BUFFER_SIZE,
-                                                         msg_tx_ring.
-                                                         virt_buffer[j],
-                                                         msg_tx_ring.
-                                                         phys_buffer[j]);
+                                                       RIO_MSG_BUFFER_SIZE,
+                                                       priv->msg_tx_ring.
+                                                       virt_buffer[j],
+                                                       priv->msg_tx_ring.
+                                                       phys_buffer[j]);
                        goto out;
                }
        }
 
        /* Initialize outbound message descriptor ring */
-       if (!(msg_tx_ring.virt = dma_alloc_coherent(NULL,
-                                                   msg_tx_ring.size *
-                                                   RIO_MSG_DESC_SIZE,
-                                                   &msg_tx_ring.phys,
-                                                   GFP_KERNEL))) {
+       priv->msg_tx_ring.virt = dma_alloc_coherent(NULL,
+                               priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+                               &priv->msg_tx_ring.phys, GFP_KERNEL);
+       if (!priv->msg_tx_ring.virt) {
                rc = -ENOMEM;
                goto out_dma;
        }
-       memset(msg_tx_ring.virt, 0, msg_tx_ring.size * RIO_MSG_DESC_SIZE);
-       msg_tx_ring.tx_slot = 0;
+       memset(priv->msg_tx_ring.virt, 0,
+                       priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE);
+       priv->msg_tx_ring.tx_slot = 0;
 
        /* Point dequeue/enqueue pointers at first entry in ring */
-       out_be32((void *)&msg_regs->odqdpar, msg_tx_ring.phys);
-       out_be32((void *)&msg_regs->odqepar, msg_tx_ring.phys);
+       out_be32(&priv->msg_regs->odqdpar, priv->msg_tx_ring.phys);
+       out_be32(&priv->msg_regs->odqepar, priv->msg_tx_ring.phys);
 
        /* Configure for snooping */
-       out_be32((void *)&msg_regs->osar, 0x00000004);
+       out_be32(&priv->msg_regs->osar, 0x00000004);
 
        /* Clear interrupt status */
-       out_be32((void *)&msg_regs->osr, 0x000000b3);
+       out_be32(&priv->msg_regs->osr, 0x000000b3);
 
        /* Hook up outbound message handler */
-       if ((rc =
-            request_irq(MPC85xx_IRQ_RIO_TX, mpc85xx_rio_tx_handler, 0,
-                        "msg_tx", (void *)mport)) < 0)
+       rc = request_irq(IRQ_RIO_TX(mport), fsl_rio_tx_handler, 0,
+                        "msg_tx", (void *)mport);
+       if (rc < 0)
                goto out_irq;
 
        /*
@@ -463,28 +530,28 @@ int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entr
         *      Chaining mode
         *      Disable
         */
-       out_be32((void *)&msg_regs->omr, 0x00100220);
+       out_be32(&priv->msg_regs->omr, 0x00100220);
 
        /* Set number of entries */
-       out_be32((void *)&msg_regs->omr,
-                in_be32((void *)&msg_regs->omr) |
+       out_be32(&priv->msg_regs->omr,
+                in_be32(&priv->msg_regs->omr) |
                 ((get_bitmask_order(entries) - 2) << 12));
 
        /* Now enable the unit */
-       out_be32((void *)&msg_regs->omr, in_be32((void *)&msg_regs->omr) | 0x1);
+       out_be32(&priv->msg_regs->omr, in_be32(&priv->msg_regs->omr) | 0x1);
 
       out:
        return rc;
 
       out_irq:
-       dma_free_coherent(NULL, msg_tx_ring.size * RIO_MSG_DESC_SIZE,
-                         msg_tx_ring.virt, msg_tx_ring.phys);
+       dma_free_coherent(NULL, priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+                         priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);
 
       out_dma:
-       for (i = 0; i < msg_tx_ring.size; i++)
+       for (i = 0; i < priv->msg_tx_ring.size; i++)
                dma_free_coherent(NULL, RIO_MSG_BUFFER_SIZE,
-                                 msg_tx_ring.virt_buffer[i],
-                                 msg_tx_ring.phys_buffer[i]);
+                                 priv->msg_tx_ring.virt_buffer[i],
+                                 priv->msg_tx_ring.phys_buffer[i]);
 
        return rc;
 }
@@ -499,19 +566,20 @@ int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entr
  */
 void rio_close_outb_mbox(struct rio_mport *mport, int mbox)
 {
+       struct rio_priv *priv = mport->priv;
        /* Disable inbound message unit */
-       out_be32((void *)&msg_regs->omr, 0);
+       out_be32(&priv->msg_regs->omr, 0);
 
        /* Free ring */
-       dma_free_coherent(NULL, msg_tx_ring.size * RIO_MSG_DESC_SIZE,
-                         msg_tx_ring.virt, msg_tx_ring.phys);
+       dma_free_coherent(NULL, priv->msg_tx_ring.size * RIO_MSG_DESC_SIZE,
+                         priv->msg_tx_ring.virt, priv->msg_tx_ring.phys);
 
        /* Free interrupt */
-       free_irq(MPC85xx_IRQ_RIO_TX, (void *)mport);
+       free_irq(IRQ_RIO_TX(mport), (void *)mport);
 }
 
 /**
- * mpc85xx_rio_rx_handler - MPC85xx inbound message interrupt handler
+ * fsl_rio_rx_handler - MPC85xx inbound message interrupt handler
  * @irq: Linux interrupt number
  * @dev_instance: Pointer to interrupt-specific data
  *
@@ -519,16 +587,17 @@ void rio_close_outb_mbox(struct rio_mport *mport, int mbox)
  * mailbox event handler and acks the interrupt occurrence.
  */
 static irqreturn_t
-mpc85xx_rio_rx_handler(int irq, void *dev_instance)
+fsl_rio_rx_handler(int irq, void *dev_instance)
 {
        int isr;
        struct rio_mport *port = (struct rio_mport *)dev_instance;
+       struct rio_priv *priv = port->priv;
 
-       isr = in_be32((void *)&msg_regs->isr);
+       isr = in_be32(&priv->msg_regs->isr);
 
        if (isr & RIO_MSG_ISR_TE) {
                pr_info("RIO: inbound message reception error\n");
-               out_be32((void *)&msg_regs->isr, RIO_MSG_ISR_TE);
+               out_be32((void *)&priv->msg_regs->isr, RIO_MSG_ISR_TE);
                goto out;
        }
 
@@ -540,10 +609,10 @@ mpc85xx_rio_rx_handler(int irq, void *dev_instance)
                 * make the callback with an unknown/invalid mailbox number
                 * argument.
                 */
-               port->inb_msg[0].mcback(port, msg_rx_ring.dev_id, -1, -1);
+               port->inb_msg[0].mcback(port, priv->msg_rx_ring.dev_id, -1, -1);
 
                /* Ack the queueing interrupt */
-               out_be32((void *)&msg_regs->isr, RIO_MSG_ISR_DIQI);
+               out_be32(&priv->msg_regs->isr, RIO_MSG_ISR_DIQI);
        }
 
       out:
@@ -564,6 +633,7 @@ mpc85xx_rio_rx_handler(int irq, void *dev_instance)
 int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries)
 {
        int i, rc = 0;
+       struct rio_priv *priv = mport->priv;
 
        if ((entries < RIO_MIN_RX_RING_SIZE) ||
            (entries > RIO_MAX_RX_RING_SIZE) || (!is_power_of_2(entries))) {
@@ -572,36 +642,35 @@ int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entri
        }
 
        /* Initialize client buffer ring */
-       msg_rx_ring.dev_id = dev_id;
-       msg_rx_ring.size = entries;
-       msg_rx_ring.rx_slot = 0;
-       for (i = 0; i < msg_rx_ring.size; i++)
-               msg_rx_ring.virt_buffer[i] = NULL;
+       priv->msg_rx_ring.dev_id = dev_id;
+       priv->msg_rx_ring.size = entries;
+       priv->msg_rx_ring.rx_slot = 0;
+       for (i = 0; i < priv->msg_rx_ring.size; i++)
+               priv->msg_rx_ring.virt_buffer[i] = NULL;
 
        /* Initialize inbound message ring */
-       if (!(msg_rx_ring.virt = dma_alloc_coherent(NULL,
-                                                   msg_rx_ring.size *
-                                                   RIO_MAX_MSG_SIZE,
-                                                   &msg_rx_ring.phys,
-                                                   GFP_KERNEL))) {
+       priv->msg_rx_ring.virt = dma_alloc_coherent(NULL,
+                               priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
+                               &priv->msg_rx_ring.phys, GFP_KERNEL);
+       if (!priv->msg_rx_ring.virt) {
                rc = -ENOMEM;
                goto out;
        }
 
        /* Point dequeue/enqueue pointers at first entry in ring */
-       out_be32((void *)&msg_regs->ifqdpar, (u32) msg_rx_ring.phys);
-       out_be32((void *)&msg_regs->ifqepar, (u32) msg_rx_ring.phys);
+       out_be32(&priv->msg_regs->ifqdpar, (u32) priv->msg_rx_ring.phys);
+       out_be32(&priv->msg_regs->ifqepar, (u32) priv->msg_rx_ring.phys);
 
        /* Clear interrupt status */
-       out_be32((void *)&msg_regs->isr, 0x00000091);
+       out_be32(&priv->msg_regs->isr, 0x00000091);
 
        /* Hook up inbound message handler */
-       if ((rc =
-            request_irq(MPC85xx_IRQ_RIO_RX, mpc85xx_rio_rx_handler, 0,
-                        "msg_rx", (void *)mport)) < 0) {
+       rc = request_irq(IRQ_RIO_RX(mport), fsl_rio_rx_handler, 0,
+                        "msg_rx", (void *)mport);
+       if (rc < 0) {
                dma_free_coherent(NULL, RIO_MSG_BUFFER_SIZE,
-                                 msg_tx_ring.virt_buffer[i],
-                                 msg_tx_ring.phys_buffer[i]);
+                                 priv->msg_tx_ring.virt_buffer[i],
+                                 priv->msg_tx_ring.phys_buffer[i]);
                goto out;
        }
 
@@ -612,15 +681,13 @@ int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entri
         *      Unmask all interrupt sources
         *      Disable
         */
-       out_be32((void *)&msg_regs->imr, 0x001b0060);
+       out_be32(&priv->msg_regs->imr, 0x001b0060);
 
        /* Set number of queue entries */
-       out_be32((void *)&msg_regs->imr,
-                in_be32((void *)&msg_regs->imr) |
-                ((get_bitmask_order(entries) - 2) << 12));
+       setbits32(&priv->msg_regs->imr, (get_bitmask_order(entries) - 2) << 12);
 
        /* Now enable the unit */
-       out_be32((void *)&msg_regs->imr, in_be32((void *)&msg_regs->imr) | 0x1);
+       setbits32(&priv->msg_regs->imr, 0x1);
 
       out:
        return rc;
@@ -636,15 +703,16 @@ int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entri
  */
 void rio_close_inb_mbox(struct rio_mport *mport, int mbox)
 {
+       struct rio_priv *priv = mport->priv;
        /* Disable inbound message unit */
-       out_be32((void *)&msg_regs->imr, 0);
+       out_be32(&priv->msg_regs->imr, 0);
 
        /* Free ring */
-       dma_free_coherent(NULL, msg_rx_ring.size * RIO_MAX_MSG_SIZE,
-                         msg_rx_ring.virt, msg_rx_ring.phys);
+       dma_free_coherent(NULL, priv->msg_rx_ring.size * RIO_MAX_MSG_SIZE,
+                         priv->msg_rx_ring.virt, priv->msg_rx_ring.phys);
 
        /* Free interrupt */
-       free_irq(MPC85xx_IRQ_RIO_RX, (void *)mport);
+       free_irq(IRQ_RIO_RX(mport), (void *)mport);
 }
 
 /**
@@ -659,21 +727,22 @@ void rio_close_inb_mbox(struct rio_mport *mport, int mbox)
 int rio_hw_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
 {
        int rc = 0;
+       struct rio_priv *priv = mport->priv;
 
        pr_debug("RIO: rio_hw_add_inb_buffer(), msg_rx_ring.rx_slot %d\n",
-                msg_rx_ring.rx_slot);
+                priv->msg_rx_ring.rx_slot);
 
-       if (msg_rx_ring.virt_buffer[msg_rx_ring.rx_slot]) {
+       if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) {
                printk(KERN_ERR
                       "RIO: error adding inbound buffer %d, buffer exists\n",
-                      msg_rx_ring.rx_slot);
+                      priv->msg_rx_ring.rx_slot);
                rc = -EINVAL;
                goto out;
        }
 
-       msg_rx_ring.virt_buffer[msg_rx_ring.rx_slot] = buf;
-       if (++msg_rx_ring.rx_slot == msg_rx_ring.size)
-               msg_rx_ring.rx_slot = 0;
+       priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot] = buf;
+       if (++priv->msg_rx_ring.rx_slot == priv->msg_rx_ring.size)
+               priv->msg_rx_ring.rx_slot = 0;
 
       out:
        return rc;
@@ -691,20 +760,21 @@ EXPORT_SYMBOL_GPL(rio_hw_add_inb_buffer);
  */
 void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox)
 {
-       u32 imr;
+       struct rio_priv *priv = mport->priv;
        u32 phys_buf, virt_buf;
        void *buf = NULL;
        int buf_idx;
 
-       phys_buf = in_be32((void *)&msg_regs->ifqdpar);
+       phys_buf = in_be32(&priv->msg_regs->ifqdpar);
 
        /* If no more messages, then bail out */
-       if (phys_buf == in_be32((void *)&msg_regs->ifqepar))
+       if (phys_buf == in_be32(&priv->msg_regs->ifqepar))
                goto out2;
 
-       virt_buf = (u32) msg_rx_ring.virt + (phys_buf - msg_rx_ring.phys);
-       buf_idx = (phys_buf - msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
-       buf = msg_rx_ring.virt_buffer[buf_idx];
+       virt_buf = (u32) priv->msg_rx_ring.virt + (phys_buf
+                                               - priv->msg_rx_ring.phys);
+       buf_idx = (phys_buf - priv->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
+       buf = priv->msg_rx_ring.virt_buffer[buf_idx];
 
        if (!buf) {
                printk(KERN_ERR
@@ -716,11 +786,10 @@ void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox)
        memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE);
 
        /* Clear the available buffer */
-       msg_rx_ring.virt_buffer[buf_idx] = NULL;
+       priv->msg_rx_ring.virt_buffer[buf_idx] = NULL;
 
       out1:
-       imr = in_be32((void *)&msg_regs->imr);
-       out_be32((void *)&msg_regs->imr, imr | RIO_MSG_IMR_MI);
+       setbits32(&priv->msg_regs->imr, RIO_MSG_IMR_MI);
 
       out2:
        return buf;
@@ -729,7 +798,7 @@ void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox)
 EXPORT_SYMBOL_GPL(rio_hw_get_inb_message);
 
 /**
- * mpc85xx_rio_dbell_handler - MPC85xx doorbell interrupt handler
+ * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler
  * @irq: Linux interrupt number
  * @dev_instance: Pointer to interrupt-specific data
  *
@@ -737,31 +806,31 @@ EXPORT_SYMBOL_GPL(rio_hw_get_inb_message);
  * doorbell event handlers and executes a matching event handler.
  */
 static irqreturn_t
-mpc85xx_rio_dbell_handler(int irq, void *dev_instance)
+fsl_rio_dbell_handler(int irq, void *dev_instance)
 {
        int dsr;
        struct rio_mport *port = (struct rio_mport *)dev_instance;
+       struct rio_priv *priv = port->priv;
 
-       dsr = in_be32((void *)&msg_regs->dsr);
+       dsr = in_be32(&priv->msg_regs->dsr);
 
        if (dsr & DOORBELL_DSR_TE) {
                pr_info("RIO: doorbell reception error\n");
-               out_be32((void *)&msg_regs->dsr, DOORBELL_DSR_TE);
+               out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_TE);
                goto out;
        }
 
        if (dsr & DOORBELL_DSR_QFI) {
                pr_info("RIO: doorbell queue full\n");
-               out_be32((void *)&msg_regs->dsr, DOORBELL_DSR_QFI);
+               out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI);
                goto out;
        }
 
        /* XXX Need to check/dispatch until queue empty */
        if (dsr & DOORBELL_DSR_DIQI) {
                u32 dmsg =
-                   (u32) dbell_ring.virt +
-                   (in_be32((void *)&msg_regs->dqdpar) & 0xfff);
-               u32 dmr;
+                   (u32) priv->dbell_ring.virt +
+                   (in_be32(&priv->msg_regs->dqdpar) & 0xfff);
                struct rio_dbell *dbell;
                int found = 0;
 
@@ -784,9 +853,8 @@ mpc85xx_rio_dbell_handler(int irq, void *dev_instance)
                            ("RIO: spurious doorbell, sid %2.2x tid %2.2x info %4.4x\n",
                             DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg));
                }
-               dmr = in_be32((void *)&msg_regs->dmr);
-               out_be32((void *)&msg_regs->dmr, dmr | DOORBELL_DMR_DI);
-               out_be32((void *)&msg_regs->dsr, DOORBELL_DSR_DIQI);
+               setbits32(&priv->msg_regs->dmr, DOORBELL_DMR_DI);
+               out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_DIQI);
        }
 
       out:
@@ -794,21 +862,22 @@ mpc85xx_rio_dbell_handler(int irq, void *dev_instance)
 }
 
 /**
- * mpc85xx_rio_doorbell_init - MPC85xx doorbell interface init
+ * fsl_rio_doorbell_init - MPC85xx doorbell interface init
  * @mport: Master port implementing the inbound doorbell unit
  *
  * Initializes doorbell unit hardware and inbound DMA buffer
- * ring. Called from mpc85xx_rio_setup(). Returns %0 on success
+ * ring. Called from fsl_rio_setup(). Returns %0 on success
  * or %-ENOMEM on failure.
  */
-static int mpc85xx_rio_doorbell_init(struct rio_mport *mport)
+static int fsl_rio_doorbell_init(struct rio_mport *mport)
 {
+       struct rio_priv *priv = mport->priv;
        int rc = 0;
 
        /* Map outbound doorbell window immediately after maintenance window */
-       if (!(dbell_win =
-             (u32) ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE,
-                           RIO_DBELL_WIN_SIZE))) {
+       priv->dbell_win = ioremap(mport->iores.start + RIO_MAINT_WIN_SIZE,
+                           RIO_DBELL_WIN_SIZE);
+       if (!priv->dbell_win) {
                printk(KERN_ERR
                       "RIO: unable to map outbound doorbell window\n");
                rc = -ENOMEM;
@@ -816,37 +885,36 @@ static int mpc85xx_rio_doorbell_init(struct rio_mport *mport)
        }
 
        /* Initialize inbound doorbells */
-       if (!(dbell_ring.virt = dma_alloc_coherent(NULL,
-                                                  512 * DOORBELL_MESSAGE_SIZE,
-                                                  &dbell_ring.phys,
-                                                  GFP_KERNEL))) {
+       priv->dbell_ring.virt = dma_alloc_coherent(NULL, 512 *
+                   DOORBELL_MESSAGE_SIZE, &priv->dbell_ring.phys, GFP_KERNEL);
+       if (!priv->dbell_ring.virt) {
                printk(KERN_ERR "RIO: unable allocate inbound doorbell ring\n");
                rc = -ENOMEM;
-               iounmap((void *)dbell_win);
+               iounmap(priv->dbell_win);
                goto out;
        }
 
        /* Point dequeue/enqueue pointers at first entry in ring */
-       out_be32((void *)&msg_regs->dqdpar, (u32) dbell_ring.phys);
-       out_be32((void *)&msg_regs->dqepar, (u32) dbell_ring.phys);
+       out_be32(&priv->msg_regs->dqdpar, (u32) priv->dbell_ring.phys);
+       out_be32(&priv->msg_regs->dqepar, (u32) priv->dbell_ring.phys);
 
        /* Clear interrupt status */
-       out_be32((void *)&msg_regs->dsr, 0x00000091);
+       out_be32(&priv->msg_regs->dsr, 0x00000091);
 
        /* Hook up doorbell handler */
-       if ((rc =
-            request_irq(MPC85xx_IRQ_RIO_BELL, mpc85xx_rio_dbell_handler, 0,
-                        "dbell_rx", (void *)mport) < 0)) {
-               iounmap((void *)dbell_win);
+       rc = request_irq(IRQ_RIO_BELL(mport), fsl_rio_dbell_handler, 0,
+                        "dbell_rx", (void *)mport);
+       if (rc < 0) {
+               iounmap(priv->dbell_win);
                dma_free_coherent(NULL, 512 * DOORBELL_MESSAGE_SIZE,
-                                 dbell_ring.virt, dbell_ring.phys);
+                                 priv->dbell_ring.virt, priv->dbell_ring.phys);
                printk(KERN_ERR
                       "MPC85xx RIO: unable to request inbound doorbell irq");
                goto out;
        }
 
        /* Configure doorbells for snooping, 512 entries, and enable */
-       out_be32((void *)&msg_regs->dmr, 0x00108161);
+       out_be32(&priv->msg_regs->dmr, 0x00108161);
 
       out:
        return rc;
@@ -854,7 +922,7 @@ static int mpc85xx_rio_doorbell_init(struct rio_mport *mport)
 
 static char *cmdline = NULL;
 
-static int mpc85xx_rio_get_hdid(int index)
+static int fsl_rio_get_hdid(int index)
 {
        /* XXX Need to parse multiple entries in some format */
        if (!cmdline)
@@ -863,7 +931,7 @@ static int mpc85xx_rio_get_hdid(int index)
        return simple_strtol(cmdline, NULL, 0);
 }
 
-static int mpc85xx_rio_get_cmdline(char *s)
+static int fsl_rio_get_cmdline(char *s)
 {
        if (!s)
                return 0;
@@ -872,61 +940,266 @@ static int mpc85xx_rio_get_cmdline(char *s)
        return 1;
 }
 
-__setup("riohdid=", mpc85xx_rio_get_cmdline);
+__setup("riohdid=", fsl_rio_get_cmdline);
+
+static inline void fsl_rio_info(struct device *dev, u32 ccsr)
+{
+       const char *str;
+       if (ccsr & 1) {
+               /* Serial phy */
+               switch (ccsr >> 30) {
+               case 0:
+                       str = "1";
+                       break;
+               case 1:
+                       str = "4";
+                       break;
+               default:
+                       str = "Unknown";
+                       break;
+               }
+               dev_info(dev, "Hardware port width: %s\n", str);
+
+               switch ((ccsr >> 27) & 7) {
+               case 0:
+                       str = "Single-lane 0";
+                       break;
+               case 1:
+                       str = "Single-lane 2";
+                       break;
+               case 2:
+                       str = "Four-lane";
+                       break;
+               default:
+                       str = "Unknown";
+                       break;
+               }
+               dev_info(dev, "Training connection status: %s\n", str);
+       } else {
+               /* Parallel phy */
+               if (!(ccsr & 0x80000000))
+                       dev_info(dev, "Output port operating in 8-bit mode\n");
+               if (!(ccsr & 0x08000000))
+                       dev_info(dev, "Input port operating in 8-bit mode\n");
+       }
+}
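
As a concrete illustration of the decode performed by fsl_rio_info() above, take a hypothetical CCSR readback (the value is made up, not from the patch):

#include <linux/types.h>

static const u32 example_ccsr = 0x50000001;
/* bit 0 set                     -> serial PHY                              */
/* example_ccsr >> 30 == 1       -> "Hardware port width: 4"                */
/* (example_ccsr >> 27) & 7 == 2 -> "Training connection status: Four-lane" */
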
 
 /**
- * mpc85xx_rio_setup - Setup MPC85xx RapidIO interface
- * @law_start: Starting physical address of RapidIO LAW
- * @law_size: Size of RapidIO LAW
+ * fsl_rio_setup - Setup Freescale PowerPC RapidIO interface
+ * @dev: of_device for the RapidIO controller node
  *
  * Initializes MPC85xx RapidIO hardware interface, configures
  * master port with system-specific info, and registers the
  * master port with the RapidIO subsystem.
  */
-void mpc85xx_rio_setup(int law_start, int law_size)
+int fsl_rio_setup(struct of_device *dev)
 {
        struct rio_ops *ops;
        struct rio_mport *port;
+       struct rio_priv *priv;
+       int rc = 0;
+       const u32 *dt_range, *cell;
+       struct resource regs;
+       int rlen;
+       u32 ccsr;
+       u64 law_start, law_size;
+       int paw, aw, sw;
+
+       if (!dev->node) {
+               dev_err(&dev->dev, "Device OF-Node is NULL");
+               return -EFAULT;
+       }
+
+       rc = of_address_to_resource(dev->node, 0, &regs);
+       if (rc) {
+               dev_err(&dev->dev, "Can't get %s property 'reg'\n",
+                               dev->node->full_name);
+               return -EFAULT;
+       }
+       dev_info(&dev->dev, "Of-device full name %s\n", dev->node->full_name);
+       dev_info(&dev->dev, "Regs start 0x%08x size 0x%08x\n",  regs.start,
+                                               regs.end - regs.start + 1);
+
+       dt_range = of_get_property(dev->node, "ranges", &rlen);
+       if (!dt_range) {
+               dev_err(&dev->dev, "Can't get %s property 'ranges'\n",
+                               dev->node->full_name);
+               return -EFAULT;
+       }
+
+       /* Get the node's #address-cells count */
+       cell = of_get_property(dev->node, "#address-cells", NULL);
+       if (cell)
+               aw = *cell;
+       else
+               aw = of_n_addr_cells(dev->node);
+       /* Get the node's #size-cells count */
+       cell = of_get_property(dev->node, "#size-cells", NULL);
+       if (cell)
+               sw = *cell;
+       else
+               sw = of_n_size_cells(dev->node);
+       /* Get the parent's address cell count */
+       paw = of_n_addr_cells(dev->node);
+
+       law_start = of_read_number(dt_range + aw, paw);
+       law_size = of_read_number(dt_range + aw + paw, sw);
+
+       dev_info(&dev->dev, "LAW start 0x%016llx, size 0x%016llx.\n",
+                       law_start, law_size);
 
        ops = kmalloc(sizeof(struct rio_ops), GFP_KERNEL);
-       ops->lcread = mpc85xx_local_config_read;
-       ops->lcwrite = mpc85xx_local_config_write;
-       ops->cread = mpc85xx_rio_config_read;
-       ops->cwrite = mpc85xx_rio_config_write;
-       ops->dsend = mpc85xx_rio_doorbell_send;
+       ops->lcread = fsl_local_config_read;
+       ops->lcwrite = fsl_local_config_write;
+       ops->cread = fsl_rio_config_read;
+       ops->cwrite = fsl_rio_config_write;
+       ops->dsend = fsl_rio_doorbell_send;
 
-       port = kmalloc(sizeof(struct rio_mport), GFP_KERNEL);
+       port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
        port->id = 0;
        port->index = 0;
+
+       priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL);
+       if (!priv) {
+               printk(KERN_ERR "Can't alloc memory for 'priv'\n");
+               rc = -ENOMEM;
+               goto err;
+       }
+
        INIT_LIST_HEAD(&port->dbells);
        port->iores.start = law_start;
        port->iores.end = law_start + law_size;
        port->iores.flags = IORESOURCE_MEM;
 
+       priv->bellirq = irq_of_parse_and_map(dev->node, 2);
+       priv->txirq = irq_of_parse_and_map(dev->node, 3);
+       priv->rxirq = irq_of_parse_and_map(dev->node, 4);
+       dev_info(&dev->dev, "bellirq: %d, txirq: %d, rxirq %d\n", priv->bellirq,
+                               priv->txirq, priv->rxirq);
+
        rio_init_dbell_res(&port->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
        rio_init_mbox_res(&port->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
        rio_init_mbox_res(&port->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
        strcpy(port->name, "RIO0 mport");
 
        port->ops = ops;
-       port->host_deviceid = mpc85xx_rio_get_hdid(port->id);
+       port->host_deviceid = fsl_rio_get_hdid(port->id);
 
+       port->priv = priv;
        rio_register_mport(port);
 
-       regs_win = (u32) ioremap(RIO_REGS_BASE, 0x20000);
-       atmu_regs = (struct rio_atmu_regs *)(regs_win + RIO_ATMU_REGS_OFFSET);
-       maint_atmu_regs = atmu_regs + 1;
-       dbell_atmu_regs = atmu_regs + 2;
-       msg_regs = (struct rio_msg_regs *)(regs_win + RIO_MSG_REGS_OFFSET);
+       priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1);
+
+       /* Probe the master port phy type */
+       ccsr = in_be32(priv->regs_win + RIO_CCSR);
+       port->phy_type = (ccsr & 1) ? RIO_PHY_SERIAL : RIO_PHY_PARALLEL;
+       dev_info(&dev->dev, "RapidIO PHY type: %s\n",
+                       (port->phy_type == RIO_PHY_PARALLEL) ? "parallel" :
+                       ((port->phy_type == RIO_PHY_SERIAL) ? "serial" :
+                        "unknown"));
+       /* Check the port training status */
+       if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) {
+               dev_err(&dev->dev, "Port is not ready. "
+                                  "Trying to restart the connection...\n");
+               switch (port->phy_type) {
+               case RIO_PHY_SERIAL:
+                       /* Disable ports */
+                       out_be32(priv->regs_win + RIO_CCSR, 0);
+                       /* Set 1x lane */
+                       setbits32(priv->regs_win + RIO_CCSR, 0x02000000);
+                       /* Enable ports */
+                       setbits32(priv->regs_win + RIO_CCSR, 0x00600000);
+                       break;
+               case RIO_PHY_PARALLEL:
+                       /* Disable ports */
+                       out_be32(priv->regs_win + RIO_CCSR, 0x22000000);
+                       /* Enable ports */
+                       out_be32(priv->regs_win + RIO_CCSR, 0x44000000);
+                       break;
+               }
+               msleep(100);
+               if (in_be32((priv->regs_win + RIO_ESCSR)) & 1) {
+                       dev_err(&dev->dev, "Port restart failed.\n");
+                       rc = -ENOLINK;
+                       goto err;
+               }
+               dev_info(&dev->dev, "Port restart successful.\n");
+       }
+       fsl_rio_info(&dev->dev, ccsr);
+
+       port->sys_size = (in_be32((priv->regs_win + RIO_PEF_CAR))
+                                       & RIO_PEF_CTLS) >> 4;
+       dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n",
+                       port->sys_size ? 65536 : 256);
+
+       priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win
+                                       + RIO_ATMU_REGS_OFFSET);
+       priv->maint_atmu_regs = priv->atmu_regs + 1;
+       priv->dbell_atmu_regs = priv->atmu_regs + 2;
+       priv->msg_regs = (struct rio_msg_regs *)(priv->regs_win +
+                               ((port->phy_type == RIO_PHY_SERIAL) ?
+                               RIO_S_MSG_REGS_OFFSET : RIO_P_MSG_REGS_OFFSET));
+
+       /* Set to receive any dist ID for serial RapidIO controller. */
+       if (port->phy_type == RIO_PHY_SERIAL)
+               out_be32((priv->regs_win + RIO_ISR_AACR), RIO_ISR_AACR_AA);
 
        /* Configure maintenance transaction window */
-       out_be32((void *)&maint_atmu_regs->rowbar, 0x000c0000);
-       out_be32((void *)&maint_atmu_regs->rowar, 0x80077015);
+       out_be32(&priv->maint_atmu_regs->rowbar, 0x000c0000);
+       out_be32(&priv->maint_atmu_regs->rowar, 0x80077015);
 
-       maint_win = (u32) ioremap(law_start, RIO_MAINT_WIN_SIZE);
+       priv->maint_win = ioremap(law_start, RIO_MAINT_WIN_SIZE);
 
        /* Configure outbound doorbell window */
-       out_be32((void *)&dbell_atmu_regs->rowbar, 0x000c0400);
-       out_be32((void *)&dbell_atmu_regs->rowar, 0x8004200b);
-       mpc85xx_rio_doorbell_init(port);
+       out_be32(&priv->dbell_atmu_regs->rowbar, 0x000c0400);
+       out_be32(&priv->dbell_atmu_regs->rowar, 0x8004200b);
+       fsl_rio_doorbell_init(port);
+
+       return 0;
+err:
+       if (priv)
+               iounmap(priv->regs_win);
+       kfree(ops);
+       kfree(priv);
+       kfree(port);
+       return rc;
+}
+
+/* The probe function for the RapidIO peer-to-peer network.
+ */
+static int __devinit fsl_of_rio_rpn_probe(struct of_device *dev,
+                                    const struct of_device_id *match)
+{
+       int rc;
+       printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n",
+                       dev->node->full_name);
+
+       rc = fsl_rio_setup(dev);
+       if (rc)
+               goto out;
+
+       /* Enumerate all registered ports */
+       rc = rio_init_mports();
+out:
+       return rc;
+};
+
+static const struct of_device_id fsl_of_rio_rpn_ids[] = {
+       {
+               .compatible = "fsl,rapidio-delta",
+       },
+       {},
+};
+
+static struct of_platform_driver fsl_of_rio_rpn_driver = {
+       .name = "fsl-of-rio",
+       .match_table = fsl_of_rio_rpn_ids,
+       .probe = fsl_of_rio_rpn_probe,
+};
+
+static __init int fsl_of_rio_rpn_init(void)
+{
+       return of_register_platform_driver(&fsl_of_rio_rpn_driver);
 }
+
+subsys_initcall(fsl_of_rio_rpn_init);
diff --git a/arch/powerpc/sysdev/fsl_rio.h b/arch/powerpc/sysdev/fsl_rio.h
deleted file mode 100644 (file)
index 6d3ff30..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * MPC85xx RapidIO definitions
- *
- * Copyright 2005 MontaVista Software, Inc.
- * Matt Porter <mporter@kernel.crashing.org>
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef __PPC_SYSLIB_PPC85XX_RIO_H
-#define __PPC_SYSLIB_PPC85XX_RIO_H
-
-#include <linux/init.h>
-
-extern void mpc85xx_rio_setup(int law_start, int law_size);
-
-#endif                         /* __PPC_SYSLIB_PPC85XX_RIO_H */
index 5c1b246aacccbd37d55027b1087150bdc8f2f9ec..324c01b70dddfc14701210dc1547c3dfd9a65fab 100644 (file)
@@ -418,22 +418,21 @@ arch_initcall(gfar_of_init);
 #include <linux/i2c.h>
 struct i2c_driver_device {
        char    *of_device;
-       char    *i2c_driver;
        char    *i2c_type;
 };
 
 static struct i2c_driver_device i2c_devices[] __initdata = {
-       {"ricoh,rs5c372a", "rtc-rs5c372", "rs5c372a",},
-       {"ricoh,rs5c372b", "rtc-rs5c372", "rs5c372b",},
-       {"ricoh,rv5c386",  "rtc-rs5c372", "rv5c386",},
-       {"ricoh,rv5c387a", "rtc-rs5c372", "rv5c387a",},
-       {"dallas,ds1307",  "rtc-ds1307",  "ds1307",},
-       {"dallas,ds1337",  "rtc-ds1307",  "ds1337",},
-       {"dallas,ds1338",  "rtc-ds1307",  "ds1338",},
-       {"dallas,ds1339",  "rtc-ds1307",  "ds1339",},
-       {"dallas,ds1340",  "rtc-ds1307",  "ds1340",},
-       {"stm,m41t00",     "rtc-ds1307",  "m41t00"},
-       {"dallas,ds1374",  "rtc-ds1374",  "rtc-ds1374",},
+       {"ricoh,rs5c372a", "rs5c372a"},
+       {"ricoh,rs5c372b", "rs5c372b"},
+       {"ricoh,rv5c386",  "rv5c386"},
+       {"ricoh,rv5c387a", "rv5c387a"},
+       {"dallas,ds1307",  "ds1307"},
+       {"dallas,ds1337",  "ds1337"},
+       {"dallas,ds1338",  "ds1338"},
+       {"dallas,ds1339",  "ds1339"},
+       {"dallas,ds1340",  "ds1340"},
+       {"stm,m41t00",     "m41t00"},
+       {"dallas,ds1374",  "rtc-ds1374"},
 };
 
 static int __init of_find_i2c_driver(struct device_node *node,
@@ -444,9 +443,7 @@ static int __init of_find_i2c_driver(struct device_node *node,
        for (i = 0; i < ARRAY_SIZE(i2c_devices); i++) {
                if (!of_device_is_compatible(node, i2c_devices[i].of_device))
                        continue;
-               if (strlcpy(info->driver_name, i2c_devices[i].i2c_driver,
-                           KOBJ_NAME_LEN) >= KOBJ_NAME_LEN ||
-                   strlcpy(info->type, i2c_devices[i].i2c_type,
+               if (strlcpy(info->type, i2c_devices[i].i2c_type,
                            I2C_NAME_SIZE) >= I2C_NAME_SIZE)
                        return -ENOMEM;
                return 0;
@@ -892,3 +889,44 @@ void fsl_rstcr_restart(char *cmd)
        while (1) ;
 }
 #endif
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+struct platform_diu_data_ops diu_ops = {
+       .diu_size = 1280 * 1024 * 4,    /* default one 1280x1024 buffer */
+};
+EXPORT_SYMBOL(diu_ops);
+
+int __init preallocate_diu_videomemory(void)
+{
+       pr_debug("diu_size=%lu\n", diu_ops.diu_size);
+
+       diu_ops.diu_mem = __alloc_bootmem(diu_ops.diu_size, 8, 0);
+       if (!diu_ops.diu_mem) {
+               printk(KERN_ERR "fsl-diu: cannot allocate %lu bytes\n",
+                       diu_ops.diu_size);
+               return -ENOMEM;
+       }
+
+       pr_debug("diu_mem=%p\n", diu_ops.diu_mem);
+
+       rh_init(&diu_ops.diu_rh_info, 4096, ARRAY_SIZE(diu_ops.diu_rh_block),
+               diu_ops.diu_rh_block);
+       return rh_attach_region(&diu_ops.diu_rh_info,
+                               (unsigned long) diu_ops.diu_mem,
+                               diu_ops.diu_size);
+}
+
+static int __init early_parse_diufb(char *p)
+{
+       if (!p)
+               return 1;
+
+       diu_ops.diu_size = _ALIGN_UP(memparse(p, &p), 8);
+
+       pr_debug("diu_size=%lu\n", diu_ops.diu_size);
+
+       return 0;
+}
+early_param("diufb", early_parse_diufb);
+
+#endif
index 74c4a9657b3376b18a0fbd478d31a161469c1d42..52c831fa1886f544f5b389b31a3aaecf78262698 100644 (file)
@@ -17,5 +17,28 @@ extern int fsl_spi_init(struct spi_board_info *board_infos,
                        void (*deactivate_cs)(u8 cs, u8 polarity));
 
 extern void fsl_rstcr_restart(char *cmd);
+
+#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
+#include <linux/bootmem.h>
+#include <asm/rheap.h>
+struct platform_diu_data_ops {
+       rh_block_t diu_rh_block[16];
+       rh_info_t diu_rh_info;
+       unsigned long diu_size;
+       void *diu_mem;
+
+       unsigned int (*get_pixel_format) (unsigned int bits_per_pixel,
+               int monitor_port);
+       void (*set_gamma_table) (int monitor_port, char *gamma_table_base);
+       void (*set_monitor_port) (int monitor_port);
+       void (*set_pixel_clock) (unsigned int pixclock);
+       ssize_t (*show_monitor_port) (int monitor_port, char *buf);
+       int (*set_sysfs_monitor_port) (int val);
+};
+
+extern struct platform_diu_data_ops diu_ops;
+int __init preallocate_diu_videomemory(void);
+#endif
+
 #endif
 #endif
index a51a17714231ea2c7cc3f168950a4a88df726d7e..8dcbdd6c2d2c3bae19ef6f846f2f4b25b26c6c58 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/suspend.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
+#include <linux/kbuild.h>
+
 #include <asm/io.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/thread_info.h>
 #include <asm/vdso_datapage.h>
 
-#define DEFINE(sym, val) \
-       asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 int
 main(void)
 {
index 50ce83f20adb75ff54f6b4830fdec1ea13b9298d..df3ef6db072c806b7295449ddf70a78499a6258c 100644 (file)
@@ -1121,8 +1121,8 @@ void __init pci_init_resource(struct resource *res, resource_size_t start,
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
 {
-       unsigned long start = pci_resource_start(dev, bar);
-       unsigned long len = pci_resource_len(dev, bar);
+       resource_size_t start = pci_resource_start(dev, bar);
+       resource_size_t len = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);
 
        if (!len)
index d9036ef0b6581215211397cd806f10eae4acfe38..16ac11ca7ba0feb0b022fc8093a21bcf6cae4f61 100644 (file)
@@ -183,9 +183,6 @@ EXPORT_SYMBOL(cuda_poll);
 #if defined(CONFIG_BOOTX_TEXT)
 EXPORT_SYMBOL(btext_update_display);
 #endif
-#ifdef CONFIG_VT
-EXPORT_SYMBOL(kd_mksound);
-#endif
 EXPORT_SYMBOL(to_tm);
 
 EXPORT_SYMBOL(pm_power_off);
index 0df6aacb8237e5429e1af92ba0e06284d25181cd..24f6e0694ac1499d5e28dba98772ac4bb17fe94e 100644 (file)
@@ -30,8 +30,6 @@ static void (*callback_init_IRQ)(void);
 
 extern unsigned char __res[sizeof(bd_t)];
 
-extern void (*late_time_init)(void);
-
 #ifdef CONFIG_GEN_RTC
 TODC_ALLOC();
 
index f6a68e178fc52952f31e4e95e7123af44252a919..29a7940f284f62fa1f9e27508bc7d55f9b50aca4 100644 (file)
@@ -62,6 +62,10 @@ config GENERIC_LOCKBREAK
        default y
        depends on SMP && PREEMPT
 
+config PGSTE
+       bool
+       default y if KVM
+
 mainmenu "Linux Kernel Configuration"
 
 config S390
@@ -69,6 +73,7 @@ config S390
        select HAVE_OPROFILE
        select HAVE_KPROBES
        select HAVE_KRETPROBES
+       select HAVE_KVM if 64BIT
 
 source "init/Kconfig"
 
@@ -295,6 +300,14 @@ comment "Kernel preemption"
 
 source "kernel/Kconfig.preempt"
 
+config ARCH_SPARSEMEM_ENABLE
+       def_bool y
+       select SPARSEMEM_VMEMMAP_ENABLE
+       select SPARSEMEM_VMEMMAP
+
+config ARCH_SPARSEMEM_DEFAULT
+       def_bool y
+
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
@@ -515,6 +528,13 @@ config ZFCPDUMP
          Select this option if you want to build an zfcpdump enabled kernel.
          Refer to <file:Documentation/s390/zfcpdump.txt> for more details on this.
 
+config S390_GUEST
+bool "s390 guest support (EXPERIMENTAL)"
+       depends on 64BIT && EXPERIMENTAL
+       select VIRTIO
+       select VIRTIO_RING
+       help
+         Select this option if you want to run the kernel as a guest under an s390 Linux host.
 endmenu
 
 source "net/Kconfig"
@@ -536,3 +556,5 @@ source "security/Kconfig"
 source "crypto/Kconfig"
 
 source "lib/Kconfig"
+
+source "arch/s390/kvm/Kconfig"
index f708be367b030b672ffe58684e20fbcebc90c77a..792a4e7743cee19ef0b41d4b896d104df48aa73e 100644 (file)
@@ -87,7 +87,7 @@ LDFLAGS_vmlinux := -e start
 head-y         := arch/s390/kernel/head.o arch/s390/kernel/init_task.o
 
 core-y         += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \
-                  arch/s390/appldata/ arch/s390/hypfs/
+                  arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/
 libs-y         += arch/s390/lib/
 drivers-y      += drivers/s390/
 drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/
index a72f208e62d07262ebf9d6e794be83e6a6db08cd..aa341d0ea1e68ff6f5ea5903a335a19099b0179b 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25-rc4
-# Wed Mar  5 11:22:59 2008
+# Linux kernel version: 2.6.25
+# Wed Apr 30 11:07:45 2008
 #
 CONFIG_SCHED_MC=y
 CONFIG_MMU=y
@@ -14,10 +14,12 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
 CONFIG_GENERIC_HWEIGHT=y
 CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_CLOCKEVENTS=y
 CONFIG_GENERIC_BUG=y
 CONFIG_NO_IOMEM=y
 CONFIG_NO_DMA=y
 CONFIG_GENERIC_LOCKBREAK=y
+CONFIG_PGSTE=y
 CONFIG_S390=y
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 
@@ -43,6 +45,7 @@ CONFIG_LOG_BUF_SHIFT=17
 CONFIG_CGROUPS=y
 # CONFIG_CGROUP_DEBUG is not set
 CONFIG_CGROUP_NS=y
+# CONFIG_CGROUP_DEVICE is not set
 # CONFIG_CPUSETS is not set
 CONFIG_GROUP_SCHED=y
 CONFIG_FAIR_GROUP_SCHED=y
@@ -65,6 +68,7 @@ CONFIG_INITRAMFS_SOURCE=""
 CONFIG_SYSCTL=y
 # CONFIG_EMBEDDED is not set
 CONFIG_SYSCTL_SYSCALL=y
+CONFIG_SYSCTL_SYSCALL_CHECK=y
 CONFIG_KALLSYMS=y
 # CONFIG_KALLSYMS_ALL is not set
 # CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -92,6 +96,7 @@ CONFIG_KPROBES=y
 CONFIG_KRETPROBES=y
 CONFIG_HAVE_KPROBES=y
 CONFIG_HAVE_KRETPROBES=y
+# CONFIG_HAVE_DMA_ATTRS is not set
 CONFIG_PROC_PAGE_MONITOR=y
 CONFIG_SLABINFO=y
 CONFIG_RT_MUTEXES=y
@@ -121,8 +126,8 @@ CONFIG_DEFAULT_DEADLINE=y
 # CONFIG_DEFAULT_CFQ is not set
 # CONFIG_DEFAULT_NOOP is not set
 CONFIG_DEFAULT_IOSCHED="deadline"
+CONFIG_PREEMPT_NOTIFIERS=y
 CONFIG_CLASSIC_RCU=y
-# CONFIG_PREEMPT_RCU is not set
 
 #
 # Base setup
@@ -131,6 +136,10 @@ CONFIG_CLASSIC_RCU=y
 #
 # Processor type and features
 #
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
 CONFIG_64BIT=y
 CONFIG_SMP=y
 CONFIG_NR_CPUS=32
@@ -161,15 +170,20 @@ CONFIG_ARCH_POPULATES_NODE_MAP=y
 # CONFIG_PREEMPT_NONE is not set
 # CONFIG_PREEMPT_VOLUNTARY is not set
 CONFIG_PREEMPT=y
-# CONFIG_RCU_TRACE is not set
+# CONFIG_PREEMPT_RCU is not set
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_ARCH_SPARSEMEM_DEFAULT=y
 CONFIG_SELECT_MEMORY_MODEL=y
-CONFIG_FLATMEM_MANUAL=y
+# CONFIG_FLATMEM_MANUAL is not set
 # CONFIG_DISCONTIGMEM_MANUAL is not set
-# CONFIG_SPARSEMEM_MANUAL is not set
-CONFIG_FLATMEM=y
-CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_SPARSEMEM_MANUAL=y
+CONFIG_SPARSEMEM=y
+CONFIG_HAVE_MEMORY_PRESENT=y
 # CONFIG_SPARSEMEM_STATIC is not set
-# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 CONFIG_RESOURCES_64BIT=y
 CONFIG_ZONE_DMA_FLAG=1
@@ -205,11 +219,10 @@ CONFIG_HZ_100=y
 # CONFIG_HZ_1000 is not set
 CONFIG_HZ=100
 # CONFIG_SCHED_HRTICK is not set
-CONFIG_NO_IDLE_HZ=y
-CONFIG_NO_IDLE_HZ_INIT=y
 CONFIG_S390_HYPFS_FS=y
 CONFIG_KEXEC=y
 # CONFIG_ZFCPDUMP is not set
+CONFIG_S390_GUEST=y
 
 #
 # Networking
@@ -272,8 +285,10 @@ CONFIG_INET6_XFRM_MODE_TUNNEL=y
 CONFIG_INET6_XFRM_MODE_BEET=y
 # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
 CONFIG_IPV6_SIT=y
+CONFIG_IPV6_NDISC_NODETYPE=y
 # CONFIG_IPV6_TUNNEL is not set
 # CONFIG_IPV6_MULTIPLE_TABLES is not set
+# CONFIG_IPV6_MROUTE is not set
 # CONFIG_NETWORK_SECMARK is not set
 CONFIG_NETFILTER=y
 # CONFIG_NETFILTER_DEBUG is not set
@@ -289,6 +304,7 @@ CONFIG_NF_CONNTRACK=m
 # CONFIG_NF_CT_ACCT is not set
 # CONFIG_NF_CONNTRACK_MARK is not set
 # CONFIG_NF_CONNTRACK_EVENTS is not set
+# CONFIG_NF_CT_PROTO_DCCP is not set
 # CONFIG_NF_CT_PROTO_SCTP is not set
 # CONFIG_NF_CT_PROTO_UDPLITE is not set
 # CONFIG_NF_CONNTRACK_AMANDA is not set
@@ -439,6 +455,7 @@ CONFIG_DASD_ECKD=y
 CONFIG_DASD_FBA=y
 CONFIG_DASD_DIAG=y
 CONFIG_DASD_EER=y
+CONFIG_VIRTIO_BLK=m
 CONFIG_MISC_DEVICES=y
 # CONFIG_EEPROM_93CX6 is not set
 # CONFIG_ENCLOSURE_SERVICES is not set
@@ -533,7 +550,7 @@ CONFIG_NETDEV_10000=y
 # S/390 network device drivers
 #
 CONFIG_LCS=m
-CONFIG_CTC=m
+CONFIG_CTCM=m
 # CONFIG_NETIUCV is not set
 # CONFIG_SMSGIUCV is not set
 # CONFIG_CLAW is not set
@@ -547,10 +564,12 @@ CONFIG_CCWGROUP=y
 # CONFIG_NETCONSOLE is not set
 # CONFIG_NETPOLL is not set
 # CONFIG_NET_POLL_CONTROLLER is not set
+CONFIG_VIRTIO_NET=m
 
 #
 # Character devices
 #
+CONFIG_DEVKMEM=y
 CONFIG_UNIX98_PTYS=y
 CONFIG_LEGACY_PTYS=y
 CONFIG_LEGACY_PTY_COUNT=256
@@ -600,6 +619,7 @@ CONFIG_S390_VMUR=m
 # Sonics Silicon Backplane
 #
 # CONFIG_MEMSTICK is not set
+# CONFIG_NEW_LEDS is not set
 
 #
 # File systems
@@ -652,6 +672,7 @@ CONFIG_PROC_SYSCTL=y
 CONFIG_SYSFS=y
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_HUGETLBFS is not set
 # CONFIG_HUGETLB_PAGE is not set
 CONFIG_CONFIGFS_FS=m
 
@@ -678,12 +699,10 @@ CONFIG_NFS_FS=y
 CONFIG_NFS_V3=y
 # CONFIG_NFS_V3_ACL is not set
 # CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
 CONFIG_NFSD=y
 CONFIG_NFSD_V3=y
 # CONFIG_NFSD_V3_ACL is not set
 # CONFIG_NFSD_V4 is not set
-CONFIG_NFSD_TCP=y
 CONFIG_LOCKD=y
 CONFIG_LOCKD_V4=y
 CONFIG_EXPORTFS=y
@@ -731,6 +750,7 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 # CONFIG_PRINTK_TIME is not set
 CONFIG_ENABLE_WARN_DEPRECATED=y
 CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=2048
 CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_FS=y
@@ -754,6 +774,7 @@ CONFIG_DEBUG_SPINLOCK_SLEEP=y
 CONFIG_DEBUG_BUGVERBOSE=y
 # CONFIG_DEBUG_INFO is not set
 # CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
 # CONFIG_DEBUG_LIST is not set
 # CONFIG_DEBUG_SG is not set
 # CONFIG_FRAME_POINTER is not set
@@ -775,58 +796,88 @@ CONFIG_SAMPLES=y
 # CONFIG_SECURITY is not set
 # CONFIG_SECURITY_FILE_CAPABILITIES is not set
 CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
 CONFIG_CRYPTO_ALGAPI=y
 CONFIG_CRYPTO_AEAD=m
 CONFIG_CRYPTO_BLKCIPHER=y
-CONFIG_CRYPTO_SEQIV=m
 CONFIG_CRYPTO_HASH=m
 CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_GF128MUL=m
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=m
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=m
+CONFIG_CRYPTO_SEQIV=m
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=m
+CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
 CONFIG_CRYPTO_HMAC=m
 # CONFIG_CRYPTO_XCBC is not set
-# CONFIG_CRYPTO_NULL is not set
+
+#
+# Digest
+#
+# CONFIG_CRYPTO_CRC32C is not set
 # CONFIG_CRYPTO_MD4 is not set
 CONFIG_CRYPTO_MD5=m
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
 CONFIG_CRYPTO_SHA1=m
 # CONFIG_CRYPTO_SHA256 is not set
 # CONFIG_CRYPTO_SHA512 is not set
-# CONFIG_CRYPTO_WP512 is not set
 # CONFIG_CRYPTO_TGR192 is not set
-CONFIG_CRYPTO_GF128MUL=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_LRW is not set
-# CONFIG_CRYPTO_XTS is not set
-CONFIG_CRYPTO_CTR=m
-CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CCM=m
-# CONFIG_CRYPTO_CRYPTD is not set
-# CONFIG_CRYPTO_DES is not set
-CONFIG_CRYPTO_FCRYPT=m
-# CONFIG_CRYPTO_BLOWFISH is not set
-# CONFIG_CRYPTO_TWOFISH is not set
-# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
 # CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+CONFIG_CRYPTO_CAMELLIA=m
 # CONFIG_CRYPTO_CAST5 is not set
 # CONFIG_CRYPTO_CAST6 is not set
-# CONFIG_CRYPTO_TEA is not set
-# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_DES is not set
+CONFIG_CRYPTO_FCRYPT=m
 # CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-CONFIG_CRYPTO_SEED=m
 CONFIG_CRYPTO_SALSA20=m
+CONFIG_CRYPTO_SEED=m
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+
+#
+# Compression
+#
 # CONFIG_CRYPTO_DEFLATE is not set
-# CONFIG_CRYPTO_MICHAEL_MIC is not set
-# CONFIG_CRYPTO_CRC32C is not set
-CONFIG_CRYPTO_CAMELLIA=m
-# CONFIG_CRYPTO_TEST is not set
-CONFIG_CRYPTO_AUTHENC=m
 CONFIG_CRYPTO_LZO=m
 CONFIG_CRYPTO_HW=y
 CONFIG_ZCRYPT=m
 # CONFIG_ZCRYPT_MONOLITHIC is not set
 # CONFIG_CRYPTO_SHA1_S390 is not set
 # CONFIG_CRYPTO_SHA256_S390 is not set
+CONFIG_CRYPTO_SHA512_S390=m
 # CONFIG_CRYPTO_DES_S390 is not set
 # CONFIG_CRYPTO_AES_S390 is not set
 CONFIG_S390_PRNG=m
@@ -835,6 +886,8 @@ CONFIG_S390_PRNG=m
 # Library routines
 #
 CONFIG_BITREVERSE=m
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
+# CONFIG_GENERIC_FIND_NEXT_BIT is not set
 # CONFIG_CRC_CCITT is not set
 # CONFIG_CRC16 is not set
 # CONFIG_CRC_ITU_T is not set
@@ -844,3 +897,9 @@ CONFIG_LIBCRC32C=m
 CONFIG_LZO_COMPRESS=m
 CONFIG_LZO_DECOMPRESS=m
 CONFIG_PLIST=y
+CONFIG_HAVE_KVM=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_RING=y
+CONFIG_VIRTIO_BALLOON=m
index 77051cd27925832b468796049512b9923f827de0..6302f508258866120b2625198ec411c8f455202c 100644 (file)
@@ -2,8 +2,6 @@
 # Makefile for the linux kernel.
 #
 
-EXTRA_AFLAGS   := -traditional
-
 #
 # Passing null pointers is ok for smp code, since we access the lowcore here.
 #
index 1375f8a4469e8e89d56fadba47e9992fbcee6963..fa28ecae636b2237a8bfddb49f05851ef7056f9c 100644 (file)
@@ -5,44 +5,38 @@
  */
 
 #include <linux/sched.h>
-
-/* Use marker if you need to separate the values later */
-
-#define DEFINE(sym, val, marker) \
-       asm volatile("\n->" #sym " %0 " #val " " #marker : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
+#include <linux/kbuild.h>
 
 int main(void)
 {
-       DEFINE(__THREAD_info, offsetof(struct task_struct, stack),);
-       DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp),);
-       DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info),);
+       DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
+       DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
+       DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info));
        DEFINE(__THREAD_mm_segment,
-              offsetof(struct task_struct, thread.mm_segment),);
+              offsetof(struct task_struct, thread.mm_segment));
        BLANK();
-       DEFINE(__TASK_pid, offsetof(struct task_struct, pid),);
+       DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
        BLANK();
-       DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid),);
-       DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address),);
-       DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id),);
+       DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid));
+       DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address));
+       DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id));
        BLANK();
-       DEFINE(__TI_task, offsetof(struct thread_info, task),);
-       DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain),);
-       DEFINE(__TI_flags, offsetof(struct thread_info, flags),);
-       DEFINE(__TI_cpu, offsetof(struct thread_info, cpu),);
-       DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count),);
+       DEFINE(__TI_task, offsetof(struct thread_info, task));
+       DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain));
+       DEFINE(__TI_flags, offsetof(struct thread_info, flags));
+       DEFINE(__TI_cpu, offsetof(struct thread_info, cpu));
+       DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count));
        BLANK();
-       DEFINE(__PT_ARGS, offsetof(struct pt_regs, args),);
-       DEFINE(__PT_PSW, offsetof(struct pt_regs, psw),);
-       DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs),);
-       DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2),);
-       DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc),);
-       DEFINE(__PT_TRAP, offsetof(struct pt_regs, trap),);
-       DEFINE(__PT_SIZE, sizeof(struct pt_regs),);
+       DEFINE(__PT_ARGS, offsetof(struct pt_regs, args));
+       DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
+       DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
+       DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
+       DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc));
+       DEFINE(__PT_TRAP, offsetof(struct pt_regs, trap));
+       DEFINE(__PT_SIZE, sizeof(struct pt_regs));
        BLANK();
-       DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain),);
-       DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs),);
-       DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1),);
+       DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
+       DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs));
+       DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1));
        return 0;
 }
index 540a67f979b64820e4b45319451b1e3897ebb0e0..d0e09684b9ce8c4f0924017a51f86e12ddec93d5 100644 (file)
@@ -139,11 +139,15 @@ static noinline __init void detect_machine_type(void)
 
        /* Running under z/VM ? */
        if (cpuinfo->cpu_id.version == 0xff)
-               machine_flags |= 1;
+               machine_flags |= MACHINE_FLAG_VM;
 
        /* Running on a P/390 ? */
        if (cpuinfo->cpu_id.machine == 0x7490)
-               machine_flags |= 4;
+               machine_flags |= MACHINE_FLAG_P390;
+
+       /* Running under KVM ? */
+       if (cpuinfo->cpu_id.version == 0xfe)
+               machine_flags |= MACHINE_FLAG_KVM;
 }
 
 #ifdef CONFIG_64BIT
@@ -264,6 +268,118 @@ static noinline __init void setup_lowcore_early(void)
        s390_base_pgm_handler_fn = early_pgm_check_handler;
 }
 
+static noinline __init void setup_hpage(void)
+{
+#ifndef CONFIG_DEBUG_PAGEALLOC
+       unsigned int facilities;
+
+       facilities = stfl();
+       if (!(facilities & (1UL << 23)) || !(facilities & (1UL << 29)))
+               return;
+       machine_flags |= MACHINE_FLAG_HPAGE;
+       __ctl_set_bit(0, 23);
+#endif
+}
+
+static __init void detect_mvpg(void)
+{
+#ifndef CONFIG_64BIT
+       int rc;
+
+       asm volatile(
+               "       la      0,0\n"
+               "       mvpg    %2,%2\n"
+               "0:     la      %0,0\n"
+               "1:\n"
+               EX_TABLE(0b,1b)
+               : "=d" (rc) : "0" (-EOPNOTSUPP), "a" (0) : "memory", "cc", "0");
+       if (!rc)
+               machine_flags |= MACHINE_FLAG_MVPG;
+#endif
+}
+
+static __init void detect_ieee(void)
+{
+#ifndef CONFIG_64BIT
+       int rc, tmp;
+
+       asm volatile(
+               "       efpc    %1,0\n"
+               "0:     la      %0,0\n"
+               "1:\n"
+               EX_TABLE(0b,1b)
+               : "=d" (rc), "=d" (tmp): "0" (-EOPNOTSUPP) : "cc");
+       if (!rc)
+               machine_flags |= MACHINE_FLAG_IEEE;
+#endif
+}
+
+static __init void detect_csp(void)
+{
+#ifndef CONFIG_64BIT
+       int rc;
+
+       asm volatile(
+               "       la      0,0\n"
+               "       la      1,0\n"
+               "       la      2,4\n"
+               "       csp     0,2\n"
+               "0:     la      %0,0\n"
+               "1:\n"
+               EX_TABLE(0b,1b)
+               : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc", "0", "1", "2");
+       if (!rc)
+               machine_flags |= MACHINE_FLAG_CSP;
+#endif
+}
+
+static __init void detect_diag9c(void)
+{
+       unsigned int cpu_address;
+       int rc;
+
+       cpu_address = stap();
+       asm volatile(
+               "       diag    %2,0,0x9c\n"
+               "0:     la      %0,0\n"
+               "1:\n"
+               EX_TABLE(0b,1b)
+               : "=d" (rc) : "0" (-EOPNOTSUPP), "d" (cpu_address) : "cc");
+       if (!rc)
+               machine_flags |= MACHINE_FLAG_DIAG9C;
+}
+
+static __init void detect_diag44(void)
+{
+#ifdef CONFIG_64BIT
+       int rc;
+
+       asm volatile(
+               "       diag    0,0,0x44\n"
+               "0:     la      %0,0\n"
+               "1:\n"
+               EX_TABLE(0b,1b)
+               : "=d" (rc) : "0" (-EOPNOTSUPP) : "cc");
+       if (!rc)
+               machine_flags |= MACHINE_FLAG_DIAG44;
+#endif
+}
+
+static __init void detect_machine_facilities(void)
+{
+#ifdef CONFIG_64BIT
+       unsigned int facilities;
+
+       facilities = stfl();
+       if (facilities & (1 << 28))
+               machine_flags |= MACHINE_FLAG_IDTE;
+       if (facilities & (1 << 23))
+               machine_flags |= MACHINE_FLAG_PFMF;
+       if (facilities & (1 << 4))
+               machine_flags |= MACHINE_FLAG_MVCOS;
+#endif
+}
+
 /*
  * Save ipl parameters, clear bss memory, initialize storage keys
  * and create a kernel NSS at startup if the SAVESYS= parm is defined
@@ -281,6 +397,13 @@ void __init startup_init(void)
        create_kernel_nss();
        sort_main_extable();
        setup_lowcore_early();
+       detect_mvpg();
+       detect_ieee();
+       detect_csp();
+       detect_diag9c();
+       detect_diag44();
+       detect_machine_facilities();
+       setup_hpage();
        sclp_read_info_early();
        sclp_facilities_detect();
        memsize = sclp_memory_detect();
index 6766e37fe8ea01955da640895bd2068358f660d6..bdbb3bcd78a5d7088ed4cfd39f7fb444e2f15814 100644 (file)
@@ -49,9 +49,9 @@ SP_ILC             =  STACK_FRAME_OVERHEAD + __PT_ILC
 SP_TRAP      = STACK_FRAME_OVERHEAD + __PT_TRAP
 SP_SIZE      = STACK_FRAME_OVERHEAD + __PT_SIZE
 
-_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
+_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
                 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
-_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
+_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
                 _TIF_MCCK_PENDING)
 
 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
@@ -316,7 +316,7 @@ sysc_work:
        bo      BASED(sysc_mcck_pending)
        tm      __TI_flags+3(%r9),_TIF_NEED_RESCHED
        bo      BASED(sysc_reschedule)
-       tm      __TI_flags+3(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
+       tm      __TI_flags+3(%r9),_TIF_SIGPENDING
        bnz     BASED(sysc_sigpending)
        tm      __TI_flags+3(%r9),_TIF_RESTART_SVC
        bo      BASED(sysc_restart)
@@ -342,7 +342,7 @@ sysc_mcck_pending:
        br      %r1                     # TIF bit will be cleared by handler
 
 #
-# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
+# _TIF_SIGPENDING is set, call do_signal
 #
 sysc_sigpending:
        ni      __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
@@ -657,7 +657,7 @@ io_work:
        lr      %r15,%r1
 #
 # One of the work bits is on. Find out which one.
-# Checked are: _TIF_SIGPENDING, _TIF_RESTORE_SIGMASK, _TIF_NEED_RESCHED
+# Checked are: _TIF_SIGPENDING, _TIF_NEED_RESCHED
 #              and _TIF_MCCK_PENDING
 #
 io_work_loop:
@@ -665,7 +665,7 @@ io_work_loop:
        bo      BASED(io_mcck_pending)
        tm      __TI_flags+3(%r9),_TIF_NEED_RESCHED
        bo      BASED(io_reschedule)
-       tm      __TI_flags+3(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
+       tm      __TI_flags+3(%r9),_TIF_SIGPENDING
        bnz     BASED(io_sigpending)
        b       BASED(io_restore)
 io_work_done:
@@ -693,7 +693,7 @@ io_reschedule:
        b       BASED(io_work_loop)
 
 #
-# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
+# _TIF_SIGPENDING is set, call do_signal
 #
 io_sigpending:
        TRACE_IRQS_ON
index cd959c0b2e16b4cd225429e960cf2a96e4b7276a..5a4a7bcd2bbad8e92e95eb529fb11559977c0dd9 100644 (file)
@@ -52,9 +52,9 @@ SP_SIZE      =        STACK_FRAME_OVERHEAD + __PT_SIZE
 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
 STACK_SIZE  = 1 << STACK_SHIFT
 
-_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
+_TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
                 _TIF_MCCK_PENDING | _TIF_RESTART_SVC | _TIF_SINGLE_STEP )
-_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NEED_RESCHED | \
+_TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
                 _TIF_MCCK_PENDING)
 
 #define BASED(name) name-system_call(%r13)
@@ -308,7 +308,7 @@ sysc_work:
        jo      sysc_mcck_pending
        tm      __TI_flags+7(%r9),_TIF_NEED_RESCHED
        jo      sysc_reschedule
-       tm      __TI_flags+7(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
+       tm      __TI_flags+7(%r9),_TIF_SIGPENDING
        jnz     sysc_sigpending
        tm      __TI_flags+7(%r9),_TIF_RESTART_SVC
        jo      sysc_restart
@@ -332,7 +332,7 @@ sysc_mcck_pending:
        jg      s390_handle_mcck        # TIF bit will be cleared by handler
 
 #
-# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
+# _TIF_SIGPENDING is set, call do_signal
 #
 sysc_sigpending:
        ni      __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
@@ -648,7 +648,7 @@ io_work_loop:
        jo      io_mcck_pending
        tm      __TI_flags+7(%r9),_TIF_NEED_RESCHED
        jo      io_reschedule
-       tm      __TI_flags+7(%r9),(_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)
+       tm      __TI_flags+7(%r9),_TIF_SIGPENDING
        jnz     io_sigpending
        j       io_restore
 io_work_done:
@@ -674,7 +674,7 @@ io_reschedule:
        j       io_work_loop
 
 #
-# _TIF_SIGPENDING or _TIF_RESTORE_SIGMASK is set, call do_signal
+# _TIF_SIGPENDING is set, call do_signal
 #
 io_sigpending:
        TRACE_IRQS_ON
index dc364c1419af068666e09f1e5aea0e99fe2961c1..a816e2de32b957ce24af24a2e0f45163e3d8e4da 100644 (file)
@@ -57,61 +57,6 @@ startup_continue:
 #
        l       %r14,.Lstartup_init-.LPG1(%r13)
        basr    %r14,%r14
-
-       l       %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags
-#
-# find out if we have an IEEE fpu
-#
-       mvc     __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13)
-       efpc    %r0,0                   # test IEEE extract fpc instruction
-       oi      3(%r12),2               # set IEEE fpu flag
-.Lchkfpu:
-
-#
-# find out if we have the CSP instruction
-#
-       mvc      __LC_PGM_NEW_PSW(8),.Lpccsp-.LPG1(%r13)
-       la       %r0,0
-       lr      %r1,%r0
-       la      %r2,4
-       csp     %r0,%r2                 # Test CSP instruction
-       oi      3(%r12),8               # set CSP flag
-.Lchkcsp:
-
-#
-# find out if we have the MVPG instruction
-#
-       mvc     __LC_PGM_NEW_PSW(8),.Lpcmvpg-.LPG1(%r13)
-       sr      %r0,%r0
-       la      %r1,0
-       la      %r2,0
-       mvpg    %r1,%r2                 # Test CSP instruction
-       oi      3(%r12),16              # set MVPG flag
-.Lchkmvpg:
-
-#
-# find out if we have the IDTE instruction
-#
-       mvc     __LC_PGM_NEW_PSW(8),.Lpcidte-.LPG1(%r13)
-       .long   0xb2b10000              # store facility list
-       tm      0xc8,0x08               # check bit for clearing-by-ASCE
-       bno     .Lchkidte-.LPG1(%r13)
-       lhi     %r1,2094
-       lhi     %r2,0
-       .long   0xb98e2001
-       oi      3(%r12),0x80            # set IDTE flag
-.Lchkidte:
-
-#
-# find out if the diag 0x9c is available
-#
-       mvc     __LC_PGM_NEW_PSW(8),.Lpcdiag9c-.LPG1(%r13)
-       stap    __LC_CPUID+4            # store cpu address
-       lh      %r1,__LC_CPUID+4
-       diag    %r1,0,0x9c              # test diag 0x9c
-       oi      2(%r12),1               # set diag9c flag
-.Lchkdiag9c:
-
        lpsw  .Lentry-.LPG1(13)         # jump to _stext in primary-space,
                                        # virtual and never return ...
        .align  8
@@ -132,13 +77,7 @@ startup_continue:
        .long   0                       # cr13: home space segment table
        .long   0xc0000000              # cr14: machine check handling off
        .long   0                       # cr15: linkage stack operations
-.Lpcfpu:.long  0x00080000,0x80000000 + .Lchkfpu
-.Lpccsp:.long  0x00080000,0x80000000 + .Lchkcsp
-.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
-.Lpcidte:.long 0x00080000,0x80000000 + .Lchkidte
-.Lpcdiag9c:.long 0x00080000,0x80000000 + .Lchkdiag9c
 .Lmchunk:.long memory_chunk
-.Lmflags:.long machine_flags
 .Lbss_bgn:  .long __bss_start
 .Lbss_end:  .long _end
 .Lparmaddr: .long PARMAREA
index 79dccd206a6ea48122374b73783dfb13f005a113..1d06961e87b328ec1bb27eb019c660194fda58f3 100644 (file)
@@ -125,73 +125,11 @@ startup_continue:
 # and create a kernel NSS if the SAVESYS= parm is defined
 #
        brasl   %r14,startup_init
-                                       # set program check new psw mask
-       mvc     __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
-       larl    %r12,machine_flags
-#
-# find out if we have the MVPG instruction
-#
-       la      %r1,0f-.LPG1(%r13)      # set program check address
-       stg     %r1,__LC_PGM_NEW_PSW+8
-       sgr     %r0,%r0
-       lghi    %r1,0
-       lghi    %r2,0
-       mvpg    %r1,%r2                 # test MVPG instruction
-       oi      7(%r12),16              # set MVPG flag
-0:
-
-#
-# find out if the diag 0x44 works in 64 bit mode
-#
-       la      %r1,0f-.LPG1(%r13)      # set program check address
-       stg     %r1,__LC_PGM_NEW_PSW+8
-       diag    0,0,0x44                # test diag 0x44
-       oi      7(%r12),32              # set diag44 flag
-0:
-
-#
-# find out if we have the IDTE instruction
-#
-       la      %r1,0f-.LPG1(%r13)      # set program check address
-       stg     %r1,__LC_PGM_NEW_PSW+8
-       .long   0xb2b10000              # store facility list
-       tm      0xc8,0x08               # check bit for clearing-by-ASCE
-       bno     0f-.LPG1(%r13)
-       lhi     %r1,2048
-       lhi     %r2,0
-       .long   0xb98e2001
-       oi      7(%r12),0x80            # set IDTE flag
-0:
-
-#
-# find out if the diag 0x9c is available
-#
-       la      %r1,0f-.LPG1(%r13)      # set program check address
-       stg     %r1,__LC_PGM_NEW_PSW+8
-       stap    __LC_CPUID+4            # store cpu address
-       lh      %r1,__LC_CPUID+4
-       diag    %r1,0,0x9c              # test diag 0x9c
-       oi      6(%r12),1               # set diag9c flag
-0:
-
-#
-# find out if we have the MVCOS instruction
-#
-       la      %r1,0f-.LPG1(%r13)      # set program check address
-       stg     %r1,__LC_PGM_NEW_PSW+8
-       .short  0xc800                  # mvcos 0(%r0),0(%r0),%r0
-       .short  0x0000
-       .short  0x0000
-0:     tm      0x8f,0x13               # special-operation exception?
-       bno     1f-.LPG1(%r13)          # if yes, MVCOS is present
-       oi      6(%r12),2               # set MVCOS flag
-1:
-
        lpswe   .Lentry-.LPG1(13)       # jump to _stext in primary-space,
                                        # virtual and never return ...
        .align  16
 .Lentry:.quad  0x0000000180000000,_stext
-.Lctl: .quad   0x04b50002              # cr0: various things
+.Lctl: .quad   0x04350002              # cr0: various things
        .quad   0                       # cr1: primary space segment table
        .quad   .Lduct                  # cr2: dispatchable unit control table
        .quad   0                       # cr3: instruction authorization
index c36d8123ca14bd464b2cfb59b2c3c9eb0498ca6d..c59a86dca5849617ac74428647fe3ddb7834937e 100644 (file)
@@ -60,8 +60,6 @@ init_IRQ(void)
 /*
  * Switch to the asynchronous interrupt stack for softirq execution.
  */
-extern void __do_softirq(void);
-
 asmlinkage void do_softirq(void)
 {
        unsigned long flags, old, new;
index c1aff194141d892871166a09a82e7ee23a8f23be..7920861109d2aa074de0ca9ae96f2c99d3c44b24 100644 (file)
@@ -180,24 +180,6 @@ void cpu_idle(void)
        }
 }
 
-void show_regs(struct pt_regs *regs)
-{
-       print_modules();
-       printk("CPU: %d %s %s %.*s\n",
-              task_thread_info(current)->cpu, print_tainted(),
-              init_utsname()->release,
-              (int)strcspn(init_utsname()->version, " "),
-              init_utsname()->version);
-       printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
-              current->comm, current->pid, current,
-              (void *) current->thread.ksp);
-       show_registers(regs);
-       /* Show stack backtrace if pt_regs is from kernel mode */
-       if (!(regs->psw.mask & PSW_MASK_PSTATE))
-               show_trace(NULL, (unsigned long *) regs->gprs[15]);
-       show_last_breaking_event(regs);
-}
-
 extern void kernel_thread_starter(void);
 
 asm(
index 58a064296987bd6b85fbbe04d370863c0c3ea24d..7f4270163744b1ea5af37b6a68ca69e2cef89437 100644 (file)
@@ -607,38 +607,8 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
 }
 #endif
 
-#define PT32_IEEE_IP 0x13c
-
-static int
-do_ptrace(struct task_struct *child, long request, long addr, long data)
+long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 {
-       int ret;
-
-       if (request == PTRACE_ATTACH)
-               return ptrace_attach(child);
-
-       /*
-        * Special cases to get/store the ieee instructions pointer.
-        */
-       if (child == current) {
-               if (request == PTRACE_PEEKUSR && addr == PT_IEEE_IP)
-                       return peek_user(child, addr, data);
-               if (request == PTRACE_POKEUSR && addr == PT_IEEE_IP)
-                       return poke_user(child, addr, data);
-#ifdef CONFIG_COMPAT
-               if (request == PTRACE_PEEKUSR &&
-                   addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
-                       return peek_user_emu31(child, addr, data);
-               if (request == PTRACE_POKEUSR &&
-                   addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
-                       return poke_user_emu31(child, addr, data);
-#endif
-       }
-
-       ret = ptrace_check_attach(child, request == PTRACE_KILL);
-       if (ret < 0)
-               return ret;
-
        switch (request) {
        case PTRACE_SYSCALL:
                /* continue and stop at next (return from) syscall */
@@ -693,31 +663,6 @@ do_ptrace(struct task_struct *child, long request, long addr, long data)
        return -EIO;
 }
 
-asmlinkage long
-sys_ptrace(long request, long pid, long addr, long data)
-{
-       struct task_struct *child;
-       int ret;
-
-       lock_kernel();
-       if (request == PTRACE_TRACEME) {
-                ret = ptrace_traceme();
-                goto out;
-       }
-
-       child = ptrace_get_task_struct(pid);
-       if (IS_ERR(child)) {
-               ret = PTR_ERR(child);
-               goto out;
-       }
-
-       ret = do_ptrace(child, request, addr, data);
-       put_task_struct(child);
-out:
-       unlock_kernel();
-       return ret;
-}
-
 asmlinkage void
 syscall_trace(struct pt_regs *regs, int entryexit)
 {
index 7141147e6b639a2035e4542c167ac7d8a3b50c59..2bc70b6e876a4310991e884d5a1f92dde1a78681 100644 (file)
@@ -73,7 +73,7 @@ EXPORT_SYMBOL(uaccess);
 unsigned int console_mode = 0;
 unsigned int console_devno = -1;
 unsigned int console_irq = -1;
-unsigned long machine_flags = 0;
+unsigned long machine_flags;
 unsigned long elf_hwcap = 0;
 char elf_platform[ELF_PLATFORM_SIZE];
 
@@ -316,7 +316,11 @@ static int __init early_parse_ipldelay(char *p)
 early_param("ipldelay", early_parse_ipldelay);
 
 #ifdef CONFIG_S390_SWITCH_AMODE
+#ifdef CONFIG_PGSTE
+unsigned int switch_amode = 1;
+#else
 unsigned int switch_amode = 0;
+#endif
 EXPORT_SYMBOL_GPL(switch_amode);
 
 static void set_amode_and_uaccess(unsigned long user_amode,
@@ -679,15 +683,6 @@ setup_memory(void)
 #endif
 }
 
-static __init unsigned int stfl(void)
-{
-       asm volatile(
-               "       .insn   s,0xb2b10000,0(0)\n" /* stfl */
-               "0:\n"
-               EX_TABLE(0b,0b));
-       return S390_lowcore.stfl_fac_list;
-}
-
 static int __init __stfle(unsigned long long *list, int doublewords)
 {
        typedef struct { unsigned long long _[doublewords]; } addrtype;
@@ -754,6 +749,9 @@ static void __init setup_hwcaps(void)
                        elf_hwcap |= 1UL << 6;
        }
 
+       if (MACHINE_HAS_HPAGE)
+               elf_hwcap |= 1UL << 7;
+
        switch (cpuinfo->cpu_id.machine) {
        case 0x9672:
 #if !defined(CONFIG_64BIT)
@@ -797,9 +795,13 @@ setup_arch(char **cmdline_p)
               "This machine has an IEEE fpu\n" :
               "This machine has no IEEE fpu\n");
 #else /* CONFIG_64BIT */
-       printk((MACHINE_IS_VM) ?
-              "We are running under VM (64 bit mode)\n" :
-              "We are running native (64 bit mode)\n");
+       if (MACHINE_IS_VM)
+               printk("We are running under VM (64 bit mode)\n");
+       else if (MACHINE_IS_KVM) {
+               printk("We are running under KVM (64 bit mode)\n");
+               add_preferred_console("ttyS", 1, NULL);
+       } else
+               printk("We are running native (64 bit mode)\n");
 #endif /* CONFIG_64BIT */
 
        /* Save unparsed command line copy for /proc/cmdline */
@@ -873,8 +875,9 @@ void __cpuinit print_cpu_info(struct cpuinfo_S390 *cpuinfo)
 
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
-       static const char *hwcap_str[7] = {
-               "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp"
+       static const char *hwcap_str[8] = {
+               "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
+               "edat"
        };
         struct cpuinfo_S390 *cpuinfo;
        unsigned long n = (unsigned long) v - 1;
@@ -889,7 +892,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                               num_online_cpus(), loops_per_jiffy/(500000/HZ),
                               (loops_per_jiffy/(5000/HZ))%100);
                seq_puts(m, "features\t: ");
-               for (i = 0; i < 7; i++)
+               for (i = 0; i < 8; i++)
                        if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
                                seq_printf(m, "%s ", hwcap_str[i]);
                seq_puts(m, "\n");
index 0dfa988c1b26d74abae8411d92afb0f709d8fc2a..0aeb290060d9cb379532568880dc4d408b0ac01c 100644 (file)
@@ -505,7 +505,7 @@ out:
        return rc;
 }
 
-static int smp_rescan_cpus(void)
+static int __smp_rescan_cpus(void)
 {
        cpumask_t avail;
 
@@ -570,7 +570,7 @@ out:
        kfree(info);
        printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
        get_online_cpus();
-       smp_rescan_cpus();
+       __smp_rescan_cpus();
        put_online_cpus();
 }
 
@@ -890,8 +890,8 @@ static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
        if (val != 0 && val != 1)
                return -EINVAL;
 
-       mutex_lock(&smp_cpu_state_mutex);
        get_online_cpus();
+       mutex_lock(&smp_cpu_state_mutex);
        rc = -EBUSY;
        if (cpu_online(cpu))
                goto out;
@@ -919,8 +919,8 @@ static ssize_t cpu_configure_store(struct sys_device *dev, const char *buf,
                break;
        }
 out:
-       put_online_cpus();
        mutex_unlock(&smp_cpu_state_mutex);
+       put_online_cpus();
        return rc ? rc : count;
 }
 static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
@@ -1088,17 +1088,17 @@ out:
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static ssize_t __ref rescan_store(struct sys_device *dev,
-                                 const char *buf, size_t count)
+
+int smp_rescan_cpus(void)
 {
        cpumask_t newcpus;
        int cpu;
        int rc;
 
-       mutex_lock(&smp_cpu_state_mutex);
        get_online_cpus();
+       mutex_lock(&smp_cpu_state_mutex);
        newcpus = cpu_present_map;
-       rc = smp_rescan_cpus();
+       rc = __smp_rescan_cpus();
        if (rc)
                goto out;
        cpus_andnot(newcpus, cpu_present_map, newcpus);
@@ -1109,10 +1109,19 @@ static ssize_t __ref rescan_store(struct sys_device *dev,
        }
        rc = 0;
 out:
-       put_online_cpus();
        mutex_unlock(&smp_cpu_state_mutex);
+       put_online_cpus();
        if (!cpus_empty(newcpus))
                topology_schedule_update();
+       return rc;
+}
+
+static ssize_t __ref rescan_store(struct sys_device *dev, const char *buf,
+                                 size_t count)
+{
+       int rc;
+
+       rc = smp_rescan_cpus();
        return rc ? rc : count;
 }
 static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
@@ -1139,16 +1148,16 @@ static ssize_t dispatching_store(struct sys_device *dev, const char *buf,
        if (val != 0 && val != 1)
                return -EINVAL;
        rc = 0;
-       mutex_lock(&smp_cpu_state_mutex);
        get_online_cpus();
+       mutex_lock(&smp_cpu_state_mutex);
        if (cpu_management == val)
                goto out;
        rc = topology_set_cpu_management(val);
        if (!rc)
                cpu_management = val;
 out:
-       put_online_cpus();
        mutex_unlock(&smp_cpu_state_mutex);
+       put_online_cpus();
        return rc ? rc : count;
 }
 static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store);
index 12b39b3d9c38db113f39b75433cede0a4287f4e4..661a07217057a37b8d08b7ad85c8d4a2508a2bd0 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/device.h>
 #include <linux/bootmem.h>
 #include <linux/sched.h>
+#include <linux/kthread.h>
 #include <linux/workqueue.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
@@ -66,6 +67,8 @@ static struct timer_list topology_timer;
 static void set_topology_timer(void);
 static DECLARE_WORK(topology_work, topology_work_fn);
 
+cpumask_t cpu_core_map[NR_CPUS];
+
 cpumask_t cpu_coregroup_map(unsigned int cpu)
 {
        struct core_info *core = &core_info;
@@ -199,6 +202,14 @@ int topology_set_cpu_management(int fc)
        return rc;
 }
 
+static void update_cpu_core_map(void)
+{
+       int cpu;
+
+       for_each_present_cpu(cpu)
+               cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+}
+
 void arch_update_cpu_topology(void)
 {
        struct tl_info *info = tl_info;
@@ -206,20 +217,33 @@ void arch_update_cpu_topology(void)
        int cpu;
 
        if (!machine_has_topology) {
+               update_cpu_core_map();
                topology_update_polarization_simple();
                return;
        }
        stsi(info, 15, 1, 2);
        tl_to_cores(info);
+       update_cpu_core_map();
        for_each_online_cpu(cpu) {
                sysdev = get_cpu_sysdev(cpu);
                kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
        }
 }
 
-static void topology_work_fn(struct work_struct *work)
+static int topology_kthread(void *data)
 {
        arch_reinit_sched_domains();
+       return 0;
+}
+
+static void topology_work_fn(struct work_struct *work)
+{
+       /* We can't call arch_reinit_sched_domains() from a multi-threaded
+        * workqueue context since it may deadlock in case of cpu hotplug.
+        * So we have to create a kernel thread in order to call
+        * arch_reinit_sched_domains().
+        */
+       kthread_run(topology_kthread, NULL, "topology_update");
 }
 
 void topology_schedule_update(void)
@@ -251,20 +275,23 @@ static int __init init_topology_update(void)
 {
        int rc;
 
+       rc = 0;
        if (!machine_has_topology) {
                topology_update_polarization_simple();
-               return 0;
+               goto out;
        }
        init_timer_deferrable(&topology_timer);
        if (machine_has_topology_irq) {
                rc = register_external_interrupt(0x2005, topology_interrupt);
                if (rc)
-                       return rc;
+                       goto out;
                ctl_set_bit(0, 8);
        }
        else
                set_topology_timer();
-       return 0;
+out:
+       update_cpu_core_map();
+       return rc;
 }
 __initcall(init_topology_update);
 
index 57b607b6110010f7fa3dd7cd2f74e1d3b94e0065..4584d81984c0f25dabb4bffc17371866081ed6a5 100644 (file)
@@ -113,7 +113,7 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
        }
 }
 
-void show_trace(struct task_struct *task, unsigned long *stack)
+static void show_trace(struct task_struct *task, unsigned long *stack)
 {
        register unsigned long __r15 asm ("15");
        unsigned long sp;
@@ -161,14 +161,14 @@ void show_stack(struct task_struct *task, unsigned long *sp)
        show_trace(task, sp);
 }
 
-#ifdef CONFIG_64BIT
-void show_last_breaking_event(struct pt_regs *regs)
+static void show_last_breaking_event(struct pt_regs *regs)
 {
+#ifdef CONFIG_64BIT
        printk("Last Breaking-Event-Address:\n");
        printk(" [<%016lx>] ", regs->args[0] & PSW_ADDR_INSN);
        print_symbol("%s\n", regs->args[0] & PSW_ADDR_INSN);
-}
 #endif
+}
 
 /*
  * The architecture-independent dump_stack generator
@@ -223,6 +223,24 @@ void show_registers(struct pt_regs *regs)
        show_code(regs);
 }      
 
+void show_regs(struct pt_regs *regs)
+{
+       print_modules();
+       printk("CPU: %d %s %s %.*s\n",
+              task_thread_info(current)->cpu, print_tainted(),
+              init_utsname()->release,
+              (int)strcspn(init_utsname()->version, " "),
+              init_utsname()->version);
+       printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
+              current->comm, current->pid, current,
+              (void *) current->thread.ksp);
+       show_registers(regs);
+       /* Show stack backtrace if pt_regs is from kernel mode */
+       if (!(regs->psw.mask & PSW_MASK_PSTATE))
+               show_trace(NULL, (unsigned long *) regs->gprs[15]);
+       show_last_breaking_event(regs);
+}
+
 /* This is called from fs/proc/array.c */
 void task_show_regs(struct seq_file *m, struct task_struct *task)
 {
index c5f05b3fb2c30f1548a062280be08fc12411340d..ca90ee3f930edc0ffc364d8a094826df731b3415 100644 (file)
@@ -110,6 +110,7 @@ void account_system_vtime(struct task_struct *tsk)
        S390_lowcore.steal_clock -= cputime << 12;
        account_system_time(tsk, 0, cputime);
 }
+EXPORT_SYMBOL_GPL(account_system_vtime);
 
 static inline void set_vtimer(__u64 expires)
 {
diff --git a/arch/s390/kvm/Kconfig b/arch/s390/kvm/Kconfig
new file mode 100644 (file)
index 0000000..1761b74
--- /dev/null
@@ -0,0 +1,46 @@
+#
+# KVM configuration
+#
+config HAVE_KVM
+       bool
+
+menuconfig VIRTUALIZATION
+       bool "Virtualization"
+       default y
+       ---help---
+         Say Y here to get to see options for using your Linux host to run other
+         operating systems inside virtual machines (guests).
+         This option alone does not add any kernel code.
+
+         If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+       tristate "Kernel-based Virtual Machine (KVM) support"
+       depends on HAVE_KVM && EXPERIMENTAL
+       select PREEMPT_NOTIFIERS
+       select ANON_INODES
+       select S390_SWITCH_AMODE
+       select PREEMPT
+       ---help---
+         Support hosting paravirtualized guest machines using the SIE
+         virtualization capability on the mainframe. This should work
+         on any 64bit machine.
+
+         This module provides access to the hardware capabilities through
+         a character device node named /dev/kvm.
+
+         To compile this as a module, choose M here: the module
+         will be called kvm.
+
+         If unsure, say N.
+
+config KVM_TRACE
+       bool
+
+# OK, it's a little counter-intuitive to do this, but it puts it neatly under
+# the virtualization menu.
+source drivers/virtio/Kconfig
+
+endif # VIRTUALIZATION
diff --git a/arch/s390/kvm/Makefile b/arch/s390/kvm/Makefile
new file mode 100644 (file)
index 0000000..e5221ec
--- /dev/null
@@ -0,0 +1,14 @@
+# Makefile for kernel virtual machines on s390
+#
+# Copyright IBM Corp. 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (version 2 only)
+# as published by the Free Software Foundation.
+
+common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/s390/kvm
+
+kvm-objs := $(common-objs) kvm-s390.o sie64a.o intercept.o interrupt.o priv.o sigp.o diag.o
+obj-$(CONFIG_KVM) += kvm.o
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
new file mode 100644 (file)
index 0000000..f639a15
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * diag.c - handling diagnose instructions
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *               Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include "kvm-s390.h"
+
+static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
+{
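+       /* Diagnose 0x44 is the guest voluntarily ending its time slice: drop
+        * the vcpu and let the host scheduler run something else. */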
+       VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
+       vcpu->stat.diagnose_44++;
+       vcpu_put(vcpu);
+       schedule();
+       vcpu_load(vcpu);
+       return 0;
+}
+
+static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
+{
+       unsigned int reg = vcpu->arch.sie_block->ipa & 0xf;
+       unsigned long subcode = vcpu->arch.guest_gprs[reg] & 0xffff;
+
+       VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode);
+       switch (subcode) {
+       case 3:
+               vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR;
+               break;
+       case 4:
+               vcpu->run->s390_reset_flags = 0;
+               break;
+       default:
+               return -ENOTSUPP;
+       }
+
+       atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+       vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
+       vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
+       vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
+       vcpu->run->exit_reason = KVM_EXIT_S390_RESET;
+       VCPU_EVENT(vcpu, 3, "requesting userspace resets %lx",
+         vcpu->run->s390_reset_flags);
+       return -EREMOTE;
+}
+
+int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
+{
+       int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16;
+
+       switch (code) {
+       case 0x44:
+               return __diag_time_slice_end(vcpu);
+       case 0x308:
+               return __diag_ipl_functions(vcpu);
+       default:
+               return -ENOTSUPP;
+       }
+}
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
new file mode 100644 (file)
index 0000000..4e0633c
--- /dev/null
@@ -0,0 +1,274 @@
+/*
+ * gaccess.h -  access guest memory
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ */
+
+#ifndef __KVM_S390_GACCESS_H
+#define __KVM_S390_GACCESS_H
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+#include <asm/uaccess.h>
+
+static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
+                                              u64 guestaddr)
+{
+       u64 prefix  = vcpu->arch.sie_block->prefix;
+       u64 origin  = vcpu->kvm->arch.guest_origin;
+       u64 memsize = vcpu->kvm->arch.guest_memsize;
+
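+       /* Apply low-address prefixing: guest real addresses in the first two
+        * pages are redirected into the vcpu's prefix area, and addresses
+        * inside the prefix area map back to absolute pages 0 and 1. */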
+       if (guestaddr < 2 * PAGE_SIZE)
+               guestaddr += prefix;
+       else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
+               guestaddr -= prefix;
+
+       if (guestaddr > memsize)
+               return (void __user __force *) ERR_PTR(-EFAULT);
+
+       guestaddr += origin;
+
+       return (void __user *) guestaddr;
+}
+
+static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+                               u64 *result)
+{
+       void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+       BUG_ON(guestaddr & 7);
+
+       if (IS_ERR((void __force *) uptr))
+               return PTR_ERR((void __force *) uptr);
+
+       return get_user(*result, (u64 __user *) uptr);
+}
+
+static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+                               u32 *result)
+{
+       void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+       BUG_ON(guestaddr & 3);
+
+       if (IS_ERR((void __force *) uptr))
+               return PTR_ERR((void __force *) uptr);
+
+       return get_user(*result, (u32 __user *) uptr);
+}
+
+static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+                               u16 *result)
+{
+       void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+       BUG_ON(guestaddr & 1);
+
+       if (IS_ERR(uptr))
+               return PTR_ERR(uptr);
+
+       return get_user(*result, (u16 __user *) uptr);
+}
+
+static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+                              u8 *result)
+{
+       void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+       if (IS_ERR((void __force *) uptr))
+               return PTR_ERR((void __force *) uptr);
+
+       return get_user(*result, (u8 __user *) uptr);
+}
+
+static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
+                               u64 value)
+{
+       void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+       BUG_ON(guestaddr & 7);
+
+       if (IS_ERR((void __force *) uptr))
+               return PTR_ERR((void __force *) uptr);
+
+       return put_user(value, (u64 __user *) uptr);
+}
+
+static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
+                               u32 value)
+{
+       void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+       BUG_ON(guestaddr & 3);
+
+       if (IS_ERR((void __force *) uptr))
+               return PTR_ERR((void __force *) uptr);
+
+       return put_user(value, (u32 __user *) uptr);
+}
+
+static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
+                               u16 value)
+{
+       void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+       BUG_ON(guestaddr & 1);
+
+       if (IS_ERR((void __force *) uptr))
+               return PTR_ERR((void __force *) uptr);
+
+       return put_user(value, (u16 __user *) uptr);
+}
+
+static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
+                              u8 value)
+{
+       void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);
+
+       if (IS_ERR((void __force *) uptr))
+               return PTR_ERR((void __force *) uptr);
+
+       return put_user(value, (u8 __user *) uptr);
+}
+
+
+static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
+                                      const void *from, unsigned long n)
+{
+       int rc;
+       unsigned long i;
+       const u8 *data = from;
+
+       for (i = 0; i < n; i++) {
+               rc = put_guest_u8(vcpu, guestdest++, *(data++));
+               if (rc < 0)
+                       return rc;
+       }
+       return 0;
+}
+
+static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
+                               const void *from, unsigned long n)
+{
+       u64 prefix  = vcpu->arch.sie_block->prefix;
+       u64 origin  = vcpu->kvm->arch.guest_origin;
+       u64 memsize = vcpu->kvm->arch.guest_memsize;
+
+       if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
+               goto slowpath;
+
+       if ((guestdest < prefix) && (guestdest + n > prefix))
+               goto slowpath;
+
+       if ((guestdest < prefix + 2 * PAGE_SIZE)
+           && (guestdest + n > prefix + 2 * PAGE_SIZE))
+               goto slowpath;
+
+       if (guestdest < 2 * PAGE_SIZE)
+               guestdest += prefix;
+       else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
+               guestdest -= prefix;
+
+       if (guestdest + n > memsize)
+               return -EFAULT;
+
+       if (guestdest + n < guestdest)
+               return -EFAULT;
+
+       guestdest += origin;
+
+       return copy_to_user((void __user *) guestdest, from, n);
+slowpath:
+       return __copy_to_guest_slow(vcpu, guestdest, from, n);
+}
+
+static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
+                                        u64 guestsrc, unsigned long n)
+{
+       int rc;
+       unsigned long i;
+       u8 *data = to;
+
+       for (i = 0; i < n; i++) {
+               rc = get_guest_u8(vcpu, guestsrc++, data++);
+               if (rc < 0)
+                       return rc;
+       }
+       return 0;
+}
+
+static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
+                                 u64 guestsrc, unsigned long n)
+{
+       u64 prefix  = vcpu->arch.sie_block->prefix;
+       u64 origin  = vcpu->kvm->arch.guest_origin;
+       u64 memsize = vcpu->kvm->arch.guest_memsize;
+
+       if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
+               goto slowpath;
+
+       if ((guestsrc < prefix) && (guestsrc + n > prefix))
+               goto slowpath;
+
+       if ((guestsrc < prefix + 2 * PAGE_SIZE)
+           && (guestsrc + n > prefix + 2 * PAGE_SIZE))
+               goto slowpath;
+
+       if (guestsrc < 2 * PAGE_SIZE)
+               guestsrc += prefix;
+       else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
+               guestsrc -= prefix;
+
+       if (guestsrc + n > memsize)
+               return -EFAULT;
+
+       if (guestsrc + n < guestsrc)
+               return -EFAULT;
+
+       guestsrc += origin;
+
+       return copy_from_user(to, (void __user *) guestsrc, n);
+slowpath:
+       return __copy_from_guest_slow(vcpu, to, guestsrc, n);
+}
+
+static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
+                                        const void *from, unsigned long n)
+{
+       u64 origin  = vcpu->kvm->arch.guest_origin;
+       u64 memsize = vcpu->kvm->arch.guest_memsize;
+
+       if (guestdest + n > memsize)
+               return -EFAULT;
+
+       if (guestdest + n < guestdest)
+               return -EFAULT;
+
+       guestdest += origin;
+
+       return copy_to_user((void __user *) guestdest, from, n);
+}
+
+static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
+                                          u64 guestsrc, unsigned long n)
+{
+       u64 origin  = vcpu->kvm->arch.guest_origin;
+       u64 memsize = vcpu->kvm->arch.guest_memsize;
+
+       if (guestsrc + n > memsize)
+               return -EFAULT;
+
+       if (guestsrc + n < guestsrc)
+               return -EFAULT;
+
+       guestsrc += origin;
+
+       return copy_from_user(to, (void __user *) guestsrc, n);
+}
+#endif
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
new file mode 100644 (file)
index 0000000..349581a
--- /dev/null
@@ -0,0 +1,216 @@
+/*
+ * intercept.c - in-kernel handling for sie intercepts
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *               Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/errno.h>
+#include <linux/pagemap.h>
+
+#include <asm/kvm_host.h>
+
+#include "kvm-s390.h"
+#include "gaccess.h"
+
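+/*
+ * On an instruction intercept the SIE block latches the instruction text:
+ * ipa holds the first halfword and ipb the remaining bytes, so the register
+ * numbers, base register and displacement below are decoded from there.
+ */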
+static int handle_lctg(struct kvm_vcpu *vcpu)
+{
+       int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+       int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
+       int base2 = vcpu->arch.sie_block->ipb >> 28;
+       int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
+                       ((vcpu->arch.sie_block->ipb & 0xff00) << 4);
+       u64 useraddr;
+       int reg, rc;
+
+       vcpu->stat.instruction_lctg++;
+       if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f)
+               return -ENOTSUPP;
+
+       useraddr = disp2;
+       if (base2)
+               useraddr += vcpu->arch.guest_gprs[base2];
+
+       reg = reg1;
+
+       VCPU_EVENT(vcpu, 5, "lctg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
+                  disp2);
+
+       do {
+               rc = get_guest_u64(vcpu, useraddr,
+                                  &vcpu->arch.sie_block->gcr[reg]);
+               if (rc == -EFAULT) {
+                       kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+                       break;
+               }
+               useraddr += 8;
+               if (reg == reg3)
+                       break;
+               reg = (reg + 1) % 16;
+       } while (1);
+       return 0;
+}
+
+static int handle_lctl(struct kvm_vcpu *vcpu)
+{
+       int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+       int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
+       int base2 = vcpu->arch.sie_block->ipb >> 28;
+       int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+       u64 useraddr;
+       u32 val = 0;
+       int reg, rc;
+
+       vcpu->stat.instruction_lctl++;
+
+       useraddr = disp2;
+       if (base2)
+               useraddr += vcpu->arch.guest_gprs[base2];
+
+       VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2,
+                  disp2);
+
+       reg = reg1;
+       do {
+               rc = get_guest_u32(vcpu, useraddr, &val);
+               if (rc == -EFAULT) {
+                       kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+                       break;
+               }
+               vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
+               vcpu->arch.sie_block->gcr[reg] |= val;
+               useraddr += 4;
+               if (reg == reg3)
+                       break;
+               reg = (reg + 1) % 16;
+       } while (1);
+       return 0;
+}
+
+static intercept_handler_t instruction_handlers[256] = {
+       [0x83] = kvm_s390_handle_diag,
+       [0xae] = kvm_s390_handle_sigp,
+       [0xb2] = kvm_s390_handle_priv,
+       [0xb7] = handle_lctl,
+       [0xeb] = handle_lctg,
+};
+
+static int handle_noop(struct kvm_vcpu *vcpu)
+{
+       switch (vcpu->arch.sie_block->icptcode) {
+       case 0x10:
+               vcpu->stat.exit_external_request++;
+               break;
+       case 0x14:
+               vcpu->stat.exit_external_interrupt++;
+               break;
+       default:
+               break; /* nothing */
+       }
+       return 0;
+}
+
+static int handle_stop(struct kvm_vcpu *vcpu)
+{
+       int rc;
+
+       vcpu->stat.exit_stop_request++;
+       atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+       spin_lock_bh(&vcpu->arch.local_int.lock);
+       if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
+               vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
+               rc = __kvm_s390_vcpu_store_status(vcpu,
+                                                 KVM_S390_STORE_STATUS_NOADDR);
+               if (rc >= 0)
+                       rc = -ENOTSUPP;
+       }
+
+       if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
+               vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
+               VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
+               rc = -ENOTSUPP;
+       } else
+               rc = 0;
+       spin_unlock_bh(&vcpu->arch.local_int.lock);
+       return rc;
+}
+
+static int handle_validity(struct kvm_vcpu *vcpu)
+{
+       int viwhy = vcpu->arch.sie_block->ipb >> 16;
+       vcpu->stat.exit_validity++;
+       if (viwhy == 0x37) {
+               fault_in_pages_writeable((char __user *)
+                                        vcpu->kvm->arch.guest_origin +
+                                        vcpu->arch.sie_block->prefix,
+                                        PAGE_SIZE);
+               return 0;
+       }
+       VCPU_EVENT(vcpu, 2, "unhandled validity intercept code %d",
+                  viwhy);
+       return -ENOTSUPP;
+}
+
+static int handle_instruction(struct kvm_vcpu *vcpu)
+{
+       intercept_handler_t handler;
+
+       vcpu->stat.exit_instruction++;
+       handler = instruction_handlers[vcpu->arch.sie_block->ipa >> 8];
+       if (handler)
+               return handler(vcpu);
+       return -ENOTSUPP;
+}
+
+static int handle_prog(struct kvm_vcpu *vcpu)
+{
+       vcpu->stat.exit_program_interruption++;
+       return kvm_s390_inject_program_int(vcpu, vcpu->arch.sie_block->iprcc);
+}
+
+static int handle_instruction_and_prog(struct kvm_vcpu *vcpu)
+{
+       int rc, rc2;
+
+       vcpu->stat.exit_instr_and_program++;
+       rc = handle_instruction(vcpu);
+       rc2 = handle_prog(vcpu);
+
+       if (rc == -ENOTSUPP)
+               vcpu->arch.sie_block->icptcode = 0x04;
+       if (rc)
+               return rc;
+       return rc2;
+}
+
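+/* Intercept codes are multiples of four, so icptcode >> 2 indexes this table. */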
+static const intercept_handler_t intercept_funcs[0x48 >> 2] = {
+       [0x00 >> 2] = handle_noop,
+       [0x04 >> 2] = handle_instruction,
+       [0x08 >> 2] = handle_prog,
+       [0x0C >> 2] = handle_instruction_and_prog,
+       [0x10 >> 2] = handle_noop,
+       [0x14 >> 2] = handle_noop,
+       [0x1C >> 2] = kvm_s390_handle_wait,
+       [0x20 >> 2] = handle_validity,
+       [0x28 >> 2] = handle_stop,
+};
+
+int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu)
+{
+       intercept_handler_t func;
+       u8 code = vcpu->arch.sie_block->icptcode;
+
+       if (code & 3 || code >= 0x48)
+               return -ENOTSUPP;
+       func = intercept_funcs[code >> 2];
+       if (func)
+               return func(vcpu);
+       return -ENOTSUPP;
+}
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
new file mode 100644 (file)
index 0000000..fcd1ed8
--- /dev/null
@@ -0,0 +1,592 @@
+/*
+ * interrupt.c - handling kvm guest interrupts
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ */
+
+#include <asm/lowcore.h>
+#include <asm/uaccess.h>
+#include <linux/kvm_host.h>
+#include "kvm-s390.h"
+#include "gaccess.h"
+
+static int psw_extint_disabled(struct kvm_vcpu *vcpu)
+{
+       return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
+}
+
+static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
+{
+       if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
+           (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
+           (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
+               return 0;
+       return 1;
+}
+
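+/*
+ * External interrupts are only deliverable when the guest PSW has external
+ * interrupts enabled and the matching subclass mask bit is set in guest
+ * control register 0; stop, restart, set-prefix and program interrupts are
+ * always deliverable.
+ */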
+static int __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
+                                     struct interrupt_info *inti)
+{
+       switch (inti->type) {
+       case KVM_S390_INT_EMERGENCY:
+               if (psw_extint_disabled(vcpu))
+                       return 0;
+               if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
+                       return 1;
+               return 0;
+       case KVM_S390_INT_SERVICE:
+               if (psw_extint_disabled(vcpu))
+                       return 0;
+               if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
+                       return 1;
+               return 0;
+       case KVM_S390_INT_VIRTIO:
+               if (psw_extint_disabled(vcpu))
+                       return 0;
+               if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
+                       return 1;
+               return 0;
+       case KVM_S390_PROGRAM_INT:
+       case KVM_S390_SIGP_STOP:
+       case KVM_S390_SIGP_SET_PREFIX:
+       case KVM_S390_RESTART:
+               return 1;
+       default:
+               BUG();
+       }
+       return 0;
+}
+
+static void __set_cpu_idle(struct kvm_vcpu *vcpu)
+{
+       BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
+       atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+       set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+}
+
+static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
+{
+       BUG_ON(vcpu->vcpu_id > KVM_MAX_VCPUS - 1);
+       atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+       clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
+}
+
+static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
+{
+       atomic_clear_mask(CPUSTAT_ECALL_PEND |
+               CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+               &vcpu->arch.sie_block->cpuflags);
+       vcpu->arch.sie_block->lctl = 0x0000;
+}
+
+static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
+{
+       atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+}
+
+static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
+                                     struct interrupt_info *inti)
+{
+       switch (inti->type) {
+       case KVM_S390_INT_EMERGENCY:
+       case KVM_S390_INT_SERVICE:
+       case KVM_S390_INT_VIRTIO:
+               if (psw_extint_disabled(vcpu))
+                       __set_cpuflag(vcpu, CPUSTAT_EXT_INT);
+               else
+                       vcpu->arch.sie_block->lctl |= LCTL_CR0;
+               break;
+       case KVM_S390_SIGP_STOP:
+               __set_cpuflag(vcpu, CPUSTAT_STOP_INT);
+               break;
+       default:
+               BUG();
+       }
+}
+
+static void __do_deliver_interrupt(struct kvm_vcpu *vcpu,
+                                  struct interrupt_info *inti)
+{
+       const unsigned short table[] = { 2, 4, 4, 6 };
+       int rc, exception = 0;
+
+       switch (inti->type) {
+       case KVM_S390_INT_EMERGENCY:
+               VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
+               vcpu->stat.deliver_emergency_signal++;
+               rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1201);
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+                       __LC_EXT_NEW_PSW, sizeof(psw_t));
+               if (rc == -EFAULT)
+                       exception = 1;
+               break;
+
+       case KVM_S390_INT_SERVICE:
+               VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
+                          inti->ext.ext_params);
+               vcpu->stat.deliver_service_signal++;
+               rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2401);
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+                       __LC_EXT_NEW_PSW, sizeof(psw_t));
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
+               if (rc == -EFAULT)
+                       exception = 1;
+               break;
+
+       case KVM_S390_INT_VIRTIO:
+               VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%lx",
+                          inti->ext.ext_params, inti->ext.ext_params2);
+               vcpu->stat.deliver_virtio_interrupt++;
+               rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x2603);
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = put_guest_u16(vcpu, __LC_CPU_ADDRESS, 0x0d00);
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+                       __LC_EXT_NEW_PSW, sizeof(psw_t));
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = put_guest_u32(vcpu, __LC_EXT_PARAMS, inti->ext.ext_params);
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = put_guest_u64(vcpu, __LC_PFAULT_INTPARM,
+                       inti->ext.ext_params2);
+               if (rc == -EFAULT)
+                       exception = 1;
+               break;
+
+       case KVM_S390_SIGP_STOP:
+               VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
+               vcpu->stat.deliver_stop_signal++;
+               __set_intercept_indicator(vcpu, inti);
+               break;
+
+       case KVM_S390_SIGP_SET_PREFIX:
+               VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x",
+                          inti->prefix.address);
+               vcpu->stat.deliver_prefix_signal++;
+               vcpu->arch.sie_block->prefix = inti->prefix.address;
+               vcpu->arch.sie_block->ihcpu = 0xffff;
+               break;
+
+       case KVM_S390_RESTART:
+               VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
+               vcpu->stat.deliver_restart_signal++;
+               rc = copy_to_guest(vcpu, offsetof(struct _lowcore,
+                 restart_old_psw), &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+                       offsetof(struct _lowcore, restart_psw), sizeof(psw_t));
+               if (rc == -EFAULT)
+                       exception = 1;
+               break;
+
+       case KVM_S390_PROGRAM_INT:
+               VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
+                          inti->pgm.code,
+                          table[vcpu->arch.sie_block->ipa >> 14]);
+               vcpu->stat.deliver_program_int++;
+               rc = put_guest_u16(vcpu, __LC_PGM_INT_CODE, inti->pgm.code);
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = put_guest_u16(vcpu, __LC_PGM_ILC,
+                       table[vcpu->arch.sie_block->ipa >> 14]);
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = copy_to_guest(vcpu, __LC_PGM_OLD_PSW,
+                        &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+               if (rc == -EFAULT)
+                       exception = 1;
+
+               rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+                       __LC_PGM_NEW_PSW, sizeof(psw_t));
+               if (rc == -EFAULT)
+                       exception = 1;
+               break;
+
+       default:
+               BUG();
+       }
+
+       if (exception) {
+               VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering"
+                          " interrupt");
+               kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+               if (inti->type == KVM_S390_PROGRAM_INT) {
+                       printk(KERN_WARNING "kvm: recursive program check\n");
+                       BUG();
+               }
+       }
+}
+
+static int __try_deliver_ckc_interrupt(struct kvm_vcpu *vcpu)
+{
+       int rc, exception = 0;
+
+       if (psw_extint_disabled(vcpu))
+               return 0;
+       if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
+               return 0;
+       rc = put_guest_u16(vcpu, __LC_EXT_INT_CODE, 0x1004);
+       if (rc == -EFAULT)
+               exception = 1;
+       rc = copy_to_guest(vcpu, __LC_EXT_OLD_PSW,
+                &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
+       if (rc == -EFAULT)
+               exception = 1;
+       rc = copy_from_guest(vcpu, &vcpu->arch.sie_block->gpsw,
+               __LC_EXT_NEW_PSW, sizeof(psw_t));
+       if (rc == -EFAULT)
+               exception = 1;
+
+       if (exception) {
+               VCPU_EVENT(vcpu, 1, "%s", "program exception while delivering" \
+                          " ckc interrupt");
+               kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+               return 0;
+       }
+
+       return 1;
+}
+
+int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
+{
+       struct local_interrupt *li = &vcpu->arch.local_int;
+       struct float_interrupt *fi = vcpu->arch.local_int.float_int;
+       struct interrupt_info  *inti;
+       int rc = 0;
+
+       if (atomic_read(&li->active)) {
+               spin_lock_bh(&li->lock);
+               list_for_each_entry(inti, &li->list, list)
+                       if (__interrupt_is_deliverable(vcpu, inti)) {
+                               rc = 1;
+                               break;
+                       }
+               spin_unlock_bh(&li->lock);
+       }
+
+       if ((!rc) && atomic_read(&fi->active)) {
+               spin_lock_bh(&fi->lock);
+               list_for_each_entry(inti, &fi->list, list)
+                       if (__interrupt_is_deliverable(vcpu, inti)) {
+                               rc = 1;
+                               break;
+                       }
+               spin_unlock_bh(&fi->lock);
+       }
+
+       if ((!rc) && (vcpu->arch.sie_block->ckc <
+               get_clock() + vcpu->arch.sie_block->epoch)) {
+               if ((!psw_extint_disabled(vcpu)) &&
+                       (vcpu->arch.sie_block->gcr[0] & 0x800ul))
+                       rc = 1;
+       }
+
+       return rc;
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
+{
+       u64 now, sltime;
+       DECLARE_WAITQUEUE(wait, current);
+
+       vcpu->stat.exit_wait_state++;
+       if (kvm_cpu_has_interrupt(vcpu))
+               return 0;
+
+       if (psw_interrupts_disabled(vcpu)) {
+               VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
+               __unset_cpu_idle(vcpu);
+               return -ENOTSUPP; /* disabled wait */
+       }
+
+       if (psw_extint_disabled(vcpu) ||
+           (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))) {
+               VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
+               goto no_timer;
+       }
+
+       now = get_clock() + vcpu->arch.sie_block->epoch;
+       if (vcpu->arch.sie_block->ckc < now) {
+               __unset_cpu_idle(vcpu);
+               return 0;
+       }
+
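+       /* The TOD clock ticks 4096 times per microsecond, so 0xf4240000 units
+        * equal one second; dividing the remaining clock-comparator delta by
+        * (0xf4240000 / HZ) converts it into a timer expiry in jiffies. */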
+       sltime = (vcpu->arch.sie_block->ckc - now) / (0xf4240000ul / HZ) + 1;
+
+       vcpu->arch.ckc_timer.expires = jiffies + sltime;
+
+       add_timer(&vcpu->arch.ckc_timer);
+       VCPU_EVENT(vcpu, 5, "enabled wait timer:%lx jiffies", sltime);
+no_timer:
+       spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+       spin_lock_bh(&vcpu->arch.local_int.lock);
+       __set_cpu_idle(vcpu);
+       vcpu->arch.local_int.timer_due = 0;
+       add_wait_queue(&vcpu->arch.local_int.wq, &wait);
+       while (list_empty(&vcpu->arch.local_int.list) &&
+               list_empty(&vcpu->arch.local_int.float_int->list) &&
+               (!vcpu->arch.local_int.timer_due) &&
+               !signal_pending(current)) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               spin_unlock_bh(&vcpu->arch.local_int.lock);
+               spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
+               vcpu_put(vcpu);
+               schedule();
+               vcpu_load(vcpu);
+               spin_lock_bh(&vcpu->arch.local_int.float_int->lock);
+               spin_lock_bh(&vcpu->arch.local_int.lock);
+       }
+       __unset_cpu_idle(vcpu);
+       __set_current_state(TASK_RUNNING);
+       remove_wait_queue(&vcpu->arch.local_int.wq, &wait);
+       spin_unlock_bh(&vcpu->arch.local_int.lock);
+       spin_unlock_bh(&vcpu->arch.local_int.float_int->lock);
+       del_timer(&vcpu->arch.ckc_timer);
+       return 0;
+}
+
+void kvm_s390_idle_wakeup(unsigned long data)
+{
+       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+       spin_lock_bh(&vcpu->arch.local_int.lock);
+       vcpu->arch.local_int.timer_due = 1;
+       if (waitqueue_active(&vcpu->arch.local_int.wq))
+               wake_up_interruptible(&vcpu->arch.local_int.wq);
+       spin_unlock_bh(&vcpu->arch.local_int.lock);
+}
+
+
+void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
+{
+       struct local_interrupt *li = &vcpu->arch.local_int;
+       struct float_interrupt *fi = vcpu->arch.local_int.float_int;
+       struct interrupt_info  *n, *inti = NULL;
+       int deliver;
+
+       __reset_intercept_indicators(vcpu);
+       if (atomic_read(&li->active)) {
+               do {
+                       deliver = 0;
+                       spin_lock_bh(&li->lock);
+                       list_for_each_entry_safe(inti, n, &li->list, list) {
+                               if (__interrupt_is_deliverable(vcpu, inti)) {
+                                       list_del(&inti->list);
+                                       deliver = 1;
+                                       break;
+                               }
+                               __set_intercept_indicator(vcpu, inti);
+                       }
+                       if (list_empty(&li->list))
+                               atomic_set(&li->active, 0);
+                       spin_unlock_bh(&li->lock);
+                       if (deliver) {
+                               __do_deliver_interrupt(vcpu, inti);
+                               kfree(inti);
+                       }
+               } while (deliver);
+       }
+
+       if ((vcpu->arch.sie_block->ckc <
+               get_clock() + vcpu->arch.sie_block->epoch))
+               __try_deliver_ckc_interrupt(vcpu);
+
+       if (atomic_read(&fi->active)) {
+               do {
+                       deliver = 0;
+                       spin_lock_bh(&fi->lock);
+                       list_for_each_entry_safe(inti, n, &fi->list, list) {
+                               if (__interrupt_is_deliverable(vcpu, inti)) {
+                                       list_del(&inti->list);
+                                       deliver = 1;
+                                       break;
+                               }
+                               __set_intercept_indicator(vcpu, inti);
+                       }
+                       if (list_empty(&fi->list))
+                               atomic_set(&fi->active, 0);
+                       spin_unlock_bh(&fi->lock);
+                       if (deliver) {
+                               __do_deliver_interrupt(vcpu, inti);
+                               kfree(inti);
+                       }
+               } while (deliver);
+       }
+}
+
+int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
+{
+       struct local_interrupt *li = &vcpu->arch.local_int;
+       struct interrupt_info *inti;
+
+       inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+       if (!inti)
+               return -ENOMEM;
+
+       inti->type = KVM_S390_PROGRAM_INT;
+       inti->pgm.code = code;
+
+       VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
+       spin_lock_bh(&li->lock);
+       list_add(&inti->list, &li->list);
+       atomic_set(&li->active, 1);
+       BUG_ON(waitqueue_active(&li->wq));
+       spin_unlock_bh(&li->lock);
+       return 0;
+}
+
+int kvm_s390_inject_vm(struct kvm *kvm,
+                      struct kvm_s390_interrupt *s390int)
+{
+       struct local_interrupt *li;
+       struct float_interrupt *fi;
+       struct interrupt_info *inti;
+       int sigcpu;
+
+       inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+       if (!inti)
+               return -ENOMEM;
+
+       switch (s390int->type) {
+       case KVM_S390_INT_VIRTIO:
+               VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%lx",
+                        s390int->parm, s390int->parm64);
+               inti->type = s390int->type;
+               inti->ext.ext_params = s390int->parm;
+               inti->ext.ext_params2 = s390int->parm64;
+               break;
+       case KVM_S390_INT_SERVICE:
+               VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
+               inti->type = s390int->type;
+               inti->ext.ext_params = s390int->parm;
+               break;
+       case KVM_S390_PROGRAM_INT:
+       case KVM_S390_SIGP_STOP:
+       case KVM_S390_INT_EMERGENCY:
+       default:
+               kfree(inti);
+               return -EINVAL;
+       }
+
+       mutex_lock(&kvm->lock);
+       fi = &kvm->arch.float_int;
+       spin_lock_bh(&fi->lock);
+       list_add_tail(&inti->list, &fi->list);
+       atomic_set(&fi->active, 1);
+       sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
+       if (sigcpu == KVM_MAX_VCPUS) {
+               do {
+                       sigcpu = fi->next_rr_cpu++;
+                       if (sigcpu == KVM_MAX_VCPUS)
+                               sigcpu = fi->next_rr_cpu = 0;
+               } while (fi->local_int[sigcpu] == NULL);
+       }
+       li = fi->local_int[sigcpu];
+       spin_lock_bh(&li->lock);
+       atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+       if (waitqueue_active(&li->wq))
+               wake_up_interruptible(&li->wq);
+       spin_unlock_bh(&li->lock);
+       spin_unlock_bh(&fi->lock);
+       mutex_unlock(&kvm->lock);
+       return 0;
+}
+
+int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
+                        struct kvm_s390_interrupt *s390int)
+{
+       struct local_interrupt *li;
+       struct interrupt_info *inti;
+
+       inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+       if (!inti)
+               return -ENOMEM;
+
+       switch (s390int->type) {
+       case KVM_S390_PROGRAM_INT:
+               if (s390int->parm & 0xffff0000) {
+                       kfree(inti);
+                       return -EINVAL;
+               }
+               inti->type = s390int->type;
+               inti->pgm.code = s390int->parm;
+               VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
+                          s390int->parm);
+               break;
+       case KVM_S390_SIGP_STOP:
+       case KVM_S390_RESTART:
+       case KVM_S390_SIGP_SET_PREFIX:
+       case KVM_S390_INT_EMERGENCY:
+               VCPU_EVENT(vcpu, 3, "inject: type %x", s390int->type);
+               inti->type = s390int->type;
+               break;
+       case KVM_S390_INT_VIRTIO:
+       case KVM_S390_INT_SERVICE:
+       default:
+               kfree(inti);
+               return -EINVAL;
+       }
+
+       mutex_lock(&vcpu->kvm->lock);
+       li = &vcpu->arch.local_int;
+       spin_lock_bh(&li->lock);
+       if (inti->type == KVM_S390_PROGRAM_INT)
+               list_add(&inti->list, &li->list);
+       else
+               list_add_tail(&inti->list, &li->list);
+       atomic_set(&li->active, 1);
+       if (inti->type == KVM_S390_SIGP_STOP)
+               li->action_bits |= ACTION_STOP_ON_STOP;
+       atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+       if (waitqueue_active(&li->wq))
+               wake_up_interruptible(&vcpu->arch.local_int.wq);
+       spin_unlock_bh(&li->lock);
+       mutex_unlock(&vcpu->kvm->lock);
+       return 0;
+}
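The two injection paths above are reached from userspace through the KVM_S390_INTERRUPT ioctl, issued on the VM file descriptor for floating interrupts and on a vcpu file descriptor for local ones (the ioctl plumbing follows in kvm-s390.c below). A minimal, hypothetical helper for queueing a floating virtio interrupt, reusing the parm/parm64 fields seen above:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Queue a floating virtio interrupt for the guest; parm and parm64 are
     * delivered through __LC_EXT_PARAMS and __LC_PFAULT_INTPARM. */
    static int inject_virtio(int vm_fd, __u32 parm, __u64 parm64)
    {
            struct kvm_s390_interrupt s390int = {
                    .type   = KVM_S390_INT_VIRTIO,
                    .parm   = parm,
                    .parm64 = parm64,
            };

            return ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
    }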
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
new file mode 100644 (file)
index 0000000..98d1e73
--- /dev/null
@@ -0,0 +1,685 @@
+/*
+ * kvm-s390.c --  hosting zSeries kernel virtual machines
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *               Christian Borntraeger <borntraeger@de.ibm.com>
+ *               Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/compiler.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/timer.h>
+#include <asm/lowcore.h>
+#include <asm/pgtable.h>
+
+#include "kvm-s390.h"
+#include "gaccess.h"
+
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
+
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+       { "userspace_handled", VCPU_STAT(exit_userspace) },
+       { "exit_validity", VCPU_STAT(exit_validity) },
+       { "exit_stop_request", VCPU_STAT(exit_stop_request) },
+       { "exit_external_request", VCPU_STAT(exit_external_request) },
+       { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
+       { "exit_instruction", VCPU_STAT(exit_instruction) },
+       { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
+       { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
+       { "instruction_lctg", VCPU_STAT(instruction_lctg) },
+       { "instruction_lctl", VCPU_STAT(instruction_lctl) },
+       { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
+       { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
+       { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
+       { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
+       { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
+       { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
+       { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
+       { "exit_wait_state", VCPU_STAT(exit_wait_state) },
+       { "instruction_stidp", VCPU_STAT(instruction_stidp) },
+       { "instruction_spx", VCPU_STAT(instruction_spx) },
+       { "instruction_stpx", VCPU_STAT(instruction_stpx) },
+       { "instruction_stap", VCPU_STAT(instruction_stap) },
+       { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
+       { "instruction_stsch", VCPU_STAT(instruction_stsch) },
+       { "instruction_chsc", VCPU_STAT(instruction_chsc) },
+       { "instruction_stsi", VCPU_STAT(instruction_stsi) },
+       { "instruction_stfl", VCPU_STAT(instruction_stfl) },
+       { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
+       { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
+       { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
+       { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
+       { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
+       { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
+       { "diagnose_44", VCPU_STAT(diagnose_44) },
+       { NULL }
+};
+
+
+/* Section: not file related */
+void kvm_arch_hardware_enable(void *garbage)
+{
+       /* every s390 is virtualization enabled ;-) */
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+void decache_vcpus_on_cpu(int cpu)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+}
+
+int kvm_arch_init(void *opaque)
+{
+       return 0;
+}
+
+void kvm_arch_exit(void)
+{
+}
+
+/* Section: device related */
+long kvm_arch_dev_ioctl(struct file *filp,
+                       unsigned int ioctl, unsigned long arg)
+{
+       if (ioctl == KVM_S390_ENABLE_SIE)
+               return s390_enable_sie();
+       return -EINVAL;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+       return 0;
+}
+
+/* Section: vm related */
+/*
+ * Get (and clear) the dirty memory log for a memory slot.
+ */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+                              struct kvm_dirty_log *log)
+{
+       return 0;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+                      unsigned int ioctl, unsigned long arg)
+{
+       struct kvm *kvm = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       int r;
+
+       switch (ioctl) {
+       case KVM_S390_INTERRUPT: {
+               struct kvm_s390_interrupt s390int;
+
+               r = -EFAULT;
+               if (copy_from_user(&s390int, argp, sizeof(s390int)))
+                       break;
+               r = kvm_s390_inject_vm(kvm, &s390int);
+               break;
+       }
+       default:
+               r = -EINVAL;
+       }
+
+       return r;
+}
+
+struct kvm *kvm_arch_create_vm(void)
+{
+       struct kvm *kvm;
+       int rc;
+       char debug_name[16];
+
+       rc = s390_enable_sie();
+       if (rc)
+               goto out_nokvm;
+
+       rc = -ENOMEM;
+       kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
+       if (!kvm)
+               goto out_nokvm;
+
+       kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
+       if (!kvm->arch.sca)
+               goto out_nosca;
+
+       sprintf(debug_name, "kvm-%u", current->pid);
+
+       kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
+       if (!kvm->arch.dbf)
+               goto out_nodbf;
+
+       spin_lock_init(&kvm->arch.float_int.lock);
+       INIT_LIST_HEAD(&kvm->arch.float_int.list);
+
+       debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
+       VM_EVENT(kvm, 3, "%s", "vm created");
+
+       try_module_get(THIS_MODULE);
+
+       return kvm;
+out_nodbf:
+       free_page((unsigned long)(kvm->arch.sca));
+out_nosca:
+       kfree(kvm);
+out_nokvm:
+       return ERR_PTR(rc);
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+       debug_unregister(kvm->arch.dbf);
+       free_page((unsigned long)(kvm->arch.sca));
+       kfree(kvm);
+       module_put(THIS_MODULE);
+}
+
+/* Section: vcpu related */
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+       /* kvm common code refers to this, but doesn't call it */
+       BUG();
+}
+
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+       save_fp_regs(&vcpu->arch.host_fpregs);
+       save_access_regs(vcpu->arch.host_acrs);
+       vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
+       restore_fp_regs(&vcpu->arch.guest_fpregs);
+       restore_access_regs(vcpu->arch.guest_acrs);
+
+       if (signal_pending(current))
+               atomic_set_mask(CPUSTAT_STOP_INT,
+                       &vcpu->arch.sie_block->cpuflags);
+}
+
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       save_fp_regs(&vcpu->arch.guest_fpregs);
+       save_access_regs(vcpu->arch.guest_acrs);
+       restore_fp_regs(&vcpu->arch.host_fpregs);
+       restore_access_regs(vcpu->arch.host_acrs);
+}
+
+static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
+{
+       /* this equals initial cpu reset in pop, but we don't switch to ESA */
+       vcpu->arch.sie_block->gpsw.mask = 0UL;
+       vcpu->arch.sie_block->gpsw.addr = 0UL;
+       vcpu->arch.sie_block->prefix    = 0UL;
+       vcpu->arch.sie_block->ihcpu     = 0xffff;
+       vcpu->arch.sie_block->cputm     = 0UL;
+       vcpu->arch.sie_block->ckc       = 0UL;
+       vcpu->arch.sie_block->todpr     = 0;
+       memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
+       vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
+       vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
+       vcpu->arch.guest_fpregs.fpc = 0;
+       asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
+       vcpu->arch.sie_block->gbea = 1;
+}
+
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
+       vcpu->arch.sie_block->gmslm = 0xffffffffffUL;
+       vcpu->arch.sie_block->gmsor = 0x000000000000;
+       vcpu->arch.sie_block->ecb   = 2;
+       vcpu->arch.sie_block->eca   = 0xC1002001U;
+       setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
+                (unsigned long) vcpu);
+       get_cpu_id(&vcpu->arch.cpu_id);
+       vcpu->arch.cpu_id.version = 0xfe;
+       return 0;
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
+                                     unsigned int id)
+{
+       struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+       int rc = -ENOMEM;
+
+       if (!vcpu)
+               goto out_nomem;
+
+       vcpu->arch.sie_block = (struct sie_block *) get_zeroed_page(GFP_KERNEL);
+
+       if (!vcpu->arch.sie_block)
+               goto out_free_cpu;
+
+       vcpu->arch.sie_block->icpua = id;
+       BUG_ON(!kvm->arch.sca);
+       BUG_ON(kvm->arch.sca->cpu[id].sda);
+       kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
+       vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
+       vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
+
+       spin_lock_init(&vcpu->arch.local_int.lock);
+       INIT_LIST_HEAD(&vcpu->arch.local_int.list);
+       vcpu->arch.local_int.float_int = &kvm->arch.float_int;
+       spin_lock_bh(&kvm->arch.float_int.lock);
+       kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
+       init_waitqueue_head(&vcpu->arch.local_int.wq);
+       vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
+       spin_unlock_bh(&kvm->arch.float_int.lock);
+
+       rc = kvm_vcpu_init(vcpu, kvm, id);
+       if (rc)
+               goto out_free_cpu;
+       VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
+                vcpu->arch.sie_block);
+
+       try_module_get(THIS_MODULE);
+
+       return vcpu;
+out_free_cpu:
+       kfree(vcpu);
+out_nomem:
+       return ERR_PTR(rc);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       VCPU_EVENT(vcpu, 3, "%s", "destroy cpu");
+       free_page((unsigned long)(vcpu->arch.sie_block));
+       kfree(vcpu);
+       module_put(THIS_MODULE);
+}
+
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+       /* kvm common code refers to this, but never calls it */
+       BUG();
+       return 0;
+}
+
+static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
+{
+       vcpu_load(vcpu);
+       kvm_s390_vcpu_initial_reset(vcpu);
+       vcpu_put(vcpu);
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       vcpu_load(vcpu);
+       memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
+       vcpu_put(vcpu);
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       vcpu_load(vcpu);
+       memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
+       vcpu_put(vcpu);
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+                                 struct kvm_sregs *sregs)
+{
+       vcpu_load(vcpu);
+       memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
+       memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
+       vcpu_put(vcpu);
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+                                 struct kvm_sregs *sregs)
+{
+       vcpu_load(vcpu);
+       memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
+       memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
+       vcpu_put(vcpu);
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       vcpu_load(vcpu);
+       memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
+       vcpu->arch.guest_fpregs.fpc = fpu->fpc;
+       vcpu_put(vcpu);
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       vcpu_load(vcpu);
+       memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
+       fpu->fpc = vcpu->arch.guest_fpregs.fpc;
+       vcpu_put(vcpu);
+       return 0;
+}
+
+static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
+{
+       int rc = 0;
+
+       vcpu_load(vcpu);
+       if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
+               rc = -EBUSY;
+       else
+               vcpu->arch.sie_block->gpsw = psw;
+       vcpu_put(vcpu);
+       return rc;
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+                                 struct kvm_translation *tr)
+{
+       return -EINVAL; /* not implemented yet */
+}
+
+int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
+                                   struct kvm_debug_guest *dbg)
+{
+       return -EINVAL; /* not implemented yet */
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       return -EINVAL; /* not implemented yet */
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       return -EINVAL; /* not implemented yet */
+}
+
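+/*
+ * Enter the guest through the sie64a() assembler helper. Guest registers 14
+ * and 15 are shadowed in the SIE block (starting at gg14) while SIE runs, so
+ * they are copied in before the call and copied back out afterwards.
+ */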
+static void __vcpu_run(struct kvm_vcpu *vcpu)
+{
+       memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
+
+       if (need_resched())
+               schedule();
+
+       vcpu->arch.sie_block->icptcode = 0;
+       local_irq_disable();
+       kvm_guest_enter();
+       local_irq_enable();
+       VCPU_EVENT(vcpu, 6, "entering sie flags %x",
+                  atomic_read(&vcpu->arch.sie_block->cpuflags));
+       sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
+       VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
+                  vcpu->arch.sie_block->icptcode);
+       local_irq_disable();
+       kvm_guest_exit();
+       local_irq_enable();
+
+       memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       int rc;
+       sigset_t sigsaved;
+
+       vcpu_load(vcpu);
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+       atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+
+       BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
+
+       switch (kvm_run->exit_reason) {
+       case KVM_EXIT_S390_SIEIC:
+               vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
+               vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
+               break;
+       case KVM_EXIT_UNKNOWN:
+       case KVM_EXIT_S390_RESET:
+               break;
+       default:
+               BUG();
+       }
+
+       might_sleep();
+
+       do {
+               kvm_s390_deliver_pending_interrupts(vcpu);
+               __vcpu_run(vcpu);
+               rc = kvm_handle_sie_intercept(vcpu);
+       } while (!signal_pending(current) && !rc);
+
+       if (signal_pending(current) && !rc)
+               rc = -EINTR;
+
+       if (rc == -ENOTSUPP) {
+               /* intercept cannot be handled in-kernel, prepare kvm-run */
+               kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
+               kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
+               kvm_run->s390_sieic.mask     = vcpu->arch.sie_block->gpsw.mask;
+               kvm_run->s390_sieic.addr     = vcpu->arch.sie_block->gpsw.addr;
+               kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
+               kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
+               rc = 0;
+       }
+
+       if (rc == -EREMOTE) {
+               /* intercept was handled, but userspace support is needed
+                * kvm_run has been prepared by the handler */
+               rc = 0;
+       }
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+       vcpu_put(vcpu);
+
+       vcpu->stat.exit_userspace++;
+       return rc;
+}
+
+static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
+                      unsigned long n, int prefix)
+{
+       if (prefix)
+               return copy_to_guest(vcpu, guestdest, from, n);
+       else
+               return copy_to_guest_absolute(vcpu, guestdest, from, n);
+}
+
+/*
+ * store status at address
+ * we have two special cases:
+ * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
+ * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
+ */
+int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+       const unsigned char archmode = 1;
+       int prefix;
+
+       if (addr == KVM_S390_STORE_STATUS_NOADDR) {
+               if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
+                       return -EFAULT;
+               addr = SAVE_AREA_BASE;
+               prefix = 0;
+       } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
+               if (copy_to_guest(vcpu, 163ul, &archmode, 1))
+                       return -EFAULT;
+               addr = SAVE_AREA_BASE;
+               prefix = 1;
+       } else
+               prefix = 0;
+
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
+                       vcpu->arch.guest_fpregs.fprs, 128, prefix))
+               return -EFAULT;
+
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
+                       vcpu->arch.guest_gprs, 128, prefix))
+               return -EFAULT;
+
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
+                       &vcpu->arch.sie_block->gpsw, 16, prefix))
+               return -EFAULT;
+
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
+                       &vcpu->arch.sie_block->prefix, 4, prefix))
+               return -EFAULT;
+
+       if (__guestcopy(vcpu,
+                       addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
+                       &vcpu->arch.guest_fpregs.fpc, 4, prefix))
+               return -EFAULT;
+
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
+                       &vcpu->arch.sie_block->todpr, 4, prefix))
+               return -EFAULT;
+
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
+                       &vcpu->arch.sie_block->cputm, 8, prefix))
+               return -EFAULT;
+
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
+                       &vcpu->arch.sie_block->ckc, 8, prefix))
+               return -EFAULT;
+
+       if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
+                       &vcpu->arch.guest_acrs, 64, prefix))
+               return -EFAULT;
+
+       if (__guestcopy(vcpu,
+                       addr + offsetof(struct save_area_s390x, ctrl_regs),
+                       &vcpu->arch.sie_block->gcr, 128, prefix))
+               return -EFAULT;
+       return 0;
+}
+
+static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+       int rc;
+
+       vcpu_load(vcpu);
+       rc = __kvm_s390_vcpu_store_status(vcpu, addr);
+       vcpu_put(vcpu);
+       return rc;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+                        unsigned int ioctl, unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+
+       switch (ioctl) {
+       case KVM_S390_INTERRUPT: {
+               struct kvm_s390_interrupt s390int;
+
+               if (copy_from_user(&s390int, argp, sizeof(s390int)))
+                       return -EFAULT;
+               return kvm_s390_inject_vcpu(vcpu, &s390int);
+       }
+       case KVM_S390_STORE_STATUS:
+               return kvm_s390_vcpu_store_status(vcpu, arg);
+       case KVM_S390_SET_INITIAL_PSW: {
+               psw_t psw;
+
+               if (copy_from_user(&psw, argp, sizeof(psw)))
+                       return -EFAULT;
+               return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
+       }
+       case KVM_S390_INITIAL_RESET:
+               return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
+       default:
+               ;
+       }
+       return -EINVAL;
+}
+
+/* Section: memory related */
+int kvm_arch_set_memory_region(struct kvm *kvm,
+                               struct kvm_userspace_memory_region *mem,
+                               struct kvm_memory_slot old,
+                               int user_alloc)
+{
+       /* A few sanity checks. We can have exactly one memory slot, which has
+          to start at guest virtual zero, begin at a page boundary in
+          userland, and end at a page boundary. The memory in userland may be
+          fragmented into various different vmas. It is okay to mmap() and
+          munmap() in this slot at any time after this call. */
+
+       if (mem->slot)
+               return -EINVAL;
+
+       if (mem->guest_phys_addr)
+               return -EINVAL;
+
+       if (mem->userspace_addr & (PAGE_SIZE - 1))
+               return -EINVAL;
+
+       if (mem->memory_size & (PAGE_SIZE - 1))
+               return -EINVAL;
+
+       kvm->arch.guest_origin = mem->userspace_addr;
+       kvm->arch.guest_memsize = mem->memory_size;
+
+       /* FIXME: we do want to interrupt running CPUs and update their memory
+          configuration now to avoid race conditions. But hey, changing the
+          memory layout while virtual CPUs are running is usually bad
+          programming practice. */
+
+       return 0;
+}
+
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+{
+       return gfn;
+}
+
+static int __init kvm_s390_init(void)
+{
+       return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
+}
+
+static void __exit kvm_s390_exit(void)
+{
+       kvm_exit();
+}
+
+module_init(kvm_s390_init);
+module_exit(kvm_s390_exit);
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
new file mode 100644 (file)
index 0000000..3893cf1
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * kvm_s390.h -  definition for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *               Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#ifndef ARCH_S390_KVM_S390_H
+#define ARCH_S390_KVM_S390_H
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+
+typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
+
+int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
+
+#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
+do { \
+       debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
+         d_args); \
+} while (0)
+
+#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
+do { \
+       debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
+         "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
+         d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
+         d_args); \
+} while (0)
+
+static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu)
+{
+       return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOP_INT;
+}
+
+int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
+void kvm_s390_idle_wakeup(unsigned long data);
+void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
+int kvm_s390_inject_vm(struct kvm *kvm,
+               struct kvm_s390_interrupt *s390int);
+int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
+               struct kvm_s390_interrupt *s390int);
+int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
+
+/* implemented in priv.c */
+int kvm_s390_handle_priv(struct kvm_vcpu *vcpu);
+
+/* implemented in sigp.c */
+int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
+
+/* implemented in kvm-s390.c */
+int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu,
+                                unsigned long addr);
+/* implemented in diag.c */
+int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);
+
+#endif
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
new file mode 100644 (file)
index 0000000..c02286c
--- /dev/null
@@ -0,0 +1,314 @@
+/*
+ * priv.c - handling privileged instructions
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *               Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/kvm.h>
+#include <linux/errno.h>
+#include <asm/current.h>
+#include <asm/debug.h>
+#include <asm/ebcdic.h>
+#include <asm/sysinfo.h>
+#include "gaccess.h"
+#include "kvm-s390.h"
+
+static int handle_set_prefix(struct kvm_vcpu *vcpu)
+{
+       int base2 = vcpu->arch.sie_block->ipb >> 28;
+       int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+       u64 operand2;
+       u32 address = 0;
+       u8 tmp;
+
+       vcpu->stat.instruction_spx++;
+
+       operand2 = disp2;
+       if (base2)
+               operand2 += vcpu->arch.guest_gprs[base2];
+
+       /* must be word boundary */
+       if (operand2 & 3) {
+               kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+               goto out;
+       }
+
+       /* get the value */
+       if (get_guest_u32(vcpu, operand2, &address)) {
+               kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+               goto out;
+       }
+
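+       /* the prefix designates an 8k-aligned area below 2G; keep only the
+          relevant address bits */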
+       address = address & 0x7fffe000u;
+
+       /* make sure that the new value is valid memory */
+       if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
+          (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
+               kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+               goto out;
+       }
+
+       vcpu->arch.sie_block->prefix = address;
+       vcpu->arch.sie_block->ihcpu = 0xffff;
+
+       VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
+out:
+       return 0;
+}
+
+static int handle_store_prefix(struct kvm_vcpu *vcpu)
+{
+       int base2 = vcpu->arch.sie_block->ipb >> 28;
+       int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+       u64 operand2;
+       u32 address;
+
+       vcpu->stat.instruction_stpx++;
+       operand2 = disp2;
+       if (base2)
+               operand2 += vcpu->arch.guest_gprs[base2];
+
+       /* must be word boundary */
+       if (operand2 & 3) {
+               kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+               goto out;
+       }
+
+       address = vcpu->arch.sie_block->prefix;
+       address = address & 0x7fffe000u;
+
+       /* store the value */
+       if (put_guest_u32(vcpu, operand2, address)) {
+               kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+               goto out;
+       }
+
+       VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
+out:
+       return 0;
+}
+
+static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
+{
+       int base2 = vcpu->arch.sie_block->ipb >> 28;
+       int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+       u64 useraddr;
+       int rc;
+
+       vcpu->stat.instruction_stap++;
+       useraddr = disp2;
+       if (base2)
+               useraddr += vcpu->arch.guest_gprs[base2];
+
+       if (useraddr & 1) {
+               kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+               goto out;
+       }
+
+       rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id);
+       if (rc == -EFAULT) {
+               kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+               goto out;
+       }
+
+       VCPU_EVENT(vcpu, 5, "storing cpu address to %lx", useraddr);
+out:
+       return 0;
+}
+
+static int handle_skey(struct kvm_vcpu *vcpu)
+{
+       vcpu->stat.instruction_storage_key++;
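+       /* the guest psw already points past the instruction; back it up by
+          the instruction length (4 bytes) so the operation is retried */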
+       vcpu->arch.sie_block->gpsw.addr -= 4;
+       VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
+       return 0;
+}
+
+static int handle_stsch(struct kvm_vcpu *vcpu)
+{
+       vcpu->stat.instruction_stsch++;
+       VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3");
+       /* condition code 3 */
+       vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+       vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
+       return 0;
+}
+
+static int handle_chsc(struct kvm_vcpu *vcpu)
+{
+       vcpu->stat.instruction_chsc++;
+       VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3");
+       /* condition code 3 */
+       vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+       vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
+       return 0;
+}
+
+static int handle_stfl(struct kvm_vcpu *vcpu)
+{
+       unsigned int facility_list = stfl();
+       int rc;
+
+       vcpu->stat.instruction_stfl++;
+       facility_list &= ~(1UL<<24); /* no stfle */
+
+       rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
+                          &facility_list, sizeof(facility_list));
+       if (rc == -EFAULT)
+               kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+       else
+               VCPU_EVENT(vcpu, 5, "store facility list value %x",
+                          facility_list);
+       return 0;
+}
+
+static int handle_stidp(struct kvm_vcpu *vcpu)
+{
+       int base2 = vcpu->arch.sie_block->ipb >> 28;
+       int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+       u64 operand2;
+       int rc;
+
+       vcpu->stat.instruction_stidp++;
+       operand2 = disp2;
+       if (base2)
+               operand2 += vcpu->arch.guest_gprs[base2];
+
+       if (operand2 & 7) {
+               kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+               goto out;
+       }
+
+       rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data);
+       if (rc == -EFAULT) {
+               kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+               goto out;
+       }
+
+       VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
+out:
+       return 0;
+}
+
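+/*
+ * Build the STSI 3.2.2 block: shift any entries reported by a lower level
+ * hypervisor down by one slot and report this KVM instance as entry 0.
+ */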
+static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
+{
+       struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+       int cpus = 0;
+       int n;
+
+       spin_lock_bh(&fi->lock);
+       for (n = 0; n < KVM_MAX_VCPUS; n++)
+               if (fi->local_int[n])
+                       cpus++;
+       spin_unlock_bh(&fi->lock);
+
+       /* deal with other level 3 hypervisors */
+       if (stsi(mem, 3, 2, 2) == -ENOSYS)
+               mem->count = 0;
+       if (mem->count < 8)
+               mem->count++;
+       for (n = mem->count - 1; n > 0 ; n--)
+               memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
+
+       mem->vm[0].cpus_total = cpus;
+       mem->vm[0].cpus_configured = cpus;
+       mem->vm[0].cpus_standby = 0;
+       mem->vm[0].cpus_reserved = 0;
+       mem->vm[0].caf = 1000;
+       memcpy(mem->vm[0].name, "KVMguest", 8);
+       ASCEBC(mem->vm[0].name, 8);
+       memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
+       ASCEBC(mem->vm[0].cpi, 16);
+}
+
+static int handle_stsi(struct kvm_vcpu *vcpu)
+{
+       int fc = (vcpu->arch.guest_gprs[0] & 0xf0000000) >> 28;
+       int sel1 = vcpu->arch.guest_gprs[0] & 0xff;
+       int sel2 = vcpu->arch.guest_gprs[1] & 0xffff;
+       int base2 = vcpu->arch.sie_block->ipb >> 28;
+       int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+       u64 operand2;
+       unsigned long mem;
+
+       vcpu->stat.instruction_stsi++;
+       VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);
+
+       operand2 = disp2;
+       if (base2)
+               operand2 += vcpu->arch.guest_gprs[base2];
+
+       if (operand2 & 0xfff && fc > 0)
+               return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+       switch (fc) {
+       case 0:
+               vcpu->arch.guest_gprs[0] = 3 << 28;
+               vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+               return 0;
+       case 1: /* same handling for 1 and 2 */
+       case 2:
+               mem = get_zeroed_page(GFP_KERNEL);
+               if (!mem)
+                       goto out_fail;
+               if (stsi((void *) mem, fc, sel1, sel2) == -ENOSYS)
+                       goto out_mem;
+               break;
+       case 3:
+               if (sel1 != 2 || sel2 != 2)
+                       goto out_fail;
+               mem = get_zeroed_page(GFP_KERNEL);
+               if (!mem)
+                       goto out_fail;
+               handle_stsi_3_2_2(vcpu, (void *) mem);
+               break;
+       default:
+               goto out_fail;
+       }
+
+       if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
+               kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
+               goto out_mem;
+       }
+       free_page(mem);
+       vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+       vcpu->arch.guest_gprs[0] = 0;
+       return 0;
+out_mem:
+       free_page(mem);
+out_fail:
+       /* condition code 3 */
+       vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
+       return 0;
+}
+
+static intercept_handler_t priv_handlers[256] = {
+       [0x02] = handle_stidp,
+       [0x10] = handle_set_prefix,
+       [0x11] = handle_store_prefix,
+       [0x12] = handle_store_cpu_address,
+       [0x29] = handle_skey,
+       [0x2a] = handle_skey,
+       [0x2b] = handle_skey,
+       [0x34] = handle_stsch,
+       [0x5f] = handle_chsc,
+       [0x7d] = handle_stsi,
+       [0xb1] = handle_stfl,
+};
+
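+/*
+ * The handlers above are indexed by the second opcode byte of the
+ * intercepted instruction (ipa & 0x00ff); opcodes without an in-kernel
+ * handler are rejected with -ENOTSUPP and end up in userspace.
+ */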
+int kvm_s390_handle_priv(struct kvm_vcpu *vcpu)
+{
+       intercept_handler_t handler;
+
+       handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
+       if (handler)
+               return handler(vcpu);
+       return -ENOTSUPP;
+}
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
new file mode 100644 (file)
index 0000000..934fd6a
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * sie64a.S - low level sie call
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/errno.h>
+#include <asm/asm-offsets.h>
+
+SP_R5 =        5 * 8   # offset into stackframe
+SP_R6 =        6 * 8
+
+/*
+ * sie64a calling convention:
+ * %r2 pointer to sie control block
+ * %r3 guest register save area
+ */
+       .globl  sie64a
+sie64a:
+       lgr     %r5,%r3
+       stmg    %r5,%r14,SP_R5(%r15)    # save registers on entry
+       lgr     %r14,%r2                # pointer to sie control block
+       lmg     %r0,%r13,0(%r3)         # load guest gprs 0-13
+sie_inst:
+       sie     0(%r14)
+       lg      %r14,SP_R5(%r15)
+       stmg    %r0,%r13,0(%r14)        # save guest gprs 0-13
+       lghi    %r2,0
+       lmg     %r6,%r14,SP_R6(%r15)
+       br      %r14
+
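+# fault path: the __ex_table entry below redirects a faulting sie
+# instruction here; guest gprs are saved and -EFAULT is returned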
+sie_err:
+       lg      %r14,SP_R5(%r15)
+       stmg    %r0,%r13,0(%r14)        # save guest gprs 0-13
+       lghi    %r2,-EFAULT
+       lmg     %r6,%r14,SP_R6(%r15)
+       br      %r14
+
+       .section __ex_table,"a"
+       .quad   sie_inst,sie_err
+       .previous
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
new file mode 100644 (file)
index 0000000..0a236ac
--- /dev/null
@@ -0,0 +1,288 @@
+/*
+ * sigp.c - handling interprocessor communication
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *               Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include "gaccess.h"
+#include "kvm-s390.h"
+
+/* sigp order codes */
+#define SIGP_SENSE             0x01
+#define SIGP_EXTERNAL_CALL     0x02
+#define SIGP_EMERGENCY         0x03
+#define SIGP_START             0x04
+#define SIGP_STOP              0x05
+#define SIGP_RESTART           0x06
+#define SIGP_STOP_STORE_STATUS 0x09
+#define SIGP_INITIAL_CPU_RESET 0x0b
+#define SIGP_CPU_RESET         0x0c
+#define SIGP_SET_PREFIX        0x0d
+#define SIGP_STORE_STATUS_ADDR 0x0e
+#define SIGP_SET_ARCH          0x12
+
+/* cpu status bits */
+#define SIGP_STAT_EQUIPMENT_CHECK   0x80000000UL
+#define SIGP_STAT_INCORRECT_STATE   0x00000200UL
+#define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
+#define SIGP_STAT_EXT_CALL_PENDING  0x00000080UL
+#define SIGP_STAT_STOPPED           0x00000040UL
+#define SIGP_STAT_OPERATOR_INTERV   0x00000020UL
+#define SIGP_STAT_CHECK_STOP        0x00000010UL
+#define SIGP_STAT_INOPERATIVE       0x00000004UL
+#define SIGP_STAT_INVALID_ORDER     0x00000002UL
+#define SIGP_STAT_RECEIVER_CHECK    0x00000001UL
+
+
+static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr, u64 *reg)
+{
+       struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+       int rc;
+
+       if (cpu_addr >= KVM_MAX_VCPUS)
+               return 3; /* not operational */
+
+       spin_lock_bh(&fi->lock);
+       if (fi->local_int[cpu_addr] == NULL)
+               rc = 3; /* not operational */
+       else if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
+                & CPUSTAT_RUNNING) {
+               *reg &= 0xffffffff00000000UL;
+               rc = 1; /* status stored */
+       } else {
+               *reg &= 0xffffffff00000000UL;
+               *reg |= SIGP_STAT_STOPPED;
+               rc = 1; /* status stored */
+       }
+       spin_unlock_bh(&fi->lock);
+
+       VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
+       return rc;
+}
+
+static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
+{
+       struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+       struct local_interrupt *li;
+       struct interrupt_info *inti;
+       int rc;
+
+       if (cpu_addr >= KVM_MAX_VCPUS)
+               return 3; /* not operational */
+
+       inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+       if (!inti)
+               return -ENOMEM;
+
+       inti->type = KVM_S390_INT_EMERGENCY;
+
+       spin_lock_bh(&fi->lock);
+       li = fi->local_int[cpu_addr];
+       if (li == NULL) {
+               rc = 3; /* not operational */
+               kfree(inti);
+               goto unlock;
+       }
+       spin_lock_bh(&li->lock);
+       list_add_tail(&inti->list, &li->list);
+       atomic_set(&li->active, 1);
+       atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+       if (waitqueue_active(&li->wq))
+               wake_up_interruptible(&li->wq);
+       spin_unlock_bh(&li->lock);
+       rc = 0; /* order accepted */
+unlock:
+       spin_unlock_bh(&fi->lock);
+       VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
+       return rc;
+}
+
+static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int store)
+{
+       struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+       struct local_interrupt *li;
+       struct interrupt_info *inti;
+       int rc;
+
+       if (cpu_addr >= KVM_MAX_VCPUS)
+               return 3; /* not operational */
+
+       inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+       if (!inti)
+               return -ENOMEM;
+
+       inti->type = KVM_S390_SIGP_STOP;
+
+       spin_lock_bh(&fi->lock);
+       li = fi->local_int[cpu_addr];
+       if (li == NULL) {
+               rc = 3; /* not operational */
+               kfree(inti);
+               goto unlock;
+       }
+       spin_lock_bh(&li->lock);
+       list_add_tail(&inti->list, &li->list);
+       atomic_set(&li->active, 1);
+       atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+       if (store)
+               li->action_bits |= ACTION_STORE_ON_STOP;
+       li->action_bits |= ACTION_STOP_ON_STOP;
+       if (waitqueue_active(&li->wq))
+               wake_up_interruptible(&li->wq);
+       spin_unlock_bh(&li->lock);
+       rc = 0; /* order accepted */
+unlock:
+       spin_unlock_bh(&fi->lock);
+       VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
+       return rc;
+}
+
+static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
+{
+       int rc;
+
+       switch (parameter & 0xff) {
+       case 0:
+               printk(KERN_WARNING "kvm: request to switch to ESA/390 mode"
+                                                       " not supported");
+               rc = 3; /* not operational */
+               break;
+       case 1:
+       case 2:
+               rc = 0; /* order accepted */
+               break;
+       default:
+               rc = -ENOTSUPP;
+       }
+       return rc;
+}
+
+static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
+                            u64 *reg)
+{
+       struct float_interrupt *fi = &vcpu->kvm->arch.float_int;
+       struct local_interrupt *li;
+       struct interrupt_info *inti;
+       int rc;
+       u8 tmp;
+
+       /* make sure that the new value is valid memory */
+       address = address & 0x7fffe000u;
+       if ((copy_from_guest(vcpu, &tmp,
+               (u64) (address + vcpu->kvm->arch.guest_origin) , 1)) ||
+          (copy_from_guest(vcpu, &tmp, (u64) (address +
+                       vcpu->kvm->arch.guest_origin + PAGE_SIZE), 1))) {
+               *reg |= SIGP_STAT_INVALID_PARAMETER;
+               return 1; /* invalid parameter */
+       }
+
+       inti = kzalloc(sizeof(*inti), GFP_KERNEL);
+       if (!inti)
+               return 2; /* busy */
+
+       spin_lock_bh(&fi->lock);
+       li = fi->local_int[cpu_addr];
+
+       if ((cpu_addr >= KVM_MAX_VCPUS) || (li == NULL)) {
+               rc = 1; /* incorrect state */
+               *reg &= SIGP_STAT_INCORRECT_STATE;
+               kfree(inti);
+               goto out_fi;
+       }
+
+       spin_lock_bh(&li->lock);
+       /* cpu must be in stopped state */
+       if (atomic_read(li->cpuflags) & CPUSTAT_RUNNING) {
+               rc = 1; /* incorrect state */
+               *reg &= SIGP_STAT_INCORRECT_STATE;
+               kfree(inti);
+               goto out_li;
+       }
+
+       inti->type = KVM_S390_SIGP_SET_PREFIX;
+       inti->prefix.address = address;
+
+       list_add_tail(&inti->list, &li->list);
+       atomic_set(&li->active, 1);
+       if (waitqueue_active(&li->wq))
+               wake_up_interruptible(&li->wq);
+       rc = 0; /* order accepted */
+
+       VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
+out_li:
+       spin_unlock_bh(&li->lock);
+out_fi:
+       spin_unlock_bh(&fi->lock);
+       return rc;
+}
+
+int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
+{
+       int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
+       int r3 = vcpu->arch.sie_block->ipa & 0x000f;
+       int base2 = vcpu->arch.sie_block->ipb >> 28;
+       int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
+       u32 parameter;
+       u16 cpu_addr = vcpu->arch.guest_gprs[r3];
+       u8 order_code;
+       int rc;
+
+       order_code = disp2;
+       if (base2)
+               order_code += vcpu->arch.guest_gprs[base2];
+
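+       /* the 32 bit parameter is taken from the odd register of the r1 pair */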
+       if (r1 % 2)
+               parameter = vcpu->arch.guest_gprs[r1];
+       else
+               parameter = vcpu->arch.guest_gprs[r1 + 1];
+
+       switch (order_code) {
+       case SIGP_SENSE:
+               vcpu->stat.instruction_sigp_sense++;
+               rc = __sigp_sense(vcpu, cpu_addr,
+                                 &vcpu->arch.guest_gprs[r1]);
+               break;
+       case SIGP_EMERGENCY:
+               vcpu->stat.instruction_sigp_emergency++;
+               rc = __sigp_emergency(vcpu, cpu_addr);
+               break;
+       case SIGP_STOP:
+               vcpu->stat.instruction_sigp_stop++;
+               rc = __sigp_stop(vcpu, cpu_addr, 0);
+               break;
+       case SIGP_STOP_STORE_STATUS:
+               vcpu->stat.instruction_sigp_stop++;
+               rc = __sigp_stop(vcpu, cpu_addr, 1);
+               break;
+       case SIGP_SET_ARCH:
+               vcpu->stat.instruction_sigp_arch++;
+               rc = __sigp_set_arch(vcpu, parameter);
+               break;
+       case SIGP_SET_PREFIX:
+               vcpu->stat.instruction_sigp_prefix++;
+               rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
+                                      &vcpu->arch.guest_gprs[r1]);
+               break;
+       case SIGP_RESTART:
+               vcpu->stat.instruction_sigp_restart++;
+               /* user space must know about restart */
+       default:
+               return -ENOTSUPP;
+       }
+
+       if (rc < 0)
+               return rc;
+
+       vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+       vcpu->arch.sie_block->gpsw.mask |= (rc & 3ul) << 44;
+       return 0;
+}
index 52084436ab69726252e894d5ecdcecbbb6580c24..ab6735df2d2155b2755170048701f979272e6f03 100644 (file)
@@ -2,8 +2,6 @@
 # Makefile for s390-specific library files..
 #
 
-EXTRA_AFLAGS := -traditional
-
 lib-y += delay.o string.o uaccess_std.o uaccess_pt.o
 obj-$(CONFIG_32BIT) += div64.o qrnnd.o
 lib-$(CONFIG_64BIT) += uaccess_mvcos.o
index 6d8772339d76182db7a7d0b03cf03067194b3fca..3f15aaf54855c8b66522e6d28a5cbce5547ed638 100644 (file)
@@ -162,6 +162,7 @@ static size_t clear_user_mvcos(size_t size, void __user *to)
        return size;
 }
 
+#ifdef CONFIG_S390_SWITCH_AMODE
 static size_t strnlen_user_mvcos(size_t count, const char __user *src)
 {
        char buf[256];
@@ -199,6 +200,7 @@ static size_t strncpy_from_user_mvcos(size_t count, const char __user *src,
        } while ((len_str == len) && (done < count));
        return done;
 }
+#endif /* CONFIG_S390_SWITCH_AMODE */
 
 struct uaccess_ops uaccess_mvcos = {
        .copy_from_user = copy_from_user_mvcos_check,
index 73b3e72efc46a460cc62a31c59400dbae232dedf..c84890341052ce7377fbcf9eb75e11436157dea6 100644 (file)
@@ -5,4 +5,3 @@
 obj-$(CONFIG_MATHEMU) := math.o
 
 EXTRA_CFLAGS := -I$(src) -Iinclude/math-emu -w
-EXTRA_AFLAGS := -traditional
index 66401930f83e725c4631c23c145c651a6231a7e2..fb988a48a754ff541d161ba341a108e10a2a15e1 100644 (file)
@@ -4,4 +4,4 @@
 
 obj-y   := init.o fault.o extmem.o mmap.o vmem.o pgtable.o
 obj-$(CONFIG_CMM) += cmm.o
-
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
index ed2af0a3303b06c7680fd548f82b7e9001f2977f..f231f5ec74b633555decc0827a8d14d3ceba3d6d 100644 (file)
@@ -287,7 +287,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
        if (rc < 0)
                goto out_free;
 
-       rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+       rc = vmem_add_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
 
        if (rc)
                goto out_free;
@@ -351,7 +351,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
        release_resource(seg->res);
        kfree(seg->res);
  out_shared:
-       remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+       vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
  out_free:
        kfree(seg);
  out:
@@ -474,7 +474,7 @@ segment_modify_shared (char *name, int do_nonshared)
        rc = 0;
        goto out_unlock;
  out_del:
-       remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+       vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
        list_del(&seg->list);
        dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
        kfree(seg);
@@ -508,7 +508,7 @@ segment_unload(char *name)
                goto out_unlock;
        release_resource(seg->res);
        kfree(seg->res);
-       remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+       vmem_remove_mapping(seg->start_addr, seg->end - seg->start_addr + 1);
        list_del(&seg->list);
        dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
        kfree(seg);
index 2650f46001d098c5f04cee1ce5b11d7de3eaffb9..4d537205e83c0013d159a9cd8fc5145e0dd80813 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/hardirq.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
+#include <linux/hugetlb.h>
 #include <asm/system.h>
 #include <asm/pgtable.h>
 #include <asm/s390_ext.h>
@@ -367,6 +368,8 @@ good_area:
        }
 
 survive:
+       if (is_vm_hugetlb_page(vma))
+               address &= HPAGE_MASK;
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
new file mode 100644 (file)
index 0000000..f4b6124
--- /dev/null
@@ -0,0 +1,134 @@
+/*
+ *  IBM System z Huge TLB Page Support for Kernel.
+ *
+ *    Copyright 2007 IBM Corp.
+ *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                                  pte_t *pteptr, pte_t pteval)
+{
+       pmd_t *pmdp = (pmd_t *) pteptr;
+       pte_t shadow_pteval = pteval;
+       unsigned long mask;
+
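+       /*
+        * Without hardware large page support the huge pte is emulated:
+        * the segment table entry is pointed at the page table prepared in
+        * arch_prepare_hugepage() (stashed in page[1].index).
+        */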
+       if (!MACHINE_HAS_HPAGE) {
+               pteptr = (pte_t *) pte_page(pteval)[1].index;
+               mask = pte_val(pteval) &
+                               (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
+               pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
+               if (mm->context.noexec) {
+                       pteptr += PTRS_PER_PTE;
+                       pte_val(shadow_pteval) =
+                                       (_SEGMENT_ENTRY + __pa(pteptr)) | mask;
+               }
+       }
+
+       pmd_val(*pmdp) = pte_val(pteval);
+       if (mm->context.noexec) {
+               pmdp = get_shadow_table(pmdp);
+               pmd_val(*pmdp) = pte_val(shadow_pteval);
+       }
+}
+
+int arch_prepare_hugepage(struct page *page)
+{
+       unsigned long addr = page_to_phys(page);
+       pte_t pte;
+       pte_t *ptep;
+       int i;
+
+       if (MACHINE_HAS_HPAGE)
+               return 0;
+
+       ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
+       if (!ptep)
+               return -ENOMEM;
+
+       pte = mk_pte(page, PAGE_RW);
+       for (i = 0; i < PTRS_PER_PTE; i++) {
+               set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
+               pte_val(pte) += PAGE_SIZE;
+       }
+       page[1].index = (unsigned long) ptep;
+       return 0;
+}
+
+void arch_release_hugepage(struct page *page)
+{
+       pte_t *ptep;
+
+       if (MACHINE_HAS_HPAGE)
+               return;
+
+       ptep = (pte_t *) page[1].index;
+       if (!ptep)
+               return;
+       pte_free(&init_mm, ptep);
+       page[1].index = 0;
+}
+
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+{
+       pgd_t *pgdp;
+       pud_t *pudp;
+       pmd_t *pmdp = NULL;
+
+       pgdp = pgd_offset(mm, addr);
+       pudp = pud_alloc(mm, pgdp, addr);
+       if (pudp)
+               pmdp = pmd_alloc(mm, pudp, addr);
+       return (pte_t *) pmdp;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+       pgd_t *pgdp;
+       pud_t *pudp;
+       pmd_t *pmdp = NULL;
+
+       pgdp = pgd_offset(mm, addr);
+       if (pgd_present(*pgdp)) {
+               pudp = pud_offset(pgdp, addr);
+               if (pud_present(*pudp))
+                       pmdp = pmd_offset(pudp, addr);
+       }
+       return (pte_t *) pmdp;
+}
+
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+       return 0;
+}
+
+struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+                             int write)
+{
+       return ERR_PTR(-EINVAL);
+}
+
+int pmd_huge(pmd_t pmd)
+{
+       if (!MACHINE_HAS_HPAGE)
+               return 0;
+
+       return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
+}
+
+struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+                            pmd_t *pmdp, int write)
+{
+       struct page *page;
+
+       if (!MACHINE_HAS_HPAGE)
+               return NULL;
+
+       page = pmd_page(*pmdp);
+       if (page)
+               page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
+       return page;
+}
index 202c952a29b49fe520ca54a5bb200a11df74237b..fa31de6ae97aef1577b99d0f9afb04cf257334ce 100644 (file)
@@ -77,28 +77,6 @@ void show_mem(void)
        printk("%lu pages pagetables\n", global_page_state(NR_PAGETABLE));
 }
 
-static void __init setup_ro_region(void)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *pte;
-       pte_t new_pte;
-       unsigned long address, end;
-
-       address = ((unsigned long)&_stext) & PAGE_MASK;
-       end = PFN_ALIGN((unsigned long)&_eshared);
-
-       for (; address < end; address += PAGE_SIZE) {
-               pgd = pgd_offset_k(address);
-               pud = pud_offset(pgd, address);
-               pmd = pmd_offset(pud, address);
-               pte = pte_offset_kernel(pmd, address);
-               new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
-               *pte = new_pte;
-       }
-}
-
 /*
  * paging_init() sets up the page tables
  */
@@ -121,7 +99,6 @@ void __init paging_init(void)
        clear_table((unsigned long *) init_mm.pgd, pgd_type,
                    sizeof(unsigned long)*2048);
        vmem_map_init();
-       setup_ro_region();
 
         /* enable virtual mapping in kernel mode */
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
@@ -129,6 +106,8 @@ void __init paging_init(void)
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
        __raw_local_irq_ssm(ssm_mask);
 
+       sparse_memory_present_with_active_regions(MAX_NUMNODES);
+       sparse_init();
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 #ifdef CONFIG_ZONE_DMA
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
index fd072013f88ce11658adab8b7db4818a6479f50b..5c1aea97cd1229b2e231c7dd838ee71663087316 100644 (file)
 #define TABLES_PER_PAGE        4
 #define FRAG_MASK      15UL
 #define SECOND_HALVES  10UL
+
+void clear_table_pgstes(unsigned long *table)
+{
+       clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
+       memset(table + 256, 0, PAGE_SIZE/4);
+       clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
+       memset(table + 768, 0, PAGE_SIZE/4);
+}
+
 #else
 #define ALLOC_ORDER    2
 #define TABLES_PER_PAGE        2
 #define FRAG_MASK      3UL
 #define SECOND_HALVES  2UL
+
+void clear_table_pgstes(unsigned long *table)
+{
+       clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
+       memset(table + 256, 0, PAGE_SIZE/2);
+}
+
 #endif
 
 unsigned long *crst_table_alloc(struct mm_struct *mm, int noexec)
@@ -153,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
        unsigned long *table;
        unsigned long bits;
 
-       bits = mm->context.noexec ? 3UL : 1UL;
+       bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
        spin_lock(&mm->page_table_lock);
        page = NULL;
        if (!list_empty(&mm->context.pgtable_list)) {
@@ -170,7 +186,10 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
                pgtable_page_ctor(page);
                page->flags &= ~FRAG_MASK;
                table = (unsigned long *) page_to_phys(page);
-               clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
+               if (mm->context.pgstes)
+                       clear_table_pgstes(table);
+               else
+                       clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
                spin_lock(&mm->page_table_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
        }
@@ -191,7 +210,7 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
        struct page *page;
        unsigned long bits;
 
-       bits = mm->context.noexec ? 3UL : 1UL;
+       bits = (mm->context.noexec || mm->context.pgstes) ? 3UL : 1UL;
        bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        spin_lock(&mm->page_table_lock);
@@ -228,3 +247,43 @@ void disable_noexec(struct mm_struct *mm, struct task_struct *tsk)
        mm->context.noexec = 0;
        update_mm(mm, tsk);
 }
+
+/*
+ * switch on pgstes for the current userspace process (needed by kvm)
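+ *
+ * Page tables with pgstes cannot be converted in place, so the mm is
+ * duplicated with context.pgstes set and the task is switched over to the
+ * copy; this only works while there are no other users of the mm.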
+ */
+int s390_enable_sie(void)
+{
+       struct task_struct *tsk = current;
+       struct mm_struct *mm;
+       int rc;
+
+       task_lock(tsk);
+
+       rc = 0;
+       if (tsk->mm->context.pgstes)
+               goto unlock;
+
+       rc = -EINVAL;
+       if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
+           tsk->mm != tsk->active_mm || tsk->mm->ioctx_list)
+               goto unlock;
+
+       tsk->mm->context.pgstes = 1;    /* dirty little tricks .. */
+       mm = dup_mm(tsk);
+       tsk->mm->context.pgstes = 0;
+
+       rc = -ENOMEM;
+       if (!mm)
+               goto unlock;
+       mmput(tsk->mm);
+       tsk->mm = tsk->active_mm = mm;
+       preempt_disable();
+       update_mm(mm, tsk);
+       cpu_set(smp_processor_id(), mm->cpu_vm_mask);
+       preempt_enable();
+       rc = 0;
+unlock:
+       task_unlock(tsk);
+       return rc;
+}
+EXPORT_SYMBOL_GPL(s390_enable_sie);
index 35d90a4720fdcc7570a0dd1d09c231c85c1ab097..beccacf907f3481190b10496cad77a1bb4d1d23f 100644 (file)
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/list.h>
+#include <linux/hugetlb.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/setup.h>
 #include <asm/tlbflush.h>
+#include <asm/sections.h>
 
 static DEFINE_MUTEX(vmem_mutex);
 
@@ -25,43 +27,6 @@ struct memory_segment {
 
 static LIST_HEAD(mem_segs);
 
-void __meminit memmap_init(unsigned long size, int nid, unsigned long zone,
-                          unsigned long start_pfn)
-{
-       struct page *start, *end;
-       struct page *map_start, *map_end;
-       int i;
-
-       start = pfn_to_page(start_pfn);
-       end = start + size;
-
-       for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-               unsigned long cstart, cend;
-
-               cstart = PFN_DOWN(memory_chunk[i].addr);
-               cend = cstart + PFN_DOWN(memory_chunk[i].size);
-
-               map_start = mem_map + cstart;
-               map_end = mem_map + cend;
-
-               if (map_start < start)
-                       map_start = start;
-               if (map_end > end)
-                       map_end = end;
-
-               map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
-                       / sizeof(struct page);
-               map_end += ((PFN_ALIGN((unsigned long) map_end)
-                            - (unsigned long) map_end)
-                           / sizeof(struct page));
-
-               if (map_start < map_end)
-                       memmap_init_zone((unsigned long)(map_end - map_start),
-                                        nid, zone, page_to_pfn(map_start),
-                                        MEMMAP_EARLY);
-       }
-}
-
 static void __ref *vmem_alloc_pages(unsigned int order)
 {
        if (slab_is_available())
@@ -77,8 +42,7 @@ static inline pud_t *vmem_pud_alloc(void)
        pud = vmem_alloc_pages(2);
        if (!pud)
                return NULL;
-       pud_val(*pud) = _REGION3_ENTRY_EMPTY;
-       memcpy(pud + 1, pud, (PTRS_PER_PUD - 1)*sizeof(pud_t));
+       clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
 #endif
        return pud;
 }
@@ -91,7 +55,7 @@ static inline pmd_t *vmem_pmd_alloc(void)
        pmd = vmem_alloc_pages(2);
        if (!pmd)
                return NULL;
-       clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE*4);
+       clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
 #endif
        return pmd;
 }
@@ -114,7 +78,7 @@ static pte_t __init_refok *vmem_pte_alloc(void)
 /*
  * Add a physical memory range to the 1:1 mapping.
  */
-static int vmem_add_range(unsigned long start, unsigned long size)
+static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
 {
        unsigned long address;
        pgd_t *pg_dir;
@@ -141,7 +105,19 @@ static int vmem_add_range(unsigned long start, unsigned long size)
                        pud_populate_kernel(&init_mm, pu_dir, pm_dir);
                }
 
+               pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
                pm_dir = pmd_offset(pu_dir, address);
+
+#ifdef __s390x__
+               if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
+                   (address + HPAGE_SIZE <= start + size) &&
+                   (address >= HPAGE_SIZE)) {
+                       pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
+                       pmd_val(*pm_dir) = pte_val(pte);
+                       address += HPAGE_SIZE - PAGE_SIZE;
+                       continue;
+               }
+#endif
                if (pmd_none(*pm_dir)) {
                        pt_dir = vmem_pte_alloc();
                        if (!pt_dir)
@@ -150,7 +126,6 @@ static int vmem_add_range(unsigned long start, unsigned long size)
                }
 
                pt_dir = pte_offset_kernel(pm_dir, address);
-               pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
                *pt_dir = pte;
        }
        ret = 0;
@@ -181,6 +156,13 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
                pm_dir = pmd_offset(pu_dir, address);
                if (pmd_none(*pm_dir))
                        continue;
+
+               if (pmd_huge(*pm_dir)) {
+                       pmd_clear_kernel(pm_dir);
+                       address += HPAGE_SIZE - PAGE_SIZE;
+                       continue;
+               }
+
                pt_dir = pte_offset_kernel(pm_dir, address);
                *pt_dir = pte;
        }
@@ -190,10 +172,9 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
 /*
  * Add a backed mem_map array to the virtual mem_map array.
  */
-static int vmem_add_mem_map(unsigned long start, unsigned long size)
+int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
 {
        unsigned long address, start_addr, end_addr;
-       struct page *map_start, *map_end;
        pgd_t *pg_dir;
        pud_t *pu_dir;
        pmd_t *pm_dir;
@@ -201,11 +182,8 @@ static int vmem_add_mem_map(unsigned long start, unsigned long size)
        pte_t  pte;
        int ret = -ENOMEM;
 
-       map_start = VMEM_MAP + PFN_DOWN(start);
-       map_end = VMEM_MAP + PFN_DOWN(start + size);
-
-       start_addr = (unsigned long) map_start & PAGE_MASK;
-       end_addr = PFN_ALIGN((unsigned long) map_end);
+       start_addr = (unsigned long) start;
+       end_addr = (unsigned long) (start + nr);
 
        for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
                pg_dir = pgd_offset_k(address);
@@ -249,16 +227,6 @@ out:
        return ret;
 }
 
-static int vmem_add_mem(unsigned long start, unsigned long size)
-{
-       int ret;
-
-       ret = vmem_add_mem_map(start, size);
-       if (ret)
-               return ret;
-       return vmem_add_range(start, size);
-}
-
 /*
  * Add memory segment to the segment list if it doesn't overlap with
  * an already present segment.
@@ -296,7 +264,7 @@ static void __remove_shared_memory(struct memory_segment *seg)
        vmem_remove_range(seg->start, seg->size);
 }
 
-int remove_shared_memory(unsigned long start, unsigned long size)
+int vmem_remove_mapping(unsigned long start, unsigned long size)
 {
        struct memory_segment *seg;
        int ret;
@@ -320,11 +288,9 @@ out:
        return ret;
 }
 
-int add_shared_memory(unsigned long start, unsigned long size)
+int vmem_add_mapping(unsigned long start, unsigned long size)
 {
        struct memory_segment *seg;
-       struct page *page;
-       unsigned long pfn, num_pfn, end_pfn;
        int ret;
 
        mutex_lock(&vmem_mutex);
@@ -339,24 +305,9 @@ int add_shared_memory(unsigned long start, unsigned long size)
        if (ret)
                goto out_free;
 
-       ret = vmem_add_mem(start, size);
+       ret = vmem_add_mem(start, size, 0);
        if (ret)
                goto out_remove;
-
-       pfn = PFN_DOWN(start);
-       num_pfn = PFN_DOWN(size);
-       end_pfn = pfn + num_pfn;
-
-       page = pfn_to_page(pfn);
-       memset(page, 0, num_pfn * sizeof(struct page));
-
-       for (; pfn < end_pfn; pfn++) {
-               page = pfn_to_page(pfn);
-               init_page_count(page);
-               reset_page_mapcount(page);
-               SetPageReserved(page);
-               INIT_LIST_HEAD(&page->lru);
-       }
        goto out;
 
 out_remove:
@@ -375,14 +326,34 @@ out:
  */
 void __init vmem_map_init(void)
 {
+       unsigned long ro_start, ro_end;
+       unsigned long start, end;
        int i;
 
        INIT_LIST_HEAD(&init_mm.context.crst_list);
        INIT_LIST_HEAD(&init_mm.context.pgtable_list);
        init_mm.context.noexec = 0;
-       NODE_DATA(0)->node_mem_map = VMEM_MAP;
-       for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
-               vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
+       ro_start = ((unsigned long)&_stext) & PAGE_MASK;
+       ro_end = PFN_ALIGN((unsigned long)&_eshared);
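+       /* map each memory chunk, splitting chunks that overlap the read-only
+          kernel text/rodata range [_stext, _eshared) */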
+       for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+               start = memory_chunk[i].addr;
+               end = memory_chunk[i].addr + memory_chunk[i].size;
+               if (start >= ro_end || end <= ro_start)
+                       vmem_add_mem(start, end - start, 0);
+               else if (start >= ro_start && end <= ro_end)
+                       vmem_add_mem(start, end - start, 1);
+               else if (start >= ro_start) {
+                       vmem_add_mem(start, ro_end - start, 1);
+                       vmem_add_mem(ro_end, end - ro_end, 0);
+               } else if (end < ro_end) {
+                       vmem_add_mem(start, ro_start - start, 0);
+                       vmem_add_mem(ro_start, end - ro_start, 1);
+               } else {
+                       vmem_add_mem(start, ro_start - start, 0);
+                       vmem_add_mem(ro_start, ro_end - ro_start, 1);
+                       vmem_add_mem(ro_end, end - ro_end, 0);
+               }
+       }
 }
 
 /*
index 00d52a20d8a59425105f0307aa227d66b4871308..e7c150d4970297629be13c17fefa543f5cfe9ac5 100644 (file)
@@ -199,8 +199,7 @@ static struct platform_device *migor_devices[] __initdata = {
 
 static struct i2c_board_info __initdata migor_i2c_devices[] = {
        {
-               I2C_BOARD_INFO("rtc-rs5c372", 0x32),
-               .type   = "rs5c372b",
+               I2C_BOARD_INFO("rs5c372b", 0x32),
        },
        {
                I2C_BOARD_INFO("migor_ts", 0x51),
index a5c5e92365011bfd7f1ba4594b0fb26f1c116540..ac0a96522e45e3f9387ae578149fe9ec05e34578 100644 (file)
@@ -199,8 +199,7 @@ static struct platform_device smbus_device = {
 
 static struct i2c_board_info __initdata highlander_i2c_devices[] = {
        {
-               I2C_BOARD_INFO("rtc-rs5c372", 0x32),
-               .type   = "r2025sd",
+               I2C_BOARD_INFO("r2025sd", 0x32),
        },
 };
 
index 49b435c3a57a382302f97e8977954ee510c0aab3..08d2e7325252a19071fe446055417bd4d5368dd3 100644 (file)
@@ -191,8 +191,8 @@ void __init pcibios_update_irq(struct pci_dev *dev, int irq)
 
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-       unsigned long start = pci_resource_start(dev, bar);
-       unsigned long len = pci_resource_len(dev, bar);
+       resource_size_t start = pci_resource_start(dev, bar);
+       resource_size_t len = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);
 
        if (unlikely(!len || !start))
index dc6725c51a8971e4071897636ff60ec6214ac387..57cf0e0680f36b3e4b397d16979ec2f53c36df40 100644 (file)
 #include <linux/stddef.h>
 #include <linux/types.h>
 #include <linux/mm.h>
-#include <asm/thread_info.h>
-
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+#include <linux/kbuild.h>
 
-#define BLANK() asm volatile("\n->" : : )
+#include <asm/thread_info.h>
 
 int main(void)
 {
index 9bf19b00696ac5ae6ad0456e1a3ec981f1d0b53d..a2a99e487e33af4c6c1714f0d36e9065556f701a 100644 (file)
@@ -200,8 +200,6 @@ void irq_ctx_exit(int cpu)
        hardirq_ctx[cpu] = NULL;
 }
 
-extern asmlinkage void __do_softirq(void);
-
 asmlinkage void do_softirq(void)
 {
        unsigned long flags;
index 53dde06073627d693d431fd308e6c647d87da30e..d7df26bd1e5468b0f79a702220bf85352c61a0bc 100644 (file)
@@ -307,15 +307,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 #endif
 
 #ifdef CONFIG_MEMORY_HOTPLUG
-void online_page(struct page *page)
-{
-       ClearPageReserved(page);
-       init_page_count(page);
-       __free_page(page);
-       totalram_pages++;
-       num_physpages++;
-}
-
 int arch_add_memory(int nid, u64 start, u64 size)
 {
        pg_data_t *pgdat;
index 49590f8fe98cc60f26be4661f74a8d6574e4fd48..d211fdb24584d311884951b7f7df7c9a15b06fd9 100644 (file)
@@ -68,6 +68,7 @@ config SPARC
        default y
        select HAVE_IDE
        select HAVE_OPROFILE
+       select HAVE_ARCH_KGDB if !SMP
 
 # Identify this as a Sparc32 build
 config SPARC32
index 6a2c57a2fe71ae1d56f3af7071f26e7da5859bba..2e3a149ea0e76dc1b15b2063f384f7fd798985f2 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
 # Linux kernel version: 2.6.25
-# Sun Apr 20 01:49:51 2008
+# Tue Apr 29 01:28:58 2008
 #
 CONFIG_MMU=y
 CONFIG_HIGHMEM=y
@@ -217,12 +217,7 @@ CONFIG_IPV6_TUNNEL=m
 # CONFIG_NETWORK_SECMARK is not set
 # CONFIG_NETFILTER is not set
 # CONFIG_IP_DCCP is not set
-CONFIG_IP_SCTP=m
-# CONFIG_SCTP_DBG_MSG is not set
-CONFIG_SCTP_DBG_OBJCNT=y
-# CONFIG_SCTP_HMAC_NONE is not set
-# CONFIG_SCTP_HMAC_SHA1 is not set
-CONFIG_SCTP_HMAC_MD5=y
+# CONFIG_IP_SCTP is not set
 # CONFIG_TIPC is not set
 # CONFIG_ATM is not set
 # CONFIG_BRIDGE is not set
@@ -245,9 +240,7 @@ CONFIG_NET_PKTGEN=m
 # CONFIG_CAN is not set
 # CONFIG_IRDA is not set
 # CONFIG_BT is not set
-CONFIG_AF_RXRPC=m
-# CONFIG_AF_RXRPC_DEBUG is not set
-# CONFIG_RXKAD is not set
+# CONFIG_AF_RXRPC is not set
 
 #
 # Wireless
@@ -390,7 +383,7 @@ CONFIG_DUMMY=m
 # CONFIG_BONDING is not set
 # CONFIG_MACVLAN is not set
 # CONFIG_EQUALIZER is not set
-CONFIG_TUN=m
+# CONFIG_TUN is not set
 # CONFIG_VETH is not set
 # CONFIG_ARCNET is not set
 # CONFIG_PHYLIB is not set
@@ -544,6 +537,7 @@ CONFIG_SERIAL_SUNSU_CONSOLE=y
 # CONFIG_SERIAL_SUNSAB is not set
 CONFIG_SERIAL_CORE=y
 CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_CONSOLE_POLL=y
 # CONFIG_SERIAL_JSM is not set
 CONFIG_UNIX98_PTYS=y
 CONFIG_LEGACY_PTYS=y
@@ -595,6 +589,7 @@ CONFIG_SSB_POSSIBLE=y
 # Multifunction device drivers
 #
 # CONFIG_MFD_SM501 is not set
+# CONFIG_HTC_PASIC3 is not set
 
 #
 # Multimedia devices
@@ -645,10 +640,6 @@ CONFIG_USB_ARCH_HAS_EHCI=y
 # CONFIG_NEW_LEDS is not set
 # CONFIG_INFINIBAND is not set
 # CONFIG_RTC_CLASS is not set
-
-#
-# Userspace I/O
-#
 # CONFIG_UIO is not set
 
 #
@@ -680,16 +671,12 @@ CONFIG_FS_MBCACHE=y
 # CONFIG_REISERFS_FS is not set
 # CONFIG_JFS_FS is not set
 CONFIG_FS_POSIX_ACL=y
-CONFIG_XFS_FS=m
-CONFIG_XFS_QUOTA=y
-CONFIG_XFS_POSIX_ACL=y
-CONFIG_XFS_RT=y
+# CONFIG_XFS_FS is not set
 # CONFIG_OCFS2_FS is not set
 CONFIG_DNOTIFY=y
 CONFIG_INOTIFY=y
 CONFIG_INOTIFY_USER=y
 # CONFIG_QUOTA is not set
-CONFIG_QUOTACTL=y
 CONFIG_AUTOFS_FS=m
 CONFIG_AUTOFS4_FS=m
 # CONFIG_FUSE_FS is not set
@@ -725,11 +712,9 @@ CONFIG_SYSFS=y
 #
 # CONFIG_ADFS_FS is not set
 # CONFIG_AFFS_FS is not set
-# CONFIG_ECRYPT_FS is not set
 # CONFIG_HFS_FS is not set
 # CONFIG_HFSPLUS_FS is not set
-CONFIG_BEFS_FS=m
-# CONFIG_BEFS_DEBUG is not set
+# CONFIG_BEFS_FS is not set
 # CONFIG_BFS_FS is not set
 # CONFIG_EFS_FS is not set
 # CONFIG_CRAMFS is not set
@@ -744,7 +729,6 @@ CONFIG_NETWORK_FILESYSTEMS=y
 CONFIG_NFS_FS=y
 # CONFIG_NFS_V3 is not set
 # CONFIG_NFS_V4 is not set
-# CONFIG_NFS_DIRECTIO is not set
 # CONFIG_NFSD is not set
 CONFIG_ROOT_NFS=y
 CONFIG_LOCKD=y
@@ -755,16 +739,10 @@ CONFIG_SUNRPC_GSS=m
 CONFIG_RPCSEC_GSS_KRB5=m
 # CONFIG_RPCSEC_GSS_SPKM3 is not set
 # CONFIG_SMB_FS is not set
-CONFIG_CIFS=m
-# CONFIG_CIFS_STATS is not set
-# CONFIG_CIFS_WEAK_PW_HASH is not set
-# CONFIG_CIFS_XATTR is not set
-# CONFIG_CIFS_DEBUG2 is not set
-# CONFIG_CIFS_EXPERIMENTAL is not set
+# CONFIG_CIFS is not set
 # CONFIG_NCP_FS is not set
 # CONFIG_CODA_FS is not set
-CONFIG_AFS_FS=m
-# CONFIG_AFS_DEBUG is not set
+# CONFIG_AFS_FS is not set
 
 #
 # Partition Types
@@ -821,6 +799,7 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
 # CONFIG_PRINTK_TIME is not set
 # CONFIG_ENABLE_WARN_DEPRECATED is not set
 CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
 CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 # CONFIG_DEBUG_FS is not set
@@ -842,70 +821,105 @@ CONFIG_DETECT_SOFTLOCKUP=y
 CONFIG_DEBUG_BUGVERBOSE=y
 # CONFIG_DEBUG_INFO is not set
 # CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
 # CONFIG_DEBUG_LIST is not set
 # CONFIG_DEBUG_SG is not set
+CONFIG_FRAME_POINTER=y
 # CONFIG_BOOT_PRINTK_DELAY is not set
 # CONFIG_RCU_TORTURE_TEST is not set
 # CONFIG_BACKTRACE_SELF_TEST is not set
 # CONFIG_FAULT_INJECTION is not set
 # CONFIG_SAMPLES is not set
+CONFIG_KGDB=y
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_KGDB_SERIAL_CONSOLE=y
+CONFIG_KGDB_TESTS=y
+# CONFIG_KGDB_TESTS_ON_BOOT is not set
 # CONFIG_DEBUG_STACK_USAGE is not set
 
 #
 # Security options
 #
-CONFIG_KEYS=y
-# CONFIG_KEYS_DEBUG_PROC_KEYS is not set
+# CONFIG_KEYS is not set
 # CONFIG_SECURITY is not set
 # CONFIG_SECURITY_FILE_CAPABILITIES is not set
 CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
 CONFIG_CRYPTO_ALGAPI=y
 CONFIG_CRYPTO_AEAD=y
 CONFIG_CRYPTO_BLKCIPHER=y
-# CONFIG_CRYPTO_SEQIV is not set
 CONFIG_CRYPTO_HASH=y
 CONFIG_CRYPTO_MANAGER=y
+# CONFIG_CRYPTO_GF128MUL is not set
+CONFIG_CRYPTO_NULL=m
+# CONFIG_CRYPTO_CRYPTD is not set
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=m
+# CONFIG_CRYPTO_LRW is not set
+CONFIG_CRYPTO_PCBC=m
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
 CONFIG_CRYPTO_HMAC=y
 # CONFIG_CRYPTO_XCBC is not set
-CONFIG_CRYPTO_NULL=m
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=m
 CONFIG_CRYPTO_MD4=y
 CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
 CONFIG_CRYPTO_SHA1=y
 CONFIG_CRYPTO_SHA256=m
 CONFIG_CRYPTO_SHA512=m
-# CONFIG_CRYPTO_WP512 is not set
 # CONFIG_CRYPTO_TGR192 is not set
-# CONFIG_CRYPTO_GF128MUL is not set
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_CBC=y
-CONFIG_CRYPTO_PCBC=m
-# CONFIG_CRYPTO_LRW is not set
-# CONFIG_CRYPTO_XTS is not set
-# CONFIG_CRYPTO_CTR is not set
-# CONFIG_CRYPTO_GCM is not set
-# CONFIG_CRYPTO_CCM is not set
-# CONFIG_CRYPTO_CRYPTD is not set
-CONFIG_CRYPTO_DES=y
-# CONFIG_CRYPTO_FCRYPT is not set
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_TWOFISH_COMMON=m
-CONFIG_CRYPTO_SERPENT=m
+# CONFIG_CRYPTO_WP512 is not set
+
+#
+# Ciphers
+#
 CONFIG_CRYPTO_AES=m
+# CONFIG_CRYPTO_ANUBIS is not set
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_BLOWFISH=m
+# CONFIG_CRYPTO_CAMELLIA is not set
 CONFIG_CRYPTO_CAST5=m
 CONFIG_CRYPTO_CAST6=m
-# CONFIG_CRYPTO_TEA is not set
-CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
 # CONFIG_CRYPTO_KHAZAD is not set
-# CONFIG_CRYPTO_ANUBIS is not set
-# CONFIG_CRYPTO_SEED is not set
 # CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+CONFIG_CRYPTO_SERPENT=m
+# CONFIG_CRYPTO_TEA is not set
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+
+#
+# Compression
+#
 CONFIG_CRYPTO_DEFLATE=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_CRC32C=m
-# CONFIG_CRYPTO_CAMELLIA is not set
-# CONFIG_CRYPTO_TEST is not set
-CONFIG_CRYPTO_AUTHENC=y
 # CONFIG_CRYPTO_LZO is not set
 # CONFIG_CRYPTO_HW is not set
 
@@ -913,6 +927,7 @@ CONFIG_CRYPTO_AUTHENC=y
 # Library routines
 #
 CONFIG_BITREVERSE=y
+# CONFIG_GENERIC_FIND_FIRST_BIT is not set
 # CONFIG_CRC_CCITT is not set
 # CONFIG_CRC16 is not set
 # CONFIG_CRC_ITU_T is not set
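
As an illustrative aside (not part of the patch): the defconfig hunks above enable CONFIG_KGDB, CONFIG_KGDB_SERIAL_CONSOLE and the KGDB self-tests. A rough usage sketch — assuming a serial console that registers as ttyS0 and the standard kgdboc/kgdbwait parameters; the device name and cross-gdb binary here are placeholders — looks like:

    # target kernel command line
    console=ttyS0,115200 kgdboc=ttyS0,115200 kgdbwait

    # host side, once the target stops at the early KGDB breakpoint
    $ sparc-linux-gdb vmlinux
    (gdb) set remotebaud 115200
    (gdb) target remote /dev/ttyS0
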
index 59700aaaae93253fa0e4cfec0dede2f29a94a93a..6e03a2a7863c77e4a999a449b6e28968a75e9de7 100644 (file)
@@ -25,3 +25,4 @@ obj-$(CONFIG_PCI) += ebus.o
 obj-$(CONFIG_SUN_PM) += apc.o pmc.o
 obj-$(CONFIG_MODULES) += module.o sparc_ksyms.o
 obj-$(CONFIG_SPARC_LED) += led.o
+obj-$(CONFIG_KGDB) += kgdb.o
index 6773ed76e414bec7b851d94cebddf2fec48ff1e7..cd3f7694e9b9240e20455a5cb3933c1a343b0f3b 100644 (file)
 
 #include <linux/sched.h>
 // #include <linux/mm.h>
-
-#define DEFINE(sym, val) \
-       asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
+#include <linux/kbuild.h>
 
 int foo(void)
 {
index 484c83d23eef3e478f9e1ada60af29932cdb82d6..57d1bbdd0bd2aa5f996b9df420ff50415d64573e 100644 (file)
@@ -12,7 +12,6 @@
 #include <asm/head.h>
 #include <asm/asi.h>
 #include <asm/smp.h>
-#include <asm/kgdb.h>
 #include <asm/contregs.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
        _SV; _SV; _SV; _SV; _SV; _SV; _SV; \
        _RS; _RS; _RS; _RS; _RS; _RS; _RS;
 
-/* First, KGDB low level things.  This is a rewrite
- * of the routines found in the sparc-stub.c asm() statement
- * from the gdb distribution.  This is also dual-purpose
- * as a software trap for userlevel programs.
- */
-       .data
-       .align  4
-
-in_trap_handler:
-       .word   0
-
        .text
-       .align  4
 
-#if 0 /* kgdb is dropped from 2.5.33 */
-! This function is called when any SPARC trap (except window overflow or
-! underflow) occurs.  It makes sure that the invalid register window is still
-! available before jumping into C code.  It will also restore the world if you
-! return from handle_exception.
-
-       .globl  trap_low
-trap_low:
-       rd      %wim, %l3
-       SAVE_ALL
-
-       sethi   %hi(in_trap_handler), %l4
-       ld      [%lo(in_trap_handler) + %l4], %l5
-       inc     %l5
-       st      %l5, [%lo(in_trap_handler) + %l4]
-
-       /* Make sure kgdb sees the same state we just saved. */
-       LOAD_PT_GLOBALS(sp)
-       LOAD_PT_INS(sp)
-       ld      [%sp + STACKFRAME_SZ + PT_Y], %l4
-       ld      [%sp + STACKFRAME_SZ + PT_WIM], %l3
-       ld      [%sp + STACKFRAME_SZ + PT_PSR], %l0
-       ld      [%sp + STACKFRAME_SZ + PT_PC], %l1
-       ld      [%sp + STACKFRAME_SZ + PT_NPC], %l2
-       rd      %tbr, %l5       /* Never changes... */
-
-       /* Make kgdb exception frame. */        
-       sub     %sp,(16+1+6+1+72)*4,%sp ! Make room for input & locals
-                                       ! + hidden arg + arg spill
-                                       ! + doubleword alignment
-                                       ! + registers[72] local var
-       SAVE_KGDB_GLOBALS(sp)
-       SAVE_KGDB_INS(sp)
-       SAVE_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)
-
-       /* We are increasing PIL, so two writes. */
-       or      %l0, PSR_PIL, %l0
-       wr      %l0, 0, %psr
-       WRITE_PAUSE
-       wr      %l0, PSR_ET, %psr
-       WRITE_PAUSE
-
-       call    handle_exception
-        add    %sp, STACKFRAME_SZ, %o0 ! Pass address of registers
-
-       /* Load new kgdb register set. */
-       LOAD_KGDB_GLOBALS(sp)
-       LOAD_KGDB_INS(sp)
-       LOAD_KGDB_SREGS(sp, l4, l0, l3, l5, l1, l2)
-       wr      %l4, 0x0, %y
-
-       sethi   %hi(in_trap_handler), %l4
-       ld      [%lo(in_trap_handler) + %l4], %l5
-       dec     %l5
-       st      %l5, [%lo(in_trap_handler) + %l4]
-
-       add     %sp,(16+1+6+1+72)*4,%sp ! Undo the kgdb trap frame.
-
-       /* Now take what kgdb did and place it into the pt_regs
-        * frame which SparcLinux RESTORE_ALL understands.,
-        */
-       STORE_PT_INS(sp)
-       STORE_PT_GLOBALS(sp)
-       STORE_PT_YREG(sp, g2)
-       STORE_PT_PRIV(sp, l0, l1, l2)
-
-       RESTORE_ALL
+#ifdef CONFIG_KGDB
+       .align  4
+       .globl          arch_kgdb_breakpoint
+       .type           arch_kgdb_breakpoint,#function
+arch_kgdb_breakpoint:
+       ta              0x7d
+       retl
+        nop
+       .size           arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
 #endif
 
 #if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE)
-       .text
        .align  4
        .globl  floppy_hardint
 floppy_hardint:
@@ -1596,6 +1524,23 @@ breakpoint_trap:
 
        RESTORE_ALL
 
+#ifdef CONFIG_KGDB
+       .align  4
+       .globl  kgdb_trap_low
+       .type   kgdb_trap_low,#function
+kgdb_trap_low:
+       rd      %wim,%l3
+       SAVE_ALL
+       wr      %l0, PSR_ET, %psr
+       WRITE_PAUSE
+
+       call    kgdb_trap
+        add    %sp, STACKFRAME_SZ, %o0
+
+       RESTORE_ALL
+       .size   kgdb_trap_low,.-kgdb_trap_low
+#endif
+
        .align  4
        .globl  __handle_exception, flush_patch_exception
 __handle_exception:
@@ -1698,4 +1643,22 @@ pcic_nmi_trap_patch:
 
 #endif /* CONFIG_PCI */
 
+       .globl  flushw_all
+flushw_all:
+       save    %sp, -0x40, %sp
+       save    %sp, -0x40, %sp
+       save    %sp, -0x40, %sp
+       save    %sp, -0x40, %sp
+       save    %sp, -0x40, %sp
+       save    %sp, -0x40, %sp
+       save    %sp, -0x40, %sp
+       restore
+       restore
+       restore
+       restore
+       restore
+       restore
+       ret
+        restore
+
 /* End of entry.S */
index b7f1e81c8ff29e4d1b8672dbadbf4305db170e93..8bec05fa5795b14003ce2525283c86b93dfd608d 100644 (file)
@@ -191,7 +191,8 @@ t_bade8:BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xe
 t_baded:BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
 t_badf2:BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
 t_badf7:BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
-t_badfc:BAD_TRAP(0xfc) BAD_TRAP(0xfd)
+t_badfc:BAD_TRAP(0xfc)
+t_kgdb:        KGDB_TRAP(0xfd)
 dbtrap:        BAD_TRAP(0xfe)                      /* Debugger/PROM breakpoint #1   */
 dbtrap2:BAD_TRAP(0xff)                      /* Debugger/PROM breakpoint #2   */        
 
@@ -267,7 +268,7 @@ trapbase_cpu1:
        BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
        BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
        BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
-       BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
+       BAD_TRAP(0xfc) KGDB_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
 
 trapbase_cpu2:
        BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
@@ -335,7 +336,7 @@ trapbase_cpu2:
        BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
        BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
        BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
-       BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
+       BAD_TRAP(0xfc) KGDB_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
 
 trapbase_cpu3:
        BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction)
@@ -403,7 +404,7 @@ trapbase_cpu3:
        BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1)
        BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6)
        BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb)
-       BAD_TRAP(0xfc) BAD_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
+       BAD_TRAP(0xfc) KGDB_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff)
 
 #endif
        .align PAGE_SIZE
diff --git a/arch/sparc/kernel/kgdb.c b/arch/sparc/kernel/kgdb.c
new file mode 100644 (file)
index 0000000..757805c
--- /dev/null
@@ -0,0 +1,164 @@
+/* kgdb.c: KGDB support for 32-bit sparc.
+ *
+ * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+
+#include <asm/kdebug.h>
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+
+extern unsigned long trapbase;
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+       struct reg_window *win;
+       int i;
+
+       gdb_regs[GDB_G0] = 0;
+       for (i = 0; i < 15; i++)
+               gdb_regs[GDB_G1 + i] = regs->u_regs[UREG_G1 + i];
+
+       win = (struct reg_window *) regs->u_regs[UREG_FP];
+       for (i = 0; i < 8; i++)
+               gdb_regs[GDB_L0 + i] = win->locals[i];
+       for (i = 0; i < 8; i++)
+               gdb_regs[GDB_I0 + i] = win->ins[i];
+
+       for (i = GDB_F0; i <= GDB_F31; i++)
+               gdb_regs[i] = 0;
+
+       gdb_regs[GDB_Y] = regs->y;
+       gdb_regs[GDB_PSR] = regs->psr;
+       gdb_regs[GDB_WIM] = 0;
+       gdb_regs[GDB_TBR] = (unsigned long) &trapbase;
+       gdb_regs[GDB_PC] = regs->pc;
+       gdb_regs[GDB_NPC] = regs->npc;
+       gdb_regs[GDB_FSR] = 0;
+       gdb_regs[GDB_CSR] = 0;
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+       struct thread_info *t = task_thread_info(p);
+       struct reg_window *win;
+       int i;
+
+       for (i = GDB_G0; i < GDB_G6; i++)
+               gdb_regs[i] = 0;
+       gdb_regs[GDB_G6] = (unsigned long) t;
+       gdb_regs[GDB_G7] = 0;
+       for (i = GDB_O0; i < GDB_SP; i++)
+               gdb_regs[i] = 0;
+       gdb_regs[GDB_SP] = t->ksp;
+       gdb_regs[GDB_O7] = 0;
+
+       win = (struct reg_window *) t->ksp;
+       for (i = 0; i < 8; i++)
+               gdb_regs[GDB_L0 + i] = win->locals[i];
+       for (i = 0; i < 8; i++)
+               gdb_regs[GDB_I0 + i] = win->ins[i];
+
+       for (i = GDB_F0; i <= GDB_F31; i++)
+               gdb_regs[i] = 0;
+
+       gdb_regs[GDB_Y] = 0;
+
+       gdb_regs[GDB_PSR] = t->kpsr;
+       gdb_regs[GDB_WIM] = t->kwim;
+       gdb_regs[GDB_TBR] = (unsigned long) &trapbase;
+       gdb_regs[GDB_PC] = t->kpc;
+       gdb_regs[GDB_NPC] = t->kpc + 4;
+       gdb_regs[GDB_FSR] = 0;
+       gdb_regs[GDB_CSR] = 0;
+}
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+       struct reg_window *win;
+       int i;
+
+       for (i = 0; i < 15; i++)
+               regs->u_regs[UREG_G1 + i] = gdb_regs[GDB_G1 + i];
+
+       /* If the PSR register is changing, we have to preserve
+        * the CWP field, otherwise window save/restore explodes.
+        */
+       if (regs->psr != gdb_regs[GDB_PSR]) {
+               unsigned long cwp = regs->psr & PSR_CWP;
+
+               regs->psr = (gdb_regs[GDB_PSR] & ~PSR_CWP) | cwp;
+       }
+
+       regs->pc = gdb_regs[GDB_PC];
+       regs->npc = gdb_regs[GDB_NPC];
+       regs->y = gdb_regs[GDB_Y];
+
+       win = (struct reg_window *) regs->u_regs[UREG_FP];
+       for (i = 0; i < 8; i++)
+               win->locals[i] = gdb_regs[GDB_L0 + i];
+       for (i = 0; i < 8; i++)
+               win->ins[i] = gdb_regs[GDB_I0 + i];
+}
+
+int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+                              char *remcomInBuffer, char *remcomOutBuffer,
+                              struct pt_regs *linux_regs)
+{
+       unsigned long addr;
+       char *ptr;
+
+       switch (remcomInBuffer[0]) {
+       case 'c':
+               /* try to read optional parameter, pc unchanged if no parm */
+               ptr = &remcomInBuffer[1];
+               if (kgdb_hex2long(&ptr, &addr)) {
+                       linux_regs->pc = addr;
+                       linux_regs->npc = addr + 4;
+               }
+               /* fallthru */
+
+       case 'D':
+       case 'k':
+               if (linux_regs->pc == (unsigned long) arch_kgdb_breakpoint) {
+                       linux_regs->pc = linux_regs->npc;
+                       linux_regs->npc += 4;
+               }
+               return 0;
+       }
+       return -1;
+}
+
+extern void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
+
+asmlinkage void kgdb_trap(struct pt_regs *regs)
+{
+       unsigned long flags;
+
+       if (user_mode(regs)) {
+               do_hw_interrupt(regs, 0xfd);
+               return;
+       }
+
+       flushw_all();
+
+       local_irq_save(flags);
+       kgdb_handle_exception(0x172, SIGTRAP, 0, regs);
+       local_irq_restore(flags);
+}
+
+int kgdb_arch_init(void)
+{
+       return 0;
+}
+
+void kgdb_arch_exit(void)
+{
+}
+
+struct kgdb_arch arch_kgdb_ops = {
+       /* Breakpoint instruction: ta 0x7d */
+       .gdb_bpt_instr          = { 0x91, 0xd0, 0x20, 0x7d },
+};
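
As an illustrative aside (not part of the patch): the four bytes in gdb_bpt_instr are simply the big-endian encoding of the "ta 0x7d" instruction that arch_kgdb_breakpoint executes in entry.S. A stand-alone user-space sketch to double-check the SPARC V8 Ticc encoding — every name below is local to the example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Ticc format: op=2, cond=8 (TA), op3=0x3a, rs1=%g0, i=1, imm7 */
	uint32_t insn = (2u << 30) | (8u << 25) | (0x3au << 19) |
	                (0u << 14) | (1u << 13) | 0x7du;

	printf("ta 0x7d -> 0x%08x\n", insn);	/* prints 0x91d0207d */
	return insn == 0x91d0207du ? 0 : 1;
}

The byte sequence { 0x91, 0xd0, 0x20, 0x7d } is that word in memory order on big-endian SPARC.
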
index 70c0dd22491d2a5e4d3e4aaf1ebdc72ec7ca855f..e7f35198ae34943f5adfeeedd7223014dfe146ff 100644 (file)
@@ -357,8 +357,6 @@ void flush_thread(void)
 {
        current_thread_info()->w_saved = 0;
 
-       /* No new signal delivery by default */
-       current->thread.new_signal = 0;
 #ifndef CONFIG_SMP
        if(last_task_used_math == current) {
 #else
index 3e849e8e34803778a1e6d9032fa807a41b0e1912..3c312290c3c288eda469d5272e71c3ae46226321 100644 (file)
@@ -1,5 +1,4 @@
-/*  $Id: signal.c,v 1.110 2002/02/08 03:57:14 davem Exp $
- *  linux/arch/sparc/kernel/signal.c
+/*  linux/arch/sparc/kernel/signal.c
  *
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -32,37 +31,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
                   void *fpqueue, unsigned long *fpqdepth);
 extern void fpload(unsigned long *fpregs, unsigned long *fsr);
 
-/* Signal frames: the original one (compatible with SunOS):
- *
- * Set up a signal frame... Make the stack look the way SunOS
- * expects it to look which is basically:
- *
- * ---------------------------------- <-- %sp at signal time
- * Struct sigcontext
- * Signal address
- * Ptr to sigcontext area above
- * Signal code
- * The signal number itself
- * One register window
- * ---------------------------------- <-- New %sp
- */
-struct signal_sframe {
-       struct reg_window       sig_window;
-       int                     sig_num;
-       int                     sig_code;
-       struct sigcontext __user *sig_scptr;
-       int                     sig_address;
-       struct sigcontext       sig_context;
-       unsigned int            extramask[_NSIG_WORDS - 1];
-};
-
-/* 
- * And the new one, intended to be used for Linux applications only
- * (we have enough in there to work with clone).
- * All the interesting bits are in the info field.
- */
-
-struct new_signal_frame {
+struct signal_frame {
        struct sparc_stackf     ss;
        __siginfo_t             info;
        __siginfo_fpu_t __user  *fpu_save;
@@ -85,8 +54,7 @@ struct rt_signal_frame {
 };
 
 /* Align macros */
-#define SF_ALIGNEDSZ  (((sizeof(struct signal_sframe) + 7) & (~7)))
-#define NF_ALIGNEDSZ  (((sizeof(struct new_signal_frame) + 7) & (~7)))
+#define SF_ALIGNEDSZ  (((sizeof(struct signal_frame) + 7) & (~7)))
 #define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
 
 static int _sigpause_common(old_sigset_t set)
@@ -141,15 +109,20 @@ restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
        return err;
 }
 
-static inline void do_new_sigreturn (struct pt_regs *regs)
+asmlinkage void do_sigreturn(struct pt_regs *regs)
 {
-       struct new_signal_frame __user *sf;
+       struct signal_frame __user *sf;
        unsigned long up_psr, pc, npc;
        sigset_t set;
        __siginfo_fpu_t __user *fpu_save;
        int err;
 
-       sf = (struct new_signal_frame __user *) regs->u_regs[UREG_FP];
+       /* Always make any pending restarted system calls return -EINTR */
+       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+       synchronize_user_stack();
+
+       sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
 
        /* 1. Make sure we are not getting garbage from the user */
        if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
@@ -198,73 +171,6 @@ segv_and_exit:
        force_sig(SIGSEGV, current);
 }
 
-asmlinkage void do_sigreturn(struct pt_regs *regs)
-{
-       struct sigcontext __user *scptr;
-       unsigned long pc, npc, psr;
-       sigset_t set;
-       int err;
-
-       /* Always make any pending restarted system calls return -EINTR */
-       current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-       synchronize_user_stack();
-
-       if (current->thread.new_signal) {
-               do_new_sigreturn(regs);
-               return;
-       }
-
-       scptr = (struct sigcontext __user *) regs->u_regs[UREG_I0];
-
-       /* Check sanity of the user arg. */
-       if (!access_ok(VERIFY_READ, scptr, sizeof(struct sigcontext)) ||
-           (((unsigned long) scptr) & 3))
-               goto segv_and_exit;
-
-       err = __get_user(pc, &scptr->sigc_pc);
-       err |= __get_user(npc, &scptr->sigc_npc);
-
-       if ((pc | npc) & 3)
-               goto segv_and_exit;
-
-       /* This is pretty much atomic, no amount locking would prevent
-        * the races which exist anyways.
-        */
-       err |= __get_user(set.sig[0], &scptr->sigc_mask);
-       /* Note that scptr + 1 points to extramask */
-       err |= __copy_from_user(&set.sig[1], scptr + 1,
-                               (_NSIG_WORDS - 1) * sizeof(unsigned int));
-       
-       if (err)
-               goto segv_and_exit;
-
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       spin_lock_irq(&current->sighand->siglock);
-       current->blocked = set;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-
-       regs->pc = pc;
-       regs->npc = npc;
-
-       err = __get_user(regs->u_regs[UREG_FP], &scptr->sigc_sp);
-       err |= __get_user(regs->u_regs[UREG_I0], &scptr->sigc_o0);
-       err |= __get_user(regs->u_regs[UREG_G1], &scptr->sigc_g1);
-
-       /* User can only change condition codes in %psr. */
-       err |= __get_user(psr, &scptr->sigc_psr);
-       if (err)
-               goto segv_and_exit;
-               
-       regs->psr &= ~(PSR_ICC);
-       regs->psr |= (psr & PSR_ICC);
-       return;
-
-segv_and_exit:
-       force_sig(SIGSEGV, current);
-}
-
 asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
 {
        struct rt_signal_frame __user *sf;
@@ -351,128 +257,6 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re
        return (void __user *)(sp - framesize);
 }
 
-static inline void
-setup_frame(struct sigaction *sa, struct pt_regs *regs, int signr, sigset_t *oldset, siginfo_t *info)
-{
-       struct signal_sframe __user *sframep;
-       struct sigcontext __user *sc;
-       int window = 0, err;
-       unsigned long pc = regs->pc;
-       unsigned long npc = regs->npc;
-       struct thread_info *tp = current_thread_info();
-       void __user *sig_address;
-       int sig_code;
-
-       synchronize_user_stack();
-       sframep = (struct signal_sframe __user *)
-               get_sigframe(sa, regs, SF_ALIGNEDSZ);
-       if (invalid_frame_pointer(sframep, sizeof(*sframep))){
-               /* Don't change signal code and address, so that
-                * post mortem debuggers can have a look.
-                */
-               goto sigill_and_return;
-       }
-
-       sc = &sframep->sig_context;
-
-       /* We've already made sure frame pointer isn't in kernel space... */
-       err  = __put_user((sas_ss_flags(regs->u_regs[UREG_FP]) == SS_ONSTACK),
-                        &sc->sigc_onstack);
-       err |= __put_user(oldset->sig[0], &sc->sigc_mask);
-       err |= __copy_to_user(sframep->extramask, &oldset->sig[1],
-                             (_NSIG_WORDS - 1) * sizeof(unsigned int));
-       err |= __put_user(regs->u_regs[UREG_FP], &sc->sigc_sp);
-       err |= __put_user(pc, &sc->sigc_pc);
-       err |= __put_user(npc, &sc->sigc_npc);
-       err |= __put_user(regs->psr, &sc->sigc_psr);
-       err |= __put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
-       err |= __put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
-       err |= __put_user(tp->w_saved, &sc->sigc_oswins);
-       if (tp->w_saved)
-               for (window = 0; window < tp->w_saved; window++) {
-                       put_user((char *)tp->rwbuf_stkptrs[window],
-                                &sc->sigc_spbuf[window]);
-                       err |= __copy_to_user(&sc->sigc_wbuf[window],
-                                             &tp->reg_window[window],
-                                             sizeof(struct reg_window));
-               }
-       else
-               err |= __copy_to_user(sframep, (char *) regs->u_regs[UREG_FP],
-                                     sizeof(struct reg_window));
-
-       tp->w_saved = 0; /* So process is allowed to execute. */
-
-       err |= __put_user(signr, &sframep->sig_num);
-       sig_address = NULL;
-       sig_code = 0;
-       if (SI_FROMKERNEL (info) && (info->si_code & __SI_MASK) == __SI_FAULT) {
-               sig_address = info->si_addr;
-               switch (signr) {
-               case SIGSEGV:
-                       switch (info->si_code) {
-                       case SEGV_MAPERR: sig_code = SUBSIG_NOMAPPING; break;
-                       default: sig_code = SUBSIG_PROTECTION; break;
-                       }
-                       break;
-               case SIGILL:
-                       switch (info->si_code) {
-                       case ILL_ILLOPC: sig_code = SUBSIG_ILLINST; break;
-                       case ILL_PRVOPC: sig_code = SUBSIG_PRIVINST; break;
-                       case ILL_ILLTRP: sig_code = SUBSIG_BADTRAP(info->si_trapno); break;
-                       default: sig_code = SUBSIG_STACK; break;
-                       }
-                       break;
-               case SIGFPE:
-                       switch (info->si_code) {
-                       case FPE_INTDIV: sig_code = SUBSIG_IDIVZERO; break;
-                       case FPE_INTOVF: sig_code = SUBSIG_FPINTOVFL; break;
-                       case FPE_FLTDIV: sig_code = SUBSIG_FPDIVZERO; break;
-                       case FPE_FLTOVF: sig_code = SUBSIG_FPOVFLOW; break;
-                       case FPE_FLTUND: sig_code = SUBSIG_FPUNFLOW; break;
-                       case FPE_FLTRES: sig_code = SUBSIG_FPINEXACT; break;
-                       case FPE_FLTINV: sig_code = SUBSIG_FPOPERROR; break;
-                       default: sig_code = SUBSIG_FPERROR; break;
-                       }
-                       break;
-               case SIGBUS:
-                       switch (info->si_code) {
-                       case BUS_ADRALN: sig_code = SUBSIG_ALIGNMENT; break;
-                       case BUS_ADRERR: sig_code = SUBSIG_MISCERROR; break;
-                       default: sig_code = SUBSIG_BUSTIMEOUT; break;
-                       }
-                       break;
-               case SIGEMT:
-                       switch (info->si_code) {
-                       case EMT_TAGOVF: sig_code = SUBSIG_TAG; break;
-                       }
-                       break;
-               case SIGSYS:
-                       if (info->si_code == (__SI_FAULT|0x100)) {
-                               sig_code = info->si_trapno;
-                               break;
-                       }
-               default:
-                       sig_address = NULL;
-               }
-       }
-       err |= __put_user((unsigned long)sig_address, &sframep->sig_address);
-       err |= __put_user(sig_code, &sframep->sig_code);
-       err |= __put_user(sc, &sframep->sig_scptr);
-       if (err)
-               goto sigsegv;
-
-       regs->u_regs[UREG_FP] = (unsigned long) sframep;
-       regs->pc = (unsigned long) sa->sa_handler;
-       regs->npc = (regs->pc + 4);
-       return;
-
-sigill_and_return:
-       do_exit(SIGILL);
-sigsegv:
-       force_sigsegv(signr, current);
-}
-
-
 static inline int
 save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 {
@@ -508,21 +292,20 @@ save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
        return err;
 }
 
-static inline void
-new_setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
-               int signo, sigset_t *oldset)
+static void setup_frame(struct k_sigaction *ka, struct pt_regs *regs,
+                       int signo, sigset_t *oldset)
 {
-       struct new_signal_frame __user *sf;
+       struct signal_frame __user *sf;
        int sigframe_size, err;
 
        /* 1. Make sure everything is clean */
        synchronize_user_stack();
 
-       sigframe_size = NF_ALIGNEDSZ;
+       sigframe_size = SF_ALIGNEDSZ;
        if (!used_math())
                sigframe_size -= sizeof(__siginfo_fpu_t);
 
-       sf = (struct new_signal_frame __user *)
+       sf = (struct signal_frame __user *)
                get_sigframe(&ka->sa, regs, sigframe_size);
 
        if (invalid_frame_pointer(sf, sigframe_size))
@@ -586,9 +369,8 @@ sigsegv:
        force_sigsegv(signo, current);
 }
 
-static inline void
-new_setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
-                  int signo, sigset_t *oldset, siginfo_t *info)
+static void setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
+                          int signo, sigset_t *oldset, siginfo_t *info)
 {
        struct rt_signal_frame __user *sf;
        int sigframe_size;
@@ -674,11 +456,9 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
              siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
 {
        if (ka->sa.sa_flags & SA_SIGINFO)
-               new_setup_rt_frame(ka, regs, signr, oldset, info);
-       else if (current->thread.new_signal)
-               new_setup_frame(ka, regs, signr, oldset);
+               setup_rt_frame(ka, regs, signr, oldset, info);
        else
-               setup_frame(&ka->sa, regs, signr, oldset, info);
+               setup_frame(ka, regs, signr, oldset);
 
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
diff --git a/arch/sparc/kernel/sparc-stub.c b/arch/sparc/kernel/sparc-stub.c
deleted file mode 100644 (file)
index e84f815..0000000
+++ /dev/null
@@ -1,724 +0,0 @@
-/* $Id: sparc-stub.c,v 1.28 2001/10/30 04:54:21 davem Exp $
- * sparc-stub.c:  KGDB support for the Linux kernel.
- *
- * Modifications to run under Linux
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- *
- * This file originally came from the gdb sources, and the
- * copyright notices have been retained below.
- */
-
-/****************************************************************************
-
-               THIS SOFTWARE IS NOT COPYRIGHTED
-
-   HP offers the following for use in the public domain.  HP makes no
-   warranty with regard to the software or its performance and the
-   user accepts the software "AS IS" with all faults.
-
-   HP DISCLAIMS ANY WARRANTIES, EXPRESS OR IMPLIED, WITH REGARD
-   TO THIS SOFTWARE INCLUDING BUT NOT LIMITED TO THE WARRANTIES
-   OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
-
-****************************************************************************/
-
-/****************************************************************************
- *  Header: remcom.c,v 1.34 91/03/09 12:29:49 glenne Exp $
- *
- *  Module name: remcom.c $
- *  Revision: 1.34 $
- *  Date: 91/03/09 12:29:49 $
- *  Contributor:     Lake Stevens Instrument Division$
- *
- *  Description:     low level support for gdb debugger. $
- *
- *  Considerations:  only works on target hardware $
- *
- *  Written by:      Glenn Engel $
- *  ModuleState:     Experimental $
- *
- *  NOTES:           See Below $
- *
- *  Modified for SPARC by Stu Grossman, Cygnus Support.
- *
- *  This code has been extensively tested on the Fujitsu SPARClite demo board.
- *
- *  To enable debugger support, two things need to happen.  One, a
- *  call to set_debug_traps() is necessary in order to allow any breakpoints
- *  or error conditions to be properly intercepted and reported to gdb.
- *  Two, a breakpoint needs to be generated to begin communication.  This
- *  is most easily accomplished by a call to breakpoint().  Breakpoint()
- *  simulates a breakpoint by executing a trap #1.
- *
- *************
- *
- *    The following gdb commands are supported:
- *
- * command          function                               Return value
- *
- *    g             return the value of the CPU registers  hex data or ENN
- *    G             set the value of the CPU registers     OK or ENN
- *
- *    mAA..AA,LLLL  Read LLLL bytes at address AA..AA      hex data or ENN
- *    MAA..AA,LLLL: Write LLLL bytes at address AA.AA      OK or ENN
- *
- *    c             Resume at current address              SNN   ( signal NN)
- *    cAA..AA       Continue at address AA..AA             SNN
- *
- *    s             Step one instruction                   SNN
- *    sAA..AA       Step one instruction from AA..AA       SNN
- *
- *    k             kill
- *
- *    ?             What was the last sigval ?             SNN   (signal NN)
- *
- *    bBB..BB      Set baud rate to BB..BB                OK or BNN, then sets
- *                                                        baud rate
- *
- * All commands and responses are sent with a packet which includes a
- * checksum.  A packet consists of
- *
- * $<packet info>#<checksum>.
- *
- * where
- * <packet info> :: <characters representing the command or response>
- * <checksum>    :: < two hex digits computed as modulo 256 sum of <packetinfo>>
- *
- * When a packet is received, it is first acknowledged with either '+' or '-'.
- * '+' indicates a successful transfer.  '-' indicates a failed transfer.
- *
- * Example:
- *
- * Host:                  Reply:
- * $m0,10#2a               +$00010203040506070809101112131415#42
- *
- ****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-
-#include <asm/system.h>
-#include <asm/signal.h>
-#include <asm/oplib.h>
-#include <asm/head.h>
-#include <asm/traps.h>
-#include <asm/vac-ops.h>
-#include <asm/kgdb.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-
-/*
- *
- * external low-level support routines
- */
-
-extern void putDebugChar(char);   /* write a single character      */
-extern char getDebugChar(void);   /* read and return a single char */
-
-/*
- * BUFMAX defines the maximum number of characters in inbound/outbound buffers
- * at least NUMREGBYTES*2 are needed for register packets
- */
-#define BUFMAX 2048
-
-static int initialized;        /* !0 means we've been initialized */
-
-static const char hexchars[]="0123456789abcdef";
-
-#define NUMREGS 72
-
-/* Number of bytes of registers.  */
-#define NUMREGBYTES (NUMREGS * 4)
-enum regnames {G0, G1, G2, G3, G4, G5, G6, G7,
-                O0, O1, O2, O3, O4, O5, SP, O7,
-                L0, L1, L2, L3, L4, L5, L6, L7,
-                I0, I1, I2, I3, I4, I5, FP, I7,
-
-                F0, F1, F2, F3, F4, F5, F6, F7,
-                F8, F9, F10, F11, F12, F13, F14, F15,
-                F16, F17, F18, F19, F20, F21, F22, F23,
-                F24, F25, F26, F27, F28, F29, F30, F31,
-                Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR };
-
-
-extern void trap_low(void);  /* In arch/sparc/kernel/entry.S */
-
-unsigned long get_sun4cpte(unsigned long addr)
-{
-       unsigned long entry;
-
-       __asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" : 
-                            "=r" (entry) :
-                            "r" (addr), "i" (ASI_PTE));
-       return entry;
-}
-
-unsigned long get_sun4csegmap(unsigned long addr)
-{
-       unsigned long entry;
-
-       __asm__ __volatile__("\n\tlduba [%1] %2, %0\n\t" : 
-                            "=r" (entry) :
-                            "r" (addr), "i" (ASI_SEGMAP));
-       return entry;
-}
-
-#if 0
-/* Have to sort this out. This cannot be done after initialization. */
-static void flush_cache_all_nop(void) {}
-#endif
-
-/* Place where we save old trap entries for restoration */
-struct tt_entry kgdb_savettable[256];
-typedef void (*trapfunc_t)(void);
-
-/* Helper routine for manipulation of kgdb_savettable */
-static inline void copy_ttentry(struct tt_entry *src, struct tt_entry *dest)
-{
-       dest->inst_one = src->inst_one;
-       dest->inst_two = src->inst_two;
-       dest->inst_three = src->inst_three;
-       dest->inst_four = src->inst_four;
-}
-
-/* Initialize the kgdb_savettable so that debugging can commence */
-static void eh_init(void)
-{
-       int i;
-
-       for(i=0; i < 256; i++)
-               copy_ttentry(&sparc_ttable[i], &kgdb_savettable[i]);
-}
-
-/* Install an exception handler for kgdb */
-static void exceptionHandler(int tnum, trapfunc_t trap_entry)
-{
-       unsigned long te_addr = (unsigned long) trap_entry;
-
-       /* Make new vector */
-       sparc_ttable[tnum].inst_one =
-               SPARC_BRANCH((unsigned long) te_addr,
-                            (unsigned long) &sparc_ttable[tnum].inst_one);
-       sparc_ttable[tnum].inst_two = SPARC_RD_PSR_L0;
-       sparc_ttable[tnum].inst_three = SPARC_NOP;
-       sparc_ttable[tnum].inst_four = SPARC_NOP;
-}
-
-/* Convert ch from a hex digit to an int */
-static int
-hex(unsigned char ch)
-{
-       if (ch >= 'a' && ch <= 'f')
-               return ch-'a'+10;
-       if (ch >= '0' && ch <= '9')
-               return ch-'0';
-       if (ch >= 'A' && ch <= 'F')
-               return ch-'A'+10;
-       return -1;
-}
-
-/* scan for the sequence $<data>#<checksum>     */
-static void
-getpacket(char *buffer)
-{
-       unsigned char checksum;
-       unsigned char xmitcsum;
-       int i;
-       int count;
-       unsigned char ch;
-
-       do {
-               /* wait around for the start character, ignore all other characters */
-               while ((ch = (getDebugChar() & 0x7f)) != '$') ;
-
-               checksum = 0;
-               xmitcsum = -1;
-
-               count = 0;
-
-               /* now, read until a # or end of buffer is found */
-               while (count < BUFMAX) {
-                       ch = getDebugChar() & 0x7f;
-                       if (ch == '#')
-                               break;
-                       checksum = checksum + ch;
-                       buffer[count] = ch;
-                       count = count + 1;
-               }
-
-               if (count >= BUFMAX)
-                       continue;
-
-               buffer[count] = 0;
-
-               if (ch == '#') {
-                       xmitcsum = hex(getDebugChar() & 0x7f) << 4;
-                       xmitcsum |= hex(getDebugChar() & 0x7f);
-                       if (checksum != xmitcsum)
-                               putDebugChar('-');      /* failed checksum */
-                       else {
-                               putDebugChar('+'); /* successful transfer */
-                               /* if a sequence char is present, reply the ID */
-                               if (buffer[2] == ':') {
-                                       putDebugChar(buffer[0]);
-                                       putDebugChar(buffer[1]);
-                                       /* remove sequence chars from buffer */
-                                       count = strlen(buffer);
-                                       for (i=3; i <= count; i++)
-                                               buffer[i-3] = buffer[i];
-                               }
-                       }
-               }
-       } while (checksum != xmitcsum);
-}
-
-/* send the packet in buffer.  */
-
-static void
-putpacket(unsigned char *buffer)
-{
-       unsigned char checksum;
-       int count;
-       unsigned char ch, recv;
-
-       /*  $<packet info>#<checksum>. */
-       do {
-               putDebugChar('$');
-               checksum = 0;
-               count = 0;
-
-               while ((ch = buffer[count])) {
-                       putDebugChar(ch);
-                       checksum += ch;
-                       count += 1;
-               }
-
-               putDebugChar('#');
-               putDebugChar(hexchars[checksum >> 4]);
-               putDebugChar(hexchars[checksum & 0xf]);
-               recv = getDebugChar();
-       } while ((recv & 0x7f) != '+');
-}
-
-static char remcomInBuffer[BUFMAX];
-static char remcomOutBuffer[BUFMAX];
-
-/* Convert the memory pointed to by mem into hex, placing result in buf.
- * Return a pointer to the last char put in buf (null), in case of mem fault,
- * return 0.
- */
-
-static unsigned char *
-mem2hex(char *mem, char *buf, int count)
-{
-       unsigned char ch;
-
-       while (count-- > 0) {
-               /* This assembler code is basically:  ch = *mem++;
-                * except that we use the SPARC/Linux exception table
-                * mechanism (see how "fixup" works in kernel_mna_trap_fault)
-                * to arrange for a "return 0" upon a memory fault
-                */
-               __asm__(
-                       "\n1:\n\t"
-                       "ldub [%0], %1\n\t"
-                       "inc %0\n\t"
-                       ".section .fixup,#alloc,#execinstr\n\t"
-                       ".align 4\n"
-                       "2:\n\t"
-                       "retl\n\t"
-                       " mov 0, %%o0\n\t"
-                       ".section __ex_table, #alloc\n\t"
-                       ".align 4\n\t"
-                       ".word 1b, 2b\n\t"
-                       ".text\n"
-                       : "=r" (mem), "=r" (ch) : "0" (mem));
-               *buf++ = hexchars[ch >> 4];
-               *buf++ = hexchars[ch & 0xf];
-       }
-
-       *buf = 0;
-       return buf;
-}
-
-/* convert the hex array pointed to by buf into binary to be placed in mem
- * return a pointer to the character AFTER the last byte written.
-*/
-static char *
-hex2mem(char *buf, char *mem, int count)
-{
-       int i;
-       unsigned char ch;
-
-       for (i=0; i<count; i++) {
-
-               ch = hex(*buf++) << 4;
-               ch |= hex(*buf++);
-               /* Assembler code is   *mem++ = ch;   with return 0 on fault */
-               __asm__(
-                       "\n1:\n\t"
-                       "stb %1, [%0]\n\t"
-                       "inc %0\n\t"
-                       ".section .fixup,#alloc,#execinstr\n\t"
-                       ".align 4\n"
-                       "2:\n\t"
-                       "retl\n\t"
-                       " mov 0, %%o0\n\t"
-                       ".section __ex_table, #alloc\n\t"
-                       ".align 4\n\t"
-                       ".word 1b, 2b\n\t"
-                       ".text\n"
-                       : "=r" (mem) : "r" (ch) , "0" (mem));
-       }
-       return mem;
-}
-
-/* This table contains the mapping between SPARC hardware trap types, and
-   signals, which are primarily what GDB understands.  It also indicates
-   which hardware traps we need to commandeer when initializing the stub. */
-
-static struct hard_trap_info
-{
-  unsigned char tt;            /* Trap type code for SPARC */
-  unsigned char signo;         /* Signal that we map this trap into */
-} hard_trap_info[] = {
-  {SP_TRAP_SBPT, SIGTRAP},      /* ta 1 - Linux/KGDB software breakpoint */
-  {0, 0}                       /* Must be last */
-};
-
-/* Set up exception handlers for tracing and breakpoints */
-
-void
-set_debug_traps(void)
-{
-       struct hard_trap_info *ht;
-       unsigned long flags;
-
-       local_irq_save(flags);
-#if 0  
-/* Have to sort this out. This cannot be done after initialization. */
-       BTFIXUPSET_CALL(flush_cache_all, flush_cache_all_nop, BTFIXUPCALL_NOP);
-#endif
-
-       /* Initialize our copy of the Linux Sparc trap table */
-       eh_init();
-
-       for (ht = hard_trap_info; ht->tt && ht->signo; ht++) {
-               /* Only if it doesn't destroy our fault handlers */
-               if((ht->tt != SP_TRAP_TFLT) && 
-                  (ht->tt != SP_TRAP_DFLT))
-                       exceptionHandler(ht->tt, trap_low);
-       }
-
-       /* In case GDB is started before us, ack any packets (presumably
-        * "$?#xx") sitting there.
-        *
-        * I've found this code causes more problems than it solves,
-        * so that's why it's commented out.  GDB seems to work fine
-        * now starting either before or after the kernel   -bwb
-        */
-#if 0
-       while((c = getDebugChar()) != '$');
-       while((c = getDebugChar()) != '#');
-       c = getDebugChar(); /* eat first csum byte */
-       c = getDebugChar(); /* eat second csum byte */
-       putDebugChar('+'); /* ack it */
-#endif
-
-       initialized = 1; /* connect! */
-       local_irq_restore(flags);
-}
-
-/* Convert the SPARC hardware trap type code to a unix signal number. */
-
-static int
-computeSignal(int tt)
-{
-       struct hard_trap_info *ht;
-
-       for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
-               if (ht->tt == tt)
-                       return ht->signo;
-
-       return SIGHUP;         /* default for things we don't know about */
-}
-
-/*
- * While we find nice hex chars, build an int.
- * Return number of chars processed.
- */
-
-static int
-hexToInt(char **ptr, int *intValue)
-{
-       int numChars = 0;
-       int hexValue;
-
-       *intValue = 0;
-
-       while (**ptr) {
-               hexValue = hex(**ptr);
-               if (hexValue < 0)
-                       break;
-
-               *intValue = (*intValue << 4) | hexValue;
-               numChars ++;
-
-               (*ptr)++;
-       }
-
-       return (numChars);
-}
-
-/*
- * This function does all command processing for interfacing to gdb.  It
- * returns 1 if you should skip the instruction at the trap address, 0
- * otherwise.
- */
-
-extern void breakinst(void);
-
-void
-handle_exception (unsigned long *registers)
-{
-       int tt;       /* Trap type */
-       int sigval;
-       int addr;
-       int length;
-       char *ptr;
-       unsigned long *sp;
-
-       /* First, we must force all of the windows to be spilled out */
-
-       asm("save %sp, -64, %sp\n\t"
-           "save %sp, -64, %sp\n\t"
-           "save %sp, -64, %sp\n\t"
-           "save %sp, -64, %sp\n\t"
-           "save %sp, -64, %sp\n\t"
-           "save %sp, -64, %sp\n\t"
-           "save %sp, -64, %sp\n\t"
-           "save %sp, -64, %sp\n\t"
-           "restore\n\t"
-           "restore\n\t"
-           "restore\n\t"
-           "restore\n\t"
-           "restore\n\t"
-           "restore\n\t"
-           "restore\n\t"
-           "restore\n\t");
-
-       lock_kernel();
-       if (registers[PC] == (unsigned long)breakinst) {
-               /* Skip over breakpoint trap insn */
-               registers[PC] = registers[NPC];
-               registers[NPC] += 4;
-       }
-
-       sp = (unsigned long *)registers[SP];
-
-       tt = (registers[TBR] >> 4) & 0xff;
-
-       /* reply to host that an exception has occurred */
-       sigval = computeSignal(tt);
-       ptr = remcomOutBuffer;
-
-       *ptr++ = 'T';
-       *ptr++ = hexchars[sigval >> 4];
-       *ptr++ = hexchars[sigval & 0xf];
-
-       *ptr++ = hexchars[PC >> 4];
-       *ptr++ = hexchars[PC & 0xf];
-       *ptr++ = ':';
-       ptr = mem2hex((char *)&registers[PC], ptr, 4);
-       *ptr++ = ';';
-
-       *ptr++ = hexchars[FP >> 4];
-       *ptr++ = hexchars[FP & 0xf];
-       *ptr++ = ':';
-       ptr = mem2hex((char *) (sp + 8 + 6), ptr, 4); /* FP */
-       *ptr++ = ';';
-
-       *ptr++ = hexchars[SP >> 4];
-       *ptr++ = hexchars[SP & 0xf];
-       *ptr++ = ':';
-       ptr = mem2hex((char *)&sp, ptr, 4);
-       *ptr++ = ';';
-
-       *ptr++ = hexchars[NPC >> 4];
-       *ptr++ = hexchars[NPC & 0xf];
-       *ptr++ = ':';
-       ptr = mem2hex((char *)&registers[NPC], ptr, 4);
-       *ptr++ = ';';
-
-       *ptr++ = hexchars[O7 >> 4];
-       *ptr++ = hexchars[O7 & 0xf];
-       *ptr++ = ':';
-       ptr = mem2hex((char *)&registers[O7], ptr, 4);
-       *ptr++ = ';';
-
-       *ptr++ = 0;
-
-       putpacket(remcomOutBuffer);
-
-       /* XXX We may want to add some features dealing with poking the
-        * XXX page tables, the real ones on the srmmu, and what is currently
-        * XXX loaded in the sun4/sun4c tlb at this point in time.  But this
-        * XXX also required hacking to the gdb sources directly...
-        */
-
-       while (1) {
-               remcomOutBuffer[0] = 0;
-
-               getpacket(remcomInBuffer);
-               switch (remcomInBuffer[0]) {
-               case '?':
-                       remcomOutBuffer[0] = 'S';
-                       remcomOutBuffer[1] = hexchars[sigval >> 4];
-                       remcomOutBuffer[2] = hexchars[sigval & 0xf];
-                       remcomOutBuffer[3] = 0;
-                       break;
-
-               case 'd':
-                       /* toggle debug flag */
-                       break;
-
-               case 'g':               /* return the value of the CPU registers */
-               {
-                       ptr = remcomOutBuffer;
-                       /* G & O regs */
-                       ptr = mem2hex((char *)registers, ptr, 16 * 4);
-                       /* L & I regs */
-                       ptr = mem2hex((char *) (sp + 0), ptr, 16 * 4);
-                       /* Floating point */
-                       memset(ptr, '0', 32 * 8);
-                       /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
-                       mem2hex((char *)&registers[Y], (ptr + 32 * 4 * 2), (8 * 4));
-               }
-                       break;
-
-               case 'G':          /* set the value of the CPU registers - return OK */
-               {
-                       unsigned long *newsp, psr;
-
-                       psr = registers[PSR];
-
-                       ptr = &remcomInBuffer[1];
-                       /* G & O regs */
-                       hex2mem(ptr, (char *)registers, 16 * 4);
-                       /* L & I regs */
-                       hex2mem(ptr + 16 * 4 * 2, (char *) (sp + 0), 16 * 4);
-                       /* Y, PSR, WIM, TBR, PC, NPC, FPSR, CPSR */
-                       hex2mem(ptr + 64 * 4 * 2, (char *)&registers[Y], 8 * 4);
-
-                       /* See if the stack pointer has moved.  If so,
-                        * then copy the saved locals and ins to the
-                        * new location.  This keeps the window
-                        * overflow and underflow routines happy.
-                        */
-
-                       newsp = (unsigned long *)registers[SP];
-                       if (sp != newsp)
-                               sp = memcpy(newsp, sp, 16 * 4);
-
-                       /* Don't allow CWP to be modified. */
-
-                       if (psr != registers[PSR])
-                               registers[PSR] = (psr & 0x1f) | (registers[PSR] & ~0x1f);
-
-                       strcpy(remcomOutBuffer,"OK");
-               }
-                       break;
-
-               case 'm':         /* mAA..AA,LLLL  Read LLLL bytes at address AA..AA */
-                       /* Try to read %x,%x.  */
-
-                       ptr = &remcomInBuffer[1];
-
-                       if (hexToInt(&ptr, &addr)
-                           && *ptr++ == ','
-                           && hexToInt(&ptr, &length)) {
-                               if (mem2hex((char *)addr, remcomOutBuffer, length))
-                                       break;
-
-                               strcpy (remcomOutBuffer, "E03");
-                       } else {
-                               strcpy(remcomOutBuffer,"E01");
-                       }
-                       break;
-
-               case 'M': /* MAA..AA,LLLL: Write LLLL bytes at address AA.AA return OK */
-                       /* Try to read '%x,%x:'.  */
-
-                       ptr = &remcomInBuffer[1];
-
-                       if (hexToInt(&ptr, &addr)
-                           && *ptr++ == ','
-                           && hexToInt(&ptr, &length)
-                           && *ptr++ == ':') {
-                               if (hex2mem(ptr, (char *)addr, length)) {
-                                       strcpy(remcomOutBuffer, "OK");
-                               } else {
-                                       strcpy(remcomOutBuffer, "E03");
-                               }
-                       } else {
-                               strcpy(remcomOutBuffer, "E02");
-                       }
-                       break;
-
-               case 'c':    /* cAA..AA    Continue at address AA..AA(optional) */
-                       /* try to read optional parameter, pc unchanged if no parm */
-
-                       ptr = &remcomInBuffer[1];
-                       if (hexToInt(&ptr, &addr)) {
-                               registers[PC] = addr;
-                               registers[NPC] = addr + 4;
-                       }
-
-/* Need to flush the instruction cache here, as we may have deposited a
- * breakpoint, and the icache probably has no way of knowing that a data ref to
- * some location may have changed something that is in the instruction cache.
- */
-                       flush_cache_all();
-                       unlock_kernel();
-                       return;
-
-                       /* kill the program */
-               case 'k' :              /* do nothing */
-                       break;
-               case 'r':               /* Reset */
-                       asm ("call 0\n\t"
-                            "nop\n\t");
-                       break;
-               }                       /* switch */
-
-               /* reply to the request */
-               putpacket(remcomOutBuffer);
-       } /* while(1) */
-}
-
-/* This function will generate a breakpoint exception.  It is used at the
-   beginning of a program to sync up with a debugger and can be used
-   otherwise as a quick means to stop program execution and "break" into
-   the debugger. */
-
-void
-breakpoint(void)
-{
-       if (!initialized)
-               return;
-
-       /* Again, watch those c-prefixes for ELF kernels */
-#if defined(__svr4__) || defined(__ELF__)
-       asm(".globl breakinst\n"
-           "breakinst:\n\t"
-           "ta 1\n");
-#else
-       asm(".globl _breakinst\n"
-           "_breakinst:\n\t"
-           "ta 1\n");
-#endif
-}
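
As an illustrative aside (not part of the patch): the header comment of the deleted stub documents the GDB remote serial protocol framing — packets are sent as $<payload>#<checksum>, where the checksum is the modulo-256 sum of the payload bytes printed as two hex digits, and each packet is acknowledged with '+' or '-'. A stand-alone sketch of just the framing step that putpacket() performed (illustrative only; the generic KGDB core the kernel now uses carries its own implementation):

#include <stdio.h>

static const char hexchars[] = "0123456789abcdef";

/* Frame payload as $<payload>#<checksum>; the checksum is the
 * modulo-256 sum of the payload bytes as two lowercase hex digits. */
static void frame_packet(const char *payload, char *out, size_t outlen)
{
	unsigned char csum = 0;
	size_t i;

	for (i = 0; payload[i]; i++)
		csum += (unsigned char)payload[i];

	snprintf(out, outlen, "$%s#%c%c", payload,
		 hexchars[csum >> 4], hexchars[csum & 0xf]);
}

int main(void)
{
	char buf[64];

	frame_packet("m0,10", buf, sizeof(buf));
	printf("%s\n", buf);	/* "$m0,10#2a", matching the example in the old comment */
	return 0;
}
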
index 0bcf98a7ef389b83f0ae4fb937823864a776f221..aa8ee06cf488b1f3de617df99faf0949d49cef0a 100644 (file)
@@ -282,3 +282,5 @@ EXPORT_SYMBOL(do_BUG);
 
 /* Sun Power Management Idle Handler */
 EXPORT_SYMBOL(pm_idle);
+
+EXPORT_SYMBOL(empty_zero_page);
index 0def48158c7d165ca34688a51053205372e79cf4..dfde77ff084870810b3a8e94e40f089b796418be 100644 (file)
@@ -335,37 +335,6 @@ void smp4d_cross_call_irq(void)
        ccall_info.processors_out[i] = 1;
 }
 
-static int smp4d_stop_cpu_sender;
-
-static void smp4d_stop_cpu(void)
-{
-       int me = hard_smp4d_processor_id();
-       
-       if (me != smp4d_stop_cpu_sender)
-               while(1) barrier();
-}
-
-/* Cross calls, in order to work efficiently and atomically do all
- * the message passing work themselves, only stopcpu and reschedule
- * messages come through here.
- */
-void smp4d_message_pass(int target, int msg, unsigned long data, int wait)
-{
-       int me = hard_smp4d_processor_id();
-
-       SMP_PRINTK(("smp4d_message_pass %d %d %08lx %d\n", target, msg, data, wait));
-       if (msg == MSG_STOP_CPU && target == MSG_ALL_BUT_SELF) {
-               unsigned long flags;
-               static DEFINE_SPINLOCK(stop_cpu_lock);
-               spin_lock_irqsave(&stop_cpu_lock, flags);
-               smp4d_stop_cpu_sender = me;
-               smp4d_cross_call((smpfunc_t)smp4d_stop_cpu, 0, 0, 0, 0, 0);
-               spin_unlock_irqrestore(&stop_cpu_lock, flags);
-       }
-       printk("Yeeee, trying to send SMP msg(%d) to %d on cpu %d\n", msg, target, me);
-       panic("Bogon SMP message pass.");
-}
-
 void smp4d_percpu_timer_interrupt(struct pt_regs *regs)
 {
        struct pt_regs *old_regs;
@@ -439,7 +408,6 @@ void __init sun4d_init_smp(void)
        BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id);
        BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current);
        BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(smp_message_pass, smp4d_message_pass, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM);
        
        for (i = 0; i < NR_CPUS; i++) {
index 0b94072671623287c6c819fb4f405b18a0bb7038..ffb875aacb7e4ccb14b5e4c9f248f24eb48a5844 100644 (file)
@@ -34,8 +34,6 @@
 
 #include "irq.h"
 
-#define IRQ_RESCHEDULE         13
-#define IRQ_STOP_CPU           14
 #define IRQ_CROSS_CALL         15
 
 extern ctxd_t *srmmu_ctx_table_phys;
@@ -232,48 +230,6 @@ void smp4m_irq_rotate(int cpu)
                set_irq_udt(next);
 }
 
-/* Cross calls, in order to work efficiently and atomically do all
- * the message passing work themselves, only stopcpu and reschedule
- * messages come through here.
- */
-void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
-{
-       static unsigned long smp_cpu_in_msg[NR_CPUS];
-       cpumask_t mask;
-       int me = smp_processor_id();
-       int irq, i;
-
-       if(msg == MSG_RESCHEDULE) {
-               irq = IRQ_RESCHEDULE;
-
-               if(smp_cpu_in_msg[me])
-                       return;
-       } else if(msg == MSG_STOP_CPU) {
-               irq = IRQ_STOP_CPU;
-       } else {
-               goto barf;
-       }
-
-       smp_cpu_in_msg[me]++;
-       if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
-               mask = cpu_online_map;
-               if(target == MSG_ALL_BUT_SELF)
-                       cpu_clear(me, mask);
-               for(i = 0; i < 4; i++) {
-                       if (cpu_isset(i, mask))
-                               set_cpu_int(i, irq);
-               }
-       } else {
-               set_cpu_int(target, irq);
-       }
-       smp_cpu_in_msg[me]--;
-
-       return;
-barf:
-       printk("Yeeee, trying to send SMP msg(%d) on cpu %d\n", msg, me);
-       panic("Bogon SMP message pass.");
-}
-
 static struct smp_funcall {
        smpfunc_t func;
        unsigned long arg1;
@@ -413,6 +369,5 @@ void __init sun4m_init_smp(void)
        BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id);
        BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current);
        BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM);
-       BTFIXUPSET_CALL(smp_message_pass, smp4m_message_pass, BTFIXUPCALL_NORM);
        BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM);
 }
index 42bf09db9a81ffaa11972289daffcb884fa22225..f188b5dc9fd001bec3691b03401408b5af655ce4 100644 (file)
@@ -1,5 +1,4 @@
-/* $Id: sys_sparc.c,v 1.70 2001/04/14 01:12:02 davem Exp $
- * linux/arch/sparc/kernel/sys_sparc.c
+/* linux/arch/sparc/kernel/sys_sparc.c
  *
  * This file contains various random system calls that
  * have a non-standard calling sequence on the Linux/sparc
@@ -395,10 +394,8 @@ sparc_sigaction (int sig, const struct old_sigaction __user *act,
        struct k_sigaction new_ka, old_ka;
        int ret;
 
-       if (sig < 0) {
-               current->thread.new_signal = 1;
-               sig = -sig;
-       }
+       WARN_ON_ONCE(sig >= 0);
+       sig = -sig;
 
        if (act) {
                unsigned long mask;
@@ -446,11 +443,6 @@ sys_rt_sigaction(int sig,
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;
 
-       /* All tasks which use RT signals (effectively) use
-        * new style signals.
-        */
-       current->thread.new_signal = 1;
-
        if (act) {
                new_ka.ka_restorer = restorer;
                if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
index 54501c1ca785c00b10e11801a6fa240e6eec9341..9ef37e13a920e28ba99e197dc50dff7b812c5972 100644 (file)
@@ -21,8 +21,8 @@ EXPORT_SYMBOL(ioport_unmap);
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-       unsigned long start = pci_resource_start(dev, bar);
-       unsigned long len = pci_resource_len(dev, bar);
+       resource_size_t start = pci_resource_start(dev, bar);
+       resource_size_t len = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);
 
        if (!len || !start)
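
The pci_iomap() hunk above switches the BAR start and length from unsigned long to resource_size_t, which follows the configured resource width (note CONFIG_RESOURCES_64BIT=y in the defconfig further down) rather than the native word size. Below is a minimal, hypothetical C sketch of the truncation this guards against on builds where unsigned long is only 32 bits wide; nothing in it is part of the patch.

  #include <stdint.h>
  #include <stdio.h>

  /* Illustration only: a PCI BAR that starts above 4GB loses its upper
   * bits when stored in a 32-bit integer.  resource_size_t (u64 when
   * CONFIG_RESOURCES_64BIT=y in this kernel era) keeps the full value.
   */
  int main(void)
  {
          uint64_t bar_start = 0x100000000ULL;      /* BAR at the 4GB boundary */
          uint32_t narrow    = (uint32_t)bar_start; /* truncates to 0 */

          printf("wide view: %#llx, 32-bit view: %#x\n",
                 (unsigned long long)bar_start, (unsigned int)narrow);
          return 0;
  }
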
index 8acc5cc386212c4cba07ff12b93cd2fabf0a57b6..eb36f3b746b8ad280c7a26a07de046af25c01d3c 100644 (file)
@@ -1,9 +1,5 @@
-# $Id: config.in,v 1.158 2002/01/24 22:14:44 davem Exp $
-# For a description of the syntax of this configuration file,
-# see the Configure script.
-#
-
-mainmenu "Linux/UltraSPARC Kernel Configuration"
+# sparc64 configuration
+mainmenu "Linux Kernel Configuration for 64-bit SPARC"
 
 config SPARC
        bool
@@ -17,12 +13,7 @@ config SPARC64
        default y
        select HAVE_IDE
        select HAVE_LMB
-       help
-         SPARC is a family of RISC microprocessors designed and marketed by
-         Sun Microsystems, incorporated.  This port covers the newer 64-bit
-         UltraSPARC.  The UltraLinux project maintains both the SPARC32 and
-         SPARC64 ports; its web page is available at
-         <http://www.ultralinux.org/>.
+       select HAVE_ARCH_KGDB
 
 config GENERIC_TIME
        bool
@@ -97,7 +88,7 @@ config SPARC64_PAGE_SIZE_8KB
        help
          This lets you select the page size of the kernel.
 
-         8KB and 64KB work quite well, since Sparc ELF sections
+         8KB and 64KB work quite well, since SPARC ELF sections
          provide for up to 64KB alignment.
 
          Therefore, 512KB and 4MB are for expert hackers only.
@@ -138,7 +129,7 @@ config HOTPLUG_CPU
        bool "Support for hot-pluggable CPUs"
        depends on SMP
        select HOTPLUG
-       ---help---
+       help
          Say Y here to experiment with turning CPUs off and on.  CPUs
          can be controlled through /sys/devices/system/cpu/cpu#.
          Say N if you want to disable CPU hotplug.
@@ -155,23 +146,16 @@ source "kernel/time/Kconfig"
 
 config SMP
        bool "Symmetric multi-processing support"
-       ---help---
+       help
          This enables support for systems with more than one CPU. If you have
          a system with only one CPU, say N. If you have a system with more than
          one CPU, say Y.
 
          If you say N here, the kernel will run on single and multiprocessor
          machines, but will use only one CPU of a multiprocessor machine. If
-         you say Y here, the kernel will run on many, but not all,
-         singleprocessor machines. On a singleprocessor machine, the kernel
-         will run faster if you say N here.
-
-         People using multiprocessor machines who say Y here should also say
-         Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
-         Management" code will be disabled if you say Y here.
-
-         See also <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO
-         available at <http://www.tldp.org/docs.html#howto>.
+         you say Y here, the kernel will run on single-processor machines.
+         On a single-processor machine, the kernel will run faster if you say
+         N here.
 
          If you don't know what to do here, say N.
 
@@ -284,50 +268,19 @@ source "mm/Kconfig"
 
 config ISA
        bool
-       help
-         Find out whether you have ISA slots on your motherboard.  ISA is the
-         name of a bus system, i.e. the way the CPU talks to the other stuff
-         inside your box.  Other bus systems are PCI, EISA, MicroChannel
-         (MCA) or VESA.  ISA is an older system, now being displaced by PCI;
-         newer boards don't support it.  If you have ISA, say Y, otherwise N.
 
 config ISAPNP
        bool
-       help
-         Say Y here if you would like support for ISA Plug and Play devices.
-         Some information is in <file:Documentation/isapnp.txt>.
-
-         To compile this driver as a module, choose M here: the
-         module will be called isapnp.
-
-         If unsure, say Y.
 
 config EISA
        bool
-       ---help---
-         The Extended Industry Standard Architecture (EISA) bus was
-         developed as an open alternative to the IBM MicroChannel bus.
-
-         The EISA bus provided some of the features of the IBM MicroChannel
-         bus while maintaining backward compatibility with cards made for
-         the older ISA bus.  The EISA bus saw limited use between 1988 and
-         1995 when it was made obsolete by the PCI bus.
-
-         Say Y here if you are building a kernel for an EISA-based machine.
-
-         Otherwise, say N.
 
 config MCA
        bool
-       help
-         MicroChannel Architecture is found in some IBM PS/2 machines and
-         laptops.  It is a bus system similar to PCI or ISA. See
-         <file:Documentation/mca.txt> (and especially the web page given
-         there) before attempting to build an MCA bus kernel.
 
 config PCMCIA
        tristate
-       ---help---
+       help
          Say Y here if you want to attach PCMCIA- or PC-cards to your Linux
          computer.  These are credit-card size devices such as network cards,
          modems or hard drives often used with laptop computers.  There are
@@ -369,10 +322,10 @@ config PCI
        bool "PCI support"
        select ARCH_SUPPORTS_MSI
        help
-         Find out whether you have a PCI motherboard. PCI is the name of a
-         bus system, i.e. the way the CPU talks to the other stuff inside
-         your box. Other bus systems are ISA, EISA, MicroChannel (MCA) or
-         VESA. If you have PCI, say Y, otherwise N.
+         Find out whether your system includes a PCI bus. PCI is the name of
+         a bus system, i.e. the way the CPU talks to the other stuff inside
+         your box.  If you say Y here, the kernel will include drivers and
+         infrastructure code to support PCI bus devices.
 
 config PCI_DOMAINS
        def_bool PCI
@@ -396,15 +349,8 @@ menu "Executable file formats"
 
 source "fs/Kconfig.binfmt"
 
-config SPARC32_COMPAT
-       bool "Kernel support for Linux/Sparc 32bit binary compatibility"
-       help
-         This allows you to run 32-bit binaries on your Ultra.
-         Everybody wants this; say Y.
-
 config COMPAT
        bool
-       depends on SPARC32_COMPAT
        default y
        select COMPAT_BINFMT_ELF
 
@@ -421,8 +367,8 @@ config SCHED_SMT
        default y
        help
          SMT scheduler support improves the CPU scheduler's decision making
-         when dealing with UltraSPARC cpus at a cost of slightly increased
-         overhead in some places. If unsure say N here.
+         when dealing with SPARC cpus at a cost of slightly increased overhead
+         in some places. If unsure say N here.
 
 config SCHED_MC
        bool "Multi-core scheduler support"
index 92f79680f70d92da247c2e80130db38af86363ad..aff93c9d13f45fb87e94c3100b599711a9efd085 100644 (file)
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.25-numa
-# Wed Apr 23 04:49:08 2008
+# Linux kernel version: 2.6.25
+# Sat Apr 26 03:11:06 2008
 #
 CONFIG_SPARC=y
 CONFIG_SPARC64=y
@@ -152,7 +152,9 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y
 CONFIG_HUGETLB_PAGE_SIZE_4MB=y
 # CONFIG_HUGETLB_PAGE_SIZE_512K is not set
 # CONFIG_HUGETLB_PAGE_SIZE_64K is not set
-# CONFIG_NUMA is not set
+CONFIG_NUMA=y
+CONFIG_NODES_SHIFT=4
+CONFIG_NODES_SPAN_OTHER_NODES=y
 CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_ARCH_SELECT_MEMORY_MODEL=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
@@ -162,12 +164,14 @@ CONFIG_SELECT_MEMORY_MODEL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
 CONFIG_SPARSEMEM_MANUAL=y
 CONFIG_SPARSEMEM=y
+CONFIG_NEED_MULTIPLE_NODES=y
 CONFIG_HAVE_MEMORY_PRESENT=y
 # CONFIG_SPARSEMEM_STATIC is not set
 CONFIG_SPARSEMEM_EXTREME=y
 CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
 CONFIG_SPARSEMEM_VMEMMAP=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_MIGRATION=y
 CONFIG_RESOURCES_64BIT=y
 CONFIG_ZONE_DMA_FLAG=0
 CONFIG_NR_QUICK=1
@@ -191,7 +195,6 @@ CONFIG_SUN_OPENPROMFS=m
 CONFIG_BINFMT_ELF=y
 CONFIG_COMPAT_BINFMT_ELF=y
 CONFIG_BINFMT_MISC=m
-CONFIG_SPARC32_COMPAT=y
 CONFIG_COMPAT=y
 CONFIG_SYSVIPC_COMPAT=y
 CONFIG_SCHED_SMT=y
@@ -746,13 +749,7 @@ CONFIG_DEVPORT=y
 CONFIG_I2C=y
 CONFIG_I2C_BOARDINFO=y
 # CONFIG_I2C_CHARDEV is not set
-
-#
-# I2C Algorithms
-#
 CONFIG_I2C_ALGOBIT=y
-# CONFIG_I2C_ALGOPCF is not set
-# CONFIG_I2C_ALGOPCA is not set
 
 #
 # I2C Hardware Bus support
@@ -780,6 +777,7 @@ CONFIG_I2C_ALGOBIT=y
 # CONFIG_I2C_VIA is not set
 # CONFIG_I2C_VIAPRO is not set
 # CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
 
 #
 # Miscellaneous I2C Chip support
@@ -1026,6 +1024,7 @@ CONFIG_SND_ALI5451=m
 # CONFIG_SND_AU8810 is not set
 # CONFIG_SND_AU8820 is not set
 # CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AW2 is not set
 # CONFIG_SND_AZT3328 is not set
 # CONFIG_SND_BT87X is not set
 # CONFIG_SND_CA0106 is not set
@@ -1096,10 +1095,6 @@ CONFIG_SND_SUN_CS4231=m
 #
 # CONFIG_SND_SOC is not set
 
-#
-# SoC Audio support for SuperH
-#
-
 #
 # ALSA SoC audio for Freescale SOCs
 #
index 63c6ae0dd273ee9ad078de8d93eea015847aac3e..ec4f5ebb1ca669e72048317fe623c12d30b281d2 100644 (file)
@@ -15,17 +15,18 @@ obj-y               := process.o setup.o cpu.o idprom.o \
                   visemul.o prom.o of_device.o hvapi.o sstate.o mdesc.o
 
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
-obj-$(CONFIG_PCI)       += ebus.o isa.o pci_common.o \
+obj-$(CONFIG_PCI)       += ebus.o pci_common.o \
                            pci_psycho.o pci_sabre.o pci_schizo.o \
                            pci_sun4v.o pci_sun4v_asm.o pci_fire.o
 obj-$(CONFIG_PCI_MSI)  += pci_msi.o
 obj-$(CONFIG_SMP)       += smp.o trampoline.o hvtramp.o
-obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o
+obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o
 obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o
 obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_SUN_LDOMS) += ldc.o vio.o viohs.o ds.o
 obj-$(CONFIG_AUDIT) += audit.o
-obj-$(CONFIG_AUDIT)$(CONFIG_SPARC32_COMPAT) += compat_audit.o
+obj-$(CONFIG_AUDIT)$(CONFIG_COMPAT) += compat_audit.o
 obj-y += $(obj-yy)
+obj-$(CONFIG_KGDB) += kgdb.o
index 24d7f4b4178ae5fdc98a520ec786dd7523f68e85..8fff0ac63d564018afcba4ae5c7d77b3bf82a003 100644 (file)
@@ -30,7 +30,7 @@ static unsigned signal_class[] = {
 
 int audit_classify_arch(int arch)
 {
-#ifdef CONFIG_SPARC32_COMPAT
+#ifdef CONFIG_COMPAT
        if (arch == AUDIT_ARCH_SPARC)
                return 1;
 #endif
@@ -39,7 +39,7 @@ int audit_classify_arch(int arch)
 
 int audit_classify_syscall(int abi, unsigned syscall)
 {
-#ifdef CONFIG_SPARC32_COMPAT
+#ifdef CONFIG_COMPAT
        extern int sparc32_classify_syscall(unsigned);
        if (abi == AUDIT_ARCH_SPARC)
                return sparc32_classify_syscall(syscall);
@@ -60,7 +60,7 @@ int audit_classify_syscall(int abi, unsigned syscall)
 
 static int __init audit_classes_init(void)
 {
-#ifdef CONFIG_SPARC32_COMPAT
+#ifdef CONFIG_COMPAT
        extern __u32 sparc32_dir_class[];
        extern __u32 sparc32_write_class[];
        extern __u32 sparc32_read_class[];
diff --git a/arch/sparc64/kernel/cherrs.S b/arch/sparc64/kernel/cherrs.S
new file mode 100644 (file)
index 0000000..89afebd
--- /dev/null
+++ b/arch/sparc64/kernel/cherrs.S
@@ -0,0 +1,579 @@
+       /* These get patched into the trap table at boot time
+        * once we know we have a cheetah processor.
+        */
+       .globl          cheetah_fecc_trap_vector
+       .type           cheetah_fecc_trap_vector,#function
+cheetah_fecc_trap_vector:
+       membar          #Sync
+       ldxa            [%g0] ASI_DCU_CONTROL_REG, %g1
+       andn            %g1, DCU_DC | DCU_IC, %g1
+       stxa            %g1, [%g0] ASI_DCU_CONTROL_REG
+       membar          #Sync
+       sethi           %hi(cheetah_fast_ecc), %g2
+       jmpl            %g2 + %lo(cheetah_fast_ecc), %g0
+        mov            0, %g1
+       .size           cheetah_fecc_trap_vector,.-cheetah_fecc_trap_vector
+
+       .globl          cheetah_fecc_trap_vector_tl1
+       .type           cheetah_fecc_trap_vector_tl1,#function
+cheetah_fecc_trap_vector_tl1:
+       membar          #Sync
+       ldxa            [%g0] ASI_DCU_CONTROL_REG, %g1
+       andn            %g1, DCU_DC | DCU_IC, %g1
+       stxa            %g1, [%g0] ASI_DCU_CONTROL_REG
+       membar          #Sync
+       sethi           %hi(cheetah_fast_ecc), %g2
+       jmpl            %g2 + %lo(cheetah_fast_ecc), %g0
+        mov            1, %g1
+       .size           cheetah_fecc_trap_vector_tl1,.-cheetah_fecc_trap_vector_tl1
+
+       .globl  cheetah_cee_trap_vector
+       .type   cheetah_cee_trap_vector,#function
+cheetah_cee_trap_vector:
+       membar          #Sync
+       ldxa            [%g0] ASI_DCU_CONTROL_REG, %g1
+       andn            %g1, DCU_IC, %g1
+       stxa            %g1, [%g0] ASI_DCU_CONTROL_REG
+       membar          #Sync
+       sethi           %hi(cheetah_cee), %g2
+       jmpl            %g2 + %lo(cheetah_cee), %g0
+        mov            0, %g1
+       .size           cheetah_cee_trap_vector,.-cheetah_cee_trap_vector
+
+       .globl          cheetah_cee_trap_vector_tl1
+       .type           cheetah_cee_trap_vector_tl1,#function
+cheetah_cee_trap_vector_tl1:
+       membar          #Sync
+       ldxa            [%g0] ASI_DCU_CONTROL_REG, %g1
+       andn            %g1, DCU_IC, %g1
+       stxa            %g1, [%g0] ASI_DCU_CONTROL_REG
+       membar          #Sync
+       sethi           %hi(cheetah_cee), %g2
+       jmpl            %g2 + %lo(cheetah_cee), %g0
+        mov            1, %g1
+       .size           cheetah_cee_trap_vector_tl1,.-cheetah_cee_trap_vector_tl1
+
+       .globl  cheetah_deferred_trap_vector
+       .type   cheetah_deferred_trap_vector,#function
+cheetah_deferred_trap_vector:
+       membar          #Sync
+       ldxa            [%g0] ASI_DCU_CONTROL_REG, %g1;
+       andn            %g1, DCU_DC | DCU_IC, %g1;
+       stxa            %g1, [%g0] ASI_DCU_CONTROL_REG;
+       membar          #Sync;
+       sethi           %hi(cheetah_deferred_trap), %g2
+       jmpl            %g2 + %lo(cheetah_deferred_trap), %g0
+        mov            0, %g1
+       .size           cheetah_deferred_trap_vector,.-cheetah_deferred_trap_vector
+
+       .globl          cheetah_deferred_trap_vector_tl1
+       .type           cheetah_deferred_trap_vector_tl1,#function
+cheetah_deferred_trap_vector_tl1:
+       membar          #Sync;
+       ldxa            [%g0] ASI_DCU_CONTROL_REG, %g1;
+       andn            %g1, DCU_DC | DCU_IC, %g1;
+       stxa            %g1, [%g0] ASI_DCU_CONTROL_REG;
+       membar          #Sync;
+       sethi           %hi(cheetah_deferred_trap), %g2
+       jmpl            %g2 + %lo(cheetah_deferred_trap), %g0
+        mov            1, %g1
+       .size           cheetah_deferred_trap_vector_tl1,.-cheetah_deferred_trap_vector_tl1
+
+       /* Cheetah+ specific traps. These are for the new I/D cache parity
+        * error traps.  The first argument to cheetah_plus_parity_handler
+        * is encoded as follows:
+        *
+        * Bit0:        0=dcache,1=icache
+        * Bit1:        0=recoverable,1=unrecoverable
+        */
+       .globl          cheetah_plus_dcpe_trap_vector
+       .type           cheetah_plus_dcpe_trap_vector,#function
+cheetah_plus_dcpe_trap_vector:
+       membar          #Sync
+       sethi           %hi(do_cheetah_plus_data_parity), %g7
+       jmpl            %g7 + %lo(do_cheetah_plus_data_parity), %g0
+        nop
+       nop
+       nop
+       nop
+       nop
+       .size           cheetah_plus_dcpe_trap_vector,.-cheetah_plus_dcpe_trap_vector
+
+       .type           do_cheetah_plus_data_parity,#function
+do_cheetah_plus_data_parity:
+       rdpr            %pil, %g2
+       wrpr            %g0, 15, %pil
+       ba,pt           %xcc, etrap_irq
+        rd             %pc, %g7
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
+       mov             0x0, %o0
+       call            cheetah_plus_parity_error
+        add            %sp, PTREGS_OFF, %o1
+       ba,a,pt         %xcc, rtrap_irq
+       .size           do_cheetah_plus_data_parity,.-do_cheetah_plus_data_parity
+
+       .globl          cheetah_plus_dcpe_trap_vector_tl1
+       .type           cheetah_plus_dcpe_trap_vector_tl1,#function
+cheetah_plus_dcpe_trap_vector_tl1:
+       membar          #Sync
+       wrpr            PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
+       sethi           %hi(do_dcpe_tl1), %g3
+       jmpl            %g3 + %lo(do_dcpe_tl1), %g0
+        nop
+       nop
+       nop
+       nop
+       .size           cheetah_plus_dcpe_trap_vector_tl1,.-cheetah_plus_dcpe_trap_vector_tl1
+
+       .globl          cheetah_plus_icpe_trap_vector
+       .type           cheetah_plus_icpe_trap_vector,#function
+cheetah_plus_icpe_trap_vector:
+       membar          #Sync
+       sethi           %hi(do_cheetah_plus_insn_parity), %g7
+       jmpl            %g7 + %lo(do_cheetah_plus_insn_parity), %g0
+        nop
+       nop
+       nop
+       nop
+       nop
+       .size           cheetah_plus_icpe_trap_vector,.-cheetah_plus_icpe_trap_vector
+
+       .type           do_cheetah_plus_insn_parity,#function
+do_cheetah_plus_insn_parity:
+       rdpr            %pil, %g2
+       wrpr            %g0, 15, %pil
+       ba,pt           %xcc, etrap_irq
+        rd             %pc, %g7
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
+       mov             0x1, %o0
+       call            cheetah_plus_parity_error
+        add            %sp, PTREGS_OFF, %o1
+       ba,a,pt         %xcc, rtrap_irq
+       .size           do_cheetah_plus_insn_parity,.-do_cheetah_plus_insn_parity
+
+       .globl          cheetah_plus_icpe_trap_vector_tl1
+       .type           cheetah_plus_icpe_trap_vector_tl1,#function
+cheetah_plus_icpe_trap_vector_tl1:
+       membar          #Sync
+       wrpr            PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
+       sethi           %hi(do_icpe_tl1), %g3
+       jmpl            %g3 + %lo(do_icpe_tl1), %g0
+        nop
+       nop
+       nop
+       nop
+       .size           cheetah_plus_icpe_trap_vector_tl1,.-cheetah_plus_icpe_trap_vector_tl1
+
+       /* If we take one of these traps when tl >= 1, then we
+        * jump to interrupt globals.  If some trap level above us
+        * was also using interrupt globals, we cannot recover.
+        * We may use all interrupt global registers except %g6.
+        */
+       .globl          do_dcpe_tl1
+       .type           do_dcpe_tl1,#function
+do_dcpe_tl1:
+       rdpr            %tl, %g1                ! Save original trap level
+       mov             1, %g2                  ! Setup TSTATE checking loop
+       sethi           %hi(TSTATE_IG), %g3     ! TSTATE mask bit
+1:     wrpr            %g2, %tl                ! Set trap level to check
+       rdpr            %tstate, %g4            ! Read TSTATE for this level
+       andcc           %g4, %g3, %g0           ! Interrupt globals in use?
+       bne,a,pn        %xcc, do_dcpe_tl1_fatal ! Yep, irrecoverable
+        wrpr           %g1, %tl                ! Restore original trap level
+       add             %g2, 1, %g2             ! Next trap level
+       cmp             %g2, %g1                ! Hit them all yet?
+       ble,pt          %icc, 1b                ! Not yet
+        nop
+       wrpr            %g1, %tl                ! Restore original trap level
+do_dcpe_tl1_nonfatal:  /* Ok we may use interrupt globals safely. */
+       sethi           %hi(dcache_parity_tl1_occurred), %g2
+       lduw            [%g2 + %lo(dcache_parity_tl1_occurred)], %g1
+       add             %g1, 1, %g1
+       stw             %g1, [%g2 + %lo(dcache_parity_tl1_occurred)]
+       /* Reset D-cache parity */
+       sethi           %hi(1 << 16), %g1       ! D-cache size
+       mov             (1 << 5), %g2           ! D-cache line size
+       sub             %g1, %g2, %g1           ! Move down 1 cacheline
+1:     srl             %g1, 14, %g3            ! Compute UTAG
+       membar          #Sync
+       stxa            %g3, [%g1] ASI_DCACHE_UTAG
+       membar          #Sync
+       sub             %g2, 8, %g3             ! 64-bit data word within line
+2:     membar          #Sync
+       stxa            %g0, [%g1 + %g3] ASI_DCACHE_DATA
+       membar          #Sync
+       subcc           %g3, 8, %g3             ! Next 64-bit data word
+       bge,pt          %icc, 2b
+        nop
+       subcc           %g1, %g2, %g1           ! Next cacheline
+       bge,pt          %icc, 1b
+        nop
+       ba,pt           %xcc, dcpe_icpe_tl1_common
+        nop
+
+do_dcpe_tl1_fatal:
+       sethi           %hi(1f), %g7
+       ba,pt           %xcc, etraptl1
+1:     or              %g7, %lo(1b), %g7
+       mov             0x2, %o0
+       call            cheetah_plus_parity_error
+        add            %sp, PTREGS_OFF, %o1
+       ba,pt           %xcc, rtrap
+        nop
+       .size           do_dcpe_tl1,.-do_dcpe_tl1
+
+       .globl          do_icpe_tl1
+       .type           do_icpe_tl1,#function
+do_icpe_tl1:
+       rdpr            %tl, %g1                ! Save original trap level
+       mov             1, %g2                  ! Setup TSTATE checking loop
+       sethi           %hi(TSTATE_IG), %g3     ! TSTATE mask bit
+1:     wrpr            %g2, %tl                ! Set trap level to check
+       rdpr            %tstate, %g4            ! Read TSTATE for this level
+       andcc           %g4, %g3, %g0           ! Interrupt globals in use?
+       bne,a,pn        %xcc, do_icpe_tl1_fatal ! Yep, irrecoverable
+        wrpr           %g1, %tl                ! Restore original trap level
+       add             %g2, 1, %g2             ! Next trap level
+       cmp             %g2, %g1                ! Hit them all yet?
+       ble,pt          %icc, 1b                ! Not yet
+        nop
+       wrpr            %g1, %tl                ! Restore original trap level
+do_icpe_tl1_nonfatal:  /* Ok we may use interrupt globals safely. */
+       sethi           %hi(icache_parity_tl1_occurred), %g2
+       lduw            [%g2 + %lo(icache_parity_tl1_occurred)], %g1
+       add             %g1, 1, %g1
+       stw             %g1, [%g2 + %lo(icache_parity_tl1_occurred)]
+       /* Flush I-cache */
+       sethi           %hi(1 << 15), %g1       ! I-cache size
+       mov             (1 << 5), %g2           ! I-cache line size
+       sub             %g1, %g2, %g1
+1:     or              %g1, (2 << 3), %g3
+       stxa            %g0, [%g3] ASI_IC_TAG
+       membar          #Sync
+       subcc           %g1, %g2, %g1
+       bge,pt          %icc, 1b
+        nop
+       ba,pt           %xcc, dcpe_icpe_tl1_common
+        nop
+
+do_icpe_tl1_fatal:
+       sethi           %hi(1f), %g7
+       ba,pt           %xcc, etraptl1
+1:     or              %g7, %lo(1b), %g7
+       mov             0x3, %o0
+       call            cheetah_plus_parity_error
+        add            %sp, PTREGS_OFF, %o1
+       ba,pt           %xcc, rtrap
+        nop
+       .size           do_icpe_tl1,.-do_icpe_tl1
+       
+       .type           dcpe_icpe_tl1_common,#function
+dcpe_icpe_tl1_common:
+       /* Flush D-cache, re-enable D/I caches in DCU and finally
+        * retry the trapping instruction.
+        */
+       sethi           %hi(1 << 16), %g1       ! D-cache size
+       mov             (1 << 5), %g2           ! D-cache line size
+       sub             %g1, %g2, %g1
+1:     stxa            %g0, [%g1] ASI_DCACHE_TAG
+       membar          #Sync
+       subcc           %g1, %g2, %g1
+       bge,pt          %icc, 1b
+        nop
+       ldxa            [%g0] ASI_DCU_CONTROL_REG, %g1
+       or              %g1, (DCU_DC | DCU_IC), %g1
+       stxa            %g1, [%g0] ASI_DCU_CONTROL_REG
+       membar          #Sync
+       retry
+       .size           dcpe_icpe_tl1_common,.-dcpe_icpe_tl1_common
+
+       /* Capture I/D/E-cache state into per-cpu error scoreboard.
+        *
+        * %g1:         (TL>=0) ? 1 : 0
+        * %g2:         scratch
+        * %g3:         scratch
+        * %g4:         AFSR
+        * %g5:         AFAR
+        * %g6:         unused, will have current thread ptr after etrap
+        * %g7:         scratch
+        */
+       .type           __cheetah_log_error,#function
+__cheetah_log_error:
+       /* Put "TL1" software bit into AFSR. */
+       and             %g1, 0x1, %g1
+       sllx            %g1, 63, %g2
+       or              %g4, %g2, %g4
+
+       /* Get log entry pointer for this cpu at this trap level. */
+       BRANCH_IF_JALAPENO(g2,g3,50f)
+       ldxa            [%g0] ASI_SAFARI_CONFIG, %g2
+       srlx            %g2, 17, %g2
+       ba,pt           %xcc, 60f
+        and            %g2, 0x3ff, %g2
+
+50:    ldxa            [%g0] ASI_JBUS_CONFIG, %g2
+       srlx            %g2, 17, %g2
+       and             %g2, 0x1f, %g2
+
+60:    sllx            %g2, 9, %g2
+       sethi           %hi(cheetah_error_log), %g3
+       ldx             [%g3 + %lo(cheetah_error_log)], %g3
+       brz,pn          %g3, 80f
+        nop
+
+       add             %g3, %g2, %g3
+       sllx            %g1, 8, %g1
+       add             %g3, %g1, %g1
+
+       /* %g1 holds pointer to the top of the logging scoreboard */
+       ldx             [%g1 + 0x0], %g7
+       cmp             %g7, -1
+       bne,pn          %xcc, 80f
+        nop
+
+       stx             %g4, [%g1 + 0x0]
+       stx             %g5, [%g1 + 0x8]
+       add             %g1, 0x10, %g1
+
+       /* %g1 now points to D-cache logging area */
+       set             0x3ff8, %g2     /* DC_addr mask         */
+       and             %g5, %g2, %g2   /* DC_addr bits of AFAR */
+       srlx            %g5, 12, %g3
+       or              %g3, 1, %g3     /* PHYS tag + valid     */
+
+10:    ldxa            [%g2] ASI_DCACHE_TAG, %g7
+       cmp             %g3, %g7        /* TAG match?           */
+       bne,pt          %xcc, 13f
+        nop
+
+       /* Yep, what we want, capture state. */
+       stx             %g2, [%g1 + 0x20]
+       stx             %g7, [%g1 + 0x28]
+
+       /* A membar Sync is required before and after utag access. */
+       membar          #Sync
+       ldxa            [%g2] ASI_DCACHE_UTAG, %g7
+       membar          #Sync
+       stx             %g7, [%g1 + 0x30]
+       ldxa            [%g2] ASI_DCACHE_SNOOP_TAG, %g7
+       stx             %g7, [%g1 + 0x38]
+       clr             %g3
+
+12:    ldxa            [%g2 + %g3] ASI_DCACHE_DATA, %g7
+       stx             %g7, [%g1]
+       add             %g3, (1 << 5), %g3
+       cmp             %g3, (4 << 5)
+       bl,pt           %xcc, 12b
+        add            %g1, 0x8, %g1
+
+       ba,pt           %xcc, 20f
+        add            %g1, 0x20, %g1
+
+13:    sethi           %hi(1 << 14), %g7
+       add             %g2, %g7, %g2
+       srlx            %g2, 14, %g7
+       cmp             %g7, 4
+       bl,pt           %xcc, 10b
+        nop
+
+       add             %g1, 0x40, %g1
+
+       /* %g1 now points to I-cache logging area */
+20:    set             0x1fe0, %g2     /* IC_addr mask         */
+       and             %g5, %g2, %g2   /* IC_addr bits of AFAR */
+       sllx            %g2, 1, %g2     /* IC_addr[13:6]==VA[12:5] */
+       srlx            %g5, (13 - 8), %g3 /* Make PTAG */
+       andn            %g3, 0xff, %g3  /* Mask off undefined bits */
+
+21:    ldxa            [%g2] ASI_IC_TAG, %g7
+       andn            %g7, 0xff, %g7
+       cmp             %g3, %g7
+       bne,pt          %xcc, 23f
+        nop
+
+       /* Yep, what we want, capture state. */
+       stx             %g2, [%g1 + 0x40]
+       stx             %g7, [%g1 + 0x48]
+       add             %g2, (1 << 3), %g2
+       ldxa            [%g2] ASI_IC_TAG, %g7
+       add             %g2, (1 << 3), %g2
+       stx             %g7, [%g1 + 0x50]
+       ldxa            [%g2] ASI_IC_TAG, %g7
+       add             %g2, (1 << 3), %g2
+       stx             %g7, [%g1 + 0x60]
+       ldxa            [%g2] ASI_IC_TAG, %g7
+       stx             %g7, [%g1 + 0x68]
+       sub             %g2, (3 << 3), %g2
+       ldxa            [%g2] ASI_IC_STAG, %g7
+       stx             %g7, [%g1 + 0x58]
+       clr             %g3
+       srlx            %g2, 2, %g2
+
+22:    ldxa            [%g2 + %g3] ASI_IC_INSTR, %g7
+       stx             %g7, [%g1]
+       add             %g3, (1 << 3), %g3
+       cmp             %g3, (8 << 3)
+       bl,pt           %xcc, 22b
+        add            %g1, 0x8, %g1
+
+       ba,pt           %xcc, 30f
+        add            %g1, 0x30, %g1
+
+23:    sethi           %hi(1 << 14), %g7
+       add             %g2, %g7, %g2
+       srlx            %g2, 14, %g7
+       cmp             %g7, 4
+       bl,pt           %xcc, 21b
+        nop
+
+       add             %g1, 0x70, %g1
+
+       /* %g1 now points to E-cache logging area */
+30:    andn            %g5, (32 - 1), %g2
+       stx             %g2, [%g1 + 0x20]
+       ldxa            [%g2] ASI_EC_TAG_DATA, %g7
+       stx             %g7, [%g1 + 0x28]
+       ldxa            [%g2] ASI_EC_R, %g0
+       clr             %g3
+
+31:    ldxa            [%g3] ASI_EC_DATA, %g7
+       stx             %g7, [%g1 + %g3]
+       add             %g3, 0x8, %g3
+       cmp             %g3, 0x20
+
+       bl,pt           %xcc, 31b
+        nop
+80:
+       rdpr            %tt, %g2
+       cmp             %g2, 0x70
+       be              c_fast_ecc
+        cmp            %g2, 0x63
+       be              c_cee
+        nop
+       ba,pt           %xcc, c_deferred
+       .size           __cheetah_log_error,.-__cheetah_log_error
+
+       /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
+        * in the trap table.  That code has done a memory barrier
+        * and has disabled both the I-cache and D-cache in the DCU
+        * control register.  The I-cache is disabled so that we may
+        * capture the corrupted cache line, and the D-cache is disabled
+        * because corrupt data may have been placed there and we don't
+        * want to reference it.
+        *
+        * %g1 is one if this trap occurred at %tl >= 1.
+        *
+        * Next, we turn off error reporting so that we don't recurse.
+        */
+       .globl          cheetah_fast_ecc
+       .type           cheetah_fast_ecc,#function
+cheetah_fast_ecc:
+       ldxa            [%g0] ASI_ESTATE_ERROR_EN, %g2
+       andn            %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
+       stxa            %g2, [%g0] ASI_ESTATE_ERROR_EN
+       membar          #Sync
+
+       /* Fetch and clear AFSR/AFAR */
+       ldxa            [%g0] ASI_AFSR, %g4
+       ldxa            [%g0] ASI_AFAR, %g5
+       stxa            %g4, [%g0] ASI_AFSR
+       membar          #Sync
+
+       ba,pt           %xcc, __cheetah_log_error
+        nop
+       .size           cheetah_fast_ecc,.-cheetah_fast_ecc
+
+       .type           c_fast_ecc,#function
+c_fast_ecc:
+       rdpr            %pil, %g2
+       wrpr            %g0, 15, %pil
+       ba,pt           %xcc, etrap_irq
+        rd             %pc, %g7
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
+       mov             %l4, %o1
+       mov             %l5, %o2
+       call            cheetah_fecc_handler
+        add            %sp, PTREGS_OFF, %o0
+       ba,a,pt         %xcc, rtrap_irq
+       .size           c_fast_ecc,.-c_fast_ecc
+
+       /* Our caller has disabled I-cache and performed membar Sync. */
+       .globl          cheetah_cee
+       .type           cheetah_cee,#function
+cheetah_cee:
+       ldxa            [%g0] ASI_ESTATE_ERROR_EN, %g2
+       andn            %g2, ESTATE_ERROR_CEEN, %g2
+       stxa            %g2, [%g0] ASI_ESTATE_ERROR_EN
+       membar          #Sync
+
+       /* Fetch and clear AFSR/AFAR */
+       ldxa            [%g0] ASI_AFSR, %g4
+       ldxa            [%g0] ASI_AFAR, %g5
+       stxa            %g4, [%g0] ASI_AFSR
+       membar          #Sync
+
+       ba,pt           %xcc, __cheetah_log_error
+        nop
+       .size           cheetah_cee,.-cheetah_cee
+
+       .type           c_cee,#function
+c_cee:
+       rdpr            %pil, %g2
+       wrpr            %g0, 15, %pil
+       ba,pt           %xcc, etrap_irq
+        rd             %pc, %g7
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
+       mov             %l4, %o1
+       mov             %l5, %o2
+       call            cheetah_cee_handler
+        add            %sp, PTREGS_OFF, %o0
+       ba,a,pt         %xcc, rtrap_irq
+       .size           c_cee,.-c_cee
+
+       /* Our caller has disabled I-cache+D-cache and performed membar Sync. */
+       .globl          cheetah_deferred_trap
+       .type           cheetah_deferred_trap,#function
+cheetah_deferred_trap:
+       ldxa            [%g0] ASI_ESTATE_ERROR_EN, %g2
+       andn            %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
+       stxa            %g2, [%g0] ASI_ESTATE_ERROR_EN
+       membar          #Sync
+
+       /* Fetch and clear AFSR/AFAR */
+       ldxa            [%g0] ASI_AFSR, %g4
+       ldxa            [%g0] ASI_AFAR, %g5
+       stxa            %g4, [%g0] ASI_AFSR
+       membar          #Sync
+
+       ba,pt           %xcc, __cheetah_log_error
+        nop
+       .size           cheetah_deferred_trap,.-cheetah_deferred_trap
+
+       .type           c_deferred,#function
+c_deferred:
+       rdpr            %pil, %g2
+       wrpr            %g0, 15, %pil
+       ba,pt           %xcc, etrap_irq
+        rd             %pc, %g7
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
+       mov             %l4, %o1
+       mov             %l5, %o2
+       call            cheetah_deferred_handler
+        add            %sp, PTREGS_OFF, %o0
+       ba,a,pt         %xcc, rtrap_irq
+       .size           c_deferred,.-c_deferred
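
As a reading aid for the new cherrs.S above: its Cheetah+ comment documents how the parity trap vectors encode their first argument (bit 0 selects D-cache vs I-cache, bit 1 marks the error unrecoverable), and the four vectors accordingly pass 0x0, 0x1, 0x2 and 0x3 down to the C-level parity handler. A small, hypothetical decoder in C, purely illustrative and not part of the patch:

  #include <stdio.h>

  /* Decode the argument encoding described in the Cheetah+ comment:
   * bit 0: 0 = D-cache, 1 = I-cache
   * bit 1: 0 = recoverable, 1 = unrecoverable
   */
  static void decode_parity_arg(unsigned int type)
  {
          const char *cache = (type & 0x1) ? "I-cache" : "D-cache";
          const char *sev   = (type & 0x2) ? "unrecoverable" : "recoverable";

          printf("type %#x: %s parity error, %s\n", type, cache, sev);
  }

  int main(void)
  {
          for (unsigned int t = 0; t < 4; t++)
                  decode_parity_arg(t);
          return 0;
  }
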
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
deleted file mode 100644 (file)
index fd06e93..0000000
--- a/arch/sparc64/kernel/entry.S
+++ /dev/null
@@ -1,2575 +0,0 @@
-/* $Id: entry.S,v 1.144 2002/02/09 19:49:30 davem Exp $
- * arch/sparc64/kernel/entry.S:  Sparc64 trap low-level entry points.
- *
- * Copyright (C) 1995,1997 David S. Miller (davem@caip.rutgers.edu)
- * Copyright (C) 1996 Eddie C. Dost        (ecd@skynet.be)
- * Copyright (C) 1996 Miguel de Icaza      (miguel@nuclecu.unam.mx)
- * Copyright (C) 1996,98,99 Jakub Jelinek  (jj@sunsite.mff.cuni.cz)
- */
-
-#include <linux/errno.h>
-
-#include <asm/head.h>
-#include <asm/asi.h>
-#include <asm/smp.h>
-#include <asm/ptrace.h>
-#include <asm/page.h>
-#include <asm/signal.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/visasm.h>
-#include <asm/estate.h>
-#include <asm/auxio.h>
-#include <asm/sfafsr.h>
-#include <asm/pil.h>
-#include <asm/unistd.h>
-
-#define curptr      g6
-
-       .text
-       .align          32
-
-       /* This is trivial with the new code... */
-       .globl          do_fpdis
-do_fpdis:
-       sethi           %hi(TSTATE_PEF), %g4
-       rdpr            %tstate, %g5
-       andcc           %g5, %g4, %g0
-       be,pt           %xcc, 1f
-        nop
-       rd              %fprs, %g5
-       andcc           %g5, FPRS_FEF, %g0
-       be,pt           %xcc, 1f
-        nop
-
-       /* Legal state when DCR_IFPOE is set in Cheetah %dcr. */
-       sethi           %hi(109f), %g7
-       ba,pt           %xcc, etrap
-109:    or             %g7, %lo(109b), %g7
-       add             %g0, %g0, %g0
-       ba,a,pt         %xcc, rtrap
-
-1:     TRAP_LOAD_THREAD_REG(%g6, %g1)
-       ldub            [%g6 + TI_FPSAVED], %g5
-       wr              %g0, FPRS_FEF, %fprs
-       andcc           %g5, FPRS_FEF, %g0
-       be,a,pt         %icc, 1f
-        clr            %g7
-       ldx             [%g6 + TI_GSR], %g7
-1:     andcc           %g5, FPRS_DL, %g0
-       bne,pn          %icc, 2f
-        fzero          %f0
-       andcc           %g5, FPRS_DU, %g0
-       bne,pn          %icc, 1f
-        fzero          %f2
-       faddd           %f0, %f2, %f4
-       fmuld           %f0, %f2, %f6
-       faddd           %f0, %f2, %f8
-       fmuld           %f0, %f2, %f10
-       faddd           %f0, %f2, %f12
-       fmuld           %f0, %f2, %f14
-       faddd           %f0, %f2, %f16
-       fmuld           %f0, %f2, %f18
-       faddd           %f0, %f2, %f20
-       fmuld           %f0, %f2, %f22
-       faddd           %f0, %f2, %f24
-       fmuld           %f0, %f2, %f26
-       faddd           %f0, %f2, %f28
-       fmuld           %f0, %f2, %f30
-       faddd           %f0, %f2, %f32
-       fmuld           %f0, %f2, %f34
-       faddd           %f0, %f2, %f36
-       fmuld           %f0, %f2, %f38
-       faddd           %f0, %f2, %f40
-       fmuld           %f0, %f2, %f42
-       faddd           %f0, %f2, %f44
-       fmuld           %f0, %f2, %f46
-       faddd           %f0, %f2, %f48
-       fmuld           %f0, %f2, %f50
-       faddd           %f0, %f2, %f52
-       fmuld           %f0, %f2, %f54
-       faddd           %f0, %f2, %f56
-       fmuld           %f0, %f2, %f58
-       b,pt            %xcc, fpdis_exit2
-        faddd          %f0, %f2, %f60
-1:     mov             SECONDARY_CONTEXT, %g3
-       add             %g6, TI_FPREGS + 0x80, %g1
-       faddd           %f0, %f2, %f4
-       fmuld           %f0, %f2, %f6
-
-661:   ldxa            [%g3] ASI_DMMU, %g5
-       .section        .sun4v_1insn_patch, "ax"
-       .word           661b
-       ldxa            [%g3] ASI_MMU, %g5
-       .previous
-
-       sethi           %hi(sparc64_kern_sec_context), %g2
-       ldx             [%g2 + %lo(sparc64_kern_sec_context)], %g2
-
-661:   stxa            %g2, [%g3] ASI_DMMU
-       .section        .sun4v_1insn_patch, "ax"
-       .word           661b
-       stxa            %g2, [%g3] ASI_MMU
-       .previous
-
-       membar          #Sync
-       add             %g6, TI_FPREGS + 0xc0, %g2
-       faddd           %f0, %f2, %f8
-       fmuld           %f0, %f2, %f10
-       membar          #Sync
-       ldda            [%g1] ASI_BLK_S, %f32
-       ldda            [%g2] ASI_BLK_S, %f48
-       membar          #Sync
-       faddd           %f0, %f2, %f12
-       fmuld           %f0, %f2, %f14
-       faddd           %f0, %f2, %f16
-       fmuld           %f0, %f2, %f18
-       faddd           %f0, %f2, %f20
-       fmuld           %f0, %f2, %f22
-       faddd           %f0, %f2, %f24
-       fmuld           %f0, %f2, %f26
-       faddd           %f0, %f2, %f28
-       fmuld           %f0, %f2, %f30
-       b,pt            %xcc, fpdis_exit
-        nop
-2:     andcc           %g5, FPRS_DU, %g0
-       bne,pt          %icc, 3f
-        fzero          %f32
-       mov             SECONDARY_CONTEXT, %g3
-       fzero           %f34
-
-661:   ldxa            [%g3] ASI_DMMU, %g5
-       .section        .sun4v_1insn_patch, "ax"
-       .word           661b
-       ldxa            [%g3] ASI_MMU, %g5
-       .previous
-
-       add             %g6, TI_FPREGS, %g1
-       sethi           %hi(sparc64_kern_sec_context), %g2
-       ldx             [%g2 + %lo(sparc64_kern_sec_context)], %g2
-
-661:   stxa            %g2, [%g3] ASI_DMMU
-       .section        .sun4v_1insn_patch, "ax"
-       .word           661b
-       stxa            %g2, [%g3] ASI_MMU
-       .previous
-
-       membar          #Sync
-       add             %g6, TI_FPREGS + 0x40, %g2
-       faddd           %f32, %f34, %f36
-       fmuld           %f32, %f34, %f38
-       membar          #Sync
-       ldda            [%g1] ASI_BLK_S, %f0
-       ldda            [%g2] ASI_BLK_S, %f16
-       membar          #Sync
-       faddd           %f32, %f34, %f40
-       fmuld           %f32, %f34, %f42
-       faddd           %f32, %f34, %f44
-       fmuld           %f32, %f34, %f46
-       faddd           %f32, %f34, %f48
-       fmuld           %f32, %f34, %f50
-       faddd           %f32, %f34, %f52
-       fmuld           %f32, %f34, %f54
-       faddd           %f32, %f34, %f56
-       fmuld           %f32, %f34, %f58
-       faddd           %f32, %f34, %f60
-       fmuld           %f32, %f34, %f62
-       ba,pt           %xcc, fpdis_exit
-        nop
-3:     mov             SECONDARY_CONTEXT, %g3
-       add             %g6, TI_FPREGS, %g1
-
-661:   ldxa            [%g3] ASI_DMMU, %g5
-       .section        .sun4v_1insn_patch, "ax"
-       .word           661b
-       ldxa            [%g3] ASI_MMU, %g5
-       .previous
-
-       sethi           %hi(sparc64_kern_sec_context), %g2
-       ldx             [%g2 + %lo(sparc64_kern_sec_context)], %g2
-
-661:   stxa            %g2, [%g3] ASI_DMMU
-       .section        .sun4v_1insn_patch, "ax"
-       .word           661b
-       stxa            %g2, [%g3] ASI_MMU
-       .previous
-
-       membar          #Sync
-       mov             0x40, %g2
-       membar          #Sync
-       ldda            [%g1] ASI_BLK_S, %f0
-       ldda            [%g1 + %g2] ASI_BLK_S, %f16
-       add             %g1, 0x80, %g1
-       ldda            [%g1] ASI_BLK_S, %f32
-       ldda            [%g1 + %g2] ASI_BLK_S, %f48
-       membar          #Sync
-fpdis_exit:
-
-661:   stxa            %g5, [%g3] ASI_DMMU
-       .section        .sun4v_1insn_patch, "ax"
-       .word           661b
-       stxa            %g5, [%g3] ASI_MMU
-       .previous
-
-       membar          #Sync
-fpdis_exit2:
-       wr              %g7, 0, %gsr
-       ldx             [%g6 + TI_XFSR], %fsr
-       rdpr            %tstate, %g3
-       or              %g3, %g4, %g3           ! anal...
-       wrpr            %g3, %tstate
-       wr              %g0, FPRS_FEF, %fprs    ! clean DU/DL bits
-       retry
-
-       .align          32
-fp_other_bounce:
-       call            do_fpother
-        add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
-
-       .globl          do_fpother_check_fitos
-       .align          32
-do_fpother_check_fitos:
-       TRAP_LOAD_THREAD_REG(%g6, %g1)
-       sethi           %hi(fp_other_bounce - 4), %g7
-       or              %g7, %lo(fp_other_bounce - 4), %g7
-
-       /* NOTE: Need to preserve %g7 until we fully commit
-        *       to the fitos fixup.
-        */
-       stx             %fsr, [%g6 + TI_XFSR]
-       rdpr            %tstate, %g3
-       andcc           %g3, TSTATE_PRIV, %g0
-       bne,pn          %xcc, do_fptrap_after_fsr
-        nop
-       ldx             [%g6 + TI_XFSR], %g3
-       srlx            %g3, 14, %g1
-       and             %g1, 7, %g1
-       cmp             %g1, 2                  ! Unfinished FP-OP
-       bne,pn          %xcc, do_fptrap_after_fsr
-        sethi          %hi(1 << 23), %g1       ! Inexact
-       andcc           %g3, %g1, %g0
-       bne,pn          %xcc, do_fptrap_after_fsr
-        rdpr           %tpc, %g1
-       lduwa           [%g1] ASI_AIUP, %g3     ! This cannot ever fail
-#define FITOS_MASK     0xc1f83fe0
-#define FITOS_COMPARE  0x81a01880
-       sethi           %hi(FITOS_MASK), %g1
-       or              %g1, %lo(FITOS_MASK), %g1
-       and             %g3, %g1, %g1
-       sethi           %hi(FITOS_COMPARE), %g2
-       or              %g2, %lo(FITOS_COMPARE), %g2
-       cmp             %g1, %g2
-       bne,pn          %xcc, do_fptrap_after_fsr
-        nop
-       std             %f62, [%g6 + TI_FPREGS + (62 * 4)]
-       sethi           %hi(fitos_table_1), %g1
-       and             %g3, 0x1f, %g2
-       or              %g1, %lo(fitos_table_1),  %g1
-       sllx            %g2, 2, %g2
-       jmpl            %g1 + %g2, %g0
-        ba,pt          %xcc, fitos_emul_continue
-
-fitos_table_1:
-       fitod           %f0, %f62
-       fitod           %f1, %f62
-       fitod           %f2, %f62
-       fitod           %f3, %f62
-       fitod           %f4, %f62
-       fitod           %f5, %f62
-       fitod           %f6, %f62
-       fitod           %f7, %f62
-       fitod           %f8, %f62
-       fitod           %f9, %f62
-       fitod           %f10, %f62
-       fitod           %f11, %f62
-       fitod           %f12, %f62
-       fitod           %f13, %f62
-       fitod           %f14, %f62
-       fitod           %f15, %f62
-       fitod           %f16, %f62
-       fitod           %f17, %f62
-       fitod           %f18, %f62
-       fitod           %f19, %f62
-       fitod           %f20, %f62
-       fitod           %f21, %f62
-       fitod           %f22, %f62
-       fitod           %f23, %f62
-       fitod           %f24, %f62
-       fitod           %f25, %f62
-       fitod           %f26, %f62
-       fitod           %f27, %f62
-       fitod           %f28, %f62
-       fitod           %f29, %f62
-       fitod           %f30, %f62
-       fitod           %f31, %f62
-
-fitos_emul_continue:
-       sethi           %hi(fitos_table_2), %g1
-       srl             %g3, 25, %g2
-       or              %g1, %lo(fitos_table_2), %g1
-       and             %g2, 0x1f, %g2
-       sllx            %g2, 2, %g2
-       jmpl            %g1 + %g2, %g0
-        ba,pt          %xcc, fitos_emul_fini
-
-fitos_table_2:
-       fdtos           %f62, %f0
-       fdtos           %f62, %f1
-       fdtos           %f62, %f2
-       fdtos           %f62, %f3
-       fdtos           %f62, %f4
-       fdtos           %f62, %f5
-       fdtos           %f62, %f6
-       fdtos           %f62, %f7
-       fdtos           %f62, %f8
-       fdtos           %f62, %f9
-       fdtos           %f62, %f10
-       fdtos           %f62, %f11
-       fdtos           %f62, %f12
-       fdtos           %f62, %f13
-       fdtos           %f62, %f14
-       fdtos           %f62, %f15
-       fdtos           %f62, %f16
-       fdtos           %f62, %f17
-       fdtos           %f62, %f18
-       fdtos           %f62, %f19
-       fdtos           %f62, %f20
-       fdtos           %f62, %f21
-       fdtos           %f62, %f22
-       fdtos           %f62, %f23
-       fdtos           %f62, %f24
-       fdtos           %f62, %f25
-       fdtos           %f62, %f26
-       fdtos           %f62, %f27
-       fdtos           %f62, %f28
-       fdtos           %f62, %f29
-       fdtos           %f62, %f30
-       fdtos           %f62, %f31
-
-fitos_emul_fini:
-       ldd             [%g6 + TI_FPREGS + (62 * 4)], %f62
-       done
-
-       .globl          do_fptrap
-       .align          32
-do_fptrap:
-       TRAP_LOAD_THREAD_REG(%g6, %g1)
-       stx             %fsr, [%g6 + TI_XFSR]
-do_fptrap_after_fsr:
-       ldub            [%g6 + TI_FPSAVED], %g3
-       rd              %fprs, %g1
-       or              %g3, %g1, %g3
-       stb             %g3, [%g6 + TI_FPSAVED]
-       rd              %gsr, %g3
-       stx             %g3, [%g6 + TI_GSR]
-       mov             SECONDARY_CONTEXT, %g3
-
-661:   ldxa            [%g3] ASI_DMMU, %g5
-       .section        .sun4v_1insn_patch, "ax"
-       .word           661b
-       ldxa            [%g3] ASI_MMU, %g5
-       .previous
-
-       sethi           %hi(sparc64_kern_sec_context), %g2
-       ldx             [%g2 + %lo(sparc64_kern_sec_context)], %g2
-
-661:   stxa            %g2, [%g3] ASI_DMMU
-       .section        .sun4v_1insn_patch, "ax"
-       .word           661b
-       stxa            %g2, [%g3] ASI_MMU
-       .previous
-
-       membar          #Sync
-       add             %g6, TI_FPREGS, %g2
-       andcc           %g1, FPRS_DL, %g0
-       be,pn           %icc, 4f
-        mov            0x40, %g3
-       stda            %f0, [%g2] ASI_BLK_S
-       stda            %f16, [%g2 + %g3] ASI_BLK_S
-       andcc           %g1, FPRS_DU, %g0
-       be,pn           %icc, 5f
-4:       add           %g2, 128, %g2
-       stda            %f32, [%g2] ASI_BLK_S
-       stda            %f48, [%g2 + %g3] ASI_BLK_S
-5:     mov             SECONDARY_CONTEXT, %g1
-       membar          #Sync
-
-661:   stxa            %g5, [%g1] ASI_DMMU
-       .section        .sun4v_1insn_patch, "ax"
-       .word           661b
-       stxa            %g5, [%g1] ASI_MMU
-       .previous
-
-       membar          #Sync
-       ba,pt           %xcc, etrap
-        wr             %g0, 0, %fprs
-
-       /* The registers for cross calls will be:
-        *
-        * DATA 0: [low 32-bits]  Address of function to call, jmp to this
-        *         [high 32-bits] MMU Context Argument 0, place in %g5
-        * DATA 1: Address Argument 1, place in %g1
-        * DATA 2: Address Argument 2, place in %g7
-        *
-        * With this method we can do most of the cross-call tlb/cache
-        * flushing very quickly.
-        */
-       .text
-       .align          32
-       .globl          do_ivec
-do_ivec:
-       mov             0x40, %g3
-       ldxa            [%g3 + %g0] ASI_INTR_R, %g3
-       sethi           %hi(KERNBASE), %g4
-       cmp             %g3, %g4
-       bgeu,pn         %xcc, do_ivec_xcall
-        srlx           %g3, 32, %g5
-       stxa            %g0, [%g0] ASI_INTR_RECEIVE
-       membar          #Sync
-
-       sethi           %hi(ivector_table_pa), %g2
-       ldx             [%g2 + %lo(ivector_table_pa)], %g2
-       sllx            %g3, 4, %g3
-       add             %g2, %g3, %g3
-
-       TRAP_LOAD_IRQ_WORK_PA(%g6, %g1)
-
-       ldx             [%g6], %g5
-       stxa            %g5, [%g3] ASI_PHYS_USE_EC
-       stx             %g3, [%g6]
-       wr              %g0, 1 << PIL_DEVICE_IRQ, %set_softint
-       retry
-do_ivec_xcall:
-       mov             0x50, %g1
-       ldxa            [%g1 + %g0] ASI_INTR_R, %g1
-       srl             %g3, 0, %g3
-
-       mov             0x60, %g7
-       ldxa            [%g7 + %g0] ASI_INTR_R, %g7
-       stxa            %g0, [%g0] ASI_INTR_RECEIVE
-       membar          #Sync
-       ba,pt           %xcc, 1f
-        nop
-
-       .align          32
-1:     jmpl            %g3, %g0
-        nop
-
-       .globl          getcc, setcc
-getcc:
-       ldx             [%o0 + PT_V9_TSTATE], %o1
-       srlx            %o1, 32, %o1
-       and             %o1, 0xf, %o1
-       retl
-        stx            %o1, [%o0 + PT_V9_G1]
-setcc:
-       ldx             [%o0 + PT_V9_TSTATE], %o1
-       ldx             [%o0 + PT_V9_G1], %o2
-       or              %g0, %ulo(TSTATE_ICC), %o3
-       sllx            %o3, 32, %o3
-       andn            %o1, %o3, %o1
-       sllx            %o2, 32, %o2
-       and             %o2, %o3, %o2
-       or              %o1, %o2, %o1
-       retl
-        stx            %o1, [%o0 + PT_V9_TSTATE]
-
-       .globl          utrap_trap
-utrap_trap:            /* %g3=handler,%g4=level */
-       TRAP_LOAD_THREAD_REG(%g6, %g1)
-       ldx             [%g6 + TI_UTRAPS], %g1
-       brnz,pt         %g1, invoke_utrap
-        nop
-
-       ba,pt           %xcc, etrap
-        rd             %pc, %g7
-       mov             %l4, %o1
-        call           bad_trap
-        add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
-
-invoke_utrap:
-       sllx            %g3, 3, %g3
-       ldx             [%g1 + %g3], %g1
-       save            %sp, -128, %sp
-       rdpr            %tstate, %l6
-       rdpr            %cwp, %l7
-       andn            %l6, TSTATE_CWP, %l6
-       wrpr            %l6, %l7, %tstate
-       rdpr            %tpc, %l6
-       rdpr            %tnpc, %l7
-       wrpr            %g1, 0, %tnpc
-       done
-
-       /* We need to carefully read the error status, ACK
-        * the errors, prevent recursive traps, and pass the
-        * information on to C code for logging.
-        *
-        * We pass the AFAR in as-is, and we encode the status
-        * information as described in asm-sparc64/sfafsr.h
-        */
-       .globl          __spitfire_access_error
-__spitfire_access_error:
-       /* Disable ESTATE error reporting so that we do not
-        * take recursive traps and RED state the processor.
-        */
-       stxa            %g0, [%g0] ASI_ESTATE_ERROR_EN
-       membar          #Sync
-
-       mov             UDBE_UE, %g1
-       ldxa            [%g0] ASI_AFSR, %g4     ! Get AFSR
-
-       /* __spitfire_cee_trap branches here with AFSR in %g4 and
-        * UDBE_CE in %g1.  It only clears ESTATE_ERR_CE in the
-        * ESTATE Error Enable register.
-        */
-__spitfire_cee_trap_continue:
-       ldxa            [%g0] ASI_AFAR, %g5     ! Get AFAR
-
-       rdpr            %tt, %g3
-       and             %g3, 0x1ff, %g3         ! Paranoia
-       sllx            %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
-       or              %g4, %g3, %g4
-       rdpr            %tl, %g3
-       cmp             %g3, 1
-       mov             1, %g3
-       bleu            %xcc, 1f
-        sllx           %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3
-
-       or              %g4, %g3, %g4
-
-       /* Read in the UDB error register state, clearing the
-        * sticky error bits as-needed.  We only clear them if
-        * the UE bit is set.  Likewise, __spitfire_cee_trap
-        * below will only do so if the CE bit is set.
-        *
-        * NOTE: UltraSparc-I/II have high and low UDB error
-        *       registers, corresponding to the two UDB units
-        *       present on those chips.  UltraSparc-IIi only
-        *       has a single UDB, called "SDB" in the manual.
-        *       For IIi the upper UDB register always reads
-        *       as zero so for our purposes things will just
-        *       work with the checks below.
-        */
-1:     ldxa            [%g0] ASI_UDBH_ERROR_R, %g3
-       and             %g3, 0x3ff, %g7         ! Paranoia
-       sllx            %g7, SFSTAT_UDBH_SHIFT, %g7
-       or              %g4, %g7, %g4
-       andcc           %g3, %g1, %g3           ! UDBE_UE or UDBE_CE
-       be,pn           %xcc, 1f
-        nop
-       stxa            %g3, [%g0] ASI_UDB_ERROR_W
-       membar          #Sync
-
-1:     mov             0x18, %g3
-       ldxa            [%g3] ASI_UDBL_ERROR_R, %g3
-       and             %g3, 0x3ff, %g7         ! Paranoia
-       sllx            %g7, SFSTAT_UDBL_SHIFT, %g7
-       or              %g4, %g7, %g4
-       andcc           %g3, %g1, %g3           ! UDBE_UE or UDBE_CE
-       be,pn           %xcc, 1f
-        nop
-       mov             0x18, %g7
-       stxa            %g3, [%g7] ASI_UDB_ERROR_W
-       membar          #Sync
-
-1:     /* Ok, now that we've latched the error state,
-        * clear the sticky bits in the AFSR.
-        */
-       stxa            %g4, [%g0] ASI_AFSR
-       membar          #Sync
-
-       rdpr            %tl, %g2
-       cmp             %g2, 1
-       rdpr            %pil, %g2
-       bleu,pt         %xcc, 1f
-        wrpr           %g0, 15, %pil
-
-       ba,pt           %xcc, etraptl1
-        rd             %pc, %g7
-
-       ba,pt           %xcc, 2f
-        nop
-
-1:     ba,pt           %xcc, etrap_irq
-        rd             %pc, %g7
-
-2:
-#ifdef CONFIG_TRACE_IRQFLAGS
-       call    trace_hardirqs_off
-        nop
-#endif
-       mov             %l4, %o1
-       mov             %l5, %o2
-       call            spitfire_access_error
-        add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
-
-       /* This is the trap handler entry point for ECC correctable
-        * errors.  They are corrected, but we listen for the trap
-        * so that the event can be logged.
-        *
-        * Disrupting errors are either:
-        * 1) single-bit ECC errors during UDB reads to system
-        *    memory
-        * 2) data parity errors during write-back events
-        *
-        * As far as I can make out from the manual, the CEE trap
-        * is only for correctable errors during memory read
-        * accesses by the front-end of the processor.
-        *
-        * The code below is only for trap level 1 CEE events,
-        * as it is the only situation where we can safely record
-        * and log.  For trap level >1 we just clear the CE bit
-        * in the AFSR and return.
-        *
-        * This is just like __spitfire_access_error above, but it
-        * specifically handles correctable errors.  If an
-        * uncorrectable error is indicated in the AFSR we
-        * will branch directly above to __spitfire_access_error
-        * to handle it instead.  Uncorrectable therefore takes
-        * priority over correctable, and the error logging
-        * C code will notice this case by inspecting the
-        * trap type.
-        */
-       .globl          __spitfire_cee_trap
-__spitfire_cee_trap:
-       ldxa            [%g0] ASI_AFSR, %g4     ! Get AFSR
-       mov             1, %g3
-       sllx            %g3, SFAFSR_UE_SHIFT, %g3
-       andcc           %g4, %g3, %g0           ! Check for UE
-       bne,pn          %xcc, __spitfire_access_error
-        nop
-
-       /* Ok, in this case we only have a correctable error.
-        * Indicate in %g1 that we only wish to capture CE state,
-        * and disable only CE error reporting, unlike UE handling
-        * which disables all error reporting.
-        */
-       ldxa            [%g0] ASI_ESTATE_ERROR_EN, %g3
-       andn            %g3, ESTATE_ERR_CE, %g3
-       stxa            %g3, [%g0] ASI_ESTATE_ERROR_EN
-       membar          #Sync
-
-       /* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
-       ba,pt           %xcc, __spitfire_cee_trap_continue
-        mov            UDBE_CE, %g1
-
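
The priority rule spelled out in the comment above __spitfire_cee_trap reads
more easily in C.  A minimal sketch, assuming hypothetical helper names and
illustrative bit positions (the real masks live in the spitfire headers):

        #define SFAFSR_UE_BIT   (1UL << 30)     /* assumed position, illustration only */
        #define ESTATE_ERR_CE   (1UL << 1)      /* assumed position, illustration only */

        void handle_uncorrectable(unsigned long afsr);  /* -> __spitfire_access_error      */
        void handle_correctable(unsigned long afsr);    /* -> __spitfire_cee_trap_continue */

        static void cee_trap(unsigned long afsr, unsigned long *estate_err_en)
        {
                if (afsr & SFAFSR_UE_BIT) {
                        /* Uncorrectable beats correctable: take the full UE path. */
                        handle_uncorrectable(afsr);
                        return;
                }
                /* Correctable only: stop further CE traps, then latch and log. */
                *estate_err_en &= ~ESTATE_ERR_CE;
                handle_correctable(afsr);
        }
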
-       .globl          __spitfire_data_access_exception
-       .globl          __spitfire_data_access_exception_tl1
-__spitfire_data_access_exception_tl1:
-       rdpr            %pstate, %g4
-       wrpr            %g4, PSTATE_MG|PSTATE_AG, %pstate
-       mov             TLB_SFSR, %g3
-       mov             DMMU_SFAR, %g5
-       ldxa            [%g3] ASI_DMMU, %g4     ! Get SFSR
-       ldxa            [%g5] ASI_DMMU, %g5     ! Get SFAR
-       stxa            %g0, [%g3] ASI_DMMU     ! Clear SFSR.FaultValid bit
-       membar          #Sync
-       rdpr            %tt, %g3
-       cmp             %g3, 0x80               ! first win spill/fill trap
-       blu,pn          %xcc, 1f
-        cmp            %g3, 0xff               ! last win spill/fill trap
-       bgu,pn          %xcc, 1f
-        nop
-       ba,pt           %xcc, winfix_dax
-        rdpr           %tpc, %g3
-1:     sethi           %hi(109f), %g7
-       ba,pt           %xcc, etraptl1
-109:    or             %g7, %lo(109b), %g7
-       mov             %l4, %o1
-       mov             %l5, %o2
-       call            spitfire_data_access_exception_tl1
-        add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
-
-__spitfire_data_access_exception:
-       rdpr            %pstate, %g4
-       wrpr            %g4, PSTATE_MG|PSTATE_AG, %pstate
-       mov             TLB_SFSR, %g3
-       mov             DMMU_SFAR, %g5
-       ldxa            [%g3] ASI_DMMU, %g4     ! Get SFSR
-       ldxa            [%g5] ASI_DMMU, %g5     ! Get SFAR
-       stxa            %g0, [%g3] ASI_DMMU     ! Clear SFSR.FaultValid bit
-       membar          #Sync
-       sethi           %hi(109f), %g7
-       ba,pt           %xcc, etrap
-109:    or             %g7, %lo(109b), %g7
-       mov             %l4, %o1
-       mov             %l5, %o2
-       call            spitfire_data_access_exception
-        add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
-
-       .globl          __spitfire_insn_access_exception
-       .globl          __spitfire_insn_access_exception_tl1
-__spitfire_insn_access_exception_tl1:
-       rdpr            %pstate, %g4
-       wrpr            %g4, PSTATE_MG|PSTATE_AG, %pstate
-       mov             TLB_SFSR, %g3
-       ldxa            [%g3] ASI_IMMU, %g4     ! Get SFSR
-       rdpr            %tpc, %g5               ! IMMU has no SFAR, use TPC
-       stxa            %g0, [%g3] ASI_IMMU     ! Clear FaultValid bit
-       membar          #Sync
-       sethi           %hi(109f), %g7
-       ba,pt           %xcc, etraptl1
-109:    or             %g7, %lo(109b), %g7
-       mov             %l4, %o1
-       mov             %l5, %o2
-       call            spitfire_insn_access_exception_tl1
-        add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
-
-__spitfire_insn_access_exception:
-       rdpr            %pstate, %g4
-       wrpr            %g4, PSTATE_MG|PSTATE_AG, %pstate
-       mov             TLB_SFSR, %g3
-       ldxa            [%g3] ASI_IMMU, %g4     ! Get SFSR
-       rdpr            %tpc, %g5               ! IMMU has no SFAR, use TPC
-       stxa            %g0, [%g3] ASI_IMMU     ! Clear FaultValid bit
-       membar          #Sync
-       sethi           %hi(109f), %g7
-       ba,pt           %xcc, etrap
-109:    or             %g7, %lo(109b), %g7
-       mov             %l4, %o1
-       mov             %l5, %o2
-       call            spitfire_insn_access_exception
-        add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
-
-       /* These get patched into the trap table at boot time
-        * once we know we have a cheetah processor.
-        */
-       .globl          cheetah_fecc_trap_vector, cheetah_fecc_trap_vector_tl1
-cheetah_fecc_trap_vector:
-       membar          #Sync
-       ldxa            [%g0] ASI_DCU_CONTROL_REG, %g1
-       andn            %g1, DCU_DC | DCU_IC, %g1
-       stxa            %g1, [%g0] ASI_DCU_CONTROL_REG
-       membar          #Sync
-       sethi           %hi(cheetah_fast_ecc), %g2
-       jmpl            %g2 + %lo(cheetah_fast_ecc), %g0
-        mov            0, %g1
-cheetah_fecc_trap_vector_tl1:
-       membar          #Sync
-       ldxa            [%g0] ASI_DCU_CONTROL_REG, %g1
-       andn            %g1, DCU_DC | DCU_IC, %g1
-       stxa            %g1, [%g0] ASI_DCU_CONTROL_REG
-       membar          #Sync
-       sethi           %hi(cheetah_fast_ecc), %g2
-       jmpl            %g2 + %lo(cheetah_fast_ecc), %g0
-        mov            1, %g1
-       .globl  cheetah_cee_trap_vector, cheetah_cee_trap_vector_tl1
-cheetah_cee_trap_vector:
-       membar          #Sync
-       ldxa            [%g0] ASI_DCU_CONTROL_REG, %g1
-       andn            %g1, DCU_IC, %g1
-       stxa            %g1, [%g0] ASI_DCU_CONTROL_REG
-       membar          #Sync
-       sethi           %hi(cheetah_cee), %g2
-       jmpl            %g2 + %lo(cheetah_cee), %g0
-        mov            0, %g1
-cheetah_cee_trap_vector_tl1:
-       membar          #Sync
-       ldxa            [%g0] ASI_DCU_CONTROL_REG, %g1
-       andn            %g1, DCU_IC, %g1
-       stxa            %g1, [%g0] ASI_DCU_CONTROL_REG
-       membar          #Sync
-       sethi           %hi(cheetah_cee), %g2
-       jmpl            %g2 + %lo(cheetah_cee), %g0
-        mov            1, %g1
-       .globl  cheetah_deferred_trap_vector, cheetah_deferred_trap_vector_tl1
-cheetah_deferred_trap_vector:
-       membar          #Sync
-       ldxa            [%g0] ASI_DCU_CONTROL_REG, %g1;
-       andn            %g1, DCU_DC | DCU_IC, %g1;
-       stxa            %g1, [%g0] ASI_DCU_CONTROL_REG;
-       membar          #Sync;
-       sethi           %hi(cheetah_deferred_trap), %g2
-       jmpl            %g2 + %lo(cheetah_deferred_trap), %g0
-        mov            0, %g1
-cheetah_deferred_trap_vector_tl1:
-       membar          #Sync;
-       ldxa            [%g0] ASI_DCU_CONTROL_REG, %g1;
-       andn            %g1, DCU_DC | DCU_IC, %g1;
-       stxa            %g1, [%g0] ASI_DCU_CONTROL_REG;
-       membar          #Sync;
-       sethi           %hi(cheetah_deferred_trap), %g2
-       jmpl            %g2 + %lo(cheetah_deferred_trap), %g0
-        mov            1, %g1
-
-       /* Cheetah+ specific traps. These are for the new I/D cache parity
-        * error traps.  The first argument to cheetah_plus_parity_error
-        * is encoded as follows:
-        *
-        * Bit0:        0=dcache,1=icache
-        * Bit1:        0=recoverable,1=unrecoverable
-        */
-       .globl          cheetah_plus_dcpe_trap_vector, cheetah_plus_dcpe_trap_vector_tl1
-cheetah_plus_dcpe_trap_vector:
-       membar          #Sync
-       sethi           %hi(do_cheetah_plus_data_parity), %g7
-       jmpl            %g7 + %lo(do_cheetah_plus_data_parity), %g0
-        nop
-       nop
-       nop
-       nop
-       nop
-
-do_cheetah_plus_data_parity:
-       rdpr            %pil, %g2
-       wrpr            %g0, 15, %pil
-       ba,pt           %xcc, etrap_irq
-        rd             %pc, %g7
-#ifdef CONFIG_TRACE_IRQFLAGS
-       call            trace_hardirqs_off
-        nop
-#endif
-       mov             0x0, %o0
-       call            cheetah_plus_parity_error
-        add            %sp, PTREGS_OFF, %o1
-       ba,a,pt         %xcc, rtrap_irq
-
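
The call sites above and below hand 0x0, 0x1, 0x2 and 0x3 to
cheetah_plus_parity_error; per the comment before cheetah_plus_dcpe_trap_vector,
bit 0 selects the cache and bit 1 the severity.  A hedged sketch of the decode,
not the kernel's actual function body:

        struct parity_error {
                int icache;             /* 0 = D-cache, 1 = I-cache            */
                int unrecoverable;      /* set only by the TL>1 "fatal" paths  */
        };

        static struct parity_error decode_parity_arg(unsigned long type)
        {
                struct parity_error e;

                e.icache        = type & 0x1;
                e.unrecoverable = (type & 0x2) != 0;
                return e;
        }
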
-cheetah_plus_dcpe_trap_vector_tl1:
-       membar          #Sync
-       wrpr            PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
-       sethi           %hi(do_dcpe_tl1), %g3
-       jmpl            %g3 + %lo(do_dcpe_tl1), %g0
-        nop
-       nop
-       nop
-       nop
-
-       .globl          cheetah_plus_icpe_trap_vector, cheetah_plus_icpe_trap_vector_tl1
-cheetah_plus_icpe_trap_vector:
-       membar          #Sync
-       sethi           %hi(do_cheetah_plus_insn_parity), %g7
-       jmpl            %g7 + %lo(do_cheetah_plus_insn_parity), %g0
-        nop
-       nop
-       nop
-       nop
-       nop
-
-do_cheetah_plus_insn_parity:
-       rdpr            %pil, %g2
-       wrpr            %g0, 15, %pil
-       ba,pt           %xcc, etrap_irq
-        rd             %pc, %g7
-#ifdef CONFIG_TRACE_IRQFLAGS
-       call            trace_hardirqs_off
-        nop
-#endif
-       mov             0x1, %o0
-       call            cheetah_plus_parity_error
-        add            %sp, PTREGS_OFF, %o1
-       ba,a,pt         %xcc, rtrap_irq
-
-cheetah_plus_icpe_trap_vector_tl1:
-       membar          #Sync
-       wrpr            PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
-       sethi           %hi(do_icpe_tl1), %g3
-       jmpl            %g3 + %lo(do_icpe_tl1), %g0
-        nop
-       nop
-       nop
-       nop
-
-       /* If we take one of these traps when tl >= 1, then we
-        * jump to interrupt globals.  If some trap level above us
-        * was also using interrupt globals, we cannot recover.
-        * We may use all interrupt global registers except %g6.
-        */
-       .globl          do_dcpe_tl1, do_icpe_tl1
-do_dcpe_tl1:
-       rdpr            %tl, %g1                ! Save original trap level
-       mov             1, %g2                  ! Setup TSTATE checking loop
-       sethi           %hi(TSTATE_IG), %g3     ! TSTATE mask bit
-1:     wrpr            %g2, %tl                ! Set trap level to check
-       rdpr            %tstate, %g4            ! Read TSTATE for this level
-       andcc           %g4, %g3, %g0           ! Interrupt globals in use?
-       bne,a,pn        %xcc, do_dcpe_tl1_fatal ! Yep, irrecoverable
-        wrpr           %g1, %tl                ! Restore original trap level
-       add             %g2, 1, %g2             ! Next trap level
-       cmp             %g2, %g1                ! Hit them all yet?
-       ble,pt          %icc, 1b                ! Not yet
-        nop
-       wrpr            %g1, %tl                ! Restore original trap level
-do_dcpe_tl1_nonfatal:  /* Ok we may use interrupt globals safely. */
-       sethi           %hi(dcache_parity_tl1_occurred), %g2
-       lduw            [%g2 + %lo(dcache_parity_tl1_occurred)], %g1
-       add             %g1, 1, %g1
-       stw             %g1, [%g2 + %lo(dcache_parity_tl1_occurred)]
-       /* Reset D-cache parity */
-       sethi           %hi(1 << 16), %g1       ! D-cache size
-       mov             (1 << 5), %g2           ! D-cache line size
-       sub             %g1, %g2, %g1           ! Move down 1 cacheline
-1:     srl             %g1, 14, %g3            ! Compute UTAG
-       membar          #Sync
-       stxa            %g3, [%g1] ASI_DCACHE_UTAG
-       membar          #Sync
-       sub             %g2, 8, %g3             ! 64-bit data word within line
-2:     membar          #Sync
-       stxa            %g0, [%g1 + %g3] ASI_DCACHE_DATA
-       membar          #Sync
-       subcc           %g3, 8, %g3             ! Next 64-bit data word
-       bge,pt          %icc, 2b
-        nop
-       subcc           %g1, %g2, %g1           ! Next cacheline
-       bge,pt          %icc, 1b
-        nop
-       ba,pt           %xcc, dcpe_icpe_tl1_common
-        nop
-
-do_dcpe_tl1_fatal:
-       sethi           %hi(1f), %g7
-       ba,pt           %xcc, etraptl1
-1:     or              %g7, %lo(1b), %g7
-       mov             0x2, %o0
-       call            cheetah_plus_parity_error
-        add            %sp, PTREGS_OFF, %o1
-       ba,pt           %xcc, rtrap
-        nop
-
-do_icpe_tl1:
-       rdpr            %tl, %g1                ! Save original trap level
-       mov             1, %g2                  ! Setup TSTATE checking loop
-       sethi           %hi(TSTATE_IG), %g3     ! TSTATE mask bit
-1:     wrpr            %g2, %tl                ! Set trap level to check
-       rdpr            %tstate, %g4            ! Read TSTATE for this level
-       andcc           %g4, %g3, %g0           ! Interrupt globals in use?
-       bne,a,pn        %xcc, do_icpe_tl1_fatal ! Yep, irrecoverable
-        wrpr           %g1, %tl                ! Restore original trap level
-       add             %g2, 1, %g2             ! Next trap level
-       cmp             %g2, %g1                ! Hit them all yet?
-       ble,pt          %icc, 1b                ! Not yet
-        nop
-       wrpr            %g1, %tl                ! Restore original trap level
-do_icpe_tl1_nonfatal:  /* Ok we may use interrupt globals safely. */
-       sethi           %hi(icache_parity_tl1_occurred), %g2
-       lduw            [%g2 + %lo(icache_parity_tl1_occurred)], %g1
-       add             %g1, 1, %g1
-       stw             %g1, [%g2 + %lo(icache_parity_tl1_occurred)]
-       /* Flush I-cache */
-       sethi           %hi(1 << 15), %g1       ! I-cache size
-       mov             (1 << 5), %g2           ! I-cache line size
-       sub             %g1, %g2, %g1
-1:     or              %g1, (2 << 3), %g3
-       stxa            %g0, [%g3] ASI_IC_TAG
-       membar          #Sync
-       subcc           %g1, %g2, %g1
-       bge,pt          %icc, 1b
-        nop
-       ba,pt           %xcc, dcpe_icpe_tl1_common
-        nop
-
-do_icpe_tl1_fatal:
-       sethi           %hi(1f), %g7
-       ba,pt           %xcc, etraptl1
-1:     or              %g7, %lo(1b), %g7
-       mov             0x3, %o0
-       call            cheetah_plus_parity_error
-        add            %sp, PTREGS_OFF, %o1
-       ba,pt           %xcc, rtrap
-        nop
-       
-dcpe_icpe_tl1_common:
-       /* Flush D-cache, re-enable D/I caches in DCU and finally
-        * retry the trapping instruction.
-        */
-       sethi           %hi(1 << 16), %g1       ! D-cache size
-       mov             (1 << 5), %g2           ! D-cache line size
-       sub             %g1, %g2, %g1
-1:     stxa            %g0, [%g1] ASI_DCACHE_TAG
-       membar          #Sync
-       subcc           %g1, %g2, %g1
-       bge,pt          %icc, 1b
-        nop
-       ldxa            [%g0] ASI_DCU_CONTROL_REG, %g1
-       or              %g1, (DCU_DC | DCU_IC), %g1
-       stxa            %g1, [%g0] ASI_DCU_CONTROL_REG
-       membar          #Sync
-       retry
-
-       /* Capture I/D/E-cache state into per-cpu error scoreboard.
-        *
-        * %g1:         (TL>=1) ? 1 : 0
-        * %g2:         scratch
-        * %g3:         scratch
-        * %g4:         AFSR
-        * %g5:         AFAR
-        * %g6:         unused, will have current thread ptr after etrap
-        * %g7:         scratch
-        */
-__cheetah_log_error:
-       /* Put "TL1" software bit into AFSR. */
-       and             %g1, 0x1, %g1
-       sllx            %g1, 63, %g2
-       or              %g4, %g2, %g4
-
-       /* Get log entry pointer for this cpu at this trap level. */
-       BRANCH_IF_JALAPENO(g2,g3,50f)
-       ldxa            [%g0] ASI_SAFARI_CONFIG, %g2
-       srlx            %g2, 17, %g2
-       ba,pt           %xcc, 60f
-        and            %g2, 0x3ff, %g2
-
-50:    ldxa            [%g0] ASI_JBUS_CONFIG, %g2
-       srlx            %g2, 17, %g2
-       and             %g2, 0x1f, %g2
-
-60:    sllx            %g2, 9, %g2
-       sethi           %hi(cheetah_error_log), %g3
-       ldx             [%g3 + %lo(cheetah_error_log)], %g3
-       brz,pn          %g3, 80f
-        nop
-
-       add             %g3, %g2, %g3
-       sllx            %g1, 8, %g1
-       add             %g3, %g1, %g1
-
-       /* %g1 holds pointer to the top of the logging scoreboard */
-       ldx             [%g1 + 0x0], %g7
-       cmp             %g7, -1
-       bne,pn          %xcc, 80f
-        nop
-
-       stx             %g4, [%g1 + 0x0]
-       stx             %g5, [%g1 + 0x8]
-       add             %g1, 0x10, %g1
-
-       /* %g1 now points to D-cache logging area */
-       set             0x3ff8, %g2     /* DC_addr mask         */
-       and             %g5, %g2, %g2   /* DC_addr bits of AFAR */
-       srlx            %g5, 12, %g3
-       or              %g3, 1, %g3     /* PHYS tag + valid     */
-
-10:    ldxa            [%g2] ASI_DCACHE_TAG, %g7
-       cmp             %g3, %g7        /* TAG match?           */
-       bne,pt          %xcc, 13f
-        nop
-
-       /* Yep, what we want, capture state. */
-       stx             %g2, [%g1 + 0x20]
-       stx             %g7, [%g1 + 0x28]
-
-       /* A membar Sync is required before and after utag access. */
-       membar          #Sync
-       ldxa            [%g2] ASI_DCACHE_UTAG, %g7
-       membar          #Sync
-       stx             %g7, [%g1 + 0x30]
-       ldxa            [%g2] ASI_DCACHE_SNOOP_TAG, %g7
-       stx             %g7, [%g1 + 0x38]
-       clr             %g3
-
-12:    ldxa            [%g2 + %g3] ASI_DCACHE_DATA, %g7
-       stx             %g7, [%g1]
-       add             %g3, (1 << 5), %g3
-       cmp             %g3, (4 << 5)
-       bl,pt           %xcc, 12b
-        add            %g1, 0x8, %g1
-
-       ba,pt           %xcc, 20f
-        add            %g1, 0x20, %g1
-
-13:    sethi           %hi(1 << 14), %g7
-       add             %g2, %g7, %g2
-       srlx            %g2, 14, %g7
-       cmp             %g7, 4
-       bl,pt           %xcc, 10b
-        nop
-
-       add             %g1, 0x40, %g1
-
-       /* %g1 now points to I-cache logging area */
-20:    set             0x1fe0, %g2     /* IC_addr mask         */
-       and             %g5, %g2, %g2   /* IC_addr bits of AFAR */
-       sllx            %g2, 1, %g2     /* IC_addr[13:6]==VA[12:5] */
-       srlx            %g5, (13 - 8), %g3 /* Make PTAG */
-       andn            %g3, 0xff, %g3  /* Mask off undefined bits */
-
-21:    ldxa            [%g2] ASI_IC_TAG, %g7
-       andn            %g7, 0xff, %g7
-       cmp             %g3, %g7
-       bne,pt          %xcc, 23f
-        nop
-
-       /* Yep, what we want, capture state. */
-       stx             %g2, [%g1 + 0x40]
-       stx             %g7, [%g1 + 0x48]
-       add             %g2, (1 << 3), %g2
-       ldxa            [%g2] ASI_IC_TAG, %g7
-       add             %g2, (1 << 3), %g2
-       stx             %g7, [%g1 + 0x50]
-       ldxa            [%g2] ASI_IC_TAG, %g7
-       add             %g2, (1 << 3), %g2
-       stx             %g7, [%g1 + 0x60]
-       ldxa            [%g2] ASI_IC_TAG, %g7
-       stx             %g7, [%g1 + 0x68]
-       sub             %g2, (3 << 3), %g2
-       ldxa            [%g2] ASI_IC_STAG, %g7
-       stx             %g7, [%g1 + 0x58]
-       clr             %g3
-       srlx            %g2, 2, %g2
-
-22:    ldxa            [%g2 + %g3] ASI_IC_INSTR, %g7
-       stx             %g7, [%g1]
-       add             %g3, (1 << 3), %g3
-       cmp             %g3, (8 << 3)
-       bl,pt           %xcc, 22b
-        add            %g1, 0x8, %g1
-
-       ba,pt           %xcc, 30f
-        add            %g1, 0x30, %g1
-
-23:    sethi           %hi(1 << 14), %g7
-       add             %g2, %g7, %g2
-       srlx            %g2, 14, %g7
-       cmp             %g7, 4
-       bl,pt           %xcc, 21b
-        nop
-
-       add             %g1, 0x70, %g1
-
-       /* %g1 now points to E-cache logging area */
-30:    andn            %g5, (32 - 1), %g2
-       stx             %g2, [%g1 + 0x20]
-       ldxa            [%g2] ASI_EC_TAG_DATA, %g7
-       stx             %g7, [%g1 + 0x28]
-       ldxa            [%g2] ASI_EC_R, %g0
-       clr             %g3
-
-31:    ldxa            [%g3] ASI_EC_DATA, %g7
-       stx             %g7, [%g1 + %g3]
-       add             %g3, 0x8, %g3
-       cmp             %g3, 0x20
-
-       bl,pt           %xcc, 31b
-        nop
-80:
-       rdpr            %tt, %g2
-       cmp             %g2, 0x70
-       be              c_fast_ecc
-        cmp            %g2, 0x63
-       be              c_cee
-        nop
-       ba,pt           %xcc, c_deferred
-
-       /* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
-        * in the trap table.  That code has done a memory barrier
-        * and has disabled both the I-cache and D-cache in the DCU
-        * control register.  The I-cache is disabled so that we may
-        * capture the corrupted cache line, and the D-cache is disabled
-        * because corrupt data may have been placed there and we don't
-        * want to reference it.
-        *
-        * %g1 is one if this trap occurred at %tl >= 1.
-        *
-        * Next, we turn off error reporting so that we don't recurse.
-        */
-       .globl          cheetah_fast_ecc
-cheetah_fast_ecc:
-       ldxa            [%g0] ASI_ESTATE_ERROR_EN, %g2
-       andn            %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
-       stxa            %g2, [%g0] ASI_ESTATE_ERROR_EN
-       membar          #Sync
-
-       /* Fetch and clear AFSR/AFAR */
-       ldxa            [%g0] ASI_AFSR, %g4
-       ldxa            [%g0] ASI_AFAR, %g5
-       stxa            %g4, [%g0] ASI_AFSR
-       membar          #Sync
-
-       ba,pt           %xcc, __cheetah_log_error
-        nop
-
-c_fast_ecc:
-       rdpr            %pil, %g2
-       wrpr            %g0, 15, %pil
-       ba,pt           %xcc, etrap_irq
-        rd             %pc, %g7
-#ifdef CONFIG_TRACE_IRQFLAGS
-       call            trace_hardirqs_off
-        nop
-#endif
-       mov             %l4, %o1
-       mov             %l5, %o2
-       call            cheetah_fecc_handler
-        add            %sp, PTREGS_OFF, %o0
-       ba,a,pt         %xcc, rtrap_irq
-
-       /* Our caller has disabled I-cache and performed membar Sync. */
-       .globl          cheetah_cee
-cheetah_cee:
-       ldxa            [%g0] ASI_ESTATE_ERROR_EN, %g2
-       andn            %g2, ESTATE_ERROR_CEEN, %g2
-       stxa            %g2, [%g0] ASI_ESTATE_ERROR_EN
-       membar          #Sync
-
-       /* Fetch and clear AFSR/AFAR */
-       ldxa            [%g0] ASI_AFSR, %g4
-       ldxa            [%g0] ASI_AFAR, %g5
-       stxa            %g4, [%g0] ASI_AFSR
-       membar          #Sync
-
-       ba,pt           %xcc, __cheetah_log_error
-        nop
-
-c_cee:
-       rdpr            %pil, %g2
-       wrpr            %g0, 15, %pil
-       ba,pt           %xcc, etrap_irq
-        rd             %pc, %g7
-#ifdef CONFIG_TRACE_IRQFLAGS
-       call            trace_hardirqs_off
-        nop
-#endif
-       mov             %l4, %o1
-       mov             %l5, %o2
-       call            cheetah_cee_handler
-        add            %sp, PTREGS_OFF, %o0
-       ba,a,pt         %xcc, rtrap_irq
-
-       /* Our caller has disabled I-cache+D-cache and performed membar Sync. */
-       .globl          cheetah_deferred_trap
-cheetah_deferred_trap:
-       ldxa            [%g0] ASI_ESTATE_ERROR_EN, %g2
-       andn            %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
-       stxa            %g2, [%g0] ASI_ESTATE_ERROR_EN
-       membar          #Sync
-
-       /* Fetch and clear AFSR/AFAR */
-       ldxa            [%g0] ASI_AFSR, %g4
-       ldxa            [%g0] ASI_AFAR, %g5
-       stxa            %g4, [%g0] ASI_AFSR
-       membar          #Sync
-
-       ba,pt           %xcc, __cheetah_log_error
-        nop
-
-c_deferred:
-       rdpr            %pil, %g2
-       wrpr            %g0, 15, %pil
-       ba,pt           %xcc, etrap_irq
-        rd             %pc, %g7
-#ifdef CONFIG_TRACE_IRQFLAGS
-       call            trace_hardirqs_off
-        nop
-#endif
-       mov             %l4, %o1
-       mov             %l5, %o2
-       call            cheetah_deferred_handler
-        add            %sp, PTREGS_OFF, %o0
-       ba,a,pt         %xcc, rtrap_irq
-
-       .globl          __do_privact
-__do_privact:
-       mov             TLB_SFSR, %g3
-       stxa            %g0, [%g3] ASI_DMMU     ! Clear FaultValid bit
-       membar          #Sync
-       sethi           %hi(109f), %g7
-       ba,pt           %xcc, etrap
-109:   or              %g7, %lo(109b), %g7
-       call            do_privact
-        add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
-
-       .globl          do_mna
-do_mna:
-       rdpr            %tl, %g3
-       cmp             %g3, 1
-
-       /* Set up %g4/%g5 now as they are used in the
-        * winfixup code.
-        */
-       mov             TLB_SFSR, %g3
-       mov             DMMU_SFAR, %g4
-       ldxa            [%g4] ASI_DMMU, %g4
-       ldxa            [%g3] ASI_DMMU, %g5
-       stxa            %g0, [%g3] ASI_DMMU     ! Clear FaultValid bit
-       membar          #Sync
-       bgu,pn          %icc, winfix_mna
-        rdpr           %tpc, %g3
-
-1:     sethi           %hi(109f), %g7
-       ba,pt           %xcc, etrap
-109:    or             %g7, %lo(109b), %g7
-       mov             %l4, %o1
-       mov             %l5, %o2
-       call            mem_address_unaligned
-        add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
-
-       .globl          do_lddfmna
-do_lddfmna:
-       sethi           %hi(109f), %g7
-       mov             TLB_SFSR, %g4
-       ldxa            [%g4] ASI_DMMU, %g5
-       stxa            %g0, [%g4] ASI_DMMU     ! Clear FaultValid bit
-       membar          #Sync
-       mov             DMMU_SFAR, %g4
-       ldxa            [%g4] ASI_DMMU, %g4
-       ba,pt           %xcc, etrap
-109:    or             %g7, %lo(109b), %g7
-       mov             %l4, %o1
-       mov             %l5, %o2
-       call            handle_lddfmna
-        add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
-
-       .globl          do_stdfmna
-do_stdfmna:
-       sethi           %hi(109f), %g7
-       mov             TLB_SFSR, %g4
-       ldxa            [%g4] ASI_DMMU, %g5
-       stxa            %g0, [%g4] ASI_DMMU     ! Clear FaultValid bit
-       membar          #Sync
-       mov             DMMU_SFAR, %g4
-       ldxa            [%g4] ASI_DMMU, %g4
-       ba,pt           %xcc, etrap
-109:    or             %g7, %lo(109b), %g7
-       mov             %l4, %o1
-       mov             %l5, %o2
-       call            handle_stdfmna
-        add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
-
-       .globl  breakpoint_trap
-breakpoint_trap:
-       call            sparc_breakpoint
-        add            %sp, PTREGS_OFF, %o0
-       ba,pt           %xcc, rtrap
-        nop
-
-       /* SunOS's execv() call only specifies the argv argument, the
-        * environment settings are the same as the calling process's.
-        */
-       .globl  sys_execve
-sys_execve:
-       sethi           %hi(sparc_execve), %g1
-       ba,pt           %xcc, execve_merge
-        or             %g1, %lo(sparc_execve), %g1
-#ifdef CONFIG_COMPAT
-       .globl  sunos_execv
-sunos_execv:
-       stx             %g0, [%sp + PTREGS_OFF + PT_V9_I2]
-       .globl  sys32_execve
-sys32_execve:
-       sethi           %hi(sparc32_execve), %g1
-       or              %g1, %lo(sparc32_execve), %g1
-#endif
-execve_merge:
-       flushw
-       jmpl            %g1, %g0
-        add            %sp, PTREGS_OFF, %o0
-
-       .globl  sys_pipe, sys_sigpause, sys_nis_syscall
-       .globl  sys_rt_sigreturn
-       .globl  sys_ptrace
-       .globl  sys_sigaltstack
-       .align  32
-sys_pipe:      ba,pt           %xcc, sparc_pipe
-                add            %sp, PTREGS_OFF, %o0
-sys_nis_syscall:ba,pt          %xcc, c_sys_nis_syscall
-                add            %sp, PTREGS_OFF, %o0
-sys_memory_ordering:
-               ba,pt           %xcc, sparc_memory_ordering
-                add            %sp, PTREGS_OFF, %o1
-sys_sigaltstack:ba,pt          %xcc, do_sigaltstack
-                add            %i6, STACK_BIAS, %o2
-#ifdef CONFIG_COMPAT
-       .globl  sys32_sigstack
-sys32_sigstack:        ba,pt           %xcc, do_sys32_sigstack
-                mov            %i6, %o2
-       .globl  sys32_sigaltstack
-sys32_sigaltstack:
-               ba,pt           %xcc, do_sys32_sigaltstack
-                mov            %i6, %o2
-#endif
-               .align          32
-#ifdef CONFIG_COMPAT
-       .globl  sys32_sigreturn
-sys32_sigreturn:
-               add             %sp, PTREGS_OFF, %o0
-               call            do_sigreturn32
-                add            %o7, 1f-.-4, %o7
-               nop
-#endif
-sys_rt_sigreturn:
-               add             %sp, PTREGS_OFF, %o0
-               call            do_rt_sigreturn
-                add            %o7, 1f-.-4, %o7
-               nop
-#ifdef CONFIG_COMPAT
-       .globl  sys32_rt_sigreturn
-sys32_rt_sigreturn:
-               add             %sp, PTREGS_OFF, %o0
-               call            do_rt_sigreturn32
-                add            %o7, 1f-.-4, %o7
-               nop
-#endif
-               .align          32
-1:             ldx             [%curptr + TI_FLAGS], %l5
-               andcc           %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
-               be,pt           %icc, rtrap
-                nop
-               add             %sp, PTREGS_OFF, %o0
-               call            syscall_trace
-                mov            1, %o1
-
-               ba,pt           %xcc, rtrap
-                nop
-
-       /* This is how fork() was meant to be done, an 8-instruction entry.
-        *
-        * I questioned the following code briefly; let me clear things
-        * up so you do not reason about it the way I did.
-        *
-        * Know the fork_kpsr etc. we use in the sparc32 port?  We don't
-        * need it here because the only piece of window state we copy to
-        * the child is the CWP register.  Even if the parent sleeps,
-        * we are safe because we stuck it into pt_regs of the parent
-        * so it will not change.
-        *
-        * XXX This raises the question, whether we can do the same on
-        * XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim.  The
-        * XXX answer is yes.  We stick fork_kpsr in UREG_G0 and
-        * XXX fork_kwim in UREG_G1 (global registers are considered
-        * XXX volatile across a system call in the sparc ABI I think
-        * XXX if it isn't we can use regs->y instead, anyone who depends
-        * XXX upon the Y register being preserved across a fork deserves
-        * XXX to lose).
-        *
-        * In fact we should take advantage of that fact for other things
-        * during system calls...
-        */
-       .globl  sys_fork, sys_vfork, sys_clone, sparc_exit
-       .globl  ret_from_syscall
-       .align  32
-sys_vfork:     /* Under Linux, vfork and fork are just special cases of clone. */
-               sethi           %hi(0x4000 | 0x0100 | SIGCHLD), %o0
-               or              %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
-               ba,pt           %xcc, sys_clone
-sys_fork:       clr            %o1
-               mov             SIGCHLD, %o0
-sys_clone:     flushw
-               movrz           %o1, %fp, %o1
-               mov             0, %o3
-               ba,pt           %xcc, sparc_do_fork
-                add            %sp, PTREGS_OFF, %o2
-ret_from_syscall:
-               /* Clear current_thread_info()->new_child, and
-                * check performance counter stuff too.
-                */
-               stb             %g0, [%g6 + TI_NEW_CHILD]
-               ldx             [%g6 + TI_FLAGS], %l0
-               call            schedule_tail
-                mov            %g7, %o0
-               andcc           %l0, _TIF_PERFCTR, %g0
-               be,pt           %icc, 1f
-                nop
-               ldx             [%g6 + TI_PCR], %o7
-               wr              %g0, %o7, %pcr
-
-               /* Blackbird errata workaround.  See commentary in
-                * smp.c:smp_percpu_timer_interrupt() for more
-                * information.
-                */
-               ba,pt           %xcc, 99f
-                nop
-               .align          64
-99:            wr              %g0, %g0, %pic
-               rd              %pic, %g0
-
-1:             b,pt            %xcc, ret_sys_call
-                ldx            [%sp + PTREGS_OFF + PT_V9_I0], %o0
-sparc_exit:    rdpr            %pstate, %g2
-               wrpr            %g2, PSTATE_IE, %pstate
-               rdpr            %otherwin, %g1
-               rdpr            %cansave, %g3
-               add             %g3, %g1, %g3
-               wrpr            %g3, 0x0, %cansave
-               wrpr            %g0, 0x0, %otherwin
-               wrpr            %g2, 0x0, %pstate
-               ba,pt           %xcc, sys_exit
-                stb            %g0, [%g6 + TI_WSAVED]
-
-linux_sparc_ni_syscall:
-       sethi           %hi(sys_ni_syscall), %l7
-       b,pt            %xcc, 4f
-        or             %l7, %lo(sys_ni_syscall), %l7
-
-linux_syscall_trace32:
-       add             %sp, PTREGS_OFF, %o0
-       call            syscall_trace
-        clr            %o1
-       srl             %i0, 0, %o0
-       srl             %i4, 0, %o4
-       srl             %i1, 0, %o1
-       srl             %i2, 0, %o2
-       b,pt            %xcc, 2f
-        srl            %i3, 0, %o3
-
-linux_syscall_trace:
-       add             %sp, PTREGS_OFF, %o0
-       call            syscall_trace
-        clr            %o1
-       mov             %i0, %o0
-       mov             %i1, %o1
-       mov             %i2, %o2
-       mov             %i3, %o3
-       b,pt            %xcc, 2f
-        mov            %i4, %o4
-
-
-       /* Linux 32-bit system calls enter here... */
-       .align  32
-       .globl  linux_sparc_syscall32
-linux_sparc_syscall32:
-       /* Direct access to user regs, much faster. */
-       cmp             %g1, NR_SYSCALLS                        ! IEU1  Group
-       bgeu,pn         %xcc, linux_sparc_ni_syscall            ! CTI
-        srl            %i0, 0, %o0                             ! IEU0
-       sll             %g1, 2, %l4                             ! IEU0  Group
-       srl             %i4, 0, %o4                             ! IEU1
-       lduw            [%l7 + %l4], %l7                        ! Load
-       srl             %i1, 0, %o1                             ! IEU0  Group
-       ldx             [%curptr + TI_FLAGS], %l0               ! Load
-
-       srl             %i5, 0, %o5                             ! IEU1
-       srl             %i2, 0, %o2                             ! IEU0  Group
-       andcc           %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
-       bne,pn          %icc, linux_syscall_trace32             ! CTI
-        mov            %i0, %l5                                ! IEU1
-       call            %l7                                     ! CTI   Group brk forced
-        srl            %i3, 0, %o3                             ! IEU0
-       ba,a,pt         %xcc, 3f
-
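
The srl %iN, 0, %oN instructions above zero-extend each 32-bit argument before
the handler is called, since a compat task may leave junk in the upper halves
of its registers.  The same operation in plain C, as a standalone illustration
rather than kernel code:

        #include <stdio.h>

        int main(void)
        {
                unsigned long reg = 0xdeadbeefcafebabeUL; /* junk in the upper half         */
                unsigned long arg = (unsigned int)reg;    /* what each srl %iN, 0, %oN does */

                printf("%#lx\n", arg);                    /* prints 0xcafebabe              */
                return 0;
        }
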
-       /* Linux native system calls enter here... */
-       .align  32
-       .globl  linux_sparc_syscall
-linux_sparc_syscall:
-       /* Direct access to user regs, much faster. */
-       cmp             %g1, NR_SYSCALLS                        ! IEU1  Group
-       bgeu,pn         %xcc, linux_sparc_ni_syscall            ! CTI
-        mov            %i0, %o0                                ! IEU0
-       sll             %g1, 2, %l4                             ! IEU0  Group
-       mov             %i1, %o1                                ! IEU1
-       lduw            [%l7 + %l4], %l7                        ! Load
-4:     mov             %i2, %o2                                ! IEU0  Group
-       ldx             [%curptr + TI_FLAGS], %l0               ! Load
-
-       mov             %i3, %o3                                ! IEU1
-       mov             %i4, %o4                                ! IEU0  Group
-       andcc           %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
-       bne,pn          %icc, linux_syscall_trace               ! CTI   Group
-        mov            %i0, %l5                                ! IEU0
-2:     call            %l7                                     ! CTI   Group brk forced
-        mov            %i5, %o5                                ! IEU0
-       nop
-
-3:     stx             %o0, [%sp + PTREGS_OFF + PT_V9_I0]
-ret_sys_call:
-       ldx             [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
-       ldx             [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
-       sra             %o0, 0, %o0
-       mov             %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
-       sllx            %g2, 32, %g2
-
-       /* Check if force_successful_syscall_return()
-        * was invoked.
-        */
-       ldub            [%curptr + TI_SYS_NOERROR], %l2
-       brnz,a,pn       %l2, 80f
-        stb            %g0, [%curptr + TI_SYS_NOERROR]
-
-       cmp             %o0, -ERESTART_RESTARTBLOCK
-       bgeu,pn         %xcc, 1f
-        andcc          %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
-80:
-       /* System call success, clear Carry condition code. */
-       andn            %g3, %g2, %g3
-       stx             %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]  
-       bne,pn          %icc, linux_syscall_trace2
-        add            %l1, 0x4, %l2                   ! npc = npc+4
-       stx             %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
-       ba,pt           %xcc, rtrap
-        stx            %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
-
-1:
-       /* System call failure, set Carry condition code.
-        * Also, get abs(errno) to return to the process.
-        */
-       andcc           %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6  
-       sub             %g0, %o0, %o0
-       or              %g3, %g2, %g3
-       stx             %o0, [%sp + PTREGS_OFF + PT_V9_I0]
-       stx             %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
-       bne,pn          %icc, linux_syscall_trace2
-        add            %l1, 0x4, %l2                   ! npc = npc+4
-       stx             %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
-
-       b,pt            %xcc, rtrap
-        stx            %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
-linux_syscall_trace2:
-       add             %sp, PTREGS_OFF, %o0
-       call            syscall_trace
-        mov            1, %o1
-       stx             %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
-       ba,pt           %xcc, rtrap
-        stx            %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
-
-       .align          32
-       .globl          __flushw_user
-__flushw_user:
-       rdpr            %otherwin, %g1
-       brz,pn          %g1, 2f
-        clr            %g2
-1:     save            %sp, -128, %sp
-       rdpr            %otherwin, %g1
-       brnz,pt         %g1, 1b
-        add            %g2, 1, %g2
-1:     sub             %g2, 1, %g2
-       brnz,pt         %g2, 1b
-        restore        %g0, %g0, %g0
-2:     retl
-        nop
-
-       /* Flush %fp and %i7 to the stack for all register
-        * windows active inside of the cpu.  This allows
-        * show_stack_trace() to avoid using an expensive
-        * 'flushw'.
-        */
-       .globl          stack_trace_flush
-       .type           stack_trace_flush,#function
-stack_trace_flush:
-       rdpr            %pstate, %o0
-       wrpr            %o0, PSTATE_IE, %pstate
-
-       rdpr            %cwp, %g1
-       rdpr            %canrestore, %g2
-       sub             %g1, 1, %g3
-
-1:     brz,pn          %g2, 2f
-        sub            %g2, 1, %g2
-       wrpr            %g3, %cwp
-       stx             %fp, [%sp + STACK_BIAS + RW_V9_I6]
-       stx             %i7, [%sp + STACK_BIAS + RW_V9_I7]
-       ba,pt           %xcc, 1b
-        sub            %g3, 1, %g3
-
-2:     wrpr            %g1, %cwp
-       wrpr            %o0, %pstate
-
-       retl
-        nop
-       .size           stack_trace_flush,.-stack_trace_flush
-
-#ifdef CONFIG_SMP
-       .globl          hard_smp_processor_id
-hard_smp_processor_id:
-#endif
-       .globl          real_hard_smp_processor_id
-real_hard_smp_processor_id:
-       __GET_CPUID(%o0)
-       retl
-        nop
-
-       /* %o0: devhandle
-        * %o1: devino
-        *
-        * returns %o0: sysino
-        */
-       .globl  sun4v_devino_to_sysino
-       .type   sun4v_devino_to_sysino,#function
-sun4v_devino_to_sysino:
-       mov     HV_FAST_INTR_DEVINO2SYSINO, %o5
-       ta      HV_FAST_TRAP
-       retl
-        mov    %o1, %o0
-       .size   sun4v_devino_to_sysino, .-sun4v_devino_to_sysino
-
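
The sun4v stubs that follow all share the fast-trap convention the comments
document: the hypervisor function number is loaded into %o5, the arguments stay
in %o0-%o4, "ta HV_FAST_TRAP" enters the hypervisor, and the status (or, for
value-returning calls such as this one, the value moved down from %o1) comes
back in %o0.  A hedged sketch of the C-side view of the wrapper above; the
prototype is an assumption rather than a quote from a header:

        extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
                                                    unsigned long devino);

        /* Hypothetical usage: translate a device interrupt source into the
         * system interrupt number the generic IRQ layer works with.
         */
        static unsigned long map_ino(unsigned long devhandle, unsigned long devino)
        {
                return sun4v_devino_to_sysino(devhandle, devino);
        }
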
-       /* %o0: sysino
-        *
-        * returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED})
-        */
-       .globl  sun4v_intr_getenabled
-       .type   sun4v_intr_getenabled,#function
-sun4v_intr_getenabled:
-       mov     HV_FAST_INTR_GETENABLED, %o5
-       ta      HV_FAST_TRAP
-       retl
-        mov    %o1, %o0
-       .size   sun4v_intr_getenabled, .-sun4v_intr_getenabled
-
-       /* %o0: sysino
-        * %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
-        */
-       .globl  sun4v_intr_setenabled
-       .type   sun4v_intr_setenabled,#function
-sun4v_intr_setenabled:
-       mov     HV_FAST_INTR_SETENABLED, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_intr_setenabled, .-sun4v_intr_setenabled
-
-       /* %o0: sysino
-        *
-        * returns %o0: intr_state (HV_INTR_STATE_*)
-        */
-       .globl  sun4v_intr_getstate
-       .type   sun4v_intr_getstate,#function
-sun4v_intr_getstate:
-       mov     HV_FAST_INTR_GETSTATE, %o5
-       ta      HV_FAST_TRAP
-       retl
-        mov    %o1, %o0
-       .size   sun4v_intr_getstate, .-sun4v_intr_getstate
-
-       /* %o0: sysino
-        * %o1: intr_state (HV_INTR_STATE_*)
-        */
-       .globl  sun4v_intr_setstate
-       .type   sun4v_intr_setstate,#function
-sun4v_intr_setstate:
-       mov     HV_FAST_INTR_SETSTATE, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_intr_setstate, .-sun4v_intr_setstate
-
-       /* %o0: sysino
-        *
-        * returns %o0: cpuid
-        */
-       .globl  sun4v_intr_gettarget
-       .type   sun4v_intr_gettarget,#function
-sun4v_intr_gettarget:
-       mov     HV_FAST_INTR_GETTARGET, %o5
-       ta      HV_FAST_TRAP
-       retl
-        mov    %o1, %o0
-       .size   sun4v_intr_gettarget, .-sun4v_intr_gettarget
-
-       /* %o0: sysino
-        * %o1: cpuid
-        */
-       .globl  sun4v_intr_settarget
-       .type   sun4v_intr_settarget,#function
-sun4v_intr_settarget:
-       mov     HV_FAST_INTR_SETTARGET, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_intr_settarget, .-sun4v_intr_settarget
-
-       /* %o0: cpuid
-        * %o1: pc
-        * %o2: rtba
-        * %o3: arg0
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_cpu_start
-       .type   sun4v_cpu_start,#function
-sun4v_cpu_start:
-       mov     HV_FAST_CPU_START, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_cpu_start, .-sun4v_cpu_start
-
-       /* %o0: cpuid
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_cpu_stop
-       .type   sun4v_cpu_stop,#function
-sun4v_cpu_stop:
-       mov     HV_FAST_CPU_STOP, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_cpu_stop, .-sun4v_cpu_stop
-
-       /* returns %o0: status  */
-       .globl  sun4v_cpu_yield
-       .type   sun4v_cpu_yield, #function
-sun4v_cpu_yield:
-       mov     HV_FAST_CPU_YIELD, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_cpu_yield, .-sun4v_cpu_yield
-
-       /* %o0: type
-        * %o1: queue paddr
-        * %o2: num queue entries
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_cpu_qconf
-       .type   sun4v_cpu_qconf,#function
-sun4v_cpu_qconf:
-       mov     HV_FAST_CPU_QCONF, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_cpu_qconf, .-sun4v_cpu_qconf
-
-       /* %o0: num cpus in cpu list
-        * %o1: cpu list paddr
-        * %o2: mondo block paddr
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_cpu_mondo_send
-       .type   sun4v_cpu_mondo_send,#function
-sun4v_cpu_mondo_send:
-       mov     HV_FAST_CPU_MONDO_SEND, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_cpu_mondo_send, .-sun4v_cpu_mondo_send
-
-       /* %o0: CPU ID
-        *
-        * returns %o0: -status if status non-zero, else
-        *         %o0: cpu state as HV_CPU_STATE_*
-        */
-       .globl  sun4v_cpu_state
-       .type   sun4v_cpu_state,#function
-sun4v_cpu_state:
-       mov     HV_FAST_CPU_STATE, %o5
-       ta      HV_FAST_TRAP
-       brnz,pn %o0, 1f
-        sub    %g0, %o0, %o0
-       mov     %o1, %o0
-1:     retl
-        nop
-       .size   sun4v_cpu_state, .-sun4v_cpu_state
-
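
sun4v_cpu_state is the one stub in this group that folds two results into a
single return value: a non-zero hypervisor status comes back negated, otherwise
the CPU state from %o1 is returned as-is.  A hedged caller sketch; the
prototype and the state constant are assumptions:

        extern long sun4v_cpu_state(unsigned long cpuid);

        #define HV_CPU_STATE_RUNNING    2       /* assumed value, illustration only */

        static int cpu_is_running(unsigned long cpuid)
        {
                long state = sun4v_cpu_state(cpuid);

                if (state < 0)                  /* negated hypervisor error status */
                        return 0;
                return state == HV_CPU_STATE_RUNNING;
        }
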
-       /* %o0: virtual address
-        * %o1: must be zero
-        * %o2: TTE
-        * %o3: HV_MMU_* flags
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_mmu_map_perm_addr
-       .type   sun4v_mmu_map_perm_addr,#function
-sun4v_mmu_map_perm_addr:
-       mov     HV_FAST_MMU_MAP_PERM_ADDR, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_mmu_map_perm_addr, .-sun4v_mmu_map_perm_addr
-
-       /* %o0: number of TSB descriptions
-        * %o1: TSB descriptions real address
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_mmu_tsb_ctx0
-       .type   sun4v_mmu_tsb_ctx0,#function
-sun4v_mmu_tsb_ctx0:
-       mov     HV_FAST_MMU_TSB_CTX0, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_mmu_tsb_ctx0, .-sun4v_mmu_tsb_ctx0
-
-       /* %o0: API group number
-        * %o1: pointer to unsigned long major number storage
-        * %o2: pointer to unsigned long minor number storage
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_get_version
-       .type   sun4v_get_version,#function
-sun4v_get_version:
-       mov     HV_CORE_GET_VER, %o5
-       mov     %o1, %o3
-       mov     %o2, %o4
-       ta      HV_CORE_TRAP
-       stx     %o1, [%o3]
-       retl
-        stx    %o2, [%o4]
-       .size   sun4v_get_version, .-sun4v_get_version
-
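
sun4v_get_version shows the pattern used by every stub that hands back more
than one value: the pointer arguments are parked in spare registers across the
trap and the hypervisor's %o1/%o2 results are stored through them afterwards.
A hedged usage sketch; the prototype and the group number are assumptions:

        extern unsigned long sun4v_get_version(unsigned long group,
                                               unsigned long *major,
                                               unsigned long *minor);

        /* Hypothetical usage: a zero status means major/minor were filled in. */
        static int core_api_version(unsigned long *major, unsigned long *minor)
        {
                return sun4v_get_version(0x0001 /* core group, assumed */,
                                         major, minor) == 0;
        }
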
-       /* %o0: API group number
-        * %o1: desired major number
-        * %o2: desired minor number
-        * %o3: pointer to unsigned long actual minor number storage
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_set_version
-       .type   sun4v_set_version,#function
-sun4v_set_version:
-       mov     HV_CORE_SET_VER, %o5
-       mov     %o3, %o4
-       ta      HV_CORE_TRAP
-       retl
-        stx    %o1, [%o4]
-       .size   sun4v_set_version, .-sun4v_set_version
-
-       /* %o0: pointer to unsigned long time
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_tod_get
-       .type   sun4v_tod_get,#function
-sun4v_tod_get:
-       mov     %o0, %o4
-       mov     HV_FAST_TOD_GET, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%o4]
-       retl
-        nop
-       .size   sun4v_tod_get, .-sun4v_tod_get
-
-       /* %o0: time
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_tod_set
-       .type   sun4v_tod_set,#function
-sun4v_tod_set:
-       mov     HV_FAST_TOD_SET, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_tod_set, .-sun4v_tod_set
-
-       /* %o0: pointer to unsigned long status
-        *
-        * returns %o0: signed character
-        */
-       .globl  sun4v_con_getchar
-       .type   sun4v_con_getchar,#function
-sun4v_con_getchar:
-       mov     %o0, %o4
-       mov     HV_FAST_CONS_GETCHAR, %o5
-       clr     %o0
-       clr     %o1
-       ta      HV_FAST_TRAP
-       stx     %o0, [%o4]
-       retl
-        sra    %o1, 0, %o0
-       .size   sun4v_con_getchar, .-sun4v_con_getchar
-
-       /* %o0: signed long character
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_con_putchar
-       .type   sun4v_con_putchar,#function
-sun4v_con_putchar:
-       mov     HV_FAST_CONS_PUTCHAR, %o5
-       ta      HV_FAST_TRAP
-       retl
-        sra    %o0, 0, %o0
-       .size   sun4v_con_putchar, .-sun4v_con_putchar
-
-       /* %o0: buffer real address
-        * %o1: buffer size
-        * %o2: pointer to unsigned long bytes_read
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_con_read
-       .type   sun4v_con_read,#function
-sun4v_con_read:
-       mov     %o2, %o4
-       mov     HV_FAST_CONS_READ, %o5
-       ta      HV_FAST_TRAP
-       brnz    %o0, 1f
-        cmp    %o1, -1         /* break */
-       be,a,pn %icc, 1f
-        mov    %o1, %o0
-       cmp     %o1, -2         /* hup */
-       be,a,pn %icc, 1f
-        mov    %o1, %o0
-       stx     %o1, [%o4]
-1:     retl
-        nop
-       .size   sun4v_con_read, .-sun4v_con_read
-
-       /* %o0: buffer real address
-        * %o1: buffer size
-        * %o2: pointer to unsigned long bytes_written
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_con_write
-       .type   sun4v_con_write,#function
-sun4v_con_write:
-       mov     %o2, %o4
-       mov     HV_FAST_CONS_WRITE, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%o4]
-       retl
-        nop
-       .size   sun4v_con_write, .-sun4v_con_write
-
-       /* %o0: soft state
-        * %o1: address of description string
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_mach_set_soft_state
-       .type   sun4v_mach_set_soft_state,#function
-sun4v_mach_set_soft_state:
-       mov     HV_FAST_MACH_SET_SOFT_STATE, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_mach_set_soft_state, .-sun4v_mach_set_soft_state
-
-       /* %o0: exit code
-        *
-        * Does not return.
-        */
-       .globl  sun4v_mach_exit
-       .type   sun4v_mach_exit,#function
-sun4v_mach_exit:
-       mov     HV_FAST_MACH_EXIT, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_mach_exit, .-sun4v_mach_exit
-
-       /* %o0: buffer real address
-        * %o1: buffer length
-        * %o2: pointer to unsigned long real_buf_len
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_mach_desc
-       .type   sun4v_mach_desc,#function
-sun4v_mach_desc:
-       mov     %o2, %o4
-       mov     HV_FAST_MACH_DESC, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%o4]
-       retl
-        nop
-       .size   sun4v_mach_desc, .-sun4v_mach_desc
-
-       /* %o0: new timeout in milliseconds
-        * %o1: pointer to unsigned long orig_timeout
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_mach_set_watchdog
-       .type   sun4v_mach_set_watchdog,#function
-sun4v_mach_set_watchdog:
-       mov     %o1, %o4
-       mov     HV_FAST_MACH_SET_WATCHDOG, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%o4]
-       retl
-        nop
-       .size   sun4v_mach_set_watchdog, .-sun4v_mach_set_watchdog
-
-       /* No inputs and does not return.  */
-       .globl  sun4v_mach_sir
-       .type   sun4v_mach_sir,#function
-sun4v_mach_sir:
-       mov     %o1, %o4
-       mov     HV_FAST_MACH_SIR, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%o4]
-       retl
-        nop
-       .size   sun4v_mach_sir, .-sun4v_mach_sir
-
-       /* %o0: channel
-        * %o1: ra
-        * %o2: num_entries
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ldc_tx_qconf
-       .type   sun4v_ldc_tx_qconf,#function
-sun4v_ldc_tx_qconf:
-       mov     HV_FAST_LDC_TX_QCONF, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_ldc_tx_qconf, .-sun4v_ldc_tx_qconf
-
-       /* %o0: channel
-        * %o1: pointer to unsigned long ra
-        * %o2: pointer to unsigned long num_entries
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ldc_tx_qinfo
-       .type   sun4v_ldc_tx_qinfo,#function
-sun4v_ldc_tx_qinfo:
-       mov     %o1, %g1
-       mov     %o2, %g2
-       mov     HV_FAST_LDC_TX_QINFO, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%g1]
-       stx     %o2, [%g2]
-       retl
-        nop
-       .size   sun4v_ldc_tx_qinfo, .-sun4v_ldc_tx_qinfo
-
-       /* %o0: channel
-        * %o1: pointer to unsigned long head_off
-        * %o2: pointer to unsigned long tail_off
-        * %o3: pointer to unsigned long chan_state
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ldc_tx_get_state
-       .type   sun4v_ldc_tx_get_state,#function
-sun4v_ldc_tx_get_state:
-       mov     %o1, %g1
-       mov     %o2, %g2
-       mov     %o3, %g3
-       mov     HV_FAST_LDC_TX_GET_STATE, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%g1]
-       stx     %o2, [%g2]
-       stx     %o3, [%g3]
-       retl
-        nop
-       .size   sun4v_ldc_tx_get_state, .-sun4v_ldc_tx_get_state
-
-       /* %o0: channel
-        * %o1: tail_off
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ldc_tx_set_qtail
-       .type   sun4v_ldc_tx_set_qtail,#function
-sun4v_ldc_tx_set_qtail:
-       mov     HV_FAST_LDC_TX_SET_QTAIL, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_ldc_tx_set_qtail, .-sun4v_ldc_tx_set_qtail
-
-       /* %o0: channel
-        * %o1: ra
-        * %o2: num_entries
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ldc_rx_qconf
-       .type   sun4v_ldc_rx_qconf,#function
-sun4v_ldc_rx_qconf:
-       mov     HV_FAST_LDC_RX_QCONF, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_ldc_rx_qconf, .-sun4v_ldc_rx_qconf
-
-       /* %o0: channel
-        * %o1: pointer to unsigned long ra
-        * %o2: pointer to unsigned long num_entries
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ldc_rx_qinfo
-       .type   sun4v_ldc_rx_qinfo,#function
-sun4v_ldc_rx_qinfo:
-       mov     %o1, %g1
-       mov     %o2, %g2
-       mov     HV_FAST_LDC_RX_QINFO, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%g1]
-       stx     %o2, [%g2]
-       retl
-        nop
-       .size   sun4v_ldc_rx_qinfo, .-sun4v_ldc_rx_qinfo
-
-       /* %o0: channel
-        * %o1: pointer to unsigned long head_off
-        * %o2: pointer to unsigned long tail_off
-        * %o3: pointer to unsigned long chan_state
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ldc_rx_get_state
-       .type   sun4v_ldc_rx_get_state,#function
-sun4v_ldc_rx_get_state:
-       mov     %o1, %g1
-       mov     %o2, %g2
-       mov     %o3, %g3
-       mov     HV_FAST_LDC_RX_GET_STATE, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%g1]
-       stx     %o2, [%g2]
-       stx     %o3, [%g3]
-       retl
-        nop
-       .size   sun4v_ldc_rx_get_state, .-sun4v_ldc_rx_get_state
-
-       /* %o0: channel
-        * %o1: head_off
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ldc_rx_set_qhead
-       .type   sun4v_ldc_rx_set_qhead,#function
-sun4v_ldc_rx_set_qhead:
-       mov     HV_FAST_LDC_RX_SET_QHEAD, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_ldc_rx_set_qhead, .-sun4v_ldc_rx_set_qhead
-
-       /* %o0: channel
-        * %o1: ra
-        * %o2: num_entries
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ldc_set_map_table
-       .type   sun4v_ldc_set_map_table,#function
-sun4v_ldc_set_map_table:
-       mov     HV_FAST_LDC_SET_MAP_TABLE, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_ldc_set_map_table, .-sun4v_ldc_set_map_table
-
-       /* %o0: channel
-        * %o1: pointer to unsigned long ra
-        * %o2: pointer to unsigned long num_entries
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ldc_get_map_table
-       .type   sun4v_ldc_get_map_table,#function
-sun4v_ldc_get_map_table:
-       mov     %o1, %g1
-       mov     %o2, %g2
-       mov     HV_FAST_LDC_GET_MAP_TABLE, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%g1]
-       stx     %o2, [%g2]
-       retl
-        nop
-       .size   sun4v_ldc_get_map_table, .-sun4v_ldc_get_map_table
-
-       /* %o0: channel
-        * %o1: dir_code
-        * %o2: tgt_raddr
-        * %o3: lcl_raddr
-        * %o4: len
-        * %o5: pointer to unsigned long actual_len
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ldc_copy
-       .type   sun4v_ldc_copy,#function
-sun4v_ldc_copy:
-       mov     %o5, %g1
-       mov     HV_FAST_LDC_COPY, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%g1]
-       retl
-        nop
-       .size   sun4v_ldc_copy, .-sun4v_ldc_copy
-
-       /* %o0: channel
-        * %o1: cookie
-        * %o2: pointer to unsigned long ra
-        * %o3: pointer to unsigned long perm
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ldc_mapin
-       .type   sun4v_ldc_mapin,#function
-sun4v_ldc_mapin:
-       mov     %o2, %g1
-       mov     %o3, %g2
-       mov     HV_FAST_LDC_MAPIN, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%g1]
-       stx     %o2, [%g2]
-       retl
-        nop
-       .size   sun4v_ldc_mapin, .-sun4v_ldc_mapin
-
-       /* %o0: ra
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ldc_unmap
-       .type   sun4v_ldc_unmap,#function
-sun4v_ldc_unmap:
-       mov     HV_FAST_LDC_UNMAP, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_ldc_unmap, .-sun4v_ldc_unmap
-
-       /* %o0: channel
-        * %o1: cookie
-        * %o2: mte_cookie
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ldc_revoke
-       .type   sun4v_ldc_revoke,#function
-sun4v_ldc_revoke:
-       mov     HV_FAST_LDC_REVOKE, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_ldc_revoke, .-sun4v_ldc_revoke
-
-       /* %o0: device handle
-        * %o1: device INO
-        * %o2: pointer to unsigned long cookie
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_vintr_get_cookie
-       .type   sun4v_vintr_get_cookie,#function
-sun4v_vintr_get_cookie:
-       mov     %o2, %g1
-       mov     HV_FAST_VINTR_GET_COOKIE, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%g1]
-       retl
-        nop
-       .size   sun4v_vintr_get_cookie, .-sun4v_vintr_get_cookie
-
-       /* %o0: device handle
-        * %o1: device INO
-        * %o2: cookie
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_vintr_set_cookie
-       .type   sun4v_vintr_set_cookie,#function
-sun4v_vintr_set_cookie:
-       mov     HV_FAST_VINTR_SET_COOKIE, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_vintr_set_cookie, .-sun4v_vintr_set_cookie
-
-       /* %o0: device handle
-        * %o1: device INO
-        * %o2: pointer to unsigned long valid_state
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_vintr_get_valid
-       .type   sun4v_vintr_get_valid,#function
-sun4v_vintr_get_valid:
-       mov     %o2, %g1
-       mov     HV_FAST_VINTR_GET_VALID, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%g1]
-       retl
-        nop
-       .size   sun4v_vintr_get_valid, .-sun4v_vintr_get_valid
-
-       /* %o0: device handle
-        * %o1: device INO
-        * %o2: valid_state
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_vintr_set_valid
-       .type   sun4v_vintr_set_valid,#function
-sun4v_vintr_set_valid:
-       mov     HV_FAST_VINTR_SET_VALID, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_vintr_set_valid, .-sun4v_vintr_set_valid
-
-       /* %o0: device handle
-        * %o1: device INO
-        * %o2: pointer to unsigned long state
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_vintr_get_state
-       .type   sun4v_vintr_get_state,#function
-sun4v_vintr_get_state:
-       mov     %o2, %g1
-       mov     HV_FAST_VINTR_GET_STATE, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%g1]
-       retl
-        nop
-       .size   sun4v_vintr_get_state, .-sun4v_vintr_get_state
-
-       /* %o0: device handle
-        * %o1: device INO
-        * %o2: state
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_vintr_set_state
-       .type   sun4v_vintr_set_state,#function
-sun4v_vintr_set_state:
-       mov     HV_FAST_VINTR_SET_STATE, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_vintr_set_state, .-sun4v_vintr_set_state
-
-       /* %o0: device handle
-        * %o1: device INO
-        * %o2: pointer to unsigned long cpuid
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_vintr_get_target
-       .type   sun4v_vintr_get_target,#function
-sun4v_vintr_get_target:
-       mov     %o2, %g1
-       mov     HV_FAST_VINTR_GET_TARGET, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%g1]
-       retl
-        nop
-       .size   sun4v_vintr_get_target, .-sun4v_vintr_get_target
-
-       /* %o0: device handle
-        * %o1: device INO
-        * %o2: cpuid
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_vintr_set_target
-       .type   sun4v_vintr_set_target,#function
-sun4v_vintr_set_target:
-       mov     HV_FAST_VINTR_SET_TARGET, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_vintr_set_target, .-sun4v_vintr_set_target
-
-       /* %o0: NCS sub-function
-        * %o1: sub-function arg real-address
-        * %o2: sub-function arg size
-        *
-        * returns %o0: status
-        */
-       .globl  sun4v_ncs_request
-       .type   sun4v_ncs_request,#function
-sun4v_ncs_request:
-       mov     HV_FAST_NCS_REQUEST, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_ncs_request, .-sun4v_ncs_request
-
-       .globl  sun4v_svc_send
-       .type   sun4v_svc_send,#function
-sun4v_svc_send:
-       save    %sp, -192, %sp
-       mov     %i0, %o0
-       mov     %i1, %o1
-       mov     %i2, %o2
-       mov     HV_FAST_SVC_SEND, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%i3]
-       ret
-       restore
-       .size   sun4v_svc_send, .-sun4v_svc_send
-
-       .globl  sun4v_svc_recv
-       .type   sun4v_svc_recv,#function
-sun4v_svc_recv:
-       save    %sp, -192, %sp
-       mov     %i0, %o0
-       mov     %i1, %o1
-       mov     %i2, %o2
-       mov     HV_FAST_SVC_RECV, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%i3]
-       ret
-       restore
-       .size   sun4v_svc_recv, .-sun4v_svc_recv
-
-       .globl  sun4v_svc_getstatus
-       .type   sun4v_svc_getstatus,#function
-sun4v_svc_getstatus:
-       mov     HV_FAST_SVC_GETSTATUS, %o5
-       mov     %o1, %o4
-       ta      HV_FAST_TRAP
-       stx     %o1, [%o4]
-       retl
-        nop
-       .size   sun4v_svc_getstatus, .-sun4v_svc_getstatus
-
-       .globl  sun4v_svc_setstatus
-       .type   sun4v_svc_setstatus,#function
-sun4v_svc_setstatus:
-       mov     HV_FAST_SVC_SETSTATUS, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_svc_setstatus, .-sun4v_svc_setstatus
-
-       .globl  sun4v_svc_clrstatus
-       .type   sun4v_svc_clrstatus,#function
-sun4v_svc_clrstatus:
-       mov     HV_FAST_SVC_CLRSTATUS, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_svc_clrstatus, .-sun4v_svc_clrstatus
-
-       .globl  sun4v_mmustat_conf
-       .type   sun4v_mmustat_conf,#function
-sun4v_mmustat_conf:
-       mov     %o1, %o4
-       mov     HV_FAST_MMUSTAT_CONF, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%o4]
-       retl
-        nop
-       .size   sun4v_mmustat_conf, .-sun4v_mmustat_conf
-
-       .globl  sun4v_mmustat_info
-       .type   sun4v_mmustat_info,#function
-sun4v_mmustat_info:
-       mov     %o0, %o4
-       mov     HV_FAST_MMUSTAT_INFO, %o5
-       ta      HV_FAST_TRAP
-       stx     %o1, [%o4]
-       retl
-        nop
-       .size   sun4v_mmustat_info, .-sun4v_mmustat_info
-
-       .globl  sun4v_mmu_demap_all
-       .type   sun4v_mmu_demap_all,#function
-sun4v_mmu_demap_all:
-       clr     %o0
-       clr     %o1
-       mov     HV_MMU_ALL, %o2
-       mov     HV_FAST_MMU_DEMAP_ALL, %o5
-       ta      HV_FAST_TRAP
-       retl
-        nop
-       .size   sun4v_mmu_demap_all, .-sun4v_mmu_demap_all
diff --git a/arch/sparc64/kernel/fpu_traps.S b/arch/sparc64/kernel/fpu_traps.S
new file mode 100644 (file)
index 0000000..a686482
--- /dev/null
@@ -0,0 +1,384 @@
+       /* This is trivial with the new code... */
+       .globl          do_fpdis
+       .type           do_fpdis,#function
+do_fpdis:
+       sethi           %hi(TSTATE_PEF), %g4
+       rdpr            %tstate, %g5
+       andcc           %g5, %g4, %g0
+       be,pt           %xcc, 1f
+        nop
+       rd              %fprs, %g5
+       andcc           %g5, FPRS_FEF, %g0
+       be,pt           %xcc, 1f
+        nop
+
+       /* Legal state when DCR_IFPOE is set in Cheetah %dcr. */
+       sethi           %hi(109f), %g7
+       ba,pt           %xcc, etrap
+109:    or             %g7, %lo(109b), %g7
+       add             %g0, %g0, %g0
+       ba,a,pt         %xcc, rtrap
+
+1:     TRAP_LOAD_THREAD_REG(%g6, %g1)
+       ldub            [%g6 + TI_FPSAVED], %g5
+       wr              %g0, FPRS_FEF, %fprs
+       andcc           %g5, FPRS_FEF, %g0
+       be,a,pt         %icc, 1f
+        clr            %g7
+       ldx             [%g6 + TI_GSR], %g7
+1:     andcc           %g5, FPRS_DL, %g0
+       bne,pn          %icc, 2f
+        fzero          %f0
+       andcc           %g5, FPRS_DU, %g0
+       bne,pn          %icc, 1f
+        fzero          %f2
+       faddd           %f0, %f2, %f4
+       fmuld           %f0, %f2, %f6
+       faddd           %f0, %f2, %f8
+       fmuld           %f0, %f2, %f10
+       faddd           %f0, %f2, %f12
+       fmuld           %f0, %f2, %f14
+       faddd           %f0, %f2, %f16
+       fmuld           %f0, %f2, %f18
+       faddd           %f0, %f2, %f20
+       fmuld           %f0, %f2, %f22
+       faddd           %f0, %f2, %f24
+       fmuld           %f0, %f2, %f26
+       faddd           %f0, %f2, %f28
+       fmuld           %f0, %f2, %f30
+       faddd           %f0, %f2, %f32
+       fmuld           %f0, %f2, %f34
+       faddd           %f0, %f2, %f36
+       fmuld           %f0, %f2, %f38
+       faddd           %f0, %f2, %f40
+       fmuld           %f0, %f2, %f42
+       faddd           %f0, %f2, %f44
+       fmuld           %f0, %f2, %f46
+       faddd           %f0, %f2, %f48
+       fmuld           %f0, %f2, %f50
+       faddd           %f0, %f2, %f52
+       fmuld           %f0, %f2, %f54
+       faddd           %f0, %f2, %f56
+       fmuld           %f0, %f2, %f58
+       b,pt            %xcc, fpdis_exit2
+        faddd          %f0, %f2, %f60
+1:     mov             SECONDARY_CONTEXT, %g3
+       add             %g6, TI_FPREGS + 0x80, %g1
+       faddd           %f0, %f2, %f4
+       fmuld           %f0, %f2, %f6
+
+661:   ldxa            [%g3] ASI_DMMU, %g5
+       .section        .sun4v_1insn_patch, "ax"
+       .word           661b
+       ldxa            [%g3] ASI_MMU, %g5
+       .previous
+
+       sethi           %hi(sparc64_kern_sec_context), %g2
+       ldx             [%g2 + %lo(sparc64_kern_sec_context)], %g2
+
+661:   stxa            %g2, [%g3] ASI_DMMU
+       .section        .sun4v_1insn_patch, "ax"
+       .word           661b
+       stxa            %g2, [%g3] ASI_MMU
+       .previous
+
+       membar          #Sync
+       add             %g6, TI_FPREGS + 0xc0, %g2
+       faddd           %f0, %f2, %f8
+       fmuld           %f0, %f2, %f10
+       membar          #Sync
+       ldda            [%g1] ASI_BLK_S, %f32
+       ldda            [%g2] ASI_BLK_S, %f48
+       membar          #Sync
+       faddd           %f0, %f2, %f12
+       fmuld           %f0, %f2, %f14
+       faddd           %f0, %f2, %f16
+       fmuld           %f0, %f2, %f18
+       faddd           %f0, %f2, %f20
+       fmuld           %f0, %f2, %f22
+       faddd           %f0, %f2, %f24
+       fmuld           %f0, %f2, %f26
+       faddd           %f0, %f2, %f28
+       fmuld           %f0, %f2, %f30
+       b,pt            %xcc, fpdis_exit
+        nop
+2:     andcc           %g5, FPRS_DU, %g0
+       bne,pt          %icc, 3f
+        fzero          %f32
+       mov             SECONDARY_CONTEXT, %g3
+       fzero           %f34
+
+661:   ldxa            [%g3] ASI_DMMU, %g5
+       .section        .sun4v_1insn_patch, "ax"
+       .word           661b
+       ldxa            [%g3] ASI_MMU, %g5
+       .previous
+
+       add             %g6, TI_FPREGS, %g1
+       sethi           %hi(sparc64_kern_sec_context), %g2
+       ldx             [%g2 + %lo(sparc64_kern_sec_context)], %g2
+
+661:   stxa            %g2, [%g3] ASI_DMMU
+       .section        .sun4v_1insn_patch, "ax"
+       .word           661b
+       stxa            %g2, [%g3] ASI_MMU
+       .previous
+
+       membar          #Sync
+       add             %g6, TI_FPREGS + 0x40, %g2
+       faddd           %f32, %f34, %f36
+       fmuld           %f32, %f34, %f38
+       membar          #Sync
+       ldda            [%g1] ASI_BLK_S, %f0
+       ldda            [%g2] ASI_BLK_S, %f16
+       membar          #Sync
+       faddd           %f32, %f34, %f40
+       fmuld           %f32, %f34, %f42
+       faddd           %f32, %f34, %f44
+       fmuld           %f32, %f34, %f46
+       faddd           %f32, %f34, %f48
+       fmuld           %f32, %f34, %f50
+       faddd           %f32, %f34, %f52
+       fmuld           %f32, %f34, %f54
+       faddd           %f32, %f34, %f56
+       fmuld           %f32, %f34, %f58
+       faddd           %f32, %f34, %f60
+       fmuld           %f32, %f34, %f62
+       ba,pt           %xcc, fpdis_exit
+        nop
+3:     mov             SECONDARY_CONTEXT, %g3
+       add             %g6, TI_FPREGS, %g1
+
+661:   ldxa            [%g3] ASI_DMMU, %g5
+       .section        .sun4v_1insn_patch, "ax"
+       .word           661b
+       ldxa            [%g3] ASI_MMU, %g5
+       .previous
+
+       sethi           %hi(sparc64_kern_sec_context), %g2
+       ldx             [%g2 + %lo(sparc64_kern_sec_context)], %g2
+
+661:   stxa            %g2, [%g3] ASI_DMMU
+       .section        .sun4v_1insn_patch, "ax"
+       .word           661b
+       stxa            %g2, [%g3] ASI_MMU
+       .previous
+
+       membar          #Sync
+       mov             0x40, %g2
+       membar          #Sync
+       ldda            [%g1] ASI_BLK_S, %f0
+       ldda            [%g1 + %g2] ASI_BLK_S, %f16
+       add             %g1, 0x80, %g1
+       ldda            [%g1] ASI_BLK_S, %f32
+       ldda            [%g1 + %g2] ASI_BLK_S, %f48
+       membar          #Sync
+fpdis_exit:
+
+661:   stxa            %g5, [%g3] ASI_DMMU
+       .section        .sun4v_1insn_patch, "ax"
+       .word           661b
+       stxa            %g5, [%g3] ASI_MMU
+       .previous
+
+       membar          #Sync
+fpdis_exit2:
+       wr              %g7, 0, %gsr
+       ldx             [%g6 + TI_XFSR], %fsr
+       rdpr            %tstate, %g3
+       or              %g3, %g4, %g3           ! anal...
+       wrpr            %g3, %tstate
+       wr              %g0, FPRS_FEF, %fprs    ! clean DU/DL bits
+       retry
+       .size           do_fpdis,.-do_fpdis
+
+       .align          32
+       .type           fp_other_bounce,#function
+fp_other_bounce:
+       call            do_fpother
+        add            %sp, PTREGS_OFF, %o0
+       ba,pt           %xcc, rtrap
+        nop
+       .size           fp_other_bounce,.-fp_other_bounce
+
+       .align          32
+       .globl          do_fpother_check_fitos
+       .type           do_fpother_check_fitos,#function
+do_fpother_check_fitos:
+       TRAP_LOAD_THREAD_REG(%g6, %g1)
+       sethi           %hi(fp_other_bounce - 4), %g7
+       or              %g7, %lo(fp_other_bounce - 4), %g7
+
+       /* NOTE: Need to preserve %g7 until we fully commit
+        *       to the fitos fixup.
+        */
+       stx             %fsr, [%g6 + TI_XFSR]
+       rdpr            %tstate, %g3
+       andcc           %g3, TSTATE_PRIV, %g0
+       bne,pn          %xcc, do_fptrap_after_fsr
+        nop
+       ldx             [%g6 + TI_XFSR], %g3
+       srlx            %g3, 14, %g1
+       and             %g1, 7, %g1
+       cmp             %g1, 2                  ! Unfinished FP-OP
+       bne,pn          %xcc, do_fptrap_after_fsr
+        sethi          %hi(1 << 23), %g1       ! Inexact
+       andcc           %g3, %g1, %g0
+       bne,pn          %xcc, do_fptrap_after_fsr
+        rdpr           %tpc, %g1
+       lduwa           [%g1] ASI_AIUP, %g3     ! This cannot ever fail
+#define FITOS_MASK     0xc1f83fe0
+#define FITOS_COMPARE  0x81a01880
+       sethi           %hi(FITOS_MASK), %g1
+       or              %g1, %lo(FITOS_MASK), %g1
+       and             %g3, %g1, %g1
+       sethi           %hi(FITOS_COMPARE), %g2
+       or              %g2, %lo(FITOS_COMPARE), %g2
+       cmp             %g1, %g2
+       bne,pn          %xcc, do_fptrap_after_fsr
+        nop
+       std             %f62, [%g6 + TI_FPREGS + (62 * 4)]
+       sethi           %hi(fitos_table_1), %g1
+       and             %g3, 0x1f, %g2
+       or              %g1, %lo(fitos_table_1),  %g1
+       sllx            %g2, 2, %g2
+       jmpl            %g1 + %g2, %g0
+        ba,pt          %xcc, fitos_emul_continue
+
+fitos_table_1:
+       fitod           %f0, %f62
+       fitod           %f1, %f62
+       fitod           %f2, %f62
+       fitod           %f3, %f62
+       fitod           %f4, %f62
+       fitod           %f5, %f62
+       fitod           %f6, %f62
+       fitod           %f7, %f62
+       fitod           %f8, %f62
+       fitod           %f9, %f62
+       fitod           %f10, %f62
+       fitod           %f11, %f62
+       fitod           %f12, %f62
+       fitod           %f13, %f62
+       fitod           %f14, %f62
+       fitod           %f15, %f62
+       fitod           %f16, %f62
+       fitod           %f17, %f62
+       fitod           %f18, %f62
+       fitod           %f19, %f62
+       fitod           %f20, %f62
+       fitod           %f21, %f62
+       fitod           %f22, %f62
+       fitod           %f23, %f62
+       fitod           %f24, %f62
+       fitod           %f25, %f62
+       fitod           %f26, %f62
+       fitod           %f27, %f62
+       fitod           %f28, %f62
+       fitod           %f29, %f62
+       fitod           %f30, %f62
+       fitod           %f31, %f62
+
+fitos_emul_continue:
+       sethi           %hi(fitos_table_2), %g1
+       srl             %g3, 25, %g2
+       or              %g1, %lo(fitos_table_2), %g1
+       and             %g2, 0x1f, %g2
+       sllx            %g2, 2, %g2
+       jmpl            %g1 + %g2, %g0
+        ba,pt          %xcc, fitos_emul_fini
+
+fitos_table_2:
+       fdtos           %f62, %f0
+       fdtos           %f62, %f1
+       fdtos           %f62, %f2
+       fdtos           %f62, %f3
+       fdtos           %f62, %f4
+       fdtos           %f62, %f5
+       fdtos           %f62, %f6
+       fdtos           %f62, %f7
+       fdtos           %f62, %f8
+       fdtos           %f62, %f9
+       fdtos           %f62, %f10
+       fdtos           %f62, %f11
+       fdtos           %f62, %f12
+       fdtos           %f62, %f13
+       fdtos           %f62, %f14
+       fdtos           %f62, %f15
+       fdtos           %f62, %f16
+       fdtos           %f62, %f17
+       fdtos           %f62, %f18
+       fdtos           %f62, %f19
+       fdtos           %f62, %f20
+       fdtos           %f62, %f21
+       fdtos           %f62, %f22
+       fdtos           %f62, %f23
+       fdtos           %f62, %f24
+       fdtos           %f62, %f25
+       fdtos           %f62, %f26
+       fdtos           %f62, %f27
+       fdtos           %f62, %f28
+       fdtos           %f62, %f29
+       fdtos           %f62, %f30
+       fdtos           %f62, %f31
+
+fitos_emul_fini:
+       ldd             [%g6 + TI_FPREGS + (62 * 4)], %f62
+       done
+       .size           do_fpother_check_fitos,.-do_fpother_check_fitos
+
+       .align          32
+       .globl          do_fptrap
+       .type           do_fptrap,#function
+do_fptrap:
+       TRAP_LOAD_THREAD_REG(%g6, %g1)
+       stx             %fsr, [%g6 + TI_XFSR]
+do_fptrap_after_fsr:
+       ldub            [%g6 + TI_FPSAVED], %g3
+       rd              %fprs, %g1
+       or              %g3, %g1, %g3
+       stb             %g3, [%g6 + TI_FPSAVED]
+       rd              %gsr, %g3
+       stx             %g3, [%g6 + TI_GSR]
+       mov             SECONDARY_CONTEXT, %g3
+
+661:   ldxa            [%g3] ASI_DMMU, %g5
+       .section        .sun4v_1insn_patch, "ax"
+       .word           661b
+       ldxa            [%g3] ASI_MMU, %g5
+       .previous
+
+       sethi           %hi(sparc64_kern_sec_context), %g2
+       ldx             [%g2 + %lo(sparc64_kern_sec_context)], %g2
+
+661:   stxa            %g2, [%g3] ASI_DMMU
+       .section        .sun4v_1insn_patch, "ax"
+       .word           661b
+       stxa            %g2, [%g3] ASI_MMU
+       .previous
+
+       membar          #Sync
+       add             %g6, TI_FPREGS, %g2
+       andcc           %g1, FPRS_DL, %g0
+       be,pn           %icc, 4f
+        mov            0x40, %g3
+       stda            %f0, [%g2] ASI_BLK_S
+       stda            %f16, [%g2 + %g3] ASI_BLK_S
+       andcc           %g1, FPRS_DU, %g0
+       be,pn           %icc, 5f
+4:       add           %g2, 128, %g2
+       stda            %f32, [%g2] ASI_BLK_S
+       stda            %f48, [%g2 + %g3] ASI_BLK_S
+5:     mov             SECONDARY_CONTEXT, %g1
+       membar          #Sync
+
+661:   stxa            %g5, [%g1] ASI_DMMU
+       .section        .sun4v_1insn_patch, "ax"
+       .word           661b
+       stxa            %g5, [%g1] ASI_MMU
+       .previous
+
+       membar          #Sync
+       ba,pt           %xcc, etrap
+        wr             %g0, 0, %fprs
+       .size           do_fptrap,.-do_fptrap
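
The FITOS_MASK/FITOS_COMPARE check in do_fpother_check_fitos above decodes the trapping instruction word to see whether it is a fitos, then emulates it as a fitod into the %f62 scratch register followed by a fdtos into the real destination. A minimal C sketch of that decode step (function and variable names are illustrative, not kernel API):

    #include <stdint.h>
    #include <stdbool.h>

    #define FITOS_MASK     0xc1f83fe0u
    #define FITOS_COMPARE  0x81a01880u

    /* Returns true if insn is a fitos and extracts its register fields,
     * mirroring the mask/compare and the two jump-table indices used above. */
    static bool decode_fitos(uint32_t insn, unsigned *rs2, unsigned *rd)
    {
            if ((insn & FITOS_MASK) != FITOS_COMPARE)
                    return false;            /* not fitos: take the do_fptrap_after_fsr path */
            *rs2 = insn & 0x1f;              /* index into fitos_table_1 (fitod %f<rs2>, %f62) */
            *rd  = (insn >> 25) & 0x1f;      /* index into fitos_table_2 (fdtos %f62, %f<rd>) */
            return true;
    }
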
diff --git a/arch/sparc64/kernel/getsetcc.S b/arch/sparc64/kernel/getsetcc.S
new file mode 100644 (file)
index 0000000..a14d272
--- /dev/null
@@ -0,0 +1,24 @@
+       .globl          getcc
+       .type           getcc,#function
+getcc:
+       ldx             [%o0 + PT_V9_TSTATE], %o1
+       srlx            %o1, 32, %o1
+       and             %o1, 0xf, %o1
+       retl
+        stx            %o1, [%o0 + PT_V9_G1]
+       .size           getcc,.-getcc
+
+       .globl          setcc
+       .type           setcc,#function
+setcc:
+       ldx             [%o0 + PT_V9_TSTATE], %o1
+       ldx             [%o0 + PT_V9_G1], %o2
+       or              %g0, %ulo(TSTATE_ICC), %o3
+       sllx            %o3, 32, %o3
+       andn            %o1, %o3, %o1
+       sllx            %o2, 32, %o2
+       and             %o2, %o3, %o2
+       or              %o1, %o2, %o1
+       retl
+        stx            %o1, [%o0 + PT_V9_TSTATE]
+       .size           setcc,.-setcc
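
getcc and setcc above move the integer condition codes between bits 35:32 of the saved TSTATE and the %g1 slot of the pt_regs image. Roughly, in C (a sketch only; the real helpers work through the PT_V9_TSTATE/PT_V9_G1 offsets):

    #include <stdint.h>

    #define TSTATE_ICC  0x0000000f00000000ull   /* %icc field of TSTATE, bits 35:32 */

    static uint64_t get_cc(uint64_t tstate)
    {
            return (tstate >> 32) & 0xf;        /* what getcc stores into the %g1 slot */
    }

    static uint64_t set_cc(uint64_t tstate, uint64_t cc)
    {
            return (tstate & ~TSTATE_ICC) | ((cc << 32) & TSTATE_ICC);  /* what setcc writes back */
    }
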
index 34f8ff57c56ba157ef591d7bf30329fea0be6fea..c9afef093d51b3aceed553bd0ad2c21c0858d565 100644 (file)
 #include <asm/ttable.h>
 #include <asm/mmu.h>
 #include <asm/cpudata.h>
+#include <asm/pil.h>
+#include <asm/estate.h>
+#include <asm/sfafsr.h>
+#include <asm/unistd.h>
        
 /* This section from _start to sparc64_boot_end should fit into
  * 0x0000000000404000 to 0x0000000000408000.
@@ -823,7 +827,16 @@ sparc64_boot_end:
 #include "etrap.S"
 #include "rtrap.S"
 #include "winfixup.S"
-#include "entry.S"
+#include "fpu_traps.S"
+#include "ivec.S"
+#include "getsetcc.S"
+#include "utrap.S"
+#include "spiterrs.S"
+#include "cherrs.S"
+#include "misctrap.S"
+#include "syscalls.S"
+#include "helpers.S"
+#include "hvcalls.S"
 #include "sun4v_tlb_miss.S"
 #include "sun4v_ivec.S"
 #include "ktlb.S"
diff --git a/arch/sparc64/kernel/helpers.S b/arch/sparc64/kernel/helpers.S
new file mode 100644 (file)
index 0000000..314dd0c
--- /dev/null
@@ -0,0 +1,63 @@
+       .align  32
+       .globl  __flushw_user
+       .type   __flushw_user,#function
+__flushw_user:
+       rdpr    %otherwin, %g1
+       brz,pn  %g1, 2f
+        clr    %g2
+1:     save    %sp, -128, %sp
+       rdpr    %otherwin, %g1
+       brnz,pt %g1, 1b
+        add    %g2, 1, %g2
+1:     sub     %g2, 1, %g2
+       brnz,pt %g2, 1b
+        restore %g0, %g0, %g0
+2:     retl
+        nop
+       .size   __flushw_user,.-__flushw_user
+
+       /* Flush %fp and %i7 to the stack for all register
+        * windows active inside of the cpu.  This allows
+        * show_stack_trace() to avoid using an expensive
+        * 'flushw'.
+        */
+       .globl          stack_trace_flush
+       .type           stack_trace_flush,#function
+stack_trace_flush:
+       rdpr            %pstate, %o0
+       wrpr            %o0, PSTATE_IE, %pstate
+
+       rdpr            %cwp, %g1
+       rdpr            %canrestore, %g2
+       sub             %g1, 1, %g3
+
+1:     brz,pn          %g2, 2f
+        sub            %g2, 1, %g2
+       wrpr            %g3, %cwp
+       stx             %fp, [%sp + STACK_BIAS + RW_V9_I6]
+       stx             %i7, [%sp + STACK_BIAS + RW_V9_I7]
+       ba,pt           %xcc, 1b
+        sub            %g3, 1, %g3
+
+2:     wrpr            %g1, %cwp
+       wrpr            %o0, %pstate
+
+       retl
+        nop
+       .size           stack_trace_flush,.-stack_trace_flush
+
+#ifdef CONFIG_SMP
+       .globl          hard_smp_processor_id
+       .type           hard_smp_processor_id,#function
+hard_smp_processor_id:
+#endif
+       .globl          real_hard_smp_processor_id
+       .type           real_hard_smp_processor_id,#function
+real_hard_smp_processor_id:
+       __GET_CPUID(%o0)
+       retl
+        nop
+#ifdef CONFIG_SMP
+       .size           hard_smp_processor_id,.-hard_smp_processor_id
+#endif
+       .size           real_hard_smp_processor_id,.-real_hard_smp_processor_id
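
stack_trace_flush above spills only the %fp/%i7 of the still-live register windows, so a stack walker can read saved frame pointers straight off the stack instead of paying for a full flushw. A hedged sketch of such a caller (STACK_BIAS and the save-area offsets are the usual sparc64 values; the walker itself is illustrative and omits the kernel's bounds checks):

    extern void stack_trace_flush(void);

    #define STACK_BIAS  2047
    #define RW_V9_I6    (14 * 8)    /* saved %fp in a register window save area */
    #define RW_V9_I7    (15 * 8)    /* saved return address */

    static void walk_stack(unsigned long fp)
    {
            stack_trace_flush();     /* make the in-register windows visible on the stack */
            while (fp) {
                    unsigned long base = fp + STACK_BIAS;
                    unsigned long pc = *(unsigned long *)(base + RW_V9_I7);
                    /* record pc ... */
                    fp = *(unsigned long *)(base + RW_V9_I6);   /* caller's frame pointer */
            }
    }
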
diff --git a/arch/sparc64/kernel/hvcalls.S b/arch/sparc64/kernel/hvcalls.S
new file mode 100644 (file)
index 0000000..a2810f3
--- /dev/null
@@ -0,0 +1,886 @@
+       /* %o0: devhandle
+        * %o1: devino
+        *
+        * returns %o0: sysino
+        */
+       .globl  sun4v_devino_to_sysino
+       .type   sun4v_devino_to_sysino,#function
+sun4v_devino_to_sysino:
+       mov     HV_FAST_INTR_DEVINO2SYSINO, %o5
+       ta      HV_FAST_TRAP
+       retl
+        mov    %o1, %o0
+       .size   sun4v_devino_to_sysino, .-sun4v_devino_to_sysino
+
+       /* %o0: sysino
+        *
+        * returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED})
+        */
+       .globl  sun4v_intr_getenabled
+       .type   sun4v_intr_getenabled,#function
+sun4v_intr_getenabled:
+       mov     HV_FAST_INTR_GETENABLED, %o5
+       ta      HV_FAST_TRAP
+       retl
+        mov    %o1, %o0
+       .size   sun4v_intr_getenabled, .-sun4v_intr_getenabled
+
+       /* %o0: sysino
+        * %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
+        */
+       .globl  sun4v_intr_setenabled
+       .type   sun4v_intr_setenabled,#function
+sun4v_intr_setenabled:
+       mov     HV_FAST_INTR_SETENABLED, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_intr_setenabled, .-sun4v_intr_setenabled
+
+       /* %o0: sysino
+        *
+        * returns %o0: intr_state (HV_INTR_STATE_*)
+        */
+       .globl  sun4v_intr_getstate
+       .type   sun4v_intr_getstate,#function
+sun4v_intr_getstate:
+       mov     HV_FAST_INTR_GETSTATE, %o5
+       ta      HV_FAST_TRAP
+       retl
+        mov    %o1, %o0
+       .size   sun4v_intr_getstate, .-sun4v_intr_getstate
+
+       /* %o0: sysino
+        * %o1: intr_state (HV_INTR_STATE_*)
+        */
+       .globl  sun4v_intr_setstate
+       .type   sun4v_intr_setstate,#function
+sun4v_intr_setstate:
+       mov     HV_FAST_INTR_SETSTATE, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_intr_setstate, .-sun4v_intr_setstate
+
+       /* %o0: sysino
+        *
+        * returns %o0: cpuid
+        */
+       .globl  sun4v_intr_gettarget
+       .type   sun4v_intr_gettarget,#function
+sun4v_intr_gettarget:
+       mov     HV_FAST_INTR_GETTARGET, %o5
+       ta      HV_FAST_TRAP
+       retl
+        mov    %o1, %o0
+       .size   sun4v_intr_gettarget, .-sun4v_intr_gettarget
+
+       /* %o0: sysino
+        * %o1: cpuid
+        */
+       .globl  sun4v_intr_settarget
+       .type   sun4v_intr_settarget,#function
+sun4v_intr_settarget:
+       mov     HV_FAST_INTR_SETTARGET, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_intr_settarget, .-sun4v_intr_settarget
+
+       /* %o0: cpuid
+        * %o1: pc
+        * %o2: rtba
+        * %o3: arg0
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_cpu_start
+       .type   sun4v_cpu_start,#function
+sun4v_cpu_start:
+       mov     HV_FAST_CPU_START, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_cpu_start, .-sun4v_cpu_start
+
+       /* %o0: cpuid
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_cpu_stop
+       .type   sun4v_cpu_stop,#function
+sun4v_cpu_stop:
+       mov     HV_FAST_CPU_STOP, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_cpu_stop, .-sun4v_cpu_stop
+
+       /* returns %o0: status  */
+       .globl  sun4v_cpu_yield
+       .type   sun4v_cpu_yield, #function
+sun4v_cpu_yield:
+       mov     HV_FAST_CPU_YIELD, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_cpu_yield, .-sun4v_cpu_yield
+
+       /* %o0: type
+        * %o1: queue paddr
+        * %o2: num queue entries
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_cpu_qconf
+       .type   sun4v_cpu_qconf,#function
+sun4v_cpu_qconf:
+       mov     HV_FAST_CPU_QCONF, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_cpu_qconf, .-sun4v_cpu_qconf
+
+       /* %o0: num cpus in cpu list
+        * %o1: cpu list paddr
+        * %o2: mondo block paddr
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_cpu_mondo_send
+       .type   sun4v_cpu_mondo_send,#function
+sun4v_cpu_mondo_send:
+       mov     HV_FAST_CPU_MONDO_SEND, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_cpu_mondo_send, .-sun4v_cpu_mondo_send
+
+       /* %o0: CPU ID
+        *
+        * returns %o0: -status if status non-zero, else
+        *         %o0: cpu state as HV_CPU_STATE_*
+        */
+       .globl  sun4v_cpu_state
+       .type   sun4v_cpu_state,#function
+sun4v_cpu_state:
+       mov     HV_FAST_CPU_STATE, %o5
+       ta      HV_FAST_TRAP
+       brnz,pn %o0, 1f
+        sub    %g0, %o0, %o0
+       mov     %o1, %o0
+1:     retl
+        nop
+       .size   sun4v_cpu_state, .-sun4v_cpu_state
+
+       /* %o0: virtual address
+        * %o1: must be zero
+        * %o2: TTE
+        * %o3: HV_MMU_* flags
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_mmu_map_perm_addr
+       .type   sun4v_mmu_map_perm_addr,#function
+sun4v_mmu_map_perm_addr:
+       mov     HV_FAST_MMU_MAP_PERM_ADDR, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_mmu_map_perm_addr, .-sun4v_mmu_map_perm_addr
+
+       /* %o0: number of TSB descriptions
+        * %o1: TSB descriptions real address
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_mmu_tsb_ctx0
+       .type   sun4v_mmu_tsb_ctx0,#function
+sun4v_mmu_tsb_ctx0:
+       mov     HV_FAST_MMU_TSB_CTX0, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_mmu_tsb_ctx0, .-sun4v_mmu_tsb_ctx0
+
+       /* %o0: API group number
+        * %o1: pointer to unsigned long major number storage
+        * %o2: pointer to unsigned long minor number storage
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_get_version
+       .type   sun4v_get_version,#function
+sun4v_get_version:
+       mov     HV_CORE_GET_VER, %o5
+       mov     %o1, %o3
+       mov     %o2, %o4
+       ta      HV_CORE_TRAP
+       stx     %o1, [%o3]
+       retl
+        stx    %o2, [%o4]
+       .size   sun4v_get_version, .-sun4v_get_version
+
+       /* %o0: API group number
+        * %o1: desired major number
+        * %o2: desired minor number
+        * %o3: pointer to unsigned long actual minor number storage
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_set_version
+       .type   sun4v_set_version,#function
+sun4v_set_version:
+       mov     HV_CORE_SET_VER, %o5
+       mov     %o3, %o4
+       ta      HV_CORE_TRAP
+       retl
+        stx    %o1, [%o4]
+       .size   sun4v_set_version, .-sun4v_set_version
+
+       /* %o0: pointer to unsigned long time
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_tod_get
+       .type   sun4v_tod_get,#function
+sun4v_tod_get:
+       mov     %o0, %o4
+       mov     HV_FAST_TOD_GET, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o4]
+       retl
+        nop
+       .size   sun4v_tod_get, .-sun4v_tod_get
+
+       /* %o0: time
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_tod_set
+       .type   sun4v_tod_set,#function
+sun4v_tod_set:
+       mov     HV_FAST_TOD_SET, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_tod_set, .-sun4v_tod_set
+
+       /* %o0: pointer to unsigned long status
+        *
+        * returns %o0: signed character
+        */
+       .globl  sun4v_con_getchar
+       .type   sun4v_con_getchar,#function
+sun4v_con_getchar:
+       mov     %o0, %o4
+       mov     HV_FAST_CONS_GETCHAR, %o5
+       clr     %o0
+       clr     %o1
+       ta      HV_FAST_TRAP
+       stx     %o0, [%o4]
+       retl
+        sra    %o1, 0, %o0
+       .size   sun4v_con_getchar, .-sun4v_con_getchar
+
+       /* %o0: signed long character
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_con_putchar
+       .type   sun4v_con_putchar,#function
+sun4v_con_putchar:
+       mov     HV_FAST_CONS_PUTCHAR, %o5
+       ta      HV_FAST_TRAP
+       retl
+        sra    %o0, 0, %o0
+       .size   sun4v_con_putchar, .-sun4v_con_putchar
+
+       /* %o0: buffer real address
+        * %o1: buffer size
+        * %o2: pointer to unsigned long bytes_read
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_con_read
+       .type   sun4v_con_read,#function
+sun4v_con_read:
+       mov     %o2, %o4
+       mov     HV_FAST_CONS_READ, %o5
+       ta      HV_FAST_TRAP
+       brnz    %o0, 1f
+        cmp    %o1, -1         /* break */
+       be,a,pn %icc, 1f
+        mov    %o1, %o0
+       cmp     %o1, -2         /* hup */
+       be,a,pn %icc, 1f
+        mov    %o1, %o0
+       stx     %o1, [%o4]
+1:     retl
+        nop
+       .size   sun4v_con_read, .-sun4v_con_read
+
+       /* %o0: buffer real address
+        * %o1: buffer size
+        * %o2: pointer to unsigned long bytes_written
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_con_write
+       .type   sun4v_con_write,#function
+sun4v_con_write:
+       mov     %o2, %o4
+       mov     HV_FAST_CONS_WRITE, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o4]
+       retl
+        nop
+       .size   sun4v_con_write, .-sun4v_con_write
+
+       /* %o0: soft state
+        * %o1: address of description string
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_mach_set_soft_state
+       .type   sun4v_mach_set_soft_state,#function
+sun4v_mach_set_soft_state:
+       mov     HV_FAST_MACH_SET_SOFT_STATE, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_mach_set_soft_state, .-sun4v_mach_set_soft_state
+
+       /* %o0: exit code
+        *
+        * Does not return.
+        */
+       .globl  sun4v_mach_exit
+       .type   sun4v_mach_exit,#function
+sun4v_mach_exit:
+       mov     HV_FAST_MACH_EXIT, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_mach_exit, .-sun4v_mach_exit
+
+       /* %o0: buffer real address
+        * %o1: buffer length
+        * %o2: pointer to unsigned long real_buf_len
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_mach_desc
+       .type   sun4v_mach_desc,#function
+sun4v_mach_desc:
+       mov     %o2, %o4
+       mov     HV_FAST_MACH_DESC, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o4]
+       retl
+        nop
+       .size   sun4v_mach_desc, .-sun4v_mach_desc
+
+       /* %o0: new timeout in milliseconds
+        * %o1: pointer to unsigned long orig_timeout
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_mach_set_watchdog
+       .type   sun4v_mach_set_watchdog,#function
+sun4v_mach_set_watchdog:
+       mov     %o1, %o4
+       mov     HV_FAST_MACH_SET_WATCHDOG, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o4]
+       retl
+        nop
+       .size   sun4v_mach_set_watchdog, .-sun4v_mach_set_watchdog
+
+       /* No inputs and does not return.  */
+       .globl  sun4v_mach_sir
+       .type   sun4v_mach_sir,#function
+sun4v_mach_sir:
+       mov     %o1, %o4
+       mov     HV_FAST_MACH_SIR, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o4]
+       retl
+        nop
+       .size   sun4v_mach_sir, .-sun4v_mach_sir
+
+       /* %o0: channel
+        * %o1: ra
+        * %o2: num_entries
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ldc_tx_qconf
+       .type   sun4v_ldc_tx_qconf,#function
+sun4v_ldc_tx_qconf:
+       mov     HV_FAST_LDC_TX_QCONF, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_ldc_tx_qconf, .-sun4v_ldc_tx_qconf
+
+       /* %o0: channel
+        * %o1: pointer to unsigned long ra
+        * %o2: pointer to unsigned long num_entries
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ldc_tx_qinfo
+       .type   sun4v_ldc_tx_qinfo,#function
+sun4v_ldc_tx_qinfo:
+       mov     %o1, %g1
+       mov     %o2, %g2
+       mov     HV_FAST_LDC_TX_QINFO, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%g1]
+       stx     %o2, [%g2]
+       retl
+        nop
+       .size   sun4v_ldc_tx_qinfo, .-sun4v_ldc_tx_qinfo
+
+       /* %o0: channel
+        * %o1: pointer to unsigned long head_off
+        * %o2: pointer to unsigned long tail_off
+        * %o3: pointer to unsigned long chan_state
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ldc_tx_get_state
+       .type   sun4v_ldc_tx_get_state,#function
+sun4v_ldc_tx_get_state:
+       mov     %o1, %g1
+       mov     %o2, %g2
+       mov     %o3, %g3
+       mov     HV_FAST_LDC_TX_GET_STATE, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%g1]
+       stx     %o2, [%g2]
+       stx     %o3, [%g3]
+       retl
+        nop
+       .size   sun4v_ldc_tx_get_state, .-sun4v_ldc_tx_get_state
+
+       /* %o0: channel
+        * %o1: tail_off
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ldc_tx_set_qtail
+       .type   sun4v_ldc_tx_set_qtail,#function
+sun4v_ldc_tx_set_qtail:
+       mov     HV_FAST_LDC_TX_SET_QTAIL, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_ldc_tx_set_qtail, .-sun4v_ldc_tx_set_qtail
+
+       /* %o0: channel
+        * %o1: ra
+        * %o2: num_entries
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ldc_rx_qconf
+       .type   sun4v_ldc_rx_qconf,#function
+sun4v_ldc_rx_qconf:
+       mov     HV_FAST_LDC_RX_QCONF, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_ldc_rx_qconf, .-sun4v_ldc_rx_qconf
+
+       /* %o0: channel
+        * %o1: pointer to unsigned long ra
+        * %o2: pointer to unsigned long num_entries
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ldc_rx_qinfo
+       .type   sun4v_ldc_rx_qinfo,#function
+sun4v_ldc_rx_qinfo:
+       mov     %o1, %g1
+       mov     %o2, %g2
+       mov     HV_FAST_LDC_RX_QINFO, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%g1]
+       stx     %o2, [%g2]
+       retl
+        nop
+       .size   sun4v_ldc_rx_qinfo, .-sun4v_ldc_rx_qinfo
+
+       /* %o0: channel
+        * %o1: pointer to unsigned long head_off
+        * %o2: pointer to unsigned long tail_off
+        * %o3: pointer to unsigned long chan_state
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ldc_rx_get_state
+       .type   sun4v_ldc_rx_get_state,#function
+sun4v_ldc_rx_get_state:
+       mov     %o1, %g1
+       mov     %o2, %g2
+       mov     %o3, %g3
+       mov     HV_FAST_LDC_RX_GET_STATE, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%g1]
+       stx     %o2, [%g2]
+       stx     %o3, [%g3]
+       retl
+        nop
+       .size   sun4v_ldc_rx_get_state, .-sun4v_ldc_rx_get_state
+
+       /* %o0: channel
+        * %o1: head_off
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ldc_rx_set_qhead
+       .type   sun4v_ldc_rx_set_qhead,#function
+sun4v_ldc_rx_set_qhead:
+       mov     HV_FAST_LDC_RX_SET_QHEAD, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_ldc_rx_set_qhead, .-sun4v_ldc_rx_set_qhead
+
+       /* %o0: channel
+        * %o1: ra
+        * %o2: num_entries
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ldc_set_map_table
+       .type   sun4v_ldc_set_map_table,#function
+sun4v_ldc_set_map_table:
+       mov     HV_FAST_LDC_SET_MAP_TABLE, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_ldc_set_map_table, .-sun4v_ldc_set_map_table
+
+       /* %o0: channel
+        * %o1: pointer to unsigned long ra
+        * %o2: pointer to unsigned long num_entries
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ldc_get_map_table
+       .type   sun4v_ldc_get_map_table,#function
+sun4v_ldc_get_map_table:
+       mov     %o1, %g1
+       mov     %o2, %g2
+       mov     HV_FAST_LDC_GET_MAP_TABLE, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%g1]
+       stx     %o2, [%g2]
+       retl
+        nop
+       .size   sun4v_ldc_get_map_table, .-sun4v_ldc_get_map_table
+
+       /* %o0: channel
+        * %o1: dir_code
+        * %o2: tgt_raddr
+        * %o3: lcl_raddr
+        * %o4: len
+        * %o5: pointer to unsigned long actual_len
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ldc_copy
+       .type   sun4v_ldc_copy,#function
+sun4v_ldc_copy:
+       mov     %o5, %g1
+       mov     HV_FAST_LDC_COPY, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%g1]
+       retl
+        nop
+       .size   sun4v_ldc_copy, .-sun4v_ldc_copy
+
+       /* %o0: channel
+        * %o1: cookie
+        * %o2: pointer to unsigned long ra
+        * %o3: pointer to unsigned long perm
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ldc_mapin
+       .type   sun4v_ldc_mapin,#function
+sun4v_ldc_mapin:
+       mov     %o2, %g1
+       mov     %o3, %g2
+       mov     HV_FAST_LDC_MAPIN, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%g1]
+       stx     %o2, [%g2]
+       retl
+        nop
+       .size   sun4v_ldc_mapin, .-sun4v_ldc_mapin
+
+       /* %o0: ra
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ldc_unmap
+       .type   sun4v_ldc_unmap,#function
+sun4v_ldc_unmap:
+       mov     HV_FAST_LDC_UNMAP, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_ldc_unmap, .-sun4v_ldc_unmap
+
+       /* %o0: channel
+        * %o1: cookie
+        * %o2: mte_cookie
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ldc_revoke
+       .type   sun4v_ldc_revoke,#function
+sun4v_ldc_revoke:
+       mov     HV_FAST_LDC_REVOKE, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_ldc_revoke, .-sun4v_ldc_revoke
+
+       /* %o0: device handle
+        * %o1: device INO
+        * %o2: pointer to unsigned long cookie
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_vintr_get_cookie
+       .type   sun4v_vintr_get_cookie,#function
+sun4v_vintr_get_cookie:
+       mov     %o2, %g1
+       mov     HV_FAST_VINTR_GET_COOKIE, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%g1]
+       retl
+        nop
+       .size   sun4v_vintr_get_cookie, .-sun4v_vintr_get_cookie
+
+       /* %o0: device handle
+        * %o1: device INO
+        * %o2: cookie
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_vintr_set_cookie
+       .type   sun4v_vintr_set_cookie,#function
+sun4v_vintr_set_cookie:
+       mov     HV_FAST_VINTR_SET_COOKIE, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_vintr_set_cookie, .-sun4v_vintr_set_cookie
+
+       /* %o0: device handle
+        * %o1: device INO
+        * %o2: pointer to unsigned long valid_state
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_vintr_get_valid
+       .type   sun4v_vintr_get_valid,#function
+sun4v_vintr_get_valid:
+       mov     %o2, %g1
+       mov     HV_FAST_VINTR_GET_VALID, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%g1]
+       retl
+        nop
+       .size   sun4v_vintr_get_valid, .-sun4v_vintr_get_valid
+
+       /* %o0: device handle
+        * %o1: device INO
+        * %o2: valid_state
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_vintr_set_valid
+       .type   sun4v_vintr_set_valid,#function
+sun4v_vintr_set_valid:
+       mov     HV_FAST_VINTR_SET_VALID, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_vintr_set_valid, .-sun4v_vintr_set_valid
+
+       /* %o0: device handle
+        * %o1: device INO
+        * %o2: pointer to unsigned long state
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_vintr_get_state
+       .type   sun4v_vintr_get_state,#function
+sun4v_vintr_get_state:
+       mov     %o2, %g1
+       mov     HV_FAST_VINTR_GET_STATE, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%g1]
+       retl
+        nop
+       .size   sun4v_vintr_get_state, .-sun4v_vintr_get_state
+
+       /* %o0: device handle
+        * %o1: device INO
+        * %o2: state
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_vintr_set_state
+       .type   sun4v_vintr_set_state,#function
+sun4v_vintr_set_state:
+       mov     HV_FAST_VINTR_SET_STATE, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_vintr_set_state, .-sun4v_vintr_set_state
+
+       /* %o0: device handle
+        * %o1: device INO
+        * %o2: pointer to unsigned long cpuid
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_vintr_get_target
+       .type   sun4v_vintr_get_target,#function
+sun4v_vintr_get_target:
+       mov     %o2, %g1
+       mov     HV_FAST_VINTR_GET_TARGET, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%g1]
+       retl
+        nop
+       .size   sun4v_vintr_get_target, .-sun4v_vintr_get_target
+
+       /* %o0: device handle
+        * %o1: device INO
+        * %o2: cpuid
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_vintr_set_target
+       .type   sun4v_vintr_set_target,#function
+sun4v_vintr_set_target:
+       mov     HV_FAST_VINTR_SET_TARGET, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_vintr_set_target, .-sun4v_vintr_set_target
+
+       /* %o0: NCS sub-function
+        * %o1: sub-function arg real-address
+        * %o2: sub-function arg size
+        *
+        * returns %o0: status
+        */
+       .globl  sun4v_ncs_request
+       .type   sun4v_ncs_request,#function
+sun4v_ncs_request:
+       mov     HV_FAST_NCS_REQUEST, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_ncs_request, .-sun4v_ncs_request
+
+       .globl  sun4v_svc_send
+       .type   sun4v_svc_send,#function
+sun4v_svc_send:
+       save    %sp, -192, %sp
+       mov     %i0, %o0
+       mov     %i1, %o1
+       mov     %i2, %o2
+       mov     HV_FAST_SVC_SEND, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%i3]
+       ret
+       restore
+       .size   sun4v_svc_send, .-sun4v_svc_send
+
+       .globl  sun4v_svc_recv
+       .type   sun4v_svc_recv,#function
+sun4v_svc_recv:
+       save    %sp, -192, %sp
+       mov     %i0, %o0
+       mov     %i1, %o1
+       mov     %i2, %o2
+       mov     HV_FAST_SVC_RECV, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%i3]
+       ret
+       restore
+       .size   sun4v_svc_recv, .-sun4v_svc_recv
+
+       .globl  sun4v_svc_getstatus
+       .type   sun4v_svc_getstatus,#function
+sun4v_svc_getstatus:
+       mov     HV_FAST_SVC_GETSTATUS, %o5
+       mov     %o1, %o4
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o4]
+       retl
+        nop
+       .size   sun4v_svc_getstatus, .-sun4v_svc_getstatus
+
+       .globl  sun4v_svc_setstatus
+       .type   sun4v_svc_setstatus,#function
+sun4v_svc_setstatus:
+       mov     HV_FAST_SVC_SETSTATUS, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_svc_setstatus, .-sun4v_svc_setstatus
+
+       .globl  sun4v_svc_clrstatus
+       .type   sun4v_svc_clrstatus,#function
+sun4v_svc_clrstatus:
+       mov     HV_FAST_SVC_CLRSTATUS, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_svc_clrstatus, .-sun4v_svc_clrstatus
+
+       .globl  sun4v_mmustat_conf
+       .type   sun4v_mmustat_conf,#function
+sun4v_mmustat_conf:
+       mov     %o1, %o4
+       mov     HV_FAST_MMUSTAT_CONF, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o4]
+       retl
+        nop
+       .size   sun4v_mmustat_conf, .-sun4v_mmustat_conf
+
+       .globl  sun4v_mmustat_info
+       .type   sun4v_mmustat_info,#function
+sun4v_mmustat_info:
+       mov     %o0, %o4
+       mov     HV_FAST_MMUSTAT_INFO, %o5
+       ta      HV_FAST_TRAP
+       stx     %o1, [%o4]
+       retl
+        nop
+       .size   sun4v_mmustat_info, .-sun4v_mmustat_info
+
+       .globl  sun4v_mmu_demap_all
+       .type   sun4v_mmu_demap_all,#function
+sun4v_mmu_demap_all:
+       clr     %o0
+       clr     %o1
+       mov     HV_MMU_ALL, %o2
+       mov     HV_FAST_MMU_DEMAP_ALL, %o5
+       ta      HV_FAST_TRAP
+       retl
+        nop
+       .size   sun4v_mmu_demap_all, .-sun4v_mmu_demap_all
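
Each wrapper above follows the same pattern: stash any by-reference output pointers in spare registers, load the fast-trap function number into %o5, issue ta HV_FAST_TRAP, store the results, and return the hypervisor status in %o0. From C they read like ordinary functions; a minimal sketch of a caller, with the prototype inferred from the register comments and the error handling purely illustrative:

    #include <asm/hypervisor.h>
    #include <linux/errno.h>

    static int ldc_tx_state(unsigned long channel, unsigned long *head,
                            unsigned long *tail, unsigned long *state)
    {
            unsigned long hv_err;

            hv_err = sun4v_ldc_tx_get_state(channel, head, tail, state);
            if (hv_err != HV_EOK)        /* any non-zero status is a hypervisor error */
                    return -EIO;
            return 0;                    /* *head, *tail, *state now hold the queue info */
    }
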
index eb88bd6e674ef7e05a1af6480539e99b860c62cb..b441a26b73b03ab514aa772609c92e8a179d2e12 100644 (file)
@@ -1,6 +1,6 @@
 /* irq.c: UltraSparc IRQ handling/init/registry.
  *
- * Copyright (C) 1997, 2007  David S. Miller  (davem@davemloft.net)
+ * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
  * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
  * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
  */
@@ -308,6 +308,7 @@ static void sun4u_irq_enable(unsigned int virt_irq)
                         IMAP_AID_SAFARI | IMAP_NID_SAFARI);
                val |= tid | IMAP_VALID;
                upa_writeq(val, imap);
+               upa_writeq(ICLR_IDLE, data->iclr);
        }
 }
 
diff --git a/arch/sparc64/kernel/isa.c b/arch/sparc64/kernel/isa.c
deleted file mode 100644 (file)
index a2af5ed..0000000
+++ /dev/null
@@ -1,191 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/slab.h>
-#include <asm/oplib.h>
-#include <asm/prom.h>
-#include <asm/of_device.h>
-#include <asm/isa.h>
-
-struct sparc_isa_bridge *isa_chain;
-
-static void __init fatal_err(const char *reason)
-{
-       prom_printf("ISA: fatal error, %s.\n", reason);
-}
-
-static void __init report_dev(struct sparc_isa_device *isa_dev, int child)
-{
-       if (child)
-               printk(" (%s)", isa_dev->prom_node->name);
-       else
-               printk(" [%s", isa_dev->prom_node->name);
-}
-
-static void __init isa_dev_get_resource(struct sparc_isa_device *isa_dev)
-{
-       struct of_device *op = of_find_device_by_node(isa_dev->prom_node);
-
-       memcpy(&isa_dev->resource, &op->resource[0], sizeof(struct resource));
-}
-
-static void __init isa_dev_get_irq(struct sparc_isa_device *isa_dev)
-{
-       struct of_device *op = of_find_device_by_node(isa_dev->prom_node);
-
-       if (!op || !op->num_irqs) {
-               isa_dev->irq = PCI_IRQ_NONE;
-       } else {
-               isa_dev->irq = op->irqs[0];
-       }
-}
-
-static void __init isa_fill_children(struct sparc_isa_device *parent_isa_dev)
-{
-       struct device_node *dp = parent_isa_dev->prom_node->child;
-
-       if (!dp)
-               return;
-
-       printk(" ->");
-       while (dp) {
-               struct sparc_isa_device *isa_dev;
-
-               isa_dev = kzalloc(sizeof(*isa_dev), GFP_KERNEL);
-               if (!isa_dev) {
-                       fatal_err("cannot allocate child isa_dev");
-                       prom_halt();
-               }
-
-               /* Link it in to parent. */
-               isa_dev->next = parent_isa_dev->child;
-               parent_isa_dev->child = isa_dev;
-
-               isa_dev->bus = parent_isa_dev->bus;
-               isa_dev->prom_node = dp;
-
-               isa_dev_get_resource(isa_dev);
-               isa_dev_get_irq(isa_dev);
-
-               report_dev(isa_dev, 1);
-
-               dp = dp->sibling;
-       }
-}
-
-static void __init isa_fill_devices(struct sparc_isa_bridge *isa_br)
-{
-       struct device_node *dp = isa_br->prom_node->child;
-
-       while (dp) {
-               struct sparc_isa_device *isa_dev;
-               struct dev_archdata *sd;
-
-               isa_dev = kzalloc(sizeof(*isa_dev), GFP_KERNEL);
-               if (!isa_dev) {
-                       printk(KERN_DEBUG "ISA: cannot allocate isa_dev");
-                       return;
-               }
-
-               sd = &isa_dev->ofdev.dev.archdata;
-               sd->prom_node = dp;
-               sd->op = &isa_dev->ofdev;
-               sd->iommu = isa_br->ofdev.dev.parent->archdata.iommu;
-               sd->stc = isa_br->ofdev.dev.parent->archdata.stc;
-               sd->numa_node = isa_br->ofdev.dev.parent->archdata.numa_node;
-
-               isa_dev->ofdev.node = dp;
-               isa_dev->ofdev.dev.parent = &isa_br->ofdev.dev;
-               isa_dev->ofdev.dev.bus = &isa_bus_type;
-               sprintf(isa_dev->ofdev.dev.bus_id, "isa[%08x]", dp->node);
-
-               /* Register with core */
-               if (of_device_register(&isa_dev->ofdev) != 0) {
-                       printk(KERN_DEBUG "isa: device registration error for %s!\n",
-                              dp->path_component_name);
-                       kfree(isa_dev);
-                       goto next_sibling;
-               }
-
-               /* Link it in. */
-               isa_dev->next = NULL;
-               if (isa_br->devices == NULL) {
-                       isa_br->devices = isa_dev;
-               } else {
-                       struct sparc_isa_device *tmp = isa_br->devices;
-
-                       while (tmp->next)
-                               tmp = tmp->next;
-
-                       tmp->next = isa_dev;
-               }
-
-               isa_dev->bus = isa_br;
-               isa_dev->prom_node = dp;
-
-               isa_dev_get_resource(isa_dev);
-               isa_dev_get_irq(isa_dev);
-
-               report_dev(isa_dev, 0);
-
-               isa_fill_children(isa_dev);
-
-               printk("]");
-
-       next_sibling:
-               dp = dp->sibling;
-       }
-}
-
-void __init isa_init(void)
-{
-       struct pci_dev *pdev;
-       unsigned short vendor, device;
-       int index = 0;
-
-       vendor = PCI_VENDOR_ID_AL;
-       device = PCI_DEVICE_ID_AL_M1533;
-
-       pdev = NULL;
-       while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
-               struct sparc_isa_bridge *isa_br;
-               struct device_node *dp;
-
-               dp = pci_device_to_OF_node(pdev);
-
-               isa_br = kzalloc(sizeof(*isa_br), GFP_KERNEL);
-               if (!isa_br) {
-                       printk(KERN_DEBUG "isa: cannot allocate sparc_isa_bridge");
-                       pci_dev_put(pdev);
-                       return;
-               }
-
-               isa_br->ofdev.node = dp;
-               isa_br->ofdev.dev.parent = &pdev->dev;
-               isa_br->ofdev.dev.bus = &isa_bus_type;
-               sprintf(isa_br->ofdev.dev.bus_id, "isa%d", index);
-
-               /* Register with core */
-               if (of_device_register(&isa_br->ofdev) != 0) {
-                       printk(KERN_DEBUG "isa: device registration error for %s!\n",
-                              dp->path_component_name);
-                       kfree(isa_br);
-                       pci_dev_put(pdev);
-                       return;
-               }
-
-               /* Link it in. */
-               isa_br->next = isa_chain;
-               isa_chain = isa_br;
-
-               isa_br->self = pdev;
-               isa_br->index = index++;
-               isa_br->prom_node = dp;
-
-               printk("isa%d:", isa_br->index);
-
-               isa_fill_devices(isa_br);
-
-               printk("\n");
-       }
-}
diff --git a/arch/sparc64/kernel/ivec.S b/arch/sparc64/kernel/ivec.S
new file mode 100644 (file)
index 0000000..d29f92e
--- /dev/null
@@ -0,0 +1,51 @@
+       /* The registers for cross calls will be:
+        *
+        * DATA 0: [low 32-bits]  Address of function to call, jmp to this
+        *         [high 32-bits] MMU Context Argument 0, place in %g5
+        * DATA 1: Address Argument 1, place in %g1
+        * DATA 2: Address Argument 2, place in %g7
+        *
+        * With this method we can do most of the cross-call tlb/cache
+        * flushing very quickly.
+        */
+       .align          32
+       .globl          do_ivec
+       .type           do_ivec,#function
+do_ivec:
+       mov             0x40, %g3
+       ldxa            [%g3 + %g0] ASI_INTR_R, %g3
+       sethi           %hi(KERNBASE), %g4
+       cmp             %g3, %g4
+       bgeu,pn         %xcc, do_ivec_xcall
+        srlx           %g3, 32, %g5
+       stxa            %g0, [%g0] ASI_INTR_RECEIVE
+       membar          #Sync
+
+       sethi           %hi(ivector_table_pa), %g2
+       ldx             [%g2 + %lo(ivector_table_pa)], %g2
+       sllx            %g3, 4, %g3
+       add             %g2, %g3, %g3
+
+       TRAP_LOAD_IRQ_WORK_PA(%g6, %g1)
+
+       ldx             [%g6], %g5
+       stxa            %g5, [%g3] ASI_PHYS_USE_EC
+       stx             %g3, [%g6]
+       wr              %g0, 1 << PIL_DEVICE_IRQ, %set_softint
+       retry
+do_ivec_xcall:
+       mov             0x50, %g1
+       ldxa            [%g1 + %g0] ASI_INTR_R, %g1
+       srl             %g3, 0, %g3
+
+       mov             0x60, %g7
+       ldxa            [%g7 + %g0] ASI_INTR_R, %g7
+       stxa            %g0, [%g0] ASI_INTR_RECEIVE
+       membar          #Sync
+       ba,pt           %xcc, 1f
+        nop
+
+       .align          32
+1:     jmpl            %g3, %g0
+        nop
+       .size           do_ivec,.-do_ivec
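
The comment at the top of do_ivec above describes the cross-call payload layout: the low 32 bits of DATA 0 carry the PC to jump to, the high 32 bits an MMU-context argument bound for %g5, while DATA 1 and DATA 2 are passed through in %g1 and %g7. The DATA 0 split that do_ivec/do_ivec_xcall perform amounts to the following illustrative user-space sketch (not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Mimic how do_ivec splits the 64-bit DATA 0 word of a cross call. */
static void split_data0(uint64_t data0, uint64_t *pc, uint64_t *ctx_arg)
{
	*pc      = data0 & 0xffffffffULL;	/* srl  %g3, 0, %g3  -> jump target */
	*ctx_arg = data0 >> 32;			/* srlx %g3, 32, %g5 -> MMU ctx arg */
}

int main(void)
{
	uint64_t pc, ctx;

	split_data0(0x0000002a00404000ULL, &pc, &ctx);
	printf("pc=%#llx ctx=%#llx\n",
	       (unsigned long long)pc, (unsigned long long)ctx);
	return 0;
}
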
diff --git a/arch/sparc64/kernel/kgdb.c b/arch/sparc64/kernel/kgdb.c
new file mode 100644 (file)
index 0000000..fefbe6d
--- /dev/null
@@ -0,0 +1,186 @@
+/* kgdb.c: KGDB support for 64-bit sparc.
+ *
+ * Copyright (C) 2008 David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+
+#include <asm/kdebug.h>
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+       struct reg_window *win;
+       int i;
+
+       gdb_regs[GDB_G0] = 0;
+       for (i = 0; i < 15; i++)
+               gdb_regs[GDB_G1 + i] = regs->u_regs[UREG_G1 + i];
+
+       win = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS);
+       for (i = 0; i < 8; i++)
+               gdb_regs[GDB_L0 + i] = win->locals[i];
+       for (i = 0; i < 8; i++)
+               gdb_regs[GDB_I0 + i] = win->ins[i];
+
+       for (i = GDB_F0; i <= GDB_F62; i++)
+               gdb_regs[i] = 0;
+
+       gdb_regs[GDB_PC] = regs->tpc;
+       gdb_regs[GDB_NPC] = regs->tnpc;
+       gdb_regs[GDB_STATE] = regs->tstate;
+       gdb_regs[GDB_FSR] = 0;
+       gdb_regs[GDB_FPRS] = 0;
+       gdb_regs[GDB_Y] = regs->y;
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+       struct thread_info *t = task_thread_info(p);
+       extern unsigned int switch_to_pc;
+       extern unsigned int ret_from_syscall;
+       struct reg_window *win;
+       unsigned long pc, cwp;
+       int i;
+
+       for (i = GDB_G0; i < GDB_G6; i++)
+               gdb_regs[i] = 0;
+       gdb_regs[GDB_G6] = (unsigned long) t;
+       gdb_regs[GDB_G7] = (unsigned long) p;
+       for (i = GDB_O0; i < GDB_SP; i++)
+               gdb_regs[i] = 0;
+       gdb_regs[GDB_SP] = t->ksp;
+       gdb_regs[GDB_O7] = 0;
+
+       win = (struct reg_window *) (t->ksp + STACK_BIAS);
+       for (i = 0; i < 8; i++)
+               gdb_regs[GDB_L0 + i] = win->locals[i];
+       for (i = 0; i < 8; i++)
+               gdb_regs[GDB_I0 + i] = win->ins[i];
+
+       for (i = GDB_F0; i <= GDB_F62; i++)
+               gdb_regs[i] = 0;
+
+       if (t->new_child)
+               pc = (unsigned long) &ret_from_syscall;
+       else
+               pc = (unsigned long) &switch_to_pc;
+
+       gdb_regs[GDB_PC] = pc;
+       gdb_regs[GDB_NPC] = pc + 4;
+
+       cwp = __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP];
+
+       gdb_regs[GDB_STATE] = (TSTATE_PRIV | TSTATE_IE | cwp);
+       gdb_regs[GDB_FSR] = 0;
+       gdb_regs[GDB_FPRS] = 0;
+       gdb_regs[GDB_Y] = 0;
+}
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+       struct reg_window *win;
+       int i;
+
+       for (i = 0; i < 15; i++)
+               regs->u_regs[UREG_G1 + i] = gdb_regs[GDB_G1 + i];
+
+       /* If the TSTATE register is changing, we have to preserve
+        * the CWP field, otherwise window save/restore explodes.
+        */
+       if (regs->tstate != gdb_regs[GDB_STATE]) {
+               unsigned long cwp = regs->tstate & TSTATE_CWP;
+
+               regs->tstate = (gdb_regs[GDB_STATE] & ~TSTATE_CWP) | cwp;
+       }
+
+       regs->tpc = gdb_regs[GDB_PC];
+       regs->tnpc = gdb_regs[GDB_NPC];
+       regs->y = gdb_regs[GDB_Y];
+
+       win = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS);
+       for (i = 0; i < 8; i++)
+               win->locals[i] = gdb_regs[GDB_L0 + i];
+       for (i = 0; i < 8; i++)
+               win->ins[i] = gdb_regs[GDB_I0 + i];
+}
+
+#ifdef CONFIG_SMP
+void smp_kgdb_capture_client(struct pt_regs *regs)
+{
+       unsigned long flags;
+
+       __asm__ __volatile__("rdpr      %%pstate, %0\n\t"
+                            "wrpr      %0, %1, %%pstate"
+                            : "=r" (flags)
+                            : "i" (PSTATE_IE));
+
+       flushw_all();
+
+       if (atomic_read(&kgdb_active) != -1)
+               kgdb_nmicallback(raw_smp_processor_id(), regs);
+
+       __asm__ __volatile__("wrpr      %0, 0, %%pstate"
+                            : : "r" (flags));
+}
+#endif
+
+int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
+                              char *remcomInBuffer, char *remcomOutBuffer,
+                              struct pt_regs *linux_regs)
+{
+       unsigned long addr;
+       char *ptr;
+
+       switch (remcomInBuffer[0]) {
+       case 'c':
+               /* try to read optional parameter, pc unchanged if no parm */
+               ptr = &remcomInBuffer[1];
+               if (kgdb_hex2long(&ptr, &addr)) {
+                       linux_regs->tpc = addr;
+                       linux_regs->tnpc = addr + 4;
+               }
+               /* fallthru */
+
+       case 'D':
+       case 'k':
+               if (linux_regs->tpc == (unsigned long) arch_kgdb_breakpoint) {
+                       linux_regs->tpc = linux_regs->tnpc;
+                       linux_regs->tnpc += 4;
+               }
+               return 0;
+       }
+       return -1;
+}
+
+asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs)
+{
+       unsigned long flags;
+
+       if (user_mode(regs)) {
+               bad_trap(regs, trap_level);
+               return;
+       }
+
+       flushw_all();
+
+       local_irq_save(flags);
+       kgdb_handle_exception(0x172, SIGTRAP, 0, regs);
+       local_irq_restore(flags);
+}
+
+int kgdb_arch_init(void)
+{
+       return 0;
+}
+
+void kgdb_arch_exit(void)
+{
+}
+
+struct kgdb_arch arch_kgdb_ops = {
+       /* Breakpoint instruction: ta 0x72 */
+       .gdb_bpt_instr          = { 0x91, 0xd0, 0x20, 0x72 },
+};
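
The gdb_bpt_instr bytes above are simply the big-endian encoding of the "ta 0x72" trap that arch_kgdb_breakpoint executes, and that the trap-table change further down routes to kgdb_trap via KGDB_TRAP(0x172). Assuming the SPARC V9 Tcc instruction format, the encoding can be sanity-checked with a small sketch (not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Tcc: op=2, cond=0x8 ("always"), op3=0x3a, rs1=%g0, i=1, sw_trap#=0x72 */
	uint32_t insn = (2u << 30) | (0x8u << 25) | (0x3au << 19) | (1u << 13) | 0x72;

	printf("%#010x\n", insn);	/* 0x91d02072 -> bytes 91 d0 20 72 */
	return 0;
}
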
diff --git a/arch/sparc64/kernel/misctrap.S b/arch/sparc64/kernel/misctrap.S
new file mode 100644 (file)
index 0000000..753b4f0
--- /dev/null
@@ -0,0 +1,97 @@
+#ifdef CONFIG_KGDB
+       .globl          arch_kgdb_breakpoint
+       .type           arch_kgdb_breakpoint,#function
+arch_kgdb_breakpoint:
+       ta              0x72
+       retl
+        nop
+       .size           arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
+#endif
+
+       .type           __do_privact,#function
+__do_privact:
+       mov             TLB_SFSR, %g3
+       stxa            %g0, [%g3] ASI_DMMU     ! Clear FaultValid bit
+       membar          #Sync
+       sethi           %hi(109f), %g7
+       ba,pt           %xcc, etrap
+109:   or              %g7, %lo(109b), %g7
+       call            do_privact
+        add            %sp, PTREGS_OFF, %o0
+       ba,pt           %xcc, rtrap
+        nop
+       .size           __do_privact,.-__do_privact
+
+       .type           do_mna,#function
+do_mna:
+       rdpr            %tl, %g3
+       cmp             %g3, 1
+
+       /* Setup %g4/%g5 now as they are used in the
+        * winfixup code.
+        */
+       mov             TLB_SFSR, %g3
+       mov             DMMU_SFAR, %g4
+       ldxa            [%g4] ASI_DMMU, %g4
+       ldxa            [%g3] ASI_DMMU, %g5
+       stxa            %g0, [%g3] ASI_DMMU     ! Clear FaultValid bit
+       membar          #Sync
+       bgu,pn          %icc, winfix_mna
+        rdpr           %tpc, %g3
+
+1:     sethi           %hi(109f), %g7
+       ba,pt           %xcc, etrap
+109:    or             %g7, %lo(109b), %g7
+       mov             %l4, %o1
+       mov             %l5, %o2
+       call            mem_address_unaligned
+        add            %sp, PTREGS_OFF, %o0
+       ba,pt           %xcc, rtrap
+        nop
+       .size           do_mna,.-do_mna
+
+       .type           do_lddfmna,#function
+do_lddfmna:
+       sethi           %hi(109f), %g7
+       mov             TLB_SFSR, %g4
+       ldxa            [%g4] ASI_DMMU, %g5
+       stxa            %g0, [%g4] ASI_DMMU     ! Clear FaultValid bit
+       membar          #Sync
+       mov             DMMU_SFAR, %g4
+       ldxa            [%g4] ASI_DMMU, %g4
+       ba,pt           %xcc, etrap
+109:    or             %g7, %lo(109b), %g7
+       mov             %l4, %o1
+       mov             %l5, %o2
+       call            handle_lddfmna
+        add            %sp, PTREGS_OFF, %o0
+       ba,pt           %xcc, rtrap
+        nop
+       .size           do_lddfmna,.-do_lddfmna
+
+       .type           do_stdfmna,#function
+do_stdfmna:
+       sethi           %hi(109f), %g7
+       mov             TLB_SFSR, %g4
+       ldxa            [%g4] ASI_DMMU, %g5
+       stxa            %g0, [%g4] ASI_DMMU     ! Clear FaultValid bit
+       membar          #Sync
+       mov             DMMU_SFAR, %g4
+       ldxa            [%g4] ASI_DMMU, %g4
+       ba,pt           %xcc, etrap
+109:    or             %g7, %lo(109b), %g7
+       mov             %l4, %o1
+       mov             %l5, %o2
+       call            handle_stdfmna
+        add            %sp, PTREGS_OFF, %o0
+       ba,pt           %xcc, rtrap
+        nop
+       .size           do_stdfmna,.-do_stdfmna
+
+       .type           breakpoint_trap,#function
+breakpoint_trap:
+       call            sparc_breakpoint
+        add            %sp, PTREGS_OFF, %o0
+       ba,pt           %xcc, rtrap
+        nop
+       .size           breakpoint_trap,.-breakpoint_trap
index 9e58e8cba1c38655fdd7138526d9dc29e60ca5c1..d569f60c24b87fd4fc23d6e749fc37d083bb22a3 100644 (file)
@@ -412,12 +412,6 @@ static int __init build_one_resource(struct device_node *parent,
 
 static int __init use_1to1_mapping(struct device_node *pp)
 {
-       /* If this is on the PMU bus, don't try to translate it even
-        * if a ranges property exists.
-        */
-       if (!strcmp(pp->name, "pmu"))
-               return 1;
-
        /* If we have a ranges property in the parent, use it.  */
        if (of_find_property(pp, "ranges", NULL) != NULL)
                return 0;
index 49f912766519c493ebf51db93e4c2ced79a81db8..dbf2fc2f4d8713a83b5481e2b8f211a6f7c5866c 100644 (file)
@@ -23,7 +23,6 @@
 #include <asm/pgtable.h>
 #include <asm/irq.h>
 #include <asm/ebus.h>
-#include <asm/isa.h>
 #include <asm/prom.h>
 #include <asm/apb.h>
 
@@ -885,7 +884,6 @@ static int __init pcibios_init(void)
 
        pci_scan_each_controller_bus();
 
-       isa_init();
        ebus_init();
        power_init();
 
index acf8c5250aa9822f633e7367b662ea6f883a4059..0560137491578b25b7aba3d1ce321d46ce0e8209 100644 (file)
@@ -1,5 +1,4 @@
-/*  $Id: process.c,v 1.131 2002/02/09 19:49:30 davem Exp $
- *  arch/sparc64/kernel/process.c
+/*  arch/sparc64/kernel/process.c
  *
  *  Copyright (C) 1995, 1996 David S. Miller (davem@caip.rutgers.edu)
  *  Copyright (C) 1996       Eddie C. Dost   (ecd@skynet.be)
@@ -368,9 +367,6 @@ void flush_thread(void)
        
        if (get_thread_current_ds() != ASI_AIUS)
                set_fs(USER_DS);
-
-       /* Init new signal delivery disposition. */
-       clear_thread_flag(TIF_NEWSIGNALS);
 }
 
 /* It's a bit more tricky when 64-bit tasks are involved... */
@@ -595,6 +591,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
        if (clone_flags & CLONE_SETTLS)
                t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];
 
+       /* We do not want to accidentally trigger system call restart
+        * handling in the new thread.  Therefore, clear out the trap
+        * type, which will make pt_regs_is_syscall() return false.
+        */
+       pt_regs_clear_trap_type(t->kregs);
+
        return 0;
 }
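
The comment added to copy_thread() above relies on the new pt_regs_is_syscall()/pt_regs_clear_trap_type() helpers, which replace the open-coded test that the do_signal() hunk further down removes (regs->magic & 0x1ff compared against the syscall trap types 0x110, 0x111 and 0x16d). Based on that removed code, the helpers behave roughly like this sketch (not the actual kernel definitions, which operate on struct pt_regs):

/* Sketch only: models the trap-type check that do_signal() used to open-code. */
static inline int pt_regs_is_syscall_sketch(unsigned long magic)
{
	unsigned long tt = magic & 0x1ff;

	return tt == 0x110 || tt == 0x111 || tt == 0x16d;
}

static inline unsigned long pt_regs_clear_trap_type_sketch(unsigned long magic)
{
	return magic & ~0x1ffUL;	/* a fresh thread must not look mid-syscall */
}
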
 
index 77a3e8592cbcfb25a6475ff0070339dd468c6710..f2d88d8f7a427e4ff01d2d3da5d0afbd0ed802ba 100644 (file)
@@ -8,7 +8,7 @@
  *  Copyright (C) 1997,1998 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
  */
 
-#ifdef CONFIG_SPARC32_COMPAT
+#ifdef CONFIG_COMPAT
 #include <linux/compat.h>      /* for compat_old_sigset_t */
 #endif
 #include <linux/sched.h>
@@ -236,9 +236,6 @@ struct rt_signal_frame {
        __siginfo_fpu_t         fpu_state;
 };
 
-/* Align macros */
-#define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame) + 7) & (~7)))
-
 static long _sigpause_common(old_sigset_t set)
 {
        set &= _BLOCKABLE;
@@ -400,7 +397,7 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
        synchronize_user_stack();
        save_and_clear_fpu();
        
-       sigframe_size = RT_ALIGNEDSZ;
+       sigframe_size = sizeof(struct rt_signal_frame);
        if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
                sigframe_size -= sizeof(__siginfo_fpu_t);
 
@@ -516,11 +513,10 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        struct k_sigaction ka;
        sigset_t *oldset;
        siginfo_t info;
-       int signr, tt;
+       int signr;
        
-       tt = regs->magic & 0x1ff;
-       if (tt == 0x110 || tt == 0x111 || tt == 0x16d) {
-               regs->magic &= ~0x1ff;
+       if (pt_regs_is_syscall(regs)) {
+               pt_regs_clear_trap_type(regs);
                cookie.restart_syscall = 1;
        } else
                cookie.restart_syscall = 0;
@@ -531,7 +527,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
        else
                oldset = &current->blocked;
 
-#ifdef CONFIG_SPARC32_COMPAT
+#ifdef CONFIG_COMPAT
        if (test_thread_flag(TIF_32BIT)) {
                extern void do_signal32(sigset_t *, struct pt_regs *,
                                        struct signal_deliver_cookie *);
index 43cdec64d9c9014c9b53380cc69632702e0639ca..91f8d0826db1084b0abb8c7206431406c9108f23 100644 (file)
@@ -1,5 +1,4 @@
-/*  $Id: signal32.c,v 1.74 2002/02/09 19:49:30 davem Exp $
- *  arch/sparc64/kernel/signal32.c
+/*  arch/sparc64/kernel/signal32.c
  *
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
 
-/* Signal frames: the original one (compatible with SunOS):
- *
- * Set up a signal frame... Make the stack look the way SunOS
- * expects it to look which is basically:
- *
- * ---------------------------------- <-- %sp at signal time
- * Struct sigcontext
- * Signal address
- * Ptr to sigcontext area above
- * Signal code
- * The signal number itself
- * One register window
- * ---------------------------------- <-- New %sp
- */
-struct signal_sframe32 {
-       struct reg_window32 sig_window;
-       int sig_num;
-       int sig_code;
-       /* struct sigcontext32 * */ u32 sig_scptr;
-       int sig_address;
-       struct sigcontext32 sig_context;
-       unsigned int extramask[_COMPAT_NSIG_WORDS - 1];
-};
-
 /* This magic should be in g_upper[0] for all upper parts
  * to be valid.
  */
@@ -65,12 +40,7 @@ typedef struct {
        unsigned int asi;
 } siginfo_extra_v8plus_t;
 
-/* 
- * And the new one, intended to be used for Linux applications only
- * (we have enough in there to work with clone).
- * All the interesting bits are in the info field.
- */
-struct new_signal_frame32 {
+struct signal_frame32 {
        struct sparc_stackf32   ss;
        __siginfo32_t           info;
        /* __siginfo_fpu32_t * */ u32 fpu_save;
@@ -149,8 +119,7 @@ struct rt_signal_frame32 {
 };
 
 /* Align macros */
-#define SF_ALIGNEDSZ  (((sizeof(struct signal_sframe32) + 7) & (~7)))
-#define NF_ALIGNEDSZ  (((sizeof(struct new_signal_frame32) + 7) & (~7)))
+#define SF_ALIGNEDSZ  (((sizeof(struct signal_frame32) + 7) & (~7)))
 #define RT_ALIGNEDSZ  (((sizeof(struct rt_signal_frame32) + 7) & (~7)))
 
 int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
@@ -241,17 +210,22 @@ static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu
        return err;
 }
 
-void do_new_sigreturn32(struct pt_regs *regs)
+void do_sigreturn32(struct pt_regs *regs)
 {
-       struct new_signal_frame32 __user *sf;
+       struct signal_frame32 __user *sf;
        unsigned int psr;
        unsigned pc, npc, fpu_save;
        sigset_t set;
        unsigned seta[_COMPAT_NSIG_WORDS];
        int err, i;
        
+       /* Always make any pending restarted system calls return -EINTR */
+       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+       synchronize_user_stack();
+
        regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
-       sf = (struct new_signal_frame32 __user *) regs->u_regs[UREG_FP];
+       sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
 
        /* 1. Make sure we are not getting garbage from the user */
        if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
@@ -319,76 +293,6 @@ segv:
        force_sig(SIGSEGV, current);
 }
 
-asmlinkage void do_sigreturn32(struct pt_regs *regs)
-{
-       struct sigcontext32 __user *scptr;
-       unsigned int pc, npc, psr;
-       sigset_t set;
-       unsigned int seta[_COMPAT_NSIG_WORDS];
-       int err;
-
-       /* Always make any pending restarted system calls return -EINTR */
-       current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
-       synchronize_user_stack();
-       if (test_thread_flag(TIF_NEWSIGNALS)) {
-               do_new_sigreturn32(regs);
-               return;
-       }
-
-       scptr = (struct sigcontext32 __user *)
-               (regs->u_regs[UREG_I0] & 0x00000000ffffffffUL);
-       /* Check sanity of the user arg. */
-       if (!access_ok(VERIFY_READ, scptr, sizeof(struct sigcontext32)) ||
-           (((unsigned long) scptr) & 3))
-               goto segv;
-
-       err = __get_user(pc, &scptr->sigc_pc);
-       err |= __get_user(npc, &scptr->sigc_npc);
-
-       if ((pc | npc) & 3)
-               goto segv; /* Nice try. */
-
-       err |= __get_user(seta[0], &scptr->sigc_mask);
-       /* Note that scptr + 1 points to extramask */
-       err |= copy_from_user(seta+1, scptr + 1,
-                             (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
-       if (err)
-               goto segv;
-       switch (_NSIG_WORDS) {
-               case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32);
-               case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
-               case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
-               case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
-       }
-       sigdelsetmask(&set, ~_BLOCKABLE);
-       spin_lock_irq(&current->sighand->siglock);
-       current->blocked = set;
-       recalc_sigpending();
-       spin_unlock_irq(&current->sighand->siglock);
-       
-       if (test_thread_flag(TIF_32BIT)) {
-               pc &= 0xffffffff;
-               npc &= 0xffffffff;
-       }
-       regs->tpc = pc;
-       regs->tnpc = npc;
-       err = __get_user(regs->u_regs[UREG_FP], &scptr->sigc_sp);
-       err |= __get_user(regs->u_regs[UREG_I0], &scptr->sigc_o0);
-       err |= __get_user(regs->u_regs[UREG_G1], &scptr->sigc_g1);
-
-       /* User can only change condition codes in %tstate. */
-       err |= __get_user(psr, &scptr->sigc_psr);
-       if (err)
-               goto segv;
-       regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
-       regs->tstate |= psr_to_tstate_icc(psr);
-       return;
-
-segv:
-       force_sig(SIGSEGV, current);
-}
-
 asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
 {
        struct rt_signal_frame32 __user *sf;
@@ -504,145 +408,6 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns
        return (void __user *)(sp - framesize);
 }
 
-static void
-setup_frame32(struct sigaction *sa, struct pt_regs *regs, int signr, sigset_t *oldset, siginfo_t *info)
-{
-       struct signal_sframe32 __user *sframep;
-       struct sigcontext32 __user *sc;
-       unsigned int seta[_COMPAT_NSIG_WORDS];
-       int err = 0;
-       void __user *sig_address;
-       int sig_code;
-       unsigned long pc = regs->tpc;
-       unsigned long npc = regs->tnpc;
-       unsigned int psr;
-
-       if (test_thread_flag(TIF_32BIT)) {
-               pc &= 0xffffffff;
-               npc &= 0xffffffff;
-       }
-
-       synchronize_user_stack();
-       save_and_clear_fpu();
-
-       sframep = (struct signal_sframe32 __user *)
-               get_sigframe(sa, regs, SF_ALIGNEDSZ);
-       if (invalid_frame_pointer(sframep, sizeof(*sframep))){
-               /* Don't change signal code and address, so that
-                * post mortem debuggers can have a look.
-                */
-               do_exit(SIGILL);
-       }
-
-       sc = &sframep->sig_context;
-
-       /* We've already made sure frame pointer isn't in kernel space... */
-       err = __put_user((sas_ss_flags(regs->u_regs[UREG_FP]) == SS_ONSTACK),
-                        &sc->sigc_onstack);
-       
-       switch (_NSIG_WORDS) {
-       case 4: seta[7] = (oldset->sig[3] >> 32);
-               seta[6] = oldset->sig[3];
-       case 3: seta[5] = (oldset->sig[2] >> 32);
-               seta[4] = oldset->sig[2];
-       case 2: seta[3] = (oldset->sig[1] >> 32);
-               seta[2] = oldset->sig[1];
-       case 1: seta[1] = (oldset->sig[0] >> 32);
-               seta[0] = oldset->sig[0];
-       }
-       err |= __put_user(seta[0], &sc->sigc_mask);
-       err |= __copy_to_user(sframep->extramask, seta + 1,
-                             (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
-       err |= __put_user(regs->u_regs[UREG_FP], &sc->sigc_sp);
-       err |= __put_user(pc, &sc->sigc_pc);
-       err |= __put_user(npc, &sc->sigc_npc);
-       psr = tstate_to_psr(regs->tstate);
-       if (current_thread_info()->fpsaved[0] & FPRS_FEF)
-               psr |= PSR_EF;
-       err |= __put_user(psr, &sc->sigc_psr);
-       err |= __put_user(regs->u_regs[UREG_G1], &sc->sigc_g1);
-       err |= __put_user(regs->u_regs[UREG_I0], &sc->sigc_o0);
-       err |= __put_user(get_thread_wsaved(), &sc->sigc_oswins);
-
-       err |= copy_in_user((u32 __user *)sframep,
-                           (u32 __user *)(regs->u_regs[UREG_FP]),
-                           sizeof(struct reg_window32));
-                      
-       set_thread_wsaved(0); /* So process is allowed to execute. */
-       err |= __put_user(signr, &sframep->sig_num);
-       sig_address = NULL;
-       sig_code = 0;
-       if (SI_FROMKERNEL (info) && (info->si_code & __SI_MASK) == __SI_FAULT) {
-               sig_address = info->si_addr;
-               switch (signr) {
-               case SIGSEGV:
-                       switch (info->si_code) {
-                       case SEGV_MAPERR: sig_code = SUBSIG_NOMAPPING; break;
-                       default: sig_code = SUBSIG_PROTECTION; break;
-                       }
-                       break;
-               case SIGILL:
-                       switch (info->si_code) {
-                       case ILL_ILLOPC: sig_code = SUBSIG_ILLINST; break;
-                       case ILL_PRVOPC: sig_code = SUBSIG_PRIVINST; break;
-                       case ILL_ILLTRP: sig_code = SUBSIG_BADTRAP(info->si_trapno); break;
-                       default: sig_code = SUBSIG_STACK; break;
-                       }
-                       break;
-               case SIGFPE:
-                       switch (info->si_code) {
-                       case FPE_INTDIV: sig_code = SUBSIG_IDIVZERO; break;
-                       case FPE_INTOVF: sig_code = SUBSIG_FPINTOVFL; break;
-                       case FPE_FLTDIV: sig_code = SUBSIG_FPDIVZERO; break;
-                       case FPE_FLTOVF: sig_code = SUBSIG_FPOVFLOW; break;
-                       case FPE_FLTUND: sig_code = SUBSIG_FPUNFLOW; break;
-                       case FPE_FLTRES: sig_code = SUBSIG_FPINEXACT; break;
-                       case FPE_FLTINV: sig_code = SUBSIG_FPOPERROR; break;
-                       default: sig_code = SUBSIG_FPERROR; break;
-                       }
-                       break;
-               case SIGBUS:
-                       switch (info->si_code) {
-                       case BUS_ADRALN: sig_code = SUBSIG_ALIGNMENT; break;
-                       case BUS_ADRERR: sig_code = SUBSIG_MISCERROR; break;
-                       default: sig_code = SUBSIG_BUSTIMEOUT; break;
-                       }
-                       break;
-               case SIGEMT:
-                       switch (info->si_code) {
-                       case EMT_TAGOVF: sig_code = SUBSIG_TAG; break;
-                       }
-                       break;
-               case SIGSYS:
-                       if (info->si_code == (__SI_FAULT|0x100)) {
-                               /* See sys_sunos32.c */
-                               sig_code = info->si_trapno;
-                               break;
-                       }
-               default:
-                       sig_address = NULL;
-               }
-       }
-       err |= __put_user(ptr_to_compat(sig_address), &sframep->sig_address);
-       err |= __put_user(sig_code, &sframep->sig_code);
-       err |= __put_user(ptr_to_compat(sc), &sframep->sig_scptr);
-       if (err)
-               goto sigsegv;
-
-       regs->u_regs[UREG_FP] = (unsigned long) sframep;
-       regs->tpc = (unsigned long) sa->sa_handler;
-       regs->tnpc = (regs->tpc + 4);
-       if (test_thread_flag(TIF_32BIT)) {
-               regs->tpc &= 0xffffffff;
-               regs->tnpc &= 0xffffffff;
-       }
-       return;
-
-sigsegv:
-       force_sigsegv(signr, current);
-}
-
-
 static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
 {
        unsigned long *fpregs = current_thread_info()->fpregs;
@@ -663,10 +428,10 @@ static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
        return err;
 }
 
-static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
-                             int signo, sigset_t *oldset)
+static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
+                         int signo, sigset_t *oldset)
 {
-       struct new_signal_frame32 __user *sf;
+       struct signal_frame32 __user *sf;
        int sigframe_size;
        u32 psr;
        int i, err;
@@ -676,11 +441,11 @@ static void new_setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
        synchronize_user_stack();
        save_and_clear_fpu();
        
-       sigframe_size = NF_ALIGNEDSZ;
+       sigframe_size = SF_ALIGNEDSZ;
        if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
                sigframe_size -= sizeof(__siginfo_fpu_t);
 
-       sf = (struct new_signal_frame32 __user *)
+       sf = (struct signal_frame32 __user *)
                get_sigframe(&ka->sa, regs, sigframe_size);
        
        if (invalid_frame_pointer(sf, sigframe_size))
@@ -944,10 +709,9 @@ static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
 {
        if (ka->sa.sa_flags & SA_SIGINFO)
                setup_rt_frame32(ka, regs, signr, oldset, info);
-       else if (test_thread_flag(TIF_NEWSIGNALS))
-               new_setup_frame32(ka, regs, signr, oldset);
        else
-               setup_frame32(&ka->sa, regs, signr, oldset, info);
+               setup_frame32(ka, regs, signr, oldset);
+
        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NOMASK))
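
Both the surviving do_sigreturn32() and the deleted SunOS-style paths convert the signal mask between the compat layout (_COMPAT_NSIG_WORDS 32-bit words) and the native sigset_t (64-bit words), pairing compat word 2k as the low half and word 2k+1 as the high half of native word k. A self-contained sketch of that packing (illustrative, not the kernel code):

#include <stdint.h>

#define COMPAT_WORDS	2	/* e.g. 64 signals: two 32-bit words, one 64-bit word */
#define NATIVE_WORDS	(COMPAT_WORDS / 2)

/* compat 32-bit words -> native 64-bit words, as in do_sigreturn32() */
static void compat_to_native(const uint32_t seta[COMPAT_WORDS],
			     uint64_t sig[NATIVE_WORDS])
{
	for (int i = 0; i < NATIVE_WORDS; i++)
		sig[i] = seta[2 * i] | ((uint64_t)seta[2 * i + 1] << 32);
}

/* native 64-bit words -> compat 32-bit words, as in the setup_*frame32 paths */
static void native_to_compat(const uint64_t sig[NATIVE_WORDS],
			     uint32_t seta[COMPAT_WORDS])
{
	for (int i = 0; i < NATIVE_WORDS; i++) {
		seta[2 * i]     = (uint32_t)sig[i];
		seta[2 * i + 1] = (uint32_t)(sig[i] >> 32);
	}
}
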
index 409dd71f2738c88f59ae367bdc93aceb2e83455a..3aba47624df45aa0e773c169fe6498b5bb5e377e 100644 (file)
@@ -38,7 +38,6 @@
 #include <asm/pgtable.h>
 #include <asm/oplib.h>
 #include <asm/uaccess.h>
-#include <asm/timer.h>
 #include <asm/starfire.h>
 #include <asm/tlb.h>
 #include <asm/sections.h>
@@ -910,6 +909,9 @@ extern unsigned long xcall_flush_tlb_kernel_range;
 extern unsigned long xcall_report_regs;
 extern unsigned long xcall_receive_signal;
 extern unsigned long xcall_new_mmu_context_version;
+#ifdef CONFIG_KGDB
+extern unsigned long xcall_kgdb_capture;
+#endif
 
 #ifdef DCACHE_ALIASING_POSSIBLE
 extern unsigned long xcall_flush_dcache_page_cheetah;
@@ -1079,6 +1081,13 @@ void smp_new_mmu_context_version(void)
        smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
 }
 
+#ifdef CONFIG_KGDB
+void kgdb_roundup_cpus(unsigned long flags)
+{
+       smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
+}
+#endif
+
 void smp_report_regs(void)
 {
        smp_cross_call(&xcall_report_regs, 0, 0, 0);
index 66336590e83087c1698c9e4b25f4bbde14cf688f..8ac0b99f2c5563bad603edd5628794f1bdf5a898 100644 (file)
@@ -49,7 +49,6 @@
 #endif
 #ifdef CONFIG_PCI
 #include <asm/ebus.h>
-#include <asm/isa.h>
 #endif
 #include <asm/ns87303.h>
 #include <asm/timer.h>
@@ -187,7 +186,6 @@ EXPORT_SYMBOL(insw);
 EXPORT_SYMBOL(insl);
 #ifdef CONFIG_PCI
 EXPORT_SYMBOL(ebus_chain);
-EXPORT_SYMBOL(isa_chain);
 EXPORT_SYMBOL(pci_alloc_consistent);
 EXPORT_SYMBOL(pci_free_consistent);
 EXPORT_SYMBOL(pci_map_single);
diff --git a/arch/sparc64/kernel/spiterrs.S b/arch/sparc64/kernel/spiterrs.S
new file mode 100644 (file)
index 0000000..ef902c6
--- /dev/null
@@ -0,0 +1,245 @@
+       /* We need to carefully read the error status, ACK the errors,
+        * prevent recursive traps, and pass the information on to C
+        * code for logging.
+        *
+        * We pass the AFAR in as-is, and we encode the status
+        * information as described in asm-sparc64/sfafsr.h
+        */
+       .type           __spitfire_access_error,#function
+__spitfire_access_error:
+       /* Disable ESTATE error reporting so that we do not take
+        * recursive traps and RED state the processor.
+        */
+       stxa            %g0, [%g0] ASI_ESTATE_ERROR_EN
+       membar          #Sync
+
+       mov             UDBE_UE, %g1
+       ldxa            [%g0] ASI_AFSR, %g4     ! Get AFSR
+
+       /* __spitfire_cee_trap branches here with AFSR in %g4 and
+        * UDBE_CE in %g1.  It only clears ESTATE_ERR_CE in the ESTATE
+        * Error Enable register.
+        */
+__spitfire_cee_trap_continue:
+       ldxa            [%g0] ASI_AFAR, %g5     ! Get AFAR
+
+       rdpr            %tt, %g3
+       and             %g3, 0x1ff, %g3         ! Paranoia
+       sllx            %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
+       or              %g4, %g3, %g4
+       rdpr            %tl, %g3
+       cmp             %g3, 1
+       mov             1, %g3
+       bleu            %xcc, 1f
+        sllx           %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3
+
+       or              %g4, %g3, %g4
+
+       /* Read in the UDB error register state, clearing the sticky
+        * error bits as-needed.  We only clear them if the UE bit is
+        * set.  Likewise, __spitfire_cee_trap below will only do so
+        * if the CE bit is set.
+        *
+        * NOTE: UltraSparc-I/II have high and low UDB error
+        *       registers, corresponding to the two UDB units
+        *       present on those chips.  UltraSparc-IIi only
+        *       has a single UDB, called "SDB" in the manual.
+        *       For IIi the upper UDB register always reads
+        *       as zero so for our purposes things will just
+        *       work with the checks below.
+        */
+1:     ldxa            [%g0] ASI_UDBH_ERROR_R, %g3
+       and             %g3, 0x3ff, %g7         ! Paranoia
+       sllx            %g7, SFSTAT_UDBH_SHIFT, %g7
+       or              %g4, %g7, %g4
+       andcc           %g3, %g1, %g3           ! UDBE_UE or UDBE_CE
+       be,pn           %xcc, 1f
+        nop
+       stxa            %g3, [%g0] ASI_UDB_ERROR_W
+       membar          #Sync
+
+1:     mov             0x18, %g3
+       ldxa            [%g3] ASI_UDBL_ERROR_R, %g3
+       and             %g3, 0x3ff, %g7         ! Paranoia
+       sllx            %g7, SFSTAT_UDBL_SHIFT, %g7
+       or              %g4, %g7, %g4
+       andcc           %g3, %g1, %g3           ! UDBE_UE or UDBE_CE
+       be,pn           %xcc, 1f
+        nop
+       mov             0x18, %g7
+       stxa            %g3, [%g7] ASI_UDB_ERROR_W
+       membar          #Sync
+
+1:     /* Ok, now that we've latched the error state, clear the
+        * sticky bits in the AFSR.
+        */
+       stxa            %g4, [%g0] ASI_AFSR
+       membar          #Sync
+
+       rdpr            %tl, %g2
+       cmp             %g2, 1
+       rdpr            %pil, %g2
+       bleu,pt         %xcc, 1f
+        wrpr           %g0, 15, %pil
+
+       ba,pt           %xcc, etraptl1
+        rd             %pc, %g7
+
+       ba,pt           %xcc, 2f
+        nop
+
+1:     ba,pt           %xcc, etrap_irq
+        rd             %pc, %g7
+
+2:
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call    trace_hardirqs_off
+        nop
+#endif
+       mov             %l4, %o1
+       mov             %l5, %o2
+       call            spitfire_access_error
+        add            %sp, PTREGS_OFF, %o0
+       ba,pt           %xcc, rtrap
+        nop
+       .size           __spitfire_access_error,.-__spitfire_access_error
+
+       /* This is the trap handler entry point for ECC correctable
+        * errors.  They are corrected, but we listen for the trap so
+        * that the event can be logged.
+        *
+        * Disrupting errors are either:
+        * 1) single-bit ECC errors during UDB reads to system
+        *    memory
+        * 2) data parity errors during write-back events
+        *
+        * As far as I can make out from the manual, the CEE trap is
+        * only for correctable errors during memory read accesses by
+        * the front-end of the processor.
+        *
+        * The code below is only for trap level 1 CEE events, as it
+        * is the only situation where we can safely record and log.
+        * For trap level >1 we just clear the CE bit in the AFSR and
+        * return.
+        *
+        * This is just like __spitfire_access_error above, but it
+        * specifically handles correctable errors.  If an
+        * uncorrectable error is indicated in the AFSR we will branch
+        * directly above to __spitfire_access_error to handle it
+        * instead.  Uncorrectable therefore takes priority over
+        * correctable, and the error logging C code will notice this
+        * case by inspecting the trap type.
+        */
+       .type           __spitfire_cee_trap,#function
+__spitfire_cee_trap:
+       ldxa            [%g0] ASI_AFSR, %g4     ! Get AFSR
+       mov             1, %g3
+       sllx            %g3, SFAFSR_UE_SHIFT, %g3
+       andcc           %g4, %g3, %g0           ! Check for UE
+       bne,pn          %xcc, __spitfire_access_error
+        nop
+
+       /* Ok, in this case we only have a correctable error.
+        * Indicate we only wish to capture that state in register
+        * %g1, and we only disable CE error reporting unlike UE
+        * handling which disables all errors.
+        */
+       ldxa            [%g0] ASI_ESTATE_ERROR_EN, %g3
+       andn            %g3, ESTATE_ERR_CE, %g3
+       stxa            %g3, [%g0] ASI_ESTATE_ERROR_EN
+       membar          #Sync
+
+       /* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
+       ba,pt           %xcc, __spitfire_cee_trap_continue
+        mov            UDBE_CE, %g1
+       .size           __spitfire_cee_trap,.-__spitfire_cee_trap
+
+       .type           __spitfire_data_access_exception_tl1,#function
+__spitfire_data_access_exception_tl1:
+       rdpr            %pstate, %g4
+       wrpr            %g4, PSTATE_MG|PSTATE_AG, %pstate
+       mov             TLB_SFSR, %g3
+       mov             DMMU_SFAR, %g5
+       ldxa            [%g3] ASI_DMMU, %g4     ! Get SFSR
+       ldxa            [%g5] ASI_DMMU, %g5     ! Get SFAR
+       stxa            %g0, [%g3] ASI_DMMU     ! Clear SFSR.FaultValid bit
+       membar          #Sync
+       rdpr            %tt, %g3
+       cmp             %g3, 0x80               ! first win spill/fill trap
+       blu,pn          %xcc, 1f
+        cmp            %g3, 0xff               ! last win spill/fill trap
+       bgu,pn          %xcc, 1f
+        nop
+       ba,pt           %xcc, winfix_dax
+        rdpr           %tpc, %g3
+1:     sethi           %hi(109f), %g7
+       ba,pt           %xcc, etraptl1
+109:    or             %g7, %lo(109b), %g7
+       mov             %l4, %o1
+       mov             %l5, %o2
+       call            spitfire_data_access_exception_tl1
+        add            %sp, PTREGS_OFF, %o0
+       ba,pt           %xcc, rtrap
+        nop
+       .size           __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
+
+       .type           __spitfire_data_access_exception,#function
+__spitfire_data_access_exception:
+       rdpr            %pstate, %g4
+       wrpr            %g4, PSTATE_MG|PSTATE_AG, %pstate
+       mov             TLB_SFSR, %g3
+       mov             DMMU_SFAR, %g5
+       ldxa            [%g3] ASI_DMMU, %g4     ! Get SFSR
+       ldxa            [%g5] ASI_DMMU, %g5     ! Get SFAR
+       stxa            %g0, [%g3] ASI_DMMU     ! Clear SFSR.FaultValid bit
+       membar          #Sync
+       sethi           %hi(109f), %g7
+       ba,pt           %xcc, etrap
+109:    or             %g7, %lo(109b), %g7
+       mov             %l4, %o1
+       mov             %l5, %o2
+       call            spitfire_data_access_exception
+        add            %sp, PTREGS_OFF, %o0
+       ba,pt           %xcc, rtrap
+        nop
+       .size           __spitfire_data_access_exception,.-__spitfire_data_access_exception
+
+       .type           __spitfire_insn_access_exception_tl1,#function
+__spitfire_insn_access_exception_tl1:
+       rdpr            %pstate, %g4
+       wrpr            %g4, PSTATE_MG|PSTATE_AG, %pstate
+       mov             TLB_SFSR, %g3
+       ldxa            [%g3] ASI_IMMU, %g4     ! Get SFSR
+       rdpr            %tpc, %g5               ! IMMU has no SFAR, use TPC
+       stxa            %g0, [%g3] ASI_IMMU     ! Clear FaultValid bit
+       membar          #Sync
+       sethi           %hi(109f), %g7
+       ba,pt           %xcc, etraptl1
+109:    or             %g7, %lo(109b), %g7
+       mov             %l4, %o1
+       mov             %l5, %o2
+       call            spitfire_insn_access_exception_tl1
+        add            %sp, PTREGS_OFF, %o0
+       ba,pt           %xcc, rtrap
+        nop
+       .size           __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
+
+       .type           __spitfire_insn_access_exception,#function
+__spitfire_insn_access_exception:
+       rdpr            %pstate, %g4
+       wrpr            %g4, PSTATE_MG|PSTATE_AG, %pstate
+       mov             TLB_SFSR, %g3
+       ldxa            [%g3] ASI_IMMU, %g4     ! Get SFSR
+       rdpr            %tpc, %g5               ! IMMU has no SFAR, use TPC
+       stxa            %g0, [%g3] ASI_IMMU     ! Clear FaultValid bit
+       membar          #Sync
+       sethi           %hi(109f), %g7
+       ba,pt           %xcc, etrap
+109:    or             %g7, %lo(109b), %g7
+       mov             %l4, %o1
+       mov             %l5, %o2
+       call            spitfire_insn_access_exception
+        add            %sp, PTREGS_OFF, %o0
+       ba,pt           %xcc, rtrap
+        nop
+       .size           __spitfire_insn_access_exception,.-__spitfire_insn_access_exception
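
__spitfire_access_error above folds the trap type, a "trap level > 1" flag and both UDB error register values into the AFSR word handed to the C logging code, using the SFSTAT_* shifts from asm-sparc64/sfafsr.h. In C the packing amounts to roughly the following (the shift values below are placeholders chosen only so the fields do not overlap; the real layout is defined in sfafsr.h):

#include <stdint.h>

#define SFSTAT_TRAP_TYPE_SHIFT	32	/* placeholder */
#define SFSTAT_TL_GT_ONE_SHIFT	41	/* placeholder */
#define SFSTAT_UDBH_SHIFT	42	/* placeholder */
#define SFSTAT_UDBL_SHIFT	52	/* placeholder */

static uint64_t pack_sfsr(uint64_t afsr, unsigned int tt, unsigned int tl,
			  uint64_t udbh, uint64_t udbl)
{
	afsr |= (uint64_t)(tt & 0x1ff) << SFSTAT_TRAP_TYPE_SHIFT;
	if (tl > 1)
		afsr |= 1ULL << SFSTAT_TL_GT_ONE_SHIFT;
	afsr |= (udbh & 0x3ff) << SFSTAT_UDBH_SHIFT;
	afsr |= (udbl & 0x3ff) << SFSTAT_UDBL_SHIFT;
	return afsr;
}
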
index c1a61e98899a7ec741db282798bfdaddbdc4763d..161ce4710fe76143326905011db732358de7c891 100644 (file)
@@ -554,10 +554,8 @@ asmlinkage long compat_sys_sigaction(int sig, struct old_sigaction32 __user *act
         struct k_sigaction new_ka, old_ka;
         int ret;
 
-       if (sig < 0) {
-               set_thread_flag(TIF_NEWSIGNALS);
-               sig = -sig;
-       }
+       WARN_ON_ONCE(sig >= 0);
+       sig = -sig;
 
         if (act) {
                compat_old_sigset_t mask;
@@ -601,11 +599,6 @@ asmlinkage long compat_sys_rt_sigaction(int sig,
         if (sigsetsize != sizeof(compat_sigset_t))
                 return -EINVAL;
 
-       /* All tasks which use RT signals (effectively) use
-        * new style signals.
-        */
-       set_thread_flag(TIF_NEWSIGNALS);
-
         if (act) {
                u32 u_handler, u_restorer;
 
diff --git a/arch/sparc64/kernel/syscalls.S b/arch/sparc64/kernel/syscalls.S
new file mode 100644 (file)
index 0000000..db19ed6
--- /dev/null
@@ -0,0 +1,279 @@
+       /* SunOS's execv() call only specifies the argv argument, the
+        * environment settings are the same as those of the calling process.
+        */
+sys_execve:
+       sethi   %hi(sparc_execve), %g1
+       ba,pt   %xcc, execve_merge
+        or     %g1, %lo(sparc_execve), %g1
+
+#ifdef CONFIG_COMPAT
+sunos_execv:
+       stx     %g0, [%sp + PTREGS_OFF + PT_V9_I2]
+sys32_execve:
+       sethi   %hi(sparc32_execve), %g1
+       or      %g1, %lo(sparc32_execve), %g1
+#endif
+
+execve_merge:
+       flushw
+       jmpl    %g1, %g0
+        add    %sp, PTREGS_OFF, %o0
+
+       .align  32
+sys_pipe:
+       ba,pt   %xcc, sparc_pipe
+        add    %sp, PTREGS_OFF, %o0
+sys_nis_syscall:
+       ba,pt   %xcc, c_sys_nis_syscall
+        add    %sp, PTREGS_OFF, %o0
+sys_memory_ordering:
+       ba,pt   %xcc, sparc_memory_ordering
+        add    %sp, PTREGS_OFF, %o1
+sys_sigaltstack:
+       ba,pt   %xcc, do_sigaltstack
+        add    %i6, STACK_BIAS, %o2
+#ifdef CONFIG_COMPAT
+sys32_sigstack:
+       ba,pt   %xcc, do_sys32_sigstack
+        mov    %i6, %o2
+sys32_sigaltstack:
+       ba,pt   %xcc, do_sys32_sigaltstack
+        mov    %i6, %o2
+#endif
+       .align  32
+#ifdef CONFIG_COMPAT
+sys32_sigreturn:
+       add     %sp, PTREGS_OFF, %o0
+       call    do_sigreturn32
+        add    %o7, 1f-.-4, %o7
+       nop
+#endif
+sys_rt_sigreturn:
+       add     %sp, PTREGS_OFF, %o0
+       call    do_rt_sigreturn
+        add    %o7, 1f-.-4, %o7
+       nop
+#ifdef CONFIG_COMPAT
+sys32_rt_sigreturn:
+       add     %sp, PTREGS_OFF, %o0
+       call    do_rt_sigreturn32
+        add    %o7, 1f-.-4, %o7
+       nop
+#endif
+       .align  32
+1:     ldx     [%g6 + TI_FLAGS], %l5
+       andcc   %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+       be,pt   %icc, rtrap
+        nop
+       add     %sp, PTREGS_OFF, %o0
+       call    syscall_trace
+        mov    1, %o1
+       ba,pt   %xcc, rtrap
+        nop
+
+       /* This is how fork() was meant to be done, 8 instruction entry.
+        *
+        * I questioned the following code briefly, let me clear things
+        * up so you must not reason on it like I did.
+        *
+        * Know the fork_kpsr etc. we use in the sparc32 port?  We don't
+        * need it here because the only piece of window state we copy to
+        * the child is the CWP register.  Even if the parent sleeps,
+        * we are safe because we stuck it into pt_regs of the parent
+        * so it will not change.
+        *
+        * XXX This raises the question, whether we can do the same on
+        * XXX sparc32 to get rid of fork_kpsr _and_ fork_kwim.  The
+        * XXX answer is yes.  We stick fork_kpsr in UREG_G0 and
+        * XXX fork_kwim in UREG_G1 (global registers are considered
+        * XXX volatile across a system call in the sparc ABI I think
+        * XXX if it isn't we can use regs->y instead, anyone who depends
+        * XXX upon the Y register being preserved across a fork deserves
+        * XXX to lose).
+        *
+        * In fact we should take advantage of that fact for other things
+        * during system calls...
+        */
+       .align  32
+sys_vfork: /* Under Linux, vfork and fork are just special cases of clone. */
+       sethi   %hi(0x4000 | 0x0100 | SIGCHLD), %o0
+       or      %o0, %lo(0x4000 | 0x0100 | SIGCHLD), %o0
+       ba,pt   %xcc, sys_clone
+sys_fork:
+        clr    %o1
+       mov     SIGCHLD, %o0
+sys_clone:
+       flushw
+       movrz   %o1, %fp, %o1
+       mov     0, %o3
+       ba,pt   %xcc, sparc_do_fork
+        add    %sp, PTREGS_OFF, %o2
+
+       .globl  ret_from_syscall
+ret_from_syscall:
+       /* Clear current_thread_info()->new_child, and
+        * check performance counter stuff too.
+        */
+       stb     %g0, [%g6 + TI_NEW_CHILD]
+       ldx     [%g6 + TI_FLAGS], %l0
+       call    schedule_tail
+        mov    %g7, %o0
+       andcc   %l0, _TIF_PERFCTR, %g0
+       be,pt   %icc, 1f
+        nop
+       ldx     [%g6 + TI_PCR], %o7
+       wr      %g0, %o7, %pcr
+
+       /* Blackbird errata workaround.  See commentary in
+        * smp.c:smp_percpu_timer_interrupt() for more
+        * information.
+        */
+       ba,pt   %xcc, 99f
+        nop
+
+       .align  64
+99:    wr      %g0, %g0, %pic
+       rd      %pic, %g0
+
+1:     ba,pt   %xcc, ret_sys_call
+        ldx    [%sp + PTREGS_OFF + PT_V9_I0], %o0
+
+       .globl  sparc_exit
+       .type   sparc_exit,#function
+sparc_exit:
+       rdpr    %pstate, %g2
+       wrpr    %g2, PSTATE_IE, %pstate
+       rdpr    %otherwin, %g1
+       rdpr    %cansave, %g3
+       add     %g3, %g1, %g3
+       wrpr    %g3, 0x0, %cansave
+       wrpr    %g0, 0x0, %otherwin
+       wrpr    %g2, 0x0, %pstate
+       ba,pt   %xcc, sys_exit
+        stb    %g0, [%g6 + TI_WSAVED]
+       .size   sparc_exit,.-sparc_exit
+
+linux_sparc_ni_syscall:
+       sethi   %hi(sys_ni_syscall), %l7
+       ba,pt   %xcc, 4f
+        or     %l7, %lo(sys_ni_syscall), %l7
+
+linux_syscall_trace32:
+       add     %sp, PTREGS_OFF, %o0
+       call    syscall_trace
+        clr    %o1
+       srl     %i0, 0, %o0
+       srl     %i4, 0, %o4
+       srl     %i1, 0, %o1
+       srl     %i2, 0, %o2
+       ba,pt   %xcc, 2f
+        srl    %i3, 0, %o3
+
+linux_syscall_trace:
+       add     %sp, PTREGS_OFF, %o0
+       call    syscall_trace
+        clr    %o1
+       mov     %i0, %o0
+       mov     %i1, %o1
+       mov     %i2, %o2
+       mov     %i3, %o3
+       b,pt    %xcc, 2f
+        mov    %i4, %o4
+
+
+       /* Linux 32-bit system calls enter here... */
+       .align  32
+       .globl  linux_sparc_syscall32
+linux_sparc_syscall32:
+       /* Direct access to user regs, much faster. */
+       cmp     %g1, NR_SYSCALLS                        ! IEU1  Group
+       bgeu,pn %xcc, linux_sparc_ni_syscall            ! CTI
+        srl    %i0, 0, %o0                             ! IEU0
+       sll     %g1, 2, %l4                             ! IEU0  Group
+       srl     %i4, 0, %o4                             ! IEU1
+       lduw    [%l7 + %l4], %l7                        ! Load
+       srl     %i1, 0, %o1                             ! IEU0  Group
+       ldx     [%g6 + TI_FLAGS], %l0           ! Load
+
+       srl     %i5, 0, %o5                             ! IEU1
+       srl     %i2, 0, %o2                             ! IEU0  Group
+       andcc   %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+       bne,pn  %icc, linux_syscall_trace32             ! CTI
+        mov    %i0, %l5                                ! IEU1
+       call    %l7                                     ! CTI   Group brk forced
+        srl    %i3, 0, %o3                             ! IEU0
+       ba,a,pt %xcc, 3f
+
+       /* Linux native system calls enter here... */
+       .align  32
+       .globl  linux_sparc_syscall
+linux_sparc_syscall:
+       /* Direct access to user regs, much faster. */
+       cmp     %g1, NR_SYSCALLS                        ! IEU1  Group
+       bgeu,pn %xcc, linux_sparc_ni_syscall            ! CTI
+        mov    %i0, %o0                                ! IEU0
+       sll     %g1, 2, %l4                             ! IEU0  Group
+       mov     %i1, %o1                                ! IEU1
+       lduw    [%l7 + %l4], %l7                        ! Load
+4:     mov     %i2, %o2                                ! IEU0  Group
+       ldx     [%g6 + TI_FLAGS], %l0           ! Load
+
+       mov     %i3, %o3                                ! IEU1
+       mov     %i4, %o4                                ! IEU0  Group
+       andcc   %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %g0
+       bne,pn  %icc, linux_syscall_trace               ! CTI   Group
+        mov    %i0, %l5                                ! IEU0
+2:     call    %l7                                     ! CTI   Group brk forced
+        mov    %i5, %o5                                ! IEU0
+       nop
+
+3:     stx     %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+ret_sys_call:
+       ldx     [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
+       ldx     [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
+       sra     %o0, 0, %o0
+       mov     %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
+       sllx    %g2, 32, %g2
+
+       /* Check if force_successful_syscall_return()
+        * was invoked.
+        */
+       ldub    [%g6 + TI_SYS_NOERROR], %l2
+       brnz,a,pn %l2, 80f
+        stb    %g0, [%g6 + TI_SYS_NOERROR]
+
+       cmp     %o0, -ERESTART_RESTARTBLOCK
+       bgeu,pn %xcc, 1f
+        andcc  %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6
+80:
+       /* System call success, clear Carry condition code. */
+       andn    %g3, %g2, %g3
+       stx     %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]  
+       bne,pn  %icc, linux_syscall_trace2
+        add    %l1, 0x4, %l2                   ! npc = npc+4
+       stx     %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
+       ba,pt   %xcc, rtrap
+        stx    %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+
+1:
+       /* System call failure, set Carry condition code.
+        * Also, get abs(errno) to return to the process.
+        */
+       andcc   %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT), %l6  
+       sub     %g0, %o0, %o0
+       or      %g3, %g2, %g3
+       stx     %o0, [%sp + PTREGS_OFF + PT_V9_I0]
+       stx     %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE]
+       bne,pn  %icc, linux_syscall_trace2
+        add    %l1, 0x4, %l2                   ! npc = npc+4
+       stx     %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
+
+       b,pt    %xcc, rtrap
+        stx    %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
+linux_syscall_trace2:
+       add     %sp, PTREGS_OFF, %o0
+       call    syscall_trace
+        mov    1, %o1
+       stx     %l1, [%sp + PTREGS_OFF + PT_V9_TPC]
+       ba,pt   %xcc, rtrap
+        stx    %l2, [%sp + PTREGS_OFF + PT_V9_TNPC]
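
ret_sys_call above implements the sparc64 syscall return convention: a return value in the error window (between -ERESTART_RESTARTBLOCK and -1, compared as unsigned) is negated to abs(errno) and the carry bit is set in TSTATE, otherwise carry is cleared and the value returned as-is; force_successful_syscall_return() can suppress the error path via TI_SYS_NOERROR. The decision restated as a compact C sketch (ERESTART_RESTARTBLOCK is 516 in the kernel's internal errno range):

#include <stdbool.h>

#define ERESTART_RESTARTBLOCK	516

/* true  -> error path: set carry, report -retval as errno
 * false -> success path: clear carry, return value unchanged
 */
static bool syscall_return_is_error(long retval, bool sys_noerror)
{
	if (sys_noerror)	/* set by force_successful_syscall_return() */
		return false;
	return (unsigned long)retval >= (unsigned long)-ERESTART_RESTARTBLOCK;
}
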
index b0de4c00b11a5cbcdaf2fd9277e8b3b27a3fc357..450053af039e48abd0d6c92faca49a69e6d2107f 100644 (file)
@@ -153,7 +153,7 @@ tl0_resv164:        BTRAP(0x164) BTRAP(0x165) BTRAP(0x166) BTRAP(0x167) BTRAP(0x168)
 tl0_resv169:   BTRAP(0x169) BTRAP(0x16a) BTRAP(0x16b) BTRAP(0x16c)
 tl0_linux64:   LINUX_64BIT_SYSCALL_TRAP
 tl0_gsctx:     TRAP(sparc64_get_context) TRAP(sparc64_set_context)
-tl0_resv170:   KPROBES_TRAP(0x170) KPROBES_TRAP(0x171) BTRAP(0x172)
+tl0_resv170:   KPROBES_TRAP(0x170) KPROBES_TRAP(0x171) KGDB_TRAP(0x172)
 tl0_resv173:   BTRAP(0x173) BTRAP(0x174) BTRAP(0x175) BTRAP(0x176) BTRAP(0x177)
 tl0_resv178:   BTRAP(0x178) BTRAP(0x179) BTRAP(0x17a) BTRAP(0x17b) BTRAP(0x17c)
 tl0_resv17d:   BTRAP(0x17d) BTRAP(0x17e) BTRAP(0x17f)
diff --git a/arch/sparc64/kernel/utrap.S b/arch/sparc64/kernel/utrap.S
new file mode 100644 (file)
index 0000000..b7f0f3f
--- /dev/null
@@ -0,0 +1,29 @@
+       .globl          utrap_trap
+       .type           utrap_trap,#function
+utrap_trap:            /* %g3=handler,%g4=level */
+       TRAP_LOAD_THREAD_REG(%g6, %g1)
+       ldx             [%g6 + TI_UTRAPS], %g1
+       brnz,pt         %g1, invoke_utrap
+        nop
+
+       ba,pt           %xcc, etrap
+        rd             %pc, %g7
+       mov             %l4, %o1
+        call           bad_trap
+        add            %sp, PTREGS_OFF, %o0
+       ba,pt           %xcc, rtrap
+        nop
+
+invoke_utrap:
+       sllx            %g3, 3, %g3
+       ldx             [%g1 + %g3], %g1
+       save            %sp, -128, %sp
+       rdpr            %tstate, %l6
+       rdpr            %cwp, %l7
+       andn            %l6, TSTATE_CWP, %l6
+       wrpr            %l6, %l7, %tstate
+       rdpr            %tpc, %l6
+       rdpr            %tnpc, %l7
+       wrpr            %g1, 0, %tnpc
+       done
+       .size           utrap_trap,.-utrap_trap
index ac556db06973a12297c8cc9e3845d22da787b40c..7120ebbd4d039020e058cce8fe46c1e3c71418b3 100644 (file)
@@ -21,8 +21,8 @@ EXPORT_SYMBOL(ioport_unmap);
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
-       unsigned long start = pci_resource_start(dev, bar);
-       unsigned long len = pci_resource_len(dev, bar);
+       resource_size_t start = pci_resource_start(dev, bar);
+       resource_size_t len = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);
 
        if (!len || !start)
index 177d8aaeec42e2d147b92617dd308afa9c2baec1..4cad0b32b0afcd5256dce395a1ce6e682c50715c 100644 (file)
@@ -160,6 +160,7 @@ extern unsigned int sparc_ramdisk_image;
 extern unsigned int sparc_ramdisk_size;
 
 struct page *mem_map_zero __read_mostly;
+EXPORT_SYMBOL(mem_map_zero);
 
 unsigned int sparc64_highest_unlocked_tlb_ent __read_mostly;
 
@@ -1699,9 +1700,21 @@ void __init paging_init(void)
         * functions like clear_dcache_dirty_cpu use the cpu mask
         * in 13-bit signed-immediate instruction fields.
         */
-       BUILD_BUG_ON(FLAGS_RESERVED != 32);
+
+       /*
+        * Page flags must not reach into upper 32 bits that are used
+        * for the cpu number
+        */
+       BUILD_BUG_ON(NR_PAGEFLAGS > 32);
+
+       /*
+        * The bit fields placed in the high range must not reach below
+        * the 32 bit boundary. Otherwise we cannot place the cpu field
+        * at the 32 bit boundary.
+        */
        BUILD_BUG_ON(SECTIONS_WIDTH + NODES_WIDTH + ZONES_WIDTH +
-                    ilog2(roundup_pow_of_two(NR_CPUS)) > FLAGS_RESERVED);
+               ilog2(roundup_pow_of_two(NR_CPUS)) > 32);
+
        BUILD_BUG_ON(NR_CPUS > 4096);
 
        kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
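The BUILD_BUG_ON() checks added above fail the build, not the boot, as soon as the page-flag layout no longer leaves the upper 32 bits free for the cpu number. A rough standalone illustration of the compile-time-assert idea; the macro below is a simplified reimplementation for demonstration, not the kernel's exact definition, and NR_PAGEFLAGS is given a made-up value:

    #include <stdio.h>

    /* Simplified compile-time assertion: a negative array size if cond is true. */
    #define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    #define NR_PAGEFLAGS 22   /* hypothetical value for the example */

    int main(void)
    {
            MY_BUILD_BUG_ON(NR_PAGEFLAGS > 32);  /* compiles only while this holds */
            printf("page flag layout fits below bit 32\n");
            return 0;
    }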
index e686a67561af081aacc6e22fb8c4768508d4f096..796e005dad8bb1f622d4a7ff4d4a9f6a5adf468d 100644 (file)
@@ -676,6 +676,33 @@ xcall_new_mmu_context_version:
        wr              %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
        retry
 
+#ifdef CONFIG_KGDB
+       .globl          xcall_kgdb_capture
+xcall_kgdb_capture:
+661:   rdpr            %pstate, %g2
+       wrpr            %g2, PSTATE_IG | PSTATE_AG, %pstate
+       .section        .sun4v_2insn_patch, "ax"
+       .word           661b
+       nop
+       nop
+       .previous
+
+       rdpr            %pil, %g2
+       wrpr            %g0, 15, %pil
+       sethi           %hi(109f), %g7
+       ba,pt           %xcc, etrap_irq
+109:    or             %g7, %lo(109b), %g7
+#ifdef CONFIG_TRACE_IRQFLAGS
+       call            trace_hardirqs_off
+        nop
+#endif
+       call            smp_kgdb_capture_client
+        add            %sp, PTREGS_OFF, %o0
+       /* Has to be a non-v9 branch due to the large distance. */
+       ba              rtrap_xcall
+        ldx            [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+#endif
+
 #endif /* CONFIG_SMP */
 
 
index 3fbe69e359edcd6ddcc7b18e701c7a04071741fa..5696e7b374b31904b7559d045b7235b4e25bcf3e 100644 (file)
@@ -1,3 +1,10 @@
+
+menu "Host processor type and features"
+
+source "arch/x86/Kconfig.cpu"
+
+endmenu
+
 config UML_X86
        bool
        default y
index db3082b4da46573e7b7ddc427d5e4bbdbbdda366..6e51424745ab3453bd839a24e8dd59fbfa37cfdf 100644 (file)
@@ -125,7 +125,7 @@ static int open_one_chan(struct chan *chan)
        return 0;
 }
 
-int open_chan(struct list_head *chans)
+static int open_chan(struct list_head *chans)
 {
        struct list_head *ele;
        struct chan *chan;
@@ -583,19 +583,6 @@ int parse_chan_pair(char *str, struct line *line, int device,
        return 0;
 }
 
-int chan_out_fd(struct list_head *chans)
-{
-       struct list_head *ele;
-       struct chan *chan;
-
-       list_for_each(ele, chans) {
-               chan = list_entry(ele, struct chan, list);
-               if (chan->primary && chan->output)
-                       return chan->fd;
-       }
-       return -1;
-}
-
 void chan_interrupt(struct list_head *chans, struct delayed_work *task,
                    struct tty_struct *tty, int irq)
 {
index 2c898c4d6b6ae6b7864199e73ae2ec32db84f243..10b86e1cc659ee8ca02bee67c2e931067eef8de2 100644 (file)
@@ -304,7 +304,7 @@ int line_ioctl(struct tty_struct *tty, struct file * file,
                                break;
                if (i == ARRAY_SIZE(tty_ioctls)) {
                        printk(KERN_ERR "%s: %s: unknown ioctl: 0x%x\n",
-                              __FUNCTION__, tty->name, cmd);
+                              __func__, tty->name, cmd);
                }
                ret = -ENOIOCTLCMD;
                break;
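__FUNCTION__ is a GCC-specific spelling; __func__ is the C99 predefined identifier, which is why conversions like the one above are mechanical and safe. A tiny standalone example of the replacement:

    #include <stdio.h>

    static void report_unknown_ioctl(unsigned int cmd)
    {
            /* __func__ expands to the enclosing function's name. */
            printf("%s: unknown ioctl: 0x%x\n", __func__, cmd);
    }

    int main(void)
    {
            report_unknown_ioctl(0x5401);
            return 0;
    }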
index 822092f149bedb666740b1958b0778cd9ca01b3d..8c4378a76d6333d88d14537ae1c9a3b42b099537 100644 (file)
@@ -58,7 +58,7 @@ static const struct net_kern_info mcast_kern_info = {
        .write                  = mcast_write,
 };
 
-int mcast_setup(char *str, char **mac_out, void *data)
+static int mcast_setup(char *str, char **mac_out, void *data)
 {
        struct mcast_init *init = data;
        char *port_str = NULL, *ttl_str = NULL, *remain;
index 13af2f03ed8479ac8f7f17ce4d7389120cdac297..f8cf4c8bedef3eb91f161a6da67e474ae508464d 100644 (file)
@@ -39,7 +39,7 @@ static struct mconsole_command commands[] = {
 /* Initialized in mconsole_init, which is an initcall */
 char mconsole_socket_name[256];
 
-int mconsole_reply_v0(struct mc_request *req, char *reply)
+static int mconsole_reply_v0(struct mc_request *req, char *reply)
 {
        struct iovec iov;
        struct msghdr msg;
index 1d43bdfc20c43d37c6b187195556054fd0ae2eeb..5b4ca8d93682cf70fe191c16c0c1180d872c8c78 100644 (file)
@@ -116,7 +116,7 @@ static void uml_dev_close(struct work_struct *work)
        dev_close(lp->dev);
 }
 
-irqreturn_t uml_net_interrupt(int irq, void *dev_id)
+static irqreturn_t uml_net_interrupt(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
        struct uml_net_private *lp = dev->priv;
@@ -296,7 +296,7 @@ static struct ethtool_ops uml_net_ethtool_ops = {
        .get_link       = ethtool_op_get_link,
 };
 
-void uml_net_user_timer_expire(unsigned long _conn)
+static void uml_net_user_timer_expire(unsigned long _conn)
 {
 #ifdef undef
        struct connection *conn = (struct connection *)_conn;
@@ -786,7 +786,7 @@ static int uml_inetaddr_event(struct notifier_block *this, unsigned long event,
 }
 
 /* uml_net_init shouldn't be called twice on two CPUs at the same time */
-struct notifier_block uml_inetaddr_notifier = {
+static struct notifier_block uml_inetaddr_notifier = {
        .notifier_call          = uml_inetaddr_event,
 };
 
index addd759026560bb75eb1129e3b1e2818e77cf345..d269ca387f108de63595a5bf46085864668537e6 100644 (file)
@@ -153,7 +153,7 @@ struct port_pre_exec_data {
        int pipe_fd;
 };
 
-void port_pre_exec(void *arg)
+static void port_pre_exec(void *arg)
 {
        struct port_pre_exec_data *data = arg;
 
index 6b4a0f9e38deeb3d4852639ec4c3d83684e5612f..d19faec7046e5cdb05eba59ff32c1c1beb477beb 100644 (file)
@@ -13,7 +13,7 @@ struct slip_init {
        char *gate_addr;
 };
 
-void slip_init(struct net_device *dev, void *data)
+static void slip_init(struct net_device *dev, void *data)
 {
        struct uml_net_private *private;
        struct slip_data *spri;
@@ -57,7 +57,7 @@ static int slip_write(int fd, struct sk_buff *skb, struct uml_net_private *lp)
                               (struct slip_data *) &lp->user);
 }
 
-const struct net_kern_info slip_kern_info = {
+static const struct net_kern_info slip_kern_info = {
        .init                   = slip_init,
        .protocol               = slip_protocol,
        .read                   = slip_read,
index cec0c33cdd395d6461783c2793a74e18acfc07b6..49266f6108c480c3afb7e551c9b0d788f9c90f00 100644 (file)
@@ -34,7 +34,7 @@
 
 static struct tty_driver *console_driver;
 
-void stdio_announce(char *dev_name, int dev)
+static void stdio_announce(char *dev_name, int dev)
 {
        printk(KERN_INFO "Virtual console %d assigned device '%s'\n", dev,
               dev_name);
@@ -158,7 +158,7 @@ static struct console stdiocons = {
        .index          = -1,
 };
 
-int stdio_init(void)
+static int stdio_init(void)
 {
        char *new_title;
 
index be3a2797dac4ee054c3c4f5e44734d2d76ce85f7..5e45e39a8a8db5a5c57e4f27162804773c465c9e 100644 (file)
@@ -72,18 +72,6 @@ struct io_thread_req {
        int error;
 };
 
-extern int open_ubd_file(char *file, struct openflags *openflags, int shared,
-                        char **backing_file_out, int *bitmap_offset_out,
-                        unsigned long *bitmap_len_out, int *data_offset_out,
-                        int *create_cow_out);
-extern int create_cow_file(char *cow_file, char *backing_file,
-                          struct openflags flags, int sectorsize,
-                          int alignment, int *bitmap_offset_out,
-                          unsigned long *bitmap_len_out,
-                          int *data_offset_out);
-extern int read_cow_bitmap(int fd, void *buf, int offset, int len);
-extern void do_io(struct io_thread_req *req);
-
 static inline int ubd_test_bit(__u64 bit, unsigned char *data)
 {
        __u64 n;
@@ -200,7 +188,7 @@ struct ubd {
 }
 
 /* Protected by ubd_lock */
-struct ubd ubd_devs[MAX_DEV] = { [ 0 ... MAX_DEV - 1 ] = DEFAULT_UBD };
+static struct ubd ubd_devs[MAX_DEV] = { [0 ... MAX_DEV - 1] = DEFAULT_UBD };
 
 /* Only changed by fake_ide_setup which is a setup */
 static int fake_ide = 0;
@@ -463,7 +451,7 @@ __uml_help(udb_setup,
 static void do_ubd_request(struct request_queue * q);
 
 /* Only changed by ubd_init, which is an initcall. */
-int thread_fd = -1;
+static int thread_fd = -1;
 
 static void ubd_end_request(struct request *req, int bytes, int error)
 {
@@ -531,7 +519,7 @@ static irqreturn_t ubd_intr(int irq, void *dev)
 /* Only changed by ubd_init, which is an initcall. */
 static int io_pid = -1;
 
-void kill_io_thread(void)
+static void kill_io_thread(void)
 {
        if(io_pid != -1)
                os_kill_process(io_pid, 1);
@@ -547,6 +535,192 @@ static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out)
        return os_file_size(file, size_out);
 }
 
+static int read_cow_bitmap(int fd, void *buf, int offset, int len)
+{
+       int err;
+
+       err = os_seek_file(fd, offset);
+       if (err < 0)
+               return err;
+
+       err = os_read_file(fd, buf, len);
+       if (err < 0)
+               return err;
+
+       return 0;
+}
+
+static int backing_file_mismatch(char *file, __u64 size, time_t mtime)
+{
+       unsigned long modtime;
+       unsigned long long actual;
+       int err;
+
+       err = os_file_modtime(file, &modtime);
+       if (err < 0) {
+               printk(KERN_ERR "Failed to get modification time of backing "
+                      "file \"%s\", err = %d\n", file, -err);
+               return err;
+       }
+
+       err = os_file_size(file, &actual);
+       if (err < 0) {
+               printk(KERN_ERR "Failed to get size of backing file \"%s\", "
+                      "err = %d\n", file, -err);
+               return err;
+       }
+
+       if (actual != size) {
+               /*__u64 can be a long on AMD64 and with %lu GCC complains; so
+                * the typecast.*/
+               printk(KERN_ERR "Size mismatch (%llu vs %llu) of COW header "
+                      "vs backing file\n", (unsigned long long) size, actual);
+               return -EINVAL;
+       }
+       if (modtime != mtime) {
+               printk(KERN_ERR "mtime mismatch (%ld vs %ld) of COW header vs "
+                      "backing file\n", mtime, modtime);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int path_requires_switch(char *from_cmdline, char *from_cow, char *cow)
+{
+       struct uml_stat buf1, buf2;
+       int err;
+
+       if (from_cmdline == NULL)
+               return 0;
+       if (!strcmp(from_cmdline, from_cow))
+               return 0;
+
+       err = os_stat_file(from_cmdline, &buf1);
+       if (err < 0) {
+               printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cmdline,
+                      -err);
+               return 0;
+       }
+       err = os_stat_file(from_cow, &buf2);
+       if (err < 0) {
+               printk(KERN_ERR "Couldn't stat '%s', err = %d\n", from_cow,
+                      -err);
+               return 1;
+       }
+       if ((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino))
+               return 0;
+
+       printk(KERN_ERR "Backing file mismatch - \"%s\" requested, "
+              "\"%s\" specified in COW header of \"%s\"\n",
+              from_cmdline, from_cow, cow);
+       return 1;
+}
+
+static int open_ubd_file(char *file, struct openflags *openflags, int shared,
+                 char **backing_file_out, int *bitmap_offset_out,
+                 unsigned long *bitmap_len_out, int *data_offset_out,
+                 int *create_cow_out)
+{
+       time_t mtime;
+       unsigned long long size;
+       __u32 version, align;
+       char *backing_file;
+       int fd, err, sectorsize, asked_switch, mode = 0644;
+
+       fd = os_open_file(file, *openflags, mode);
+       if (fd < 0) {
+               if ((fd == -ENOENT) && (create_cow_out != NULL))
+                       *create_cow_out = 1;
+               if (!openflags->w ||
+                   ((fd != -EROFS) && (fd != -EACCES)))
+                       return fd;
+               openflags->w = 0;
+               fd = os_open_file(file, *openflags, mode);
+               if (fd < 0)
+                       return fd;
+       }
+
+       if (shared)
+               printk(KERN_INFO "Not locking \"%s\" on the host\n", file);
+       else {
+               err = os_lock_file(fd, openflags->w);
+               if (err < 0) {
+                       printk(KERN_ERR "Failed to lock '%s', err = %d\n",
+                              file, -err);
+                       goto out_close;
+               }
+       }
+
+       /* Successful return case! */
+       if (backing_file_out == NULL)
+               return fd;
+
+       err = read_cow_header(file_reader, &fd, &version, &backing_file, &mtime,
+                             &size, &sectorsize, &align, bitmap_offset_out);
+       if (err && (*backing_file_out != NULL)) {
+               printk(KERN_ERR "Failed to read COW header from COW file "
+                      "\"%s\", errno = %d\n", file, -err);
+               goto out_close;
+       }
+       if (err)
+               return fd;
+
+       asked_switch = path_requires_switch(*backing_file_out, backing_file,
+                                           file);
+
+       /* Allow switching only if no mismatch. */
+       if (asked_switch && !backing_file_mismatch(*backing_file_out, size,
+                                                  mtime)) {
+               printk(KERN_ERR "Switching backing file to '%s'\n",
+                      *backing_file_out);
+               err = write_cow_header(file, fd, *backing_file_out,
+                                      sectorsize, align, &size);
+               if (err) {
+                       printk(KERN_ERR "Switch failed, errno = %d\n", -err);
+                       goto out_close;
+               }
+       } else {
+               *backing_file_out = backing_file;
+               err = backing_file_mismatch(*backing_file_out, size, mtime);
+               if (err)
+                       goto out_close;
+       }
+
+       cow_sizes(version, size, sectorsize, align, *bitmap_offset_out,
+                 bitmap_len_out, data_offset_out);
+
+       return fd;
+ out_close:
+       os_close_file(fd);
+       return err;
+}
+
+static int create_cow_file(char *cow_file, char *backing_file,
+                   struct openflags flags,
+                   int sectorsize, int alignment, int *bitmap_offset_out,
+                   unsigned long *bitmap_len_out, int *data_offset_out)
+{
+       int err, fd;
+
+       flags.c = 1;
+       fd = open_ubd_file(cow_file, &flags, 0, NULL, NULL, NULL, NULL, NULL);
+       if (fd < 0) {
+               err = fd;
+               printk(KERN_ERR "Open of COW file '%s' failed, errno = %d\n",
+                      cow_file, -err);
+               goto out;
+       }
+
+       err = init_cow_file(fd, cow_file, backing_file, sectorsize, alignment,
+                           bitmap_offset_out, bitmap_len_out,
+                           data_offset_out);
+       if (!err)
+               return fd;
+       os_close_file(fd);
+ out:
+       return err;
+}
+
 static void ubd_close_dev(struct ubd *ubd_dev)
 {
        os_close_file(ubd_dev->fd);
@@ -1166,185 +1340,6 @@ static int ubd_ioctl(struct inode * inode, struct file * file,
        return -EINVAL;
 }
 
-static int path_requires_switch(char *from_cmdline, char *from_cow, char *cow)
-{
-       struct uml_stat buf1, buf2;
-       int err;
-
-       if(from_cmdline == NULL)
-               return 0;
-       if(!strcmp(from_cmdline, from_cow))
-               return 0;
-
-       err = os_stat_file(from_cmdline, &buf1);
-       if(err < 0){
-               printk("Couldn't stat '%s', err = %d\n", from_cmdline, -err);
-               return 0;
-       }
-       err = os_stat_file(from_cow, &buf2);
-       if(err < 0){
-               printk("Couldn't stat '%s', err = %d\n", from_cow, -err);
-               return 1;
-       }
-       if((buf1.ust_dev == buf2.ust_dev) && (buf1.ust_ino == buf2.ust_ino))
-               return 0;
-
-       printk("Backing file mismatch - \"%s\" requested,\n"
-              "\"%s\" specified in COW header of \"%s\"\n",
-              from_cmdline, from_cow, cow);
-       return 1;
-}
-
-static int backing_file_mismatch(char *file, __u64 size, time_t mtime)
-{
-       unsigned long modtime;
-       unsigned long long actual;
-       int err;
-
-       err = os_file_modtime(file, &modtime);
-       if(err < 0){
-               printk("Failed to get modification time of backing file "
-                      "\"%s\", err = %d\n", file, -err);
-               return err;
-       }
-
-       err = os_file_size(file, &actual);
-       if(err < 0){
-               printk("Failed to get size of backing file \"%s\", "
-                      "err = %d\n", file, -err);
-               return err;
-       }
-
-       if(actual != size){
-               /*__u64 can be a long on AMD64 and with %lu GCC complains; so
-                * the typecast.*/
-               printk("Size mismatch (%llu vs %llu) of COW header vs backing "
-                      "file\n", (unsigned long long) size, actual);
-               return -EINVAL;
-       }
-       if(modtime != mtime){
-               printk("mtime mismatch (%ld vs %ld) of COW header vs backing "
-                      "file\n", mtime, modtime);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-int read_cow_bitmap(int fd, void *buf, int offset, int len)
-{
-       int err;
-
-       err = os_seek_file(fd, offset);
-       if(err < 0)
-               return err;
-
-       err = os_read_file(fd, buf, len);
-       if(err < 0)
-               return err;
-
-       return 0;
-}
-
-int open_ubd_file(char *file, struct openflags *openflags, int shared,
-                 char **backing_file_out, int *bitmap_offset_out,
-                 unsigned long *bitmap_len_out, int *data_offset_out,
-                 int *create_cow_out)
-{
-       time_t mtime;
-       unsigned long long size;
-       __u32 version, align;
-       char *backing_file;
-       int fd, err, sectorsize, asked_switch, mode = 0644;
-
-       fd = os_open_file(file, *openflags, mode);
-       if (fd < 0) {
-               if ((fd == -ENOENT) && (create_cow_out != NULL))
-                       *create_cow_out = 1;
-               if (!openflags->w ||
-                   ((fd != -EROFS) && (fd != -EACCES)))
-                       return fd;
-               openflags->w = 0;
-               fd = os_open_file(file, *openflags, mode);
-               if (fd < 0)
-                       return fd;
-       }
-
-       if(shared)
-               printk("Not locking \"%s\" on the host\n", file);
-       else {
-               err = os_lock_file(fd, openflags->w);
-               if(err < 0){
-                       printk("Failed to lock '%s', err = %d\n", file, -err);
-                       goto out_close;
-               }
-       }
-
-       /* Successful return case! */
-       if(backing_file_out == NULL)
-               return fd;
-
-       err = read_cow_header(file_reader, &fd, &version, &backing_file, &mtime,
-                             &size, &sectorsize, &align, bitmap_offset_out);
-       if(err && (*backing_file_out != NULL)){
-               printk("Failed to read COW header from COW file \"%s\", "
-                      "errno = %d\n", file, -err);
-               goto out_close;
-       }
-       if(err)
-               return fd;
-
-       asked_switch = path_requires_switch(*backing_file_out, backing_file, file);
-
-       /* Allow switching only if no mismatch. */
-       if (asked_switch && !backing_file_mismatch(*backing_file_out, size, mtime)) {
-               printk("Switching backing file to '%s'\n", *backing_file_out);
-               err = write_cow_header(file, fd, *backing_file_out,
-                                      sectorsize, align, &size);
-               if (err) {
-                       printk("Switch failed, errno = %d\n", -err);
-                       goto out_close;
-               }
-       } else {
-               *backing_file_out = backing_file;
-               err = backing_file_mismatch(*backing_file_out, size, mtime);
-               if (err)
-                       goto out_close;
-       }
-
-       cow_sizes(version, size, sectorsize, align, *bitmap_offset_out,
-                 bitmap_len_out, data_offset_out);
-
-       return fd;
- out_close:
-       os_close_file(fd);
-       return err;
-}
-
-int create_cow_file(char *cow_file, char *backing_file, struct openflags flags,
-                   int sectorsize, int alignment, int *bitmap_offset_out,
-                   unsigned long *bitmap_len_out, int *data_offset_out)
-{
-       int err, fd;
-
-       flags.c = 1;
-       fd = open_ubd_file(cow_file, &flags, 0, NULL, NULL, NULL, NULL, NULL);
-       if(fd < 0){
-               err = fd;
-               printk("Open of COW file '%s' failed, errno = %d\n", cow_file,
-                      -err);
-               goto out;
-       }
-
-       err = init_cow_file(fd, cow_file, backing_file, sectorsize, alignment,
-                           bitmap_offset_out, bitmap_len_out,
-                           data_offset_out);
-       if(!err)
-               return fd;
-       os_close_file(fd);
- out:
-       return err;
-}
-
 static int update_bitmap(struct io_thread_req *req)
 {
        int n;
@@ -1369,7 +1364,7 @@ static int update_bitmap(struct io_thread_req *req)
        return 0;
 }
 
-void do_io(struct io_thread_req *req)
+static void do_io(struct io_thread_req *req)
 {
        char *buf;
        unsigned long len;
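The relocated open_ubd_file() above keeps the usual kernel cleanup idiom: once the file descriptor is open, every error path funnels through a single out_close label that closes it. A hedged userspace sketch of the same shape; plain POSIX calls stand in for the os_* wrappers and the path in main() is arbitrary:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <errno.h>

    /* Open a file and validate a small header, closing the fd on any failure. */
    static int open_and_check(const char *path)
    {
            char magic[4];
            int err, fd;

            fd = open(path, O_RDONLY);
            if (fd < 0)
                    return -errno;

            if (read(fd, magic, sizeof(magic)) != (ssize_t)sizeof(magic)) {
                    err = -EINVAL;
                    goto out_close;
            }

            return fd;              /* success: the caller owns the fd */

    out_close:
            close(fd);
            return err;
    }

    int main(void)
    {
            int fd = open_and_check("/etc/hostname");

            if (fd >= 0) {
                    printf("header ok, fd %d\n", fd);
                    close(fd);
            }
            return 0;
    }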
index 624b5100a3cd778e5d7293cda886e16bae5fc943..1e651457e049f8cf67fce1b7e8c166a5a3802f38 100644 (file)
@@ -31,7 +31,6 @@ extern void chan_interrupt(struct list_head *chans, struct delayed_work *task,
                           struct tty_struct *tty, int irq);
 extern int parse_chan_pair(char *str, struct line *line, int device,
                           const struct chan_opts *opts, char **error_out);
-extern int open_chan(struct list_head *chans);
 extern int write_chan(struct list_head *chans, const char *buf, int len,
                             int write_irq);
 extern int console_write_chan(struct list_head *chans, const char *buf, 
@@ -45,7 +44,6 @@ extern void close_chan(struct list_head *chans, int delay_free_irq);
 extern int chan_window_size(struct list_head *chans, 
                             unsigned short *rows_out, 
                             unsigned short *cols_out);
-extern int chan_out_fd(struct list_head *chans);
 extern int chan_config_string(struct list_head *chans, char *str, int size,
                              char **error_out);
 
index 984f80e668ca5d7c85b3f8ae549dc13fa5aabe57..6540d2c9fbb76f684db03539e26e202178cf18f5 100644 (file)
@@ -59,7 +59,7 @@ static int make_proc_exitcode(void)
 {
        struct proc_dir_entry *ent;
 
-       ent = create_proc_entry("exitcode", 0600, &proc_root);
+       ent = create_proc_entry("exitcode", 0600, NULL);
        if (ent == NULL) {
                printk(KERN_WARNING "make_proc_exitcode : Failed to register "
                       "/proc/exitcode\n");
index e8cb9ff183e9fe1da41020bc39ffe22b43ef56c7..83603cfbde819ce736a034d414361ea71c24cca7 100644 (file)
@@ -364,7 +364,7 @@ int __init make_proc_sysemu(void)
        if (!sysemu_supported)
                return 0;
 
-       ent = create_proc_entry("sysemu", 0600, &proc_root);
+       ent = create_proc_entry("sysemu", 0600, NULL);
 
        if (ent == NULL)
        {
index e066e84493b129a23f7018ce09d145dddf14f97b..0d0cea2ac98d5df30a308aa4468b14db3f7f681b 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <linux/clockchips.h>
+#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/jiffies.h>
 #include <linux/threads.h>
@@ -109,8 +110,6 @@ static void __init setup_itimer(void)
        clockevents_register_device(&itimer_clockevent);
 }
 
-extern void (*late_time_init)(void);
-
 void __init time_init(void)
 {
        long long nsecs;
index a6c1dd1cf5a17a7806401e7b6fb97c3e2ceacd26..56deed623446bd0726e2e7408d6f6bbc687df2b0 100644 (file)
@@ -115,7 +115,7 @@ static int have_root __initdata = 0;
 /* Set in uml_mem_setup and modified in linux_main */
 long long physmem_size = 32 * 1024 * 1024;
 
-static char *usage_string =
+static const char *usage_string =
 "User Mode Linux v%s\n"
 "      available at http://user-mode-linux.sourceforge.net/\n\n";
 
@@ -202,7 +202,7 @@ static void __init uml_checksetup(char *line, int *add)
 
        p = &__uml_setup_start;
        while (p < &__uml_setup_end) {
-               int n;
+               size_t n;
 
                n = strlen(p->str);
                if (!strncmp(line, p->str, n) && p->setup_func(line + n, add))
@@ -258,7 +258,8 @@ int __init linux_main(int argc, char **argv)
 {
        unsigned long avail, diff;
        unsigned long virtmem_size, max_physmem;
-       unsigned int i, add;
+       unsigned int i;
+       int add;
        char * mode;
 
        for (i = 1; i < argc; i++) {
index f4bd349d441222fb7c400ac63d8e7b8c390784a2..f25c29a12d007676d736ed8b8fcb9c9e47a0eaed 100644 (file)
@@ -14,6 +14,7 @@
 #include "os.h"
 #include "um_malloc.h"
 #include "user.h"
+#include <linux/limits.h>
 
 struct helper_data {
        void (*pre_exec)(void*);
index b616e15638fbc8c1aa5a8bdf710862715124de42..997d01944f91d5c84e4778c59b1ac429c814b4bb 100644 (file)
 #include "registers.h"
 #include "skas_ptrace.h"
 
-static int ptrace_child(void)
+static void ptrace_child(void)
 {
        int ret;
        /* Calling os_getpid because some libcs cached getpid incorrectly */
        int pid = os_getpid(), ppid = getppid();
        int sc_result;
 
-       change_sig(SIGWINCH, 0);
-       if (ptrace(PTRACE_TRACEME, 0, 0, 0) < 0) {
+       if (change_sig(SIGWINCH, 0) < 0 ||
+           ptrace(PTRACE_TRACEME, 0, 0, 0) < 0) {
                perror("ptrace");
                kill(pid, SIGKILL);
        }
@@ -75,9 +75,8 @@ static void fatal(char *fmt, ...)
        va_list list;
 
        va_start(list, fmt);
-       vprintf(fmt, list);
+       vfprintf(stderr, fmt, list);
        va_end(list);
-       fflush(stdout);
 
        exit(1);
 }
@@ -87,9 +86,8 @@ static void non_fatal(char *fmt, ...)
        va_list list;
 
        va_start(list, fmt);
-       vprintf(fmt, list);
+       vfprintf(stderr, fmt, list);
        va_end(list);
-       fflush(stdout);
 }
 
 static int start_ptraced_child(void)
@@ -495,7 +493,7 @@ int __init parse_iomem(char *str, int *add)
        driver = str;
        file = strchr(str,',');
        if (file == NULL) {
-               printf("parse_iomem : failed to parse iomem\n");
+               fprintf(stderr, "parse_iomem : failed to parse iomem\n");
                goto out;
        }
        *file = '\0';
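Switching fatal(), non_fatal() and the parse_iomem error above from printf() to stderr keeps diagnostics separate from ordinary output, and makes the fflush(stdout) calls unnecessary since stderr is unbuffered. A small userspace sketch of the vfprintf(stderr, ...) pattern used here:

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Print a message to stderr and exit, mirroring the shape of fatal() above. */
    static void fatal(const char *fmt, ...)
    {
            va_list list;

            va_start(list, fmt);
            vfprintf(stderr, fmt, list);
            va_end(list);

            exit(1);
    }

    int main(void)
    {
            fatal("probe failed: %s\n", "example error");   /* exits with status 1 */
            return 0;
    }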
index 48d211b3d9a1d2a3455f61deab7de81fc05f0bdb..ccb49b0aff598d673659d12a8e7249feb485fdee 100644 (file)
@@ -88,7 +88,10 @@ unsigned long os_get_task_size(void)
        sa.sa_handler = segfault;
        sigemptyset(&sa.sa_mask);
        sa.sa_flags = SA_NODEFER;
-       sigaction(SIGSEGV, &sa, &old);
+       if (sigaction(SIGSEGV, &sa, &old)) {
+               perror("os_get_task_size");
+               exit(1);
+       }
 
        if (!page_ok(bottom)) {
                fprintf(stderr, "Address 0x%x no good?\n",
@@ -110,11 +113,12 @@ unsigned long os_get_task_size(void)
 
 out:
        /* Restore the old SIGSEGV handling */
-       sigaction(SIGSEGV, &old, NULL);
-
+       if (sigaction(SIGSEGV, &old, NULL)) {
+               perror("os_get_task_size");
+               exit(1);
+       }
        top <<= UM_KERN_PAGE_SHIFT;
        printf("0x%x\n", top);
-       fflush(stdout);
 
        return top;
 }
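Checking sigaction()'s return value matters in this probe because it deliberately provokes SIGSEGV; if installing or restoring the handler fails, continuing would crash in a confusing way. A standalone example of the same check:

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void segfault(int sig)
    {
            (void)sig;      /* handler body is not important for the example */
    }

    int main(void)
    {
            struct sigaction sa, old;

            memset(&sa, 0, sizeof(sa));
            sa.sa_handler = segfault;
            sigemptyset(&sa.sa_mask);
            sa.sa_flags = SA_NODEFER;

            if (sigaction(SIGSEGV, &sa, &old)) {    /* nonzero means failure */
                    perror("sigaction");
                    exit(1);
            }

            /* ... the probing code would run here ... */

            if (sigaction(SIGSEGV, &old, NULL)) {   /* restore the previous handler */
                    perror("sigaction");
                    exit(1);
            }
            return 0;
    }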
index 964dc1a04c37a7774f56a5d0842d737d477c85c4..598b5c1903af11f10c5c171402b6d283ba9e7565 100644 (file)
@@ -6,7 +6,7 @@ obj-y = bug.o bugs.o checksum.o delay.o fault.o ksyms.o ldt.o ptrace.o \
        ptrace_user.o setjmp.o signal.o stub.o stub_segv.o syscalls.o sysrq.o \
        sys_call_table.o tls.o
 
-subarch-obj-y = lib/bitops_32.o lib/semaphore_32.o lib/string_32.o
+subarch-obj-y = lib/semaphore_32.o lib/string_32.o
 subarch-obj-$(CONFIG_HIGHMEM) += mm/highmem_32.o
 subarch-obj-$(CONFIG_MODULES) += kernel/module_32.o
 
index 3c22de532088b4c5baef1bee93cb85d9cd668840..c8b4cce9cfe1ce333dd877c8644f5b7d4fab3558 100644 (file)
@@ -10,7 +10,7 @@ obj-y = bug.o bugs.o delay.o fault.o ldt.o mem.o ptrace.o ptrace_user.o \
 
 obj-$(CONFIG_MODULES) += um_module.o
 
-subarch-obj-y = lib/bitops_64.o lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
+subarch-obj-y = lib/csum-partial_64.o lib/memcpy_64.o lib/thunk_64.o
 subarch-obj-$(CONFIG_MODULES) += kernel/module_64.o
 
 ldt-y = ../sys-i386/ldt.o
index cee5c3142d41c41644341e62a5acb86f6247aa9b..581e6986a7767cc994b1a09e9a7f529a2c25ad01 100644 (file)
 #include <linux/kernel_stat.h>
 #include <linux/ptrace.h>
 #include <linux/hardirq.h>
+#include <linux/kbuild.h>
+
 #include <asm/irq.h>
 #include <asm/errno.h>
 
-#define DEFINE(sym, val) \
-       asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
 int main (void)
 {
        /* offsets into the task struct */
index 7165478824e7405757536e65d04d493d281d762c..687e367d8b64c81e39a99632e87d012e1c8015b5 100644 (file)
@@ -790,8 +790,8 @@ pci_free_consistent (struct pci_dev *pdev, size_t size, void *cpu_addr,
 
 void __iomem *pci_iomap (struct pci_dev *dev, int bar, unsigned long max)
 {
-       unsigned long start = pci_resource_start (dev, bar);
-       unsigned long len = pci_resource_len (dev, bar);
+       resource_size_t start = pci_resource_start (dev, bar);
+       resource_size_t len = pci_resource_len (dev, bar);
 
        if (!start || len == 0)
                return 0;
index 2fadf794483d1f8dad597c34de6dafb7082b4383..c3f880902d66555afd6e079292f23ec009126f34 100644 (file)
@@ -25,6 +25,18 @@ config X86
        select HAVE_KVM if ((X86_32 && !X86_VOYAGER && !X86_VISWS && !X86_NUMAQ) || X86_64)
        select HAVE_ARCH_KGDB if !X86_VOYAGER
 
+config DEFCONFIG_LIST
+       string
+       depends on X86_32
+       option defconfig_list
+       default "arch/x86/configs/i386_defconfig"
+
+config DEFCONFIG_LIST
+       string
+       depends on X86_64
+       option defconfig_list
+       default "arch/x86/configs/x86_64_defconfig"
+
 
 config GENERIC_LOCKBREAK
        def_bool n
@@ -86,9 +98,6 @@ config GENERIC_GPIO
 config ARCH_MAY_HAVE_PC_FDC
        def_bool y
 
-config DMI
-       def_bool y
-
 config RWSEM_GENERIC_SPINLOCK
        def_bool !X86_XADD
 
@@ -114,6 +123,9 @@ config GENERIC_TIME_VSYSCALL
 config ARCH_HAS_CPU_RELAX
        def_bool y
 
+config ARCH_HAS_CACHE_LINE_SIZE
+       def_bool y
+
 config HAVE_SETUP_PER_CPU_AREA
        def_bool X86_64 || (X86_SMP && !X86_VOYAGER)
 
@@ -180,7 +192,7 @@ config X86_HT
 
 config X86_BIOS_REBOOT
        bool
-       depends on X86_32 && !(X86_VISWS || X86_VOYAGER)
+       depends on !X86_VISWS && !X86_VOYAGER
        default y
 
 config X86_TRAMPOLINE
@@ -373,6 +385,25 @@ config VMI
          at the moment), by linking the kernel to a GPL-ed ROM module
          provided by the hypervisor.
 
+config KVM_CLOCK
+       bool "KVM paravirtualized clock"
+       select PARAVIRT
+       depends on !(X86_VISWS || X86_VOYAGER)
+       help
+         Turning on this option will allow you to run a paravirtualized clock
+         when running over the KVM hypervisor. Instead of relying on a PIT
+         (or probably other) emulation by the underlying device model, the host
+         provides the guest with timing infrastructure such as time of day, and
+         system time
+
+config KVM_GUEST
+       bool "KVM Guest support"
+       select PARAVIRT
+       depends on !(X86_VISWS || X86_VOYAGER)
+       help
+        This option enables various optimizations for running under the KVM
+        hypervisor.
+
 source "arch/x86/lguest/Kconfig"
 
 config PARAVIRT
@@ -463,6 +494,15 @@ config HPET_EMULATE_RTC
 
 # Mark as embedded because too many people got it wrong.
 # The code disables itself when not needed.
+config DMI
+       default y
+       bool "Enable DMI scanning" if EMBEDDED
+       help
+         Enabled scanning of DMI to identify machine quirks. Say Y
+         here unless you have verified that your setup is not
+         affected by entries in the DMI blacklist. Required by PNP
+         BIOS code.
+
 config GART_IOMMU
        bool "GART IOMMU support" if EMBEDDED
        default y
@@ -509,9 +549,6 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
          Calgary anyway, pass 'iommu=calgary' on the kernel command line.
          If unsure, say Y.
 
-config IOMMU_HELPER
-       def_bool (CALGARY_IOMMU || GART_IOMMU)
-
 # need this always selected by IOMMU for the VIA workaround
 config SWIOTLB
        bool
@@ -522,6 +559,8 @@ config SWIOTLB
          access 32-bits of memory can be used on systems with more than
          3 GB of memory. If unsure, say Y.
 
+config IOMMU_HELPER
+       def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB)
 
 config NR_CPUS
        int "Maximum number of CPUs (2-255)"
@@ -1134,7 +1173,7 @@ source kernel/Kconfig.hz
 
 config KEXEC
        bool "kexec system call"
-       depends on X86_64 || X86_BIOS_REBOOT
+       depends on X86_BIOS_REBOOT
        help
          kexec is a system call that implements the ability to shutdown your
          current kernel, and to start another kernel.  It is like a reboot
@@ -1477,6 +1516,10 @@ config PCI_GODIRECT
 config PCI_GOANY
        bool "Any"
 
+config PCI_GOOLPC
+       bool "OLPC"
+       depends on OLPC
+
 endchoice
 
 config PCI_BIOS
@@ -1486,12 +1529,17 @@ config PCI_BIOS
 # x86-64 doesn't support PCI BIOS access from long mode so always go direct.
 config PCI_DIRECT
        def_bool y
-       depends on PCI && (X86_64 || (PCI_GODIRECT || PCI_GOANY) || X86_VISWS)
+       depends on PCI && (X86_64 || (PCI_GODIRECT || PCI_GOANY || PCI_GOOLPC) || X86_VISWS)
 
 config PCI_MMCONFIG
        def_bool y
        depends on X86_32 && PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
 
+config PCI_OLPC
+       bool
+       depends on PCI && PCI_GOOLPC
+       default y
+
 config PCI_DOMAINS
        def_bool y
        depends on PCI
@@ -1611,6 +1659,13 @@ config GEODE_MFGPT_TIMER
          MFGPTs have a better resolution and max interval than the
          generic PIT, and are suitable for use as high-res timers.
 
+config OLPC
+       bool "One Laptop Per Child support"
+       default n
+       help
+         Add support for detecting the unique features of the OLPC
+         XO hardware.
+
 endif # X86_32
 
 config K8_NB
index 4da3cdb9c1b1015321e9ed4799bef1053187d2bc..2ad6301849a1ba68ec78c9ea3c5f39232484c645 100644 (file)
@@ -292,6 +292,11 @@ config X86_GENERIC
 
 endif
 
+config X86_CPU
+       def_bool y
+       select GENERIC_FIND_FIRST_BIT
+       select GENERIC_FIND_NEXT_BIT
+
 #
 # Define implied options from the CPU selection here
 config X86_L1_CACHE_BYTES
@@ -398,7 +403,7 @@ config X86_TSC
 # generates cmov.
 config X86_CMOV
        def_bool y
-       depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7)
+       depends on (MK7 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || X86_64)
 
 config X86_MINIMUM_CPU_FAMILY
        int
index 5b1979a45a1ea07f6ffc8d6b563fc1ddcb64dae3..ac1e31ba4795cb0f067a280826da257432f93f33 100644 (file)
@@ -118,7 +118,6 @@ config DEBUG_NX_TEST
 config 4KSTACKS
        bool "Use 4Kb for kernel stacks instead of 8Kb"
        depends on X86_32
-       default y
        help
          If you say Y here the kernel will use a 4Kb stacksize for the
          kernel stack attached to each process/thread. This facilitates
@@ -256,11 +255,9 @@ config CPA_DEBUG
        help
          Do change_page_attr() self-tests every 30 seconds.
 
-endmenu
-
 config OPTIMIZE_INLINING
        bool "Allow gcc to uninline functions marked 'inline'"
-       default y
+       depends on BROKEN
        help
          This option determines if the kernel forces gcc to inline the functions
          developers have marked 'inline'. Doing so takes away freedom from gcc to
@@ -270,3 +267,6 @@ config OPTIMIZE_INLINING
          this algorithm is so good that allowing gcc4 to make the decision can
          become the default in the future, until then this option is there to
          test gcc for this.
+
+endmenu
+
index d84a48ece78503b10e8429981e44397b6755a454..03399d64013b21459f63942c06bc6c33b803f425 100644 (file)
@@ -126,17 +126,25 @@ void query_edd(void)
 {
        char eddarg[8];
        int do_mbr = 1;
+#ifdef CONFIG_EDD_OFF
+       int do_edd = 0;
+#else
        int do_edd = 1;
+#endif
        int be_quiet;
        int devno;
        struct edd_info ei, *edp;
        u32 *mbrptr;
 
        if (cmdline_find_option("edd", eddarg, sizeof eddarg) > 0) {
-               if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip"))
+               if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) {
+                       do_edd = 1;
                        do_mbr = 0;
+               }
                else if (!strcmp(eddarg, "off"))
                        do_edd = 0;
+               else if (!strcmp(eddarg, "on"))
+                       do_edd = 1;
        }
 
        be_quiet = cmdline_find_option_bool("quiet");
index 6d2df8d61c54089a553c0cdee879dde6f421130c..af86e431acfae2af91957a9932c3bf5fb96bd5fb 100644 (file)
@@ -120,7 +120,7 @@ _start:
        # Part 2 of the header, from the old setup.S
 
                .ascii  "HdrS"          # header signature
-               .word   0x0208          # header version number (>= 0x0105)
+               .word   0x0209          # header version number (>= 0x0105)
                                        # or else old loadlin-1.5 will fail)
                .globl realmode_swtch
 realmode_swtch:        .word   0, 0            # default_switch, SETUPSEG
@@ -227,6 +227,10 @@ hardware_subarch_data:     .quad 0
 payload_offset:                .long input_data
 payload_length:                .long input_data_end-input_data
 
+setup_data:            .quad 0                 # 64-bit physical pointer to
+                                               # single linked list of
+                                               # struct setup_data
+
 # End of setup header #####################################################
 
        .section ".inittext", "ax"
index bbed3a26ce5567abb107f0371651c906b5f368a7..cb3856a18c8544e1ffe722d33a7713439043829d 100644 (file)
@@ -128,7 +128,7 @@ asmlinkage long sys32_sigsuspend(int history0, int history1, old_sigset_t mask)
 
        current->state = TASK_INTERRUPTIBLE;
        schedule();
-       set_thread_flag(TIF_RESTORE_SIGMASK);
+       set_restore_sigmask();
        return -ERESTARTNOHAND;
 }
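set_restore_sigmask() is the new helper that replaces open-coded set_thread_flag(TIF_RESTORE_SIGMASK) calls like the one removed above. On architectures that carry the flag it is expected to reduce to roughly the sketch below; this is an approximation for orientation, not the exact header text:

    /* Rough shape of the helper this hunk switches to. */
    static inline void set_restore_sigmask(void)
    {
            set_thread_flag(TIF_RESTORE_SIGMASK);
    }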
 
index 90e092d0af0c639211932fc44a0f5dd959ec590b..bbdacb398d48e05a05c17f23af0d2b650751d912 100644 (file)
@@ -40,7 +40,6 @@ obj-$(CONFIG_STACKTRACE)      += stacktrace.o
 obj-y                          += cpu/
 obj-y                          += acpi/
 obj-$(CONFIG_X86_BIOS_REBOOT)  += reboot.o
-obj-$(CONFIG_X86_64)           += reboot.o
 obj-$(CONFIG_MCA)              += mca_32.o
 obj-$(CONFIG_X86_MSR)          += msr.o
 obj-$(CONFIG_X86_CPUID)                += cpuid.o
@@ -80,6 +79,8 @@ obj-$(CONFIG_DEBUG_RODATA_TEST)       += test_rodata.o
 obj-$(CONFIG_DEBUG_NX_TEST)    += test_nx.o
 
 obj-$(CONFIG_VMI)              += vmi_32.o vmiclock_32.o
+obj-$(CONFIG_KVM_GUEST)                += kvm.o
+obj-$(CONFIG_KVM_CLOCK)                += kvmclock.o
 obj-$(CONFIG_PARAVIRT)         += paravirt.o paravirt_patch_$(BITS).o
 
 ifdef CONFIG_INPUT_PCSPKR
@@ -89,6 +90,8 @@ endif
 obj-$(CONFIG_SCx200)           += scx200.o
 scx200-y                       += scx200_32.o
 
+obj-$(CONFIG_OLPC)             += olpc.o
+
 ###
 # 64 bit specific files
 ifeq ($(CONFIG_X86_64),y)
@@ -99,4 +102,6 @@ ifeq ($(CONFIG_X86_64),y)
         obj-$(CONFIG_GART_IOMMU)       += pci-gart_64.o aperture_64.o
         obj-$(CONFIG_CALGARY_IOMMU)    += pci-calgary_64.o tce_64.o
         obj-$(CONFIG_SWIOTLB)          += pci-swiotlb_64.o
+
+        obj-$(CONFIG_PCI_MMCONFIG)     += mmconf-fam10h_64.o
 endif
index 977ed5cdeaa3b1b6a8d5f78b3446df7be7e21b53..c49ebcc6c41e84392fe1eeb1b473b01e881b1975 100644 (file)
@@ -771,6 +771,32 @@ static void __init acpi_register_lapic_address(unsigned long address)
                boot_cpu_physical_apicid  = GET_APIC_ID(read_apic_id());
 }
 
+static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
+{
+       int count;
+
+       if (!cpu_has_apic)
+               return -ENODEV;
+
+       /*
+        * Note that the LAPIC address is obtained from the MADT (32-bit value)
+        * and (optionally) overriden by a LAPIC_ADDR_OVR entry (64-bit value).
+        */
+
+       count =
+           acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC_OVERRIDE,
+                                 acpi_parse_lapic_addr_ovr, 0);
+       if (count < 0) {
+               printk(KERN_ERR PREFIX
+                      "Error parsing LAPIC address override entry\n");
+               return count;
+       }
+
+       acpi_register_lapic_address(acpi_lapic_addr);
+
+       return count;
+}
+
 static int __init acpi_parse_madt_lapic_entries(void)
 {
        int count;
@@ -901,6 +927,33 @@ static inline int acpi_parse_madt_ioapic_entries(void)
 }
 #endif /* !CONFIG_X86_IO_APIC */
 
+static void __init early_acpi_process_madt(void)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+       int error;
+
+       if (!acpi_table_parse(ACPI_SIG_MADT, acpi_parse_madt)) {
+
+               /*
+                * Parse MADT LAPIC entries
+                */
+               error = early_acpi_parse_madt_lapic_addr_ovr();
+               if (!error) {
+                       acpi_lapic = 1;
+                       smp_found_config = 1;
+               }
+               if (error == -EINVAL) {
+                       /*
+                        * Dell Precision Workstation 410, 610 come here.
+                        */
+                       printk(KERN_ERR PREFIX
+                              "Invalid BIOS MADT, disabling ACPI\n");
+                       disable_acpi();
+               }
+       }
+#endif
+}
+
 static void __init acpi_process_madt(void)
 {
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -1233,6 +1286,23 @@ int __init acpi_boot_table_init(void)
        return 0;
 }
 
+int __init early_acpi_boot_init(void)
+{
+       /*
+        * If acpi_disabled, bail out
+        * One exception: acpi=ht continues far enough to enumerate LAPICs
+        */
+       if (acpi_disabled && !acpi_ht)
+               return 1;
+
+       /*
+        * Process the Multiple APIC Description Table (MADT), if present
+        */
+       early_acpi_process_madt();
+
+       return 0;
+}
+
 int __init acpi_boot_init(void)
 {
        /*
index f0030a0999c74f01785c4acd7a499778de58a4ff..bf9290e290136a36d6006d032c1ca4fe392f84bd 100644 (file)
@@ -904,6 +904,7 @@ recalc:
                        original_pm_idle();
                else
                        default_idle();
+               local_irq_disable();
                jiffies_since_last_check = jiffies - last_jiffies;
                if (jiffies_since_last_check > idle_period)
                        goto recalc;
@@ -911,6 +912,8 @@ recalc:
 
        if (apm_idle_done)
                apm_do_busy();
+
+       local_irq_enable();
 }
 
 /**
@@ -1189,19 +1192,6 @@ static int suspend(int vetoable)
        int err;
        struct apm_user *as;
 
-       if (pm_send_all(PM_SUSPEND, (void *)3)) {
-               /* Vetoed */
-               if (vetoable) {
-                       if (apm_info.connection_version > 0x100)
-                               set_system_power_state(APM_STATE_REJECT);
-                       err = -EBUSY;
-                       ignore_sys_suspend = 0;
-                       printk(KERN_WARNING "apm: suspend was vetoed.\n");
-                       goto out;
-               }
-               printk(KERN_CRIT "apm: suspend was vetoed, but suspending anyway.\n");
-       }
-
        device_suspend(PMSG_SUSPEND);
        local_irq_disable();
        device_power_down(PMSG_SUSPEND);
@@ -1224,9 +1214,7 @@ static int suspend(int vetoable)
        device_power_up();
        local_irq_enable();
        device_resume();
-       pm_send_all(PM_RESUME, (void *)0);
        queue_event(APM_NORMAL_RESUME, NULL);
- out:
        spin_lock(&user_list_lock);
        for (as = user_list; as != NULL; as = as->next) {
                as->suspend_wait = 0;
@@ -1337,7 +1325,6 @@ static void check_events(void)
                        if ((event != APM_NORMAL_RESUME)
                            || (ignore_normal_resume == 0)) {
                                device_resume();
-                               pm_send_all(PM_RESUME, (void *)0);
                                queue_event(event, NULL);
                        }
                        ignore_normal_resume = 0;
index 670c3c311289b185cd3fc3a2f12c64bc31a3cc4c..92588083950f8106bd08aafb081ebe46ce3023fd 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/signal.h>
 #include <linux/personality.h>
 #include <linux/suspend.h>
+#include <linux/kbuild.h>
 #include <asm/ucontext.h>
 #include "sigframe.h"
 #include <asm/pgtable.h>
 #include <linux/lguest.h>
 #include "../../../drivers/lguest/lg.h"
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-#define OFFSET(sym, str, mem) \
-       DEFINE(sym, offsetof(struct str, mem));
-
 /* workaround for a warning with -Wmissing-prototypes */
 void foo(void);
 
index 494e1e096ee6df0c986aeb14dbc3f8b7a407bb50..f126c05d6170a17e8fdd880b5973324eb8ebb7d0 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/errno.h> 
 #include <linux/hardirq.h>
 #include <linux/suspend.h>
+#include <linux/kbuild.h>
 #include <asm/pda.h>
 #include <asm/processor.h>
 #include <asm/segment.h>
 #include <asm/ia32.h>
 #include <asm/bootparam.h>
 
-#define DEFINE(sym, val) \
-        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
-#define BLANK() asm volatile("\n->" : : )
-
-#define OFFSET(sym, str, mem) \
-       DEFINE(sym, offsetof(struct str, mem))
-
 #define __NO_STUBS 1
 #undef __SYSCALL
 #undef _ASM_X86_64_UNISTD_H_
index e2d870de837c2ba73599a8f58c9408669446ae9c..b0c8208df9fa17363b5a3ed5ce35bdec8c52ad25 100644 (file)
@@ -339,6 +339,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 {
        struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu);
        unsigned int freq;
+       unsigned int cached_freq;
 
        dprintk("get_cur_freq_on_cpu (%d)\n", cpu);
 
@@ -347,7 +348,16 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
                return 0;
        }
 
+       cached_freq = data->freq_table[data->acpi_data->state].frequency;
        freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data);
+       if (freq != cached_freq) {
+               /*
+                * The dreaded BIOS frequency change behind our back.
+                * Force set the frequency on next target call.
+                */
+               data->resume = 1;
+       }
+
        dprintk("cur freq = %u\n", freq);
 
        return freq;
@@ -591,6 +601,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
            policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
                policy->cpus = perf->shared_cpu_map;
        }
+       policy->related_cpus = perf->shared_cpu_map;
 
 #ifdef CONFIG_SMP
        dmi_check_system(sw_any_bug_dmi_table);
index 353efe4f5017aeaf8c75abcf97639dd8b07dc2d1..5d241ce94a44f4f5b0fceb7a7a8292e08b6d9b08 100644 (file)
@@ -90,7 +90,7 @@ u8 mtrr_type_lookup(u64 start, u64 end)
         * Look of multiple ranges matching this address and pick type
         * as per MTRR precedence
         */
-       if (!mtrr_state.enabled & 2) {
+       if (!(mtrr_state.enabled & 2)) {
                return mtrr_state.def_type;
        }
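The one-character fix above corrects an operator-precedence bug: ! binds tighter than &, so the old test evaluated (!mtrr_state.enabled) & 2, which is always 0, and the early return never fired. A standalone demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
            unsigned int enabled = 0;       /* MTRRs fully disabled */

            /* Old test: '!' binds tighter than '&', so this is (!enabled) & 2 == 0. */
            if (!enabled & 2)
                    printf("old test fires\n");       /* never printed */

            /* Fixed test: check whether bit 1 is clear. */
            if (!(enabled & 2))
                    printf("fixed test fires\n");     /* printed when bit 1 is clear */

            return 0;
    }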
 
index 1960f1985e5e107c52fb2a5d5dd6a2aa89c64021..84c480bb3715af21405c6d55fab0f3160cbf3a59 100644 (file)
@@ -424,7 +424,7 @@ static int __init mtrr_if_init(void)
                return -ENODEV;
 
        proc_root_mtrr =
-               proc_create("mtrr", S_IWUSR | S_IRUGO, &proc_root, &mtrr_fops);
+               proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops);
 
        if (proc_root_mtrr)
                proc_root_mtrr->owner = THIS_MODULE;
index 2251d0ae9570de24259a6c132aec2c4ef2d05f46..2685538179097a1e8e073ebf07cd49e898ef4f6e 100644 (file)
@@ -25,6 +25,7 @@
 #include <asm/hpet.h>
 #include <linux/kdebug.h>
 #include <asm/smp.h>
+#include <asm/reboot.h>
 
 #include <mach_ipi.h>
 
@@ -117,7 +118,7 @@ static void nmi_shootdown_cpus(void)
 }
 #endif
 
-void machine_crash_shutdown(struct pt_regs *regs)
+void native_machine_crash_shutdown(struct pt_regs *regs)
 {
        /* This function is only called after the system
         * has panicked or is otherwise in a critical state.
index cbd42e51cb082d82b8f287eaa1c244913268d64c..124480c0008dd2a5e348cd2db7b928ff13d3aa41 100644 (file)
@@ -84,14 +84,41 @@ void __init reserve_early(unsigned long start, unsigned long end, char *name)
                strncpy(r->name, name, sizeof(r->name) - 1);
 }
 
-void __init early_res_to_bootmem(void)
+void __init free_early(unsigned long start, unsigned long end)
+{
+       struct early_res *r;
+       int i, j;
+
+       for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
+               r = &early_res[i];
+               if (start == r->start && end == r->end)
+                       break;
+       }
+       if (i >= MAX_EARLY_RES || !early_res[i].end)
+               panic("free_early on not reserved area: %lx-%lx!", start, end);
+
+       for (j = i + 1; j < MAX_EARLY_RES && early_res[j].end; j++)
+               ;
+
+       memmove(&early_res[i], &early_res[i + 1],
+              (j - 1 - i) * sizeof(struct early_res));
+
+       early_res[j - 1].end = 0;
+}
+
+void __init early_res_to_bootmem(unsigned long start, unsigned long end)
 {
        int i;
+       unsigned long final_start, final_end;
        for (i = 0; i < MAX_EARLY_RES && early_res[i].end; i++) {
                struct early_res *r = &early_res[i];
-               printk(KERN_INFO "early res: %d [%lx-%lx] %s\n", i,
-                       r->start, r->end - 1, r->name);
-               reserve_bootmem_generic(r->start, r->end - r->start);
+               final_start = max(start, r->start);
+               final_end = min(end, r->end);
+               if (final_start >= final_end)
+                       continue;
+               printk(KERN_INFO "  early res: %d [%lx-%lx] %s\n", i,
+                       final_start, final_end - 1, r->name);
+               reserve_bootmem_generic(final_start, final_end - final_start);
        }
 }
 
index 021624c835832d81f00f1ed6ef5d5437e9303813..cbaaf69bedb29c92055e2272a12efba7f49f9ee1 100644 (file)
@@ -83,7 +83,7 @@ unsigned int read_apic_id(void)
 {
        unsigned int id;
 
-       WARN_ON(preemptible());
+       WARN_ON(preemptible() && num_online_cpus() > 1);
        id = apic_read(APIC_ID);
        if (uv_system_type >= UV_X2APIC)
                id  |= __get_cpu_var(x2apic_extra_bits);
index d31d6b72d60dd2229c7978300c19e4f83630b4de..e25c57b8aa844937742f246288e9ffa2e2be5229 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/string.h>
 #include <linux/percpu.h>
 #include <linux/start_kernel.h>
+#include <linux/io.h>
 
 #include <asm/processor.h>
 #include <asm/proto.h>
@@ -100,6 +101,24 @@ static void __init reserve_ebda_region(void)
        reserve_early(lowmem, 0x100000, "BIOS reserved");
 }
 
+static void __init reserve_setup_data(void)
+{
+       struct setup_data *data;
+       unsigned long pa_data;
+       char buf[32];
+
+       if (boot_params.hdr.version < 0x0209)
+               return;
+       pa_data = boot_params.hdr.setup_data;
+       while (pa_data) {
+               data = early_ioremap(pa_data, sizeof(*data));
+               sprintf(buf, "setup data %x", data->type);
+               reserve_early(pa_data, pa_data+sizeof(*data)+data->len, buf);
+               pa_data = data->next;
+               early_iounmap(data, sizeof(*data));
+       }
+}
+
 void __init x86_64_start_kernel(char * real_mode_data)
 {
        int i;
@@ -156,6 +175,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
 #endif
 
        reserve_ebda_region();
+       reserve_setup_data();
 
        /*
         * At this point everything still needed from the boot loader
index 90f038af3adc326725cbdec61825e1b31f340ebb..b2cc73768a9daaa5f6e568bd4977f6fea7889d87 100644 (file)
@@ -656,15 +656,16 @@ int_msg:
        .asciz "Unknown interrupt or fault at EIP %p %p %p\n"
 
 fault_msg:
-       .asciz                                                          \
-/* fault info: */      "BUG: Int %d: CR2 %p\n"                         \
-/* pusha regs: */      "     EDI %p  ESI %p  EBP %p  ESP %p\n"         \
-                       "     EBX %p  EDX %p  ECX %p  EAX %p\n"         \
-/* fault frame: */     "     err %p  EIP %p   CS %p  flg %p\n"         \
-                                                                       \
-                       "Stack: %p %p %p %p %p %p %p %p\n"              \
-                       "       %p %p %p %p %p %p %p %p\n"              \
-                       "       %p %p %p %p %p %p %p %p\n"
+/* fault info: */
+       .ascii "BUG: Int %d: CR2 %p\n"
+/* pusha regs: */
+       .ascii "     EDI %p  ESI %p  EBP %p  ESP %p\n"
+       .ascii "     EBX %p  EDX %p  ECX %p  EAX %p\n"
+/* fault frame: */
+       .ascii "     err %p  EIP %p   CS %p  flg %p\n"
+       .ascii "Stack: %p %p %p %p %p %p %p %p\n"
+       .ascii "       %p %p %p %p %p %p %p %p\n"
+       .asciz "       %p %p %p %p %p %p %p %p\n"
 
 #include "../../x86/xen/xen-head.S"
 
index 9007f9ea64eed80e9537be44d391fe55d6f7f749..9b5cfcdfc426cc194365020c061c6c0b46e54083 100644 (file)
@@ -137,9 +137,10 @@ static void hpet_reserve_platform_timers(unsigned long id)
        hd.hd_irq[0] = HPET_LEGACY_8254;
        hd.hd_irq[1] = HPET_LEGACY_RTC;
 
-       for (i = 2; i < nrtimers; timer++, i++)
-               hd.hd_irq[i] = (timer->hpet_config & Tn_INT_ROUTE_CNF_MASK) >>
+       for (i = 2; i < nrtimers; timer++, i++) {
+               hd.hd_irq[i] = (readl(&timer->hpet_config) & Tn_INT_ROUTE_CNF_MASK) >>
                        Tn_INT_ROUTE_CNF_SHIFT;
+       }
 
        hpet_alloc(&hd);
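timer->hpet_config points into the ioremap()ed HPET register block, so it has to be read with readl() rather than dereferenced directly; plain loads through an __iomem pointer are not portable across architectures. A hedged sketch of the general pattern; the register offset and function name below are made up for illustration:

    #include <linux/io.h>

    #define EXAMPLE_REG_OFFSET 0x10         /* hypothetical register offset */

    static u32 example_read_config(void __iomem *base)
    {
            /* Always go through readl()/writel() for memory-mapped registers. */
            return readl(base + EXAMPLE_REG_OFFSET);
    }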
 
index 696b8e4e66bbf89bd9b71499f549697b0f68f745..a40d54fc1fdd819125a1d7b609566180062b6ecc 100644 (file)
@@ -2444,6 +2444,7 @@ void destroy_irq(unsigned int irq)
        dynamic_irq_cleanup(irq);
 
        spin_lock_irqsave(&vector_lock, flags);
+       clear_bit(irq_vector[irq], used_vectors);
        irq_vector[irq] = 0;
        spin_unlock_irqrestore(&vector_lock, flags);
 }
index 00bda7bcda63741178db3ad46b444fe25c8b87a7..147352df28b9c74146a102f6c8b22b17e2b11d6d 100644 (file)
@@ -190,8 +190,6 @@ void irq_ctx_exit(int cpu)
        hardirq_ctx[cpu] = NULL;
 }
 
-extern asmlinkage void __do_softirq(void);
-
 asmlinkage void do_softirq(void)
 {
        unsigned long flags;
index 73354302fda76c3ee5a326e63deed6f37acca3a9..c03205991718b30da8adfed31d915f7f325b2aa0 100644 (file)
  *
  * This file is released under the GPLv2.
  */
-
 #include <linux/debugfs.h>
+#include <linux/uaccess.h>
 #include <linux/stat.h>
 #include <linux/init.h>
+#include <linux/io.h>
+#include <linux/mm.h>
 
 #include <asm/setup.h>
 
 #ifdef CONFIG_DEBUG_BOOT_PARAMS
+struct setup_data_node {
+       u64 paddr;
+       u32 type;
+       u32 len;
+};
+
+static ssize_t
+setup_data_read(struct file *file, char __user *user_buf, size_t count,
+               loff_t *ppos)
+{
+       struct setup_data_node *node = file->private_data;
+       unsigned long remain;
+       loff_t pos = *ppos;
+       struct page *pg;
+       void *p;
+       u64 pa;
+
+       if (pos < 0)
+               return -EINVAL;
+       if (pos >= node->len)
+               return 0;
+
+       if (count > node->len - pos)
+               count = node->len - pos;
+       pa = node->paddr + sizeof(struct setup_data) + pos;
+       pg = pfn_to_page((pa + count - 1) >> PAGE_SHIFT);
+       if (PageHighMem(pg)) {
+               p = ioremap_cache(pa, count);
+               if (!p)
+                       return -ENXIO;
+       } else {
+               p = __va(pa);
+       }
+
+       remain = copy_to_user(user_buf, p, count);
+
+       if (PageHighMem(pg))
+               iounmap(p);
+
+       if (remain)
+               return -EFAULT;
+
+       *ppos = pos + count;
+
+       return count;
+}
+
+static int setup_data_open(struct inode *inode, struct file *file)
+{
+       file->private_data = inode->i_private;
+       return 0;
+}
+
+static const struct file_operations fops_setup_data = {
+       .read =         setup_data_read,
+       .open =         setup_data_open,
+};
+
+static int __init
+create_setup_data_node(struct dentry *parent, int no,
+                      struct setup_data_node *node)
+{
+       struct dentry *d, *type, *data;
+       char buf[16];
+       int error;
+
+       sprintf(buf, "%d", no);
+       d = debugfs_create_dir(buf, parent);
+       if (!d) {
+               error = -ENOMEM;
+               goto err_return;
+       }
+       type = debugfs_create_x32("type", S_IRUGO, d, &node->type);
+       if (!type) {
+               error = -ENOMEM;
+               goto err_dir;
+       }
+       data = debugfs_create_file("data", S_IRUGO, d, node, &fops_setup_data);
+       if (!data) {
+               error = -ENOMEM;
+               goto err_type;
+       }
+       return 0;
+
+err_type:
+       debugfs_remove(type);
+err_dir:
+       debugfs_remove(d);
+err_return:
+       return error;
+}
+
+static int __init create_setup_data_nodes(struct dentry *parent)
+{
+       struct setup_data_node *node;
+       struct setup_data *data;
+       int error, no = 0;
+       struct dentry *d;
+       struct page *pg;
+       u64 pa_data;
+
+       d = debugfs_create_dir("setup_data", parent);
+       if (!d) {
+               error = -ENOMEM;
+               goto err_return;
+       }
+
+       pa_data = boot_params.hdr.setup_data;
+
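+       /* Walk the setup_data list, creating one debugfs directory per entry. */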
+       while (pa_data) {
+               node = kmalloc(sizeof(*node), GFP_KERNEL);
+               if (!node) {
+                       error = -ENOMEM;
+                       goto err_dir;
+               }
+               pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT);
+               if (PageHighMem(pg)) {
+                       data = ioremap_cache(pa_data, sizeof(*data));
+                       if (!data) {
+                               error = -ENXIO;
+                               goto err_dir;
+                       }
+               } else {
+                       data = __va(pa_data);
+               }
+
+               node->paddr = pa_data;
+               node->type = data->type;
+               node->len = data->len;
+               error = create_setup_data_node(d, no, node);
+               pa_data = data->next;
+
+               if (PageHighMem(pg))
+                       iounmap(data);
+               if (error)
+                       goto err_dir;
+               no++;
+       }
+       return 0;
+
+err_dir:
+       debugfs_remove(d);
+err_return:
+       return error;
+}
+
 static struct debugfs_blob_wrapper boot_params_blob = {
-       .data = &boot_params,
-       .size = sizeof(boot_params),
+       .data           = &boot_params,
+       .size           = sizeof(boot_params),
 };
 
 static int __init boot_params_kdebugfs_init(void)
 {
-       int error;
        struct dentry *dbp, *version, *data;
+       int error;
 
        dbp = debugfs_create_dir("boot_params", NULL);
        if (!dbp) {
@@ -41,7 +189,13 @@ static int __init boot_params_kdebugfs_init(void)
                error = -ENOMEM;
                goto err_version;
        }
+       error = create_setup_data_nodes(dbp);
+       if (error)
+               goto err_data;
        return 0;
+
+err_data:
+       debugfs_remove(data);
 err_version:
        debugfs_remove(version);
 err_dir:
@@ -61,5 +215,4 @@ static int __init arch_kdebugfs_init(void)
 
        return error;
 }
-
 arch_initcall(arch_kdebugfs_init);
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
new file mode 100644 (file)
index 0000000..8b7a3cf
--- /dev/null
@@ -0,0 +1,248 @@
+/*
+ * KVM paravirt_ops implementation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ * Copyright IBM Corporation, 2007
+ *   Authors: Anthony Liguori <aliguori@us.ibm.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kvm_para.h>
+#include <linux/cpu.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
+#include <linux/hardirq.h>
+
+#define MMU_QUEUE_SIZE 1024
+
+struct kvm_para_state {
+       u8 mmu_queue[MMU_QUEUE_SIZE];
+       int mmu_queue_len;
+       enum paravirt_lazy_mode mode;
+};
+
+static DEFINE_PER_CPU(struct kvm_para_state, para_state);
+
+static struct kvm_para_state *kvm_para_state(void)
+{
+       return &per_cpu(para_state, raw_smp_processor_id());
+}
+
+/*
+ * No need for any "IO delay" on KVM
+ */
+static void kvm_io_delay(void)
+{
+}
+
+static void kvm_mmu_op(void *buffer, unsigned len)
+{
+       int r;
+       unsigned long a1, a2;
+
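+       /*
+        * The hypercall may process only part of the buffer; keep retrying
+        * with the remainder until the whole request has been consumed.
+        */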
+       do {
+               a1 = __pa(buffer);
+               a2 = 0;   /* on i386 __pa() always returns <4G */
+               r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
+               buffer += r;
+               len -= r;
+       } while (len);
+}
+
+static void mmu_queue_flush(struct kvm_para_state *state)
+{
+       if (state->mmu_queue_len) {
+               kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
+               state->mmu_queue_len = 0;
+       }
+}
+
+static void kvm_deferred_mmu_op(void *buffer, int len)
+{
+       struct kvm_para_state *state = kvm_para_state();
+
+       if (state->mode != PARAVIRT_LAZY_MMU) {
+               kvm_mmu_op(buffer, len);
+               return;
+       }
+       if (state->mmu_queue_len + len > sizeof state->mmu_queue)
+               mmu_queue_flush(state);
+       memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
+       state->mmu_queue_len += len;
+}
+
+static void kvm_mmu_write(void *dest, u64 val)
+{
+       __u64 pte_phys;
+       struct kvm_mmu_op_write_pte wpte;
+
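+       /*
+        * Compute the guest physical address of the pte.  With CONFIG_HIGHPTE
+        * the page table page may be in highmem and only mapped via kmap, so
+        * resolve it through the struct page instead of __pa().
+        */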
+#ifdef CONFIG_HIGHPTE
+       struct page *page;
+       unsigned long dst = (unsigned long) dest;
+
+       page = kmap_atomic_to_page(dest);
+       pte_phys = page_to_pfn(page);
+       pte_phys <<= PAGE_SHIFT;
+       pte_phys += (dst & ~(PAGE_MASK));
+#else
+       pte_phys = (unsigned long)__pa(dest);
+#endif
+       wpte.header.op = KVM_MMU_OP_WRITE_PTE;
+       wpte.pte_val = val;
+       wpte.pte_phys = pte_phys;
+
+       kvm_deferred_mmu_op(&wpte, sizeof wpte);
+}
+
+/*
+ * We only need to hook operations that are MMU writes.  We hook these so that
+ * we can use lazy MMU mode to batch these operations.  We could probably
+ * improve the performance of the host code if we used some of the information
+ * here to simplify processing of batched writes.
+ */
+static void kvm_set_pte(pte_t *ptep, pte_t pte)
+{
+       kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
+                          pte_t *ptep, pte_t pte)
+{
+       kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+       kvm_mmu_write(pmdp, pmd_val(pmd));
+}
+
+#if PAGETABLE_LEVELS >= 3
+#ifdef CONFIG_X86_PAE
+static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
+{
+       kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_set_pte_present(struct mm_struct *mm, unsigned long addr,
+                               pte_t *ptep, pte_t pte)
+{
+       kvm_mmu_write(ptep, pte_val(pte));
+}
+
+static void kvm_pte_clear(struct mm_struct *mm,
+                         unsigned long addr, pte_t *ptep)
+{
+       kvm_mmu_write(ptep, 0);
+}
+
+static void kvm_pmd_clear(pmd_t *pmdp)
+{
+       kvm_mmu_write(pmdp, 0);
+}
+#endif
+
+static void kvm_set_pud(pud_t *pudp, pud_t pud)
+{
+       kvm_mmu_write(pudp, pud_val(pud));
+}
+
+#if PAGETABLE_LEVELS == 4
+static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+       kvm_mmu_write(pgdp, pgd_val(pgd));
+}
+#endif
+#endif /* PAGETABLE_LEVELS >= 3 */
+
+static void kvm_flush_tlb(void)
+{
+       struct kvm_mmu_op_flush_tlb ftlb = {
+               .header.op = KVM_MMU_OP_FLUSH_TLB,
+       };
+
+       kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
+}
+
+static void kvm_release_pt(u32 pfn)
+{
+       struct kvm_mmu_op_release_pt rpt = {
+               .header.op = KVM_MMU_OP_RELEASE_PT,
+               .pt_phys = (u64)pfn << PAGE_SHIFT,
+       };
+
+       kvm_mmu_op(&rpt, sizeof rpt);
+}
+
+static void kvm_enter_lazy_mmu(void)
+{
+       struct kvm_para_state *state = kvm_para_state();
+
+       paravirt_enter_lazy_mmu();
+       state->mode = paravirt_get_lazy_mode();
+}
+
+static void kvm_leave_lazy_mmu(void)
+{
+       struct kvm_para_state *state = kvm_para_state();
+
+       mmu_queue_flush(state);
+       paravirt_leave_lazy(paravirt_get_lazy_mode());
+       state->mode = paravirt_get_lazy_mode();
+}
+
+static void paravirt_ops_setup(void)
+{
+       pv_info.name = "KVM";
+       pv_info.paravirt_enabled = 1;
+
+       if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
+               pv_cpu_ops.io_delay = kvm_io_delay;
+
+       if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
+               pv_mmu_ops.set_pte = kvm_set_pte;
+               pv_mmu_ops.set_pte_at = kvm_set_pte_at;
+               pv_mmu_ops.set_pmd = kvm_set_pmd;
+#if PAGETABLE_LEVELS >= 3
+#ifdef CONFIG_X86_PAE
+               pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
+               pv_mmu_ops.set_pte_present = kvm_set_pte_present;
+               pv_mmu_ops.pte_clear = kvm_pte_clear;
+               pv_mmu_ops.pmd_clear = kvm_pmd_clear;
+#endif
+               pv_mmu_ops.set_pud = kvm_set_pud;
+#if PAGETABLE_LEVELS == 4
+               pv_mmu_ops.set_pgd = kvm_set_pgd;
+#endif
+#endif
+               pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
+               pv_mmu_ops.release_pte = kvm_release_pt;
+               pv_mmu_ops.release_pmd = kvm_release_pt;
+               pv_mmu_ops.release_pud = kvm_release_pt;
+
+               pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
+               pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
+       }
+}
+
+void __init kvm_guest_init(void)
+{
+       if (!kvm_para_available())
+               return;
+
+       paravirt_ops_setup();
+}
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
new file mode 100644 (file)
index 0000000..ddee040
--- /dev/null
@@ -0,0 +1,187 @@
+/*  KVM paravirtual clock driver. A clocksource implementation
+    Copyright (C) 2008 Glauber de Oliveira Costa, Red Hat Inc.
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+*/
+
+#include <linux/clocksource.h>
+#include <linux/kvm_para.h>
+#include <asm/arch_hooks.h>
+#include <asm/msr.h>
+#include <asm/apic.h>
+#include <linux/percpu.h>
+#include <asm/reboot.h>
+
+#define KVM_SCALE 22
+
+static int kvmclock = 1;
+
+static int parse_no_kvmclock(char *arg)
+{
+       kvmclock = 0;
+       return 0;
+}
+early_param("no-kvmclock", parse_no_kvmclock);
+
+/* The hypervisor will put information about time periodically here */
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct kvm_vcpu_time_info, hv_clock);
+#define get_clock(cpu, field) per_cpu(hv_clock, cpu).field
+
+static inline u64 kvm_get_delta(u64 last_tsc)
+{
+       int cpu = smp_processor_id();
+       u64 delta = native_read_tsc() - last_tsc;
+       return (delta * get_clock(cpu, tsc_to_system_mul)) >> KVM_SCALE;
+}
+
+static struct kvm_wall_clock wall_clock;
+static cycle_t kvm_clock_read(void);
+/*
+ * The wallclock is the time of day when we booted. Some time may have
+ * elapsed since the hypervisor wrote the data, so we account for that
+ * using the system time.
+ */
+unsigned long kvm_get_wallclock(void)
+{
+       u32 wc_sec, wc_nsec;
+       u64 delta;
+       struct timespec ts;
+       int version, nsec;
+       int low, high;
+
+       low = (int)__pa(&wall_clock);
+       high = ((u64)__pa(&wall_clock) >> 32);
+
+       delta = kvm_clock_read();
+
+       native_write_msr(MSR_KVM_WALL_CLOCK, low, high);
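+       /*
+        * The MSR write asks the host to fill in wall_clock.  Read it under
+        * the wc_version counter: an odd or changed version means the host
+        * was updating it concurrently, so retry.
+        */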
+       do {
+               version = wall_clock.wc_version;
+               rmb();
+               wc_sec = wall_clock.wc_sec;
+               wc_nsec = wall_clock.wc_nsec;
+               rmb();
+       } while ((wall_clock.wc_version != version) || (version & 1));
+
+       delta = kvm_clock_read() - delta;
+       delta += wc_nsec;
+       nsec = do_div(delta, NSEC_PER_SEC);
+       set_normalized_timespec(&ts, wc_sec + delta, nsec);
+       /*
+        * Of all mechanisms of time adjustment I've tested, this one
+        * was the champion!
+        */
+       return ts.tv_sec + 1;
+}
+
+int kvm_set_wallclock(unsigned long now)
+{
+       return 0;
+}
+
+/*
+ * This is our read_clock function. The host writes a tsc timestamp each time
+ * it updates the system time. Without the tsc adjustment, we could have a
+ * situation in which a vcpu starts to run earlier (smaller system_time) but
+ * probes time later than another vcpu, leading to time going backwards.
+ */
+static cycle_t kvm_clock_read(void)
+{
+       u64 last_tsc, now;
+       int cpu;
+
+       preempt_disable();
+       cpu = smp_processor_id();
+
+       last_tsc = get_clock(cpu, tsc_timestamp);
+       now = get_clock(cpu, system_time);
+
+       now += kvm_get_delta(last_tsc);
+       preempt_enable();
+
+       return now;
+}
+static struct clocksource kvm_clock = {
+       .name = "kvm-clock",
+       .read = kvm_clock_read,
+       .rating = 400,
+       .mask = CLOCKSOURCE_MASK(64),
+       .mult = 1 << KVM_SCALE,
+       .shift = KVM_SCALE,
+       .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int kvm_register_clock(void)
+{
+       int cpu = smp_processor_id();
+       int low, high;
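+       /*
+        * Hand the host the physical address of this cpu's hv_clock area;
+        * bit 0 of the low word acts as the enable bit.
+        */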
+       low = (int)__pa(&per_cpu(hv_clock, cpu)) | 1;
+       high = ((u64)__pa(&per_cpu(hv_clock, cpu)) >> 32);
+
+       return native_write_msr_safe(MSR_KVM_SYSTEM_TIME, low, high);
+}
+
+static void kvm_setup_secondary_clock(void)
+{
+       /*
+        * Now that the first cpu already had this clocksource initialized,
+        * we shouldn't fail.
+        */
+       WARN_ON(kvm_register_clock());
+       /* ok, done with our trickery, call native */
+       setup_secondary_APIC_clock();
+}
+
+/*
+ * After the clock is registered, the host will keep writing to the
+ * registered memory location. If the guest happens to shut down, this memory
+ * won't be valid any more. In cases like kexec, where a new kernel is
+ * installed, the host would keep writing to what is now an arbitrary memory
+ * location. So before any kind of shutdown from our side, we unregister the
+ * clock by writing a value that does not have the 'enable' bit set in the msr.
+ */
+#ifdef CONFIG_KEXEC
+static void kvm_crash_shutdown(struct pt_regs *regs)
+{
+       native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
+       native_machine_crash_shutdown(regs);
+}
+#endif
+
+static void kvm_shutdown(void)
+{
+       native_write_msr_safe(MSR_KVM_SYSTEM_TIME, 0, 0);
+       native_machine_shutdown();
+}
+
+void __init kvmclock_init(void)
+{
+       if (!kvm_para_available())
+               return;
+
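+       /*
+        * Hook the paravirt time ops only when the host advertises the
+        * clocksource feature and "no-kvmclock" was not passed.
+        */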
+       if (kvmclock && kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE)) {
+               if (kvm_register_clock())
+                       return;
+               pv_time_ops.get_wallclock = kvm_get_wallclock;
+               pv_time_ops.set_wallclock = kvm_set_wallclock;
+               pv_time_ops.sched_clock = kvm_clock_read;
+               pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
+               machine_ops.shutdown  = kvm_shutdown;
+#ifdef CONFIG_KEXEC
+               machine_ops.crash_shutdown  = kvm_crash_shutdown;
+#endif
+               clocksource_register(&kvm_clock);
+       }
+}
index cfc2648d25ff924e704b9316286f5cca1bae3049..3cad17fe026b5d513096386b7bcdce5064733cc5 100644 (file)
@@ -63,7 +63,7 @@ static int __init mfgpt_fix(char *s)
 
        /* The following undocumented bit resets the MFGPT timers */
        val = 0xFF; dummy = 0;
-       wrmsr(0x5140002B, val, dummy);
+       wrmsr(MSR_MFGPT_SETUP, val, dummy);
        return 1;
 }
 __setup("mfgptfix", mfgpt_fix);
@@ -127,17 +127,17 @@ int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
                 * 6; that is, resets for 7 and 8 will be ignored.  Is this
                 * a problem?   -dilinger
                 */
-               msr = MFGPT_NR_MSR;
+               msr = MSR_MFGPT_NR;
                mask = 1 << (timer + 24);
                break;
 
        case MFGPT_EVENT_NMI:
-               msr = MFGPT_NR_MSR;
+               msr = MSR_MFGPT_NR;
                mask = 1 << (timer + shift);
                break;
 
        case MFGPT_EVENT_IRQ:
-               msr = MFGPT_IRQ_MSR;
+               msr = MSR_MFGPT_IRQ;
                mask = 1 << (timer + shift);
                break;
 
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
new file mode 100644 (file)
index 0000000..edc5fbf
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * AMD Family 10h mmconfig enablement
+ */
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/dmi.h>
+#include <asm/pci-direct.h>
+#include <linux/sort.h>
+#include <asm/io.h>
+#include <asm/msr.h>
+#include <asm/acpi.h>
+
+#include "../pci/pci.h"
+
+struct pci_hostbridge_probe {
+       u32 bus;
+       u32 slot;
+       u32 vendor;
+       u32 device;
+};
+
+static u64 __cpuinitdata fam10h_pci_mmconf_base;
+static int __cpuinitdata fam10h_pci_mmconf_base_status;
+
+static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = {
+       { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
+       { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
+};
+
+struct range {
+       u64 start;
+       u64 end;
+};
+
+static int __cpuinit cmp_range(const void *x1, const void *x2)
+{
+       const struct range *r1 = x1;
+       const struct range *r2 = x2;
+       int start1, start2;
+
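+       /* order the MMIO ranges by the upper 32 bits of their start address */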
+       start1 = r1->start >> 32;
+       start2 = r2->start >> 32;
+
+       return start1 - start2;
+}
+
+/*[47:0] */
+/* need to avoid (0xfd<<32) and (0xfe<<32), ht used space */
+#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32)
+#define BASE_VALID(b) ((b != (0xfdULL << 32)) && (b != (0xfeULL << 32)))
+static void __cpuinit get_fam10h_pci_mmconf_base(void)
+{
+       int i;
+       unsigned bus;
+       unsigned slot;
+       int found;
+
+       u64 val;
+       u32 address;
+       u64 tom2;
+       u64 base = FAM10H_PCI_MMCONF_BASE;
+
+       int hi_mmio_num;
+       struct range range[8];
+
+       /* only try to get setting from BSP */
+       /* -1 or 1 */
+       if (fam10h_pci_mmconf_base_status)
+               return;
+
+       if (!early_pci_allowed())
+               goto fail;
+
+       found = 0;
+       for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
+               u32 id;
+               u16 device;
+               u16 vendor;
+
+               bus = pci_probes[i].bus;
+               slot = pci_probes[i].slot;
+               id = read_pci_config(bus, slot, 0, PCI_VENDOR_ID);
+
+               vendor = id & 0xffff;
+               device = (id>>16) & 0xffff;
+               if (pci_probes[i].vendor == vendor &&
+                   pci_probes[i].device == device) {
+                       found = 1;
+                       break;
+               }
+       }
+
+       if (!found)
+               goto fail;
+
+       /* SYS_CFG */
+       address = MSR_K8_SYSCFG;
+       rdmsrl(address, val);
+
+       /* TOP_MEM2 is not enabled? */
+       if (!(val & (1<<21))) {
+               tom2 = 0;
+       } else {
+               /* TOP_MEM2 */
+               address = MSR_K8_TOP_MEM2;
+               rdmsrl(address, val);
+               tom2 = val & (0xffffULL<<32);
+       }
+
+       if (base <= tom2)
+               base = tom2 + (1ULL<<32);
+
+       /*
+        * need to check if the range is in the high mmio range that is
+        * above 4G
+        */
+       hi_mmio_num = 0;
+       for (i = 0; i < 8; i++) {
+               u32 reg;
+               u64 start;
+               u64 end;
+               reg = read_pci_config(bus, slot, 1, 0x80 + (i << 3));
+               if (!(reg & 3))
+                       continue;
+
+               start = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/
+               reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3));
+               end = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/
+
+               if (!end)
+                       continue;
+
+               range[hi_mmio_num].start = start;
+               range[hi_mmio_num].end = end;
+               hi_mmio_num++;
+       }
+
+       if (!hi_mmio_num)
+               goto out;
+
+       /* sort the range */
+       sort(range, hi_mmio_num, sizeof(struct range), cmp_range, NULL);
+
+       if (range[hi_mmio_num - 1].end < base)
+               goto out;
+       if (range[0].start > base)
+               goto out;
+
+       /* need to find one window */
+       base = range[0].start - (1ULL << 32);
+       if ((base > tom2) && BASE_VALID(base))
+               goto out;
+       base = range[hi_mmio_num - 1].end + (1ULL << 32);
+       if ((base > tom2) && BASE_VALID(base))
+               goto out;
+       /* need to find window between ranges */
+       if (hi_mmio_num > 1)
+       for (i = 0; i < hi_mmio_num - 1; i++) {
+               if (range[i + 1].start > (range[i].end + (1ULL << 32))) {
+                       base = range[i].end + (1ULL << 32);
+                       if ((base > tom2) && BASE_VALID(base))
+                               goto out;
+               }
+       }
+
+fail:
+       fam10h_pci_mmconf_base_status = -1;
+       return;
+out:
+       fam10h_pci_mmconf_base = base;
+       fam10h_pci_mmconf_base_status = 1;
+}
+
+void __cpuinit fam10h_check_enable_mmcfg(void)
+{
+       u64 val;
+       u32 address;
+
+       if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
+               return;
+
+       address = MSR_FAM10H_MMIO_CONF_BASE;
+       rdmsrl(address, val);
+
+       /* try to make sure that AP's setting is identical to BSP setting */
+       if (val & FAM10H_MMIO_CONF_ENABLE) {
+               unsigned busnbits;
+               busnbits = (val >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
+                       FAM10H_MMIO_CONF_BUSRANGE_MASK;
+
+               /* if acpi=off, only trust a setting that handles 256 buses */
+               if (!acpi_pci_disabled || busnbits >= 8) {
+                       u64 base;
+                       base = val & (0xffffULL << 32);
+                       if (fam10h_pci_mmconf_base_status <= 0) {
+                               fam10h_pci_mmconf_base = base;
+                               fam10h_pci_mmconf_base_status = 1;
+                               return;
+                       } else if (fam10h_pci_mmconf_base ==  base)
+                               return;
+               }
+       }
+
+       /*
+        * if it is not enabled, try to enable it and assume only one segment
+        * with 256 buses
+        */
+       get_fam10h_pci_mmconf_base();
+       if (fam10h_pci_mmconf_base_status <= 0)
+               return;
+
+       printk(KERN_INFO "Enable MMCONFIG on AMD Family 10h\n");
+       val &= ~((FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT) |
+            (FAM10H_MMIO_CONF_BUSRANGE_MASK<<FAM10H_MMIO_CONF_BUSRANGE_SHIFT));
+       val |= fam10h_pci_mmconf_base | (8 << FAM10H_MMIO_CONF_BUSRANGE_SHIFT) |
+              FAM10H_MMIO_CONF_ENABLE;
+       wrmsrl(address, val);
+}
+
+static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d)
+{
+       pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
+       return 0;
+}
+
+static struct dmi_system_id __devinitdata mmconf_dmi_table[] = {
+       {
+               .callback = set_check_enable_amd_mmconf,
+               .ident = "Sun Microsystems Machine",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Sun Microsystems"),
+               },
+       },
+       {}
+};
+
+void __init check_enable_amd_mmconf_dmi(void)
+{
+       dmi_check_system(mmconf_dmi_table);
+}
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c
new file mode 100644 (file)
index 0000000..3e66722
--- /dev/null
@@ -0,0 +1,260 @@
+/*
+ * Support for the OLPC DCON and OLPC EC access
+ *
+ * Copyright © 2006  Advanced Micro Devices, Inc.
+ * Copyright © 2007-2008  Andres Salomon <dilinger@debian.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <asm/geode.h>
+#include <asm/olpc.h>
+
+#ifdef CONFIG_OPEN_FIRMWARE
+#include <asm/ofw.h>
+#endif
+
+struct olpc_platform_t olpc_platform_info;
+EXPORT_SYMBOL_GPL(olpc_platform_info);
+
+static DEFINE_SPINLOCK(ec_lock);
+
+/* what the timeout *should* be (in ms) */
+#define EC_BASE_TIMEOUT 20
+
+/* the timeout that bugs in the EC might force us to actually use */
+static int ec_timeout = EC_BASE_TIMEOUT;
+
+static int __init olpc_ec_timeout_set(char *str)
+{
+       if (get_option(&str, &ec_timeout) != 1) {
+               ec_timeout = EC_BASE_TIMEOUT;
+               printk(KERN_ERR "olpc-ec:  invalid argument to "
+                               "'olpc_ec_timeout=', ignoring!\n");
+       }
+       printk(KERN_DEBUG "olpc-ec:  using %d ms delay for EC commands.\n",
+                       ec_timeout);
+       return 1;
+}
+__setup("olpc_ec_timeout=", olpc_ec_timeout_set);
+
+/*
+ * These {i,o}bf_status functions return whether the buffers are full or not.
+ */
+
+static inline unsigned int ibf_status(unsigned int port)
+{
+       return !!(inb(port) & 0x02);
+}
+
+static inline unsigned int obf_status(unsigned int port)
+{
+       return inb(port) & 0x01;
+}
+
+#define wait_on_ibf(p, d) __wait_on_ibf(__LINE__, (p), (d))
+static int __wait_on_ibf(unsigned int line, unsigned int port, int desired)
+{
+       unsigned int timeo;
+       int state = ibf_status(port);
+
+       for (timeo = ec_timeout; state != desired && timeo; timeo--) {
+               mdelay(1);
+               state = ibf_status(port);
+       }
+
+       if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) &&
+                       timeo < (ec_timeout - EC_BASE_TIMEOUT)) {
+               printk(KERN_WARNING "olpc-ec:  %d: waited %u ms for IBF!\n",
+                               line, ec_timeout - timeo);
+       }
+
+       return !(state == desired);
+}
+
+#define wait_on_obf(p, d) __wait_on_obf(__LINE__, (p), (d))
+static int __wait_on_obf(unsigned int line, unsigned int port, int desired)
+{
+       unsigned int timeo;
+       int state = obf_status(port);
+
+       for (timeo = ec_timeout; state != desired && timeo; timeo--) {
+               mdelay(1);
+               state = obf_status(port);
+       }
+
+       if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) &&
+                       timeo < (ec_timeout - EC_BASE_TIMEOUT)) {
+               printk(KERN_WARNING "olpc-ec:  %d: waited %u ms for OBF!\n",
+                               line, ec_timeout - timeo);
+       }
+
+       return !(state == desired);
+}
+
+/*
+ * This allows the kernel to run Embedded Controller commands.  The EC is
+ * documented at <http://wiki.laptop.org/go/Embedded_controller>, and the
+ * available EC commands are here:
+ * <http://wiki.laptop.org/go/Ec_specification>.  Unfortunately, while
+ * OpenFirmware's source is available, the EC's is not.
+ */
+int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
+               unsigned char *outbuf,  size_t outlen)
+{
+       unsigned long flags;
+       int ret = -EIO;
+       int i;
+
+       spin_lock_irqsave(&ec_lock, flags);
+
+       /* Clear OBF */
+       for (i = 0; i < 10 && (obf_status(0x6c) == 1); i++)
+               inb(0x68);
+       if (i == 10) {
+               printk(KERN_ERR "olpc-ec:  timeout while attempting to "
+                               "clear OBF flag!\n");
+               goto err;
+       }
+
+       if (wait_on_ibf(0x6c, 0)) {
+               printk(KERN_ERR "olpc-ec:  timeout waiting for EC to "
+                               "quiesce!\n");
+               goto err;
+       }
+
+restart:
+       /*
+        * Note that if we time out during any IBF checks, that's a failure;
+        * we have to return.  There's no way for the kernel to clear that.
+        *
+        * If we time out during an OBF check, we can restart the command;
+        * reissuing it will clear the OBF flag, and we should be alright.
+        * The OBF flag will sometimes misbehave due to what we believe
+        * is a hardware quirk.
+        */
+       printk(KERN_DEBUG "olpc-ec:  running cmd 0x%x\n", cmd);
+       outb(cmd, 0x6c);
+
+       if (wait_on_ibf(0x6c, 0)) {
+               printk(KERN_ERR "olpc-ec:  timeout waiting for EC to read "
+                               "command!\n");
+               goto err;
+       }
+
+       if (inbuf && inlen) {
+               /* write data to EC */
+               for (i = 0; i < inlen; i++) {
+                       if (wait_on_ibf(0x6c, 0)) {
+                               printk(KERN_ERR "olpc-ec:  timeout waiting for"
+                                               " EC to accept data!\n");
+                               goto err;
+                       }
+                       printk(KERN_DEBUG "olpc-ec:  sending cmd arg 0x%x\n",
+                                       inbuf[i]);
+                       outb(inbuf[i], 0x68);
+               }
+       }
+       if (outbuf && outlen) {
+               /* read data from EC */
+               for (i = 0; i < outlen; i++) {
+                       if (wait_on_obf(0x6c, 1)) {
+                               printk(KERN_ERR "olpc-ec:  timeout waiting for"
+                                               " EC to provide data!\n");
+                               goto restart;
+                       }
+                       outbuf[i] = inb(0x68);
+                       printk(KERN_DEBUG "olpc-ec:  received 0x%x\n",
+                                       outbuf[i]);
+               }
+       }
+
+       ret = 0;
+err:
+       spin_unlock_irqrestore(&ec_lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(olpc_ec_cmd);
+
+#ifdef CONFIG_OPEN_FIRMWARE
+static void __init platform_detect(void)
+{
+       size_t propsize;
+       u32 rev;
+
+       if (ofw("getprop", 4, 1, NULL, "board-revision-int", &rev, 4,
+                       &propsize) || propsize != 4) {
+               printk(KERN_ERR "ofw: getprop call failed!\n");
+               rev = 0;
+       }
+       olpc_platform_info.boardrev = be32_to_cpu(rev);
+}
+#else
+static void __init platform_detect(void)
+{
+       /* stopgap until OFW support is added to the kernel */
+       olpc_platform_info.boardrev = be32_to_cpu(0xc2);
+}
+#endif
+
+static int __init olpc_init(void)
+{
+       unsigned char *romsig;
+
+       /* The ioremap check is dangerous; limit what we run it on */
+       if (!is_geode() || geode_has_vsa2())
+               return 0;
+
+       spin_lock_init(&ec_lock);
+
+       romsig = ioremap(0xffffffc0, 16);
+       if (!romsig)
+               return 0;
+
+       if (strncmp(romsig, "CL1   Q", 7))
+               goto unmap;
+       if (strncmp(romsig+6, romsig+13, 3)) {
+               printk(KERN_INFO "OLPC BIOS signature looks invalid.  "
+                               "Assuming not OLPC\n");
+               goto unmap;
+       }
+
+       printk(KERN_INFO "OLPC board with OpenFirmware %.16s\n", romsig);
+       olpc_platform_info.flags |= OLPC_F_PRESENT;
+
+       /* get the platform revision */
+       platform_detect();
+
+       /* assume B1 and above models always have a DCON */
+       if (olpc_board_at_least(olpc_board(0xb1)))
+               olpc_platform_info.flags |= OLPC_F_DCON;
+
+       /* get the EC revision */
+       olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0,
+                       (unsigned char *) &olpc_platform_info.ecver, 1);
+
+       /* check to see if the VSA exists */
+       if (geode_has_vsa2())
+               olpc_platform_info.flags |= OLPC_F_VSA;
+
+       printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n",
+                       ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "",
+                       olpc_platform_info.boardrev >> 4,
+                       olpc_platform_info.ecver);
+
+unmap:
+       iounmap(romsig);
+       return 0;
+}
+
+postcore_initcall(olpc_init);
index 388b113a7d88ffbc0efaf52c348412af9e439a8c..0c37f16b69502e1fe31f4e94c80775dc1fd22060 100644 (file)
@@ -14,7 +14,7 @@ EXPORT_SYMBOL(forbid_dac);
 const struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
-int iommu_sac_force __read_mostly = 0;
+static int iommu_sac_force __read_mostly;
 
 #ifdef CONFIG_IOMMU_DEBUG
 int panic_on_overflow __read_mostly = 1;
index 3004d716539dde2b882acd4e0576676531382965..67e9b4a1e89d5a1ac3a3851beaab9d48bbe22c0b 100644 (file)
@@ -4,6 +4,8 @@
 #include <linux/smp.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/pm.h>
 
 struct kmem_cache *task_xstate_cachep;
 
@@ -42,3 +44,118 @@ void arch_task_cache_init(void)
                                  __alignof__(union thread_xstate),
                                  SLAB_PANIC, NULL);
 }
+
+static void do_nothing(void *unused)
+{
+}
+
+/*
+ * cpu_idle_wait - Used to ensure that all the CPUs discard the old value of
+ * pm_idle and pick up the new one. Required while changing the pm_idle
+ * handler on SMP systems.
+ *
+ * Caller must have changed pm_idle to the new value before the call. Old
+ * pm_idle value will not be used by any CPU after the return of this function.
+ */
+void cpu_idle_wait(void)
+{
+       smp_mb();
+       /* kick all the CPUs so that they exit out of pm_idle */
+       smp_call_function(do_nothing, NULL, 0, 1);
+}
+EXPORT_SYMBOL_GPL(cpu_idle_wait);
+
+/*
+ * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
+ * which can obviate IPI to trigger checking of need_resched.
+ * We execute MONITOR against need_resched and enter optimized wait state
+ * through MWAIT. Whenever someone changes need_resched, we would be woken
+ * up from MWAIT (without an IPI).
+ *
+ * New with Core Duo processors, MWAIT can take some hints based on CPU
+ * capability.
+ */
+void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
+{
+       if (!need_resched()) {
+               __monitor((void *)&current_thread_info()->flags, 0, 0);
+               smp_mb();
+               if (!need_resched())
+                       __mwait(ax, cx);
+       }
+}
+
+/* Default MONITOR/MWAIT with no hints, used for default C1 state */
+static void mwait_idle(void)
+{
+       if (!need_resched()) {
+               __monitor((void *)&current_thread_info()->flags, 0, 0);
+               smp_mb();
+               if (!need_resched())
+                       __sti_mwait(0, 0);
+               else
+                       local_irq_enable();
+       } else
+               local_irq_enable();
+}
+
+
+static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
+{
+       if (force_mwait)
+               return 1;
+       /* Any C1 states supported? */
+       return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle(void)
+{
+       local_irq_enable();
+       cpu_relax();
+}
+
+void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+{
+       static int selected;
+
+       if (selected)
+               return;
+#ifdef CONFIG_X86_SMP
+       if (pm_idle == poll_idle && smp_num_siblings > 1) {
+               printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
+                       " performance may degrade.\n");
+       }
+#endif
+       if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
+               /*
+                * Skip, if setup has overridden idle.
+                * One CPU supports mwait => All CPUs support mwait
+                */
+               if (!pm_idle) {
+                       printk(KERN_INFO "using mwait in idle threads.\n");
+                       pm_idle = mwait_idle;
+               }
+       }
+       selected = 1;
+}
+
+static int __init idle_setup(char *str)
+{
+       if (!strcmp(str, "poll")) {
+               printk("using polling idle threads.\n");
+               pm_idle = poll_idle;
+       } else if (!strcmp(str, "mwait"))
+               force_mwait = 1;
+       else
+               return -1;
+
+       boot_option_idle_override = 1;
+       return 0;
+}
+early_param("idle", idle_setup);
+
index 77de848bd1fbbca9d498a07f4c0fbc65cf05c43b..f8476dfbb60d9818f5da08d3a5b95a223930964f 100644 (file)
@@ -111,12 +111,10 @@ void default_idle(void)
                 */
                smp_mb();
 
-               local_irq_disable();
-               if (!need_resched()) {
+               if (!need_resched())
                        safe_halt();    /* enables interrupts racelessly */
-                       local_irq_disable();
-               }
-               local_irq_enable();
+               else
+                       local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
        } else {
                local_irq_enable();
@@ -128,17 +126,6 @@ void default_idle(void)
 EXPORT_SYMBOL(default_idle);
 #endif
 
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->work.need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
-       local_irq_enable();
-       cpu_relax();
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 #include <asm/nmi.h>
 /* We don't actually take CPU down, just spin without interrupts. */
@@ -196,6 +183,7 @@ void cpu_idle(void)
                        if (cpu_is_offline(cpu))
                                play_dead();
 
+                       local_irq_disable();
                        __get_cpu_var(irq_stat).idle_timestamp = jiffies;
                        idle();
                }
@@ -206,104 +194,6 @@ void cpu_idle(void)
        }
 }
 
-static void do_nothing(void *unused)
-{
-}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
- * pm_idle and update to new pm_idle value. Required while changing pm_idle
- * handler on SMP systems.
- *
- * Caller must have changed pm_idle to the new value before the call. Old
- * pm_idle value will not be used by any CPU after the return of this function.
- */
-void cpu_idle_wait(void)
-{
-       smp_mb();
-       /* kick all the CPUs so that they exit out of pm_idle */
-       smp_call_function(do_nothing, NULL, 0, 1);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
-       if (!need_resched()) {
-               __monitor((void *)&current_thread_info()->flags, 0, 0);
-               smp_mb();
-               if (!need_resched())
-                       __sti_mwait(ax, cx);
-               else
-                       local_irq_enable();
-       } else
-               local_irq_enable();
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
-       local_irq_enable();
-       mwait_idle_with_hints(0, 0);
-}
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
-       if (force_mwait)
-               return 1;
-       /* Any C1 states supported? */
-       return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
-{
-       static int selected;
-
-       if (selected)
-               return;
-#ifdef CONFIG_X86_SMP
-       if (pm_idle == poll_idle && smp_num_siblings > 1) {
-               printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
-                       " performance may degrade.\n");
-       }
-#endif
-       if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
-               /*
-                * Skip, if setup has overridden idle.
-                * One CPU supports mwait => All CPUs supports mwait
-                */
-               if (!pm_idle) {
-                       printk(KERN_INFO "using mwait in idle threads.\n");
-                       pm_idle = mwait_idle;
-               }
-       }
-       selected = 1;
-}
-
-static int __init idle_setup(char *str)
-{
-       if (!strcmp(str, "poll")) {
-               printk("using polling idle threads.\n");
-               pm_idle = poll_idle;
-       } else if (!strcmp(str, "mwait"))
-               force_mwait = 1;
-       else
-               return -1;
-
-       boot_option_idle_override = 1;
-       return 0;
-}
-early_param("idle", idle_setup);
-
 void __show_registers(struct pt_regs *regs, int all)
 {
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
index 131c2ee7ac5618e9fc92f576f4db19676bd235af..e2319f39988b042f364a73174a0d2ac254a5b60d 100644 (file)
@@ -106,26 +106,13 @@ void default_idle(void)
         * test NEED_RESCHED:
         */
        smp_mb();
-       local_irq_disable();
-       if (!need_resched()) {
+       if (!need_resched())
                safe_halt();    /* enables interrupts racelessly */
-               local_irq_disable();
-       }
-       local_irq_enable();
+       else
+               local_irq_enable();
        current_thread_info()->status |= TS_POLLING;
 }
 
-/*
- * On SMP it's slightly faster (but much more power-consuming!)
- * to poll the ->need_resched flag instead of waiting for the
- * cross-CPU IPI to arrive. Use this option with caution.
- */
-static void poll_idle(void)
-{
-       local_irq_enable();
-       cpu_relax();
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 DECLARE_PER_CPU(int, cpu_state);
 
@@ -192,110 +179,6 @@ void cpu_idle(void)
        }
 }
 
-static void do_nothing(void *unused)
-{
-}
-
-/*
- * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
- * pm_idle and update to new pm_idle value. Required while changing pm_idle
- * handler on SMP systems.
- *
- * Caller must have changed pm_idle to the new value before the call. Old
- * pm_idle value will not be used by any CPU after the return of this function.
- */
-void cpu_idle_wait(void)
-{
-       smp_mb();
-       /* kick all the CPUs so that they exit out of pm_idle */
-       smp_call_function(do_nothing, NULL, 0, 1);
-}
-EXPORT_SYMBOL_GPL(cpu_idle_wait);
-
-/*
- * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
- * which can obviate IPI to trigger checking of need_resched.
- * We execute MONITOR against need_resched and enter optimized wait state
- * through MWAIT. Whenever someone changes need_resched, we would be woken
- * up from MWAIT (without an IPI).
- *
- * New with Core Duo processors, MWAIT can take some hints based on CPU
- * capability.
- */
-void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
-{
-       if (!need_resched()) {
-               __monitor((void *)&current_thread_info()->flags, 0, 0);
-               smp_mb();
-               if (!need_resched())
-                       __mwait(ax, cx);
-       }
-}
-
-/* Default MONITOR/MWAIT with no hints, used for default C1 state */
-static void mwait_idle(void)
-{
-       if (!need_resched()) {
-               __monitor((void *)&current_thread_info()->flags, 0, 0);
-               smp_mb();
-               if (!need_resched())
-                       __sti_mwait(0, 0);
-               else
-                       local_irq_enable();
-       } else {
-               local_irq_enable();
-       }
-}
-
-
-static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
-{
-       if (force_mwait)
-               return 1;
-       /* Any C1 states supported? */
-       return c->cpuid_level >= 5 && ((cpuid_edx(5) >> 4) & 0xf) > 0;
-}
-
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
-{
-       static int selected;
-
-       if (selected)
-               return;
-#ifdef CONFIG_X86_SMP
-       if (pm_idle == poll_idle && smp_num_siblings > 1) {
-               printk(KERN_WARNING "WARNING: polling idle and HT enabled,"
-                       " performance may degrade.\n");
-       }
-#endif
-       if (cpu_has(c, X86_FEATURE_MWAIT) && mwait_usable(c)) {
-               /*
-                * Skip, if setup has overridden idle.
-                * One CPU supports mwait => All CPUs supports mwait
-                */
-               if (!pm_idle) {
-                       printk(KERN_INFO "using mwait in idle threads.\n");
-                       pm_idle = mwait_idle;
-               }
-       }
-       selected = 1;
-}
-
-static int __init idle_setup(char *str)
-{
-       if (!strcmp(str, "poll")) {
-               printk("using polling idle threads.\n");
-               pm_idle = poll_idle;
-       } else if (!strcmp(str, "mwait"))
-               force_mwait = 1;
-       else
-               return -1;
-
-       boot_option_idle_override = 1;
-       return 0;
-}
-early_param("idle", idle_setup);
-
 /* Prints also some state that isn't saved in the pt_regs */
 void __show_regs(struct pt_regs * regs)
 {
index 1791a751a772d0d6d1a6256591707ad5fca8c237..07c6d42ab5ff326203f417c1b8ccbc9ae1fc5dac 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/desc.h>
 #include <asm/hpet.h>
 #include <asm/pgtable.h>
+#include <asm/proto.h>
 #include <asm/reboot_fixups.h>
 #include <asm/reboot.h>
 
@@ -399,7 +400,7 @@ static void native_machine_emergency_restart(void)
        }
 }
 
-static void native_machine_shutdown(void)
+void native_machine_shutdown(void)
 {
        /* Stop the cpus and apics */
 #ifdef CONFIG_SMP
@@ -470,7 +471,10 @@ struct machine_ops machine_ops = {
        .shutdown = native_machine_shutdown,
        .emergency_restart = native_machine_emergency_restart,
        .restart = native_machine_restart,
-       .halt = native_machine_halt
+       .halt = native_machine_halt,
+#ifdef CONFIG_KEXEC
+       .crash_shutdown = native_machine_crash_shutdown,
+#endif
 };
 
 void machine_power_off(void)
@@ -498,3 +502,9 @@ void machine_halt(void)
        machine_ops.halt();
 }
 
+#ifdef CONFIG_KEXEC
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+       machine_ops.crash_shutdown(regs);
+}
+#endif
index 44cc9b933932f2502cd2900fc4e3893513b1ea62..2283422af7946fef30a86189dddb0a7a12fe3fc1 100644 (file)
@@ -47,6 +47,7 @@
 #include <linux/pfn.h>
 #include <linux/pci.h>
 #include <linux/init_ohci1394_dma.h>
+#include <linux/kvm_para.h>
 
 #include <video/edid.h>
 
@@ -820,6 +821,10 @@ void __init setup_arch(char **cmdline_p)
 
        max_low_pfn = setup_memory();
 
+#ifdef CONFIG_KVM_CLOCK
+       kvmclock_init();
+#endif
+
 #ifdef CONFIG_VMI
        /*
         * Must be after max_low_pfn is determined, and before kernel
@@ -827,6 +832,7 @@ void __init setup_arch(char **cmdline_p)
         */
        vmi_init();
 #endif
+       kvm_guest_init();
 
        /*
         * NOTE: before this point _nobody_ is allowed to allocate
index 17bdf2343095df5e8ddfeb4d20a1a06bf7d3d8b9..22c14e21c97c435a8d172d8f23159738bf0dbe2f 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/crash_dump.h>
 #include <linux/root_dev.h>
 #include <linux/pci.h>
+#include <asm/pci-direct.h>
 #include <linux/efi.h>
 #include <linux/acpi.h>
 #include <linux/kallsyms.h>
 #include <linux/dmi.h>
 #include <linux/dma-mapping.h>
 #include <linux/ctype.h>
+#include <linux/sort.h>
 #include <linux/uaccess.h>
 #include <linux/init_ohci1394_dma.h>
+#include <linux/kvm_para.h>
 
 #include <asm/mtrr.h>
 #include <asm/uaccess.h>
@@ -190,6 +193,7 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
+       early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
        reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
 }
 #endif
@@ -264,6 +268,40 @@ void __attribute__((weak)) __init memory_setup(void)
        machine_specific_memory_setup();
 }
 
+static void __init parse_setup_data(void)
+{
+       struct setup_data *data;
+       unsigned long pa_data;
+
+       if (boot_params.hdr.version < 0x0209)
+               return;
+       pa_data = boot_params.hdr.setup_data;
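+       /*
+        * setup_data entries form a singly linked list of physical addresses;
+        * walk it through temporary early_ioremap() mappings.
+        */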
+       while (pa_data) {
+               data = early_ioremap(pa_data, PAGE_SIZE);
+               switch (data->type) {
+               default:
+                       break;
+               }
+#ifndef CONFIG_DEBUG_BOOT_PARAMS
+               free_early(pa_data, pa_data+sizeof(*data)+data->len);
+#endif
+               pa_data = data->next;
+               early_iounmap(data, PAGE_SIZE);
+       }
+}
+
+#ifdef CONFIG_PCI_MMCONFIG
+extern void __cpuinit fam10h_check_enable_mmcfg(void);
+extern void __init check_enable_amd_mmconf_dmi(void);
+#else
+void __cpuinit fam10h_check_enable_mmcfg(void)
+{
+}
+void __init check_enable_amd_mmconf_dmi(void)
+{
+}
+#endif
+
 /*
  * setup_arch - architecture-specific boot-time initializations
  *
@@ -316,6 +354,8 @@ void __init setup_arch(char **cmdline_p)
        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;
 
+       parse_setup_data();
+
        parse_early_param();
 
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
@@ -359,6 +399,10 @@ void __init setup_arch(char **cmdline_p)
 
        io_delay_init();
 
+#ifdef CONFIG_KVM_CLOCK
+       kvmclock_init();
+#endif
+
 #ifdef CONFIG_SMP
        /* setup to use the early static init tables during kernel startup */
        x86_cpu_to_apicid_early_ptr = (void *)x86_cpu_to_apicid_init;
@@ -397,8 +441,6 @@ void __init setup_arch(char **cmdline_p)
        contig_initmem_init(0, end_pfn);
 #endif
 
-       early_res_to_bootmem();
-
        dma32_reserve_bootmem();
 
 #ifdef CONFIG_ACPI_SLEEP
@@ -465,6 +507,8 @@ void __init setup_arch(char **cmdline_p)
        init_apic_mappings();
        ioapic_init_mappings();
 
+       kvm_guest_init();
+
        /*
         * We trust e820 completely. No explicit ROM probing in memory.
         */
@@ -485,6 +529,9 @@ void __init setup_arch(char **cmdline_p)
        conswitchp = &dummy_con;
 #endif
 #endif
+
+       /* do this before identify_cpu for boot cpu */
+       check_enable_amd_mmconf_dmi();
 }
 
 static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
@@ -737,6 +784,9 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
        /* MFENCE stops RDTSC speculation */
        set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC);
 
+       if (c->x86 == 0x10)
+               fam10h_check_enable_mmcfg();
+
        if (amd_apic_timer_broken())
                disable_apic_timer = 1;
 
index 8e05e7f7bd40cc779887c71e91769a22e572f09e..d92373630963f980fb5471a0d96d903b5d4b18bf 100644 (file)
@@ -57,7 +57,7 @@ sys_sigsuspend(int history0, int history1, old_sigset_t mask)
 
        current->state = TASK_INTERRUPTIBLE;
        schedule();
-       set_thread_flag(TIF_RESTORE_SIGMASK);
+       set_restore_sigmask();
 
        return -ERESTARTNOHAND;
 }
@@ -593,7 +593,7 @@ static void do_signal(struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
                oldset = &current->saved_sigmask;
        else
                oldset = &current->blocked;
@@ -612,13 +612,12 @@ static void do_signal(struct pt_regs *regs)
                /* Whee! Actually deliver the signal.  */
                if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
                        /*
-                        * a signal was successfully delivered; the saved
+                        * A signal was successfully delivered; the saved
                         * sigmask will have been stored in the signal frame,
                         * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag
+                        * clear the TS_RESTORE_SIGMASK flag.
                         */
-                       if (test_thread_flag(TIF_RESTORE_SIGMASK))
-                               clear_thread_flag(TIF_RESTORE_SIGMASK);
+                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
                }
                return;
        }
@@ -645,8 +644,8 @@ static void do_signal(struct pt_regs *regs)
         * If there's no signal to deliver, we just put the saved sigmask
         * back.
         */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
+       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
+               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
                sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
        }
 }
@@ -665,7 +664,7 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
        }
 
        /* deal with pending signal delivery */
-       if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+       if (thread_info_flags & _TIF_SIGPENDING)
                do_signal(regs);
 
        if (thread_info_flags & _TIF_HRTICK_RESCHED)
index ccb2a4560c2d91496bdbee9d35ed04d07c9f65bf..e53b267662e712681d99b4b5ae6014af9bf790c1 100644 (file)
@@ -427,7 +427,7 @@ static void do_signal(struct pt_regs *regs)
        if (!user_mode(regs))
                return;
 
-       if (test_thread_flag(TIF_RESTORE_SIGMASK))
+       if (current_thread_info()->status & TS_RESTORE_SIGMASK)
                oldset = &current->saved_sigmask;
        else
                oldset = &current->blocked;
@@ -444,11 +444,13 @@ static void do_signal(struct pt_regs *regs)
 
                /* Whee!  Actually deliver the signal.  */
                if (handle_signal(signr, &info, &ka, oldset, regs) == 0) {
-                       /* a signal was successfully delivered; the saved
+                       /*
+                        * A signal was successfully delivered; the saved
                         * sigmask will have been stored in the signal frame,
                         * and will be restored by sigreturn, so we can simply
-                        * clear the TIF_RESTORE_SIGMASK flag */
-                       clear_thread_flag(TIF_RESTORE_SIGMASK);
+                        * clear the TS_RESTORE_SIGMASK flag.
+                        */
+                       current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
                }
                return;
        }
@@ -476,8 +478,8 @@ static void do_signal(struct pt_regs *regs)
         * If there's no signal to deliver, we just put the saved sigmask
         * back.
         */
-       if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
-               clear_thread_flag(TIF_RESTORE_SIGMASK);
+       if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
+               current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
                sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
        }
 }
@@ -498,7 +500,7 @@ void do_notify_resume(struct pt_regs *regs, void *unused,
 #endif /* CONFIG_X86_MCE */
 
        /* deal with pending signal delivery */
-       if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
+       if (thread_info_flags & _TIF_SIGPENDING)
                do_signal(regs);
 
        if (thread_info_flags & _TIF_HRTICK_RESCHED)
index 04c662ba18f1dbf217326a9ffe2ade458617ee61..84241a256dc819bbb1f2fcaff679f189782f8564 100644 (file)
@@ -1149,14 +1149,10 @@ static int __init smp_sanity_check(unsigned max_cpus)
                                 "forcing use of dummy APIC emulation.\n");
                smpboot_clear_io_apic();
 #ifdef CONFIG_X86_32
-               if (nmi_watchdog == NMI_LOCAL_APIC) {
-                       printk(KERN_INFO "activating minimal APIC for"
-                                        "NMI watchdog use.\n");
-                       connect_bsp_APIC();
-                       setup_local_APIC();
-                       end_local_APIC_setup();
-               }
+               connect_bsp_APIC();
 #endif
+               setup_local_APIC();
+               end_local_APIC_setup();
                return -1;
        }
 
index 1a89e93f3f1ccfad3696921f25492e60f2bfdfbc..2ff21f398934b049975a427e463b046e9226d079 100644 (file)
@@ -115,7 +115,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-extern void (*late_time_init)(void);
 /* Duplicate of time_init() below, with hpet_enable part added */
 void __init hpet_time_init(void)
 {
index b7ab3c335fae73242ace2deec574165f05ce774a..fad3674b06a58f990cabab44bd61910f975a6682 100644 (file)
@@ -209,12 +209,6 @@ SECTIONS
        EXIT_DATA
   }
 
-/* vdso blob that is mapped into user space */
-  vdso_start = . ;
-  .vdso  : AT(ADDR(.vdso) - LOAD_OFFSET) { *(.vdso) }
-  . = ALIGN(PAGE_SIZE);
-  vdso_end = .;
-
 #ifdef CONFIG_BLK_DEV_INITRD
   . = ALIGN(PAGE_SIZE);
   __initramfs_start = .;
index caf2a26f5cfd1b712c346a835b3755b842a466b7..ba8c0b75ab0ae8c4fbbaba61c4afd6637cf4bd4a 100644 (file)
@@ -133,7 +133,7 @@ int is_vsmp_box(void)
        }
 }
 #else
-static int __init detect_vsmp_box(void)
+static void __init detect_vsmp_box(void)
 {
 }
 int is_vsmp_box(void)
index 41962e793c0fe9f122d22517956535875ea1170f..8d45fabc5f3baf556b1da2558a42df183ab09e80 100644 (file)
@@ -19,7 +19,7 @@ if VIRTUALIZATION
 
 config KVM
        tristate "Kernel-based Virtual Machine (KVM) support"
-       depends on HAVE_KVM && EXPERIMENTAL
+       depends on HAVE_KVM
        select PREEMPT_NOTIFIERS
        select ANON_INODES
        ---help---
@@ -50,6 +50,17 @@ config KVM_AMD
          Provides support for KVM on AMD processors equipped with the AMD-V
          (SVM) extensions.
 
+config KVM_TRACE
+       bool "KVM trace support"
+       depends on KVM && MARKERS && SYSFS
+       select RELAY
+       select DEBUG_FS
+       default n
+       ---help---
+         This option allows reading a trace of kvm-related events through
+         relayfs.  Note the ABI is not considered stable and will be
+         modified in future updates.
+
 # OK, it's a little counter-intuitive to do this, but it puts it neatly under
 # the virtualization menu.
 source drivers/lguest/Kconfig
index ffdd0b310784059527a4837ca040400c170db0d2..c97d35c218dbe0ad4827776ff30c20e13d107852 100644 (file)
@@ -3,10 +3,14 @@
 #
 
 common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o)
+ifeq ($(CONFIG_KVM_TRACE),y)
+common-objs += $(addprefix ../../../virt/kvm/, kvm_trace.o)
+endif
 
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
 
-kvm-objs := $(common-objs) x86.o mmu.o x86_emulate.o i8259.o irq.o lapic.o
+kvm-objs := $(common-objs) x86.o mmu.o x86_emulate.o i8259.o irq.o lapic.o \
+       i8254.o
 obj-$(CONFIG_KVM) += kvm.o
 kvm-intel-objs = vmx.o
 obj-$(CONFIG_KVM_INTEL) += kvm-intel.o
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
new file mode 100644 (file)
index 0000000..4c943ea
--- /dev/null
@@ -0,0 +1,611 @@
+/*
+ * 8253/8254 interval timer emulation
+ *
+ * Copyright (c) 2003-2004 Fabrice Bellard
+ * Copyright (c) 2006 Intel Corporation
+ * Copyright (c) 2007 Keir Fraser, XenSource Inc
+ * Copyright (c) 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ * Authors:
+ *   Sheng Yang <sheng.yang@intel.com>
+ *   Based on QEMU and Xen.
+ */
+
+#include <linux/kvm_host.h>
+
+#include "irq.h"
+#include "i8254.h"
+
+#ifndef CONFIG_X86_64
+#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
+#else
+#define mod_64(x, y) ((x) % (y))
+#endif
+
+#define RW_STATE_LSB 1
+#define RW_STATE_MSB 2
+#define RW_STATE_WORD0 3
+#define RW_STATE_WORD1 4
+
+/* Compute with 96 bit intermediate result: (a*b)/c */
+static u64 muldiv64(u64 a, u32 b, u32 c)
+{
+       union {
+               u64 ll;
+               struct {
+                       u32 low, high;
+               } l;
+       } u, res;
+       u64 rl, rh;
+
+       u.ll = a;
+       rl = (u64)u.l.low * (u64)b;
+       rh = (u64)u.l.high * (u64)b;
+       rh += (rl >> 32);
+       res.l.high = div64_u64(rh, c);
+       res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
+       return res.ll;
+}
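
A standalone userspace sketch of the split above may help: the 64-bit multiplicand is broken into 32-bit halves so that (a*b)/c never needs more than a 64-bit intermediate at any step, which matters on 32-bit hosts without a native 128-bit type. The harness below is an illustration only (the main() driver and the little-endian union layout are assumptions; it is not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t muldiv64(uint64_t a, uint32_t b, uint32_t c)
    {
            union {
                    uint64_t ll;
                    struct { uint32_t low, high; } l; /* little-endian split, as on x86 */
            } u, res;
            uint64_t rl, rh;

            u.ll = a;
            rl = (uint64_t)u.l.low  * b;   /* low 32 bits of a, times b  */
            rh = (uint64_t)u.l.high * b;   /* high 32 bits of a, times b */
            rh += rl >> 32;                /* fold the overlapping carry */
            res.l.high = (uint32_t)(rh / c);
            res.l.low  = (uint32_t)((((rh % c) << 32) + (rl & 0xffffffffu)) / c);
            return res.ll;
    }

    int main(void)
    {
            /* The PIT use case: a full 16-bit reload (0x10000 ticks) in nanoseconds. */
            printf("0x10000 ticks = %llu ns\n",
                   (unsigned long long)muldiv64(0x10000, 1000000000u, 1193181u));
            return 0;
    }

Compiled with gcc, this prints 54925447 ns, i.e. the familiar ~55 ms (18.2 Hz) full-count period.
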
+
+static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
+{
+       struct kvm_kpit_channel_state *c =
+               &kvm->arch.vpit->pit_state.channels[channel];
+
+       WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+       switch (c->mode) {
+       default:
+       case 0:
+       case 4:
+               /* XXX: just disable/enable counting */
+               break;
+       case 1:
+       case 2:
+       case 3:
+       case 5:
+               /* Restart counting on rising edge. */
+               if (c->gate < val)
+                       c->count_load_time = ktime_get();
+               break;
+       }
+
+       c->gate = val;
+}
+
+int pit_get_gate(struct kvm *kvm, int channel)
+{
+       WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+       return kvm->arch.vpit->pit_state.channels[channel].gate;
+}
+
+static int pit_get_count(struct kvm *kvm, int channel)
+{
+       struct kvm_kpit_channel_state *c =
+               &kvm->arch.vpit->pit_state.channels[channel];
+       s64 d, t;
+       int counter;
+
+       WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+       t = ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
+       d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
+
+       switch (c->mode) {
+       case 0:
+       case 1:
+       case 4:
+       case 5:
+               counter = (c->count - d) & 0xffff;
+               break;
+       case 3:
+               /* XXX: may be incorrect for odd counts */
+               counter = c->count - (mod_64((2 * d), c->count));
+               break;
+       default:
+               counter = c->count - mod_64(d, c->count);
+               break;
+       }
+       return counter;
+}
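
As a worked example of the conversion above: after 10 ms of guest time, d = 10,000,000 ns * 1,193,181 / 1,000,000,000 = 11,931 ticks (truncated), so a channel programmed in mode 0 with a count of 0x10000 reads back (65536 - 11931) & 0xffff = 53605.
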
+
+static int pit_get_out(struct kvm *kvm, int channel)
+{
+       struct kvm_kpit_channel_state *c =
+               &kvm->arch.vpit->pit_state.channels[channel];
+       s64 d, t;
+       int out;
+
+       WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+       t = ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
+       d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
+
+       switch (c->mode) {
+       default:
+       case 0:
+               out = (d >= c->count);
+               break;
+       case 1:
+               out = (d < c->count);
+               break;
+       case 2:
+               out = ((mod_64(d, c->count) == 0) && (d != 0));
+               break;
+       case 3:
+               out = (mod_64(d, c->count) < ((c->count + 1) >> 1));
+               break;
+       case 4:
+       case 5:
+               out = (d == c->count);
+               break;
+       }
+
+       return out;
+}
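
The OUT-pin model above mirrors the 8254's documented behaviour for these modes: in mode 0, OUT goes high once d reaches the programmed count (terminal count) and stays high, and in mode 3 it is high while mod_64(d, count) < (count + 1)/2, i.e. for the first half of each period, the +1 giving the longer half-cycle to the high phase on odd counts.
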
+
+static void pit_latch_count(struct kvm *kvm, int channel)
+{
+       struct kvm_kpit_channel_state *c =
+               &kvm->arch.vpit->pit_state.channels[channel];
+
+       WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+       if (!c->count_latched) {
+               c->latched_count = pit_get_count(kvm, channel);
+               c->count_latched = c->rw_mode;
+       }
+}
+
+static void pit_latch_status(struct kvm *kvm, int channel)
+{
+       struct kvm_kpit_channel_state *c =
+               &kvm->arch.vpit->pit_state.channels[channel];
+
+       WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+
+       if (!c->status_latched) {
+               /* TODO: Return NULL COUNT (bit 6). */
+               c->status = ((pit_get_out(kvm, channel) << 7) |
+                               (c->rw_mode << 4) |
+                               (c->mode << 1) |
+                               c->bcd);
+               c->status_latched = 1;
+       }
+}
+
+int __pit_timer_fn(struct kvm_kpit_state *ps)
+{
+       struct kvm_vcpu *vcpu0 = ps->pit->kvm->vcpus[0];
+       struct kvm_kpit_timer *pt = &ps->pit_timer;
+
+       atomic_inc(&pt->pending);
+       smp_mb__after_atomic_inc();
+       /* FIXME: handle case where the guest is in guest mode */
+       if (vcpu0 && waitqueue_active(&vcpu0->wq)) {
+               vcpu0->arch.mp_state = KVM_MP_STATE_RUNNABLE;
+               wake_up_interruptible(&vcpu0->wq);
+       }
+
+       pt->timer.expires = ktime_add_ns(pt->timer.expires, pt->period);
+       pt->scheduled = ktime_to_ns(pt->timer.expires);
+
+       return (pt->period == 0 ? 0 : 1);
+}
+
+int pit_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pit *pit = vcpu->kvm->arch.vpit;
+
+       if (pit && vcpu->vcpu_id == 0)
+               return atomic_read(&pit->pit_state.pit_timer.pending);
+
+       return 0;
+}
+
+static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
+{
+       struct kvm_kpit_state *ps;
+       int restart_timer = 0;
+
+       ps = container_of(data, struct kvm_kpit_state, pit_timer.timer);
+
+       restart_timer = __pit_timer_fn(ps);
+
+       if (restart_timer)
+               return HRTIMER_RESTART;
+       else
+               return HRTIMER_NORESTART;
+}
+
+static void destroy_pit_timer(struct kvm_kpit_timer *pt)
+{
+       pr_debug("pit: execute del timer!\n");
+       hrtimer_cancel(&pt->timer);
+}
+
+static void create_pit_timer(struct kvm_kpit_timer *pt, u32 val, int is_period)
+{
+       s64 interval;
+
+       interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
+
+       pr_debug("pit: create pit timer, interval is %llu nsec\n", interval);
+
+       /* TODO: the new value only takes effect once the counter is retriggered */
+       hrtimer_cancel(&pt->timer);
+       pt->period = (is_period == 0) ? 0 : interval;
+       pt->timer.function = pit_timer_fn;
+       atomic_set(&pt->pending, 0);
+
+       hrtimer_start(&pt->timer, ktime_add_ns(ktime_get(), interval),
+                     HRTIMER_MODE_ABS);
+}
+
+static void pit_load_count(struct kvm *kvm, int channel, u32 val)
+{
+       struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+
+       WARN_ON(!mutex_is_locked(&ps->lock));
+
+       pr_debug("pit: load_count val is %d, channel is %d\n", val, channel);
+
+       /*
+        * Although the 8254 spec leaves the state after power-up undefined,
+        * some guests (e.g. Windows XP) rely on IRQ0 firing while booting.
+        * So treat a count of 0 as the maximum reload value rather than
+        * leaving the channel unprogrammed.
+        */
+       if (val == 0)
+               val = 0x10000;
+
+       ps->channels[channel].count_load_time = ktime_get();
+       ps->channels[channel].count = val;
+
+       if (channel != 0)
+               return;
+
+       /* Two kinds of timer:
+        * mode 1 is one-shot, mode 2 is periodic; any other mode tears the timer down. */
+       switch (ps->channels[0].mode) {
+       case 1:
+               create_pit_timer(&ps->pit_timer, val, 0);
+               break;
+       case 2:
+               create_pit_timer(&ps->pit_timer, val, 1);
+               break;
+       default:
+               destroy_pit_timer(&ps->pit_timer);
+       }
+}
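
For example, a guest that programs channel 0 in mode 2 with a count of 11932 (the classic 100 Hz setup) ends up with a periodic hrtimer whose period is muldiv64(11932, NSEC_PER_SEC, 1193181) = 10,000,159 ns, i.e. almost exactly 10 ms; reprogramming the channel into any mode other than 1 or 2 simply cancels the timer.
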
+
+void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val)
+{
+       mutex_lock(&kvm->arch.vpit->pit_state.lock);
+       pit_load_count(kvm, channel, val);
+       mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+}
+
+static void pit_ioport_write(struct kvm_io_device *this,
+                            gpa_t addr, int len, const void *data)
+{
+       struct kvm_pit *pit = (struct kvm_pit *)this->private;
+       struct kvm_kpit_state *pit_state = &pit->pit_state;
+       struct kvm *kvm = pit->kvm;
+       int channel, access;
+       struct kvm_kpit_channel_state *s;
+       u32 val = *(u32 *) data;
+
+       val  &= 0xff;
+       addr &= KVM_PIT_CHANNEL_MASK;
+
+       mutex_lock(&pit_state->lock);
+
+       if (val != 0)
+               pr_debug("pit: write addr is 0x%x, len is %d, val is 0x%x\n",
+                         (unsigned int)addr, len, val);
+
+       if (addr == 3) {
+               channel = val >> 6;
+               if (channel == 3) {
+                       /* Read-Back Command. */
+                       for (channel = 0; channel < 3; channel++) {
+                               s = &pit_state->channels[channel];
+                               if (val & (2 << channel)) {
+                                       if (!(val & 0x20))
+                                               pit_latch_count(kvm, channel);
+                                       if (!(val & 0x10))
+                                               pit_latch_status(kvm, channel);
+                               }
+                       }
+               } else {
+                       /* Select Counter <channel>. */
+                       s = &pit_state->channels[channel];
+                       access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
+                       if (access == 0) {
+                               pit_latch_count(kvm, channel);
+                       } else {
+                               s->rw_mode = access;
+                               s->read_state = access;
+                               s->write_state = access;
+                               s->mode = (val >> 1) & 7;
+                               if (s->mode > 5)
+                                       s->mode -= 4;
+                               s->bcd = val & 1;
+                       }
+               }
+       } else {
+               /* Write Count. */
+               s = &pit_state->channels[addr];
+               switch (s->write_state) {
+               default:
+               case RW_STATE_LSB:
+                       pit_load_count(kvm, addr, val);
+                       break;
+               case RW_STATE_MSB:
+                       pit_load_count(kvm, addr, val << 8);
+                       break;
+               case RW_STATE_WORD0:
+                       s->write_latch = val;
+                       s->write_state = RW_STATE_WORD1;
+                       break;
+               case RW_STATE_WORD1:
+                       pit_load_count(kvm, addr, s->write_latch | (val << 8));
+                       s->write_state = RW_STATE_WORD0;
+                       break;
+               }
+       }
+
+       mutex_unlock(&pit_state->lock);
+}
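
The command-byte decode in the addr == 3 branch can be illustrated with the value 0x34 that PC firmware conventionally writes to set up the system timer. The tiny userspace decoder below mirrors the bit slicing above (the main() driver is an illustration, not kernel code):

    #include <stdio.h>

    int main(void)
    {
            unsigned int val = 0x34;                 /* typical "set up channel 0" command */
            unsigned int channel = val >> 6;         /* bits 7-6: counter select   */
            unsigned int access  = (val >> 4) & 0x3; /* bits 5-4: access mode      */
            unsigned int mode    = (val >> 1) & 0x7; /* bits 3-1: operating mode   */
            unsigned int bcd     = val & 1;          /* bit 0:    BCD vs binary    */

            printf("channel=%u access=%u mode=%u bcd=%u\n", channel, access, mode, bcd);
            return 0;
    }

It prints channel=0 access=3 mode=2 bcd=0, i.e. channel 0, low-byte-then-high-byte access (RW_STATE_WORD0), rate-generator mode, binary counting.
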
+
+static void pit_ioport_read(struct kvm_io_device *this,
+                           gpa_t addr, int len, void *data)
+{
+       struct kvm_pit *pit = (struct kvm_pit *)this->private;
+       struct kvm_kpit_state *pit_state = &pit->pit_state;
+       struct kvm *kvm = pit->kvm;
+       int ret, count;
+       struct kvm_kpit_channel_state *s;
+
+       addr &= KVM_PIT_CHANNEL_MASK;
+       s = &pit_state->channels[addr];
+
+       mutex_lock(&pit_state->lock);
+
+       if (s->status_latched) {
+               s->status_latched = 0;
+               ret = s->status;
+       } else if (s->count_latched) {
+               switch (s->count_latched) {
+               default:
+               case RW_STATE_LSB:
+                       ret = s->latched_count & 0xff;
+                       s->count_latched = 0;
+                       break;
+               case RW_STATE_MSB:
+                       ret = s->latched_count >> 8;
+                       s->count_latched = 0;
+                       break;
+               case RW_STATE_WORD0:
+                       ret = s->latched_count & 0xff;
+                       s->count_latched = RW_STATE_MSB;
+                       break;
+               }
+       } else {
+               switch (s->read_state) {
+               default:
+               case RW_STATE_LSB:
+                       count = pit_get_count(kvm, addr);
+                       ret = count & 0xff;
+                       break;
+               case RW_STATE_MSB:
+                       count = pit_get_count(kvm, addr);
+                       ret = (count >> 8) & 0xff;
+                       break;
+               case RW_STATE_WORD0:
+                       count = pit_get_count(kvm, addr);
+                       ret = count & 0xff;
+                       s->read_state = RW_STATE_WORD1;
+                       break;
+               case RW_STATE_WORD1:
+                       count = pit_get_count(kvm, addr);
+                       ret = (count >> 8) & 0xff;
+                       s->read_state = RW_STATE_WORD0;
+                       break;
+               }
+       }
+
+       if (len > sizeof(ret))
+               len = sizeof(ret);
+       memcpy(data, (char *)&ret, len);
+
+       mutex_unlock(&pit_state->lock);
+}
+
+static int pit_in_range(struct kvm_io_device *this, gpa_t addr)
+{
+       return ((addr >= KVM_PIT_BASE_ADDRESS) &&
+               (addr < KVM_PIT_BASE_ADDRESS + KVM_PIT_MEM_LENGTH));
+}
+
+static void speaker_ioport_write(struct kvm_io_device *this,
+                                gpa_t addr, int len, const void *data)
+{
+       struct kvm_pit *pit = (struct kvm_pit *)this->private;
+       struct kvm_kpit_state *pit_state = &pit->pit_state;
+       struct kvm *kvm = pit->kvm;
+       u32 val = *(u32 *) data;
+
+       mutex_lock(&pit_state->lock);
+       pit_state->speaker_data_on = (val >> 1) & 1;
+       pit_set_gate(kvm, 2, val & 1);
+       mutex_unlock(&pit_state->lock);
+}
+
+static void speaker_ioport_read(struct kvm_io_device *this,
+                               gpa_t addr, int len, void *data)
+{
+       struct kvm_pit *pit = (struct kvm_pit *)this->private;
+       struct kvm_kpit_state *pit_state = &pit->pit_state;
+       struct kvm *kvm = pit->kvm;
+       unsigned int refresh_clock;
+       int ret;
+
+       /* Refresh clock toggles at about 15us. We approximate as 2^14ns. */
+       refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;
+
+       mutex_lock(&pit_state->lock);
+       ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
+               (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
+       if (len > sizeof(ret))
+               len = sizeof(ret);
+       memcpy(data, (char *)&ret, len);
+       mutex_unlock(&pit_state->lock);
+}
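
The byte assembled above mirrors the layout of the PC's port 0x61: bit 0 is the channel 2 gate, bit 1 the speaker data latch, bit 4 the DRAM refresh toggle, and bit 5 the OUT pin of channel 2. The refresh bit is approximated by bit 14 of the monotonic nanosecond clock, which toggles every 2^14 ns = 16.384 µs, close to the nominal ~15 µs mentioned in the comment.
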
+
+static int speaker_in_range(struct kvm_io_device *this, gpa_t addr)
+{
+       return (addr == KVM_SPEAKER_BASE_ADDRESS);
+}
+
+void kvm_pit_reset(struct kvm_pit *pit)
+{
+       int i;
+       struct kvm_kpit_channel_state *c;
+
+       mutex_lock(&pit->pit_state.lock);
+       for (i = 0; i < 3; i++) {
+               c = &pit->pit_state.channels[i];
+               c->mode = 0xff;
+               c->gate = (i != 2);
+               pit_load_count(pit->kvm, i, 0);
+       }
+       mutex_unlock(&pit->pit_state.lock);
+
+       atomic_set(&pit->pit_state.pit_timer.pending, 0);
+       pit->pit_state.inject_pending = 1;
+}
+
+struct kvm_pit *kvm_create_pit(struct kvm *kvm)
+{
+       struct kvm_pit *pit;
+       struct kvm_kpit_state *pit_state;
+
+       pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
+       if (!pit)
+               return NULL;
+
+       mutex_init(&pit->pit_state.lock);
+       mutex_lock(&pit->pit_state.lock);
+
+       /* Initialize PIO device */
+       pit->dev.read = pit_ioport_read;
+       pit->dev.write = pit_ioport_write;
+       pit->dev.in_range = pit_in_range;
+       pit->dev.private = pit;
+       kvm_io_bus_register_dev(&kvm->pio_bus, &pit->dev);
+
+       pit->speaker_dev.read = speaker_ioport_read;
+       pit->speaker_dev.write = speaker_ioport_write;
+       pit->speaker_dev.in_range = speaker_in_range;
+       pit->speaker_dev.private = pit;
+       kvm_io_bus_register_dev(&kvm->pio_bus, &pit->speaker_dev);
+
+       kvm->arch.vpit = pit;
+       pit->kvm = kvm;
+
+       pit_state = &pit->pit_state;
+       pit_state->pit = pit;
+       hrtimer_init(&pit_state->pit_timer.timer,
+                    CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+       mutex_unlock(&pit->pit_state.lock);
+
+       kvm_pit_reset(pit);
+
+       return pit;
+}
+
+void kvm_free_pit(struct kvm *kvm)
+{
+       struct hrtimer *timer;
+
+       if (kvm->arch.vpit) {
+               mutex_lock(&kvm->arch.vpit->pit_state.lock);
+               timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
+               hrtimer_cancel(timer);
+               mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+               kfree(kvm->arch.vpit);
+       }
+}
+
+void __inject_pit_timer_intr(struct kvm *kvm)
+{
+       mutex_lock(&kvm->lock);
+       kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 1);
+       kvm_ioapic_set_irq(kvm->arch.vioapic, 0, 0);
+       kvm_pic_set_irq(pic_irqchip(kvm), 0, 1);
+       kvm_pic_set_irq(pic_irqchip(kvm), 0, 0);
+       mutex_unlock(&kvm->lock);
+}
+
+void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu)
+{
+       struct kvm_pit *pit = vcpu->kvm->arch.vpit;
+       struct kvm *kvm = vcpu->kvm;
+       struct kvm_kpit_state *ps;
+
+       if (vcpu && pit) {
+               ps = &pit->pit_state;
+
+               /* Try to inject pending interrupts when:
+                * 1. a pending count exists, and
+                * 2. the last interrupt was accepted, or we have waited too long. */
+               if (atomic_read(&ps->pit_timer.pending) &&
+                   (ps->inject_pending ||
+                   (jiffies - ps->last_injected_time
+                               >= KVM_MAX_PIT_INTR_INTERVAL))) {
+                       ps->inject_pending = 0;
+                       __inject_pit_timer_intr(kvm);
+                       ps->last_injected_time = jiffies;
+               }
+       }
+}
+
+void kvm_pit_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
+{
+       struct kvm_arch *arch = &vcpu->kvm->arch;
+       struct kvm_kpit_state *ps;
+
+       if (vcpu && arch->vpit) {
+               ps = &arch->vpit->pit_state;
+               if (atomic_read(&ps->pit_timer.pending) &&
+               (((arch->vpic->pics[0].imr & 1) == 0 &&
+                 arch->vpic->pics[0].irq_base == vec) ||
+                 (arch->vioapic->redirtbl[0].fields.vector == vec &&
+                 arch->vioapic->redirtbl[0].fields.mask != 1))) {
+                       ps->inject_pending = 1;
+                       atomic_dec(&ps->pit_timer.pending);
+                       ps->channels[0].count_load_time = ktime_get();
+               }
+       }
+}
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
new file mode 100644 (file)
index 0000000..db25c2a
--- /dev/null
@@ -0,0 +1,63 @@
+#ifndef __I8254_H
+#define __I8254_H
+
+#include "iodev.h"
+
+struct kvm_kpit_timer {
+       struct hrtimer timer;
+       int irq;
+       s64 period; /* unit: ns */
+       s64 scheduled;
+       ktime_t last_update;
+       atomic_t pending;
+};
+
+struct kvm_kpit_channel_state {
+       u32 count; /* can be 65536 */
+       u16 latched_count;
+       u8 count_latched;
+       u8 status_latched;
+       u8 status;
+       u8 read_state;
+       u8 write_state;
+       u8 write_latch;
+       u8 rw_mode;
+       u8 mode;
+       u8 bcd; /* not supported */
+       u8 gate; /* timer start */
+       ktime_t count_load_time;
+};
+
+struct kvm_kpit_state {
+       struct kvm_kpit_channel_state channels[3];
+       struct kvm_kpit_timer pit_timer;
+       u32    speaker_data_on;
+       struct mutex lock;
+       struct kvm_pit *pit;
+       bool inject_pending; /* whether pending interrupts may be injected */
+       unsigned long last_injected_time;
+};
+
+struct kvm_pit {
+       unsigned long base_address;
+       struct kvm_io_device dev;
+       struct kvm_io_device speaker_dev;
+       struct kvm *kvm;
+       struct kvm_kpit_state pit_state;
+};
+
+#define KVM_PIT_BASE_ADDRESS       0x40
+#define KVM_SPEAKER_BASE_ADDRESS    0x61
+#define KVM_PIT_MEM_LENGTH         4
+#define KVM_PIT_FREQ               1193181
+#define KVM_MAX_PIT_INTR_INTERVAL  (HZ / 100)
+#define KVM_PIT_CHANNEL_MASK       0x3
+
+void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu);
+void kvm_pit_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
+void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val);
+struct kvm_pit *kvm_create_pit(struct kvm *kvm);
+void kvm_free_pit(struct kvm *kvm);
+void kvm_pit_reset(struct kvm_pit *pit);
+
+#endif
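
Some arithmetic behind these constants: at KVM_PIT_FREQ = 1,193,181 Hz a full 16-bit reload of 0x10000 ticks lasts 65536 / 1193181 s ≈ 54.9 ms, the traditional ~18.2 Hz BIOS tick, while KVM_MAX_PIT_INTR_INTERVAL (HZ / 100 jiffies, i.e. 10 ms of host time) is the "waited too long" threshold used by kvm_inject_pit_timer_irqs() in i8254.c above.
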
index e5714759e97fcee29989eda9a262fef7cd5183fb..ce1f583459b1f4875db0d3eeea72d728aa0a9873 100644 (file)
 #include <linux/kvm_host.h>
 
 #include "irq.h"
+#include "i8254.h"
+
+/*
+ * check if there are pending timer events
+ * to be processed.
+ */
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       int ret;
+
+       ret = pit_has_pending_timer(vcpu);
+       ret |= apic_has_pending_timer(vcpu);
+
+       return ret;
+}
+EXPORT_SYMBOL(kvm_cpu_has_pending_timer);
 
 /*
  * check if there is pending interrupt without
@@ -66,6 +82,7 @@ EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt);
 void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
 {
        kvm_inject_apic_timer_irqs(vcpu);
+       kvm_inject_pit_timer_irqs(vcpu);
        /* TODO: PIT, RTC etc. */
 }
 EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
@@ -73,6 +90,7 @@ EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
 void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
 {
        kvm_apic_timer_intr_post(vcpu, vec);
+       kvm_pit_timer_intr_post(vcpu, vec);
        /* TODO: PIT, RTC etc. */
 }
 EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
index fa5ed5d59b5da9fa83529b42e09301f25f97f87a..1802134b836fc352529f59e6fd7b82b81ef28e95 100644 (file)
@@ -85,4 +85,7 @@ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu);
 
+int pit_has_pending_timer(struct kvm_vcpu *vcpu);
+int apic_has_pending_timer(struct kvm_vcpu *vcpu);
+
 #endif
index ecdfe97e4635393eb0df2ba755ffd0150443e7dd..65ef0fc2c036ab15527e4abb04a9f2b3ca12eb4f 100644 (file)
@@ -39,6 +39,8 @@ struct vcpu_svm {
        unsigned long host_db_regs[NUM_DB_REGS];
        unsigned long host_dr6;
        unsigned long host_dr7;
+
+       u32 *msrpm;
 };
 
 #endif
index 68a6b1511934760e97117080166f78b9a7d5ab72..36809d79788bedffcd8332609c7313175aa09e3a 100644 (file)
 #include <linux/hrtimer.h>
 #include <linux/io.h>
 #include <linux/module.h>
+#include <linux/math64.h>
 #include <asm/processor.h>
 #include <asm/msr.h>
 #include <asm/page.h>
 #include <asm/current.h>
 #include <asm/apicdef.h>
 #include <asm/atomic.h>
-#include <asm/div64.h>
 #include "irq.h"
 
 #define PRId64 "d"
@@ -338,10 +338,10 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                } else
                        apic_clear_vector(vector, apic->regs + APIC_TMR);
 
-               if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
+               if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
                        kvm_vcpu_kick(vcpu);
-               else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) {
-                       vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+               else if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED) {
+                       vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                        if (waitqueue_active(&vcpu->wq))
                                wake_up_interruptible(&vcpu->wq);
                }
@@ -362,11 +362,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 
        case APIC_DM_INIT:
                if (level) {
-                       if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
+                       if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE)
                                printk(KERN_DEBUG
                                       "INIT on a runnable vcpu %d\n",
                                       vcpu->vcpu_id);
-                       vcpu->arch.mp_state = VCPU_MP_STATE_INIT_RECEIVED;
+                       vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
                        kvm_vcpu_kick(vcpu);
                } else {
                        printk(KERN_DEBUG
@@ -379,9 +379,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
        case APIC_DM_STARTUP:
                printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n",
                       vcpu->vcpu_id, vector);
-               if (vcpu->arch.mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
+               if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
                        vcpu->arch.sipi_vector = vector;
-                       vcpu->arch.mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
+                       vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
                        if (waitqueue_active(&vcpu->wq))
                                wake_up_interruptible(&vcpu->wq);
                }
@@ -526,8 +526,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
        } else
                passed = ktime_sub(now, apic->timer.last_update);
 
-       counter_passed = div64_64(ktime_to_ns(passed),
-                                 (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
+       counter_passed = div64_u64(ktime_to_ns(passed),
+                                  (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
 
        if (counter_passed > tmcct) {
                if (unlikely(!apic_lvtt_period(apic))) {
@@ -658,7 +658,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
        apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
                           PRIx64 ", "
                           "timer initial count 0x%x, period %lldns, "
-                          "expire @ 0x%016" PRIx64 ".\n", __FUNCTION__,
+                          "expire @ 0x%016" PRIx64 ".\n", __func__,
                           APIC_BUS_CYCLE_NS, ktime_to_ns(now),
                           apic_get_reg(apic, APIC_TMICT),
                           apic->timer.period,
@@ -691,7 +691,7 @@ static void apic_mmio_write(struct kvm_io_device *this,
        /* too common printing */
        if (offset != APIC_EOI)
                apic_debug("%s: offset 0x%x with length 0x%x, and value is "
-                          "0x%x\n", __FUNCTION__, offset, len, val);
+                          "0x%x\n", __func__, offset, len, val);
 
        offset &= 0xff0;
 
@@ -822,6 +822,7 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
        apic_set_tpr(apic, ((cr8 & 0x0f) << 4)
                     | (apic_get_reg(apic, APIC_TASKPRI) & 4));
 }
+EXPORT_SYMBOL_GPL(kvm_lapic_set_tpr);
 
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
 {
@@ -869,7 +870,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
        struct kvm_lapic *apic;
        int i;
 
-       apic_debug("%s\n", __FUNCTION__);
+       apic_debug("%s\n", __func__);
 
        ASSERT(vcpu);
        apic = vcpu->arch.apic;
@@ -907,7 +908,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
        apic_update_ppr(apic);
 
        apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
-                  "0x%016" PRIx64 ", base_address=0x%0lx.\n", __FUNCTION__,
+                  "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
                   vcpu, kvm_apic_id(apic),
                   vcpu->arch.apic_base, apic->base_address);
 }
@@ -940,7 +941,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
 
        atomic_inc(&apic->timer.pending);
        if (waitqueue_active(q)) {
-               apic->vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+               apic->vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
                wake_up_interruptible(q);
        }
        if (apic_lvtt_period(apic)) {
@@ -952,6 +953,16 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
        return result;
 }
 
+int apic_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       struct kvm_lapic *lapic = vcpu->arch.apic;
+
+       if (lapic)
+               return atomic_read(&lapic->timer.pending);
+
+       return 0;
+}
+
 static int __inject_apic_timer_irq(struct kvm_lapic *apic)
 {
        int vector;
index e55af12e11b7aeb523ff0296d7e525d2f7a33a6d..2ad6f5481671997c24c420b31fce7c9d67b31794 100644 (file)
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <linux/swap.h>
+#include <linux/hugetlb.h>
+#include <linux/compiler.h>
 
 #include <asm/page.h>
 #include <asm/cmpxchg.h>
 #include <asm/io.h>
 
+/*
+ * When set to true, this enables Two-Dimensional Paging, where the
+ * hardware walks two page tables:
+ * 1. the guest-virtual to guest-physical table, and
+ * 2. for each step of 1., the guest-physical to host-physical table.
+ * If the hardware supports this, shadow paging is not needed.
+ */
+bool tdp_enabled = false;
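
A toy, single-level userspace sketch of what the comment describes (array sizes, names and the page-table format here are invented purely for illustration; real NPT/EPT tables are multi-level and hardware-defined): every guest-virtual access goes through the guest's own table and then through a second, host-owned table.

    #include <stdio.h>
    #include <stdint.h>

    #define PAGES 16

    static uint64_t guest_pt[PAGES];  /* guest-virtual page  -> guest-physical page */
    static uint64_t nested_pt[PAGES]; /* guest-physical page -> host-physical page  */

    static uint64_t translate(uint64_t gva)
    {
            /* First dimension: the guest's own page table. */
            uint64_t gpa = (guest_pt[(gva >> 12) % PAGES] << 12) | (gva & 0xfff);
            /* Second dimension: the host-controlled nested table. */
            return (nested_pt[(gpa >> 12) % PAGES] << 12) | (gpa & 0xfff);
    }

    int main(void)
    {
            guest_pt[1]  = 5;  /* guest maps its virtual page 1 at guest-physical page 5 */
            nested_pt[5] = 9;  /* host backs guest-physical page 5 with host page 9      */
            printf("gva 0x1234 -> hpa 0x%llx\n", (unsigned long long)translate(0x1234));
            return 0;
    }

With the mappings above it prints gva 0x1234 -> hpa 0x9234, the nesting the hardware performs on every guest table walk when tdp_enabled is set.
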
+
 #undef MMU_DEBUG
 
 #undef AUDIT
@@ -101,8 +112,6 @@ static int dbg = 1;
 #define PT_FIRST_AVAIL_BITS_SHIFT 9
 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
 
-#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
-
 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
 
 #define PT64_LEVEL_BITS 9
@@ -159,6 +168,13 @@ static int dbg = 1;
 #define ACC_USER_MASK    PT_USER_MASK
 #define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
 
+struct kvm_pv_mmu_op_buffer {
+       void *ptr;
+       unsigned len;
+       unsigned processed;
+       char buf[512] __aligned(sizeof(long));
+};
+
 struct kvm_rmap_desc {
        u64 *shadow_ptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
@@ -200,11 +216,15 @@ static int is_present_pte(unsigned long pte)
 
 static int is_shadow_present_pte(u64 pte)
 {
-       pte &= ~PT_SHADOW_IO_MARK;
        return pte != shadow_trap_nonpresent_pte
                && pte != shadow_notrap_nonpresent_pte;
 }
 
+static int is_large_pte(u64 pte)
+{
+       return pte & PT_PAGE_SIZE_MASK;
+}
+
 static int is_writeble_pte(unsigned long pte)
 {
        return pte & PT_WRITABLE_MASK;
@@ -215,14 +235,14 @@ static int is_dirty_pte(unsigned long pte)
        return pte & PT_DIRTY_MASK;
 }
 
-static int is_io_pte(unsigned long pte)
+static int is_rmap_pte(u64 pte)
 {
-       return pte & PT_SHADOW_IO_MARK;
+       return is_shadow_present_pte(pte);
 }
 
-static int is_rmap_pte(u64 pte)
+static pfn_t spte_to_pfn(u64 pte)
 {
-       return is_shadow_present_pte(pte);
+       return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 }
 
 static gfn_t pse36_gfn_delta(u32 gpte)
@@ -348,17 +368,101 @@ static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
        kfree(rd);
 }
 
+/*
+ * Return the pointer to the largepage write count for a given
+ * gfn, handling slots that are not large page aligned.
+ */
+static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
+{
+       unsigned long idx;
+
+       idx = (gfn / KVM_PAGES_PER_HPAGE) -
+             (slot->base_gfn / KVM_PAGES_PER_HPAGE);
+       return &slot->lpage_info[idx].write_count;
+}
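
For instance, assuming KVM_PAGES_PER_HPAGE is 512 (2 MB huge pages over 4 KB base pages), a slot with base_gfn 0x1100, which is not huge-page aligned, and a shadowed gfn of 0x1400 gives idx = (0x1400 / 512) - (0x1100 / 512) = 10 - 8 = 2, so every gfn falling in the same 2 MB region shares one write_count entry regardless of where the slot boundary sits.
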
+
+static void account_shadowed(struct kvm *kvm, gfn_t gfn)
+{
+       int *write_count;
+
+       write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
+       *write_count += 1;
+       WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
+}
+
+static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
+{
+       int *write_count;
+
+       write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
+       *write_count -= 1;
+       WARN_ON(*write_count < 0);
+}
+
+static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
+{
+       struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
+       int *largepage_idx;
+
+       if (slot) {
+               largepage_idx = slot_largepage_idx(gfn, slot);
+               return *largepage_idx;
+       }
+
+       return 1;
+}
+
+static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
+{
+       struct vm_area_struct *vma;
+       unsigned long addr;
+
+       addr = gfn_to_hva(kvm, gfn);
+       if (kvm_is_error_hva(addr))
+               return 0;
+
+       vma = find_vma(current->mm, addr);
+       if (vma && is_vm_hugetlb_page(vma))
+               return 1;
+
+       return 0;
+}
+
+static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
+{
+       struct kvm_memory_slot *slot;
+
+       if (has_wrprotected_page(vcpu->kvm, large_gfn))
+               return 0;
+
+       if (!host_largepage_backed(vcpu->kvm, large_gfn))
+               return 0;
+
+       slot = gfn_to_memslot(vcpu->kvm, large_gfn);
+       if (slot && slot->dirty_bitmap)
+               return 0;
+
+       return 1;
+}
+
 /*
  * Take gfn and return the reverse mapping to it.
  * Note: gfn must be unaliased before this function gets called
  */
 
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
 {
        struct kvm_memory_slot *slot;
+       unsigned long idx;
 
        slot = gfn_to_memslot(kvm, gfn);
-       return &slot->rmap[gfn - slot->base_gfn];
+       if (!lpage)
+               return &slot->rmap[gfn - slot->base_gfn];
+
+       idx = (gfn / KVM_PAGES_PER_HPAGE) -
+             (slot->base_gfn / KVM_PAGES_PER_HPAGE);
+
+       return &slot->lpage_info[idx].rmap_pde;
 }
 
 /*
@@ -370,7 +474,7 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
  * If rmapp bit zero is one, (then rmap & ~1) points to a struct kvm_rmap_desc
  * containing more mappings.
  */
-static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
+static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 {
        struct kvm_mmu_page *sp;
        struct kvm_rmap_desc *desc;
@@ -382,7 +486,7 @@ static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
        gfn = unalias_gfn(vcpu->kvm, gfn);
        sp = page_header(__pa(spte));
        sp->gfns[spte - sp->spt] = gfn;
-       rmapp = gfn_to_rmap(vcpu->kvm, gfn);
+       rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
        if (!*rmapp) {
                rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
                *rmapp = (unsigned long)spte;
@@ -435,20 +539,21 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        struct kvm_mmu_page *sp;
-       struct page *page;
+       pfn_t pfn;
        unsigned long *rmapp;
        int i;
 
        if (!is_rmap_pte(*spte))
                return;
        sp = page_header(__pa(spte));
-       page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
-       mark_page_accessed(page);
+       pfn = spte_to_pfn(*spte);
+       if (*spte & PT_ACCESSED_MASK)
+               kvm_set_pfn_accessed(pfn);
        if (is_writeble_pte(*spte))
-               kvm_release_page_dirty(page);
+               kvm_release_pfn_dirty(pfn);
        else
-               kvm_release_page_clean(page);
-       rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt]);
+               kvm_release_pfn_clean(pfn);
+       rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
        if (!*rmapp) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
                BUG();
@@ -514,7 +619,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
        int write_protected = 0;
 
        gfn = unalias_gfn(kvm, gfn);
-       rmapp = gfn_to_rmap(kvm, gfn);
+       rmapp = gfn_to_rmap(kvm, gfn, 0);
 
        spte = rmap_next(kvm, rmapp, NULL);
        while (spte) {
@@ -527,8 +632,35 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
                }
                spte = rmap_next(kvm, rmapp, spte);
        }
+       if (write_protected) {
+               pfn_t pfn;
+
+               spte = rmap_next(kvm, rmapp, NULL);
+               pfn = spte_to_pfn(*spte);
+               kvm_set_pfn_dirty(pfn);
+       }
+
+       /* check for huge page mappings */
+       rmapp = gfn_to_rmap(kvm, gfn, 1);
+       spte = rmap_next(kvm, rmapp, NULL);
+       while (spte) {
+               BUG_ON(!spte);
+               BUG_ON(!(*spte & PT_PRESENT_MASK));
+               BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
+               pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
+               if (is_writeble_pte(*spte)) {
+                       rmap_remove(kvm, spte);
+                       --kvm->stat.lpages;
+                       set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+                       write_protected = 1;
+               }
+               spte = rmap_next(kvm, rmapp, spte);
+       }
+
        if (write_protected)
                kvm_flush_remote_tlbs(kvm);
+
+       account_shadowed(kvm, gfn);
 }
 
 #ifdef MMU_DEBUG
@@ -538,8 +670,8 @@ static int is_empty_shadow_page(u64 *spt)
        u64 *end;
 
        for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
-               if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
-                       printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
+               if (*pos != shadow_trap_nonpresent_pte) {
+                       printk(KERN_ERR "%s: %p %llx\n", __func__,
                               pos, *pos);
                        return 0;
                }
@@ -559,7 +691,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
 {
-       return gfn;
+       return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
 }
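
Because the reduction into the hash-table range now happens inside the helper (1 << KVM_MMU_HASH_SHIFT presumably being the number of buckets), the explicit "% KVM_NUM_MMU_PAGES" disappears from the callers in the hunks below.
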
 
 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
@@ -662,13 +794,14 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
        struct kvm_mmu_page *sp;
        struct hlist_node *node;
 
-       pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
-       index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+       pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
+       index = kvm_page_table_hashfn(gfn);
        bucket = &kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry(sp, node, bucket, hash_link)
-               if (sp->gfn == gfn && !sp->role.metaphysical) {
+               if (sp->gfn == gfn && !sp->role.metaphysical
+                   && !sp->role.invalid) {
                        pgprintk("%s: found role %x\n",
-                                __FUNCTION__, sp->role.word);
+                                __func__, sp->role.word);
                        return sp;
                }
        return NULL;
@@ -699,27 +832,27 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
        }
-       pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
+       pgprintk("%s: looking gfn %lx role %x\n", __func__,
                 gfn, role.word);
-       index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+       index = kvm_page_table_hashfn(gfn);
        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry(sp, node, bucket, hash_link)
                if (sp->gfn == gfn && sp->role.word == role.word) {
                        mmu_page_add_parent_pte(vcpu, sp, parent_pte);
-                       pgprintk("%s: found\n", __FUNCTION__);
+                       pgprintk("%s: found\n", __func__);
                        return sp;
                }
        ++vcpu->kvm->stat.mmu_cache_miss;
        sp = kvm_mmu_alloc_page(vcpu, parent_pte);
        if (!sp)
                return sp;
-       pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
+       pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
        sp->gfn = gfn;
        sp->role = role;
        hlist_add_head(&sp->hash_link, bucket);
-       vcpu->arch.mmu.prefetch_page(vcpu, sp);
        if (!metaphysical)
                rmap_write_protect(vcpu->kvm, gfn);
+       vcpu->arch.mmu.prefetch_page(vcpu, sp);
        return sp;
 }
 
@@ -745,11 +878,17 @@ static void kvm_mmu_page_unlink_children(struct kvm *kvm,
        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                ent = pt[i];
 
+               if (is_shadow_present_pte(ent)) {
+                       if (!is_large_pte(ent)) {
+                               ent &= PT64_BASE_ADDR_MASK;
+                               mmu_page_remove_parent_pte(page_header(ent),
+                                                          &pt[i]);
+                       } else {
+                               --kvm->stat.lpages;
+                               rmap_remove(kvm, &pt[i]);
+                       }
+               }
                pt[i] = shadow_trap_nonpresent_pte;
-               if (!is_shadow_present_pte(ent))
-                       continue;
-               ent &= PT64_BASE_ADDR_MASK;
-               mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
        }
        kvm_flush_remote_tlbs(kvm);
 }
@@ -789,10 +928,15 @@ static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
        }
        kvm_mmu_page_unlink_children(kvm, sp);
        if (!sp->root_count) {
+               if (!sp->role.metaphysical)
+                       unaccount_shadowed(kvm, sp->gfn);
                hlist_del(&sp->hash_link);
                kvm_mmu_free_page(kvm, sp);
-       } else
+       } else {
                list_move(&sp->link, &kvm->arch.active_mmu_pages);
+               sp->role.invalid = 1;
+               kvm_reload_remote_mmus(kvm);
+       }
        kvm_mmu_reset_last_pte_updated(kvm);
 }
 
@@ -838,13 +982,13 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
        struct hlist_node *node, *n;
        int r;
 
-       pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+       pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
        r = 0;
-       index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+       index = kvm_page_table_hashfn(gfn);
        bucket = &kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
                if (sp->gfn == gfn && !sp->role.metaphysical) {
-                       pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
+                       pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
                                 sp->role.word);
                        kvm_mmu_zap_page(kvm, sp);
                        r = 1;
@@ -857,7 +1001,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
        struct kvm_mmu_page *sp;
 
        while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
-               pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
+               pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
                kvm_mmu_zap_page(kvm, sp);
        }
 }
@@ -889,26 +1033,39 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                         unsigned pt_access, unsigned pte_access,
                         int user_fault, int write_fault, int dirty,
-                        int *ptwrite, gfn_t gfn, struct page *page)
+                        int *ptwrite, int largepage, gfn_t gfn,
+                        pfn_t pfn, bool speculative)
 {
        u64 spte;
        int was_rmapped = 0;
        int was_writeble = is_writeble_pte(*shadow_pte);
-       hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
        pgprintk("%s: spte %llx access %x write_fault %d"
                 " user_fault %d gfn %lx\n",
-                __FUNCTION__, *shadow_pte, pt_access,
+                __func__, *shadow_pte, pt_access,
                 write_fault, user_fault, gfn);
 
        if (is_rmap_pte(*shadow_pte)) {
-               if (host_pfn != page_to_pfn(page)) {
+               /*
+                * If we overwrite a PTE page pointer with a 2MB PMD, unlink
+                * the parent of the now unreachable PTE.
+                */
+               if (largepage && !is_large_pte(*shadow_pte)) {
+                       struct kvm_mmu_page *child;
+                       u64 pte = *shadow_pte;
+
+                       child = page_header(pte & PT64_BASE_ADDR_MASK);
+                       mmu_page_remove_parent_pte(child, shadow_pte);
+               } else if (pfn != spte_to_pfn(*shadow_pte)) {
                        pgprintk("hfn old %lx new %lx\n",
-                                host_pfn, page_to_pfn(page));
+                                spte_to_pfn(*shadow_pte), pfn);
                        rmap_remove(vcpu->kvm, shadow_pte);
+               } else {
+                       if (largepage)
+                               was_rmapped = is_large_pte(*shadow_pte);
+                       else
+                               was_rmapped = 1;
                }
-               else
-                       was_rmapped = 1;
        }
 
        /*
@@ -917,6 +1074,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
         * demand paging).
         */
        spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
+       if (!speculative)
+               pte_access |= PT_ACCESSED_MASK;
        if (!dirty)
                pte_access &= ~ACC_WRITE_MASK;
        if (!(pte_access & ACC_EXEC_MASK))
@@ -925,15 +1084,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
        spte |= PT_PRESENT_MASK;
        if (pte_access & ACC_USER_MASK)
                spte |= PT_USER_MASK;
+       if (largepage)
+               spte |= PT_PAGE_SIZE_MASK;
 
-       if (is_error_page(page)) {
-               set_shadow_pte(shadow_pte,
-                              shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
-               kvm_release_page_clean(page);
-               return;
-       }
-
-       spte |= page_to_phys(page);
+       spte |= (u64)pfn << PAGE_SHIFT;
 
        if ((pte_access & ACC_WRITE_MASK)
            || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
@@ -946,9 +1100,10 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
                }
 
                shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
-               if (shadow) {
+               if (shadow ||
+                  (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
                        pgprintk("%s: found shadow page for %lx, marking ro\n",
-                                __FUNCTION__, gfn);
+                                __func__, gfn);
                        pte_access &= ~ACC_WRITE_MASK;
                        if (is_writeble_pte(spte)) {
                                spte &= ~PT_WRITABLE_MASK;
@@ -964,18 +1119,25 @@ unshadowed:
        if (pte_access & ACC_WRITE_MASK)
                mark_page_dirty(vcpu->kvm, gfn);
 
-       pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
+       pgprintk("%s: setting spte %llx\n", __func__, spte);
+       pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
+                (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
+                (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
        set_shadow_pte(shadow_pte, spte);
+       if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
+           && (spte & PT_PRESENT_MASK))
+               ++vcpu->kvm->stat.lpages;
+
        page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
        if (!was_rmapped) {
-               rmap_add(vcpu, shadow_pte, gfn);
+               rmap_add(vcpu, shadow_pte, gfn, largepage);
                if (!is_rmap_pte(*shadow_pte))
-                       kvm_release_page_clean(page);
+                       kvm_release_pfn_clean(pfn);
        } else {
                if (was_writeble)
-                       kvm_release_page_dirty(page);
+                       kvm_release_pfn_dirty(pfn);
                else
-                       kvm_release_page_clean(page);
+                       kvm_release_pfn_clean(pfn);
        }
        if (!ptwrite || !*ptwrite)
                vcpu->arch.last_pte_updated = shadow_pte;
@@ -985,10 +1147,10 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
 
-static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
-                          gfn_t gfn, struct page *page)
+static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
+                          int largepage, gfn_t gfn, pfn_t pfn,
+                          int level)
 {
-       int level = PT32E_ROOT_LEVEL;
        hpa_t table_addr = vcpu->arch.mmu.root_hpa;
        int pt_write = 0;
 
@@ -1001,8 +1163,14 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
 
                if (level == 1) {
                        mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
-                                    0, write, 1, &pt_write, gfn, page);
-                       return pt_write || is_io_pte(table[index]);
+                                    0, write, 1, &pt_write, 0, gfn, pfn, false);
+                       return pt_write;
+               }
+
+               if (largepage && level == 2) {
+                       mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
+                                    0, write, 1, &pt_write, 1, gfn, pfn, false);
+                       return pt_write;
                }
 
                if (table[index] == shadow_trap_nonpresent_pte) {
@@ -1016,7 +1184,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
                                                     1, ACC_ALL, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
-                               kvm_release_page_clean(page);
+                               kvm_release_pfn_clean(pfn);
                                return -ENOMEM;
                        }
 
@@ -1030,21 +1198,30 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 {
        int r;
-
-       struct page *page;
-
-       down_read(&vcpu->kvm->slots_lock);
+       int largepage = 0;
+       pfn_t pfn;
 
        down_read(&current->mm->mmap_sem);
-       page = gfn_to_page(vcpu->kvm, gfn);
+       if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
+               gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+               largepage = 1;
+       }
+
+       pfn = gfn_to_pfn(vcpu->kvm, gfn);
        up_read(&current->mm->mmap_sem);
 
+       /* mmio */
+       if (is_error_pfn(pfn)) {
+               kvm_release_pfn_clean(pfn);
+               return 1;
+       }
+
        spin_lock(&vcpu->kvm->mmu_lock);
        kvm_mmu_free_some_pages(vcpu);
-       r = __nonpaging_map(vcpu, v, write, gfn, page);
+       r = __direct_map(vcpu, v, write, largepage, gfn, pfn,
+                        PT32E_ROOT_LEVEL);
        spin_unlock(&vcpu->kvm->mmu_lock);
 
-       up_read(&vcpu->kvm->slots_lock);
 
        return r;
 }
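
Worked through with KVM_PAGES_PER_HPAGE = 512 (an assumption matching 2 MB huge pages): a fault on gfn 0x12345 whose backing is huge-page mapped gets rounded down to 0x12200, and the single large spte installed by __direct_map() then covers gfn 0x12200 through 0x123ff.
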
@@ -1073,6 +1250,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 
                sp = page_header(root);
                --sp->root_count;
+               if (!sp->root_count && sp->role.invalid)
+                       kvm_mmu_zap_page(vcpu->kvm, sp);
                vcpu->arch.mmu.root_hpa = INVALID_PAGE;
                spin_unlock(&vcpu->kvm->mmu_lock);
                return;
@@ -1085,6 +1264,8 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
                        root &= PT64_BASE_ADDR_MASK;
                        sp = page_header(root);
                        --sp->root_count;
+                       if (!sp->root_count && sp->role.invalid)
+                               kvm_mmu_zap_page(vcpu->kvm, sp);
                }
                vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
        }
@@ -1097,6 +1278,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
        int i;
        gfn_t root_gfn;
        struct kvm_mmu_page *sp;
+       int metaphysical = 0;
 
        root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 
@@ -1105,14 +1287,20 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                hpa_t root = vcpu->arch.mmu.root_hpa;
 
                ASSERT(!VALID_PAGE(root));
+               if (tdp_enabled)
+                       metaphysical = 1;
                sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
-                                     PT64_ROOT_LEVEL, 0, ACC_ALL, NULL);
+                                     PT64_ROOT_LEVEL, metaphysical,
+                                     ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
                vcpu->arch.mmu.root_hpa = root;
                return;
        }
 #endif
+       metaphysical = !is_paging(vcpu);
+       if (tdp_enabled)
+               metaphysical = 1;
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->arch.mmu.pae_root[i];
 
@@ -1126,7 +1314,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                } else if (vcpu->arch.mmu.root_level == 0)
                        root_gfn = 0;
                sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
-                                     PT32_ROOT_LEVEL, !is_paging(vcpu),
+                                     PT32_ROOT_LEVEL, metaphysical,
                                      ACC_ALL, NULL);
                root = __pa(sp->spt);
                ++sp->root_count;
@@ -1146,7 +1334,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
        gfn_t gfn;
        int r;
 
-       pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
+       pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;
@@ -1160,6 +1348,41 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                             error_code & PFERR_WRITE_MASK, gfn);
 }
 
+static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
+                               u32 error_code)
+{
+       pfn_t pfn;
+       int r;
+       int largepage = 0;
+       gfn_t gfn = gpa >> PAGE_SHIFT;
+
+       ASSERT(vcpu);
+       ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
+
+       r = mmu_topup_memory_caches(vcpu);
+       if (r)
+               return r;
+
+       down_read(&current->mm->mmap_sem);
+       if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
+               gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+               largepage = 1;
+       }
+       pfn = gfn_to_pfn(vcpu->kvm, gfn);
+       up_read(&current->mm->mmap_sem);
+       if (is_error_pfn(pfn)) {
+               kvm_release_pfn_clean(pfn);
+               return 1;
+       }
+       spin_lock(&vcpu->kvm->mmu_lock);
+       kvm_mmu_free_some_pages(vcpu);
+       r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
+                        largepage, gfn, pfn, TDP_ROOT_LEVEL);
+       spin_unlock(&vcpu->kvm->mmu_lock);
+
+       return r;
+}
+
 static void nonpaging_free(struct kvm_vcpu *vcpu)
 {
        mmu_free_roots(vcpu);
@@ -1188,7 +1411,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
-       pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
+       pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
        mmu_free_roots(vcpu);
 }
 
@@ -1253,7 +1476,35 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu)
        return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
 }
 
-static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mmu *context = &vcpu->arch.mmu;
+
+       context->new_cr3 = nonpaging_new_cr3;
+       context->page_fault = tdp_page_fault;
+       context->free = nonpaging_free;
+       context->prefetch_page = nonpaging_prefetch_page;
+       context->shadow_root_level = TDP_ROOT_LEVEL;
+       context->root_hpa = INVALID_PAGE;
+
+       if (!is_paging(vcpu)) {
+               context->gva_to_gpa = nonpaging_gva_to_gpa;
+               context->root_level = 0;
+       } else if (is_long_mode(vcpu)) {
+               context->gva_to_gpa = paging64_gva_to_gpa;
+               context->root_level = PT64_ROOT_LEVEL;
+       } else if (is_pae(vcpu)) {
+               context->gva_to_gpa = paging64_gva_to_gpa;
+               context->root_level = PT32E_ROOT_LEVEL;
+       } else {
+               context->gva_to_gpa = paging32_gva_to_gpa;
+               context->root_level = PT32_ROOT_LEVEL;
+       }
+
+       return 0;
+}
+
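
Under TDP the shadow root level is fixed by the host, but gva_to_gpa still has to follow whatever paging mode the guest happens to be in, which is what the if/else chain above selects. A compact sketch of that selection in isolation; the enum values and function name are made up for illustration and are not KVM identifiers:

#include <stdbool.h>
#include <stdio.h>

enum root_level { RL_NONE = 0, RL_PT32 = 2, RL_PT32E = 3, RL_PT64 = 4 };

/* Pick the guest translation depth the way the TDP context setup does:
 * no paging -> identity, long mode -> 4 levels, PAE -> 3, else 2. */
static enum root_level guest_root_level(bool paging, bool long_mode, bool pae)
{
        if (!paging)
                return RL_NONE;
        if (long_mode)
                return RL_PT64;
        if (pae)
                return RL_PT32E;
        return RL_PT32;
}

int main(void)
{
        printf("PAE guest walks %d levels\n",
               guest_root_level(true, false, true));
        return 0;
}
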
+static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
@@ -1268,6 +1519,16 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
                return paging32_init_context(vcpu);
 }
 
+static int init_kvm_mmu(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.update_pte.pfn = bad_pfn;
+
+       if (tdp_enabled)
+               return init_kvm_tdp_mmu(vcpu);
+       else
+               return init_kvm_softmmu(vcpu);
+}
+
 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
 {
        ASSERT(vcpu);
@@ -1316,7 +1577,8 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 
        pte = *spte;
        if (is_shadow_present_pte(pte)) {
-               if (sp->role.level == PT_PAGE_TABLE_LEVEL)
+               if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
+                   is_large_pte(pte))
                        rmap_remove(vcpu->kvm, spte);
                else {
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
@@ -1324,24 +1586,26 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
                }
        }
        set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+       if (is_large_pte(pte))
+               --vcpu->kvm->stat.lpages;
 }
 
 static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *sp,
                                  u64 *spte,
-                                 const void *new, int bytes,
-                                 int offset_in_pte)
+                                 const void *new)
 {
-       if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
+       if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
+           && !vcpu->arch.update_pte.largepage) {
                ++vcpu->kvm->stat.mmu_pde_zapped;
                return;
        }
 
        ++vcpu->kvm->stat.mmu_pte_updated;
        if (sp->role.glevels == PT32_ROOT_LEVEL)
-               paging32_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
+               paging32_update_pte(vcpu, sp, spte, new);
        else
-               paging64_update_pte(vcpu, sp, spte, new, bytes, offset_in_pte);
+               paging64_update_pte(vcpu, sp, spte, new);
 }
 
 static bool need_remote_flush(u64 old, u64 new)
@@ -1378,7 +1642,9 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        gfn_t gfn;
        int r;
        u64 gpte = 0;
-       struct page *page;
+       pfn_t pfn;
+
+       vcpu->arch.update_pte.largepage = 0;
 
        if (bytes != 4 && bytes != 8)
                return;
@@ -1408,11 +1674,19 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
        down_read(&current->mm->mmap_sem);
-       page = gfn_to_page(vcpu->kvm, gfn);
+       if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
+               gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+               vcpu->arch.update_pte.largepage = 1;
+       }
+       pfn = gfn_to_pfn(vcpu->kvm, gfn);
        up_read(&current->mm->mmap_sem);
 
+       if (is_error_pfn(pfn)) {
+               kvm_release_pfn_clean(pfn);
+               return;
+       }
        vcpu->arch.update_pte.gfn = gfn;
-       vcpu->arch.update_pte.page = page;
+       vcpu->arch.update_pte.pfn = pfn;
 }
 
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
@@ -1423,7 +1697,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        struct hlist_node *node, *n;
        struct hlist_head *bucket;
        unsigned index;
-       u64 entry;
+       u64 entry, gentry;
        u64 *spte;
        unsigned offset = offset_in_page(gpa);
        unsigned pte_size;
@@ -1433,8 +1707,9 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        int level;
        int flooded = 0;
        int npte;
+       int r;
 
-       pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+       pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
        mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
        spin_lock(&vcpu->kvm->mmu_lock);
        kvm_mmu_free_some_pages(vcpu);
@@ -1450,7 +1725,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                vcpu->arch.last_pt_write_count = 1;
                vcpu->arch.last_pte_updated = NULL;
        }
-       index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
+       index = kvm_page_table_hashfn(gfn);
        bucket = &vcpu->kvm->arch.mmu_page_hash[index];
        hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
                if (sp->gfn != gfn || sp->role.metaphysical)
@@ -1496,20 +1771,29 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                                continue;
                }
                spte = &sp->spt[page_offset / sizeof(*spte)];
+               if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
+                       gentry = 0;
+                       r = kvm_read_guest_atomic(vcpu->kvm,
+                                                 gpa & ~(u64)(pte_size - 1),
+                                                 &gentry, pte_size);
+                       new = (const void *)&gentry;
+                       if (r < 0)
+                               new = NULL;
+               }
                while (npte--) {
                        entry = *spte;
                        mmu_pte_write_zap_pte(vcpu, sp, spte);
-                       mmu_pte_write_new_pte(vcpu, sp, spte, new, bytes,
-                                             page_offset & (pte_size - 1));
+                       if (new)
+                               mmu_pte_write_new_pte(vcpu, sp, spte, new);
                        mmu_pte_write_flush_tlb(vcpu, entry, *spte);
                        ++spte;
                }
        }
        kvm_mmu_audit(vcpu, "post pte write");
        spin_unlock(&vcpu->kvm->mmu_lock);
-       if (vcpu->arch.update_pte.page) {
-               kvm_release_page_clean(vcpu->arch.update_pte.page);
-               vcpu->arch.update_pte.page = NULL;
+       if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
+               kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
+               vcpu->arch.update_pte.pfn = bad_pfn;
        }
 }
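
The new block in kvm_mmu_pte_write re-reads the whole guest entry atomically whenever the trapped write is misaligned or shorter than one pte, instead of patching the shadow entry from partial data. A small sketch of just that completeness test, with hypothetical names and a toy main(), not the kernel path:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Does a guest write of 'bytes' at 'gpa' cover one whole, aligned pte of
 * 'pte_size' bytes?  If not, the full entry has to be fetched again. */
static bool covers_whole_pte(uint64_t gpa, unsigned bytes, unsigned pte_size)
{
        return !(gpa & (pte_size - 1)) && bytes >= pte_size;
}

int main(void)
{
        /* a 4-byte write into the middle of an 8-byte PAE/64-bit pte */
        printf("%d\n", covers_whole_pte(0x1004, 4, 8));  /* 0: must re-read */
        printf("%d\n", covers_whole_pte(0x1008, 8, 8));  /* 1: data is complete */
        return 0;
}
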
 
@@ -1518,9 +1802,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
        gpa_t gpa;
        int r;
 
-       down_read(&vcpu->kvm->slots_lock);
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
-       up_read(&vcpu->kvm->slots_lock);
 
        spin_lock(&vcpu->kvm->mmu_lock);
        r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -1577,6 +1859,12 @@ out:
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
+void kvm_enable_tdp(void)
+{
+       tdp_enabled = true;
+}
+EXPORT_SYMBOL_GPL(kvm_enable_tdp);
+
 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
        struct kvm_mmu_page *sp;
@@ -1677,7 +1965,53 @@ void kvm_mmu_zap_all(struct kvm *kvm)
        kvm_flush_remote_tlbs(kvm);
 }
 
-void kvm_mmu_module_exit(void)
+void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+{
+       struct kvm_mmu_page *page;
+
+       page = container_of(kvm->arch.active_mmu_pages.prev,
+                           struct kvm_mmu_page, link);
+       kvm_mmu_zap_page(kvm, page);
+}
+
+static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+       struct kvm *kvm;
+       struct kvm *kvm_freed = NULL;
+       int cache_count = 0;
+
+       spin_lock(&kvm_lock);
+
+       list_for_each_entry(kvm, &vm_list, vm_list) {
+               int npages;
+
+               spin_lock(&kvm->mmu_lock);
+               npages = kvm->arch.n_alloc_mmu_pages -
+                        kvm->arch.n_free_mmu_pages;
+               cache_count += npages;
+               if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
+                       kvm_mmu_remove_one_alloc_mmu_page(kvm);
+                       cache_count--;
+                       kvm_freed = kvm;
+               }
+               nr_to_scan--;
+
+               spin_unlock(&kvm->mmu_lock);
+       }
+       if (kvm_freed)
+               list_move_tail(&kvm_freed->vm_list, &vm_list);
+
+       spin_unlock(&kvm_lock);
+
+       return cache_count;
+}
+
+static struct shrinker mmu_shrinker = {
+       .shrink = mmu_shrink,
+       .seeks = DEFAULT_SEEKS * 10,
+};
+
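
The shrinker callback above walks every VM, sums the shadow pages in use, zaps one page from the first VM that has any, and reports the remaining cache size back to the memory-management core. A toy userspace model of that accounting, with plain arrays standing in for the VM list; nothing here is a KVM API:

#include <stdio.h>

/* Each element is one VM's count of in-use shadow pages.  Free one page
 * from the first VM that has any and return the total still cached. */
static int shrink_once(int used[], int nvms, int nr_to_scan)
{
        int cache_count = 0, freed = 0;

        for (int i = 0; i < nvms; i++) {
                cache_count += used[i];
                if (!freed && nr_to_scan > 0 && used[i] > 0) {
                        used[i]--;          /* zap one shadow page */
                        cache_count--;
                        freed = 1;
                }
                nr_to_scan--;
        }
        return cache_count;
}

int main(void)
{
        int used[] = { 0, 3, 5 };
        printf("pages still cached: %d\n", shrink_once(used, 3, 2));  /* 7 */
        return 0;
}
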
+void mmu_destroy_caches(void)
 {
        if (pte_chain_cache)
                kmem_cache_destroy(pte_chain_cache);
@@ -1687,6 +2021,12 @@ void kvm_mmu_module_exit(void)
                kmem_cache_destroy(mmu_page_header_cache);
 }
 
+void kvm_mmu_module_exit(void)
+{
+       mmu_destroy_caches();
+       unregister_shrinker(&mmu_shrinker);
+}
+
 int kvm_mmu_module_init(void)
 {
        pte_chain_cache = kmem_cache_create("kvm_pte_chain",
@@ -1706,10 +2046,12 @@ int kvm_mmu_module_init(void)
        if (!mmu_page_header_cache)
                goto nomem;
 
+       register_shrinker(&mmu_shrinker);
+
        return 0;
 
 nomem:
-       kvm_mmu_module_exit();
+       mmu_destroy_caches();
        return -ENOMEM;
 }
 
@@ -1732,6 +2074,127 @@ unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
        return nr_mmu_pages;
 }
 
+static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
+                               unsigned len)
+{
+       if (len > buffer->len)
+               return NULL;
+       return buffer->ptr;
+}
+
+static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
+                               unsigned len)
+{
+       void *ret;
+
+       ret = pv_mmu_peek_buffer(buffer, len);
+       if (!ret)
+               return ret;
+       buffer->ptr += len;
+       buffer->len -= len;
+       buffer->processed += len;
+       return ret;
+}
+
+static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
+                            gpa_t addr, gpa_t value)
+{
+       int bytes = 8;
+       int r;
+
+       if (!is_long_mode(vcpu) && !is_pae(vcpu))
+               bytes = 4;
+
+       r = mmu_topup_memory_caches(vcpu);
+       if (r)
+               return r;
+
+       if (!emulator_write_phys(vcpu, addr, &value, bytes))
+               return -EFAULT;
+
+       return 1;
+}
+
+static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
+{
+       kvm_x86_ops->tlb_flush(vcpu);
+       return 1;
+}
+
+static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
+{
+       spin_lock(&vcpu->kvm->mmu_lock);
+       mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
+       spin_unlock(&vcpu->kvm->mmu_lock);
+       return 1;
+}
+
+static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
+                            struct kvm_pv_mmu_op_buffer *buffer)
+{
+       struct kvm_mmu_op_header *header;
+
+       header = pv_mmu_peek_buffer(buffer, sizeof *header);
+       if (!header)
+               return 0;
+       switch (header->op) {
+       case KVM_MMU_OP_WRITE_PTE: {
+               struct kvm_mmu_op_write_pte *wpte;
+
+               wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
+               if (!wpte)
+                       return 0;
+               return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
+                                       wpte->pte_val);
+       }
+       case KVM_MMU_OP_FLUSH_TLB: {
+               struct kvm_mmu_op_flush_tlb *ftlb;
+
+               ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
+               if (!ftlb)
+                       return 0;
+               return kvm_pv_mmu_flush_tlb(vcpu);
+       }
+       case KVM_MMU_OP_RELEASE_PT: {
+               struct kvm_mmu_op_release_pt *rpt;
+
+               rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
+               if (!rpt)
+                       return 0;
+               return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
+       }
+       default: return 0;
+       }
+}
+
+int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
+                 gpa_t addr, unsigned long *ret)
+{
+       int r;
+       struct kvm_pv_mmu_op_buffer buffer;
+
+       buffer.ptr = buffer.buf;
+       buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
+       buffer.processed = 0;
+
+       r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
+       if (r)
+               goto out;
+
+       while (buffer.len) {
+               r = kvm_pv_mmu_op_one(vcpu, &buffer);
+               if (r < 0)
+                       goto out;
+               if (r == 0)
+                       break;
+       }
+
+       r = 1;
+out:
+       *ret = buffer.processed;
+       return r;
+}
+
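
kvm_pv_mmu_op copies the guest's hypercall buffer once, then consumes it record by record through the peek/read pair above: peek only checks that enough bytes remain, read additionally advances the cursor and the byte count reported back to the guest. A self-contained sketch of that cursor, under assumed names, outside any KVM structure:

#include <stddef.h>
#include <stdio.h>

/* Illustrative cursor over a byte buffer of packed records. */
struct op_buffer {
        unsigned char *ptr;
        size_t len;
        size_t processed;
};

static void *buf_peek(struct op_buffer *b, size_t n)
{
        return n > b->len ? NULL : b->ptr;
}

static void *buf_read(struct op_buffer *b, size_t n)
{
        void *p = buf_peek(b, n);

        if (!p)
                return NULL;
        b->ptr += n;        /* consume the record */
        b->len -= n;
        b->processed += n;  /* reported back to the caller */
        return p;
}

int main(void)
{
        unsigned char raw[12] = "abcdefghijk";
        struct op_buffer b = { raw, sizeof(raw), 0 };

        buf_read(&b, 8);
        printf("processed %zu, %zu left\n", b.processed, b.len);  /* 8, 4 */
        return 0;
}
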
 #ifdef AUDIT
 
 static const char *audit_msg;
@@ -1768,8 +2231,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                        audit_mappings_page(vcpu, ent, va, level - 1);
                } else {
                        gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
-                       struct page *page = gpa_to_page(vcpu, gpa);
-                       hpa_t hpa = page_to_phys(page);
+                       hpa_t hpa = (hpa_t)gpa_to_pfn(vcpu, gpa) << PAGE_SHIFT;
 
                        if (is_shadow_present_pte(ent)
                            && (ent & PT64_BASE_ADDR_MASK) != hpa)
@@ -1782,7 +2244,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                 && !is_error_hpa(hpa))
                                printk(KERN_ERR "audit: (%s) notrap shadow,"
                                       " valid guest gva %lx\n", audit_msg, va);
-                       kvm_release_page_clean(page);
+                       kvm_release_pfn_clean(pfn);
 
                }
        }
@@ -1867,7 +2329,7 @@ static void audit_rmap(struct kvm_vcpu *vcpu)
 
        if (n_rmap != n_actual)
                printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
-                      __FUNCTION__, audit_msg, n_rmap, n_actual);
+                      __func__, audit_msg, n_rmap, n_actual);
 }
 
 static void audit_write_protection(struct kvm_vcpu *vcpu)
@@ -1887,7 +2349,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
                if (*rmapp)
                        printk(KERN_ERR "%s: (%s) shadow page has writable"
                               " mappings: gfn %lx role %x\n",
-                              __FUNCTION__, audit_msg, sp->gfn,
+                              __func__, audit_msg, sp->gfn,
                               sp->role.word);
        }
 }
index 1fce19ec7a2380c61c79ef0f50e71e4c39d2af66..e64e9f56a65eb04889cb2c5912c9fda9d36d96f1 100644 (file)
@@ -3,6 +3,12 @@
 
 #include <linux/kvm_host.h>
 
+#ifdef CONFIG_X86_64
+#define TDP_ROOT_LEVEL PT64_ROOT_LEVEL
+#else
+#define TDP_ROOT_LEVEL PT32E_ROOT_LEVEL
+#endif
+
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
        if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
index ecc0856268c47c8c7a5c5773ac0d239d37dbc58e..156fe10288ae330c6d9ac2d3e0b857a9d6c853b5 100644 (file)
@@ -130,7 +130,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
        unsigned index, pt_access, pte_access;
        gpa_t pte_gpa;
 
-       pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
+       pgprintk("%s: addr %lx\n", __func__, addr);
 walk:
        walker->level = vcpu->arch.mmu.root_level;
        pte = vcpu->arch.cr3;
@@ -155,7 +155,7 @@ walk:
                pte_gpa += index * sizeof(pt_element_t);
                walker->table_gfn[walker->level - 1] = table_gfn;
                walker->pte_gpa[walker->level - 1] = pte_gpa;
-               pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
+               pgprintk("%s: table_gfn[%d] %lx\n", __func__,
                         walker->level - 1, table_gfn);
 
                kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
@@ -222,7 +222,7 @@ walk:
        walker->pt_access = pt_access;
        walker->pte_access = pte_access;
        pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
-                __FUNCTION__, (u64)pte, pt_access, pte_access);
+                __func__, (u64)pte, pt_access, pte_access);
        return 1;
 
 not_present:
@@ -243,31 +243,30 @@ err:
 }
 
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
-                             u64 *spte, const void *pte, int bytes,
-                             int offset_in_pte)
+                             u64 *spte, const void *pte)
 {
        pt_element_t gpte;
        unsigned pte_access;
-       struct page *npage;
+       pfn_t pfn;
+       int largepage = vcpu->arch.update_pte.largepage;
 
        gpte = *(const pt_element_t *)pte;
        if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
-               if (!offset_in_pte && !is_present_pte(gpte))
+               if (!is_present_pte(gpte))
                        set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
                return;
        }
-       if (bytes < sizeof(pt_element_t))
-               return;
-       pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
+       pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
        pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
        if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
                return;
-       npage = vcpu->arch.update_pte.page;
-       if (!npage)
+       pfn = vcpu->arch.update_pte.pfn;
+       if (is_error_pfn(pfn))
                return;
-       get_page(npage);
+       kvm_get_pfn(pfn);
        mmu_set_spte(vcpu, spte, page->role.access, pte_access, 0, 0,
-                    gpte & PT_DIRTY_MASK, NULL, gpte_to_gfn(gpte), npage);
+                    gpte & PT_DIRTY_MASK, NULL, largepage, gpte_to_gfn(gpte),
+                    pfn, true);
 }
 
 /*
@@ -275,8 +274,8 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
  */
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                         struct guest_walker *walker,
-                        int user_fault, int write_fault, int *ptwrite,
-                        struct page *page)
+                        int user_fault, int write_fault, int largepage,
+                        int *ptwrite, pfn_t pfn)
 {
        hpa_t shadow_addr;
        int level;
@@ -304,11 +303,19 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                shadow_ent = ((u64 *)__va(shadow_addr)) + index;
                if (level == PT_PAGE_TABLE_LEVEL)
                        break;
-               if (is_shadow_present_pte(*shadow_ent)) {
+
+               if (largepage && level == PT_DIRECTORY_LEVEL)
+                       break;
+
+               if (is_shadow_present_pte(*shadow_ent)
+                   && !is_large_pte(*shadow_ent)) {
                        shadow_addr = *shadow_ent & PT64_BASE_ADDR_MASK;
                        continue;
                }
 
+               if (is_large_pte(*shadow_ent))
+                       rmap_remove(vcpu->kvm, shadow_ent);
+
                if (level - 1 == PT_PAGE_TABLE_LEVEL
                    && walker->level == PT_DIRECTORY_LEVEL) {
                        metaphysical = 1;
@@ -329,7 +336,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
                                                  walker->pte_gpa[level - 2],
                                                  &curr_pte, sizeof(curr_pte));
                        if (r || curr_pte != walker->ptes[level - 2]) {
-                               kvm_release_page_clean(page);
+                               kvm_release_pfn_clean(pfn);
                                return NULL;
                        }
                }
@@ -342,7 +349,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
        mmu_set_spte(vcpu, shadow_ent, access, walker->pte_access & access,
                     user_fault, write_fault,
                     walker->ptes[walker->level-1] & PT_DIRTY_MASK,
-                    ptwrite, walker->gfn, page);
+                    ptwrite, largepage, walker->gfn, pfn, false);
 
        return shadow_ent;
 }
@@ -371,16 +378,16 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
        u64 *shadow_pte;
        int write_pt = 0;
        int r;
-       struct page *page;
+       pfn_t pfn;
+       int largepage = 0;
 
-       pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
+       pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
        kvm_mmu_audit(vcpu, "pre page fault");
 
        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;
 
-       down_read(&vcpu->kvm->slots_lock);
        /*
         * Look up the shadow pte for the faulting address.
         */
@@ -391,40 +398,45 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
         * The page is not mapped by the guest.  Let the guest handle it.
         */
        if (!r) {
-               pgprintk("%s: guest page fault\n", __FUNCTION__);
+               pgprintk("%s: guest page fault\n", __func__);
                inject_page_fault(vcpu, addr, walker.error_code);
                vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-               up_read(&vcpu->kvm->slots_lock);
                return 0;
        }
 
        down_read(&current->mm->mmap_sem);
-       page = gfn_to_page(vcpu->kvm, walker.gfn);
+       if (walker.level == PT_DIRECTORY_LEVEL) {
+               gfn_t large_gfn;
+               large_gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE-1);
+               if (is_largepage_backed(vcpu, large_gfn)) {
+                       walker.gfn = large_gfn;
+                       largepage = 1;
+               }
+       }
+       pfn = gfn_to_pfn(vcpu->kvm, walker.gfn);
        up_read(&current->mm->mmap_sem);
 
+       /* mmio */
+       if (is_error_pfn(pfn)) {
+               pgprintk("gfn %x is mmio\n", walker.gfn);
+               kvm_release_pfn_clean(pfn);
+               return 1;
+       }
+
        spin_lock(&vcpu->kvm->mmu_lock);
        kvm_mmu_free_some_pages(vcpu);
        shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-                                 &write_pt, page);
-       pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
+                                 largepage, &write_pt, pfn);
+
+       pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
                 shadow_pte, *shadow_pte, write_pt);
 
        if (!write_pt)
                vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 
-       /*
-        * mmio: emulate if accessible, otherwise its a guest fault.
-        */
-       if (shadow_pte && is_io_pte(*shadow_pte)) {
-               spin_unlock(&vcpu->kvm->mmu_lock);
-               up_read(&vcpu->kvm->slots_lock);
-               return 1;
-       }
-
        ++vcpu->stat.pf_fixed;
        kvm_mmu_audit(vcpu, "post page fault (fixed)");
        spin_unlock(&vcpu->kvm->mmu_lock);
-       up_read(&vcpu->kvm->slots_lock);
 
        return write_pt;
 }
diff --git a/arch/x86/kvm/segment_descriptor.h b/arch/x86/kvm/segment_descriptor.h
deleted file mode 100644 (file)
index 56fc4c8..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#ifndef __SEGMENT_DESCRIPTOR_H
-#define __SEGMENT_DESCRIPTOR_H
-
-struct segment_descriptor {
-       u16 limit_low;
-       u16 base_low;
-       u8  base_mid;
-       u8  type : 4;
-       u8  system : 1;
-       u8  dpl : 2;
-       u8  present : 1;
-       u8  limit_high : 4;
-       u8  avl : 1;
-       u8  long_mode : 1;
-       u8  default_op : 1;
-       u8  granularity : 1;
-       u8  base_high;
-} __attribute__((packed));
-
-#ifdef CONFIG_X86_64
-/* LDT or TSS descriptor in the GDT. 16 bytes. */
-struct segment_descriptor_64 {
-       struct segment_descriptor s;
-       u32 base_higher;
-       u32 pad_zero;
-};
-
-#endif
-#endif
index 1a582f1090e895aaa19634aa21d02ccf6c1584f0..89e0be2c10d0ee02153fd6f72df1ebd6e31f6e7b 100644 (file)
@@ -47,6 +47,18 @@ MODULE_LICENSE("GPL");
 #define SVM_FEATURE_LBRV (1 << 1)
 #define SVM_DEATURE_SVML (1 << 2)
 
+#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
+
+/* enable NPT for AMD64 and X86 with PAE */
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+static bool npt_enabled = true;
+#else
+static bool npt_enabled = false;
+#endif
+static int npt = 1;
+
+module_param(npt, int, S_IRUGO);
+
 static void kvm_reput_irq(struct vcpu_svm *svm);
 
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
@@ -54,8 +66,7 @@ static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
        return container_of(vcpu, struct vcpu_svm, vcpu);
 }
 
-unsigned long iopm_base;
-unsigned long msrpm_base;
+static unsigned long iopm_base;
 
 struct kvm_ldttss_desc {
        u16 limit0;
@@ -182,7 +193,7 @@ static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
 
 static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-       if (!(efer & EFER_LMA))
+       if (!npt_enabled && !(efer & EFER_LMA))
                efer &= ~EFER_LME;
 
        to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
@@ -219,12 +230,12 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
 
        if (!svm->next_rip) {
-               printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
+               printk(KERN_DEBUG "%s: NOP\n", __func__);
                return;
        }
        if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE)
                printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
-                      __FUNCTION__,
+                      __func__,
                       svm->vmcb->save.rip,
                       svm->next_rip);
 
@@ -279,11 +290,7 @@ static void svm_hardware_enable(void *garbage)
 
        struct svm_cpu_data *svm_data;
        uint64_t efer;
-#ifdef CONFIG_X86_64
-       struct desc_ptr gdt_descr;
-#else
        struct desc_ptr gdt_descr;
-#endif
        struct desc_struct *gdt;
        int me = raw_smp_processor_id();
 
@@ -302,7 +309,6 @@ static void svm_hardware_enable(void *garbage)
        svm_data->asid_generation = 1;
        svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
        svm_data->next_asid = svm_data->max_asid + 1;
-       svm_features = cpuid_edx(SVM_CPUID_FUNC);
 
        asm volatile ("sgdt %0" : "=m"(gdt_descr));
        gdt = (struct desc_struct *)gdt_descr.address;
@@ -361,12 +367,51 @@ static void set_msr_interception(u32 *msrpm, unsigned msr,
        BUG();
 }
 
+static void svm_vcpu_init_msrpm(u32 *msrpm)
+{
+       memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
+
+#ifdef CONFIG_X86_64
+       set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
+       set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
+       set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
+       set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
+       set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
+       set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
+#endif
+       set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
+       set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
+       set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
+       set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
+}
+
+static void svm_enable_lbrv(struct vcpu_svm *svm)
+{
+       u32 *msrpm = svm->msrpm;
+
+       svm->vmcb->control.lbr_ctl = 1;
+       set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
+       set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
+       set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
+       set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
+}
+
+static void svm_disable_lbrv(struct vcpu_svm *svm)
+{
+       u32 *msrpm = svm->msrpm;
+
+       svm->vmcb->control.lbr_ctl = 0;
+       set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
+       set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
+       set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
+       set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
+}
+
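
The pair of helpers above is driven by guest writes to DEBUGCTL further down in svm_set_msr: reserved bits are rejected, and bit 0 (LBR) decides whether the last-branch MSRs are passed straight through. A rough sketch of that dispatch with stand-in callbacks, not the SVM code path:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEBUGCTL_RESERVED (~0x3fULL)    /* only the low six bits are defined here */

static void set_lbr_passthrough(bool on)
{
        printf("LBR MSR pass-through %s\n", on ? "enabled" : "disabled");
}

static int write_debugctl(uint64_t data)
{
        if (data & DEBUGCTL_RESERVED)
                return -1;              /* the real path would fault the guest */
        set_lbr_passthrough(data & 1);  /* DEBUGCTL.LBR */
        return 0;
}

int main(void)
{
        write_debugctl(0x1);            /* enable last-branch recording */
        write_debugctl(0x0);            /* disable it again */
        return 0;
}
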
 static __init int svm_hardware_setup(void)
 {
        int cpu;
        struct page *iopm_pages;
-       struct page *msrpm_pages;
-       void *iopm_va, *msrpm_va;
+       void *iopm_va;
        int r;
 
        iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);
@@ -379,41 +424,33 @@ static __init int svm_hardware_setup(void)
        clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
 
+       if (boot_cpu_has(X86_FEATURE_NX))
+               kvm_enable_efer_bits(EFER_NX);
 
-       msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+       for_each_online_cpu(cpu) {
+               r = svm_cpu_init(cpu);
+               if (r)
+                       goto err;
+       }
 
-       r = -ENOMEM;
-       if (!msrpm_pages)
-               goto err_1;
+       svm_features = cpuid_edx(SVM_CPUID_FUNC);
 
-       msrpm_va = page_address(msrpm_pages);
-       memset(msrpm_va, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));
-       msrpm_base = page_to_pfn(msrpm_pages) << PAGE_SHIFT;
+       if (!svm_has(SVM_FEATURE_NPT))
+               npt_enabled = false;
 
-#ifdef CONFIG_X86_64
-       set_msr_interception(msrpm_va, MSR_GS_BASE, 1, 1);
-       set_msr_interception(msrpm_va, MSR_FS_BASE, 1, 1);
-       set_msr_interception(msrpm_va, MSR_KERNEL_GS_BASE, 1, 1);
-       set_msr_interception(msrpm_va, MSR_LSTAR, 1, 1);
-       set_msr_interception(msrpm_va, MSR_CSTAR, 1, 1);
-       set_msr_interception(msrpm_va, MSR_SYSCALL_MASK, 1, 1);
-#endif
-       set_msr_interception(msrpm_va, MSR_K6_STAR, 1, 1);
-       set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_CS, 1, 1);
-       set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_ESP, 1, 1);
-       set_msr_interception(msrpm_va, MSR_IA32_SYSENTER_EIP, 1, 1);
+       if (npt_enabled && !npt) {
+               printk(KERN_INFO "kvm: Nested Paging disabled\n");
+               npt_enabled = false;
+       }
 
-       for_each_online_cpu(cpu) {
-               r = svm_cpu_init(cpu);
-               if (r)
-                       goto err_2;
+       if (npt_enabled) {
+               printk(KERN_INFO "kvm: Nested Paging enabled\n");
+               kvm_enable_tdp();
        }
+
        return 0;
 
-err_2:
-       __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
-       msrpm_base = 0;
-err_1:
+err:
        __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
        iopm_base = 0;
        return r;
@@ -421,9 +458,8 @@ err_1:
 
 static __exit void svm_hardware_unsetup(void)
 {
-       __free_pages(pfn_to_page(msrpm_base >> PAGE_SHIFT), MSRPM_ALLOC_ORDER);
        __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
-       iopm_base = msrpm_base = 0;
+       iopm_base = 0;
 }
 
 static void init_seg(struct vmcb_seg *seg)
@@ -443,15 +479,14 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
        seg->base = 0;
 }
 
-static void init_vmcb(struct vmcb *vmcb)
+static void init_vmcb(struct vcpu_svm *svm)
 {
-       struct vmcb_control_area *control = &vmcb->control;
-       struct vmcb_save_area *save = &vmcb->save;
+       struct vmcb_control_area *control = &svm->vmcb->control;
+       struct vmcb_save_area *save = &svm->vmcb->save;
 
        control->intercept_cr_read =    INTERCEPT_CR0_MASK |
                                        INTERCEPT_CR3_MASK |
-                                       INTERCEPT_CR4_MASK |
-                                       INTERCEPT_CR8_MASK;
+                                       INTERCEPT_CR4_MASK;
 
        control->intercept_cr_write =   INTERCEPT_CR0_MASK |
                                        INTERCEPT_CR3_MASK |
@@ -471,23 +506,13 @@ static void init_vmcb(struct vmcb *vmcb)
                                        INTERCEPT_DR7_MASK;
 
        control->intercept_exceptions = (1 << PF_VECTOR) |
-                                       (1 << UD_VECTOR);
+                                       (1 << UD_VECTOR) |
+                                       (1 << MC_VECTOR);
 
 
        control->intercept =    (1ULL << INTERCEPT_INTR) |
                                (1ULL << INTERCEPT_NMI) |
                                (1ULL << INTERCEPT_SMI) |
-               /*
-                * selective cr0 intercept bug?
-                *      0:   0f 22 d8                mov    %eax,%cr3
-                *      3:   0f 20 c0                mov    %cr0,%eax
-                *      6:   0d 00 00 00 80          or     $0x80000000,%eax
-                *      b:   0f 22 c0                mov    %eax,%cr0
-                * set cr3 ->interception
-                * get cr0 ->interception
-                * set cr0 -> no interception
-                */
-               /*              (1ULL << INTERCEPT_SELECTIVE_CR0) | */
                                (1ULL << INTERCEPT_CPUID) |
                                (1ULL << INTERCEPT_INVD) |
                                (1ULL << INTERCEPT_HLT) |
@@ -508,7 +533,7 @@ static void init_vmcb(struct vmcb *vmcb)
                                (1ULL << INTERCEPT_MWAIT);
 
        control->iopm_base_pa = iopm_base;
-       control->msrpm_base_pa = msrpm_base;
+       control->msrpm_base_pa = __pa(svm->msrpm);
        control->tsc_offset = 0;
        control->int_ctl = V_INTR_MASKING_MASK;
 
@@ -550,13 +575,30 @@ static void init_vmcb(struct vmcb *vmcb)
        save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
        save->cr4 = X86_CR4_PAE;
        /* rdx = ?? */
+
+       if (npt_enabled) {
+               /* Setup VMCB for Nested Paging */
+               control->nested_ctl = 1;
+               control->intercept &= ~(1ULL << INTERCEPT_TASK_SWITCH);
+               control->intercept_exceptions &= ~(1 << PF_VECTOR);
+               control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
+                                               INTERCEPT_CR3_MASK);
+               control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
+                                                INTERCEPT_CR3_MASK);
+               save->g_pat = 0x0007040600070406ULL;
+               /* enable caching because the QEMU Bios doesn't enable it */
+               save->cr0 = X86_CR0_ET;
+               save->cr3 = 0;
+               save->cr4 = 0;
+       }
+       force_new_asid(&svm->vcpu);
 }
 
 static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       init_vmcb(svm->vmcb);
+       init_vmcb(svm);
 
        if (vcpu->vcpu_id != 0) {
                svm->vmcb->save.rip = 0;
@@ -571,6 +613,7 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 {
        struct vcpu_svm *svm;
        struct page *page;
+       struct page *msrpm_pages;
        int err;
 
        svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
@@ -589,12 +632,19 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
                goto uninit;
        }
 
+       err = -ENOMEM;
+       msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+       if (!msrpm_pages)
+               goto uninit;
+       svm->msrpm = page_address(msrpm_pages);
+       svm_vcpu_init_msrpm(svm->msrpm);
+
        svm->vmcb = page_address(page);
        clear_page(svm->vmcb);
        svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
        svm->asid_generation = 0;
        memset(svm->db_regs, 0, sizeof(svm->db_regs));
-       init_vmcb(svm->vmcb);
+       init_vmcb(svm);
 
        fx_init(&svm->vcpu);
        svm->vcpu.fpu_active = 1;
@@ -617,6 +667,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
 
        __free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
+       __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, svm);
 }
@@ -731,6 +782,13 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
        var->unusable = !var->present;
 }
 
+static int svm_get_cpl(struct kvm_vcpu *vcpu)
+{
+       struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
+
+       return save->cpl;
+}
+
 static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -784,6 +842,9 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                }
        }
 #endif
+       if (npt_enabled)
+               goto set;
+
        if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
                svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
                vcpu->fpu_active = 1;
@@ -791,18 +852,29 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
        vcpu->arch.cr0 = cr0;
        cr0 |= X86_CR0_PG | X86_CR0_WP;
-       cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        if (!vcpu->fpu_active) {
                svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
                cr0 |= X86_CR0_TS;
        }
+set:
+       /*
+        * re-enable caching here because the QEMU bios
+        * does not do it - this results in some delay at
+        * reboot
+        */
+       cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
        svm->vmcb->save.cr0 = cr0;
 }
 
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-       vcpu->arch.cr4 = cr4;
-       to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
+       unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
+
+       vcpu->arch.cr4 = cr4;
+       if (!npt_enabled)
+               cr4 |= X86_CR4_PAE;
+       cr4 |= host_cr4_mce;
+       to_svm(vcpu)->vmcb->save.cr4 = cr4;
 }
 
 static void svm_set_segment(struct kvm_vcpu *vcpu,
@@ -833,13 +905,6 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 
 }
 
-/* FIXME:
-
-       svm(vcpu)->vmcb->control.int_ctl &= ~V_TPR_MASK;
-       svm(vcpu)->vmcb->control.int_ctl |= (sregs->cr8 & V_TPR_MASK);
-
-*/
-
 static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
 {
        return -EOPNOTSUPP;
@@ -920,7 +985,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
        }
        default:
                printk(KERN_DEBUG "%s: unexpected dr %u\n",
-                      __FUNCTION__, dr);
+                      __func__, dr);
                *exception = UD_VECTOR;
                return;
        }
@@ -962,6 +1027,19 @@ static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
        return 1;
 }
 
+static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+       /*
+        * On an #MC intercept the MCE handler is not called automatically in
+        * the host. So do it by hand here.
+        */
+       asm volatile (
+               "int $0x12\n");
+       /* not sure if we ever come back to this point */
+
+       return 1;
+}
+
 static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
        /*
@@ -969,7 +1047,7 @@ static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
         * so reinitialize it.
         */
        clear_page(svm->vmcb);
-       init_vmcb(svm->vmcb);
+       init_vmcb(svm);
 
        kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
@@ -1033,9 +1111,18 @@ static int invalid_op_interception(struct vcpu_svm *svm,
 static int task_switch_interception(struct vcpu_svm *svm,
                                    struct kvm_run *kvm_run)
 {
-       pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__);
-       kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-       return 0;
+       u16 tss_selector;
+
+       tss_selector = (u16)svm->vmcb->control.exit_info_1;
+       if (svm->vmcb->control.exit_info_2 &
+           (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
+               return kvm_task_switch(&svm->vcpu, tss_selector,
+                                      TASK_SWITCH_IRET);
+       if (svm->vmcb->control.exit_info_2 &
+           (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
+               return kvm_task_switch(&svm->vcpu, tss_selector,
+                                      TASK_SWITCH_JMP);
+       return kvm_task_switch(&svm->vcpu, tss_selector, TASK_SWITCH_CALL);
 }
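
The rewritten handler pulls the TSS selector out of EXITINFO1 and uses bits 36 and 38 of EXITINFO2 (the SVM_EXITINFOSHIFT_TS_REASON_* values added to svm.h below) to tell an IRET- or JMP-initiated task switch from a CALL/INT one. A minimal decode of those fields in isolation, with assumed names and an example selector:

#include <stdint.h>
#include <stdio.h>

#define TS_REASON_IRET_BIT 36
#define TS_REASON_JMP_BIT  38

enum ts_reason { TS_CALL, TS_IRET, TS_JMP };

static enum ts_reason decode_reason(uint64_t exit_info_2)
{
        if (exit_info_2 & (1ULL << TS_REASON_IRET_BIT))
                return TS_IRET;
        if (exit_info_2 & (1ULL << TS_REASON_JMP_BIT))
                return TS_JMP;
        return TS_CALL;
}

int main(void)
{
        uint64_t exit_info_1 = 0x58;                    /* example TSS selector */
        uint64_t exit_info_2 = 1ULL << TS_REASON_JMP_BIT;

        printf("selector %#x, reason %d\n",
               (unsigned)(uint16_t)exit_info_1, decode_reason(exit_info_2));
        return 0;
}
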
 
 static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
@@ -1049,7 +1136,7 @@ static int emulate_on_interception(struct vcpu_svm *svm,
                                   struct kvm_run *kvm_run)
 {
        if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
-               pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);
+               pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
        return 1;
 }
 
@@ -1179,8 +1266,19 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
                svm->vmcb->save.sysenter_esp = data;
                break;
        case MSR_IA32_DEBUGCTLMSR:
-               pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTLMSR 0x%llx, nop\n",
-                               __FUNCTION__, data);
+               if (!svm_has(SVM_FEATURE_LBRV)) {
+                       pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
+                                       __func__, data);
+                       break;
+               }
+               if (data & DEBUGCTL_RESERVED_BITS)
+                       return 1;
+
+               svm->vmcb->save.dbgctl = data;
+               if (data & (1ULL<<0))
+                       svm_enable_lbrv(svm);
+               else
+                       svm_disable_lbrv(svm);
                break;
        case MSR_K7_EVNTSEL0:
        case MSR_K7_EVNTSEL1:
@@ -1265,6 +1363,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
        [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
        [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
        [SVM_EXIT_EXCP_BASE + NM_VECTOR]        = nm_interception,
+       [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
        [SVM_EXIT_INTR]                         = nop_on_interception,
        [SVM_EXIT_NMI]                          = nop_on_interception,
        [SVM_EXIT_SMI]                          = nop_on_interception,
@@ -1290,14 +1389,34 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
        [SVM_EXIT_WBINVD]                       = emulate_on_interception,
        [SVM_EXIT_MONITOR]                      = invalid_op_interception,
        [SVM_EXIT_MWAIT]                        = invalid_op_interception,
+       [SVM_EXIT_NPF]                          = pf_interception,
 };
 
-
 static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 exit_code = svm->vmcb->control.exit_code;
 
+       if (npt_enabled) {
+               int mmu_reload = 0;
+               if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
+                       svm_set_cr0(vcpu, svm->vmcb->save.cr0);
+                       mmu_reload = 1;
+               }
+               vcpu->arch.cr0 = svm->vmcb->save.cr0;
+               vcpu->arch.cr3 = svm->vmcb->save.cr3;
+               if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
+                       if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
+                               kvm_inject_gp(vcpu, 0);
+                               return 1;
+                       }
+               }
+               if (mmu_reload) {
+                       kvm_mmu_reset_context(vcpu);
+                       kvm_mmu_load(vcpu);
+               }
+       }
+
        kvm_reput_irq(svm);
 
        if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
@@ -1308,10 +1427,11 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        }
 
        if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
-           exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR)
+           exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
+           exit_code != SVM_EXIT_NPF)
                printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
                       "exit_code 0x%x\n",
-                      __FUNCTION__, svm->vmcb->control.exit_int_info,
+                      __func__, svm->vmcb->control.exit_int_info,
                       exit_code);
 
        if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
@@ -1364,6 +1484,27 @@ static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
        svm_inject_irq(svm, irq);
 }
 
+static void update_cr8_intercept(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+       struct vmcb *vmcb = svm->vmcb;
+       int max_irr, tpr;
+
+       if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
+               return;
+
+       vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
+
+       max_irr = kvm_lapic_find_highest_irr(vcpu);
+       if (max_irr == -1)
+               return;
+
+       tpr = kvm_lapic_get_cr8(vcpu) << 4;
+
+       if (tpr >= (max_irr & 0xf0))
+               vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
+}
+
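
The heuristic behind update_cr8_intercept: CR8 writes only need to trap when the current task priority already masks the highest pending interrupt, because only then can lowering CR8 unblock delivery. The comparison on its own, as a sketch with made-up names (a negative max_irr models "nothing pending"):

#include <stdbool.h>
#include <stdio.h>

static bool need_cr8_intercept(int max_irr, int cr8)
{
        int tpr;

        if (max_irr < 0)               /* nothing pending: never intercept */
                return false;
        tpr = cr8 << 4;                /* CR8 carries the top 4 bits of the TPR */
        return tpr >= (max_irr & 0xf0);
}

int main(void)
{
        printf("%d\n", need_cr8_intercept(0x51, 0x3));  /* 0: vector class above TPR */
        printf("%d\n", need_cr8_intercept(0x51, 0x7));  /* 1: TPR blocks it, trap CR8 */
        return 0;
}
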
 static void svm_intr_assist(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -1376,14 +1517,14 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
                              SVM_EVTINJ_VEC_MASK;
                vmcb->control.exit_int_info = 0;
                svm_inject_irq(svm, intr_vector);
-               return;
+               goto out;
        }
 
        if (vmcb->control.int_ctl & V_IRQ_MASK)
-               return;
+               goto out;
 
        if (!kvm_cpu_has_interrupt(vcpu))
-               return;
+               goto out;
 
        if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
            (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
@@ -1391,12 +1532,14 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
                /* unable to deliver irq, set pending irq */
                vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
                svm_inject_irq(svm, 0x0);
-               return;
+               goto out;
        }
        /* Okay, we can deliver the interrupt: grab it and update PIC state. */
        intr_vector = kvm_cpu_get_interrupt(vcpu);
        svm_inject_irq(svm, intr_vector);
        kvm_timer_intr_post(vcpu, intr_vector);
+out:
+       update_cr8_intercept(vcpu);
 }
 
 static void kvm_reput_irq(struct vcpu_svm *svm)
@@ -1482,6 +1625,29 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
 {
 }
 
+static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
+               int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
+               kvm_lapic_set_tpr(vcpu, cr8);
+       }
+}
+
+static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_svm *svm = to_svm(vcpu);
+       u64 cr8;
+
+       if (!irqchip_in_kernel(vcpu->kvm))
+               return;
+
+       cr8 = kvm_get_cr8(vcpu);
+       svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
+       svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
+}
+
 static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
@@ -1491,6 +1657,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        pre_svm_run(svm);
 
+       sync_lapic_to_cr8(vcpu);
+
        save_host_msrs(vcpu);
        fs_selector = read_fs();
        gs_selector = read_gs();
@@ -1499,6 +1667,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        svm->host_dr6 = read_dr6();
        svm->host_dr7 = read_dr7();
        svm->vmcb->save.cr2 = vcpu->arch.cr2;
+       /* required for live migration with NPT */
+       if (npt_enabled)
+               svm->vmcb->save.cr3 = vcpu->arch.cr3;
 
        if (svm->vmcb->save.dr7 & 0xff) {
                write_dr7(0);
@@ -1635,6 +1806,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        stgi();
 
+       sync_cr8_to_lapic(vcpu);
+
        svm->next_rip = 0;
 }
 
@@ -1642,6 +1815,12 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       if (npt_enabled) {
+               svm->vmcb->control.nested_cr3 = root;
+               force_new_asid(vcpu);
+               return;
+       }
+
        svm->vmcb->save.cr3 = root;
        force_new_asid(vcpu);
 
@@ -1709,6 +1888,7 @@ static struct kvm_x86_ops svm_x86_ops = {
        .get_segment_base = svm_get_segment_base,
        .get_segment = svm_get_segment,
        .set_segment = svm_set_segment,
+       .get_cpl = svm_get_cpl,
        .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
        .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
        .set_cr0 = svm_set_cr0,
index 5fd50491b55505fd1f02a033c3f244e134efdf5b..1b8afa78e869449f3c66bfa7c31929b429d9513a 100644 (file)
@@ -238,6 +238,9 @@ struct __attribute__ ((__packed__)) vmcb {
 #define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
 #define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
 
+#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
+#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
+
 #define        SVM_EXIT_READ_CR0       0x000
 #define        SVM_EXIT_READ_CR3       0x003
 #define        SVM_EXIT_READ_CR4       0x004
diff --git a/arch/x86/kvm/tss.h b/arch/x86/kvm/tss.h
new file mode 100644 (file)
index 0000000..622aa10
--- /dev/null
@@ -0,0 +1,59 @@
+#ifndef __TSS_SEGMENT_H
+#define __TSS_SEGMENT_H
+
+struct tss_segment_32 {
+       u32 prev_task_link;
+       u32 esp0;
+       u32 ss0;
+       u32 esp1;
+       u32 ss1;
+       u32 esp2;
+       u32 ss2;
+       u32 cr3;
+       u32 eip;
+       u32 eflags;
+       u32 eax;
+       u32 ecx;
+       u32 edx;
+       u32 ebx;
+       u32 esp;
+       u32 ebp;
+       u32 esi;
+       u32 edi;
+       u32 es;
+       u32 cs;
+       u32 ss;
+       u32 ds;
+       u32 fs;
+       u32 gs;
+       u32 ldt_selector;
+       u16 t;
+       u16 io_map;
+};
+
+struct tss_segment_16 {
+       u16 prev_task_link;
+       u16 sp0;
+       u16 ss0;
+       u16 sp1;
+       u16 ss1;
+       u16 sp2;
+       u16 ss2;
+       u16 ip;
+       u16 flag;
+       u16 ax;
+       u16 cx;
+       u16 dx;
+       u16 bx;
+       u16 sp;
+       u16 bp;
+       u16 si;
+       u16 di;
+       u16 es;
+       u16 cs;
+       u16 ss;
+       u16 ds;
+       u16 ldt;
+};
+
+#endif
index 8e1462880d1f1dec6d6162124db057ae5f687868..8e5d6645b90d11e08c6f71153e722a004c570457 100644 (file)
@@ -17,7 +17,6 @@
 
 #include "irq.h"
 #include "vmx.h"
-#include "segment_descriptor.h"
 #include "mmu.h"
 
 #include <linux/kvm_host.h>
@@ -37,6 +36,12 @@ MODULE_LICENSE("GPL");
 static int bypass_guest_pf = 1;
 module_param(bypass_guest_pf, bool, 0);
 
+static int enable_vpid = 1;
+module_param(enable_vpid, bool, 0);
+
+static int flexpriority_enabled = 1;
+module_param(flexpriority_enabled, bool, 0);
+
 struct vmcs {
        u32 revision_id;
        u32 abort;
@@ -71,6 +76,7 @@ struct vcpu_vmx {
                        unsigned rip;
                } irq;
        } rmode;
+       int vpid;
 };
 
 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -85,6 +91,10 @@ static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 
 static struct page *vmx_io_bitmap_a;
 static struct page *vmx_io_bitmap_b;
+static struct page *vmx_msr_bitmap;
+
+static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
+static DEFINE_SPINLOCK(vmx_vpid_lock);
 
 static struct vmcs_config {
        int size;
@@ -176,6 +186,11 @@ static inline int is_external_interrupt(u32 intr_info)
                == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
+static inline int cpu_has_vmx_msr_bitmap(void)
+{
+       return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS);
+}
+
 static inline int cpu_has_vmx_tpr_shadow(void)
 {
        return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
@@ -194,8 +209,9 @@ static inline int cpu_has_secondary_exec_ctrls(void)
 
 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
 {
-       return (vmcs_config.cpu_based_2nd_exec_ctrl &
-               SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
+       return flexpriority_enabled
+               && (vmcs_config.cpu_based_2nd_exec_ctrl &
+                   SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
 }
 
 static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
@@ -204,6 +220,12 @@ static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
                (irqchip_in_kernel(kvm)));
 }
 
+static inline int cpu_has_vmx_vpid(void)
+{
+       return (vmcs_config.cpu_based_2nd_exec_ctrl &
+               SECONDARY_EXEC_ENABLE_VPID);
+}
+
 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 {
        int i;
@@ -214,6 +236,20 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
        return -1;
 }
 
+static inline void __invvpid(int ext, u16 vpid, gva_t gva)
+{
+    struct {
+       u64 vpid : 16;
+       u64 rsvd : 48;
+       u64 gva;
+    } operand = { vpid, 0, gva };
+
+    asm volatile (ASM_VMX_INVVPID
+                 /* CF==1 or ZF==1 --> rc = -1 */
+                 "; ja 1f ; ud2 ; 1:"
+                 : : "a"(&operand), "c"(ext) : "cc", "memory");
+}
+
 static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
 {
        int i;
@@ -257,6 +293,14 @@ static void vcpu_clear(struct vcpu_vmx *vmx)
        vmx->launched = 0;
 }
 
+static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
+{
+       if (vmx->vpid == 0)
+               return;
+
+       __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
+}
+
 static unsigned long vmcs_readl(unsigned long field)
 {
        unsigned long value;
@@ -353,7 +397,7 @@ static void reload_tss(void)
         * VT restores TR but not its size.  Useless.
         */
        struct descriptor_table gdt;
-       struct segment_descriptor *descs;
+       struct desc_struct *descs;
 
        get_gdt(&gdt);
        descs = (void *)gdt.base;
@@ -485,11 +529,12 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u64 phys_addr = __pa(vmx->vmcs);
-       u64 tsc_this, delta;
+       u64 tsc_this, delta, new_offset;
 
        if (vcpu->cpu != cpu) {
                vcpu_clear(vmx);
                kvm_migrate_apic_timer(vcpu);
+               vpid_sync_vcpu_all(vmx);
        }
 
        if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
@@ -524,8 +569,11 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 * Make sure the time stamp counter is monotonous.
                 */
                rdtscll(tsc_this);
-               delta = vcpu->arch.host_tsc - tsc_this;
-               vmcs_write64(TSC_OFFSET, vmcs_read64(TSC_OFFSET) + delta);
+               if (tsc_this < vcpu->arch.host_tsc) {
+                       delta = vcpu->arch.host_tsc - tsc_this;
+                       new_offset = vmcs_read64(TSC_OFFSET) + delta;
+                       vmcs_write64(TSC_OFFSET, new_offset);
+               }
        }
 }
 
@@ -596,7 +644,7 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 {
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                     nr | INTR_TYPE_EXCEPTION
-                    | (has_error_code ? INTR_INFO_DELIEVER_CODE_MASK : 0)
+                    | (has_error_code ? INTR_INFO_DELIVER_CODE_MASK : 0)
                     | INTR_INFO_VALID_MASK);
        if (has_error_code)
                vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
@@ -959,6 +1007,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
              CPU_BASED_MOV_DR_EXITING |
              CPU_BASED_USE_TSC_OFFSETING;
        opt = CPU_BASED_TPR_SHADOW |
+             CPU_BASED_USE_MSR_BITMAPS |
              CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
        if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
                                &_cpu_based_exec_control) < 0)
@@ -971,7 +1020,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
        if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
                min = 0;
                opt = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
-                       SECONDARY_EXEC_WBINVD_EXITING;
+                       SECONDARY_EXEC_WBINVD_EXITING |
+                       SECONDARY_EXEC_ENABLE_VPID;
                if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS2,
                                        &_cpu_based_2nd_exec_control) < 0)
                        return -EIO;
@@ -1080,6 +1130,10 @@ static __init int hardware_setup(void)
 {
        if (setup_vmcs_config(&vmcs_config) < 0)
                return -EIO;
+
+       if (boot_cpu_has(X86_FEATURE_NX))
+               kvm_enable_efer_bits(EFER_NX);
+
        return alloc_kvm_area();
 }
 
@@ -1214,7 +1268,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
        guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
        if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
                printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
-                      __FUNCTION__);
+                      __func__);
                vmcs_write32(GUEST_TR_AR_BYTES,
                             (guest_tr_ar & ~AR_TYPE_MASK)
                             | AR_TYPE_BUSY_64_TSS);
@@ -1239,6 +1293,11 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
 
 #endif
 
+static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
+{
+       vpid_sync_vcpu_all(to_vmx(vcpu));
+}
+
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
@@ -1275,6 +1334,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
+       vmx_flush_tlb(vcpu);
        vmcs_writel(GUEST_CR3, cr3);
        if (vcpu->arch.cr0 & X86_CR0_PE)
                vmx_fpu_deactivate(vcpu);
@@ -1288,14 +1348,14 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        vcpu->arch.cr4 = cr4;
 }
 
-#ifdef CONFIG_X86_64
-
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
 
        vcpu->arch.shadow_efer = efer;
+       if (!msr)
+               return;
        if (efer & EFER_LMA) {
                vmcs_write32(VM_ENTRY_CONTROLS,
                                     vmcs_read32(VM_ENTRY_CONTROLS) |
@@ -1312,8 +1372,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
        setup_msrs(vmx);
 }
 
-#endif
-
 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
@@ -1344,6 +1402,20 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
        var->unusable = (ar >> 16) & 1;
 }
 
+static int vmx_get_cpl(struct kvm_vcpu *vcpu)
+{
+       struct kvm_segment kvm_seg;
+
+       if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
+               return 0;
+
+       if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
+               return 3;
+
+       vmx_get_segment(vcpu, &kvm_seg, VCPU_SREG_CS);
+       return kvm_seg.selector & 3;
+}
+
 static u32 vmx_segment_access_rights(struct kvm_segment *var)
 {
        u32 ar;
@@ -1433,7 +1505,6 @@ static int init_rmode_tss(struct kvm *kvm)
        int ret = 0;
        int r;
 
-       down_read(&kvm->slots_lock);
        r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
        if (r < 0)
                goto out;
@@ -1456,7 +1527,6 @@ static int init_rmode_tss(struct kvm *kvm)
 
        ret = 1;
 out:
-       up_read(&kvm->slots_lock);
        return ret;
 }
 
@@ -1494,6 +1564,46 @@ out:
        return r;
 }
 
+static void allocate_vpid(struct vcpu_vmx *vmx)
+{
+       int vpid;
+
+       vmx->vpid = 0;
+       if (!enable_vpid || !cpu_has_vmx_vpid())
+               return;
+       spin_lock(&vmx_vpid_lock);
+       vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
+       if (vpid < VMX_NR_VPIDS) {
+               vmx->vpid = vpid;
+               __set_bit(vpid, vmx_vpid_bitmap);
+       }
+       spin_unlock(&vmx_vpid_lock);
+}
+
+void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
+{
+       void *va;
+
+       if (!cpu_has_vmx_msr_bitmap())
+               return;
+
+       /*
+        * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+        * have the write-low and read-high bitmap offsets the wrong way round.
+        * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+        */
+       va = kmap(msr_bitmap);
+       if (msr <= 0x1fff) {
+               __clear_bit(msr, va + 0x000); /* read-low */
+               __clear_bit(msr, va + 0x800); /* write-low */
+       } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+               msr &= 0x1fff;
+               __clear_bit(msr, va + 0x400); /* read-high */
+               __clear_bit(msr, va + 0xc00); /* write-high */
+       }
+       kunmap(msr_bitmap);
+}
+
 /*
  * Sets up the vmcs for emulated real mode.
  */
@@ -1511,6 +1621,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
        vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
 
+       if (cpu_has_vmx_msr_bitmap())
+               vmcs_write64(MSR_BITMAP, page_to_phys(vmx_msr_bitmap));
+
        vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
 
        /* Control */
@@ -1532,6 +1645,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
                        exec_control &=
                                ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+               if (vmx->vpid == 0)
+                       exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
                vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
        }
 
@@ -1613,6 +1728,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
        u64 msr;
        int ret;
 
+       down_read(&vcpu->kvm->slots_lock);
        if (!init_rmode_tss(vmx->vcpu.kvm)) {
                ret = -ENOMEM;
                goto out;
@@ -1621,7 +1737,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
        vmx->vcpu.arch.rmode.active = 0;
 
        vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
-       set_cr8(&vmx->vcpu, 0);
+       kvm_set_cr8(&vmx->vcpu, 0);
        msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
        if (vmx->vcpu.vcpu_id == 0)
                msr |= MSR_IA32_APICBASE_BSP;
@@ -1704,18 +1820,22 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
                vmcs_write64(APIC_ACCESS_ADDR,
                             page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
 
+       if (vmx->vpid != 0)
+               vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
+
        vmx->vcpu.arch.cr0 = 0x60000010;
        vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
        vmx_set_cr4(&vmx->vcpu, 0);
-#ifdef CONFIG_X86_64
        vmx_set_efer(&vmx->vcpu, 0);
-#endif
        vmx_fpu_activate(&vmx->vcpu);
        update_exception_bitmap(&vmx->vcpu);
 
-       return 0;
+       vpid_sync_vcpu_all(vmx);
+
+       ret = 0;
 
 out:
+       up_read(&vcpu->kvm->slots_lock);
        return ret;
 }
 
@@ -1723,6 +1843,8 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+       KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler);
+
        if (vcpu->arch.rmode.active) {
                vmx->rmode.irq.pending = true;
                vmx->rmode.irq.vector = irq;
@@ -1844,7 +1966,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        if ((vect_info & VECTORING_INFO_VALID_MASK) &&
                                                !is_page_fault(intr_info))
                printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
-                      "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
+                      "intr info 0x%x\n", __func__, vect_info, intr_info);
 
        if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
                int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
@@ -1869,10 +1991,12 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        error_code = 0;
        rip = vmcs_readl(GUEST_RIP);
-       if (intr_info & INTR_INFO_DELIEVER_CODE_MASK)
+       if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
                error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
        if (is_page_fault(intr_info)) {
                cr2 = vmcs_readl(EXIT_QUALIFICATION);
+               KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
+                           (u32)((u64)cr2 >> 32), handler);
                return kvm_mmu_page_fault(vcpu, cr2, error_code);
        }
 
@@ -1901,6 +2025,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
                                     struct kvm_run *kvm_run)
 {
        ++vcpu->stat.irq_exits;
+       KVMTRACE_1D(INTR, vcpu, vmcs_read32(VM_EXIT_INTR_INFO), handler);
        return 1;
 }
 
@@ -1958,25 +2083,27 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        reg = (exit_qualification >> 8) & 15;
        switch ((exit_qualification >> 4) & 3) {
        case 0: /* mov to cr */
+               KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)vcpu->arch.regs[reg],
+                           (u32)((u64)vcpu->arch.regs[reg] >> 32), handler);
                switch (cr) {
                case 0:
                        vcpu_load_rsp_rip(vcpu);
-                       set_cr0(vcpu, vcpu->arch.regs[reg]);
+                       kvm_set_cr0(vcpu, vcpu->arch.regs[reg]);
                        skip_emulated_instruction(vcpu);
                        return 1;
                case 3:
                        vcpu_load_rsp_rip(vcpu);
-                       set_cr3(vcpu, vcpu->arch.regs[reg]);
+                       kvm_set_cr3(vcpu, vcpu->arch.regs[reg]);
                        skip_emulated_instruction(vcpu);
                        return 1;
                case 4:
                        vcpu_load_rsp_rip(vcpu);
-                       set_cr4(vcpu, vcpu->arch.regs[reg]);
+                       kvm_set_cr4(vcpu, vcpu->arch.regs[reg]);
                        skip_emulated_instruction(vcpu);
                        return 1;
                case 8:
                        vcpu_load_rsp_rip(vcpu);
-                       set_cr8(vcpu, vcpu->arch.regs[reg]);
+                       kvm_set_cr8(vcpu, vcpu->arch.regs[reg]);
                        skip_emulated_instruction(vcpu);
                        if (irqchip_in_kernel(vcpu->kvm))
                                return 1;
@@ -1990,6 +2117,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                vcpu->arch.cr0 &= ~X86_CR0_TS;
                vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
                vmx_fpu_activate(vcpu);
+               KVMTRACE_0D(CLTS, vcpu, handler);
                skip_emulated_instruction(vcpu);
                return 1;
        case 1: /*mov from cr*/
@@ -1998,18 +2126,24 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                        vcpu_load_rsp_rip(vcpu);
                        vcpu->arch.regs[reg] = vcpu->arch.cr3;
                        vcpu_put_rsp_rip(vcpu);
+                       KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
+                                   (u32)vcpu->arch.regs[reg],
+                                   (u32)((u64)vcpu->arch.regs[reg] >> 32),
+                                   handler);
                        skip_emulated_instruction(vcpu);
                        return 1;
                case 8:
                        vcpu_load_rsp_rip(vcpu);
-                       vcpu->arch.regs[reg] = get_cr8(vcpu);
+                       vcpu->arch.regs[reg] = kvm_get_cr8(vcpu);
                        vcpu_put_rsp_rip(vcpu);
+                       KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
+                                   (u32)vcpu->arch.regs[reg], handler);
                        skip_emulated_instruction(vcpu);
                        return 1;
                }
                break;
        case 3: /* lmsw */
-               lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
+               kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
 
                skip_emulated_instruction(vcpu);
                return 1;
@@ -2049,6 +2183,7 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                        val = 0;
                }
                vcpu->arch.regs[reg] = val;
+               KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
        } else {
                /* mov to dr */
        }
@@ -2073,6 +2208,9 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                return 1;
        }
 
+       KVMTRACE_3D(MSR_READ, vcpu, ecx, (u32)data, (u32)(data >> 32),
+                   handler);
+
        /* FIXME: handling of bits 32:63 of rax, rdx */
        vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
        vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
@@ -2086,6 +2224,9 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
                | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
+       KVMTRACE_3D(MSR_WRITE, vcpu, ecx, (u32)data, (u32)(data >> 32),
+                   handler);
+
        if (vmx_set_msr(vcpu, ecx, data) != 0) {
                kvm_inject_gp(vcpu, 0);
                return 1;
@@ -2110,6 +2251,9 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
        vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+
+       KVMTRACE_0D(PEND_INTR, vcpu, handler);
+
        /*
         * If the user space waits to inject interrupts, exit as soon as
         * possible
@@ -2152,6 +2296,8 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
        offset = exit_qualification & 0xffful;
 
+       KVMTRACE_1D(APIC_ACCESS, vcpu, (u32)offset, handler);
+
        er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
 
        if (er !=  EMULATE_DONE) {
@@ -2163,6 +2309,20 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 1;
 }
 
+static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+       unsigned long exit_qualification;
+       u16 tss_selector;
+       int reason;
+
+       exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+       reason = (u32)exit_qualification >> 30;
+       tss_selector = exit_qualification;
+
+       return kvm_task_switch(vcpu, tss_selector, reason);
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
@@ -2185,6 +2345,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
        [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
        [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
        [EXIT_REASON_WBINVD]                  = handle_wbinvd,
+       [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -2200,6 +2361,9 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 vectoring_info = vmx->idt_vectoring_info;
 
+       KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP),
+                   (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit);
+
        if (unlikely(vmx->fail)) {
                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                kvm_run->fail_entry.hardware_entry_failure_reason
@@ -2210,7 +2374,7 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
                                exit_reason != EXIT_REASON_EXCEPTION_NMI)
                printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
-                      "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
+                      "exit reason is 0x%x\n", __func__, exit_reason);
        if (exit_reason < kvm_vmx_max_exit_handlers
            && kvm_vmx_exit_handlers[exit_reason])
                return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
@@ -2221,10 +2385,6 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
        return 0;
 }
 
-static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
-{
-}
-
 static void update_tpr_threshold(struct kvm_vcpu *vcpu)
 {
        int max_irr, tpr;
@@ -2285,11 +2445,13 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
                        return;
                }
 
+               KVMTRACE_1D(REDELIVER_EVT, vcpu, idtv_info_field, handler);
+
                vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field);
                vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
                                vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
 
-               if (unlikely(idtv_info_field & INTR_INFO_DELIEVER_CODE_MASK))
+               if (unlikely(idtv_info_field & INTR_INFO_DELIVER_CODE_MASK))
                        vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
                                vmcs_read32(IDT_VECTORING_ERROR_CODE));
                if (unlikely(has_ext_irq))
@@ -2470,8 +2632,10 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
        /* We need to handle NMIs before interrupts are enabled */
-       if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+       if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
+               KVMTRACE_0D(NMI, vcpu, handler);
                asm("int $2");
+       }
 }
 
 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
@@ -2489,6 +2653,10 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+       spin_lock(&vmx_vpid_lock);
+       if (vmx->vpid != 0)
+               __clear_bit(vmx->vpid, vmx_vpid_bitmap);
+       spin_unlock(&vmx_vpid_lock);
        vmx_free_vmcs(vcpu);
        kfree(vmx->host_msrs);
        kfree(vmx->guest_msrs);
@@ -2505,6 +2673,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
        if (!vmx)
                return ERR_PTR(-ENOMEM);
 
+       allocate_vpid(vmx);
+
        err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
        if (err)
                goto free_vcpu;
@@ -2591,14 +2761,13 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .get_segment_base = vmx_get_segment_base,
        .get_segment = vmx_get_segment,
        .set_segment = vmx_set_segment,
+       .get_cpl = vmx_get_cpl,
        .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
        .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
        .set_cr0 = vmx_set_cr0,
        .set_cr3 = vmx_set_cr3,
        .set_cr4 = vmx_set_cr4,
-#ifdef CONFIG_X86_64
        .set_efer = vmx_set_efer,
-#endif
        .get_idt = vmx_get_idt,
        .set_idt = vmx_set_idt,
        .get_gdt = vmx_get_gdt,
@@ -2626,7 +2795,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 static int __init vmx_init(void)
 {
-       void *iova;
+       void *va;
        int r;
 
        vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
@@ -2639,28 +2808,48 @@ static int __init vmx_init(void)
                goto out;
        }
 
+       vmx_msr_bitmap = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+       if (!vmx_msr_bitmap) {
+               r = -ENOMEM;
+               goto out1;
+       }
+
        /*
         * Allow direct access to the PC debug port (it is often used for I/O
         * delays, but the vmexits simply slow things down).
         */
-       iova = kmap(vmx_io_bitmap_a);
-       memset(iova, 0xff, PAGE_SIZE);
-       clear_bit(0x80, iova);
+       va = kmap(vmx_io_bitmap_a);
+       memset(va, 0xff, PAGE_SIZE);
+       clear_bit(0x80, va);
        kunmap(vmx_io_bitmap_a);
 
-       iova = kmap(vmx_io_bitmap_b);
-       memset(iova, 0xff, PAGE_SIZE);
+       va = kmap(vmx_io_bitmap_b);
+       memset(va, 0xff, PAGE_SIZE);
        kunmap(vmx_io_bitmap_b);
 
+       va = kmap(vmx_msr_bitmap);
+       memset(va, 0xff, PAGE_SIZE);
+       kunmap(vmx_msr_bitmap);
+
+       set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
+
        r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
        if (r)
-               goto out1;
+               goto out2;
+
+       vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_FS_BASE);
+       vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_GS_BASE);
+       vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_CS);
+       vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
+       vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
 
        if (bypass_guest_pf)
                kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
 
        return 0;
 
+out2:
+       __free_page(vmx_msr_bitmap);
 out1:
        __free_page(vmx_io_bitmap_b);
 out:
@@ -2670,6 +2859,7 @@ out:
 
 static void __exit vmx_exit(void)
 {
+       __free_page(vmx_msr_bitmap);
        __free_page(vmx_io_bitmap_b);
        __free_page(vmx_io_bitmap_a);
 
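
The comment in vmx_disable_intercept_for_msr() above describes how the 4 KiB MSR bitmap is split into four 1 KiB regions: read bits for MSRs 0x00000000-0x00001fff at offset 0x000, read bits for 0xc0000000-0xc0001fff at 0x400, and the corresponding write bits at 0x800 and 0xc00. A short sketch of the bit arithmetic that layout implies (msr_bitmap_bit() is an illustrative name, not something defined in the patch):

    #include <linux/types.h>    /* u32 */

    /* Bit index into the 4 KiB MSR bitmap for one MSR/access pair,
     * or -1 if the MSR is outside the two controllable ranges. */
    static int msr_bitmap_bit(u32 msr, int write)
    {
            int base_bits;

            if (msr <= 0x1fff)
                    base_bits = (write ? 0x800 : 0x000) * 8;
            else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
                    base_bits = (write ? 0xc00 : 0x400) * 8;
            else
                    return -1;

            return base_bits + (msr & 0x1fff);
    }

Clearing the bit this returns, relative to the start of the page mapped from vmx_msr_bitmap, corresponds to what vmx_disable_intercept_for_msr() does for the read and write halves.
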
index d52ae8d7303de2586351d80662fd385366331ec5..5dff4606b988591d4e0e82f6247945a829f7f977 100644
@@ -49,6 +49,7 @@
  * Definitions of Secondary Processor-Based VM-Execution Controls.
  */
 #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
+#define SECONDARY_EXEC_ENABLE_VPID              0x00000020
 #define SECONDARY_EXEC_WBINVD_EXITING          0x00000040
 
 
@@ -65,6 +66,7 @@
 
 /* VMCS Encodings */
 enum vmcs_field {
+       VIRTUAL_PROCESSOR_ID            = 0x00000000,
        GUEST_ES_SELECTOR               = 0x00000800,
        GUEST_CS_SELECTOR               = 0x00000802,
        GUEST_SS_SELECTOR               = 0x00000804,
@@ -231,12 +233,12 @@ enum vmcs_field {
  */
 #define INTR_INFO_VECTOR_MASK           0xff            /* 7:0 */
 #define INTR_INFO_INTR_TYPE_MASK        0x700           /* 10:8 */
-#define INTR_INFO_DELIEVER_CODE_MASK    0x800           /* 11 */
+#define INTR_INFO_DELIVER_CODE_MASK     0x800           /* 11 */
 #define INTR_INFO_VALID_MASK            0x80000000      /* 31 */
 
 #define VECTORING_INFO_VECTOR_MASK             INTR_INFO_VECTOR_MASK
 #define VECTORING_INFO_TYPE_MASK               INTR_INFO_INTR_TYPE_MASK
-#define VECTORING_INFO_DELIEVER_CODE_MASK      INTR_INFO_DELIEVER_CODE_MASK
+#define VECTORING_INFO_DELIVER_CODE_MASK       INTR_INFO_DELIVER_CODE_MASK
 #define VECTORING_INFO_VALID_MASK              INTR_INFO_VALID_MASK
 
 #define INTR_TYPE_EXT_INTR              (0 << 8) /* external interrupt */
@@ -321,4 +323,8 @@ enum vmcs_field {
 
 #define APIC_ACCESS_PAGE_PRIVATE_MEMSLOT       9
 
+#define VMX_NR_VPIDS                           (1 << 16)
+#define VMX_VPID_EXTENT_SINGLE_CONTEXT         1
+#define VMX_VPID_EXTENT_ALL_CONTEXT            2
+
 #endif
index 6b01552bd1f1cb91c64af4fdf9f3124e734d4d19..0ce556372a4d1df614499224a634f4967afc8eb3 100644
  */
 
 #include <linux/kvm_host.h>
-#include "segment_descriptor.h"
 #include "irq.h"
 #include "mmu.h"
+#include "i8254.h"
+#include "tss.h"
 
+#include <linux/clocksource.h>
 #include <linux/kvm.h>
 #include <linux/fs.h>
 #include <linux/vmalloc.h>
@@ -28,6 +30,7 @@
 
 #include <asm/uaccess.h>
 #include <asm/msr.h>
+#include <asm/desc.h>
 
 #define MAX_IO_MSRS 256
 #define CR0_RESERVED_BITS                                              \
                          | X86_CR4_OSXMMEXCPT | X86_CR4_VMXE))
 
 #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR)
-#define EFER_RESERVED_BITS 0xfffffffffffff2fe
+/* EFER defaults:
+ * - enable syscall per default because its emulated by KVM
+ * - enable LME and LMA per default on 64 bit KVM
+ */
+#ifdef CONFIG_X86_64
+static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffafeULL;
+#else
+static u64 __read_mostly efer_reserved_bits = 0xfffffffffffffffeULL;
+#endif
 
 #define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
 #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
@@ -63,6 +74,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "irq_window", VCPU_STAT(irq_window_exits) },
        { "halt_exits", VCPU_STAT(halt_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
+       { "hypercalls", VCPU_STAT(hypercalls) },
        { "request_irq", VCPU_STAT(request_irq_exits) },
        { "irq_exits", VCPU_STAT(irq_exits) },
        { "host_state_reload", VCPU_STAT(host_state_reload) },
@@ -78,6 +90,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmu_recycled", VM_STAT(mmu_recycled) },
        { "mmu_cache_miss", VM_STAT(mmu_cache_miss) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
+       { "largepages", VM_STAT(lpages) },
        { NULL }
 };
 
@@ -85,7 +98,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 unsigned long segment_base(u16 selector)
 {
        struct descriptor_table gdt;
-       struct segment_descriptor *d;
+       struct desc_struct *d;
        unsigned long table_base;
        unsigned long v;
 
@@ -101,13 +114,12 @@ unsigned long segment_base(u16 selector)
                asm("sldt %0" : "=g"(ldt_selector));
                table_base = segment_base(ldt_selector);
        }
-       d = (struct segment_descriptor *)(table_base + (selector & ~7));
-       v = d->base_low | ((unsigned long)d->base_mid << 16) |
-               ((unsigned long)d->base_high << 24);
+       d = (struct desc_struct *)(table_base + (selector & ~7));
+       v = d->base0 | ((unsigned long)d->base1 << 16) |
+               ((unsigned long)d->base2 << 24);
 #ifdef CONFIG_X86_64
-       if (d->system == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
-               v |= ((unsigned long) \
-                     ((struct segment_descriptor_64 *)d)->base_higher) << 32;
+       if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
+               v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
 #endif
        return v;
 }
@@ -145,11 +157,16 @@ void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                           u32 error_code)
 {
        ++vcpu->stat.pf_guest;
-       if (vcpu->arch.exception.pending && vcpu->arch.exception.nr == PF_VECTOR) {
-               printk(KERN_DEBUG "kvm: inject_page_fault:"
-                      " double fault 0x%lx\n", addr);
-               vcpu->arch.exception.nr = DF_VECTOR;
-               vcpu->arch.exception.error_code = 0;
+       if (vcpu->arch.exception.pending) {
+               if (vcpu->arch.exception.nr == PF_VECTOR) {
+                       printk(KERN_DEBUG "kvm: inject_page_fault:"
+                                       " double fault 0x%lx\n", addr);
+                       vcpu->arch.exception.nr = DF_VECTOR;
+                       vcpu->arch.exception.error_code = 0;
+               } else if (vcpu->arch.exception.nr == DF_VECTOR) {
+                       /* triple fault -> shutdown */
+                       set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+               }
                return;
        }
        vcpu->arch.cr2 = addr;
@@ -184,7 +201,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
        int ret;
        u64 pdpte[ARRAY_SIZE(vcpu->arch.pdptrs)];
 
-       down_read(&vcpu->kvm->slots_lock);
        ret = kvm_read_guest_page(vcpu->kvm, pdpt_gfn, pdpte,
                                  offset * sizeof(u64), sizeof(pdpte));
        if (ret < 0) {
@@ -201,10 +217,10 @@ int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3)
 
        memcpy(vcpu->arch.pdptrs, pdpte, sizeof(vcpu->arch.pdptrs));
 out:
-       up_read(&vcpu->kvm->slots_lock);
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(load_pdptrs);
 
 static bool pdptrs_changed(struct kvm_vcpu *vcpu)
 {
@@ -215,18 +231,16 @@ static bool pdptrs_changed(struct kvm_vcpu *vcpu)
        if (is_long_mode(vcpu) || !is_pae(vcpu))
                return false;
 
-       down_read(&vcpu->kvm->slots_lock);
        r = kvm_read_guest(vcpu->kvm, vcpu->arch.cr3 & ~31u, pdpte, sizeof(pdpte));
        if (r < 0)
                goto out;
        changed = memcmp(pdpte, vcpu->arch.pdptrs, sizeof(pdpte)) != 0;
 out:
-       up_read(&vcpu->kvm->slots_lock);
 
        return changed;
 }
 
-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
+void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        if (cr0 & CR0_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
@@ -284,15 +298,18 @@ void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        kvm_mmu_reset_context(vcpu);
        return;
 }
-EXPORT_SYMBOL_GPL(set_cr0);
+EXPORT_SYMBOL_GPL(kvm_set_cr0);
 
-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
+void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
-       set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+       kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
+       KVMTRACE_1D(LMSW, vcpu,
+                   (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
+                   handler);
 }
-EXPORT_SYMBOL_GPL(lmsw);
+EXPORT_SYMBOL_GPL(kvm_lmsw);
 
-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
+void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
        if (cr4 & CR4_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
@@ -323,9 +340,9 @@ void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        vcpu->arch.cr4 = cr4;
        kvm_mmu_reset_context(vcpu);
 }
-EXPORT_SYMBOL_GPL(set_cr4);
+EXPORT_SYMBOL_GPL(kvm_set_cr4);
 
-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
+void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 {
        if (cr3 == vcpu->arch.cr3 && !pdptrs_changed(vcpu)) {
                kvm_mmu_flush_tlb(vcpu);
@@ -359,7 +376,6 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                 */
        }
 
-       down_read(&vcpu->kvm->slots_lock);
        /*
         * Does the new cr3 value map to physical memory? (Note, we
         * catch an invalid cr3 even in real-mode, because it would
@@ -375,11 +391,10 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                vcpu->arch.cr3 = cr3;
                vcpu->arch.mmu.new_cr3(vcpu);
        }
-       up_read(&vcpu->kvm->slots_lock);
 }
-EXPORT_SYMBOL_GPL(set_cr3);
+EXPORT_SYMBOL_GPL(kvm_set_cr3);
 
-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
+void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
        if (cr8 & CR8_RESERVED_BITS) {
                printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
@@ -391,16 +406,16 @@ void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
        else
                vcpu->arch.cr8 = cr8;
 }
-EXPORT_SYMBOL_GPL(set_cr8);
+EXPORT_SYMBOL_GPL(kvm_set_cr8);
 
-unsigned long get_cr8(struct kvm_vcpu *vcpu)
+unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
 {
        if (irqchip_in_kernel(vcpu->kvm))
                return kvm_lapic_get_cr8(vcpu);
        else
                return vcpu->arch.cr8;
 }
-EXPORT_SYMBOL_GPL(get_cr8);
+EXPORT_SYMBOL_GPL(kvm_get_cr8);
 
 /*
  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
@@ -415,7 +430,8 @@ static u32 msrs_to_save[] = {
 #ifdef CONFIG_X86_64
        MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR,
 #endif
-       MSR_IA32_TIME_STAMP_COUNTER,
+       MSR_IA32_TIME_STAMP_COUNTER, MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+       MSR_IA32_PERF_STATUS,
 };
 
 static unsigned num_msrs_to_save;
@@ -424,11 +440,9 @@ static u32 emulated_msrs[] = {
        MSR_IA32_MISC_ENABLE,
 };
 
-#ifdef CONFIG_X86_64
-
 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-       if (efer & EFER_RESERVED_BITS) {
+       if (efer & efer_reserved_bits) {
                printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
                       efer);
                kvm_inject_gp(vcpu, 0);
@@ -450,7 +464,12 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
        vcpu->arch.shadow_efer = efer;
 }
 
-#endif
+void kvm_enable_efer_bits(u64 mask)
+{
+       efer_reserved_bits &= ~mask;
+}
+EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
+
 
 /*
  * Writes msr value into into the appropriate "register".
@@ -470,26 +489,86 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
        return kvm_set_msr(vcpu, index, *data);
 }
 
+static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock)
+{
+       static int version;
+       struct kvm_wall_clock wc;
+       struct timespec wc_ts;
+
+       if (!wall_clock)
+               return;
+
+       version++;
+
+       kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
+
+       wc_ts = current_kernel_time();
+       wc.wc_sec = wc_ts.tv_sec;
+       wc.wc_nsec = wc_ts.tv_nsec;
+       wc.wc_version = version;
+
+       kvm_write_guest(kvm, wall_clock, &wc, sizeof(wc));
+
+       version++;
+       kvm_write_guest(kvm, wall_clock, &version, sizeof(version));
+}
+
+static void kvm_write_guest_time(struct kvm_vcpu *v)
+{
+       struct timespec ts;
+       unsigned long flags;
+       struct kvm_vcpu_arch *vcpu = &v->arch;
+       void *shared_kaddr;
+
+       if ((!vcpu->time_page))
+               return;
+
+       /* Keep irq disabled to prevent changes to the clock */
+       local_irq_save(flags);
+       kvm_get_msr(v, MSR_IA32_TIME_STAMP_COUNTER,
+                         &vcpu->hv_clock.tsc_timestamp);
+       ktime_get_ts(&ts);
+       local_irq_restore(flags);
+
+       /* With all the info we got, fill in the values */
+
+       vcpu->hv_clock.system_time = ts.tv_nsec +
+                                    (NSEC_PER_SEC * (u64)ts.tv_sec);
+       /*
+        * The interface expects us to write an even number signaling that the
+        * update is finished. Since the guest won't see the intermediate
+        * state, we just write "2" at the end
+        */
+       vcpu->hv_clock.version = 2;
+
+       shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
+
+       memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
+               sizeof(vcpu->hv_clock));
+
+       kunmap_atomic(shared_kaddr, KM_USER0);
+
+       mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+}
+
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
        switch (msr) {
-#ifdef CONFIG_X86_64
        case MSR_EFER:
                set_efer(vcpu, data);
                break;
-#endif
        case MSR_IA32_MC0_STATUS:
                pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
-                      __FUNCTION__, data);
+                      __func__, data);
                break;
        case MSR_IA32_MCG_STATUS:
                pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
-                       __FUNCTION__, data);
+                       __func__, data);
                break;
        case MSR_IA32_MCG_CTL:
                pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
-                       __FUNCTION__, data);
+                       __func__, data);
                break;
        case MSR_IA32_UCODE_REV:
        case MSR_IA32_UCODE_WRITE:
@@ -501,6 +580,42 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
        case MSR_IA32_MISC_ENABLE:
                vcpu->arch.ia32_misc_enable_msr = data;
                break;
+       case MSR_KVM_WALL_CLOCK:
+               vcpu->kvm->arch.wall_clock = data;
+               kvm_write_wall_clock(vcpu->kvm, data);
+               break;
+       case MSR_KVM_SYSTEM_TIME: {
+               if (vcpu->arch.time_page) {
+                       kvm_release_page_dirty(vcpu->arch.time_page);
+                       vcpu->arch.time_page = NULL;
+               }
+
+               vcpu->arch.time = data;
+
+               /* we verify if the enable bit is set... */
+               if (!(data & 1))
+                       break;
+
+               /* ...but clean it before doing the actual write */
+               vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+
+               vcpu->arch.hv_clock.tsc_to_system_mul =
+                                       clocksource_khz2mult(tsc_khz, 22);
+               vcpu->arch.hv_clock.tsc_shift = 22;
+
+               down_read(&current->mm->mmap_sem);
+               vcpu->arch.time_page =
+                               gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+               up_read(&current->mm->mmap_sem);
+
+               if (is_error_page(vcpu->arch.time_page)) {
+                       kvm_release_page_clean(vcpu->arch.time_page);
+                       vcpu->arch.time_page = NULL;
+               }
+
+               kvm_write_guest_time(vcpu);
+               break;
+       }
        default:
                pr_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n", msr, data);
                return 1;
@@ -540,7 +655,6 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_IA32_MC0_MISC+12:
        case MSR_IA32_MC0_MISC+16:
        case MSR_IA32_UCODE_REV:
-       case MSR_IA32_PERF_STATUS:
        case MSR_IA32_EBL_CR_POWERON:
                /* MTRR registers */
        case 0xfe:
@@ -556,11 +670,21 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_IA32_MISC_ENABLE:
                data = vcpu->arch.ia32_misc_enable_msr;
                break;
-#ifdef CONFIG_X86_64
+       case MSR_IA32_PERF_STATUS:
+               /* TSC increment by tick */
+               data = 1000ULL;
+               /* CPU multiplier */
+               data |= (((uint64_t)4ULL) << 40);
+               break;
        case MSR_EFER:
                data = vcpu->arch.shadow_efer;
                break;
-#endif
+       case MSR_KVM_WALL_CLOCK:
+               data = vcpu->kvm->arch.wall_clock;
+               break;
+       case MSR_KVM_SYSTEM_TIME:
+               data = vcpu->arch.time;
+               break;
        default:
                pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
                return 1;
@@ -584,9 +708,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs,
 
        vcpu_load(vcpu);
 
+       down_read(&vcpu->kvm->slots_lock);
        for (i = 0; i < msrs->nmsrs; ++i)
                if (do_msr(vcpu, entries[i].index, &entries[i].data))
                        break;
+       up_read(&vcpu->kvm->slots_lock);
 
        vcpu_put(vcpu);
 
@@ -688,11 +814,24 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_USER_MEMORY:
        case KVM_CAP_SET_TSS_ADDR:
        case KVM_CAP_EXT_CPUID:
+       case KVM_CAP_CLOCKSOURCE:
+       case KVM_CAP_PIT:
+       case KVM_CAP_NOP_IO_DELAY:
+       case KVM_CAP_MP_STATE:
                r = 1;
                break;
        case KVM_CAP_VAPIC:
                r = !kvm_x86_ops->cpu_has_accelerated_tpr();
                break;
+       case KVM_CAP_NR_VCPUS:
+               r = KVM_MAX_VCPUS;
+               break;
+       case KVM_CAP_NR_MEMSLOTS:
+               r = KVM_MEMORY_SLOTS;
+               break;
+       case KVM_CAP_PV_MMU:
+               r = !tdp_enabled;
+               break;
        default:
                r = 0;
                break;
@@ -763,6 +902,7 @@ out:
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        kvm_x86_ops->vcpu_load(vcpu, cpu);
+       kvm_write_guest_time(vcpu);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -958,32 +1098,32 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
        }
        /* function 4 and 0xb have additional index. */
        case 4: {
-               int index, cache_type;
+               int i, cache_type;
 
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until cache_type is zero */
-               for (index = 1; *nent < maxnent; ++index) {
-                       cache_type = entry[index - 1].eax & 0x1f;
+               for (i = 1; *nent < maxnent; ++i) {
+                       cache_type = entry[i - 1].eax & 0x1f;
                        if (!cache_type)
                                break;
-                       do_cpuid_1_ent(&entry[index], function, index);
-                       entry[index].flags |=
+                       do_cpuid_1_ent(&entry[i], function, i);
+                       entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
                break;
        }
        case 0xb: {
-               int index, level_type;
+               int i, level_type;
 
                entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                /* read more entries until level_type is zero */
-               for (index = 1; *nent < maxnent; ++index) {
-                       level_type = entry[index - 1].ecx & 0xff;
+               for (i = 1; *nent < maxnent; ++i) {
+                       level_type = entry[i - 1].ecx & 0xff;
                        if (!level_type)
                                break;
-                       do_cpuid_1_ent(&entry[index], function, index);
-                       entry[index].flags |=
+                       do_cpuid_1_ent(&entry[i], function, i);
+                       entry[i].flags |=
                               KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
                        ++*nent;
                }
@@ -1365,6 +1505,23 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
        return r;
 }
 
+static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
+{
+       int r = 0;
+
+       memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
+       return r;
+}
+
+static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
+{
+       int r = 0;
+
+       memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
+       kvm_pit_load_count(kvm, 0, ps->channels[0].count);
+       return r;
+}
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -1457,6 +1614,12 @@ long kvm_arch_vm_ioctl(struct file *filp,
                } else
                        goto out;
                break;
+       case KVM_CREATE_PIT:
+               r = -ENOMEM;
+               kvm->arch.vpit = kvm_create_pit(kvm);
+               if (kvm->arch.vpit)
+                       r = 0;
+               break;
        case KVM_IRQ_LINE: {
                struct kvm_irq_level irq_event;
 
@@ -1512,6 +1675,37 @@ long kvm_arch_vm_ioctl(struct file *filp,
                r = 0;
                break;
        }
+       case KVM_GET_PIT: {
+               struct kvm_pit_state ps;
+               r = -EFAULT;
+               if (copy_from_user(&ps, argp, sizeof ps))
+                       goto out;
+               r = -ENXIO;
+               if (!kvm->arch.vpit)
+                       goto out;
+               r = kvm_vm_ioctl_get_pit(kvm, &ps);
+               if (r)
+                       goto out;
+               r = -EFAULT;
+               if (copy_to_user(argp, &ps, sizeof ps))
+                       goto out;
+               r = 0;
+               break;
+       }
+       case KVM_SET_PIT: {
+               struct kvm_pit_state ps;
+               r = -EFAULT;
+               if (copy_from_user(&ps, argp, sizeof ps))
+                       goto out;
+               r = -ENXIO;
+               if (!kvm->arch.vpit)
+                       goto out;
+               r = kvm_vm_ioctl_set_pit(kvm, &ps);
+               if (r)
+                       goto out;
+               r = 0;
+               break;
+       }
        default:
                ;
        }
@@ -1570,7 +1764,6 @@ int emulator_read_std(unsigned long addr,
        void *data = val;
        int r = X86EMUL_CONTINUE;
 
-       down_read(&vcpu->kvm->slots_lock);
        while (bytes) {
                gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
                unsigned offset = addr & (PAGE_SIZE-1);
@@ -1592,7 +1785,6 @@ int emulator_read_std(unsigned long addr,
                addr += tocopy;
        }
 out:
-       up_read(&vcpu->kvm->slots_lock);
        return r;
 }
 EXPORT_SYMBOL_GPL(emulator_read_std);
@@ -1611,9 +1803,7 @@ static int emulator_read_emulated(unsigned long addr,
                return X86EMUL_CONTINUE;
        }
 
-       down_read(&vcpu->kvm->slots_lock);
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
-       up_read(&vcpu->kvm->slots_lock);
 
        /* For APIC access vmexit */
        if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -1646,19 +1836,15 @@ mmio:
        return X86EMUL_UNHANDLEABLE;
 }
 
-static int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
-                              const void *val, int bytes)
+int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+                         const void *val, int bytes)
 {
        int ret;
 
-       down_read(&vcpu->kvm->slots_lock);
        ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
-       if (ret < 0) {
-               up_read(&vcpu->kvm->slots_lock);
+       if (ret < 0)
                return 0;
-       }
        kvm_mmu_pte_write(vcpu, gpa, val, bytes);
-       up_read(&vcpu->kvm->slots_lock);
        return 1;
 }
 
@@ -1670,9 +1856,7 @@ static int emulator_write_emulated_onepage(unsigned long addr,
        struct kvm_io_device *mmio_dev;
        gpa_t                 gpa;
 
-       down_read(&vcpu->kvm->slots_lock);
        gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
-       up_read(&vcpu->kvm->slots_lock);
 
        if (gpa == UNMAPPED_GVA) {
                kvm_inject_page_fault(vcpu, addr, 2);
@@ -1749,7 +1933,6 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
                char *kaddr;
                u64 val;
 
-               down_read(&vcpu->kvm->slots_lock);
                gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
 
                if (gpa == UNMAPPED_GVA ||
@@ -1769,9 +1952,8 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
                set_64bit((u64 *)(kaddr + offset_in_page(gpa)), val);
                kunmap_atomic(kaddr, KM_USER0);
                kvm_release_page_dirty(page);
-       emul_write:
-               up_read(&vcpu->kvm->slots_lock);
        }
+emul_write:
 #endif
 
        return emulator_write_emulated(addr, new, bytes, vcpu);
@@ -1802,7 +1984,7 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
                *dest = kvm_x86_ops->get_dr(vcpu, dr);
                return X86EMUL_CONTINUE;
        default:
-               pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
+               pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
                return X86EMUL_UNHANDLEABLE;
        }
 }
@@ -1840,7 +2022,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 }
 EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
 
-struct x86_emulate_ops emulate_ops = {
+static struct x86_emulate_ops emulate_ops = {
        .read_std            = emulator_read_std,
        .read_emulated       = emulator_read_emulated,
        .write_emulated      = emulator_write_emulated,
@@ -2091,6 +2273,13 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        vcpu->arch.pio.guest_page_offset = 0;
        vcpu->arch.pio.rep = 0;
 
+       if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
+               KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
+                           handler);
+       else
+               KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
+                           handler);
+
        kvm_x86_ops->cache_regs(vcpu);
        memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4);
        kvm_x86_ops->decache_regs(vcpu);
@@ -2129,6 +2318,13 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
        vcpu->arch.pio.guest_page_offset = offset_in_page(address);
        vcpu->arch.pio.rep = rep;
 
+       if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
+               KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
+                           handler);
+       else
+               KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
+                           handler);
+
        if (!count) {
                kvm_x86_ops->skip_emulated_instruction(vcpu);
                return 1;
@@ -2163,10 +2359,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
                kvm_x86_ops->skip_emulated_instruction(vcpu);
 
        for (i = 0; i < nr_pages; ++i) {
-               down_read(&vcpu->kvm->slots_lock);
                page = gva_to_page(vcpu, address + i * PAGE_SIZE);
                vcpu->arch.pio.guest_pages[i] = page;
-               up_read(&vcpu->kvm->slots_lock);
                if (!page) {
                        kvm_inject_gp(vcpu, 0);
                        free_pio_guest_pages(vcpu);
@@ -2238,10 +2432,13 @@ void kvm_arch_exit(void)
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
        ++vcpu->stat.halt_exits;
+       KVMTRACE_0D(HLT, vcpu, handler);
        if (irqchip_in_kernel(vcpu->kvm)) {
-               vcpu->arch.mp_state = VCPU_MP_STATE_HALTED;
+               vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
+               up_read(&vcpu->kvm->slots_lock);
                kvm_vcpu_block(vcpu);
-               if (vcpu->arch.mp_state != VCPU_MP_STATE_RUNNABLE)
+               down_read(&vcpu->kvm->slots_lock);
+               if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
                        return -EINTR;
                return 1;
        } else {
@@ -2251,9 +2448,19 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_halt);
 
+static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
+                          unsigned long a1)
+{
+       if (is_long_mode(vcpu))
+               return a0;
+       else
+               return a0 | ((gpa_t)a1 << 32);
+}
+
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
        unsigned long nr, a0, a1, a2, a3, ret;
+       int r = 1;
 
        kvm_x86_ops->cache_regs(vcpu);
 
@@ -2263,6 +2470,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
        a2 = vcpu->arch.regs[VCPU_REGS_RDX];
        a3 = vcpu->arch.regs[VCPU_REGS_RSI];
 
+       KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
+
        if (!is_long_mode(vcpu)) {
                nr &= 0xFFFFFFFF;
                a0 &= 0xFFFFFFFF;
@@ -2275,13 +2484,17 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
        case KVM_HC_VAPIC_POLL_IRQ:
                ret = 0;
                break;
+       case KVM_HC_MMU_OP:
+               r = kvm_pv_mmu_op(vcpu, a0, hc_gpa(vcpu, a1, a2), &ret);
+               break;
        default:
                ret = -KVM_ENOSYS;
                break;
        }
        vcpu->arch.regs[VCPU_REGS_RAX] = ret;
        kvm_x86_ops->decache_regs(vcpu);
-       return 0;
+       ++vcpu->stat.hypercalls;
+       return r;
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
 
@@ -2329,7 +2542,7 @@ void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
                   unsigned long *rflags)
 {
-       lmsw(vcpu, msw);
+       kvm_lmsw(vcpu, msw);
        *rflags = kvm_x86_ops->get_rflags(vcpu);
 }
 
@@ -2346,9 +2559,9 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
        case 4:
                return vcpu->arch.cr4;
        case 8:
-               return get_cr8(vcpu);
+               return kvm_get_cr8(vcpu);
        default:
-               vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
+               vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
                return 0;
        }
 }
@@ -2358,23 +2571,23 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
 {
        switch (cr) {
        case 0:
-               set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
+               kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
                *rflags = kvm_x86_ops->get_rflags(vcpu);
                break;
        case 2:
                vcpu->arch.cr2 = val;
                break;
        case 3:
-               set_cr3(vcpu, val);
+               kvm_set_cr3(vcpu, val);
                break;
        case 4:
-               set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
+               kvm_set_cr4(vcpu, mk_cr_64(vcpu->arch.cr4, val));
                break;
        case 8:
-               set_cr8(vcpu, val & 0xfUL);
+               kvm_set_cr8(vcpu, val & 0xfUL);
                break;
        default:
-               vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
+               vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
        }
 }
 
@@ -2447,6 +2660,11 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
        }
        kvm_x86_ops->decache_regs(vcpu);
        kvm_x86_ops->skip_emulated_instruction(vcpu);
+       KVMTRACE_5D(CPUID, vcpu, function,
+                   (u32)vcpu->arch.regs[VCPU_REGS_RAX],
+                   (u32)vcpu->arch.regs[VCPU_REGS_RBX],
+                   (u32)vcpu->arch.regs[VCPU_REGS_RCX],
+                   (u32)vcpu->arch.regs[VCPU_REGS_RDX], handler);
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
 
@@ -2469,7 +2687,7 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu,
                              struct kvm_run *kvm_run)
 {
        kvm_run->if_flag = (kvm_x86_ops->get_rflags(vcpu) & X86_EFLAGS_IF) != 0;
-       kvm_run->cr8 = get_cr8(vcpu);
+       kvm_run->cr8 = kvm_get_cr8(vcpu);
        kvm_run->apic_base = kvm_get_apic_base(vcpu);
        if (irqchip_in_kernel(vcpu->kvm))
                kvm_run->ready_for_interrupt_injection = 1;
@@ -2509,16 +2727,17 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int r;
 
-       if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED)) {
+       if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
                pr_debug("vcpu %d received sipi with vector # %x\n",
                       vcpu->vcpu_id, vcpu->arch.sipi_vector);
                kvm_lapic_reset(vcpu);
                r = kvm_x86_ops->vcpu_reset(vcpu);
                if (r)
                        return r;
-               vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        }
 
+       down_read(&vcpu->kvm->slots_lock);
        vapic_enter(vcpu);
 
 preempted:
@@ -2526,6 +2745,10 @@ preempted:
                kvm_x86_ops->guest_debug_pre(vcpu);
 
 again:
+       if (vcpu->requests)
+               if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+                       kvm_mmu_unload(vcpu);
+
        r = kvm_mmu_reload(vcpu);
        if (unlikely(r))
                goto out;
@@ -2539,6 +2762,11 @@ again:
                        r = 0;
                        goto out;
                }
+               if (test_and_clear_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests)) {
+                       kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
+                       r = 0;
+                       goto out;
+               }
        }
 
        kvm_inject_pending_timer_irqs(vcpu);
@@ -2557,6 +2785,14 @@ again:
                goto out;
        }
 
+       if (vcpu->requests)
+               if (test_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests)) {
+                       local_irq_enable();
+                       preempt_enable();
+                       r = 1;
+                       goto out;
+               }
+
        if (signal_pending(current)) {
                local_irq_enable();
                preempt_enable();
@@ -2566,6 +2802,13 @@ again:
                goto out;
        }
 
+       vcpu->guest_mode = 1;
+       /*
+        * Make sure that guest_mode assignment won't happen after
+        * testing the pending IRQ vector bitmap.
+        */
+       smp_wmb();
+
        if (vcpu->arch.exception.pending)
                __queue_exception(vcpu);
        else if (irqchip_in_kernel(vcpu->kvm))
@@ -2575,13 +2818,15 @@ again:
 
        kvm_lapic_sync_to_vapic(vcpu);
 
-       vcpu->guest_mode = 1;
+       up_read(&vcpu->kvm->slots_lock);
+
        kvm_guest_enter();
 
        if (vcpu->requests)
                if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests))
                        kvm_x86_ops->tlb_flush(vcpu);
 
+       KVMTRACE_0D(VMENTRY, vcpu, entryexit);
        kvm_x86_ops->run(vcpu, kvm_run);
 
        vcpu->guest_mode = 0;
@@ -2601,6 +2846,8 @@ again:
 
        preempt_enable();
 
+       down_read(&vcpu->kvm->slots_lock);
+
        /*
         * Profile KVM exit RIPs:
         */
@@ -2628,14 +2875,18 @@ again:
        }
 
 out:
+       up_read(&vcpu->kvm->slots_lock);
        if (r > 0) {
                kvm_resched(vcpu);
+               down_read(&vcpu->kvm->slots_lock);
                goto preempted;
        }
 
        post_kvm_run_save(vcpu, kvm_run);
 
+       down_read(&vcpu->kvm->slots_lock);
        vapic_exit(vcpu);
+       up_read(&vcpu->kvm->slots_lock);
 
        return r;
 }
@@ -2647,7 +2898,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        vcpu_load(vcpu);
 
-       if (unlikely(vcpu->arch.mp_state == VCPU_MP_STATE_UNINITIALIZED)) {
+       if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                kvm_vcpu_block(vcpu);
                vcpu_put(vcpu);
                return -EAGAIN;
@@ -2658,7 +2909,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        /* re-sync apic's tpr */
        if (!irqchip_in_kernel(vcpu->kvm))
-               set_cr8(vcpu, kvm_run->cr8);
+               kvm_set_cr8(vcpu, kvm_run->cr8);
 
        if (vcpu->arch.pio.cur_count) {
                r = complete_pio(vcpu);
@@ -2670,9 +2921,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
                vcpu->mmio_read_completed = 1;
                vcpu->mmio_needed = 0;
+
+               down_read(&vcpu->kvm->slots_lock);
                r = emulate_instruction(vcpu, kvm_run,
                                        vcpu->arch.mmio_fault_cr2, 0,
                                        EMULTYPE_NO_DECODE);
+               up_read(&vcpu->kvm->slots_lock);
                if (r == EMULATE_DO_MMIO) {
                        /*
                         * Read-modify-write.  Back to userspace.
@@ -2773,7 +3027,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 static void get_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
 {
-       return kvm_x86_ops->get_segment(vcpu, var, seg);
+       kvm_x86_ops->get_segment(vcpu, var, seg);
 }
 
 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -2816,7 +3070,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        sregs->cr2 = vcpu->arch.cr2;
        sregs->cr3 = vcpu->arch.cr3;
        sregs->cr4 = vcpu->arch.cr4;
-       sregs->cr8 = get_cr8(vcpu);
+       sregs->cr8 = kvm_get_cr8(vcpu);
        sregs->efer = vcpu->arch.shadow_efer;
        sregs->apic_base = kvm_get_apic_base(vcpu);
 
@@ -2836,12 +3090,438 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       vcpu_load(vcpu);
+       mp_state->mp_state = vcpu->arch.mp_state;
+       vcpu_put(vcpu);
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       vcpu_load(vcpu);
+       vcpu->arch.mp_state = mp_state->mp_state;
+       vcpu_put(vcpu);
+       return 0;
+}
+
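These two handlers back the vcpu mp_state ioctls; a hypothetical userspace sketch, assuming a vcpu file descriptor vcpu_fd and the KVM_GET_MP_STATE / KVM_SET_MP_STATE ioctls wired up elsewhere in this series:

        struct kvm_mp_state st;

        if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &st) == 0 &&
            st.mp_state == KVM_MP_STATE_HALTED) {
                st.mp_state = KVM_MP_STATE_RUNNABLE;  /* e.g. after queuing an interrupt */
                ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
        }
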
 static void set_segment(struct kvm_vcpu *vcpu,
                        struct kvm_segment *var, int seg)
 {
-       return kvm_x86_ops->set_segment(vcpu, var, seg);
+       kvm_x86_ops->set_segment(vcpu, var, seg);
+}
+
+static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
+                                  struct kvm_segment *kvm_desct)
+{
+       kvm_desct->base = seg_desc->base0;
+       kvm_desct->base |= seg_desc->base1 << 16;
+       kvm_desct->base |= seg_desc->base2 << 24;
+       kvm_desct->limit = seg_desc->limit0;
+       kvm_desct->limit |= seg_desc->limit << 16;
+       kvm_desct->selector = selector;
+       kvm_desct->type = seg_desc->type;
+       kvm_desct->present = seg_desc->p;
+       kvm_desct->dpl = seg_desc->dpl;
+       kvm_desct->db = seg_desc->d;
+       kvm_desct->s = seg_desc->s;
+       kvm_desct->l = seg_desc->l;
+       kvm_desct->g = seg_desc->g;
+       kvm_desct->avl = seg_desc->avl;
+       if (!selector)
+               kvm_desct->unusable = 1;
+       else
+               kvm_desct->unusable = 0;
+       kvm_desct->padding = 0;
+}
+
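The 32-bit segment base is scattered across three descriptor fields; a minimal worked example of the reassembly done above (illustrative values):

        u16 base0 = 0x5678;                                   /* base bits  0..15 */
        u8  base1 = 0x34;                                     /* base bits 16..23 */
        u8  base2 = 0x12;                                     /* base bits 24..31 */
        u32 base  = base0 | (base1 << 16) | (base2 << 24);    /* 0x12345678 */
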
+static void get_segment_descritptor_dtable(struct kvm_vcpu *vcpu,
+                                          u16 selector,
+                                          struct descriptor_table *dtable)
+{
+       if (selector & 1 << 2) {
+               struct kvm_segment kvm_seg;
+
+               get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
+
+               if (kvm_seg.unusable)
+                       dtable->limit = 0;
+               else
+                       dtable->limit = kvm_seg.limit;
+               dtable->base = kvm_seg.base;
+       } else
+               kvm_x86_ops->get_gdt(vcpu, dtable);
+}
+
+/* allowed only for 8-byte segment descriptors */
+static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+                                        struct desc_struct *seg_desc)
+{
+       struct descriptor_table dtable;
+       u16 index = selector >> 3;
+
+       get_segment_descritptor_dtable(vcpu, selector, &dtable);
+
+       if (dtable.limit < index * 8 + 7) {
+               kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
+               return 1;
+       }
+       return kvm_read_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+}
+
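For reference, a small sketch of how a selector picks a descriptor-table slot in the helpers above and below (values illustrative):

        u16 selector = 0x002b;                  /* index 5, TI = 1 (LDT), RPL = 3 */
        u16 index    = selector >> 3;           /* 5 */
        int in_ldt   = selector & (1 << 2);     /* non-zero: look up in the LDT */
        /* the 8-byte descriptor lives at dtable.base + index * 8 */
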
+/* allowed only for 8-byte segment descriptors */
+static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+                                        struct desc_struct *seg_desc)
+{
+       struct descriptor_table dtable;
+       u16 index = selector >> 3;
+
+       get_segment_descritptor_dtable(vcpu, selector, &dtable);
+
+       if (dtable.limit < index * 8 + 7)
+               return 1;
+       return kvm_write_guest(vcpu->kvm, dtable.base + index * 8, seg_desc, 8);
+}
+
+static u32 get_tss_base_addr(struct kvm_vcpu *vcpu,
+                            struct desc_struct *seg_desc)
+{
+       u32 base_addr;
+
+       base_addr = seg_desc->base0;
+       base_addr |= (seg_desc->base1 << 16);
+       base_addr |= (seg_desc->base2 << 24);
+
+       return base_addr;
+}
+
+static int load_tss_segment32(struct kvm_vcpu *vcpu,
+                             struct desc_struct *seg_desc,
+                             struct tss_segment_32 *tss)
+{
+       u32 base_addr;
+
+       base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+       return kvm_read_guest(vcpu->kvm, base_addr, tss,
+                             sizeof(struct tss_segment_32));
+}
+
+static int save_tss_segment32(struct kvm_vcpu *vcpu,
+                             struct desc_struct *seg_desc,
+                             struct tss_segment_32 *tss)
+{
+       u32 base_addr;
+
+       base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+       return kvm_write_guest(vcpu->kvm, base_addr, tss,
+                              sizeof(struct tss_segment_32));
+}
+
+static int load_tss_segment16(struct kvm_vcpu *vcpu,
+                             struct desc_struct *seg_desc,
+                             struct tss_segment_16 *tss)
+{
+       u32 base_addr;
+
+       base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+       return kvm_read_guest(vcpu->kvm, base_addr, tss,
+                             sizeof(struct tss_segment_16));
+}
+
+static int save_tss_segment16(struct kvm_vcpu *vcpu,
+                             struct desc_struct *seg_desc,
+                             struct tss_segment_16 *tss)
+{
+       u32 base_addr;
+
+       base_addr = get_tss_base_addr(vcpu, seg_desc);
+
+       return kvm_write_guest(vcpu->kvm, base_addr, tss,
+                              sizeof(struct tss_segment_16));
+}
+
+static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
+{
+       struct kvm_segment kvm_seg;
+
+       get_segment(vcpu, &kvm_seg, seg);
+       return kvm_seg.selector;
+}
+
+static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
+                                               u16 selector,
+                                               struct kvm_segment *kvm_seg)
+{
+       struct desc_struct seg_desc;
+
+       if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
+               return 1;
+       seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
+       return 0;
+}
+
+static int load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
+                                  int type_bits, int seg)
+{
+       struct kvm_segment kvm_seg;
+
+       if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
+               return 1;
+       kvm_seg.type |= type_bits;
+
+       if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
+           seg != VCPU_SREG_LDTR)
+               if (!kvm_seg.s)
+                       kvm_seg.unusable = 1;
+
+       set_segment(vcpu, &kvm_seg, seg);
+       return 0;
+}
+
+static void save_state_to_tss32(struct kvm_vcpu *vcpu,
+                               struct tss_segment_32 *tss)
+{
+       tss->cr3 = vcpu->arch.cr3;
+       tss->eip = vcpu->arch.rip;
+       tss->eflags = kvm_x86_ops->get_rflags(vcpu);
+       tss->eax = vcpu->arch.regs[VCPU_REGS_RAX];
+       tss->ecx = vcpu->arch.regs[VCPU_REGS_RCX];
+       tss->edx = vcpu->arch.regs[VCPU_REGS_RDX];
+       tss->ebx = vcpu->arch.regs[VCPU_REGS_RBX];
+       tss->esp = vcpu->arch.regs[VCPU_REGS_RSP];
+       tss->ebp = vcpu->arch.regs[VCPU_REGS_RBP];
+       tss->esi = vcpu->arch.regs[VCPU_REGS_RSI];
+       tss->edi = vcpu->arch.regs[VCPU_REGS_RDI];
+
+       tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
+       tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
+       tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
+       tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
+       tss->fs = get_segment_selector(vcpu, VCPU_SREG_FS);
+       tss->gs = get_segment_selector(vcpu, VCPU_SREG_GS);
+       tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
+       tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
+}
+
+static int load_state_from_tss32(struct kvm_vcpu *vcpu,
+                                 struct tss_segment_32 *tss)
+{
+       kvm_set_cr3(vcpu, tss->cr3);
+
+       vcpu->arch.rip = tss->eip;
+       kvm_x86_ops->set_rflags(vcpu, tss->eflags | 2);
+
+       vcpu->arch.regs[VCPU_REGS_RAX] = tss->eax;
+       vcpu->arch.regs[VCPU_REGS_RCX] = tss->ecx;
+       vcpu->arch.regs[VCPU_REGS_RDX] = tss->edx;
+       vcpu->arch.regs[VCPU_REGS_RBX] = tss->ebx;
+       vcpu->arch.regs[VCPU_REGS_RSP] = tss->esp;
+       vcpu->arch.regs[VCPU_REGS_RBP] = tss->ebp;
+       vcpu->arch.regs[VCPU_REGS_RSI] = tss->esi;
+       vcpu->arch.regs[VCPU_REGS_RDI] = tss->edi;
+
+       if (load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
+               return 1;
+
+       if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+               return 1;
+
+       if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+               return 1;
+
+       if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+               return 1;
+
+       if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+               return 1;
+
+       if (load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
+               return 1;
+
+       if (load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
+               return 1;
+       return 0;
+}
+
+static void save_state_to_tss16(struct kvm_vcpu *vcpu,
+                               struct tss_segment_16 *tss)
+{
+       tss->ip = vcpu->arch.rip;
+       tss->flag = kvm_x86_ops->get_rflags(vcpu);
+       tss->ax = vcpu->arch.regs[VCPU_REGS_RAX];
+       tss->cx = vcpu->arch.regs[VCPU_REGS_RCX];
+       tss->dx = vcpu->arch.regs[VCPU_REGS_RDX];
+       tss->bx = vcpu->arch.regs[VCPU_REGS_RBX];
+       tss->sp = vcpu->arch.regs[VCPU_REGS_RSP];
+       tss->bp = vcpu->arch.regs[VCPU_REGS_RBP];
+       tss->si = vcpu->arch.regs[VCPU_REGS_RSI];
+       tss->di = vcpu->arch.regs[VCPU_REGS_RDI];
+
+       tss->es = get_segment_selector(vcpu, VCPU_SREG_ES);
+       tss->cs = get_segment_selector(vcpu, VCPU_SREG_CS);
+       tss->ss = get_segment_selector(vcpu, VCPU_SREG_SS);
+       tss->ds = get_segment_selector(vcpu, VCPU_SREG_DS);
+       tss->ldt = get_segment_selector(vcpu, VCPU_SREG_LDTR);
+       tss->prev_task_link = get_segment_selector(vcpu, VCPU_SREG_TR);
+}
+
+static int load_state_from_tss16(struct kvm_vcpu *vcpu,
+                                struct tss_segment_16 *tss)
+{
+       vcpu->arch.rip = tss->ip;
+       kvm_x86_ops->set_rflags(vcpu, tss->flag | 2);
+       vcpu->arch.regs[VCPU_REGS_RAX] = tss->ax;
+       vcpu->arch.regs[VCPU_REGS_RCX] = tss->cx;
+       vcpu->arch.regs[VCPU_REGS_RDX] = tss->dx;
+       vcpu->arch.regs[VCPU_REGS_RBX] = tss->bx;
+       vcpu->arch.regs[VCPU_REGS_RSP] = tss->sp;
+       vcpu->arch.regs[VCPU_REGS_RBP] = tss->bp;
+       vcpu->arch.regs[VCPU_REGS_RSI] = tss->si;
+       vcpu->arch.regs[VCPU_REGS_RDI] = tss->di;
+
+       if (load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
+               return 1;
+
+       if (load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+               return 1;
+
+       if (load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+               return 1;
+
+       if (load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+               return 1;
+
+       if (load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+               return 1;
+       return 0;
+}
+
+int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
+                      struct desc_struct *cseg_desc,
+                      struct desc_struct *nseg_desc)
+{
+       struct tss_segment_16 tss_segment_16;
+       int ret = 0;
+
+       if (load_tss_segment16(vcpu, cseg_desc, &tss_segment_16))
+               goto out;
+
+       save_state_to_tss16(vcpu, &tss_segment_16);
+       save_tss_segment16(vcpu, cseg_desc, &tss_segment_16);
+
+       if (load_tss_segment16(vcpu, nseg_desc, &tss_segment_16))
+               goto out;
+       if (load_state_from_tss16(vcpu, &tss_segment_16))
+               goto out;
+
+       ret = 1;
+out:
+       return ret;
+}
+
+int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
+                      struct desc_struct *cseg_desc,
+                      struct desc_struct *nseg_desc)
+{
+       struct tss_segment_32 tss_segment_32;
+       int ret = 0;
+
+       if (load_tss_segment32(vcpu, cseg_desc, &tss_segment_32))
+               goto out;
+
+       save_state_to_tss32(vcpu, &tss_segment_32);
+       save_tss_segment32(vcpu, cseg_desc, &tss_segment_32);
+
+       if (load_tss_segment32(vcpu, nseg_desc, &tss_segment_32))
+               goto out;
+       if (load_state_from_tss32(vcpu, &tss_segment_32))
+               goto out;
+
+       ret = 1;
+out:
+       return ret;
 }
 
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
+{
+       struct kvm_segment tr_seg;
+       struct desc_struct cseg_desc;
+       struct desc_struct nseg_desc;
+       int ret = 0;
+
+       get_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+
+       if (load_guest_segment_descriptor(vcpu, tss_selector, &nseg_desc))
+               goto out;
+
+       if (load_guest_segment_descriptor(vcpu, tr_seg.selector, &cseg_desc))
+               goto out;
+
+       if (reason != TASK_SWITCH_IRET) {
+               int cpl;
+
+               cpl = kvm_x86_ops->get_cpl(vcpu);
+               if ((tss_selector & 3) > nseg_desc.dpl || cpl > nseg_desc.dpl) {
+                       kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+                       return 1;
+               }
+       }
+
+       if (!nseg_desc.p || (nseg_desc.limit0 | nseg_desc.limit << 16) < 0x67) {
+               kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
+               return 1;
+       }
+
+       if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
+               cseg_desc.type &= ~(1 << 1); /* clear the busy (B) flag */
+               save_guest_segment_descriptor(vcpu, tr_seg.selector,
+                                             &cseg_desc);
+       }
+
+       if (reason == TASK_SWITCH_IRET) {
+               u32 eflags = kvm_x86_ops->get_rflags(vcpu);
+               kvm_x86_ops->set_rflags(vcpu, eflags & ~X86_EFLAGS_NT);
+       }
+
+       kvm_x86_ops->skip_emulated_instruction(vcpu);
+       kvm_x86_ops->cache_regs(vcpu);
+
+       if (nseg_desc.type & 8)
+               ret = kvm_task_switch_32(vcpu, tss_selector, &cseg_desc,
+                                        &nseg_desc);
+       else
+               ret = kvm_task_switch_16(vcpu, tss_selector, &cseg_desc,
+                                        &nseg_desc);
+
+       if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE) {
+               u32 eflags = kvm_x86_ops->get_rflags(vcpu);
+               kvm_x86_ops->set_rflags(vcpu, eflags | X86_EFLAGS_NT);
+       }
+
+       if (reason != TASK_SWITCH_IRET) {
+               nseg_desc.type |= (1 << 1); /* set the busy (B) flag */
+               save_guest_segment_descriptor(vcpu, tss_selector,
+                                             &nseg_desc);
+       }
+
+       kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 | X86_CR0_TS);
+       seg_desct_to_kvm_desct(&nseg_desc, tss_selector, &tr_seg);
+       tr_seg.type = 11;
+       set_segment(vcpu, &tr_seg, VCPU_SREG_TR);
+out:
+       kvm_x86_ops->decache_regs(vcpu);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(kvm_task_switch);
+
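For reference, the busy-bit handling above relies on the 32-bit TSS descriptor type encodings; a short sketch (the macro names are illustrative, not from the patch):

        #define TSS32_TYPE_AVAILABLE    0x9     /* 1001b: available 32-bit TSS */
        #define TSS32_TYPE_BUSY         0xb     /* 1011b: busy 32-bit TSS, B flag is bit 1 */
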
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
 {
@@ -2862,12 +3542,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3;
        vcpu->arch.cr3 = sregs->cr3;
 
-       set_cr8(vcpu, sregs->cr8);
+       kvm_set_cr8(vcpu, sregs->cr8);
 
        mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
-#ifdef CONFIG_X86_64
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
-#endif
        kvm_set_apic_base(vcpu, sregs->apic_base);
 
        kvm_x86_ops->decache_cr4_guest_bits(vcpu);
@@ -3141,9 +3819,9 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
        if (!irqchip_in_kernel(kvm) || vcpu->vcpu_id == 0)
-               vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
+               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
        else
-               vcpu->arch.mp_state = VCPU_MP_STATE_UNINITIALIZED;
+               vcpu->arch.mp_state = KVM_MP_STATE_UNINITIALIZED;
 
        page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page) {
@@ -3175,7 +3853,9 @@ fail:
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
        kvm_free_lapic(vcpu);
+       down_read(&vcpu->kvm->slots_lock);
        kvm_mmu_destroy(vcpu);
+       up_read(&vcpu->kvm->slots_lock);
        free_page((unsigned long)vcpu->arch.pio_data);
 }
 
@@ -3219,10 +3899,13 @@ static void kvm_free_vcpus(struct kvm *kvm)
 
 void kvm_arch_destroy_vm(struct kvm *kvm)
 {
+       kvm_free_pit(kvm);
        kfree(kvm->arch.vpic);
        kfree(kvm->arch.vioapic);
        kvm_free_vcpus(kvm);
        kvm_free_physmem(kvm);
+       if (kvm->arch.apic_access_page)
+               put_page(kvm->arch.apic_access_page);
        kfree(kvm);
 }
 
@@ -3278,8 +3961,8 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-       return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
-              || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
+       return vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE
+              || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED;
 }
 
 static void vcpu_kick_intr(void *info)
@@ -3293,11 +3976,17 @@ static void vcpu_kick_intr(void *info)
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
 {
        int ipi_pcpu = vcpu->cpu;
+       int cpu = get_cpu();
 
        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                ++vcpu->stat.halt_wakeup;
        }
-       if (vcpu->guest_mode)
+       /*
+        * We may be called synchronously with irqs disabled in guest mode,
+        * in which case the vcpu is already running on this cpu and there
+        * is no need to kick it with smp_call_function_single().
+        */
+       if (vcpu->guest_mode && vcpu->cpu != cpu)
                smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0);
+       put_cpu();
 }
index 79586003397aa7c90bc171f020710ce1fa2054e0..2ca08386f9937ea43c4d126ca011af96c10abde9 100644 (file)
 #define MemAbs      (1<<9)      /* Memory operand is absolute displacement */
 #define String      (1<<10)     /* String instruction (rep capable) */
 #define Stack       (1<<11)     /* Stack instruction (push/pop) */
+#define Group       (1<<14)     /* Bits 3:5 of modrm byte extend opcode */
+#define GroupDual   (1<<15)     /* Alternate decoding of mod == 3 */
+#define GroupMask   0xff        /* Group number stored in bits 0:7 */
+
+enum {
+       Group1_80, Group1_81, Group1_82, Group1_83,
+       Group1A, Group3_Byte, Group3, Group4, Group5, Group7,
+};
 
 static u16 opcode_table[256] = {
        /* 0x00 - 0x07 */
@@ -123,14 +131,14 @@ static u16 opcode_table[256] = {
        ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
        ImplicitOps, ImplicitOps, ImplicitOps, ImplicitOps,
        /* 0x80 - 0x87 */
-       ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
-       ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM,
+       Group | Group1_80, Group | Group1_81,
+       Group | Group1_82, Group | Group1_83,
        ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
        ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
        /* 0x88 - 0x8F */
        ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
        ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
-       0, ModRM | DstReg, 0, DstMem | SrcNone | ModRM | Mov | Stack,
+       0, ModRM | DstReg, 0, Group | Group1A,
        /* 0x90 - 0x9F */
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
@@ -164,16 +172,15 @@ static u16 opcode_table[256] = {
        0, 0, 0, 0,
        /* 0xF0 - 0xF7 */
        0, 0, 0, 0,
-       ImplicitOps, ImplicitOps,
-       ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
+       ImplicitOps, ImplicitOps, Group | Group3_Byte, Group | Group3,
        /* 0xF8 - 0xFF */
        ImplicitOps, 0, ImplicitOps, ImplicitOps,
-       0, 0, ByteOp | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM
+       0, 0, Group | Group4, Group | Group5,
 };
 
 static u16 twobyte_table[256] = {
        /* 0x00 - 0x0F */
-       0, SrcMem | ModRM | DstReg, 0, 0, 0, 0, ImplicitOps, 0,
+       0, Group | GroupDual | Group7, 0, 0, 0, 0, ImplicitOps, 0,
        ImplicitOps, ImplicitOps, 0, 0, 0, ImplicitOps | ModRM, 0, 0,
        /* 0x10 - 0x1F */
        0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0,
@@ -229,6 +236,56 @@ static u16 twobyte_table[256] = {
        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
 };
 
+static u16 group_table[] = {
+       [Group1_80*8] =
+       ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+       ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+       ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+       ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+       [Group1_81*8] =
+       DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
+       DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
+       DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
+       DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
+       [Group1_82*8] =
+       ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+       ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+       ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+       ByteOp | DstMem | SrcImm | ModRM, ByteOp | DstMem | SrcImm | ModRM,
+       [Group1_83*8] =
+       DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+       DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+       DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+       DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM,
+       [Group1A*8] =
+       DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
+       [Group3_Byte*8] =
+       ByteOp | SrcImm | DstMem | ModRM, 0,
+       ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
+       0, 0, 0, 0,
+       [Group3*8] =
+       DstMem | SrcImm | ModRM | SrcImm, 0,
+       DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
+       0, 0, 0, 0,
+       [Group4*8] =
+       ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
+       0, 0, 0, 0, 0, 0,
+       [Group5*8] =
+       DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM, 0, 0,
+       SrcMem | ModRM, 0, SrcMem | ModRM | Stack, 0,
+       [Group7*8] =
+       0, 0, ModRM | SrcMem, ModRM | SrcMem,
+       SrcNone | ModRM | DstMem | Mov, 0,
+       SrcMem16 | ModRM | Mov, SrcMem | ModRM | ByteOp,
+};
+
+static u16 group2_table[] = {
+       [Group7*8] =
+       SrcNone | ModRM, 0, 0, 0,
+       SrcNone | ModRM | DstMem | Mov, 0,
+       SrcMem16 | ModRM | Mov, 0,
+};
+
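A brief sketch of how the decoder indexes these tables: the group number selects a block of eight entries and ModRM bits 3..5 select within it (illustrative instruction bytes below):

        /* Editor's sketch: opcode 0xf7, ModRM 0xd0 (mod=3, reg=2, rm=0) -> "not". */
        u8  modrm = 0xd0;
        int group = opcode_table[0xf7] & GroupMask;                /* Group3 */
        u16 d     = group_table[(group << 3) + ((modrm >> 3) & 7)];
        /* d == (DstMem | SrcNone | ModRM), the Group3 /2 entry */
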
 /* EFLAGS bit definitions. */
 #define EFLG_OF (1<<11)
 #define EFLG_DF (1<<10)
@@ -317,7 +374,7 @@ static u16 twobyte_table[256] = {
 
 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
        do {                                                                 \
-               unsigned long _tmp;                                          \
+               unsigned long __tmp;                                         \
                switch ((_dst).bytes) {                                      \
                case 1:                                                      \
                        __asm__ __volatile__ (                               \
@@ -325,7 +382,7 @@ static u16 twobyte_table[256] = {
                                _op"b %"_bx"3,%1; "                          \
                                _POST_EFLAGS("0", "4", "2")                  \
                                : "=m" (_eflags), "=m" ((_dst).val),         \
-                                 "=&r" (_tmp)                               \
+                                 "=&r" (__tmp)                              \
                                : _by ((_src).val), "i" (EFLAGS_MASK));      \
                        break;                                               \
                default:                                                     \
@@ -426,29 +483,40 @@ static u16 twobyte_table[256] = {
        (_type)_x;                                                      \
 })
 
+static inline unsigned long ad_mask(struct decode_cache *c)
+{
+       return (1UL << (c->ad_bytes << 3)) - 1;
+}
+
 /* Access/update address held in a register, based on addressing mode. */
-#define address_mask(reg)                                              \
-       ((c->ad_bytes == sizeof(unsigned long)) ?                       \
-               (reg) : ((reg) & ((1UL << (c->ad_bytes << 3)) - 1)))
-#define register_address(base, reg)                                     \
-       ((base) + address_mask(reg))
-#define register_address_increment(reg, inc)                            \
-       do {                                                            \
-               /* signed type ensures sign extension to long */        \
-               int _inc = (inc);                                       \
-               if (c->ad_bytes == sizeof(unsigned long))               \
-                       (reg) += _inc;                                  \
-               else                                                    \
-                       (reg) = ((reg) &                                \
-                                ~((1UL << (c->ad_bytes << 3)) - 1)) |  \
-                               (((reg) + _inc) &                       \
-                                ((1UL << (c->ad_bytes << 3)) - 1));    \
-       } while (0)
+static inline unsigned long
+address_mask(struct decode_cache *c, unsigned long reg)
+{
+       if (c->ad_bytes == sizeof(unsigned long))
+               return reg;
+       else
+               return reg & ad_mask(c);
+}
 
-#define JMP_REL(rel)                                                   \
-       do {                                                            \
-               register_address_increment(c->eip, rel);                \
-       } while (0)
+static inline unsigned long
+register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
+{
+       return base + address_mask(c, reg);
+}
+
+static inline void
+register_address_increment(struct decode_cache *c, unsigned long *reg, int inc)
+{
+       if (c->ad_bytes == sizeof(unsigned long))
+               *reg += inc;
+       else
+               *reg = (*reg & ~ad_mask(c)) | ((*reg + inc) & ad_mask(c));
+}
+
+static inline void jmp_rel(struct decode_cache *c, int rel)
+{
+       register_address_increment(c, &c->eip, rel);
+}
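
A small usage sketch of the new address helpers, showing the 16-bit wrap-around the old macros encoded (illustrative values; decode_cache is only partially initialized here):

        struct decode_cache c = { .ad_bytes = 2 };              /* 16-bit addressing */
        unsigned long sp = 0x0001;

        register_address_increment(&c, &sp, -4);                /* sp wraps to 0xfffd */
        /* effective address = ss_base + (sp masked to 16 bits) */
        unsigned long ea = register_address(&c, 0x20000, sp);   /* 0x2fffd */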
 
 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
                              struct x86_emulate_ops *ops,
@@ -763,7 +831,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
        struct decode_cache *c = &ctxt->decode;
        int rc = 0;
        int mode = ctxt->mode;
-       int def_op_bytes, def_ad_bytes;
+       int def_op_bytes, def_ad_bytes, group;
 
        /* Shadow copy of register state. Committed on successful emulation. */
 
@@ -864,12 +932,24 @@ done_prefixes:
                        c->b = insn_fetch(u8, 1, c->eip);
                        c->d = twobyte_table[c->b];
                }
+       }
 
-               /* Unrecognised? */
-               if (c->d == 0) {
-                       DPRINTF("Cannot emulate %02x\n", c->b);
-                       return -1;
-               }
+       if (c->d & Group) {
+               group = c->d & GroupMask;
+               c->modrm = insn_fetch(u8, 1, c->eip);
+               --c->eip;
+
+               group = (group << 3) + ((c->modrm >> 3) & 7);
+               if ((c->d & GroupDual) && (c->modrm >> 6) == 3)
+                       c->d = group2_table[group];
+               else
+                       c->d = group_table[group];
+       }
+
+       /* Unrecognised? */
+       if (c->d == 0) {
+               DPRINTF("Cannot emulate %02x\n", c->b);
+               return -1;
        }
 
        if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
@@ -924,6 +1004,7 @@ done_prefixes:
                 */
                if ((c->d & ModRM) && c->modrm_mod == 3) {
                        c->src.type = OP_REG;
+                       c->src.val = c->modrm_val;
                        break;
                }
                c->src.type = OP_MEM;
@@ -967,6 +1048,7 @@ done_prefixes:
        case DstMem:
                if ((c->d & ModRM) && c->modrm_mod == 3) {
                        c->dst.type = OP_REG;
+                       c->dst.val = c->dst.orig_val = c->modrm_val;
                        break;
                }
                c->dst.type = OP_MEM;
@@ -984,8 +1066,8 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
        c->dst.type  = OP_MEM;
        c->dst.bytes = c->op_bytes;
        c->dst.val = c->src.val;
-       register_address_increment(c->regs[VCPU_REGS_RSP], -c->op_bytes);
-       c->dst.ptr = (void *) register_address(ctxt->ss_base,
+       register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
+       c->dst.ptr = (void *) register_address(c, ctxt->ss_base,
                                               c->regs[VCPU_REGS_RSP]);
 }
 
@@ -995,13 +1077,13 @@ static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
        struct decode_cache *c = &ctxt->decode;
        int rc;
 
-       rc = ops->read_std(register_address(ctxt->ss_base,
+       rc = ops->read_std(register_address(c, ctxt->ss_base,
                                            c->regs[VCPU_REGS_RSP]),
                           &c->dst.val, c->dst.bytes, ctxt->vcpu);
        if (rc != 0)
                return rc;
 
-       register_address_increment(c->regs[VCPU_REGS_RSP], c->dst.bytes);
+       register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->dst.bytes);
 
        return 0;
 }
@@ -1043,26 +1125,6 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
 
        switch (c->modrm_reg) {
        case 0 ... 1:   /* test */
-               /*
-                * Special case in Grp3: test has an immediate
-                * source operand.
-                */
-               c->src.type = OP_IMM;
-               c->src.ptr = (unsigned long *)c->eip;
-               c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-               if (c->src.bytes == 8)
-                       c->src.bytes = 4;
-               switch (c->src.bytes) {
-               case 1:
-                       c->src.val = insn_fetch(s8, 1, c->eip);
-                       break;
-               case 2:
-                       c->src.val = insn_fetch(s16, 2, c->eip);
-                       break;
-               case 4:
-                       c->src.val = insn_fetch(s32, 4, c->eip);
-                       break;
-               }
                emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags);
                break;
        case 2: /* not */
@@ -1076,7 +1138,6 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
                rc = X86EMUL_UNHANDLEABLE;
                break;
        }
-done:
        return rc;
 }
 
@@ -1084,7 +1145,6 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
                               struct x86_emulate_ops *ops)
 {
        struct decode_cache *c = &ctxt->decode;
-       int rc;
 
        switch (c->modrm_reg) {
        case 0: /* inc */
@@ -1094,36 +1154,11 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
                emulate_1op("dec", c->dst, ctxt->eflags);
                break;
        case 4: /* jmp abs */
-               if (c->b == 0xff)
-                       c->eip = c->dst.val;
-               else {
-                       DPRINTF("Cannot emulate %02x\n", c->b);
-                       return X86EMUL_UNHANDLEABLE;
-               }
+               c->eip = c->src.val;
                break;
        case 6: /* push */
-
-               /* 64-bit mode: PUSH always pushes a 64-bit operand. */
-
-               if (ctxt->mode == X86EMUL_MODE_PROT64) {
-                       c->dst.bytes = 8;
-                       rc = ops->read_std((unsigned long)c->dst.ptr,
-                                          &c->dst.val, 8, ctxt->vcpu);
-                       if (rc != 0)
-                               return rc;
-               }
-               register_address_increment(c->regs[VCPU_REGS_RSP],
-                                          -c->dst.bytes);
-               rc = ops->write_emulated(register_address(ctxt->ss_base,
-                                   c->regs[VCPU_REGS_RSP]), &c->dst.val,
-                                   c->dst.bytes, ctxt->vcpu);
-               if (rc != 0)
-                       return rc;
-               c->dst.type = OP_NONE;
+               emulate_push(ctxt);
                break;
-       default:
-               DPRINTF("Cannot emulate %02x\n", c->b);
-               return X86EMUL_UNHANDLEABLE;
        }
        return 0;
 }
@@ -1361,19 +1396,19 @@ special_insn:
                c->dst.type  = OP_MEM;
                c->dst.bytes = c->op_bytes;
                c->dst.val = c->src.val;
-               register_address_increment(c->regs[VCPU_REGS_RSP],
+               register_address_increment(c, &c->regs[VCPU_REGS_RSP],
                                           -c->op_bytes);
                c->dst.ptr = (void *) register_address(
-                       ctxt->ss_base, c->regs[VCPU_REGS_RSP]);
+                       c, ctxt->ss_base, c->regs[VCPU_REGS_RSP]);
                break;
        case 0x58 ... 0x5f: /* pop reg */
        pop_instruction:
-               if ((rc = ops->read_std(register_address(ctxt->ss_base,
+               if ((rc = ops->read_std(register_address(c, ctxt->ss_base,
                        c->regs[VCPU_REGS_RSP]), c->dst.ptr,
                        c->op_bytes, ctxt->vcpu)) != 0)
                        goto done;
 
-               register_address_increment(c->regs[VCPU_REGS_RSP],
+               register_address_increment(c, &c->regs[VCPU_REGS_RSP],
                                           c->op_bytes);
                c->dst.type = OP_NONE;  /* Disable writeback. */
                break;
@@ -1393,9 +1428,9 @@ special_insn:
                                1,
                                (c->d & ByteOp) ? 1 : c->op_bytes,
                                c->rep_prefix ?
-                               address_mask(c->regs[VCPU_REGS_RCX]) : 1,
+                               address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
                                (ctxt->eflags & EFLG_DF),
-                               register_address(ctxt->es_base,
+                               register_address(c, ctxt->es_base,
                                                 c->regs[VCPU_REGS_RDI]),
                                c->rep_prefix,
                                c->regs[VCPU_REGS_RDX]) == 0) {
@@ -1409,9 +1444,9 @@ special_insn:
                                0,
                                (c->d & ByteOp) ? 1 : c->op_bytes,
                                c->rep_prefix ?
-                               address_mask(c->regs[VCPU_REGS_RCX]) : 1,
+                               address_mask(c, c->regs[VCPU_REGS_RCX]) : 1,
                                (ctxt->eflags & EFLG_DF),
-                               register_address(c->override_base ?
+                               register_address(c, c->override_base ?
                                                        *c->override_base :
                                                        ctxt->ds_base,
                                                 c->regs[VCPU_REGS_RSI]),
@@ -1425,7 +1460,7 @@ special_insn:
                int rel = insn_fetch(s8, 1, c->eip);
 
                if (test_cc(c->b, ctxt->eflags))
-                       JMP_REL(rel);
+                       jmp_rel(c, rel);
                break;
        }
        case 0x80 ... 0x83:     /* Grp1 */
@@ -1477,7 +1512,7 @@ special_insn:
        case 0x88 ... 0x8b:     /* mov */
                goto mov;
        case 0x8d: /* lea r16/r32, m */
-               c->dst.val = c->modrm_val;
+               c->dst.val = c->modrm_ea;
                break;
        case 0x8f:              /* pop (sole member of Grp1a) */
                rc = emulate_grp1a(ctxt, ops);
@@ -1501,27 +1536,27 @@ special_insn:
        case 0xa4 ... 0xa5:     /* movs */
                c->dst.type = OP_MEM;
                c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-               c->dst.ptr = (unsigned long *)register_address(
+               c->dst.ptr = (unsigned long *)register_address(c,
                                                   ctxt->es_base,
                                                   c->regs[VCPU_REGS_RDI]);
-               if ((rc = ops->read_emulated(register_address(
+               if ((rc = ops->read_emulated(register_address(c,
                      c->override_base ? *c->override_base :
                                        ctxt->ds_base,
                                        c->regs[VCPU_REGS_RSI]),
                                        &c->dst.val,
                                        c->dst.bytes, ctxt->vcpu)) != 0)
                        goto done;
-               register_address_increment(c->regs[VCPU_REGS_RSI],
+               register_address_increment(c, &c->regs[VCPU_REGS_RSI],
                                       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
                                                           : c->dst.bytes);
-               register_address_increment(c->regs[VCPU_REGS_RDI],
+               register_address_increment(c, &c->regs[VCPU_REGS_RDI],
                                       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
                                                           : c->dst.bytes);
                break;
        case 0xa6 ... 0xa7:     /* cmps */
                c->src.type = OP_NONE; /* Disable writeback. */
                c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-               c->src.ptr = (unsigned long *)register_address(
+               c->src.ptr = (unsigned long *)register_address(c,
                                c->override_base ? *c->override_base :
                                                   ctxt->ds_base,
                                                   c->regs[VCPU_REGS_RSI]);
@@ -1533,7 +1568,7 @@ special_insn:
 
                c->dst.type = OP_NONE; /* Disable writeback. */
                c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-               c->dst.ptr = (unsigned long *)register_address(
+               c->dst.ptr = (unsigned long *)register_address(c,
                                                   ctxt->es_base,
                                                   c->regs[VCPU_REGS_RDI]);
                if ((rc = ops->read_emulated((unsigned long)c->dst.ptr,
@@ -1546,10 +1581,10 @@ special_insn:
 
                emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
 
-               register_address_increment(c->regs[VCPU_REGS_RSI],
+               register_address_increment(c, &c->regs[VCPU_REGS_RSI],
                                       (ctxt->eflags & EFLG_DF) ? -c->src.bytes
                                                                  : c->src.bytes);
-               register_address_increment(c->regs[VCPU_REGS_RDI],
+               register_address_increment(c, &c->regs[VCPU_REGS_RDI],
                                       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
                                                                  : c->dst.bytes);
 
@@ -1557,11 +1592,11 @@ special_insn:
        case 0xaa ... 0xab:     /* stos */
                c->dst.type = OP_MEM;
                c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-               c->dst.ptr = (unsigned long *)register_address(
+               c->dst.ptr = (unsigned long *)register_address(c,
                                                   ctxt->es_base,
                                                   c->regs[VCPU_REGS_RDI]);
                c->dst.val = c->regs[VCPU_REGS_RAX];
-               register_address_increment(c->regs[VCPU_REGS_RDI],
+               register_address_increment(c, &c->regs[VCPU_REGS_RDI],
                                       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
                                                           : c->dst.bytes);
                break;
@@ -1569,7 +1604,7 @@ special_insn:
                c->dst.type = OP_REG;
                c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
                c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
-               if ((rc = ops->read_emulated(register_address(
+               if ((rc = ops->read_emulated(register_address(c,
                                c->override_base ? *c->override_base :
                                                   ctxt->ds_base,
                                                 c->regs[VCPU_REGS_RSI]),
@@ -1577,7 +1612,7 @@ special_insn:
                                                 c->dst.bytes,
                                                 ctxt->vcpu)) != 0)
                        goto done;
-               register_address_increment(c->regs[VCPU_REGS_RSI],
+               register_address_increment(c, &c->regs[VCPU_REGS_RSI],
                                       (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
                                                           : c->dst.bytes);
                break;
@@ -1616,14 +1651,14 @@ special_insn:
                        goto cannot_emulate;
                }
                c->src.val = (unsigned long) c->eip;
-               JMP_REL(rel);
+               jmp_rel(c, rel);
                c->op_bytes = c->ad_bytes;
                emulate_push(ctxt);
                break;
        }
        case 0xe9: /* jmp rel */
        case 0xeb: /* jmp rel short */
-               JMP_REL(c->src.val);
+               jmp_rel(c, c->src.val);
                c->dst.type = OP_NONE; /* Disable writeback. */
                break;
        case 0xf4:              /* hlt */
@@ -1690,6 +1725,8 @@ twobyte_insn:
                                goto done;
 
                        kvm_emulate_hypercall(ctxt->vcpu);
+                       /* Disable writeback. */
+                       c->dst.type = OP_NONE;
                        break;
                case 2: /* lgdt */
                        rc = read_descriptor(ctxt, ops, c->src.ptr,
@@ -1697,6 +1734,8 @@ twobyte_insn:
                        if (rc)
                                goto done;
                        realmode_lgdt(ctxt->vcpu, size, address);
+                       /* Disable writeback. */
+                       c->dst.type = OP_NONE;
                        break;
                case 3: /* lidt/vmmcall */
                        if (c->modrm_mod == 3 && c->modrm_rm == 1) {
@@ -1712,27 +1751,25 @@ twobyte_insn:
                                        goto done;
                                realmode_lidt(ctxt->vcpu, size, address);
                        }
+                       /* Disable writeback. */
+                       c->dst.type = OP_NONE;
                        break;
                case 4: /* smsw */
-                       if (c->modrm_mod != 3)
-                               goto cannot_emulate;
-                       *(u16 *)&c->regs[c->modrm_rm]
-                               = realmode_get_cr(ctxt->vcpu, 0);
+                       c->dst.bytes = 2;
+                       c->dst.val = realmode_get_cr(ctxt->vcpu, 0);
                        break;
                case 6: /* lmsw */
-                       if (c->modrm_mod != 3)
-                               goto cannot_emulate;
-                       realmode_lmsw(ctxt->vcpu, (u16)c->modrm_val,
-                                                 &ctxt->eflags);
+                       realmode_lmsw(ctxt->vcpu, (u16)c->src.val,
+                                     &ctxt->eflags);
                        break;
                case 7: /* invlpg*/
                        emulate_invlpg(ctxt->vcpu, memop);
+                       /* Disable writeback. */
+                       c->dst.type = OP_NONE;
                        break;
                default:
                        goto cannot_emulate;
                }
-               /* Disable writeback. */
-               c->dst.type = OP_NONE;
                break;
        case 0x06:
                emulate_clts(ctxt->vcpu);
@@ -1823,7 +1860,7 @@ twobyte_insn:
                        goto cannot_emulate;
                }
                if (test_cc(c->b, ctxt->eflags))
-                       JMP_REL(rel);
+                       jmp_rel(c, rel);
                c->dst.type = OP_NONE;
                break;
        }
index 25df1c1989fe5e5574ba4e833978d99317b168c2..76f60f52a885246ff1a8f6ce9bad2c3791d1df94 100644 (file)
@@ -11,7 +11,7 @@ lib-y += memcpy_$(BITS).o
 ifeq ($(CONFIG_X86_32),y)
         lib-y += checksum_32.o
         lib-y += strstr_32.o
-        lib-y += bitops_32.o semaphore_32.o string_32.o
+        lib-y += semaphore_32.o string_32.o
 
         lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
 else
@@ -21,7 +21,6 @@ else
 
         lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
         lib-y += thunk_64.o clear_page_64.o copy_page_64.o
-        lib-y += bitops_64.o
         lib-y += memmove_64.o memset_64.o
         lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
 endif
diff --git a/arch/x86/lib/bitops_32.c b/arch/x86/lib/bitops_32.c
deleted file mode 100644 (file)
index b654404..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-#include <linux/bitops.h>
-#include <linux/module.h>
-
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-int find_next_bit(const unsigned long *addr, int size, int offset)
-{
-       const unsigned long *p = addr + (offset >> 5);
-       int set = 0, bit = offset & 31, res;
-
-       if (bit) {
-               /*
-                * Look for nonzero in the first 32 bits:
-                */
-               __asm__("bsfl %1,%0\n\t"
-                       "jne 1f\n\t"
-                       "movl $32, %0\n"
-                       "1:"
-                       : "=r" (set)
-                       : "r" (*p >> bit));
-               if (set < (32 - bit))
-                       return set + offset;
-               set = 32 - bit;
-               p++;
-       }
-       /*
-        * No set bit yet, search remaining full words for a bit
-        */
-       res = find_first_bit (p, size - 32 * (p - addr));
-       return (offset + set + res);
-}
-EXPORT_SYMBOL(find_next_bit);
-
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-int find_next_zero_bit(const unsigned long *addr, int size, int offset)
-{
-       const unsigned long *p = addr + (offset >> 5);
-       int set = 0, bit = offset & 31, res;
-
-       if (bit) {
-               /*
-                * Look for zero in the first 32 bits.
-                */
-               __asm__("bsfl %1,%0\n\t"
-                       "jne 1f\n\t"
-                       "movl $32, %0\n"
-                       "1:"
-                       : "=r" (set)
-                       : "r" (~(*p >> bit)));
-               if (set < (32 - bit))
-                       return set + offset;
-               set = 32 - bit;
-               p++;
-       }
-       /*
-        * No zero yet, search remaining full bytes for a zero
-        */
-       res = find_first_zero_bit(p, size - 32 * (p - addr));
-       return (offset + set + res);
-}
-EXPORT_SYMBOL(find_next_zero_bit);
diff --git a/arch/x86/lib/bitops_64.c b/arch/x86/lib/bitops_64.c
deleted file mode 100644 (file)
index 0e8f491..0000000
+++ /dev/null
@@ -1,175 +0,0 @@
-#include <linux/bitops.h>
-
-#undef find_first_zero_bit
-#undef find_next_zero_bit
-#undef find_first_bit
-#undef find_next_bit
-
-static inline long
-__find_first_zero_bit(const unsigned long * addr, unsigned long size)
-{
-       long d0, d1, d2;
-       long res;
-
-       /*
-        * We must test the size in words, not in bits, because
-        * otherwise incoming sizes in the range -63..-1 will not run
-        * any scasq instructions, and then the flags used by the je
-        * instruction will have whatever random value was in place
-        * before.  Nobody should call us like that, but
-        * find_next_zero_bit() does when offset and size are at the
-        * same word and it fails to find a zero itself.
-        */
-       size += 63;
-       size >>= 6;
-       if (!size)
-               return 0;
-       asm volatile(
-               "  repe; scasq\n"
-               "  je 1f\n"
-               "  xorq -8(%%rdi),%%rax\n"
-               "  subq $8,%%rdi\n"
-               "  bsfq %%rax,%%rdx\n"
-               "1:  subq %[addr],%%rdi\n"
-               "  shlq $3,%%rdi\n"
-               "  addq %%rdi,%%rdx"
-               :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
-               :"0" (0ULL), "1" (size), "2" (addr), "3" (-1ULL),
-                [addr] "S" (addr) : "memory");
-       /*
-        * Any register would do for [addr] above, but GCC tends to
-        * prefer rbx over rsi, even though rsi is readily available
-        * and doesn't have to be saved.
-        */
-       return res;
-}
-
-/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first zero bit, not the number of the byte
- * containing a bit.
- */
-long find_first_zero_bit(const unsigned long * addr, unsigned long size)
-{
-       return __find_first_zero_bit (addr, size);
-}
-
-/**
- * find_next_zero_bit - find the next zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-long find_next_zero_bit (const unsigned long * addr, long size, long offset)
-{
-       const unsigned long * p = addr + (offset >> 6);
-       unsigned long set = 0;
-       unsigned long res, bit = offset&63;
-
-       if (bit) {
-               /*
-                * Look for zero in first word
-                */
-               asm("bsfq %1,%0\n\t"
-                   "cmoveq %2,%0"
-                   : "=r" (set)
-                   : "r" (~(*p >> bit)), "r"(64L));
-               if (set < (64 - bit))
-                       return set + offset;
-               set = 64 - bit;
-               p++;
-       }
-       /*
-        * No zero yet, search remaining full words for a zero
-        */
-       res = __find_first_zero_bit (p, size - 64 * (p - addr));
-
-       return (offset + set + res);
-}
-
-static inline long
-__find_first_bit(const unsigned long * addr, unsigned long size)
-{
-       long d0, d1;
-       long res;
-
-       /*
-        * We must test the size in words, not in bits, because
-        * otherwise incoming sizes in the range -63..-1 will not run
-        * any scasq instructions, and then the flags used by the jz
-        * instruction will have whatever random value was in place
-        * before.  Nobody should call us like that, but
-        * find_next_bit() does when offset and size are at the same
-        * word and it fails to find a one itself.
-        */
-       size += 63;
-       size >>= 6;
-       if (!size)
-               return 0;
-       asm volatile(
-               "   repe; scasq\n"
-               "   jz 1f\n"
-               "   subq $8,%%rdi\n"
-               "   bsfq (%%rdi),%%rax\n"
-               "1: subq %[addr],%%rdi\n"
-               "   shlq $3,%%rdi\n"
-               "   addq %%rdi,%%rax"
-               :"=a" (res), "=&c" (d0), "=&D" (d1)
-               :"0" (0ULL), "1" (size), "2" (addr),
-                [addr] "r" (addr) : "memory");
-       return res;
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit-number of the first set bit, not the number of the byte
- * containing a bit.
- */
-long find_first_bit(const unsigned long * addr, unsigned long size)
-{
-       return __find_first_bit(addr,size);
-}
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
- */
-long find_next_bit(const unsigned long * addr, long size, long offset)
-{
-       const unsigned long * p = addr + (offset >> 6);
-       unsigned long set = 0, bit = offset & 63, res;
-
-       if (bit) {
-               /*
-                * Look for nonzero in the first 64 bits:
-                */
-               asm("bsfq %1,%0\n\t"
-                   "cmoveq %2,%0\n\t"
-                   : "=r" (set)
-                   : "r" (*p >> bit), "r" (64L));
-               if (set < (64 - bit))
-                       return set + offset;
-               set = 64 - bit;
-               p++;
-       }
-       /*
-        * No set bit yet, search remaining full words for a bit
-        */
-       res = __find_first_bit (p, size - 64 * (p - addr));
-       return (offset + set + res);
-}
-
-#include <linux/module.h>
-
-EXPORT_SYMBOL(find_next_bit);
-EXPORT_SYMBOL(find_first_bit);
-EXPORT_SYMBOL(find_first_zero_bit);
-EXPORT_SYMBOL(find_next_zero_bit);
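As an aside, the assembly helpers deleted above are presumably superseded by the generic C bit-search code in lib/; the word-at-a-time approach such a generic find_next_bit() follows looks roughly like the sketch below. Illustration only: the name is hypothetical and 64-bit longs are assumed, this is not the kernel's actual lib/find_next_bit.c.

unsigned long sketch_find_next_bit(const unsigned long *addr,
                                   unsigned long size,
                                   unsigned long offset)
{
        const unsigned long *p = addr + (offset / 64);
        unsigned long result = offset & ~63UL;
        unsigned long tmp;

        if (offset >= size)
                return size;
        size -= result;
        offset &= 63UL;
        if (offset) {
                /* first partial word: ignore bits below the starting offset */
                tmp = *(p++) & (~0UL << offset);
                if (size < 64)
                        goto found_first;
                if (tmp)
                        goto found_middle;
                size -= 64;
                result += 64;
        }
        while (size >= 64) {
                /* scan remaining full words */
                if ((tmp = *(p++)))
                        goto found_middle;
                result += 64;
                size -= 64;
        }
        if (!size)
                return result;
        tmp = *p;
found_first:
        /* last partial word: ignore bits at and above 'size' */
        tmp &= ~0UL >> (64 - size);
        if (!tmp)
                return result + size;           /* no set bit in the region */
found_middle:
        return result + __builtin_ctzl(tmp);    /* index of the lowest set bit */
}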
index ecab9fff0fd17579b43d550f8bae2e5eea98f22b..2ad598c104af77d398e59a1e6d4edc04ed91ea53 100644 (file)
@@ -877,7 +877,7 @@ void __init voyager_cat_init(void)
                        request_resource(&iomem_resource, res);
                }
 
-               qic_addr = (unsigned long)ioremap(qic_addr, 0x400);
+               qic_addr = (unsigned long)ioremap_cache(qic_addr, 0x400);
 
                for (j = 0; j < 4; j++) {
                        __u8 cpu;
index 9cf33d3ee5bc7ee0d10fbc4b0ce9c9fc04697ee8..165c871ba9af0211e0c939e0bc2212750d4bf39f 100644 (file)
@@ -155,4 +155,3 @@ EXPORT_SYMBOL(kmap);
 EXPORT_SYMBOL(kunmap);
 EXPORT_SYMBOL(kmap_atomic);
 EXPORT_SYMBOL(kunmap_atomic);
-EXPORT_SYMBOL(kmap_atomic_to_page);
index 4a4761892951af0c3f0de4a5c055152254c9c26c..de236e419cb5f019feb154b710d519f6c3c95f55 100644 (file)
@@ -287,47 +287,17 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
        pkmap_page_table = pte;
 }
 
-static void __meminit free_new_highpage(struct page *page)
-{
-       init_page_count(page);
-       __free_page(page);
-       totalhigh_pages++;
-}
-
 void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
 {
        if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
                ClearPageReserved(page);
-               free_new_highpage(page);
+               init_page_count(page);
+               __free_page(page);
+               totalhigh_pages++;
        } else
                SetPageReserved(page);
 }
 
-static int __meminit
-add_one_highpage_hotplug(struct page *page, unsigned long pfn)
-{
-       free_new_highpage(page);
-       totalram_pages++;
-#ifdef CONFIG_FLATMEM
-       max_mapnr = max(pfn, max_mapnr);
-#endif
-       num_physpages++;
-
-       return 0;
-}
-
-/*
- * Not currently handling the NUMA case.
- * Assuming single node and all memory that
- * has been added dynamically that would be
- * onlined here is in HIGHMEM.
- */
-void __meminit online_page(struct page *page)
-{
-       ClearPageReserved(page);
-       add_one_highpage_hotplug(page, page_to_pfn(page));
-}
-
 #ifndef CONFIG_NUMA
 static void __init set_highmem_pages_init(int bad_ppro)
 {
index 0cca626630377738a8af7d8f16a29dfdad5ac27e..32ba13b0f818183bbeb2eb37eba74c1bdc4a35a9 100644 (file)
@@ -620,15 +620,6 @@ void __init paging_init(void)
 /*
  * Memory hotplug specific functions
  */
-void online_page(struct page *page)
-{
-       ClearPageReserved(page);
-       init_page_count(page);
-       __free_page(page);
-       totalram_pages++;
-       num_physpages++;
-}
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
  * Memory is added always to NORMAL zone. This means you will never get
@@ -810,7 +801,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
 {
 #ifdef CONFIG_NUMA
-       int nid = phys_to_nid(phys);
+       int nid, next_nid;
 #endif
        unsigned long pfn = phys >> PAGE_SHIFT;
 
@@ -829,10 +820,16 @@ void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
 
        /* Should check here against the e820 map to avoid double free */
 #ifdef CONFIG_NUMA
-       reserve_bootmem_node(NODE_DATA(nid), phys, len, BOOTMEM_DEFAULT);
+       nid = phys_to_nid(phys);
+       next_nid = phys_to_nid(phys + len - 1);
+       if (nid == next_nid)
+               reserve_bootmem_node(NODE_DATA(nid), phys, len, BOOTMEM_DEFAULT);
+       else
+               reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
 #else
        reserve_bootmem(phys, len, BOOTMEM_DEFAULT);
 #endif
+
        if (phys+len <= MAX_DMA_PFN*PAGE_SIZE) {
                dma_reserve += len / PAGE_SIZE;
                set_dma_reserve(dma_reserve);
@@ -926,6 +923,10 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 /*
  * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
  */
+static long __meminitdata addr_start, addr_end;
+static void __meminitdata *p_start, *p_end;
+static int __meminitdata node_start;
+
 int __meminit
 vmemmap_populate(struct page *start_page, unsigned long size, int node)
 {
@@ -960,12 +961,32 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
                                                        PAGE_KERNEL_LARGE);
                        set_pmd(pmd, __pmd(pte_val(entry)));
 
-                       printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",
-                               addr, addr + PMD_SIZE - 1, p, node);
+                       /* check to see if we have contiguous blocks */
+                       if (p_end != p || node_start != node) {
+                               if (p_start)
+                                       printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+                                               addr_start, addr_end-1, p_start, p_end-1, node_start);
+                               addr_start = addr;
+                               node_start = node;
+                               p_start = p;
+                       }
+                       addr_end = addr + PMD_SIZE;
+                       p_end = p + PMD_SIZE;
                } else {
                        vmemmap_verify((pte_t *)pmd, node, addr, next);
                }
        }
        return 0;
 }
+
+void __meminit vmemmap_populate_print_last(void)
+{
+       if (p_start) {
+               printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+                       addr_start, addr_end-1, p_start, p_end-1, node_start);
+               p_start = NULL;
+               p_end = NULL;
+               node_start = 0;
+       }
+}
 #endif
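For reference, the change above batches the per-PMD debug output into one printk per contiguous block, flushed either when a discontiguity is seen or by vmemmap_populate_print_last(). A minimal, self-contained sketch of that start/end-tracking pattern, with hypothetical names and printf standing in for printk:

#include <stdio.h>

static unsigned long run_start, run_end;        /* like addr_start/addr_end above */

static void note_range(unsigned long start, unsigned long end)
{
        if (run_end && start == run_end) {      /* contiguous: extend the current run */
                run_end = end;
                return;
        }
        if (run_end)                            /* discontiguous: report the previous run */
                printf(" [%lx-%lx]\n", run_start, run_end - 1);
        run_start = start;
        run_end = end;
}

static void note_flush(void)                    /* like vmemmap_populate_print_last() */
{
        if (run_end) {
                printf(" [%lx-%lx]\n", run_start, run_end - 1);
                run_start = run_end = 0;
        }
}

int main(void)
{
        note_range(0x1000, 0x2000);             /* coalesces with the next call */
        note_range(0x2000, 0x3000);
        note_range(0x8000, 0x9000);             /* flushes [1000-2fff], starts a new run */
        note_flush();                           /* flushes [8000-8fff] */
        return 0;
}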
index d176b23110cc875187be67d3c22f227ec38cbdeb..71bb3159031afba912c9a60cad9f4510a3074ff3 100644 (file)
@@ -117,8 +117,8 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
-                              unsigned long prot_val)
+static void __iomem *__ioremap_caller(resource_size_t phys_addr,
+               unsigned long size, unsigned long prot_val, void *caller)
 {
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
@@ -149,7 +149,8 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
-                               (pfn << PAGE_SHIFT) < last_addr; pfn++) {
+                               (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
+                               pfn++) {
 
                int is_ram = page_is_ram(pfn);
 
@@ -176,11 +177,11 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                /*
                 * Do not fallback to certain memory types with certain
                 * requested type:
-                * - request is uncached, return cannot be write-back
-                * - request is uncached, return cannot be write-combine
+                * - request is uc-, return cannot be write-back
+                * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
-               if ((prot_val == _PAGE_CACHE_UC &&
+               if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
@@ -201,6 +202,9 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
+       case _PAGE_CACHE_UC_MINUS:
+               prot = PAGE_KERNEL_UC_MINUS;
+               break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
@@ -212,7 +216,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
        /*
         * Ok, go for it..
         */
-       area = get_vm_area(size, VM_IOREMAP);
+       area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
@@ -255,7 +259,17 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
  */
 void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
-       return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
+       /*
+        * Ideally, this should be:
+        *      pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+        *
+        * Till we fix all X drivers to use ioremap_wc(), we will use
+        * UC MINUS.
+        */
+       unsigned long val = _PAGE_CACHE_UC_MINUS;
+
+       return __ioremap_caller(phys_addr, size, val,
+                               __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
@@ -272,7 +286,8 @@ EXPORT_SYMBOL(ioremap_nocache);
 void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
 {
        if (pat_wc_enabled)
-               return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
+               return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
+                                       __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
 }
@@ -280,7 +295,8 @@ EXPORT_SYMBOL(ioremap_wc);
 
 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
-       return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
+       return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
+                               __builtin_return_address(0));
 }
 EXPORT_SYMBOL(ioremap_cache);
 
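A note on the __ioremap_caller() pattern above: each exported wrapper passes its own return address down via __builtin_return_address(0), and get_vm_area_caller() keeps it with the mapping, so diagnostics can attribute the area to the real caller rather than to the shared helper. A hedged, userspace-style sketch of the same idea (alloc_traced() and friends are hypothetical names, not kernel APIs):

#include <stdio.h>
#include <stdlib.h>

static void *__alloc_traced(size_t size, void *caller)
{
        /* record who asked for the memory alongside the allocation */
        printf("allocating %zu bytes on behalf of %p\n", size, caller);
        return malloc(size);
}

void *alloc_traced(size_t size)
{
        /* __builtin_return_address(0) is the address this call returns to,
         * i.e. the real caller of alloc_traced() */
        return __alloc_traced(size, __builtin_return_address(0));
}

int main(void)
{
        free(alloc_traced(32));
        return 0;
}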
index 86808e666f9c2aeea15492a8609400915f819a09..1f476e477844df02d09f21ec23ddb8f5084c4cba 100644 (file)
 #include <linux/nodemask.h>
 #include <asm/io.h>
 #include <linux/pci_ids.h>
+#include <linux/acpi.h>
 #include <asm/types.h>
 #include <asm/mmzone.h>
 #include <asm/proto.h>
 #include <asm/e820.h>
 #include <asm/pci-direct.h>
 #include <asm/numa.h>
+#include <asm/mpspec.h>
+#include <asm/apic.h>
 
 static __init int find_northbridge(void)
 {
@@ -44,6 +47,30 @@ static __init int find_northbridge(void)
        return -1;
 }
 
+static __init void early_get_boot_cpu_id(void)
+{
+       /*
+        * need to get boot_cpu_id so can use that to create apicid_to_node
+        * in k8_scan_nodes()
+        */
+       /*
+        * Find possible boot-time SMP configuration:
+        */
+       early_find_smp_config();
+#ifdef CONFIG_ACPI
+       /*
+        * Read APIC information from ACPI tables.
+        */
+       early_acpi_boot_init();
+#endif
+       /*
+        * get boot-time SMP configuration:
+        */
+       if (smp_found_config)
+               early_get_smp_config();
+       early_init_lapic_mapping();
+}
+
 int __init k8_scan_nodes(unsigned long start, unsigned long end)
 {
        unsigned long prevbase;
@@ -56,6 +83,7 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
        unsigned cores;
        unsigned bits;
        int j;
+       unsigned apicid_base;
 
        if (!early_pci_allowed())
                return -1;
@@ -174,11 +202,19 @@ int __init k8_scan_nodes(unsigned long start, unsigned long end)
        /* use the coreid bits from early_identify_cpu */
        bits = boot_cpu_data.x86_coreid_bits;
        cores = (1<<bits);
+       apicid_base = 0;
+       /* need to get boot_cpu_id early for system with apicid lifting */
+       early_get_boot_cpu_id();
+       if (boot_cpu_physical_apicid > 0) {
+               printk(KERN_INFO "BSP APIC ID: %02x\n",
+                                boot_cpu_physical_apicid);
+               apicid_base = boot_cpu_physical_apicid;
+       }
 
        for (i = 0; i < 8; i++) {
                if (nodes[i].start != nodes[i].end) {
                        nodeid = nodeids[i];
-                       for (j = 0; j < cores; j++)
+                       for (j = apicid_base; j < cores + apicid_base; j++)
                                apicid_to_node[(nodeid << bits) + j] = i;
                        setup_node_bootmem(i, nodes[i].start, nodes[i].end);
                }
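Worked example (illustrative, not part of the commit): with x86_coreid_bits = 2, i.e. four cores per node, and a BSP APIC ID of 4, apicid_base becomes 4 and the loop above fills apicid_to_node[] at indices (nodeid << 2) + 4 through (nodeid << 2) + 7; for HT nodeid 1 that is entries 8 through 11, so the lifted APIC IDs still map to the right node.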
index 9a6892200b271a1ebec5fe64da0adef13b54df78..c5066d519e5de4cf73dec673f6d51c2ca131075b 100644 (file)
@@ -196,6 +196,7 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
        unsigned long bootmap_start, nodedata_phys;
        void *bootmap;
        const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);
+       int nid;
 
        start = round_up(start, ZONE_ALIGN);
 
@@ -218,9 +219,19 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
        NODE_DATA(nodeid)->node_start_pfn = start_pfn;
        NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;
 
-       /* Find a place for the bootmem map */
+       /*
+        * Find a place for the bootmem map
+        * nodedata_phys could be on another node (allocated by alloc_bootmem),
+        * so make sure bootmap_start is not too small; otherwise
+        * early_node_mem will grab it with find_e820_area instead
+        * of alloc_bootmem, which could clash with the reserved range
+        */
        bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-       bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
+       nid = phys_to_nid(nodedata_phys);
+       if (nid == nodeid)
+               bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
+       else
+               bootmap_start = round_up(start, PAGE_SIZE);
        /*
         * SMP_CACHE_BYTES could be enough, but init_bootmem_node likes
         * to use that to align to PAGE_SIZE
@@ -245,10 +256,29 @@ void __init setup_node_bootmem(int nodeid, unsigned long start,
 
        free_bootmem_with_active_regions(nodeid, end);
 
-       reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size,
-                       BOOTMEM_DEFAULT);
-       reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
-                       bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);
+       /*
+        * convert early reservations to bootmem reservations earlier,
+        * otherwise early_node_mem could use early-reserved memory
+        * on a previous node
+        */
+       early_res_to_bootmem(start, end);
+
+       /*
+        * in some cases early_node_mem could use alloc_bootmem
+        * to get a range on another node; don't reserve that again
+        */
+       if (nid != nodeid)
+               printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nodeid, nid);
+       else
+               reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys,
+                                       pgdat_size, BOOTMEM_DEFAULT);
+       nid = phys_to_nid(bootmap_start);
+       if (nid != nodeid)
+               printk(KERN_INFO "    bootmap(%d) on node %d\n", nodeid, nid);
+       else
+               reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
+                                bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);
+
 #ifdef CONFIG_ACPI_NUMA
        srat_reserve_add_area(nodeid);
 #endif
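Both this hunk and the reserve_bootmem_generic() change earlier apply the same rule: use the per-node reservation call only when the range (or structure) actually sits on that node, and fall back to the generic path otherwise. A self-contained sketch of that decision, with toy stand-ins for phys_to_nid() and the two reservation paths:

#include <stdio.h>

static int nid_of(unsigned long phys)           /* toy stand-in for phys_to_nid() */
{
        return phys >= (1UL << 30);             /* pretend node 1 starts at 1GB */
}

static void reserve(unsigned long phys, unsigned long len, int nodeid)
{
        int first = nid_of(phys);
        int last  = nid_of(phys + len - 1);

        if (first == nodeid && last == nodeid)
                printf("reserve on node %d: %lx+%lx\n", nodeid, phys, len);
        else
                printf("reserve globally:   %lx+%lx\n", phys, len);
}

int main(void)
{
        reserve(0x10000000, 0x1000, 0);         /* entirely on node 0 */
        reserve(0x3ffff000, 0x2000, 0);         /* straddles the node boundary */
        return 0;
}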
index bd5e05c654dccfb33d368718e4b7acdbdd16d5d0..60bcb5b6a37ef0130fb08da8ef38db58fd37910a 100644 (file)
@@ -777,14 +777,20 @@ static inline int change_page_attr_clear(unsigned long addr, int numpages,
 
 int _set_memory_uc(unsigned long addr, int numpages)
 {
+       /*
+        * for now UC MINUS. see comments in ioremap_nocache()
+        */
        return change_page_attr_set(addr, numpages,
-                                   __pgprot(_PAGE_CACHE_UC));
+                                   __pgprot(_PAGE_CACHE_UC_MINUS));
 }
 
 int set_memory_uc(unsigned long addr, int numpages)
 {
+       /*
+        * for now UC MINUS. see comments in ioremap_nocache()
+        */
        if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
-                           _PAGE_CACHE_UC, NULL))
+                           _PAGE_CACHE_UC_MINUS, NULL))
                return -EINVAL;
 
        return _set_memory_uc(addr, numpages);
index 9851265e4d65d40db7729bd6c899f1fd9b7753ce..277446cd30b6de81a4634061949385cb6eaede69 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/msr.h>
 #include <asm/tlbflush.h>
 #include <asm/processor.h>
+#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/pat.h>
 #include <asm/e820.h>
@@ -386,8 +387,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                                break;
                        }
 
-                       printk(KERN_INFO "Overlap at 0x%Lx-0x%Lx\n",
-                              saved_ptr->start, saved_ptr->end);
+                       pr_debug(KERN_INFO "Overlap at 0x%Lx-0x%Lx\n",
+                                saved_ptr->start, saved_ptr->end);
                        /* No conflict. Go ahead and add this new entry */
                        list_add(&new_entry->nd, &saved_ptr->nd);
                        new_entry = NULL;
@@ -477,14 +478,43 @@ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
        return vma_prot;
 }
 
+#ifdef CONFIG_NONPROMISC_DEVMEM
+/* This check is done in drivers/char/mem.c in case of NONPROMISC_DEVMEM */
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+       return 1;
+}
+#else
+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
+{
+       u64 from = ((u64)pfn) << PAGE_SHIFT;
+       u64 to = from + size;
+       u64 cursor = from;
+
+       while (cursor < to) {
+               if (!devmem_is_allowed(pfn)) {
+                       printk(KERN_INFO
+               "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
+                               current->comm, from, to);
+                       return 0;
+               }
+               cursor += PAGE_SIZE;
+               pfn++;
+       }
+       return 1;
+}
+#endif /* CONFIG_NONPROMISC_DEVMEM */
+
 int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
                                unsigned long size, pgprot_t *vma_prot)
 {
        u64 offset = ((u64) pfn) << PAGE_SHIFT;
        unsigned long flags = _PAGE_CACHE_UC_MINUS;
-       unsigned long ret_flags;
        int retval;
 
+       if (!range_is_allowed(pfn, size))
+               return 0;
+
        if (file->f_flags & O_SYNC) {
                flags = _PAGE_CACHE_UC;
        }
@@ -518,14 +548,12 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
        if (flags != _PAGE_CACHE_UC_MINUS) {
                retval = reserve_memtype(offset, offset + size, flags, NULL);
        } else {
-               retval = reserve_memtype(offset, offset + size, -1, &ret_flags);
+               retval = reserve_memtype(offset, offset + size, -1, &flags);
        }
 
        if (retval < 0)
                return 0;
 
-       flags = ret_flags;
-
        if (pfn <= max_pfn_mapped &&
             ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
                free_memtype(offset, offset + size);
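The range_is_allowed() helper added above walks the requested window one page at a time and rejects the whole mapping if any page fails devmem_is_allowed(). A self-contained sketch of that walk (page_ok() and the SK_ constants are hypothetical stand-ins with an arbitrary policy):

#include <stdint.h>
#include <stddef.h>

#define SK_PAGE_SHIFT   12
#define SK_PAGE_SIZE    (1UL << SK_PAGE_SHIFT)

static int page_ok(unsigned long pfn)
{
        return pfn < 256;                       /* arbitrary example: first 1MB only */
}

static int range_ok(unsigned long pfn, size_t size)
{
        uint64_t from = (uint64_t)pfn << SK_PAGE_SHIFT;
        uint64_t to = from + size;
        uint64_t cursor = from;

        while (cursor < to) {
                if (!page_ok(pfn))
                        return 0;
                cursor += SK_PAGE_SIZE;
                pfn++;
        }
        return 1;
}

int main(void)
{
        return !range_ok(0, 16 * SK_PAGE_SIZE); /* expect the first 64KB to pass */
}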
index cdd6828b5abb4015f91091cf4ccc5d1a6432ded0..7fa519868d7091d4dd0b64fbb880c68517428f99 100644 (file)
@@ -3,12 +3,14 @@ obj-y                         := i386.o init.o
 obj-$(CONFIG_PCI_BIOS)         += pcbios.o
 obj-$(CONFIG_PCI_MMCONFIG)     += mmconfig_32.o direct.o mmconfig-shared.o
 obj-$(CONFIG_PCI_DIRECT)       += direct.o
+obj-$(CONFIG_PCI_OLPC)         += olpc.o
 
 pci-y                          := fixup.o
 pci-$(CONFIG_ACPI)             += acpi.o
 pci-y                          += legacy.o irq.o
 
-pci-$(CONFIG_X86_VISWS)                := visws.o fixup.o
-pci-$(CONFIG_X86_NUMAQ)                := numa.o irq.o
+pci-$(CONFIG_X86_VISWS)                += visws.o fixup.o
+pci-$(CONFIG_X86_NUMAQ)                += numa.o irq.o
+pci-$(CONFIG_NUMA)             += mp_bus_to_node.o
 
 obj-y                          += $(pci-y) common.o early.o
index 7d8c467bf14306cb42b03f9b32ee427b719d28f9..8fbd19832cf6795f34be5e61d0c9c9c942ee6b74 100644 (file)
@@ -13,5 +13,5 @@ obj-y                 += legacy.o irq.o common.o early.o
 # mmconfig has a 64bit special
 obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_64.o direct.o mmconfig-shared.o
 
-obj-$(CONFIG_NUMA)     += k8-bus_64.o
+obj-y          += k8-bus_64.o
 
index 2664cb3fc96c51fe964f1750225983ecdd745e77..1a9c0c6a1a1847f88c0024b06d18dfbd91d8cc22 100644 (file)
@@ -191,7 +191,10 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
 {
        struct pci_bus *bus;
        struct pci_sysdata *sd;
+       int node;
+#ifdef CONFIG_ACPI_NUMA
        int pxm;
+#endif
 
        dmi_check_system(acpi_pciprobe_dmi_table);
 
@@ -201,6 +204,17 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
                return NULL;
        }
 
+       node = -1;
+#ifdef CONFIG_ACPI_NUMA
+       pxm = acpi_get_pxm(device->handle);
+       if (pxm >= 0)
+               node = pxm_to_node(pxm);
+       if (node != -1)
+               set_mp_bus_to_node(busnum, node);
+       else
+               node = get_mp_bus_to_node(busnum);
+#endif
+
        /* Allocate per-root-bus (not per bus) arch-specific data.
         * TODO: leak; this memory is never freed.
         * It's arguable whether it's worth the trouble to care.
@@ -212,13 +226,7 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
        }
 
        sd->domain = domain;
-       sd->node = -1;
-
-       pxm = acpi_get_pxm(device->handle);
-#ifdef CONFIG_ACPI_NUMA
-       if (pxm >= 0)
-               sd->node = pxm_to_node(pxm);
-#endif
+       sd->node = node;
        /*
         * Maybe the desired pci bus has been already scanned. In such case
         * it is unnecessary to scan the pci bus with the given domain,busnum.
@@ -238,9 +246,9 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
                kfree(sd);
 
 #ifdef CONFIG_ACPI_NUMA
-       if (bus != NULL) {
+       if (bus) {
                if (pxm >= 0) {
-                       printk("bus %d -> pxm %d -> node %d\n",
+                       printk(KERN_DEBUG "bus %02x -> pxm %d -> node %d\n",
                                busnum, pxm, pxm_to_node(pxm));
                }
        }
@@ -248,7 +256,6 @@ struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_device *device, int do
 
        if (bus && (pci_probe & PCI_USE__CRS))
                get_current_resources(device, busnum, domain, bus);
-       
        return bus;
 }
 
index 75fcc29ecf52ac16d86429a8139ab23884cde4df..2a4d751818b731fd1ae0007ed2f1f9a94cdf12d1 100644 (file)
@@ -342,9 +342,14 @@ struct pci_bus * __devinit pcibios_scan_root(int busnum)
                return NULL;
        }
 
+       sd->node = get_mp_bus_to_node(busnum);
+
        printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);
+       bus = pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd);
+       if (!bus)
+               kfree(sd);
 
-       return pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd);
+       return bus;
 }
 
 extern u8 pci_cache_line_size;
@@ -420,6 +425,10 @@ char * __devinit  pcibios_setup(char *str)
                pci_probe &= ~PCI_PROBE_MMCONF;
                return NULL;
        }
+       else if (!strcmp(str, "check_enable_amd_mmconf")) {
+               pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
+               return NULL;
+       }
 #endif
        else if (!strcmp(str, "noacpi")) {
                acpi_noirq_set();
@@ -480,7 +489,7 @@ void pcibios_disable_device (struct pci_dev *dev)
                pcibios_disable_irq(dev);
 }
 
-struct pci_bus *__devinit pci_scan_bus_with_sysdata(int busno)
+struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node)
 {
        struct pci_bus *bus = NULL;
        struct pci_sysdata *sd;
@@ -495,10 +504,15 @@ struct pci_bus *__devinit pci_scan_bus_with_sysdata(int busno)
                printk(KERN_ERR "PCI: OOM, skipping PCI bus %02x\n", busno);
                return NULL;
        }
-       sd->node = -1;
-       bus = pci_scan_bus(busno, &pci_root_ops, sd);
+       sd->node = node;
+       bus = pci_scan_bus(busno, ops, sd);
        if (!bus)
                kfree(sd);
 
        return bus;
 }
+
+struct pci_bus *pci_scan_bus_with_sysdata(int busno)
+{
+       return pci_scan_bus_on_node(busno, &pci_root_ops, -1);
+}
index 42f3e4cad179754c1dfc2594e4f759695eca1089..21d1e0e0d535bfef58228d695b983c747dcea805 100644 (file)
@@ -258,7 +258,8 @@ void __init pci_direct_init(int type)
 {
        if (type == 0)
                return;
-       printk(KERN_INFO "PCI: Using configuration type %d\n", type);
+       printk(KERN_INFO "PCI: Using configuration type %d for base access\n",
+                type);
        if (type == 1)
                raw_pci_ops = &pci_direct_conf1;
        else
@@ -275,8 +276,10 @@ int __init pci_direct_probe(void)
        if (!region)
                goto type2;
 
-       if (pci_check_type1())
+       if (pci_check_type1()) {
+               raw_pci_ops = &pci_direct_conf1;
                return 1;
+       }
        release_resource(region);
 
  type2:
@@ -290,7 +293,6 @@ int __init pci_direct_probe(void)
                goto fail2;
 
        if (pci_check_type2()) {
-               printk(KERN_INFO "PCI: Using configuration type 2\n");
                raw_pci_ops = &pci_direct_conf2;
                return 2;
        }
index a5ef5f55137313fcb1faec0145200beac774d4f8..b60b2abd480cf774bad3f7c43e1e058c0a595681 100644 (file)
@@ -493,3 +493,20 @@ static void __devinit pci_siemens_interrupt_controller(struct pci_dev *dev)
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SIEMENS, 0x0015,
                          pci_siemens_interrupt_controller);
+
+/*
+ * Regular PCI devices have 256 bytes of config space, but AMD Family 10h
+ * Opteron extended config space has 4096 bytes.  Even if the device is
+ * capable, that doesn't mean we can access it.  Maybe we don't have a way
+ * to generate extended config space accesses.  So check it.
+ */
+static void fam10h_pci_cfg_space_size(struct pci_dev *dev)
+{
+       dev->cfg_size = pci_cfg_space_size_ext(dev, 0);
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1200, fam10h_pci_cfg_space_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1201, fam10h_pci_cfg_space_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1202, fam10h_pci_cfg_space_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1203, fam10h_pci_cfg_space_size);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x1204, fam10h_pci_cfg_space_size);
index 94f6c73a53d0aae6ac048668ea6b06a0a1f370d0..8af0f0bae2af45f0baca08051056946235db7d1e 100644 (file)
@@ -301,6 +301,13 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
        prot = pgprot_val(vma->vm_page_prot);
        if (pat_wc_enabled && write_combine)
                prot |= _PAGE_CACHE_WC;
+       else if (pat_wc_enabled)
+               /*
+                * ioremap() and ioremap_nocache() default to UC MINUS for now.
+                * To avoid attribute conflicts, request UC MINUS here
+                * as well.
+                */
+               prot |= _PAGE_CACHE_UC_MINUS;
        else if (boot_cpu_data.x86 > 3)
                prot |= _PAGE_CACHE_UC;
 
@@ -319,9 +326,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
                 * - request is uncached, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
-               if ((flags == _PAGE_CACHE_UC &&
-                    (new_flags == _PAGE_CACHE_WB ||
-                     new_flags == _PAGE_CACHE_WC)) ||
+               if ((flags == _PAGE_CACHE_UC_MINUS &&
+                    (new_flags == _PAGE_CACHE_WB)) ||
                    (flags == _PAGE_CACHE_WC &&
                     new_flags == _PAGE_CACHE_WB)) {
                        free_memtype(addr, addr+len);
index 3de9f9ba2da6aed8be73921f3f416ed8af444a49..dd30c6076b5d74ff1be2929d7b8f84081c6a8250 100644 (file)
@@ -6,16 +6,17 @@
    in the right sequence from here. */
 static __init int pci_access_init(void)
 {
-       int type __maybe_unused = 0;
-
 #ifdef CONFIG_PCI_DIRECT
+       int type = 0;
+
        type = pci_direct_probe();
 #endif
-#ifdef CONFIG_PCI_MMCONFIG
-       pci_mmcfg_init(type);
+
+       pci_mmcfg_early_init();
+
+#ifdef CONFIG_PCI_OLPC
+       pci_olpc_init();
 #endif
-       if (raw_pci_ops)
-               return 0;
 #ifdef CONFIG_PCI_BIOS
        pci_pcbios_init();
 #endif
@@ -28,7 +29,7 @@ static __init int pci_access_init(void)
 #ifdef CONFIG_PCI_DIRECT
        pci_direct_init(type);
 #endif
-       if (!raw_pci_ops)
+       if (!raw_pci_ops && !raw_pci_ext_ops)
                printk(KERN_ERR
                "PCI: Fatal: No config space access function found\n");
 
index 579745ca6b667dd0ccb7c62ac1133c42c7527088..0908fca901bf356ff2597ce13afbeffea35c641e 100644 (file)
@@ -136,9 +136,11 @@ static void __init pirq_peer_trick(void)
                busmap[e->bus] = 1;
        }
        for(i = 1; i < 256; i++) {
+               int node;
                if (!busmap[i] || pci_find_bus(0, i))
                        continue;
-               if (pci_scan_bus_with_sysdata(i))
+               node = get_mp_bus_to_node(i);
+               if (pci_scan_bus_on_node(i, &pci_root_ops, node))
                        printk(KERN_INFO "PCI: Discovered primary peer "
                               "bus %02x [IRQ]\n", i);
        }
index 9cc813e29706d3febdd70aacf5b4aa2d6e4940af..ab6d4b18a88fdcc01fc63835ca60f1eded7827e1 100644 (file)
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <asm/pci-direct.h>
 #include <asm/mpspec.h>
 #include <linux/cpumask.h>
+#include <linux/topology.h>
 
 /*
  * This discovers the pcibus <-> node mapping on AMD K8.
- *
- * RED-PEN need to call this again on PCI hotplug
- * RED-PEN empty cpus get reported wrong
+ * It also gets the peer root bus resources for io and mmio.
  */
 
-#define NODE_ID_REGISTER 0x60
-#define NODE_ID(dword) (dword & 0x07)
-#define LDT_BUS_NUMBER_REGISTER_0 0x94
-#define LDT_BUS_NUMBER_REGISTER_1 0xB4
-#define LDT_BUS_NUMBER_REGISTER_2 0xD4
-#define NR_LDT_BUS_NUMBER_REGISTERS 3
-#define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF)
-#define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF)
-#define PCI_DEVICE_ID_K8HTCONFIG 0x1100
+
+/*
+ * a transparent sub bus will use entries from 3 onward to store extra resources
+ * from the root, so make sure there are enough slots there; increase PCI_BUS_NUM_RESOURCES?
+ */
+#define RES_NUM 16
+struct pci_root_info {
+       char name[12];
+       unsigned int res_num;
+       struct resource res[RES_NUM];
+       int bus_min;
+       int bus_max;
+       int node;
+       int link;
+};
+
+/* 4 at this time, it may grow to 32 */
+#define PCI_ROOT_NR 4
+static int pci_root_num;
+static struct pci_root_info pci_root_info[PCI_ROOT_NR];
+
+#ifdef CONFIG_NUMA
+
+#define BUS_NR 256
+
+static int mp_bus_to_node[BUS_NR];
+
+void set_mp_bus_to_node(int busnum, int node)
+{
+       if (busnum >= 0 &&  busnum < BUS_NR)
+               mp_bus_to_node[busnum] = node;
+}
+
+int get_mp_bus_to_node(int busnum)
+{
+       int node = -1;
+
+       if (busnum < 0 || busnum > (BUS_NR - 1))
+               return node;
+
+       node = mp_bus_to_node[busnum];
+
+       /*
+        * let numa_node_id() decide it later in dma_alloc_pages()
+        * if there is no RAM on that node
+        */
+       if (node != -1 && !node_online(node))
+               node = -1;
+
+       return node;
+}
+#endif
+
+void set_pci_bus_resources_arch_default(struct pci_bus *b)
+{
+       int i;
+       int j;
+       struct pci_root_info *info;
+
+       /* if there is only one root bus, we don't need to do anything */
+       if (pci_root_num < 2)
+               return;
+
+       for (i = 0; i < pci_root_num; i++) {
+               if (pci_root_info[i].bus_min == b->number)
+                       break;
+       }
+
+       if (i == pci_root_num)
+               return;
+
+       info = &pci_root_info[i];
+       for (j = 0; j < info->res_num; j++) {
+               struct resource *res;
+               struct resource *root;
+
+               res = &info->res[j];
+               b->resource[j] = res;
+               if (res->flags & IORESOURCE_IO)
+                       root = &ioport_resource;
+               else
+                       root = &iomem_resource;
+               insert_resource(root, res);
+       }
+}
+
+#define RANGE_NUM 16
+
+struct res_range {
+       size_t start;
+       size_t end;
+};
+
+static void __init update_range(struct res_range *range, size_t start,
+                               size_t end)
+{
+       int i;
+       int j;
+
+       for (j = 0; j < RANGE_NUM; j++) {
+               if (!range[j].end)
+                       continue;
+
+               if (start <= range[j].start && end >= range[j].end) {
+                       range[j].start = 0;
+                       range[j].end = 0;
+                       continue;
+               }
+
+               if (start <= range[j].start && end < range[j].end && range[j].start < end + 1) {
+                       range[j].start = end + 1;
+                       continue;
+               }
+
+
+               if (start > range[j].start && end >= range[j].end && range[j].end > start - 1) {
+                       range[j].end = start - 1;
+                       continue;
+               }
+
+               if (start > range[j].start && end < range[j].end) {
+                       /* find the new spare */
+                       for (i = 0; i < RANGE_NUM; i++) {
+                               if (range[i].end == 0)
+                                       break;
+                       }
+                       if (i < RANGE_NUM) {
+                               range[i].end = range[j].end;
+                               range[i].start = end + 1;
+                       } else {
+                               printk(KERN_ERR "ran out of slots in ranges\n");
+                       }
+                       range[j].end = start - 1;
+                       continue;
+               }
+       }
+}
+
+static void __init update_res(struct pci_root_info *info, size_t start,
+                             size_t end, unsigned long flags, int merge)
+{
+       int i;
+       struct resource *res;
+
+       if (!merge)
+               goto addit;
+
+       /* try to merge it with old one */
+       for (i = 0; i < info->res_num; i++) {
+               size_t final_start, final_end;
+               size_t common_start, common_end;
+
+               res = &info->res[i];
+               if (res->flags != flags)
+                       continue;
+
+               common_start = max((size_t)res->start, start);
+               common_end = min((size_t)res->end, end);
+               if (common_start > common_end + 1)
+                       continue;
+
+               final_start = min((size_t)res->start, start);
+               final_end = max((size_t)res->end, end);
+
+               res->start = final_start;
+               res->end = final_end;
+               return;
+       }
+
+addit:
+
+       /* need to add that */
+       if (info->res_num >= RES_NUM)
+               return;
+
+       res = &info->res[info->res_num];
+       res->name = info->name;
+       res->flags = flags;
+       res->start = start;
+       res->end = end;
+       res->child = NULL;
+       info->res_num++;
+}
+
+struct pci_hostbridge_probe {
+       u32 bus;
+       u32 slot;
+       u32 vendor;
+       u32 device;
+};
+
+static struct pci_hostbridge_probe pci_probes[] __initdata = {
+       { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1100 },
+       { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
+       { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
+       { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1300 },
+};
+
+static u64 __initdata fam10h_mmconf_start;
+static u64 __initdata fam10h_mmconf_end;
+static void __init get_pci_mmcfg_amd_fam10h_range(void)
+{
+       u32 address;
+       u64 base, msr;
+       unsigned segn_busn_bits;
+
+       /* assume all cpus from fam10h have mmconf */
+       if (boot_cpu_data.x86 < 0x10)
+               return;
+
+       address = MSR_FAM10H_MMIO_CONF_BASE;
+       rdmsrl(address, msr);
+
+       /* mmconfig is not enabled */
+       if (!(msr & FAM10H_MMIO_CONF_ENABLE))
+               return;
+
+       base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
+
+       segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
+                        FAM10H_MMIO_CONF_BUSRANGE_MASK;
+
+       fam10h_mmconf_start = base;
+       fam10h_mmconf_end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
+}
 
 /**
- * fill_mp_bus_to_cpumask()
+ * early_fill_mp_bus_info()
+ * called before pcibios_scan_root and pci_scan_bus
  * fills the mp_bus_to_cpumask array based on the LDT Bus Number
  * Registers found in the K8 northbridge
  */
-__init static int
-fill_mp_bus_to_cpumask(void)
+static int __init early_fill_mp_bus_info(void)
 {
-       struct pci_dev *nb_dev = NULL;
-       int i, j;
-       u32 ldtbus, nid;
-       static int lbnr[3] = {
-               LDT_BUS_NUMBER_REGISTER_0,
-               LDT_BUS_NUMBER_REGISTER_1,
-               LDT_BUS_NUMBER_REGISTER_2
-       };
-
-       while ((nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
-                       PCI_DEVICE_ID_K8HTCONFIG, nb_dev))) {
-               pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid);
-
-               for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) {
-                       pci_read_config_dword(nb_dev, lbnr[i], &ldtbus);
-                       /*
-                        * if there are no busses hanging off of the current
-                        * ldt link then both the secondary and subordinate
-                        * bus number fields are set to 0.
-                        * 
-                        * RED-PEN
-                        * This is slightly broken because it assumes
-                        * HT node IDs == Linux node ids, which is not always
-                        * true. However it is probably mostly true.
-                        */
-                       if (!(SECONDARY_LDT_BUS_NUMBER(ldtbus) == 0
-                               && SUBORDINATE_LDT_BUS_NUMBER(ldtbus) == 0)) {
-                               for (j = SECONDARY_LDT_BUS_NUMBER(ldtbus);
-                                    j <= SUBORDINATE_LDT_BUS_NUMBER(ldtbus);
-                                    j++) { 
-                                       struct pci_bus *bus;
-                                       struct pci_sysdata *sd;
-
-                                       long node = NODE_ID(nid);
-                                       /* Algorithm a bit dumb, but
-                                          it shouldn't matter here */
-                                       bus = pci_find_bus(0, j);
-                                       if (!bus)
-                                               continue;
-                                       if (!node_online(node))
-                                               node = 0;
-
-                                       sd = bus->sysdata;
-                                       sd->node = node;
-                               }               
+       int i;
+       int j;
+       unsigned bus;
+       unsigned slot;
+       int found;
+       int node;
+       int link;
+       int def_node;
+       int def_link;
+       struct pci_root_info *info;
+       u32 reg;
+       struct resource *res;
+       size_t start;
+       size_t end;
+       struct res_range range[RANGE_NUM];
+       u64 val;
+       u32 address;
+
+#ifdef CONFIG_NUMA
+       for (i = 0; i < BUS_NR; i++)
+               mp_bus_to_node[i] = -1;
+#endif
+
+       if (!early_pci_allowed())
+               return -1;
+
+       found = 0;
+       for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
+               u32 id;
+               u16 device;
+               u16 vendor;
+
+               bus = pci_probes[i].bus;
+               slot = pci_probes[i].slot;
+               id = read_pci_config(bus, slot, 0, PCI_VENDOR_ID);
+
+               vendor = id & 0xffff;
+               device = (id>>16) & 0xffff;
+               if (pci_probes[i].vendor == vendor &&
+                   pci_probes[i].device == device) {
+                       found = 1;
+                       break;
+               }
+       }
+
+       if (!found)
+               return 0;
+
+       pci_root_num = 0;
+       for (i = 0; i < 4; i++) {
+               int min_bus;
+               int max_bus;
+               reg = read_pci_config(bus, slot, 1, 0xe0 + (i << 2));
+
+               /* Check if that register is enabled for bus range */
+               if ((reg & 7) != 3)
+                       continue;
+
+               min_bus = (reg >> 16) & 0xff;
+               max_bus = (reg >> 24) & 0xff;
+               node = (reg >> 4) & 0x07;
+#ifdef CONFIG_NUMA
+               for (j = min_bus; j <= max_bus; j++)
+                       mp_bus_to_node[j] = (unsigned char) node;
+#endif
+               link = (reg >> 8) & 0x03;
+
+               info = &pci_root_info[pci_root_num];
+               info->bus_min = min_bus;
+               info->bus_max = max_bus;
+               info->node = node;
+               info->link = link;
+               sprintf(info->name, "PCI Bus #%02x", min_bus);
+               pci_root_num++;
+       }
+
+       /* get the default node and link for left over res */
+       reg = read_pci_config(bus, slot, 0, 0x60);
+       def_node = (reg >> 8) & 0x07;
+       reg = read_pci_config(bus, slot, 0, 0x64);
+       def_link = (reg >> 8) & 0x03;
+
+       memset(range, 0, sizeof(range));
+       range[0].end = 0xffff;
+       /* io port resource */
+       for (i = 0; i < 4; i++) {
+               reg = read_pci_config(bus, slot, 1, 0xc0 + (i << 3));
+               if (!(reg & 3))
+                       continue;
+
+               start = reg & 0xfff000;
+               reg = read_pci_config(bus, slot, 1, 0xc4 + (i << 3));
+               node = reg & 0x07;
+               link = (reg >> 4) & 0x03;
+               end = (reg & 0xfff000) | 0xfff;
+
+               /* find the position */
+               for (j = 0; j < pci_root_num; j++) {
+                       info = &pci_root_info[j];
+                       if (info->node == node && info->link == link)
+                               break;
+               }
+               if (j == pci_root_num)
+                       continue; /* not found */
+
+               info = &pci_root_info[j];
+               printk(KERN_DEBUG "node %d link %d: io port [%llx, %llx]\n",
+                      node, link, (u64)start, (u64)end);
+
+               /* the kernel only handles 16-bit io ports */
+               if (end > 0xffff)
+                       end = 0xffff;
+               update_res(info, start, end, IORESOURCE_IO, 1);
+               update_range(range, start, end);
+       }
+       /* add left over io port range to def node/link, [0, 0xffff] */
+       /* find the position */
+       for (j = 0; j < pci_root_num; j++) {
+               info = &pci_root_info[j];
+               if (info->node == def_node && info->link == def_link)
+                       break;
+       }
+       if (j < pci_root_num) {
+               info = &pci_root_info[j];
+               for (i = 0; i < RANGE_NUM; i++) {
+                       if (!range[i].end)
+                               continue;
+
+                       update_res(info, range[i].start, range[i].end,
+                                  IORESOURCE_IO, 1);
+               }
+       }
+
+       memset(range, 0, sizeof(range));
+       /* 0xfd00000000-0xffffffffff for HT */
+       range[0].end = (0xfdULL<<32) - 1;
+
+       /* need to take out [0, TOM) for RAM*/
+       address = MSR_K8_TOP_MEM1;
+       rdmsrl(address, val);
+       end = (val & 0xffffff8000000ULL);
+       printk(KERN_INFO "TOM: %016lx aka %ldM\n", end, end>>20);
+       if (end < (1ULL<<32))
+               update_range(range, 0, end - 1);
+
+       /* get mmconfig */
+       get_pci_mmcfg_amd_fam10h_range();
+       /* need to take out mmconf range */
+       if (fam10h_mmconf_end) {
+               printk(KERN_DEBUG "Fam 10h mmconf [%llx, %llx]\n", fam10h_mmconf_start, fam10h_mmconf_end);
+               update_range(range, fam10h_mmconf_start, fam10h_mmconf_end);
+       }
+
+       /* mmio resource */
+       for (i = 0; i < 8; i++) {
+               reg = read_pci_config(bus, slot, 1, 0x80 + (i << 3));
+               if (!(reg & 3))
+                       continue;
+
+               start = reg & 0xffffff00; /* 39:16 on 31:8*/
+               start <<= 8;
+               reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3));
+               node = reg & 0x07;
+               link = (reg >> 4) & 0x03;
+               end = (reg & 0xffffff00);
+               end <<= 8;
+               end |= 0xffff;
+
+               /* find the position */
+               for (j = 0; j < pci_root_num; j++) {
+                       info = &pci_root_info[j];
+                       if (info->node == node && info->link == link)
+                               break;
+               }
+               if (j == pci_root_num)
+                       continue; /* not found */
+
+               info = &pci_root_info[j];
+
+               printk(KERN_DEBUG "node %d link %d: mmio [%llx, %llx]",
+                      node, link, (u64)start, (u64)end);
+               /*
+                * some sick allocations have ranges that overlap the fam10h
+                * mmconf range, so we need to update start and end.
+                */
+               if (fam10h_mmconf_end) {
+                       int changed = 0;
+                       u64 endx = 0;
+                       if (start >= fam10h_mmconf_start &&
+                           start <= fam10h_mmconf_end) {
+                               start = fam10h_mmconf_end + 1;
+                               changed = 1;
+                       }
+
+                       if (end >= fam10h_mmconf_start &&
+                           end <= fam10h_mmconf_end) {
+                               end = fam10h_mmconf_start - 1;
+                               changed = 1;
+                       }
+
+                       if (start < fam10h_mmconf_start &&
+                           end > fam10h_mmconf_end) {
+                               /* we got a hole */
+                               endx = fam10h_mmconf_start - 1;
+                               update_res(info, start, endx, IORESOURCE_MEM, 0);
+                               update_range(range, start, endx);
+                               printk(KERN_CONT " ==> [%llx, %llx]", (u64)start, endx);
+                               start = fam10h_mmconf_end + 1;
+                               changed = 1;
+                       }
+                       if (changed) {
+                               if (start <= end) {
+                                       printk(KERN_CONT " %s [%llx, %llx]", endx?"and":"==>", (u64)start, (u64)end);
+                               } else {
+                                       printk(KERN_CONT "%s\n", endx?"":" ==> none");
+                                       continue;
+                               }
                        }
                }
+
+               update_res(info, start, end, IORESOURCE_MEM, 1);
+               update_range(range, start, end);
+               printk(KERN_CONT "\n");
+       }
+
+       /* need to take out [4G, TOM2) for RAM*/
+       /* SYS_CFG */
+       address = MSR_K8_SYSCFG;
+       rdmsrl(address, val);
+       /* TOP_MEM2 is enabled? */
+       if (val & (1<<21)) {
+               /* TOP_MEM2 */
+               address = MSR_K8_TOP_MEM2;
+               rdmsrl(address, val);
+               end = (val & 0xffffff8000000ULL);
+               printk(KERN_INFO "TOM2: %016lx aka %ldM\n", end, end>>20);
+               update_range(range, 1ULL<<32, end - 1);
+       }
+
+       /*
+        * add the leftover mmio ranges to the default node/link?
+        * that is tricky; just record the ranges from start_min to 4G
+        */
+       for (j = 0; j < pci_root_num; j++) {
+               info = &pci_root_info[j];
+               if (info->node == def_node && info->link == def_link)
+                       break;
+       }
+       if (j < pci_root_num) {
+               info = &pci_root_info[j];
+
+               for (i = 0; i < RANGE_NUM; i++) {
+                       if (!range[i].end)
+                               continue;
+
+                       update_res(info, range[i].start, range[i].end,
+                                  IORESOURCE_MEM, 1);
+               }
+       }
+
+#ifdef CONFIG_NUMA
+       for (i = 0; i < BUS_NR; i++) {
+               node = mp_bus_to_node[i];
+               if (node >= 0)
+                       printk(KERN_DEBUG "bus: %02x to node: %02x\n", i, node);
+       }
+#endif
+
+       for (i = 0; i < pci_root_num; i++) {
+               int res_num;
+               int busnum;
+
+               info = &pci_root_info[i];
+               res_num = info->res_num;
+               busnum = info->bus_min;
+               printk(KERN_DEBUG "bus: [%02x,%02x] on node %x link %x\n",
+                      info->bus_min, info->bus_max, info->node, info->link);
+               for (j = 0; j < res_num; j++) {
+                       res = &info->res[j];
+                       printk(KERN_DEBUG "bus: %02x index %x %s: [%llx, %llx]\n",
+                              busnum, j,
+                              (res->flags & IORESOURCE_IO)?"io port":"mmio",
+                              res->start, res->end);
+               }
        }
 
        return 0;
 }
 
-fs_initcall(fill_mp_bus_to_cpumask);
+postcore_initcall(early_fill_mp_bus_info);
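The update_range() helper added above carves reserved windows (RAM below TOM, the mmconf aperture, per-link apertures) out of a small list of free ranges; whatever survives is handed to the default node/link. A self-contained illustration of that carve-out idea, reduced to the essentials (names and the example policy are hypothetical, not the kernel code):

#include <stdio.h>

struct range { unsigned long start, end; };     /* end == 0 means the slot is unused */

#define NR 8

static void carve(struct range *r, unsigned long s, unsigned long e)
{
        int i, j;

        for (j = 0; j < NR; j++) {
                if (!r[j].end)
                        continue;
                if (s <= r[j].start && e >= r[j].end) {         /* fully covered: drop it */
                        r[j].start = r[j].end = 0;
                } else if (s <= r[j].start && e >= r[j].start) {        /* clip the head */
                        r[j].start = e + 1;
                } else if (e >= r[j].end && s <= r[j].end) {    /* clip the tail */
                        r[j].end = s - 1;
                } else if (s > r[j].start && e < r[j].end) {    /* hole: split in two */
                        for (i = 0; i < NR && r[i].end; i++)
                                ;
                        if (i < NR) {
                                r[i].start = e + 1;
                                r[i].end = r[j].end;
                        }
                        r[j].end = s - 1;
                }
        }
}

int main(void)
{
        struct range r[NR] = { { 0, 0xffff } }; /* start with the whole io space */
        int j;

        carve(r, 0x1000, 0x1fff);               /* window routed to some node/link */
        carve(r, 0x8000, 0x8fff);               /* another routed window */
        for (j = 0; j < NR; j++)
                if (r[j].end)
                        printf("left over for the default link: [%lx, %lx]\n",
                               r[j].start, r[j].end);
        return 0;
}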
index e041ced0ce138714a93147f9cc5015e0f7f51d3e..a67921ce60af1faabf9c8ccbf538b546103ebb04 100644 (file)
@@ -12,6 +12,7 @@
 static void __devinit pcibios_fixup_peer_bridges(void)
 {
        int n, devfn;
+       long node;
 
        if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff)
                return;
@@ -21,12 +22,13 @@ static void __devinit pcibios_fixup_peer_bridges(void)
                u32 l;
                if (pci_find_bus(0, n))
                        continue;
+               node = get_mp_bus_to_node(n);
                for (devfn = 0; devfn < 256; devfn += 8) {
                        if (!raw_pci_read(0, n, devfn, PCI_VENDOR_ID, 2, &l) &&
                            l != 0x0000 && l != 0xffff) {
                                DBG("Found device at %02x:%02x [%04x]\n", n, devfn, l);
                                printk(KERN_INFO "PCI: Discovered peer bus %02x\n", n);
-                               pci_scan_bus_with_sysdata(n);
+                               pci_scan_bus_on_node(n, &pci_root_ops, node);
                                break;
                        }
                }
index 8d54df4dfaad551518bcbf77cbe0be20d0f36562..0cfebecf2a8f128cc2b52f632a95d55eef397fb3 100644 (file)
@@ -28,7 +28,7 @@ static int __initdata pci_mmcfg_resources_inserted;
 static const char __init *pci_mmcfg_e7520(void)
 {
        u32 win;
-       pci_direct_conf1.read(0, 0, PCI_DEVFN(0,0), 0xce, 2, &win);
+       raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win);
 
        win = win & 0xf000;
        if(win == 0x0000 || win == 0xf000)
@@ -53,7 +53,7 @@ static const char __init *pci_mmcfg_intel_945(void)
 
        pci_mmcfg_config_num = 1;
 
-       pci_direct_conf1.read(0, 0, PCI_DEVFN(0,0), 0x48, 4, &pciexbar);
+       raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0x48, 4, &pciexbar);
 
        /* Enable bit */
        if (!(pciexbar & 1))
@@ -100,33 +100,102 @@ static const char __init *pci_mmcfg_intel_945(void)
        return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
 }
 
+static const char __init *pci_mmcfg_amd_fam10h(void)
+{
+       u32 low, high, address;
+       u64 base, msr;
+       int i;
+       unsigned segnbits = 0, busnbits;
+
+       if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
+               return NULL;
+
+       address = MSR_FAM10H_MMIO_CONF_BASE;
+       if (rdmsr_safe(address, &low, &high))
+               return NULL;
+
+       msr = high;
+       msr <<= 32;
+       msr |= low;
+
+       /* mmconfig is not enabled */
+       if (!(msr & FAM10H_MMIO_CONF_ENABLE))
+               return NULL;
+
+       base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);
+
+       busnbits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
+                        FAM10H_MMIO_CONF_BUSRANGE_MASK;
+
+       /*
+        * only handles bus 0?
+        * need to skip it
+        */
+       if (!busnbits)
+               return NULL;
+
+       if (busnbits > 8) {
+               segnbits = busnbits - 8;
+               busnbits = 8;
+       }
+
+       pci_mmcfg_config_num = (1 << segnbits);
+       pci_mmcfg_config = kzalloc(sizeof(pci_mmcfg_config[0]) *
+                                  pci_mmcfg_config_num, GFP_KERNEL);
+       if (!pci_mmcfg_config)
+               return NULL;
+
+       for (i = 0; i < (1 << segnbits); i++) {
+               pci_mmcfg_config[i].address = base + (1<<28) * i;
+               pci_mmcfg_config[i].pci_segment = i;
+               pci_mmcfg_config[i].start_bus_number = 0;
+               pci_mmcfg_config[i].end_bus_number = (1 << busnbits) - 1;
+       }
+
+       return "AMD Family 10h NB";
+}
+
 struct pci_mmcfg_hostbridge_probe {
+       u32 bus;
+       u32 devfn;
        u32 vendor;
        u32 device;
        const char *(*probe)(void);
 };
 
 static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = {
-       { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 },
-       { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82945G_HB, pci_mmcfg_intel_945 },
+       { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
+         PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 },
+       { 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
+         PCI_DEVICE_ID_INTEL_82945G_HB, pci_mmcfg_intel_945 },
+       { 0, PCI_DEVFN(0x18, 0), PCI_VENDOR_ID_AMD,
+         0x1200, pci_mmcfg_amd_fam10h },
+       { 0xff, PCI_DEVFN(0, 0), PCI_VENDOR_ID_AMD,
+         0x1200, pci_mmcfg_amd_fam10h },
 };
 
 static int __init pci_mmcfg_check_hostbridge(void)
 {
        u32 l;
+       u32 bus, devfn;
        u16 vendor, device;
        int i;
        const char *name;
 
-       pci_direct_conf1.read(0, 0, PCI_DEVFN(0,0), 0, 4, &l);
-       vendor = l & 0xffff;
-       device = (l >> 16) & 0xffff;
+       if (!raw_pci_ops)
+               return 0;
 
        pci_mmcfg_config_num = 0;
        pci_mmcfg_config = NULL;
        name = NULL;
 
        for (i = 0; !name && i < ARRAY_SIZE(pci_mmcfg_probes); i++) {
+               bus =  pci_mmcfg_probes[i].bus;
+               devfn = pci_mmcfg_probes[i].devfn;
+               raw_pci_ops->read(0, bus, devfn, 0, 4, &l);
+               vendor = l & 0xffff;
+               device = (l >> 16) & 0xffff;
+
                if (pci_mmcfg_probes[i].vendor == vendor &&
                    pci_mmcfg_probes[i].device == device)
                        name = pci_mmcfg_probes[i].probe();
@@ -173,9 +242,78 @@ static void __init pci_mmcfg_insert_resources(unsigned long resource_flags)
        pci_mmcfg_resources_inserted = 1;
 }
 
-static void __init pci_mmcfg_reject_broken(int type)
+static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
+                                             void *data)
+{
+       struct resource *mcfg_res = data;
+       struct acpi_resource_address64 address;
+       acpi_status status;
+
+       if (res->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) {
+               struct acpi_resource_fixed_memory32 *fixmem32 =
+                       &res->data.fixed_memory32;
+               if (!fixmem32)
+                       return AE_OK;
+               if ((mcfg_res->start >= fixmem32->address) &&
+                   (mcfg_res->end < (fixmem32->address +
+                                     fixmem32->address_length))) {
+                       mcfg_res->flags = 1;
+                       return AE_CTRL_TERMINATE;
+               }
+       }
+       if ((res->type != ACPI_RESOURCE_TYPE_ADDRESS32) &&
+           (res->type != ACPI_RESOURCE_TYPE_ADDRESS64))
+               return AE_OK;
+
+       status = acpi_resource_to_address64(res, &address);
+       if (ACPI_FAILURE(status) ||
+          (address.address_length <= 0) ||
+          (address.resource_type != ACPI_MEMORY_RANGE))
+               return AE_OK;
+
+       if ((mcfg_res->start >= address.minimum) &&
+           (mcfg_res->end < (address.minimum + address.address_length))) {
+               mcfg_res->flags = 1;
+               return AE_CTRL_TERMINATE;
+       }
+       return AE_OK;
+}
+
+static acpi_status __init find_mboard_resource(acpi_handle handle, u32 lvl,
+               void *context, void **rv)
+{
+       struct resource *mcfg_res = context;
+
+       acpi_walk_resources(handle, METHOD_NAME__CRS,
+                           check_mcfg_resource, context);
+
+       if (mcfg_res->flags)
+               return AE_CTRL_TERMINATE;
+
+       return AE_OK;
+}
+
+static int __init is_acpi_reserved(unsigned long start, unsigned long end)
+{
+       struct resource mcfg_res;
+
+       mcfg_res.start = start;
+       mcfg_res.end = end;
+       mcfg_res.flags = 0;
+
+       acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL);
+
+       if (!mcfg_res.flags)
+               acpi_get_devices("PNP0C02", find_mboard_resource, &mcfg_res,
+                                NULL);
+
+       return mcfg_res.flags;
+}
+
+static void __init pci_mmcfg_reject_broken(int early)
 {
        typeof(pci_mmcfg_config[0]) *cfg;
+       int i;
 
        if ((pci_mmcfg_config_num == 0) ||
            (pci_mmcfg_config == NULL) ||
@@ -184,51 +322,80 @@ static void __init pci_mmcfg_reject_broken(int type)
 
        cfg = &pci_mmcfg_config[0];
 
-       /*
-        * Handle more broken MCFG tables on Asus etc.
-        * They only contain a single entry for bus 0-0.
-        */
-       if (pci_mmcfg_config_num == 1 &&
-           cfg->pci_segment == 0 &&
-           (cfg->start_bus_number | cfg->end_bus_number) == 0) {
-               printk(KERN_ERR "PCI: start and end of bus number is 0. "
-                      "Rejected as broken MCFG.\n");
-               goto reject;
+       for (i = 0; i < pci_mmcfg_config_num; i++) {
+               int valid = 0;
+               u32 size = (cfg->end_bus_number + 1) << 20;
+               cfg = &pci_mmcfg_config[i];
+               printk(KERN_NOTICE "PCI: MCFG configuration %d: base %lx "
+                      "segment %hu buses %u - %u\n",
+                      i, (unsigned long)cfg->address, cfg->pci_segment,
+                      (unsigned int)cfg->start_bus_number,
+                      (unsigned int)cfg->end_bus_number);
+
+               if (!early &&
+                   is_acpi_reserved(cfg->address, cfg->address + size - 1)) {
+                       printk(KERN_NOTICE "PCI: MCFG area at %Lx reserved "
+                              "in ACPI motherboard resources\n",
+                              cfg->address);
+                       valid = 1;
+               }
+
+               if (valid)
+                       continue;
+
+               if (!early)
+                       printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not"
+                              " reserved in ACPI motherboard resources\n",
+                              cfg->address);
+               /* Don't try the E820 check unless configuration
+                  type 1 is available (what about type 2?). */
+               if (raw_pci_ops && e820_all_mapped(cfg->address,
+                                                 cfg->address + size - 1,
+                                                 E820_RESERVED)) {
+                       printk(KERN_NOTICE
+                              "PCI: MCFG area at %Lx reserved in E820\n",
+                              cfg->address);
+                       valid = 1;
+               }
+
+               if (!valid)
+                       goto reject;
        }
 
-       /*
-        * Only do this check when type 1 works. If it doesn't work
-        * assume we run on a Mac and always use MCFG
-        */
-       if (type == 1 && !e820_all_mapped(cfg->address,
-                                         cfg->address + MMCONFIG_APER_MIN,
-                                         E820_RESERVED)) {
-               printk(KERN_ERR "PCI: BIOS Bug: MCFG area at %Lx is not"
-                      " E820-reserved\n", cfg->address);
-               goto reject;
-       }
        return;
 
 reject:
        printk(KERN_ERR "PCI: Not using MMCONFIG.\n");
+       pci_mmcfg_arch_free();
        kfree(pci_mmcfg_config);
        pci_mmcfg_config = NULL;
        pci_mmcfg_config_num = 0;
 }
 
-void __init pci_mmcfg_init(int type)
-{
-       int known_bridge = 0;
+static int __initdata known_bridge;
 
+void __init __pci_mmcfg_init(int early)
+{
+       /* MMCONFIG disabled */
        if ((pci_probe & PCI_PROBE_MMCONF) == 0)
                return;
 
-       if (type == 1 && pci_mmcfg_check_hostbridge())
-               known_bridge = 1;
+       /* MMCONFIG already enabled */
+       if (!early && !(pci_probe & PCI_PROBE_MASK & ~PCI_PROBE_MMCONF))
+               return;
+
+       /* nothing to do on the late call if the early pass already found a bridge */
+       if (known_bridge)
+               return;
+
+       if (early) {
+               if (pci_mmcfg_check_hostbridge())
+                       known_bridge = 1;
+       }
 
        if (!known_bridge) {
                acpi_table_parse(ACPI_SIG_MCFG, acpi_parse_mcfg);
-               pci_mmcfg_reject_broken(type);
+               pci_mmcfg_reject_broken(early);
        }
 
        if ((pci_mmcfg_config_num == 0) ||
@@ -249,6 +416,16 @@ void __init pci_mmcfg_init(int type)
        }
 }
 
+void __init pci_mmcfg_early_init(void)
+{
+       __pci_mmcfg_init(1);
+}
+
+void __init pci_mmcfg_late_init(void)
+{
+       __pci_mmcfg_init(0);
+}
+
 static int __init pci_mmcfg_late_insert_resources(void)
 {
        /*
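
For reference, the 1 << 28 per-segment stride used in pci_mmcfg_amd_fam10h() above matches the aperture size computed later in pci_mmcfg_reject_broken(): extended config space is 1 MiB per bus, so a segment covering buses 0-255 (busnbits = 8) spans (255 + 1) << 20 = 256 MiB = 1 << 28 bytes.
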
index 081816ada05792dc87d670db497a7ecf500534ba..f3c761dce6957118494ff8f2492edae9c7d9e651 100644 (file)
@@ -136,3 +136,7 @@ int __init pci_mmcfg_arch_init(void)
        raw_pci_ext_ops = &pci_mmcfg;
        return 1;
 }
+
+void __init pci_mmcfg_arch_free(void)
+{
+}
index 9207fd49233c1f120f6bbbd3e4b7c87f77c671b3..a1994163c99dd328e3c542d1bc0f5dd246e583cf 100644 (file)
@@ -127,7 +127,7 @@ static void __iomem * __init mcfg_ioremap(struct acpi_mcfg_allocation *cfg)
 int __init pci_mmcfg_arch_init(void)
 {
        int i;
-       pci_mmcfg_virt = kmalloc(sizeof(*pci_mmcfg_virt) *
+       pci_mmcfg_virt = kzalloc(sizeof(*pci_mmcfg_virt) *
                                 pci_mmcfg_config_num, GFP_KERNEL);
        if (pci_mmcfg_virt == NULL) {
                printk(KERN_ERR "PCI: Can not allocate memory for mmconfig structures\n");
@@ -141,9 +141,29 @@ int __init pci_mmcfg_arch_init(void)
                        printk(KERN_ERR "PCI: Cannot map mmconfig aperture for "
                                        "segment %d\n",
                                pci_mmcfg_config[i].pci_segment);
+                       pci_mmcfg_arch_free();
                        return 0;
                }
        }
        raw_pci_ext_ops = &pci_mmcfg;
        return 1;
 }
+
+void __init pci_mmcfg_arch_free(void)
+{
+       int i;
+
+       if (pci_mmcfg_virt == NULL)
+               return;
+
+       for (i = 0; i < pci_mmcfg_config_num; ++i) {
+               if (pci_mmcfg_virt[i].virt) {
+                       iounmap(pci_mmcfg_virt[i].virt);
+                       pci_mmcfg_virt[i].virt = NULL;
+                       pci_mmcfg_virt[i].cfg = NULL;
+               }
+       }
+
+       kfree(pci_mmcfg_virt);
+       pci_mmcfg_virt = NULL;
+}
diff --git a/arch/x86/pci/mp_bus_to_node.c b/arch/x86/pci/mp_bus_to_node.c
new file mode 100644 (file)
index 0000000..0229439
--- /dev/null
@@ -0,0 +1,23 @@
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/topology.h>
+
+#define BUS_NR 256
+
+static unsigned char mp_bus_to_node[BUS_NR];
+
+void set_mp_bus_to_node(int busnum, int node)
+{
+       if (busnum >= 0 && busnum < BUS_NR)
+               mp_bus_to_node[busnum] = (unsigned char) node;
+}
+
+int get_mp_bus_to_node(int busnum)
+{
+       int node;
+
+       if (busnum < 0 || busnum > (BUS_NR - 1))
+               return 0;
+       node = mp_bus_to_node[busnum];
+       return node;
+}
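
The new mp_bus_to_node.c is a plain 256-entry bus-number to NUMA-node table. A minimal usage sketch follows; the example_* helpers and their call sites are illustrative only (the real consumers are the amd_bus.c and pcibios_fixup_peer_bridges() changes earlier in this diff):

#include <linux/init.h>
#include <linux/pci.h>
#include "pci.h"        /* arch/x86/pci/pci.h: pci_root_ops, pci_scan_bus_on_node() */

/* Record the owning node while a host bridge's bus range is enumerated... */
static void __init example_record_bus(int busnum, int node)
{
        set_mp_bus_to_node(busnum, node);       /* out-of-range bus numbers are ignored */
}

/* ...and look it up again when that bus is later scanned. */
static void __init example_scan_peer_bus(int busnum)
{
        int node = get_mp_bus_to_node(busnum);  /* unknown buses fall back to node 0 */

        pci_scan_bus_on_node(busnum, &pci_root_ops, node);
}
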
diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
new file mode 100644 (file)
index 0000000..5e76365
--- /dev/null
@@ -0,0 +1,313 @@
+/*
+ * Low-level PCI config space access for OLPC systems that lack the VSA
+ * PCI virtualization software.
+ *
+ * Copyright © 2006  Advanced Micro Devices, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * The AMD Geode chipset (ie: GX2 processor, cs5536 I/O companion device)
+ * has some I/O functions (display, southbridge, sound, USB HCIs, etc)
+ * that more or less behave like PCI devices, but the hardware doesn't
+ * directly implement the PCI configuration space headers.  AMD provides
+ * "VSA" (Virtual System Architecture) software that emulates PCI config
+ * space for these devices, by trapping I/O accesses to PCI config register
+ * (CF8/CFC) and running some code in System Management Mode interrupt state.
+ * On the OLPC platform, we don't want to use that VSA code because
+ * (a) it slows down suspend/resume, and (b) recompiling it requires special
+ * compilers that are hard to get.  So instead of letting the complex VSA
+ * code simulate the PCI config registers for the on-chip devices, we
+ * just simulate them the easy way, by inserting the code into the
+ * pci_write_config and pci_read_config path.  Most of the config registers
+ * are read-only anyway, so the bulk of the simulation is just table lookup.
+ */
+
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <asm/olpc.h>
+#include <asm/geode.h>
+#include "pci.h"
+
+/*
+ * In the tables below, the first two lines (8 longwords) are the
+ * size masks that are used when the higher level PCI code determines
+ * the size of the region by writing ~0 to a base address register
+ * and reading back the result.
+ *
+ * The following lines are the values that are read during normal
+ * PCI config access cycles, i.e. not after just having written
+ * ~0 to a base address register.
+ */
+
+static const uint32_t lxnb_hdr[] = {  /* dev 1 function 0 - devfn = 8 */
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+
+       0x281022, 0x2200005, 0x6000021, 0x80f808,       /* AMD Vendor ID */
+       0x0,    0x0,    0x0,    0x0,   /* No virtual registers, hence no BAR */
+       0x0,    0x0,    0x0,    0x28100b,
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+};
+
+static const uint32_t gxnb_hdr[] = {  /* dev 1 function 0 - devfn = 8 */
+       0xfffffffd, 0x0, 0x0,   0x0,
+       0x0,    0x0,    0x0,    0x0,
+
+       0x28100b, 0x2200005, 0x6000021, 0x80f808,       /* NSC Vendor ID */
+       0xac1d, 0x0,    0x0,    0x0,  /* I/O BAR - base of virtual registers */
+       0x0,    0x0,    0x0,    0x28100b,
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+};
+
+static const uint32_t lxfb_hdr[] = {  /* dev 1 function 1 - devfn = 9 */
+       0xff000008, 0xffffc000, 0xffffc000, 0xffffc000,
+       0xffffc000,     0x0,    0x0,    0x0,
+
+       0x20811022, 0x2200003, 0x3000000, 0x0,          /* AMD Vendor ID */
+       0xfd000000, 0xfe000000, 0xfe004000, 0xfe008000, /* FB, GP, VG, DF */
+       0xfe00c000, 0x0, 0x0,   0x30100b,               /* VIP */
+       0x0,    0x0,    0x0,    0x10e,     /* INTA, IRQ14 for graphics accel */
+       0x0,    0x0,    0x0,    0x0,
+       0x3d0,  0x3c0,  0xa0000, 0x0,       /* VG IO, VG IO, EGA FB, MONO FB */
+       0x0,    0x0,    0x0,    0x0,
+};
+
+static const uint32_t gxfb_hdr[] = {  /* dev 1 function 1 - devfn = 9 */
+       0xff800008, 0xffffc000, 0xffffc000, 0xffffc000,
+       0x0,    0x0,    0x0,    0x0,
+
+       0x30100b, 0x2200003, 0x3000000, 0x0,            /* NSC Vendor ID */
+       0xfd000000, 0xfe000000, 0xfe004000, 0xfe008000, /* FB, GP, VG, DF */
+       0x0,    0x0,    0x0,    0x30100b,
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+       0x3d0,  0x3c0,  0xa0000, 0x0,       /* VG IO, VG IO, EGA FB, MONO FB */
+       0x0,    0x0,    0x0,    0x0,
+};
+
+static const uint32_t aes_hdr[] = {    /* dev 1 function 2 - devfn = 0xa */
+       0xffffc000, 0x0, 0x0,   0x0,
+       0x0,    0x0,    0x0,    0x0,
+
+       0x20821022, 0x2a00006, 0x10100000, 0x8,         /* NSC Vendor ID */
+       0xfe010000, 0x0, 0x0,   0x0,                    /* AES registers */
+       0x0,    0x0,    0x0,    0x20821022,
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+};
+
+
+static const uint32_t isa_hdr[] = {  /* dev f function 0 - devfn = 78 */
+       0xfffffff9, 0xffffff01, 0xffffffc1, 0xffffffe1,
+       0xffffff81, 0xffffffc1, 0x0, 0x0,
+
+       0x20901022, 0x2a00049, 0x6010003, 0x802000,
+       0x18b1, 0x1001, 0x1801, 0x1881, /* SMB-8   GPIO-256 MFGPT-64  IRQ-32 */
+       0x1401, 0x1841, 0x0,    0x20901022,             /* PMS-128 ACPI-64 */
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0xaa5b,                 /* IRQ steering */
+       0x0,    0x0,    0x0,    0x0,
+};
+
+static const uint32_t ac97_hdr[] = {  /* dev f function 3 - devfn = 7b */
+       0xffffff81, 0x0, 0x0,   0x0,
+       0x0,    0x0,    0x0,    0x0,
+
+       0x20931022, 0x2a00041, 0x4010001, 0x0,
+       0x1481, 0x0,    0x0,    0x0,                    /* I/O BAR-128 */
+       0x0,    0x0,    0x0,    0x20931022,
+       0x0,    0x0,    0x0,    0x205,                  /* IntB, IRQ5 */
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+};
+
+static const uint32_t ohci_hdr[] = {  /* dev f function 4 - devfn = 7c */
+       0xfffff000, 0x0, 0x0,   0x0,
+       0x0,    0x0,    0x0,    0x0,
+
+       0x20941022, 0x2300006, 0xc031002, 0x0,
+       0xfe01a000, 0x0, 0x0,   0x0,                    /* MEMBAR-1000 */
+       0x0,    0x0,    0x0,    0x20941022,
+       0x0,    0x40,   0x0,    0x40a,                  /* CapPtr INT-D, IRQA */
+       0xc8020001, 0x0, 0x0,   0x0,    /* Capabilities - 40 is R/O,
+                                          44 is mask 8103 (power control) */
+       0x0,    0x0,    0x0,    0x0,
+       0x0,    0x0,    0x0,    0x0,
+};
+
+static const uint32_t ehci_hdr[] = {  /* dev f function 4 - devfn = 7d */
+       0xfffff000, 0x0, 0x0,   0x0,
+       0x0,    0x0,    0x0,    0x0,
+
+       0x20951022, 0x2300006, 0xc032002, 0x0,
+       0xfe01b000, 0x0, 0x0,   0x0,                    /* MEMBAR-1000 */
+       0x0,    0x0,    0x0,    0x20951022,
+       0x0,    0x40,   0x0,    0x40a,                  /* CapPtr INT-D, IRQA */
+       0xc8020001, 0x0, 0x0,   0x0,    /* Capabilities - 40 is R/O, 44 is
+                                          mask 8103 (power control) */
+#if 0
+       0x1,    0x40080000, 0x0, 0x0,   /* EECP - see EHCI spec section 2.1.7 */
+#endif
+       0x01000001, 0x0, 0x0,   0x0,    /* EECP - see EHCI spec section 2.1.7 */
+       0x2020, 0x0,    0x0,    0x0,    /* (EHCI page 8) 60 SBRN (R/O),
+                                          61 FLADJ (R/W), PORTWAKECAP  */
+};
+
+static uint32_t ff_loc = ~0;
+static uint32_t zero_loc;
+static int bar_probing;                /* Set after a write of ~0 to a BAR */
+static int is_lx;
+
+#define NB_SLOT 0x1    /* Northbridge - GX chip - Device 1 */
+#define SB_SLOT 0xf    /* Southbridge - CS5536 chip - Device F */
+
+static int is_simulated(unsigned int bus, unsigned int devfn)
+{
+       return (!bus && ((PCI_SLOT(devfn) == NB_SLOT) ||
+                       (PCI_SLOT(devfn) == SB_SLOT)));
+}
+
+static uint32_t *hdr_addr(const uint32_t *hdr, int reg)
+{
+       uint32_t addr;
+
+       /*
+        * This is a little bit tricky.  The header maps consist of
+        * 0x20 bytes of size masks, followed by 0x70 bytes of header data.
+        * In the normal case, when not probing a BAR's size, we want
+        * to access the header data, so we add 0x20 to the reg offset,
+        * thus skipping the size mask area.
+        * In the BAR probing case, we want to access the size mask for
+        * the BAR, so we subtract 0x10 (the config header offset for
+        * BAR0), and don't skip the size mask area.
+        */
+
+       addr = (uint32_t)hdr + reg + (bar_probing ? -0x10 : 0x20);
+
+       bar_probing = 0;
+       return (uint32_t *)addr;
+}
+
+static int pci_olpc_read(unsigned int seg, unsigned int bus,
+               unsigned int devfn, int reg, int len, uint32_t *value)
+{
+       uint32_t *addr;
+
+       /* Use the hardware mechanism for non-simulated devices */
+       if (!is_simulated(bus, devfn))
+               return pci_direct_conf1.read(seg, bus, devfn, reg, len, value);
+
+       /*
+        * No device has config registers past 0x70, so we save table space
+        * by not storing entries for the nonexistent registers
+        */
+       if (reg >= 0x70)
+               addr = &zero_loc;
+       else {
+               switch (devfn) {
+               case  0x8:
+                       addr = hdr_addr(is_lx ? lxnb_hdr : gxnb_hdr, reg);
+                       break;
+               case  0x9:
+                       addr = hdr_addr(is_lx ? lxfb_hdr : gxfb_hdr, reg);
+                       break;
+               case  0xa:
+                       addr = is_lx ? hdr_addr(aes_hdr, reg) : &ff_loc;
+                       break;
+               case 0x78:
+                       addr = hdr_addr(isa_hdr, reg);
+                       break;
+               case 0x7b:
+                       addr = hdr_addr(ac97_hdr, reg);
+                       break;
+               case 0x7c:
+                       addr = hdr_addr(ohci_hdr, reg);
+                       break;
+               case 0x7d:
+                       addr = hdr_addr(ehci_hdr, reg);
+                       break;
+               default:
+                       addr = &ff_loc;
+                       break;
+               }
+       }
+       switch (len) {
+       case 1:
+               *value = *(uint8_t *)addr;
+               break;
+       case 2:
+               *value = *(uint16_t *)addr;
+               break;
+       case 4:
+               *value = *addr;
+               break;
+       default:
+               BUG();
+       }
+
+       return 0;
+}
+
+static int pci_olpc_write(unsigned int seg, unsigned int bus,
+               unsigned int devfn, int reg, int len, uint32_t value)
+{
+       /* Use the hardware mechanism for non-simulated devices */
+       if (!is_simulated(bus, devfn))
+               return pci_direct_conf1.write(seg, bus, devfn, reg, len, value);
+
+       /* XXX we may want to extend this to simulate EHCI power management */
+
+       /*
+        * Mostly we just discard writes, but if the write is a size probe
+        * (i.e. writing ~0 to a BAR), we remember it and arrange to return
+        * the appropriate size mask on the next read.  This is cheating
+        * to some extent, because it depends on the fact that the next
+        * access after such a write will always be a read to the same BAR.
+        */
+
+       if ((reg >= 0x10) && (reg < 0x2c)) {
+               /* write is to a BAR */
+               if (value == ~0)
+                       bar_probing = 1;
+       } else {
+               /*
+                * No warning on writes to ROM BAR, CMD, LATENCY_TIMER,
+                * CACHE_LINE_SIZE, or PM registers.
+                */
+               if ((reg != PCI_ROM_ADDRESS) && (reg != PCI_COMMAND_MASTER) &&
+                               (reg != PCI_LATENCY_TIMER) &&
+                               (reg != PCI_CACHE_LINE_SIZE) && (reg != 0x44))
+                       printk(KERN_WARNING "OLPC PCI: Config write to devfn"
+                               " %x reg %x value %x\n", devfn, reg, value);
+       }
+
+       return 0;
+}
+
+static struct pci_raw_ops pci_olpc_conf = {
+       .read = pci_olpc_read,
+       .write = pci_olpc_write,
+};
+
+void __init pci_olpc_init(void)
+{
+       if (!machine_is_olpc() || olpc_has_vsa())
+               return;
+
+       printk(KERN_INFO "PCI: Using configuration type OLPC\n");
+       raw_pci_ops = &pci_olpc_conf;
+       is_lx = is_geode_lx();
+}
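
The size-mask rows at the top of each table above exist to satisfy the generic BAR sizing handshake that hdr_addr() and bar_probing implement. A simplified sketch of that handshake (roughly what the PCI core's probe path does; not code from this patch):

#include <linux/pci.h>

/* Size one 32-bit BAR: save it, write all-ones, read back the size mask,
 * restore it, then turn the mask into a byte length. */
static u32 example_bar_size(struct pci_dev *dev, int bar)
{
        int reg = PCI_BASE_ADDRESS_0 + 4 * bar;
        u32 orig, mask;

        pci_read_config_dword(dev, reg, &orig);
        pci_write_config_dword(dev, reg, ~0);   /* pci_olpc_write() sets bar_probing */
        pci_read_config_dword(dev, reg, &mask); /* pci_olpc_read() returns the mask row */
        pci_write_config_dword(dev, reg, orig);

        if (orig & PCI_BASE_ADDRESS_SPACE_IO)
                return ~(mask & (u32)PCI_BASE_ADDRESS_IO_MASK) + 1;
        return ~(mask & (u32)PCI_BASE_ADDRESS_MEM_MASK) + 1;
}
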
index c4bddaeff6194400539bad30890c528c20e9bf3f..c58805a92db5788e508482dd1579640c782750d0 100644 (file)
@@ -26,6 +26,7 @@
 #define PCI_ASSIGN_ALL_BUSSES  0x4000
 #define PCI_CAN_SKIP_ISA_ALIGN 0x8000
 #define PCI_USE__CRS           0x10000
+#define PCI_CHECK_ENABLE_AMD_MMCONF    0x20000
 
 extern unsigned int pci_probe;
 extern unsigned long pirq_table_addr;
@@ -97,11 +98,12 @@ extern struct pci_raw_ops pci_direct_conf1;
 extern int pci_direct_probe(void);
 extern void pci_direct_init(int type);
 extern void pci_pcbios_init(void);
-extern void pci_mmcfg_init(int type);
+extern void pci_olpc_init(void);
 
 /* pci-mmconfig.c */
 
 extern int __init pci_mmcfg_arch_init(void);
+extern void __init pci_mmcfg_arch_free(void);
 
 /*
  * AMD Fam10h CPUs are buggy, and cannot access MMIO config space
index 4b1620a1529e82863a18a96fc8507afbc1734b5d..1d3aa6b8718115fa8f7cadc65c04145b8b79bbcd 100644 (file)
@@ -1,2 +1,10 @@
-       .section ".vdso","a"
+#include <linux/init.h>
+
+__INITDATA
+
+       .globl vdso_start, vdso_end
+vdso_start:
        .incbin "arch/x86/vdso/vdso.so"
+vdso_end:
+
+__FINIT
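
With the vdso_start/vdso_end markers added above, C code can treat the embedded vdso.so as an ordinary byte array; a minimal sketch of the assumed consumer:

extern char vdso_start[], vdso_end[];

static inline unsigned long example_vdso_size(void)
{
        return vdso_end - vdso_start;   /* byte length of the embedded image */
}
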
index e2af8eee80e35b2915c461b469be6c0a4eb0f804..4dceeb1fc5e0f229305c983d39c8cf6497b5a409 100644 (file)
@@ -303,8 +303,6 @@ int __init sysenter_setup(void)
 
 #ifdef CONFIG_X86_32
        gate_vma_init();
-
-       printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
 #endif
 
        if (!vdso32_sysenter()) {
index 6cbcf65609addd6f04d6d6b8d05607624442337b..126766d43aea11bdfa024af7b1fae126542abff6 100644 (file)
@@ -387,7 +387,7 @@ static void xen_do_pin(unsigned level, unsigned long pfn)
 
 static int pin_page(struct page *page, enum pt_level level)
 {
-       unsigned pgfl = test_and_set_bit(PG_pinned, &page->flags);
+       unsigned pgfl = TestSetPagePinned(page);
        int flush;
 
        if (pgfl)
@@ -468,7 +468,7 @@ void __init xen_mark_init_mm_pinned(void)
 
 static int unpin_page(struct page *page, enum pt_level level)
 {
-       unsigned pgfl = test_and_clear_bit(PG_pinned, &page->flags);
+       unsigned pgfl = TestClearPagePinned(page);
 
        if (pgfl && !PageHighMem(page)) {
                void *pt = lowmem_page_address(page);
index ef63adadf7f4106ccded17d9370fd619a6557fb4..070ff8af3a21e45a338c95dad5d0e6d8bb74907c 100644 (file)
 #include <linux/thread_info.h>
 #include <linux/ptrace.h>
 #include <linux/mm.h>
+#include <linux/kbuild.h>
 
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
-#define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val))
-
 int main(void)
 {
        /* struct pt_regs */
index 55c5f1fc4f1fcd90c1f119a9bc0f58b46f446e83..a09ead19f9c5702a1ad76d709c54969176fe9e94 100644 (file)
@@ -26,8 +26,7 @@ int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 {
        if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
            prepare_flush_fn == NULL) {
-               printk(KERN_ERR "%s: prepare_flush_fn required\n",
-                                                               __FUNCTION__);
+               printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
                return -EINVAL;
        }
 
@@ -53,7 +52,7 @@ EXPORT_SYMBOL(blk_queue_ordered);
 /*
  * Cache flushing for ordered writes handling
  */
-inline unsigned blk_ordered_cur_seq(struct request_queue *q)
+unsigned blk_ordered_cur_seq(struct request_queue *q)
 {
        if (!q->ordseq)
                return 0;
@@ -143,10 +142,8 @@ static void queue_flush(struct request_queue *q, unsigned which)
                end_io = post_flush_end_io;
        }
 
+       blk_rq_init(q, rq);
        rq->cmd_flags = REQ_HARDBARRIER;
-       rq_init(q, rq);
-       rq->elevator_private = NULL;
-       rq->elevator_private2 = NULL;
        rq->rq_disk = q->bar_rq.rq_disk;
        rq->end_io = end_io;
        q->prepare_flush_fn(q, rq);
@@ -167,14 +164,11 @@ static inline struct request *start_ordered(struct request_queue *q,
        blkdev_dequeue_request(rq);
        q->orig_bar_rq = rq;
        rq = &q->bar_rq;
-       rq->cmd_flags = 0;
-       rq_init(q, rq);
+       blk_rq_init(q, rq);
        if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
                rq->cmd_flags |= REQ_RW;
        if (q->ordered & QUEUE_ORDERED_FUA)
                rq->cmd_flags |= REQ_FUA;
-       rq->elevator_private = NULL;
-       rq->elevator_private2 = NULL;
        init_request_from_bio(rq, q->orig_bar_rq->bio);
        rq->end_io = bar_end_io;
 
index 2a438a93f7233d8bc2033956f23e3f436f0e1f44..b754a4a2f9bd26e2e3aaed11fb6fa3a9c3d67d0b 100644 (file)
@@ -107,41 +107,21 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 }
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
-/*
- * We can't just memset() the structure, since the allocation path
- * already stored some information in the request.
- */
-void rq_init(struct request_queue *q, struct request *rq)
+void blk_rq_init(struct request_queue *q, struct request *rq)
 {
+       memset(rq, 0, sizeof(*rq));
+
        INIT_LIST_HEAD(&rq->queuelist);
        INIT_LIST_HEAD(&rq->donelist);
        rq->q = q;
        rq->sector = rq->hard_sector = (sector_t) -1;
-       rq->nr_sectors = rq->hard_nr_sectors = 0;
-       rq->current_nr_sectors = rq->hard_cur_sectors = 0;
-       rq->bio = rq->biotail = NULL;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
-       rq->rq_disk = NULL;
-       rq->nr_phys_segments = 0;
-       rq->nr_hw_segments = 0;
-       rq->ioprio = 0;
-       rq->special = NULL;
-       rq->buffer = NULL;
+       rq->cmd = rq->__cmd;
        rq->tag = -1;
-       rq->errors = 0;
        rq->ref_count = 1;
-       rq->cmd_len = 0;
-       memset(rq->cmd, 0, sizeof(rq->cmd));
-       rq->data_len = 0;
-       rq->extra_len = 0;
-       rq->sense_len = 0;
-       rq->data = NULL;
-       rq->sense = NULL;
-       rq->end_io = NULL;
-       rq->end_io_data = NULL;
-       rq->next_rq = NULL;
 }
+EXPORT_SYMBOL(blk_rq_init);
 
 static void req_bio_endio(struct request *rq, struct bio *bio,
                          unsigned int nbytes, int error)
@@ -156,7 +136,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 
                if (unlikely(nbytes > bio->bi_size)) {
                        printk(KERN_ERR "%s: want %u bytes done, %u left\n",
-                              __FUNCTION__, nbytes, bio->bi_size);
+                              __func__, nbytes, bio->bi_size);
                        nbytes = bio->bi_size;
                }
 
@@ -194,7 +174,7 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
 
        if (blk_pc_request(rq)) {
                printk(KERN_INFO "  cdb: ");
-               for (bit = 0; bit < sizeof(rq->cmd); bit++)
+               for (bit = 0; bit < BLK_MAX_CDB; bit++)
                        printk("%02x ", rq->cmd[bit]);
                printk("\n");
        }
@@ -220,7 +200,8 @@ void blk_plug_device(struct request_queue *q)
        if (blk_queue_stopped(q))
                return;
 
-       if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+       if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
+               __set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
                mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
                blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
        }
@@ -235,9 +216,10 @@ int blk_remove_plug(struct request_queue *q)
 {
        WARN_ON(!irqs_disabled());
 
-       if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+       if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
                return 0;
 
+       queue_flag_clear(QUEUE_FLAG_PLUGGED, q);
        del_timer(&q->unplug_timer);
        return 1;
 }
@@ -333,15 +315,16 @@ void blk_start_queue(struct request_queue *q)
 {
        WARN_ON(!irqs_disabled());
 
-       clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+       queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 
        /*
         * one level of recursion is ok and is much faster than kicking
         * the unplug handling
         */
-       if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+       if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+               queue_flag_set(QUEUE_FLAG_REENTER, q);
                q->request_fn(q);
-               clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+               queue_flag_clear(QUEUE_FLAG_REENTER, q);
        } else {
                blk_plug_device(q);
                kblockd_schedule_work(&q->unplug_work);
@@ -366,7 +349,7 @@ EXPORT_SYMBOL(blk_start_queue);
 void blk_stop_queue(struct request_queue *q)
 {
        blk_remove_plug(q);
-       set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+       queue_flag_set(QUEUE_FLAG_STOPPED, q);
 }
 EXPORT_SYMBOL(blk_stop_queue);
 
@@ -395,11 +378,8 @@ EXPORT_SYMBOL(blk_sync_queue);
  * blk_run_queue - run a single device queue
  * @q: The queue to run
  */
-void blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(q->queue_lock, flags);
        blk_remove_plug(q);
 
        /*
@@ -407,15 +387,28 @@ void blk_run_queue(struct request_queue *q)
         * handling reinvoke the handler shortly if we already got there.
         */
        if (!elv_queue_empty(q)) {
-               if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+               if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+                       queue_flag_set(QUEUE_FLAG_REENTER, q);
                        q->request_fn(q);
-                       clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+                       queue_flag_clear(QUEUE_FLAG_REENTER, q);
                } else {
                        blk_plug_device(q);
                        kblockd_schedule_work(&q->unplug_work);
                }
        }
+}
+EXPORT_SYMBOL(__blk_run_queue);
+
+/**
+ * blk_run_queue - run a single device queue
+ * @q: The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+       unsigned long flags;
 
+       spin_lock_irqsave(q->queue_lock, flags);
+       __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -428,7 +421,7 @@ void blk_put_queue(struct request_queue *q)
 void blk_cleanup_queue(struct request_queue *q)
 {
        mutex_lock(&q->sysfs_lock);
-       set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+       queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
        mutex_unlock(&q->sysfs_lock);
 
        if (q->elevator)
@@ -607,6 +600,8 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
        if (!rq)
                return NULL;
 
+       blk_rq_init(q, rq);
+
        /*
         * first three bits are identical in rq->cmd_flags and bio->bi_rw,
         * see bio.h and blkdev.h
@@ -789,8 +784,6 @@ rq_starved:
        if (ioc_batching(q, ioc))
                ioc->nr_batch_requests--;
 
-       rq_init(q, rq);
-
        blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
        return rq;
@@ -1573,8 +1566,7 @@ static int __end_that_request_first(struct request *req, int error,
                        if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
                                blk_dump_rq_flags(req, "__end_that");
                                printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n",
-                                               __FUNCTION__, bio->bi_idx,
-                                               bio->bi_vcnt);
+                                      __func__, bio->bi_idx, bio->bi_vcnt);
                                break;
                        }
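
The conversions above replace atomic test_and_set_bit()/clear_bit() on q->queue_flags with queue_flag_set()/queue_flag_clear() helpers. Roughly, the helpers look like the sketch below (not the verbatim blkdev.h code): they use non-atomic bitops and rely on q->queue_lock being held, while variants such as queue_flag_set_unlocked() remain available for the few call sites that run without the lock (e.g. blk_cleanup_queue()).

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
        /* caller holds q->queue_lock, so a non-atomic op is enough */
        __set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
        __clear_bit(flag, &q->queue_flags);
}
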
 
index 3c942bd6422a2ba639597ace2ad9f3768ccf1456..0b1af5a3537ca0728407c8acfedfaba44338cd19 100644 (file)
@@ -255,10 +255,18 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
  * @kbuf:      the kernel buffer
  * @len:       length of user data
  * @gfp_mask:  memory allocation flags
+ *
+ * Description:
+ *    Data will be mapped directly if possible. Otherwise a bounce
+ *    buffer is used.
  */
 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
                    unsigned int len, gfp_t gfp_mask)
 {
+       unsigned long kaddr;
+       unsigned int alignment;
+       int reading = rq_data_dir(rq) == READ;
+       int do_copy = 0;
        struct bio *bio;
 
        if (len > (q->max_hw_sectors << 9))
@@ -266,13 +274,24 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
        if (!len || !kbuf)
                return -EINVAL;
 
-       bio = bio_map_kern(q, kbuf, len, gfp_mask);
+       kaddr = (unsigned long)kbuf;
+       alignment = queue_dma_alignment(q) | q->dma_pad_mask;
+       do_copy = ((kaddr & alignment) || (len & alignment));
+
+       if (do_copy)
+               bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+       else
+               bio = bio_map_kern(q, kbuf, len, gfp_mask);
+
        if (IS_ERR(bio))
                return PTR_ERR(bio);
 
        if (rq_data_dir(rq) == WRITE)
                bio->bi_rw |= (1 << BIO_RW);
 
+       if (do_copy)
+               rq->cmd_flags |= REQ_COPY_USER;
+
        blk_rq_bio_prep(q, rq, bio);
        blk_queue_bounce(q, &rq->bio);
        rq->buffer = rq->data = NULL;
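
blk_rq_map_kern() now transparently bounces buffers that violate the queue's DMA alignment through bio_copy_kern() instead of mapping them directly. A hypothetical caller (command setup omitted; this only illustrates the mapping step):

#include <linux/blkdev.h>

static int example_issue(struct request_queue *q, void *buf, unsigned int len)
{
        struct request *rq;
        int err;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (!rq)
                return -ENOMEM;

        /* copies through a bounce bio if buf/len are misaligned */
        err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
        if (!err)
                err = blk_execute_rq(q, NULL, rq, 0);

        blk_put_request(rq);
        return err;
}
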
index b5c5c4a9e3f08d051a25d44e92689580d19cbce5..73b23562af20086f3a8623df6e9f32561ae330b5 100644 (file)
@@ -55,7 +55,7 @@ void blk_recalc_rq_segments(struct request *rq)
        if (!rq->bio)
                return;
 
-       cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+       cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
        hw_seg_size = seg_size = 0;
        phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
        rq_for_each_segment(bv, rq, iter) {
@@ -128,7 +128,7 @@ EXPORT_SYMBOL(blk_recount_segments);
 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
                                   struct bio *nxt)
 {
-       if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+       if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
                return 0;
 
        if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
@@ -175,7 +175,7 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
        int nsegs, cluster;
 
        nsegs = 0;
-       cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+       cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
 
        /*
         * for each bio in rq
index 5713f7e5cbd266c6db06a8b5311b0ffa97d225b2..bb93d4c32775abdc2fdba16c3bc208fb20045f47 100644 (file)
@@ -14,7 +14,6 @@ unsigned long blk_max_low_pfn;
 EXPORT_SYMBOL(blk_max_low_pfn);
 
 unsigned long blk_max_pfn;
-EXPORT_SYMBOL(blk_max_pfn);
 
 /**
  * blk_queue_prep_rq - set a prepare_request function for queue
@@ -169,8 +168,8 @@ void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
 {
        if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
                max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
-               printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-                                                       max_sectors);
+               printk(KERN_INFO "%s: set to minimum %d\n",
+                      __func__, max_sectors);
        }
 
        if (BLK_DEF_MAX_SECTORS > max_sectors)
@@ -197,8 +196,8 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 {
        if (!max_segments) {
                max_segments = 1;
-               printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-                                                       max_segments);
+               printk(KERN_INFO "%s: set to minimum %d\n",
+                      __func__, max_segments);
        }
 
        q->max_phys_segments = max_segments;
@@ -221,8 +220,8 @@ void blk_queue_max_hw_segments(struct request_queue *q,
 {
        if (!max_segments) {
                max_segments = 1;
-               printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-                                                       max_segments);
+               printk(KERN_INFO "%s: set to minimum %d\n",
+                      __func__, max_segments);
        }
 
        q->max_hw_segments = max_segments;
@@ -242,8 +241,8 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
 {
        if (max_size < PAGE_CACHE_SIZE) {
                max_size = PAGE_CACHE_SIZE;
-               printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__,
-                                                       max_size);
+               printk(KERN_INFO "%s: set to minimum %d\n",
+                      __func__, max_size);
        }
 
        q->max_segment_size = max_size;
@@ -288,7 +287,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
        t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
        t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
        if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags))
-               clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags);
+               queue_flag_clear(QUEUE_FLAG_CLUSTER, t);
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
@@ -358,8 +357,8 @@ void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
 {
        if (mask < PAGE_CACHE_SIZE - 1) {
                mask = PAGE_CACHE_SIZE - 1;
-               printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__,
-                                                       mask);
+               printk(KERN_INFO "%s: set to minimum %lx\n",
+                      __func__, mask);
        }
 
        q->seg_boundary_mask = mask;
index fc41d83be22bb02bab61f09b4b356dbba4697a05..e85c4013e8a29a5924c4657447a2b6611eb17702 100644 (file)
@@ -135,6 +135,25 @@ static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
        return queue_var_show(max_hw_sectors_kb, (page));
 }
 
+static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
+{
+       return queue_var_show(blk_queue_nomerges(q), page);
+}
+
+static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
+                                   size_t count)
+{
+       unsigned long nm;
+       ssize_t ret = queue_var_store(&nm, page, count);
+
+       if (nm)
+              set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+       else
+              clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags);
+
+       return ret;
+}
+
 
 static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
@@ -170,6 +189,12 @@ static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .show = queue_hw_sector_size_show,
 };
 
+static struct queue_sysfs_entry queue_nomerges_entry = {
+       .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
+       .show = queue_nomerges_show,
+       .store = queue_nomerges_store,
+};
+
 static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
@@ -177,6 +202,7 @@ static struct attribute *default_attrs[] = {
        &queue_max_sectors_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
+       &queue_nomerges_entry.attr,
        NULL,
 };
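
The new attribute appears as /sys/block/<disk>/queue/nomerges: writing a non-zero value sets QUEUE_FLAG_NOMERGES, and the elevator.c hunk below then makes elv_merge() return ELEVATOR_NO_MERGE, so incoming bios are no longer merged into existing requests.
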
 
index 4780a46ce2346898953085a6f4079c7734dcbaf7..de64e04299771f08eebb391616d63b72c2fca64a 100644 (file)
@@ -70,7 +70,7 @@ void __blk_queue_free_tags(struct request_queue *q)
        __blk_free_tags(bqt);
 
        q->queue_tags = NULL;
-       q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+       queue_flag_clear(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
@@ -98,7 +98,7 @@ EXPORT_SYMBOL(blk_free_tags);
  **/
 void blk_queue_free_tags(struct request_queue *q)
 {
-       clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+       queue_flag_clear(QUEUE_FLAG_QUEUED, q);
 }
 EXPORT_SYMBOL(blk_queue_free_tags);
 
@@ -112,7 +112,7 @@ init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
        if (q && depth > q->nr_requests * 2) {
                depth = q->nr_requests * 2;
                printk(KERN_ERR "%s: adjusted depth to %d\n",
-                               __FUNCTION__, depth);
+                      __func__, depth);
        }
 
        tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
@@ -188,7 +188,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
                rc = blk_queue_resize_tags(q, depth);
                if (rc)
                        return rc;
-               set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+               queue_flag_set(QUEUE_FLAG_QUEUED, q);
                return 0;
        } else
                atomic_inc(&tags->refcnt);
@@ -197,7 +197,7 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
         * assign it, all done
         */
        q->queue_tags = tags;
-       q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+       queue_flag_set(QUEUE_FLAG_QUEUED, q);
        INIT_LIST_HEAD(&q->tag_busy_list);
        return 0;
 fail:
@@ -296,13 +296,13 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
        if (unlikely(bqt->tag_index[tag] == NULL))
                printk(KERN_ERR "%s: tag %d is missing\n",
-                      __FUNCTION__, tag);
+                      __func__, tag);
 
        bqt->tag_index[tag] = NULL;
 
        if (unlikely(!test_bit(tag, bqt->tag_map))) {
                printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
-                      __FUNCTION__, tag);
+                      __func__, tag);
                return;
        }
        /*
@@ -340,7 +340,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
        if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
                printk(KERN_ERR
                       "%s: request %p for device [%s] already tagged %d",
-                      __FUNCTION__, rq,
+                      __func__, rq,
                       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
                BUG();
        }
index ec9120fb789a67c560df9c12e38dc8e0cc46a008..59776ab4742aae29678d34886665dc437dadd00c 100644 (file)
@@ -10,7 +10,6 @@
 extern struct kmem_cache *blk_requestq_cachep;
 extern struct kobj_type blk_queue_ktype;
 
-void rq_init(struct request_queue *q, struct request *rq);
 void init_request_from_bio(struct request *req, struct bio *bio);
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
                        struct bio *bio);
index f51172ed27c25b02f54dc91895e794d30e9d3914..fa796b605f5547b546ff41cc4d387496dca424ad 100644 (file)
@@ -57,7 +57,7 @@ enum {
 #undef BSG_DEBUG
 
 #ifdef BSG_DEBUG
-#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ##args)
+#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
 #else
 #define dprintk(fmt, args...)
 #endif
@@ -699,14 +699,26 @@ static struct bsg_device *bsg_alloc_device(void)
        return bd;
 }
 
+static void bsg_kref_release_function(struct kref *kref)
+{
+       struct bsg_class_device *bcd =
+               container_of(kref, struct bsg_class_device, ref);
+
+       if (bcd->release)
+               bcd->release(bcd->parent);
+
+       put_device(bcd->parent);
+}
+
 static int bsg_put_device(struct bsg_device *bd)
 {
-       int ret = 0;
-       struct device *dev = bd->queue->bsg_dev.dev;
+       int ret = 0, do_free;
+       struct request_queue *q = bd->queue;
 
        mutex_lock(&bsg_mutex);
 
-       if (!atomic_dec_and_test(&bd->ref_count))
+       do_free = atomic_dec_and_test(&bd->ref_count);
+       if (!do_free)
                goto out;
 
        dprintk("%s: tearing down\n", bd->name);
@@ -723,12 +735,13 @@ static int bsg_put_device(struct bsg_device *bd)
         */
        ret = bsg_complete_all_commands(bd);
 
-       blk_put_queue(bd->queue);
        hlist_del(&bd->dev_list);
        kfree(bd);
 out:
        mutex_unlock(&bsg_mutex);
-       put_device(dev);
+       kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
+       if (do_free)
+               blk_put_queue(q);
        return ret;
 }
 
@@ -796,7 +809,7 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
        mutex_lock(&bsg_mutex);
        bcd = idr_find(&bsg_minor_idr, iminor(inode));
        if (bcd)
-               get_device(bcd->dev);
+               kref_get(&bcd->ref);
        mutex_unlock(&bsg_mutex);
 
        if (!bcd)
@@ -808,7 +821,7 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
 
        bd = bsg_add_device(inode, bcd->queue, file);
        if (IS_ERR(bd))
-               put_device(bcd->dev);
+               kref_put(&bcd->ref, bsg_kref_release_function);
 
        return bd;
 }
@@ -947,14 +960,14 @@ void bsg_unregister_queue(struct request_queue *q)
        idr_remove(&bsg_minor_idr, bcd->minor);
        sysfs_remove_link(&q->kobj, "bsg");
        device_unregister(bcd->class_dev);
-       put_device(bcd->dev);
        bcd->class_dev = NULL;
+       kref_put(&bcd->ref, bsg_kref_release_function);
        mutex_unlock(&bsg_mutex);
 }
 EXPORT_SYMBOL_GPL(bsg_unregister_queue);
 
-int bsg_register_queue(struct request_queue *q, struct device *gdev,
-                      const char *name)
+int bsg_register_queue(struct request_queue *q, struct device *parent,
+                      const char *name, void (*release)(struct device *))
 {
        struct bsg_class_device *bcd;
        dev_t dev;
@@ -965,7 +978,7 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
        if (name)
                devname = name;
        else
-               devname = gdev->bus_id;
+               devname = parent->bus_id;
 
        /*
         * we need a proper transport to send commands, not a stacked device
@@ -996,9 +1009,11 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
 
        bcd->minor = minor;
        bcd->queue = q;
-       bcd->dev = get_device(gdev);
+       bcd->parent = get_device(parent);
+       bcd->release = release;
+       kref_init(&bcd->ref);
        dev = MKDEV(bsg_major, bcd->minor);
-       class_dev = device_create(bsg_class, gdev, dev, "%s", devname);
+       class_dev = device_create(bsg_class, parent, dev, "%s", devname);
        if (IS_ERR(class_dev)) {
                ret = PTR_ERR(class_dev);
                goto put_dev;
@@ -1017,7 +1032,7 @@ int bsg_register_queue(struct request_queue *q, struct device *gdev,
 unregister_class_dev:
        device_unregister(class_dev);
 put_dev:
-       put_device(gdev);
+       put_device(parent);
 remove_idr:
        idr_remove(&bsg_minor_idr, minor);
 unlock:
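
bsg_register_queue() now takes a release callback and pins the parent device through a kref on the bsg_class_device. A hypothetical transport-side caller of the new signature (the example_* names are illustrative only):

static void example_bsg_release(struct device *parent)
{
        /* runs from bsg_kref_release_function() after the last reference drops */
        kfree(dev_get_drvdata(parent));
}

static int example_attach_bsg(struct request_queue *q, struct device *parent)
{
        return bsg_register_queue(q, parent, NULL, example_bsg_release);
}
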
index 88318c383608defa5018bdfa929918c767c8d7fa..980f8ae147b4c396b15886be25bd9b2342322472 100644 (file)
@@ -69,7 +69,7 @@ static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 /*
  * can we safely merge with this request?
  */
-inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 {
        if (!rq_mergeable(rq))
                return 0;
@@ -488,6 +488,9 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
                }
        }
 
+       if (blk_queue_nomerges(q))
+               return ELEVATOR_NO_MERGE;
+
        /*
         * See if our hash lookup can find a potential backmerge.
         */
@@ -647,7 +650,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 
        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
-                      __FUNCTION__, where);
+                      __func__, where);
                BUG();
        }
 
@@ -805,8 +808,7 @@ struct request *elv_next_request(struct request_queue *q)
                        rq->cmd_flags |= REQ_QUIET;
                        end_queued_request(rq, 0);
                } else {
-                       printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
-                                                               ret);
+                       printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
                        break;
                }
        }
@@ -1070,7 +1072,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
         */
        spin_lock_irq(q->queue_lock);
 
-       set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+       queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
 
        elv_drain_elevator(q);
 
@@ -1104,7 +1106,10 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
         * finally exit old elevator and turn off BYPASS.
         */
        elevator_exit(old_elevator);
-       clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+       spin_lock_irq(q->queue_lock);
+       queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+       spin_unlock_irq(q->queue_lock);
+
        return 1;
 
 fail_register:
@@ -1115,7 +1120,11 @@ fail_register:
        elevator_exit(e);
        q->elevator = old_elevator;
        elv_register_queue(q);
-       clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
+
+       spin_lock_irq(q->queue_lock);
+       queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+       spin_unlock_irq(q->queue_lock);
+
        return 0;
 }
 
index 00da5219ee37c02a066d45e6ceefa65d9606d5e6..fda9c7a63c29e3ecc323fa373365843771860ec4 100644 (file)
@@ -182,11 +182,17 @@ static int exact_lock(dev_t devt, void *data)
  */
 void add_disk(struct gendisk *disk)
 {
+       struct backing_dev_info *bdi;
+
        disk->flags |= GENHD_FL_UP;
        blk_register_region(MKDEV(disk->major, disk->first_minor),
                            disk->minors, NULL, exact_match, exact_lock, disk);
        register_disk(disk);
        blk_register_queue(disk);
+
+       bdi = &disk->queue->backing_dev_info;
+       bdi_register_dev(bdi, MKDEV(disk->major, disk->first_minor));
+       sysfs_create_link(&disk->dev.kobj, &bdi->dev->kobj, "bdi");
 }
 
 EXPORT_SYMBOL(add_disk);
@@ -194,6 +200,8 @@ EXPORT_SYMBOL(del_gendisk); /* in partitions/check.c */
 
 void unlink_gendisk(struct gendisk *disk)
 {
+       sysfs_remove_link(&disk->dev.kobj, "bdi");
+       bdi_unregister(&disk->queue->backing_dev_info);
        blk_unregister_queue(disk);
        blk_unregister_region(MKDEV(disk->major, disk->first_minor),
                              disk->minors);
index a2c3a936ebf98e1481346e76a7ef268b803c76ea..ffa3720e6ca05de7830a466664327deed289aa12 100644 (file)
@@ -217,8 +217,6 @@ EXPORT_SYMBOL_GPL(blk_verify_command);
 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
                             struct sg_io_hdr *hdr, int has_write_perm)
 {
-       memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
-
        if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
                return -EFAULT;
        if (blk_verify_command(rq->cmd, has_write_perm))
@@ -531,7 +529,6 @@ static int __blk_send_generic(struct request_queue *q, struct gendisk *bd_disk,
        rq->data_len = 0;
        rq->extra_len = 0;
        rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
-       memset(rq->cmd, 0, sizeof(rq->cmd));
        rq->cmd[0] = cmd;
        rq->cmd[4] = data;
        rq->cmd_len = 6;
index ed8ac5a6fa5ff0bc22fa67fc193a1b0629d4a063..4b226768752abed604033f708d093f0b04aee41b 100644 (file)
@@ -217,9 +217,10 @@ static void crypto_authenc_givencrypt_done(struct crypto_async_request *req,
                                           int err)
 {
        if (!err) {
-               struct aead_givcrypt_request *greq = req->data;
+               struct aead_request *areq = req->data;
+               struct skcipher_givcrypt_request *greq = aead_request_ctx(areq);
 
-               err = crypto_authenc_genicv(&greq->areq, greq->giv, 0);
+               err = crypto_authenc_genicv(areq, greq->giv, 0);
        }
 
        aead_request_complete(req->data, err);
index 250425263e00e59fad2b5a11436ce5a02b9dbe41..b150de562057f59bd85a69f0bb4e3267589c6bff 100644 (file)
@@ -190,8 +190,10 @@ static struct crypto_instance *cryptd_alloc_instance(struct crypto_alg *alg,
        int err;
 
        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
-       if (IS_ERR(inst))
+       if (!inst) {
+               inst = ERR_PTR(-ENOMEM);
                goto out;
+       }
 
        err = -ENAMETOOLONG;
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
index b14f14e314b6cf3ada4bc0694aa9f5f364ab73e7..881d30910434831f528f5be244ab502bd984d0b6 100644 (file)
@@ -136,7 +136,8 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
        }
 
        ablkcipher_request_set_crypt(subreq, reqctx->src, dst,
-                                    req->creq.nbytes, req->creq.info);
+                                    req->creq.nbytes + ivsize,
+                                    req->creq.info);
 
        memcpy(req->creq.info, ctx->salt, ivsize);
 
index 80f0ec91e2cf3fd04f6076661c6e223ec931a5db..59f33fa6af3ea8f332bc8e23682eda538a0acb08 100644 (file)
@@ -84,6 +84,8 @@ source "drivers/memstick/Kconfig"
 
 source "drivers/leds/Kconfig"
 
+source "drivers/accessibility/Kconfig"
+
 source "drivers/infiniband/Kconfig"
 
 source "drivers/edac/Kconfig"
index e5e394a7e6c0873be132cb10086b7a89c93332d7..f65deda72d6102636e47554c9864d529df27a258 100644 (file)
@@ -70,6 +70,7 @@ obj-$(CONFIG_WATCHDOG)                += watchdog/
 obj-$(CONFIG_PHONE)            += telephony/
 obj-$(CONFIG_MD)               += md/
 obj-$(CONFIG_BT)               += bluetooth/
+obj-$(CONFIG_ACCESSIBILITY)    += accessibility/
 obj-$(CONFIG_ISDN)             += isdn/
 obj-$(CONFIG_EDAC)             += edac/
 obj-$(CONFIG_MCA)              += mca/
diff --git a/drivers/accessibility/Kconfig b/drivers/accessibility/Kconfig
new file mode 100644 (file)
index 0000000..1264c4b
--- /dev/null
@@ -0,0 +1,23 @@
+menuconfig ACCESSIBILITY
+       bool "Accessibility support"
+       ---help---
+         Enable a submenu where accessibility items may be enabled.
+
+         If unsure, say N.
+
+if ACCESSIBILITY
+config A11Y_BRAILLE_CONSOLE
+       bool "Console on braille device"
+       depends on VT
+       depends on SERIAL_CORE_CONSOLE
+       ---help---
+         Enables console output on a braille device connected to a 8250
+         serial port. For now only the VisioBraille device is supported.
+
+         To actually enable it, you need to pass option
+         console=brl,ttyS0
+         to the kernel. Options are the same as for serial console.
+
+         If unsure, say N.
+
+endif # ACCESSIBILITY
diff --git a/drivers/accessibility/Makefile b/drivers/accessibility/Makefile
new file mode 100644 (file)
index 0000000..72b01a4
--- /dev/null
@@ -0,0 +1 @@
+obj-y                          += braille/
diff --git a/drivers/accessibility/braille/Makefile b/drivers/accessibility/braille/Makefile
new file mode 100644 (file)
index 0000000..2e9f16c
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_A11Y_BRAILLE_CONSOLE)             += braille_console.o
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
new file mode 100644 (file)
index 0000000..0a5f6b2
--- /dev/null
@@ -0,0 +1,397 @@
+/*
+ * Minimalistic braille device kernel support.
+ *
+ * By default, shows console messages on the braille device.
+ * Pressing Insert switches to VC browsing.
+ *
+ *  Copyright (C) Samuel Thibault <samuel.thibault@ens-lyon.org>
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/autoconf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/console.h>
+#include <linux/notifier.h>
+
+#include <linux/selection.h>
+#include <linux/vt_kern.h>
+#include <linux/consolemap.h>
+
+#include <linux/keyboard.h>
+#include <linux/kbd_kern.h>
+#include <linux/input.h>
+
+MODULE_AUTHOR("samuel.thibault@ens-lyon.org");
+MODULE_DESCRIPTION("braille device");
+MODULE_LICENSE("GPL");
+
+/*
+ * Braille device support part.
+ */
+
+/* Emit various sounds */
+static int sound;
+module_param(sound, bool, 0);
+MODULE_PARM_DESC(sound, "emit sounds");
+
+static void beep(unsigned int freq)
+{
+       if (sound)
+               kd_mksound(freq, HZ/10);
+}
+
+/* mini console */
+#define WIDTH 40
+#define BRAILLE_KEY KEY_INSERT
+static u16 console_buf[WIDTH];
+static int console_cursor;
+
+/* mini view of VC */
+static int vc_x, vc_y, lastvc_x, lastvc_y;
+
+/* show console ? (or show VC) */
+static int console_show = 1;
+/* pending newline ? */
+static int console_newline = 1;
+static int lastVC = -1;
+
+static struct console *braille_co;
+
+/* Very VisioBraille-specific */
+static void braille_write(u16 *buf)
+{
+       static u16 lastwrite[WIDTH];
+       unsigned char data[1 + 1 + 2*WIDTH + 2 + 1], csum = 0, *c;
+       u16 out;
+       int i;
+
+       if (!braille_co)
+               return;
+
+       if (!memcmp(lastwrite, buf, WIDTH * sizeof(*buf)))
+               return;
+       memcpy(lastwrite, buf, WIDTH * sizeof(*buf));
+
+#define SOH 1
+#define STX 2
+#define ETX 2
+#define EOT 4
+#define ENQ 5
+       data[0] = STX;
+       data[1] = '>';
+       csum ^= '>';
+       c = &data[2];
+       for (i = 0; i < WIDTH; i++) {
+               out = buf[i];
+               if (out >= 0x100)
+                       out = '?';
+               else if (out == 0x00)
+                       out = ' ';
+               csum ^= out;
+               if (out <= 0x05) {
+                       *c++ = SOH;
+                       out |= 0x40;
+               }
+               *c++ = out;
+       }
+
+       if (csum <= 0x05) {
+               *c++ = SOH;
+               csum |= 0x40;
+       }
+       *c++ = csum;
+       *c++ = ETX;
+
+       braille_co->write(braille_co, data, c - data);
+}
+
+/* Follow the VC cursor*/
+static void vc_follow_cursor(struct vc_data *vc)
+{
+       vc_x = vc->vc_x - (vc->vc_x % WIDTH);
+       vc_y = vc->vc_y;
+       lastvc_x = vc->vc_x;
+       lastvc_y = vc->vc_y;
+}
+
+/* Maybe the VC cursor moved, if so follow it */
+static void vc_maybe_cursor_moved(struct vc_data *vc)
+{
+       if (vc->vc_x != lastvc_x || vc->vc_y != lastvc_y)
+               vc_follow_cursor(vc);
+}
+
+/* Show portion of VC at vc_x, vc_y */
+static void vc_refresh(struct vc_data *vc)
+{
+       u16 buf[WIDTH];
+       int i;
+
+       for (i = 0; i < WIDTH; i++) {
+               u16 glyph = screen_glyph(vc,
+                               2 * (vc_x + i) + vc_y * vc->vc_size_row);
+               buf[i] = inverse_translate(vc, glyph, 1);
+       }
+       braille_write(buf);
+}
+
+/*
+ * Link to keyboard
+ */
+
+static int keyboard_notifier_call(struct notifier_block *blk,
+                                 unsigned long code, void *_param)
+{
+       struct keyboard_notifier_param *param = _param;
+       struct vc_data *vc = param->vc;
+       int ret = NOTIFY_OK;
+
+       if (!param->down)
+               return ret;
+
+       switch (code) {
+       case KBD_KEYCODE:
+               if (console_show) {
+                       if (param->value == BRAILLE_KEY) {
+                               console_show = 0;
+                               beep(880);
+                               vc_maybe_cursor_moved(vc);
+                               vc_refresh(vc);
+                               ret = NOTIFY_STOP;
+                       }
+               } else {
+                       ret = NOTIFY_STOP;
+                       switch (param->value) {
+                       case KEY_INSERT:
+                               beep(440);
+                               console_show = 1;
+                               lastVC = -1;
+                               braille_write(console_buf);
+                               break;
+                       case KEY_LEFT:
+                               if (vc_x > 0) {
+                                       vc_x -= WIDTH;
+                                       if (vc_x < 0)
+                                               vc_x = 0;
+                               } else if (vc_y >= 1) {
+                                       beep(880);
+                                       vc_y--;
+                                       vc_x = vc->vc_cols-WIDTH;
+                               } else
+                                       beep(220);
+                               break;
+                       case KEY_RIGHT:
+                               if (vc_x + WIDTH < vc->vc_cols) {
+                                       vc_x += WIDTH;
+                               } else if (vc_y + 1 < vc->vc_rows) {
+                                       beep(880);
+                                       vc_y++;
+                                       vc_x = 0;
+                               } else
+                                       beep(220);
+                               break;
+                       case KEY_DOWN:
+                               if (vc_y + 1 < vc->vc_rows)
+                                       vc_y++;
+                               else
+                                       beep(220);
+                               break;
+                       case KEY_UP:
+                               if (vc_y >= 1)
+                                       vc_y--;
+                               else
+                                       beep(220);
+                               break;
+                       case KEY_HOME:
+                               vc_follow_cursor(vc);
+                               break;
+                       case KEY_PAGEUP:
+                               vc_x = 0;
+                               vc_y = 0;
+                               break;
+                       case KEY_PAGEDOWN:
+                               vc_x = 0;
+                               vc_y = vc->vc_rows-1;
+                               break;
+                       default:
+                               ret = NOTIFY_OK;
+                               break;
+                       }
+                       if (ret == NOTIFY_STOP)
+                               vc_refresh(vc);
+               }
+               break;
+       case KBD_POST_KEYSYM:
+       {
+               unsigned char type = KTYP(param->value) - 0xf0;
+               if (type == KT_SPEC) {
+                       unsigned char val = KVAL(param->value);
+                       int on_off = -1;
+
+                       switch (val) {
+                       case KVAL(K_CAPS):
+                               on_off = vc_kbd_led(kbd_table + fg_console,
+                                               VC_CAPSLOCK);
+                               break;
+                       case KVAL(K_NUM):
+                               on_off = vc_kbd_led(kbd_table + fg_console,
+                                               VC_NUMLOCK);
+                               break;
+                       case KVAL(K_HOLD):
+                               on_off = vc_kbd_led(kbd_table + fg_console,
+                                               VC_SCROLLOCK);
+                               break;
+                       }
+                       if (on_off == 1)
+                               beep(880);
+                       else if (on_off == 0)
+                               beep(440);
+               }
+       }
+       case KBD_UNBOUND_KEYCODE:
+       case KBD_UNICODE:
+       case KBD_KEYSYM:
+               /* Unused */
+               break;
+       }
+       return ret;
+}
+
+static struct notifier_block keyboard_notifier_block = {
+       .notifier_call = keyboard_notifier_call,
+};
+
+static int vt_notifier_call(struct notifier_block *blk,
+                           unsigned long code, void *_param)
+{
+       struct vt_notifier_param *param = _param;
+       struct vc_data *vc = param->vc;
+       switch (code) {
+       case VT_ALLOCATE:
+               break;
+       case VT_DEALLOCATE:
+               break;
+       case VT_WRITE:
+       {
+               unsigned char c = param->c;
+               if (vc->vc_num != fg_console)
+                       break;
+               switch (c) {
+               case '\b':
+               case 127:
+                       if (console_cursor > 0) {
+                               console_cursor--;
+                               console_buf[console_cursor] = ' ';
+                       }
+                       break;
+               case '\n':
+               case '\v':
+               case '\f':
+               case '\r':
+                       console_newline = 1;
+                       break;
+               case '\t':
+                       c = ' ';
+                       /* Fallthrough */
+               default:
+                       if (c < 32)
+                               /* Ignore other control sequences */
+                               break;
+                       if (console_newline) {
+                               memset(console_buf, 0, sizeof(console_buf));
+                               console_cursor = 0;
+                               console_newline = 0;
+                       }
+                       if (console_cursor == WIDTH)
+                               memmove(console_buf, &console_buf[1],
+                                       (WIDTH-1) * sizeof(*console_buf));
+                       else
+                               console_cursor++;
+                       console_buf[console_cursor-1] = c;
+                       break;
+               }
+               if (console_show)
+                       braille_write(console_buf);
+               else {
+                       vc_maybe_cursor_moved(vc);
+                       vc_refresh(vc);
+               }
+               break;
+       }
+       case VT_UPDATE:
+               /* Maybe a VT switch, flush */
+               if (console_show) {
+                       if (vc->vc_num != lastVC) {
+                               lastVC = vc->vc_num;
+                               memset(console_buf, 0, sizeof(console_buf));
+                               console_cursor = 0;
+                               braille_write(console_buf);
+                       }
+               } else {
+                       vc_maybe_cursor_moved(vc);
+                       vc_refresh(vc);
+               }
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block vt_notifier_block = {
+       .notifier_call = vt_notifier_call,
+};
+
+/*
+ * Called from printk.c when console=brl is given
+ */
+
+int braille_register_console(struct console *console, int index,
+               char *console_options, char *braille_options)
+{
+       int ret;
+       if (!console_options)
+               /* Only support VisioBraille for now */
+               console_options = "57600o8";
+       if (braille_co)
+               return -ENODEV;
+       if (console->setup) {
+               ret = console->setup(console, console_options);
+               if (ret != 0)
+                       return ret;
+       }
+       console->flags |= CON_ENABLED;
+       console->index = index;
+       braille_co = console;
+       return 0;
+}
+
+int braille_unregister_console(struct console *console)
+{
+       if (braille_co != console)
+               return -EINVAL;
+       braille_co = NULL;
+       return 0;
+}
+
+static int __init braille_init(void)
+{
+       register_keyboard_notifier(&keyboard_notifier_block);
+       register_vt_notifier(&vt_notifier_block);
+       return 0;
+}
+
+console_initcall(braille_init);
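
For illustration only, not part of the patch: braille_write() above frames each refresh for the VisioBraille device as STX, '>', the 40 payload cells, an XOR checksum and ETX, SOH-escaping any byte that collides with the low control codes by also setting bit 0x40. A standalone userspace sketch of the same framing, with constants mirrored from the driver's defines and an assumed test string:

#include <stdio.h>
#include <string.h>

#define WIDTH 40
#define SOH 1
#define STX 2
#define ETX 2   /* values mirrored from the driver's defines above */

/* Build one frame from WIDTH 16-bit cells into out[]; returns its length.
 * Mirrors braille_write(): '?' for wide chars, ' ' for NUL, XOR checksum
 * over the unescaped payload, SOH-escaping of low control codes. */
static size_t build_frame(const unsigned short *buf, unsigned char *out)
{
        unsigned char csum = 0, *c = out;
        int i;

        *c++ = STX;
        *c++ = '>';
        csum ^= '>';

        for (i = 0; i < WIDTH; i++) {
                unsigned short cell = buf[i];

                if (cell >= 0x100)
                        cell = '?';
                else if (cell == 0x00)
                        cell = ' ';
                csum ^= cell;
                if (cell <= 0x05) {             /* escape low control codes */
                        *c++ = SOH;
                        cell |= 0x40;
                }
                *c++ = (unsigned char)cell;
        }

        if (csum <= 0x05) {
                *c++ = SOH;
                csum |= 0x40;
        }
        *c++ = csum;
        *c++ = ETX;

        return (size_t)(c - out);
}

int main(void)
{
        const char *msg = "hello";
        unsigned short line[WIDTH];
        unsigned char frame[1 + 1 + 2 * WIDTH + 2 + 1];
        size_t i, len;

        for (i = 0; i < WIDTH; i++)
                line[i] = i < strlen(msg) ? (unsigned char)msg[i] : ' ';

        len = build_frame(line, frame);
        for (i = 0; i < len; i++)
                printf("%02x ", frame[i]);
        printf("\n");
        return 0;
}
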
index b4f5e85428294eed21bb8f342af5a7a7c3fac87b..c52fca833268c7f1eaf53c091fa2f41e49d5d4a0 100644 (file)
@@ -140,6 +140,7 @@ config ACPI_VIDEO
        tristate "Video"
        depends on X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL
        depends on INPUT
+       select THERMAL
        help
          This driver implement the ACPI Extensions For Display Adapters
          for integrated graphics devices on motherboard, as specified in
@@ -151,6 +152,7 @@ config ACPI_VIDEO
 
 config ACPI_FAN
        tristate "Fan"
+       select THERMAL
        default y
        help
          This driver adds support for ACPI fan devices, allowing user-mode 
@@ -172,6 +174,7 @@ config ACPI_BAY
 
 config ACPI_PROCESSOR
        tristate "Processor"
+       select THERMAL
        default y
        help
          This driver installs ACPI as the idle handler for Linux, and uses
index 43a95e5640debc27496f518a2170b4b2a5090b2c..5b73f6a2cd86c839d36ed8c9cb83bb5659f0cc04 100644 (file)
@@ -92,6 +92,7 @@ struct acpi_ac {
 
 #ifdef CONFIG_ACPI_PROCFS_POWER
 static const struct file_operations acpi_ac_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_ac_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -195,16 +196,11 @@ static int acpi_ac_add_fs(struct acpi_device *device)
        }
 
        /* 'state' [R] */
-       entry = create_proc_entry(ACPI_AC_FILE_STATE,
-                                 S_IRUGO, acpi_device_dir(device));
+       entry = proc_create_data(ACPI_AC_FILE_STATE,
+                                S_IRUGO, acpi_device_dir(device),
+                                &acpi_ac_fops, acpi_driver_data(device));
        if (!entry)
                return -ENODEV;
-       else {
-               entry->proc_fops = &acpi_ac_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
-
        return 0;
 }
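
For illustration only, not part of the patch: the conversion above (and the matching ones in the battery and button drivers below) replaces the create-then-fill sequence of create_proc_entry() plus manual proc_fops/data/owner assignment with a single proc_create_data() call, which publishes the entry with its file_operations and private data already in place. A minimal sketch of the idiom against the procfs API of this era, with invented names:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
        seq_printf(m, "private data at %p\n", m->private);
        return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
        /* PDE(inode)->data is the pointer handed to proc_create_data() */
        return single_open(file, example_show, PDE(inode)->data);
}

static const struct file_operations example_fops = {
        .owner   = THIS_MODULE,
        .open    = example_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static int example_add_fs(struct proc_dir_entry *parent, void *priv)
{
        if (!proc_create_data("state", S_IRUGO, parent, &example_fops, priv))
                return -ENODEV;
        return 0;
}
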
 
index d5729d5dc1904564921cede6f7f80d72b987205c..b1c723f9f58d1fce3861730870b58550fe13f6a2 100644 (file)
@@ -741,15 +741,13 @@ static int acpi_battery_add_fs(struct acpi_device *device)
        }
 
        for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) {
-               entry = create_proc_entry(acpi_battery_file[i].name,
-                                 acpi_battery_file[i].mode, acpi_device_dir(device));
+               entry = proc_create_data(acpi_battery_file[i].name,
+                                        acpi_battery_file[i].mode,
+                                        acpi_device_dir(device),
+                                        &acpi_battery_file[i].ops,
+                                        acpi_driver_data(device));
                if (!entry)
                        return -ENODEV;
-               else {
-                       entry->proc_fops = &acpi_battery_file[i].ops;
-                       entry->data = acpi_driver_data(device);
-                       entry->owner = THIS_MODULE;
-               }
        }
        return 0;
 }
index 1fa86811b8ee6ee8e5e675666fc1092f0b4c6eb5..d2fc94161848ff91e424b08df40f5a0d9643aafe 100644 (file)
@@ -201,6 +201,7 @@ static int is_ejectable_bay(acpi_handle handle)
        return 0;
 }
 
+#if 0
 /**
  * eject_removable_drive - try to eject this drive
  * @dev : the device structure of the drive
@@ -225,6 +226,7 @@ int eject_removable_drive(struct device *dev)
        return 0;
 }
 EXPORT_SYMBOL_GPL(eject_removable_drive);
+#endif  /*  0  */
 
 static int acpi_bay_add_fs(struct bay *bay)
 {
index 2d1955c118337fae68004ccaf3f7d3f3238b9cba..a6dbcf4d9ef57dcdfa93ecc045e51fd60a4b20d1 100644 (file)
@@ -35,6 +35,7 @@
 #ifdef CONFIG_X86
 #include <asm/mpspec.h>
 #endif
+#include <linux/pci.h>
 #include <acpi/acpi_bus.h>
 #include <acpi/acpi_drivers.h>
 
@@ -784,6 +785,7 @@ static int __init acpi_init(void)
        result = acpi_bus_init();
 
        if (!result) {
+               pci_mmcfg_late_init();
                if (!(pm_flags & PM_APM))
                        pm_flags |= PM_ACPI;
                else {
index 6c5da83cdb6807be36db10253bd42c2c332c0402..1dfec413588ca6e07ba06f51b69b7ea431906a2b 100644 (file)
@@ -102,6 +102,7 @@ struct acpi_button {
 };
 
 static const struct file_operations acpi_button_info_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_button_info_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -109,6 +110,7 @@ static const struct file_operations acpi_button_info_fops = {
 };
 
 static const struct file_operations acpi_button_state_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_button_state_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -207,27 +209,21 @@ static int acpi_button_add_fs(struct acpi_device *device)
        acpi_device_dir(device)->owner = THIS_MODULE;
 
        /* 'info' [R] */
-       entry = create_proc_entry(ACPI_BUTTON_FILE_INFO,
-                                 S_IRUGO, acpi_device_dir(device));
+       entry = proc_create_data(ACPI_BUTTON_FILE_INFO,
+                                S_IRUGO, acpi_device_dir(device),
+                                &acpi_button_info_fops,
+                                acpi_driver_data(device));
        if (!entry)
                return -ENODEV;
-       else {
-               entry->proc_fops = &acpi_button_info_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
 
        /* show lid state [R] */
        if (button->type == ACPI_BUTTON_TYPE_LID) {
-               entry = create_proc_entry(ACPI_BUTTON_FILE_STATE,
-                                         S_IRUGO, acpi_device_dir(device));
+               entry = proc_create_data(ACPI_BUTTON_FILE_STATE,
+                                        S_IRUGO, acpi_device_dir(device),
+                                        &acpi_button_state_fops,
+                                        acpi_driver_data(device));
                if (!entry)
                        return -ENODEV;
-               else {
-                       entry->proc_fops = &acpi_button_state_fops;
-                       entry->data = acpi_driver_data(device);
-                       entry->owner = THIS_MODULE;
-               }
        }
 
        return 0;
index f049639bac355db3e6996505037624d0e742e92b..c78078315be9f707c8adc3b88184e4c70c5a097e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -89,12 +89,16 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
 
        ACPI_FUNCTION_TRACE(ds_create_buffer_field);
 
-       /* Get the name_string argument */
-
+       /*
+        * Get the name_string argument (name of the new buffer_field)
+        */
        if (op->common.aml_opcode == AML_CREATE_FIELD_OP) {
+
+               /* For create_field, name is the 4th argument */
+
                arg = acpi_ps_get_arg(op, 3);
        } else {
-               /* Create Bit/Byte/Word/Dword field */
+               /* For all other create_xXXField operators, name is the 3rd argument */
 
                arg = acpi_ps_get_arg(op, 2);
        }
@@ -107,26 +111,30 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
                node = walk_state->deferred_node;
                status = AE_OK;
        } else {
-               /*
-                * During the load phase, we want to enter the name of the field into
-                * the namespace.  During the execute phase (when we evaluate the size
-                * operand), we want to lookup the name
-                */
-               if (walk_state->parse_flags & ACPI_PARSE_EXECUTE) {
-                       flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE;
-               } else {
-                       flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
-                           ACPI_NS_ERROR_IF_FOUND;
+               /* Execute flag should always be set when this function is entered */
+
+               if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) {
+                       return_ACPI_STATUS(AE_AML_INTERNAL);
                }
 
-               /*
-                * Enter the name_string into the namespace
-                */
+               /* Creating new namespace node, should not already exist */
+
+               flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
+                   ACPI_NS_ERROR_IF_FOUND;
+
+               /* Mark node temporary if we are executing a method */
+
+               if (walk_state->method_node) {
+                       flags |= ACPI_NS_TEMPORARY;
+               }
+
+               /* Enter the name_string into the namespace */
+
                status =
                    acpi_ns_lookup(walk_state->scope_info,
                                   arg->common.value.string, ACPI_TYPE_ANY,
                                   ACPI_IMODE_LOAD_PASS1, flags, walk_state,
-                                  &(node));
+                                  &node);
                if (ACPI_FAILURE(status)) {
                        ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
                        return_ACPI_STATUS(status);
@@ -136,13 +144,13 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
        /*
         * We could put the returned object (Node) on the object stack for later,
         * but for now, we will put it in the "op" object that the parser uses,
-        * so we can get it again at the end of this scope
+        * so we can get it again at the end of this scope.
         */
        op->common.node = node;
 
        /*
         * If there is no object attached to the node, this node was just created
-        * and we need to create the field object.  Otherwise, this was a lookup
+        * and we need to create the field object. Otherwise, this was a lookup
         * of an existing node and we don't want to create the field object again.
         */
        obj_desc = acpi_ns_get_attached_object(node);
@@ -164,9 +172,8 @@ acpi_ds_create_buffer_field(union acpi_parse_object *op,
        }
 
        /*
-        * Remember location in AML stream of the field unit
-        * opcode and operands -- since the buffer and index
-        * operands must be evaluated.
+        * Remember location in AML stream of the field unit opcode and operands --
+        * since the buffer and index operands must be evaluated.
         */
        second_desc = obj_desc->common.next_object;
        second_desc->extra.aml_start = op->named.data;
@@ -261,7 +268,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
 
                case AML_INT_NAMEDFIELD_OP:
 
-                       /* Lookup the name */
+                       /* Lookup the name, it should already exist */
 
                        status = acpi_ns_lookup(walk_state->scope_info,
                                                (char *)&arg->named.name,
@@ -272,20 +279,23 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
                        if (ACPI_FAILURE(status)) {
                                ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
                                                     status);
-                               if (status != AE_ALREADY_EXISTS) {
-                                       return_ACPI_STATUS(status);
-                               }
-
-                               /* Already exists, ignore error */
+                               return_ACPI_STATUS(status);
                        } else {
                                arg->common.node = info->field_node;
                                info->field_bit_length = arg->common.value.size;
 
-                               /* Create and initialize an object for the new Field Node */
-
-                               status = acpi_ex_prep_field_value(info);
-                               if (ACPI_FAILURE(status)) {
-                                       return_ACPI_STATUS(status);
+                               /*
+                                * If there is no object attached to the node, this node was
+                                * just created and we need to create the field object.
+                                * Otherwise, this was a lookup of an existing node and we
+                                * don't want to create the field object again.
+                                */
+                               if (!acpi_ns_get_attached_object
+                                   (info->field_node)) {
+                                       status = acpi_ex_prep_field_value(info);
+                                       if (ACPI_FAILURE(status)) {
+                                               return_ACPI_STATUS(status);
+                                       }
                                }
                        }
 
@@ -399,9 +409,27 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
        union acpi_parse_object *arg = NULL;
        struct acpi_namespace_node *node;
        u8 type = 0;
+       u32 flags;
 
        ACPI_FUNCTION_TRACE_PTR(ds_init_field_objects, op);
 
+       /* Execute flag should always be set when this function is entered */
+
+       if (!(walk_state->parse_flags & ACPI_PARSE_EXECUTE)) {
+               if (walk_state->parse_flags & ACPI_PARSE_DEFERRED_OP) {
+
+                       /* bank_field Op is deferred, just return OK */
+
+                       return_ACPI_STATUS(AE_OK);
+               }
+
+               return_ACPI_STATUS(AE_AML_INTERNAL);
+       }
+
+       /*
+        * Get the field_list argument for this opcode. This is the start of the
+        * list of field elements.
+        */
        switch (walk_state->opcode) {
        case AML_FIELD_OP:
                arg = acpi_ps_get_arg(op, 2);
@@ -422,20 +450,33 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
 
+       if (!arg) {
+               return_ACPI_STATUS(AE_AML_NO_OPERAND);
+       }
+
+       /* Creating new namespace node(s), should not already exist */
+
+       flags = ACPI_NS_NO_UPSEARCH | ACPI_NS_DONT_OPEN_SCOPE |
+           ACPI_NS_ERROR_IF_FOUND;
+
+       /* Mark node(s) temporary if we are executing a method */
+
+       if (walk_state->method_node) {
+               flags |= ACPI_NS_TEMPORARY;
+       }
+
        /*
         * Walk the list of entries in the field_list
         */
        while (arg) {
-
-               /* Ignore OFFSET and ACCESSAS terms here */
-
+               /*
+                * Ignore OFFSET and ACCESSAS terms here; we are only interested in the
+                * field names in order to enter them into the namespace.
+                */
                if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
                        status = acpi_ns_lookup(walk_state->scope_info,
-                                               (char *)&arg->named.name,
-                                               type, ACPI_IMODE_LOAD_PASS1,
-                                               ACPI_NS_NO_UPSEARCH |
-                                               ACPI_NS_DONT_OPEN_SCOPE |
-                                               ACPI_NS_ERROR_IF_FOUND,
+                                               (char *)&arg->named.name, type,
+                                               ACPI_IMODE_LOAD_PASS1, flags,
                                                walk_state, &node);
                        if (ACPI_FAILURE(status)) {
                                ACPI_ERROR_NAMESPACE((char *)&arg->named.name,
@@ -452,7 +493,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
                        arg->common.node = node;
                }
 
-               /* Move to next field in the list */
+               /* Get the next field element in the list */
 
                arg = arg->common.next;
        }
@@ -466,7 +507,7 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
  *
  * PARAMETERS:  Op              - Op containing the Field definition and args
  *              region_node     - Object for the containing Operation Region
- *  `           walk_state      - Current method state
+ *              walk_state      - Current method state
  *
  * RETURN:      Status
  *
@@ -513,36 +554,13 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
                return_ACPI_STATUS(status);
        }
 
-       /* Third arg is the bank_value */
-
-       /* TBD: This arg is a term_arg, not a constant, and must be evaluated */
-
+       /*
+        * Third arg is the bank_value
+        * This arg is a term_arg, not a constant
+        * It will be evaluated later, by acpi_ds_eval_bank_field_operands
+        */
        arg = arg->common.next;
 
-       /* Currently, only the following constants are supported */
-
-       switch (arg->common.aml_opcode) {
-       case AML_ZERO_OP:
-               info.bank_value = 0;
-               break;
-
-       case AML_ONE_OP:
-               info.bank_value = 1;
-               break;
-
-       case AML_BYTE_OP:
-       case AML_WORD_OP:
-       case AML_DWORD_OP:
-       case AML_QWORD_OP:
-               info.bank_value = (u32) arg->common.value.integer;
-               break;
-
-       default:
-               info.bank_value = 0;
-               ACPI_ERROR((AE_INFO,
-                           "Non-constant BankValue for BankField is not implemented"));
-       }
-
        /* Fourth arg is the field flags */
 
        arg = arg->common.next;
@@ -553,8 +571,17 @@ acpi_ds_create_bank_field(union acpi_parse_object *op,
        info.field_type = ACPI_TYPE_LOCAL_BANK_FIELD;
        info.region_node = region_node;
 
-       status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
+       /*
+        * Use Info.data_register_node to store bank_field Op
+        * It's safe because data_register_node will never be used when create bank field
+        * We store aml_start and aml_length in the bank_field Op for late evaluation
+        * Used in acpi_ex_prep_field_value(Info)
+        *
+        * TBD: Or, should we add a field in struct acpi_create_field_info, like "void *ParentOp"?
+        */
+       info.data_register_node = (struct acpi_namespace_node *)op;
 
+       status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
        return_ACPI_STATUS(status);
 }
 
index af923c3885205ef70c7313aee98611a9c3b7656c..610b1ee102b02c9b9e3bdb6f3a5fea8599a7cb1b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1cbe6190582494ed9f3557c58a17b659644f4979..e48a3ea03117ab3cdc7cc993ebbcb6dd019889e4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -42,7 +42,6 @@
  */
 
 #include <acpi/acpi.h>
-#include <acpi/acparser.h>
 #include <acpi/amlcode.h>
 #include <acpi/acdispat.h>
 #include <acpi/acinterp.h>
@@ -102,7 +101,7 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
                                                    walk_state->opcode,
                                                    walk_state->aml_offset,
                                                    NULL);
-               (void)acpi_ex_enter_interpreter();
+               acpi_ex_enter_interpreter();
        }
 #ifdef ACPI_DISASSEMBLER
        if (ACPI_FAILURE(status)) {
@@ -232,9 +231,9 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
                 * recursive call.
                 */
                if (!walk_state ||
-                   !obj_desc->method.mutex->mutex.owner_thread ||
-                   (walk_state->thread !=
-                    obj_desc->method.mutex->mutex.owner_thread)) {
+                   !obj_desc->method.mutex->mutex.thread_id ||
+                   (walk_state->thread->thread_id !=
+                    obj_desc->method.mutex->mutex.thread_id)) {
                        /*
                         * Acquire the method mutex. This releases the interpreter if we
                         * block (and reacquires it before it returns)
@@ -254,8 +253,8 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
                                    original_sync_level =
                                    walk_state->thread->current_sync_level;
 
-                               obj_desc->method.mutex->mutex.owner_thread =
-                                   walk_state->thread;
+                               obj_desc->method.mutex->mutex.thread_id =
+                                   walk_state->thread->thread_id;
                                walk_state->thread->current_sync_level =
                                    obj_desc->method.sync_level;
                        } else {
@@ -535,8 +534,6 @@ void
 acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
                                 struct acpi_walk_state *walk_state)
 {
-       struct acpi_namespace_node *method_node;
-       acpi_status status;
 
        ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);
 
@@ -551,34 +548,26 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
                /* Delete all arguments and locals */
 
                acpi_ds_method_data_delete_all(walk_state);
-       }
 
-       /*
-        * If method is serialized, release the mutex and restore the
-        * current sync level for this thread
-        */
-       if (method_desc->method.mutex) {
+               /*
+                * If method is serialized, release the mutex and restore the
+                * current sync level for this thread
+                */
+               if (method_desc->method.mutex) {
 
-               /* Acquisition Depth handles recursive calls */
+                       /* Acquisition Depth handles recursive calls */
 
-               method_desc->method.mutex->mutex.acquisition_depth--;
-               if (!method_desc->method.mutex->mutex.acquisition_depth) {
-                       walk_state->thread->current_sync_level =
-                           method_desc->method.mutex->mutex.
-                           original_sync_level;
+                       method_desc->method.mutex->mutex.acquisition_depth--;
+                       if (!method_desc->method.mutex->mutex.acquisition_depth) {
+                               walk_state->thread->current_sync_level =
+                                   method_desc->method.mutex->mutex.
+                                   original_sync_level;
 
-                       acpi_os_release_mutex(method_desc->method.mutex->mutex.
-                                             os_mutex);
-                       method_desc->method.mutex->mutex.owner_thread = NULL;
+                               acpi_os_release_mutex(method_desc->method.
+                                                     mutex->mutex.os_mutex);
+                               method_desc->method.mutex->mutex.thread_id = 0;
+                       }
                }
-       }
-
-       if (walk_state) {
-               /*
-                * Delete any objects created by this method during execution.
-                * The method Node is stored in the walk state
-                */
-               method_node = walk_state->method_node;
 
                /*
                 * Delete any namespace objects created anywhere within
@@ -620,7 +609,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
                 */
                if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED)
                    && (!method_desc->method.mutex)) {
-                       status = acpi_ds_create_method_mutex(method_desc);
+                       (void)acpi_ds_create_method_mutex(method_desc);
                }
 
                /* No more threads, we can free the owner_id */
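
For illustration only, not part of the patch: the serialized-method handling above identifies the mutex owner by thread id and keeps an acquisition depth, so the owning thread can re-enter the method without deadlocking while other threads block. A rough userspace analogue of that bookkeeping (pthreads, invented names, memory-ordering subtleties ignored):

#include <pthread.h>

/* Ownership tracked by thread id plus a depth counter, as the serialized
 * method mutex does with mutex.thread_id and acquisition_depth. */
struct reentrant_lock {
        pthread_mutex_t mutex;
        pthread_t       owner;
        int             owner_valid;
        int             depth;
};

static struct reentrant_lock example_lock = {
        .mutex = PTHREAD_MUTEX_INITIALIZER,
};

static void reentrant_acquire(struct reentrant_lock *l)
{
        /* The unlocked check can only succeed for the thread that stored
         * its own id; everyone else falls through to the real lock. */
        if (l->owner_valid && pthread_equal(l->owner, pthread_self())) {
                l->depth++;             /* recursive acquisition */
                return;
        }
        pthread_mutex_lock(&l->mutex);
        l->owner = pthread_self();
        l->owner_valid = 1;
        l->depth = 1;
}

static void reentrant_release(struct reentrant_lock *l)
{
        if (--l->depth == 0) {
                l->owner_valid = 0;
                pthread_mutex_unlock(&l->mutex);
        }
}

int main(void)
{
        reentrant_acquire(&example_lock);
        reentrant_acquire(&example_lock);       /* same thread, just bumps depth */
        reentrant_release(&example_lock);
        reentrant_release(&example_lock);
        return 0;
}
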
index ba4626e06a5e816867a9e1a114a3edb485de1fa9..13c43eac35dba9570b0dc2ad8bf9a0c866a2d312 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 954ac8ce958a63c55a6740a4dcc1bbab5e4071bc..1022e38994c2bffcdd8a52d676e9ecf951cc49f7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -157,7 +157,9 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
                         * will remain as named references. This behavior is not described
                         * in the ACPI spec, but it appears to be an oversight.
                         */
-                       obj_desc = (union acpi_operand_object *)op->common.node;
+                       obj_desc =
+                           ACPI_CAST_PTR(union acpi_operand_object,
+                                         op->common.node);
 
                        status =
                            acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
@@ -172,7 +174,19 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
                        switch (op->common.node->type) {
                                /*
                                 * For these types, we need the actual node, not the subobject.
-                                * However, the subobject got an extra reference count above.
+                                * However, the subobject did not get an extra reference count above.
+                                *
+                                * TBD: should ex_resolve_node_to_value be changed to fix this?
+                                */
+                       case ACPI_TYPE_DEVICE:
+                       case ACPI_TYPE_THERMAL:
+
+                               acpi_ut_add_reference(op->common.node->object);
+
+                               /*lint -fallthrough */
+                               /*
+                                * For these types, we need the actual node, not the subobject.
+                                * The subobject got an extra reference count in ex_resolve_node_to_value.
                                 */
                        case ACPI_TYPE_MUTEX:
                        case ACPI_TYPE_METHOD:
@@ -180,25 +194,15 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
                        case ACPI_TYPE_PROCESSOR:
                        case ACPI_TYPE_EVENT:
                        case ACPI_TYPE_REGION:
-                       case ACPI_TYPE_DEVICE:
-                       case ACPI_TYPE_THERMAL:
 
-                               obj_desc =
-                                   (union acpi_operand_object *)op->common.
-                                   node;
+                               /* We will create a reference object for these types below */
                                break;
 
                        default:
-                               break;
-                       }
-
-                       /*
-                        * If above resolved to an operand object, we are done. Otherwise,
-                        * we have a NS node, we must create the package entry as a named
-                        * reference.
-                        */
-                       if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) !=
-                           ACPI_DESC_TYPE_NAMED) {
+                               /*
+                                * All other types - the node was resolved to an actual
+                                * object, we are done.
+                                */
                                goto exit;
                        }
                }
@@ -223,7 +227,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
 
       exit:
        *obj_desc_ptr = obj_desc;
-       return_ACPI_STATUS(AE_OK);
+       return_ACPI_STATUS(status);
 }
 
 /*******************************************************************************
@@ -369,7 +373,9 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
        union acpi_parse_object *parent;
        union acpi_operand_object *obj_desc = NULL;
        acpi_status status = AE_OK;
-       acpi_native_uint i;
+       unsigned i;
+       u16 index;
+       u16 reference_count;
 
        ACPI_FUNCTION_TRACE(ds_build_internal_package_obj);
 
@@ -447,13 +453,60 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
                                                               package.
                                                               elements[i]);
                }
+
+               if (*obj_desc_ptr) {
+
+                       /* Existing package, get existing reference count */
+
+                       reference_count =
+                           (*obj_desc_ptr)->common.reference_count;
+                       if (reference_count > 1) {
+
+                               /* Make new element ref count match original ref count */
+
+                               for (index = 0; index < (reference_count - 1);
+                                    index++) {
+                                       acpi_ut_add_reference((obj_desc->
+                                                              package.
+                                                              elements[i]));
+                               }
+                       }
+               }
+
                arg = arg->common.next;
        }
 
-       if (!arg) {
+       /* Check for match between num_elements and actual length of package_list */
+
+       if (arg) {
+               /*
+                * num_elements was exhausted, but there are remaining elements in the
+                * package_list.
+                *
+                * Note: technically, this is an error, from ACPI spec: "It is an error
+                * for NumElements to be less than the number of elements in the
+                * PackageList". However, for now, we just print an error message and
+                * no exception is returned.
+                */
+               while (arg) {
+
+                       /* Find out how many elements there really are */
+
+                       i++;
+                       arg = arg->common.next;
+               }
+
+               ACPI_ERROR((AE_INFO,
+                           "Package List length (%X) larger than NumElements count (%X), truncated\n",
+                           i, element_count));
+       } else if (i < element_count) {
+               /*
+                * Arg list (elements) was exhausted, but we did not reach num_elements count.
+                * Note: this is not an error, the package is padded out with NULLs.
+                */
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "Package List length larger than NumElements count (%X), truncated\n",
-                                 element_count));
+                                 "Package List length (%X) smaller than NumElements count (%X), padded with null elements\n",
+                                 i, element_count));
        }
 
        obj_desc->package.flags |= AOPOBJ_DATA_VALID;
@@ -721,6 +774,8 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
                                /* Node was saved in Op */
 
                                obj_desc->reference.node = op->common.node;
+                               obj_desc->reference.object =
+                                   op->common.node->object;
                        }
 
                        obj_desc->reference.opcode = opcode;
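
For illustration only, not part of the patch: the package-building hunk earlier in this file makes the NumElements policy explicit, padding with NULL elements when the declared count exceeds the initializer list and warning-then-truncating when the list is longer than the declared count. A standalone sketch of that policy with invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pad with NULL when the list is short of num_elements; warn and truncate
 * when it is longer. Relies on calloc() zero fill standing in for NULL. */
void **build_package(void **initializers, size_t list_len, size_t num_elements)
{
        void **pkg = calloc(num_elements, sizeof(*pkg));
        size_t used = list_len < num_elements ? list_len : num_elements;

        if (!pkg)
                return NULL;
        memcpy(pkg, initializers, used * sizeof(*pkg));

        if (list_len > num_elements)
                fprintf(stderr,
                        "package list length (%zu) larger than NumElements (%zu), truncated\n",
                        list_len, num_elements);
        return pkg;
}

int main(void)
{
        int a = 1, b = 2;
        void *init[] = { &a, &b };
        void **pkg = build_package(init, 2, 4); /* 2 real elements, 2 NULL pads */

        if (!pkg)
                return 1;
        printf("pkg[3] is %s\n", pkg[3] ? "set" : "NULL");
        free(pkg);
        return 0;
}
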
index f501e083aac78779c6f5a0f4f0012bc505f6d6b1..a818e0ddb996c0bf124a6060adc25cf74f5f2cd4 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -49,6 +49,7 @@
 #include <acpi/acinterp.h>
 #include <acpi/acnamesp.h>
 #include <acpi/acevents.h>
+#include <acpi/actables.h>
 
 #define _COMPONENT          ACPI_DISPATCHER
 ACPI_MODULE_NAME("dsopcode")
@@ -217,6 +218,50 @@ acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
        return_ACPI_STATUS(status);
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_get_bank_field_arguments
+ *
+ * PARAMETERS:  obj_desc        - A valid bank_field object
+ *
+ * RETURN:      Status.
+ *
+ * DESCRIPTION: Get bank_field bank_value. This implements the late
+ *              evaluation of these field attributes.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
+{
+       union acpi_operand_object *extra_desc;
+       struct acpi_namespace_node *node;
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc);
+
+       if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
+               return_ACPI_STATUS(AE_OK);
+       }
+
+       /* Get the AML pointer (method object) and bank_field node */
+
+       extra_desc = acpi_ns_get_secondary_object(obj_desc);
+       node = obj_desc->bank_field.node;
+
+       ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
+                       (ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
+       ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n",
+                         acpi_ut_get_node_name(node)));
+
+       /* Execute the AML code for the term_arg arguments */
+
+       status = acpi_ds_execute_arguments(node, acpi_ns_get_parent_node(node),
+                                          extra_desc->extra.aml_length,
+                                          extra_desc->extra.aml_start);
+       return_ACPI_STATUS(status);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ds_get_buffer_arguments
@@ -770,7 +815,109 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
 
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
                          obj_desc,
-                         ACPI_FORMAT_UINT64(obj_desc->region.address),
+                         ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
+                         obj_desc->region.length));
+
+       /* Now the address and length are valid for this opregion */
+
+       obj_desc->region.flags |= AOPOBJ_DATA_VALID;
+
+       return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_eval_table_region_operands
+ *
+ * PARAMETERS:  walk_state      - Current walk
+ *              Op              - A valid region Op object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get region address and length
+ *              Called from acpi_ds_exec_end_op during data_table_region parse tree walk
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
+                                  union acpi_parse_object *op)
+{
+       acpi_status status;
+       union acpi_operand_object *obj_desc;
+       union acpi_operand_object **operand;
+       struct acpi_namespace_node *node;
+       union acpi_parse_object *next_op;
+       acpi_native_uint table_index;
+       struct acpi_table_header *table;
+
+       ACPI_FUNCTION_TRACE_PTR(ds_eval_table_region_operands, op);
+
+       /*
+        * This is where we evaluate the signature_string and oem_iDString
+        * and oem_table_iDString of the data_table_region declaration
+        */
+       node = op->common.node;
+
+       /* next_op points to signature_string op */
+
+       next_op = op->common.value.arg;
+
+       /*
+        * Evaluate/create the signature_string and oem_iDString
+        * and oem_table_iDString operands
+        */
+       status = acpi_ds_create_operands(walk_state, next_op);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       /*
+        * Resolve the signature_string and oem_iDString
+        * and oem_table_iDString operands
+        */
+       status = acpi_ex_resolve_operands(op->common.aml_opcode,
+                                         ACPI_WALK_OPERANDS, walk_state);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE,
+                          acpi_ps_get_opcode_name(op->common.aml_opcode),
+                          1, "after AcpiExResolveOperands");
+
+       operand = &walk_state->operands[0];
+
+       /* Find the ACPI table */
+
+       status = acpi_tb_find_table(operand[0]->string.pointer,
+                                   operand[1]->string.pointer,
+                                   operand[2]->string.pointer, &table_index);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       acpi_ut_remove_reference(operand[0]);
+       acpi_ut_remove_reference(operand[1]);
+       acpi_ut_remove_reference(operand[2]);
+
+       status = acpi_get_table_by_index(table_index, &table);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       obj_desc = acpi_ns_get_attached_object(node);
+       if (!obj_desc) {
+               return_ACPI_STATUS(AE_NOT_EXIST);
+       }
+
+       obj_desc->region.address =
+           (acpi_physical_address) ACPI_TO_INTEGER(table);
+       obj_desc->region.length = table->length;
+
+       ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "RgnObj %p Addr %8.8X%8.8X Len %X\n",
+                         obj_desc,
+                         ACPI_FORMAT_NATIVE_UINT(obj_desc->region.address),
                          obj_desc->region.length));
 
        /* Now the address and length are valid for this opregion */
@@ -808,6 +955,12 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
 
        /* The first operand (for all of these data objects) is the length */
 
+       /*
+        * Set proper index into operand stack for acpi_ds_obj_stack_push
+        * invoked inside acpi_ds_create_operand.
+        */
+       walk_state->operand_index = walk_state->num_operands;
+
        status = acpi_ds_create_operand(walk_state, op->common.value.arg, 1);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
@@ -876,6 +1029,106 @@ acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
        return_ACPI_STATUS(status);
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ds_eval_bank_field_operands
+ *
+ * PARAMETERS:  walk_state      - Current walk
+ *              Op              - A valid bank_field Op object
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get bank_field bank_value
+ *              Called from acpi_ds_exec_end_op during bank_field parse tree walk
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state,
+                                union acpi_parse_object *op)
+{
+       acpi_status status;
+       union acpi_operand_object *obj_desc;
+       union acpi_operand_object *operand_desc;
+       struct acpi_namespace_node *node;
+       union acpi_parse_object *next_op;
+       union acpi_parse_object *arg;
+
+       ACPI_FUNCTION_TRACE_PTR(ds_eval_bank_field_operands, op);
+
+       /*
+        * This is where we evaluate the bank_value field of the
+        * bank_field declaration
+        */
+
+       /* next_op points to the op that holds the Region */
+
+       next_op = op->common.value.arg;
+
+       /* next_op points to the op that holds the Bank Register */
+
+       next_op = next_op->common.next;
+
+       /* next_op points to the op that holds the Bank Value */
+
+       next_op = next_op->common.next;
+
+       /*
+        * Set proper index into operand stack for acpi_ds_obj_stack_push
+        * invoked inside acpi_ds_create_operand.
+        *
+        * We use walk_state->Operands[0] to store the evaluated bank_value
+        */
+       walk_state->operand_index = 0;
+
+       status = acpi_ds_create_operand(walk_state, next_op, 0);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       status = acpi_ex_resolve_to_value(&walk_state->operands[0], walk_state);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       ACPI_DUMP_OPERANDS(ACPI_WALK_OPERANDS, ACPI_IMODE_EXECUTE,
+                          acpi_ps_get_opcode_name(op->common.aml_opcode),
+                          1, "after AcpiExResolveOperands");
+
+       /*
+        * Get the bank_value operand and save it
+        * (at Top of stack)
+        */
+       operand_desc = walk_state->operands[0];
+
+       /* Arg points to the start of the Bank Field list */
+
+       arg = acpi_ps_get_arg(op, 4);
+       while (arg) {
+
+               /* Ignore OFFSET and ACCESSAS terms here */
+
+               if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
+                       node = arg->common.node;
+
+                       obj_desc = acpi_ns_get_attached_object(node);
+                       if (!obj_desc) {
+                               return_ACPI_STATUS(AE_NOT_EXIST);
+                       }
+
+                       obj_desc->bank_field.value =
+                           (u32) operand_desc->integer.value;
+               }
+
+               /* Move to next field in the list */
+
+               arg = arg->common.next;
+       }
+
+       acpi_ut_remove_reference(operand_desc);
+       return_ACPI_STATUS(status);
+}
+
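
The loop above is the heart of the new BankField support: the evaluated BankValue is copied into every named field created by the declaration, while Offset and AccessAs terms are skipped. Below is a minimal standalone sketch of that pattern, using invented simplified types rather than the ACPICA parse/operand structures.

/*
 * Standalone sketch (hypothetical simplified types, not ACPICA): walk a list
 * of field terms, skip non-named entries, and store the evaluated bank value
 * into every named field's attached object.
 */
#include <stdint.h>
#include <stdio.h>

enum term_opcode { TERM_NAMED_FIELD, TERM_OFFSET, TERM_ACCESS_AS };

struct field_obj { uint32_t bank_value; };

struct parse_term {
        enum term_opcode opcode;
        struct field_obj *attached;     /* NULL for Offset/AccessAs terms */
        struct parse_term *next;
};

static void set_bank_value(struct parse_term *arg, uint64_t evaluated_value)
{
        for (; arg; arg = arg->next) {
                if (arg->opcode != TERM_NAMED_FIELD)
                        continue;       /* ignore Offset and AccessAs terms */
                arg->attached->bank_value = (uint32_t)evaluated_value;
        }
}

int main(void)
{
        struct field_obj f1 = { 0 }, f2 = { 0 };
        struct parse_term t3 = { TERM_NAMED_FIELD, &f2, NULL };
        struct parse_term t2 = { TERM_OFFSET, NULL, &t3 };
        struct parse_term t1 = { TERM_NAMED_FIELD, &f1, &t2 };

        set_bank_value(&t1, 0x42);
        printf("f1=%u f2=%u\n", (unsigned)f1.bank_value,
               (unsigned)f2.bank_value);        /* both fields get 66 */
        return 0;
}
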
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ds_exec_begin_control_op
@@ -1070,8 +1323,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
                         * is set to anything other than zero!
                         */
                        walk_state->return_desc = walk_state->operands[0];
-               } else if ((walk_state->results) &&
-                          (walk_state->results->results.num_results > 0)) {
+               } else if (walk_state->result_count) {
 
                        /* Since we have a real Return(), delete any implicit return */
 
index 71503c036f7c1496c2cd873a55a5e4bba676d6f2..b398982f0d8baef2e534efc85108a5b338fa0f1a 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -278,7 +278,9 @@ acpi_ds_is_result_used(union acpi_parse_object * op,
                        AML_VAR_PACKAGE_OP)
                    || (op->common.parent->common.aml_opcode == AML_BUFFER_OP)
                    || (op->common.parent->common.aml_opcode ==
-                       AML_INT_EVAL_SUBTREE_OP)) {
+                       AML_INT_EVAL_SUBTREE_OP)
+                   || (op->common.parent->common.aml_opcode ==
+                       AML_BANK_FIELD_OP)) {
                        /*
                         * These opcodes allow term_arg(s) as operands and therefore
                         * the operands can be method calls.  The result is used.
@@ -472,7 +474,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
        /* A valid name must be looked up in the namespace */
 
        if ((arg->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
-           (arg->common.value.string)) {
+           (arg->common.value.string) &&
+           !(arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
                ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Getting a name: Arg=%p\n",
                                  arg));
 
@@ -595,7 +598,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
        } else {
                /* Check for null name case */
 
-               if (arg->common.aml_opcode == AML_INT_NAMEPATH_OP) {
+               if ((arg->common.aml_opcode == AML_INT_NAMEPATH_OP) &&
+                   !(arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
                        /*
                         * If the name is null, this means that this is an
                         * optional result parameter that was not specified
@@ -617,7 +621,8 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
                        return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
                }
 
-               if (op_info->flags & AML_HAS_RETVAL) {
+               if ((op_info->flags & AML_HAS_RETVAL)
+                   || (arg->common.flags & ACPI_PARSEOP_IN_STACK)) {
                        ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
                                          "Argument previously created, already stacked\n"));
 
@@ -630,9 +635,7 @@ acpi_ds_create_operand(struct acpi_walk_state *walk_state,
                         * Use value that was already previously returned
                         * by the evaluation of this argument
                         */
-                       status =
-                           acpi_ds_result_pop_from_bottom(&obj_desc,
-                                                          walk_state);
+                       status = acpi_ds_result_pop(&obj_desc, walk_state);
                        if (ACPI_FAILURE(status)) {
                                /*
                                 * Only error is underflow, and this indicates
@@ -698,27 +701,52 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
 {
        acpi_status status = AE_OK;
        union acpi_parse_object *arg;
+       union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS];
        u32 arg_count = 0;
+       u32 index = walk_state->num_operands;
+       u32 i;
 
        ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg);
 
-       /* For all arguments in the list... */
+       /* Get all arguments in the list */
 
        arg = first_arg;
        while (arg) {
-               status = acpi_ds_create_operand(walk_state, arg, arg_count);
-               if (ACPI_FAILURE(status)) {
-                       goto cleanup;
+               if (index >= ACPI_OBJ_NUM_OPERANDS) {
+                       return_ACPI_STATUS(AE_BAD_DATA);
                }
 
-               ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
-                                 "Arg #%d (%p) done, Arg1=%p\n", arg_count,
-                                 arg, first_arg));
+               arguments[index] = arg;
+               walk_state->operands[index] = NULL;
 
                /* Move on to next argument, if any */
 
                arg = arg->common.next;
                arg_count++;
+               index++;
+       }
+
+       index--;
+
+       /* This is the proper order for removing objects from the Result stack */
+
+       for (i = 0; i < arg_count; i++) {
+               arg = arguments[index];
+
+               /* Force the operand stack to be filled in reverse order */
+
+               walk_state->operand_index = (u8) index;
+
+               status = acpi_ds_create_operand(walk_state, arg, index);
+               if (ACPI_FAILURE(status)) {
+                       goto cleanup;
+               }
+
+               index--;
+
+               ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+                                 "Arg #%d (%p) done, Arg1=%p\n", index, arg,
+                                 first_arg));
        }
 
        return_ACPI_STATUS(status);
@@ -729,9 +757,112 @@ acpi_ds_create_operands(struct acpi_walk_state *walk_state,
         * pop everything off of the operand stack and delete those
         * objects
         */
-       (void)acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
+       acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state);
+
+       ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %d", index));
+       return_ACPI_STATUS(status);
+}
+
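
acpi_ds_create_operands() now gathers the argument list first and then creates the operands from the highest index down, because the per-argument results are taken off the result stack with the plain LIFO acpi_ds_result_pop() (the FIFO pop-from-bottom variant is removed later in this patch). The following standalone sketch shows why the reverse order keeps each value in its correct operand slot; it uses plain arrays, not the kernel structures.

/*
 * Standalone sketch, not ACPICA: argument results sit on a LIFO stack with
 * the last argument on top, so filling the operand array from the highest
 * index down lets a plain LIFO pop land every value in its proper slot.
 */
#include <stdio.h>

#define MAX_OPS 8

static int result_stack[MAX_OPS];
static int result_count;

static void result_push(int v) { result_stack[result_count++] = v; }
static int  result_pop(void)   { return result_stack[--result_count]; }

int main(void)
{
        int operands[MAX_OPS];
        int arg_values[] = { 10, 20, 30 };      /* Arg0, Arg1, Arg2 */
        int arg_count = 3;
        int index, i;

        /* Arguments were evaluated left to right, so Arg2 ends up on top */
        for (i = 0; i < arg_count; i++)
                result_push(arg_values[i]);

        /* Fill the operand stack in reverse order (mirrors the loop above) */
        index = arg_count - 1;
        for (i = 0; i < arg_count; i++) {
                operands[index] = result_pop();
                index--;
        }

        for (i = 0; i < arg_count; i++)
                printf("operand[%d] = %d\n", i, operands[i]);   /* 10 20 30 */
        return 0;
}
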
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_ds_evaluate_name_path
+ *
+ * PARAMETERS:  walk_state      - Current state of the parse tree walk,
+ *                                the opcode of the current operation should be
+ *                                AML_INT_NAMEPATH_OP
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Translate the -name_path- parse tree object to the equivalent
+ *              interpreter object, convert it to a value if needed, duplicate
+ *              it if needed, and push it onto the current result stack.
+ *
+ ****************************************************************************/
+
+acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state)
+{
+       acpi_status status = AE_OK;
+       union acpi_parse_object *op = walk_state->op;
+       union acpi_operand_object **operand = &walk_state->operands[0];
+       union acpi_operand_object *new_obj_desc;
+       u8 type;
+
+       ACPI_FUNCTION_TRACE_PTR(ds_evaluate_name_path, walk_state);
+
+       if (!op->common.parent) {
+
+               /* This happens after certain exception processing */
+
+               goto exit;
+       }
+
+       if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
+           (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP) ||
+           (op->common.parent->common.aml_opcode == AML_REF_OF_OP)) {
+
+               /* TBD: Should this behavior be indicated by a bit in op_info->Flags for these opcodes? */
+
+               goto exit;
+       }
+
+       status = acpi_ds_create_operand(walk_state, op, 0);
+       if (ACPI_FAILURE(status)) {
+               goto exit;
+       }
+
+       if (op->common.flags & ACPI_PARSEOP_TARGET) {
+               new_obj_desc = *operand;
+               goto push_result;
+       }
+
+       type = ACPI_GET_OBJECT_TYPE(*operand);
+
+       status = acpi_ex_resolve_to_value(operand, walk_state);
+       if (ACPI_FAILURE(status)) {
+               goto exit;
+       }
+
+       if (type == ACPI_TYPE_INTEGER) {
+
+               /* It was incremented by acpi_ex_resolve_to_value */
+
+               acpi_ut_remove_reference(*operand);
+
+               status =
+                   acpi_ut_copy_iobject_to_iobject(*operand, &new_obj_desc,
+                                                   walk_state);
+               if (ACPI_FAILURE(status)) {
+                       goto exit;
+               }
+       } else {
+               /*
+                * The object was either newly created or is
+                * a Namespace node - don't decrement its reference count.
+                */
+               new_obj_desc = *operand;
+       }
+
+       /* Cleanup for name-path operand */
+
+       status = acpi_ds_obj_stack_pop(1, walk_state);
+       if (ACPI_FAILURE(status)) {
+               walk_state->result_obj = new_obj_desc;
+               goto exit;
+       }
+
+      push_result:
+
+       walk_state->result_obj = new_obj_desc;
+
+       status = acpi_ds_result_push(walk_state->result_obj, walk_state);
+       if (ACPI_SUCCESS(status)) {
+
+               /* Force the value to be taken from the result stack */
+
+               op->common.flags |= ACPI_PARSEOP_IN_STACK;
+       }
+
+      exit:
 
-       ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %d",
-                       (arg_count + 1)));
        return_ACPI_STATUS(status);
 }
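
The key handshake in acpi_ds_evaluate_name_path() is the ACPI_PARSEOP_IN_STACK flag: once the name path has been resolved and its value pushed onto the result stack, the later operand creation (see the dsutils.c hunks above) pops the ready value instead of repeating the namespace lookup. A standalone sketch of that flag protocol follows; the opcode and flag values are invented, not the real AML or parse-op constants.

/*
 * Standalone sketch (hypothetical constants, not ACPICA): evaluate a NamePath
 * op early, push its value, flag the op, and let operand creation pop the
 * ready value instead of doing a namespace lookup again.
 */
#include <stdio.h>

#define OP_NAMEPATH     0x2D    /* invented stand-in for AML_INT_NAMEPATH_OP */
#define FLAG_IN_STACK   0x10    /* invented stand-in for ACPI_PARSEOP_IN_STACK */

struct parse_op { int opcode; unsigned flags; };

static int result_stack[8];
static int result_count;

static void evaluate_name_path(struct parse_op *op, int resolved_value)
{
        result_stack[result_count++] = resolved_value;  /* push the result */
        op->flags |= FLAG_IN_STACK;                     /* force a later pop */
}

static int create_operand(struct parse_op *op)
{
        if (op->opcode == OP_NAMEPATH && !(op->flags & FLAG_IN_STACK))
                return -1;      /* would need a namespace lookup (not modeled) */

        /* Argument previously created, already stacked: just pop it */
        return result_stack[--result_count];
}

int main(void)
{
        struct parse_op op = { OP_NAMEPATH, 0 };

        evaluate_name_path(&op, 1234);
        printf("operand value = %d\n", create_operand(&op));   /* 1234 */
        return 0;
}
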
index 69693fa07224b8514e5c6c05b3bb7e81a12ce7ed..b246b9657eada3b0c2f7e2fd55eb156ce78ed7a9 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -285,11 +285,6 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
        switch (opcode_class) {
        case AML_CLASS_CONTROL:
 
-               status = acpi_ds_result_stack_push(walk_state);
-               if (ACPI_FAILURE(status)) {
-                       goto error_exit;
-               }
-
                status = acpi_ds_exec_begin_control_op(walk_state, op);
                break;
 
@@ -305,20 +300,11 @@ acpi_ds_exec_begin_op(struct acpi_walk_state *walk_state,
                        status = acpi_ds_load2_begin_op(walk_state, NULL);
                }
 
-               if (op->common.aml_opcode == AML_REGION_OP) {
-                       status = acpi_ds_result_stack_push(walk_state);
-               }
                break;
 
        case AML_CLASS_EXECUTE:
        case AML_CLASS_CREATE:
-               /*
-                * Most operators with arguments (except create_xxx_field operators)
-                * Start a new result/operand state
-                */
-               if (walk_state->op_info->object_type != ACPI_TYPE_BUFFER_FIELD) {
-                       status = acpi_ds_result_stack_push(walk_state);
-               }
+
                break;
 
        default:
@@ -374,6 +360,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
        /* Init the walk state */
 
        walk_state->num_operands = 0;
+       walk_state->operand_index = 0;
        walk_state->return_desc = NULL;
        walk_state->result_obj = NULL;
 
@@ -388,10 +375,17 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
        /* Decode the Opcode Class */
 
        switch (op_class) {
-       case AML_CLASS_ARGUMENT:        /* constants, literals, etc. - do nothing */
+       case AML_CLASS_ARGUMENT:        /* Constants, literals, etc. */
+
+               if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
+                       status = acpi_ds_evaluate_name_path(walk_state);
+                       if (ACPI_FAILURE(status)) {
+                               goto cleanup;
+                       }
+               }
                break;
 
-       case AML_CLASS_EXECUTE: /* most operators with arguments */
+       case AML_CLASS_EXECUTE: /* Most operators with arguments */
 
                /* Build resolved operand stack */
 
@@ -400,13 +394,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
                        goto cleanup;
                }
 
-               /* Done with this result state (Now that operand stack is built) */
-
-               status = acpi_ds_result_stack_pop(walk_state);
-               if (ACPI_FAILURE(status)) {
-                       goto cleanup;
-               }
-
                /*
                 * All opcodes require operand resolution, with the only exceptions
                 * being the object_type and size_of operators.
@@ -487,16 +474,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
 
                        status = acpi_ds_exec_end_control_op(walk_state, op);
 
-                       /* Make sure to properly pop the result stack */
-
-                       if (ACPI_SUCCESS(status)) {
-                               status = acpi_ds_result_stack_pop(walk_state);
-                       } else if (status == AE_CTRL_PENDING) {
-                               status = acpi_ds_result_stack_pop(walk_state);
-                               if (ACPI_SUCCESS(status)) {
-                                       status = AE_CTRL_PENDING;
-                               }
-                       }
                        break;
 
                case AML_TYPE_METHOD_CALL:
@@ -516,7 +493,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
 
                                op->common.node =
                                    (struct acpi_namespace_node *)op->asl.value.
-                                   arg->asl.node->object;
+                                   arg->asl.node;
                                acpi_ut_add_reference(op->asl.value.arg->asl.
                                                      node->object);
                                return_ACPI_STATUS(AE_OK);
@@ -632,13 +609,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
                                break;
                        }
 
-                       /* Done with result state (Now that operand stack is built) */
-
-                       status = acpi_ds_result_stack_pop(walk_state);
-                       if (ACPI_FAILURE(status)) {
-                               goto cleanup;
-                       }
-
                        /*
                         * If a result object was returned from above, push it on the
                         * current result stack
@@ -671,8 +641,28 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
                                if (ACPI_FAILURE(status)) {
                                        break;
                                }
+                       } else if (op->common.aml_opcode == AML_DATA_REGION_OP) {
+                               ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+                                                 "Executing DataTableRegion Strings Op=%p\n",
+                                                 op));
+
+                               status =
+                                   acpi_ds_eval_table_region_operands
+                                   (walk_state, op);
+                               if (ACPI_FAILURE(status)) {
+                                       break;
+                               }
+                       } else if (op->common.aml_opcode == AML_BANK_FIELD_OP) {
+                               ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+                                                 "Executing BankField Op=%p\n",
+                                                 op));
 
-                               status = acpi_ds_result_stack_pop(walk_state);
+                               status =
+                                   acpi_ds_eval_bank_field_operands(walk_state,
+                                                                    op);
+                               if (ACPI_FAILURE(status)) {
+                                       break;
+                               }
                        }
                        break;
 
index 8ab9d1b29a4ce4062f1163ed746a16a258c4cd4f..dff7a3e445a88e7dbd694f858536a7fd5dba3612 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -443,6 +443,15 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
                        if (ACPI_FAILURE(status)) {
                                return_ACPI_STATUS(status);
                        }
+               } else if (op->common.aml_opcode == AML_DATA_REGION_OP) {
+                       status =
+                           acpi_ex_create_region(op->named.data,
+                                                 op->named.length,
+                                                 REGION_DATA_TABLE,
+                                                 walk_state);
+                       if (ACPI_FAILURE(status)) {
+                               return_ACPI_STATUS(status);
+                       }
                }
        }
 #endif
@@ -767,6 +776,12 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
                    acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
                                   object_type, ACPI_IMODE_LOAD_PASS2, flags,
                                   walk_state, &node);
+
+               if (ACPI_SUCCESS(status) && (flags & ACPI_NS_TEMPORARY)) {
+                       ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
+                                         "***New Node [%4.4s] %p is temporary\n",
+                                         acpi_ut_get_node_name(node), node));
+               }
                break;
        }
 
@@ -823,6 +838,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
        struct acpi_namespace_node *new_node;
 #ifndef ACPI_NO_METHOD_EXECUTION
        u32 i;
+       u8 region_space;
 #endif
 
        ACPI_FUNCTION_TRACE(ds_load2_end_op);
@@ -1003,11 +1019,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
                        status = acpi_ex_create_event(walk_state);
                        break;
 
-               case AML_DATA_REGION_OP:
-
-                       status = acpi_ex_create_table_region(walk_state);
-                       break;
-
                case AML_ALIAS_OP:
 
                        status = acpi_ex_create_alias(walk_state);
@@ -1035,6 +1046,15 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
                switch (op->common.aml_opcode) {
 #ifndef ACPI_NO_METHOD_EXECUTION
                case AML_REGION_OP:
+               case AML_DATA_REGION_OP:
+
+                       if (op->common.aml_opcode == AML_REGION_OP) {
+                               region_space = (acpi_adr_space_type)
+                                   ((op->common.value.arg)->common.value.
+                                    integer);
+                       } else {
+                               region_space = REGION_DATA_TABLE;
+                       }
 
                        /*
                         * If we are executing a method, initialize the region
@@ -1043,10 +1063,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
                                status =
                                    acpi_ex_create_region(op->named.data,
                                                          op->named.length,
-                                                         (acpi_adr_space_type)
-                                                         ((op->common.value.
-                                                           arg)->common.value.
-                                                          integer),
+                                                         region_space,
                                                          walk_state);
                                if (ACPI_FAILURE(status)) {
                                        return (status);
index 3927c495e4bfef7a12374f80e3aa0f672f2477ad..9e60732658734c4c1741f32c3eb1ff236da20e61 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5afcdd9c74492aedd4eda84deeb3a9f3882e36c7..1386ced332ecedcacdd0a7c3d61fdcf89f409b20 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define _COMPONENT          ACPI_DISPATCHER
 ACPI_MODULE_NAME("dswstate")
 
-/* Local prototypes */
-#ifdef ACPI_OBSOLETE_FUNCTIONS
-acpi_status
-acpi_ds_result_insert(void *object,
-                     u32 index, struct acpi_walk_state *walk_state);
-
-acpi_status acpi_ds_obj_stack_delete_all(struct acpi_walk_state *walk_state);
-
-acpi_status
-acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
-                            struct acpi_walk_state *walk_state);
-
-void *acpi_ds_obj_stack_get_value(u32 index,
-                                 struct acpi_walk_state *walk_state);
-#endif
-
-#ifdef ACPI_FUTURE_USAGE
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_result_remove
- *
- * PARAMETERS:  Object              - Where to return the popped object
- *              Index               - Where to extract the object
- *              walk_state          - Current Walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Pop an object off the bottom of this walk's result stack.  In
- *              other words, this is a FIFO.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_result_remove(union acpi_operand_object **object,
-                     u32 index, struct acpi_walk_state *walk_state)
-{
-       union acpi_generic_state *state;
-
-       ACPI_FUNCTION_NAME(ds_result_remove);
-
-       state = walk_state->results;
-       if (!state) {
-               ACPI_ERROR((AE_INFO, "No result object pushed! State=%p",
-                           walk_state));
-               return (AE_NOT_EXIST);
-       }
-
-       if (index >= ACPI_OBJ_MAX_OPERAND) {
-               ACPI_ERROR((AE_INFO,
-                           "Index out of range: %X State=%p Num=%X",
-                           index, walk_state, state->results.num_results));
-       }
-
-       /* Check for a valid result object */
-
-       if (!state->results.obj_desc[index]) {
-               ACPI_ERROR((AE_INFO,
-                           "Null operand! State=%p #Ops=%X, Index=%X",
-                           walk_state, state->results.num_results, index));
-               return (AE_AML_NO_RETURN_VALUE);
-       }
-
-       /* Remove the object */
-
-       state->results.num_results--;
-
-       *object = state->results.obj_desc[index];
-       state->results.obj_desc[index] = NULL;
-
-       ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-                         "Obj=%p [%s] Index=%X State=%p Num=%X\n",
-                         *object,
-                         (*object) ? acpi_ut_get_object_type_name(*object) :
-                         "NULL", index, walk_state,
-                         state->results.num_results));
-
-       return (AE_OK);
-}
-#endif                         /*  ACPI_FUTURE_USAGE  */
+/* Local prototypes */
+static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *ws);
+static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *ws);
 
 /*******************************************************************************
  *
@@ -138,122 +62,67 @@ acpi_ds_result_remove(union acpi_operand_object **object,
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Pop an object off the bottom of this walk's result stack.  In
- *              other words, this is a FIFO.
+ * DESCRIPTION: Pop an object off the top of this walk's result stack
  *
  ******************************************************************************/
 
 acpi_status
-acpi_ds_result_pop(union acpi_operand_object ** object,
-                  struct acpi_walk_state * walk_state)
+acpi_ds_result_pop(union acpi_operand_object **object,
+                  struct acpi_walk_state *walk_state)
 {
        acpi_native_uint index;
        union acpi_generic_state *state;
+       acpi_status status;
 
        ACPI_FUNCTION_NAME(ds_result_pop);
 
        state = walk_state->results;
-       if (!state) {
-               return (AE_OK);
-       }
-
-       if (!state->results.num_results) {
-               ACPI_ERROR((AE_INFO, "Result stack is empty! State=%p",
-                           walk_state));
-               return (AE_AML_NO_RETURN_VALUE);
-       }
 
-       /* Remove top element */
+       /* Check for an inconsistent result stack state */
 
-       state->results.num_results--;
-
-       for (index = ACPI_OBJ_NUM_OPERANDS; index; index--) {
-
-               /* Check for a valid result object */
-
-               if (state->results.obj_desc[index - 1]) {
-                       *object = state->results.obj_desc[index - 1];
-                       state->results.obj_desc[index - 1] = NULL;
-
-                       ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-                                         "Obj=%p [%s] Index=%X State=%p Num=%X\n",
-                                         *object,
-                                         (*object) ?
-                                         acpi_ut_get_object_type_name(*object)
-                                         : "NULL", (u32) index - 1, walk_state,
-                                         state->results.num_results));
-
-                       return (AE_OK);
-               }
+       if (state && !walk_state->result_count) {
+               ACPI_ERROR((AE_INFO, "No results on result stack"));
+               return (AE_AML_INTERNAL);
        }
 
-       ACPI_ERROR((AE_INFO, "No result objects! State=%p", walk_state));
-       return (AE_AML_NO_RETURN_VALUE);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_result_pop_from_bottom
- *
- * PARAMETERS:  Object              - Where to return the popped object
- *              walk_state          - Current Walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Pop an object off the bottom of this walk's result stack.  In
- *              other words, this is a FIFO.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_result_pop_from_bottom(union acpi_operand_object ** object,
-                              struct acpi_walk_state * walk_state)
-{
-       acpi_native_uint index;
-       union acpi_generic_state *state;
+       if (!state && walk_state->result_count) {
+               ACPI_ERROR((AE_INFO, "No result state for result stack"));
+               return (AE_AML_INTERNAL);
+       }
 
-       ACPI_FUNCTION_NAME(ds_result_pop_from_bottom);
+       /* Empty result stack */
 
-       state = walk_state->results;
        if (!state) {
-               ACPI_ERROR((AE_INFO,
-                           "No result object pushed! State=%p", walk_state));
-               return (AE_NOT_EXIST);
-       }
-
-       if (!state->results.num_results) {
-               ACPI_ERROR((AE_INFO, "No result objects! State=%p",
+               ACPI_ERROR((AE_INFO, "Result stack is empty! State=%p",
                            walk_state));
                return (AE_AML_NO_RETURN_VALUE);
        }
 
-       /* Remove Bottom element */
-
-       *object = state->results.obj_desc[0];
-
-       /* Push entire stack down one element */
-
-       for (index = 0; index < state->results.num_results; index++) {
-               state->results.obj_desc[index] =
-                   state->results.obj_desc[index + 1];
-       }
+       /* Return the object from the top element and clear that element of the result stack */
 
-       state->results.num_results--;
-
-       /* Check for a valid result object */
+       walk_state->result_count--;
+       index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
 
+       *object = state->results.obj_desc[index];
        if (!*object) {
                ACPI_ERROR((AE_INFO,
-                           "Null operand! State=%p #Ops=%X Index=%X",
-                           walk_state, state->results.num_results,
-                           (u32) index));
+                           "No result objects on result stack, State=%p",
+                           walk_state));
                return (AE_AML_NO_RETURN_VALUE);
        }
 
-       ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] Results=%p State=%p\n",
-                         *object,
-                         (*object) ? acpi_ut_get_object_type_name(*object) :
-                         "NULL", state, walk_state));
+       state->results.obj_desc[index] = NULL;
+       if (index == 0) {
+               status = acpi_ds_result_stack_pop(walk_state);
+               if (ACPI_FAILURE(status)) {
+                       return (status);
+               }
+       }
+
+       ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+                         "Obj=%p [%s] Index=%X State=%p Num=%X\n", *object,
+                         acpi_ut_get_object_type_name(*object),
+                         (u32) index, walk_state, walk_state->result_count));
 
        return (AE_OK);
 }
@@ -276,39 +145,56 @@ acpi_ds_result_push(union acpi_operand_object * object,
                    struct acpi_walk_state * walk_state)
 {
        union acpi_generic_state *state;
+       acpi_status status;
+       acpi_native_uint index;
 
        ACPI_FUNCTION_NAME(ds_result_push);
 
+       if (walk_state->result_count > walk_state->result_size) {
+               ACPI_ERROR((AE_INFO, "Result stack is full"));
+               return (AE_AML_INTERNAL);
+       } else if (walk_state->result_count == walk_state->result_size) {
+
+               /* Extend the result stack */
+
+               status = acpi_ds_result_stack_push(walk_state);
+               if (ACPI_FAILURE(status)) {
+                       ACPI_ERROR((AE_INFO,
+                                   "Failed to extend the result stack"));
+                       return (status);
+               }
+       }
+
+       if (!(walk_state->result_count < walk_state->result_size)) {
+               ACPI_ERROR((AE_INFO, "No free elements in result stack"));
+               return (AE_AML_INTERNAL);
+       }
+
        state = walk_state->results;
        if (!state) {
                ACPI_ERROR((AE_INFO, "No result stack frame during push"));
                return (AE_AML_INTERNAL);
        }
 
-       if (state->results.num_results == ACPI_OBJ_NUM_OPERANDS) {
-               ACPI_ERROR((AE_INFO,
-                           "Result stack overflow: Obj=%p State=%p Num=%X",
-                           object, walk_state, state->results.num_results));
-               return (AE_STACK_OVERFLOW);
-       }
-
        if (!object) {
                ACPI_ERROR((AE_INFO,
                            "Null Object! Obj=%p State=%p Num=%X",
-                           object, walk_state, state->results.num_results));
+                           object, walk_state, walk_state->result_count));
                return (AE_BAD_PARAMETER);
        }
 
-       state->results.obj_desc[state->results.num_results] = object;
-       state->results.num_results++;
+       /* Store the object in the topmost free element of the result stack */
+
+       index = walk_state->result_count % ACPI_RESULTS_FRAME_OBJ_NUM;
+       state->results.obj_desc[index] = object;
+       walk_state->result_count++;
 
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p Num=%X Cur=%X\n",
                          object,
-                         object ?
                          acpi_ut_get_object_type_name((union
                                                        acpi_operand_object *)
-                                                      object) : "NULL",
-                         walk_state, state->results.num_results,
+                                                      object), walk_state,
+                         walk_state->result_count,
                          walk_state->current_result));
 
        return (AE_OK);
@@ -322,16 +208,25 @@ acpi_ds_result_push(union acpi_operand_object * object,
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Push an object onto the walk_state result stack.
+ * DESCRIPTION: Push an object onto the walk_state result stack
  *
  ******************************************************************************/
 
-acpi_status acpi_ds_result_stack_push(struct acpi_walk_state * walk_state)
+static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *walk_state)
 {
        union acpi_generic_state *state;
 
        ACPI_FUNCTION_NAME(ds_result_stack_push);
 
+       /* Check for stack overflow */
+
+       if (((u32) walk_state->result_size + ACPI_RESULTS_FRAME_OBJ_NUM) >
+           ACPI_RESULTS_OBJ_NUM_MAX) {
+               ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%X",
+                           walk_state, walk_state->result_size));
+               return (AE_STACK_OVERFLOW);
+       }
+
        state = acpi_ut_create_generic_state();
        if (!state) {
                return (AE_NO_MEMORY);
@@ -340,6 +235,10 @@ acpi_status acpi_ds_result_stack_push(struct acpi_walk_state * walk_state)
        state->common.descriptor_type = ACPI_DESC_TYPE_STATE_RESULT;
        acpi_ut_push_generic_state(&walk_state->results, state);
 
+       /* Increase the length of the result stack by the length of one frame */
+
+       walk_state->result_size += ACPI_RESULTS_FRAME_OBJ_NUM;
+
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Results=%p State=%p\n",
                          state, walk_state));
 
@@ -354,11 +253,11 @@ acpi_status acpi_ds_result_stack_push(struct acpi_walk_state * walk_state)
  *
  * RETURN:      Status
  *
- * DESCRIPTION: Pop an object off of the walk_state result stack.
+ * DESCRIPTION: Pop an object off of the walk_state result stack
  *
  ******************************************************************************/
 
-acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state * walk_state)
+static acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state)
 {
        union acpi_generic_state *state;
 
@@ -367,18 +266,27 @@ acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state * walk_state)
        /* Check for stack underflow */
 
        if (walk_state->results == NULL) {
-               ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Underflow - State=%p\n",
+               ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
+                                 "Result stack underflow - State=%p\n",
                                  walk_state));
                return (AE_AML_NO_OPERAND);
        }
 
+       if (walk_state->result_size < ACPI_RESULTS_FRAME_OBJ_NUM) {
+               ACPI_ERROR((AE_INFO, "Insufficient result stack size"));
+               return (AE_AML_INTERNAL);
+       }
+
        state = acpi_ut_pop_generic_state(&walk_state->results);
+       acpi_ut_delete_generic_state(state);
+
+       /* Decrease the length of the result stack by the length of one frame */
+
+       walk_state->result_size -= ACPI_RESULTS_FRAME_OBJ_NUM;
 
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                          "Result=%p RemainingResults=%X State=%p\n",
-                         state, state->results.num_results, walk_state));
-
-       acpi_ut_delete_generic_state(state);
+                         state, walk_state->result_count, walk_state));
 
        return (AE_OK);
 }
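
Taken together, the dswstate.c hunks replace the old single-frame result stack with a list of fixed-size frames: a push extends the stack by one frame when the current one is full, a pop releases a frame once its slot 0 is emptied, and the slot inside the top frame is always result_count modulo the frame size. The standalone sketch below models that mechanism with simplified types; the frame size of 8 mirrors ACPI_RESULTS_FRAME_OBJ_NUM, which is defined in headers not shown in this diff.

/*
 * Standalone sketch, not ACPICA: a result stack built from fixed-size frames
 * that grows and shrinks one frame at a time, with the in-frame slot computed
 * as count % frame size, as in acpi_ds_result_push()/acpi_ds_result_pop().
 */
#include <stdio.h>
#include <stdlib.h>

#define FRAME_OBJ_NUM 8

struct frame {
        void *obj_desc[FRAME_OBJ_NUM];
        struct frame *next;             /* next-lower frame */
};

struct walk { struct frame *results; unsigned count, size; };

static int result_push(struct walk *ws, void *object)
{
        if (ws->count == ws->size) {            /* extend by one frame */
                struct frame *f = calloc(1, sizeof(*f));
                if (!f)
                        return -1;
                f->next = ws->results;
                ws->results = f;
                ws->size += FRAME_OBJ_NUM;
        }
        ws->results->obj_desc[ws->count % FRAME_OBJ_NUM] = object;
        ws->count++;
        return 0;
}

static void *result_pop(struct walk *ws)
{
        unsigned index;
        void *object;

        if (!ws->count)
                return NULL;                    /* empty result stack */
        ws->count--;
        index = ws->count % FRAME_OBJ_NUM;
        object = ws->results->obj_desc[index];
        ws->results->obj_desc[index] = NULL;
        if (index == 0) {                       /* frame emptied: release it */
                struct frame *f = ws->results;
                ws->results = f->next;
                ws->size -= FRAME_OBJ_NUM;
                free(f);
        }
        return object;
}

int main(void)
{
        struct walk ws = { NULL, 0, 0 };
        int values[10], i;

        for (i = 0; i < 10; i++) {              /* forces a second frame */
                values[i] = i;
                result_push(&ws, &values[i]);
        }
        while (ws.count)
                printf("%d ", *(int *)result_pop(&ws));         /* 9 8 ... 0 */
        printf("\n");
        return 0;
}
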
@@ -412,9 +320,13 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
 
        /* Put the object onto the stack */
 
-       walk_state->operands[walk_state->num_operands] = object;
+       walk_state->operands[walk_state->operand_index] = object;
        walk_state->num_operands++;
 
+       /* Advance the index for the usual order of filling the operand stack */
+
+       walk_state->operand_index++;
+
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p #Ops=%X\n",
                          object,
                          acpi_ut_get_object_type_name((union
@@ -484,43 +396,36 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
  *
  ******************************************************************************/
 
-acpi_status
+void
 acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
-                                struct acpi_walk_state * walk_state)
+                                struct acpi_walk_state *walk_state)
 {
-       u32 i;
+       acpi_native_int i;
        union acpi_operand_object *obj_desc;
 
        ACPI_FUNCTION_NAME(ds_obj_stack_pop_and_delete);
 
-       for (i = 0; i < pop_count; i++) {
-
-               /* Check for stack underflow */
+       if (pop_count == 0) {
+               return;
+       }
 
+       for (i = (acpi_native_int) (pop_count - 1); i >= 0; i--) {
                if (walk_state->num_operands == 0) {
-                       ACPI_ERROR((AE_INFO,
-                                   "Object stack underflow! Count=%X State=%p #Ops=%X",
-                                   pop_count, walk_state,
-                                   walk_state->num_operands));
-                       return (AE_STACK_UNDERFLOW);
+                       return;
                }
 
                /* Pop the stack and delete an object if present in this stack entry */
 
                walk_state->num_operands--;
-               obj_desc = walk_state->operands[walk_state->num_operands];
+               obj_desc = walk_state->operands[i];
                if (obj_desc) {
-                       acpi_ut_remove_reference(walk_state->
-                                                operands[walk_state->
-                                                         num_operands]);
-                       walk_state->operands[walk_state->num_operands] = NULL;
+                       acpi_ut_remove_reference(walk_state->operands[i]);
+                       walk_state->operands[i] = NULL;
                }
        }
 
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n",
                          pop_count, walk_state, walk_state->num_operands));
-
-       return (AE_OK);
 }
 
 /*******************************************************************************
@@ -560,7 +465,7 @@ struct acpi_walk_state *acpi_ds_get_current_walk_state(struct acpi_thread_state
  *
  * RETURN:      None
  *
- * DESCRIPTION: Place the Thread state at the head of the state list.
+ * DESCRIPTION: Place the Thread state at the head of the state list
  *
  ******************************************************************************/
 
@@ -636,7 +541,6 @@ struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union
                                                  *thread)
 {
        struct acpi_walk_state *walk_state;
-       acpi_status status;
 
        ACPI_FUNCTION_TRACE(ds_create_walk_state);
 
@@ -659,14 +563,6 @@ struct acpi_walk_state *acpi_ds_create_walk_state(acpi_owner_id owner_id, union
        acpi_ds_method_data_init(walk_state);
 #endif
 
-       /* Create an initial result stack entry */
-
-       status = acpi_ds_result_stack_push(walk_state);
-       if (ACPI_FAILURE(status)) {
-               ACPI_FREE(walk_state);
-               return_PTR(NULL);
-       }
-
        /* Put the new state at the head of the walk list */
 
        if (thread) {
@@ -860,190 +756,3 @@ void acpi_ds_delete_walk_state(struct acpi_walk_state *walk_state)
        ACPI_FREE(walk_state);
        return_VOID;
 }
-
-#ifdef ACPI_OBSOLETE_FUNCTIONS
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_result_insert
- *
- * PARAMETERS:  Object              - Object to push
- *              Index               - Where to insert the object
- *              walk_state          - Current Walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Insert an object onto this walk's result stack
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_result_insert(void *object,
-                     u32 index, struct acpi_walk_state *walk_state)
-{
-       union acpi_generic_state *state;
-
-       ACPI_FUNCTION_NAME(ds_result_insert);
-
-       state = walk_state->results;
-       if (!state) {
-               ACPI_ERROR((AE_INFO, "No result object pushed! State=%p",
-                           walk_state));
-               return (AE_NOT_EXIST);
-       }
-
-       if (index >= ACPI_OBJ_NUM_OPERANDS) {
-               ACPI_ERROR((AE_INFO,
-                           "Index out of range: %X Obj=%p State=%p Num=%X",
-                           index, object, walk_state,
-                           state->results.num_results));
-               return (AE_BAD_PARAMETER);
-       }
-
-       if (!object) {
-               ACPI_ERROR((AE_INFO,
-                           "Null Object! Index=%X Obj=%p State=%p Num=%X",
-                           index, object, walk_state,
-                           state->results.num_results));
-               return (AE_BAD_PARAMETER);
-       }
-
-       state->results.obj_desc[index] = object;
-       state->results.num_results++;
-
-       ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
-                         "Obj=%p [%s] State=%p Num=%X Cur=%X\n",
-                         object,
-                         object ?
-                         acpi_ut_get_object_type_name((union
-                                                       acpi_operand_object *)
-                                                      object) : "NULL",
-                         walk_state, state->results.num_results,
-                         walk_state->current_result));
-
-       return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_obj_stack_delete_all
- *
- * PARAMETERS:  walk_state          - Current Walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Clear the object stack by deleting all objects that are on it.
- *              Should be used with great care, if at all!
- *
- ******************************************************************************/
-
-acpi_status acpi_ds_obj_stack_delete_all(struct acpi_walk_state * walk_state)
-{
-       u32 i;
-
-       ACPI_FUNCTION_TRACE_PTR(ds_obj_stack_delete_all, walk_state);
-
-       /* The stack size is configurable, but fixed */
-
-       for (i = 0; i < ACPI_OBJ_NUM_OPERANDS; i++) {
-               if (walk_state->operands[i]) {
-                       acpi_ut_remove_reference(walk_state->operands[i]);
-                       walk_state->operands[i] = NULL;
-               }
-       }
-
-       return_ACPI_STATUS(AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_obj_stack_pop_object
- *
- * PARAMETERS:  Object              - Where to return the popped object
- *              walk_state          - Current Walk state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Pop this walk's object stack.  Objects on the stack are NOT
- *              deleted by this routine.
- *
- ******************************************************************************/
-
-acpi_status
-acpi_ds_obj_stack_pop_object(union acpi_operand_object **object,
-                            struct acpi_walk_state *walk_state)
-{
-       ACPI_FUNCTION_NAME(ds_obj_stack_pop_object);
-
-       /* Check for stack underflow */
-
-       if (walk_state->num_operands == 0) {
-               ACPI_ERROR((AE_INFO,
-                           "Missing operand/stack empty! State=%p #Ops=%X",
-                           walk_state, walk_state->num_operands));
-               *object = NULL;
-               return (AE_AML_NO_OPERAND);
-       }
-
-       /* Pop the stack */
-
-       walk_state->num_operands--;
-
-       /* Check for a valid operand */
-
-       if (!walk_state->operands[walk_state->num_operands]) {
-               ACPI_ERROR((AE_INFO,
-                           "Null operand! State=%p #Ops=%X",
-                           walk_state, walk_state->num_operands));
-               *object = NULL;
-               return (AE_AML_NO_OPERAND);
-       }
-
-       /* Get operand and set stack entry to null */
-
-       *object = walk_state->operands[walk_state->num_operands];
-       walk_state->operands[walk_state->num_operands] = NULL;
-
-       ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Obj=%p [%s] State=%p #Ops=%X\n",
-                         *object, acpi_ut_get_object_type_name(*object),
-                         walk_state, walk_state->num_operands));
-
-       return (AE_OK);
-}
-
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ds_obj_stack_get_value
- *
- * PARAMETERS:  Index               - Stack index whose value is desired.  Based
- *                                    on the top of the stack (index=0 == top)
- *              walk_state          - Current Walk state
- *
- * RETURN:      Pointer to the requested operand
- *
- * DESCRIPTION: Retrieve an object from this walk's operand stack.  Index must
- *              be within the range of the current stack pointer.
- *
- ******************************************************************************/
-
-void *acpi_ds_obj_stack_get_value(u32 index, struct acpi_walk_state *walk_state)
-{
-
-       ACPI_FUNCTION_TRACE_PTR(ds_obj_stack_get_value, walk_state);
-
-       /* Can't do it if the stack is empty */
-
-       if (walk_state->num_operands == 0) {
-               return_PTR(NULL);
-       }
-
-       /* or if the index is past the top of the stack */
-
-       if (index > (walk_state->num_operands - (u32) 1)) {
-               return_PTR(NULL);
-       }
-
-       return_PTR(walk_state->
-                  operands[(acpi_native_uint) (walk_state->num_operands - 1) -
-                           index]);
-}
-#endif
index 7222a18a03198d0bc70121d1246dc27af1d0629e..0924992187e87031809bf70c1b75c6367cc38a64 100644 (file)
@@ -73,38 +73,14 @@ enum ec_event {
 
 #define ACPI_EC_DELAY          500     /* Wait 500ms max. during EC ops */
 #define ACPI_EC_UDELAY_GLK     1000    /* Wait 1ms max. to get global lock */
+#define ACPI_EC_UDELAY         100     /* Wait 100us before polling EC again */
 
 enum {
        EC_FLAGS_WAIT_GPE = 0,          /* Don't check status until GPE arrives */
        EC_FLAGS_QUERY_PENDING,         /* Query is pending */
        EC_FLAGS_GPE_MODE,              /* Expect GPE to be sent for status change */
-       EC_FLAGS_NO_ADDRESS_GPE,        /* Expect GPE only for non-address event */
-       EC_FLAGS_ADDRESS,               /* Address is being written */
-       EC_FLAGS_NO_WDATA_GPE,          /* Don't expect WDATA GPE event */
-       EC_FLAGS_WDATA,                 /* Data is being written */
-       EC_FLAGS_NO_OBF1_GPE,           /* Don't expect GPE before read */
-};
-
-static int acpi_ec_remove(struct acpi_device *device, int type);
-static int acpi_ec_start(struct acpi_device *device);
-static int acpi_ec_stop(struct acpi_device *device, int type);
-static int acpi_ec_add(struct acpi_device *device);
-
-static const struct acpi_device_id ec_device_ids[] = {
-       {"PNP0C09", 0},
-       {"", 0},
-};
-
-static struct acpi_driver acpi_ec_driver = {
-       .name = "ec",
-       .class = ACPI_EC_CLASS,
-       .ids = ec_device_ids,
-       .ops = {
-               .add = acpi_ec_add,
-               .remove = acpi_ec_remove,
-               .start = acpi_ec_start,
-               .stop = acpi_ec_stop,
-               },
+       EC_FLAGS_NO_GPE,                /* Don't use GPE mode */
+       EC_FLAGS_RESCHEDULE_POLL        /* Re-schedule poll */
 };
 
 /* If we find an EC via the ECDT, we need to keep a ptr to its context */
@@ -129,6 +105,8 @@ static struct acpi_ec {
        struct mutex lock;
        wait_queue_head_t wait;
        struct list_head list;
+       struct delayed_work work;
+       atomic_t irq_count;
        u8 handlers_installed;
 } *boot_ec, *first_ec;
 
@@ -177,65 +155,52 @@ static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event)
        return 0;
 }
 
-static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
+static void ec_schedule_ec_poll(struct acpi_ec *ec)
 {
-       int ret = 0;
+       if (test_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags))
+               schedule_delayed_work(&ec->work,
+                                     msecs_to_jiffies(ACPI_EC_DELAY));
+}
+
+static void ec_switch_to_poll_mode(struct acpi_ec *ec)
+{
+       set_bit(EC_FLAGS_NO_GPE, &ec->flags);
+       clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
+       acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
+       set_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
+}
 
-       if (unlikely(event == ACPI_EC_EVENT_OBF_1 &&
-                    test_bit(EC_FLAGS_NO_OBF1_GPE, &ec->flags)))
-               force_poll = 1;
-       if (unlikely(test_bit(EC_FLAGS_ADDRESS, &ec->flags) &&
-                    test_bit(EC_FLAGS_NO_ADDRESS_GPE, &ec->flags)))
-               force_poll = 1;
-       if (unlikely(test_bit(EC_FLAGS_WDATA, &ec->flags) &&
-                    test_bit(EC_FLAGS_NO_WDATA_GPE, &ec->flags)))
-               force_poll = 1;
+static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
+{
+       atomic_set(&ec->irq_count, 0);
        if (likely(test_bit(EC_FLAGS_GPE_MODE, &ec->flags)) &&
            likely(!force_poll)) {
                if (wait_event_timeout(ec->wait, acpi_ec_check_status(ec, event),
                                       msecs_to_jiffies(ACPI_EC_DELAY)))
-                       goto end;
+                       return 0;
                clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
                if (acpi_ec_check_status(ec, event)) {
-                       if (event == ACPI_EC_EVENT_OBF_1) {
-                               /* miss OBF_1 GPE, don't expect it */
-                               pr_info(PREFIX "missing OBF confirmation, "
-                                       "don't expect it any longer.\n");
-                               set_bit(EC_FLAGS_NO_OBF1_GPE, &ec->flags);
-                       } else if (test_bit(EC_FLAGS_ADDRESS, &ec->flags)) {
-                               /* miss address GPE, don't expect it anymore */
-                               pr_info(PREFIX "missing address confirmation, "
-                                       "don't expect it any longer.\n");
-                               set_bit(EC_FLAGS_NO_ADDRESS_GPE, &ec->flags);
-                       } else if (test_bit(EC_FLAGS_WDATA, &ec->flags)) {
-                               /* miss write data GPE, don't expect it */
-                               pr_info(PREFIX "missing write data confirmation, "
-                                       "don't expect it any longer.\n");
-                               set_bit(EC_FLAGS_NO_WDATA_GPE, &ec->flags);
-                       } else {
-                               /* missing GPEs, switch back to poll mode */
-                               if (printk_ratelimit())
-                                       pr_info(PREFIX "missing confirmations, "
+                       /* missing GPEs, switch back to poll mode */
+                       if (printk_ratelimit())
+                               pr_info(PREFIX "missing confirmations, "
                                                "switch off interrupt mode.\n");
-                               clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
-                       }
-                       goto end;
+                       ec_switch_to_poll_mode(ec);
+                       ec_schedule_ec_poll(ec);
+                       return 0;
                }
        } else {
                unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
                clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
                while (time_before(jiffies, delay)) {
                        if (acpi_ec_check_status(ec, event))
-                               goto end;
+                               return 0;
+                       udelay(ACPI_EC_UDELAY);
                }
        }
-       pr_err(PREFIX "acpi_ec_wait timeout,"
-                              " status = %d, expect_event = %d\n",
-                              acpi_ec_read_status(ec), event);
-       ret = -ETIME;
-      end:
-       clear_bit(EC_FLAGS_ADDRESS, &ec->flags);
-       return ret;
+       pr_err(PREFIX "acpi_ec_wait timeout, status = 0x%2.2x, event = %s\n",
+               acpi_ec_read_status(ec),
+               (event == ACPI_EC_EVENT_OBF_1) ? "\"b0=1\"" : "\"b1=0\"");
+       return -ETIME;
 }
 
 static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
@@ -245,8 +210,8 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
 {
        int result = 0;
        set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
-       acpi_ec_write_cmd(ec, command);
        pr_debug(PREFIX "transaction start\n");
+       acpi_ec_write_cmd(ec, command);
        for (; wdata_len > 0; --wdata_len) {
                result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll);
                if (result) {
@@ -254,15 +219,11 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec, u8 command,
                               "write_cmd timeout, command = %d\n", command);
                        goto end;
                }
-               /* mark the address byte written to EC */
-               if (rdata_len + wdata_len > 1)
-                       set_bit(EC_FLAGS_ADDRESS, &ec->flags);
                set_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
                acpi_ec_write_data(ec, *(wdata++));
        }
 
        if (!rdata_len) {
-               set_bit(EC_FLAGS_WDATA, &ec->flags);
                result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, force_poll);
                if (result) {
                        pr_err(PREFIX
@@ -527,46 +488,50 @@ static u32 acpi_ec_gpe_handler(void *data)
 {
        acpi_status status = AE_OK;
        struct acpi_ec *ec = data;
+       u8 state = acpi_ec_read_status(ec);
 
        pr_debug(PREFIX "~~~> interrupt\n");
+       atomic_inc(&ec->irq_count);
+       if (atomic_read(&ec->irq_count) > 5) {
+               pr_err(PREFIX "GPE storm detected, disabling EC GPE\n");
+               ec_switch_to_poll_mode(ec);
+               goto end;
+       }
        clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
        if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags))
                wake_up(&ec->wait);
 
-       if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_SCI) {
+       if (state & ACPI_EC_FLAG_SCI) {
                if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
                        status = acpi_os_execute(OSL_EC_BURST_HANDLER,
                                acpi_ec_gpe_query, ec);
-       } else if (unlikely(!test_bit(EC_FLAGS_GPE_MODE, &ec->flags))) {
+       } else if (!test_bit(EC_FLAGS_GPE_MODE, &ec->flags) &&
+                  !test_bit(EC_FLAGS_NO_GPE, &ec->flags) &&
+                  in_interrupt()) {
                /* this is non-query, must be confirmation */
                if (printk_ratelimit())
                        pr_info(PREFIX "non-query interrupt received,"
                                " switching to interrupt mode\n");
                set_bit(EC_FLAGS_GPE_MODE, &ec->flags);
+               clear_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
        }
-
+end:
+       ec_schedule_ec_poll(ec);
        return ACPI_SUCCESS(status) ?
            ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED;
 }
 
+static void do_ec_poll(struct work_struct *work)
+{
+       struct acpi_ec *ec = container_of(work, struct acpi_ec, work.work);
+       atomic_set(&ec->irq_count, 0);
+       (void)acpi_ec_gpe_handler(ec);
+}
+
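/*
 * Illustrative sketch (not part of the patch): ec_switch_to_poll_mode() and
 * ec_schedule_ec_poll() are called above but defined outside these hunks.
 * Given the flags used here and the deferrable work item set up in
 * make_acpi_ec() further down, a plausible shape -- an assumption, not the
 * actual bodies -- would be:
 */
static void example_switch_to_poll_mode(struct acpi_ec *ec)
{
	set_bit(EC_FLAGS_NO_GPE, &ec->flags);		/* stop relying on the GPE */
	clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
	acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
	set_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);	/* keep the poll work alive */
}

static void example_schedule_ec_poll(struct acpi_ec *ec)
{
	if (test_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags))
		schedule_delayed_work(&ec->work,
				      msecs_to_jiffies(ACPI_EC_DELAY));
}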
 /* --------------------------------------------------------------------------
                              Address Space Management
    -------------------------------------------------------------------------- */
 
-static acpi_status
-acpi_ec_space_setup(acpi_handle region_handle,
-                   u32 function, void *handler_context, void **return_context)
-{
-       /*
-        * The EC object is in the handler context and is needed
-        * when calling the acpi_ec_space_handler.
-        */
-       *return_context = (function != ACPI_REGION_DEACTIVATE) ?
-           handler_context : NULL;
-
-       return AE_OK;
-}
-
 static acpi_status
 acpi_ec_space_handler(u32 function, acpi_physical_address address,
                      u32 bits, acpi_integer *value,
@@ -669,16 +634,11 @@ static int acpi_ec_add_fs(struct acpi_device *device)
                        return -ENODEV;
        }
 
-       entry = create_proc_entry(ACPI_EC_FILE_INFO, S_IRUGO,
-                                 acpi_device_dir(device));
+       entry = proc_create_data(ACPI_EC_FILE_INFO, S_IRUGO,
+                                acpi_device_dir(device),
+                                &acpi_ec_info_ops, acpi_driver_data(device));
        if (!entry)
                return -ENODEV;
-       else {
-               entry->proc_fops = &acpi_ec_info_ops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
-
        return 0;
 }
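/*
 * Illustrative sketch (not part of the patch): proc_create_data() registers
 * the /proc entry together with its file_operations and private data in one
 * call, so the node is never visible without ->proc_fops set.  The general
 * pattern, with a hypothetical "example" entry name:
 */
static int example_add_proc(struct acpi_device *device)
{
	struct proc_dir_entry *entry;

	entry = proc_create_data("example", S_IRUGO,
				 acpi_device_dir(device),
				 &acpi_ec_info_ops,		/* const struct file_operations */
				 acpi_driver_data(device));	/* stored in entry->data */
	return entry ? 0 : -ENODEV;
}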
 
@@ -709,6 +669,8 @@ static struct acpi_ec *make_acpi_ec(void)
        mutex_init(&ec->lock);
        init_waitqueue_head(&ec->wait);
        INIT_LIST_HEAD(&ec->list);
+       INIT_DELAYED_WORK_DEFERRABLE(&ec->work, do_ec_poll);
+       atomic_set(&ec->irq_count, 0);
        return ec;
 }
 
@@ -741,17 +703,21 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
        status = acpi_evaluate_integer(handle, "_GPE", NULL, &ec->gpe);
        if (ACPI_FAILURE(status))
                return status;
-       /* Find and register all query methods */
-       acpi_walk_namespace(ACPI_TYPE_METHOD, handle, 1,
-                           acpi_ec_register_query_methods, ec, NULL);
        /* Use the global lock for all EC transactions? */
        acpi_evaluate_integer(handle, "_GLK", NULL, &ec->global_lock);
        ec->handle = handle;
        return AE_CTRL_TERMINATE;
 }
 
+static void ec_poll_stop(struct acpi_ec *ec)
+{
+       clear_bit(EC_FLAGS_RESCHEDULE_POLL, &ec->flags);
+       cancel_delayed_work(&ec->work);
+}
+
 static void ec_remove_handlers(struct acpi_ec *ec)
 {
+       ec_poll_stop(ec);
        if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
                                ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
                pr_err(PREFIX "failed to remove space handler\n");
@@ -771,31 +737,28 @@ static int acpi_ec_add(struct acpi_device *device)
        strcpy(acpi_device_class(device), ACPI_EC_CLASS);
 
        /* Check for boot EC */
-       if (boot_ec) {
-               if (boot_ec->handle == device->handle) {
-                       /* Pre-loaded EC from DSDT, just move pointer */
-                       ec = boot_ec;
-                       boot_ec = NULL;
-                       goto end;
-               } else if (boot_ec->handle == ACPI_ROOT_OBJECT) {
-                       /* ECDT-based EC, time to shut it down */
-                       ec_remove_handlers(boot_ec);
-                       kfree(boot_ec);
-                       first_ec = boot_ec = NULL;
+       if (boot_ec &&
+           (boot_ec->handle == device->handle ||
+            boot_ec->handle == ACPI_ROOT_OBJECT)) {
+               ec = boot_ec;
+               boot_ec = NULL;
+       } else {
+               ec = make_acpi_ec();
+               if (!ec)
+                       return -ENOMEM;
+               if (ec_parse_device(device->handle, 0, ec, NULL) !=
+                   AE_CTRL_TERMINATE) {
+                       kfree(ec);
+                       return -EINVAL;
                }
        }
 
-       ec = make_acpi_ec();
-       if (!ec)
-               return -ENOMEM;
-
-       if (ec_parse_device(device->handle, 0, ec, NULL) !=
-           AE_CTRL_TERMINATE) {
-               kfree(ec);
-               return -EINVAL;
-       }
        ec->handle = device->handle;
-      end:
+
+       /* Find and register all query methods */
+       acpi_walk_namespace(ACPI_TYPE_METHOD, ec->handle, 1,
+                           acpi_ec_register_query_methods, ec, NULL);
+
        if (!first_ec)
                first_ec = ec;
        acpi_driver_data(device) = ec;
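/*
 * Illustrative sketch (not part of the patch): acpi_ec_register_query_methods()
 * is referenced above but not shown in these hunks.  Since it is passed to
 * acpi_walk_namespace(), it has the standard walker-callback shape; a
 * hypothetical version that merely spots the _Qxx query methods defined by the
 * EC specification (the real registration logic is an assumption):
 */
static acpi_status example_find_query_method(acpi_handle handle, u32 level,
					     void *context, void **retval)
{
	struct acpi_ec *ec = context;
	char name[5] = "";
	struct acpi_buffer buf = { sizeof(name), name };

	if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &buf)))
		return AE_OK;
	if (name[0] == '_' && name[1] == 'Q')
		pr_debug(PREFIX "EC %p: query method %s\n", ec, name);
	return AE_OK;
}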
@@ -870,7 +833,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
        status = acpi_install_address_space_handler(ec->handle,
                                                    ACPI_ADR_SPACE_EC,
                                                    &acpi_ec_space_handler,
-                                                   &acpi_ec_space_setup, ec);
+                                                   NULL, ec);
        if (ACPI_FAILURE(status)) {
                acpi_remove_gpe_handler(NULL, ec->gpe, &acpi_ec_gpe_handler);
                return -ENODEV;
@@ -897,6 +860,7 @@ static int acpi_ec_start(struct acpi_device *device)
 
        /* EC is fully operational, allow queries */
        clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+       ec_schedule_ec_poll(ec);
        return ret;
 }
 
@@ -924,6 +888,11 @@ int __init acpi_boot_ec_enable(void)
        return -EFAULT;
 }
 
+static const struct acpi_device_id ec_device_ids[] = {
+       {"PNP0C09", 0},
+       {"", 0},
+};
+
 int __init acpi_ec_ecdt_probe(void)
 {
        int ret;
@@ -944,6 +913,7 @@ int __init acpi_ec_ecdt_probe(void)
                boot_ec->data_addr = ecdt_ptr->data.address;
                boot_ec->gpe = ecdt_ptr->gpe;
                boot_ec->handle = ACPI_ROOT_OBJECT;
+               acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
        } else {
                /* This workaround is needed only on some broken machines,
                 * which require early EC, but fail to provide ECDT */
@@ -973,6 +943,39 @@ int __init acpi_ec_ecdt_probe(void)
        return -ENODEV;
 }
 
+static int acpi_ec_suspend(struct acpi_device *device, pm_message_t state)
+{
+       struct acpi_ec *ec = acpi_driver_data(device);
+       /* Stop using GPE */
+       set_bit(EC_FLAGS_NO_GPE, &ec->flags);
+       clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
+       acpi_disable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
+       return 0;
+}
+
+static int acpi_ec_resume(struct acpi_device *device)
+{
+       struct acpi_ec *ec = acpi_driver_data(device);
+       /* Enable use of GPE back */
+       clear_bit(EC_FLAGS_NO_GPE, &ec->flags);
+       acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
+       return 0;
+}
+
+static struct acpi_driver acpi_ec_driver = {
+       .name = "ec",
+       .class = ACPI_EC_CLASS,
+       .ids = ec_device_ids,
+       .ops = {
+               .add = acpi_ec_add,
+               .remove = acpi_ec_remove,
+               .start = acpi_ec_start,
+               .stop = acpi_ec_stop,
+               .suspend = acpi_ec_suspend,
+               .resume = acpi_ec_resume,
+               },
+};
+
 static int __init acpi_ec_init(void)
 {
        int result = 0;
index abec1ca94cf4bb4f54a387121c93168c944e7d9e..0c24bd4d65629bb4647fee1958ceeac2dda424ad 100644 (file)
@@ -102,6 +102,7 @@ static unsigned int acpi_system_poll_event(struct file *file, poll_table * wait)
 }
 
 static const struct file_operations acpi_system_event_ops = {
+       .owner = THIS_MODULE,
        .open = acpi_system_open_event,
        .read = acpi_system_read_event,
        .release = acpi_system_close_event,
@@ -294,10 +295,9 @@ static int __init acpi_event_init(void)
 
 #ifdef CONFIG_ACPI_PROC_EVENT
        /* 'event' [R] */
-       entry = create_proc_entry("event", S_IRUSR, acpi_root_dir);
-       if (entry)
-               entry->proc_fops = &acpi_system_event_ops;
-       else
+       entry = proc_create("event", S_IRUSR, acpi_root_dir,
+                           &acpi_system_event_ops);
+       if (!entry)
                return -ENODEV;
 #endif
 
index 3048801a37b5e1e91dfd6a7595b560cfe27531c2..5d30e5be1b1c73edad8df547dfa1f7ce9eb55809 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0dadd2adc8001a109669b4ae62c673da60f4747b..5354be44f87678beda3531645527a8e95a02edaf 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -248,10 +248,6 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
 
        ACPI_FUNCTION_TRACE(ev_disable_gpe);
 
-       if (!(gpe_event_info->flags & ACPI_GPE_ENABLE_MASK)) {
-               return_ACPI_STATUS(AE_OK);
-       }
-
        /* Make sure HW enable masks are updated */
 
        status =
index 361ebe6c4a6f0b40cdf9c86acb484eb235862dbd..e6c4d4c49e79ee1f1fb4d409e60b949b2cff5e05 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 21cb749d0c75567534ebee05c78551bb289f1972..2113e58e2221520b6091155b84ac3ab8b9956197 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define _COMPONENT          ACPI_EVENTS
 ACPI_MODULE_NAME("evmisc")
 
-/* Names for Notify() values, used for debug output */
-#ifdef ACPI_DEBUG_OUTPUT
-static const char *acpi_notify_value_names[] = {
-       "Bus Check",
-       "Device Check",
-       "Device Wake",
-       "Eject Request",
-       "Device Check Light",
-       "Frequency Mismatch",
-       "Bus Mode Mismatch",
-       "Power Fault"
-};
-#endif
-
 /* Pointer to FACS needed for the Global Lock */
-
 static struct acpi_table_facs *facs = NULL;
 
 /* Local prototypes */
@@ -94,7 +79,6 @@ u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node)
        switch (node->type) {
        case ACPI_TYPE_DEVICE:
        case ACPI_TYPE_PROCESSOR:
-       case ACPI_TYPE_POWER:
        case ACPI_TYPE_THERMAL:
                /*
                 * These are the ONLY objects that can receive ACPI notifications
@@ -139,17 +123,9 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
         *   initiate soft-off or sleep operation?
         */
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                         "Dispatching Notify(%X) on node %p\n", notify_value,
-                         node));
-
-       if (notify_value <= 7) {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Notify value: %s\n",
-                                 acpi_notify_value_names[notify_value]));
-       } else {
-               ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "Notify value: 0x%2.2X **Device Specific**\n",
-                                 notify_value));
-       }
+                         "Dispatching Notify on [%4.4s] Node %p Value 0x%2.2X (%s)\n",
+                         acpi_ut_get_node_name(node), node, notify_value,
+                         acpi_ut_get_notify_name(notify_value)));
 
        /* Get the notify object attached to the NS Node */
 
@@ -159,10 +135,12 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
                /* We have the notify object, Get the right handler */
 
                switch (node->type) {
+
+                       /* Notify allowed only on these types */
+
                case ACPI_TYPE_DEVICE:
                case ACPI_TYPE_THERMAL:
                case ACPI_TYPE_PROCESSOR:
-               case ACPI_TYPE_POWER:
 
                        if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
                                handler_obj =
@@ -179,8 +157,13 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
                }
        }
 
-       /* If there is any handler to run, schedule the dispatcher */
-
+       /*
+        * If there is any handler to run, schedule the dispatcher.
+        * Check for:
+        * 1) Global system notify handler
+        * 2) Global device notify handler
+        * 3) Per-device notify handler
+        */
        if ((acpi_gbl_system_notify.handler
             && (notify_value <= ACPI_MAX_SYS_NOTIFY))
            || (acpi_gbl_device_notify.handler
@@ -190,6 +173,13 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
                        return (AE_NO_MEMORY);
                }
 
+               if (!handler_obj) {
+                       ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+                                         "Executing system notify handler for Notify (%4.4s, %X) node %p\n",
+                                         acpi_ut_get_node_name(node),
+                                         notify_value, node));
+               }
+
                notify_info->common.descriptor_type =
                    ACPI_DESC_TYPE_STATE_NOTIFY;
                notify_info->notify.node = node;
@@ -202,15 +192,12 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node * node,
                if (ACPI_FAILURE(status)) {
                        acpi_ut_delete_generic_state(notify_info);
                }
-       }
-
-       if (!handler_obj) {
+       } else {
                /*
-                * There is no per-device notify handler for this device.
-                * This may or may not be a problem.
+                * There is no notify handler (per-device or system) for this device.
                 */
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-                                 "No notify handler for Notify(%4.4s, %X) node %p\n",
+                                 "No notify handler for Notify (%4.4s, %X) node %p\n",
                                  acpi_ut_get_node_name(node), notify_value,
                                  node));
        }
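/*
 * Illustrative sketch (not part of the patch): the dispatch logic above fires
 * either a global system/device notify handler or a per-device handler.  From
 * the OS side such a handler is installed through ACPICA's
 * acpi_install_notify_handler(); a minimal consumer looks like:
 */
static void example_notify_handler(acpi_handle handle, u32 event, void *context)
{
	/* event is the Notify() value, e.g. 0x00 Bus Check, 0x01 Device Check */
	pr_debug("ACPI notify %#x on handle %p\n", event, handle);
}

static int example_register_notify(struct acpi_device *device)
{
	acpi_status status;

	status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
					     example_notify_handler, NULL);
	return ACPI_SUCCESS(status) ? 0 : -ENODEV;
}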
@@ -349,9 +336,10 @@ acpi_status acpi_ev_init_global_lock_handler(void)
 
        ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
 
-       status =
-           acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
-                                   (struct acpi_table_header **)&facs);
+       status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
+                                        ACPI_CAST_INDIRECT_PTR(struct
+                                                               acpi_table_header,
+                                                               &facs));
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }
@@ -439,7 +427,8 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
         * Only one thread can acquire the GL at a time, the global_lock_mutex
         * enforces this. This interface releases the interpreter if we must wait.
         */
-       status = acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex, 0);
+       status = acpi_ex_system_wait_mutex(
+                       acpi_gbl_global_lock_mutex->mutex.os_mutex, 0);
        if (status == AE_TIME) {
                if (acpi_ev_global_lock_thread_id == acpi_os_get_thread_id()) {
                        acpi_ev_global_lock_acquired++;
@@ -448,9 +437,9 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
        }
 
        if (ACPI_FAILURE(status)) {
-               status =
-                   acpi_ex_system_wait_mutex(acpi_gbl_global_lock_mutex,
-                                             timeout);
+               status = acpi_ex_system_wait_mutex(
+                               acpi_gbl_global_lock_mutex->mutex.os_mutex,
+                               timeout);
        }
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
@@ -459,6 +448,19 @@ acpi_status acpi_ev_acquire_global_lock(u16 timeout)
        acpi_ev_global_lock_thread_id = acpi_os_get_thread_id();
        acpi_ev_global_lock_acquired++;
 
+       /*
+        * Update the global lock handle and check for wraparound. The handle is
+        * only used for the external global lock interfaces, but it is updated
+        * here to properly handle the case where a single thread may acquire the
+        * lock via both the AML and the acpi_acquire_global_lock interfaces. The
+        * handle is therefore updated on the first acquire from a given thread
+        * regardless of where the acquisition request originated.
+        */
+       acpi_gbl_global_lock_handle++;
+       if (acpi_gbl_global_lock_handle == 0) {
+               acpi_gbl_global_lock_handle = 1;
+       }
+
        /*
         * Make sure that a global lock actually exists. If not, just treat
         * the lock as a standard mutex.
@@ -555,7 +557,7 @@ acpi_status acpi_ev_release_global_lock(void)
        /* Release the local GL mutex */
        acpi_ev_global_lock_thread_id = NULL;
        acpi_ev_global_lock_acquired = 0;
-       acpi_os_release_mutex(acpi_gbl_global_lock_mutex);
+       acpi_os_release_mutex(acpi_gbl_global_lock_mutex->mutex.os_mutex);
        return_ACPI_STATUS(status);
 }
 
index 58ad09725dd2a69dcac8f1f45fa3226041981bf3..1628f59347524c9bbf49bc800581b93a224fb5c2 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -394,7 +394,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
        ACPI_DEBUG_PRINT((ACPI_DB_OPREGION,
                          "Handler %p (@%p) Address %8.8X%8.8X [%s]\n",
                          &region_obj->region.handler->address_space, handler,
-                         ACPI_FORMAT_UINT64(address),
+                         ACPI_FORMAT_NATIVE_UINT(address),
                          acpi_ut_get_region_name(region_obj->region.
                                                  space_id)));
 
index b1aaa0e84588f136b0d17b4bb7f71a78fe4313aa..2e3d2c5e4f4d2a7a7b4571475646fe970733f1e3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7e5d15ce2395fd0cec232c0146ef8253c658495a..2a8b77877610a13df05762d10a9a77ce4adf8eb1 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6d866a01f5f439e06f79624de1bd268267b93f2a..94a6efe020bed116118c408e92f35f30a0d23a2e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -758,6 +758,12 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
  *
  * DESCRIPTION: Acquire the ACPI Global Lock
  *
+ * Note: Allows callers with the same thread ID to acquire the global lock
+ * multiple times. In other words, externally, the behavior of the global lock
+ * is identical to an AML mutex. On the first acquire, a new handle is
+ * returned. On any subsequent calls to acquire by the same thread, the same
+ * handle is returned.
+ *
  ******************************************************************************/
 acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
 {
@@ -770,14 +776,19 @@ acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle)
        /* Must lock interpreter to prevent race conditions */
 
        acpi_ex_enter_interpreter();
-       status = acpi_ev_acquire_global_lock(timeout);
-       acpi_ex_exit_interpreter();
+
+       status = acpi_ex_acquire_mutex_object(timeout,
+                                             acpi_gbl_global_lock_mutex,
+                                             acpi_os_get_thread_id());
 
        if (ACPI_SUCCESS(status)) {
-               acpi_gbl_global_lock_handle++;
+
+               /* Return the global lock handle (updated in acpi_ev_acquire_global_lock) */
+
                *handle = acpi_gbl_global_lock_handle;
        }
 
+       acpi_ex_exit_interpreter();
        return (status);
 }
 
@@ -798,11 +809,11 @@ acpi_status acpi_release_global_lock(u32 handle)
 {
        acpi_status status;
 
-       if (handle != acpi_gbl_global_lock_handle) {
+       if (!handle || (handle != acpi_gbl_global_lock_handle)) {
                return (AE_NOT_ACQUIRED);
        }
 
-       status = acpi_ev_release_global_lock();
+       status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex);
        return (status);
 }
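/*
 * Illustrative sketch (not part of the patch): with the handle now incremented
 * (and wrapped past zero) inside acpi_ev_acquire_global_lock(), an external
 * caller uses the lock through the exported pair acpi_acquire_global_lock() /
 * acpi_release_global_lock(); the same thread may nest acquires and gets the
 * same handle back each time:
 */
static int example_with_global_lock(void)
{
	u32 glk_handle;
	acpi_status status;

	status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER, &glk_handle);
	if (ACPI_FAILURE(status))
		return -ENODEV;
	/* ... access hardware shared with the firmware ... */
	acpi_release_global_lock(glk_handle);	/* must pass back the same handle */
	return 0;
}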
 
index 9cbd3414a574ebf4cad5c94f54a7f95883363189..99a7502e6a87465894e262b566b99f2a8dec35bf 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7bf09c5fb2421a5fa42a6a9001115aa3f4490665..e8750807e57d004151fab56cc56198f73acb29b6 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 25802f302ffe18c00608ff5ae347f79931b57645..24da921d13e3b2112048b428f97a43298bb7c1c9 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -45,7 +45,6 @@
 #include <acpi/acinterp.h>
 #include <acpi/amlcode.h>
 #include <acpi/acnamesp.h>
-#include <acpi/acevents.h>
 #include <acpi/actables.h>
 #include <acpi/acdispat.h>
 
@@ -138,6 +137,14 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
 
        ACPI_FUNCTION_TRACE(ex_load_table_op);
 
+       /* Validate lengths for the signature_string, OEMIDString, OEMtable_iD */
+
+       if ((operand[0]->string.length > ACPI_NAME_SIZE) ||
+           (operand[1]->string.length > ACPI_OEM_ID_SIZE) ||
+           (operand[2]->string.length > ACPI_OEM_TABLE_ID_SIZE)) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
        /* Find the ACPI table in the RSDT/XSDT */
 
        status = acpi_tb_find_table(operand[0]->string.pointer,
@@ -229,11 +236,18 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
        status = acpi_get_table_by_index(table_index, &table);
        if (ACPI_SUCCESS(status)) {
                ACPI_INFO((AE_INFO,
-                          "Dynamic OEM Table Load - [%4.4s] OemId [%6.6s] OemTableId [%8.8s]",
+                          "Dynamic OEM Table Load - [%.4s] OemId [%.6s] OemTableId [%.8s]",
                           table->signature, table->oem_id,
                           table->oem_table_id));
        }
 
+       /* Invoke table handler if present */
+
+       if (acpi_gbl_table_handler) {
+               (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD, table,
+                                            acpi_gbl_table_handler_context);
+       }
+
        *return_desc = ddb_handle;
        return_ACPI_STATUS(status);
 }
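/*
 * Illustrative sketch (not part of the patch): the new hook above invokes the
 * global table handler on every dynamic Load.  Judging from the call site, the
 * handler receives the event code and the mapped table header; a hypothetical
 * consumer (installation via acpi_install_table_handler() is an assumption):
 */
static acpi_status example_table_handler(u32 event, void *table, void *context)
{
	struct acpi_table_header *header = table;

	if (event == ACPI_TABLE_EVENT_LOAD)
		pr_info("ACPI: dynamic table [%4.4s] loaded\n", header->signature);
	else
		pr_info("ACPI: dynamic table [%4.4s] unloaded\n", header->signature);
	return AE_OK;
}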
@@ -268,6 +282,7 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
        struct acpi_table_desc table_desc;
        acpi_native_uint table_index;
        acpi_status status;
+       u32 length;
 
        ACPI_FUNCTION_TRACE(ex_load_op);
 
@@ -278,16 +293,16 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
        switch (ACPI_GET_OBJECT_TYPE(obj_desc)) {
        case ACPI_TYPE_REGION:
 
+               ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Region %p %s\n",
+                                 obj_desc,
+                                 acpi_ut_get_object_type_name(obj_desc)));
+
                /* Region must be system_memory (from ACPI spec) */
 
                if (obj_desc->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) {
                        return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
                }
 
-               ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Load from Region %p %s\n",
-                                 obj_desc,
-                                 acpi_ut_get_object_type_name(obj_desc)));
-
                /*
                 * If the Region Address and Length have not been previously evaluated,
                 * evaluate them now and save the results.
@@ -299,6 +314,11 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
                        }
                }
 
+               /*
+                * We will simply map the memory region for the table. However, the
+                * memory region is technically not guaranteed to remain stable and
+                * we may eventually have to copy the table to a local buffer.
+                */
                table_desc.address = obj_desc->region.address;
                table_desc.length = obj_desc->region.length;
                table_desc.flags = ACPI_TABLE_ORIGIN_MAPPED;
@@ -306,18 +326,41 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
 
        case ACPI_TYPE_BUFFER:  /* Buffer or resolved region_field */
 
-               /* Simply extract the buffer from the buffer object */
-
                ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                                  "Load from Buffer or Field %p %s\n", obj_desc,
                                  acpi_ut_get_object_type_name(obj_desc)));
 
-               table_desc.pointer = ACPI_CAST_PTR(struct acpi_table_header,
-                                                  obj_desc->buffer.pointer);
-               table_desc.length = table_desc.pointer->length;
-               table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
+               length = obj_desc->buffer.length;
+
+               /* Must have at least an ACPI table header */
+
+               if (length < sizeof(struct acpi_table_header)) {
+                       return_ACPI_STATUS(AE_INVALID_TABLE_LENGTH);
+               }
+
+               /* Validate checksum here. It won't get validated in tb_add_table */
 
-               obj_desc->buffer.pointer = NULL;
+               status =
+                   acpi_tb_verify_checksum(ACPI_CAST_PTR
+                                           (struct acpi_table_header,
+                                            obj_desc->buffer.pointer), length);
+               if (ACPI_FAILURE(status)) {
+                       return_ACPI_STATUS(status);
+               }
+
+               /*
+                * We need to copy the buffer since the original buffer could be
+                * changed or deleted in the future
+                */
+               table_desc.pointer = ACPI_ALLOCATE(length);
+               if (!table_desc.pointer) {
+                       return_ACPI_STATUS(AE_NO_MEMORY);
+               }
+
+               ACPI_MEMCPY(table_desc.pointer, obj_desc->buffer.pointer,
+                           length);
+               table_desc.length = length;
+               table_desc.flags = ACPI_TABLE_ORIGIN_ALLOCATED;
                break;
 
        default:
@@ -333,7 +376,8 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
        }
 
        status =
-           acpi_ex_add_table(table_index, acpi_gbl_root_node, &ddb_handle);
+           acpi_ex_add_table(table_index, walk_state->scope_info->scope.node,
+                             &ddb_handle);
        if (ACPI_FAILURE(status)) {
 
                /* On error, table_ptr was deallocated above */
@@ -349,11 +393,23 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
 
                /* table_ptr was deallocated above */
 
+               acpi_ut_remove_reference(ddb_handle);
                return_ACPI_STATUS(status);
        }
 
+       /* Invoke table handler if present */
+
+       if (acpi_gbl_table_handler) {
+               (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_LOAD,
+                                            table_desc.pointer,
+                                            acpi_gbl_table_handler_context);
+       }
+
       cleanup:
        if (ACPI_FAILURE(status)) {
+
+               /* Delete allocated buffer or mapping */
+
                acpi_tb_delete_table(&table_desc);
        }
        return_ACPI_STATUS(status);
@@ -376,6 +432,7 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
        acpi_status status = AE_OK;
        union acpi_operand_object *table_desc = ddb_handle;
        acpi_native_uint table_index;
+       struct acpi_table_header *table;
 
        ACPI_FUNCTION_TRACE(ex_unload_table);
 
@@ -395,17 +452,25 @@ acpi_status acpi_ex_unload_table(union acpi_operand_object *ddb_handle)
 
        table_index = (acpi_native_uint) table_desc->reference.object;
 
+       /* Invoke table handler if present */
+
+       if (acpi_gbl_table_handler) {
+               status = acpi_get_table_by_index(table_index, &table);
+               if (ACPI_SUCCESS(status)) {
+                       (void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_UNLOAD,
+                                                    table,
+                                                    acpi_gbl_table_handler_context);
+               }
+       }
+
        /*
         * Delete the entire namespace under this table Node
         * (Offset contains the table_id)
         */
        acpi_tb_delete_namespace_by_owner(table_index);
-       acpi_tb_release_owner_id(table_index);
+       (void)acpi_tb_release_owner_id(table_index);
 
        acpi_tb_set_table_loaded_flag(table_index, FALSE);
 
-       /* Delete the table descriptor (ddb_handle) */
-
-       acpi_ut_remove_reference(table_desc);
-       return_ACPI_STATUS(status);
+       return_ACPI_STATUS(AE_OK);
 }
index 79f2c0d42c06a59ff5a56b8c4062371aeb18362b..fd954b4ed83d7fc1cfdbaaa4a42c41e9fbc89473 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6e9a23e47fef38e5c39497daa0c46a713d3ee726..60e62c4f0577aae22c5171500bfc6236b0b75285 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -96,16 +96,28 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
         * to the original Node.
         */
        switch (target_node->type) {
+
+               /* For these types, the sub-object can change dynamically via a Store */
+
        case ACPI_TYPE_INTEGER:
        case ACPI_TYPE_STRING:
        case ACPI_TYPE_BUFFER:
        case ACPI_TYPE_PACKAGE:
        case ACPI_TYPE_BUFFER_FIELD:
 
+               /*
+                * These types open a new scope, so we need the NS node in order to access
+                * any children.
+                */
+       case ACPI_TYPE_DEVICE:
+       case ACPI_TYPE_POWER:
+       case ACPI_TYPE_PROCESSOR:
+       case ACPI_TYPE_THERMAL:
+       case ACPI_TYPE_LOCAL_SCOPE:
+
                /*
                 * The new alias has the type ALIAS and points to the original
-                * NS node, not the object itself.  This is because for these
-                * types, the object can change dynamically via a Store.
+                * NS node, not the object itself.
                 */
                alias_node->type = ACPI_TYPE_LOCAL_ALIAS;
                alias_node->object =
@@ -115,9 +127,7 @@ acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state)
        case ACPI_TYPE_METHOD:
 
                /*
-                * The new alias has the type ALIAS and points to the original
-                * NS node, not the object itself.  This is because for these
-                * types, the object can change dynamically via a Store.
+                * Control method aliases need to be differentiated
                 */
                alias_node->type = ACPI_TYPE_LOCAL_METHOD_ALIAS;
                alias_node->object =
@@ -340,101 +350,6 @@ acpi_ex_create_region(u8 * aml_start,
        return_ACPI_STATUS(status);
 }
 
-/*******************************************************************************
- *
- * FUNCTION:    acpi_ex_create_table_region
- *
- * PARAMETERS:  walk_state          - Current state
- *
- * RETURN:      Status
- *
- * DESCRIPTION: Create a new data_table_region object
- *
- ******************************************************************************/
-
-acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state)
-{
-       acpi_status status;
-       union acpi_operand_object **operand = &walk_state->operands[0];
-       union acpi_operand_object *obj_desc;
-       struct acpi_namespace_node *node;
-       union acpi_operand_object *region_obj2;
-       acpi_native_uint table_index;
-       struct acpi_table_header *table;
-
-       ACPI_FUNCTION_TRACE(ex_create_table_region);
-
-       /* Get the Node from the object stack  */
-
-       node = walk_state->op->common.node;
-
-       /*
-        * If the region object is already attached to this node,
-        * just return
-        */
-       if (acpi_ns_get_attached_object(node)) {
-               return_ACPI_STATUS(AE_OK);
-       }
-
-       /* Find the ACPI table */
-
-       status = acpi_tb_find_table(operand[1]->string.pointer,
-                                   operand[2]->string.pointer,
-                                   operand[3]->string.pointer, &table_index);
-       if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
-       }
-
-       /* Create the region descriptor */
-
-       obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_REGION);
-       if (!obj_desc) {
-               return_ACPI_STATUS(AE_NO_MEMORY);
-       }
-
-       region_obj2 = obj_desc->common.next_object;
-       region_obj2->extra.region_context = NULL;
-
-       status = acpi_get_table_by_index(table_index, &table);
-       if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
-       }
-
-       /* Init the region from the operands */
-
-       obj_desc->region.space_id = REGION_DATA_TABLE;
-       obj_desc->region.address =
-           (acpi_physical_address) ACPI_TO_INTEGER(table);
-       obj_desc->region.length = table->length;
-       obj_desc->region.node = node;
-       obj_desc->region.flags = AOPOBJ_DATA_VALID;
-
-       /* Install the new region object in the parent Node */
-
-       status = acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_REGION);
-       if (ACPI_FAILURE(status)) {
-               goto cleanup;
-       }
-
-       status = acpi_ev_initialize_region(obj_desc, FALSE);
-       if (ACPI_FAILURE(status)) {
-               if (status == AE_NOT_EXIST) {
-                       status = AE_OK;
-               } else {
-                       goto cleanup;
-               }
-       }
-
-       obj_desc->region.flags |= AOPOBJ_SETUP_COMPLETE;
-
-      cleanup:
-
-       /* Remove local reference to the object */
-
-       acpi_ut_remove_reference(obj_desc);
-       return_ACPI_STATUS(status);
-}
-
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ex_create_processor
index 51c9c29987c3d102a857ecf8d30f937771b27da4..74f1b22601b39b4b5106966d211c12fdd62946ee 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -500,25 +500,28 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
                        acpi_os_printf("Reference: Debug\n");
                        break;
 
-               case AML_NAME_OP:
+               case AML_INDEX_OP:
 
-                       ACPI_DUMP_PATHNAME(obj_desc->reference.object,
-                                          "Reference: Name: ", ACPI_LV_INFO,
-                                          _COMPONENT);
-                       ACPI_DUMP_ENTRY(obj_desc->reference.object,
-                                       ACPI_LV_INFO);
+                       acpi_os_printf("Reference: Index %p\n",
+                                      obj_desc->reference.object);
                        break;
 
-               case AML_INDEX_OP:
+               case AML_LOAD_OP:
 
-                       acpi_os_printf("Reference: Index %p\n",
+                       acpi_os_printf("Reference: [DdbHandle] TableIndex %p\n",
                                       obj_desc->reference.object);
                        break;
 
                case AML_REF_OF_OP:
 
-                       acpi_os_printf("Reference: (RefOf) %p\n",
-                                      obj_desc->reference.object);
+                       acpi_os_printf("Reference: (RefOf) %p [%s]\n",
+                                      obj_desc->reference.object,
+                                      acpi_ut_get_type_name(((union
+                                                              acpi_operand_object
+                                                              *)obj_desc->
+                                                             reference.
+                                                             object)->common.
+                                                            type));
                        break;
 
                case AML_ARG_OP:
@@ -559,8 +562,9 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
 
                case AML_INT_NAMEPATH_OP:
 
-                       acpi_os_printf("Reference.Node->Name %X\n",
-                                      obj_desc->reference.node->name.integer);
+                       acpi_os_printf("Reference: Namepath %X [%4.4s]\n",
+                                      obj_desc->reference.node->name.integer,
+                                      obj_desc->reference.node->name.ascii);
                        break;
 
                default:
@@ -640,8 +644,8 @@ void acpi_ex_dump_operand(union acpi_operand_object *obj_desc, u32 depth)
                        acpi_os_printf("\n");
                } else {
                        acpi_os_printf(" base %8.8X%8.8X Length %X\n",
-                                      ACPI_FORMAT_UINT64(obj_desc->region.
-                                                         address),
+                                      ACPI_FORMAT_NATIVE_UINT(obj_desc->region.
+                                                              address),
                                       obj_desc->region.length);
                }
                break;
@@ -877,20 +881,43 @@ static void acpi_ex_dump_reference_obj(union acpi_operand_object *obj_desc)
        ret_buf.length = ACPI_ALLOCATE_LOCAL_BUFFER;
 
        if (obj_desc->reference.opcode == AML_INT_NAMEPATH_OP) {
-               acpi_os_printf("Named Object %p ", obj_desc->reference.node);
+               acpi_os_printf(" Named Object %p ", obj_desc->reference.node);
 
                status =
                    acpi_ns_handle_to_pathname(obj_desc->reference.node,
                                               &ret_buf);
                if (ACPI_FAILURE(status)) {
-                       acpi_os_printf("Could not convert name to pathname\n");
+                       acpi_os_printf(" Could not convert name to pathname\n");
                } else {
                        acpi_os_printf("%s\n", (char *)ret_buf.pointer);
                        ACPI_FREE(ret_buf.pointer);
                }
        } else if (obj_desc->reference.object) {
-               acpi_os_printf("\nReferenced Object: %p\n",
-                              obj_desc->reference.object);
+               if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) ==
+                   ACPI_DESC_TYPE_OPERAND) {
+                       acpi_os_printf(" Target: %p",
+                                      obj_desc->reference.object);
+                       if (obj_desc->reference.opcode == AML_LOAD_OP) {
+                               /*
+                                * For DDBHandle reference,
+                                * obj_desc->Reference.Object is the table index
+                                */
+                               acpi_os_printf(" [DDBHandle]\n");
+                       } else {
+                               acpi_os_printf(" [%s]\n",
+                                              acpi_ut_get_type_name(((union
+                                                                      acpi_operand_object
+                                                                      *)
+                                                                     obj_desc->
+                                                                     reference.
+                                                                     object)->
+                                                                    common.
+                                                                    type));
+                       }
+               } else {
+                       acpi_os_printf(" Target: %p\n",
+                                      obj_desc->reference.object);
+               }
        }
 }
 
@@ -976,7 +1003,9 @@ acpi_ex_dump_package_obj(union acpi_operand_object *obj_desc,
 
        case ACPI_TYPE_LOCAL_REFERENCE:
 
-               acpi_os_printf("[Object Reference] ");
+               acpi_os_printf("[Object Reference] %s",
+                              (acpi_ps_get_opcode_info
+                               (obj_desc->reference.opcode))->name);
                acpi_ex_dump_reference_obj(obj_desc);
                break;
 
index 2d88a3d8d1ad5f75815c5b5fcf0f0aec9ef48927..3e440d84226a4e85aea18a5b49156145e1bfbfd7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -71,7 +71,6 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
        union acpi_operand_object *buffer_desc;
        acpi_size length;
        void *buffer;
-       u8 locked;
 
        ACPI_FUNCTION_TRACE_PTR(ex_read_data_from_field, obj_desc);
 
@@ -111,9 +110,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
 
                /* Lock entire transaction if requested */
 
-               locked =
-                   acpi_ex_acquire_global_lock(obj_desc->common_field.
-                                               field_flags);
+               acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
 
                /*
                 * Perform the read.
@@ -125,7 +122,7 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
                                                             buffer.pointer),
                                               ACPI_READ | (obj_desc->field.
                                                            attribute << 16));
-               acpi_ex_release_global_lock(locked);
+               acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
                goto exit;
        }
 
@@ -175,13 +172,12 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
 
        /* Lock entire transaction if requested */
 
-       locked =
-           acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+       acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
 
        /* Read from the field */
 
        status = acpi_ex_extract_from_field(obj_desc, buffer, (u32) length);
-       acpi_ex_release_global_lock(locked);
+       acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
 
       exit:
        if (ACPI_FAILURE(status)) {
@@ -214,10 +210,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
 {
        acpi_status status;
        u32 length;
-       u32 required_length;
        void *buffer;
-       void *new_buffer;
-       u8 locked;
        union acpi_operand_object *buffer_desc;
 
        ACPI_FUNCTION_TRACE_PTR(ex_write_data_to_field, obj_desc);
@@ -278,9 +271,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
 
                /* Lock entire transaction if requested */
 
-               locked =
-                   acpi_ex_acquire_global_lock(obj_desc->common_field.
-                                               field_flags);
+               acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
 
                /*
                 * Perform the write (returns status and perhaps data in the
@@ -291,7 +282,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
                                               (acpi_integer *) buffer,
                                               ACPI_WRITE | (obj_desc->field.
                                                             attribute << 16));
-               acpi_ex_release_global_lock(locked);
+               acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
 
                *result_desc = buffer_desc;
                return_ACPI_STATUS(status);
@@ -319,35 +310,6 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
                return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
        }
 
-       /*
-        * We must have a buffer that is at least as long as the field
-        * we are writing to.  This is because individual fields are
-        * indivisible and partial writes are not supported -- as per
-        * the ACPI specification.
-        */
-       new_buffer = NULL;
-       required_length =
-           ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
-
-       if (length < required_length) {
-
-               /* We need to create a new buffer */
-
-               new_buffer = ACPI_ALLOCATE_ZEROED(required_length);
-               if (!new_buffer) {
-                       return_ACPI_STATUS(AE_NO_MEMORY);
-               }
-
-               /*
-                * Copy the original data to the new buffer, starting
-                * at Byte zero.  All unused (upper) bytes of the
-                * buffer will be 0.
-                */
-               ACPI_MEMCPY((char *)new_buffer, (char *)buffer, length);
-               buffer = new_buffer;
-               length = required_length;
-       }
-
        ACPI_DEBUG_PRINT((ACPI_DB_BFIELD,
                          "FieldWrite [FROM]: Obj %p (%s:%X), Buf %p, ByteLen %X\n",
                          source_desc,
@@ -366,19 +328,12 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
 
        /* Lock entire transaction if requested */
 
-       locked =
-           acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
+       acpi_ex_acquire_global_lock(obj_desc->common_field.field_flags);
 
        /* Write to the field */
 
        status = acpi_ex_insert_into_field(obj_desc, buffer, length);
-       acpi_ex_release_global_lock(locked);
-
-       /* Free temporary buffer if we used one */
-
-       if (new_buffer) {
-               ACPI_FREE(new_buffer);
-       }
+       acpi_ex_release_global_lock(obj_desc->common_field.field_flags);
 
        return_ACPI_STATUS(status);
 }
index 65a48b6170ee3c9803771cad7a58c2caecc04b77..e336b5dc7a50431720fa4aa437b85c5f1fd0ff94 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -263,7 +263,8 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
                              rgn_desc->region.space_id,
                              obj_desc->common_field.access_byte_width,
                              obj_desc->common_field.base_byte_offset,
-                             field_datum_byte_offset, (void *)address));
+                             field_datum_byte_offset, ACPI_CAST_PTR(void,
+                                                                    address)));
 
        /* Invoke the appropriate address_space/op_region handler */
 
@@ -805,18 +806,39 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
        u32 datum_count;
        u32 field_datum_count;
        u32 i;
+       u32 required_length;
+       void *new_buffer;
 
        ACPI_FUNCTION_TRACE(ex_insert_into_field);
 
        /* Validate input buffer */
 
-       if (buffer_length <
-           ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
-               ACPI_ERROR((AE_INFO,
-                           "Field size %X (bits) is too large for buffer (%X)",
-                           obj_desc->common_field.bit_length, buffer_length));
+       new_buffer = NULL;
+       required_length =
+           ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length);
+       /*
+        * We must have a buffer that is at least as long as the field
+        * we are writing to.  This is because individual fields are
+        * indivisible and partial writes are not supported -- as per
+        * the ACPI specification.
+        */
+       if (buffer_length < required_length) {
 
-               return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
+               /* We need to create a new buffer */
+
+               new_buffer = ACPI_ALLOCATE_ZEROED(required_length);
+               if (!new_buffer) {
+                       return_ACPI_STATUS(AE_NO_MEMORY);
+               }
+
+               /*
+                * Copy the original data to the new buffer, starting
+                * at Byte zero.  All unused (upper) bytes of the
+                * buffer will be 0.
+                */
+               ACPI_MEMCPY((char *)new_buffer, (char *)buffer, buffer_length);
+               buffer = new_buffer;
+               buffer_length = required_length;
        }
 
        /*
@@ -866,7 +888,7 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
                                                        merged_datum,
                                                        field_offset);
                if (ACPI_FAILURE(status)) {
-                       return_ACPI_STATUS(status);
+                       goto exit;
                }
 
                field_offset += obj_desc->common_field.access_byte_width;
@@ -924,5 +946,11 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc,
                                                mask, merged_datum,
                                                field_offset);
 
+      exit:
+       /* Free temporary buffer if we used one */
+
+       if (new_buffer) {
+               ACPI_FREE(new_buffer);
+       }
        return_ACPI_STATUS(status);
 }
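/*
 * Illustrative sketch (not part of the patch): the padding logic moved above
 * rounds the field size up to whole bytes and zero-extends short buffers.
 * Worked example with made-up numbers: a 35-bit field needs
 * ACPI_ROUND_BITS_UP_TO_BYTES(35) == 5 bytes, so a 2-byte source buffer is
 * copied into a zeroed 5-byte scratch buffer before the datum loop runs:
 */
static void example_pad_short_buffer(const u8 *src, u32 src_len)
{
	u32 required = ACPI_ROUND_BITS_UP_TO_BYTES(35);	/* 35-bit field -> 5 bytes */
	u8 *scratch = ACPI_ALLOCATE_ZEROED(required);

	if (!scratch)
		return;
	ACPI_MEMCPY(scratch, src, src_len);	/* unused upper bytes remain zero */
	/* ... insert scratch into the field, then ... */
	ACPI_FREE(scratch);
}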
index f13d1cec2d6db1a3d277a916e10ea0f8564d4dda..cc956a5b5267b5e0564b2836b235803bfc170182 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6748e3ef09972472d9aeeaeaa1abf639773a77a8..c873ab40cd0ec59835a426d984eb0f261fe6deb6 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -124,6 +124,79 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
        thread->acquired_mutex_list = obj_desc;
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_acquire_mutex_object
+ *
+ * PARAMETERS:  time_desc           - Timeout in milliseconds
+ *              obj_desc            - Mutex object
+ *              Thread              - Current thread state
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Acquire an AML mutex, low-level interface. Provides a common
+ *              path that supports multiple acquires by the same thread.
+ *
+ * MUTEX:       Interpreter must be locked
+ *
+ * NOTE: This interface is called from three places:
+ * 1) From acpi_ex_acquire_mutex, via an AML Acquire() operator
+ * 2) From acpi_ex_acquire_global_lock when an AML Field access requires the
+ *    global lock
+ * 3) From the external interface, acpi_acquire_global_lock
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_ex_acquire_mutex_object(u16 timeout,
+                            union acpi_operand_object *obj_desc,
+                            acpi_thread_id thread_id)
+{
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE_PTR(ex_acquire_mutex_object, obj_desc);
+
+       if (!obj_desc) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       /* Support for multiple acquires by the owning thread */
+
+       if (obj_desc->mutex.thread_id == thread_id) {
+               /*
+                * The mutex is already owned by this thread, just increment the
+                * acquisition depth
+                */
+               obj_desc->mutex.acquisition_depth++;
+               return_ACPI_STATUS(AE_OK);
+       }
+
+       /* Acquire the mutex, wait if necessary. Special case for Global Lock */
+
+       if (obj_desc == acpi_gbl_global_lock_mutex) {
+               status = acpi_ev_acquire_global_lock(timeout);
+       } else {
+               status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
+                                                  timeout);
+       }
+
+       if (ACPI_FAILURE(status)) {
+
+               /* Includes failure from a timeout on time_desc */
+
+               return_ACPI_STATUS(status);
+       }
+
+       /* Acquired the mutex: update mutex object */
+
+       obj_desc->mutex.thread_id = thread_id;
+       obj_desc->mutex.acquisition_depth = 1;
+       obj_desc->mutex.original_sync_level = 0;
+       obj_desc->mutex.owner_thread = NULL;    /* Used only for AML Acquire() */
+
+       return_ACPI_STATUS(AE_OK);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ex_acquire_mutex
@@ -151,7 +224,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
 
-       /* Sanity check: we must have a valid thread ID */
+       /* Must have a valid thread ID */
 
        if (!walk_state->thread) {
                ACPI_ERROR((AE_INFO,
@@ -161,7 +234,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
        }
 
        /*
-        * Current Sync must be less than or equal to the sync level of the
+        * Current sync level must be less than or equal to the sync level of the
         * mutex. This mechanism provides some deadlock prevention
         */
        if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
@@ -172,51 +245,89 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
                return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
        }
 
-       /* Support for multiple acquires by the owning thread */
+       status = acpi_ex_acquire_mutex_object((u16) time_desc->integer.value,
+                                             obj_desc,
+                                             walk_state->thread->thread_id);
+       if (ACPI_SUCCESS(status) && obj_desc->mutex.acquisition_depth == 1) {
 
-       if (obj_desc->mutex.owner_thread) {
-               if (obj_desc->mutex.owner_thread->thread_id ==
-                   walk_state->thread->thread_id) {
-                       /*
-                        * The mutex is already owned by this thread, just increment the
-                        * acquisition depth
-                        */
-                       obj_desc->mutex.acquisition_depth++;
-                       return_ACPI_STATUS(AE_OK);
-               }
+               /* Save Thread object, original/current sync levels */
+
+               obj_desc->mutex.owner_thread = walk_state->thread;
+               obj_desc->mutex.original_sync_level =
+                   walk_state->thread->current_sync_level;
+               walk_state->thread->current_sync_level =
+                   obj_desc->mutex.sync_level;
+
+               /* Link the mutex to the current thread for force-unlock at method exit */
+
+               acpi_ex_link_mutex(obj_desc, walk_state->thread);
        }
 
-       /* Acquire the mutex, wait if necessary. Special case for Global Lock */
+       return_ACPI_STATUS(status);
+}
 
-       if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
-               status =
-                   acpi_ev_acquire_global_lock((u16) time_desc->integer.value);
-       } else {
-               status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex,
-                                                  (u16) time_desc->integer.
-                                                  value);
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ex_release_mutex_object
+ *
+ * PARAMETERS:  obj_desc            - The object descriptor for this op
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Release a previously acquired Mutex, low level interface.
+ *              Provides a common path that supports multiple releases (after
+ *              previous multiple acquires) by the same thread.
+ *
+ * MUTEX:       Interpreter must be locked
+ *
+ * NOTE: This interface is called from three places:
+ * 1) From acpi_ex_release_mutex, via an AML Release() operator
+ * 2) From acpi_ex_release_global_lock when an AML Field access requires the
+ *    global lock
+ * 3) From the external interface, acpi_release_global_lock
+ *
+ ******************************************************************************/
+
+acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc)
+{
+       acpi_status status = AE_OK;
+
+       ACPI_FUNCTION_TRACE(ex_release_mutex_object);
+
+       if (obj_desc->mutex.acquisition_depth == 0) {
+               return (AE_NOT_ACQUIRED);
        }
 
-       if (ACPI_FAILURE(status)) {
+       /* Match multiple Acquires with multiple Releases */
 
-               /* Includes failure from a timeout on time_desc */
+       obj_desc->mutex.acquisition_depth--;
+       if (obj_desc->mutex.acquisition_depth != 0) {
 
-               return_ACPI_STATUS(status);
+               /* Just decrement the depth and return */
+
+               return_ACPI_STATUS(AE_OK);
        }
 
-       /* Have the mutex: update mutex and walk info and save the sync_level */
+       if (obj_desc->mutex.owner_thread) {
 
-       obj_desc->mutex.owner_thread = walk_state->thread;
-       obj_desc->mutex.acquisition_depth = 1;
-       obj_desc->mutex.original_sync_level =
-           walk_state->thread->current_sync_level;
+               /* Unlink the mutex from the owner's list */
 
-       walk_state->thread->current_sync_level = obj_desc->mutex.sync_level;
+               acpi_ex_unlink_mutex(obj_desc);
+               obj_desc->mutex.owner_thread = NULL;
+       }
 
-       /* Link the mutex to the current thread for force-unlock at method exit */
+       /* Release the mutex, special case for Global Lock */
 
-       acpi_ex_link_mutex(obj_desc, walk_state->thread);
-       return_ACPI_STATUS(AE_OK);
+       if (obj_desc == acpi_gbl_global_lock_mutex) {
+               status = acpi_ev_release_global_lock();
+       } else {
+               acpi_os_release_mutex(obj_desc->mutex.os_mutex);
+       }
+
+       /* Clear mutex info */
+
+       obj_desc->mutex.thread_id = 0;
+       return_ACPI_STATUS(status);
 }
 
 /*******************************************************************************
@@ -253,22 +364,13 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
                return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
        }
 
-       /* Sanity check: we must have a valid thread ID */
-
-       if (!walk_state->thread) {
-               ACPI_ERROR((AE_INFO,
-                           "Cannot release Mutex [%4.4s], null thread info",
-                           acpi_ut_get_node_name(obj_desc->mutex.node)));
-               return_ACPI_STATUS(AE_AML_INTERNAL);
-       }
-
        /*
         * The Mutex is owned, but this thread must be the owner.
         * Special case for Global Lock, any thread can release
         */
        if ((obj_desc->mutex.owner_thread->thread_id !=
             walk_state->thread->thread_id)
-           && (obj_desc->mutex.os_mutex != acpi_gbl_global_lock_mutex)) {
+           && (obj_desc != acpi_gbl_global_lock_mutex)) {
                ACPI_ERROR((AE_INFO,
                            "Thread %lX cannot release Mutex [%4.4s] acquired by thread %lX",
                            (unsigned long)walk_state->thread->thread_id,
@@ -278,45 +380,37 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
                return_ACPI_STATUS(AE_AML_NOT_OWNER);
        }
 
+       /* Must have a valid thread ID */
+
+       if (!walk_state->thread) {
+               ACPI_ERROR((AE_INFO,
+                           "Cannot release Mutex [%4.4s], null thread info",
+                           acpi_ut_get_node_name(obj_desc->mutex.node)));
+               return_ACPI_STATUS(AE_AML_INTERNAL);
+       }
+
        /*
         * The sync level of the mutex must be less than or equal to the current
         * sync level
         */
        if (obj_desc->mutex.sync_level > walk_state->thread->current_sync_level) {
                ACPI_ERROR((AE_INFO,
-                           "Cannot release Mutex [%4.4s], incorrect SyncLevel",
-                           acpi_ut_get_node_name(obj_desc->mutex.node)));
+                           "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %d current %d",
+                           acpi_ut_get_node_name(obj_desc->mutex.node),
+                           obj_desc->mutex.sync_level,
+                           walk_state->thread->current_sync_level));
                return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
        }
 
-       /* Match multiple Acquires with multiple Releases */
-
-       obj_desc->mutex.acquisition_depth--;
-       if (obj_desc->mutex.acquisition_depth != 0) {
-
-               /* Just decrement the depth and return */
-
-               return_ACPI_STATUS(AE_OK);
-       }
-
-       /* Unlink the mutex from the owner's list */
+       status = acpi_ex_release_mutex_object(obj_desc);
 
-       acpi_ex_unlink_mutex(obj_desc);
+       if (obj_desc->mutex.acquisition_depth == 0) {
 
-       /* Release the mutex, special case for Global Lock */
+               /* Restore the original sync_level */
 
-       if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
-               status = acpi_ev_release_global_lock();
-       } else {
-               acpi_os_release_mutex(obj_desc->mutex.os_mutex);
+               walk_state->thread->current_sync_level =
+                   obj_desc->mutex.original_sync_level;
        }
-
-       /* Update the mutex and restore sync_level */
-
-       obj_desc->mutex.owner_thread = NULL;
-       walk_state->thread->current_sync_level =
-           obj_desc->mutex.original_sync_level;
-
        return_ACPI_STATUS(status);
 }
 
@@ -357,7 +451,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
 
                /* Release the mutex, special case for Global Lock */
 
-               if (obj_desc->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
+               if (obj_desc == acpi_gbl_global_lock_mutex) {
 
                        /* Ignore errors */
 
@@ -369,6 +463,7 @@ void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread)
                /* Mark mutex unowned */
 
                obj_desc->mutex.owner_thread = NULL;
+               obj_desc->mutex.thread_id = 0;
 
                /* Update Thread sync_level (Last mutex is the important one) */
 
index 308eae52dc054d72c670fb8bccc8a90f6dd7e93d..817e67be36977c12aa7db442217ecf25c87676f3 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 252f10acbbccc3436a4def9c1ac305509fc6f0e1..7c3bea575e02653c2960a472c4b994a48ce9574b 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -121,6 +121,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
 
        if ((ACPI_FAILURE(status)) || walk_state->result_obj) {
                acpi_ut_remove_reference(return_desc);
+               walk_state->result_obj = NULL;
        } else {
                /* Save the return value */
 
@@ -739,26 +740,38 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
                        value = acpi_gbl_integer_byte_width;
                        break;
 
-               case ACPI_TYPE_BUFFER:
-                       value = temp_desc->buffer.length;
-                       break;
-
                case ACPI_TYPE_STRING:
                        value = temp_desc->string.length;
                        break;
 
+               case ACPI_TYPE_BUFFER:
+
+                       /* Buffer arguments may not be evaluated at this point */
+
+                       status = acpi_ds_get_buffer_arguments(temp_desc);
+                       value = temp_desc->buffer.length;
+                       break;
+
                case ACPI_TYPE_PACKAGE:
+
+                       /* Package arguments may not be evaluated at this point */
+
+                       status = acpi_ds_get_package_arguments(temp_desc);
                        value = temp_desc->package.count;
                        break;
 
                default:
                        ACPI_ERROR((AE_INFO,
-                                   "Operand is not Buf/Int/Str/Pkg - found type %s",
+                                   "Operand must be Buffer/Integer/String/Package - found type %s",
                                    acpi_ut_get_type_name(type)));
                        status = AE_AML_OPERAND_TYPE;
                        goto cleanup;
                }
 
+               if (ACPI_FAILURE(status)) {
+                       goto cleanup;
+               }
+
                /*
                 * Now that we have the size of the object, create a result
                 * object to hold the value
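
The SizeOf changes above call acpi_ds_get_buffer_arguments()/acpi_ds_get_package_arguments() first because a Buffer or Package operand may still be unevaluated when SizeOf runs, so its length or element count is not yet valid. A minimal sketch of that "evaluate lazily, then measure" pattern follows; the lazy_buffer type and build_arguments() are hypothetical stand-ins, not ACPICA structures.

#include <stdio.h>

/* Hypothetical lazily-evaluated buffer: length is only valid once built */
struct lazy_buffer {
	int built;
	size_t length;
};

static int build_arguments(struct lazy_buffer *b)
{
	if (!b->built) {
		b->length = 16;	/* the real code parses the AML argument list here */
		b->built = 1;
	}
	return 0;
}

/* SizeOf-style query: force argument evaluation before reading .length,
 * mirroring the acpi_ds_get_buffer_arguments() call added above. */
static size_t size_of(struct lazy_buffer *b)
{
	if (build_arguments(b))
		return 0;
	return b->length;
}

int main(void)
{
	struct lazy_buffer buf = { 0, 0 };

	printf("SizeOf -> %zu\n", size_of(&buf));
	return 0;
}
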
index 17e652e653799f97c9d88de6fd9e3d1a166f0ce3..8e8bbb6ccebd541008b0f15caa2ffacbe62eaf34 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -241,10 +241,6 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
                goto cleanup;
        }
 
-       /* Return the remainder */
-
-       walk_state->result_obj = return_desc1;
-
       cleanup:
        /*
         * Since the remainder is not returned indirectly, remove a reference to
@@ -259,6 +255,12 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
                acpi_ut_remove_reference(return_desc1);
        }
 
+       /* Save return object (the remainder) on success */
+
+       else {
+               walk_state->result_obj = return_desc1;
+       }
+
        return_ACPI_STATUS(status);
 }
 
@@ -490,6 +492,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
 
        if (ACPI_FAILURE(status)) {
                acpi_ut_remove_reference(return_desc);
+               walk_state->result_obj = NULL;
        }
 
        return_ACPI_STATUS(status);
@@ -583,8 +586,6 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
                return_desc->integer.value = ACPI_INTEGER_MAX;
        }
 
-       walk_state->result_obj = return_desc;
-
       cleanup:
 
        /* Delete return object on error */
@@ -593,5 +594,11 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
                acpi_ut_remove_reference(return_desc);
        }
 
+       /* Save return object on success */
+
+       else {
+               walk_state->result_obj = return_desc;
+       }
+
        return_ACPI_STATUS(status);
 }
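
The exoparg2.c hunks move the walk_state->result_obj assignment to after the error check, so a failed opcode deletes its temporary return object and never publishes a stale pointer. Roughly, the pattern is the one sketched below; the obj type, put_obj(), and do_op() are hypothetical helpers, not the ACPICA interfaces.

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
};

/* Hypothetical helper standing in for acpi_ut_remove_reference() */
static void put_obj(struct obj *o)
{
	if (o && --o->refcount == 0)
		free(o);
}

/* On success the temporary result is published through *result_out; on
 * failure it is released and *result_out stays NULL, mirroring the
 * "clear walk_state->result_obj on error" part of the fix above. */
static int do_op(int fail, struct obj **result_out)
{
	struct obj *ret;
	int status;

	*result_out = NULL;
	ret = malloc(sizeof(*ret));
	if (!ret)
		return -1;
	ret->refcount = 1;

	status = fail ? -1 : 0;		/* the real opcode work happens here */

	if (status)
		put_obj(ret);		/* delete return object on error */
	else
		*result_out = ret;	/* save return object on success */
	return status;
}

int main(void)
{
	struct obj *result = NULL;
	int status;

	status = do_op(0, &result);
	printf("ok:   %d result=%s\n", status, result ? "set" : "null");
	put_obj(result);

	status = do_op(1, &result);
	printf("fail: %d result=%s\n", status, result ? "set" : "null");
	return 0;
}
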
index 7fe67cf82cee016e47f3699aee0b2eb1d57ab407..9cb4197681af11a6d7e2d59904781cb3e0cee05e 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -260,6 +260,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
 
        if (ACPI_FAILURE(status) || walk_state->result_obj) {
                acpi_ut_remove_reference(return_desc);
+               walk_state->result_obj = NULL;
        }
 
        /* Set the return object and exit */
index bd80a9cb3d6529bce2a2804800639e4d0578d93c..67d48737af5360011b7facec5aa909d62f2d9425 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -322,8 +322,6 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
                goto cleanup;
        }
 
-       walk_state->result_obj = return_desc;
-
       cleanup:
 
        /* Delete return object on error */
@@ -332,5 +330,11 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
                acpi_ut_remove_reference(return_desc);
        }
 
+       /* Save return object on success */
+
+       else {
+               walk_state->result_obj = return_desc;
+       }
+
        return_ACPI_STATUS(status);
 }
index efe5d4b461a47a7c91351608ce876ffc76de1566..3a2f8cd4c62a9bffc6d27c25ed2b4bff3c112c0c 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -412,6 +412,7 @@ acpi_ex_prep_common_field_object(union acpi_operand_object *obj_desc,
 acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
 {
        union acpi_operand_object *obj_desc;
+       union acpi_operand_object *second_desc = NULL;
        u32 type;
        acpi_status status;
 
@@ -494,6 +495,20 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
                                  obj_desc->field.access_byte_width,
                                  obj_desc->bank_field.region_obj,
                                  obj_desc->bank_field.bank_obj));
+
+               /*
+                * Remember location in AML stream of the field unit
+                * opcode and operands -- since the bank_value
+                * operands must be evaluated.
+                */
+               second_desc = obj_desc->common.next_object;
+               second_desc->extra.aml_start =
+                   ((union acpi_parse_object *)(info->data_register_node))->
+                   named.data;
+               second_desc->extra.aml_length =
+                   ((union acpi_parse_object *)(info->data_register_node))->
+                   named.length;
+
                break;
 
        case ACPI_TYPE_LOCAL_INDEX_FIELD:
index 3f51b7e84a17f532801dd47bdff391f5252bcc55..7cd8bb54fa016ba38aa07a7f1109e8304a7a8c05 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -160,7 +160,7 @@ acpi_ex_system_memory_space_handler(u32 function,
                if (!mem_info->mapped_logical_address) {
                        ACPI_ERROR((AE_INFO,
                                    "Could not map memory at %8.8X%8.8X, size %X",
-                                   ACPI_FORMAT_UINT64(address),
+                                   ACPI_FORMAT_NATIVE_UINT(address),
                                    (u32) window_size));
                        mem_info->mapped_length = 0;
                        return_ACPI_STATUS(AE_NO_MEMORY);
@@ -182,7 +182,8 @@ acpi_ex_system_memory_space_handler(u32 function,
 
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "System-Memory (width %d) R/W %d Address=%8.8X%8.8X\n",
-                         bit_width, function, ACPI_FORMAT_UINT64(address)));
+                         bit_width, function,
+                         ACPI_FORMAT_NATIVE_UINT(address)));
 
        /*
         * Perform the memory read or write
@@ -284,7 +285,8 @@ acpi_ex_system_io_space_handler(u32 function,
 
        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "System-IO (width %d) R/W %d Address=%8.8X%8.8X\n",
-                         bit_width, function, ACPI_FORMAT_UINT64(address)));
+                         bit_width, function,
+                         ACPI_FORMAT_NATIVE_UINT(address)));
 
        /* Decode the function parameter */
 
index 2b3a01cc4929e2bf9b0dbd7c2f22707a89d3fdd2..5596f42c9676038c329f6f3d0f473773adbfcb3f 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -116,9 +116,11 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
         * Several object types require no further processing:
         * 1) Device/Thermal objects don't have a "real" subobject, return the Node
         * 2) Method locals and arguments have a pseudo-Node
+        * 3) 10/2007: Added method type to assist with Package construction.
         */
        if ((entry_type == ACPI_TYPE_DEVICE) ||
            (entry_type == ACPI_TYPE_THERMAL) ||
+           (entry_type == ACPI_TYPE_METHOD) ||
            (node->flags & (ANOBJ_METHOD_ARG | ANOBJ_METHOD_LOCAL))) {
                return_ACPI_STATUS(AE_OK);
        }
@@ -214,7 +216,6 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
                /* For these objects, just return the object attached to the Node */
 
        case ACPI_TYPE_MUTEX:
-       case ACPI_TYPE_METHOD:
        case ACPI_TYPE_POWER:
        case ACPI_TYPE_PROCESSOR:
        case ACPI_TYPE_EVENT:
@@ -238,13 +239,12 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
        case ACPI_TYPE_LOCAL_REFERENCE:
 
                switch (source_desc->reference.opcode) {
-               case AML_LOAD_OP:
+               case AML_LOAD_OP:       /* This is a ddb_handle */
+               case AML_REF_OF_OP:
+               case AML_INDEX_OP:
 
-                       /* This is a ddb_handle */
                        /* Return an additional reference to the object */
 
-               case AML_REF_OF_OP:
-
                        obj_desc = source_desc;
                        acpi_ut_add_reference(obj_desc);
                        break;
index 6c64e55dab0e467178948fb8082fc7f9e4e89041..b35f7c817acf169a244b21e052d09d26db074504 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -140,7 +140,6 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
 {
        acpi_status status = AE_OK;
        union acpi_operand_object *stack_desc;
-       void *temp_node;
        union acpi_operand_object *obj_desc = NULL;
        u16 opcode;
 
@@ -156,23 +155,6 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
                opcode = stack_desc->reference.opcode;
 
                switch (opcode) {
-               case AML_NAME_OP:
-
-                       /*
-                        * Convert name reference to a namespace node
-                        * Then, acpi_ex_resolve_node_to_value can be used to get the value
-                        */
-                       temp_node = stack_desc->reference.object;
-
-                       /* Delete the Reference Object */
-
-                       acpi_ut_remove_reference(stack_desc);
-
-                       /* Return the namespace node */
-
-                       (*stack_ptr) = temp_node;
-                       break;
-
                case AML_LOCAL_OP:
                case AML_ARG_OP:
 
@@ -207,15 +189,25 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
                        switch (stack_desc->reference.target_type) {
                        case ACPI_TYPE_BUFFER_FIELD:
 
-                               /* Just return - leave the Reference on the stack */
+                               /* Just return - do not dereference */
                                break;
 
                        case ACPI_TYPE_PACKAGE:
 
+                               /* If method call or copy_object - do not dereference */
+
+                               if ((walk_state->opcode ==
+                                    AML_INT_METHODCALL_OP)
+                                   || (walk_state->opcode == AML_COPY_OP)) {
+                                       break;
+                               }
+
+                               /* Otherwise, dereference the package_index to a package element */
+
                                obj_desc = *stack_desc->reference.where;
                                if (obj_desc) {
                                        /*
-                                        * Valid obj descriptor, copy pointer to return value
+                                        * Valid object descriptor, copy pointer to return value
                                         * (i.e., dereference the package index)
                                         * Delete the ref object, increment the returned object
                                         */
@@ -224,11 +216,11 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
                                        *stack_ptr = obj_desc;
                                } else {
                                        /*
-                                        * A NULL object descriptor means an unitialized element of
+                                        * A NULL object descriptor means an uninitialized element of
                                         * the package, can't dereference it
                                         */
                                        ACPI_ERROR((AE_INFO,
-                                                   "Attempt to deref an Index to NULL pkg element Idx=%p",
+                                                   "Attempt to dereference an Index to NULL package element Idx=%p",
                                                    stack_desc));
                                        status = AE_AML_UNINITIALIZED_ELEMENT;
                                }
@@ -239,7 +231,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
                                /* Invalid reference object */
 
                                ACPI_ERROR((AE_INFO,
-                                           "Unknown TargetType %X in Index/Reference obj %p",
+                                           "Unknown TargetType %X in Index/Reference object %p",
                                            stack_desc->reference.target_type,
                                            stack_desc));
                                status = AE_AML_INTERNAL;
@@ -251,7 +243,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
                case AML_DEBUG_OP:
                case AML_LOAD_OP:
 
-                       /* Just leave the object as-is */
+                       /* Just leave the object as-is, do not dereference */
 
                        break;
 
@@ -390,10 +382,10 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
        }
 
        /*
-        * For reference objects created via the ref_of or Index operators,
-        * we need to get to the base object (as per the ACPI specification
-        * of the object_type and size_of operators). This means traversing
-        * the list of possibly many nested references.
+        * For reference objects created via the ref_of, Index, or Load/load_table
+        * operators, we need to get to the base object (as per the ACPI
+        * specification of the object_type and size_of operators). This means
+        * traversing the list of possibly many nested references.
         */
        while (ACPI_GET_OBJECT_TYPE(obj_desc) == ACPI_TYPE_LOCAL_REFERENCE) {
                switch (obj_desc->reference.opcode) {
@@ -463,6 +455,11 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
                        }
                        break;
 
+               case AML_LOAD_OP:
+
+                       type = ACPI_TYPE_DDB_HANDLE;
+                       goto exit;
+
                case AML_LOCAL_OP:
                case AML_ARG_OP:
 
index 09d897b3f6d5b1848490eac18b9d09370409f782..73e29e566a70450eca74348cbfa3a723316999f6 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -137,7 +137,6 @@ acpi_ex_resolve_operands(u16 opcode,
        union acpi_operand_object *obj_desc;
        acpi_status status = AE_OK;
        u8 object_type;
-       void *temp_node;
        u32 arg_types;
        const struct acpi_opcode_info *op_info;
        u32 this_arg_type;
@@ -239,7 +238,6 @@ acpi_ex_resolve_operands(u16 opcode,
 
                                        /*lint -fallthrough */
 
-                               case AML_NAME_OP:
                                case AML_INDEX_OP:
                                case AML_REF_OF_OP:
                                case AML_ARG_OP:
@@ -332,15 +330,6 @@ acpi_ex_resolve_operands(u16 opcode,
                        if (ACPI_FAILURE(status)) {
                                return_ACPI_STATUS(status);
                        }
-
-                       if (obj_desc->reference.opcode == AML_NAME_OP) {
-
-                               /* Convert a named reference to the actual named object */
-
-                               temp_node = obj_desc->reference.object;
-                               acpi_ut_remove_reference(obj_desc);
-                               (*stack_ptr) = temp_node;
-                       }
                        goto next_operand;
 
                case ARGI_DATAREFOBJ:   /* Store operator only */
index f4b69a63782091ffe4ba226c397793344bf6aede..76c875bc3154cf8ff799efd80fc6ae3d047aab0c 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -84,8 +84,12 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
 
        ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
 
-       ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
-                             level, " "));
+       /* Print line header as long as we are not in the middle of an object display */
+
+       if (!((level > 0) && index == 0)) {
+               ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
+                                     level, " "));
+       }
 
        /* Display index for package output only */
 
@@ -95,12 +99,12 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
        }
 
        if (!source_desc) {
-               ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "<Null Object>\n"));
+               ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[Null Object]\n"));
                return_VOID;
        }
 
        if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
-               ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s: ",
+               ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s ",
                                      acpi_ut_get_object_type_name
                                      (source_desc)));
 
@@ -123,6 +127,8 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
                return_VOID;
        }
 
+       /* source_desc is of type ACPI_DESC_TYPE_OPERAND */
+
        switch (ACPI_GET_OBJECT_TYPE(source_desc)) {
        case ACPI_TYPE_INTEGER:
 
@@ -147,7 +153,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
                                      (u32) source_desc->buffer.length));
                ACPI_DUMP_BUFFER(source_desc->buffer.pointer,
                                 (source_desc->buffer.length <
-                                 32) ? source_desc->buffer.length : 32);
+                                 256) ? source_desc->buffer.length : 256);
                break;
 
        case ACPI_TYPE_STRING:
@@ -160,7 +166,7 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
        case ACPI_TYPE_PACKAGE:
 
                ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
-                                     "[0x%.2X Elements]\n",
+                                     "[Contains 0x%.2X Elements]\n",
                                      source_desc->package.count));
 
                /* Output the entire contents of the package */
@@ -180,12 +186,59 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
                                              (source_desc->reference.opcode),
                                              source_desc->reference.offset));
                } else {
-                       ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s]\n",
+                       ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s]",
                                              acpi_ps_get_opcode_name
                                              (source_desc->reference.opcode)));
                }
 
-               if (source_desc->reference.object) {
+               if (source_desc->reference.opcode == AML_LOAD_OP) {     /* Load and load_table */
+                       ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+                                             " Table OwnerId %p\n",
+                                             source_desc->reference.object));
+                       break;
+               }
+
+               ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "  "));
+
+               /* Check for valid node first, then valid object */
+
+               if (source_desc->reference.node) {
+                       if (ACPI_GET_DESCRIPTOR_TYPE
+                           (source_desc->reference.node) !=
+                           ACPI_DESC_TYPE_NAMED) {
+                               ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+                                                     " %p - Not a valid namespace node\n",
+                                                     source_desc->reference.
+                                                     node));
+                       } else {
+                               ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
+                                                     "Node %p [%4.4s] ",
+                                                     source_desc->reference.
+                                                     node,
+                                                     (source_desc->reference.
+                                                      node)->name.ascii));
+
+                               switch ((source_desc->reference.node)->type) {
+
+                                       /* These types have no attached object */
+
+                               case ACPI_TYPE_DEVICE:
+                                       acpi_os_printf("Device\n");
+                                       break;
+
+                               case ACPI_TYPE_THERMAL:
+                                       acpi_os_printf("Thermal Zone\n");
+                                       break;
+
+                               default:
+                                       acpi_ex_do_debug_object((source_desc->
+                                                                reference.
+                                                                node)->object,
+                                                               level + 4, 0);
+                                       break;
+                               }
+                       }
+               } else if (source_desc->reference.object) {
                        if (ACPI_GET_DESCRIPTOR_TYPE
                            (source_desc->reference.object) ==
                            ACPI_DESC_TYPE_NAMED) {
@@ -198,18 +251,13 @@ acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
                                acpi_ex_do_debug_object(source_desc->reference.
                                                        object, level + 4, 0);
                        }
-               } else if (source_desc->reference.node) {
-                       acpi_ex_do_debug_object((source_desc->reference.node)->
-                                               object, level + 4, 0);
                }
                break;
 
        default:
 
-               ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p %s\n",
-                                     source_desc,
-                                     acpi_ut_get_object_type_name
-                                     (source_desc)));
+               ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p\n",
+                                     source_desc));
                break;
        }
 
@@ -313,7 +361,6 @@ acpi_ex_store(union acpi_operand_object *source_desc,
         * 4) Store to the debug object
         */
        switch (ref_desc->reference.opcode) {
-       case AML_NAME_OP:
        case AML_REF_OF_OP:
 
                /* Storing an object into a Name "container" */
@@ -415,11 +462,24 @@ acpi_ex_store_object_to_index(union acpi_operand_object *source_desc,
                 */
                obj_desc = *(index_desc->reference.where);
 
-               status =
-                   acpi_ut_copy_iobject_to_iobject(source_desc, &new_desc,
-                                                   walk_state);
-               if (ACPI_FAILURE(status)) {
-                       return_ACPI_STATUS(status);
+               if (ACPI_GET_OBJECT_TYPE(source_desc) ==
+                   ACPI_TYPE_LOCAL_REFERENCE
+                   && source_desc->reference.opcode == AML_LOAD_OP) {
+
+                       /* This is a DDBHandle, just add a reference to it */
+
+                       acpi_ut_add_reference(source_desc);
+                       new_desc = source_desc;
+               } else {
+                       /* Normal object, copy it */
+
+                       status =
+                           acpi_ut_copy_iobject_to_iobject(source_desc,
+                                                           &new_desc,
+                                                           walk_state);
+                       if (ACPI_FAILURE(status)) {
+                               return_ACPI_STATUS(status);
+                       }
                }
 
                if (obj_desc) {
@@ -571,10 +631,17 @@ acpi_ex_store_object_to_node(union acpi_operand_object *source_desc,
 
        /* If no implicit conversion, drop into the default case below */
 
-       if ((!implicit_conversion) || (walk_state->opcode == AML_COPY_OP)) {
-
-               /* Force execution of default (no implicit conversion) */
-
+       if ((!implicit_conversion) ||
+           ((walk_state->opcode == AML_COPY_OP) &&
+            (target_type != ACPI_TYPE_LOCAL_REGION_FIELD) &&
+            (target_type != ACPI_TYPE_LOCAL_BANK_FIELD) &&
+            (target_type != ACPI_TYPE_LOCAL_INDEX_FIELD))) {
+               /*
+                * Force execution of default (no implicit conversion). Note:
+                * copy_object does not perform an implicit conversion, as per the ACPI
+                * spec -- except in case of region/bank/index fields -- because these
+                * objects must retain their original type permanently.
+                */
                target_type = ACPI_TYPE_ANY;
        }
 
index 1d622c625c6406bc91a7e7b0f7542e4be107f955..a6d2168b81f99f9301f091ba3cf6ba4a0eb48b1d 100644 (file)
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8233d40178eed88a5d6863578ff9f78451a7b2ea..9a75ff09fb0c46b38bf849261d6b860949251cf7 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 9460baff30328f4c5565e2c5d671eabfdf4f634c..68990f1df371df76886f196ecbc5d42a7a867da3 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -44,7 +44,6 @@
 
 #include <acpi/acpi.h>
 #include <acpi/acinterp.h>
-#include <acpi/acevents.h>
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exsystem")
index 6b0aeccbb69b3f59738d5a9dd22fe875a1a2d018..86c03880b5233a32a524d7efc5a583c4fb73ea7e 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -61,7 +61,6 @@
 #include <acpi/acpi.h>
 #include <acpi/acinterp.h>
 #include <acpi/amlcode.h>
-#include <acpi/acevents.h>
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exutils")
@@ -217,9 +216,10 @@ void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc)
 
        /*
         * Object must be a valid number and we must be executing
-        * a control method
+        * a control method. NS node could be there for AML_INT_NAMEPATH_OP.
         */
        if ((!obj_desc) ||
+           (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) != ACPI_DESC_TYPE_OPERAND) ||
            (ACPI_GET_OBJECT_TYPE(obj_desc) != ACPI_TYPE_INTEGER)) {
                return;
        }
@@ -240,72 +240,73 @@ void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc)
  * PARAMETERS:  field_flags           - Flags with Lock rule:
  *                                      always_lock or never_lock
  *
- * RETURN:      TRUE/FALSE indicating whether the lock was actually acquired
+ * RETURN:      None
  *
- * DESCRIPTION: Obtain the global lock and keep track of this fact via two
- *              methods.  A global variable keeps the state of the lock, and
- *              the state is returned to the caller.
+ * DESCRIPTION: Obtain the ACPI hardware Global Lock, only if the field
+ *              flags specify that it is to be obtained before field access.
  *
  ******************************************************************************/
 
-u8 acpi_ex_acquire_global_lock(u32 field_flags)
+void acpi_ex_acquire_global_lock(u32 field_flags)
 {
-       u8 locked = FALSE;
        acpi_status status;
 
        ACPI_FUNCTION_TRACE(ex_acquire_global_lock);
 
-       /* Only attempt lock if the always_lock bit is set */
+       /* Only use the lock if the always_lock bit is set */
+
+       if (!(field_flags & AML_FIELD_LOCK_RULE_MASK)) {
+               return_VOID;
+       }
 
-       if (field_flags & AML_FIELD_LOCK_RULE_MASK) {
+       /* Attempt to get the global lock, wait forever */
 
-               /* We should attempt to get the lock, wait forever */
+       status = acpi_ex_acquire_mutex_object(ACPI_WAIT_FOREVER,
+                                             acpi_gbl_global_lock_mutex,
+                                             acpi_os_get_thread_id());
 
-               status = acpi_ev_acquire_global_lock(ACPI_WAIT_FOREVER);
-               if (ACPI_SUCCESS(status)) {
-                       locked = TRUE;
-               } else {
-                       ACPI_EXCEPTION((AE_INFO, status,
-                                       "Could not acquire Global Lock"));
-               }
+       if (ACPI_FAILURE(status)) {
+               ACPI_EXCEPTION((AE_INFO, status,
+                               "Could not acquire Global Lock"));
        }
 
-       return_UINT8(locked);
+       return_VOID;
 }
 
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ex_release_global_lock
  *
- * PARAMETERS:  locked_by_me    - Return value from corresponding call to
- *                                acquire_global_lock.
+ * PARAMETERS:  field_flags           - Flags with Lock rule:
+ *                                      always_lock or never_lock
  *
  * RETURN:      None
  *
- * DESCRIPTION: Release the global lock if it is locked.
+ * DESCRIPTION: Release the ACPI hardware Global Lock
  *
  ******************************************************************************/
 
-void acpi_ex_release_global_lock(u8 locked_by_me)
+void acpi_ex_release_global_lock(u32 field_flags)
 {
        acpi_status status;
 
        ACPI_FUNCTION_TRACE(ex_release_global_lock);
 
-       /* Only attempt unlock if the caller locked it */
+       /* Only use the lock if the always_lock bit is set */
 
-       if (locked_by_me) {
+       if (!(field_flags & AML_FIELD_LOCK_RULE_MASK)) {
+               return_VOID;
+       }
 
-               /* OK, now release the lock */
+       /* Release the global lock */
 
-               status = acpi_ev_release_global_lock();
-               if (ACPI_FAILURE(status)) {
+       status = acpi_ex_release_mutex_object(acpi_gbl_global_lock_mutex);
+       if (ACPI_FAILURE(status)) {
 
-                       /* Report the error, but there isn't much else we can do */
+               /* Report the error, but there isn't much else we can do */
 
-                       ACPI_EXCEPTION((AE_INFO, status,
-                                       "Could not release ACPI Global Lock"));
-               }
+               ACPI_EXCEPTION((AE_INFO, status,
+                               "Could not release Global Lock"));
        }
 
        return_VOID;
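
With this rework, acpi_ex_acquire_global_lock()/acpi_ex_release_global_lock() take the field flags directly and quietly do nothing unless the field's lock rule requests the Global Lock, instead of returning a "locked" flag that the caller must thread back into the release call. The sketch below only models that behavior; FIELD_LOCK_RULE_MASK, the depth counter, and the field_lock()/field_unlock() names are illustrative stand-ins, not the ACPICA definitions.

#include <stdio.h>

/* Hypothetical mirror of the AML field lock-rule bit (not the ACPICA macro) */
#define FIELD_LOCK_RULE_MASK	0x10

static int global_lock_depth;	/* stand-in for the shared Global Lock state */

/* Acquire the (modeled) Global Lock only when the field's lock rule asks
 * for it; callers no longer have to remember a "locked" return value. */
static void field_lock(unsigned int field_flags)
{
	if (!(field_flags & FIELD_LOCK_RULE_MASK))
		return;
	global_lock_depth++;	/* the real code waits on the hardware lock */
}

static void field_unlock(unsigned int field_flags)
{
	if (!(field_flags & FIELD_LOCK_RULE_MASK))
		return;
	global_lock_depth--;
}

int main(void)
{
	field_lock(FIELD_LOCK_RULE_MASK);	/* lock rule: always_lock */
	printf("depth during locked access: %d\n", global_lock_depth);
	field_unlock(FIELD_LOCK_RULE_MASK);

	field_lock(0);				/* lock rule: never_lock, so a no-op */
	printf("depth during unlocked access: %d\n", global_lock_depth);
	return 0;
}
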
index c8e3cba423ef19d7548b63bfdb041d266454d4e1..6cf10cbc1eee41687e462d14168827d6830b4cb0 100644 (file)
@@ -192,17 +192,13 @@ static int acpi_fan_add_fs(struct acpi_device *device)
        }
 
        /* 'status' [R/W] */
-       entry = create_proc_entry(ACPI_FAN_FILE_STATE,
-                                 S_IFREG | S_IRUGO | S_IWUSR,
-                                 acpi_device_dir(device));
+       entry = proc_create_data(ACPI_FAN_FILE_STATE,
+                                S_IFREG | S_IRUGO | S_IWUSR,
+                                acpi_device_dir(device),
+                                &acpi_fan_state_ops,
+                                device);
        if (!entry)
                return -ENODEV;
-       else {
-               entry->proc_fops = &acpi_fan_state_ops;
-               entry->data = device;
-               entry->owner = THIS_MODULE;
-       }
-
        return 0;
 }
 
@@ -260,24 +256,23 @@ static int acpi_fan_add(struct acpi_device *device)
                result = PTR_ERR(cdev);
                goto end;
        }
-       if (cdev) {
-               printk(KERN_INFO PREFIX
-                       "%s is registered as cooling_device%d\n",
-                       device->dev.bus_id, cdev->id);
-
-               acpi_driver_data(device) = cdev;
-               result = sysfs_create_link(&device->dev.kobj,
-                                          &cdev->device.kobj,
-                                          "thermal_cooling");
-               if (result)
-                       return result;
-
-               result = sysfs_create_link(&cdev->device.kobj,
-                                          &device->dev.kobj,
-                                          "device");
-               if (result)
-                       return result;
-       }
+
+       printk(KERN_INFO PREFIX
+               "%s is registered as cooling_device%d\n",
+               device->dev.bus_id, cdev->id);
+
+       acpi_driver_data(device) = cdev;
+       result = sysfs_create_link(&device->dev.kobj,
+                                  &cdev->device.kobj,
+                                  "thermal_cooling");
+       if (result)
+               printk(KERN_ERR PREFIX "Create sysfs link\n");
+
+       result = sysfs_create_link(&cdev->device.kobj,
+                                  &device->dev.kobj,
+                                  "device");
+       if (result)
+               printk(KERN_ERR PREFIX "Create sysfs link\n");
 
        result = acpi_fan_add_fs(device);
        if (result)
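
The fan driver hunk above replaces create_proc_entry() plus manual assignment of proc_fops, data, and owner with a single proc_create_data() call, which registers the entry with its file operations and private data already attached. Below is a sketch of the same call in a hypothetical driver fragment, written against the procfs API of this kernel era; example_fops, example_data, and example_add_proc() are placeholders, and the fragment is not wired into a module.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/errno.h>

static const struct file_operations example_fops;	/* placeholder fops */
static void *example_data;				/* placeholder private data */

static int example_add_proc(struct proc_dir_entry *parent)
{
	struct proc_dir_entry *entry;

	/* One call creates the entry with fops and data already set,
	 * avoiding the window where the entry exists without its fops. */
	entry = proc_create_data("state", S_IFREG | S_IRUGO | S_IWUSR,
				 parent, &example_fops, example_data);
	if (!entry)
		return -ENODEV;
	return 0;
}
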
index eda0978b57c6946b9580ab24869794a2bc14b2bd..06f8634fe58b192498ad73c5e4d4a4e715b01cf3 100644 (file)
@@ -142,6 +142,7 @@ EXPORT_SYMBOL(acpi_get_physical_device);
 
 static int acpi_bind_one(struct device *dev, acpi_handle handle)
 {
+       struct acpi_device *acpi_dev;
        acpi_status status;
 
        if (dev->archdata.acpi_handle) {
@@ -157,6 +158,16 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle)
        }
        dev->archdata.acpi_handle = handle;
 
+       status = acpi_bus_get_device(handle, &acpi_dev);
+       if (!ACPI_FAILURE(status)) {
+               int ret;
+
+               ret = sysfs_create_link(&dev->kobj, &acpi_dev->dev.kobj,
+                               "firmware_node");
+               ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj,
+                               "physical_node");
+       }
+
        return 0;
 }
 
@@ -165,8 +176,17 @@ static int acpi_unbind_one(struct device *dev)
        if (!dev->archdata.acpi_handle)
                return 0;
        if (dev == acpi_get_physical_device(dev->archdata.acpi_handle)) {
+               struct acpi_device *acpi_dev;
+
                /* acpi_get_physical_device increase refcnt by one */
                put_device(dev);
+
+               if (!acpi_bus_get_device(dev->archdata.acpi_handle,
+                                       &acpi_dev)) {
+                       sysfs_remove_link(&dev->kobj, "firmware_node");
+                       sysfs_remove_link(&acpi_dev->dev.kobj, "physical_node");
+               }
+
                acpi_detach_data(dev->archdata.acpi_handle,
                                 acpi_glue_data_handler);
                dev->archdata.acpi_handle = NULL;
index 6031ca13dd2f2b9c73b66abb56aca71edd5f1d3b..816894ea839eba7fe21a85bd68f97f9329a715dc 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 117a05cadaaa9014ca2a6c5b29a5fa566accca08..14bc4f456ae8c3762889fef4c3badaf812f9977b 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 73f9c5fb1ba74b54022ed1490d8fc378c693b8af..ddf792adcf9619fe581c9067c06761d8b2780955 100644 (file)
@@ -7,7 +7,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4290e0193097d28fd2405b814ac0352a0cf5fb93..d9937e05ec6a237d0eff801091dca208eaadcb3c 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -70,9 +70,10 @@ acpi_set_firmware_waking_vector(acpi_physical_address physical_address)
 
        /* Get the FACS */
 
-       status =
-           acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
-                                   (struct acpi_table_header **)&facs);
+       status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
+                                        ACPI_CAST_INDIRECT_PTR(struct
+                                                               acpi_table_header,
+                                                               &facs));
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }
@@ -124,9 +125,10 @@ acpi_get_firmware_waking_vector(acpi_physical_address * physical_address)
 
        /* Get the FACS */
 
-       status =
-           acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
-                                   (struct acpi_table_header **)&facs);
+       status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
+                                        ACPI_CAST_INDIRECT_PTR(struct
+                                                               acpi_table_header,
+                                                               &facs));
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }
index c32eab696acd11f4fad79daab380c9d189faaf71..b53d575491b908719643dc7b6f02d7405909d5cb 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 57faf598bad8f2fe98055f34589f23a2c341d976..c39a7f68b88929adb1fd86c94c6db3922b6a0829 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -208,8 +208,7 @@ acpi_status acpi_ns_root_initialize(void)
                                /* Special case for ACPI Global Lock */
 
                                if (ACPI_STRCMP(init_val->name, "_GL_") == 0) {
-                                       acpi_gbl_global_lock_mutex =
-                                           obj_desc->mutex.os_mutex;
+                                       acpi_gbl_global_lock_mutex = obj_desc;
 
                                        /* Create additional counting semaphore for global lock */
 
@@ -582,44 +581,68 @@ acpi_ns_lookup(union acpi_generic_state *scope_info,
                        return_ACPI_STATUS(status);
                }
 
-               /*
-                * Sanity typecheck of the target object:
-                *
-                * If 1) This is the last segment (num_segments == 0)
-                *    2) And we are looking for a specific type
-                *       (Not checking for TYPE_ANY)
-                *    3) Which is not an alias
-                *    4) Which is not a local type (TYPE_SCOPE)
-                *    5) And the type of target object is known (not TYPE_ANY)
-                *    6) And target object does not match what we are looking for
-                *
-                * Then we have a type mismatch.  Just warn and ignore it.
-                */
-               if ((num_segments == 0) &&
-                   (type_to_check_for != ACPI_TYPE_ANY) &&
-                   (type_to_check_for != ACPI_TYPE_LOCAL_ALIAS) &&
-                   (type_to_check_for != ACPI_TYPE_LOCAL_METHOD_ALIAS) &&
-                   (type_to_check_for != ACPI_TYPE_LOCAL_SCOPE) &&
-                   (this_node->type != ACPI_TYPE_ANY) &&
-                   (this_node->type != type_to_check_for)) {
-
-                       /* Complain about a type mismatch */
-
-                       ACPI_WARNING((AE_INFO,
-                                     "NsLookup: Type mismatch on %4.4s (%s), searching for (%s)",
-                                     ACPI_CAST_PTR(char, &simple_name),
-                                     acpi_ut_get_type_name(this_node->type),
-                                     acpi_ut_get_type_name
-                                     (type_to_check_for)));
+               /* More segments to follow? */
+
+               if (num_segments > 0) {
+                       /*
+                        * If we have an alias to an object that opens a scope (such as a
+                        * device or processor), we need to dereference the alias here so that
+                        * we can access any children of the original node (via the remaining
+                        * segments).
+                        */
+                       if (this_node->type == ACPI_TYPE_LOCAL_ALIAS) {
+                               if (acpi_ns_opens_scope
+                                   (((struct acpi_namespace_node *)this_node->
+                                     object)->type)) {
+                                       this_node =
+                                           (struct acpi_namespace_node *)
+                                           this_node->object;
+                               }
+                       }
                }
 
-               /*
-                * If this is the last name segment and we are not looking for a
-                * specific type, but the type of found object is known, use that type
-                * to see if it opens a scope.
-                */
-               if ((num_segments == 0) && (type == ACPI_TYPE_ANY)) {
-                       type = this_node->type;
+               /* Special handling for the last segment (num_segments == 0) */
+
+               else {
+                       /*
+                        * Sanity typecheck of the target object:
+                        *
+                        * If 1) This is the last segment (num_segments == 0)
+                        *    2) And we are looking for a specific type
+                        *       (Not checking for TYPE_ANY)
+                        *    3) Which is not an alias
+                        *    4) Which is not a local type (TYPE_SCOPE)
+                        *    5) And the type of target object is known (not TYPE_ANY)
+                        *    6) And target object does not match what we are looking for
+                        *
+                        * Then we have a type mismatch. Just warn and ignore it.
+                        */
+                       if ((type_to_check_for != ACPI_TYPE_ANY) &&
+                           (type_to_check_for != ACPI_TYPE_LOCAL_ALIAS) &&
+                           (type_to_check_for != ACPI_TYPE_LOCAL_METHOD_ALIAS)
+                           && (type_to_check_for != ACPI_TYPE_LOCAL_SCOPE)
+                           && (this_node->type != ACPI_TYPE_ANY)
+                           && (this_node->type != type_to_check_for)) {
+
+                               /* Complain about a type mismatch */
+
+                               ACPI_WARNING((AE_INFO,
+                                             "NsLookup: Type mismatch on %4.4s (%s), searching for (%s)",
+                                             ACPI_CAST_PTR(char, &simple_name),
+                                             acpi_ut_get_type_name(this_node->
+                                                                   type),
+                                             acpi_ut_get_type_name
+                                             (type_to_check_for)));
+                       }
+
+                       /*
+                        * If this is the last name segment and we are not looking for a
+                        * specific type, but the type of found object is known, use that type
+                        * to (later) see if it opens a scope.
+                        */
+                       if (type == ACPI_TYPE_ANY) {
+                               type = this_node->type;
+                       }
                }
 
                /* Point to next name segment and make this node current */
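A note on the new branch above: when more path segments remain and the current node is an alias to a scope-opening object, the alias is dereferenced so children of the original node stay reachable. The following standalone sketch (hypothetical node type and helper names, not the ACPICA structures) illustrates that idea only:

    /* Minimal sketch of "walk through an alias before descending". */
    enum node_type { NODE_DEVICE, NODE_METHOD, NODE_ALIAS };

    struct ns_node {
        enum node_type type;
        struct ns_node *object;   /* for an alias: the node it refers to */
        struct ns_node *child;
    };

    /* Hypothetical stand-in for a "does this type open a scope?" check. */
    static int opens_scope(enum node_type type)
    {
        return type == NODE_DEVICE;
    }

    static struct ns_node *resolve_for_descent(struct ns_node *node,
                                               unsigned int segments_left)
    {
        /* Only dereference when more segments follow and the alias target
         * actually opens a scope; otherwise keep the alias node itself. */
        if (segments_left > 0 && node->type == NODE_ALIAS &&
            opens_scope(node->object->type))
            return node->object;
        return node;
    }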
index 1d693d8ad2d8a044e528920331513254738d98b1..3a1740ac2edcc012a7b51eabe889e03c3bd637d4 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1fc4f86676e1969b5981ef6ffae26a853ad74606..5445751b8a3e7ee6e9f3eae788fc297995d36d43 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -249,7 +249,9 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
                        acpi_os_printf("ID %X Len %.4X Addr %p\n",
                                       obj_desc->processor.proc_id,
                                       obj_desc->processor.length,
-                                      (char *)obj_desc->processor.address);
+                                      ACPI_CAST_PTR(void,
+                                                    obj_desc->processor.
+                                                    address));
                        break;
 
                case ACPI_TYPE_DEVICE:
@@ -320,9 +322,8 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
                                                               space_id));
                        if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
                                acpi_os_printf(" Addr %8.8X%8.8X Len %.4X\n",
-                                              ACPI_FORMAT_UINT64(obj_desc->
-                                                                 region.
-                                                                 address),
+                                              ACPI_FORMAT_NATIVE_UINT
+                                              (obj_desc->region.address),
                                               obj_desc->region.length);
                        } else {
                                acpi_os_printf
index 5097e167939ea44101446d6a19e29f1e3ff02438..428f50fde11a8cbe876d2a130e7cc09b791df75f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 97b2ac57c16be69d7ad94c1899410f806164e9af..14bdfa92bea04cc9da1bfde6a0344ad57a29e2f4 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 33db2241044e5930430c5091f6580c9013721f15..6d6d930c8e1820d21d0bc40441065dddf9ba0ef9 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -244,6 +244,10 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
                info->field_count++;
                break;
 
+       case ACPI_TYPE_LOCAL_BANK_FIELD:
+               info->field_count++;
+               break;
+
        case ACPI_TYPE_BUFFER:
                info->buffer_count++;
                break;
@@ -287,6 +291,12 @@ acpi_ns_init_one_object(acpi_handle obj_handle,
                status = acpi_ds_get_buffer_field_arguments(obj_desc);
                break;
 
+       case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+               info->field_init++;
+               status = acpi_ds_get_bank_field_arguments(obj_desc);
+               break;
+
        case ACPI_TYPE_BUFFER:
 
                info->buffer_init++;
index d4f9654fd20f315289d02af1cdb33bd335088e38..2c92f6cf5ce173c94a4a2aeaf64294e036089ded 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -107,11 +107,11 @@ acpi_ns_load_table(acpi_native_uint table_index,
                goto unlock;
        }
 
-       status = acpi_ns_parse_table(table_index, node->child);
+       status = acpi_ns_parse_table(table_index, node);
        if (ACPI_SUCCESS(status)) {
                acpi_tb_set_table_loaded_flag(table_index, TRUE);
        } else {
-               acpi_tb_release_owner_id(table_index);
+               (void)acpi_tb_release_owner_id(table_index);
        }
 
       unlock:
index cbd94af08cc5e00aa07c8c1075591c0124194700..cffef1bcbdbc3dde9db746c19be2314a6bc67d0c 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -180,6 +180,12 @@ acpi_size acpi_ns_get_pathname_length(struct acpi_namespace_node *node)
        next_node = node;
 
        while (next_node && (next_node != acpi_gbl_root_node)) {
+               if (ACPI_GET_DESCRIPTOR_TYPE(next_node) != ACPI_DESC_TYPE_NAMED) {
+                       ACPI_ERROR((AE_INFO,
+                                   "Invalid NS Node (%p) while traversing path",
+                                   next_node));
+                       return 0;
+               }
                size += ACPI_PATH_SEGMENT_LENGTH;
                next_node = acpi_ns_get_parent_node(next_node);
        }
index d9d7377bc6e6434bba2f36b1bf9ef8a9d5192caa..15fe09e24f71ef3698ac25824c91b5849e567bf3 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e696aa8479909a5cf273813d008fa2366e9d8fd7..46a79b0103b676ef5edf0008f40f70f1eb0ad80a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -64,7 +64,8 @@ ACPI_MODULE_NAME("nsparse")
  ******************************************************************************/
 acpi_status
 acpi_ns_one_complete_parse(acpi_native_uint pass_number,
-                          acpi_native_uint table_index)
+                          acpi_native_uint table_index,
+                          struct acpi_namespace_node * start_node)
 {
        union acpi_parse_object *parse_root;
        acpi_status status;
@@ -111,14 +112,25 @@ acpi_ns_one_complete_parse(acpi_native_uint pass_number,
                aml_start = (u8 *) table + sizeof(struct acpi_table_header);
                aml_length = table->length - sizeof(struct acpi_table_header);
                status = acpi_ds_init_aml_walk(walk_state, parse_root, NULL,
-                                              aml_start, aml_length, NULL,
-                                              (u8) pass_number);
+                                              aml_start, (u32) aml_length,
+                                              NULL, (u8) pass_number);
        }
 
        if (ACPI_FAILURE(status)) {
                acpi_ds_delete_walk_state(walk_state);
-               acpi_ps_delete_parse_tree(parse_root);
-               return_ACPI_STATUS(status);
+               goto cleanup;
+       }
+
+       /* start_node is the default location to load the table */
+
+       if (start_node && start_node != acpi_gbl_root_node) {
+               status =
+                   acpi_ds_scope_stack_push(start_node, ACPI_TYPE_METHOD,
+                                            walk_state);
+               if (ACPI_FAILURE(status)) {
+                       acpi_ds_delete_walk_state(walk_state);
+                       goto cleanup;
+               }
        }
 
        /* Parse the AML */
@@ -127,6 +139,7 @@ acpi_ns_one_complete_parse(acpi_native_uint pass_number,
                          (unsigned)pass_number));
        status = acpi_ps_parse_aml(walk_state);
 
+      cleanup:
        acpi_ps_delete_parse_tree(parse_root);
        return_ACPI_STATUS(status);
 }
@@ -163,7 +176,9 @@ acpi_ns_parse_table(acpi_native_uint table_index,
         * performs another complete parse of the AML.
         */
        ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 1\n"));
-       status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, table_index);
+       status =
+           acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1, table_index,
+                                      start_node);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }
@@ -178,7 +193,9 @@ acpi_ns_parse_table(acpi_native_uint table_index,
         * parse objects are all cached.
         */
        ACPI_DEBUG_PRINT((ACPI_DB_PARSE, "**** Start pass 2\n"));
-       status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, table_index);
+       status =
+           acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2, table_index,
+                                      start_node);
        if (ACPI_FAILURE(status)) {
                return_ACPI_STATUS(status);
        }
index e863be665ce8f6fc11464be35c2163b68cc2d22a..8399276cba1e4049155ffd7229e9c06b4fdad8c9 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 90fd059615ff94632d4b543f00be23560c22b226..64c039843ed2ba9efb6da4df7978c5cce674811d 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 280b8357c46c32ebe3f5dc9933f6a19ae95f4d0e..3c905ce26d7d209d93b0b28242fc79b851a18bc7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -77,9 +77,7 @@ struct acpi_namespace_node *acpi_ns_get_next_node(acpi_object_type type, struct
 
                /* It's really the parent's _scope_ that we want */
 
-               if (parent_node->child) {
-                       next_node = parent_node->child;
-               }
+               next_node = parent_node->child;
        }
 
        else {
index b92133faf5b7087fde85829c3511a64d3b24afb3..a8d549187c84262d5ae7ca9bd34ceee6d4a358dd 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -467,10 +467,13 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
                return (AE_CTRL_DEPTH);
        }
 
-       if (!(flags & ACPI_STA_DEVICE_PRESENT)) {
-
-               /* Don't examine children of the device if not present */
-
+       if (!(flags & ACPI_STA_DEVICE_PRESENT) &&
+           !(flags & ACPI_STA_DEVICE_FUNCTIONING)) {
+               /*
+                * Only skip the children of the device when the device is
+                * neither present nor functioning. See the ACPI spec,
+                * description of _STA, for more information.
+                */
                return (AE_CTRL_DEPTH);
        }
 
@@ -539,7 +542,7 @@ acpi_ns_get_device_callback(acpi_handle obj_handle,
  *              value is returned to the caller.
  *
  *              This is a wrapper for walk_namespace, but the callback performs
- *              additional filtering. Please see acpi_get_device_callback.
+ *              additional filtering. Please see acpi_ns_get_device_callback.
  *
  ******************************************************************************/
 
index b489781b22a85e616e7bdb3c6a1b64d43e041037..a287ed550f5474db1802e2bb785ecd3531b33f10 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index faa3758872012c9a208ec82a0a757f8e82092365..2b375ee80cef794f835e699b2ac61330a45751de 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a498a6cc68fec1d463786fba1d3b6c84a6e6b68f..235a1386888a42fb19c59f6080155a6e774071b3 100644 (file)
@@ -742,6 +742,7 @@ EXPORT_SYMBOL(acpi_os_execute);
 void acpi_os_wait_events_complete(void *context)
 {
        flush_workqueue(kacpid_wq);
+       flush_workqueue(kacpi_notify_wq);
 }
 
 EXPORT_SYMBOL(acpi_os_wait_events_complete);
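The one-line addition above makes the "wait for events" path flush the notify workqueue as well as the main one. A minimal kernel-style sketch of the pattern follows; the queue names are placeholders, not the driver's actual symbols:

    #include <linux/workqueue.h>

    /* Both queues are assumed to have been set up elsewhere with
     * create_workqueue(); they stand in for kacpid_wq and kacpi_notify_wq. */
    static struct workqueue_struct *main_wq;
    static struct workqueue_struct *notify_wq;

    /* Waiting until "all events are handled" means flushing every queue
     * that may still hold deferred work items, not just the first one. */
    static void example_wait_events_complete(void)
    {
        flush_workqueue(main_wq);
        flush_workqueue(notify_wq);
    }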
index c2b9835c890b892269e47079cf69a94fc6b9fe1e..f1e8bf65e24e82aa0db7d4a33da9241494d3aaf7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -230,12 +230,12 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
                          struct acpi_parse_state *parser_state,
                          union acpi_parse_object *arg, u8 possible_method_call)
 {
+       acpi_status status;
        char *path;
        union acpi_parse_object *name_op;
-       acpi_status status;
        union acpi_operand_object *method_desc;
        struct acpi_namespace_node *node;
-       union acpi_generic_state scope_info;
+       u8 *start = parser_state->aml;
 
        ACPI_FUNCTION_TRACE(ps_get_next_namepath);
 
@@ -249,25 +249,18 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
                return_ACPI_STATUS(AE_OK);
        }
 
-       /* Setup search scope info */
-
-       scope_info.scope.node = NULL;
-       node = parser_state->start_node;
-       if (node) {
-               scope_info.scope.node = node;
-       }
-
        /*
-        * Lookup the name in the internal namespace. We don't want to add
-        * anything new to the namespace here, however, so we use MODE_EXECUTE.
+        * Lookup the name in the internal namespace, starting with the current
+        * scope. We don't want to add anything new to the namespace here,
+        * however, so we use MODE_EXECUTE.
         * Allow searching of the parent tree, but don't open a new scope -
         * we just want to lookup the object (must be mode EXECUTE to perform
         * the upsearch)
         */
-       status =
-           acpi_ns_lookup(&scope_info, path, ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
-                          ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE,
-                          NULL, &node);
+       status = acpi_ns_lookup(walk_state->scope_info, path,
+                               ACPI_TYPE_ANY, ACPI_IMODE_EXECUTE,
+                               ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE,
+                               NULL, &node);
 
        /*
         * If this name is a control method invocation, we must
@@ -275,6 +268,16 @@ acpi_ps_get_next_namepath(struct acpi_walk_state *walk_state,
         */
        if (ACPI_SUCCESS(status) &&
            possible_method_call && (node->type == ACPI_TYPE_METHOD)) {
+               if (walk_state->op->common.aml_opcode == AML_UNLOAD_OP) {
+                       /*
+                        * acpi_ps_get_next_namestring has increased the AML pointer,
+                        * so we need to restore the saved AML pointer for method call.
+                        */
+                       walk_state->parser_state.aml = start;
+                       walk_state->arg_count = 1;
+                       acpi_ps_init_op(arg, AML_INT_METHODCALL_OP);
+                       return_ACPI_STATUS(AE_OK);
+               }
 
                /* This name is actually a control method invocation */
 
@@ -686,9 +689,29 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
                                return_ACPI_STATUS(AE_NO_MEMORY);
                        }
 
-                       status =
-                           acpi_ps_get_next_namepath(walk_state, parser_state,
-                                                     arg, 0);
+                       /* To support super_name arg of Unload */
+
+                       if (walk_state->op->common.aml_opcode == AML_UNLOAD_OP) {
+                               status =
+                                   acpi_ps_get_next_namepath(walk_state,
+                                                             parser_state, arg,
+                                                             1);
+
+                               /*
+                                * If the super_name arg of Unload is a method call,
+                                * we have restored the AML pointer, just free this Arg
+                                */
+                               if (arg->common.aml_opcode ==
+                                   AML_INT_METHODCALL_OP) {
+                                       acpi_ps_free_op(arg);
+                                       arg = NULL;
+                               }
+                       } else {
+                               status =
+                                   acpi_ps_get_next_namepath(walk_state,
+                                                             parser_state, arg,
+                                                             0);
+                       }
                } else {
                        /* Single complex argument, nothing returned */
 
index 773aee82fbb8e18fb5edf7ed349c220935e0e4ea..c06238e55d98bebd1582536a684949e194c74229 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -182,6 +182,7 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
        ACPI_FUNCTION_TRACE_PTR(ps_build_named_op, walk_state);
 
        unnamed_op->common.value.arg = NULL;
+       unnamed_op->common.arg_list_length = 0;
        unnamed_op->common.aml_opcode = walk_state->opcode;
 
        /*
@@ -241,7 +242,8 @@ acpi_ps_build_named_op(struct acpi_walk_state *walk_state,
        acpi_ps_append_arg(*op, unnamed_op->common.value.arg);
        acpi_gbl_depth++;
 
-       if ((*op)->common.aml_opcode == AML_REGION_OP) {
+       if ((*op)->common.aml_opcode == AML_REGION_OP ||
+           (*op)->common.aml_opcode == AML_DATA_REGION_OP) {
                /*
                 * Defer final parsing of an operation_region body, because we don't
                 * have enough info in the first pass to parse it correctly (i.e.,
@@ -280,6 +282,9 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
        acpi_status status = AE_OK;
        union acpi_parse_object *op;
        union acpi_parse_object *named_op = NULL;
+       union acpi_parse_object *parent_scope;
+       u8 argument_count;
+       const struct acpi_opcode_info *op_info;
 
        ACPI_FUNCTION_TRACE_PTR(ps_create_op, walk_state);
 
@@ -320,8 +325,32 @@ acpi_ps_create_op(struct acpi_walk_state *walk_state,
                op->named.length = 0;
        }
 
-       acpi_ps_append_arg(acpi_ps_get_parent_scope
-                          (&(walk_state->parser_state)), op);
+       if (walk_state->opcode == AML_BANK_FIELD_OP) {
+               /*
+                * Backup to beginning of bank_field declaration
+                * body_length is unknown until we parse the body
+                */
+               op->named.data = aml_op_start;
+               op->named.length = 0;
+       }
+
+       parent_scope = acpi_ps_get_parent_scope(&(walk_state->parser_state));
+       acpi_ps_append_arg(parent_scope, op);
+
+       if (parent_scope) {
+               op_info =
+                   acpi_ps_get_opcode_info(parent_scope->common.aml_opcode);
+               if (op_info->flags & AML_HAS_TARGET) {
+                       argument_count =
+                           acpi_ps_get_argument_count(op_info->type);
+                       if (parent_scope->common.arg_list_length >
+                           argument_count) {
+                               op->common.flags |= ACPI_PARSEOP_TARGET;
+                       }
+               } else if (parent_scope->common.aml_opcode == AML_INCREMENT_OP) {
+                       op->common.flags |= ACPI_PARSEOP_TARGET;
+               }
+       }
 
        if (walk_state->descending_callback != NULL) {
                /*
@@ -603,13 +632,6 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
                        acpi_ps_pop_scope(&(walk_state->parser_state), op,
                                          &walk_state->arg_types,
                                          &walk_state->arg_count);
-
-                       if ((*op)->common.aml_opcode != AML_WHILE_OP) {
-                               status2 = acpi_ds_result_stack_pop(walk_state);
-                               if (ACPI_FAILURE(status2)) {
-                                       return_ACPI_STATUS(status2);
-                               }
-                       }
                }
 
                /* Close this iteration of the While loop */
@@ -640,10 +662,6 @@ acpi_ps_complete_op(struct acpi_walk_state *walk_state,
                                if (ACPI_FAILURE(status2)) {
                                        return_ACPI_STATUS(status2);
                                }
-                               status2 = acpi_ds_result_stack_pop(walk_state);
-                               if (ACPI_FAILURE(status2)) {
-                                       return_ACPI_STATUS(status2);
-                               }
 
                                acpi_ut_delete_generic_state
                                    (acpi_ut_pop_generic_state
@@ -1005,7 +1023,8 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
                                acpi_gbl_depth--;
                        }
 
-                       if (op->common.aml_opcode == AML_REGION_OP) {
+                       if (op->common.aml_opcode == AML_REGION_OP ||
+                           op->common.aml_opcode == AML_DATA_REGION_OP) {
                                /*
                                 * Skip parsing of control method or opregion body,
                                 * because we don't have enough info in the first pass
@@ -1030,6 +1049,16 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
                            (u32) (parser_state->aml - op->named.data);
                }
 
+               if (op->common.aml_opcode == AML_BANK_FIELD_OP) {
+                       /*
+                        * Backup to beginning of bank_field declaration
+                        *
+                        * body_length is unknown until we parse the body
+                        */
+                       op->named.length =
+                           (u32) (parser_state->aml - op->named.data);
+               }
+
                /* This op complete, notify the dispatcher */
 
                if (walk_state->ascending_callback != NULL) {
index 9296e86761d74f2aa4000167f068f611d36486d3..f425ab30eae83b05fabe07ef7a9ff4ef23e5dabd 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -49,6 +49,9 @@
 #define _COMPONENT          ACPI_PARSER
 ACPI_MODULE_NAME("psopcode")
 
+static const u8 acpi_gbl_argument_count[] =
+    { 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 6 };
+
 /*******************************************************************************
  *
  * NAME:        acpi_gbl_aml_op_info
@@ -59,6 +62,7 @@ ACPI_MODULE_NAME("psopcode")
  *              the operand type.
  *
  ******************************************************************************/
+
 /*
  * Summary of opcode types/flags
  *
@@ -176,6 +180,7 @@ ACPI_MODULE_NAME("psopcode")
        AML_CREATE_QWORD_FIELD_OP
 
  ******************************************************************************/
+
 /*
  * Master Opcode information table.  A summary of everything we know about each
  * opcode, all in one place.
@@ -515,9 +520,10 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
                 AML_TYPE_NAMED_FIELD,
                 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
 /* 5F */ ACPI_OP("BankField", ARGP_BANK_FIELD_OP, ARGI_BANK_FIELD_OP,
-                ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
+                ACPI_TYPE_LOCAL_BANK_FIELD, AML_CLASS_NAMED_OBJECT,
                 AML_TYPE_NAMED_FIELD,
-                AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD),
+                AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE | AML_FIELD |
+                AML_DEFER),
 
 /* Internal opcodes that map to invalid AML opcodes */
 
@@ -619,9 +625,9 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
                 AML_TYPE_EXEC_6A_0T_1R, AML_FLAGS_EXEC_6A_0T_1R),
 /* 7C */ ACPI_OP("DataTableRegion", ARGP_DATA_REGION_OP,
                 ARGI_DATA_REGION_OP, ACPI_TYPE_REGION,
-                AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_SIMPLE,
+                AML_CLASS_NAMED_OBJECT, AML_TYPE_NAMED_COMPLEX,
                 AML_HAS_ARGS | AML_NSOBJECT | AML_NSOPCODE |
-                AML_NSNODE | AML_NAMED),
+                AML_NSNODE | AML_NAMED | AML_DEFER),
 /* 7D */ ACPI_OP("[EvalSubTree]", ARGP_SCOPE_OP, ARGI_SCOPE_OP,
                 ACPI_TYPE_ANY, AML_CLASS_NAMED_OBJECT,
                 AML_TYPE_NAMED_NO_OBJ,
@@ -779,3 +785,25 @@ char *acpi_ps_get_opcode_name(u16 opcode)
 
 #endif
 }
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ps_get_argument_count
+ *
+ * PARAMETERS:  op_type             - Type associated with the AML opcode
+ *
+ * RETURN:      Argument count
+ *
+ * DESCRIPTION: Obtain the number of expected arguments for an AML opcode
+ *
+ ******************************************************************************/
+
+u8 acpi_ps_get_argument_count(u32 op_type)
+{
+
+       if (op_type <= AML_TYPE_EXEC_6A_0T_1R) {
+               return (acpi_gbl_argument_count[op_type]);
+       }
+
+       return (0);
+}
index 5d63f48e56b5cadc475b0af1206be28ba2c83d35..15e1702e48d606ad4313d547dafbc4b99b518d75 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -204,6 +204,8 @@ acpi_ps_complete_this_op(struct acpi_walk_state * walk_state,
                                AML_BUFFER_OP)
                            || (op->common.parent->common.aml_opcode ==
                                AML_PACKAGE_OP)
+                           || (op->common.parent->common.aml_opcode ==
+                               AML_BANK_FIELD_OP)
                            || (op->common.parent->common.aml_opcode ==
                                AML_VAR_PACKAGE_OP)) {
                                replacement_op =
@@ -349,19 +351,13 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
 
                parser_state->aml = walk_state->aml_last_while;
                walk_state->control_state->common.value = FALSE;
-               status = acpi_ds_result_stack_pop(walk_state);
-               if (ACPI_SUCCESS(status)) {
-                       status = AE_CTRL_BREAK;
-               }
+               status = AE_CTRL_BREAK;
                break;
 
        case AE_CTRL_CONTINUE:
 
                parser_state->aml = walk_state->aml_last_while;
-               status = acpi_ds_result_stack_pop(walk_state);
-               if (ACPI_SUCCESS(status)) {
-                       status = AE_CTRL_CONTINUE;
-               }
+               status = AE_CTRL_CONTINUE;
                break;
 
        case AE_CTRL_PENDING:
@@ -383,10 +379,7 @@ acpi_ps_next_parse_state(struct acpi_walk_state *walk_state,
                 * Just close out this package
                 */
                parser_state->aml = acpi_ps_get_next_package_end(parser_state);
-               status = acpi_ds_result_stack_pop(walk_state);
-               if (ACPI_SUCCESS(status)) {
-                       status = AE_CTRL_PENDING;
-               }
+               status = AE_CTRL_PENDING;
                break;
 
        case AE_CTRL_FALSE:
@@ -541,7 +534,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
                        if ((status == AE_ALREADY_EXISTS) &&
                            (!walk_state->method_desc->method.mutex)) {
                                ACPI_INFO((AE_INFO,
-                                          "Marking method %4.4s as Serialized",
+                                          "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
                                           walk_state->method_node->name.
                                           ascii));
 
@@ -601,6 +594,30 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state)
                                 * The object is deleted
                                 */
                                if (!previous_walk_state->return_desc) {
+                                       /*
+                                        * In slack mode execution, if there is no return value
+                                        * we should implicitly return zero (0) as a default value.
+                                        */
+                                       if (acpi_gbl_enable_interpreter_slack &&
+                                           !previous_walk_state->
+                                           implicit_return_obj) {
+                                               previous_walk_state->
+                                                   implicit_return_obj =
+                                                   acpi_ut_create_internal_object
+                                                   (ACPI_TYPE_INTEGER);
+                                               if (!previous_walk_state->
+                                                   implicit_return_obj) {
+                                                       return_ACPI_STATUS
+                                                           (AE_NO_MEMORY);
+                                               }
+
+                                               previous_walk_state->
+                                                   implicit_return_obj->
+                                                   integer.value = 0;
+                                       }
+
+                                       /* Restart the calling control method */
+
                                        status =
                                            acpi_ds_restart_control_method
                                            (walk_state,
index 77cfa4ed0cfeccfcc94e187fc0726a96e3df7006..ee50e67c94431d28d9cd7052ce6ada8a92af020e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 966e7ea2a0c49b2cdc9a25e76fb23f9c02a9a3d1..1dd355ddd182da6b2a903b3579589e23564cb35d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -171,6 +171,8 @@ acpi_ps_append_arg(union acpi_parse_object *op, union acpi_parse_object *arg)
        while (arg) {
                arg->common.parent = op;
                arg = arg->common.next;
+
+               op->common.arg_list_length++;
        }
 }
 
index 8ca52002db5523244f1e6f69a615e179982b557d..7cf1f65cd5bb833a9c328b94ba27fecb0a57af53 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 49f9757434e4742bd5324c3e2c384af9be07f45e..8b86ad5a3201395e47be1801c6721e087f7a8aae 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 94103bced75e0044ac62820ac47a482665d04285..52581454c47c3a21ca071df64818d19f173f25f0 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 76bf6d90c700efac7905d6167680915ca50e1498..81e4f081a4aefd2d53d3004fae15d0935610bb90 100644 (file)
@@ -93,6 +93,7 @@ struct acpi_power_resource {
 static struct list_head acpi_power_resource_list;
 
 static const struct file_operations acpi_power_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_power_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -121,7 +122,7 @@ acpi_power_get_context(acpi_handle handle,
        }
 
        *resource = acpi_driver_data(device);
-       if (!resource)
+       if (!*resource)
                return -ENODEV;
 
        return 0;
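The one-character change above tests the value stored through the output pointer rather than the output pointer itself, which is never NULL at that point. A small self-contained illustration of the difference (function and variable names invented for the example):

    #include <stdio.h>

    /* Stores a context through 'out' and reports failure when the stored
     * value is NULL; testing 'out' itself would always look successful. */
    static int get_context(void *data, void **out)
    {
        *out = data;
        if (!*out)
            return -1;
        return 0;
    }

    int main(void)
    {
        void *ctx;

        printf("%d\n", get_context(NULL, &ctx));   /* -1: caught */
        printf("%d\n", get_context("ok", &ctx));   /*  0: valid context */
        return 0;
    }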
@@ -543,15 +544,11 @@ static int acpi_power_add_fs(struct acpi_device *device)
        }
 
        /* 'status' [R] */
-       entry = create_proc_entry(ACPI_POWER_FILE_STATUS,
-                                 S_IRUGO, acpi_device_dir(device));
+       entry = proc_create_data(ACPI_POWER_FILE_STATUS,
+                                S_IRUGO, acpi_device_dir(device),
+                                &acpi_power_fops, acpi_driver_data(device));
        if (!entry)
                return -EIO;
-       else {
-               entry->proc_fops = &acpi_power_fops;
-               entry->data = acpi_driver_data(device);
-       }
-
        return 0;
 }
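This hunk, like several later ones in the merge, collapses create_proc_entry() plus manual proc_fops/data assignment into a single proc_create_data() call. A hedged sketch of the resulting pattern, assuming only the proc_create_data() signature used above; the helper and its arguments are placeholders:

    #include <linux/proc_fs.h>
    #include <linux/stat.h>
    #include <linux/errno.h>

    static int example_add_proc(struct proc_dir_entry *parent_dir,
                                const struct file_operations *fops,
                                void *driver_data)
    {
        struct proc_dir_entry *entry;

        /* Name, mode, parent, fops and ->data are set in one call, so there
         * is no window where the entry exists without its fops and data. */
        entry = proc_create_data("status", S_IRUGO, parent_dir,
                                 fops, driver_data);
        if (!entry)
            return -EIO;
        return 0;
    }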
 
index a825b431b64f294df234e05e1244d56d6ce83bf3..386e5aa48834cc087ccf2e2ac6757bc14ee5a566 100644 (file)
@@ -112,6 +112,7 @@ static struct acpi_driver acpi_processor_driver = {
 #define UNINSTALL_NOTIFY_HANDLER       2
 
 static const struct file_operations acpi_processor_info_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_processor_info_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -326,40 +327,30 @@ static int acpi_processor_add_fs(struct acpi_device *device)
        acpi_device_dir(device)->owner = THIS_MODULE;
 
        /* 'info' [R] */
-       entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO,
-                                 S_IRUGO, acpi_device_dir(device));
+       entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO,
+                                S_IRUGO, acpi_device_dir(device),
+                                &acpi_processor_info_fops,
+                                acpi_driver_data(device));
        if (!entry)
                return -EIO;
-       else {
-               entry->proc_fops = &acpi_processor_info_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
 
        /* 'throttling' [R/W] */
-       entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING,
-                                 S_IFREG | S_IRUGO | S_IWUSR,
-                                 acpi_device_dir(device));
+       entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING,
+                                S_IFREG | S_IRUGO | S_IWUSR,
+                                acpi_device_dir(device),
+                                &acpi_processor_throttling_fops,
+                                acpi_driver_data(device));
        if (!entry)
                return -EIO;
-       else {
-               entry->proc_fops = &acpi_processor_throttling_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
 
        /* 'limit' [R/W] */
-       entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT,
-                                 S_IFREG | S_IRUGO | S_IWUSR,
-                                 acpi_device_dir(device));
+       entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT,
+                                S_IFREG | S_IRUGO | S_IWUSR,
+                                acpi_device_dir(device),
+                                &acpi_processor_limit_fops,
+                                acpi_driver_data(device));
        if (!entry)
                return -EIO;
-       else {
-               entry->proc_fops = &acpi_processor_limit_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
-
        return 0;
 }
 
@@ -612,6 +603,15 @@ static int acpi_processor_get_info(struct acpi_processor *pr, unsigned has_uid)
                request_region(pr->throttling.address, 6, "ACPI CPU throttle");
        }
 
+       /*
+        * If ACPI describes a slot number for this CPU, we can use it to
+        * ensure we get the right value in the "physical id" field
+        * of /proc/cpuinfo
+        */
+       status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer);
+       if (ACPI_SUCCESS(status))
+               arch_fix_phys_package_id(pr->id, object.integer.value);
+
        return 0;
 }
 
@@ -674,22 +674,21 @@ static int __cpuinit acpi_processor_start(struct acpi_device *device)
                result = PTR_ERR(pr->cdev);
                goto end;
        }
-       if (pr->cdev) {
-               printk(KERN_INFO PREFIX
-                       "%s is registered as cooling_device%d\n",
-                       device->dev.bus_id, pr->cdev->id);
-
-               result = sysfs_create_link(&device->dev.kobj,
-                                          &pr->cdev->device.kobj,
-                                          "thermal_cooling");
-               if (result)
-                       return result;
-               result = sysfs_create_link(&pr->cdev->device.kobj,
-                                          &device->dev.kobj,
-                                          "device");
-               if (result)
-                       return result;
-       }
+
+       printk(KERN_INFO PREFIX
+               "%s is registered as cooling_device%d\n",
+               device->dev.bus_id, pr->cdev->id);
+
+       result = sysfs_create_link(&device->dev.kobj,
+                                  &pr->cdev->device.kobj,
+                                  "thermal_cooling");
+       if (result)
+               printk(KERN_ERR PREFIX "Create sysfs link\n");
+       result = sysfs_create_link(&pr->cdev->device.kobj,
+                                  &device->dev.kobj,
+                                  "device");
+       if (result)
+               printk(KERN_ERR PREFIX "Create sysfs link\n");
 
        if (pr->flags.throttling) {
                printk(KERN_INFO PREFIX "%s [%s] (supports",
index 788da9781f80ffb5311b396cd3557b0786376dcd..2dd2c1f3a01ca11bf10ae1543d57f9d3fcd71dea 100644 (file)
@@ -418,13 +418,12 @@ static void acpi_processor_idle(void)
 
        cx = pr->power.state;
        if (!cx || acpi_idle_suspend) {
-               if (pm_idle_save)
-                       pm_idle_save();
-               else
+               if (pm_idle_save) {
+                       pm_idle_save(); /* enables IRQs */
+               } else {
                        acpi_safe_halt();
-
-               if (irqs_disabled())
                        local_irq_enable();
+               }
 
                return;
        }
@@ -520,10 +519,12 @@ static void acpi_processor_idle(void)
                 * Use the appropriate idle routine, the one that would
                 * be used without acpi C-states.
                 */
-               if (pm_idle_save)
-                       pm_idle_save();
-               else
+               if (pm_idle_save) {
+                       pm_idle_save(); /* enables IRQs */
+               } else {
                        acpi_safe_halt();
+                       local_irq_enable();
+               }
 
                /*
                 * TBD: Can't get time duration while in C1, as resumes
@@ -534,8 +535,6 @@ static void acpi_processor_idle(void)
                 *       skew otherwise.
                 */
                sleep_ticks = 0xFFFFFFFF;
-               if (irqs_disabled())
-                       local_irq_enable();
 
                break;
 
@@ -848,6 +847,7 @@ static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
                /* all processors need to support C1 */
                pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
                pr->power.states[ACPI_STATE_C1].valid = 1;
+               pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
        }
        /* the C0 state only exists as a filler in our array */
        pr->power.states[ACPI_STATE_C0].valid = 1;
@@ -960,6 +960,9 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
                                 cx.address);
                }
 
+               if (cx.type == ACPI_STATE_C1) {
+                       cx.valid = 1;
+               }
 
                obj = &(element->package.elements[2]);
                if (obj->type != ACPI_TYPE_INTEGER)
@@ -1283,6 +1286,7 @@ static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations acpi_processor_power_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_processor_power_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -1295,6 +1299,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 {
        int result = 0;
 
+       if (boot_option_idle_override)
+               return 0;
 
        if (!pr)
                return -EINVAL;
@@ -1734,6 +1740,9 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr)
 {
        int ret;
 
+       if (boot_option_idle_override)
+               return 0;
+
        if (!pr)
                return -EINVAL;
 
@@ -1764,6 +1773,8 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
        struct proc_dir_entry *entry = NULL;
        unsigned int i;
 
+       if (boot_option_idle_override)
+               return 0;
 
        if (!first_run) {
                dmi_check_system(processor_power_dmi_table);
@@ -1799,7 +1810,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
         * Note that the previously set idle handler will be used on
         * platforms that only support C1.
         */
-       if ((pr->flags.power) && (!boot_option_idle_override)) {
+       if (pr->flags.power) {
 #ifdef CONFIG_CPU_IDLE
                acpi_processor_setup_cpuidle(pr);
                pr->power.dev.cpu = pr->id;
@@ -1823,24 +1834,23 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
        }
 
        /* 'power' [R] */
-       entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
-                                 S_IRUGO, acpi_device_dir(device));
+       entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
+                                S_IRUGO, acpi_device_dir(device),
+                                &acpi_processor_power_fops,
+                                acpi_driver_data(device));
        if (!entry)
                return -EIO;
-       else {
-               entry->proc_fops = &acpi_processor_power_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
-
        return 0;
 }
 
 int acpi_processor_power_exit(struct acpi_processor *pr,
                              struct acpi_device *device)
 {
+       if (boot_option_idle_override)
+               return 0;
+
 #ifdef CONFIG_CPU_IDLE
-       if ((pr->flags.power) && (!boot_option_idle_override))
+       if (pr->flags.power)
                cpuidle_unregister_device(&pr->power.dev);
 #endif
        pr->flags.power_setup_done = 0;
index b477a4be8a698fde564465278cf7c09321f9b00b..d80b2d1441afbadb561cf58abfdb135dd20aeb8e 100644 (file)
@@ -411,6 +411,7 @@ EXPORT_SYMBOL(acpi_processor_notify_smm);
 
 static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_processor_perf_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_processor_perf_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -456,7 +457,6 @@ static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
 
 static void acpi_cpufreq_add_file(struct acpi_processor *pr)
 {
-       struct proc_dir_entry *entry = NULL;
        struct acpi_device *device = NULL;
 
 
@@ -464,14 +464,9 @@ static void acpi_cpufreq_add_file(struct acpi_processor *pr)
                return;
 
        /* add file 'performance' [R/W] */
-       entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
-                                 S_IFREG | S_IRUGO,
-                                 acpi_device_dir(device));
-       if (entry){
-               entry->proc_fops = &acpi_processor_perf_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
+       proc_create_data(ACPI_PROCESSOR_FILE_PERFORMANCE, S_IFREG | S_IRUGO,
+                        acpi_device_dir(device),
+                        &acpi_processor_perf_fops, acpi_driver_data(device));
        return;
 }
 
index 9cb43f52f7b6cf45bed03ab7ef0c74eab93e7a7f..ef34b18f95ca52e50c5bae79c2436d5b76a4d6fb 100644 (file)
@@ -97,7 +97,7 @@ static int acpi_processor_apply_limit(struct acpi_processor *pr)
 #define CPUFREQ_THERMAL_MIN_STEP 0
 #define CPUFREQ_THERMAL_MAX_STEP 3
 
-static unsigned int cpufreq_thermal_reduction_pctg[NR_CPUS];
+static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
 static unsigned int acpi_thermal_cpufreq_is_init = 0;
 
 static int cpu_has_cpufreq(unsigned int cpu)
@@ -113,9 +113,9 @@ static int acpi_thermal_cpufreq_increase(unsigned int cpu)
        if (!cpu_has_cpufreq(cpu))
                return -ENODEV;
 
-       if (cpufreq_thermal_reduction_pctg[cpu] <
+       if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) <
                CPUFREQ_THERMAL_MAX_STEP) {
-               cpufreq_thermal_reduction_pctg[cpu]++;
+               per_cpu(cpufreq_thermal_reduction_pctg, cpu)++;
                cpufreq_update_policy(cpu);
                return 0;
        }
@@ -128,14 +128,14 @@ static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
        if (!cpu_has_cpufreq(cpu))
                return -ENODEV;
 
-       if (cpufreq_thermal_reduction_pctg[cpu] >
+       if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) >
                (CPUFREQ_THERMAL_MIN_STEP + 1))
-               cpufreq_thermal_reduction_pctg[cpu]--;
+               per_cpu(cpufreq_thermal_reduction_pctg, cpu)--;
        else
-               cpufreq_thermal_reduction_pctg[cpu] = 0;
+               per_cpu(cpufreq_thermal_reduction_pctg, cpu) = 0;
        cpufreq_update_policy(cpu);
        /* We reached max freq again and can leave passive mode */
-       return !cpufreq_thermal_reduction_pctg[cpu];
+       return !per_cpu(cpufreq_thermal_reduction_pctg, cpu);
 }
 
 static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
@@ -147,9 +147,10 @@ static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
        if (event != CPUFREQ_ADJUST)
                goto out;
 
-       max_freq =
-           (policy->cpuinfo.max_freq *
-            (100 - cpufreq_thermal_reduction_pctg[policy->cpu] * 20)) / 100;
+       max_freq = (
+           policy->cpuinfo.max_freq *
+           (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
+       ) / 100;
 
        cpufreq_verify_within_limits(policy, 0, max_freq);
 
@@ -174,7 +175,7 @@ static int cpufreq_get_cur_state(unsigned int cpu)
        if (!cpu_has_cpufreq(cpu))
                return 0;
 
-       return cpufreq_thermal_reduction_pctg[cpu];
+       return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
 }
 
 static int cpufreq_set_cur_state(unsigned int cpu, int state)
@@ -182,7 +183,7 @@ static int cpufreq_set_cur_state(unsigned int cpu, int state)
        if (!cpu_has_cpufreq(cpu))
                return 0;
 
-       cpufreq_thermal_reduction_pctg[cpu] = state;
+       per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
        cpufreq_update_policy(cpu);
        return 0;
 }
@@ -191,8 +192,9 @@ void acpi_thermal_cpufreq_init(void)
 {
        int i;
 
-       for (i = 0; i < NR_CPUS; i++)
-               cpufreq_thermal_reduction_pctg[i] = 0;
+       for (i = 0; i < nr_cpu_ids; i++)
+               if (cpu_present(i))
+                       per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;
 
        i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
                                      CPUFREQ_POLICY_NOTIFIER);
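The hunks above replace an NR_CPUS-sized array with a per-CPU variable and iterate only over CPUs that can actually exist. A minimal kernel-style sketch of the same conversion on a hypothetical counter:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    /* Before: static unsigned int example_pctg[NR_CPUS];
     * After:  one instance per possible CPU, no NR_CPUS-sized static array. */
    static DEFINE_PER_CPU(unsigned int, example_pctg);

    static void example_reset_all(void)
    {
        int cpu;

        /* Iterate real CPU ids instead of 0..NR_CPUS-1. */
        for_each_possible_cpu(cpu)
            per_cpu(example_pctg, cpu) = 0;
    }

    static unsigned int example_get(unsigned int cpu)
    {
        return per_cpu(example_pctg, cpu);
    }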
@@ -507,6 +509,7 @@ static ssize_t acpi_processor_write_limit(struct file * file,
 }
 
 struct file_operations acpi_processor_limit_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_processor_limit_open_fs,
        .read = seq_read,
        .write = acpi_processor_write_limit,
index 0bba3a914e865562c51b11703fca695cfd0051a0..bb06738860c4113599ab185435834253d8af4afb 100644 (file)
@@ -1252,6 +1252,7 @@ static ssize_t acpi_processor_write_throttling(struct file *file,
 }
 
 struct file_operations acpi_processor_throttling_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_processor_throttling_open_fs,
        .read = seq_read,
        .write = acpi_processor_write_throttling,
index 271e61509eebcde1bee47e5b60c02b15f56b8d5d..7f96332822bf2cff8bab0a3a88d1ed1c19c7329a 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0dd2ce8a3475d4c062a0a67941805fb29a3063ec..8a112d11d491eebac39708e4d0d1cc59e3f4fc96 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -73,7 +73,7 @@ acpi_rs_stream_option_length(u32 resource_length, u32 minimum_total_length);
 
 static u8 acpi_rs_count_set_bits(u16 bit_field)
 {
-       u8 bits_set;
+       acpi_native_uint bits_set;
 
        ACPI_FUNCTION_ENTRY();
 
@@ -81,10 +81,10 @@ static u8 acpi_rs_count_set_bits(u16 bit_field)
 
                /* Zero the least significant bit that is set */
 
-               bit_field &= (bit_field - 1);
+               bit_field &= (u16) (bit_field - 1);
        }
 
-       return (bits_set);
+       return ((u8) bits_set);
 }
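acpi_rs_count_set_bits() above counts set bits by clearing the lowest set bit on each pass. The same trick as a standalone, runnable C example, independent of the ACPI types:

    #include <stdio.h>

    /* Kernighan's trick: x &= (x - 1) clears the lowest set bit, so the
     * loop iterates once per set bit rather than once per bit position. */
    static unsigned int popcount16(unsigned short x)
    {
        unsigned int count = 0;

        for (; x; count++)
            x &= (unsigned short)(x - 1);
        return count;
    }

    int main(void)
    {
        printf("%u\n", popcount16(0x00F3));   /* prints 6 */
        return 0;
    }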
 
 /*******************************************************************************
@@ -211,6 +211,24 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
                 * variable-length fields
                 */
                switch (resource->type) {
+               case ACPI_RESOURCE_TYPE_IRQ:
+
+                       /* Length can be 3 or 2 */
+
+                       if (resource->data.irq.descriptor_length == 2) {
+                               total_size--;
+                       }
+                       break;
+
+               case ACPI_RESOURCE_TYPE_START_DEPENDENT:
+
+                       /* Length can be 1 or 0 */
+
+                       if (resource->data.irq.descriptor_length == 0) {
+                               total_size--;
+                       }
+                       break;
+
                case ACPI_RESOURCE_TYPE_VENDOR:
                        /*
                         * Vendor Defined Resource:
index 50da494c3ee25f742ea67d3614c99812512299c1..faddaee1bc07529c38e5ce075a88e53077d6ab72 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 46da116a4030eacb57756273641f4395b3f002b7..6bbbb7b8941a4f77a8456dc308f62f1625670934 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -87,8 +87,10 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table);
  *
  ******************************************************************************/
 
-struct acpi_rsdump_info acpi_rs_dump_irq[6] = {
+struct acpi_rsdump_info acpi_rs_dump_irq[7] = {
        {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_irq), "IRQ", NULL},
+       {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(irq.descriptor_length),
+        "Descriptor Length", NULL},
        {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.triggering), "Triggering",
         acpi_gbl_he_decode},
        {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(irq.polarity), "Polarity",
@@ -115,9 +117,11 @@ struct acpi_rsdump_info acpi_rs_dump_dma[6] = {
         NULL}
 };
 
-struct acpi_rsdump_info acpi_rs_dump_start_dpf[3] = {
+struct acpi_rsdump_info acpi_rs_dump_start_dpf[4] = {
        {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_start_dpf),
         "Start-Dependent-Functions", NULL},
+       {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(start_dpf.descriptor_length),
+        "Descriptor Length", NULL},
        {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.compatibility_priority),
         "Compatibility Priority", acpi_gbl_config_decode},
        {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(start_dpf.performance_robustness),
index 2c2adb6292c160bfa68771b692a0f98ca316d97d..3f0a1fedbe0ec96a0671f25e4a343f1df07f9dec 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b297bc3e441942b4b993faca1e0133b846e85e83..b66d42e7402e4430a7f85f951e2b57bafcf8f934 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -185,7 +185,7 @@ struct acpi_rsconvert_info acpi_rs_convert_end_tag[2] = {
  *
  ******************************************************************************/
 
-struct acpi_rsconvert_info acpi_rs_get_start_dpf[5] = {
+struct acpi_rsconvert_info acpi_rs_get_start_dpf[6] = {
        {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_START_DEPENDENT,
         ACPI_RS_SIZE(struct acpi_resource_start_dependent),
         ACPI_RSC_TABLE_SIZE(acpi_rs_get_start_dpf)},
@@ -196,6 +196,12 @@ struct acpi_rsconvert_info acpi_rs_get_start_dpf[5] = {
         ACPI_ACCEPTABLE_CONFIGURATION,
         2},
 
+       /* Get the descriptor length (0 or 1 for Start Dpf descriptor) */
+
+       {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
+        AML_OFFSET(start_dpf.descriptor_type),
+        0},
+
        /* All done if there is no flag byte present in the descriptor */
 
        {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 1},
@@ -219,7 +225,9 @@ struct acpi_rsconvert_info acpi_rs_get_start_dpf[5] = {
  *
  ******************************************************************************/
 
-struct acpi_rsconvert_info acpi_rs_set_start_dpf[6] = {
+struct acpi_rsconvert_info acpi_rs_set_start_dpf[10] = {
+       /* Start with a default descriptor of length 1 */
+
        {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_START_DEPENDENT,
         sizeof(struct aml_resource_start_dependent),
         ACPI_RSC_TABLE_SIZE(acpi_rs_set_start_dpf)},
@@ -235,6 +243,33 @@ struct acpi_rsconvert_info acpi_rs_set_start_dpf[6] = {
         ACPI_RS_OFFSET(data.start_dpf.performance_robustness),
         AML_OFFSET(start_dpf.flags),
         2},
+       /*
+        * All done if the output descriptor length is required to be 1
+        * (i.e., optimization to 0 bytes cannot be attempted)
+        */
+       {ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
+        ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
+        1},
+
+       /* Set length to 0 bytes (no flags byte) */
+
+       {ACPI_RSC_LENGTH, 0, 0,
+        sizeof(struct aml_resource_start_dependent_noprio)},
+
+       /*
+        * All done if the output descriptor length is required to be 0.
+        *
+        * TBD: Perhaps we should check for error if input flags are not
+        * compatible with a 0-byte descriptor.
+        */
+       {ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
+        ACPI_RS_OFFSET(data.start_dpf.descriptor_length),
+        0},
+
+       /* Reset length to 1 byte (descriptor with flags byte) */
+
+       {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_start_dependent)},
+
        /*
         * All done if flags byte is necessary -- if either priority value
         * is not ACPI_ACCEPTABLE_CONFIGURATION
index 5657f7b950399f619023edced4baec792876bc74..a8805efc036658d4622dac5dadfb631fcd6493a6 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -52,7 +52,7 @@ ACPI_MODULE_NAME("rsirq")
  * acpi_rs_get_irq
  *
  ******************************************************************************/
-struct acpi_rsconvert_info acpi_rs_get_irq[7] = {
+struct acpi_rsconvert_info acpi_rs_get_irq[8] = {
        {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_IRQ,
         ACPI_RS_SIZE(struct acpi_resource_irq),
         ACPI_RSC_TABLE_SIZE(acpi_rs_get_irq)},
@@ -69,6 +69,12 @@ struct acpi_rsconvert_info acpi_rs_get_irq[7] = {
         ACPI_EDGE_SENSITIVE,
         1},
 
+       /* Get the descriptor length (2 or 3 for IRQ descriptor) */
+
+       {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.irq.descriptor_length),
+        AML_OFFSET(irq.descriptor_type),
+        0},
+
        /* All done if no flag byte present in descriptor */
 
        {ACPI_RSC_EXIT_NE, ACPI_RSC_COMPARE_AML_LENGTH, 0, 3},
@@ -94,7 +100,9 @@ struct acpi_rsconvert_info acpi_rs_get_irq[7] = {
  *
  ******************************************************************************/
 
-struct acpi_rsconvert_info acpi_rs_set_irq[9] = {
+struct acpi_rsconvert_info acpi_rs_set_irq[13] = {
+       /* Start with a default descriptor of length 3 */
+
        {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_IRQ,
         sizeof(struct aml_resource_irq),
         ACPI_RSC_TABLE_SIZE(acpi_rs_set_irq)},
@@ -105,7 +113,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[9] = {
         AML_OFFSET(irq.irq_mask),
         ACPI_RS_OFFSET(data.irq.interrupt_count)},
 
-       /* Set the flags byte by default */
+       /* Set the flags byte */
 
        {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.triggering),
         AML_OFFSET(irq.flags),
@@ -118,6 +126,33 @@ struct acpi_rsconvert_info acpi_rs_set_irq[9] = {
        {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.irq.sharable),
         AML_OFFSET(irq.flags),
         4},
+
+       /*
+        * All done if the output descriptor length is required to be 3
+        * (i.e., optimization to 2 bytes cannot be attempted)
+        */
+       {ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
+        ACPI_RS_OFFSET(data.irq.descriptor_length),
+        3},
+
+       /* Set length to 2 bytes (no flags byte) */
+
+       {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq_noflags)},
+
+       /*
+        * All done if the output descriptor length is required to be 2.
+        *
+        * TBD: Perhaps we should check for error if input flags are not
+        * compatible with a 2-byte descriptor.
+        */
+       {ACPI_RSC_EXIT_EQ, ACPI_RSC_COMPARE_VALUE,
+        ACPI_RS_OFFSET(data.irq.descriptor_length),
+        2},
+
+       /* Reset length to 3 bytes (descriptor with flags byte) */
+
+       {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq)},
+
        /*
         * Check if the flags byte is necessary. Not needed if the flags are:
         * ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_HIGH, ACPI_EXCLUSIVE
@@ -134,7 +169,7 @@ struct acpi_rsconvert_info acpi_rs_set_irq[9] = {
         ACPI_RS_OFFSET(data.irq.sharable),
         ACPI_EXCLUSIVE},
 
-       /* irq_no_flags() descriptor can be used */
+       /* We can optimize to a 2-byte irq_no_flags() descriptor */
 
        {ACPI_RSC_LENGTH, 0, 0, sizeof(struct aml_resource_irq_noflags)}
 };
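
The extra entries in acpi_rs_get_irq/acpi_rs_set_irq above track a descriptor_length of 2 or 3 so the writer can emit either the short IRQ descriptor (mask only) or the long one (mask plus a flags byte). As an illustration only (the struct names below are hypothetical, not the aml_resource_* definitions), the two small-resource layouts are roughly:

#include <linux/types.h>

/* IRQ descriptor with length field 2: tag byte followed by the 16-bit mask */
struct irq_desc_noflags {
	u8  tag;        /* small-resource tag, length field = 2 */
	u16 irq_mask;   /* one bit per IRQ line 0-15 */
} __attribute__((packed));

/* IRQ descriptor with length field 3: the same, plus a trailing flags byte */
struct irq_desc_flags {
	u8  tag;        /* small-resource tag, length field = 3 */
	u16 irq_mask;
	u8  flags;      /* triggering, polarity, sharing */
} __attribute__((packed));
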
index ca21e4660c79c8b4471a9ee41ec2cf058f96c7b9..b78c7e797a1955ad8be12526d7657d926d98e237 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 521eab7dd8df531d8af5fa0d24fc0ba91f60d25d..63b21abd90bb42c994a43b7895519344cabad853 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c7081afa893aeb22529ee2a8ea89bc39261b7eb9..de1ac3881b224ab1121a5edd97409aecf7831071 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -497,6 +497,17 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
                        }
                        break;
 
+               case ACPI_RSC_EXIT_EQ:
+                       /*
+                        * Control - Exit conversion if equal
+                        */
+                       if (*ACPI_ADD_PTR(u8, resource,
+                                         COMPARE_TARGET(info)) ==
+                           COMPARE_VALUE(info)) {
+                               goto exit;
+                       }
+                       break;
+
                default:
 
                        ACPI_ERROR((AE_INFO, "Invalid conversion opcode"));
index 11c0bd7b9cfdea8272f374f51d127654586f0a0c..befe2302f41b31b622dec454924a68ab5b847dd7 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -97,17 +97,17 @@ u8 acpi_rs_decode_bitmask(u16 mask, u8 * list)
 u16 acpi_rs_encode_bitmask(u8 * list, u8 count)
 {
        acpi_native_uint i;
-       u16 mask;
+       acpi_native_uint mask;
 
        ACPI_FUNCTION_ENTRY();
 
        /* Encode the list into a single bitmask */
 
        for (i = 0, mask = 0; i < count; i++) {
-               mask |= (0x0001 << list[i]);
+               mask |= (0x1 << list[i]);
        }
 
-       return (mask);
+       return ((u16) mask);
 }
 
 /*******************************************************************************
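
acpi_rs_encode_bitmask() above folds a list of IRQ or DMA channel numbers into the 16-bit mask used by the small descriptors. A usage sketch, assuming the internal prototype from acresrc.h is in scope:

#include <acpi/acpi.h>

static u16 example_encode(void)
{
	u8 irqs[] = { 3, 5 };                   /* IRQ lines 3 and 5 */

	/* (1 << 3) | (1 << 5) == 0x0028 */
	return acpi_rs_encode_bitmask(irqs, 2);
}
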
index 4c3fd4cdaf7382f6b04fb9b1470716c95454aea7..f59f4c4e034c023c46f99691b09404698b4c8918 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 585ae3c9c8ea98651e33f46de53876fac237f48d..10a36512647ca6107a76c5982a5f3acaa140b571 100644 (file)
@@ -483,8 +483,6 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir,
                struct file_operations *state_fops,
                struct file_operations *alarm_fops, void *data)
 {
-       struct proc_dir_entry *entry = NULL;
-
        if (!*dir) {
                *dir = proc_mkdir(dir_name, parent_dir);
                if (!*dir) {
@@ -494,34 +492,19 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir,
        }
 
        /* 'info' [R] */
-       if (info_fops) {
-               entry = create_proc_entry(ACPI_SBS_FILE_INFO, S_IRUGO, *dir);
-               if (entry) {
-                       entry->proc_fops = info_fops;
-                       entry->data = data;
-                       entry->owner = THIS_MODULE;
-               }
-       }
+       if (info_fops)
+               proc_create_data(ACPI_SBS_FILE_INFO, S_IRUGO, *dir,
+                                info_fops, data);
 
        /* 'state' [R] */
-       if (state_fops) {
-               entry = create_proc_entry(ACPI_SBS_FILE_STATE, S_IRUGO, *dir);
-               if (entry) {
-                       entry->proc_fops = state_fops;
-                       entry->data = data;
-                       entry->owner = THIS_MODULE;
-               }
-       }
+       if (state_fops)
+               proc_create_data(ACPI_SBS_FILE_STATE, S_IRUGO, *dir,
+                                state_fops, data);
 
        /* 'alarm' [R/W] */
-       if (alarm_fops) {
-               entry = create_proc_entry(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir);
-               if (entry) {
-                       entry->proc_fops = alarm_fops;
-                       entry->data = data;
-                       entry->owner = THIS_MODULE;
-               }
-       }
+       if (alarm_fops)
+               proc_create_data(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir,
+                                alarm_fops, data);
        return 0;
 }
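
The hunk above replaces the create_proc_entry() + manual field assignment pattern with proc_create_data(), which takes the fops and private data up front so the entry is never visible half-initialized. The call shape, with hypothetical names:

#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>

static int example_add_proc(struct proc_dir_entry *dir,
			    const struct file_operations *fops, void *data)
{
	/* One call publishes the entry with fops and data already set */
	if (!proc_create_data("info", S_IRUGO, dir, fops, data))
		return -ENOMEM;
	return 0;
}
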
 
index e6ce262b5d449673932c6c49fa1f41bbc8cb8436..6d85289f1c12ad50fdf46ddd72a863c00d1070db 100644 (file)
@@ -677,9 +677,8 @@ acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device,
        device->wakeup.resources.count = package->package.count - 2;
        for (i = 0; i < device->wakeup.resources.count; i++) {
                element = &(package->package.elements[i + 2]);
-               if (element->type != ACPI_TYPE_ANY) {
+               if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
                        return AE_BAD_DATA;
-               }
 
                device->wakeup.resources.handles[i] = element->reference.handle;
        }
@@ -692,6 +691,9 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
        acpi_status status = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *package = NULL;
+       union acpi_object in_arg[3];
+       struct acpi_object_list arg_list = { 3, in_arg };
+       acpi_status psw_status = AE_OK;
 
        struct acpi_device_id button_device_ids[] = {
                {"PNP0C0D", 0},
@@ -700,7 +702,6 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
                {"", 0},
        };
 
-
        /* _PRW */
        status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
@@ -718,6 +719,45 @@ static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
        kfree(buffer.pointer);
 
        device->wakeup.flags.valid = 1;
+       /* Call the _PSW/_DSW object to disable the device's ability to wake
+        * the sleeping system for any ACPI device that has a _PRW object.
+        * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW,
+        * so it is necessary to call the _DSW object first. Only when it is
+        * not present will the _PSW object be used.
+        */
+       /*
+        * Three arguments are needed for the _DSW object.
+        * Argument 0: enable/disable the wake capabilities
+        * When the _DSW object is called to disable the wake capabilities,
+        * only the first argument matters; the values of the other two
+        * arguments are meaningless.
+        */
+       in_arg[0].type = ACPI_TYPE_INTEGER;
+       in_arg[0].integer.value = 0;
+       in_arg[1].type = ACPI_TYPE_INTEGER;
+       in_arg[1].integer.value = 0;
+       in_arg[2].type = ACPI_TYPE_INTEGER;
+       in_arg[2].integer.value = 0;
+       psw_status = acpi_evaluate_object(device->handle, "_DSW",
+                                               &arg_list, NULL);
+       if (ACPI_FAILURE(psw_status) && (psw_status != AE_NOT_FOUND))
+               ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in evaluate _DSW\n"));
+       /*
+        * When the _DSW object is not present, OSPM will call the _PSW object.
+        */
+       if (psw_status == AE_NOT_FOUND) {
+               /*
+                * Only one argument is required for the _PSW object.
+                * Argument 0: enable/disable the wake capabilities
+                */
+               arg_list.count = 1;
+               in_arg[0].integer.value = 0;
+               psw_status = acpi_evaluate_object(device->handle, "_PSW",
+                                               &arg_list, NULL);
+               if (ACPI_FAILURE(psw_status) && (psw_status != AE_NOT_FOUND))
+                       ACPI_DEBUG_PRINT((ACPI_DB_INFO, "error in "
+                                               "evaluate _PSW\n"));
+       }
        /* Power button, Lid switch always enable wakeup */
        if (!acpi_match_device_ids(device, button_device_ids))
                device->wakeup.flags.run_wake = 1;
@@ -882,10 +922,7 @@ static void acpi_device_get_busid(struct acpi_device *device,
 static int
 acpi_video_bus_match(struct acpi_device *device)
 {
-       acpi_handle h_dummy1;
-       acpi_handle h_dummy2;
-       acpi_handle h_dummy3;
-
+       acpi_handle h_dummy;
 
        if (!device)
                return -EINVAL;
@@ -895,18 +932,18 @@ acpi_video_bus_match(struct acpi_device *device)
         */
 
        /* Is this device able to support video switching? */
-       if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy1)) &&
-           ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy2)))
+       if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) &&
+           ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
                return 0;
 
        /* Is this device able to retrieve a video ROM? */
-       if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy1)))
+       if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
                return 0;
 
        /* Is this device able to configure which video head is to be POSTed? */
-       if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy1)) &&
-           ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy2)) &&
-           ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy3)))
+       if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_VPO", &h_dummy)) &&
+           ACPI_SUCCESS(acpi_get_handle(device->handle, "_GPD", &h_dummy)) &&
+           ACPI_SUCCESS(acpi_get_handle(device->handle, "_SPD", &h_dummy)))
                return 0;
 
        return -ENODEV;
index 71183eea7906fe05424a0500676d31107d119c4e..c3b0cd88d09f22972342af30e02abfafe18fadcc 100644 (file)
@@ -51,7 +51,7 @@ static int acpi_sleep_prepare(u32 acpi_state)
 }
 
 #ifdef CONFIG_SUSPEND
-static struct platform_suspend_ops acpi_pm_ops;
+static struct platform_suspend_ops acpi_suspend_ops;
 
 extern void do_suspend_lowlevel(void);
 
@@ -65,11 +65,11 @@ static u32 acpi_suspend_states[] = {
 static int init_8259A_after_S1;
 
 /**
- *     acpi_pm_begin - Set the target system sleep state to the state
+ *     acpi_suspend_begin - Set the target system sleep state to the state
  *             associated with given @pm_state, if supported.
  */
 
-static int acpi_pm_begin(suspend_state_t pm_state)
+static int acpi_suspend_begin(suspend_state_t pm_state)
 {
        u32 acpi_state = acpi_suspend_states[pm_state];
        int error = 0;
@@ -85,13 +85,13 @@ static int acpi_pm_begin(suspend_state_t pm_state)
 }
 
 /**
- *     acpi_pm_prepare - Do preliminary suspend work.
+ *     acpi_suspend_prepare - Do preliminary suspend work.
  *
  *     If necessary, set the firmware waking vector and do arch-specific
  *     nastiness to get the wakeup code to the waking vector.
  */
 
-static int acpi_pm_prepare(void)
+static int acpi_suspend_prepare(void)
 {
        int error = acpi_sleep_prepare(acpi_target_sleep_state);
 
@@ -104,7 +104,7 @@ static int acpi_pm_prepare(void)
 }
 
 /**
- *     acpi_pm_enter - Actually enter a sleep state.
+ *     acpi_suspend_enter - Actually enter a sleep state.
  *     @pm_state: ignored
  *
  *     Flush caches and go to sleep. For STR we have to call arch-specific
@@ -112,7 +112,7 @@ static int acpi_pm_prepare(void)
  *     It's unfortunate, but it works. Please fix if you're feeling frisky.
  */
 
-static int acpi_pm_enter(suspend_state_t pm_state)
+static int acpi_suspend_enter(suspend_state_t pm_state)
 {
        acpi_status status = AE_OK;
        unsigned long flags = 0;
@@ -169,13 +169,13 @@ static int acpi_pm_enter(suspend_state_t pm_state)
 }
 
 /**
- *     acpi_pm_finish - Instruct the platform to leave a sleep state.
+ *     acpi_suspend_finish - Instruct the platform to leave a sleep state.
  *
  *     This is called after we wake back up (or if entering the sleep state
  *     failed). 
  */
 
-static void acpi_pm_finish(void)
+static void acpi_suspend_finish(void)
 {
        u32 acpi_state = acpi_target_sleep_state;
 
@@ -196,19 +196,19 @@ static void acpi_pm_finish(void)
 }
 
 /**
- *     acpi_pm_end - Finish up suspend sequence.
+ *     acpi_suspend_end - Finish up suspend sequence.
  */
 
-static void acpi_pm_end(void)
+static void acpi_suspend_end(void)
 {
        /*
-        * This is necessary in case acpi_pm_finish() is not called during a
+        * This is necessary in case acpi_suspend_finish() is not called during a
         * failing transition to a sleep state.
         */
        acpi_target_sleep_state = ACPI_STATE_S0;
 }
 
-static int acpi_pm_state_valid(suspend_state_t pm_state)
+static int acpi_suspend_state_valid(suspend_state_t pm_state)
 {
        u32 acpi_state;
 
@@ -224,13 +224,13 @@ static int acpi_pm_state_valid(suspend_state_t pm_state)
        }
 }
 
-static struct platform_suspend_ops acpi_pm_ops = {
-       .valid = acpi_pm_state_valid,
-       .begin = acpi_pm_begin,
-       .prepare = acpi_pm_prepare,
-       .enter = acpi_pm_enter,
-       .finish = acpi_pm_finish,
-       .end = acpi_pm_end,
+static struct platform_suspend_ops acpi_suspend_ops = {
+       .valid = acpi_suspend_state_valid,
+       .begin = acpi_suspend_begin,
+       .prepare = acpi_suspend_prepare,
+       .enter = acpi_suspend_enter,
+       .finish = acpi_suspend_finish,
+       .end = acpi_suspend_end,
 };
 
 /*
@@ -492,7 +492,7 @@ int __init acpi_sleep_init(void)
                }
        }
 
-       suspend_set_ops(&acpi_pm_ops);
+       suspend_set_ops(&acpi_suspend_ops);
 #endif
 
 #ifdef CONFIG_HIBERNATION
index f8df5217d477ee525c30f4c1050bf7cc8eb6e35b..8a5fe87105139c5529119bfc69b84d349d2cf1e7 100644 (file)
@@ -440,6 +440,7 @@ acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations acpi_system_wakeup_device_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_system_wakeup_device_open_fs,
        .read = seq_read,
        .write = acpi_system_write_wakeup_device,
@@ -449,6 +450,7 @@ static const struct file_operations acpi_system_wakeup_device_fops = {
 
 #ifdef CONFIG_ACPI_PROCFS
 static const struct file_operations acpi_system_sleep_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_system_sleep_open_fs,
        .read = seq_read,
        .write = acpi_system_write_sleep,
@@ -459,6 +461,7 @@ static const struct file_operations acpi_system_sleep_fops = {
 
 #ifdef HAVE_ACPI_LEGACY_ALARM
 static const struct file_operations acpi_system_alarm_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_system_alarm_open_fs,
        .read = seq_read,
        .write = acpi_system_write_alarm,
@@ -477,37 +480,26 @@ static u32 rtc_handler(void *context)
 
 static int __init acpi_sleep_proc_init(void)
 {
-       struct proc_dir_entry *entry = NULL;
-
        if (acpi_disabled)
                return 0;
 
 #ifdef CONFIG_ACPI_PROCFS
        /* 'sleep' [R/W] */
-       entry =
-           create_proc_entry("sleep", S_IFREG | S_IRUGO | S_IWUSR,
-                             acpi_root_dir);
-       if (entry)
-               entry->proc_fops = &acpi_system_sleep_fops;
+       proc_create("sleep", S_IFREG | S_IRUGO | S_IWUSR,
+                   acpi_root_dir, &acpi_system_sleep_fops);
 #endif                         /* CONFIG_ACPI_PROCFS */
 
 #ifdef HAVE_ACPI_LEGACY_ALARM
        /* 'alarm' [R/W] */
-       entry =
-           create_proc_entry("alarm", S_IFREG | S_IRUGO | S_IWUSR,
-                             acpi_root_dir);
-       if (entry)
-               entry->proc_fops = &acpi_system_alarm_fops;
+       proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR,
+                   acpi_root_dir, &acpi_system_alarm_fops);
 
        acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL);
 #endif                         /* HAVE_ACPI_LEGACY_ALARM */
 
        /* 'wakeup device' [R/W] */
-       entry =
-           create_proc_entry("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
-                             acpi_root_dir);
-       if (entry)
-               entry->proc_fops = &acpi_system_wakeup_device_fops;
+       proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR,
+                   acpi_root_dir, &acpi_system_wakeup_device_fops);
 
        return 0;
 }
index 4749f379a915a4c4f6308f21b21de4205f399387..769f24855eb6356b7cfb437e6982d20149f1aca2 100644 (file)
@@ -396,6 +396,7 @@ static int acpi_system_info_open_fs(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations acpi_system_info_ops = {
+       .owner = THIS_MODULE,
        .open = acpi_system_info_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -406,6 +407,7 @@ static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t,
                                     loff_t *);
 
 static const struct file_operations acpi_system_dsdt_ops = {
+       .owner = THIS_MODULE,
        .read = acpi_system_read_dsdt,
 };
 
@@ -430,6 +432,7 @@ static ssize_t acpi_system_read_fadt(struct file *, char __user *, size_t,
                                     loff_t *);
 
 static const struct file_operations acpi_system_fadt_ops = {
+       .owner = THIS_MODULE,
        .read = acpi_system_read_fadt,
 };
 
@@ -454,31 +457,23 @@ static int acpi_system_procfs_init(void)
 {
        struct proc_dir_entry *entry;
        int error = 0;
-       char *name;
 
        /* 'info' [R] */
-       name = ACPI_SYSTEM_FILE_INFO;
-       entry = create_proc_entry(name, S_IRUGO, acpi_root_dir);
+       entry = proc_create(ACPI_SYSTEM_FILE_INFO, S_IRUGO, acpi_root_dir,
+                           &acpi_system_info_ops);
        if (!entry)
                goto Error;
-       else {
-               entry->proc_fops = &acpi_system_info_ops;
-       }
 
        /* 'dsdt' [R] */
-       name = ACPI_SYSTEM_FILE_DSDT;
-       entry = create_proc_entry(name, S_IRUSR, acpi_root_dir);
-       if (entry)
-               entry->proc_fops = &acpi_system_dsdt_ops;
-       else
+       entry = proc_create(ACPI_SYSTEM_FILE_DSDT, S_IRUSR, acpi_root_dir,
+                           &acpi_system_dsdt_ops);
+       if (!entry)
                goto Error;
 
        /* 'fadt' [R] */
-       name = ACPI_SYSTEM_FILE_FADT;
-       entry = create_proc_entry(name, S_IRUSR, acpi_root_dir);
-       if (entry)
-               entry->proc_fops = &acpi_system_fadt_ops;
-       else
+       entry = proc_create(ACPI_SYSTEM_FILE_FADT, S_IRUSR, acpi_root_dir,
+                           &acpi_system_fadt_ops);
+       if (!entry)
                goto Error;
 
       Done:
index 002bb33003af8c8c30c1b48bfb0607c397d974cf..949d4114eb9feb92ae17ff7183d4cc4b314d0d45 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 058c064948e16894be9ae766bf1c2e97885ff642..9ca3afc98c80027fe46fc037678520b33f3126ad 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -70,12 +70,22 @@ acpi_tb_find_table(char *signature,
 {
        acpi_native_uint i;
        acpi_status status;
+       struct acpi_table_header header;
 
        ACPI_FUNCTION_TRACE(tb_find_table);
 
+       /* Normalize the input strings */
+
+       ACPI_MEMSET(&header, 0, sizeof(struct acpi_table_header));
+       ACPI_STRNCPY(header.signature, signature, ACPI_NAME_SIZE);
+       ACPI_STRNCPY(header.oem_id, oem_id, ACPI_OEM_ID_SIZE);
+       ACPI_STRNCPY(header.oem_table_id, oem_table_id, ACPI_OEM_TABLE_ID_SIZE);
+
+       /* Search for the table */
+
        for (i = 0; i < acpi_gbl_root_table_list.count; ++i) {
                if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
-                               signature, ACPI_NAME_SIZE)) {
+                               header.signature, ACPI_NAME_SIZE)) {
 
                        /* Not the requested table */
 
@@ -104,20 +114,24 @@ acpi_tb_find_table(char *signature,
 
                if (!ACPI_MEMCMP
                    (acpi_gbl_root_table_list.tables[i].pointer->signature,
-                    signature, ACPI_NAME_SIZE) && (!oem_id[0]
-                                                   ||
-                                                   !ACPI_MEMCMP
-                                                   (acpi_gbl_root_table_list.
-                                                    tables[i].pointer->oem_id,
-                                                    oem_id, ACPI_OEM_ID_SIZE))
+                    header.signature, ACPI_NAME_SIZE) && (!oem_id[0]
+                                                          ||
+                                                          !ACPI_MEMCMP
+                                                          (acpi_gbl_root_table_list.
+                                                           tables[i].pointer->
+                                                           oem_id,
+                                                           header.oem_id,
+                                                           ACPI_OEM_ID_SIZE))
                    && (!oem_table_id[0]
                        || !ACPI_MEMCMP(acpi_gbl_root_table_list.tables[i].
-                                       pointer->oem_table_id, oem_table_id,
+                                       pointer->oem_table_id,
+                                       header.oem_table_id,
                                        ACPI_OEM_TABLE_ID_SIZE))) {
                        *table_index = i;
 
                        ACPI_DEBUG_PRINT((ACPI_DB_TABLES,
-                                         "Found table [%4.4s]\n", signature));
+                                         "Found table [%4.4s]\n",
+                                         header.signature));
                        return_ACPI_STATUS(AE_OK);
                }
        }
index 3bc0c67a9283e395943ee0e42c5db83b3c43f134..402f93e1ff2079eb5f49dde56d690513560b1c81 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -125,13 +125,20 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc,
 
        /* The table must be either an SSDT or a PSDT or an OEMx */
 
-       if ((!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_PSDT))
-           &&
-           (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT))
-           && (strncmp(table_desc->pointer->signature, "OEM", 3))) {
-               ACPI_ERROR((AE_INFO,
-                           "Table has invalid signature [%4.4s], must be SSDT, PSDT or OEMx",
-                           table_desc->pointer->signature));
+       if (!ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_PSDT)&&
+           !ACPI_COMPARE_NAME(table_desc->pointer->signature, ACPI_SIG_SSDT)&&
+           strncmp(table_desc->pointer->signature, "OEM", 3)) {
+               /* Check for a printable name */
+               if (acpi_ut_valid_acpi_name(
+                       *(u32 *) table_desc->pointer->signature)) {
+                       ACPI_ERROR((AE_INFO, "Table has invalid signature "
+                                       "[%4.4s], must be SSDT or PSDT",
+                                   table_desc->pointer->signature));
+               } else {
+                       ACPI_ERROR((AE_INFO, "Table has invalid signature "
+                                       "(0x%8.8X), must be SSDT or PSDT",
+                                   *(u32 *) table_desc->pointer->signature));
+               }
                return_ACPI_STATUS(AE_BAD_SIGNATURE);
        }
 
@@ -162,6 +169,7 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc,
 
                acpi_tb_delete_table(table_desc);
                *table_index = i;
+               status = AE_ALREADY_EXISTS;
                goto release;
        }
 
index 010f19652f80a633305a9da6c73729a73f58f545..bc019b9b6a68ec7e690c17da8aa5751e62cdbe30 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -212,7 +212,7 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
 
        if (checksum) {
                ACPI_WARNING((AE_INFO,
-                             "Incorrect checksum in table [%4.4s] -  %2.2X, should be %2.2X",
+                             "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X",
                              table->signature, table->checksum,
                              (u8) (table->checksum - checksum)));
 
index a9e3331fee5deade7a3e8047a3c9da980e103f25..fb57b93c24953cc1cc83e3f822a23a94e88a89d5 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -635,6 +635,95 @@ acpi_status acpi_load_tables(void)
 ACPI_EXPORT_SYMBOL(acpi_load_tables)
 
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_install_table_handler
+ *
+ * PARAMETERS:  Handler         - Table event handler
+ *              Context         - Value passed to the handler on each event
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Install table event handler
+ *
+ ******************************************************************************/
+acpi_status
+acpi_install_table_handler(acpi_tbl_handler handler, void *context)
+{
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_install_table_handler);
+
+       if (!handler) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       /* Don't allow more than one handler */
+
+       if (acpi_gbl_table_handler) {
+               status = AE_ALREADY_EXISTS;
+               goto cleanup;
+       }
+
+       /* Install the handler */
+
+       acpi_gbl_table_handler = handler;
+       acpi_gbl_table_handler_context = context;
+
+      cleanup:
+       (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_install_table_handler)
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_remove_table_handler
+ *
+ * PARAMETERS:  Handler         - Table event handler that was installed
+ *                                previously.
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Remove table event handler
+ *
+ ******************************************************************************/
+acpi_status acpi_remove_table_handler(acpi_tbl_handler handler)
+{
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(acpi_remove_table_handler);
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       /* Make sure that the installed handler is the same */
+
+       if (!handler || handler != acpi_gbl_table_handler) {
+               status = AE_BAD_PARAMETER;
+               goto cleanup;
+       }
+
+       /* Remove the handler */
+
+       acpi_gbl_table_handler = NULL;
+
+      cleanup:
+       (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_remove_table_handler)
+
+
 static int __init acpi_no_auto_ssdt_setup(char *s) {
 
         printk(KERN_NOTICE "ACPI: SSDT auto-load disabled\n");
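
acpi_install_table_handler()/acpi_remove_table_handler() added above let exactly one client watch table events. A hedged usage sketch, assuming the acpi_tbl_handler prototype takes (event, table, context); the function names here are placeholders:

#include <acpi/acpi.h>

static acpi_status my_table_handler(u32 event, void *table, void *context)
{
	/* React to table load/unload events here */
	return AE_OK;
}

static acpi_status install_example(void)
{
	acpi_status status;

	status = acpi_install_table_handler(my_table_handler, NULL);
	if (ACPI_FAILURE(status))
		return status;	/* e.g. AE_ALREADY_EXISTS if a handler is installed */

	/* ... later, on teardown ... */
	return acpi_remove_table_handler(my_table_handler);
}
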
index 9ecb4b6c1e7d5d37b9d6ffd8691fff1efc46d4dd..b8c0dfa084f621e9c9731d75245cfe19fd06fc12 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1bcecc7dd2cab63abd3f06e5d3750694a4fe95e6..504385b1f2116614bd8e53e598b5c093414b2841 100644 (file)
@@ -198,6 +198,7 @@ struct acpi_thermal {
 };
 
 static const struct file_operations acpi_thermal_state_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_thermal_state_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -205,6 +206,7 @@ static const struct file_operations acpi_thermal_state_fops = {
 };
 
 static const struct file_operations acpi_thermal_temp_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_thermal_temp_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -212,6 +214,7 @@ static const struct file_operations acpi_thermal_temp_fops = {
 };
 
 static const struct file_operations acpi_thermal_trip_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_thermal_trip_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -219,6 +222,7 @@ static const struct file_operations acpi_thermal_trip_fops = {
 };
 
 static const struct file_operations acpi_thermal_cooling_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_thermal_cooling_open_fs,
        .read = seq_read,
        .write = acpi_thermal_write_cooling_mode,
@@ -227,6 +231,7 @@ static const struct file_operations acpi_thermal_cooling_fops = {
 };
 
 static const struct file_operations acpi_thermal_polling_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_thermal_polling_open_fs,
        .read = seq_read,
        .write = acpi_thermal_write_polling,
@@ -884,10 +889,15 @@ static void acpi_thermal_check(void *data)
 static int thermal_get_temp(struct thermal_zone_device *thermal, char *buf)
 {
        struct acpi_thermal *tz = thermal->devdata;
+       int result;
 
        if (!tz)
                return -EINVAL;
 
+       result = acpi_thermal_get_temperature(tz);
+       if (result)
+               return result;
+
        return sprintf(buf, "%ld\n", KELVIN_TO_MILLICELSIUS(tz->temperature));
 }
 
@@ -1012,6 +1022,18 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
        return -EINVAL;
 }
 
+static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
+                               unsigned long *temperature) {
+       struct acpi_thermal *tz = thermal->devdata;
+
+       if (tz->trips.critical.flags.valid) {
+               *temperature = KELVIN_TO_MILLICELSIUS(
+                               tz->trips.critical.temperature);
+               return 0;
+       } else
+               return -EINVAL;
+}
+
 typedef int (*cb)(struct thermal_zone_device *, int,
                  struct thermal_cooling_device *);
 static int acpi_thermal_cooling_device_cb(struct thermal_zone_device *thermal,
@@ -1103,6 +1125,7 @@ static struct thermal_zone_device_ops acpi_thermal_zone_ops = {
        .set_mode = thermal_set_mode,
        .get_trip_type = thermal_get_trip_type,
        .get_trip_temp = thermal_get_trip_temp,
+       .get_crit_temp = thermal_get_crit_temp,
 };
 
 static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
@@ -1123,7 +1146,7 @@ static int acpi_thermal_register_thermal_zone(struct acpi_thermal *tz)
 
        for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
                        tz->trips.active[i].flags.valid; i++, trips++);
-       tz->thermal_zone = thermal_zone_device_register("ACPI thermal zone",
+       tz->thermal_zone = thermal_zone_device_register("acpitz",
                                        trips, tz, &acpi_thermal_zone_ops);
        if (IS_ERR(tz->thermal_zone))
                return -ENODEV;
@@ -1419,63 +1442,47 @@ static int acpi_thermal_add_fs(struct acpi_device *device)
        }
 
        /* 'state' [R] */
-       entry = create_proc_entry(ACPI_THERMAL_FILE_STATE,
-                                 S_IRUGO, acpi_device_dir(device));
+       entry = proc_create_data(ACPI_THERMAL_FILE_STATE,
+                                S_IRUGO, acpi_device_dir(device),
+                                &acpi_thermal_state_fops,
+                                acpi_driver_data(device));
        if (!entry)
                return -ENODEV;
-       else {
-               entry->proc_fops = &acpi_thermal_state_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
 
        /* 'temperature' [R] */
-       entry = create_proc_entry(ACPI_THERMAL_FILE_TEMPERATURE,
-                                 S_IRUGO, acpi_device_dir(device));
+       entry = proc_create_data(ACPI_THERMAL_FILE_TEMPERATURE,
+                                S_IRUGO, acpi_device_dir(device),
+                                &acpi_thermal_temp_fops,
+                                acpi_driver_data(device));
        if (!entry)
                return -ENODEV;
-       else {
-               entry->proc_fops = &acpi_thermal_temp_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
 
        /* 'trip_points' [R] */
-       entry = create_proc_entry(ACPI_THERMAL_FILE_TRIP_POINTS,
-                                 S_IRUGO,
-                                 acpi_device_dir(device));
+       entry = proc_create_data(ACPI_THERMAL_FILE_TRIP_POINTS,
+                                S_IRUGO,
+                                acpi_device_dir(device),
+                                &acpi_thermal_trip_fops,
+                                acpi_driver_data(device));
        if (!entry)
                return -ENODEV;
-       else {
-               entry->proc_fops = &acpi_thermal_trip_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
 
        /* 'cooling_mode' [R/W] */
-       entry = create_proc_entry(ACPI_THERMAL_FILE_COOLING_MODE,
-                                 S_IFREG | S_IRUGO | S_IWUSR,
-                                 acpi_device_dir(device));
+       entry = proc_create_data(ACPI_THERMAL_FILE_COOLING_MODE,
+                                S_IFREG | S_IRUGO | S_IWUSR,
+                                acpi_device_dir(device),
+                                &acpi_thermal_cooling_fops,
+                                acpi_driver_data(device));
        if (!entry)
                return -ENODEV;
-       else {
-               entry->proc_fops = &acpi_thermal_cooling_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
 
        /* 'polling_frequency' [R/W] */
-       entry = create_proc_entry(ACPI_THERMAL_FILE_POLLING_FREQ,
-                                 S_IFREG | S_IRUGO | S_IWUSR,
-                                 acpi_device_dir(device));
+       entry = proc_create_data(ACPI_THERMAL_FILE_POLLING_FREQ,
+                                S_IFREG | S_IRUGO | S_IWUSR,
+                                acpi_device_dir(device),
+                                &acpi_thermal_polling_fops,
+                                acpi_driver_data(device));
        if (!entry)
                return -ENODEV;
-       else {
-               entry->proc_fops = &acpi_thermal_polling_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
-
        return 0;
 }
 
@@ -1710,7 +1717,6 @@ static int acpi_thermal_resume(struct acpi_device *device)
        return AE_OK;
 }
 
-#ifdef CONFIG_DMI
 static int thermal_act(const struct dmi_system_id *d) {
 
        if (act == 0) {
@@ -1785,7 +1791,6 @@ static struct dmi_system_id thermal_dmi_table[] __initdata = {
        },
        {}
 };
-#endif /* CONFIG_DMI */
 
 static int __init acpi_thermal_init(void)
 {
index 6e56d5f7c43a05a9e4c1f9d90d16e3cd2c0111a6..ede084829a705a70209641d4f899ae95d8ab5612 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -147,7 +147,7 @@ acpi_status acpi_ut_delete_caches(void)
 
        if (acpi_gbl_display_final_mem_stats) {
                ACPI_STRCPY(buffer, "MEMORY");
-               acpi_db_display_statistics(buffer);
+               (void)acpi_db_display_statistics(buffer);
        }
 #endif
 
index 285a0f5317604c21f088d273f611d53d9eff494d..245fa80cf60004da09db0195fc02ac748d9412a7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 879eaa10d3ae77cf92777f4198dc60eb1b4c7058..655c290aca7b6bb95c69b7147ff4fda7b0d0ca13 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -43,6 +43,8 @@
 
 #include <acpi/acpi.h>
 #include <acpi/amlcode.h>
+#include <acpi/acnamesp.h>
+
 
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utcopy")
@@ -172,22 +174,21 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
 
        case ACPI_TYPE_LOCAL_REFERENCE:
 
-               /*
-                * This is an object reference.  Attempt to dereference it.
-                */
+               /* This is an object reference. */
+
                switch (internal_object->reference.opcode) {
                case AML_INT_NAMEPATH_OP:
 
                        /* For namepath, return the object handle ("reference") */
 
                default:
-                       /*
-                        * Use the object type of "Any" to indicate a reference
-                        * to object containing a handle to an ACPI named object.
-                        */
-                       external_object->type = ACPI_TYPE_ANY;
+
+                       /* We are referring to the namespace node */
+
                        external_object->reference.handle =
                            internal_object->reference.node;
+                       external_object->reference.actual_type =
+                           acpi_ns_get_type(internal_object->reference.node);
                        break;
                }
                break;
@@ -215,6 +216,11 @@ acpi_ut_copy_isimple_to_esimple(union acpi_operand_object *internal_object,
                /*
                 * There is no corresponding external object type
                 */
+               ACPI_ERROR((AE_INFO,
+                           "Unsupported object type, cannot convert to external object: %s",
+                           acpi_ut_get_type_name(ACPI_GET_OBJECT_TYPE
+                                                 (internal_object))));
+
                return_ACPI_STATUS(AE_SUPPORT);
        }
 
@@ -455,6 +461,7 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
        case ACPI_TYPE_STRING:
        case ACPI_TYPE_BUFFER:
        case ACPI_TYPE_INTEGER:
+       case ACPI_TYPE_LOCAL_REFERENCE:
 
                internal_object = acpi_ut_create_internal_object((u8)
                                                                 external_object->
@@ -464,9 +471,18 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
                }
                break;
 
+       case ACPI_TYPE_ANY:     /* This is the case for a NULL object */
+
+               *ret_internal_object = NULL;
+               return_ACPI_STATUS(AE_OK);
+
        default:
                /* All other types are not supported */
 
+               ACPI_ERROR((AE_INFO,
+                           "Unsupported object type, cannot convert to internal object: %s",
+                           acpi_ut_get_type_name(external_object->type)));
+
                return_ACPI_STATUS(AE_SUPPORT);
        }
 
@@ -502,6 +518,10 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
                            external_object->buffer.length);
 
                internal_object->buffer.length = external_object->buffer.length;
+
+               /* Mark buffer data valid */
+
+               internal_object->buffer.flags |= AOPOBJ_DATA_VALID;
                break;
 
        case ACPI_TYPE_INTEGER:
@@ -509,6 +529,15 @@ acpi_ut_copy_esimple_to_isimple(union acpi_object *external_object,
                internal_object->integer.value = external_object->integer.value;
                break;
 
+       case ACPI_TYPE_LOCAL_REFERENCE:
+
+               /* TBD: should validate incoming handle */
+
+               internal_object->reference.opcode = AML_INT_NAMEPATH_OP;
+               internal_object->reference.node =
+                   external_object->reference.handle;
+               break;
+
        default:
                /* Other types can't get here */
                break;
@@ -570,13 +599,17 @@ acpi_ut_copy_epackage_to_ipackage(union acpi_object *external_object,
 
                        /* Truncate package and delete it */
 
-                       package_object->package.count = i;
+                       package_object->package.count = (u32) i;
                        package_elements[i] = NULL;
                        acpi_ut_remove_reference(package_object);
                        return_ACPI_STATUS(status);
                }
        }
 
+       /* Mark package data valid */
+
+       package_object->package.flags |= AOPOBJ_DATA_VALID;
+
        *internal_object = package_object;
        return_ACPI_STATUS(status);
 }
@@ -709,7 +742,15 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
                /*
                 * We copied the reference object, so we now must add a reference
                 * to the object pointed to by the reference
+                *
+                * A DDBHandle reference (from Load/load_table) is a special reference;
+                * its Reference.Object is the table index, so it does not need to
+                * increase the reference count.
                 */
+               if (source_desc->reference.opcode == AML_LOAD_OP) {
+                       break;
+               }
+
                acpi_ut_add_reference(source_desc->reference.object);
                break;
 
index 7361204b1eef2114ca22a65902dae2b6ae75276f..f938f465efa4df6d0c2bd03f9dab870b7ec7e0f7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -68,9 +68,9 @@ static const char *acpi_ut_trim_function_name(const char *function_name);
 
 void acpi_ut_init_stack_ptr_trace(void)
 {
-       u32 current_sp;
+       acpi_size current_sp;
 
-       acpi_gbl_entry_stack_pointer = ACPI_PTR_DIFF(&current_sp, NULL);
+       acpi_gbl_entry_stack_pointer = &current_sp;
 }
 
 /*******************************************************************************
@@ -89,10 +89,8 @@ void acpi_ut_track_stack_ptr(void)
 {
        acpi_size current_sp;
 
-       current_sp = ACPI_PTR_DIFF(&current_sp, NULL);
-
-       if (current_sp < acpi_gbl_lowest_stack_pointer) {
-               acpi_gbl_lowest_stack_pointer = current_sp;
+       if (&current_sp < acpi_gbl_lowest_stack_pointer) {
+               acpi_gbl_lowest_stack_pointer = &current_sp;
        }
 
        if (acpi_gbl_nesting_level > acpi_gbl_deepest_nesting) {
@@ -203,6 +201,7 @@ acpi_ut_debug_print(u32 requested_debug_level,
 
        va_start(args, format);
        acpi_os_vprintf(format, args);
+       va_end(args);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_ut_debug_print)
@@ -240,6 +239,7 @@ acpi_ut_debug_print_raw(u32 requested_debug_level,
 
        va_start(args, format);
        acpi_os_vprintf(format, args);
+       va_end(args);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_ut_debug_print_raw)
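
The two hunks above add the va_end() that C requires to pair with every va_start() before the function returns. The generic pattern, as a plain C sketch:

#include <stdarg.h>
#include <stdio.h>

static void log_msg(const char *format, ...)
{
	va_list args;

	va_start(args, format);
	vprintf(format, args);
	va_end(args);		/* must balance the va_start() above */
}
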
@@ -524,6 +524,11 @@ void acpi_ut_dump_buffer2(u8 * buffer, u32 count, u32 display)
        u32 temp32;
        u8 buf_char;
 
+       if (!buffer) {
+               acpi_os_printf("Null Buffer Pointer in DumpBuffer!\n");
+               return;
+       }
+
        if ((count < 4) || (count & 0x01)) {
                display = DB_BYTE_DISPLAY;
        }
index f777cebdc46dd85f8ec66b491096f9d2f20af1a3..1fbc35139e84ebdd85d1c5de4b89c24f04c8e549 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -158,7 +158,7 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
                                  "***** Mutex %p, OS Mutex %p\n",
                                  object, object->mutex.os_mutex));
 
-               if (object->mutex.os_mutex == acpi_gbl_global_lock_mutex) {
+               if (object == acpi_gbl_global_lock_mutex) {
 
                        /* Global Lock has extra semaphore */
 
@@ -252,6 +252,17 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
                }
                break;
 
+       case ACPI_TYPE_LOCAL_BANK_FIELD:
+
+               ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
+                                 "***** Bank Field %p\n", object));
+
+               second_desc = acpi_ns_get_secondary_object(object);
+               if (second_desc) {
+                       acpi_ut_delete_object_desc(second_desc);
+               }
+               break;
+
        default:
                break;
        }
@@ -524,10 +535,12 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action)
 
                case ACPI_TYPE_LOCAL_REFERENCE:
                        /*
-                        * The target of an Index (a package, string, or buffer) must track
-                        * changes to the ref count of the index.
+                        * The target of an Index (a package, string, or buffer) or a named
+                        * reference must track changes to the ref count of the index or
+                        * target object.
                         */
-                       if (object->reference.opcode == AML_INDEX_OP) {
+                       if ((object->reference.opcode == AML_INDEX_OP) ||
+                           (object->reference.opcode == AML_INT_NAMEPATH_OP)) {
                                next_object = object->reference.object;
                        }
                        break;
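
Editor's note: the rewritten comment above broadens the rule from Index references alone to named references as well; both carry a pointer to a target object whose reference count has to move in step with the reference itself. A minimal sketch of that propagation, using hypothetical structures rather than the ACPICA object model:

enum ref_opcode { REF_INDEX, REF_NAMEPATH, REF_OTHER };

struct object {
        int ref_count;
        enum ref_opcode opcode;
        struct object *target;  /* object this reference points at, if any */
};

/* Propagate a reference-count change to the target object for the
 * reference flavors that actually point at another object. */
void update_reference(struct object *ref, int delta)
{
        ref->ref_count += delta;
        if ((ref->opcode == REF_INDEX || ref->opcode == REF_NAMEPATH) &&
            ref->target)
                ref->target->ref_count += delta;
}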
index 0042b7e78b269647508b41ed6d5d7fbc2f378ef8..05e61be267d5b09450f9299f4a1e1e73c91c62f5 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 630c9a2c5b7bd78887c4eab62924fcde075925b8..a6e71b801d2d5708642a90b76549175957aa2662 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -602,6 +602,48 @@ char *acpi_ut_get_mutex_name(u32 mutex_id)
 
        return (acpi_gbl_mutex_names[mutex_id]);
 }
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_notify_name
+ *
+ * PARAMETERS:  notify_value    - Value from the Notify() request
+ *
+ * RETURN:      String corresponding to the Notify Value.
+ *
+ * DESCRIPTION: Translate a Notify Value to a notify namestring.
+ *
+ ******************************************************************************/
+
+/* Names for Notify() values, used for debug output */
+
+static const char *acpi_gbl_notify_value_names[] = {
+       "Bus Check",
+       "Device Check",
+       "Device Wake",
+       "Eject Request",
+       "Device Check Light",
+       "Frequency Mismatch",
+       "Bus Mode Mismatch",
+       "Power Fault",
+       "Capabilities Check",
+       "Device PLD Check",
+       "Reserved",
+       "System Locality Update"
+};
+
+const char *acpi_ut_get_notify_name(u32 notify_value)
+{
+
+       if (notify_value <= ACPI_NOTIFY_MAX) {
+               return (acpi_gbl_notify_value_names[notify_value]);
+       } else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
+               return ("Reserved");
+       } else {                /* Greater or equal to 0x80 */
+
+               return ("**Device Specific**");
+       }
+}
 #endif
 
 /*******************************************************************************
@@ -675,12 +717,13 @@ void acpi_ut_init_globals(void)
        acpi_gbl_gpe_fadt_blocks[0] = NULL;
        acpi_gbl_gpe_fadt_blocks[1] = NULL;
 
-       /* Global notify handlers */
+       /* Global handlers */
 
        acpi_gbl_system_notify.handler = NULL;
        acpi_gbl_device_notify.handler = NULL;
        acpi_gbl_exception_handler = NULL;
        acpi_gbl_init_handler = NULL;
+       acpi_gbl_table_handler = NULL;
 
        /* Global Lock support */
 
@@ -722,7 +765,7 @@ void acpi_ut_init_globals(void)
        acpi_gbl_root_node_struct.flags = ANOBJ_END_OF_PEER_LIST;
 
 #ifdef ACPI_DEBUG_OUTPUT
-       acpi_gbl_lowest_stack_pointer = ACPI_SIZE_MAX;
+       acpi_gbl_lowest_stack_pointer = ACPI_CAST_PTR(acpi_size, ACPI_SIZE_MAX);
 #endif
 
 #ifdef ACPI_DBG_TRACK_ALLOCATIONS
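
Editor's note: acpi_ut_get_notify_name(), added above, is a bounded table lookup over the standard Notify() values with two fallback strings. A self-contained sketch of the same lookup, with hypothetical stand-ins for the ACPI_NOTIFY_MAX and ACPI_MAX_SYS_NOTIFY limits:

#include <stdio.h>

#define NOTIFY_MAX      0x0B    /* hypothetical stand-in for ACPI_NOTIFY_MAX */
#define MAX_SYS_NOTIFY  0x7F    /* hypothetical stand-in for ACPI_MAX_SYS_NOTIFY */

static const char *notify_value_names[] = {
        "Bus Check", "Device Check", "Device Wake", "Eject Request",
        "Device Check Light", "Frequency Mismatch", "Bus Mode Mismatch",
        "Power Fault", "Capabilities Check", "Device PLD Check",
        "Reserved", "System Locality Update"
};

static const char *get_notify_name(unsigned int value)
{
        if (value <= NOTIFY_MAX)
                return notify_value_names[value];       /* 12 known names */
        if (value <= MAX_SYS_NOTIFY)
                return "Reserved";
        return "**Device Specific**";   /* 0x80 and above */
}

int main(void)
{
        printf("%s\n", get_notify_name(0x02));  /* Device Wake */
        printf("%s\n", get_notify_name(0x85));  /* **Device Specific** */
        return 0;
}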
index ad3c0d0a5cf8fa67d40b5cea3b7e4bc311bd2cf9..cae515fc02d318db0091f1c98591e39549c97256 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -125,9 +125,12 @@ void acpi_ut_subsystem_shutdown(void)
        acpi_gbl_startup_flags = 0;
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Shutting down ACPI Subsystem\n"));
 
+#ifndef ACPI_ASL_COMPILER
+
        /* Close the acpi_event Handling */
 
        acpi_ev_terminate();
+#endif
 
        /* Close the Namespace */
 
index 0c56a0d20b29064fd375f50418feafc89fca1922..c927324fdd26536a2905088397900a858208079c 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -276,7 +276,7 @@ acpi_ut_short_divide(acpi_integer in_dividend,
                *out_quotient = in_dividend / divisor;
        }
        if (out_remainder) {
-               *out_remainder = (u32) in_dividend % divisor;
+               *out_remainder = (u32) (in_dividend % divisor);
        }
 
        return_ACPI_STATUS(AE_OK);
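
Editor's note: the parenthesization change in utmath.c above matters because a cast binds tighter than the % operator. "(u32) in_dividend % divisor" truncates the 64-bit dividend to 32 bits before taking the remainder, while "(u32) (in_dividend % divisor)" computes the remainder at full width and only narrows the result. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t dividend = 0x100000007ULL;     /* larger than 32 bits */
        uint32_t divisor  = 10;

        /* Cast applied to the dividend first: wrong for 64-bit values. */
        uint32_t wrong = (uint32_t) dividend % divisor;    /* 7 % 10 = 7 */

        /* Remainder computed at 64 bits, then narrowed: intended result. */
        uint32_t right = (uint32_t) (dividend % divisor);  /* 4294967303 % 10 = 3 */

        printf("wrong=%u right=%u\n", wrong, right);
        return 0;
}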
index 2d19f71e9cfa44c95b1588a484f622d549340444..e4ba7192cd15274ea634d666e5c13a60efb17f51 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -1033,6 +1033,7 @@ acpi_ut_error(char *module_name, u32 line_number, char *format, ...)
        va_start(args, format);
        acpi_os_vprintf(format, args);
        acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+       va_end(args);
 }
 
 void ACPI_INTERNAL_VAR_XFACE
@@ -1061,6 +1062,7 @@ acpi_ut_warning(char *module_name, u32 line_number, char *format, ...)
        va_start(args, format);
        acpi_os_vprintf(format, args);
        acpi_os_printf(" [%X]\n", ACPI_CA_VERSION);
+       va_end(args);
 }
 
 void ACPI_INTERNAL_VAR_XFACE
@@ -1077,4 +1080,5 @@ acpi_ut_info(char *module_name, u32 line_number, char *format, ...)
        va_start(args, format);
        acpi_os_vprintf(format, args);
        acpi_os_printf("\n");
+       va_end(args);
 }
index 4820bc86d1f5d44ec1e90e07332e93a57974a0ce..f7d602b1a89467bedd215b222899c9ca47880faa 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e08b3fa6639f593c0c1d4c893d591be26d36243c..e68466de80447c7068ffbd740ff9f265a22421fd 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -107,6 +107,7 @@ union acpi_operand_object *acpi_ut_create_internal_object_dbg(char *module_name,
        switch (type) {
        case ACPI_TYPE_REGION:
        case ACPI_TYPE_BUFFER_FIELD:
+       case ACPI_TYPE_LOCAL_BANK_FIELD:
 
                /* These types require a secondary object */
 
@@ -469,9 +470,8 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
        case ACPI_TYPE_PROCESSOR:
        case ACPI_TYPE_POWER:
 
-               /*
-                * No extra data for these types
-                */
+               /* No extra data for these types */
+
                break;
 
        case ACPI_TYPE_LOCAL_REFERENCE:
index b630ee137ee197b4a2c8c9555ce1879886a26a3b..c3e3e1308edc76a6a572f59090ff448db5187525 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index edcaafad0a31f6f0af4cd03487555c99f0c193fe..63a6d3d77d88fc0c4e9de3105904a2a4302c278c 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2d496918b3cda4593d383b36cce08f18e2535253..f8bdadf3c32fffba99a72c5e4d905d9634331fdc 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -49,6 +49,7 @@
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utxface")
 
+#ifndef ACPI_ASL_COMPILER
 /*******************************************************************************
  *
  * FUNCTION:    acpi_initialize_subsystem
@@ -192,24 +193,6 @@ acpi_status acpi_enable_subsystem(u32 flags)
                }
        }
 
-       /*
-        * Complete the GPE initialization for the GPE blocks defined in the FADT
-        * (GPE block 0 and 1).
-        *
-        * Note1: This is where the _PRW methods are executed for the GPEs. These
-        * methods can only be executed after the SCI and Global Lock handlers are
-        * installed and initialized.
-        *
-        * Note2: Currently, there seems to be no need to run the _REG methods
-        * before execution of the _PRW methods and enabling of the GPEs.
-        */
-       if (!(flags & ACPI_NO_EVENT_INIT)) {
-               status = acpi_ev_install_fadt_gpes();
-               if (ACPI_FAILURE(status)) {
-                       return (status);
-               }
-       }
-
        return_ACPI_STATUS(status);
 }
 
@@ -279,6 +262,23 @@ acpi_status acpi_initialize_objects(u32 flags)
                }
        }
 
+       /*
+        * Complete the GPE initialization for the GPE blocks defined in the FADT
+        * (GPE block 0 and 1).
+        *
+        * Note1: This is where the _PRW methods are executed for the GPEs. These
+        * methods can only be executed after the SCI and Global Lock handlers are
+        * installed and initialized.
+        *
+        * Note2: Currently, there seems to be no need to run the _REG methods
+        * before execution of the _PRW methods and enabling of the GPEs.
+        */
+       if (!(flags & ACPI_NO_EVENT_INIT)) {
+               status = acpi_ev_install_fadt_gpes();
+               if (ACPI_FAILURE(status))
+                       return (status);
+       }
+
        /*
         * Empty the caches (delete the cached objects) on the assumption that
         * the table load filled them up more than they will be at runtime --
@@ -292,6 +292,7 @@ acpi_status acpi_initialize_objects(u32 flags)
 
 ACPI_EXPORT_SYMBOL(acpi_initialize_objects)
 
+#endif
 /*******************************************************************************
  *
  * FUNCTION:    acpi_terminate
@@ -335,6 +336,7 @@ acpi_status acpi_terminate(void)
 }
 
 ACPI_EXPORT_SYMBOL(acpi_terminate)
+#ifndef ACPI_ASL_COMPILER
 #ifdef ACPI_FUTURE_USAGE
 /*******************************************************************************
  *
@@ -490,3 +492,4 @@ acpi_status acpi_purge_cached_objects(void)
 }
 
 ACPI_EXPORT_SYMBOL(acpi_purge_cached_objects)
+#endif
index 44ea60cf21c09c00adc1135394decbb4af2faa1d..1009261438187d90e201f08ae4fd08d937a92bc1 100644 (file)
@@ -398,7 +398,7 @@ acpi_evaluate_reference(acpi_handle handle,
 
                element = &(package->package.elements[i]);
 
-               if (element->type != ACPI_TYPE_ANY) {
+               if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
                        status = AE_BAD_DATA;
                        printk(KERN_ERR PREFIX
                                    "Expecting a [Reference] package element, found type %X\n",
index 980a74188781eba10b5372934de94bef0e2a2ea2..5e5dda3a3027abfa1554a9b44c5ae636cd1e7133 100644 (file)
@@ -57,8 +57,6 @@
 #define ACPI_VIDEO_NOTIFY_ZERO_BRIGHTNESS      0x88
 #define ACPI_VIDEO_NOTIFY_DISPLAY_OFF          0x89
 
-#define ACPI_VIDEO_HEAD_INVALID                (~0u - 1)
-#define ACPI_VIDEO_HEAD_END            (~0u)
 #define MAX_NAME_LEN   20
 
 #define ACPI_VIDEO_DISPLAY_CRT 1
@@ -192,6 +190,7 @@ struct acpi_video_device {
 /* bus */
 static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_video_bus_info_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_video_bus_info_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -200,6 +199,7 @@ static struct file_operations acpi_video_bus_info_fops = {
 
 static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_video_bus_ROM_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_video_bus_ROM_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -209,6 +209,7 @@ static struct file_operations acpi_video_bus_ROM_fops = {
 static int acpi_video_bus_POST_info_open_fs(struct inode *inode,
                                            struct file *file);
 static struct file_operations acpi_video_bus_POST_info_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_video_bus_POST_info_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -217,6 +218,7 @@ static struct file_operations acpi_video_bus_POST_info_fops = {
 
 static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_video_bus_POST_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_video_bus_POST_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -225,6 +227,7 @@ static struct file_operations acpi_video_bus_POST_fops = {
 
 static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file);
 static struct file_operations acpi_video_bus_DOS_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_video_bus_DOS_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -235,6 +238,7 @@ static struct file_operations acpi_video_bus_DOS_fops = {
 static int acpi_video_device_info_open_fs(struct inode *inode,
                                          struct file *file);
 static struct file_operations acpi_video_device_info_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_video_device_info_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -244,6 +248,7 @@ static struct file_operations acpi_video_device_info_fops = {
 static int acpi_video_device_state_open_fs(struct inode *inode,
                                           struct file *file);
 static struct file_operations acpi_video_device_state_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_video_device_state_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -253,6 +258,7 @@ static struct file_operations acpi_video_device_state_fops = {
 static int acpi_video_device_brightness_open_fs(struct inode *inode,
                                                struct file *file);
 static struct file_operations acpi_video_device_brightness_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_video_device_brightness_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -262,6 +268,7 @@ static struct file_operations acpi_video_device_brightness_fops = {
 static int acpi_video_device_EDID_open_fs(struct inode *inode,
                                          struct file *file);
 static struct file_operations acpi_video_device_EDID_fops = {
+       .owner = THIS_MODULE,
        .open = acpi_video_device_EDID_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
@@ -734,21 +741,19 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
                if (IS_ERR(device->cdev))
                        return;
 
-               if (device->cdev) {
-                       printk(KERN_INFO PREFIX
-                               "%s is registered as cooling_device%d\n",
-                               device->dev->dev.bus_id, device->cdev->id);
-                       result = sysfs_create_link(&device->dev->dev.kobj,
-                                         &device->cdev->device.kobj,
-                                         "thermal_cooling");
-                       if (result)
-                               printk(KERN_ERR PREFIX "Create sysfs link\n");
-                       result = sysfs_create_link(&device->cdev->device.kobj,
-                                         &device->dev->dev.kobj,
-                                         "device");
-                        if (result)
-                               printk(KERN_ERR PREFIX "Create sysfs link\n");
-               }
+               printk(KERN_INFO PREFIX
+                       "%s is registered as cooling_device%d\n",
+                       device->dev->dev.bus_id, device->cdev->id);
+               result = sysfs_create_link(&device->dev->dev.kobj,
+                               &device->cdev->device.kobj,
+                               "thermal_cooling");
+               if (result)
+                       printk(KERN_ERR PREFIX "Create sysfs link\n");
+               result = sysfs_create_link(&device->cdev->device.kobj,
+                               &device->dev->dev.kobj, "device");
+               if (result)
+                       printk(KERN_ERR PREFIX "Create sysfs link\n");
+
        }
        if (device->cap._DCS && device->cap._DSS){
                static int count = 0;
@@ -1050,87 +1055,82 @@ acpi_video_device_EDID_open_fs(struct inode *inode, struct file *file)
 
 static int acpi_video_device_add_fs(struct acpi_device *device)
 {
-       struct proc_dir_entry *entry = NULL;
+       struct proc_dir_entry *entry, *device_dir;
        struct acpi_video_device *vid_dev;
 
-
-       if (!device)
-               return -ENODEV;
-
        vid_dev = acpi_driver_data(device);
        if (!vid_dev)
                return -ENODEV;
 
-       if (!acpi_device_dir(device)) {
-               acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
-                                                    vid_dev->video->dir);
-               if (!acpi_device_dir(device))
-                       return -ENODEV;
-               acpi_device_dir(device)->owner = THIS_MODULE;
-       }
+       device_dir = proc_mkdir(acpi_device_bid(device),
+                               vid_dev->video->dir);
+       if (!device_dir)
+               return -ENOMEM;
+
+       device_dir->owner = THIS_MODULE;
 
        /* 'info' [R] */
-       entry = create_proc_entry("info", S_IRUGO, acpi_device_dir(device));
+       entry = proc_create_data("info", S_IRUGO, device_dir,
+                       &acpi_video_device_info_fops, acpi_driver_data(device));
        if (!entry)
-               return -ENODEV;
-       else {
-               entry->proc_fops = &acpi_video_device_info_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
+               goto err_remove_dir;
 
        /* 'state' [R/W] */
-       entry =
-           create_proc_entry("state", S_IFREG | S_IRUGO | S_IWUSR,
-                             acpi_device_dir(device));
+       acpi_video_device_state_fops.write = acpi_video_device_write_state;
+       entry = proc_create_data("state", S_IFREG | S_IRUGO | S_IWUSR,
+                                device_dir,
+                                &acpi_video_device_state_fops,
+                                acpi_driver_data(device));
        if (!entry)
-               return -ENODEV;
-       else {
-               acpi_video_device_state_fops.write = acpi_video_device_write_state;
-               entry->proc_fops = &acpi_video_device_state_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
+               goto err_remove_info;
 
        /* 'brightness' [R/W] */
-       entry =
-           create_proc_entry("brightness", S_IFREG | S_IRUGO | S_IWUSR,
-                             acpi_device_dir(device));
+       acpi_video_device_brightness_fops.write =
+               acpi_video_device_write_brightness;
+       entry = proc_create_data("brightness", S_IFREG | S_IRUGO | S_IWUSR,
+                                device_dir,
+                                &acpi_video_device_brightness_fops,
+                                acpi_driver_data(device));
        if (!entry)
-               return -ENODEV;
-       else {
-               acpi_video_device_brightness_fops.write = acpi_video_device_write_brightness;
-               entry->proc_fops = &acpi_video_device_brightness_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
+               goto err_remove_state;
 
        /* 'EDID' [R] */
-       entry = create_proc_entry("EDID", S_IRUGO, acpi_device_dir(device));
+       entry = proc_create_data("EDID", S_IRUGO, device_dir,
+                                &acpi_video_device_EDID_fops,
+                                acpi_driver_data(device));
        if (!entry)
-               return -ENODEV;
-       else {
-               entry->proc_fops = &acpi_video_device_EDID_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
+               goto err_remove_brightness;
+
+       acpi_device_dir(device) = device_dir;
 
        return 0;
+
+ err_remove_brightness:
+       remove_proc_entry("brightness", device_dir);
+ err_remove_state:
+       remove_proc_entry("state", device_dir);
+ err_remove_info:
+       remove_proc_entry("info", device_dir);
+ err_remove_dir:
+       remove_proc_entry(acpi_device_bid(device), vid_dev->video->dir);
+       return -ENOMEM;
 }
 
 static int acpi_video_device_remove_fs(struct acpi_device *device)
 {
        struct acpi_video_device *vid_dev;
+       struct proc_dir_entry *device_dir;
 
        vid_dev = acpi_driver_data(device);
        if (!vid_dev || !vid_dev->video || !vid_dev->video->dir)
                return -ENODEV;
 
-       if (acpi_device_dir(device)) {
-               remove_proc_entry("info", acpi_device_dir(device));
-               remove_proc_entry("state", acpi_device_dir(device));
-               remove_proc_entry("brightness", acpi_device_dir(device));
-               remove_proc_entry("EDID", acpi_device_dir(device));
+       device_dir = acpi_device_dir(device);
+       if (device_dir) {
+               remove_proc_entry("info", device_dir);
+               remove_proc_entry("state", device_dir);
+               remove_proc_entry("brightness", device_dir);
+               remove_proc_entry("EDID", device_dir);
                remove_proc_entry(acpi_device_bid(device), vid_dev->video->dir);
                acpi_device_dir(device) = NULL;
        }
@@ -1337,94 +1337,81 @@ acpi_video_bus_write_DOS(struct file *file,
 
 static int acpi_video_bus_add_fs(struct acpi_device *device)
 {
-       struct proc_dir_entry *entry = NULL;
-       struct acpi_video_bus *video;
+       struct acpi_video_bus *video = acpi_driver_data(device);
+       struct proc_dir_entry *device_dir;
+       struct proc_dir_entry *entry;
 
+       device_dir = proc_mkdir(acpi_device_bid(device), acpi_video_dir);
+       if (!device_dir)
+               return -ENOMEM;
 
-       video = acpi_driver_data(device);
-
-       if (!acpi_device_dir(device)) {
-               acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device),
-                                                    acpi_video_dir);
-               if (!acpi_device_dir(device))
-                       return -ENODEV;
-               video->dir = acpi_device_dir(device);
-               acpi_device_dir(device)->owner = THIS_MODULE;
-       }
+       device_dir->owner = THIS_MODULE;
 
        /* 'info' [R] */
-       entry = create_proc_entry("info", S_IRUGO, acpi_device_dir(device));
+       entry = proc_create_data("info", S_IRUGO, device_dir,
+                                &acpi_video_bus_info_fops,
+                                acpi_driver_data(device));
        if (!entry)
-               return -ENODEV;
-       else {
-               entry->proc_fops = &acpi_video_bus_info_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
+               goto err_remove_dir;
 
        /* 'ROM' [R] */
-       entry = create_proc_entry("ROM", S_IRUGO, acpi_device_dir(device));
+       entry = proc_create_data("ROM", S_IRUGO, device_dir,
+                                &acpi_video_bus_ROM_fops,
+                                acpi_driver_data(device));
        if (!entry)
-               return -ENODEV;
-       else {
-               entry->proc_fops = &acpi_video_bus_ROM_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
+               goto err_remove_info;
 
        /* 'POST_info' [R] */
-       entry =
-           create_proc_entry("POST_info", S_IRUGO, acpi_device_dir(device));
+       entry = proc_create_data("POST_info", S_IRUGO, device_dir,
+                                &acpi_video_bus_POST_info_fops,
+                                acpi_driver_data(device));
        if (!entry)
-               return -ENODEV;
-       else {
-               entry->proc_fops = &acpi_video_bus_POST_info_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
+               goto err_remove_rom;
 
        /* 'POST' [R/W] */
-       entry =
-           create_proc_entry("POST", S_IFREG | S_IRUGO | S_IRUSR,
-                             acpi_device_dir(device));
+       acpi_video_bus_POST_fops.write = acpi_video_bus_write_POST;
+       entry = proc_create_data("POST", S_IFREG | S_IRUGO | S_IWUSR,
+                                device_dir,
+                                &acpi_video_bus_POST_fops,
+                                acpi_driver_data(device));
        if (!entry)
-               return -ENODEV;
-       else {
-               acpi_video_bus_POST_fops.write = acpi_video_bus_write_POST;
-               entry->proc_fops = &acpi_video_bus_POST_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
+               goto err_remove_post_info;
 
        /* 'DOS' [R/W] */
-       entry =
-           create_proc_entry("DOS", S_IFREG | S_IRUGO | S_IRUSR,
-                             acpi_device_dir(device));
+       acpi_video_bus_DOS_fops.write = acpi_video_bus_write_DOS;
+       entry = proc_create_data("DOS", S_IFREG | S_IRUGO | S_IWUSR,
+                                device_dir,
+                                &acpi_video_bus_DOS_fops,
+                                acpi_driver_data(device));
        if (!entry)
-               return -ENODEV;
-       else {
-               acpi_video_bus_DOS_fops.write = acpi_video_bus_write_DOS;
-               entry->proc_fops = &acpi_video_bus_DOS_fops;
-               entry->data = acpi_driver_data(device);
-               entry->owner = THIS_MODULE;
-       }
+               goto err_remove_post;
 
+       video->dir = acpi_device_dir(device) = device_dir;
        return 0;
+
+ err_remove_post:
+       remove_proc_entry("POST", device_dir);
+ err_remove_post_info:
+       remove_proc_entry("POST_info", device_dir);
+ err_remove_rom:
+       remove_proc_entry("ROM", device_dir);
+ err_remove_info:
+       remove_proc_entry("info", device_dir);
+ err_remove_dir:
+       remove_proc_entry(acpi_device_bid(device), acpi_video_dir);
+       return -ENOMEM;
 }
 
 static int acpi_video_bus_remove_fs(struct acpi_device *device)
 {
-       struct acpi_video_bus *video;
+       struct proc_dir_entry *device_dir = acpi_device_dir(device);
 
-
-       video = acpi_driver_data(device);
-
-       if (acpi_device_dir(device)) {
-               remove_proc_entry("info", acpi_device_dir(device));
-               remove_proc_entry("ROM", acpi_device_dir(device));
-               remove_proc_entry("POST_info", acpi_device_dir(device));
-               remove_proc_entry("POST", acpi_device_dir(device));
-               remove_proc_entry("DOS", acpi_device_dir(device));
+       if (device_dir) {
+               remove_proc_entry("info", device_dir);
+               remove_proc_entry("ROM", device_dir);
+               remove_proc_entry("POST_info", device_dir);
+               remove_proc_entry("POST", device_dir);
+               remove_proc_entry("DOS", device_dir);
                remove_proc_entry(acpi_device_bid(device), acpi_video_dir);
                acpi_device_dir(device) = NULL;
        }
@@ -1440,11 +1427,15 @@ static int acpi_video_bus_remove_fs(struct acpi_device *device)
 static struct acpi_video_device_attrib*
 acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id)
 {
-       int count;
+       struct acpi_video_enumerated_device *ids;
+       int i;
+
+       for (i = 0; i < video->attached_count; i++) {
+               ids = &video->attached_array[i];
+               if ((ids->value.int_val & 0xffff) == device_id)
+                       return &ids->value.attrib;
+       }
 
-       for(count = 0; count < video->attached_count; count++)
-               if((video->attached_array[count].value.int_val & 0xffff) == device_id)
-                       return &(video->attached_array[count].value.attrib);
        return NULL;
 }
 
@@ -1571,20 +1562,16 @@ static void
 acpi_video_device_bind(struct acpi_video_bus *video,
                       struct acpi_video_device *device)
 {
+       struct acpi_video_enumerated_device *ids;
        int i;
 
-#define IDS_VAL(i) video->attached_array[i].value.int_val
-#define IDS_BIND(i) video->attached_array[i].bind_info
-
-       for (i = 0; IDS_VAL(i) != ACPI_VIDEO_HEAD_INVALID &&
-            i < video->attached_count; i++) {
-               if (device->device_id == (IDS_VAL(i) & 0xffff)) {
-                       IDS_BIND(i) = device;
+       for (i = 0; i < video->attached_count; i++) {
+               ids = &video->attached_array[i];
+               if (device->device_id == (ids->value.int_val & 0xffff)) {
+                       ids->bind_info = device;
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "device_bind %d\n", i));
                }
        }
-#undef IDS_VAL
-#undef IDS_BIND
 }
 
 /*
@@ -1603,7 +1590,7 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
        int status;
        int count;
        int i;
-       struct acpi_video_enumerated_device *active_device_list;
+       struct acpi_video_enumerated_device *active_list;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *dod = NULL;
        union acpi_object *obj;
@@ -1624,13 +1611,10 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d video heads in _DOD\n",
                          dod->package.count));
 
-       active_device_list = kmalloc((1 +
-                                     dod->package.count) *
-                                    sizeof(struct
-                                           acpi_video_enumerated_device),
-                                    GFP_KERNEL);
-
-       if (!active_device_list) {
+       active_list = kcalloc(1 + dod->package.count,
+                             sizeof(struct acpi_video_enumerated_device),
+                             GFP_KERNEL);
+       if (!active_list) {
                status = -ENOMEM;
                goto out;
        }
@@ -1640,23 +1624,24 @@ static int acpi_video_device_enumerate(struct acpi_video_bus *video)
                obj = &dod->package.elements[i];
 
                if (obj->type != ACPI_TYPE_INTEGER) {
-                       printk(KERN_ERR PREFIX "Invalid _DOD data\n");
-                       active_device_list[i].value.int_val =
-                           ACPI_VIDEO_HEAD_INVALID;
+                       printk(KERN_ERR PREFIX
+                               "Invalid _DOD data in element %d\n", i);
+                       continue;
                }
-               active_device_list[i].value.int_val = obj->integer.value;
-               active_device_list[i].bind_info = NULL;
+
+               active_list[count].value.int_val = obj->integer.value;
+               active_list[count].bind_info = NULL;
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "dod element[%d] = %d\n", i,
                                  (int)obj->integer.value));
                count++;
        }
-       active_device_list[count].value.int_val = ACPI_VIDEO_HEAD_END;
 
        kfree(video->attached_array);
 
-       video->attached_array = active_device_list;
+       video->attached_array = active_list;
        video->attached_count = count;
-      out:
+
+ out:
        kfree(buffer.pointer);
        return status;
 }
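
Editor's note: the video driver conversion above replaces create_proc_entry() plus hand-filled fields with proc_create_data() and unwinds partially created entries through a chain of error labels. A compact userspace sketch of the same create-then-roll-back-in-reverse pattern, using hypothetical create_entry()/remove_entry() helpers rather than the procfs API:

#include <stdlib.h>

/* Hypothetical stand-ins for entry creation and removal. */
static void *create_entry(const char *name)
{
        (void)name;
        return malloc(1);
}

static void remove_entry(void *entry)
{
        free(entry);
}

/* Create three entries; on any failure, remove the ones already created
 * in reverse order, as the err_remove_* labels in the hunks above do.
 * On success the entries intentionally stay registered. */
int add_entries(void)
{
        void *info, *state, *brightness;

        info = create_entry("info");
        if (!info)
                goto err;
        state = create_entry("state");
        if (!state)
                goto err_info;
        brightness = create_entry("brightness");
        if (!brightness)
                goto err_state;
        return 0;

err_state:
        remove_entry(state);
err_info:
        remove_entry(info);
err:
        return -1;
}

The labels are ordered so that each failure point removes exactly the entries created before it, which is what the err_remove_* chain in the diff achieves.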
index 292aa9a0f02f8d5b3913bdc5dca00ab86fda641f..1c11df9a5f32a92e3ff7fda7700450deccf228b4 100644 (file)
@@ -566,11 +566,11 @@ config PATA_RADISYS
 
          If unsure, say N.
 
-config PATA_RB500
-       tristate "RouterBoard 500 PATA CompactFlash support"
-       depends on MIKROTIK_RB500
+config PATA_RB532
+       tristate "RouterBoard 532 PATA CompactFlash support"
+       depends on MIKROTIK_RB532
        help
-         This option enables support for the RouterBoard 500
+         This option enables support for the RouterBoard 532
          PATA CompactFlash controller.
 
          If unsure, say N.
index 1fbc2aa648b739955f1bb746c2ba0703039e4bb7..b693d829383a3b7ff9e33bd0d181523f23297379 100644 (file)
@@ -55,7 +55,7 @@ obj-$(CONFIG_PATA_PDC2027X)   += pata_pdc2027x.o
 obj-$(CONFIG_PATA_PDC_OLD)     += pata_pdc202xx_old.o
 obj-$(CONFIG_PATA_QDI)         += pata_qdi.o
 obj-$(CONFIG_PATA_RADISYS)     += pata_radisys.o
-obj-$(CONFIG_PATA_RB500)       += pata_rb500_cf.o
+obj-$(CONFIG_PATA_RB532)       += pata_rb532_cf.o
 obj-$(CONFIG_PATA_RZ1000)      += pata_rz1000.o
 obj-$(CONFIG_PATA_SC1200)      += pata_sc1200.o
 obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
index 7c4f886f1f1635cc99911db1085f5630ac9ba77f..8cace9aa9c0398bafe101115ba511bb5dd29f0fb 100644 (file)
@@ -358,7 +358,7 @@ static const struct ata_port_info ahci_port_info[] = {
        /* board_ahci_sb600 */
        {
                AHCI_HFLAGS     (AHCI_HFLAG_IGN_SERR_INTERNAL |
-                                AHCI_HFLAG_32BIT_ONLY |
+                                AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
                                 AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP),
                .flags          = AHCI_FLAG_COMMON,
                .pio_mask       = 0x1f, /* pio0-4 */
index 51b7d2fad36a2539d72da75f44bcb2683ee041a5..3bc488538204abe225b388346d3b2a65a1c5ed5d 100644 (file)
@@ -3933,6 +3933,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 
        /* Devices which get the IVB wrong */
        { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
+       /* Maybe we should just blacklist TSSTcorp... */
+       { "TSSTcorp CDDVDW SH-S202H", "SB00",     ATA_HORKAGE_IVB, },
+       { "TSSTcorp CDDVDW SH-S202H", "SB01",     ATA_HORKAGE_IVB, },
        { "TSSTcorp CDDVDW SH-S202J", "SB00",     ATA_HORKAGE_IVB, },
        { "TSSTcorp CDDVDW SH-S202J", "SB01",     ATA_HORKAGE_IVB, },
        { "TSSTcorp CDDVDW SH-S202N", "SB00",     ATA_HORKAGE_IVB, },
index a34f32442edf8687c832213f02c3a01e144d49a1..3ce43920e459b037d685239ec1315ed11097df6a 100644 (file)
 
 #include "libata.h"
 
-#define SECTOR_SIZE    512
+#define SECTOR_SIZE            512
+#define ATA_SCSI_RBUF_SIZE     4096
+
+static DEFINE_SPINLOCK(ata_scsi_rbuf_lock);
+static u8 ata_scsi_rbuf[ATA_SCSI_RBUF_SIZE];
 
 typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);
 
@@ -179,6 +183,13 @@ DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
                ata_scsi_lpm_show, ata_scsi_lpm_put);
 EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy);
 
+static void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
+{
+       cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
+
+       scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
+}
+
 static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
                                   void (*done)(struct scsi_cmnd *))
 {
@@ -1632,53 +1643,48 @@ defer:
 
 /**
  *     ata_scsi_rbuf_get - Map response buffer.
- *     @cmd: SCSI command containing buffer to be mapped.
- *     @buf_out: Pointer to mapped area.
+ *     @flags: unsigned long variable to store irq enable status
+ *     @copy_in: copy in from user buffer
  *
- *     Maps buffer contained within SCSI command @cmd.
+ *     Prepare buffer for simulated SCSI commands.
  *
  *     LOCKING:
- *     spin_lock_irqsave(host lock)
+ *     spin_lock_irqsave(ata_scsi_rbuf_lock) on success
  *
  *     RETURNS:
- *     Length of response buffer.
+ *     Pointer to response buffer.
  */
-
-static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
+static void *ata_scsi_rbuf_get(struct scsi_cmnd *cmd, bool copy_in,
+                              unsigned long *flags)
 {
-       u8 *buf;
-       unsigned int buflen;
-
-       struct scatterlist *sg = scsi_sglist(cmd);
+       spin_lock_irqsave(&ata_scsi_rbuf_lock, *flags);
 
-       if (sg) {
-               buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
-               buflen = sg->length;
-       } else {
-               buf = NULL;
-               buflen = 0;
-       }
-
-       *buf_out = buf;
-       return buflen;
+       memset(ata_scsi_rbuf, 0, ATA_SCSI_RBUF_SIZE);
+       if (copy_in)
+               sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
+                                 ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
+       return ata_scsi_rbuf;
 }
 
 /**
  *     ata_scsi_rbuf_put - Unmap response buffer.
  *     @cmd: SCSI command containing buffer to be unmapped.
- *     @buf: buffer to unmap
+ *     @copy_out: copy out result
+ *     @flags: @flags passed to ata_scsi_rbuf_get()
  *
- *     Unmaps response buffer contained within @cmd.
+ *     Returns rbuf buffer.  The result is copied to @cmd's buffer if
+ *     @copy_out is true.
  *
  *     LOCKING:
- *     spin_lock_irqsave(host lock)
+ *     Unlocks ata_scsi_rbuf_lock.
  */
-
-static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
+static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, bool copy_out,
+                                    unsigned long *flags)
 {
-       struct scatterlist *sg = scsi_sglist(cmd);
-       if (sg)
-               kunmap_atomic(buf - sg->offset, KM_IRQ0);
+       if (copy_out)
+               sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
+                                   ata_scsi_rbuf, ATA_SCSI_RBUF_SIZE);
+       spin_unlock_irqrestore(&ata_scsi_rbuf_lock, *flags);
 }
 
 /**
@@ -1696,51 +1702,27 @@ static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
  *     LOCKING:
  *     spin_lock_irqsave(host lock)
  */
-
-void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
-                       unsigned int (*actor) (struct ata_scsi_args *args,
-                                              u8 *rbuf, unsigned int buflen))
+static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
+               unsigned int (*actor)(struct ata_scsi_args *args, u8 *rbuf))
 {
        u8 *rbuf;
-       unsigned int buflen, rc;
+       unsigned int rc;
        struct scsi_cmnd *cmd = args->cmd;
        unsigned long flags;
 
-       local_irq_save(flags);
-
-       buflen = ata_scsi_rbuf_get(cmd, &rbuf);
-       memset(rbuf, 0, buflen);
-       rc = actor(args, rbuf, buflen);
-       ata_scsi_rbuf_put(cmd, rbuf);
-
-       local_irq_restore(flags);
+       rbuf = ata_scsi_rbuf_get(cmd, false, &flags);
+       rc = actor(args, rbuf);
+       ata_scsi_rbuf_put(cmd, rc == 0, &flags);
 
        if (rc == 0)
                cmd->result = SAM_STAT_GOOD;
        args->done(cmd);
 }
 
-/**
- *     ATA_SCSI_RBUF_SET - helper to set values in SCSI response buffer
- *     @idx: byte index into SCSI response buffer
- *     @val: value to set
- *
- *     To be used by SCSI command simulator functions.  This macros
- *     expects two local variables, u8 *rbuf and unsigned int buflen,
- *     are in scope.
- *
- *     LOCKING:
- *     None.
- */
-#define ATA_SCSI_RBUF_SET(idx, val) do { \
-               if ((idx) < buflen) rbuf[(idx)] = (u8)(val); \
-       } while (0)
-
 /**
  *     ata_scsiop_inq_std - Simulate INQUIRY command
  *     @args: device IDENTIFY data / SCSI command of interest.
  *     @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
- *     @buflen: Response buffer length.
  *
  *     Returns standard device identification data associated
  *     with non-VPD INQUIRY command output.
@@ -1748,10 +1730,17 @@ void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
  *     LOCKING:
  *     spin_lock_irqsave(host lock)
  */
-
-unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
-                              unsigned int buflen)
+static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
 {
+       const u8 versions[] = {
+               0x60,   /* SAM-3 (no version claimed) */
+
+               0x03,
+               0x20,   /* SBC-2 (no version claimed) */
+
+               0x02,
+               0x60    /* SPC-3 (no version claimed) */
+       };
        u8 hdr[] = {
                TYPE_DISK,
                0,
@@ -1760,35 +1749,21 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
                95 - 4
        };
 
+       VPRINTK("ENTER\n");
+
        /* set scsi removeable (RMB) bit per ata bit */
        if (ata_id_removeable(args->id))
                hdr[1] |= (1 << 7);
 
-       VPRINTK("ENTER\n");
-
        memcpy(rbuf, hdr, sizeof(hdr));
+       memcpy(&rbuf[8], "ATA     ", 8);
+       ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
+       ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
 
-       if (buflen > 35) {
-               memcpy(&rbuf[8], "ATA     ", 8);
-               ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
-               ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
-               if (rbuf[32] == 0 || rbuf[32] == ' ')
-                       memcpy(&rbuf[32], "n/a ", 4);
-       }
-
-       if (buflen > 63) {
-               const u8 versions[] = {
-                       0x60,   /* SAM-3 (no version claimed) */
-
-                       0x03,
-                       0x20,   /* SBC-2 (no version claimed) */
+       if (rbuf[32] == 0 || rbuf[32] == ' ')
+               memcpy(&rbuf[32], "n/a ", 4);
 
-                       0x02,
-                       0x60    /* SPC-3 (no version claimed) */
-               };
-
-               memcpy(rbuf + 59, versions, sizeof(versions));
-       }
+       memcpy(rbuf + 59, versions, sizeof(versions));
 
        return 0;
 }
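
Editor's note: the ata_scsi_rbuf_get()/_put()/_fill() rewrite above routes every simulated SCSI response through a single static 4 KB buffer protected by a spinlock, instead of kmapping each command's scatterlist. A userspace sketch of that get/fill/put discipline, with a mutex standing in for the spinlock and a plain memcpy standing in for the scatterlist copy (hypothetical names, not the libata API):

#include <pthread.h>
#include <string.h>

#define RBUF_SIZE 4096

static pthread_mutex_t rbuf_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char rbuf[RBUF_SIZE];

/* Take the lock and hand out the zeroed shared buffer. */
static unsigned char *rbuf_get(void)
{
        pthread_mutex_lock(&rbuf_lock);
        memset(rbuf, 0, sizeof(rbuf));
        return rbuf;
}

/* Optionally copy the result out to the caller's buffer, then unlock. */
static void rbuf_put(unsigned char *out, size_t out_len, int copy_out)
{
        if (copy_out)
                memcpy(out, rbuf, out_len < RBUF_SIZE ? out_len : RBUF_SIZE);
        pthread_mutex_unlock(&rbuf_lock);
}

/* Fill pattern: run an "actor" against the shared buffer under the lock
 * and copy the result back only if the actor reported success. */
void rbuf_fill(unsigned char *out, size_t out_len,
               unsigned int (*actor)(unsigned char *buf))
{
        unsigned char *buf = rbuf_get();
        unsigned int rc = actor(buf);

        rbuf_put(out, out_len, rc == 0);
}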
@@ -1797,27 +1772,22 @@ unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
  *     ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
  *     @args: device IDENTIFY data / SCSI command of interest.
  *     @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
- *     @buflen: Response buffer length.
  *
  *     Returns list of inquiry VPD pages available.
  *
  *     LOCKING:
  *     spin_lock_irqsave(host lock)
  */
-
-unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
-                             unsigned int buflen)
+static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
 {
        const u8 pages[] = {
                0x00,   /* page 0x00, this page */
                0x80,   /* page 0x80, unit serial no page */
                0x83    /* page 0x83, device ident page */
        };
-       rbuf[3] = sizeof(pages);        /* number of supported VPD pages */
-
-       if (buflen > 6)
-               memcpy(rbuf + 4, pages, sizeof(pages));
 
+       rbuf[3] = sizeof(pages);        /* number of supported VPD pages */
+       memcpy(rbuf + 4, pages, sizeof(pages));
        return 0;
 }
 
@@ -1825,16 +1795,13 @@ unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
  *     ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
  *     @args: device IDENTIFY data / SCSI command of interest.
  *     @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
- *     @buflen: Response buffer length.
  *
  *     Returns ATA device serial number.
  *
  *     LOCKING:
  *     spin_lock_irqsave(host lock)
  */
-
-unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
-                             unsigned int buflen)
+static unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf)
 {
        const u8 hdr[] = {
                0,
@@ -1842,12 +1809,10 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
                0,
                ATA_ID_SERNO_LEN,       /* page len */
        };
-       memcpy(rbuf, hdr, sizeof(hdr));
-
-       if (buflen > (ATA_ID_SERNO_LEN + 4 - 1))
-               ata_id_string(args->id, (unsigned char *) &rbuf[4],
-                             ATA_ID_SERNO, ATA_ID_SERNO_LEN);
 
+       memcpy(rbuf, hdr, sizeof(hdr));
+       ata_id_string(args->id, (unsigned char *) &rbuf[4],
+                     ATA_ID_SERNO, ATA_ID_SERNO_LEN);
        return 0;
 }
 
@@ -1855,7 +1820,6 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
  *     ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
  *     @args: device IDENTIFY data / SCSI command of interest.
  *     @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
- *     @buflen: Response buffer length.
  *
  *     Yields two logical unit device identification designators:
  *      - vendor specific ASCII containing the ATA serial number
@@ -1865,41 +1829,37 @@ unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
  *     LOCKING:
  *     spin_lock_irqsave(host lock)
  */
-
-unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
-                             unsigned int buflen)
+static unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf)
 {
-       int num;
        const int sat_model_serial_desc_len = 68;
+       int num;
 
        rbuf[1] = 0x83;                 /* this page code */
        num = 4;
 
-       if (buflen > (ATA_ID_SERNO_LEN + num + 3)) {
-               /* piv=0, assoc=lu, code_set=ACSII, designator=vendor */
-               rbuf[num + 0] = 2;
-               rbuf[num + 3] = ATA_ID_SERNO_LEN;
-               num += 4;
-               ata_id_string(args->id, (unsigned char *) rbuf + num,
-                             ATA_ID_SERNO, ATA_ID_SERNO_LEN);
-               num += ATA_ID_SERNO_LEN;
-       }
-       if (buflen > (sat_model_serial_desc_len + num + 3)) {
-               /* SAT defined lu model and serial numbers descriptor */
-               /* piv=0, assoc=lu, code_set=ACSII, designator=t10 vendor id */
-               rbuf[num + 0] = 2;
-               rbuf[num + 1] = 1;
-               rbuf[num + 3] = sat_model_serial_desc_len;
-               num += 4;
-               memcpy(rbuf + num, "ATA     ", 8);
-               num += 8;
-               ata_id_string(args->id, (unsigned char *) rbuf + num,
-                             ATA_ID_PROD, ATA_ID_PROD_LEN);
-               num += ATA_ID_PROD_LEN;
-               ata_id_string(args->id, (unsigned char *) rbuf + num,
-                             ATA_ID_SERNO, ATA_ID_SERNO_LEN);
-               num += ATA_ID_SERNO_LEN;
-       }
+       /* piv=0, assoc=lu, code_set=ACSII, designator=vendor */
+       rbuf[num + 0] = 2;
+       rbuf[num + 3] = ATA_ID_SERNO_LEN;
+       num += 4;
+       ata_id_string(args->id, (unsigned char *) rbuf + num,
+                     ATA_ID_SERNO, ATA_ID_SERNO_LEN);
+       num += ATA_ID_SERNO_LEN;
+
+       /* SAT defined lu model and serial numbers descriptor */
+       /* piv=0, assoc=lu, code_set=ACSII, designator=t10 vendor id */
+       rbuf[num + 0] = 2;
+       rbuf[num + 1] = 1;
+       rbuf[num + 3] = sat_model_serial_desc_len;
+       num += 4;
+       memcpy(rbuf + num, "ATA     ", 8);
+       num += 8;
+       ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_PROD,
+                     ATA_ID_PROD_LEN);
+       num += ATA_ID_PROD_LEN;
+       ata_id_string(args->id, (unsigned char *) rbuf + num, ATA_ID_SERNO,
+                     ATA_ID_SERNO_LEN);
+       num += ATA_ID_SERNO_LEN;
+
        rbuf[3] = num - 4;    /* page len (assume less than 256 bytes) */
        return 0;
 }
@@ -1908,35 +1868,26 @@ unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
  *     ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
  *     @args: device IDENTIFY data / SCSI command of interest.
  *     @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
- *     @buflen: Response buffer length.
  *
  *     Yields SAT-specified ATA VPD page.
  *
  *     LOCKING:
  *     spin_lock_irqsave(host lock)
  */
-
-static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf,
-                             unsigned int buflen)
+static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf)
 {
-       u8 pbuf[60];
        struct ata_taskfile tf;
-       unsigned int i;
 
-       if (!buflen)
-               return 0;
-
-       memset(&pbuf, 0, sizeof(pbuf));
        memset(&tf, 0, sizeof(tf));
 
-       pbuf[1] = 0x89;                 /* our page code */
-       pbuf[2] = (0x238 >> 8);         /* page size fixed at 238h */
-       pbuf[3] = (0x238 & 0xff);
+       rbuf[1] = 0x89;                 /* our page code */
+       rbuf[2] = (0x238 >> 8);         /* page size fixed at 238h */
+       rbuf[3] = (0x238 & 0xff);
 
-       memcpy(&pbuf[8], "linux   ", 8);
-       memcpy(&pbuf[16], "libata          ", 16);
-       memcpy(&pbuf[32], DRV_VERSION, 4);
-       ata_id_string(args->id, &pbuf[32], ATA_ID_FW_REV, 4);
+       memcpy(&rbuf[8], "linux   ", 8);
+       memcpy(&rbuf[16], "libata          ", 16);
+       memcpy(&rbuf[32], DRV_VERSION, 4);
+       ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
 
        /* we don't store the ATA device signature, so we fake it */
 
@@ -1944,19 +1895,12 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf,
        tf.lbal = 0x1;
        tf.nsect = 0x1;
 
-       ata_tf_to_fis(&tf, 0, 1, &pbuf[36]);    /* TODO: PMP? */
-       pbuf[36] = 0x34;                /* force D2H Reg FIS (34h) */
+       ata_tf_to_fis(&tf, 0, 1, &rbuf[36]);    /* TODO: PMP? */
+       rbuf[36] = 0x34;                /* force D2H Reg FIS (34h) */
 
-       pbuf[56] = ATA_CMD_ID_ATA;
+       rbuf[56] = ATA_CMD_ID_ATA;
 
-       i = min(buflen, 60U);
-       memcpy(rbuf, &pbuf[0], i);
-       buflen -= i;
-
-       if (!buflen)
-               return 0;
-
-       memcpy(&rbuf[60], &args->id[0], min(buflen, 512U));
+       memcpy(&rbuf[60], &args->id[0], 512);
        return 0;
 }
 
@@ -1964,7 +1908,6 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf,
  *     ata_scsiop_noop - Command handler that simply returns success.
  *     @args: device IDENTIFY data / SCSI command of interest.
  *     @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
- *     @buflen: Response buffer length.
  *
  *     No operation.  Simply returns success to caller, to indicate
  *     that the caller should successfully complete this SCSI command.
@@ -1972,47 +1915,16 @@ static unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf,
  *     LOCKING:
  *     spin_lock_irqsave(host lock)
  */
-
-unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
-                           unsigned int buflen)
+static unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf)
 {
        VPRINTK("ENTER\n");
        return 0;
 }
 
-/**
- *     ata_msense_push - Push data onto MODE SENSE data output buffer
- *     @ptr_io: (input/output) Location to store more output data
- *     @last: End of output data buffer
- *     @buf: Pointer to BLOB being added to output buffer
- *     @buflen: Length of BLOB
- *
- *     Store MODE SENSE data on an output buffer.
- *
- *     LOCKING:
- *     None.
- */
-
-static void ata_msense_push(u8 **ptr_io, const u8 *last,
-                           const u8 *buf, unsigned int buflen)
-{
-       u8 *ptr = *ptr_io;
-
-       if ((ptr + buflen - 1) > last)
-               return;
-
-       memcpy(ptr, buf, buflen);
-
-       ptr += buflen;
-
-       *ptr_io = ptr;
-}
-
 /**
  *     ata_msense_caching - Simulate MODE SENSE caching info page
  *     @id: device IDENTIFY data
- *     @ptr_io: (input/output) Location to store more output data
- *     @last: End of output data buffer
+ *     @buf: output buffer
  *
  *     Generate a caching info page, which conditionally indicates
  *     write caching to the SCSI layer, depending on device
@@ -2021,58 +1933,43 @@ static void ata_msense_push(u8 **ptr_io, const u8 *last,
  *     LOCKING:
  *     None.
  */
-
-static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io,
-                                      const u8 *last)
+static unsigned int ata_msense_caching(u16 *id, u8 *buf)
 {
-       u8 page[CACHE_MPAGE_LEN];
-
-       memcpy(page, def_cache_mpage, sizeof(page));
+       memcpy(buf, def_cache_mpage, sizeof(def_cache_mpage));
        if (ata_id_wcache_enabled(id))
-               page[2] |= (1 << 2);    /* write cache enable */
+               buf[2] |= (1 << 2);     /* write cache enable */
        if (!ata_id_rahead_enabled(id))
-               page[12] |= (1 << 5);   /* disable read ahead */
-
-       ata_msense_push(ptr_io, last, page, sizeof(page));
-       return sizeof(page);
+               buf[12] |= (1 << 5);    /* disable read ahead */
+       return sizeof(def_cache_mpage);
 }
 
 /**
  *     ata_msense_ctl_mode - Simulate MODE SENSE control mode page
- *     @dev: Device associated with this MODE SENSE command
- *     @ptr_io: (input/output) Location to store more output data
- *     @last: End of output data buffer
+ *     @buf: output buffer
  *
  *     Generate a generic MODE SENSE control mode page.
  *
  *     LOCKING:
  *     None.
  */
-
-static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last)
+static unsigned int ata_msense_ctl_mode(u8 *buf)
 {
-       ata_msense_push(ptr_io, last, def_control_mpage,
-                       sizeof(def_control_mpage));
+       memcpy(buf, def_control_mpage, sizeof(def_control_mpage));
        return sizeof(def_control_mpage);
 }
 
 /**
  *     ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
- *     @dev: Device associated with this MODE SENSE command
- *     @ptr_io: (input/output) Location to store more output data
- *     @last: End of output data buffer
+ *     @buf: output buffer
  *
  *     Generate a generic MODE SENSE r/w error recovery page.
  *
  *     LOCKING:
  *     None.
  */
-
-static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
+static unsigned int ata_msense_rw_recovery(u8 *buf)
 {
-
-       ata_msense_push(ptr_io, last, def_rw_recovery_mpage,
-                       sizeof(def_rw_recovery_mpage));
+       memcpy(buf, def_rw_recovery_mpage, sizeof(def_rw_recovery_mpage));
        return sizeof(def_rw_recovery_mpage);
 }
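
Editor's note: because the shared response buffer is now guaranteed to be ATA_SCSI_RBUF_SIZE bytes, the mode-sense helpers above can drop ata_msense_push() and simply copy a page template into place, flip feature bits, and return the page length so the caller advances its cursor. A standalone sketch of that pattern with made-up template bytes:

#include <string.h>

/* Made-up 20-byte caching mode-page template (not the real page contents). */
static const unsigned char cache_mpage_template[20] = { 0x08, 18 };

/* Copy the template into the caller's buffer and flip feature bits in
 * place, returning the page length so the caller can advance its cursor.
 * The buffer is assumed to be large enough, as the 4 KB rbuf now is. */
unsigned int fill_caching_page(unsigned char *buf, int wcache_on, int rahead_on)
{
        memcpy(buf, cache_mpage_template, sizeof(cache_mpage_template));
        if (wcache_on)
                buf[2] |= (1 << 2);     /* write cache enable */
        if (!rahead_on)
                buf[12] |= (1 << 5);    /* disable read-ahead */
        return sizeof(cache_mpage_template);
}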
 
@@ -2104,7 +2001,6 @@ static int ata_dev_supports_fua(u16 *id)
  *     ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands
  *     @args: device IDENTIFY data / SCSI command of interest.
  *     @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
- *     @buflen: Response buffer length.
  *
  *     Simulate MODE SENSE commands. Assume this is invoked for direct
  *     access devices (e.g. disks) only. There should be no block
@@ -2113,19 +2009,17 @@ static int ata_dev_supports_fua(u16 *id)
  *     LOCKING:
  *     spin_lock_irqsave(host lock)
  */
-
-unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
-                                 unsigned int buflen)
+static unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf)
 {
        struct ata_device *dev = args->dev;
-       u8 *scsicmd = args->cmd->cmnd, *p, *last;
+       u8 *scsicmd = args->cmd->cmnd, *p = rbuf;
        const u8 sat_blk_desc[] = {
                0, 0, 0, 0,     /* number of blocks: sat unspecified */
                0,
                0, 0x2, 0x0     /* block length: 512 bytes */
        };
        u8 pg, spg;
-       unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen;
+       unsigned int ebd, page_control, six_byte;
        u8 dpofua;
 
        VPRINTK("ENTER\n");
@@ -2148,17 +2042,10 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
                goto invalid_fld;
        }
 
-       if (six_byte) {
-               output_len = 4 + (ebd ? 8 : 0);
-               alloc_len = scsicmd[4];
-       } else {
-               output_len = 8 + (ebd ? 8 : 0);
-               alloc_len = (scsicmd[7] << 8) + scsicmd[8];
-       }
-       minlen = (alloc_len < buflen) ? alloc_len : buflen;
-
-       p = rbuf + output_len;
-       last = rbuf + minlen - 1;
+       if (six_byte)
+               p += 4 + (ebd ? 8 : 0);
+       else
+               p += 8 + (ebd ? 8 : 0);
 
        pg = scsicmd[2] & 0x3f;
        spg = scsicmd[3];
@@ -2171,61 +2058,48 @@ unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
 
        switch(pg) {
        case RW_RECOVERY_MPAGE:
-               output_len += ata_msense_rw_recovery(&p, last);
+               p += ata_msense_rw_recovery(p);
                break;
 
        case CACHE_MPAGE:
-               output_len += ata_msense_caching(args->id, &p, last);
+               p += ata_msense_caching(args->id, p);
                break;
 
-       case CONTROL_MPAGE: {
-               output_len += ata_msense_ctl_mode(&p, last);
+       case CONTROL_MPAGE:
+               p += ata_msense_ctl_mode(p);
                break;
-               }
 
        case ALL_MPAGES:
-               output_len += ata_msense_rw_recovery(&p, last);
-               output_len += ata_msense_caching(args->id, &p, last);
-               output_len += ata_msense_ctl_mode(&p, last);
+               p += ata_msense_rw_recovery(p);
+               p += ata_msense_caching(args->id, p);
+               p += ata_msense_ctl_mode(p);
                break;
 
        default:                /* invalid page code */
                goto invalid_fld;
        }
 
-       if (minlen < 1)
-               return 0;
-
        dpofua = 0;
        if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) &&
            (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count))
                dpofua = 1 << 4;
 
        if (six_byte) {
-               output_len--;
-               rbuf[0] = output_len;
-               if (minlen > 2)
-                       rbuf[2] |= dpofua;
+               rbuf[0] = p - rbuf - 1;
+               rbuf[2] |= dpofua;
                if (ebd) {
-                       if (minlen > 3)
-                               rbuf[3] = sizeof(sat_blk_desc);
-                       if (minlen > 11)
-                               memcpy(rbuf + 4, sat_blk_desc,
-                                      sizeof(sat_blk_desc));
+                       rbuf[3] = sizeof(sat_blk_desc);
+                       memcpy(rbuf + 4, sat_blk_desc, sizeof(sat_blk_desc));
                }
        } else {
-               output_len -= 2;
+               unsigned int output_len = p - rbuf - 2;
+
                rbuf[0] = output_len >> 8;
-               if (minlen > 1)
-                       rbuf[1] = output_len;
-               if (minlen > 3)
-                       rbuf[3] |= dpofua;
+               rbuf[1] = output_len;
+               rbuf[3] |= dpofua;
                if (ebd) {
-                       if (minlen > 7)
-                               rbuf[7] = sizeof(sat_blk_desc);
-                       if (minlen > 15)
-                               memcpy(rbuf + 8, sat_blk_desc,
-                                      sizeof(sat_blk_desc));
+                       rbuf[7] = sizeof(sat_blk_desc);
+                       memcpy(rbuf + 8, sat_blk_desc, sizeof(sat_blk_desc));
                }
        }
        return 0;
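
The header fix-up above is easy to get wrong: the MODE DATA LENGTH field excludes its own size, so the 6-byte header subtracts one byte and the 10-byte header subtracts two from the total number of bytes written. A standalone sketch of that arithmetic follows; the helper name is made up, while the offsets and the 8-byte block-descriptor length mirror the hunk.

/*
 * Standalone sketch of the MODE SENSE header arithmetic shown above.
 */
#include <stdint.h>
#include <stdio.h>

static void finish_mode_sense_hdr(uint8_t *rbuf, unsigned int total,
                                  int six_byte, int ebd)
{
        if (six_byte) {
                rbuf[0] = total - 1;            /* mode data length */
                if (ebd)
                        rbuf[3] = 8;            /* block descriptor length */
        } else {
                unsigned int len = total - 2;

                rbuf[0] = len >> 8;             /* mode data length, MSB */
                rbuf[1] = len & 0xff;           /* mode data length, LSB */
                if (ebd)
                        rbuf[7] = 8;            /* block descriptor length */
        }
}

int main(void)
{
        uint8_t rbuf[256] = { 0 };

        /* e.g. 4-byte header + 8-byte block descriptor + 20-byte page */
        finish_mode_sense_hdr(rbuf, 4 + 8 + 20, 1, 1);
        printf("MODE SENSE(6): data length byte = %u\n", rbuf[0]);
        return 0;
}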
@@ -2245,15 +2119,13 @@ saving_not_supp:
  *     ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands
  *     @args: device IDENTIFY data / SCSI command of interest.
  *     @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
- *     @buflen: Response buffer length.
  *
  *     Simulate READ CAPACITY commands.
  *
  *     LOCKING:
  *     None.
  */
-unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
-                                unsigned int buflen)
+static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
 {
        u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */
 
@@ -2264,28 +2136,28 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
                        last_lba = 0xffffffff;
 
                /* sector count, 32-bit */
-               ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 3));
-               ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 2));
-               ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 1));
-               ATA_SCSI_RBUF_SET(3, last_lba);
+               rbuf[0] = last_lba >> (8 * 3);
+               rbuf[1] = last_lba >> (8 * 2);
+               rbuf[2] = last_lba >> (8 * 1);
+               rbuf[3] = last_lba;
 
                /* sector size */
-               ATA_SCSI_RBUF_SET(6, ATA_SECT_SIZE >> 8);
-               ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE & 0xff);
+               rbuf[6] = ATA_SECT_SIZE >> 8;
+               rbuf[7] = ATA_SECT_SIZE & 0xff;
        } else {
                /* sector count, 64-bit */
-               ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 7));
-               ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 6));
-               ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 5));
-               ATA_SCSI_RBUF_SET(3, last_lba >> (8 * 4));
-               ATA_SCSI_RBUF_SET(4, last_lba >> (8 * 3));
-               ATA_SCSI_RBUF_SET(5, last_lba >> (8 * 2));
-               ATA_SCSI_RBUF_SET(6, last_lba >> (8 * 1));
-               ATA_SCSI_RBUF_SET(7, last_lba);
+               rbuf[0] = last_lba >> (8 * 7);
+               rbuf[1] = last_lba >> (8 * 6);
+               rbuf[2] = last_lba >> (8 * 5);
+               rbuf[3] = last_lba >> (8 * 4);
+               rbuf[4] = last_lba >> (8 * 3);
+               rbuf[5] = last_lba >> (8 * 2);
+               rbuf[6] = last_lba >> (8 * 1);
+               rbuf[7] = last_lba;
 
                /* sector size */
-               ATA_SCSI_RBUF_SET(10, ATA_SECT_SIZE >> 8);
-               ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE & 0xff);
+               rbuf[10] = ATA_SECT_SIZE >> 8;
+               rbuf[11] = ATA_SECT_SIZE & 0xff;
        }
 
        return 0;
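
The open-coded shifts above store the last LBA and the block length most-significant-byte first, as READ CAPACITY expects. The sketch below factors the same packing into a small helper; put_be() and the sample values are hypothetical.

/*
 * Standalone sketch of the big-endian packing done above.
 */
#include <stdint.h>
#include <stdio.h>

static void put_be(uint8_t *buf, uint64_t val, unsigned int nbytes)
{
        while (nbytes--)
                *buf++ = val >> (8 * nbytes);   /* MSB first */
}

int main(void)
{
        uint8_t rbuf[16] = { 0 };
        uint64_t last_lba = 0x12345678;

        put_be(&rbuf[0], last_lba, 4);          /* READ CAPACITY(10): 32-bit LBA */
        put_be(&rbuf[6], 512, 2);               /* block length: 512 bytes */

        printf("%02x %02x %02x %02x  block len %u\n",
               rbuf[0], rbuf[1], rbuf[2], rbuf[3],
               (unsigned int)((rbuf[6] << 8) | rbuf[7]));
        return 0;
}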
@@ -2295,16 +2167,13 @@ unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
  *     ata_scsiop_report_luns - Simulate REPORT LUNS command
  *     @args: device IDENTIFY data / SCSI command of interest.
  *     @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
- *     @buflen: Response buffer length.
  *
  *     Simulate REPORT LUNS command.
  *
  *     LOCKING:
  *     spin_lock_irqsave(host lock)
  */
-
-unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
-                                  unsigned int buflen)
+static unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf)
 {
        VPRINTK("ENTER\n");
        rbuf[3] = 8;    /* just one lun, LUN 0, size 8 bytes */
@@ -2312,53 +2181,6 @@ unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
        return 0;
 }
 
-/**
- *     ata_scsi_set_sense - Set SCSI sense data and status
- *     @cmd: SCSI request to be handled
- *     @sk: SCSI-defined sense key
- *     @asc: SCSI-defined additional sense code
- *     @ascq: SCSI-defined additional sense code qualifier
- *
- *     Helper function that builds a valid fixed format, current
- *     response code and the given sense key (sk), additional sense
- *     code (asc) and additional sense code qualifier (ascq) with
- *     a SCSI command status of %SAM_STAT_CHECK_CONDITION and
- *     DRIVER_SENSE set in the upper bits of scsi_cmnd::result .
- *
- *     LOCKING:
- *     Not required
- */
-
-void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq)
-{
-       cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
-
-       scsi_build_sense_buffer(0, cmd->sense_buffer, sk, asc, ascq);
-}
-
-/**
- *     ata_scsi_badcmd - End a SCSI request with an error
- *     @cmd: SCSI request to be handled
- *     @done: SCSI command completion function
- *     @asc: SCSI-defined additional sense code
- *     @ascq: SCSI-defined additional sense code qualifier
- *
- *     Helper function that completes a SCSI command with
- *     %SAM_STAT_CHECK_CONDITION, with a sense key %ILLEGAL_REQUEST
- *     and the specified additional sense codes.
- *
- *     LOCKING:
- *     spin_lock_irqsave(host lock)
- */
-
-void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq)
-{
-       DPRINTK("ENTER\n");
-       ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq);
-
-       done(cmd);
-}
-
 static void atapi_sense_complete(struct ata_queued_cmd *qc)
 {
        if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
@@ -2485,13 +2307,10 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
                u8 *scsicmd = cmd->cmnd;
 
                if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
-                       u8 *buf = NULL;
-                       unsigned int buflen;
                        unsigned long flags;
+                       u8 *buf;
 
-                       local_irq_save(flags);
-
-                       buflen = ata_scsi_rbuf_get(cmd, &buf);
+                       buf = ata_scsi_rbuf_get(cmd, true, &flags);
 
        /* ATAPI devices typically report zero for their SCSI version,
         * and sometimes deviate from the spec WRT response data
@@ -2506,9 +2325,7 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
                                buf[3] = 0x32;
                        }
 
-                       ata_scsi_rbuf_put(cmd, buf);
-
-                       local_irq_restore(flags);
+                       ata_scsi_rbuf_put(cmd, true, &flags);
                }
 
                cmd->result = SAM_STAT_GOOD;
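
The hunk above switches from the old map-a-pointer-and-length pair to a get/put pair that takes a copy direction and stores the saved IRQ flags itself. Below is a hedged, kernel-style sketch of how such a pair could be built (one driver-wide scratch buffer, a spinlock, and the scatterlist copy helpers); it illustrates the calling convention only, is not the actual libata-scsi.c implementation, and all names are made up.

/*
 * Illustrative sketch: a shared scratch buffer handed out under a
 * spinlock, with the bool deciding whether SCSI data is copied in
 * on get or copied back out on put.
 */
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <scsi/scsi_cmnd.h>

static DEFINE_SPINLOCK(scratch_lock);
static u8 scratch_buf[4096];

static void *example_rbuf_get(struct scsi_cmnd *cmd, bool copy_in,
                              unsigned long *flags)
{
        spin_lock_irqsave(&scratch_lock, *flags);
        memset(scratch_buf, 0, sizeof(scratch_buf));
        if (copy_in)
                sg_copy_to_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
                                  scratch_buf, sizeof(scratch_buf));
        return scratch_buf;
}

static void example_rbuf_put(struct scsi_cmnd *cmd, bool copy_out,
                             unsigned long *flags)
{
        if (copy_out)
                sg_copy_from_buffer(scsi_sglist(cmd), scsi_sg_count(cmd),
                                    scratch_buf, sizeof(scratch_buf));
        spin_unlock_irqrestore(&scratch_lock, *flags);
}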
index ae2cfd95d43ef7591b1b6ead5b4497080ec5876e..4514283937ea4a9a4254664ac897586266809771 100644 (file)
@@ -146,34 +146,6 @@ extern void ata_scsi_scan_host(struct ata_port *ap, int sync);
 extern int ata_scsi_offline_dev(struct ata_device *dev);
 extern void ata_scsi_media_change_notify(struct ata_device *dev);
 extern void ata_scsi_hotplug(struct work_struct *work);
-extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
-                              unsigned int buflen);
-
-extern unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
-                             unsigned int buflen);
-
-extern unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
-                             unsigned int buflen);
-extern unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
-                             unsigned int buflen);
-extern unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
-                           unsigned int buflen);
-extern unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf,
-                                 unsigned int buflen);
-extern unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf,
-                                 unsigned int buflen);
-extern unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf,
-                               unsigned int buflen);
-extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf,
-                                  unsigned int buflen);
-extern void ata_scsi_badcmd(struct scsi_cmnd *cmd,
-                           void (*done)(struct scsi_cmnd *),
-                           u8 asc, u8 ascq);
-extern void ata_scsi_set_sense(struct scsi_cmnd *cmd,
-                              u8 sk, u8 asc, u8 ascq);
-extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
-                        unsigned int (*actor) (struct ata_scsi_args *args,
-                                           u8 *rbuf, unsigned int buflen));
 extern void ata_schedule_scsi_eh(struct Scsi_Host *shost);
 extern void ata_scsi_dev_rescan(struct work_struct *work);
 extern int ata_bus_probe(struct ata_port *ap);
index 78738fb4223b666078fc5ac1e0344b13d937d2d7..d7de7baf58a8ccb8551a8685167410b60ddf031d 100644 (file)
@@ -88,8 +88,8 @@ static void atiixp_set_pio_timing(struct ata_port *ap, struct ata_device *adev,
        pci_write_config_word(pdev, ATIIXP_IDE_PIO_MODE, pio_mode_data);
 
        pci_read_config_word(pdev, ATIIXP_IDE_PIO_TIMING, &pio_timing_data);
-       pio_mode_data &= ~(0xFF << timing_shift);
-       pio_mode_data |= (pio_timings[pio] << timing_shift);
+       pio_timing_data &= ~(0xFF << timing_shift);
+       pio_timing_data |= (pio_timings[pio] << timing_shift);
        pci_write_config_word(pdev, ATIIXP_IDE_PIO_TIMING, pio_timing_data);
 }
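
The fix above is a classic read-modify-write slip: the value read back from ATIIXP_IDE_PIO_TIMING was masked and merged into the wrong variable, so the unmodified timing word was written back. A minimal standalone sketch of the intended pattern, with made-up values:

/*
 * Standalone sketch: read a 16-bit timing register, clear the 8-bit
 * field for this channel, merge the new timing, write back the *same*
 * variable that was read and modified.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t set_timing_field(uint16_t reg, unsigned int shift,
                                 uint8_t timing)
{
        reg &= ~(0xFF << shift);        /* clear this channel's field */
        reg |= (uint16_t)timing << shift;
        return reg;
}

int main(void)
{
        uint16_t reg = 0xA9A9;          /* pretend hardware value */

        reg = set_timing_field(reg, 8, 0x5D);
        printf("register now 0x%04x\n", reg);
        return 0;
}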
 
index a75de0684c15cff77f21728fd36af031e7867b2c..9ab89732cf9474f1fa34458717bb9bd7da755d88 100644 (file)
@@ -1272,8 +1272,8 @@ static void bfin_freeze(struct ata_port *ap)
 
 void bfin_thaw(struct ata_port *ap)
 {
+       dev_dbg(ap->dev, "in atapi dma thaw\n");
        bfin_check_status(ap);
-       bfin_irq_clear(ap);
        bfin_irq_on(ap);
 }
 
@@ -1339,13 +1339,130 @@ static int bfin_port_start(struct ata_port *ap)
        return 0;
 }
 
+static unsigned int bfin_ata_host_intr(struct ata_port *ap,
+                                  struct ata_queued_cmd *qc)
+{
+       struct ata_eh_info *ehi = &ap->link.eh_info;
+       u8 status, host_stat = 0;
+
+       VPRINTK("ata%u: protocol %d task_state %d\n",
+               ap->print_id, qc->tf.protocol, ap->hsm_task_state);
+
+       /* Check whether we are expecting interrupt in this state */
+       switch (ap->hsm_task_state) {
+       case HSM_ST_FIRST:
+               /* Some pre-ATAPI-4 devices assert INTRQ
+                * at this state when ready to receive CDB.
+                */
+
+                * Checking the ATA_DFLAG_CDB_INTR flag is enough here.
+                * The flag was turned on only for atapi devices.
+                * No need to check is_atapi_taskfile(&qc->tf) again.
+                */
+               if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
+                       goto idle_irq;
+               break;
+       case HSM_ST_LAST:
+               if (qc->tf.protocol == ATA_PROT_DMA ||
+                   qc->tf.protocol == ATAPI_PROT_DMA) {
+                       /* check status of DMA engine */
+                       host_stat = ap->ops->bmdma_status(ap);
+                       VPRINTK("ata%u: host_stat 0x%X\n",
+                               ap->print_id, host_stat);
+
+                       /* if it's not our irq... */
+                       if (!(host_stat & ATA_DMA_INTR))
+                               goto idle_irq;
+
+                       /* before we do anything else, clear DMA-Start bit */
+                       ap->ops->bmdma_stop(qc);
+
+                       if (unlikely(host_stat & ATA_DMA_ERR)) {
+                               /* error when transferring data to/from memory */
+                               qc->err_mask |= AC_ERR_HOST_BUS;
+                               ap->hsm_task_state = HSM_ST_ERR;
+                       }
+               }
+               break;
+       case HSM_ST:
+               break;
+       default:
+               goto idle_irq;
+       }
+
+       /* check altstatus */
+       status = ap->ops->sff_check_altstatus(ap);
+       if (status & ATA_BUSY)
+               goto busy_ata;
+
+       /* check main status, clearing INTRQ */
+       status = ap->ops->sff_check_status(ap);
+       if (unlikely(status & ATA_BUSY))
+               goto busy_ata;
+
+       /* ack bmdma irq events */
+       ap->ops->sff_irq_clear(ap);
+
+       ata_sff_hsm_move(ap, qc, status, 0);
+
+       if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
+                                      qc->tf.protocol == ATAPI_PROT_DMA))
+               ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
+
+busy_ata:
+       return 1;       /* irq handled */
+
+idle_irq:
+       ap->stats.idle_irq++;
+
+#ifdef ATA_IRQ_TRAP
+       if ((ap->stats.idle_irq % 1000) == 0) {
+               ap->ops->irq_ack(ap, 0); /* debug trap */
+               ata_port_printk(ap, KERN_WARNING, "irq trap\n");
+               return 1;
+       }
+#endif
+       return 0;       /* irq not handled */
+}
+
+static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
+{
+       struct ata_host *host = dev_instance;
+       unsigned int i;
+       unsigned int handled = 0;
+       unsigned long flags;
+
+       /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
+       spin_lock_irqsave(&host->lock, flags);
+
+       for (i = 0; i < host->n_ports; i++) {
+               struct ata_port *ap;
+
+               ap = host->ports[i];
+               if (ap &&
+                   !(ap->flags & ATA_FLAG_DISABLED)) {
+                       struct ata_queued_cmd *qc;
+
+                       qc = ata_qc_from_tag(ap, ap->link.active_tag);
+                       if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
+                           (qc->flags & ATA_QCFLAG_ACTIVE))
+                               handled |= bfin_ata_host_intr(ap, qc);
+               }
+       }
+
+       spin_unlock_irqrestore(&host->lock, flags);
+
+       return IRQ_RETVAL(handled);
+}
+
+
 static struct scsi_host_template bfin_sht = {
        ATA_BASE_SHT(DRV_NAME),
        .sg_tablesize           = SG_NONE,
        .dma_boundary           = ATA_DMA_BOUNDARY,
 };
 
-static const struct ata_port_operations bfin_pata_ops = {
+static struct ata_port_operations bfin_pata_ops = {
        .inherits               = &ata_sff_port_ops,
 
        .set_piomode            = bfin_set_piomode,
@@ -1370,7 +1487,6 @@ static const struct ata_port_operations bfin_pata_ops = {
        .thaw                   = bfin_thaw,
        .softreset              = bfin_softreset,
        .postreset              = bfin_postreset,
-       .post_internal_cmd      = bfin_bmdma_stop,
 
        .sff_irq_clear          = bfin_irq_clear,
        .sff_irq_on             = bfin_irq_on,
@@ -1507,7 +1623,7 @@ static int __devinit bfin_atapi_probe(struct platform_device *pdev)
        }
 
        if (ata_host_activate(host, platform_get_irq(pdev, 0),
-               ata_sff_interrupt, IRQF_SHARED, &bfin_sht) != 0) {
+               bfin_ata_interrupt, IRQF_SHARED, &bfin_sht) != 0) {
                peripheral_free_list(atapi_io_port);
                dev_err(&pdev->dev, "Fail to attach ATAPI device\n");
                return -ENODEV;
similarity index 72%
rename from drivers/ata/pata_rb500_cf.c
rename to drivers/ata/pata_rb532_cf.c
index 4345174aaeecc7fd8038bf45dc280aa73830701e..a108d259f19d73768423c10ec86e4b4b20c1c1a1 100644 (file)
@@ -32,7 +32,7 @@
 
 #include <asm/gpio.h>
 
-#define DRV_NAME       "pata-rb500-cf"
+#define DRV_NAME       "pata-rb532-cf"
 #define DRV_VERSION    "0.1.0"
 #define DRV_DESC       "PATA driver for RouterBOARD 532 Compact Flash"
 
@@ -43,7 +43,7 @@
 #define RB500_CF_REG_CTRL      0x080E
 #define RB500_CF_REG_DATA      0x0C00
 
-struct rb500_cf_info {
+struct rb532_cf_info {
        void __iomem    *iobase;
        unsigned int    gpio_line;
        int             frozen;
@@ -52,10 +52,10 @@ struct rb500_cf_info {
 
 /* ------------------------------------------------------------------------ */
 
-static inline void rb500_pata_finish_io(struct ata_port *ap)
+static inline void rb532_pata_finish_io(struct ata_port *ap)
 {
        struct ata_host *ah = ap->host;
-       struct rb500_cf_info *info = ah->private_data;
+       struct rb532_cf_info *info = ah->private_data;
 
        ata_sff_altstatus(ap);
        ndelay(RB500_CF_IO_DELAY);
@@ -63,14 +63,14 @@ static inline void rb500_pata_finish_io(struct ata_port *ap)
        set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
 }
 
-static void rb500_pata_exec_command(struct ata_port *ap,
+static void rb532_pata_exec_command(struct ata_port *ap,
                                const struct ata_taskfile *tf)
 {
        writeb(tf->command, ap->ioaddr.command_addr);
-       rb500_pata_finish_io(ap);
+       rb532_pata_finish_io(ap);
 }
 
-static void rb500_pata_data_xfer(struct ata_device *adev, unsigned char *buf,
+static void rb532_pata_data_xfer(struct ata_device *adev, unsigned char *buf,
                                unsigned int buflen, int write_data)
 {
        struct ata_port *ap = adev->link->ap;
@@ -84,27 +84,27 @@ static void rb500_pata_data_xfer(struct ata_device *adev, unsigned char *buf,
                        *buf = readb(ioaddr);
        }
 
-       rb500_pata_finish_io(adev->link->ap);
+       rb532_pata_finish_io(adev->link->ap);
 }
 
-static void rb500_pata_freeze(struct ata_port *ap)
+static void rb532_pata_freeze(struct ata_port *ap)
 {
-       struct rb500_cf_info *info = ap->host->private_data;
+       struct rb532_cf_info *info = ap->host->private_data;
 
        info->frozen = 1;
 }
 
-static void rb500_pata_thaw(struct ata_port *ap)
+static void rb532_pata_thaw(struct ata_port *ap)
 {
-       struct rb500_cf_info *info = ap->host->private_data;
+       struct rb532_cf_info *info = ap->host->private_data;
 
        info->frozen = 0;
 }
 
-static irqreturn_t rb500_pata_irq_handler(int irq, void *dev_instance)
+static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
 {
        struct ata_host *ah = dev_instance;
-       struct rb500_cf_info *info = ah->private_data;
+       struct rb532_cf_info *info = ah->private_data;
 
        if (gpio_get_value(info->gpio_line)) {
                set_irq_type(info->irq, IRQ_TYPE_LEVEL_LOW);
@@ -117,30 +117,30 @@ static irqreturn_t rb500_pata_irq_handler(int irq, void *dev_instance)
        return IRQ_HANDLED;
 }
 
-static struct ata_port_operations rb500_pata_port_ops = {
+static struct ata_port_operations rb532_pata_port_ops = {
        .inherits               = &ata_sff_port_ops,
-       .sff_exec_command       = rb500_pata_exec_command,
-       .sff_data_xfer          = rb500_pata_data_xfer,
-       .freeze                 = rb500_pata_freeze,
-       .thaw                   = rb500_pata_thaw,
+       .sff_exec_command       = rb532_pata_exec_command,
+       .sff_data_xfer          = rb532_pata_data_xfer,
+       .freeze                 = rb532_pata_freeze,
+       .thaw                   = rb532_pata_thaw,
 };
 
 /* ------------------------------------------------------------------------ */
 
-static struct scsi_host_template rb500_pata_sht = {
+static struct scsi_host_template rb532_pata_sht = {
        ATA_PIO_SHT(DRV_NAME),
 };
 
 /* ------------------------------------------------------------------------ */
 
-static void rb500_pata_setup_ports(struct ata_host *ah)
+static void rb532_pata_setup_ports(struct ata_host *ah)
 {
-       struct rb500_cf_info *info = ah->private_data;
+       struct rb532_cf_info *info = ah->private_data;
        struct ata_port *ap;
 
        ap = ah->ports[0];
 
-       ap->ops         = &rb500_pata_port_ops;
+       ap->ops         = &rb532_pata_port_ops;
        ap->pio_mask    = 0x1f; /* PIO4 */
        ap->flags       = ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO;
 
@@ -153,13 +153,13 @@ static void rb500_pata_setup_ports(struct ata_host *ah)
        ap->ioaddr.data_addr    = info->iobase + RB500_CF_REG_DATA;
 }
 
-static __devinit int rb500_pata_driver_probe(struct platform_device *pdev)
+static __devinit int rb532_pata_driver_probe(struct platform_device *pdev)
 {
        unsigned int irq;
        int gpio;
        struct resource *res;
        struct ata_host *ah;
-       struct rb500_cf_info *info;
+       struct rb532_cf_info *info;
        int ret;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -213,10 +213,10 @@ static __devinit int rb500_pata_driver_probe(struct platform_device *pdev)
                goto err_free_gpio;
        }
 
-       rb500_pata_setup_ports(ah);
+       rb532_pata_setup_ports(ah);
 
-       ret = ata_host_activate(ah, irq, rb500_pata_irq_handler,
-                               IRQF_TRIGGER_LOW, &rb500_pata_sht);
+       ret = ata_host_activate(ah, irq, rb532_pata_irq_handler,
+                               IRQF_TRIGGER_LOW, &rb532_pata_sht);
        if (ret)
                goto err_free_gpio;
 
@@ -228,10 +228,10 @@ err_free_gpio:
        return ret;
 }
 
-static __devexit int rb500_pata_driver_remove(struct platform_device *pdev)
+static __devexit int rb532_pata_driver_remove(struct platform_device *pdev)
 {
        struct ata_host *ah = platform_get_drvdata(pdev);
-       struct rb500_cf_info *info = ah->private_data;
+       struct rb532_cf_info *info = ah->private_data;
 
        ata_host_detach(ah);
        gpio_free(info->gpio_line);
@@ -242,9 +242,9 @@ static __devexit int rb500_pata_driver_remove(struct platform_device *pdev)
 /* work with hotplug and coldplug */
 MODULE_ALIAS("platform:" DRV_NAME);
 
-static struct platform_driver rb500_pata_platform_driver = {
-       .probe          = rb500_pata_driver_probe,
-       .remove         = __devexit_p(rb500_pata_driver_remove),
+static struct platform_driver rb532_pata_platform_driver = {
+       .probe          = rb532_pata_driver_probe,
+       .remove         = __devexit_p(rb532_pata_driver_remove),
        .driver  = {
                .name   = DRV_NAME,
                .owner  = THIS_MODULE,
@@ -255,16 +255,16 @@ static struct platform_driver rb500_pata_platform_driver = {
 
 #define DRV_INFO DRV_DESC " version " DRV_VERSION
 
-static int __init rb500_pata_module_init(void)
+static int __init rb532_pata_module_init(void)
 {
        printk(KERN_INFO DRV_INFO "\n");
 
-       return platform_driver_register(&rb500_pata_platform_driver);
+       return platform_driver_register(&rb532_pata_platform_driver);
 }
 
-static void __exit rb500_pata_module_exit(void)
+static void __exit rb532_pata_module_exit(void)
 {
-       platform_driver_unregister(&rb500_pata_platform_driver);
+       platform_driver_unregister(&rb532_pata_platform_driver);
 }
 
 MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
@@ -273,5 +273,5 @@ MODULE_DESCRIPTION(DRV_DESC);
 MODULE_VERSION(DRV_VERSION);
 MODULE_LICENSE("GPL");
 
-module_init(rb500_pata_module_init);
-module_exit(rb500_pata_module_exit);
+module_init(rb532_pata_module_init);
+module_exit(rb532_pata_module_exit);
index d4840748fb5c902cfdd15bee9ee9c3b39081914d..2fea6cbe7755853b73331d09c1de61ccbba28a35 100644 (file)
@@ -464,11 +464,12 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        }
        pci_dev_put(isa);
 
-       /* 0x40 low bits indicate enabled channels */
-       pci_read_config_byte(pdev, 0x40 , &enable);
-       enable &= 3;
-       if (enable == 0) {
-               return -ENODEV;
+       if (!(config->flags & VIA_NO_ENABLES)) {
+               /* 0x40 low bits indicate enabled channels */
+               pci_read_config_byte(pdev, 0x40 , &enable);
+               enable &= 3;
+               if (enable == 0)
+                       return -ENODEV;
        }
 
        /* Initialise the FIFO for the enabled channels. */
index fddd346b1d5728c289d1a9a44ae5ab8c2de37412..853559e32315894ee58412ab502cc80f55d15078 100644 (file)
@@ -678,7 +678,7 @@ static unsigned int sata_fsl_dev_classify(struct ata_port *ap)
        return ata_dev_classify(&tf);
 }
 
-static int sata_fsl_prereset(struct ata_linke *link, unsigned long deadline)
+static int sata_fsl_prereset(struct ata_link *link, unsigned long deadline)
 {
        /* FIXME: Never skip softreset, sata_fsl_softreset() is
         * combination of soft and hard resets.  sata_fsl_softreset()
index 26a6337195b3681587d2025a9a97519f01bf8ca1..842b1a15b78cadfafb5e031a8fe57245e6045380 100644 (file)
@@ -172,10 +172,11 @@ enum {
        PCIE_IRQ_MASK_OFS       = 0x1910,
        PCIE_UNMASK_ALL_IRQS    = 0x40a,        /* assorted bits */
 
-       HC_MAIN_IRQ_CAUSE_OFS   = 0x1d60,
-       HC_MAIN_IRQ_MASK_OFS    = 0x1d64,
-       HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
-       HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
+       /* Host Controller Main Interrupt Cause/Mask registers (1 per-chip) */
+       PCI_HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
+       PCI_HC_MAIN_IRQ_MASK_OFS  = 0x1d64,
+       SOC_HC_MAIN_IRQ_CAUSE_OFS = 0x20020,
+       SOC_HC_MAIN_IRQ_MASK_OFS  = 0x20024,
        ERR_IRQ                 = (1 << 0),     /* shift by port # */
        DONE_IRQ                = (1 << 1),     /* shift by port # */
        HC0_IRQ_PEND            = 0x1ff,        /* bits 0-8 = HC0's ports */
@@ -445,8 +446,8 @@ struct mv_host_priv {
        const struct mv_hw_ops  *ops;
        int                     n_ports;
        void __iomem            *base;
-       void __iomem            *main_cause_reg_addr;
-       void __iomem            *main_mask_reg_addr;
+       void __iomem            *main_irq_cause_addr;
+       void __iomem            *main_irq_mask_addr;
        u32                     irq_cause_ofs;
        u32                     irq_mask_ofs;
        u32                     unmask_all_irqs;
@@ -727,8 +728,8 @@ static inline unsigned int mv_hardport_from_port(unsigned int port)
  * Simple code, with two return values, so macro rather than inline.
  *
  * port is the sole input, in range 0..7.
- * shift is one output, for use with the main_cause and main_mask registers.
- * hardport is the other output, in range 0..3
+ * shift is one output, for use with main_irq_cause / main_irq_mask registers.
+ * hardport is the other output, in range 0..3.
  *
  * Note that port and hardport may be the same variable in some cases.
  */
@@ -1679,12 +1680,12 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
 /**
  *      mv_host_intr - Handle all interrupts on the given host controller
  *      @host: host specific structure
- *      @main_cause: Main interrupt cause register for the chip.
+ *      @main_irq_cause: Main interrupt cause register for the chip.
  *
  *      LOCKING:
  *      Inherited from caller.
  */
-static int mv_host_intr(struct ata_host *host, u32 main_cause)
+static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
 {
        struct mv_host_priv *hpriv = host->private_data;
        void __iomem *mmio = hpriv->base, *hc_mmio = NULL;
@@ -1705,7 +1706,7 @@ static int mv_host_intr(struct ata_host *host, u32 main_cause)
                 * Do nothing if port is not interrupting or is disabled:
                 */
                MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
-               port_cause = (main_cause >> shift) & (DONE_IRQ | ERR_IRQ);
+               port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
                if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED))
                        continue;
                /*
@@ -1811,20 +1812,20 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
        struct ata_host *host = dev_instance;
        struct mv_host_priv *hpriv = host->private_data;
        unsigned int handled = 0;
-       u32 main_cause, main_mask;
+       u32 main_irq_cause, main_irq_mask;
 
        spin_lock(&host->lock);
-       main_cause = readl(hpriv->main_cause_reg_addr);
-       main_mask  = readl(hpriv->main_mask_reg_addr);
+       main_irq_cause = readl(hpriv->main_irq_cause_addr);
+       main_irq_mask  = readl(hpriv->main_irq_mask_addr);
        /*
         * Deal with cases where we either have nothing pending, or have read
         * a bogus register value which can indicate HW removal or PCI fault.
         */
-       if ((main_cause & main_mask) && (main_cause != 0xffffffffU)) {
-               if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host)))
+       if ((main_irq_cause & main_irq_mask) && (main_irq_cause != 0xffffffffU)) {
+               if (unlikely((main_irq_cause & PCI_ERR) && HAS_PCI(host)))
                        handled = mv_pci_error(host, hpriv->base);
                else
-                       handled = mv_host_intr(host, main_cause);
+                       handled = mv_host_intr(host, main_irq_cause);
        }
        spin_unlock(&host->lock);
        return IRQ_RETVAL(handled);
@@ -2027,7 +2028,7 @@ static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
        ZERO(MV_PCI_DISC_TIMER);
        ZERO(MV_PCI_MSI_TRIGGER);
        writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
-       ZERO(HC_MAIN_IRQ_MASK_OFS);
+       ZERO(PCI_HC_MAIN_IRQ_MASK_OFS);
        ZERO(MV_PCI_SERR_MASK);
        ZERO(hpriv->irq_cause_ofs);
        ZERO(hpriv->irq_mask_ofs);
@@ -2404,7 +2405,7 @@ static void mv_eh_freeze(struct ata_port *ap)
 {
        struct mv_host_priv *hpriv = ap->host->private_data;
        unsigned int shift, hardport, port = ap->port_no;
-       u32 main_mask;
+       u32 main_irq_mask;
 
        /* FIXME: handle coalescing completion events properly */
 
@@ -2412,9 +2413,9 @@ static void mv_eh_freeze(struct ata_port *ap)
        MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
 
        /* disable assertion of portN err, done events */
-       main_mask = readl(hpriv->main_mask_reg_addr);
-       main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
-       writelfl(main_mask, hpriv->main_mask_reg_addr);
+       main_irq_mask = readl(hpriv->main_irq_mask_addr);
+       main_irq_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
+       writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
 }
 
 static void mv_eh_thaw(struct ata_port *ap)
@@ -2423,7 +2424,7 @@ static void mv_eh_thaw(struct ata_port *ap)
        unsigned int shift, hardport, port = ap->port_no;
        void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
        void __iomem *port_mmio = mv_ap_base(ap);
-       u32 main_mask, hc_irq_cause;
+       u32 main_irq_mask, hc_irq_cause;
 
        /* FIXME: handle coalescing completion events properly */
 
@@ -2438,9 +2439,9 @@ static void mv_eh_thaw(struct ata_port *ap)
        writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
 
        /* enable assertion of portN err, done events */
-       main_mask = readl(hpriv->main_mask_reg_addr);
-       main_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
-       writelfl(main_mask, hpriv->main_mask_reg_addr);
+       main_irq_mask = readl(hpriv->main_irq_mask_addr);
+       main_irq_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
+       writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
 }
 
 /**
@@ -2654,15 +2655,15 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
                goto done;
 
        if (HAS_PCI(host)) {
-               hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS;
-               hpriv->main_mask_reg_addr  = mmio + HC_MAIN_IRQ_MASK_OFS;
+               hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
+               hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
        } else {
-               hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS;
-               hpriv->main_mask_reg_addr  = mmio + HC_SOC_MAIN_IRQ_MASK_OFS;
+               hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
+               hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
        }
 
        /* global interrupt mask: 0 == mask everything */
-       writel(0, hpriv->main_mask_reg_addr);
+       writel(0, hpriv->main_irq_mask_addr);
 
        n_hc = mv_get_hc_count(host->ports[0]->flags);
 
@@ -2712,23 +2713,23 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
                writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
                if (IS_GEN_I(hpriv))
                        writelfl(~HC_MAIN_MASKED_IRQS_5,
-                                hpriv->main_mask_reg_addr);
+                                hpriv->main_irq_mask_addr);
                else
                        writelfl(~HC_MAIN_MASKED_IRQS,
-                                hpriv->main_mask_reg_addr);
+                                hpriv->main_irq_mask_addr);
 
                VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
                        "PCI int cause/mask=0x%08x/0x%08x\n",
-                       readl(hpriv->main_cause_reg_addr),
-                       readl(hpriv->main_mask_reg_addr),
+                       readl(hpriv->main_irq_cause_addr),
+                       readl(hpriv->main_irq_mask_addr),
                        readl(mmio + hpriv->irq_cause_ofs),
                        readl(mmio + hpriv->irq_mask_ofs));
        } else {
                writelfl(~HC_MAIN_MASKED_IRQS_SOC,
-                        hpriv->main_mask_reg_addr);
+                        hpriv->main_irq_mask_addr);
                VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
-                       readl(hpriv->main_cause_reg_addr),
-                       readl(hpriv->main_mask_reg_addr));
+                       readl(hpriv->main_irq_cause_addr),
+                       readl(hpriv->main_irq_mask_addr));
        }
 done:
        return rc;
index 5aa12b011a9a6499efb5ba841f01e740975db559..6adb72a2f8769bd9205395b266088ee02e07ccee 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/interrupt.h>
 #include <linux/poison.h>
 #include <linux/bitrev.h>
+#include <linux/mutex.h>
 
 #include <asm/atomic.h>
 #include <asm/io.h>
@@ -1177,7 +1178,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
     
     vcc->tx_frame_bits = tx_frame_bits;
     
-    down (&dev->vcc_sf);
+    mutex_lock(&dev->vcc_sf);
     if (dev->rxer[vci]) {
       // RXer on the channel already, just modify rate...
       cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
@@ -1203,7 +1204,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
        schedule();
     }
     dev->txer[vci].tx_present = 1;
-    up (&dev->vcc_sf);
+    mutex_unlock(&dev->vcc_sf);
   }
   
   if (rxtp->traffic_class != ATM_NONE) {
@@ -1211,7 +1212,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
     
     vcc->rx_info.pool = pool;
     
-    down (&dev->vcc_sf); 
+    mutex_lock(&dev->vcc_sf);
     /* grow RX buffer pool */
     if (!dev->rxq[pool].buffers_wanted)
       dev->rxq[pool].buffers_wanted = rx_lats;
@@ -1237,7 +1238,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
       schedule();
     // this link allows RX frames through
     dev->rxer[vci] = atm_vcc;
-    up (&dev->vcc_sf);
+    mutex_unlock(&dev->vcc_sf);
   }
   
   // indicate readiness
@@ -1262,7 +1263,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
   if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
     command cmd;
     
-    down (&dev->vcc_sf);
+    mutex_lock(&dev->vcc_sf);
     if (dev->rxer[vci]) {
       // RXer still on the channel, just modify rate... XXX not really needed
       cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
@@ -1277,7 +1278,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
     dev->txer[vci].tx_present = 0;
     while (command_do (dev, &cmd))
       schedule();
-    up (&dev->vcc_sf);
+    mutex_unlock(&dev->vcc_sf);
   }
   
   // disable RXing
@@ -1287,7 +1288,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
     // this is (the?) one reason why we need the amb_vcc struct
     unsigned char pool = vcc->rx_info.pool;
     
-    down (&dev->vcc_sf);
+    mutex_lock(&dev->vcc_sf);
     if (dev->txer[vci].tx_present) {
       // TXer still on the channel, just go to pool zero XXX not really needed
       cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
@@ -1314,7 +1315,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
       dev->rxq[pool].buffers_wanted = 0;
       drain_rx_pool (dev, pool);
     }
-    up (&dev->vcc_sf);
+    mutex_unlock(&dev->vcc_sf);
   }
   
   // free our structure
@@ -2188,7 +2189,7 @@ static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev)
       
       // semaphore for txer/rxer modifications - we cannot use a
       // spinlock as the critical region needs to switch processes
-      init_MUTEX (&dev->vcc_sf);
+      mutex_init(&dev->vcc_sf);
       // queue manipulation spinlocks; we want atomic reads and
       // writes to the queue descriptors (handles IRQ and SMP)
       // consider replacing "int pending" -> "atomic_t available"
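
The hunks above convert vcc_sf from a semaphore used purely for mutual exclusion into a struct mutex. As the comment notes, a spinlock would not do here because the critical sections sleep (they call schedule() while waiting for adapter commands). A minimal sketch of the conversion pattern, with hypothetical names:

/*
 * Illustrative only: a binary semaphore used as a mutex becomes a
 * struct mutex; down()/up() become mutex_lock()/mutex_unlock().
 */
#include <linux/mutex.h>

struct example_dev {
        struct mutex vcc_sf;            /* was: struct semaphore vcc_sf; */
        /* ... */
};

static void example_setup(struct example_dev *dev)
{
        mutex_init(&dev->vcc_sf);       /* was: init_MUTEX(&dev->vcc_sf); */
}

static void example_modify_channel(struct example_dev *dev)
{
        mutex_lock(&dev->vcc_sf);       /* was: down(&dev->vcc_sf); */
        /* ... issue command, possibly sleeping until it completes ... */
        mutex_unlock(&dev->vcc_sf);     /* was: up(&dev->vcc_sf); */
}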
index ff2a303cbe000325c71079d2846ffc98ed26fdba..df55fa8387dc4d374d567ef33dd6d66643d29305 100644 (file)
@@ -638,7 +638,7 @@ struct amb_dev {
   amb_txq          txq;
   amb_rxq          rxq[NUM_RX_POOLS];
   
-  struct semaphore vcc_sf;
+  struct mutex     vcc_sf;
   amb_tx_info      txer[NUM_VCS];
   struct atm_vcc * rxer[NUM_VCS];
   unsigned int     tx_avail;
index 38c769f8d2b7a991762c1727cf1ff2f192b85c67..3da804b1627d6b7e8578b381aae4b2c3f7c51be1 100644 (file)
@@ -415,7 +415,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
    card->pcidev = pcidev;
    membase = pci_resource_start(pcidev, 1);
    card->membase = ioremap(membase, NS_IOREMAP_SIZE);
-   if (card->membase == 0)
+   if (!card->membase)
    {
       printk("nicstar%d: can't ioremap() membase.\n",i);
       error = 3;
index c0444146c09a880a51e1b514e9fe8710119a2f67..2c9ae43e221933607e81f32b26c40f7156447dc5 100644 (file)
@@ -64,17 +64,6 @@ extern void sysdev_shutdown(void);
 extern int sysdev_suspend(pm_message_t state);
 extern int sysdev_resume(void);
 
-static inline struct class_device *to_class_dev(struct kobject *obj)
-{
-       return container_of(obj, struct class_device, kobj);
-}
-
-static inline
-struct class_device_attribute *to_class_dev_attr(struct attribute *_attr)
-{
-       return container_of(_attr, struct class_device_attribute, attr);
-}
-
 extern char *make_class_name(const char *name, struct kobject *kobj);
 
 extern int devres_release_all(struct device *dev);
index b4901799308b00d2755e1536e477976a8302ff08..0ef00e8d4153879513bfa2823a7fad0e25e4f6b7 100644 (file)
@@ -179,27 +179,13 @@ static void class_create_release(struct class *cls)
        kfree(cls);
 }
 
-static void class_device_create_release(struct class_device *class_dev)
-{
-       pr_debug("%s called for %s\n", __func__, class_dev->class_id);
-       kfree(class_dev);
-}
-
-/* needed to allow these devices to have parent class devices */
-static int class_device_create_uevent(struct class_device *class_dev,
-                                     struct kobj_uevent_env *env)
-{
-       pr_debug("%s called for %s\n", __func__, class_dev->class_id);
-       return 0;
-}
-
 /**
  * class_create - create a struct class structure
  * @owner: pointer to the module that is to "own" this struct class
  * @name: pointer to a string for the name of this class.
  *
  * This is used to create a struct class pointer that can then be used
- * in calls to class_device_create().
+ * in calls to device_create().
  *
  * Note, the pointer created here is to be destroyed when finished by
  * making a call to class_destroy().
@@ -218,7 +204,6 @@ struct class *class_create(struct module *owner, const char *name)
        cls->name = name;
        cls->owner = owner;
        cls->class_release = class_create_release;
-       cls->release = class_device_create_release;
 
        retval = class_register(cls);
        if (retval)
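
The updated kernel-doc now points users at device_create() instead of the class_device API being removed below. A hedged sketch of that usage follows; the "frob" names and the dev_t are made up, and the device_create() signature shown is the parent/devt/format form of this kernel generation.

/*
 * Illustrative only: create a class, hang a device node off it, and
 * tear both down again.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

static struct class *frob_class;
static struct device *frob_dev;

static int frob_register(dev_t devt)
{
        frob_class = class_create(THIS_MODULE, "frob");
        if (IS_ERR(frob_class))
                return PTR_ERR(frob_class);

        frob_dev = device_create(frob_class, NULL, devt, "frob%d", 0);
        if (IS_ERR(frob_dev)) {
                class_destroy(frob_class);
                return PTR_ERR(frob_dev);
        }
        return 0;
}

static void frob_unregister(dev_t devt)
{
        device_destroy(frob_class, devt);
        class_destroy(frob_class);
}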
@@ -246,113 +231,6 @@ void class_destroy(struct class *cls)
        class_unregister(cls);
 }
 
-/* Class Device Stuff */
-
-int class_device_create_file(struct class_device *class_dev,
-                            const struct class_device_attribute *attr)
-{
-       int error = -EINVAL;
-       if (class_dev)
-               error = sysfs_create_file(&class_dev->kobj, &attr->attr);
-       return error;
-}
-
-void class_device_remove_file(struct class_device *class_dev,
-                             const struct class_device_attribute *attr)
-{
-       if (class_dev)
-               sysfs_remove_file(&class_dev->kobj, &attr->attr);
-}
-
-int class_device_create_bin_file(struct class_device *class_dev,
-                                struct bin_attribute *attr)
-{
-       int error = -EINVAL;
-       if (class_dev)
-               error = sysfs_create_bin_file(&class_dev->kobj, attr);
-       return error;
-}
-
-void class_device_remove_bin_file(struct class_device *class_dev,
-                                 struct bin_attribute *attr)
-{
-       if (class_dev)
-               sysfs_remove_bin_file(&class_dev->kobj, attr);
-}
-
-static ssize_t class_device_attr_show(struct kobject *kobj,
-                                     struct attribute *attr, char *buf)
-{
-       struct class_device_attribute *class_dev_attr = to_class_dev_attr(attr);
-       struct class_device *cd = to_class_dev(kobj);
-       ssize_t ret = 0;
-
-       if (class_dev_attr->show)
-               ret = class_dev_attr->show(cd, buf);
-       return ret;
-}
-
-static ssize_t class_device_attr_store(struct kobject *kobj,
-                                      struct attribute *attr,
-                                      const char *buf, size_t count)
-{
-       struct class_device_attribute *class_dev_attr = to_class_dev_attr(attr);
-       struct class_device *cd = to_class_dev(kobj);
-       ssize_t ret = 0;
-
-       if (class_dev_attr->store)
-               ret = class_dev_attr->store(cd, buf, count);
-       return ret;
-}
-
-static struct sysfs_ops class_dev_sysfs_ops = {
-       .show   = class_device_attr_show,
-       .store  = class_device_attr_store,
-};
-
-static void class_dev_release(struct kobject *kobj)
-{
-       struct class_device *cd = to_class_dev(kobj);
-       struct class *cls = cd->class;
-
-       pr_debug("device class '%s': release.\n", cd->class_id);
-
-       if (cd->release)
-               cd->release(cd);
-       else if (cls->release)
-               cls->release(cd);
-       else {
-               printk(KERN_ERR "Class Device '%s' does not have a release() "
-                       "function, it is broken and must be fixed.\n",
-                       cd->class_id);
-               WARN_ON(1);
-       }
-}
-
-static struct kobj_type class_device_ktype = {
-       .sysfs_ops      = &class_dev_sysfs_ops,
-       .release        = class_dev_release,
-};
-
-static int class_uevent_filter(struct kset *kset, struct kobject *kobj)
-{
-       struct kobj_type *ktype = get_ktype(kobj);
-
-       if (ktype == &class_device_ktype) {
-               struct class_device *class_dev = to_class_dev(kobj);
-               if (class_dev->class)
-                       return 1;
-       }
-       return 0;
-}
-
-static const char *class_uevent_name(struct kset *kset, struct kobject *kobj)
-{
-       struct class_device *class_dev = to_class_dev(kobj);
-
-       return class_dev->class->name;
-}
-
 #ifdef CONFIG_SYSFS_DEPRECATED
 char *make_class_name(const char *name, struct kobject *kobj)
 {
@@ -370,445 +248,8 @@ char *make_class_name(const char *name, struct kobject *kobj)
        strcat(class_name, kobject_name(kobj));
        return class_name;
 }
-
-static int make_deprecated_class_device_links(struct class_device *class_dev)
-{
-       char *class_name;
-       int error;
-
-       if (!class_dev->dev)
-               return 0;
-
-       class_name = make_class_name(class_dev->class->name, &class_dev->kobj);
-       if (class_name)
-               error = sysfs_create_link(&class_dev->dev->kobj,
-                                         &class_dev->kobj, class_name);
-       else
-               error = -ENOMEM;
-       kfree(class_name);
-       return error;
-}
-
-static void remove_deprecated_class_device_links(struct class_device *class_dev)
-{
-       char *class_name;
-
-       if (!class_dev->dev)
-               return;
-
-       class_name = make_class_name(class_dev->class->name, &class_dev->kobj);
-       if (class_name)
-               sysfs_remove_link(&class_dev->dev->kobj, class_name);
-       kfree(class_name);
-}
-#else
-static inline int make_deprecated_class_device_links(struct class_device *cd)
-{ return 0; }
-static void remove_deprecated_class_device_links(struct class_device *cd)
-{ }
 #endif
 
-static int class_uevent(struct kset *kset, struct kobject *kobj,
-                       struct kobj_uevent_env *env)
-{
-       struct class_device *class_dev = to_class_dev(kobj);
-       struct device *dev = class_dev->dev;
-       int retval = 0;
-
-       pr_debug("%s - name = %s\n", __func__, class_dev->class_id);
-
-       if (MAJOR(class_dev->devt)) {
-               add_uevent_var(env, "MAJOR=%u", MAJOR(class_dev->devt));
-
-               add_uevent_var(env, "MINOR=%u", MINOR(class_dev->devt));
-       }
-
-       if (dev) {
-               const char *path = kobject_get_path(&dev->kobj, GFP_KERNEL);
-               if (path) {
-                       add_uevent_var(env, "PHYSDEVPATH=%s", path);
-                       kfree(path);
-               }
-
-               if (dev->bus)
-                       add_uevent_var(env, "PHYSDEVBUS=%s", dev->bus->name);
-
-               if (dev->driver)
-                       add_uevent_var(env, "PHYSDEVDRIVER=%s",
-                                      dev->driver->name);
-       }
-
-       if (class_dev->uevent) {
-               /* have the class device specific function add its stuff */
-               retval = class_dev->uevent(class_dev, env);
-               if (retval)
-                       pr_debug("class_dev->uevent() returned %d\n", retval);
-       } else if (class_dev->class->uevent) {
-               /* have the class specific function add its stuff */
-               retval = class_dev->class->uevent(class_dev, env);
-               if (retval)
-                       pr_debug("class->uevent() returned %d\n", retval);
-       }
-
-       return retval;
-}
-
-static struct kset_uevent_ops class_uevent_ops = {
-       .filter =       class_uevent_filter,
-       .name =         class_uevent_name,
-       .uevent =       class_uevent,
-};
-
-/*
- * DO NOT copy how this is created, kset_create_and_add() should be
- * called, but this is a hold-over from the old-way and will be deleted
- * entirely soon.
- */
-static struct kset class_obj_subsys = {
-       .uevent_ops = &class_uevent_ops,
-};
-
-static int class_device_add_attrs(struct class_device *cd)
-{
-       int i;
-       int error = 0;
-       struct class *cls = cd->class;
-
-       if (cls->class_dev_attrs) {
-               for (i = 0; attr_name(cls->class_dev_attrs[i]); i++) {
-                       error = class_device_create_file(cd,
-                                               &cls->class_dev_attrs[i]);
-                       if (error)
-                               goto err;
-               }
-       }
-done:
-       return error;
-err:
-       while (--i >= 0)
-               class_device_remove_file(cd, &cls->class_dev_attrs[i]);
-       goto done;
-}
-
-static void class_device_remove_attrs(struct class_device *cd)
-{
-       int i;
-       struct class *cls = cd->class;
-
-       if (cls->class_dev_attrs) {
-               for (i = 0; attr_name(cls->class_dev_attrs[i]); i++)
-                       class_device_remove_file(cd, &cls->class_dev_attrs[i]);
-       }
-}
-
-static int class_device_add_groups(struct class_device *cd)
-{
-       int i;
-       int error = 0;
-
-       if (cd->groups) {
-               for (i = 0; cd->groups[i]; i++) {
-                       error = sysfs_create_group(&cd->kobj, cd->groups[i]);
-                       if (error) {
-                               while (--i >= 0)
-                                       sysfs_remove_group(&cd->kobj,
-                                                          cd->groups[i]);
-                               goto out;
-                       }
-               }
-       }
-out:
-       return error;
-}
-
-static void class_device_remove_groups(struct class_device *cd)
-{
-       int i;
-       if (cd->groups)
-               for (i = 0; cd->groups[i]; i++)
-                       sysfs_remove_group(&cd->kobj, cd->groups[i]);
-}
-
-static ssize_t show_dev(struct class_device *class_dev, char *buf)
-{
-       return print_dev_t(buf, class_dev->devt);
-}
-
-static struct class_device_attribute class_devt_attr =
-       __ATTR(dev, S_IRUGO, show_dev, NULL);
-
-static ssize_t store_uevent(struct class_device *class_dev,
-                           const char *buf, size_t count)
-{
-       kobject_uevent(&class_dev->kobj, KOBJ_ADD);
-       return count;
-}
-
-static struct class_device_attribute class_uevent_attr =
-       __ATTR(uevent, S_IWUSR, NULL, store_uevent);
-
-void class_device_initialize(struct class_device *class_dev)
-{
-       class_dev->kobj.kset = &class_obj_subsys;
-       kobject_init(&class_dev->kobj, &class_device_ktype);
-       INIT_LIST_HEAD(&class_dev->node);
-}
-
-int class_device_add(struct class_device *class_dev)
-{
-       struct class *parent_class = NULL;
-       struct class_device *parent_class_dev = NULL;
-       struct class_interface *class_intf;
-       int error = -EINVAL;
-
-       class_dev = class_device_get(class_dev);
-       if (!class_dev)
-               return -EINVAL;
-
-       if (!strlen(class_dev->class_id))
-               goto out1;
-
-       parent_class = class_get(class_dev->class);
-       if (!parent_class)
-               goto out1;
-
-       parent_class_dev = class_device_get(class_dev->parent);
-
-       pr_debug("CLASS: registering class device: ID = '%s'\n",
-                class_dev->class_id);
-
-       /* first, register with generic layer. */
-       if (parent_class_dev)
-               class_dev->kobj.parent = &parent_class_dev->kobj;
-       else
-               class_dev->kobj.parent = &parent_class->subsys.kobj;
-
-       error = kobject_add(&class_dev->kobj, class_dev->kobj.parent,
-                           "%s", class_dev->class_id);
-       if (error)
-               goto out2;
-
-       /* add the needed attributes to this device */
-       error = sysfs_create_link(&class_dev->kobj,
-                                 &parent_class->subsys.kobj, "subsystem");
-       if (error)
-               goto out3;
-
-       error = class_device_create_file(class_dev, &class_uevent_attr);
-       if (error)
-               goto out3;
-
-       if (MAJOR(class_dev->devt)) {
-               error = class_device_create_file(class_dev, &class_devt_attr);
-               if (error)
-                       goto out4;
-       }
-
-       error = class_device_add_attrs(class_dev);
-       if (error)
-               goto out5;
-
-       if (class_dev->dev) {
-               error = sysfs_create_link(&class_dev->kobj,
-                                         &class_dev->dev->kobj, "device");
-               if (error)
-                       goto out6;
-       }
-
-       error = class_device_add_groups(class_dev);
-       if (error)
-               goto out7;
-
-       error = make_deprecated_class_device_links(class_dev);
-       if (error)
-               goto out8;
-
-       kobject_uevent(&class_dev->kobj, KOBJ_ADD);
-
-       /* notify any interfaces this device is now here */
-       down(&parent_class->sem);
-       list_add_tail(&class_dev->node, &parent_class->children);
-       list_for_each_entry(class_intf, &parent_class->interfaces, node) {
-               if (class_intf->add)
-                       class_intf->add(class_dev, class_intf);
-       }
-       up(&parent_class->sem);
-
-       goto out1;
-
- out8:
-       class_device_remove_groups(class_dev);
- out7:
-       if (class_dev->dev)
-               sysfs_remove_link(&class_dev->kobj, "device");
- out6:
-       class_device_remove_attrs(class_dev);
- out5:
-       if (MAJOR(class_dev->devt))
-               class_device_remove_file(class_dev, &class_devt_attr);
- out4:
-       class_device_remove_file(class_dev, &class_uevent_attr);
- out3:
-       kobject_del(&class_dev->kobj);
- out2:
-       if (parent_class_dev)
-               class_device_put(parent_class_dev);
-       class_put(parent_class);
- out1:
-       class_device_put(class_dev);
-       return error;
-}
-
-int class_device_register(struct class_device *class_dev)
-{
-       class_device_initialize(class_dev);
-       return class_device_add(class_dev);
-}
-
-/**
- * class_device_create - creates a class device and registers it with sysfs
- * @cls: pointer to the struct class that this device should be registered to.
- * @parent: pointer to the parent struct class_device of this new device, if
- * any.
- * @devt: the dev_t for the char device to be added.
- * @device: a pointer to a struct device that is associated with this class
- * device.
- * @fmt: string for the class device's name
- *
- * This function can be used by char device classes.  A struct
- * class_device will be created in sysfs, registered to the specified
- * class.
- * A "dev" file will be created, showing the dev_t for the device, if
- * the dev_t is not 0,0.
- * If a pointer to a parent struct class_device is passed in, the newly
- * created struct class_device will be a child of that device in sysfs.
- * The pointer to the struct class_device will be returned from the
- * call.  Any further sysfs files that might be required can be created
- * using this pointer.
- *
- * Note: the struct class passed to this function must have previously
- * been created with a call to class_create().
- */
-struct class_device *class_device_create(struct class *cls,
-                                        struct class_device *parent,
-                                        dev_t devt,
-                                        struct device *device,
-                                        const char *fmt, ...)
-{
-       va_list args;
-       struct class_device *class_dev = NULL;
-       int retval = -ENODEV;
-
-       if (cls == NULL || IS_ERR(cls))
-               goto error;
-
-       class_dev = kzalloc(sizeof(*class_dev), GFP_KERNEL);
-       if (!class_dev) {
-               retval = -ENOMEM;
-               goto error;
-       }
-
-       class_dev->devt = devt;
-       class_dev->dev = device;
-       class_dev->class = cls;
-       class_dev->parent = parent;
-       class_dev->release = class_device_create_release;
-       class_dev->uevent = class_device_create_uevent;
-
-       va_start(args, fmt);
-       vsnprintf(class_dev->class_id, BUS_ID_SIZE, fmt, args);
-       va_end(args);
-       retval = class_device_register(class_dev);
-       if (retval)
-               goto error;
-
-       return class_dev;
-
-error:
-       kfree(class_dev);
-       return ERR_PTR(retval);
-}
-
-void class_device_del(struct class_device *class_dev)
-{
-       struct class *parent_class = class_dev->class;
-       struct class_device *parent_device = class_dev->parent;
-       struct class_interface *class_intf;
-
-       if (parent_class) {
-               down(&parent_class->sem);
-               list_del_init(&class_dev->node);
-               list_for_each_entry(class_intf, &parent_class->interfaces, node)
-                       if (class_intf->remove)
-                               class_intf->remove(class_dev, class_intf);
-               up(&parent_class->sem);
-       }
-
-       if (class_dev->dev) {
-               remove_deprecated_class_device_links(class_dev);
-               sysfs_remove_link(&class_dev->kobj, "device");
-       }
-       sysfs_remove_link(&class_dev->kobj, "subsystem");
-       class_device_remove_file(class_dev, &class_uevent_attr);
-       if (MAJOR(class_dev->devt))
-               class_device_remove_file(class_dev, &class_devt_attr);
-       class_device_remove_attrs(class_dev);
-       class_device_remove_groups(class_dev);
-
-       kobject_uevent(&class_dev->kobj, KOBJ_REMOVE);
-       kobject_del(&class_dev->kobj);
-
-       class_device_put(parent_device);
-       class_put(parent_class);
-}
-
-void class_device_unregister(struct class_device *class_dev)
-{
-       pr_debug("CLASS: Unregistering class device. ID = '%s'\n",
-                class_dev->class_id);
-       class_device_del(class_dev);
-       class_device_put(class_dev);
-}
-
-/**
- * class_device_destroy - removes a class device that was created with class_device_create()
- * @cls: the pointer to the struct class that this device was registered with.
- * @devt: the dev_t of the device that was previously registered.
- *
- * This call unregisters and cleans up a class device that was created with a
- * call to class_device_create()
- */
-void class_device_destroy(struct class *cls, dev_t devt)
-{
-       struct class_device *class_dev = NULL;
-       struct class_device *class_dev_tmp;
-
-       down(&cls->sem);
-       list_for_each_entry(class_dev_tmp, &cls->children, node) {
-               if (class_dev_tmp->devt == devt) {
-                       class_dev = class_dev_tmp;
-                       break;
-               }
-       }
-       up(&cls->sem);
-
-       if (class_dev)
-               class_device_unregister(class_dev);
-}
-
-struct class_device *class_device_get(struct class_device *class_dev)
-{
-       if (class_dev)
-               return to_class_dev(kobject_get(&class_dev->kobj));
-       return NULL;
-}
-
-void class_device_put(struct class_device *class_dev)
-{
-       if (class_dev)
-               kobject_put(&class_dev->kobj);
-}
-
 /**
  * class_for_each_device - device iterator
  * @class: the class we're iterating
@@ -897,56 +338,9 @@ struct device *class_find_device(struct class *class, void *data,
 }
 EXPORT_SYMBOL_GPL(class_find_device);
 
-/**
- * class_find_child - device iterator for locating a particular class_device
- * @class: the class we're iterating
- * @data: data for the match function
- * @match: function to check class_device
- *
- * This function returns a reference to a class_device that is 'found' for
- * later use, as determined by the @match callback.
- *
- * The callback should return 0 if the class_device doesn't match and non-zero
- * if it does.  If the callback returns non-zero, this function will
- * return to the caller and not iterate over any more class_devices.
- *
- * Note, you will need to drop the reference with class_device_put() after use.
- *
- * We hold class->sem in this function, so it can not be
- * re-acquired in @match, otherwise it will self-deadlock. For
- * example, calls to add or remove class members would be verboten.
- */
-struct class_device *class_find_child(struct class *class, void *data,
-                                  int (*match)(struct class_device *, void *))
-{
-       struct class_device *dev;
-       int found = 0;
-
-       if (!class)
-               return NULL;
-
-       down(&class->sem);
-       list_for_each_entry(dev, &class->children, node) {
-               dev = class_device_get(dev);
-               if (dev) {
-                       if (match(dev, data)) {
-                               found = 1;
-                               break;
-                       } else
-                               class_device_put(dev);
-               } else
-                       break;
-       }
-       up(&class->sem);
-
-       return found ? dev : NULL;
-}
-EXPORT_SYMBOL_GPL(class_find_child);
-
 int class_interface_register(struct class_interface *class_intf)
 {
        struct class *parent;
-       struct class_device *class_dev;
        struct device *dev;
 
        if (!class_intf || !class_intf->class)
@@ -958,10 +352,6 @@ int class_interface_register(struct class_interface *class_intf)
 
        down(&parent->sem);
        list_add_tail(&class_intf->node, &parent->interfaces);
-       if (class_intf->add) {
-               list_for_each_entry(class_dev, &parent->children, node)
-                       class_intf->add(class_dev, class_intf);
-       }
        if (class_intf->add_dev) {
                list_for_each_entry(dev, &parent->devices, node)
                        class_intf->add_dev(dev, class_intf);
@@ -974,7 +364,6 @@ int class_interface_register(struct class_interface *class_intf)
 void class_interface_unregister(struct class_interface *class_intf)
 {
        struct class *parent = class_intf->class;
-       struct class_device *class_dev;
        struct device *dev;
 
        if (!parent)
@@ -982,10 +371,6 @@ void class_interface_unregister(struct class_interface *class_intf)
 
        down(&parent->sem);
        list_del_init(&class_intf->node);
-       if (class_intf->remove) {
-               list_for_each_entry(class_dev, &parent->children, node)
-                       class_intf->remove(class_dev, class_intf);
-       }
        if (class_intf->remove_dev) {
                list_for_each_entry(dev, &parent->devices, node)
                        class_intf->remove_dev(dev, class_intf);
@@ -1000,13 +385,6 @@ int __init classes_init(void)
        class_kset = kset_create_and_add("class", NULL, NULL);
        if (!class_kset)
                return -ENOMEM;
-
-       /* ick, this is ugly, the things we go through to keep from showing up
-        * in sysfs... */
-       kset_init(&class_obj_subsys);
-       kobject_set_name(&class_obj_subsys.kobj, "class_obj");
-       if (!class_obj_subsys.kobj.parent)
-               class_obj_subsys.kobj.parent = &class_obj_subsys.kobj;
        return 0;
 }
 
@@ -1017,19 +395,5 @@ EXPORT_SYMBOL_GPL(class_unregister);
 EXPORT_SYMBOL_GPL(class_create);
 EXPORT_SYMBOL_GPL(class_destroy);
 
-EXPORT_SYMBOL_GPL(class_device_register);
-EXPORT_SYMBOL_GPL(class_device_unregister);
-EXPORT_SYMBOL_GPL(class_device_initialize);
-EXPORT_SYMBOL_GPL(class_device_add);
-EXPORT_SYMBOL_GPL(class_device_del);
-EXPORT_SYMBOL_GPL(class_device_get);
-EXPORT_SYMBOL_GPL(class_device_put);
-EXPORT_SYMBOL_GPL(class_device_create);
-EXPORT_SYMBOL_GPL(class_device_destroy);
-EXPORT_SYMBOL_GPL(class_device_create_file);
-EXPORT_SYMBOL_GPL(class_device_remove_file);
-EXPORT_SYMBOL_GPL(class_device_create_bin_file);
-EXPORT_SYMBOL_GPL(class_device_remove_bin_file);
-
 EXPORT_SYMBOL_GPL(class_interface_register);
 EXPORT_SYMBOL_GPL(class_interface_unregister);
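
The hunks above strip out the old class_device infrastructure in favour of plain struct device. For reference, a minimal sketch of how a character-device driver typically used that API before the conversion; the "demo" class name, the demo_* identifiers and the error handling are illustrative assumptions, not code from this patch:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/err.h>

static struct class *demo_class;
static dev_t demo_devt;

static int __init demo_init(void)
{
        struct class_device *cd;
        int err;

        err = alloc_chrdev_region(&demo_devt, 0, 1, "demo");
        if (err)
                return err;

        /* class_create() must come first, as the removed kerneldoc notes. */
        demo_class = class_create(THIS_MODULE, "demo");
        if (IS_ERR(demo_class)) {
                err = PTR_ERR(demo_class);
                goto out_region;
        }

        /* Shows up as /sys/class/demo/demo0 with a "dev" attribute. */
        cd = class_device_create(demo_class, NULL, demo_devt, NULL, "demo0");
        if (IS_ERR(cd)) {
                err = PTR_ERR(cd);
                goto out_class;
        }
        return 0;

out_class:
        class_destroy(demo_class);
out_region:
        unregister_chrdev_region(demo_devt, 1);
        return err;
}

static void __exit demo_exit(void)
{
        class_device_destroy(demo_class, demo_devt);
        class_destroy(demo_class);
        unregister_chrdev_region(demo_devt, 1);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Converted drivers use the struct device equivalents, device_create() and device_destroy(), against the same struct class.
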
index 9248e0927d0873062800a4fd10ee2746bb956c34..be288b5e41809cbd8f991b4a953d4850604ea4a9 100644 (file)
@@ -787,6 +787,10 @@ int device_add(struct device *dev)
        parent = get_device(dev->parent);
        setup_parent(dev, parent);
 
+       /* use parent numa_node */
+       if (parent)
+               set_dev_node(dev, dev_to_node(parent));
+
        /* first, register with generic layer. */
        error = kobject_add(&dev->kobj, dev->kobj.parent, "%s", dev->bus_id);
        if (error)
@@ -1306,8 +1310,11 @@ int device_move(struct device *dev, struct device *new_parent)
        dev->parent = new_parent;
        if (old_parent)
                klist_remove(&dev->knode_parent);
-       if (new_parent)
+       if (new_parent) {
                klist_add_tail(&dev->knode_parent, &new_parent->klist_children);
+               set_dev_node(dev, dev_to_node(new_parent));
+       }
+
        if (!dev->class)
                goto out_put;
        error = device_move_class_links(dev, old_parent, new_parent);
@@ -1317,9 +1324,12 @@ int device_move(struct device *dev, struct device *new_parent)
                if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
                        if (new_parent)
                                klist_remove(&dev->knode_parent);
-                       if (old_parent)
+                       dev->parent = old_parent;
+                       if (old_parent) {
                                klist_add_tail(&dev->knode_parent,
                                               &old_parent->klist_children);
+                               set_dev_node(dev, dev_to_node(old_parent));
+                       }
                }
                cleanup_glue_dir(dev, new_parent_kobj);
                put_device(new_parent);
index 6fe417429977058b40f9ac2caee0b5b933078070..e38dfed41d80b8e6dff45379ee1fdfb8e8d3c750 100644 (file)
@@ -18,7 +18,7 @@ struct sysdev_class cpu_sysdev_class = {
 };
 EXPORT_SYMBOL(cpu_sysdev_class);
 
-static struct sys_device *cpu_sys_devices[NR_CPUS];
+static DEFINE_PER_CPU(struct sys_device *, cpu_sys_devices);
 
 #ifdef CONFIG_HOTPLUG_CPU
 static ssize_t show_online(struct sys_device *dev, char *buf)
@@ -68,7 +68,7 @@ void unregister_cpu(struct cpu *cpu)
        sysdev_remove_file(&cpu->sysdev, &attr_online);
 
        sysdev_unregister(&cpu->sysdev);
-       cpu_sys_devices[logical_cpu] = NULL;
+       per_cpu(cpu_sys_devices, logical_cpu) = NULL;
        return;
 }
 #else /* ... !CONFIG_HOTPLUG_CPU */
@@ -167,7 +167,7 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
        if (!error && cpu->hotpluggable)
                register_cpu_control(cpu);
        if (!error)
-               cpu_sys_devices[num] = &cpu->sysdev;
+               per_cpu(cpu_sys_devices, num) = &cpu->sysdev;
        if (!error)
                register_cpu_under_node(num, cpu_to_node(num));
 
@@ -180,8 +180,8 @@ int __cpuinit register_cpu(struct cpu *cpu, int num)
 
 struct sys_device *get_cpu_sysdev(unsigned cpu)
 {
-       if (cpu < NR_CPUS)
-               return cpu_sys_devices[cpu];
+       if (cpu < nr_cpu_ids && cpu_possible(cpu))
+               return per_cpu(cpu_sys_devices, cpu);
        else
                return NULL;
 }
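
The cpu_sys_devices conversion above replaces a static NR_CPUS-sized array with a per-CPU variable and bounds the lookup with nr_cpu_ids and cpu_possible(). A minimal sketch of the same DEFINE_PER_CPU / per_cpu() pattern; the demo_counter name and the module around it are hypothetical:

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, demo_counter);

static int __init demo_init(void)
{
        int cpu;

        /* Touch only CPUs that can ever exist, as get_cpu_sysdev() now does. */
        for_each_possible_cpu(cpu)
                per_cpu(demo_counter, cpu) = 0;

        /* get_cpu()/put_cpu() pin the task while it updates its own copy. */
        cpu = get_cpu();
        per_cpu(demo_counter, cpu)++;
        put_cpu();

        return 0;
}

static void __exit demo_exit(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                printk(KERN_INFO "demo: cpu%d counter %lu\n",
                       cpu, per_cpu(demo_counter, cpu));
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
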
index 9a6537f14401df5aae7a81842c7bd7a145c2f849..2ef5acf4368b9e8deef4440d865a87cef8d0b0a6 100644 (file)
@@ -217,12 +217,22 @@ static void driver_remove_groups(struct device_driver *drv,
 int driver_register(struct device_driver *drv)
 {
        int ret;
+       struct device_driver *other;
 
        if ((drv->bus->probe && drv->probe) ||
            (drv->bus->remove && drv->remove) ||
            (drv->bus->shutdown && drv->shutdown))
                printk(KERN_WARNING "Driver '%s' needs updating - please use "
                        "bus_type methods\n", drv->name);
+
+       other = driver_find(drv->name, drv->bus);
+       if (other) {
+               put_driver(other);
+               printk(KERN_ERR "Error: Driver '%s' is already registered, "
+                       "aborting...\n", drv->name);
+               return -EEXIST;
+       }
+
        ret = bus_add_driver(drv);
        if (ret)
                return ret;
index 1fef7df8c9d601417312c088a6c0b8b9351646c8..9fd4a8534146e1747160e0a987fe8e4224eae01f 100644 (file)
@@ -396,6 +396,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
        if (!firmware_p)
                return -EINVAL;
 
+       printk(KERN_INFO "firmware: requesting %s\n", name);
+
        *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
        if (!firmware) {
                printk(KERN_ERR "%s: kmalloc(struct firmware) failed\n",
index 12fde2d03d695a112f02debd25c4fb897a4b937c..39f3d1b3a21354aa112f5fb222b862d99daafb04 100644 (file)
@@ -77,6 +77,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
                       "Node %d PageTables:   %8lu kB\n"
                       "Node %d NFS_Unstable: %8lu kB\n"
                       "Node %d Bounce:       %8lu kB\n"
+                      "Node %d WritebackTmp: %8lu kB\n"
                       "Node %d Slab:         %8lu kB\n"
                       "Node %d SReclaimable: %8lu kB\n"
                       "Node %d SUnreclaim:   %8lu kB\n",
@@ -99,6 +100,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
                       nid, K(node_page_state(nid, NR_PAGETABLE)),
                       nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
                       nid, K(node_page_state(nid, NR_BOUNCE)),
+                      nid, K(node_page_state(nid, NR_WRITEBACK_TEMP)),
                       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
                                node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
                       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
index 280e71ee744ca659e164cdb0d266b11efb3383ba..5b4c6e649c11170803a8ed7bedc7ca333c69a99a 100644 (file)
@@ -195,7 +195,6 @@ void aoedev_exit(void);
 struct aoedev *aoedev_by_aoeaddr(int maj, int min);
 struct aoedev *aoedev_by_sysminor_m(ulong sysminor);
 void aoedev_downdev(struct aoedev *d);
-int aoedev_isbusy(struct aoedev *d);
 int aoedev_flush(const char __user *str, size_t size);
 
 int aoenet_init(void);
index d00293ba3b456187a21b1b2aa6fa5ccff49c0dfc..8fc429cf82b6b41d9ddf91d41a339831ac96a778 100644 (file)
@@ -668,16 +668,16 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
        u16 n;
 
        /* word 83: command set supported */
-       n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1]));
+       n = get_unaligned_le16(&id[83 << 1]);
 
        /* word 86: command set/feature enabled */
-       n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1]));
+       n |= get_unaligned_le16(&id[86 << 1]);
 
        if (n & (1<<10)) {      /* bit 10: LBA 48 */
                d->flags |= DEVFL_EXT;
 
                /* word 100: number lba48 sectors */
-               ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1]));
+               ssize = get_unaligned_le64(&id[100 << 1]);
 
                /* set as in ide-disk.c:init_idedisk_capacity */
                d->geo.cylinders = ssize;
@@ -688,12 +688,12 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
                d->flags &= ~DEVFL_EXT;
 
                /* number lba28 sectors */
-               ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1]));
+               ssize = get_unaligned_le32(&id[60 << 1]);
 
                /* NOTE: obsolete in ATA 6 */
-               d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1]));
-               d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1]));
-               d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1]));
+               d->geo.cylinders = get_unaligned_le16(&id[54 << 1]);
+               d->geo.heads = get_unaligned_le16(&id[55 << 1]);
+               d->geo.sectors = get_unaligned_le16(&id[56 << 1]);
        }
 
        if (d->ssize != ssize)
@@ -779,7 +779,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
        u16 aoemajor;
 
        hin = (struct aoe_hdr *) skb_mac_header(skb);
-       aoemajor = be16_to_cpu(get_unaligned(&hin->major));
+       aoemajor = get_unaligned_be16(&hin->major);
        d = aoedev_by_aoeaddr(aoemajor, hin->minor);
        if (d == NULL) {
                snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
@@ -791,7 +791,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
 
        spin_lock_irqsave(&d->lock, flags);
 
-       n = be32_to_cpu(get_unaligned(&hin->tag));
+       n = get_unaligned_be32(&hin->tag);
        t = gettgt(d, hin->src);
        if (t == NULL) {
                printk(KERN_INFO "aoe: can't find target e%ld.%d:%012llx\n",
@@ -806,9 +806,9 @@ aoecmd_ata_rsp(struct sk_buff *skb)
                snprintf(ebuf, sizeof ebuf,
                        "%15s e%d.%d    tag=%08x@%08lx\n",
                        "unexpected rsp",
-                       be16_to_cpu(get_unaligned(&hin->major)),
+                       get_unaligned_be16(&hin->major),
                        hin->minor,
-                       be32_to_cpu(get_unaligned(&hin->tag)),
+                       get_unaligned_be32(&hin->tag),
                        jiffies);
                aoechr_error(ebuf);
                return;
@@ -873,7 +873,7 @@ aoecmd_ata_rsp(struct sk_buff *skb)
                        printk(KERN_INFO
                                "aoe: unrecognized ata command %2.2Xh for %d.%d\n",
                                ahout->cmdstat,
-                               be16_to_cpu(get_unaligned(&hin->major)),
+                               get_unaligned_be16(&hin->major),
                                hin->minor);
                }
        }
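
The aoe hunks above fold the old be16_to_cpu(get_unaligned(...)) and le16_to_cpu(get_unaligned(...)) pairs into the combined get_unaligned_be16()/get_unaligned_le16() style of helper. A small userspace sketch of what those helpers compute, assembling the value byte by byte so no unaligned load is ever issued; the sample buffer contents are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel helpers: read a possibly unaligned
 * little- or big-endian value one byte at a time. */
static uint16_t get_unaligned_le16(const void *p)
{
        const uint8_t *b = p;

        return (uint16_t)(b[0] | (b[1] << 8));
}

static uint32_t get_unaligned_be32(const void *p)
{
        const uint8_t *b = p;

        return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
               ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
        /* Odd offsets into a byte buffer, as with the id[] words above. */
        unsigned char buf[7] = { 0xff, 0x34, 0x12, 0xde, 0xad, 0xbe, 0xef };

        printf("le16 at +1: 0x%04x\n", get_unaligned_le16(buf + 1));  /* 0x1234 */
        printf("be32 at +3: 0x%08x\n", get_unaligned_be32(buf + 3));  /* 0xdeadbeef */
        return 0;
}
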
index f9a1cd9edb77e156e5a7f81c389f26ea21161f41..a1d813ab0d6b6046b7d1a1e34ac3d60fd4b68179 100644 (file)
@@ -18,24 +18,6 @@ static void skbpoolfree(struct aoedev *d);
 static struct aoedev *devlist;
 static DEFINE_SPINLOCK(devlist_lock);
 
-int
-aoedev_isbusy(struct aoedev *d)
-{
-       struct aoetgt **t, **te;
-       struct frame *f, *e;
-
-       t = d->targets;
-       te = t + NTARGETS;
-       for (; t < te && *t; t++) {
-               f = (*t)->frames;
-               e = f + (*t)->nframes;
-               for (; f < e; f++)
-                       if (f->tag != FREETAG)
-                               return 1;
-       }
-       return 0;
-}
-
 struct aoedev *
 aoedev_by_aoeaddr(int maj, int min)
 {
index 18d243c73eee0b34966a1a4eea6da1e673ece772..d625169c8e48a99951cbb3f833755f77ee0eca1f 100644 (file)
@@ -128,7 +128,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
        skb_push(skb, ETH_HLEN);        /* (1) */
 
        h = (struct aoe_hdr *) skb_mac_header(skb);
-       n = be32_to_cpu(get_unaligned(&h->tag));
+       n = get_unaligned_be32(&h->tag);
        if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
                goto exit;
 
@@ -140,7 +140,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
                        printk(KERN_ERR
                                "%s%d.%d@%s; ecode=%d '%s'\n",
                                "aoe: error packet from ",
-                               be16_to_cpu(get_unaligned(&h->major)),
+                               get_unaligned_be16(&h->major),
                                h->minor, skb->dev->name,
                                h->err, aoe_errlist[n]);
                goto exit;
index 7bd76639544c18d78d8b4ed87fb6befb7dcf685d..a196ef7f147fa234ead35696b8c115dd9ea69927 100644 (file)
@@ -319,7 +319,7 @@ out:
 
 #ifdef CONFIG_BLK_DEV_XIP
 static int brd_direct_access (struct block_device *bdev, sector_t sector,
-                       unsigned long *data)
+                       void **kaddr, unsigned long *pfn)
 {
        struct brd_device *brd = bdev->bd_disk->private_data;
        struct page *page;
@@ -333,7 +333,8 @@ static int brd_direct_access (struct block_device *bdev, sector_t sector,
        page = brd_insert_page(brd, sector);
        if (!page)
                return -ENOMEM;
-       *data = (unsigned long)page_address(page);
+       *kaddr = page_address(page);
+       *pfn = page_to_pfn(page);
 
        return 0;
 }
@@ -386,10 +387,14 @@ static struct block_device_operations brd_fops = {
  */
 static int rd_nr;
 int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
+static int max_part;
+static int part_shift;
 module_param(rd_nr, int, 0);
 MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
 module_param(rd_size, int, 0);
 MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
+module_param(max_part, int, 0);
+MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
 
@@ -434,11 +439,11 @@ static struct brd_device *brd_alloc(int i)
        blk_queue_max_sectors(brd->brd_queue, 1024);
        blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
-       disk = brd->brd_disk = alloc_disk(1);
+       disk = brd->brd_disk = alloc_disk(1 << part_shift);
        if (!disk)
                goto out_free_queue;
        disk->major             = RAMDISK_MAJOR;
-       disk->first_minor       = i;
+       disk->first_minor       = i << part_shift;
        disk->fops              = &brd_fops;
        disk->private_data      = brd;
        disk->queue             = brd->brd_queue;
@@ -522,7 +527,12 @@ static int __init brd_init(void)
         *     themselves and have kernel automatically instantiate actual
         *     device on-demand.
         */
-       if (rd_nr > 1UL << MINORBITS)
+
+       part_shift = 0;
+       if (max_part > 0)
+               part_shift = fls(max_part);
+
+       if (rd_nr > 1UL << (MINORBITS - part_shift))
                return -EINVAL;
 
        if (rd_nr) {
@@ -530,7 +540,7 @@ static int __init brd_init(void)
                range = rd_nr;
        } else {
                nr = CONFIG_BLK_DEV_RAM_COUNT;
-               range = 1UL << MINORBITS;
+               range = 1UL << (MINORBITS - part_shift);
        }
 
        if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
@@ -569,7 +579,7 @@ static void __exit brd_exit(void)
        unsigned long range;
        struct brd_device *brd, *next;
 
-       range = rd_nr ? rd_nr :  1UL << MINORBITS;
+       range = rd_nr ? rd_nr :  1UL << (MINORBITS - part_shift);
 
        list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
                brd_del_one(brd);
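
The brd changes above carve a partition field out of the minor number: part_shift is fls(max_part), every disk is allocated with alloc_disk(1 << part_shift) and gets first_minor = i << part_shift, and the total device range shrinks to 1UL << (MINORBITS - part_shift). A userspace sketch of that arithmetic; max_part=15 is an illustrative value and fls() is a plain C reimplementation of the kernel helper:

#include <stdio.h>

#define MINORBITS 20    /* as defined in <linux/kdev_t.h> */

/* fls(): position of the most significant set bit, 1-based; 0 when x == 0. */
static int fls(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

int main(void)
{
        int max_part = 15;              /* e.g. "modprobe brd max_part=15" */
        int part_shift = max_part > 0 ? fls(max_part) : 0;
        int i;

        printf("part_shift   = %d\n", part_shift);                        /* 4 */
        printf("device range = %lu minors\n",
               1UL << (MINORBITS - part_shift));                          /* 65536 */
        for (i = 0; i < 3; i++)
                printf("disk %d first_minor = %d\n", i, i << part_shift); /* 0, 16, 32 */
        return 0;
}

With max_part=15 each disk owns 16 consecutive minors: one for the whole device plus room for 15 partitions.
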
index cf6083a1f9280e5f0008363de88c532abb04b527..e336b05fe4a7f5d763dd0db8c61dc740b93635a9 100644 (file)
@@ -425,16 +425,12 @@ static void __devinit cciss_procinit(int i)
        struct proc_dir_entry *pde;
 
        if (proc_cciss == NULL)
-               proc_cciss = proc_mkdir("cciss", proc_root_driver);
+               proc_cciss = proc_mkdir("driver/cciss", NULL);
        if (!proc_cciss)
                return;
-       pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
+       pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP |
                                        S_IROTH, proc_cciss,
-                                       &cciss_proc_fops);
-       if (!pde)
-               return;
-
-       pde->data = hba[i];
+                                       &cciss_proc_fops, hba[i]);
 }
 #endif                         /* CONFIG_PROC_FS */
 
@@ -3700,7 +3696,7 @@ static void __exit cciss_cleanup(void)
                        cciss_remove_one(hba[i]->pdev);
                }
        }
-       remove_proc_entry("cciss", proc_root_driver);
+       remove_proc_entry("driver/cciss", NULL);
 }
 
 static void fail_all_cmds(unsigned long ctlr)
index 69199185ff4b8acba9e4cdaca15e2521263ec1d0..09c14341e6e39582c4230d71ec8bddb384e26610 100644 (file)
@@ -214,7 +214,7 @@ static struct proc_dir_entry *proc_array;
 static void __init ida_procinit(int i)
 {
        if (proc_array == NULL) {
-               proc_array = proc_mkdir("cpqarray", proc_root_driver);
+               proc_array = proc_mkdir("driver/cpqarray", NULL);
                if (!proc_array) return;
        }
 
@@ -1796,7 +1796,7 @@ static void __exit cpqarray_exit(void)
                }
        }
 
-       remove_proc_entry("cpqarray", proc_root_driver);
+       remove_proc_entry("driver/cpqarray", NULL);
 }
 
 module_init(cpqarray_init)
index 7652e87d60c52cb7c2a18053198fd205ec1ef9b2..395f8ea7981c54f2fc5b6dfec3cf99acfd83e1ff 100644 (file)
@@ -4526,14 +4526,15 @@ static void __init parse_floppy_cfg_string(char *cfg)
        }
 }
 
-int __init init_module(void)
+static int __init floppy_module_init(void)
 {
        if (floppy)
                parse_floppy_cfg_string(floppy);
        return floppy_init();
 }
+module_init(floppy_module_init);
 
-void cleanup_module(void)
+static void __exit floppy_module_exit(void)
 {
        int drive;
 
@@ -4562,6 +4563,7 @@ void cleanup_module(void)
        /* eject disk, if any */
        fd_eject(0);
 }
+module_exit(floppy_module_exit);
 
 module_param(floppy, charp, 0);
 module_param(FLOPPY_IRQ, int, 0);
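
The floppy hunk above retires the legacy init_module()/cleanup_module() entry points in favour of static functions registered through module_init()/module_exit(). A minimal skeleton using the same convention; the demo names and messages are placeholders:

#include <linux/module.h>
#include <linux/kernel.h>

static int __init demo_module_init(void)
{
        printk(KERN_INFO "demo: loaded\n");
        return 0;               /* a non-zero return aborts the load */
}
module_init(demo_module_init);

static void __exit demo_module_exit(void)
{
        printk(KERN_INFO "demo: unloaded\n");
}
module_exit(demo_module_exit);

MODULE_LICENSE("GPL");

The named entry points can stay static and carry the __init/__exit section annotations, instead of relying on the externally visible init_module/cleanup_module names.
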
index f7f163557aa0f3ea88bd1bf91efd41942b59dfc0..d3a25b027ff9fc5e6331372a252b5e134b6270c4 100644 (file)
@@ -546,7 +546,7 @@ static void loop_unplug(struct request_queue *q)
 {
        struct loop_device *lo = q->queuedata;
 
-       clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+       queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q);
        blk_run_address_space(lo->lo_backing_file->f_mapping);
 }
 
index 60cc54368b66eaa1c7888ab905f7c9b83c92bd41..ad98dda6037dd5193baa266bf2471c52ca1b6f79 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/kernel.h>
 #include <net/sock.h>
 #include <linux/net.h>
+#include <linux/kthread.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -55,6 +56,7 @@ static unsigned int debugflags;
 
 static unsigned int nbds_max = 16;
 static struct nbd_device *nbd_dev;
+static int max_part;
 
 /*
  * Use just one lock (or at most 1 per NIC). Two arguments for this:
@@ -337,7 +339,7 @@ static struct request *nbd_read_stat(struct nbd_device *lo)
        }
 
        req = nbd_find_request(lo, *(struct request **)reply.handle);
-       if (unlikely(IS_ERR(req))) {
+       if (IS_ERR(req)) {
                result = PTR_ERR(req);
                if (result != -ENOENT)
                        goto harderror;
@@ -441,6 +443,85 @@ static void nbd_clear_que(struct nbd_device *lo)
 }
 
 
+static void nbd_handle_req(struct nbd_device *lo, struct request *req)
+{
+       if (!blk_fs_request(req))
+               goto error_out;
+
+       nbd_cmd(req) = NBD_CMD_READ;
+       if (rq_data_dir(req) == WRITE) {
+               nbd_cmd(req) = NBD_CMD_WRITE;
+               if (lo->flags & NBD_READ_ONLY) {
+                       printk(KERN_ERR "%s: Write on read-only\n",
+                                       lo->disk->disk_name);
+                       goto error_out;
+               }
+       }
+
+       req->errors = 0;
+
+       mutex_lock(&lo->tx_lock);
+       if (unlikely(!lo->sock)) {
+               mutex_unlock(&lo->tx_lock);
+               printk(KERN_ERR "%s: Attempted send on closed socket\n",
+                      lo->disk->disk_name);
+               req->errors++;
+               nbd_end_request(req);
+               return;
+       }
+
+       lo->active_req = req;
+
+       if (nbd_send_req(lo, req) != 0) {
+               printk(KERN_ERR "%s: Request send failed\n",
+                               lo->disk->disk_name);
+               req->errors++;
+               nbd_end_request(req);
+       } else {
+               spin_lock(&lo->queue_lock);
+               list_add(&req->queuelist, &lo->queue_head);
+               spin_unlock(&lo->queue_lock);
+       }
+
+       lo->active_req = NULL;
+       mutex_unlock(&lo->tx_lock);
+       wake_up_all(&lo->active_wq);
+
+       return;
+
+error_out:
+       req->errors++;
+       nbd_end_request(req);
+}
+
+static int nbd_thread(void *data)
+{
+       struct nbd_device *lo = data;
+       struct request *req;
+
+       set_user_nice(current, -20);
+       while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) {
+               /* wait for something to do */
+               wait_event_interruptible(lo->waiting_wq,
+                                        kthread_should_stop() ||
+                                        !list_empty(&lo->waiting_queue));
+
+               /* extract request */
+               if (list_empty(&lo->waiting_queue))
+                       continue;
+
+               spin_lock_irq(&lo->queue_lock);
+               req = list_entry(lo->waiting_queue.next, struct request,
+                                queuelist);
+               list_del_init(&req->queuelist);
+               spin_unlock_irq(&lo->queue_lock);
+
+               /* handle request */
+               nbd_handle_req(lo, req);
+       }
+       return 0;
+}
+
 /*
  * We always wait for result of write, for now. It would be nice to make it optional
  * in future
@@ -456,65 +537,23 @@ static void do_nbd_request(struct request_queue * q)
                struct nbd_device *lo;
 
                blkdev_dequeue_request(req);
+
+               spin_unlock_irq(q->queue_lock);
+
                dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n",
                                req->rq_disk->disk_name, req, req->cmd_type);
 
-               if (!blk_fs_request(req))
-                       goto error_out;
-
                lo = req->rq_disk->private_data;
 
                BUG_ON(lo->magic != LO_MAGIC);
 
-               nbd_cmd(req) = NBD_CMD_READ;
-               if (rq_data_dir(req) == WRITE) {
-                       nbd_cmd(req) = NBD_CMD_WRITE;
-                       if (lo->flags & NBD_READ_ONLY) {
-                               printk(KERN_ERR "%s: Write on read-only\n",
-                                               lo->disk->disk_name);
-                               goto error_out;
-                       }
-               }
-
-               req->errors = 0;
-               spin_unlock_irq(q->queue_lock);
-
-               mutex_lock(&lo->tx_lock);
-               if (unlikely(!lo->sock)) {
-                       mutex_unlock(&lo->tx_lock);
-                       printk(KERN_ERR "%s: Attempted send on closed socket\n",
-                              lo->disk->disk_name);
-                       req->errors++;
-                       nbd_end_request(req);
-                       spin_lock_irq(q->queue_lock);
-                       continue;
-               }
-
-               lo->active_req = req;
+               spin_lock_irq(&lo->queue_lock);
+               list_add_tail(&req->queuelist, &lo->waiting_queue);
+               spin_unlock_irq(&lo->queue_lock);
 
-               if (nbd_send_req(lo, req) != 0) {
-                       printk(KERN_ERR "%s: Request send failed\n",
-                                       lo->disk->disk_name);
-                       req->errors++;
-                       nbd_end_request(req);
-               } else {
-                       spin_lock(&lo->queue_lock);
-                       list_add(&req->queuelist, &lo->queue_head);
-                       spin_unlock(&lo->queue_lock);
-               }
-
-               lo->active_req = NULL;
-               mutex_unlock(&lo->tx_lock);
-               wake_up_all(&lo->active_wq);
+               wake_up(&lo->waiting_wq);
 
                spin_lock_irq(q->queue_lock);
-               continue;
-
-error_out:
-               req->errors++;
-               spin_unlock(q->queue_lock);
-               nbd_end_request(req);
-               spin_lock(q->queue_lock);
        }
 }
 
@@ -524,6 +563,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
        struct nbd_device *lo = inode->i_bdev->bd_disk->private_data;
        int error;
        struct request sreq ;
+       struct task_struct *thread;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -537,6 +577,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
        switch (cmd) {
        case NBD_DISCONNECT:
                printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
+               blk_rq_init(NULL, &sreq);
                sreq.cmd_type = REQ_TYPE_SPECIAL;
                nbd_cmd(&sreq) = NBD_CMD_DISC;
                /*
@@ -571,10 +612,13 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
                error = -EINVAL;
                file = fget(arg);
                if (file) {
+                       struct block_device *bdev = inode->i_bdev;
                        inode = file->f_path.dentry->d_inode;
                        if (S_ISSOCK(inode->i_mode)) {
                                lo->file = file;
                                lo->sock = SOCKET_I(inode);
+                               if (max_part > 0)
+                                       bdev->bd_invalidated = 1;
                                error = 0;
                        } else {
                                fput(file);
@@ -606,7 +650,12 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
        case NBD_DO_IT:
                if (!lo->file)
                        return -EINVAL;
+               thread = kthread_create(nbd_thread, lo, lo->disk->disk_name);
+               if (IS_ERR(thread))
+                       return PTR_ERR(thread);
+               wake_up_process(thread);
                error = nbd_do_it(lo);
+               kthread_stop(thread);
                if (error)
                        return error;
                sock_shutdown(lo, 1);
@@ -619,6 +668,8 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
                lo->bytesize = 0;
                inode->i_bdev->bd_inode->i_size = 0;
                set_capacity(lo->disk, 0);
+               if (max_part > 0)
+                       ioctl_by_bdev(inode->i_bdev, BLKRRPART, 0);
                return lo->harderror;
        case NBD_CLEAR_QUE:
                /*
@@ -652,6 +703,7 @@ static int __init nbd_init(void)
 {
        int err = -ENOMEM;
        int i;
+       int part_shift;
 
        BUILD_BUG_ON(sizeof(struct nbd_request) != 28);
 
@@ -659,8 +711,17 @@ static int __init nbd_init(void)
        if (!nbd_dev)
                return -ENOMEM;
 
+       if (max_part < 0) {
+               printk(KERN_CRIT "nbd: max_part must be >= 0\n");
+               return -EINVAL;
+       }
+
+       part_shift = 0;
+       if (max_part > 0)
+               part_shift = fls(max_part);
+
        for (i = 0; i < nbds_max; i++) {
-               struct gendisk *disk = alloc_disk(1);
+               struct gendisk *disk = alloc_disk(1 << part_shift);
                elevator_t *old_e;
                if (!disk)
                        goto out;
@@ -695,17 +756,18 @@ static int __init nbd_init(void)
                nbd_dev[i].file = NULL;
                nbd_dev[i].magic = LO_MAGIC;
                nbd_dev[i].flags = 0;
+               INIT_LIST_HEAD(&nbd_dev[i].waiting_queue);
                spin_lock_init(&nbd_dev[i].queue_lock);
                INIT_LIST_HEAD(&nbd_dev[i].queue_head);
                mutex_init(&nbd_dev[i].tx_lock);
                init_waitqueue_head(&nbd_dev[i].active_wq);
+               init_waitqueue_head(&nbd_dev[i].waiting_wq);
                nbd_dev[i].blksize = 1024;
                nbd_dev[i].bytesize = 0;
                disk->major = NBD_MAJOR;
-               disk->first_minor = i;
+               disk->first_minor = i << part_shift;
                disk->fops = &nbd_fops;
                disk->private_data = &nbd_dev[i];
-               disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
                sprintf(disk->disk_name, "nbd%d", i);
                set_capacity(disk, 0);
                add_disk(disk);
@@ -743,7 +805,9 @@ MODULE_DESCRIPTION("Network Block Device");
 MODULE_LICENSE("GPL");
 
 module_param(nbds_max, int, 0444);
-MODULE_PARM_DESC(nbds_max, "How many network block devices to initialize.");
+MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
+module_param(max_part, int, 0444);
+MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)");
 #ifndef NDEBUG
 module_param(debugflags, int, 0644);
 MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
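
The nbd rework above moves request submission out of do_nbd_request() and into a dedicated kernel thread: requests are queued on waiting_queue under queue_lock, waiting_wq is woken, and the thread sleeps in wait_event_interruptible() until work arrives or kthread_should_stop() becomes true. A stripped-down sketch of that create/wake/stop lifecycle; the demo_* names and the item handling are illustrative, not nbd code:

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/err.h>

struct demo_item {
        struct list_head node;
};

static struct task_struct *demo_task;
static LIST_HEAD(demo_queue);
static DEFINE_SPINLOCK(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);

/* A producer would list_add_tail() under demo_lock and then wake_up(&demo_wq). */

static int demo_thread(void *data)
{
        while (!kthread_should_stop() || !list_empty(&demo_queue)) {
                struct demo_item *it = NULL;

                /* Sleep until there is work or somebody calls kthread_stop(). */
                wait_event_interruptible(demo_wq,
                                         kthread_should_stop() ||
                                         !list_empty(&demo_queue));

                spin_lock_irq(&demo_lock);
                if (!list_empty(&demo_queue)) {
                        it = list_entry(demo_queue.next, struct demo_item, node);
                        list_del_init(&it->node);
                }
                spin_unlock_irq(&demo_lock);

                if (it)
                        kfree(it);      /* "handle" the dequeued item */
        }
        return 0;
}

static int __init demo_init(void)
{
        demo_task = kthread_create(demo_thread, NULL, "demo_thread");
        if (IS_ERR(demo_task))
                return PTR_ERR(demo_task);
        wake_up_process(demo_task);
        return 0;
}

static void __exit demo_exit(void)
{
        kthread_stop(demo_task);        /* wakes the thread and waits for it to exit */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
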
index df819f8a95a6d0db78e95153e0194deea4730e88..570f3b70dce795afc8e1beabd104de3dc31e26ad 100644 (file)
@@ -716,10 +716,8 @@ static int pd_special_command(struct pd_unit *disk,
        struct request rq;
        int err = 0;
 
-       memset(&rq, 0, sizeof(rq));
-       rq.errors = 0;
+       blk_rq_init(NULL, &rq);
        rq.rq_disk = disk->gd;
-       rq.ref_count = 1;
        rq.end_io_data = &wait;
        rq.end_io = blk_end_sync_rq;
        blk_insert_request(disk->gd->queue, &rq, 0, func);
index 18feb1c7c33b1b03ec0d394767e252b466000876..3ba1df93e9e32e8fb3c8641f7f11b9a33a575cc2 100644 (file)
@@ -776,8 +776,6 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 
        rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]);
        memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
-       if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
-               memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
 
        rq->timeout = 60*HZ;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
@@ -2744,7 +2742,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
        int i;
        int ret = 0;
        char b[BDEVNAME_SIZE];
-       struct proc_dir_entry *proc;
        struct block_device *bdev;
 
        if (pd->pkt_dev == dev) {
@@ -2788,11 +2785,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
                goto out_mem;
        }
 
-       proc = create_proc_entry(pd->name, 0, pkt_proc);
-       if (proc) {
-               proc->data = pd;
-               proc->proc_fops = &pkt_proc_fops;
-       }
+       proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd);
        DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
        return 0;
 
@@ -3101,7 +3094,7 @@ static int __init pkt_init(void)
                goto out_misc;
        }
 
-       pkt_proc = proc_mkdir(DRIVER_NAME, proc_root_driver);
+       pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL);
 
        return 0;
 
@@ -3117,7 +3110,7 @@ out2:
 
 static void __exit pkt_exit(void)
 {
-       remove_proc_entry(DRIVER_NAME, proc_root_driver);
+       remove_proc_entry("driver/"DRIVER_NAME, NULL);
        misc_deregister(&pkt_misc);
 
        pkt_debugfs_cleanup();
index 7483f947f0e995d435585508daaa8583be926b3a..d797e209951d43fe2163e91e32fe392f13296e77 100644 (file)
@@ -102,8 +102,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
                dev_dbg(&dev->sbd.core,
                        "%s:%u: bio %u: %u segs %u sectors from %lu\n",
                        __func__, __LINE__, i, bio_segments(iter.bio),
-                       bio_sectors(iter.bio),
-                       (unsigned long)iter.bio->bi_sector);
+                       bio_sectors(iter.bio), iter.bio->bi_sector);
 
                size = bvec->bv_len;
                buf = bvec_kmap_irq(bvec, &flags);
@@ -406,7 +405,6 @@ static void ps3disk_prepare_flush(struct request_queue *q, struct request *req)
 
        dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__);
 
-       memset(req->cmd, 0, sizeof(req->cmd));
        req->cmd_type = REQ_TYPE_FLUSH;
 }
 
index 27bfe72aab59360951b133b0be25c88ba85ea6c4..e322cce8c12d7870d45b2b202c8ff60e9e924c9f 100644 (file)
@@ -2399,7 +2399,7 @@ static void ub_disconnect(struct usb_interface *intf)
                del_gendisk(lun->disk);
                /*
                 * I wish I could do:
-                *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+                *    queue_flag_set(QUEUE_FLAG_DEAD, q);
                 * As it is, we rely on our internal poisoning and let
                 * the upper levels to spin furiously failing all the I/O.
                 */
index d771da816d95d708cff253e41e3a6ab05cd9d930..f2fff5799ddf0fe1b41909b64d606d747f9081f7 100644 (file)
@@ -137,7 +137,7 @@ static void blkif_restart_queue_callback(void *arg)
        schedule_work(&info->work);
 }
 
-int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
+static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
 {
        /* We don't have real geometry info, but let's at least return
           values consistent with the size of the device */
index 7e31d5f1bc8aa41444821da3b6c4d34b82fe96b8..e5cd856a2fea9cce67404059f0e52d8a82ac5d2c 100644 (file)
@@ -143,7 +143,7 @@ restart:
                int len;
 
                set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-               len = tty->driver->write(tty, skb->data, skb->len);
+               len = tty->ops->write(tty, skb->data, skb->len);
                hdev->stat.byte_tx += len;
 
                skb_pull(skb, len);
@@ -190,8 +190,7 @@ static int hci_uart_flush(struct hci_dev *hdev)
 
        /* Flush any pending characters in the driver and discipline. */
        tty_ldisc_flush(tty);
-       if (tty->driver && tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       tty_driver_flush_buffer(tty);
 
        if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
                hu->proto->flush(hu);
@@ -285,9 +284,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
 
        if (tty->ldisc.flush_buffer)
                tty->ldisc.flush_buffer(tty);
-
-       if (tty->driver && tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       tty_driver_flush_buffer(tty);
 
        return 0;
 }
@@ -373,9 +370,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *f
        hu->hdev->stat.byte_rx += count;
        spin_unlock(&hu->rx_lock);
 
-       if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) &&
-                                       tty->driver->unthrottle)
-               tty->driver->unthrottle(tty);
+       tty_unthrottle(tty);
 }
 
 static int hci_uart_register_dev(struct hci_uart *hu)
index ac3829030ac581bca1475b9f345e4f7a3f7e7f85..69f26eb6415bdbb9824ed89ca1307134fe55b647 100644 (file)
@@ -2194,7 +2194,6 @@ static int cdrom_read_cdda_bpc(struct cdrom_device_info *cdi, __u8 __user *ubuf,
                if (ret)
                        break;
 
-               memset(rq->cmd, 0, sizeof(rq->cmd));
                rq->cmd[0] = GPCMD_READ_CD;
                rq->cmd[1] = 1 << 2;
                rq->cmd[2] = (lba >> 24) & 0xff;
index b74b6c2768a8fbcd15d0468dc33fbe450e843609..5245a4a0ba74c4d1d0fb0debd89c0296f43a39b3 100644 (file)
@@ -144,6 +144,7 @@ static int proc_viocd_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations proc_viocd_operations = {
+       .owner          = THIS_MODULE,
        .open           = proc_viocd_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -679,7 +680,6 @@ static struct vio_driver viocd_driver = {
 
 static int __init viocd_init(void)
 {
-       struct proc_dir_entry *e;
        int ret = 0;
 
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
@@ -719,12 +719,8 @@ static int __init viocd_init(void)
        if (ret)
                goto out_free_info;
 
-       e = create_proc_entry("iSeries/viocd", S_IFREG|S_IRUGO, NULL);
-       if (e) {
-               e->owner = THIS_MODULE;
-               e->proc_fops = &proc_viocd_operations;
-       }
-
+       proc_create("iSeries/viocd", S_IFREG|S_IRUGO, NULL,
+                   &proc_viocd_operations);
        return 0;
 
 out_free_info:
index 2906ee7bd298f5e87f2cfea5ea10e113d0abfb08..5dce3877eee589f8bd680a7c3df8eab74cdd4888 100644 (file)
@@ -80,6 +80,15 @@ config VT_HW_CONSOLE_BINDING
         information. For framebuffer console users, please refer to
         <file:Documentation/fb/fbcon.txt>.
 
+config DEVKMEM
+       bool "/dev/kmem virtual device support"
+       default y
+       help
+         Say Y here if you want to support the /dev/kmem device. The
+         /dev/kmem device is rarely used, but can be used for certain
+         kind of kernel debugging operations.
+         When in doubt, say "N".
+
 config SERIAL_NONSTANDARD
        bool "Non-standard serial port support"
        depends on HAS_IOMEM
@@ -732,9 +741,16 @@ config NVRAM
          To compile this driver as a module, choose M here: the
          module will be called nvram.
 
+#
+# These legacy RTC drivers just cause too many conflicts with the generic
+# RTC framework ... let's not even try to coexist any more.
+#
+if RTC_LIB=n
+
 config RTC
        tristate "Enhanced Real Time Clock Support"
-       depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV && !ARM && !SUPERH && !S390 && !AVR32
+       depends on !PPC && !PARISC && !IA64 && !M68K && !SPARC && !FRV \
+                       && !ARM && !SUPERH && !S390 && !AVR32
        ---help---
          If you say Y here and create a character special file /dev/rtc with
          major number 10 and minor number 135 using mknod ("man mknod"), you
@@ -840,6 +856,8 @@ config DS1302
          will get access to the real time clock (or hardware clock) built
          into your computer.
 
+endif # RTC_LIB
+
 config COBALT_LCD
        bool "Support for Cobalt LCD"
        depends on MIPS_COBALT
index c69f79598e47cb4c9e375bc648daaa0e28e26646..99e6a406efb436b8269fe5a57cfe5971400bbdf2 100644 (file)
@@ -35,7 +35,7 @@
 
 //#define AGP_DEBUG 1
 #ifdef AGP_DEBUG
-#define DBG(x,y...) printk (KERN_DEBUG PFX "%s: " x "\n", __FUNCTION__ , ## y)
+#define DBG(x,y...) printk (KERN_DEBUG PFX "%s: " x "\n", __func__ , ## y)
 #else
 #define DBG(x,y...) do { } while (0)
 #endif
index 3d468f502d2db021cc99007784ecf14fa59c6526..37457e5a4f2b8ce6d06f728788dbeee2a54e3fee 100644 (file)
@@ -832,33 +832,34 @@ static void change_speed(struct async_struct *info,
        local_irq_restore(flags);
 }
 
-static void rs_put_char(struct tty_struct *tty, unsigned char ch)
+static int rs_put_char(struct tty_struct *tty, unsigned char ch)
 {
        struct async_struct *info;
        unsigned long flags;
 
        if (!tty)
-               return;
+               return 0;
 
        info = tty->driver_data;
 
        if (serial_paranoia_check(info, tty->name, "rs_put_char"))
-               return;
+               return 0;
 
        if (!info->xmit.buf)
-               return;
+               return 0;
 
        local_irq_save(flags);
        if (CIRC_SPACE(info->xmit.head,
                       info->xmit.tail,
                       SERIAL_XMIT_SIZE) == 0) {
                local_irq_restore(flags);
-               return;
+               return 0;
        }
 
        info->xmit.buf[info->xmit.head++] = ch;
        info->xmit.head &= SERIAL_XMIT_SIZE-1;
        local_irq_restore(flags);
+       return 1;
 }
 
 static void rs_flush_chars(struct tty_struct *tty)
@@ -1074,6 +1075,7 @@ static int get_serial_info(struct async_struct * info,
        if (!retinfo)
                return -EFAULT;
        memset(&tmp, 0, sizeof(tmp));
+       lock_kernel();
        tmp.type = state->type;
        tmp.line = state->line;
        tmp.port = state->port;
@@ -1084,6 +1086,7 @@ static int get_serial_info(struct async_struct * info,
        tmp.close_delay = state->close_delay;
        tmp.closing_wait = state->closing_wait;
        tmp.custom_divisor = state->custom_divisor;
+       unlock_kernel();
        if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
                return -EFAULT;
        return 0;
@@ -1099,13 +1102,17 @@ static int set_serial_info(struct async_struct * info,
 
        if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
                return -EFAULT;
+
+       lock_kernel();
        state = info->state;
        old_state = *state;
   
        change_irq = new_serial.irq != state->irq;
        change_port = (new_serial.port != state->port);
-       if(change_irq || change_port || (new_serial.xmit_fifo_size != state->xmit_fifo_size))
+       if(change_irq || change_port || (new_serial.xmit_fifo_size != state->xmit_fifo_size)) {
+         unlock_kernel();
          return -EINVAL;
+       }
   
        if (!serial_isroot()) {
                if ((new_serial.baud_base != state->baud_base) ||
@@ -1122,8 +1129,10 @@ static int set_serial_info(struct async_struct * info,
                goto check_and_exit;
        }
 
-       if (new_serial.baud_base < 9600)
+       if (new_serial.baud_base < 9600) {
+               unlock_kernel();
                return -EINVAL;
+       }
 
        /*
         * OK, past this point, all the error checking has been done.
@@ -1157,6 +1166,7 @@ check_and_exit:
                }
        } else
                retval = startup(info);
+       unlock_kernel();
        return retval;
 }
 
@@ -1496,8 +1506,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
                rs_wait_until_sent(tty, info->timeout);
        }
        shutdown(info);
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       rs_flush_buffer(tty);
                
        tty_ldisc_flush(tty);
        tty->closing = 0;
@@ -1530,6 +1539,8 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
                return; /* Just in case.... */
 
        orig_jiffies = jiffies;
+
+       lock_kernel();
        /*
         * Set the check interval to be 1/5 of the estimated time to
         * send a single character, and make it at least 1.  The check
@@ -1570,6 +1581,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
                        break;
        }
        __set_current_state(TASK_RUNNING);
+       unlock_kernel();
 #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
        printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
 #endif
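
The rs_put_char() change above has the driver's put_char path report whether the byte was queued (1) or dropped because the transmit ring was full (0). A userspace sketch of that contract using the same power-of-two head/tail arithmetic; the 16-byte buffer is an arbitrary stand-in for SERIAL_XMIT_SIZE:

#include <stdio.h>

#define XMIT_SIZE 16    /* must be a power of two, like SERIAL_XMIT_SIZE */

/* Free space in a power-of-two ring, same formula as <linux/circ_buf.h>. */
#define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))

struct xmit_buf {
        unsigned char buf[XMIT_SIZE];
        unsigned int head, tail;
};

/* Mirrors the new contract: return 1 if the byte was queued, 0 if dropped. */
static int put_char(struct xmit_buf *x, unsigned char ch)
{
        if (CIRC_SPACE(x->head, x->tail, XMIT_SIZE) == 0)
                return 0;
        x->buf[x->head++] = ch;
        x->head &= XMIT_SIZE - 1;
        return 1;
}

int main(void)
{
        struct xmit_buf x = { .head = 0, .tail = 0 };
        int queued = 0;
        int i;

        for (i = 0; i < 20; i++)
                queued += put_char(&x, 'a' + i);
        printf("queued %d of 20 bytes\n", queued);   /* 15: one slot stays free */
        return 0;
}
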
index 17d54315e146e5d95b794f456a270e2e818b08c6..cdd876dbb2b0f77ab05e27c37f0671724743537c 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/poll.h>
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/miscdevice.h>
 #include <linux/apm_bios.h>
 #include <linux/capability.h>
@@ -493,11 +494,10 @@ static struct miscdevice apm_device = {
  *     -1: Unknown
  *   8) min = minutes; sec = seconds
  */
-static int apm_get_info(char *buf, char **start, off_t fpos, int length)
+static int proc_apm_show(struct seq_file *m, void *v)
 {
        struct apm_power_info info;
        char *units;
-       int ret;
 
        info.ac_line_status = 0xff;
        info.battery_status = 0xff;
@@ -515,14 +515,27 @@ static int apm_get_info(char *buf, char **start, off_t fpos, int length)
        case 1:         units = "sec";  break;
        }
 
-       ret = sprintf(buf, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
+       seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n",
                     driver_version, APM_32_BIT_SUPPORT,
                     info.ac_line_status, info.battery_status,
                     info.battery_flag, info.battery_life,
                     info.time, units);
 
-       return ret;
+       return 0;
 }
+
+static int proc_apm_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, proc_apm_show, NULL);
+}
+
+static const struct file_operations apm_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = proc_apm_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
 #endif
 
 static int kapmd(void *arg)
@@ -593,7 +606,7 @@ static int __init apm_init(void)
        wake_up_process(kapmd_tsk);
 
 #ifdef CONFIG_PROC_FS
-       create_proc_info_entry("apm", 0, NULL, apm_get_info);
+       proc_create("apm", 0, NULL, &apm_proc_fops);
 #endif
 
        ret = misc_register(&apm_device);
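
The apm hunk above converts a create_proc_info_entry() read handler into the seq_file single_open() pattern registered via proc_create(). A minimal module using the same 2.6-era interface; the /proc/demo_apm name and the printed line are placeholders:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
        /* seq_printf() replaces the old sprintf-into-a-page-buffer scheme. */
        seq_printf(m, "demo 1.0 status=%d\n", 0);
        return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = demo_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init demo_init(void)
{
        if (!proc_create("demo_apm", 0, NULL, &demo_proc_fops))
                return -ENOMEM;
        return 0;
}

static void __exit demo_exit(void)
{
        remove_proc_entry("demo_apm", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

proc_create_data(), used in the cciss and pktcdvd hunks earlier, is the same call with an extra private-data pointer stored in the new entry.
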
index a7c4990b5b6bf0f3929bad97d5957aad0e134ce6..31d08b641f5be84667c5d53e49c862915e65d0c8 100644 (file)
@@ -199,7 +199,7 @@ static int __init applicom_init(void)
                if (pci_enable_device(dev))
                        return -EIO;
 
-               RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO);
+               RamIO = ioremap_nocache(pci_resource_start(dev, 0), LEN_RAM_IO);
 
                if (!RamIO) {
                        printk(KERN_INFO "ac.o: Failed to ioremap PCI memory "
@@ -254,7 +254,7 @@ static int __init applicom_init(void)
        /* Now try the specified ISA cards */
 
        for (i = 0; i < MAX_ISA_BOARD; i++) {
-               RamIO = ioremap(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
+               RamIO = ioremap_nocache(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
 
                if (!RamIO) {
                        printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1);
index 6b104e45a322b3a5bd321a07f12a4c3936441db6..4246b8e36cb32853eedb658b3e96f3c4a46ba978 100644 (file)
@@ -277,6 +277,7 @@ u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode)
                        return p->inverse_translations[m][glyph];
        }
 }
+EXPORT_SYMBOL_GPL(inverse_translate);
 
 static void update_user_maps(void)
 {
index c2d23cae95155d5c78fefa8995c672c5b30643a7..c0a4a0bb509e77397875b023bc4ad4f1324946bf 100644 (file)
@@ -215,7 +215,7 @@ static int __init cs5535_gpio_init(void)
        else
                mask = 0x0b003c66;
 
-       if (request_region(gpio_base, CS5535_GPIO_SIZE, NAME) == 0) {
+       if (!request_region(gpio_base, CS5535_GPIO_SIZE, NAME)) {
                printk(KERN_ERR NAME ": can't allocate I/O for GPIO\n");
                return -ENODEV;
        }
index e4f579c3e2455640a3d1dc27c742e20d26ea2c01..ef73e72daedcf978197c062d181a41de86b465d6 100644 (file)
@@ -21,7 +21,6 @@
  *
  * This version supports shared IRQ's (only for PCI boards).
  *
- * $Log: cyclades.c,v $
  * Prevent users from opening non-existing Z ports.
  *
  * Revision 2.3.2.8   2000/07/06 18:14:16 ivan
@@ -62,7 +61,7 @@
  * Driver now makes sure that the constant SERIAL_XMIT_SIZE is defined;
  *
  * Revision 2.3.2.2   1999/10/01 11:27:43 ivan
- * Fixed bug in cyz_poll that would make all ports but port 0 
+ * Fixed bug in cyz_poll that would make all ports but port 0
  * unable to transmit/receive data (Cyclades-Z only);
  * Implemented logic to prevent the RX buffer from being stuck with data
  * due to a driver / firmware race condition in interrupt op mode
  * Revision 2.3.1.1   1999/07/15 16:45:53 ivan
  * Removed CY_PROC conditional compilation;
  * Implemented SMP-awareness for the driver;
- * Implemented a new ISA IRQ autoprobe that uses the irq_probe_[on|off] 
+ * Implemented a new ISA IRQ autoprobe that uses the irq_probe_[on|off]
  * functions;
  * The driver now accepts memory addresses (maddr=0xMMMMM) and IRQs
  * (irq=NN) as parameters (only for ISA boards);
- * Fixed bug in set_line_char that would prevent the Cyclades-Z 
+ * Fixed bug in set_line_char that would prevent the Cyclades-Z
  * ports from being configured at speeds above 115.2Kbps;
  * Fixed bug in cy_set_termios that would prevent XON/XOFF flow control
  * switching from working properly;
- * The driver now only prints IRQ info for the Cyclades-Z if it's 
+ * The driver now only prints IRQ info for the Cyclades-Z if it's
  * configured to work in interrupt mode;
  *
  * Revision 2.2.2.3   1999/06/28 11:13:29 ivan
  * Added support for interrupt mode operation for the Z cards;
  * Removed the driver inactivity control for the Z;
- * Added a missing MOD_DEC_USE_COUNT in the cy_open function for when 
+ * Added a missing MOD_DEC_USE_COUNT in the cy_open function for when
  * the Z firmware is not loaded yet;
- * Replaced the "manual" Z Tx flush buffer by a call to a FW command of 
+ * Replaced the "manual" Z Tx flush buffer by a call to a FW command of
  * same functionality;
- * Implemented workaround for IRQ setting loss on the PCI configuration 
+ * Implemented workaround for IRQ setting loss on the PCI configuration
  * registers after a PCI bridge EEPROM reload (affects PLX9060 only);
  *
  * Revision 2.2.2.2  1999/05/14 17:18:15 ivan
  * BREAK implementation changed in order to make use of the 'break_ctl'
  * TTY facility;
  * Fixed typo in TTY structure field 'driver_name';
- * Included a PCI bridge reset and EEPROM reload in the board 
+ * Included a PCI bridge reset and EEPROM reload in the board
  * initialization code (for both Y and Z series).
  *
  * Revision 2.2.2.1  1999/04/08 16:17:43 ivan
- * Fixed a bug in cy_wait_until_sent that was preventing the port to be 
+ * Fixed a bug in cy_wait_until_sent that was preventing the port to be
  * closed properly after a SIGINT;
  * Module usage counter scheme revisited;
  * Added support to the upcoming Y PCI boards (i.e., support to additional
  * PCI Device ID's).
- * 
+ *
  * Revision 2.2.1.10 1999/01/20 16:14:29 ivan
  * Removed all unnecessary page-alignement operations in ioremap calls
  * (ioremap is currently safe for these operations).
  *
  * Revision 2.2.1.9  1998/12/30 18:18:30 ivan
- * Changed access to PLX PCI bridge registers from I/O to MMIO, in 
+ * Changed access to PLX PCI bridge registers from I/O to MMIO, in
  * order to make PLX9050-based boards work with certain motherboards.
  *
  * Revision 2.2.1.8  1998/11/13 12:46:20 ivan
  * Fixed Cyclom-4Yo hardware detection bug.
  *
  * Revision 2.2.1.4  1998/08/04 11:02:50 ivan
- * /proc/cyclades implementation with great collaboration of 
+ * /proc/cyclades implementation with great collaboration of
  * Marc Lewis <marc@blarg.net>;
  * cyy_interrupt was changed to avoid occurrence of kernel oopses
  * during PPP operation.
  * General code review in order to comply with 2.1 kernel standards;
  * data loss prevention for slow devices revisited (cy_wait_until_sent
  * was created);
- * removed conditional compilation for new/old PCI structure support 
+ * removed conditional compilation for new/old PCI structure support
  * (now the driver only supports the new PCI structure).
  *
  * Revision 2.2.1.1  1998/03/19 16:43:12 ivan
  * cleaned up the data loss fix;
  * fixed XON/XOFF handling once more (Cyclades-Z);
  * general review of the driver routines;
- * introduction of a mechanism to prevent data loss with slow 
+ * introduction of a mechanism to prevent data loss with slow
  * printers, by forcing a delay before closing the port.
  *
  * Revision 2.1.1.2  1998/02/17 16:50:00 ivan
  * Code review for the module cleanup routine;
  * fixed RTS and DTR status report for new CD1400's in get_modem_info;
  * includes anonymous changes regarding signal_pending.
- * 
+ *
  * Revision 2.1  1997/11/01 17:42:41 ivan
  * Changes in the driver to support Alpha systems (except 8Zo V_1);
  * BREAK fix for the Cyclades-Z boards;
  * driver inactivity control by FW implemented;
- * introduction of flag that allows driver to take advantage of 
+ * introduction of flag that allows driver to take advantage of
  * a special CD1400 feature related to HW flow control;
  * added support for the CD1400  rev. J (Cyclom-Y boards);
  * introduction of ioctls to:
  *  - adjust the polling interval (Cyclades-Z);
  *
  * Revision 1.36.4.33  1997/06/27 19:00:00  ivan
- * Fixes related to kernel version conditional 
+ * Fixes related to kernel version conditional
  * compilation.
- *  
+ *
  * Revision 1.36.4.32  1997/06/14 19:30:00  ivan
- * Compatibility issues between kernels 2.0.x and 
+ * Compatibility issues between kernels 2.0.x and
  * 2.1.x (mainly related to clear_bit function).
- *  
+ *
  * Revision 1.36.4.31  1997/06/03 15:30:00  ivan
- * Changes to define the memory window according to the 
+ * Changes to define the memory window according to the
  * board type.
- *  
+ *
  * Revision 1.36.4.30  1997/05/16 15:30:00  daniel
  * Changes to support new cycladesZ boards.
  *
 #undef CY_PCI_DEBUG
 
 /*
- * Include section 
+ * Include section
  */
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/firmware.h>
 
 #include <asm/system.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/irq.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #include <linux/kernel.h>
 #include <linux/pci.h>
@@ -668,10 +667,10 @@ static void cy_send_xchar(struct tty_struct *tty, char ch);
        ((readl(&((struct RUNTIME_9060 __iomem *) \
                ((card).ctl_addr))->init_ctrl) & (1<<17)) != 0)
 
-#define ISZLOADED(card)        (((ZO_V1==readl(&((struct RUNTIME_9060 __iomem *) \
+#define ISZLOADED(card)        (((ZO_V1 == readl(&((struct RUNTIME_9060 __iomem *) \
                        ((card).ctl_addr))->mail_box_0)) || \
                        Z_FPGA_CHECK(card)) && \
-                       (ZFIRM_ID==readl(&((struct FIRM_ID __iomem *) \
+                       (ZFIRM_ID == readl(&((struct FIRM_ID __iomem *) \
                        ((card).base_addr+ID_ADDRESS))->signature)))
 
 #ifndef SERIAL_XMIT_SIZE
@@ -809,12 +808,12 @@ static char baud_cor3[] = {       /* receive threshold */
 
 /*
  * The Cyclades driver implements HW flow control as any serial driver.
- * The cyclades_port structure member rflow and the vector rflow_thr 
- * allows us to take advantage of a special feature in the CD1400 to avoid 
- * data loss even when the system interrupt latency is too high. These flags 
- * are to be used only with very special applications. Setting these flags 
- * requires the use of a special cable (DTR and RTS reversed). In the new 
- * CD1400-based boards (rev. 6.00 or later), there is no need for special 
+ * The cyclades_port structure member rflow and the vector rflow_thr
+ * allows us to take advantage of a special feature in the CD1400 to avoid
+ * data loss even when the system interrupt latency is too high. These flags
+ * are to be used only with very special applications. Setting these flags
+ * requires the use of a special cable (DTR and RTS reversed). In the new
+ * CD1400-based boards (rev. 6.00 or later), there is no need for special
  * cables.
  */
 
@@ -841,14 +840,22 @@ static int cy_chip_offset[] = { 0x0000,
 
 #ifdef CONFIG_PCI
 static struct pci_device_id cy_pci_dev_id[] __devinitdata = {
-       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Lo) },      /* PCI < 1Mb */
-       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Hi) },      /* PCI > 1Mb */
-       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Lo) },     /* 4Y PCI < 1Mb */
-       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Hi) },     /* 4Y PCI > 1Mb */
-       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Lo) },     /* 8Y PCI < 1Mb */
-       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Hi) },     /* 8Y PCI > 1Mb */
-       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Lo) },      /* Z PCI < 1Mb */
-       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Hi) },      /* Z PCI > 1Mb */
+       /* PCI < 1Mb */
+       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Lo) },
+       /* PCI > 1Mb */
+       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Hi) },
+       /* 4Y PCI < 1Mb */
+       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Lo) },
+       /* 4Y PCI > 1Mb */
+       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Hi) },
+       /* 8Y PCI < 1Mb */
+       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Lo) },
+       /* 8Y PCI > 1Mb */
+       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Hi) },
+       /* Z PCI < 1Mb */
+       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Lo) },
+       /* Z PCI > 1Mb */
+       { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Hi) },
        { }                     /* end of table */
 };
 MODULE_DEVICE_TABLE(pci, cy_pci_dev_id);
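
The reflowed table above is the usual PCI probing setup: a pci_device_id array built with PCI_DEVICE(), closed by an empty terminating entry, and exported through MODULE_DEVICE_TABLE() so module autoloading can match the driver when such a card appears. A minimal sketch with placeholder vendor/device numbers (0x1234/0x0001 are not real IDs):

	#include <linux/pci.h>
	#include <linux/module.h>

	static struct pci_device_id my_pci_ids[] = {
		{ PCI_DEVICE(0x1234, 0x0001) },	/* placeholder vendor/device pair */
		{ }				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(pci, my_pci_ids);
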
@@ -905,15 +912,14 @@ static inline int serial_paranoia_check(struct cyclades_port *info,
 
    This function is only called from inside spinlock-protected code.
  */
-static int cyy_issue_cmd(void __iomem * base_addr, u_char cmd, int index)
+static int cyy_issue_cmd(void __iomem *base_addr, u_char cmd, int index)
 {
        unsigned int i;
 
        /* Check to see that the previous command has completed */
        for (i = 0; i < 100; i++) {
-               if (readb(base_addr + (CyCCR << index)) == 0) {
+               if (readb(base_addr + (CyCCR << index)) == 0)
                        break;
-               }
                udelay(10L);
        }
        /* if the CCR never cleared, the previous command
@@ -929,7 +935,7 @@ static int cyy_issue_cmd(void __iomem * base_addr, u_char cmd, int index)
 
 #ifdef CONFIG_ISA
 /* ISA interrupt detection code */
-static unsigned detect_isa_irq(void __iomem * address)
+static unsigned detect_isa_irq(void __iomem *address)
 {
        int irq;
        unsigned long irqs, flags;
@@ -1038,7 +1044,7 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip,
                                        if (info->flags & ASYNC_SAK)
                                                do_SAK(tty);
                                } else if (data & CyFRAME) {
-                                       tty_insert_flip_char( tty,
+                                       tty_insert_flip_char(tty,
                                                readb(base_addr + (CyRDSR <<
                                                        index)), TTY_FRAME);
                                        info->icount.rx++;
@@ -1320,7 +1326,8 @@ static irqreturn_t cyy_interrupt(int irq, void *dev_id)
 
        if (unlikely(cinfo == NULL)) {
 #ifdef CY_DEBUG_INTERRUPTS
-               printk(KERN_DEBUG "cyy_interrupt: spurious interrupt %d\n",irq);
+               printk(KERN_DEBUG "cyy_interrupt: spurious interrupt %d\n",
+                               irq);
 #endif
                return IRQ_NONE;        /* spurious interrupt */
        }
@@ -1375,12 +1382,12 @@ static irqreturn_t cyy_interrupt(int irq, void *dev_id)
 
 /***********************************************************/
 /********* End of block of Cyclom-Y specific code **********/
-/******** Start of block of Cyclades-Z specific code *********/
+/******** Start of block of Cyclades-Z specific code *******/
 /***********************************************************/
 
 static int
 cyz_fetch_msg(struct cyclades_card *cinfo,
-               __u32 * channel, __u8 * cmd, __u32 * param)
+               __u32 *channel, __u8 *cmd, __u32 *param)
 {
        struct FIRM_ID __iomem *firm_id;
        struct ZFW_CTRL __iomem *zfw_ctrl;
@@ -1388,9 +1395,8 @@ cyz_fetch_msg(struct cyclades_card *cinfo,
        unsigned long loc_doorbell;
 
        firm_id = cinfo->base_addr + ID_ADDRESS;
-       if (!ISZLOADED(*cinfo)) {
+       if (!ISZLOADED(*cinfo))
                return -1;
-       }
        zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
        board_ctrl = &zfw_ctrl->board_ctrl;
 
@@ -1418,9 +1424,9 @@ cyz_issue_cmd(struct cyclades_card *cinfo,
        unsigned int index;
 
        firm_id = cinfo->base_addr + ID_ADDRESS;
-       if (!ISZLOADED(*cinfo)) {
+       if (!ISZLOADED(*cinfo))
                return -1;
-       }
+
        zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff);
        board_ctrl = &zfw_ctrl->board_ctrl;
 
@@ -1428,9 +1434,8 @@ cyz_issue_cmd(struct cyclades_card *cinfo,
        pci_doorbell =
            &((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))->pci_doorbell;
        while ((readl(pci_doorbell) & 0xff) != 0) {
-               if (index++ == 1000) {
+               if (index++ == 1000)
                        return (int)(readl(pci_doorbell) & 0xff);
-               }
                udelay(50L);
        }
        cy_writel(&board_ctrl->hcmd_channel, channel);
@@ -1504,7 +1509,8 @@ static void cyz_handle_rx(struct cyclades_port *info,
                        while (len--) {
                                data = readb(cinfo->base_addr + rx_bufaddr +
                                                new_rx_get);
-                               new_rx_get = (new_rx_get + 1)& (rx_bufsize - 1);
+                               new_rx_get = (new_rx_get + 1) &
+                                                       (rx_bufsize - 1);
                                tty_insert_flip_char(tty, data, TTY_NORMAL);
                                info->idle_stats.recv_bytes++;
                                info->icount.rx++;
@@ -1636,7 +1642,8 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo)
                special_count = 0;
                delta_count = 0;
                info = &cinfo->ports[channel];
-               if ((tty = info->tty) == NULL)
+               tty = info->tty;
+               if (tty == NULL)
                        continue;
 
                ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]);
@@ -1732,7 +1739,8 @@ static irqreturn_t cyz_interrupt(int irq, void *dev_id)
 
        if (unlikely(cinfo == NULL)) {
 #ifdef CY_DEBUG_INTERRUPTS
-               printk(KERN_DEBUG "cyz_interrupt: spurious interrupt %d\n",irq);
+               printk(KERN_DEBUG "cyz_interrupt: spurious interrupt %d\n",
+                                                                       irq);
 #endif
                return IRQ_NONE;        /* spurious interrupt */
        }
@@ -1851,9 +1859,8 @@ static int startup(struct cyclades_port *info)
        }
 
        if (!info->type) {
-               if (info->tty) {
+               if (info->tty)
                        set_bit(TTY_IO_ERROR, &info->tty->flags);
-               }
                free_page(page);
                goto errout;
        }
@@ -1904,9 +1911,8 @@ static int startup(struct cyclades_port *info)
                        readb(base_addr + (CySRER << index)) | CyRxData);
                info->flags |= ASYNC_INITIALIZED;
 
-               if (info->tty) {
+               if (info->tty)
                        clear_bit(TTY_IO_ERROR, &info->tty->flags);
-               }
                info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
                info->breakon = info->breakoff = 0;
                memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats));
@@ -1925,9 +1931,8 @@ static int startup(struct cyclades_port *info)
                base_addr = card->base_addr;
 
                firm_id = base_addr + ID_ADDRESS;
-               if (!ISZLOADED(*card)) {
+               if (!ISZLOADED(*card))
                        return -ENODEV;
-               }
 
                zfw_ctrl = card->base_addr +
                                (readl(&firm_id->zfwctrl_addr) & 0xfffff);
@@ -1990,9 +1995,8 @@ static int startup(struct cyclades_port *info)
                /* enable send, recv, modem !!! */
 
                info->flags |= ASYNC_INITIALIZED;
-               if (info->tty) {
+               if (info->tty)
                        clear_bit(TTY_IO_ERROR, &info->tty->flags);
-               }
                info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
                info->breakon = info->breakoff = 0;
                memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats));
@@ -2061,9 +2065,8 @@ static void shutdown(struct cyclades_port *info)
        void __iomem *base_addr;
        int chip, channel, index;
 
-       if (!(info->flags & ASYNC_INITIALIZED)) {
+       if (!(info->flags & ASYNC_INITIALIZED))
                return;
-       }
 
        card = info->card;
        channel = info->line - card->first_line;
@@ -2105,9 +2108,8 @@ static void shutdown(struct cyclades_port *info)
                /* it may be appropriate to clear _XMIT at
                   some later date (after testing)!!! */
 
-               if (info->tty) {
+               if (info->tty)
                        set_bit(TTY_IO_ERROR, &info->tty->flags);
-               }
                info->flags &= ~ASYNC_INITIALIZED;
                spin_unlock_irqrestore(&card->card_lock, flags);
        } else {
@@ -2124,9 +2126,8 @@ static void shutdown(struct cyclades_port *info)
 #endif
 
                firm_id = base_addr + ID_ADDRESS;
-               if (!ISZLOADED(*card)) {
+               if (!ISZLOADED(*card))
                        return;
-               }
 
                zfw_ctrl = card->base_addr +
                                (readl(&firm_id->zfwctrl_addr) & 0xfffff);
@@ -2157,9 +2158,8 @@ static void shutdown(struct cyclades_port *info)
 #endif
                }
 
-               if (info->tty) {
+               if (info->tty)
                        set_bit(TTY_IO_ERROR, &info->tty->flags);
-               }
                info->flags &= ~ASYNC_INITIALIZED;
 
                spin_unlock_irqrestore(&card->card_lock, flags);
@@ -2204,7 +2204,8 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
         * If non-blocking mode is set, then make the check up front
         * and then exit.
         */
-       if ((filp->f_flags & O_NONBLOCK) || (tty->flags & (1 << TTY_IO_ERROR))) {
+       if ((filp->f_flags & O_NONBLOCK) ||
+                                       (tty->flags & (1 << TTY_IO_ERROR))) {
                info->flags |= ASYNC_NORMAL_ACTIVE;
                return 0;
        }
@@ -2301,7 +2302,8 @@ block_til_ready(struct tty_struct *tty, struct file *filp,
                        return -EINVAL;
                }
 
-               zfw_ctrl = base_addr + (readl(&firm_id->zfwctrl_addr)& 0xfffff);
+               zfw_ctrl = base_addr + (readl(&firm_id->zfwctrl_addr)
+                                                               & 0xfffff);
                board_ctrl = &zfw_ctrl->board_ctrl;
                ch_ctrl = zfw_ctrl->ch_ctrl;
 
@@ -2378,9 +2380,9 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
        int retval;
 
        line = tty->index;
-       if ((tty->index < 0) || (NR_PORTS <= line)) {
+       if (tty->index < 0 || NR_PORTS <= line)
                return -ENODEV;
-       }
+
        for (i = 0; i < NR_CARDS; i++)
                if (line < cy_card[i].first_line + cy_card[i].nports &&
                                line >= cy_card[i].first_line)
@@ -2388,9 +2390,8 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
        if (i >= NR_CARDS)
                return -ENODEV;
        info = &cy_card[i].ports[line - cy_card[i].first_line];
-       if (info->line < 0) {
+       if (info->line < 0)
                return -ENODEV;
-       }
 
        /* If the card's firmware hasn't been loaded,
           treat it as absent from the system.  This
@@ -2456,9 +2457,9 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
 #endif
        tty->driver_data = info;
        info->tty = tty;
-       if (serial_paranoia_check(info, tty->name, "cy_open")) {
+       if (serial_paranoia_check(info, tty->name, "cy_open"))
                return -ENODEV;
-       }
+
 #ifdef CY_DEBUG_OPEN
        printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
                        info->count);
@@ -2482,9 +2483,8 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
         * Start up serial port
         */
        retval = startup(info);
-       if (retval) {
+       if (retval)
                return retval;
-       }
 
        retval = block_til_ready(tty, filp, info);
        if (retval) {
@@ -2522,6 +2522,7 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
                return;         /* Just in case.... */
 
        orig_jiffies = jiffies;
+       lock_kernel();
        /*
         * Set the check interval to be 1/5 of the estimated time to
         * send a single character, and make it at least 1.  The check
@@ -2573,11 +2574,47 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout)
        }
        /* Run one more char cycle */
        msleep_interruptible(jiffies_to_msecs(char_time * 5));
+       unlock_kernel();
 #ifdef CY_DEBUG_WAIT_UNTIL_SENT
        printk(KERN_DEBUG "Clean (jiff=%lu)...done\n", jiffies);
 #endif
 }
 
+static void cy_flush_buffer(struct tty_struct *tty)
+{
+       struct cyclades_port *info = tty->driver_data;
+       struct cyclades_card *card;
+       int channel, retval;
+       unsigned long flags;
+
+#ifdef CY_DEBUG_IO
+       printk(KERN_DEBUG "cyc:cy_flush_buffer ttyC%d\n", info->line);
+#endif
+
+       if (serial_paranoia_check(info, tty->name, "cy_flush_buffer"))
+               return;
+
+       card = info->card;
+       channel = info->line - card->first_line;
+
+       spin_lock_irqsave(&card->card_lock, flags);
+       info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+       spin_unlock_irqrestore(&card->card_lock, flags);
+
+       if (IS_CYC_Z(*card)) {  /* If it is a Z card, flush the on-board
+                                          buffers as well */
+               spin_lock_irqsave(&card->card_lock, flags);
+               retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L);
+               if (retval != 0) {
+                       printk(KERN_ERR "cyc: flush_buffer retval on ttyC%d "
+                               "was %x\n", info->line, retval);
+               }
+               spin_unlock_irqrestore(&card->card_lock, flags);
+       }
+       tty_wakeup(tty);
+}                              /* cy_flush_buffer */
+
+
 /*
  * This routine is called when a particular tty device is closed.
  */
@@ -2591,9 +2628,8 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
        printk(KERN_DEBUG "cyc:cy_close ttyC%d\n", info->line);
 #endif
 
-       if (!info || serial_paranoia_check(info, tty->name, "cy_close")) {
+       if (!info || serial_paranoia_check(info, tty->name, "cy_close"))
                return;
-       }
 
        card = info->card;
 
@@ -2641,9 +2677,9 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
         */
        tty->closing = 1;
        spin_unlock_irqrestore(&card->card_lock, flags);
-       if (info->closing_wait != CY_CLOSING_WAIT_NONE) {
+       if (info->closing_wait != CY_CLOSING_WAIT_NONE)
                tty_wait_until_sent(tty, info->closing_wait);
-       }
+
        spin_lock_irqsave(&card->card_lock, flags);
 
        if (!IS_CYC_Z(*card)) {
@@ -2657,15 +2693,16 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
                cy_writeb(base_addr + (CySRER << index),
                          readb(base_addr + (CySRER << index)) & ~CyRxData);
                if (info->flags & ASYNC_INITIALIZED) {
-                       /* Waiting for on-board buffers to be empty before closing
-                          the port */
+                       /* Waiting for on-board buffers to be empty before
+                          closing the port */
                        spin_unlock_irqrestore(&card->card_lock, flags);
                        cy_wait_until_sent(tty, info->timeout);
                        spin_lock_irqsave(&card->card_lock, flags);
                }
        } else {
 #ifdef Z_WAKE
-               /* Waiting for on-board buffers to be empty before closing the port */
+               /* Waiting for on-board buffers to be empty before closing
+                  the port */
                void __iomem *base_addr = card->base_addr;
                struct FIRM_ID __iomem *firm_id = base_addr + ID_ADDRESS;
                struct ZFW_CTRL __iomem *zfw_ctrl =
@@ -2689,8 +2726,7 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
 
        spin_unlock_irqrestore(&card->card_lock, flags);
        shutdown(info);
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       cy_flush_buffer(tty);
        tty_ldisc_flush(tty);
        spin_lock_irqsave(&card->card_lock, flags);
 
@@ -2738,17 +2774,16 @@ static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count)
        printk(KERN_DEBUG "cyc:cy_write ttyC%d\n", info->line);
 #endif
 
-       if (serial_paranoia_check(info, tty->name, "cy_write")) {
+       if (serial_paranoia_check(info, tty->name, "cy_write"))
                return 0;
-       }
 
        if (!info->xmit_buf)
                return 0;
 
        spin_lock_irqsave(&info->card->card_lock, flags);
        while (1) {
-               c = min(count, min((int)(SERIAL_XMIT_SIZE - info->xmit_cnt - 1),
-                                  (int)(SERIAL_XMIT_SIZE - info->xmit_head)));
+               c = min(count, (int)(SERIAL_XMIT_SIZE - info->xmit_cnt - 1));
+               c = min(c, (int)(SERIAL_XMIT_SIZE - info->xmit_head));
 
                if (c <= 0)
                        break;
@@ -2766,9 +2801,9 @@ static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count)
        info->idle_stats.xmit_bytes += ret;
        info->idle_stats.xmit_idle = jiffies;
 
-       if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) {
+       if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped)
                start_xmit(info);
-       }
+
        return ret;
 }                              /* cy_write */
 
@@ -2779,7 +2814,7 @@ static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count)
  * done stuffing characters into the driver.  If there is no room
  * in the queue, the character is ignored.
  */
-static void cy_put_char(struct tty_struct *tty, unsigned char ch)
+static int cy_put_char(struct tty_struct *tty, unsigned char ch)
 {
        struct cyclades_port *info = tty->driver_data;
        unsigned long flags;
@@ -2789,15 +2824,15 @@ static void cy_put_char(struct tty_struct *tty, unsigned char ch)
 #endif
 
        if (serial_paranoia_check(info, tty->name, "cy_put_char"))
-               return;
+               return 0;
 
        if (!info->xmit_buf)
-               return;
+               return 0;
 
        spin_lock_irqsave(&info->card->card_lock, flags);
        if (info->xmit_cnt >= (int)(SERIAL_XMIT_SIZE - 1)) {
                spin_unlock_irqrestore(&info->card->card_lock, flags);
-               return;
+               return 0;
        }
 
        info->xmit_buf[info->xmit_head++] = ch;
@@ -2806,11 +2841,12 @@ static void cy_put_char(struct tty_struct *tty, unsigned char ch)
        info->idle_stats.xmit_bytes++;
        info->idle_stats.xmit_idle = jiffies;
        spin_unlock_irqrestore(&info->card->card_lock, flags);
+       return 1;
 }                              /* cy_put_char */
 
 /*
  * This routine is called by the kernel after it has written a
- * series of characters to the tty device using put_char().  
+ * series of characters to the tty device using put_char().
  */
 static void cy_flush_chars(struct tty_struct *tty)
 {
@@ -2882,6 +2918,7 @@ static int cy_chars_in_buffer(struct tty_struct *tty)
                int char_count;
                __u32 tx_put, tx_get, tx_bufsize;
 
+               lock_kernel();
                firm_id = card->base_addr + ID_ADDRESS;
                zfw_ctrl = card->base_addr +
                        (readl(&firm_id->zfwctrl_addr) & 0xfffff);
@@ -2899,6 +2936,7 @@ static int cy_chars_in_buffer(struct tty_struct *tty)
                printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n",
                        info->line, info->xmit_cnt + char_count);
 #endif
+               unlock_kernel();
                return info->xmit_cnt + char_count;
        }
 #endif                         /* Z_EXT_CHARS_IN_BUFFER */
@@ -2950,12 +2988,12 @@ static void set_line_char(struct cyclades_port *info)
        int baud, baud_rate = 0;
        int i;
 
-       if (!info->tty || !info->tty->termios) {
+       if (!info->tty || !info->tty->termios)
                return;
-       }
-       if (info->line == -1) {
+
+       if (info->line == -1)
                return;
-       }
+
        cflag = info->tty->termios->c_cflag;
        iflag = info->tty->termios->c_iflag;
 
@@ -2994,13 +3032,11 @@ static void set_line_char(struct cyclades_port *info)
                }
                /* find the baud index */
                for (i = 0; i < 20; i++) {
-                       if (baud == baud_table[i]) {
+                       if (baud == baud_table[i])
                                break;
-                       }
                }
-               if (i == 20) {
+               if (i == 20)
                        i = 19; /* CD1400_MAX_SPEED */
-               }
 
                if (baud == 38400 && (info->flags & ASYNC_SPD_MASK) ==
                                ASYNC_SPD_CUST) {
@@ -3059,18 +3095,16 @@ static void set_line_char(struct cyclades_port *info)
                        info->cor1 = Cy_8_BITS;
                        break;
                }
-               if (cflag & CSTOPB) {
+               if (cflag & CSTOPB)
                        info->cor1 |= Cy_2_STOP;
-               }
+
                if (cflag & PARENB) {
-                       if (cflag & PARODD) {
+                       if (cflag & PARODD)
                                info->cor1 |= CyPARITY_O;
-                       } else {
+                       else
                                info->cor1 |= CyPARITY_E;
-                       }
-               } else {
+               } else
                        info->cor1 |= CyPARITY_NONE;
-               }
 
                /* CTS flow control flag */
                if (cflag & CRTSCTS) {
@@ -3123,7 +3157,8 @@ static void set_line_char(struct cyclades_port *info)
                cyy_issue_cmd(base_addr, CyCOR_CHANGE | CyCOR1ch | CyCOR2ch |
                                CyCOR3ch, index);
 
-               cy_writeb(base_addr + (CyCAR << index), (u_char) channel);      /* !!! Is this needed? */
+               /* !!! Is this needed? */
+               cy_writeb(base_addr + (CyCAR << index), (u_char) channel);
                cy_writeb(base_addr + (CyRTPR << index),
                        (info->default_timeout ? info->default_timeout : 0x02));
                /* 10ms rx timeout */
@@ -3191,9 +3226,8 @@ static void set_line_char(struct cyclades_port *info)
 #endif
                }
 
-               if (info->tty) {
+               if (info->tty)
                        clear_bit(TTY_IO_ERROR, &info->tty->flags);
-               }
                spin_unlock_irqrestore(&card->card_lock, flags);
 
        } else {
@@ -3206,9 +3240,8 @@ static void set_line_char(struct cyclades_port *info)
                int retval;
 
                firm_id = card->base_addr + ID_ADDRESS;
-               if (!ISZLOADED(*card)) {
+               if (!ISZLOADED(*card))
                        return;
-               }
 
                zfw_ctrl = card->base_addr +
                        (readl(&firm_id->zfwctrl_addr) & 0xfffff);
@@ -3268,14 +3301,12 @@ static void set_line_char(struct cyclades_port *info)
                                  readl(&ch_ctrl->comm_data_l) | C_DL_1STOP);
                }
                if (cflag & PARENB) {
-                       if (cflag & PARODD) {
+                       if (cflag & PARODD)
                                cy_writel(&ch_ctrl->comm_parity, C_PR_ODD);
-                       } else {
+                       else
                                cy_writel(&ch_ctrl->comm_parity, C_PR_EVEN);
-                       }
-               } else {
+               } else
                        cy_writel(&ch_ctrl->comm_parity, C_PR_NONE);
-               }
 
                /* CTS flow control flag */
                if (cflag & CRTSCTS) {
@@ -3305,11 +3336,10 @@ static void set_line_char(struct cyclades_port *info)
                }
 
                /* CD sensitivity */
-               if (cflag & CLOCAL) {
+               if (cflag & CLOCAL)
                        info->flags &= ~ASYNC_CHECK_CD;
-               } else {
+               else
                        info->flags |= ASYNC_CHECK_CD;
-               }
 
                if (baud == 0) {        /* baud rate is zero, turn off line */
                        cy_writel(&ch_ctrl->rs_control,
@@ -3325,21 +3355,20 @@ static void set_line_char(struct cyclades_port *info)
 #endif
                }
 
-               retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM,0L);
+               retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L);
                if (retval != 0) {
                        printk(KERN_ERR "cyc:set_line_char(2) retval on ttyC%d "
                                "was %x\n", info->line, retval);
                }
 
-               if (info->tty) {
+               if (info->tty)
                        clear_bit(TTY_IO_ERROR, &info->tty->flags);
-               }
        }
 }                              /* set_line_char */
 
 static int
 get_serial_info(struct cyclades_port *info,
-               struct serial_struct __user * retinfo)
+               struct serial_struct __user *retinfo)
 {
        struct serial_struct tmp;
        struct cyclades_card *cinfo = info->card;
@@ -3363,7 +3392,7 @@ get_serial_info(struct cyclades_port *info,
 
 static int
 set_serial_info(struct cyclades_port *info,
-               struct serial_struct __user * new_info)
+               struct serial_struct __user *new_info)
 {
        struct serial_struct new_serial;
        struct cyclades_port old_info;
@@ -3417,7 +3446,7 @@ check_and_exit:
  *         transmit holding register is empty.  This functionality
  *         allows an RS485 driver to be written in user space.
  */
-static int get_lsr_info(struct cyclades_port *info, unsigned int __user * value)
+static int get_lsr_info(struct cyclades_port *info, unsigned int __user *value)
 {
        struct cyclades_card *card;
        int chip, channel, index;
@@ -3461,9 +3490,11 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file)
        struct BOARD_CTRL __iomem *board_ctrl;
        struct CH_CTRL __iomem *ch_ctrl;
 
-       if (serial_paranoia_check(info, tty->name, __FUNCTION__))
+       if (serial_paranoia_check(info, tty->name, __func__))
                return -ENODEV;
 
+       lock_kernel();
+
        card = info->card;
        channel = info->line - card->first_line;
        if (!IS_CYC_Z(*card)) {
@@ -3506,10 +3537,12 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file)
                                ((lstatus & C_RS_CTS) ? TIOCM_CTS : 0);
                } else {
                        result = 0;
+                       unlock_kernel();
                        return -ENODEV;
                }
 
        }
+       unlock_kernel();
        return result;
 }                              /* cy_tiomget */
 
@@ -3528,7 +3561,7 @@ cy_tiocmset(struct tty_struct *tty, struct file *file,
        struct CH_CTRL __iomem *ch_ctrl;
        int retval;
 
-       if (serial_paranoia_check(info, tty->name, __FUNCTION__))
+       if (serial_paranoia_check(info, tty->name, __func__))
                return -ENODEV;
 
        card = info->card;
@@ -3727,8 +3760,8 @@ static void cy_break(struct tty_struct *tty, int break_state)
        spin_unlock_irqrestore(&card->card_lock, flags);
 }                              /* cy_break */
 
-static int
-get_mon_info(struct cyclades_port *info, struct cyclades_monitor __user * mon)
+static int get_mon_info(struct cyclades_port *info,
+                               struct cyclades_monitor __user *mon)
 {
 
        if (copy_to_user(mon, &info->mon, sizeof(struct cyclades_monitor)))
@@ -3767,8 +3800,8 @@ static int set_threshold(struct cyclades_port *info, unsigned long value)
        return 0;
 }                              /* set_threshold */
 
-static int
-get_threshold(struct cyclades_port *info, unsigned long __user * value)
+static int get_threshold(struct cyclades_port *info,
+                                               unsigned long __user *value)
 {
        struct cyclades_card *card;
        void __iomem *base_addr;
@@ -3789,15 +3822,15 @@ get_threshold(struct cyclades_port *info, unsigned long __user * value)
        return 0;
 }                              /* get_threshold */
 
-static int
-set_default_threshold(struct cyclades_port *info, unsigned long value)
+static int set_default_threshold(struct cyclades_port *info,
+                                                       unsigned long value)
 {
        info->default_threshold = value & 0x0f;
        return 0;
 }                              /* set_default_threshold */
 
-static int
-get_default_threshold(struct cyclades_port *info, unsigned long __user * value)
+static int get_default_threshold(struct cyclades_port *info,
+                                               unsigned long __user *value)
 {
        return put_user(info->default_threshold, value);
 }                              /* get_default_threshold */
@@ -3824,7 +3857,8 @@ static int set_timeout(struct cyclades_port *info, unsigned long value)
        return 0;
 }                              /* set_timeout */
 
-static int get_timeout(struct cyclades_port *info, unsigned long __user * value)
+static int get_timeout(struct cyclades_port *info,
+                                               unsigned long __user *value)
 {
        struct cyclades_card *card;
        void __iomem *base_addr;
@@ -3851,8 +3885,8 @@ static int set_default_timeout(struct cyclades_port *info, unsigned long value)
        return 0;
 }                              /* set_default_timeout */
 
-static int
-get_default_timeout(struct cyclades_port *info, unsigned long __user * value)
+static int get_default_timeout(struct cyclades_port *info,
+                                       unsigned long __user *value)
 {
        return put_user(info->default_timeout, value);
 }                              /* get_default_timeout */
@@ -3880,6 +3914,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
        printk(KERN_DEBUG "cyc:cy_ioctl ttyC%d, cmd = %x arg = %lx\n",
                info->line, cmd, arg);
 #endif
+       lock_kernel();
 
        switch (cmd) {
        case CYGETMON:
@@ -3936,7 +3971,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
                break;
 #endif                         /* CONFIG_CYZ_INTR */
        case CYSETWAIT:
-               info->closing_wait = (unsigned short)arg *HZ / 100;
+               info->closing_wait = (unsigned short)arg * HZ / 100;
                ret_val = 0;
                break;
        case CYGETWAIT:
@@ -3988,47 +4023,47 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
                p_cuser = argp;
                ret_val = put_user(cnow.cts, &p_cuser->cts);
                if (ret_val)
-                       return ret_val;
+                       break;
                ret_val = put_user(cnow.dsr, &p_cuser->dsr);
                if (ret_val)
-                       return ret_val;
+                       break;
                ret_val = put_user(cnow.rng, &p_cuser->rng);
                if (ret_val)
-                       return ret_val;
+                       break;
                ret_val = put_user(cnow.dcd, &p_cuser->dcd);
                if (ret_val)
-                       return ret_val;
+                       break;
                ret_val = put_user(cnow.rx, &p_cuser->rx);
                if (ret_val)
-                       return ret_val;
+                       break;
                ret_val = put_user(cnow.tx, &p_cuser->tx);
                if (ret_val)
-                       return ret_val;
+                       break;
                ret_val = put_user(cnow.frame, &p_cuser->frame);
                if (ret_val)
-                       return ret_val;
+                       break;
                ret_val = put_user(cnow.overrun, &p_cuser->overrun);
                if (ret_val)
-                       return ret_val;
+                       break;
                ret_val = put_user(cnow.parity, &p_cuser->parity);
                if (ret_val)
-                       return ret_val;
+                       break;
                ret_val = put_user(cnow.brk, &p_cuser->brk);
                if (ret_val)
-                       return ret_val;
+                       break;
                ret_val = put_user(cnow.buf_overrun, &p_cuser->buf_overrun);
                if (ret_val)
-                       return ret_val;
+                       break;
                ret_val = 0;
                break;
        default:
                ret_val = -ENOIOCTLCMD;
        }
+       unlock_kernel();
 
 #ifdef CY_DEBUG_OTHER
        printk(KERN_DEBUG "cyc:cy_ioctl done\n");
 #endif
-
        return ret_val;
 }                              /* cy_ioctl */
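
The long run of `return ret_val` to `break` conversions above exists because cy_ioctl() now takes lock_kernel() near its top (earlier hunk), so every error path must fall through to the single unlock_kernel() before the function returns. A minimal sketch of that single-exit shape under the pushed-down BKL, using a hypothetical command number and simplified signature; lock_kernel()/unlock_kernel() and put_user() are the real 2.6-era interfaces:

	#include <linux/smp_lock.h>
	#include <linux/uaccess.h>
	#include <linux/errno.h>

	static int my_ioctl_body(unsigned int cmd, unsigned long arg)
	{
		int ret;

		lock_kernel();			/* BKL pushed down into the driver */
		switch (cmd) {
		case 0x1234:			/* hypothetical command number */
			ret = put_user(42, (int __user *)arg);
			break;			/* never return while the BKL is held */
		default:
			ret = -ENOIOCTLCMD;
		}
		unlock_kernel();
		return ret;
	}
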
 
@@ -4113,9 +4148,8 @@ static void cy_throttle(struct tty_struct *tty)
                        tty->ldisc.chars_in_buffer(tty), info->line);
 #endif
 
-       if (serial_paranoia_check(info, tty->name, "cy_throttle")) {
+       if (serial_paranoia_check(info, tty->name, "cy_throttle"))
                return;
-       }
 
        card = info->card;
 
@@ -4169,12 +4203,11 @@ static void cy_unthrottle(struct tty_struct *tty)
        char buf[64];
 
        printk(KERN_DEBUG "cyc:unthrottle %s: %ld...ttyC%d\n",
-               tty_name(tty, buf), tty->ldisc.chars_in_buffer(tty),info->line);
+               tty_name(tty, buf), tty_chars_in_buffer(tty), info->line);
 #endif
 
-       if (serial_paranoia_check(info, tty->name, "cy_unthrottle")) {
+       if (serial_paranoia_check(info, tty->name, "cy_unthrottle"))
                return;
-       }
 
        if (I_IXOFF(tty)) {
                if (info->x_char)
@@ -4269,47 +4302,14 @@ static void cy_start(struct tty_struct *tty)
                base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index);
 
                spin_lock_irqsave(&cinfo->card_lock, flags);
-               cy_writeb(base_addr + (CyCAR << index), (u_char) (channel & 0x0003));   /* index channel */
+               cy_writeb(base_addr + (CyCAR << index),
+                       (u_char) (channel & 0x0003));   /* index channel */
                cy_writeb(base_addr + (CySRER << index),
                          readb(base_addr + (CySRER << index)) | CyTxRdy);
                spin_unlock_irqrestore(&cinfo->card_lock, flags);
        }
 }                              /* cy_start */
 
-static void cy_flush_buffer(struct tty_struct *tty)
-{
-       struct cyclades_port *info = tty->driver_data;
-       struct cyclades_card *card;
-       int channel, retval;
-       unsigned long flags;
-
-#ifdef CY_DEBUG_IO
-       printk(KERN_DEBUG "cyc:cy_flush_buffer ttyC%d\n", info->line);
-#endif
-
-       if (serial_paranoia_check(info, tty->name, "cy_flush_buffer"))
-               return;
-
-       card = info->card;
-       channel = info->line - card->first_line;
-
-       spin_lock_irqsave(&card->card_lock, flags);
-       info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-       spin_unlock_irqrestore(&card->card_lock, flags);
-
-       if (IS_CYC_Z(*card)) {  /* If it is a Z card, flush the on-board
-                                          buffers as well */
-               spin_lock_irqsave(&card->card_lock, flags);
-               retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L);
-               if (retval != 0) {
-                       printk(KERN_ERR "cyc: flush_buffer retval on ttyC%d "
-                               "was %x\n", info->line, retval);
-               }
-               spin_unlock_irqrestore(&card->card_lock, flags);
-       }
-       tty_wakeup(tty);
-}                              /* cy_flush_buffer */
-
 /*
  * cy_hangup() --- called by tty_hangup() when a hangup is signaled.
  */
@@ -4406,10 +4406,11 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo)
                        info->cor3 = 0x08;      /* _very_ small rcv threshold */
 
                        chip_number = (port - cinfo->first_line) / 4;
-                       if ((info->chip_rev = readb(cinfo->base_addr +
-                                     (cy_chip_offset[chip_number] <<
-                                      index) + (CyGFRCR << index))) >=
-                           CD1400_REV_J) {
+                       info->chip_rev = readb(cinfo->base_addr +
+                                     (cy_chip_offset[chip_number] << index) +
+                                     (CyGFRCR << index));
+
+                       if (info->chip_rev >= CD1400_REV_J) {
                                /* It is a CD1400 rev. J or later */
                                info->tbpr = baud_bpr_60[13];   /* Tx BPR */
                                info->tco = baud_co_60[13];     /* Tx CO */
@@ -4454,7 +4455,8 @@ static unsigned short __devinit cyy_init_card(void __iomem *true_base_addr,
        /* Cy_ClrIntr is 0x1800 */
        udelay(500L);
 
-       for (chip_number = 0; chip_number < CyMAX_CHIPS_PER_CARD; chip_number++) {
+       for (chip_number = 0; chip_number < CyMAX_CHIPS_PER_CARD;
+                                                       chip_number++) {
                base_addr =
                    true_base_addr + (cy_chip_offset[chip_number] << index);
                mdelay(1);
@@ -4555,12 +4557,11 @@ static int __init cy_detect_isa(void)
        /* scan the address table probing for Cyclom-Y/ISA boards */
        for (i = 0; i < NR_ISA_ADDRS; i++) {
                unsigned int isa_address = cy_isa_addresses[i];
-               if (isa_address == 0x0000) {
+               if (isa_address == 0x0000)
                        return nboard;
-               }
 
                /* probe for CD1400... */
-               cy_isa_address = ioremap(isa_address, CyISA_Ywin);
+               cy_isa_address = ioremap_nocache(isa_address, CyISA_Ywin);
                if (cy_isa_address == NULL) {
                        printk(KERN_ERR "Cyclom-Y/ISA: can't remap base "
                                        "address\n");
@@ -4847,12 +4848,10 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr,
        if (mailbox != 0) {
                /* set window to last 512K of RAM */
                cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE);
-               //sleep(1);
                for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++)
                        cy_writeb(tmp, 255);
                /* set window to beginning of RAM */
                cy_writel(&ctl_addr->loc_addr_base, WIN_RAM);
-               //sleep(1);
        }
 
        retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL);
@@ -5382,7 +5381,8 @@ static void __exit cy_cleanup_module(void)
        del_timer_sync(&cyz_timerlist);
 #endif /* CONFIG_CYZ_INTR */
 
-       if ((e1 = tty_unregister_driver(cy_serial_driver)))
+       e1 = tty_unregister_driver(cy_serial_driver);
+       if (e1)
                printk(KERN_ERR "failed to unregister Cyclades serial "
                                "driver(%d)\n", e1);
 
index ecee3547a13f2216d076c62708293b7ee168767d..213b3ca3468e5bdaf1c80db13a7d8c2c5e44a38a 100644 (file)
@@ -160,7 +160,7 @@ struct drm_device;
  * \param arg arguments
  */
 #define DRM_ERROR(fmt, arg...) \
-       printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__ , ##arg)
+       printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg)
 
 /**
  * Memory error output.
@@ -170,7 +170,7 @@ struct drm_device;
  * \param arg arguments
  */
 #define DRM_MEM_ERROR(area, fmt, arg...) \
-       printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \
+       printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __func__, \
               drm_mem_stats[area].name , ##arg)
 
 #define DRM_INFO(fmt, arg...)  printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg)
@@ -187,7 +187,7 @@ struct drm_device;
                if ( drm_debug )                        \
                        printk(KERN_DEBUG                               \
                               "[" DRM_NAME ":%s] " fmt ,       \
-                              __FUNCTION__ , ##arg);                   \
+                              __func__ , ##arg);                       \
        } while (0)
 #else
 #define DRM_DEBUG(fmt, arg...)          do { } while (0)
@@ -238,7 +238,7 @@ do {                                                                        \
        if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) ||           \
             dev->lock.file_priv != file_priv ) {                       \
                DRM_ERROR( "%s called without lock held, held  %d owner %p %p\n",\
-                          __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
+                          __func__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
                           dev->lock.file_priv, file_priv );            \
                return -EINVAL;                                         \
        }                                                               \
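
The drmP.h hunks above, and the driver hunks that follow, replace the GCC-specific __FUNCTION__ identifier with the C99 standard __func__ inside the DRM logging macros. A minimal sketch of such a macro with a hypothetical driver name; only printk and __func__ are assumed from the real API:

	#include <linux/kernel.h>

	#define MY_DEBUG(fmt, arg...) \
		printk(KERN_DEBUG "[mydrv:%s] " fmt, __func__, ##arg)

	/* MY_DEBUG("mapped %d pages\n", nr); prints the caller's function name */
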
index 7a1d9a782ddb3469a3e5105a6dc0841bd468b2a3..9a32169e88fb9595036998ed8ef3c9f5543c9ac8 100644 (file)
@@ -34,7 +34,7 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state)
        struct drm_minor *drm_minor = to_drm_minor(dev);
        struct drm_device *drm_dev = drm_minor->dev;
 
-       printk(KERN_ERR "%s\n", __FUNCTION__);
+       printk(KERN_ERR "%s\n", __func__);
 
        if (drm_dev->driver->suspend)
                return drm_dev->driver->suspend(drm_dev, state);
index 60c9376be486635e9ab902c64af5acb06d3ad339..a86ab30b4620b14dffe1cb11009f3d0ababf0a21 100644 (file)
@@ -692,7 +692,7 @@ static void i830EmitState(struct drm_device * dev)
        drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
        unsigned int dirty = sarea_priv->dirty;
 
-       DRM_DEBUG("%s %x\n", __FUNCTION__, dirty);
+       DRM_DEBUG("%s %x\n", __func__, dirty);
 
        if (dirty & I830_UPLOAD_BUFFERS) {
                i830EmitDestVerified(dev, sarea_priv->BufferState);
@@ -1043,7 +1043,7 @@ static void i830_dma_dispatch_flip(struct drm_device * dev)
        RING_LOCALS;
 
        DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
-                 __FUNCTION__,
+                 __func__,
                  dev_priv->current_page,
                  dev_priv->sarea_priv->pf_current_page);
 
@@ -1206,7 +1206,7 @@ static void i830_dma_quiescent(struct drm_device * dev)
        OUT_RING(0);
        ADVANCE_LP_RING();
 
-       i830_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+       i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
 }
 
 static int i830_flush_queue(struct drm_device * dev)
@@ -1223,7 +1223,7 @@ static int i830_flush_queue(struct drm_device * dev)
        OUT_RING(0);
        ADVANCE_LP_RING();
 
-       i830_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+       i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
 
        for (i = 0; i < dma->buf_count; i++) {
                struct drm_buf *buf = dma->buflist[i];
@@ -1344,7 +1344,7 @@ static void i830_do_init_pageflip(struct drm_device * dev)
 {
        drm_i830_private_t *dev_priv = dev->dev_private;
 
-       DRM_DEBUG("%s\n", __FUNCTION__);
+       DRM_DEBUG("%s\n", __func__);
        dev_priv->page_flipping = 1;
        dev_priv->current_page = 0;
        dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
@@ -1354,7 +1354,7 @@ static int i830_do_cleanup_pageflip(struct drm_device * dev)
 {
        drm_i830_private_t *dev_priv = dev->dev_private;
 
-       DRM_DEBUG("%s\n", __FUNCTION__);
+       DRM_DEBUG("%s\n", __func__);
        if (dev_priv->current_page != 0)
                i830_dma_dispatch_flip(dev);
 
@@ -1367,7 +1367,7 @@ static int i830_flip_bufs(struct drm_device *dev, void *data,
 {
        drm_i830_private_t *dev_priv = dev->dev_private;
 
-       DRM_DEBUG("%s\n", __FUNCTION__);
+       DRM_DEBUG("%s\n", __func__);
 
        LOCK_TEST_WITH_RETURN(dev, file_priv);
 
@@ -1437,7 +1437,7 @@ static int i830_getparam(struct drm_device *dev, void *data,
        int value;
 
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("%s called with no initialization\n", __func__);
                return -EINVAL;
        }
 
@@ -1464,7 +1464,7 @@ static int i830_setparam(struct drm_device *dev, void *data,
        drm_i830_setparam_t *param = data;
 
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("%s called with no initialization\n", __func__);
                return -EINVAL;
        }
 
index 4caba8c54455582271385d3bc69ebf61b93ed632..b5bf8cc0fdaa5845387962584ef92af28b78011a 100644 (file)
@@ -158,7 +158,7 @@ extern int i830_driver_device_is_agp(struct drm_device * dev);
        if (I830_VERBOSE)                               \
                printk("BEGIN_LP_RING(%d)\n", (n));     \
        if (dev_priv->ring.space < n*4)                 \
-               i830_wait_ring(dev, n*4, __FUNCTION__);         \
+               i830_wait_ring(dev, n*4, __func__);             \
        outcount = 0;                                   \
        outring = dev_priv->ring.tail;                  \
        ringmask = dev_priv->ring.tail_mask;            \
index a33db5f0967fbbb798706ec6c3125347371b6f10..91ec2bb497e97d6622be7f38c048360402f4caf9 100644 (file)
@@ -58,7 +58,7 @@ static int i830_emit_irq(struct drm_device * dev)
        drm_i830_private_t *dev_priv = dev->dev_private;
        RING_LOCALS;
 
-       DRM_DEBUG("%s\n", __FUNCTION__);
+       DRM_DEBUG("%s\n", __func__);
 
        atomic_inc(&dev_priv->irq_emitted);
 
@@ -77,7 +77,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
        unsigned long end = jiffies + HZ * 3;
        int ret = 0;
 
-       DRM_DEBUG("%s\n", __FUNCTION__);
+       DRM_DEBUG("%s\n", __func__);
 
        if (atomic_read(&dev_priv->irq_received) >= irq_nr)
                return 0;
@@ -124,7 +124,7 @@ int i830_irq_emit(struct drm_device *dev, void *data,
        LOCK_TEST_WITH_RETURN(dev, file_priv);
 
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("%s called with no initialization\n", __func__);
                return -EINVAL;
        }
 
@@ -147,7 +147,7 @@ int i830_irq_wait(struct drm_device *dev, void *data,
        drm_i830_irq_wait_t *irqwait = data;
 
        if (!dev_priv) {
-               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               DRM_ERROR("%s called with no initialization\n", __func__);
                return -EINVAL;
        }
 
index ef7bf143a80c632a9c8044fa7e0d77f4fbc3a57f..f47e46e3529fae1ef0be1049205de3f245210a49 100644 (file)
@@ -194,7 +194,7 @@ static int i915_dma_resume(struct drm_device * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-       DRM_DEBUG("%s\n", __FUNCTION__);
+       DRM_DEBUG("%s\n", __func__);
 
        if (!dev_priv->sarea) {
                DRM_ERROR("can not find sarea!\n");
@@ -609,7 +609,7 @@ static int i915_quiescent(struct drm_device * dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        i915_kernel_lost_context(dev);
-       return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
+       return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
 }
 
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
index c614d78b3dfdfca2a69ba4bc5ee8c85e376760f1..db7001f22561f6ea1a1f745b512e4bf51fd6e8c7 100644 (file)
@@ -272,7 +272,7 @@ extern void i915_mem_release(struct drm_device * dev,
        if (I915_VERBOSE)                               \
                DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n));  \
        if (dev_priv->ring.space < (n)*4)               \
-               i915_wait_ring(dev, (n)*4, __FUNCTION__);               \
+               i915_wait_ring(dev, (n)*4, __func__);           \
        outcount = 0;                                   \
        outring = dev_priv->ring.tail;                  \
        ringmask = dev_priv->ring.tail_mask;            \
index f36adbd3aaf5cccac2db4e48921f484f3d3d076e..c31afbde62e72d8fdadb31b5255a34918c64d2f1 100644 (file)
@@ -817,7 +817,7 @@ static struct drm_buf *r128_freelist_get(struct drm_device * dev)
        for (i = 0; i < dma->buf_count; i++) {
                buf = dma->buflist[i];
                buf_priv = buf->dev_private;
-               if (buf->file_priv == 0)
+               if (!buf->file_priv)
                        return buf;
        }
 
index 9072e4a1894e4d773a64b82cf72865f04b4db1fb..f6f6c92bf7710ad4c60892dc165eabaf50693219 100644 (file)
@@ -894,7 +894,7 @@ static u32 RADEON_READ_IGPGART(drm_radeon_private_t *dev_priv, int addr)
 #if RADEON_FIFO_DEBUG
 static void radeon_status(drm_radeon_private_t * dev_priv)
 {
-       printk("%s:\n", __FUNCTION__);
+       printk("%s:\n", __func__);
        printk("RBBM_STATUS = 0x%08x\n",
               (unsigned int)RADEON_READ(RADEON_RBBM_STATUS));
        printk("CP_RB_RTPR = 0x%08x\n",
index 59146e3365bac8bbda37b45d33297c0c9d071b7e..ea35ab2c9909490bb490bb51eae47d39c31661b3 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/spinlock.h>
 #include <linux/bcd.h>
 #include <linux/proc_fs.h>
+#include <linux/jiffies.h>
 
 #include <asm/uaccess.h>
 #include <asm/system.h>
@@ -451,7 +452,7 @@ static void ds1286_get_time(struct rtc_time *rtc_tm)
         */
 
        if (ds1286_is_updating() != 0)
-               while (jiffies - uip_watchdog < 2*HZ/100)
+               while (time_before(jiffies, uip_watchdog + 2*HZ/100))
                        barrier();
 
        /*
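
The ds1286 hunk above replaces an open-coded jiffies comparison with time_before() from the newly included <linux/jiffies.h>, which stays correct when the jiffies counter wraps around. The user-space sketch below only illustrates the underlying signed-difference idiom; it is not the kernel's actual header:

#include <stdio.h>

/*
 * Wrap-safe ordering test for a free-running unsigned counter, in the
 * spirit of the kernel's time_after()/time_before() macros: cast the
 * difference to a signed type so values on either side of the wrap
 * point still compare in the right order.
 */
#define counter_after(a, b)     ((long)((b) - (a)) < 0)
#define counter_before(a, b)    counter_after(b, a)

int main(void)
{
        unsigned long near_wrap = (unsigned long)-5;    /* just before the wrap */
        unsigned long wrapped = 10;                     /* just after the wrap  */

        /* Prints 1: near_wrap still counts as "before" wrapped. */
        printf("%d\n", counter_before(near_wrap, wrapped));
        return 0;
}
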
index ffd747c5dff00dc7e69a1835d078003dd7ffc3d7..60a4df7dac12ed9e073c29c991e97178d42c7e1b 100644 (file)
@@ -38,8 +38,8 @@
 #include <linux/slab.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include "digiPCI.h"
@@ -73,7 +73,8 @@ static int invalid_lilo_config;
  */
 static DEFINE_SPINLOCK(epca_lock);
 
-/* MAXBOARDS is typically 12, but ISA and EISA cards are restricted to 7 below. */
+/* MAXBOARDS is typically 12, but ISA and EISA cards are restricted
+   to 7 below. */
 static struct board_info boards[MAXBOARDS];
 
 static struct tty_driver *pc_driver;
@@ -157,13 +158,12 @@ static void epca_error(int, char *);
 static void pc_close(struct tty_struct *, struct file *);
 static void shutdown(struct channel *);
 static void pc_hangup(struct tty_struct *);
-static void pc_put_char(struct tty_struct *, unsigned char);
 static int pc_write_room(struct tty_struct *);
 static int pc_chars_in_buffer(struct tty_struct *);
 static void pc_flush_buffer(struct tty_struct *);
 static void pc_flush_chars(struct tty_struct *);
 static int block_til_ready(struct tty_struct *, struct file *,
-                           struct channel *);
+                       struct channel *);
 static int pc_open(struct tty_struct *, struct file *);
 static void post_fep_init(unsigned int crd);
 static void epcapoll(unsigned long);
@@ -175,18 +175,18 @@ static unsigned termios2digi_c(struct channel *ch, unsigned);
 static void epcaparam(struct tty_struct *, struct channel *);
 static void receive_data(struct channel *);
 static int pc_ioctl(struct tty_struct *, struct file *,
-                    unsigned int, unsigned long);
+                       unsigned int, unsigned long);
 static int info_ioctl(struct tty_struct *, struct file *,
-                    unsigned int, unsigned long);
+                       unsigned int, unsigned long);
 static void pc_set_termios(struct tty_struct *, struct ktermios *);
 static void do_softint(struct work_struct *work);
 static void pc_stop(struct tty_struct *);
 static void pc_start(struct tty_struct *);
-static void pc_throttle(struct tty_struct * tty);
+static void pc_throttle(struct tty_struct *tty);
 static void pc_unthrottle(struct tty_struct *tty);
 static void digi_send_break(struct channel *ch, int msec);
 static void setup_empty_event(struct tty_struct *tty, struct channel *ch);
-void epca_setup(char *, int *);
+static void epca_setup(char *, int *);
 
 static int pc_write(struct tty_struct *, const unsigned char *, int);
 static int pc_init(void);
@@ -243,7 +243,7 @@ static void assertmemoff(struct channel *ch)
 /* PCXEM windowing is the same as that used in the PCXR and CX series cards. */
 static void pcxem_memwinon(struct board_info *b, unsigned int win)
 {
-        outb_p(FEPWIN|win, b->port + 1);
+       outb_p(FEPWIN | win, b->port + 1);
 }
 
 static void pcxem_memwinoff(struct board_info *b, unsigned int win)
@@ -253,7 +253,7 @@ static void pcxem_memwinoff(struct board_info *b, unsigned int win)
 
 static void pcxem_globalwinon(struct channel *ch)
 {
-       outb_p( FEPWIN, (int)ch->board->port + 1);
+       outb_p(FEPWIN, (int)ch->board->port + 1);
 }
 
 static void pcxem_rxwinon(struct channel *ch)
@@ -394,7 +394,7 @@ static struct channel *verifyChannel(struct tty_struct *tty)
         */
        if (tty) {
                struct channel *ch = (struct channel *)tty->driver_data;
-               if ((ch >= &digi_channels[0]) && (ch < &digi_channels[nbdevs])) {
+               if (ch >= &digi_channels[0] && ch < &digi_channels[nbdevs]) {
                        if (ch->magic == EPCA_MAGIC)
                                return ch;
                }
@@ -414,7 +414,7 @@ static void pc_sched_event(struct channel *ch, int event)
 
 static void epca_error(int line, char *msg)
 {
-       printk(KERN_ERR "epca_error (Digi): line = %d %s\n",line,msg);
+       printk(KERN_ERR "epca_error (Digi): line = %d %s\n", line, msg);
 }
 
 static void pc_close(struct tty_struct *tty, struct file *filp)
@@ -425,7 +425,8 @@ static void pc_close(struct tty_struct *tty, struct file *filp)
         * verifyChannel returns the channel from the tty struct if it is
         * valid. This serves as a sanity check.
         */
-       if ((ch = verifyChannel(tty)) != NULL) {
+       ch = verifyChannel(tty);
+       if (ch != NULL) {
                spin_lock_irqsave(&epca_lock, flags);
                if (tty_hung_up_p(filp)) {
                        spin_unlock_irqrestore(&epca_lock, flags);
@@ -440,7 +441,6 @@ static void pc_close(struct tty_struct *tty, struct file *filp)
                        spin_unlock_irqrestore(&epca_lock, flags);
                        return;
                }
-
                /* Port open only once go ahead with shutdown & reset */
                BUG_ON(ch->count < 0);
 
@@ -455,12 +455,13 @@ static void pc_close(struct tty_struct *tty, struct file *filp)
                spin_unlock_irqrestore(&epca_lock, flags);
 
                if (ch->asyncflags & ASYNC_INITIALIZED)  {
-                       /* Setup an event to indicate when the transmit buffer empties */
+                       /* Setup an event to indicate when the
+                          transmit buffer empties */
                        setup_empty_event(tty, ch);
-                       tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */
+                       /* 30 seconds timeout */
+                       tty_wait_until_sent(tty, 3000);
                }
-               if (tty->driver->flush_buffer)
-                       tty->driver->flush_buffer(tty);
+               pc_flush_buffer(tty);
 
                tty_ldisc_flush(tty);
                shutdown(ch);
@@ -477,7 +478,7 @@ static void pc_close(struct tty_struct *tty, struct file *filp)
                        wake_up_interruptible(&ch->open_wait);
                }
                ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED |
-                                     ASYNC_CLOSING);
+                                       ASYNC_CLOSING);
                wake_up_interruptible(&ch->close_wait);
        }
 }
@@ -524,16 +525,15 @@ static void shutdown(struct channel *ch)
 static void pc_hangup(struct tty_struct *tty)
 {
        struct channel *ch;
-
        /*
         * verifyChannel returns the channel from the tty struct if it is
         * valid. This serves as a sanity check.
         */
-       if ((ch = verifyChannel(tty)) != NULL) {
+       ch = verifyChannel(tty);
+       if (ch != NULL) {
                unsigned long flags;
 
-               if (tty->driver->flush_buffer)
-                       tty->driver->flush_buffer(tty);
+               pc_flush_buffer(tty);
                tty_ldisc_flush(tty);
                shutdown(ch);
 
@@ -548,7 +548,7 @@ static void pc_hangup(struct tty_struct *tty)
 }
 
 static int pc_write(struct tty_struct *tty,
-                    const unsigned char *buf, int bytesAvailable)
+                       const unsigned char *buf, int bytesAvailable)
 {
        unsigned int head, tail;
        int dataLen;
@@ -572,7 +572,8 @@ static int pc_write(struct tty_struct *tty,
         * verifyChannel returns the channel from the tty struct if it is
         * valid. This serves as a sanity check.
         */
-       if ((ch = verifyChannel(tty)) == NULL)
+       ch = verifyChannel(tty);
+       if (ch == NULL)
                return 0;
 
        /* Make a pointer to the channel data structure found on the board. */
@@ -645,26 +646,19 @@ static int pc_write(struct tty_struct *tty,
        return amountCopied;
 }
 
-static void pc_put_char(struct tty_struct *tty, unsigned char c)
-{
-       pc_write(tty, &c, 1);
-}
-
 static int pc_write_room(struct tty_struct *tty)
 {
-       int remain;
+       int remain = 0;
        struct channel *ch;
        unsigned long flags;
        unsigned int head, tail;
        struct board_chan __iomem *bc;
-
-       remain = 0;
-
        /*
         * verifyChannel returns the channel from the tty struct if it is
         * valid. This serves as a sanity check.
         */
-       if ((ch = verifyChannel(tty)) != NULL)  {
+       ch = verifyChannel(tty);
+       if (ch != NULL) {
                spin_lock_irqsave(&epca_lock, flags);
                globalwinon(ch);
 
@@ -676,8 +670,8 @@ static int pc_write_room(struct tty_struct *tty)
                        tail = readw(&bc->tout);
                /* Wrap tail if necessary */
                tail &= (ch->txbufsize - 1);
-
-               if ((remain = tail - head - 1) < 0 )
+               remain = tail - head - 1;
+               if (remain < 0)
                        remain += ch->txbufsize;
 
                if (remain && (ch->statusflags & LOWWAIT) == 0) {
@@ -699,12 +693,12 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
        unsigned long flags;
        struct channel *ch;
        struct board_chan __iomem *bc;
-
        /*
         * verifyChannel returns the channel from the tty struct if it is
         * valid. This serves as a sanity check.
         */
-       if ((ch = verifyChannel(tty)) == NULL)
+       ch = verifyChannel(tty);
+       if (ch == NULL)
                return 0;
 
        spin_lock_irqsave(&epca_lock, flags);
@@ -715,7 +709,8 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
        head = readw(&bc->tin);
        ctail = readw(&ch->mailbox->cout);
 
-       if (tail == head && readw(&ch->mailbox->cin) == ctail && readb(&bc->tbusy) == 0)
+       if (tail == head && readw(&ch->mailbox->cin) == ctail &&
+                                               readb(&bc->tbusy) == 0)
                chars = 0;
        else  { /* Begin if some space on the card has been used */
                head = readw(&bc->tin) & (ch->txbufsize - 1);
@@ -725,7 +720,8 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
                 * pc_write_room here we are finding the amount of bytes in the
                 * buffer filled. Not the amount of bytes empty.
                 */
-               if ((remain = tail - head - 1) < 0 )
+               remain = tail - head - 1;
+               if (remain < 0)
                        remain += ch->txbufsize;
                chars = (int)(ch->txbufsize - remain);
                /*
@@ -736,7 +732,7 @@ static int pc_chars_in_buffer(struct tty_struct *tty)
                 * transmit buffer empties.
                 */
                if (!(ch->statusflags & EMPTYWAIT))
-                       setup_empty_event(tty,ch);
+                       setup_empty_event(tty, ch);
        } /* End if some space on the card has been used */
        memoff(ch);
        spin_unlock_irqrestore(&epca_lock, flags);
@@ -754,7 +750,8 @@ static void pc_flush_buffer(struct tty_struct *tty)
         * verifyChannel returns the channel from the tty struct if it is
         * valid. This serves as a sanity check.
         */
-       if ((ch = verifyChannel(tty)) == NULL)
+       ch = verifyChannel(tty);
+       if (ch == NULL)
                return;
 
        spin_lock_irqsave(&epca_lock, flags);
@@ -775,23 +772,25 @@ static void pc_flush_chars(struct tty_struct *tty)
         * verifyChannel returns the channel from the tty struct if it is
         * valid. This serves as a sanity check.
         */
-       if ((ch = verifyChannel(tty)) != NULL) {
+       ch = verifyChannel(tty);
+       if (ch != NULL) {
                unsigned long flags;
                spin_lock_irqsave(&epca_lock, flags);
                /*
                 * If not already set and the transmitter is busy setup an
                 * event to indicate when the transmit empties.
                 */
-               if ((ch->statusflags & TXBUSY) && !(ch->statusflags & EMPTYWAIT))
-                       setup_empty_event(tty,ch);
+               if ((ch->statusflags & TXBUSY) &&
+                               !(ch->statusflags & EMPTYWAIT))
+                       setup_empty_event(tty, ch);
                spin_unlock_irqrestore(&epca_lock, flags);
        }
 }
 
 static int block_til_ready(struct tty_struct *tty,
-                           struct file *filp, struct channel *ch)
+                               struct file *filp, struct channel *ch)
 {
-       DECLARE_WAITQUEUE(wait,current);
+       DECLARE_WAITQUEUE(wait, current);
        int retval, do_clocal = 0;
        unsigned long flags;
 
@@ -839,8 +838,7 @@ static int block_til_ready(struct tty_struct *tty,
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (tty_hung_up_p(filp) ||
-                   !(ch->asyncflags & ASYNC_INITIALIZED))
-               {
+                               !(ch->asyncflags & ASYNC_INITIALIZED)) {
                        if (ch->asyncflags & ASYNC_HUP_NOTIFY)
                                retval = -EAGAIN;
                        else
@@ -880,7 +878,7 @@ static int block_til_ready(struct tty_struct *tty,
        return 0;
 }
 
-static int pc_open(struct tty_struct *tty, struct file * filp)
+static int pc_open(struct tty_struct *tty, struct file *filp)
 {
        struct channel *ch;
        unsigned long flags;
@@ -923,7 +921,8 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
                return(-ENODEV);
        }
 
-       if ((bc = ch->brdchan) == 0) {
+       bc = ch->brdchan;
+       if (bc == NULL) {
                tty->driver_data = NULL;
                return -ENODEV;
        }
@@ -964,7 +963,7 @@ static int pc_open(struct tty_struct *tty, struct file * filp)
         * The below routine generally sets up parity, baud, flow control
         * issues, etc.... It effect both control flags and input flags.
         */
-       epcaparam(tty,ch);
+       epcaparam(tty, ch);
        ch->asyncflags |= ASYNC_INITIALIZED;
        memoff(ch);
        spin_unlock_irqrestore(&epca_lock, flags);
@@ -1002,8 +1001,8 @@ static void __exit epca_module_exit(void)
 
        del_timer_sync(&epca_timer);
 
-       if (tty_unregister_driver(pc_driver) || tty_unregister_driver(pc_info))
-       {
+       if (tty_unregister_driver(pc_driver) ||
+                               tty_unregister_driver(pc_info)) {
                printk(KERN_WARNING "epca: cleanup_module failed to un-register tty driver\n");
                return;
        }
@@ -1034,7 +1033,6 @@ static const struct tty_operations pc_ops = {
        .flush_buffer = pc_flush_buffer,
        .chars_in_buffer = pc_chars_in_buffer,
        .flush_chars = pc_flush_chars,
-       .put_char = pc_put_char,
        .ioctl = pc_ioctl,
        .set_termios = pc_set_termios,
        .stop = pc_stop,
@@ -1044,7 +1042,7 @@ static const struct tty_operations pc_ops = {
        .hangup = pc_hangup,
 };
 
-static int info_open(struct tty_struct *tty, struct file * filp)
+static int info_open(struct tty_struct *tty, struct file *filp)
 {
        return 0;
 }
@@ -1099,7 +1097,7 @@ static int __init pc_init(void)
         * Set up interrupt, we will worry about memory allocation in
         * post_fep_init.
         */
-       printk(KERN_INFO "DIGI epca driver version %s loaded.\n",VERSION);
+       printk(KERN_INFO "DIGI epca driver version %s loaded.\n", VERSION);
 
        /*
         * NOTE : This code assumes that the number of ports found in the
@@ -1252,7 +1250,7 @@ static int __init pc_init(void)
                                if ((board_id & 0x30) == 0x30)
                                        bd->memory_seg = 0x8000;
                        } else
-                               printk(KERN_ERR "epca: Board at 0x%x doesn't appear to be an XI\n",(int)bd->port);
+                               printk(KERN_ERR "epca: Board at 0x%x doesn't appear to be an XI\n", (int)bd->port);
                        break;
                }
        }
@@ -1326,12 +1324,12 @@ static void post_fep_init(unsigned int crd)
                 */
                /* PCI cards are already remapped at this point ISA are not */
                bd->numports = readw(bd->re_map_membase + XEMPORTS);
-               epcaassert(bd->numports <= 64,"PCI returned a invalid number of ports");
+               epcaassert(bd->numports <= 64, "PCI returned a invalid number of ports");
                nbdevs += (bd->numports);
        } else {
                /* Fix up the mappings for ISA/EISA etc */
                /* FIXME: 64K - can we be smarter ? */
-               bd->re_map_membase = ioremap(bd->membase, 0x10000);
+               bd->re_map_membase = ioremap_nocache(bd->membase, 0x10000);
        }
 
        if (crd != 0)
@@ -1362,7 +1360,8 @@ static void post_fep_init(unsigned int crd)
         * XEPORTS (address 0xc22) points at the number of channels the card
         * supports. (For 64XE, XI, XEM, and XR use 0xc02)
         */
-       if ((bd->type == PCXEVE || bd->type == PCXE) && (readw(memaddr + XEPORTS) < 3))
+       if ((bd->type == PCXEVE || bd->type == PCXE) &&
+                                       (readw(memaddr + XEPORTS) < 3))
                shrinkmem = 1;
        if (bd->type < PCIXEM)
                if (!request_region((int)bd->port, 4, board_desc[bd->type]))
@@ -1461,10 +1460,12 @@ static void post_fep_init(unsigned int crd)
 
                case PCXEVE:
                case PCXE:
-                       ch->txptr = memaddr + (((tseg - bd->memory_seg) << 4) & 0x1fff);
+                       ch->txptr = memaddr + (((tseg - bd->memory_seg) << 4)
+                                                               & 0x1fff);
                        ch->txwin = FEPWIN | ((tseg - bd->memory_seg) >> 9);
-                       ch->rxptr = memaddr + (((rseg - bd->memory_seg) << 4) & 0x1fff);
-                       ch->rxwin = FEPWIN | ((rseg - bd->memory_seg) >>9 );
+                       ch->rxptr = memaddr + (((rseg - bd->memory_seg) << 4)
+                                                               & 0x1fff);
+                       ch->rxwin = FEPWIN | ((rseg - bd->memory_seg) >> 9);
                        break;
 
                case PCXI:
@@ -1518,8 +1519,9 @@ static void post_fep_init(unsigned int crd)
        }
 
        printk(KERN_INFO
-               "Digi PC/Xx Driver V%s:  %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n",
-               VERSION, board_desc[bd->type], (long)bd->port, (long)bd->membase, bd->numports);
+       "Digi PC/Xx Driver V%s:  %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n",
+                               VERSION, board_desc[bd->type], (long)bd->port,
+                                       (long)bd->membase, bd->numports);
        memwinoff(bd, 0);
 }
 
@@ -1527,7 +1529,7 @@ static void epcapoll(unsigned long ignored)
 {
        unsigned long flags;
        int crd;
-       volatile unsigned int head, tail;
+       unsigned int head, tail;
        struct channel *ch;
        struct board_info *bd;
 
@@ -1593,7 +1595,9 @@ static void doevent(int crd)
        chan0 = card_ptr[crd];
        epcaassert(chan0 <= &digi_channels[nbdevs - 1], "ch out of range");
        assertgwinon(chan0);
-       while ((tail = readw(&chan0->mailbox->eout)) != (head = readw(&chan0->mailbox->ein))) { /* Begin while something in event queue */
+       while ((tail = readw(&chan0->mailbox->eout)) !=
+                       (head = readw(&chan0->mailbox->ein))) {
+               /* Begin while something in event queue */
                assertgwinon(chan0);
                eventbuf = bd->re_map_membase + tail + ISTART;
                /* Get the channel the event occurred on */
@@ -1617,7 +1621,8 @@ static void doevent(int crd)
                        goto next;
                }
 
-               if ((bc = ch->brdchan) == NULL)
+               bc = ch->brdchan;
+               if (bc == NULL)
                        goto next;
 
                if (event & DATA_IND)  { /* Begin DATA_IND */
@@ -1629,10 +1634,11 @@ static void doevent(int crd)
                        /* A modem signal change has been indicated */
                        ch->imodem = mstat;
                        if (ch->asyncflags & ASYNC_CHECK_CD) {
-                               if (mstat & ch->dcd)  /* We are now receiving dcd */
+                               /* We are now receiving dcd */
+                               if (mstat & ch->dcd)
                                        wake_up_interruptible(&ch->open_wait);
-                               else
-                                       pc_sched_event(ch, EPCA_EVENT_HANGUP); /* No dcd; hangup */
+                               else    /* No dcd; hangup */
+                                       pc_sched_event(ch, EPCA_EVENT_HANGUP);
                        }
                }
                tty = ch->tty;
@@ -1647,7 +1653,8 @@ static void doevent(int crd)
                                        tty_wakeup(tty);
                                }
                        } else if (event & EMPTYTX_IND) {
-                               /* This event is generated by setup_empty_event */
+                               /* This event is generated by
+                                  setup_empty_event */
                                ch->statusflags &= ~TXBUSY;
                                if (ch->statusflags & EMPTYWAIT) {
                                        ch->statusflags &= ~EMPTYWAIT;
@@ -1655,7 +1662,7 @@ static void doevent(int crd)
                                }
                        }
                }
-       next:
+next:
                globalwinon(ch);
                BUG_ON(!bc);
                writew(1, &bc->idata);
@@ -1665,7 +1672,7 @@ static void doevent(int crd)
 }
 
 static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
-                   int byte2, int ncmds, int bytecmd)
+                                       int byte2, int ncmds, int bytecmd)
 {
        unchar __iomem *memaddr;
        unsigned int head, cmdTail, cmdStart, cmdMax;
@@ -1690,8 +1697,10 @@ static void fepcmd(struct channel *ch, int cmd, int word_or_byte,
        memaddr = ch->board->re_map_membase;
 
        if (head >= (cmdMax - cmdStart) || (head & 03))  {
-               printk(KERN_ERR "line %d: Out of range, cmd = %x, head = %x\n", __LINE__,  cmd, head);
-               printk(KERN_ERR "line %d: Out of range, cmdMax = %x, cmdStart = %x\n", __LINE__,  cmdMax, cmdStart);
+               printk(KERN_ERR "line %d: Out of range, cmd = %x, head = %x\n",
+                                               __LINE__,  cmd, head);
+               printk(KERN_ERR "line %d: Out of range, cmdMax = %x, cmdStart = %x\n",
+                                               __LINE__,  cmdMax, cmdStart);
                return;
        }
        if (bytecmd)  {
@@ -1770,7 +1779,7 @@ static unsigned termios2digi_h(struct channel *ch, unsigned cflag)
 static unsigned termios2digi_i(struct channel *ch, unsigned iflag)
 {
        unsigned res = iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
-                               INPCK | ISTRIP|IXON|IXANY|IXOFF);
+                                       INPCK | ISTRIP | IXON | IXANY | IXOFF);
        if (ch->digiext.digi_flags & DIGI_AIXON)
                res |= IAIXON;
        return res;
@@ -1838,7 +1847,7 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
        unsigned mval, hflow, cflag, iflag;
 
        bc = ch->brdchan;
-       epcaassert(bc !=0, "bc out of range");
+       epcaassert(bc != NULL, "bc out of range");
 
        assertgwinon(ch);
        ts = tty->termios;
@@ -1884,8 +1893,10 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch)
                 * Command sets channels iflag structure on the board. Such
                 * things as input soft flow control, handling of parity
                 * errors, and break handling are all set here.
+                *
+                * break handling, parity handling, input stripping,
+                * flow control chars
                 */
-               /* break handling, parity handling, input stripping, flow control chars */
                fepcmd(ch, SETIFLAGS, (unsigned int) ch->fepiflag, 0, 0, 0);
        }
        /*
@@ -1981,7 +1992,7 @@ static void receive_data(struct channel *ch)
                return;
 
        /* If CREAD bit is off or device not open, set TX tail to head */
-       if (!tty || !ts || !(ts->c_cflag & CREAD))  {
+       if (!tty || !ts || !(ts->c_cflag & CREAD)) {
                writew(head, &bc->rout);
                return;
        }
@@ -1991,18 +2002,21 @@ static void receive_data(struct channel *ch)
 
        if (readb(&bc->orun)) {
                writeb(0, &bc->orun);
-               printk(KERN_WARNING "epca; overrun! DigiBoard device %s\n",tty->name);
+               printk(KERN_WARNING "epca; overrun! DigiBoard device %s\n",
+                                                               tty->name);
                tty_insert_flip_char(tty, 0, TTY_OVERRUN);
        }
        rxwinon(ch);
-       while (bytesAvailable > 0)  { /* Begin while there is data on the card */
+       while (bytesAvailable > 0) {
+               /* Begin while there is data on the card */
                wrapgap = (head >= tail) ? head - tail : ch->rxbufsize - tail;
                /*
                 * Even if head has wrapped around only report the amount of
                 * data to be equal to the size - tail. Remember memcpy can't
                 * automaticly wrap around the receive buffer.
                 */
-               dataToRead = (wrapgap < bytesAvailable) ? wrapgap : bytesAvailable;
+               dataToRead = (wrapgap < bytesAvailable) ? wrapgap
+                                                       : bytesAvailable;
                /* Make sure we don't overflow the buffer */
                dataToRead = tty_prepare_flip_string(tty, &rptr, dataToRead);
                if (dataToRead == 0)
@@ -2153,14 +2167,14 @@ static int pc_tiocmset(struct tty_struct *tty, struct file *file,
         * The below routine generally sets up parity, baud, flow control
         * issues, etc.... It effect both control flags and input flags.
         */
-       epcaparam(tty,ch);
+       epcaparam(tty, ch);
        memoff(ch);
        spin_unlock_irqrestore(&epca_lock, flags);
        return 0;
 }
 
-static int pc_ioctl(struct tty_struct *tty, struct file * file,
-                   unsigned int cmd, unsigned long arg)
+static int pc_ioctl(struct tty_struct *tty, struct file *file,
+                                       unsigned int cmd, unsigned long arg)
 {
        digiflow_t dflow;
        int retval;
@@ -2175,7 +2189,6 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
                bc = ch->brdchan;
        else
                return -EINVAL;
-
        /*
         * For POSIX compliance we need to add more ioctls. See tty_ioctl.c in
         * /usr/src/linux/drivers/char for a good example. In particular think
@@ -2186,9 +2199,10 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
                retval = tty_check_change(tty);
                if (retval)
                        return retval;
-               /* Setup an event to indicate when the transmit buffer empties */
+               /* Setup an event to indicate when the transmit
+                  buffer empties */
                spin_lock_irqsave(&epca_lock, flags);
-               setup_empty_event(tty,ch);
+               setup_empty_event(tty, ch);
                spin_unlock_irqrestore(&epca_lock, flags);
                tty_wait_until_sent(tty, 0);
                if (!arg)
@@ -2198,29 +2212,14 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
                retval = tty_check_change(tty);
                if (retval)
                        return retval;
-
-               /* Setup an event to indicate when the transmit buffer empties */
+               /* Setup an event to indicate when the transmit buffer
+                  empties */
                spin_lock_irqsave(&epca_lock, flags);
-               setup_empty_event(tty,ch);
+               setup_empty_event(tty, ch);
                spin_unlock_irqrestore(&epca_lock, flags);
                tty_wait_until_sent(tty, 0);
                digi_send_break(ch, arg ? arg*(HZ/10) : HZ/4);
                return 0;
-       case TIOCGSOFTCAR:
-               if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)arg))
-                       return -EFAULT;
-               return 0;
-       case TIOCSSOFTCAR:
-               {
-                       unsigned int value;
-
-                       if (get_user(value, (unsigned __user *)argp))
-                               return -EFAULT;
-                       tty->termios->c_cflag =
-                               ((tty->termios->c_cflag & ~CLOCAL) |
-                                (value ? CLOCAL : 0));
-                       return 0;
-               }
        case TIOCMODG:
                mflag = pc_tiocmget(tty, file);
                if (put_user(mflag, (unsigned long __user *)argp))
@@ -2253,10 +2252,12 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
                break;
        case DIGI_SETAW:
        case DIGI_SETAF:
+               lock_kernel();
                if (cmd == DIGI_SETAW) {
-                       /* Setup an event to indicate when the transmit buffer empties */
+                       /* Setup an event to indicate when the transmit
+                          buffer empties */
                        spin_lock_irqsave(&epca_lock, flags);
-                       setup_empty_event(tty,ch);
+                       setup_empty_event(tty, ch);
                        spin_unlock_irqrestore(&epca_lock, flags);
                        tty_wait_until_sent(tty, 0);
                } else {
@@ -2264,6 +2265,7 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
                        if (tty->ldisc.flush_buffer)
                                tty->ldisc.flush_buffer(tty);
                }
+               unlock_kernel();
                /* Fall Thru */
        case DIGI_SETA:
                if (copy_from_user(&ch->digiext, argp, sizeof(digi_t)))
@@ -2285,7 +2287,7 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
                 * control issues, etc.... It effect both control flags and
                 * input flags.
                 */
-               epcaparam(tty,ch);
+               epcaparam(tty, ch);
                memoff(ch);
                spin_unlock_irqrestore(&epca_lock, flags);
                break;
@@ -2321,18 +2323,21 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file,
                if (copy_from_user(&dflow, argp, sizeof(dflow)))
                        return -EFAULT;
 
-               if (dflow.startc != startc || dflow.stopc != stopc) { /* Begin  if setflow toggled */
+               if (dflow.startc != startc || dflow.stopc != stopc) {
+                       /* Begin  if setflow toggled */
                        spin_lock_irqsave(&epca_lock, flags);
                        globalwinon(ch);
 
                        if (cmd == DIGI_SETFLOW) {
                                ch->fepstartc = ch->startc = dflow.startc;
                                ch->fepstopc = ch->stopc = dflow.stopc;
-                               fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1);
+                               fepcmd(ch, SONOFFC, ch->fepstartc,
+                                               ch->fepstopc, 0, 1);
                        } else {
                                ch->fepstartca = ch->startca = dflow.startc;
                                ch->fepstopca  = ch->stopca = dflow.stopc;
-                               fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1);
+                               fepcmd(ch, SAUXONOFFC, ch->fepstartca,
+                                               ch->fepstopca, 0, 1);
                        }
 
                        if (ch->statusflags & TXSTOPPED)
@@ -2356,7 +2361,9 @@ static void pc_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
         * verifyChannel returns the channel from the tty struct if it is
         * valid. This serves as a sanity check.
         */
-       if ((ch = verifyChannel(tty)) != NULL)  { /* Begin if channel valid */
+       ch = verifyChannel(tty);
+
+       if (ch != NULL)  { /* Begin if channel valid */
                spin_lock_irqsave(&epca_lock, flags);
                globalwinon(ch);
                epcaparam(tty, ch);
@@ -2383,7 +2390,7 @@ static void do_softint(struct work_struct *work)
 
                if (tty && tty->driver_data) {
                        if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) {
-                               tty_hangup(tty);        /* FIXME: module removal race here - AKPM */
+                               tty_hangup(tty);
                                wake_up_interruptible(&ch->open_wait);
                                ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE;
                        }
@@ -2403,9 +2410,11 @@ static void pc_stop(struct tty_struct *tty)
         * verifyChannel returns the channel from the tty struct if it is
         * valid. This serves as a sanity check.
         */
-       if ((ch = verifyChannel(tty)) != NULL) {
+       ch = verifyChannel(tty);
+       if (ch != NULL) {
                spin_lock_irqsave(&epca_lock, flags);
-               if ((ch->statusflags & TXSTOPPED) == 0) { /* Begin if transmit stop requested */
+               if ((ch->statusflags & TXSTOPPED) == 0) {
+                       /* Begin if transmit stop requested */
                        globalwinon(ch);
                        /* STOP transmitting now !! */
                        fepcmd(ch, PAUSETX, 0, 0, 0, 0);
@@ -2423,11 +2432,14 @@ static void pc_start(struct tty_struct *tty)
         * verifyChannel returns the channel from the tty struct if it is
         * valid. This serves as a sanity check.
         */
-       if ((ch = verifyChannel(tty)) != NULL) {
+       ch = verifyChannel(tty);
+       if (ch != NULL) {
                unsigned long flags;
                spin_lock_irqsave(&epca_lock, flags);
-               /* Just in case output was resumed because of a change in Digi-flow */
-               if (ch->statusflags & TXSTOPPED)  { /* Begin transmit resume requested */
+               /* Just in case output was resumed because of a change
+                  in Digi-flow */
+               if (ch->statusflags & TXSTOPPED)  {
+                       /* Begin transmit resume requested */
                        struct board_chan __iomem *bc;
                        globalwinon(ch);
                        bc = ch->brdchan;
@@ -2457,7 +2469,8 @@ static void pc_throttle(struct tty_struct *tty)
         * verifyChannel returns the channel from the tty struct if it is
         * valid. This serves as a sanity check.
         */
-       if ((ch = verifyChannel(tty)) != NULL) {
+       ch = verifyChannel(tty);
+       if (ch != NULL) {
                spin_lock_irqsave(&epca_lock, flags);
                if ((ch->statusflags & RXSTOPPED) == 0) {
                        globalwinon(ch);
@@ -2477,8 +2490,10 @@ static void pc_unthrottle(struct tty_struct *tty)
         * verifyChannel returns the channel from the tty struct if it is
         * valid. This serves as a sanity check.
         */
-       if ((ch = verifyChannel(tty)) != NULL) {
-               /* Just in case output was resumed because of a change in Digi-flow */
+       ch = verifyChannel(tty);
+       if (ch != NULL) {
+               /* Just in case output was resumed because of a change
+                  in Digi-flow */
                spin_lock_irqsave(&epca_lock, flags);
                if (ch->statusflags & RXSTOPPED) {
                        globalwinon(ch);
@@ -2490,7 +2505,7 @@ static void pc_unthrottle(struct tty_struct *tty)
        }
 }
 
-void digi_send_break(struct channel *ch, int msec)
+static void digi_send_break(struct channel *ch, int msec)
 {
        unsigned long flags;
 
@@ -2523,7 +2538,7 @@ static void setup_empty_event(struct tty_struct *tty, struct channel *ch)
        memoff(ch);
 }
 
-void epca_setup(char *str, int *ints)
+static void epca_setup(char *str, int *ints)
 {
        struct board_info board;
        int               index, loop, last;
@@ -2552,14 +2567,16 @@ void epca_setup(char *str, int *ints)
                         * instructing the driver to ignore epcaconfig.) For
                         * this reason we check for 2.
                         */
-                       if (board.status == 2) { /* Begin ignore epcaconfig as well as lilo cmd line */
+                       if (board.status == 2) {
+                       /* Begin ignore epcaconfig as well as lilo cmd line */
                                nbdevs = 0;
                                num_cards = 0;
                                return;
                        } /* End ignore epcaconfig as well as lilo cmd line */
 
                        if (board.status > 2) {
-                               printk(KERN_ERR "epca_setup: Invalid board status 0x%x\n", board.status);
+                               printk(KERN_ERR "epca_setup: Invalid board status 0x%x\n",
+                                               board.status);
                                invalid_lilo_config = 1;
                                setup_error_code |= INVALID_BOARD_STATUS;
                                return;
@@ -2613,7 +2630,8 @@ void epca_setup(char *str, int *ints)
                case 6:
                        board.membase = ints[index];
                        if (ints[index] <= 0) {
-                               printk(KERN_ERR "epca_setup: Invalid memory base 0x%x\n",(unsigned int)board.membase);
+                               printk(KERN_ERR "epca_setup: Invalid memory base 0x%x\n",
+                                       (unsigned int)board.membase);
                                invalid_lilo_config = 1;
                                setup_error_code |= INVALID_MEM_BASE;
                                return;
@@ -2744,7 +2762,7 @@ void epca_setup(char *str, int *ints)
                                t2++;
 
                        if (*t2) {
-                               printk(KERN_ERR "epca_setup: Invalid memory base %s\n",str);
+                               printk(KERN_ERR "epca_setup: Invalid memory base %s\n", str);
                                invalid_lilo_config = 1;
                                setup_error_code |= INVALID_MEM_BASE;
                                return;
@@ -2766,7 +2784,7 @@ void epca_setup(char *str, int *ints)
 
        /* I should REALLY validate the stuff here */
        /* Copies our local copy of board into boards */
-       memcpy((void *)&boards[num_cards],(void *)&board, sizeof(board));
+       memcpy((void *)&boards[num_cards], (void *)&board, sizeof(board));
        /* Does this get called once per lilo arg are what ? */
        printk(KERN_INFO "PC/Xx: Added board %i, %s %i ports at 0x%4.4X base 0x%6.6X\n",
                num_cards, board_desc[board.type],
@@ -2807,9 +2825,9 @@ static int __devinit epca_init_one(struct pci_dev *pdev,
        if (board_idx >= MAXBOARDS)
                goto err_out;
 
-       addr = pci_resource_start (pdev, epca_info_tbl[info_idx].bar_idx);
+       addr = pci_resource_start(pdev, epca_info_tbl[info_idx].bar_idx);
        if (!addr) {
-               printk (KERN_ERR PFX "PCI region #%d not available (size 0)\n",
+               printk(KERN_ERR PFX "PCI region #%d not available (size 0)\n",
                        epca_info_tbl[info_idx].bar_idx);
                goto err_out;
        }
@@ -2820,28 +2838,29 @@ static int __devinit epca_init_one(struct pci_dev *pdev,
        boards[board_idx].port = addr + PCI_IO_OFFSET;
        boards[board_idx].membase = addr;
 
-       if (!request_mem_region (addr + PCI_IO_OFFSET, 0x200000, "epca")) {
-               printk (KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n",
+       if (!request_mem_region(addr + PCI_IO_OFFSET, 0x200000, "epca")) {
+               printk(KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n",
                        0x200000, addr + PCI_IO_OFFSET);
                goto err_out;
        }
 
-       boards[board_idx].re_map_port = ioremap(addr + PCI_IO_OFFSET, 0x200000);
+       boards[board_idx].re_map_port = ioremap_nocache(addr + PCI_IO_OFFSET,
+                                                               0x200000);
        if (!boards[board_idx].re_map_port) {
-               printk (KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n",
+               printk(KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n",
                        0x200000, addr + PCI_IO_OFFSET);
                goto err_out_free_pciio;
        }
 
-       if (!request_mem_region (addr, 0x200000, "epca")) {
-               printk (KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n",
+       if (!request_mem_region(addr, 0x200000, "epca")) {
+               printk(KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n",
                        0x200000, addr);
                goto err_out_free_iounmap;
        }
 
-       boards[board_idx].re_map_membase = ioremap(addr, 0x200000);
+       boards[board_idx].re_map_membase = ioremap_nocache(addr, 0x200000);
        if (!boards[board_idx].re_map_membase) {
-               printk (KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n",
+               printk(KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n",
                        0x200000, addr + PCI_IO_OFFSET);
                goto err_out_free_memregion;
        }
@@ -2858,11 +2877,11 @@ static int __devinit epca_init_one(struct pci_dev *pdev,
        return 0;
 
 err_out_free_memregion:
-       release_mem_region (addr, 0x200000);
+       release_mem_region(addr, 0x200000);
 err_out_free_iounmap:
-       iounmap (boards[board_idx].re_map_port);
+       iounmap(boards[board_idx].re_map_port);
 err_out_free_pciio:
-       release_mem_region (addr + PCI_IO_OFFSET, 0x200000);
+       release_mem_region(addr + PCI_IO_OFFSET, 0x200000);
 err_out:
        return -ENODEV;
 }
@@ -2878,9 +2897,9 @@ static struct pci_device_id epca_pci_tbl[] = {
 
 MODULE_DEVICE_TABLE(pci, epca_pci_tbl);
 
-int __init init_PCI (void)
+static int __init init_PCI(void)
 {
-       memset (&epca_driver, 0, sizeof (epca_driver));
+       memset(&epca_driver, 0, sizeof(epca_driver));
        epca_driver.name = "epca";
        epca_driver.id_table = epca_pci_tbl;
        epca_driver.probe = epca_init_one;
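
Several of the epca.c hunks above (pc_write_room(), pc_chars_in_buffer()) pull the same circular-buffer arithmetic out of compound conditionals: free space is tail - head - 1, wrapped back into range when negative, and the driver's "used" count is the buffer size minus that. A stand-alone sketch of the calculation, using a hypothetical buffer size rather than the driver's txbufsize:

#include <stdio.h>

#define RING_SIZE 256u  /* hypothetical size; one slot stays empty */

/* Free slots between head (producer) and tail (consumer). */
static unsigned int ring_free(unsigned int head, unsigned int tail)
{
        int remain = (int)tail - (int)head - 1;

        if (remain < 0)
                remain += RING_SIZE;
        return (unsigned int)remain;
}

int main(void)
{
        unsigned int head = 10, tail = 4;
        unsigned int room = ring_free(head, tail);

        printf("free = %u, used = %u\n", room, RING_SIZE - room);
        return 0;
}
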
index f3fe620673440e267172e957f174028933beacbc..84840ba13ff05e584f78e3ca252eb0d74db10dd1 100644 (file)
@@ -8,7 +8,7 @@
  *  Extensively rewritten by Theodore Ts'o, 8/16/92 -- 9/14/92.  Now
  *  much more extensible to support other serial cards based on the
  *  16450/16550A UART's.  Added support for the AST FourPort and the
- *  Accent Async board.  
+ *  Accent Async board.
  *
  *  set_serial_info fixed to set the flags, custom divisor, and uart
  *     type fields.  Fix suggested by Michael K. Johnson 12/12/92.
 #include <linux/bitops.h>
 
 #include <asm/system.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <asm/dma.h>
 #include <linux/slab.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #include <linux/hayesesp.h>
 
@@ -127,8 +127,10 @@ static struct tty_driver *esp_driver;
 #undef SERIAL_DEBUG_FLOW
 
 #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT)
-#define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \
- tty->name, (info->flags), serial_driver.refcount,info->count,tty->count,s)
+#define DBG_CNT(s) printk(KERN_DEBUG "(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \
+                               tty->name, info->flags, \
+                               serial_driver.refcount, \
+                               info->count, tty->count, s)
 #else
 #define DBG_CNT(s)
 #endif
@@ -189,7 +191,7 @@ static inline void serial_out(struct esp_struct *info, int offset,
  */
 static void rs_stop(struct tty_struct *tty)
 {
-       struct esp_struct *info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        unsigned long flags;
 
        if (serial_paranoia_check(info, tty->name, "rs_stop"))
@@ -206,12 +208,12 @@ static void rs_stop(struct tty_struct *tty)
 
 static void rs_start(struct tty_struct *tty)
 {
-       struct esp_struct *info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        unsigned long flags;
-       
+
        if (serial_paranoia_check(info, tty->name, "rs_start"))
                return;
-       
+
        spin_lock_irqsave(&info->lock, flags);
        if (info->xmit_cnt && info->xmit_buf && !(info->IER & UART_IER_THRI)) {
                info->IER |= UART_IER_THRI;
@@ -233,7 +235,7 @@ static void rs_start(struct tty_struct *tty)
  * rs_interrupt() should try to keep the interrupt handler as fast as
  * possible.  After you are done making modifications, it is not a bad
  * idea to do:
- * 
+ *
  * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c
  *
  * and look at the resulting assemble code in serial.s.
@@ -290,7 +292,7 @@ static inline void receive_chars_pio(struct esp_struct *info, int num_bytes)
        }
 
        status_mask = (info->read_status_mask >> 2) & 0x07;
-               
+
        for (i = 0; i < num_bytes - 1; i += 2) {
                *((unsigned short *)(pio_buf->data + i)) =
                        inw(info->port + UART_ESI_RX);
@@ -325,8 +327,7 @@ static inline void receive_chars_pio(struct esp_struct *info, int num_bytes)
                                flag = TTY_BREAK;
                                if (info->flags & ASYNC_SAK)
                                        do_SAK(tty);
-                       }
-                       else if (err_buf->data[i] & 0x02)
+                       } else if (err_buf->data[i] & 0x02)
                                flag = TTY_FRAME;
                        else if (err_buf->data[i] & 0x01)
                                flag = TTY_PARITY;
@@ -341,23 +342,29 @@ static inline void receive_chars_pio(struct esp_struct *info, int num_bytes)
        release_pio_buffer(err_buf);
 }
 
-static inline void receive_chars_dma(struct esp_struct *info, int num_bytes)
+static void program_isa_dma(int dma, int dir, unsigned long addr, int len)
 {
        unsigned long flags;
+
+       flags = claim_dma_lock();
+       disable_dma(dma);
+       clear_dma_ff(dma);
+       set_dma_mode(dma, dir);
+       set_dma_addr(dma, addr);
+       set_dma_count(dma, len);
+       enable_dma(dma);
+       release_dma_lock(flags);
+}
+
+static void receive_chars_dma(struct esp_struct *info, int num_bytes)
+{
        info->stat_flags &= ~ESP_STAT_RX_TIMEOUT;
        dma_bytes = num_bytes;
        info->stat_flags |= ESP_STAT_DMA_RX;
-       
-       flags=claim_dma_lock();
-        disable_dma(dma);
-        clear_dma_ff(dma);
-        set_dma_mode(dma, DMA_MODE_READ);
-        set_dma_addr(dma, isa_virt_to_bus(dma_buffer));
-        set_dma_count(dma, dma_bytes);
-        enable_dma(dma);
-        release_dma_lock(flags);
-        
-        serial_out(info, UART_ESI_CMD1, ESI_START_DMA_RX);
+
+       program_isa_dma(dma, DMA_MODE_READ, isa_virt_to_bus(dma_buffer),
+                                                               dma_bytes);
+       serial_out(info, UART_ESI_CMD1, ESI_START_DMA_RX);
 }
 
 static inline void receive_chars_dma_done(struct esp_struct *info,
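
The Hayes ESP hunks in this file (here and below) fold three nearly identical claim_dma_lock()/set_dma_*()/enable_dma()/release_dma_lock() sequences into a single program_isa_dma() helper, leaving each caller with one call plus the ESI start command. The stand-alone sketch below only mirrors the shape of that refactor; the lock and controller functions are stubs, not the real ISA DMA API:

#include <stdio.h>

static void lock_controller(void)   { puts("lock"); }
static void unlock_controller(void) { puts("unlock"); }

/* One helper replaces the formerly duplicated lock/program/unlock block. */
static void program_transfer(int channel, int dir, unsigned long addr, int len)
{
        lock_controller();
        printf("channel %d: dir=%d addr=%#lx len=%d\n", channel, dir, addr, len);
        unlock_controller();
}

int main(void)
{
        /* Each former call site now reduces to a single line. */
        program_transfer(3, 0, 0x1000ul, 512); /* receive-style setup  */
        program_transfer(3, 1, 0x2000ul, 128); /* transmit-style setup */
        return 0;
}
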
@@ -366,22 +373,22 @@ static inline void receive_chars_dma_done(struct esp_struct *info,
        struct tty_struct *tty = info->tty;
        int num_bytes;
        unsigned long flags;
-       
-       flags=claim_dma_lock();
+
+       flags = claim_dma_lock();
        disable_dma(dma);
        clear_dma_ff(dma);
 
        info->stat_flags &= ~ESP_STAT_DMA_RX;
        num_bytes = dma_bytes - get_dma_residue(dma);
        release_dma_lock(flags);
-       
+
        info->icount.rx += num_bytes;
 
        if (num_bytes > 0) {
                tty_insert_flip_string(tty, dma_buffer, num_bytes - 1);
 
                status &= (0x1c & info->read_status_mask);
-               
+
                /* Is the status significant or do we throw the last byte ? */
                if (!(status & info->ignore_status_mask)) {
                        int statflag = 0;
@@ -393,13 +400,13 @@ static inline void receive_chars_dma_done(struct esp_struct *info,
                                        do_SAK(tty);
                        } else if (status & 0x08) {
                                statflag = TTY_FRAME;
-                               (info->icount.frame)++;
-                       }
-                       else if (status & 0x04) {
+                               info->icount.frame++;
+                       } else if (status & 0x04) {
                                statflag = TTY_PARITY;
-                               (info->icount.parity)++;
+                               info->icount.parity++;
                        }
-                       tty_insert_flip_char(tty, dma_buffer[num_bytes - 1], statflag);
+                       tty_insert_flip_char(tty, dma_buffer[num_bytes - 1],
+                                                               statflag);
                }
                tty_schedule_flip(tty);
        }
@@ -484,8 +491,6 @@ static inline void transmit_chars_pio(struct esp_struct *info,
 /* Caller must hold info->lock */
 static inline void transmit_chars_dma(struct esp_struct *info, int num_bytes)
 {
-       unsigned long flags;
-       
        dma_bytes = num_bytes;
 
        if (info->xmit_tail + dma_bytes <= ESP_XMIT_SIZE) {
@@ -517,26 +522,18 @@ static inline void transmit_chars_dma(struct esp_struct *info, int num_bytes)
        }
 
        info->stat_flags |= ESP_STAT_DMA_TX;
-       
-       flags=claim_dma_lock();
-        disable_dma(dma);
-        clear_dma_ff(dma);
-        set_dma_mode(dma, DMA_MODE_WRITE);
-        set_dma_addr(dma, isa_virt_to_bus(dma_buffer));
-        set_dma_count(dma, dma_bytes);
-        enable_dma(dma);
-        release_dma_lock(flags);
-        
-        serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX);
+
+       program_isa_dma(dma, DMA_MODE_WRITE, isa_virt_to_bus(dma_buffer),
+                                                               dma_bytes);
+       serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX);
 }
 
 static inline void transmit_chars_dma_done(struct esp_struct *info)
 {
        int num_bytes;
        unsigned long flags;
-       
 
-       flags=claim_dma_lock();
+       flags = claim_dma_lock();
        disable_dma(dma);
        clear_dma_ff(dma);
 
@@ -547,27 +544,21 @@ static inline void transmit_chars_dma_done(struct esp_struct *info)
        if (dma_bytes != num_bytes) {
                dma_bytes -= num_bytes;
                memmove(dma_buffer, dma_buffer + num_bytes, dma_bytes);
-               
-               flags=claim_dma_lock();
-               disable_dma(dma);
-               clear_dma_ff(dma);
-               set_dma_mode(dma, DMA_MODE_WRITE);
-               set_dma_addr(dma, isa_virt_to_bus(dma_buffer));
-               set_dma_count(dma, dma_bytes);
-               enable_dma(dma);
-               release_dma_lock(flags);
-               
-               serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX);
+
+               program_isa_dma(dma, DMA_MODE_WRITE,
+                               isa_virt_to_bus(dma_buffer), dma_bytes);
+
+               serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX);
        } else {
                dma_bytes = 0;
                info->stat_flags &= ~ESP_STAT_DMA_TX;
        }
 }
 
-static inline void check_modem_status(struct esp_struct *info)
+static void check_modem_status(struct esp_struct *info)
 {
        int     status;
-       
+
        serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT);
        status = serial_in(info, UART_ESI_STAT2);
 
@@ -588,7 +579,7 @@ static inline void check_modem_status(struct esp_struct *info)
 #if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR))
                printk("ttys%d CD now %s...", info->line,
                       (status & UART_MSR_DCD) ? "on" : "off");
-#endif         
+#endif
                if (status & UART_MSR_DCD)
                        wake_up_interruptible(&info->open_wait);
                else {
@@ -605,7 +596,7 @@ static inline void check_modem_status(struct esp_struct *info)
  */
 static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
 {
-       struct esp_struct * info;
+       struct esp_struct *info;
        unsigned err_status;
        unsigned int scratch;
 
@@ -617,7 +608,7 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
        scratch = serial_in(info, UART_ESI_SID);
 
        spin_lock(&info->lock);
-       
+
        if (!info->tty) {
                spin_unlock(&info->lock);
                return IRQ_NONE;
@@ -637,7 +628,7 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
                if (err_status & 0x80) /* Start break */
                        wake_up_interruptible(&info->break_wait);
        }
-               
+
        if ((scratch & 0x88) || /* DMA completed or timed out */
            (err_status & 0x1c) /* receive error */) {
                if (info->stat_flags & ESP_STAT_DMA_RX)
@@ -667,7 +658,7 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
                                receive_chars_dma(info, num_bytes);
                }
        }
-       
+
        if (!(info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) &&
            (scratch & 0x02) && (info->IER & UART_IER_THRI)) {
                if ((info->xmit_cnt <= 0) || info->tty->stopped) {
@@ -722,11 +713,11 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
  * ---------------------------------------------------------------
  */
 
-static inline void esp_basic_init(struct esp_struct * info)
+static void esp_basic_init(struct esp_struct *info)
 {
        /* put ESPC in enhanced mode */
        serial_out(info, UART_ESI_CMD1, ESI_SET_MODE);
-       
+
        if (info->stat_flags & ESP_STAT_NEVER_DMA)
                serial_out(info, UART_ESI_CMD2, 0x01);
        else
@@ -783,13 +774,13 @@ static inline void esp_basic_init(struct esp_struct * info)
        serial_out(info, UART_ESI_CMD2, 0xff);
 }
 
-static int startup(struct esp_struct * info)
+static int startup(struct esp_struct *info)
 {
        unsigned long flags;
-       int     retval=0;
-        unsigned int num_chars;
+       int     retval = 0;
+       unsigned int num_chars;
 
-        spin_lock_irqsave(&info->lock, flags);
+       spin_lock_irqsave(&info->lock, flags);
 
        if (info->flags & ASYNC_INITIALIZED)
                goto out;
@@ -802,7 +793,8 @@ static int startup(struct esp_struct * info)
        }
 
 #ifdef SERIAL_DEBUG_OPEN
-       printk("starting up ttys%d (irq %d)...", info->line, info->irq);
+       printk(KERN_DEBUG "starting up ttys%d (irq %d)...",
+                                               info->line, info->irq);
 #endif
 
        /* Flush the RX buffer.  Using the ESI flush command may cause */
@@ -863,7 +855,7 @@ static int startup(struct esp_struct * info)
                        dma_buffer = NULL;
                        info->stat_flags |= ESP_STAT_USE_PIO;
                }
-                       
+
        }
 
        info->MCR = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2;
@@ -872,7 +864,7 @@ static int startup(struct esp_struct * info)
        serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART);
        serial_out(info, UART_ESI_CMD2, UART_MCR);
        serial_out(info, UART_ESI_CMD2, info->MCR);
-       
+
        /*
         * Finally, enable interrupts
         */
@@ -881,7 +873,7 @@ static int startup(struct esp_struct * info)
                        UART_IER_DMA_TC;
        serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
        serial_out(info, UART_ESI_CMD2, info->IER);
-       
+
        if (info->tty)
                clear_bit(TTY_IO_ERROR, &info->tty->flags);
        info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
@@ -900,7 +892,7 @@ static int startup(struct esp_struct * info)
                if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
                        info->tty->alt_speed = 460800;
        }
-       
+
        /*
         * set the speed of the serial port
         */
@@ -918,7 +910,7 @@ out_unlocked:
  * This routine will shutdown a serial port; interrupts are disabled, and
  * DTR is dropped if the hangup on close termio flag is on.
  */
-static void shutdown(struct esp_struct * info)
+static void shutdown(struct esp_struct *info)
 {
        unsigned long   flags, f;
 
@@ -929,7 +921,7 @@ static void shutdown(struct esp_struct * info)
        printk("Shutting down serial port %d (irq %d)....", info->line,
               info->irq);
 #endif
-       
+
        spin_lock_irqsave(&info->lock, flags);
        /*
         * clear delta_msr_wait queue to avoid mem leaks: we may free the irq
@@ -941,14 +933,14 @@ static void shutdown(struct esp_struct * info)
        /* stop a DMA transfer on the port being closed */
        /* DMA lock is higher priority always */
        if (info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) {
-               f=claim_dma_lock();
+               f = claim_dma_lock();
                disable_dma(dma);
                clear_dma_ff(dma);
                release_dma_lock(f);
-               
+
                dma_bytes = 0;
        }
-       
+
        /*
         * Free the IRQ
         */
@@ -970,7 +962,7 @@ static void shutdown(struct esp_struct * info)
                        free_pages((unsigned long)dma_buffer,
                                   get_order(DMA_BUFFER_SZ));
                        dma_buffer = NULL;
-               }               
+               }
        }
 
        if (info->xmit_buf) {
@@ -992,7 +984,7 @@ static void shutdown(struct esp_struct * info)
 
        if (info->tty)
                set_bit(TTY_IO_ERROR, &info->tty->flags);
-       
+
        info->flags &= ~ASYNC_INITIALIZED;
        spin_unlock_irqrestore(&info->lock, flags);
 }
@@ -1005,7 +997,7 @@ static void change_speed(struct esp_struct *info)
 {
        unsigned short port;
        int     quot = 0;
-       unsigned cflag,cval;
+       unsigned cflag, cval;
        int     baud, bits;
        unsigned char flow1 = 0, flow2 = 0;
        unsigned long flags;
@@ -1014,14 +1006,14 @@ static void change_speed(struct esp_struct *info)
                return;
        cflag = info->tty->termios->c_cflag;
        port = info->port;
-       
+
        /* byte size and parity */
        switch (cflag & CSIZE) {
-             case CS5: cval = 0x00; bits = 7; break;
-             case CS6: cval = 0x01; bits = 8; break;
-             case CS7: cval = 0x02; bits = 9; break;
-             case CS8: cval = 0x03; bits = 10; break;
-             default:  cval = 0x00; bits = 7; break;
+       case CS5: cval = 0x00; bits = 7; break;
+       case CS6: cval = 0x01; bits = 8; break;
+       case CS7: cval = 0x02; bits = 9; break;
+       case CS8: cval = 0x03; bits = 10; break;
+       default:  cval = 0x00; bits = 7; break;
        }
        if (cflag & CSTOPB) {
                cval |= 0x04;
@@ -1037,14 +1029,12 @@ static void change_speed(struct esp_struct *info)
        if (cflag & CMSPAR)
                cval |= UART_LCR_SPAR;
 #endif
-
        baud = tty_get_baud_rate(info->tty);
        if (baud == 38400 &&
-           ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST))
+               ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST))
                quot = info->custom_divisor;
        else {
-               if (baud == 134)
-                       /* Special case since 134 is really 134.5 */
+               if (baud == 134) /* Special case since 134 is really 134.5 */
                        quot = (2*BASE_BAUD / 269);
                else if (baud)
                        quot = BASE_BAUD / baud;
@@ -1052,7 +1042,12 @@ static void change_speed(struct esp_struct *info)
        /* If the quotient is ever zero, default to 9600 bps */
        if (!quot)
                quot = BASE_BAUD / 9600;
-       
+
+       if (baud) {
+               /* Actual rate */
+               baud = BASE_BAUD/quot;
+               tty_encode_baud_rate(info->tty, baud, baud);
+       }
        info->timeout = ((1024 * HZ * bits * quot) / BASE_BAUD) + (HZ / 50);
 
        /* CTS flow control flag and modem status interrupts */
@@ -1066,10 +1061,8 @@ static void change_speed(struct esp_struct *info)
                info->flags &= ~ASYNC_CTS_FLOW;
        if (cflag & CLOCAL)
                info->flags &= ~ASYNC_CHECK_CD;
-       else {
+       else
                info->flags |= ASYNC_CHECK_CD;
-               /* info->IER |= UART_IER_MSI; */
-       }
 
        /*
         * Set up parity check flag
@@ -1079,7 +1072,7 @@ static void change_speed(struct esp_struct *info)
                info->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
        if (I_BRKINT(info->tty) || I_PARMRK(info->tty))
                info->read_status_mask |= UART_LSR_BI;
-       
+
        info->ignore_status_mask = 0;
 #if 0
        /* This should be safe, but for some broken bits of hardware... */
@@ -1092,7 +1085,7 @@ static void change_speed(struct esp_struct *info)
                info->ignore_status_mask |= UART_LSR_BI;
                info->read_status_mask |= UART_LSR_BI;
                /*
-                * If we're ignore parity and break indicators, ignore 
+                * If we're ignore parity and break indicators, ignore
                 * overruns too.  (For real raw support).
                 */
                if (I_IGNPAR(info->tty)) {
@@ -1130,19 +1123,19 @@ static void change_speed(struct esp_struct *info)
                serial_out(info, UART_ESI_CMD2, 0x10);
                serial_out(info, UART_ESI_CMD2, 0x21);
                switch (cflag & CSIZE) {
-                       case CS5:
-                               serial_out(info, UART_ESI_CMD2, 0x1f);
-                               break;
-                       case CS6:
-                               serial_out(info, UART_ESI_CMD2, 0x3f);
-                               break;
-                       case CS7:
-                       case CS8:
-                               serial_out(info, UART_ESI_CMD2, 0x7f);
-                               break;
-                       default:
-                               serial_out(info, UART_ESI_CMD2, 0xff);
-                               break;
+               case CS5:
+                       serial_out(info, UART_ESI_CMD2, 0x1f);
+                       break;
+               case CS6:
+                       serial_out(info, UART_ESI_CMD2, 0x3f);
+                       break;
+               case CS7:
+               case CS8:
+                       serial_out(info, UART_ESI_CMD2, 0x7f);
+                       break;
+               default:
+                       serial_out(info, UART_ESI_CMD2, 0xff);
+                       break;
                }
        }
 
@@ -1156,31 +1149,34 @@ static void change_speed(struct esp_struct *info)
        spin_unlock_irqrestore(&info->lock, flags);
 }
 
-static void rs_put_char(struct tty_struct *tty, unsigned char ch)
+static int rs_put_char(struct tty_struct *tty, unsigned char ch)
 {
-       struct esp_struct *info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        unsigned long flags;
+       int ret = 0;
 
        if (serial_paranoia_check(info, tty->name, "rs_put_char"))
-               return;
+               return 0;
 
        if (!info->xmit_buf)
-               return;
+               return 0;
 
        spin_lock_irqsave(&info->lock, flags);
        if (info->xmit_cnt < ESP_XMIT_SIZE - 1) {
                info->xmit_buf[info->xmit_head++] = ch;
                info->xmit_head &= ESP_XMIT_SIZE-1;
                info->xmit_cnt++;
+               ret = 1;
        }
        spin_unlock_irqrestore(&info->lock, flags);
+       return ret;
 }
 
 static void rs_flush_chars(struct tty_struct *tty)
 {
-       struct esp_struct *info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        unsigned long flags;
-                               
+
        if (serial_paranoia_check(info, tty->name, "rs_flush_chars"))
                return;
 
@@ -1198,11 +1194,11 @@ out:
        spin_unlock_irqrestore(&info->lock, flags);
 }
 
-static int rs_write(struct tty_struct * tty,
+static int rs_write(struct tty_struct *tty,
                    const unsigned char *buf, int count)
 {
        int     c, t, ret = 0;
-       struct esp_struct *info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        unsigned long flags;
 
        if (serial_paranoia_check(info, tty->name, "rs_write"))
@@ -1210,19 +1206,19 @@ static int rs_write(struct tty_struct * tty,
 
        if (!info->xmit_buf)
                return 0;
-           
+
        while (1) {
                /* Thanks to R. Wolff for suggesting how to do this with */
                /* interrupts enabled */
 
                c = count;
                t = ESP_XMIT_SIZE - info->xmit_cnt - 1;
-               
+
                if (t < c)
                        c = t;
 
                t = ESP_XMIT_SIZE - info->xmit_head;
-               
+
                if (t < c)
                        c = t;
 
@@ -1252,10 +1248,10 @@ static int rs_write(struct tty_struct * tty,
 
 static int rs_write_room(struct tty_struct *tty)
 {
-       struct esp_struct *info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        int     ret;
        unsigned long flags;
-                               
+
        if (serial_paranoia_check(info, tty->name, "rs_write_room"))
                return 0;
 
@@ -1270,8 +1266,8 @@ static int rs_write_room(struct tty_struct *tty)
 
 static int rs_chars_in_buffer(struct tty_struct *tty)
 {
-       struct esp_struct *info = (struct esp_struct *)tty->driver_data;
-                               
+       struct esp_struct *info = tty->driver_data;
+
        if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer"))
                return 0;
        return info->xmit_cnt;
@@ -1279,9 +1275,9 @@ static int rs_chars_in_buffer(struct tty_struct *tty)
 
 static void rs_flush_buffer(struct tty_struct *tty)
 {
-       struct esp_struct *info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        unsigned long flags;
-                               
+
        if (serial_paranoia_check(info, tty->name, "rs_flush_buffer"))
                return;
        spin_lock_irqsave(&info->lock, flags);
@@ -1293,20 +1289,20 @@ static void rs_flush_buffer(struct tty_struct *tty)
 /*
  * ------------------------------------------------------------
  * rs_throttle()
- * 
+ *
  * This routine is called by the upper-layer tty layer to signal that
  * incoming characters should be throttled.
  * ------------------------------------------------------------
  */
-static void rs_throttle(struct tty_struct * tty)
+static void rs_throttle(struct tty_struct *tty)
 {
-       struct esp_struct *info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        unsigned long flags;
 #ifdef SERIAL_DEBUG_THROTTLE
        char    buf[64];
-       
+
        printk("throttle %s: %d....\n", tty_name(tty, buf),
-              tty->ldisc.chars_in_buffer(tty));
+                                               tty_chars_in_buffer(tty));
 #endif
 
        if (serial_paranoia_check(info, tty->name, "rs_throttle"))
@@ -1321,20 +1317,20 @@ static void rs_throttle(struct tty_struct * tty)
        spin_unlock_irqrestore(&info->lock, flags);
 }
 
-static void rs_unthrottle(struct tty_struct * tty)
+static void rs_unthrottle(struct tty_struct *tty)
 {
-       struct esp_struct *info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        unsigned long flags;
 #ifdef SERIAL_DEBUG_THROTTLE
        char    buf[64];
-       
-       printk("unthrottle %s: %d....\n", tty_name(tty, buf),
-              tty->ldisc.chars_in_buffer(tty));
+
+       printk(KERN_DEBUG "unthrottle %s: %d....\n", tty_name(tty, buf),
+              tty_chars_in_buffer(tty));
 #endif
 
        if (serial_paranoia_check(info, tty->name, "rs_unthrottle"))
                return;
-       
+
        spin_lock_irqsave(&info->lock, flags);
        info->IER |= UART_IER_RDI;
        serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK);
@@ -1350,11 +1346,12 @@ static void rs_unthrottle(struct tty_struct * tty)
  * ------------------------------------------------------------
  */
 
-static int get_serial_info(struct esp_struct * info,
+static int get_serial_info(struct esp_struct *info,
                           struct serial_struct __user *retinfo)
 {
        struct serial_struct tmp;
-  
+
+       lock_kernel();
        memset(&tmp, 0, sizeof(tmp));
        tmp.type = PORT_16550A;
        tmp.line = info->line;
@@ -1367,20 +1364,22 @@ static int get_serial_info(struct esp_struct * info,
        tmp.closing_wait = info->closing_wait;
        tmp.custom_divisor = info->custom_divisor;
        tmp.hub6 = 0;
-       if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
+       unlock_kernel();
+       if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
                return -EFAULT;
        return 0;
 }
 
-static int get_esp_config(struct esp_struct * info,
+static int get_esp_config(struct esp_struct *info,
                          struct hayes_esp_config __user *retinfo)
 {
        struct hayes_esp_config tmp;
-  
+
        if (!retinfo)
                return -EFAULT;
 
        memset(&tmp, 0, sizeof(tmp));
+       lock_kernel();
        tmp.rx_timeout = info->config.rx_timeout;
        tmp.rx_trigger = info->config.rx_trigger;
        tmp.tx_trigger = info->config.tx_trigger;
@@ -1388,11 +1387,12 @@ static int get_esp_config(struct esp_struct * info,
        tmp.flow_on = info->config.flow_on;
        tmp.pio_threshold = info->config.pio_threshold;
        tmp.dma_channel = (info->stat_flags & ESP_STAT_NEVER_DMA ? 0 : dma);
+       unlock_kernel();
 
        return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
 }
 
-static int set_serial_info(struct esp_struct * info,
+static int set_serial_info(struct esp_struct *info,
                           struct serial_struct __user *new_info)
 {
        struct serial_struct new_serial;
@@ -1401,7 +1401,7 @@ static int set_serial_info(struct esp_struct * info,
        int retval = 0;
        struct esp_struct *current_async;
 
-       if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
+       if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
                return -EFAULT;
        old_info = *info;
 
@@ -1422,7 +1422,7 @@ static int set_serial_info(struct esp_struct * info,
                return -EINVAL;
 
        if (!capable(CAP_SYS_ADMIN)) {
-               if (change_irq || 
+               if (change_irq ||
                    (new_serial.close_delay != info->close_delay) ||
                    ((new_serial.flags & ~ASYNC_USR_MASK) !=
                     (info->flags & ~ASYNC_USR_MASK)))
@@ -1507,8 +1507,8 @@ static int set_serial_info(struct esp_struct * info,
        return retval;
 }
 
-static int set_esp_config(struct esp_struct * info,
-                         struct hayes_esp_config __user * new_info)
+static int set_esp_config(struct esp_struct *info,
+                         struct hayes_esp_config __user *new_info)
 {
        struct hayes_esp_config new_config;
        unsigned int change_dma;
@@ -1550,7 +1550,6 @@ static int set_esp_config(struct esp_struct * info,
                if (new_config.dma_channel) {
                        /* PIO mode to DMA mode transition OR */
                        /* change current DMA channel */
-                       
                        current_async = ports;
 
                        while (current_async) {
@@ -1559,16 +1558,15 @@ static int set_esp_config(struct esp_struct * info,
                                                return -EBUSY;
                                } else if (current_async->count)
                                        return -EBUSY;
-                                       
-                               current_async =
-                                       current_async->next_port;
+
+                               current_async = current_async->next_port;
                        }
 
                        shutdown(info);
                        dma = new_config.dma_channel;
                        info->stat_flags &= ~ESP_STAT_NEVER_DMA;
-                       
-                        /* all ports must use the same DMA channel */
+
+                       /* all ports must use the same DMA channel */
 
                        spin_lock_irqsave(&info->lock, flags);
                        current_async = ports;
@@ -1580,7 +1578,6 @@ static int set_esp_config(struct esp_struct * info,
                        spin_unlock_irqrestore(&info->lock, flags);
                } else {
                        /* DMA mode to PIO mode only */
-                       
                        if (info->count > 1)
                                return -EBUSY;
 
@@ -1596,8 +1593,6 @@ static int set_esp_config(struct esp_struct * info,
 
        if ((new_config.flow_off != info->config.flow_off) ||
            (new_config.flow_on != info->config.flow_on)) {
-               unsigned long flags;
-
                info->config.flow_off = new_config.flow_off;
                info->config.flow_on = new_config.flow_on;
 
@@ -1612,8 +1607,6 @@ static int set_esp_config(struct esp_struct * info,
 
        if ((new_config.rx_trigger != info->config.rx_trigger) ||
            (new_config.tx_trigger != info->config.tx_trigger)) {
-               unsigned long flags;
-
                info->config.rx_trigger = new_config.rx_trigger;
                info->config.tx_trigger = new_config.tx_trigger;
                spin_lock_irqsave(&info->lock, flags);
@@ -1628,8 +1621,6 @@ static int set_esp_config(struct esp_struct * info,
        }
 
        if (new_config.rx_timeout != info->config.rx_timeout) {
-               unsigned long flags;
-
                info->config.rx_timeout = new_config.rx_timeout;
                spin_lock_irqsave(&info->lock, flags);
 
@@ -1657,9 +1648,9 @@ static int set_esp_config(struct esp_struct * info,
  *         release the bus after transmitting. This must be done when
  *         the transmit shift register is empty, not be done when the
  *         transmit holding register is empty.  This functionality
- *         allows an RS485 driver to be written in user space. 
+ *         allows an RS485 driver to be written in user space.
  */
-static int get_lsr_info(struct esp_struct * info, unsigned int __user *value)
+static int get_lsr_info(struct esp_struct *info, unsigned int __user *value)
 {
        unsigned char status;
        unsigned int result;
@@ -1670,17 +1661,17 @@ static int get_lsr_info(struct esp_struct * info, unsigned int __user *value)
        status = serial_in(info, UART_ESI_STAT1);
        spin_unlock_irqrestore(&info->lock, flags);
        result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
-       return put_user(result,value);
+       return put_user(result, value);
 }
 
 
 static int esp_tiocmget(struct tty_struct *tty, struct file *file)
 {
-       struct esp_struct * info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        unsigned char control, status;
        unsigned long flags;
 
-       if (serial_paranoia_check(info, tty->name, __FUNCTION__))
+       if (serial_paranoia_check(info, tty->name, __func__))
                return -ENODEV;
        if (tty->flags & (1 << TTY_IO_ERROR))
                return -EIO;
@@ -1703,10 +1694,10 @@ static int esp_tiocmget(struct tty_struct *tty, struct file *file)
 static int esp_tiocmset(struct tty_struct *tty, struct file *file,
                        unsigned int set, unsigned int clear)
 {
-       struct esp_struct * info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        unsigned long flags;
 
-       if (serial_paranoia_check(info, tty->name, __FUNCTION__))
+       if (serial_paranoia_check(info, tty->name, __func__))
                return -ENODEV;
        if (tty->flags & (1 << TTY_IO_ERROR))
                return -EIO;
@@ -1736,9 +1727,9 @@ static int esp_tiocmset(struct tty_struct *tty, struct file *file,
  */
 static void esp_break(struct tty_struct *tty, int break_state)
 {
-       struct esp_struct * info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        unsigned long flags;
-       
+
        if (serial_paranoia_check(info, tty->name, "esp_break"))
                return;
 
@@ -1758,14 +1749,15 @@ static void esp_break(struct tty_struct *tty, int break_state)
        }
 }
 
-static int rs_ioctl(struct tty_struct *tty, struct file * file,
+static int rs_ioctl(struct tty_struct *tty, struct file *file,
                    unsigned int cmd, unsigned long arg)
 {
-       struct esp_struct * info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        struct async_icount cprev, cnow;        /* kernel counter temps */
        struct serial_icounter_struct __user *p_cuser;  /* user space */
        void __user *argp = (void __user *)arg;
        unsigned long flags;
+       int ret;
 
        if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
                return -ENODEV;
@@ -1778,97 +1770,93 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
                if (tty->flags & (1 << TTY_IO_ERROR))
                    return -EIO;
        }
-       
-       switch (cmd) {
-               case TIOCGSERIAL:
-                       return get_serial_info(info, argp);
-               case TIOCSSERIAL:
-                       return set_serial_info(info, argp);
-               case TIOCSERCONFIG:
-                       /* do not reconfigure after initial configuration */
-                       return 0;
-
-               case TIOCSERGWILD:
-                       return put_user(0L, (unsigned long __user *)argp);
 
-               case TIOCSERGETLSR: /* Get line status register */
-                           return get_lsr_info(info, argp);
-
-               case TIOCSERSWILD:
-                       if (!capable(CAP_SYS_ADMIN))
-                               return -EPERM;
-                       return 0;
-
-               /*
-                * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
-                * - mask passed in arg for lines of interest
-                *   (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
-                * Caller should use TIOCGICOUNT to see which one it was
-                */
-                case TIOCMIWAIT:
+       switch (cmd) {
+       case TIOCGSERIAL:
+               return get_serial_info(info, argp);
+       case TIOCSSERIAL:
+               lock_kernel();
+               ret = set_serial_info(info, argp);
+               unlock_kernel();
+               return ret;
+       case TIOCSERGWILD:
+               return put_user(0L, (unsigned long __user *)argp);
+       case TIOCSERGETLSR: /* Get line status register */
+               return get_lsr_info(info, argp);
+       case TIOCSERSWILD:
+               if (!capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+               return 0;
+       /*
+        * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
+        * - mask passed in arg for lines of interest
+        *   (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
+        * Caller should use TIOCGICOUNT to see which one it was
+        */
+       case TIOCMIWAIT:
+               spin_lock_irqsave(&info->lock, flags);
+               cprev = info->icount;   /* note the counters on entry */
+               spin_unlock_irqrestore(&info->lock, flags);
+               while (1) {
+                       /* FIXME: convert to new style wakeup */
+                       interruptible_sleep_on(&info->delta_msr_wait);
+                       /* see if a signal did it */
+                       if (signal_pending(current))
+                               return -ERESTARTSYS;
                        spin_lock_irqsave(&info->lock, flags);
-                       cprev = info->icount;   /* note the counters on entry */
+                       cnow = info->icount;    /* atomic copy */
                        spin_unlock_irqrestore(&info->lock, flags);
-                       while (1) {
-                               /* FIXME: convert to new style wakeup */
-                               interruptible_sleep_on(&info->delta_msr_wait);
-                               /* see if a signal did it */
-                               if (signal_pending(current))
-                                       return -ERESTARTSYS;
-                               spin_lock_irqsave(&info->lock, flags);
-                               cnow = info->icount;    /* atomic copy */
-                               spin_unlock_irqrestore(&info->lock, flags);
-                               if (cnow.rng == cprev.rng &&
-                                   cnow.dsr == cprev.dsr && 
-                                   cnow.dcd == cprev.dcd &&
-                                   cnow.cts == cprev.cts)
-                                       return -EIO; /* no change => error */
-                               if (((arg & TIOCM_RNG) &&
-                                    (cnow.rng != cprev.rng)) ||
-                                    ((arg & TIOCM_DSR) &&
-                                     (cnow.dsr != cprev.dsr)) ||
-                                    ((arg & TIOCM_CD) &&
-                                     (cnow.dcd != cprev.dcd)) ||
-                                    ((arg & TIOCM_CTS) &&
-                                     (cnow.cts != cprev.cts)) ) {
-                                       return 0;
-                               }
-                               cprev = cnow;
+                       if (cnow.rng == cprev.rng &&
+                           cnow.dsr == cprev.dsr &&
+                           cnow.dcd == cprev.dcd &&
+                           cnow.cts == cprev.cts)
+                               return -EIO; /* no change => error */
+                       if (((arg & TIOCM_RNG) &&
+                            (cnow.rng != cprev.rng)) ||
+                            ((arg & TIOCM_DSR) &&
+                             (cnow.dsr != cprev.dsr)) ||
+                            ((arg & TIOCM_CD) &&
+                             (cnow.dcd != cprev.dcd)) ||
+                            ((arg & TIOCM_CTS) &&
+                             (cnow.cts != cprev.cts))) {
+                               return 0;
                        }
-                       /* NOTREACHED */
-
-               /* 
-                * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
-                * Return: write counters to the user passed counter struct
-                * NB: both 1->0 and 0->1 transitions are counted except for
-                *     RI where only 0->1 is counted.
-                */
-               case TIOCGICOUNT:
-                       spin_lock_irqsave(&info->lock, flags);
-                       cnow = info->icount;
-                       spin_unlock_irqrestore(&info->lock, flags);
-                       p_cuser = argp;
-                       if (put_user(cnow.cts, &p_cuser->cts) ||
-                           put_user(cnow.dsr, &p_cuser->dsr) ||
-                           put_user(cnow.rng, &p_cuser->rng) ||
-                           put_user(cnow.dcd, &p_cuser->dcd))
-                               return -EFAULT;
-
+                       cprev = cnow;
+               }
+               /* NOTREACHED */
+       /*
+        * Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
+        * Return: write counters to the user passed counter struct
+        * NB: both 1->0 and 0->1 transitions are counted except for
+        *     RI where only 0->1 is counted.
+        */
+       case TIOCGICOUNT:
+               spin_lock_irqsave(&info->lock, flags);
+               cnow = info->icount;
+               spin_unlock_irqrestore(&info->lock, flags);
+               p_cuser = argp;
+               if (put_user(cnow.cts, &p_cuser->cts) ||
+                   put_user(cnow.dsr, &p_cuser->dsr) ||
+                   put_user(cnow.rng, &p_cuser->rng) ||
+                   put_user(cnow.dcd, &p_cuser->dcd))
+                       return -EFAULT;
                        return 0;
        case TIOCGHAYESESP:
                return get_esp_config(info, argp);
        case TIOCSHAYESESP:
-               return set_esp_config(info, argp);
-
-               default:
-                       return -ENOIOCTLCMD;
-               }
+               lock_kernel();
+               ret = set_esp_config(info, argp);
+               unlock_kernel();
+               return ret;
+       default:
+               return -ENOIOCTLCMD;
+       }
        return 0;
 }
 
 static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
 {
-       struct esp_struct *info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        unsigned long flags;
 
        change_speed(info);
@@ -1905,32 +1893,33 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
 /*
  * ------------------------------------------------------------
  * rs_close()
- * 
+ *
  * This routine is called when the serial port gets closed.  First, we
  * wait for the last remaining data to be sent.  Then, we unlink its
  * async structure from the interrupt chain if necessary, and we free
  * that IRQ if nothing is left in the chain.
  * ------------------------------------------------------------
  */
-static void rs_close(struct tty_struct *tty, struct file * filp)
+static void rs_close(struct tty_struct *tty, struct file *filp)
 {
-       struct esp_struct * info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        unsigned long flags;
 
        if (!info || serial_paranoia_check(info, tty->name, "rs_close"))
                return;
-       
+
        spin_lock_irqsave(&info->lock, flags);
-       
+
        if (tty_hung_up_p(filp)) {
                DBG_CNT("before DEC-hung");
                goto out;
        }
-       
+
 #ifdef SERIAL_DEBUG_OPEN
-       printk("rs_close ttys%d, count = %d\n", info->line, info->count);
+       printk(KERN_DEBUG "rs_close ttys%d, count = %d\n",
+                                               info->line, info->count);
 #endif
-       if ((tty->count == 1) && (info->count != 1)) {
+       if (tty->count == 1 && info->count != 1) {
                /*
                 * Uh, oh.  tty->count is 1, which means that the tty
                 * structure will be freed.  Info->count should always
@@ -1938,12 +1927,11 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
                 * one, we've got real problems, since it means the
                 * serial port won't be shutdown.
                 */
-               printk("rs_close: bad serial port count; tty->count is 1, "
-                      "info->count is %d\n", info->count);
+               printk(KERN_DEBUG "rs_close: bad serial port count; tty->count is 1, info->count is %d\n", info->count);
                info->count = 1;
        }
        if (--info->count < 0) {
-               printk("rs_close: bad serial port count for ttys%d: %d\n",
+               printk(KERN_ERR "rs_close: bad serial port count for ttys%d: %d\n",
                       info->line, info->count);
                info->count = 0;
        }
@@ -1955,7 +1943,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
 
        spin_unlock_irqrestore(&info->lock, flags);
        /*
-        * Now we wait for the transmit buffer to clear; and we notify 
+        * Now we wait for the transmit buffer to clear; and we notify
         * the line discipline to only process XON/XOFF characters.
         */
        tty->closing = 1;
@@ -1990,16 +1978,14 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
                rs_wait_until_sent(tty, info->timeout);
        }
        shutdown(info);
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       rs_flush_buffer(tty);
        tty_ldisc_flush(tty);
        tty->closing = 0;
        info->tty = NULL;
 
        if (info->blocked_open) {
-               if (info->close_delay) {
+               if (info->close_delay)
                        msleep_interruptible(jiffies_to_msecs(info->close_delay));
-               }
                wake_up_interruptible(&info->open_wait);
        }
        info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
@@ -2012,7 +1998,7 @@ out:
 
 static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
 {
-       struct esp_struct *info = (struct esp_struct *)tty->driver_data;
+       struct esp_struct *info = tty->driver_data;
        unsigned long orig_jiffies, char_time;
        unsigned long flags;
 
@@ -2036,10 +2022,10 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
                msleep_interruptible(jiffies_to_msecs(char_time));
 
                if (signal_pending(current))
-                       break;
+                       return;
 
                if (timeout && time_after(jiffies, orig_jiffies + timeout))
-                       break;
+                       return;
 
                spin_lock_irqsave(&info->lock, flags);
                serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND);
@@ -2054,11 +2040,11 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
  */
 static void esp_hangup(struct tty_struct *tty)
 {
-       struct esp_struct * info = (struct esp_struct *)tty->driver_data;
-       
+       struct esp_struct *info = tty->driver_data;
+
        if (serial_paranoia_check(info, tty->name, "esp_hangup"))
                return;
-       
+
        rs_flush_buffer(tty);
        shutdown(info);
        info->count = 0;
@@ -2072,7 +2058,7 @@ static void esp_hangup(struct tty_struct *tty)
  * esp_open() and friends
  * ------------------------------------------------------------
  */
-static int block_til_ready(struct tty_struct *tty, struct file * filp,
+static int block_til_ready(struct tty_struct *tty, struct file *filp,
                           struct esp_struct *info)
 {
        DECLARE_WAITQUEUE(wait, current);
@@ -2121,11 +2107,11 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
        retval = 0;
        add_wait_queue(&info->open_wait, &wait);
 #ifdef SERIAL_DEBUG_OPEN
-       printk("block_til_ready before block: ttys%d, count = %d\n",
+       printk(KERN_DEBUG "block_til_ready before block: ttys%d, count = %d\n",
               info->line, info->count);
 #endif
        spin_lock_irqsave(&info->lock, flags);
-       if (!tty_hung_up_p(filp)) 
+       if (!tty_hung_up_p(filp))
                info->count--;
        info->blocked_open++;
        while (1) {
@@ -2147,7 +2133,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
                        if (info->flags & ASYNC_HUP_NOTIFY)
                                retval = -EAGAIN;
                        else
-                               retval = -ERESTARTSYS;  
+                               retval = -ERESTARTSYS;
 #else
                        retval = -EAGAIN;
 #endif
@@ -2166,7 +2152,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
                        break;
                }
 #ifdef SERIAL_DEBUG_OPEN
-               printk("block_til_ready blocking: ttys%d, count = %d\n",
+               printk(KERN_DEBUG "block_til_ready blocking: ttys%d, count = %d\n",
                       info->line, info->count);
 #endif
                spin_unlock_irqrestore(&info->lock, flags);
@@ -2180,14 +2166,14 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
        info->blocked_open--;
        spin_unlock_irqrestore(&info->lock, flags);
 #ifdef SERIAL_DEBUG_OPEN
-       printk("block_til_ready after blocking: ttys%d, count = %d\n",
+       printk(KERN_DEBUG "block_til_ready after blocking: ttys%d, count = %d\n",
               info->line, info->count);
 #endif
        if (retval)
                return retval;
        info->flags |= ASYNC_NORMAL_ACTIVE;
        return 0;
-}      
+}
 
 /*
  * This routine is called whenever a serial port is opened.  It
@@ -2195,7 +2181,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
  * the IRQ chain.   It also performs the serial-specific
  * initialization for the tty structure.
  */
-static int esp_open(struct tty_struct *tty, struct file * filp)
+static int esp_open(struct tty_struct *tty, struct file *filp)
 {
        struct esp_struct       *info;
        int                     retval, line;
@@ -2218,7 +2204,7 @@ static int esp_open(struct tty_struct *tty, struct file * filp)
        }
 
 #ifdef SERIAL_DEBUG_OPEN
-       printk("esp_open %s, count = %d\n", tty->name, info->count);
+       printk(KERN_DEBUG "esp_open %s, count = %d\n", tty->name, info->count);
 #endif
        spin_lock_irqsave(&info->lock, flags);
        info->count++;
@@ -2226,7 +2212,7 @@ static int esp_open(struct tty_struct *tty, struct file * filp)
        info->tty = tty;
 
        spin_unlock_irqrestore(&info->lock, flags);
-       
+
        /*
         * Start up serial port
         */
@@ -2237,14 +2223,13 @@ static int esp_open(struct tty_struct *tty, struct file * filp)
        retval = block_til_ready(tty, filp, info);
        if (retval) {
 #ifdef SERIAL_DEBUG_OPEN
-               printk("esp_open returning after block_til_ready with %d\n",
+               printk(KERN_DEBUG "esp_open returning after block_til_ready with %d\n",
                       retval);
 #endif
                return retval;
        }
-
 #ifdef SERIAL_DEBUG_OPEN
-       printk("esp_open %s successful...", tty->name);
+       printk(KERN_DEBUG "esp_open %s successful...", tty->name);
 #endif
        return 0;
 }
@@ -2262,10 +2247,10 @@ static int esp_open(struct tty_struct *tty, struct file * filp)
  * number, and identifies which options were configured into this
  * driver.
  */
-static inline void show_serial_version(void)
+
+static void show_serial_version(void)
 {
-       printk(KERN_INFO "%s version %s (DMA %u)\n",
+       printk(KERN_INFO "%s version %s (DMA %u)\n",
                serial_name, serial_version, dma);
 }
 
@@ -2273,7 +2258,7 @@ static inline void show_serial_version(void)
  * This routine is called by espserial_init() to initialize a specific serial
  * port.
  */
-static inline int autoconfig(struct esp_struct * info)
+static int autoconfig(struct esp_struct *info)
 {
        int port_detected = 0;
        unsigned long flags;
@@ -2349,14 +2334,14 @@ static const struct tty_operations esp_ops = {
 static int __init espserial_init(void)
 {
        int i, offset;
-       struct esp_struct * info;
+       struct esp_struct *info;
        struct esp_struct *last_primary = NULL;
-       int esp[] = {0x100,0x140,0x180,0x200,0x240,0x280,0x300,0x380};
+       int esp[] = { 0x100, 0x140, 0x180, 0x200, 0x240, 0x280, 0x300, 0x380 };
 
        esp_driver = alloc_tty_driver(NR_PORTS);
        if (!esp_driver)
                return -ENOMEM;
-       
+
        for (i = 0; i < NR_PRIMARY; i++) {
                if (irq[i] != 0) {
                        if ((irq[i] < 2) || (irq[i] > 15) || (irq[i] == 6) ||
@@ -2378,20 +2363,20 @@ static int __init espserial_init(void)
 
        if ((flow_off < 1) || (flow_off > 1023))
                flow_off = 1016;
-       
+
        if ((flow_on < 1) || (flow_on > 1023))
                flow_on = 944;
 
        if ((rx_timeout < 0) || (rx_timeout > 255))
                rx_timeout = 128;
-       
+
        if (flow_on >= flow_off)
                flow_on = flow_off - 1;
 
        show_serial_version();
 
        /* Initialize the tty_driver structure */
-       
+
        esp_driver->owner = THIS_MODULE;
        esp_driver->name = "ttyP";
        esp_driver->major = ESP_IN_MAJOR;
@@ -2401,10 +2386,11 @@ static int __init espserial_init(void)
        esp_driver->init_termios = tty_std_termios;
        esp_driver->init_termios.c_cflag =
                B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+       esp_driver->init_termios.c_ispeed = 9600;
+       esp_driver->init_termios.c_ospeed = 9600;
        esp_driver->flags = TTY_DRIVER_REAL_RAW;
        tty_set_operations(esp_driver, &esp_ops);
-       if (tty_register_driver(esp_driver))
-       {
+       if (tty_register_driver(esp_driver)) {
                printk(KERN_ERR "Couldn't register esp serial driver");
                put_tty_driver(esp_driver);
                return 1;
@@ -2412,8 +2398,7 @@ static int __init espserial_init(void)
 
        info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL);
 
-       if (!info)
-       {
+       if (!info) {
                printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n");
                tty_unregister_driver(esp_driver);
                put_tty_driver(esp_driver);
@@ -2476,10 +2461,8 @@ static int __init espserial_init(void)
                        info->stat_flags |= ESP_STAT_NEVER_DMA;
 
                info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL);
-               if (!info)
-               {
-                       printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n"); 
-
+               if (!info) {
+                       printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n");
                        /* allow use of the already detected ports */
                        return 0;
                }
@@ -2503,22 +2486,20 @@ static int __init espserial_init(void)
        return 0;
 }
 
-static void __exit espserial_exit(void) 
+static void __exit espserial_exit(void)
 {
        int e1;
        struct esp_struct *temp_async;
        struct esp_pio_buffer *pio_buf;
 
-       /* printk("Unloading %s: version %s\n", serial_name, serial_version); */
-       if ((e1 = tty_unregister_driver(esp_driver)))
-               printk("SERIAL: failed to unregister serial driver (%d)\n",
-                      e1);
+       e1 = tty_unregister_driver(esp_driver);
+       if (e1)
+               printk(KERN_ERR "esp: failed to unregister driver (%d)\n", e1);
        put_tty_driver(esp_driver);
 
        while (ports) {
-               if (ports->port) {
+               if (ports->port)
                        release_region(ports->port, REGION_SIZE);
-               }
                temp_async = ports->next_port;
                kfree(ports);
                ports = temp_async;
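
The rs_ioctl() rework above does two things at once: it re-indents the switch to the usual kernel style and pushes the big kernel lock down into the two handlers that still rely on it (TIOCSSERIAL and TIOCSHAYESESP). A minimal sketch of that push-down pattern, assuming the 2008-era BKL interface from <linux/smp_lock.h>; every foo_* name and FOO_SETCFG below are hypothetical, not part of the driver above:

/*
 * Sketch only: take lock_kernel()/unlock_kernel() inside the one ioctl
 * handler that still needs it, instead of holding the BKL around the
 * whole ioctl path.
 */
#include <linux/errno.h>
#include <linux/ioctl.h>
#include <linux/smp_lock.h>
#include <linux/tty.h>

#define FOO_SETCFG	_IOW('f', 1, int)	/* hypothetical ioctl number */

static int foo_set_config(void *port, void __user *argp)
{
	return 0;	/* stub: copy_from_user() and apply settings here */
}

static int foo_ioctl(struct tty_struct *tty, struct file *file,
		     unsigned int cmd, unsigned long arg)
{
	int ret;

	switch (cmd) {
	case FOO_SETCFG:
		lock_kernel();	/* only this handler still needs the BKL */
		ret = foo_set_config(tty->driver_data, (void __user *)arg);
		unlock_kernel();
		return ret;
	default:
		return -ENOIOCTLCMD;	/* let the tty core handle the rest */
	}
}
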
index 7ed7da1d99cf9120f238601e794df7845992ff2a..252f73e4859664b79bcb32657445265d943c10bb 100644 (file)
@@ -40,27 +40,27 @@ static int gs_debug;
 #define gs_dprintk(f, str...) /* nothing */
 #endif
 
-#define func_enter() gs_dprintk (GS_DEBUG_FLOW, "gs: enter %s\n", __FUNCTION__)
-#define func_exit()  gs_dprintk (GS_DEBUG_FLOW, "gs: exit  %s\n", __FUNCTION__)
+#define func_enter() gs_dprintk (GS_DEBUG_FLOW, "gs: enter %s\n", __func__)
+#define func_exit()  gs_dprintk (GS_DEBUG_FLOW, "gs: exit  %s\n", __func__)
 
 #define RS_EVENT_WRITE_WAKEUP  1
 
 module_param(gs_debug, int, 0644);
 
 
-void gs_put_char(struct tty_struct * tty, unsigned char ch)
+int gs_put_char(struct tty_struct * tty, unsigned char ch)
 {
        struct gs_port *port;
 
        func_enter (); 
 
-       if (!tty) return;
+       if (!tty) return 0;
 
        port = tty->driver_data;
 
-       if (!port) return;
+       if (!port) return 0;
 
-       if (! (port->flags & ASYNC_INITIALIZED)) return;
+       if (! (port->flags & ASYNC_INITIALIZED)) return 0;
 
        /* Take a lock on the serial tranmit buffer! */
        mutex_lock(& port->port_write_mutex);
@@ -68,7 +68,7 @@ void gs_put_char(struct tty_struct * tty, unsigned char ch)
        if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) {
                /* Sorry, buffer is full, drop character. Update statistics???? -- REW */
                mutex_unlock(&port->port_write_mutex);
-               return;
+               return 0;
        }
 
        port->xmit_buf[port->xmit_head++] = ch;
@@ -77,6 +77,7 @@ void gs_put_char(struct tty_struct * tty, unsigned char ch)
 
        mutex_unlock(&port->port_write_mutex);
        func_exit ();
+       return 1;
 }
 
 
@@ -586,8 +587,7 @@ void gs_close(struct tty_struct * tty, struct file * filp)
 
        port->flags &= ~GS_ACTIVE;
 
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       gs_flush_buffer(tty);
 
        tty_ldisc_flush(tty);
        tty->closing = 0;
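
Both conversions above follow the updated tty put_char contract: the method now reports how many characters it actually queued (1 or 0) instead of returning void, so the tty core can tell when the transmit buffer is full. A minimal sketch of a conforming method; struct foo_port, its ring buffer and FOO_XMIT_SIZE are hypothetical stand-ins for the per-driver state used above:

/* Sketch only: queue one character, return 1 if accepted, 0 if full. */
static int foo_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct foo_port *port = tty->driver_data;
	int ret = 0;

	mutex_lock(&port->write_mutex);
	if (port->xmit_cnt < FOO_XMIT_SIZE - 1) {
		port->xmit_buf[port->xmit_head++] = ch;
		port->xmit_head &= FOO_XMIT_SIZE - 1;	/* power-of-two ring */
		port->xmit_cnt++;
		ret = 1;
	}
	mutex_unlock(&port->write_mutex);
	return ret;
}
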
index 1399971be6890c2167aea675a51b77bfce3f41d5..e7fb0bca3667670f27ae781893bab44b6f51d006 100644 (file)
@@ -308,7 +308,7 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
        if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
                                        PAGE_SIZE, vma->vm_page_prot)) {
                printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
-                       __FUNCTION__);
+                       __func__);
                return -EAGAIN;
        }
 
@@ -748,7 +748,7 @@ int hpet_alloc(struct hpet_data *hdp)
         */
        if (hpet_is_known(hdp)) {
                printk(KERN_DEBUG "%s: duplicate HPET ignored\n",
-                       __FUNCTION__);
+                       __func__);
                return 0;
        }
 
@@ -869,7 +869,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
 
                if (hpet_is_known(hdp)) {
                        printk(KERN_DEBUG "%s: 0x%lx is busy\n",
-                               __FUNCTION__, hdp->hd_phys_address);
+                               __func__, hdp->hd_phys_address);
                        iounmap(hdp->hd_address);
                        return AE_ALREADY_EXISTS;
                }
@@ -886,7 +886,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data)
 
                if (hpet_is_known(hdp)) {
                        printk(KERN_DEBUG "%s: 0x%lx is busy\n",
-                               __FUNCTION__, hdp->hd_phys_address);
+                               __func__, hdp->hd_phys_address);
                        iounmap(hdp->hd_address);
                        return AE_ALREADY_EXISTS;
                }
@@ -925,7 +925,7 @@ static int hpet_acpi_add(struct acpi_device *device)
                return -ENODEV;
 
        if (!data.hd_address || !data.hd_nirqs) {
-               printk("%s: no address or irqs in _CRS\n", __FUNCTION__);
+               printk("%s: no address or irqs in _CRS\n", __func__);
                return -ENODEV;
        }
 
index d5a752da322f855261a06c9aacb93636fb57d1f9..59c6f9ab94e4b279700ff1f50f1f39ae02b54fa2 100644 (file)
@@ -246,7 +246,7 @@ static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to)
 {
        int remaining = (int)(hp->inbuf_end - read_to);
 
-       pr_debug("%s: %i chars remain\n", __FUNCTION__, remaining);
+       pr_debug("%s: %i chars remain\n", __func__, remaining);
 
        if (read_to != hp->inbuf)
                memmove(hp->inbuf, read_to, remaining);
@@ -365,7 +365,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
        packet.u.version = HVSI_VERSION;
        packet.query_seqno = query_seqno+1;
 
-       pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len);
+       pr_debug("%s: sending %i bytes\n", __func__, packet.len);
        dbg_dump_hex((uint8_t*)&packet, packet.len);
 
        wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
@@ -437,7 +437,7 @@ static struct tty_struct *hvsi_recv_data(struct hvsi_struct *hp,
                return NULL;
 
        if (overflow > 0) {
-               pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __FUNCTION__);
+               pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __func__);
                datalen = TTY_THRESHOLD_THROTTLE;
        }
 
@@ -448,7 +448,7 @@ static struct tty_struct *hvsi_recv_data(struct hvsi_struct *hp,
                 * we still have more data to deliver, so we need to save off the
                 * overflow and send it later
                 */
-               pr_debug("%s: deferring overflow\n", __FUNCTION__);
+               pr_debug("%s: deferring overflow\n", __func__);
                memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow);
                hp->n_throttle = overflow;
        }
@@ -474,11 +474,11 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip,
 
        chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ);
        if (chunklen == 0) {
-               pr_debug("%s: 0-length read\n", __FUNCTION__);
+               pr_debug("%s: 0-length read\n", __func__);
                return 0;
        }
 
-       pr_debug("%s: got %i bytes\n", __FUNCTION__, chunklen);
+       pr_debug("%s: got %i bytes\n", __func__, chunklen);
        dbg_dump_hex(hp->inbuf_end, chunklen);
 
        hp->inbuf_end += chunklen;
@@ -495,7 +495,7 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip,
                        continue;
                }
 
-               pr_debug("%s: handling %i-byte packet\n", __FUNCTION__,
+               pr_debug("%s: handling %i-byte packet\n", __func__,
                                len_packet(packet));
                dbg_dump_packet(packet);
 
@@ -526,7 +526,7 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip,
                packet += len_packet(packet);
 
                if (*hangup || *handshake) {
-                       pr_debug("%s: hangup or handshake\n", __FUNCTION__);
+                       pr_debug("%s: hangup or handshake\n", __func__);
                        /*
                         * we need to send the hangup now before receiving any more data.
                         * If we get "data, hangup, data", we can't deliver the second
@@ -543,7 +543,7 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip,
 
 static void hvsi_send_overflow(struct hvsi_struct *hp)
 {
-       pr_debug("%s: delivering %i bytes overflow\n", __FUNCTION__,
+       pr_debug("%s: delivering %i bytes overflow\n", __func__,
                        hp->n_throttle);
 
        hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle);
@@ -563,7 +563,7 @@ static irqreturn_t hvsi_interrupt(int irq, void *arg)
        unsigned long flags;
        int again = 1;
 
-       pr_debug("%s\n", __FUNCTION__);
+       pr_debug("%s\n", __func__);
 
        while (again) {
                spin_lock_irqsave(&hp->lock, flags);
@@ -647,7 +647,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
        packet.seqno = atomic_inc_return(&hp->seqno);
        packet.verb = verb;
 
-       pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len);
+       pr_debug("%s: sending %i bytes\n", __func__, packet.len);
        dbg_dump_hex((uint8_t*)&packet, packet.len);
 
        wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
@@ -674,7 +674,7 @@ static int hvsi_get_mctrl(struct hvsi_struct *hp)
                return ret;
        }
 
-       pr_debug("%s: mctrl 0x%x\n", __FUNCTION__, hp->mctrl);
+       pr_debug("%s: mctrl 0x%x\n", __func__, hp->mctrl);
 
        return 0;
 }
@@ -694,7 +694,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
        if (mctrl & TIOCM_DTR)
                packet.word = HVSI_TSDTR;
 
-       pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len);
+       pr_debug("%s: sending %i bytes\n", __func__, packet.len);
        dbg_dump_hex((uint8_t*)&packet, packet.len);
 
        wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
@@ -790,7 +790,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
        packet.len = 6;
        packet.verb = VSV_CLOSE_PROTOCOL;
 
-       pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len);
+       pr_debug("%s: sending %i bytes\n", __func__, packet.len);
        dbg_dump_hex((uint8_t*)&packet, packet.len);
 
        hvc_put_chars(hp->vtermno, (char *)&packet, packet.len);
@@ -803,7 +803,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp)
        int line = tty->index;
        int ret;
 
-       pr_debug("%s\n", __FUNCTION__);
+       pr_debug("%s\n", __func__);
 
        if (line < 0 || line >= hvsi_count)
                return -ENODEV;
@@ -868,7 +868,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp)
        struct hvsi_struct *hp = tty->driver_data;
        unsigned long flags;
 
-       pr_debug("%s\n", __FUNCTION__);
+       pr_debug("%s\n", __func__);
 
        if (tty_hung_up_p(filp))
                return;
@@ -920,7 +920,7 @@ static void hvsi_hangup(struct tty_struct *tty)
        struct hvsi_struct *hp = tty->driver_data;
        unsigned long flags;
 
-       pr_debug("%s\n", __FUNCTION__);
+       pr_debug("%s\n", __func__);
 
        spin_lock_irqsave(&hp->lock, flags);
 
@@ -942,7 +942,7 @@ static void hvsi_push(struct hvsi_struct *hp)
        n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf);
        if (n > 0) {
                /* success */
-               pr_debug("%s: wrote %i chars\n", __FUNCTION__, n);
+               pr_debug("%s: wrote %i chars\n", __func__, n);
                hp->n_outbuf = 0;
        } else if (n == -EIO) {
                __set_state(hp, HVSI_FSP_DIED);
@@ -965,7 +965,7 @@ static void hvsi_write_worker(struct work_struct *work)
 
        spin_lock_irqsave(&hp->lock, flags);
 
-       pr_debug("%s: %i chars in buffer\n", __FUNCTION__, hp->n_outbuf);
+       pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);
 
        if (!is_open(hp)) {
                /*
@@ -983,7 +983,7 @@ static void hvsi_write_worker(struct work_struct *work)
                schedule_delayed_work(&hp->writer, 10);
        else {
 #ifdef DEBUG
-               pr_debug("%s: outbuf emptied after %li jiffies\n", __FUNCTION__,
+               pr_debug("%s: outbuf emptied after %li jiffies\n", __func__,
                                jiffies - start_j);
                start_j = 0;
 #endif /* DEBUG */
@@ -1020,11 +1020,11 @@ static int hvsi_write(struct tty_struct *tty,
 
        spin_lock_irqsave(&hp->lock, flags);
 
-       pr_debug("%s: %i chars in buffer\n", __FUNCTION__, hp->n_outbuf);
+       pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf);
 
        if (!is_open(hp)) {
                /* we're either closing or not yet open; don't accept data */
-               pr_debug("%s: not open\n", __FUNCTION__);
+               pr_debug("%s: not open\n", __func__);
                goto out;
        }
 
@@ -1058,7 +1058,7 @@ out:
        spin_unlock_irqrestore(&hp->lock, flags);
 
        if (total != origcount)
-               pr_debug("%s: wanted %i, only wrote %i\n", __FUNCTION__, origcount,
+               pr_debug("%s: wanted %i, only wrote %i\n", __func__, origcount,
                        total);
 
        return total;
@@ -1072,7 +1072,7 @@ static void hvsi_throttle(struct tty_struct *tty)
 {
        struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data;
 
-       pr_debug("%s\n", __FUNCTION__);
+       pr_debug("%s\n", __func__);
 
        h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE);
 }
@@ -1083,7 +1083,7 @@ static void hvsi_unthrottle(struct tty_struct *tty)
        unsigned long flags;
        int shouldflip = 0;
 
-       pr_debug("%s\n", __FUNCTION__);
+       pr_debug("%s\n", __func__);
 
        spin_lock_irqsave(&hp->lock, flags);
        if (hp->n_throttle) {
@@ -1302,7 +1302,7 @@ static int __init hvsi_console_init(void)
                hp->virq = irq_create_mapping(NULL, irq[0]);
                if (hp->virq == NO_IRQ) {
                        printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n",
-                               __FUNCTION__, irq[0]);
+                               __func__, irq[0]);
                        continue;
                }
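
The hvsi.c hunks above are a mechanical substitution of the GCC-specific __FUNCTION__ extension with the C99-standard __func__ predefined identifier. As an illustrative sketch (not part of the commit), the resulting tracing idiom looks like this in any kernel function:

	static void example_open(void)
	{
		/* __func__ expands to the name of the enclosing function */
		pr_debug("%s: entered\n", __func__);
	}

Because __func__ is defined by the language rather than the compiler, the change is behaviour-neutral; it only removes a compiler-specific spelling.
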
 
index 8609b8236c67d42562ada21812b3c757534166b6..b60d425ce8d1502fcadcfba5350770525d9be2dd 100644 (file)
@@ -77,11 +77,16 @@ static int power_status;
 module_param(power_status, bool, 0600);
 MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
 
+static int fan_mult = I8K_FAN_MULT;
+module_param(fan_mult, int, 0);
+MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with");
+
 static int i8k_open_fs(struct inode *inode, struct file *file);
 static int i8k_ioctl(struct inode *, struct file *, unsigned int,
                     unsigned long);
 
 static const struct file_operations i8k_fops = {
+       .owner          = THIS_MODULE,
        .open           = i8k_open_fs,
        .read           = seq_read,
        .llseek         = seq_lseek,
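
The new fan_mult module parameter lets users override the fan-speed multiplier at load time, and i8k_fops gains an .owner field so the proc interface pins the module while the file is open. A minimal, self-contained sketch of declaring such a tunable (hypothetical module; the default value here is a placeholder, the driver itself uses I8K_FAN_MULT):

	#include <linux/module.h>
	#include <linux/moduleparam.h>

	static int fan_mult = 30;		/* placeholder default */
	module_param(fan_mult, int, 0);		/* perm 0: set at load time only, not via sysfs */
	MODULE_PARM_DESC(fan_mult, "Factor to multiply fan speed with");

Loading with something like "modprobe i8k fan_mult=22" then takes effect in i8k_get_fan_speed(), as the next hunk shows.
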
@@ -238,7 +243,7 @@ static int i8k_get_fan_speed(int fan)
        struct smm_regs regs = { .eax = I8K_SMM_GET_SPEED, };
 
        regs.ebx = fan & 0xff;
-       return i8k_smm(&regs) ? : (regs.eax & 0xffff) * I8K_FAN_MULT;
+       return i8k_smm(&regs) ? : (regs.eax & 0xffff) * fan_mult;
 }
 
 /*
@@ -554,13 +559,10 @@ static int __init i8k_init(void)
                return -ENODEV;
 
        /* Register the proc entry */
-       proc_i8k = create_proc_entry("i8k", 0, NULL);
+       proc_i8k = proc_create("i8k", 0, NULL, &i8k_fops);
        if (!proc_i8k)
                return -ENOENT;
 
-       proc_i8k->proc_fops = &i8k_fops;
-       proc_i8k->owner = THIS_MODULE;
-
        printk(KERN_INFO
               "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n",
               I8K_VERSION);
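
create_proc_entry() followed by manual assignment of proc_fops and owner is collapsed into a single proc_create() call; the usual rationale is that the two-step form left a window in which the /proc entry was visible before its file operations were set. A sketch of the consolidated registration, mirroring the hunk and assuming the i8k_fops shown earlier:

	proc_i8k = proc_create("i8k", 0, NULL, &i8k_fops);	/* NULL parent = /proc root */
	if (!proc_i8k)
		return -ENOENT;
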
index 61ef013b84457887fdf803c0e70fe75b713ff6fb..3601017f58cfdd936b490fc69936ab2973a864f5 100644 (file)
@@ -53,7 +53,7 @@ static int ii2Safe;         // Safe I/O address for delay routine
 
 static int iiDelayed;  // Set when the iiResetDelay function is
                                                        // called. Cleared when ANY board is reset.
-static rwlock_t Dl_spinlock;
+static DEFINE_RWLOCK(Dl_spinlock);
 
 //********
 //* Code *
@@ -82,7 +82,6 @@ static rwlock_t Dl_spinlock;
 static void
 iiEllisInit(void)
 {
-       LOCK_INIT(&Dl_spinlock);
 }
 
 //******************************************************************************
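
Dl_spinlock is now declared with DEFINE_RWLOCK(), which initializes the lock at compile time, so the runtime LOCK_INIT() call in iiEllisInit() can simply be dropped. An illustrative comparison of the two styles (generic sketch, not driver code):

	#include <linux/spinlock.h>

	static DEFINE_RWLOCK(example_lock);	/* statically declared and initialized */

	static rwlock_t embedded_lock;		/* runtime init, for locks inside dynamic objects */
	static void example_setup(void)
	{
		rwlock_init(&embedded_lock);
	}
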
@@ -132,7 +131,7 @@ iiSetAddress( i2eBordStrPtr pB, int address, delayFunc_t delay )
                || (address & 0x7)
                )
        {
-               COMPLETE(pB,I2EE_BADADDR);
+               I2_COMPLETE(pB, I2EE_BADADDR);
        }
 
        // Initialize accelerators
@@ -152,7 +151,7 @@ iiSetAddress( i2eBordStrPtr pB, int address, delayFunc_t delay )
        pB->i2eValid = I2E_MAGIC;
        pB->i2eState = II_STATE_COLD;
 
-       COMPLETE(pB, I2EE_GOOD);
+       I2_COMPLETE(pB, I2EE_GOOD);
 }
 
 //******************************************************************************
@@ -177,12 +176,12 @@ iiReset(i2eBordStrPtr pB)
        // Magic number should be set, else even the address is suspect
        if (pB->i2eValid != I2E_MAGIC)
        {
-               COMPLETE(pB, I2EE_BADMAGIC);
+               I2_COMPLETE(pB, I2EE_BADMAGIC);
        }
 
-       OUTB(pB->i2eBase + FIFO_RESET, 0);  // Any data will do
+       outb(0, pB->i2eBase + FIFO_RESET);  /* Any data will do */
        iiDelay(pB, 50);                    // Pause between resets
-       OUTB(pB->i2eBase + FIFO_RESET, 0);  // Second reset
+       outb(0, pB->i2eBase + FIFO_RESET);  /* Second reset */
 
        // We must wait before even attempting to read anything from the FIFO: the
        // board's P.O.S.T may actually attempt to read and write its end of the
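
Note that the argument order flips when the private OUTB(port, value) wrapper goes away: the standard accessor is outb(value, port). A short sketch of the converted reset sequence, reusing the names from the hunk above:

	/* old: OUTB(pB->i2eBase + FIFO_RESET, 0); */
	outb(0, pB->i2eBase + FIFO_RESET);	/* value first, port second */
	iiDelay(pB, 50);			/* pause between the two resets */
	outb(0, pB->i2eBase + FIFO_RESET);

Getting the order wrong would still compile but write to the wrong port, which is why the conversion is worth calling out.
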
@@ -203,7 +202,7 @@ iiReset(i2eBordStrPtr pB)
        // Ensure anything which would have been of use to standard loadware is
        // blanked out, since board has now forgotten everything!.
 
-       pB->i2eUsingIrq = IRQ_UNDEFINED; // Not set up to use an interrupt yet
+       pB->i2eUsingIrq = I2_IRQ_UNDEFINED; /* to not use an interrupt so far */
        pB->i2eWaitingForEmptyFifo = 0;
        pB->i2eOutMailWaiting = 0;
        pB->i2eChannelPtr = NULL;
@@ -215,7 +214,7 @@ iiReset(i2eBordStrPtr pB)
        pB->i2eFatalTrap = NULL;
        pB->i2eFatal = 0;
 
-       COMPLETE(pB, I2EE_GOOD);
+       I2_COMPLETE(pB, I2EE_GOOD);
 }
 
 //******************************************************************************
@@ -235,14 +234,14 @@ static int
 iiResetDelay(i2eBordStrPtr pB)
 {
        if (pB->i2eValid != I2E_MAGIC) {
-               COMPLETE(pB, I2EE_BADMAGIC);
+               I2_COMPLETE(pB, I2EE_BADMAGIC);
        }
        if (pB->i2eState != II_STATE_RESET) {
-               COMPLETE(pB, I2EE_BADSTATE);
+               I2_COMPLETE(pB, I2EE_BADSTATE);
        }
        iiDelay(pB,2000);       /* Now we wait for two seconds. */
        iiDelayed = 1;          /* Delay has been called: ok to initialize */
-       COMPLETE(pB, I2EE_GOOD);
+       I2_COMPLETE(pB, I2EE_GOOD);
 }
 
 //******************************************************************************
@@ -273,12 +272,12 @@ iiInitialize(i2eBordStrPtr pB)
 
        if (pB->i2eValid != I2E_MAGIC)
        {
-               COMPLETE(pB, I2EE_BADMAGIC);
+               I2_COMPLETE(pB, I2EE_BADMAGIC);
        }
 
        if (pB->i2eState != II_STATE_RESET || !iiDelayed)
        {
-               COMPLETE(pB, I2EE_BADSTATE);
+               I2_COMPLETE(pB, I2EE_BADSTATE);
        }
 
        // In case there is a failure short of our completely reading the power-up
@@ -291,13 +290,12 @@ iiInitialize(i2eBordStrPtr pB)
        for (itemp = 0; itemp < sizeof(porStr); itemp++)
        {
                // We expect the entire message is ready.
-               if (HAS_NO_INPUT(pB))
-               {
+               if (!I2_HAS_INPUT(pB)) {
                        pB->i2ePomSize = itemp;
-                       COMPLETE(pB, I2EE_PORM_SHORT);
+                       I2_COMPLETE(pB, I2EE_PORM_SHORT);
                }
 
-               pB->i2ePom.c[itemp] = c = BYTE_FROM(pB);
+               pB->i2ePom.c[itemp] = c = inb(pB->i2eData);
 
                // We check the magic numbers as soon as they are supposed to be read
                // (rather than after) to minimize effect of reading something we
@@ -306,22 +304,22 @@ iiInitialize(i2eBordStrPtr pB)
                                (itemp == POR_2_INDEX && c != POR_MAGIC_2))
                {
                        pB->i2ePomSize = itemp+1;
-                       COMPLETE(pB, I2EE_BADMAGIC);
+                       I2_COMPLETE(pB, I2EE_BADMAGIC);
                }
        }
 
        pB->i2ePomSize = itemp;
 
        // Ensure that this was all the data...
-       if (HAS_INPUT(pB))
-               COMPLETE(pB, I2EE_PORM_LONG);
+       if (I2_HAS_INPUT(pB))
+               I2_COMPLETE(pB, I2EE_PORM_LONG);
 
        // For now, we'll fail to initialize if P.O.S.T reports bad chip mapper:
        // Implying we will not be able to download any code either:  That's ok: the
        // condition is pretty explicit.
        if (pB->i2ePom.e.porDiag1 & POR_BAD_MAPPER)
        {
-               COMPLETE(pB, I2EE_POSTERR);
+               I2_COMPLETE(pB, I2EE_POSTERR);
        }
 
        // Determine anything which must be done differently depending on the family
@@ -332,7 +330,7 @@ iiInitialize(i2eBordStrPtr pB)
 
                pB->i2eFifoStyle   = FIFO_II;
                pB->i2eFifoSize    = 512;     // 512 bytes, always
-               pB->i2eDataWidth16 = NO;
+               pB->i2eDataWidth16 = false;
 
                pB->i2eMaxIrq = 15;     // Because board cannot tell us it is in an 8-bit
                                                        // slot, we do allow it to be done (documentation!)
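
The driver-private YES/NO macros are retired in favour of the kernel's true/false (the i2ellis.h hunks further down delete the definitions). Whether the i2eDataWidth16 field itself was retyped to bool is not visible in these hunks; the sketch below only shows the call-site style the change enables, using names from this file:

	pB->i2eDataWidth16 = false;		/* was: = NO  */
	if (pB->i2eDataWidth16)			/* was: == YES */
		pB->i2eWriteBuf = iiWriteBuf16;	/* pick the 16-bit helpers */
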
@@ -354,7 +352,7 @@ iiInitialize(i2eBordStrPtr pB)
                        // should always be consistent for IntelliPort-II.  Ditto below...
                        if (pB->i2ePom.e.porPorts1 != 4)
                        {
-                               COMPLETE(pB, I2EE_INCONSIST);
+                               I2_COMPLETE(pB, I2EE_INCONSIST);
                        }
                        break;
 
@@ -364,7 +362,7 @@ iiInitialize(i2eBordStrPtr pB)
                        pB->i2eChannelMap[0] = 0xff;  // Eight port
                        if (pB->i2ePom.e.porPorts1 != 8)
                        {
-                               COMPLETE(pB, I2EE_INCONSIST);
+                               I2_COMPLETE(pB, I2EE_INCONSIST);
                        }
                        break;
 
@@ -373,7 +371,7 @@ iiInitialize(i2eBordStrPtr pB)
                        pB->i2eChannelMap[0] = 0x3f;  // Six Port
                        if (pB->i2ePom.e.porPorts1 != 6)
                        {
-                               COMPLETE(pB, I2EE_INCONSIST);
+                               I2_COMPLETE(pB, I2EE_INCONSIST);
                        }
                        break;
                }
@@ -402,7 +400,7 @@ iiInitialize(i2eBordStrPtr pB)
 
                if (itemp < 8 || itemp > 15)
                {
-                       COMPLETE(pB, I2EE_INCONSIST);
+                       I2_COMPLETE(pB, I2EE_INCONSIST);
                }
                pB->i2eFifoSize = (1 << itemp);
 
@@ -450,26 +448,26 @@ iiInitialize(i2eBordStrPtr pB)
                switch (pB->i2ePom.e.porBus & (POR_BUS_SLOT16 | POR_BUS_DIP16) )
                {
                case POR_BUS_SLOT16 | POR_BUS_DIP16:
-                       pB->i2eDataWidth16 = YES;
+                       pB->i2eDataWidth16 = true;
                        pB->i2eMaxIrq = 15;
                        break;
 
                case POR_BUS_SLOT16:
-                       pB->i2eDataWidth16 = NO;
+                       pB->i2eDataWidth16 = false;
                        pB->i2eMaxIrq = 15;
                        break;
 
                case 0:
                case POR_BUS_DIP16:     // In an 8-bit slot, DIP switch don't care.
                default:
-                       pB->i2eDataWidth16 = NO;
+                       pB->i2eDataWidth16 = false;
                        pB->i2eMaxIrq = 7;
                        break;
                }
                break;   // POR_ID_FIIEX case
 
        default:    // Unknown type of board
-               COMPLETE(pB, I2EE_BAD_FAMILY);
+               I2_COMPLETE(pB, I2EE_BAD_FAMILY);
                break;
        }  // End the switch based on family
 
@@ -483,17 +481,14 @@ iiInitialize(i2eBordStrPtr pB)
        {
        case POR_BUS_T_ISA:
        case POR_BUS_T_UNK:  // If the type of bus is undeclared, assume ok.
-               pB->i2eChangeIrq = YES;
-               break;
        case POR_BUS_T_MCA:
        case POR_BUS_T_EISA:
-               pB->i2eChangeIrq = NO;
                break;
        default:
-               COMPLETE(pB, I2EE_BADBUS);
+               I2_COMPLETE(pB, I2EE_BADBUS);
        }
 
-       if (pB->i2eDataWidth16 == YES)
+       if (pB->i2eDataWidth16)
        {
                pB->i2eWriteBuf  = iiWriteBuf16;
                pB->i2eReadBuf   = iiReadBuf16;
@@ -529,7 +524,7 @@ iiInitialize(i2eBordStrPtr pB)
                break;
 
        default:
-               COMPLETE(pB, I2EE_INCONSIST);
+               I2_COMPLETE(pB, I2EE_INCONSIST);
        }
 
        // Initialize state information.
@@ -549,7 +544,7 @@ iiInitialize(i2eBordStrPtr pB)
        // Everything is ok now, return with good status/
 
        pB->i2eValid = I2E_MAGIC;
-       COMPLETE(pB, I2EE_GOOD);
+       I2_COMPLETE(pB, I2EE_GOOD);
 }
 
 //******************************************************************************
@@ -658,7 +653,7 @@ ii2DelayIO(unsigned int mseconds)
        while(mseconds--) {
                int i = ii2DelValue;
                while ( i-- ) {
-                       INB ( ii2Safe );
+                       inb(ii2Safe);
                }
        }
 }
@@ -709,11 +704,11 @@ iiWriteBuf16(i2eBordStrPtr pB, unsigned char *address, int count)
 {
        // Rudimentary sanity checking here.
        if (pB->i2eValid != I2E_MAGIC)
-               COMPLETE(pB, I2EE_INVALID);
+               I2_COMPLETE(pB, I2EE_INVALID);
 
-       OUTSW ( pB->i2eData, address, count);
+       I2_OUTSW(pB->i2eData, address, count);
 
-       COMPLETE(pB, I2EE_GOOD);
+       I2_COMPLETE(pB, I2EE_GOOD);
 }
 
 //******************************************************************************
@@ -738,11 +733,11 @@ iiWriteBuf8(i2eBordStrPtr pB, unsigned char *address, int count)
 {
        /* Rudimentary sanity checking here */
        if (pB->i2eValid != I2E_MAGIC)
-               COMPLETE(pB, I2EE_INVALID);
+               I2_COMPLETE(pB, I2EE_INVALID);
 
-       OUTSB ( pB->i2eData, address, count );
+       I2_OUTSB(pB->i2eData, address, count);
 
-       COMPLETE(pB, I2EE_GOOD);
+       I2_COMPLETE(pB, I2EE_GOOD);
 }
 
 //******************************************************************************
@@ -767,11 +762,11 @@ iiReadBuf16(i2eBordStrPtr pB, unsigned char *address, int count)
 {
        // Rudimentary sanity checking here.
        if (pB->i2eValid != I2E_MAGIC)
-               COMPLETE(pB, I2EE_INVALID);
+               I2_COMPLETE(pB, I2EE_INVALID);
 
-       INSW ( pB->i2eData, address, count);
+       I2_INSW(pB->i2eData, address, count);
 
-       COMPLETE(pB, I2EE_GOOD);
+       I2_COMPLETE(pB, I2EE_GOOD);
 }
 
 //******************************************************************************
@@ -796,11 +791,11 @@ iiReadBuf8(i2eBordStrPtr pB, unsigned char *address, int count)
 {
        // Rudimentary sanity checking here.
        if (pB->i2eValid != I2E_MAGIC)
-               COMPLETE(pB, I2EE_INVALID);
+               I2_COMPLETE(pB, I2EE_INVALID);
 
-       INSB ( pB->i2eData, address, count);
+       I2_INSB(pB->i2eData, address, count);
 
-       COMPLETE(pB, I2EE_GOOD);
+       I2_COMPLETE(pB, I2EE_GOOD);
 }
 
 //******************************************************************************
@@ -820,7 +815,7 @@ iiReadBuf8(i2eBordStrPtr pB, unsigned char *address, int count)
 static unsigned short
 iiReadWord16(i2eBordStrPtr pB)
 {
-       return (unsigned short)( INW(pB->i2eData) );
+       return inw(pB->i2eData);
 }
 
 //******************************************************************************
@@ -842,9 +837,9 @@ iiReadWord8(i2eBordStrPtr pB)
 {
        unsigned short urs;
 
-       urs = INB ( pB->i2eData );
+       urs = inb(pB->i2eData);
 
-       return ( ( INB ( pB->i2eData ) << 8 ) | urs );
+       return (inb(pB->i2eData) << 8) | urs;
 }
 
 //******************************************************************************
@@ -865,7 +860,7 @@ iiReadWord8(i2eBordStrPtr pB)
 static void
 iiWriteWord16(i2eBordStrPtr pB, unsigned short value)
 {
-       WORD_TO(pB, (int)value);
+       outw((int)value, pB->i2eData);
 }
 
 //******************************************************************************
@@ -886,8 +881,8 @@ iiWriteWord16(i2eBordStrPtr pB, unsigned short value)
 static void
 iiWriteWord8(i2eBordStrPtr pB, unsigned short value)
 {
-       BYTE_TO(pB, (char)value);
-       BYTE_TO(pB, (char)(value >> 8) );
+       outb((char)value, pB->i2eData);
+       outb((char)(value >> 8), pB->i2eData);
 }
 
 //******************************************************************************
@@ -939,30 +934,30 @@ iiWaitForTxEmptyII(i2eBordStrPtr pB, int mSdelay)
                // interrupts of any kind.
 
 
-               WRITE_LOCK_IRQSAVE(&Dl_spinlock,flags)
-               OUTB(pB->i2ePointer, SEL_COMMAND);
-               OUTB(pB->i2ePointer, SEL_CMD_SH);
+               write_lock_irqsave(&Dl_spinlock, flags);
+               outb(SEL_COMMAND, pB->i2ePointer);
+               outb(SEL_CMD_SH, pB->i2ePointer);
 
-               itemp = INB(pB->i2eStatus);
+               itemp = inb(pB->i2eStatus);
 
-               OUTB(pB->i2ePointer, SEL_COMMAND);
-               OUTB(pB->i2ePointer, SEL_CMD_UNSH);
+               outb(SEL_COMMAND, pB->i2ePointer);
+               outb(SEL_CMD_UNSH, pB->i2ePointer);
 
                if (itemp & ST_IN_EMPTY)
                {
-                       UPDATE_FIFO_ROOM(pB);
-                       WRITE_UNLOCK_IRQRESTORE(&Dl_spinlock,flags)
-                       COMPLETE(pB, I2EE_GOOD);
+                       I2_UPDATE_FIFO_ROOM(pB);
+                       write_unlock_irqrestore(&Dl_spinlock, flags);
+                       I2_COMPLETE(pB, I2EE_GOOD);
                }
 
-               WRITE_UNLOCK_IRQRESTORE(&Dl_spinlock,flags)
+               write_unlock_irqrestore(&Dl_spinlock, flags);
 
                if (mSdelay-- == 0)
                        break;
 
                iiDelay(pB, 1);      /* 1 mS granularity on checking condition */
        }
-       COMPLETE(pB, I2EE_TXE_TIME);
+       I2_COMPLETE(pB, I2EE_TXE_TIME);
 }
 
 //******************************************************************************
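
The WRITE_LOCK_IRQSAVE()/WRITE_UNLOCK_IRQRESTORE() wrappers from i2os.h are open-coded as the standard rwlock calls; the old wrapper ended in a brace, so some call sites (as in the removed lines above) carried no trailing semicolon, which the conversion normalizes. The resulting idiom, sketched with the lock from this file:

	unsigned long flags;

	write_lock_irqsave(&Dl_spinlock, flags);	/* writer side, local IRQs off */
	/* touch the board registers as one atomic unit */
	write_unlock_irqrestore(&Dl_spinlock, flags);
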
@@ -1002,21 +997,21 @@ iiWaitForTxEmptyIIEX(i2eBordStrPtr pB, int mSdelay)
                // you will generally not want to service interrupts or in any way
                // disrupt the assumptions implicit in the larger context.
 
-               WRITE_LOCK_IRQSAVE(&Dl_spinlock,flags)
+               write_lock_irqsave(&Dl_spinlock, flags);
 
-               if (INB(pB->i2eStatus) & STE_OUT_MT) {
-                       UPDATE_FIFO_ROOM(pB);
-                       WRITE_UNLOCK_IRQRESTORE(&Dl_spinlock,flags)
-                       COMPLETE(pB, I2EE_GOOD);
+               if (inb(pB->i2eStatus) & STE_OUT_MT) {
+                       I2_UPDATE_FIFO_ROOM(pB);
+                       write_unlock_irqrestore(&Dl_spinlock, flags);
+                       I2_COMPLETE(pB, I2EE_GOOD);
                }
-               WRITE_UNLOCK_IRQRESTORE(&Dl_spinlock,flags)
+               write_unlock_irqrestore(&Dl_spinlock, flags);
 
                if (mSdelay-- == 0)
                        break;
 
                iiDelay(pB, 1);      // 1 mS granularity on checking condition
        }
-       COMPLETE(pB, I2EE_TXE_TIME);
+       I2_COMPLETE(pB, I2EE_TXE_TIME);
 }
 
 //******************************************************************************
@@ -1038,8 +1033,8 @@ static int
 iiTxMailEmptyII(i2eBordStrPtr pB)
 {
        int port = pB->i2ePointer;
-       OUTB ( port, SEL_OUTMAIL );
-       return ( INB(port) == 0 );
+       outb(SEL_OUTMAIL, port);
+       return inb(port) == 0;
 }
 
 //******************************************************************************
@@ -1060,7 +1055,7 @@ iiTxMailEmptyII(i2eBordStrPtr pB)
 static int
 iiTxMailEmptyIIEX(i2eBordStrPtr pB)
 {
-       return !(INB(pB->i2eStatus) & STE_OUT_MAIL);
+       return !(inb(pB->i2eStatus) & STE_OUT_MAIL);
 }
 
 //******************************************************************************
@@ -1084,10 +1079,10 @@ iiTrySendMailII(i2eBordStrPtr pB, unsigned char mail)
 {
        int port = pB->i2ePointer;
 
-       OUTB(port, SEL_OUTMAIL);
-       if (INB(port) == 0) {
-               OUTB(port, SEL_OUTMAIL);
-               OUTB(port, mail);
+       outb(SEL_OUTMAIL, port);
+       if (inb(port) == 0) {
+               outb(SEL_OUTMAIL, port);
+               outb(mail, port);
                return 1;
        }
        return 0;
@@ -1112,10 +1107,9 @@ iiTrySendMailII(i2eBordStrPtr pB, unsigned char mail)
 static int
 iiTrySendMailIIEX(i2eBordStrPtr pB, unsigned char mail)
 {
-       if(INB(pB->i2eStatus) & STE_OUT_MAIL) {
+       if (inb(pB->i2eStatus) & STE_OUT_MAIL)
                return 0;
-       }
-       OUTB(pB->i2eXMail, mail);
+       outb(mail, pB->i2eXMail);
        return 1;
 }
 
@@ -1136,9 +1130,9 @@ iiTrySendMailIIEX(i2eBordStrPtr pB, unsigned char mail)
 static unsigned short
 iiGetMailII(i2eBordStrPtr pB)
 {
-       if (HAS_MAIL(pB)) {
-               OUTB(pB->i2ePointer, SEL_INMAIL);
-               return INB(pB->i2ePointer);
+       if (I2_HAS_MAIL(pB)) {
+               outb(SEL_INMAIL, pB->i2ePointer);
+               return inb(pB->i2ePointer);
        } else {
                return NO_MAIL_HERE;
        }
@@ -1161,11 +1155,10 @@ iiGetMailII(i2eBordStrPtr pB)
 static unsigned short
 iiGetMailIIEX(i2eBordStrPtr pB)
 {
-       if (HAS_MAIL(pB)) {
-               return INB(pB->i2eXMail);
-       } else {
+       if (I2_HAS_MAIL(pB))
+               return inb(pB->i2eXMail);
+       else
                return NO_MAIL_HERE;
-       }
 }
 
 //******************************************************************************
@@ -1184,8 +1177,8 @@ iiGetMailIIEX(i2eBordStrPtr pB)
 static void
 iiEnableMailIrqII(i2eBordStrPtr pB)
 {
-       OUTB(pB->i2ePointer, SEL_MASK);
-       OUTB(pB->i2ePointer, ST_IN_MAIL);
+       outb(SEL_MASK, pB->i2ePointer);
+       outb(ST_IN_MAIL, pB->i2ePointer);
 }
 
 //******************************************************************************
@@ -1204,7 +1197,7 @@ iiEnableMailIrqII(i2eBordStrPtr pB)
 static void
 iiEnableMailIrqIIEX(i2eBordStrPtr pB)
 {
-       OUTB(pB->i2eXMask, MX_IN_MAIL);
+       outb(MX_IN_MAIL, pB->i2eXMask);
 }
 
 //******************************************************************************
@@ -1223,8 +1216,8 @@ iiEnableMailIrqIIEX(i2eBordStrPtr pB)
 static void
 iiWriteMaskII(i2eBordStrPtr pB, unsigned char value)
 {
-       OUTB(pB->i2ePointer, SEL_MASK);
-       OUTB(pB->i2ePointer, value);
+       outb(SEL_MASK, pB->i2ePointer);
+       outb(value, pB->i2ePointer);
 }
 
 //******************************************************************************
@@ -1243,7 +1236,7 @@ iiWriteMaskII(i2eBordStrPtr pB, unsigned char value)
 static void
 iiWriteMaskIIEX(i2eBordStrPtr pB, unsigned char value)
 {
-       OUTB(pB->i2eXMask, value);
+       outb(value, pB->i2eXMask);
 }
 
 //******************************************************************************
@@ -1354,9 +1347,8 @@ iiDownloadBlock ( i2eBordStrPtr pB, loadHdrStrPtr pSource, int isStandard)
        // immediately and be harmless, though not strictly necessary.
        itemp = MAX_DLOAD_ACK_TIME/10;
        while (--itemp) {
-               if (HAS_INPUT(pB)) {
-                       switch(BYTE_FROM(pB))
-                       {
+               if (I2_HAS_INPUT(pB)) {
+                       switch (inb(pB->i2eData)) {
                        case LOADWARE_OK:
                                pB->i2eState =
                                        isStandard ? II_STATE_STDLOADED :II_STATE_LOADED;
index 433305062fb8aa4f7c070d4c8ae7e968d766be1b..c88a64e527aa510012e249f1037ae853186b0dbc 100644 (file)
@@ -185,10 +185,6 @@ typedef struct _i2eBordStr
                                                // The highest allowable IRQ, based on the
                                                // slot size.
 
-       unsigned char  i2eChangeIrq;
-                                               // Whether tis valid to change IRQ's
-                                               // ISA = ok, EISA, MicroChannel, no
-
        // Accelerators for various addresses on the board
        int            i2eBase;        // I/O Address of the Board
        int            i2eData;        // From here data transfers happen
@@ -431,12 +427,6 @@ typedef struct _i2eBordStr
 // Manifests for i2eBordStr:
 //-------------------------------------------
 
-#define YES 1
-#define NO  0
-
-#define NULLFUNC (void (*)(void))0
-#define NULLPTR (void *)0
-
 typedef void (*delayFunc_t)(unsigned int);
 
 // i2eValid
@@ -494,8 +484,8 @@ typedef void (*delayFunc_t)(unsigned int);
 
 // i2eUsingIrq
 //
-#define IRQ_UNDEFINED   0x1352  // No valid irq (or polling = 0) can ever
-                                                               // promote to this!
+#define I2_IRQ_UNDEFINED       0x1352  /* No valid irq (or polling = 0) can
+                                        * ever promote to this! */
 //------------------------------------------
 // Handy Macros for i2ellis.c and others
 // Note these are common to -II and -IIEX
@@ -504,41 +494,14 @@ typedef void (*delayFunc_t)(unsigned int);
 // Given a pointer to the board structure, does the input FIFO have any data or
 // not?
 //
-#define HAS_INPUT(pB)      !(INB(pB->i2eStatus) & ST_IN_EMPTY)
-#define HAS_NO_INPUT(pB)   (INB(pB->i2eStatus) & ST_IN_EMPTY)
-
-// Given a pointer to board structure, read a byte or word from the fifo
-//
-#define BYTE_FROM(pB)      (unsigned char)INB(pB->i2eData)
-#define WORD_FROM(pB)      (unsigned short)INW(pB->i2eData)
-
-// Given a pointer to board structure, is there room for any data to be written
-// to the data fifo?
-//
-#define HAS_OUTROOM(pB)    !(INB(pB->i2eStatus) & ST_OUT_FULL)
-#define HAS_NO_OUTROOM(pB) (INB(pB->i2eStatus) & ST_OUT_FULL)
-
-// Given a pointer to board structure, write a single byte to the fifo
-// structure. Note that for 16-bit interfaces, the high order byte is undefined
-// and unknown.
-//
-#define BYTE_TO(pB, c)     OUTB(pB->i2eData,(c))
-
-// Write a word to the fifo structure. For 8-bit interfaces, this may have
-// unknown results.
-//
-#define WORD_TO(pB, c)     OUTW(pB->i2eData,(c))
+#define I2_HAS_INPUT(pB)       !(inb(pB->i2eStatus) & ST_IN_EMPTY)
 
 // Given a pointer to the board structure, is there anything in the incoming
 // mailbox?
 //
-#define HAS_MAIL(pB)       (INB(pB->i2eStatus) & ST_IN_MAIL)
+#define I2_HAS_MAIL(pB)                (inb(pB->i2eStatus) & ST_IN_MAIL)
 
-#define UPDATE_FIFO_ROOM(pB)  (pB)->i2eFifoRemains=(pB)->i2eFifoSize
-
-// Handy macro to round up a number (like the buffer write and read routines do)
-// 
-#define ROUNDUP(number)    (((number)+1) & (~1))
+#define I2_UPDATE_FIFO_ROOM(pB)        ((pB)->i2eFifoRemains = (pB)->i2eFifoSize)
 
 //------------------------------------------
 // Function Declarations for i2ellis.c
@@ -593,20 +556,11 @@ static int iiDownloadBlock(i2eBordStrPtr, loadHdrStrPtr, int);
 //
 static int iiDownloadAll(i2eBordStrPtr, loadHdrStrPtr, int, int);
 
-// Called indirectly always.  Needed externally so the routine might be
-// SPECIFIED as an argument to iiReset()
-//
-//static void ii2DelayIO(unsigned int);                // N-millisecond delay using
-                                                                                       //hardware spin
-//static void ii2DelayTimer(unsigned int);     // N-millisecond delay using Linux
-                                                                                       //timer
-
 // Many functions defined here return True if good, False otherwise, with an
 // error code in i2eError field. Here is a handy macro for setting the error
 // code and returning.
 //
-#define COMPLETE(pB,code) \
-       do { \
+#define I2_COMPLETE(pB,code) do { \
                 pB->i2eError = code; \
                 return (code == I2EE_GOOD);\
        } while (0)
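
The renamed I2_COMPLETE keeps the do { ... } while (0) wrapping so the multi-statement body behaves as a single statement at the call site, and the I2_ prefix presumably keeps it out of the way of identically named macros elsewhere in the tree. A usage sketch (the return hidden inside the macro is a long-standing quirk of this driver, not a recommendation):

	#define I2_COMPLETE(pB, code) do {		\
		(pB)->i2eError = (code);		\
		return ((code) == I2EE_GOOD);		\
	} while (0)

	if (pB->i2eValid != I2E_MAGIC)
		I2_COMPLETE(pB, I2EE_BADMAGIC);		/* expands safely even without braces */
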
index 15fe04e748f4bf2d1954a04a900abc596837f288..8aa6e7ab8d5b503b1d4a5f36290b622bf79e39cc 100644 (file)
@@ -129,7 +129,6 @@ registers, use byte operations only.
 //------------------------------------------------
 //
 #include "ip2types.h"
-#include "i2os.h"    /* For any o.s., compiler, or host-related issues */
 
 //-------------------------------------------------------------------------
 // Manifests for the I/O map:
@@ -644,5 +643,10 @@ typedef union _loadHdrStr
 #define ABS_BIGGEST_BOX 16    // Absolute the most ports per box
 #define ABS_MOST_PORTS  (ABS_MAX_BOXES * ABS_BIGGEST_BOX)
 
+#define I2_OUTSW(port, addr, count)    outsw((port), (addr), (((count)+1)/2))
+#define I2_OUTSB(port, addr, count)    outsb((port), (addr), (((count)+1))&-2)
+#define I2_INSW(port, addr, count)     insw((port), (addr), (((count)+1)/2))
+#define I2_INSB(port, addr, count)     insb((port), (addr), (((count)+1))&-2)
+
 #endif   // I2HW_H
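
The string-I/O helpers move here from the deleted i2os.h with an I2_ prefix. Their count arithmetic deserves a note: the word variants take a byte count, round it up, and halve it, while the byte variants round the byte count up to the next even number. Worked out for an odd count of 5 (illustrative arithmetic only):

	/* I2_OUTSW(port, addr, 5)  ->  outsw(port, addr, (5 + 1) / 2)   == 3 words = 6 bytes */
	/* I2_OUTSB(port, addr, 5)  ->  outsb(port, addr, (5 + 1) & -2)  == 6 bytes           */

Both forms therefore pad an odd-length transfer by one byte, which appears to match the even-byte granularity of the board FIFO.
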
 
index 9c25320121efe88f911901ec38a95e76b66c2a9e..938879cc7bccc66e394a32c034a45fcb0c79be27 100644 (file)
@@ -227,17 +227,17 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh)
        i2ChanStrPtr *ppCh;
        
        if (pB->i2eValid != I2E_MAGIC) {
-               COMPLETE(pB, I2EE_BADMAGIC);
+               I2_COMPLETE(pB, I2EE_BADMAGIC);
        }
        if (pB->i2eState != II_STATE_STDLOADED) {
-               COMPLETE(pB, I2EE_BADSTATE);
+               I2_COMPLETE(pB, I2EE_BADSTATE);
        }
 
-       LOCK_INIT(&pB->read_fifo_spinlock);
-       LOCK_INIT(&pB->write_fifo_spinlock);
-       LOCK_INIT(&pB->Dbuf_spinlock);
-       LOCK_INIT(&pB->Bbuf_spinlock);
-       LOCK_INIT(&pB->Fbuf_spinlock);
+       rwlock_init(&pB->read_fifo_spinlock);
+       rwlock_init(&pB->write_fifo_spinlock);
+       rwlock_init(&pB->Dbuf_spinlock);
+       rwlock_init(&pB->Bbuf_spinlock);
+       rwlock_init(&pB->Fbuf_spinlock);
        
        // NO LOCK needed yet - this is init
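
Unlike the file-scope Dl_spinlock, these locks live inside the per-board and per-channel structures, so they cannot use DEFINE_RWLOCK() and are initialized at runtime once the structures exist; the LOCK_INIT() wrapper simply becomes rwlock_init(). A two-line sketch of the pattern, with field names from the hunk:

	rwlock_init(&pB->read_fifo_spinlock);	/* per-board FIFO lock */
	rwlock_init(&pCh->Ibuf_spinlock);	/* per-channel buffer lock */
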
 
@@ -259,10 +259,10 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh)
                if ( !(pB->i2eChannelMap[index >> 4] & (1 << (index & 0xf)) ) ) {
                        continue;
                }
-               LOCK_INIT(&pCh->Ibuf_spinlock);
-               LOCK_INIT(&pCh->Obuf_spinlock);
-               LOCK_INIT(&pCh->Cbuf_spinlock);
-               LOCK_INIT(&pCh->Pbuf_spinlock);
+               rwlock_init(&pCh->Ibuf_spinlock);
+               rwlock_init(&pCh->Obuf_spinlock);
+               rwlock_init(&pCh->Cbuf_spinlock);
+               rwlock_init(&pCh->Pbuf_spinlock);
                // NO LOCK needed yet - this is init
                // Set up validity flag according to support level
                if (pB->i2eGoodMap[index >> 4] & (1 << (index & 0xf)) ) {
@@ -347,7 +347,7 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh)
        }
        // No need to check for wrap here; this is initialization.
        pB->i2Fbuf_stuff = stuffIndex;
-       COMPLETE(pB, I2EE_GOOD);
+       I2_COMPLETE(pB, I2EE_GOOD);
 
 }
 
@@ -374,7 +374,7 @@ i2DeQueueNeeds(i2eBordStrPtr pB, int type)
 
        case  NEED_INLINE:
 
-               WRITE_LOCK_IRQSAVE(&pB->Dbuf_spinlock,flags);
+               write_lock_irqsave(&pB->Dbuf_spinlock, flags);
                if ( pB->i2Dbuf_stuff != pB->i2Dbuf_strip)
                {
                        queueIndex = pB->i2Dbuf_strip;
@@ -386,12 +386,12 @@ i2DeQueueNeeds(i2eBordStrPtr pB, int type)
                        pB->i2Dbuf_strip = queueIndex;
                        pCh->channelNeeds &= ~NEED_INLINE;
                }
-               WRITE_UNLOCK_IRQRESTORE(&pB->Dbuf_spinlock,flags); 
+               write_unlock_irqrestore(&pB->Dbuf_spinlock, flags);
                break;
 
        case NEED_BYPASS:
 
-               WRITE_LOCK_IRQSAVE(&pB->Bbuf_spinlock,flags);
+               write_lock_irqsave(&pB->Bbuf_spinlock, flags);
                if (pB->i2Bbuf_stuff != pB->i2Bbuf_strip)
                {
                        queueIndex = pB->i2Bbuf_strip;
@@ -403,12 +403,12 @@ i2DeQueueNeeds(i2eBordStrPtr pB, int type)
                        pB->i2Bbuf_strip = queueIndex;
                        pCh->channelNeeds &= ~NEED_BYPASS;
                }
-               WRITE_UNLOCK_IRQRESTORE(&pB->Bbuf_spinlock,flags); 
+               write_unlock_irqrestore(&pB->Bbuf_spinlock, flags);
                break;
        
        case NEED_FLOW:
 
-               WRITE_LOCK_IRQSAVE(&pB->Fbuf_spinlock,flags);
+               write_lock_irqsave(&pB->Fbuf_spinlock, flags);
                if (pB->i2Fbuf_stuff != pB->i2Fbuf_strip)
                {
                        queueIndex = pB->i2Fbuf_strip;
@@ -420,7 +420,7 @@ i2DeQueueNeeds(i2eBordStrPtr pB, int type)
                        pB->i2Fbuf_strip = queueIndex;
                        pCh->channelNeeds &= ~NEED_FLOW;
                }
-               WRITE_UNLOCK_IRQRESTORE(&pB->Fbuf_spinlock,flags); 
+               write_unlock_irqrestore(&pB->Fbuf_spinlock, flags);
                break;
        default:
                printk(KERN_ERR "i2DeQueueNeeds called with bad type:%x\n",type);
@@ -453,7 +453,7 @@ i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type)
 
        case NEED_INLINE:
 
-               WRITE_LOCK_IRQSAVE(&pB->Dbuf_spinlock,flags);
+               write_lock_irqsave(&pB->Dbuf_spinlock, flags);
                if ( !(pCh->channelNeeds & NEED_INLINE) )
                {
                        pCh->channelNeeds |= NEED_INLINE;
@@ -463,12 +463,12 @@ i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type)
                                queueIndex = 0;
                        pB->i2Dbuf_stuff = queueIndex;
                }
-               WRITE_UNLOCK_IRQRESTORE(&pB->Dbuf_spinlock,flags); 
+               write_unlock_irqrestore(&pB->Dbuf_spinlock, flags);
                break;
 
        case NEED_BYPASS:
 
-               WRITE_LOCK_IRQSAVE(&pB->Bbuf_spinlock,flags);
+               write_lock_irqsave(&pB->Bbuf_spinlock, flags);
                if ((type & NEED_BYPASS) && !(pCh->channelNeeds & NEED_BYPASS))
                {
                        pCh->channelNeeds |= NEED_BYPASS;
@@ -478,12 +478,12 @@ i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type)
                                queueIndex = 0;
                        pB->i2Bbuf_stuff = queueIndex;
                } 
-               WRITE_UNLOCK_IRQRESTORE(&pB->Bbuf_spinlock,flags); 
+               write_unlock_irqrestore(&pB->Bbuf_spinlock, flags);
                break;
 
        case NEED_FLOW:
 
-               WRITE_LOCK_IRQSAVE(&pB->Fbuf_spinlock,flags);
+               write_lock_irqsave(&pB->Fbuf_spinlock, flags);
                if ((type & NEED_FLOW) && !(pCh->channelNeeds & NEED_FLOW))
                {
                        pCh->channelNeeds |= NEED_FLOW;
@@ -493,7 +493,7 @@ i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type)
                                queueIndex = 0;
                        pB->i2Fbuf_stuff = queueIndex;
                }
-               WRITE_UNLOCK_IRQRESTORE(&pB->Fbuf_spinlock,flags); 
+               write_unlock_irqrestore(&pB->Fbuf_spinlock, flags);
                break;
 
        case NEED_CREDIT:
@@ -562,9 +562,8 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
        pB = pCh->pMyBord;
 
        // Board must also exist, and THE INTERRUPT COMMAND ALREADY SENT
-       if (pB->i2eValid != I2E_MAGIC || pB->i2eUsingIrq == IRQ_UNDEFINED) {
+       if (pB->i2eValid != I2E_MAGIC || pB->i2eUsingIrq == I2_IRQ_UNDEFINED)
                return -2;
-       }
        // If the board has gone fatal, return bad, and also hit the trap routine if
        // it exists.
        if (pB->i2eFatal) {
@@ -620,13 +619,13 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
                        switch(type) {
                        case PTYPE_INLINE:
                                lock_var_p = &pCh->Obuf_spinlock;
-                               WRITE_LOCK_IRQSAVE(lock_var_p,flags);
+                               write_lock_irqsave(lock_var_p, flags);
                                stuffIndex = pCh->Obuf_stuff;
                                bufroom = pCh->Obuf_strip - stuffIndex;
                                break;
                        case PTYPE_BYPASS:
                                lock_var_p = &pCh->Cbuf_spinlock;
-                               WRITE_LOCK_IRQSAVE(lock_var_p,flags);
+                               write_lock_irqsave(lock_var_p, flags);
                                stuffIndex = pCh->Cbuf_stuff;
                                bufroom = pCh->Cbuf_strip - stuffIndex;
                                break;
@@ -645,7 +644,7 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
                                break; /* from for()- Enough room: goto proceed */
                        }
                        ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize);
-                       WRITE_UNLOCK_IRQRESTORE(lock_var_p, flags);
+                       write_unlock_irqrestore(lock_var_p, flags);
                } else
                        ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize);
 
@@ -747,7 +746,7 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
        {
        case PTYPE_INLINE:
                pCh->Obuf_stuff = stuffIndex;  // Store buffer pointer
-               WRITE_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags); 
+               write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
 
                pB->debugInlineQueued++;
                // Add the channel pointer to list of channels needing service (first
@@ -757,7 +756,7 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
 
        case PTYPE_BYPASS:
                pCh->Cbuf_stuff = stuffIndex;  // Store buffer pointer
-               WRITE_UNLOCK_IRQRESTORE(&pCh->Cbuf_spinlock,flags); 
+               write_unlock_irqrestore(&pCh->Cbuf_spinlock, flags);
 
                pB->debugBypassQueued++;
                // Add the channel pointer to list of channels needing service (first
@@ -840,7 +839,7 @@ i2Input(i2ChanStrPtr pCh)
                count = -1;
                goto i2Input_exit;
        }
-       WRITE_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags);
+       write_lock_irqsave(&pCh->Ibuf_spinlock, flags);
 
        // initialize some accelerators and private copies
        stripIndex = pCh->Ibuf_strip;
@@ -850,7 +849,7 @@ i2Input(i2ChanStrPtr pCh)
        // If buffer is empty or requested data count was 0, (trivial case) return
        // without any further thought.
        if ( count == 0 ) {
-               WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags);
+               write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
                goto i2Input_exit;
        }
        // Adjust for buffer wrap
@@ -891,10 +890,10 @@ i2Input(i2ChanStrPtr pCh)
 
        if ((pCh->sinceLastFlow += count) >= pCh->whenSendFlow) {
                pCh->sinceLastFlow -= pCh->whenSendFlow;
-               WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags);
+               write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
                i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW);
        } else {
-               WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags);
+               write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
        }
 
 i2Input_exit:
@@ -926,7 +925,7 @@ i2InputFlush(i2ChanStrPtr pCh)
 
        ip2trace (CHANN, ITRC_INPUT, 10, 0);
 
-       WRITE_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags);
+       write_lock_irqsave(&pCh->Ibuf_spinlock, flags);
        count = pCh->Ibuf_stuff - pCh->Ibuf_strip;
 
        // Adjust for buffer wrap
@@ -947,10 +946,10 @@ i2InputFlush(i2ChanStrPtr pCh)
        if ( (pCh->sinceLastFlow += count) >= pCh->whenSendFlow )
        {
                pCh->sinceLastFlow -= pCh->whenSendFlow;
-               WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags);
+               write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
                i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW);
        } else {
-               WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags);
+               write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
        }
 
        ip2trace (CHANN, ITRC_INPUT, 19, 1, count);
@@ -979,9 +978,9 @@ i2InputAvailable(i2ChanStrPtr pCh)
 
 
        // initialize some accelerators and private copies
-       READ_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags);
+       read_lock_irqsave(&pCh->Ibuf_spinlock, flags);
        count = pCh->Ibuf_stuff - pCh->Ibuf_strip;
-       READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags);
+       read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
 
        // Adjust for buffer wrap
        if (count < 0)
@@ -1045,9 +1044,9 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count)
        while ( count > 0 ) {
 
                // How much room in output buffer is there?
-               READ_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags);
+               read_lock_irqsave(&pCh->Obuf_spinlock, flags);
                amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1;
-               READ_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags);
+               read_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
                if (amountToMove < 0) {
                        amountToMove += OBUF_SIZE;
                }
@@ -1075,7 +1074,7 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count)
                if ( !(pCh->flush_flags && i2RetryFlushOutput(pCh) ) 
                                && amountToMove > 0 )
                {
-                       WRITE_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags);
+                       write_lock_irqsave(&pCh->Obuf_spinlock, flags);
                        stuffIndex = pCh->Obuf_stuff;
       
                        // Had room to move some data: don't know whether the block size,
@@ -1102,7 +1101,7 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count)
                        }
                        pCh->Obuf_stuff = stuffIndex;
 
-                       WRITE_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags);
+                       write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
 
                        ip2trace (CHANN, ITRC_OUTPUT, 13, 1, stuffIndex );
 
@@ -1352,9 +1351,9 @@ i2OutputFree(i2ChanStrPtr pCh)
        if ( !i2Validate ( pCh ) ) {
                return -1;
        }
-       READ_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags);
+       read_lock_irqsave(&pCh->Obuf_spinlock, flags);
        amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1;
-       READ_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags);
+       read_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
 
        if (amountToMove < 0) {
                amountToMove += OBUF_SIZE;
@@ -1464,11 +1463,11 @@ i2StripFifo(i2eBordStrPtr pB)
 
 //     ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_ENTER, 0 );
 
-       while (HAS_INPUT(pB)) {
+       while (I2_HAS_INPUT(pB)) {
 //             ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 2, 0 );
 
                // Process packet from fifo a one atomic unit
-               WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock,bflags);
+               write_lock_irqsave(&pB->read_fifo_spinlock, bflags);
    
                // The first word (or two bytes) will have channel number and type of
                // packet, possibly other information
@@ -1490,7 +1489,8 @@ i2StripFifo(i2eBordStrPtr pB)
 // sick!
                        if ( ((unsigned int)count) > IBUF_SIZE ) {
                                pB->i2eFatal = 2;
-                               WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags);
+                               write_unlock_irqrestore(&pB->read_fifo_spinlock,
+                                               bflags);
                                return;     /* Bail out ASAP */
                        }
                        // Channel is illegally big ?
@@ -1498,7 +1498,8 @@ i2StripFifo(i2eBordStrPtr pB)
                                (NULL==(pCh = ((i2ChanStrPtr*)pB->i2eChannelPtr)[channel])))
                        {
                                iiReadBuf(pB, junkBuffer, count);
-                               WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags);
+                               write_unlock_irqrestore(&pB->read_fifo_spinlock,
+                                               bflags);
                                break;         /* From switch: ready for next packet */
                        }
 
@@ -1512,14 +1513,15 @@ i2StripFifo(i2eBordStrPtr pB)
                        if(ID_OF(pB->i2eLeadoffWord) == ID_HOT_KEY)
                        {
                                pCh->hotKeyIn = iiReadWord(pB) & 0xff;
-                               WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags);
+                               write_unlock_irqrestore(&pB->read_fifo_spinlock,
+                                               bflags);
                                i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_HOTACK);
                                break;   /* From the switch: ready for next packet */
                        }
 
                        // Normal data! We crudely assume there is room for the data in our
                        // buffer because the board wouldn't have exceeded his credit limit.
-                       WRITE_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,cflags);
+                       write_lock_irqsave(&pCh->Ibuf_spinlock, cflags);
                                                                                                        // We have 2 locks now
                        stuffIndex = pCh->Ibuf_stuff;
                        amountToRead = IBUF_SIZE - stuffIndex;
@@ -1562,8 +1564,9 @@ i2StripFifo(i2eBordStrPtr pB)
 
                        // Update stuff index
                        pCh->Ibuf_stuff = stuffIndex;
-                       WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,cflags);
-                       WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags);
+                       write_unlock_irqrestore(&pCh->Ibuf_spinlock, cflags);
+                       write_unlock_irqrestore(&pB->read_fifo_spinlock,
+                                       bflags);
 
 #ifdef USE_IQ
                        schedule_work(&pCh->tqueue_input);
@@ -1585,7 +1588,8 @@ i2StripFifo(i2eBordStrPtr pB)
 
                        iiReadBuf(pB, cmdBuffer, count);
                        // We can release early with buffer grab
-                       WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags);
+                       write_unlock_irqrestore(&pB->read_fifo_spinlock,
+                                       bflags);
 
                        pc = cmdBuffer;
                        pcLimit = &(cmdBuffer[count]);
@@ -1830,12 +1834,12 @@ i2StripFifo(i2eBordStrPtr pB)
                default: // Neither packet? should be impossible
                        ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 5, 1,
                                PTYPE_OF(pB->i2eLeadoffWord) );
-                       WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,
+                       write_unlock_irqrestore(&pB->read_fifo_spinlock,
                                        bflags);
 
                        break;
                }  // End of switch on type of packets
-       }       //while(board HAS_INPUT)
+       }       /*while(board I2_HAS_INPUT)*/
 
        ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_RETURN, 0 );
 
@@ -1858,7 +1862,7 @@ i2Write2Fifo(i2eBordStrPtr pB, unsigned char *source, int count,int reserve)
 {
        int rc = 0;
        unsigned long flags;
-       WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags);
+       write_lock_irqsave(&pB->write_fifo_spinlock, flags);
        if (!pB->i2eWaitingForEmptyFifo) {
                if (pB->i2eFifoRemains > (count+reserve)) {
                        pB->i2eFifoRemains -= count;
@@ -1867,7 +1871,7 @@ i2Write2Fifo(i2eBordStrPtr pB, unsigned char *source, int count,int reserve)
                        rc =  count;
                }
        }
-       WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags);
+       write_unlock_irqrestore(&pB->write_fifo_spinlock, flags);
        return rc;
 }
 //******************************************************************************
@@ -1898,7 +1902,7 @@ i2StuffFifoBypass(i2eBordStrPtr pB)
        while ( --bailout && notClogged && 
                        (NULL != (pCh = i2DeQueueNeeds(pB,NEED_BYPASS))))
        {
-               WRITE_LOCK_IRQSAVE(&pCh->Cbuf_spinlock,flags);
+               write_lock_irqsave(&pCh->Cbuf_spinlock, flags);
                stripIndex = pCh->Cbuf_strip;
 
                // as long as there are packets for this channel...
@@ -1906,7 +1910,7 @@ i2StuffFifoBypass(i2eBordStrPtr pB)
                while (stripIndex != pCh->Cbuf_stuff) {
                        pRemove = &(pCh->Cbuf[stripIndex]);
                        packetSize = CMD_COUNT_OF(pRemove) + sizeof(i2CmdHeader);
-                       paddedSize = ROUNDUP(packetSize);
+                       paddedSize = roundup(packetSize, 2);
 
                        if (paddedSize > 0) {
                                if ( 0 == i2Write2Fifo(pB, pRemove, paddedSize,0)) {
@@ -1930,7 +1934,7 @@ WriteDBGBuf("BYPS", pRemove, paddedSize);
                // Done with this channel. Move to next, removing this one from 
                // the queue of channels if we cleaned it out (i.e., didn't get clogged.
                pCh->Cbuf_strip = stripIndex;
-               WRITE_UNLOCK_IRQRESTORE(&pCh->Cbuf_spinlock,flags);
+               write_unlock_irqrestore(&pCh->Cbuf_spinlock, flags);
        }  // Either clogged or finished all the work
 
 #ifdef IP2DEBUG_TRACE
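
The private ROUNDUP(n) macro, which rounded a packet size up to the next even number, is replaced by the generic roundup(n, 2) from linux/kernel.h. A quick equivalence check (illustrative arithmetic, not from the commit):

	/* old: ROUNDUP(n)    = ((n) + 1) & ~1        */
	/* new: roundup(n, 2) = (((n) + 1) / 2) * 2   */
	/* n = 7:  old -> 8 & ~1 = 8    new -> (8 / 2) * 2 = 8 */
	/* n = 8:  old -> 9 & ~1 = 8    new -> (9 / 2) * 2 = 8 */
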
@@ -1954,7 +1958,7 @@ static inline void
 i2StuffFifoFlow(i2eBordStrPtr pB)
 {
        i2ChanStrPtr pCh;
-       unsigned short paddedSize               = ROUNDUP(sizeof(flowIn));
+       unsigned short paddedSize = roundup(sizeof(flowIn), 2);
 
        ip2trace (ITRC_NO_PORT, ITRC_SFLOW, ITRC_ENTER, 2,
                pB->i2eFifoRemains, paddedSize );
@@ -2010,7 +2014,7 @@ i2StuffFifoInline(i2eBordStrPtr pB)
        while ( --bailout && notClogged && 
                        (NULL != (pCh = i2DeQueueNeeds(pB,NEED_INLINE))) )
        {
-               WRITE_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags);
+               write_lock_irqsave(&pCh->Obuf_spinlock, flags);
                stripIndex = pCh->Obuf_strip;
 
                ip2trace (CHANN, ITRC_SICMD, 3, 2, stripIndex, pCh->Obuf_stuff );
@@ -2031,7 +2035,7 @@ i2StuffFifoInline(i2eBordStrPtr pB)
                                packetSize = flowsize + sizeof(i2CmdHeader);
                        }
                        flowsize = CREDIT_USAGE(flowsize);
-                       paddedSize = ROUNDUP(packetSize);
+                       paddedSize = roundup(packetSize, 2);
 
                        ip2trace (CHANN, ITRC_SICMD, 4, 2, pB->i2eFifoRemains, paddedSize );
 
@@ -2086,7 +2090,7 @@ WriteDBGBuf("DATA", pRemove, paddedSize);
                // Done with this channel. Move to next, removing this one from the
                // queue of channels if we cleaned it out (i.e., didn't get clogged.
                pCh->Obuf_strip = stripIndex;
-               WRITE_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags);
+               write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
                if ( notClogged )
                {
 
@@ -2190,10 +2194,11 @@ i2ServiceBoard ( i2eBordStrPtr pB )
 
                if (inmail & MB_OUT_STRIPPED) {
                        pB->i2eFifoOutInts++;
-                       WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags);
+                       write_lock_irqsave(&pB->write_fifo_spinlock, flags);
                        pB->i2eFifoRemains = pB->i2eFifoSize;
                        pB->i2eWaitingForEmptyFifo = 0;
-                       WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags);
+                       write_unlock_irqrestore(&pB->write_fifo_spinlock,
+                                       flags);
 
                        ip2trace (ITRC_NO_PORT, ITRC_INTR, 30, 1, pB->i2eFifoRemains );
 
diff --git a/drivers/char/ip2/i2os.h b/drivers/char/ip2/i2os.h
deleted file mode 100644 (file)
index eff9b54..0000000
+++ /dev/null
@@ -1,127 +0,0 @@
-/*******************************************************************************
-*
-*   (c) 1999 by Computone Corporation
-*
-********************************************************************************
-*
-*
-*   PACKAGE:     Linux tty Device Driver for IntelliPort II family of multiport
-*                serial I/O controllers.
-*
-*   DESCRIPTION: Defines, definitions and includes which are heavily dependent
-*                on O/S, host, compiler, etc. This file is tailored for:
-*                 Linux v2.0.0 and later
-*                 Gnu gcc c2.7.2
-*                 80x86 architecture
-*
-*******************************************************************************/
-
-#ifndef I2OS_H    /* To prevent multiple includes */
-#define I2OS_H 1
-
-//-------------------------------------------------
-// Required Includes
-//-------------------------------------------------
-
-#include "ip2types.h"
-#include <asm/io.h>  /* For inb, etc */
-
-//------------------------------------
-// Defines for I/O instructions:
-//------------------------------------
-
-#define INB(port)                inb(port)
-#define OUTB(port,value)         outb((value),(port))
-#define INW(port)                inw(port)
-#define OUTW(port,value)         outw((value),(port))
-#define OUTSW(port,addr,count)   outsw((port),(addr),(((count)+1)/2))
-#define OUTSB(port,addr,count)   outsb((port),(addr),(((count)+1))&-2)
-#define INSW(port,addr,count)    insw((port),(addr),(((count)+1)/2))
-#define INSB(port,addr,count)    insb((port),(addr),(((count)+1))&-2)
-
-//--------------------------------------------
-// Interrupt control
-//--------------------------------------------
-
-#define LOCK_INIT(a)   rwlock_init(a)
-
-#define SAVE_AND_DISABLE_INTS(a,b) { \
-       /* printk("get_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \
-       spin_lock_irqsave(a,b); \
-}
-
-#define RESTORE_INTS(a,b) { \
-       /* printk("rel_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \
-       spin_unlock_irqrestore(a,b); \
-}
-
-#define READ_LOCK_IRQSAVE(a,b) { \
-       /* printk("get_read_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \
-       read_lock_irqsave(a,b); \
-}
-
-#define READ_UNLOCK_IRQRESTORE(a,b) { \
-       /* printk("rel_read_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \
-       read_unlock_irqrestore(a,b); \
-}
-
-#define WRITE_LOCK_IRQSAVE(a,b) { \
-       /* printk("get_write_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \
-       write_lock_irqsave(a,b); \
-}
-
-#define WRITE_UNLOCK_IRQRESTORE(a,b) { \
-       /* printk("rel_write_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \
-       write_unlock_irqrestore(a,b); \
-}
-
-
-//------------------------------------------------------------------------------
-// Hardware-delay loop
-//
-// Probably used in only one place (see i2ellis.c) but this helps keep things
-// together. Note we have unwound the IN instructions. On machines with a
-// reasonable cache, the eight instructions (1 byte each) should fit in cache
-// nicely, and on un-cached machines, the code-fetch would tend not to dominate.
-// Note that cx is shifted so that "count" still reflects the total number of
-// iterations assuming no unwinding.
-//------------------------------------------------------------------------------
-
-//#define  DELAY1MS(port,count,label)
-
-//------------------------------------------------------------------------------
-// Macros to switch to a new stack, saving stack pointers, and to restore the
-// old stack (Used, for example, in i2lib.c) "heap" is the address of some
-// buffer which will become the new stack (working down from highest address).
-// The two words at the two lowest addresses in this stack are for storing the
-// SS and SP.
-//------------------------------------------------------------------------------
-
-//#define  TO_NEW_STACK(heap,size)
-//#define  TO_OLD_STACK(heap)
-
-//------------------------------------------------------------------------------
-// Macros to save the original IRQ vectors and masks, and to patch in new ones.
-//------------------------------------------------------------------------------
-
-//#define  SAVE_IRQ_MASKS(dest)
-//#define  WRITE_IRQ_MASKS(src)
-//#define  SAVE_IRQ_VECTOR(value,dest)
-//#define  WRITE_IRQ_VECTOR(value,src)
-
-//------------------------------------------------------------------------------
-// Macro to copy data from one far pointer to another.
-//------------------------------------------------------------------------------
-
-#define  I2_MOVE_DATA(fpSource,fpDest,count) memmove(fpDest,fpSource,count);
-
-//------------------------------------------------------------------------------
-// Macros to issue eoi's to host interrupt control (IBM AT 8259-style).
-//------------------------------------------------------------------------------
-
-//#define MASTER_EOI
-//#define SLAVE_EOI
-
-#endif   /* I2OS_H */
-
-
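
With i2os.h gone, the compatibility layer it provided is folded into standard kernel APIs throughout the hunks above. A summary of the substitutions as they appear in this diff:

	/* old i2os.h wrapper              ->  direct kernel call                 */
	/* INB(port)                       ->  inb(port)                          */
	/* OUTB(port, value)               ->  outb(value, port)                  */
	/* READ_LOCK_IRQSAVE(l, f)         ->  read_lock_irqsave(l, f)            */
	/* WRITE_LOCK_IRQSAVE(l, f)        ->  write_lock_irqsave(l, f)           */
	/* WRITE_UNLOCK_IRQRESTORE(l, f)   ->  write_unlock_irqrestore(l, f)      */
	/* LOCK_INIT(l)                    ->  rwlock_init(l) or DEFINE_RWLOCK()  */
	/* OUTSW/OUTSB/INSW/INSB           ->  I2_OUTSW/I2_OUTSB/... in i2hw.h    */

Where I2_MOVE_DATA() and the commented-out stack/IRQ macros end up is not shown in these hunks.
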
index b1d6cad84282e3960f974c9d41df0a4ed8c64500..70957acaa960ac3bcdefc2a8faa6e05aa7055502 100644 (file)
  *****************/
 
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 
-static int ip2_read_procmem(char *, char **, off_t, int);
+static const struct file_operations ip2mem_proc_fops;
 static int ip2_read_proc(char *, char **, off_t, int, int *, void * );
 
 /********************/
@@ -168,7 +169,7 @@ static int Fip_firmware_size;
 static int  ip2_open(PTTY, struct file *);
 static void ip2_close(PTTY, struct file *);
 static int  ip2_write(PTTY, const unsigned char *, int);
-static void ip2_putchar(PTTY, unsigned char);
+static int  ip2_putchar(PTTY, unsigned char);
 static void ip2_flush_chars(PTTY);
 static int  ip2_write_room(PTTY);
 static int  ip2_chars_in_buf(PTTY);
@@ -354,14 +355,15 @@ have_requested_irq( char irq )
 /* the driver initialisation function and returns what it returns.            */
 /******************************************************************************/
 #ifdef MODULE
-int
-init_module(void)
+static int __init
+ip2_init_module(void)
 {
 #ifdef IP2DEBUG_INIT
        printk (KERN_DEBUG "Loading module ...\n" );
 #endif
     return 0;
 }
+module_init(ip2_init_module);
 #endif /* MODULE */
 
 /******************************************************************************/
@@ -380,8 +382,8 @@ init_module(void)
 /* driver should be returned since it may be unloaded from memory.            */
 /******************************************************************************/
 #ifdef MODULE
-void
-cleanup_module(void)
+void __exit
+ip2_cleanup_module(void)
 {
        int err;
        int i;
@@ -423,7 +425,7 @@ cleanup_module(void)
        }
        put_tty_driver(ip2_tty_driver);
        unregister_chrdev(IP2_IPL_MAJOR, pcIpl);
-       remove_proc_entry("ip2mem", &proc_root);
+       remove_proc_entry("ip2mem", NULL);
 
        // free memory
        for (i = 0; i < IP2_MAX_BOARDS; i++) {
@@ -451,6 +453,7 @@ cleanup_module(void)
        printk (KERN_DEBUG "IP2 Unloaded\n" );
 #endif
 }
+module_exit(ip2_cleanup_module);
 #endif /* MODULE */
 
 static const struct tty_operations ip2_ops = {
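
The hunks above drop the legacy init_module()/cleanup_module() entry points in favour of static functions registered with module_init()/module_exit(), which also allows the __init/__exit section annotations. A minimal sketch of that skeleton, with placeholder names (example_init/example_exit are not from the driver):

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/module.h>

	static int __init example_init(void)
	{
		printk(KERN_DEBUG "example: loading\n");
		return 0;			/* nonzero would abort the load */
	}

	static void __exit example_exit(void)
	{
		printk(KERN_DEBUG "example: unloading\n");
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");
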
@@ -695,7 +698,7 @@ ip2_loadmain(int *iop, int *irqp, unsigned char *firmware, int firmsize)
                }
        }
        /* Register the read_procmem thing */
-       if (!create_proc_info_entry("ip2mem",0,&proc_root,ip2_read_procmem)) {
+       if (!proc_create("ip2mem",0,NULL,&ip2mem_proc_fops)) {
                printk(KERN_ERR "IP2: failed to register read_procmem\n");
        } else {
 
@@ -1049,9 +1052,9 @@ set_irq( int boardnum, int boardIrq )
         * Write to FIFO; don't bother to adjust fifo capacity for this, since
         * board will respond almost immediately after SendMail hit.
         */
-       WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags);
+       write_lock_irqsave(&pB->write_fifo_spinlock, flags);
        iiWriteBuf(pB, tempCommand, 4);
-       WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags);
+       write_unlock_irqrestore(&pB->write_fifo_spinlock, flags);
        pB->i2eUsingIrq = boardIrq;
        pB->i2eOutMailWaiting |= MB_OUT_STUFFED;
 
@@ -1069,9 +1072,9 @@ set_irq( int boardnum, int boardIrq )
        (CMD_OF(tempCommand))[4] = 64;  // chars
 
        (CMD_OF(tempCommand))[5] = 87;  // HW_TEST
-       WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags);
+       write_lock_irqsave(&pB->write_fifo_spinlock, flags);
        iiWriteBuf(pB, tempCommand, 8);
-       WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags);
+       write_unlock_irqrestore(&pB->write_fifo_spinlock, flags);
 
        CHANNEL_OF(tempCommand) = 0;
        PTYPE_OF(tempCommand) = PTYPE_BYPASS;
@@ -1086,9 +1089,9 @@ set_irq( int boardnum, int boardIrq )
        CMD_COUNT_OF(tempCommand) = 2;
        (CMD_OF(tempCommand))[0] = 44;  /* get ping */
        (CMD_OF(tempCommand))[1] = 200; /* 200 ms */
-       WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags);
+       write_lock_irqsave(&pB->write_fifo_spinlock, flags);
        iiWriteBuf(pB, tempCommand, 4);
-       WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags);
+       write_unlock_irqrestore(&pB->write_fifo_spinlock, flags);
 #endif
 
        iiEnableMailIrq(pB);
@@ -1267,12 +1270,12 @@ static void do_input(struct work_struct *work)
 
        // Data input
        if ( pCh->pTTY != NULL ) {
-               READ_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags)
+               read_lock_irqsave(&pCh->Ibuf_spinlock, flags);
                if (!pCh->throttled && (pCh->Ibuf_stuff != pCh->Ibuf_strip)) {
-                       READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags)
+                       read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
                        i2Input( pCh );
                } else
-                       READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags)
+                       read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
        } else {
                ip2trace(CHANN, ITRC_INPUT, 22, 0 );
 
@@ -1613,10 +1616,8 @@ ip2_close( PTTY tty, struct file *pFile )
 
        serviceOutgoingFifo ( pCh->pMyBord );
 
-       if ( tty->driver->flush_buffer ) 
-               tty->driver->flush_buffer(tty);
-       if ( tty->ldisc.flush_buffer )  
-               tty->ldisc.flush_buffer(tty);
+       tty_ldisc_flush(tty);
+       tty_driver_flush_buffer(tty);
        tty->closing = 0;
        
        pCh->pTTY = NULL;
@@ -1716,9 +1717,9 @@ ip2_write( PTTY tty, const unsigned char *pData, int count)
        ip2_flush_chars( tty );
 
        /* This is the actual move bit. Make sure it does what we need!!!!! */
-       WRITE_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags);
+       write_lock_irqsave(&pCh->Pbuf_spinlock, flags);
        bytesSent = i2Output( pCh, pData, count);
-       WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags);
+       write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
 
        ip2trace (CHANN, ITRC_WRITE, ITRC_RETURN, 1, bytesSent );
 
@@ -1735,7 +1736,7 @@ ip2_write( PTTY tty, const unsigned char *pData, int count)
 /*                                                                            */
 /*                                                                            */
 /******************************************************************************/
-static void
+static int
 ip2_putchar( PTTY tty, unsigned char ch )
 {
        i2ChanStrPtr  pCh = tty->driver_data;
@@ -1743,13 +1744,14 @@ ip2_putchar( PTTY tty, unsigned char ch )
 
 //     ip2trace (CHANN, ITRC_PUTC, ITRC_ENTER, 1, ch );
 
-       WRITE_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags);
+       write_lock_irqsave(&pCh->Pbuf_spinlock, flags);
        pCh->Pbuf[pCh->Pbuf_stuff++] = ch;
        if ( pCh->Pbuf_stuff == sizeof pCh->Pbuf ) {
-               WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags);
+               write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
                ip2_flush_chars( tty );
        } else
-               WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags);
+               write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
+       return 1;
 
 //     ip2trace (CHANN, ITRC_PUTC, ITRC_RETURN, 1, ch );
 }
@@ -1769,7 +1771,7 @@ ip2_flush_chars( PTTY tty )
        i2ChanStrPtr  pCh = tty->driver_data;
        unsigned long flags;
 
-       WRITE_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags);
+       write_lock_irqsave(&pCh->Pbuf_spinlock, flags);
        if ( pCh->Pbuf_stuff ) {
 
 //             ip2trace (CHANN, ITRC_PUTC, 10, 1, strip );
@@ -1783,7 +1785,7 @@ ip2_flush_chars( PTTY tty )
                }
                pCh->Pbuf_stuff -= strip;
        }
-       WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags);
+       write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
 }
 
 /******************************************************************************/
@@ -1801,9 +1803,9 @@ ip2_write_room ( PTTY tty )
        i2ChanStrPtr  pCh = tty->driver_data;
        unsigned long flags;
 
-       READ_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags);
+       read_lock_irqsave(&pCh->Pbuf_spinlock, flags);
        bytesFree = i2OutputFree( pCh ) - pCh->Pbuf_stuff;
-       READ_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags);
+       read_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
 
        ip2trace (CHANN, ITRC_WRITE, 11, 1, bytesFree );
 
@@ -1833,12 +1835,12 @@ ip2_chars_in_buf ( PTTY tty )
                                 pCh->Obuf_char_count + pCh->Pbuf_stuff,
                                 pCh->Obuf_char_count, pCh->Pbuf_stuff );
 #endif
-       READ_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags);
+       read_lock_irqsave(&pCh->Obuf_spinlock, flags);
        rc =  pCh->Obuf_char_count;
-       READ_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags);
-       READ_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags);
+       read_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
+       read_lock_irqsave(&pCh->Pbuf_spinlock, flags);
        rc +=  pCh->Pbuf_stuff;
-       READ_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags);
+       read_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
        return rc;
 }
 
@@ -1862,9 +1864,9 @@ ip2_flush_buffer( PTTY tty )
 #ifdef IP2DEBUG_WRITE
        printk (KERN_DEBUG "IP2: flush buffer\n" );
 #endif
-       WRITE_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags);
+       write_lock_irqsave(&pCh->Pbuf_spinlock, flags);
        pCh->Pbuf_stuff = 0;
-       WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags);
+       write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags);
        i2FlushOutput( pCh );
        ip2_owake(tty);
 
@@ -1950,15 +1952,15 @@ ip2_unthrottle ( PTTY tty )
        pCh->throttled = 0;
        i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_RESUME);
        serviceOutgoingFifo( pCh->pMyBord );
-       READ_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags)
+       read_lock_irqsave(&pCh->Ibuf_spinlock, flags);
        if ( pCh->Ibuf_stuff != pCh->Ibuf_strip ) {
-               READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags)
+               read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
 #ifdef IP2DEBUG_READ
                printk (KERN_DEBUG "i2Input called from unthrottle\n" );
 #endif
                i2Input( pCh );
        } else
-               READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags)
+               read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
 }
 
 static void
@@ -2201,9 +2203,9 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
         * for masking). Caller should use TIOCGICOUNT to see which one it was
         */
        case TIOCMIWAIT:
-               WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock, flags);
+               write_lock_irqsave(&pB->read_fifo_spinlock, flags);
                cprev = pCh->icount;     /* note the counters on entry */
-               WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock, flags);
+               write_unlock_irqrestore(&pB->read_fifo_spinlock, flags);
                i2QueueCommands(PTYPE_BYPASS, pCh, 100, 4, 
                                                CMD_DCD_REP, CMD_CTS_REP, CMD_DSR_REP, CMD_RI_REP);
                init_waitqueue_entry(&wait, current);
@@ -2223,9 +2225,9 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
                                rc = -ERESTARTSYS;
                                break;
                        }
-                       WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock, flags);
+                       write_lock_irqsave(&pB->read_fifo_spinlock, flags);
                        cnow = pCh->icount; /* atomic copy */
-                       WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock, flags);
+                       write_unlock_irqrestore(&pB->read_fifo_spinlock, flags);
                        if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
                                cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) {
                                rc =  -EIO; /* no change => rc */
@@ -2263,9 +2265,9 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg )
        case TIOCGICOUNT:
                ip2trace (CHANN, ITRC_IOCTL, 11, 1, rc );
 
-               WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock, flags);
+               write_lock_irqsave(&pB->read_fifo_spinlock, flags);
                cnow = pCh->icount;
-               WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock, flags);
+               write_unlock_irqrestore(&pB->read_fifo_spinlock, flags);
                p_cuser = argp;
                rc = put_user(cnow.cts, &p_cuser->cts);
                rc = put_user(cnow.dsr, &p_cuser->dsr);
@@ -2871,7 +2873,7 @@ ip2_ipl_ioctl ( struct inode *pInode, struct file *pFile, UINT cmd, ULONG arg )
                case 65:        /* Board  - ip2stat */
                        if ( pB ) {
                                rc = copy_to_user(argp, pB, sizeof(i2eBordStr));
-                               rc = put_user(INB(pB->i2eStatus),
+                               rc = put_user(inb(pB->i2eStatus),
                                        (ULONG __user *)(arg + (ULONG)(&pB->i2eStatus) - (ULONG)pB ) );
                        } else {
                                rc = -ENODEV;
@@ -2967,65 +2969,61 @@ ip2_ipl_open( struct inode *pInode, struct file *pFile )
        }
        return 0;
 }
-/******************************************************************************/
-/* Function:   ip2_read_procmem                                               */
-/* Parameters:                                                                */
-/*                                                                            */
-/* Returns: Length of output                                                  */
-/*                                                                            */
-/* Description:                                                               */
-/*   Supplies some driver operating parameters                                */
-/*     Not real useful unless your debugging the fifo                                                    */
-/*                                                                            */
-/******************************************************************************/
-
-#define LIMIT  (PAGE_SIZE - 120)
 
 static int
-ip2_read_procmem(char *buf, char **start, off_t offset, int len)
+proc_ip2mem_show(struct seq_file *m, void *v)
 {
        i2eBordStrPtr  pB;
        i2ChanStrPtr  pCh;
        PTTY tty;
        int i;
 
-       len = 0;
-
 #define FMTLINE        "%3d: 0x%08x 0x%08x 0%011o 0%011o\n"
 #define FMTLIN2        "     0x%04x 0x%04x tx flow 0x%x\n"
 #define FMTLIN3        "     0x%04x 0x%04x rc flow\n"
 
-       len += sprintf(buf+len,"\n");
+       seq_printf(m,"\n");
 
        for( i = 0; i < IP2_MAX_BOARDS; ++i ) {
                pB = i2BoardPtrTable[i];
                if ( pB ) {
-                       len += sprintf(buf+len,"board %d:\n",i);
-                       len += sprintf(buf+len,"\tFifo rem: %d mty: %x outM %x\n",
+                       seq_printf(m,"board %d:\n",i);
+                       seq_printf(m,"\tFifo rem: %d mty: %x outM %x\n",
                                pB->i2eFifoRemains,pB->i2eWaitingForEmptyFifo,pB->i2eOutMailWaiting);
                }
        }
 
-       len += sprintf(buf+len,"#: tty flags, port flags,     cflags,     iflags\n");
+       seq_printf(m,"#: tty flags, port flags,     cflags,     iflags\n");
        for (i=0; i < IP2_MAX_PORTS; i++) {
-               if (len > LIMIT)
-                       break;
                pCh = DevTable[i];
                if (pCh) {
                        tty = pCh->pTTY;
                        if (tty && tty->count) {
-                               len += sprintf(buf+len,FMTLINE,i,(int)tty->flags,pCh->flags,
+                               seq_printf(m,FMTLINE,i,(int)tty->flags,pCh->flags,
                                                                        tty->termios->c_cflag,tty->termios->c_iflag);
 
-                               len += sprintf(buf+len,FMTLIN2,
+                               seq_printf(m,FMTLIN2,
                                                pCh->outfl.asof,pCh->outfl.room,pCh->channelNeeds);
-                               len += sprintf(buf+len,FMTLIN3,pCh->infl.asof,pCh->infl.room);
+                               seq_printf(m,FMTLIN3,pCh->infl.asof,pCh->infl.room);
                        }
                }
        }
-       return len;
+       return 0;
+}
+
+static int proc_ip2mem_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, proc_ip2mem_show, NULL);
 }
 
+static const struct file_operations ip2mem_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = proc_ip2mem_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 /*
  * This is the handler for /proc/tty/driver/ip2
  *
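
The ip2mem conversion above is the stock single_open()/seq_file pattern: a show callback emits everything with seq_printf(), the open callback binds it via single_open(), and the file_operations reuse seq_read/seq_lseek/single_release; proc_create() registers the entry and remove_proc_entry(name, NULL) tears it down at unload. A stripped-down sketch of the same pattern for an assumed /proc/example entry (all "example" names are illustrative):

	#include <linux/module.h>
	#include <linux/fs.h>
	#include <linux/proc_fs.h>
	#include <linux/seq_file.h>

	static int example_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "hello from /proc/example\n");
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_show, NULL);
	}

	static const struct file_operations example_proc_fops = {
		.owner		= THIS_MODULE,
		.open		= example_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	/* registration from an init path, teardown from the exit path:
	 *	proc_create("example", 0, NULL, &example_proc_fops);
	 *	remove_proc_entry("example", NULL);
	 */
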
index 553f0a408edadbc0c0835880410625ec7fae599b..eb8a1a8c188e482571a185488f60884c91c9878e 100644 (file)
@@ -9,7 +9,3 @@ obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
 obj-$(CONFIG_IPMI_SI) += ipmi_si.o
 obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
 obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
-
-ipmi_si.o:     $(ipmi_si-objs)
-       $(LD) -r -o $@ $(ipmi_si-objs)
-
index e736119b64970ab6175c161e91a95d4412197bb8..7b98c067190a01917e31b1189feba430c1f1c5e9 100644 (file)
 #define BT_DEBUG_ENABLE        1       /* Generic messages */
 #define BT_DEBUG_MSG   2       /* Prints all request/response buffers */
 #define BT_DEBUG_STATES        4       /* Verbose look at state changes */
-/* BT_DEBUG_OFF must be zero to correspond to the default uninitialized
-   value */
+/*
+ * BT_DEBUG_OFF must be zero to correspond to the default uninitialized
+ * value
+ */
 
 static int bt_debug; /* 0 == BT_DEBUG_OFF */
 
 module_param(bt_debug, int, 0644);
 MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
 
-/* Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
-   and 64 byte buffers.  However, one HP implementation wants 255 bytes of
-   buffer (with a documented message of 160 bytes) so go for the max.
-   Since the Open IPMI architecture is single-message oriented at this
-   stage, the queue depth of BT is of no concern. */
+/*
+ * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds,
+ * and 64 byte buffers.  However, one HP implementation wants 255 bytes of
+ * buffer (with a documented message of 160 bytes) so go for the max.
+ * Since the Open IPMI architecture is single-message oriented at this
+ * stage, the queue depth of BT is of no concern.
+ */
 
 #define BT_NORMAL_TIMEOUT      5       /* seconds */
 #define BT_NORMAL_RETRY_LIMIT  2
 #define BT_RESET_DELAY         6       /* seconds after warm reset */
 
-/* States are written in chronological order and usually cover
-   multiple rows of the state table discussion in the IPMI spec. */
+/*
+ * States are written in chronological order and usually cover
+ * multiple rows of the state table discussion in the IPMI spec.
+ */
 
 enum bt_states {
        BT_STATE_IDLE = 0,      /* Order is critical in this list */
@@ -76,10 +82,12 @@ enum bt_states {
        BT_STATE_LONG_BUSY      /* BT doesn't get hosed :-) */
 };
 
-/* Macros seen at the end of state "case" blocks.  They help with legibility
-   and debugging. */
+/*
+ * Macros seen at the end of state "case" blocks.  They help with legibility
+ * and debugging.
+ */
 
-#define BT_STATE_CHANGE(X,Y) { bt->state = X; return Y; }
+#define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; }
 
 #define BT_SI_SM_RETURN(Y)   { last_printed = BT_STATE_PRINTME; return Y; }
 
@@ -110,11 +118,13 @@ struct si_sm_data {
 #define BT_H_BUSY      0x40
 #define BT_B_BUSY      0x80
 
-/* Some bits are toggled on each write: write once to set it, once
-   more to clear it; writing a zero does nothing.  To absolutely
-   clear it, check its state and write if set.  This avoids the "get
-   current then use as mask" scheme to modify one bit.  Note that the
-   variable "bt" is hardcoded into these macros. */
+/*
+ * Some bits are toggled on each write: write once to set it, once
+ * more to clear it; writing a zero does nothing.  To absolutely
+ * clear it, check its state and write if set.  This avoids the "get
+ * current then use as mask" scheme to modify one bit.  Note that the
+ * variable "bt" is hardcoded into these macros.
+ */
 
 #define BT_STATUS      bt->io->inputb(bt->io, 0)
 #define BT_CONTROL(x)  bt->io->outputb(bt->io, 0, x)
@@ -125,8 +135,10 @@ struct si_sm_data {
 #define BT_INTMASK_R   bt->io->inputb(bt->io, 2)
 #define BT_INTMASK_W(x)        bt->io->outputb(bt->io, 2, x)
 
-/* Convenience routines for debugging.  These are not multi-open safe!
-   Note the macros have hardcoded variables in them. */
+/*
+ * Convenience routines for debugging.  These are not multi-open safe!
+ * Note the macros have hardcoded variables in them.
+ */
 
 static char *state2txt(unsigned char state)
 {
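
As the comment block just above explains, several bits in the BT control register toggle on each write: writing a 1 flips the bit and writing a 0 does nothing, so a plain read-modify-write mask cannot reliably clear one. The safe idiom is to read the status first and write the bit only if it is currently set. A small sketch using the BT_STATUS/BT_CONTROL accessors defined above (the helper name is made up; the driver open-codes this where needed):

	/*
	 * Clear BT_H_BUSY only when it is actually set; writing the bit
	 * while it is already clear would toggle it back on.
	 */
	static void bt_clear_h_busy(struct si_sm_data *bt)
	{
		if (BT_STATUS & BT_H_BUSY)
			BT_CONTROL(BT_H_BUSY);
	}
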
@@ -182,7 +194,8 @@ static char *status2txt(unsigned char status)
 static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io)
 {
        memset(bt, 0, sizeof(struct si_sm_data));
-       if (bt->io != io) {             /* external: one-time only things */
+       if (bt->io != io) {
+               /* external: one-time only things */
                bt->io = io;
                bt->seq = 0;
        }
@@ -229,7 +242,7 @@ static int bt_start_transaction(struct si_sm_data *bt,
                printk(KERN_WARNING "BT: +++++++++++++++++ New command\n");
                printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2);
                for (i = 0; i < size; i ++)
-                       printk (" %02x", data[i]);
+                       printk(" %02x", data[i]);
                printk("\n");
        }
        bt->write_data[0] = size + 1;   /* all data plus seq byte */
@@ -246,8 +259,10 @@ static int bt_start_transaction(struct si_sm_data *bt,
        return 0;
 }
 
-/* After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
-   it calls this.  Strip out the length and seq bytes. */
+/*
+ * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE
+ * it calls this.  Strip out the length and seq bytes.
+ */
 
 static int bt_get_result(struct si_sm_data *bt,
                         unsigned char *data,
@@ -269,10 +284,10 @@ static int bt_get_result(struct si_sm_data *bt,
                memcpy(data + 2, bt->read_data + 4, msg_len - 2);
 
        if (bt_debug & BT_DEBUG_MSG) {
-               printk (KERN_WARNING "BT: result %d bytes:", msg_len);
+               printk(KERN_WARNING "BT: result %d bytes:", msg_len);
                for (i = 0; i < msg_len; i++)
                        printk(" %02x", data[i]);
-               printk ("\n");
+               printk("\n");
        }
        return msg_len;
 }
@@ -292,8 +307,10 @@ static void reset_flags(struct si_sm_data *bt)
        BT_INTMASK_W(BT_BMC_HWRST);
 }
 
-/* Get rid of an unwanted/stale response.  This should only be needed for
-   BMCs that support multiple outstanding requests. */
+/*
+ * Get rid of an unwanted/stale response.  This should only be needed for
+ * BMCs that support multiple outstanding requests.
+ */
 
 static void drain_BMC2HOST(struct si_sm_data *bt)
 {
@@ -326,8 +343,8 @@ static inline void write_all_bytes(struct si_sm_data *bt)
                printk(KERN_WARNING "BT: write %d bytes seq=0x%02X",
                        bt->write_count, bt->seq);
                for (i = 0; i < bt->write_count; i++)
-                       printk (" %02x", bt->write_data[i]);
-               printk ("\n");
+                       printk(" %02x", bt->write_data[i]);
+               printk("\n");
        }
        for (i = 0; i < bt->write_count; i++)
                HOST2BMC(bt->write_data[i]);
@@ -337,8 +354,10 @@ static inline int read_all_bytes(struct si_sm_data *bt)
 {
        unsigned char i;
 
-       /* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
-          Keep layout of first four bytes aligned with write_data[] */
+       /*
+        * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
+        * Keep layout of first four bytes aligned with write_data[]
+        */
 
        bt->read_data[0] = BMC2HOST;
        bt->read_count = bt->read_data[0];
@@ -362,8 +381,8 @@ static inline int read_all_bytes(struct si_sm_data *bt)
                if (max > 16)
                        max = 16;
                for (i = 0; i < max; i++)
-                       printk (" %02x", bt->read_data[i]);
-               printk ("%s\n", bt->read_count == max ? "" : " ...");
+                       printk(KERN_CONT " %02x", bt->read_data[i]);
+               printk(KERN_CONT "%s\n", bt->read_count == max ? "" : " ...");
        }
 
        /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */
@@ -402,8 +421,10 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
        printk(KERN_WARNING "IPMI BT: %s in %s %s ",    /* open-ended line */
                reason, STATE2TXT, STATUS2TXT);
 
-       /* Per the IPMI spec, retries are based on the sequence number
-          known only to this module, so manage a restart here. */
+       /*
+        * Per the IPMI spec, retries are based on the sequence number
+        * known only to this module, so manage a restart here.
+        */
        (bt->error_retries)++;
        if (bt->error_retries < bt->BT_CAP_retries) {
                printk("%d retries left\n",
@@ -412,8 +433,8 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
                return SI_SM_CALL_WITHOUT_DELAY;
        }
 
-       printk("failed %d retries, sending error response\n",
-               bt->BT_CAP_retries);
+       printk(KERN_WARNING "failed %d retries, sending error response\n",
+              bt->BT_CAP_retries);
        if (!bt->nonzero_status)
                printk(KERN_ERR "IPMI BT: stuck, try power cycle\n");
 
@@ -424,8 +445,10 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt,
                return SI_SM_CALL_WITHOUT_DELAY;
        }
 
-       /* Concoct a useful error message, set up the next state, and
-          be done with this sequence. */
+       /*
+        * Concoct a useful error message, set up the next state, and
+        * be done with this sequence.
+        */
 
        bt->state = BT_STATE_IDLE;
        switch (cCode) {
@@ -461,10 +484,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
                last_printed = bt->state;
        }
 
-       /* Commands that time out may still (eventually) provide a response.
-          This stale response will get in the way of a new response so remove
-          it if possible (hopefully during IDLE).  Even if it comes up later
-          it will be rejected by its (now-forgotten) seq number. */
+       /*
+        * Commands that time out may still (eventually) provide a response.
+        * This stale response will get in the way of a new response so remove
+        * it if possible (hopefully during IDLE).  Even if it comes up later
+        * it will be rejected by its (now-forgotten) seq number.
+        */
 
        if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
                drain_BMC2HOST(bt);
@@ -472,7 +497,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
        }
 
        if ((bt->state != BT_STATE_IDLE) &&
-           (bt->state <  BT_STATE_PRINTME)) {          /* check timeout */
+           (bt->state <  BT_STATE_PRINTME)) {
+               /* check timeout */
                bt->timeout -= time;
                if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
                        return error_recovery(bt,
@@ -482,8 +508,10 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 
        switch (bt->state) {
 
-       /* Idle state first checks for asynchronous messages from another
-          channel, then does some opportunistic housekeeping. */
+       /*
+        * Idle state first checks for asynchronous messages from another
+        * channel, then does some opportunistic housekeeping.
+        */
 
        case BT_STATE_IDLE:
                if (status & BT_SMS_ATN) {
@@ -531,16 +559,19 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
                        BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
                BT_CONTROL(BT_H_BUSY);          /* set */
 
-               /* Uncached, ordered writes should just proceeed serially but
-                  some BMCs don't clear B2H_ATN with one hit.  Fast-path a
-                  workaround without too much penalty to the general case. */
+               /*
+                * Uncached, ordered writes should just proceed serially but

+                * some BMCs don't clear B2H_ATN with one hit.  Fast-path a
+                * workaround without too much penalty to the general case.
+                */
 
                BT_CONTROL(BT_B2H_ATN);         /* clear it to ACK the BMC */
                BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
                                SI_SM_CALL_WITHOUT_DELAY);
 
        case BT_STATE_CLEAR_B2H:
-               if (status & BT_B2H_ATN) {      /* keep hitting it */
+               if (status & BT_B2H_ATN) {
+                       /* keep hitting it */
                        BT_CONTROL(BT_B2H_ATN);
                        BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
                }
@@ -548,7 +579,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
                                SI_SM_CALL_WITHOUT_DELAY);
 
        case BT_STATE_READ_BYTES:
-               if (!(status & BT_H_BUSY))      /* check in case of retry */
+               if (!(status & BT_H_BUSY))
+                       /* check in case of retry */
                        BT_CONTROL(BT_H_BUSY);
                BT_CONTROL(BT_CLR_RD_PTR);      /* start of BMC2HOST buffer */
                i = read_all_bytes(bt);         /* true == packet seq match */
@@ -599,8 +631,10 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
                BT_STATE_CHANGE(BT_STATE_XACTION_START,
                                SI_SM_CALL_WITH_DELAY);
 
-       /* Get BT Capabilities, using timing of upper level state machine.
-          Set outreqs to prevent infinite loop on timeout. */
+       /*
+        * Get BT Capabilities, using timing of upper level state machine.
+        * Set outreqs to prevent infinite loop on timeout.
+        */
        case BT_STATE_CAPABILITIES_BEGIN:
                bt->BT_CAP_outreqs = 1;
                {
@@ -638,10 +672,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
 
 static int bt_detect(struct si_sm_data *bt)
 {
-       /* It's impossible for the BT status and interrupt registers to be
-          all 1's, (assuming a properly functioning, self-initialized BMC)
-          but that's what you get from reading a bogus address, so we
-          test that first.  The calling routine uses negative logic. */
+       /*
+        * It's impossible for the BT status and interrupt registers to be
+        * all 1's, (assuming a properly functioning, self-initialized BMC)
+        * but that's what you get from reading a bogus address, so we
+        * test that first.  The calling routine uses negative logic.
+        */
 
        if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
                return 1;
@@ -658,8 +694,7 @@ static int bt_size(void)
        return sizeof(struct si_sm_data);
 }
 
-struct si_sm_handlers bt_smi_handlers =
-{
+struct si_sm_handlers bt_smi_handlers = {
        .init_data              = bt_init_data,
        .start_transaction      = bt_start_transaction,
        .get_result             = bt_get_result,
index c1b8228cb7b685c619d437fac98f01ef68169437..80704875794c38795505f945262448bb6f43cb25 100644 (file)
@@ -60,37 +60,58 @@ MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states");
 
 /* The states the KCS driver may be in. */
 enum kcs_states {
-       KCS_IDLE,               /* The KCS interface is currently
-                                   doing nothing. */
-       KCS_START_OP,           /* We are starting an operation.  The
-                                  data is in the output buffer, but
-                                  nothing has been done to the
-                                  interface yet.  This was added to
-                                  the state machine in the spec to
-                                  wait for the initial IBF. */
-       KCS_WAIT_WRITE_START,   /* We have written a write cmd to the
-                                  interface. */
-       KCS_WAIT_WRITE,         /* We are writing bytes to the
-                                   interface. */
-       KCS_WAIT_WRITE_END,     /* We have written the write end cmd
-                                   to the interface, and still need to
-                                   write the last byte. */
-       KCS_WAIT_READ,          /* We are waiting to read data from
-                                  the interface. */
-       KCS_ERROR0,             /* State to transition to the error
-                                  handler, this was added to the
-                                  state machine in the spec to be
-                                  sure IBF was there. */
-       KCS_ERROR1,             /* First stage error handler, wait for
-                                   the interface to respond. */
-       KCS_ERROR2,             /* The abort cmd has been written,
-                                  wait for the interface to
-                                  respond. */
-       KCS_ERROR3,             /* We wrote some data to the
-                                  interface, wait for it to switch to
-                                  read mode. */
-       KCS_HOSED               /* The hardware failed to follow the
-                                  state machine. */
+       /* The KCS interface is currently doing nothing. */
+       KCS_IDLE,
+
+       /*
+        * We are starting an operation.  The data is in the output
+        * buffer, but nothing has been done to the interface yet.  This
+        * was added to the state machine in the spec to wait for the
+        * initial IBF.
+        */
+       KCS_START_OP,
+
+       /* We have written a write cmd to the interface. */
+       KCS_WAIT_WRITE_START,
+
+       /* We are writing bytes to the interface. */
+       KCS_WAIT_WRITE,
+
+       /*
+        * We have written the write end cmd to the interface, and
+        * still need to write the last byte.
+        */
+       KCS_WAIT_WRITE_END,
+
+       /* We are waiting to read data from the interface. */
+       KCS_WAIT_READ,
+
+       /*
+        * State to transition to the error handler, this was added to
+        * the state machine in the spec to be sure IBF was there.
+        */
+       KCS_ERROR0,
+
+       /*
+        * First stage error handler, wait for the interface to
+        * respond.
+        */
+       KCS_ERROR1,
+
+       /*
+        * The abort cmd has been written, wait for the interface to
+        * respond.
+        */
+       KCS_ERROR2,
+
+       /*
+        * We wrote some data to the interface, wait for it to switch
+        * to read mode.
+        */
+       KCS_ERROR3,
+
+       /* The hardware failed to follow the state machine. */
+       KCS_HOSED
 };
 
 #define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH
@@ -102,8 +123,7 @@ enum kcs_states {
 #define MAX_ERROR_RETRIES 10
 #define ERROR0_OBF_WAIT_JIFFIES (2*HZ)
 
-struct si_sm_data
-{
+struct si_sm_data {
        enum kcs_states  state;
        struct si_sm_io *io;
        unsigned char    write_data[MAX_KCS_WRITE_SIZE];
@@ -187,7 +207,8 @@ static inline void start_error_recovery(struct si_sm_data *kcs, char *reason)
        (kcs->error_retries)++;
        if (kcs->error_retries > MAX_ERROR_RETRIES) {
                if (kcs_debug & KCS_DEBUG_ENABLE)
-                       printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", reason);
+                       printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n",
+                              reason);
                kcs->state = KCS_HOSED;
        } else {
                kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES;
@@ -271,10 +292,9 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data,
 
        if (kcs_debug & KCS_DEBUG_MSG) {
                printk(KERN_DEBUG "start_kcs_transaction -");
-               for (i = 0; i < size; i ++) {
+               for (i = 0; i < size; i++)
                        printk(" %02x", (unsigned char) (data [i]));
-               }
-               printk ("\n");
+               printk("\n");
        }
        kcs->error_retries = 0;
        memcpy(kcs->write_data, data, size);
@@ -305,9 +325,11 @@ static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
                kcs->read_pos = 3;
        }
        if (kcs->truncated) {
-               /* Report a truncated error.  We might overwrite
-                  another error, but that's too bad, the user needs
-                  to know it was truncated. */
+               /*
+                * Report a truncated error.  We might overwrite
+                * another error, but that's too bad, the user needs
+                * to know it was truncated.
+                */
                data[2] = IPMI_ERR_MSG_TRUNCATED;
                kcs->truncated = 0;
        }
@@ -315,9 +337,11 @@ static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data,
        return kcs->read_pos;
 }
 
-/* This implements the state machine defined in the IPMI manual, see
-   that for details on how this works.  Divide that flowchart into
-   sections delimited by "Wait for IBF" and this will become clear. */
+/*
+ * This implements the state machine defined in the IPMI manual, see
+ * that for details on how this works.  Divide that flowchart into
+ * sections delimited by "Wait for IBF" and this will become clear.
+ */
 static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
 {
        unsigned char status;
@@ -388,11 +412,12 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
                        write_next_byte(kcs);
                }
                break;
-               
+
        case KCS_WAIT_WRITE_END:
                if (state != KCS_WRITE_STATE) {
                        start_error_recovery(kcs,
-                                            "Not in write state for write end");
+                                            "Not in write state"
+                                            " for write end");
                        break;
                }
                clear_obf(kcs, status);
@@ -413,13 +438,15 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
                                return SI_SM_CALL_WITH_DELAY;
                        read_next_byte(kcs);
                } else {
-                       /* We don't implement this exactly like the state
-                          machine in the spec.  Some broken hardware
-                          does not write the final dummy byte to the
-                          read register.  Thus obf will never go high
-                          here.  We just go straight to idle, and we
-                          handle clearing out obf in idle state if it
-                          happens to come in. */
+                       /*
+                        * We don't implement this exactly like the state
+                        * machine in the spec.  Some broken hardware
+                        * does not write the final dummy byte to the
+                        * read register.  Thus obf will never go high
+                        * here.  We just go straight to idle, and we
+                        * handle clearing out obf in idle state if it
+                        * happens to come in.
+                        */
                        clear_obf(kcs, status);
                        kcs->orig_write_count = 0;
                        kcs->state = KCS_IDLE;
@@ -430,7 +457,8 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
        case KCS_ERROR0:
                clear_obf(kcs, status);
                status = read_status(kcs);
-               if  (GET_STATUS_OBF(status)) /* controller isn't responding */
+               if (GET_STATUS_OBF(status))
+                       /* controller isn't responding */
                        if (time_before(jiffies, kcs->error0_timeout))
                                return SI_SM_CALL_WITH_TICK_DELAY;
                write_cmd(kcs, KCS_GET_STATUS_ABORT);
@@ -442,7 +470,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
                write_data(kcs, 0);
                kcs->state = KCS_ERROR2;
                break;
-               
+
        case KCS_ERROR2:
                if (state != KCS_READ_STATE) {
                        start_error_recovery(kcs,
@@ -456,7 +484,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
                write_data(kcs, KCS_READ_BYTE);
                kcs->state = KCS_ERROR3;
                break;
-               
+
        case KCS_ERROR3:
                if (state != KCS_IDLE_STATE) {
                        start_error_recovery(kcs,
@@ -475,7 +503,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time)
                        return SI_SM_TRANSACTION_COMPLETE;
                }
                break;
-                       
+
        case KCS_HOSED:
                break;
        }
@@ -495,10 +523,12 @@ static int kcs_size(void)
 
 static int kcs_detect(struct si_sm_data *kcs)
 {
-       /* It's impossible for the KCS status register to be all 1's,
-          (assuming a properly functioning, self-initialized BMC)
-          but that's what you get from reading a bogus address, so we
-          test that first. */
+       /*
+        * It's impossible for the KCS status register to be all 1's,
+        * (assuming a properly functioning, self-initialized BMC)
+        * but that's what you get from reading a bogus address, so we
+        * test that first.
+        */
        if (read_status(kcs) == 0xff)
                return 1;
 
@@ -509,8 +539,7 @@ static void kcs_cleanup(struct si_sm_data *kcs)
 {
 }
 
-struct si_sm_handlers kcs_smi_handlers =
-{
+struct si_sm_handlers kcs_smi_handlers = {
        .init_data         = init_kcs_data,
        .start_transaction = start_kcs_transaction,
        .get_result        = get_kcs_result,
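
Both the BT and KCS backends export the same si_sm_handlers vtable, so the upper layer can drive either one identically: start a transaction, keep feeding elapsed time into the event handler (delaying when the state machine asks for it), and fetch the result once it reports completion. A rough polling sketch of that calling convention, assuming the declarations from drivers/char/ipmi/ipmi_si_sm.h; it is an illustration only, not the actual loop in ipmi_si_intf.c:

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include "ipmi_si_sm.h"

	static int run_transaction(struct si_sm_handlers *ops,
				   struct si_sm_data *sm,
				   unsigned char *msg, unsigned int len,
				   unsigned char *resp, unsigned int resp_len)
	{
		enum si_sm_result res;

		if (ops->start_transaction(sm, msg, len))
			return -EBUSY;

		do {
			res = ops->event(sm, 100);	/* ~100 usecs have passed */
			if (res == SI_SM_CALL_WITH_DELAY)
				udelay(100);
		} while (res != SI_SM_TRANSACTION_COMPLETE && res != SI_SM_HOSED);

		if (res == SI_SM_HOSED)
			return -EIO;

		return ops->get_result(sm, resp, resp_len);	/* response length */
	}
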
index 32b2b22996dc34efc189ccc71b5cdc2b6b3b4ba2..8a59aaa21be5c79e337802eba20cc0e5ac860f67 100644 (file)
@@ -47,7 +47,7 @@
 
 #define PFX "IPMI message handler: "
 
-#define IPMI_DRIVER_VERSION "39.1"
+#define IPMI_DRIVER_VERSION "39.2"
 
 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
@@ -63,16 +63,16 @@ static struct proc_dir_entry *proc_ipmi_root;
 
 #define MAX_EVENTS_IN_QUEUE    25
 
-/* Don't let a message sit in a queue forever, always time it with at lest
-   the max message timer.  This is in milliseconds. */
+/*
+ * Don't let a message sit in a queue forever, always time it with at least
+ * the max message timer.  This is in milliseconds.
+ */
 #define MAX_MSG_TIMEOUT                60000
 
-
 /*
  * The main "user" data structure.
  */
-struct ipmi_user
-{
+struct ipmi_user {
        struct list_head link;
 
        /* Set to "0" when the user is destroyed. */
@@ -91,8 +91,7 @@ struct ipmi_user
        int gets_events;
 };
 
-struct cmd_rcvr
-{
+struct cmd_rcvr {
        struct list_head link;
 
        ipmi_user_t   user;
@@ -106,12 +105,12 @@ struct cmd_rcvr
         * or change any data until the RCU period completes.  So we
         * use this next variable during mass deletion so we can have
         * a list and don't have to wait and restart the search on
-        * every individual deletion of a command. */
+        * every individual deletion of a command.
+        */
        struct cmd_rcvr *next;
 };
 
-struct seq_table
-{
+struct seq_table {
        unsigned int         inuse : 1;
        unsigned int         broadcast : 1;
 
@@ -119,53 +118,60 @@ struct seq_table
        unsigned long        orig_timeout;
        unsigned int         retries_left;
 
-       /* To verify on an incoming send message response that this is
-           the message that the response is for, we keep a sequence id
-           and increment it every time we send a message. */
+       /*
+        * To verify on an incoming send message response that this is
+        * the message that the response is for, we keep a sequence id
+        * and increment it every time we send a message.
+        */
        long                 seqid;
 
-       /* This is held so we can properly respond to the message on a
-           timeout, and it is used to hold the temporary data for
-           retransmission, too. */
+       /*
+        * This is held so we can properly respond to the message on a
+        * timeout, and it is used to hold the temporary data for
+        * retransmission, too.
+        */
        struct ipmi_recv_msg *recv_msg;
 };
 
-/* Store the information in a msgid (long) to allow us to find a
-   sequence table entry from the msgid. */
+/*
+ * Store the information in a msgid (long) to allow us to find a
+ * sequence table entry from the msgid.
+ */
 #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
 
 #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
        do {                                                            \
                seq = ((msgid >> 26) & 0x3f);                           \
                seqid = (msgid & 0x3fffff);                             \
-        } while (0)
+       } while (0)
 
 #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff)
 
-struct ipmi_channel
-{
+struct ipmi_channel {
        unsigned char medium;
        unsigned char protocol;
 
-       /* My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
-          but may be changed by the user. */
+       /*
+        * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
+        * but may be changed by the user.
+        */
        unsigned char address;
 
-       /* My LUN.  This should generally stay the SMS LUN, but just in
-          case... */
+       /*
+        * My LUN.  This should generally stay the SMS LUN, but just in
+        * case...
+        */
        unsigned char lun;
 };
 
 #ifdef CONFIG_PROC_FS
-struct ipmi_proc_entry
-{
+struct ipmi_proc_entry {
        char                   *name;
        struct ipmi_proc_entry *next;
 };
 #endif
 
-struct bmc_device
-{
+struct bmc_device {
        struct platform_device *dev;
        struct ipmi_device_id  id;
        unsigned char          guid[16];
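
STORE_SEQ_IN_MSGID() and GET_SEQ_FROM_MSGID() above pack the sequence-table slot and a rolling sequence id into the single long msgid that travels with each message, so a response can be matched back to its seq_table entry without any extra lookup state. A self-contained round-trip illustration of the arithmetic (plain userspace C, arbitrary values); note the getter masks the id to 22 bits, matching NEXT_SEQID():

	#include <stdio.h>

	#define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff))
	#define GET_SEQ_FROM_MSGID(msgid, seq, seqid)	\
		do {					\
			seq = ((msgid >> 26) & 0x3f);	\
			seqid = (msgid & 0x3fffff);	\
		} while (0)

	int main(void)
	{
		long msgid = STORE_SEQ_IN_MSGID(5, 1234);
		long seq, seqid;

		GET_SEQ_FROM_MSGID(msgid, seq, seqid);
		/* prints: msgid=0x140004d2 seq=5 seqid=1234 */
		printf("msgid=0x%lx seq=%ld seqid=%ld\n", msgid, seq, seqid);
		return 0;
	}
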
@@ -186,10 +192,108 @@ struct bmc_device
        struct device_attribute aux_firmware_rev_attr;
 };
 
+/*
+ * Various statistics for IPMI, these index stats[] in the ipmi_smi
+ * structure.
+ */
+enum ipmi_stat_indexes {
+       /* Commands we got from the user that were invalid. */
+       IPMI_STAT_sent_invalid_commands = 0,
+
+       /* Commands we sent to the MC. */
+       IPMI_STAT_sent_local_commands,
+
+       /* Responses from the MC that were delivered to a user. */
+       IPMI_STAT_handled_local_responses,
+
+       /* Responses from the MC that were not delivered to a user. */
+       IPMI_STAT_unhandled_local_responses,
+
+       /* Commands we sent out to the IPMB bus. */
+       IPMI_STAT_sent_ipmb_commands,
+
+       /* Commands sent on the IPMB that had errors on the SEND CMD */
+       IPMI_STAT_sent_ipmb_command_errs,
+
+       /* Each retransmit increments this count. */
+       IPMI_STAT_retransmitted_ipmb_commands,
+
+       /*
+        * When a message times out (runs out of retransmits) this is
+        * incremented.
+        */
+       IPMI_STAT_timed_out_ipmb_commands,
+
+       /*
+        * This is like above, but for broadcasts.  Broadcasts are
+        * *not* included in the above count (they are expected to
+        * time out).
+        */
+       IPMI_STAT_timed_out_ipmb_broadcasts,
+
+       /* Responses I have sent to the IPMB bus. */
+       IPMI_STAT_sent_ipmb_responses,
+
+       /* The response was delivered to the user. */
+       IPMI_STAT_handled_ipmb_responses,
+
+       /* The response had invalid data in it. */
+       IPMI_STAT_invalid_ipmb_responses,
+
+       /* The response didn't have anyone waiting for it. */
+       IPMI_STAT_unhandled_ipmb_responses,
+
+       /* Commands we sent out to the IPMB bus. */
+       IPMI_STAT_sent_lan_commands,
+
+       /* Commands sent on the IPMB that had errors on the SEND CMD */
+       IPMI_STAT_sent_lan_command_errs,
+
+       /* Each retransmit increments this count. */
+       IPMI_STAT_retransmitted_lan_commands,
+
+       /*
+        * When a message times out (runs out of retransmits) this is
+        * incremented.
+        */
+       IPMI_STAT_timed_out_lan_commands,
+
+       /* Responses I have sent to the IPMB bus. */
+       IPMI_STAT_sent_lan_responses,
+
+       /* The response was delivered to the user. */
+       IPMI_STAT_handled_lan_responses,
+
+       /* The response had invalid data in it. */
+       IPMI_STAT_invalid_lan_responses,
+
+       /* The response didn't have anyone waiting for it. */
+       IPMI_STAT_unhandled_lan_responses,
+
+       /* The command was delivered to the user. */
+       IPMI_STAT_handled_commands,
+
+       /* The command had invalid data in it. */
+       IPMI_STAT_invalid_commands,
+
+       /* The command didn't have anyone waiting for it. */
+       IPMI_STAT_unhandled_commands,
+
+       /* Invalid data in an event. */
+       IPMI_STAT_invalid_events,
+
+       /* Events that were received with the proper format. */
+       IPMI_STAT_events,
+
+
+       /* This *must* remain last, add new values above this. */
+       IPMI_NUM_STATS
+};
+
+
 #define IPMI_IPMB_NUM_SEQ      64
 #define IPMI_MAX_CHANNELS       16
-struct ipmi_smi
-{
+struct ipmi_smi {
        /* What interface number are we? */
        int intf_num;
 
@@ -198,8 +302,10 @@ struct ipmi_smi
        /* Used for a list of interfaces. */
        struct list_head link;
 
-       /* The list of upper layers that are using me.  seq_lock
-        * protects this. */
+       /*
+        * The list of upper layers that are using me.  seq_lock
+        * protects this.
+        */
        struct list_head users;
 
        /* Information to supply to users. */
@@ -213,10 +319,12 @@ struct ipmi_smi
        char *my_dev_name;
        char *sysfs_name;
 
-       /* This is the lower-layer's sender routine.  Note that you
+       /*
+        * This is the lower-layer's sender routine.  Note that you
         * must either be holding the ipmi_interfaces_mutex or be in
         * an unpreemptible region to use this.  You must fetch the
-        * value into a local variable and make sure it is not NULL. */
+        * value into a local variable and make sure it is not NULL.
+        */
        struct ipmi_smi_handlers *handlers;
        void                     *send_info;
 
@@ -229,34 +337,45 @@ struct ipmi_smi
        /* Driver-model device for the system interface. */
        struct device          *si_dev;
 
-       /* A table of sequence numbers for this interface.  We use the
-           sequence numbers for IPMB messages that go out of the
-           interface to match them up with their responses.  A routine
-           is called periodically to time the items in this list. */
+       /*
+        * A table of sequence numbers for this interface.  We use the
+        * sequence numbers for IPMB messages that go out of the
+        * interface to match them up with their responses.  A routine
+        * is called periodically to time the items in this list.
+        */
        spinlock_t       seq_lock;
        struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
        int curr_seq;
 
-       /* Messages that were delayed for some reason (out of memory,
-           for instance), will go in here to be processed later in a
-           periodic timer interrupt. */
+       /*
+        * Messages that were delayed for some reason (out of memory,
+        * for instance), will go in here to be processed later in a
+        * periodic timer interrupt.
+        */
        spinlock_t       waiting_msgs_lock;
        struct list_head waiting_msgs;
 
-       /* The list of command receivers that are registered for commands
-          on this interface. */
+       /*
+        * The list of command receivers that are registered for commands
+        * on this interface.
+        */
        struct mutex     cmd_rcvrs_mutex;
        struct list_head cmd_rcvrs;
 
-       /* Events that were queues because no one was there to receive
-           them. */
+       /*
+        * Events that were queued because no one was there to receive
+        * them.
+        */
        spinlock_t       events_lock; /* For dealing with event stuff. */
        struct list_head waiting_events;
        unsigned int     waiting_events_count; /* How many events in queue? */
-       int              delivering_events;
+       char             delivering_events;
+       char             event_msg_printed;
 
-       /* The event receiver for my BMC, only really used at panic
-          shutdown as a place to store this. */
+       /*
+        * The event receiver for my BMC, only really used at panic
+        * shutdown as a place to store this.
+        */
        unsigned char event_receiver;
        unsigned char event_receiver_lun;
        unsigned char local_sel_device;
@@ -268,14 +387,18 @@ struct ipmi_smi
        int auto_maintenance_timeout;
        spinlock_t maintenance_mode_lock; /* Used in a timer... */
 
-       /* A cheap hack, if this is non-null and a message to an
-          interface comes in with a NULL user, call this routine with
-          it.  Note that the message will still be freed by the
-          caller.  This only works on the system interface. */
+       /*
+        * A cheap hack, if this is non-null and a message to an
+        * interface comes in with a NULL user, call this routine with
+        * it.  Note that the message will still be freed by the
+        * caller.  This only works on the system interface.
+        */
        void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
 
-       /* When we are scanning the channels for an SMI, this will
-          tell which channel we are scanning. */
+       /*
+        * When we are scanning the channels for an SMI, this will
+        * tell which channel we are scanning.
+        */
        int curr_channel;
 
        /* Channel information */
@@ -285,74 +408,14 @@ struct ipmi_smi
        struct proc_dir_entry *proc_dir;
        char                  proc_dir_name[10];
 
-       spinlock_t   counter_lock; /* For making counters atomic. */
-
-       /* Commands we got that were invalid. */
-       unsigned int sent_invalid_commands;
-
-       /* Commands we sent to the MC. */
-       unsigned int sent_local_commands;
-       /* Responses from the MC that were delivered to a user. */
-       unsigned int handled_local_responses;
-       /* Responses from the MC that were not delivered to a user. */
-       unsigned int unhandled_local_responses;
-
-       /* Commands we sent out to the IPMB bus. */
-       unsigned int sent_ipmb_commands;
-       /* Commands sent on the IPMB that had errors on the SEND CMD */
-       unsigned int sent_ipmb_command_errs;
-       /* Each retransmit increments this count. */
-       unsigned int retransmitted_ipmb_commands;
-       /* When a message times out (runs out of retransmits) this is
-           incremented. */
-       unsigned int timed_out_ipmb_commands;
-
-       /* This is like above, but for broadcasts.  Broadcasts are
-           *not* included in the above count (they are expected to
-           time out). */
-       unsigned int timed_out_ipmb_broadcasts;
+       atomic_t stats[IPMI_NUM_STATS];
 
-       /* Responses I have sent to the IPMB bus. */
-       unsigned int sent_ipmb_responses;
-
-       /* The response was delivered to the user. */
-       unsigned int handled_ipmb_responses;
-       /* The response had invalid data in it. */
-       unsigned int invalid_ipmb_responses;
-       /* The response didn't have anyone waiting for it. */
-       unsigned int unhandled_ipmb_responses;
-
-       /* Commands we sent out to the IPMB bus. */
-       unsigned int sent_lan_commands;
-       /* Commands sent on the IPMB that had errors on the SEND CMD */
-       unsigned int sent_lan_command_errs;
-       /* Each retransmit increments this count. */
-       unsigned int retransmitted_lan_commands;
-       /* When a message times out (runs out of retransmits) this is
-           incremented. */
-       unsigned int timed_out_lan_commands;
-
-       /* Responses I have sent to the IPMB bus. */
-       unsigned int sent_lan_responses;
-
-       /* The response was delivered to the user. */
-       unsigned int handled_lan_responses;
-       /* The response had invalid data in it. */
-       unsigned int invalid_lan_responses;
-       /* The response didn't have anyone waiting for it. */
-       unsigned int unhandled_lan_responses;
-
-       /* The command was delivered to the user. */
-       unsigned int handled_commands;
-       /* The command had invalid data in it. */
-       unsigned int invalid_commands;
-       /* The command didn't have anyone waiting for it. */
-       unsigned int unhandled_commands;
-
-       /* Invalid data in an event. */
-       unsigned int invalid_events;
-       /* Events that were received with the proper format. */
-       unsigned int events;
+       /*
+        * Duplicate of the run_to_completion flag in the smb_info,
+        * smi_info and ipmi_serial_info structures.  Used to decrease
+        * the number of parameters passed by "low" level IPMI code.
+        */
+       int run_to_completion;
 };
 #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
 
@@ -368,12 +431,19 @@ static DEFINE_MUTEX(ipmidriver_mutex);
 static LIST_HEAD(ipmi_interfaces);
 static DEFINE_MUTEX(ipmi_interfaces_mutex);
 
-/* List of watchers that want to know when smi's are added and
-   deleted. */
+/*
+ * List of watchers that want to know when smi's are added and deleted.
+ */
 static LIST_HEAD(smi_watchers);
 static DEFINE_MUTEX(smi_watchers_mutex);
 
 
+#define ipmi_inc_stat(intf, stat) \
+       atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
+#define ipmi_get_stat(intf, stat) \
+       ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
+
+
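/*
 * A minimal sketch of what the two macros above assume: an enum of stat
 * indexes whose members mirror the old counter names.  The real enum is
 * added earlier in the patch, outside this excerpt; only a few entries
 * are shown here for illustration.
 *
 *     enum ipmi_stat_indexes {
 *             IPMI_STAT_sent_invalid_commands = 0,
 *             IPMI_STAT_sent_local_commands,
 *             IPMI_STAT_handled_local_responses,
 *             ...  (one index per counter removed from struct ipmi_smi)
 *             IPMI_NUM_STATS
 *     };
 *
 * ipmi_inc_stat(intf, sent_local_commands) then expands, via ## token
 * pasting, to atomic_inc(&(intf)->stats[IPMI_STAT_sent_local_commands]),
 * which is why the old counter_lock spinlock is no longer needed.
 */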
 static void free_recv_msg_list(struct list_head *q)
 {
        struct ipmi_recv_msg *msg, *msg2;
@@ -417,10 +487,8 @@ static void clean_up_interface_data(ipmi_smi_t intf)
 
        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
                if ((intf->seq_table[i].inuse)
-                   && (intf->seq_table[i].recv_msg))
-               {
+                                       && (intf->seq_table[i].recv_msg))
                        ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
-               }
        }
 }
 
@@ -487,6 +555,7 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
        }
        return -ENOMEM;
 }
+EXPORT_SYMBOL(ipmi_smi_watcher_register);
 
 int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
 {
@@ -495,6 +564,7 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
        mutex_unlock(&smi_watchers_mutex);
        return 0;
 }
+EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
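/*
 * For context, a sketch of how another kernel module hooks into the
 * watcher list used above.  Callback signatures follow struct
 * ipmi_smi_watcher in this kernel's include/linux/ipmi.h; the names and
 * bodies below are purely illustrative.
 */
static void example_new_smi(int if_num, struct device *dev)
{
        printk(KERN_INFO "IPMI interface %d appeared\n", if_num);
}

static void example_smi_gone(int if_num)
{
        printk(KERN_INFO "IPMI interface %d went away\n", if_num);
}

static struct ipmi_smi_watcher example_watcher = {
        .owner    = THIS_MODULE,
        .new_smi  = example_new_smi,
        .smi_gone = example_smi_gone,
};

/* ipmi_smi_watcher_register(&example_watcher) from module init,
 * ipmi_smi_watcher_unregister(&example_watcher) from module exit. */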
 
 /*
  * Must be called with smi_watchers_mutex held.
@@ -530,8 +600,7 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
        }
 
        if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE)
-           || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
-       {
+           || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
                struct ipmi_ipmb_addr *ipmb_addr1
                    = (struct ipmi_ipmb_addr *) addr1;
                struct ipmi_ipmb_addr *ipmb_addr2
@@ -559,9 +628,8 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
 
 int ipmi_validate_addr(struct ipmi_addr *addr, int len)
 {
-       if (len < sizeof(struct ipmi_system_interface_addr)) {
+       if (len < sizeof(struct ipmi_system_interface_addr))
                return -EINVAL;
-       }
 
        if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
                if (addr->channel != IPMI_BMC_CHANNEL)
@@ -575,23 +643,21 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len)
                return -EINVAL;
 
        if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
-           || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
-       {
-               if (len < sizeof(struct ipmi_ipmb_addr)) {
+           || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
+               if (len < sizeof(struct ipmi_ipmb_addr))
                        return -EINVAL;
-               }
                return 0;
        }
 
        if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
-               if (len < sizeof(struct ipmi_lan_addr)) {
+               if (len < sizeof(struct ipmi_lan_addr))
                        return -EINVAL;
-               }
                return 0;
        }
 
        return -EINVAL;
 }
+EXPORT_SYMBOL(ipmi_validate_addr);
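/*
 * Usage sketch for the validator just exported.  The address struct and
 * constants come from include/linux/ipmi.h; the helper name is made up.
 */
static int example_addr_is_valid(void)
{
        struct ipmi_system_interface_addr si = {
                .addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
                .channel   = IPMI_BMC_CHANNEL,  /* required for this addr_type */
                .lun       = 0,
        };

        /* Returns 0 here: the type, channel and length all check out. */
        return ipmi_validate_addr((struct ipmi_addr *) &si, sizeof(si)) == 0;
}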
 
 unsigned int ipmi_addr_length(int addr_type)
 {
@@ -599,34 +665,28 @@ unsigned int ipmi_addr_length(int addr_type)
                return sizeof(struct ipmi_system_interface_addr);
 
        if ((addr_type == IPMI_IPMB_ADDR_TYPE)
-           || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
-       {
+                       || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
                return sizeof(struct ipmi_ipmb_addr);
-       }
 
        if (addr_type == IPMI_LAN_ADDR_TYPE)
                return sizeof(struct ipmi_lan_addr);
 
        return 0;
 }
+EXPORT_SYMBOL(ipmi_addr_length);
 
 static void deliver_response(struct ipmi_recv_msg *msg)
 {
        if (!msg->user) {
                ipmi_smi_t    intf = msg->user_msg_data;
-               unsigned long flags;
 
                /* Special handling for NULL users. */
                if (intf->null_user_handler) {
                        intf->null_user_handler(intf, msg);
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->handled_local_responses++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                       ipmi_inc_stat(intf, handled_local_responses);
                } else {
                        /* No handler, so give up. */
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->unhandled_local_responses++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                       ipmi_inc_stat(intf, unhandled_local_responses);
                }
                ipmi_free_recv_msg(msg);
        } else {
@@ -646,9 +706,11 @@ deliver_err_response(struct ipmi_recv_msg *msg, int err)
        deliver_response(msg);
 }
 
-/* Find the next sequence number not being used and add the given
-   message with the given timeout to the sequence table.  This must be
-   called with the interface's seq_lock held. */
+/*
+ * Find the next sequence number not being used and add the given
+ * message with the given timeout to the sequence table.  This must be
+ * called with the interface's seq_lock held.
+ */
 static int intf_next_seq(ipmi_smi_t           intf,
                         struct ipmi_recv_msg *recv_msg,
                         unsigned long        timeout,
@@ -660,10 +722,8 @@ static int intf_next_seq(ipmi_smi_t           intf,
        int          rv = 0;
        unsigned int i;
 
-       for (i = intf->curr_seq;
-            (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
-            i = (i+1)%IPMI_IPMB_NUM_SEQ)
-       {
+       for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
+                                       i = (i+1)%IPMI_IPMB_NUM_SEQ) {
                if (!intf->seq_table[i].inuse)
                        break;
        }
@@ -671,8 +731,10 @@ static int intf_next_seq(ipmi_smi_t           intf,
        if (!intf->seq_table[i].inuse) {
                intf->seq_table[i].recv_msg = recv_msg;
 
-               /* Start with the maximum timeout, when the send response
-                  comes in we will start the real timer. */
+               /*
+                * Start with the maximum timeout, when the send response
+                * comes in we will start the real timer.
+                */
                intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
                intf->seq_table[i].orig_timeout = timeout;
                intf->seq_table[i].retries_left = retries;
@@ -685,15 +747,17 @@ static int intf_next_seq(ipmi_smi_t           intf,
        } else {
                rv = -EAGAIN;
        }
-       
+
        return rv;
 }
 
-/* Return the receive message for the given sequence number and
-   release the sequence number so it can be reused.  Some other data
-   is passed in to be sure the message matches up correctly (to help
-   guard against message coming in after their timeout and the
-   sequence number being reused). */
+/*
+ * Return the receive message for the given sequence number and
+ * release the sequence number so it can be reused.  Some other data
+ * is passed in to be sure the message matches up correctly (to help
+ * guard against messages coming in after their timeout and the
+ * sequence number being reused).
+ */
 static int intf_find_seq(ipmi_smi_t           intf,
                         unsigned char        seq,
                         short                channel,
@@ -712,11 +776,9 @@ static int intf_find_seq(ipmi_smi_t           intf,
        if (intf->seq_table[seq].inuse) {
                struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
 
-               if ((msg->addr.channel == channel)
-                   && (msg->msg.cmd == cmd)
-                   && (msg->msg.netfn == netfn)
-                   && (ipmi_addr_equal(addr, &(msg->addr))))
-               {
+               if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
+                               && (msg->msg.netfn == netfn)
+                               && (ipmi_addr_equal(addr, &(msg->addr)))) {
                        *recv_msg = msg;
                        intf->seq_table[seq].inuse = 0;
                        rv = 0;
@@ -741,11 +803,12 @@ static int intf_start_seq_timer(ipmi_smi_t intf,
        GET_SEQ_FROM_MSGID(msgid, seq, seqid);
 
        spin_lock_irqsave(&(intf->seq_lock), flags);
-       /* We do this verification because the user can be deleted
-           while a message is outstanding. */
+       /*
+        * We do this verification because the user can be deleted
+        * while a message is outstanding.
+        */
        if ((intf->seq_table[seq].inuse)
-           && (intf->seq_table[seq].seqid == seqid))
-       {
+                               && (intf->seq_table[seq].seqid == seqid)) {
                struct seq_table *ent = &(intf->seq_table[seq]);
                ent->timeout = ent->orig_timeout;
                rv = 0;
@@ -770,11 +833,12 @@ static int intf_err_seq(ipmi_smi_t   intf,
        GET_SEQ_FROM_MSGID(msgid, seq, seqid);
 
        spin_lock_irqsave(&(intf->seq_lock), flags);
-       /* We do this verification because the user can be deleted
-           while a message is outstanding. */
+       /*
+        * We do this verification because the user can be deleted
+        * while a message is outstanding.
+        */
        if ((intf->seq_table[seq].inuse)
-           && (intf->seq_table[seq].seqid == seqid))
-       {
+                               && (intf->seq_table[seq].seqid == seqid)) {
                struct seq_table *ent = &(intf->seq_table[seq]);
 
                ent->inuse = 0;
@@ -800,24 +864,30 @@ int ipmi_create_user(unsigned int          if_num,
        int           rv = 0;
        ipmi_smi_t    intf;
 
-       /* There is no module usecount here, because it's not
-           required.  Since this can only be used by and called from
-           other modules, they will implicitly use this module, and
-           thus this can't be removed unless the other modules are
-           removed. */
+       /*
+        * There is no module usecount here, because it's not
+        * required.  Since this can only be used by and called from
+        * other modules, they will implicitly use this module, and
+        * thus this can't be removed unless the other modules are
+        * removed.
+        */
 
        if (handler == NULL)
                return -EINVAL;
 
-       /* Make sure the driver is actually initialized, this handles
-          problems with initialization order. */
+       /*
+        * Make sure the driver is actually initialized, this handles
+        * problems with initialization order.
+        */
        if (!initialized) {
                rv = ipmi_init_msghandler();
                if (rv)
                        return rv;
 
-               /* The init code doesn't return an error if it was turned
-                  off, but it won't initialize.  Check that. */
+               /*
+                * The init code doesn't return an error if it was turned
+                * off, but it won't initialize.  Check that.
+                */
                if (!initialized)
                        return -ENODEV;
        }
@@ -858,8 +928,10 @@ int ipmi_create_user(unsigned int          if_num,
                }
        }
 
-       /* Hold the lock so intf->handlers is guaranteed to be good
-        * until now */
+       /*
+        * Hold the lock so intf->handlers is guaranteed to be good
+        * until now
+        */
        mutex_unlock(&ipmi_interfaces_mutex);
 
        new_user->valid = 1;
@@ -876,6 +948,7 @@ out_kfree:
        kfree(new_user);
        return rv;
 }
+EXPORT_SYMBOL(ipmi_create_user);
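/*
 * Client-side sketch of the call just exported (struct ipmi_user_hndl as
 * in this kernel's include/linux/ipmi.h).  The handler and function names
 * here are hypothetical.
 */
static void example_recv(struct ipmi_recv_msg *msg, void *handler_data)
{
        /* For responses, msg->msg.data[0] is the IPMI completion code. */
        ipmi_free_recv_msg(msg);
}

static struct ipmi_user_hndl example_hndl = {
        .ipmi_recv_hndl = example_recv,
};

static ipmi_user_t example_user;

static int example_attach(void)
{
        /* Attach to interface number 0 with no private handler_data. */
        return ipmi_create_user(0, &example_hndl, NULL, &example_user);
}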
 
 static void free_user(struct kref *ref)
 {
@@ -899,8 +972,7 @@ int ipmi_destroy_user(ipmi_user_t user)
 
        for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
                if (intf->seq_table[i].inuse
-                   && (intf->seq_table[i].recv_msg->user == user))
-               {
+                   && (intf->seq_table[i].recv_msg->user == user)) {
                        intf->seq_table[i].inuse = 0;
                        ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
                }
@@ -943,6 +1015,7 @@ int ipmi_destroy_user(ipmi_user_t user)
 
        return 0;
 }
+EXPORT_SYMBOL(ipmi_destroy_user);
 
 void ipmi_get_version(ipmi_user_t   user,
                      unsigned char *major,
@@ -951,6 +1024,7 @@ void ipmi_get_version(ipmi_user_t   user,
        *major = user->intf->ipmi_version_major;
        *minor = user->intf->ipmi_version_minor;
 }
+EXPORT_SYMBOL(ipmi_get_version);
 
 int ipmi_set_my_address(ipmi_user_t   user,
                        unsigned int  channel,
@@ -961,6 +1035,7 @@ int ipmi_set_my_address(ipmi_user_t   user,
        user->intf->channels[channel].address = address;
        return 0;
 }
+EXPORT_SYMBOL(ipmi_set_my_address);
 
 int ipmi_get_my_address(ipmi_user_t   user,
                        unsigned int  channel,
@@ -971,6 +1046,7 @@ int ipmi_get_my_address(ipmi_user_t   user,
        *address = user->intf->channels[channel].address;
        return 0;
 }
+EXPORT_SYMBOL(ipmi_get_my_address);
 
 int ipmi_set_my_LUN(ipmi_user_t   user,
                    unsigned int  channel,
@@ -981,6 +1057,7 @@ int ipmi_set_my_LUN(ipmi_user_t   user,
        user->intf->channels[channel].lun = LUN & 0x3;
        return 0;
 }
+EXPORT_SYMBOL(ipmi_set_my_LUN);
 
 int ipmi_get_my_LUN(ipmi_user_t   user,
                    unsigned int  channel,
@@ -991,6 +1068,7 @@ int ipmi_get_my_LUN(ipmi_user_t   user,
        *address = user->intf->channels[channel].lun;
        return 0;
 }
+EXPORT_SYMBOL(ipmi_get_my_LUN);
 
 int ipmi_get_maintenance_mode(ipmi_user_t user)
 {
@@ -1075,6 +1153,11 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
                list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
                        list_move_tail(&msg->link, &msgs);
                intf->waiting_events_count = 0;
+               if (intf->event_msg_printed) {
+                       printk(KERN_WARNING PFX "Event queue no longer"
+                              " full\n");
+                       intf->event_msg_printed = 0;
+               }
 
                intf->delivering_events = 1;
                spin_unlock_irqrestore(&intf->events_lock, flags);
@@ -1094,6 +1177,7 @@ int ipmi_set_gets_events(ipmi_user_t user, int val)
 
        return 0;
 }
+EXPORT_SYMBOL(ipmi_set_gets_events);
 
 static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t    intf,
                                      unsigned char netfn,
@@ -1159,6 +1243,7 @@ int ipmi_register_for_cmd(ipmi_user_t   user,
 
        return rv;
 }
+EXPORT_SYMBOL(ipmi_register_for_cmd);
 
 int ipmi_unregister_for_cmd(ipmi_user_t   user,
                            unsigned char netfn,
@@ -1196,19 +1281,13 @@ int ipmi_unregister_for_cmd(ipmi_user_t   user,
        }
        return rv;
 }
-
-void ipmi_user_set_run_to_completion(ipmi_user_t user, int val)
-{
-       ipmi_smi_t intf = user->intf;
-       if (intf->handlers)
-               intf->handlers->set_run_to_completion(intf->send_info, val);
-}
+EXPORT_SYMBOL(ipmi_unregister_for_cmd);
 
 static unsigned char
 ipmb_checksum(unsigned char *data, int size)
 {
        unsigned char csum = 0;
-       
+
        for (; size > 0; size--, data++)
                csum += *data;
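/*
 * The hunk ends before ipmb_checksum()'s return statement; assuming the
 * usual two's-complement finish ("return -csum;"), the checksum byte makes
 * all covered bytes plus the checksum itself sum to zero mod 256.  A
 * hypothetical receiver-side check built on that property:
 */
static int example_ipmb_block_valid(const unsigned char *block, int len_with_csum)
{
        unsigned char sum = 0;
        int i;

        for (i = 0; i < len_with_csum; i++)
                sum += block[i];

        return sum == 0;        /* any non-zero remainder means corruption */
}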
 
@@ -1250,8 +1329,10 @@ static inline void format_ipmb_msg(struct ipmi_smi_msg   *smi_msg,
                = ipmb_checksum(&(smi_msg->data[i+6]),
                                smi_msg->data_size-6);
 
-       /* Add on the checksum size and the offset from the
-          broadcast. */
+       /*
+        * Add on the checksum size and the offset from the
+        * broadcast.
+        */
        smi_msg->data_size += 1 + i;
 
        smi_msg->msgid = msgid;
@@ -1287,17 +1368,21 @@ static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
                = ipmb_checksum(&(smi_msg->data[7]),
                                smi_msg->data_size-7);
 
-       /* Add on the checksum size and the offset from the
-          broadcast. */
+       /*
+        * Add on the checksum size and the offset from the
+        * broadcast.
+        */
        smi_msg->data_size += 1;
 
        smi_msg->msgid = msgid;
 }
 
-/* Separate from ipmi_request so that the user does not have to be
-   supplied in certain circumstances (mainly at panic time).  If
-   messages are supplied, they will be freed, even if an error
-   occurs. */
+/*
+ * Separate from ipmi_request so that the user does not have to be
+ * supplied in certain circumstances (mainly at panic time).  If
+ * messages are supplied, they will be freed, even if an error
+ * occurs.
+ */
 static int i_ipmi_request(ipmi_user_t          user,
                          ipmi_smi_t           intf,
                          struct ipmi_addr     *addr,
@@ -1319,19 +1404,18 @@ static int i_ipmi_request(ipmi_user_t          user,
        struct ipmi_smi_handlers *handlers;
 
 
-       if (supplied_recv) {
+       if (supplied_recv)
                recv_msg = supplied_recv;
-       } else {
+       else {
                recv_msg = ipmi_alloc_recv_msg();
-               if (recv_msg == NULL) {
+               if (recv_msg == NULL)
                        return -ENOMEM;
-               }
        }
        recv_msg->user_msg_data = user_msg_data;
 
-       if (supplied_smi) {
+       if (supplied_smi)
                smi_msg = (struct ipmi_smi_msg *) supplied_smi;
-       } else {
+       else {
                smi_msg = ipmi_alloc_smi_msg();
                if (smi_msg == NULL) {
                        ipmi_free_recv_msg(recv_msg);
@@ -1350,8 +1434,10 @@ static int i_ipmi_request(ipmi_user_t          user,
        if (user)
                kref_get(&user->refcount);
        recv_msg->msgid = msgid;
-       /* Store the message to send in the receive message so timeout
-          responses can get the proper response data. */
+       /*
+        * Store the message to send in the receive message so timeout
+        * responses can get the proper response data.
+        */
        recv_msg->msg = *msg;
 
        if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
@@ -1365,9 +1451,7 @@ static int i_ipmi_request(ipmi_user_t          user,
 
                smi_addr = (struct ipmi_system_interface_addr *) addr;
                if (smi_addr->lun > 3) {
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->sent_invalid_commands++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                       ipmi_inc_stat(intf, sent_invalid_commands);
                        rv = -EINVAL;
                        goto out_err;
                }
@@ -1377,13 +1461,12 @@ static int i_ipmi_request(ipmi_user_t          user,
                if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
                    && ((msg->cmd == IPMI_SEND_MSG_CMD)
                        || (msg->cmd == IPMI_GET_MSG_CMD)
-                       || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD)))
-               {
-                       /* We don't let the user do these, since we manage
-                          the sequence numbers. */
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->sent_invalid_commands++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                       || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
+                       /*
+                        * We don't let the user do these, since we manage
+                        * the sequence numbers.
+                        */
+                       ipmi_inc_stat(intf, sent_invalid_commands);
                        rv = -EINVAL;
                        goto out_err;
                }
@@ -1391,14 +1474,12 @@ static int i_ipmi_request(ipmi_user_t          user,
                if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
                      && ((msg->cmd == IPMI_COLD_RESET_CMD)
                          || (msg->cmd == IPMI_WARM_RESET_CMD)))
-                    || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST))
-               {
+                    || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) {
                        spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
                        intf->auto_maintenance_timeout
                                = IPMI_MAINTENANCE_MODE_TIMEOUT;
                        if (!intf->maintenance_mode
-                           && !intf->maintenance_mode_enable)
-                       {
+                           && !intf->maintenance_mode_enable) {
                                intf->maintenance_mode_enable = 1;
                                maintenance_mode_update(intf);
                        }
@@ -1407,9 +1488,7 @@ static int i_ipmi_request(ipmi_user_t          user,
                }
 
                if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->sent_invalid_commands++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                       ipmi_inc_stat(intf, sent_invalid_commands);
                        rv = -EMSGSIZE;
                        goto out_err;
                }
@@ -1421,31 +1500,23 @@ static int i_ipmi_request(ipmi_user_t          user,
                if (msg->data_len > 0)
                        memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
                smi_msg->data_size = msg->data_len + 2;
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->sent_local_commands++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+               ipmi_inc_stat(intf, sent_local_commands);
        } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE)
-                  || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
-       {
+                  || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) {
                struct ipmi_ipmb_addr *ipmb_addr;
                unsigned char         ipmb_seq;
                long                  seqid;
                int                   broadcast = 0;
 
                if (addr->channel >= IPMI_MAX_CHANNELS) {
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->sent_invalid_commands++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                       ipmi_inc_stat(intf, sent_invalid_commands);
                        rv = -EINVAL;
                        goto out_err;
                }
 
                if (intf->channels[addr->channel].medium
-                   != IPMI_CHANNEL_MEDIUM_IPMB)
-               {
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->sent_invalid_commands++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                                       != IPMI_CHANNEL_MEDIUM_IPMB) {
+                       ipmi_inc_stat(intf, sent_invalid_commands);
                        rv = -EINVAL;
                        goto out_err;
                }
@@ -1457,9 +1528,11 @@ static int i_ipmi_request(ipmi_user_t          user,
                        retries = 4;
                }
                if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
-                   /* Broadcasts add a zero at the beginning of the
-                      message, but otherwise is the same as an IPMB
-                      address. */
+                   /*
+                    * Broadcasts add a zero at the beginning of the
+                    * message, but otherwise is the same as an IPMB
+                    * address.
+                    */
                    addr->addr_type = IPMI_IPMB_ADDR_TYPE;
                    broadcast = 1;
                }
@@ -1469,21 +1542,19 @@ static int i_ipmi_request(ipmi_user_t          user,
                if (retry_time_ms == 0)
                    retry_time_ms = 1000;
 
-               /* 9 for the header and 1 for the checksum, plus
-                   possibly one for the broadcast. */
+               /*
+                * 9 for the header and 1 for the checksum, plus
+                * possibly one for the broadcast.
+                */
                if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->sent_invalid_commands++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                       ipmi_inc_stat(intf, sent_invalid_commands);
                        rv = -EMSGSIZE;
                        goto out_err;
                }
 
                ipmb_addr = (struct ipmi_ipmb_addr *) addr;
                if (ipmb_addr->lun > 3) {
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->sent_invalid_commands++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                       ipmi_inc_stat(intf, sent_invalid_commands);
                        rv = -EINVAL;
                        goto out_err;
                }
@@ -1491,29 +1562,31 @@ static int i_ipmi_request(ipmi_user_t          user,
                memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));
 
                if (recv_msg->msg.netfn & 0x1) {
-                       /* It's a response, so use the user's sequence
-                           from msgid. */
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->sent_ipmb_responses++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                       /*
+                        * It's a response, so use the user's sequence
+                        * from msgid.
+                        */
+                       ipmi_inc_stat(intf, sent_ipmb_responses);
                        format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
                                        msgid, broadcast,
                                        source_address, source_lun);
 
-                       /* Save the receive message so we can use it
-                          to deliver the response. */
+                       /*
+                        * Save the receive message so we can use it
+                        * to deliver the response.
+                        */
                        smi_msg->user_data = recv_msg;
                } else {
                        /* It's a command, so get a sequence for it. */
 
                        spin_lock_irqsave(&(intf->seq_lock), flags);
 
-                       spin_lock(&intf->counter_lock);
-                       intf->sent_ipmb_commands++;
-                       spin_unlock(&intf->counter_lock);
+                       ipmi_inc_stat(intf, sent_ipmb_commands);
 
-                       /* Create a sequence number with a 1 second
-                           timeout and 4 retries. */
+                       /*
+                        * Create a sequence number with a 1 second
+                        * timeout and 4 retries.
+                        */
                        rv = intf_next_seq(intf,
                                           recv_msg,
                                           retry_time_ms,
@@ -1522,34 +1595,42 @@ static int i_ipmi_request(ipmi_user_t          user,
                                           &ipmb_seq,
                                           &seqid);
                        if (rv) {
-                               /* We have used up all the sequence numbers,
-                                  probably, so abort. */
+                               /*
+                                * We have used up all the sequence numbers,
+                                * probably, so abort.
+                                */
                                spin_unlock_irqrestore(&(intf->seq_lock),
                                                       flags);
                                goto out_err;
                        }
 
-                       /* Store the sequence number in the message,
-                           so that when the send message response
-                           comes back we can start the timer. */
+                       /*
+                        * Store the sequence number in the message,
+                        * so that when the send message response
+                        * comes back we can start the timer.
+                        */
                        format_ipmb_msg(smi_msg, msg, ipmb_addr,
                                        STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
                                        ipmb_seq, broadcast,
                                        source_address, source_lun);
 
-                       /* Copy the message into the recv message data, so we
-                          can retransmit it later if necessary. */
+                       /*
+                        * Copy the message into the recv message data, so we
+                        * can retransmit it later if necessary.
+                        */
                        memcpy(recv_msg->msg_data, smi_msg->data,
                               smi_msg->data_size);
                        recv_msg->msg.data = recv_msg->msg_data;
                        recv_msg->msg.data_len = smi_msg->data_size;
 
-                       /* We don't unlock until here, because we need
-                           to copy the completed message into the
-                           recv_msg before we release the lock.
-                           Otherwise, race conditions may bite us.  I
-                           know that's pretty paranoid, but I prefer
-                           to be correct. */
+                       /*
+                        * We don't unlock until here, because we need
+                        * to copy the completed message into the
+                        * recv_msg before we release the lock.
+                        * Otherwise, race conditions may bite us.  I
+                        * know that's pretty paranoid, but I prefer
+                        * to be correct.
+                        */
                        spin_unlock_irqrestore(&(intf->seq_lock), flags);
                }
        } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) {
@@ -1558,21 +1639,16 @@ static int i_ipmi_request(ipmi_user_t          user,
                long                  seqid;
 
                if (addr->channel >= IPMI_MAX_CHANNELS) {
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->sent_invalid_commands++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                       ipmi_inc_stat(intf, sent_invalid_commands);
                        rv = -EINVAL;
                        goto out_err;
                }
 
                if ((intf->channels[addr->channel].medium
-                   != IPMI_CHANNEL_MEDIUM_8023LAN)
+                               != IPMI_CHANNEL_MEDIUM_8023LAN)
                    && (intf->channels[addr->channel].medium
-                       != IPMI_CHANNEL_MEDIUM_ASYNC))
-               {
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->sent_invalid_commands++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                               != IPMI_CHANNEL_MEDIUM_ASYNC)) {
+                       ipmi_inc_stat(intf, sent_invalid_commands);
                        rv = -EINVAL;
                        goto out_err;
                }
@@ -1585,18 +1661,14 @@ static int i_ipmi_request(ipmi_user_t          user,
 
                /* 11 for the header and 1 for the checksum. */
                if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->sent_invalid_commands++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                       ipmi_inc_stat(intf, sent_invalid_commands);
                        rv = -EMSGSIZE;
                        goto out_err;
                }
 
                lan_addr = (struct ipmi_lan_addr *) addr;
                if (lan_addr->lun > 3) {
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->sent_invalid_commands++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                       ipmi_inc_stat(intf, sent_invalid_commands);
                        rv = -EINVAL;
                        goto out_err;
                }
@@ -1604,28 +1676,30 @@ static int i_ipmi_request(ipmi_user_t          user,
                memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));
 
                if (recv_msg->msg.netfn & 0x1) {
-                       /* It's a response, so use the user's sequence
-                           from msgid. */
-                       spin_lock_irqsave(&intf->counter_lock, flags);
-                       intf->sent_lan_responses++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                       /*
+                        * It's a response, so use the user's sequence
+                        * from msgid.
+                        */
+                       ipmi_inc_stat(intf, sent_lan_responses);
                        format_lan_msg(smi_msg, msg, lan_addr, msgid,
                                       msgid, source_lun);
 
-                       /* Save the receive message so we can use it
-                          to deliver the response. */
+                       /*
+                        * Save the receive message so we can use it
+                        * to deliver the response.
+                        */
                        smi_msg->user_data = recv_msg;
                } else {
                        /* It's a command, so get a sequence for it. */
 
                        spin_lock_irqsave(&(intf->seq_lock), flags);
 
-                       spin_lock(&intf->counter_lock);
-                       intf->sent_lan_commands++;
-                       spin_unlock(&intf->counter_lock);
+                       ipmi_inc_stat(intf, sent_lan_commands);
 
-                       /* Create a sequence number with a 1 second
-                           timeout and 4 retries. */
+                       /*
+                        * Create a sequence number with a 1 second
+                        * timeout and 4 retries.
+                        */
                        rv = intf_next_seq(intf,
                                           recv_msg,
                                           retry_time_ms,
@@ -1634,40 +1708,46 @@ static int i_ipmi_request(ipmi_user_t          user,
                                           &ipmb_seq,
                                           &seqid);
                        if (rv) {
-                               /* We have used up all the sequence numbers,
-                                  probably, so abort. */
+                               /*
+                                * We have used up all the sequence numbers,
+                                * probably, so abort.
+                                */
                                spin_unlock_irqrestore(&(intf->seq_lock),
                                                       flags);
                                goto out_err;
                        }
 
-                       /* Store the sequence number in the message,
-                           so that when the send message response
-                           comes back we can start the timer. */
+                       /*
+                        * Store the sequence number in the message,
+                        * so that when the send message response
+                        * comes back we can start the timer.
+                        */
                        format_lan_msg(smi_msg, msg, lan_addr,
                                       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
                                       ipmb_seq, source_lun);
 
-                       /* Copy the message into the recv message data, so we
-                          can retransmit it later if necessary. */
+                       /*
+                        * Copy the message into the recv message data, so we
+                        * can retransmit it later if necessary.
+                        */
                        memcpy(recv_msg->msg_data, smi_msg->data,
                               smi_msg->data_size);
                        recv_msg->msg.data = recv_msg->msg_data;
                        recv_msg->msg.data_len = smi_msg->data_size;
 
-                       /* We don't unlock until here, because we need
-                           to copy the completed message into the
-                           recv_msg before we release the lock.
-                           Otherwise, race conditions may bite us.  I
-                           know that's pretty paranoid, but I prefer
-                           to be correct. */
+                       /*
+                        * We don't unlock until here, because we need
+                        * to copy the completed message into the
+                        * recv_msg before we release the lock.
+                        * Otherwise, race conditions may bite us.  I
+                        * know that's pretty paranoid, but I prefer
+                        * to be correct.
+                        */
                        spin_unlock_irqrestore(&(intf->seq_lock), flags);
                }
        } else {
            /* Unknown address type. */
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->sent_invalid_commands++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+               ipmi_inc_stat(intf, sent_invalid_commands);
                rv = -EINVAL;
                goto out_err;
        }
@@ -1735,6 +1815,7 @@ int ipmi_request_settime(ipmi_user_t      user,
                              retries,
                              retry_time_ms);
 }
+EXPORT_SYMBOL(ipmi_request_settime);
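/*
 * Caller-side sketch for the wrapper just exported, assuming the
 * long-standing argument order from include/linux/ipmi.h (addr, msgid,
 * msg, user_msg_data, priority, retries, retry_time_ms).  The user handle
 * comes from ipmi_create_user(); the response arrives later through that
 * user's ipmi_recv_hndl callback and is freed there.
 */
static int example_send_get_device_id(ipmi_user_t user)
{
        struct ipmi_system_interface_addr addr = {
                .addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
                .channel   = IPMI_BMC_CHANNEL,
                .lun       = 0,
        };
        struct kernel_ipmi_msg msg = {
                .netfn    = IPMI_NETFN_APP_REQUEST,     /* 0x06 */
                .cmd      = IPMI_GET_DEVICE_ID_CMD,     /* 0x01 */
                .data     = NULL,
                .data_len = 0,
        };

        /* msgid 0, no user_msg_data, default priority, 3 retries, 1s apart. */
        return ipmi_request_settime(user, (struct ipmi_addr *) &addr, 0,
                                    &msg, NULL, 0, 3, 1000);
}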
 
 int ipmi_request_supply_msgs(ipmi_user_t          user,
                             struct ipmi_addr     *addr,
@@ -1766,6 +1847,7 @@ int ipmi_request_supply_msgs(ipmi_user_t          user,
                              lun,
                              -1, 0);
 }
+EXPORT_SYMBOL(ipmi_request_supply_msgs);
 
 #ifdef CONFIG_PROC_FS
 static int ipmb_file_read_proc(char *page, char **start, off_t off,
@@ -1790,7 +1872,7 @@ static int version_file_read_proc(char *page, char **start, off_t off,
        char       *out = (char *) page;
        ipmi_smi_t intf = data;
 
-       return sprintf(out, "%d.%d\n",
+       return sprintf(out, "%u.%u\n",
                       ipmi_version_major(&intf->bmc->id),
                       ipmi_version_minor(&intf->bmc->id));
 }
@@ -1801,65 +1883,65 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
        char       *out = (char *) page;
        ipmi_smi_t intf = data;
 
-       out += sprintf(out, "sent_invalid_commands:       %d\n",
-                      intf->sent_invalid_commands);
-       out += sprintf(out, "sent_local_commands:         %d\n",
-                      intf->sent_local_commands);
-       out += sprintf(out, "handled_local_responses:     %d\n",
-                      intf->handled_local_responses);
-       out += sprintf(out, "unhandled_local_responses:   %d\n",
-                      intf->unhandled_local_responses);
-       out += sprintf(out, "sent_ipmb_commands:          %d\n",
-                      intf->sent_ipmb_commands);
-       out += sprintf(out, "sent_ipmb_command_errs:      %d\n",
-                      intf->sent_ipmb_command_errs);
-       out += sprintf(out, "retransmitted_ipmb_commands: %d\n",
-                      intf->retransmitted_ipmb_commands);
-       out += sprintf(out, "timed_out_ipmb_commands:     %d\n",
-                      intf->timed_out_ipmb_commands);
-       out += sprintf(out, "timed_out_ipmb_broadcasts:   %d\n",
-                      intf->timed_out_ipmb_broadcasts);
-       out += sprintf(out, "sent_ipmb_responses:         %d\n",
-                      intf->sent_ipmb_responses);
-       out += sprintf(out, "handled_ipmb_responses:      %d\n",
-                      intf->handled_ipmb_responses);
-       out += sprintf(out, "invalid_ipmb_responses:      %d\n",
-                      intf->invalid_ipmb_responses);
-       out += sprintf(out, "unhandled_ipmb_responses:    %d\n",
-                      intf->unhandled_ipmb_responses);
-       out += sprintf(out, "sent_lan_commands:           %d\n",
-                      intf->sent_lan_commands);
-       out += sprintf(out, "sent_lan_command_errs:       %d\n",
-                      intf->sent_lan_command_errs);
-       out += sprintf(out, "retransmitted_lan_commands:  %d\n",
-                      intf->retransmitted_lan_commands);
-       out += sprintf(out, "timed_out_lan_commands:      %d\n",
-                      intf->timed_out_lan_commands);
-       out += sprintf(out, "sent_lan_responses:          %d\n",
-                      intf->sent_lan_responses);
-       out += sprintf(out, "handled_lan_responses:       %d\n",
-                      intf->handled_lan_responses);
-       out += sprintf(out, "invalid_lan_responses:       %d\n",
-                      intf->invalid_lan_responses);
-       out += sprintf(out, "unhandled_lan_responses:     %d\n",
-                      intf->unhandled_lan_responses);
-       out += sprintf(out, "handled_commands:            %d\n",
-                      intf->handled_commands);
-       out += sprintf(out, "invalid_commands:            %d\n",
-                      intf->invalid_commands);
-       out += sprintf(out, "unhandled_commands:          %d\n",
-                      intf->unhandled_commands);
-       out += sprintf(out, "invalid_events:              %d\n",
-                      intf->invalid_events);
-       out += sprintf(out, "events:                      %d\n",
-                      intf->events);
+       out += sprintf(out, "sent_invalid_commands:       %u\n",
+                      ipmi_get_stat(intf, sent_invalid_commands));
+       out += sprintf(out, "sent_local_commands:         %u\n",
+                      ipmi_get_stat(intf, sent_local_commands));
+       out += sprintf(out, "handled_local_responses:     %u\n",
+                      ipmi_get_stat(intf, handled_local_responses));
+       out += sprintf(out, "unhandled_local_responses:   %u\n",
+                      ipmi_get_stat(intf, unhandled_local_responses));
+       out += sprintf(out, "sent_ipmb_commands:          %u\n",
+                      ipmi_get_stat(intf, sent_ipmb_commands));
+       out += sprintf(out, "sent_ipmb_command_errs:      %u\n",
+                      ipmi_get_stat(intf, sent_ipmb_command_errs));
+       out += sprintf(out, "retransmitted_ipmb_commands: %u\n",
+                      ipmi_get_stat(intf, retransmitted_ipmb_commands));
+       out += sprintf(out, "timed_out_ipmb_commands:     %u\n",
+                      ipmi_get_stat(intf, timed_out_ipmb_commands));
+       out += sprintf(out, "timed_out_ipmb_broadcasts:   %u\n",
+                      ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
+       out += sprintf(out, "sent_ipmb_responses:         %u\n",
+                      ipmi_get_stat(intf, sent_ipmb_responses));
+       out += sprintf(out, "handled_ipmb_responses:      %u\n",
+                      ipmi_get_stat(intf, handled_ipmb_responses));
+       out += sprintf(out, "invalid_ipmb_responses:      %u\n",
+                      ipmi_get_stat(intf, invalid_ipmb_responses));
+       out += sprintf(out, "unhandled_ipmb_responses:    %u\n",
+                      ipmi_get_stat(intf, unhandled_ipmb_responses));
+       out += sprintf(out, "sent_lan_commands:           %u\n",
+                      ipmi_get_stat(intf, sent_lan_commands));
+       out += sprintf(out, "sent_lan_command_errs:       %u\n",
+                      ipmi_get_stat(intf, sent_lan_command_errs));
+       out += sprintf(out, "retransmitted_lan_commands:  %u\n",
+                      ipmi_get_stat(intf, retransmitted_lan_commands));
+       out += sprintf(out, "timed_out_lan_commands:      %u\n",
+                      ipmi_get_stat(intf, timed_out_lan_commands));
+       out += sprintf(out, "sent_lan_responses:          %u\n",
+                      ipmi_get_stat(intf, sent_lan_responses));
+       out += sprintf(out, "handled_lan_responses:       %u\n",
+                      ipmi_get_stat(intf, handled_lan_responses));
+       out += sprintf(out, "invalid_lan_responses:       %u\n",
+                      ipmi_get_stat(intf, invalid_lan_responses));
+       out += sprintf(out, "unhandled_lan_responses:     %u\n",
+                      ipmi_get_stat(intf, unhandled_lan_responses));
+       out += sprintf(out, "handled_commands:            %u\n",
+                      ipmi_get_stat(intf, handled_commands));
+       out += sprintf(out, "invalid_commands:            %u\n",
+                      ipmi_get_stat(intf, invalid_commands));
+       out += sprintf(out, "unhandled_commands:          %u\n",
+                      ipmi_get_stat(intf, unhandled_commands));
+       out += sprintf(out, "invalid_events:              %u\n",
+                      ipmi_get_stat(intf, invalid_events));
+       out += sprintf(out, "events:                      %u\n",
+                      ipmi_get_stat(intf, events));
 
        return (out - ((char *) page));
 }
 #endif /* CONFIG_PROC_FS */
 
 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
-                           read_proc_t *read_proc, write_proc_t *write_proc,
+                           read_proc_t *read_proc,
                            void *data, struct module *owner)
 {
        int                    rv = 0;
@@ -1886,7 +1968,6 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
        } else {
                file->data = data;
                file->read_proc = read_proc;
-               file->write_proc = write_proc;
                file->owner = owner;
 
                mutex_lock(&smi->proc_entry_lock);
@@ -1899,6 +1980,7 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
 
        return rv;
 }
+EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
 
 static int add_proc_entries(ipmi_smi_t smi, int num)
 {
@@ -1909,23 +1991,22 @@ static int add_proc_entries(ipmi_smi_t smi, int num)
        smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
        if (!smi->proc_dir)
                rv = -ENOMEM;
-       else {
+       else
                smi->proc_dir->owner = THIS_MODULE;
-       }
 
        if (rv == 0)
                rv = ipmi_smi_add_proc_entry(smi, "stats",
-                                            stat_file_read_proc, NULL,
+                                            stat_file_read_proc,
                                             smi, THIS_MODULE);
 
        if (rv == 0)
                rv = ipmi_smi_add_proc_entry(smi, "ipmb",
-                                            ipmb_file_read_proc, NULL,
+                                            ipmb_file_read_proc,
                                             smi, THIS_MODULE);
 
        if (rv == 0)
                rv = ipmi_smi_add_proc_entry(smi, "version",
-                                            version_file_read_proc, NULL,
+                                            version_file_read_proc,
                                             smi, THIS_MODULE);
 #endif /* CONFIG_PROC_FS */
 
@@ -2210,37 +2291,47 @@ static int create_files(struct bmc_device *bmc)
 
        err = device_create_file(&bmc->dev->dev,
                           &bmc->device_id_attr);
-       if (err) goto out;
+       if (err)
+               goto out;
        err = device_create_file(&bmc->dev->dev,
                           &bmc->provides_dev_sdrs_attr);
-       if (err) goto out_devid;
+       if (err)
+               goto out_devid;
        err = device_create_file(&bmc->dev->dev,
                           &bmc->revision_attr);
-       if (err) goto out_sdrs;
+       if (err)
+               goto out_sdrs;
        err = device_create_file(&bmc->dev->dev,
                           &bmc->firmware_rev_attr);
-       if (err) goto out_rev;
+       if (err)
+               goto out_rev;
        err = device_create_file(&bmc->dev->dev,
                           &bmc->version_attr);
-       if (err) goto out_firm;
+       if (err)
+               goto out_firm;
        err = device_create_file(&bmc->dev->dev,
                           &bmc->add_dev_support_attr);
-       if (err) goto out_version;
+       if (err)
+               goto out_version;
        err = device_create_file(&bmc->dev->dev,
                           &bmc->manufacturer_id_attr);
-       if (err) goto out_add_dev;
+       if (err)
+               goto out_add_dev;
        err = device_create_file(&bmc->dev->dev,
                           &bmc->product_id_attr);
-       if (err) goto out_manu;
+       if (err)
+               goto out_manu;
        if (bmc->id.aux_firmware_revision_set) {
                err = device_create_file(&bmc->dev->dev,
                                   &bmc->aux_firmware_rev_attr);
-               if (err) goto out_prod_id;
+               if (err)
+                       goto out_prod_id;
        }
        if (bmc->guid_set) {
                err = device_create_file(&bmc->dev->dev,
                                   &bmc->guid_attr);
-               if (err) goto out_aux_firm;
+               if (err)
+                       goto out_aux_firm;
        }
 
        return 0;
@@ -2368,8 +2459,10 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
                               "ipmi_msghandler:"
                               " Unable to register bmc device: %d\n",
                               rv);
-                       /* Don't go to out_err, you can only do that if
-                          the device is registered already. */
+                       /*
+                        * Don't go to out_err, you can only do that if
+                        * the device is registered already.
+                        */
                        return rv;
                }
 
@@ -2560,17 +2653,18 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
 
        if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
            && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
-           && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD))
-       {
+           && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
                /* It's the one we want */
                if (msg->msg.data[0] != 0) {
                        /* Got an error from the channel, just go on. */
 
                        if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
-                               /* If the MC does not support this
-                                  command, that is legal.  We just
-                                  assume it has one IPMB at channel
-                                  zero. */
+                               /*
+                                * If the MC does not support this
+                                * command, that is legal.  We just
+                                * assume it has one IPMB at channel
+                                * zero.
+                                */
                                intf->channels[0].medium
                                        = IPMI_CHANNEL_MEDIUM_IPMB;
                                intf->channels[0].protocol
@@ -2591,7 +2685,7 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
                intf->channels[chan].medium = msg->msg.data[2] & 0x7f;
                intf->channels[chan].protocol = msg->msg.data[3] & 0x1f;
 
-      next_channel:
+ next_channel:
                intf->curr_channel++;
                if (intf->curr_channel >= IPMI_MAX_CHANNELS)
                        wake_up(&intf->waitq);
@@ -2619,6 +2713,7 @@ void ipmi_poll_interface(ipmi_user_t user)
        if (intf->handlers->poll)
                intf->handlers->poll(intf->send_info);
 }
+EXPORT_SYMBOL(ipmi_poll_interface);
 
 int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
                      void                     *send_info,
@@ -2633,14 +2728,18 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
        ipmi_smi_t       tintf;
        struct list_head *link;
 
-       /* Make sure the driver is actually initialized, this handles
-          problems with initialization order. */
+       /*
+        * Make sure the driver is actually initialized, this handles
+        * problems with initialization order.
+        */
        if (!initialized) {
                rv = ipmi_init_msghandler();
                if (rv)
                        return rv;
-               /* The init code doesn't return an error if it was turned
-                  off, but it won't initialize.  Check that. */
+               /*
+                * The init code doesn't return an error if it was turned
+                * off, but it won't initialize.  Check that.
+                */
                if (!initialized)
                        return -ENODEV;
        }
@@ -2688,8 +2787,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
        spin_lock_init(&intf->maintenance_mode_lock);
        INIT_LIST_HEAD(&intf->cmd_rcvrs);
        init_waitqueue_head(&intf->waitq);
+       for (i = 0; i < IPMI_NUM_STATS; i++)
+               atomic_set(&intf->stats[i], 0);
 
-       spin_lock_init(&intf->counter_lock);
        intf->proc_dir = NULL;
 
        mutex_lock(&smi_watchers_mutex);
@@ -2717,11 +2817,12 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
        get_guid(intf);
 
        if ((intf->ipmi_version_major > 1)
-           || ((intf->ipmi_version_major == 1)
-               && (intf->ipmi_version_minor >= 5)))
-       {
-               /* Start scanning the channels to see what is
-                  available. */
+                       || ((intf->ipmi_version_major == 1)
+                           && (intf->ipmi_version_minor >= 5))) {
+               /*
+                * Start scanning the channels to see what is
+                * available.
+                */
                intf->null_user_handler = channel_handler;
                intf->curr_channel = 0;
                rv = send_channel_info_cmd(intf, 0);
@@ -2769,6 +2870,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 
        return rv;
 }
+EXPORT_SYMBOL(ipmi_register_smi);
 
 static void cleanup_smi_msgs(ipmi_smi_t intf)
 {
@@ -2803,8 +2905,10 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
 
        remove_proc_entries(intf);
 
-       /* Call all the watcher interfaces to tell them that
-          an interface is gone. */
+       /*
+        * Call all the watcher interfaces to tell them that
+        * an interface is gone.
+        */
        list_for_each_entry(w, &smi_watchers, link)
                w->smi_gone(intf_num);
        mutex_unlock(&smi_watchers_mutex);
@@ -2812,22 +2916,21 @@ int ipmi_unregister_smi(ipmi_smi_t intf)
        kref_put(&intf->refcount, intf_free);
        return 0;
 }
+EXPORT_SYMBOL(ipmi_unregister_smi);
 
 static int handle_ipmb_get_msg_rsp(ipmi_smi_t          intf,
                                   struct ipmi_smi_msg *msg)
 {
        struct ipmi_ipmb_addr ipmb_addr;
        struct ipmi_recv_msg  *recv_msg;
-       unsigned long         flags;
 
-       
-       /* This is 11, not 10, because the response must contain a
-        * completion code. */
+       /*
+        * This is 11, not 10, because the response must contain a
+        * completion code.
+        */
        if (msg->rsp_size < 11) {
                /* Message not big enough, just ignore it. */
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->invalid_ipmb_responses++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+               ipmi_inc_stat(intf, invalid_ipmb_responses);
                return 0;
        }
 
@@ -2841,37 +2944,38 @@ static int handle_ipmb_get_msg_rsp(ipmi_smi_t          intf,
        ipmb_addr.channel = msg->rsp[3] & 0x0f;
        ipmb_addr.lun = msg->rsp[7] & 3;
 
-       /* It's a response from a remote entity.  Look up the sequence
-          number and handle the response. */
+       /*
+        * It's a response from a remote entity.  Look up the sequence
+        * number and handle the response.
+        */
        if (intf_find_seq(intf,
                          msg->rsp[7] >> 2,
                          msg->rsp[3] & 0x0f,
                          msg->rsp[8],
                          (msg->rsp[4] >> 2) & (~1),
                          (struct ipmi_addr *) &(ipmb_addr),
-                         &recv_msg))
-       {
-               /* We were unable to find the sequence number,
-                  so just nuke the message. */
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->unhandled_ipmb_responses++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+                         &recv_msg)) {
+               /*
+                * We were unable to find the sequence number,
+                * so just nuke the message.
+                */
+               ipmi_inc_stat(intf, unhandled_ipmb_responses);
                return 0;
        }
 
        memcpy(recv_msg->msg_data,
               &(msg->rsp[9]),
               msg->rsp_size - 9);
-       /* THe other fields matched, so no need to set them, except
-           for netfn, which needs to be the response that was
-           returned, not the request value. */
+       /*
+        * The other fields matched, so no need to set them, except
+        * for netfn, which needs to be the response that was
+        * returned, not the request value.
+        */
        recv_msg->msg.netfn = msg->rsp[4] >> 2;
        recv_msg->msg.data = recv_msg->msg_data;
        recv_msg->msg.data_len = msg->rsp_size - 10;
        recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
-       spin_lock_irqsave(&intf->counter_lock, flags);
-       intf->handled_ipmb_responses++;
-       spin_unlock_irqrestore(&intf->counter_lock, flags);
+       ipmi_inc_stat(intf, handled_ipmb_responses);
        deliver_response(recv_msg);
 
        return 0;
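For orientation, the byte offsets handle_ipmb_get_msg_rsp() uses when unpacking the response, read off the code above rather than restated from the IPMI spec:

	/*
	 * msg->rsp[] as indexed above:
	 *   rsp[2]            completion code (hence the minimum size of 11, not 10)
	 *   rsp[3] & 0x0f     channel
	 *   rsp[4] >> 2       NetFN of the embedded response
	 *   rsp[6]            responder slave address
	 *   rsp[7]            sequence number (>> 2) and LUN (& 3)
	 *   rsp[8]            command
	 *   rsp[9]..          response data; data_len is rsp_size - 10 because the
	 *                     trailing checksum byte is not counted
	 */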
@@ -2888,14 +2992,11 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
        ipmi_user_t              user = NULL;
        struct ipmi_ipmb_addr    *ipmb_addr;
        struct ipmi_recv_msg     *recv_msg;
-       unsigned long            flags;
        struct ipmi_smi_handlers *handlers;
 
        if (msg->rsp_size < 10) {
                /* Message not big enough, just ignore it. */
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->invalid_commands++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+               ipmi_inc_stat(intf, invalid_commands);
                return 0;
        }
 
@@ -2919,19 +3020,17 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
 
        if (user == NULL) {
                /* We didn't find a user, deliver an error response. */
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->unhandled_commands++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+               ipmi_inc_stat(intf, unhandled_commands);
 
                msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
                msg->data[1] = IPMI_SEND_MSG_CMD;
                msg->data[2] = msg->rsp[3];
                msg->data[3] = msg->rsp[6];
-                msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
+               msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
                msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
                msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address;
-                /* rqseq/lun */
-                msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
+               /* rqseq/lun */
+               msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
                msg->data[8] = msg->rsp[8]; /* cmd */
                msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
                msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
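The error response above is assembled by hand, including its two checksum bytes. ipmb_checksum() is not part of this hunk; it is the usual IPMI two's-complement checksum, roughly (sketch):

	static unsigned char ipmb_checksum(unsigned char *data, int size)
	{
		unsigned char csum = 0;

		for (; size > 0; size--, data++)
			csum += *data;

		/* Two's complement: the sum of the covered bytes plus csum is 0. */
		return -csum;
	}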
@@ -2950,23 +3049,25 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
                handlers = intf->handlers;
                if (handlers) {
                        handlers->sender(intf->send_info, msg, 0);
-                       /* We used the message, so return the value
-                          that causes it to not be freed or
-                          queued. */
+                       /*
+                        * We used the message, so return the value
+                        * that causes it to not be freed or
+                        * queued.
+                        */
                        rv = -1;
                }
                rcu_read_unlock();
        } else {
                /* Deliver the message to the user. */
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->handled_commands++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+               ipmi_inc_stat(intf, handled_commands);
 
                recv_msg = ipmi_alloc_recv_msg();
                if (!recv_msg) {
-                       /* We couldn't allocate memory for the
-                           message, so requeue it for handling
-                           later. */
+                       /*
+                        * We couldn't allocate memory for the
+                        * message, so requeue it for handling
+                        * later.
+                        */
                        rv = 1;
                        kref_put(&user->refcount, free_user);
                } else {
@@ -2977,8 +3078,10 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
                        ipmb_addr->lun = msg->rsp[7] & 3;
                        ipmb_addr->channel = msg->rsp[3] & 0xf;
 
-                       /* Extract the rest of the message information
-                          from the IPMB header.*/
+                       /*
+                        * Extract the rest of the message information
+                        * from the IPMB header.
+                        */
                        recv_msg->user = user;
                        recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
                        recv_msg->msgid = msg->rsp[7] >> 2;
@@ -2986,8 +3089,10 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t          intf,
                        recv_msg->msg.cmd = msg->rsp[8];
                        recv_msg->msg.data = recv_msg->msg_data;
 
-                       /* We chop off 10, not 9 bytes because the checksum
-                          at the end also needs to be removed. */
+                       /*
+                        * We chop off 10, not 9 bytes because the checksum
+                        * at the end also needs to be removed.
+                        */
                        recv_msg->msg.data_len = msg->rsp_size - 10;
                        memcpy(recv_msg->msg_data,
                               &(msg->rsp[9]),
@@ -3004,16 +3109,15 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t          intf,
 {
        struct ipmi_lan_addr  lan_addr;
        struct ipmi_recv_msg  *recv_msg;
-       unsigned long         flags;
 
 
-       /* This is 13, not 12, because the response must contain a
-        * completion code. */
+       /*
+        * This is 13, not 12, because the response must contain a
+        * completion code.
+        */
        if (msg->rsp_size < 13) {
                /* Message not big enough, just ignore it. */
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->invalid_lan_responses++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+               ipmi_inc_stat(intf, invalid_lan_responses);
                return 0;
        }
 
@@ -3030,37 +3134,38 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t          intf,
        lan_addr.privilege = msg->rsp[3] >> 4;
        lan_addr.lun = msg->rsp[9] & 3;
 
-       /* It's a response from a remote entity.  Look up the sequence
-          number and handle the response. */
+       /*
+        * It's a response from a remote entity.  Look up the sequence
+        * number and handle the response.
+        */
        if (intf_find_seq(intf,
                          msg->rsp[9] >> 2,
                          msg->rsp[3] & 0x0f,
                          msg->rsp[10],
                          (msg->rsp[6] >> 2) & (~1),
                          (struct ipmi_addr *) &(lan_addr),
-                         &recv_msg))
-       {
-               /* We were unable to find the sequence number,
-                  so just nuke the message. */
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->unhandled_lan_responses++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+                         &recv_msg)) {
+               /*
+                * We were unable to find the sequence number,
+                * so just nuke the message.
+                */
+               ipmi_inc_stat(intf, unhandled_lan_responses);
                return 0;
        }
 
        memcpy(recv_msg->msg_data,
               &(msg->rsp[11]),
               msg->rsp_size - 11);
-       /* The other fields matched, so no need to set them, except
-           for netfn, which needs to be the response that was
-           returned, not the request value. */
+       /*
+        * The other fields matched, so no need to set them, except
+        * for netfn, which needs to be the response that was
+        * returned, not the request value.
+        */
        recv_msg->msg.netfn = msg->rsp[6] >> 2;
        recv_msg->msg.data = recv_msg->msg_data;
        recv_msg->msg.data_len = msg->rsp_size - 12;
        recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
-       spin_lock_irqsave(&intf->counter_lock, flags);
-       intf->handled_lan_responses++;
-       spin_unlock_irqrestore(&intf->counter_lock, flags);
+       ipmi_inc_stat(intf, handled_lan_responses);
        deliver_response(recv_msg);
 
        return 0;
@@ -3077,13 +3182,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t          intf,
        ipmi_user_t              user = NULL;
        struct ipmi_lan_addr     *lan_addr;
        struct ipmi_recv_msg     *recv_msg;
-       unsigned long            flags;
 
        if (msg->rsp_size < 12) {
                /* Message not big enough, just ignore it. */
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->invalid_commands++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+               ipmi_inc_stat(intf, invalid_commands);
                return 0;
        }
 
@@ -3107,23 +3209,23 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t          intf,
 
        if (user == NULL) {
                /* We didn't find a user, just give up. */
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->unhandled_commands++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+               ipmi_inc_stat(intf, unhandled_commands);
 
-               rv = 0; /* Don't do anything with these messages, just
-                          allow them to be freed. */
+               /*
+                * Don't do anything with these messages, just allow
+                * them to be freed.
+                */
+               rv = 0;
        } else {
                /* Deliver the message to the user. */
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->handled_commands++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+               ipmi_inc_stat(intf, handled_commands);
 
                recv_msg = ipmi_alloc_recv_msg();
                if (!recv_msg) {
-                       /* We couldn't allocate memory for the
-                           message, so requeue it for handling
-                           later. */
+                       /*
+                        * We couldn't allocate memory for the
+                        * message, so requeue it for handling later.
+                        */
                        rv = 1;
                        kref_put(&user->refcount, free_user);
                } else {
@@ -3137,8 +3239,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t          intf,
                        lan_addr->channel = msg->rsp[3] & 0xf;
                        lan_addr->privilege = msg->rsp[3] >> 4;
 
-                       /* Extract the rest of the message information
-                          from the IPMB header.*/
+                       /*
+                        * Extract the rest of the message information
+                        * from the IPMB header.
+                        */
                        recv_msg->user = user;
                        recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
                        recv_msg->msgid = msg->rsp[9] >> 2;
@@ -3146,8 +3250,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t          intf,
                        recv_msg->msg.cmd = msg->rsp[10];
                        recv_msg->msg.data = recv_msg->msg_data;
 
-                       /* We chop off 12, not 11 bytes because the checksum
-                          at the end also needs to be removed. */
+                       /*
+                        * We chop off 12, not 11 bytes because the checksum
+                        * at the end also needs to be removed.
+                        */
                        recv_msg->msg.data_len = msg->rsp_size - 12;
                        memcpy(recv_msg->msg_data,
                               &(msg->rsp[11]),
@@ -3163,7 +3269,7 @@ static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
                                     struct ipmi_smi_msg  *msg)
 {
        struct ipmi_system_interface_addr *smi_addr;
-       
+
        recv_msg->msgid = 0;
        smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
        smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
@@ -3189,9 +3295,7 @@ static int handle_read_event_rsp(ipmi_smi_t          intf,
 
        if (msg->rsp_size < 19) {
                /* Message is too small to be an IPMB event. */
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->invalid_events++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+               ipmi_inc_stat(intf, invalid_events);
                return 0;
        }
 
@@ -3204,12 +3308,12 @@ static int handle_read_event_rsp(ipmi_smi_t          intf,
 
        spin_lock_irqsave(&intf->events_lock, flags);
 
-       spin_lock(&intf->counter_lock);
-       intf->events++;
-       spin_unlock(&intf->counter_lock);
+       ipmi_inc_stat(intf, events);
 
-       /* Allocate and fill in one message for every user that is getting
-          events. */
+       /*
+        * Allocate and fill in one message for every user that is
+        * getting events.
+        */
        rcu_read_lock();
        list_for_each_entry_rcu(user, &intf->users, link) {
                if (!user->gets_events)
@@ -3223,9 +3327,11 @@ static int handle_read_event_rsp(ipmi_smi_t          intf,
                                list_del(&recv_msg->link);
                                ipmi_free_recv_msg(recv_msg);
                        }
-                       /* We couldn't allocate memory for the
-                           message, so requeue it for handling
-                           later. */
+                       /*
+                        * We couldn't allocate memory for the
+                        * message, so requeue it for handling
+                        * later.
+                        */
                        rv = 1;
                        goto out;
                }
@@ -3246,13 +3352,17 @@ static int handle_read_event_rsp(ipmi_smi_t          intf,
                        deliver_response(recv_msg);
                }
        } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
-               /* No one to receive the message, put it in queue if there's
-                  not already too many things in the queue. */
+               /*
+                * No one to receive the message, put it in queue if there's
+                * not already too many things in the queue.
+                */
                recv_msg = ipmi_alloc_recv_msg();
                if (!recv_msg) {
-                       /* We couldn't allocate memory for the
-                           message, so requeue it for handling
-                           later. */
+                       /*
+                        * We couldn't allocate memory for the
+                        * message, so requeue it for handling
+                        * later.
+                        */
                        rv = 1;
                        goto out;
                }
@@ -3260,11 +3370,14 @@ static int handle_read_event_rsp(ipmi_smi_t          intf,
                copy_event_into_recv_msg(recv_msg, msg);
                list_add_tail(&(recv_msg->link), &(intf->waiting_events));
                intf->waiting_events_count++;
-       } else {
-               /* There's too many things in the queue, discard this
-                  message. */
-               printk(KERN_WARNING PFX "Event queue full, discarding an"
-                      " incoming event\n");
+       } else if (!intf->event_msg_printed) {
+               /*
+                * There's too many things in the queue, discard this
+                * message.
+                */
+               printk(KERN_WARNING PFX "Event queue full, discarding"
+                      " incoming events\n");
+               intf->event_msg_printed = 1;
        }
 
  out:
@@ -3277,16 +3390,15 @@ static int handle_bmc_rsp(ipmi_smi_t          intf,
                          struct ipmi_smi_msg *msg)
 {
        struct ipmi_recv_msg *recv_msg;
-       unsigned long        flags;
        struct ipmi_user     *user;
 
        recv_msg = (struct ipmi_recv_msg *) msg->user_data;
-       if (recv_msg == NULL)
-       {
-               printk(KERN_WARNING"IPMI message received with no owner. This\n"
-                       "could be because of a malformed message, or\n"
-                       "because of a hardware error.  Contact your\n"
-                       "hardware vender for assistance\n");
+       if (recv_msg == NULL) {
+               printk(KERN_WARNING
+                      "IPMI message received with no owner. This\n"
+                      "could be because of a malformed message, or\n"
+                      "because of a hardware error.  Contact your\n"
+                      "hardware vender for assistance\n");
                return 0;
        }
 
@@ -3294,16 +3406,12 @@ static int handle_bmc_rsp(ipmi_smi_t          intf,
        /* Make sure the user still exists. */
        if (user && !user->valid) {
                /* The user for the message went away, so give up. */
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->unhandled_local_responses++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+               ipmi_inc_stat(intf, unhandled_local_responses);
                ipmi_free_recv_msg(recv_msg);
        } else {
                struct ipmi_system_interface_addr *smi_addr;
 
-               spin_lock_irqsave(&intf->counter_lock, flags);
-               intf->handled_local_responses++;
-               spin_unlock_irqrestore(&intf->counter_lock, flags);
+               ipmi_inc_stat(intf, handled_local_responses);
                recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
                recv_msg->msgid = msg->msgid;
                smi_addr = ((struct ipmi_system_interface_addr *)
@@ -3324,9 +3432,11 @@ static int handle_bmc_rsp(ipmi_smi_t          intf,
        return 0;
 }
 
-/* Handle a new message.  Return 1 if the message should be requeued,
-   0 if the message should be freed, or -1 if the message should not
-   be freed or requeued. */
+/*
+ * Handle a new message.  Return 1 if the message should be requeued,
+ * 0 if the message should be freed, or -1 if the message should not
+ * be freed or requeued.
+ */
 static int handle_new_recv_msg(ipmi_smi_t          intf,
                               struct ipmi_smi_msg *msg)
 {
@@ -3351,10 +3461,12 @@ static int handle_new_recv_msg(ipmi_smi_t          intf,
                msg->rsp[1] = msg->data[1];
                msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
                msg->rsp_size = 3;
-       } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */
-                  || (msg->rsp[1] != msg->data[1]))              /* Command */
-       {
-               /* The response is not even marginally correct. */
+       } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
+                  || (msg->rsp[1] != msg->data[1])) {
+               /*
+                * The NetFN and Command in the response are not even
+                * marginally correct.
+                */
                printk(KERN_WARNING PFX "BMC returned incorrect response,"
                       " expected netfn %x cmd %x, got netfn %x cmd %x\n",
                       (msg->data[0] >> 2) | 1, msg->data[1],
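The check above leans on the IPMI convention that a response NetFN is the request NetFN with its low bit set. A worked example with illustrative values:

	/*
	 * Request:  data[0] = IPMI_NETFN_APP_REQUEST << 2, i.e. 0x06 << 2 = 0x18.
	 * Response: (rsp[0] >> 2) must equal (data[0] >> 2) | 1 = 0x07 (App response),
	 *           and rsp[1] must echo the command code sent in data[1].
	 */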
@@ -3369,10 +3481,11 @@ static int handle_new_recv_msg(ipmi_smi_t          intf,
 
        if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
            && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
-           && (msg->user_data != NULL))
-       {
-               /* It's a response to a response we sent.  For this we
-                  deliver a send message response to the user. */
+           && (msg->user_data != NULL)) {
+               /*
+                * It's a response to a response we sent.  For this we
+                * deliver a send message response to the user.
+                */
                struct ipmi_recv_msg     *recv_msg = msg->user_data;
 
                requeue = 0;
@@ -3398,8 +3511,7 @@ static int handle_new_recv_msg(ipmi_smi_t          intf,
                recv_msg->msg_data[0] = msg->rsp[2];
                deliver_response(recv_msg);
        } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
-                  && (msg->rsp[1] == IPMI_GET_MSG_CMD))
-       {
+                  && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
                /* It's from the receive queue. */
                chan = msg->rsp[3] & 0xf;
                if (chan >= IPMI_MAX_CHANNELS) {
@@ -3411,12 +3523,16 @@ static int handle_new_recv_msg(ipmi_smi_t          intf,
                switch (intf->channels[chan].medium) {
                case IPMI_CHANNEL_MEDIUM_IPMB:
                        if (msg->rsp[4] & 0x04) {
-                               /* It's a response, so find the
-                                  requesting message and send it up. */
+                               /*
+                                * It's a response, so find the
+                                * requesting message and send it up.
+                                */
                                requeue = handle_ipmb_get_msg_rsp(intf, msg);
                        } else {
-                               /* It's a command to the SMS from some other
-                                  entity.  Handle that. */
+                               /*
+                                * It's a command to the SMS from some other
+                                * entity.  Handle that.
+                                */
                                requeue = handle_ipmb_get_msg_cmd(intf, msg);
                        }
                        break;
@@ -3424,25 +3540,30 @@ static int handle_new_recv_msg(ipmi_smi_t          intf,
                case IPMI_CHANNEL_MEDIUM_8023LAN:
                case IPMI_CHANNEL_MEDIUM_ASYNC:
                        if (msg->rsp[6] & 0x04) {
-                               /* It's a response, so find the
-                                  requesting message and send it up. */
+                               /*
+                                * It's a response, so find the
+                                * requesting message and send it up.
+                                */
                                requeue = handle_lan_get_msg_rsp(intf, msg);
                        } else {
-                               /* It's a command to the SMS from some other
-                                  entity.  Handle that. */
+                               /*
+                                * It's a command to the SMS from some other
+                                * entity.  Handle that.
+                                */
                                requeue = handle_lan_get_msg_cmd(intf, msg);
                        }
                        break;
 
                default:
-                       /* We don't handle the channel type, so just
-                        * free the message. */
+                       /*
+                        * We don't handle the channel type, so just
+                        * free the message.
+                        */
                        requeue = 0;
                }
 
        } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
-                  && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD))
-       {
+                  && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
                /* It's an asynchronous event. */
                requeue = handle_read_event_rsp(intf, msg);
        } else {
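In the dispatch above, whether the embedded message is a response or a command is decided by the low bit of its NetFN, which sits in bit 2 of the NetFN/LUN byte (rsp[4] for IPMB, rsp[6] for LAN); responses always carry an odd NetFN. Reduced to its core for the IPMB case (a sketch of the logic above, not new behaviour):

	if (msg->rsp[4] & 0x04)		/* odd NetFN: a response coming back to us */
		requeue = handle_ipmb_get_msg_rsp(intf, msg);
	else				/* even NetFN: a command addressed to us */
		requeue = handle_ipmb_get_msg_cmd(intf, msg);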
@@ -3458,71 +3579,82 @@ static int handle_new_recv_msg(ipmi_smi_t          intf,
 void ipmi_smi_msg_received(ipmi_smi_t          intf,
                           struct ipmi_smi_msg *msg)
 {
-       unsigned long flags;
+       unsigned long flags = 0; /* keep us warning-free. */
        int           rv;
+       int           run_to_completion;
 
 
        if ((msg->data_size >= 2)
            && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
            && (msg->data[1] == IPMI_SEND_MSG_CMD)
-           && (msg->user_data == NULL))
-       {
-               /* This is the local response to a command send, start
-                   the timer for these.  The user_data will not be
-                   NULL if this is a response send, and we will let
-                   response sends just go through. */
-
-               /* Check for errors, if we get certain errors (ones
-                   that mean basically we can try again later), we
-                   ignore them and start the timer.  Otherwise we
-                   report the error immediately. */
+           && (msg->user_data == NULL)) {
+               /*
+                * This is the local response to a command send, start
+                * the timer for these.  The user_data will not be
+                * NULL if this is a response send, and we will let
+                * response sends just go through.
+                */
+
+               /*
+                * Check for errors, if we get certain errors (ones
+                * that mean basically we can try again later), we
+                * ignore them and start the timer.  Otherwise we
+                * report the error immediately.
+                */
                if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
                    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
                    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
                    && (msg->rsp[2] != IPMI_BUS_ERR)
-                   && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR))
-               {
+                   && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
                        int chan = msg->rsp[3] & 0xf;
 
                        /* Got an error sending the message, handle it. */
-                       spin_lock_irqsave(&intf->counter_lock, flags);
                        if (chan >= IPMI_MAX_CHANNELS)
                                ; /* This shouldn't happen */
                        else if ((intf->channels[chan].medium
                                  == IPMI_CHANNEL_MEDIUM_8023LAN)
                                 || (intf->channels[chan].medium
                                     == IPMI_CHANNEL_MEDIUM_ASYNC))
-                               intf->sent_lan_command_errs++;
+                               ipmi_inc_stat(intf, sent_lan_command_errs);
                        else
-                               intf->sent_ipmb_command_errs++;
-                       spin_unlock_irqrestore(&intf->counter_lock, flags);
+                               ipmi_inc_stat(intf, sent_ipmb_command_errs);
                        intf_err_seq(intf, msg->msgid, msg->rsp[2]);
-               } else {
+               } else
                        /* The message was sent, start the timer. */
                        intf_start_seq_timer(intf, msg->msgid);
-               }
 
                ipmi_free_smi_msg(msg);
                goto out;
        }
 
-       /* To preserve message order, if the list is not empty, we
-           tack this message onto the end of the list. */
-       spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+       /*
+        * To preserve message order, if the list is not empty, we
+        * tack this message onto the end of the list.
+        */
+       run_to_completion = intf->run_to_completion;
+       if (!run_to_completion)
+               spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
        if (!list_empty(&intf->waiting_msgs)) {
                list_add_tail(&msg->link, &intf->waiting_msgs);
-               spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+               if (!run_to_completion)
+                       spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
                goto out;
        }
-       spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
-               
+       if (!run_to_completion)
+               spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+
        rv = handle_new_recv_msg(intf, msg);
        if (rv > 0) {
-               /* Could not handle the message now, just add it to a
-                   list to handle later. */
-               spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
+               /*
+                * Could not handle the message now, just add it to a
+                * list to handle later.
+                */
+               run_to_completion = intf->run_to_completion;
+               if (!run_to_completion)
+                       spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
                list_add_tail(&msg->link, &intf->waiting_msgs);
-               spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+               if (!run_to_completion)
+                       spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
        } else if (rv == 0) {
                ipmi_free_smi_msg(msg);
        }
@@ -3530,6 +3662,7 @@ void ipmi_smi_msg_received(ipmi_smi_t          intf,
  out:
        return;
 }
+EXPORT_SYMBOL(ipmi_smi_msg_received);
 
 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
 {
@@ -3544,7 +3677,7 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
        }
        rcu_read_unlock();
 }
-
+EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
 
 static struct ipmi_smi_msg *
 smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
@@ -3552,14 +3685,16 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
 {
        struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
        if (!smi_msg)
-               /* If we can't allocate the message, then just return, we
-                  get 4 retries, so this should be ok. */
+               /*
+                * If we can't allocate the message, then just return, we
+                * get 4 retries, so this should be ok.
+                */
                return NULL;
 
        memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
        smi_msg->data_size = recv_msg->msg.data_len;
        smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
-               
+
 #ifdef DEBUG_MSGING
        {
                int m;
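smi_from_recv_msg() stores the sequence-table slot and its per-use id in the opaque msgid so the eventual response can be matched back to the original request. The exact field widths are not visible in this hunk; the packing is roughly of this shape (widths are assumptions, chosen only to be self-consistent):

	#define STORE_SEQ_IN_MSGID(seq, seqid)	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
	#define GET_SEQ_FROM_MSGID(msgid)	(((msgid) >> 26) & 0x3f)
	#define GET_SEQID_FROM_MSGID(msgid)	((msgid) & 0x3ffffff)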
@@ -3594,28 +3729,26 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
                ent->inuse = 0;
                msg = ent->recv_msg;
                list_add_tail(&msg->link, timeouts);
-               spin_lock(&intf->counter_lock);
                if (ent->broadcast)
-                       intf->timed_out_ipmb_broadcasts++;
+                       ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
                else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
-                       intf->timed_out_lan_commands++;
+                       ipmi_inc_stat(intf, timed_out_lan_commands);
                else
-                       intf->timed_out_ipmb_commands++;
-               spin_unlock(&intf->counter_lock);
+                       ipmi_inc_stat(intf, timed_out_ipmb_commands);
        } else {
                struct ipmi_smi_msg *smi_msg;
                /* More retries, send again. */
 
-               /* Start with the max timer, set to normal
-                  timer after the message is sent. */
+               /*
+                * Start with the max timer, set to normal timer after
+                * the message is sent.
+                */
                ent->timeout = MAX_MSG_TIMEOUT;
                ent->retries_left--;
-               spin_lock(&intf->counter_lock);
                if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE)
-                       intf->retransmitted_lan_commands++;
+                       ipmi_inc_stat(intf, retransmitted_lan_commands);
                else
-                       intf->retransmitted_ipmb_commands++;
-               spin_unlock(&intf->counter_lock);
+                       ipmi_inc_stat(intf, retransmitted_ipmb_commands);
 
                smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
                                            ent->seqid);
@@ -3624,11 +3757,13 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 
                spin_unlock_irqrestore(&intf->seq_lock, *flags);
 
-               /* Send the new message.  We send with a zero
-                * priority.  It timed out, I doubt time is
-                * that critical now, and high priority
-                * messages are really only for messages to the
-                * local MC, which don't get resent. */
+               /*
+                * Send the new message.  We send with a zero
+                * priority.  It timed out, I doubt time is that
+                * critical now, and high priority messages are really
+                * only for messages to the local MC, which don't get
+                * resent.
+                */
                handlers = intf->handlers;
                if (handlers)
                        intf->handlers->sender(intf->send_info,
@@ -3659,16 +3794,20 @@ static void ipmi_timeout_handler(long timeout_period)
                                list_del(&smi_msg->link);
                                ipmi_free_smi_msg(smi_msg);
                        } else {
-                               /* To preserve message order, quit if we
-                                  can't handle a message. */
+                               /*
+                                * To preserve message order, quit if we
+                                * can't handle a message.
+                                */
                                break;
                        }
                }
                spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
 
-               /* Go through the seq table and find any messages that
-                  have timed out, putting them in the timeouts
-                  list. */
+               /*
+                * Go through the seq table and find any messages that
+                * have timed out, putting them in the timeouts
+                * list.
+                */
                INIT_LIST_HEAD(&timeouts);
                spin_lock_irqsave(&intf->seq_lock, flags);
                for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
@@ -3694,8 +3833,7 @@ static void ipmi_timeout_handler(long timeout_period)
                                intf->auto_maintenance_timeout
                                        -= timeout_period;
                                if (!intf->maintenance_mode
-                                   && (intf->auto_maintenance_timeout <= 0))
-                               {
+                                   && (intf->auto_maintenance_timeout <= 0)) {
                                        intf->maintenance_mode_enable = 0;
                                        maintenance_mode_update(intf);
                                }
@@ -3713,8 +3851,10 @@ static void ipmi_request_event(void)
        struct ipmi_smi_handlers *handlers;
 
        rcu_read_lock();
-       /* Called from the timer, no need to check if handlers is
-        * valid. */
+       /*
+        * Called from the timer, no need to check if handlers is
+        * valid.
+        */
        list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
                /* No event requests when in maintenance mode. */
                if (intf->maintenance_mode_enable)
@@ -3735,10 +3875,12 @@ static struct timer_list ipmi_timer;
 /* How many jiffies does it take to get to the timeout time. */
 #define IPMI_TIMEOUT_JIFFIES   ((IPMI_TIMEOUT_TIME * HZ) / 1000)
 
-/* Request events from the queue every second (this is the number of
-   IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
-   future, IPMI will add a way to know immediately if an event is in
-   the queue and this silliness can go away. */
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
 #define IPMI_REQUEST_EV_TIME   (1000 / (IPMI_TIMEOUT_TIME))
 
 static atomic_t stop_operation;
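As a quick sanity check on the macro above: assuming IPMI_TIMEOUT_TIME is 1000 (milliseconds, as the IPMI_TIMEOUT_JIFFIES conversion suggests), IPMI_REQUEST_EV_TIME works out to 1000 / 1000 = 1, i.e. an event request is issued on every timeout tick, which is what "every second" in the comment means.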
@@ -3782,6 +3924,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
        }
        return rv;
 }
+EXPORT_SYMBOL(ipmi_alloc_smi_msg);
 
 static void free_recv_msg(struct ipmi_recv_msg *msg)
 {
@@ -3789,7 +3932,7 @@ static void free_recv_msg(struct ipmi_recv_msg *msg)
        kfree(msg);
 }
 
-struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
+static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
 {
        struct ipmi_recv_msg *rv;
 
@@ -3808,6 +3951,7 @@ void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
                kref_put(&msg->user->refcount, free_user);
        msg->done(msg);
 }
+EXPORT_SYMBOL(ipmi_free_recv_msg);
 
 #ifdef CONFIG_IPMI_PANIC_EVENT
 
@@ -3825,8 +3969,7 @@ static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
        if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
            && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
            && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
-           && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
-       {
+           && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
                /* A get event receiver command, save it. */
                intf->event_receiver = msg->msg.data[1];
                intf->event_receiver_lun = msg->msg.data[2] & 0x3;
@@ -3838,10 +3981,11 @@ static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
        if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
            && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
            && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
-           && (msg->msg.data[0] == IPMI_CC_NO_ERROR))
-       {
-               /* A get device id command, save if we are an event
-                  receiver or generator. */
+           && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
+               /*
+                * A get device id command, save if we are an event
+                * receiver or generator.
+                */
                intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
                intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
        }
@@ -3874,8 +4018,10 @@ static void send_panic_events(char *str)
        data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
        data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */
 
-       /* Put a few breadcrumbs in.  Hopefully later we can add more things
-          to make the panic events more useful. */
+       /*
+        * Put a few breadcrumbs in.  Hopefully later we can add more things
+        * to make the panic events more useful.
+        */
        if (str) {
                data[3] = str[0];
                data[6] = str[1];
@@ -3891,6 +4037,7 @@ static void send_panic_events(char *str)
                        /* Interface is not ready. */
                        continue;
 
+               intf->run_to_completion = 1;
                /* Send the event announcing the panic. */
                intf->handlers->set_run_to_completion(intf->send_info, 1);
                i_ipmi_request(NULL,
@@ -3908,9 +4055,11 @@ static void send_panic_events(char *str)
        }
 
 #ifdef CONFIG_IPMI_PANIC_STRING
-       /* On every interface, dump a bunch of OEM event holding the
-          string. */
-       if (!str) 
+       /*
+        * On every interface, dump a bunch of OEM event holding the
+        * string.
+        */
+       if (!str)
                return;
 
        /* For every registered interface, send the event. */
@@ -3931,11 +4080,13 @@ static void send_panic_events(char *str)
                 */
                smp_rmb();
 
-               /* First job here is to figure out where to send the
-                  OEM events.  There's no way in IPMI to send OEM
-                  events using an event send command, so we have to
-                  find the SEL to put them in and stick them in
-                  there. */
+               /*
+                * First job here is to figure out where to send the
+                * OEM events.  There's no way in IPMI to send OEM
+                * events using an event send command, so we have to
+                * find the SEL to put them in and stick them in
+                * there.
+                */
 
                /* Get capabilities from the get device id. */
                intf->local_sel_device = 0;
@@ -3983,24 +4134,29 @@ static void send_panic_events(char *str)
                }
                intf->null_user_handler = NULL;
 
-               /* Validate the event receiver.  The low bit must not
-                  be 1 (it must be a valid IPMB address), it cannot
-                  be zero, and it must not be my address. */
-                if (((intf->event_receiver & 1) == 0)
+               /*
+                * Validate the event receiver.  The low bit must not
+                * be 1 (it must be a valid IPMB address), it cannot
+                * be zero, and it must not be my address.
+                */
+               if (((intf->event_receiver & 1) == 0)
                    && (intf->event_receiver != 0)
-                   && (intf->event_receiver != intf->channels[0].address))
-               {
-                       /* The event receiver is valid, send an IPMB
-                          message. */
+                   && (intf->event_receiver != intf->channels[0].address)) {
+                       /*
+                        * The event receiver is valid, send an IPMB
+                        * message.
+                        */
                        ipmb = (struct ipmi_ipmb_addr *) &addr;
                        ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
                        ipmb->channel = 0; /* FIXME - is this right? */
                        ipmb->lun = intf->event_receiver_lun;
                        ipmb->slave_addr = intf->event_receiver;
                } else if (intf->local_sel_device) {
-                       /* The event receiver was not valid (or was
-                          me), but I am an SEL device, just dump it
-                          in my SEL. */
+                       /*
+                        * The event receiver was not valid (or was
+                        * me), but I am an SEL device, just dump it
+                        * in my SEL.
+                        */
                        si = (struct ipmi_system_interface_addr *) &addr;
                        si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
                        si->channel = IPMI_BMC_CHANNEL;
@@ -4008,7 +4164,6 @@ static void send_panic_events(char *str)
                } else
                        continue; /* Nowhere to send the event. */
 
-               
                msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
                msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
                msg.data = data;
@@ -4025,8 +4180,10 @@ static void send_panic_events(char *str)
                        data[2] = 0xf0; /* OEM event without timestamp. */
                        data[3] = intf->channels[0].address;
                        data[4] = j++; /* sequence # */
-                       /* Always give 11 bytes, so strncpy will fill
-                          it with zeroes for me. */
+                       /*
+                        * Always give 11 bytes, so strncpy will fill
+                        * it with zeroes for me.
+                        */
                        strncpy(data+5, p, 11);
                        p += size;
 
@@ -4043,7 +4200,7 @@ static void send_panic_events(char *str)
                                       intf->channels[0].lun,
                                       0, 1); /* no retry, and no wait. */
                }
-       }       
+       }
 #endif /* CONFIG_IPMI_PANIC_STRING */
 }
 #endif /* CONFIG_IPMI_PANIC_EVENT */
@@ -4052,7 +4209,7 @@ static int has_panicked;
 
 static int panic_event(struct notifier_block *this,
                       unsigned long         event,
-                       void                  *ptr)
+                      void                  *ptr)
 {
        ipmi_smi_t intf;
 
@@ -4066,6 +4223,7 @@ static int panic_event(struct notifier_block *this,
                        /* Interface is not ready. */
                        continue;
 
+               intf->run_to_completion = 1;
                intf->handlers->set_run_to_completion(intf->send_info, 1);
        }
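Setting intf->run_to_completion here (and in send_panic_events()) is what allows ipmi_smi_msg_received() above to skip waiting_msgs_lock: at panic time everything is single-threaded and polled, and taking a spinlock could deadlock against whatever the panicking CPU already holds. The pattern, reduced to its essentials (a sketch of the code added above):

	run_to_completion = intf->run_to_completion;
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
	list_add_tail(&msg->link, &intf->waiting_msgs);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);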
 
@@ -4133,11 +4291,16 @@ static __exit void cleanup_ipmi(void)
 
        atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);
 
-       /* This can't be called if any interfaces exist, so no worry about
-          shutting down the interfaces. */
+       /*
+        * This can't be called if any interfaces exist, so no worry
+        * about shutting down the interfaces.
+        */
 
-       /* Tell the timer to stop, then wait for it to stop.  This avoids
-          problems with race conditions removing the timer here. */
+       /*
+        * Tell the timer to stop, then wait for it to stop.  This
+        * avoids problems with race conditions removing the timer
+        * here.
+        */
        atomic_inc(&stop_operation);
        del_timer_sync(&ipmi_timer);
 
@@ -4164,31 +4327,6 @@ module_exit(cleanup_ipmi);
 module_init(ipmi_init_msghandler_mod);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
-MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
+MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
+                  " interface.");
 MODULE_VERSION(IPMI_DRIVER_VERSION);
-
-EXPORT_SYMBOL(ipmi_create_user);
-EXPORT_SYMBOL(ipmi_destroy_user);
-EXPORT_SYMBOL(ipmi_get_version);
-EXPORT_SYMBOL(ipmi_request_settime);
-EXPORT_SYMBOL(ipmi_request_supply_msgs);
-EXPORT_SYMBOL(ipmi_poll_interface);
-EXPORT_SYMBOL(ipmi_register_smi);
-EXPORT_SYMBOL(ipmi_unregister_smi);
-EXPORT_SYMBOL(ipmi_register_for_cmd);
-EXPORT_SYMBOL(ipmi_unregister_for_cmd);
-EXPORT_SYMBOL(ipmi_smi_msg_received);
-EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
-EXPORT_SYMBOL(ipmi_alloc_smi_msg);
-EXPORT_SYMBOL(ipmi_addr_length);
-EXPORT_SYMBOL(ipmi_validate_addr);
-EXPORT_SYMBOL(ipmi_set_gets_events);
-EXPORT_SYMBOL(ipmi_smi_watcher_register);
-EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
-EXPORT_SYMBOL(ipmi_set_my_address);
-EXPORT_SYMBOL(ipmi_get_my_address);
-EXPORT_SYMBOL(ipmi_set_my_LUN);
-EXPORT_SYMBOL(ipmi_get_my_LUN);
-EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
-EXPORT_SYMBOL(ipmi_user_set_run_to_completion);
-EXPORT_SYMBOL(ipmi_free_recv_msg);
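These removals are the counterpart of the EXPORT_SYMBOL() lines added next to the individual functions earlier in this file; current kernel style keeps each export immediately after the definition it belongs to rather than in a list at the end, e.g. (illustrative shape only):

	void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
	{
		/* ... body unchanged ... */
	}
	EXPORT_SYMBOL(ipmi_free_recv_msg);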
index b86186de7f07ea8486602cfb7b70b6a450bb21a1..a261bd735dfbddd724974218bc5006c3ccabd30d 100644 (file)
@@ -87,7 +87,10 @@ MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog "
 
 /* parameter definition to allow user to flag power cycle */
 module_param(poweroff_powercycle, int, 0644);
-MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down.");
+MODULE_PARM_DESC(poweroff_powercycle,
+                " Set to non-zero to enable power cycle instead of power"
+                " down. Power cycle is contingent on hardware support,"
+                " otherwise it defaults back to power down.");
 
 /* Stuff from the get device id command. */
 static unsigned int mfg_id;
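Since poweroff_powercycle is registered with mode 0644, it can be given on the module command line (e.g. "modprobe ipmi_poweroff poweroff_powercycle=1") or toggled later by writing to /sys/module/ipmi_poweroff/parameters/poweroff_powercycle, following the standard module-parameter sysfs convention.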
@@ -95,22 +98,25 @@ static unsigned int prod_id;
 static unsigned char capabilities;
 static unsigned char ipmi_version;
 
-/* We use our own messages for this operation, we don't let the system
-   allocate them, since we may be in a panic situation.  The whole
-   thing is single-threaded, anyway, so multiple messages are not
-   required. */
+/*
+ * We use our own messages for this operation, we don't let the system
+ * allocate them, since we may be in a panic situation.  The whole
+ * thing is single-threaded, anyway, so multiple messages are not
+ * required.
+ */
+static atomic_t dummy_count = ATOMIC_INIT(0);
 static void dummy_smi_free(struct ipmi_smi_msg *msg)
 {
+       atomic_dec(&dummy_count);
 }
 static void dummy_recv_free(struct ipmi_recv_msg *msg)
 {
+       atomic_dec(&dummy_count);
 }
-static struct ipmi_smi_msg halt_smi_msg =
-{
+static struct ipmi_smi_msg halt_smi_msg = {
        .done = dummy_smi_free
 };
-static struct ipmi_recv_msg halt_recv_msg =
-{
+static struct ipmi_recv_msg halt_recv_msg = {
        .done = dummy_recv_free
 };
 
@@ -127,8 +133,7 @@ static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data)
                complete(comp);
 }
 
-static struct ipmi_user_hndl ipmi_poweroff_handler =
-{
+static struct ipmi_user_hndl ipmi_poweroff_handler = {
        .ipmi_recv_hndl = receive_handler
 };
 
@@ -152,17 +157,28 @@ static int ipmi_request_wait_for_response(ipmi_user_t            user,
        return halt_recv_msg.msg.data[0];
 }
 
-/* We are in run-to-completion mode, no completion is desired. */
+/* Wait for message to complete, spinning. */
 static int ipmi_request_in_rc_mode(ipmi_user_t            user,
                                   struct ipmi_addr       *addr,
                                   struct kernel_ipmi_msg *send_msg)
 {
        int rv;
 
+       atomic_set(&dummy_count, 2);
        rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL,
                                      &halt_smi_msg, &halt_recv_msg, 0);
-       if (rv)
+       if (rv) {
+               atomic_set(&dummy_count, 0);
                return rv;
+       }
+
+       /*
+        * Spin until our message is done.
+        */
+       while (atomic_read(&dummy_count) > 0) {
+               ipmi_poll_interface(user);
+               cpu_relax();
+       }
 
        return halt_recv_msg.msg.data[0];
 }
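A note on the new spin-wait: dummy_count is set to 2 because two statically allocated messages (halt_smi_msg and halt_recv_msg) are handed to ipmi_request_supply_msgs(), and each message's ->done() callback decrements the counter once the message handler is finished with that buffer. Polling ipmi_poll_interface() until the count reaches zero is what now provides run-to-completion behaviour from the caller's side; without the wait, the static buffers could be reused while still in flight.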
@@ -184,47 +200,47 @@ static int ipmi_request_in_rc_mode(ipmi_user_t            user,
 
 static void (*atca_oem_poweroff_hook)(ipmi_user_t user);
 
-static void pps_poweroff_atca (ipmi_user_t user)
+static void pps_poweroff_atca(ipmi_user_t user)
 {
-        struct ipmi_system_interface_addr smi_addr;
-        struct kernel_ipmi_msg            send_msg;
-        int                               rv;
-        /*
-         * Configure IPMI address for local access
-         */
-        smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
-        smi_addr.channel = IPMI_BMC_CHANNEL;
-        smi_addr.lun = 0;
-
-        printk(KERN_INFO PFX "PPS powerdown hook used");
-
-        send_msg.netfn = IPMI_NETFN_OEM;
-        send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
-        send_msg.data = IPMI_ATCA_PPS_IANA;
-        send_msg.data_len = 3;
-        rv = ipmi_request_in_rc_mode(user,
-                                  (struct ipmi_addr *) &smi_addr,
-                                   &send_msg);
-        if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
-                printk(KERN_ERR PFX "Unable to send ATCA ,"
-                       " IPMI error 0x%x\n", rv);
-        }
+       struct ipmi_system_interface_addr smi_addr;
+       struct kernel_ipmi_msg            send_msg;
+       int                               rv;
+       /*
+        * Configure IPMI address for local access
+        */
+       smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+       smi_addr.channel = IPMI_BMC_CHANNEL;
+       smi_addr.lun = 0;
+
+       printk(KERN_INFO PFX "PPS powerdown hook used");
+
+       send_msg.netfn = IPMI_NETFN_OEM;
+       send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART;
+       send_msg.data = IPMI_ATCA_PPS_IANA;
+       send_msg.data_len = 3;
+       rv = ipmi_request_in_rc_mode(user,
+                                    (struct ipmi_addr *) &smi_addr,
+                                    &send_msg);
+       if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+               printk(KERN_ERR PFX "Unable to send ATCA ,"
+                      " IPMI error 0x%x\n", rv);
+       }
        return;
 }
 
-static int ipmi_atca_detect (ipmi_user_t user)
+static int ipmi_atca_detect(ipmi_user_t user)
 {
        struct ipmi_system_interface_addr smi_addr;
        struct kernel_ipmi_msg            send_msg;
        int                               rv;
        unsigned char                     data[1];
 
-        /*
-         * Configure IPMI address for local access
-         */
-        smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
-        smi_addr.channel = IPMI_BMC_CHANNEL;
-        smi_addr.lun = 0;
+       /*
+        * Configure IPMI address for local access
+        */
+       smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+       smi_addr.channel = IPMI_BMC_CHANNEL;
+       smi_addr.lun = 0;
 
        /*
         * Use get address info to check and see if we are ATCA
@@ -238,28 +254,30 @@ static int ipmi_atca_detect (ipmi_user_t user)
                                            (struct ipmi_addr *) &smi_addr,
                                            &send_msg);
 
-        printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id);
-        if((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
-            && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
-               printk(KERN_INFO PFX "Installing Pigeon Point Systems Poweroff Hook\n");
+       printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n",
+              mfg_id, prod_id);
+       if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID)
+           && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) {
+               printk(KERN_INFO PFX
+                      "Installing Pigeon Point Systems Poweroff Hook\n");
                atca_oem_poweroff_hook = pps_poweroff_atca;
        }
        return !rv;
 }
 
-static void ipmi_poweroff_atca (ipmi_user_t user)
+static void ipmi_poweroff_atca(ipmi_user_t user)
 {
        struct ipmi_system_interface_addr smi_addr;
        struct kernel_ipmi_msg            send_msg;
        int                               rv;
        unsigned char                     data[4];
 
-        /*
-         * Configure IPMI address for local access
-         */
-        smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
-        smi_addr.channel = IPMI_BMC_CHANNEL;
-        smi_addr.lun = 0;
+       /*
+        * Configure IPMI address for local access
+        */
+       smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+       smi_addr.channel = IPMI_BMC_CHANNEL;
+       smi_addr.lun = 0;
 
        printk(KERN_INFO PFX "Powering down via ATCA power command\n");
 
@@ -273,23 +291,24 @@ static void ipmi_poweroff_atca (ipmi_user_t user)
        data[2] = 0; /* Power Level */
        data[3] = 0; /* Don't change saved presets */
        send_msg.data = data;
-       send_msg.data_len = sizeof (data);
+       send_msg.data_len = sizeof(data);
        rv = ipmi_request_in_rc_mode(user,
                                     (struct ipmi_addr *) &smi_addr,
                                     &send_msg);
-        /** At this point, the system may be shutting down, and most
-         ** serial drivers (if used) will have interrupts turned off
-         ** it may be better to ignore IPMI_UNKNOWN_ERR_COMPLETION_CODE
-         ** return code
-         **/
-        if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
+       /*
+        * At this point, the system may be shutting down, and most
+        * serial drivers (if used) will have interrupts turned off
+        * it may be better to ignore IPMI_UNKNOWN_ERR_COMPLETION_CODE
+        * return code
+        */
+       if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) {
                printk(KERN_ERR PFX "Unable to send ATCA powerdown message,"
                       " IPMI error 0x%x\n", rv);
                goto out;
        }
 
-       if(atca_oem_poweroff_hook)
-               return atca_oem_poweroff_hook(user);
+       if (atca_oem_poweroff_hook)
+               atca_oem_poweroff_hook(user);
  out:
        return;
 }
@@ -310,13 +329,13 @@ static void ipmi_poweroff_atca (ipmi_user_t user)
 #define IPMI_CPI1_PRODUCT_ID           0x000157
 #define IPMI_CPI1_MANUFACTURER_ID      0x0108
 
-static int ipmi_cpi1_detect (ipmi_user_t user)
+static int ipmi_cpi1_detect(ipmi_user_t user)
 {
        return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID)
                && (prod_id == IPMI_CPI1_PRODUCT_ID));
 }
 
-static void ipmi_poweroff_cpi1 (ipmi_user_t user)
+static void ipmi_poweroff_cpi1(ipmi_user_t user)
 {
        struct ipmi_system_interface_addr smi_addr;
        struct ipmi_ipmb_addr             ipmb_addr;
@@ -328,12 +347,12 @@ static void ipmi_poweroff_cpi1 (ipmi_user_t user)
        unsigned char                     aer_addr;
        unsigned char                     aer_lun;
 
-        /*
-         * Configure IPMI address for local access
-         */
-        smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
-        smi_addr.channel = IPMI_BMC_CHANNEL;
-        smi_addr.lun = 0;
+       /*
+        * Configure IPMI address for local access
+        */
+       smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+       smi_addr.channel = IPMI_BMC_CHANNEL;
+       smi_addr.lun = 0;
 
        printk(KERN_INFO PFX "Powering down via CPI1 power command\n");
 
@@ -425,7 +444,7 @@ static void ipmi_poweroff_cpi1 (ipmi_user_t user)
  */
 
 #define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00}
-static int ipmi_dell_chassis_detect (ipmi_user_t user)
+static int ipmi_dell_chassis_detect(ipmi_user_t user)
 {
        const char ipmi_version_major = ipmi_version & 0xF;
        const char ipmi_version_minor = (ipmi_version >> 4) & 0xF;
@@ -444,25 +463,25 @@ static int ipmi_dell_chassis_detect (ipmi_user_t user)
 #define IPMI_NETFN_CHASSIS_REQUEST     0
 #define IPMI_CHASSIS_CONTROL_CMD       0x02
 
-static int ipmi_chassis_detect (ipmi_user_t user)
+static int ipmi_chassis_detect(ipmi_user_t user)
 {
        /* Chassis support, use it. */
        return (capabilities & 0x80);
 }
 
-static void ipmi_poweroff_chassis (ipmi_user_t user)
+static void ipmi_poweroff_chassis(ipmi_user_t user)
 {
        struct ipmi_system_interface_addr smi_addr;
        struct kernel_ipmi_msg            send_msg;
        int                               rv;
        unsigned char                     data[1];
 
-        /*
-         * Configure IPMI address for local access
-         */
-        smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
-        smi_addr.channel = IPMI_BMC_CHANNEL;
-        smi_addr.lun = 0;
+       /*
+        * Configure IPMI address for local access
+        */
+       smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+       smi_addr.channel = IPMI_BMC_CHANNEL;
+       smi_addr.lun = 0;
 
  powercyclefailed:
        printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n",
@@ -525,15 +544,13 @@ static struct poweroff_function poweroff_functions[] = {
 
 
 /* Called on a powerdown request. */
-static void ipmi_poweroff_function (void)
+static void ipmi_poweroff_function(void)
 {
        if (!ready)
                return;
 
        /* Use run-to-completion mode, since interrupts may be off. */
-       ipmi_user_set_run_to_completion(ipmi_user, 1);
        specific_poweroff_func(ipmi_user);
-       ipmi_user_set_run_to_completion(ipmi_user, 0);
 }
 
 /* Wait for an IPMI interface to be installed, the first one installed
@@ -561,13 +578,13 @@ static void ipmi_po_new_smi(int if_num, struct device *device)
 
        ipmi_ifnum = if_num;
 
-        /*
-         * Do a get device ide and store some results, since this is
+       /*
+        * Do a get device id and store some results, since this is
         * used by several functions.
-         */
-        smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
-        smi_addr.channel = IPMI_BMC_CHANNEL;
-        smi_addr.lun = 0;
+        */
+       smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
+       smi_addr.channel = IPMI_BMC_CHANNEL;
+       smi_addr.lun = 0;
 
        send_msg.netfn = IPMI_NETFN_APP_REQUEST;
        send_msg.cmd = IPMI_GET_DEVICE_ID_CMD;
@@ -632,8 +649,7 @@ static void ipmi_po_smi_gone(int if_num)
        pm_power_off = old_poweroff_func;
 }
 
-static struct ipmi_smi_watcher smi_watcher =
-{
+static struct ipmi_smi_watcher smi_watcher = {
        .owner    = THIS_MODULE,
        .new_smi  = ipmi_po_new_smi,
        .smi_gone = ipmi_po_smi_gone
@@ -675,12 +691,12 @@ static struct ctl_table_header *ipmi_table_header;
 /*
  * Startup and shutdown functions.
  */
-static int ipmi_poweroff_init (void)
+static int ipmi_poweroff_init(void)
 {
        int rv;
 
-       printk (KERN_INFO "Copyright (C) 2004 MontaVista Software -"
-               " IPMI Powerdown via sys_reboot.\n");
+       printk(KERN_INFO "Copyright (C) 2004 MontaVista Software -"
+              " IPMI Powerdown via sys_reboot.\n");
 
        if (poweroff_powercycle)
                printk(KERN_INFO PFX "Power cycle is enabled.\n");
index 4f560d0bb8089fa296dadeaee3a21bfb7a91b052..5a5455585c1d278bd770a784fd5e1df603df85e3 100644 (file)
@@ -80,7 +80,7 @@
 #define SI_USEC_PER_JIFFY      (1000000/HZ)
 #define SI_TIMEOUT_JIFFIES     (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
 #define SI_SHORT_TIMEOUT_USEC  250 /* .25ms when the SM request a
-                                       short timeout */
+                                     short timeout */
 
 /* Bit for BMC global enables. */
 #define IPMI_BMC_RCV_MSG_INTR     0x01
@@ -114,14 +114,61 @@ static char *si_to_str[] = { "kcs", "smic", "bt" };
 
 #define DEVICE_NAME "ipmi_si"
 
-static struct device_driver ipmi_driver =
-{
+static struct device_driver ipmi_driver = {
        .name = DEVICE_NAME,
        .bus = &platform_bus_type
 };
 
-struct smi_info
-{
+
+/*
+ * Indexes into stats[] in smi_info below.
+ */
+enum si_stat_indexes {
+       /*
+        * Number of times the driver requested a timer while an operation
+        * was in progress.
+        */
+       SI_STAT_short_timeouts = 0,
+
+       /*
+        * Number of times the driver requested a timer while nothing was in
+        * progress.
+        */
+       SI_STAT_long_timeouts,
+
+       /* Number of times the interface was idle while being polled. */
+       SI_STAT_idles,
+
+       /* Number of interrupts the driver handled. */
+       SI_STAT_interrupts,
+
+       /* Number of times the driver got an ATTN from the hardware. */
+       SI_STAT_attentions,
+
+       /* Number of times the driver requested flags from the hardware. */
+       SI_STAT_flag_fetches,
+
+       /* Number of times the hardware didn't follow the state machine. */
+       SI_STAT_hosed_count,
+
+       /* Number of completed messages. */
+       SI_STAT_complete_transactions,
+
+       /* Number of IPMI events received from the hardware. */
+       SI_STAT_events,
+
+       /* Number of watchdog pretimeouts. */
+       SI_STAT_watchdog_pretimeouts,
+
+       /* Number of asynchronous messages received. */
+       SI_STAT_incoming_messages,
+
+
+       /* This *must* remain last, add new values above this. */
+       SI_NUM_STATS
+};
+
+struct smi_info {
        int                    intf_num;
        ipmi_smi_t             intf;
        struct si_sm_data      *si_sm;
@@ -134,8 +181,10 @@ struct smi_info
        struct ipmi_smi_msg    *curr_msg;
        enum si_intf_state     si_state;
 
-       /* Used to handle the various types of I/O that can occur with
-           IPMI */
+       /*
+        * Used to handle the various types of I/O that can occur with
+        * IPMI
+        */
        struct si_sm_io io;
        int (*io_setup)(struct smi_info *info);
        void (*io_cleanup)(struct smi_info *info);
@@ -146,15 +195,18 @@ struct smi_info
        void (*addr_source_cleanup)(struct smi_info *info);
        void *addr_source_data;
 
-       /* Per-OEM handler, called from handle_flags().
-          Returns 1 when handle_flags() needs to be re-run
-          or 0 indicating it set si_state itself.
-       */
+       /*
+        * Per-OEM handler, called from handle_flags().  Returns 1
+        * when handle_flags() needs to be re-run or 0 indicating it
+        * set si_state itself.
+        */
        int (*oem_data_avail_handler)(struct smi_info *smi_info);
 
-       /* Flags from the last GET_MSG_FLAGS command, used when an ATTN
-          is set to hold the flags until we are done handling everything
-          from the flags. */
+       /*
+        * Flags from the last GET_MSG_FLAGS command, used when an ATTN
+        * is set to hold the flags until we are done handling everything
+        * from the flags.
+        */
 #define RECEIVE_MSG_AVAIL      0x01
 #define EVENT_MSG_BUFFER_FULL  0x02
 #define WDT_PRE_TIMEOUT_INT    0x08
@@ -162,25 +214,31 @@ struct smi_info
 #define OEM1_DATA_AVAIL     0x40
 #define OEM2_DATA_AVAIL     0x80
 #define OEM_DATA_AVAIL      (OEM0_DATA_AVAIL | \
-                             OEM1_DATA_AVAIL | \
-                             OEM2_DATA_AVAIL)
+                            OEM1_DATA_AVAIL | \
+                            OEM2_DATA_AVAIL)
        unsigned char       msg_flags;
 
-       /* If set to true, this will request events the next time the
-          state machine is idle. */
+       /*
+        * If set to true, this will request events the next time the
+        * state machine is idle.
+        */
        atomic_t            req_events;
 
-       /* If true, run the state machine to completion on every send
-          call.  Generally used after a panic to make sure stuff goes
-          out. */
+       /*
+        * If true, run the state machine to completion on every send
+        * call.  Generally used after a panic to make sure stuff goes
+        * out.
+        */
        int                 run_to_completion;
 
        /* The I/O port of an SI interface. */
        int                 port;
 
-       /* The space between start addresses of the two ports.  For
-          instance, if the first port is 0xca2 and the spacing is 4, then
-          the second port is 0xca6. */
+       /*
+        * The space between start addresses of the two ports.  For
+        * instance, if the first port is 0xca2 and the spacing is 4, then
+        * the second port is 0xca6.
+        */
        unsigned int        spacing;
 
        /* zero if no irq; */
@@ -195,10 +253,12 @@ struct smi_info
        /* Used to gracefully stop the timer without race conditions. */
        atomic_t            stop_operation;
 
-       /* The driver will disable interrupts when it gets into a
-          situation where it cannot handle messages due to lack of
-          memory.  Once that situation clears up, it will re-enable
-          interrupts. */
+       /*
+        * The driver will disable interrupts when it gets into a
+        * situation where it cannot handle messages due to lack of
+        * memory.  Once that situation clears up, it will re-enable
+        * interrupts.
+        */
        int interrupt_disabled;
 
        /* From the get device id response... */
@@ -208,33 +268,28 @@ struct smi_info
        struct device *dev;
        struct platform_device *pdev;
 
-        /* True if we allocated the device, false if it came from
-         * someplace else (like PCI). */
+       /*
+        * True if we allocated the device, false if it came from
+        * someplace else (like PCI).
+        */
        int dev_registered;
 
        /* Slave address, could be reported from DMI. */
        unsigned char slave_addr;
 
        /* Counters and things for the proc filesystem. */
-       spinlock_t count_lock;
-       unsigned long short_timeouts;
-       unsigned long long_timeouts;
-       unsigned long timeout_restarts;
-       unsigned long idles;
-       unsigned long interrupts;
-       unsigned long attentions;
-       unsigned long flag_fetches;
-       unsigned long hosed_count;
-       unsigned long complete_transactions;
-       unsigned long events;
-       unsigned long watchdog_pretimeouts;
-       unsigned long incoming_messages;
-
-        struct task_struct *thread;
+       atomic_t stats[SI_NUM_STATS];
+
+       struct task_struct *thread;
 
        struct list_head link;
 };
 
+#define smi_inc_stat(smi, stat) \
+       atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
+#define smi_get_stat(smi, stat) \
+       ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
+
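With the enum and the two helper macros above in place, the old per-counter spinlock goes away and bumping or reading a counter is lock-free. A tiny usage sketch (example_count_attention() is a hypothetical helper for illustration only; smi_inc_stat, smi_get_stat and struct smi_info are from this patch):

        /* Hypothetical helper, for illustration only. */
        static void example_count_attention(struct smi_info *smi_info)
        {
                /* Expands to atomic_inc(&smi_info->stats[SI_STAT_attentions]). */
                smi_inc_stat(smi_info, attentions);
                printk(KERN_DEBUG "attentions so far: %u\n",
                       smi_get_stat(smi_info, attentions));
        }

Because the macros paste SI_STAT_ onto the counter name, a misspelled counter fails at compile time instead of silently updating the wrong statistic.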
 #define SI_MAX_PARMS 4
 
 static int force_kipmid[SI_MAX_PARMS];
@@ -246,7 +301,7 @@ static int try_smi_init(struct smi_info *smi);
 static void cleanup_one_si(struct smi_info *to_clean);
 
 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
-static int register_xaction_notifier(struct notifier_block * nb)
+static int register_xaction_notifier(struct notifier_block *nb)
 {
        return atomic_notifier_chain_register(&xaction_notifier_list, nb);
 }
@@ -255,7 +310,7 @@ static void deliver_recv_msg(struct smi_info *smi_info,
                             struct ipmi_smi_msg *msg)
 {
        /* Deliver the message to the upper layer with the lock
-           released. */
+          released. */
        spin_unlock(&(smi_info->si_lock));
        ipmi_smi_msg_received(smi_info->intf, msg);
        spin_lock(&(smi_info->si_lock));
@@ -287,9 +342,12 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
        struct timeval t;
 #endif
 
-       /* No need to save flags, we aleady have interrupts off and we
-          already hold the SMI lock. */
-       spin_lock(&(smi_info->msg_lock));
+       /*
+        * No need to save flags, we already have interrupts off and we
+        * already hold the SMI lock.
+        */
+       if (!smi_info->run_to_completion)
+               spin_lock(&(smi_info->msg_lock));
 
        /* Pick the high priority queue first. */
        if (!list_empty(&(smi_info->hp_xmit_msgs))) {
@@ -310,7 +368,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
                                                link);
 #ifdef DEBUG_TIMING
                do_gettimeofday(&t);
-               printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+               printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
                err = atomic_notifier_call_chain(&xaction_notifier_list,
                                0, smi_info);
@@ -322,14 +380,14 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
                        smi_info->si_sm,
                        smi_info->curr_msg->data,
                        smi_info->curr_msg->data_size);
-               if (err) {
+               if (err)
                        return_hosed_msg(smi_info, err);
-               }
 
                rv = SI_SM_CALL_WITHOUT_DELAY;
        }
-       out:
-       spin_unlock(&(smi_info->msg_lock));
+ out:
+       if (!smi_info->run_to_completion)
+               spin_unlock(&(smi_info->msg_lock));
 
        return rv;
 }
@@ -338,8 +396,10 @@ static void start_enable_irq(struct smi_info *smi_info)
 {
        unsigned char msg[2];
 
-       /* If we are enabling interrupts, we have to tell the
-          BMC to use them. */
+       /*
+        * If we are enabling interrupts, we have to tell the
+        * BMC to use them.
+        */
        msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
        msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
@@ -371,10 +431,12 @@ static void start_clear_flags(struct smi_info *smi_info)
        smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
-/* When we have a situtaion where we run out of memory and cannot
-   allocate messages, we just leave them in the BMC and run the system
-   polled until we can allocate some memory.  Once we have some
-   memory, we will re-enable the interrupt. */
+/*
+ * When we have a situation where we run out of memory and cannot
+ * allocate messages, we just leave them in the BMC and run the system
+ * polled until we can allocate some memory.  Once we have some
+ * memory, we will re-enable the interrupt.
+ */
 static inline void disable_si_irq(struct smi_info *smi_info)
 {
        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
@@ -396,9 +458,7 @@ static void handle_flags(struct smi_info *smi_info)
  retry:
        if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
                /* Watchdog pre-timeout */
-               spin_lock(&smi_info->count_lock);
-               smi_info->watchdog_pretimeouts++;
-               spin_unlock(&smi_info->count_lock);
+               smi_inc_stat(smi_info, watchdog_pretimeouts);
 
                start_clear_flags(smi_info);
                smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
@@ -444,12 +504,11 @@ static void handle_flags(struct smi_info *smi_info)
                        smi_info->curr_msg->data_size);
                smi_info->si_state = SI_GETTING_EVENTS;
        } else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
-                  smi_info->oem_data_avail_handler) {
+                  smi_info->oem_data_avail_handler) {
                if (smi_info->oem_data_avail_handler(smi_info))
                        goto retry;
-       } else {
+       } else
                smi_info->si_state = SI_NORMAL;
-       }
 }
 
 static void handle_transaction_done(struct smi_info *smi_info)
@@ -459,7 +518,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
        struct timeval t;
 
        do_gettimeofday(&t);
-       printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+       printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
        switch (smi_info->si_state) {
        case SI_NORMAL:
@@ -472,9 +531,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);
 
-               /* Do this here becase deliver_recv_msg() releases the
-                  lock, and a new message can be put in during the
-                  time the lock is released. */
+               /*
+                * Do this here because deliver_recv_msg() releases the
+                * lock, and a new message can be put in during the
+                * time the lock is released.
+                */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                deliver_recv_msg(smi_info, msg);
@@ -488,12 +549,13 @@ static void handle_transaction_done(struct smi_info *smi_info)
                /* We got the flags from the SMI, now handle them. */
                len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
                if (msg[2] != 0) {
-                       /* Error fetching flags, just give up for
-                          now. */
+                       /* Error fetching flags, just give up for now. */
                        smi_info->si_state = SI_NORMAL;
                } else if (len < 4) {
-                       /* Hmm, no flags.  That's technically illegal, but
-                          don't use uninitialized data. */
+                       /*
+                        * Hmm, no flags.  That's technically illegal, but
+                        * don't use uninitialized data.
+                        */
                        smi_info->si_state = SI_NORMAL;
                } else {
                        smi_info->msg_flags = msg[3];
@@ -530,9 +592,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);
 
-               /* Do this here becase deliver_recv_msg() releases the
-                  lock, and a new message can be put in during the
-                  time the lock is released. */
+               /*
+                * Do this here because deliver_recv_msg() releases the
+                * lock, and a new message can be put in during the
+                * time the lock is released.
+                */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
@@ -543,14 +607,14 @@ static void handle_transaction_done(struct smi_info *smi_info)
                        smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
                        handle_flags(smi_info);
                } else {
-                       spin_lock(&smi_info->count_lock);
-                       smi_info->events++;
-                       spin_unlock(&smi_info->count_lock);
-
-                       /* Do this before we deliver the message
-                          because delivering the message releases the
-                          lock and something else can mess with the
-                          state. */
+                       smi_inc_stat(smi_info, events);
+
+                       /*
+                        * Do this before we deliver the message
+                        * because delivering the message releases the
+                        * lock and something else can mess with the
+                        * state.
+                        */
                        handle_flags(smi_info);
 
                        deliver_recv_msg(smi_info, msg);
@@ -566,9 +630,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
                                smi_info->curr_msg->rsp,
                                IPMI_MAX_MSG_LENGTH);
 
-               /* Do this here becase deliver_recv_msg() releases the
-                  lock, and a new message can be put in during the
-                  time the lock is released. */
+               /*
+                * Do this here because deliver_recv_msg() releases the
+                * lock, and a new message can be put in during the
+                * time the lock is released.
+                */
                msg = smi_info->curr_msg;
                smi_info->curr_msg = NULL;
                if (msg->rsp[2] != 0) {
@@ -579,14 +645,14 @@ static void handle_transaction_done(struct smi_info *smi_info)
                        smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
                        handle_flags(smi_info);
                } else {
-                       spin_lock(&smi_info->count_lock);
-                       smi_info->incoming_messages++;
-                       spin_unlock(&smi_info->count_lock);
-
-                       /* Do this before we deliver the message
-                          because delivering the message releases the
-                          lock and something else can mess with the
-                          state. */
+                       smi_inc_stat(smi_info, incoming_messages);
+
+                       /*
+                        * Do this before we deliver the message
+                        * because delivering the message releases the
+                        * lock and something else can mess with the
+                        * state.
+                        */
                        handle_flags(smi_info);
 
                        deliver_recv_msg(smi_info, msg);
@@ -674,69 +740,70 @@ static void handle_transaction_done(struct smi_info *smi_info)
        }
 }
 
-/* Called on timeouts and events.  Timeouts should pass the elapsed
-   time, interrupts should pass in zero.  Must be called with
-   si_lock held and interrupts disabled. */
+/*
+ * Called on timeouts and events.  Timeouts should pass the elapsed
+ * time, interrupts should pass in zero.  Must be called with
+ * si_lock held and interrupts disabled.
+ */
 static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
                                           int time)
 {
        enum si_sm_result si_sm_result;
 
  restart:
-       /* There used to be a loop here that waited a little while
-          (around 25us) before giving up.  That turned out to be
-          pointless, the minimum delays I was seeing were in the 300us
-          range, which is far too long to wait in an interrupt.  So
-          we just run until the state machine tells us something
-          happened or it needs a delay. */
+       /*
+        * There used to be a loop here that waited a little while
+        * (around 25us) before giving up.  That turned out to be
+        * pointless, the minimum delays I was seeing were in the 300us
+        * range, which is far too long to wait in an interrupt.  So
+        * we just run until the state machine tells us something
+        * happened or it needs a delay.
+        */
        si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
        time = 0;
        while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
-       {
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
-       }
 
-       if (si_sm_result == SI_SM_TRANSACTION_COMPLETE)
-       {
-               spin_lock(&smi_info->count_lock);
-               smi_info->complete_transactions++;
-               spin_unlock(&smi_info->count_lock);
+       if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
+               smi_inc_stat(smi_info, complete_transactions);
 
                handle_transaction_done(smi_info);
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
-       }
-       else if (si_sm_result == SI_SM_HOSED)
-       {
-               spin_lock(&smi_info->count_lock);
-               smi_info->hosed_count++;
-               spin_unlock(&smi_info->count_lock);
+       } else if (si_sm_result == SI_SM_HOSED) {
+               smi_inc_stat(smi_info, hosed_count);
 
-               /* Do the before return_hosed_msg, because that
-                  releases the lock. */
+               /*
+                * Do this before return_hosed_msg, because that
+                * releases the lock.
+                */
                smi_info->si_state = SI_NORMAL;
                if (smi_info->curr_msg != NULL) {
-                       /* If we were handling a user message, format
-                           a response to send to the upper layer to
-                           tell it about the error. */
+                       /*
+                        * If we were handling a user message, format
+                        * a response to send to the upper layer to
+                        * tell it about the error.
+                        */
                        return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
                }
                si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);
        }
 
-       /* We prefer handling attn over new messages. */
-       if (si_sm_result == SI_SM_ATTN)
-       {
+       /*
+        * We prefer handling attn over new messages.  But don't do
+        * this if there is not yet an upper layer to handle anything.
+        */
+       if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
                unsigned char msg[2];
 
-               spin_lock(&smi_info->count_lock);
-               smi_info->attentions++;
-               spin_unlock(&smi_info->count_lock);
+               smi_inc_stat(smi_info, attentions);
 
-               /* Got a attn, send down a get message flags to see
-                   what's causing it.  It would be better to handle
-                   this in the upper layer, but due to the way
-                   interrupts work with the SMI, that's not really
-                   possible. */
+               /*
+                * Got an attn, send down a get message flags to see
+                * what's causing it.  It would be better to handle
+                * this in the upper layer, but due to the way
+                * interrupts work with the SMI, that's not really
+                * possible.
+                */
                msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
                msg[1] = IPMI_GET_MSG_FLAGS_CMD;
 
@@ -748,20 +815,19 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 
        /* If we are currently idle, try to start the next message. */
        if (si_sm_result == SI_SM_IDLE) {
-               spin_lock(&smi_info->count_lock);
-               smi_info->idles++;
-               spin_unlock(&smi_info->count_lock);
+               smi_inc_stat(smi_info, idles);
 
                si_sm_result = start_next_msg(smi_info);
                if (si_sm_result != SI_SM_IDLE)
                        goto restart;
-        }
+       }
 
        if ((si_sm_result == SI_SM_IDLE)
-           && (atomic_read(&smi_info->req_events)))
-       {
-               /* We are idle and the upper layer requested that I fetch
-                  events, so do so. */
+           && (atomic_read(&smi_info->req_events))) {
+               /*
+                * We are idle and the upper layer requested that I fetch
+                * events, so do so.
+                */
                atomic_set(&smi_info->req_events, 0);
 
                smi_info->curr_msg = ipmi_alloc_smi_msg();
@@ -803,56 +869,50 @@ static void sender(void                *send_info,
                return;
        }
 
-       spin_lock_irqsave(&(smi_info->msg_lock), flags);
 #ifdef DEBUG_TIMING
        do_gettimeofday(&t);
        printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 
        if (smi_info->run_to_completion) {
-               /* If we are running to completion, then throw it in
-                  the list and run transactions until everything is
-                  clear.  Priority doesn't matter here. */
+               /*
+                * If we are running to completion, then throw it in
+                * the list and run transactions until everything is
+                * clear.  Priority doesn't matter here.
+                */
+
+               /*
+                * Run to completion means we are single-threaded, no
+                * need for locks.
+                */
                list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
 
-               /* We have to release the msg lock and claim the smi
-                  lock in this case, because of race conditions. */
-               spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
-
-               spin_lock_irqsave(&(smi_info->si_lock), flags);
                result = smi_event_handler(smi_info, 0);
                while (result != SI_SM_IDLE) {
                        udelay(SI_SHORT_TIMEOUT_USEC);
                        result = smi_event_handler(smi_info,
                                                   SI_SHORT_TIMEOUT_USEC);
                }
-               spin_unlock_irqrestore(&(smi_info->si_lock), flags);
                return;
-       } else {
-               if (priority > 0) {
-                       list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs));
-               } else {
-                       list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
-               }
        }
-       spin_unlock_irqrestore(&(smi_info->msg_lock), flags);
 
-       spin_lock_irqsave(&(smi_info->si_lock), flags);
-       if ((smi_info->si_state == SI_NORMAL)
-           && (smi_info->curr_msg == NULL))
-       {
+       spin_lock_irqsave(&smi_info->msg_lock, flags);
+       if (priority > 0)
+               list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
+       else
+               list_add_tail(&msg->link, &smi_info->xmit_msgs);
+       spin_unlock_irqrestore(&smi_info->msg_lock, flags);
+
+       spin_lock_irqsave(&smi_info->si_lock, flags);
+       if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL)
                start_next_msg(smi_info);
-       }
-       spin_unlock_irqrestore(&(smi_info->si_lock), flags);
+       spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
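A pattern worth noting in the start_next_msg() and sender() hunks above: msg_lock is only taken when the driver is not in run-to-completion mode, because that mode is single-threaded (the patch's own comment: "no need for locks"). In start_next_msg() the guard, condensed, is:

        if (!smi_info->run_to_completion)
                spin_lock(&smi_info->msg_lock);
        /* ... pick the next message off hp_xmit_msgs / xmit_msgs ... */
        if (!smi_info->run_to_completion)
                spin_unlock(&smi_info->msg_lock);

sender() follows the same rule by returning early in the run-to-completion path before the locked enqueue.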
 
 static void set_run_to_completion(void *send_info, int i_run_to_completion)
 {
        struct smi_info   *smi_info = send_info;
        enum si_sm_result result;
-       unsigned long     flags;
-
-       spin_lock_irqsave(&(smi_info->si_lock), flags);
 
        smi_info->run_to_completion = i_run_to_completion;
        if (i_run_to_completion) {
@@ -863,8 +923,6 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion)
                                                   SI_SHORT_TIMEOUT_USEC);
                }
        }
-
-       spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 }
 
 static int ipmi_thread(void *data)
@@ -878,9 +936,8 @@ static int ipmi_thread(void *data)
                spin_lock_irqsave(&(smi_info->si_lock), flags);
                smi_result = smi_event_handler(smi_info, 0);
                spin_unlock_irqrestore(&(smi_info->si_lock), flags);
-               if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
-                       /* do nothing */
-               }
+               if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
+                       ; /* do nothing */
                else if (smi_result == SI_SM_CALL_WITH_DELAY)
                        schedule();
                else
@@ -931,7 +988,7 @@ static void smi_timeout(unsigned long data)
        spin_lock_irqsave(&(smi_info->si_lock), flags);
 #ifdef DEBUG_TIMING
        do_gettimeofday(&t);
-       printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+       printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
        jiffies_now = jiffies;
        time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
@@ -945,23 +1002,19 @@ static void smi_timeout(unsigned long data)
        if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
                /* Running with interrupts, only do long timeouts. */
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
-               spin_lock_irqsave(&smi_info->count_lock, flags);
-               smi_info->long_timeouts++;
-               spin_unlock_irqrestore(&smi_info->count_lock, flags);
+               smi_inc_stat(smi_info, long_timeouts);
                goto do_add_timer;
        }
 
-       /* If the state machine asks for a short delay, then shorten
-           the timer timeout. */
+       /*
+        * If the state machine asks for a short delay, then shorten
+        * the timer timeout.
+        */
        if (smi_result == SI_SM_CALL_WITH_DELAY) {
-               spin_lock_irqsave(&smi_info->count_lock, flags);
-               smi_info->short_timeouts++;
-               spin_unlock_irqrestore(&smi_info->count_lock, flags);
+               smi_inc_stat(smi_info, short_timeouts);
                smi_info->si_timer.expires = jiffies + 1;
        } else {
-               spin_lock_irqsave(&smi_info->count_lock, flags);
-               smi_info->long_timeouts++;
-               spin_unlock_irqrestore(&smi_info->count_lock, flags);
+               smi_inc_stat(smi_info, long_timeouts);
                smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES;
        }
 
@@ -979,13 +1032,11 @@ static irqreturn_t si_irq_handler(int irq, void *data)
 
        spin_lock_irqsave(&(smi_info->si_lock), flags);
 
-       spin_lock(&smi_info->count_lock);
-       smi_info->interrupts++;
-       spin_unlock(&smi_info->count_lock);
+       smi_inc_stat(smi_info, interrupts);
 
 #ifdef DEBUG_TIMING
        do_gettimeofday(&t);
-       printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
+       printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
        smi_event_handler(smi_info, 0);
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
@@ -1028,7 +1079,7 @@ static int smi_start_processing(void       *send_info,
         * The BT interface is efficient enough to not need a thread,
         * and there is no need for a thread if we have interrupts.
         */
-       else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
+       else if ((new_smi->si_type != SI_BT) && (!new_smi->irq))
                enable = 1;
 
        if (enable) {
@@ -1054,8 +1105,7 @@ static void set_maintenance_mode(void *send_info, int enable)
                atomic_set(&smi_info->req_events, 0);
 }
 
-static struct ipmi_smi_handlers handlers =
-{
+static struct ipmi_smi_handlers handlers = {
        .owner                  = THIS_MODULE,
        .start_processing       = smi_start_processing,
        .sender                 = sender,
@@ -1065,8 +1115,10 @@ static struct ipmi_smi_handlers handlers =
        .poll                   = poll,
 };
 
-/* There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
-   a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS */
+/*
+ * There can be 4 IO ports passed in (with or without IRQs), 4 addresses,
+ * a default IO port, and 1 ACPI/SPMI address.  That sets SI_MAX_DRIVERS.
+ */
 
 static LIST_HEAD(smi_infos);
 static DEFINE_MUTEX(smi_infos_lock);
@@ -1257,10 +1309,9 @@ static void port_cleanup(struct smi_info *info)
        int          idx;
 
        if (addr) {
-               for (idx = 0; idx < info->io_size; idx++) {
+               for (idx = 0; idx < info->io_size; idx++)
                        release_region(addr + idx * info->io.regspacing,
                                       info->io.regsize);
-               }
        }
 }
 
@@ -1274,8 +1325,10 @@ static int port_setup(struct smi_info *info)
 
        info->io_cleanup = port_cleanup;
 
-       /* Figure out the actual inb/inw/inl/etc routine to use based
-          upon the register size. */
+       /*
+        * Figure out the actual inb/inw/inl/etc routine to use based
+        * upon the register size.
+        */
        switch (info->io.regsize) {
        case 1:
                info->io.inputb = port_inb;
@@ -1290,17 +1343,18 @@ static int port_setup(struct smi_info *info)
                info->io.outputb = port_outl;
                break;
        default:
-               printk("ipmi_si: Invalid register size: %d\n",
+               printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
                       info->io.regsize);
                return -EINVAL;
        }
 
-       /* Some BIOSes reserve disjoint I/O regions in their ACPI
+       /*
+        * Some BIOSes reserve disjoint I/O regions in their ACPI
         * tables.  This causes problems when trying to register the
         * entire I/O region.  Therefore we must register each I/O
         * port separately.
         */
-       for (idx = 0; idx < info->io_size; idx++) {
+       for (idx = 0; idx < info->io_size; idx++) {
                if (request_region(addr + idx * info->io.regspacing,
                                   info->io.regsize, DEVICE_NAME) == NULL) {
                        /* Undo allocations */
@@ -1388,8 +1442,10 @@ static int mem_setup(struct smi_info *info)
 
        info->io_cleanup = mem_cleanup;
 
-       /* Figure out the actual readb/readw/readl/etc routine to use based
-          upon the register size. */
+       /*
+        * Figure out the actual readb/readw/readl/etc routine to use based
+        * upon the register size.
+        */
        switch (info->io.regsize) {
        case 1:
                info->io.inputb = intf_mem_inb;
@@ -1410,16 +1466,18 @@ static int mem_setup(struct smi_info *info)
                break;
 #endif
        default:
-               printk("ipmi_si: Invalid register size: %d\n",
+               printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n",
                       info->io.regsize);
                return -EINVAL;
        }
 
-       /* Calculate the total amount of memory to claim.  This is an
+       /*
+        * Calculate the total amount of memory to claim.  This is an
         * unusual looking calculation, but it avoids claiming any
         * more memory than it has to.  It will claim everything
         * between the first address to the end of the last full
-        * register. */
+        * register.
+        */
        mapsize = ((info->io_size * info->io.regspacing)
                   - (info->io.regspacing - info->io.regsize));
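Worked example of the mapsize expression above (numbers purely illustrative): with io_size = 3, regspacing = 4 and regsize = 1, the registers start at offsets 0, 4 and 8, so mapsize = 3 * 4 - (4 - 1) = 9 bytes, which runs from the first register through the end of the last one without claiming the 3 padding bytes that follow it.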
 
@@ -1749,9 +1807,11 @@ static __devinit void hardcode_find_bmc(void)
 
 #include <linux/acpi.h>
 
-/* Once we get an ACPI failure, we don't try any more, because we go
-   through the tables sequentially.  Once we don't find a table, there
-   are no more. */
+/*
+ * Once we get an ACPI failure, we don't try any more, because we go
+ * through the tables sequentially.  Once we don't find a table, there
+ * are no more.
+ */
 static int acpi_failure;
 
 /* For GPE-type interrupts. */
@@ -1765,9 +1825,7 @@ static u32 ipmi_acpi_gpe(void *context)
 
        spin_lock_irqsave(&(smi_info->si_lock), flags);
 
-       spin_lock(&smi_info->count_lock);
-       smi_info->interrupts++;
-       spin_unlock(&smi_info->count_lock);
+       smi_inc_stat(smi_info, interrupts);
 
 #ifdef DEBUG_TIMING
        do_gettimeofday(&t);
@@ -1816,7 +1874,8 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
 
 /*
  * Defined at
- * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf
+ * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/
+ * Docs/TechPapers/IA64/hpspmi.pdf
  */
 struct SPMITable {
        s8      Signature[4];
@@ -1838,14 +1897,18 @@ struct SPMITable {
         */
        u8      InterruptType;
 
-       /* If bit 0 of InterruptType is set, then this is the SCI
-           interrupt in the GPEx_STS register. */
+       /*
+        * If bit 0 of InterruptType is set, then this is the SCI
+        * interrupt in the GPEx_STS register.
+        */
        u8      GPE;
 
        s16     Reserved;
 
-       /* If bit 1 of InterruptType is set, then this is the I/O
-           APIC/SAPIC interrupt. */
+       /*
+        * If bit 1 of InterruptType is set, then this is the I/O
+        * APIC/SAPIC interrupt.
+        */
        u32     GlobalSystemInterrupt;
 
        /* The actual register address. */
@@ -1863,7 +1926,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
 
        if (spmi->IPMIlegacy != 1) {
            printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy);
-           return -ENODEV;
+           return -ENODEV;
        }
 
        if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
@@ -1880,8 +1943,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
        info->addr_source = "ACPI";
 
        /* Figure out the interface type. */
-       switch (spmi->InterfaceType)
-       {
+       switch (spmi->InterfaceType) {
        case 1: /* KCS */
                info->si_type = SI_KCS;
                break;
@@ -1929,7 +1991,8 @@ static __devinit int try_init_acpi(struct SPMITable *spmi)
                info->io.addr_type = IPMI_IO_ADDR_SPACE;
        } else {
                kfree(info);
-               printk("ipmi_si: Unknown ACPI I/O Address type\n");
+               printk(KERN_WARNING
+                      "ipmi_si: Unknown ACPI I/O Address type\n");
                return -EIO;
        }
        info->io.addr_data = spmi->addr.address;
@@ -1963,8 +2026,7 @@ static __devinit void acpi_find_bmc(void)
 #endif
 
 #ifdef CONFIG_DMI
-struct dmi_ipmi_data
-{
+struct dmi_ipmi_data {
        u8              type;
        u8              addr_space;
        unsigned long   base_addr;
@@ -1989,11 +2051,10 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
                        /* I/O */
                        base_addr &= 0xFFFE;
                        dmi->addr_space = IPMI_IO_ADDR_SPACE;
-               }
-               else {
+               } else
                        /* Memory */
                        dmi->addr_space = IPMI_MEM_ADDR_SPACE;
-               }
+
                /* If bit 4 of byte 0x10 is set, then the lsb for the address
                   is odd. */
                dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4);
@@ -2002,7 +2063,7 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
 
                /* The top two bits of byte 0x10 hold the register spacing. */
                reg_spacing = (data[0x10] & 0xC0) >> 6;
-               switch(reg_spacing){
+               switch (reg_spacing) {
                case 0x00: /* Byte boundaries */
                    dmi->offset = 1;
                    break;
@@ -2018,12 +2079,14 @@ static int __devinit decode_dmi(const struct dmi_header *dm,
                }
        } else {
                /* Old DMI spec. */
-               /* Note that technically, the lower bit of the base
+               /*
+                * Note that technically, the lower bit of the base
                 * address should be 1 if the address is I/O and 0 if
                 * the address is in memory.  So many systems get that
                 * wrong (and all that I have seen are I/O) so we just
                 * ignore that bit and assume I/O.  Systems that use
-                * memory should use the newer spec, anyway. */
+                * memory should use the newer spec, anyway.
+                */
                dmi->base_addr = base_addr & 0xfffe;
                dmi->addr_space = IPMI_IO_ADDR_SPACE;
                dmi->offset = 1;
@@ -2230,13 +2293,13 @@ static struct pci_device_id ipmi_pci_devices[] = {
 MODULE_DEVICE_TABLE(pci, ipmi_pci_devices);
 
 static struct pci_driver ipmi_pci_driver = {
-        .name =         DEVICE_NAME,
-        .id_table =     ipmi_pci_devices,
-        .probe =        ipmi_pci_probe,
-        .remove =       __devexit_p(ipmi_pci_remove),
+       .name =         DEVICE_NAME,
+       .id_table =     ipmi_pci_devices,
+       .probe =        ipmi_pci_probe,
+       .remove =       __devexit_p(ipmi_pci_remove),
 #ifdef CONFIG_PM
-        .suspend =      ipmi_pci_suspend,
-        .resume =       ipmi_pci_resume,
+       .suspend =      ipmi_pci_suspend,
+       .resume =       ipmi_pci_resume,
 #endif
 };
 #endif /* CONFIG_PCI */
@@ -2306,7 +2369,7 @@ static int __devinit ipmi_of_probe(struct of_device *dev,
                info->io.addr_data, info->io.regsize, info->io.regspacing,
                info->irq);
 
-       dev->dev.driver_data = (void*) info;
+       dev->dev.driver_data = (void *) info;
 
        return try_smi_init(info);
 }
@@ -2319,14 +2382,16 @@ static int __devexit ipmi_of_remove(struct of_device *dev)
 
 static struct of_device_id ipmi_match[] =
 {
-       { .type = "ipmi", .compatible = "ipmi-kcs",  .data = (void *)(unsigned long) SI_KCS },
-       { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC },
-       { .type = "ipmi", .compatible = "ipmi-bt",   .data = (void *)(unsigned long) SI_BT },
+       { .type = "ipmi", .compatible = "ipmi-kcs",
+         .data = (void *)(unsigned long) SI_KCS },
+       { .type = "ipmi", .compatible = "ipmi-smic",
+         .data = (void *)(unsigned long) SI_SMIC },
+       { .type = "ipmi", .compatible = "ipmi-bt",
+         .data = (void *)(unsigned long) SI_BT },
        {},
 };
 
-static struct of_platform_driver ipmi_of_platform_driver =
-{
+static struct of_platform_driver ipmi_of_platform_driver = {
        .name           = "ipmi",
        .match_table    = ipmi_match,
        .probe          = ipmi_of_probe,
@@ -2347,32 +2412,32 @@ static int try_get_dev_id(struct smi_info *smi_info)
        if (!resp)
                return -ENOMEM;
 
-       /* Do a Get Device ID command, since it comes back with some
-          useful info. */
+       /*
+        * Do a Get Device ID command, since it comes back with some
+        * useful info.
+        */
        msg[0] = IPMI_NETFN_APP_REQUEST << 2;
        msg[1] = IPMI_GET_DEVICE_ID_CMD;
        smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
 
        smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
-       for (;;)
-       {
+       for (;;) {
                if (smi_result == SI_SM_CALL_WITH_DELAY ||
                    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
                        schedule_timeout_uninterruptible(1);
                        smi_result = smi_info->handlers->event(
                                smi_info->si_sm, 100);
-               }
-               else if (smi_result == SI_SM_CALL_WITHOUT_DELAY)
-               {
+               } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
                        smi_result = smi_info->handlers->event(
                                smi_info->si_sm, 0);
-               }
-               else
+               } else
                        break;
        }
        if (smi_result == SI_SM_HOSED) {
-               /* We couldn't get the state machine to run, so whatever's at
-                  the port is probably not an IPMI SMI interface. */
+               /*
+                * We couldn't get the state machine to run, so whatever's at
+                * the port is probably not an IPMI SMI interface.
+                */
                rv = -ENODEV;
                goto out;
        }
@@ -2405,30 +2470,28 @@ static int stat_file_read_proc(char *page, char **start, off_t off,
 
        out += sprintf(out, "interrupts_enabled:    %d\n",
                       smi->irq && !smi->interrupt_disabled);
-       out += sprintf(out, "short_timeouts:        %ld\n",
-                      smi->short_timeouts);
-       out += sprintf(out, "long_timeouts:         %ld\n",
-                      smi->long_timeouts);
-       out += sprintf(out, "timeout_restarts:      %ld\n",
-                      smi->timeout_restarts);
-       out += sprintf(out, "idles:                 %ld\n",
-                      smi->idles);
-       out += sprintf(out, "interrupts:            %ld\n",
-                      smi->interrupts);
-       out += sprintf(out, "attentions:            %ld\n",
-                      smi->attentions);
-       out += sprintf(out, "flag_fetches:          %ld\n",
-                      smi->flag_fetches);
-       out += sprintf(out, "hosed_count:           %ld\n",
-                      smi->hosed_count);
-       out += sprintf(out, "complete_transactions: %ld\n",
-                      smi->complete_transactions);
-       out += sprintf(out, "events:                %ld\n",
-                      smi->events);
-       out += sprintf(out, "watchdog_pretimeouts:  %ld\n",
-                      smi->watchdog_pretimeouts);
-       out += sprintf(out, "incoming_messages:     %ld\n",
-                      smi->incoming_messages);
+       out += sprintf(out, "short_timeouts:        %u\n",
+                      smi_get_stat(smi, short_timeouts));
+       out += sprintf(out, "long_timeouts:         %u\n",
+                      smi_get_stat(smi, long_timeouts));
+       out += sprintf(out, "idles:                 %u\n",
+                      smi_get_stat(smi, idles));
+       out += sprintf(out, "interrupts:            %u\n",
+                      smi_get_stat(smi, interrupts));
+       out += sprintf(out, "attentions:            %u\n",
+                      smi_get_stat(smi, attentions));
+       out += sprintf(out, "flag_fetches:          %u\n",
+                      smi_get_stat(smi, flag_fetches));
+       out += sprintf(out, "hosed_count:           %u\n",
+                      smi_get_stat(smi, hosed_count));
+       out += sprintf(out, "complete_transactions: %u\n",
+                      smi_get_stat(smi, complete_transactions));
+       out += sprintf(out, "events:                %u\n",
+                      smi_get_stat(smi, events));
+       out += sprintf(out, "watchdog_pretimeouts:  %u\n",
+                      smi_get_stat(smi, watchdog_pretimeouts));
+       out += sprintf(out, "incoming_messages:     %u\n",
+                      smi_get_stat(smi, incoming_messages));
 
        return out - page;
 }
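With the conversion above, the si_stats proc file keeps its one-counter-per-line layout; the values are simply read through smi_get_stat() and printed as %u, and the unused timeout_restarts line disappears along with its field. A read of the file (path varies with the interface number, e.g. /proc/ipmi/0/si_stats; values purely illustrative) would look roughly like:

        interrupts_enabled:    1
        short_timeouts:        0
        long_timeouts:         138
        idles:                 0
        interrupts:            57
        ...
        incoming_messages:     12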
@@ -2460,7 +2523,7 @@ static int param_read_proc(char *page, char **start, off_t off,
 static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
 {
        smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
-                               RECEIVE_MSG_AVAIL);
+                              RECEIVE_MSG_AVAIL);
        return 1;
 }
 
@@ -2502,10 +2565,9 @@ static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
                    id->ipmi_version   == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
                        smi_info->oem_data_avail_handler =
                                oem_data_avail_to_receive_msg_avail;
-               }
-               else if (ipmi_version_major(id) < 1 ||
-                        (ipmi_version_major(id) == 1 &&
-                         ipmi_version_minor(id) < 5)) {
+               } else if (ipmi_version_major(id) < 1 ||
+                          (ipmi_version_major(id) == 1 &&
+                           ipmi_version_minor(id) < 5)) {
                        smi_info->oem_data_avail_handler =
                                oem_data_avail_to_receive_msg_avail;
                }
@@ -2597,8 +2659,10 @@ static void setup_xaction_handlers(struct smi_info *smi_info)
 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
 {
        if (smi_info->intf) {
-               /* The timer and thread are only running if the
-                  interface has been started up and registered. */
+               /*
+                * The timer and thread are only running if the
+                * interface has been started up and registered.
+                */
                if (smi_info->thread != NULL)
                        kthread_stop(smi_info->thread);
                del_timer_sync(&smi_info->si_timer);
@@ -2676,6 +2740,7 @@ static int is_new_interface(struct smi_info *info)
 static int try_smi_init(struct smi_info *new_smi)
 {
        int rv;
+       int i;
 
        if (new_smi->addr_source) {
                printk(KERN_INFO "ipmi_si: Trying %s-specified %s state"
@@ -2722,7 +2787,7 @@ static int try_smi_init(struct smi_info *new_smi)
        /* Allocate the state machine's data and initialize it. */
        new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
        if (!new_smi->si_sm) {
-               printk("Could not allocate state machine memory\n");
+               printk(KERN_ERR "Could not allocate state machine memory\n");
                rv = -ENOMEM;
                goto out_err;
        }
@@ -2732,13 +2797,12 @@ static int try_smi_init(struct smi_info *new_smi)
        /* Now that we know the I/O size, we can set up the I/O. */
        rv = new_smi->io_setup(new_smi);
        if (rv) {
-               printk("Could not set up I/O space\n");
+               printk(KERN_ERR "Could not set up I/O space\n");
                goto out_err;
        }
 
        spin_lock_init(&(new_smi->si_lock));
        spin_lock_init(&(new_smi->msg_lock));
-       spin_lock_init(&(new_smi->count_lock));
 
        /* Do low-level detection first. */
        if (new_smi->handlers->detect(new_smi->si_sm)) {
@@ -2749,8 +2813,10 @@ static int try_smi_init(struct smi_info *new_smi)
                goto out_err;
        }
 
-       /* Attempt a get device id command.  If it fails, we probably
-           don't have a BMC here. */
+       /*
+        * Attempt a get device id command.  If it fails, we probably
+        * don't have a BMC here.
+        */
        rv = try_get_dev_id(new_smi);
        if (rv) {
                if (new_smi->addr_source)
@@ -2767,22 +2833,28 @@ static int try_smi_init(struct smi_info *new_smi)
        new_smi->curr_msg = NULL;
        atomic_set(&new_smi->req_events, 0);
        new_smi->run_to_completion = 0;
+       for (i = 0; i < SI_NUM_STATS; i++)
+               atomic_set(&new_smi->stats[i], 0);
 
        new_smi->interrupt_disabled = 0;
        atomic_set(&new_smi->stop_operation, 0);
        new_smi->intf_num = smi_num;
        smi_num++;
 
-       /* Start clearing the flags before we enable interrupts or the
-          timer to avoid racing with the timer. */
+       /*
+        * Start clearing the flags before we enable interrupts or the
+        * timer to avoid racing with the timer.
+        */
        start_clear_flags(new_smi);
        /* IRQ is defined to be set when non-zero. */
        if (new_smi->irq)
                new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
 
        if (!new_smi->dev) {
-               /* If we don't already have a device from something
-                * else (like PCI), then register a new one. */
+               /*
+                * If we don't already have a device from something
+                * else (like PCI), then register a new one.
+                */
                new_smi->pdev = platform_device_alloc("ipmi_si",
                                                      new_smi->intf_num);
                if (rv) {
@@ -2820,7 +2892,7 @@ static int try_smi_init(struct smi_info *new_smi)
        }
 
        rv = ipmi_smi_add_proc_entry(new_smi->intf, "type",
-                                    type_file_read_proc, NULL,
+                                    type_file_read_proc,
                                     new_smi, THIS_MODULE);
        if (rv) {
                printk(KERN_ERR
@@ -2830,7 +2902,7 @@ static int try_smi_init(struct smi_info *new_smi)
        }
 
        rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats",
-                                    stat_file_read_proc, NULL,
+                                    stat_file_read_proc,
                                     new_smi, THIS_MODULE);
        if (rv) {
                printk(KERN_ERR
@@ -2840,7 +2912,7 @@ static int try_smi_init(struct smi_info *new_smi)
        }
 
        rv = ipmi_smi_add_proc_entry(new_smi->intf, "params",
-                                    param_read_proc, NULL,
+                                    param_read_proc,
                                     new_smi, THIS_MODULE);
        if (rv) {
                printk(KERN_ERR
@@ -2853,7 +2925,8 @@ static int try_smi_init(struct smi_info *new_smi)
 
        mutex_unlock(&smi_infos_lock);
 
-       printk(KERN_INFO "IPMI %s interface initialized\n",si_to_str[new_smi->si_type]);
+       printk(KERN_INFO "IPMI %s interface initialized\n",
+              si_to_str[new_smi->si_type]);
 
        return 0;
 
@@ -2868,9 +2941,11 @@ static int try_smi_init(struct smi_info *new_smi)
        if (new_smi->irq_cleanup)
                new_smi->irq_cleanup(new_smi);
 
-       /* Wait until we know that we are out of any interrupt
-          handlers might have been running before we freed the
-          interrupt. */
+       /*
+        * Wait until we know that we are out of any interrupt
+        * handlers might have been running before we freed the
+        * interrupt.
+        */
        synchronize_sched();
 
        if (new_smi->si_sm) {
@@ -2942,11 +3017,10 @@ static __devinit int init_ipmi_si(void)
 
 #ifdef CONFIG_PCI
        rv = pci_register_driver(&ipmi_pci_driver);
-       if (rv){
+       if (rv)
                printk(KERN_ERR
                       "init_ipmi_si: Unable to register PCI driver: %d\n",
                       rv);
-       }
 #endif
 
 #ifdef CONFIG_PPC_OF
@@ -2975,7 +3049,8 @@ static __devinit int init_ipmi_si(void)
                of_unregister_platform_driver(&ipmi_of_platform_driver);
 #endif
                driver_unregister(&ipmi_driver);
-               printk("ipmi_si: Unable to find any System Interface(s)\n");
+               printk(KERN_WARNING
+                      "ipmi_si: Unable to find any System Interface(s)\n");
                return -ENODEV;
        } else {
                mutex_unlock(&smi_infos_lock);
@@ -2997,13 +3072,17 @@ static void cleanup_one_si(struct smi_info *to_clean)
        /* Tell the driver that we are shutting down. */
        atomic_inc(&to_clean->stop_operation);
 
-       /* Make sure the timer and thread are stopped and will not run
-          again. */
+       /*
+        * Make sure the timer and thread are stopped and will not run
+        * again.
+        */
        wait_for_timer_and_thread(to_clean);
 
-       /* Timeouts are stopped, now make sure the interrupts are off
-          for the device.  A little tricky with locks to make sure
-          there are no races. */
+       /*
+        * Timeouts are stopped, now make sure the interrupts are off
+        * for the device.  A little tricky with locks to make sure
+        * there are no races.
+        */
        spin_lock_irqsave(&to_clean->si_lock, flags);
        while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
                spin_unlock_irqrestore(&to_clean->si_lock, flags);
@@ -3074,4 +3153,5 @@ module_exit(cleanup_ipmi_si);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
-MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");
+MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
+                  " system interfaces.");
index 4b731b24dc16c79d2e688edafe9ac20837f60e45..df89f73475fb996770bfeae8f3710038feabb825 100644 (file)
  *  675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
-/* This is defined by the state machines themselves, it is an opaque
-   data type for them to use. */
+/*
+ * This is defined by the state machines themselves, it is an opaque
+ * data type for them to use.
+ */
 struct si_sm_data;
 
-/* The structure for doing I/O in the state machine.  The state
-   machine doesn't have the actual I/O routines, they are done through
-   this interface. */
-struct si_sm_io
-{
+/*
+ * The structure for doing I/O in the state machine.  The state
+ * machine doesn't have the actual I/O routines, they are done through
+ * this interface.
+ */
+struct si_sm_io {
        unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset);
        void (*outputb)(struct si_sm_io *io,
                        unsigned int  offset,
                        unsigned char b);
 
-       /* Generic info used by the actual handling routines, the
-           state machine shouldn't touch these. */
+       /*
+        * Generic info used by the actual handling routines, the
+        * state machine shouldn't touch these.
+        */
        void __iomem *addr;
        int  regspacing;
        int  regsize;
@@ -59,53 +64,67 @@ struct si_sm_io
 };
 
 /* Results of SMI events. */
-enum si_sm_result
-{
+enum si_sm_result {
        SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */
        SI_SM_CALL_WITH_DELAY,  /* Delay some before calling again. */
-       SI_SM_CALL_WITH_TICK_DELAY,     /* Delay at least 1 tick before calling again. */
+       SI_SM_CALL_WITH_TICK_DELAY,/* Delay >=1 tick before calling again. */
        SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */
        SI_SM_IDLE,             /* The SM is in idle state. */
        SI_SM_HOSED,            /* The hardware violated the state machine. */
-       SI_SM_ATTN              /* The hardware is asserting attn and the
-                                  state machine is idle. */
+
+       /*
+        * The hardware is asserting attn and the state machine is
+        * idle.
+        */
+       SI_SM_ATTN
 };
 
 /* Handlers for the SMI state machine. */
-struct si_sm_handlers
-{
-       /* Put the version number of the state machine here so the
-           upper layer can print it. */
+struct si_sm_handlers {
+       /*
+        * Put the version number of the state machine here so the
+        * upper layer can print it.
+        */
        char *version;
 
-       /* Initialize the data and return the amount of I/O space to
-           reserve for the space. */
+       /*
+        * Initialize the data and return the amount of I/O space to
+        * reserve for the space.
+        */
        unsigned int (*init_data)(struct si_sm_data *smi,
                                  struct si_sm_io   *io);
 
-       /* Start a new transaction in the state machine.  This will
-          return -2 if the state machine is not idle, -1 if the size
-          is invalid (to large or too small), or 0 if the transaction
-          is successfully completed. */
+       /*
+        * Start a new transaction in the state machine.  This will
+        * return -2 if the state machine is not idle, -1 if the size
+        * is invalid (to large or too small), or 0 if the transaction
+        * is successfully completed.
+        */
        int (*start_transaction)(struct si_sm_data *smi,
                                 unsigned char *data, unsigned int size);
 
-       /* Return the results after the transaction.  This will return
-          -1 if the buffer is too small, zero if no transaction is
-          present, or the actual length of the result data. */
+       /*
+        * Return the results after the transaction.  This will return
+        * -1 if the buffer is too small, zero if no transaction is
+        * present, or the actual length of the result data.
+        */
        int (*get_result)(struct si_sm_data *smi,
                          unsigned char *data, unsigned int length);
 
-       /* Call this periodically (for a polled interface) or upon
-          receiving an interrupt (for a interrupt-driven interface).
-          If interrupt driven, you should probably poll this
-          periodically when not in idle state.  This should be called
-          with the time that passed since the last call, if it is
-          significant.  Time is in microseconds. */
+       /*
+        * Call this periodically (for a polled interface) or upon
+        * receiving an interrupt (for a interrupt-driven interface).
+        * If interrupt driven, you should probably poll this
+        * periodically when not in idle state.  This should be called
+        * with the time that passed since the last call, if it is
+        * significant.  Time is in microseconds.
+        */
        enum si_sm_result (*event)(struct si_sm_data *smi, long time);
 
-       /* Attempt to detect an SMI.  Returns 0 on success or nonzero
-           on failure. */
+       /*
+        * Attempt to detect an SMI.  Returns 0 on success or nonzero
+        * on failure.
+        */
        int (*detect)(struct si_sm_data *smi);
 
        /* The interface is shutting down, so clean it up. */
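
Taken together, the comments above describe the whole contract between the generic driver and a state-machine backend: init_data() sizes the I/O region, start_transaction() returns -2 when the machine is busy, -1 for a bad size and 0 when the request is accepted, event() is then called repeatedly with the elapsed time in microseconds until it reports a completed transaction, and get_result() copies the response out (or returns -1 if the caller's buffer is too small). A rough, self-contained sketch of a caller driving such a table of function pointers, with a trivial mock standing in for a real KCS/SMIC/BT machine; every name below is invented for the example:

    #include <stdio.h>
    #include <string.h>

    enum sm_result { SM_CALL_AGAIN, SM_TRANSACTION_COMPLETE };

    struct sm_data { unsigned char buf[32]; unsigned int len; int ticks; };

    struct sm_handlers {
            int (*start_transaction)(struct sm_data *sm,
                                     const unsigned char *data, unsigned int size);
            enum sm_result (*event)(struct sm_data *sm, long time_us);
            int (*get_result)(struct sm_data *sm,
                              unsigned char *data, unsigned int length);
    };

    /* Mock backend: "completes" after a few event() calls and echoes the request. */
    static int mock_start(struct sm_data *sm, const unsigned char *data,
                          unsigned int size)
    {
            if (size > sizeof(sm->buf))
                    return -1;              /* invalid size */
            memcpy(sm->buf, data, size);
            sm->len = size;
            sm->ticks = 3;
            return 0;
    }

    static enum sm_result mock_event(struct sm_data *sm, long time_us)
    {
            (void)time_us;
            return --sm->ticks > 0 ? SM_CALL_AGAIN : SM_TRANSACTION_COMPLETE;
    }

    static int mock_get_result(struct sm_data *sm, unsigned char *data,
                               unsigned int length)
    {
            if (length < sm->len)
                    return -1;              /* caller's buffer too small */
            memcpy(data, sm->buf, sm->len);
            return (int)sm->len;
    }

    static const struct sm_handlers mock_handlers = {
            .start_transaction = mock_start,
            .event             = mock_event,
            .get_result        = mock_get_result,
    };

    int main(void)
    {
            struct sm_data sm;
            unsigned char req[] = { 0x18, 0x01 };   /* e.g. a Get Device ID request */
            unsigned char rsp[32];
            int n;

            if (mock_handlers.start_transaction(&sm, req, sizeof(req)))
                    return 1;
            while (mock_handlers.event(&sm, 100) != SM_TRANSACTION_COMPLETE)
                    ;                       /* a real caller would back off or sleep */
            n = mock_handlers.get_result(&sm, rsp, sizeof(rsp));
            printf("got %d response bytes\n", n);
            return 0;
    }

In the real driver the loop does not spin: SI_SM_CALL_WITH_DELAY and its variants tell the caller how long to back off, and interrupt-driven interfaces only poll while a transaction is outstanding.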
index e64ea7d25d241d858d7a3022e070de8952a4fc00..faed92971907212c24f2765b6d2f633fb0a5719a 100644 (file)
@@ -85,6 +85,7 @@ enum smic_states {
 /* SMIC Flags Register Bits */
 #define SMIC_RX_DATA_READY     0x80
 #define SMIC_TX_DATA_READY     0x40
+
 /*
  * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by
  * a few systems, and then only by Systems Management
@@ -104,23 +105,22 @@ enum smic_states {
 #define        EC_ILLEGAL_COMMAND      0x04
 #define        EC_BUFFER_FULL          0x05
 
-struct si_sm_data
-{
+struct si_sm_data {
        enum smic_states state;
        struct si_sm_io *io;
-        unsigned char   write_data[MAX_SMIC_WRITE_SIZE];
-        int             write_pos;
-        int             write_count;
-        int             orig_write_count;
-        unsigned char   read_data[MAX_SMIC_READ_SIZE];
-        int             read_pos;
-        int             truncated;
-        unsigned int    error_retries;
-        long            smic_timeout;
+       unsigned char    write_data[MAX_SMIC_WRITE_SIZE];
+       int              write_pos;
+       int              write_count;
+       int              orig_write_count;
+       unsigned char    read_data[MAX_SMIC_READ_SIZE];
+       int              read_pos;
+       int              truncated;
+       unsigned int     error_retries;
+       long             smic_timeout;
 };
 
-static unsigned int init_smic_data (struct si_sm_data *smic,
-                                   struct si_sm_io *io)
+static unsigned int init_smic_data(struct si_sm_data *smic,
+                                  struct si_sm_io *io)
 {
        smic->state = SMIC_IDLE;
        smic->io = io;
@@ -150,11 +150,10 @@ static int start_smic_transaction(struct si_sm_data *smic,
                return IPMI_NOT_IN_MY_STATE_ERR;
 
        if (smic_debug & SMIC_DEBUG_MSG) {
-               printk(KERN_INFO "start_smic_transaction -");
-               for (i = 0; i < size; i ++) {
-                       printk (" %02x", (unsigned char) (data [i]));
-               }
-               printk ("\n");
+               printk(KERN_DEBUG "start_smic_transaction -");
+               for (i = 0; i < size; i++)
+                       printk(" %02x", (unsigned char) data[i]);
+               printk("\n");
        }
        smic->error_retries = 0;
        memcpy(smic->write_data, data, size);
@@ -173,11 +172,10 @@ static int smic_get_result(struct si_sm_data *smic,
        int i;
 
        if (smic_debug & SMIC_DEBUG_MSG) {
-               printk (KERN_INFO "smic_get result -");
-               for (i = 0; i < smic->read_pos; i ++) {
-                       printk (" %02x", (smic->read_data [i]));
-               }
-               printk ("\n");
+               printk(KERN_DEBUG "smic_get result -");
+               for (i = 0; i < smic->read_pos; i++)
+                       printk(" %02x", smic->read_data[i]);
+               printk("\n");
        }
        if (length < smic->read_pos) {
                smic->read_pos = length;
@@ -223,8 +221,8 @@ static inline void write_smic_control(struct si_sm_data *smic,
        smic->io->outputb(smic->io, 1, control);
 }
 
-static inline void write_si_sm_data (struct si_sm_data *smic,
-                                  unsigned char   data)
+static inline void write_si_sm_data(struct si_sm_data *smic,
+                                   unsigned char   data)
 {
        smic->io->outputb(smic->io, 0, data);
 }
@@ -233,10 +231,9 @@ static inline void start_error_recovery(struct si_sm_data *smic, char *reason)
 {
        (smic->error_retries)++;
        if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) {
-               if (smic_debug & SMIC_DEBUG_ENABLE) {
+               if (smic_debug & SMIC_DEBUG_ENABLE)
                        printk(KERN_WARNING
                               "ipmi_smic_drv: smic hosed: %s\n", reason);
-               }
                smic->state = SMIC_HOSED;
        } else {
                smic->write_count = smic->orig_write_count;
@@ -254,14 +251,14 @@ static inline void write_next_byte(struct si_sm_data *smic)
        (smic->write_count)--;
 }
 
-static inline void read_next_byte (struct si_sm_data *smic)
+static inline void read_next_byte(struct si_sm_data *smic)
 {
        if (smic->read_pos >= MAX_SMIC_READ_SIZE) {
-               read_smic_data (smic);
+               read_smic_data(smic);
                smic->truncated = 1;
        } else {
                smic->read_data[smic->read_pos] = read_smic_data(smic);
-               (smic->read_pos)++;
+               smic->read_pos++;
        }
 }
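
start_error_recovery() above applies a simple bounded-retry policy: bump error_retries, declare the hardware hosed once the count passes SMIC_MAX_ERROR_RETRIES, and otherwise rewind the write cursor (orig_write_count) and restart from SMIC_START_OP. A small userspace sketch of the same idea, with invented names and a stand-in transmit function that fails the first two attempts:

    #include <stdio.h>

    #define MAX_ERROR_RETRIES 3

    /* Pretend transmit that fails the first two attempts. */
    static int do_transaction(const unsigned char *buf, int len, int attempt)
    {
            (void)buf; (void)len;
            return attempt < 2 ? -1 : 0;
    }

    static int send_with_retries(const unsigned char *buf, int len)
    {
            int retries = 0;

            for (;;) {
                    /* Each retry starts again from the first byte, much like
                     * restoring orig_write_count before re-entering START_OP. */
                    if (do_transaction(buf, len, retries) == 0)
                            return 0;
                    if (++retries > MAX_ERROR_RETRIES)
                            return -1;      /* "hosed": give up on the hardware */
            }
    }

    int main(void)
    {
            unsigned char msg[] = { 0x01, 0x02 };

            printf("send %s\n", send_with_retries(msg, 2) ? "failed" : "ok");
            return 0;
    }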
 
@@ -336,7 +333,7 @@ static inline void read_next_byte (struct si_sm_data *smic)
        SMIC_SC_SMS_RD_END      0xC6
 */
 
-static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
+static enum si_sm_result smic_event(struct si_sm_data *smic, long time)
 {
        unsigned char status;
        unsigned char flags;
@@ -347,13 +344,15 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
                return SI_SM_HOSED;
        }
        if (smic->state != SMIC_IDLE) {
-               if (smic_debug & SMIC_DEBUG_STATES) {
-                       printk(KERN_INFO
+               if (smic_debug & SMIC_DEBUG_STATES)
+                       printk(KERN_DEBUG
                               "smic_event - smic->smic_timeout = %ld,"
                               " time = %ld\n",
                               smic->smic_timeout, time);
-               }
-/* FIXME: smic_event is sometimes called with time > SMIC_RETRY_TIMEOUT */
+               /*
+                * FIXME: smic_event is sometimes called with time >
+                * SMIC_RETRY_TIMEOUT
+                */
                if (time < SMIC_RETRY_TIMEOUT) {
                        smic->smic_timeout -= time;
                        if (smic->smic_timeout < 0) {
@@ -366,9 +365,9 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
        if (flags & SMIC_FLAG_BSY)
                return SI_SM_CALL_WITH_DELAY;
 
-       status = read_smic_status (smic);
+       status = read_smic_status(smic);
        if (smic_debug & SMIC_DEBUG_STATES)
-               printk(KERN_INFO
+               printk(KERN_DEBUG
                       "smic_event - state = %d, flags = 0x%02x,"
                       " status = 0x%02x\n",
                       smic->state, flags, status);
@@ -377,9 +376,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
        case SMIC_IDLE:
                /* in IDLE we check for available messages */
                if (flags & SMIC_SMS_DATA_AVAIL)
-               {
                        return SI_SM_ATTN;
-               }
                return SI_SM_IDLE;
 
        case SMIC_START_OP:
@@ -391,7 +388,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
 
        case SMIC_OP_OK:
                if (status != SMIC_SC_SMS_READY) {
-                               /* this should not happen */
+                       /* this should not happen */
                        start_error_recovery(smic,
                                             "state = SMIC_OP_OK,"
                                             " status != SMIC_SC_SMS_READY");
@@ -411,8 +408,10 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
                                             "status != SMIC_SC_SMS_WR_START");
                        return SI_SM_CALL_WITH_DELAY;
                }
-               /* we must not issue WR_(NEXT|END) unless
-                   TX_DATA_READY is set */
+               /*
+                * we must not issue WR_(NEXT|END) unless
+                * TX_DATA_READY is set
+                * */
                if (flags & SMIC_TX_DATA_READY) {
                        if (smic->write_count == 1) {
                                /* last byte */
@@ -424,10 +423,8 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
                        }
                        write_next_byte(smic);
                        write_smic_flags(smic, flags | SMIC_FLAG_BSY);
-               }
-               else {
+               } else
                        return SI_SM_CALL_WITH_DELAY;
-               }
                break;
 
        case SMIC_WRITE_NEXT:
@@ -442,52 +439,48 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
                        if (smic->write_count == 1) {
                                write_smic_control(smic, SMIC_CC_SMS_WR_END);
                                smic->state = SMIC_WRITE_END;
-                       }
-                       else {
+                       } else {
                                write_smic_control(smic, SMIC_CC_SMS_WR_NEXT);
                                smic->state = SMIC_WRITE_NEXT;
                        }
                        write_next_byte(smic);
                        write_smic_flags(smic, flags | SMIC_FLAG_BSY);
-               }
-               else {
+               } else
                        return SI_SM_CALL_WITH_DELAY;
-               }
                break;
 
        case SMIC_WRITE_END:
                if (status != SMIC_SC_SMS_WR_END) {
-                       start_error_recovery (smic,
-                                             "state = SMIC_WRITE_END, "
-                                             "status != SMIC_SC_SMS_WR_END");
+                       start_error_recovery(smic,
+                                            "state = SMIC_WRITE_END, "
+                                            "status != SMIC_SC_SMS_WR_END");
                        return SI_SM_CALL_WITH_DELAY;
                }
                /* data register holds an error code */
                data = read_smic_data(smic);
                if (data != 0) {
-                       if (smic_debug & SMIC_DEBUG_ENABLE) {
-                               printk(KERN_INFO
+                       if (smic_debug & SMIC_DEBUG_ENABLE)
+                               printk(KERN_DEBUG
                                       "SMIC_WRITE_END: data = %02x\n", data);
-                       }
                        start_error_recovery(smic,
                                             "state = SMIC_WRITE_END, "
                                             "data != SUCCESS");
                        return SI_SM_CALL_WITH_DELAY;
-               } else {
+               } else
                        smic->state = SMIC_WRITE2READ;
-               }
                break;
 
        case SMIC_WRITE2READ:
-               /* we must wait for RX_DATA_READY to be set before we
-                   can continue */
+               /*
+                * we must wait for RX_DATA_READY to be set before we
+                * can continue
+                */
                if (flags & SMIC_RX_DATA_READY) {
                        write_smic_control(smic, SMIC_CC_SMS_RD_START);
                        write_smic_flags(smic, flags | SMIC_FLAG_BSY);
                        smic->state = SMIC_READ_START;
-               } else {
+               } else
                        return SI_SM_CALL_WITH_DELAY;
-               }
                break;
 
        case SMIC_READ_START:
@@ -502,15 +495,16 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
                        write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
                        write_smic_flags(smic, flags | SMIC_FLAG_BSY);
                        smic->state = SMIC_READ_NEXT;
-               } else {
+               } else
                        return SI_SM_CALL_WITH_DELAY;
-               }
                break;
 
        case SMIC_READ_NEXT:
                switch (status) {
-               /* smic tells us that this is the last byte to be read
-                   --> clean up */
+               /*
+                * smic tells us that this is the last byte to be read
+                * --> clean up
+                */
                case SMIC_SC_SMS_RD_END:
                        read_next_byte(smic);
                        write_smic_control(smic, SMIC_CC_SMS_RD_END);
@@ -523,9 +517,8 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
                                write_smic_control(smic, SMIC_CC_SMS_RD_NEXT);
                                write_smic_flags(smic, flags | SMIC_FLAG_BSY);
                                smic->state = SMIC_READ_NEXT;
-                       } else {
+                       } else
                                return SI_SM_CALL_WITH_DELAY;
-                       }
                        break;
                default:
                        start_error_recovery(
@@ -546,10 +539,9 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
                data = read_smic_data(smic);
                /* data register holds an error code */
                if (data != 0) {
-                       if (smic_debug & SMIC_DEBUG_ENABLE) {
-                               printk(KERN_INFO
+                       if (smic_debug & SMIC_DEBUG_ENABLE)
+                               printk(KERN_DEBUG
                                       "SMIC_READ_END: data = %02x\n", data);
-                       }
                        start_error_recovery(smic,
                                             "state = SMIC_READ_END, "
                                             "data != SUCCESS");
@@ -565,7 +557,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
 
        default:
                if (smic_debug & SMIC_DEBUG_ENABLE) {
-                       printk(KERN_WARNING "smic->state = %d\n", smic->state);
+                       printk(KERN_DEBUG "smic->state = %d\n", smic->state);
                        start_error_recovery(smic, "state = UNKNOWN");
                        return SI_SM_CALL_WITH_DELAY;
                }
@@ -576,10 +568,12 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time)
 
 static int smic_detect(struct si_sm_data *smic)
 {
-       /* It's impossible for the SMIC fnags register to be all 1's,
-          (assuming a properly functioning, self-initialized BMC)
-          but that's what you get from reading a bogus address, so we
-          test that first. */
+       /*
+        * It's impossible for the SMIC fnags register to be all 1's,
+        * (assuming a properly functioning, self-initialized BMC)
+        * but that's what you get from reading a bogus address, so we
+        * test that first.
+        */
        if (read_smic_flags(smic) == 0xff)
                return 1;
 
@@ -595,8 +589,7 @@ static int smic_size(void)
        return sizeof(struct si_sm_data);
 }
 
-struct si_sm_handlers smic_smi_handlers =
-{
+struct si_sm_handlers smic_smi_handlers = {
        .init_data         = init_smic_data,
        .start_transaction = start_smic_transaction,
        .get_result        = smic_get_result,
index 8f45ca9235ad10fb41ece3d803d776843bb3ea53..1b9a8704781783d6e9f9f8416cc9ef6e07c280c6 100644 (file)
 #include <asm/atomic.h>
 
 #ifdef CONFIG_X86
-/* This is ugly, but I've determined that x86 is the only architecture
-   that can reasonably support the IPMI NMI watchdog timeout at this
-   time.  If another architecture adds this capability somehow, it
-   will have to be a somewhat different mechanism and I have no idea
-   how it will work.  So in the unlikely event that another
-   architecture supports this, we can figure out a good generic
-   mechanism for it at that time. */
+/*
+ * This is ugly, but I've determined that x86 is the only architecture
+ * that can reasonably support the IPMI NMI watchdog timeout at this
+ * time.  If another architecture adds this capability somehow, it
+ * will have to be a somewhat different mechanism and I have no idea
+ * how it will work.  So in the unlikely event that another
+ * architecture supports this, we can figure out a good generic
+ * mechanism for it at that time.
+ */
 #include <asm/kdebug.h>
 #define HAVE_DIE_NMI
 #endif
@@ -95,9 +97,8 @@
 /* Operations that can be performed on a pretimout. */
 #define WDOG_PREOP_NONE                0
 #define WDOG_PREOP_PANIC       1
-#define WDOG_PREOP_GIVE_DATA   2 /* Cause data to be available to
-                                     read.  Doesn't work in NMI
-                                     mode. */
+/* Cause data to be available to read.  Doesn't work in NMI mode. */
+#define WDOG_PREOP_GIVE_DATA   2
 
 /* Actions to perform on a full timeout. */
 #define WDOG_SET_TIMEOUT_ACT(byte, use) \
 #define WDOG_TIMEOUT_POWER_DOWN                2
 #define WDOG_TIMEOUT_POWER_CYCLE       3
 
-/* Byte 3 of the get command, byte 4 of the get response is the
-   pre-timeout in seconds. */
+/*
+ * Byte 3 of the get command, byte 4 of the get response is the
+ * pre-timeout in seconds.
+ */
 
 /* Bits for setting byte 4 of the set command, byte 5 of the get response. */
 #define WDOG_EXPIRE_CLEAR_BIOS_FRB2    (1 << 1)
 #define WDOG_EXPIRE_CLEAR_SMS_OS       (1 << 4)
 #define WDOG_EXPIRE_CLEAR_OEM          (1 << 5)
 
-/* Setting/getting the watchdog timer value.  This is for bytes 5 and
-   6 (the timeout time) of the set command, and bytes 6 and 7 (the
-   timeout time) and 8 and 9 (the current countdown value) of the
-   response.  The timeout value is given in seconds (in the command it
-   is 100ms intervals). */
+/*
+ * Setting/getting the watchdog timer value.  This is for bytes 5 and
+ * 6 (the timeout time) of the set command, and bytes 6 and 7 (the
+ * timeout time) and 8 and 9 (the current countdown value) of the
+ * response.  The timeout value is given in seconds (in the command it
+ * is 100ms intervals).
+ */
 #define WDOG_SET_TIMEOUT(byte1, byte2, val) \
        (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8)
 #define WDOG_GET_TIMEOUT(byte1, byte2) \
@@ -184,8 +189,10 @@ static int ipmi_set_timeout(int do_heartbeat);
 static void ipmi_register_watchdog(int ipmi_intf);
 static void ipmi_unregister_watchdog(int ipmi_intf);
 
-/* If true, the driver will start running as soon as it is configured
-   and ready. */
+/*
+ * If true, the driver will start running as soon as it is configured
+ * and ready.
+ */
 static int start_now;
 
 static int set_param_int(const char *val, struct kernel_param *kp)
@@ -309,10 +316,12 @@ static int ipmi_ignore_heartbeat;
 /* Is someone using the watchdog?  Only one user is allowed. */
 static unsigned long ipmi_wdog_open;
 
-/* If set to 1, the heartbeat command will set the state to reset and
-   start the timer.  The timer doesn't normally run when the driver is
-   first opened until the heartbeat is set the first time, this
-   variable is used to accomplish this. */
+/*
+ * If set to 1, the heartbeat command will set the state to reset and
+ * start the timer.  The timer doesn't normally run when the driver is
+ * first opened until the heartbeat is set the first time, this
+ * variable is used to accomplish this.
+ */
 static int ipmi_start_timer_on_heartbeat;
 
 /* IPMI version of the BMC. */
@@ -329,10 +338,12 @@ static int nmi_handler_registered;
 
 static int ipmi_heartbeat(void);
 
-/* We use a mutex to make sure that only one thing can send a set
-   timeout at one time, because we only have one copy of the data.
-   The mutex is claimed when the set_timeout is sent and freed
-   when both messages are free. */
+/*
+ * We use a mutex to make sure that only one thing can send a set
+ * timeout at one time, because we only have one copy of the data.
+ * The mutex is claimed when the set_timeout is sent and freed
+ * when both messages are free.
+ */
 static atomic_t set_timeout_tofree = ATOMIC_INIT(0);
 static DEFINE_MUTEX(set_timeout_lock);
 static DECLARE_COMPLETION(set_timeout_wait);
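
The three declarations above implement a small rendezvous: set_timeout_tofree is set to 2 when the request and response buffers are handed to the lower layer, each buffer's done() callback drops the count, and the sender sleeps on the completion until both callbacks have run, while the mutex keeps a second set-timeout from reusing the static buffers. A rough userspace analogue of that pattern, using pthreads and C11 atomics in place of atomic_t and struct completion, with all names invented:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int tofree;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  done_cv = PTHREAD_COND_INITIALIZER;

    /* Stand-in for the done() callback on each message buffer. */
    static void msg_done(const char *which)
    {
            printf("%s buffer released\n", which);
            if (atomic_fetch_sub(&tofree, 1) == 1) {    /* last buffer back */
                    pthread_mutex_lock(&lock);
                    pthread_cond_signal(&done_cv);
                    pthread_mutex_unlock(&lock);
            }
    }

    /* Stand-in for the lower layer finishing with both buffers. */
    static void *fake_lower_layer(void *arg)
    {
            (void)arg;
            usleep(1000);
            msg_done("smi");
            msg_done("recv");
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            atomic_store(&tofree, 2);           /* both buffers now in flight */
            pthread_create(&t, NULL, fake_lower_layer, NULL);

            /* The "wait_for_completion" side: sleep until both callbacks ran. */
            pthread_mutex_lock(&lock);
            while (atomic_load(&tofree) != 0)
                    pthread_cond_wait(&done_cv, &lock);
            pthread_mutex_unlock(&lock);

            pthread_join(t, NULL);
            printf("set-timeout buffers are free again\n");
            return 0;
    }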
@@ -346,15 +357,13 @@ static void set_timeout_free_recv(struct ipmi_recv_msg *msg)
     if (atomic_dec_and_test(&set_timeout_tofree))
            complete(&set_timeout_wait);
 }
-static struct ipmi_smi_msg set_timeout_smi_msg =
-{
+static struct ipmi_smi_msg set_timeout_smi_msg = {
        .done = set_timeout_free_smi
 };
-static struct ipmi_recv_msg set_timeout_recv_msg =
-{
+static struct ipmi_recv_msg set_timeout_recv_msg = {
        .done = set_timeout_free_recv
 };
+
 static int i_ipmi_set_timeout(struct ipmi_smi_msg  *smi_msg,
                              struct ipmi_recv_msg *recv_msg,
                              int                  *send_heartbeat_now)
@@ -373,13 +382,14 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg  *smi_msg,
        WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS);
 
        if ((ipmi_version_major > 1)
-           || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5)))
-       {
+           || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) {
                /* This is an IPMI 1.5-only feature. */
                data[0] |= WDOG_DONT_STOP_ON_SET;
        } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
-               /* In ipmi 1.0, setting the timer stops the watchdog, we
-                  need to start it back up again. */
+               /*
+                * In ipmi 1.0, setting the timer stops the watchdog, we
+                * need to start it back up again.
+                */
                hbnow = 1;
        }
 
@@ -465,12 +475,10 @@ static void panic_recv_free(struct ipmi_recv_msg *msg)
        atomic_dec(&panic_done_count);
 }
 
-static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg =
-{
+static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = {
        .done = panic_smi_free
 };
-static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg =
-{
+static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = {
        .done = panic_recv_free
 };
 
@@ -480,8 +488,10 @@ static void panic_halt_ipmi_heartbeat(void)
        struct ipmi_system_interface_addr addr;
        int rv;
 
-       /* Don't reset the timer if we have the timer turned off, that
-           re-enables the watchdog. */
+       /*
+        * Don't reset the timer if we have the timer turned off, that
+        * re-enables the watchdog.
+        */
        if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
                return;
 
@@ -505,19 +515,19 @@ static void panic_halt_ipmi_heartbeat(void)
                atomic_add(2, &panic_done_count);
 }
 
-static struct ipmi_smi_msg panic_halt_smi_msg =
-{
+static struct ipmi_smi_msg panic_halt_smi_msg = {
        .done = panic_smi_free
 };
-static struct ipmi_recv_msg panic_halt_recv_msg =
-{
+static struct ipmi_recv_msg panic_halt_recv_msg = {
        .done = panic_recv_free
 };
 
-/* Special call, doesn't claim any locks.  This is only to be called
-   at panic or halt time, in run-to-completion mode, when the caller
-   is the only CPU and the only thing that will be going is these IPMI
-   calls. */
+/*
+ * Special call, doesn't claim any locks.  This is only to be called
+ * at panic or halt time, in run-to-completion mode, when the caller
+ * is the only CPU and the only thing that will be going is these IPMI
+ * calls.
+ */
 static void panic_halt_ipmi_set_timeout(void)
 {
        int send_heartbeat_now;
@@ -540,10 +550,12 @@ static void panic_halt_ipmi_set_timeout(void)
                ipmi_poll_interface(watchdog_user);
 }
 
-/* We use a semaphore to make sure that only one thing can send a
-   heartbeat at one time, because we only have one copy of the data.
-   The semaphore is claimed when the set_timeout is sent and freed
-   when both messages are free. */
+/*
+ * We use a mutex to make sure that only one thing can send a
+ * heartbeat at one time, because we only have one copy of the data.
+ * The semaphore is claimed when the set_timeout is sent and freed
+ * when both messages are free.
+ */
 static atomic_t heartbeat_tofree = ATOMIC_INIT(0);
 static DEFINE_MUTEX(heartbeat_lock);
 static DECLARE_COMPLETION(heartbeat_wait);
@@ -557,15 +569,13 @@ static void heartbeat_free_recv(struct ipmi_recv_msg *msg)
     if (atomic_dec_and_test(&heartbeat_tofree))
            complete(&heartbeat_wait);
 }
-static struct ipmi_smi_msg heartbeat_smi_msg =
-{
+static struct ipmi_smi_msg heartbeat_smi_msg = {
        .done = heartbeat_free_smi
 };
-static struct ipmi_recv_msg heartbeat_recv_msg =
-{
+static struct ipmi_recv_msg heartbeat_recv_msg = {
        .done = heartbeat_free_recv
 };
+
 static int ipmi_heartbeat(void)
 {
        struct kernel_ipmi_msg            msg;
@@ -580,10 +590,12 @@ static int ipmi_heartbeat(void)
                ipmi_watchdog_state = action_val;
                return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
        } else if (pretimeout_since_last_heartbeat) {
-               /* A pretimeout occurred, make sure we set the timeout.
-                  We don't want to set the action, though, we want to
-                  leave that alone (thus it can't be combined with the
-                  above operation. */
+               /*
+                * A pretimeout occurred, make sure we set the timeout.
+                * We don't want to set the action, though, we want to
+                * leave that alone (thus it can't be combined with the
+                * above operation.
+                */
                return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY);
        }
 
@@ -591,8 +603,10 @@ static int ipmi_heartbeat(void)
 
        atomic_set(&heartbeat_tofree, 2);
 
-       /* Don't reset the timer if we have the timer turned off, that
-           re-enables the watchdog. */
+       /*
+        * Don't reset the timer if we have the timer turned off, that
+        * re-enables the watchdog.
+        */
        if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) {
                mutex_unlock(&heartbeat_lock);
                return 0;
@@ -625,10 +639,12 @@ static int ipmi_heartbeat(void)
        wait_for_completion(&heartbeat_wait);
 
        if (heartbeat_recv_msg.msg.data[0] != 0) {
-           /* Got an error in the heartbeat response.  It was already
-              reported in ipmi_wdog_msg_handler, but we should return
-              an error here. */
-           rv = -EINVAL;
+               /*
+                * Got an error in the heartbeat response.  It was already
+                * reported in ipmi_wdog_msg_handler, but we should return
+                * an error here.
+                */
+               rv = -EINVAL;
        }
 
        mutex_unlock(&heartbeat_lock);
@@ -636,8 +652,7 @@ static int ipmi_heartbeat(void)
        return rv;
 }
 
-static struct watchdog_info ident =
-{
+static struct watchdog_info ident = {
        .options        = 0,    /* WDIOF_SETTIMEOUT, */
        .firmware_version = 1,
        .identity       = "IPMI"
@@ -650,7 +665,7 @@ static int ipmi_ioctl(struct inode *inode, struct file *file,
        int i;
        int val;
 
-       switch(cmd) {
+       switch (cmd) {
        case WDIOC_GETSUPPORT:
                i = copy_to_user(argp, &ident, sizeof(ident));
                return i ? -EFAULT : 0;
@@ -690,15 +705,13 @@ static int ipmi_ioctl(struct inode *inode, struct file *file,
                i = copy_from_user(&val, argp, sizeof(int));
                if (i)
                        return -EFAULT;
-               if (val & WDIOS_DISABLECARD)
-               {
+               if (val & WDIOS_DISABLECARD) {
                        ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
                        ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
                        ipmi_start_timer_on_heartbeat = 0;
                }
 
-               if (val & WDIOS_ENABLECARD)
-               {
+               if (val & WDIOS_ENABLECARD) {
                        ipmi_watchdog_state = action_val;
                        ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB);
                }
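
The WDIOS_DISABLECARD/WDIOS_ENABLECARD handling above is reached through the standard Linux watchdog character device. A minimal userspace client, not specific to this driver, that enables the timer, heartbeats it a few times and disables it again; it assumes the device node is /dev/watchdog and that the caller is allowed to open it:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/watchdog.h>

    int main(void)
    {
            struct watchdog_info ident;
            int fd, i, flags;

            fd = open("/dev/watchdog", O_WRONLY);
            if (fd < 0) {
                    perror("open /dev/watchdog");
                    return 1;
            }

            if (ioctl(fd, WDIOC_GETSUPPORT, &ident) == 0)
                    printf("watchdog identity: %s\n", (char *)ident.identity);

            flags = WDIOS_ENABLECARD;
            ioctl(fd, WDIOC_SETOPTIONS, &flags);        /* start the timer */

            for (i = 0; i < 5; i++) {
                    ioctl(fd, WDIOC_KEEPALIVE, 0);      /* heartbeat */
                    sleep(1);
            }

            flags = WDIOS_DISABLECARD;
            ioctl(fd, WDIOC_SETOPTIONS, &flags);        /* stop the timer */

            /* Magic-close convention: a 'V' before close marks the close as
             * expected for drivers where nowayout is not set. */
            if (write(fd, "V", 1) != 1)
                    perror("write");
            close(fd);
            return 0;
    }

With this driver the timer does not actually start until the first heartbeat unless start_now is set, as described by the comments further up.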
@@ -724,13 +737,13 @@ static ssize_t ipmi_write(struct file *file,
        int rv;
 
        if (len) {
-               if (!nowayout) {
-                       size_t i;
+               if (!nowayout) {
+                       size_t i;
 
                        /* In case it was set long ago */
                        expect_close = 0;
 
-                       for (i = 0; i != len; i++) {
+                       for (i = 0; i != len; i++) {
                                char c;
 
                                if (get_user(c, buf + i))
@@ -758,15 +771,17 @@ static ssize_t ipmi_read(struct file *file,
        if (count <= 0)
                return 0;
 
-       /* Reading returns if the pretimeout has gone off, and it only does
-          it once per pretimeout. */
+       /*
+        * Reading returns if the pretimeout has gone off, and it only does
+        * it once per pretimeout.
+        */
        spin_lock(&ipmi_read_lock);
        if (!data_to_read) {
                if (file->f_flags & O_NONBLOCK) {
                        rv = -EAGAIN;
                        goto out;
                }
-               
+
                init_waitqueue_entry(&wait, current);
                add_wait_queue(&read_q, &wait);
                while (!data_to_read) {
@@ -776,7 +791,7 @@ static ssize_t ipmi_read(struct file *file,
                        spin_lock(&ipmi_read_lock);
                }
                remove_wait_queue(&read_q, &wait);
-           
+
                if (signal_pending(current)) {
                        rv = -ERESTARTSYS;
                        goto out;
@@ -799,25 +814,27 @@ static ssize_t ipmi_read(struct file *file,
 
 static int ipmi_open(struct inode *ino, struct file *filep)
 {
-        switch (iminor(ino)) {
-        case WATCHDOG_MINOR:
+       switch (iminor(ino)) {
+       case WATCHDOG_MINOR:
                if (test_and_set_bit(0, &ipmi_wdog_open))
-                        return -EBUSY;
+                       return -EBUSY;
 
-               /* Don't start the timer now, let it start on the
-                  first heartbeat. */
+               /*
+                * Don't start the timer now, let it start on the
+                * first heartbeat.
+                */
                ipmi_start_timer_on_heartbeat = 1;
                return nonseekable_open(ino, filep);
 
        default:
                return (-ENODEV);
-        }
+       }
 }
 
 static unsigned int ipmi_poll(struct file *file, poll_table *wait)
 {
        unsigned int mask = 0;
-       
+
        poll_wait(file, &read_q, wait);
 
        spin_lock(&ipmi_read_lock);
@@ -851,7 +868,7 @@ static int ipmi_close(struct inode *ino, struct file *filep)
                clear_bit(0, &ipmi_wdog_open);
        }
 
-       ipmi_fasync (-1, filep, 0);
+       ipmi_fasync(-1, filep, 0);
        expect_close = 0;
 
        return 0;
@@ -882,7 +899,7 @@ static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
                       msg->msg.data[0],
                       msg->msg.cmd);
        }
-       
+
        ipmi_free_recv_msg(msg);
 }
 
@@ -902,14 +919,14 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data)
                }
        }
 
-       /* On some machines, the heartbeat will give
-          an error and not work unless we re-enable
-          the timer.   So do so. */
+       /*
+        * On some machines, the heartbeat will give an error and not
+        * work unless we re-enable the timer.  So do so.
+        */
        pretimeout_since_last_heartbeat = 1;
 }
 
-static struct ipmi_user_hndl ipmi_hndlrs =
-{
+static struct ipmi_user_hndl ipmi_hndlrs = {
        .ipmi_recv_hndl           = ipmi_wdog_msg_handler,
        .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler
 };
@@ -949,8 +966,10 @@ static void ipmi_register_watchdog(int ipmi_intf)
                int old_timeout = timeout;
                int old_preop_val = preop_val;
 
-               /* Set the pretimeout to go off in a second and give
-                  ourselves plenty of time to stop the timer. */
+               /*
+                * Set the pretimeout to go off in a second and give
+                * ourselves plenty of time to stop the timer.
+                */
                ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
                preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */
                pretimeout = 99;
@@ -974,7 +993,7 @@ static void ipmi_register_watchdog(int ipmi_intf)
                               " occur.  The NMI pretimeout will"
                               " likely not work\n");
                }
-      out_restore:
+ out_restore:
                testing_nmi = 0;
                preop_val = old_preop_val;
                pretimeout = old_pretimeout;
@@ -1009,9 +1028,11 @@ static void ipmi_unregister_watchdog(int ipmi_intf)
        /* Make sure no one can call us any more. */
        misc_deregister(&ipmi_wdog_miscdev);
 
-       /* Wait to make sure the message makes it out.  The lower layer has
-          pointers to our buffers, we want to make sure they are done before
-          we release our memory. */
+       /*
+        * Wait to make sure the message makes it out.  The lower layer has
+        * pointers to our buffers, we want to make sure they are done before
+        * we release our memory.
+        */
        while (atomic_read(&set_timeout_tofree))
                schedule_timeout_uninterruptible(1);
 
@@ -1052,15 +1073,17 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data)
                return NOTIFY_STOP;
        }
 
-        /* If we are not expecting a timeout, ignore it. */
+       /* If we are not expecting a timeout, ignore it. */
        if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE)
                return NOTIFY_OK;
 
        if (preaction_val != WDOG_PRETIMEOUT_NMI)
                return NOTIFY_OK;
 
-       /* If no one else handled the NMI, we assume it was the IPMI
-           watchdog. */
+       /*
+        * If no one else handled the NMI, we assume it was the IPMI
+        * watchdog.
+        */
        if (preop_val == WDOG_PREOP_PANIC) {
                /* On some machines, the heartbeat will give
                   an error and not work unless we re-enable
@@ -1082,7 +1105,7 @@ static int wdog_reboot_handler(struct notifier_block *this,
                               unsigned long         code,
                               void                  *unused)
 {
-       static int reboot_event_handled = 0;
+       static int reboot_event_handled;
 
        if ((watchdog_user) && (!reboot_event_handled)) {
                /* Make sure we only do this once. */
@@ -1115,7 +1138,7 @@ static int wdog_panic_handler(struct notifier_block *this,
                              unsigned long         event,
                              void                  *unused)
 {
-       static int panic_event_handled = 0;
+       static int panic_event_handled;
 
        /* On a panic, if we have a panic timeout, make sure to extend
           the watchdog timer to a reasonable value to complete the
@@ -1125,7 +1148,7 @@ static int wdog_panic_handler(struct notifier_block *this,
            ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
                /* Make sure we do this only once. */
                panic_event_handled = 1;
-           
+
                timeout = 255;
                pretimeout = 0;
                panic_halt_ipmi_set_timeout();
@@ -1151,8 +1174,7 @@ static void ipmi_smi_gone(int if_num)
        ipmi_unregister_watchdog(if_num);
 }
 
-static struct ipmi_smi_watcher smi_watcher =
-{
+static struct ipmi_smi_watcher smi_watcher = {
        .owner    = THIS_MODULE,
        .new_smi  = ipmi_new_smi,
        .smi_gone = ipmi_smi_gone
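
Earlier in this file, WDOG_SET_TIMEOUT() packs the timeout into two command bytes by converting seconds to 100 ms intervals and splitting the 16-bit count low byte first. A standalone check of that arithmetic, with the macro copied from the header context above and 300 s chosen only as an example value:

    #include <stdio.h>

    /* As in the driver: seconds -> 100 ms intervals, low byte first. */
    #define WDOG_SET_TIMEOUT(byte1, byte2, val) \
            (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8)

    int main(void)
    {
            unsigned char b1, b2;

            WDOG_SET_TIMEOUT(b1, b2, 300);       /* 300 s = 3000 ticks = 0x0bb8 */
            printf("timeout bytes: 0x%02x 0x%02x\n", b1, b2);   /* 0xb8 0x0b */
            return 0;
    }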
index eba2883b630ede817677c54a7cfe3e94f1582be1..4f3cefa8eb0e28cd0632983e66cebc63e5bb82ae 100644 (file)
 #include <linux/delay.h>
 #include <linux/ioport.h>
 
-#include <asm/uaccess.h>
-#include <asm/io.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
 #include <asm/system.h>
 
 #include <linux/pci.h>
@@ -189,7 +189,7 @@ struct      isi_board {
        unsigned short          status;
        unsigned short          port_status; /* each bit for each port */
        unsigned short          shift_count;
-       struct isi_port         * ports;
+       struct isi_port         *ports;
        signed char             count;
        spinlock_t              card_lock; /* Card wide lock 11/5/00 -sameer */
        unsigned long           flags;
@@ -205,11 +205,11 @@ struct    isi_port {
        u16                     channel;
        u16                     status;
        u16                     closing_wait;
-       struct isi_board        * card;
-       struct tty_struct       * tty;
+       struct isi_board        *card;
+       struct tty_struct       *tty;
        wait_queue_head_t       close_wait;
        wait_queue_head_t       open_wait;
-       unsigned char           * xmit_buf;
+       unsigned char           *xmit_buf;
        int                     xmit_head;
        int                     xmit_tail;
        int                     xmit_cnt;
@@ -405,7 +405,7 @@ static void isicom_tx(unsigned long _data)
 
        /*      find next active board  */
        card = (prev_card + 1) & 0x0003;
-       while(count-- > 0) {
+       while (count-- > 0) {
                if (isi_card[card].status & BOARD_ACTIVE)
                        break;
                card = (card + 1) & 0x0003;
@@ -428,7 +428,7 @@ static void isicom_tx(unsigned long _data)
        if (retries >= 100)
                goto unlock;
 
-       for (;count > 0;count--, port++) {
+       for (; count > 0; count--, port++) {
                /* port not active or tx disabled to force flow control */
                if (!(port->flags & ASYNC_INITIALIZED) ||
                                !(port->status & ISI_TXOK))
@@ -471,9 +471,10 @@ static void isicom_tx(unsigned long _data)
                                        break;
                                }
                        }
-                       if (cnt <= 0) break;
+                       if (cnt <= 0)
+                               break;
                        word_count = cnt >> 1;
-                       outsw(base, port->xmit_buf+port->xmit_tail,word_count);
+                       outsw(base, port->xmit_buf+port->xmit_tail, word_count);
                        port->xmit_tail = (port->xmit_tail
                                + (word_count << 1)) & (SERIAL_XMIT_SIZE - 1);
                        txcount -= (word_count << 1);
@@ -556,7 +557,7 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
        tty = port->tty;
        if (tty == NULL) {
                word_count = byte_count >> 1;
-               while(byte_count > 1) {
+               while (byte_count > 1) {
                        inw(base);
                        byte_count -= 2;
                }
@@ -569,7 +570,7 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
 
        if (header & 0x8000) {          /* Status Packet */
                header = inw(base);
-               switch(header & 0xff) {
+               switch (header & 0xff) {
                case 0: /* Change in EIA signals */
                        if (port->flags & ASYNC_CHECK_CD) {
                                if (port->status & ISI_DCD) {
@@ -656,7 +657,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
                if (byte_count > 0) {
                        pr_dbg("Intr(0x%lx:%d): Flip buffer overflow! dropping "
                                "bytes...\n", base, channel + 1);
-                       while(byte_count > 0) { /* drain out unread xtra data */
+               /* drain out unread xtra data */
+               while (byte_count > 0) {
                                inw(base);
                                byte_count -= 2;
                        }
@@ -679,8 +681,11 @@ static void isicom_config_port(struct isi_port *port)
                shift_count = card->shift_count;
        unsigned char flow_ctrl;
 
-       if (!(tty = port->tty) || !tty->termios)
+       tty = port->tty;
+
+       if (tty == NULL)
                return;
+       /* FIXME: Switch to new tty baud API */
        baud = C_BAUD(tty);
        if (baud & CBAUDEX) {
                baud &= ~CBAUDEX;
@@ -706,7 +711,7 @@ static void isicom_config_port(struct isi_port *port)
                if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
                        baud++; /*  57.6 Kbps */
                if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
-                       baud +=2; /*  115  Kbps */
+                       baud += 2; /*  115  Kbps */
                if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
                        baud += 3; /* 230 kbps*/
                if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
@@ -716,15 +721,14 @@ static void isicom_config_port(struct isi_port *port)
                /* hang up */
                drop_dtr(port);
                return;
-       }
-       else
+       } else
                raise_dtr(port);
 
        if (WaitTillCardIsFree(base) == 0) {
-               outw(0x8000 | (channel << shift_count) |0x03, base);
+               outw(0x8000 | (channel << shift_count) | 0x03, base);
                outw(linuxb_to_isib[baud] << 8 | 0x03, base);
                channel_setup = 0;
-               switch(C_CSIZE(tty)) {
+               switch (C_CSIZE(tty)) {
                case CS5:
                        channel_setup |= ISICOM_CS5;
                        break;
@@ -767,7 +771,7 @@ static void isicom_config_port(struct isi_port *port)
                flow_ctrl |= ISICOM_INITIATE_XONXOFF;
 
        if (WaitTillCardIsFree(base) == 0) {
-               outw(0x8000 | (channel << shift_count) |0x04, base);
+               outw(0x8000 | (channel << shift_count) | 0x04, base);
                outw(flow_ctrl << 8 | 0x05, base);
                outw((STOP_CHAR(tty)) << 8 | (START_CHAR(tty)), base);
                InterruptTheCard(base);
@@ -805,20 +809,17 @@ static int isicom_setup_port(struct isi_port *port)
        struct isi_board *card = port->card;
        unsigned long flags;
 
-       if (port->flags & ASYNC_INITIALIZED) {
+       if (port->flags & ASYNC_INITIALIZED)
                return 0;
-       }
        if (!port->xmit_buf) {
-               unsigned long page;
-
-               if (!(page = get_zeroed_page(GFP_KERNEL)))
+               /* Relies on BKL */
+               unsigned long page  = get_zeroed_page(GFP_KERNEL);
+               if (page == 0)
                        return -ENOMEM;
-
-               if (port->xmit_buf) {
+               if (port->xmit_buf)
                        free_page(page);
-                       return -ERESTARTSYS;
-               }
-               port->xmit_buf = (unsigned char *) page;
+               else
+                       port->xmit_buf = (unsigned char *) page;
        }
 
        spin_lock_irqsave(&card->card_lock, flags);
@@ -949,21 +950,18 @@ static int isicom_open(struct tty_struct *tty, struct file *filp)
        port->count++;
        tty->driver_data = port;
        port->tty = tty;
-       if ((error = isicom_setup_port(port))!=0)
-               return error;
-       if ((error = block_til_ready(tty, filp, port))!=0)
-               return error;
-
-       return 0;
+       error = isicom_setup_port(port);
+       if (error == 0)
+               error = block_til_ready(tty, filp, port);
+       return error;
 }
 
 /* close et all */
 
 static inline void isicom_shutdown_board(struct isi_board *bp)
 {
-       if (bp->status & BOARD_ACTIVE) {
+       if (bp->status & BOARD_ACTIVE)
                bp->status &= ~BOARD_ACTIVE;
-       }
 }
 
 /* card->lock HAS to be held */
@@ -1012,6 +1010,22 @@ static void isicom_shutdown_port(struct isi_port *port)
        }
 }
 
+static void isicom_flush_buffer(struct tty_struct *tty)
+{
+       struct isi_port *port = tty->driver_data;
+       struct isi_board *card = port->card;
+       unsigned long flags;
+
+       if (isicom_paranoia_check(port, tty->name, "isicom_flush_buffer"))
+               return;
+
+       spin_lock_irqsave(&card->card_lock, flags);
+       port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
+       spin_unlock_irqrestore(&card->card_lock, flags);
+
+       tty_wakeup(tty);
+}
+
 static void isicom_close(struct tty_struct *tty, struct file *filp)
 {
        struct isi_port *port = tty->driver_data;
@@ -1065,8 +1079,7 @@ static void isicom_close(struct tty_struct *tty, struct file *filp)
        isicom_shutdown_port(port);
        spin_unlock_irqrestore(&card->card_lock, flags);
 
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       isicom_flush_buffer(tty);
        tty_ldisc_flush(tty);
 
        spin_lock_irqsave(&card->card_lock, flags);
@@ -1104,7 +1117,7 @@ static int isicom_write(struct tty_struct *tty,   const unsigned char *buf,
 
        spin_lock_irqsave(&card->card_lock, flags);
 
-       while(1) {
+       while (1) {
                cnt = min_t(int, count, min(SERIAL_XMIT_SIZE - port->xmit_cnt
                                - 1, SERIAL_XMIT_SIZE - port->xmit_head));
                if (cnt <= 0)
@@ -1125,28 +1138,29 @@ static int isicom_write(struct tty_struct *tty, const unsigned char *buf,
 }
 
 /* put_char et all */
-static void isicom_put_char(struct tty_struct *tty, unsigned char ch)
+static int isicom_put_char(struct tty_struct *tty, unsigned char ch)
 {
        struct isi_port *port = tty->driver_data;
        struct isi_board *card = port->card;
        unsigned long flags;
 
        if (isicom_paranoia_check(port, tty->name, "isicom_put_char"))
-               return;
+               return 0;
 
        if (!port->xmit_buf)
-               return;
+               return 0;
 
        spin_lock_irqsave(&card->card_lock, flags);
        if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) {
                spin_unlock_irqrestore(&card->card_lock, flags);
-               return;
+               return 0;
        }
 
        port->xmit_buf[port->xmit_head++] = ch;
        port->xmit_head &= (SERIAL_XMIT_SIZE - 1);
        port->xmit_cnt++;
        spin_unlock_irqrestore(&card->card_lock, flags);
+       return 1;
 }
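
isicom_put_char() above queues one byte into a power-of-two ring buffer, wrapping the head index with '& (SERIAL_XMIT_SIZE - 1)' instead of a modulo, and now reports 1 when the byte was queued and 0 when the buffer is full. A small standalone illustration of that masking; the 16-byte size is arbitrary (the driver's SERIAL_XMIT_SIZE is defined elsewhere):

    #include <stdio.h>

    #define XMIT_SIZE 16                    /* must be a power of two for the mask trick */

    struct ring {
            unsigned char buf[XMIT_SIZE];
            int head, tail, cnt;
    };

    /* Returns 1 if the byte was queued, 0 if the buffer is full, mirroring
     * the contract the reworked put_char now reports to the tty layer. */
    static int ring_put(struct ring *r, unsigned char ch)
    {
            if (r->cnt >= XMIT_SIZE - 1)
                    return 0;
            r->buf[r->head++] = ch;
            r->head &= XMIT_SIZE - 1;       /* cheap wraparound, no '%' needed */
            r->cnt++;
            return 1;
    }

    int main(void)
    {
            struct ring r = { .head = 0, .tail = 0, .cnt = 0 };
            int i, queued = 0;

            for (i = 0; i < 20; i++)
                    queued += ring_put(&r, (unsigned char)i);
            printf("queued %d of 20 bytes, head is now %d\n", queued, r.head);
            return 0;
    }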
 
 /* flush_chars et all */
@@ -1258,6 +1272,8 @@ static int isicom_set_serial_info(struct isi_port *port,
        if (copy_from_user(&newinfo, info, sizeof(newinfo)))
                return -EFAULT;
 
+       lock_kernel();
+
        reconfig_port = ((port->flags & ASYNC_SPD_MASK) !=
                (newinfo.flags & ASYNC_SPD_MASK));
 
@@ -1265,12 +1281,13 @@ static int isicom_set_serial_info(struct isi_port *port,
                if ((newinfo.close_delay != port->close_delay) ||
                                (newinfo.closing_wait != port->closing_wait) ||
                                ((newinfo.flags & ~ASYNC_USR_MASK) !=
-                               (port->flags & ~ASYNC_USR_MASK)))
+                               (port->flags & ~ASYNC_USR_MASK))) {
+                       unlock_kernel();
                        return -EPERM;
-               port->flags = ((port->flags & ~ ASYNC_USR_MASK) |
+               }
+               port->flags = ((port->flags & ~ASYNC_USR_MASK) |
                                (newinfo.flags & ASYNC_USR_MASK));
-       }
-       else {
+       } else {
                port->close_delay = newinfo.close_delay;
                port->closing_wait = newinfo.closing_wait;
                port->flags = ((port->flags & ~ASYNC_FLAGS) |
@@ -1282,6 +1299,7 @@ static int isicom_set_serial_info(struct isi_port *port,
                isicom_config_port(port);
                spin_unlock_irqrestore(&port->card->card_lock, flags);
        }
+       unlock_kernel();
        return 0;
 }
 
@@ -1290,6 +1308,7 @@ static int isicom_get_serial_info(struct isi_port *port,
 {
        struct serial_struct out_info;
 
+       lock_kernel();
        memset(&out_info, 0, sizeof(out_info));
 /*     out_info.type = ? */
        out_info.line = port - isi_ports;
@@ -1299,6 +1318,7 @@ static int isicom_get_serial_info(struct isi_port *port,
 /*     out_info.baud_base = ? */
        out_info.close_delay = port->close_delay;
        out_info.closing_wait = port->closing_wait;
+       unlock_kernel();
        if (copy_to_user(info, &out_info, sizeof(out_info)))
                return -EFAULT;
        return 0;
@@ -1314,7 +1334,7 @@ static int isicom_ioctl(struct tty_struct *tty, struct file *filp,
        if (isicom_paranoia_check(port, tty->name, "isicom_ioctl"))
                return -ENODEV;
 
-       switch(cmd) {
+       switch (cmd) {
        case TCSBRK:
                retval = tty_check_change(tty);
                if (retval)
@@ -1331,19 +1351,6 @@ static int isicom_ioctl(struct tty_struct *tty, struct file *filp,
                tty_wait_until_sent(tty, 0);
                isicom_send_break(port, arg ? arg * (HZ/10) : HZ/4);
                return 0;
-
-       case TIOCGSOFTCAR:
-               return put_user(C_CLOCAL(tty) ? 1 : 0,
-                               (unsigned long __user *)argp);
-
-       case TIOCSSOFTCAR:
-               if (get_user(arg, (unsigned long __user *) argp))
-                       return -EFAULT;
-               tty->termios->c_cflag =
-                       ((tty->termios->c_cflag & ~CLOCAL) |
-                       (arg ? CLOCAL : 0));
-               return 0;
-
        case TIOCGSERIAL:
                return isicom_get_serial_info(port, argp);
 
@@ -1453,22 +1460,6 @@ static void isicom_hangup(struct tty_struct *tty)
        wake_up_interruptible(&port->open_wait);
 }
 
-/* flush_buffer et all */
-static void isicom_flush_buffer(struct tty_struct *tty)
-{
-       struct isi_port *port = tty->driver_data;
-       struct isi_board *card = port->card;
-       unsigned long flags;
-
-       if (isicom_paranoia_check(port, tty->name, "isicom_flush_buffer"))
-               return;
-
-       spin_lock_irqsave(&card->card_lock, flags);
-       port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
-       spin_unlock_irqrestore(&card->card_lock, flags);
-
-       tty_wakeup(tty);
-}
 
 /*
  * Driver init and deinit functions
@@ -1592,7 +1583,7 @@ static int __devinit load_firmware(struct pci_dev *pdev,
        default:
                dev_err(&pdev->dev, "Unknown signature.\n");
                goto end;
-       }
+       }
 
        retval = request_firmware(&fw, name, &pdev->dev);
        if (retval)
@@ -1620,7 +1611,8 @@ static int __devinit load_firmware(struct pci_dev *pdev,
                if (WaitTillCardIsFree(base))
                        goto errrelfw;
 
-               if ((status = inw(base + 0x4)) != 0) {
+               status = inw(base + 0x4);
+               if (status != 0) {
                        dev_warn(&pdev->dev, "Card%d rejected load header:\n"
                                KERN_WARNING "Address:0x%x\n"
                                KERN_WARNING "Count:0x%x\n"
@@ -1637,12 +1629,13 @@ static int __devinit load_firmware(struct pci_dev *pdev,
                if (WaitTillCardIsFree(base))
                        goto errrelfw;
 
-               if ((status = inw(base + 0x4)) != 0) {
+               status = inw(base + 0x4);
+               if (status != 0) {
                        dev_err(&pdev->dev, "Card%d got out of sync.Card "
                                "Status:0x%x\n", index + 1, status);
                        goto errrelfw;
                }
-       }
+       }
 
 /* XXX: should we test it by reading it back and comparing with original like
  * in load firmware package? */
@@ -1666,7 +1659,8 @@ static int __devinit load_firmware(struct pci_dev *pdev,
                if (WaitTillCardIsFree(base))
                        goto errrelfw;
 
-               if ((status = inw(base + 0x4)) != 0) {
+               status = inw(base + 0x4);
+               if (status != 0) {
                        dev_warn(&pdev->dev, "Card%d rejected verify header:\n"
                                KERN_WARNING "Address:0x%x\n"
                                KERN_WARNING "Count:0x%x\n"
@@ -1699,7 +1693,8 @@ static int __devinit load_firmware(struct pci_dev *pdev,
                if (WaitTillCardIsFree(base))
                        goto errrelfw;
 
-               if ((status = inw(base + 0x4)) != 0) {
+               status = inw(base + 0x4);
+               if (status != 0) {
                        dev_err(&pdev->dev, "Card%d verify got out of sync. "
                                "Card Status:0x%x\n", index + 1, status);
                        goto errrelfw;
@@ -1764,7 +1759,7 @@ static int __devinit isicom_probe(struct pci_dev *pdev,
                        index + 1);
                retval = -EBUSY;
                goto errdec;
-       }
+       }
 
        retval = request_irq(board->irq, isicom_interrupt,
                        IRQF_SHARED | IRQF_DISABLED, ISICOM_NAME, board);
@@ -1818,7 +1813,7 @@ static int __init isicom_init(void)
        int retval, idx, channel;
        struct isi_port *port;
 
-       for(idx = 0; idx < BOARD_COUNT; idx++) {
+       for (idx = 0; idx < BOARD_COUNT; idx++) {
                port = &isi_ports[idx * 16];
                isi_card[idx].ports = port;
                spin_lock_init(&isi_card[idx].card_lock);
@@ -1832,7 +1827,7 @@ static int __init isicom_init(void)
                        init_waitqueue_head(&port->open_wait);
                        init_waitqueue_head(&port->close_wait);
                        /*  . . .  */
-               }
+               }
                isi_card[idx].base = 0;
                isi_card[idx].irq = 0;
        }
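
[Editor's note on the isicom hunks above: the switch of isicom_put_char() from void to int follows the tty-core convention of put_char() returning 1 when the byte was queued and 0 when the buffer was full. The following is a minimal, hypothetical sketch of that contract only -- struct example_port and EXAMPLE_XMIT_SIZE are illustrative names, not taken from the driver -- assuming a power-of-two ring buffer and the same spinlock discipline the driver uses.]

	#include <linux/spinlock.h>

	#define EXAMPLE_XMIT_SIZE 4096		/* power of two, so masking wraps */

	struct example_port {
		spinlock_t lock;
		unsigned char *xmit_buf;
		int xmit_head, xmit_tail, xmit_cnt;
	};

	/* Return 1 if the character was queued, 0 if the ring was full. */
	static int example_put_char(struct example_port *port, unsigned char ch)
	{
		unsigned long flags;
		int queued = 0;

		spin_lock_irqsave(&port->lock, flags);
		if (port->xmit_cnt < EXAMPLE_XMIT_SIZE - 1) {
			port->xmit_buf[port->xmit_head++] = ch;
			port->xmit_head &= EXAMPLE_XMIT_SIZE - 1;	/* wrap */
			port->xmit_cnt++;
			queued = 1;
		}
		spin_unlock_irqrestore(&port->lock, flags);
		return queued;
	}
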
index c645455c3fd1552ed9f7932bd28dabb7f963280d..7c8b62f162bf115c0c74ab184f333625f65df0c4 100644 (file)
@@ -1682,16 +1682,6 @@ static int stli_ioctl(struct tty_struct *tty, struct file *file, unsigned int cm
        rc = 0;
 
        switch (cmd) {
-       case TIOCGSOFTCAR:
-               rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
-                       (unsigned __user *) arg);
-               break;
-       case TIOCSSOFTCAR:
-               if ((rc = get_user(ival, (unsigned __user *) arg)) == 0)
-                       tty->termios->c_cflag =
-                               (tty->termios->c_cflag & ~CLOCAL) |
-                               (ival ? CLOCAL : 0);
-               break;
        case TIOCGSERIAL:
                rc = stli_getserial(portp, argp);
                break;
@@ -3267,7 +3257,7 @@ static int stli_initecp(struct stlibrd *brdp)
  */
        EBRDINIT(brdp);
 
-       brdp->membase = ioremap(brdp->memaddr, brdp->memsize);
+       brdp->membase = ioremap_nocache(brdp->memaddr, brdp->memsize);
        if (brdp->membase == NULL) {
                retval = -ENOMEM;
                goto err_reg;
@@ -3424,7 +3414,7 @@ static int stli_initonb(struct stlibrd *brdp)
  */
        EBRDINIT(brdp);
 
-       brdp->membase = ioremap(brdp->memaddr, brdp->memsize);
+       brdp->membase = ioremap_nocache(brdp->memaddr, brdp->memsize);
        if (brdp->membase == NULL) {
                retval = -ENOMEM;
                goto err_reg;
@@ -3675,7 +3665,7 @@ static int stli_eisamemprobe(struct stlibrd *brdp)
  */
        for (i = 0; (i < stli_eisamempsize); i++) {
                brdp->memaddr = stli_eisamemprobeaddrs[i];
-               brdp->membase = ioremap(brdp->memaddr, brdp->memsize);
+               brdp->membase = ioremap_nocache(brdp->memaddr, brdp->memsize);
                if (brdp->membase == NULL)
                        continue;
 
@@ -4433,6 +4423,8 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un
        done = 0;
        rc = 0;
 
+       lock_kernel();
+
        switch (cmd) {
        case COM_GETPORTSTATS:
                rc = stli_getportstats(NULL, argp);
@@ -4455,6 +4447,7 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un
                done++;
                break;
        }
+       unlock_kernel();
 
        if (done)
                return rc;
@@ -4472,6 +4465,8 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un
        if (brdp->state == 0)
                return -ENODEV;
 
+       lock_kernel();
+
        switch (cmd) {
        case STL_BINTR:
                EBRDINTR(brdp);
@@ -4494,6 +4489,7 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un
                rc = -ENOIOCTLCMD;
                break;
        }
+       unlock_kernel();
        return rc;
 }
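
[Editor's note on the istallion hunks above: the ioremap_nocache() calls request an uncached mapping of the board's shared memory, and the lock_kernel()/unlock_kernel() pairs are the BKL push-down, where the ioctl handler takes the big kernel lock itself rather than relying on the caller. A hedged sketch of that shape follows; the handler and the command value are hypothetical, not the driver's.]

	#include <linux/errno.h>
	#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */

	static int example_ioctl(unsigned int cmd, unsigned long arg)
	{
		int rc = 0;

		lock_kernel();		/* protect legacy, non-reentrant driver state */
		switch (cmd) {
		case 0x1234:		/* hypothetical command */
			rc = 0;
			break;
		default:
			rc = -ENOIOCTLCMD;
			break;
		}
		unlock_kernel();
		return rc;
	}
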
 
index 60b934adea65139c5c802b995e6be28950cc6e2e..7f7e798c13844f0a8c8e2630057566c9ed5a66cf 100644 (file)
@@ -110,6 +110,7 @@ const int max_vals[] = {
 const int NR_TYPES = ARRAY_SIZE(max_vals);
 
 struct kbd_struct kbd_table[MAX_NR_CONSOLES];
+EXPORT_SYMBOL_GPL(kbd_table);
 static struct kbd_struct *kbd = kbd_table;
 
 struct vt_spawn_console vt_spawn_con = {
@@ -260,6 +261,7 @@ void kd_mksound(unsigned int hz, unsigned int ticks)
        } else
                kd_nosound(0);
 }
+EXPORT_SYMBOL(kd_mksound);
 
 /*
  * Setting the keyboard rate.
@@ -1230,7 +1232,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
 
        if (rep &&
            (!vc_kbd_mode(kbd, VC_REPEAT) ||
-            (tty && !L_ECHO(tty) && tty->driver->chars_in_buffer(tty)))) {
+            (tty && !L_ECHO(tty) && tty_chars_in_buffer(tty)))) {
                /*
                 * Don't repeat a key if the input buffers are not empty and the
                 * characters get aren't echoed locally. This makes key repeat
index e83623ead4414efaf02a8d7e124f5719b18479c4..934ffafedaea207131f7d193bed9e75c929f4db9 100644 (file)
@@ -364,6 +364,7 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma)
        return 0;
 }
 
+#ifdef CONFIG_DEVKMEM
 static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
 {
        unsigned long pfn;
@@ -384,6 +385,7 @@ static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
 }
+#endif
 
 #ifdef CONFIG_CRASH_DUMP
 /*
@@ -422,6 +424,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
 extern long vread(char *buf, char *addr, unsigned long count);
 extern long vwrite(char *buf, char *addr, unsigned long count);
 
+#ifdef CONFIG_DEVKMEM
 /*
  * This function reads the *virtual* memory as seen by the kernel.
  */
@@ -626,6 +629,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
        *ppos = p;
        return virtr + wrote;
 }
+#endif
 
 #ifdef CONFIG_DEVPORT
 static ssize_t read_port(struct file * file, char __user * buf,
@@ -803,6 +807,7 @@ static const struct file_operations mem_fops = {
        .get_unmapped_area = get_unmapped_area_mem,
 };
 
+#ifdef CONFIG_DEVKMEM
 static const struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
@@ -811,6 +816,7 @@ static const struct file_operations kmem_fops = {
        .open           = open_kmem,
        .get_unmapped_area = get_unmapped_area_mem,
 };
+#endif
 
 static const struct file_operations null_fops = {
        .llseek         = null_lseek,
@@ -889,11 +895,13 @@ static int memory_open(struct inode * inode, struct file * filp)
                        filp->f_mapping->backing_dev_info =
                                &directly_mappable_cdev_bdi;
                        break;
+#ifdef CONFIG_DEVKMEM
                case 2:
                        filp->f_op = &kmem_fops;
                        filp->f_mapping->backing_dev_info =
                                &directly_mappable_cdev_bdi;
                        break;
+#endif
                case 3:
                        filp->f_op = &null_fops;
                        break;
@@ -942,7 +950,9 @@ static const struct {
        const struct file_operations    *fops;
 } devlist[] = { /* list of minor devices */
        {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
+#ifdef CONFIG_DEVKMEM
        {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
+#endif
        {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
 #ifdef CONFIG_DEVPORT
        {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
index 4d058dadbfcc8c6324b7c4e224530289145140b3..eaace0db0ff4b65d61418e17db6b401c37fc3507 100644 (file)
@@ -263,23 +263,26 @@ EXPORT_SYMBOL(misc_deregister);
 
 static int __init misc_init(void)
 {
-#ifdef CONFIG_PROC_FS
-       struct proc_dir_entry *ent;
+       int err;
 
-       ent = create_proc_entry("misc", 0, NULL);
-       if (ent)
-               ent->proc_fops = &misc_proc_fops;
+#ifdef CONFIG_PROC_FS
+       proc_create("misc", 0, NULL, &misc_proc_fops);
 #endif
        misc_class = class_create(THIS_MODULE, "misc");
+       err = PTR_ERR(misc_class);
        if (IS_ERR(misc_class))
-               return PTR_ERR(misc_class);
+               goto fail_remove;
 
-       if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) {
-               printk("unable to get major %d for misc devices\n",
-                      MISC_MAJOR);
-               class_destroy(misc_class);
-               return -EIO;
-       }
+       err = -EIO;
+       if (register_chrdev(MISC_MAJOR,"misc",&misc_fops))
+               goto fail_printk;
        return 0;
+
+fail_printk:
+       printk("unable to get major %d for misc devices\n", MISC_MAJOR);
+       class_destroy(misc_class);
+fail_remove:
+       remove_proc_entry("misc", NULL);
+       return err;
 }
 subsys_initcall(misc_init);
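
[Editor's note on the misc_init() rework above: the early returns become goto-based unwinding, so each failure label releases exactly what was acquired before it. A generic, hypothetical sketch of that pattern -- acquire_a/acquire_b/release_a are stand-ins stubbed out so the sketch stands alone, not kernel APIs.]

	#include <linux/init.h>

	/* Hypothetical resources, stubbed so the sketch compiles on its own. */
	static int acquire_a(void) { return 0; }
	static int acquire_b(void) { return 0; }
	static void release_a(void) { }

	static int __init example_init(void)
	{
		int err;

		err = acquire_a();		/* step 1 */
		if (err)
			goto fail_a;
		err = acquire_b();		/* step 2 */
		if (err)
			goto fail_b;
		return 0;

	fail_b:
		release_a();			/* undo step 1 only */
	fail_a:
		return err;
	}
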
index e60a74c66e3dfa05c00eb45596829e77932ce5c1..192961fd71739d22ad4ff9a65df5a101bc460fbd 100644 (file)
@@ -30,6 +30,8 @@
 #include <linux/miscdevice.h>
 #include <linux/posix-timers.h>
 #include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/math64.h>
 
 #include <asm/uaccess.h>
 #include <asm/sn/addrs.h>
@@ -74,9 +76,8 @@ static const struct file_operations mmtimer_fops = {
  * We only have comparison registers RTC1-4 currently available per
  * node.  RTC0 is used by SAL.
  */
-#define NUM_COMPARATORS 3
 /* Check for an RTC interrupt pending */
-static int inline mmtimer_int_pending(int comparator)
+static int mmtimer_int_pending(int comparator)
 {
        if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) &
                        SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator)
@@ -84,15 +85,16 @@ static int inline mmtimer_int_pending(int comparator)
        else
                return 0;
 }
+
 /* Clear the RTC interrupt pending bit */
-static void inline mmtimer_clr_int_pending(int comparator)
+static void mmtimer_clr_int_pending(int comparator)
 {
        HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS),
                SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator);
 }
 
 /* Setup timer on comparator RTC1 */
-static void inline mmtimer_setup_int_0(u64 expires)
+static void mmtimer_setup_int_0(int cpu, u64 expires)
 {
        u64 val;
 
@@ -106,7 +108,7 @@ static void inline mmtimer_setup_int_0(u64 expires)
        mmtimer_clr_int_pending(0);
 
        val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) |
-               ((u64)cpu_physical_id(smp_processor_id()) <<
+               ((u64)cpu_physical_id(cpu) <<
                        SH_RTC1_INT_CONFIG_PID_SHFT);
 
        /* Set configuration */
@@ -122,7 +124,7 @@ static void inline mmtimer_setup_int_0(u64 expires)
 }
 
 /* Setup timer on comparator RTC2 */
-static void inline mmtimer_setup_int_1(u64 expires)
+static void mmtimer_setup_int_1(int cpu, u64 expires)
 {
        u64 val;
 
@@ -133,7 +135,7 @@ static void inline mmtimer_setup_int_1(u64 expires)
        mmtimer_clr_int_pending(1);
 
        val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) |
-               ((u64)cpu_physical_id(smp_processor_id()) <<
+               ((u64)cpu_physical_id(cpu) <<
                        SH_RTC2_INT_CONFIG_PID_SHFT);
 
        HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val);
@@ -144,7 +146,7 @@ static void inline mmtimer_setup_int_1(u64 expires)
 }
 
 /* Setup timer on comparator RTC3 */
-static void inline mmtimer_setup_int_2(u64 expires)
+static void mmtimer_setup_int_2(int cpu, u64 expires)
 {
        u64 val;
 
@@ -155,7 +157,7 @@ static void inline mmtimer_setup_int_2(u64 expires)
        mmtimer_clr_int_pending(2);
 
        val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) |
-               ((u64)cpu_physical_id(smp_processor_id()) <<
+               ((u64)cpu_physical_id(cpu) <<
                        SH_RTC3_INT_CONFIG_PID_SHFT);
 
        HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val);
@@ -170,22 +172,22 @@ static void inline mmtimer_setup_int_2(u64 expires)
  * in order to insure that the setup succeeds in a deterministic time frame.
  * It will check if the interrupt setup succeeded.
  */
-static int inline mmtimer_setup(int comparator, unsigned long expires)
+static int mmtimer_setup(int cpu, int comparator, unsigned long expires)
 {
 
        switch (comparator) {
        case 0:
-               mmtimer_setup_int_0(expires);
+               mmtimer_setup_int_0(cpu, expires);
                break;
        case 1:
-               mmtimer_setup_int_1(expires);
+               mmtimer_setup_int_1(cpu, expires);
                break;
        case 2:
-               mmtimer_setup_int_2(expires);
+               mmtimer_setup_int_2(cpu, expires);
                break;
        }
        /* We might've missed our expiration time */
-       if (rtc_time() < expires)
+       if (rtc_time() <= expires)
                return 1;
 
        /*
@@ -195,7 +197,7 @@ static int inline mmtimer_setup(int comparator, unsigned long expires)
        return mmtimer_int_pending(comparator);
 }
 
-static int inline mmtimer_disable_int(long nasid, int comparator)
+static int mmtimer_disable_int(long nasid, int comparator)
 {
        switch (comparator) {
        case 0:
@@ -216,18 +218,124 @@ static int inline mmtimer_disable_int(long nasid, int comparator)
        return 0;
 }
 
-#define TIMER_OFF 0xbadcabLL
+#define COMPARATOR     1               /* The comparator to use */
 
-/* There is one of these for each comparator */
-typedef struct mmtimer {
-       spinlock_t lock ____cacheline_aligned;
+#define TIMER_OFF      0xbadcabLL      /* Timer is not setup */
+#define TIMER_SET      0               /* Comparator is set for this timer */
+
+/* There is one of these for each timer */
+struct mmtimer {
+       struct rb_node list;
        struct k_itimer *timer;
-       int i;
        int cpu;
+};
+
+struct mmtimer_node {
+       spinlock_t lock ____cacheline_aligned;
+       struct rb_root timer_head;
+       struct rb_node *next;
        struct tasklet_struct tasklet;
-} mmtimer_t;
+};
+static struct mmtimer_node *timers;
+
+
+/*
+ * Add a new mmtimer struct to the node's mmtimer list.
+ * This function assumes the struct mmtimer_node is locked.
+ */
+static void mmtimer_add_list(struct mmtimer *n)
+{
+       int nodeid = n->timer->it.mmtimer.node;
+       unsigned long expires = n->timer->it.mmtimer.expires;
+       struct rb_node **link = &timers[nodeid].timer_head.rb_node;
+       struct rb_node *parent = NULL;
+       struct mmtimer *x;
+
+       /*
+        * Find the right place in the rbtree:
+        */
+       while (*link) {
+               parent = *link;
+               x = rb_entry(parent, struct mmtimer, list);
+
+               if (expires < x->timer->it.mmtimer.expires)
+                       link = &(*link)->rb_left;
+               else
+                       link = &(*link)->rb_right;
+       }
+
+       /*
+        * Insert the timer to the rbtree and check whether it
+        * replaces the first pending timer
+        */
+       rb_link_node(&n->list, parent, link);
+       rb_insert_color(&n->list, &timers[nodeid].timer_head);
+
+       if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next,
+                       struct mmtimer, list)->timer->it.mmtimer.expires)
+               timers[nodeid].next = &n->list;
+}
+
+/*
+ * Set the comparator for the next timer.
+ * This function assumes the struct mmtimer_node is locked.
+ */
+static void mmtimer_set_next_timer(int nodeid)
+{
+       struct mmtimer_node *n = &timers[nodeid];
+       struct mmtimer *x;
+       struct k_itimer *t;
+       int o;
+
+restart:
+       if (n->next == NULL)
+               return;
 
-static mmtimer_t ** timers;
+       x = rb_entry(n->next, struct mmtimer, list);
+       t = x->timer;
+       if (!t->it.mmtimer.incr) {
+               /* Not an interval timer */
+               if (!mmtimer_setup(x->cpu, COMPARATOR,
+                                       t->it.mmtimer.expires)) {
+                       /* Late setup, fire now */
+                       tasklet_schedule(&n->tasklet);
+               }
+               return;
+       }
+
+       /* Interval timer */
+       o = 0;
+       while (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) {
+               unsigned long e, e1;
+               struct rb_node *next;
+               t->it.mmtimer.expires += t->it.mmtimer.incr << o;
+               t->it_overrun += 1 << o;
+               o++;
+               if (o > 20) {
+                       printk(KERN_ALERT "mmtimer: cannot reschedule timer\n");
+                       t->it.mmtimer.clock = TIMER_OFF;
+                       n->next = rb_next(&x->list);
+                       rb_erase(&x->list, &n->timer_head);
+                       kfree(x);
+                       goto restart;
+               }
+
+               e = t->it.mmtimer.expires;
+               next = rb_next(&x->list);
+
+               if (next == NULL)
+                       continue;
+
+               e1 = rb_entry(next, struct mmtimer, list)->
+                       timer->it.mmtimer.expires;
+               if (e > e1) {
+                       n->next = next;
+                       rb_erase(&x->list, &n->timer_head);
+                       mmtimer_add_list(x);
+                       goto restart;
+               }
+       }
+}
 
 /**
  * mmtimer_ioctl - ioctl interface for /dev/mmtimer
@@ -366,8 +474,8 @@ static int sgi_clock_get(clockid_t clockid, struct timespec *tp)
 
        nsec = rtc_time() * sgi_clock_period
                        + sgi_clock_offset.tv_nsec;
-       tp->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tp->tv_nsec)
-                       + sgi_clock_offset.tv_sec;
+       *tp = ns_to_timespec(nsec);
+       tp->tv_sec += sgi_clock_offset.tv_sec;
        return 0;
 };
 
@@ -375,11 +483,11 @@ static int sgi_clock_set(clockid_t clockid, struct timespec *tp)
 {
 
        u64 nsec;
-       u64 rem;
+       u32 rem;
 
        nsec = rtc_time() * sgi_clock_period;
 
-       sgi_clock_offset.tv_sec = tp->tv_sec - div_long_long_rem(nsec, NSEC_PER_SEC, &rem);
+       sgi_clock_offset.tv_sec = tp->tv_sec - div_u64_rem(nsec, NSEC_PER_SEC, &rem);
 
        if (rem <= tp->tv_nsec)
                sgi_clock_offset.tv_nsec = tp->tv_sec - rem;
@@ -390,35 +498,6 @@ static int sgi_clock_set(clockid_t clockid, struct timespec *tp)
        return 0;
 }
 
-/*
- * Schedule the next periodic interrupt. This function will attempt
- * to schedule a periodic interrupt later if necessary. If the scheduling
- * of an interrupt fails then the time to skip is lengthened
- * exponentially in order to ensure that the next interrupt
- * can be properly scheduled..
- */
-static int inline reschedule_periodic_timer(mmtimer_t *x)
-{
-       int n;
-       struct k_itimer *t = x->timer;
-
-       t->it.mmtimer.clock = x->i;
-       t->it_overrun--;
-
-       n = 0;
-       do {
-
-               t->it.mmtimer.expires += t->it.mmtimer.incr << n;
-               t->it_overrun += 1 << n;
-               n++;
-               if (n > 20)
-                       return 1;
-
-       } while (!mmtimer_setup(x->i, t->it.mmtimer.expires));
-
-       return 0;
-}
-
 /**
  * mmtimer_interrupt - timer interrupt handler
  * @irq: irq received
@@ -435,71 +514,75 @@ static int inline reschedule_periodic_timer(mmtimer_t *x)
 static irqreturn_t
 mmtimer_interrupt(int irq, void *dev_id)
 {
-       int i;
        unsigned long expires = 0;
        int result = IRQ_NONE;
        unsigned indx = cpu_to_node(smp_processor_id());
+       struct mmtimer *base;
 
-       /*
-        * Do this once for each comparison register
-        */
-       for (i = 0; i < NUM_COMPARATORS; i++) {
-               mmtimer_t *base = timers[indx] + i;
-               /* Make sure this doesn't get reused before tasklet_sched */
-               spin_lock(&base->lock);
-               if (base->cpu == smp_processor_id()) {
-                       if (base->timer)
-                               expires = base->timer->it.mmtimer.expires;
-                       /* expires test won't work with shared irqs */
-                       if ((mmtimer_int_pending(i) > 0) ||
-                               (expires && (expires < rtc_time()))) {
-                               mmtimer_clr_int_pending(i);
-                               tasklet_schedule(&base->tasklet);
-                               result = IRQ_HANDLED;
-                       }
+       spin_lock(&timers[indx].lock);
+       base = rb_entry(timers[indx].next, struct mmtimer, list);
+       if (base == NULL) {
+               spin_unlock(&timers[indx].lock);
+               return result;
+       }
+
+       if (base->cpu == smp_processor_id()) {
+               if (base->timer)
+                       expires = base->timer->it.mmtimer.expires;
+               /* expires test won't work with shared irqs */
+               if ((mmtimer_int_pending(COMPARATOR) > 0) ||
+                       (expires && (expires <= rtc_time()))) {
+                       mmtimer_clr_int_pending(COMPARATOR);
+                       tasklet_schedule(&timers[indx].tasklet);
+                       result = IRQ_HANDLED;
                }
-               spin_unlock(&base->lock);
-               expires = 0;
        }
+       spin_unlock(&timers[indx].lock);
        return result;
 }
 
-void mmtimer_tasklet(unsigned long data) {
-       mmtimer_t *x = (mmtimer_t *)data;
-       struct k_itimer *t = x->timer;
+static void mmtimer_tasklet(unsigned long data)
+{
+       int nodeid = data;
+       struct mmtimer_node *mn = &timers[nodeid];
+       struct mmtimer *x = rb_entry(mn->next, struct mmtimer, list);
+       struct k_itimer *t;
        unsigned long flags;
 
-       if (t == NULL)
-               return;
-
        /* Send signal and deal with periodic signals */
-       spin_lock_irqsave(&t->it_lock, flags);
-       spin_lock(&x->lock);
-       /* If timer was deleted between interrupt and here, leave */
-       if (t != x->timer)
+       spin_lock_irqsave(&mn->lock, flags);
+       if (!mn->next)
                goto out;
-       t->it_overrun = 0;
 
-       if (posix_timer_event(t, 0) != 0) {
+       x = rb_entry(mn->next, struct mmtimer, list);
+       t = x->timer;
+
+       if (t->it.mmtimer.clock == TIMER_OFF)
+               goto out;
+
+       t->it_overrun = 0;
 
-               // printk(KERN_WARNING "mmtimer: cannot deliver signal.\n");
+       mn->next = rb_next(&x->list);
+       rb_erase(&x->list, &mn->timer_head);
 
+       if (posix_timer_event(t, 0) != 0)
                t->it_overrun++;
-       }
+
        if(t->it.mmtimer.incr) {
-               /* Periodic timer */
-               if (reschedule_periodic_timer(x)) {
-                       printk(KERN_WARNING "mmtimer: unable to reschedule\n");
-                       x->timer = NULL;
-               }
+               t->it.mmtimer.expires += t->it.mmtimer.incr;
+               mmtimer_add_list(x);
        } else {
                /* Ensure we don't false trigger in mmtimer_interrupt */
+               t->it.mmtimer.clock = TIMER_OFF;
                t->it.mmtimer.expires = 0;
+               kfree(x);
        }
+       /* Set comparator for next timer, if there is one */
+       mmtimer_set_next_timer(nodeid);
+
        t->it_overrun_last = t->it_overrun;
 out:
-       spin_unlock(&x->lock);
-       spin_unlock_irqrestore(&t->it_lock, flags);
+       spin_unlock_irqrestore(&mn->lock, flags);
 }
 
 static int sgi_timer_create(struct k_itimer *timer)
@@ -516,25 +599,53 @@ static int sgi_timer_create(struct k_itimer *timer)
  */
 static int sgi_timer_del(struct k_itimer *timr)
 {
-       int i = timr->it.mmtimer.clock;
        cnodeid_t nodeid = timr->it.mmtimer.node;
-       mmtimer_t *t = timers[nodeid] + i;
        unsigned long irqflags;
 
-       if (i != TIMER_OFF) {
-               spin_lock_irqsave(&t->lock, irqflags);
-               mmtimer_disable_int(cnodeid_to_nasid(nodeid),i);
-               t->timer = NULL;
+       spin_lock_irqsave(&timers[nodeid].lock, irqflags);
+       if (timr->it.mmtimer.clock != TIMER_OFF) {
+               unsigned long expires = timr->it.mmtimer.expires;
+               struct rb_node *n = timers[nodeid].timer_head.rb_node;
+               struct mmtimer *uninitialized_var(t);
+               int r = 0;
+
                timr->it.mmtimer.clock = TIMER_OFF;
                timr->it.mmtimer.expires = 0;
-               spin_unlock_irqrestore(&t->lock, irqflags);
+
+               while (n) {
+                       t = rb_entry(n, struct mmtimer, list);
+                       if (t->timer == timr)
+                               break;
+
+                       if (expires < t->timer->it.mmtimer.expires)
+                               n = n->rb_left;
+                       else
+                               n = n->rb_right;
+               }
+
+               if (!n) {
+                       spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
+                       return 0;
+               }
+
+               if (timers[nodeid].next == n) {
+                       timers[nodeid].next = rb_next(n);
+                       r = 1;
+               }
+
+               rb_erase(n, &timers[nodeid].timer_head);
+               kfree(t);
+
+               if (r) {
+                       mmtimer_disable_int(cnodeid_to_nasid(nodeid),
+                               COMPARATOR);
+                       mmtimer_set_next_timer(nodeid);
+               }
        }
+       spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
        return 0;
 }
 
-#define timespec_to_ns(x) ((x).tv_nsec + (x).tv_sec * NSEC_PER_SEC)
-#define ns_to_timespec(ts, nsec) (ts).tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &(ts).tv_nsec)
-
 /* Assumption: it_lock is already held with irq's disabled */
 static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
 {
@@ -547,9 +658,8 @@ static void sgi_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
                return;
        }
 
-       ns_to_timespec(cur_setting->it_interval, timr->it.mmtimer.incr * sgi_clock_period);
-       ns_to_timespec(cur_setting->it_value, (timr->it.mmtimer.expires - rtc_time())* sgi_clock_period);
-       return;
+       cur_setting->it_interval = ns_to_timespec(timr->it.mmtimer.incr * sgi_clock_period);
+       cur_setting->it_value = ns_to_timespec((timr->it.mmtimer.expires - rtc_time()) * sgi_clock_period);
 }
 
 
@@ -557,30 +667,33 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
        struct itimerspec * new_setting,
        struct itimerspec * old_setting)
 {
-
-       int i;
        unsigned long when, period, irqflags;
        int err = 0;
        cnodeid_t nodeid;
-       mmtimer_t *base;
+       struct mmtimer *base;
+       struct rb_node *n;
 
        if (old_setting)
                sgi_timer_get(timr, old_setting);
 
        sgi_timer_del(timr);
-       when = timespec_to_ns(new_setting->it_value);
-       period = timespec_to_ns(new_setting->it_interval);
+       when = timespec_to_ns(&new_setting->it_value);
+       period = timespec_to_ns(&new_setting->it_interval);
 
        if (when == 0)
                /* Clear timer */
                return 0;
 
+       base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL);
+       if (base == NULL)
+               return -ENOMEM;
+
        if (flags & TIMER_ABSTIME) {
                struct timespec n;
                unsigned long now;
 
                getnstimeofday(&n);
-               now = timespec_to_ns(n);
+               now = timespec_to_ns(&n);
                if (when > now)
                        when -= now;
                else
@@ -604,47 +717,38 @@ static int sgi_timer_set(struct k_itimer *timr, int flags,
        preempt_disable();
 
        nodeid =  cpu_to_node(smp_processor_id());
-retry:
-       /* Don't use an allocated timer, or a deleted one that's pending */
-       for(i = 0; i< NUM_COMPARATORS; i++) {
-               base = timers[nodeid] + i;
-               if (!base->timer && !base->tasklet.state) {
-                       break;
-               }
-       }
-
-       if (i == NUM_COMPARATORS) {
-               preempt_enable();
-               return -EBUSY;
-       }
 
-       spin_lock_irqsave(&base->lock, irqflags);
+       /* Lock the node timer structure */
+       spin_lock_irqsave(&timers[nodeid].lock, irqflags);
 
-       if (base->timer || base->tasklet.state != 0) {
-               spin_unlock_irqrestore(&base->lock, irqflags);
-               goto retry;
-       }
        base->timer = timr;
        base->cpu = smp_processor_id();
 
-       timr->it.mmtimer.clock = i;
+       timr->it.mmtimer.clock = TIMER_SET;
        timr->it.mmtimer.node = nodeid;
        timr->it.mmtimer.incr = period;
        timr->it.mmtimer.expires = when;
 
-       if (period == 0) {
-               if (!mmtimer_setup(i, when)) {
-                       mmtimer_disable_int(-1, i);
-                       posix_timer_event(timr, 0);
-                       timr->it.mmtimer.expires = 0;
-               }
-       } else {
-               timr->it.mmtimer.expires -= period;
-               if (reschedule_periodic_timer(base))
-                       err = -EINVAL;
+       n = timers[nodeid].next;
+
+       /* Add the new struct mmtimer to node's timer list */
+       mmtimer_add_list(base);
+
+       if (timers[nodeid].next == n) {
+               /* No need to reprogram comparator for now */
+               spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
+               preempt_enable();
+               return err;
        }
 
-       spin_unlock_irqrestore(&base->lock, irqflags);
+       /* We need to reprogram the comparator */
+       if (n)
+               mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR);
+
+       mmtimer_set_next_timer(nodeid);
+
+       /* Unlock the node timer structure */
+       spin_unlock_irqrestore(&timers[nodeid].lock, irqflags);
 
        preempt_enable();
 
@@ -669,7 +773,6 @@ static struct k_clock sgi_clock = {
  */
 static int __init mmtimer_init(void)
 {
-       unsigned i;
        cnodeid_t node, maxn = -1;
 
        if (!ia64_platform_is("sn2"))
@@ -706,31 +809,18 @@ static int __init mmtimer_init(void)
        maxn++;
 
        /* Allocate list of node ptrs to mmtimer_t's */
-       timers = kzalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL);
+       timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL);
        if (timers == NULL) {
                printk(KERN_ERR "%s: failed to allocate memory for device\n",
                                MMTIMER_NAME);
                goto out3;
        }
 
-       /* Allocate mmtimer_t's for each online node */
+       /* Initialize struct mmtimer's for each online node */
        for_each_online_node(node) {
-               timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node);
-               if (timers[node] == NULL) {
-                       printk(KERN_ERR "%s: failed to allocate memory for device\n",
-                               MMTIMER_NAME);
-                       goto out4;
-               }
-               for (i=0; i< NUM_COMPARATORS; i++) {
-                       mmtimer_t * base = timers[node] + i;
-
-                       spin_lock_init(&base->lock);
-                       base->timer = NULL;
-                       base->cpu = 0;
-                       base->i = i;
-                       tasklet_init(&base->tasklet, mmtimer_tasklet,
-                               (unsigned long) (base));
-               }
+               spin_lock_init(&timers[node].lock);
+               tasklet_init(&timers[node].tasklet, mmtimer_tasklet,
+                       (unsigned long) node);
        }
 
        sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second;
@@ -741,11 +831,8 @@ static int __init mmtimer_init(void)
 
        return 0;
 
-out4:
-       for_each_online_node(node) {
-               kfree(timers[node]);
-       }
 out3:
+       kfree(timers);
        misc_deregister(&mmtimer_miscdev);
 out2:
        free_irq(SGI_MMTIMER_VECTOR, NULL);
@@ -754,4 +841,3 @@ out1:
 }
 
 module_init(mmtimer_init);
-
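
[Editor's note on the mmtimer rework above: the fixed per-node comparator array is replaced by a single comparator plus an rbtree of pending timers ordered by expiry, with timers[nodeid].next caching the earliest node so the interrupt and tasklet paths can find the next timer without searching. The sketch below shows only that ordered-insert-plus-cached-minimum idea, with simplified hypothetical types rather than the driver's structures.]

	#include <linux/rbtree.h>

	struct example_timer {
		struct rb_node node;
		unsigned long expires;
	};

	struct example_queue {
		struct rb_root head;	/* pending timers, ordered by expires */
		struct rb_node *next;	/* cached earliest entry, or NULL */
	};

	/* Insert t and update the cached minimum; caller holds the queue lock. */
	static void example_add(struct example_queue *q, struct example_timer *t)
	{
		struct rb_node **link = &q->head.rb_node, *parent = NULL;
		struct example_timer *x;

		while (*link) {
			parent = *link;
			x = rb_entry(parent, struct example_timer, node);
			if (t->expires < x->expires)
				link = &parent->rb_left;
			else
				link = &parent->rb_right;
		}
		rb_link_node(&t->node, parent, link);
		rb_insert_color(&t->node, &q->head);

		if (!q->next || t->expires <
		    rb_entry(q->next, struct example_timer, node)->expires)
			q->next = &t->node;
	}
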
index 64b7b2b18352526d92e0ebf83f6bda424aed7d1c..d57d3a61919b310f22e972e97cf1c8797334db1d 100644 (file)
@@ -2,7 +2,8 @@
 /*
  *           moxa.c  -- MOXA Intellio family multiport serial driver.
  *
- *      Copyright (C) 1999-2000  Moxa Technologies (support@moxa.com.tw).
+ *      Copyright (C) 1999-2000  Moxa Technologies (support@moxa.com).
+ *      Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
  *
  *      This code is loosely based on the Linux serial driver, written by
  *      Linus Torvalds, Theodore T'so and others.
@@ -25,6 +26,7 @@
 #include <linux/mm.h>
 #include <linux/ioport.h>
 #include <linux/errno.h>
+#include <linux/firmware.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/bitops.h>
-#include <linux/completion.h>
 
 #include <asm/system.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
 
-#define MOXA_VERSION           "5.1k"
+#include "moxa.h"
+
+#define MOXA_VERSION           "6.0k"
+
+#define MOXA_FW_HDRLEN         32
 
 #define MOXAMAJOR              172
-#define MOXACUMAJOR            173
 
 #define MAX_BOARDS             4       /* Don't change this value */
 #define MAX_PORTS_PER_BOARD    32      /* Don't change this value */
 #define MAX_PORTS              (MAX_BOARDS * MAX_PORTS_PER_BOARD)
 
+#define MOXA_IS_320(brd) ((brd)->boardType == MOXA_BOARD_C320_ISA || \
+               (brd)->boardType == MOXA_BOARD_C320_PCI)
+
 /*
  *    Define the Moxa PCI vendor and device IDs.
  */
@@ -92,24 +99,16 @@ static struct pci_device_id moxa_pcibrds[] = {
 MODULE_DEVICE_TABLE(pci, moxa_pcibrds);
 #endif /* CONFIG_PCI */
 
-struct moxa_isa_board_conf {
-       int boardType;
-       int numPorts;
-       unsigned long baseAddr;
-};
-
-static struct moxa_isa_board_conf moxa_isa_boards[] =
-{
-/*       {MOXA_BOARD_C218_ISA,8,0xDC000}, */
-};
+struct moxa_port;
 
 static struct moxa_board_conf {
        int boardType;
        int numPorts;
-       unsigned long baseAddr;
        int busType;
 
-       int loadstat;
+       unsigned int ready;
+
+       struct moxa_port *ports;
 
        void __iomem *basemem;
        void __iomem *intNdx;
@@ -131,30 +130,27 @@ struct moxaq_str {
 };
 
 struct moxa_port {
+       struct moxa_board_conf *board;
+       struct tty_struct *tty;
+       void __iomem *tableAddr;
+
        int type;
-       int port;
        int close_delay;
-       unsigned short closing_wait;
-       int count;
-       int blocked_open;
-       long event; /* long req'd for set_bit --RR */
+       unsigned int count;
        int asyncflags;
-       unsigned long statusflags;
-       struct tty_struct *tty;
        int cflag;
+       unsigned long statusflags;
        wait_queue_head_t open_wait;
-       struct completion close_wait;
 
-       struct timer_list emptyTimer;
-
-       char chkPort;
-       char lineCtrl;
-       void __iomem *tableAddr;
-       long curBaud;
-       char DCDState;
-       char lowChkFlag;
+       u8 DCDState;
+       u8 lineCtrl;
+       u8 lowChkFlag;
+};
 
-       ushort breakCnt;
+struct mon_str {
+       int tick;
+       int rxcnt[MAX_PORTS];
+       int txcnt[MAX_PORTS];
 };
 
 /* statusflags */
@@ -168,20 +164,27 @@ struct moxa_port {
 #define WAKEUP_CHARS           256
 
 static int ttymajor = MOXAMAJOR;
+static struct mon_str moxaLog;
+static unsigned int moxaFuncTout = HZ / 2;
+static unsigned int moxaLowWaterChk;
+static DEFINE_MUTEX(moxa_openlock);
 /* Variables for insmod */
 #ifdef MODULE
-static int baseaddr[4];
-static int type[4];
-static int numports[4];
+static unsigned long baseaddr[MAX_BOARDS];
+static unsigned int type[MAX_BOARDS];
+static unsigned int numports[MAX_BOARDS];
 #endif
 
 MODULE_AUTHOR("William Chen");
 MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver");
 MODULE_LICENSE("GPL");
 #ifdef MODULE
-module_param_array(type, int, NULL, 0);
-module_param_array(baseaddr, int, NULL, 0);
-module_param_array(numports, int, NULL, 0);
+module_param_array(type, uint, NULL, 0);
+MODULE_PARM_DESC(type, "card type: C218=2, C320=4");
+module_param_array(baseaddr, ulong, NULL, 0);
+MODULE_PARM_DESC(baseaddr, "base address");
+module_param_array(numports, uint, NULL, 0);
+MODULE_PARM_DESC(numports, "numports (ignored for C218)");
 #endif
 module_param(ttymajor, int, 0);
 
@@ -194,9 +197,6 @@ static int moxa_write(struct tty_struct *, const unsigned char *, int);
 static int moxa_write_room(struct tty_struct *);
 static void moxa_flush_buffer(struct tty_struct *);
 static int moxa_chars_in_buffer(struct tty_struct *);
-static void moxa_flush_chars(struct tty_struct *);
-static void moxa_put_char(struct tty_struct *, unsigned char);
-static int moxa_ioctl(struct tty_struct *, struct file *, unsigned int, unsigned long);
 static void moxa_throttle(struct tty_struct *);
 static void moxa_unthrottle(struct tty_struct *);
 static void moxa_set_termios(struct tty_struct *, struct ktermios *);
@@ -208,44 +208,183 @@ static int moxa_tiocmset(struct tty_struct *tty, struct file *file,
                         unsigned int set, unsigned int clear);
 static void moxa_poll(unsigned long);
 static void moxa_set_tty_param(struct tty_struct *, struct ktermios *);
-static int moxa_block_till_ready(struct tty_struct *, struct file *,
-                           struct moxa_port *);
 static void moxa_setup_empty_event(struct tty_struct *);
-static void moxa_check_xmit_empty(unsigned long);
 static void moxa_shut_down(struct moxa_port *);
-static void moxa_receive_data(struct moxa_port *);
 /*
  * moxa board interface functions:
  */
-static void MoxaDriverInit(void);
-static int MoxaDriverIoctl(unsigned int, unsigned long, int);
-static int MoxaDriverPoll(void);
-static int MoxaPortsOfCard(int);
-static int MoxaPortIsValid(int);
-static void MoxaPortEnable(int);
-static void MoxaPortDisable(int);
-static long MoxaPortGetMaxBaud(int);
-static long MoxaPortSetBaud(int, long);
-static int MoxaPortSetTermio(int, struct ktermios *, speed_t);
-static int MoxaPortGetLineOut(int, int *, int *);
-static void MoxaPortLineCtrl(int, int, int);
-static void MoxaPortFlowCtrl(int, int, int, int, int, int);
-static int MoxaPortLineStatus(int);
-static int MoxaPortDCDChange(int);
-static int MoxaPortDCDON(int);
-static void MoxaPortFlushData(int, int);
-static int MoxaPortWriteData(int, unsigned char *, int);
-static int MoxaPortReadData(int, struct tty_struct *tty);
-static int MoxaPortTxQueue(int);
-static int MoxaPortRxQueue(int);
-static int MoxaPortTxFree(int);
-static void MoxaPortTxDisable(int);
-static void MoxaPortTxEnable(int);
-static int MoxaPortResetBrkCnt(int);
-static void MoxaPortSendBreak(int, int);
+static void MoxaPortEnable(struct moxa_port *);
+static void MoxaPortDisable(struct moxa_port *);
+static int MoxaPortSetTermio(struct moxa_port *, struct ktermios *, speed_t);
+static int MoxaPortGetLineOut(struct moxa_port *, int *, int *);
+static void MoxaPortLineCtrl(struct moxa_port *, int, int);
+static void MoxaPortFlowCtrl(struct moxa_port *, int, int, int, int, int);
+static int MoxaPortLineStatus(struct moxa_port *);
+static void MoxaPortFlushData(struct moxa_port *, int);
+static int MoxaPortWriteData(struct moxa_port *, const unsigned char *, int);
+static int MoxaPortReadData(struct moxa_port *);
+static int MoxaPortTxQueue(struct moxa_port *);
+static int MoxaPortRxQueue(struct moxa_port *);
+static int MoxaPortTxFree(struct moxa_port *);
+static void MoxaPortTxDisable(struct moxa_port *);
+static void MoxaPortTxEnable(struct moxa_port *);
 static int moxa_get_serial_info(struct moxa_port *, struct serial_struct __user *);
 static int moxa_set_serial_info(struct moxa_port *, struct serial_struct __user *);
-static void MoxaSetFifo(int port, int enable);
+static void MoxaSetFifo(struct moxa_port *port, int enable);
+
+/*
+ * I/O functions
+ */
+
+static void moxa_wait_finish(void __iomem *ofsAddr)
+{
+       unsigned long end = jiffies + moxaFuncTout;
+
+       while (readw(ofsAddr + FuncCode) != 0)
+               if (time_after(jiffies, end))
+                       return;
+       if (readw(ofsAddr + FuncCode) != 0 && printk_ratelimit())
+               printk(KERN_WARNING "moxa function expired\n");
+}
+
+static void moxafunc(void __iomem *ofsAddr, u16 cmd, u16 arg)
+{
+       writew(arg, ofsAddr + FuncArg);
+       writew(cmd, ofsAddr + FuncCode);
+       moxa_wait_finish(ofsAddr);
+}
+
+static void moxa_low_water_check(void __iomem *ofsAddr)
+{
+       u16 rptr, wptr, mask, len;
+
+       if (readb(ofsAddr + FlagStat) & Xoff_state) {
+               rptr = readw(ofsAddr + RXrptr);
+               wptr = readw(ofsAddr + RXwptr);
+               mask = readw(ofsAddr + RX_mask);
+               len = (wptr - rptr) & mask;
+               if (len <= Low_water)
+                       moxafunc(ofsAddr, FC_SendXon, 0);
+       }
+}
+
+/*
+ * TTY operations
+ */
+
+static int moxa_ioctl(struct tty_struct *tty, struct file *file,
+                     unsigned int cmd, unsigned long arg)
+{
+       struct moxa_port *ch = tty->driver_data;
+       void __user *argp = (void __user *)arg;
+       int status, ret = 0;
+
+       if (tty->index == MAX_PORTS) {
+               if (cmd != MOXA_GETDATACOUNT && cmd != MOXA_GET_IOQUEUE &&
+                               cmd != MOXA_GETMSTATUS)
+                       return -EINVAL;
+       } else if (!ch)
+               return -ENODEV;
+
+       switch (cmd) {
+       case MOXA_GETDATACOUNT:
+               moxaLog.tick = jiffies;
+               if (copy_to_user(argp, &moxaLog, sizeof(moxaLog)))
+                       ret = -EFAULT;
+               break;
+       case MOXA_FLUSH_QUEUE:
+               MoxaPortFlushData(ch, arg);
+               break;
+       case MOXA_GET_IOQUEUE: {
+               struct moxaq_str __user *argm = argp;
+               struct moxaq_str tmp;
+               struct moxa_port *p;
+               unsigned int i, j;
+
+               mutex_lock(&moxa_openlock);
+               for (i = 0; i < MAX_BOARDS; i++) {
+                       p = moxa_boards[i].ports;
+                       for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
+                               memset(&tmp, 0, sizeof(tmp));
+                               if (moxa_boards[i].ready) {
+                                       tmp.inq = MoxaPortRxQueue(p);
+                                       tmp.outq = MoxaPortTxQueue(p);
+                               }
+                               if (copy_to_user(argm, &tmp, sizeof(tmp))) {
+                                       mutex_unlock(&moxa_openlock);
+                                       return -EFAULT;
+                               }
+                       }
+               }
+               mutex_unlock(&moxa_openlock);
+               break;
+       } case MOXA_GET_OQUEUE:
+               status = MoxaPortTxQueue(ch);
+               ret = put_user(status, (unsigned long __user *)argp);
+               break;
+       case MOXA_GET_IQUEUE:
+               status = MoxaPortRxQueue(ch);
+               ret = put_user(status, (unsigned long __user *)argp);
+               break;
+       case MOXA_GETMSTATUS: {
+               struct mxser_mstatus __user *argm = argp;
+               struct mxser_mstatus tmp;
+               struct moxa_port *p;
+               unsigned int i, j;
+
+               mutex_lock(&moxa_openlock);
+               for (i = 0; i < MAX_BOARDS; i++) {
+                       p = moxa_boards[i].ports;
+                       for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) {
+                               memset(&tmp, 0, sizeof(tmp));
+                               if (!moxa_boards[i].ready)
+                                       goto copy;
+
+                               status = MoxaPortLineStatus(p);
+                               if (status & 1)
+                                       tmp.cts = 1;
+                               if (status & 2)
+                                       tmp.dsr = 1;
+                               if (status & 4)
+                                       tmp.dcd = 1;
+
+                               if (!p->tty || !p->tty->termios)
+                                       tmp.cflag = p->cflag;
+                               else
+                                       tmp.cflag = p->tty->termios->c_cflag;
+copy:
+                               if (copy_to_user(argm, &tmp, sizeof(tmp))) {
+                                       mutex_unlock(&moxa_openlock);
+                                       return -EFAULT;
+                               }
+                       }
+               }
+               mutex_unlock(&moxa_openlock);
+               break;
+       }
+       case TIOCGSERIAL:
+               mutex_lock(&moxa_openlock);
+               ret = moxa_get_serial_info(ch, argp);
+               mutex_unlock(&moxa_openlock);
+               break;
+       case TIOCSSERIAL:
+               mutex_lock(&moxa_openlock);
+               ret = moxa_set_serial_info(ch, argp);
+               mutex_unlock(&moxa_openlock);
+               break;
+       default:
+               ret = -ENOIOCTLCMD;
+       }
+       return ret;
+}
+
+static void moxa_break_ctl(struct tty_struct *tty, int state)
+{
+       struct moxa_port *port = tty->driver_data;
+
+       moxafunc(port->tableAddr, state ? FC_SendBreak : FC_StopBreak,
+                       Magic_code);
+}
 
 static const struct tty_operations moxa_ops = {
        .open = moxa_open,
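
[Editor's note on the moxa hunks above: moxafunc()/moxa_wait_finish() form a polled command mailbox -- write the argument, write the opcode, then spin until the firmware clears FuncCode, giving up after a jiffies-based timeout. A hedged sketch of that wait-with-timeout idiom follows, against a single hypothetical "busy" register rather than the Moxa register layout.]

	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/jiffies.h>

	/*
	 * Poll a hypothetical 16-bit busy register until it reads zero, or
	 * give up after `timeout` jiffies. Returns 0 on success.
	 */
	static int example_wait_idle(void __iomem *busy_reg, unsigned long timeout)
	{
		unsigned long end = jiffies + timeout;

		while (readw(busy_reg) != 0)
			if (time_after(jiffies, end))
				return -ETIMEDOUT;
		return 0;
	}
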
@@ -254,8 +393,6 @@ static const struct tty_operations moxa_ops = {
        .write_room = moxa_write_room,
        .flush_buffer = moxa_flush_buffer,
        .chars_in_buffer = moxa_chars_in_buffer,
-       .flush_chars = moxa_flush_chars,
-       .put_char = moxa_put_char,
        .ioctl = moxa_ioctl,
        .throttle = moxa_throttle,
        .unthrottle = moxa_unthrottle,
@@ -263,259 +400,795 @@ static const struct tty_operations moxa_ops = {
        .stop = moxa_stop,
        .start = moxa_start,
        .hangup = moxa_hangup,
+       .break_ctl = moxa_break_ctl,
        .tiocmget = moxa_tiocmget,
        .tiocmset = moxa_tiocmset,
 };
 
 static struct tty_driver *moxaDriver;
-static struct moxa_port moxa_ports[MAX_PORTS];
 static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0);
 static DEFINE_SPINLOCK(moxa_lock);
 
-#ifdef CONFIG_PCI
-static int __devinit moxa_pci_probe(struct pci_dev *pdev,
-               const struct pci_device_id *ent)
-{
-       struct moxa_board_conf *board;
-       unsigned int i;
-       int board_type = ent->driver_data;
-       int retval;
-
-       retval = pci_enable_device(pdev);
-       if (retval) {
-               dev_err(&pdev->dev, "can't enable pci device\n");
-               goto err;
-       }
-
-       for (i = 0; i < MAX_BOARDS; i++)
-               if (moxa_boards[i].basemem == NULL)
-                       break;
-
-       retval = -ENODEV;
-       if (i >= MAX_BOARDS) {
-               dev_warn(&pdev->dev, "more than %u MOXA Intellio family boards "
-                               "found. Board is ignored.\n", MAX_BOARDS);
-               goto err;
-       }
-
-       board = &moxa_boards[i];
-       board->basemem = pci_iomap(pdev, 2, 0x4000);
-       if (board->basemem == NULL) {
-               dev_err(&pdev->dev, "can't remap io space 2\n");
-               goto err;
-       }
+/*
+ * HW init
+ */
 
-       board->boardType = board_type;
-       switch (board_type) {
+static int moxa_check_fw_model(struct moxa_board_conf *brd, u8 model)
+{
+       switch (brd->boardType) {
        case MOXA_BOARD_C218_ISA:
        case MOXA_BOARD_C218_PCI:
-               board->numPorts = 8;
+               if (model != 1)
+                       goto err;
                break;
-
        case MOXA_BOARD_CP204J:
-               board->numPorts = 4;
+               if (model != 3)
+                       goto err;
                break;
        default:
-               board->numPorts = 0;
+               if (model != 2)
+                       goto err;
                break;
        }
-       board->busType = MOXA_BUS_TYPE_PCI;
-
-       pci_set_drvdata(pdev, board);
-
-       return (0);
+       return 0;
 err:
-       return retval;
+       return -EINVAL;
 }
 
-static void __devexit moxa_pci_remove(struct pci_dev *pdev)
+static int moxa_check_fw(const void *ptr)
 {
-       struct moxa_board_conf *brd = pci_get_drvdata(pdev);
+       const __le16 *lptr = ptr;
 
-       pci_iounmap(pdev, brd->basemem);
-       brd->basemem = NULL;
-}
+       if (*lptr != cpu_to_le16(0x7980))
+               return -EINVAL;
 
-static struct pci_driver moxa_pci_driver = {
-       .name = "moxa",
-       .id_table = moxa_pcibrds,
-       .probe = moxa_pci_probe,
-       .remove = __devexit_p(moxa_pci_remove)
-};
-#endif /* CONFIG_PCI */
+       return 0;
+}
 
-static int __init moxa_init(void)
+static int moxa_load_bios(struct moxa_board_conf *brd, const u8 *buf,
+               size_t len)
 {
-       int i, numBoards, retval = 0;
-       struct moxa_port *ch;
-
-       printk(KERN_INFO "MOXA Intellio family driver version %s\n",
-                       MOXA_VERSION);
-       moxaDriver = alloc_tty_driver(MAX_PORTS + 1);
-       if (!moxaDriver)
-               return -ENOMEM;
-
-       moxaDriver->owner = THIS_MODULE;
-       moxaDriver->name = "ttyMX";
-       moxaDriver->major = ttymajor;
-       moxaDriver->minor_start = 0;
-       moxaDriver->type = TTY_DRIVER_TYPE_SERIAL;
-       moxaDriver->subtype = SERIAL_TYPE_NORMAL;
-       moxaDriver->init_termios = tty_std_termios;
-       moxaDriver->init_termios.c_cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL;
-       moxaDriver->init_termios.c_ispeed = 9600;
-       moxaDriver->init_termios.c_ospeed = 9600;
-       moxaDriver->flags = TTY_DRIVER_REAL_RAW;
-       tty_set_operations(moxaDriver, &moxa_ops);
+       void __iomem *baseAddr = brd->basemem;
+       u16 tmp;
 
-       for (i = 0, ch = moxa_ports; i < MAX_PORTS; i++, ch++) {
-               ch->type = PORT_16550A;
-               ch->port = i;
-               ch->close_delay = 5 * HZ / 10;
-               ch->closing_wait = 30 * HZ;
-               ch->cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL;
-               init_waitqueue_head(&ch->open_wait);
-               init_completion(&ch->close_wait);
-
-               setup_timer(&ch->emptyTimer, moxa_check_xmit_empty,
-                               (unsigned long)ch);
-       }
+       writeb(HW_reset, baseAddr + Control_reg);       /* reset */
+       msleep(10);
+       memset_io(baseAddr, 0, 4096);
+       memcpy_toio(baseAddr, buf, len);        /* download BIOS */
+       writeb(0, baseAddr + Control_reg);      /* restart */
 
-       pr_debug("Moxa tty devices major number = %d\n", ttymajor);
+       msleep(2000);
 
-       if (tty_register_driver(moxaDriver)) {
-               printk(KERN_ERR "Couldn't install MOXA Smartio family driver !\n");
-               put_tty_driver(moxaDriver);
-               return -1;
+       switch (brd->boardType) {
+       case MOXA_BOARD_C218_ISA:
+       case MOXA_BOARD_C218_PCI:
+               tmp = readw(baseAddr + C218_key);
+               if (tmp != C218_KeyCode)
+                       goto err;
+               break;
+       case MOXA_BOARD_CP204J:
+               tmp = readw(baseAddr + C218_key);
+               if (tmp != CP204J_KeyCode)
+                       goto err;
+               break;
+       default:
+               tmp = readw(baseAddr + C320_key);
+               if (tmp != C320_KeyCode)
+                       goto err;
+               tmp = readw(baseAddr + C320_status);
+               if (tmp != STS_init) {
+                       printk(KERN_ERR "MOXA: bios upload failed -- CPU/Basic "
+                                       "module not found\n");
+                       return -EIO;
+               }
+               break;
        }
 
-       mod_timer(&moxaTimer, jiffies + HZ / 50);
+       return 0;
+err:
+       printk(KERN_ERR "MOXA: bios upload failed -- board not found\n");
+       return -EIO;
+}
 
-       /* Find the boards defined in source code */
-       numBoards = 0;
-       for (i = 0; i < MAX_BOARDS; i++) {
-               if ((moxa_isa_boards[i].boardType == MOXA_BOARD_C218_ISA) ||
-                (moxa_isa_boards[i].boardType == MOXA_BOARD_C320_ISA)) {
-                       moxa_boards[numBoards].boardType = moxa_isa_boards[i].boardType;
-                       if (moxa_isa_boards[i].boardType == MOXA_BOARD_C218_ISA)
-                               moxa_boards[numBoards].numPorts = 8;
-                       else
-                               moxa_boards[numBoards].numPorts = moxa_isa_boards[i].numPorts;
-                       moxa_boards[numBoards].busType = MOXA_BUS_TYPE_ISA;
-                       moxa_boards[numBoards].baseAddr = moxa_isa_boards[i].baseAddr;
-                       pr_debug("Moxa board %2d: %s board(baseAddr=%lx)\n",
-                              numBoards + 1,
-                              moxa_brdname[moxa_boards[numBoards].boardType-1],
-                              moxa_boards[numBoards].baseAddr);
-                       numBoards++;
-               }
-       }
-       /* Find the boards defined form module args. */
-#ifdef MODULE
-       for (i = 0; i < MAX_BOARDS; i++) {
-               if ((type[i] == MOXA_BOARD_C218_ISA) ||
-                   (type[i] == MOXA_BOARD_C320_ISA)) {
-                       pr_debug("Moxa board %2d: %s board(baseAddr=%lx)\n",
-                              numBoards + 1, moxa_brdname[type[i] - 1],
-                              (unsigned long)baseaddr[i]);
-                       if (numBoards >= MAX_BOARDS) {
-                               printk(KERN_WARNING "More than %d MOXA "
-                                       "Intellio family boards found. Board "
-                                       "is ignored.\n", MAX_BOARDS);
-                               continue;
-                       }
-                       moxa_boards[numBoards].boardType = type[i];
-                       if (moxa_isa_boards[i].boardType == MOXA_BOARD_C218_ISA)
-                               moxa_boards[numBoards].numPorts = 8;
-                       else
-                               moxa_boards[numBoards].numPorts = numports[i];
-                       moxa_boards[numBoards].busType = MOXA_BUS_TYPE_ISA;
-                       moxa_boards[numBoards].baseAddr = baseaddr[i];
-                       numBoards++;
-               }
-       }
-#endif
+static int moxa_load_320b(struct moxa_board_conf *brd, const u8 *ptr,
+               size_t len)
+{
+       void __iomem *baseAddr = brd->basemem;
 
-#ifdef CONFIG_PCI
-       retval = pci_register_driver(&moxa_pci_driver);
-       if (retval) {
-               printk(KERN_ERR "Can't register moxa pci driver!\n");
-               if (numBoards)
-                       retval = 0;
+       if (len < 7168) {
+               printk(KERN_ERR "MOXA: invalid 320 bios -- too short\n");
+               return -EINVAL;
        }
-#endif
 
-       for (i = 0; i < numBoards; i++) {
-               moxa_boards[i].basemem = ioremap(moxa_boards[i].baseAddr,
-                               0x4000);
-       }
+       writew(len - 7168 - 2, baseAddr + C320bapi_len);
+       writeb(1, baseAddr + Control_reg);      /* Select Page 1 */
+       memcpy_toio(baseAddr + DynPage_addr, ptr, 7168);
+       writeb(2, baseAddr + Control_reg);      /* Select Page 2 */
+       memcpy_toio(baseAddr + DynPage_addr, ptr + 7168, len - 7168);
 
-       return retval;
+       return 0;
 }
 
-static void __exit moxa_exit(void)
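+/*
+ * Download the firmware proper through the load buffer in chunks of up
+ * to 2048 words, let the card verify the 16-bit checksum (three attempts
+ * at most), then wait for the Magic_code handshakes.  On C320 boards this
+ * also programs the TMS320_* registers and reads the module count to fix
+ * up the real number of ports.
+ */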
+static int moxa_real_load_code(struct moxa_board_conf *brd, const void *ptr,
+               size_t len)
 {
-       int i;
-
-       del_timer_sync(&moxaTimer);
+       void __iomem *baseAddr = brd->basemem;
+       const u16 *uptr = ptr;
+       size_t wlen, len2, j;
+       unsigned long key, loadbuf, loadlen, checksum, checksum_ok;
+       unsigned int i, retry;
+       u16 usum, keycode;
 
-       for (i = 0; i < MAX_PORTS; i++)
-               del_timer_sync(&moxa_ports[i].emptyTimer);
+       keycode = (brd->boardType == MOXA_BOARD_CP204J) ? CP204J_KeyCode :
+                               C218_KeyCode;
 
-       if (tty_unregister_driver(moxaDriver))
-               printk(KERN_ERR "Couldn't unregister MOXA Intellio family "
-                               "serial driver\n");
-       put_tty_driver(moxaDriver);
+       switch (brd->boardType) {
+       case MOXA_BOARD_CP204J:
+       case MOXA_BOARD_C218_ISA:
+       case MOXA_BOARD_C218_PCI:
+               key = C218_key;
+               loadbuf = C218_LoadBuf;
+               loadlen = C218DLoad_len;
+               checksum = C218check_sum;
+               checksum_ok = C218chksum_ok;
+               break;
+       default:
+               key = C320_key;
+               keycode = C320_KeyCode;
+               loadbuf = C320_LoadBuf;
+               loadlen = C320DLoad_len;
+               checksum = C320check_sum;
+               checksum_ok = C320chksum_ok;
+               break;
+       }
 
-#ifdef CONFIG_PCI
-       pci_unregister_driver(&moxa_pci_driver);
-#endif
+       usum = 0;
+       wlen = len >> 1;
+       for (i = 0; i < wlen; i++)
+               usum += le16_to_cpu(uptr[i]);
+       retry = 0;
+       do {
+               wlen = len >> 1;
+               j = 0;
+               while (wlen) {
+                       len2 = (wlen > 2048) ? 2048 : wlen;
+                       wlen -= len2;
+                       memcpy_toio(baseAddr + loadbuf, ptr + j, len2 << 1);
+                       j += len2 << 1;
+
+                       writew(len2, baseAddr + loadlen);
+                       writew(0, baseAddr + key);
+                       for (i = 0; i < 100; i++) {
+                               if (readw(baseAddr + key) == keycode)
+                                       break;
+                               msleep(10);
+                       }
+                       if (readw(baseAddr + key) != keycode)
+                               return -EIO;
+               }
+               writew(0, baseAddr + loadlen);
+               writew(usum, baseAddr + checksum);
+               writew(0, baseAddr + key);
+               for (i = 0; i < 100; i++) {
+                       if (readw(baseAddr + key) == keycode)
+                               break;
+                       msleep(10);
+               }
+               retry++;
+       } while ((readb(baseAddr + checksum_ok) != 1) && (retry < 3));
+       if (readb(baseAddr + checksum_ok) != 1)
+               return -EIO;
+
+       writew(0, baseAddr + key);
+       for (i = 0; i < 600; i++) {
+               if (readw(baseAddr + Magic_no) == Magic_code)
+                       break;
+               msleep(10);
+       }
+       if (readw(baseAddr + Magic_no) != Magic_code)
+               return -EIO;
+
+       if (MOXA_IS_320(brd)) {
+               if (brd->busType == MOXA_BUS_TYPE_PCI) {        /* ASIC board */
+                       writew(0x3800, baseAddr + TMS320_PORT1);
+                       writew(0x3900, baseAddr + TMS320_PORT2);
+                       writew(28499, baseAddr + TMS320_CLOCK);
+               } else {
+                       writew(0x3200, baseAddr + TMS320_PORT1);
+                       writew(0x3400, baseAddr + TMS320_PORT2);
+                       writew(19999, baseAddr + TMS320_CLOCK);
+               }
+       }
+       writew(1, baseAddr + Disable_IRQ);
+       writew(0, baseAddr + Magic_no);
+       for (i = 0; i < 500; i++) {
+               if (readw(baseAddr + Magic_no) == Magic_code)
+                       break;
+               msleep(10);
+       }
+       if (readw(baseAddr + Magic_no) != Magic_code)
+               return -EIO;
+
+       if (MOXA_IS_320(brd)) {
+               j = readw(baseAddr + Module_cnt);
+               if (j <= 0)
+                       return -EIO;
+               brd->numPorts = j * 8;
+               writew(j, baseAddr + Module_no);
+               writew(0, baseAddr + Magic_no);
+               for (i = 0; i < 600; i++) {
+                       if (readw(baseAddr + Magic_no) == Magic_code)
+                               break;
+                       msleep(10);
+               }
+               if (readw(baseAddr + Magic_no) != Magic_code)
+                       return -EIO;
+       }
+       brd->intNdx = baseAddr + IRQindex;
+       brd->intPend = baseAddr + IRQpending;
+       brd->intTable = baseAddr + IRQtable;
+
+       return 0;
+}
+
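+/*
+ * Load the main firmware, then initialize each port's parameter table
+ * address and RX/TX buffer page registers according to the board type
+ * and port count.
+ */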
+static int moxa_load_code(struct moxa_board_conf *brd, const void *ptr,
+               size_t len)
+{
+       void __iomem *ofsAddr, *baseAddr = brd->basemem;
+       struct moxa_port *port;
+       int retval, i;
+
+       if (len % 2) {
+               printk(KERN_ERR "MOXA: bios length is not even\n");
+               return -EINVAL;
+       }
+
+       retval = moxa_real_load_code(brd, ptr, len); /* may change numPorts */
+       if (retval)
+               return retval;
+
+       switch (brd->boardType) {
+       case MOXA_BOARD_C218_ISA:
+       case MOXA_BOARD_C218_PCI:
+       case MOXA_BOARD_CP204J:
+               port = brd->ports;
+               for (i = 0; i < brd->numPorts; i++, port++) {
+                       port->board = brd;
+                       port->DCDState = 0;
+                       port->tableAddr = baseAddr + Extern_table +
+                                       Extern_size * i;
+                       ofsAddr = port->tableAddr;
+                       writew(C218rx_mask, ofsAddr + RX_mask);
+                       writew(C218tx_mask, ofsAddr + TX_mask);
+                       writew(C218rx_spage + i * C218buf_pageno, ofsAddr + Page_rxb);
+                       writew(readw(ofsAddr + Page_rxb) + C218rx_pageno, ofsAddr + EndPage_rxb);
+
+                       writew(C218tx_spage + i * C218buf_pageno, ofsAddr + Page_txb);
+                       writew(readw(ofsAddr + Page_txb) + C218tx_pageno, ofsAddr + EndPage_txb);
+
+               }
+               break;
+       default:
+               port = brd->ports;
+               for (i = 0; i < brd->numPorts; i++, port++) {
+                       port->board = brd;
+                       port->DCDState = 0;
+                       port->tableAddr = baseAddr + Extern_table +
+                                       Extern_size * i;
+                       ofsAddr = port->tableAddr;
+                       switch (brd->numPorts) {
+                       case 8:
+                               writew(C320p8rx_mask, ofsAddr + RX_mask);
+                               writew(C320p8tx_mask, ofsAddr + TX_mask);
+                               writew(C320p8rx_spage + i * C320p8buf_pgno, ofsAddr + Page_rxb);
+                               writew(readw(ofsAddr + Page_rxb) + C320p8rx_pgno, ofsAddr + EndPage_rxb);
+                               writew(C320p8tx_spage + i * C320p8buf_pgno, ofsAddr + Page_txb);
+                               writew(readw(ofsAddr + Page_txb) + C320p8tx_pgno, ofsAddr + EndPage_txb);
+
+                               break;
+                       case 16:
+                               writew(C320p16rx_mask, ofsAddr + RX_mask);
+                               writew(C320p16tx_mask, ofsAddr + TX_mask);
+                               writew(C320p16rx_spage + i * C320p16buf_pgno, ofsAddr + Page_rxb);
+                               writew(readw(ofsAddr + Page_rxb) + C320p16rx_pgno, ofsAddr + EndPage_rxb);
+                               writew(C320p16tx_spage + i * C320p16buf_pgno, ofsAddr + Page_txb);
+                               writew(readw(ofsAddr + Page_txb) + C320p16tx_pgno, ofsAddr + EndPage_txb);
+                               break;
+
+                       case 24:
+                               writew(C320p24rx_mask, ofsAddr + RX_mask);
+                               writew(C320p24tx_mask, ofsAddr + TX_mask);
+                               writew(C320p24rx_spage + i * C320p24buf_pgno, ofsAddr + Page_rxb);
+                               writew(readw(ofsAddr + Page_rxb) + C320p24rx_pgno, ofsAddr + EndPage_rxb);
+                               writew(C320p24tx_spage + i * C320p24buf_pgno, ofsAddr + Page_txb);
+                               writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb);
+                               break;
+                       case 32:
+                               writew(C320p32rx_mask, ofsAddr + RX_mask);
+                               writew(C320p32tx_mask, ofsAddr + TX_mask);
+                               writew(C320p32tx_ofs, ofsAddr + Ofs_txb);
+                               writew(C320p32rx_spage + i * C320p32buf_pgno, ofsAddr + Page_rxb);
+                               writew(readb(ofsAddr + Page_rxb), ofsAddr + EndPage_rxb);
+                               writew(C320p32tx_spage + i * C320p32buf_pgno, ofsAddr + Page_txb);
+                               writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb);
+                               break;
+                       }
+               }
+               break;
+       }
+       return 0;
+}
+
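+/*
+ * Parse the firmware file header (magic, type, model and up to five
+ * section lengths), sanity-check the overall size, then hand the BIOS,
+ * the optional C320 extension and the main code sections to the loaders
+ * above.  The tty sections are skipped.
+ */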
+static int moxa_load_fw(struct moxa_board_conf *brd, const struct firmware *fw)
+{
+       void *ptr = fw->data;
+       char rsn[64];
+       u16 lens[5];
+       size_t len;
+       unsigned int a, lenp, lencnt;
+       int ret = -EINVAL;
+       struct {
+               __le32 magic;   /* 0x34303430 */
+               u8 reserved1[2];
+               u8 type;        /* UNIX = 3 */
+               u8 model;       /* C218T=1, C320T=2, CP204=3 */
+               u8 reserved2[8];
+               __le16 len[5];
+       } *hdr = ptr;
+
+       BUILD_BUG_ON(ARRAY_SIZE(hdr->len) != ARRAY_SIZE(lens));
+
+       if (fw->size < MOXA_FW_HDRLEN) {
+               strcpy(rsn, "too short (even header won't fit)");
+               goto err;
+       }
+       if (hdr->magic != cpu_to_le32(0x30343034)) {
+               sprintf(rsn, "bad magic: %.8x", le32_to_cpu(hdr->magic));
+               goto err;
+       }
+       if (hdr->type != 3) {
+               sprintf(rsn, "not for linux, type is %u", hdr->type);
+               goto err;
+       }
+       if (moxa_check_fw_model(brd, hdr->model)) {
+               sprintf(rsn, "not for this card, model is %u", hdr->model);
+               goto err;
+       }
+
+       len = MOXA_FW_HDRLEN;
+       lencnt = hdr->model == 2 ? 5 : 3;
+       for (a = 0; a < ARRAY_SIZE(lens); a++) {
+               lens[a] = le16_to_cpu(hdr->len[a]);
+               if (lens[a] && len + lens[a] <= fw->size &&
+                               moxa_check_fw(&fw->data[len]))
+                       printk(KERN_WARNING "MOXA firmware: unexpected input "
+                               "at offset %u, but going on\n", (u32)len);
+               if (!lens[a] && a < lencnt) {
+                       sprintf(rsn, "too few entries in fw file");
+                       goto err;
+               }
+               len += lens[a];
+       }
+
+       if (len != fw->size) {
+               sprintf(rsn, "bad length: %u (should be %u)", (u32)fw->size,
+                               (u32)len);
+               goto err;
+       }
+
+       ptr += MOXA_FW_HDRLEN;
+       lenp = 0; /* bios */
+
+       strcpy(rsn, "read above");
+
+       ret = moxa_load_bios(brd, ptr, lens[lenp]);
+       if (ret)
+               goto err;
+
+       /* we skip the tty section (lens[1]), since we don't need it */
+       ptr += lens[lenp] + lens[lenp + 1];
+       lenp += 2; /* comm */
+
+       if (hdr->model == 2) {
+               ret = moxa_load_320b(brd, ptr, lens[lenp]);
+               if (ret)
+                       goto err;
+               /* skip another tty */
+               ptr += lens[lenp] + lens[lenp + 1];
+               lenp += 2;
+       }
+
+       ret = moxa_load_code(brd, ptr, lens[lenp]);
+       if (ret)
+               goto err;
+
+       return 0;
+err:
+       printk(KERN_ERR "firmware failed to load, reason: %s\n", rsn);
+       return ret;
+}
+
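+/*
+ * Allocate and initialize the per-board port array, fetch the matching
+ * firmware file via request_firmware(), load it into the board and arm
+ * the polling timer once the board is marked ready.
+ */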
+static int moxa_init_board(struct moxa_board_conf *brd, struct device *dev)
+{
+       const struct firmware *fw;
+       const char *file;
+       struct moxa_port *p;
+       unsigned int i;
+       int ret;
+
+       brd->ports = kcalloc(MAX_PORTS_PER_BOARD, sizeof(*brd->ports),
+                       GFP_KERNEL);
+       if (brd->ports == NULL) {
+               printk(KERN_ERR "cannot allocate memory for ports\n");
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       for (i = 0, p = brd->ports; i < MAX_PORTS_PER_BOARD; i++, p++) {
+               p->type = PORT_16550A;
+               p->close_delay = 5 * HZ / 10;
+               p->cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL;
+               init_waitqueue_head(&p->open_wait);
+       }
+
+       switch (brd->boardType) {
+       case MOXA_BOARD_C218_ISA:
+       case MOXA_BOARD_C218_PCI:
+               file = "c218tunx.cod";
+               break;
+       case MOXA_BOARD_CP204J:
+               file = "cp204unx.cod";
+               break;
+       default:
+               file = "c320tunx.cod";
+               break;
+       }
+
+       ret = request_firmware(&fw, file, dev);
+       if (ret) {
+               printk(KERN_ERR "MOXA: request_firmware failed. Make sure "
+                               "you've placed the '%s' file into your firmware "
+                               "loader directory (e.g. /lib/firmware)\n",
+                               file);
+               goto err_free;
+       }
+
+       ret = moxa_load_fw(brd, fw);
+
+       release_firmware(fw);
+
+       if (ret)
+               goto err_free;
+
+       spin_lock_bh(&moxa_lock);
+       brd->ready = 1;
+       if (!timer_pending(&moxaTimer))
+               mod_timer(&moxaTimer, jiffies + HZ / 50);
+       spin_unlock_bh(&moxa_lock);
+
+       return 0;
+err_free:
+       kfree(brd->ports);
+err:
+       return ret;
+}
+
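+/*
+ * Counterpart of moxa_init_board: mark the board not ready, hang up any
+ * initialized port and wait until all of them are closed (needed for PCI
+ * hot-unplug), then unmap the shared memory and free the port array.
+ */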
+static void moxa_board_deinit(struct moxa_board_conf *brd)
+{
+       unsigned int a, opened;
+
+       mutex_lock(&moxa_openlock);
+       spin_lock_bh(&moxa_lock);
+       brd->ready = 0;
+       spin_unlock_bh(&moxa_lock);
+
+       /* pci hot-un-plug support */
+       for (a = 0; a < brd->numPorts; a++)
+               if (brd->ports[a].asyncflags & ASYNC_INITIALIZED)
+                       tty_hangup(brd->ports[a].tty);
+       while (1) {
+               opened = 0;
+               for (a = 0; a < brd->numPorts; a++)
+                       if (brd->ports[a].asyncflags & ASYNC_INITIALIZED)
+                               opened++;
+               mutex_unlock(&moxa_openlock);
+               if (!opened)
+                       break;
+               msleep(50);
+               mutex_lock(&moxa_openlock);
+       }
+
+       iounmap(brd->basemem);
+       brd->basemem = NULL;
+       kfree(brd->ports);
+}
+
+#ifdef CONFIG_PCI
+static int __devinit moxa_pci_probe(struct pci_dev *pdev,
+               const struct pci_device_id *ent)
+{
+       struct moxa_board_conf *board;
+       unsigned int i;
+       int board_type = ent->driver_data;
+       int retval;
+
+       retval = pci_enable_device(pdev);
+       if (retval) {
+               dev_err(&pdev->dev, "can't enable pci device\n");
+               goto err;
+       }
 
        for (i = 0; i < MAX_BOARDS; i++)
-               if (moxa_boards[i].basemem)
-                       iounmap(moxa_boards[i].basemem);
+               if (moxa_boards[i].basemem == NULL)
+                       break;
+
+       retval = -ENODEV;
+       if (i >= MAX_BOARDS) {
+               dev_warn(&pdev->dev, "more than %u MOXA Intellio family boards "
+                               "found. Board is ignored.\n", MAX_BOARDS);
+               goto err;
+       }
+
+       board = &moxa_boards[i];
+
+       retval = pci_request_region(pdev, 2, "moxa-base");
+       if (retval) {
+               dev_err(&pdev->dev, "can't request pci region 2\n");
+               goto err;
+       }
+
+       board->basemem = ioremap_nocache(pci_resource_start(pdev, 2), 0x4000);
+       if (board->basemem == NULL) {
+               dev_err(&pdev->dev, "can't remap io space 2\n");
+               goto err_reg;
+       }
+
+       board->boardType = board_type;
+       switch (board_type) {
+       case MOXA_BOARD_C218_ISA:
+       case MOXA_BOARD_C218_PCI:
+               board->numPorts = 8;
+               break;
+
+       case MOXA_BOARD_CP204J:
+               board->numPorts = 4;
+               break;
+       default:
+               board->numPorts = 0;
+               break;
+       }
+       board->busType = MOXA_BUS_TYPE_PCI;
+
+       retval = moxa_init_board(board, &pdev->dev);
+       if (retval)
+               goto err_base;
+
+       pci_set_drvdata(pdev, board);
+
+       dev_info(&pdev->dev, "board '%s' ready (%u ports, firmware loaded)\n",
+                       moxa_brdname[board_type - 1], board->numPorts);
+
+       return 0;
+err_base:
+       iounmap(board->basemem);
+       board->basemem = NULL;
+err_reg:
+       pci_release_region(pdev, 2);
+err:
+       return retval;
+}
+
+static void __devexit moxa_pci_remove(struct pci_dev *pdev)
+{
+       struct moxa_board_conf *brd = pci_get_drvdata(pdev);
+
+       moxa_board_deinit(brd);
+
+       pci_release_region(pdev, 2);
+}
+
+static struct pci_driver moxa_pci_driver = {
+       .name = "moxa",
+       .id_table = moxa_pcibrds,
+       .probe = moxa_pci_probe,
+       .remove = __devexit_p(moxa_pci_remove)
+};
+#endif /* CONFIG_PCI */
+
+static int __init moxa_init(void)
+{
+       unsigned int isabrds = 0;
+       int retval = 0;
+
+       printk(KERN_INFO "MOXA Intellio family driver version %s\n",
+                       MOXA_VERSION);
+       moxaDriver = alloc_tty_driver(MAX_PORTS + 1);
+       if (!moxaDriver)
+               return -ENOMEM;
+
+       moxaDriver->owner = THIS_MODULE;
+       moxaDriver->name = "ttyMX";
+       moxaDriver->major = ttymajor;
+       moxaDriver->minor_start = 0;
+       moxaDriver->type = TTY_DRIVER_TYPE_SERIAL;
+       moxaDriver->subtype = SERIAL_TYPE_NORMAL;
+       moxaDriver->init_termios = tty_std_termios;
+       moxaDriver->init_termios.c_cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL;
+       moxaDriver->init_termios.c_ispeed = 9600;
+       moxaDriver->init_termios.c_ospeed = 9600;
+       moxaDriver->flags = TTY_DRIVER_REAL_RAW;
+       tty_set_operations(moxaDriver, &moxa_ops);
+
+       if (tty_register_driver(moxaDriver)) {
+               printk(KERN_ERR "can't register MOXA Intellio family tty driver!\n");
+               put_tty_driver(moxaDriver);
+               return -1;
+       }
+
+       /* Find the boards defined from module args. */
+#ifdef MODULE
+       {
+       struct moxa_board_conf *brd = moxa_boards;
+       unsigned int i;
+       for (i = 0; i < MAX_BOARDS; i++) {
+               if (!baseaddr[i])
+                       break;
+               if (type[i] == MOXA_BOARD_C218_ISA ||
+                               type[i] == MOXA_BOARD_C320_ISA) {
+                       pr_debug("Moxa board %2d: %s board(baseAddr=%lx)\n",
+                                       isabrds + 1, moxa_brdname[type[i] - 1],
+                                       baseaddr[i]);
+                       brd->boardType = type[i];
+                       brd->numPorts = type[i] == MOXA_BOARD_C218_ISA ? 8 :
+                                       numports[i];
+                       brd->busType = MOXA_BUS_TYPE_ISA;
+                       brd->basemem = ioremap_nocache(baseaddr[i], 0x4000);
+                       if (!brd->basemem) {
+                               printk(KERN_ERR "MOXA: can't remap %lx\n",
+                                               baseaddr[i]);
+                               continue;
+                       }
+                       if (moxa_init_board(brd, NULL)) {
+                               iounmap(brd->basemem);
+                               brd->basemem = NULL;
+                               continue;
+                       }
+
+                       printk(KERN_INFO "MOXA ISA board found at 0x%.8lx and "
+                                       "ready (%u ports, firmware loaded)\n",
+                                       baseaddr[i], brd->numPorts);
+
+                       brd++;
+                       isabrds++;
+               }
+       }
+       }
+#endif
+
+#ifdef CONFIG_PCI
+       retval = pci_register_driver(&moxa_pci_driver);
+       if (retval) {
+               printk(KERN_ERR "Can't register MOXA pci driver!\n");
+               if (isabrds)
+                       retval = 0;
+       }
+#endif
+
+       return retval;
+}
+
+static void __exit moxa_exit(void)
+{
+       unsigned int i;
+
+#ifdef CONFIG_PCI
+       pci_unregister_driver(&moxa_pci_driver);
+#endif
+
+       for (i = 0; i < MAX_BOARDS; i++) /* ISA boards */
+               if (moxa_boards[i].ready)
+                       moxa_board_deinit(&moxa_boards[i]);
+
+       del_timer_sync(&moxaTimer);
+
+       if (tty_unregister_driver(moxaDriver))
+               printk(KERN_ERR "Couldn't unregister MOXA Intellio family "
+                               "serial driver\n");
+       put_tty_driver(moxaDriver);
 }
 
 module_init(moxa_init);
 module_exit(moxa_exit);
 
+static void moxa_close_port(struct moxa_port *ch)
+{
+       moxa_shut_down(ch);
+       MoxaPortFlushData(ch, 2);
+       ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE;
+       ch->tty->driver_data = NULL;
+       ch->tty = NULL;
+}
+
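+/*
+ * Simplified carrier wait: sleep on open_wait until DCD is reported, or
+ * until the wait is broken by a hangup or a signal.  O_NONBLOCK and
+ * CLOCAL are handled by the caller (moxa_open) now.
+ */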
+static int moxa_block_till_ready(struct tty_struct *tty, struct file *filp,
+                           struct moxa_port *ch)
+{
+       DEFINE_WAIT(wait);
+       int retval = 0;
+       u8 dcd;
+
+       while (1) {
+               prepare_to_wait(&ch->open_wait, &wait, TASK_INTERRUPTIBLE);
+               if (tty_hung_up_p(filp)) {
+#ifdef SERIAL_DO_RESTART
+                       retval = -ERESTARTSYS;
+#else
+                       retval = -EAGAIN;
+#endif
+                       break;
+               }
+               spin_lock_bh(&moxa_lock);
+               dcd = ch->DCDState;
+               spin_unlock_bh(&moxa_lock);
+               if (dcd)
+                       break;
+
+               if (signal_pending(current)) {
+                       retval = -ERESTARTSYS;
+                       break;
+               }
+               schedule();
+       }
+       finish_wait(&ch->open_wait, &wait);
+
+       return retval;
+}
+
 static int moxa_open(struct tty_struct *tty, struct file *filp)
 {
+       struct moxa_board_conf *brd;
        struct moxa_port *ch;
        int port;
        int retval;
 
        port = tty->index;
        if (port == MAX_PORTS) {
-               return (0);
+               return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
        }
-       if (!MoxaPortIsValid(port)) {
-               tty->driver_data = NULL;
-               return (-ENODEV);
+       if (mutex_lock_interruptible(&moxa_openlock))
+               return -ERESTARTSYS;
+       brd = &moxa_boards[port / MAX_PORTS_PER_BOARD];
+       if (!brd->ready) {
+               mutex_unlock(&moxa_openlock);
+               return -ENODEV;
        }
 
-       ch = &moxa_ports[port];
+       ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
        ch->count++;
        tty->driver_data = ch;
        ch->tty = tty;
        if (!(ch->asyncflags & ASYNC_INITIALIZED)) {
                ch->statusflags = 0;
                moxa_set_tty_param(tty, tty->termios);
-               MoxaPortLineCtrl(ch->port, 1, 1);
-               MoxaPortEnable(ch->port);
+               MoxaPortLineCtrl(ch, 1, 1);
+               MoxaPortEnable(ch);
+               MoxaSetFifo(ch, ch->type == PORT_16550A);
                ch->asyncflags |= ASYNC_INITIALIZED;
        }
-       retval = moxa_block_till_ready(tty, filp, ch);
+       mutex_unlock(&moxa_openlock);
 
-       moxa_unthrottle(tty);
-
-       if (ch->type == PORT_16550A) {
-               MoxaSetFifo(ch->port, 1);
-       } else {
-               MoxaSetFifo(ch->port, 0);
-       }
+       retval = 0;
+       if (!(filp->f_flags & O_NONBLOCK) && !C_CLOCAL(tty))
+               retval = moxa_block_till_ready(tty, filp, ch);
+       mutex_lock(&moxa_openlock);
+       if (retval) {
+               if (ch->count) /* 0 means already hung up... */
+                       if (--ch->count == 0)
+                               moxa_close_port(ch);
+       } else
+               ch->asyncflags |= ASYNC_NORMAL_ACTIVE;
+       mutex_unlock(&moxa_openlock);
 
-       return (retval);
+       return retval;
 }
 
 static void moxa_close(struct tty_struct *tty, struct file *filp)
@@ -524,23 +1197,14 @@ static void moxa_close(struct tty_struct *tty, struct file *filp)
        int port;
 
        port = tty->index;
-       if (port == MAX_PORTS) {
-               return;
-       }
-       if (!MoxaPortIsValid(port)) {
-               pr_debug("Invalid portno in moxa_close\n");
-               tty->driver_data = NULL;
+       if (port == MAX_PORTS || tty_hung_up_p(filp))
                return;
-       }
-       if (tty->driver_data == NULL) {
-               return;
-       }
-       if (tty_hung_up_p(filp)) {
-               return;
-       }
-       ch = (struct moxa_port *) tty->driver_data;
 
-       if ((tty->count == 1) && (ch->count != 1)) {
+       mutex_lock(&moxa_openlock);
+       ch = tty->driver_data;
+       if (ch == NULL)
+               goto unlock;
+       if (tty->count == 1 && ch->count != 1) {
                printk(KERN_WARNING "moxa_close: bad serial port count; "
                        "tty->count is 1, ch->count is %d\n", ch->count);
                ch->count = 1;
@@ -550,59 +1214,35 @@ static void moxa_close(struct tty_struct *tty, struct file *filp)
                        "device=%s\n", tty->name);
                ch->count = 0;
        }
-       if (ch->count) {
-               return;
-       }
-       ch->asyncflags |= ASYNC_CLOSING;
+       if (ch->count)
+               goto unlock;
 
        ch->cflag = tty->termios->c_cflag;
        if (ch->asyncflags & ASYNC_INITIALIZED) {
                moxa_setup_empty_event(tty);
                tty_wait_until_sent(tty, 30 * HZ);      /* 30 seconds timeout */
-               del_timer_sync(&moxa_ports[ch->port].emptyTimer);
        }
-       moxa_shut_down(ch);
-       MoxaPortFlushData(port, 2);
-
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
-       tty_ldisc_flush(tty);
-                       
-       tty->closing = 0;
-       ch->event = 0;
-       ch->tty = NULL;
-       if (ch->blocked_open) {
-               if (ch->close_delay) {
-                       msleep_interruptible(jiffies_to_msecs(ch->close_delay));
-               }
-               wake_up_interruptible(&ch->open_wait);
-       }
-       ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING);
-       complete_all(&ch->close_wait);
+
+       moxa_close_port(ch);
+unlock:
+       mutex_unlock(&moxa_openlock);
 }
 
 static int moxa_write(struct tty_struct *tty,
                      const unsigned char *buf, int count)
 {
-       struct moxa_port *ch;
-       int len, port;
-       unsigned long flags;
+       struct moxa_port *ch = tty->driver_data;
+       int len;
 
-       ch = (struct moxa_port *) tty->driver_data;
        if (ch == NULL)
-               return (0);
-       port = ch->port;
+               return 0;
 
-       spin_lock_irqsave(&moxa_lock, flags);
-       len = MoxaPortWriteData(port, (unsigned char *) buf, count);
-       spin_unlock_irqrestore(&moxa_lock, flags);
+       spin_lock_bh(&moxa_lock);
+       len = MoxaPortWriteData(ch, buf, count);
+       spin_unlock_bh(&moxa_lock);
 
-       /*********************************************
-       if ( !(ch->statusflags & LOWWAIT) &&
-            ((len != count) || (MoxaPortTxFree(port) <= 100)) )
-       ************************************************/
        ch->statusflags |= LOWWAIT;
-       return (len);
+       return len;
 }
 
 static int moxa_write_room(struct tty_struct *tty)
@@ -610,27 +1250,27 @@ static int moxa_write_room(struct tty_struct *tty)
        struct moxa_port *ch;
 
        if (tty->stopped)
-               return (0);
-       ch = (struct moxa_port *) tty->driver_data;
+               return 0;
+       ch = tty->driver_data;
        if (ch == NULL)
-               return (0);
-       return (MoxaPortTxFree(ch->port));
+               return 0;
+       return MoxaPortTxFree(ch);
 }
 
 static void moxa_flush_buffer(struct tty_struct *tty)
 {
-       struct moxa_port *ch = (struct moxa_port *) tty->driver_data;
+       struct moxa_port *ch = tty->driver_data;
 
        if (ch == NULL)
                return;
-       MoxaPortFlushData(ch->port, 1);
+       MoxaPortFlushData(ch, 1);
        tty_wakeup(tty);
 }
 
 static int moxa_chars_in_buffer(struct tty_struct *tty)
 {
+       struct moxa_port *ch = tty->driver_data;
        int chars;
-       struct moxa_port *ch = (struct moxa_port *) tty->driver_data;
 
        /*
         * Sigh...I have to check if driver_data is NULL here, because
@@ -639,8 +1279,9 @@ static int moxa_chars_in_buffer(struct tty_struct *tty)
         * routine.  And since the open() failed, we return 0 here.  TDJ
         */
        if (ch == NULL)
-               return (0);
-       chars = MoxaPortTxQueue(ch->port);
+               return 0;
+       lock_kernel();
+       chars = MoxaPortTxQueue(ch);
        if (chars) {
                /*
                 * Make it possible to wakeup anything waiting for output
@@ -649,73 +1290,54 @@ static int moxa_chars_in_buffer(struct tty_struct *tty)
                if (!(ch->statusflags & EMPTYWAIT))
                        moxa_setup_empty_event(tty);
        }
-       return (chars);
-}
-
-static void moxa_flush_chars(struct tty_struct *tty)
-{
-       /*
-        * Don't think I need this, because this is called to empty the TX
-        * buffer for the 16450, 16550, etc.
-        */
-}
-
-static void moxa_put_char(struct tty_struct *tty, unsigned char c)
-{
-       struct moxa_port *ch;
-       int port;
-       unsigned long flags;
-
-       ch = (struct moxa_port *) tty->driver_data;
-       if (ch == NULL)
-               return;
-       port = ch->port;
-       spin_lock_irqsave(&moxa_lock, flags);
-       MoxaPortWriteData(port, &c, 1);
-       spin_unlock_irqrestore(&moxa_lock, flags);
-       /************************************************
-       if ( !(ch->statusflags & LOWWAIT) && (MoxaPortTxFree(port) <= 100) )
-       *************************************************/
-       ch->statusflags |= LOWWAIT;
+       unlock_kernel();
+       return chars;
 }
 
 static int moxa_tiocmget(struct tty_struct *tty, struct file *file)
 {
-       struct moxa_port *ch = (struct moxa_port *) tty->driver_data;
-       int port;
+       struct moxa_port *ch;
        int flag = 0, dtr, rts;
 
-       port = tty->index;
-       if ((port != MAX_PORTS) && (!ch))
-               return (-EINVAL);
+       mutex_lock(&moxa_openlock);
+       ch = tty->driver_data;
+       if (!ch) {
+               mutex_unlock(&moxa_openlock);
+               return -EINVAL;
+       }
 
-       MoxaPortGetLineOut(ch->port, &dtr, &rts);
+       MoxaPortGetLineOut(ch, &dtr, &rts);
        if (dtr)
                flag |= TIOCM_DTR;
        if (rts)
                flag |= TIOCM_RTS;
-       dtr = MoxaPortLineStatus(ch->port);
+       dtr = MoxaPortLineStatus(ch);
        if (dtr & 1)
                flag |= TIOCM_CTS;
        if (dtr & 2)
                flag |= TIOCM_DSR;
        if (dtr & 4)
                flag |= TIOCM_CD;
+       mutex_unlock(&moxa_openlock);
        return flag;
 }
 
 static int moxa_tiocmset(struct tty_struct *tty, struct file *file,
                         unsigned int set, unsigned int clear)
 {
-       struct moxa_port *ch = (struct moxa_port *) tty->driver_data;
+       struct moxa_port *ch;
        int port;
        int dtr, rts;
 
        port = tty->index;
-       if ((port != MAX_PORTS) && (!ch))
-               return (-EINVAL);
+       mutex_lock(&moxa_openlock);
+       ch = tty->driver_data;
+       if (!ch) {
+               mutex_unlock(&moxa_openlock);
+               return -EINVAL;
+       }
 
-       MoxaPortGetLineOut(ch->port, &dtr, &rts);
+       MoxaPortGetLineOut(ch, &dtr, &rts);
        if (set & TIOCM_RTS)
                rts = 1;
        if (set & TIOCM_DTR)
@@ -724,105 +1346,51 @@ static int moxa_tiocmset(struct tty_struct *tty, struct file *file,
                rts = 0;
        if (clear & TIOCM_DTR)
                dtr = 0;
-       MoxaPortLineCtrl(ch->port, dtr, rts);
+       MoxaPortLineCtrl(ch, dtr, rts);
+       mutex_unlock(&moxa_openlock);
        return 0;
 }
 
-static int moxa_ioctl(struct tty_struct *tty, struct file *file,
-                     unsigned int cmd, unsigned long arg)
-{
-       struct moxa_port *ch = (struct moxa_port *) tty->driver_data;
-       register int port;
-       void __user *argp = (void __user *)arg;
-       int retval;
-
-       port = tty->index;
-       if ((port != MAX_PORTS) && (!ch))
-               return (-EINVAL);
-
-       switch (cmd) {
-       case TCSBRK:            /* SVID version: non-zero arg --> no break */
-               retval = tty_check_change(tty);
-               if (retval)
-                       return (retval);
-               moxa_setup_empty_event(tty);
-               tty_wait_until_sent(tty, 0);
-               if (!arg)
-                       MoxaPortSendBreak(ch->port, 0);
-               return (0);
-       case TCSBRKP:           /* support for POSIX tcsendbreak() */
-               retval = tty_check_change(tty);
-               if (retval)
-                       return (retval);
-               moxa_setup_empty_event(tty);
-               tty_wait_until_sent(tty, 0);
-               MoxaPortSendBreak(ch->port, arg);
-               return (0);
-       case TIOCGSOFTCAR:
-               return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) argp);
-       case TIOCSSOFTCAR:
-               if(get_user(retval, (unsigned long __user *) argp))
-                       return -EFAULT;
-               arg = retval;
-               tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) |
-                                        (arg ? CLOCAL : 0));
-               if (C_CLOCAL(tty))
-                       ch->asyncflags &= ~ASYNC_CHECK_CD;
-               else
-                       ch->asyncflags |= ASYNC_CHECK_CD;
-               return (0);
-       case TIOCGSERIAL:
-               return moxa_get_serial_info(ch, argp);
-
-       case TIOCSSERIAL:
-               return moxa_set_serial_info(ch, argp);
-       default:
-               retval = MoxaDriverIoctl(cmd, arg, port);
-       }
-       return (retval);
-}
-
 static void moxa_throttle(struct tty_struct *tty)
 {
-       struct moxa_port *ch = (struct moxa_port *) tty->driver_data;
+       struct moxa_port *ch = tty->driver_data;
 
        ch->statusflags |= THROTTLE;
 }
 
 static void moxa_unthrottle(struct tty_struct *tty)
 {
-       struct moxa_port *ch = (struct moxa_port *) tty->driver_data;
+       struct moxa_port *ch = tty->driver_data;
 
        ch->statusflags &= ~THROTTLE;
 }
 
 static void moxa_set_termios(struct tty_struct *tty,
-                            struct ktermios *old_termios)
+               struct ktermios *old_termios)
 {
-       struct moxa_port *ch = (struct moxa_port *) tty->driver_data;
+       struct moxa_port *ch = tty->driver_data;
 
        if (ch == NULL)
                return;
        moxa_set_tty_param(tty, old_termios);
-       if (!(old_termios->c_cflag & CLOCAL) &&
-           (tty->termios->c_cflag & CLOCAL))
+       if (!(old_termios->c_cflag & CLOCAL) && C_CLOCAL(tty))
                wake_up_interruptible(&ch->open_wait);
 }
 
 static void moxa_stop(struct tty_struct *tty)
 {
-       struct moxa_port *ch = (struct moxa_port *) tty->driver_data;
+       struct moxa_port *ch = tty->driver_data;
 
        if (ch == NULL)
                return;
-       MoxaPortTxDisable(ch->port);
+       MoxaPortTxDisable(ch);
        ch->statusflags |= TXSTOPPED;
 }
 
 
 static void moxa_start(struct tty_struct *tty)
 {
-       struct moxa_port *ch = (struct moxa_port *) tty->driver_data;
+       struct moxa_port *ch = tty->driver_data;
 
        if (ch == NULL)
                return;
@@ -830,91 +1398,143 @@ static void moxa_start(struct tty_struct *tty)
        if (!(ch->statusflags & TXSTOPPED))
                return;
 
-       MoxaPortTxEnable(ch->port);
+       MoxaPortTxEnable(ch);
        ch->statusflags &= ~TXSTOPPED;
 }
 
 static void moxa_hangup(struct tty_struct *tty)
 {
-       struct moxa_port *ch = (struct moxa_port *) tty->driver_data;
+       struct moxa_port *ch;
 
-       moxa_flush_buffer(tty);
-       moxa_shut_down(ch);
-       ch->event = 0;
+       mutex_lock(&moxa_openlock);
+       ch = tty->driver_data;
+       if (ch == NULL) {
+               mutex_unlock(&moxa_openlock);
+               return;
+       }
        ch->count = 0;
-       ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE;
-       ch->tty = NULL;
+       moxa_close_port(ch);
+       mutex_unlock(&moxa_openlock);
+
        wake_up_interruptible(&ch->open_wait);
 }
 
+static void moxa_new_dcdstate(struct moxa_port *p, u8 dcd)
+{
+       dcd = !!dcd;
+
+       if (dcd != p->DCDState && p->tty && C_CLOCAL(p->tty)) {
+               if (!dcd)
+                       tty_hangup(p->tty);
+       }
+       p->DCDState = dcd;
+}
+
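+/*
+ * Per-port work done from the poll timer: wake up writers waiting on
+ * LOWWAIT/EMPTYWAIT, pull received data into the tty flip buffer and, if
+ * the board flagged an interrupt for this port, ack it and handle break
+ * and DCD changes.
+ */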
+static int moxa_poll_port(struct moxa_port *p, unsigned int handle,
+               u16 __iomem *ip)
+{
+       struct tty_struct *tty = p->tty;
+       void __iomem *ofsAddr;
+       unsigned int inited = p->asyncflags & ASYNC_INITIALIZED;
+       u16 intr;
+
+       if (tty) {
+               if ((p->statusflags & EMPTYWAIT) &&
+                               MoxaPortTxQueue(p) == 0) {
+                       p->statusflags &= ~EMPTYWAIT;
+                       tty_wakeup(tty);
+               }
+               if ((p->statusflags & LOWWAIT) && !tty->stopped &&
+                               MoxaPortTxQueue(p) <= WAKEUP_CHARS) {
+                       p->statusflags &= ~LOWWAIT;
+                       tty_wakeup(tty);
+               }
+
+               if (inited && !(p->statusflags & THROTTLE) &&
+                               MoxaPortRxQueue(p) > 0) { /* RX */
+                       MoxaPortReadData(p);
+                       tty_schedule_flip(tty);
+               }
+       } else {
+               p->statusflags &= ~EMPTYWAIT;
+               MoxaPortFlushData(p, 0); /* flush RX */
+       }
+
+       if (!handle) /* nothing else to do */
+               return 0;
+
+       intr = readw(ip); /* port irq status */
+       if (intr == 0)
+               return 0;
+
+       writew(0, ip); /* ACK port */
+       ofsAddr = p->tableAddr;
+       if (intr & IntrTx) /* disable tx intr */
+               writew(readw(ofsAddr + HostStat) & ~WakeupTx,
+                               ofsAddr + HostStat);
+
+       if (!inited)
+               return 0;
+
+       if (tty && (intr & IntrBreak) && !I_IGNBRK(tty)) { /* BREAK */
+               tty_insert_flip_char(tty, 0, TTY_BREAK);
+               tty_schedule_flip(tty);
+       }
+
+       if (intr & IntrLine)
+               moxa_new_dcdstate(p, readb(ofsAddr + FlagStat) & DCD_state);
+
+       return 0;
+}
+
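+/*
+ * One driver-wide timer now walks every ready board, services all of its
+ * ports, acks the board-level interrupt and re-arms itself as long as at
+ * least one board is ready.
+ */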
 static void moxa_poll(unsigned long ignored)
 {
-       register int card;
-       struct moxa_port *ch;
-       struct tty_struct *tp;
-       int i, ports;
-
-       del_timer(&moxaTimer);
+       struct moxa_board_conf *brd;
+       u16 __iomem *ip;
+       unsigned int card, port, served = 0;
 
-       if (MoxaDriverPoll() < 0) {
-               mod_timer(&moxaTimer, jiffies + HZ / 50);
-               return;
-       }
+       spin_lock(&moxa_lock);
        for (card = 0; card < MAX_BOARDS; card++) {
-               if ((ports = MoxaPortsOfCard(card)) <= 0)
+               brd = &moxa_boards[card];
+               if (!brd->ready)
                        continue;
-               ch = &moxa_ports[card * MAX_PORTS_PER_BOARD];
-               for (i = 0; i < ports; i++, ch++) {
-                       if ((ch->asyncflags & ASYNC_INITIALIZED) == 0)
-                               continue;
-                       if (!(ch->statusflags & THROTTLE) &&
-                           (MoxaPortRxQueue(ch->port) > 0))
-                               moxa_receive_data(ch);
-                       if ((tp = ch->tty) == 0)
-                               continue;
-                       if (ch->statusflags & LOWWAIT) {
-                               if (MoxaPortTxQueue(ch->port) <= WAKEUP_CHARS) {
-                                       if (!tp->stopped) {
-                                               ch->statusflags &= ~LOWWAIT;
-                                               tty_wakeup(tp);
-                                       }
-                               }
-                       }
-                       if (!I_IGNBRK(tp) && (MoxaPortResetBrkCnt(ch->port) > 0)) {
-                               tty_insert_flip_char(tp, 0, TTY_BREAK);
-                               tty_schedule_flip(tp);
-                       }
-                       if (MoxaPortDCDChange(ch->port)) {
-                               if (ch->asyncflags & ASYNC_CHECK_CD) {
-                                       if (MoxaPortDCDON(ch->port))
-                                               wake_up_interruptible(&ch->open_wait);
-                                       else {
-                                               tty_hangup(tp);
-                                               wake_up_interruptible(&ch->open_wait);
-                                               ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE;
-                                       }
+
+               served++;
+
+               ip = NULL;
+               if (readb(brd->intPend) == 0xff)
+                       ip = brd->intTable + readb(brd->intNdx);
+
+               for (port = 0; port < brd->numPorts; port++)
+                       moxa_poll_port(&brd->ports[port], !!ip, ip + port);
+
+               if (ip)
+                       writeb(0, brd->intPend); /* ACK */
+
+               if (moxaLowWaterChk) {
+                       struct moxa_port *p = brd->ports;
+                       for (port = 0; port < brd->numPorts; port++, p++)
+                               if (p->lowChkFlag) {
+                                       p->lowChkFlag = 0;
+                                       moxa_low_water_check(p->tableAddr);
                                }
-                       }
                }
        }
+       moxaLowWaterChk = 0;
 
-       mod_timer(&moxaTimer, jiffies + HZ / 50);
+       if (served)
+               mod_timer(&moxaTimer, jiffies + HZ / 50);
+       spin_unlock(&moxa_lock);
 }
 
 /******************************************************************************/
 
 static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_termios)
 {
-       register struct ktermios *ts;
-       struct moxa_port *ch;
+       register struct ktermios *ts = tty->termios;
+       struct moxa_port *ch = tty->driver_data;
        int rts, cts, txflow, rxflow, xany, baud;
 
-       ch = (struct moxa_port *) tty->driver_data;
-       ts = tty->termios;
-       if (ts->c_cflag & CLOCAL)
-               ch->asyncflags &= ~ASYNC_CHECK_CD;
-       else
-               ch->asyncflags |= ASYNC_CHECK_CD;
        rts = cts = txflow = rxflow = xany = 0;
        if (ts->c_cflag & CRTSCTS)
                rts = cts = 1;
@@ -927,776 +1547,60 @@ static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_term
 
        /* Clear the features we don't support */
        ts->c_cflag &= ~CMSPAR;
-       MoxaPortFlowCtrl(ch->port, rts, cts, txflow, rxflow, xany);
-       baud = MoxaPortSetTermio(ch->port, ts, tty_get_baud_rate(tty));
+       MoxaPortFlowCtrl(ch, rts, cts, txflow, rxflow, xany);
+       baud = MoxaPortSetTermio(ch, ts, tty_get_baud_rate(tty));
        if (baud == -1)
                baud = tty_termios_baud_rate(old_termios);
        /* Now put the baud rate into the termios data */
        tty_encode_baud_rate(tty, baud, baud);
 }
 
-static int moxa_block_till_ready(struct tty_struct *tty, struct file *filp,
-                           struct moxa_port *ch)
-{
-       DECLARE_WAITQUEUE(wait,current);
-       unsigned long flags;
-       int retval;
-       int do_clocal = C_CLOCAL(tty);
-
-       /*
-        * If the device is in the middle of being closed, then block
-        * until it's done, and then try again.
-        */
-       if (tty_hung_up_p(filp) || (ch->asyncflags & ASYNC_CLOSING)) {
-               if (ch->asyncflags & ASYNC_CLOSING)
-                       wait_for_completion_interruptible(&ch->close_wait);
-#ifdef SERIAL_DO_RESTART
-               if (ch->asyncflags & ASYNC_HUP_NOTIFY)
-                       return (-EAGAIN);
-               else
-                       return (-ERESTARTSYS);
-#else
-               return (-EAGAIN);
-#endif
-       }
-       /*
-        * If non-blocking mode is set, then make the check up front
-        * and then exit.
-        */
-       if (filp->f_flags & O_NONBLOCK) {
-               ch->asyncflags |= ASYNC_NORMAL_ACTIVE;
-               return (0);
-       }
-       /*
-        * Block waiting for the carrier detect and the line to become free
-        */
-       retval = 0;
-       add_wait_queue(&ch->open_wait, &wait);
-       pr_debug("block_til_ready before block: ttys%d, count = %d\n",
-               ch->port, ch->count);
-       spin_lock_irqsave(&moxa_lock, flags);
-       if (!tty_hung_up_p(filp))
-               ch->count--;
-       ch->blocked_open++;
-       spin_unlock_irqrestore(&moxa_lock, flags);
-
-       while (1) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               if (tty_hung_up_p(filp) ||
-                   !(ch->asyncflags & ASYNC_INITIALIZED)) {
-#ifdef SERIAL_DO_RESTART
-                       if (ch->asyncflags & ASYNC_HUP_NOTIFY)
-                               retval = -EAGAIN;
-                       else
-                               retval = -ERESTARTSYS;
-#else
-                       retval = -EAGAIN;
-#endif
-                       break;
-               }
-               if (!(ch->asyncflags & ASYNC_CLOSING) && (do_clocal ||
-                                               MoxaPortDCDON(ch->port)))
-                       break;
-
-               if (signal_pending(current)) {
-                       retval = -ERESTARTSYS;
-                       break;
-               }
-               schedule();
-       }
-       set_current_state(TASK_RUNNING);
-       remove_wait_queue(&ch->open_wait, &wait);
-
-       spin_lock_irqsave(&moxa_lock, flags);
-       if (!tty_hung_up_p(filp))
-               ch->count++;
-       ch->blocked_open--;
-       spin_unlock_irqrestore(&moxa_lock, flags);
-       pr_debug("block_til_ready after blocking: ttys%d, count = %d\n",
-               ch->port, ch->count);
-       if (retval)
-               return (retval);
-       /* FIXME: review to see if we need to use set_bit on these */
-       ch->asyncflags |= ASYNC_NORMAL_ACTIVE;
-       return 0;
-}
-
 static void moxa_setup_empty_event(struct tty_struct *tty)
 {
        struct moxa_port *ch = tty->driver_data;
-       unsigned long flags;
 
-       spin_lock_irqsave(&moxa_lock, flags);
+       spin_lock_bh(&moxa_lock);
        ch->statusflags |= EMPTYWAIT;
-       mod_timer(&moxa_ports[ch->port].emptyTimer, jiffies + HZ);
-       spin_unlock_irqrestore(&moxa_lock, flags);
-}
-
-static void moxa_check_xmit_empty(unsigned long data)
-{
-       struct moxa_port *ch;
-
-       ch = (struct moxa_port *) data;
-       if (ch->tty && (ch->statusflags & EMPTYWAIT)) {
-               if (MoxaPortTxQueue(ch->port) == 0) {
-                       ch->statusflags &= ~EMPTYWAIT;
-                       tty_wakeup(ch->tty);
-                       return;
-               }
-               mod_timer(&moxa_ports[ch->port].emptyTimer,
-                               round_jiffies(jiffies + HZ));
-       } else
-               ch->statusflags &= ~EMPTYWAIT;
+       spin_unlock_bh(&moxa_lock);
 }
 
 static void moxa_shut_down(struct moxa_port *ch)
 {
-       struct tty_struct *tp;
+       struct tty_struct *tp = ch->tty;
 
        if (!(ch->asyncflags & ASYNC_INITIALIZED))
                return;
 
-       tp = ch->tty;
-
-       MoxaPortDisable(ch->port);
+       MoxaPortDisable(ch);
 
        /*
         * If we're a modem control device and HUPCL is on, drop RTS & DTR.
         */
-       if (tp->termios->c_cflag & HUPCL)
-               MoxaPortLineCtrl(ch->port, 0, 0);
+       if (C_HUPCL(tp))
+               MoxaPortLineCtrl(ch, 0, 0);
 
+       spin_lock_bh(&moxa_lock);
        ch->asyncflags &= ~ASYNC_INITIALIZED;
+       spin_unlock_bh(&moxa_lock);
 }
 
-static void moxa_receive_data(struct moxa_port *ch)
-{
-       struct tty_struct *tp;
-       struct ktermios *ts;
-       unsigned long flags;
-
-       ts = NULL;
-       tp = ch->tty;
-       if (tp)
-               ts = tp->termios;
-       /**************************************************
-       if ( !tp || !ts || !(ts->c_cflag & CREAD) ) {
-       *****************************************************/
-       if (!tp || !ts) {
-               MoxaPortFlushData(ch->port, 0);
-               return;
-       }
-       spin_lock_irqsave(&moxa_lock, flags);
-       MoxaPortReadData(ch->port, tp);
-       spin_unlock_irqrestore(&moxa_lock, flags);
-       tty_schedule_flip(tp);
-}
-
-#define Magic_code     0x404
-
-/*
- *    System Configuration
- */
-/*
- *    for C218 BIOS initialization
- */
-#define C218_ConfBase  0x800
-#define C218_status    (C218_ConfBase + 0)     /* BIOS running status    */
-#define C218_diag      (C218_ConfBase + 2)     /* diagnostic status      */
-#define C218_key       (C218_ConfBase + 4)     /* WORD (0x218 for C218) */
-#define C218DLoad_len  (C218_ConfBase + 6)     /* WORD           */
-#define C218check_sum  (C218_ConfBase + 8)     /* BYTE           */
-#define C218chksum_ok  (C218_ConfBase + 0x0a)  /* BYTE (1:ok)            */
-#define C218_TestRx    (C218_ConfBase + 0x10)  /* 8 bytes for 8 ports    */
-#define C218_TestTx    (C218_ConfBase + 0x18)  /* 8 bytes for 8 ports    */
-#define C218_RXerr     (C218_ConfBase + 0x20)  /* 8 bytes for 8 ports    */
-#define C218_ErrFlag   (C218_ConfBase + 0x28)  /* 8 bytes for 8 ports    */
-
-#define C218_LoadBuf   0x0F00
-#define C218_KeyCode   0x218
-#define CP204J_KeyCode 0x204
-
-/*
- *    for C320 BIOS initialization
- */
-#define C320_ConfBase  0x800
-#define C320_LoadBuf   0x0f00
-#define STS_init       0x05    /* for C320_status        */
-
-#define C320_status    C320_ConfBase + 0       /* BIOS running status    */
-#define C320_diag      C320_ConfBase + 2       /* diagnostic status      */
-#define C320_key       C320_ConfBase + 4       /* WORD (0320H for C320) */
-#define C320DLoad_len  C320_ConfBase + 6       /* WORD           */
-#define C320check_sum  C320_ConfBase + 8       /* WORD           */
-#define C320chksum_ok  C320_ConfBase + 0x0a    /* WORD (1:ok)            */
-#define C320bapi_len   C320_ConfBase + 0x0c    /* WORD           */
-#define C320UART_no    C320_ConfBase + 0x0e    /* WORD           */
-
-#define C320_KeyCode   0x320
-
-#define FixPage_addr   0x0000  /* starting addr of static page  */
-#define DynPage_addr   0x2000  /* starting addr of dynamic page */
-#define C218_start     0x3000  /* starting addr of C218 BIOS prg */
-#define Control_reg    0x1ff0  /* select page and reset control */
-#define HW_reset       0x80
-
-/*
- *    Function Codes
- */
-#define FC_CardReset   0x80
-#define FC_ChannelReset 1      /* C320 firmware not supported */
-#define FC_EnableCH    2
-#define FC_DisableCH   3
-#define FC_SetParam    4
-#define FC_SetMode     5
-#define FC_SetRate     6
-#define FC_LineControl 7
-#define FC_LineStatus  8
-#define FC_XmitControl 9
-#define FC_FlushQueue  10
-#define FC_SendBreak   11
-#define FC_StopBreak   12
-#define FC_LoopbackON  13
-#define FC_LoopbackOFF 14
-#define FC_ClrIrqTable 15
-#define FC_SendXon     16
-#define FC_SetTermIrq  17      /* C320 firmware not supported */
-#define FC_SetCntIrq   18      /* C320 firmware not supported */
-#define FC_SetBreakIrq 19
-#define FC_SetLineIrq  20
-#define FC_SetFlowCtl  21
-#define FC_GenIrq      22
-#define FC_InCD180     23
-#define FC_OutCD180    24
-#define FC_InUARTreg   23
-#define FC_OutUARTreg  24
-#define FC_SetXonXoff  25
-#define FC_OutCD180CCR 26
-#define FC_ExtIQueue   27
-#define FC_ExtOQueue   28
-#define FC_ClrLineIrq  29
-#define FC_HWFlowCtl   30
-#define FC_GetClockRate 35
-#define FC_SetBaud     36
-#define FC_SetDataMode  41
-#define FC_GetCCSR      43
-#define FC_GetDataError 45
-#define FC_RxControl   50
-#define FC_ImmSend     51
-#define FC_SetXonState 52
-#define FC_SetXoffState        53
-#define FC_SetRxFIFOTrig 54
-#define FC_SetTxFIFOCnt 55
-#define FC_UnixRate    56
-#define FC_UnixResetTimer 57
-
-#define        RxFIFOTrig1     0
-#define        RxFIFOTrig4     1
-#define        RxFIFOTrig8     2
-#define        RxFIFOTrig14    3
-
-/*
- *    Dual-Ported RAM
- */
-#define DRAM_global    0
-#define INT_data       (DRAM_global + 0)
-#define Config_base    (DRAM_global + 0x108)
-
-#define IRQindex       (INT_data + 0)
-#define IRQpending     (INT_data + 4)
-#define IRQtable       (INT_data + 8)
-
-/*
- *    Interrupt Status
- */
-#define IntrRx         0x01    /* receiver data O.K.             */
-#define IntrTx         0x02    /* transmit buffer empty  */
-#define IntrFunc       0x04    /* function complete              */
-#define IntrBreak      0x08    /* received break         */
-#define IntrLine       0x10    /* line status change
-                                  for transmitter                */
-#define IntrIntr       0x20    /* received INTR code             */
-#define IntrQuit       0x40    /* received QUIT code             */
-#define IntrEOF        0x80    /* received EOF code              */
-
-#define IntrRxTrigger  0x100   /* rx data count reaches trigger value */
-#define IntrTxTrigger  0x200   /* tx data count below trigger value */
-
-#define Magic_no       (Config_base + 0)
-#define Card_model_no  (Config_base + 2)
-#define Total_ports    (Config_base + 4)
-#define Module_cnt     (Config_base + 8)
-#define Module_no      (Config_base + 10)
-#define Timer_10ms     (Config_base + 14)
-#define Disable_IRQ    (Config_base + 20)
-#define TMS320_PORT1   (Config_base + 22)
-#define TMS320_PORT2   (Config_base + 24)
-#define TMS320_CLOCK   (Config_base + 26)
-
-/*
- *    DATA BUFFER in DRAM
- */
-#define Extern_table   0x400   /* Base address of the external table
-                                  (24 words *    64) total 3K bytes
-                                  (24 words * 128) total 6K bytes */
-#define Extern_size    0x60    /* 96 bytes                       */
-#define RXrptr         0x00    /* read pointer for RX buffer     */
-#define RXwptr         0x02    /* write pointer for RX buffer    */
-#define TXrptr         0x04    /* read pointer for TX buffer     */
-#define TXwptr         0x06    /* write pointer for TX buffer    */
-#define HostStat       0x08    /* IRQ flag and general flag      */
-#define FlagStat       0x0A
-#define FlowControl    0x0C    /* B7 B6 B5 B4 B3 B2 B1 B0              */
-                                       /*  x  x  x  x  |  |  |  |            */
-                                       /*              |  |  |  + CTS flow   */
-                                       /*              |  |  +--- RTS flow   */
-                                       /*              |  +------ TX Xon/Xoff */
-                                       /*              +--------- RX Xon/Xoff */
-#define Break_cnt      0x0E    /* received break count   */
-#define CD180TXirq     0x10    /* if non-0: enable TX irq        */
-#define RX_mask        0x12
-#define TX_mask        0x14
-#define Ofs_rxb        0x16
-#define Ofs_txb        0x18
-#define Page_rxb       0x1A
-#define Page_txb       0x1C
-#define EndPage_rxb    0x1E
-#define EndPage_txb    0x20
-#define Data_error     0x22
-#define RxTrigger      0x28
-#define TxTrigger      0x2a
-
-#define rRXwptr        0x34
-#define Low_water      0x36
-
-#define FuncCode       0x40
-#define FuncArg        0x42
-#define FuncArg1       0x44
-
-#define C218rx_size    0x2000  /* 8K bytes */
-#define C218tx_size    0x8000  /* 32K bytes */
-
-#define C218rx_mask    (C218rx_size - 1)
-#define C218tx_mask    (C218tx_size - 1)
-
-#define C320p8rx_size  0x2000
-#define C320p8tx_size  0x8000
-#define C320p8rx_mask  (C320p8rx_size - 1)
-#define C320p8tx_mask  (C320p8tx_size - 1)
-
-#define C320p16rx_size 0x2000
-#define C320p16tx_size 0x4000
-#define C320p16rx_mask (C320p16rx_size - 1)
-#define C320p16tx_mask (C320p16tx_size - 1)
-
-#define C320p24rx_size 0x2000
-#define C320p24tx_size 0x2000
-#define C320p24rx_mask (C320p24rx_size - 1)
-#define C320p24tx_mask (C320p24tx_size - 1)
-
-#define C320p32rx_size 0x1000
-#define C320p32tx_size 0x1000
-#define C320p32rx_mask (C320p32rx_size - 1)
-#define C320p32tx_mask (C320p32tx_size - 1)
-
-#define Page_size      0x2000
-#define Page_mask      (Page_size - 1)
-#define C218rx_spage   3
-#define C218tx_spage   4
-#define C218rx_pageno  1
-#define C218tx_pageno  4
-#define C218buf_pageno 5
-
-#define C320p8rx_spage 3
-#define C320p8tx_spage 4
-#define C320p8rx_pgno  1
-#define C320p8tx_pgno  4
-#define C320p8buf_pgno 5
-
-#define C320p16rx_spage 3
-#define C320p16tx_spage 4
-#define C320p16rx_pgno 1
-#define C320p16tx_pgno 2
-#define C320p16buf_pgno 3
-
-#define C320p24rx_spage 3
-#define C320p24tx_spage 4
-#define C320p24rx_pgno 1
-#define C320p24tx_pgno 1
-#define C320p24buf_pgno 2
-
-#define C320p32rx_spage 3
-#define C320p32tx_ofs  C320p32rx_size
-#define C320p32tx_spage 3
-#define C320p32buf_pgno 1
-
-/*
- *    Host Status
- */
-#define WakeupRx       0x01
-#define WakeupTx       0x02
-#define WakeupBreak    0x08
-#define WakeupLine     0x10
-#define WakeupIntr     0x20
-#define WakeupQuit     0x40
-#define WakeupEOF      0x80    /* used in VTIME control */
-#define WakeupRxTrigger        0x100
-#define WakeupTxTrigger        0x200
-/*
- *    Flag status
- */
-#define Rx_over                0x01
-#define Xoff_state     0x02
-#define Tx_flowOff     0x04
-#define Tx_enable      0x08
-#define CTS_state      0x10
-#define DSR_state      0x20
-#define DCD_state      0x80
-/*
- *    FlowControl
- */
-#define CTS_FlowCtl    1
-#define RTS_FlowCtl    2
-#define Tx_FlowCtl     4
-#define Rx_FlowCtl     8
-#define IXM_IXANY      0x10
-
-#define LowWater       128
-
-#define DTR_ON         1
-#define RTS_ON         2
-#define CTS_ON         1
-#define DSR_ON         2
-#define DCD_ON         8
-
-/* mode definition */
-#define        MX_CS8          0x03
-#define        MX_CS7          0x02
-#define        MX_CS6          0x01
-#define        MX_CS5          0x00
-
-#define        MX_STOP1        0x00
-#define        MX_STOP15       0x04
-#define        MX_STOP2        0x08
-
-#define        MX_PARNONE      0x00
-#define        MX_PAREVEN      0x40
-#define        MX_PARODD       0xC0
-
-/*
- *    Query
- */
-
-struct mon_str {
-       int tick;
-       int rxcnt[MAX_PORTS];
-       int txcnt[MAX_PORTS];
-};
-
-#define        DCD_changed     0x01
-#define        DCD_oldstate    0x80
-
-static unsigned char moxaBuff[10240];
-static int moxaLowWaterChk;
-static int moxaCard;
-static struct mon_str moxaLog;
-static int moxaFuncTout = HZ / 2;
-
-static void moxafunc(void __iomem *, int, ushort);
-static void moxa_wait_finish(void __iomem *);
-static void moxa_low_water_check(void __iomem *);
-static int moxaloadbios(int, unsigned char __user *, int);
-static int moxafindcard(int);
-static int moxaload320b(int, unsigned char __user *, int);
-static int moxaloadcode(int, unsigned char __user *, int);
-static int moxaloadc218(int, void __iomem *, int);
-static int moxaloadc320(int, void __iomem *, int, int *);
-
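The flow-control defines removed above pack one option per bit (CTS_FlowCtl, RTS_FlowCtl, Tx_FlowCtl, Rx_FlowCtl, IXM_IXANY), using the same bit positions shown in the FlowControl byte diagram; MoxaPortFlowCtrl later in this diff simply ORs them together into the FC_SetFlowCtl argument. A minimal standalone sketch of that composition, reusing the same constant values (build_flowctl_mode and the main() harness are illustrative only, not part of the driver):

#include <stdio.h>

/* Same values as the defines removed above. */
#define CTS_FlowCtl 1
#define RTS_FlowCtl 2
#define Tx_FlowCtl  4
#define Rx_FlowCtl  8
#define IXM_IXANY   0x10

/* Illustrative helper: build an FC_SetFlowCtl-style argument the way
 * MoxaPortFlowCtrl in this diff does, one bit per enabled option. */
static int build_flowctl_mode(int rts, int cts, int txflow, int rxflow, int txany)
{
        int mode = 0;

        if (rts)
                mode |= RTS_FlowCtl;
        if (cts)
                mode |= CTS_FlowCtl;
        if (txflow)
                mode |= Tx_FlowCtl;
        if (rxflow)
                mode |= Rx_FlowCtl;
        if (txany)
                mode |= IXM_IXANY;
        return mode;
}

int main(void)
{
        /* RTS/CTS hardware flow control plus IXANY: expect 0x13. */
        printf("mode = 0x%02x\n", build_flowctl_mode(1, 1, 0, 0, 1));
        return 0;
}

Because each option is an independent bit, enabling or disabling one form of flow control never requires reading back the others.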
 /*****************************************************************************
  *     Driver level functions:                                              *
- *     1. MoxaDriverInit(void);                                             *
- *     2. MoxaDriverIoctl(unsigned int cmd, unsigned long arg, int port);   *
- *     3. MoxaDriverPoll(void);                                             *
  *****************************************************************************/
-void MoxaDriverInit(void)
-{
-       struct moxa_port *p;
-       unsigned int i;
-
-       moxaFuncTout = HZ / 2;  /* 500 milliseconds */
-       moxaCard = 0;
-       moxaLog.tick = 0;
-       moxaLowWaterChk = 0;
-       for (i = 0; i < MAX_PORTS; i++) {
-               p = &moxa_ports[i];
-               p->chkPort = 0;
-               p->lowChkFlag = 0;
-               p->lineCtrl = 0;
-               moxaLog.rxcnt[i] = 0;
-               moxaLog.txcnt[i] = 0;
-       }
-}
-
-#define        MOXA            0x400
-#define MOXA_GET_IQUEUE        (MOXA + 1)      /* get input buffered count */
-#define MOXA_GET_OQUEUE        (MOXA + 2)      /* get output buffered count */
-#define MOXA_INIT_DRIVER       (MOXA + 6)      /* moxaCard=0 */
-#define MOXA_LOAD_BIOS         (MOXA + 9)      /* download BIOS */
-#define MOXA_FIND_BOARD                (MOXA + 10)     /* check whether a MOXA card exists */
-#define MOXA_LOAD_C320B                (MOXA + 11)     /* download 320B firmware */
-#define MOXA_LOAD_CODE         (MOXA + 12)     /* download firmware */
-#define MOXA_GETDATACOUNT       (MOXA + 23)
-#define MOXA_GET_IOQUEUE       (MOXA + 27)
-#define MOXA_FLUSH_QUEUE       (MOXA + 28)
-#define MOXA_GET_CONF          (MOXA + 35)     /* configuration */
-#define MOXA_GET_MAJOR          (MOXA + 63)
-#define MOXA_GET_CUMAJOR        (MOXA + 64)
-#define MOXA_GETMSTATUS         (MOXA + 65)
-
-struct dl_str {
-       char __user *buf;
-       int len;
-       int cardno;
-};
-
-static struct dl_str dltmp;
 
-void MoxaPortFlushData(int port, int mode)
+static void MoxaPortFlushData(struct moxa_port *port, int mode)
 {
        void __iomem *ofsAddr;
-       if ((mode < 0) || (mode > 2))
+       if (mode < 0 || mode > 2)
                return;
-       ofsAddr = moxa_ports[port].tableAddr;
+       ofsAddr = port->tableAddr;
        moxafunc(ofsAddr, FC_FlushQueue, mode);
        if (mode != 1) {
-               moxa_ports[port].lowChkFlag = 0;
+               port->lowChkFlag = 0;
                moxa_low_water_check(ofsAddr);
        }
 }
 
-int MoxaDriverIoctl(unsigned int cmd, unsigned long arg, int port)
-{
-       int i;
-       int status;
-       int MoxaPortTxQueue(int), MoxaPortRxQueue(int);
-       void __user *argp = (void __user *)arg;
-
-       if (port == MAX_PORTS) {
-               if ((cmd != MOXA_GET_CONF) && (cmd != MOXA_INIT_DRIVER) &&
-                   (cmd != MOXA_LOAD_BIOS) && (cmd != MOXA_FIND_BOARD) && (cmd != MOXA_LOAD_C320B) &&
-                (cmd != MOXA_LOAD_CODE) && (cmd != MOXA_GETDATACOUNT) &&
-                 (cmd != MOXA_GET_IOQUEUE) && (cmd != MOXA_GET_MAJOR) &&
-                   (cmd != MOXA_GET_CUMAJOR) && (cmd != MOXA_GETMSTATUS))
-                       return (-EINVAL);
-       }
-       switch (cmd) {
-       case MOXA_GET_CONF:
-               if(copy_to_user(argp, &moxa_boards, MAX_BOARDS *
-                               sizeof(struct moxa_board_conf)))
-                       return -EFAULT;
-               return (0);
-       case MOXA_INIT_DRIVER:
-               if ((int) arg == 0x404)
-                       MoxaDriverInit();
-               return (0);
-       case MOXA_GETDATACOUNT:
-               moxaLog.tick = jiffies;
-               if(copy_to_user(argp, &moxaLog, sizeof(struct mon_str)))
-                       return -EFAULT;
-               return (0);
-       case MOXA_FLUSH_QUEUE:
-               MoxaPortFlushData(port, arg);
-               return (0);
-       case MOXA_GET_IOQUEUE: {
-               struct moxaq_str __user *argm = argp;
-               struct moxaq_str tmp;
-
-               for (i = 0; i < MAX_PORTS; i++, argm++) {
-                       memset(&tmp, 0, sizeof(tmp));
-                       if (moxa_ports[i].chkPort) {
-                               tmp.inq = MoxaPortRxQueue(i);
-                               tmp.outq = MoxaPortTxQueue(i);
-                       }
-                       if (copy_to_user(argm, &tmp, sizeof(tmp)))
-                               return -EFAULT;
-               }
-               return (0);
-       } case MOXA_GET_OQUEUE:
-               i = MoxaPortTxQueue(port);
-               return put_user(i, (unsigned long __user *)argp);
-       case MOXA_GET_IQUEUE:
-               i = MoxaPortRxQueue(port);
-               return put_user(i, (unsigned long __user *)argp);
-       case MOXA_GET_MAJOR:
-               if(copy_to_user(argp, &ttymajor, sizeof(int)))
-                       return -EFAULT;
-               return 0;
-       case MOXA_GET_CUMAJOR:
-               i = 0;
-               if(copy_to_user(argp, &i, sizeof(int)))
-                       return -EFAULT;
-               return 0;
-       case MOXA_GETMSTATUS: {
-               struct mxser_mstatus __user *argm = argp;
-               struct mxser_mstatus tmp;
-               struct moxa_port *p;
-
-               for (i = 0; i < MAX_PORTS; i++, argm++) {
-                       p = &moxa_ports[i];
-                       memset(&tmp, 0, sizeof(tmp));
-                       if (!p->chkPort) {
-                               goto copy;
-                       } else {
-                               status = MoxaPortLineStatus(p->port);
-                               if (status & 1)
-                                       tmp.cts = 1;
-                               if (status & 2)
-                                       tmp.dsr = 1;
-                               if (status & 4)
-                                       tmp.dcd = 1;
-                       }
-
-                       if (!p->tty || !p->tty->termios)
-                               tmp.cflag = p->cflag;
-                       else
-                               tmp.cflag = p->tty->termios->c_cflag;
-copy:
-                       if (copy_to_user(argm, &tmp, sizeof(tmp)))
-                               return -EFAULT;
-               }
-               return 0;
-       } default:
-               return (-ENOIOCTLCMD);
-       case MOXA_LOAD_BIOS:
-       case MOXA_FIND_BOARD:
-       case MOXA_LOAD_C320B:
-       case MOXA_LOAD_CODE:
-               if (!capable(CAP_SYS_RAWIO))
-                       return -EPERM;
-               break;
-       }
-
-       if(copy_from_user(&dltmp, argp, sizeof(struct dl_str)))
-               return -EFAULT;
-       if(dltmp.cardno < 0 || dltmp.cardno >= MAX_BOARDS || dltmp.len < 0)
-               return -EINVAL;
-
-       switch(cmd)
-       {
-       case MOXA_LOAD_BIOS:
-               i = moxaloadbios(dltmp.cardno, dltmp.buf, dltmp.len);
-               return (i);
-       case MOXA_FIND_BOARD:
-               return moxafindcard(dltmp.cardno);
-       case MOXA_LOAD_C320B:
-               moxaload320b(dltmp.cardno, dltmp.buf, dltmp.len);
-       default: /* to keep gcc happy */
-               return (0);
-       case MOXA_LOAD_CODE:
-               i = moxaloadcode(dltmp.cardno, dltmp.buf, dltmp.len);
-               if (i == -1)
-                       return (-EFAULT);
-               return (i);
-
-       }
-}
-
-int MoxaDriverPoll(void)
-{
-       struct moxa_board_conf *brd;
-       register ushort temp;
-       register int card;
-       void __iomem *ofsAddr;
-       void __iomem *ip;
-       int port, p, ports;
-
-       if (moxaCard == 0)
-               return (-1);
-       for (card = 0; card < MAX_BOARDS; card++) {
-               brd = &moxa_boards[card];
-               if (brd->loadstat == 0)
-                       continue;
-               if ((ports = brd->numPorts) == 0)
-                       continue;
-               if (readb(brd->intPend) == 0xff) {
-                       ip = brd->intTable + readb(brd->intNdx);
-                       p = card * MAX_PORTS_PER_BOARD;
-                       ports <<= 1;
-                       for (port = 0; port < ports; port += 2, p++) {
-                               if ((temp = readw(ip + port)) != 0) {
-                                       writew(0, ip + port);
-                                       ofsAddr = moxa_ports[p].tableAddr;
-                                       if (temp & IntrTx)
-                                               writew(readw(ofsAddr + HostStat) & ~WakeupTx, ofsAddr + HostStat);
-                                       if (temp & IntrBreak) {
-                                               moxa_ports[p].breakCnt++;
-                                       }
-                                       if (temp & IntrLine) {
-                                               if (readb(ofsAddr + FlagStat) & DCD_state) {
-                                                       if ((moxa_ports[p].DCDState & DCD_oldstate) == 0)
-                                                               moxa_ports[p].DCDState = (DCD_oldstate |
-                                                                                  DCD_changed);
-                                               } else {
-                                                       if (moxa_ports[p].DCDState & DCD_oldstate)
-                                                               moxa_ports[p].DCDState = DCD_changed;
-                                               }
-                                       }
-                               }
-                       }
-                       writeb(0, brd->intPend);
-               }
-               if (moxaLowWaterChk) {
-                       p = card * MAX_PORTS_PER_BOARD;
-                       for (port = 0; port < ports; port++, p++) {
-                               if (moxa_ports[p].lowChkFlag) {
-                                       moxa_ports[p].lowChkFlag = 0;
-                                       ofsAddr = moxa_ports[p].tableAddr;
-                                       moxa_low_water_check(ofsAddr);
-                               }
-                       }
-               }
-       }
-       moxaLowWaterChk = 0;
-       return (0);
-}
-
-/*****************************************************************************
- *     Card level function:                                                 *
- *     1. MoxaPortsOfCard(int cardno);                                      *
- *****************************************************************************/
-int MoxaPortsOfCard(int cardno)
-{
-
-       if (moxa_boards[cardno].boardType == 0)
-               return (0);
-       return (moxa_boards[cardno].numPorts);
-}
-
-/*****************************************************************************
- *     Port level functions:                                                *
- *     1.  MoxaPortIsValid(int port);                                       *
- *     2.  MoxaPortEnable(int port);                                        *
- *     3.  MoxaPortDisable(int port);                                       *
- *     4.  MoxaPortGetMaxBaud(int port);                                    *
- *     6.  MoxaPortSetBaud(int port, long baud);                            *
- *     8.  MoxaPortSetTermio(int port, unsigned char *termio);              *
- *     9.  MoxaPortGetLineOut(int port, int *dtrState, int *rtsState);      *
- *     10. MoxaPortLineCtrl(int port, int dtrState, int rtsState);          *
- *     11. MoxaPortFlowCtrl(int port, int rts, int cts, int rx, int tx,int xany);    *
- *     12. MoxaPortLineStatus(int port);                                    *
- *     13. MoxaPortDCDChange(int port);                                     *
- *     14. MoxaPortDCDON(int port);                                         *
- *     15. MoxaPortFlushData(int port, int mode);                           *
- *     16. MoxaPortWriteData(int port, unsigned char * buffer, int length); *
- *     17. MoxaPortReadData(int port, struct tty_struct *tty);              *
- *     20. MoxaPortTxQueue(int port);                                       *
- *     21. MoxaPortTxFree(int port);                                        *
- *     22. MoxaPortRxQueue(int port);                                       *
- *     24. MoxaPortTxDisable(int port);                                     *
- *     25. MoxaPortTxEnable(int port);                                      *
- *     27. MoxaPortResetBrkCnt(int port);                                   *
- *     30. MoxaPortSendBreak(int port, int ticks);                          *
- *****************************************************************************/
 /*
  *    Moxa Port Number Description:
  *
@@ -1733,33 +1637,6 @@ int MoxaPortsOfCard(int cardno)
  *                      -ENOIOCTLCMD
  *
  *
- *      Function 3:     Moxa driver polling process routine.
- *      Syntax:
- *      int  MoxaDriverPoll(void);
- *
- *           return:    0       : polling O.K.
- *                      -1      : no Moxa card present.
- *
- *
- *      Function 4:     Get the ports of this card.
- *      Syntax:
- *      int  MoxaPortsOfCard(int cardno);
- *
- *           int cardno         : card number (0 - 3)
- *
- *           return:    0       : this card is invalid
- *                      8/16/24/32
- *
- *
- *      Function 5:     Check whether this port is valid or invalid
- *      Syntax:
- *      int  MoxaPortIsValid(int port);
- *           int port           : port number (0 - 127, ref port description)
- *
- *           return:    0       : this port is invalid
- *                      1       : this port is valid
- *
- *
  *      Function 6:     Enable this port to start Tx/Rx data.
  *      Syntax:
  *      void MoxaPortEnable(int port);
@@ -1772,18 +1649,9 @@ int MoxaPortsOfCard(int cardno)
  *           int port           : port number (0 - 127)
  *
  *
- *      Function 8:     Get the maximum available baud rate of this port.
- *      Syntax:
- *      long MoxaPortGetMaxBaud(int port);
- *           int port           : port number (0 - 127)
- *
- *           return:    0       : this port is invalid
- *                      38400/57600/115200 bps
- *
- *
  *      Function 10:    Setting baud rate of this port.
  *      Syntax:
- *      long MoxaPortSetBaud(int port, long baud);
+ *      speed_t MoxaPortSetBaud(int port, speed_t baud);
  *           int port           : port number (0 - 127)
  *           long baud          : baud rate (50 - 115200)
  *
@@ -1850,25 +1718,6 @@ int MoxaPortsOfCard(int cardno)
  *                      Bit 2 - DCD state (0: off, 1: on)
  *
  *
- *      Function 17:    Check whether the DCD state has changed since the last
- *                      read of this function.
- *      Syntax:
- *      int  MoxaPortDCDChange(int port);
- *           int port           : port number (0 - 127)
- *
- *           return:    0       : not changed
- *                      1       : DCD has changed
- *
- *
- *      Function 18:    Check whether the current DCD state is ON or not.
- *      Syntax:
- *      int  MoxaPortDCDON(int port);
- *           int port           : port number (0 - 127)
- *
- *           return:    0       : DCD off
- *                      1       : DCD on
- *
- *
  *      Function 19:    Flush the Rx/Tx buffer data of this port.
  *      Syntax:
  *      void MoxaPortFlushData(int port, int mode);
@@ -1942,40 +1791,20 @@ int MoxaPortsOfCard(int cardno)
  *           return:    0 - ..  : BREAK signal count
  *
  *
- *      Function 34:    Send out a BREAK signal.
- *      Syntax:
- *      void MoxaPortSendBreak(int port, int ms100);
- *           int port           : port number (0 - 127)
- *           int ms100          : break signal time interval.
- *                                unit: 100 milliseconds. If ms100 == 0, it will
- *                                send out an approximately 250 ms BREAK signal.
- *
  */
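Per the Function 10 description above, the firmware is programmed with a clock divisor rather than with the baud rate itself; the reworked MoxaPortSetBaud below uses a fixed 921600 Hz clock, caps C320 boards at 460800, and returns clock / (clock / baud), so requested rates that do not divide the clock evenly get rounded. A standalone sketch of that arithmetic (moxa_effective_baud and the example rates are illustrative, assuming the same clock and caps as the new code):

#include <stdio.h>

/* Illustrative only: mirrors the divisor arithmetic in the reworked
 * MoxaPortSetBaud (fixed 921600 Hz clock, cap depends on board type). */
static unsigned int moxa_effective_baud(unsigned int requested, int is_c320)
{
        const unsigned int clock = 921600;
        unsigned int max = is_c320 ? 460800 : 921600;
        unsigned int val;

        if (requested < 50)
                return 0;
        if (requested > max)
                requested = max;
        val = clock / requested;      /* divisor written via FC_SetBaud */
        return clock / val;           /* rate the firmware actually runs */
}

int main(void)
{
        /* 115200 divides 921600 exactly; 56000 does not. */
        printf("115200 -> %u\n", moxa_effective_baud(115200, 0));
        printf(" 56000 -> %u\n", moxa_effective_baud(56000, 0));
        return 0;
}

Running it shows 115200 mapping to itself, while 56000 comes back as 57600 because the divisor is truncated downward before the effective rate is recomputed.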
-int MoxaPortIsValid(int port)
-{
-
-       if (moxaCard == 0)
-               return (0);
-       if (moxa_ports[port].chkPort == 0)
-               return (0);
-       return (1);
-}
 
-void MoxaPortEnable(int port)
+static void MoxaPortEnable(struct moxa_port *port)
 {
        void __iomem *ofsAddr;
-       int MoxaPortLineStatus(int);
-       short lowwater = 512;
+       u16 lowwater = 512;
 
-       ofsAddr = moxa_ports[port].tableAddr;
+       ofsAddr = port->tableAddr;
        writew(lowwater, ofsAddr + Low_water);
-       moxa_ports[port].breakCnt = 0;
-       if ((moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_ISA) ||
-           (moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_PCI)) {
+       if (MOXA_IS_320(port->board))
                moxafunc(ofsAddr, FC_SetBreakIrq, 0);
-       } else {
-               writew(readw(ofsAddr + HostStat) | WakeupBreak, ofsAddr + HostStat);
-       }
+       else
+               writew(readw(ofsAddr + HostStat) | WakeupBreak,
+                               ofsAddr + HostStat);
 
        moxafunc(ofsAddr, FC_SetLineIrq, Magic_code);
        moxafunc(ofsAddr, FC_FlushQueue, 2);
@@ -1984,9 +1813,9 @@ void MoxaPortEnable(int port)
        MoxaPortLineStatus(port);
 }
 
-void MoxaPortDisable(int port)
+static void MoxaPortDisable(struct moxa_port *port)
 {
-       void __iomem *ofsAddr = moxa_ports[port].tableAddr;
+       void __iomem *ofsAddr = port->tableAddr;
 
        moxafunc(ofsAddr, FC_SetFlowCtl, 0);    /* disable flow control */
        moxafunc(ofsAddr, FC_ClrLineIrq, Magic_code);
@@ -1994,49 +1823,32 @@ void MoxaPortDisable(int port)
        moxafunc(ofsAddr, FC_DisableCH, Magic_code);
 }
 
-long MoxaPortGetMaxBaud(int port)
-{
-       if ((moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_ISA) ||
-           (moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_PCI))
-               return (460800L);
-       else
-               return (921600L);
-}
-
-
-long MoxaPortSetBaud(int port, long baud)
+static speed_t MoxaPortSetBaud(struct moxa_port *port, speed_t baud)
 {
-       void __iomem *ofsAddr;
-       long max, clock;
-       unsigned int val;
+       void __iomem *ofsAddr = port->tableAddr;
+       unsigned int clock, val;
+       speed_t max;
 
-       if ((baud < 50L) || ((max = MoxaPortGetMaxBaud(port)) == 0))
-               return (0);
-       ofsAddr = moxa_ports[port].tableAddr;
+       max = MOXA_IS_320(port->board) ? 460800 : 921600;
+       if (baud < 50)
+               return 0;
        if (baud > max)
                baud = max;
-       if (max == 38400L)
-               clock = 614400L;        /* for 9.8304 Mhz : max. 38400 bps */
-       else if (max == 57600L)
-               clock = 691200L;        /* for 11.0592 Mhz : max. 57600 bps */
-       else
-               clock = 921600L;        /* for 14.7456 Mhz : max. 115200 bps */
+       clock = 921600;
        val = clock / baud;
        moxafunc(ofsAddr, FC_SetBaud, val);
        baud = clock / val;
-       moxa_ports[port].curBaud = baud;
-       return (baud);
+       return baud;
 }
 
-int MoxaPortSetTermio(int port, struct ktermios *termio, speed_t baud)
+static int MoxaPortSetTermio(struct moxa_port *port, struct ktermios *termio,
+               speed_t baud)
 {
        void __iomem *ofsAddr;
        tcflag_t cflag;
        tcflag_t mode = 0;
 
-       if (moxa_ports[port].chkPort == 0 || termio == 0)
-               return (-1);
-       ofsAddr = moxa_ports[port].tableAddr;
+       ofsAddr = port->tableAddr;
        cflag = termio->c_cflag;        /* termio->c_cflag */
 
        mode = termio->c_cflag & CSIZE;
@@ -2065,13 +1877,11 @@ int MoxaPortSetTermio(int port, struct ktermios *termio, speed_t baud)
        } else
                mode |= MX_PARNONE;
 
-       moxafunc(ofsAddr, FC_SetDataMode, (ushort) mode);
+       moxafunc(ofsAddr, FC_SetDataMode, (u16)mode);
+
+       if (MOXA_IS_320(port->board) && baud >= 921600)
+               return -1;
 
-       if ((moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_ISA) ||
-           (moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_PCI)) {
-               if (baud >= 921600L)
-                       return (-1);
-       }
        baud = MoxaPortSetBaud(port, baud);
 
        if (termio->c_iflag & (IXON | IXOFF | IXANY)) {
@@ -2081,51 +1891,37 @@ int MoxaPortSetTermio(int port, struct ktermios *termio, speed_t baud)
                moxa_wait_finish(ofsAddr);
 
        }
-       return (baud);
+       return baud;
 }
 
-int MoxaPortGetLineOut(int port, int *dtrState, int *rtsState)
+static int MoxaPortGetLineOut(struct moxa_port *port, int *dtrState,
+               int *rtsState)
 {
+       if (dtrState)
+               *dtrState = !!(port->lineCtrl & DTR_ON);
+       if (rtsState)
+               *rtsState = !!(port->lineCtrl & RTS_ON);
 
-       if (!MoxaPortIsValid(port))
-               return (-1);
-       if (dtrState) {
-               if (moxa_ports[port].lineCtrl & DTR_ON)
-                       *dtrState = 1;
-               else
-                       *dtrState = 0;
-       }
-       if (rtsState) {
-               if (moxa_ports[port].lineCtrl & RTS_ON)
-                       *rtsState = 1;
-               else
-                       *rtsState = 0;
-       }
-       return (0);
+       return 0;
 }
 
-void MoxaPortLineCtrl(int port, int dtr, int rts)
+static void MoxaPortLineCtrl(struct moxa_port *port, int dtr, int rts)
 {
-       void __iomem *ofsAddr;
-       int mode;
+       u8 mode = 0;
 
-       ofsAddr = moxa_ports[port].tableAddr;
-       mode = 0;
        if (dtr)
                mode |= DTR_ON;
        if (rts)
                mode |= RTS_ON;
-       moxa_ports[port].lineCtrl = mode;
-       moxafunc(ofsAddr, FC_LineControl, mode);
+       port->lineCtrl = mode;
+       moxafunc(port->tableAddr, FC_LineControl, mode);
 }
 
-void MoxaPortFlowCtrl(int port, int rts, int cts, int txflow, int rxflow, int txany)
+static void MoxaPortFlowCtrl(struct moxa_port *port, int rts, int cts,
+               int txflow, int rxflow, int txany)
 {
-       void __iomem *ofsAddr;
-       int mode;
+       int mode = 0;
 
-       ofsAddr = moxa_ports[port].tableAddr;
-       mode = 0;
        if (rts)
                mode |= RTS_FlowCtl;
        if (cts)
@@ -2136,81 +1932,50 @@ void MoxaPortFlowCtrl(int port, int rts, int cts, int txflow, int rxflow, int tx
                mode |= Rx_FlowCtl;
        if (txany)
                mode |= IXM_IXANY;
-       moxafunc(ofsAddr, FC_SetFlowCtl, mode);
+       moxafunc(port->tableAddr, FC_SetFlowCtl, mode);
 }
 
-int MoxaPortLineStatus(int port)
+static int MoxaPortLineStatus(struct moxa_port *port)
 {
        void __iomem *ofsAddr;
        int val;
 
-       ofsAddr = moxa_ports[port].tableAddr;
-       if ((moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_ISA) ||
-           (moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_PCI)) {
+       ofsAddr = port->tableAddr;
+       if (MOXA_IS_320(port->board)) {
                moxafunc(ofsAddr, FC_LineStatus, 0);
                val = readw(ofsAddr + FuncArg);
        } else {
                val = readw(ofsAddr + FlagStat) >> 4;
        }
        val &= 0x0B;
-       if (val & 8) {
+       if (val & 8)
                val |= 4;
-               if ((moxa_ports[port].DCDState & DCD_oldstate) == 0)
-                       moxa_ports[port].DCDState = (DCD_oldstate | DCD_changed);
-       } else {
-               if (moxa_ports[port].DCDState & DCD_oldstate)
-                       moxa_ports[port].DCDState = DCD_changed;
-       }
+       spin_lock_bh(&moxa_lock);
+       moxa_new_dcdstate(port, val & 8);
+       spin_unlock_bh(&moxa_lock);
        val &= 7;
-       return (val);
-}
-
-int MoxaPortDCDChange(int port)
-{
-       int n;
-
-       if (moxa_ports[port].chkPort == 0)
-               return (0);
-       n = moxa_ports[port].DCDState;
-       moxa_ports[port].DCDState &= ~DCD_changed;
-       n &= DCD_changed;
-       return (n);
-}
-
-int MoxaPortDCDON(int port)
-{
-       int n;
-
-       if (moxa_ports[port].chkPort == 0)
-               return (0);
-       if (moxa_ports[port].DCDState & DCD_oldstate)
-               n = 1;
-       else
-               n = 0;
-       return (n);
+       return val;
 }
 
-int MoxaPortWriteData(int port, unsigned char * buffer, int len)
+static int MoxaPortWriteData(struct moxa_port *port,
+               const unsigned char *buffer, int len)
 {
-       int c, total, i;
-       ushort tail;
-       int cnt;
-       ushort head, tx_mask, spage, epage;
-       ushort pageno, pageofs, bufhead;
        void __iomem *baseAddr, *ofsAddr, *ofs;
+       unsigned int c, total;
+       u16 head, tail, tx_mask, spage, epage;
+       u16 pageno, pageofs, bufhead;
 
-       ofsAddr = moxa_ports[port].tableAddr;
-       baseAddr = moxa_boards[port / MAX_PORTS_PER_BOARD].basemem;
+       ofsAddr = port->tableAddr;
+       baseAddr = port->board->basemem;
        tx_mask = readw(ofsAddr + TX_mask);
        spage = readw(ofsAddr + Page_txb);
        epage = readw(ofsAddr + EndPage_txb);
        tail = readw(ofsAddr + TXwptr);
        head = readw(ofsAddr + TXrptr);
-       c = (head > tail) ? (head - tail - 1)
-           : (head - tail + tx_mask);
+       c = (head > tail) ? (head - tail - 1) : (head - tail + tx_mask);
        if (c > len)
                c = len;
-       moxaLog.txcnt[port] += c;
+       moxaLog.txcnt[port->tty->index] += c;
        total = c;
        if (spage == epage) {
                bufhead = readw(ofsAddr + Ofs_txb);
@@ -2222,249 +1987,179 @@ int MoxaPortWriteData(int port, unsigned char * buffer, int len)
                                len = tx_mask + 1 - tail;
                        len = (c > len) ? len : c;
                        ofs = baseAddr + DynPage_addr + bufhead + tail;
-                       for (i = 0; i < len; i++)
-                               writeb(*buffer++, ofs + i);
+                       memcpy_toio(ofs, buffer, len);
+                       buffer += len;
                        tail = (tail + len) & tx_mask;
                        c -= len;
                }
-               writew(tail, ofsAddr + TXwptr);
        } else {
-               len = c;
                pageno = spage + (tail >> 13);
                pageofs = tail & Page_mask;
-               do {
-                       cnt = Page_size - pageofs;
-                       if (cnt > c)
-                               cnt = c;
-                       c -= cnt;
+               while (c > 0) {
+                       len = Page_size - pageofs;
+                       if (len > c)
+                               len = c;
                        writeb(pageno, baseAddr + Control_reg);
                        ofs = baseAddr + DynPage_addr + pageofs;
-                       for (i = 0; i < cnt; i++)
-                               writeb(*buffer++, ofs + i);
-                       if (c == 0) {
-                               writew((tail + len) & tx_mask, ofsAddr + TXwptr);
-                               break;
-                       }
+                       memcpy_toio(ofs, buffer, len);
+                       buffer += len;
                        if (++pageno == epage)
                                pageno = spage;
                        pageofs = 0;
-               } while (1);
+                       c -= len;
+               }
+               tail = (tail + total) & tx_mask;
        }
+       writew(tail, ofsAddr + TXwptr);
        writeb(1, ofsAddr + CD180TXirq);        /* start to send */
-       return (total);
+       return total;
 }
 
-int MoxaPortReadData(int port, struct tty_struct *tty)
+static int MoxaPortReadData(struct moxa_port *port)
 {
-       register ushort head, pageofs;
-       int i, count, cnt, len, total, remain;
-       ushort tail, rx_mask, spage, epage;
-       ushort pageno, bufhead;
+       struct tty_struct *tty = port->tty;
+       unsigned char *dst;
        void __iomem *baseAddr, *ofsAddr, *ofs;
+       unsigned int count, len, total;
+       u16 tail, rx_mask, spage, epage;
+       u16 pageno, pageofs, bufhead, head;
 
-       ofsAddr = moxa_ports[port].tableAddr;
-       baseAddr = moxa_boards[port / MAX_PORTS_PER_BOARD].basemem;
+       ofsAddr = port->tableAddr;
+       baseAddr = port->board->basemem;
        head = readw(ofsAddr + RXrptr);
        tail = readw(ofsAddr + RXwptr);
        rx_mask = readw(ofsAddr + RX_mask);
        spage = readw(ofsAddr + Page_rxb);
        epage = readw(ofsAddr + EndPage_rxb);
-       count = (tail >= head) ? (tail - head)
-           : (tail - head + rx_mask + 1);
+       count = (tail >= head) ? (tail - head) : (tail - head + rx_mask + 1);
        if (count == 0)
                return 0;
 
        total = count;
-       remain = count - total;
-       moxaLog.rxcnt[port] += total;
-       count = total;
+       moxaLog.rxcnt[tty->index] += total;
        if (spage == epage) {
                bufhead = readw(ofsAddr + Ofs_rxb);
                writew(spage, baseAddr + Control_reg);
                while (count > 0) {
-                       if (tail >= head)
-                               len = tail - head;
-                       else
-                               len = rx_mask + 1 - head;
-                       len = (count > len) ? len : count;
                        ofs = baseAddr + DynPage_addr + bufhead + head;
-                       for (i = 0; i < len; i++)
-                               tty_insert_flip_char(tty, readb(ofs + i), TTY_NORMAL);
+                       len = (tail >= head) ? (tail - head) :
+                                       (rx_mask + 1 - head);
+                       len = tty_prepare_flip_string(tty, &dst,
+                                       min(len, count));
+                       memcpy_fromio(dst, ofs, len);
                        head = (head + len) & rx_mask;
                        count -= len;
                }
-               writew(head, ofsAddr + RXrptr);
        } else {
-               len = count;
                pageno = spage + (head >> 13);
                pageofs = head & Page_mask;
-               do {
-                       cnt = Page_size - pageofs;
-                       if (cnt > count)
-                               cnt = count;
-                       count -= cnt;
+               while (count > 0) {
                        writew(pageno, baseAddr + Control_reg);
                        ofs = baseAddr + DynPage_addr + pageofs;
-                       for (i = 0; i < cnt; i++)
-                               tty_insert_flip_char(tty, readb(ofs + i), TTY_NORMAL);
-                       if (count == 0) {
-                               writew((head + len) & rx_mask, ofsAddr + RXrptr);
-                               break;
-                       }
-                       if (++pageno == epage)
+                       len = tty_prepare_flip_string(tty, &dst,
+                                       min(Page_size - pageofs, count));
+                       memcpy_fromio(dst, ofs, len);
+
+                       count -= len;
+                       pageofs = (pageofs + len) & Page_mask;
+                       if (pageofs == 0 && ++pageno == epage)
                                pageno = spage;
-                       pageofs = 0;
-               } while (1);
+               }
+               head = (head + total) & rx_mask;
        }
-       if ((readb(ofsAddr + FlagStat) & Xoff_state) && (remain < LowWater)) {
+       writew(head, ofsAddr + RXrptr);
+       if (readb(ofsAddr + FlagStat) & Xoff_state) {
                moxaLowWaterChk = 1;
-               moxa_ports[port].lowChkFlag = 1;
+               port->lowChkFlag = 1;
        }
-       return (total);
+       return total;
 }
 
 
-int MoxaPortTxQueue(int port)
+static int MoxaPortTxQueue(struct moxa_port *port)
 {
-       void __iomem *ofsAddr;
-       ushort rptr, wptr, mask;
-       int len;
+       void __iomem *ofsAddr = port->tableAddr;
+       u16 rptr, wptr, mask;
 
-       ofsAddr = moxa_ports[port].tableAddr;
        rptr = readw(ofsAddr + TXrptr);
        wptr = readw(ofsAddr + TXwptr);
        mask = readw(ofsAddr + TX_mask);
-       len = (wptr - rptr) & mask;
-       return (len);
+       return (wptr - rptr) & mask;
 }
 
-int MoxaPortTxFree(int port)
+static int MoxaPortTxFree(struct moxa_port *port)
 {
-       void __iomem *ofsAddr;
-       ushort rptr, wptr, mask;
-       int len;
+       void __iomem *ofsAddr = port->tableAddr;
+       u16 rptr, wptr, mask;
 
-       ofsAddr = moxa_ports[port].tableAddr;
        rptr = readw(ofsAddr + TXrptr);
        wptr = readw(ofsAddr + TXwptr);
        mask = readw(ofsAddr + TX_mask);
-       len = mask - ((wptr - rptr) & mask);
-       return (len);
+       return mask - ((wptr - rptr) & mask);
 }
 
-int MoxaPortRxQueue(int port)
+static int MoxaPortRxQueue(struct moxa_port *port)
 {
-       void __iomem *ofsAddr;
-       ushort rptr, wptr, mask;
-       int len;
+       void __iomem *ofsAddr = port->tableAddr;
+       u16 rptr, wptr, mask;
 
-       ofsAddr = moxa_ports[port].tableAddr;
        rptr = readw(ofsAddr + RXrptr);
        wptr = readw(ofsAddr + RXwptr);
        mask = readw(ofsAddr + RX_mask);
-       len = (wptr - rptr) & mask;
-       return (len);
-}
-
-
-void MoxaPortTxDisable(int port)
-{
-       void __iomem *ofsAddr;
-
-       ofsAddr = moxa_ports[port].tableAddr;
-       moxafunc(ofsAddr, FC_SetXoffState, Magic_code);
-}
-
-void MoxaPortTxEnable(int port)
-{
-       void __iomem *ofsAddr;
-
-       ofsAddr = moxa_ports[port].tableAddr;
-       moxafunc(ofsAddr, FC_SetXonState, Magic_code);
+       return (wptr - rptr) & mask;
 }
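MoxaPortTxQueue, MoxaPortTxFree and MoxaPortRxQueue above all derive buffer occupancy from a read pointer, a write pointer and a power-of-two mask read out of the board table: used = (wptr - rptr) & mask, free = mask - used. A standalone sketch of that circular-buffer arithmetic on plain integers (ring_used/ring_free are illustrative names; the mask value assumes the 0x2000-byte C218 rx buffer, i.e. C218rx_mask):

#include <assert.h>
#include <stdio.h>

/* Circular-buffer arithmetic as used by the MoxaPort*Queue helpers:
 * mask is size - 1 for a power-of-two buffer, and pointer wrap-around
 * is absorbed by the unsigned subtraction plus mask. */
static unsigned int ring_used(unsigned int rptr, unsigned int wptr,
                              unsigned int mask)
{
        return (wptr - rptr) & mask;
}

static unsigned int ring_free(unsigned int rptr, unsigned int wptr,
                              unsigned int mask)
{
        return mask - ring_used(rptr, wptr, mask);   /* one slot kept empty */
}

int main(void)
{
        unsigned int mask = 0x1fff;        /* e.g. the 8 KiB C218 rx buffer */

        assert(ring_used(0x1ff0, 0x0010, mask) == 0x20);  /* wrapped writer */
        printf("used=%u free=%u\n",
               ring_used(0x100, 0x180, mask), ring_free(0x100, 0x180, mask));
        return 0;
}

Keeping one slot unused (free = mask rather than mask + 1 when the pointers are equal) is what lets the same pointer pair distinguish an empty buffer from a full one.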
 
-
-int MoxaPortResetBrkCnt(int port)
+static void MoxaPortTxDisable(struct moxa_port *port)
 {
-       ushort cnt;
-       cnt = moxa_ports[port].breakCnt;
-       moxa_ports[port].breakCnt = 0;
-       return (cnt);
+       moxafunc(port->tableAddr, FC_SetXoffState, Magic_code);
 }
 
-
-void MoxaPortSendBreak(int port, int ms100)
+static void MoxaPortTxEnable(struct moxa_port *port)
 {
-       void __iomem *ofsAddr;
-
-       ofsAddr = moxa_ports[port].tableAddr;
-       if (ms100) {
-               moxafunc(ofsAddr, FC_SendBreak, Magic_code);
-               msleep(ms100 * 10);
-       } else {
-               moxafunc(ofsAddr, FC_SendBreak, Magic_code);
-               msleep(250);
-       }
-       moxafunc(ofsAddr, FC_StopBreak, Magic_code);
+       moxafunc(port->tableAddr, FC_SetXonState, Magic_code);
 }
 
 static int moxa_get_serial_info(struct moxa_port *info,
-                               struct serial_struct __user *retinfo)
+               struct serial_struct __user *retinfo)
 {
-       struct serial_struct tmp;
-
-       memset(&tmp, 0, sizeof(tmp));
-       tmp.type = info->type;
-       tmp.line = info->port;
-       tmp.port = 0;
-       tmp.irq = 0;
-       tmp.flags = info->asyncflags;
-       tmp.baud_base = 921600;
-       tmp.close_delay = info->close_delay;
-       tmp.closing_wait = info->closing_wait;
-       tmp.custom_divisor = 0;
-       tmp.hub6 = 0;
-       if(copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
-               return -EFAULT;
-       return (0);
+       struct serial_struct tmp = {
+               .type = info->type,
+               .line = info->tty->index,
+               .flags = info->asyncflags,
+               .baud_base = 921600,
+               .close_delay = info->close_delay
+       };
+       return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0;
 }
 
 
 static int moxa_set_serial_info(struct moxa_port *info,
-                               struct serial_struct __user *new_info)
+               struct serial_struct __user *new_info)
 {
        struct serial_struct new_serial;
 
-       if(copy_from_user(&new_serial, new_info, sizeof(new_serial)))
+       if (copy_from_user(&new_serial, new_info, sizeof(new_serial)))
                return -EFAULT;
 
-       if ((new_serial.irq != 0) ||
-           (new_serial.port != 0) ||
-//           (new_serial.type != info->type) ||
-           (new_serial.custom_divisor != 0) ||
-           (new_serial.baud_base != 921600))
-               return (-EPERM);
+       if (new_serial.irq != 0 || new_serial.port != 0 ||
+                       new_serial.custom_divisor != 0 ||
+                       new_serial.baud_base != 921600)
+               return -EPERM;
 
        if (!capable(CAP_SYS_ADMIN)) {
                if (((new_serial.flags & ~ASYNC_USR_MASK) !=
                     (info->asyncflags & ~ASYNC_USR_MASK)))
-                       return (-EPERM);
-       } else {
+                       return -EPERM;
+       } else
                info->close_delay = new_serial.close_delay * HZ / 100;
-               info->closing_wait = new_serial.closing_wait * HZ / 100;
-       }
 
        new_serial.flags = (new_serial.flags & ~ASYNC_FLAGS);
        new_serial.flags |= (info->asyncflags & ASYNC_FLAGS);
 
-       if (new_serial.type == PORT_16550A) {
-               MoxaSetFifo(info->port, 1);
-       } else {
-               MoxaSetFifo(info->port, 0);
-       }
+       MoxaSetFifo(info, new_serial.type == PORT_16550A);
 
        info->type = new_serial.type;
-       return (0);
+       return 0;
 }
 
 
@@ -2472,374 +2167,10 @@ static int moxa_set_serial_info(struct moxa_port *info,
 /*****************************************************************************
  *     Static local functions:                                              *
  *****************************************************************************/
-static void moxafunc(void __iomem *ofsAddr, int cmd, ushort arg)
-{
-
-       writew(arg, ofsAddr + FuncArg);
-       writew(cmd, ofsAddr + FuncCode);
-       moxa_wait_finish(ofsAddr);
-}
-
-static void moxa_wait_finish(void __iomem *ofsAddr)
-{
-       unsigned long i, j;
-
-       i = jiffies;
-       while (readw(ofsAddr + FuncCode) != 0) {
-               j = jiffies;
-               if ((j - i) > moxaFuncTout) {
-                       return;
-               }
-       }
-}
-
-static void moxa_low_water_check(void __iomem *ofsAddr)
-{
-       int len;
-       ushort rptr, wptr, mask;
-
-       if (readb(ofsAddr + FlagStat) & Xoff_state) {
-               rptr = readw(ofsAddr + RXrptr);
-               wptr = readw(ofsAddr + RXwptr);
-               mask = readw(ofsAddr + RX_mask);
-               len = (wptr - rptr) & mask;
-               if (len <= Low_water)
-                       moxafunc(ofsAddr, FC_SendXon, 0);
-       }
-}
-
-static int moxaloadbios(int cardno, unsigned char __user *tmp, int len)
-{
-       void __iomem *baseAddr;
-       int i;
-
-       if(len < 0 || len > sizeof(moxaBuff))
-               return -EINVAL;
-       if(copy_from_user(moxaBuff, tmp, len))
-               return -EFAULT;
-       baseAddr = moxa_boards[cardno].basemem;
-       writeb(HW_reset, baseAddr + Control_reg);       /* reset */
-       msleep(10);
-       for (i = 0; i < 4096; i++)
-               writeb(0, baseAddr + i);        /* clear fix page */
-       for (i = 0; i < len; i++)
-               writeb(moxaBuff[i], baseAddr + i);      /* download BIOS */
-       writeb(0, baseAddr + Control_reg);      /* restart */
-       return (0);
-}
-
-static int moxafindcard(int cardno)
-{
-       void __iomem *baseAddr;
-       ushort tmp;
-
-       baseAddr = moxa_boards[cardno].basemem;
-       switch (moxa_boards[cardno].boardType) {
-       case MOXA_BOARD_C218_ISA:
-       case MOXA_BOARD_C218_PCI:
-               if ((tmp = readw(baseAddr + C218_key)) != C218_KeyCode) {
-                       return (-1);
-               }
-               break;
-       case MOXA_BOARD_CP204J:
-               if ((tmp = readw(baseAddr + C218_key)) != CP204J_KeyCode) {
-                       return (-1);
-               }
-               break;
-       default:
-               if ((tmp = readw(baseAddr + C320_key)) != C320_KeyCode) {
-                       return (-1);
-               }
-               if ((tmp = readw(baseAddr + C320_status)) != STS_init) {
-                       return (-2);
-               }
-       }
-       return (0);
-}
-
-static int moxaload320b(int cardno, unsigned char __user *tmp, int len)
-{
-       void __iomem *baseAddr;
-       int i;
-
-       if(len < 0 || len > sizeof(moxaBuff))
-               return -EINVAL;
-       if(copy_from_user(moxaBuff, tmp, len))
-               return -EFAULT;
-       baseAddr = moxa_boards[cardno].basemem;
-       writew(len - 7168 - 2, baseAddr + C320bapi_len);
-       writeb(1, baseAddr + Control_reg);      /* Select Page 1 */
-       for (i = 0; i < 7168; i++)
-               writeb(moxaBuff[i], baseAddr + DynPage_addr + i);
-       writeb(2, baseAddr + Control_reg);      /* Select Page 2 */
-       for (i = 0; i < (len - 7168); i++)
-               writeb(moxaBuff[i + 7168], baseAddr + DynPage_addr + i);
-       return (0);
-}
-
-static int moxaloadcode(int cardno, unsigned char __user *tmp, int len)
-{
-       void __iomem *baseAddr, *ofsAddr;
-       int retval, port, i;
-
-       if(len < 0 || len > sizeof(moxaBuff))
-               return -EINVAL;
-       if(copy_from_user(moxaBuff, tmp, len))
-               return -EFAULT;
-       baseAddr = moxa_boards[cardno].basemem;
-       switch (moxa_boards[cardno].boardType) {
-       case MOXA_BOARD_C218_ISA:
-       case MOXA_BOARD_C218_PCI:
-       case MOXA_BOARD_CP204J:
-               retval = moxaloadc218(cardno, baseAddr, len);
-               if (retval)
-                       return (retval);
-               port = cardno * MAX_PORTS_PER_BOARD;
-               for (i = 0; i < moxa_boards[cardno].numPorts; i++, port++) {
-                       struct moxa_port *p = &moxa_ports[port];
-
-                       p->chkPort = 1;
-                       p->curBaud = 9600L;
-                       p->DCDState = 0;
-                       p->tableAddr = baseAddr + Extern_table + Extern_size * i;
-                       ofsAddr = p->tableAddr;
-                       writew(C218rx_mask, ofsAddr + RX_mask);
-                       writew(C218tx_mask, ofsAddr + TX_mask);
-                       writew(C218rx_spage + i * C218buf_pageno, ofsAddr + Page_rxb);
-                       writew(readw(ofsAddr + Page_rxb) + C218rx_pageno, ofsAddr + EndPage_rxb);
-
-                       writew(C218tx_spage + i * C218buf_pageno, ofsAddr + Page_txb);
-                       writew(readw(ofsAddr + Page_txb) + C218tx_pageno, ofsAddr + EndPage_txb);
-
-               }
-               break;
-       default:
-               retval = moxaloadc320(cardno, baseAddr, len,
-                                     &moxa_boards[cardno].numPorts);
-               if (retval)
-                       return (retval);
-               port = cardno * MAX_PORTS_PER_BOARD;
-               for (i = 0; i < moxa_boards[cardno].numPorts; i++, port++) {
-                       struct moxa_port *p = &moxa_ports[port];
-
-                       p->chkPort = 1;
-                       p->curBaud = 9600L;
-                       p->DCDState = 0;
-                       p->tableAddr = baseAddr + Extern_table + Extern_size * i;
-                       ofsAddr = p->tableAddr;
-                       if (moxa_boards[cardno].numPorts == 8) {
-                               writew(C320p8rx_mask, ofsAddr + RX_mask);
-                               writew(C320p8tx_mask, ofsAddr + TX_mask);
-                               writew(C320p8rx_spage + i * C320p8buf_pgno, ofsAddr + Page_rxb);
-                               writew(readw(ofsAddr + Page_rxb) + C320p8rx_pgno, ofsAddr + EndPage_rxb);
-                               writew(C320p8tx_spage + i * C320p8buf_pgno, ofsAddr + Page_txb);
-                               writew(readw(ofsAddr + Page_txb) + C320p8tx_pgno, ofsAddr + EndPage_txb);
-
-                       } else if (moxa_boards[cardno].numPorts == 16) {
-                               writew(C320p16rx_mask, ofsAddr + RX_mask);
-                               writew(C320p16tx_mask, ofsAddr + TX_mask);
-                               writew(C320p16rx_spage + i * C320p16buf_pgno, ofsAddr + Page_rxb);
-                               writew(readw(ofsAddr + Page_rxb) + C320p16rx_pgno, ofsAddr + EndPage_rxb);
-                               writew(C320p16tx_spage + i * C320p16buf_pgno, ofsAddr + Page_txb);
-                               writew(readw(ofsAddr + Page_txb) + C320p16tx_pgno, ofsAddr + EndPage_txb);
-
-                       } else if (moxa_boards[cardno].numPorts == 24) {
-                               writew(C320p24rx_mask, ofsAddr + RX_mask);
-                               writew(C320p24tx_mask, ofsAddr + TX_mask);
-                               writew(C320p24rx_spage + i * C320p24buf_pgno, ofsAddr + Page_rxb);
-                               writew(readw(ofsAddr + Page_rxb) + C320p24rx_pgno, ofsAddr + EndPage_rxb);
-                               writew(C320p24tx_spage + i * C320p24buf_pgno, ofsAddr + Page_txb);
-                               writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb);
-                       } else if (moxa_boards[cardno].numPorts == 32) {
-                               writew(C320p32rx_mask, ofsAddr + RX_mask);
-                               writew(C320p32tx_mask, ofsAddr + TX_mask);
-                               writew(C320p32tx_ofs, ofsAddr + Ofs_txb);
-                               writew(C320p32rx_spage + i * C320p32buf_pgno, ofsAddr + Page_rxb);
-                               writew(readb(ofsAddr + Page_rxb), ofsAddr + EndPage_rxb);
-                               writew(C320p32tx_spage + i * C320p32buf_pgno, ofsAddr + Page_txb);
-                               writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb);
-                       }
-               }
-               break;
-       }
-       moxa_boards[cardno].loadstat = 1;
-       return (0);
-}
-
-static int moxaloadc218(int cardno, void __iomem *baseAddr, int len)
-{
-       char retry;
-       int i, j, len1, len2;
-       ushort usum, *ptr, keycode;
-
-       if (moxa_boards[cardno].boardType == MOXA_BOARD_CP204J)
-               keycode = CP204J_KeyCode;
-       else
-               keycode = C218_KeyCode;
-       usum = 0;
-       len1 = len >> 1;
-       ptr = (ushort *) moxaBuff;
-       for (i = 0; i < len1; i++)
-               usum += le16_to_cpu(*(ptr + i));
-       retry = 0;
-       do {
-               len1 = len >> 1;
-               j = 0;
-               while (len1) {
-                       len2 = (len1 > 2048) ? 2048 : len1;
-                       len1 -= len2;
-                       for (i = 0; i < len2 << 1; i++)
-                               writeb(moxaBuff[i + j], baseAddr + C218_LoadBuf + i);
-                       j += i;
-
-                       writew(len2, baseAddr + C218DLoad_len);
-                       writew(0, baseAddr + C218_key);
-                       for (i = 0; i < 100; i++) {
-                               if (readw(baseAddr + C218_key) == keycode)
-                                       break;
-                               msleep(10);
-                       }
-                       if (readw(baseAddr + C218_key) != keycode) {
-                               return (-1);
-                       }
-               }
-               writew(0, baseAddr + C218DLoad_len);
-               writew(usum, baseAddr + C218check_sum);
-               writew(0, baseAddr + C218_key);
-               for (i = 0; i < 100; i++) {
-                       if (readw(baseAddr + C218_key) == keycode)
-                               break;
-                       msleep(10);
-               }
-               retry++;
-       } while ((readb(baseAddr + C218chksum_ok) != 1) && (retry < 3));
-       if (readb(baseAddr + C218chksum_ok) != 1) {
-               return (-1);
-       }
-       writew(0, baseAddr + C218_key);
-       for (i = 0; i < 100; i++) {
-               if (readw(baseAddr + Magic_no) == Magic_code)
-                       break;
-               msleep(10);
-       }
-       if (readw(baseAddr + Magic_no) != Magic_code) {
-               return (-1);
-       }
-       writew(1, baseAddr + Disable_IRQ);
-       writew(0, baseAddr + Magic_no);
-       for (i = 0; i < 100; i++) {
-               if (readw(baseAddr + Magic_no) == Magic_code)
-                       break;
-               msleep(10);
-       }
-       if (readw(baseAddr + Magic_no) != Magic_code) {
-               return (-1);
-       }
-       moxaCard = 1;
-       moxa_boards[cardno].intNdx = baseAddr + IRQindex;
-       moxa_boards[cardno].intPend = baseAddr + IRQpending;
-       moxa_boards[cardno].intTable = baseAddr + IRQtable;
-       return (0);
-}
-
-static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPorts)
-{
-       ushort usum;
-       int i, j, wlen, len2, retry;
-       ushort *uptr;
-
-       usum = 0;
-       wlen = len >> 1;
-       uptr = (ushort *) moxaBuff;
-       for (i = 0; i < wlen; i++)
-               usum += le16_to_cpu(uptr[i]);
-       retry = 0;
-       j = 0;
-       do {
-               while (wlen) {
-                       if (wlen > 2048)
-                               len2 = 2048;
-                       else
-                               len2 = wlen;
-                       wlen -= len2;
-                       len2 <<= 1;
-                       for (i = 0; i < len2; i++)
-                               writeb(moxaBuff[j + i], baseAddr + C320_LoadBuf + i);
-                       len2 >>= 1;
-                       j += i;
-                       writew(len2, baseAddr + C320DLoad_len);
-                       writew(0, baseAddr + C320_key);
-                       for (i = 0; i < 10; i++) {
-                               if (readw(baseAddr + C320_key) == C320_KeyCode)
-                                       break;
-                               msleep(10);
-                       }
-                       if (readw(baseAddr + C320_key) != C320_KeyCode)
-                               return (-1);
-               }
-               writew(0, baseAddr + C320DLoad_len);
-               writew(usum, baseAddr + C320check_sum);
-               writew(0, baseAddr + C320_key);
-               for (i = 0; i < 10; i++) {
-                       if (readw(baseAddr + C320_key) == C320_KeyCode)
-                               break;
-                       msleep(10);
-               }
-               retry++;
-       } while ((readb(baseAddr + C320chksum_ok) != 1) && (retry < 3));
-       if (readb(baseAddr + C320chksum_ok) != 1)
-               return (-1);
-       writew(0, baseAddr + C320_key);
-       for (i = 0; i < 600; i++) {
-               if (readw(baseAddr + Magic_no) == Magic_code)
-                       break;
-               msleep(10);
-       }
-       if (readw(baseAddr + Magic_no) != Magic_code)
-               return (-100);
-
-       if (moxa_boards[cardno].busType == MOXA_BUS_TYPE_PCI) {         /* ASIC board */
-               writew(0x3800, baseAddr + TMS320_PORT1);
-               writew(0x3900, baseAddr + TMS320_PORT2);
-               writew(28499, baseAddr + TMS320_CLOCK);
-       } else {
-               writew(0x3200, baseAddr + TMS320_PORT1);
-               writew(0x3400, baseAddr + TMS320_PORT2);
-               writew(19999, baseAddr + TMS320_CLOCK);
-       }
-       writew(1, baseAddr + Disable_IRQ);
-       writew(0, baseAddr + Magic_no);
-       for (i = 0; i < 500; i++) {
-               if (readw(baseAddr + Magic_no) == Magic_code)
-                       break;
-               msleep(10);
-       }
-       if (readw(baseAddr + Magic_no) != Magic_code)
-               return (-102);
-
-       j = readw(baseAddr + Module_cnt);
-       if (j <= 0)
-               return (-101);
-       *numPorts = j * 8;
-       writew(j, baseAddr + Module_no);
-       writew(0, baseAddr + Magic_no);
-       for (i = 0; i < 600; i++) {
-               if (readw(baseAddr + Magic_no) == Magic_code)
-                       break;
-               msleep(10);
-       }
-       if (readw(baseAddr + Magic_no) != Magic_code)
-               return (-102);
-       moxaCard = 1;
-       moxa_boards[cardno].intNdx = baseAddr + IRQindex;
-       moxa_boards[cardno].intPend = baseAddr + IRQpending;
-       moxa_boards[cardno].intTable = baseAddr + IRQtable;
-       return (0);
-}
 
-static void MoxaSetFifo(int port, int enable)
+static void MoxaSetFifo(struct moxa_port *port, int enable)
 {
-       void __iomem *ofsAddr = moxa_ports[port].tableAddr;
+       void __iomem *ofsAddr = port->tableAddr;
 
        if (!enable) {
                moxafunc(ofsAddr, FC_SetRxFIFOTrig, 0);
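The moxaloadc218()/moxaloadc320() loaders deleted above drive one and the same handshake with the card: copy up to 2 KB into the load buffer, write the chunk length, clear the key register, then poll until the firmware echoes its keycode (and, once the checksum is accepted, poll Magic_no the same way). A minimal sketch of that poll-with-timeout step, assuming kernel context with <linux/io.h>, <linux/delay.h> and <linux/errno.h> available; wait_for_ack() is an illustrative name, not a function in moxa.c:

/* Hedged sketch: the card acknowledges each step by writing an expected
 * value (keycode or Magic_code) into a dual-ported-RAM word; the host
 * polls it with a bounded retry count, sleeping 10 ms per attempt. */
static int wait_for_ack(void __iomem *reg, u16 expected, unsigned int tries)
{
	while (tries--) {
		if (readw(reg) == expected)
			return 0;		/* firmware acknowledged */
		msleep(10);			/* same 10 ms pacing as the loaders */
	}
	return -ETIMEDOUT;			/* the loaders return -1 here */
}

/* e.g. the per-chunk C218 handshake then reads:
 *	writew(len2, baseAddr + C218DLoad_len);
 *	writew(0, baseAddr + C218_key);
 *	if (wait_for_ack(baseAddr + C218_key, keycode, 100))
 *		return -1;
 */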
diff --git a/drivers/char/moxa.h b/drivers/char/moxa.h
new file mode 100644 (file)
index 0000000..87d16ce
--- /dev/null
@@ -0,0 +1,304 @@
+#ifndef MOXA_H_FILE
+#define MOXA_H_FILE
+
+#define        MOXA            0x400
+#define MOXA_GET_IQUEUE        (MOXA + 1)      /* get input buffered count */
+#define MOXA_GET_OQUEUE        (MOXA + 2)      /* get output buffered count */
+#define MOXA_GETDATACOUNT       (MOXA + 23)
+#define MOXA_GET_IOQUEUE       (MOXA + 27)
+#define MOXA_FLUSH_QUEUE       (MOXA + 28)
+#define MOXA_GETMSTATUS         (MOXA + 65)
+
+/*
+ *    System Configuration
+ */
+
+#define Magic_code     0x404
+
+/*
+ *    for C218 BIOS initialization
+ */
+#define C218_ConfBase  0x800
+#define C218_status    (C218_ConfBase + 0)     /* BIOS running status    */
+#define C218_diag      (C218_ConfBase + 2)     /* diagnostic status      */
+#define C218_key       (C218_ConfBase + 4)     /* WORD (0x218 for C218) */
+#define C218DLoad_len  (C218_ConfBase + 6)     /* WORD           */
+#define C218check_sum  (C218_ConfBase + 8)     /* BYTE           */
+#define C218chksum_ok  (C218_ConfBase + 0x0a)  /* BYTE (1:ok)            */
+#define C218_TestRx    (C218_ConfBase + 0x10)  /* 8 bytes for 8 ports    */
+#define C218_TestTx    (C218_ConfBase + 0x18)  /* 8 bytes for 8 ports    */
+#define C218_RXerr     (C218_ConfBase + 0x20)  /* 8 bytes for 8 ports    */
+#define C218_ErrFlag   (C218_ConfBase + 0x28)  /* 8 bytes for 8 ports    */
+
+#define C218_LoadBuf   0x0F00
+#define C218_KeyCode   0x218
+#define CP204J_KeyCode 0x204
+
+/*
+ *    for C320 BIOS initialization
+ */
+#define C320_ConfBase  0x800
+#define C320_LoadBuf   0x0f00
+#define STS_init       0x05    /* for C320_status        */
+
+#define C320_status    C320_ConfBase + 0       /* BIOS running status    */
+#define C320_diag      C320_ConfBase + 2       /* diagnostic status      */
+#define C320_key       C320_ConfBase + 4       /* WORD (0320H for C320) */
+#define C320DLoad_len  C320_ConfBase + 6       /* WORD           */
+#define C320check_sum  C320_ConfBase + 8       /* WORD           */
+#define C320chksum_ok  C320_ConfBase + 0x0a    /* WORD (1:ok)            */
+#define C320bapi_len   C320_ConfBase + 0x0c    /* WORD           */
+#define C320UART_no    C320_ConfBase + 0x0e    /* WORD           */
+
+#define C320_KeyCode   0x320
+
+#define FixPage_addr   0x0000  /* starting addr of static page  */
+#define DynPage_addr   0x2000  /* starting addr of dynamic page */
+#define C218_start     0x3000  /* starting addr of C218 BIOS prg */
+#define Control_reg    0x1ff0  /* select page and reset control */
+#define HW_reset       0x80
+
+/*
+ *    Function Codes
+ */
+#define FC_CardReset   0x80
+#define FC_ChannelReset 1      /* C320 firmware not supported */
+#define FC_EnableCH    2
+#define FC_DisableCH   3
+#define FC_SetParam    4
+#define FC_SetMode     5
+#define FC_SetRate     6
+#define FC_LineControl 7
+#define FC_LineStatus  8
+#define FC_XmitControl 9
+#define FC_FlushQueue  10
+#define FC_SendBreak   11
+#define FC_StopBreak   12
+#define FC_LoopbackON  13
+#define FC_LoopbackOFF 14
+#define FC_ClrIrqTable 15
+#define FC_SendXon     16
+#define FC_SetTermIrq  17      /* C320 firmware not supported */
+#define FC_SetCntIrq   18      /* C320 firmware not supported */
+#define FC_SetBreakIrq 19
+#define FC_SetLineIrq  20
+#define FC_SetFlowCtl  21
+#define FC_GenIrq      22
+#define FC_InCD180     23
+#define FC_OutCD180    24
+#define FC_InUARTreg   23
+#define FC_OutUARTreg  24
+#define FC_SetXonXoff  25
+#define FC_OutCD180CCR 26
+#define FC_ExtIQueue   27
+#define FC_ExtOQueue   28
+#define FC_ClrLineIrq  29
+#define FC_HWFlowCtl   30
+#define FC_GetClockRate 35
+#define FC_SetBaud     36
+#define FC_SetDataMode  41
+#define FC_GetCCSR      43
+#define FC_GetDataError 45
+#define FC_RxControl   50
+#define FC_ImmSend     51
+#define FC_SetXonState 52
+#define FC_SetXoffState        53
+#define FC_SetRxFIFOTrig 54
+#define FC_SetTxFIFOCnt 55
+#define FC_UnixRate    56
+#define FC_UnixResetTimer 57
+
+#define        RxFIFOTrig1     0
+#define        RxFIFOTrig4     1
+#define        RxFIFOTrig8     2
+#define        RxFIFOTrig14    3
+
+/*
+ *    Dual-Ported RAM
+ */
+#define DRAM_global    0
+#define INT_data       (DRAM_global + 0)
+#define Config_base    (DRAM_global + 0x108)
+
+#define IRQindex       (INT_data + 0)
+#define IRQpending     (INT_data + 4)
+#define IRQtable       (INT_data + 8)
+
+/*
+ *    Interrupt Status
+ */
+#define IntrRx         0x01    /* receiver data O.K.             */
+#define IntrTx         0x02    /* transmit buffer empty  */
+#define IntrFunc       0x04    /* function complete              */
+#define IntrBreak      0x08    /* received break         */
+#define IntrLine       0x10    /* line status change
+                                  for transmitter                */
+#define IntrIntr       0x20    /* received INTR code             */
+#define IntrQuit       0x40    /* received QUIT code             */
+#define IntrEOF        0x80    /* received EOF code              */
+
+#define IntrRxTrigger  0x100   /* rx data count reaches trigger value */
+#define IntrTxTrigger  0x200   /* tx data count below trigger value */
+
+#define Magic_no       (Config_base + 0)
+#define Card_model_no  (Config_base + 2)
+#define Total_ports    (Config_base + 4)
+#define Module_cnt     (Config_base + 8)
+#define Module_no      (Config_base + 10)
+#define Timer_10ms     (Config_base + 14)
+#define Disable_IRQ    (Config_base + 20)
+#define TMS320_PORT1   (Config_base + 22)
+#define TMS320_PORT2   (Config_base + 24)
+#define TMS320_CLOCK   (Config_base + 26)
+
+/*
+ *    DATA BUFFER in DRAM
+ */
+#define Extern_table   0x400   /* Base address of the external table
+                                  (24 words *    64) total 3K bytes
+                                  (24 words * 128) total 6K bytes */
+#define Extern_size    0x60    /* 96 bytes                       */
+#define RXrptr         0x00    /* read pointer for RX buffer     */
+#define RXwptr         0x02    /* write pointer for RX buffer    */
+#define TXrptr         0x04    /* read pointer for TX buffer     */
+#define TXwptr         0x06    /* write pointer for TX buffer    */
+#define HostStat       0x08    /* IRQ flag and general flag      */
+#define FlagStat       0x0A
+#define FlowControl    0x0C    /* B7 B6 B5 B4 B3 B2 B1 B0              */
+                               /*  x  x  x  x  |  |  |  |            */
+                               /*              |  |  |  + CTS flow   */
+                               /*              |  |  +--- RTS flow   */
+                               /*              |  +------ TX Xon/Xoff */
+                               /*              +--------- RX Xon/Xoff */
+#define Break_cnt      0x0E    /* received break count   */
+#define CD180TXirq     0x10    /* if non-0: enable TX irq        */
+#define RX_mask        0x12
+#define TX_mask        0x14
+#define Ofs_rxb        0x16
+#define Ofs_txb        0x18
+#define Page_rxb       0x1A
+#define Page_txb       0x1C
+#define EndPage_rxb    0x1E
+#define EndPage_txb    0x20
+#define Data_error     0x22
+#define RxTrigger      0x28
+#define TxTrigger      0x2a
+
+#define rRXwptr        0x34
+#define Low_water      0x36
+
+#define FuncCode       0x40
+#define FuncArg        0x42
+#define FuncArg1       0x44
+
+#define C218rx_size    0x2000  /* 8K bytes */
+#define C218tx_size    0x8000  /* 32K bytes */
+
+#define C218rx_mask    (C218rx_size - 1)
+#define C218tx_mask    (C218tx_size - 1)
+
+#define C320p8rx_size  0x2000
+#define C320p8tx_size  0x8000
+#define C320p8rx_mask  (C320p8rx_size - 1)
+#define C320p8tx_mask  (C320p8tx_size - 1)
+
+#define C320p16rx_size 0x2000
+#define C320p16tx_size 0x4000
+#define C320p16rx_mask (C320p16rx_size - 1)
+#define C320p16tx_mask (C320p16tx_size - 1)
+
+#define C320p24rx_size 0x2000
+#define C320p24tx_size 0x2000
+#define C320p24rx_mask (C320p24rx_size - 1)
+#define C320p24tx_mask (C320p24tx_size - 1)
+
+#define C320p32rx_size 0x1000
+#define C320p32tx_size 0x1000
+#define C320p32rx_mask (C320p32rx_size - 1)
+#define C320p32tx_mask (C320p32tx_size - 1)
+
+#define Page_size      0x2000U
+#define Page_mask      (Page_size - 1)
+#define C218rx_spage   3
+#define C218tx_spage   4
+#define C218rx_pageno  1
+#define C218tx_pageno  4
+#define C218buf_pageno 5
+
+#define C320p8rx_spage 3
+#define C320p8tx_spage 4
+#define C320p8rx_pgno  1
+#define C320p8tx_pgno  4
+#define C320p8buf_pgno 5
+
+#define C320p16rx_spage 3
+#define C320p16tx_spage 4
+#define C320p16rx_pgno 1
+#define C320p16tx_pgno 2
+#define C320p16buf_pgno 3
+
+#define C320p24rx_spage 3
+#define C320p24tx_spage 4
+#define C320p24rx_pgno 1
+#define C320p24tx_pgno 1
+#define C320p24buf_pgno 2
+
+#define C320p32rx_spage 3
+#define C320p32tx_ofs  C320p32rx_size
+#define C320p32tx_spage 3
+#define C320p32buf_pgno 1
+
+/*
+ *    Host Status
+ */
+#define WakeupRx       0x01
+#define WakeupTx       0x02
+#define WakeupBreak    0x08
+#define WakeupLine     0x10
+#define WakeupIntr     0x20
+#define WakeupQuit     0x40
+#define WakeupEOF      0x80    /* used in VTIME control */
+#define WakeupRxTrigger        0x100
+#define WakeupTxTrigger        0x200
+/*
+ *    Flag status
+ */
+#define Rx_over                0x01
+#define Xoff_state     0x02
+#define Tx_flowOff     0x04
+#define Tx_enable      0x08
+#define CTS_state      0x10
+#define DSR_state      0x20
+#define DCD_state      0x80
+/*
+ *    FlowControl
+ */
+#define CTS_FlowCtl    1
+#define RTS_FlowCtl    2
+#define Tx_FlowCtl     4
+#define Rx_FlowCtl     8
+#define IXM_IXANY      0x10
+
+#define LowWater       128
+
+#define DTR_ON         1
+#define RTS_ON         2
+#define CTS_ON         1
+#define DSR_ON         2
+#define DCD_ON         8
+
+/* mode definition */
+#define        MX_CS8          0x03
+#define        MX_CS7          0x02
+#define        MX_CS6          0x01
+#define        MX_CS5          0x00
+
+#define        MX_STOP1        0x00
+#define        MX_STOP15       0x04
+#define        MX_STOP2        0x08
+
+#define        MX_PARNONE      0x00
+#define        MX_PAREVEN      0x40
+#define        MX_PARODD       0xC0
+
+#endif
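The Extern_table block above describes one 0x60-byte entry per port in dual-ported RAM; RXrptr/RXwptr (and the TX pair) are ring indices that wrap at a board-specific mask such as C218rx_mask or C320p8rx_mask. A hedged sketch of the usual ring arithmetic built from these offsets; moxa_rx_pending_sketch() is illustrative and not a function in moxa.c:

/* Hedged sketch: pending-byte count for one RX ring.  'ofsAddr' points at
 * the port's Extern_table entry, 'mask' is the wrap mask for this board
 * type (power-of-two buffer size minus one). */
static u16 moxa_rx_pending_sketch(void __iomem *ofsAddr, u16 mask)
{
	u16 rptr = readw(ofsAddr + RXrptr);	/* consumer index (host) */
	u16 wptr = readw(ofsAddr + RXwptr);	/* producer index (firmware) */

	return (wptr - rptr) & mask;		/* bytes waiting, modulo wrap */
}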
index ff146c2b08fd65cb6e26498923d4df8b03ab95df..fe2a95b5d3c05e36121110de446e7309098d3f6b 100644 (file)
@@ -180,7 +180,7 @@ mspec_close(struct vm_area_struct *vma)
                my_page = vdata->maddr[index];
                vdata->maddr[index] = 0;
                if (!mspec_zero_block(my_page, PAGE_SIZE))
-                       uncached_free_page(my_page);
+                       uncached_free_page(my_page, 1);
                else
                        printk(KERN_WARNING "mspec_close(): "
                               "failed to zero page %ld\n", my_page);
@@ -209,7 +209,7 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
        index = (address - vdata->vm_start) >> PAGE_SHIFT;
        maddr = (volatile unsigned long) vdata->maddr[index];
        if (maddr == 0) {
-               maddr = uncached_alloc_page(numa_node_id());
+               maddr = uncached_alloc_page(numa_node_id(), 1);
                if (maddr == 0)
                        return NOPFN_OOM;
 
@@ -218,7 +218,7 @@ mspec_nopfn(struct vm_area_struct *vma, unsigned long address)
                        vdata->count++;
                        vdata->maddr[index] = maddr;
                } else {
-                       uncached_free_page(maddr);
+                       uncached_free_page(maddr, 1);
                        maddr = vdata->maddr[index];
                }
                spin_unlock(&vdata->lock);
@@ -367,7 +367,7 @@ mspec_init(void)
                                int nasid;
                                unsigned long phys;
 
-                               scratch_page[nid] = uncached_alloc_page(nid);
+                               scratch_page[nid] = uncached_alloc_page(nid, 1);
                                if (scratch_page[nid] == 0)
                                        goto free_scratch_pages;
                                phys = __pa(scratch_page[nid]);
@@ -414,7 +414,7 @@ mspec_init(void)
  free_scratch_pages:
        for_each_node(nid) {
                if (scratch_page[nid] != 0)
-                       uncached_free_page(scratch_page[nid]);
+                       uncached_free_page(scratch_page[nid], 1);
        }
        return ret;
 }
@@ -431,7 +431,7 @@ mspec_exit(void)
 
                for_each_node(nid) {
                        if (scratch_page[nid] != 0)
-                               uncached_free_page(scratch_page[nid]);
+                               uncached_free_page(scratch_page[nid], 1);
                }
        }
 }
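The mspec hunks above follow an ia64 uncached-allocator API change in which uncached_alloc_page() and uncached_free_page() gained a page-count argument; every caller here passes 1. Assuming the new prototypes take (starting_nid, n_pages) and (uc_addr, n_pages) respectively, the updated call pattern looks like:

/* Hedged sketch of the new calling convention -- illustrative only,
 * not code from mspec.c. */
static int mspec_uncached_example(void)
{
	unsigned long uc = uncached_alloc_page(numa_node_id(), 1); /* one page */

	if (uc == 0)
		return -ENOMEM;		/* allocator exhausted on this node */
	/* ... use the uncached page ... */
	uncached_free_page(uc, 1);	/* free with the same page count */
	return 0;
}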
index 68c2e923469195295ed74c028eb360e2588e9a19..4b81a85c5b53d1002252514b9e95c3cc60c0bb43 100644 (file)
@@ -307,6 +307,200 @@ static unsigned char mxser_msr[MXSER_PORTS + 1];
 static struct mxser_mon_ext mon_data_ext;
 static int mxser_set_baud_method[MXSER_PORTS + 1];
 
+static void mxser_enable_must_enchance_mode(unsigned long baseio)
+{
+       u8 oldlcr;
+       u8 efr;
+
+       oldlcr = inb(baseio + UART_LCR);
+       outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+       efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+       efr |= MOXA_MUST_EFR_EFRB_ENABLE;
+
+       outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+       outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_disable_must_enchance_mode(unsigned long baseio)
+{
+       u8 oldlcr;
+       u8 efr;
+
+       oldlcr = inb(baseio + UART_LCR);
+       outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+       efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+       efr &= ~MOXA_MUST_EFR_EFRB_ENABLE;
+
+       outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+       outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_set_must_xon1_value(unsigned long baseio, u8 value)
+{
+       u8 oldlcr;
+       u8 efr;
+
+       oldlcr = inb(baseio + UART_LCR);
+       outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+       efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+       efr &= ~MOXA_MUST_EFR_BANK_MASK;
+       efr |= MOXA_MUST_EFR_BANK0;
+
+       outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+       outb(value, baseio + MOXA_MUST_XON1_REGISTER);
+       outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_set_must_xoff1_value(unsigned long baseio, u8 value)
+{
+       u8 oldlcr;
+       u8 efr;
+
+       oldlcr = inb(baseio + UART_LCR);
+       outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+       efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+       efr &= ~MOXA_MUST_EFR_BANK_MASK;
+       efr |= MOXA_MUST_EFR_BANK0;
+
+       outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+       outb(value, baseio + MOXA_MUST_XOFF1_REGISTER);
+       outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_set_must_fifo_value(struct mxser_port *info)
+{
+       u8 oldlcr;
+       u8 efr;
+
+       oldlcr = inb(info->ioaddr + UART_LCR);
+       outb(MOXA_MUST_ENTER_ENCHANCE, info->ioaddr + UART_LCR);
+
+       efr = inb(info->ioaddr + MOXA_MUST_EFR_REGISTER);
+       efr &= ~MOXA_MUST_EFR_BANK_MASK;
+       efr |= MOXA_MUST_EFR_BANK1;
+
+       outb(efr, info->ioaddr + MOXA_MUST_EFR_REGISTER);
+       outb((u8)info->rx_high_water, info->ioaddr + MOXA_MUST_RBRTH_REGISTER);
+       outb((u8)info->rx_trigger, info->ioaddr + MOXA_MUST_RBRTI_REGISTER);
+       outb((u8)info->rx_low_water, info->ioaddr + MOXA_MUST_RBRTL_REGISTER);
+       outb(oldlcr, info->ioaddr + UART_LCR);
+}
+
+static void mxser_set_must_enum_value(unsigned long baseio, u8 value)
+{
+       u8 oldlcr;
+       u8 efr;
+
+       oldlcr = inb(baseio + UART_LCR);
+       outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+       efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+       efr &= ~MOXA_MUST_EFR_BANK_MASK;
+       efr |= MOXA_MUST_EFR_BANK2;
+
+       outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+       outb(value, baseio + MOXA_MUST_ENUM_REGISTER);
+       outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_get_must_hardware_id(unsigned long baseio, u8 *pId)
+{
+       u8 oldlcr;
+       u8 efr;
+
+       oldlcr = inb(baseio + UART_LCR);
+       outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+       efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+       efr &= ~MOXA_MUST_EFR_BANK_MASK;
+       efr |= MOXA_MUST_EFR_BANK2;
+
+       outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+       *pId = inb(baseio + MOXA_MUST_HWID_REGISTER);
+       outb(oldlcr, baseio + UART_LCR);
+}
+
+static void SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(unsigned long baseio)
+{
+       u8 oldlcr;
+       u8 efr;
+
+       oldlcr = inb(baseio + UART_LCR);
+       outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+       efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+       efr &= ~MOXA_MUST_EFR_SF_MASK;
+
+       outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+       outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_enable_must_tx_software_flow_control(unsigned long baseio)
+{
+       u8 oldlcr;
+       u8 efr;
+
+       oldlcr = inb(baseio + UART_LCR);
+       outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+       efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+       efr &= ~MOXA_MUST_EFR_SF_TX_MASK;
+       efr |= MOXA_MUST_EFR_SF_TX1;
+
+       outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+       outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_disable_must_tx_software_flow_control(unsigned long baseio)
+{
+       u8 oldlcr;
+       u8 efr;
+
+       oldlcr = inb(baseio + UART_LCR);
+       outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+       efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+       efr &= ~MOXA_MUST_EFR_SF_TX_MASK;
+
+       outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+       outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_enable_must_rx_software_flow_control(unsigned long baseio)
+{
+       u8 oldlcr;
+       u8 efr;
+
+       oldlcr = inb(baseio + UART_LCR);
+       outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+       efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+       efr &= ~MOXA_MUST_EFR_SF_RX_MASK;
+       efr |= MOXA_MUST_EFR_SF_RX1;
+
+       outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+       outb(oldlcr, baseio + UART_LCR);
+}
+
+static void mxser_disable_must_rx_software_flow_control(unsigned long baseio)
+{
+       u8 oldlcr;
+       u8 efr;
+
+       oldlcr = inb(baseio + UART_LCR);
+       outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);
+
+       efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
+       efr &= ~MOXA_MUST_EFR_SF_RX_MASK;
+
+       outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
+       outb(oldlcr, baseio + UART_LCR);
+}
+
 #ifdef CONFIG_PCI
 static int __devinit CheckIsMoxaMust(unsigned long io)
 {
@@ -314,16 +508,16 @@ static int __devinit CheckIsMoxaMust(unsigned long io)
        int i;
 
        outb(0, io + UART_LCR);
-       DISABLE_MOXA_MUST_ENCHANCE_MODE(io);
+       mxser_disable_must_enchance_mode(io);
        oldmcr = inb(io + UART_MCR);
        outb(0, io + UART_MCR);
-       SET_MOXA_MUST_XON1_VALUE(io, 0x11);
+       mxser_set_must_xon1_value(io, 0x11);
        if ((hwid = inb(io + UART_MCR)) != 0) {
                outb(oldmcr, io + UART_MCR);
                return MOXA_OTHER_UART;
        }
 
-       GET_MOXA_MUST_HARDWARE_ID(io, &hwid);
+       mxser_get_must_hardware_id(io, &hwid);
        for (i = 1; i < UART_INFO_NUM; i++) { /* 0 = OTHER_UART */
                if (hwid == Gpci_uart_info[i].type)
                        return (int)hwid;
@@ -494,10 +688,10 @@ static int mxser_set_baud(struct mxser_port *info, long newspd)
                } else
                        quot /= newspd;
 
-               SET_MOXA_MUST_ENUM_VALUE(info->ioaddr, quot);
+               mxser_set_must_enum_value(info->ioaddr, quot);
        } else
 #endif
-               SET_MOXA_MUST_ENUM_VALUE(info->ioaddr, 0);
+               mxser_set_must_enum_value(info->ioaddr, 0);
 
        return 0;
 }
@@ -553,14 +747,14 @@ static int mxser_change_speed(struct mxser_port *info,
                if (info->board->chip_flag) {
                        fcr = UART_FCR_ENABLE_FIFO;
                        fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
-                       SET_MOXA_MUST_FIFO_VALUE(info);
+                       mxser_set_must_fifo_value(info);
                } else
                        fcr = 0;
        } else {
                fcr = UART_FCR_ENABLE_FIFO;
                if (info->board->chip_flag) {
                        fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE;
-                       SET_MOXA_MUST_FIFO_VALUE(info);
+                       mxser_set_must_fifo_value(info);
                } else {
                        switch (info->rx_trigger) {
                        case 1:
@@ -657,17 +851,21 @@ static int mxser_change_speed(struct mxser_port *info,
                }
        }
        if (info->board->chip_flag) {
-               SET_MOXA_MUST_XON1_VALUE(info->ioaddr, START_CHAR(info->tty));
-               SET_MOXA_MUST_XOFF1_VALUE(info->ioaddr, STOP_CHAR(info->tty));
+               mxser_set_must_xon1_value(info->ioaddr, START_CHAR(info->tty));
+               mxser_set_must_xoff1_value(info->ioaddr, STOP_CHAR(info->tty));
                if (I_IXON(info->tty)) {
-                       ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+                       mxser_enable_must_rx_software_flow_control(
+                                       info->ioaddr);
                } else {
-                       DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+                       mxser_disable_must_rx_software_flow_control(
+                                       info->ioaddr);
                }
                if (I_IXOFF(info->tty)) {
-                       ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+                       mxser_enable_must_tx_software_flow_control(
+                                       info->ioaddr);
                } else {
-                       DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+                       mxser_disable_must_tx_software_flow_control(
+                                       info->ioaddr);
                }
        }
 
@@ -927,6 +1125,27 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
        return 0;
 }
 
+static void mxser_flush_buffer(struct tty_struct *tty)
+{
+       struct mxser_port *info = tty->driver_data;
+       char fcr;
+       unsigned long flags;
+
+
+       spin_lock_irqsave(&info->slock, flags);
+       info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
+
+       fcr = inb(info->ioaddr + UART_FCR);
+       outb((fcr | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
+               info->ioaddr + UART_FCR);
+       outb(fcr, info->ioaddr + UART_FCR);
+
+       spin_unlock_irqrestore(&info->slock, flags);
+
+       tty_wakeup(tty);
+}
+
+
 /*
  * This routine is called when the serial port gets closed.  First, we
  * wait for the last remaining data to be sent.  Then, we unlink its
@@ -1013,9 +1232,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp)
        }
        mxser_shutdown(info);
 
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
-
+       mxser_flush_buffer(tty);
        tty_ldisc_flush(tty);
 
        tty->closing = 0;
@@ -1072,16 +1289,16 @@ static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int cou
        return total;
 }
 
-static void mxser_put_char(struct tty_struct *tty, unsigned char ch)
+static int mxser_put_char(struct tty_struct *tty, unsigned char ch)
 {
        struct mxser_port *info = tty->driver_data;
        unsigned long flags;
 
        if (!info->xmit_buf)
-               return;
+               return 0;
 
        if (info->xmit_cnt >= SERIAL_XMIT_SIZE - 1)
-               return;
+               return 0;
 
        spin_lock_irqsave(&info->slock, flags);
        info->xmit_buf[info->xmit_head++] = ch;
@@ -1099,6 +1316,7 @@ static void mxser_put_char(struct tty_struct *tty, unsigned char ch)
                        spin_unlock_irqrestore(&info->slock, flags);
                }
        }
+       return 1;
 }
 
 
@@ -1142,26 +1360,6 @@ static int mxser_chars_in_buffer(struct tty_struct *tty)
        return info->xmit_cnt;
 }
 
-static void mxser_flush_buffer(struct tty_struct *tty)
-{
-       struct mxser_port *info = tty->driver_data;
-       char fcr;
-       unsigned long flags;
-
-
-       spin_lock_irqsave(&info->slock, flags);
-       info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
-
-       fcr = inb(info->ioaddr + UART_FCR);
-       outb((fcr | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT),
-               info->ioaddr + UART_FCR);
-       outb(fcr, info->ioaddr + UART_FCR);
-
-       spin_unlock_irqrestore(&info->slock, flags);
-
-       tty_wakeup(tty);
-}
-
 /*
  * ------------------------------------------------------------
  * friends of mxser_ioctl()
@@ -1460,6 +1658,7 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
        struct mxser_port *port;
        int result, status;
        unsigned int i, j;
+       int ret = 0;
 
        switch (cmd) {
        case MOXA_GET_MAJOR:
@@ -1467,18 +1666,21 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
 
        case MOXA_CHKPORTENABLE:
                result = 0;
-
+               lock_kernel();
                for (i = 0; i < MXSER_BOARDS; i++)
                        for (j = 0; j < MXSER_PORTS_PER_BOARD; j++)
                                if (mxser_boards[i].ports[j].ioaddr)
                                        result |= (1 << i);
-
+               unlock_kernel();
                return put_user(result, (unsigned long __user *)argp);
        case MOXA_GETDATACOUNT:
+               lock_kernel();
                if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log)))
-                       return -EFAULT;
-               return 0;
+                       ret = -EFAULT;
+               unlock_kernel();
+               return ret;
        case MOXA_GETMSTATUS:
+               lock_kernel();
                for (i = 0; i < MXSER_BOARDS; i++)
                        for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
                                port = &mxser_boards[i].ports[j];
@@ -1515,6 +1717,7 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
                                else
                                        GMStatus[i].cts = 0;
                        }
+               unlock_kernel();
                if (copy_to_user(argp, GMStatus,
                                sizeof(struct mxser_mstatus) * MXSER_PORTS))
                        return -EFAULT;
@@ -1524,7 +1727,8 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
                unsigned long opmode;
                unsigned cflag, iflag;
 
-               for (i = 0; i < MXSER_BOARDS; i++)
+               lock_kernel();
+               for (i = 0; i < MXSER_BOARDS; i++) {
                        for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) {
                                port = &mxser_boards[i].ports[j];
                                if (!port->ioaddr)
@@ -1589,13 +1793,14 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
                                mon_data_ext.iftype[i] = opmode;
 
                        }
-                       if (copy_to_user(argp, &mon_data_ext,
-                                               sizeof(mon_data_ext)))
-                               return -EFAULT;
-
-                       return 0;
-
-       } default:
+               }
+               unlock_kernel();
+               if (copy_to_user(argp, &mon_data_ext,
+                                       sizeof(mon_data_ext)))
+                       return -EFAULT;
+               return 0;
+       }
+       default:
                return -ENOIOCTLCMD;
        }
        return 0;
@@ -1651,16 +1856,20 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
                                        opmode != RS422_MODE &&
                                        opmode != RS485_4WIRE_MODE)
                                return -EFAULT;
+                       lock_kernel();
                        mask = ModeMask[p];
                        shiftbit = p * 2;
                        val = inb(info->opmode_ioaddr);
                        val &= mask;
                        val |= (opmode << shiftbit);
                        outb(val, info->opmode_ioaddr);
+                       unlock_kernel();
                } else {
+                       lock_kernel();
                        shiftbit = p * 2;
                        opmode = inb(info->opmode_ioaddr) >> shiftbit;
                        opmode &= OP_MODE_MASK;
+                       unlock_kernel();
                        if (put_user(opmode, (int __user *)argp))
                                return -EFAULT;
                }
@@ -1687,19 +1896,18 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
                tty_wait_until_sent(tty, 0);
                mxser_send_break(info, arg ? arg * (HZ / 10) : HZ / 4);
                return 0;
-       case TIOCGSOFTCAR:
-               return put_user(!!C_CLOCAL(tty), (unsigned long __user *)argp);
-       case TIOCSSOFTCAR:
-               if (get_user(arg, (unsigned long __user *)argp))
-                       return -EFAULT;
-               tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0));
-               return 0;
        case TIOCGSERIAL:
-               return mxser_get_serial_info(info, argp);
+               lock_kernel();
+               retval = mxser_get_serial_info(info, argp);
+               unlock_kernel();
+               return retval;
        case TIOCSSERIAL:
-               return mxser_set_serial_info(info, argp);
+               lock_kernel();
+               retval = mxser_set_serial_info(info, argp);
+               unlock_kernel();
+               return retval;
        case TIOCSERGETLSR:     /* Get line status register */
-               return mxser_get_lsr_info(info, argp);
+               return  mxser_get_lsr_info(info, argp);
                /*
                 * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
                 * - mask passed in arg for lines of interest
@@ -1746,24 +1954,27 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
        case MOXA_HighSpeedOn:
                return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp);
        case MOXA_SDS_RSTICOUNTER:
+               lock_kernel();
                info->mon_data.rxcnt = 0;
                info->mon_data.txcnt = 0;
+               unlock_kernel();
                return 0;
 
        case MOXA_ASPP_OQUEUE:{
                int len, lsr;
 
+               lock_kernel();
                len = mxser_chars_in_buffer(tty);
-
                lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT;
-
                len += (lsr ? 0 : 1);
+               unlock_kernel();
 
                return put_user(len, (int __user *)argp);
        }
        case MOXA_ASPP_MON: {
                int mcr, status;
 
+               lock_kernel();
                status = mxser_get_msr(info->ioaddr, 1, tty->index);
                mxser_check_modem_status(info, status);
 
@@ -1782,7 +1993,7 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file,
                        info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD;
                else
                        info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD;
-
+               unlock_kernel();
                if (copy_to_user(argp, &info->mon_data,
                                sizeof(struct mxser_mon)))
                        return -EFAULT;
@@ -1925,7 +2136,8 @@ static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termi
 
                if (info->board->chip_flag) {
                        spin_lock_irqsave(&info->slock, flags);
-                       DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr);
+                       mxser_disable_must_rx_software_flow_control(
+                                       info->ioaddr);
                        spin_unlock_irqrestore(&info->slock, flags);
                }
 
@@ -1979,6 +2191,7 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
                timeout, char_time);
        printk("jiff=%lu...", jiffies);
 #endif
+       lock_kernel();
        while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) {
 #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
                printk("lsr = %d (jiff=%lu)...", lsr, jiffies);
@@ -1990,6 +2203,7 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout)
                        break;
        }
        set_current_state(TASK_RUNNING);
+       unlock_kernel();
 
 #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
        printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
@@ -2342,7 +2556,7 @@ static int __devinit mxser_initbrd(struct mxser_board *brd,
 
                /* Enhance mode enabled here */
                if (brd->chip_flag != MOXA_OTHER_UART)
-                       ENABLE_MOXA_MUST_ENCHANCE_MODE(info->ioaddr);
+                       mxser_enable_must_enchance_mode(info->ioaddr);
 
                info->flags = ASYNC_SHARE_IRQ;
                info->type = brd->uart_type;
index 844171115954701dd6055fd6d5be9906843486fb..41878a69203d72ae1d3b52830868ff561f1ca83e 100644 (file)
 /* Rx software flow control mask */
 #define MOXA_MUST_EFR_SF_RX_MASK       0x03
 
-#define ENABLE_MOXA_MUST_ENCHANCE_MODE(baseio) do {            \
-       u8      __oldlcr, __efr;                                \
-       __oldlcr = inb((baseio)+UART_LCR);                      \
-       outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR);      \
-       __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER);           \
-       __efr |= MOXA_MUST_EFR_EFRB_ENABLE;                     \
-       outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER);           \
-       outb(__oldlcr, (baseio)+UART_LCR);                      \
-} while (0)
-
-#define DISABLE_MOXA_MUST_ENCHANCE_MODE(baseio) do {           \
-       u8      __oldlcr, __efr;                                \
-       __oldlcr = inb((baseio)+UART_LCR);                      \
-       outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR);      \
-       __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER);           \
-       __efr &= ~MOXA_MUST_EFR_EFRB_ENABLE;                    \
-       outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER);           \
-       outb(__oldlcr, (baseio)+UART_LCR);                      \
-} while (0)
-
-#define SET_MOXA_MUST_XON1_VALUE(baseio, Value) do {           \
-       u8      __oldlcr, __efr;                                \
-       __oldlcr = inb((baseio)+UART_LCR);                      \
-       outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR);      \
-       __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER);           \
-       __efr &= ~MOXA_MUST_EFR_BANK_MASK;                      \
-       __efr |= MOXA_MUST_EFR_BANK0;                           \
-       outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER);           \
-       outb((u8)(Value), (baseio)+MOXA_MUST_XON1_REGISTER);    \
-       outb(__oldlcr, (baseio)+UART_LCR);                      \
-} while (0)
-
-#define SET_MOXA_MUST_XOFF1_VALUE(baseio, Value) do {          \
-       u8      __oldlcr, __efr;                                \
-       __oldlcr = inb((baseio)+UART_LCR);                      \
-       outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR);      \
-       __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER);           \
-       __efr &= ~MOXA_MUST_EFR_BANK_MASK;                      \
-       __efr |= MOXA_MUST_EFR_BANK0;                           \
-       outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER);           \
-       outb((u8)(Value), (baseio)+MOXA_MUST_XOFF1_REGISTER);   \
-       outb(__oldlcr, (baseio)+UART_LCR);                      \
-} while (0)
-
-#define SET_MOXA_MUST_FIFO_VALUE(info) do {                    \
-       u8      __oldlcr, __efr;                                \
-       __oldlcr = inb((info)->ioaddr+UART_LCR);                \
-       outb(MOXA_MUST_ENTER_ENCHANCE, (info)->ioaddr+UART_LCR);\
-       __efr = inb((info)->ioaddr+MOXA_MUST_EFR_REGISTER);     \
-       __efr &= ~MOXA_MUST_EFR_BANK_MASK;                      \
-       __efr |= MOXA_MUST_EFR_BANK1;                           \
-       outb(__efr, (info)->ioaddr+MOXA_MUST_EFR_REGISTER);     \
-       outb((u8)((info)->rx_high_water), (info)->ioaddr+       \
-                       MOXA_MUST_RBRTH_REGISTER);              \
-       outb((u8)((info)->rx_trigger), (info)->ioaddr+          \
-                       MOXA_MUST_RBRTI_REGISTER);              \
-       outb((u8)((info)->rx_low_water), (info)->ioaddr+        \
-                       MOXA_MUST_RBRTL_REGISTER);              \
-       outb(__oldlcr, (info)->ioaddr+UART_LCR);                \
-} while (0)
-
-#define SET_MOXA_MUST_ENUM_VALUE(baseio, Value) do {           \
-       u8      __oldlcr, __efr;                                \
-       __oldlcr = inb((baseio)+UART_LCR);                      \
-       outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR);      \
-       __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER);           \
-       __efr &= ~MOXA_MUST_EFR_BANK_MASK;                      \
-       __efr |= MOXA_MUST_EFR_BANK2;                           \
-       outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER);           \
-       outb((u8)(Value), (baseio)+MOXA_MUST_ENUM_REGISTER);    \
-       outb(__oldlcr, (baseio)+UART_LCR);                      \
-} while (0)
-
-#define GET_MOXA_MUST_HARDWARE_ID(baseio, pId) do {            \
-       u8      __oldlcr, __efr;                                \
-       __oldlcr = inb((baseio)+UART_LCR);                      \
-       outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR);      \
-       __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER);           \
-       __efr &= ~MOXA_MUST_EFR_BANK_MASK;                      \
-       __efr |= MOXA_MUST_EFR_BANK2;                           \
-       outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER);           \
-       *pId = inb((baseio)+MOXA_MUST_HWID_REGISTER);           \
-       outb(__oldlcr, (baseio)+UART_LCR);                      \
-} while (0)
-
-#define SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(baseio) do {    \
-       u8      __oldlcr, __efr;                                \
-       __oldlcr = inb((baseio)+UART_LCR);                      \
-       outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR);      \
-       __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER);           \
-       __efr &= ~MOXA_MUST_EFR_SF_MASK;                        \
-       outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER);           \
-       outb(__oldlcr, (baseio)+UART_LCR);                      \
-} while (0)
-
-#define ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) do { \
-       u8      __oldlcr, __efr;                                \
-       __oldlcr = inb((baseio)+UART_LCR);                      \
-       outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR);      \
-       __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER);           \
-       __efr &= ~MOXA_MUST_EFR_SF_TX_MASK;                     \
-       __efr |= MOXA_MUST_EFR_SF_TX1;                          \
-       outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER);           \
-       outb(__oldlcr, (baseio)+UART_LCR);                      \
-} while (0)
-
-#define DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) do {        \
-       u8      __oldlcr, __efr;                                \
-       __oldlcr = inb((baseio)+UART_LCR);                      \
-       outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR);      \
-       __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER);           \
-       __efr &= ~MOXA_MUST_EFR_SF_TX_MASK;                     \
-       outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER);           \
-       outb(__oldlcr, (baseio)+UART_LCR);                      \
-} while (0)
-
-#define ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) do { \
-       u8      __oldlcr, __efr;                                \
-       __oldlcr = inb((baseio)+UART_LCR);                      \
-       outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR);      \
-       __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER);           \
-       __efr &= ~MOXA_MUST_EFR_SF_RX_MASK;                     \
-       __efr |= MOXA_MUST_EFR_SF_RX1;                          \
-       outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER);           \
-       outb(__oldlcr, (baseio)+UART_LCR);                      \
-} while (0)
-
-#define DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) do {        \
-       u8      __oldlcr, __efr;                                \
-       __oldlcr = inb((baseio)+UART_LCR);                      \
-       outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR);      \
-       __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER);           \
-       __efr &= ~MOXA_MUST_EFR_SF_RX_MASK;                     \
-       outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER);           \
-       outb(__oldlcr, (baseio)+UART_LCR);                      \
-} while (0)
-
 #endif
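Each macro removed above was converted into one of the mxser_*_must_* functions added to mxser.c earlier in this diff, and all of them perform the same sequence: save LCR, write MOXA_MUST_ENTER_ENCHANCE to expose the extended registers, read-modify-write EFR (optionally selecting a register bank first), then restore LCR. A hedged sketch of that shared step; mxser_must_modify_efr() is an illustrative name, not a helper in the driver:

/* Hedged sketch of the EFR read-modify-write all the converted helpers
 * share.  'clear' and 'set' stand for the bank/flow-control bits each
 * helper touches. */
static void mxser_must_modify_efr(unsigned long baseio, u8 clear, u8 set)
{
	u8 oldlcr = inb(baseio + UART_LCR);		/* remember current LCR */
	u8 efr;

	outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); /* unlock EFR bank */
	efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
	efr &= ~clear;				/* e.g. MOXA_MUST_EFR_SF_TX_MASK */
	efr |= set;				/* e.g. MOXA_MUST_EFR_SF_TX1 */
	outb(efr, baseio + MOXA_MUST_EFR_REGISTER);
	outb(oldlcr, baseio + UART_LCR);		/* leave enhanced-register mode */
}

/* e.g. mxser_enable_must_tx_software_flow_control(io) is then equivalent to
 *	mxser_must_modify_efr(io, MOXA_MUST_EFR_SF_TX_MASK, MOXA_MUST_EFR_SF_TX1);
 */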
index 82bcfb9c839adb62a4cca24c10c55c20c8c72cf4..a35bfd7ee80ecca131d37e8ea0798390ed5896d6 100644 (file)
@@ -342,12 +342,10 @@ static int n_hdlc_tty_open (struct tty_struct *tty)
 #endif
        
        /* Flush any pending characters in the driver and discipline. */
-       
        if (tty->ldisc.flush_buffer)
-               tty->ldisc.flush_buffer (tty);
+               tty->ldisc.flush_buffer(tty);
 
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer (tty);
+       tty_driver_flush_buffer(tty);
                
        if (debuglevel >= DEBUG_LEVEL_INFO)     
                printk("%s(%d)n_hdlc_tty_open() success\n",__FILE__,__LINE__);
@@ -399,7 +397,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
                        
                /* Send the next block of data to device */
                tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
-               actual = tty->driver->write(tty, tbuf->buf, tbuf->count);
+               actual = tty->ops->write(tty, tbuf->buf, tbuf->count);
 
                /* rollback was possible and has been done */
                if (actual == -ERESTARTSYS) {
@@ -501,7 +499,7 @@ static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
                        __FILE__,__LINE__, count);
                
        /* This can happen if stuff comes in on the backup tty */
-       if (n_hdlc == 0 || tty != n_hdlc->tty)
+       if (!n_hdlc || tty != n_hdlc->tty)
                return;
                
        /* verify line is using HDLC discipline */
@@ -578,26 +576,36 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
                return -EFAULT;
        }
 
+       lock_kernel();
+
        for (;;) {
-               if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+               if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
+                       unlock_kernel();
                        return -EIO;
+               }
 
                n_hdlc = tty2n_hdlc (tty);
                if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC ||
-                        tty != n_hdlc->tty)
+                        tty != n_hdlc->tty) {
+                       unlock_kernel();
                        return 0;
+               }
 
                rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list);
                if (rbuf)
                        break;
                        
                /* no data */
-               if (file->f_flags & O_NONBLOCK)
+               if (file->f_flags & O_NONBLOCK) {
+                       unlock_kernel();
                        return -EAGAIN;
+               }
                        
                interruptible_sleep_on (&tty->read_wait);
-               if (signal_pending(current))
+               if (signal_pending(current)) {
+                       unlock_kernel();
                        return -EINTR;
+               }
        }
                
        if (rbuf->count > nr)
@@ -618,7 +626,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
                kfree(rbuf);
        else    
                n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,rbuf);
-       
+       unlock_kernel();
        return ret;
        
 }      /* end of n_hdlc_tty_read() */
@@ -661,6 +669,8 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
                count = maxframe;
        }
        
+       lock_kernel();
+
        add_wait_queue(&tty->write_wait, &wait);
        set_current_state(TASK_INTERRUPTIBLE);
        
@@ -695,7 +705,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file,
                n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf);
                n_hdlc_send_frames(n_hdlc,tty);
        }
-
+       unlock_kernel();
        return error;
        
 }      /* end of n_hdlc_tty_write() */
@@ -740,8 +750,7 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file,
 
        case TIOCOUTQ:
                /* get the pending tx byte count in the driver */
-               count = tty->driver->chars_in_buffer ?
-                               tty->driver->chars_in_buffer(tty) : 0;
+               count = tty_chars_in_buffer(tty);
                /* add size of next output frame in queue */
                spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags);
                if (n_hdlc->tx_buf_list.head)
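The n_hdlc hunks above replace direct tty->driver method calls with the tty core accessors (tty->ops->write(), tty_driver_flush_buffer(), tty_chars_in_buffer()), which tolerate a driver that does not implement the optional method, so the NULL checks being dropped here are no longer needed. A hedged sketch of that accessor style; send_if_room() is an illustrative helper, not part of n_hdlc.c:

/* Hedged sketch: query the driver through the tty core instead of
 * dereferencing tty->driver directly, then write through tty->ops. */
static int send_if_room(struct tty_struct *tty, const unsigned char *buf, int count)
{
	if (tty_write_room(tty) < count)	/* how much the driver can take now */
		return 0;			/* caller retries on the next wakeup */
	return tty->ops->write(tty, buf, count); /* bytes the driver accepted */
}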
index 6b918b80f73e6af785eeb34fdefa27e75ca32780..902169062332483a8449a4f68b8c69756600b544 100644 (file)
@@ -376,8 +376,9 @@ static void put_char(struct r3964_info *pInfo, unsigned char ch)
        if (tty == NULL)
                return;
 
-       if (tty->driver->put_char) {
-               tty->driver->put_char(tty, ch);
+       /* FIXME: put_char should not be called from an IRQ */
+       if (tty->ops->put_char) {
+               tty->ops->put_char(tty, ch);
        }
        pInfo->bcc ^= ch;
 }
@@ -386,12 +387,9 @@ static void flush(struct r3964_info *pInfo)
 {
        struct tty_struct *tty = pInfo->tty;
 
-       if (tty == NULL)
+       if (tty == NULL || tty->ops->flush_chars == NULL)
                return;
-
-       if (tty->driver->flush_chars) {
-               tty->driver->flush_chars(tty);
-       }
+       tty->ops->flush_chars(tty);
 }
 
 static void trigger_transmit(struct r3964_info *pInfo)
@@ -449,12 +447,11 @@ static void transmit_block(struct r3964_info *pInfo)
        struct r3964_block_header *pBlock = pInfo->tx_first;
        int room = 0;
 
-       if ((tty == NULL) || (pBlock == NULL)) {
+       if (tty == NULL || pBlock == NULL) {
                return;
        }
 
-       if (tty->driver->write_room)
-               room = tty->driver->write_room(tty);
+       room = tty_write_room(tty);
 
        TRACE_PS("transmit_block %p, room %d, length %d",
                 pBlock, room, pBlock->length);
@@ -1075,12 +1072,15 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
 
        TRACE_L("read()");
 
+       lock_kernel();
+
        pClient = findClient(pInfo, task_pid(current));
        if (pClient) {
                pMsg = remove_msg(pInfo, pClient);
                if (pMsg == NULL) {
                        /* no messages available. */
                        if (file->f_flags & O_NONBLOCK) {
+                               unlock_kernel();
                                return -EAGAIN;
                        }
                        /* block until there is a message: */
@@ -1090,8 +1090,10 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
 
                /* If we still haven't got a message, we must have been signalled */
 
-               if (!pMsg)
+               if (!pMsg) {
+                       unlock_kernel();
                        return -EINTR;
+               }
 
                /* deliver msg to client process: */
                theMsg.msg_id = pMsg->msg_id;
@@ -1102,12 +1104,15 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
                kfree(pMsg);
                TRACE_M("r3964_read - msg kfree %p", pMsg);
 
-               if (copy_to_user(buf, &theMsg, count))
+               if (copy_to_user(buf, &theMsg, count)) {
+                       unlock_kernel();
                        return -EFAULT;
+               }
 
                TRACE_PS("read - return %d", count);
                return count;
        }
+       unlock_kernel();
        return -EPERM;
 }
 
@@ -1156,6 +1161,8 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
        pHeader->locks = 0;
        pHeader->owner = NULL;
 
+       lock_kernel();
+
        pClient = findClient(pInfo, task_pid(current));
        if (pClient) {
                pHeader->owner = pClient;
@@ -1173,6 +1180,8 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
        add_tx_queue(pInfo, pHeader);
        trigger_transmit(pInfo);
 
+       unlock_kernel();
+
        return 0;
 }
 
index 0c09409fa45d4dc5f13a212c0211bab5fb307ae7..19105ec203f7afe611edfdc10c99bf50741ccecc 100644 (file)
@@ -147,10 +147,8 @@ static void put_tty_queue(unsigned char c, struct tty_struct *tty)
 
 static void check_unthrottle(struct tty_struct *tty)
 {
-       if (tty->count &&
-           test_and_clear_bit(TTY_THROTTLED, &tty->flags) &&
-           tty->driver->unthrottle)
-               tty->driver->unthrottle(tty);
+       if (tty->count)
+               tty_unthrottle(tty);
 }
 
 /**
@@ -183,22 +181,24 @@ static void reset_buffer_flags(struct tty_struct *tty)
  *     at hangup) or when the N_TTY line discipline internally has to
  *     clean the pending queue (for example some signals).
  *
- *     FIXME: tty->ctrl_status is not spinlocked and relies on
- *     lock_kernel() still.
+ *     Locking: ctrl_lock
  */
 
 static void n_tty_flush_buffer(struct tty_struct *tty)
 {
+       unsigned long flags;
        /* clear everything and unthrottle the driver */
        reset_buffer_flags(tty);
 
        if (!tty->link)
                return;
 
+       spin_lock_irqsave(&tty->ctrl_lock, flags);
        if (tty->link->packet) {
                tty->ctrl_status |= TIOCPKT_FLUSHREAD;
                wake_up_interruptible(&tty->link->read_wait);
        }
+       spin_unlock_irqrestore(&tty->ctrl_lock, flags);
 }
 
 /**
@@ -264,17 +264,18 @@ static inline int is_continuation(unsigned char c, struct tty_struct *tty)
  *     relevant in the world today. If you ever need them, add them here.
  *
  *     Called from both the receive and transmit sides and can be called
- *     re-entrantly. Relies on lock_kernel() still.
+ *     re-entrantly. Relies on lock_kernel() for tty->column state.
  */
 
 static int opost(unsigned char c, struct tty_struct *tty)
 {
        int     space, spaces;
 
-       space = tty->driver->write_room(tty);
+       space = tty_write_room(tty);
        if (!space)
                return -1;
 
+       lock_kernel();
        if (O_OPOST(tty)) {
                switch (c) {
                case '\n':
@@ -283,7 +284,7 @@ static int opost(unsigned char c, struct tty_struct *tty)
                        if (O_ONLCR(tty)) {
                                if (space < 2)
                                        return -1;
-                               tty->driver->put_char(tty, '\r');
+                               tty_put_char(tty, '\r');
                                tty->column = 0;
                        }
                        tty->canon_column = tty->column;
@@ -305,7 +306,7 @@ static int opost(unsigned char c, struct tty_struct *tty)
                                if (space < spaces)
                                        return -1;
                                tty->column += spaces;
-                               tty->driver->write(tty, "        ", spaces);
+                               tty->ops->write(tty, "        ", spaces);
                                return 0;
                        }
                        tty->column += spaces;
@@ -322,7 +323,8 @@ static int opost(unsigned char c, struct tty_struct *tty)
                        break;
                }
        }
-       tty->driver->put_char(tty, c);
+       tty_put_char(tty, c);
+       unlock_kernel();
        return 0;
 }
 
@@ -337,7 +339,8 @@ static int opost(unsigned char c, struct tty_struct *tty)
  *     the simple cases normally found and helps to generate blocks of
  *     symbols for the console driver and thus improve performance.
  *
- *     Called from write_chan under the tty layer write lock.
+ *     Called from write_chan under the tty layer write lock. Relies
+ *     on lock_kernel for the tty->column state.
  */
 
 static ssize_t opost_block(struct tty_struct *tty,
@@ -347,12 +350,13 @@ static ssize_t opost_block(struct tty_struct *tty,
        int     i;
        const unsigned char *cp;
 
-       space = tty->driver->write_room(tty);
+       space = tty_write_room(tty);
        if (!space)
                return 0;
        if (nr > space)
                nr = space;
 
+       lock_kernel();
        for (i = 0, cp = buf; i < nr; i++, cp++) {
                switch (*cp) {
                case '\n':
@@ -384,26 +388,14 @@ static ssize_t opost_block(struct tty_struct *tty,
                }
        }
 break_out:
-       if (tty->driver->flush_chars)
-               tty->driver->flush_chars(tty);
-       i = tty->driver->write(tty, buf, i);
+       if (tty->ops->flush_chars)
+               tty->ops->flush_chars(tty);
+       i = tty->ops->write(tty, buf, i);
+       unlock_kernel();
        return i;
 }
 
 
-/**
- *     put_char        -       write character to driver
- *     @c: character (or part of unicode symbol)
- *     @tty: terminal device
- *
- *     Queue a byte to the driver layer for output
- */
-
-static inline void put_char(unsigned char c, struct tty_struct *tty)
-{
-       tty->driver->put_char(tty, c);
-}
-
 /**
  *     echo_char       -       echo characters
  *     @c: unicode byte to echo
@@ -416,8 +408,8 @@ static inline void put_char(unsigned char c, struct tty_struct *tty)
 static void echo_char(unsigned char c, struct tty_struct *tty)
 {
        if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t') {
-               put_char('^', tty);
-               put_char(c ^ 0100, tty);
+               tty_put_char(tty, '^');
+               tty_put_char(tty, c ^ 0100);
                tty->column += 2;
        } else
                opost(c, tty);
@@ -426,7 +418,7 @@ static void echo_char(unsigned char c, struct tty_struct *tty)
 static inline void finish_erasing(struct tty_struct *tty)
 {
        if (tty->erasing) {
-               put_char('/', tty);
+               tty_put_char(tty, '/');
                tty->column++;
                tty->erasing = 0;
        }
@@ -510,7 +502,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
                if (L_ECHO(tty)) {
                        if (L_ECHOPRT(tty)) {
                                if (!tty->erasing) {
-                                       put_char('\\', tty);
+                                       tty_put_char(tty, '\\');
                                        tty->column++;
                                        tty->erasing = 1;
                                }
@@ -518,7 +510,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
                                echo_char(c, tty);
                                while (--cnt > 0) {
                                        head = (head+1) & (N_TTY_BUF_SIZE-1);
-                                       put_char(tty->read_buf[head], tty);
+                                       tty_put_char(tty, tty->read_buf[head]);
                                }
                        } else if (kill_type == ERASE && !L_ECHOE(tty)) {
                                echo_char(ERASE_CHAR(tty), tty);
@@ -546,22 +538,22 @@ static void eraser(unsigned char c, struct tty_struct *tty)
                                /* Now backup to that column. */
                                while (tty->column > col) {
                                        /* Can't use opost here. */
-                                       put_char('\b', tty);
+                                       tty_put_char(tty, '\b');
                                        if (tty->column > 0)
                                                tty->column--;
                                }
                        } else {
                                if (iscntrl(c) && L_ECHOCTL(tty)) {
-                                       put_char('\b', tty);
-                                       put_char(' ', tty);
-                                       put_char('\b', tty);
+                                       tty_put_char(tty, '\b');
+                                       tty_put_char(tty, ' ');
+                                       tty_put_char(tty, '\b');
                                        if (tty->column > 0)
                                                tty->column--;
                                }
                                if (!iscntrl(c) || L_ECHOCTL(tty)) {
-                                       put_char('\b', tty);
-                                       put_char(' ', tty);
-                                       put_char('\b', tty);
+                                       tty_put_char(tty, '\b');
+                                       tty_put_char(tty, ' ');
+                                       tty_put_char(tty, '\b');
                                        if (tty->column > 0)
                                                tty->column--;
                                }
@@ -592,8 +584,7 @@ static inline void isig(int sig, struct tty_struct *tty, int flush)
                kill_pgrp(tty->pgrp, sig, 1);
        if (flush || !L_NOFLSH(tty)) {
                n_tty_flush_buffer(tty);
-               if (tty->driver->flush_buffer)
-                       tty->driver->flush_buffer(tty);
+               tty_driver_flush_buffer(tty);
        }
 }
 
@@ -701,7 +692,7 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
 
        if (tty->stopped && !tty->flow_stopped && I_IXON(tty) &&
            ((I_IXANY(tty) && c != START_CHAR(tty) && c != STOP_CHAR(tty)) ||
-            c == INTR_CHAR(tty) || c == QUIT_CHAR(tty)))
+            c == INTR_CHAR(tty) || c == QUIT_CHAR(tty) || c == SUSP_CHAR(tty)))
                start_tty(tty);
 
        if (tty->closing) {
@@ -725,7 +716,7 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
                tty->lnext = 0;
                if (L_ECHO(tty)) {
                        if (tty->read_cnt >= N_TTY_BUF_SIZE-1) {
-                               put_char('\a', tty); /* beep if no space */
+                               tty_put_char(tty, '\a'); /* beep if no space */
                                return;
                        }
                        /* Record the column of first canon char. */
@@ -739,13 +730,6 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
                return;
        }
 
-       if (c == '\r') {
-               if (I_IGNCR(tty))
-                       return;
-               if (I_ICRNL(tty))
-                       c = '\n';
-       } else if (c == '\n' && I_INLCR(tty))
-               c = '\r';
        if (I_IXON(tty)) {
                if (c == START_CHAR(tty)) {
                        start_tty(tty);
@@ -756,6 +740,7 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c)
                        return;
                }
        }
+
        if (L_ISIG(tty)) {
                int signal;
                signal = SIGINT;
@@ -775,8 +760,7 @@ send_signal:
                         */
                        if (!L_NOFLSH(tty)) {
                                n_tty_flush_buffer(tty);
-                               if (tty->driver->flush_buffer)
-                                       tty->driver->flush_buffer(tty);
+                               tty_driver_flush_buffer(tty);
                        }
                        if (L_ECHO(tty))
                                echo_char(c, tty);
@@ -785,6 +769,15 @@ send_signal:
                        return;
                }
        }
+
+       if (c == '\r') {
+               if (I_IGNCR(tty))
+                       return;
+               if (I_ICRNL(tty))
+                       c = '\n';
+       } else if (c == '\n' && I_INLCR(tty))
+               c = '\r';
+
        if (tty->icanon) {
                if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) ||
                    (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) {
@@ -796,8 +789,8 @@ send_signal:
                        if (L_ECHO(tty)) {
                                finish_erasing(tty);
                                if (L_ECHOCTL(tty)) {
-                                       put_char('^', tty);
-                                       put_char('\b', tty);
+                                       tty_put_char(tty, '^');
+                                       tty_put_char(tty, '\b');
                                }
                        }
                        return;
@@ -818,7 +811,7 @@ send_signal:
                if (c == '\n') {
                        if (L_ECHO(tty) || L_ECHONL(tty)) {
                                if (tty->read_cnt >= N_TTY_BUF_SIZE-1)
-                                       put_char('\a', tty);
+                                       tty_put_char(tty, '\a');
                                opost('\n', tty);
                        }
                        goto handle_newline;
@@ -836,7 +829,7 @@ send_signal:
                         */
                        if (L_ECHO(tty)) {
                                if (tty->read_cnt >= N_TTY_BUF_SIZE-1)
-                                       put_char('\a', tty);
+                                       tty_put_char(tty, '\a');
                                /* Record the column of first canon char. */
                                if (tty->canon_head == tty->read_head)
                                        tty->canon_column = tty->column;
@@ -866,7 +859,7 @@ handle_newline:
        finish_erasing(tty);
        if (L_ECHO(tty)) {
                if (tty->read_cnt >= N_TTY_BUF_SIZE-1) {
-                       put_char('\a', tty); /* beep if no space */
+                       tty_put_char(tty, '\a'); /* beep if no space */
                        return;
                }
                if (c == '\n')
@@ -970,8 +963,8 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
                                break;
                        }
                }
-               if (tty->driver->flush_chars)
-                       tty->driver->flush_chars(tty);
+               if (tty->ops->flush_chars)
+                       tty->ops->flush_chars(tty);
        }
 
        n_tty_set_room(tty);
@@ -987,12 +980,8 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
         * mode.  We don't want to throttle the driver if we're in
         * canonical mode and don't have a newline yet!
         */
-       if (tty->receive_room < TTY_THRESHOLD_THROTTLE) {
-               /* check TTY_THROTTLED first so it indicates our state */
-               if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) &&
-                   tty->driver->throttle)
-                       tty->driver->throttle(tty);
-       }
+       if (tty->receive_room < TTY_THRESHOLD_THROTTLE)
+               tty_throttle(tty);
 }
 
 int is_ignored(int sig)
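
The receive-side hunk just above replaces the open-coded throttle test with the new tty_throttle() helper, and check_unthrottle() earlier does the same with tty_unthrottle(). Reconstructed from the lines being deleted here (so approximate, not quoted from tty_io.c), the helper centralizes that test along these lines:

#include <linux/tty.h>
#include <linux/bitops.h>

/* approximate shape of tty_throttle(); TTY_THROTTLED still records
 * whether the driver's throttle method has already been called */
static void example_tty_throttle(struct tty_struct *tty)
{
	/* check TTY_THROTTLED first so it indicates our state */
	if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) &&
	    tty->ops->throttle)
		tty->ops->throttle(tty);
}

tty_unthrottle() is the symmetric operation: clear the bit and call the driver's unthrottle method if one is provided.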
@@ -1076,6 +1065,9 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
                        tty->real_raw = 0;
        }
        n_tty_set_room(tty);
+       /* The termios change makes the tty ready for I/O */
+       wake_up_interruptible(&tty->write_wait);
+       wake_up_interruptible(&tty->read_wait);
 }
 
 /**
@@ -1194,6 +1186,11 @@ extern ssize_t redirected_tty_write(struct file *, const char __user *,
  *     Perform job control management checks on this file/tty descriptor
  *     and if appropriate send any needed signals and return a negative
  *     error code if action should be taken.
+ *
+ *     FIXME:
+ *     Locking: None - redirected write test is safe, testing
+ *     current->signal should possibly lock current->sighand
+ *     pgrp locking ?
  */
 
 static int job_control(struct tty_struct *tty, struct file *file)
@@ -1246,6 +1243,7 @@ static ssize_t read_chan(struct tty_struct *tty, struct file *file,
        ssize_t size;
        long timeout;
        unsigned long flags;
+       int packet;
 
 do_it_again:
 
@@ -1289,16 +1287,19 @@ do_it_again:
                if (mutex_lock_interruptible(&tty->atomic_read_lock))
                        return -ERESTARTSYS;
        }
+       packet = tty->packet;
 
        add_wait_queue(&tty->read_wait, &wait);
        while (nr) {
                /* First test for status change. */
-               if (tty->packet && tty->link->ctrl_status) {
+               if (packet && tty->link->ctrl_status) {
                        unsigned char cs;
                        if (b != buf)
                                break;
+                       spin_lock_irqsave(&tty->link->ctrl_lock, flags);
                        cs = tty->link->ctrl_status;
                        tty->link->ctrl_status = 0;
+                       spin_unlock_irqrestore(&tty->link->ctrl_lock, flags);
                        if (tty_put_user(tty, cs, b++)) {
                                retval = -EFAULT;
                                b--;
@@ -1333,6 +1334,7 @@ do_it_again:
                                retval = -ERESTARTSYS;
                                break;
                        }
+                       /* FIXME: does n_tty_set_room need locking ? */
                        n_tty_set_room(tty);
                        timeout = schedule_timeout(timeout);
                        continue;
@@ -1340,7 +1342,7 @@ do_it_again:
                __set_current_state(TASK_RUNNING);
 
                /* Deal with packet mode. */
-               if (tty->packet && b == buf) {
+               if (packet && b == buf) {
                        if (tty_put_user(tty, TIOCPKT_DATA, b++)) {
                                retval = -EFAULT;
                                b--;
@@ -1388,6 +1390,8 @@ do_it_again:
                                break;
                } else {
                        int uncopied;
+                       /* The copy function takes the read lock and handles
+                          locking internally for this case */
                        uncopied = copy_from_read_buf(tty, &b, &nr);
                        uncopied += copy_from_read_buf(tty, &b, &nr);
                        if (uncopied) {
@@ -1429,7 +1433,6 @@ do_it_again:
                 goto do_it_again;
 
        n_tty_set_room(tty);
-
        return retval;
 }
 
@@ -1492,11 +1495,11 @@ static ssize_t write_chan(struct tty_struct *tty, struct file *file,
                                        break;
                                b++; nr--;
                        }
-                       if (tty->driver->flush_chars)
-                               tty->driver->flush_chars(tty);
+                       if (tty->ops->flush_chars)
+                               tty->ops->flush_chars(tty);
                } else {
                        while (nr > 0) {
-                               c = tty->driver->write(tty, b, nr);
+                               c = tty->ops->write(tty, b, nr);
                                if (c < 0) {
                                        retval = c;
                                        goto break_out;
@@ -1533,11 +1536,6 @@ break_out:
  *
  *     This code must be sure never to sleep through a hangup.
  *     Called without the kernel lock held - fine
- *
- *     FIXME: if someone changes the VMIN or discipline settings for the
- *     terminal while another process is in poll() the poll does not
- *     recompute the new limits. Possibly set_termios should issue
- *     a read wakeup to fix this bug.
  */
 
 static unsigned int normal_poll(struct tty_struct *tty, struct file *file,
@@ -1561,9 +1559,9 @@ static unsigned int normal_poll(struct tty_struct *tty, struct file *file,
                else
                        tty->minimum_to_wake = 1;
        }
-       if (!tty_is_writelocked(tty) &&
-                       tty->driver->chars_in_buffer(tty) < WAKEUP_CHARS &&
-                       tty->driver->write_room(tty) > 0)
+       if (tty->ops->write && !tty_is_writelocked(tty) &&
+                       tty_chars_in_buffer(tty) < WAKEUP_CHARS &&
+                       tty_write_room(tty) > 0)
                mask |= POLLOUT | POLLWRNORM;
        return mask;
 }
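
Throughout n_tty.c above, the FIXME about ctrl_status relying on lock_kernel() is resolved by the per-tty ctrl_lock spinlock: writers set the packet-mode status bits under the lock, and the packet-mode reader snapshots and clears them under the same lock. A minimal sketch of that pattern, using only fields shown in the hunks above (the function names are illustrative):

#include <linux/tty.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* writer side: flag a flush to a packet-mode reader */
static void example_flag_flush(struct tty_struct *tty)
{
	unsigned long flags;

	spin_lock_irqsave(&tty->ctrl_lock, flags);
	tty->ctrl_status |= TIOCPKT_FLUSHREAD;
	wake_up_interruptible(&tty->read_wait);
	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
}

/* reader side: atomically take and clear the pending status byte */
static unsigned char example_take_status(struct tty_struct *tty)
{
	unsigned long flags;
	unsigned char cs;

	spin_lock_irqsave(&tty->ctrl_lock, flags);
	cs = tty->ctrl_status;
	tty->ctrl_status = 0;
	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
	return cs;
}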
index 6a6843a0a6742ba15b44cbf47fe93856ed71b26f..66a0f931c66ca91bf06b56e0b789edb8a0d68204 100644 (file)
@@ -73,7 +73,7 @@ do {                                                          \
        char tmp[P_BUF_SIZE];                                   \
        snprintf(tmp, sizeof(tmp), ##args);                     \
        printk(_err_flag_ "[%d] %s(): %s\n", __LINE__,          \
-               __FUNCTION__, tmp);                             \
+               __func__, tmp);                         \
 } while (0)
 
 #define DBG1(args...) D_(0x01, ##args)
@@ -1407,7 +1407,7 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
        /* Find out what card type it is */
        nozomi_get_card_type(dc);
 
-       dc->base_addr = ioremap(start, dc->card_type);
+       dc->base_addr = ioremap_nocache(start, dc->card_type);
        if (!dc->base_addr) {
                dev_err(&pdev->dev, "Unable to map card MMIO\n");
                ret = -ENODEV;
@@ -1724,6 +1724,8 @@ static int ntty_tiocmget(struct tty_struct *tty, struct file *file)
        const struct ctrl_dl *ctrl_dl = &port->ctrl_dl;
        const struct ctrl_ul *ctrl_ul = &port->ctrl_ul;
 
+       /* Note: these could change under us but it is not clear this
+          matters if so */
        return  (ctrl_ul->RTS ? TIOCM_RTS : 0) |
                (ctrl_ul->DTR ? TIOCM_DTR : 0) |
                (ctrl_dl->DCD ? TIOCM_CAR : 0) |
@@ -1849,16 +1851,6 @@ static void ntty_throttle(struct tty_struct *tty)
        spin_unlock_irqrestore(&dc->spin_mutex, flags);
 }
 
-/* just to discard single character writes */
-static void ntty_put_char(struct tty_struct *tty, unsigned char c)
-{
-       /*
-        * card does not react correct when we write single chars
-        * to the card, so we discard them
-        */
-       DBG2("PUT CHAR Function: %c", c);
-}
-
 /* Returns number of chars in buffer, called by tty layer */
 static s32 ntty_chars_in_buffer(struct tty_struct *tty)
 {
@@ -1892,7 +1884,6 @@ static const struct tty_operations tty_ops = {
        .unthrottle = ntty_unthrottle,
        .throttle = ntty_throttle,
        .chars_in_buffer = ntty_chars_in_buffer,
-       .put_char = ntty_put_char,
        .tiocmget = ntty_tiocmget,
        .tiocmset = ntty_tiocmset,
 };
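
The nozomi probe change above switches the register window from ioremap() to ioremap_nocache(), so MMIO accesses are never served from a cached mapping. A generic sketch of that idiom for a PCI BAR; this is not the nozomi probe path and the names are illustrative:

#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *example_map_bar0(struct pci_dev *pdev, u32 *first_reg)
{
	resource_size_t start = pci_resource_start(pdev, 0);
	resource_size_t len = pci_resource_len(pdev, 0);
	void __iomem *base;

	base = ioremap_nocache(start, len);	/* uncached MMIO mapping */
	if (!base)
		return NULL;

	*first_reg = readl(base);		/* always reaches the device */
	return base;
}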
index 454d7324ba40b19ef3ce1396b3f09dfb0aeaff9e..4a933d4134231cd095a859f286a0d8c7e08e434e 100644 (file)
@@ -53,7 +53,7 @@ module_param(pc_debug, int, 0600);
 #define DEBUGP(n, rdr, x, args...) do {                                \
        if (pc_debug >= (n))                                            \
                dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x,     \
-                          __FUNCTION__ , ## args);                     \
+                          __func__ , ## args);                 \
        } while (0)
 #else
 #define DEBUGP(n, rdr, x, args...)
index 5f291bf739a627901dbad1fbebfe970311f538a3..035084c07329cd87bfa42da3a70e6fe624b3c201 100644 (file)
@@ -47,7 +47,7 @@ module_param(pc_debug, int, 0600);
 #define DEBUGP(n, rdr, x, args...) do {                                \
        if (pc_debug >= (n))                                            \
                dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x,     \
-                          __FUNCTION__ , ##args);                      \
+                          __func__ , ##args);                  \
        } while (0)
 #else
 #define DEBUGP(n, rdr, x, args...)
index 1f978ff87fa8a2ddeece885d0773de41f071558f..fa9d3c945f311a779702cdcb33dcdf63e155deef 100644 (file)
@@ -354,32 +354,6 @@ struct ipw_rx_packet {
        unsigned int channel_idx;
 };
 
-#ifdef IPWIRELESS_STATE_DEBUG
-int ipwireless_dump_hardware_state(char *p, size_t limit,
-                                  struct ipw_hardware *hw)
-{
-       return snprintf(p, limit,
-                       "debug: initializing=%d\n"
-                       "debug: tx_ready=%d\n"
-                       "debug: tx_queued=%d\n"
-                       "debug: rx_ready=%d\n"
-                       "debug: rx_bytes_queued=%d\n"
-                       "debug: blocking_rx=%d\n"
-                       "debug: removed=%d\n"
-                       "debug: hardware.shutting_down=%d\n"
-                       "debug: to_setup=%d\n",
-                       hw->initializing,
-                       hw->tx_ready,
-                       hw->tx_queued,
-                       hw->rx_ready,
-                       hw->rx_bytes_queued,
-                       hw->blocking_rx,
-                       hw->removed,
-                       hw->shutting_down,
-                       hw->to_setup);
-}
-#endif
-
 static char *data_type(const unsigned char *buf, unsigned length)
 {
        struct nl_packet_header *hdr = (struct nl_packet_header *) buf;
index c83190ffb0e74d7642d25f14bbac2141c12d5523..19ce5eb266b19fcc5150c1b0d821547a493eda3d 100644 (file)
@@ -58,7 +58,5 @@ void ipwireless_init_hardware_v1(struct ipw_hardware *hw,
                                 void *reboot_cb_data);
 void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw);
 void ipwireless_sleep(unsigned int tenths);
-int ipwireless_dump_hardware_state(char *p, size_t limit,
-                                  struct ipw_hardware *hw);
 
 #endif
index d793e68b3e0d913030402978e46a05eedd8abf40..fe914d34f7f6f63e10e452c775d364b1d2ba1930 100644 (file)
@@ -63,21 +63,6 @@ struct ipw_network {
        struct work_struct work_go_offline;
 };
 
-
-#ifdef IPWIRELESS_STATE_DEBUG
-int ipwireless_dump_network_state(char *p, size_t limit,
-                                 struct ipw_network *network)
-{
-       return snprintf(p, limit,
-                       "debug: ppp_blocked=%d\n"
-                       "debug: outgoing_packets_queued=%d\n"
-                       "debug: network.shutting_down=%d\n",
-                       network->ppp_blocked,
-                       network->outgoing_packets_queued,
-                       network->shutting_down);
-}
-#endif
-
 static void notify_packet_sent(void *callback_data, unsigned int packet_length)
 {
        struct ipw_network *network = callback_data;
index b0e1e952fd14c7c155b7d66e28ccf02665093c63..ccacd26fc7efdf3c9836e80aa3a536190359a809 100644 (file)
@@ -49,7 +49,4 @@ void ipwireless_ppp_close(struct ipw_network *net);
 int ipwireless_ppp_channel_index(struct ipw_network *net);
 int ipwireless_ppp_unit_number(struct ipw_network *net);
 
-int ipwireless_dump_network_state(char *p, size_t limit,
-                                 struct ipw_network *net);
-
 #endif
index 4e84d233e5a295b8cb5c0e43b051e6133197efbe..1dd0e992c83d9ceddc777b3d6b6cc892e25044f8 100644 (file)
@@ -189,20 +189,20 @@ typedef struct _mgslpc_info {
 
        u32 pending_bh;
 
-       int bh_running;
-       int bh_requested;
+       bool bh_running;
+       bool bh_requested;
 
        int dcd_chkcount; /* check counts to prevent */
        int cts_chkcount; /* too many IRQs if a signal */
        int dsr_chkcount; /* is floating */
        int ri_chkcount;
 
-       int rx_enabled;
-       int rx_overflow;
+       bool rx_enabled;
+       bool rx_overflow;
 
-       int tx_enabled;
-       int tx_active;
-       int tx_aborting;
+       bool tx_enabled;
+       bool tx_active;
+       bool tx_aborting;
        u32 idle_mode;
 
        int if_mode; /* serial interface selection (RS-232, v.35 etc) */
@@ -216,12 +216,12 @@ typedef struct _mgslpc_info {
 
        unsigned char serial_signals;   /* current serial signal states */
 
-       char irq_occurred;              /* for diagnostics use */
+       bool irq_occurred;              /* for diagnostics use */
        char testing_irq;
        unsigned int init_error;        /* startup error (DIAGS)        */
 
        char flag_buf[MAX_ASYNC_BUFFER_SIZE];
-       BOOLEAN drop_rts_on_tx_done;
+       bool drop_rts_on_tx_done;
 
        struct  _input_signal_events    input_signal_events;
 
@@ -402,8 +402,8 @@ static void hdlcdev_exit(MGSLPC_INFO *info);
 
 static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit);
 
-static BOOLEAN register_test(MGSLPC_INFO *info);
-static BOOLEAN irq_test(MGSLPC_INFO *info);
+static bool register_test(MGSLPC_INFO *info);
+static bool irq_test(MGSLPC_INFO *info);
 static int adapter_test(MGSLPC_INFO *info);
 
 static int claim_resources(MGSLPC_INFO *info);
@@ -411,7 +411,7 @@ static void release_resources(MGSLPC_INFO *info);
 static void mgslpc_add_device(MGSLPC_INFO *info);
 static void mgslpc_remove_device(MGSLPC_INFO *info);
 
-static int  rx_get_frame(MGSLPC_INFO *info);
+static bool rx_get_frame(MGSLPC_INFO *info);
 static void rx_reset_buffers(MGSLPC_INFO *info);
 static int  rx_alloc_buffers(MGSLPC_INFO *info);
 static void rx_free_buffers(MGSLPC_INFO *info);
@@ -503,20 +503,9 @@ static void* mgslpc_get_text_ptr(void)
  * The wrappers maintain line discipline references
  * while calling into the line discipline.
  *
- * ldisc_flush_buffer - flush line discipline receive buffers
  * ldisc_receive_buf  - pass receive data to line discipline
  */
 
-static void ldisc_flush_buffer(struct tty_struct *tty)
-{
-       struct tty_ldisc *ld = tty_ldisc_ref(tty);
-       if (ld) {
-               if (ld->flush_buffer)
-                       ld->flush_buffer(tty);
-               tty_ldisc_deref(ld);
-       }
-}
-
 static void ldisc_receive_buf(struct tty_struct *tty,
                              const __u8 *data, char *flags, int count)
 {
@@ -719,7 +708,7 @@ static int mgslpc_resume(struct pcmcia_device *link)
 }
 
 
-static inline int mgslpc_paranoia_check(MGSLPC_INFO *info,
+static inline bool mgslpc_paranoia_check(MGSLPC_INFO *info,
                                        char *name, const char *routine)
 {
 #ifdef MGSLPC_PARANOIA_CHECK
@@ -730,17 +719,17 @@ static inline int mgslpc_paranoia_check(MGSLPC_INFO *info,
 
        if (!info) {
                printk(badinfo, name, routine);
-               return 1;
+               return true;
        }
        if (info->magic != MGSLPC_MAGIC) {
                printk(badmagic, name, routine);
-               return 1;
+               return true;
        }
 #else
        if (!info)
-               return 1;
+               return true;
 #endif
-       return 0;
+       return false;
 }
 
 
@@ -752,16 +741,16 @@ static inline int mgslpc_paranoia_check(MGSLPC_INFO *info,
 #define CMD_TXEOM       BIT1   // transmit end message
 #define CMD_TXRESET     BIT0   // transmit reset
 
-static BOOLEAN wait_command_complete(MGSLPC_INFO *info, unsigned char channel)
+static bool wait_command_complete(MGSLPC_INFO *info, unsigned char channel)
 {
        int i = 0;
        /* wait for command completion */
        while (read_reg(info, (unsigned char)(channel+STAR)) & BIT2) {
                udelay(1);
                if (i++ == 1000)
-                       return FALSE;
+                       return false;
        }
-       return TRUE;
+       return true;
 }
 
 static void issue_command(MGSLPC_INFO *info, unsigned char channel, unsigned char cmd)
@@ -825,8 +814,8 @@ static int bh_action(MGSLPC_INFO *info)
 
        if (!rc) {
                /* Mark BH routine as complete */
-               info->bh_running   = 0;
-               info->bh_requested = 0;
+               info->bh_running = false;
+               info->bh_requested = false;
        }
 
        spin_unlock_irqrestore(&info->lock,flags);
@@ -846,7 +835,7 @@ static void bh_handler(struct work_struct *work)
                printk( "%s(%d):bh_handler(%s) entry\n",
                        __FILE__,__LINE__,info->device_name);
 
-       info->bh_running = 1;
+       info->bh_running = true;
 
        while((action = bh_action(info)) != 0) {
 
@@ -913,7 +902,7 @@ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom)
                /* no more free buffers */
                issue_command(info, CHA, CMD_RXRESET);
                info->pending_bh |= BH_RECEIVE;
-               info->rx_overflow = 1;
+               info->rx_overflow = true;
                info->icount.buf_overrun++;
                return;
        }
@@ -1032,8 +1021,8 @@ static void tx_done(MGSLPC_INFO *info)
        if (!info->tx_active)
                return;
 
-       info->tx_active = 0;
-       info->tx_aborting = 0;
+       info->tx_active = false;
+       info->tx_aborting = false;
 
        if (info->params.mode == MGSL_MODE_ASYNC)
                return;
@@ -1047,7 +1036,7 @@ static void tx_done(MGSLPC_INFO *info)
                        info->serial_signals &= ~SerialSignal_RTS;
                        set_signals(info);
                }
-               info->drop_rts_on_tx_done = 0;
+               info->drop_rts_on_tx_done = false;
        }
 
 #if SYNCLINK_GENERIC_HDLC
@@ -1081,7 +1070,7 @@ static void tx_ready(MGSLPC_INFO *info)
                        return;
                }
                if (!info->tx_count)
-                       info->tx_active = 0;
+                       info->tx_active = false;
        }
 
        if (!info->tx_count)
@@ -1261,7 +1250,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
                {
                        isr = read_reg16(info, CHA + ISR);
                        if (isr & IRQ_TIMER) {
-                               info->irq_occurred = 1;
+                               info->irq_occurred = true;
                                irq_disable(info, CHA, IRQ_TIMER);
                        }
 
@@ -1318,7 +1307,7 @@ static irqreturn_t mgslpc_isr(int dummy, void *dev_id)
                        printk("%s(%d):%s queueing bh task.\n",
                                __FILE__,__LINE__,info->device_name);
                schedule_work(&info->task);
-               info->bh_requested = 1;
+               info->bh_requested = true;
        }
 
        spin_unlock(&info->lock);
@@ -1556,7 +1545,7 @@ static void mgslpc_change_params(MGSLPC_INFO *info)
 
 /* Add a character to the transmit buffer
  */
-static void mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
+static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
 {
        MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
        unsigned long flags;
@@ -1567,10 +1556,10 @@ static void mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
        }
 
        if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char"))
-               return;
+               return 0;
 
        if (!info->tx_buf)
-               return;
+               return 0;
 
        spin_lock_irqsave(&info->lock,flags);
 
@@ -1583,6 +1572,7 @@ static void mgslpc_put_char(struct tty_struct *tty, unsigned char ch)
        }
 
        spin_unlock_irqrestore(&info->lock,flags);
+       return 1;
 }
 
 /* Enable transmitter so remaining characters in the
@@ -1990,7 +1980,7 @@ static int tx_abort(MGSLPC_INFO * info)
                 * This results in underrun and abort transmission.
                 */
                info->tx_count = info->tx_put = info->tx_get = 0;
-               info->tx_aborting = TRUE;
+               info->tx_aborting = true;
        }
        spin_unlock_irqrestore(&info->lock,flags);
        return 0;
@@ -2467,10 +2457,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
        if (info->flags & ASYNC_INITIALIZED)
                mgslpc_wait_until_sent(tty, info->timeout);
 
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       mgslpc_flush_buffer(tty);
 
-       ldisc_flush_buffer(tty);
+       tty_ldisc_flush(tty);
 
        shutdown(info);
 
@@ -2589,7 +2578,8 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
 {
        DECLARE_WAITQUEUE(wait, current);
        int             retval;
-       int             do_clocal = 0, extra_count = 0;
+       bool            do_clocal = false;
+       bool            extra_count = false;
        unsigned long   flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
@@ -2604,7 +2594,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
        }
 
        if (tty->termios->c_cflag & CLOCAL)
-               do_clocal = 1;
+               do_clocal = true;
 
        /* Wait for carrier detect and the line to become
         * free (i.e., not in use by the callout).  While we are in
@@ -2622,7 +2612,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
 
        spin_lock_irqsave(&info->lock, flags);
        if (!tty_hung_up_p(filp)) {
-               extra_count = 1;
+               extra_count = true;
                info->count--;
        }
        spin_unlock_irqrestore(&info->lock, flags);
@@ -3493,8 +3483,8 @@ static void rx_stop(MGSLPC_INFO *info)
        /* MODE:03 RAC Receiver Active, 0=inactive */
        clear_reg_bits(info, CHA + MODE, BIT3);
 
-       info->rx_enabled = 0;
-       info->rx_overflow = 0;
+       info->rx_enabled = false;
+       info->rx_overflow = false;
 }
 
 static void rx_start(MGSLPC_INFO *info)
@@ -3504,13 +3494,13 @@ static void rx_start(MGSLPC_INFO *info)
                         __FILE__,__LINE__, info->device_name );
 
        rx_reset_buffers(info);
-       info->rx_enabled = 0;
-       info->rx_overflow = 0;
+       info->rx_enabled = false;
+       info->rx_overflow = false;
 
        /* MODE:03 RAC Receiver Active, 1=active */
        set_reg_bits(info, CHA + MODE, BIT3);
 
-       info->rx_enabled = 1;
+       info->rx_enabled = true;
 }
 
 static void tx_start(MGSLPC_INFO *info)
@@ -3523,24 +3513,24 @@ static void tx_start(MGSLPC_INFO *info)
                /* If auto RTS enabled and RTS is inactive, then assert */
                /* RTS and set a flag indicating that the driver should */
                /* negate RTS when the transmission completes. */
-               info->drop_rts_on_tx_done = 0;
+               info->drop_rts_on_tx_done = false;
 
                if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
                        get_signals(info);
                        if (!(info->serial_signals & SerialSignal_RTS)) {
                                info->serial_signals |= SerialSignal_RTS;
                                set_signals(info);
-                               info->drop_rts_on_tx_done = 1;
+                               info->drop_rts_on_tx_done = true;
                        }
                }
 
                if (info->params.mode == MGSL_MODE_ASYNC) {
                        if (!info->tx_active) {
-                               info->tx_active = 1;
+                               info->tx_active = true;
                                tx_ready(info);
                        }
                } else {
-                       info->tx_active = 1;
+                       info->tx_active = true;
                        tx_ready(info);
                        mod_timer(&info->tx_timer, jiffies +
                                        msecs_to_jiffies(5000));
@@ -3548,7 +3538,7 @@ static void tx_start(MGSLPC_INFO *info)
        }
 
        if (!info->tx_enabled)
-               info->tx_enabled = 1;
+               info->tx_enabled = true;
 }
 
 static void tx_stop(MGSLPC_INFO *info)
@@ -3559,8 +3549,8 @@ static void tx_stop(MGSLPC_INFO *info)
 
        del_timer(&info->tx_timer);
 
-       info->tx_enabled = 0;
-       info->tx_active  = 0;
+       info->tx_enabled = false;
+       info->tx_active = false;
 }
 
 /* Reset the adapter to a known state and prepare it for further use.
@@ -3860,19 +3850,19 @@ static void rx_reset_buffers(MGSLPC_INFO *info)
 /* Attempt to return a received HDLC frame
  * Only frames received without errors are returned.
  *
- * Returns 1 if frame returned, otherwise 0
+ * Returns true if frame returned, otherwise false
  */
-static int rx_get_frame(MGSLPC_INFO *info)
+static bool rx_get_frame(MGSLPC_INFO *info)
 {
        unsigned short status;
        RXBUF *buf;
        unsigned int framesize = 0;
        unsigned long flags;
        struct tty_struct *tty = info->tty;
-       int return_frame = 0;
+       bool return_frame = false;
 
        if (info->rx_frame_count == 0)
-               return 0;
+               return false;
 
        buf = (RXBUF*)(info->rx_buf + (info->rx_get * info->rx_buf_size));
 
@@ -3891,7 +3881,7 @@ static int rx_get_frame(MGSLPC_INFO *info)
                else if (!(status & BIT5)) {
                        info->icount.rxcrc++;
                        if (info->params.crc_type & HDLC_CRC_RETURN_EX)
-                               return_frame = 1;
+                               return_frame = true;
                }
                framesize = 0;
 #if SYNCLINK_GENERIC_HDLC
@@ -3902,7 +3892,7 @@ static int rx_get_frame(MGSLPC_INFO *info)
                }
 #endif
        } else
-               return_frame = 1;
+               return_frame = true;
 
        if (return_frame)
                framesize = buf->count;
@@ -3945,16 +3935,16 @@ static int rx_get_frame(MGSLPC_INFO *info)
                info->rx_get = 0;
        spin_unlock_irqrestore(&info->lock,flags);
 
-       return 1;
+       return true;
 }
 
-static BOOLEAN register_test(MGSLPC_INFO *info)
+static bool register_test(MGSLPC_INFO *info)
 {
        static unsigned char patterns[] =
            { 0x00, 0xff, 0xaa, 0x55, 0x69, 0x96, 0x0f };
        static unsigned int count = ARRAY_SIZE(patterns);
        unsigned int i;
-       BOOLEAN rc = TRUE;
+       bool rc = true;
        unsigned long flags;
 
        spin_lock_irqsave(&info->lock,flags);
@@ -3965,7 +3955,7 @@ static BOOLEAN register_test(MGSLPC_INFO *info)
                write_reg(info, XAD2, patterns[(i + 1) % count]);
                if ((read_reg(info, XAD1) != patterns[i]) ||
                    (read_reg(info, XAD2) != patterns[(i + 1) % count])) {
-                       rc = FALSE;
+                       rc = false;
                        break;
                }
        }
@@ -3974,7 +3964,7 @@ static BOOLEAN register_test(MGSLPC_INFO *info)
        return rc;
 }
 
-static BOOLEAN irq_test(MGSLPC_INFO *info)
+static bool irq_test(MGSLPC_INFO *info)
 {
        unsigned long end_time;
        unsigned long flags;
@@ -3982,10 +3972,10 @@ static BOOLEAN irq_test(MGSLPC_INFO *info)
        spin_lock_irqsave(&info->lock,flags);
        reset_device(info);
 
-       info->testing_irq = TRUE;
+       info->testing_irq = true;
        hdlc_mode(info);
 
-       info->irq_occurred = FALSE;
+       info->irq_occurred = false;
 
        /* init hdlc mode */
 
@@ -4000,13 +3990,13 @@ static BOOLEAN irq_test(MGSLPC_INFO *info)
                msleep_interruptible(10);
        }
 
-       info->testing_irq = FALSE;
+       info->testing_irq = false;
 
        spin_lock_irqsave(&info->lock,flags);
        reset_device(info);
        spin_unlock_irqrestore(&info->lock,flags);
 
-       return info->irq_occurred ? TRUE : FALSE;
+       return info->irq_occurred;
 }
 
 static int adapter_test(MGSLPC_INFO *info)
@@ -4079,7 +4069,7 @@ static void tx_timeout(unsigned long context)
                info->icount.txtimeout++;
        }
        spin_lock_irqsave(&info->lock,flags);
-       info->tx_active = 0;
+       info->tx_active = false;
        info->tx_count = info->tx_put = info->tx_get = 0;
 
        spin_unlock_irqrestore(&info->lock,flags);
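
mgslpc_put_char() above is converted to the new tty_operations convention in which put_char returns an int: the driver reports whether the character was actually queued (1) or had to be dropped (0) instead of failing silently. A minimal sketch of a driver put_char under that convention; the port structure and buffer fields are hypothetical:

#include <linux/tty.h>
#include <linux/spinlock.h>

struct example_port {
	spinlock_t lock;
	unsigned char buf[256];
	unsigned int count;		/* characters currently queued */
};

/* return 1 if the character was queued, 0 if there was no room */
static int example_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct example_port *port = tty->driver_data;
	unsigned long flags;
	int queued = 0;

	spin_lock_irqsave(&port->lock, flags);
	if (port->count < sizeof(port->buf)) {
		port->buf[port->count++] = ch;
		queued = 1;
	}
	spin_unlock_irqrestore(&port->lock, flags);
	return queued;
}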
index 706ff34728f19382be4215e25e3052bd2b1ba371..0a05c038ae6f168236c0b5de08ddb79a9de4e45d 100644 (file)
@@ -181,6 +181,7 @@ static int pty_set_lock(struct tty_struct *tty, int __user * arg)
 static void pty_flush_buffer(struct tty_struct *tty)
 {
        struct tty_struct *to = tty->link;
+       unsigned long flags;
        
        if (!to)
                return;
@@ -189,8 +190,10 @@ static void pty_flush_buffer(struct tty_struct *tty)
                to->ldisc.flush_buffer(to);
        
        if (to->packet) {
+               spin_lock_irqsave(&tty->ctrl_lock, flags);
                tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
                wake_up_interruptible(&to->read_wait);
+               spin_unlock_irqrestore(&tty->ctrl_lock, flags);
        }
 }
 
@@ -251,6 +254,18 @@ static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file,
 static int legacy_count = CONFIG_LEGACY_PTY_COUNT;
 module_param(legacy_count, int, 0);
 
+static const struct tty_operations pty_ops_bsd = {
+       .open = pty_open,
+       .close = pty_close,
+       .write = pty_write,
+       .write_room = pty_write_room,
+       .flush_buffer = pty_flush_buffer,
+       .chars_in_buffer = pty_chars_in_buffer,
+       .unthrottle = pty_unthrottle,
+       .set_termios = pty_set_termios,
+       .ioctl = pty_bsd_ioctl,
+};
+
 static void __init legacy_pty_init(void)
 {
        if (legacy_count <= 0)
@@ -281,7 +296,6 @@ static void __init legacy_pty_init(void)
        pty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW;
        pty_driver->other = pty_slave_driver;
        tty_set_operations(pty_driver, &pty_ops);
-       pty_driver->ioctl = pty_bsd_ioctl;
 
        pty_slave_driver->owner = THIS_MODULE;
        pty_slave_driver->driver_name = "pty_slave";
@@ -374,6 +388,19 @@ static int pty_unix98_ioctl(struct tty_struct *tty, struct file *file,
        return -ENOIOCTLCMD;
 }
 
+static const struct tty_operations pty_unix98_ops = {
+       .open = pty_open,
+       .close = pty_close,
+       .write = pty_write,
+       .write_room = pty_write_room,
+       .flush_buffer = pty_flush_buffer,
+       .chars_in_buffer = pty_chars_in_buffer,
+       .unthrottle = pty_unthrottle,
+       .set_termios = pty_set_termios,
+       .ioctl = pty_unix98_ioctl
+};
+
+
 static void __init unix98_pty_init(void)
 {
        ptm_driver = alloc_tty_driver(NR_UNIX98_PTY_MAX);
@@ -400,8 +427,7 @@ static void __init unix98_pty_init(void)
        ptm_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW |
                TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_DEVPTS_MEM;
        ptm_driver->other = pts_driver;
-       tty_set_operations(ptm_driver, &pty_ops);
-       ptm_driver->ioctl = pty_unix98_ioctl;
+       tty_set_operations(ptm_driver, &pty_unix98_ops);
 
        pts_driver->owner = THIS_MODULE;
        pts_driver->driver_name = "pty_slave";
index f43c89f7c449831b2441f5f8f55441f2253a65ce..0cf98bd4f2d2438351357890a2cee00062b6c6da 100644 (file)
@@ -272,7 +272,7 @@ static int random_write_wakeup_thresh = 128;
 
 static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
 
-static DEFINE_PER_CPU(int, trickle_count) = 0;
+static DEFINE_PER_CPU(int, trickle_count);
 
 /*
  * A pool of size .poolwords is stirred with a primitive polynomial
@@ -370,17 +370,19 @@ static struct poolinfo {
  */
 static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
 static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+static struct fasync_struct *fasync;
 
 #if 0
-static int debug = 0;
+static int debug;
 module_param(debug, bool, 0644);
-#define DEBUG_ENT(fmt, arg...) do { if (debug) \
-       printk(KERN_DEBUG "random %04d %04d %04d: " \
-       fmt,\
-       input_pool.entropy_count,\
-       blocking_pool.entropy_count,\
-       nonblocking_pool.entropy_count,\
-       ## arg); } while (0)
+#define DEBUG_ENT(fmt, arg...) do { \
+       if (debug) \
+               printk(KERN_DEBUG "random %04d %04d %04d: " \
+               fmt,\
+               input_pool.entropy_count,\
+               blocking_pool.entropy_count,\
+               nonblocking_pool.entropy_count,\
+               ## arg); } while (0)
 #else
 #define DEBUG_ENT(fmt, arg...) do {} while (0)
 #endif
@@ -394,7 +396,7 @@ module_param(debug, bool, 0644);
 
 struct entropy_store;
 struct entropy_store {
-       /* mostly-read data: */
+       /* read-only data: */
        struct poolinfo *poolinfo;
        __u32 *pool;
        const char *name;
@@ -402,7 +404,7 @@ struct entropy_store {
        struct entropy_store *pull;
 
        /* read-write data: */
-       spinlock_t lock ____cacheline_aligned_in_smp;
+       spinlock_t lock;
        unsigned add_ptr;
        int entropy_count;
        int input_rotate;
@@ -438,25 +440,26 @@ static struct entropy_store nonblocking_pool = {
 };
 
 /*
- * This function adds a byte into the entropy "pool".  It does not
+ * This function adds bytes into the entropy "pool".  It does not
  * update the entropy estimate.  The caller should call
- * credit_entropy_store if this is appropriate.
+ * credit_entropy_bits if this is appropriate.
  *
  * The pool is stirred with a primitive polynomial of the appropriate
  * degree, and then twisted.  We twist by three bits at a time because
  * it's cheap to do so and helps slightly in the expected case where
  * the entropy is concentrated in the low-order bits.
  */
-static void __add_entropy_words(struct entropy_store *r, const __u32 *in,
-                               int nwords, __u32 out[16])
+static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+                                  int nbytes, __u8 out[64])
 {
        static __u32 const twist_table[8] = {
                0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
                0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
-       unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5;
-       int new_rotate, input_rotate;
+       unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+       int input_rotate;
        int wordmask = r->poolinfo->poolwords - 1;
-       __u32 w, next_w;
+       const char *bytes = in;
+       __u32 w;
        unsigned long flags;
 
        /* Taps are constant, so we can load them without holding r->lock.  */
@@ -465,78 +468,76 @@ static void __add_entropy_words(struct entropy_store *r, const __u32 *in,
        tap3 = r->poolinfo->tap3;
        tap4 = r->poolinfo->tap4;
        tap5 = r->poolinfo->tap5;
-       next_w = *in++;
 
        spin_lock_irqsave(&r->lock, flags);
-       prefetch_range(r->pool, wordmask);
        input_rotate = r->input_rotate;
-       add_ptr = r->add_ptr;
+       i = r->add_ptr;
 
-       while (nwords--) {
-               w = rol32(next_w, input_rotate);
-               if (nwords > 0)
-                       next_w = *in++;
-               i = add_ptr = (add_ptr - 1) & wordmask;
-               /*
-                * Normally, we add 7 bits of rotation to the pool.
-                * At the beginning of the pool, add an extra 7 bits
-                * rotation, so that successive passes spread the
-                * input bits across the pool evenly.
-                */
-               new_rotate = input_rotate + 14;
-               if (i)
-                       new_rotate = input_rotate + 7;
-               input_rotate = new_rotate & 31;
+       /* mix one byte at a time to simplify size handling and churn faster */
+       while (nbytes--) {
+               w = rol32(*bytes++, input_rotate & 31);
+               i = (i - 1) & wordmask;
 
                /* XOR in the various taps */
+               w ^= r->pool[i];
                w ^= r->pool[(i + tap1) & wordmask];
                w ^= r->pool[(i + tap2) & wordmask];
                w ^= r->pool[(i + tap3) & wordmask];
                w ^= r->pool[(i + tap4) & wordmask];
                w ^= r->pool[(i + tap5) & wordmask];
-               w ^= r->pool[i];
+
+               /* Mix the result back in with a twist */
                r->pool[i] = (w >> 3) ^ twist_table[w & 7];
+
+               /*
+                * Normally, we add 7 bits of rotation to the pool.
+                * At the beginning of the pool, add an extra 7 bits
+                * rotation, so that successive passes spread the
+                * input bits across the pool evenly.
+                */
+               input_rotate += i ? 7 : 14;
        }
 
        r->input_rotate = input_rotate;
-       r->add_ptr = add_ptr;
+       r->add_ptr = i;
 
-       if (out) {
-               for (i = 0; i < 16; i++) {
-                       out[i] = r->pool[add_ptr];
-                       add_ptr = (add_ptr - 1) & wordmask;
-               }
-       }
+       if (out)
+               for (j = 0; j < 16; j++)
+                       ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
 
        spin_unlock_irqrestore(&r->lock, flags);
 }
 
-static inline void add_entropy_words(struct entropy_store *r, const __u32 *in,
-                                    int nwords)
+static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
 {
-       __add_entropy_words(r, in, nwords, NULL);
+       mix_pool_bytes_extract(r, in, bytes, NULL);
 }
 
 /*
  * Credit (or debit) the entropy store with n bits of entropy
  */
-static void credit_entropy_store(struct entropy_store *r, int nbits)
+static void credit_entropy_bits(struct entropy_store *r, int nbits)
 {
        unsigned long flags;
 
+       if (!nbits)
+               return;
+
        spin_lock_irqsave(&r->lock, flags);
 
-       if (r->entropy_count + nbits < 0) {
-               DEBUG_ENT("negative entropy/overflow (%d+%d)\n",
-                         r->entropy_count, nbits);
+       DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+       r->entropy_count += nbits;
+       if (r->entropy_count < 0) {
+               DEBUG_ENT("negative entropy/overflow\n");
                r->entropy_count = 0;
-       } else if (r->entropy_count + nbits > r->poolinfo->POOLBITS) {
+       } else if (r->entropy_count > r->poolinfo->POOLBITS)
                r->entropy_count = r->poolinfo->POOLBITS;
-       } else {
-               r->entropy_count += nbits;
-               if (nbits)
-                       DEBUG_ENT("added %d entropy credits to %s\n",
-                                 nbits, r->name);
+
+       /* should we wake readers? */
+       if (r == &input_pool &&
+           r->entropy_count >= random_read_wakeup_thresh) {
+               wake_up_interruptible(&random_read_wait);
+               kill_fasync(&fasync, SIGIO, POLL_IN);
        }
 
        spin_unlock_irqrestore(&r->lock, flags);
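
credit_entropy_bits() above now notifies readers itself when the input pool crosses the read wakeup threshold, both by waking the wait queue and by delivering SIGIO through the module-wide fasync pointer added earlier in this diff. The general shape of that character-device pattern is sketched below; the names are illustrative and this is not the random driver's file-operations code:

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/sched.h>

static struct fasync_struct *example_fasync_list;
static DECLARE_WAIT_QUEUE_HEAD(example_read_wait);

/* .fasync file operation: register or drop the caller for SIGIO delivery */
static int example_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &example_fasync_list);
}

/* called whenever new data becomes available to readers */
static void example_data_ready(void)
{
	wake_up_interruptible(&example_read_wait);	   /* blocking readers */
	kill_fasync(&example_fasync_list, SIGIO, POLL_IN); /* SIGIO readers */
}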
@@ -551,7 +552,7 @@ static void credit_entropy_store(struct entropy_store *r, int nbits)
 /* There is one of these per entropy source */
 struct timer_rand_state {
        cycles_t last_time;
-       long last_delta,last_delta2;
+       long last_delta, last_delta2;
        unsigned dont_count_entropy:1;
 };
 
@@ -586,7 +587,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
        sample.jiffies = jiffies;
        sample.cycles = get_cycles();
        sample.num = num;
-       add_entropy_words(&input_pool, (u32 *)&sample, sizeof(sample)/4);
+       mix_pool_bytes(&input_pool, &sample, sizeof(sample));
 
        /*
         * Calculate number of bits of randomness we probably added.
@@ -620,13 +621,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
                 * Round down by 1 bit on general principles,
                 * and limit entropy estimate to 12 bits.
                 */
-               credit_entropy_store(&input_pool,
-                                    min_t(int, fls(delta>>1), 11));
+               credit_entropy_bits(&input_pool,
+                                   min_t(int, fls(delta>>1), 11));
        }
-
-       if(input_pool.entropy_count >= random_read_wakeup_thresh)
-               wake_up_interruptible(&random_read_wait);
-
 out:
        preempt_enable();
 }
@@ -677,7 +674,7 @@ void add_disk_randomness(struct gendisk *disk)
  *
  *********************************************************************/
 
-static ssize_t extract_entropy(struct entropy_store *r, void * buf,
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
                               size_t nbytes, int min, int rsvd);
 
 /*
@@ -704,10 +701,10 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
                          "(%d of %d requested)\n",
                          r->name, bytes * 8, nbytes * 8, r->entropy_count);
 
-               bytes=extract_entropy(r->pull, tmp, bytes,
-                                     random_read_wakeup_thresh / 8, rsvd);
-               add_entropy_words(r, tmp, (bytes + 3) / 4);
-               credit_entropy_store(r, bytes*8);
+               bytes = extract_entropy(r->pull, tmp, bytes,
+                                       random_read_wakeup_thresh / 8, rsvd);
+               mix_pool_bytes(r, tmp, bytes);
+               credit_entropy_bits(r, bytes*8);
        }
 }
 
@@ -744,13 +741,15 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
                if (r->limit && nbytes + reserved >= r->entropy_count / 8)
                        nbytes = r->entropy_count/8 - reserved;
 
-               if(r->entropy_count / 8 >= nbytes + reserved)
+               if (r->entropy_count / 8 >= nbytes + reserved)
                        r->entropy_count -= nbytes*8;
                else
                        r->entropy_count = reserved;
 
-               if (r->entropy_count < random_write_wakeup_thresh)
+               if (r->entropy_count < random_write_wakeup_thresh) {
                        wake_up_interruptible(&random_write_wait);
+                       kill_fasync(&fasync, SIGIO, POLL_OUT);
+               }
        }
 
        DEBUG_ENT("debiting %d entropy credits from %s%s\n",
@@ -764,45 +763,46 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 static void extract_buf(struct entropy_store *r, __u8 *out)
 {
        int i;
-       __u32 data[16], buf[5 + SHA_WORKSPACE_WORDS];
+       __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+       __u8 extract[64];
+
+       /* Generate a hash across the pool, 16 words (512 bits) at a time */
+       sha_init(hash);
+       for (i = 0; i < r->poolinfo->poolwords; i += 16)
+               sha_transform(hash, (__u8 *)(r->pool + i), workspace);
 
-       sha_init(buf);
        /*
-        * As we hash the pool, we mix intermediate values of
-        * the hash back into the pool.  This eliminates
-        * backtracking attacks (where the attacker knows
-        * the state of the pool plus the current outputs, and
-        * attempts to find previous ouputs), unless the hash
-        * function can be inverted.
+        * We mix the hash back into the pool to prevent backtracking
+        * attacks (where the attacker knows the state of the pool
+        * plus the current outputs, and attempts to find previous
+        * outputs), unless the hash function can be inverted. By
+        * mixing at least a SHA1 worth of hash data back, we make
+        * brute-forcing the feedback as hard as brute-forcing the
+        * hash.
         */
-       for (i = 0; i < r->poolinfo->poolwords; i += 16) {
-               /* hash blocks of 16 words = 512 bits */
-               sha_transform(buf, (__u8 *)(r->pool + i), buf + 5);
-               /* feed back portion of the resulting hash */
-               add_entropy_words(r, &buf[i % 5], 1);
-       }
+       mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
 
        /*
-        * To avoid duplicates, we atomically extract a
-        * portion of the pool while mixing, and hash one
-        * final time.
+        * To avoid duplicates, we atomically extract a portion of the
+        * pool while mixing, and hash one final time.
         */
-       __add_entropy_words(r, &buf[i % 5], 1, data);
-       sha_transform(buf, (__u8 *)data, buf + 5);
+       sha_transform(hash, extract, workspace);
+       memset(extract, 0, sizeof(extract));
+       memset(workspace, 0, sizeof(workspace));
 
        /*
-        * In case the hash function has some recognizable
-        * output pattern, we fold it in half.
+        * In case the hash function has some recognizable output
+        * pattern, we fold it in half. Thus, we always feed back
+        * twice as much data as we output.
         */
-
-       buf[0] ^= buf[3];
-       buf[1] ^= buf[4];
-       buf[2] ^= rol32(buf[2], 16);
-       memcpy(out, buf, EXTRACT_SIZE);
-       memset(buf, 0, sizeof(buf));
+       hash[0] ^= hash[3];
+       hash[1] ^= hash[4];
+       hash[2] ^= rol32(hash[2], 16);
+       memcpy(out, hash, EXTRACT_SIZE);
+       memset(hash, 0, sizeof(hash));
 }
 
-static ssize_t extract_entropy(struct entropy_store *r, void * buf,
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
                               size_t nbytes, int min, int reserved)
 {
        ssize_t ret = 0, i;
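
The rewritten extract_buf() above hashes the whole pool sixteen words at a time, feeds the full 20-byte SHA-1 state back through mix_pool_bytes_extract(), and only then folds the final hash in half, so the caller receives half as many bytes as were mixed back in (EXTRACT_SIZE, i.e. half a SHA-1 digest, 10 bytes). A stand-alone sketch of just that folding step, with rol32() re-implemented for illustration (in the kernel it comes from the bitops headers), could read:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t rol32(uint32_t word, unsigned int shift)
{
        return (word << shift) | (word >> (32 - shift));
}

/* Fold a 5-word (20-byte) hash state down to 10 output bytes, mirroring
 * the end of extract_buf() above, and wipe the state afterwards. */
static void fold_half(uint32_t hash[5], uint8_t out[10])
{
        hash[0] ^= hash[3];
        hash[1] ^= hash[4];
        hash[2] ^= rol32(hash[2], 16);
        memcpy(out, hash, 10);
        memset(hash, 0, 5 * sizeof(hash[0]));
}

int main(void)
{
        /* SHA-1 initial values, used purely as dummy input. */
        uint32_t hash[5] = { 0x67452301, 0xefcdab89, 0x98badcfe,
                             0x10325476, 0xc3d2e1f0 };
        uint8_t out[10];
        size_t i;

        fold_half(hash, out);
        for (i = 0; i < sizeof(out); i++)
                printf("%02x", out[i]);
        printf("\n");
        return 0;
}

Emitting 10 bytes while 20 were fed back is what the new comment means by always feeding back twice as much data as is output.
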
@@ -872,7 +872,6 @@ void get_random_bytes(void *buf, int nbytes)
 {
        extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
 }
-
 EXPORT_SYMBOL(get_random_bytes);
 
 /*
@@ -894,12 +893,11 @@ static void init_std_data(struct entropy_store *r)
        spin_unlock_irqrestore(&r->lock, flags);
 
        now = ktime_get_real();
-       add_entropy_words(r, (__u32 *)&now, sizeof(now)/4);
-       add_entropy_words(r, (__u32 *)utsname(),
-                         sizeof(*(utsname()))/4);
+       mix_pool_bytes(r, &now, sizeof(now));
+       mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
 }
 
-static int __init rand_initialize(void)
+static int rand_initialize(void)
 {
        init_std_data(&input_pool);
        init_std_data(&blocking_pool);
@@ -940,7 +938,7 @@ void rand_initialize_disk(struct gendisk *disk)
 #endif
 
 static ssize_t
-random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
        ssize_t n, retval = 0, count = 0;
 
@@ -1002,8 +1000,7 @@ random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos)
 }
 
 static ssize_t
-urandom_read(struct file * file, char __user * buf,
-                     size_t nbytes, loff_t *ppos)
+urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
        return extract_entropy_user(&nonblocking_pool, buf, nbytes);
 }
@@ -1038,16 +1035,15 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
                count -= bytes;
                p += bytes;
 
-               add_entropy_words(r, buf, (bytes + 3) / 4);
+               mix_pool_bytes(r, buf, bytes);
                cond_resched();
        }
 
        return 0;
 }
 
-static ssize_t
-random_write(struct file * file, const char __user * buffer,
-            size_t count, loff_t *ppos)
+static ssize_t random_write(struct file *file, const char __user *buffer,
+                           size_t count, loff_t *ppos)
 {
        size_t ret;
        struct inode *inode = file->f_path.dentry->d_inode;
@@ -1064,9 +1060,7 @@ random_write(struct file * file, const char __user * buffer,
        return (ssize_t)count;
 }
 
-static int
-random_ioctl(struct inode * inode, struct file * file,
-            unsigned int cmd, unsigned long arg)
+static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
 {
        int size, ent_count;
        int __user *p = (int __user *)arg;
@@ -1074,8 +1068,8 @@ random_ioctl(struct inode * inode, struct file * file,
 
        switch (cmd) {
        case RNDGETENTCNT:
-               ent_count = input_pool.entropy_count;
-               if (put_user(ent_count, p))
+               /* inherently racy, no point locking */
+               if (put_user(input_pool.entropy_count, p))
                        return -EFAULT;
                return 0;
        case RNDADDTOENTCNT:
@@ -1083,13 +1077,7 @@ random_ioctl(struct inode * inode, struct file * file,
                        return -EPERM;
                if (get_user(ent_count, p))
                        return -EFAULT;
-               credit_entropy_store(&input_pool, ent_count);
-               /*
-                * Wake up waiting processes if we have enough
-                * entropy.
-                */
-               if (input_pool.entropy_count >= random_read_wakeup_thresh)
-                       wake_up_interruptible(&random_read_wait);
+               credit_entropy_bits(&input_pool, ent_count);
                return 0;
        case RNDADDENTROPY:
                if (!capable(CAP_SYS_ADMIN))
@@ -1104,39 +1092,45 @@ random_ioctl(struct inode * inode, struct file * file,
                                    size);
                if (retval < 0)
                        return retval;
-               credit_entropy_store(&input_pool, ent_count);
-               /*
-                * Wake up waiting processes if we have enough
-                * entropy.
-                */
-               if (input_pool.entropy_count >= random_read_wakeup_thresh)
-                       wake_up_interruptible(&random_read_wait);
+               credit_entropy_bits(&input_pool, ent_count);
                return 0;
        case RNDZAPENTCNT:
        case RNDCLEARPOOL:
                /* Clear the entropy pool counters. */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
-               init_std_data(&input_pool);
-               init_std_data(&blocking_pool);
-               init_std_data(&nonblocking_pool);
+               rand_initialize();
                return 0;
        default:
                return -EINVAL;
        }
 }
 
+static int random_fasync(int fd, struct file *filp, int on)
+{
+       return fasync_helper(fd, filp, on, &fasync);
+}
+
+static int random_release(struct inode *inode, struct file *filp)
+{
+       return fasync_helper(-1, filp, 0, &fasync);
+}
+
 const struct file_operations random_fops = {
        .read  = random_read,
        .write = random_write,
        .poll  = random_poll,
-       .ioctl = random_ioctl,
+       .unlocked_ioctl = random_ioctl,
+       .fasync = random_fasync,
+       .release = random_release,
 };
 
 const struct file_operations urandom_fops = {
        .read  = urandom_read,
        .write = random_write,
-       .ioctl = random_ioctl,
+       .unlocked_ioctl = random_ioctl,
+       .fasync = random_fasync,
+       .release = random_release,
 };
 
 /***************************************************************
@@ -1157,7 +1151,6 @@ void generate_random_uuid(unsigned char uuid_out[16])
        /* Set the UUID variant to DCE */
        uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
 }
-
 EXPORT_SYMBOL(generate_random_uuid);
 
 /********************************************************************
@@ -1339,7 +1332,7 @@ ctl_table random_table[] = {
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 
-static __u32 twothirdsMD4Transform (__u32 const buf[4], __u32 const in[12])
+static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
 {
        __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
 
@@ -1487,8 +1480,8 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
         */
 
        memcpy(hash, saddr, 16);
-       hash[4]=((__force u16)sport << 16) + (__force u16)dport;
-       memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
+       hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
+       memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
 
        seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
        seq += keyptr->count;
@@ -1538,10 +1531,10 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
         *  Note that the words are placed into the starting vector, which is
         *  then mixed with a partial MD4 over random data.
         */
-       hash[0]=(__force u32)saddr;
-       hash[1]=(__force u32)daddr;
-       hash[2]=((__force u16)sport << 16) + (__force u16)dport;
-       hash[3]=keyptr->secret[11];
+       hash[0] = (__force u32)saddr;
+       hash[1] = (__force u32)daddr;
+       hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+       hash[3] = keyptr->secret[11];
 
        seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
        seq += keyptr->count;
@@ -1556,10 +1549,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
         *      Choosing a clock of 64 ns period is OK. (period of 274 s)
         */
        seq += ktime_to_ns(ktime_get_real()) >> 6;
-#if 0
-       printk("init_seq(%lx, %lx, %d, %d) = %d\n",
-              saddr, daddr, sport, dport, seq);
-#endif
+
        return seq;
 }
 
@@ -1582,14 +1572,15 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport)
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+                              __be16 dport)
 {
        struct keydata *keyptr = get_keyptr();
        u32 hash[12];
 
        memcpy(hash, saddr, 16);
        hash[4] = (__force u32)dport;
-       memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7);
+       memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
 
        return twothirdsMD4Transform((const __u32 *)daddr, hash);
 }
@@ -1617,13 +1608,9 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
 
        seq += ktime_to_ns(ktime_get_real());
        seq &= (1ull << 48) - 1;
-#if 0
-       printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n",
-              saddr, daddr, sport, dport, seq);
-#endif
+
        return seq;
 }
-
 EXPORT_SYMBOL(secure_dccp_sequence_number);
 #endif
 
index f4f837f868290a5e4baebfd61ae04e9b7f366c20..a03a538a3efb0d59eb0a5b0c1e47711ff3852433 100644 (file)
 /* Bit fields for particular registers shared with driver */
 
 /* COR1 - driver and RTA */
-#define        COR1_ODD        0x80    /* Odd parity */
-#define COR1_EVEN      0x00    /* Even parity */
-#define        COR1_NOP        0x00    /* No parity */
-#define        COR1_FORCE      0x20    /* Force parity */
-#define        COR1_NORMAL     0x40    /* With parity */
-#define        COR1_1STOP      0x00    /* 1 stop bit */
-#define        COR1_15STOP     0x04    /* 1.5 stop bits */
-#define        COR1_2STOP      0x08    /* 2 stop bits */
-#define        COR1_5BITS      0x00    /* 5 data bits */
-#define        COR1_6BITS      0x01    /* 6 data bits */
-#define        COR1_7BITS      0x02    /* 7 data bits */
-#define        COR1_8BITS      0x03    /* 8 data bits */
-
-#define COR1_HOST       0xef   /* Safe host bits */
+#define RIOC_COR1_ODD          0x80    /* Odd parity */
+#define RIOC_COR1_EVEN         0x00    /* Even parity */
+#define RIOC_COR1_NOP          0x00    /* No parity */
+#define RIOC_COR1_FORCE                0x20    /* Force parity */
+#define RIOC_COR1_NORMAL       0x40    /* With parity */
+#define RIOC_COR1_1STOP                0x00    /* 1 stop bit */
+#define RIOC_COR1_15STOP       0x04    /* 1.5 stop bits */
+#define RIOC_COR1_2STOP                0x08    /* 2 stop bits */
+#define RIOC_COR1_5BITS                0x00    /* 5 data bits */
+#define RIOC_COR1_6BITS                0x01    /* 6 data bits */
+#define RIOC_COR1_7BITS                0x02    /* 7 data bits */
+#define RIOC_COR1_8BITS                0x03    /* 8 data bits */
+
+#define RIOC_COR1_HOST         0xef    /* Safe host bits */
 
 /* RTA only */
-#define COR1_CINPCK     0x00   /* Check parity of received characters */
-#define COR1_CNINPCK    0x10   /* Don't check parity */
+#define RIOC_COR1_CINPCK       0x00    /* Check parity of received characters */
+#define RIOC_COR1_CNINPCK      0x10    /* Don't check parity */
 
 /* COR2 bits for both RTA and driver use */
-#define        COR2_IXANY      0x80    /* IXANY - any character is XON */
-#define        COR2_IXON       0x40    /* IXON - enable tx soft flowcontrol */
-#define        COR2_RTSFLOW    0x02    /* Enable tx hardware flow control */
+#define RIOC_COR2_IXANY                0x80    /* IXANY - any character is XON */
+#define RIOC_COR2_IXON         0x40    /* IXON - enable tx soft flowcontrol */
+#define RIOC_COR2_RTSFLOW      0x02    /* Enable tx hardware flow control */
 
 /* Additional driver bits */
-#define        COR2_HUPCL      0x20    /* Hang up on close */
-#define        COR2_CTSFLOW    0x04    /* Enable rx hardware flow control */
-#define        COR2_IXOFF      0x01    /* Enable rx software flow control */
-#define COR2_DTRFLOW   0x08    /* Enable tx hardware flow control */
+#define RIOC_COR2_HUPCL                0x20    /* Hang up on close */
+#define RIOC_COR2_CTSFLOW      0x04    /* Enable rx hardware flow control */
+#define RIOC_COR2_IXOFF                0x01    /* Enable rx software flow control */
+#define RIOC_COR2_DTRFLOW      0x08    /* Enable tx hardware flow control */
 
 /* RTA use only */
-#define COR2_ETC       0x20    /* Embedded transmit options */
-#define        COR2_LOCAL      0x10    /* Local loopback mode */
-#define        COR2_REMOTE     0x08    /* Remote loopback mode */
-#define        COR2_HOST       0xc2    /* Safe host bits */
+#define RIOC_COR2_ETC          0x20    /* Embedded transmit options */
+#define RIOC_COR2_LOCAL                0x10    /* Local loopback mode */
+#define RIOC_COR2_REMOTE       0x08    /* Remote loopback mode */
+#define RIOC_COR2_HOST         0xc2    /* Safe host bits */
 
 /* COR3 - RTA use only */
-#define        COR3_SCDRNG     0x80    /* Enable special char detect for range */
-#define        COR3_SCD34      0x40    /* Special character detect for SCHR's 3 + 4 */
-#define        COR3_FCT        0x20    /* Flow control transparency */
-#define        COR3_SCD12      0x10    /* Special character detect for SCHR's 1 + 2 */
-#define        COR3_FIFO12     0x0c    /* 12 chars for receive FIFO threshold */
-#define COR3_FIFO10     0x0a   /* 10 chars for receive FIFO threshold */
-#define COR3_FIFO8      0x08   /* 8 chars for receive FIFO threshold */
-#define COR3_FIFO6      0x06   /* 6 chars for receive FIFO threshold */
-
-#define COR3_THRESHOLD  COR3_FIFO8     /* MUST BE LESS THAN MCOR_THRESHOLD */
-
-#define        COR3_DEFAULT    (COR3_FCT | COR3_THRESHOLD)
+#define RIOC_COR3_SCDRNG       0x80    /* Enable special char detect for range */
+#define RIOC_COR3_SCD34                0x40    /* Special character detect for SCHR's 3 + 4 */
+#define RIOC_COR3_FCT          0x20    /* Flow control transparency */
+#define RIOC_COR3_SCD12                0x10    /* Special character detect for SCHR's 1 + 2 */
+#define RIOC_COR3_FIFO12       0x0c    /* 12 chars for receive FIFO threshold */
+#define RIOC_COR3_FIFO10       0x0a    /* 10 chars for receive FIFO threshold */
+#define RIOC_COR3_FIFO8                0x08    /* 8 chars for receive FIFO threshold */
+#define RIOC_COR3_FIFO6                0x06    /* 6 chars for receive FIFO threshold */
+
+#define RIOC_COR3_THRESHOLD    RIOC_COR3_FIFO8 /* MUST BE LESS THAN MCOR_THRESHOLD */
+
+#define RIOC_COR3_DEFAULT      (RIOC_COR3_FCT | RIOC_COR3_THRESHOLD)
                                /* Default bits for COR3 */
 
 /* COR4 driver and RTA use */
-#define        COR4_IGNCR      0x80    /* Throw away CR's on input */
-#define        COR4_ICRNL      0x40    /* Map CR -> NL on input */
-#define        COR4_INLCR      0x20    /* Map NL -> CR on input */
-#define        COR4_IGNBRK     0x10    /* Ignore Break */
-#define        COR4_NBRKINT    0x08    /* No interrupt on break (-BRKINT) */
-#define COR4_RAISEMOD  0x01    /* Raise modem output lines on non-zero baud */
+#define RIOC_COR4_IGNCR                0x80    /* Throw away CR's on input */
+#define RIOC_COR4_ICRNL                0x40    /* Map CR -> NL on input */
+#define RIOC_COR4_INLCR                0x20    /* Map NL -> CR on input */
+#define RIOC_COR4_IGNBRK       0x10    /* Ignore Break */
+#define RIOC_COR4_NBRKINT      0x08    /* No interrupt on break (-BRKINT) */
+#define RIOC_COR4_RAISEMOD     0x01    /* Raise modem output lines on non-zero baud */
 
 
 /* COR4 driver only */
-#define COR4_IGNPAR    0x04    /* IGNPAR (ignore characters with errors) */
-#define COR4_PARMRK    0x02    /* PARMRK */
+#define RIOC_COR4_IGNPAR       0x04    /* IGNPAR (ignore characters with errors) */
+#define RIOC_COR4_PARMRK       0x02    /* PARMRK */
 
-#define COR4_HOST      0xf8    /* Safe host bits */
+#define RIOC_COR4_HOST         0xf8    /* Safe host bits */
 
 /* COR4 RTA only */
-#define COR4_CIGNPAR   0x02    /* Thrown away bad characters */
-#define COR4_CPARMRK   0x04    /* PARMRK characters */
-#define COR4_CNPARMRK  0x03    /* Don't PARMRK */
+#define RIOC_COR4_CIGNPAR      0x02    /* Thrown away bad characters */
+#define RIOC_COR4_CPARMRK      0x04    /* PARMRK characters */
+#define RIOC_COR4_CNPARMRK     0x03    /* Don't PARMRK */
 
 /* COR5 driver and RTA use */
-#define        COR5_ISTRIP     0x80    /* Strip input chars to 7 bits */
-#define        COR5_LNE        0x40    /* Enable LNEXT processing */
-#define        COR5_CMOE       0x20    /* Match good and errored characters */
-#define        COR5_ONLCR      0x02    /* NL -> CR NL on output */
-#define        COR5_OCRNL      0x01    /* CR -> NL on output */
+#define RIOC_COR5_ISTRIP       0x80    /* Strip input chars to 7 bits */
+#define RIOC_COR5_LNE          0x40    /* Enable LNEXT processing */
+#define RIOC_COR5_CMOE         0x20    /* Match good and errored characters */
+#define RIOC_COR5_ONLCR                0x02    /* NL -> CR NL on output */
+#define RIOC_COR5_OCRNL                0x01    /* CR -> NL on output */
 
 /*
 ** Spare bits - these are not used in the CIRRUS registers, so we use
 /*
 ** tstop and tbusy indication
 */
-#define        COR5_TSTATE_ON  0x08    /* Turn on monitoring of tbusy and tstop */
-#define        COR5_TSTATE_OFF 0x04    /* Turn off monitoring of tbusy and tstop */
+#define RIOC_COR5_TSTATE_ON    0x08    /* Turn on monitoring of tbusy and tstop */
+#define RIOC_COR5_TSTATE_OFF   0x04    /* Turn off monitoring of tbusy and tstop */
 /*
 ** TAB3
 */
-#define        COR5_TAB3       0x10    /* TAB3 mode */
+#define RIOC_COR5_TAB3         0x10    /* TAB3 mode */
 
-#define        COR5_HOST       0xc3    /* Safe host bits */
+#define RIOC_COR5_HOST         0xc3    /* Safe host bits */
 
 /* CCSR */
-#define        CCSR_TXFLOFF    0x04    /* Tx is xoffed */
+#define RIOC_CCSR_TXFLOFF      0x04    /* Tx is xoffed */
 
 /* MSVR1 */
 /* NB. DTR / CD swapped from Cirrus spec as the pins are also reversed on the
    RTA. This is because otherwise DCD would get lost on the 1 parallel / 3
    serial option.
 */
-#define        MSVR1_CD        0x80    /* CD (DSR on Cirrus) */
-#define        MSVR1_RTS       0x40    /* RTS (CTS on Cirrus) */
-#define        MSVR1_RI        0x20    /* RI */
-#define        MSVR1_DTR       0x10    /* DTR (CD on Cirrus) */
-#define        MSVR1_CTS       0x01    /* CTS output pin (RTS on Cirrus) */
+#define RIOC_MSVR1_CD          0x80    /* CD (DSR on Cirrus) */
+#define RIOC_MSVR1_RTS         0x40    /* RTS (CTS on Cirrus) */
+#define RIOC_MSVR1_RI          0x20    /* RI */
+#define RIOC_MSVR1_DTR         0x10    /* DTR (CD on Cirrus) */
+#define RIOC_MSVR1_CTS         0x01    /* CTS output pin (RTS on Cirrus) */
 /* Next two used to indicate state of tbusy and tstop to driver */
-#define        MSVR1_TSTOP     0x08    /* Set if port flow controlled */
-#define        MSVR1_TEMPTY    0x04    /* Set if port tx buffer empty */
+#define RIOC_MSVR1_TSTOP       0x08    /* Set if port flow controlled */
+#define RIOC_MSVR1_TEMPTY      0x04    /* Set if port tx buffer empty */
 
-#define        MSVR1_HOST      0xf3    /* The bits the host wants */
+#define RIOC_MSVR1_HOST                0xf3    /* The bits the host wants */
 
 /* Defines for the subscripts of a CONFIG packet */
-#define        CONFIG_COR1     1       /* Option register 1 */
-#define        CONFIG_COR2     2       /* Option register 2 */
-#define        CONFIG_COR4     3       /* Option register 4 */
-#define        CONFIG_COR5     4       /* Option register 5 */
-#define        CONFIG_TXXON    5       /* Tx XON character */
-#define        CONFIG_TXXOFF   6       /* Tx XOFF character */
-#define        CONFIG_RXXON    7       /* Rx XON character */
-#define        CONFIG_RXXOFF   8       /* Rx XOFF character */
-#define CONFIG_LNEXT   9       /* LNEXT character */
-#define        CONFIG_TXBAUD   10      /* Tx baud rate */
-#define        CONFIG_RXBAUD   11      /* Rx baud rate */
-
-#define        PRE_EMPTIVE     0x80    /* Pre-emptive bit in command field */
+#define RIOC_CONFIG_COR1       1       /* Option register 1 */
+#define RIOC_CONFIG_COR2       2       /* Option register 2 */
+#define RIOC_CONFIG_COR4       3       /* Option register 4 */
+#define RIOC_CONFIG_COR5       4       /* Option register 5 */
+#define RIOC_CONFIG_TXXON      5       /* Tx XON character */
+#define RIOC_CONFIG_TXXOFF     6       /* Tx XOFF character */
+#define RIOC_CONFIG_RXXON      7       /* Rx XON character */
+#define RIOC_CONFIG_RXXOFF     8       /* Rx XOFF character */
+#define RIOC_CONFIG_LNEXT      9       /* LNEXT character */
+#define RIOC_CONFIG_TXBAUD     10      /* Tx baud rate */
+#define RIOC_CONFIG_RXBAUD     11      /* Rx baud rate */
+
+#define RIOC_PRE_EMPTIVE       0x80    /* Pre-emptive bit in command field */
 
 /* Packet types going from Host to remote - with the exception of OPEN, MOPEN,
    CONFIG, SBREAK and MEMDUMP the remaining bytes of the data array will not
    be used 
 */
-#define        OPEN            0x00    /* Open a port */
-#define CONFIG         0x01    /* Configure a port */
-#define        MOPEN           0x02    /* Modem open (block for DCD) */
-#define        CLOSE           0x03    /* Close a port */
-#define        WFLUSH          (0x04 | PRE_EMPTIVE)    /* Write flush */
-#define        RFLUSH          (0x05 | PRE_EMPTIVE)    /* Read flush */
-#define        RESUME          (0x06 | PRE_EMPTIVE)    /* Resume if xoffed */
-#define        SBREAK          0x07    /* Start break */
-#define        EBREAK          0x08    /* End break */
-#define        SUSPEND         (0x09 | PRE_EMPTIVE)    /* Susp op (behave as tho xoffed) */
-#define FCLOSE          (0x0a | PRE_EMPTIVE)   /* Force close */
-#define XPRINT          0x0b   /* Xprint packet */
-#define MBIS           (0x0c | PRE_EMPTIVE)    /* Set modem lines */
-#define MBIC           (0x0d | PRE_EMPTIVE)    /* Clear modem lines */
-#define MSET           (0x0e | PRE_EMPTIVE)    /* Set modem lines */
-#define PCLOSE         0x0f    /* Pseudo close - Leaves rx/tx enabled */
-#define MGET           (0x10 | PRE_EMPTIVE)    /* Force update of modem status */
-#define MEMDUMP                (0x11 | PRE_EMPTIVE)    /* Send back mem from addr supplied */
-#define        READ_REGISTER   (0x12 | PRE_EMPTIVE)    /* Read CD1400 register (debug) */
+#define RIOC_OPEN              0x00    /* Open a port */
+#define RIOC_CONFIG            0x01    /* Configure a port */
+#define RIOC_MOPEN             0x02    /* Modem open (block for DCD) */
+#define RIOC_CLOSE             0x03    /* Close a port */
+#define RIOC_WFLUSH            (0x04 | RIOC_PRE_EMPTIVE)       /* Write flush */
+#define RIOC_RFLUSH            (0x05 | RIOC_PRE_EMPTIVE)       /* Read flush */
+#define RIOC_RESUME            (0x06 | RIOC_PRE_EMPTIVE)       /* Resume if xoffed */
+#define RIOC_SBREAK            0x07    /* Start break */
+#define RIOC_EBREAK            0x08    /* End break */
+#define RIOC_SUSPEND           (0x09 | RIOC_PRE_EMPTIVE)       /* Susp op (behave as tho xoffed) */
+#define RIOC_FCLOSE            (0x0a | RIOC_PRE_EMPTIVE)       /* Force close */
+#define RIOC_XPRINT            0x0b    /* Xprint packet */
+#define RIOC_MBIS              (0x0c | RIOC_PRE_EMPTIVE)       /* Set modem lines */
+#define RIOC_MBIC              (0x0d | RIOC_PRE_EMPTIVE)       /* Clear modem lines */
+#define RIOC_MSET              (0x0e | RIOC_PRE_EMPTIVE)       /* Set modem lines */
+#define RIOC_PCLOSE            0x0f    /* Pseudo close - Leaves rx/tx enabled */
+#define RIOC_MGET              (0x10 | RIOC_PRE_EMPTIVE)       /* Force update of modem status */
+#define RIOC_MEMDUMP           (0x11 | RIOC_PRE_EMPTIVE)       /* Send back mem from addr supplied */
+#define RIOC_READ_REGISTER     (0x12 | RIOC_PRE_EMPTIVE)       /* Read CD1400 register (debug) */
 
 /* "Command" packets going from remote to host COMPLETE and MODEM_STATUS
    use data[4] / data[3] to indicate current state and modem status respectively
 */
 
-#define        COMPLETE        (0x20 | PRE_EMPTIVE)
+#define RIOC_COMPLETE          (0x20 | RIOC_PRE_EMPTIVE)
                                /* Command complete */
-#define BREAK_RECEIVED (0x21 | PRE_EMPTIVE)
+#define RIOC_BREAK_RECEIVED    (0x21 | RIOC_PRE_EMPTIVE)
                                /* Break received */
-#define MODEM_STATUS   (0x22 | PRE_EMPTIVE)
+#define RIOC_MODEM_STATUS      (0x22 | RIOC_PRE_EMPTIVE)
                                /* Change in modem status */
 
 /* "Command" packet that could go either way - handshake wake-up */
-#define HANDSHAKE      (0x23 | PRE_EMPTIVE)
+#define RIOC_HANDSHAKE         (0x23 | RIOC_PRE_EMPTIVE)
                                /* Wake-up to HOST / RTA */
 
 #endif
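
The header hunk above is a mechanical namespace change: every register, configuration and command macro gains a RIOC_ prefix while keeping its old value, so generic names such as CONFIG, OPEN and CLOSE no longer collide with the rest of the kernel; the hunks in the .c files below are the matching call-site updates. One detail worth keeping in mind when reading them is RIOC_PRE_EMPTIVE: pre-emptive commands are simply the base opcode with bit 7 set, so either end can test for it with a mask. A tiny illustration, with the values copied from the defines above and a made-up is_preemptive() helper:

#include <stdio.h>

#define RIOC_PRE_EMPTIVE  0x80
#define RIOC_WFLUSH       (0x04 | RIOC_PRE_EMPTIVE)     /* 0x84 */
#define RIOC_SBREAK       0x07                          /* not pre-emptive */

static int is_preemptive(unsigned char cmd)
{
        return (cmd & RIOC_PRE_EMPTIVE) != 0;
}

int main(void)
{
        printf("WFLUSH = 0x%02x, pre-emptive: %d\n",
               RIOC_WFLUSH, is_preemptive(RIOC_WFLUSH));
        printf("SBREAK = 0x%02x, pre-emptive: %d\n",
               RIOC_SBREAK, is_preemptive(RIOC_SBREAK));
        return 0;
}
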
index 0ce96670f97994093fdc39ed84af0c5efc69d024..412777cd1e6879f14e8edabff5e780897c9748e8 100644 (file)
@@ -344,7 +344,7 @@ int rio_minor(struct tty_struct *tty)
 
 static int rio_set_real_termios(void *ptr)
 {
-       return RIOParam((struct Port *) ptr, CONFIG, 1, 1);
+       return RIOParam((struct Port *) ptr, RIOC_CONFIG, 1, 1);
 }
 
 
@@ -487,7 +487,7 @@ static int rio_get_CD(void *ptr)
        int rv;
 
        func_enter();
-       rv = (PortP->ModemState & MSVR1_CD) != 0;
+       rv = (PortP->ModemState & RIOC_MSVR1_CD) != 0;
 
        rio_dprintk(RIO_DEBUG_INIT, "Getting CD status: %d\n", rv);
 
@@ -607,7 +607,8 @@ static int rio_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd
                        rio_dprintk(RIO_DEBUG_TTY, "BREAK on deleted RTA\n");
                        rc = -EIO;
                } else {
-                       if (RIOShortCommand(p, PortP, SBREAK, 2, 250) == RIO_FAIL) {
+                       if (RIOShortCommand(p, PortP, RIOC_SBREAK, 2, 250) ==
+                                       RIO_FAIL) {
                                rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n");
                                rc = -EIO;
                        }
@@ -622,7 +623,8 @@ static int rio_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd
                        l = arg ? arg * 100 : 250;
                        if (l > 255)
                                l = 255;
-                       if (RIOShortCommand(p, PortP, SBREAK, 2, arg ? arg * 100 : 250) == RIO_FAIL) {
+                       if (RIOShortCommand(p, PortP, RIOC_SBREAK, 2,
+                                       arg ? arg * 100 : 250) == RIO_FAIL) {
                                rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n");
                                rc = -EIO;
                        }
index dc3f005614a36d5cefbc9df333a1fa09aab8546a..7f26cd7c815e579d228fab9c290a5c8b4e525b52 100644 (file)
@@ -186,9 +186,9 @@ static inline void *rio_memcpy_fromio(void *dest, void __iomem *source, int n)
 
 #ifdef DEBUG
 #define rio_dprintk(f, str...) do { if (rio_debug & f) printk (str);} while (0)
-#define func_enter() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s\n", __FUNCTION__)
-#define func_exit()  rio_dprintk (RIO_DEBUG_FLOW, "rio: exit  %s\n", __FUNCTION__)
-#define func_enter2() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s (port %d)\n",__FUNCTION__, port->line)
+#define func_enter() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s\n", __func__)
+#define func_exit()  rio_dprintk (RIO_DEBUG_FLOW, "rio: exit  %s\n", __func__)
+#define func_enter2() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s (port %d)\n",__func__, port->line)
 #else
 #define rio_dprintk(f, str...) /* nothing */
 #define func_enter()
index bf36959fc12149edc7610c1bf4957fa52c08eda6..7b96e0814887d8f14ea5a1b793686acbc3b099b1 100644 (file)
@@ -417,7 +417,7 @@ static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, struc
        PortP = p->RIOPortp[SysPort];
        rio_spin_lock_irqsave(&PortP->portSem, flags);
        switch (readb(&PktCmdP->Command)) {
-       case BREAK_RECEIVED:
+       case RIOC_BREAK_RECEIVED:
                rio_dprintk(RIO_DEBUG_CMD, "Received a break!\n");
                /* If the current line disc. is not multi-threading and
                   the current processor is not the default, reset rup_intr
@@ -428,16 +428,16 @@ static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, struc
                gs_got_break(&PortP->gs);
                break;
 
-       case COMPLETE:
+       case RIOC_COMPLETE:
                rio_dprintk(RIO_DEBUG_CMD, "Command complete on phb %d host %Zd\n", readb(&PktCmdP->PhbNum), HostP - p->RIOHosts);
                subCommand = 1;
                switch (readb(&PktCmdP->SubCommand)) {
-               case MEMDUMP:
+               case RIOC_MEMDUMP:
                        rio_dprintk(RIO_DEBUG_CMD, "Memory dump cmd (0x%x) from addr 0x%x\n", readb(&PktCmdP->SubCommand), readw(&PktCmdP->SubAddr));
                        break;
-               case READ_REGISTER:
+               case RIOC_READ_REGISTER:
                        rio_dprintk(RIO_DEBUG_CMD, "Read register (0x%x)\n", readw(&PktCmdP->SubAddr));
-                       p->CdRegister = (readb(&PktCmdP->ModemStatus) & MSVR1_HOST);
+                       p->CdRegister = (readb(&PktCmdP->ModemStatus) & RIOC_MSVR1_HOST);
                        break;
                default:
                        subCommand = 0;
@@ -456,14 +456,15 @@ static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, struc
                        rio_dprintk(RIO_DEBUG_CMD, "No change\n");
 
                /* FALLTHROUGH */
-       case MODEM_STATUS:
+       case RIOC_MODEM_STATUS:
                /*
                 ** Knock out the tbusy and tstop bits, as these are not relevant
                 ** to the check for modem status change (they're just there because
                 ** it's a convenient place to put them!).
                 */
                ReportedModemStatus = readb(&PktCmdP->ModemStatus);
-               if ((PortP->ModemState & MSVR1_HOST) == (ReportedModemStatus & MSVR1_HOST)) {
+               if ((PortP->ModemState & RIOC_MSVR1_HOST) ==
+                               (ReportedModemStatus & RIOC_MSVR1_HOST)) {
                        rio_dprintk(RIO_DEBUG_CMD, "Modem status unchanged 0x%x\n", PortP->ModemState);
                        /*
                         ** Update ModemState just in case tbusy or tstop states have
@@ -497,7 +498,7 @@ static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, struc
                                        /*
                                         ** Is there a carrier?
                                         */
-                                       if (PortP->ModemState & MSVR1_CD) {
+                                       if (PortP->ModemState & RIOC_MSVR1_CD) {
                                                /*
                                                 ** Has carrier just appeared?
                                                 */
@@ -691,7 +692,7 @@ void RIOPollHostCommands(struct rio_info *p, struct Host *HostP)
                                 */
                                rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags);
                                FreeMe = RIOCommandRup(p, Rup, HostP, PacketP);
-                               if (readb(&PacketP->data[5]) == MEMDUMP) {
+                               if (readb(&PacketP->data[5]) == RIOC_MEMDUMP) {
                                        rio_dprintk(RIO_DEBUG_CMD, "Memdump from 0x%x complete\n", readw(&(PacketP->data[6])));
                                        rio_memcpy_fromio(p->RIOMemDump, &(PacketP->data[8]), 32);
                                }
index d8eb2bcbe015dca54fce742555ec18142cb7851c..d65ceb9a434a49191cc84fa70355aab5fd964397 100644 (file)
@@ -422,7 +422,8 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su
                }
 
                rio_spin_lock_irqsave(&PortP->portSem, flags);
-               if (RIOPreemptiveCmd(p, (p->RIOPortp[port]), RESUME) == RIO_FAIL) {
+               if (RIOPreemptiveCmd(p, (p->RIOPortp[port]), RIOC_RESUME) ==
+                               RIO_FAIL) {
                        rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME failed\n");
                        rio_spin_unlock_irqrestore(&PortP->portSem, flags);
                        return -EBUSY;
@@ -636,7 +637,8 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su
                        return -ENXIO;
                }
                PortP = (p->RIOPortp[PortTty.port]);
-               RIOParam(PortP, CONFIG, PortP->State & RIO_MODEM, OK_TO_SLEEP);
+               RIOParam(PortP, RIOC_CONFIG, PortP->State & RIO_MODEM,
+                               OK_TO_SLEEP);
                return retval;
 
        case RIO_SET_PORT_PARAMS:
@@ -1247,7 +1249,7 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su
 
                rio_spin_lock_irqsave(&PortP->portSem, flags);
 
-               if (RIOPreemptiveCmd(p, PortP, MEMDUMP) == RIO_FAIL) {
+               if (RIOPreemptiveCmd(p, PortP, RIOC_MEMDUMP) == RIO_FAIL) {
                        rio_dprintk(RIO_DEBUG_CTRL, "RIO_MEM_DUMP failed\n");
                        rio_spin_unlock_irqrestore(&PortP->portSem, flags);
                        return -EBUSY;
@@ -1313,7 +1315,8 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su
 
                rio_spin_lock_irqsave(&PortP->portSem, flags);
 
-               if (RIOPreemptiveCmd(p, PortP, READ_REGISTER) == RIO_FAIL) {
+               if (RIOPreemptiveCmd(p, PortP, RIOC_READ_REGISTER) ==
+                               RIO_FAIL) {
                        rio_dprintk(RIO_DEBUG_CTRL, "RIO_READ_REGISTER failed\n");
                        rio_spin_unlock_irqrestore(&PortP->portSem, flags);
                        return -EBUSY;
@@ -1434,50 +1437,50 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd)
        PktCmdP->PhbNum = port;
 
        switch (Cmd) {
-       case MEMDUMP:
+       case RIOC_MEMDUMP:
                rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p "
                                "(addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr);
-               PktCmdP->SubCommand = MEMDUMP;
+               PktCmdP->SubCommand = RIOC_MEMDUMP;
                PktCmdP->SubAddr = SubCmd.Addr;
                break;
-       case FCLOSE:
+       case RIOC_FCLOSE:
                rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n",
                                CmdBlkP);
                break;
-       case READ_REGISTER:
+       case RIOC_READ_REGISTER:
                rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) "
                                "command blk %p\n", (int) SubCmd.Addr, CmdBlkP);
-               PktCmdP->SubCommand = READ_REGISTER;
+               PktCmdP->SubCommand = RIOC_READ_REGISTER;
                PktCmdP->SubAddr = SubCmd.Addr;
                break;
-       case RESUME:
+       case RIOC_RESUME:
                rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n",
                                CmdBlkP);
                break;
-       case RFLUSH:
+       case RIOC_RFLUSH:
                rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n",
                                CmdBlkP);
                CmdBlkP->PostFuncP = RIORFlushEnable;
                break;
-       case SUSPEND:
+       case RIOC_SUSPEND:
                rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n",
                                CmdBlkP);
                break;
 
-       case MGET:
+       case RIOC_MGET:
                rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n",
                                CmdBlkP);
                break;
 
-       case MSET:
-       case MBIC:
-       case MBIS:
+       case RIOC_MSET:
+       case RIOC_MBIC:
+       case RIOC_MBIS:
                CmdBlkP->Packet.data[4] = (char) PortP->ModemLines;
                rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command "
                                "blk %p\n", CmdBlkP);
                break;
 
-       case WFLUSH:
+       case RIOC_WFLUSH:
                /*
                 ** If we have queued up the maximum number of Write flushes
                 ** allowed then we should not bother sending any more to the
index 4734e26e1ccd8a694ba23e090e56b35fe890598c..ea21686c69a43293b41664546b134932e3837ca9 100644 (file)
@@ -401,9 +401,8 @@ void RIOServiceHost(struct rio_info *p, struct Host *HostP)
                                        PortP->InUse = NOT_INUSE;
 
                                        rio_spin_unlock(&PortP->portSem);
-                                       if (RIOParam(PortP, OPEN, ((PortP->Cor2Copy & (COR2_RTSFLOW | COR2_CTSFLOW)) == (COR2_RTSFLOW | COR2_CTSFLOW)) ? 1 : 0, DONT_SLEEP) == RIO_FAIL) {
+                                       if (RIOParam(PortP, RIOC_OPEN, ((PortP->Cor2Copy & (RIOC_COR2_RTSFLOW | RIOC_COR2_CTSFLOW)) == (RIOC_COR2_RTSFLOW | RIOC_COR2_CTSFLOW)) ? 1 : 0, DONT_SLEEP) == RIO_FAIL)
                                                continue;       /* with next port */
-                                       }
                                        rio_spin_lock(&PortP->portSem);
                                        PortP->MagicFlags &= ~MAGIC_REBOOT;
                                }
@@ -429,7 +428,7 @@ void RIOServiceHost(struct rio_info *p, struct Host *HostP)
                                         */
                                        PktCmdP = (struct PktCmd __iomem *) &PacketP->data[0];
 
-                                       writeb(WFLUSH, &PktCmdP->Command);
+                                       writeb(RIOC_WFLUSH, &PktCmdP->Command);
 
                                        p = PortP->HostPort % (u16) PORTS_PER_RTA;
 
index da276ed57b3f5b8ade87dc12f3188bd50404978c..4810b845cc21074f49fc72aba3c0e7aa5637bee6 100644 (file)
@@ -177,7 +177,7 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
        }
        rio_spin_lock_irqsave(&PortP->portSem, flags);
 
-       if (cmd == OPEN) {
+       if (cmd == RIOC_OPEN) {
                /*
                 ** If the port is set to store or lock the parameters, and it is
                 ** paramed with OPEN, we want to restore the saved port termio, but
@@ -241,50 +241,50 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
        case CS5:
                {
                        rio_dprintk(RIO_DEBUG_PARAM, "5 bit data\n");
-                       Cor1 |= COR1_5BITS;
+                       Cor1 |= RIOC_COR1_5BITS;
                        break;
                }
        case CS6:
                {
                        rio_dprintk(RIO_DEBUG_PARAM, "6 bit data\n");
-                       Cor1 |= COR1_6BITS;
+                       Cor1 |= RIOC_COR1_6BITS;
                        break;
                }
        case CS7:
                {
                        rio_dprintk(RIO_DEBUG_PARAM, "7 bit data\n");
-                       Cor1 |= COR1_7BITS;
+                       Cor1 |= RIOC_COR1_7BITS;
                        break;
                }
        case CS8:
                {
                        rio_dprintk(RIO_DEBUG_PARAM, "8 bit data\n");
-                       Cor1 |= COR1_8BITS;
+                       Cor1 |= RIOC_COR1_8BITS;
                        break;
                }
        }
 
        if (TtyP->termios->c_cflag & CSTOPB) {
                rio_dprintk(RIO_DEBUG_PARAM, "2 stop bits\n");
-               Cor1 |= COR1_2STOP;
+               Cor1 |= RIOC_COR1_2STOP;
        } else {
                rio_dprintk(RIO_DEBUG_PARAM, "1 stop bit\n");
-               Cor1 |= COR1_1STOP;
+               Cor1 |= RIOC_COR1_1STOP;
        }
 
        if (TtyP->termios->c_cflag & PARENB) {
                rio_dprintk(RIO_DEBUG_PARAM, "Enable parity\n");
-               Cor1 |= COR1_NORMAL;
+               Cor1 |= RIOC_COR1_NORMAL;
        } else {
                rio_dprintk(RIO_DEBUG_PARAM, "Disable parity\n");
-               Cor1 |= COR1_NOP;
+               Cor1 |= RIOC_COR1_NOP;
        }
        if (TtyP->termios->c_cflag & PARODD) {
                rio_dprintk(RIO_DEBUG_PARAM, "Odd parity\n");
-               Cor1 |= COR1_ODD;
+               Cor1 |= RIOC_COR1_ODD;
        } else {
                rio_dprintk(RIO_DEBUG_PARAM, "Even parity\n");
-               Cor1 |= COR1_EVEN;
+               Cor1 |= RIOC_COR1_EVEN;
        }
 
        /*
@@ -292,11 +292,11 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
         */
        if (TtyP->termios->c_iflag & IXON) {
                rio_dprintk(RIO_DEBUG_PARAM, "Enable start/stop output control\n");
-               Cor2 |= COR2_IXON;
+               Cor2 |= RIOC_COR2_IXON;
        } else {
                if (PortP->Config & RIO_IXON) {
                        rio_dprintk(RIO_DEBUG_PARAM, "Force enable start/stop output control\n");
-                       Cor2 |= COR2_IXON;
+                       Cor2 |= RIOC_COR2_IXON;
                } else
                        rio_dprintk(RIO_DEBUG_PARAM, "IXON has been disabled.\n");
        }
@@ -304,29 +304,29 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
        if (TtyP->termios->c_iflag & IXANY) {
                if (PortP->Config & RIO_IXANY) {
                        rio_dprintk(RIO_DEBUG_PARAM, "Enable any key to restart output\n");
-                       Cor2 |= COR2_IXANY;
+                       Cor2 |= RIOC_COR2_IXANY;
                } else
                        rio_dprintk(RIO_DEBUG_PARAM, "IXANY has been disabled due to sanity reasons.\n");
        }
 
        if (TtyP->termios->c_iflag & IXOFF) {
                rio_dprintk(RIO_DEBUG_PARAM, "Enable start/stop input control 2\n");
-               Cor2 |= COR2_IXOFF;
+               Cor2 |= RIOC_COR2_IXOFF;
        }
 
        if (TtyP->termios->c_cflag & HUPCL) {
                rio_dprintk(RIO_DEBUG_PARAM, "Hangup on last close\n");
-               Cor2 |= COR2_HUPCL;
+               Cor2 |= RIOC_COR2_HUPCL;
        }
 
        if (C_CRTSCTS(TtyP)) {
                rio_dprintk(RIO_DEBUG_PARAM, "Rx hardware flow control enabled\n");
-               Cor2 |= COR2_CTSFLOW;
-               Cor2 |= COR2_RTSFLOW;
+               Cor2 |= RIOC_COR2_CTSFLOW;
+               Cor2 |= RIOC_COR2_RTSFLOW;
        } else {
                rio_dprintk(RIO_DEBUG_PARAM, "Rx hardware flow control disabled\n");
-               Cor2 &= ~COR2_CTSFLOW;
-               Cor2 &= ~COR2_RTSFLOW;
+               Cor2 &= ~RIOC_COR2_CTSFLOW;
+               Cor2 &= ~RIOC_COR2_RTSFLOW;
        }
 
 
@@ -341,36 +341,36 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
         */
        if (TtyP->termios->c_iflag & IGNBRK) {
                rio_dprintk(RIO_DEBUG_PARAM, "Ignore break condition\n");
-               Cor4 |= COR4_IGNBRK;
+               Cor4 |= RIOC_COR4_IGNBRK;
        }
        if (!(TtyP->termios->c_iflag & BRKINT)) {
                rio_dprintk(RIO_DEBUG_PARAM, "Break generates NULL condition\n");
-               Cor4 |= COR4_NBRKINT;
+               Cor4 |= RIOC_COR4_NBRKINT;
        } else {
                rio_dprintk(RIO_DEBUG_PARAM, "Interrupt on      break condition\n");
        }
 
        if (TtyP->termios->c_iflag & INLCR) {
                rio_dprintk(RIO_DEBUG_PARAM, "Map newline to carriage return on input\n");
-               Cor4 |= COR4_INLCR;
+               Cor4 |= RIOC_COR4_INLCR;
        }
 
        if (TtyP->termios->c_iflag & IGNCR) {
                rio_dprintk(RIO_DEBUG_PARAM, "Ignore carriage return on input\n");
-               Cor4 |= COR4_IGNCR;
+               Cor4 |= RIOC_COR4_IGNCR;
        }
 
        if (TtyP->termios->c_iflag & ICRNL) {
                rio_dprintk(RIO_DEBUG_PARAM, "Map carriage return to newline on input\n");
-               Cor4 |= COR4_ICRNL;
+               Cor4 |= RIOC_COR4_ICRNL;
        }
        if (TtyP->termios->c_iflag & IGNPAR) {
                rio_dprintk(RIO_DEBUG_PARAM, "Ignore characters with parity errors\n");
-               Cor4 |= COR4_IGNPAR;
+               Cor4 |= RIOC_COR4_IGNPAR;
        }
        if (TtyP->termios->c_iflag & PARMRK) {
                rio_dprintk(RIO_DEBUG_PARAM, "Mark parity errors\n");
-               Cor4 |= COR4_PARMRK;
+               Cor4 |= RIOC_COR4_PARMRK;
        }
 
        /*
@@ -378,22 +378,22 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
         ** on reception of a config packet.
         ** The download code handles the zero baud condition.
         */
-       Cor4 |= COR4_RAISEMOD;
+       Cor4 |= RIOC_COR4_RAISEMOD;
 
        /*
         ** COR 5
         */
 
-       Cor5 = COR5_CMOE;
+       Cor5 = RIOC_COR5_CMOE;
 
        /*
         ** Set to monitor tbusy/tstop (or not).
         */
 
        if (PortP->MonitorTstate)
-               Cor5 |= COR5_TSTATE_ON;
+               Cor5 |= RIOC_COR5_TSTATE_ON;
        else
-               Cor5 |= COR5_TSTATE_OFF;
+               Cor5 |= RIOC_COR5_TSTATE_OFF;
 
        /*
         ** Could set LNE here if you wanted LNext processing. SVR4 will use it.
@@ -401,24 +401,24 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag)
        if (TtyP->termios->c_iflag & ISTRIP) {
                rio_dprintk(RIO_DEBUG_PARAM, "Strip input characters\n");
                if (!(PortP->State & RIO_TRIAD_MODE)) {
-                       Cor5 |= COR5_ISTRIP;
+                       Cor5 |= RIOC_COR5_ISTRIP;
                }
        }
 
        if (TtyP->termios->c_oflag & ONLCR) {
                rio_dprintk(RIO_DEBUG_PARAM, "Map newline to carriage-return, newline on output\n");
                if (PortP->CookMode == COOK_MEDIUM)
-                       Cor5 |= COR5_ONLCR;
+                       Cor5 |= RIOC_COR5_ONLCR;
        }
        if (TtyP->termios->c_oflag & OCRNL) {
                rio_dprintk(RIO_DEBUG_PARAM, "Map carriage return to newline on output\n");
                if (PortP->CookMode == COOK_MEDIUM)
-                       Cor5 |= COR5_OCRNL;
+                       Cor5 |= RIOC_COR5_OCRNL;
        }
        if ((TtyP->termios->c_oflag & TABDLY) == TAB3) {
                rio_dprintk(RIO_DEBUG_PARAM, "Tab delay 3 set\n");
                if (PortP->CookMode == COOK_MEDIUM)
-                       Cor5 |= COR5_TAB3;
+                       Cor5 |= RIOC_COR5_TAB3;
        }
 
        /*
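
The RIOParam() hunks above translate standard termios settings into the RTA's COR register bytes using the renamed RIOC_COR* macros. A small user-space sketch of just the COR1 construction, with the bit values copied from the header hunk earlier in this diff and a hypothetical build_cor1() helper standing in for the driver's inline logic, might read:

#include <stdio.h>
#include <termios.h>

#define RIOC_COR1_ODD     0x80
#define RIOC_COR1_EVEN    0x00
#define RIOC_COR1_NOP     0x00
#define RIOC_COR1_NORMAL  0x40
#define RIOC_COR1_1STOP   0x00
#define RIOC_COR1_2STOP   0x08
#define RIOC_COR1_5BITS   0x00
#define RIOC_COR1_6BITS   0x01
#define RIOC_COR1_7BITS   0x02
#define RIOC_COR1_8BITS   0x03

/* Map the character-size, stop-bit and parity flags of a termios
 * c_cflag word onto the COR1 bits, as RIOParam() does above. */
static unsigned char build_cor1(tcflag_t c_cflag)
{
        unsigned char cor1 = 0;

        switch (c_cflag & CSIZE) {
        case CS5: cor1 |= RIOC_COR1_5BITS; break;
        case CS6: cor1 |= RIOC_COR1_6BITS; break;
        case CS7: cor1 |= RIOC_COR1_7BITS; break;
        case CS8: cor1 |= RIOC_COR1_8BITS; break;
        }
        cor1 |= (c_cflag & CSTOPB) ? RIOC_COR1_2STOP : RIOC_COR1_1STOP;
        cor1 |= (c_cflag & PARENB) ? RIOC_COR1_NORMAL : RIOC_COR1_NOP;
        cor1 |= (c_cflag & PARODD) ? RIOC_COR1_ODD : RIOC_COR1_EVEN;
        return cor1;
}

int main(void)
{
        /* 8N1: eight data bits, no parity, one stop bit. */
        printf("COR1 for 8N1 = 0x%02x\n", build_cor1(CS8));
        return 0;
}
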
index 85091ff74d968452dd5fc91e3851be346357f04d..7a9df7dcf9a88d54677d798d04b34972624f0864 100644 (file)
@@ -526,7 +526,7 @@ void RIOFixPhbs(struct rio_info *p, struct Host *HostP, unsigned int unit)
                         ** If RTA is not powered on, the tx packets will be
                         ** unset, so go no further.
                         */
-                       if (PortP->TxStart == 0) {
+                       if (!PortP->TxStart) {
                                rio_dprintk(RIO_DEBUG_ROUTE, "Tx pkts not set up yet\n");
                                rio_spin_unlock_irqrestore(&PortP->portSem, flags);
                                break;
index 1cb8580a161dc72b1403b13fff81e862748ab239..c99354843be100844690f6c9aafa987aaf2ee0b3 100644 (file)
@@ -211,7 +211,7 @@ int riotopen(struct tty_struct *tty, struct file *filp)
                rio_dprintk(RIO_DEBUG_TTY, "Waiting for RIO_CLOSING to go away\n");
                if (repeat_this-- <= 0) {
                        rio_dprintk(RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n");
-                       RIOPreemptiveCmd(p, PortP, FCLOSE);
+                       RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
                        retval = -EINTR;
                        goto bombout;
                }
@@ -264,7 +264,7 @@ int riotopen(struct tty_struct *tty, struct file *filp)
                   here. If I read the docs correctly the "open"
                   command piggybacks the parameters immediately.
                   -- REW */
-               RIOParam(PortP, OPEN, 1, OK_TO_SLEEP);  /* Open the port */
+               RIOParam(PortP, RIOC_OPEN, 1, OK_TO_SLEEP); /* Open the port */
                rio_spin_lock_irqsave(&PortP->portSem, flags);
 
                /*
@@ -275,7 +275,7 @@ int riotopen(struct tty_struct *tty, struct file *filp)
                        rio_spin_unlock_irqrestore(&PortP->portSem, flags);
                        if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
                                rio_dprintk(RIO_DEBUG_TTY, "Waiting for open to finish broken by signal\n");
-                               RIOPreemptiveCmd(p, PortP, FCLOSE);
+                               RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
                                func_exit();
                                return -EINTR;
                        }
@@ -297,7 +297,8 @@ int riotopen(struct tty_struct *tty, struct file *filp)
         ** insert test for carrier here. -- ???
         ** I already see that test here. What's the deal? -- REW
         */
-       if ((PortP->gs.tty->termios->c_cflag & CLOCAL) || (PortP->ModemState & MSVR1_CD)) {
+       if ((PortP->gs.tty->termios->c_cflag & CLOCAL) ||
+                       (PortP->ModemState & RIOC_MSVR1_CD)) {
                rio_dprintk(RIO_DEBUG_TTY, "open(%d) Modem carr on\n", SysPort);
                /*
                   tp->tm.c_state |= CARR_ON;
@@ -325,7 +326,7 @@ int riotopen(struct tty_struct *tty, struct file *filp)
                                 ** I think it's OK. -- REW
                                 */
                                rio_dprintk(RIO_DEBUG_TTY, "open(%d) sleeping for carr broken by signal\n", SysPort);
-                               RIOPreemptiveCmd(p, PortP, FCLOSE);
+                               RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
                                /*
                                   tp->tm.c_state &= ~WOPEN;
                                 */
@@ -416,7 +417,7 @@ int riotclose(void *ptr)
         */
        PortP->State &= ~RIO_MOPEN;
        PortP->State &= ~RIO_CARR_ON;
-       PortP->ModemState &= ~MSVR1_CD;
+       PortP->ModemState &= ~RIOC_MSVR1_CD;
        /*
         ** If the device was open as both a Modem and a tty line
         ** then we need to wimp out here, as the port has not really
@@ -453,7 +454,7 @@ int riotclose(void *ptr)
                        if (repeat_this-- <= 0) {
                                rv = -EINTR;
                                rio_dprintk(RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n");
-                               RIOPreemptiveCmd(p, PortP, FCLOSE);
+                               RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
                                goto close_end;
                        }
                        rio_dprintk(RIO_DEBUG_TTY, "Calling timeout to flush in closing\n");
@@ -492,8 +493,8 @@ int riotclose(void *ptr)
        /* Can't call RIOShortCommand with the port locked. */
        rio_spin_unlock_irqrestore(&PortP->portSem, flags);
 
-       if (RIOShortCommand(p, PortP, CLOSE, 1, 0) == RIO_FAIL) {
-               RIOPreemptiveCmd(p, PortP, FCLOSE);
+       if (RIOShortCommand(p, PortP, RIOC_CLOSE, 1, 0) == RIO_FAIL) {
+               RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
                rio_spin_lock_irqsave(&PortP->portSem, flags);
                goto close_end;
        }
@@ -503,7 +504,7 @@ int riotclose(void *ptr)
                        try--;
                        if (time_after(jiffies, end_time)) {
                                rio_dprintk(RIO_DEBUG_TTY, "Run out of tries - force the bugger shut!\n");
-                               RIOPreemptiveCmd(p, PortP, FCLOSE);
+                               RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
                                break;
                        }
                        rio_dprintk(RIO_DEBUG_TTY, "Close: PortState:ISOPEN is %d\n", PortP->PortState & PORT_ISOPEN);
@@ -515,14 +516,14 @@ int riotclose(void *ptr)
                        }
                        if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) {
                                rio_dprintk(RIO_DEBUG_TTY, "RTA EINTR in delay \n");
-                               RIOPreemptiveCmd(p, PortP, FCLOSE);
+                               RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
                                break;
                        }
                }
        rio_spin_lock_irqsave(&PortP->portSem, flags);
        rio_dprintk(RIO_DEBUG_TTY, "Close: try was %d on completion\n", try);
 
-       /* RIOPreemptiveCmd(p, PortP, FCLOSE); */
+       /* RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE); */
 
 /*
 ** 15.10.1998 ARG - ESIL 0761 part fix
index 3f9d0a9ac36d5988408f29a133e5a9c8fa1763ae..f073c710ab8d958a2fb695f43ad279a12929da06 100644 (file)
@@ -4,9 +4,9 @@
  *      Copyright (C) 1994-1996  Dmitry Gorodchanin (pgmdsg@ibi.com)
  *
  *      This code is loosely based on the Linux serial driver, written by
- *      Linus Torvalds, Theodore T'so and others. The RISCom/8 card 
- *      programming info was obtained from various drivers for other OSes 
- *     (FreeBSD, ISC, etc), but no source code from those drivers were 
+ *      Linus Torvalds, Theodore T'so and others. The RISCom/8 card
+ *      programming info was obtained from various drivers for other OSes
+ *     (FreeBSD, ISC, etc), but no source code from those drivers were
  *     directly included in this driver.
  *
  *
@@ -33,7 +33,7 @@
 
 #include <linux/module.h>
 
-#include <asm/io.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/ioport.h>
@@ -49,7 +49,7 @@
 #include <linux/tty_flip.h>
 #include <linux/spinlock.h>
 
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 #include "riscom8.h"
 #include "riscom8_reg.h"
 /* Am I paranoid or not ? ;-) */
 #define RISCOM_PARANOIA_CHECK
 
-/* 
- * Crazy InteliCom/8 boards sometimes has swapped CTS & DSR signals.
+/*
+ * Crazy InteliCom/8 boards sometimes have swapped CTS & DSR signals.
  * You can slightly speed up things by #undefing the following option,
- * if you are REALLY sure that your board is correct one. 
+ * if you are REALLY sure that your board is correct one.
  */
 
 #define RISCOM_BRAIN_DAMAGED_CTS
 
-/* 
+/*
  * The following defines are mostly for testing purposes. But if you need
  * some nice reporting in your syslog, you can define them also.
  */
@@ -112,7 +112,7 @@ static unsigned short rc_ioport[] =  {
 #define RC_NIOPORT     ARRAY_SIZE(rc_ioport)
 
 
-static inline int rc_paranoia_check(struct riscom_port const * port,
+static int rc_paranoia_check(struct riscom_port const *port,
                                    char *name, const char *routine)
 {
 #ifdef RISCOM_PARANOIA_CHECK
@@ -134,52 +134,53 @@ static inline int rc_paranoia_check(struct riscom_port const * port,
 }
 
 /*
- * 
+ *
  *  Service functions for RISCom/8 driver.
- * 
+ *
  */
 
 /* Get board number from pointer */
-static inline int board_No (struct riscom_board const * bp)
+static inline int board_No(struct riscom_board const *bp)
 {
        return bp - rc_board;
 }
 
 /* Get port number from pointer */
-static inline int port_No (struct riscom_port const * port)
+static inline int port_No(struct riscom_port const *port)
 {
-       return RC_PORT(port - rc_port); 
+       return RC_PORT(port - rc_port);
 }
 
 /* Get pointer to board from pointer to port */
-static inline struct riscom_board * port_Board(struct riscom_port const * port)
+static inline struct riscom_board *port_Board(struct riscom_port const *port)
 {
        return &rc_board[RC_BOARD(port - rc_port)];
 }
 
 /* Input Byte from CL CD180 register */
-static inline unsigned char rc_in(struct riscom_board const * bp, unsigned short reg)
+static inline unsigned char rc_in(struct riscom_board const *bp,
+                                                       unsigned short reg)
 {
        return inb(bp->base + RC_TO_ISA(reg));
 }
 
 /* Output Byte to CL CD180 register */
-static inline void rc_out(struct riscom_board const * bp, unsigned short reg,
+static inline void rc_out(struct riscom_board const *bp, unsigned short reg,
                          unsigned char val)
 {
        outb(val, bp->base + RC_TO_ISA(reg));
 }
 
 /* Wait for Channel Command Register ready */
-static inline void rc_wait_CCR(struct riscom_board const * bp)
+static void rc_wait_CCR(struct riscom_board const *bp)
 {
        unsigned long delay;
 
        /* FIXME: need something more descriptive then 100000 :) */
-       for (delay = 100000; delay; delay--) 
+       for (delay = 100000; delay; delay--)
                if (!rc_in(bp, CD180_CCR))
                        return;
-       
+
        printk(KERN_INFO "rc%d: Timeout waiting for CCR.\n", board_No(bp));
 }
 
@@ -187,11 +188,11 @@ static inline void rc_wait_CCR(struct riscom_board const * bp)
  *  RISCom/8 probe functions.
  */
 
-static inline int rc_request_io_range(struct riscom_board * const bp)
+static int rc_request_io_range(struct riscom_board * const bp)
 {
        int i;
-       
-       for (i = 0; i < RC_NIOPORT; i++)  
+
+       for (i = 0; i < RC_NIOPORT; i++)
                if (!request_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1,
                                   "RISCom/8"))  {
                        goto out_release;
@@ -200,42 +201,42 @@ static inline int rc_request_io_range(struct riscom_board * const bp)
 out_release:
        printk(KERN_INFO "rc%d: Skipping probe at 0x%03x. IO address in use.\n",
                         board_No(bp), bp->base);
-       while(--i >= 0)
+       while (--i >= 0)
                release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1);
        return 1;
 }
 
-static inline void rc_release_io_range(struct riscom_board * const bp)
+static void rc_release_io_range(struct riscom_board * const bp)
 {
        int i;
-       
-       for (i = 0; i < RC_NIOPORT; i++)  
+
+       for (i = 0; i < RC_NIOPORT; i++)
                release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1);
 }
-       
+
 /* Reset and setup CD180 chip */
-static void __init rc_init_CD180(struct riscom_board const * bp)
+static void __init rc_init_CD180(struct riscom_board const *bp)
 {
        unsigned long flags;
-       
+
        spin_lock_irqsave(&riscom_lock, flags);
 
-       rc_out(bp, RC_CTOUT, 0);                   /* Clear timeout             */
-       rc_wait_CCR(bp);                           /* Wait for CCR ready        */
-       rc_out(bp, CD180_CCR, CCR_HARDRESET);      /* Reset CD180 chip          */
+       rc_out(bp, RC_CTOUT, 0);                   /* Clear timeout        */
+       rc_wait_CCR(bp);                           /* Wait for CCR ready   */
+       rc_out(bp, CD180_CCR, CCR_HARDRESET);      /* Reset CD180 chip     */
        spin_unlock_irqrestore(&riscom_lock, flags);
-       msleep(50);                                /* Delay 0.05 sec            */
+       msleep(50);                                /* Delay 0.05 sec       */
        spin_lock_irqsave(&riscom_lock, flags);
-       rc_out(bp, CD180_GIVR, RC_ID);             /* Set ID for this chip      */
-       rc_out(bp, CD180_GICR, 0);                 /* Clear all bits            */
-       rc_out(bp, CD180_PILR1, RC_ACK_MINT);      /* Prio for modem intr       */
-       rc_out(bp, CD180_PILR2, RC_ACK_TINT);      /* Prio for transmitter intr */
-       rc_out(bp, CD180_PILR3, RC_ACK_RINT);      /* Prio for receiver intr    */
-       
+       rc_out(bp, CD180_GIVR, RC_ID);             /* Set ID for this chip */
+       rc_out(bp, CD180_GICR, 0);                 /* Clear all bits       */
+       rc_out(bp, CD180_PILR1, RC_ACK_MINT);      /* Prio for modem intr  */
+       rc_out(bp, CD180_PILR2, RC_ACK_TINT);      /* Prio for tx intr     */
+       rc_out(bp, CD180_PILR3, RC_ACK_RINT);      /* Prio for rx intr     */
+
        /* Setting up prescaler. We need 4 ticks per 1 ms */
        rc_out(bp, CD180_PPRH, (RC_OSCFREQ/(1000000/RISCOM_TPS)) >> 8);
        rc_out(bp, CD180_PPRL, (RC_OSCFREQ/(1000000/RISCOM_TPS)) & 0xff);
-       
+
        spin_unlock_irqrestore(&riscom_lock, flags);
 }
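
As a rough illustration of the prescaler arithmetic in the hunk above: the 16-bit divisor RC_OSCFREQ / (1000000 / RISCOM_TPS) is split into a high byte for PPRH and a low byte for PPRL. The standalone sketch below uses placeholder values for RC_OSCFREQ and RISCOM_TPS, not the driver's real constants.

    #include <stdio.h>

    #define RC_OSCFREQ  9830400UL          /* placeholder oscillator, Hz */
    #define RISCOM_TPS  4000UL             /* placeholder ticks per second */

    int main(void)
    {
            unsigned long div = RC_OSCFREQ / (1000000UL / RISCOM_TPS);

            /* same split as the PPRH/PPRL writes in rc_init_CD180() */
            printf("PPRH = 0x%02lx, PPRL = 0x%02lx\n",
                   (div >> 8) & 0xff, div & 0xff);
            return 0;
    }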
 
@@ -245,12 +246,12 @@ static int __init rc_probe(struct riscom_board *bp)
        unsigned char val1, val2;
        int irqs = 0;
        int retries;
-       
+
        bp->irq = 0;
 
        if (rc_request_io_range(bp))
                return 1;
-       
+
        /* Are the I/O ports here ? */
        rc_out(bp, CD180_PPRL, 0x5a);
        outb(0xff, 0x80);
@@ -258,34 +259,34 @@ static int __init rc_probe(struct riscom_board *bp)
        rc_out(bp, CD180_PPRL, 0xa5);
        outb(0x00, 0x80);
        val2 = rc_in(bp, CD180_PPRL);
-       
+
        if ((val1 != 0x5a) || (val2 != 0xa5))  {
                printk(KERN_ERR "rc%d: RISCom/8 Board at 0x%03x not found.\n",
                       board_No(bp), bp->base);
                goto out_release;
        }
-       
+
        /* It's time to find IRQ for this board */
-       for (retries = 0; retries < 5 && irqs <= 0; retries++)  {
+       for (retries = 0; retries < 5 && irqs <= 0; retries++) {
                irqs = probe_irq_on();
-               rc_init_CD180(bp);                      /* Reset CD180 chip       */
-               rc_out(bp, CD180_CAR, 2);               /* Select port 2          */
+               rc_init_CD180(bp);               /* Reset CD180 chip         */
+               rc_out(bp, CD180_CAR, 2);        /* Select port 2            */
                rc_wait_CCR(bp);
-               rc_out(bp, CD180_CCR, CCR_TXEN);        /* Enable transmitter     */
-               rc_out(bp, CD180_IER, IER_TXRDY);       /* Enable tx empty intr   */
+               rc_out(bp, CD180_CCR, CCR_TXEN); /* Enable transmitter       */
+               rc_out(bp, CD180_IER, IER_TXRDY);/* Enable tx empty intr     */
                msleep(50);
                irqs = probe_irq_off(irqs);
-               val1 = rc_in(bp, RC_BSR);               /* Get Board Status reg   */
-               val2 = rc_in(bp, RC_ACK_TINT);          /* ACK interrupt          */
-               rc_init_CD180(bp);                      /* Reset CD180 again      */
-       
+               val1 = rc_in(bp, RC_BSR);       /* Get Board Status reg      */
+               val2 = rc_in(bp, RC_ACK_TINT);  /* ACK interrupt             */
+               rc_init_CD180(bp);              /* Reset CD180 again         */
+
                if ((val1 & RC_BSR_TINT) || (val2 != (RC_ID | GIVR_IT_TX)))  {
                        printk(KERN_ERR "rc%d: RISCom/8 Board at 0x%03x not "
                                        "found.\n", board_No(bp), bp->base);
                        goto out_release;
                }
        }
-       
+
        if (irqs <= 0)  {
                printk(KERN_ERR "rc%d: Can't find IRQ for RISCom/8 board "
                                "at 0x%03x.\n", board_No(bp), bp->base);
@@ -293,113 +294,112 @@ static int __init rc_probe(struct riscom_board *bp)
        }
        bp->irq = irqs;
        bp->flags |= RC_BOARD_PRESENT;
-       
+
        printk(KERN_INFO "rc%d: RISCom/8 Rev. %c board detected at "
                         "0x%03x, IRQ %d.\n",
               board_No(bp),
               (rc_in(bp, CD180_GFRCR) & 0x0f) + 'A',   /* Board revision */
               bp->base, bp->irq);
-       
+
        return 0;
 out_release:
        rc_release_io_range(bp);
        return 1;
 }
 
-/* 
- * 
+/*
+ *
  *  Interrupt processing routines.
- * 
+ *
  */
 
-static inline struct riscom_port * rc_get_port(struct riscom_board const * bp,
-                                              unsigned char const * what)
+static struct riscom_port *rc_get_port(struct riscom_board const *bp,
+                                              unsigned char const *what)
 {
        unsigned char channel;
-       struct riscom_port * port;
-       
+       struct riscom_port *port;
+
        channel = rc_in(bp, CD180_GICR) >> GICR_CHAN_OFF;
        if (channel < CD180_NCH)  {
                port = &rc_port[board_No(bp) * RC_NPORT + channel];
-               if (port->flags & ASYNC_INITIALIZED)  {
+               if (port->flags & ASYNC_INITIALIZED)
                        return port;
-               }
        }
-       printk(KERN_ERR "rc%d: %s interrupt from invalid port %d\n", 
+       printk(KERN_ERR "rc%d: %s interrupt from invalid port %d\n",
               board_No(bp), what, channel);
        return NULL;
 }
 
-static inline void rc_receive_exc(struct riscom_board const * bp)
+static void rc_receive_exc(struct riscom_board const *bp)
 {
        struct riscom_port *port;
        struct tty_struct *tty;
        unsigned char status;
        unsigned char ch, flag;
-       
-       if (!(port = rc_get_port(bp, "Receive")))
+
+       port = rc_get_port(bp, "Receive");
+       if (port == NULL)
                return;
 
        tty = port->tty;
-       
-#ifdef RC_REPORT_OVERRUN       
+
+#ifdef RC_REPORT_OVERRUN
        status = rc_in(bp, CD180_RCSR);
        if (status & RCSR_OE)
                port->overrun++;
        status &= port->mark_mask;
-#else  
+#else
        status = rc_in(bp, CD180_RCSR) & port->mark_mask;
-#endif 
+#endif
        ch = rc_in(bp, CD180_RDR);
-       if (!status)  {
+       if (!status)
                return;
-       }
        if (status & RCSR_TOUT)  {
                printk(KERN_WARNING "rc%d: port %d: Receiver timeout. "
-                                   "Hardware problems ?\n", 
+                                   "Hardware problems ?\n",
                       board_No(bp), port_No(port));
                return;
-               
+
        } else if (status & RCSR_BREAK)  {
                printk(KERN_INFO "rc%d: port %d: Handling break...\n",
                       board_No(bp), port_No(port));
                flag = TTY_BREAK;
                if (port->flags & ASYNC_SAK)
                        do_SAK(tty);
-               
-       } else if (status & RCSR_PE) 
+
+       } else if (status & RCSR_PE)
                flag = TTY_PARITY;
-       
-       else if (status & RCSR_FE) 
+
+       else if (status & RCSR_FE)
                flag = TTY_FRAME;
-       
-        else if (status & RCSR_OE)
+
+       else if (status & RCSR_OE)
                flag = TTY_OVERRUN;
-       
        else
                flag = TTY_NORMAL;
-       
+
        tty_insert_flip_char(tty, ch, flag);
        tty_flip_buffer_push(tty);
 }
 
-static inline void rc_receive(struct riscom_board const * bp)
+static void rc_receive(struct riscom_board const *bp)
 {
        struct riscom_port *port;
        struct tty_struct *tty;
        unsigned char count;
-       
-       if (!(port = rc_get_port(bp, "Receive")))
+
+       port = rc_get_port(bp, "Receive");
+       if (port == NULL)
                return;
-       
+
        tty = port->tty;
-       
+
        count = rc_in(bp, CD180_RDCR);
-       
+
 #ifdef RC_REPORT_FIFO
        port->hits[count > 8 ? 9 : count]++;
-#endif 
-       
+#endif
+
        while (count--)  {
                if (tty_buffer_request_room(tty, 1) == 0)  {
                        printk(KERN_WARNING "rc%d: port %d: Working around "
@@ -412,26 +412,26 @@ static inline void rc_receive(struct riscom_board const * bp)
        tty_flip_buffer_push(tty);
 }
 
-static inline void rc_transmit(struct riscom_board const * bp)
+static void rc_transmit(struct riscom_board const *bp)
 {
        struct riscom_port *port;
        struct tty_struct *tty;
        unsigned char count;
-       
-       
-       if (!(port = rc_get_port(bp, "Transmit")))
+
+       port = rc_get_port(bp, "Transmit");
+       if (port == NULL)
                return;
-       
+
        tty = port->tty;
-       
-       if (port->IER & IER_TXEMPTY)  {
+
+       if (port->IER & IER_TXEMPTY) {
                /* FIFO drained */
                rc_out(bp, CD180_CAR, port_No(port));
                port->IER &= ~IER_TXEMPTY;
                rc_out(bp, CD180_IER, port->IER);
                return;
        }
-       
+
        if ((port->xmit_cnt <= 0 && !port->break_length)
            || tty->stopped || tty->hw_stopped)  {
                rc_out(bp, CD180_CAR, port_No(port));
@@ -439,7 +439,7 @@ static inline void rc_transmit(struct riscom_board const * bp)
                rc_out(bp, CD180_IER, port->IER);
                return;
        }
-       
+
        if (port->break_length)  {
                if (port->break_length > 0)  {
                        if (port->COR2 & COR2_ETC)  {
@@ -451,7 +451,8 @@ static inline void rc_transmit(struct riscom_board const * bp)
                        rc_out(bp, CD180_TDR, CD180_C_ESC);
                        rc_out(bp, CD180_TDR, CD180_C_DELAY);
                        rc_out(bp, CD180_TDR, count);
-                       if (!(port->break_length -= count))
+                       port->break_length -= count;
+                       if (port->break_length == 0)
                                port->break_length--;
                } else  {
                        rc_out(bp, CD180_TDR, CD180_C_ESC);
@@ -463,7 +464,7 @@ static inline void rc_transmit(struct riscom_board const * bp)
                }
                return;
        }
-       
+
        count = CD180_NFIFO;
        do {
                rc_out(bp, CD180_TDR, port->xmit_buf[port->xmit_tail++]);
@@ -471,7 +472,7 @@ static inline void rc_transmit(struct riscom_board const * bp)
                if (--port->xmit_cnt <= 0)
                        break;
        } while (--count > 0);
-       
+
        if (port->xmit_cnt <= 0)  {
                rc_out(bp, CD180_CAR, port_No(port));
                port->IER &= ~IER_TXRDY;
@@ -481,25 +482,26 @@ static inline void rc_transmit(struct riscom_board const * bp)
                tty_wakeup(tty);
 }
 
-static inline void rc_check_modem(struct riscom_board const * bp)
+static void rc_check_modem(struct riscom_board const *bp)
 {
        struct riscom_port *port;
        struct tty_struct *tty;
        unsigned char mcr;
-       
-       if (!(port = rc_get_port(bp, "Modem")))
+
+       port = rc_get_port(bp, "Modem");
+       if (port == NULL)
                return;
-       
+
        tty = port->tty;
-       
+
        mcr = rc_in(bp, CD180_MCR);
-       if (mcr & MCR_CDCHG)  {
-               if (rc_in(bp, CD180_MSVR) & MSVR_CD) 
+       if (mcr & MCR_CDCHG) {
+               if (rc_in(bp, CD180_MSVR) & MSVR_CD)
                        wake_up_interruptible(&port->open_wait);
                else
                        tty_hangup(tty);
        }
-       
+
 #ifdef RISCOM_BRAIN_DAMAGED_CTS
        if (mcr & MCR_CTSCHG)  {
                if (rc_in(bp, CD180_MSVR) & MSVR_CTS)  {
@@ -526,13 +528,13 @@ static inline void rc_check_modem(struct riscom_board const * bp)
                rc_out(bp, CD180_IER, port->IER);
        }
 #endif /* RISCOM_BRAIN_DAMAGED_CTS */
-       
+
        /* Clear change bits */
        rc_out(bp, CD180_MCR, 0);
 }
 
 /* The main interrupt processing routine */
-static irqreturn_t rc_interrupt(int dummy, void * dev_id)
+static irqreturn_t rc_interrupt(int dummy, void *dev_id)
 {
        unsigned char status;
        unsigned char ack;
@@ -547,13 +549,11 @@ static irqreturn_t rc_interrupt(int dummy, void * dev_id)
                                 (RC_BSR_TOUT | RC_BSR_TINT |
                                  RC_BSR_MINT | RC_BSR_RINT))) {
                handled = 1;
-               if (status & RC_BSR_TOUT) 
+               if (status & RC_BSR_TOUT)
                        printk(KERN_WARNING "rc%d: Got timeout. Hardware "
                                            "error?\n", board_No(bp));
-               
                else if (status & RC_BSR_RINT) {
                        ack = rc_in(bp, RC_ACK_RINT);
-               
                        if (ack == (RC_ID | GIVR_IT_RCV))
                                rc_receive(bp);
                        else if (ack == (RC_ID | GIVR_IT_REXC))
@@ -562,29 +562,23 @@ static irqreturn_t rc_interrupt(int dummy, void * dev_id)
                                printk(KERN_WARNING "rc%d: Bad receive ack "
                                                    "0x%02x.\n",
                                       board_No(bp), ack);
-               
                } else if (status & RC_BSR_TINT) {
                        ack = rc_in(bp, RC_ACK_TINT);
-               
                        if (ack == (RC_ID | GIVR_IT_TX))
                                rc_transmit(bp);
                        else
                                printk(KERN_WARNING "rc%d: Bad transmit ack "
                                                    "0x%02x.\n",
                                       board_No(bp), ack);
-               
                } else /* if (status & RC_BSR_MINT) */ {
                        ack = rc_in(bp, RC_ACK_MINT);
-               
-                       if (ack == (RC_ID | GIVR_IT_MODEM)) 
+                       if (ack == (RC_ID | GIVR_IT_MODEM))
                                rc_check_modem(bp);
                        else
                                printk(KERN_WARNING "rc%d: Bad modem ack "
                                                    "0x%02x.\n",
                                       board_No(bp), ack);
-               
-               } 
-
+               }
                rc_out(bp, CD180_EOIR, 0);   /* Mark end of interrupt */
                rc_out(bp, RC_CTOUT, 0);     /* Clear timeout flag    */
        }
@@ -596,24 +590,24 @@ static irqreturn_t rc_interrupt(int dummy, void * dev_id)
  */
 
 /* Called with disabled interrupts */
-static int rc_setup_board(struct riscom_board * bp)
+static int rc_setup_board(struct riscom_board *bp)
 {
        int error;
 
-       if (bp->flags & RC_BOARD_ACTIVE) 
+       if (bp->flags & RC_BOARD_ACTIVE)
                return 0;
-       
+
        error = request_irq(bp->irq, rc_interrupt, IRQF_DISABLED,
                            "RISCom/8", bp);
-       if (error) 
+       if (error)
                return error;
-       
+
        rc_out(bp, RC_CTOUT, 0);                /* Just in case         */
        bp->DTR = ~0;
        rc_out(bp, RC_DTR, bp->DTR);            /* Drop DTR on all ports */
-       
+
        bp->flags |= RC_BOARD_ACTIVE;
-       
+
        return 0;
 }
 
@@ -622,40 +616,40 @@ static void rc_shutdown_board(struct riscom_board *bp)
 {
        if (!(bp->flags & RC_BOARD_ACTIVE))
                return;
-       
+
        bp->flags &= ~RC_BOARD_ACTIVE;
-       
+
        free_irq(bp->irq, NULL);
-       
+
        bp->DTR = ~0;
        rc_out(bp, RC_DTR, bp->DTR);           /* Drop DTR on all ports */
-       
+
 }
 
 /*
- * Setting up port characteristics. 
+ * Setting up port characteristics.
  * Must be called with disabled interrupts
  */
 static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port)
 {
-       struct tty_struct *tty;
+       struct tty_struct *tty = port->tty;
        unsigned long baud;
        long tmp;
        unsigned char cor1 = 0, cor3 = 0;
        unsigned char mcor1 = 0, mcor2 = 0;
-       
-       if (!(tty = port->tty) || !tty->termios)
+
+       if (tty == NULL || tty->termios == NULL)
                return;
 
        port->IER  = 0;
        port->COR2 = 0;
        port->MSVR = MSVR_RTS;
-       
+
        baud = tty_get_baud_rate(tty);
-       
+
        /* Select port on the board */
        rc_out(bp, CD180_CAR, port_No(port));
-       
+
        if (!baud)  {
                /* Drop DTR & exit */
                bp->DTR |= (1u << port_No(port));
@@ -666,69 +660,68 @@ static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port)
                bp->DTR &= ~(1u << port_No(port));
                rc_out(bp, RC_DTR, bp->DTR);
        }
-       
+
        /*
-        * Now we must calculate some speed depended things 
+        * Now we must calculate some speed-dependent things
         */
-       
+
        /* Set baud rate for port */
        tmp = (((RC_OSCFREQ + baud/2) / baud +
                CD180_TPC/2) / CD180_TPC);
 
-       rc_out(bp, CD180_RBPRH, (tmp >> 8) & 0xff); 
-       rc_out(bp, CD180_TBPRH, (tmp >> 8) & 0xff); 
-       rc_out(bp, CD180_RBPRL, tmp & 0xff); 
+       rc_out(bp, CD180_RBPRH, (tmp >> 8) & 0xff);
+       rc_out(bp, CD180_TBPRH, (tmp >> 8) & 0xff);
+       rc_out(bp, CD180_RBPRL, tmp & 0xff);
        rc_out(bp, CD180_TBPRL, tmp & 0xff);
-       
+
        baud = (baud + 5) / 10;   /* Estimated CPS */
-       
+
        /* Two timer ticks seem enough to wake up something like the SLIP driver */
-       tmp = ((baud + HZ/2) / HZ) * 2 - CD180_NFIFO;           
+       tmp = ((baud + HZ/2) / HZ) * 2 - CD180_NFIFO;
        port->wakeup_chars = (tmp < 0) ? 0 : ((tmp >= SERIAL_XMIT_SIZE) ?
                                              SERIAL_XMIT_SIZE - 1 : tmp);
-       
+
        /* Receiver timeout will be transmission time for 1.5 chars */
        tmp = (RISCOM_TPS + RISCOM_TPS/2 + baud/2) / baud;
        tmp = (tmp > 0xff) ? 0xff : tmp;
        rc_out(bp, CD180_RTPR, tmp);
-       
-       switch (C_CSIZE(tty))  {
-        case CS5:
+
+       switch (C_CSIZE(tty)) {
+       case CS5:
                cor1 |= COR1_5BITS;
                break;
-        case CS6:
+       case CS6:
                cor1 |= COR1_6BITS;
                break;
-        case CS7:
+       case CS7:
                cor1 |= COR1_7BITS;
                break;
-        case CS8:
+       case CS8:
                cor1 |= COR1_8BITS;
                break;
        }
-       
-       if (C_CSTOPB(tty)) 
+       if (C_CSTOPB(tty))
                cor1 |= COR1_2SB;
-       
+
        cor1 |= COR1_IGNORE;
-       if (C_PARENB(tty))  {
+       if (C_PARENB(tty)) {
                cor1 |= COR1_NORMPAR;
-               if (C_PARODD(tty)) 
+               if (C_PARODD(tty))
                        cor1 |= COR1_ODDP;
-               if (I_INPCK(tty)) 
+               if (I_INPCK(tty))
                        cor1 &= ~COR1_IGNORE;
        }
        /* Set marking of some errors */
        port->mark_mask = RCSR_OE | RCSR_TOUT;
-       if (I_INPCK(tty)) 
+       if (I_INPCK(tty))
                port->mark_mask |= RCSR_FE | RCSR_PE;
-       if (I_BRKINT(tty) || I_PARMRK(tty)) 
+       if (I_BRKINT(tty) || I_PARMRK(tty))
                port->mark_mask |= RCSR_BREAK;
-       if (I_IGNPAR(tty)) 
+       if (I_IGNPAR(tty))
                port->mark_mask &= ~(RCSR_FE | RCSR_PE);
-       if (I_IGNBRK(tty))  {
+       if (I_IGNBRK(tty)) {
                port->mark_mask &= ~RCSR_BREAK;
-               if (I_IGNPAR(tty)) 
+               if (I_IGNPAR(tty))
                        /* Real raw mode. Ignore all */
                        port->mark_mask &= ~RCSR_OE;
        }
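The baud-rate divisor computed earlier in this hunk rounds to the nearest integer at both divisions by adding half of each divisor first. A standalone sketch of that idiom, with placeholder values for RC_OSCFREQ and CD180_TPC rather than the driver's real constants:

    #include <stdio.h>

    #define RC_OSCFREQ 9830400UL   /* placeholder oscillator, Hz */
    #define CD180_TPC  16UL        /* placeholder ticks per character clock */

    static unsigned long div_round(unsigned long x, unsigned long y)
    {
            return (x + y / 2) / y;         /* round to nearest, not down */
    }

    int main(void)
    {
            unsigned long baud = 9600;
            /* mirrors ((RC_OSCFREQ + baud/2)/baud + CD180_TPC/2)/CD180_TPC */
            unsigned long tmp = div_round(div_round(RC_OSCFREQ, baud), CD180_TPC);

            printf("baud %lu -> divisor %lu\n", baud, tmp);
            return 0;
    }
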
@@ -738,7 +731,8 @@ static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port)
                port->IER |= IER_DSR | IER_CTS;
                mcor1 |= MCOR1_DSRZD | MCOR1_CTSZD;
                mcor2 |= MCOR2_DSROD | MCOR2_CTSOD;
-               tty->hw_stopped = !(rc_in(bp, CD180_MSVR) & (MSVR_CTS|MSVR_DSR));
+               tty->hw_stopped = !(rc_in(bp, CD180_MSVR) &
+                                               (MSVR_CTS|MSVR_DSR));
 #else
                port->COR2 |= COR2_CTSAE;
 #endif
@@ -761,13 +755,13 @@ static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port)
                mcor1 |= MCOR1_CDZD;
                mcor2 |= MCOR2_CDOD;
        }
-       
-       if (C_CREAD(tty)) 
+
+       if (C_CREAD(tty))
                /* Enable receiver */
                port->IER |= IER_RXD;
-       
+
        /* Set input FIFO size (1-8 bytes) */
-       cor3 |= RISCOM_RXFIFO; 
+       cor3 |= RISCOM_RXFIFO;
        /* Setting up CD180 channel registers */
        rc_out(bp, CD180_COR1, cor1);
        rc_out(bp, CD180_COR2, port->COR2);
@@ -791,36 +785,30 @@ static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port)
 static int rc_setup_port(struct riscom_board *bp, struct riscom_port *port)
 {
        unsigned long flags;
-       
+
        if (port->flags & ASYNC_INITIALIZED)
                return 0;
-       
+
        if (!port->xmit_buf) {
                /* We may sleep in get_zeroed_page() */
-               unsigned long tmp;
-               
-               if (!(tmp = get_zeroed_page(GFP_KERNEL)))
+               unsigned long tmp = get_zeroed_page(GFP_KERNEL);
+               if (tmp == 0)
                        return -ENOMEM;
-                   
-               if (port->xmit_buf) {
+               if (port->xmit_buf)
                        free_page(tmp);
-                       return -ERESTARTSYS;
-               }
-               port->xmit_buf = (unsigned char *) tmp;
+               else
+                       port->xmit_buf = (unsigned char *) tmp;
        }
-
        spin_lock_irqsave(&riscom_lock, flags);
 
-       if (port->tty) 
+       if (port->tty)
                clear_bit(TTY_IO_ERROR, &port->tty->flags);
-               
-       if (port->count == 1) 
+       if (port->count == 1)
                bp->count++;
-               
        port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
        rc_change_speed(bp, port);
        port->flags |= ASYNC_INITIALIZED;
-               
+
        spin_unlock_irqrestore(&riscom_lock, flags);
        return 0;
 }
@@ -829,38 +817,39 @@ static int rc_setup_port(struct riscom_board *bp, struct riscom_port *port)
 static void rc_shutdown_port(struct riscom_board *bp, struct riscom_port *port)
 {
        struct tty_struct *tty;
-       
-       if (!(port->flags & ASYNC_INITIALIZED)) 
+
+       if (!(port->flags & ASYNC_INITIALIZED))
                return;
-       
+
 #ifdef RC_REPORT_OVERRUN
        printk(KERN_INFO "rc%d: port %d: Total %ld overruns were detected.\n",
               board_No(bp), port_No(port), port->overrun);
-#endif 
+#endif
 #ifdef RC_REPORT_FIFO
        {
                int i;
-               
+
                printk(KERN_INFO "rc%d: port %d: FIFO hits [ ",
                       board_No(bp), port_No(port));
-               for (i = 0; i < 10; i++)  {
+               for (i = 0; i < 10; i++)
                        printk("%ld ", port->hits[i]);
-               }
                printk("].\n");
        }
-#endif 
+#endif
        if (port->xmit_buf)  {
                free_page((unsigned long) port->xmit_buf);
                port->xmit_buf = NULL;
        }
 
-       if (!(tty = port->tty) || C_HUPCL(tty))  {
+       tty = port->tty;
+
+       if (tty == NULL || C_HUPCL(tty)) {
                /* Drop DTR */
                bp->DTR |= (1u << port_No(port));
                rc_out(bp, RC_DTR, bp->DTR);
        }
-       
-        /* Select port */
+
+       /* Select port */
        rc_out(bp, CD180_CAR, port_No(port));
        /* Reset port */
        rc_wait_CCR(bp);
@@ -868,28 +857,26 @@ static void rc_shutdown_port(struct riscom_board *bp, struct riscom_port *port)
        /* Disable all interrupts from this port */
        port->IER = 0;
        rc_out(bp, CD180_IER, port->IER);
-       
-       if (tty)  
+
+       if (tty)
                set_bit(TTY_IO_ERROR, &tty->flags);
        port->flags &= ~ASYNC_INITIALIZED;
-       
+
        if (--bp->count < 0)  {
                printk(KERN_INFO "rc%d: rc_shutdown_port: "
                                 "bad board count: %d\n",
                       board_No(bp), bp->count);
                bp->count = 0;
        }
-       
        /*
         * If this is the last opened port on the board
         * shutdown whole board
         */
-       if (!bp->count) 
+       if (!bp->count)
                rc_shutdown_board(bp);
 }
 
-       
-static int block_til_ready(struct tty_struct *tty, struct file * filp,
+static int block_til_ready(struct tty_struct *tty, struct file *filp,
                           struct riscom_port *port)
 {
        DECLARE_WAITQUEUE(wait, current);
@@ -921,7 +908,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
                return 0;
        }
 
-       if (C_CLOCAL(tty))  
+       if (C_CLOCAL(tty))
                do_clocal = 1;
 
        /*
@@ -959,7 +946,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
                        if (port->flags & ASYNC_HUP_NOTIFY)
                                retval = -EAGAIN;
                        else
-                               retval = -ERESTARTSYS;  
+                               retval = -ERESTARTSYS;
                        break;
                }
                if (!(port->flags & ASYNC_CLOSING) &&
@@ -978,50 +965,63 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
        port->blocked_open--;
        if (retval)
                return retval;
-       
+
        port->flags |= ASYNC_NORMAL_ACTIVE;
        return 0;
-}      
+}
 
-static int rc_open(struct tty_struct * tty, struct file * filp)
+static int rc_open(struct tty_struct *tty, struct file *filp)
 {
        int board;
        int error;
-       struct riscom_port * port;
-       struct riscom_board * bp;
-       
+       struct riscom_port *port;
+       struct riscom_board *bp;
+
        board = RC_BOARD(tty->index);
        if (board >= RC_NBOARD || !(rc_board[board].flags & RC_BOARD_PRESENT))
                return -ENODEV;
-       
+
        bp = &rc_board[board];
        port = rc_port + board * RC_NPORT + RC_PORT(tty->index);
        if (rc_paranoia_check(port, tty->name, "rc_open"))
                return -ENODEV;
-       
-       if ((error = rc_setup_board(bp))) 
+
+       error = rc_setup_board(bp);
+       if (error)
                return error;
-               
+
        port->count++;
        tty->driver_data = port;
        port->tty = tty;
-       
-       if ((error = rc_setup_port(bp, port))) 
-               return error;
-       
-       if ((error = block_til_ready(tty, filp, port)))
-               return error;
-       
-       return 0;
+
+       error = rc_setup_port(bp, port);
+       if (error == 0)
+               error = block_til_ready(tty, filp, port);
+       return error;
 }
 
-static void rc_close(struct tty_struct * tty, struct file * filp)
+static void rc_flush_buffer(struct tty_struct *tty)
+{
+       struct riscom_port *port = (struct riscom_port *)tty->driver_data;
+       unsigned long flags;
+
+       if (rc_paranoia_check(port, tty->name, "rc_flush_buffer"))
+               return;
+
+       spin_lock_irqsave(&riscom_lock, flags);
+       port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
+       spin_unlock_irqrestore(&riscom_lock, flags);
+
+       tty_wakeup(tty);
+}
+
+static void rc_close(struct tty_struct *tty, struct file *filp)
 {
        struct riscom_port *port = (struct riscom_port *) tty->driver_data;
        struct riscom_board *bp;
        unsigned long flags;
        unsigned long timeout;
-       
+
        if (!port || rc_paranoia_check(port, tty->name, "close"))
                return;
 
@@ -1029,7 +1029,7 @@ static void rc_close(struct tty_struct * tty, struct file * filp)
 
        if (tty_hung_up_p(filp))
                goto out;
-       
+
        bp = port_Board(port);
        if ((tty->count == 1) && (port->count != 1))  {
                printk(KERN_INFO "rc%d: rc_close: bad port count;"
@@ -1047,7 +1047,7 @@ static void rc_close(struct tty_struct * tty, struct file * filp)
                goto out;
        port->flags |= ASYNC_CLOSING;
        /*
-        * Now we wait for the transmit buffer to clear; and we notify 
+        * Now we wait for the transmit buffer to clear; and we notify
         * the line discipline to only process XON/XOFF characters.
         */
        tty->closing = 1;
@@ -1070,24 +1070,22 @@ static void rc_close(struct tty_struct * tty, struct file * filp)
                 * has completely drained; this is especially
                 * important if there is a transmit FIFO!
                 */
-               timeout = jiffies+HZ;
-               while(port->IER & IER_TXEMPTY)  {
+               timeout = jiffies + HZ;
+               while (port->IER & IER_TXEMPTY) {
                        msleep_interruptible(jiffies_to_msecs(port->timeout));
                        if (time_after(jiffies, timeout))
                                break;
                }
        }
        rc_shutdown_port(bp, port);
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       rc_flush_buffer(tty);
        tty_ldisc_flush(tty);
 
        tty->closing = 0;
        port->tty = NULL;
        if (port->blocked_open) {
-               if (port->close_delay) {
+               if (port->close_delay)
                        msleep_interruptible(jiffies_to_msecs(port->close_delay));
-               }
                wake_up_interruptible(&port->open_wait);
        }
        port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING);
@@ -1097,17 +1095,17 @@ out:
        spin_unlock_irqrestore(&riscom_lock, flags);
 }
 
-static int rc_write(struct tty_struct * tty, 
+static int rc_write(struct tty_struct *tty,
                    const unsigned char *buf, int count)
 {
        struct riscom_port *port = (struct riscom_port *)tty->driver_data;
        struct riscom_board *bp;
        int c, total = 0;
        unsigned long flags;
-                               
+
        if (rc_paranoia_check(port, tty->name, "rc_write"))
                return 0;
-       
+
        bp = port_Board(port);
 
        if (!tty || !port->xmit_buf)
@@ -1144,38 +1142,41 @@ static int rc_write(struct tty_struct * tty,
        return total;
 }
 
-static void rc_put_char(struct tty_struct * tty, unsigned char ch)
+static int rc_put_char(struct tty_struct *tty, unsigned char ch)
 {
        struct riscom_port *port = (struct riscom_port *)tty->driver_data;
        unsigned long flags;
+       int ret = 0;
 
        if (rc_paranoia_check(port, tty->name, "rc_put_char"))
-               return;
+               return 0;
 
        if (!tty || !port->xmit_buf)
-               return;
+               return 0;
 
        spin_lock_irqsave(&riscom_lock, flags);
-       
+
        if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1)
                goto out;
 
        port->xmit_buf[port->xmit_head++] = ch;
        port->xmit_head &= SERIAL_XMIT_SIZE - 1;
        port->xmit_cnt++;
+       ret = 1;
 
 out:
        spin_unlock_irqrestore(&riscom_lock, flags);
+       return ret;
 }
 
-static void rc_flush_chars(struct tty_struct * tty)
+static void rc_flush_chars(struct tty_struct *tty)
 {
        struct riscom_port *port = (struct riscom_port *)tty->driver_data;
        unsigned long flags;
-                               
+
        if (rc_paranoia_check(port, tty->name, "rc_flush_chars"))
                return;
-       
+
        if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped ||
            !port->xmit_buf)
                return;
@@ -1189,11 +1190,11 @@ static void rc_flush_chars(struct tty_struct * tty)
        spin_unlock_irqrestore(&riscom_lock, flags);
 }
 
-static int rc_write_room(struct tty_struct * tty)
+static int rc_write_room(struct tty_struct *tty)
 {
        struct riscom_port *port = (struct riscom_port *)tty->driver_data;
        int     ret;
-                               
+
        if (rc_paranoia_check(port, tty->name, "rc_write_room"))
                return 0;
 
@@ -1206,39 +1207,22 @@ static int rc_write_room(struct tty_struct * tty)
 static int rc_chars_in_buffer(struct tty_struct *tty)
 {
        struct riscom_port *port = (struct riscom_port *)tty->driver_data;
-                               
+
        if (rc_paranoia_check(port, tty->name, "rc_chars_in_buffer"))
                return 0;
-       
-       return port->xmit_cnt;
-}
-
-static void rc_flush_buffer(struct tty_struct *tty)
-{
-       struct riscom_port *port = (struct riscom_port *)tty->driver_data;
-       unsigned long flags;
-                               
-       if (rc_paranoia_check(port, tty->name, "rc_flush_buffer"))
-               return;
-
-       spin_lock_irqsave(&riscom_lock, flags);
-
-       port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
 
-       spin_unlock_irqrestore(&riscom_lock, flags);
-       
-       tty_wakeup(tty);
+       return port->xmit_cnt;
 }
 
 static int rc_tiocmget(struct tty_struct *tty, struct file *file)
 {
        struct riscom_port *port = (struct riscom_port *)tty->driver_data;
-       struct riscom_board * bp;
+       struct riscom_board *bp;
        unsigned char status;
        unsigned int result;
        unsigned long flags;
 
-       if (rc_paranoia_check(port, tty->name, __FUNCTION__))
+       if (rc_paranoia_check(port, tty->name, __func__))
                return -ENODEV;
 
        bp = port_Board(port);
@@ -1266,7 +1250,7 @@ static int rc_tiocmset(struct tty_struct *tty, struct file *file,
        unsigned long flags;
        struct riscom_board *bp;
 
-       if (rc_paranoia_check(port, tty->name, __FUNCTION__))
+       if (rc_paranoia_check(port, tty->name, __func__))
                return -ENODEV;
 
        bp = port_Board(port);
@@ -1292,11 +1276,11 @@ static int rc_tiocmset(struct tty_struct *tty, struct file *file,
        return 0;
 }
 
-static inline void rc_send_break(struct riscom_port * port, unsigned long length)
+static void rc_send_break(struct riscom_port *port, unsigned long length)
 {
        struct riscom_board *bp = port_Board(port);
        unsigned long flags;
-       
+
        spin_lock_irqsave(&riscom_lock, flags);
 
        port->break_length = RISCOM_TPS / HZ * length;
@@ -1312,17 +1296,17 @@ static inline void rc_send_break(struct riscom_port * port, unsigned long length
        spin_unlock_irqrestore(&riscom_lock, flags);
 }
 
-static inline int rc_set_serial_info(struct riscom_port * port,
-                                    struct serial_struct __user * newinfo)
+static int rc_set_serial_info(struct riscom_port *port,
+                                    struct serial_struct __user *newinfo)
 {
        struct serial_struct tmp;
        struct riscom_board *bp = port_Board(port);
        int change_speed;
-       
+
        if (copy_from_user(&tmp, newinfo, sizeof(tmp)))
                return -EFAULT;
-       
-#if 0  
+
+#if 0
        if ((tmp.irq != bp->irq) ||
            (tmp.port != bp->base) ||
            (tmp.type != PORT_CIRRUS) ||
@@ -1331,16 +1315,16 @@ static inline int rc_set_serial_info(struct riscom_port * port,
            (tmp.xmit_fifo_size != CD180_NFIFO) ||
            (tmp.flags & ~RISCOM_LEGAL_FLAGS))
                return -EINVAL;
-#endif 
-       
+#endif
+
        change_speed = ((port->flags & ASYNC_SPD_MASK) !=
                        (tmp.flags & ASYNC_SPD_MASK));
-       
+
        if (!capable(CAP_SYS_ADMIN)) {
                if ((tmp.close_delay != port->close_delay) ||
                    (tmp.closing_wait != port->closing_wait) ||
                    ((tmp.flags & ~ASYNC_USR_MASK) !=
-                    (port->flags & ~ASYNC_USR_MASK)))  
+                    (port->flags & ~ASYNC_USR_MASK)))
                        return -EPERM;
                port->flags = ((port->flags & ~ASYNC_USR_MASK) |
                               (tmp.flags & ASYNC_USR_MASK));
@@ -1360,12 +1344,12 @@ static inline int rc_set_serial_info(struct riscom_port * port,
        return 0;
 }
 
-static inline int rc_get_serial_info(struct riscom_port * port,
+static int rc_get_serial_info(struct riscom_port *port,
                                     struct serial_struct __user *retinfo)
 {
        struct serial_struct tmp;
        struct riscom_board *bp = port_Board(port);
-       
+
        memset(&tmp, 0, sizeof(tmp));
        tmp.type = PORT_CIRRUS;
        tmp.line = port - rc_port;
@@ -1379,19 +1363,18 @@ static inline int rc_get_serial_info(struct riscom_port * port,
        return copy_to_user(retinfo, &tmp, sizeof(tmp)) ? -EFAULT : 0;
 }
 
-static int rc_ioctl(struct tty_struct * tty, struct file * filp, 
+static int rc_ioctl(struct tty_struct *tty, struct file *filp,
                    unsigned int cmd, unsigned long arg)
-                   
 {
        struct riscom_port *port = (struct riscom_port *)tty->driver_data;
        void __user *argp = (void __user *)arg;
-       int retval;
-                               
+       int retval = 0;
+
        if (rc_paranoia_check(port, tty->name, "rc_ioctl"))
                return -ENODEV;
-       
+
        switch (cmd) {
-        case TCSBRK:   /* SVID version: non-zero arg --> no break */
+       case TCSBRK:    /* SVID version: non-zero arg --> no break */
                retval = tty_check_change(tty);
                if (retval)
                        return retval;
@@ -1399,45 +1382,40 @@ static int rc_ioctl(struct tty_struct * tty, struct file * filp,
                if (!arg)
                        rc_send_break(port, HZ/4);      /* 1/4 second */
                break;
-        case TCSBRKP:  /* support for POSIX tcsendbreak() */
+       case TCSBRKP:   /* support for POSIX tcsendbreak() */
                retval = tty_check_change(tty);
                if (retval)
                        return retval;
                tty_wait_until_sent(tty, 0);
                rc_send_break(port, arg ? arg*(HZ/10) : HZ/4);
                break;
-        case TIOCGSOFTCAR:
-               return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned __user *)argp);
-        case TIOCSSOFTCAR:
-               if (get_user(arg,(unsigned __user *) argp))
-                       return -EFAULT;
-               tty->termios->c_cflag =
-                       ((tty->termios->c_cflag & ~CLOCAL) |
-                       (arg ? CLOCAL : 0));
+       case TIOCGSERIAL:
+               lock_kernel();
+               retval = rc_get_serial_info(port, argp);
+               unlock_kernel();
                break;
-        case TIOCGSERIAL:      
-               return rc_get_serial_info(port, argp);
-        case TIOCSSERIAL:      
-               return rc_set_serial_info(port, argp);
-        default:
-               return -ENOIOCTLCMD;
+       case TIOCSSERIAL:
+               lock_kernel();
+               retval = rc_set_serial_info(port, argp);
+               unlock_kernel();
+               break;
+       default:
+               retval = -ENOIOCTLCMD;
        }
-       return 0;
+       return retval;
 }
 
-static void rc_throttle(struct tty_struct * tty)
+static void rc_throttle(struct tty_struct *tty)
 {
        struct riscom_port *port = (struct riscom_port *)tty->driver_data;
        struct riscom_board *bp;
        unsigned long flags;
-                               
+
        if (rc_paranoia_check(port, tty->name, "rc_throttle"))
                return;
-       
        bp = port_Board(port);
 
        spin_lock_irqsave(&riscom_lock, flags);
-
        port->MSVR &= ~MSVR_RTS;
        rc_out(bp, CD180_CAR, port_No(port));
        if (I_IXOFF(tty)) {
@@ -1446,23 +1424,20 @@ static void rc_throttle(struct tty_struct * tty)
                rc_wait_CCR(bp);
        }
        rc_out(bp, CD180_MSVR, port->MSVR);
-
        spin_unlock_irqrestore(&riscom_lock, flags);
 }
 
-static void rc_unthrottle(struct tty_struct * tty)
+static void rc_unthrottle(struct tty_struct *tty)
 {
        struct riscom_port *port = (struct riscom_port *)tty->driver_data;
        struct riscom_board *bp;
        unsigned long flags;
-                               
+
        if (rc_paranoia_check(port, tty->name, "rc_unthrottle"))
                return;
-       
        bp = port_Board(port);
-       
-       spin_lock_irqsave(&riscom_lock, flags);
 
+       spin_lock_irqsave(&riscom_lock, flags);
        port->MSVR |= MSVR_RTS;
        rc_out(bp, CD180_CAR, port_No(port));
        if (I_IXOFF(tty))  {
@@ -1471,62 +1446,58 @@ static void rc_unthrottle(struct tty_struct * tty)
                rc_wait_CCR(bp);
        }
        rc_out(bp, CD180_MSVR, port->MSVR);
-
        spin_unlock_irqrestore(&riscom_lock, flags);
 }
 
-static void rc_stop(struct tty_struct * tty)
+static void rc_stop(struct tty_struct *tty)
 {
        struct riscom_port *port = (struct riscom_port *)tty->driver_data;
        struct riscom_board *bp;
        unsigned long flags;
-                               
+
        if (rc_paranoia_check(port, tty->name, "rc_stop"))
                return;
-       
+
        bp = port_Board(port);
-       
-       spin_lock_irqsave(&riscom_lock, flags);
 
+       spin_lock_irqsave(&riscom_lock, flags);
        port->IER &= ~IER_TXRDY;
        rc_out(bp, CD180_CAR, port_No(port));
        rc_out(bp, CD180_IER, port->IER);
-
        spin_unlock_irqrestore(&riscom_lock, flags);
 }
 
-static void rc_start(struct tty_struct * tty)
+static void rc_start(struct tty_struct *tty)
 {
        struct riscom_port *port = (struct riscom_port *)tty->driver_data;
        struct riscom_board *bp;
        unsigned long flags;
-                               
+
        if (rc_paranoia_check(port, tty->name, "rc_start"))
                return;
-       
+
        bp = port_Board(port);
-       
+
        spin_lock_irqsave(&riscom_lock, flags);
 
-       if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY))  {
+       if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) {
                port->IER |= IER_TXRDY;
                rc_out(bp, CD180_CAR, port_No(port));
                rc_out(bp, CD180_IER, port->IER);
        }
-
        spin_unlock_irqrestore(&riscom_lock, flags);
 }
 
-static void rc_hangup(struct tty_struct * tty)
+static void rc_hangup(struct tty_struct *tty)
 {
        struct riscom_port *port = (struct riscom_port *)tty->driver_data;
        struct riscom_board *bp;
-                               
+
        if (rc_paranoia_check(port, tty->name, "rc_hangup"))
                return;
-       
+
        bp = port_Board(port);
-       
+
        rc_shutdown_port(bp, port);
        port->count = 0;
        port->flags &= ~ASYNC_NORMAL_ACTIVE;
@@ -1534,17 +1505,14 @@ static void rc_hangup(struct tty_struct * tty)
        wake_up_interruptible(&port->open_wait);
 }
 
-static void rc_set_termios(struct tty_struct * tty, struct ktermios * old_termios)
+static void rc_set_termios(struct tty_struct *tty,
+                                       struct ktermios *old_termios)
 {
        struct riscom_port *port = (struct riscom_port *)tty->driver_data;
        unsigned long flags;
-                               
+
        if (rc_paranoia_check(port, tty->name, "rc_set_termios"))
                return;
-       
-       if (tty->termios->c_cflag == old_termios->c_cflag &&
-           tty->termios->c_iflag == old_termios->c_iflag)
-               return;
 
        spin_lock_irqsave(&riscom_lock, flags);
        rc_change_speed(port_Board(port), port);
@@ -1583,9 +1551,9 @@ static int __init rc_init_drivers(void)
        int i;
 
        riscom_driver = alloc_tty_driver(RC_NBOARD * RC_NPORT);
-       if (!riscom_driver)     
+       if (!riscom_driver)
                return -ENOMEM;
-       
+
        riscom_driver->owner = THIS_MODULE;
        riscom_driver->name = "ttyL";
        riscom_driver->major = RISCOM8_NORMAL_MAJOR;
@@ -1598,23 +1566,21 @@ static int __init rc_init_drivers(void)
        riscom_driver->init_termios.c_ospeed = 9600;
        riscom_driver->flags = TTY_DRIVER_REAL_RAW;
        tty_set_operations(riscom_driver, &riscom_ops);
-       if ((error = tty_register_driver(riscom_driver)))  {
+       error = tty_register_driver(riscom_driver);
+       if (error != 0) {
                put_tty_driver(riscom_driver);
                printk(KERN_ERR "rc: Couldn't register RISCom/8 driver, "
-                               "error = %d\n",
-                      error);
+                               "error = %d\n", error);
                return 1;
        }
-
        memset(rc_port, 0, sizeof(rc_port));
        for (i = 0; i < RC_NPORT * RC_NBOARD; i++)  {
                rc_port[i].magic = RISCOM8_MAGIC;
-               rc_port[i].close_delay = 50 * HZ/100;
-               rc_port[i].closing_wait = 3000 * HZ/100;
+               rc_port[i].close_delay = 50 * HZ / 100;
+               rc_port[i].closing_wait = 3000 * HZ / 100;
                init_waitqueue_head(&rc_port[i].open_wait);
                init_waitqueue_head(&rc_port[i].close_wait);
        }
-       
        return 0;
 }
 
@@ -1627,13 +1593,13 @@ static void rc_release_drivers(void)
 #ifndef MODULE
 /*
  * Called at boot time.
- * 
+ *
  * You can specify IO base for up to RC_NBOARD cards,
  * using line "riscom8=0xiobase1,0xiobase2,.." at LILO prompt.
  * Note that there will be no probing at default
  * addresses in this case.
  *
- */ 
+ */
 static int __init riscom8_setup(char *str)
 {
        int ints[RC_NBOARD];
@@ -1644,7 +1610,7 @@ static int __init riscom8_setup(char *str)
        for (i = 0; i < RC_NBOARD; i++) {
                if (i < ints[0])
                        rc_board[i].base = ints[i+1];
-               else 
+               else
                        rc_board[i].base = 0;
        }
        return 1;
@@ -1659,8 +1625,8 @@ static char banner[] __initdata =
 static char no_boards_msg[] __initdata =
        KERN_INFO "rc: No RISCom/8 boards detected.\n";
 
-/* 
- * This routine must be called by kernel at boot time 
+/*
+ * This routine must be called by kernel at boot time
  */
 static int __init riscom8_init(void)
 {
@@ -1669,13 +1635,12 @@ static int __init riscom8_init(void)
 
        printk(banner);
 
-       if (rc_init_drivers()) 
+       if (rc_init_drivers())
                return -EIO;
 
-       for (i = 0; i < RC_NBOARD; i++) 
-               if (rc_board[i].base && !rc_probe(&rc_board[i]))  
+       for (i = 0; i < RC_NBOARD; i++)
+               if (rc_board[i].base && !rc_probe(&rc_board[i]))
                        found++;
-       
        if (!found)  {
                rc_release_drivers();
                printk(no_boards_msg);
@@ -1702,13 +1667,13 @@ MODULE_LICENSE("GPL");
  * by specifying "iobase=0xXXX iobase1=0xXXX ..." as insmod parameter.
  *
  */
-static int __init riscom8_init_module (void)
+static int __init riscom8_init_module(void)
 {
 #ifdef MODULE
        int i;
 
        if (iobase || iobase1 || iobase2 || iobase3) {
-               for(i = 0; i < RC_NBOARD; i++)
+               for (i = 0; i < RC_NBOARD; i++)
                        rc_board[i].base = 0;
        }
 
@@ -1724,18 +1689,17 @@ static int __init riscom8_init_module (void)
 
        return riscom8_init();
 }
-       
-static void __exit riscom8_exit_module (void)
+
+static void __exit riscom8_exit_module(void)
 {
        int i;
-       
+
        rc_release_drivers();
-       for (i = 0; i < RC_NBOARD; i++)  
-               if (rc_board[i].flags & RC_BOARD_PRESENT) 
+       for (i = 0; i < RC_NBOARD; i++)
+               if (rc_board[i].flags & RC_BOARD_PRESENT)
                        rc_release_io_range(&rc_board[i]);
-       
+
 }
 
 module_init(riscom8_init_module);
 module_exit(riscom8_exit_module);
-
index f585bc8579e9929aba1e6f6d35b00991844e0f39..743dc80a9325bd806476fe9e91fb72d46297d27e 100644 (file)
@@ -449,7 +449,8 @@ static void rp_do_transmit(struct r_port *info)
        while (1) {
                if (tty->stopped || tty->hw_stopped)
                        break;
-               c = min(info->xmit_fifo_room, min(info->xmit_cnt, XMIT_BUF_SIZE - info->xmit_tail));
+               c = min(info->xmit_fifo_room, info->xmit_cnt);
+               c = min(c, XMIT_BUF_SIZE - info->xmit_tail);
                if (c <= 0 || info->xmit_fifo_room <= 0)
                        break;
                sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) (info->xmit_buf + info->xmit_tail), c / 2);
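The nested min() in rp_do_transmit() is split into two steps above; the result is still the three-way minimum of the FIFO room, the pending byte count and the distance to the end of the circular buffer. A small standalone check of the equivalence (plain C helpers, not the kernel's type-checked min() macro):

    #include <assert.h>

    static int min_int(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
            int fifo_room = 7, xmit_cnt = 20, to_buf_end = 12;

            int nested = min_int(fifo_room, min_int(xmit_cnt, to_buf_end));
            int split  = min_int(fifo_room, xmit_cnt);
            split = min_int(split, to_buf_end);

            assert(nested == split);        /* both are the three-way minimum */
            return 0;
    }
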
@@ -1433,29 +1434,38 @@ static int rp_ioctl(struct tty_struct *tty, struct file *file,
 {
        struct r_port *info = (struct r_port *) tty->driver_data;
        void __user *argp = (void __user *)arg;
+       int ret = 0;
 
        if (cmd != RCKP_GET_PORTS && rocket_paranoia_check(info, "rp_ioctl"))
                return -ENXIO;
 
+       lock_kernel();
+
        switch (cmd) {
        case RCKP_GET_STRUCT:
                if (copy_to_user(argp, info, sizeof (struct r_port)))
-                       return -EFAULT;
-               return 0;
+                       ret = -EFAULT;
+               break;
        case RCKP_GET_CONFIG:
-               return get_config(info, argp);
+               ret = get_config(info, argp);
+               break;
        case RCKP_SET_CONFIG:
-               return set_config(info, argp);
+               ret = set_config(info, argp);
+               break;
        case RCKP_GET_PORTS:
-               return get_ports(info, argp);
+               ret = get_ports(info, argp);
+               break;
        case RCKP_RESET_RM2:
-               return reset_rm2(info, argp);
+               ret = reset_rm2(info, argp);
+               break;
        case RCKP_GET_VERSION:
-               return get_version(info, argp);
+               ret = get_version(info, argp);
+               break;
        default:
-               return -ENOIOCTLCMD;
+               ret = -ENOIOCTLCMD;
        }
-       return 0;
+       unlock_kernel();
+       return ret;
 }
 
 static void rp_send_xchar(struct tty_struct *tty, char ch)
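The rp_ioctl() rework above funnels every case through a single ret variable so the lock taken at the top (lock_kernel(), the old Big Kernel Lock) is released on one exit path. A userspace sketch of the same shape, with a pthread mutex standing in for the BKL and purely illustrative command and handler names:

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

    static int handle_foo(void *arg) { (void)arg; return 0; }

    static int example_ioctl(unsigned int cmd, void *argp)
    {
            int ret = 0;

            pthread_mutex_lock(&big_lock);          /* stands in for lock_kernel() */
            switch (cmd) {
            case 1:
                    ret = handle_foo(argp);
                    break;
            default:
                    ret = -ENOTTY;                  /* unknown command */
            }
            pthread_mutex_unlock(&big_lock);        /* single exit keeps the lock balanced */
            return ret;
    }

    int main(void)
    {
            printf("cmd 1 -> %d, cmd 99 -> %d\n",
                   example_ioctl(1, NULL), example_ioctl(99, NULL));
            return 0;
    }
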
@@ -1575,6 +1585,7 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout)
               jiffies);
        printk(KERN_INFO "cps=%d...\n", info->cps);
 #endif
+       lock_kernel();
        while (1) {
                txcnt = sGetTxCnt(cp);
                if (!txcnt) {
@@ -1602,6 +1613,7 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout)
                        break;
        }
        __set_current_state(TASK_RUNNING);
+       unlock_kernel();
 #ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT
        printk(KERN_INFO "txcnt = %d (jiff=%lu)...done\n", txcnt, jiffies);
 #endif
@@ -1651,14 +1663,14 @@ static void rp_hangup(struct tty_struct *tty)
  *  writing routines will write directly to transmit FIFO.
  *  Write buffer and counters protected by spinlocks
  */
-static void rp_put_char(struct tty_struct *tty, unsigned char ch)
+static int rp_put_char(struct tty_struct *tty, unsigned char ch)
 {
        struct r_port *info = (struct r_port *) tty->driver_data;
        CHANNEL_t *cp;
        unsigned long flags;
 
        if (rocket_paranoia_check(info, "rp_put_char"))
-               return;
+               return 0;
 
        /*
         * Grab the port write mutex, locking out other processes that try to
@@ -1687,6 +1699,7 @@ static void rp_put_char(struct tty_struct *tty, unsigned char ch)
        }
        spin_unlock_irqrestore(&info->slock, flags);
        mutex_unlock(&info->write_mtx);
+       return 1;
 }
 
 /*
@@ -1749,10 +1762,10 @@ static int rp_write(struct tty_struct *tty,
 
        /*  Write remaining data into the port's xmit_buf */
        while (1) {
-               if (!info->tty) /*   Seemingly obligatory check... */
+               if (!info->tty)         /* Seemingly obligatory check... */
                        goto end;
-
-               c = min(count, min(XMIT_BUF_SIZE - info->xmit_cnt - 1, XMIT_BUF_SIZE - info->xmit_head));
+               c = min(count, XMIT_BUF_SIZE - info->xmit_cnt - 1);
+               c = min(c, XMIT_BUF_SIZE - info->xmit_head);
                if (c <= 0)
                        break;
 
index b01d38125a8f1f575ec0cb5131e86636746bebd8..143cc432fdb2bec3cb0bbdb1be0188e3cc034765 100644 (file)
@@ -55,7 +55,7 @@ static inline void sOutW(unsigned short port, unsigned short value)
 
 static inline void out32(unsigned short port, Byte_t *p)
 {
-       u32 value = le32_to_cpu(get_unaligned((__le32 *)p));
+       u32 value = get_unaligned_le32(p);
 #ifdef ROCKET_DEBUG_IO
        printk(KERN_DEBUG "out32(%x, %lx)...\n", port, value);
 #endif
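get_unaligned_le32() reads a 32-bit little-endian value from a possibly unaligned pointer and converts it to CPU byte order in one step, replacing the open-coded le32_to_cpu(get_unaligned(...)) pair above. A userspace sketch of an equivalent byte-wise read:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t read_le32(const unsigned char *p)
    {
            /* byte-wise assembly avoids unaligned loads and fixes the order */
            return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                   ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
            unsigned char buf[5] = { 0x00, 0x78, 0x56, 0x34, 0x12 };

            printf("0x%08x\n", read_le32(buf + 1));   /* prints 0x12345678 */
            return 0;
    }
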
index 5c3142b6f1fcdf102a92b79dec3b9d000b4d4a3b..5f80a9dff5735103b91cc16c40338aafa96f853a 100644 (file)
@@ -88,6 +88,7 @@
 
 #ifdef CONFIG_SPARC32
 #include <linux/pci.h>
+#include <linux/jiffies.h>
 #include <asm/ebus.h>
 
 static unsigned long rtc_port;
@@ -1068,10 +1069,8 @@ no_irq:
        }
 
 #ifdef CONFIG_PROC_FS
-       ent = create_proc_entry("driver/rtc", 0, NULL);
-       if (ent)
-               ent->proc_fops = &rtc_proc_fops;
-       else
+       ent = proc_create("driver/rtc", 0, NULL, &rtc_proc_fops);
+       if (!ent)
                printk(KERN_WARNING "rtc: Failed to register with procfs.\n");
 #endif
 
@@ -1316,7 +1315,8 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
         * Once the read clears, read the RTC time (again via ioctl). Easy.
         */
 
-       while (rtc_is_updating() != 0 && jiffies - uip_watchdog < 2*HZ/100)
+       while (rtc_is_updating() != 0 &&
+              time_before(jiffies, uip_watchdog + 2*HZ/100))
                cpu_relax();
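
time_before(), made available by the <linux/jiffies.h> include added above, compares two jiffies values by looking at the sign of their difference, which stays correct when the counter wraps. A userspace sketch of that idiom:

    #include <stdio.h>

    typedef unsigned long jiffies_t;

    static int my_time_before(jiffies_t a, jiffies_t b)
    {
            return (long)(a - b) < 0;       /* signed difference handles wrap */
    }

    int main(void)
    {
            jiffies_t now = (jiffies_t)-3;  /* just before the counter wraps */
            jiffies_t deadline = now + 10;  /* wraps past zero */

            printf("direct <   : %d\n", now < deadline);               /* 0: misleading */
            printf("time_before: %d\n", my_time_before(now, deadline)); /* 1: still in the future */
            return 0;
    }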
 
        /*
index df8cd0ca97ebcc5d8728ab36ffcdeabcd899c4f3..fd2db07a50fce9bf39e2bcaf0060bc7fc71f8a09 100644 (file)
@@ -1060,7 +1060,7 @@ static void config_setup(struct cyclades_port *info)
 
 }                              /* config_setup */
 
-static void cy_put_char(struct tty_struct *tty, unsigned char ch)
+static int cy_put_char(struct tty_struct *tty, unsigned char ch)
 {
        struct cyclades_port *info = (struct cyclades_port *)tty->driver_data;
        unsigned long flags;
@@ -1070,7 +1070,7 @@ static void cy_put_char(struct tty_struct *tty, unsigned char ch)
 #endif
 
        if (serial_paranoia_check(info, tty->name, "cy_put_char"))
-               return;
+               return 0;
 
        if (!info->xmit_buf)
                return;
@@ -1078,13 +1078,14 @@ static void cy_put_char(struct tty_struct *tty, unsigned char ch)
        local_irq_save(flags);
        if (info->xmit_cnt >= PAGE_SIZE - 1) {
                local_irq_restore(flags);
-               return;
+               return 0;
        }
 
        info->xmit_buf[info->xmit_head++] = ch;
        info->xmit_head &= PAGE_SIZE - 1;
        info->xmit_cnt++;
        local_irq_restore(flags);
+       return 1;
 }                              /* cy_put_char */
 
 static void cy_flush_chars(struct tty_struct *tty)
@@ -1539,6 +1540,8 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
        printk("cy_ioctl %s, cmd = %x arg = %lx\n", tty->name, cmd, arg);       /* */
 #endif
 
+       lock_kernel();
+
        switch (cmd) {
        case CYGETMON:
                ret_val = get_mon_info(info, argp);
@@ -1584,18 +1587,6 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
                break;
 
 /* The following commands are incompletely implemented!!! */
-       case TIOCGSOFTCAR:
-               ret_val =
-                   put_user(C_CLOCAL(tty) ? 1 : 0,
-                            (unsigned long __user *)argp);
-               break;
-       case TIOCSSOFTCAR:
-               ret_val = get_user(val, (unsigned long __user *)argp);
-               if (ret_val)
-                       break;
-               tty->termios->c_cflag =
-                   ((tty->termios->c_cflag & ~CLOCAL) | (val ? CLOCAL : 0));
-               break;
        case TIOCGSERIAL:
                ret_val = get_serial_info(info, argp);
                break;
@@ -1605,6 +1596,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file,
        default:
                ret_val = -ENOIOCTLCMD;
        }
+       unlock_kernel();
 
 #ifdef SERIAL_DEBUG_OTHER
        printk("cy_ioctl done\n");
@@ -1683,8 +1675,7 @@ static void cy_close(struct tty_struct *tty, struct file *filp)
        if (info->flags & ASYNC_INITIALIZED)
                tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */
        shutdown(info);
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       cy_flush_buffer(tty);
        tty_ldisc_flush(tty);
        info->tty = NULL;
        if (info->blocked_open) {
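Besides dropping TIOCGSOFTCAR/TIOCSSOFTCAR (now picked up by generic tty code instead of each driver), cy_ioctl() gains an explicit lock_kernel()/unlock_kernel() pair around its command switch; the same pushdown of the big kernel lock shows up below in stl_ioctl(), sx_ioctl() and mgsl_ioctl(). A minimal kernel-style sketch of the pattern, with a hypothetical handler name (assumes the 2.6-era <linux/smp_lock.h> interface):

#include <linux/smp_lock.h>
#include <linux/errno.h>

static int demo_ioctl(unsigned int cmd, unsigned long arg)
{
        int ret = 0;

        lock_kernel();                  /* the driver takes the BKL itself now */
        switch (cmd) {
        /* ... driver-specific commands ... */
        default:
                ret = -ENOIOCTLCMD;     /* let the core try generic handling */
        }
        unlock_kernel();
        return ret;
}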
index b9c1dba6bd01fe2882f404d90b2bd745872b50e9..8fe099a410654ffb35c5e9c0a3e8c42c8acbcb59 100644 (file)
@@ -80,7 +80,7 @@ scdrv_open(struct inode *inode, struct file *file)
        sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
        if (sd == NULL) {
                printk("%s: couldn't allocate subchannel data\n",
-                      __FUNCTION__);
+                      __func__);
                return -ENOMEM;
        }
 
@@ -90,7 +90,7 @@ scdrv_open(struct inode *inode, struct file *file)
 
        if (sd->sd_subch < 0) {
                kfree(sd);
-               printk("%s: couldn't allocate subchannel\n", __FUNCTION__);
+               printk("%s: couldn't allocate subchannel\n", __func__);
                return -EBUSY;
        }
 
@@ -110,7 +110,7 @@ scdrv_open(struct inode *inode, struct file *file)
        if (rv) {
                ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch);
                kfree(sd);
-               printk("%s: irq request failed (%d)\n", __FUNCTION__, rv);
+               printk("%s: irq request failed (%d)\n", __func__, rv);
                return -EBUSY;
        }
 
@@ -215,7 +215,7 @@ scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
                 */
                if (count < len) {
                        pr_debug("%s: only accepting %d of %d bytes\n",
-                                __FUNCTION__, (int) count, len);
+                                __func__, (int) count, len);
                }
                len = min((int) count, len);
                if (copy_to_user(buf, sd->sd_rb, len))
@@ -384,7 +384,7 @@ scdrv_init(void)
        if (alloc_chrdev_region(&first_dev, 0, num_cnodes,
                                SYSCTL_BASENAME) < 0) {
                printk("%s: failed to register SN system controller device\n",
-                      __FUNCTION__);
+                      __func__);
                return -ENODEV;
        }
        snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME);
@@ -403,7 +403,7 @@ scdrv_init(void)
                                      GFP_KERNEL);
                        if (!scd) {
                                printk("%s: failed to allocate device info"
-                                      "for %s/%s\n", __FUNCTION__,
+                                      "for %s/%s\n", __func__,
                                       SYSCTL_BASENAME, devname);
                                continue;
                        }
@@ -412,7 +412,7 @@ scdrv_init(void)
                        scd->scd_nasid = cnodeid_to_nasid(cnode);
                        if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) {
                                printk("%s: failed to allocate driver buffer"
-                                      "(%s%s)\n", __FUNCTION__,
+                                      "(%s%s)\n", __func__,
                                       SYSCTL_BASENAME, devname);
                                kfree(scd);
                                continue;
@@ -424,7 +424,7 @@ scdrv_init(void)
                                    ("%s: failed to initialize SAL for"
                                     " system controller communication"
                                     " (%s/%s): outdated PROM?\n",
-                                    __FUNCTION__, SYSCTL_BASENAME, devname);
+                                    __func__, SYSCTL_BASENAME, devname);
                                kfree(scd);
                                kfree(salbuf);
                                continue;
@@ -435,7 +435,7 @@ scdrv_init(void)
                        if (cdev_add(&scd->scd_cdev, dev, 1)) {
                                printk("%s: failed to register system"
                                       " controller device (%s%s)\n",
-                                      __FUNCTION__, SYSCTL_BASENAME, devname);
+                                      __func__, SYSCTL_BASENAME, devname);
                                kfree(scd);
                                kfree(salbuf);
                                continue;
index 1b75b0b7d5429dae92918dc1987c813b1de25138..53b3d44f8c066bb06dfa84e6ac98a7ddff660628 100644 (file)
@@ -63,16 +63,13 @@ static int
 scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
 {
        char *desc_end;
-       __be32 from_buf;
 
        /* record event source address */
-       from_buf = get_unaligned((__be32 *)event);
-       *src = be32_to_cpup(&from_buf);
+       *src = get_unaligned_be32(event);
        event += 4;                     /* move on to event code */
 
        /* record the system controller's event code */
-       from_buf = get_unaligned((__be32 *)event);
-       *code = be32_to_cpup(&from_buf);
+       *code = get_unaligned_be32(event);
        event += 4;                     /* move on to event arguments */
 
        /* how many arguments are in the packet? */
@@ -86,8 +83,7 @@ scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc)
                /* not an integer argument, so give up */
                return -1;
        }
-       from_buf = get_unaligned((__be32 *)event);
-       *esp_code = be32_to_cpup(&from_buf);
+       *esp_code = get_unaligned_be32(event);
        event += 4;
 
        /* parse out the event description */
@@ -275,7 +271,7 @@ scdrv_event_init(struct sysctl_data_s *scd)
        event_sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL);
        if (event_sd == NULL) {
                printk(KERN_WARNING "%s: couldn't allocate subchannel info"
-                      " for event monitoring\n", __FUNCTION__);
+                      " for event monitoring\n", __func__);
                return;
        }
 
@@ -289,7 +285,7 @@ scdrv_event_init(struct sysctl_data_s *scd)
        if (event_sd->sd_subch < 0) {
                kfree(event_sd);
                printk(KERN_WARNING "%s: couldn't open event subchannel\n",
-                      __FUNCTION__);
+                      __func__);
                return;
        }
 
@@ -299,7 +295,7 @@ scdrv_event_init(struct sysctl_data_s *scd)
                         "system controller events", event_sd);
        if (rv) {
                printk(KERN_WARNING "%s: irq request failed (%d)\n",
-                      __FUNCTION__, rv);
+                      __func__, rv);
                ia64_sn_irtr_close(event_sd->sd_nasid, event_sd->sd_subch);
                kfree(event_sd);
                return;
index c03ad164c39af5ff1e7fd73c7858c5f5ff9ab2f2..58533de59027806360ffe816e5b5571becf966b8 100644 (file)
@@ -506,7 +506,7 @@ static struct sonypi_device {
        while (--n && (command)) \
                udelay(1); \
        if (!n && (verbose || !quiet)) \
-               printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", __FILE__, __FUNCTION__, __LINE__); \
+               printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", __FILE__, __func__, __LINE__); \
 }
 
 #ifdef CONFIG_ACPI
index 4b5b5b78acb4835c3e58a212b92c87d035e512c5..2ee4d98937572195b474b5efdd42666606520a7a 100644 (file)
@@ -131,8 +131,8 @@ static int sx_rxfifo = SPECIALIX_RXFIFO;
 #define SX_DEBUG_FIFO    0x0800
 
 
-#define func_enter() dprintk (SX_DEBUG_FLOW, "io8: enter %s\n",__FUNCTION__)
-#define func_exit()  dprintk (SX_DEBUG_FLOW, "io8: exit  %s\n", __FUNCTION__)
+#define func_enter() dprintk (SX_DEBUG_FLOW, "io8: enter %s\n",__func__)
+#define func_exit()  dprintk (SX_DEBUG_FLOW, "io8: exit  %s\n", __func__)
 
 #define jiffies_from_ms(a) ((((a) * HZ)/1000)+1)
 
@@ -874,7 +874,7 @@ static irqreturn_t sx_interrupt(int dummy, void *dev_id)
 
        spin_lock_irqsave(&bp->lock, flags);
 
-       dprintk (SX_DEBUG_FLOW, "enter %s port %d room: %ld\n", __FUNCTION__, port_No(sx_get_port(bp, "INT")), SERIAL_XMIT_SIZE - sx_get_port(bp, "ITN")->xmit_cnt - 1);
+       dprintk (SX_DEBUG_FLOW, "enter %s port %d room: %ld\n", __func__, port_No(sx_get_port(bp, "INT")), SERIAL_XMIT_SIZE - sx_get_port(bp, "ITN")->xmit_cnt - 1);
        if (!(bp->flags & SX_BOARD_ACTIVE)) {
                dprintk (SX_DEBUG_IRQ, "sx: False interrupt. irq %d.\n", bp->irq);
                spin_unlock_irqrestore(&bp->lock, flags);
@@ -1504,6 +1504,27 @@ static int sx_open(struct tty_struct * tty, struct file * filp)
        return 0;
 }
 
+static void sx_flush_buffer(struct tty_struct *tty)
+{
+       struct specialix_port *port = (struct specialix_port *)tty->driver_data;
+       unsigned long flags;
+       struct specialix_board  * bp;
+
+       func_enter();
+
+       if (sx_paranoia_check(port, tty->name, "sx_flush_buffer")) {
+               func_exit();
+               return;
+       }
+
+       bp = port_Board(port);
+       spin_lock_irqsave(&port->lock, flags);
+       port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
+       spin_unlock_irqrestore(&port->lock, flags);
+       tty_wakeup(tty);
+
+       func_exit();
+}
 
 static void sx_close(struct tty_struct * tty, struct file * filp)
 {
@@ -1597,8 +1618,7 @@ static void sx_close(struct tty_struct * tty, struct file * filp)
        }
 
        sx_shutdown_port(bp, port);
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       sx_flush_buffer(tty);
        tty_ldisc_flush(tty);
        spin_lock_irqsave(&port->lock, flags);
        tty->closing = 0;
@@ -1670,7 +1690,7 @@ static int sx_write(struct tty_struct * tty,
 }
 
 
-static void sx_put_char(struct tty_struct * tty, unsigned char ch)
+static int sx_put_char(struct tty_struct * tty, unsigned char ch)
 {
        struct specialix_port *port = (struct specialix_port *)tty->driver_data;
        unsigned long flags;
@@ -1680,12 +1700,12 @@ static void sx_put_char(struct tty_struct * tty, unsigned char ch)
 
        if (sx_paranoia_check(port, tty->name, "sx_put_char")) {
                func_exit();
-               return;
+               return 0;
        }
        dprintk (SX_DEBUG_TX, "check tty: %p %p\n", tty, port->xmit_buf);
        if (!port->xmit_buf) {
                func_exit();
-               return;
+               return 0;
        }
        bp = port_Board(port);
        spin_lock_irqsave(&port->lock, flags);
@@ -1695,7 +1715,7 @@ static void sx_put_char(struct tty_struct * tty, unsigned char ch)
                spin_unlock_irqrestore(&port->lock, flags);
                dprintk (SX_DEBUG_TX, "Exit size\n");
                func_exit();
-               return;
+               return 0;
        }
        dprintk (SX_DEBUG_TX, "Handle xmit: %p %p\n", port, port->xmit_buf);
        port->xmit_buf[port->xmit_head++] = ch;
@@ -1704,6 +1724,7 @@ static void sx_put_char(struct tty_struct * tty, unsigned char ch)
        spin_unlock_irqrestore(&port->lock, flags);
 
        func_exit();
+       return 1;
 }
 
 
@@ -1770,28 +1791,6 @@ static int sx_chars_in_buffer(struct tty_struct *tty)
 }
 
 
-static void sx_flush_buffer(struct tty_struct *tty)
-{
-       struct specialix_port *port = (struct specialix_port *)tty->driver_data;
-       unsigned long flags;
-       struct specialix_board  * bp;
-
-       func_enter();
-
-       if (sx_paranoia_check(port, tty->name, "sx_flush_buffer")) {
-               func_exit();
-               return;
-       }
-
-       bp = port_Board(port);
-       spin_lock_irqsave(&port->lock, flags);
-       port->xmit_cnt = port->xmit_head = port->xmit_tail = 0;
-       spin_unlock_irqrestore(&port->lock, flags);
-       tty_wakeup(tty);
-
-       func_exit();
-}
-
 
 static int sx_tiocmget(struct tty_struct *tty, struct file *file)
 {
@@ -1803,7 +1802,7 @@ static int sx_tiocmget(struct tty_struct *tty, struct file *file)
 
        func_enter();
 
-       if (sx_paranoia_check(port, tty->name, __FUNCTION__)) {
+       if (sx_paranoia_check(port, tty->name, __func__)) {
                func_exit();
                return -ENODEV;
        }
@@ -1845,7 +1844,7 @@ static int sx_tiocmset(struct tty_struct *tty, struct file *file,
 
        func_enter();
 
-       if (sx_paranoia_check(port, tty->name, __FUNCTION__)) {
+       if (sx_paranoia_check(port, tty->name, __func__)) {
                func_exit();
                return -ENODEV;
        }
@@ -1922,29 +1921,13 @@ static inline int sx_set_serial_info(struct specialix_port * port,
        int change_speed;
 
        func_enter();
-       /*
-       if (!access_ok(VERIFY_READ, (void *) newinfo, sizeof(tmp))) {
-               func_exit();
-               return -EFAULT;
-       }
-       */
+
        if (copy_from_user(&tmp, newinfo, sizeof(tmp))) {
                func_enter();
                return -EFAULT;
        }
 
-#if 0
-       if ((tmp.irq != bp->irq) ||
-           (tmp.port != bp->base) ||
-           (tmp.type != PORT_CIRRUS) ||
-           (tmp.baud_base != (SX_OSCFREQ + CD186x_TPC/2) / CD186x_TPC) ||
-           (tmp.custom_divisor != 0) ||
-           (tmp.xmit_fifo_size != CD186x_NFIFO) ||
-           (tmp.flags & ~SPECIALIX_LEGAL_FLAGS)) {
-               func_exit();
-               return -EINVAL;
-       }
-#endif
+       lock_kernel();
 
        change_speed = ((port->flags & ASYNC_SPD_MASK) !=
                        (tmp.flags & ASYNC_SPD_MASK));
@@ -1956,6 +1939,7 @@ static inline int sx_set_serial_info(struct specialix_port * port,
                    ((tmp.flags & ~ASYNC_USR_MASK) !=
                     (port->flags & ~ASYNC_USR_MASK))) {
                        func_exit();
+                       unlock_kernel();
                        return -EPERM;
                }
                port->flags = ((port->flags & ~ASYNC_USR_MASK) |
@@ -1972,6 +1956,7 @@ static inline int sx_set_serial_info(struct specialix_port * port,
                sx_change_speed(bp, port);
        }
        func_exit();
+       unlock_kernel();
        return 0;
 }
 
@@ -1984,12 +1969,8 @@ static inline int sx_get_serial_info(struct specialix_port * port,
 
        func_enter();
 
-       /*
-       if (!access_ok(VERIFY_WRITE, (void *) retinfo, sizeof(tmp)))
-               return -EFAULT;
-       */
-
        memset(&tmp, 0, sizeof(tmp));
+       lock_kernel();
        tmp.type = PORT_CIRRUS;
        tmp.line = port - sx_port;
        tmp.port = bp->base;
@@ -2000,6 +1981,7 @@ static inline int sx_get_serial_info(struct specialix_port * port,
        tmp.closing_wait = port->closing_wait * HZ/100;
        tmp.custom_divisor =  port->custom_divisor;
        tmp.xmit_fifo_size = CD186x_NFIFO;
+       unlock_kernel();
        if (copy_to_user(retinfo, &tmp, sizeof(tmp))) {
                func_exit();
                return -EFAULT;
@@ -2045,23 +2027,6 @@ static int sx_ioctl(struct tty_struct * tty, struct file * filp,
                sx_send_break(port, arg ? arg*(HZ/10) : HZ/4);
                func_exit();
                return 0;
-        case TIOCGSOFTCAR:
-                if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)argp)) {
-                        func_exit();
-                        return -EFAULT;
-                }
-                func_exit();
-               return 0;
-        case TIOCSSOFTCAR:
-                if (get_user(arg, (unsigned long __user *) argp)) {
-                        func_exit();
-                        return -EFAULT;
-                }
-               tty->termios->c_cflag =
-                       ((tty->termios->c_cflag & ~CLOCAL) |
-                       (arg ? CLOCAL : 0));
-               func_exit();
-               return 0;
         case TIOCGSERIAL:
                 func_exit();
                return sx_get_serial_info(port, argp);
index 874aaa08e956d8ae8143308b9582317076dfd371..d17be10c5d2108f436a1479f15eb1f83373d82f4 100644 (file)
@@ -875,6 +875,7 @@ static void stl_waituntilsent(struct tty_struct *tty, int timeout)
                timeout = HZ;
        tend = jiffies + timeout;
 
+       lock_kernel();
        while (stl_datastate(portp)) {
                if (signal_pending(current))
                        break;
@@ -882,6 +883,7 @@ static void stl_waituntilsent(struct tty_struct *tty, int timeout)
                if (time_after_eq(jiffies, tend))
                        break;
        }
+       unlock_kernel();
 }
 
 /*****************************************************************************/
@@ -1273,18 +1275,9 @@ static int stl_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd
 
        rc = 0;
 
+       lock_kernel();
+
        switch (cmd) {
-       case TIOCGSOFTCAR:
-               rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
-                       (unsigned __user *) argp);
-               break;
-       case TIOCSSOFTCAR:
-               if (get_user(ival, (unsigned int __user *) arg))
-                       return -EFAULT;
-               tty->termios->c_cflag =
-                               (tty->termios->c_cflag & ~CLOCAL) |
-                               (ival ? CLOCAL : 0);
-               break;
        case TIOCGSERIAL:
                rc = stl_getserial(portp, argp);
                break;
@@ -1308,7 +1301,7 @@ static int stl_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd
                rc = -ENOIOCTLCMD;
                break;
        }
-
+       unlock_kernel();
        return rc;
 }
 
index a6e1c9ba12174d769e1389d5329b70676e5e3b16..f39f6fd8935064f01af2b91115e70111ddaa6769 100644 (file)
@@ -384,11 +384,11 @@ static struct real_driver sx_real_driver = {
 #define sx_dprintk(f, str...)  /* nothing */
 #endif
 
-#define func_enter()   sx_dprintk(SX_DEBUG_FLOW, "sx: enter %s\n",__FUNCTION__)
-#define func_exit()    sx_dprintk(SX_DEBUG_FLOW, "sx: exit  %s\n",__FUNCTION__)
+#define func_enter()   sx_dprintk(SX_DEBUG_FLOW, "sx: enter %s\n",__func__)
+#define func_exit()    sx_dprintk(SX_DEBUG_FLOW, "sx: exit  %s\n",__func__)
 
 #define func_enter2()  sx_dprintk(SX_DEBUG_FLOW, "sx: enter %s (port %d)\n", \
-                               __FUNCTION__, port->line)
+                               __func__, port->line)
 
 /* 
  *  Firmware loader driver specific routines
@@ -1574,7 +1574,7 @@ static void sx_close(void *ptr)
                sx_dprintk(SX_DEBUG_CLOSE, "WARNING port count:%d\n",
                                port->gs.count);
                /*printk("%s SETTING port count to zero: %p count: %d\n",
-                               __FUNCTION__, port, port->gs.count);
+                               __func__, port, port->gs.count);
                port->gs.count = 0;*/
        }
 
@@ -1844,6 +1844,7 @@ static void sx_break(struct tty_struct *tty, int flag)
        int rv;
 
        func_enter();
+       lock_kernel();
 
        if (flag)
                rv = sx_send_command(port, HS_START, -1, HS_IDLE_BREAK);
@@ -1852,7 +1853,7 @@ static void sx_break(struct tty_struct *tty, int flag)
        if (rv != 1)
                printk(KERN_ERR "sx: couldn't send break (%x).\n",
                        read_sx_byte(port->board, CHAN_OFFSET(port, hi_hstat)));
-
+       unlock_kernel();
        func_exit();
 }
 
@@ -1888,23 +1889,12 @@ static int sx_ioctl(struct tty_struct *tty, struct file *filp,
        int rc;
        struct sx_port *port = tty->driver_data;
        void __user *argp = (void __user *)arg;
-       int ival;
 
        /* func_enter2(); */
 
        rc = 0;
+       lock_kernel();
        switch (cmd) {
-       case TIOCGSOFTCAR:
-               rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0),
-                               (unsigned __user *)argp);
-               break;
-       case TIOCSSOFTCAR:
-               if ((rc = get_user(ival, (unsigned __user *)argp)) == 0) {
-                       tty->termios->c_cflag =
-                               (tty->termios->c_cflag & ~CLOCAL) |
-                               (ival ? CLOCAL : 0);
-               }
-               break;
        case TIOCGSERIAL:
                rc = gs_getserial(&port->gs, argp);
                break;
@@ -1915,6 +1905,7 @@ static int sx_ioctl(struct tty_struct *tty, struct file *filp,
                rc = -ENOIOCTLCMD;
                break;
        }
+       unlock_kernel();
 
        /* func_exit(); */
        return rc;
@@ -2549,7 +2540,7 @@ static int __devinit sx_eisa_probe(struct device *dev)
                goto err_flag;
        }
        board->base2 =
-       board->base = ioremap(board->hw_base, SI2_EISA_WINDOW_LEN);
+       board->base = ioremap_nocache(board->hw_base, SI2_EISA_WINDOW_LEN);
        if (!board->base) {
                dev_err(dev, "can't remap memory\n");
                goto err_reg;
@@ -2626,7 +2617,7 @@ static void __devinit fix_sx_pci(struct pci_dev *pdev, struct sx_board *board)
 
        pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &hwbase);
        hwbase &= PCI_BASE_ADDRESS_MEM_MASK;
-       rebase = ioremap(hwbase, 0x80);
+       rebase = ioremap_nocache(hwbase, 0x80);
        t = readl(rebase + CNTRL_REG_OFFSET);
        if (t != CNTRL_REG_GOODVALUE) {
                printk(KERN_DEBUG "sx: performing cntrl reg fix: %08x -> "
@@ -2770,7 +2761,7 @@ static int __init sx_init(void)
                if (!request_region(board->hw_base, board->hw_len, "sx"))
                        continue;
                board->base2 =
-               board->base = ioremap(board->hw_base, board->hw_len);
+               board->base = ioremap_nocache(board->hw_base, board->hw_len);
                if (!board->base)
                        goto err_sx_reg;
                board->flags &= ~SX_BOARD_TYPE;
@@ -2794,7 +2785,7 @@ err_sx_reg:
                if (!request_region(board->hw_base, board->hw_len, "sx"))
                        continue;
                board->base2 =
-               board->base = ioremap(board->hw_base, board->hw_len);
+               board->base = ioremap_nocache(board->hw_base, board->hw_len);
                if (!board->base)
                        goto err_si_reg;
                board->flags &= ~SX_BOARD_TYPE;
@@ -2817,7 +2808,7 @@ err_si_reg:
                if (!request_region(board->hw_base, board->hw_len, "sx"))
                        continue;
                board->base2 =
-               board->base = ioremap(board->hw_base, board->hw_len);
+               board->base = ioremap_nocache(board->hw_base, board->hw_len);
                if (!board->base)
                        goto err_si1_reg;
                board->flags &= ~SX_BOARD_TYPE;
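Every board-window mapping in this file moves from ioremap() to ioremap_nocache(), spelling out that the card's register/shared-memory window must not be mapped cacheable; the synclink hunks below make the same change. A minimal kernel-style sketch of mapping, touching and unmapping such a window (demo_map_window and its parameters are made up for illustration):

#include <linux/io.h>
#include <linux/errno.h>

static int demo_map_window(unsigned long phys, unsigned long len)
{
        void __iomem *regs = ioremap_nocache(phys, len);

        if (!regs)
                return -ENOMEM;
        writel(0, regs);                /* e.g. clear a control register at offset 0 */
        iounmap(regs);
        return 0;
}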
index a3237d48a584e24ac5a2f12e637183d8f43cee5f..ac5080df2565aef4cd85e96a112d3cd3ffa7820f 100644 (file)
@@ -218,9 +218,9 @@ struct mgsl_struct {
 
        u32 pending_bh;
 
-       int bh_running;         /* Protection from multiple */
+       bool bh_running;                /* Protection from multiple */
        int isr_overflow;
-       int bh_requested;
+       bool bh_requested;
        
        int dcd_chkcount;               /* check counts to prevent */
        int cts_chkcount;               /* too many IRQs if a signal */
@@ -250,12 +250,12 @@ struct mgsl_struct {
        int tx_holding_count;           /* number of tx holding buffers waiting */
        struct tx_holding_buffer tx_holding_buffers[MAX_TX_HOLDING_BUFFERS];
 
-       int rx_enabled;
-       int rx_overflow;
-       int rx_rcc_underrun;
+       bool rx_enabled;
+       bool rx_overflow;
+       bool rx_rcc_underrun;
 
-       int tx_enabled;
-       int tx_active;
+       bool tx_enabled;
+       bool tx_active;
        u32 idle_mode;
 
        u16 cmr_value;
@@ -269,14 +269,14 @@ struct mgsl_struct {
 
        unsigned int io_base;           /* base I/O address of adapter */
        unsigned int io_addr_size;      /* size of the I/O address range */
-       int io_addr_requested;          /* nonzero if I/O address requested */
+       bool io_addr_requested;         /* true if I/O address requested */
        
        unsigned int irq_level;         /* interrupt level */
        unsigned long irq_flags;
-       int irq_requested;              /* nonzero if IRQ requested */
+       bool irq_requested;             /* true if IRQ requested */
        
        unsigned int dma_level;         /* DMA channel */
-       int dma_requested;              /* nonzero if dma channel requested */
+       bool dma_requested;             /* true if dma channel requested */
 
        u16 mbre_bit;
        u16 loopback_bits;
@@ -286,27 +286,27 @@ struct mgsl_struct {
 
        unsigned char serial_signals;   /* current serial signal states */
 
-       int irq_occurred;               /* for diagnostics use */
+       bool irq_occurred;              /* for diagnostics use */
        unsigned int init_error;        /* Initialization startup error                 (DIAGS) */
        int     fDiagnosticsmode;       /* Driver in Diagnostic mode?                   (DIAGS) */
 
        u32 last_mem_alloc;
        unsigned char* memory_base;     /* shared memory address (PCI only) */
        u32 phys_memory_base;
-       int shared_mem_requested;
+       bool shared_mem_requested;
 
        unsigned char* lcr_base;        /* local config registers (PCI only) */
        u32 phys_lcr_base;
        u32 lcr_offset;
-       int lcr_mem_requested;
+       bool lcr_mem_requested;
 
        u32 misc_ctrl_value;
        char flag_buf[MAX_ASYNC_BUFFER_SIZE];
        char char_buf[MAX_ASYNC_BUFFER_SIZE];   
-       BOOLEAN drop_rts_on_tx_done;
+       bool drop_rts_on_tx_done;
 
-       BOOLEAN loopmode_insert_requested;
-       BOOLEAN loopmode_send_done_requested;
+       bool loopmode_insert_requested;
+       bool loopmode_send_done_requested;
        
        struct  _input_signal_events    input_signal_events;
 
@@ -752,10 +752,10 @@ static void mgsl_trace_block(struct mgsl_struct *info,const char* data, int coun
 /*
  * Adapter diagnostic routines
  */
-static BOOLEAN mgsl_register_test( struct mgsl_struct *info );
-static BOOLEAN mgsl_irq_test( struct mgsl_struct *info );
-static BOOLEAN mgsl_dma_test( struct mgsl_struct *info );
-static BOOLEAN mgsl_memory_test( struct mgsl_struct *info );
+static bool mgsl_register_test( struct mgsl_struct *info );
+static bool mgsl_irq_test( struct mgsl_struct *info );
+static bool mgsl_dma_test( struct mgsl_struct *info );
+static bool mgsl_memory_test( struct mgsl_struct *info );
 static int mgsl_adapter_test( struct mgsl_struct *info );
 
 /*
@@ -770,8 +770,8 @@ static struct mgsl_struct* mgsl_allocate_device(void);
  * DMA buffer manupulation functions.
  */
 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex );
-static int  mgsl_get_rx_frame( struct mgsl_struct *info );
-static int  mgsl_get_raw_rx_frame( struct mgsl_struct *info );
+static bool mgsl_get_rx_frame( struct mgsl_struct *info );
+static bool mgsl_get_raw_rx_frame( struct mgsl_struct *info );
 static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info );
 static void mgsl_reset_tx_dma_buffers( struct mgsl_struct *info );
 static int num_free_tx_dma_buffers(struct mgsl_struct *info);
@@ -791,7 +791,7 @@ static int mgsl_alloc_intermediate_rxbuffer_memory(struct mgsl_struct *info);
 static void mgsl_free_intermediate_rxbuffer_memory(struct mgsl_struct *info);
 static int mgsl_alloc_intermediate_txbuffer_memory(struct mgsl_struct *info);
 static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info);
-static int load_next_tx_holding_buffer(struct mgsl_struct *info);
+static bool load_next_tx_holding_buffer(struct mgsl_struct *info);
 static int save_tx_buffer_request(struct mgsl_struct *info,const char *Buffer, unsigned int BufferSize);
 
 /*
@@ -847,7 +847,7 @@ static int mgsl_wait_event(struct mgsl_struct * info, int __user *mask);
 static int mgsl_loopmode_send_done( struct mgsl_struct * info );
 
 /* set non-zero on successful registration with PCI subsystem */
-static int pci_registered;
+static bool pci_registered;
 
 /*
  * Global linked list of SyncLink devices
@@ -1054,8 +1054,8 @@ static int mgsl_bh_action(struct mgsl_struct *info)
 
        if (!rc) {
                /* Mark BH routine as complete */
-               info->bh_running   = 0;
-               info->bh_requested = 0;
+               info->bh_running = false;
+               info->bh_requested = false;
        }
        
        spin_unlock_irqrestore(&info->irq_spinlock,flags);
@@ -1079,7 +1079,7 @@ static void mgsl_bh_handler(struct work_struct *work)
                printk( "%s(%d):mgsl_bh_handler(%s) entry\n",
                        __FILE__,__LINE__,info->device_name);
        
-       info->bh_running = 1;
+       info->bh_running = true;
 
        while((action = mgsl_bh_action(info)) != 0) {
        
@@ -1113,7 +1113,7 @@ static void mgsl_bh_handler(struct work_struct *work)
 
 static void mgsl_bh_receive(struct mgsl_struct *info)
 {
-       int (*get_rx_frame)(struct mgsl_struct *info) =
+       bool (*get_rx_frame)(struct mgsl_struct *info) =
                (info->params.mode == MGSL_MODE_HDLC ? mgsl_get_rx_frame : mgsl_get_raw_rx_frame);
 
        if ( debug_level >= DEBUG_LEVEL_BH )
@@ -1187,7 +1187,7 @@ static void mgsl_isr_receive_status( struct mgsl_struct *info )
                usc_loopmode_active(info) )
        {
                ++info->icount.rxabort;
-               info->loopmode_insert_requested = FALSE;
+               info->loopmode_insert_requested = false;
  
                /* clear CMR:13 to start echoing RxD to TxD */
                info->cmr_value &= ~BIT13;
@@ -1257,7 +1257,7 @@ static void mgsl_isr_transmit_status( struct mgsl_struct *info )
        else
                info->icount.txunder++;
                        
-       info->tx_active = 0;
+       info->tx_active = false;
        info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
        del_timer(&info->tx_timer);     
        
@@ -1267,7 +1267,7 @@ static void mgsl_isr_transmit_status( struct mgsl_struct *info )
                        info->serial_signals &= ~SerialSignal_RTS;
                        usc_set_serial_signals( info );
                }
-               info->drop_rts_on_tx_done = 0;
+               info->drop_rts_on_tx_done = false;
        }
 
 #if SYNCLINK_GENERIC_HDLC
@@ -1403,7 +1403,7 @@ static void mgsl_isr_io_pin( struct mgsl_struct *info )
                usc_OutReg( info, SICR,
                        (unsigned short)(usc_InReg(info,SICR) & ~(SICR_TXC_ACTIVE+SICR_TXC_INACTIVE)) );
                usc_UnlatchIostatusBits( info, MISCSTATUS_TXC_LATCHED );
-               info->irq_occurred = 1;
+               info->irq_occurred = true;
        }
 
 }      /* end of mgsl_isr_io_pin() */
@@ -1431,7 +1431,7 @@ static void mgsl_isr_transmit_data( struct mgsl_struct *info )
        if ( info->xmit_cnt )
                usc_load_txfifo( info );
        else
-               info->tx_active = 0;
+               info->tx_active = false;
                
        if (info->xmit_cnt < WAKEUP_CHARS)
                info->pending_bh |= BH_TRANSMIT;
@@ -1568,7 +1568,7 @@ static void mgsl_isr_misc( struct mgsl_struct *info )
 
                /* schedule BH handler to restart receiver */
                info->pending_bh |= BH_RECEIVE;
-               info->rx_rcc_underrun = 1;
+               info->rx_rcc_underrun = true;
        }
 
        usc_ClearIrqPendingBits( info, MISC );
@@ -1626,7 +1626,7 @@ static void mgsl_isr_receive_dma( struct mgsl_struct *info )
        info->pending_bh |= BH_RECEIVE;
        
        if ( status & BIT3 ) {
-               info->rx_overflow = 1;
+               info->rx_overflow = true;
                info->icount.buf_overrun++;
        }
 
@@ -1745,7 +1745,7 @@ static irqreturn_t mgsl_interrupt(int dummy, void *dev_id)
                        printk("%s(%d):%s queueing bh task.\n",
                                __FILE__,__LINE__,info->device_name);
                schedule_work(&info->task);
-               info->bh_requested = 1;
+               info->bh_requested = true;
        }
 
        spin_unlock(&info->irq_spinlock);
@@ -2026,34 +2026,35 @@ static void mgsl_change_params(struct mgsl_struct *info)
  *             
  * Return Value:       None
  */
-static void mgsl_put_char(struct tty_struct *tty, unsigned char ch)
+static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
 {
-       struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data;
+       struct mgsl_struct *info = tty->driver_data;
        unsigned long flags;
+       int ret = 0;
 
-       if ( debug_level >= DEBUG_LEVEL_INFO ) {
-               printk( "%s(%d):mgsl_put_char(%d) on %s\n",
-                       __FILE__,__LINE__,ch,info->device_name);
+       if (debug_level >= DEBUG_LEVEL_INFO) {
+               printk(KERN_DEBUG "%s(%d):mgsl_put_char(%d) on %s\n",
+                       __FILE__, __LINE__, ch, info->device_name);
        }               
        
        if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
-               return;
+               return 0;
 
        if (!tty || !info->xmit_buf)
-               return;
+               return 0;
 
-       spin_lock_irqsave(&info->irq_spinlock,flags);
-       
-       if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) {
+       spin_lock_irqsave(&info->irq_spinlock, flags);
        
+       if ((info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active) {
                if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
                        info->xmit_buf[info->xmit_head++] = ch;
                        info->xmit_head &= SERIAL_XMIT_SIZE-1;
                        info->xmit_cnt++;
+                       ret = 1;
                }
        }
-       
-       spin_unlock_irqrestore(&info->irq_spinlock,flags);
+       spin_unlock_irqrestore(&info->irq_spinlock, flags);
+       return ret;
        
 }      /* end of mgsl_put_char() */
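mgsl_put_char() follows the same locking rule spelled out in the comment above rp_put_char() at the start of this section (write buffer and counters protected by spinlocks): the transmit ring is shared with the interrupt path, so the writer takes the port spinlock with interrupts disabled. A minimal kernel-style sketch of that rule (demo_* names are made up):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_queue_byte(unsigned char *buf, unsigned int *cnt, unsigned char ch)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);   /* keeps the local ISR out while held */
        buf[(*cnt)++] = ch;                     /* ring-index handling omitted */
        spin_unlock_irqrestore(&demo_lock, flags);
}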
 
@@ -2942,6 +2943,7 @@ static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
                    unsigned int cmd, unsigned long arg)
 {
        struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data;
+       int ret;
        
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__,
@@ -2956,7 +2958,10 @@ static int mgsl_ioctl(struct tty_struct *tty, struct file * file,
                    return -EIO;
        }
 
-       return mgsl_ioctl_common(info, cmd, arg);
+       lock_kernel();
+       ret = mgsl_ioctl_common(info, cmd, arg);
+       unlock_kernel();
+       return ret;
 }
 
 static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg)
@@ -3153,8 +3158,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
        if (info->flags & ASYNC_INITIALIZED)
                mgsl_wait_until_sent(tty, info->timeout);
 
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       mgsl_flush_buffer(tty);
 
        tty_ldisc_flush(tty);
                
@@ -3217,7 +3221,8 @@ static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
         * interval should also be less than the timeout.
         * Note: use tight timings here to satisfy the NIST-PCTS.
         */ 
-       
+
+       lock_kernel();
        if ( info->params.data_rate ) {
                char_time = info->timeout/(32 * 5);
                if (!char_time)
@@ -3247,6 +3252,7 @@ static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout)
                                break;
                }
        }
+       unlock_kernel();
       
 exit:
        if (debug_level >= DEBUG_LEVEL_INFO)
@@ -3303,7 +3309,8 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
 {
        DECLARE_WAITQUEUE(wait, current);
        int             retval;
-       int             do_clocal = 0, extra_count = 0;
+       bool            do_clocal = false;
+       bool            extra_count = false;
        unsigned long   flags;
        
        if (debug_level >= DEBUG_LEVEL_INFO)
@@ -3317,7 +3324,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
        }
 
        if (tty->termios->c_cflag & CLOCAL)
-               do_clocal = 1;
+               do_clocal = true;
 
        /* Wait for carrier detect and the line to become
         * free (i.e., not in use by the callout).  While we are in
@@ -3335,7 +3342,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
 
        spin_lock_irqsave(&info->irq_spinlock, flags);
        if (!tty_hung_up_p(filp)) {
-               extra_count = 1;
+               extra_count = true;
                info->count--;
        }
        spin_unlock_irqrestore(&info->irq_spinlock, flags);
@@ -4043,13 +4050,13 @@ static void mgsl_free_intermediate_txbuffer_memory(struct mgsl_struct *info)
  *
  *     info            pointer to device instance data
  *
- * Return Value:       1 if next buffered tx request loaded
+ * Return Value:       true if next buffered tx request loaded
  *                     into adapter's tx dma buffer,
- *                     0 otherwise
+ *                     false otherwise
  */
-static int load_next_tx_holding_buffer(struct mgsl_struct *info)
+static bool load_next_tx_holding_buffer(struct mgsl_struct *info)
 {
-       int ret = 0;
+       bool ret = false;
 
        if ( info->tx_holding_count ) {
                /* determine if we have enough tx dma buffers
@@ -4073,7 +4080,7 @@ static int load_next_tx_holding_buffer(struct mgsl_struct *info)
                        /* restart transmit timer */
                        mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
 
-                       ret = 1;
+                       ret = true;
                }
        }
 
@@ -4119,7 +4126,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
                        __FILE__,__LINE__,info->device_name, info->io_base);
                return -ENODEV;
        }
-       info->io_addr_requested = 1;
+       info->io_addr_requested = true;
        
        if ( request_irq(info->irq_level,mgsl_interrupt,info->irq_flags,
                info->device_name, info ) < 0 ) {
@@ -4127,7 +4134,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
                        __FILE__,__LINE__,info->device_name, info->irq_level );
                goto errout;
        }
-       info->irq_requested = 1;
+       info->irq_requested = true;
        
        if ( info->bus_type == MGSL_BUS_TYPE_PCI ) {
                if (request_mem_region(info->phys_memory_base,0x40000,"synclink") == NULL) {
@@ -4135,15 +4142,16 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
                                __FILE__,__LINE__,info->device_name, info->phys_memory_base);
                        goto errout;
                }
-               info->shared_mem_requested = 1;
+               info->shared_mem_requested = true;
                if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclink") == NULL) {
                        printk( "%s(%d):lcr mem addr conflict device %s Addr=%08X\n",
                                __FILE__,__LINE__,info->device_name, info->phys_lcr_base + info->lcr_offset);
                        goto errout;
                }
-               info->lcr_mem_requested = 1;
+               info->lcr_mem_requested = true;
 
-               info->memory_base = ioremap(info->phys_memory_base,0x40000);
+               info->memory_base = ioremap_nocache(info->phys_memory_base,
+                                                               0x40000);
                if (!info->memory_base) {
                        printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n",
                                __FILE__,__LINE__,info->device_name, info->phys_memory_base );
@@ -4156,12 +4164,14 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
                        goto errout;
                }
                
-               info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE) + info->lcr_offset;
+               info->lcr_base = ioremap_nocache(info->phys_lcr_base,
+                                                               PAGE_SIZE);
                if (!info->lcr_base) {
                        printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n",
                                __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
                        goto errout;
                }
+               info->lcr_base += info->lcr_offset;
                
        } else {
                /* claim DMA channel */
@@ -4172,7 +4182,7 @@ static int mgsl_claim_resources(struct mgsl_struct *info)
                        mgsl_release_resources( info );
                        return -ENODEV;
                }
-               info->dma_requested = 1;
+               info->dma_requested = true;
 
                /* ISA adapter uses bus master DMA */           
                set_dma_mode(info->dma_level,DMA_MODE_CASCADE);
@@ -4200,12 +4210,12 @@ static void mgsl_release_resources(struct mgsl_struct *info)
                        
        if ( info->irq_requested ) {
                free_irq(info->irq_level, info);
-               info->irq_requested = 0;
+               info->irq_requested = false;
        }
        if ( info->dma_requested ) {
                disable_dma(info->dma_level);
                free_dma(info->dma_level);
-               info->dma_requested = 0;
+               info->dma_requested = false;
        }
        mgsl_free_dma_buffers(info);
        mgsl_free_intermediate_rxbuffer_memory(info);
@@ -4213,15 +4223,15 @@ static void mgsl_release_resources(struct mgsl_struct *info)
        
        if ( info->io_addr_requested ) {
                release_region(info->io_base,info->io_addr_size);
-               info->io_addr_requested = 0;
+               info->io_addr_requested = false;
        }
        if ( info->shared_mem_requested ) {
                release_mem_region(info->phys_memory_base,0x40000);
-               info->shared_mem_requested = 0;
+               info->shared_mem_requested = false;
        }
        if ( info->lcr_mem_requested ) {
                release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
-               info->lcr_mem_requested = 0;
+               info->lcr_mem_requested = false;
        }
        if (info->memory_base){
                iounmap(info->memory_base);
@@ -4486,7 +4496,7 @@ static int __init synclink_init(void)
        if ((rc = pci_register_driver(&synclink_pci_driver)) < 0)
                printk("%s:failed to register PCI driver, error=%d\n",__FILE__,rc);
        else
-               pci_registered = 1;
+               pci_registered = true;
 
        if ((rc = mgsl_init_tty()) < 0)
                goto error;
@@ -4679,7 +4689,7 @@ static u16 usc_InReg( struct mgsl_struct *info, u16 RegAddr )
 static void usc_set_sdlc_mode( struct mgsl_struct *info )
 {
        u16 RegValue;
-       int PreSL1660;
+       bool PreSL1660;
        
        /*
         * determine if the IUSC on the adapter is pre-SL1660. If
@@ -4692,11 +4702,7 @@ static void usc_set_sdlc_mode( struct mgsl_struct *info )
         */
        usc_OutReg(info,TMCR,0x1f);
        RegValue=usc_InReg(info,TMDR);
-       if ( RegValue == IUSC_PRE_SL1660 )
-               PreSL1660 = 1;
-       else
-               PreSL1660 = 0;
-       
+       PreSL1660 = (RegValue == IUSC_PRE_SL1660);
 
        if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
        {
@@ -5382,9 +5388,9 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
        int start_index;
        int end_index;
        int frame_start_index;
-       int start_of_frame_found = FALSE;
-       int end_of_frame_found = FALSE;
-       int reprogram_dma = FALSE;
+       bool start_of_frame_found = false;
+       bool end_of_frame_found = false;
+       bool reprogram_dma = false;
 
        DMABUFFERENTRY *buffer_list = info->rx_buffer_list;
        u32 phys_addr;
@@ -5410,9 +5416,9 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
 
                if ( !start_of_frame_found )
                {
-                       start_of_frame_found = TRUE;
+                       start_of_frame_found = true;
                        frame_start_index = end_index;
-                       end_of_frame_found = FALSE;
+                       end_of_frame_found = false;
                }
 
                if ( buffer_list[end_index].status )
@@ -5423,8 +5429,8 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
                        /* We want to leave the buffers for this frame intact. */
                        /* Move on to next possible frame. */
 
-                       start_of_frame_found = FALSE;
-                       end_of_frame_found = TRUE;
+                       start_of_frame_found = false;
+                       end_of_frame_found = true;
                }
 
                /* advance to next buffer entry in linked list */
@@ -5439,8 +5445,8 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
                        /* completely screwed, reset all receive buffers! */
                        mgsl_reset_rx_dma_buffers( info );
                        frame_start_index = 0;
-                       start_of_frame_found = FALSE;
-                       reprogram_dma = TRUE;
+                       start_of_frame_found = false;
+                       reprogram_dma = true;
                        break;
                }
        }
@@ -5466,7 +5472,7 @@ static void usc_process_rxoverrun_sync( struct mgsl_struct *info )
 
                } while( start_index != end_index );
 
-               reprogram_dma = TRUE;
+               reprogram_dma = true;
        }
 
        if ( reprogram_dma )
@@ -5536,9 +5542,9 @@ static void usc_stop_receiver( struct mgsl_struct *info )
        usc_OutReg( info, CCSR, (u16)(usc_InReg(info,CCSR) | BIT13) );
        usc_RTCmd( info, RTCmd_PurgeRxFifo );
 
-       info->rx_enabled = 0;
-       info->rx_overflow = 0;
-       info->rx_rcc_underrun = 0;
+       info->rx_enabled = false;
+       info->rx_overflow = false;
+       info->rx_rcc_underrun = false;
        
 }      /* end of stop_receiver() */
 
@@ -5601,7 +5607,7 @@ static void usc_start_receiver( struct mgsl_struct *info )
 
        usc_OutReg( info, CCSR, 0x1020 );
 
-       info->rx_enabled = 1;
+       info->rx_enabled = true;
 
 }      /* end of usc_start_receiver() */
 
@@ -5628,14 +5634,14 @@ static void usc_start_transmitter( struct mgsl_struct *info )
                /* RTS and set a flag indicating that the driver should */
                /* negate RTS when the transmission completes. */
 
-               info->drop_rts_on_tx_done = 0;
+               info->drop_rts_on_tx_done = false;
 
                if ( info->params.flags & HDLC_FLAG_AUTO_RTS ) {
                        usc_get_serial_signals( info );
                        if ( !(info->serial_signals & SerialSignal_RTS) ) {
                                info->serial_signals |= SerialSignal_RTS;
                                usc_set_serial_signals( info );
-                               info->drop_rts_on_tx_done = 1;
+                               info->drop_rts_on_tx_done = true;
                        }
                }
 
@@ -5699,11 +5705,11 @@ static void usc_start_transmitter( struct mgsl_struct *info )
                        mod_timer(&info->tx_timer, jiffies +
                                        msecs_to_jiffies(5000));
                }
-               info->tx_active = 1;
+               info->tx_active = true;
        }
 
        if ( !info->tx_enabled ) {
-               info->tx_enabled = 1;
+               info->tx_enabled = true;
                if ( info->params.flags & HDLC_FLAG_AUTO_CTS )
                        usc_EnableTransmitter(info,ENABLE_AUTO_CTS);
                else
@@ -5735,8 +5741,8 @@ static void usc_stop_transmitter( struct mgsl_struct *info )
        usc_DmaCmd( info, DmaCmd_ResetTxChannel );
        usc_RTCmd( info, RTCmd_PurgeTxFifo );
 
-       info->tx_enabled = 0;
-       info->tx_active  = 0;
+       info->tx_enabled = false;
+       info->tx_active = false;
 
 }      /* end of usc_stop_transmitter() */
 
@@ -6520,7 +6526,7 @@ static void mgsl_reset_rx_dma_buffers( struct mgsl_struct *info )
  */
 static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int StartIndex, unsigned int EndIndex )
 {
-       int Done = 0;
+       bool Done = false;
        DMABUFFERENTRY *pBufEntry;
        unsigned int Index;
 
@@ -6534,7 +6540,7 @@ static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int S
 
                if ( Index == EndIndex ) {
                        /* This is the last buffer of the frame! */
-                       Done = 1;
+                       Done = true;
                }
 
                /* reset current buffer for reuse */
@@ -6559,18 +6565,18 @@ static void mgsl_free_rx_frame_buffers( struct mgsl_struct *info, unsigned int S
  *     receive DMA buffers. Only frames received without errors are returned.
  *
  * Arguments:          info    pointer to device extension
- * Return Value:       1 if frame returned, otherwise 0
+ * Return Value:       true if frame returned, otherwise false
  */
-static int mgsl_get_rx_frame(struct mgsl_struct *info)
+static bool mgsl_get_rx_frame(struct mgsl_struct *info)
 {
        unsigned int StartIndex, EndIndex;      /* index of 1st and last buffers of Rx frame */
        unsigned short status;
        DMABUFFERENTRY *pBufEntry;
        unsigned int framesize = 0;
-       int ReturnCode = 0;
+       bool ReturnCode = false;
        unsigned long flags;
        struct tty_struct *tty = info->tty;
-       int return_frame = 0;
+       bool return_frame = false;
        
        /*
         * current_rx_buffer points to the 1st buffer of the next available
@@ -6629,7 +6635,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info)
                else {
                        info->icount.rxcrc++;
                        if ( info->params.crc_type & HDLC_CRC_RETURN_EX )
-                               return_frame = 1;
+                               return_frame = true;
                }
                framesize = 0;
 #if SYNCLINK_GENERIC_HDLC
@@ -6640,7 +6646,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info)
                }
 #endif
        } else
-               return_frame = 1;
+               return_frame = true;
 
        if ( return_frame ) {
                /* receive frame has no errors, get frame size.
@@ -6719,7 +6725,7 @@ static int mgsl_get_rx_frame(struct mgsl_struct *info)
        /* Free the buffers used by this frame. */
        mgsl_free_rx_frame_buffers( info, StartIndex, EndIndex );
 
-       ReturnCode = 1;
+       ReturnCode = true;
 
 Cleanup:
 
@@ -6758,15 +6764,15 @@ Cleanup:
  *     last Rx DMA buffer and return that last portion of the frame.
  *
  * Arguments:          info    pointer to device extension
- * Return Value:       1 if frame returned, otherwise 0
+ * Return Value:       true if frame returned, otherwise false
  */
-static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
+static bool mgsl_get_raw_rx_frame(struct mgsl_struct *info)
 {
        unsigned int CurrentIndex, NextIndex;
        unsigned short status;
        DMABUFFERENTRY *pBufEntry;
        unsigned int framesize = 0;
-       int ReturnCode = 0;
+       bool ReturnCode = false;
        unsigned long flags;
        struct tty_struct *tty = info->tty;
 
@@ -6891,7 +6897,7 @@ static int mgsl_get_raw_rx_frame(struct mgsl_struct *info)
                /* Free the buffers used by this frame. */
                mgsl_free_rx_frame_buffers( info, CurrentIndex, CurrentIndex );
 
-               ReturnCode = 1;
+               ReturnCode = true;
        }
 
 
@@ -7000,15 +7006,15 @@ static void mgsl_load_tx_dma_buffer(struct mgsl_struct *info,
  *     Performs a register test of the 16C32.
  *     
  * Arguments:          info    pointer to device instance data
- * Return Value:               TRUE if test passed, otherwise FALSE
+ * Return Value:               true if test passed, otherwise false
  */
-static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
+static bool mgsl_register_test( struct mgsl_struct *info )
 {
        static unsigned short BitPatterns[] =
                { 0x0000, 0xffff, 0xaaaa, 0x5555, 0x1234, 0x6969, 0x9696, 0x0f0f };
        static unsigned int Patterncount = ARRAY_SIZE(BitPatterns);
        unsigned int i;
-       BOOLEAN rc = TRUE;
+       bool rc = true;
        unsigned long flags;
 
        spin_lock_irqsave(&info->irq_spinlock,flags);
@@ -7019,10 +7025,10 @@ static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
        if ( (usc_InReg( info, SICR ) != 0) ||
                  (usc_InReg( info, IVR  ) != 0) ||
                  (usc_InDmaReg( info, DIVR ) != 0) ){
-               rc = FALSE;
+               rc = false;
        }
 
-       if ( rc == TRUE ){
+       if ( rc ){
                /* Write bit patterns to various registers but do it out of */
                /* sync, then read back and verify values. */
 
@@ -7040,7 +7046,7 @@ static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
                                  (usc_InReg( info, RCLR ) != BitPatterns[(i+3)%Patterncount]) ||
                                  (usc_InReg( info, RSR )  != BitPatterns[(i+4)%Patterncount]) ||
                                  (usc_InDmaReg( info, TBCR ) != BitPatterns[(i+5)%Patterncount]) ){
-                               rc = FALSE;
+                               rc = false;
                                break;
                        }
                }
@@ -7056,9 +7062,9 @@ static BOOLEAN mgsl_register_test( struct mgsl_struct *info )
 /* mgsl_irq_test()     Perform interrupt test of the 16C32.
  * 
  * Arguments:          info    pointer to device instance data
- * Return Value:       TRUE if test passed, otherwise FALSE
+ * Return Value:       true if test passed, otherwise false
  */
-static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
+static bool mgsl_irq_test( struct mgsl_struct *info )
 {
        unsigned long EndTime;
        unsigned long flags;
@@ -7068,10 +7074,10 @@ static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
 
        /*
         * Setup 16C32 to interrupt on TxC pin (14MHz clock) transition. 
-        * The ISR sets irq_occurred to 1. 
+        * The ISR sets irq_occurred to true.
         */
 
-       info->irq_occurred = FALSE;
+       info->irq_occurred = false;
 
        /* Enable INTEN gate for ISA adapter (Port 6, Bit12) */
        /* Enable INTEN (Port 6, Bit12) */
@@ -7097,10 +7103,7 @@ static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
        usc_reset(info);
        spin_unlock_irqrestore(&info->irq_spinlock,flags);
        
-       if ( !info->irq_occurred ) 
-               return FALSE;
-       else
-               return TRUE;
+       return info->irq_occurred;
 
 }      /* end of mgsl_irq_test() */
 
@@ -7111,16 +7114,16 @@ static BOOLEAN mgsl_irq_test( struct mgsl_struct *info )
  *     using single buffer DMA mode.
  *     
  * Arguments:          info    pointer to device instance data
- * Return Value:       TRUE if test passed, otherwise FALSE
+ * Return Value:       true if test passed, otherwise false
  */
-static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
+static bool mgsl_dma_test( struct mgsl_struct *info )
 {
        unsigned short FifoLevel;
        unsigned long phys_addr;
        unsigned int FrameSize;
        unsigned int i;
        char *TmpPtr;
-       BOOLEAN rc = TRUE;
+       bool rc = true;
        unsigned short status=0;
        unsigned long EndTime;
        unsigned long flags;
@@ -7233,7 +7236,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
 
        for(;;) {
                if (time_after(jiffies, EndTime)) {
-                       rc = FALSE;
+                       rc = false;
                        break;
                }
 
@@ -7289,7 +7292,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
 
        for(;;) {
                if (time_after(jiffies, EndTime)) {
-                       rc = FALSE;
+                       rc = false;
                        break;
                }
 
@@ -7309,7 +7312,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
        }
 
 
-       if ( rc == TRUE )
+       if ( rc )
        {
                /* Enable 16C32 transmitter. */
 
@@ -7337,7 +7340,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
 
                while ( !(status & (BIT6+BIT5+BIT4+BIT2+BIT1)) ) {
                        if (time_after(jiffies, EndTime)) {
-                               rc = FALSE;
+                               rc = false;
                                break;
                        }
 
@@ -7348,13 +7351,13 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
        }
 
 
-       if ( rc == TRUE ){
+       if ( rc ){
                /* CHECK FOR TRANSMIT ERRORS */
                if ( status & (BIT5 + BIT1) ) 
-                       rc = FALSE;
+                       rc = false;
        }
 
-       if ( rc == TRUE ) {
+       if ( rc ) {
                /* WAIT FOR RECEIVE COMPLETE */
 
                /* Wait 100ms */
@@ -7364,7 +7367,7 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
                status=info->rx_buffer_list[0].status;
                while ( status == 0 ) {
                        if (time_after(jiffies, EndTime)) {
-                               rc = FALSE;
+                               rc = false;
                                break;
                        }
                        status=info->rx_buffer_list[0].status;
@@ -7372,17 +7375,17 @@ static BOOLEAN mgsl_dma_test( struct mgsl_struct *info )
        }
 
 
-       if ( rc == TRUE ) {
+       if ( rc ) {
                /* CHECK FOR RECEIVE ERRORS */
                status = info->rx_buffer_list[0].status;
 
                if ( status & (BIT8 + BIT3 + BIT1) ) {
                        /* receive error has occurred */
-                       rc = FALSE;
+                       rc = false;
                } else {
                        if ( memcmp( info->tx_buffer_list[0].virt_addr ,
                                info->rx_buffer_list[0].virt_addr, FrameSize ) ){
-                               rc = FALSE;
+                               rc = false;
                        }
                }
        }
@@ -7445,9 +7448,9 @@ static int mgsl_adapter_test( struct mgsl_struct *info )
  *     Test the shared memory on a PCI adapter.
  * 
  * Arguments:          info    pointer to device instance data
- * Return Value:       TRUE if test passed, otherwise FALSE
+ * Return Value:       true if test passed, otherwise false
  */
-static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
+static bool mgsl_memory_test( struct mgsl_struct *info )
 {
        static unsigned long BitPatterns[] =
                { 0x0, 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
@@ -7457,7 +7460,7 @@ static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
        unsigned long * TestAddr;
 
        if ( info->bus_type != MGSL_BUS_TYPE_PCI )
-               return TRUE;
+               return true;
 
        TestAddr = (unsigned long *)info->memory_base;
 
@@ -7466,7 +7469,7 @@ static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
        for ( i = 0 ; i < Patterncount ; i++ ) {
                *TestAddr = BitPatterns[i];
                if ( *TestAddr != BitPatterns[i] )
-                       return FALSE;
+                       return false;
        }
 
        /* Test address lines with incrementing pattern over */
@@ -7481,13 +7484,13 @@ static BOOLEAN mgsl_memory_test( struct mgsl_struct *info )
 
        for ( i = 0 ; i < TestLimit ; i++ ) {
                if ( *TestAddr != i * 4 )
-                       return FALSE;
+                       return false;
                TestAddr++;
        }
 
        memset( info->memory_base, 0, SHARED_MEM_ADDRESS_SIZE );
 
-       return TRUE;
+       return true;
 
 }      /* End Of mgsl_memory_test() */
 
@@ -7604,7 +7607,7 @@ static void mgsl_tx_timeout(unsigned long context)
                info->icount.txtimeout++;
        }
        spin_lock_irqsave(&info->irq_spinlock,flags);
-       info->tx_active = 0;
+       info->tx_active = false;
        info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
 
        if ( info->params.flags & HDLC_FLAG_HDLC_LOOPMODE )
@@ -7632,7 +7635,7 @@ static int mgsl_loopmode_send_done( struct mgsl_struct * info )
        spin_lock_irqsave(&info->irq_spinlock,flags);
        if (info->params.flags & HDLC_FLAG_HDLC_LOOPMODE) {
                if (info->tx_active)
-                       info->loopmode_send_done_requested = TRUE;
+                       info->loopmode_send_done_requested = true;
                else
                        usc_loopmode_send_done(info);
        }
@@ -7646,7 +7649,7 @@ static int mgsl_loopmode_send_done( struct mgsl_struct * info )
  */
 static void usc_loopmode_send_done( struct mgsl_struct * info )
 {
-       info->loopmode_send_done_requested = FALSE;
+       info->loopmode_send_done_requested = false;
        /* clear CMR:13 to 0 to start echoing RxData to TxData */
        info->cmr_value &= ~BIT13;                        
        usc_OutReg(info, CMR, info->cmr_value);
@@ -7668,7 +7671,7 @@ static void usc_loopmode_cancel_transmit( struct mgsl_struct * info )
  */
 static void usc_loopmode_insert_request( struct mgsl_struct * info )
 {
-       info->loopmode_insert_requested = TRUE;
+       info->loopmode_insert_requested = true;
  
        /* enable RxAbort irq. On next RxAbort, clear CMR:13 to
         * begin repeating TxData on RxData (complete insertion)
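
Every synclink.c hunk above follows one mechanical pattern: driver-private BOOLEAN/TRUE/FALSE and int-used-as-flag storage becomes C99 bool/true/false, and tests such as "if (rc == TRUE)" collapse to "if (rc)". A minimal user-space sketch of the idiom; only <stdbool.h> and <string.h> are assumed (inside the kernel the type comes from linux/types.h, so the header here is purely illustrative), and the structure and function names are made up:

#include <stdbool.h>
#include <string.h>

struct demo_port {
        bool tx_active;               /* was: int tx_active, holding 0/1      */
        bool drop_rts_on_tx_done;     /* was: BOOLEAN ..., holding TRUE/FALSE */
};

/* was: a BOOLEAN-returning helper; bool states the intent directly */
static bool buffers_match(const unsigned char *sent,
                          const unsigned char *received, size_t len)
{
        return memcmp(sent, received, len) == 0;
}

Call sites then test the result directly, as the converted driver does with "if (rc)" rather than "if (rc == TRUE)".
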
index 3c89266c8255fedeb8b5114abeb28a7998691e22..2001b0e52dc69c51a765ff8ffdef2a426e202b01 100644 (file)
@@ -117,7 +117,7 @@ static struct pci_driver pci_driver = {
        .remove         = __devexit_p(remove_one),
 };
 
-static int pci_registered;
+static bool pci_registered;
 
 /*
  * module configuration and status
@@ -151,7 +151,7 @@ static void hangup(struct tty_struct *tty);
 static void set_termios(struct tty_struct *tty, struct ktermios *old_termios);
 
 static int  write(struct tty_struct *tty, const unsigned char *buf, int count);
-static void put_char(struct tty_struct *tty, unsigned char ch);
+static int put_char(struct tty_struct *tty, unsigned char ch);
 static void send_xchar(struct tty_struct *tty, char ch);
 static void wait_until_sent(struct tty_struct *tty, int timeout);
 static int  write_room(struct tty_struct *tty);
@@ -289,12 +289,12 @@ struct slgt_info {
 
        struct work_struct task;
        u32 pending_bh;
-       int bh_requested;
-       int bh_running;
+       bool bh_requested;
+       bool bh_running;
 
        int isr_overflow;
-       int irq_requested;      /* nonzero if IRQ requested */
-       int irq_occurred;       /* for diagnostics use */
+       bool irq_requested;     /* true if IRQ requested */
+       bool irq_occurred;      /* for diagnostics use */
 
        /* device configuration */
 
@@ -304,7 +304,7 @@ struct slgt_info {
 
        unsigned char __iomem * reg_addr;  /* memory mapped registers address */
        u32 phys_reg_addr;
-       int reg_addr_requested;
+       bool reg_addr_requested;
 
        MGSL_PARAMS params;       /* communications parameters */
        u32 idle_mode;
@@ -315,11 +315,11 @@ struct slgt_info {
 
        /* device status */
 
-       int rx_enabled;
-       int rx_restart;
+       bool rx_enabled;
+       bool rx_restart;
 
-       int tx_enabled;
-       int tx_active;
+       bool tx_enabled;
+       bool tx_active;
 
        unsigned char signals;    /* serial signal states */
        int init_error;  /* initialization error */
@@ -329,7 +329,7 @@ struct slgt_info {
 
        char flag_buf[MAX_ASYNC_BUFFER_SIZE];
        char char_buf[MAX_ASYNC_BUFFER_SIZE];
-       BOOLEAN drop_rts_on_tx_done;
+       bool drop_rts_on_tx_done;
        struct  _input_signal_events    input_signal_events;
 
        int dcd_chkcount;       /* check counts to prevent */
@@ -467,8 +467,8 @@ static void rx_start(struct slgt_info *info);
 static void reset_rbufs(struct slgt_info *info);
 static void free_rbufs(struct slgt_info *info, unsigned int first, unsigned int last);
 static void rdma_reset(struct slgt_info *info);
-static int  rx_get_frame(struct slgt_info *info);
-static int  rx_get_buf(struct slgt_info *info);
+static bool rx_get_frame(struct slgt_info *info);
+static bool rx_get_buf(struct slgt_info *info);
 
 static void tx_start(struct slgt_info *info);
 static void tx_stop(struct slgt_info *info);
@@ -771,8 +771,7 @@ static void close(struct tty_struct *tty, struct file *filp)
 
        if (info->flags & ASYNC_INITIALIZED)
                wait_until_sent(tty, info->timeout);
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       flush_buffer(tty);
        tty_ldisc_flush(tty);
 
        shutdown(info);
@@ -913,20 +912,24 @@ cleanup:
        return ret;
 }
 
-static void put_char(struct tty_struct *tty, unsigned char ch)
+static int put_char(struct tty_struct *tty, unsigned char ch)
 {
        struct slgt_info *info = tty->driver_data;
        unsigned long flags;
+       int ret = 0;
 
        if (sanity_check(info, tty->name, "put_char"))
-               return;
+               return 0;
        DBGINFO(("%s put_char(%d)\n", info->device_name, ch));
        if (!info->tx_buf)
-               return;
+               return 0;
        spin_lock_irqsave(&info->lock,flags);
-       if (!info->tx_active && (info->tx_count < info->max_frame_size))
+       if (!info->tx_active && (info->tx_count < info->max_frame_size)) {
                info->tx_buf[info->tx_count++] = ch;
+               ret = 1;
+       }
        spin_unlock_irqrestore(&info->lock,flags);
+       return ret;
 }
 
 static void send_xchar(struct tty_struct *tty, char ch)
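
The put_char() rework above follows a tty-core interface change in this merge window: a driver's put_char method now returns int, non-zero when the character was actually queued and 0 when it had to be dropped (buffer full, transmitter busy, no buffer allocated). A sketch of that contract against an invented port structure, not the driver's own types; note that the return value starts at 0 and is only set to 1 on the queued path:

struct demo_port {
        unsigned char tx_buf[4096];
        unsigned int  tx_count;
        unsigned int  max_frame_size;
        int           tx_active;
};

/* return 1 if the byte was queued, 0 if it was dropped */
static int demo_put_char(struct demo_port *p, unsigned char ch)
{
        int ret = 0;

        if (!p->tx_active && p->tx_count < p->max_frame_size &&
            p->tx_count < sizeof(p->tx_buf)) {
                p->tx_buf[p->tx_count++] = ch;
                ret = 1;
        }
        return ret;
}

A caller that sees 0 can fall back to the bulk write path instead of silently losing the byte.
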
@@ -967,6 +970,8 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
         * Note: use tight timings here to satisfy the NIST-PCTS.
         */
 
+       lock_kernel();
+
        if (info->params.data_rate) {
                char_time = info->timeout/(32 * 5);
                if (!char_time)
@@ -984,6 +989,7 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
                if (timeout && time_after(jiffies, orig_jiffies + timeout))
                        break;
        }
+       unlock_kernel();
 
 exit:
        DBGINFO(("%s wait_until_sent exit\n", info->device_name));
@@ -1097,6 +1103,7 @@ static int ioctl(struct tty_struct *tty, struct file *file,
        struct serial_icounter_struct __user *p_cuser;  /* user space */
        unsigned long flags;
        void __user *argp = (void __user *)arg;
+       int ret;
 
        if (sanity_check(info, tty->name, "ioctl"))
                return -ENODEV;
@@ -1108,37 +1115,54 @@ static int ioctl(struct tty_struct *tty, struct file *file,
                    return -EIO;
        }
 
+       lock_kernel();
+
        switch (cmd) {
        case MGSL_IOCGPARAMS:
-               return get_params(info, argp);
+               ret = get_params(info, argp);
+               break;
        case MGSL_IOCSPARAMS:
-               return set_params(info, argp);
+               ret = set_params(info, argp);
+               break;
        case MGSL_IOCGTXIDLE:
-               return get_txidle(info, argp);
+               ret = get_txidle(info, argp);
+               break;
        case MGSL_IOCSTXIDLE:
-               return set_txidle(info, (int)arg);
+               ret = set_txidle(info, (int)arg);
+               break;
        case MGSL_IOCTXENABLE:
-               return tx_enable(info, (int)arg);
+               ret = tx_enable(info, (int)arg);
+               break;
        case MGSL_IOCRXENABLE:
-               return rx_enable(info, (int)arg);
+               ret = rx_enable(info, (int)arg);
+               break;
        case MGSL_IOCTXABORT:
-               return tx_abort(info);
+               ret = tx_abort(info);
+               break;
        case MGSL_IOCGSTATS:
-               return get_stats(info, argp);
+               ret = get_stats(info, argp);
+               break;
        case MGSL_IOCWAITEVENT:
-               return wait_mgsl_event(info, argp);
+               ret = wait_mgsl_event(info, argp);
+               break;
        case TIOCMIWAIT:
-               return modem_input_wait(info,(int)arg);
+               ret = modem_input_wait(info,(int)arg);
+               break;
        case MGSL_IOCGIF:
-               return get_interface(info, argp);
+               ret = get_interface(info, argp);
+               break;
        case MGSL_IOCSIF:
-               return set_interface(info,(int)arg);
+               ret = set_interface(info,(int)arg);
+               break;
        case MGSL_IOCSGPIO:
-               return set_gpio(info, argp);
+               ret = set_gpio(info, argp);
+               break;
        case MGSL_IOCGGPIO:
-               return get_gpio(info, argp);
+               ret = get_gpio(info, argp);
+               break;
        case MGSL_IOCWAITGPIO:
-               return wait_gpio(info, argp);
+               ret = wait_gpio(info, argp);
+               break;
        case TIOCGICOUNT:
                spin_lock_irqsave(&info->lock,flags);
                cnow = info->icount;
@@ -1155,12 +1179,15 @@ static int ioctl(struct tty_struct *tty, struct file *file,
                    put_user(cnow.parity, &p_cuser->parity) ||
                    put_user(cnow.brk, &p_cuser->brk) ||
                    put_user(cnow.buf_overrun, &p_cuser->buf_overrun))
-                       return -EFAULT;
-               return 0;
+                       ret = -EFAULT;
+               else
+                       ret = 0;
+               break;
        default:
-               return -ENOIOCTLCMD;
+               ret = -ENOIOCTLCMD;
        }
-       return 0;
+       unlock_kernel();
+       return ret;
 }
 
 /*
@@ -1968,8 +1994,8 @@ static int bh_action(struct slgt_info *info)
                rc = BH_STATUS;
        } else {
                /* Mark BH routine as complete */
-               info->bh_running   = 0;
-               info->bh_requested = 0;
+               info->bh_running = false;
+               info->bh_requested = false;
                rc = 0;
        }
 
@@ -1988,7 +2014,7 @@ static void bh_handler(struct work_struct *work)
 
        if (!info)
                return;
-       info->bh_running = 1;
+       info->bh_running = true;
 
        while((action = bh_action(info))) {
                switch (action) {
@@ -2158,7 +2184,7 @@ static void isr_serial(struct slgt_info *info)
 
        wr_reg16(info, SSR, status); /* clear pending */
 
-       info->irq_occurred = 1;
+       info->irq_occurred = true;
 
        if (info->params.mode == MGSL_MODE_ASYNC) {
                if (status & IRQ_TXIDLE) {
@@ -2225,7 +2251,7 @@ static void isr_rdma(struct slgt_info *info)
 
        if (status & (BIT5 + BIT4)) {
                DBGISR(("%s isr_rdma rx_restart=1\n", info->device_name));
-               info->rx_restart = 1;
+               info->rx_restart = true;
        }
        info->pending_bh |= BH_RECEIVE;
 }
@@ -2276,14 +2302,14 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
                                info->icount.txok++;
                }
 
-               info->tx_active = 0;
+               info->tx_active = false;
                info->tx_count = 0;
 
                del_timer(&info->tx_timer);
 
                if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done) {
                        info->signals &= ~SerialSignal_RTS;
-                       info->drop_rts_on_tx_done = 0;
+                       info->drop_rts_on_tx_done = false;
                        set_signals(info);
                }
 
@@ -2337,7 +2363,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
 
        while((gsr = rd_reg32(info, GSR) & 0xffffff00)) {
                DBGISR(("%s gsr=%08x\n", info->device_name, gsr));
-               info->irq_occurred = 1;
+               info->irq_occurred = true;
                for(i=0; i < info->port_count ; i++) {
                        if (info->port_array[i] == NULL)
                                continue;
@@ -2374,7 +2400,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
                    !port->bh_requested) {
                        DBGISR(("%s bh queued\n", port->device_name));
                        schedule_work(&port->task);
-                       port->bh_requested = 1;
+                       port->bh_requested = true;
                }
        }
 
@@ -3110,7 +3136,8 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
 {
        DECLARE_WAITQUEUE(wait, current);
        int             retval;
-       int             do_clocal = 0, extra_count = 0;
+       bool            do_clocal = false;
+       bool            extra_count = false;
        unsigned long   flags;
 
        DBGINFO(("%s block_til_ready\n", tty->driver->name));
@@ -3122,7 +3149,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
        }
 
        if (tty->termios->c_cflag & CLOCAL)
-               do_clocal = 1;
+               do_clocal = true;
 
        /* Wait for carrier detect and the line to become
         * free (i.e., not in use by the callout).  While we are in
@@ -3136,7 +3163,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
 
        spin_lock_irqsave(&info->lock, flags);
        if (!tty_hung_up_p(filp)) {
-               extra_count = 1;
+               extra_count = true;
                info->count--;
        }
        spin_unlock_irqrestore(&info->lock, flags);
@@ -3321,9 +3348,9 @@ static int claim_resources(struct slgt_info *info)
                goto errout;
        }
        else
-               info->reg_addr_requested = 1;
+               info->reg_addr_requested = true;
 
-       info->reg_addr = ioremap(info->phys_reg_addr, SLGT_REG_SIZE);
+       info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE);
        if (!info->reg_addr) {
                DBGERR(("%s cant map device registers, addr=%08X\n",
                        info->device_name, info->phys_reg_addr));
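
The switch from ioremap() to ioremap_nocache() above makes the uncached mapping of the register window explicit instead of relying on the architecture's default behaviour; memory-mapped device registers must not be accessed through the CPU cache. A rough sketch of the usual map/access/unmap sequence; the size macro, offsets and names are placeholders rather than values from this driver:

#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_REG_SIZE 0x100                     /* placeholder window size */

static void __iomem *demo_regs;

static int demo_map_regs(unsigned long phys_addr)
{
        demo_regs = ioremap_nocache(phys_addr, DEMO_REG_SIZE);
        if (!demo_regs)
                return -ENOMEM;
        writew(0, demo_regs + 0x00);            /* always use the accessors */
        return 0;
}

static void demo_unmap_regs(void)
{
        if (demo_regs)
                iounmap(demo_regs);
}
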
@@ -3341,12 +3368,12 @@ static void release_resources(struct slgt_info *info)
 {
        if (info->irq_requested) {
                free_irq(info->irq_level, info);
-               info->irq_requested = 0;
+               info->irq_requested = false;
        }
 
        if (info->reg_addr_requested) {
                release_mem_region(info->phys_reg_addr, SLGT_REG_SIZE);
-               info->reg_addr_requested = 0;
+               info->reg_addr_requested = false;
        }
 
        if (info->reg_addr) {
@@ -3511,7 +3538,7 @@ static void device_init(int adapter_num, struct pci_dev *pdev)
                                port_array[0]->device_name,
                                port_array[0]->irq_level));
                } else {
-                       port_array[0]->irq_requested = 1;
+                       port_array[0]->irq_requested = true;
                        adapter_test(port_array[0]);
                        for (i=1 ; i < port_count ; i++) {
                                port_array[i]->init_error = port_array[0]->init_error;
@@ -3654,7 +3681,7 @@ static int __init slgt_init(void)
                printk("%s pci_register_driver error=%d\n", driver_name, rc);
                goto error;
        }
-       pci_registered = 1;
+       pci_registered = true;
 
        if (!slgt_device_list)
                printk("%s no devices found\n",driver_name);
@@ -3812,8 +3839,8 @@ static void rx_stop(struct slgt_info *info)
 
        rdma_reset(info);
 
-       info->rx_enabled = 0;
-       info->rx_restart = 0;
+       info->rx_enabled = false;
+       info->rx_restart = false;
 }
 
 static void rx_start(struct slgt_info *info)
@@ -3849,8 +3876,8 @@ static void rx_start(struct slgt_info *info)
        /* enable receiver */
        wr_reg16(info, RCR, (unsigned short)(rd_reg16(info, RCR) | BIT1));
 
-       info->rx_restart = 0;
-       info->rx_enabled = 1;
+       info->rx_restart = false;
+       info->rx_enabled = true;
 }
 
 static void tx_start(struct slgt_info *info)
@@ -3858,11 +3885,11 @@ static void tx_start(struct slgt_info *info)
        if (!info->tx_enabled) {
                wr_reg16(info, TCR,
                         (unsigned short)((rd_reg16(info, TCR) | BIT1) & ~BIT2));
-               info->tx_enabled = TRUE;
+               info->tx_enabled = true;
        }
 
        if (info->tx_count) {
-               info->drop_rts_on_tx_done = 0;
+               info->drop_rts_on_tx_done = false;
 
                if (info->params.mode != MGSL_MODE_ASYNC) {
                        if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
@@ -3870,7 +3897,7 @@ static void tx_start(struct slgt_info *info)
                                if (!(info->signals & SerialSignal_RTS)) {
                                        info->signals |= SerialSignal_RTS;
                                        set_signals(info);
-                                       info->drop_rts_on_tx_done = 1;
+                                       info->drop_rts_on_tx_done = true;
                                }
                        }
 
@@ -3888,7 +3915,7 @@ static void tx_start(struct slgt_info *info)
                        wr_reg16(info, SSR, IRQ_TXIDLE);
                }
                tdma_start(info);
-               info->tx_active = 1;
+               info->tx_active = true;
        }
 }
 
@@ -3949,8 +3976,8 @@ static void tx_stop(struct slgt_info *info)
 
        reset_tbufs(info);
 
-       info->tx_enabled = 0;
-       info->tx_active  = 0;
+       info->tx_enabled = false;
+       info->tx_active = false;
 }
 
 static void reset_port(struct slgt_info *info)
@@ -4470,14 +4497,13 @@ static void reset_rbufs(struct slgt_info *info)
 /*
  * pass receive HDLC frame to upper layer
  *
- * return 1 if frame available, otherwise 0
+ * return true if frame available, otherwise false
  */
-static int rx_get_frame(struct slgt_info *info)
+static bool rx_get_frame(struct slgt_info *info)
 {
        unsigned int start, end;
        unsigned short status;
        unsigned int framesize = 0;
-       int rc = 0;
        unsigned long flags;
        struct tty_struct *tty = info->tty;
        unsigned char addr_field = 0xff;
@@ -4601,23 +4627,23 @@ check_again:
                }
        }
        free_rbufs(info, start, end);
-       rc = 1;
+       return true;
 
 cleanup:
-       return rc;
+       return false;
 }
 
 /*
  * pass receive buffer (RAW synchronous mode) to tty layer
- * return 1 if buffer available, otherwise 0
+ * return true if buffer available, otherwise false
  */
-static int rx_get_buf(struct slgt_info *info)
+static bool rx_get_buf(struct slgt_info *info)
 {
        unsigned int i = info->rbuf_current;
        unsigned int count;
 
        if (!desc_complete(info->rbufs[i]))
-               return 0;
+               return false;
        count = desc_count(info->rbufs[i]);
        switch(info->params.mode) {
        case MGSL_MODE_MONOSYNC:
@@ -4633,7 +4659,7 @@ static int rx_get_buf(struct slgt_info *info)
                ldisc_receive_buf(info->tty, info->rbufs[i].buf,
                                  info->flag_buf, count);
        free_rbufs(info, i, i);
-       return 1;
+       return true;
 }
 
 static void reset_tbufs(struct slgt_info *info)
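
rx_get_frame() and rx_get_buf() now return bool: true when a frame or buffer was handed to the line discipline, false when nothing complete is pending. The natural consumer is a bottom-half drain loop that simply calls the function until it reports false, as synclinkmp.c's bh_receive() does further down. A trivial stand-alone illustration of that shape with a faked pending count; none of these names exist in the driver:

#include <stdbool.h>

static int pending_frames = 3;          /* pretend three frames are queued */

/* stand-in for rx_get_frame(): true while another frame was delivered */
static bool fetch_frame(void)
{
        if (pending_frames == 0)
                return false;
        pending_frames--;
        return true;
}

static void drain_rx(void)
{
        while (fetch_frame())
                ;                       /* loop ends when nothing is pending */
}
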
@@ -4758,7 +4784,7 @@ static int irq_test(struct slgt_info *info)
 
        /* assume failure */
        info->init_error = DiagStatus_IrqFailure;
-       info->irq_occurred = FALSE;
+       info->irq_occurred = false;
 
        spin_unlock_irqrestore(&info->lock, flags);
 
@@ -4891,7 +4917,7 @@ static void tx_timeout(unsigned long context)
                info->icount.txtimeout++;
        }
        spin_lock_irqsave(&info->lock,flags);
-       info->tx_active = 0;
+       info->tx_active = false;
        info->tx_count = 0;
        spin_unlock_irqrestore(&info->lock,flags);
 
index c96062ea72b4acffd82ea352006e7af53adb4329..bec54866e0bb6630b0f6d05f7890daf365695134 100644 (file)
@@ -188,9 +188,9 @@ typedef struct _synclinkmp_info {
 
        u32 pending_bh;
 
-       int bh_running;                         /* Protection from multiple */
+       bool bh_running;                                /* Protection from multiple */
        int isr_overflow;
-       int bh_requested;
+       bool bh_requested;
 
        int dcd_chkcount;                       /* check counts to prevent */
        int cts_chkcount;                       /* too many IRQs if a signal */
@@ -213,11 +213,11 @@ typedef struct _synclinkmp_info {
        unsigned char *tmp_rx_buf;
        unsigned int tmp_rx_buf_count;
 
-       int rx_enabled;
-       int rx_overflow;
+       bool rx_enabled;
+       bool rx_overflow;
 
-       int tx_enabled;
-       int tx_active;
+       bool tx_enabled;
+       bool tx_active;
        u32 idle_mode;
 
        unsigned char ie0_value;
@@ -238,13 +238,13 @@ typedef struct _synclinkmp_info {
 
        unsigned int irq_level;                 /* interrupt level */
        unsigned long irq_flags;
-       int irq_requested;                      /* nonzero if IRQ requested */
+       bool irq_requested;                     /* true if IRQ requested */
 
        MGSL_PARAMS params;                     /* communications parameters */
 
        unsigned char serial_signals;           /* current serial signal states */
 
-       int irq_occurred;                       /* for diagnostics use */
+       bool irq_occurred;                      /* for diagnostics use */
        unsigned int init_error;                /* Initialization startup error */
 
        u32 last_mem_alloc;
@@ -255,7 +255,7 @@ typedef struct _synclinkmp_info {
        unsigned char* sca_base;                /* HD64570 SCA Memory address */
        u32 phys_sca_base;
        u32 sca_offset;
-       int sca_base_requested;
+       bool sca_base_requested;
 
        unsigned char* lcr_base;                /* local config registers (PCI only) */
        u32 phys_lcr_base;
@@ -265,12 +265,12 @@ typedef struct _synclinkmp_info {
        unsigned char* statctrl_base;           /* status/control register memory */
        u32 phys_statctrl_base;
        u32 statctrl_offset;
-       int sca_statctrl_requested;
+       bool sca_statctrl_requested;
 
        u32 misc_ctrl_value;
        char flag_buf[MAX_ASYNC_BUFFER_SIZE];
        char char_buf[MAX_ASYNC_BUFFER_SIZE];
-       BOOLEAN drop_rts_on_tx_done;
+       bool drop_rts_on_tx_done;
 
        struct  _input_signal_events    input_signal_events;
 
@@ -519,7 +519,7 @@ static void hangup(struct tty_struct *tty);
 static void set_termios(struct tty_struct *tty, struct ktermios *old_termios);
 
 static int  write(struct tty_struct *tty, const unsigned char *buf, int count);
-static void put_char(struct tty_struct *tty, unsigned char ch);
+static int put_char(struct tty_struct *tty, unsigned char ch);
 static void send_xchar(struct tty_struct *tty, char ch);
 static void wait_until_sent(struct tty_struct *tty, int timeout);
 static int  write_room(struct tty_struct *tty);
@@ -571,12 +571,12 @@ static void shutdown(SLMP_INFO *info);
 static void program_hw(SLMP_INFO *info);
 static void change_params(SLMP_INFO *info);
 
-static int  init_adapter(SLMP_INFO *info);
-static int  register_test(SLMP_INFO *info);
-static int  irq_test(SLMP_INFO *info);
-static int  loopback_test(SLMP_INFO *info);
+static bool init_adapter(SLMP_INFO *info);
+static bool register_test(SLMP_INFO *info);
+static bool irq_test(SLMP_INFO *info);
+static bool loopback_test(SLMP_INFO *info);
 static int  adapter_test(SLMP_INFO *info);
-static int  memory_test(SLMP_INFO *info);
+static bool memory_test(SLMP_INFO *info);
 
 static void reset_adapter(SLMP_INFO *info);
 static void reset_port(SLMP_INFO *info);
@@ -587,7 +587,7 @@ static void rx_stop(SLMP_INFO *info);
 static void rx_start(SLMP_INFO *info);
 static void rx_reset_buffers(SLMP_INFO *info);
 static void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last);
-static int  rx_get_frame(SLMP_INFO *info);
+static bool rx_get_frame(SLMP_INFO *info);
 
 static void tx_start(SLMP_INFO *info);
 static void tx_stop(SLMP_INFO *info);
@@ -862,8 +862,7 @@ static void close(struct tty_struct *tty, struct file *filp)
        if (info->flags & ASYNC_INITIALIZED)
                wait_until_sent(tty, info->timeout);
 
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       flush_buffer(tty);
 
        tty_ldisc_flush(tty);
 
@@ -1046,10 +1045,11 @@ cleanup:
 
 /* Add a character to the transmit buffer.
  */
-static void put_char(struct tty_struct *tty, unsigned char ch)
+static int put_char(struct tty_struct *tty, unsigned char ch)
 {
        SLMP_INFO *info = (SLMP_INFO *)tty->driver_data;
        unsigned long flags;
+       int ret = 0;
 
        if ( debug_level >= DEBUG_LEVEL_INFO ) {
                printk( "%s(%d):%s put_char(%d)\n",
@@ -1057,10 +1057,10 @@ static void put_char(struct tty_struct *tty, unsigned char ch)
        }
 
        if (sanity_check(info, tty->name, "put_char"))
-               return;
+               return 0;
 
        if (!info->tx_buf)
-               return;
+               return 0;
 
        spin_lock_irqsave(&info->lock,flags);
 
@@ -1072,10 +1072,12 @@ static void put_char(struct tty_struct *tty, unsigned char ch)
                        if (info->tx_put >= info->max_frame_size)
                                info->tx_put -= info->max_frame_size;
                        info->tx_count++;
+                       ret = 1;
                }
        }
 
        spin_unlock_irqrestore(&info->lock,flags);
+       return ret;
 }
 
 /* Send a high-priority XON/XOFF character
@@ -1119,6 +1121,8 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
        if (sanity_check(info, tty->name, "wait_until_sent"))
                return;
 
+       lock_kernel();
+
        if (!(info->flags & ASYNC_INITIALIZED))
                goto exit;
 
@@ -1161,6 +1165,7 @@ static void wait_until_sent(struct tty_struct *tty, int timeout)
        }
 
 exit:
+       unlock_kernel();
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):%s wait_until_sent() exit\n",
                         __FILE__,__LINE__, info->device_name );
@@ -1176,6 +1181,7 @@ static int write_room(struct tty_struct *tty)
        if (sanity_check(info, tty->name, "write_room"))
                return 0;
 
+       lock_kernel();
        if (info->params.mode == MGSL_MODE_HDLC) {
                ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE;
        } else {
@@ -1183,6 +1189,7 @@ static int write_room(struct tty_struct *tty)
                if (ret < 0)
                        ret = 0;
        }
+       unlock_kernel();
 
        if (debug_level >= DEBUG_LEVEL_INFO)
                printk("%s(%d):%s write_room()=%d\n",
@@ -1303,7 +1310,7 @@ static void tx_release(struct tty_struct *tty)
  *
  * Return Value:       0 if success, otherwise error code
  */
-static int ioctl(struct tty_struct *tty, struct file *file,
+static int do_ioctl(struct tty_struct *tty, struct file *file,
                 unsigned int cmd, unsigned long arg)
 {
        SLMP_INFO *info = (SLMP_INFO *)tty->driver_data;
@@ -1393,6 +1400,16 @@ static int ioctl(struct tty_struct *tty, struct file *file,
        return 0;
 }
 
+static int ioctl(struct tty_struct *tty, struct file *file,
+                unsigned int cmd, unsigned long arg)
+{
+       int ret;
+       lock_kernel();
+       ret = do_ioctl(tty, file, cmd, arg);
+       unlock_kernel();
+       return ret;
+}
+
 /*
  * /proc fs routines....
  */
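
The lock_kernel()/unlock_kernel() calls added to wait_until_sent(), write_room() and the new ioctl() wrapper above are part of the push to move the Big Kernel Lock out of the tty core and into the drivers that still rely on it: the core stops taking the BKL around these methods, so each driver takes it itself for now. The do_ioctl() wrapper just introduced is the typical shape; a stripped-down sketch with generic names (the handler body is a stub, not the driver's logic):

#include <linux/smp_lock.h>

static int do_demo_ioctl(unsigned int cmd, unsigned long arg)
{
        return 0;                      /* real per-command handling goes here */
}

static int demo_ioctl(unsigned int cmd, unsigned long arg)
{
        int ret;

        lock_kernel();                 /* keep the serialization the core used to provide */
        ret = do_demo_ioctl(cmd, arg);
        unlock_kernel();
        return ret;
}
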
@@ -1473,7 +1490,7 @@ static inline int line_info(char *buf, SLMP_INFO *info)
 
 /* Called to print information about devices
  */
-int read_proc(char *page, char **start, off_t off, int count,
+static int read_proc(char *page, char **start, off_t off, int count,
              int *eof, void *data)
 {
        int len = 0, l;
@@ -2024,7 +2041,7 @@ static void hdlcdev_exit(SLMP_INFO *info)
 /* Return next bottom half action to perform.
  * Return Value:       BH action code or 0 if nothing to do.
  */
-int bh_action(SLMP_INFO *info)
+static int bh_action(SLMP_INFO *info)
 {
        unsigned long flags;
        int rc = 0;
@@ -2044,8 +2061,8 @@ int bh_action(SLMP_INFO *info)
 
        if (!rc) {
                /* Mark BH routine as complete */
-               info->bh_running   = 0;
-               info->bh_requested = 0;
+               info->bh_running = false;
+               info->bh_requested = false;
        }
 
        spin_unlock_irqrestore(&info->lock,flags);
@@ -2055,7 +2072,7 @@ int bh_action(SLMP_INFO *info)
 
 /* Perform bottom half processing of work items queued by ISR.
  */
-void bh_handler(struct work_struct *work)
+static void bh_handler(struct work_struct *work)
 {
        SLMP_INFO *info = container_of(work, SLMP_INFO, task);
        int action;
@@ -2067,7 +2084,7 @@ void bh_handler(struct work_struct *work)
                printk( "%s(%d):%s bh_handler() entry\n",
                        __FILE__,__LINE__,info->device_name);
 
-       info->bh_running = 1;
+       info->bh_running = true;
 
        while((action = bh_action(info)) != 0) {
 
@@ -2100,7 +2117,7 @@ void bh_handler(struct work_struct *work)
                        __FILE__,__LINE__,info->device_name);
 }
 
-void bh_receive(SLMP_INFO *info)
+static void bh_receive(SLMP_INFO *info)
 {
        if ( debug_level >= DEBUG_LEVEL_BH )
                printk( "%s(%d):%s bh_receive()\n",
@@ -2109,7 +2126,7 @@ void bh_receive(SLMP_INFO *info)
        while( rx_get_frame(info) );
 }
 
-void bh_transmit(SLMP_INFO *info)
+static void bh_transmit(SLMP_INFO *info)
 {
        struct tty_struct *tty = info->tty;
 
@@ -2121,7 +2138,7 @@ void bh_transmit(SLMP_INFO *info)
                tty_wakeup(tty);
 }
 
-void bh_status(SLMP_INFO *info)
+static void bh_status(SLMP_INFO *info)
 {
        if ( debug_level >= DEBUG_LEVEL_BH )
                printk( "%s(%d):%s bh_status() entry\n",
@@ -2133,7 +2150,7 @@ void bh_status(SLMP_INFO *info)
        info->cts_chkcount = 0;
 }
 
-void isr_timer(SLMP_INFO * info)
+static void isr_timer(SLMP_INFO * info)
 {
        unsigned char timer = (info->port_num & 1) ? TIMER2 : TIMER0;
 
@@ -2152,14 +2169,14 @@ void isr_timer(SLMP_INFO * info)
         */
        write_reg(info, (unsigned char)(timer + TMCS), 0);
 
-       info->irq_occurred = TRUE;
+       info->irq_occurred = true;
 
        if ( debug_level >= DEBUG_LEVEL_ISR )
                printk("%s(%d):%s isr_timer()\n",
                        __FILE__,__LINE__,info->device_name);
 }
 
-void isr_rxint(SLMP_INFO * info)
+static void isr_rxint(SLMP_INFO * info)
 {
        struct tty_struct *tty = info->tty;
        struct  mgsl_icount *icount = &info->icount;
@@ -2218,7 +2235,7 @@ void isr_rxint(SLMP_INFO * info)
 /*
  * handle async rx data interrupts
  */
-void isr_rxrdy(SLMP_INFO * info)
+static void isr_rxrdy(SLMP_INFO * info)
 {
        u16 status;
        unsigned char DataByte;
@@ -2232,7 +2249,7 @@ void isr_rxrdy(SLMP_INFO * info)
        while((status = read_reg(info,CST0)) & BIT0)
        {
                int flag = 0;
-               int over = 0;
+               bool over = false;
                DataByte = read_reg(info,TRB);
 
                icount->rx++;
@@ -2265,7 +2282,7 @@ void isr_rxrdy(SLMP_INFO * info)
                                         * reported immediately, and doesn't
                                         * affect the current character
                                         */
-                                       over = 1;
+                                       over = true;
                                }
                        }
                }       /* end of if (error) */
@@ -2318,14 +2335,14 @@ static void isr_txeom(SLMP_INFO * info, unsigned char status)
                                info->icount.txok++;
                }
 
-               info->tx_active = 0;
+               info->tx_active = false;
                info->tx_count = info->tx_put = info->tx_get = 0;
 
                del_timer(&info->tx_timer);
 
                if (info->params.mode != MGSL_MODE_ASYNC && info->drop_rts_on_tx_done ) {
                        info->serial_signals &= ~SerialSignal_RTS;
-                       info->drop_rts_on_tx_done = 0;
+                       info->drop_rts_on_tx_done = false;
                        set_signals(info);
                }
 
@@ -2348,7 +2365,7 @@ static void isr_txeom(SLMP_INFO * info, unsigned char status)
 /*
  * handle tx status interrupts
  */
-void isr_txint(SLMP_INFO * info)
+static void isr_txint(SLMP_INFO * info)
 {
        unsigned char status = read_reg(info, SR1) & info->ie1_value & (UDRN + IDLE + CCTS);
 
@@ -2376,7 +2393,7 @@ void isr_txint(SLMP_INFO * info)
 /*
  * handle async tx data interrupts
  */
-void isr_txrdy(SLMP_INFO * info)
+static void isr_txrdy(SLMP_INFO * info)
 {
        if ( debug_level >= DEBUG_LEVEL_ISR )
                printk("%s(%d):%s isr_txrdy() tx_count=%d\n",
@@ -2398,7 +2415,7 @@ void isr_txrdy(SLMP_INFO * info)
        if ( info->tx_count )
                tx_load_fifo( info );
        else {
-               info->tx_active = 0;
+               info->tx_active = false;
                info->ie0_value &= ~TXRDYE;
                write_reg(info, IE0, info->ie0_value);
        }
@@ -2407,7 +2424,7 @@ void isr_txrdy(SLMP_INFO * info)
                info->pending_bh |= BH_TRANSMIT;
 }
 
-void isr_rxdmaok(SLMP_INFO * info)
+static void isr_rxdmaok(SLMP_INFO * info)
 {
        /* BIT7 = EOT (end of transfer)
         * BIT6 = EOM (end of message/frame)
@@ -2424,7 +2441,7 @@ void isr_rxdmaok(SLMP_INFO * info)
        info->pending_bh |= BH_RECEIVE;
 }
 
-void isr_rxdmaerror(SLMP_INFO * info)
+static void isr_rxdmaerror(SLMP_INFO * info)
 {
        /* BIT5 = BOF (buffer overflow)
         * BIT4 = COF (counter overflow)
@@ -2438,11 +2455,11 @@ void isr_rxdmaerror(SLMP_INFO * info)
                printk("%s(%d):%s isr_rxdmaerror(), status=%02x\n",
                        __FILE__,__LINE__,info->device_name,status);
 
-       info->rx_overflow = TRUE;
+       info->rx_overflow = true;
        info->pending_bh |= BH_RECEIVE;
 }
 
-void isr_txdmaok(SLMP_INFO * info)
+static void isr_txdmaok(SLMP_INFO * info)
 {
        unsigned char status_reg1 = read_reg(info, SR1);
 
@@ -2460,7 +2477,7 @@ void isr_txdmaok(SLMP_INFO * info)
        write_reg(info, IE0, info->ie0_value);
 }
 
-void isr_txdmaerror(SLMP_INFO * info)
+static void isr_txdmaerror(SLMP_INFO * info)
 {
        /* BIT5 = BOF (buffer overflow)
         * BIT4 = COF (counter overflow)
@@ -2477,7 +2494,7 @@ void isr_txdmaerror(SLMP_INFO * info)
 
 /* handle input serial signal changes
  */
-void isr_io_pin( SLMP_INFO *info, u16 status )
+static void isr_io_pin( SLMP_INFO *info, u16 status )
 {
        struct  mgsl_icount *icount;
 
@@ -2691,7 +2708,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
                                printk("%s(%d):%s queueing bh task.\n",
                                        __FILE__,__LINE__,port->device_name);
                        schedule_work(&port->task);
-                       port->bh_requested = 1;
+                       port->bh_requested = true;
                }
        }
 
@@ -3320,7 +3337,8 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
 {
        DECLARE_WAITQUEUE(wait, current);
        int             retval;
-       int             do_clocal = 0, extra_count = 0;
+       bool            do_clocal = false;
+       bool            extra_count = false;
        unsigned long   flags;
 
        if (debug_level >= DEBUG_LEVEL_INFO)
@@ -3335,7 +3353,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
        }
 
        if (tty->termios->c_cflag & CLOCAL)
-               do_clocal = 1;
+               do_clocal = true;
 
        /* Wait for carrier detect and the line to become
         * free (i.e., not in use by the callout).  While we are in
@@ -3353,7 +3371,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
 
        spin_lock_irqsave(&info->lock, flags);
        if (!tty_hung_up_p(filp)) {
-               extra_count = 1;
+               extra_count = true;
                info->count--;
        }
        spin_unlock_irqrestore(&info->lock, flags);
@@ -3413,7 +3431,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
        return retval;
 }
 
-int alloc_dma_bufs(SLMP_INFO *info)
+static int alloc_dma_bufs(SLMP_INFO *info)
 {
        unsigned short BuffersPerFrame;
        unsigned short BufferCount;
@@ -3487,7 +3505,7 @@ int alloc_dma_bufs(SLMP_INFO *info)
 
 /* Allocate DMA buffers for the transmit and receive descriptor lists.
  */
-int alloc_buf_list(SLMP_INFO *info)
+static int alloc_buf_list(SLMP_INFO *info)
 {
        unsigned int i;
 
@@ -3546,7 +3564,7 @@ int alloc_buf_list(SLMP_INFO *info)
 
 /* Allocate the frame DMA buffers used by the specified buffer list.
  */
-int alloc_frame_bufs(SLMP_INFO *info, SCADESC *buf_list,SCADESC_EX *buf_list_ex,int count)
+static int alloc_frame_bufs(SLMP_INFO *info, SCADESC *buf_list,SCADESC_EX *buf_list_ex,int count)
 {
        int i;
        unsigned long phys_addr;
@@ -3563,7 +3581,7 @@ int alloc_frame_bufs(SLMP_INFO *info, SCADESC *buf_list,SCADESC_EX *buf_list_ex,
        return 0;
 }
 
-void free_dma_bufs(SLMP_INFO *info)
+static void free_dma_bufs(SLMP_INFO *info)
 {
        info->buffer_list = NULL;
        info->rx_buf_list = NULL;
@@ -3573,7 +3591,7 @@ void free_dma_bufs(SLMP_INFO *info)
 /* allocate buffer large enough to hold max_frame_size.
  * This buffer is used to pass an assembled frame to the line discipline.
  */
-int alloc_tmp_rx_buf(SLMP_INFO *info)
+static int alloc_tmp_rx_buf(SLMP_INFO *info)
 {
        info->tmp_rx_buf = kmalloc(info->max_frame_size, GFP_KERNEL);
        if (info->tmp_rx_buf == NULL)
@@ -3581,13 +3599,13 @@ int alloc_tmp_rx_buf(SLMP_INFO *info)
        return 0;
 }
 
-void free_tmp_rx_buf(SLMP_INFO *info)
+static void free_tmp_rx_buf(SLMP_INFO *info)
 {
        kfree(info->tmp_rx_buf);
        info->tmp_rx_buf = NULL;
 }
 
-int claim_resources(SLMP_INFO *info)
+static int claim_resources(SLMP_INFO *info)
 {
        if (request_mem_region(info->phys_memory_base,SCA_MEM_SIZE,"synclinkmp") == NULL) {
                printk( "%s(%d):%s mem addr conflict, Addr=%08X\n",
@@ -3596,7 +3614,7 @@ int claim_resources(SLMP_INFO *info)
                goto errout;
        }
        else
-               info->shared_mem_requested = 1;
+               info->shared_mem_requested = true;
 
        if (request_mem_region(info->phys_lcr_base + info->lcr_offset,128,"synclinkmp") == NULL) {
                printk( "%s(%d):%s lcr mem addr conflict, Addr=%08X\n",
@@ -3605,7 +3623,7 @@ int claim_resources(SLMP_INFO *info)
                goto errout;
        }
        else
-               info->lcr_mem_requested = 1;
+               info->lcr_mem_requested = true;
 
        if (request_mem_region(info->phys_sca_base + info->sca_offset,SCA_BASE_SIZE,"synclinkmp") == NULL) {
                printk( "%s(%d):%s sca mem addr conflict, Addr=%08X\n",
@@ -3614,7 +3632,7 @@ int claim_resources(SLMP_INFO *info)
                goto errout;
        }
        else
-               info->sca_base_requested = 1;
+               info->sca_base_requested = true;
 
        if (request_mem_region(info->phys_statctrl_base + info->statctrl_offset,SCA_REG_SIZE,"synclinkmp") == NULL) {
                printk( "%s(%d):%s stat/ctrl mem addr conflict, Addr=%08X\n",
@@ -3623,9 +3641,10 @@ int claim_resources(SLMP_INFO *info)
                goto errout;
        }
        else
-               info->sca_statctrl_requested = 1;
+               info->sca_statctrl_requested = true;
 
-       info->memory_base = ioremap(info->phys_memory_base,SCA_MEM_SIZE);
+       info->memory_base = ioremap_nocache(info->phys_memory_base,
+                                                               SCA_MEM_SIZE);
        if (!info->memory_base) {
                printk( "%s(%d):%s Cant map shared memory, MemAddr=%08X\n",
                        __FILE__,__LINE__,info->device_name, info->phys_memory_base );
@@ -3633,7 +3652,7 @@ int claim_resources(SLMP_INFO *info)
                goto errout;
        }
 
-       info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE);
+       info->lcr_base = ioremap_nocache(info->phys_lcr_base, PAGE_SIZE);
        if (!info->lcr_base) {
                printk( "%s(%d):%s Cant map LCR memory, MemAddr=%08X\n",
                        __FILE__,__LINE__,info->device_name, info->phys_lcr_base );
@@ -3642,7 +3661,7 @@ int claim_resources(SLMP_INFO *info)
        }
        info->lcr_base += info->lcr_offset;
 
-       info->sca_base = ioremap(info->phys_sca_base,PAGE_SIZE);
+       info->sca_base = ioremap_nocache(info->phys_sca_base, PAGE_SIZE);
        if (!info->sca_base) {
                printk( "%s(%d):%s Cant map SCA memory, MemAddr=%08X\n",
                        __FILE__,__LINE__,info->device_name, info->phys_sca_base );
@@ -3651,7 +3670,8 @@ int claim_resources(SLMP_INFO *info)
        }
        info->sca_base += info->sca_offset;
 
-       info->statctrl_base = ioremap(info->phys_statctrl_base,PAGE_SIZE);
+       info->statctrl_base = ioremap_nocache(info->phys_statctrl_base,
+                                                               PAGE_SIZE);
        if (!info->statctrl_base) {
                printk( "%s(%d):%s Cant map SCA Status/Control memory, MemAddr=%08X\n",
                        __FILE__,__LINE__,info->device_name, info->phys_statctrl_base );
@@ -3674,7 +3694,7 @@ errout:
        return -ENODEV;
 }
 
-void release_resources(SLMP_INFO *info)
+static void release_resources(SLMP_INFO *info)
 {
        if ( debug_level >= DEBUG_LEVEL_INFO )
                printk( "%s(%d):%s release_resources() entry\n",
@@ -3682,24 +3702,24 @@ void release_resources(SLMP_INFO *info)
 
        if ( info->irq_requested ) {
                free_irq(info->irq_level, info);
-               info->irq_requested = 0;
+               info->irq_requested = false;
        }
 
        if ( info->shared_mem_requested ) {
                release_mem_region(info->phys_memory_base,SCA_MEM_SIZE);
-               info->shared_mem_requested = 0;
+               info->shared_mem_requested = false;
        }
        if ( info->lcr_mem_requested ) {
                release_mem_region(info->phys_lcr_base + info->lcr_offset,128);
-               info->lcr_mem_requested = 0;
+               info->lcr_mem_requested = false;
        }
        if ( info->sca_base_requested ) {
                release_mem_region(info->phys_sca_base + info->sca_offset,SCA_BASE_SIZE);
-               info->sca_base_requested = 0;
+               info->sca_base_requested = false;
        }
        if ( info->sca_statctrl_requested ) {
                release_mem_region(info->phys_statctrl_base + info->statctrl_offset,SCA_REG_SIZE);
-               info->sca_statctrl_requested = 0;
+               info->sca_statctrl_requested = false;
        }
 
        if (info->memory_base){
@@ -3730,7 +3750,7 @@ void release_resources(SLMP_INFO *info)
 /* Add the specified device instance data structure to the
  * global linked list of devices and increment the device count.
  */
-void add_device(SLMP_INFO *info)
+static void add_device(SLMP_INFO *info)
 {
        info->next_device = NULL;
        info->line = synclinkmp_device_count;
@@ -3853,7 +3873,7 @@ static SLMP_INFO *alloc_dev(int adapter_num, int port_num, struct pci_dev *pdev)
        return info;
 }
 
-void device_init(int adapter_num, struct pci_dev *pdev)
+static void device_init(int adapter_num, struct pci_dev *pdev)
 {
        SLMP_INFO *port_array[SCA_MAX_PORTS];
        int port;
@@ -3902,7 +3922,7 @@ void device_init(int adapter_num, struct pci_dev *pdev)
                                port_array[0]->irq_level );
                }
                else {
-                       port_array[0]->irq_requested = 1;
+                       port_array[0]->irq_requested = true;
                        adapter_test(port_array[0]);
                }
        }
@@ -4047,7 +4067,7 @@ module_exit(synclinkmp_exit);
  * The TxCLK and RxCLK signals are generated from the BRG and
  * the TxD is looped back to the RxD internally.
  */
-void enable_loopback(SLMP_INFO *info, int enable)
+static void enable_loopback(SLMP_INFO *info, int enable)
 {
        if (enable) {
                /* MD2 (Mode Register 2)
@@ -4094,7 +4114,7 @@ void enable_loopback(SLMP_INFO *info, int enable)
  *     data_rate       data rate of clock in bits per second
  *                     A data rate of 0 disables the AUX clock.
  */
-void set_rate( SLMP_INFO *info, u32 data_rate )
+static void set_rate( SLMP_INFO *info, u32 data_rate )
 {
                u32 TMCValue;
                unsigned char BRValue;
@@ -4140,7 +4160,7 @@ void set_rate( SLMP_INFO *info, u32 data_rate )
 
 /* Disable receiver
  */
-void rx_stop(SLMP_INFO *info)
+static void rx_stop(SLMP_INFO *info)
 {
        if (debug_level >= DEBUG_LEVEL_ISR)
                printk("%s(%d):%s rx_stop()\n",
@@ -4155,13 +4175,13 @@ void rx_stop(SLMP_INFO *info)
        write_reg(info, RXDMA + DCMD, SWABORT); /* reset/init Rx DMA */
        write_reg(info, RXDMA + DIR, 0);        /* disable Rx DMA interrupts */
 
-       info->rx_enabled = 0;
-       info->rx_overflow = 0;
+       info->rx_enabled = false;
+       info->rx_overflow = false;
 }
 
 /* enable the receiver
  */
-void rx_start(SLMP_INFO *info)
+static void rx_start(SLMP_INFO *info)
 {
        int i;
 
@@ -4211,14 +4231,14 @@ void rx_start(SLMP_INFO *info)
 
        write_reg(info, CMD, RXENABLE);
 
-       info->rx_overflow = FALSE;
-       info->rx_enabled = 1;
+       info->rx_overflow = false;
+       info->rx_enabled = true;
 }
 
 /* Enable the transmitter and send a transmit frame if
  * one is loaded in the DMA buffers.
  */
-void tx_start(SLMP_INFO *info)
+static void tx_start(SLMP_INFO *info)
 {
        if (debug_level >= DEBUG_LEVEL_ISR)
                printk("%s(%d):%s tx_start() tx_count=%d\n",
@@ -4227,7 +4247,7 @@ void tx_start(SLMP_INFO *info)
        if (!info->tx_enabled ) {
                write_reg(info, CMD, TXRESET);
                write_reg(info, CMD, TXENABLE);
-               info->tx_enabled = TRUE;
+               info->tx_enabled = true;
        }
 
        if ( info->tx_count ) {
@@ -4236,7 +4256,7 @@ void tx_start(SLMP_INFO *info)
                /* RTS and set a flag indicating that the driver should */
                /* negate RTS when the transmission completes. */
 
-               info->drop_rts_on_tx_done = 0;
+               info->drop_rts_on_tx_done = false;
 
                if (info->params.mode != MGSL_MODE_ASYNC) {
 
@@ -4245,7 +4265,7 @@ void tx_start(SLMP_INFO *info)
                                if ( !(info->serial_signals & SerialSignal_RTS) ) {
                                        info->serial_signals |= SerialSignal_RTS;
                                        set_signals( info );
-                                       info->drop_rts_on_tx_done = 1;
+                                       info->drop_rts_on_tx_done = true;
                                }
                        }
 
@@ -4282,13 +4302,13 @@ void tx_start(SLMP_INFO *info)
                        write_reg(info, IE0, info->ie0_value);
                }
 
-               info->tx_active = 1;
+               info->tx_active = true;
        }
 }
 
 /* stop the transmitter and DMA
  */
-void tx_stop( SLMP_INFO *info )
+static void tx_stop( SLMP_INFO *info )
 {
        if (debug_level >= DEBUG_LEVEL_ISR)
                printk("%s(%d):%s tx_stop()\n",
@@ -4308,14 +4328,14 @@ void tx_stop( SLMP_INFO *info )
        info->ie0_value &= ~TXRDYE;
        write_reg(info, IE0, info->ie0_value);  /* disable tx data interrupts */
 
-       info->tx_enabled = 0;
-       info->tx_active  = 0;
+       info->tx_enabled = false;
+       info->tx_active = false;
 }
 
 /* Fill the transmit FIFO until the FIFO is full or
  * there is no more data to load.
  */
-void tx_load_fifo(SLMP_INFO *info)
+static void tx_load_fifo(SLMP_INFO *info)
 {
        u8 TwoBytes[2];
 
@@ -4364,7 +4384,7 @@ void tx_load_fifo(SLMP_INFO *info)
 
 /* Reset a port to a known state
  */
-void reset_port(SLMP_INFO *info)
+static void reset_port(SLMP_INFO *info)
 {
        if (info->sca_base) {
 
@@ -4388,7 +4408,7 @@ void reset_port(SLMP_INFO *info)
 
 /* Reset all the ports to a known state.
  */
-void reset_adapter(SLMP_INFO *info)
+static void reset_adapter(SLMP_INFO *info)
 {
        int i;
 
@@ -4400,7 +4420,7 @@ void reset_adapter(SLMP_INFO *info)
 
 /* Program port for asynchronous communications.
  */
-void async_mode(SLMP_INFO *info)
+static void async_mode(SLMP_INFO *info)
 {
 
        unsigned char RegValue;
@@ -4539,7 +4559,7 @@ void async_mode(SLMP_INFO *info)
 
 /* Program the SCA for HDLC communications.
  */
-void hdlc_mode(SLMP_INFO *info)
+static void hdlc_mode(SLMP_INFO *info)
 {
        unsigned char RegValue;
        u32 DpllDivisor;
@@ -4741,7 +4761,7 @@ void hdlc_mode(SLMP_INFO *info)
 
 /* Set the transmit HDLC idle mode
  */
-void tx_set_idle(SLMP_INFO *info)
+static void tx_set_idle(SLMP_INFO *info)
 {
        unsigned char RegValue = 0xff;
 
@@ -4761,7 +4781,7 @@ void tx_set_idle(SLMP_INFO *info)
 
 /* Query the adapter for the state of the V24 status (input) signals.
  */
-void get_signals(SLMP_INFO *info)
+static void get_signals(SLMP_INFO *info)
 {
        u16 status = read_reg(info, SR3);
        u16 gpstatus = read_status_reg(info);
@@ -4790,7 +4810,7 @@ void get_signals(SLMP_INFO *info)
 /* Set the state of DTR and RTS based on contents of
  * serial_signals member of device context.
  */
-void set_signals(SLMP_INFO *info)
+static void set_signals(SLMP_INFO *info)
 {
        unsigned char RegValue;
        u16 EnableBit;
@@ -4819,7 +4839,7 @@ void set_signals(SLMP_INFO *info)
  * and set the current buffer to the first buffer. This effectively
  * makes all buffers free and discards any data in buffers.
  */
-void rx_reset_buffers(SLMP_INFO *info)
+static void rx_reset_buffers(SLMP_INFO *info)
 {
        rx_free_frame_buffers(info, 0, info->rx_buf_count - 1);
 }
@@ -4830,16 +4850,16 @@ void rx_reset_buffers(SLMP_INFO *info)
  * first  index of 1st receive buffer of frame
  * last   index of last receive buffer of frame
  */
-void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last)
+static void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last)
 {
-       int done = 0;
+       bool done = false;
 
        while(!done) {
                /* reset current buffer for reuse */
                info->rx_buf_list[first].status = 0xff;
 
                if (first == last) {
-                       done = 1;
+                       done = true;
                        /* set new last rx descriptor address */
                        write_reg16(info, RXDMA + EDA, info->rx_buf_list_ex[first].phys_entry);
                }
@@ -4856,14 +4876,14 @@ void rx_free_frame_buffers(SLMP_INFO *info, unsigned int first, unsigned int last)
 /* Return a received frame from the receive DMA buffers.
  * Only frames received without errors are returned.
  *
- * Return Value:       1 if frame returned, otherwise 0
+ * Return Value:       true if frame returned, otherwise false
  */
-int rx_get_frame(SLMP_INFO *info)
+static bool rx_get_frame(SLMP_INFO *info)
 {
        unsigned int StartIndex, EndIndex;      /* index of 1st and last buffers of Rx frame */
        unsigned short status;
        unsigned int framesize = 0;
-       int ReturnCode = 0;
+       bool ReturnCode = false;
        unsigned long flags;
        struct tty_struct *tty = info->tty;
        unsigned char addr_field = 0xff;
@@ -5014,7 +5034,7 @@ CheckAgain:
        /* Free the buffers used by this frame. */
        rx_free_frame_buffers( info, StartIndex, EndIndex );
 
-       ReturnCode = 1;
+       ReturnCode = true;
 
 Cleanup:
        if ( info->rx_enabled && info->rx_overflow ) {
@@ -5033,7 +5053,7 @@ Cleanup:
 
 /* load the transmit DMA buffer with data
  */
-void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count)
+static void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count)
 {
        unsigned short copy_count;
        unsigned int i = 0;
@@ -5073,12 +5093,12 @@ void tx_load_dma_buffer(SLMP_INFO *info, const char *buf, unsigned int count)
        info->last_tx_buf = ++i;
 }
 
-int register_test(SLMP_INFO *info)
+static bool register_test(SLMP_INFO *info)
 {
        static unsigned char testval[] = {0x00, 0xff, 0xaa, 0x55, 0x69, 0x96};
        static unsigned int count = ARRAY_SIZE(testval);
        unsigned int i;
-       int rc = TRUE;
+       bool rc = true;
        unsigned long flags;
 
        spin_lock_irqsave(&info->lock,flags);
@@ -5101,7 +5121,7 @@ int register_test(SLMP_INFO *info)
                          (read_reg(info, SA0) != testval[(i+2)%count]) ||
                          (read_reg(info, SA1) != testval[(i+3)%count]) )
                {
-                       rc = FALSE;
+                       rc = false;
                        break;
                }
        }
@@ -5112,7 +5132,7 @@ int register_test(SLMP_INFO *info)
        return rc;
 }
 
-int irq_test(SLMP_INFO *info)
+static bool irq_test(SLMP_INFO *info)
 {
        unsigned long timeout;
        unsigned long flags;
@@ -5124,7 +5144,7 @@ int irq_test(SLMP_INFO *info)
 
        /* assume failure */
        info->init_error = DiagStatus_IrqFailure;
-       info->irq_occurred = FALSE;
+       info->irq_occurred = false;
 
        /* setup timer0 on SCA0 to interrupt */
 
@@ -5163,7 +5183,7 @@ int irq_test(SLMP_INFO *info)
 
 /* initialize individual SCA device (2 ports)
  */
-static int sca_init(SLMP_INFO *info)
+static bool sca_init(SLMP_INFO *info)
 {
        /* set wait controller to single mem partition (low), no wait states */
        write_reg(info, PABR0, 0);      /* wait controller addr boundary 0 */
@@ -5199,12 +5219,12 @@ static int sca_init(SLMP_INFO *info)
         */
        write_reg(info, ITCR, 0);
 
-       return TRUE;
+       return true;
 }
 
 /* initialize adapter hardware
  */
-int init_adapter(SLMP_INFO *info)
+static bool init_adapter(SLMP_INFO *info)
 {
        int i;
 
@@ -5257,20 +5277,20 @@ int init_adapter(SLMP_INFO *info)
        sca_init(info->port_array[0]);
        sca_init(info->port_array[2]);
 
-       return TRUE;
+       return true;
 }
 
 /* Loopback an HDLC frame to test the hardware
  * interrupt and DMA functions.
  */
-int loopback_test(SLMP_INFO *info)
+static bool loopback_test(SLMP_INFO *info)
 {
 #define TESTFRAMESIZE 20
 
        unsigned long timeout;
        u16 count = TESTFRAMESIZE;
        unsigned char buf[TESTFRAMESIZE];
-       int rc = FALSE;
+       bool rc = false;
        unsigned long flags;
 
        struct tty_struct *oldtty = info->tty;
@@ -5304,16 +5324,16 @@ int loopback_test(SLMP_INFO *info)
                msleep_interruptible(10);
 
                if (rx_get_frame(info)) {
-                       rc = TRUE;
+                       rc = true;
                        break;
                }
        }
 
        /* verify received frame length and contents */
-       if (rc == TRUE &&
-               ( info->tmp_rx_buf_count != count ||
-                 memcmp(buf, info->tmp_rx_buf,count))) {
-               rc = FALSE;
+       if (rc &&
+           ( info->tmp_rx_buf_count != count ||
+             memcmp(buf, info->tmp_rx_buf,count))) {
+               rc = false;
        }
 
        spin_lock_irqsave(&info->lock,flags);
@@ -5328,7 +5348,7 @@ int loopback_test(SLMP_INFO *info)
 
 /* Perform diagnostics on hardware
  */
-int adapter_test( SLMP_INFO *info )
+static int adapter_test( SLMP_INFO *info )
 {
        unsigned long flags;
        if ( debug_level >= DEBUG_LEVEL_INFO )
@@ -5390,7 +5410,7 @@ int adapter_test( SLMP_INFO *info )
 
 /* Test the shared memory on a PCI adapter.
  */
-int memory_test(SLMP_INFO *info)
+static bool memory_test(SLMP_INFO *info)
 {
        static unsigned long testval[] = { 0x0, 0x55555555, 0xaaaaaaaa,
                0x66666666, 0x99999999, 0xffffffff, 0x12345678 };
@@ -5404,7 +5424,7 @@ int memory_test(SLMP_INFO *info)
        for ( i = 0 ; i < count ; i++ ) {
                *addr = testval[i];
                if ( *addr != testval[i] )
-                       return FALSE;
+                       return false;
        }
 
        /* Test address lines with incrementing pattern over */
@@ -5419,12 +5439,12 @@ int memory_test(SLMP_INFO *info)
 
        for ( i = 0 ; i < limit ; i++ ) {
                if ( *addr != i * 4 )
-                       return FALSE;
+                       return false;
                addr++;
        }
 
        memset( info->memory_base, 0, SCA_MEM_SIZE );
-       return TRUE;
+       return true;
 }
 
 /* Load data into PCI adapter shared memory.
@@ -5442,7 +5462,7 @@ int memory_test(SLMP_INFO *info)
 * the write transaction. This allows any pending DMA request to gain control
 * of the local bus in a timely fashion.
  */
-void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count)
+static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count)
 {
        /* A load interval of 16 allows for 4 32-bit writes at */
        /* 136ns each for a maximum latency of 542ns on the local bus.*/
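
Only the tail of the copy is visible in the next hunk; a minimal sketch of the interval-based copy the comment above describes (the loop structure is an assumption; only the trailing memcpy() and the sca_pci_load_interval name come from the visible code):

        /* Illustrative sketch only, not the driver's exact body: copy in
         * bursts of sca_pci_load_interval bytes so that a pending DMA request
         * can win the local bus between bursts. Uses memcpy() from
         * <linux/string.h>. */
        static void load_pci_memory_sketch(char *dest, const char *src,
                                           unsigned short count)
        {
                const unsigned short sca_pci_load_interval = 16;
                unsigned short interval_count = count / sca_pci_load_interval;
                unsigned short i;

                for (i = 0; i < interval_count; i++) {
                        memcpy(dest, src, sca_pci_load_interval);
                        dest += sca_pci_load_interval;
                        src += sca_pci_load_interval;
                }
                memcpy(dest, src, count % sca_pci_load_interval);
        }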
@@ -5461,7 +5481,7 @@ void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count)
        memcpy(dest, src, count % sca_pci_load_interval);
 }
 
-void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
+static void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
 {
        int i;
        int linecount;
@@ -5496,7 +5516,7 @@ void trace_block(SLMP_INFO *info,const char* data, int count, int xmit)
 /* called when HDLC frame times out
  * update stats and do tx completion processing
  */
-void tx_timeout(unsigned long context)
+static void tx_timeout(unsigned long context)
 {
        SLMP_INFO *info = (SLMP_INFO*)context;
        unsigned long flags;
@@ -5508,7 +5528,7 @@ void tx_timeout(unsigned long context)
                info->icount.txtimeout++;
        }
        spin_lock_irqsave(&info->lock,flags);
-       info->tx_active = 0;
+       info->tx_active = false;
        info->tx_count = info->tx_put = info->tx_get = 0;
 
        spin_unlock_irqrestore(&info->lock,flags);
@@ -5523,7 +5543,7 @@ void tx_timeout(unsigned long context)
 
 /* called to periodically check the DSR/RI modem signal input status
  */
-void status_timeout(unsigned long context)
+static void status_timeout(unsigned long context)
 {
        u16 status = 0;
        SLMP_INFO *info = (SLMP_INFO*)context;
@@ -5574,36 +5594,36 @@ void status_timeout(unsigned long context)
        }
 
 
-unsigned char read_reg(SLMP_INFO * info, unsigned char Addr)
+static unsigned char read_reg(SLMP_INFO * info, unsigned char Addr)
 {
        CALC_REGADDR();
        return *RegAddr;
 }
-void write_reg(SLMP_INFO * info, unsigned char Addr, unsigned char Value)
+static void write_reg(SLMP_INFO * info, unsigned char Addr, unsigned char Value)
 {
        CALC_REGADDR();
        *RegAddr = Value;
 }
 
-u16 read_reg16(SLMP_INFO * info, unsigned char Addr)
+static u16 read_reg16(SLMP_INFO * info, unsigned char Addr)
 {
        CALC_REGADDR();
        return *((u16 *)RegAddr);
 }
 
-void write_reg16(SLMP_INFO * info, unsigned char Addr, u16 Value)
+static void write_reg16(SLMP_INFO * info, unsigned char Addr, u16 Value)
 {
        CALC_REGADDR();
        *((u16 *)RegAddr) = Value;
 }
 
-unsigned char read_status_reg(SLMP_INFO * info)
+static unsigned char read_status_reg(SLMP_INFO * info)
 {
        unsigned char *RegAddr = (unsigned char *)info->statctrl_base;
        return *RegAddr;
 }
 
-void write_control_reg(SLMP_INFO * info)
+static void write_control_reg(SLMP_INFO * info)
 {
        unsigned char *RegAddr = (unsigned char *)info->statctrl_base;
        *RegAddr = info->port_array[0]->ctrlreg_value;
index de60e1ea4fb3830e01524b73381738d7e98485b6..9e9bad8bdcf4e6725a982f92522b338bbf9731f3 100644 (file)
@@ -196,6 +196,48 @@ static struct sysrq_key_op sysrq_showlocks_op = {
 #define sysrq_showlocks_op (*(struct sysrq_key_op *)0)
 #endif
 
+#ifdef CONFIG_SMP
+static DEFINE_SPINLOCK(show_lock);
+
+static void showacpu(void *dummy)
+{
+       unsigned long flags;
+
+       /* Idle CPUs have no interesting backtrace. */
+       if (idle_cpu(smp_processor_id()))
+               return;
+
+       spin_lock_irqsave(&show_lock, flags);
+       printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+       show_stack(NULL, NULL);
+       spin_unlock_irqrestore(&show_lock, flags);
+}
+
+static void sysrq_showregs_othercpus(struct work_struct *dummy)
+{
+       smp_call_function(showacpu, NULL, 0, 0);
+}
+
+static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus);
+
+static void sysrq_handle_showallcpus(int key, struct tty_struct *tty)
+{
+       struct pt_regs *regs = get_irq_regs();
+       if (regs) {
+               printk(KERN_INFO "CPU%d:\n", smp_processor_id());
+               show_regs(regs);
+       }
+       schedule_work(&sysrq_showallcpus);
+}
+
+static struct sysrq_key_op sysrq_showallcpus_op = {
+       .handler        = sysrq_handle_showallcpus,
+       .help_msg       = "aLlcpus",
+       .action_msg     = "Show backtrace of all active CPUs",
+       .enable_mask    = SYSRQ_ENABLE_DUMP,
+};
+#endif
+
 static void sysrq_handle_showregs(int key, struct tty_struct *tty)
 {
        struct pt_regs *regs = get_irq_regs();
@@ -271,8 +313,7 @@ static struct sysrq_key_op sysrq_term_op = {
 
 static void moom_callback(struct work_struct *ignored)
 {
-       out_of_memory(&NODE_DATA(0)->node_zonelists[ZONE_NORMAL],
-                       GFP_KERNEL, 0);
+       out_of_memory(node_zonelist(0, GFP_KERNEL), GFP_KERNEL, 0);
 }
 
 static DECLARE_WORK(moom_work, moom_callback);
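
node_zonelist() replaces the open-coded NODE_DATA(0)->node_zonelists[...] lookup with a helper that selects the per-node zonelist appropriate for the given GFP flags. Its shape is roughly the following (a sketch only; the exact body and the gfp_zonelist() name are assumptions, not quoted from this tree):

        static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
        {
                return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
        }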
@@ -341,7 +382,11 @@ static struct sysrq_key_op *sysrq_key_table[36] = {
        &sysrq_kill_op,                 /* i */
        NULL,                           /* j */
        &sysrq_SAK_op,                  /* k */
+#ifdef CONFIG_SMP
+       &sysrq_showallcpus_op,          /* l */
+#else
        NULL,                           /* l */
+#endif
        &sysrq_showmem_op,              /* m */
        &sysrq_unrt_op,                 /* n */
        /* o: This will often be registered as 'Off' at init time */
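
The new 'l' entry simply wires the sysrq_showallcpus_op defined above into the static key table. A module can also attach a handler at run time through the same structure; a minimal sketch (the handler, key choice and init function below are hypothetical; register_sysrq_key() is the existing sysrq registration call):

        #include <linux/module.h>
        #include <linux/sysrq.h>

        static void sysrq_handle_example(int key, struct tty_struct *tty)
        {
                printk(KERN_INFO "example sysrq handler ran for key '%c'\n", key);
        }

        static struct sysrq_key_op sysrq_example_op = {
                .handler        = sysrq_handle_example,
                .help_msg       = "eXample",
                .action_msg     = "Run example action",
                .enable_mask    = SYSRQ_ENABLE_DUMP,
        };

        static int __init sysrq_example_init(void)
        {
                /* attach the op to the otherwise unused 'x' key */
                return register_sysrq_key('x', &sysrq_example_op);
        }
        module_init(sysrq_example_init);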
index ce5ebe3b168ffe0cb59f83484370a2bf6d957602..663cd15d7c78fa5a0cbd548179f61b352ba53e38 100644 (file)
@@ -426,7 +426,7 @@ static int tosh_probe(void)
        int i,major,minor,day,year,month,flag;
        unsigned char signature[7] = { 0x54,0x4f,0x53,0x48,0x49,0x42,0x41 };
        SMMRegisters regs;
-       void __iomem *bios = ioremap(0xf0000, 0x10000);
+       void __iomem *bios = ioremap_cache(0xf0000, 0x10000);
 
        if (!bios)
                return -ENOMEM;
@@ -520,12 +520,11 @@ static int __init toshiba_init(void)
        {
                struct proc_dir_entry *pde;
 
-               pde = create_proc_entry("toshiba", 0, NULL);
+               pde = proc_create("toshiba", 0, NULL, &proc_toshiba_fops);
                if (!pde) {
                        misc_deregister(&tosh_device);
                        return -ENOMEM;
                }
-               pde->proc_fops = &proc_toshiba_fops;
        }
 #endif
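
proc_create() takes the file_operations at creation time, so the entry can never be observed without its fops (the old code patched pde->proc_fops after the entry already existed). The proc_toshiba_fops table itself lies outside this hunk; a minimal sketch of what such a table commonly looks like when built on the seq_file single_open() helpers (the names and the show routine below are assumptions, not the driver's actual code):

        #include <linux/proc_fs.h>
        #include <linux/seq_file.h>

        static int proc_toshiba_show(struct seq_file *m, void *v)
        {
                /* emit the BIOS/SMM details here */
                return 0;
        }

        static int proc_toshiba_open(struct inode *inode, struct file *file)
        {
                return single_open(file, proc_toshiba_show, NULL);
        }

        static const struct file_operations proc_toshiba_fops = {
                .owner          = THIS_MODULE,
                .open           = proc_toshiba_open,
                .read           = seq_read,
                .llseek         = seq_lseek,
                .release        = single_release,
        };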
 
index 8f3f7620f95a13afcc72b2fa36d142b1e5167303..3738cfa209ff2024b77d9790ea91777ab1783c83 100644 (file)
@@ -23,7 +23,7 @@ if TCG_TPM
 
 config TCG_TIS
        tristate "TPM Interface Specification 1.2 Interface"
-       depends on PNPACPI
+       depends on PNP
        ---help---
          If you have a TPM security chip that is compliant with the
          TCG TIS 1.2 TPM specification say Yes and it will be accessible
@@ -32,7 +32,6 @@ config TCG_TIS
 
 config TCG_NSC
        tristate "National Semiconductor TPM Interface"
-       depends on PNPACPI
        ---help---
          If you have a TPM security chip from National Semiconductor 
          say Yes and it will be accessible from within Linux.  To 
@@ -48,7 +47,7 @@ config TCG_ATMEL
 
 config TCG_INFINEON
        tristate "Infineon Technologies TPM Interface"
-       depends on PNPACPI
+       depends on PNP
        ---help---
          If you have a TPM security chip from Infineon Technologies
          (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it
index 6313326bc41fde5f2fdf455321af04abcede6181..ab18c1e7b115fd098ef56599241560d67fb19597 100644 (file)
@@ -264,7 +264,7 @@ static const struct tpm_vendor_specific tpm_nsc = {
 
 static struct platform_device *pdev = NULL;
 
-static void __devexit tpm_nsc_remove(struct device *dev)
+static void tpm_nsc_remove(struct device *dev)
 {
        struct tpm_chip *chip = dev_get_drvdata(dev);
        if ( chip ) {
index 7722466e052faedeb2ec999ba0b2f67f8b300e3e..6342b0534f4d3cda24aaac72cd5cb25deeb3021d 100644 (file)
@@ -92,7 +92,7 @@ static void tty_audit_buf_push(struct task_struct *tsk, uid_t loginuid,
                get_task_comm(name, tsk);
                audit_log_untrustedstring(ab, name);
                audit_log_format(ab, " data=");
-               audit_log_n_untrustedstring(ab, buf->valid, buf->data);
+               audit_log_n_untrustedstring(ab, buf->data, buf->valid);
                audit_log_end(ab);
        }
        buf->valid = 0;
@@ -151,14 +151,9 @@ void tty_audit_fork(struct signal_struct *sig)
 /**
  *     tty_audit_push_task     -       Flush task's pending audit data
  */
-void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid)
+void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid)
 {
        struct tty_audit_buf *buf;
-       /* FIXME I think this is correct.  Check against netlink once that is
-        * I really need to read this code more closely.  But that's for
-        * another patch.
-        */
-       unsigned int sessionid = audit_get_sessionid(tsk);
 
        spin_lock_irq(&tsk->sighand->siglock);
        buf = tsk->signal->tty_audit_buf;
@@ -238,6 +233,10 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
        if (unlikely(size == 0))
                return;
 
+       if (tty->driver->type == TTY_DRIVER_TYPE_PTY
+           && tty->driver->subtype == PTY_TYPE_MASTER)
+               return;
+
        buf = tty_audit_buf_get(tty);
        if (!buf)
                return;
@@ -300,53 +299,3 @@ void tty_audit_push(struct tty_struct *tty)
                tty_audit_buf_put(buf);
        }
 }
-
-/**
- *     tty_audit_opening       -       A TTY is being opened.
- *
- *     As a special hack, tasks that close all their TTYs and open new ones
- *     are assumed to be system daemons (e.g. getty) and auditing is
- *     automatically disabled for them.
- */
-void tty_audit_opening(void)
-{
-       int disable;
-
-       disable = 1;
-       spin_lock_irq(&current->sighand->siglock);
-       if (current->signal->audit_tty == 0)
-               disable = 0;
-       spin_unlock_irq(&current->sighand->siglock);
-       if (!disable)
-               return;
-
-       task_lock(current);
-       if (current->files) {
-               struct fdtable *fdt;
-               unsigned i;
-
-               /*
-                * We don't take a ref to the file, so we must hold ->file_lock
-                * instead.
-                */
-               spin_lock(&current->files->file_lock);
-               fdt = files_fdtable(current->files);
-               for (i = 0; i < fdt->max_fds; i++) {
-                       struct file *filp;
-
-                       filp = fcheck_files(current->files, i);
-                       if (filp && is_tty(filp)) {
-                               disable = 0;
-                               break;
-                       }
-               }
-               spin_unlock(&current->files->file_lock);
-       }
-       task_unlock(current);
-       if (!disable)
-               return;
-
-       spin_lock_irq(&current->sighand->siglock);
-       current->signal->audit_tty = 0;
-       spin_unlock_irq(&current->sighand->siglock);
-}
index 4d3c7018f0c3a9198093ace1ed28b69747a2209b..1d298c2cf9301c0a1ac2026577463e6a3a6b1813 100644 (file)
@@ -91,7 +91,6 @@
 #include <linux/module.h>
 #include <linux/smp_lock.h>
 #include <linux/device.h>
-#include <linux/idr.h>
 #include <linux/wait.h>
 #include <linux/bitops.h>
 #include <linux/delay.h>
@@ -137,9 +136,6 @@ EXPORT_SYMBOL(tty_mutex);
 
 #ifdef CONFIG_UNIX98_PTYS
 extern struct tty_driver *ptm_driver;  /* Unix98 pty masters; for /dev/ptmx */
-extern int pty_limit;                  /* Config limit on Unix98 ptys */
-static DEFINE_IDR(allocated_ptys);
-static DEFINE_MUTEX(allocated_ptys_lock);
 static int ptmx_open(struct inode *, struct file *);
 #endif
 
@@ -152,8 +148,7 @@ ssize_t redirected_tty_write(struct file *, const char __user *,
 static unsigned int tty_poll(struct file *, poll_table *);
 static int tty_open(struct inode *, struct file *);
 static int tty_release(struct inode *, struct file *);
-int tty_ioctl(struct inode *inode, struct file *file,
-             unsigned int cmd, unsigned long arg);
+long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 #ifdef CONFIG_COMPAT
 static long tty_compat_ioctl(struct file *file, unsigned int cmd,
                                unsigned long arg);
@@ -1109,8 +1104,8 @@ restart:
           a reference to the old ldisc. If we ended up flipping back
           to the existing ldisc we have two references to it */
 
-       if (tty->ldisc.num != o_ldisc.num && tty->driver->set_ldisc)
-               tty->driver->set_ldisc(tty);
+       if (tty->ldisc.num != o_ldisc.num && tty->ops->set_ldisc)
+               tty->ops->set_ldisc(tty);
 
        tty_ldisc_put(o_ldisc.num);
 
@@ -1180,11 +1175,10 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line)
                if (*str == ',')
                        str++;
                if (*str == '\0')
-                       str = 0;
-
-               if (tty_line >= 0 && tty_line <= p->num && p->poll_init &&
-                               !p->poll_init(p, tty_line, str)) {
+                       str = NULL;
 
+               if (tty_line >= 0 && tty_line <= p->num && p->ops &&
+                   p->ops->poll_init && !p->ops->poll_init(p, tty_line, str)) {
                        res = p;
                        *line = tty_line;
                        break;
@@ -1205,26 +1199,37 @@ EXPORT_SYMBOL_GPL(tty_find_polling_driver);
  *     not in the foreground, send a SIGTTOU.  If the signal is blocked or
  *     ignored, go ahead and perform the operation.  (POSIX 7.2)
  *
- *     Locking: none
+ *     Locking: ctrl_lock
  */
 
 int tty_check_change(struct tty_struct *tty)
 {
+       unsigned long flags;
+       int ret = 0;
+
        if (current->signal->tty != tty)
                return 0;
+
+       spin_lock_irqsave(&tty->ctrl_lock, flags);
+
        if (!tty->pgrp) {
                printk(KERN_WARNING "tty_check_change: tty->pgrp == NULL!\n");
-               return 0;
+               goto out;
        }
        if (task_pgrp(current) == tty->pgrp)
-               return 0;
+               goto out;
        if (is_ignored(SIGTTOU))
-               return 0;
-       if (is_current_pgrp_orphaned())
-               return -EIO;
+               goto out;
+       if (is_current_pgrp_orphaned()) {
+               ret = -EIO;
+               goto out;
+       }
        kill_pgrp(task_pgrp(current), SIGTTOU, 1);
        set_thread_flag(TIF_SIGPENDING);
-       return -ERESTARTSYS;
+       ret = -ERESTARTSYS;
+out:
+       spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+       return ret;
 }
 
 EXPORT_SYMBOL(tty_check_change);
@@ -1247,8 +1252,8 @@ static unsigned int hung_up_tty_poll(struct file *filp, poll_table *wait)
        return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM;
 }
 
-static int hung_up_tty_ioctl(struct inode *inode, struct file *file,
-                            unsigned int cmd, unsigned long arg)
+static long hung_up_tty_ioctl(struct file *file, unsigned int cmd,
+               unsigned long arg)
 {
        return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
 }
@@ -1264,7 +1269,7 @@ static const struct file_operations tty_fops = {
        .read           = tty_read,
        .write          = tty_write,
        .poll           = tty_poll,
-       .ioctl          = tty_ioctl,
+       .unlocked_ioctl = tty_ioctl,
        .compat_ioctl   = tty_compat_ioctl,
        .open           = tty_open,
        .release        = tty_release,
@@ -1277,7 +1282,7 @@ static const struct file_operations ptmx_fops = {
        .read           = tty_read,
        .write          = tty_write,
        .poll           = tty_poll,
-       .ioctl          = tty_ioctl,
+       .unlocked_ioctl = tty_ioctl,
        .compat_ioctl   = tty_compat_ioctl,
        .open           = ptmx_open,
        .release        = tty_release,
@@ -1290,7 +1295,7 @@ static const struct file_operations console_fops = {
        .read           = tty_read,
        .write          = redirected_tty_write,
        .poll           = tty_poll,
-       .ioctl          = tty_ioctl,
+       .unlocked_ioctl = tty_ioctl,
        .compat_ioctl   = tty_compat_ioctl,
        .open           = tty_open,
        .release        = tty_release,
@@ -1302,7 +1307,7 @@ static const struct file_operations hung_up_tty_fops = {
        .read           = hung_up_tty_read,
        .write          = hung_up_tty_write,
        .poll           = hung_up_tty_poll,
-       .ioctl          = hung_up_tty_ioctl,
+       .unlocked_ioctl = hung_up_tty_ioctl,
        .compat_ioctl   = hung_up_tty_compat_ioctl,
        .release        = tty_release,
 };
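
All four file_operations above move from the BKL-protected .ioctl hook to .unlocked_ioctl, which is also why tty_ioctl later derives the inode from file->f_dentry->d_inode instead of taking it as a parameter. For reference, the shape of the two hooks in struct file_operations of this era (a sketch for comparison, not part of the patch):

        /* legacy hook: runs under the Big Kernel Lock, inode passed in */
        int (*ioctl)(struct inode *inode, struct file *file,
                     unsigned int cmd, unsigned long arg);

        /* unlocked hook: no BKL, the callee looks up the inode if needed */
        long (*unlocked_ioctl)(struct file *file,
                               unsigned int cmd, unsigned long arg);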
@@ -1404,6 +1409,7 @@ static void do_tty_hangup(struct work_struct *work)
        struct task_struct *p;
        struct tty_ldisc *ld;
        int    closecount = 0, n;
+       unsigned long flags;
 
        if (!tty)
                return;
@@ -1441,8 +1447,7 @@ static void do_tty_hangup(struct work_struct *work)
                /* We may have no line discipline at this point */
                if (ld->flush_buffer)
                        ld->flush_buffer(tty);
-               if (tty->driver->flush_buffer)
-                       tty->driver->flush_buffer(tty);
+               tty_driver_flush_buffer(tty);
                if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) &&
                    ld->write_wakeup)
                        ld->write_wakeup(tty);
@@ -1480,19 +1485,24 @@ static void do_tty_hangup(struct work_struct *work)
                        __group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p);
                        __group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p);
                        put_pid(p->signal->tty_old_pgrp);  /* A noop */
+                       spin_lock_irqsave(&tty->ctrl_lock, flags);
                        if (tty->pgrp)
                                p->signal->tty_old_pgrp = get_pid(tty->pgrp);
+                       spin_unlock_irqrestore(&tty->ctrl_lock, flags);
                        spin_unlock_irq(&p->sighand->siglock);
                } while_each_pid_task(tty->session, PIDTYPE_SID, p);
        }
        read_unlock(&tasklist_lock);
 
+       spin_lock_irqsave(&tty->ctrl_lock, flags);
        tty->flags = 0;
        put_pid(tty->session);
        put_pid(tty->pgrp);
        tty->session = NULL;
        tty->pgrp = NULL;
        tty->ctrl_status = 0;
+       spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
        /*
         * If one of the devices matches a console pointer, we
         * cannot just call hangup() because that will cause
@@ -1500,11 +1510,11 @@ static void do_tty_hangup(struct work_struct *work)
         * So we just call close() the right number of times.
         */
        if (cons_filp) {
-               if (tty->driver->close)
+               if (tty->ops->close)
                        for (n = 0; n < closecount; n++)
-                               tty->driver->close(tty, cons_filp);
-       } else if (tty->driver->hangup)
-               (tty->driver->hangup)(tty);
+                               tty->ops->close(tty, cons_filp);
+       } else if (tty->ops->hangup)
+               (tty->ops->hangup)(tty);
        /*
         * We don't want to have driver/ldisc interactions beyond
         * the ones we did here. The driver layer expects no
@@ -1626,16 +1636,17 @@ void disassociate_ctty(int on_exit)
        struct tty_struct *tty;
        struct pid *tty_pgrp = NULL;
 
-       lock_kernel();
 
        mutex_lock(&tty_mutex);
        tty = get_current_tty();
        if (tty) {
                tty_pgrp = get_pid(tty->pgrp);
                mutex_unlock(&tty_mutex);
+               lock_kernel();
                /* XXX: here we race, there is nothing protecting tty */
                if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY)
                        tty_vhangup(tty);
+               unlock_kernel();
        } else if (on_exit) {
                struct pid *old_pgrp;
                spin_lock_irq(&current->sighand->siglock);
@@ -1648,7 +1659,6 @@ void disassociate_ctty(int on_exit)
                        put_pid(old_pgrp);
                }
                mutex_unlock(&tty_mutex);
-               unlock_kernel();
                return;
        }
        if (tty_pgrp) {
@@ -1667,10 +1677,13 @@ void disassociate_ctty(int on_exit)
        /* It is possible that do_tty_hangup has free'd this tty */
        tty = get_current_tty();
        if (tty) {
+               unsigned long flags;
+               spin_lock_irqsave(&tty->ctrl_lock, flags);
                put_pid(tty->session);
                put_pid(tty->pgrp);
                tty->session = NULL;
                tty->pgrp = NULL;
+               spin_unlock_irqrestore(&tty->ctrl_lock, flags);
        } else {
 #ifdef TTY_DEBUG_HANGUP
                printk(KERN_DEBUG "error attempted to write to tty [0x%p]"
@@ -1683,7 +1696,6 @@ void disassociate_ctty(int on_exit)
        read_lock(&tasklist_lock);
        session_clear_tty(task_session(current));
        read_unlock(&tasklist_lock);
-       unlock_kernel();
 }
 
 /**
@@ -1693,8 +1705,10 @@ void disassociate_ctty(int on_exit)
 void no_tty(void)
 {
        struct task_struct *tsk = current;
+       lock_kernel();
        if (tsk->signal->leader)
                disassociate_ctty(0);
+       unlock_kernel();
        proc_clear_tty(tsk);
 }
 
@@ -1714,21 +1728,26 @@ void no_tty(void)
  *     but not always.
  *
  *     Locking:
- *             Broken. Relies on BKL which is unsafe here.
+ *             Uses the tty control lock internally
  */
 
 void stop_tty(struct tty_struct *tty)
 {
-       if (tty->stopped)
+       unsigned long flags;
+       spin_lock_irqsave(&tty->ctrl_lock, flags);
+       if (tty->stopped) {
+               spin_unlock_irqrestore(&tty->ctrl_lock, flags);
                return;
+       }
        tty->stopped = 1;
        if (tty->link && tty->link->packet) {
                tty->ctrl_status &= ~TIOCPKT_START;
                tty->ctrl_status |= TIOCPKT_STOP;
                wake_up_interruptible(&tty->link->read_wait);
        }
-       if (tty->driver->stop)
-               (tty->driver->stop)(tty);
+       spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+       if (tty->ops->stop)
+               (tty->ops->stop)(tty);
 }
 
 EXPORT_SYMBOL(stop_tty);
@@ -1743,21 +1762,26 @@ EXPORT_SYMBOL(stop_tty);
  *     driver start method is invoked and the line discipline woken.
  *
  *     Locking:
- *             Broken. Relies on BKL which is unsafe here.
+ *             ctrl_lock
  */
 
 void start_tty(struct tty_struct *tty)
 {
-       if (!tty->stopped || tty->flow_stopped)
+       unsigned long flags;
+       spin_lock_irqsave(&tty->ctrl_lock, flags);
+       if (!tty->stopped || tty->flow_stopped) {
+               spin_unlock_irqrestore(&tty->ctrl_lock, flags);
                return;
+       }
        tty->stopped = 0;
        if (tty->link && tty->link->packet) {
                tty->ctrl_status &= ~TIOCPKT_STOP;
                tty->ctrl_status |= TIOCPKT_START;
                wake_up_interruptible(&tty->link->read_wait);
        }
-       if (tty->driver->start)
-               (tty->driver->start)(tty);
+       spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+       if (tty->ops->start)
+               (tty->ops->start)(tty);
        /* If we have a running line discipline it may need kicking */
        tty_wakeup(tty);
 }
@@ -1775,10 +1799,8 @@ EXPORT_SYMBOL(start_tty);
  *     for hung up devices before calling the line discipline method.
  *
  *     Locking:
- *             Locks the line discipline internally while needed
- *             For historical reasons the line discipline read method is
- *     invoked under the BKL. This will go away in time so do not rely on it
- *     in new code. Multiple read calls may be outstanding in parallel.
+ *             Locks the line discipline internally while needed. Multiple
+ *     read calls may be outstanding in parallel.
  */
 
 static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
@@ -1799,13 +1821,11 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
        /* We want to wait for the line discipline to sort out in this
           situation */
        ld = tty_ldisc_ref_wait(tty);
-       lock_kernel();
        if (ld->read)
                i = (ld->read)(tty, file, buf, count);
        else
                i = -EIO;
        tty_ldisc_deref(ld);
-       unlock_kernel();
        if (i > 0)
                inode->i_atime = current_fs_time(inode->i_sb);
        return i;
@@ -1893,9 +1913,7 @@ static inline ssize_t do_tty_write(
                ret = -EFAULT;
                if (copy_from_user(tty->write_buf, buf, size))
                        break;
-               lock_kernel();
                ret = write(tty, file, tty->write_buf, size);
-               unlock_kernel();
                if (ret <= 0)
                        break;
                written += ret;
@@ -1948,10 +1966,13 @@ static ssize_t tty_write(struct file *file, const char __user *buf,
        tty = (struct tty_struct *)file->private_data;
        if (tty_paranoia_check(tty, inode, "tty_write"))
                return -EIO;
-       if (!tty || !tty->driver->write ||
+       if (!tty || !tty->ops->write ||
                (test_bit(TTY_IO_ERROR, &tty->flags)))
                        return -EIO;
-
+       /* Short term debug to catch buggy drivers */
+       if (tty->ops->write_room == NULL)
+               printk(KERN_ERR "tty driver %s lacks a write_room method.\n",
+                       tty->driver->name);
        ld = tty_ldisc_ref_wait(tty);
        if (!ld->write)
                ret = -EIO;
@@ -2098,6 +2119,7 @@ static int init_dev(struct tty_driver *driver, int idx,
                goto fail_no_mem;
        initialize_tty_struct(tty);
        tty->driver = driver;
+       tty->ops = driver->ops;
        tty->index = idx;
        tty_line_name(driver, idx, tty->name);
 
@@ -2128,6 +2150,7 @@ static int init_dev(struct tty_driver *driver, int idx,
                        goto free_mem_out;
                initialize_tty_struct(o_tty);
                o_tty->driver = driver->other;
+               o_tty->ops = driver->ops;
                o_tty->index = idx;
                tty_line_name(driver->other, idx, o_tty->name);
 
@@ -2432,8 +2455,8 @@ static void release_dev(struct file *filp)
                }
        }
 #endif
-       if (tty->driver->close)
-               tty->driver->close(tty, filp);
+       if (tty->ops->close)
+               tty->ops->close(tty, filp);
 
        /*
         * Sanity check: if tty->count is going to zero, there shouldn't be
@@ -2612,15 +2635,9 @@ static void release_dev(struct file *filp)
         */
        release_tty(tty, idx);
 
-#ifdef CONFIG_UNIX98_PTYS
        /* Make this pty number available for reallocation */
-       if (devpts) {
-               mutex_lock(&allocated_ptys_lock);
-               idr_remove(&allocated_ptys, idx);
-               mutex_unlock(&allocated_ptys_lock);
-       }
-#endif
-
+       if (devpts)
+               devpts_kill_index(idx);
 }
 
 /**
@@ -2716,8 +2733,8 @@ got_driver:
        printk(KERN_DEBUG "opening %s...", tty->name);
 #endif
        if (!retval) {
-               if (tty->driver->open)
-                       retval = tty->driver->open(tty, filp);
+               if (tty->ops->open)
+                       retval = tty->ops->open(tty, filp);
                else
                        retval = -ENODEV;
        }
@@ -2755,7 +2772,6 @@ got_driver:
                __proc_set_tty(current, tty);
        spin_unlock_irq(&current->sighand->siglock);
        mutex_unlock(&tty_mutex);
-       tty_audit_opening();
        return 0;
 }
 
@@ -2777,29 +2793,13 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        struct tty_struct *tty;
        int retval;
        int index;
-       int idr_ret;
 
        nonseekable_open(inode, filp);
 
        /* find a device that is not in use. */
-       mutex_lock(&allocated_ptys_lock);
-       if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) {
-               mutex_unlock(&allocated_ptys_lock);
-               return -ENOMEM;
-       }
-       idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
-       if (idr_ret < 0) {
-               mutex_unlock(&allocated_ptys_lock);
-               if (idr_ret == -EAGAIN)
-                       return -ENOMEM;
-               return -EIO;
-       }
-       if (index >= pty_limit) {
-               idr_remove(&allocated_ptys, index);
-               mutex_unlock(&allocated_ptys_lock);
-               return -EIO;
-       }
-       mutex_unlock(&allocated_ptys_lock);
+       index = devpts_new_index();
+       if (index < 0)
+               return index;
 
        mutex_lock(&tty_mutex);
        retval = init_dev(ptm_driver, index, &tty);
@@ -2812,23 +2812,19 @@ static int ptmx_open(struct inode *inode, struct file *filp)
        filp->private_data = tty;
        file_move(filp, &tty->tty_files);
 
-       retval = -ENOMEM;
-       if (devpts_pty_new(tty->link))
+       retval = devpts_pty_new(tty->link);
+       if (retval)
                goto out1;
 
-       check_tty_count(tty, "tty_open");
-       retval = ptm_driver->open(tty, filp);
-       if (!retval) {
-               tty_audit_opening();
+       check_tty_count(tty, "ptmx_open");
+       retval = ptm_driver->ops->open(tty, filp);
+       if (!retval)
                return 0;
-       }
 out1:
        release_dev(filp);
        return retval;
 out:
-       mutex_lock(&allocated_ptys_lock);
-       idr_remove(&allocated_ptys, index);
-       mutex_unlock(&allocated_ptys_lock);
+       devpts_kill_index(index);
        return retval;
 }
 #endif
@@ -2885,6 +2881,7 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait)
 static int tty_fasync(int fd, struct file *filp, int on)
 {
        struct tty_struct *tty;
+       unsigned long flags;
        int retval;
 
        tty = (struct tty_struct *)filp->private_data;
@@ -2900,6 +2897,7 @@ static int tty_fasync(int fd, struct file *filp, int on)
                struct pid *pid;
                if (!waitqueue_active(&tty->read_wait))
                        tty->minimum_to_wake = 1;
+               spin_lock_irqsave(&tty->ctrl_lock, flags);
                if (tty->pgrp) {
                        pid = tty->pgrp;
                        type = PIDTYPE_PGID;
@@ -2907,6 +2905,7 @@ static int tty_fasync(int fd, struct file *filp, int on)
                        pid = task_pid(current);
                        type = PIDTYPE_PID;
                }
+               spin_unlock_irqrestore(&tty->ctrl_lock, flags);
                retval = __f_setown(filp, pid, type, 0);
                if (retval)
                        return retval;
@@ -2992,6 +2991,8 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
        struct winsize __user *arg)
 {
        struct winsize tmp_ws;
+       struct pid *pgrp, *rpgrp;
+       unsigned long flags;
 
        if (copy_from_user(&tmp_ws, arg, sizeof(*arg)))
                return -EFAULT;
@@ -3009,10 +3010,21 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty,
                }
        }
 #endif
-       if (tty->pgrp)
-               kill_pgrp(tty->pgrp, SIGWINCH, 1);
-       if ((real_tty->pgrp != tty->pgrp) && real_tty->pgrp)
-               kill_pgrp(real_tty->pgrp, SIGWINCH, 1);
+       /* Get the PID values and reference them so we can
+          avoid holding the tty ctrl lock while sending signals */
+       spin_lock_irqsave(&tty->ctrl_lock, flags);
+       pgrp = get_pid(tty->pgrp);
+       rpgrp = get_pid(real_tty->pgrp);
+       spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
+       if (pgrp)
+               kill_pgrp(pgrp, SIGWINCH, 1);
+       if (rpgrp != pgrp && rpgrp)
+               kill_pgrp(rpgrp, SIGWINCH, 1);
+
+       put_pid(pgrp);
+       put_pid(rpgrp);
+
        tty->winsize = tmp_ws;
        real_tty->winsize = tmp_ws;
 done:
@@ -3073,10 +3085,13 @@ static int fionbio(struct file *file, int __user *p)
        if (get_user(nonblock, p))
                return -EFAULT;
 
+       /* file->f_flags is still BKL protected in the fs layer - vomit */
+       lock_kernel();
        if (nonblock)
                file->f_flags |= O_NONBLOCK;
        else
                file->f_flags &= ~O_NONBLOCK;
+       unlock_kernel();
        return 0;
 }
 
@@ -3133,6 +3148,27 @@ unlock:
        return ret;
 }
 
+/**
+ *     tty_get_pgrp    -       return a ref counted pgrp pid
+ *     @tty: tty to read
+ *
+ *     Returns a refcounted instance of the pid struct for the process
+ *     group controlling the tty.
+ */
+
+struct pid *tty_get_pgrp(struct tty_struct *tty)
+{
+       unsigned long flags;
+       struct pid *pgrp;
+
+       spin_lock_irqsave(&tty->ctrl_lock, flags);
+       pgrp = get_pid(tty->pgrp);
+       spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+
+       return pgrp;
+}
+EXPORT_SYMBOL_GPL(tty_get_pgrp);
+
 /**
  *     tiocgpgrp               -       get process group
  *     @tty: tty passed by user
@@ -3147,13 +3183,18 @@ unlock:
 
 static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
 {
+       struct pid *pid;
+       int ret;
        /*
         * (tty == real_tty) is a cheap way of
         * testing if the tty is NOT a master pty.
         */
        if (tty == real_tty && current->signal->tty != real_tty)
                return -ENOTTY;
-       return put_user(pid_vnr(real_tty->pgrp), p);
+       pid = tty_get_pgrp(real_tty);
+       ret =  put_user(pid_vnr(pid), p);
+       put_pid(pid);
+       return ret;
 }
 
 /**
@@ -3165,7 +3206,7 @@ static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
  *     Set the process group of the tty to the session passed. Only
  *     permitted where the tty session is our session.
  *
- *     Locking: None
+ *     Locking: RCU, ctrl lock
  */
 
 static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p)
@@ -3173,6 +3214,7 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
        struct pid *pgrp;
        pid_t pgrp_nr;
        int retval = tty_check_change(real_tty);
+       unsigned long flags;
 
        if (retval == -EIO)
                return -ENOTTY;
@@ -3195,8 +3237,10 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t
        if (session_of_pgrp(pgrp) != task_session(current))
                goto out_unlock;
        retval = 0;
+       spin_lock_irqsave(&tty->ctrl_lock, flags);
        put_pid(real_tty->pgrp);
        real_tty->pgrp = get_pid(pgrp);
+       spin_unlock_irqrestore(&tty->ctrl_lock, flags);
 out_unlock:
        rcu_read_unlock();
        return retval;
@@ -3240,10 +3284,16 @@ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t _
 static int tiocsetd(struct tty_struct *tty, int __user *p)
 {
        int ldisc;
+       int ret;
 
        if (get_user(ldisc, p))
                return -EFAULT;
-       return tty_set_ldisc(tty, ldisc);
+
+       lock_kernel();
+       ret = tty_set_ldisc(tty, ldisc);
+       unlock_kernel();
+
+       return ret;
 }
 
 /**
@@ -3263,18 +3313,18 @@ static int send_break(struct tty_struct *tty, unsigned int duration)
 {
        if (tty_write_lock(tty, 0) < 0)
                return -EINTR;
-       tty->driver->break_ctl(tty, -1);
+       tty->ops->break_ctl(tty, -1);
        if (!signal_pending(current))
                msleep_interruptible(duration);
-       tty->driver->break_ctl(tty, 0);
+       tty->ops->break_ctl(tty, 0);
        tty_write_unlock(tty);
-       if (signal_pending(current))
+       if (!signal_pending(current))
                return -EINTR;
        return 0;
 }
 
 /**
- *     tiocmget                -       get modem status
+ *     tty_tiocmget            -       get modem status
  *     @tty: tty device
  *     @file: user file pointer
  *     @p: pointer to result
@@ -3289,8 +3339,8 @@ static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p
 {
        int retval = -EINVAL;
 
-       if (tty->driver->tiocmget) {
-               retval = tty->driver->tiocmget(tty, file);
+       if (tty->ops->tiocmget) {
+               retval = tty->ops->tiocmget(tty, file);
 
                if (retval >= 0)
                        retval = put_user(retval, p);
@@ -3299,7 +3349,7 @@ static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p
 }
 
 /**
- *     tiocmset                -       set modem status
+ *     tty_tiocmset            -       set modem status
  *     @tty: tty device
  *     @file: user file pointer
  *     @cmd: command - clear bits, set bits or set all
@@ -3316,7 +3366,7 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int
 {
        int retval = -EINVAL;
 
-       if (tty->driver->tiocmset) {
+       if (tty->ops->tiocmset) {
                unsigned int set, clear, val;
 
                retval = get_user(val, p);
@@ -3340,7 +3390,7 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int
                set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
                clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP;
 
-               retval = tty->driver->tiocmset(tty, file, set, clear);
+               retval = tty->ops->tiocmset(tty, file, set, clear);
        }
        return retval;
 }
@@ -3348,20 +3398,18 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int
 /*
  * Split this up, as gcc can choke on it otherwise..
  */
-int tty_ioctl(struct inode *inode, struct file *file,
-             unsigned int cmd, unsigned long arg)
+long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
        struct tty_struct *tty, *real_tty;
        void __user *p = (void __user *)arg;
        int retval;
        struct tty_ldisc *ld;
+       struct inode *inode = file->f_dentry->d_inode;
 
        tty = (struct tty_struct *)file->private_data;
        if (tty_paranoia_check(tty, inode, "tty_ioctl"))
                return -EINVAL;
 
-       /* CHECKME: is this safe as one end closes ? */
-
        real_tty = tty;
        if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
            tty->driver->subtype == PTY_TYPE_MASTER)
@@ -3370,21 +3418,28 @@ int tty_ioctl(struct inode *inode, struct file *file,
        /*
         * Break handling by driver
         */
-       if (!tty->driver->break_ctl) {
+
+       retval = -EINVAL;
+
+       if (!tty->ops->break_ctl) {
                switch (cmd) {
                case TIOCSBRK:
                case TIOCCBRK:
-                       if (tty->driver->ioctl)
-                               return tty->driver->ioctl(tty, file, cmd, arg);
-                       return -EINVAL;
+                       if (tty->ops->ioctl)
+                               retval = tty->ops->ioctl(tty, file, cmd, arg);
+                       if (retval != -EINVAL && retval != -ENOIOCTLCMD)
+                               printk(KERN_WARNING "tty: driver %s needs updating to use break_ctl\n", tty->driver->name);
+                       return retval;
 
                /* These two ioctl's always return success; even if */
                /* the driver doesn't support them. */
                case TCSBRK:
                case TCSBRKP:
-                       if (!tty->driver->ioctl)
+                       if (!tty->ops->ioctl)
                                return 0;
-                       retval = tty->driver->ioctl(tty, file, cmd, arg);
+                       retval = tty->ops->ioctl(tty, file, cmd, arg);
+                       if (retval != -EINVAL && retval != -ENOIOCTLCMD)
+                               printk(KERN_WARNING "tty: driver %s needs updating to use break_ctl\n", tty->driver->name);
                        if (retval == -ENOIOCTLCMD)
                                retval = 0;
                        return retval;
@@ -3442,7 +3497,6 @@ int tty_ioctl(struct inode *inode, struct file *file,
        case TIOCGSID:
                return tiocgsid(tty, real_tty, p);
        case TIOCGETD:
-               /* FIXME: check this is ok */
                return put_user(tty->ldisc.num, (int __user *)p);
        case TIOCSETD:
                return tiocsetd(tty, p);
@@ -3454,11 +3508,13 @@ int tty_ioctl(struct inode *inode, struct file *file,
         * Break handling
         */
        case TIOCSBRK:  /* Turn break on, unconditionally */
-               tty->driver->break_ctl(tty, -1);
+               if (tty->ops->break_ctl)
+                       tty->ops->break_ctl(tty, -1);
                return 0;
 
        case TIOCCBRK:  /* Turn break off, unconditionally */
-               tty->driver->break_ctl(tty, 0);
+               if (tty->ops->break_ctl)
+                       tty->ops->break_ctl(tty, 0);
                return 0;
        case TCSBRK:   /* SVID version: non-zero arg --> no break */
                /* non-zero arg means wait for all output data
@@ -3487,8 +3543,8 @@ int tty_ioctl(struct inode *inode, struct file *file,
                }
                break;
        }
-       if (tty->driver->ioctl) {
-               retval = (tty->driver->ioctl)(tty, file, cmd, arg);
+       if (tty->ops->ioctl) {
+               retval = (tty->ops->ioctl)(tty, file, cmd, arg);
                if (retval != -ENOIOCTLCMD)
                        return retval;
        }
@@ -3515,8 +3571,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
        if (tty_paranoia_check(tty, inode, "tty_ioctl"))
                return -EINVAL;
 
-       if (tty->driver->compat_ioctl) {
-               retval = (tty->driver->compat_ioctl)(tty, file, cmd, arg);
+       if (tty->ops->compat_ioctl) {
+               retval = (tty->ops->compat_ioctl)(tty, file, cmd, arg);
                if (retval != -ENOIOCTLCMD)
                        return retval;
        }
@@ -3566,8 +3622,7 @@ void __do_SAK(struct tty_struct *tty)
 
        tty_ldisc_flush(tty);
 
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       tty_driver_flush_buffer(tty);
 
        read_lock(&tasklist_lock);
        /* Kill the entire session */
@@ -3773,19 +3828,32 @@ static void initialize_tty_struct(struct tty_struct *tty)
        mutex_init(&tty->atomic_read_lock);
        mutex_init(&tty->atomic_write_lock);
        spin_lock_init(&tty->read_lock);
+       spin_lock_init(&tty->ctrl_lock);
        INIT_LIST_HEAD(&tty->tty_files);
        INIT_WORK(&tty->SAK_work, do_SAK_work);
 }
 
-/*
- * The default put_char routine if the driver did not define one.
+/**
+ *     tty_put_char    -       write one character to a tty
+ *     @tty: tty
+ *     @ch: character
+ *
+ *     Write one byte to the tty using the provided put_char method
+ *     if present. Returns the number of characters successfully output.
+ *
+ *     Note: the specific put_char operation in the driver layer may go
+ *     away soon. Don't call it directly, use this method
  */
 
-static void tty_default_put_char(struct tty_struct *tty, unsigned char ch)
+int tty_put_char(struct tty_struct *tty, unsigned char ch)
 {
-       tty->driver->write(tty, &ch, 1);
+       if (tty->ops->put_char)
+               return tty->ops->put_char(tty, ch);
+       return tty->ops->write(tty, &ch, 1);
 }
 
+EXPORT_SYMBOL_GPL(tty_put_char);
+
 static struct class *tty_class;
 
 /**
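
Following the tty_put_char() addition above, a short caller-side sketch (purely illustrative; the wrapper below is made up, only tty_put_char() itself comes from the patch):

        /* Write a NUL-terminated string one byte at a time, stopping as soon
         * as the driver reports it could not take the character. */
        static int echo_string(struct tty_struct *tty, const char *s)
        {
                int written = 0;

                while (*s) {
                        if (!tty_put_char(tty, *s))
                                break;
                        s++;
                        written++;
                }
                return written;
        }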
@@ -3868,37 +3936,8 @@ void put_tty_driver(struct tty_driver *driver)
 void tty_set_operations(struct tty_driver *driver,
                        const struct tty_operations *op)
 {
-       driver->open = op->open;
-       driver->close = op->close;
-       driver->write = op->write;
-       driver->put_char = op->put_char;
-       driver->flush_chars = op->flush_chars;
-       driver->write_room = op->write_room;
-       driver->chars_in_buffer = op->chars_in_buffer;
-       driver->ioctl = op->ioctl;
-       driver->compat_ioctl = op->compat_ioctl;
-       driver->set_termios = op->set_termios;
-       driver->throttle = op->throttle;
-       driver->unthrottle = op->unthrottle;
-       driver->stop = op->stop;
-       driver->start = op->start;
-       driver->hangup = op->hangup;
-       driver->break_ctl = op->break_ctl;
-       driver->flush_buffer = op->flush_buffer;
-       driver->set_ldisc = op->set_ldisc;
-       driver->wait_until_sent = op->wait_until_sent;
-       driver->send_xchar = op->send_xchar;
-       driver->read_proc = op->read_proc;
-       driver->write_proc = op->write_proc;
-       driver->tiocmget = op->tiocmget;
-       driver->tiocmset = op->tiocmset;
-#ifdef CONFIG_CONSOLE_POLL
-       driver->poll_init = op->poll_init;
-       driver->poll_get_char = op->poll_get_char;
-       driver->poll_put_char = op->poll_put_char;
-#endif
-}
-
+       driver->ops = op;
+};
 
 EXPORT_SYMBOL(alloc_tty_driver);
 EXPORT_SYMBOL(put_tty_driver);
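
With the per-method copies gone, a driver keeps one const struct tty_operations table and hands it to tty_set_operations(); the core then reaches the methods through tty->ops. A minimal sketch (driver and method names are hypothetical; the fields are ones named in the removed copy list above, and the example_* functions stand in for the driver's own implementations):

        static const struct tty_operations example_ops = {
                .open                   = example_open,
                .close                  = example_close,
                .write                  = example_write,
                .write_room             = example_write_room,
                .chars_in_buffer        = example_chars_in_buffer,
        };

        /* during driver initialisation, after alloc_tty_driver() */
        tty_set_operations(example_driver, &example_ops);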
@@ -3961,9 +4000,6 @@ int tty_register_driver(struct tty_driver *driver)
                return error;
        }
 
-       if (!driver->put_char)
-               driver->put_char = tty_default_put_char;
-
        mutex_lock(&tty_mutex);
        list_add(&driver->tty_drivers, &tty_drivers);
        mutex_unlock(&tty_mutex);
@@ -4039,14 +4075,19 @@ void proc_clear_tty(struct task_struct *p)
 }
 EXPORT_SYMBOL(proc_clear_tty);
 
+/* Called under the sighand lock */
+
 static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty)
 {
        if (tty) {
-               /* We should not have a session or pgrp to here but.... */
+               unsigned long flags;
+               /* We should not have a session or pgrp to put here but.... */
+               spin_lock_irqsave(&tty->ctrl_lock, flags);
                put_pid(tty->session);
                put_pid(tty->pgrp);
-               tty->session = get_pid(task_session(tsk));
                tty->pgrp = get_pid(task_pgrp(tsk));
+               spin_unlock_irqrestore(&tty->ctrl_lock, flags);
+               tty->session = get_pid(task_session(tsk));
        }
        put_pid(tsk->signal->tty_old_pgrp);
        tsk->signal->tty = tty;
index f95a80b2265fdf0f42829f750c0c19f491eec044..b1a757a5ee271c9c4a4dc5a4b2fc18c35c7fb491 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/bitops.h>
 #include <linux/mutex.h>
+#include <linux/smp_lock.h>
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
 #define TERMIOS_OLD    8
 
 
+int tty_chars_in_buffer(struct tty_struct *tty)
+{
+       if (tty->ops->chars_in_buffer)
+               return tty->ops->chars_in_buffer(tty);
+       else
+               return 0;
+}
+
+EXPORT_SYMBOL(tty_chars_in_buffer);
+
+int tty_write_room(struct tty_struct *tty)
+{
+       if (tty->ops->write_room)
+               return tty->ops->write_room(tty);
+       return 2048;
+}
+
+EXPORT_SYMBOL(tty_write_room);
+
+void tty_driver_flush_buffer(struct tty_struct *tty)
+{
+       if (tty->ops->flush_buffer)
+               tty->ops->flush_buffer(tty);
+}
+
+EXPORT_SYMBOL(tty_driver_flush_buffer);
+
+void tty_throttle(struct tty_struct *tty)
+{
+       /* check TTY_THROTTLED first so it indicates our state */
+       if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) &&
+           tty->ops->throttle)
+               tty->ops->throttle(tty);
+}
+EXPORT_SYMBOL(tty_throttle);
+
+void tty_unthrottle(struct tty_struct *tty)
+{
+       if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) &&
+           tty->ops->unthrottle)
+               tty->ops->unthrottle(tty);
+}
+EXPORT_SYMBOL(tty_unthrottle);
+
 /**
  *     tty_wait_until_sent     -       wait for I/O to finish
  *     @tty: tty we are waiting for
@@ -57,15 +102,13 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout)
 
        printk(KERN_DEBUG "%s wait until sent...\n", tty_name(tty, buf));
 #endif
-       if (!tty->driver->chars_in_buffer)
-               return;
        if (!timeout)
                timeout = MAX_SCHEDULE_TIMEOUT;
        if (wait_event_interruptible_timeout(tty->write_wait,
-                       !tty->driver->chars_in_buffer(tty), timeout) < 0)
-               return;
-       if (tty->driver->wait_until_sent)
-               tty->driver->wait_until_sent(tty, timeout);
+                       !tty_chars_in_buffer(tty), timeout) >= 0) {
+               if (tty->ops->wait_until_sent)
+                       tty->ops->wait_until_sent(tty, timeout);
+       }
 }
 EXPORT_SYMBOL(tty_wait_until_sent);
 
@@ -393,8 +436,9 @@ EXPORT_SYMBOL(tty_termios_hw_change);
 static void change_termios(struct tty_struct *tty, struct ktermios *new_termios)
 {
        int canon_change;
-       struct ktermios old_termios = *tty->termios;
+       struct ktermios old_termios;
        struct tty_ldisc *ld;
+       unsigned long flags;
 
        /*
         *      Perform the actual termios internal changes under lock.
@@ -404,7 +448,7 @@ static void change_termios(struct tty_struct *tty, struct ktermios *new_termios)
        /* FIXME: we need to decide on some locking/ordering semantics
           for the set_termios notification eventually */
        mutex_lock(&tty->termios_mutex);
-
+       old_termios = *tty->termios;
        *tty->termios = *new_termios;
        unset_locked_termios(tty->termios, &old_termios, tty->termios_locked);
        canon_change = (old_termios.c_lflag ^ tty->termios->c_lflag) & ICANON;
@@ -429,17 +473,19 @@ static void change_termios(struct tty_struct *tty, struct ktermios *new_termios)
                                STOP_CHAR(tty) == '\023' &&
                                START_CHAR(tty) == '\021');
                if (old_flow != new_flow) {
+                       spin_lock_irqsave(&tty->ctrl_lock, flags);
                        tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP);
                        if (new_flow)
                                tty->ctrl_status |= TIOCPKT_DOSTOP;
                        else
                                tty->ctrl_status |= TIOCPKT_NOSTOP;
+                       spin_unlock_irqrestore(&tty->ctrl_lock, flags);
                        wake_up_interruptible(&tty->link->read_wait);
                }
        }
 
-       if (tty->driver->set_termios)
-               (*tty->driver->set_termios)(tty, &old_termios);
+       if (tty->ops->set_termios)
+               (*tty->ops->set_termios)(tty, &old_termios);
        else
                tty_termios_copy_hw(tty->termios, &old_termios);
 
@@ -474,7 +520,9 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt)
        if (retval)
                return retval;
 
+       mutex_lock(&tty->termios_mutex);
        memcpy(&tmp_termios, tty->termios, sizeof(struct ktermios));
+       mutex_unlock(&tty->termios_mutex);
 
        if (opt & TERMIOS_TERMIO) {
                if (user_termio_to_kernel_termios(&tmp_termios,
@@ -660,12 +708,14 @@ static int get_tchars(struct tty_struct *tty, struct tchars __user *tchars)
 {
        struct tchars tmp;
 
+       mutex_lock(&tty->termios_mutex);
        tmp.t_intrc = tty->termios->c_cc[VINTR];
        tmp.t_quitc = tty->termios->c_cc[VQUIT];
        tmp.t_startc = tty->termios->c_cc[VSTART];
        tmp.t_stopc = tty->termios->c_cc[VSTOP];
        tmp.t_eofc = tty->termios->c_cc[VEOF];
        tmp.t_brkc = tty->termios->c_cc[VEOL2]; /* what is brkc anyway? */
+       mutex_unlock(&tty->termios_mutex);
        return copy_to_user(tchars, &tmp, sizeof(tmp)) ? -EFAULT : 0;
 }
 
@@ -675,12 +725,14 @@ static int set_tchars(struct tty_struct *tty, struct tchars __user *tchars)
 
        if (copy_from_user(&tmp, tchars, sizeof(tmp)))
                return -EFAULT;
+       mutex_lock(&tty->termios_mutex);
        tty->termios->c_cc[VINTR] = tmp.t_intrc;
        tty->termios->c_cc[VQUIT] = tmp.t_quitc;
        tty->termios->c_cc[VSTART] = tmp.t_startc;
        tty->termios->c_cc[VSTOP] = tmp.t_stopc;
        tty->termios->c_cc[VEOF] = tmp.t_eofc;
        tty->termios->c_cc[VEOL2] = tmp.t_brkc; /* what is brkc anyway? */
+       mutex_unlock(&tty->termios_mutex);
        return 0;
 }
 #endif
@@ -690,6 +742,7 @@ static int get_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
 {
        struct ltchars tmp;
 
+       mutex_lock(&tty->termios_mutex);
        tmp.t_suspc = tty->termios->c_cc[VSUSP];
        /* what is dsuspc anyway? */
        tmp.t_dsuspc = tty->termios->c_cc[VSUSP];
@@ -698,6 +751,7 @@ static int get_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
        tmp.t_flushc = tty->termios->c_cc[VEOL2];
        tmp.t_werasc = tty->termios->c_cc[VWERASE];
        tmp.t_lnextc = tty->termios->c_cc[VLNEXT];
+       mutex_unlock(&tty->termios_mutex);
        return copy_to_user(ltchars, &tmp, sizeof(tmp)) ? -EFAULT : 0;
 }
 
@@ -708,6 +762,7 @@ static int set_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
        if (copy_from_user(&tmp, ltchars, sizeof(tmp)))
                return -EFAULT;
 
+       mutex_lock(&tty->termios_mutex);
        tty->termios->c_cc[VSUSP] = tmp.t_suspc;
        /* what is dsuspc anyway? */
        tty->termios->c_cc[VEOL2] = tmp.t_dsuspc;
@@ -716,6 +771,7 @@ static int set_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars)
        tty->termios->c_cc[VEOL2] = tmp.t_flushc;
        tty->termios->c_cc[VWERASE] = tmp.t_werasc;
        tty->termios->c_cc[VLNEXT] = tmp.t_lnextc;
+       mutex_unlock(&tty->termios_mutex);
        return 0;
 }
 #endif
@@ -732,8 +788,8 @@ static int send_prio_char(struct tty_struct *tty, char ch)
 {
        int     was_stopped = tty->stopped;
 
-       if (tty->driver->send_xchar) {
-               tty->driver->send_xchar(tty, ch);
+       if (tty->ops->send_xchar) {
+               tty->ops->send_xchar(tty, ch);
                return 0;
        }
 
@@ -742,13 +798,40 @@ static int send_prio_char(struct tty_struct *tty, char ch)
 
        if (was_stopped)
                start_tty(tty);
-       tty->driver->write(tty, &ch, 1);
+       tty->ops->write(tty, &ch, 1);
        if (was_stopped)
                stop_tty(tty);
        tty_write_unlock(tty);
        return 0;
 }
 
+/**
+ *     tty_change_softcar      -       carrier change ioctl helper
+ *     @tty: tty to update
+ *     @arg: enable/disable CLOCAL
+ *
+ *     Perform a change to the CLOCAL state and call into the driver
+ *     layer to make it visible. All done with the termios mutex
+ */
+
+static int tty_change_softcar(struct tty_struct *tty, int arg)
+{
+       int ret = 0;
+       int bit = arg ? CLOCAL : 0;
+       struct ktermios old;
+
+       mutex_lock(&tty->termios_mutex);
+       old = *tty->termios;
+       tty->termios->c_cflag &= ~CLOCAL;
+       tty->termios->c_cflag |= bit;
+       if (tty->ops->set_termios)
+               tty->ops->set_termios(tty, &old);
+       if ((tty->termios->c_cflag & CLOCAL) != bit)
+               ret = -EINVAL;
+       mutex_unlock(&tty->termios_mutex);
+       return ret;
+}
+
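
tty_change_softcar() is the helper that TIOCSSOFTCAR is rerouted to further down: CLOCAL is flipped under termios_mutex, the driver sees the change through set_termios(), and -EINVAL is returned if the driver refused it. Nothing changes for userspace; a minimal caller might look like this (the device path is only an example):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            int on = 1;     /* non-zero: set CLOCAL (ignore modem carrier) */
            int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY); /* example device */

            if (fd < 0 || ioctl(fd, TIOCSSOFTCAR, &on) < 0)
                    perror("TIOCSSOFTCAR");
            if (fd >= 0)
                    close(fd);
            return 0;
    }
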
 /**
  *     tty_mode_ioctl          -       mode related ioctls
  *     @tty: tty for the ioctl
@@ -859,12 +942,7 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
        case TIOCSSOFTCAR:
                if (get_user(arg, (unsigned int __user *) arg))
                        return -EFAULT;
-               mutex_lock(&tty->termios_mutex);
-               tty->termios->c_cflag =
-                       ((tty->termios->c_cflag & ~CLOCAL) |
-                        (arg ? CLOCAL : 0));
-               mutex_unlock(&tty->termios_mutex);
-               return 0;
+               return tty_change_softcar(tty, arg);
        default:
                return -ENOIOCTLCMD;
        }
@@ -889,8 +967,7 @@ int tty_perform_flush(struct tty_struct *tty, unsigned long arg)
                        ld->flush_buffer(tty);
                /* fall through */
        case TCOFLUSH:
-               if (tty->driver->flush_buffer)
-                       tty->driver->flush_buffer(tty);
+               tty_driver_flush_buffer(tty);
                break;
        default:
                tty_ldisc_deref(ld);
@@ -905,6 +982,7 @@ int n_tty_ioctl(struct tty_struct *tty, struct file *file,
                       unsigned int cmd, unsigned long arg)
 {
        struct tty_struct *real_tty;
+       unsigned long flags;
        int retval;
 
        if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
@@ -946,9 +1024,7 @@ int n_tty_ioctl(struct tty_struct *tty, struct file *file,
        case TCFLSH:
                return tty_perform_flush(tty, arg);
        case TIOCOUTQ:
-               return put_user(tty->driver->chars_in_buffer ?
-                               tty->driver->chars_in_buffer(tty) : 0,
-                               (int __user *) arg);
+               return put_user(tty_chars_in_buffer(tty), (int __user *) arg);
        case TIOCINQ:
                retval = tty->read_cnt;
                if (L_ICANON(tty))
@@ -963,6 +1039,7 @@ int n_tty_ioctl(struct tty_struct *tty, struct file *file,
                        return -ENOTTY;
                if (get_user(pktmode, (int __user *) arg))
                        return -EFAULT;
+               spin_lock_irqsave(&tty->ctrl_lock, flags);
                if (pktmode) {
                        if (!tty->packet) {
                                tty->packet = 1;
@@ -970,6 +1047,7 @@ int n_tty_ioctl(struct tty_struct *tty, struct file *file,
                        }
                } else
                        tty->packet = 0;
+               spin_unlock_irqrestore(&tty->ctrl_lock, flags);
                return 0;
        }
        default:
index 8de6b95aeb844cd03468d86f8643d2f65ed18acf..3d3e1c2b310f0abff4e60fdf6b6faf5fc7a322d0 100644
@@ -628,13 +628,13 @@ static int viotty_write(struct tty_struct *tty, const unsigned char *buf,
 /*
  * TTY put_char method
  */
-static void viotty_put_char(struct tty_struct *tty, unsigned char ch)
+static int viotty_put_char(struct tty_struct *tty, unsigned char ch)
 {
        struct port_info *pi;
 
        pi = get_port_data(tty);
        if (pi == NULL)
-               return;
+               return 0;
 
        /* This will append '\r' as well if the char is '\n' */
        if (viochar_is_console(pi))
@@ -642,6 +642,7 @@ static void viotty_put_char(struct tty_struct *tty, unsigned char ch)
 
        if (viopath_isactive(pi->lp))
                internal_write(pi, &ch, 1);
+       return 1;
 }
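
put_char() changes type here from void to int: the return value is the number of characters accepted (0 when nothing could be taken, 1 otherwise), so callers can tell whether the character actually went anywhere. A skeletal driver method following the new contract (the body is illustrative only):

    #include <linux/tty.h>
    #include <linux/tty_driver.h>

    /* Skeleton of the new put_char contract: report how many characters
     * (0 or 1) were accepted. */
    static int example_put_char(struct tty_struct *tty, unsigned char ch)
    {
            if (!tty->driver_data)          /* no device state: accept nothing */
                    return 0;
            /* ... queue 'ch' to the hardware here ... */
            return 1;
    }
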
 
 /*
@@ -704,8 +705,11 @@ static int viotty_ioctl(struct tty_struct *tty, struct file *file,
        case KDSKBLED:
                return 0;
        }
-
-       return n_tty_ioctl(tty, file, cmd, arg);
+       /* FIXME: WTF is this being called for ??? */
+       lock_kernel();
+       ret = n_tty_ioctl(tty, file, cmd, arg);
+       unlock_kernel();
+       return ret;
 }
 
 /*
index db7a731e2362dc747925991feb0fb06d3a86785c..58aad63831f4da854216bde2cd7b78ca87c0e7c0 100644
@@ -249,6 +249,7 @@ static int proc_viotape_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations proc_viotape_operations = {
+       .owner          = THIS_MODULE,
        .open           = proc_viotape_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -915,7 +916,6 @@ static struct vio_driver viotape_driver = {
 int __init viotap_init(void)
 {
        int ret;
-       struct proc_dir_entry *e;
 
        if (!firmware_has_feature(FW_FEATURE_ISERIES))
                return -ENODEV;
@@ -968,11 +968,8 @@ int __init viotap_init(void)
        if (ret)
                goto unreg_class;
 
-       e = create_proc_entry("iSeries/viotape", S_IFREG|S_IRUGO, NULL);
-       if (e) {
-               e->owner = THIS_MODULE;
-               e->proc_fops = &proc_viotape_operations;
-       }
+       proc_create("iSeries/viotape", S_IFREG|S_IRUGO, NULL,
+                   &proc_viotape_operations);
 
        return 0;
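
The open-coded create_proc_entry() plus hand-assignment of ->owner and ->proc_fops is replaced by proc_create(), which takes the file_operations up front (with .owner now carried in the structure, as added above) and so never exposes an entry without its fops. The general shape of the conversion, sketched with placeholder names:

    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>
    #include <linux/stat.h>

    static int example_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "example\n");
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, NULL);
    }

    static const struct file_operations example_proc_fops = {
            .owner          = THIS_MODULE,
            .open           = example_open,
            .read           = seq_read,
            .llseek         = seq_lseek,
            .release        = single_release,
    };

    static int __init example_init(void)
    {
            /* "driver/example" is a placeholder /proc path. */
            proc_create("driver/example", S_IFREG | S_IRUGO, NULL,
                        &example_proc_fops);
            return 0;
    }
    module_init(example_init);
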
 
index 9b58b894f823764ac9c0e3d3620b4a2080404fba..e458b08139afb6fd638b125334ca1b602eb8e136 100644
@@ -301,7 +301,7 @@ static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
        d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
        s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr));
        scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row);
-       scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_video_erase_char,
+       scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_scrl_erase_char,
                    vc->vc_size_row * nr);
 }
 
@@ -319,7 +319,7 @@ static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr)
        s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t);
        step = vc->vc_cols * nr;
        scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row);
-       scr_memsetw(s, vc->vc_video_erase_char, 2 * step);
+       scr_memsetw(s, vc->vc_scrl_erase_char, 2 * step);
 }
 
 static void do_update_region(struct vc_data *vc, unsigned long start, int count)
@@ -400,7 +400,7 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink,
  *  Bit 7   : blink
  */
        {
-       u8 a = vc->vc_color;
+       u8 a = _color;
        if (!vc->vc_can_do_color)
                return _intensity |
                       (_italic ? 2 : 0) |
@@ -434,6 +434,7 @@ static void update_attr(struct vc_data *vc)
                      vc->vc_blink, vc->vc_underline,
                      vc->vc_reverse ^ vc->vc_decscnm, vc->vc_italic);
        vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm, 0) << 8) | ' ';
+       vc->vc_scrl_erase_char = (build_attr(vc, vc->vc_def_color, 1, false, false, false, false) << 8) | ' ';
 }
 
 /* Note: inverting the screen twice should revert to the original state */
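
update_attr() now also precomputes vc_scrl_erase_char from the console's default colour (vc_def_color) rather than the current drawing attribute, and scrup()/scrdown() blank the freed rows with it, so scrolling no longer paints new blank lines with whatever attribute was last set. Both erase characters use the usual attribute-in-the-high-byte cell layout:

    /* Illustrative: a console cell is (attribute << 8) | character, so an
     * erase cell is simply the chosen attribute over a space. */
    static inline unsigned short example_erase_cell(unsigned char attr)
    {
            return (attr << 8) | ' ';
    }
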
@@ -908,15 +909,21 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines)
 
        if (vc->vc_tty) {
                struct winsize ws, *cws = &vc->vc_tty->winsize;
+               unsigned long flags;
 
                memset(&ws, 0, sizeof(ws));
                ws.ws_row = vc->vc_rows;
                ws.ws_col = vc->vc_cols;
                ws.ws_ypixel = vc->vc_scan_lines;
+
+               mutex_lock(&vc->vc_tty->termios_mutex);
+               spin_lock_irqsave(&vc->vc_tty->ctrl_lock, flags);
                if ((ws.ws_row != cws->ws_row || ws.ws_col != cws->ws_col) &&
                    vc->vc_tty->pgrp)
                        kill_pgrp(vc->vc_tty->pgrp, SIGWINCH, 1);
+               spin_unlock_irqrestore(&vc->vc_tty->ctrl_lock, flags);
                *cws = ws;
+               mutex_unlock(&vc->vc_tty->termios_mutex);
        }
 
        if (CON_IS_VISIBLE(vc))
@@ -2054,6 +2061,7 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
        unsigned long draw_from = 0, draw_to = 0;
        struct vc_data *vc;
        unsigned char vc_attr;
+       struct vt_notifier_param param;
        uint8_t rescan;
        uint8_t inverse;
        uint8_t width;
@@ -2113,6 +2121,8 @@ static int do_con_write(struct tty_struct *tty, const unsigned char *buf, int co
        if (IS_FG(vc))
                hide_cursor(vc);
 
+       param.vc = vc;
+
        while (!tty->stopped && count) {
                int orig = *buf;
                c = orig;
@@ -2201,6 +2211,11 @@ rescan_last_byte:
                    tc = vc->vc_translate[vc->vc_toggle_meta ? (c | 0x80) : c];
                }
 
+               param.c = tc;
+               if (atomic_notifier_call_chain(&vt_notifier_list, VT_PREWRITE,
+                                       &param) == NOTIFY_STOP)
+                       continue;
+
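
The new VT_PREWRITE notification fires for every character before it is drawn, and a listener that returns NOTIFY_STOP makes the console skip that character. A hedged sketch of a consumer (header locations and the BEL filtering are assumptions for illustration; the param fields .vc and .c are as set in this hunk):

    #include <linux/notifier.h>
    #include <linux/vt.h>
    #include <linux/vt_kern.h>      /* struct vt_notifier_param, register_vt_notifier */

    /* Illustrative VT_PREWRITE consumer: swallow BEL characters. */
    static int example_vt_notify(struct notifier_block *nb,
                                 unsigned long code, void *data)
    {
            struct vt_notifier_param *param = data;

            if (code == VT_PREWRITE && param->c == 0x07)
                    return NOTIFY_STOP;     /* console will not draw this char */
            return NOTIFY_DONE;
    }

    static struct notifier_block example_vt_nb = {
            .notifier_call = example_vt_notify,
    };

    /* somewhere in init code: register_vt_notifier(&example_vt_nb); */
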
                 /* If the original code was a control character we
                  * only allow a glyph to be displayed if the code is
                  * not normally used (such as for cursor movement) or
@@ -2532,6 +2547,9 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
        if (get_user(type, p))
                return -EFAULT;
        ret = 0;
+
+       lock_kernel();
+
        switch (type)
        {
                case TIOCL_SETSEL:
@@ -2551,7 +2569,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
                        ret = sel_loadlut(p);
                        break;
                case TIOCL_GETSHIFTSTATE:
-                       
+
        /*
         * Make it possible to react to Shift+Mousebutton.
         * Note that 'shift_state' is an undocumented
@@ -2606,6 +2624,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
                        ret = -EINVAL;
                        break;
        }
+       unlock_kernel();
        return ret;
 }
 
@@ -2623,11 +2642,11 @@ static int con_write(struct tty_struct *tty, const unsigned char *buf, int count
        return retval;
 }
 
-static void con_put_char(struct tty_struct *tty, unsigned char ch)
+static int con_put_char(struct tty_struct *tty, unsigned char ch)
 {
        if (in_interrupt())
-               return; /* n_r3964 calls put_char() from interrupt context */
-       do_con_write(tty, &ch, 1);
+               return 0;       /* n_r3964 calls put_char() from interrupt context */
+       return do_con_write(tty, &ch, 1);
 }
 
 static int con_write_room(struct tty_struct *tty)
@@ -3820,7 +3839,7 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op)
                goto out;
 
        c = (font.width+7)/8 * 32 * font.charcount;
-       
+
        if (op->data && font.charcount > op->charcount)
                rc = -ENOSPC;
        if (!(op->flags & KD_FONT_FLAG_OLD)) {
@@ -3985,6 +4004,7 @@ u16 screen_glyph(struct vc_data *vc, int offset)
                c |= 0x100;
        return c;
 }
+EXPORT_SYMBOL_GPL(screen_glyph);
 
 /* used by vcs - note the word offset */
 unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed)
index e6f89e8b9258429fc99754ec492918d94089bcfd..3211afd9d57e4080eb610c7bde54098a0c7bf117 100644
@@ -373,11 +373,17 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
        unsigned char ucval;
        void __user *up = (void __user *)arg;
        int i, perm;
-       
+       int ret = 0;
+
        console = vc->vc_num;
 
-       if (!vc_cons_allocated(console))        /* impossible? */
-               return -ENOIOCTLCMD;
+       lock_kernel();
+
+       if (!vc_cons_allocated(console)) {      /* impossible? */
+               ret = -ENOIOCTLCMD;
+               goto out;
+       }
+
 
        /*
         * To have permissions to do most of the vt ioctls, we either have
@@ -391,15 +397,15 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
        switch (cmd) {
        case KIOCSOUND:
                if (!perm)
-                       return -EPERM;
+                       goto eperm;
                if (arg)
                        arg = CLOCK_TICK_RATE / arg;
                kd_mksound(arg, 0);
-               return 0;
+               break;
 
        case KDMKTONE:
                if (!perm)
-                       return -EPERM;
+                       goto eperm;
        {
                unsigned int ticks, count;
                
@@ -412,7 +418,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                if (count)
                        count = CLOCK_TICK_RATE / count;
                kd_mksound(count, ticks);
-               return 0;
+               break;
        }
 
        case KDGKBTYPE:
@@ -435,14 +441,18 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                 * KDADDIO and KDDELIO may be able to add ports beyond what
                 * we reject here, but to be safe...
                 */
-               if (arg < GPFIRST || arg > GPLAST)
-                       return -EINVAL;
-               return sys_ioperm(arg, 1, (cmd == KDADDIO)) ? -ENXIO : 0;
+               if (arg < GPFIRST || arg > GPLAST) {
+                       ret = -EINVAL;
+                       break;
+               }
+               ret = sys_ioperm(arg, 1, (cmd == KDADDIO)) ? -ENXIO : 0;
+               break;
 
        case KDENABIO:
        case KDDISABIO:
-               return sys_ioperm(GPFIRST, GPNUM,
+               ret = sys_ioperm(GPFIRST, GPNUM,
                                  (cmd == KDENABIO)) ? -ENXIO : 0;
+               break;
 #endif
 
        /* Linux m68k/i386 interface for setting the keyboard delay/repeat rate */
@@ -450,19 +460,20 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
        case KDKBDREP:
        {
                struct kbd_repeat kbrep;
-               int err;
                
                if (!capable(CAP_SYS_TTY_CONFIG))
-                       return -EPERM;
+                       goto eperm;
 
-               if (copy_from_user(&kbrep, up, sizeof(struct kbd_repeat)))
-                       return -EFAULT;
-               err = kbd_rate(&kbrep);
-               if (err)
-                       return err;
+               if (copy_from_user(&kbrep, up, sizeof(struct kbd_repeat))) {
+                       ret = -EFAULT;
+                       break;
+               }
+               ret = kbd_rate(&kbrep);
+               if (ret)
+                       break;
                if (copy_to_user(up, &kbrep, sizeof(struct kbd_repeat)))
-                       return -EFAULT;
-               return 0;
+                       ret = -EFAULT;
+               break;
        }
 
        case KDSETMODE:
@@ -475,7 +486,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                 * need to restore their engine state. --BenH
                 */
                if (!perm)
-                       return -EPERM;
+                       goto eperm;
                switch (arg) {
                case KD_GRAPHICS:
                        break;
@@ -485,13 +496,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                case KD_TEXT:
                        break;
                default:
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto out;
                }
                if (vc->vc_mode == (unsigned char) arg)
-                       return 0;
+                       break;
                vc->vc_mode = (unsigned char) arg;
                if (console != fg_console)
-                       return 0;
+                       break;
                /*
                 * explicitly blank/unblank the screen if switching modes
                 */
@@ -501,7 +513,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                else
                        do_blank_screen(1);
                release_console_sem();
-               return 0;
+               break;
 
        case KDGETMODE:
                ucval = vc->vc_mode;
@@ -513,11 +525,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                 * these work like a combination of mmap and KDENABIO.
                 * this could be easily finished.
                 */
-               return -EINVAL;
+               ret = -EINVAL;
+               break;
 
        case KDSKBMODE:
                if (!perm)
-                       return -EPERM;
+                       goto eperm;
                switch(arg) {
                  case K_RAW:
                        kbd->kbdmode = VC_RAW;
@@ -534,10 +547,11 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                        compute_shiftstate();
                        break;
                  default:
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto out;
                }
                tty_ldisc_flush(tty);
-               return 0;
+               break;
 
        case KDGKBMODE:
                ucval = ((kbd->kbdmode == VC_RAW) ? K_RAW :
@@ -557,28 +571,32 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                        set_vc_kbd_mode(kbd, VC_META);
                        break;
                  default:
-                       return -EINVAL;
+                       ret = -EINVAL;
                }
-               return 0;
+               break;
 
        case KDGKBMETA:
                ucval = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT);
        setint:
-               return put_user(ucval, (int __user *)arg); 
+               ret = put_user(ucval, (int __user *)arg);
+               break;
 
        case KDGETKEYCODE:
        case KDSETKEYCODE:
                if(!capable(CAP_SYS_TTY_CONFIG))
-                       perm=0;
-               return do_kbkeycode_ioctl(cmd, up, perm);
+                       perm = 0;
+               ret = do_kbkeycode_ioctl(cmd, up, perm);
+               break;
 
        case KDGKBENT:
        case KDSKBENT:
-               return do_kdsk_ioctl(cmd, up, perm, kbd);
+               ret = do_kdsk_ioctl(cmd, up, perm, kbd);
+               break;
 
        case KDGKBSENT:
        case KDSKBSENT:
-               return do_kdgkb_ioctl(cmd, up, perm);
+               ret = do_kdgkb_ioctl(cmd, up, perm);
+               break;
 
        case KDGKBDIACR:
        {
@@ -586,26 +604,31 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                struct kbdiacr diacr;
                int i;
 
-               if (put_user(accent_table_size, &a->kb_cnt))
-                       return -EFAULT;
+               if (put_user(accent_table_size, &a->kb_cnt)) {
+                       ret = -EFAULT;
+                       break;
+               }
                for (i = 0; i < accent_table_size; i++) {
                        diacr.diacr = conv_uni_to_8bit(accent_table[i].diacr);
                        diacr.base = conv_uni_to_8bit(accent_table[i].base);
                        diacr.result = conv_uni_to_8bit(accent_table[i].result);
-                       if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr)))
-                               return -EFAULT;
+                       if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr))) {
+                               ret = -EFAULT;
+                               break;
+                       }
                }
-               return 0;
+               break;
        }
        case KDGKBDIACRUC:
        {
                struct kbdiacrsuc __user *a = up;
 
                if (put_user(accent_table_size, &a->kb_cnt))
-                       return -EFAULT;
-               if (copy_to_user(a->kbdiacruc, accent_table, accent_table_size*sizeof(struct kbdiacruc)))
-                       return -EFAULT;
-               return 0;
+                       ret = -EFAULT;
+               else if (copy_to_user(a->kbdiacruc, accent_table,
+                               accent_table_size*sizeof(struct kbdiacruc)))
+                       ret = -EFAULT;
+               break;
        }
 
        case KDSKBDIACR:
@@ -616,20 +639,26 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                int i;
 
                if (!perm)
-                       return -EPERM;
-               if (get_user(ct,&a->kb_cnt))
-                       return -EFAULT;
-               if (ct >= MAX_DIACR)
-                       return -EINVAL;
+                       goto eperm;
+               if (get_user(ct,&a->kb_cnt)) {
+                       ret = -EFAULT;
+                       break;
+               }
+               if (ct >= MAX_DIACR) {
+                       ret = -EINVAL;
+                       break;
+               }
                accent_table_size = ct;
                for (i = 0; i < ct; i++) {
-                       if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr)))
-                               return -EFAULT;
+                       if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr))) {
+                               ret = -EFAULT;
+                               break;
+                       }
                        accent_table[i].diacr = conv_8bit_to_uni(diacr.diacr);
                        accent_table[i].base = conv_8bit_to_uni(diacr.base);
                        accent_table[i].result = conv_8bit_to_uni(diacr.result);
                }
-               return 0;
+               break;
        }
 
        case KDSKBDIACRUC:
@@ -638,15 +667,19 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                unsigned int ct;
 
                if (!perm)
-                       return -EPERM;
-               if (get_user(ct,&a->kb_cnt))
-                       return -EFAULT;
-               if (ct >= MAX_DIACR)
-                       return -EINVAL;
+                       goto eperm;
+               if (get_user(ct,&a->kb_cnt)) {
+                       ret = -EFAULT;
+                       break;
+               }
+               if (ct >= MAX_DIACR) {
+                       ret = -EINVAL;
+                       break;
+               }
                accent_table_size = ct;
                if (copy_from_user(accent_table, a->kbdiacruc, ct*sizeof(struct kbdiacruc)))
-                       return -EFAULT;
-               return 0;
+                       ret = -EFAULT;
+               break;
        }
 
        /* the ioctls below read/set the flags usually shown in the leds */
@@ -657,26 +690,29 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 
        case KDSKBLED:
                if (!perm)
-                       return -EPERM;
-               if (arg & ~0x77)
-                       return -EINVAL;
+                       goto eperm;
+               if (arg & ~0x77) {
+                       ret = -EINVAL;
+                       break;
+               }
                kbd->ledflagstate = (arg & 7);
                kbd->default_ledflagstate = ((arg >> 4) & 7);
                set_leds();
-               return 0;
+               break;
 
        /* the ioctls below only set the lights, not the functions */
        /* for those, see KDGKBLED and KDSKBLED above */
        case KDGETLED:
                ucval = getledstate();
        setchar:
-               return put_user(ucval, (char __user *)arg);
+               ret = put_user(ucval, (char __user *)arg);
+               break;
 
        case KDSETLED:
                if (!perm)
-                 return -EPERM;
+                       goto eperm;
                setledstate(kbd, arg);
-               return 0;
+               break;
 
        /*
         * A process can indicate its willingness to accept signals
@@ -688,16 +724,17 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
        case KDSIGACCEPT:
        {
                if (!perm || !capable(CAP_KILL))
-                 return -EPERM;
+                       goto eperm;
                if (!valid_signal(arg) || arg < 1 || arg == SIGKILL)
-                 return -EINVAL;
-
-               spin_lock_irq(&vt_spawn_con.lock);
-               put_pid(vt_spawn_con.pid);
-               vt_spawn_con.pid = get_pid(task_pid(current));
-               vt_spawn_con.sig = arg;
-               spin_unlock_irq(&vt_spawn_con.lock);
-               return 0;
+                       ret = -EINVAL;
+               else {
+                       spin_lock_irq(&vt_spawn_con.lock);
+                       put_pid(vt_spawn_con.pid);
+                       vt_spawn_con.pid = get_pid(task_pid(current));
+                       vt_spawn_con.sig = arg;
+                       spin_unlock_irq(&vt_spawn_con.lock);
+               }
+               break;
        }
 
        case VT_SETMODE:
@@ -705,11 +742,15 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                struct vt_mode tmp;
 
                if (!perm)
-                       return -EPERM;
-               if (copy_from_user(&tmp, up, sizeof(struct vt_mode)))
-                       return -EFAULT;
-               if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS)
-                       return -EINVAL;
+                       goto eperm;
+               if (copy_from_user(&tmp, up, sizeof(struct vt_mode))) {
+                       ret = -EFAULT;
+                       goto out;
+               }
+               if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) {
+                       ret = -EINVAL;
+                       goto out;
+               }
                acquire_console_sem();
                vc->vt_mode = tmp;
                /* the frsig is ignored, so we set it to 0 */
@@ -719,7 +760,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                /* no switch is required -- saw@shade.msu.ru */
                vc->vt_newvt = -1;
                release_console_sem();
-               return 0;
+               break;
        }
 
        case VT_GETMODE:
@@ -732,7 +773,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                release_console_sem();
 
                rc = copy_to_user(up, &tmp, sizeof(struct vt_mode));
-               return rc ? -EFAULT : 0;
+               if (rc)
+                       ret = -EFAULT;
+               break;
        }
 
        /*
@@ -746,12 +789,16 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                unsigned short state, mask;
 
                if (put_user(fg_console + 1, &vtstat->v_active))
-                       return -EFAULT;
-               state = 1;      /* /dev/tty0 is always open */
-               for (i = 0, mask = 2; i < MAX_NR_CONSOLES && mask; ++i, mask <<= 1)
-                       if (VT_IS_IN_USE(i))
-                               state |= mask;
-               return put_user(state, &vtstat->v_state);
+                       ret = -EFAULT;
+               else {
+                       state = 1;      /* /dev/tty0 is always open */
+                       for (i = 0, mask = 2; i < MAX_NR_CONSOLES && mask;
+                                                       ++i, mask <<= 1)
+                               if (VT_IS_IN_USE(i))
+                                       state |= mask;
+                       ret = put_user(state, &vtstat->v_state);
+               }
+               break;
        }
 
        /*
@@ -771,27 +818,31 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
         */
        case VT_ACTIVATE:
                if (!perm)
-                       return -EPERM;
+                       goto eperm;
                if (arg == 0 || arg > MAX_NR_CONSOLES)
-                       return -ENXIO;
-               arg--;
-               acquire_console_sem();
-               i = vc_allocate(arg);
-               release_console_sem();
-               if (i)
-                       return i;
-               set_console(arg);
-               return 0;
+                       ret = -ENXIO;
+               else {
+                       arg--;
+                       acquire_console_sem();
+                       ret = vc_allocate(arg);
+                       release_console_sem();
+                       if (ret)
+                               break;
+                       set_console(arg);
+               }
+               break;
 
        /*
         * wait until the specified VT has been activated
         */
        case VT_WAITACTIVE:
                if (!perm)
-                       return -EPERM;
+                       goto eperm;
                if (arg == 0 || arg > MAX_NR_CONSOLES)
-                       return -ENXIO;
-               return vt_waitactive(arg-1);
+                       ret = -ENXIO;
+               else
+                       ret = vt_waitactive(arg - 1);
+               break;
 
        /*
         * If a vt is under process control, the kernel will not switch to it
@@ -805,10 +856,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
         */
        case VT_RELDISP:
                if (!perm)
-                       return -EPERM;
-               if (vc->vt_mode.mode != VT_PROCESS)
-                       return -EINVAL;
+                       goto eperm;
 
+               if (vc->vt_mode.mode != VT_PROCESS) {
+                       ret = -EINVAL;
+                       break;
+               }
                /*
                 * Switching-from response
                 */
@@ -829,10 +882,10 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                                int newvt;
                                newvt = vc->vt_newvt;
                                vc->vt_newvt = -1;
-                               i = vc_allocate(newvt);
-                               if (i) {
+                               ret = vc_allocate(newvt);
+                               if (ret) {
                                        release_console_sem();
-                                       return i;
+                                       break;
                                }
                                /*
                                 * When we actually do the console switch,
@@ -841,31 +894,27 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                                 */
                                complete_change_console(vc_cons[newvt].d);
                        }
-               }
-
-               /*
-                * Switched-to response
-                */
-               else
-               {
+               } else {
+                       /*
+                        * Switched-to response
+                        */
                        /*
                         * If it's just an ACK, ignore it
                         */
-                       if (arg != VT_ACKACQ) {
-                               release_console_sem();
-                               return -EINVAL;
-                       }
+                       if (arg != VT_ACKACQ)
+                               ret = -EINVAL;
                }
                release_console_sem();
-
-               return 0;
+               break;
 
         /*
          * Disallocate memory associated to VT (but leave VT1)
          */
         case VT_DISALLOCATE:
-               if (arg > MAX_NR_CONSOLES)
-                       return -ENXIO;
+               if (arg > MAX_NR_CONSOLES) {
+                       ret = -ENXIO;
+                       break;
+               }
                if (arg == 0) {
                    /* deallocate all unused consoles, but leave 0 */
                        acquire_console_sem();
@@ -877,14 +926,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                        /* deallocate a single console, if possible */
                        arg--;
                        if (VT_BUSY(arg))
-                               return -EBUSY;
-                       if (arg) {                            /* leave 0 */
+                               ret = -EBUSY;
+                       else if (arg) {                       /* leave 0 */
                                acquire_console_sem();
                                vc_deallocate(arg);
                                release_console_sem();
                        }
                }
-               return 0;
+               break;
 
        case VT_RESIZE:
        {
@@ -893,21 +942,21 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 
                ushort ll,cc;
                if (!perm)
-                       return -EPERM;
+                       goto eperm;
                if (get_user(ll, &vtsizes->v_rows) ||
                    get_user(cc, &vtsizes->v_cols))
-                       return -EFAULT;
-
-               for (i = 0; i < MAX_NR_CONSOLES; i++) {
-                       vc = vc_cons[i].d;
+                       ret = -EFAULT;
+               else {
+                       for (i = 0; i < MAX_NR_CONSOLES; i++) {
+                               vc = vc_cons[i].d;
 
-                       if (vc) {
-                               vc->vc_resize_user = 1;
-                               vc_lock_resize(vc_cons[i].d, cc, ll);
+                               if (vc) {
+                                       vc->vc_resize_user = 1;
+                                       vc_lock_resize(vc_cons[i].d, cc, ll);
+                               }
                        }
                }
-
-               return 0;
+               break;
        }
 
        case VT_RESIZEX:
@@ -915,10 +964,13 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                struct vt_consize __user *vtconsize = up;
                ushort ll,cc,vlin,clin,vcol,ccol;
                if (!perm)
-                       return -EPERM;
+                       goto eperm;
                if (!access_ok(VERIFY_READ, vtconsize,
-                               sizeof(struct vt_consize)))
-                       return -EFAULT;
+                               sizeof(struct vt_consize))) {
+                       ret = -EFAULT;
+                       break;
+               }
+               /* FIXME: Should check the copies properly */
                __get_user(ll, &vtconsize->v_rows);
                __get_user(cc, &vtconsize->v_cols);
                __get_user(vlin, &vtconsize->v_vlin);
@@ -928,21 +980,28 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                vlin = vlin ? vlin : vc->vc_scan_lines;
                if (clin) {
                        if (ll) {
-                               if (ll != vlin/clin)
-                                       return -EINVAL; /* Parameters don't add up */
+                               if (ll != vlin/clin) {
+                                       /* Parameters don't add up */
+                                       ret = -EINVAL;
+                                       break;
+                               }
                        } else 
                                ll = vlin/clin;
                }
                if (vcol && ccol) {
                        if (cc) {
-                               if (cc != vcol/ccol)
-                                       return -EINVAL;
+                               if (cc != vcol/ccol) {
+                                       ret = -EINVAL;
+                                       break;
+                               }
                        } else
                                cc = vcol/ccol;
                }
 
-               if (clin > 32)
-                       return -EINVAL;
+               if (clin > 32) {
+                       ret = -EINVAL;
+                       break;
+               }
                    
                for (i = 0; i < MAX_NR_CONSOLES; i++) {
                        if (!vc_cons[i].d)
@@ -956,19 +1015,20 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                        vc_resize(vc_cons[i].d, cc, ll);
                        release_console_sem();
                }
-               return 0;
+               break;
        }
 
        case PIO_FONT: {
                if (!perm)
-                       return -EPERM;
+                       goto eperm;
                op.op = KD_FONT_OP_SET;
                op.flags = KD_FONT_FLAG_OLD | KD_FONT_FLAG_DONT_RECALC; /* Compatibility */
                op.width = 8;
                op.height = 0;
                op.charcount = 256;
                op.data = up;
-               return con_font_op(vc_cons[fg_console].d, &op);
+               ret = con_font_op(vc_cons[fg_console].d, &op);
+               break;
        }
 
        case GIO_FONT: {
@@ -978,100 +1038,124 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
                op.height = 32;
                op.charcount = 256;
                op.data = up;
-               return con_font_op(vc_cons[fg_console].d, &op);
+               ret = con_font_op(vc_cons[fg_console].d, &op);
+               break;
        }
 
        case PIO_CMAP:
                 if (!perm)
-                       return -EPERM;
-                return con_set_cmap(up);
+                       ret = -EPERM;
+               else
+                       ret = con_set_cmap(up);
+               break;
 
        case GIO_CMAP:
-                return con_get_cmap(up);
+                ret = con_get_cmap(up);
+               break;
 
        case PIO_FONTX:
        case GIO_FONTX:
-               return do_fontx_ioctl(cmd, up, perm, &op);
+               ret = do_fontx_ioctl(cmd, up, perm, &op);
+               break;
 
        case PIO_FONTRESET:
        {
                if (!perm)
-                       return -EPERM;
+                       goto eperm;
 
 #ifdef BROKEN_GRAPHICS_PROGRAMS
                /* With BROKEN_GRAPHICS_PROGRAMS defined, the default
                   font is not saved. */
-               return -ENOSYS;
+               ret = -ENOSYS;
+               break;
 #else
                {
                op.op = KD_FONT_OP_SET_DEFAULT;
                op.data = NULL;
-               i = con_font_op(vc_cons[fg_console].d, &op);
-               if (i)
-                       return i;
+               ret = con_font_op(vc_cons[fg_console].d, &op);
+               if (ret)
+                       break;
                con_set_default_unimap(vc_cons[fg_console].d);
-               return 0;
+               break;
                }
 #endif
        }
 
        case KDFONTOP: {
-               if (copy_from_user(&op, up, sizeof(op)))
-                       return -EFAULT;
+               if (copy_from_user(&op, up, sizeof(op))) {
+                       ret = -EFAULT;
+                       break;
+               }
                if (!perm && op.op != KD_FONT_OP_GET)
-                       return -EPERM;
-               i = con_font_op(vc, &op);
-               if (i) return i;
+                       goto eperm;
+               ret = con_font_op(vc, &op);
+               if (ret)
+                       break;
                if (copy_to_user(up, &op, sizeof(op)))
-                       return -EFAULT;
-               return 0;
+                       ret = -EFAULT;
+               break;
        }
 
        case PIO_SCRNMAP:
                if (!perm)
-                       return -EPERM;
-               return con_set_trans_old(up);
+                       ret = -EPERM;
+               else
+                       ret = con_set_trans_old(up);
+               break;
 
        case GIO_SCRNMAP:
-               return con_get_trans_old(up);
+               ret = con_get_trans_old(up);
+               break;
 
        case PIO_UNISCRNMAP:
                if (!perm)
-                       return -EPERM;
-               return con_set_trans_new(up);
+                       ret = -EPERM;
+               else
+                       ret = con_set_trans_new(up);
+               break;
 
        case GIO_UNISCRNMAP:
-               return con_get_trans_new(up);
+               ret = con_get_trans_new(up);
+               break;
 
        case PIO_UNIMAPCLR:
              { struct unimapinit ui;
                if (!perm)
-                       return -EPERM;
-               i = copy_from_user(&ui, up, sizeof(struct unimapinit));
-               if (i) return -EFAULT;
-               con_clear_unimap(vc, &ui);
-               return 0;
+                       goto eperm;
+               ret = copy_from_user(&ui, up, sizeof(struct unimapinit));
+               if (!ret)
+                       con_clear_unimap(vc, &ui);
+               break;
              }
 
        case PIO_UNIMAP:
        case GIO_UNIMAP:
-               return do_unimap_ioctl(cmd, up, perm, vc);
+               ret = do_unimap_ioctl(cmd, up, perm, vc);
+               break;
 
        case VT_LOCKSWITCH:
                if (!capable(CAP_SYS_TTY_CONFIG))
-                  return -EPERM;
+                       goto eperm;
                vt_dont_switch = 1;
-               return 0;
+               break;
        case VT_UNLOCKSWITCH:
                if (!capable(CAP_SYS_TTY_CONFIG))
-                  return -EPERM;
+                       goto eperm;
                vt_dont_switch = 0;
-               return 0;
+               break;
        case VT_GETHIFONTMASK:
-               return put_user(vc->vc_hi_font_mask, (unsigned short __user *)arg);
+               ret = put_user(vc->vc_hi_font_mask,
+                                       (unsigned short __user *)arg);
+               break;
        default:
-               return -ENOIOCTLCMD;
+               ret = -ENOIOCTLCMD;
        }
+out:
+       unlock_kernel();
+       return ret;
+eperm:
+       ret = -EPERM;
+       goto out;
 }
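
With the big kernel lock being pushed down into the individual handlers, vt_ioctl() now brackets its whole body with lock_kernel()/unlock_kernel(), so every former early return becomes "ret = ...; break;" or a jump to the shared out/eperm labels and the lock is dropped exactly once. The control shape in isolation, with a placeholder command number standing in for the real cases:

    #include <linux/errno.h>
    #include <linux/smp_lock.h>

    /* Skeleton of the converted ioctl: a single exit point so the BKL is
     * always released, plus a shared label for permission failures. */
    static int example_ioctl(unsigned int cmd, unsigned long arg, int perm)
    {
            int ret = 0;

            lock_kernel();
            switch (cmd) {
            case 0x4242:                    /* placeholder command */
                    if (!perm)
                            goto eperm;
                    /* ... do the work, setting ret on failure ... */
                    break;
            default:
                    ret = -ENOIOCTLCMD;
            }
    out:
            unlock_kernel();
            return ret;
    eperm:
            ret = -EPERM;
            goto out;
    }
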
 
 /*
index c159ae64eeb2cf791d8aa36036e0480b8d3cf487..5f076aef74fa96d95959f84931d6ac42974d8b01 100644
@@ -69,6 +69,15 @@ config CPU_FREQ_DEFAULT_GOV_PERFORMANCE
          the frequency statically to the highest frequency supported by
          the CPU.
 
+config CPU_FREQ_DEFAULT_GOV_POWERSAVE
+       bool "powersave"
+       depends on EMBEDDED
+       select CPU_FREQ_GOV_POWERSAVE
+       help
+         Use the CPUFreq governor 'powersave' as default. This sets
+         the frequency statically to the lowest frequency supported by
+         the CPU.
+
 config CPU_FREQ_DEFAULT_GOV_USERSPACE
        bool "userspace"
        select CPU_FREQ_GOV_USERSPACE
index 35a26a3e5f6804d977563ddd5ef03db8e8ea5f5c..7fce038fa57e6413892679f6c8ca7256aeedafa3 100644
@@ -118,9 +118,11 @@ static void handle_update(struct work_struct *work);
 static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
 static struct srcu_notifier_head cpufreq_transition_notifier_list;
 
+static bool init_cpufreq_transition_notifier_list_called;
 static int __init init_cpufreq_transition_notifier_list(void)
 {
        srcu_init_notifier_head(&cpufreq_transition_notifier_list);
+       init_cpufreq_transition_notifier_list_called = true;
        return 0;
 }
 pure_initcall(init_cpufreq_transition_notifier_list);
@@ -216,7 +218,7 @@ static void cpufreq_debug_disable_ratelimit(void)
 }
 
 void cpufreq_debug_printk(unsigned int type, const char *prefix,
-                                                       const char *fmt, ...)
+                       const char *fmt, ...)
 {
        char s[256];
        va_list args;
@@ -378,7 +380,7 @@ static struct cpufreq_governor *__find_governor(const char *str_governor)
 /**
  * cpufreq_parse_governor - parse a governor string
  */
-static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
+static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
                                struct cpufreq_governor **governor)
 {
        int err = -EINVAL;
@@ -446,7 +448,7 @@ extern struct sysdev_class cpu_sysdev_class;
 
 #define show_one(file_name, object)                    \
 static ssize_t show_##file_name                                \
-(struct cpufreq_policy * policy, char *buf)            \
+(struct cpufreq_policy *policy, char *buf)             \
 {                                                      \
        return sprintf (buf, "%u\n", policy->object);   \
 }
@@ -465,7 +467,7 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
  */
 #define store_one(file_name, object)                   \
 static ssize_t store_##file_name                                       \
-(struct cpufreq_policy * policy, const char *buf, size_t count)                \
+(struct cpufreq_policy *policy, const char *buf, size_t count)         \
 {                                                                      \
        unsigned int ret = -EINVAL;                                     \
        struct cpufreq_policy new_policy;                               \
@@ -490,8 +492,8 @@ store_one(scaling_max_freq,max);
 /**
  * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
  */
-static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
-                                                       char *buf)
+static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
+                                       char *buf)
 {
        unsigned int cur_freq = __cpufreq_get(policy->cpu);
        if (!cur_freq)
@@ -503,8 +505,7 @@ static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
 /**
  * show_scaling_governor - show the current policy for the specified CPU
  */
-static ssize_t show_scaling_governor (struct cpufreq_policy * policy,
-                                                       char *buf)
+static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
 {
        if(policy->policy == CPUFREQ_POLICY_POWERSAVE)
                return sprintf(buf, "powersave\n");
@@ -519,8 +520,8 @@ static ssize_t show_scaling_governor (struct cpufreq_policy * policy,
 /**
  * store_scaling_governor - store policy for the specified CPU
  */
-static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
-                                      const char *buf, size_t count)
+static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
+                                       const char *buf, size_t count)
 {
        unsigned int ret = -EINVAL;
        char    str_governor[16];
@@ -554,7 +555,7 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
 /**
  * show_scaling_driver - show the cpufreq driver currently loaded
  */
-static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
+static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
 {
        return scnprintf(buf, CPUFREQ_NAME_LEN, "%s\n", cpufreq_driver->name);
 }
@@ -562,8 +563,8 @@ static ssize_t show_scaling_driver (struct cpufreq_policy * policy, char *buf)
 /**
  * show_scaling_available_governors - show the available CPUfreq governors
  */
-static ssize_t show_scaling_available_governors (struct cpufreq_policy *policy,
-                               char *buf)
+static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
+                                               char *buf)
 {
        ssize_t i = 0;
        struct cpufreq_governor *t;
@@ -582,15 +583,13 @@ out:
        i += sprintf(&buf[i], "\n");
        return i;
 }
-/**
- * show_affected_cpus - show the CPUs affected by each transition
- */
-static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
+
+static ssize_t show_cpus(cpumask_t mask, char *buf)
 {
        ssize_t i = 0;
        unsigned int cpu;
 
-       for_each_cpu_mask(cpu, policy->cpus) {
+       for_each_cpu_mask(cpu, mask) {
                if (i)
                        i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
                i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -601,8 +600,27 @@ static ssize_t show_affected_cpus (struct cpufreq_policy * policy, char *buf)
        return i;
 }
 
+/**
+ * show_related_cpus - show the CPUs affected by each transition even if
+ * hw coordination is in use
+ */
+static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
+{
+       if (cpus_empty(policy->related_cpus))
+               return show_cpus(policy->cpus, buf);
+       return show_cpus(policy->related_cpus, buf);
+}
+
+/**
+ * show_affected_cpus - show the CPUs affected by each transition
+ */
+static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
+{
+       return show_cpus(policy->cpus, buf);
+}
+
 static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
-               const char *buf, size_t count)
+                                       const char *buf, size_t count)
 {
        unsigned int freq = 0;
        unsigned int ret;
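
The new show_cpus() helper formats any cpumask and now backs both affected_cpus and the related_cpus attribute registered further down; show_related_cpus() falls back to policy->cpus when the driver leaves the related mask empty, so the file is always populated. From userspace the attribute is an ordinary sysfs read (cpu0 below is just an example; the path follows the usual cpufreq sysfs layout):

    #include <stdio.h>

    int main(void)
    {
            char buf[256];
            FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/related_cpus", "r");

            if (f && fgets(buf, sizeof(buf), f))
                    printf("CPUs sharing cpu0's frequency domain: %s", buf);
            if (f)
                    fclose(f);
            return f ? 0 : 1;
    }
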
@@ -645,18 +663,20 @@ define_one_ro(cpuinfo_max_freq);
 define_one_ro(scaling_available_governors);
 define_one_ro(scaling_driver);
 define_one_ro(scaling_cur_freq);
+define_one_ro(related_cpus);
 define_one_ro(affected_cpus);
 define_one_rw(scaling_min_freq);
 define_one_rw(scaling_max_freq);
 define_one_rw(scaling_governor);
 define_one_rw(scaling_setspeed);
 
-static struct attribute * default_attrs[] = {
+static struct attribute *default_attrs[] = {
        &cpuinfo_min_freq.attr,
        &cpuinfo_max_freq.attr,
        &scaling_min_freq.attr,
        &scaling_max_freq.attr,
        &affected_cpus.attr,
+       &related_cpus.attr,
        &scaling_governor.attr,
        &scaling_driver.attr,
        &scaling_available_governors.attr,
@@ -667,10 +687,10 @@ static struct attribute * default_attrs[] = {
 #define to_policy(k) container_of(k,struct cpufreq_policy,kobj)
 #define to_attr(a) container_of(a,struct freq_attr,attr)
 
-static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
+static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
 {
-       struct cpufreq_policy * policy = to_policy(kobj);
-       struct freq_attr * fattr = to_attr(attr);
+       struct cpufreq_policy *policy = to_policy(kobj);
+       struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get(policy->cpu);
        if (!policy)
@@ -691,11 +711,11 @@ no_policy:
        return ret;
 }
 
-static ssize_t store(struct kobject * kobj, struct attribute * attr,
-                    const char * buf, size_t count)
+static ssize_t store(struct kobject *kobj, struct attribute *attr,
+                    const char *buf, size_t count)
 {
-       struct cpufreq_policy * policy = to_policy(kobj);
-       struct freq_attr * fattr = to_attr(attr);
+       struct cpufreq_policy *policy = to_policy(kobj);
+       struct freq_attr *fattr = to_attr(attr);
        ssize_t ret = -EINVAL;
        policy = cpufreq_cpu_get(policy->cpu);
        if (!policy)
@@ -716,9 +736,9 @@ no_policy:
        return ret;
 }
 
-static void cpufreq_sysfs_release(struct kobject * kobj)
+static void cpufreq_sysfs_release(struct kobject *kobj)
 {
-       struct cpufreq_policy * policy = to_policy(kobj);
+       struct cpufreq_policy *policy = to_policy(kobj);
        dprintk("last reference is dropped\n");
        complete(&policy->kobj_unregister);
 }
@@ -740,7 +760,7 @@ static struct kobj_type ktype_cpufreq = {
  *
  * Adds the cpufreq interface for a CPU device.
  */
-static int cpufreq_add_dev (struct sys_device * sys_dev)
+static int cpufreq_add_dev(struct sys_device *sys_dev)
 {
        unsigned int cpu = sys_dev->id;
        int ret = 0;
@@ -800,7 +820,6 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
        ret = cpufreq_driver->init(policy);
        if (ret) {
                dprintk("initialization failed\n");
-               unlock_policy_rwsem_write(cpu);
                goto err_out;
        }
        policy->user_policy.min = policy->cpuinfo.min_freq;
@@ -823,7 +842,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
                /* check for existing affected CPUs.  They may not be aware
                 * of it due to CPU Hotplug.
                 */
-               managed_policy = cpufreq_cpu_get(j);
+               managed_policy = cpufreq_cpu_get(j);            // FIXME: Where is this released?  What about error paths?
                if (unlikely(managed_policy)) {
 
                        /* Set proper policy_cpu */
@@ -842,14 +861,11 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
                        ret = sysfs_create_link(&sys_dev->kobj,
                                                &managed_policy->kobj,
                                                "cpufreq");
-                       if (ret) {
-                               unlock_policy_rwsem_write(cpu);
+                       if (ret)
                                goto err_out_driver_exit;
-                       }
 
                        cpufreq_debug_enable_ratelimit();
                        ret = 0;
-                       unlock_policy_rwsem_write(cpu);
                        goto err_out_driver_exit; /* call driver->exit() */
                }
        }
@@ -859,33 +875,26 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
        /* prepare interface data */
        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
                                   "cpufreq");
-       if (ret) {
-               unlock_policy_rwsem_write(cpu);
+       if (ret)
                goto err_out_driver_exit;
-       }
+
        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while ((drv_attr) && (*drv_attr)) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
-               if (ret) {
-                       unlock_policy_rwsem_write(cpu);
+               if (ret)
                        goto err_out_driver_exit;
-               }
                drv_attr++;
        }
-       if (cpufreq_driver->get){
+       if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
-               if (ret) {
-                       unlock_policy_rwsem_write(cpu);
+               if (ret)
                        goto err_out_driver_exit;
-               }
        }
-       if (cpufreq_driver->target){
+       if (cpufreq_driver->target) {
                ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
-               if (ret) {
-                       unlock_policy_rwsem_write(cpu);
+               if (ret)
                        goto err_out_driver_exit;
-               }
        }
 
        spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -907,10 +916,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
                cpu_sys_dev = get_cpu_sysdev(j);
                ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
                                        "cpufreq");
-               if (ret) {
-                       unlock_policy_rwsem_write(cpu);
+               if (ret)
                        goto err_out_unregister;
-               }
        }
 
        policy->governor = NULL; /* to assure that the starting sequence is
@@ -950,6 +957,7 @@ err_out_driver_exit:
                cpufreq_driver->exit(policy);
 
 err_out:
+       unlock_policy_rwsem_write(cpu);
        kfree(policy);
 
 nomem_out:
@@ -967,7 +975,7 @@ module_out:
  * Caller should already have policy_rwsem in write mode for this CPU.
  * This routine frees the rwsem before returning.
  */
-static int __cpufreq_remove_dev (struct sys_device * sys_dev)
+static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 {
        unsigned int cpu = sys_dev->id;
        unsigned long flags;
@@ -1071,7 +1079,7 @@ static int __cpufreq_remove_dev (struct sys_device * sys_dev)
 }
 
 
-static int cpufreq_remove_dev (struct sys_device * sys_dev)
+static int cpufreq_remove_dev(struct sys_device *sys_dev)
 {
        unsigned int cpu = sys_dev->id;
        int retval;
@@ -1138,7 +1146,7 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
                cpufreq_cpu_put(policy);
        }
 
-       return (ret_freq);
+       return ret_freq;
 }
 EXPORT_SYMBOL(cpufreq_quick_get);
 
@@ -1149,7 +1157,7 @@ static unsigned int __cpufreq_get(unsigned int cpu)
        unsigned int ret_freq = 0;
 
        if (!cpufreq_driver->get)
-               return (ret_freq);
+               return ret_freq;
 
        ret_freq = cpufreq_driver->get(cpu);
 
@@ -1163,7 +1171,7 @@ static unsigned int __cpufreq_get(unsigned int cpu)
                }
        }
 
-       return (ret_freq);
+       return ret_freq;
 }
 
 /**
@@ -1190,7 +1198,7 @@ unsigned int cpufreq_get(unsigned int cpu)
 out_policy:
        cpufreq_cpu_put(policy);
 out:
-       return (ret_freq);
+       return ret_freq;
 }
 EXPORT_SYMBOL(cpufreq_get);
 
@@ -1199,7 +1207,7 @@ EXPORT_SYMBOL(cpufreq_get);
  *     cpufreq_suspend - let the low level driver prepare for suspend
  */
 
-static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
+static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 {
        int cpu = sysdev->id;
        int ret = 0;
@@ -1221,22 +1229,18 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
                return -EINVAL;
 
        /* only handle each CPU group once */
-       if (unlikely(cpu_policy->cpu != cpu)) {
-               cpufreq_cpu_put(cpu_policy);
-               return 0;
-       }
+       if (unlikely(cpu_policy->cpu != cpu))
+               goto out;
 
        if (cpufreq_driver->suspend) {
                ret = cpufreq_driver->suspend(cpu_policy, pmsg);
                if (ret) {
                        printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
                                        "step on CPU %u\n", cpu_policy->cpu);
-                       cpufreq_cpu_put(cpu_policy);
-                       return ret;
+                       goto out;
                }
        }
 
-
        if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
                goto out;
 
@@ -1270,7 +1274,7 @@ static int cpufreq_suspend(struct sys_device * sysdev, pm_message_t pmsg)
 
 out:
        cpufreq_cpu_put(cpu_policy);
-       return 0;
+       return ret;
 }
 
 /**
@@ -1281,7 +1285,7 @@ out:
  *     3.) schedule call cpufreq_update_policy() ASAP as interrupts are
  *         restored.
  */
-static int cpufreq_resume(struct sys_device * sysdev)
+static int cpufreq_resume(struct sys_device *sysdev)
 {
        int cpu = sysdev->id;
        int ret = 0;
@@ -1302,18 +1306,15 @@ static int cpufreq_resume(struct sys_device * sysdev)
                return -EINVAL;
 
        /* only handle each CPU group once */
-       if (unlikely(cpu_policy->cpu != cpu)) {
-               cpufreq_cpu_put(cpu_policy);
-               return 0;
-       }
+       if (unlikely(cpu_policy->cpu != cpu))
+               goto fail;
 
        if (cpufreq_driver->resume) {
                ret = cpufreq_driver->resume(cpu_policy);
                if (ret) {
                        printk(KERN_ERR "cpufreq: resume failed in ->resume "
                                        "step on CPU %u\n", cpu_policy->cpu);
-                       cpufreq_cpu_put(cpu_policy);
-                       return ret;
+                       goto fail;
                }
        }
 
@@ -1353,6 +1354,7 @@ static int cpufreq_resume(struct sys_device * sysdev)
 
 out:
        schedule_work(&cpu_policy->update);
+fail:
        cpufreq_cpu_put(cpu_policy);
        return ret;
 }
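
Editor's note: the suspend/resume hunks above replace duplicated cpufreq_cpu_put()/return pairs with a single exit label that always drops the reference. A minimal user-space sketch of the same pattern (the names are illustrative, not from the kernel):

#include <stdio.h>
#include <stdlib.h>

struct resource { int refs; };

static struct resource *resource_get(void)
{
        struct resource *r = malloc(sizeof(*r));
        if (r)
                r->refs = 1;
        return r;
}

static void resource_put(struct resource *r)
{
        free(r);                        /* stands in for cpufreq_cpu_put() */
}

static int do_work(struct resource *r, int step_ok)
{
        int ret = 0;

        if (!step_ok) {                 /* any intermediate failure ...   */
                ret = -1;
                goto out;               /* ... funnels through one label  */
        }
        printf("work done\n");
out:
        resource_put(r);                /* reference dropped exactly once */
        return ret;
}

int main(void)
{
        struct resource *r = resource_get();
        return r ? do_work(r, 1) : 1;
}
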
@@ -1386,6 +1388,8 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
 {
        int ret;
 
+       WARN_ON(!init_cpufreq_transition_notifier_list_called);
+
        switch (list) {
        case CPUFREQ_TRANSITION_NOTIFIER:
                ret = srcu_notifier_chain_register(
@@ -1848,7 +1852,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
                cpufreq_debug_enable_ratelimit();
        }
 
-       return (ret);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_register_driver);
 
index 13fe06b94b0a43c8e716c8e7625cecc5afc3b745..88d2f44fba480f7a66d00ed2fdeb571120926659 100644 (file)
@@ -35,12 +35,12 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
        return 0;
 }
 
-static struct cpufreq_governor cpufreq_gov_powersave = {
+struct cpufreq_governor cpufreq_gov_powersave = {
        .name           = "powersave",
        .governor       = cpufreq_governor_powersave,
        .owner          = THIS_MODULE,
 };
-
+EXPORT_SYMBOL(cpufreq_gov_powersave);
 
 static int __init cpufreq_gov_powersave_init(void)
 {
@@ -58,5 +58,9 @@ MODULE_AUTHOR("Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
 MODULE_LICENSE("GPL");
 
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
+fs_initcall(cpufreq_gov_powersave_init);
+#else
 module_init(cpufreq_gov_powersave_init);
+#endif
 module_exit(cpufreq_gov_powersave_exit);
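
Editor's note: the #ifdef above registers the governor with fs_initcall() when it is the chosen default, so it is already available when later-initcalled cpufreq code goes looking for it; module_init() would register it too late in the built-in case. A user-space analogy using GCC constructor priorities, where a lower priority runs earlier (the function names are made up for illustration):

#include <stdio.h>

static const char *default_governor;    /* set by the earlier constructor */

/* priority 101 runs before 102, much like fs_initcall vs module_init */
__attribute__((constructor(101)))
static void register_powersave(void)
{
        default_governor = "powersave";
}

__attribute__((constructor(102)))
static void start_consumer(void)
{
        printf("default governor at driver init: %s\n",
               default_governor ? default_governor : "(none)");
}

int main(void)
{
        return 0;
}
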
index 070421a5480e731116ed58845e5ac424cfaf0aba..ae70d63a8b26eabb2b02d8c2ea28721fcd934b2a 100644 (file)
@@ -114,7 +114,7 @@ show_trans_table(struct cpufreq_policy *policy, char *buf)
                                stat->freq_table[i]);
        }
        if (len >= PAGE_SIZE)
-               return len;
+               return PAGE_SIZE;
 
        len += snprintf(buf + len, PAGE_SIZE - len, "\n");
 
@@ -131,8 +131,12 @@ show_trans_table(struct cpufreq_policy *policy, char *buf)
                        len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
                                        stat->trans_table[i*stat->max_state+j]);
                }
+               if (len >= PAGE_SIZE)
+                       break;
                len += snprintf(buf + len, PAGE_SIZE - len, "\n");
        }
+       if (len >= PAGE_SIZE)
+               return PAGE_SIZE;
        return len;
 }
 CPUFREQ_STATDEVICE_ATTR(trans_table,0444,show_trans_table);
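
Editor's note: show_trans_table() now breaks out of its row loop and caps the return value once the buffer is full, since a sysfs show() routine must never report more than the PAGE_SIZE bytes it was handed. A stand-alone sketch of the same accumulate-and-clamp pattern (PAGE_SIZE here is just a local constant):

#include <stdio.h>
#include <sys/types.h>

#define PAGE_SIZE 4096          /* stand-in for the kernel constant */

static ssize_t show_rows(char *buf, int rows)
{
        ssize_t len = 0;
        int i;

        for (i = 0; i < rows; i++) {
                /* snprintf returns the would-be length, so len can grow
                 * past PAGE_SIZE; stop appending as soon as it does. */
                len += snprintf(buf + len, PAGE_SIZE - len, "row %d\n", i);
                if (len >= PAGE_SIZE)
                        break;
        }
        return len >= PAGE_SIZE ? PAGE_SIZE : len;
}

int main(void)
{
        static char buf[PAGE_SIZE];

        printf("%zd bytes\n", show_rows(buf, 10000));
        return 0;
}
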
@@ -284,7 +288,7 @@ cpufreq_stat_notifier_trans (struct notifier_block *nb, unsigned long val,
        if (!stat)
                return 0;
 
-       old_index = freq_table_get_index(stat, freq->old);
+       old_index = stat->last_index;
        new_index = freq_table_get_index(stat, freq->new);
 
        cpufreq_stats_update(freq->cpu);
index 2b382990fe58772238eabd8e29b2485b95981778..6e6c3c4aea6b074d53c9824a1060bbfc38e88069 100644 (file)
@@ -67,7 +67,7 @@ config EDAC_E7XXX
          E7205, E7500, E7501 and E7505 server chipsets.
 
 config EDAC_E752X
-       tristate "Intel e752x (e7520, e7525, e7320)"
+       tristate "Intel e752x (e7520, e7525, e7320) and 3100"
        depends on EDAC_MM_EDAC && PCI && X86 && HOTPLUG
        help
          Support for error detection and correction on the Intel
index f22075410591a983fbb243fd42382448ac4d7640..2b95f1a3edfc318fe93ebf58b9bbd89cc8035599 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
+#include <linux/edac.h>
 #include "edac_core.h"
 
 #define AMD76X_REVISION        " Ver: 2.0.2 "  __DATE__
@@ -344,6 +345,9 @@ static struct pci_driver amd76x_driver = {
 
 static int __init amd76x_init(void)
 {
+       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+       opstate_init();
+
        return pci_register_driver(&amd76x_driver);
 }
 
@@ -358,3 +362,6 @@ module_exit(amd76x_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
 MODULE_DESCRIPTION("MC support for AMD 76x memory controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
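
Editor's note: several EDAC drivers in this merge drop their private POLL/NMI switch statement and instead call opstate_init() at module init while exposing edac_op_state as a module parameter, so the sanity check presumably lives once in the EDAC core rather than in every driver. A small user-space model of that centralised check (the enum values and helper are illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>

enum { OPSTATE_POLL = 0, OPSTATE_NMI = 1 };

static int edac_op_state = OPSTATE_POLL;   /* set from the command line,
                                              like a module parameter */

/* one shared sanity check instead of a switch in every driver */
static void opstate_init(void)
{
        if (edac_op_state != OPSTATE_POLL && edac_op_state != OPSTATE_NMI)
                edac_op_state = OPSTATE_POLL;
}

int main(int argc, char **argv)
{
        if (argc > 1)
                edac_op_state = atoi(argv[1]);
        opstate_init();
        printf("error reporting mode: %s\n",
               edac_op_state == OPSTATE_NMI ? "NMI" : "poll");
        return 0;
}
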
index 6eb434749cd5bad20ea376856c436b8f8587751b..c94a0eb492cb1eb2b01bfa5c8a5b47d7099c79a1 100644 (file)
@@ -29,6 +29,7 @@
 #define EDAC_MOD_STR   "e752x_edac"
 
 static int force_function_unhide;
+static int sysbus_parity = -1;
 
 static struct edac_pci_ctl_info *e752x_pci;
 
@@ -62,6 +63,14 @@ static struct edac_pci_ctl_info *e752x_pci;
 #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593
 #endif                         /* PCI_DEVICE_ID_INTEL_7320_1_ERR */
 
+#ifndef PCI_DEVICE_ID_INTEL_3100_0
+#define PCI_DEVICE_ID_INTEL_3100_0     0x35B0
+#endif                         /* PCI_DEVICE_ID_INTEL_3100_0 */
+
+#ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
+#define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1
+#endif                         /* PCI_DEVICE_ID_INTEL_3100_1_ERR */
+
 #define E752X_NR_CSROWS                8       /* number of csrows */
 
 /* E752X register addresses - device 0 function 0 */
@@ -152,6 +161,12 @@ static struct edac_pci_ctl_info *e752x_pci;
                                        /*     error syndrome register (16b) */
 #define E752X_DEVPRES1         0xF4    /* Device Present 1 register (8b) */
 
+/* 3100 IMCH specific register addresses - device 0 function 1 */
+#define I3100_NSI_FERR         0x48    /* NSI first error reg (32b) */
+#define I3100_NSI_NERR         0x4C    /* NSI next error reg (32b) */
+#define I3100_NSI_SMICMD       0x54    /* NSI SMI command register (32b) */
+#define I3100_NSI_EMASK                0x90    /* NSI error mask register (32b) */
+
 /* ICH5R register addresses - device 30 function 0 */
 #define ICH5R_PCI_STAT         0x06    /* PCI status register (16b) */
 #define ICH5R_PCI_2ND_STAT     0x1E    /* PCI status secondary reg (16b) */
@@ -160,7 +175,8 @@ static struct edac_pci_ctl_info *e752x_pci;
 enum e752x_chips {
        E7520 = 0,
        E7525 = 1,
-       E7320 = 2
+       E7320 = 2,
+       I3100 = 3
 };
 
 struct e752x_pvt {
@@ -185,8 +201,10 @@ struct e752x_dev_info {
 struct e752x_error_info {
        u32 ferr_global;
        u32 nerr_global;
-       u8 hi_ferr;
-       u8 hi_nerr;
+       u32 nsi_ferr;   /* 3100 only */
+       u32 nsi_nerr;   /* 3100 only */
+       u8 hi_ferr;     /* all but 3100 */
+       u8 hi_nerr;     /* all but 3100 */
        u16 sysbus_ferr;
        u16 sysbus_nerr;
        u8 buf_ferr;
@@ -215,6 +233,10 @@ static const struct e752x_dev_info e752x_devs[] = {
                .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
                .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
                .ctl_name = "E7320"},
+       [I3100] = {
+               .err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
+               .ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
+               .ctl_name = "3100"},
 };
 
 static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
@@ -402,7 +424,7 @@ static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
 static char *global_message[11] = {
        "PCI Express C1", "PCI Express C", "PCI Express B1",
        "PCI Express B", "PCI Express A1", "PCI Express A",
-       "DMA Controler", "HUB Interface", "System Bus",
+       "DMA Controler", "HUB or NS Interface", "System Bus",
        "DRAM Controler", "Internal Buffer"
 };
 
@@ -455,6 +477,63 @@ static inline void hub_error(int fatal, u8 errors, int *error_found,
                do_hub_error(fatal, errors);
 }
 
+#define NSI_FATAL_MASK         0x0c080081
+#define NSI_NON_FATAL_MASK     0x23a0ba64
+#define NSI_ERR_MASK           (NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
+
+static char *nsi_message[30] = {
+       "NSI Link Down",        /* NSI_FERR/NSI_NERR bit 0, fatal error */
+       "",                                             /* reserved */
+       "NSI Parity Error",                             /* bit 2, non-fatal */
+       "",                                             /* reserved */
+       "",                                             /* reserved */
+       "Correctable Error Message",                    /* bit 5, non-fatal */
+       "Non-Fatal Error Message",                      /* bit 6, non-fatal */
+       "Fatal Error Message",                          /* bit 7, fatal */
+       "",                                             /* reserved */
+       "Receiver Error",                               /* bit 9, non-fatal */
+       "",                                             /* reserved */
+       "Bad TLP",                                      /* bit 11, non-fatal */
+       "Bad DLLP",                                     /* bit 12, non-fatal */
+       "REPLAY_NUM Rollover",                          /* bit 13, non-fatal */
+       "",                                             /* reserved */
+       "Replay Timer Timeout",                         /* bit 15, non-fatal */
+       "",                                             /* reserved */
+       "",                                             /* reserved */
+       "",                                             /* reserved */
+       "Data Link Protocol Error",                     /* bit 19, fatal */
+       "",                                             /* reserved */
+       "Poisoned TLP",                                 /* bit 21, non-fatal */
+       "",                                             /* reserved */
+       "Completion Timeout",                           /* bit 23, non-fatal */
+       "Completer Abort",                              /* bit 24, non-fatal */
+       "Unexpected Completion",                        /* bit 25, non-fatal */
+       "Receiver Overflow",                            /* bit 26, fatal */
+       "Malformed TLP",                                /* bit 27, fatal */
+       "",                                             /* reserved */
+       "Unsupported Request"                           /* bit 29, non-fatal */
+};
+
+static void do_nsi_error(int fatal, u32 errors)
+{
+       int i;
+
+       for (i = 0; i < 30; i++) {
+               if (errors & (1 << i))
+                       printk(KERN_WARNING "%sError %s\n",
+                              fatal_message[fatal], nsi_message[i]);
+       }
+}
+
+static inline void nsi_error(int fatal, u32 errors, int *error_found,
+               int handle_error)
+{
+       *error_found = 1;
+
+       if (handle_error)
+               do_nsi_error(fatal, errors);
+}
+
 static char *membuf_message[4] = {
        "Internal PMWB to DRAM parity",
        "Internal PMWB to System Bus Parity",
@@ -546,6 +625,31 @@ static void e752x_check_hub_interface(struct e752x_error_info *info,
        }
 }
 
+static void e752x_check_ns_interface(struct e752x_error_info *info,
+                               int *error_found, int handle_error)
+{
+       u32 stat32;
+
+       stat32 = info->nsi_ferr;
+       if (stat32 & NSI_ERR_MASK) { /* Error, so process */
+               if (stat32 & NSI_FATAL_MASK)    /* check for fatal errors */
+                       nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
+                                 handle_error);
+               if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
+                       nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
+                                 handle_error);
+       }
+       stat32 = info->nsi_nerr;
+       if (stat32 & NSI_ERR_MASK) {
+               if (stat32 & NSI_FATAL_MASK)
+                       nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
+                                 handle_error);
+               if (stat32 & NSI_NON_FATAL_MASK)
+                       nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
+                                 handle_error);
+       }
+}
+
 static void e752x_check_sysbus(struct e752x_error_info *info,
                        int *error_found, int handle_error)
 {
@@ -653,7 +757,15 @@ static void e752x_get_error_info(struct mem_ctl_info *mci,
        pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);
 
        if (info->ferr_global) {
-               pci_read_config_byte(dev, E752X_HI_FERR, &info->hi_ferr);
+               if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
+                       pci_read_config_dword(dev, I3100_NSI_FERR,
+                                            &info->nsi_ferr);
+                       info->hi_ferr = 0;
+               } else {
+                       pci_read_config_byte(dev, E752X_HI_FERR,
+                                            &info->hi_ferr);
+                       info->nsi_ferr = 0;
+               }
                pci_read_config_word(dev, E752X_SYSBUS_FERR,
                                &info->sysbus_ferr);
                pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
@@ -669,10 +781,15 @@ static void e752x_get_error_info(struct mem_ctl_info *mci,
                pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
                                &info->dram_retr_add);
 
+               /* ignore the reserved bits just in case */
                if (info->hi_ferr & 0x7f)
                        pci_write_config_byte(dev, E752X_HI_FERR,
                                        info->hi_ferr);
 
+               if (info->nsi_ferr & NSI_ERR_MASK)
+                       pci_write_config_dword(dev, I3100_NSI_FERR,
+                                       info->nsi_ferr);
+
                if (info->sysbus_ferr)
                        pci_write_config_word(dev, E752X_SYSBUS_FERR,
                                        info->sysbus_ferr);
@@ -692,7 +809,15 @@ static void e752x_get_error_info(struct mem_ctl_info *mci,
        pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);
 
        if (info->nerr_global) {
-               pci_read_config_byte(dev, E752X_HI_NERR, &info->hi_nerr);
+               if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
+                       pci_read_config_dword(dev, I3100_NSI_NERR,
+                                            &info->nsi_nerr);
+                       info->hi_nerr = 0;
+               } else {
+                       pci_read_config_byte(dev, E752X_HI_NERR,
+                                            &info->hi_nerr);
+                       info->nsi_nerr = 0;
+               }
                pci_read_config_word(dev, E752X_SYSBUS_NERR,
                                &info->sysbus_nerr);
                pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
@@ -706,6 +831,10 @@ static void e752x_get_error_info(struct mem_ctl_info *mci,
                        pci_write_config_byte(dev, E752X_HI_NERR,
                                        info->hi_nerr);
 
+               if (info->nsi_nerr & NSI_ERR_MASK)
+                       pci_write_config_dword(dev, I3100_NSI_NERR,
+                                       info->nsi_nerr);
+
                if (info->sysbus_nerr)
                        pci_write_config_word(dev, E752X_SYSBUS_NERR,
                                        info->sysbus_nerr);
@@ -750,6 +879,7 @@ static int e752x_process_error_info(struct mem_ctl_info *mci,
                global_error(0, stat32, &error_found, handle_errors);
 
        e752x_check_hub_interface(info, &error_found, handle_errors);
+       e752x_check_ns_interface(info, &error_found, handle_errors);
        e752x_check_sysbus(info, &error_found, handle_errors);
        e752x_check_membuf(info, &error_found, handle_errors);
        e752x_check_dram(mci, info, &error_found, handle_errors);
@@ -920,15 +1050,53 @@ fail:
        return 1;
 }
 
+/* Setup system bus parity mask register.
+ * Sysbus parity supported on:
+ *   e7320/e7520/e7525 + Xeon
+ *   i3100 + Xeon/Celeron
+ * Sysbus parity not supported on:
+ *   i3100 + Pentium M/Celeron M/Core Duo/Core2 Duo
+ */
+static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
+{
+       char *cpu_id = cpu_data(0).x86_model_id;
+       struct pci_dev *dev = pvt->dev_d0f1;
+       int enable = 1;
+
+       /* Allow module parameter override, else see if CPU supports parity */
+       if (sysbus_parity != -1) {
+               enable = sysbus_parity;
+       } else if (cpu_id[0] &&
+                  ((strstr(cpu_id, "Pentium") && strstr(cpu_id, " M ")) ||
+                   (strstr(cpu_id, "Celeron") && strstr(cpu_id, " M ")) ||
+                   (strstr(cpu_id, "Core") && strstr(cpu_id, "Duo")))) {
+               e752x_printk(KERN_INFO, "System Bus Parity not "
+                            "supported by CPU, disabling\n");
+               enable = 0;
+       }
+
+       if (enable)
+               pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
+       else
+               pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
+}
+
 static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
 {
        struct pci_dev *dev;
 
        dev = pvt->dev_d0f1;
        /* Turn off error disable & SMI in case the BIOS turned it on */
-       pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
-       pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
-       pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00);
+       if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
+               pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
+               pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
+       } else {
+               pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
+               pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
+       }
+
+       e752x_init_sysbus_parity_mask(pvt);
+
        pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
        pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
        pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
@@ -949,16 +1117,6 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
        debugf0("%s(): mci\n", __func__);
        debugf0("Starting Probe1\n");
 
-       /* make sure error reporting method is sane */
-       switch (edac_op_state) {
-       case EDAC_OPSTATE_POLL:
-       case EDAC_OPSTATE_NMI:
-               break;
-       default:
-               edac_op_state = EDAC_OPSTATE_POLL;
-               break;
-       }
-
        /* check to see if device 0 function 1 is enabled; if it isn't, we
         * assume the BIOS has reserved it for a reason and is expecting
         * exclusive access, we take care not to violate that assumption and
@@ -985,8 +1143,9 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
 
        debugf3("%s(): init mci\n", __func__);
        mci->mtype_cap = MEM_FLAG_RDDR;
-       mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED |
-               EDAC_FLAG_S4ECD4ED;
+       /* 3100 IMCH supports SECDED only */
+       mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
+               (EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
        /* FIXME - what if different memory types are in different csrows? */
        mci->mod_name = EDAC_MOD_STR;
        mci->mod_ver = E752X_REVISION;
@@ -1018,7 +1177,10 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
        e752x_init_csrows(mci, pdev, ddrcsr);
        e752x_init_mem_map_table(pdev, pvt);
 
-       mci->edac_cap |= EDAC_FLAG_NONE;
+       if (dev_idx == I3100)
+               mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
+       else
+               mci->edac_cap |= EDAC_FLAG_NONE;
        debugf3("%s(): tolm, remapbase, remaplimit\n", __func__);
 
        /* load the top of low memory, remap base, and remap limit vars */
@@ -1109,6 +1271,9 @@ static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
        {
         PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
         E7320},
+       {
+        PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+        I3100},
        {
         0,
         }                      /* 0 terminated list. */
@@ -1128,6 +1293,10 @@ static int __init e752x_init(void)
        int pci_rc;
 
        debugf3("%s()\n", __func__);
+
+       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+       opstate_init();
+
        pci_rc = pci_register_driver(&e752x_driver);
        return (pci_rc < 0) ? pci_rc : 0;
 }
@@ -1143,10 +1312,15 @@ module_exit(e752x_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
-MODULE_DESCRIPTION("MC support for Intel e752x memory controllers");
+MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
 
 module_param(force_function_unhide, int, 0444);
 MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
                 " 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access");
+
 module_param(edac_op_state, int, 0444);
 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
+module_param(sysbus_parity, int, 0444);
+MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
+               " 1=enable system bus parity checking, default=auto-detect");
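
Editor's note: the new I3100_NSI_* handling earlier in this file splits each 32-bit FERR/NERR value into fatal and non-fatal groups using the two masks added above the nsi_message[] table. A stand-alone sketch of that classification with the same mask values; the sample register value is made up:

#include <stdio.h>

#define NSI_FATAL_MASK     0x0c080081u   /* e.g. bit 0 = NSI Link Down  */
#define NSI_NON_FATAL_MASK 0x23a0ba64u   /* e.g. bit 21 = Poisoned TLP  */

static void classify_nsi(unsigned int ferr)
{
        unsigned int fatal = ferr & NSI_FATAL_MASK;
        unsigned int nonfatal = ferr & NSI_NON_FATAL_MASK;

        if (fatal)
                printf("fatal bits:     0x%08x\n", fatal);
        if (nonfatal)
                printf("non-fatal bits: 0x%08x\n", nonfatal);
        if (!fatal && !nonfatal)
                printf("no NSI error bits set\n");
}

int main(void)
{
        classify_nsi(0x00200001);   /* bit 0 (fatal) + bit 21 (non-fatal) */
        return 0;
}
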
index 96ecc492664122cb5025d9996353077642310957..c7d11cc4e21a5d608c96695f64cb29d78fb54f15 100644 (file)
@@ -414,16 +414,6 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
 
        debugf0("%s(): mci\n", __func__);
 
-       /* make sure error reporting method is sane */
-       switch (edac_op_state) {
-       case EDAC_OPSTATE_POLL:
-       case EDAC_OPSTATE_NMI:
-               break;
-       default:
-               edac_op_state = EDAC_OPSTATE_POLL;
-               break;
-       }
-
        pci_read_config_dword(pdev, E7XXX_DRC, &drc);
 
        drc_chan = dual_channel_active(drc, dev_idx);
@@ -565,6 +555,9 @@ static struct pci_driver e7xxx_driver = {
 
 static int __init e7xxx_init(void)
 {
+       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+       opstate_init();
+
        return pci_register_driver(&e7xxx_driver);
 }
 
index b9552bc03dead83ee02788de4c57979f038d54ad..63372fa7ecfe63ec6de319e4177a141c45bb508c 100644 (file)
@@ -36,7 +36,7 @@
  * is protected by the 'device_ctls_mutex' lock
  */
 static DEFINE_MUTEX(device_ctls_mutex);
-static struct list_head edac_device_list = LIST_HEAD_INIT(edac_device_list);
+static LIST_HEAD(edac_device_list);
 
 #ifdef CONFIG_EDAC_DEBUG
 static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
@@ -375,37 +375,6 @@ static void del_edac_device_from_global_list(struct edac_device_ctl_info
        wait_for_completion(&edac_device->removal_complete);
 }
 
-/**
- * edac_device_find
- *     Search for a edac_device_ctl_info structure whose index is 'idx'.
- *
- * If found, return a pointer to the structure.
- * Else return NULL.
- *
- * Caller must hold device_ctls_mutex.
- */
-struct edac_device_ctl_info *edac_device_find(int idx)
-{
-       struct list_head *item;
-       struct edac_device_ctl_info *edac_dev;
-
-       /* Iterate over list, looking for exact match of ID */
-       list_for_each(item, &edac_device_list) {
-               edac_dev = list_entry(item, struct edac_device_ctl_info, link);
-
-               if (edac_dev->dev_idx >= idx) {
-                       if (edac_dev->dev_idx == idx)
-                               return edac_dev;
-
-                       /* not on list, so terminate early */
-                       break;
-               }
-       }
-
-       return NULL;
-}
-EXPORT_SYMBOL_GPL(edac_device_find);
-
 /*
  * edac_device_workq_function
  *     performs the operation scheduled by a workq request
index 063a1bffe38b026ba7005ee0b105a4e956baf6e9..a4cf1645f588c998bbebb4c812bd567a229cc0e2 100644 (file)
@@ -36,7 +36,7 @@
 
 /* lock to memory controller's control array */
 static DEFINE_MUTEX(mem_ctls_mutex);
-static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices);
+static LIST_HEAD(mc_devices);
 
 #ifdef CONFIG_EDAC_DEBUG
 
@@ -886,24 +886,3 @@ void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci,
        mci->csrows[csrow].channels[channel].ce_count++;
 }
 EXPORT_SYMBOL(edac_mc_handle_fbd_ce);
-
-/*
- * Iterate over all MC instances and check for ECC, et al, errors
- */
-void edac_check_mc_devices(void)
-{
-       struct list_head *item;
-       struct mem_ctl_info *mci;
-
-       debugf3("%s()\n", __func__);
-       mutex_lock(&mem_ctls_mutex);
-
-       list_for_each(item, &mc_devices) {
-               mci = list_entry(item, struct mem_ctl_info, link);
-
-               if (mci->edac_check != NULL)
-                       mci->edac_check(mci);
-       }
-
-       mutex_unlock(&mem_ctls_mutex);
-}
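
Editor's note: this file and the neighbouring EDAC files swap "struct list_head x = LIST_HEAD_INIT(x)" for the shorter LIST_HEAD(x); both produce an empty list whose next and prev point back at the head. A self-contained illustration using the standard definitions of those macros:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static struct list_head old_style = LIST_HEAD_INIT(old_style);
static LIST_HEAD(new_style);

int main(void)
{
        /* both forms yield an empty, self-referencing list head */
        printf("old empty: %d\n", old_style.next == &old_style);
        printf("new empty: %d\n", new_style.next == &new_style);
        return 0;
}
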
index cbc419c8ebc1b8373c3c5661ce9a449cb3517107..233d4798c3aa2ecf18f3875d45a138b8bc2f2fe8 100644 (file)
@@ -27,7 +27,6 @@ extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci);
 extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci);
 extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci);
 extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci);
-extern void edac_check_mc_devices(void);
 extern int edac_get_log_ue(void);
 extern int edac_get_log_ce(void);
 extern int edac_get_panic_on_ue(void);
index 32be43576a8ee720876604a6b0c11c9081b2c0c7..9b24340b52e1eb3d30666181e668abf83ebd78f2 100644 (file)
@@ -29,7 +29,7 @@
 #include "edac_module.h"
 
 static DEFINE_MUTEX(edac_pci_ctls_mutex);
-static struct list_head edac_pci_list = LIST_HEAD_INIT(edac_pci_list);
+static LIST_HEAD(edac_pci_list);
 
 /*
  * edac_pci_alloc_ctl_info
@@ -189,6 +189,9 @@ static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci)
        wait_for_completion(&pci->complete);
 }
 
+#if 0
+/* Older code, but might use in the future */
+
 /*
  * edac_pci_find()
  *     Search for an edac_pci_ctl_info structure whose index is 'idx'
@@ -219,6 +222,7 @@ struct edac_pci_ctl_info *edac_pci_find(int idx)
        return NULL;
 }
 EXPORT_SYMBOL_GPL(edac_pci_find);
+#endif
 
 /*
  * edac_pci_workq_function()
@@ -422,7 +426,7 @@ EXPORT_SYMBOL_GPL(edac_pci_del_device);
  *
  *     a Generic parity check API
  */
-void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
+static void edac_pci_generic_check(struct edac_pci_ctl_info *pci)
 {
        debugf4("%s()\n", __func__);
        edac_pci_do_parity_check();
index 71c3195d3704428fc16a3f69848d739f0a8d723e..2c1fa1bb6df2fe87dbc96b3c511c24e509c43cf4 100644 (file)
@@ -37,17 +37,17 @@ int edac_pci_get_check_errors(void)
        return check_pci_errors;
 }
 
-int edac_pci_get_log_pe(void)
+static int edac_pci_get_log_pe(void)
 {
        return edac_pci_log_pe;
 }
 
-int edac_pci_get_log_npe(void)
+static int edac_pci_get_log_npe(void)
 {
        return edac_pci_log_npe;
 }
 
-int edac_pci_get_panic_on_pe(void)
+static int edac_pci_get_panic_on_pe(void)
 {
        return edac_pci_panic_on_pe;
 }
@@ -197,7 +197,8 @@ error_out:
  *
  *     unregister the kobj for the EDAC PCI instance
  */
-void edac_pci_unregister_sysfs_instance_kobj(struct edac_pci_ctl_info *pci)
+static void edac_pci_unregister_sysfs_instance_kobj(
+                       struct edac_pci_ctl_info *pci)
 {
        debugf0("%s()\n", __func__);
 
@@ -337,7 +338,7 @@ static struct kobj_type ktype_edac_pci_main_kobj = {
  *     setup the sysfs for EDAC PCI attributes
  *     assumes edac_class has already been initialized
  */
-int edac_pci_main_kobj_setup(void)
+static int edac_pci_main_kobj_setup(void)
 {
        int err;
        struct sysdev_class *edac_class;
index 5d4292811c146285f45acc0f47ebe0618d210127..6c9a0f2a593cd89fbc998182798bf1e21894d5ea 100644 (file)
@@ -326,15 +326,6 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
                return -ENODEV;
        }
 
-       switch (edac_op_state) {
-       case EDAC_OPSTATE_POLL:
-       case EDAC_OPSTATE_NMI:
-               break;
-       default:
-               edac_op_state = EDAC_OPSTATE_POLL;
-               break;
-       }
-
        c0dra[0] = readb(window + I3000_C0DRA + 0);     /* ranks 0,1 */
        c0dra[1] = readb(window + I3000_C0DRA + 1);     /* ranks 2,3 */
        c1dra[0] = readb(window + I3000_C1DRA + 0);     /* ranks 0,1 */
@@ -503,6 +494,10 @@ static int __init i3000_init(void)
        int pci_rc;
 
        debugf3("MC: %s()\n", __func__);
+
+       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+       opstate_init();
+
        pci_rc = pci_register_driver(&i3000_driver);
        if (pci_rc < 0)
                goto fail0;
index 5a852017c17a70dc87d1bd16c7e83587b8b7a1c7..4a16b5b61cfbdc702530e735af2bbb7a0eac1d2b 100644 (file)
@@ -1286,16 +1286,6 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
        if (PCI_FUNC(pdev->devfn) != 0)
                return -ENODEV;
 
-       /* make sure error reporting method is sane */
-       switch (edac_op_state) {
-       case EDAC_OPSTATE_POLL:
-       case EDAC_OPSTATE_NMI:
-               break;
-       default:
-               edac_op_state = EDAC_OPSTATE_POLL;
-               break;
-       }
-
        /* Ask the devices for the number of CSROWS and CHANNELS so
         * that we can calculate the memory resources, etc
         *
@@ -1478,6 +1468,9 @@ static int __init i5000_init(void)
 
        debugf2("MC: " __FILE__ ": %s()\n", __func__);
 
+       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+       opstate_init();
+
        pci_rc = pci_register_driver(&i5000_driver);
 
        return (pci_rc < 0) ? pci_rc : 0;
@@ -1501,5 +1494,6 @@ MODULE_AUTHOR
     ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>");
 MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - "
                I5000_REVISION);
+
 module_param(edac_op_state, int, 0444);
 MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
index 83bfe37c4bbbbabe3ec051e34aa7a6149257b203..c5305e3ee4346932b44cc73074cb6718bd3a38ad 100644 (file)
@@ -29,6 +29,7 @@
 
 #include <linux/slab.h>
 
+#include <linux/edac.h>
 #include "edac_core.h"
 
 #define I82443_REVISION        "0.1"
@@ -386,6 +387,9 @@ static struct pci_driver i82443bxgx_edacmc_driver = {
 
 static int __init i82443bxgx_edacmc_init(void)
 {
+       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+       opstate_init();
+
        return pci_register_driver(&i82443bxgx_edacmc_driver);
 }
 
@@ -400,3 +404,6 @@ module_exit(i82443bxgx_edacmc_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD");
 MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
index f5ecd2c4d813eb9439bcffb6a69addbfd7e724ff..c0088ba9672b754429d0bcd8bea19329f36c4064 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
+#include <linux/edac.h>
 #include "edac_core.h"
 
 #define  I82860_REVISION " Ver: 2.0.2 " __DATE__
@@ -294,6 +295,9 @@ static int __init i82860_init(void)
 
        debugf3("%s()\n", __func__);
 
+       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+       opstate_init();
+
        if ((pci_rc = pci_register_driver(&i82860_driver)) < 0)
                goto fail0;
 
@@ -345,3 +349,6 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) "
                "Ben Woodard <woodard@redhat.com>");
 MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
index 031abadc439a37c407d213fd1091b8e9a03b745e..e43bdc43a1bf09732a7f9037b76852968c24c4f5 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
+#include <linux/edac.h>
 #include "edac_core.h"
 
 #define I82875P_REVISION       " Ver: 2.0.2 " __DATE__
@@ -393,6 +394,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
        struct i82875p_error_info discard;
 
        debugf0("%s()\n", __func__);
+
        ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL);
 
        if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window))
@@ -532,6 +534,10 @@ static int __init i82875p_init(void)
        int pci_rc;
 
        debugf3("%s()\n", __func__);
+
+       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+       opstate_init();
+
        pci_rc = pci_register_driver(&i82875p_driver);
 
        if (pci_rc < 0)
@@ -586,3 +592,6 @@ module_exit(i82875p_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh");
 MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
index 0ee888456932e4f0f95e59ab135b02e80d6f23ca..2eed3ea2cf621303320aec118f0ed3bcfcd9764f 100644 (file)
@@ -14,7 +14,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
-
+#include <linux/edac.h>
 #include "edac_core.h"
 
 #define I82975X_REVISION       " Ver: 1.0.0 " __DATE__
@@ -611,6 +611,9 @@ static int __init i82975x_init(void)
 
        debugf3("%s()\n", __func__);
 
+       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+       opstate_init();
+
        pci_rc = pci_register_driver(&i82975x_driver);
        if (pci_rc < 0)
                goto fail0;
@@ -664,3 +667,6 @@ module_exit(i82975x_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>");
 MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
index 90320917be2875a6a0fc77361fd59e254f7670f6..8e6b91bd2e99994525cba93fbdaac6750ec6da23 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
+#include <linux/edac.h>
 #include "edac_core.h"
 
 #define MODULE_NAME "pasemi_edac"
@@ -284,6 +285,9 @@ static struct pci_driver pasemi_edac_driver = {
 
 static int __init pasemi_edac_init(void)
 {
+       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+       opstate_init();
+
        return pci_register_driver(&pasemi_edac_driver);
 }
 
@@ -298,3 +302,6 @@ module_exit(pasemi_edac_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
 MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller");
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
+
index e25f712f2dc3b8479e224d6bae80d9765304166b..9900675e9598c25006dd696f6f4d20af3694a223 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/slab.h>
+#include <linux/edac.h>
 #include "edac_core.h"
 
 #define R82600_REVISION        " Ver: 2.0.2 " __DATE__
@@ -393,6 +394,9 @@ static struct pci_driver r82600_driver = {
 
 static int __init r82600_init(void)
 {
+       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+       opstate_init();
+
        return pci_register_driver(&r82600_driver);
 }
 
@@ -412,3 +416,6 @@ MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers");
 module_param(disable_hardware_scrub, bool, 0644);
 MODULE_PARM_DESC(disable_hardware_scrub,
                 "If set, disable the chipset's automatic scrub for CEs");
+
+module_param(edac_op_state, int, 0444);
+MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
index 40ffd767647d623baa1259618b8912ef8ecc863d..dc2cec6127d1816982558de72a2ee9e4c1b884a5 100644 (file)
@@ -17,6 +17,15 @@ config EDD
           obscure configurations. Most disk controller BIOS vendors do
           not yet implement this feature.
 
+config EDD_OFF
+       bool "Sets default behavior for EDD detection to off"
+       depends on EDD
+       default n
+       help
+         Say Y if you want EDD disabled by default, even though it is compiled into the
+         kernel. Say N if you want EDD enabled by default. EDD can be dynamically set
+         using the kernel parameter 'edd={on|skipmbr|off}'.
+
 config EFI_VARS
        tristate "EFI Variable Support via sysfs"
        depends on EFI
index f235940719e71363b0a5f273ac84e9bc57093c37..25918f7dfd0fe73884ff86101ed90c04346e00cb 100644 (file)
@@ -63,7 +63,7 @@ static void smi_data_buf_free(void)
                return;
 
        dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
-               __FUNCTION__, smi_data_buf_phys_addr, smi_data_buf_size);
+               __func__, smi_data_buf_phys_addr, smi_data_buf_size);
 
        dma_free_coherent(&dcdbas_pdev->dev, smi_data_buf_size, smi_data_buf,
                          smi_data_buf_handle);
@@ -92,7 +92,7 @@ static int smi_data_buf_realloc(unsigned long size)
        if (!buf) {
                dev_dbg(&dcdbas_pdev->dev,
                        "%s: failed to allocate memory size %lu\n",
-                       __FUNCTION__, size);
+                       __func__, size);
                return -ENOMEM;
        }
        /* memory zeroed by dma_alloc_coherent */
@@ -110,7 +110,7 @@ static int smi_data_buf_realloc(unsigned long size)
        smi_data_buf_size = size;
 
        dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n",
-               __FUNCTION__, smi_data_buf_phys_addr, smi_data_buf_size);
+               __func__, smi_data_buf_phys_addr, smi_data_buf_size);
 
        return 0;
 }
@@ -258,7 +258,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
 
        if (smi_cmd->magic != SMI_CMD_MAGIC) {
                dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n",
-                        __FUNCTION__);
+                        __func__);
                return -EBADR;
        }
 
@@ -267,7 +267,7 @@ static int smi_request(struct smi_cmd *smi_cmd)
        set_cpus_allowed_ptr(current, &cpumask_of_cpu(0));
        if (smp_processor_id() != 0) {
                dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n",
-                       __FUNCTION__);
+                       __func__);
                ret = -EBUSY;
                goto out;
        }
@@ -428,7 +428,7 @@ static int host_control_smi(void)
 
        default:
                dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n",
-                       __FUNCTION__, host_control_smi_type);
+                       __func__, host_control_smi_type);
                return -ENOSYS;
        }
 
@@ -456,13 +456,13 @@ static void dcdbas_host_control(void)
        host_control_action = HC_ACTION_NONE;
 
        if (!smi_data_buf) {
-               dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __FUNCTION__);
+               dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __func__);
                return;
        }
 
        if (smi_data_buf_size < sizeof(struct apm_cmd)) {
                dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n",
-                       __FUNCTION__);
+                       __func__);
                return;
        }
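
Editor's note: this file and dell_rbu.c below replace the GCC-specific __FUNCTION__ with the C99-standard __func__; both expand to the name of the enclosing function. A minimal illustration:

#include <stdio.h>

static void report(void)
{
        /* __func__ is standard C99; __FUNCTION__ is a GCC alias for it */
        printf("%s: hello\n", __func__);
}

int main(void)
{
        report();
        return 0;
}
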
 
index 477a3d0e3caf83ccffb2cf472375cafcc799733e..6a8b1e037e0776e7e4a1e3d1214380e062da694c 100644 (file)
@@ -123,7 +123,7 @@ static int create_packet(void *data, size_t length)
        if (!newpacket) {
                printk(KERN_WARNING
                        "dell_rbu:%s: failed to allocate new "
-                       "packet\n", __FUNCTION__);
+                       "packet\n", __func__);
                retval = -ENOMEM;
                spin_lock(&rbu_data.lock);
                goto out_noalloc;
@@ -152,7 +152,7 @@ static int create_packet(void *data, size_t length)
                printk(KERN_WARNING
                        "dell_rbu:%s: failed to allocate "
                        "invalid_addr_packet_array \n",
-                       __FUNCTION__);
+                       __func__);
                retval = -ENOMEM;
                spin_lock(&rbu_data.lock);
                goto out_alloc_packet;
@@ -164,7 +164,7 @@ static int create_packet(void *data, size_t length)
                if (!packet_data_temp_buf) {
                        printk(KERN_WARNING
                                "dell_rbu:%s: failed to allocate new "
-                               "packet\n", __FUNCTION__);
+                               "packet\n", __func__);
                        retval = -ENOMEM;
                        spin_lock(&rbu_data.lock);
                        goto out_alloc_packet_array;
@@ -416,7 +416,7 @@ static int img_update_realloc(unsigned long size)
                 */
                if ((size != 0) && (rbu_data.image_update_buffer == NULL)) {
                        printk(KERN_ERR "dell_rbu:%s: corruption "
-                               "check failed\n", __FUNCTION__);
+                               "check failed\n", __func__);
                        return -EINVAL;
                }
                /*
@@ -642,7 +642,7 @@ static ssize_t write_rbu_image_type(struct kobject *kobj,
                        if (req_firm_rc) {
                                printk(KERN_ERR
                                        "dell_rbu:%s request_firmware_nowait"
-                                       " failed %d\n", __FUNCTION__, rc);
+                                       " failed %d\n", __func__, rc);
                                rc = -EIO;
                        } else
                                rbu_data.entry_created = 1;
@@ -718,7 +718,7 @@ static int __init dcdrbu_init(void)
        if (IS_ERR(rbu_device)) {
                printk(KERN_ERR
                        "dell_rbu:%s:platform_device_register_simple "
-                       "failed\n", __FUNCTION__);
+                       "failed\n", __func__);
                return PTR_ERR(rbu_device);
        }
 
index d0e5fa4ea51b617b867703b7215983847fbd7b20..11f17440fea6cb4ad623826ab00830368ead0692 100644 (file)
@@ -58,7 +58,7 @@ void __init reserve_ibft_region(void)
        unsigned int len = 0;
        void *virt;
 
-       ibft_addr = 0;
+       ibft_addr = NULL;
 
        for (pos = IBFT_START; pos < IBFT_END; pos += 16) {
                /* The table can't be inside the VGA BIOS reserved space,
index d8db2f8ee41139a04248aaaf3767d8b947927817..7f138c6195ff38ceba44bf14dea45a83705c47d4 100644 (file)
@@ -43,6 +43,7 @@ struct gpio_desc {
 /* flag symbols are bit numbers */
 #define FLAG_REQUESTED 0
 #define FLAG_IS_OUT    1
+#define FLAG_RESERVED  2
 
 #ifdef CONFIG_DEBUG_FS
        const char              *label;
@@ -68,6 +69,9 @@ static void gpio_ensure_requested(struct gpio_desc *desc)
        if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
                pr_warning("GPIO-%d autorequested\n", (int)(desc - gpio_desc));
                desc_set_label(desc, "[auto]");
+               if (!try_module_get(desc->chip->owner))
+                       pr_err("GPIO-%d: module can't be gotten \n",
+                                       (int)(desc - gpio_desc));
        }
 }
 
@@ -77,6 +81,76 @@ static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
        return gpio_desc[gpio].chip;
 }
 
+/* dynamic allocation of GPIOs, e.g. on a hotplugged device */
+static int gpiochip_find_base(int ngpio)
+{
+       int i;
+       int spare = 0;
+       int base = -ENOSPC;
+
+       for (i = ARCH_NR_GPIOS - 1; i >= 0 ; i--) {
+               struct gpio_desc *desc = &gpio_desc[i];
+               struct gpio_chip *chip = desc->chip;
+
+               if (!chip && !test_bit(FLAG_RESERVED, &desc->flags)) {
+                       spare++;
+                       if (spare == ngpio) {
+                               base = i;
+                               break;
+                       }
+               } else {
+                       spare = 0;
+                       if (chip)
+                               i -= chip->ngpio - 1;
+               }
+       }
+
+       if (gpio_is_valid(base))
+               pr_debug("%s: found new base at %d\n", __func__, base);
+       return base;
+}
+
+/**
+ * gpiochip_reserve() - reserve range of gpios to use with platform code only
+ * @start: starting gpio number
+ * @ngpio: number of gpios to reserve
+ * Context: platform init, potentially before irqs or kmalloc will work
+ *
+ * Returns a negative errno if any gpio within the range is already reserved
+ * or registered, else returns zero as a success code.  Use this function
+ * to mark a range of gpios as unavailable for dynamic gpio number allocation,
+ * for example because its driver support is not yet loaded.
+ */
+int __init gpiochip_reserve(int start, int ngpio)
+{
+       int ret = 0;
+       unsigned long flags;
+       int i;
+
+       if (!gpio_is_valid(start) || !gpio_is_valid(start + ngpio))
+               return -EINVAL;
+
+       spin_lock_irqsave(&gpio_lock, flags);
+
+       for (i = start; i < start + ngpio; i++) {
+               struct gpio_desc *desc = &gpio_desc[i];
+
+               if (desc->chip || test_bit(FLAG_RESERVED, &desc->flags)) {
+                       ret = -EBUSY;
+                       goto err;
+               }
+
+               set_bit(FLAG_RESERVED, &desc->flags);
+       }
+
+       pr_debug("%s: reserved gpios from %d to %d\n",
+                __func__, start, start + ngpio - 1);
+err:
+       spin_unlock_irqrestore(&gpio_lock, flags);
+
+       return ret;
+}
+
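
Editor's note: gpiochip_reserve() above only marks descriptors so the new dynamic allocator skips them; a chip later registered with a fixed base can still claim that range. A hedged sketch of how board setup code might use it, shown without the surrounding machine-init plumbing (the board name and GPIO numbers are hypothetical):

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/gpio.h>

/* Hypothetical board init: keep GPIOs 240..255 out of dynamic allocation
 * because an I/O expander driver that loads later will register a
 * gpio_chip with chip->base = 240 in exactly that range.  This would be
 * called from the board's machine-init code, before the driver probes.
 */
static int __init myboard_reserve_expander_gpios(void)
{
        int err = gpiochip_reserve(240, 16);

        if (err)
                pr_err("myboard: cannot reserve expander GPIOs (%d)\n", err);
        return err;
}
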
 /**
  * gpiochip_add() - register a gpio_chip
  * @chip: the chip to register, with chip->base initialized
@@ -85,38 +159,49 @@ static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
  * Returns a negative errno if the chip can't be registered, such as
  * because the chip->base is invalid or already associated with a
  * different chip.  Otherwise it returns zero as a success code.
+ *
+ * If chip->base is negative, this requests dynamic assignment of
+ * a range of valid GPIOs.
  */
 int gpiochip_add(struct gpio_chip *chip)
 {
        unsigned long   flags;
        int             status = 0;
        unsigned        id;
+       int             base = chip->base;
 
-       /* NOTE chip->base negative is reserved to mean a request for
-        * dynamic allocation.  We don't currently support that.
-        */
-
-       if (chip->base < 0 || (chip->base  + chip->ngpio) >= ARCH_NR_GPIOS) {
+       if ((!gpio_is_valid(base) || !gpio_is_valid(base + chip->ngpio))
+                       && base >= 0) {
                status = -EINVAL;
                goto fail;
        }
 
        spin_lock_irqsave(&gpio_lock, flags);
 
+       if (base < 0) {
+               base = gpiochip_find_base(chip->ngpio);
+               if (base < 0) {
+                       status = base;
+                       goto fail_unlock;
+               }
+               chip->base = base;
+       }
+
        /* these GPIO numbers must not be managed by another gpio_chip */
-       for (id = chip->base; id < chip->base + chip->ngpio; id++) {
+       for (id = base; id < base + chip->ngpio; id++) {
                if (gpio_desc[id].chip != NULL) {
                        status = -EBUSY;
                        break;
                }
        }
        if (status == 0) {
-               for (id = chip->base; id < chip->base + chip->ngpio; id++) {
+               for (id = base; id < base + chip->ngpio; id++) {
                        gpio_desc[id].chip = chip;
                        gpio_desc[id].flags = 0;
                }
        }
 
+fail_unlock:
        spin_unlock_irqrestore(&gpio_lock, flags);
 fail:
        /* failures here can mean systems won't boot... */
@@ -171,12 +256,15 @@ int gpio_request(unsigned gpio, const char *label)
 
        spin_lock_irqsave(&gpio_lock, flags);
 
-       if (gpio >= ARCH_NR_GPIOS)
+       if (!gpio_is_valid(gpio))
                goto done;
        desc = &gpio_desc[gpio];
        if (desc->chip == NULL)
                goto done;
 
+       if (!try_module_get(desc->chip->owner))
+               goto done;
+
        /* NOTE:  gpio_request() can be called in early boot,
         * before IRQs are enabled.
         */
@@ -184,8 +272,10 @@ int gpio_request(unsigned gpio, const char *label)
        if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
                desc_set_label(desc, label ? : "?");
                status = 0;
-       } else
+       } else {
                status = -EBUSY;
+               module_put(desc->chip->owner);
+       }
 
 done:
        if (status)
@@ -201,7 +291,7 @@ void gpio_free(unsigned gpio)
        unsigned long           flags;
        struct gpio_desc        *desc;
 
-       if (gpio >= ARCH_NR_GPIOS) {
+       if (!gpio_is_valid(gpio)) {
                WARN_ON(extra_checks);
                return;
        }
@@ -209,9 +299,10 @@ void gpio_free(unsigned gpio)
        spin_lock_irqsave(&gpio_lock, flags);
 
        desc = &gpio_desc[gpio];
-       if (desc->chip && test_and_clear_bit(FLAG_REQUESTED, &desc->flags))
+       if (desc->chip && test_and_clear_bit(FLAG_REQUESTED, &desc->flags)) {
                desc_set_label(desc, NULL);
-       else
+               module_put(desc->chip->owner);
+       } else
                WARN_ON(extra_checks);
 
        spin_unlock_irqrestore(&gpio_lock, flags);
@@ -236,7 +327,7 @@ const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
 {
        unsigned gpio = chip->base + offset;
 
-       if (gpio >= ARCH_NR_GPIOS || gpio_desc[gpio].chip != chip)
+       if (!gpio_is_valid(gpio) || gpio_desc[gpio].chip != chip)
                return NULL;
        if (test_bit(FLAG_REQUESTED, &gpio_desc[gpio].flags) == 0)
                return NULL;
@@ -267,7 +358,7 @@ int gpio_direction_input(unsigned gpio)
 
        spin_lock_irqsave(&gpio_lock, flags);
 
-       if (gpio >= ARCH_NR_GPIOS)
+       if (!gpio_is_valid(gpio))
                goto fail;
        chip = desc->chip;
        if (!chip || !chip->get || !chip->direction_input)
@@ -291,7 +382,7 @@ fail:
        spin_unlock_irqrestore(&gpio_lock, flags);
        if (status)
                pr_debug("%s: gpio-%d status %d\n",
-                       __FUNCTION__, gpio, status);
+                       __func__, gpio, status);
        return status;
 }
 EXPORT_SYMBOL_GPL(gpio_direction_input);
@@ -305,7 +396,7 @@ int gpio_direction_output(unsigned gpio, int value)
 
        spin_lock_irqsave(&gpio_lock, flags);
 
-       if (gpio >= ARCH_NR_GPIOS)
+       if (!gpio_is_valid(gpio))
                goto fail;
        chip = desc->chip;
        if (!chip || !chip->set || !chip->direction_output)
@@ -329,7 +420,7 @@ fail:
        spin_unlock_irqrestore(&gpio_lock, flags);
        if (status)
                pr_debug("%s: gpio-%d status %d\n",
-                       __FUNCTION__, gpio, status);
+                       __func__, gpio, status);
        return status;
 }
 EXPORT_SYMBOL_GPL(gpio_direction_output);
@@ -522,7 +613,7 @@ static int gpiolib_show(struct seq_file *s, void *unused)
 
        /* REVISIT this isn't locked against gpio_chip removal ... */
 
-       for (gpio = 0; gpio < ARCH_NR_GPIOS; gpio++) {
+       for (gpio = 0; gpio_is_valid(gpio); gpio++) {
                if (chip == gpio_desc[gpio].chip)
                        continue;
                chip = gpio_desc[gpio].chip;
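The open-coded "gpio >= ARCH_NR_GPIOS" range checks are gone in favour of gpio_is_valid(). Roughly, and assuming the generic gpiolib definition, the helper reduces to:

        /* Sketch of the helper's effect; the real definition lives in the
         * architecture's <asm/gpio.h> (asm-generic/gpio.h for gpiolib users). */
        static inline int gpio_is_valid(int number)
        {
                /* The unsigned compare also rejects negative numbers. */
                return ((unsigned)number) < ARCH_NR_GPIOS;
        }

This is also what lets the debug-listing loop at the end of the gpiolib hunk terminate on the same condition instead of repeating the constant.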
index bb60e8c1a1f0621e3d06c8292d1007ab3e66c754..7fb5b9d009d4a83a9c58a458464077caad24d6f5 100644 (file)
@@ -239,6 +239,7 @@ static int mcp23s08_probe(struct spi_device *spi)
        mcp->chip.base = pdata->base;
        mcp->chip.ngpio = 8;
        mcp->chip.can_sleep = 1;
+       mcp->chip.owner = THIS_MODULE;
 
        spi_set_drvdata(spi, mcp);
 
index 6e72fd31184d8aa95021f897ee436af3bdd7b833..93f916720b139e40b20591839126609db0e778a0 100644 (file)
 #define PCA953X_INVERT         2
 #define PCA953X_DIRECTION      3
 
-/* This is temporary - in 2.6.26 i2c_driver_data should replace it. */
-struct pca953x_desc {
-       char            name[I2C_NAME_SIZE];
-       unsigned long   driver_data;
-};
-
-static const struct pca953x_desc pca953x_descs[] = {
+static const struct i2c_device_id pca953x_id[] = {
        { "pca9534", 8, },
        { "pca9535", 16, },
        { "pca9536", 4, },
        { "pca9537", 4, },
        { "pca9538", 8, },
        { "pca9539", 16, },
+       { "pca9555", 16, },
+       { "pca9557", 8, },
        /* REVISIT several pca955x parts should work here too */
+       { }
 };
+MODULE_DEVICE_TABLE(i2c, pca953x_id);
 
 struct pca953x_chip {
        unsigned gpio_start;
@@ -189,28 +187,20 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
        gc->base = chip->gpio_start;
        gc->ngpio = gpios;
        gc->label = chip->client->name;
+       gc->owner = THIS_MODULE;
 }
 
-static int __devinit pca953x_probe(struct i2c_client *client)
+static int __devinit pca953x_probe(struct i2c_client *client,
+                                  const struct i2c_device_id *id)
 {
        struct pca953x_platform_data *pdata;
        struct pca953x_chip *chip;
-       int ret, i;
-       const struct pca953x_desc *id = NULL;
+       int ret;
 
        pdata = client->dev.platform_data;
        if (pdata == NULL)
                return -ENODEV;
 
-       /* this loop vanishes when we get i2c_device_id */
-       for (i = 0; i < ARRAY_SIZE(pca953x_descs); i++)
-               if (!strcmp(pca953x_descs[i].name, client->name)) {
-                       id = pca953x_descs + i;
-                       break;
-               }
-       if (!id)
-               return -ENODEV;
-
        chip = kzalloc(sizeof(struct pca953x_chip), GFP_KERNEL);
        if (chip == NULL)
                return -ENOMEM;
@@ -290,6 +280,7 @@ static struct i2c_driver pca953x_driver = {
        },
        .probe          = pca953x_probe,
        .remove         = pca953x_remove,
+       .id_table       = pca953x_id,
 };
 
 static int __init pca953x_init(void)
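The pca953x conversion is the template repeated throughout this merge: an i2c_device_id table, with per-chip data carried in driver_data (here the GPIO count), plus a probe() that receives the matched entry. A condensed sketch with hypothetical names; module init/exit boilerplate omitted:

        #include <linux/i2c.h>

        /* "foo" and the driver_data value 8 are placeholders, not a real chip. */
        static const struct i2c_device_id foo_id[] = {
                { "foo", 8 },
                { }                             /* terminating entry */
        };
        MODULE_DEVICE_TABLE(i2c, foo_id);

        static int foo_probe(struct i2c_client *client,
                             const struct i2c_device_id *id)
        {
                unsigned ngpio = id->driver_data;       /* data from the matched entry */

                dev_info(&client->dev, "%s: %u lines\n", id->name, ngpio);
                return 0;
        }

        static int foo_remove(struct i2c_client *client)
        {
                return 0;
        }

        static struct i2c_driver foo_driver = {
                .driver   = { .name = "foo" },
                .probe    = foo_probe,
                .remove   = foo_remove,
                .id_table = foo_id,
        };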
index c6b3b53783843cfd21f542dab4b278ba320aebe5..aa6cc8b2a2bc88afbeca4dec1b67bc75c05288a0 100644 (file)
 #include <asm/gpio.h>
 
 
+static const struct i2c_device_id pcf857x_id[] = {
+       { "pcf8574", 8 },
+       { "pca8574", 8 },
+       { "pca9670", 8 },
+       { "pca9672", 8 },
+       { "pca9674", 8 },
+       { "pcf8575", 16 },
+       { "pca8575", 16 },
+       { "pca9671", 16 },
+       { "pca9673", 16 },
+       { "pca9675", 16 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, pcf857x_id);
+
 /*
  * The pcf857x, pca857x, and pca967x chips only expose one read and one
  * write register.  Writing a "one" bit (to match the reset state) lets
@@ -142,7 +157,8 @@ static void pcf857x_set16(struct gpio_chip *chip, unsigned offset, int value)
 
 /*-------------------------------------------------------------------------*/
 
-static int pcf857x_probe(struct i2c_client *client)
+static int pcf857x_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
 {
        struct pcf857x_platform_data    *pdata;
        struct pcf857x                  *gpio;
@@ -159,6 +175,7 @@ static int pcf857x_probe(struct i2c_client *client)
 
        gpio->chip.base = pdata->gpio_base;
        gpio->chip.can_sleep = 1;
+       gpio->chip.owner = THIS_MODULE;
 
        /* NOTE:  the OnSemi jlc1562b is also largely compatible with
         * these parts, notably for output.  It has a low-resolution
@@ -171,13 +188,8 @@ static int pcf857x_probe(struct i2c_client *client)
         *
         * NOTE: we don't distinguish here between *4 and *4a parts.
         */
-       if (strcmp(client->name, "pcf8574") == 0
-                       || strcmp(client->name, "pca8574") == 0
-                       || strcmp(client->name, "pca9670") == 0
-                       || strcmp(client->name, "pca9672") == 0
-                       || strcmp(client->name, "pca9674") == 0
-                       ) {
-               gpio->chip.ngpio = 8;
+       gpio->chip.ngpio = id->driver_data;
+       if (gpio->chip.ngpio == 8) {
                gpio->chip.direction_input = pcf857x_input8;
                gpio->chip.get = pcf857x_get8;
                gpio->chip.direction_output = pcf857x_output8;
@@ -197,13 +209,7 @@ static int pcf857x_probe(struct i2c_client *client)
         *
         * NOTE: we don't distinguish here between '75 and '75c parts.
         */
-       } else if (strcmp(client->name, "pcf8575") == 0
-                       || strcmp(client->name, "pca8575") == 0
-                       || strcmp(client->name, "pca9671") == 0
-                       || strcmp(client->name, "pca9673") == 0
-                       || strcmp(client->name, "pca9675") == 0
-                       ) {
-               gpio->chip.ngpio = 16;
+       } else if (gpio->chip.ngpio == 16) {
                gpio->chip.direction_input = pcf857x_input16;
                gpio->chip.get = pcf857x_get16;
                gpio->chip.direction_output = pcf857x_output16;
@@ -312,6 +318,7 @@ static struct i2c_driver pcf857x_driver = {
        },
        .probe  = pcf857x_probe,
        .remove = pcf857x_remove,
+       .id_table = pcf857x_id,
 };
 
 static int __init pcf857x_init(void)
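The names keyed in the table are matched against whatever board code declared. A sketch of instantiating one of the 16-bit parts, assuming a pcf8575 behind bus 1 at address 0x20 and the <linux/i2c/pcf857x.h> platform data; the bus number, address and gpio_base are hypothetical:

        #include <linux/i2c.h>
        #include <linux/i2c/pcf857x.h>

        static struct pcf857x_platform_data pcf_pdata = {
                .gpio_base = 200,               /* made-up base for the expander's lines */
        };

        static struct i2c_board_info pcf_info __initdata = {
                I2C_BOARD_INFO("pcf8575", 0x20),
                .platform_data = &pcf_pdata,
        };

        static int __init board_declare_expander(void)
        {
                /* Matching "pcf8575" in pcf857x_id selects the 16-bit accessors above. */
                return i2c_register_board_info(1, &pcf_info, 1);
        }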
index e03c67dd3e6325ae186fbe7ecc9523223f3015ad..f43d6d3cf2fa2429d5689bbe05e87cf0f99358e0 100644 (file)
@@ -606,7 +606,7 @@ static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
                case 2:
                        if ((end - start) < 2)
                                return NULL;
-                       item->data.u16 = le16_to_cpu(get_unaligned((__le16*)start));
+                       item->data.u16 = get_unaligned_le16(start);
                        start = (__u8 *)((__le16 *)start + 1);
                        return start;
 
@@ -614,7 +614,7 @@ static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
                        item->size++;
                        if ((end - start) < 4)
                                return NULL;
-                       item->data.u32 = le32_to_cpu(get_unaligned((__le32*)start));
+                       item->data.u32 = get_unaligned_le32(start);
                        start = (__u8 *)((__le32 *)start + 1);
                        return start;
        }
@@ -765,7 +765,7 @@ static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n)
 
        report += offset >> 3;  /* adjust byte index */
        offset &= 7;            /* now only need bit offset into one byte */
-       x = le64_to_cpu(get_unaligned((__le64 *) report));
+       x = get_unaligned_le64(report);
        x = (x >> offset) & ((1ULL << n) - 1);  /* extract bit field */
        return (u32) x;
 }
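get_unaligned_le16/32/64() fold the byte-order conversion into the unaligned access, so the separate le*_to_cpu(get_unaligned(...)) dance disappears. Roughly, for the 16-bit case, a sketch of the generic (byte-at-a-time) implementation rather than any per-arch one:

        static inline u16 sketch_get_unaligned_le16(const void *p)
        {
                const u8 *b = p;

                /* Same value as le16_to_cpu(get_unaligned((__le16 *)p)) on any endianness. */
                return b[0] | (b[1] << 8);
        }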
index e0d805f1b2bff4d80cb43eb12d320b07b412ddd5..01427c51c7cc71b602d4df4535ab8b745e65a791 100644 (file)
@@ -654,7 +654,7 @@ static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf, size_t co
        ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
                HID_REQ_SET_REPORT,
                USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
-               cpu_to_le16(((HID_OUTPUT_REPORT + 1) << 8) | *buf),
+               ((HID_OUTPUT_REPORT + 1) << 8) | *buf,
                interface->desc.bInterfaceNumber, buf + 1, count - 1,
                USB_CTRL_SET_TIMEOUT);
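The dropped cpu_to_le16() reflects that usb_control_msg() takes wValue and wIndex in CPU byte order and byte-swaps them itself when it fills in the setup packet; the extra swap was therefore a no-op on little-endian hosts but wrong on big-endian ones. Paraphrasing, as a sketch, what the USB core does internally with the arguments:

        /* Inside usb_control_msg() (paraphrased sketch): the single swap happens here. */
        dr->wValue  = cpu_to_le16(value);
        dr->wIndex  = cpu_to_le16(index);
        dr->wLength = cpu_to_le16(size);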
 
index ed71a8bc70dcf855710d525aedff131965c57122..5c8b6e0ff47c21354592d6ddcb63f5ebe48db5db 100644 (file)
@@ -224,7 +224,7 @@ static int ads7828_detect(struct i2c_adapter *adapter, int address, int kind)
                        if (in_data & 0xF000) {
                                printk(KERN_DEBUG
                                "%s : Doesn't look like an ads7828 device\n",
-                               __FUNCTION__);
+                               __func__);
                                goto exit_free;
                        }
                }
index 9587869bdba0eb5bf58314b56c76d98f91b28507..c1009d6f97965b160d26ac4723235d97280a427c 100644 (file)
@@ -422,18 +422,14 @@ static ssize_t show_volt(struct device *dev, struct device_attribute *devattr,
  * number in the range -128 to 127, or as an unsigned number that must
  * be offset by 64.
  */
-static int decode_temp(struct adt7473_data *data, u8 raw)
+static int decode_temp(u8 twos_complement, u8 raw)
 {
-       if (data->temp_twos_complement)
-               return (s8)raw;
-       return raw - 64;
+       return twos_complement ? (s8)raw : raw - 64;
 }
 
-static u8 encode_temp(struct adt7473_data *data, int cooked)
+static u8 encode_temp(u8 twos_complement, int cooked)
 {
-       if (data->temp_twos_complement)
-               return (cooked & 0xFF);
-       return cooked + 64;
+       return twos_complement ? cooked & 0xFF : cooked + 64;
 }
 
 static ssize_t show_temp_min(struct device *dev,
@@ -442,8 +438,9 @@ static ssize_t show_temp_min(struct device *dev,
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct adt7473_data *data = adt7473_update_device(dev);
-       return sprintf(buf, "%d\n",
-                      1000 * decode_temp(data, data->temp_min[attr->index]));
+       return sprintf(buf, "%d\n", 1000 * decode_temp(
+                                               data->temp_twos_complement,
+                                               data->temp_min[attr->index]));
 }
 
 static ssize_t set_temp_min(struct device *dev,
@@ -455,7 +452,7 @@ static ssize_t set_temp_min(struct device *dev,
        struct i2c_client *client = to_i2c_client(dev);
        struct adt7473_data *data = i2c_get_clientdata(client);
        int temp = simple_strtol(buf, NULL, 10) / 1000;
-       temp = encode_temp(data, temp);
+       temp = encode_temp(data->temp_twos_complement, temp);
 
        mutex_lock(&data->lock);
        data->temp_min[attr->index] = temp;
@@ -472,8 +469,9 @@ static ssize_t show_temp_max(struct device *dev,
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct adt7473_data *data = adt7473_update_device(dev);
-       return sprintf(buf, "%d\n",
-                      1000 * decode_temp(data, data->temp_max[attr->index]));
+       return sprintf(buf, "%d\n", 1000 * decode_temp(
+                                               data->temp_twos_complement,
+                                               data->temp_max[attr->index]));
 }
 
 static ssize_t set_temp_max(struct device *dev,
@@ -485,7 +483,7 @@ static ssize_t set_temp_max(struct device *dev,
        struct i2c_client *client = to_i2c_client(dev);
        struct adt7473_data *data = i2c_get_clientdata(client);
        int temp = simple_strtol(buf, NULL, 10) / 1000;
-       temp = encode_temp(data, temp);
+       temp = encode_temp(data->temp_twos_complement, temp);
 
        mutex_lock(&data->lock);
        data->temp_max[attr->index] = temp;
@@ -501,8 +499,9 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct adt7473_data *data = adt7473_update_device(dev);
-       return sprintf(buf, "%d\n",
-                      1000 * decode_temp(data, data->temp[attr->index]));
+       return sprintf(buf, "%d\n", 1000 * decode_temp(
+                                               data->temp_twos_complement,
+                                               data->temp[attr->index]));
 }
 
 static ssize_t show_fan_min(struct device *dev,
@@ -671,8 +670,9 @@ static ssize_t show_temp_tmax(struct device *dev,
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct adt7473_data *data = adt7473_update_device(dev);
-       return sprintf(buf, "%d\n",
-                      1000 * decode_temp(data, data->temp_tmax[attr->index]));
+       return sprintf(buf, "%d\n", 1000 * decode_temp(
+                                               data->temp_twos_complement,
+                                               data->temp_tmax[attr->index]));
 }
 
 static ssize_t set_temp_tmax(struct device *dev,
@@ -684,7 +684,7 @@ static ssize_t set_temp_tmax(struct device *dev,
        struct i2c_client *client = to_i2c_client(dev);
        struct adt7473_data *data = i2c_get_clientdata(client);
        int temp = simple_strtol(buf, NULL, 10) / 1000;
-       temp = encode_temp(data, temp);
+       temp = encode_temp(data->temp_twos_complement, temp);
 
        mutex_lock(&data->lock);
        data->temp_tmax[attr->index] = temp;
@@ -701,8 +701,9 @@ static ssize_t show_temp_tmin(struct device *dev,
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct adt7473_data *data = adt7473_update_device(dev);
-       return sprintf(buf, "%d\n",
-                      1000 * decode_temp(data, data->temp_tmin[attr->index]));
+       return sprintf(buf, "%d\n", 1000 * decode_temp(
+                                               data->temp_twos_complement,
+                                               data->temp_tmin[attr->index]));
 }
 
 static ssize_t set_temp_tmin(struct device *dev,
@@ -714,7 +715,7 @@ static ssize_t set_temp_tmin(struct device *dev,
        struct i2c_client *client = to_i2c_client(dev);
        struct adt7473_data *data = i2c_get_clientdata(client);
        int temp = simple_strtol(buf, NULL, 10) / 1000;
-       temp = encode_temp(data, temp);
+       temp = encode_temp(data->temp_twos_complement, temp);
 
        mutex_lock(&data->lock);
        data->temp_tmin[attr->index] = temp;
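With the encoding flag passed explicitly, the two adt7473 register formats are easy to spot-check. A small worked example, values hypothetical:

        static void adt7473_temp_example(void)
        {
                u8  raw  = encode_temp(1, -20);         /* two's-complement format: 0xec */
                int back = decode_temp(1, raw);         /* -20 degC again */
                int off  = decode_temp(0, 0x40);        /* offset-64 format: 0x40 -> 0 degC */

                (void)back;
                (void)off;
        }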
index 84712a22acea1f4f17077cf59108701f91e07847..fe2eea4d799b60b4b4ab5df992eeb1aa200175bf 100644 (file)
@@ -953,12 +953,8 @@ static void asb100_write_value(struct i2c_client *client, u16 reg, u16 value)
 static void asb100_init_client(struct i2c_client *client)
 {
        struct asb100_data *data = i2c_get_clientdata(client);
-       int vid = 0;
 
-       vid = asb100_read_value(client, ASB100_REG_VID_FANDIV) & 0x0f;
-       vid |= (asb100_read_value(client, ASB100_REG_CHIPID) & 0x01) << 4;
        data->vrm = vid_which_vrm();
-       vid = vid_from_reg(vid, data->vrm);
 
        /* Start monitoring */
        asb100_write_value(client, ASB100_REG_CONFIG,
index 1464338e4e11b891298ff18f212bf2135c17804a..dc1f30e432eab96864d52af0706a2ac39f7338a7 100644 (file)
@@ -117,7 +117,8 @@ struct f75375_data {
 static int f75375_attach_adapter(struct i2c_adapter *adapter);
 static int f75375_detect(struct i2c_adapter *adapter, int address, int kind);
 static int f75375_detach_client(struct i2c_client *client);
-static int f75375_probe(struct i2c_client *client);
+static int f75375_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id);
 static int f75375_remove(struct i2c_client *client);
 
 static struct i2c_driver f75375_legacy_driver = {
@@ -128,12 +129,20 @@ static struct i2c_driver f75375_legacy_driver = {
        .detach_client = f75375_detach_client,
 };
 
+static const struct i2c_device_id f75375_id[] = {
+       { "f75373", f75373 },
+       { "f75375", f75375 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, f75375_id);
+
 static struct i2c_driver f75375_driver = {
        .driver = {
                .name = "f75375",
        },
        .probe = f75375_probe,
        .remove = f75375_remove,
+       .id_table = f75375_id,
 };
 
 static inline int f75375_read8(struct i2c_client *client, u8 reg)
@@ -628,7 +637,8 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data,
 
 }
 
-static int f75375_probe(struct i2c_client *client)
+static int f75375_probe(struct i2c_client *client,
+               const struct i2c_device_id *id)
 {
        struct f75375_data *data = i2c_get_clientdata(client);
        struct f75375s_platform_data *f75375s_pdata = client->dev.platform_data;
@@ -643,15 +653,7 @@ static int f75375_probe(struct i2c_client *client)
        i2c_set_clientdata(client, data);
        data->client = client;
        mutex_init(&data->update_lock);
-
-       if (strcmp(client->name, "f75375") == 0)
-               data->kind = f75375;
-       else if (strcmp(client->name, "f75373") == 0)
-               data->kind = f75373;
-       else {
-               dev_err(&client->dev, "Unsupported device: %s\n", client->name);
-               return -ENODEV;
-       }
+       data->kind = id->driver_data;
 
        if ((err = sysfs_create_group(&client->dev.kobj, &f75375_group)))
                goto exit_free;
@@ -712,6 +714,7 @@ static int f75375_detect(struct i2c_adapter *adapter, int address, int kind)
        u8 version = 0;
        int err = 0;
        const char *name = "";
+       struct i2c_device_id id;
 
        if (!(client = kzalloc(sizeof(*client), GFP_KERNEL))) {
                err = -ENOMEM;
@@ -748,7 +751,9 @@ static int f75375_detect(struct i2c_adapter *adapter, int address, int kind)
        if ((err = i2c_attach_client(client)))
                goto exit_free;
 
-       if ((err = f75375_probe(client)) < 0)
+       strlcpy(id.name, name, I2C_NAME_SIZE);
+       id.driver_data = kind;
+       if ((err = f75375_probe(client, &id)) < 0)
                goto exit_detach;
 
        return 0;
index 115f4090b98e3c94cab67e058a7e1676e9b3546c..fa7696905154ca1e14444ca2a95e946470084cd4 100644 (file)
@@ -248,7 +248,7 @@ static int lm75_detach_client(struct i2c_client *client)
 
 /* All registers are word-sized, except for the configuration register.
    LM75 uses a high-byte first convention, which is exactly opposite to
-   the usual practice. */
+   the SMBus standard. */
 static int lm75_read_value(struct i2c_client *client, u8 reg)
 {
        if (reg == LM75_REG_CONF)
@@ -257,9 +257,6 @@ static int lm75_read_value(struct i2c_client *client, u8 reg)
                return swab16(i2c_smbus_read_word_data(client, reg));
 }
 
-/* All registers are word-sized, except for the configuration register.
-   LM75 uses a high-byte first convention, which is exactly opposite to
-   the usual practice. */
 static int lm75_write_value(struct i2c_client *client, u8 reg, u16 value)
 {
        if (reg == LM75_REG_CONF)
index f61d8f4185b243efcf7bd3fb0f10fa6284dde6d2..eb03544c731ca47dd94c6d7d940b3f66a43dbcdd 100644 (file)
@@ -335,11 +335,23 @@ exit:
 static int __init smsc47b397_find(unsigned short *addr)
 {
        u8 id, rev;
+       char *name;
 
        superio_enter();
        id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
 
-       if ((id != 0x6f) && (id != 0x81) && (id != 0x85)) {
+       switch(id) {
+       case 0x81:
+               name = "SCH5307-NS";
+               break;
+       case 0x6f:
+               name = "LPC47B397-NC";
+               break;
+       case 0x85:
+       case 0x8c:
+               name = "SCH5317";
+               break;
+       default:
                superio_exit();
                return -ENODEV;
        }
@@ -352,8 +364,7 @@ static int __init smsc47b397_find(unsigned short *addr)
 
        printk(KERN_INFO DRVNAME ": found SMSC %s "
                "(base address 0x%04x, revision %u)\n",
-               id == 0x81 ? "SCH5307-NS" : id == 0x85 ? "SCH5317" :
-              "LPC47B397-NC", *addr, rev);
+               name, *addr, rev);
 
        superio_exit();
        return 0;
index ee35af93b574d37879acf53d813bd24f12c8dc29..ed3c019b78c7284f805a83596abba56f77664028 100644 (file)
@@ -1024,10 +1024,9 @@ static struct sensor_device_attribute_2 w83793_vid[] = {
        SENSOR_ATTR_2(cpu0_vid, S_IRUGO, show_vid, NULL, NOT_USED, 0),
        SENSOR_ATTR_2(cpu1_vid, S_IRUGO, show_vid, NULL, NOT_USED, 1),
 };
+static DEVICE_ATTR(vrm, S_IWUSR | S_IRUGO, show_vrm, store_vrm);
 
 static struct sensor_device_attribute_2 sda_single_files[] = {
-       SENSOR_ATTR_2(vrm, S_IWUSR | S_IRUGO, show_vrm, store_vrm,
-                     NOT_USED, NOT_USED),
        SENSOR_ATTR_2(chassis, S_IWUSR | S_IRUGO, show_alarm_beep,
                      store_chassis_clear, ALARM_STATUS, 30),
        SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_beep_enable,
@@ -1080,6 +1079,7 @@ static int w83793_detach_client(struct i2c_client *client)
 
                for (i = 0; i < ARRAY_SIZE(w83793_vid); i++)
                        device_remove_file(dev, &w83793_vid[i].dev_attr);
+               device_remove_file(dev, &dev_attr_vrm);
 
                for (i = 0; i < ARRAY_SIZE(w83793_left_fan); i++)
                        device_remove_file(dev, &w83793_left_fan[i].dev_attr);
@@ -1282,7 +1282,6 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
        /* Initialize the chip */
        w83793_init_client(client);
 
-       data->vrm = vid_which_vrm();
        /*
           Only fan 1-5 has their own input pins,
           Pwm 1-3 has their own pins
@@ -1293,7 +1292,9 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
        val = w83793_read_value(client, W83793_REG_FANIN_CTRL);
 
        /* check the function of pins 49-56 */
-       if (!(tmp & 0x80)) {
+       if (tmp & 0x80) {
+               data->has_vid |= 0x2;   /* has VIDB */
+       } else {
                data->has_pwm |= 0x18;  /* pwm 4,5 */
                if (val & 0x01) {       /* fan 6 */
                        data->has_fan |= 0x20;
@@ -1309,13 +1310,15 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
                }
        }
 
+       /* check the function of pins 37-40 */
+       if (!(tmp & 0x29))
+               data->has_vid |= 0x1;   /* has VIDA */
        if (0x08 == (tmp & 0x0c)) {
                if (val & 0x08) /* fan 9 */
                        data->has_fan |= 0x100;
                if (val & 0x10) /* fan 10 */
                        data->has_fan |= 0x200;
        }
-
        if (0x20 == (tmp & 0x30)) {
                if (val & 0x20) /* fan 11 */
                        data->has_fan |= 0x400;
@@ -1359,13 +1362,6 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
        if (tmp & 0x02)
                data->has_temp |= 0x20;
 
-       /* Detect the VID usage and ignore unused input */
-       tmp = w83793_read_value(client, W83793_REG_MFC);
-       if (!(tmp & 0x29))
-               data->has_vid |= 0x1;   /* has VIDA */
-       if (tmp & 0x80)
-               data->has_vid |= 0x2;   /* has VIDB */
-
        /* Register sysfs hooks */
        for (i = 0; i < ARRAY_SIZE(w83793_sensor_attr_2); i++) {
                err = device_create_file(dev,
@@ -1381,6 +1377,12 @@ static int w83793_detect(struct i2c_adapter *adapter, int address, int kind)
                if (err)
                        goto exit_remove;
        }
+       if (data->has_vid) {
+               data->vrm = vid_which_vrm();
+               err = device_create_file(dev, &dev_attr_vrm);
+               if (err)
+                       goto exit_remove;
+       }
 
        for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) {
                err = device_create_file(dev, &sda_single_files[i].dev_attr);
index 77f2d482888b1fe3293080be6d83d17a17a4fd08..52e268e25dab01e574f97293008ec8e7a7e869fc 100644 (file)
@@ -301,8 +301,8 @@ static u8 w83l785ts_read_value(struct i2c_client *client, u8 reg, u8 defval)
                msleep(i);
        }
 
-       dev_err(&client->dev, "Couldn't read value from register 0x%02x. "
-               "Please report.\n", reg);
+       dev_err(&client->dev, "Couldn't read value from register 0x%02x.\n",
+               reg);
        return defval;
 }
 
index e5e96c8175660b15f006d9517a2866248e5a4a62..c38a0a112208286787d94966f57048980ad459b8 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * i2c-amd756-s4882.c - i2c-amd756 extras for the Tyan S4882 motherboard
  *
- * Copyright (C) 2004 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2004, 2008 Jean Delvare <khali@linux-fr.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -231,7 +231,8 @@ ERROR2:
        kfree(s4882_adapter);
        s4882_adapter = NULL;
 ERROR1:
-       i2c_del_adapter(&amd756_smbus);
+       /* Restore physical bus */
+       i2c_add_adapter(&amd756_smbus);
 ERROR0:
        return error;
 }
index 9bbe96cef719ee204243645e52b124323d2721f7..fdc9ad805e358528b8599f0dbd4635bb817155e7 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/ioport.h>
 #include <linux/i2c.h>
 #include <linux/init.h>
-#include <linux/apm_bios.h>
 #include <linux/dmi.h>
 #include <asm/io.h>
 
@@ -223,7 +222,7 @@ static int piix4_transaction(void)
                        dev_err(&piix4_adapter.dev, "Failed! (%02x)\n", temp);
                        return -1;
                } else {
-                       dev_dbg(&piix4_adapter.dev, "Successfull!\n");
+                       dev_dbg(&piix4_adapter.dev, "Successful!\n");
                }
        }
 
@@ -343,12 +342,7 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
 
 
        switch (size) {
-       case PIIX4_BYTE:        /* Where is the result put? I assume here it is in
-                                  SMBHSTDAT0 but it might just as well be in the
-                                  SMBHSTCMD. No clue in the docs */
-
-               data->byte = inb_p(SMBHSTDAT0);
-               break;
+       case PIIX4_BYTE:
        case PIIX4_BYTE_DATA:
                data->byte = inb_p(SMBHSTDAT0);
                break;
index 283769cecee297531fd80c66e1cc34f1ee1bbf64..9ca8f9155f958c40378a50770eb3327a0702c2e6 100644 (file)
@@ -238,7 +238,7 @@ static int sis5595_transaction(struct i2c_adapter *adap)
                        dev_dbg(&adap->dev, "Failed! (%02x)\n", temp);
                        return -1;
                } else {
-                       dev_dbg(&adap->dev, "Successfull!\n");
+                       dev_dbg(&adap->dev, "Successful!\n");
                }
        }
 
@@ -316,14 +316,8 @@ static s32 sis5595_access(struct i2c_adapter *adap, u16 addr,
                }
                size = (size == I2C_SMBUS_PROC_CALL) ? SIS5595_PROC_CALL : SIS5595_WORD_DATA;
                break;
-/*
-       case I2C_SMBUS_BLOCK_DATA:
-               printk(KERN_WARNING "sis5595.o: Block data not yet implemented!\n");
-               return -1;
-               break;
-*/
        default:
-               printk(KERN_WARNING "sis5595.o: Unsupported transaction %d\n", size);
+               dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
                return -1;
        }
 
@@ -338,9 +332,7 @@ static s32 sis5595_access(struct i2c_adapter *adap, u16 addr,
 
 
        switch (size) {
-       case SIS5595_BYTE:      /* Where is the result put? I assume here it is in
-                                  SMB_DATA but it might just as well be in the
-                                  SMB_CMD. No clue in the docs */
+       case SIS5595_BYTE:
        case SIS5595_BYTE_DATA:
                data->byte = sis5595_read(SMB_BYTE);
                break;
index 5fd734f99ee9df9f12ff5103fa13606c557d852f..3765dd7f450f06e7c71f6623d67795c9e4cc056d 100644 (file)
@@ -136,7 +136,7 @@ static int sis630_transaction_start(struct i2c_adapter *adap, int size, u8 *oldc
                        dev_dbg(&adap->dev, "Failed! (%02x)\n", temp);
                        return -1;
                 } else {
-                       dev_dbg(&adap->dev, "Successfull!\n");
+                       dev_dbg(&adap->dev, "Successful!\n");
                }
         }
 
index c2a9f8c94f5e0363cc71580b271010125e3ada00..d08eeec53913ce9759a74e82b984bdc06dcf18b8 100644 (file)
@@ -33,7 +33,7 @@
 static unsigned short chip_addr[MAX_CHIPS];
 module_param_array(chip_addr, ushort, NULL, S_IRUGO);
 MODULE_PARM_DESC(chip_addr,
-                "Chip addresses (up to 10, between 0x03 and 0x77)\n");
+                "Chip addresses (up to 10, between 0x03 and 0x77)");
 
 struct stub_chip {
        u8 pointer;
index 1b0cfd5472fda700c4e123edf834f2f6f20541ec..de9db49e54d93d8785f0beef0df4c5df28036a12 100644 (file)
@@ -51,7 +51,6 @@ struct taos_data {
 /* TAOS TSL2550 EVM */
 static struct i2c_board_info tsl2550_info = {
        I2C_BOARD_INFO("tsl2550", 0x39),
-       .type   = "tsl2550",
 };
 
 /* Instantiate i2c devices based on the adapter name */
@@ -59,7 +58,7 @@ static struct i2c_client *taos_instantiate_device(struct i2c_adapter *adapter)
 {
        if (!strncmp(adapter->name, "TAOS TSL2550 EVM", 16)) {
                dev_info(&adapter->dev, "Instantiating device %s at 0x%02x\n",
-                       tsl2550_info.driver_name, tsl2550_info.addr);
+                       tsl2550_info.type, tsl2550_info.addr);
                return i2c_new_device(adapter, &tsl2550_info);
        }
 
index 9e94542c18a2bc57abd22d05c17c0b04955fe7c3..23be4d42cb02c48da998864f3d13da047e75092b 100644 (file)
@@ -200,7 +200,8 @@ static struct bin_attribute ds1682_eeprom_attr = {
 /*
  * Called when a ds1682 device is matched with this driver
  */
-static int ds1682_probe(struct i2c_client *client)
+static int ds1682_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
 {
        int rc;
 
@@ -234,12 +235,19 @@ static int ds1682_remove(struct i2c_client *client)
        return 0;
 }
 
+static const struct i2c_device_id ds1682_id[] = {
+       { "ds1682", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, ds1682_id);
+
 static struct i2c_driver ds1682_driver = {
        .driver = {
                .name = "ds1682",
        },
        .probe = ds1682_probe,
        .remove = ds1682_remove,
+       .id_table = ds1682_id,
 };
 
 static int __init ds1682_init(void)
index 2dea0123a958577ad87a3095c159edf177d7050e..b36db1797c11138982171b421c45a325bab01a62 100644 (file)
@@ -1149,7 +1149,8 @@ static inline void menelaus_rtc_init(struct menelaus_chip *m)
 
 static struct i2c_driver menelaus_i2c_driver;
 
-static int menelaus_probe(struct i2c_client *client)
+static int menelaus_probe(struct i2c_client *client,
+                         const struct i2c_device_id *id)
 {
        struct menelaus_chip    *menelaus;
        int                     rev = 0, val;
@@ -1242,12 +1243,19 @@ static int __exit menelaus_remove(struct i2c_client *client)
        return 0;
 }
 
+static const struct i2c_device_id menelaus_id[] = {
+       { "menelaus", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, menelaus_id);
+
 static struct i2c_driver menelaus_i2c_driver = {
        .driver = {
                .name           = DRIVER_NAME,
        },
        .probe          = menelaus_probe,
        .remove         = __exit_p(menelaus_remove),
+       .id_table       = menelaus_id,
 };
 
 static int __init menelaus_init(void)
index b67f69c2e7f3850c9a80cf405dc78a3b29505d93..85949685191bb3218e75cb5547aab3750b7e291d 100644 (file)
@@ -64,7 +64,6 @@ static struct i2c_driver tps65010_driver;
  * as part of board setup by a bootloader.
  */
 enum tps_model {
-       TPS_UNKNOWN = 0,
        TPS65010,
        TPS65011,
        TPS65012,
@@ -527,11 +526,13 @@ static int __exit tps65010_remove(struct i2c_client *client)
        flush_scheduled_work();
        debugfs_remove(tps->file);
        kfree(tps);
+       i2c_set_clientdata(client, NULL);
        the_tps = NULL;
        return 0;
 }
 
-static int tps65010_probe(struct i2c_client *client)
+static int tps65010_probe(struct i2c_client *client,
+                         const struct i2c_device_id *id)
 {
        struct tps65010         *tps;
        int                     status;
@@ -552,20 +553,7 @@ static int tps65010_probe(struct i2c_client *client)
        mutex_init(&tps->lock);
        INIT_DELAYED_WORK(&tps->work, tps65010_work);
        tps->client = client;
-
-       if (strcmp(client->name, "tps65010") == 0)
-               tps->model = TPS65010;
-       else if (strcmp(client->name, "tps65011") == 0)
-               tps->model = TPS65011;
-       else if (strcmp(client->name, "tps65012") == 0)
-               tps->model = TPS65012;
-       else if (strcmp(client->name, "tps65013") == 0)
-               tps->model = TPS65013;
-       else {
-               dev_warn(&client->dev, "unknown chip '%s'\n", client->name);
-               status = -ENODEV;
-               goto fail1;
-       }
+       tps->model = id->driver_data;
 
        /* the IRQ is active low, but many gpio lines can't support that
         * so this driver uses falling-edge triggers instead.
@@ -594,9 +582,6 @@ static int tps65010_probe(struct i2c_client *client)
        case TPS65012:
                tps->por = 1;
                break;
-       case TPS_UNKNOWN:
-               printk(KERN_WARNING "%s: unknown TPS chip\n", DRIVER_NAME);
-               break;
        /* else CHGCONFIG.POR is replaced by AUA, enabling a WAIT mode */
        }
        tps->chgconf = i2c_smbus_read_byte_data(client, TPS_CHGCONFIG);
@@ -615,6 +600,7 @@ static int tps65010_probe(struct i2c_client *client)
                i2c_smbus_read_byte_data(client, TPS_DEFGPIO),
                i2c_smbus_read_byte_data(client, TPS_MASK3));
 
+       i2c_set_clientdata(client, tps);
        the_tps = tps;
 
 #if    defined(CONFIG_USB_GADGET) && !defined(CONFIG_USB_OTG)
@@ -682,12 +668,22 @@ fail1:
        return status;
 }
 
+static const struct i2c_device_id tps65010_id[] = {
+       { "tps65010", TPS65010 },
+       { "tps65011", TPS65011 },
+       { "tps65012", TPS65012 },
+       { "tps65013", TPS65013 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, tps65010_id);
+
 static struct i2c_driver tps65010_driver = {
        .driver = {
                .name   = "tps65010",
        },
        .probe  = tps65010_probe,
        .remove = __exit_p(tps65010_remove),
+       .id_table = tps65010_id,
 };
 
 /*-------------------------------------------------------------------------*/
index a10fd2791a69ab938eb214bd2376a02c76749959..1a9cc135219f4a5ea613c8da9819676cf19da0f4 100644 (file)
@@ -364,7 +364,8 @@ static int tsl2550_init_client(struct i2c_client *client)
  */
 
 static struct i2c_driver tsl2550_driver;
-static int __devinit tsl2550_probe(struct i2c_client *client)
+static int __devinit tsl2550_probe(struct i2c_client *client,
+                                  const struct i2c_device_id *id)
 {
        struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
        struct tsl2550_data *data;
@@ -451,6 +452,12 @@ static int tsl2550_resume(struct i2c_client *client)
 
 #endif /* CONFIG_PM */
 
+static const struct i2c_device_id tsl2550_id[] = {
+       { "tsl2550", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, tsl2550_id);
+
 static struct i2c_driver tsl2550_driver = {
        .driver = {
                .name   = TSL2550_DRV_NAME,
@@ -460,6 +467,7 @@ static struct i2c_driver tsl2550_driver = {
        .resume = tsl2550_resume,
        .probe  = tsl2550_probe,
        .remove = __devexit_p(tsl2550_remove),
+       .id_table = tsl2550_id,
 };
 
 static int __init tsl2550_init(void)
index 6c7fa8d53c0eeaeccfec0366302cdc37717efecb..26384daccb968180c369207e70f35e0a3598e04d 100644 (file)
@@ -48,6 +48,17 @@ static DEFINE_IDR(i2c_adapter_idr);
 
 /* ------------------------------------------------------------------------- */
 
+static const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
+                                               const struct i2c_client *client)
+{
+       while (id->name[0]) {
+               if (strcmp(client->name, id->name) == 0)
+                       return id;
+               id++;
+       }
+       return NULL;
+}
+
 static int i2c_device_match(struct device *dev, struct device_driver *drv)
 {
        struct i2c_client       *client = to_i2c_client(dev);
@@ -59,6 +70,10 @@ static int i2c_device_match(struct device *dev, struct device_driver *drv)
        if (!is_newstyle_driver(driver))
                return 0;
 
+       /* match on an id table if there is one */
+       if (driver->id_table)
+               return i2c_match_id(driver->id_table, client) != NULL;
+
        /* new style drivers use the same kind of driver matching policy
         * as platform devices or SPI:  compare device and driver IDs.
         */
@@ -73,11 +88,17 @@ static int i2c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
        struct i2c_client       *client = to_i2c_client(dev);
 
        /* by definition, legacy drivers can't hotplug */
-       if (dev->driver || !client->driver_name)
+       if (dev->driver)
                return 0;
 
-       if (add_uevent_var(env, "MODALIAS=%s", client->driver_name))
-               return -ENOMEM;
+       if (client->driver_name[0]) {
+               if (add_uevent_var(env, "MODALIAS=%s", client->driver_name))
+                       return -ENOMEM;
+       } else {
+               if (add_uevent_var(env, "MODALIAS=%s%s",
+                                  I2C_MODULE_PREFIX, client->name))
+                       return -ENOMEM;
+       }
        dev_dbg(dev, "uevent\n");
        return 0;
 }
@@ -90,13 +111,19 @@ static int i2c_device_probe(struct device *dev)
 {
        struct i2c_client       *client = to_i2c_client(dev);
        struct i2c_driver       *driver = to_i2c_driver(dev->driver);
+       const struct i2c_device_id *id;
        int status;
 
        if (!driver->probe)
                return -ENODEV;
        client->driver = driver;
        dev_dbg(dev, "probe\n");
-       status = driver->probe(client);
+
+       if (driver->id_table)
+               id = i2c_match_id(driver->id_table, client);
+       else
+               id = NULL;
+       status = driver->probe(client, id);
        if (status)
                client->driver = NULL;
        return status;
@@ -179,9 +206,9 @@ static ssize_t show_client_name(struct device *dev, struct device_attribute *att
 static ssize_t show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct i2c_client *client = to_i2c_client(dev);
-       return client->driver_name
+       return client->driver_name[0]
                ? sprintf(buf, "%s\n", client->driver_name)
-               : 0;
+               : sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
 }
 
 static struct device_attribute i2c_dev_attrs[] = {
@@ -300,15 +327,21 @@ void i2c_unregister_device(struct i2c_client *client)
 EXPORT_SYMBOL_GPL(i2c_unregister_device);
 
 
-static int dummy_nop(struct i2c_client *client)
+static int dummy_probe(struct i2c_client *client,
+                      const struct i2c_device_id *id)
+{
+       return 0;
+}
+
+static int dummy_remove(struct i2c_client *client)
 {
        return 0;
 }
 
 static struct i2c_driver dummy_driver = {
        .driver.name    = "dummy",
-       .probe          = dummy_nop,
-       .remove         = dummy_nop,
+       .probe          = dummy_probe,
+       .remove         = dummy_remove,
 };
 
 /**
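Taken together, the i2c-core changes mean a client named, say, "pca9539" is matched against each registered driver's id_table, the driver's probe() sees exactly the entry that matched, and hotplug emits MODALIAS=i2c:pca9539 (I2C_MODULE_PREFIX being "i2c:"), which lines up with the aliases that MODULE_DEVICE_TABLE(i2c, ...) generates for modprobe. A sketch of the bind path, condensing the match and probe hunks above:

        /* Sketch only; the real logic is split across i2c_device_match() and
         * i2c_device_probe() in this file, and ignores the legacy driver_name path. */
        static int example_bind(struct i2c_client *client, struct i2c_driver *drv)
        {
                const struct i2c_device_id *id = NULL;

                if (drv->id_table)
                        id = i2c_match_id(drv->id_table, client);
                if (!id)
                        return -ENODEV;

                return drv->probe(client, id);
        }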
index 3f9e10001e19179706f219a9603f27261c80eb99..f702f9152ce6992ba91ef2702133f9740d432f54 100644 (file)
@@ -862,40 +862,6 @@ config BLK_DEV_IDE_BAST
          Say Y here if you want to support the onboard IDE channels on the
          Simtec BAST or the Thorcom VR1000
 
-config ETRAX_IDE
-       tristate "ETRAX IDE support"
-       depends on CRIS && BROKEN
-       select BLK_DEV_IDEDMA
-       help
-         Enables the ETRAX IDE driver.
-
-         You can't use parallel ports or SCSI ports at the same time.
-
-config ETRAX_IDE_DELAY
-       int "Delay for drives to regain consciousness"
-       depends on ETRAX_IDE && ETRAX_ARCH_V10
-       default 15
-       help
-         Number of seconds to wait for IDE drives to spin up after an IDE
-         reset.
-
-choice
-       prompt "IDE reset pin"
-       depends on ETRAX_IDE && ETRAX_ARCH_V10
-       default ETRAX_IDE_PB7_RESET
-
-config ETRAX_IDE_PB7_RESET
-       bool "Port_PB_Bit_7"
-       help
-         IDE reset on pin 7 on port B
-
-config ETRAX_IDE_G27_RESET
-       bool "Port_G_Bit_27"
-       help
-         IDE reset on pin 27 on port G
-
-endchoice
-
 config IDE_H8300
        tristate "H8300 IDE support"
        depends on H8300
index 571544c37bb26a3fbc47bcf6a11e9fd2e376956a..f94b679b611e127a92d924d925d172e213c0c43d 100644 (file)
@@ -35,7 +35,7 @@ ifeq ($(CONFIG_BLK_DEV_CMD640), y)
        obj-y += cmd640-core.o
 endif
 
-obj-$(CONFIG_BLK_DEV_IDE)              += cris/ ppc/
+obj-$(CONFIG_BLK_DEV_IDE)              += ppc/
 obj-$(CONFIG_IDE_H8300)                        += h8300/
 obj-$(CONFIG_IDE_GENERIC)              += ide-generic.o
 obj-$(CONFIG_BLK_DEV_IDEPNP)           += ide-pnp.o
index a80b9574865e18f03f5d8292133237ed52711bf0..713cef20622e986b3ff0e183861c9c0a8154c697 100644 (file)
@@ -35,12 +35,12 @@ static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
        base += BAST_IDE_CS;
        aux  += BAST_IDE_CS;
 
-       for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
-               hw.io_ports[i] = (unsigned long)base;
+       for (i = 0; i <= 7; i++) {
+               hw.io_ports_array[i] = (unsigned long)base;
                base += 0x20;
        }
 
-       hw.io_ports[IDE_CONTROL_OFFSET] = aux + (6 * 0x20);
+       hw.io_ports.ctl_addr = aux + (6 * 0x20);
        hw.irq = irq;
 
        hwif = ide_find_port();
@@ -49,14 +49,9 @@ static int __init bastide_register(unsigned int base, unsigned int aux, int irq)
 
        i = hwif->index;
 
-       if (hwif->present)
-               ide_unregister(i);
-       else
-               ide_init_port_data(hwif, i);
-
+       ide_init_port_data(hwif, i);
        ide_init_port_hw(hwif, &hw);
-       hwif->mmio = 1;
-       hwif->quirkproc = NULL;
+       hwif->port_ops = NULL;
 
        idx[0] = i;
 
index fd12bbe93f113db5926f033e83f16ba6173a7a1c..061456914ca352469ca644df2e83e311ac92d9fb 100644 (file)
@@ -191,6 +191,10 @@ static void icside_maskproc(ide_drive_t *drive, int mask)
        local_irq_restore(flags);
 }
 
+static const struct ide_port_ops icside_v6_no_dma_port_ops = {
+       .maskproc               = icside_maskproc,
+};
+
 #ifdef CONFIG_BLK_DEV_IDEDMA_ICS
 /*
  * SG-DMA support.
@@ -266,6 +270,11 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
                ide_xfer_verbose(xfer_mode), 2000 / drive->drive_data);
 }
 
+static const struct ide_port_ops icside_v6_port_ops = {
+       .set_dma_mode           = icside_set_dma_mode,
+       .maskproc               = icside_maskproc,
+};
+
 static void icside_dma_host_set(ide_drive_t *drive, int on)
 {
 }
@@ -375,25 +384,33 @@ static void icside_dma_lost_irq(ide_drive_t *drive)
        printk(KERN_ERR "%s: IRQ lost\n", drive->name);
 }
 
-static void icside_dma_init(ide_hwif_t *hwif)
+static int icside_dma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
 {
        hwif->dmatable_cpu      = NULL;
        hwif->dmatable_dma      = 0;
-       hwif->set_dma_mode      = icside_set_dma_mode;
-
-       hwif->dma_host_set      = icside_dma_host_set;
-       hwif->dma_setup         = icside_dma_setup;
-       hwif->dma_exec_cmd      = icside_dma_exec_cmd;
-       hwif->dma_start         = icside_dma_start;
-       hwif->ide_dma_end       = icside_dma_end;
-       hwif->ide_dma_test_irq  = icside_dma_test_irq;
-       hwif->dma_timeout       = icside_dma_timeout;
-       hwif->dma_lost_irq      = icside_dma_lost_irq;
+
+       return 0;
 }
+
+static const struct ide_dma_ops icside_v6_dma_ops = {
+       .dma_host_set           = icside_dma_host_set,
+       .dma_setup              = icside_dma_setup,
+       .dma_exec_cmd           = icside_dma_exec_cmd,
+       .dma_start              = icside_dma_start,
+       .dma_end                = icside_dma_end,
+       .dma_test_irq           = icside_dma_test_irq,
+       .dma_timeout            = icside_dma_timeout,
+       .dma_lost_irq           = icside_dma_lost_irq,
+};
 #else
-#define icside_dma_init(hwif)  (0)
+#define icside_v6_dma_ops NULL
 #endif
 
+static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
+{
+       return -EOPNOTSUPP;
+}
+
 static ide_hwif_t *
 icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *ec)
 {
@@ -402,21 +419,22 @@ icside_setup(void __iomem *base, struct cardinfo *info, struct expansion_card *e
 
        hwif = ide_find_port();
        if (hwif) {
-               int i;
-
                /*
                 * Ensure we're using MMIO
                 */
                default_hwif_mmiops(hwif);
-               hwif->mmio = 1;
 
-               for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
-                       hwif->io_ports[i] = port;
-                       port += 1 << info->stepping;
-               }
-               hwif->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)base + info->ctrloffset;
+               hwif->io_ports.data_addr = port;
+               hwif->io_ports.error_addr = port + (1 << info->stepping);
+               hwif->io_ports.nsect_addr = port + (2 << info->stepping);
+               hwif->io_ports.lbal_addr = port + (3 << info->stepping);
+               hwif->io_ports.lbam_addr = port + (4 << info->stepping);
+               hwif->io_ports.lbah_addr = port + (5 << info->stepping);
+               hwif->io_ports.device_addr = port + (6 << info->stepping);
+               hwif->io_ports.status_addr = port + (7 << info->stepping);
+               hwif->io_ports.ctl_addr =
+                       (unsigned long)base + info->ctrloffset;
                hwif->irq     = ec->irq;
-               hwif->noprobe = 0;
                hwif->chipset = ide_acorn;
                hwif->gendev.parent = &ec->dev;
                hwif->dev = &ec->dev;
@@ -462,9 +480,10 @@ icside_register_v5(struct icside_state *state, struct expansion_card *ec)
 }
 
 static const struct ide_port_info icside_v6_port_info __initdata = {
-       .host_flags             = IDE_HFLAG_SERIALIZE |
-                                 IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
-                                 IDE_HFLAG_NO_AUTOTUNE,
+       .init_dma               = icside_dma_off_init,
+       .port_ops               = &icside_v6_no_dma_port_ops,
+       .dma_ops                = &icside_v6_dma_ops,
+       .host_flags             = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
        .mwdma_mask             = ATA_MWDMA2,
        .swdma_mask             = ATA_SWDMA2,
 };
@@ -526,21 +545,19 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
        state->hwif[0]    = hwif;
        state->hwif[1]    = mate;
 
-       hwif->maskproc    = icside_maskproc;
        hwif->hwif_data   = state;
        hwif->config_data = (unsigned long)ioc_base;
        hwif->select_data = sel;
 
-       mate->maskproc    = icside_maskproc;
        mate->hwif_data   = state;
        mate->config_data = (unsigned long)ioc_base;
        mate->select_data = sel | 1;
 
        if (ec->dma != NO_DMA && !request_dma(ec->dma, hwif->name)) {
-               icside_dma_init(hwif);
-               icside_dma_init(mate);
-       } else
-               d.mwdma_mask = d.swdma_mask = 0;
+               d.init_dma = icside_dma_init;
+               d.port_ops = &icside_v6_port_ops;
+               d.dma_ops = NULL;
+       }
 
        idx[0] = hwif->index;
        idx[1] = mate->index;
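Host-specific DMA methods move out of ide_hwif_t into const ide_dma_ops/ide_port_ops tables selected through ide_port_info; the init_dma() hook is left with resource setup only, and a negative return (as from icside_dma_off_init() above) is intended to leave the interface running PIO-only. A minimal sketch of that contract, with hypothetical names:

        /* Hypothetical host driver, mirroring the icside wiring above. */
        static int myhost_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
        {
                /* Pretend this board revision has no usable DMA engine. */
                return -EOPNOTSUPP;             /* interface stays PIO-only */
        }

        static const struct ide_port_info myhost_port_info = {
                .init_dma   = myhost_init_dma,
                .host_flags = IDE_HFLAG_MMIO,
                .pio_mask   = ATA_PIO4,
        };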
index 82643df7c49aff546d487fd81067b180ea153441..4263ffd4ab20ba10839466132442fac0a1eb3ad9 100644 (file)
@@ -14,6 +14,8 @@
 #include <asm/mach-types.h>
 #include <asm/irq.h>
 
+#define DRV_NAME "ide_arm"
+
 #ifdef CONFIG_ARCH_CLPS7500
 # include <asm/arch/hardware.h>
 #
@@ -28,10 +30,24 @@ static int __init ide_arm_init(void)
 {
        ide_hwif_t *hwif;
        hw_regs_t hw;
+       unsigned long base = IDE_ARM_IO, ctl = IDE_ARM_IO + 0x206;
        u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
 
+       if (!request_region(base, 8, DRV_NAME)) {
+               printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+                               DRV_NAME, base, base + 7);
+               return -EBUSY;
+       }
+
+       if (!request_region(ctl, 1, DRV_NAME)) {
+               printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+                               DRV_NAME, ctl);
+               release_region(base, 8);
+               return -EBUSY;
+       }
+
        memset(&hw, 0, sizeof(hw));
-       ide_std_init_ports(&hw, IDE_ARM_IO, IDE_ARM_IO + 0x206);
+       ide_std_init_ports(&hw, base, ctl);
        hw.irq = IDE_ARM_IRQ;
 
        hwif = ide_find_port();
index 666df779a5f46a119c1c2ff875eca5fb3d58e610..96378ebfb31fc70e4e4e81b0ef65fa08d454dae2 100644 (file)
@@ -317,17 +317,32 @@ static u8 __devinit palm_bk3710_cable_detect(ide_hwif_t *hwif)
        return ATA_CBL_PATA80;
 }
 
-static void __devinit palm_bk3710_init_hwif(ide_hwif_t *hwif)
+static int __devinit palm_bk3710_init_dma(ide_hwif_t *hwif,
+                                         const struct ide_port_info *d)
 {
-       hwif->set_pio_mode = palm_bk3710_set_pio_mode;
-       hwif->set_dma_mode = palm_bk3710_set_dma_mode;
+       unsigned long base =
+               hwif->io_ports.data_addr - IDE_PALM_ATA_PRI_REG_OFFSET;
 
-       hwif->cable_detect = palm_bk3710_cable_detect;
+       printk(KERN_INFO "    %s: MMIO-DMA\n", hwif->name);
+
+       if (ide_allocate_dma_engine(hwif))
+               return -1;
+
+       ide_setup_dma(hwif, base);
+
+       return 0;
 }
 
+static const struct ide_port_ops palm_bk3710_ports_ops = {
+       .set_pio_mode           = palm_bk3710_set_pio_mode,
+       .set_dma_mode           = palm_bk3710_set_dma_mode,
+       .cable_detect           = palm_bk3710_cable_detect,
+};
+
 static const struct ide_port_info __devinitdata palm_bk3710_port_info = {
-       .init_hwif              = palm_bk3710_init_hwif,
-       .host_flags             = IDE_HFLAG_NO_DMA, /* hack (no PCI) */
+       .init_dma               = palm_bk3710_init_dma,
+       .port_ops               = &palm_bk3710_ports_ops,
+       .host_flags             = IDE_HFLAG_MMIO,
        .pio_mask               = ATA_PIO4,
        .udma_mask              = ATA_UDMA4,    /* (input clk 99MHz) */
        .mwdma_mask             = ATA_MWDMA2,
@@ -372,8 +387,8 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
 
        pribase = mem->start + IDE_PALM_ATA_PRI_REG_OFFSET;
        for (i = 0; i < IDE_NR_PORTS - 2; i++)
-               hw.io_ports[i] = pribase + i;
-       hw.io_ports[IDE_CONTROL_OFFSET] = mem->start +
+               hw.io_ports_array[i] = pribase + i;
+       hw.io_ports.ctl_addr = mem->start +
                        IDE_PALM_ATA_PRI_CTL_OFFSET;
        hw.irq = irq->start;
        hw.chipset = ide_palm3710;
@@ -384,18 +399,12 @@ static int __devinit palm_bk3710_probe(struct platform_device *pdev)
 
        i = hwif->index;
 
-       if (hwif->present)
-               ide_unregister(i);
-       else
-               ide_init_port_data(hwif, i);
-
+       ide_init_port_data(hwif, i);
        ide_init_port_hw(hwif, &hw);
 
        hwif->mmio = 1;
        default_hwif_mmiops(hwif);
 
-       ide_setup_dma(hwif, mem->start);
-
        idx[0] = i;
 
        ide_device_add(idx, &palm_bk3710_port_info);
index 2c3d0ec91dc3d5053abdcb252be75695747ff1f4..1747b23587758f93c1fc44f69b264f8198ad6def 100644 (file)
@@ -17,11 +17,11 @@ static void rapide_setup_ports(hw_regs_t *hw, void __iomem *base,
        unsigned long port = (unsigned long)base;
        int i;
 
-       for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
-               hw->io_ports[i] = port;
+       for (i = 0; i <= 7; i++) {
+               hw->io_ports_array[i] = port;
                port += sz;
        }
-       hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
+       hw->io_ports.ctl_addr = (unsigned long)ctrl;
        hw->irq = irq;
 }
 
@@ -53,7 +53,7 @@ rapide_probe(struct expansion_card *ec, const struct ecard_id *id)
 
                ide_init_port_hw(hwif, &hw);
 
-               hwif->mmio = 1;
+               hwif->host_flags = IDE_HFLAG_MMIO;
                default_hwif_mmiops(hwif);
 
                idx[0] = hwif->index;
@@ -76,7 +76,7 @@ static void __devexit rapide_remove(struct expansion_card *ec)
 
        ecard_set_drvdata(ec, NULL);
 
-       ide_unregister(hwif->index);
+       ide_unregister(hwif);
 
        ecard_release_resources(ec);
 }
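The IDE_*_OFFSET index macros give way to named taskfile register fields; hw_regs_t keeps an io_ports_array[] alias of the same storage, which is why loops like the ones above can still fill the registers positionally before setting ctl_addr by name. Roughly, assuming the 2.6.26 <linux/ide.h> layout:

        /* Sketch of the structure behind both spellings used in these hunks. */
        struct ide_io_ports {
                unsigned long data_addr;
                unsigned long error_addr;       /* aliased with feature_addr for writes */
                unsigned long nsect_addr;
                unsigned long lbal_addr;
                unsigned long lbam_addr;
                unsigned long lbah_addr;
                unsigned long device_addr;
                unsigned long status_addr;      /* aliased with command_addr for writes */
                unsigned long ctl_addr;
                unsigned long irq_addr;
        };

        /* hw_regs_t then exposes the same memory two ways (sketch):
         *
         *      union {
         *              struct ide_io_ports     io_ports;
         *              unsigned long           io_ports_array[IDE_NR_PORTS];
         *      };
         */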
diff --git a/drivers/ide/cris/Makefile b/drivers/ide/cris/Makefile
deleted file mode 100644 (file)
index 20b9596..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-EXTRA_CFLAGS                           += -Idrivers/ide
-
-obj-$(CONFIG_IDE_ETRAX)                        += ide-cris.o
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
deleted file mode 100644 (file)
index 790a775..0000000
+++ /dev/null
@@ -1,1081 +0,0 @@
-/*
- * Etrax specific IDE functions, like init and PIO-mode setting etc.
- * Almost the entire ide.c is used for the rest of the Etrax ATA driver.
- * Copyright (c) 2000-2005 Axis Communications AB
- *
- * Authors:    Bjorn Wesen        (initial version)
- *             Mikael Starvik     (crisv32 port)
- */
-
-/* Regarding DMA:
- *
- * There are two forms of DMA - "DMA handshaking" between the interface and the drive,
- * and DMA between the memory and the interface. We can ALWAYS use the latter, since it's
- * something built-in in the Etrax. However only some drives support the DMA-mode handshaking
- * on the ATA-bus. The normal PC driver and Triton interface disables memory-if DMA when the
- * device can't do DMA handshaking for some stupid reason. We don't need to do that.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/blkdev.h>
-#include <linux/hdreg.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#include <asm/io.h>
-#include <asm/dma.h>
-
-/* number of DMA descriptors */
-#define MAX_DMA_DESCRS 64
-
-/* number of times to retry busy-flags when reading/writing IDE-registers
- * this can't be too high because a hung harddisk might cause the watchdog
- * to trigger (sometimes INB and OUTB are called with irq's disabled)
- */
-
-#define IDE_REGISTER_TIMEOUT 300
-
-#define LOWDB(x)
-#define D(x)
-
-enum /* Transfer types */
-{
-       TYPE_PIO,
-       TYPE_DMA,
-       TYPE_UDMA
-};
-
-/* CRISv32 specifics */
-#ifdef CONFIG_ETRAX_ARCH_V32
-#include <asm/arch/hwregs/ata_defs.h>
-#include <asm/arch/hwregs/dma_defs.h>
-#include <asm/arch/hwregs/dma.h>
-#include <asm/arch/pinmux.h>
-
-#define ATA_UDMA2_CYC    2
-#define ATA_UDMA2_DVS    3
-#define ATA_UDMA1_CYC    2
-#define ATA_UDMA1_DVS    4
-#define ATA_UDMA0_CYC    4
-#define ATA_UDMA0_DVS    6
-#define ATA_DMA2_STROBE  7
-#define ATA_DMA2_HOLD    1
-#define ATA_DMA1_STROBE  8
-#define ATA_DMA1_HOLD    3
-#define ATA_DMA0_STROBE 25
-#define ATA_DMA0_HOLD   19
-#define ATA_PIO4_SETUP   3
-#define ATA_PIO4_STROBE  7
-#define ATA_PIO4_HOLD    1
-#define ATA_PIO3_SETUP   3
-#define ATA_PIO3_STROBE  9
-#define ATA_PIO3_HOLD    3
-#define ATA_PIO2_SETUP   3
-#define ATA_PIO2_STROBE 13
-#define ATA_PIO2_HOLD    5
-#define ATA_PIO1_SETUP   5
-#define ATA_PIO1_STROBE 23
-#define ATA_PIO1_HOLD    9
-#define ATA_PIO0_SETUP   9
-#define ATA_PIO0_STROBE 39
-#define ATA_PIO0_HOLD    9
-
-int
-cris_ide_ack_intr(ide_hwif_t* hwif)
-{
-       reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2,
-                                int, hwif->io_ports[0]);
-       REG_WR_INT(ata, regi_ata, rw_ack_intr, 1 << ctrl2.sel);
-       return 1;
-}
-
-static inline int
-cris_ide_busy(void)
-{
-       reg_ata_rs_stat_data stat_data;
-       stat_data = REG_RD(ata, regi_ata, rs_stat_data);
-       return stat_data.busy;
-}
-
-static inline int
-cris_ide_ready(void)
-{
-       return !cris_ide_busy();
-}
-
-static inline int
-cris_ide_data_available(unsigned short* data)
-{
-       reg_ata_rs_stat_data stat_data;
-       stat_data = REG_RD(ata, regi_ata, rs_stat_data);
-       *data = stat_data.data;
-       return stat_data.dav;
-}
-
-static void
-cris_ide_write_command(unsigned long command)
-{
-       REG_WR_INT(ata, regi_ata, rw_ctrl2, command); /* write data to the drive's register */
-}
-
-static void
-cris_ide_set_speed(int type, int setup, int strobe, int hold)
-{
-       reg_ata_rw_ctrl0 ctrl0 = REG_RD(ata, regi_ata, rw_ctrl0);
-       reg_ata_rw_ctrl1 ctrl1 = REG_RD(ata, regi_ata, rw_ctrl1);
-
-       if (type == TYPE_PIO) {
-               ctrl0.pio_setup = setup;
-               ctrl0.pio_strb = strobe;
-               ctrl0.pio_hold = hold;
-       } else if (type == TYPE_DMA) {
-               ctrl0.dma_strb = strobe;
-               ctrl0.dma_hold = hold;
-       } else if (type == TYPE_UDMA) {
-               ctrl1.udma_tcyc = setup;
-               ctrl1.udma_tdvs = strobe;
-       }
-       REG_WR(ata, regi_ata, rw_ctrl0, ctrl0);
-       REG_WR(ata, regi_ata, rw_ctrl1, ctrl1);
-}
-
-static unsigned long
-cris_ide_base_address(int bus)
-{
-       reg_ata_rw_ctrl2 ctrl2 = {0};
-       ctrl2.sel = bus;
-       return REG_TYPE_CONV(int, reg_ata_rw_ctrl2, ctrl2);
-}
-
-static unsigned long
-cris_ide_reg_addr(unsigned long addr, int cs0, int cs1)
-{
-       reg_ata_rw_ctrl2 ctrl2 = {0};
-       ctrl2.addr = addr;
-       ctrl2.cs1 = cs1;
-       ctrl2.cs0 = cs0;
-       return REG_TYPE_CONV(int, reg_ata_rw_ctrl2, ctrl2);
-}
-
-static __init void
-cris_ide_reset(unsigned val)
-{
-       reg_ata_rw_ctrl0 ctrl0 = {0};
-       ctrl0.rst = val ? regk_ata_active : regk_ata_inactive;
-       REG_WR(ata, regi_ata, rw_ctrl0, ctrl0);
-}
-
-static __init void
-cris_ide_init(void)
-{
-       reg_ata_rw_ctrl0 ctrl0 = {0};
-       reg_ata_rw_intr_mask intr_mask = {0};
-
-       ctrl0.en = regk_ata_yes;
-       REG_WR(ata, regi_ata, rw_ctrl0, ctrl0);
-
-       intr_mask.bus0 = regk_ata_yes;
-       intr_mask.bus1 = regk_ata_yes;
-       intr_mask.bus2 = regk_ata_yes;
-       intr_mask.bus3 = regk_ata_yes;
-
-       REG_WR(ata, regi_ata, rw_intr_mask, intr_mask);
-
-       crisv32_request_dma(2, "ETRAX FS built-in ATA", DMA_VERBOSE_ON_ERROR, 0, dma_ata);
-       crisv32_request_dma(3, "ETRAX FS built-in ATA", DMA_VERBOSE_ON_ERROR, 0, dma_ata);
-
-       crisv32_pinmux_alloc_fixed(pinmux_ata);
-       crisv32_pinmux_alloc_fixed(pinmux_ata0);
-       crisv32_pinmux_alloc_fixed(pinmux_ata1);
-       crisv32_pinmux_alloc_fixed(pinmux_ata2);
-       crisv32_pinmux_alloc_fixed(pinmux_ata3);
-
-       DMA_RESET(regi_dma2);
-       DMA_ENABLE(regi_dma2);
-       DMA_RESET(regi_dma3);
-       DMA_ENABLE(regi_dma3);
-
-       DMA_WR_CMD (regi_dma2, regk_dma_set_w_size2);
-       DMA_WR_CMD (regi_dma3, regk_dma_set_w_size2);
-}
-
-static dma_descr_context mycontext __attribute__ ((__aligned__(32)));
-
-#define cris_dma_descr_type dma_descr_data
-#define cris_pio_read regk_ata_rd
-#define cris_ultra_mask 0x7
-#define MAX_DESCR_SIZE 0xffffffffUL
-
-static unsigned long
-cris_ide_get_reg(unsigned long reg)
-{
-       return (reg & 0x0e000000) >> 25;
-}
-
-static void
-cris_ide_fill_descriptor(cris_dma_descr_type *d, void* buf, unsigned int len, int last)
-{
-       d->buf = (char*)virt_to_phys(buf);
-       d->after = d->buf + len;
-       d->eol = last;
-}
-
-static void
-cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir,int type,int len)
-{
-       ide_hwif_t *hwif = drive->hwif;
-
-       reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
-                                              hwif->io_ports[IDE_DATA_OFFSET]);
-       reg_ata_rw_trf_cnt trf_cnt = {0};
-
-       mycontext.saved_data = (dma_descr_data*)virt_to_phys(d);
-       mycontext.saved_data_buf = d->buf;
-       /* start the dma channel */
-       DMA_START_CONTEXT(dir ? regi_dma3 : regi_dma2, virt_to_phys(&mycontext));
-
-       /* initiate a multi word dma read using PIO handshaking */
-       trf_cnt.cnt = len >> 1;
-       /* Due to a "feature" the transfer count has to be one extra word for UDMA. */
-       if (type == TYPE_UDMA)
-               trf_cnt.cnt++;
-       REG_WR(ata, regi_ata, rw_trf_cnt, trf_cnt);
-
-       ctrl2.rw = dir ? regk_ata_rd : regk_ata_wr;
-       ctrl2.trf_mode = regk_ata_dma;
-       ctrl2.hsh = type == TYPE_PIO ? regk_ata_pio :
-                   type == TYPE_DMA ? regk_ata_dma : regk_ata_udma;
-       ctrl2.multi = regk_ata_yes;
-       ctrl2.dma_size = regk_ata_word;
-       REG_WR(ata, regi_ata, rw_ctrl2, ctrl2);
-}
-
-static void
-cris_ide_wait_dma(int dir)
-{
-       reg_dma_rw_stat status;
-       do
-       {
-               status = REG_RD(dma, dir ? regi_dma3 : regi_dma2, rw_stat);
-       } while(status.list_state != regk_dma_data_at_eol);
-}
-
-static int cris_dma_test_irq(ide_drive_t *drive)
-{
-       ide_hwif_t *hwif = drive->hwif;
-       int intr = REG_RD_INT(ata, regi_ata, r_intr);
-
-       reg_ata_rw_ctrl2 ctrl2 = REG_TYPE_CONV(reg_ata_rw_ctrl2, int,
-                                              hwif->io_ports[IDE_DATA_OFFSET]);
-
-       return intr & (1 << ctrl2.sel) ? 1 : 0;
-}
-
-static void cris_ide_initialize_dma(int dir)
-{
-}
-
-#else
-/* CRISv10 specifics */
-#include <asm/arch/svinto.h>
-#include <asm/arch/io_interface_mux.h>
-
-/* PIO timing (in R_ATA_CONFIG)
- *
- *                        _____________________________
- * ADDRESS :     ________/
- *
- *                            _______________
- * DIOR    :     ____________/               \__________
- *
- *                               _______________
- * DATA    :     XXXXXXXXXXXXXXXX_______________XXXXXXXX
- *
- *
- * DIOR is unbuffered while address and data is buffered.
- * This creates two problems:
- * 1. The DIOR pulse is too early (because it is unbuffered)
- * 2. The rise time of DIOR is long
- *
- * There are at least three different plausible solutions
- * 1. Use a pad capable of larger currents in Etrax
- * 2. Use an external buffer
- * 3. Make the strobe pulse longer
- *
- * Some of the strobe timings below are modified to compensate
- * for this. This implies a slight performance decrease.
- *
- * THIS SHOULD NEVER BE CHANGED!
- *
- * TODO: Is this still true for the latest LX boards?
- */
-
-#define ATA_UDMA2_CYC    0 /* No UDMA supported, just to make it compile. */
-#define ATA_UDMA2_DVS    0
-#define ATA_UDMA1_CYC    0
-#define ATA_UDMA1_DVS    0
-#define ATA_UDMA0_CYC    0
-#define ATA_UDMA0_DVS    0
-#define ATA_DMA2_STROBE  4
-#define ATA_DMA2_HOLD    0
-#define ATA_DMA1_STROBE  4
-#define ATA_DMA1_HOLD    1
-#define ATA_DMA0_STROBE 12
-#define ATA_DMA0_HOLD    9
-#define ATA_PIO4_SETUP   1
-#define ATA_PIO4_STROBE  5
-#define ATA_PIO4_HOLD    0
-#define ATA_PIO3_SETUP   1
-#define ATA_PIO3_STROBE  5
-#define ATA_PIO3_HOLD    1
-#define ATA_PIO2_SETUP   1
-#define ATA_PIO2_STROBE  6
-#define ATA_PIO2_HOLD    2
-#define ATA_PIO1_SETUP   2
-#define ATA_PIO1_STROBE 11
-#define ATA_PIO1_HOLD    4
-#define ATA_PIO0_SETUP   4
-#define ATA_PIO0_STROBE 19
-#define ATA_PIO0_HOLD    4
-
-int
-cris_ide_ack_intr(ide_hwif_t* hwif)
-{
-       return 1;
-}
-
-static inline int
-cris_ide_busy(void)
-{
-       return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy) ;
-}
-
-static inline int
-cris_ide_ready(void)
-{
-       return *R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, tr_rdy) ;
-}
-
-static inline int
-cris_ide_data_available(unsigned short* data)
-{
-       unsigned long status = *R_ATA_STATUS_DATA;
-       *data = (unsigned short)status;
-       return status & IO_MASK(R_ATA_STATUS_DATA, dav);
-}
-
-static void
-cris_ide_write_command(unsigned long command)
-{
-       *R_ATA_CTRL_DATA = command;
-}
-
-static void
-cris_ide_set_speed(int type, int setup, int strobe, int hold)
-{
-       static int pio_setup = ATA_PIO4_SETUP;
-       static int pio_strobe = ATA_PIO4_STROBE;
-       static int pio_hold = ATA_PIO4_HOLD;
-       static int dma_strobe = ATA_DMA2_STROBE;
-       static int dma_hold = ATA_DMA2_HOLD;
-
-       if (type == TYPE_PIO) {
-               pio_setup = setup;
-               pio_strobe = strobe;
-               pio_hold = hold;
-       } else if (type == TYPE_DMA) {
-               dma_strobe = strobe;
-               dma_hold = hold;
-       }
-       *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ) |
-               IO_FIELD( R_ATA_CONFIG, dma_strobe, dma_strobe ) |
-               IO_FIELD( R_ATA_CONFIG, dma_hold,   dma_hold ) |
-               IO_FIELD( R_ATA_CONFIG, pio_setup,  pio_setup ) |
-               IO_FIELD( R_ATA_CONFIG, pio_strobe, pio_strobe ) |
-               IO_FIELD( R_ATA_CONFIG, pio_hold,   pio_hold ) );
-}
-
-static unsigned long
-cris_ide_base_address(int bus)
-{
-       return IO_FIELD(R_ATA_CTRL_DATA, sel, bus);
-}
-
-static unsigned long
-cris_ide_reg_addr(unsigned long addr, int cs0, int cs1)
-{
-       return IO_FIELD(R_ATA_CTRL_DATA, addr, addr) |
-              IO_FIELD(R_ATA_CTRL_DATA, cs0, cs0) |
-              IO_FIELD(R_ATA_CTRL_DATA, cs1, cs1);
-}
-
-static __init void
-cris_ide_reset(unsigned val)
-{
-#ifdef CONFIG_ETRAX_IDE_G27_RESET
-       REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, 27, val);
-#endif
-#ifdef CONFIG_ETRAX_IDE_PB7_RESET
-       port_pb_dir_shadow = port_pb_dir_shadow |
-               IO_STATE(R_PORT_PB_DIR, dir7, output);
-       *R_PORT_PB_DIR = port_pb_dir_shadow;
-       REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, 7, val);
-#endif
-}
-
-static __init void
-cris_ide_init(void)
-{
-       volatile unsigned int dummy;
-
-       *R_ATA_CTRL_DATA = 0;
-       *R_ATA_TRANSFER_CNT = 0;
-       *R_ATA_CONFIG = 0;
-
-       if (cris_request_io_interface(if_ata, "ETRAX100LX IDE")) {
-               printk(KERN_CRIT "ide: Failed to get IO interface\n");
-               return;
-       } else if (cris_request_dma(ATA_TX_DMA_NBR,
-                                         "ETRAX100LX IDE TX",
-                                         DMA_VERBOSE_ON_ERROR,
-                                         dma_ata)) {
-               cris_free_io_interface(if_ata);
-               printk(KERN_CRIT "ide: Failed to get Tx DMA channel\n");
-               return;
-       } else if (cris_request_dma(ATA_RX_DMA_NBR,
-                                         "ETRAX100LX IDE RX",
-                                         DMA_VERBOSE_ON_ERROR,
-                                         dma_ata)) {
-               cris_free_dma(ATA_TX_DMA_NBR, "ETRAX100LX IDE Tx");
-               cris_free_io_interface(if_ata);
-               printk(KERN_CRIT "ide: Failed to get Rx DMA channel\n");
-               return;
-       }
-
-       /* make a dummy read to set the ata controller in a proper state */
-       dummy = *R_ATA_STATUS_DATA;
-
-       *R_ATA_CONFIG = ( IO_FIELD( R_ATA_CONFIG, enable, 1 ));
-       *R_ATA_CTRL_DATA = ( IO_STATE( R_ATA_CTRL_DATA, rw,   read) |
-                            IO_FIELD( R_ATA_CTRL_DATA, addr, 1   ) );
-
-       while(*R_ATA_STATUS_DATA & IO_MASK(R_ATA_STATUS_DATA, busy)); /* wait for busy flag */
-
-       *R_IRQ_MASK0_SET = ( IO_STATE( R_IRQ_MASK0_SET, ata_irq0, set ) |
-                            IO_STATE( R_IRQ_MASK0_SET, ata_irq1, set ) |
-                            IO_STATE( R_IRQ_MASK0_SET, ata_irq2, set ) |
-                            IO_STATE( R_IRQ_MASK0_SET, ata_irq3, set ) );
-
-       /* reset the dma channels we will use */
-
-       RESET_DMA(ATA_TX_DMA_NBR);
-       RESET_DMA(ATA_RX_DMA_NBR);
-       WAIT_DMA(ATA_TX_DMA_NBR);
-       WAIT_DMA(ATA_RX_DMA_NBR);
-}
-
-#define cris_dma_descr_type etrax_dma_descr
-#define cris_pio_read IO_STATE(R_ATA_CTRL_DATA, rw, read)
-#define cris_ultra_mask 0x0
-#define MAX_DESCR_SIZE 0x10000UL
-
-static unsigned long
-cris_ide_get_reg(unsigned long reg)
-{
-       return (reg & 0x0e000000) >> 25;
-}
-
-static void
-cris_ide_fill_descriptor(cris_dma_descr_type *d, void* buf, unsigned int len, int last)
-{
-       d->buf = virt_to_phys(buf);
-       d->sw_len = len == MAX_DESCR_SIZE ? 0 : len;
-       if (last)
-               d->ctrl |= d_eol;
-}
-
-static void cris_ide_start_dma(ide_drive_t *drive, cris_dma_descr_type *d, int dir, int type, int len)
-{
-       unsigned long cmd;
-
-       if (dir) {
-               /* We need to do this before RX DMA due to a chip bug.
-                * It would be enough to flush only the part of the cache that
-                * corresponds to the buffers we start, but since HD transfers
-                * usually are more than 8 kB, it is easier to optimize for the
-                * normal case and just flush the entire cache. It's the only
-                * way to be sure! (OB movie quote)
-                */
-               flush_etrax_cache();
-               *R_DMA_CH3_FIRST = virt_to_phys(d);
-               *R_DMA_CH3_CMD   = IO_STATE(R_DMA_CH3_CMD, cmd, start);
-
-       } else {
-               *R_DMA_CH2_FIRST = virt_to_phys(d);
-               *R_DMA_CH2_CMD   = IO_STATE(R_DMA_CH2_CMD, cmd, start);
-       }
-
-       /* initiate a multi word dma read using DMA handshaking */
-
-       *R_ATA_TRANSFER_CNT =
-               IO_FIELD(R_ATA_TRANSFER_CNT, count, len >> 1);
-
-       cmd = dir ? IO_STATE(R_ATA_CTRL_DATA, rw, read) : IO_STATE(R_ATA_CTRL_DATA, rw, write);
-       cmd |= type == TYPE_PIO ? IO_STATE(R_ATA_CTRL_DATA, handsh, pio) :
-                                 IO_STATE(R_ATA_CTRL_DATA, handsh, dma);
-       *R_ATA_CTRL_DATA =
-               cmd |
-               IO_FIELD(R_ATA_CTRL_DATA, data,
-                        drive->hwif->io_ports[IDE_DATA_OFFSET]) |
-               IO_STATE(R_ATA_CTRL_DATA, src_dst,  dma)  |
-               IO_STATE(R_ATA_CTRL_DATA, multi,    on)   |
-               IO_STATE(R_ATA_CTRL_DATA, dma_size, word);
-}
-
-static void
-cris_ide_wait_dma(int dir)
-{
-       if (dir)
-               WAIT_DMA(ATA_RX_DMA_NBR);
-       else
-               WAIT_DMA(ATA_TX_DMA_NBR);
-}
-
-static int cris_dma_test_irq(ide_drive_t *drive)
-{
-       int intr = *R_IRQ_MASK0_RD;
-       int bus = IO_EXTRACT(R_ATA_CTRL_DATA, sel,
-                            drive->hwif->io_ports[IDE_DATA_OFFSET]);
-
-       return intr & (1 << (bus + IO_BITNR(R_IRQ_MASK0_RD, ata_irq0))) ? 1 : 0;
-}
-
-
-static void cris_ide_initialize_dma(int dir)
-{
-       if (dir)
-       {
-               RESET_DMA(ATA_RX_DMA_NBR); /* sometimes the DMA channel gets stuck so we need to do this */
-               WAIT_DMA(ATA_RX_DMA_NBR);
-       }
-       else
-       {
-               RESET_DMA(ATA_TX_DMA_NBR); /* sometimes the DMA channel gets stuck so we need to do this */
-               WAIT_DMA(ATA_TX_DMA_NBR);
-       }
-}
-
-#endif
-
-void
-cris_ide_outw(unsigned short data, unsigned long reg) {
-       int timeleft;
-
-       LOWDB(printk("ow: data 0x%x, reg 0x%x\n", data, reg));
-
-       /* Note that timeouts are not really handled here: we stop waiting, but
-        * we don't notify anybody.
-        */
-
-       timeleft = IDE_REGISTER_TIMEOUT;
-       /* wait for busy flag */
-       do {
-               timeleft--;
-       } while(timeleft && cris_ide_busy());
-
-       /*
-        * Fall through at a timeout, so the ongoing command will be
-        * aborted by the write below, which is expected to be a dummy
-        * command to the command register.  This happens when a faulty
-        * drive times out on a command.  See comment on timeout in
-        * INB.
-        */
-       if(!timeleft)
-               printk("ATA timeout reg 0x%lx := 0x%x\n", reg, data);
-
-       cris_ide_write_command(reg|data); /* write data to the drive's register */
-
-       timeleft = IDE_REGISTER_TIMEOUT;
-       /* wait for transmitter ready */
-       do {
-               timeleft--;
-       } while(timeleft && !cris_ide_ready());
-}
-
-void
-cris_ide_outb(unsigned char data, unsigned long reg)
-{
-       cris_ide_outw(data, reg);
-}
-
-void
-cris_ide_outbsync(ide_drive_t *drive, u8 addr, unsigned long port)
-{
-       cris_ide_outw(addr, port);
-}
-
-unsigned short
-cris_ide_inw(unsigned long reg) {
-       int timeleft;
-       unsigned short val;
-
-       timeleft = IDE_REGISTER_TIMEOUT;
-       /* wait for busy flag */
-       do {
-               timeleft--;
-       } while(timeleft && cris_ide_busy());
-
-       if(!timeleft) {
-               /*
-                * If we're asked to read the status register, like for
-                * example when a command does not complete for an
-                * extended time, but the ATA interface is stuck in a
-                * busy state at the *ETRAX* ATA interface level (as has
-                * happened repeatedly with at least one bad disk), then
-                * the best thing to do is to pretend that we read
-                * "busy" in the status register, so the IDE driver will
-                * time-out, abort the ongoing command and perform a
-                * reset sequence.  Note that the subsequent OUT_BYTE
-                * call will also timeout on busy, but as long as the
-                * write is still performed, everything will be fine.
-                */
-               if (cris_ide_get_reg(reg) == IDE_STATUS_OFFSET)
-                       return BUSY_STAT;
-               else
-                       /* For other rare cases we assume 0 is good enough.  */
-                       return 0;
-       }
-
-       cris_ide_write_command(reg | cris_pio_read);
-
-       timeleft = IDE_REGISTER_TIMEOUT;
-       /* wait for available */
-       do {
-               timeleft--;
-       } while(timeleft && !cris_ide_data_available(&val));
-
-       if(!timeleft)
-               return 0;
-
-       LOWDB(printk("inb: 0x%x from reg 0x%x\n", val & 0xff, reg));
-
-       return val;
-}
-
-unsigned char
-cris_ide_inb(unsigned long reg)
-{
-       return (unsigned char)cris_ide_inw(reg);
-}
-
-static int cris_dma_end (ide_drive_t *drive);
-static int cris_dma_setup (ide_drive_t *drive);
-static void cris_dma_exec_cmd (ide_drive_t *drive, u8 command);
-static int cris_dma_test_irq(ide_drive_t *drive);
-static void cris_dma_start(ide_drive_t *drive);
-static void cris_ide_input_data (ide_drive_t *drive, void *, unsigned int);
-static void cris_ide_output_data (ide_drive_t *drive, void *, unsigned int);
-static void cris_atapi_input_bytes(ide_drive_t *drive, void *, unsigned int);
-static void cris_atapi_output_bytes(ide_drive_t *drive, void *, unsigned int);
-
-static void cris_dma_host_set(ide_drive_t *drive, int on)
-{
-}
-
-static void cris_set_pio_mode(ide_drive_t *drive, const u8 pio)
-{
-       int setup, strobe, hold;
-
-       switch(pio)
-       {
-               case 0:
-                       setup = ATA_PIO0_SETUP;
-                       strobe = ATA_PIO0_STROBE;
-                       hold = ATA_PIO0_HOLD;
-                       break;
-               case 1:
-                       setup = ATA_PIO1_SETUP;
-                       strobe = ATA_PIO1_STROBE;
-                       hold = ATA_PIO1_HOLD;
-                       break;
-               case 2:
-                       setup = ATA_PIO2_SETUP;
-                       strobe = ATA_PIO2_STROBE;
-                       hold = ATA_PIO2_HOLD;
-                       break;
-               case 3:
-                       setup = ATA_PIO3_SETUP;
-                       strobe = ATA_PIO3_STROBE;
-                       hold = ATA_PIO3_HOLD;
-                       break;
-               case 4:
-                       setup = ATA_PIO4_SETUP;
-                       strobe = ATA_PIO4_STROBE;
-                       hold = ATA_PIO4_HOLD;
-                       break;
-               default:
-                       return;
-       }
-
-       cris_ide_set_speed(TYPE_PIO, setup, strobe, hold);
-}
-
-static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed)
-{
-       int cyc = 0, dvs = 0, strobe = 0, hold = 0;
-
-       switch(speed)
-       {
-               case XFER_UDMA_0:
-                       cyc = ATA_UDMA0_CYC;
-                       dvs = ATA_UDMA0_DVS;
-                       break;
-               case XFER_UDMA_1:
-                       cyc = ATA_UDMA1_CYC;
-                       dvs = ATA_UDMA1_DVS;
-                       break;
-               case XFER_UDMA_2:
-                       cyc = ATA_UDMA2_CYC;
-                       dvs = ATA_UDMA2_DVS;
-                       break;
-               case XFER_MW_DMA_0:
-                       strobe = ATA_DMA0_STROBE;
-                       hold = ATA_DMA0_HOLD;
-                       break;
-               case XFER_MW_DMA_1:
-                       strobe = ATA_DMA1_STROBE;
-                       hold = ATA_DMA1_HOLD;
-                       break;
-               case XFER_MW_DMA_2:
-                       strobe = ATA_DMA2_STROBE;
-                       hold = ATA_DMA2_HOLD;
-                       break;
-       }
-
-       if (speed >= XFER_UDMA_0)
-               cris_ide_set_speed(TYPE_UDMA, cyc, dvs, 0);
-       else
-               cris_ide_set_speed(TYPE_DMA, 0, strobe, hold);
-}
-
-static void __init cris_setup_ports(hw_regs_t *hw, unsigned long base)
-{
-       int i;
-
-       memset(hw, 0, sizeof(*hw));
-
-       for (i = 0; i <= 7; i++)
-               hw->io_ports[i] = base + cris_ide_reg_addr(i, 0, 1);
-
-       /*
-        * the IDE control register is at ATA address 6,
-        * with CS1 active instead of CS0
-        */
-       hw->io_ports[IDE_CONTROL_OFFSET] = base + cris_ide_reg_addr(6, 1, 0);
-
-       hw->irq = ide_default_irq(0);
-       hw->ack_intr = cris_ide_ack_intr;
-}
-
-static const struct ide_port_info cris_port_info __initdata = {
-       .chipset                = ide_etrax100,
-       .host_flags             = IDE_HFLAG_NO_ATAPI_DMA |
-                                 IDE_HFLAG_NO_DMA, /* no SFF-style DMA */
-       .pio_mask               = ATA_PIO4,
-       .udma_mask              = cris_ultra_mask,
-       .mwdma_mask             = ATA_MWDMA2,
-};
-
-static int __init init_e100_ide(void)
-{
-       hw_regs_t hw;
-       int h;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-
-       printk("ide: ETRAX FS built-in ATA DMA controller\n");
-
-       for (h = 0; h < 4; h++) {
-               ide_hwif_t *hwif = NULL;
-
-               cris_setup_ports(&hw, cris_ide_base_address(h));
-
-               hwif = ide_find_port();
-               if (hwif == NULL)
-                       continue;
-               ide_init_port_data(hwif, hwif->index);
-               ide_init_port_hw(hwif, &hw);
-               hwif->mmio = 1;
-               hwif->set_pio_mode = &cris_set_pio_mode;
-               hwif->set_dma_mode = &cris_set_dma_mode;
-               hwif->ata_input_data = &cris_ide_input_data;
-               hwif->ata_output_data = &cris_ide_output_data;
-               hwif->atapi_input_bytes = &cris_atapi_input_bytes;
-               hwif->atapi_output_bytes = &cris_atapi_output_bytes;
-               hwif->dma_host_set = &cris_dma_host_set;
-               hwif->ide_dma_end = &cris_dma_end;
-               hwif->dma_setup = &cris_dma_setup;
-               hwif->dma_exec_cmd = &cris_dma_exec_cmd;
-               hwif->ide_dma_test_irq = &cris_dma_test_irq;
-               hwif->dma_start = &cris_dma_start;
-               hwif->OUTB = &cris_ide_outb;
-               hwif->OUTW = &cris_ide_outw;
-               hwif->OUTBSYNC = &cris_ide_outbsync;
-               hwif->INB = &cris_ide_inb;
-               hwif->INW = &cris_ide_inw;
-               hwif->cbl = ATA_CBL_PATA40;
-
-               idx[h] = hwif->index;
-       }
-
-       /* Reset pulse */
-       cris_ide_reset(0);
-       udelay(25);
-       cris_ide_reset(1);
-
-       cris_ide_init();
-
-       cris_ide_set_speed(TYPE_PIO, ATA_PIO4_SETUP, ATA_PIO4_STROBE, ATA_PIO4_HOLD);
-       cris_ide_set_speed(TYPE_DMA, 0, ATA_DMA2_STROBE, ATA_DMA2_HOLD);
-       cris_ide_set_speed(TYPE_UDMA, ATA_UDMA2_CYC, ATA_UDMA2_DVS, 0);
-
-       ide_device_add(idx, &cris_port_info);
-
-       return 0;
-}
-
-static cris_dma_descr_type mydescr __attribute__ ((__aligned__(16)));
-
-/*
- * The following routines are mainly used by the ATAPI drivers.
- *
- * These routines will round up any request for an odd number of bytes,
- * so if an odd bytecount is specified, be sure that there's at least one
- * extra byte allocated for the buffer.
- */
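
(In practice this means a caller issuing an odd-length ATAPI transfer has to size its buffer for the rounded-up count. A hypothetical caller-side sketch, with "drive" assumed to be in scope:

        unsigned int bytecount = 17;                      /* odd request size */
        void *buf = kmalloc(bytecount + 1, GFP_KERNEL);   /* one spare byte for the round-up */

        if (buf) {
                cris_atapi_input_bytes(drive, buf, bytecount);
                kfree(buf);
        }
)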
-static void
-cris_atapi_input_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
-{
-       D(printk("atapi_input_bytes, buffer 0x%x, count %d\n",
-                buffer, bytecount));
-
-       if(bytecount & 1) {
-               printk("warning, odd bytecount in cdrom_in_bytes = %d.\n", bytecount);
-               bytecount++; /* round up to an even count */
-       }
-
-       /* setup DMA and start transfer */
-
-       cris_ide_fill_descriptor(&mydescr, buffer, bytecount, 1);
-       cris_ide_start_dma(drive, &mydescr, 1, TYPE_PIO, bytecount);
-
-       /* wait for completion */
-       LED_DISK_READ(1);
-       cris_ide_wait_dma(1);
-       LED_DISK_READ(0);
-}
-
-static void
-cris_atapi_output_bytes (ide_drive_t *drive, void *buffer, unsigned int bytecount)
-{
-       D(printk("atapi_output_bytes, buffer 0x%x, count %d\n",
-                buffer, bytecount));
-
-       if(bytecount & 1) {
-               printk("odd bytecount %d in atapi_out_bytes!\n", bytecount);
-               bytecount++;
-       }
-
-       cris_ide_fill_descriptor(&mydescr, buffer, bytecount, 1);
-       cris_ide_start_dma(drive, &mydescr, 0, TYPE_PIO, bytecount);
-
-       /* wait for completion */
-
-       LED_DISK_WRITE(1);
-       LED_DISK_READ(1);
-       cris_ide_wait_dma(0);
-       LED_DISK_WRITE(0);
-}
-
-/*
- * This is used for most PIO data transfers *from* the IDE interface
- */
-static void
-cris_ide_input_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
-{
-       cris_atapi_input_bytes(drive, buffer, wcount << 2);
-}
-
-/*
- * This is used for most PIO data transfers *to* the IDE interface
- */
-static void
-cris_ide_output_data (ide_drive_t *drive, void *buffer, unsigned int wcount)
-{
-       cris_atapi_output_bytes(drive, buffer, wcount << 2);
-}
-
-/* we only have one DMA channel on the chip for ATA, so we can keep these static */
-static cris_dma_descr_type ata_descrs[MAX_DMA_DESCRS] __attribute__ ((__aligned__(16)));
-static unsigned int ata_tot_size;
-
-/*
- * cris_ide_build_dmatable() prepares a dma request.
- * Returns 0 if all went okay, returns 1 otherwise.
- */
-static int cris_ide_build_dmatable (ide_drive_t *drive)
-{
-       ide_hwif_t *hwif = drive->hwif;
-       struct scatterlist* sg;
-       struct request *rq  = drive->hwif->hwgroup->rq;
-       unsigned long size, addr;
-       unsigned int count = 0;
-       int i = 0;
-
-       sg = hwif->sg_table;
-
-       ata_tot_size = 0;
-
-       ide_map_sg(drive, rq);
-       i = hwif->sg_nents;
-
-       while(i) {
-               /*
-                * Determine addr and size of next buffer area.  We assume that
-                * individual virtual buffers are always composed linearly in
-                * physical memory.  For example, we assume that any 8kB buffer
-                * is always composed of two adjacent physical 4kB pages rather
-                * than two possibly non-adjacent physical 4kB pages.
-                */
-               /* group sequential buffers into one large buffer */
-               addr = sg_phys(sg);
-               size = sg_dma_len(sg);
-               while (--i) {
-                       sg = sg_next(sg);
-                       if ((addr + size) != sg_phys(sg))
-                               break;
-                       size += sg_dma_len(sg);
-               }
-
-               /* did we run out of descriptors? */
-
-               if(count >= MAX_DMA_DESCRS) {
-                       printk("%s: too few DMA descriptors\n", drive->name);
-                       return 1;
-               }
-
-               /* A too-large request is harder to handle: rw_trf_cnt cannot count more
-                  than 65536 words per transfer, so in that case we would need to either
-                  1) use a DMA interrupt to re-trigger rw_trf_cnt and continue with
-                     the descriptors, or
-                  2) simply do the request here, and get dma_intr to call ide_end_request only
-                     on those blocks that were actually set up for transfer.
-               */
-
-               if(ata_tot_size + size > 131072) {
-                       printk("too large total ATA DMA request, %d + %d!\n", ata_tot_size, (int)size);
-                       return 1;
-               }
-
-               /* If size > MAX_DESCR_SIZE it has to be split into new descriptors.
-                  Since we don't handle size > 131072, only one split is necessary. */
-
-               if(size > MAX_DESCR_SIZE) {
-                       cris_ide_fill_descriptor(&ata_descrs[count], (void*)addr, MAX_DESCR_SIZE, 0);
-                       count++;
-                       ata_tot_size += MAX_DESCR_SIZE;
-                       size -= MAX_DESCR_SIZE;
-                       addr += MAX_DESCR_SIZE;
-               }
-
-               cris_ide_fill_descriptor(&ata_descrs[count], (void*)addr, size,i ? 0 : 1);
-               count++;
-               ata_tot_size += size;
-       }
-
-       if (count) {
-               /* return and say all is ok */
-               return 0;
-       }
-
-       printk("%s: empty DMA table?\n", drive->name);
-       return 1;       /* let the PIO routines handle this weirdness */
-}
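
(Because the total request is capped at 131072 bytes just above, the single split in the function is enough. A more general version - hypothetical, not what the driver does - would simply loop until the remaining segment fits in one descriptor:

        while (size > MAX_DESCR_SIZE) {
                /* the MAX_DMA_DESCRS overflow check would also have to move inside this loop */
                cris_ide_fill_descriptor(&ata_descrs[count], (void *)addr, MAX_DESCR_SIZE, 0);
                count++;
                ata_tot_size += MAX_DESCR_SIZE;
                size -= MAX_DESCR_SIZE;
                addr += MAX_DESCR_SIZE;
        }
)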
-
-/*
- * cris_dma_intr() is the handler for disk read/write DMA interrupts
- */
-static ide_startstop_t cris_dma_intr (ide_drive_t *drive)
-{
-       LED_DISK_READ(0);
-       LED_DISK_WRITE(0);
-
-       return ide_dma_intr(drive);
-}
-
-/*
- * Functions below initiates/aborts DMA read/write operations on a drive.
- *
- * The caller is assumed to have selected the drive and programmed the drive's
- * sector address using CHS or LBA.  All that remains is to prepare for DMA
- * and then issue the actual read/write DMA/PIO command to the drive.
- *
- * For ATAPI devices, we just prepare for DMA and return. The caller should
- * then issue the packet command to the drive and call us again with
- * cris_dma_start afterwards.
- *
- * Returns 0 if all went well.
- * Returns 1 if DMA read/write could not be started, in which case
- * the caller should revert to PIO for the current request.
- */
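
(The IDE core drives these hooks in a fixed order. A rough illustration of the protocol the comment describes, simplified and ignoring error paths; "command" stands for the opcode chosen by the caller:

        if (hwif->dma_setup(drive)) {
                /* descriptor build failed: the caller reverts to PIO for this request */
                return;
        }
        hwif->dma_exec_cmd(drive, command);   /* issue READ/WRITE DMA, or the PACKET command */
        hwif->dma_start(drive);               /* ATAPI: only called later, after the packet bytes are sent */
)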
-
-static int cris_dma_end(ide_drive_t *drive)
-{
-       drive->waiting_for_dma = 0;
-       return 0;
-}
-
-static int cris_dma_setup(ide_drive_t *drive)
-{
-       struct request *rq = drive->hwif->hwgroup->rq;
-
-       cris_ide_initialize_dma(!rq_data_dir(rq));
-       if (cris_ide_build_dmatable (drive)) {
-               ide_map_sg(drive, rq);
-               return 1;
-       }
-
-       drive->waiting_for_dma = 1;
-       return 0;
-}
-
-static void cris_dma_exec_cmd(ide_drive_t *drive, u8 command)
-{
-       ide_execute_command(drive, command, &cris_dma_intr, WAIT_CMD, NULL);
-}
-
-static void cris_dma_start(ide_drive_t *drive)
-{
-       struct request *rq = drive->hwif->hwgroup->rq;
-       int writing = rq_data_dir(rq);
-       int type = TYPE_DMA;
-
-       if (drive->current_speed >= XFER_UDMA_0)
-               type = TYPE_UDMA;
-
-       cris_ide_start_dma(drive, &ata_descrs[0], writing ? 0 : 1, type, ata_tot_size);
-
-       if (writing) {
-               LED_DISK_WRITE(1);
-       } else {
-               LED_DISK_READ(1);
-       }
-}
-
-module_init(init_e100_ide);
-
-MODULE_LICENSE("GPL");
index 92b02b96d7db813b11f1d5352db61bee7db67060..ecf53bb0d2aa2941da54aa772485daa0bfb3a616 100644 (file)
@@ -42,6 +42,91 @@ static u16 mm_inw(unsigned long a)
        return r;
 }
 
+static void h8300_tf_load(ide_drive_t *drive, ide_task_t *task)
+{
+       ide_hwif_t *hwif = drive->hwif;
+       struct ide_io_ports *io_ports = &hwif->io_ports;
+       struct ide_taskfile *tf = &task->tf;
+       u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
+
+       if (task->tf_flags & IDE_TFLAG_FLAGGED)
+               HIHI = 0xFF;
+
+       ide_set_irq(drive, 1);
+
+       if (task->tf_flags & IDE_TFLAG_OUT_DATA)
+               mm_outw((tf->hob_data << 8) | tf->data, io_ports->data_addr);
+
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
+               outb(tf->hob_feature, io_ports->feature_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
+               outb(tf->hob_nsect, io_ports->nsect_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
+               outb(tf->hob_lbal, io_ports->lbal_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
+               outb(tf->hob_lbam, io_ports->lbam_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
+               outb(tf->hob_lbah, io_ports->lbah_addr);
+
+       if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
+               outb(tf->feature, io_ports->feature_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
+               outb(tf->nsect, io_ports->nsect_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
+               outb(tf->lbal, io_ports->lbal_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
+               outb(tf->lbam, io_ports->lbam_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
+               outb(tf->lbah, io_ports->lbah_addr);
+
+       if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
+               outb((tf->device & HIHI) | drive->select.all,
+                    io_ports->device_addr);
+}
+
+static void h8300_tf_read(ide_drive_t *drive, ide_task_t *task)
+{
+       ide_hwif_t *hwif = drive->hwif;
+       struct ide_io_ports *io_ports = &hwif->io_ports;
+       struct ide_taskfile *tf = &task->tf;
+
+       if (task->tf_flags & IDE_TFLAG_IN_DATA) {
+               u16 data = mm_inw(io_ports->data_addr);
+
+               tf->data = data & 0xff;
+               tf->hob_data = (data >> 8) & 0xff;
+       }
+
+       /* be sure we're looking at the low order bits */
+       outb(drive->ctl & ~0x80, io_ports->ctl_addr);
+
+       if (task->tf_flags & IDE_TFLAG_IN_NSECT)
+               tf->nsect  = inb(io_ports->nsect_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAL)
+               tf->lbal   = inb(io_ports->lbal_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAM)
+               tf->lbam   = inb(io_ports->lbam_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAH)
+               tf->lbah   = inb(io_ports->lbah_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
+               tf->device = inb(io_ports->device_addr);
+
+       if (task->tf_flags & IDE_TFLAG_LBA48) {
+               outb(drive->ctl | 0x80, io_ports->ctl_addr);
+
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
+                       tf->hob_feature = inb(io_ports->feature_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
+                       tf->hob_nsect   = inb(io_ports->nsect_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
+                       tf->hob_lbal    = inb(io_ports->lbal_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
+                       tf->hob_lbam    = inb(io_ports->lbam_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
+                       tf->hob_lbah    = inb(io_ports->lbah_addr);
+       }
+}
+
 static void mm_outsw(unsigned long addr, void *buf, u32 len)
 {
        unsigned short *bp = (unsigned short *)buf;
@@ -56,6 +141,18 @@ static void mm_insw(unsigned long addr, void *buf, u32 len)
                *bp = bswap(*(volatile u16 *)addr);
 }
 
+static void h8300_input_data(ide_drive_t *drive, struct request *rq,
+                            void *buf, unsigned int len)
+{
+       mm_insw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
+}
+
+static void h8300_output_data(ide_drive_t *drive, struct request *rq,
+                             void *buf, unsigned int len)
+{
+       mm_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
+}
+
 #define H8300_IDE_GAP (2)
 
 static inline void hw_setup(hw_regs_t *hw)
@@ -63,9 +160,9 @@ static inline void hw_setup(hw_regs_t *hw)
        int i;
 
        memset(hw, 0, sizeof(hw_regs_t));
-       for (i = 0; i <= IDE_STATUS_OFFSET; i++)
-               hw->io_ports[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
-       hw->io_ports[IDE_CONTROL_OFFSET] = CONFIG_H8300_IDE_ALT;
+       for (i = 0; i <= 7; i++)
+               hw->io_ports_array[i] = CONFIG_H8300_IDE_BASE + H8300_IDE_GAP*i;
+       hw->io_ports.ctl_addr = CONFIG_H8300_IDE_ALT;
        hw->irq = EXT_IRQ0 + CONFIG_H8300_IDE_IRQ;
        hw->chipset = ide_generic;
 }
@@ -74,13 +171,11 @@ static inline void hwif_setup(ide_hwif_t *hwif)
 {
        default_hwif_iops(hwif);
 
-       hwif->mmio  = 1;
-       hwif->OUTW  = mm_outw;
-       hwif->OUTSW = mm_outsw;
-       hwif->INW   = mm_inw;
-       hwif->INSW  = mm_insw;
-       hwif->OUTSL = NULL;
-       hwif->INSL  = NULL;
+       hwif->tf_load = h8300_tf_load;
+       hwif->tf_read = h8300_tf_read;
+
+       hwif->input_data  = h8300_input_data;
+       hwif->output_data = h8300_output_data;
 }
 
 static int __init h8300_ide_init(void)
index 0f6fb6b72dd91bcbb15de6f237e279df2d60c353..9d3601fa56803c01c9870f5fd0ab12b196ff5637 100644 (file)
@@ -55,14 +55,22 @@ struct ide_acpi_hwif_link {
 /* note: adds function name and KERN_DEBUG */
 #ifdef DEBUGGING
 #define DEBPRINT(fmt, args...) \
-               printk(KERN_DEBUG "%s: " fmt, __FUNCTION__, ## args)
+               printk(KERN_DEBUG "%s: " fmt, __func__, ## args)
 #else
 #define DEBPRINT(fmt, args...) do {} while (0)
 #endif /* DEBUGGING */
 
-extern int ide_noacpi;
-extern int ide_noacpitfs;
-extern int ide_noacpionboot;
+int ide_noacpi;
+module_param_named(noacpi, ide_noacpi, bool, 0);
+MODULE_PARM_DESC(noacpi, "disable IDE ACPI support");
+
+int ide_acpigtf;
+module_param_named(acpigtf, ide_acpigtf, bool, 0);
+MODULE_PARM_DESC(acpigtf, "enable IDE ACPI _GTF support");
+
+int ide_acpionboot;
+module_param_named(acpionboot, ide_acpionboot, bool, 0);
+MODULE_PARM_DESC(acpionboot, "call IDE ACPI methods on boot");
 
 static bool ide_noacpi_psx;
 static int no_acpi_psx(const struct dmi_system_id *id)
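
(This hunk turns the old externally-set flags into module parameters, and flips their polarity: the negative ide_noacpitfs / ide_noacpionboot flags become the positive acpigtf / acpionboot options, which is why the later checks in this file are inverted. Assuming ide-acpi.c is built into the ide-core module - an assumption, not stated in the patch - they would be set as, for example, ide-core.noacpi=1 on the kernel command line or "modprobe ide-core noacpi=1".)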
@@ -309,7 +317,7 @@ static int do_drive_get_GTF(ide_drive_t *drive,
        if (ACPI_FAILURE(status)) {
                printk(KERN_DEBUG
                       "%s: Run _GTF error: status = 0x%x\n",
-                      __FUNCTION__, status);
+                      __func__, status);
                goto out;
        }
 
@@ -335,7 +343,7 @@ static int do_drive_get_GTF(ide_drive_t *drive,
            out_obj->buffer.length % REGS_PER_GTF) {
                printk(KERN_ERR
                       "%s: unexpected GTF length (%d) or addr (0x%p)\n",
-                      __FUNCTION__, out_obj->buffer.length,
+                      __func__, out_obj->buffer.length,
                       out_obj->buffer.pointer);
                err = -ENOENT;
                kfree(output.pointer);
@@ -376,7 +384,7 @@ static int taskfile_load_raw(ide_drive_t *drive,
        memcpy(&args.tf_array[7], &gtf->tfa, 7);
        args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 
-       if (ide_noacpitfs) {
+       if (!ide_acpigtf) {
                DEBPRINT("_GTF execution disabled\n");
                return err;
        }
@@ -384,7 +392,7 @@ static int taskfile_load_raw(ide_drive_t *drive,
        err = ide_no_data_taskfile(drive, &args);
        if (err)
                printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n",
-                      __FUNCTION__, err);
+                      __func__, err);
 
        return err;
 }
@@ -422,7 +430,7 @@ static int do_drive_set_taskfiles(ide_drive_t *drive,
 
        if (gtf_length % REGS_PER_GTF) {
                printk(KERN_ERR "%s: unexpected GTF length (%d)\n",
-                      __FUNCTION__, gtf_length);
+                      __func__, gtf_length);
                goto out;
        }
 
@@ -547,7 +555,7 @@ void ide_acpi_get_timing(ide_hwif_t *hwif)
                printk(KERN_ERR
                        "%s: unexpected _GTM length (0x%x)[should be 0x%zx] or "
                        "addr (0x%p)\n",
-                       __FUNCTION__, out_obj->buffer.length,
+                       __func__, out_obj->buffer.length,
                        sizeof(struct GTM_buffer), out_obj->buffer.pointer);
                return;
        }
@@ -721,7 +729,7 @@ void ide_acpi_port_init_devices(ide_hwif_t *hwif)
                                 drive->name, err);
        }
 
-       if (ide_noacpionboot) {
+       if (!ide_acpionboot) {
                DEBPRINT("ACPI methods disabled on boot\n");
                return;
        }
index 1afd95ad46538144e610a37a7559d30c3822dda0..68e7f19dc03662b405363ca23bf0081db73baa4a 100644 (file)
 #include <linux/mutex.h>
 #include <linux/bcd.h>
 
-#include <scsi/scsi.h> /* For SCSI -> ATAPI command conversion */
+/* For SCSI -> ATAPI command conversion */
+#include <scsi/scsi.h>
 
-#include <asm/irq.h>
-#include <asm/io.h>
+#include <linux/irq.h>
+#include <linux/io.h>
 #include <asm/byteorder.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <asm/unaligned.h>
 
 #include "ide-cd.h"
@@ -77,12 +78,11 @@ static void ide_cd_put(struct cdrom_info *cd)
        mutex_unlock(&idecd_ref_mutex);
 }
 
-/****************************************************************************
+/*
  * Generic packet command support and error handling routines.
  */
 
-/* Mark that we've seen a media change, and invalidate our internal
-   buffers. */
+/* Mark that we've seen a media change and invalidate our internal buffers. */
 static void cdrom_saw_media_change(ide_drive_t *drive)
 {
        struct cdrom_info *cd = drive->driver_data;
@@ -105,9 +105,8 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
                break;
        case NOT_READY:
                /*
-                * don't care about tray state messages for
-                * e.g. capacity commands or in-progress or
-                * becoming ready
+                * don't care about tray state messages for e.g. capacity
+                * commands or in-progress or becoming ready
                 */
                if (sense->asc == 0x3a || sense->asc == 0x04)
                        break;
@@ -115,8 +114,8 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
                break;
        case ILLEGAL_REQUEST:
                /*
-                * don't log START_STOP unit with LoEj set, since
-                * we cannot reliably check if drive can auto-close
+                * don't log START_STOP unit with LoEj set, since we cannot
+                * reliably check if drive can auto-close
                 */
                if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24)
                        break;
@@ -124,9 +123,9 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
                break;
        case UNIT_ATTENTION:
                /*
-                * Make good and sure we've seen this potential media
-                * change. Some drives (i.e. Creative) fail to present
-                * the correct sense key in the error register.
+                * Make good and sure we've seen this potential media change.
+                * Some drives (i.e. Creative) fail to present the correct sense
+                * key in the error register.
                 */
                cdrom_saw_media_change(drive);
                break;
@@ -137,29 +136,28 @@ static int cdrom_log_sense(ide_drive_t *drive, struct request *rq,
        return log;
 }
 
-static
-void cdrom_analyze_sense_data(ide_drive_t *drive,
+static void cdrom_analyze_sense_data(ide_drive_t *drive,
                              struct request *failed_command,
                              struct request_sense *sense)
 {
        unsigned long sector;
        unsigned long bio_sectors;
-       unsigned long valid;
        struct cdrom_info *info = drive->driver_data;
 
        if (!cdrom_log_sense(drive, failed_command, sense))
                return;
 
        /*
-        * If a read toc is executed for a CD-R or CD-RW medium where
-        * the first toc has not been recorded yet, it will fail with
-        * 05/24/00 (which is a confusing error)
+        * If a read toc is executed for a CD-R or CD-RW medium where the first
+        * toc has not been recorded yet, it will fail with 05/24/00 (which is a
+        * confusing error)
         */
        if (failed_command && failed_command->cmd[0] == GPCMD_READ_TOC_PMA_ATIP)
                if (sense->sense_key == 0x05 && sense->asc == 0x24)
                        return;
 
-       if (sense->error_code == 0x70) {        /* Current Error */
+       /* current error */
+       if (sense->error_code == 0x70) {
                switch (sense->sense_key) {
                case MEDIUM_ERROR:
                case VOLUME_OVERFLOW:
@@ -174,29 +172,23 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
                                 (sense->information[2] <<  8) |
                                 (sense->information[3]);
 
-                       bio_sectors = bio_sectors(failed_command->bio);
-                       if (bio_sectors < 4)
-                               bio_sectors = 4;
                        if (drive->queue->hardsect_size == 2048)
-                               sector <<= 2;   /* Device sector size is 2K */
+                               /* device sector size is 2K */
+                               sector <<= 2;
+
+                       bio_sectors = max(bio_sectors(failed_command->bio), 4U);
                        sector &= ~(bio_sectors - 1);
-                       valid = (sector - failed_command->sector) << 9;
 
-                       if (valid < 0)
-                               valid = 0;
                        if (sector < get_capacity(info->disk) &&
-                               drive->probed_capacity - sector < 4 * 75) {
+                           drive->probed_capacity - sector < 4 * 75)
                                set_capacity(info->disk, sector);
-                       }
                }
        }
 
        ide_cd_log_error(drive->name, failed_command, sense);
 }
 
-/*
- * Initialize a ide-cd packet command request
- */
+/* Initialize a ide-cd packet command request */
 void ide_cd_init_rq(ide_drive_t *drive, struct request *rq)
 {
        struct cdrom_info *cd = drive->driver_data;
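
(As a worked example of the fixup above: the sense data reports the failing LBA in device sectors, so with a 2048-byte hard sector size a reported LBA of 0x1235 becomes 512-byte sector 0x48d4 (0x1235 << 2); with an 8-sector bio - the count is clamped to at least 4 by the max() - it is then rounded down to 0x48d0 before being compared against the disc capacity.)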
@@ -220,7 +212,8 @@ static void cdrom_queue_request_sense(ide_drive_t *drive, void *sense,
 
        rq->data = sense;
        rq->cmd[0] = GPCMD_REQUEST_SENSE;
-       rq->cmd[4] = rq->data_len = 18;
+       rq->cmd[4] = 18;
+       rq->data_len = 18;
 
        rq->cmd_type = REQ_TYPE_SENSE;
 
@@ -252,7 +245,7 @@ static void cdrom_end_request(ide_drive_t *drive, int uptodate)
                        }
                        cdrom_analyze_sense_data(drive, failed, sense);
                        /*
-                        * now end failed request
+                        * now end the failed request
                         */
                        if (blk_fs_request(failed)) {
                                if (ide_end_dequeued_request(drive, failed, 0,
@@ -280,21 +273,24 @@ static void cdrom_end_request(ide_drive_t *drive, int uptodate)
        ide_end_request(drive, uptodate, nsectors);
 }
 
-static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 stat)
+static void ide_dump_status_no_sense(ide_drive_t *drive, const char *msg, u8 st)
 {
-       if (stat & 0x80)
+       if (st & 0x80)
                return;
-       ide_dump_status(drive, msg, stat);
+       ide_dump_status(drive, msg, st);
 }
 
-/* Returns 0 if the request should be continued.
-   Returns 1 if the request was ended. */
+/*
+ * Returns:
+ * 0: if the request should be continued.
+ * 1: if the request was ended.
+ */
 static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 {
        struct request *rq = HWGROUP(drive)->rq;
        int stat, err, sense_key;
 
-       /* Check for errors. */
+       /* check for errors */
        stat = ide_read_status(drive);
 
        if (stat_ret)
@@ -303,20 +299,22 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
        if (OK_STAT(stat, good_stat, BAD_R_STAT))
                return 0;
 
-       /* Get the IDE error register. */
+       /* get the IDE error register */
        err = ide_read_error(drive);
        sense_key = err >> 4;
 
        if (rq == NULL) {
-               printk("%s: missing rq in cdrom_decode_status\n", drive->name);
+               printk(KERN_ERR "%s: missing rq in %s\n",
+                               drive->name, __func__);
                return 1;
        }
 
        if (blk_sense_request(rq)) {
-               /* We got an error trying to get sense info
-                  from the drive (probably while trying
-                  to recover from a former error).  Just give up. */
-
+               /*
+                * We got an error trying to get sense info from the drive
+                * (probably while trying to recover from a former error).
+                * Just give up.
+                */
                rq->cmd_flags |= REQ_FAILED;
                cdrom_end_request(drive, 0);
                ide_error(drive, "request sense failure", stat);
@@ -332,13 +330,12 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
                if (blk_pc_request(rq) && !rq->errors)
                        rq->errors = SAM_STAT_CHECK_CONDITION;
 
-               /* Check for tray open. */
+               /* check for tray open */
                if (sense_key == NOT_READY) {
                        cdrom_saw_media_change(drive);
                } else if (sense_key == UNIT_ATTENTION) {
-                       /* Check for media change. */
+                       /* check for media change */
                        cdrom_saw_media_change(drive);
-                       /*printk("%s: media changed\n",drive->name);*/
                        return 0;
                } else if (sense_key == ILLEGAL_REQUEST &&
                           rq->cmd[0] == GPCMD_START_STOP_UNIT) {
@@ -350,7 +347,7 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
                         * cdrom_log_sense() knows this!
                         */
                } else if (!(rq->cmd_flags & REQ_QUIET)) {
-                       /* Otherwise, print an error. */
+                       /* otherwise, print an error */
                        ide_dump_status(drive, "packet command error", stat);
                }
 
@@ -366,27 +363,30 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
        } else if (blk_fs_request(rq)) {
                int do_end_request = 0;
 
-               /* Handle errors from READ and WRITE requests. */
+               /* handle errors from READ and WRITE requests */
 
                if (blk_noretry_request(rq))
                        do_end_request = 1;
 
                if (sense_key == NOT_READY) {
-                       /* Tray open. */
+                       /* tray open */
                        if (rq_data_dir(rq) == READ) {
                                cdrom_saw_media_change(drive);
 
-                               /* Fail the request. */
-                               printk("%s: tray open\n", drive->name);
+                               /* fail the request */
+                               printk(KERN_ERR "%s: tray open\n", drive->name);
                                do_end_request = 1;
                        } else {
                                struct cdrom_info *info = drive->driver_data;
 
-                               /* allow the drive 5 seconds to recover, some
+                               /*
+                                * Allow the drive 5 seconds to recover, some
                                 * devices will return this error while flushing
-                                * data from cache */
+                                * data from cache.
+                                */
                                if (!rq->errors)
-                                       info->write_timeout = jiffies + ATAPI_WAIT_WRITE_BUSY;
+                                       info->write_timeout = jiffies +
+                                                       ATAPI_WAIT_WRITE_BUSY;
                                rq->errors = 1;
                                if (time_after(jiffies, info->write_timeout))
                                        do_end_request = 1;
@@ -394,65 +394,68 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
                                        unsigned long flags;
 
                                        /*
-                                        * take a breather relying on the
-                                        * unplug timer to kick us again
+                                        * take a breather relying on the unplug
+                                        * timer to kick us again
                                         */
                                        spin_lock_irqsave(&ide_lock, flags);
                                        blk_plug_device(drive->queue);
-                                       spin_unlock_irqrestore(&ide_lock, flags);
+                                       spin_unlock_irqrestore(&ide_lock,
+                                                               flags);
                                        return 1;
                                }
                        }
                } else if (sense_key == UNIT_ATTENTION) {
-                       /* Media change. */
-                       cdrom_saw_media_change (drive);
+                       /* media change */
+                       cdrom_saw_media_change(drive);
 
                        /*
-                        * Arrange to retry the request.
-                        * But be sure to give up if we've retried
-                        * too many times.
+                        * Arrange to retry the request but be sure to give up
+                        * if we've retried too many times.
                         */
                        if (++rq->errors > ERROR_MAX)
                                do_end_request = 1;
                } else if (sense_key == ILLEGAL_REQUEST ||
                           sense_key == DATA_PROTECT) {
                        /*
-                        * No point in retrying after an illegal
-                        * request or data protect error.
+                        * No point in retrying after an illegal request or data
+                        * protect error.
                         */
                        ide_dump_status_no_sense(drive, "command error", stat);
                        do_end_request = 1;
                } else if (sense_key == MEDIUM_ERROR) {
                        /*
                         * No point in re-trying a zillion times on a bad
-                        * sector... If we got here the error is not correctable
+                        * sector. If we got here the error is not correctable.
                         */
-                       ide_dump_status_no_sense(drive, "media error (bad sector)", stat);
+                       ide_dump_status_no_sense(drive,
+                                                "media error (bad sector)",
+                                                stat);
                        do_end_request = 1;
                } else if (sense_key == BLANK_CHECK) {
-                       /* Disk appears blank ?? */
-                       ide_dump_status_no_sense(drive, "media error (blank)", stat);
+                       /* disk appears blank ?? */
+                       ide_dump_status_no_sense(drive, "media error (blank)",
+                                                stat);
                        do_end_request = 1;
                } else if ((err & ~ABRT_ERR) != 0) {
-                       /* Go to the default handler
-                          for other errors. */
+                       /* go to the default handler for other errors */
                        ide_error(drive, "cdrom_decode_status", stat);
                        return 1;
                } else if ((++rq->errors > ERROR_MAX)) {
-                       /* We've racked up too many retries.  Abort. */
+                       /* we've racked up too many retries, abort */
                        do_end_request = 1;
                }
 
-               /* End a request through request sense analysis when we have
-                  sense data. We need this in order to perform end of media
-                  processing */
-
+               /*
+                * End a request through request sense analysis when we have
+                * sense data. We need this in order to perform end of media
+                * processing.
+                */
                if (do_end_request)
                        goto end_request;
 
                /*
-                * If we got a CHECK_CONDITION status,
-                * queue a request sense command.
+                * If we got a CHECK_CONDITION status, queue
+                * a request sense command.
                 */
                if (stat & ERR_STAT)
                        cdrom_queue_request_sense(drive, NULL, NULL);
@@ -461,7 +464,7 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
                cdrom_end_request(drive, 0);
        }
 
-       /* Retry, or handle the next request. */
+       /* retry, or handle the next request */
        return 1;
 
 end_request:
@@ -486,10 +489,10 @@ static int cdrom_timer_expiry(ide_drive_t *drive)
        unsigned long wait = 0;
 
        /*
-        * Some commands are *slow* and normally take a long time to
-        * complete. Usually we can use the ATAPI "disconnect" to bypass
-        * this, but not all commands/drives support that. Let
-        * ide_timer_expiry keep polling us for these.
+        * Some commands are *slow* and normally take a long time to complete.
+        * Usually we can use the ATAPI "disconnect" to bypass this, but not all
+        * commands/drives support that. Let ide_timer_expiry keep polling us
+        * for these.
         */
        switch (rq->cmd[0]) {
        case GPCMD_BLANK:
@@ -501,20 +504,22 @@ static int cdrom_timer_expiry(ide_drive_t *drive)
                break;
        default:
                if (!(rq->cmd_flags & REQ_QUIET))
-                       printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n", rq->cmd[0]);
+                       printk(KERN_INFO "ide-cd: cmd 0x%x timed out\n",
+                                        rq->cmd[0]);
                wait = 0;
                break;
        }
        return wait;
 }
 
-/* Set up the device registers for transferring a packet command on DEV,
-   expecting to later transfer XFERLEN bytes.  HANDLER is the routine
-   which actually transfers the command to the drive.  If this is a
-   drq_interrupt device, this routine will arrange for HANDLER to be
-   called when the interrupt from the drive arrives.  Otherwise, HANDLER
-   will be called immediately after the drive is prepared for the transfer. */
-
+/*
+ * Set up the device registers for transferring a packet command on DEV,
+ * expecting to later transfer XFERLEN bytes.  HANDLER is the routine
+ * which actually transfers the command to the drive.  If this is a
+ * drq_interrupt device, this routine will arrange for HANDLER to be
+ * called when the interrupt from the drive arrives.  Otherwise, HANDLER
+ * will be called immediately after the drive is prepared for the transfer.
+ */
 static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
                                                  int xferlen,
                                                  ide_handler_t *handler)
@@ -523,15 +528,15 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
        struct cdrom_info *info = drive->driver_data;
        ide_hwif_t *hwif = drive->hwif;
 
-       /* Wait for the controller to be idle. */
+       /* wait for the controller to be idle */
        if (ide_wait_stat(&startstop, drive, 0, BUSY_STAT, WAIT_READY))
                return startstop;
 
        /* FIXME: for Virtual DMA we must check harder */
        if (info->dma)
-               info->dma = !hwif->dma_setup(drive);
+               info->dma = !hwif->dma_ops->dma_setup(drive);
 
-       /* Set up the controller registers. */
+       /* set up the controller registers */
        ide_pktcmd_tf_load(drive, IDE_TFLAG_OUT_NSECT | IDE_TFLAG_OUT_LBAL |
                           IDE_TFLAG_NO_SELECT_MASK, xferlen, info->dma);
 
@@ -541,27 +546,22 @@ static ide_startstop_t cdrom_start_packet_command(ide_drive_t *drive,
                        drive->waiting_for_dma = 0;
 
                /* packet command */
-               ide_execute_command(drive, WIN_PACKETCMD, handler, ATAPI_WAIT_PC, cdrom_timer_expiry);
+               ide_execute_command(drive, WIN_PACKETCMD, handler,
+                                   ATAPI_WAIT_PC, cdrom_timer_expiry);
                return ide_started;
        } else {
-               unsigned long flags;
-
-               /* packet command */
-               spin_lock_irqsave(&ide_lock, flags);
-               hwif->OUTBSYNC(drive, WIN_PACKETCMD,
-                              hwif->io_ports[IDE_COMMAND_OFFSET]);
-               ndelay(400);
-               spin_unlock_irqrestore(&ide_lock, flags);
+               ide_execute_pkt_cmd(drive);
 
                return (*handler) (drive);
        }
 }
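
The comment above cdrom_start_packet_command describes a two-way dispatch: on drq_interrupt devices the handler is armed and runs from the drive's interrupt, on everything else it is called as soon as the command has been issued. A rough standalone sketch of just that dispatch shape follows; every name in it is invented for illustration and none of it is the real ide_* API.

#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool drq_interrupt;            /* device interrupts before taking the CDB */
	int (*pending)(struct dev *);  /* handler parked until that interrupt */
};

static int send_cdb(struct dev *d)
{
	(void)d;
	puts("CDB sent");
	return 0;
}

/* Issue a packet command: park the handler for the IRQ path, or run it
 * immediately once the device has been set up for the transfer. */
static int issue_packet_command(struct dev *d, int (*handler)(struct dev *))
{
	if (d->drq_interrupt) {
		d->pending = handler;  /* the interrupt path calls it later */
		return 0;              /* "started" */
	}
	return handler(d);             /* immediate path */
}

int main(void)
{
	struct dev d = { .drq_interrupt = false };

	return issue_packet_command(&d, send_cdb);
}
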
 
-/* Send a packet command to DRIVE described by CMD_BUF and CMD_LEN.
-   The device registers must have already been prepared
-   by cdrom_start_packet_command.
-   HANDLER is the interrupt handler to call when the command completes
-   or there's data ready. */
+/*
+ * Send a packet command to DRIVE described by CMD_BUF and CMD_LEN. The device
+ * registers must have already been prepared by cdrom_start_packet_command.
+ * HANDLER is the interrupt handler to call when the command completes or
+ * there's data ready.
+ */
 #define ATAPI_MIN_CDB_BYTES 12
 static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
                                          struct request *rq,
@@ -573,24 +573,26 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
        ide_startstop_t startstop;
 
        if (info->cd_flags & IDE_CD_FLAG_DRQ_INTERRUPT) {
-               /* Here we should have been called after receiving an interrupt
-                  from the device.  DRQ should how be set. */
+               /*
+                * Here we should have been called after receiving an interrupt
+                * from the device.  DRQ should now be set.
+                */
 
-               /* Check for errors. */
+               /* check for errors */
                if (cdrom_decode_status(drive, DRQ_STAT, NULL))
                        return ide_stopped;
 
-               /* Ok, next interrupt will be DMA interrupt. */
+               /* ok, next interrupt will be DMA interrupt */
                if (info->dma)
                        drive->waiting_for_dma = 1;
        } else {
-               /* Otherwise, we must wait for DRQ to get set. */
+               /* otherwise, we must wait for DRQ to get set */
                if (ide_wait_stat(&startstop, drive, DRQ_STAT,
                                BUSY_STAT, WAIT_READY))
                        return startstop;
        }
 
-       /* Arm the interrupt handler. */
+       /* arm the interrupt handler */
        ide_set_handler(drive, handler, rq->timeout, cdrom_timer_expiry);
 
        /* ATAPI commands get padded out to 12 bytes minimum */
@@ -598,25 +600,24 @@ static ide_startstop_t cdrom_transfer_packet_command(ide_drive_t *drive,
        if (cmd_len < ATAPI_MIN_CDB_BYTES)
                cmd_len = ATAPI_MIN_CDB_BYTES;
 
-       /* Send the command to the device. */
-       HWIF(drive)->atapi_output_bytes(drive, rq->cmd, cmd_len);
+       /* send the command to the device */
+       hwif->output_data(drive, NULL, rq->cmd, cmd_len);
 
-       /* Start the DMA if need be */
+       /* start the DMA if need be */
        if (info->dma)
-               hwif->dma_start(drive);
+               hwif->dma_ops->dma_start(drive);
 
        return ide_started;
 }
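
ATAPI_MIN_CDB_BYTES above captures the rule that ATAPI command packets go out padded to at least 12 bytes. Below is a small standalone sketch of that padding, under the assumption of a caller-supplied wire buffer of at least 12 bytes; the helper name is made up.

#include <stdio.h>
#include <string.h>

#define ATAPI_MIN_CDB_BYTES 12

/* Copy a CDB into the wire buffer, zero-padding it up to the
 * 12-byte ATAPI minimum; returns the number of bytes to send. */
static int atapi_pack_cdb(unsigned char *out, const unsigned char *cmd, int len)
{
	int send = len < ATAPI_MIN_CDB_BYTES ? ATAPI_MIN_CDB_BYTES : len;

	memset(out, 0, send);
	memcpy(out, cmd, len);
	return send;
}

int main(void)
{
	unsigned char cdb6[6] = { 0x12, 0, 0, 0, 36, 0 }; /* some 6-byte command */
	unsigned char wire[16];

	printf("sending %d bytes\n", atapi_pack_cdb(wire, cdb6, 6)); /* -> 12 */
	return 0;
}
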
 
-/****************************************************************************
+/*
  * Block read functions.
  */
-
 static void ide_cd_pad_transfer(ide_drive_t *drive, xfer_func_t *xf, int len)
 {
        while (len > 0) {
                int dum = 0;
-               xf(drive, &dum, sizeof(dum));
+               xf(drive, NULL, &dum, sizeof(dum));
                len -= sizeof(dum);
        }
 }
@@ -626,7 +627,7 @@ static void ide_cd_drain_data(ide_drive_t *drive, int nsects)
        while (nsects > 0) {
                static char dum[SECTOR_SIZE];
 
-               drive->hwif->atapi_input_bytes(drive, dum, sizeof(dum));
+               drive->hwif->input_data(drive, NULL, dum, sizeof(dum));
                nsects--;
        }
 }
@@ -649,20 +650,21 @@ static int ide_cd_check_ireason(ide_drive_t *drive, struct request *rq,
                ide_hwif_t *hwif = drive->hwif;
                xfer_func_t *xf;
 
-               /* Whoops... */
+               /* whoops... */
                printk(KERN_ERR "%s: %s: wrong transfer direction!\n",
                                drive->name, __func__);
 
-               xf = rw ? hwif->atapi_output_bytes : hwif->atapi_input_bytes;
+               xf = rw ? hwif->output_data : hwif->input_data;
                ide_cd_pad_transfer(drive, xf, len);
        } else  if (rw == 0 && ireason == 1) {
-               /* Some drives (ASUS) seem to tell us that status
-                * info is available. just get it and ignore.
+               /*
+                * Some drives (ASUS) seem to tell us that status info is
+                * available.  Just get it and ignore.
                 */
                (void)ide_read_status(drive);
                return 0;
        } else {
-               /* Drive wants a command packet, or invalid ireason... */
+               /* drive wants a command packet, or invalid ireason... */
                printk(KERN_ERR "%s: %s: bad interrupt reason 0x%02x\n",
                                drive->name, __func__, ireason);
        }
@@ -702,10 +704,10 @@ static int ide_cd_check_transfer_size(ide_drive_t *drive, int len)
 static ide_startstop_t cdrom_newpc_intr(ide_drive_t *);
 
 /*
- * Routine to send a read/write packet command to the drive.
- * This is usually called directly from cdrom_start_{read,write}().
- * However, for drq_interrupt devices, it is called from an interrupt
- * when the drive is ready to accept the command.
+ * Routine to send a read/write packet command to the drive. This is usually
+ * called directly from cdrom_start_{read,write}(). However, for drq_interrupt
+ * devices, it is called from an interrupt when the drive is ready to accept
+ * the command.
  */
 static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
 {
@@ -727,7 +729,7 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
                 * is larger than the buffer size.
                 */
                if (nskip > 0) {
-                       /* Sanity check... */
+                       /* sanity check... */
                        if (rq->current_nr_sectors !=
                            bio_cur_sectors(rq->bio)) {
                                printk(KERN_ERR "%s: %s: buffer botch (%u)\n",
@@ -744,10 +746,10 @@ static ide_startstop_t cdrom_start_rw_cont(ide_drive_t *drive)
                /* the immediate bit */
                rq->cmd[1] = 1 << 3;
 #endif
-       /* Set up the command */
+       /* set up the command */
        rq->timeout = ATAPI_WAIT_PC;
 
-       /* Send the command to the drive and return. */
+       /* send the command to the drive and return */
        return cdrom_transfer_packet_command(drive, rq, cdrom_newpc_intr);
 }
 
@@ -767,14 +769,8 @@ static ide_startstop_t cdrom_seek_intr(ide_drive_t *drive)
        info->cd_flags |= IDE_CD_FLAG_SEEKING;
 
        if (retry && time_after(jiffies, info->start_seek + IDECD_SEEK_TIMER)) {
-               if (--retry == 0) {
-                       /*
-                        * this condition is far too common, to bother
-                        * users about it
-                        */
-                       /* printk("%s: disabled DSC seek overlap\n", drive->name);*/
+               if (--retry == 0)
                        drive->dsc_overlap = 0;
-               }
        }
        return ide_stopped;
 }
@@ -786,7 +782,7 @@ static ide_startstop_t cdrom_start_seek_continuation(ide_drive_t *drive)
 
        sector_div(frame, queue_hardsect_size(drive->queue) >> SECTOR_BITS);
 
-       memset(rq->cmd, 0, sizeof(rq->cmd));
+       memset(rq->cmd, 0, BLK_MAX_CDB);
        rq->cmd[0] = GPCMD_SEEK;
        put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]);
 
@@ -800,32 +796,34 @@ static ide_startstop_t cdrom_start_seek(ide_drive_t *drive, unsigned int block)
 
        info->dma = 0;
        info->start_seek = jiffies;
-       return cdrom_start_packet_command(drive, 0, cdrom_start_seek_continuation);
+       return cdrom_start_packet_command(drive, 0,
+                                         cdrom_start_seek_continuation);
 }
 
 /*
- * Fix up a possibly partially-processed request so that we can
- * start it over entirely, or even put it back on the request queue.
+ * Fix up a possibly partially-processed request so that we can start it over
+ * entirely, or even put it back on the request queue.
  */
 static void restore_request(struct request *rq)
 {
        if (rq->buffer != bio_data(rq->bio)) {
-               sector_t n = (rq->buffer - (char *) bio_data(rq->bio)) / SECTOR_SIZE;
+               sector_t n =
+                       (rq->buffer - (char *)bio_data(rq->bio)) / SECTOR_SIZE;
 
                rq->buffer = bio_data(rq->bio);
                rq->nr_sectors += n;
                rq->sector -= n;
        }
-       rq->hard_cur_sectors = rq->current_nr_sectors = bio_cur_sectors(rq->bio);
+       rq->current_nr_sectors = bio_cur_sectors(rq->bio);
+       rq->hard_cur_sectors = rq->current_nr_sectors;
        rq->hard_nr_sectors = rq->nr_sectors;
        rq->hard_sector = rq->sector;
        rq->q->prep_rq_fn(rq->q, rq);
 }
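
restore_request rewinds a partially processed request: it works out how many whole sectors the buffer pointer has advanced past the start of the current segment and adds them back onto the sector counters. The same arithmetic in a standalone sketch, assuming 512-byte sectors and invented names:

#include <stdio.h>

#define SECTOR_SIZE 512

/* Rewind a partially processed transfer: buffer has advanced past the start
 * of the segment, so push the position and the remaining count back. */
static void rewind_transfer(char *seg_start, char **buffer,
			    unsigned long *sector, unsigned long *nr_sectors)
{
	unsigned long n = (unsigned long)(*buffer - seg_start) / SECTOR_SIZE;

	*buffer = seg_start;
	*nr_sectors += n;
	*sector -= n;
}

int main(void)
{
	char seg[8 * SECTOR_SIZE];
	char *buf = seg + 3 * SECTOR_SIZE;      /* 3 sectors already consumed */
	unsigned long sector = 103, nr_sectors = 5;

	rewind_transfer(seg, &buf, &sector, &nr_sectors);
	printf("sector=%lu nr_sectors=%lu\n", sector, nr_sectors); /* 100, 8 */
	return 0;
}
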
 
-/****************************************************************************
- * Execute all other packet commands.
+/*
+ * All other packet commands.
  */
-
 static void ide_cd_request_sense_fixup(struct request *rq)
 {
        /*
@@ -849,7 +847,7 @@ int ide_cd_queue_pc(ide_drive_t *drive, struct request *rq)
        if (rq->sense == NULL)
                rq->sense = &sense;
 
-       /* Start of retry loop. */
+       /* start of retry loop */
        do {
                int error;
                unsigned long time = jiffies;
@@ -858,41 +856,45 @@ int ide_cd_queue_pc(ide_drive_t *drive, struct request *rq)
                error = ide_do_drive_cmd(drive, rq, ide_wait);
                time = jiffies - time;
 
-               /* FIXME: we should probably abort/retry or something
-                * in case of failure */
+               /*
+                * FIXME: we should probably abort/retry or something in case of
+                * failure.
+                */
                if (rq->cmd_flags & REQ_FAILED) {
-                       /* The request failed.  Retry if it was due to a unit
-                          attention status
-                          (usually means media was changed). */
+                       /*
+                        * The request failed.  Retry if it was due to a unit
+                        * attention status (usually means media was changed).
+                        */
                        struct request_sense *reqbuf = rq->sense;
 
                        if (reqbuf->sense_key == UNIT_ATTENTION)
                                cdrom_saw_media_change(drive);
                        else if (reqbuf->sense_key == NOT_READY &&
                                 reqbuf->asc == 4 && reqbuf->ascq != 4) {
-                               /* The drive is in the process of loading
-                                  a disk.  Retry, but wait a little to give
-                                  the drive time to complete the load. */
+                               /*
+                                * The drive is in the process of loading
+                                * a disk.  Retry, but wait a little to give
+                                * the drive time to complete the load.
+                                */
                                ssleep(2);
                        } else {
-                               /* Otherwise, don't retry. */
+                               /* otherwise, don't retry */
                                retries = 0;
                        }
                        --retries;
                }
 
-               /* End of retry loop. */
+               /* end of retry loop */
        } while ((rq->cmd_flags & REQ_FAILED) && retries >= 0);
 
-       /* Return an error if the command failed. */
+       /* return an error if the command failed */
        return (rq->cmd_flags & REQ_FAILED) ? -EIO : 0;
 }
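
ide_cd_queue_pc accepts a NULL sense pointer and quietly substitutes a local buffer, so the retry loop always has sense data to look at while callers that don't care can pass NULL. A standalone sketch of that optional out-parameter shape, with invented names and a made-up error code:

#include <stdio.h>

struct sense { int key; };

/* Run a command; out may be NULL if the caller has no use for the
 * error detail, in which case a local buffer stands in for it. */
static int run_cmd(int cmd, struct sense *out)
{
	struct sense local;
	struct sense *s = out ? out : &local;

	s->key = (cmd == 0) ? 0 : 6;   /* pretend nonzero commands fail */
	return s->key ? -1 : 0;
}

int main(void)
{
	struct sense s;

	run_cmd(0, NULL);              /* caller ignores the sense data */
	if (run_cmd(1, &s))            /* caller wants the detail */
		printf("failed, sense key %d\n", s.key);
	return 0;
}
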
 
 /*
- * Called from blk_end_request_callback() after the data of the request
- * is completed and before the request is completed.
- * By returning value '1', blk_end_request_callback() returns immediately
- * without completing the request.
+ * Called from blk_end_request_callback() after the data of the request is
+ * completed and before the request itself is completed. By returning value '1',
+ * blk_end_request_callback() returns immediately without completing the request.
  */
 static int cdrom_newpc_intr_dummy_cb(struct request *rq)
 {
@@ -911,11 +913,11 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
        unsigned int timeout;
        u8 lowcyl, highcyl;
 
-       /* Check for errors. */
+       /* check for errors */
        dma = info->dma;
        if (dma) {
                info->dma = 0;
-               dma_error = HWIF(drive)->ide_dma_end(drive);
+               dma_error = hwif->dma_ops->dma_end(drive);
                if (dma_error) {
                        printk(KERN_ERR "%s: DMA %s error\n", drive->name,
                                        write ? "write" : "read");
@@ -926,9 +928,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
        if (cdrom_decode_status(drive, 0, &stat))
                return ide_stopped;
 
-       /*
-        * using dma, transfer is complete now
-        */
+       /* using dma, transfer is complete now */
        if (dma) {
                if (dma_error)
                        return ide_error(drive, "dma error", stat);
@@ -939,12 +939,10 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
                goto end_request;
        }
 
-       /*
-        * ok we fall to pio :/
-        */
-       ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]) & 0x3;
-       lowcyl  = hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
-       highcyl = hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]);
+       /* ok we fall to pio :/ */
+       ireason = hwif->INB(hwif->io_ports.nsect_addr) & 0x3;
+       lowcyl  = hwif->INB(hwif->io_ports.lbam_addr);
+       highcyl = hwif->INB(hwif->io_ports.lbah_addr);
 
        len = lowcyl + (256 * highcyl);
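
In the PIO fallback the byte count the drive is prepared to transfer on this interrupt comes from the two byte-count registers, low byte plus 256 times the high byte. The same calculation as a standalone sketch with stubbed register reads (the function names are stand-ins, not the hwif->INB accessors):

#include <stdio.h>

/* Stand-ins for reading the low/high byte-count registers. */
static unsigned char read_bcount_low(void)  { return 0x10; }
static unsigned char read_bcount_high(void) { return 0x02; }

int main(void)
{
	unsigned int len = read_bcount_low() + 256 * read_bcount_high();

	printf("drive will transfer %u bytes this interrupt\n", len); /* 528 */
	return 0;
}
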
 
@@ -952,9 +950,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
        if (thislen > len)
                thislen = len;
 
-       /*
-        * If DRQ is clear, the command has completed.
-        */
+       /* If DRQ is clear, the command has completed. */
        if ((stat & DRQ_STAT) == 0) {
                if (blk_fs_request(rq)) {
                        /*
@@ -975,15 +971,13 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
                        return ide_stopped;
                } else if (!blk_pc_request(rq)) {
                        ide_cd_request_sense_fixup(rq);
-                       /* Complain if we still have data left to transfer. */
+                       /* complain if we still have data left to transfer */
                        uptodate = rq->data_len ? 0 : 1;
                }
                goto end_request;
        }
 
-       /*
-        * check which way to transfer data
-        */
+       /* check which way to transfer data */
        if (ide_cd_check_ireason(drive, rq, len, ireason, write))
                return ide_stopped;
 
@@ -1013,22 +1007,18 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
 
        if (ireason == 0) {
                write = 1;
-               xferfunc = HWIF(drive)->atapi_output_bytes;
+               xferfunc = hwif->output_data;
        } else {
                write = 0;
-               xferfunc = HWIF(drive)->atapi_input_bytes;
+               xferfunc = hwif->input_data;
        }
 
-       /*
-        * transfer data
-        */
+       /* transfer data */
        while (thislen > 0) {
                u8 *ptr = blk_fs_request(rq) ? NULL : rq->data;
                int blen = rq->data_len;
 
-               /*
-                * bio backed?
-                */
+               /* bio backed? */
                if (rq->bio) {
                        if (blk_fs_request(rq)) {
                                ptr = rq->buffer;
@@ -1043,7 +1033,8 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
                        if (blk_fs_request(rq) && !write)
                                /*
                                 * If the buffers are full, pipe the rest into
-                                * oblivion. */
+                                * oblivion.
+                                */
                                ide_cd_drain_data(drive, thislen >> 9);
                        else {
                                printk(KERN_ERR "%s: confused, missing data\n",
@@ -1058,7 +1049,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
                if (blen > thislen)
                        blen = thislen;
 
-               xferfunc(drive, ptr, blen);
+               xferfunc(drive, NULL, ptr, blen);
 
                thislen -= blen;
                len -= blen;
@@ -1090,9 +1081,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive)
                        rq->sense_len += blen;
        }
 
-       /*
-        * pad, if necessary
-        */
+       /* pad, if necessary */
        if (!blk_fs_request(rq) && len > 0)
                ide_cd_pad_transfer(drive, xferfunc, len);
 
@@ -1136,9 +1125,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
                queue_hardsect_size(drive->queue) >> SECTOR_BITS;
 
        if (write) {
-               /*
-                * disk has become write protected
-                */
+               /* disk has become write protected */
                if (cd->disk->policy) {
                        cdrom_end_request(drive, 0);
                        return ide_stopped;
@@ -1151,9 +1138,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
                restore_request(rq);
        }
 
-       /*
-        * use DMA, if possible / writes *must* be hardware frame aligned
-        */
+       /* use DMA, if possible / writes *must* be hardware frame aligned */
        if ((rq->nr_sectors & (sectors_per_frame - 1)) ||
            (rq->sector & (sectors_per_frame - 1))) {
                if (write) {
@@ -1167,7 +1152,7 @@ static ide_startstop_t cdrom_start_rw(ide_drive_t *drive, struct request *rq)
        if (write)
                cd->devinfo.media_written = 1;
 
-       /* Start sending the read/write request to the drive. */
+       /* start sending the read/write request to the drive */
        return cdrom_start_packet_command(drive, 32768, cdrom_start_rw_cont);
 }
 
@@ -1192,12 +1177,11 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
 
        info->dma = 0;
 
-       /*
-        * sg request
-        */
+       /* sg request */
        if (rq->bio) {
                int mask = drive->queue->dma_alignment;
-               unsigned long addr = (unsigned long) page_address(bio_page(rq->bio));
+               unsigned long addr =
+                       (unsigned long)page_address(bio_page(rq->bio));
 
                info->dma = drive->using_dma;
 
@@ -1211,15 +1195,16 @@ static ide_startstop_t cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
                        info->dma = 0;
        }
 
-       /* Start sending the command to the drive. */
-       return cdrom_start_packet_command(drive, rq->data_len, cdrom_do_newpc_cont);
+       /* start sending the command to the drive */
+       return cdrom_start_packet_command(drive, rq->data_len,
+                                         cdrom_do_newpc_cont);
 }
 
-/****************************************************************************
+/*
  * cdrom driver request routine.
  */
-static ide_startstop_t
-ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq, sector_t block)
+static ide_startstop_t ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq,
+                                       sector_t block)
 {
        ide_startstop_t action;
        struct cdrom_info *info = drive->driver_data;
@@ -1231,14 +1216,19 @@ ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq, sector_t block)
 
                        if ((stat & SEEK_STAT) != SEEK_STAT) {
                                if (elapsed < IDECD_SEEK_TIMEOUT) {
-                                       ide_stall_queue(drive, IDECD_SEEK_TIMER);
+                                       ide_stall_queue(drive,
+                                                       IDECD_SEEK_TIMER);
                                        return ide_stopped;
                                }
-                               printk(KERN_ERR "%s: DSC timeout\n", drive->name);
+                               printk(KERN_ERR "%s: DSC timeout\n",
+                                               drive->name);
                        }
                        info->cd_flags &= ~IDE_CD_FLAG_SEEKING;
                }
-               if ((rq_data_dir(rq) == READ) && IDE_LARGE_SEEK(info->last_block, block, IDECD_SEEK_THRESHOLD) && drive->dsc_overlap)
+               if (rq_data_dir(rq) == READ &&
+                   IDE_LARGE_SEEK(info->last_block, block,
+                                  IDECD_SEEK_THRESHOLD) &&
+                   drive->dsc_overlap)
                        action = cdrom_start_seek(drive, block);
                else
                        action = cdrom_start_rw(drive, rq);
@@ -1248,9 +1238,7 @@ ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq, sector_t block)
                   rq->cmd_type == REQ_TYPE_ATA_PC) {
                return cdrom_do_block_pc(drive, rq);
        } else if (blk_special_request(rq)) {
-               /*
-                * right now this can only be a reset...
-                */
+               /* right now this can only be a reset... */
                cdrom_end_request(drive, 1);
                return ide_stopped;
        }
@@ -1262,18 +1250,16 @@ ide_do_rw_cdrom(ide_drive_t *drive, struct request *rq, sector_t block)
 
 
 
-/****************************************************************************
+/*
  * Ioctl handling.
  *
- * Routines which queue packet commands take as a final argument a pointer
- * to a request_sense struct.  If execution of the command results
- * in an error with a CHECK CONDITION status, this structure will be filled
- * with the results of the subsequent request sense command.  The pointer
- * can also be NULL, in which case no sense information is returned.
+ * Routines which queue packet commands take as a final argument a pointer to a
+ * request_sense struct. If execution of the command results in an error with a
+ * CHECK CONDITION status, this structure will be filled with the results of the
+ * subsequent request sense command. The pointer can also be NULL, in which case
+ * no sense information is returned.
  */
-
-static
-void msf_from_bcd(struct atapi_msf *msf)
+static void msf_from_bcd(struct atapi_msf *msf)
 {
        msf->minute = BCD2BIN(msf->minute);
        msf->second = BCD2BIN(msf->second);
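
msf_from_bcd runs each MSF field through BCD2BIN, i.e. it unpacks a byte whose two nibbles hold one decimal digit each (0x25 means 25). A standalone sketch of that conversion; bcd2bin here is a local helper, not the kernel macro:

#include <stdio.h>

/* One BCD byte carries two decimal digits: high nibble tens, low nibble units. */
static unsigned int bcd2bin(unsigned char bcd)
{
	return (bcd >> 4) * 10 + (bcd & 0x0f);
}

int main(void)
{
	unsigned char minute = 0x25, second = 0x59, frame = 0x07;

	printf("%u:%u.%u\n", bcd2bin(minute), bcd2bin(second), bcd2bin(frame));
	/* prints 25:59.7 */
	return 0;
}
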
@@ -1293,8 +1279,8 @@ int cdrom_check_status(ide_drive_t *drive, struct request_sense *sense)
        req.cmd_flags |= REQ_QUIET;
 
        /*
-        * Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to
-        * switch CDs instead of supporting the LOAD_UNLOAD opcode.
+        * Sanyo 3 CD changer uses byte 7 of TEST_UNIT_READY to switch CDs
+        * instead of supporting the LOAD_UNLOAD opcode.
         */
        req.cmd[7] = cdi->sanyo_slot % 3;
 
@@ -1370,36 +1356,39 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
        unsigned long sectors_per_frame = SECTORS_PER_FRAME;
 
        if (toc == NULL) {
-               /* Try to allocate space. */
+               /* try to allocate space */
                toc = kmalloc(sizeof(struct atapi_toc), GFP_KERNEL);
                if (toc == NULL) {
-                       printk(KERN_ERR "%s: No cdrom TOC buffer!\n", drive->name);
+                       printk(KERN_ERR "%s: No cdrom TOC buffer!\n",
+                                       drive->name);
                        return -ENOMEM;
                }
                info->toc = toc;
        }
 
-       /* Check to see if the existing data is still valid.
-          If it is, just return. */
+       /*
+        * Check to see if the existing data is still valid. If it is,
+        * just return.
+        */
        (void) cdrom_check_status(drive, sense);
 
        if (info->cd_flags & IDE_CD_FLAG_TOC_VALID)
                return 0;
 
-       /* Try to get the total cdrom capacity and sector size. */
+       /* try to get the total cdrom capacity and sector size */
        stat = cdrom_read_capacity(drive, &toc->capacity, &sectors_per_frame,
                                   sense);
        if (stat)
                toc->capacity = 0x1fffff;
 
        set_capacity(info->disk, toc->capacity * sectors_per_frame);
-       /* Save a private copy of te TOC capacity for error handling */
+       /* save a private copy of the TOC capacity for error handling */
        drive->probed_capacity = toc->capacity * sectors_per_frame;
 
        blk_queue_hardsect_size(drive->queue,
                                sectors_per_frame << SECTOR_BITS);
 
-       /* First read just the header, so we know how long the TOC is. */
+       /* first read just the header, so we know how long the TOC is */
        stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr,
                                    sizeof(struct atapi_toc_header), sense);
        if (stat)
@@ -1416,7 +1405,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
        if (ntracks > MAX_TRACKS)
                ntracks = MAX_TRACKS;
 
-       /* Now read the whole schmeer. */
+       /* now read the whole schmeer */
        stat = cdrom_read_tocentry(drive, toc->hdr.first_track, 1, 0,
                                  (char *)&toc->hdr,
                                   sizeof(struct atapi_toc_header) +
@@ -1424,15 +1413,18 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
                                   sizeof(struct atapi_toc_entry), sense);
 
        if (stat && toc->hdr.first_track > 1) {
-               /* Cds with CDI tracks only don't have any TOC entries,
-                  despite of this the returned values are
-                  first_track == last_track = number of CDI tracks + 1,
-                  so that this case is indistinguishable from the same
-                  layout plus an additional audio track.
-                  If we get an error for the regular case, we assume
-                  a CDI without additional audio tracks. In this case
-                  the readable TOC is empty (CDI tracks are not included)
-                  and only holds the Leadout entry. Heiko Eißfeldt */
+               /*
+                * CDs with CDI tracks only don't have any TOC entries; despite
+                * this, the returned values are
+                * first_track == last_track = number of CDI tracks + 1,
+                * so that this case is indistinguishable from the same layout
+                * plus an additional audio track. If we get an error for the
+                * regular case, we assume a CDI without additional audio
+                * tracks. In this case the readable TOC is empty (CDI tracks
+                * are not included) and only holds the Leadout entry.
+                *
+                * Heiko Eißfeldt.
+                */
                ntracks = 0;
                stat = cdrom_read_tocentry(drive, CDROM_LEADOUT, 1, 0,
                                           (char *)&toc->hdr,
@@ -1473,9 +1465,8 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
                                                  toc->ent[i].addr.msf.frame);
        }
 
-       /* Read the multisession information. */
        if (toc->hdr.first_track != CDROM_LEADOUT) {
-               /* Read the multisession information. */
+               /* read the multisession information */
                stat = cdrom_read_tocentry(drive, 0, 0, 1, (char *)&ms_tmp,
                                           sizeof(ms_tmp), sense);
                if (stat)
@@ -1483,12 +1474,13 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
 
                toc->last_session_lba = be32_to_cpu(ms_tmp.ent.addr.lba);
        } else {
-               ms_tmp.hdr.first_track = ms_tmp.hdr.last_track = CDROM_LEADOUT;
+               ms_tmp.hdr.last_track = CDROM_LEADOUT;
+               ms_tmp.hdr.first_track = ms_tmp.hdr.last_track;
                toc->last_session_lba = msf_to_lba(0, 2, 0); /* 0m 2s 0f */
        }
 
        if (info->cd_flags & IDE_CD_FLAG_TOCADDR_AS_BCD) {
-               /* Re-read multisession information using MSF format */
+               /* re-read multisession information using MSF format */
                stat = cdrom_read_tocentry(drive, 0, 1, 1, (char *)&ms_tmp,
                                           sizeof(ms_tmp), sense);
                if (stat)
@@ -1502,7 +1494,7 @@ int ide_cd_read_toc(ide_drive_t *drive, struct request_sense *sense)
 
        toc->xa_flag = (ms_tmp.hdr.first_track != ms_tmp.hdr.last_track);
 
-       /* Now try to get the total cdrom capacity. */
+       /* now try to get the total cdrom capacity */
        stat = cdrom_get_last_written(cdi, &last_written);
        if (!stat && (last_written > toc->capacity)) {
                toc->capacity = last_written;
@@ -1527,7 +1519,8 @@ int ide_cdrom_get_capabilities(ide_drive_t *drive, u8 *buf)
                size -= ATAPI_CAPABILITIES_PAGE_PAD_SIZE;
 
        init_cdrom_command(&cgc, buf, size, CGC_DATA_UNKNOWN);
-       do { /* we seem to get stat=0x01,err=0x00 the first time (??) */
+       do {
+               /* we seem to get stat=0x01,err=0x00 the first time (??) */
                stat = cdrom_mode_sense(cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
                if (!stat)
                        break;
@@ -1596,8 +1589,7 @@ static int ide_cdrom_register(ide_drive_t *drive, int nslots)
        return register_cdrom(devinfo);
 }
 
-static
-int ide_cdrom_probe_capabilities(ide_drive_t *drive)
+static int ide_cdrom_probe_capabilities(ide_drive_t *drive)
 {
        struct cdrom_info *cd = drive->driver_data;
        struct cdrom_device_info *cdi = &cd->devinfo;
@@ -1611,7 +1603,8 @@ int ide_cdrom_probe_capabilities(ide_drive_t *drive)
 
        if (drive->media == ide_optical) {
                cdi->mask &= ~(CDC_MO_DRIVE | CDC_RAM);
-               printk(KERN_ERR "%s: ATAPI magneto-optical drive\n", drive->name);
+               printk(KERN_ERR "%s: ATAPI magneto-optical drive\n",
+                               drive->name);
                return nslots;
        }
 
@@ -1622,11 +1615,10 @@ int ide_cdrom_probe_capabilities(ide_drive_t *drive)
        }
 
        /*
-        * we have to cheat a little here. the packet will eventually
-        * be queued with ide_cdrom_packet(), which extracts the
-        * drive from cdi->handle. Since this device hasn't been
-        * registered with the Uniform layer yet, it can't do this.
-        * Same goes for cdi->ops.
+        * We have to cheat a little here. The packet will eventually be queued
+        * with ide_cdrom_packet(), which extracts the drive from cdi->handle.
+        * Since this device hasn't been registered with the Uniform layer yet,
+        * it can't do this. Same goes for cdi->ops.
         */
        cdi->handle = drive;
        cdi->ops = &ide_cdrom_dops;
@@ -1695,25 +1687,14 @@ int ide_cdrom_probe_capabilities(ide_drive_t *drive)
        return nslots;
 }
 
-#ifdef CONFIG_IDE_PROC_FS
-static void ide_cdrom_add_settings(ide_drive_t *drive)
-{
-       ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1, &drive->dsc_overlap, NULL);
-}
-#else
-static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
-#endif
-
-/*
- * standard prep_rq_fn that builds 10 byte cmds
- */
+/* standard prep_rq_fn that builds 10 byte cmds */
 static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq)
 {
        int hard_sect = queue_hardsect_size(q);
        long block = (long)rq->hard_sector / (hard_sect >> 9);
        unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9);
 
-       memset(rq->cmd, 0, sizeof(rq->cmd));
+       memset(rq->cmd, 0, BLK_MAX_CDB);
 
        if (rq_data_dir(rq) == READ)
                rq->cmd[0] = GPCMD_READ_10;
@@ -1745,9 +1726,7 @@ static int ide_cdrom_prep_pc(struct request *rq)
 {
        u8 *c = rq->cmd;
 
-       /*
-        * Transform 6-byte read/write commands to the 10-byte version
-        */
+       /* transform 6-byte read/write commands to the 10-byte version */
        if (c[0] == READ_6 || c[0] == WRITE_6) {
                c[8] = c[4];
                c[5] = c[3];
@@ -1789,6 +1768,41 @@ struct cd_list_entry {
        unsigned int    cd_flags;
 };
 
+#ifdef CONFIG_IDE_PROC_FS
+static sector_t ide_cdrom_capacity(ide_drive_t *drive)
+{
+       unsigned long capacity, sectors_per_frame;
+
+       if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL))
+               return 0;
+
+       return capacity * sectors_per_frame;
+}
+
+static int proc_idecd_read_capacity(char *page, char **start, off_t off,
+                                       int count, int *eof, void *data)
+{
+       ide_drive_t *drive = data;
+       int len;
+
+       len = sprintf(page, "%llu\n", (long long)ide_cdrom_capacity(drive));
+       PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+}
+
+static ide_proc_entry_t idecd_proc[] = {
+       { "capacity", S_IFREG|S_IRUGO, proc_idecd_read_capacity, NULL },
+       { NULL, 0, NULL, NULL }
+};
+
+static void ide_cdrom_add_settings(ide_drive_t *drive)
+{
+       ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1, 1,
+                       &drive->dsc_overlap, NULL);
+}
+#else
+static inline void ide_cdrom_add_settings(ide_drive_t *drive) { ; }
+#endif
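
The idecd_proc table moved above exposes a read-only "capacity" entry whose handler prints the drive capacity in sectors. A user-space sketch that reads such an entry; the /proc/ide/hdc/capacity path is an assumption about where the legacy IDE layer places per-drive entries, so adjust it for the drive at hand:

#include <stdio.h>

int main(void)
{
	/* Assumed location of the per-drive proc entry. */
	FILE *f = fopen("/proc/ide/hdc/capacity", "r");
	unsigned long long sectors;

	if (!f) {
		perror("open");
		return 1;
	}
	if (fscanf(f, "%llu", &sectors) == 1)
		printf("%llu sectors (%llu MiB)\n",
		       sectors, sectors * 512 / (1024 * 1024));
	fclose(f);
	return 0;
}
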
+
 static const struct cd_list_entry ide_cd_quirks_list[] = {
        /* Limit transfer size per interrupt. */
        { "SAMSUNG CD-ROM SCR-2430", NULL,   IDE_CD_FLAG_LIMIT_NFRAMES      },
@@ -1846,8 +1860,7 @@ static unsigned int ide_cd_flags(struct hd_driveid *id)
        return 0;
 }
 
-static
-int ide_cdrom_setup(ide_drive_t *drive)
+static int ide_cdrom_setup(ide_drive_t *drive)
 {
        struct cdrom_info *cd = drive->driver_data;
        struct cdrom_device_info *cdi = &cd->devinfo;
@@ -1876,21 +1889,19 @@ int ide_cdrom_setup(ide_drive_t *drive)
                 id->fw_rev[4] == '1' && id->fw_rev[6] <= '2')
                cd->cd_flags |= IDE_CD_FLAG_TOCTRACKS_AS_BCD;
        else if (cd->cd_flags & IDE_CD_FLAG_SANYO_3CD)
-               cdi->sanyo_slot = 3;    /* 3 => use CD in slot 0 */
+               /* 3 => use CD in slot 0 */
+               cdi->sanyo_slot = 3;
 
        nslots = ide_cdrom_probe_capabilities(drive);
 
-       /*
-        * set correct block size
-        */
+       /* set correct block size */
        blk_queue_hardsect_size(drive->queue, CD_FRAMESIZE);
 
-       if (drive->autotune == IDE_TUNE_DEFAULT ||
-           drive->autotune == IDE_TUNE_AUTO)
-               drive->dsc_overlap = (drive->next != drive);
+       drive->dsc_overlap = (drive->next != drive);
 
        if (ide_cdrom_register(drive, nslots)) {
-               printk(KERN_ERR "%s: ide_cdrom_setup failed to register device with the cdrom driver.\n", drive->name);
+               printk(KERN_ERR "%s: %s failed to register device with the"
+                               " cdrom driver.\n", drive->name, __func__);
                cd->devinfo.handle = NULL;
                return 1;
        }
@@ -1929,33 +1940,6 @@ static void ide_cd_release(struct kref *kref)
 
 static int ide_cd_probe(ide_drive_t *);
 
-#ifdef CONFIG_IDE_PROC_FS
-static sector_t ide_cdrom_capacity(ide_drive_t *drive)
-{
-       unsigned long capacity, sectors_per_frame;
-
-       if (cdrom_read_capacity(drive, &capacity, &sectors_per_frame, NULL))
-               return 0;
-
-       return capacity * sectors_per_frame;
-}
-
-static int proc_idecd_read_capacity
-       (char *page, char **start, off_t off, int count, int *eof, void *data)
-{
-       ide_drive_t *drive = data;
-       int len;
-
-       len = sprintf(page, "%llu\n", (long long)ide_cdrom_capacity(drive));
-       PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
-}
-
-static ide_proc_entry_t idecd_proc[] = {
-       { "capacity", S_IFREG|S_IRUGO, proc_idecd_read_capacity, NULL },
-       { NULL, 0, NULL, NULL }
-};
-#endif
-
 static ide_driver_t ide_cdrom_driver = {
        .gen_driver = {
                .owner          = THIS_MODULE,
@@ -2093,7 +2077,7 @@ static struct block_device_operations idecd_ops = {
        .revalidate_disk        = idecd_revalidate_disk
 };
 
-/* options */
+/* module options */
 static char *ignore;
 
 module_param(ignore, charp, 0400);
@@ -2114,17 +2098,20 @@ static int ide_cd_probe(ide_drive_t *drive)
        /* skip drives that we were told to ignore */
        if (ignore != NULL) {
                if (strstr(ignore, drive->name)) {
-                       printk(KERN_INFO "ide-cd: ignoring drive %s\n", drive->name);
+                       printk(KERN_INFO "ide-cd: ignoring drive %s\n",
+                                        drive->name);
                        goto failed;
                }
        }
        if (drive->scsi) {
-               printk(KERN_INFO "ide-cd: passing drive %s to ide-scsi emulation.\n", drive->name);
+               printk(KERN_INFO "ide-cd: passing drive %s to ide-scsi "
+                                "emulation.\n", drive->name);
                goto failed;
        }
        info = kzalloc(sizeof(struct cdrom_info), GFP_KERNEL);
        if (info == NULL) {
-               printk(KERN_ERR "%s: Can't allocate a cdrom structure\n", drive->name);
+               printk(KERN_ERR "%s: Can't allocate a cdrom structure\n",
+                               drive->name);
                goto failed;
        }
 
index 6ed7ca0713311ca1eed5ac06c9abd1dbdff0105d..6490a2dea96b01526657615ceb7139d9b1603a55 100644 (file)
@@ -326,7 +326,7 @@ void ide_cd_log_error(const char *name, struct request *failed_command,
 
                printk(KERN_ERR "  The failed \"%s\" packet command "
                                "was: \n  \"", s);
-               for (i = 0; i < sizeof(failed_command->cmd); i++)
+               for (i = 0; i < BLK_MAX_CDB; i++)
                        printk(KERN_CONT "%02x ", failed_command->cmd[i]);
                printk(KERN_CONT "\"\n");
        }
index 8757e5ef6c95ad977de487c7c0793467cccbc6ab..653b1ade13d315950a5b22efc54d69244d203fe4 100644 (file)
@@ -102,7 +102,7 @@ ide_startstop_t ide_dma_intr (ide_drive_t *drive)
 {
        u8 stat = 0, dma_stat = 0;
 
-       dma_stat = HWIF(drive)->ide_dma_end(drive);
+       dma_stat = drive->hwif->dma_ops->dma_end(drive);
        stat = ide_read_status(drive);
 
        if (OK_STAT(stat,DRIVE_READY,drive->bad_wstat|DRQ_STAT)) {
@@ -394,7 +394,7 @@ void ide_dma_off_quietly(ide_drive_t *drive)
        drive->using_dma = 0;
        ide_toggle_bounce(drive, 0);
 
-       drive->hwif->dma_host_set(drive, 0);
+       drive->hwif->dma_ops->dma_host_set(drive, 0);
 }
 
 EXPORT_SYMBOL(ide_dma_off_quietly);
@@ -427,7 +427,7 @@ void ide_dma_on(ide_drive_t *drive)
        drive->using_dma = 1;
        ide_toggle_bounce(drive, 1);
 
-       drive->hwif->dma_host_set(drive, 1);
+       drive->hwif->dma_ops->dma_host_set(drive, 1);
 }
 
 #ifdef CONFIG_BLK_DEV_IDEDMA_SFF
@@ -464,9 +464,10 @@ int ide_dma_setup(ide_drive_t *drive)
 
        /* PRD table */
        if (hwif->mmio)
-               writel(hwif->dmatable_dma, (void __iomem *)hwif->dma_prdtable);
+               writel(hwif->dmatable_dma,
+                      (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS));
        else
-               outl(hwif->dmatable_dma, hwif->dma_prdtable);
+               outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS);
 
        /* specify r/w */
        hwif->OUTB(reading, hwif->dma_command);
@@ -482,11 +483,12 @@ int ide_dma_setup(ide_drive_t *drive)
 
 EXPORT_SYMBOL_GPL(ide_dma_setup);
 
-static void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
+void ide_dma_exec_cmd(ide_drive_t *drive, u8 command)
 {
        /* issue cmd to drive */
        ide_execute_command(drive, command, &ide_dma_intr, 2*WAIT_CMD, dma_timer_expiry);
 }
+EXPORT_SYMBOL_GPL(ide_dma_exec_cmd);
 
 void ide_dma_start(ide_drive_t *drive)
 {
@@ -532,7 +534,7 @@ int __ide_dma_end (ide_drive_t *drive)
 EXPORT_SYMBOL(__ide_dma_end);
 
 /* returns 1 if dma irq issued, 0 otherwise */
-static int __ide_dma_test_irq(ide_drive_t *drive)
+int ide_dma_test_irq(ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
        u8 dma_stat             = hwif->INB(hwif->dma_status);
@@ -542,9 +544,10 @@ static int __ide_dma_test_irq(ide_drive_t *drive)
                return 1;
        if (!drive->waiting_for_dma)
                printk(KERN_WARNING "%s: (%s) called while not waiting\n",
-                       drive->name, __FUNCTION__);
+                       drive->name, __func__);
        return 0;
 }
+EXPORT_SYMBOL_GPL(ide_dma_test_irq);
 #else
 static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
 #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
@@ -574,6 +577,7 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
 {
        struct hd_driveid *id = drive->id;
        ide_hwif_t *hwif = drive->hwif;
+       const struct ide_port_ops *port_ops = hwif->port_ops;
        unsigned int mask = 0;
 
        switch(base) {
@@ -581,8 +585,8 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
                if ((id->field_valid & 4) == 0)
                        break;
 
-               if (hwif->udma_filter)
-                       mask = hwif->udma_filter(drive);
+               if (port_ops && port_ops->udma_filter)
+                       mask = port_ops->udma_filter(drive);
                else
                        mask = hwif->ultra_mask;
                mask &= id->dma_ultra;
@@ -598,8 +602,8 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base, u8 req_mode)
        case XFER_MW_DMA_0:
                if ((id->field_valid & 2) == 0)
                        break;
-               if (hwif->mdma_filter)
-                       mask = hwif->mdma_filter(drive);
+               if (port_ops && port_ops->mdma_filter)
+                       mask = port_ops->mdma_filter(drive);
                else
                        mask = hwif->mwdma_mask;
                mask &= id->dma_mword;
@@ -801,15 +805,15 @@ void ide_dma_timeout (ide_drive_t *drive)
 
        printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
 
-       if (hwif->ide_dma_test_irq(drive))
+       if (hwif->dma_ops->dma_test_irq(drive))
                return;
 
-       hwif->ide_dma_end(drive);
+       hwif->dma_ops->dma_end(drive);
 }
 
 EXPORT_SYMBOL(ide_dma_timeout);
 
-static void ide_release_dma_engine(ide_hwif_t *hwif)
+void ide_release_dma_engine(ide_hwif_t *hwif)
 {
        if (hwif->dmatable_cpu) {
                struct pci_dev *pdev = to_pci_dev(hwif->dev);
@@ -820,28 +824,7 @@ static void ide_release_dma_engine(ide_hwif_t *hwif)
        }
 }
 
-static int ide_release_iomio_dma(ide_hwif_t *hwif)
-{
-       release_region(hwif->dma_base, 8);
-       if (hwif->extra_ports)
-               release_region(hwif->extra_base, hwif->extra_ports);
-       return 1;
-}
-
-/*
- * Needed for allowing full modular support of ide-driver
- */
-int ide_release_dma(ide_hwif_t *hwif)
-{
-       ide_release_dma_engine(hwif);
-
-       if (hwif->mmio)
-               return 1;
-       else
-               return ide_release_iomio_dma(hwif);
-}
-
-static int ide_allocate_dma_engine(ide_hwif_t *hwif)
+int ide_allocate_dma_engine(ide_hwif_t *hwif)
 {
        struct pci_dev *pdev = to_pci_dev(hwif->dev);
 
@@ -853,99 +836,33 @@ static int ide_allocate_dma_engine(ide_hwif_t *hwif)
                return 0;
 
        printk(KERN_ERR "%s: -- Error, unable to allocate DMA table.\n",
-              hwif->cds->name);
+                       hwif->name);
 
        return 1;
 }
-
-static int ide_mapped_mmio_dma(ide_hwif_t *hwif, unsigned long base)
-{
-       printk(KERN_INFO "    %s: MMIO-DMA ", hwif->name);
-
-       return 0;
-}
-
-static int ide_iomio_dma(ide_hwif_t *hwif, unsigned long base)
-{
-       printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx",
-              hwif->name, base, base + 7);
-
-       if (!request_region(base, 8, hwif->name)) {
-               printk(" -- Error, ports in use.\n");
-               return 1;
-       }
-
-       if (hwif->cds->extra) {
-               hwif->extra_base = base + (hwif->channel ? 8 : 16);
-
-               if (!hwif->mate || !hwif->mate->extra_ports) {
-                       if (!request_region(hwif->extra_base,
-                                           hwif->cds->extra, hwif->cds->name)) {
-                               printk(" -- Error, extra ports in use.\n");
-                               release_region(base, 8);
-                               return 1;
-                       }
-                       hwif->extra_ports = hwif->cds->extra;
-               }
-       }
-
-       return 0;
-}
-
-static int ide_dma_iobase(ide_hwif_t *hwif, unsigned long base)
-{
-       if (hwif->mmio)
-               return ide_mapped_mmio_dma(hwif, base);
-
-       return ide_iomio_dma(hwif, base);
-}
+EXPORT_SYMBOL_GPL(ide_allocate_dma_engine);
+
+static const struct ide_dma_ops sff_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ide_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = ide_dma_start,
+       .dma_end                = __ide_dma_end,
+       .dma_test_irq           = ide_dma_test_irq,
+       .dma_timeout            = ide_dma_timeout,
+       .dma_lost_irq           = ide_dma_lost_irq,
+};
 
 void ide_setup_dma(ide_hwif_t *hwif, unsigned long base)
 {
-       u8 dma_stat;
-
-       if (ide_dma_iobase(hwif, base))
-               return;
-
-       if (ide_allocate_dma_engine(hwif)) {
-               ide_release_dma(hwif);
-               return;
-       }
-
        hwif->dma_base = base;
 
        if (!hwif->dma_command)
                hwif->dma_command       = hwif->dma_base + 0;
-       if (!hwif->dma_vendor1)
-               hwif->dma_vendor1       = hwif->dma_base + 1;
        if (!hwif->dma_status)
                hwif->dma_status        = hwif->dma_base + 2;
-       if (!hwif->dma_vendor3)
-               hwif->dma_vendor3       = hwif->dma_base + 3;
-       if (!hwif->dma_prdtable)
-               hwif->dma_prdtable      = hwif->dma_base + 4;
-
-       if (!hwif->dma_host_set)
-               hwif->dma_host_set = &ide_dma_host_set;
-       if (!hwif->dma_setup)
-               hwif->dma_setup = &ide_dma_setup;
-       if (!hwif->dma_exec_cmd)
-               hwif->dma_exec_cmd = &ide_dma_exec_cmd;
-       if (!hwif->dma_start)
-               hwif->dma_start = &ide_dma_start;
-       if (!hwif->ide_dma_end)
-               hwif->ide_dma_end = &__ide_dma_end;
-       if (!hwif->ide_dma_test_irq)
-               hwif->ide_dma_test_irq = &__ide_dma_test_irq;
-       if (!hwif->dma_timeout)
-               hwif->dma_timeout = &ide_dma_timeout;
-       if (!hwif->dma_lost_irq)
-               hwif->dma_lost_irq = &ide_dma_lost_irq;
 
-       dma_stat = hwif->INB(hwif->dma_status);
-       printk(KERN_CONT ", BIOS settings: %s:%s, %s:%s\n",
-              hwif->drives[0].name, (dma_stat & 0x20) ? "DMA" : "PIO",
-              hwif->drives[1].name, (dma_stat & 0x40) ? "DMA" : "PIO");
+       hwif->dma_ops = &sff_dma_ops;
 }
 
 EXPORT_SYMBOL_GPL(ide_setup_dma);
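
The tail of this hunk replaces the per-hwif DMA method pointers with one shared const table, sff_dma_ops, reached through hwif->dma_ops; the table is set up once and every call site indirects through it. A minimal standalone sketch of that const-ops-table pattern, using invented stand-in types and functions rather than the real ide structures:

#include <stdio.h>

struct chan;                        /* stand-in for a host interface */

/* Method table shared by every channel of this controller type. */
struct dma_ops {
	int  (*dma_setup)(struct chan *);
	void (*dma_start)(struct chan *);
	int  (*dma_end)(struct chan *);
};

struct chan {
	const char *name;
	const struct dma_ops *dma_ops;  /* one pointer instead of N callbacks */
};

static int  generic_setup(struct chan *c) { printf("%s: setup\n", c->name); return 0; }
static void generic_start(struct chan *c) { printf("%s: start\n", c->name); }
static int  generic_end(struct chan *c)   { printf("%s: end\n", c->name); return 0; }

static const struct dma_ops generic_dma_ops = {
	.dma_setup = generic_setup,
	.dma_start = generic_start,
	.dma_end   = generic_end,
};

int main(void)
{
	struct chan c = { .name = "chan0", .dma_ops = &generic_dma_ops };

	if (!c.dma_ops->dma_setup(&c)) {
		c.dma_ops->dma_start(&c);
		c.dma_ops->dma_end(&c);
	}
	return 0;
}

A host driver with quirky hardware would point dma_ops at its own table instead of patching individual function pointers, which is what allows the default table to stay const.
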
index ed19a8bbd2d2bb3ed84deafadd943efa1316a476..f05fbc2bd7a89ac597c1261acfc143f6df21d173 100644 (file)
@@ -231,6 +231,7 @@ static int idefloppy_end_request(ide_drive_t *drive, int uptodate, int nsecs)
 static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
                                  unsigned int bcount, int direction)
 {
+       ide_hwif_t *hwif = drive->hwif;
        struct request *rq = pc->rq;
        struct req_iterator iter;
        struct bio_vec *bvec;
@@ -246,9 +247,9 @@ static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
 
                data = bvec_kmap_irq(bvec, &flags);
                if (direction)
-                       drive->hwif->atapi_output_bytes(drive, data, count);
+                       hwif->output_data(drive, NULL, data, count);
                else
-                       drive->hwif->atapi_input_bytes(drive, data, count);
+                       hwif->input_data(drive, NULL, data, count);
                bvec_kunmap_irq(data, &flags);
 
                bcount -= count;
@@ -261,10 +262,7 @@ static void ide_floppy_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
        if (bcount) {
                printk(KERN_ERR "%s: leftover data in %s, bcount == %d\n",
                                drive->name, __func__, bcount);
-               if (direction)
-                       ide_atapi_write_zeros(drive, bcount);
-               else
-                       ide_atapi_discard_data(drive, bcount);
+               ide_pad_transfer(drive, direction, bcount);
        }
 }
 
@@ -411,7 +409,7 @@ static ide_startstop_t idefloppy_pc_intr(ide_drive_t *drive)
        debug_log("Reached %s interrupt handler\n", __func__);
 
        if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
-               dma_error = hwif->ide_dma_end(drive);
+               dma_error = hwif->dma_ops->dma_end(drive);
                if (dma_error) {
                        printk(KERN_ERR "%s: DMA %s error\n", drive->name,
                                        rq_data_dir(rq) ? "write" : "read");
@@ -465,10 +463,10 @@ static ide_startstop_t idefloppy_pc_intr(ide_drive_t *drive)
        }
 
        /* Get the number of bytes to transfer */
-       bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
-                 hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
+       bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
+                 hwif->INB(hwif->io_ports.lbam_addr);
        /* on this interrupt */
-       ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+       ireason = hwif->INB(hwif->io_ports.nsect_addr);
 
        if (ireason & CD) {
                printk(KERN_ERR "ide-floppy: CoD != 0 in %s\n", __func__);
@@ -490,7 +488,7 @@ static ide_startstop_t idefloppy_pc_intr(ide_drive_t *drive)
                                printk(KERN_ERR "ide-floppy: The floppy wants "
                                        "to send us more data than expected "
                                        "- discarding data\n");
-                               ide_atapi_discard_data(drive, bcount);
+                               ide_pad_transfer(drive, 0, bcount);
 
                                ide_set_handler(drive,
                                                &idefloppy_pc_intr,
@@ -503,12 +501,12 @@ static ide_startstop_t idefloppy_pc_intr(ide_drive_t *drive)
                }
        }
        if (pc->flags & PC_FLAG_WRITING)
-               xferfunc = hwif->atapi_output_bytes;
+               xferfunc = hwif->output_data;
        else
-               xferfunc = hwif->atapi_input_bytes;
+               xferfunc = hwif->input_data;
 
        if (pc->buf)
-               xferfunc(drive, pc->cur_pos, bcount);
+               xferfunc(drive, NULL, pc->cur_pos, bcount);
        else
                ide_floppy_io_buffers(drive, pc, bcount,
                                      !!(pc->flags & PC_FLAG_WRITING));
@@ -539,7 +537,7 @@ static ide_startstop_t idefloppy_transfer_pc(ide_drive_t *drive)
                                "initiated yet DRQ isn't asserted\n");
                return startstop;
        }
-       ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+       ireason = hwif->INB(hwif->io_ports.nsect_addr);
        if ((ireason & CD) == 0 || (ireason & IO)) {
                printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) while "
                                "issuing a packet command\n");
@@ -548,8 +546,10 @@ static ide_startstop_t idefloppy_transfer_pc(ide_drive_t *drive)
 
        /* Set the interrupt routine */
        ide_set_handler(drive, &idefloppy_pc_intr, IDEFLOPPY_WAIT_CMD, NULL);
+
        /* Send the actual packet */
-       HWIF(drive)->atapi_output_bytes(drive, floppy->pc->c, 12);
+       hwif->output_data(drive, NULL, floppy->pc->c, 12);
+
        return ide_started;
 }
 
@@ -569,7 +569,8 @@ static int idefloppy_transfer_pc2(ide_drive_t *drive)
        idefloppy_floppy_t *floppy = drive->driver_data;
 
        /* Send the actual packet */
-       HWIF(drive)->atapi_output_bytes(drive, floppy->pc->c, 12);
+       drive->hwif->output_data(drive, NULL, floppy->pc->c, 12);
+
        /* Timeout for the packet command */
        return IDEFLOPPY_WAIT_CMD;
 }
@@ -586,7 +587,7 @@ static ide_startstop_t idefloppy_transfer_pc1(ide_drive_t *drive)
                                "initiated yet DRQ isn't asserted\n");
                return startstop;
        }
-       ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+       ireason = hwif->INB(hwif->io_ports.nsect_addr);
        if ((ireason & CD) == 0 || (ireason & IO)) {
                printk(KERN_ERR "ide-floppy: (IO,CoD) != (0,1) "
                                "while issuing a packet command\n");
@@ -663,7 +664,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
        dma = 0;
 
        if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
-               dma = !hwif->dma_setup(drive);
+               dma = !hwif->dma_ops->dma_setup(drive);
 
        ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
                           IDE_TFLAG_OUT_DEVICE, bcount, dma);
@@ -671,7 +672,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
        if (dma) {
                /* Begin DMA, if necessary */
                pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
-               hwif->dma_start(drive);
+               hwif->dma_ops->dma_start(drive);
        }
 
        /* Can we transfer the packet when we get the interrupt or wait? */
@@ -692,7 +693,7 @@ static ide_startstop_t idefloppy_issue_pc(ide_drive_t *drive,
                return ide_started;
        } else {
                /* Issue the packet command */
-               hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
+               ide_execute_pkt_cmd(drive);
                return (*pkt_xfer_routine) (drive);
        }
 }
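
For reference, a minimal sketch of the packet-issue sequence under the new hooks used in the ide-floppy hunks above, with hwif->output_data() and ide_execute_pkt_cmd() standing in for atapi_output_bytes() and the raw OUTB of WIN_PACKETCMD. The name example_issue_packet() is illustrative only, and the sketch assumes the <linux/ide.h> declarations from this tree.

static ide_startstop_t example_issue_packet(ide_drive_t *drive, u8 *pkt)
{
	ide_hwif_t *hwif = drive->hwif;

	/* issue the PACKET command (replaces the raw OUTB of WIN_PACKETCMD) */
	ide_execute_pkt_cmd(drive);

	/* ... the IRQ/poll path waits for DRQ here, as idefloppy_transfer_pc() does ... */

	/* send the 12-byte ATAPI packet through the unified data hook */
	hwif->output_data(drive, NULL, pkt, 12);

	return ide_started;
}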
index 19f63e393d18766f0511f9f04ea178f0f57ab3c9..a6073e248f450df8e09408652024b58dcc348d95 100644 (file)
@@ -94,7 +94,24 @@ static int __init ide_generic_init(void)
                unsigned long io_addr = ide_default_io_base(i);
                hw_regs_t hw;
 
+               idx[i] = 0xff;
+
                if (io_addr) {
+                       if (!request_region(io_addr, 8, DRV_NAME)) {
+                               printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX "
+                                               "not free.\n",
+                                               DRV_NAME, io_addr, io_addr + 7);
+                               continue;
+                       }
+
+                       if (!request_region(io_addr + 0x206, 1, DRV_NAME)) {
+                               printk(KERN_ERR "%s: I/O resource 0x%lX "
+                                               "not free.\n",
+                                               DRV_NAME, io_addr + 0x206);
+                               release_region(io_addr, 8);
+                               continue;
+                       }
+
                        /*
                         * Skip probing if the corresponding
                         * slot is already occupied.
@@ -111,8 +128,7 @@ static int __init ide_generic_init(void)
                        ide_init_port_hw(hwif, &hw);
 
                        idx[i] = i;
-               } else
-                       idx[i] = 0xff;
+               }
        }
 
        ide_device_add_all(idx, NULL);
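
The two request_region() calls added above pair the 8-byte task-file block with the single control port at io_addr + 0x206 and undo the first claim when the second one fails. A condensed sketch of that claim/unwind pattern follows; example_claim_ports() and the "ide-example" owner string are illustrative only.

static int example_claim_ports(unsigned long io_addr)
{
	if (!request_region(io_addr, 8, "ide-example"))
		return -EBUSY;

	if (!request_region(io_addr + 0x206, 1, "ide-example")) {
		/* unwind the first reservation before bailing out */
		release_region(io_addr, 8);
		return -EBUSY;
	}

	return 0;
}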
index 51d181ee9cf7332a40deaabd4a403955bcf3825c..696525342e9a56977c1d2ac88d72497be5a7e866 100644 (file)
@@ -218,7 +218,7 @@ static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *
                 * we could be smarter and check for current xfer_speed
                 * in struct drive etc...
                 */
-               if (drive->hwif->dma_host_set == NULL)
+               if (drive->hwif->dma_ops == NULL)
                        break;
                /*
                 * TODO: respect ->using_dma setting
@@ -295,54 +295,6 @@ static void ide_complete_pm_request (ide_drive_t *drive, struct request *rq)
        spin_unlock_irqrestore(&ide_lock, flags);
 }
 
-void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
-{
-       ide_hwif_t *hwif = drive->hwif;
-       struct ide_taskfile *tf = &task->tf;
-
-       if (task->tf_flags & IDE_TFLAG_IN_DATA) {
-               u16 data = hwif->INW(hwif->io_ports[IDE_DATA_OFFSET]);
-
-               tf->data = data & 0xff;
-               tf->hob_data = (data >> 8) & 0xff;
-       }
-
-       /* be sure we're looking at the low order bits */
-       hwif->OUTB(drive->ctl & ~0x80, hwif->io_ports[IDE_CONTROL_OFFSET]);
-
-       if (task->tf_flags & IDE_TFLAG_IN_NSECT)
-               tf->nsect  = hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]);
-       if (task->tf_flags & IDE_TFLAG_IN_LBAL)
-               tf->lbal   = hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]);
-       if (task->tf_flags & IDE_TFLAG_IN_LBAM)
-               tf->lbam   = hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]);
-       if (task->tf_flags & IDE_TFLAG_IN_LBAH)
-               tf->lbah   = hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]);
-       if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
-               tf->device = hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]);
-
-       if (task->tf_flags & IDE_TFLAG_LBA48) {
-               hwif->OUTB(drive->ctl | 0x80,
-                          hwif->io_ports[IDE_CONTROL_OFFSET]);
-
-               if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
-                       tf->hob_feature =
-                               hwif->INB(hwif->io_ports[IDE_FEATURE_OFFSET]);
-               if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
-                       tf->hob_nsect   =
-                               hwif->INB(hwif->io_ports[IDE_NSECTOR_OFFSET]);
-               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
-                       tf->hob_lbal    =
-                               hwif->INB(hwif->io_ports[IDE_SECTOR_OFFSET]);
-               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
-                       tf->hob_lbam    =
-                               hwif->INB(hwif->io_ports[IDE_LCYL_OFFSET]);
-               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
-                       tf->hob_lbah    =
-                               hwif->INB(hwif->io_ports[IDE_HCYL_OFFSET]);
-       }
-}
-
 /**
  *     ide_end_drive_cmd       -       end an explicit drive command
  *     @drive: command 
@@ -378,7 +330,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
                        tf->error = err;
                        tf->status = stat;
 
-                       ide_tf_read(drive, task);
+                       drive->hwif->tf_read(drive, task);
 
                        if (task->tf_flags & IDE_TFLAG_DYN)
                                kfree(task);
@@ -427,7 +379,7 @@ static void try_to_flush_leftover_data (ide_drive_t *drive)
                u32 wcount = (i > 16) ? 16 : i;
 
                i -= wcount;
-               HWIF(drive)->ata_input_data(drive, buffer, wcount);
+               drive->hwif->input_data(drive, NULL, buffer, wcount * 4);
        }
 }
 
@@ -454,7 +406,7 @@ static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8
                if (err == ABRT_ERR) {
                        if (drive->select.b.lba &&
                            /* some newer drives don't support WIN_SPECIFY */
-                           hwif->INB(hwif->io_ports[IDE_COMMAND_OFFSET]) ==
+                           hwif->INB(hwif->io_ports.command_addr) ==
                                WIN_SPECIFY)
                                return ide_stopped;
                } else if ((err & BAD_CRC) == BAD_CRC) {
@@ -507,8 +459,8 @@ static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u
 
        if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
                /* force an abort */
-               hwif->OUTB(WIN_IDLEIMMEDIATE,
-                          hwif->io_ports[IDE_COMMAND_OFFSET]);
+               hwif->OUTBSYNC(drive, WIN_IDLEIMMEDIATE,
+                              hwif->io_ports.command_addr);
 
        if (rq->errors >= ERROR_MAX) {
                ide_kill_rq(drive, rq);
@@ -721,6 +673,7 @@ static ide_startstop_t do_special (ide_drive_t *drive)
 #endif
        if (s->b.set_tune) {
                ide_hwif_t *hwif = drive->hwif;
+               const struct ide_port_ops *port_ops = hwif->port_ops;
                u8 req_pio = drive->tune_req;
 
                s->b.set_tune = 0;
@@ -733,10 +686,10 @@ static ide_startstop_t do_special (ide_drive_t *drive)
                                unsigned long flags;
 
                                spin_lock_irqsave(&ide_lock, flags);
-                               hwif->set_pio_mode(drive, req_pio);
+                               port_ops->set_pio_mode(drive, req_pio);
                                spin_unlock_irqrestore(&ide_lock, flags);
                        } else
-                               hwif->set_pio_mode(drive, req_pio);
+                               port_ops->set_pio_mode(drive, req_pio);
                } else {
                        int keep_dma = drive->using_dma;
 
@@ -1237,12 +1190,12 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
 
        if (error < 0) {
                printk(KERN_WARNING "%s: DMA timeout error\n", drive->name);
-               (void)HWIF(drive)->ide_dma_end(drive);
+               (void)hwif->dma_ops->dma_end(drive);
                ret = ide_error(drive, "dma timeout error",
                                ide_read_status(drive));
        } else {
                printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
-               hwif->dma_timeout(drive);
+               hwif->dma_ops->dma_timeout(drive);
        }
 
        /*
@@ -1354,7 +1307,7 @@ void ide_timer_expiry (unsigned long data)
                                startstop = handler(drive);
                        } else if (drive_is_ready(drive)) {
                                if (drive->waiting_for_dma)
-                                       hwgroup->hwif->dma_lost_irq(drive);
+                                       hwif->dma_ops->dma_lost_irq(drive);
                                (void)ide_ack_intr(hwif);
                                printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
                                startstop = handler(drive);
@@ -1420,7 +1373,7 @@ static void unexpected_intr (int irq, ide_hwgroup_t *hwgroup)
         */
        do {
                if (hwif->irq == irq) {
-                       stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+                       stat = hwif->INB(hwif->io_ports.status_addr);
                        if (!OK_STAT(stat, READY_STAT, BAD_STAT)) {
                                /* Try to not flood the console with msgs */
                                static unsigned long last_msgtime, count;
@@ -1510,7 +1463,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
                         * Whack the status register, just in case
                         * we have a leftover pending IRQ.
                         */
-                       (void) hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+                       (void) hwif->INB(hwif->io_ports.status_addr);
 #endif /* CONFIG_BLK_DEV_IDEPCI */
                }
                spin_unlock_irqrestore(&ide_lock, flags);
@@ -1597,8 +1550,7 @@ irqreturn_t ide_intr (int irq, void *dev_id)
 
 void ide_init_drive_cmd (struct request *rq)
 {
-       memset(rq, 0, sizeof(*rq));
-       rq->ref_count = 1;
+       blk_rq_init(NULL, rq);
 }
 
 EXPORT_SYMBOL(ide_init_drive_cmd);
@@ -1684,7 +1636,23 @@ void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma)
        task.tf.lbam    = bcount & 0xff;
        task.tf.lbah    = (bcount >> 8) & 0xff;
 
-       ide_tf_load(drive, &task);
+       ide_tf_dump(drive->name, &task.tf);
+       drive->hwif->tf_load(drive, &task);
 }
 
 EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load);
+
+void ide_pad_transfer(ide_drive_t *drive, int write, int len)
+{
+       ide_hwif_t *hwif = drive->hwif;
+       u8 buf[4] = { 0 };
+
+       while (len > 0) {
+               if (write)
+                       hwif->output_data(drive, NULL, buf, min(4, len));
+               else
+                       hwif->input_data(drive, NULL, buf, min(4, len));
+               len -= 4;
+       }
+}
+EXPORT_SYMBOL_GPL(ide_pad_transfer);
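
A minimal usage sketch for the helper exported above: one call now covers both of the old ide_atapi_write_zeros() and ide_atapi_discard_data() cases, selected by the write flag, mirroring the ide-floppy conversion earlier in this diff. The wrapper name is illustrative only.

static void example_finish_transfer(ide_drive_t *drive, int writing, int leftover)
{
	/* pad a short write with zeros, or read and discard surplus data */
	if (leftover)
		ide_pad_transfer(drive, writing, leftover);
}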
index 45944219eea013f37060b6fd67a306afc2b33192..57d9a9a79a6f845842a9b1baeb7d20254754dda8 100644 (file)
@@ -37,21 +37,6 @@ static u8 ide_inb (unsigned long port)
        return (u8) inb(port);
 }
 
-static u16 ide_inw (unsigned long port)
-{
-       return (u16) inw(port);
-}
-
-static void ide_insw (unsigned long port, void *addr, u32 count)
-{
-       insw(port, addr, count);
-}
-
-static void ide_insl (unsigned long port, void *addr, u32 count)
-{
-       insl(port, addr, count);
-}
-
 static void ide_outb (u8 val, unsigned long port)
 {
        outb(val, port);
@@ -62,32 +47,11 @@ static void ide_outbsync (ide_drive_t *drive, u8 addr, unsigned long port)
        outb(addr, port);
 }
 
-static void ide_outw (u16 val, unsigned long port)
-{
-       outw(val, port);
-}
-
-static void ide_outsw (unsigned long port, void *addr, u32 count)
-{
-       outsw(port, addr, count);
-}
-
-static void ide_outsl (unsigned long port, void *addr, u32 count)
-{
-       outsl(port, addr, count);
-}
-
 void default_hwif_iops (ide_hwif_t *hwif)
 {
        hwif->OUTB      = ide_outb;
        hwif->OUTBSYNC  = ide_outbsync;
-       hwif->OUTW      = ide_outw;
-       hwif->OUTSW     = ide_outsw;
-       hwif->OUTSL     = ide_outsl;
        hwif->INB       = ide_inb;
-       hwif->INW       = ide_inw;
-       hwif->INSW      = ide_insw;
-       hwif->INSL      = ide_insl;
 }
 
 /*
@@ -99,21 +63,6 @@ static u8 ide_mm_inb (unsigned long port)
        return (u8) readb((void __iomem *) port);
 }
 
-static u16 ide_mm_inw (unsigned long port)
-{
-       return (u16) readw((void __iomem *) port);
-}
-
-static void ide_mm_insw (unsigned long port, void *addr, u32 count)
-{
-       __ide_mm_insw((void __iomem *) port, addr, count);
-}
-
-static void ide_mm_insl (unsigned long port, void *addr, u32 count)
-{
-       __ide_mm_insl((void __iomem *) port, addr, count);
-}
-
 static void ide_mm_outb (u8 value, unsigned long port)
 {
        writeb(value, (void __iomem *) port);
@@ -124,34 +73,13 @@ static void ide_mm_outbsync (ide_drive_t *drive, u8 value, unsigned long port)
        writeb(value, (void __iomem *) port);
 }
 
-static void ide_mm_outw (u16 value, unsigned long port)
-{
-       writew(value, (void __iomem *) port);
-}
-
-static void ide_mm_outsw (unsigned long port, void *addr, u32 count)
-{
-       __ide_mm_outsw((void __iomem *) port, addr, count);
-}
-
-static void ide_mm_outsl (unsigned long port, void *addr, u32 count)
-{
-       __ide_mm_outsl((void __iomem *) port, addr, count);
-}
-
 void default_hwif_mmiops (ide_hwif_t *hwif)
 {
        hwif->OUTB      = ide_mm_outb;
        /* Most systems will need to override OUTBSYNC, alas however
           this one is controller specific! */
        hwif->OUTBSYNC  = ide_mm_outbsync;
-       hwif->OUTW      = ide_mm_outw;
-       hwif->OUTSW     = ide_mm_outsw;
-       hwif->OUTSL     = ide_mm_outsl;
        hwif->INB       = ide_mm_inb;
-       hwif->INW       = ide_mm_inw;
-       hwif->INSW      = ide_mm_insw;
-       hwif->INSL      = ide_mm_insl;
 }
 
 EXPORT_SYMBOL(default_hwif_mmiops);
@@ -159,17 +87,137 @@ EXPORT_SYMBOL(default_hwif_mmiops);
 void SELECT_DRIVE (ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
+       const struct ide_port_ops *port_ops = hwif->port_ops;
 
-       if (hwif->selectproc)
-               hwif->selectproc(drive);
+       if (port_ops && port_ops->selectproc)
+               port_ops->selectproc(drive);
 
-       hwif->OUTB(drive->select.all, hwif->io_ports[IDE_SELECT_OFFSET]);
+       hwif->OUTB(drive->select.all, hwif->io_ports.device_addr);
 }
 
 void SELECT_MASK (ide_drive_t *drive, int mask)
 {
-       if (HWIF(drive)->maskproc)
-               HWIF(drive)->maskproc(drive, mask);
+       const struct ide_port_ops *port_ops = drive->hwif->port_ops;
+
+       if (port_ops && port_ops->maskproc)
+               port_ops->maskproc(drive, mask);
+}
+
+static void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
+{
+       ide_hwif_t *hwif = drive->hwif;
+       struct ide_io_ports *io_ports = &hwif->io_ports;
+       struct ide_taskfile *tf = &task->tf;
+       void (*tf_outb)(u8 addr, unsigned long port);
+       u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+       u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
+
+       if (mmio)
+               tf_outb = ide_mm_outb;
+       else
+               tf_outb = ide_outb;
+
+       if (task->tf_flags & IDE_TFLAG_FLAGGED)
+               HIHI = 0xFF;
+
+       ide_set_irq(drive, 1);
+
+       if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0)
+               SELECT_MASK(drive, 0);
+
+       if (task->tf_flags & IDE_TFLAG_OUT_DATA) {
+               u16 data = (tf->hob_data << 8) | tf->data;
+
+               if (mmio)
+                       writew(data, (void __iomem *)io_ports->data_addr);
+               else
+                       outw(data, io_ports->data_addr);
+       }
+
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
+               tf_outb(tf->hob_feature, io_ports->feature_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
+               tf_outb(tf->hob_nsect, io_ports->nsect_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
+               tf_outb(tf->hob_lbal, io_ports->lbal_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
+               tf_outb(tf->hob_lbam, io_ports->lbam_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
+               tf_outb(tf->hob_lbah, io_ports->lbah_addr);
+
+       if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
+               tf_outb(tf->feature, io_ports->feature_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
+               tf_outb(tf->nsect, io_ports->nsect_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
+               tf_outb(tf->lbal, io_ports->lbal_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
+               tf_outb(tf->lbam, io_ports->lbam_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
+               tf_outb(tf->lbah, io_ports->lbah_addr);
+
+       if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
+               tf_outb((tf->device & HIHI) | drive->select.all,
+                        io_ports->device_addr);
+}
+
+static void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
+{
+       ide_hwif_t *hwif = drive->hwif;
+       struct ide_io_ports *io_ports = &hwif->io_ports;
+       struct ide_taskfile *tf = &task->tf;
+       void (*tf_outb)(u8 addr, unsigned long port);
+       u8 (*tf_inb)(unsigned long port);
+       u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+
+       if (mmio) {
+               tf_outb = ide_mm_outb;
+               tf_inb  = ide_mm_inb;
+       } else {
+               tf_outb = ide_outb;
+               tf_inb  = ide_inb;
+       }
+
+       if (task->tf_flags & IDE_TFLAG_IN_DATA) {
+               u16 data;
+
+               if (mmio)
+                       data = readw((void __iomem *)io_ports->data_addr);
+               else
+                       data = inw(io_ports->data_addr);
+
+               tf->data = data & 0xff;
+               tf->hob_data = (data >> 8) & 0xff;
+       }
+
+       /* be sure we're looking at the low order bits */
+       tf_outb(drive->ctl & ~0x80, io_ports->ctl_addr);
+
+       if (task->tf_flags & IDE_TFLAG_IN_NSECT)
+               tf->nsect  = tf_inb(io_ports->nsect_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAL)
+               tf->lbal   = tf_inb(io_ports->lbal_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAM)
+               tf->lbam   = tf_inb(io_ports->lbam_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAH)
+               tf->lbah   = tf_inb(io_ports->lbah_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
+               tf->device = tf_inb(io_ports->device_addr);
+
+       if (task->tf_flags & IDE_TFLAG_LBA48) {
+               tf_outb(drive->ctl | 0x80, io_ports->ctl_addr);
+
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
+                       tf->hob_feature = tf_inb(io_ports->feature_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
+                       tf->hob_nsect   = tf_inb(io_ports->nsect_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
+                       tf->hob_lbal    = tf_inb(io_ports->lbal_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
+                       tf->hob_lbam    = tf_inb(io_ports->lbam_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
+                       tf->hob_lbah    = tf_inb(io_ports->lbah_addr);
+       }
 }
 
 /*
@@ -179,115 +227,112 @@ void SELECT_MASK (ide_drive_t *drive, int mask)
  * of the sector count register location, with interrupts disabled
  * to ensure that the reads all happen together.
  */
-static void ata_vlb_sync(ide_drive_t *drive, unsigned long port)
+static void ata_vlb_sync(unsigned long port)
 {
-       (void) HWIF(drive)->INB(port);
-       (void) HWIF(drive)->INB(port);
-       (void) HWIF(drive)->INB(port);
+       (void)inb(port);
+       (void)inb(port);
+       (void)inb(port);
 }
 
 /*
  * This is used for most PIO data transfers *from* the IDE interface
+ *
+ * These routines will round up any request for an odd number of bytes,
+ * so if an odd len is specified, be sure that there's at least one
+ * extra byte allocated for the buffer.
  */
-static void ata_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
+static void ata_input_data(ide_drive_t *drive, struct request *rq,
+                          void *buf, unsigned int len)
 {
-       ide_hwif_t *hwif        = HWIF(drive);
-       u8 io_32bit             = drive->io_32bit;
+       ide_hwif_t *hwif = drive->hwif;
+       struct ide_io_ports *io_ports = &hwif->io_ports;
+       unsigned long data_addr = io_ports->data_addr;
+       u8 io_32bit = drive->io_32bit;
+       u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
+
+       len++;
 
        if (io_32bit) {
-               if (io_32bit & 2) {
-                       unsigned long flags;
+               unsigned long uninitialized_var(flags);
 
+               if ((io_32bit & 2) && !mmio) {
                        local_irq_save(flags);
-                       ata_vlb_sync(drive, hwif->io_ports[IDE_NSECTOR_OFFSET]);
-                       hwif->INSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
-                                  wcount);
+                       ata_vlb_sync(io_ports->nsect_addr);
+               }
+
+               if (mmio)
+                       __ide_mm_insl((void __iomem *)data_addr, buf, len / 4);
+               else
+                       insl(data_addr, buf, len / 4);
+
+               if ((io_32bit & 2) && !mmio)
                        local_irq_restore(flags);
-               } else
-                       hwif->INSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
-                                  wcount);
-       } else
-               hwif->INSW(hwif->io_ports[IDE_DATA_OFFSET], buffer,
-                          wcount << 1);
+
+               if ((len & 3) >= 2) {
+                       if (mmio)
+                               __ide_mm_insw((void __iomem *)data_addr,
+                                               (u8 *)buf + (len & ~3), 1);
+                       else
+                               insw(data_addr, (u8 *)buf + (len & ~3), 1);
+               }
+       } else {
+               if (mmio)
+                       __ide_mm_insw((void __iomem *)data_addr, buf, len / 2);
+               else
+                       insw(data_addr, buf, len / 2);
+       }
 }
 
 /*
  * This is used for most PIO data transfers *to* the IDE interface
  */
-static void ata_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
+static void ata_output_data(ide_drive_t *drive, struct request *rq,
+                           void *buf, unsigned int len)
 {
-       ide_hwif_t *hwif        = HWIF(drive);
-       u8 io_32bit             = drive->io_32bit;
+       ide_hwif_t *hwif = drive->hwif;
+       struct ide_io_ports *io_ports = &hwif->io_ports;
+       unsigned long data_addr = io_ports->data_addr;
+       u8 io_32bit = drive->io_32bit;
+       u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
 
        if (io_32bit) {
-               if (io_32bit & 2) {
-                       unsigned long flags;
+               unsigned long uninitialized_var(flags);
 
+               if ((io_32bit & 2) && !mmio) {
                        local_irq_save(flags);
-                       ata_vlb_sync(drive, hwif->io_ports[IDE_NSECTOR_OFFSET]);
-                       hwif->OUTSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
-                                   wcount);
-                       local_irq_restore(flags);
-               } else
-                       hwif->OUTSL(hwif->io_ports[IDE_DATA_OFFSET], buffer,
-                                   wcount);
-       } else
-               hwif->OUTSW(hwif->io_ports[IDE_DATA_OFFSET], buffer,
-                           wcount << 1);
-}
-
-/*
- * The following routines are mainly used by the ATAPI drivers.
- *
- * These routines will round up any request for an odd number of bytes,
- * so if an odd bytecount is specified, be sure that there's at least one
- * extra byte allocated for the buffer.
- */
-
-static void atapi_input_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
-{
-       ide_hwif_t *hwif = HWIF(drive);
+                       ata_vlb_sync(io_ports->nsect_addr);
+               }
 
-       ++bytecount;
-#if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
-       if (MACH_IS_ATARI || MACH_IS_Q40) {
-               /* Atari has a byte-swapped IDE interface */
-               insw_swapw(hwif->io_ports[IDE_DATA_OFFSET], buffer,
-                          bytecount / 2);
-               return;
-       }
-#endif /* CONFIG_ATARI || CONFIG_Q40 */
-       hwif->ata_input_data(drive, buffer, bytecount / 4);
-       if ((bytecount & 0x03) >= 2)
-               hwif->INSW(hwif->io_ports[IDE_DATA_OFFSET],
-                          (u8 *)buffer + (bytecount & ~0x03), 1);
-}
+               if (mmio)
+                       __ide_mm_outsl((void __iomem *)data_addr, buf, len / 4);
+               else
+                       outsl(data_addr, buf, len / 4);
 
-static void atapi_output_bytes(ide_drive_t *drive, void *buffer, u32 bytecount)
-{
-       ide_hwif_t *hwif = HWIF(drive);
+               if ((io_32bit & 2) && !mmio)
+                       local_irq_restore(flags);
 
-       ++bytecount;
-#if defined(CONFIG_ATARI) || defined(CONFIG_Q40)
-       if (MACH_IS_ATARI || MACH_IS_Q40) {
-               /* Atari has a byte-swapped IDE interface */
-               outsw_swapw(hwif->io_ports[IDE_DATA_OFFSET], buffer,
-                           bytecount / 2);
-               return;
+               if ((len & 3) >= 2) {
+                       if (mmio)
+                               __ide_mm_outsw((void __iomem *)data_addr,
+                                                (u8 *)buf + (len & ~3), 1);
+                       else
+                               outsw(data_addr, (u8 *)buf + (len & ~3), 1);
+               }
+       } else {
+               if (mmio)
+                       __ide_mm_outsw((void __iomem *)data_addr, buf, len / 2);
+               else
+                       outsw(data_addr, buf, len / 2);
        }
-#endif /* CONFIG_ATARI || CONFIG_Q40 */
-       hwif->ata_output_data(drive, buffer, bytecount / 4);
-       if ((bytecount & 0x03) >= 2)
-               hwif->OUTSW(hwif->io_ports[IDE_DATA_OFFSET],
-                           (u8 *)buffer + (bytecount & ~0x03), 1);
 }
 
 void default_hwif_transport(ide_hwif_t *hwif)
 {
-       hwif->ata_input_data            = ata_input_data;
-       hwif->ata_output_data           = ata_output_data;
-       hwif->atapi_input_bytes         = atapi_input_bytes;
-       hwif->atapi_output_bytes        = atapi_output_bytes;
+       hwif->tf_load     = ide_tf_load;
+       hwif->tf_read     = ide_tf_read;
+
+       hwif->input_data  = ata_input_data;
+       hwif->output_data = ata_output_data;
 }
 
 void ide_fix_driveid (struct hd_driveid *id)
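
With the hooks installed by default_hwif_transport() above, the PIO data methods take a byte count rather than a count of 32-bit words, which is why callers elsewhere in this diff pass SECTOR_SIZE where they used to pass SECTOR_WORDS. A one-line usage sketch; example_read_identify() is an illustrative name and assumes id points at a 512-byte buffer.

static void example_read_identify(ide_drive_t *drive, u16 *id)
{
	/* 512 bytes; the old ata_input_data() took SECTOR_WORDS 32-bit words */
	drive->hwif->input_data(drive, NULL, id, SECTOR_SIZE);
}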
@@ -429,7 +474,7 @@ int drive_is_ready (ide_drive_t *drive)
        u8 stat                 = 0;
 
        if (drive->waiting_for_dma)
-               return hwif->ide_dma_test_irq(drive);
+               return hwif->dma_ops->dma_test_irq(drive);
 
 #if 0
        /* need to guarantee 400ns since last command was issued */
@@ -442,7 +487,7 @@ int drive_is_ready (ide_drive_t *drive)
         * an interrupt with another pci card/device.  We make no assumptions
         * about possible isa-pnp and pci-pnp issues yet.
         */
-       if (hwif->io_ports[IDE_CONTROL_OFFSET])
+       if (hwif->io_ports.ctl_addr)
                stat = ide_read_altstatus(drive);
        else
                /* Note: this may clear a pending IRQ!! */
@@ -580,6 +625,8 @@ static const struct drive_list_entry ivb_list[] = {
        { "TSSTcorp CDDVDW SH-S202J"    , "SB01"        },
        { "TSSTcorp CDDVDW SH-S202N"    , "SB00"        },
        { "TSSTcorp CDDVDW SH-S202N"    , "SB01"        },
+       { "TSSTcorp CDDVDW SH-S202H"    , "SB00"        },
+       { "TSSTcorp CDDVDW SH-S202H"    , "SB01"        },
        { NULL                          , NULL          }
 };
 
@@ -644,7 +691,7 @@ int ide_driveid_update(ide_drive_t *drive)
        SELECT_MASK(drive, 1);
        ide_set_irq(drive, 1);
        msleep(50);
-       hwif->OUTB(WIN_IDENTIFY, hwif->io_ports[IDE_COMMAND_OFFSET]);
+       hwif->OUTBSYNC(drive, WIN_IDENTIFY, hwif->io_ports.command_addr);
        timeout = jiffies + WAIT_WORSTCASE;
        do {
                if (time_after(jiffies, timeout)) {
@@ -671,7 +718,7 @@ int ide_driveid_update(ide_drive_t *drive)
                local_irq_restore(flags);
                return 0;
        }
-       hwif->ata_input_data(drive, id, SECTOR_WORDS);
+       hwif->input_data(drive, NULL, id, SECTOR_SIZE);
        (void)ide_read_status(drive);   /* clear drive IRQ */
        local_irq_enable();
        local_irq_restore(flags);
@@ -693,6 +740,7 @@ int ide_driveid_update(ide_drive_t *drive)
 int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
 {
        ide_hwif_t *hwif = drive->hwif;
+       struct ide_io_ports *io_ports = &hwif->io_ports;
        int error = 0;
        u8 stat;
 
@@ -700,8 +748,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
 //             msleep(50);
 
 #ifdef CONFIG_BLK_DEV_IDEDMA
-       if (hwif->dma_host_set) /* check if host supports DMA */
-               hwif->dma_host_set(drive, 0);
+       if (hwif->dma_ops)      /* check if host supports DMA */
+               hwif->dma_ops->dma_host_set(drive, 0);
 #endif
 
        /* Skip setting PIO flow-control modes on pre-EIDE drives */
@@ -731,10 +779,9 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
        SELECT_MASK(drive, 0);
        udelay(1);
        ide_set_irq(drive, 0);
-       hwif->OUTB(speed, hwif->io_ports[IDE_NSECTOR_OFFSET]);
-       hwif->OUTB(SETFEATURES_XFER, hwif->io_ports[IDE_FEATURE_OFFSET]);
-       hwif->OUTBSYNC(drive, WIN_SETFEATURES,
-                      hwif->io_ports[IDE_COMMAND_OFFSET]);
+       hwif->OUTB(speed, io_ports->nsect_addr);
+       hwif->OUTB(SETFEATURES_XFER, io_ports->feature_addr);
+       hwif->OUTBSYNC(drive, WIN_SETFEATURES, io_ports->command_addr);
        if (drive->quirk_list == 2)
                ide_set_irq(drive, 1);
 
@@ -759,8 +806,8 @@ int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
 #ifdef CONFIG_BLK_DEV_IDEDMA
        if ((speed >= XFER_SW_DMA_0 || (hwif->host_flags & IDE_HFLAG_VDMA)) &&
            drive->using_dma)
-               hwif->dma_host_set(drive, 1);
-       else if (hwif->dma_host_set)    /* check if host supports DMA */
+               hwif->dma_ops->dma_host_set(drive, 1);
+       else if (hwif->dma_ops) /* check if host supports DMA */
                ide_dma_off_quietly(drive);
 #endif
 
@@ -842,7 +889,7 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
 
        spin_lock_irqsave(&ide_lock, flags);
        __ide_set_handler(drive, handler, timeout, expiry);
-       hwif->OUTBSYNC(drive, cmd, hwif->io_ports[IDE_COMMAND_OFFSET]);
+       hwif->OUTBSYNC(drive, cmd, hwif->io_ports.command_addr);
        /*
         * Drive takes 400nS to respond, we must avoid the IRQ being
         * serviced before that.
@@ -852,9 +899,19 @@ void ide_execute_command(ide_drive_t *drive, u8 cmd, ide_handler_t *handler,
        ndelay(400);
        spin_unlock_irqrestore(&ide_lock, flags);
 }
-
 EXPORT_SYMBOL(ide_execute_command);
 
+void ide_execute_pkt_cmd(ide_drive_t *drive)
+{
+       ide_hwif_t *hwif = drive->hwif;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ide_lock, flags);
+       hwif->OUTBSYNC(drive, WIN_PACKETCMD, hwif->io_ports.command_addr);
+       ndelay(400);
+       spin_unlock_irqrestore(&ide_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd);
 
 /* needed below */
 static ide_startstop_t do_reset1 (ide_drive_t *, int);
@@ -905,10 +962,11 @@ static ide_startstop_t reset_pollfunc (ide_drive_t *drive)
 {
        ide_hwgroup_t *hwgroup  = HWGROUP(drive);
        ide_hwif_t *hwif        = HWIF(drive);
+       const struct ide_port_ops *port_ops = hwif->port_ops;
        u8 tmp;
 
-       if (hwif->reset_poll != NULL) {
-               if (hwif->reset_poll(drive)) {
+       if (port_ops && port_ops->reset_poll) {
+               if (port_ops->reset_poll(drive)) {
                        printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
                                hwif->name, drive->name);
                        return ide_stopped;
@@ -974,6 +1032,8 @@ static void ide_disk_pre_reset(ide_drive_t *drive)
 
 static void pre_reset(ide_drive_t *drive)
 {
+       const struct ide_port_ops *port_ops = drive->hwif->port_ops;
+
        if (drive->media == ide_disk)
                ide_disk_pre_reset(drive);
        else
@@ -994,8 +1054,8 @@ static void pre_reset(ide_drive_t *drive)
                return;
        }
 
-       if (HWIF(drive)->pre_reset != NULL)
-               HWIF(drive)->pre_reset(drive);
+       if (port_ops && port_ops->pre_reset)
+               port_ops->pre_reset(drive);
 
        if (drive->current_speed != 0xff)
                drive->desired_speed = drive->current_speed;
@@ -1023,12 +1083,16 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
        unsigned long flags;
        ide_hwif_t *hwif;
        ide_hwgroup_t *hwgroup;
+       struct ide_io_ports *io_ports;
+       const struct ide_port_ops *port_ops;
        u8 ctl;
 
        spin_lock_irqsave(&ide_lock, flags);
        hwif = HWIF(drive);
        hwgroup = HWGROUP(drive);
 
+       io_ports = &hwif->io_ports;
+
        /* We must not reset with running handlers */
        BUG_ON(hwgroup->handler != NULL);
 
@@ -1038,8 +1102,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
                pre_reset(drive);
                SELECT_DRIVE(drive);
                udelay (20);
-               hwif->OUTBSYNC(drive, WIN_SRST,
-                              hwif->io_ports[IDE_COMMAND_OFFSET]);
+               hwif->OUTBSYNC(drive, WIN_SRST, io_ports->command_addr);
                ndelay(400);
                hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
                hwgroup->polling = 1;
@@ -1055,7 +1118,7 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
        for (unit = 0; unit < MAX_DRIVES; ++unit)
                pre_reset(&hwif->drives[unit]);
 
-       if (hwif->io_ports[IDE_CONTROL_OFFSET] == 0) {
+       if (io_ports->ctl_addr == 0) {
                spin_unlock_irqrestore(&ide_lock, flags);
                return ide_stopped;
        }
@@ -1070,14 +1133,14 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
         * recover from reset very quickly, saving us the first 50ms wait time.
         */
        /* set SRST and nIEN */
-       hwif->OUTBSYNC(drive, drive->ctl|6, hwif->io_ports[IDE_CONTROL_OFFSET]);
+       hwif->OUTBSYNC(drive, drive->ctl|6, io_ports->ctl_addr);
        /* more than enough time */
        udelay(10);
        if (drive->quirk_list == 2)
                ctl = drive->ctl;       /* clear SRST and nIEN */
        else
                ctl = drive->ctl | 2;   /* clear SRST, leave nIEN */
-       hwif->OUTBSYNC(drive, ctl, hwif->io_ports[IDE_CONTROL_OFFSET]);
+       hwif->OUTBSYNC(drive, ctl, io_ports->ctl_addr);
        /* more than enough time */
        udelay(10);
        hwgroup->poll_timeout = jiffies + WAIT_WORSTCASE;
@@ -1089,8 +1152,9 @@ static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi)
         * state when the disks are reset this way. At least, the Winbond
         * 553 documentation says that
         */
-       if (hwif->resetproc)
-               hwif->resetproc(drive);
+       port_ops = hwif->port_ops;
+       if (port_ops && port_ops->resetproc)
+               port_ops->resetproc(drive);
 
        spin_unlock_irqrestore(&ide_lock, flags);
        return ide_started;
@@ -1121,7 +1185,7 @@ int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
                 * about locking issues (2.5 work ?).
                 */
                mdelay(1);
-               stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+               stat = hwif->INB(hwif->io_ports.status_addr);
                if ((stat & BUSY_STAT) == 0)
                        return 0;
                /*
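
The rewritten ide_tf_load()/ide_tf_read() and ata_*_data() helpers above pick MMIO or port I/O at run time from IDE_HFLAG_MMIO instead of going through the removed INW/INSW/OUTSW/OUTSL method pointers. A condensed sketch of that selection pattern; example_write_reg() is illustrative and assumes the usual <asm/io.h> accessors.

static void example_write_reg(ide_hwif_t *hwif, u8 val, unsigned long port)
{
	if (hwif->host_flags & IDE_HFLAG_MMIO)
		writeb(val, (void __iomem *)port);
	else
		outb(val, port);
}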
index c859de77aa8fef8e37ce4ae4b05e866a5f7a355d..47af80df68728c1ec9885fec5ced8ecd38826c3c 100644 (file)
@@ -85,7 +85,7 @@ static u8 ide_rate_filter(ide_drive_t *drive, u8 speed)
                        mode = XFER_PIO_4;
        }
 
-//     printk("%s: mode 0x%02x, speed 0x%02x\n", __FUNCTION__, mode, speed);
+/*     printk("%s: mode 0x%02x, speed 0x%02x\n", __func__, mode, speed); */
 
        return min(speed, mode);
 }
@@ -288,9 +288,10 @@ EXPORT_SYMBOL_GPL(ide_get_best_pio_mode);
 void ide_set_pio(ide_drive_t *drive, u8 req_pio)
 {
        ide_hwif_t *hwif = drive->hwif;
+       const struct ide_port_ops *port_ops = hwif->port_ops;
        u8 host_pio, pio;
 
-       if (hwif->set_pio_mode == NULL ||
+       if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
            (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
                return;
 
@@ -343,29 +344,30 @@ void ide_toggle_bounce(ide_drive_t *drive, int on)
 int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
 {
        ide_hwif_t *hwif = drive->hwif;
+       const struct ide_port_ops *port_ops = hwif->port_ops;
 
        if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
                return 0;
 
-       if (hwif->set_pio_mode == NULL)
+       if (port_ops == NULL || port_ops->set_pio_mode == NULL)
                return -1;
 
        /*
         * TODO: temporary hack for some legacy host drivers that didn't
         * set transfer mode on the device in ->set_pio_mode method...
         */
-       if (hwif->set_dma_mode == NULL) {
-               hwif->set_pio_mode(drive, mode - XFER_PIO_0);
+       if (port_ops->set_dma_mode == NULL) {
+               port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
                return 0;
        }
 
        if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
                if (ide_config_drive_speed(drive, mode))
                        return -1;
-               hwif->set_pio_mode(drive, mode - XFER_PIO_0);
+               port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
                return 0;
        } else {
-               hwif->set_pio_mode(drive, mode - XFER_PIO_0);
+               port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
                return ide_config_drive_speed(drive, mode);
        }
 }
@@ -373,20 +375,21 @@ int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
 int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
 {
        ide_hwif_t *hwif = drive->hwif;
+       const struct ide_port_ops *port_ops = hwif->port_ops;
 
        if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)
                return 0;
 
-       if (hwif->set_dma_mode == NULL)
+       if (port_ops == NULL || port_ops->set_dma_mode == NULL)
                return -1;
 
        if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
                if (ide_config_drive_speed(drive, mode))
                        return -1;
-               hwif->set_dma_mode(drive, mode);
+               port_ops->set_dma_mode(drive, mode);
                return 0;
        } else {
-               hwif->set_dma_mode(drive, mode);
+               port_ops->set_dma_mode(drive, mode);
                return ide_config_drive_speed(drive, mode);
        }
 }
@@ -406,8 +409,9 @@ EXPORT_SYMBOL_GPL(ide_set_dma_mode);
 int ide_set_xfer_rate(ide_drive_t *drive, u8 rate)
 {
        ide_hwif_t *hwif = drive->hwif;
+       const struct ide_port_ops *port_ops = hwif->port_ops;
 
-       if (hwif->set_dma_mode == NULL ||
+       if (port_ops == NULL || port_ops->set_dma_mode == NULL ||
            (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
                return -1;
 
@@ -483,7 +487,7 @@ static void ide_dump_sector(ide_drive_t *drive)
        else
                task.tf_flags = IDE_TFLAG_IN_LBA | IDE_TFLAG_IN_DEVICE;
 
-       ide_tf_read(drive, &task);
+       drive->hwif->tf_read(drive, &task);
 
        if (lba48 || (tf->device & ATA_LBA))
                printk(", LBAsect=%llu",
index 8a178a55a027715d2fae6d11f1ec2683114b7180..6a8953f68e9f709945f2f108550dea12907d3670 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/pnp.h>
 #include <linux/ide.h>
 
+#define DRV_NAME "ide-pnp"
+
 /* Add your devices here :)) */
 static struct pnp_device_id idepnp_devices[] = {
        /* Generic ESDI/IDE/ATA compatible hard disk controller */
@@ -29,13 +31,29 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
 {
        hw_regs_t hw;
        ide_hwif_t *hwif;
+       unsigned long base, ctl;
 
        if (!(pnp_port_valid(dev, 0) && pnp_port_valid(dev, 1) && pnp_irq_valid(dev, 0)))
                return -1;
 
+       base = pnp_port_start(dev, 0);
+       ctl = pnp_port_start(dev, 1);
+
+       if (!request_region(base, 8, DRV_NAME)) {
+               printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+                               DRV_NAME, base, base + 7);
+               return -EBUSY;
+       }
+
+       if (!request_region(ctl, 1, DRV_NAME)) {
+               printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+                               DRV_NAME, ctl);
+               release_region(base, 8);
+               return -EBUSY;
+       }
+
        memset(&hw, 0, sizeof(hw));
-       ide_std_init_ports(&hw, pnp_port_start(dev, 0),
-                               pnp_port_start(dev, 1));
+       ide_std_init_ports(&hw, base, ctl);
        hw.irq = pnp_irq(dev, 0);
 
        hwif = ide_find_port();
@@ -54,6 +72,9 @@ static int idepnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
                return 0;
        }
 
+       release_region(ctl, 1);
+       release_region(base, 8);
+
        return -1;
 }
 
@@ -61,10 +82,10 @@ static void idepnp_remove(struct pnp_dev *dev)
 {
        ide_hwif_t *hwif = pnp_get_drvdata(dev);
 
-       if (hwif)
-               ide_unregister(hwif->index);
-       else
-               printk(KERN_ERR "idepnp: Unable to remove device, please report.\n");
+       ide_unregister(hwif);
+
+       release_region(pnp_port_start(dev, 1), 1);
+       release_region(pnp_port_start(dev, 0), 8);
 }
 
 static struct pnp_driver idepnp_driver = {
index 875429728021fb9ba2c78bcbd9d3347588d7f48f..591deda3f86a9ef655560abb059aa710fdbb2418 100644 (file)
@@ -124,7 +124,7 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
 
        id = drive->id;
        /* read 512 bytes of id info */
-       hwif->ata_input_data(drive, id, SECTOR_WORDS);
+       hwif->input_data(drive, NULL, id, SECTOR_SIZE);
 
        drive->id_read = 1;
        local_irq_enable();
@@ -264,6 +264,7 @@ err_misc:
 static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
 {
        ide_hwif_t *hwif = HWIF(drive);
+       struct ide_io_ports *io_ports = &hwif->io_ports;
        int use_altstatus = 0, rc;
        unsigned long timeout;
        u8 s = 0, a = 0;
@@ -271,7 +272,7 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
        /* take a deep breath */
        msleep(50);
 
-       if (hwif->io_ports[IDE_CONTROL_OFFSET]) {
+       if (io_ports->ctl_addr) {
                a = ide_read_altstatus(drive);
                s = ide_read_status(drive);
                if ((a ^ s) & ~INDEX_STAT)
@@ -289,10 +290,10 @@ static int actual_try_to_identify (ide_drive_t *drive, u8 cmd)
         */
        if ((cmd == WIN_PIDENTIFY))
                /* disable dma & overlap */
-               hwif->OUTB(0, hwif->io_ports[IDE_FEATURE_OFFSET]);
+               hwif->OUTB(0, io_ports->feature_addr);
 
        /* ask drive for ID */
-       hwif->OUTB(cmd, hwif->io_ports[IDE_COMMAND_OFFSET]);
+       hwif->OUTBSYNC(drive, cmd, io_ports->command_addr);
 
        timeout = ((cmd == WIN_IDENTIFY) ? WAIT_WORSTCASE : WAIT_PIDENTIFY) / 2;
        timeout += jiffies;
@@ -353,7 +354,7 @@ static int try_to_identify (ide_drive_t *drive, u8 cmd)
         * interrupts during the identify-phase that
         * the irq handler isn't expecting.
         */
-       if (hwif->io_ports[IDE_CONTROL_OFFSET]) {
+       if (hwif->io_ports.ctl_addr) {
                if (!hwif->irq) {
                        autoprobe = 1;
                        cookie = probe_irq_on();
@@ -393,7 +394,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
 
        do {
                msleep(50);
-               stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+               stat = hwif->INB(hwif->io_ports.status_addr);
                if ((stat & BUSY_STAT) == 0)
                        return 0;
        } while (time_before(jiffies, timeout));
@@ -425,6 +426,7 @@ static int ide_busy_sleep(ide_hwif_t *hwif)
 static int do_probe (ide_drive_t *drive, u8 cmd)
 {
        ide_hwif_t *hwif = HWIF(drive);
+       struct ide_io_ports *io_ports = &hwif->io_ports;
        int rc;
        u8 stat;
 
@@ -445,7 +447,7 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
        msleep(50);
        SELECT_DRIVE(drive);
        msleep(50);
-       if (hwif->INB(hwif->io_ports[IDE_SELECT_OFFSET]) != drive->select.all &&
+       if (hwif->INB(io_ports->device_addr) != drive->select.all &&
            !drive->present) {
                if (drive->select.b.unit != 0) {
                        /* exit with drive0 selected */
@@ -472,17 +474,13 @@ static int do_probe (ide_drive_t *drive, u8 cmd)
                if (stat == (BUSY_STAT | READY_STAT))
                        return 4;
 
-               if ((rc == 1 && cmd == WIN_PIDENTIFY) &&
-                       ((drive->autotune == IDE_TUNE_DEFAULT) ||
-                       (drive->autotune == IDE_TUNE_AUTO))) {
+               if (rc == 1 && cmd == WIN_PIDENTIFY) {
                        printk(KERN_ERR "%s: no response (status = 0x%02x), "
                                        "resetting drive\n", drive->name, stat);
                        msleep(50);
-                       hwif->OUTB(drive->select.all,
-                                  hwif->io_ports[IDE_SELECT_OFFSET]);
+                       hwif->OUTB(drive->select.all, io_ports->device_addr);
                        msleep(50);
-                       hwif->OUTB(WIN_SRST,
-                                  hwif->io_ports[IDE_COMMAND_OFFSET]);
+                       hwif->OUTBSYNC(drive, WIN_SRST, io_ports->command_addr);
                        (void)ide_busy_sleep(hwif);
                        rc = try_to_identify(drive, cmd);
                }
@@ -518,7 +516,7 @@ static void enable_nest (ide_drive_t *drive)
        printk("%s: enabling %s -- ", hwif->name, drive->id->model);
        SELECT_DRIVE(drive);
        msleep(50);
-       hwif->OUTB(EXABYTE_ENABLE_NEST, hwif->io_ports[IDE_COMMAND_OFFSET]);
+       hwif->OUTBSYNC(drive, EXABYTE_ENABLE_NEST, hwif->io_ports.command_addr);
 
        if (ide_busy_sleep(hwif)) {
                printk(KERN_CONT "failed (timeout)\n");
@@ -644,7 +642,7 @@ static int ide_register_port(ide_hwif_t *hwif)
        ret = device_register(&hwif->gendev);
        if (ret < 0) {
                printk(KERN_WARNING "IDE: %s: device_register error: %d\n",
-                       __FUNCTION__, ret);
+                       __func__, ret);
                goto out;
        }
 
@@ -773,8 +771,7 @@ static int ide_probe_port(ide_hwif_t *hwif)
 
        BUG_ON(hwif->present);
 
-       if (hwif->noprobe ||
-           (hwif->drives[0].noprobe && hwif->drives[1].noprobe))
+       if (hwif->drives[0].noprobe && hwif->drives[1].noprobe)
                return -EACCES;
 
        /*
@@ -801,14 +798,9 @@ static int ide_probe_port(ide_hwif_t *hwif)
                if (drive->present)
                        rc = 0;
        }
-       if (hwif->io_ports[IDE_CONTROL_OFFSET] && hwif->reset) {
-               printk(KERN_WARNING "%s: reset\n", hwif->name);
-               hwif->OUTB(12, hwif->io_ports[IDE_CONTROL_OFFSET]);
-               udelay(10);
-               hwif->OUTB(8, hwif->io_ports[IDE_CONTROL_OFFSET]);
-               (void)ide_busy_sleep(hwif);
-       }
+
        local_irq_restore(flags);
+
        /*
         * Use cached IRQ number. It might be (and is...) changed by probe
         * code above
@@ -821,29 +813,25 @@ static int ide_probe_port(ide_hwif_t *hwif)
 
 static void ide_port_tune_devices(ide_hwif_t *hwif)
 {
+       const struct ide_port_ops *port_ops = hwif->port_ops;
        int unit;
 
        for (unit = 0; unit < MAX_DRIVES; unit++) {
                ide_drive_t *drive = &hwif->drives[unit];
 
-               if (drive->present && hwif->quirkproc)
-                       hwif->quirkproc(drive);
+               if (drive->present && port_ops && port_ops->quirkproc)
+                       port_ops->quirkproc(drive);
        }
 
        for (unit = 0; unit < MAX_DRIVES; ++unit) {
                ide_drive_t *drive = &hwif->drives[unit];
 
                if (drive->present) {
-                       if (drive->autotune == IDE_TUNE_AUTO)
-                               ide_set_max_pio(drive);
-
-                       if (drive->autotune != IDE_TUNE_DEFAULT &&
-                           drive->autotune != IDE_TUNE_AUTO)
-                               continue;
+                       ide_set_max_pio(drive);
 
                        drive->nice1 = 1;
 
-                       if (hwif->dma_host_set)
+                       if (hwif->dma_ops)
                                ide_set_dma(drive);
                }
        }
@@ -994,6 +982,7 @@ static void ide_port_setup_devices(ide_hwif_t *hwif)
  */
 static int init_irq (ide_hwif_t *hwif)
 {
+       struct ide_io_ports *io_ports = &hwif->io_ports;
        unsigned int index;
        ide_hwgroup_t *hwgroup;
        ide_hwif_t *match = NULL;
@@ -1077,9 +1066,9 @@ static int init_irq (ide_hwif_t *hwif)
                if (IDE_CHIPSET_IS_PCI(hwif->chipset))
                        sa = IRQF_SHARED;
 
-               if (hwif->io_ports[IDE_CONTROL_OFFSET])
+               if (io_ports->ctl_addr)
                        /* clear nIEN */
-                       hwif->OUTB(0x08, hwif->io_ports[IDE_CONTROL_OFFSET]);
+                       hwif->OUTB(0x08, io_ports->ctl_addr);
 
                if (request_irq(hwif->irq,&ide_intr,sa,hwif->name,hwgroup))
                        goto out_unlink;
@@ -1095,12 +1084,11 @@ static int init_irq (ide_hwif_t *hwif)
 
 #if !defined(__mc68000__)
        printk("%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
-               hwif->io_ports[IDE_DATA_OFFSET],
-               hwif->io_ports[IDE_DATA_OFFSET]+7,
-               hwif->io_ports[IDE_CONTROL_OFFSET], hwif->irq);
+               io_ports->data_addr, io_ports->status_addr,
+               io_ports->ctl_addr, hwif->irq);
 #else
        printk("%s at 0x%08lx on irq %d", hwif->name,
-               hwif->io_ports[IDE_DATA_OFFSET], hwif->irq);
+               io_ports->data_addr, hwif->irq);
 #endif /* __mc68000__ */
        if (match)
                printk(" (%sed with %s)",
@@ -1242,8 +1230,8 @@ static int hwif_init(ide_hwif_t *hwif)
        int old_irq;
 
        if (!hwif->irq) {
-               if (!(hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET])))
-               {
+               hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
+               if (!hwif->irq) {
                        printk("%s: DISABLED, NO IRQ\n", hwif->name);
                        return 0;
                }
@@ -1272,7 +1260,8 @@ static int hwif_init(ide_hwif_t *hwif)
         *      It failed to initialise. Find the default IRQ for 
         *      this port and try that.
         */
-       if (!(hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]))) {
+       hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
+       if (!hwif->irq) {
                printk("%s: Disabled unable to get IRQ %d.\n",
                        hwif->name, old_irq);
                goto out;
@@ -1324,6 +1313,7 @@ static void hwif_register_devices(ide_hwif_t *hwif)
 
 static void ide_port_init_devices(ide_hwif_t *hwif)
 {
+       const struct ide_port_ops *port_ops = hwif->port_ops;
        int i;
 
        for (i = 0; i < MAX_DRIVES; i++) {
@@ -1335,12 +1325,10 @@ static void ide_port_init_devices(ide_hwif_t *hwif)
                        drive->unmask = 1;
                if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
                        drive->no_unmask = 1;
-               if ((hwif->host_flags & IDE_HFLAG_NO_AUTOTUNE) == 0)
-                       drive->autotune = 1;
        }
 
-       if (hwif->port_init_devs)
-               hwif->port_init_devs(hwif);
+       if (port_ops && port_ops->port_init_devs)
+               port_ops->port_init_devs(hwif);
 }
 
 static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
@@ -1355,26 +1343,44 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
        if (d->init_iops)
                d->init_iops(hwif);
 
-       if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0)
-               ide_hwif_setup_dma(hwif, d);
-
        if ((!hwif->irq && (d->host_flags & IDE_HFLAG_LEGACY_IRQS)) ||
            (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS))
                hwif->irq = port ? 15 : 14;
 
-       hwif->host_flags = d->host_flags;
+       /* ->host_flags may be set by ->init_iops (or even earlier...) */
+       hwif->host_flags |= d->host_flags;
        hwif->pio_mask = d->pio_mask;
 
-       if ((d->host_flags & IDE_HFLAG_SERIALIZE) && hwif->mate)
-               hwif->mate->serialized = hwif->serialized = 1;
+       /* ->set_pio_mode for DTC2278 is currently limited to port 0 */
+       if (hwif->chipset != ide_dtc2278 || hwif->channel == 0)
+               hwif->port_ops = d->port_ops;
+
+       if ((d->host_flags & IDE_HFLAG_SERIALIZE) ||
+           ((d->host_flags & IDE_HFLAG_SERIALIZE_DMA) && hwif->dma_base)) {
+               if (hwif->mate)
+                       hwif->mate->serialized = hwif->serialized = 1;
+       }
 
        hwif->swdma_mask = d->swdma_mask;
        hwif->mwdma_mask = d->mwdma_mask;
        hwif->ultra_mask = d->udma_mask;
 
-       /* reset DMA masks only for SFF-style DMA controllers */
-       if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0 && hwif->dma_base == 0)
-               hwif->swdma_mask = hwif->mwdma_mask = hwif->ultra_mask = 0;
+       if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+               int rc;
+
+               if (d->init_dma)
+                       rc = d->init_dma(hwif, d);
+               else
+                       rc = ide_hwif_setup_dma(hwif, d);
+
+               if (rc < 0) {
+                       printk(KERN_INFO "%s: DMA disabled\n", hwif->name);
+                       hwif->swdma_mask = 0;
+                       hwif->mwdma_mask = 0;
+                       hwif->ultra_mask = 0;
+               } else if (d->dma_ops)
+                       hwif->dma_ops = d->dma_ops;
+       }
 
        if (d->host_flags & IDE_HFLAG_RQSIZE_256)
                hwif->rqsize = 256;
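In the hunk above, DMA setup moves after the transfer-mode mask assignments: ide_init_port() now prefers a host-specific d->init_dma() callback, falls back to ide_hwif_setup_dma(), and clears the swdma/mwdma/ultra masks when either one fails. A hypothetical driver hooking ->init_dma might look like the sketch below (name and body invented for illustration):

static int example_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
{
	/* claim host-specific DMA resources here; returning a negative
	 * value keeps DMA off and zeroes the transfer-mode masks */
	return -1;		/* no DMA engine wired up in this sketch */
}

static const struct ide_port_info example_dma_info = {
	.name		= "example-dma",
	.init_dma	= example_init_dma,
	.udma_mask	= ATA_UDMA5,
};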
@@ -1386,9 +1392,11 @@ static void ide_init_port(ide_hwif_t *hwif, unsigned int port,
 
 static void ide_port_cable_detect(ide_hwif_t *hwif)
 {
-       if (hwif->cable_detect && (hwif->ultra_mask & 0x78)) {
+       const struct ide_port_ops *port_ops = hwif->port_ops;
+
+       if (port_ops && port_ops->cable_detect && (hwif->ultra_mask & 0x78)) {
                if (hwif->cbl != ATA_CBL_PATA40_SHORT)
-                       hwif->cbl = hwif->cable_detect(hwif);
+                       hwif->cbl = port_ops->cable_detect(hwif);
        }
 }
 
@@ -1498,13 +1506,20 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
        int i, rc = 0;
 
        for (i = 0; i < MAX_HWIFS; i++) {
-               if (d == NULL || idx[i] == 0xff) {
+               if (idx[i] == 0xff) {
                        mate = NULL;
                        continue;
                }
 
                hwif = &ide_hwifs[idx[i]];
 
+               ide_port_apply_params(hwif);
+
+               if (d == NULL) {
+                       mate = NULL;
+                       continue;
+               }
+
                if (d->chipset != ide_etrax100 && (i & 1) && mate) {
                        hwif->mate = mate;
                        mate->mate = hwif;
@@ -1523,25 +1538,15 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
 
                hwif = &ide_hwifs[idx[i]];
 
-               if ((hwif->chipset != ide_4drives || !hwif->mate ||
-                    !hwif->mate->present) && ide_hwif_request_regions(hwif)) {
-                       printk(KERN_ERR "%s: ports already in use, "
-                                       "skipping probe\n", hwif->name);
-                       continue;
-               }
-
-               if (ide_probe_port(hwif) < 0) {
-                       ide_hwif_release_regions(hwif);
-                       continue;
-               }
-
-               hwif->present = 1;
+               if (ide_probe_port(hwif) == 0)
+                       hwif->present = 1;
 
                if (hwif->chipset != ide_4drives || !hwif->mate ||
                    !hwif->mate->present)
                        ide_register_port(hwif);
 
-               ide_port_tune_devices(hwif);
+               if (hwif->present)
+                       ide_port_tune_devices(hwif);
        }
 
        for (i = 0; i < MAX_HWIFS; i++) {
@@ -1550,9 +1555,6 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
 
                hwif = &ide_hwifs[idx[i]];
 
-               if (!hwif->present)
-                       continue;
-
                if (hwif_init(hwif) == 0) {
                        printk(KERN_INFO "%s: failed to initialize IDE "
                                         "interface\n", hwif->name);
@@ -1561,10 +1563,13 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
                        continue;
                }
 
-               ide_port_setup_devices(hwif);
+               if (hwif->present)
+                       ide_port_setup_devices(hwif);
 
                ide_acpi_init(hwif);
-               ide_acpi_port_init_devices(hwif);
+
+               if (hwif->present)
+                       ide_acpi_port_init_devices(hwif);
        }
 
        for (i = 0; i < MAX_HWIFS; i++) {
@@ -1573,11 +1578,11 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
 
                hwif = &ide_hwifs[idx[i]];
 
-               if (hwif->present) {
-                       if (hwif->chipset == ide_unknown)
-                               hwif->chipset = ide_generic;
+               if (hwif->chipset == ide_unknown)
+                       hwif->chipset = ide_generic;
+
+               if (hwif->present)
                        hwif_register_devices(hwif);
-               }
        }
 
        for (i = 0; i < MAX_HWIFS; i++) {
@@ -1586,11 +1591,11 @@ int ide_device_add_all(u8 *idx, const struct ide_port_info *d)
 
                hwif = &ide_hwifs[idx[i]];
 
-               if (hwif->present) {
-                       ide_sysfs_register_port(hwif);
-                       ide_proc_register_port(hwif);
+               ide_sysfs_register_port(hwif);
+               ide_proc_register_port(hwif);
+
+               if (hwif->present)
                        ide_proc_port_register_devices(hwif);
-               }
        }
 
        return rc;
@@ -1611,6 +1616,7 @@ EXPORT_SYMBOL_GPL(ide_device_add);
 
 void ide_port_scan(ide_hwif_t *hwif)
 {
+       ide_port_apply_params(hwif);
        ide_port_cable_detect(hwif);
        ide_port_init_devices(hwif);
 
@@ -1626,3 +1632,67 @@ void ide_port_scan(ide_hwif_t *hwif)
        ide_proc_port_register_devices(hwif);
 }
 EXPORT_SYMBOL_GPL(ide_port_scan);
+
+static void ide_legacy_init_one(u8 *idx, hw_regs_t *hw, u8 port_no,
+                               const struct ide_port_info *d,
+                               unsigned long config)
+{
+       ide_hwif_t *hwif;
+       unsigned long base, ctl;
+       int irq;
+
+       if (port_no == 0) {
+               base = 0x1f0;
+               ctl  = 0x3f6;
+               irq  = 14;
+       } else {
+               base = 0x170;
+               ctl  = 0x376;
+               irq  = 15;
+       }
+
+       if (!request_region(base, 8, d->name)) {
+               printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+                               d->name, base, base + 7);
+               return;
+       }
+
+       if (!request_region(ctl, 1, d->name)) {
+               printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+                               d->name, ctl);
+               release_region(base, 8);
+               return;
+       }
+
+       ide_std_init_ports(hw, base, ctl);
+       hw->irq = irq;
+
+       hwif = ide_find_port_slot(d);
+       if (hwif) {
+               ide_init_port_hw(hwif, hw);
+               if (config)
+                       hwif->config_data = config;
+               idx[port_no] = hwif->index;
+       }
+}
+
+int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
+{
+       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
+       hw_regs_t hw[2];
+
+       memset(&hw, 0, sizeof(hw));
+
+       if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
+               ide_legacy_init_one(idx, &hw[0], 0, d, config);
+       ide_legacy_init_one(idx, &hw[1], 1, d, config);
+
+       if (idx[0] == 0xff && idx[1] == 0xff &&
+           (d->host_flags & IDE_HFLAG_SINGLE))
+               return -ENOENT;
+
+       ide_device_add(idx, d);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ide_legacy_device_add);
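The new ide_legacy_device_add() gives legacy ISA/VLB-style host drivers a single entry point: it claims the standard 0x1f0/0x3f6 and 0x170/0x376 ranges, fills in hw_regs_t, and hands the resulting index array to ide_device_add(). A hypothetical caller, with the driver name and flags chosen purely for illustration:

#include <linux/module.h>
#include <linux/ide.h>

static const struct ide_port_info example_legacy_info = {
	.name		= "example-vlb",
	.host_flags	= IDE_HFLAG_NO_DMA,
	.pio_mask	= ATA_PIO4,
};

static int __init example_vlb_init(void)
{
	/* a non-zero second argument would be stored in hwif->config_data */
	return ide_legacy_device_add(&example_legacy_info, 0);
}
module_init(example_vlb_init);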
index 5d3562b450390b4f8f2d24f30eb73290fa02dfa7..8d6ad812a014e3e4a5ea3c1611874d550e3d4bf1 100644
@@ -599,14 +599,14 @@ static int ide_replace_subdriver(ide_drive_t *drive, const char *driver)
        err = device_attach(dev);
        if (err < 0)
                printk(KERN_WARNING "IDE: %s: device_attach error: %d\n",
-                       __FUNCTION__, err);
+                       __func__, err);
        drive->driver_req[0] = 0;
        if (dev->driver == NULL) {
                err = device_attach(dev);
                if (err < 0)
                        printk(KERN_WARNING
                                "IDE: %s: device_attach(2) error: %d\n",
-                               __FUNCTION__, err);
+                               __func__, err);
        }
        if (dev->driver && !strcmp(dev->driver->name, driver))
                ret = 0;
@@ -786,14 +786,6 @@ void ide_proc_register_port(ide_hwif_t *hwif)
        }
 }
 
-#ifdef CONFIG_BLK_DEV_IDEPCI
-void ide_pci_create_host_proc(const char *name, get_info_t *get_info)
-{
-       create_proc_info_entry(name, 0, proc_ide_root, get_info);
-}
-EXPORT_SYMBOL_GPL(ide_pci_create_host_proc);
-#endif
-
 void ide_proc_unregister_port(ide_hwif_t *hwif)
 {
        if (hwif->proc) {
@@ -820,7 +812,7 @@ static int ide_drivers_show(struct seq_file *s, void *p)
        err = bus_for_each_drv(&ide_bus_type, NULL, s, proc_print_driver);
        if (err < 0)
                printk(KERN_WARNING "IDE: %s: bus_for_each_drv error: %d\n",
-                       __FUNCTION__, err);
+                       __func__, err);
        return 0;
 }
 
@@ -830,6 +822,7 @@ static int ide_drivers_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations ide_drivers_operations = {
+       .owner          = THIS_MODULE,
        .open           = ide_drivers_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -838,16 +831,12 @@ static const struct file_operations ide_drivers_operations = {
 
 void proc_ide_create(void)
 {
-       struct proc_dir_entry *entry;
-
        proc_ide_root = proc_mkdir("ide", NULL);
 
        if (!proc_ide_root)
                return;
 
-       entry = create_proc_entry("drivers", 0, proc_ide_root);
-       if (entry)
-               entry->proc_fops = &ide_drivers_operations;
+       proc_create("drivers", 0, proc_ide_root, &ide_drivers_operations);
 }
 
 void proc_ide_destroy(void)
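The hunks above replace the two-step create_proc_entry() plus proc_fops assignment with a single proc_create() call, and the file_operations now carry .owner = THIS_MODULE. The same pattern in isolation, using an invented entry name and seq_file show routine:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "example\n");
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = example_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static void example_proc_register(struct proc_dir_entry *parent)
{
	/* registers the entry and its fops in one call */
	proc_create("example", 0, parent, &example_proc_fops);
}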
index 98888da1b600d3a4965e2dbc2d041332e5c5cbbc..0e79efff1debdd5c357c658b77b0ec1a60e735e2 100644
@@ -102,7 +102,7 @@ static int __init ide_scan_pcibus(void)
                if (__pci_register_driver(d, d->driver.owner,
                                          d->driver.mod_name))
                        printk(KERN_ERR "%s: failed to register %s driver\n",
-                                       __FUNCTION__, d->driver.mod_name);
+                                       __func__, d->driver.mod_name);
        }
 
        return 0;
index f43fd070f1b6d0c7221f8ab86a1ff89c913ac4cf..1e1f26331a24dd2c4aa3bc0028c216cd6f1c2c40 100644
@@ -72,26 +72,6 @@ enum {
 #endif
 
 /**************************** Tunable parameters *****************************/
-
-
-/*
- * Pipelined mode parameters.
- *
- * We try to use the minimum number of stages which is enough to keep the tape
- * constantly streaming. To accomplish that, we implement a feedback loop around
- * the maximum number of stages:
- *
- * We start from MIN maximum stages (we will not even use MIN stages if we don't
- * need them), increment it by RATE*(MAX-MIN) whenever we sense that the
- * pipeline is empty, until we reach the optimum value or until we reach MAX.
- *
- * Setting the following parameter to 0 is illegal: the pipelined mode cannot be
- * disabled (idetape_calculate_speeds() divides by tape->max_stages.)
- */
-#define IDETAPE_MIN_PIPELINE_STAGES      1
-#define IDETAPE_MAX_PIPELINE_STAGES    400
-#define IDETAPE_INCREASE_STAGES_RATE    20
-
 /*
  * After each failed packet command we issue a request sense command and retry
  * the packet command IDETAPE_MAX_PC_RETRIES times.
@@ -224,28 +204,17 @@ enum {
        /* 0 When the tape position is unknown */
        IDETAPE_FLAG_ADDRESS_VALID      = (1 << 1),
        /* Device already opened */
-       IDETAPE_FLAG_BUSY                       = (1 << 2),
-       /* Error detected in a pipeline stage */
-       IDETAPE_FLAG_PIPELINE_ERR       = (1 << 3),
+       IDETAPE_FLAG_BUSY               = (1 << 2),
        /* Attempt to auto-detect the current user block size */
-       IDETAPE_FLAG_DETECT_BS          = (1 << 4),
+       IDETAPE_FLAG_DETECT_BS          = (1 << 3),
        /* Currently on a filemark */
-       IDETAPE_FLAG_FILEMARK           = (1 << 5),
+       IDETAPE_FLAG_FILEMARK           = (1 << 4),
        /* DRQ interrupt device */
-       IDETAPE_FLAG_DRQ_INTERRUPT      = (1 << 6),
-       /* pipeline active */
-       IDETAPE_FLAG_PIPELINE_ACTIVE    = (1 << 7),
+       IDETAPE_FLAG_DRQ_INTERRUPT      = (1 << 5),
        /* 0 = no tape is loaded, so we don't rewind after ejecting */
-       IDETAPE_FLAG_MEDIUM_PRESENT     = (1 << 8),
+       IDETAPE_FLAG_MEDIUM_PRESENT     = (1 << 6),
 };
 
-/* A pipeline stage. */
-typedef struct idetape_stage_s {
-       struct request rq;                      /* The corresponding request */
-       struct idetape_bh *bh;                  /* The data buffers */
-       struct idetape_stage_s *next;           /* Pointer to the next stage */
-} idetape_stage_t;
-
 /*
  * Most of our global data which we need to save even as we leave the driver due
  * to an interrupt or a timer event is stored in the struct defined below.
@@ -289,9 +258,7 @@ typedef struct ide_tape_obj {
         * While polling for DSC we use postponed_rq to postpone the current
         * request so that ide.c will be able to service pending requests on the
         * other device. Note that at most we will have only one DSC (usually
-        * data transfer) request in the device request queue. Additional
-        * requests can be queued in our internal pipeline, but they will be
-        * visible to ide.c only one at a time.
+        * data transfer) request in the device request queue.
         */
        struct request *postponed_rq;
        /* The time in which we started polling for DSC */
@@ -331,43 +298,20 @@ typedef struct ide_tape_obj {
         * At most, there is only one ide-tape originated data transfer request
         * in the device request queue. This allows ide.c to easily service
         * requests from the other device when we postpone our active request.
-        * In the pipelined operation mode, we use our internal pipeline
-        * structure to hold more data requests. The data buffer size is chosen
-        * based on the tape's recommendation.
         */
-       /* ptr to the request which is waiting in the device request queue */
-       struct request *active_data_rq;
+
        /* Data buffer size chosen based on the tape's recommendation */
-       int stage_size;
-       idetape_stage_t *merge_stage;
-       int merge_stage_size;
+       int buffer_size;
+       /* merge buffer */
+       struct idetape_bh *merge_bh;
+       /* size of the merge buffer */
+       int merge_bh_size;
+       /* pointer to current buffer head within the merge buffer */
        struct idetape_bh *bh;
        char *b_data;
        int b_count;
 
-       /*
-        * Pipeline parameters.
-        *
-        * To accomplish non-pipelined mode, we simply set the following
-        * variables to zero (or NULL, where appropriate).
-        */
-       /* Number of currently used stages */
-       int nr_stages;
-       /* Number of pending stages */
-       int nr_pending_stages;
-       /* We will not allocate more than this number of stages */
-       int max_stages, min_pipeline, max_pipeline;
-       /* The first stage which will be removed from the pipeline */
-       idetape_stage_t *first_stage;
-       /* The currently active stage */
-       idetape_stage_t *active_stage;
-       /* Will be serviced after the currently active request */
-       idetape_stage_t *next_stage;
-       /* New requests will be added to the pipeline here */
-       idetape_stage_t *last_stage;
-       /* Optional free stage which we can use */
-       idetape_stage_t *cache_stage;
-       int pages_per_stage;
+       int pages_per_buffer;
        /* Wasted space in each stage */
        int excess_bh_size;
 
@@ -388,45 +332,6 @@ typedef struct ide_tape_obj {
        /* the tape is write protected (hardware or opened as read-only) */
        char write_prot;
 
-       /*
-        * Limit the number of times a request can be postponed, to avoid an
-        * infinite postpone deadlock.
-        */
-       int postpone_cnt;
-
-       /*
-        * Measures number of frames:
-        *
-        * 1. written/read to/from the driver pipeline (pipeline_head).
-        * 2. written/read to/from the tape buffers (idetape_bh).
-        * 3. written/read by the tape to/from the media (tape_head).
-        */
-       int pipeline_head;
-       int buffer_head;
-       int tape_head;
-       int last_tape_head;
-
-       /* Speed control at the tape buffers input/output */
-       unsigned long insert_time;
-       int insert_size;
-       int insert_speed;
-       int max_insert_speed;
-       int measure_insert_time;
-
-       /* Speed regulation negative feedback loop */
-       int speed_control;
-       int pipeline_head_speed;
-       int controlled_pipeline_head_speed;
-       int uncontrolled_pipeline_head_speed;
-       int controlled_last_pipeline_head;
-       unsigned long uncontrolled_pipeline_head_time;
-       unsigned long controlled_pipeline_head_time;
-       int controlled_previous_pipeline_head;
-       int uncontrolled_previous_pipeline_head;
-       unsigned long controlled_previous_head_time;
-       unsigned long uncontrolled_previous_head_time;
-       int restart_speed_control_req;
-
        u32 debug_mask;
 } idetape_tape_t;
 
@@ -490,13 +395,13 @@ static void idetape_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
                if (bh == NULL) {
                        printk(KERN_ERR "ide-tape: bh == NULL in "
                                "idetape_input_buffers\n");
-                       ide_atapi_discard_data(drive, bcount);
+                       ide_pad_transfer(drive, 0, bcount);
                        return;
                }
                count = min(
                        (unsigned int)(bh->b_size - atomic_read(&bh->b_count)),
                        bcount);
-               HWIF(drive)->atapi_input_bytes(drive, bh->b_data +
+               drive->hwif->input_data(drive, NULL, bh->b_data +
                                        atomic_read(&bh->b_count), count);
                bcount -= count;
                atomic_add(count, &bh->b_count);
@@ -522,7 +427,7 @@ static void idetape_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
                        return;
                }
                count = min((unsigned int)pc->b_count, (unsigned int)bcount);
-               HWIF(drive)->atapi_output_bytes(drive, pc->b_data, count);
+               drive->hwif->output_data(drive, NULL, pc->b_data, count);
                bcount -= count;
                pc->b_data += count;
                pc->b_count -= count;
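These I/O hunks move ide-tape from the old hwif->atapi_{input,output}_bytes methods to the new ->input_data()/->output_data() hooks, which take an extra request pointer (passed as NULL here because these transfers are driven outside the request payload). The call pattern in isolation, with an invented scratch buffer:

static void example_pio_roundtrip(ide_drive_t *drive)
{
	u8 buf[64];

	/* PIO in: pull 64 bytes from the device's data register ... */
	drive->hwif->input_data(drive, NULL, buf, sizeof(buf));
	/* ... and push them back out (illustration only) */
	drive->hwif->output_data(drive, NULL, buf, sizeof(buf));
}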
@@ -674,128 +579,36 @@ static void idetape_analyze_error(ide_drive_t *drive, u8 *sense)
        }
 }
 
-static void idetape_activate_next_stage(ide_drive_t *drive)
+/* Free data buffers completely. */
+static void ide_tape_kfree_buffer(idetape_tape_t *tape)
 {
-       idetape_tape_t *tape = drive->driver_data;
-       idetape_stage_t *stage = tape->next_stage;
-       struct request *rq = &stage->rq;
+       struct idetape_bh *prev_bh, *bh = tape->merge_bh;
 
-       debug_log(DBG_PROCS, "Enter %s\n", __func__);
+       while (bh) {
+               u32 size = bh->b_size;
 
-       if (stage == NULL) {
-               printk(KERN_ERR "ide-tape: bug: Trying to activate a non"
-                               " existing stage\n");
-               return;
-       }
+               while (size) {
+                       unsigned int order = fls(size >> PAGE_SHIFT)-1;
 
-       rq->rq_disk = tape->disk;
-       rq->buffer = NULL;
-       rq->special = (void *)stage->bh;
-       tape->active_data_rq = rq;
-       tape->active_stage = stage;
-       tape->next_stage = stage->next;
-}
-
-/* Free a stage along with its related buffers completely. */
-static void __idetape_kfree_stage(idetape_stage_t *stage)
-{
-       struct idetape_bh *prev_bh, *bh = stage->bh;
-       int size;
-
-       while (bh != NULL) {
-               if (bh->b_data != NULL) {
-                       size = (int) bh->b_size;
-                       while (size > 0) {
-                               free_page((unsigned long) bh->b_data);
-                               size -= PAGE_SIZE;
-                               bh->b_data += PAGE_SIZE;
-                       }
+                       if (bh->b_data)
+                               free_pages((unsigned long)bh->b_data, order);
+
+                       size &= (order-1);
+                       bh->b_data += (1 << order) * PAGE_SIZE;
                }
                prev_bh = bh;
                bh = bh->b_reqnext;
                kfree(prev_bh);
        }
-       kfree(stage);
-}
-
-static void idetape_kfree_stage(idetape_tape_t *tape, idetape_stage_t *stage)
-{
-       __idetape_kfree_stage(stage);
+       kfree(tape->merge_bh);
 }
 
-/*
- * Remove tape->first_stage from the pipeline. The caller should avoid race
- * conditions.
- */
-static void idetape_remove_stage_head(ide_drive_t *drive)
-{
-       idetape_tape_t *tape = drive->driver_data;
-       idetape_stage_t *stage;
-
-       debug_log(DBG_PROCS, "Enter %s\n", __func__);
-
-       if (tape->first_stage == NULL) {
-               printk(KERN_ERR "ide-tape: bug: tape->first_stage is NULL\n");
-               return;
-       }
-       if (tape->active_stage == tape->first_stage) {
-               printk(KERN_ERR "ide-tape: bug: Trying to free our active "
-                               "pipeline stage\n");
-               return;
-       }
-       stage = tape->first_stage;
-       tape->first_stage = stage->next;
-       idetape_kfree_stage(tape, stage);
-       tape->nr_stages--;
-       if (tape->first_stage == NULL) {
-               tape->last_stage = NULL;
-               if (tape->next_stage != NULL)
-                       printk(KERN_ERR "ide-tape: bug: tape->next_stage !="
-                                       " NULL\n");
-               if (tape->nr_stages)
-                       printk(KERN_ERR "ide-tape: bug: nr_stages should be 0 "
-                                       "now\n");
-       }
-}
-
-/*
- * This will free all the pipeline stages starting from new_last_stage->next
- * to the end of the list, and point tape->last_stage to new_last_stage.
- */
-static void idetape_abort_pipeline(ide_drive_t *drive,
-                                  idetape_stage_t *new_last_stage)
-{
-       idetape_tape_t *tape = drive->driver_data;
-       idetape_stage_t *stage = new_last_stage->next;
-       idetape_stage_t *nstage;
-
-       debug_log(DBG_PROCS, "%s: Enter %s\n", tape->name, __func__);
-
-       while (stage) {
-               nstage = stage->next;
-               idetape_kfree_stage(tape, stage);
-               --tape->nr_stages;
-               --tape->nr_pending_stages;
-               stage = nstage;
-       }
-       if (new_last_stage)
-               new_last_stage->next = NULL;
-       tape->last_stage = new_last_stage;
-       tape->next_stage = NULL;
-}
-
-/*
- * Finish servicing a request and insert a pending pipeline request into the
- * main device queue.
- */
 static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
 {
        struct request *rq = HWGROUP(drive)->rq;
        idetape_tape_t *tape = drive->driver_data;
        unsigned long flags;
        int error;
-       int remove_stage = 0;
-       idetape_stage_t *active_stage;
 
        debug_log(DBG_PROCS, "Enter %s\n", __func__);
 
@@ -815,58 +628,8 @@ static int idetape_end_request(ide_drive_t *drive, int uptodate, int nr_sects)
 
        spin_lock_irqsave(&tape->lock, flags);
 
-       /* The request was a pipelined data transfer request */
-       if (tape->active_data_rq == rq) {
-               active_stage = tape->active_stage;
-               tape->active_stage = NULL;
-               tape->active_data_rq = NULL;
-               tape->nr_pending_stages--;
-               if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
-                       remove_stage = 1;
-                       if (error) {
-                               set_bit(IDETAPE_FLAG_PIPELINE_ERR,
-                                       &tape->flags);
-                               if (error == IDETAPE_ERROR_EOD)
-                                       idetape_abort_pipeline(drive,
-                                                               active_stage);
-                       }
-               } else if (rq->cmd[0] & REQ_IDETAPE_READ) {
-                       if (error == IDETAPE_ERROR_EOD) {
-                               set_bit(IDETAPE_FLAG_PIPELINE_ERR,
-                                       &tape->flags);
-                               idetape_abort_pipeline(drive, active_stage);
-                       }
-               }
-               if (tape->next_stage != NULL) {
-                       idetape_activate_next_stage(drive);
-
-                       /* Insert the next request into the request queue. */
-                       (void)ide_do_drive_cmd(drive, tape->active_data_rq,
-                                               ide_end);
-               } else if (!error) {
-                       /*
-                        * This is a part of the feedback loop which tries to
-                        * find the optimum number of stages. We are starting
-                        * from a minimum maximum number of stages, and if we
-                        * sense that the pipeline is empty, we try to increase
-                        * it, until we reach the user compile time memory
-                        * limit.
-                        */
-                       int i = (tape->max_pipeline - tape->min_pipeline) / 10;
-
-                       tape->max_stages += max(i, 1);
-                       tape->max_stages = max(tape->max_stages,
-                                               tape->min_pipeline);
-                       tape->max_stages = min(tape->max_stages,
-                                               tape->max_pipeline);
-               }
-       }
        ide_end_drive_cmd(drive, 0, 0);
 
-       if (remove_stage)
-               idetape_remove_stage_head(drive);
-       if (tape->active_data_rq == NULL)
-               clear_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
        spin_unlock_irqrestore(&tape->lock, flags);
        return 0;
 }
@@ -899,7 +662,7 @@ static void idetape_create_request_sense_cmd(struct ide_atapi_pc *pc)
 
 static void idetape_init_rq(struct request *rq, u8 cmd)
 {
-       memset(rq, 0, sizeof(*rq));
+       blk_rq_init(NULL, rq);
        rq->cmd_type = REQ_TYPE_SPECIAL;
        rq->cmd[0] = cmd;
 }
@@ -993,7 +756,7 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
        stat = ide_read_status(drive);
 
        if (pc->flags & PC_FLAG_DMA_IN_PROGRESS) {
-               if (hwif->ide_dma_end(drive) || (stat & ERR_STAT)) {
+               if (hwif->dma_ops->dma_end(drive) || (stat & ERR_STAT)) {
                        /*
                         * A DMA error is sometimes expected. For example,
                         * if the tape is crossing a filemark during a
@@ -1083,10 +846,10 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
                return ide_do_reset(drive);
        }
        /* Get the number of bytes to transfer on this interrupt. */
-       bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
-                 hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
+       bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
+                 hwif->INB(hwif->io_ports.lbam_addr);
 
-       ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+       ireason = hwif->INB(hwif->io_ports.nsect_addr);
 
        if (ireason & CD) {
                printk(KERN_ERR "ide-tape: CoD != 0 in %s\n", __func__);
@@ -1108,7 +871,7 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
                                printk(KERN_ERR "ide-tape: The tape wants to "
                                        "send us more data than expected "
                                        "- discarding data\n");
-                               ide_atapi_discard_data(drive, bcount);
+                               ide_pad_transfer(drive, 0, bcount);
                                ide_set_handler(drive, &idetape_pc_intr,
                                                IDETAPE_WAIT_CMD, NULL);
                                return ide_started;
@@ -1117,16 +880,16 @@ static ide_startstop_t idetape_pc_intr(ide_drive_t *drive)
                                "data than expected - allowing transfer\n");
                }
                iobuf = &idetape_input_buffers;
-               xferfunc = hwif->atapi_input_bytes;
+               xferfunc = hwif->input_data;
        } else {
                iobuf = &idetape_output_buffers;
-               xferfunc = hwif->atapi_output_bytes;
+               xferfunc = hwif->output_data;
        }
 
        if (pc->bh)
                iobuf(drive, pc, bcount);
        else
-               xferfunc(drive, pc->cur_pos, bcount);
+               xferfunc(drive, NULL, pc->cur_pos, bcount);
 
        /* Update the current position */
        pc->xferred += bcount;
@@ -1190,12 +953,12 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
                                "yet DRQ isn't asserted\n");
                return startstop;
        }
-       ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+       ireason = hwif->INB(hwif->io_ports.nsect_addr);
        while (retries-- && ((ireason & CD) == 0 || (ireason & IO))) {
                printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while issuing "
                                "a packet command, retrying\n");
                udelay(100);
-               ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+               ireason = hwif->INB(hwif->io_ports.nsect_addr);
                if (retries == 0) {
                        printk(KERN_ERR "ide-tape: (IO,CoD != (0,1) while "
                                        "issuing a packet command, ignoring\n");
@@ -1213,10 +976,11 @@ static ide_startstop_t idetape_transfer_pc(ide_drive_t *drive)
 #ifdef CONFIG_BLK_DEV_IDEDMA
        /* Begin DMA, if necessary */
        if (pc->flags & PC_FLAG_DMA_IN_PROGRESS)
-               hwif->dma_start(drive);
+               hwif->dma_ops->dma_start(drive);
 #endif
        /* Send the actual packet */
-       HWIF(drive)->atapi_output_bytes(drive, pc->c, 12);
+       hwif->output_data(drive, NULL, pc->c, 12);
+
        return ide_started;
 }
 
@@ -1279,7 +1043,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
                ide_dma_off(drive);
        }
        if ((pc->flags & PC_FLAG_DMA_RECOMMENDED) && drive->using_dma)
-               dma_ok = !hwif->dma_setup(drive);
+               dma_ok = !hwif->dma_ops->dma_setup(drive);
 
        ide_pktcmd_tf_load(drive, IDE_TFLAG_NO_SELECT_MASK |
                           IDE_TFLAG_OUT_DEVICE, bcount, dma_ok);
@@ -1292,7 +1056,7 @@ static ide_startstop_t idetape_issue_pc(ide_drive_t *drive,
                                    IDETAPE_WAIT_CMD, NULL);
                return ide_started;
        } else {
-               hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
+               ide_execute_pkt_cmd(drive);
                return idetape_transfer_pc(drive);
        }
 }
@@ -1335,69 +1099,6 @@ static void idetape_create_mode_sense_cmd(struct ide_atapi_pc *pc, u8 page_code)
        pc->idetape_callback = &idetape_pc_callback;
 }
 
-static void idetape_calculate_speeds(ide_drive_t *drive)
-{
-       idetape_tape_t *tape = drive->driver_data;
-
-       if (time_after(jiffies,
-                       tape->controlled_pipeline_head_time + 120 * HZ)) {
-               tape->controlled_previous_pipeline_head =
-                       tape->controlled_last_pipeline_head;
-               tape->controlled_previous_head_time =
-                       tape->controlled_pipeline_head_time;
-               tape->controlled_last_pipeline_head = tape->pipeline_head;
-               tape->controlled_pipeline_head_time = jiffies;
-       }
-       if (time_after(jiffies, tape->controlled_pipeline_head_time + 60 * HZ))
-               tape->controlled_pipeline_head_speed = (tape->pipeline_head -
-                               tape->controlled_last_pipeline_head) * 32 * HZ /
-                               (jiffies - tape->controlled_pipeline_head_time);
-       else if (time_after(jiffies, tape->controlled_previous_head_time))
-               tape->controlled_pipeline_head_speed = (tape->pipeline_head -
-                               tape->controlled_previous_pipeline_head) * 32 *
-                       HZ / (jiffies - tape->controlled_previous_head_time);
-
-       if (tape->nr_pending_stages < tape->max_stages/*- 1 */) {
-               /* -1 for read mode error recovery */
-               if (time_after(jiffies, tape->uncontrolled_previous_head_time +
-                                       10 * HZ)) {
-                       tape->uncontrolled_pipeline_head_time = jiffies;
-                       tape->uncontrolled_pipeline_head_speed =
-                               (tape->pipeline_head -
-                                tape->uncontrolled_previous_pipeline_head) *
-                               32 * HZ / (jiffies -
-                                       tape->uncontrolled_previous_head_time);
-               }
-       } else {
-               tape->uncontrolled_previous_head_time = jiffies;
-               tape->uncontrolled_previous_pipeline_head = tape->pipeline_head;
-               if (time_after(jiffies, tape->uncontrolled_pipeline_head_time +
-                                       30 * HZ))
-                       tape->uncontrolled_pipeline_head_time = jiffies;
-
-       }
-       tape->pipeline_head_speed = max(tape->uncontrolled_pipeline_head_speed,
-                                       tape->controlled_pipeline_head_speed);
-
-       if (tape->speed_control == 1) {
-               if (tape->nr_pending_stages >= tape->max_stages / 2)
-                       tape->max_insert_speed = tape->pipeline_head_speed +
-                               (1100 - tape->pipeline_head_speed) * 2 *
-                               (tape->nr_pending_stages - tape->max_stages / 2)
-                               / tape->max_stages;
-               else
-                       tape->max_insert_speed = 500 +
-                               (tape->pipeline_head_speed - 500) * 2 *
-                               tape->nr_pending_stages / tape->max_stages;
-
-               if (tape->nr_pending_stages >= tape->max_stages * 99 / 100)
-                       tape->max_insert_speed = 5000;
-       } else
-               tape->max_insert_speed = tape->speed_control;
-
-       tape->max_insert_speed = max(tape->max_insert_speed, 500);
-}
-
 static ide_startstop_t idetape_media_access_finished(ide_drive_t *drive)
 {
        idetape_tape_t *tape = drive->driver_data;
@@ -1432,17 +1133,7 @@ static ide_startstop_t idetape_rw_callback(ide_drive_t *drive)
        int blocks = tape->pc->xferred / tape->blk_size;
 
        tape->avg_size += blocks * tape->blk_size;
-       tape->insert_size += blocks * tape->blk_size;
-       if (tape->insert_size > 1024 * 1024)
-               tape->measure_insert_time = 1;
-       if (tape->measure_insert_time) {
-               tape->measure_insert_time = 0;
-               tape->insert_time = jiffies;
-               tape->insert_size = 0;
-       }
-       if (time_after(jiffies, tape->insert_time))
-               tape->insert_speed = tape->insert_size / 1024 * HZ /
-                                       (jiffies - tape->insert_time);
+
        if (time_after_eq(jiffies, tape->avg_time + HZ)) {
                tape->avg_speed = tape->avg_size * HZ /
                                (jiffies - tape->avg_time) / 1024;
@@ -1475,7 +1166,7 @@ static void idetape_create_read_cmd(idetape_tape_t *tape,
        pc->buf = NULL;
        pc->buf_size = length * tape->blk_size;
        pc->req_xfer = pc->buf_size;
-       if (pc->req_xfer == tape->stage_size)
+       if (pc->req_xfer == tape->buffer_size)
                pc->flags |= PC_FLAG_DMA_RECOMMENDED;
 }
 
@@ -1495,7 +1186,7 @@ static void idetape_create_write_cmd(idetape_tape_t *tape,
        pc->buf = NULL;
        pc->buf_size = length * tape->blk_size;
        pc->req_xfer = pc->buf_size;
-       if (pc->req_xfer == tape->stage_size)
+       if (pc->req_xfer == tape->buffer_size)
                pc->flags |= PC_FLAG_DMA_RECOMMENDED;
 }
 
@@ -1547,10 +1238,6 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
                drive->post_reset = 0;
        }
 
-       if (time_after(jiffies, tape->insert_time))
-               tape->insert_speed = tape->insert_size / 1024 * HZ /
-                                       (jiffies - tape->insert_time);
-       idetape_calculate_speeds(drive);
        if (!test_and_clear_bit(IDETAPE_FLAG_IGNORE_DSC, &tape->flags) &&
            (stat & SEEK_STAT) == 0) {
                if (postponed_rq == NULL) {
@@ -1574,16 +1261,12 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive,
                return ide_stopped;
        }
        if (rq->cmd[0] & REQ_IDETAPE_READ) {
-               tape->buffer_head++;
-               tape->postpone_cnt = 0;
                pc = idetape_next_pc_storage(drive);
                idetape_create_read_cmd(tape, pc, rq->current_nr_sectors,
                                        (struct idetape_bh *)rq->special);
                goto out;
        }
        if (rq->cmd[0] & REQ_IDETAPE_WRITE) {
-               tape->buffer_head++;
-               tape->postpone_cnt = 0;
                pc = idetape_next_pc_storage(drive);
                idetape_create_write_cmd(tape, pc, rq->current_nr_sectors,
                                         (struct idetape_bh *)rq->special);
@@ -1604,111 +1287,91 @@ out:
        return idetape_issue_pc(drive, pc);
 }
 
-/* Pipeline related functions */
-static inline int idetape_pipeline_active(idetape_tape_t *tape)
-{
-       int rc1, rc2;
-
-       rc1 = test_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
-       rc2 = (tape->active_data_rq != NULL);
-       return rc1;
-}
-
 /*
- * The function below uses __get_free_page to allocate a pipeline stage, along
- * with all the necessary small buffers which together make a buffer of size
- * tape->stage_size (or a bit more). We attempt to combine sequential pages as
+ * The function below uses __get_free_pages to allocate a data buffer of size
+ * tape->buffer_size (or a bit more). We attempt to combine sequential pages as
  * much as possible.
  *
- * It returns a pointer to the new allocated stage, or NULL if we can't (or
- * don't want to) allocate a stage.
- *
- * Pipeline stages are optional and are used to increase performance. If we
- * can't allocate them, we'll manage without them.
+ * It returns a pointer to the newly allocated buffer, or NULL in case of
+ * failure.
  */
-static idetape_stage_t *__idetape_kmalloc_stage(idetape_tape_t *tape, int full,
-                                               int clear)
+static struct idetape_bh *ide_tape_kmalloc_buffer(idetape_tape_t *tape,
+                                                 int full, int clear)
 {
-       idetape_stage_t *stage;
-       struct idetape_bh *prev_bh, *bh;
-       int pages = tape->pages_per_stage;
+       struct idetape_bh *prev_bh, *bh, *merge_bh;
+       int pages = tape->pages_per_buffer;
+       unsigned int order, b_allocd;
        char *b_data = NULL;
 
-       stage = kmalloc(sizeof(idetape_stage_t), GFP_KERNEL);
-       if (!stage)
-               return NULL;
-       stage->next = NULL;
-
-       stage->bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
-       bh = stage->bh;
+       merge_bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
+       bh = merge_bh;
        if (bh == NULL)
                goto abort;
-       bh->b_reqnext = NULL;
-       bh->b_data = (char *) __get_free_page(GFP_KERNEL);
+
+       order = fls(pages) - 1;
+       bh->b_data = (char *) __get_free_pages(GFP_KERNEL, order);
        if (!bh->b_data)
                goto abort;
+       b_allocd = (1 << order) * PAGE_SIZE;
+       pages &= (order-1);
+
        if (clear)
-               memset(bh->b_data, 0, PAGE_SIZE);
-       bh->b_size = PAGE_SIZE;
+               memset(bh->b_data, 0, b_allocd);
+       bh->b_reqnext = NULL;
+       bh->b_size = b_allocd;
        atomic_set(&bh->b_count, full ? bh->b_size : 0);
 
-       while (--pages) {
-               b_data = (char *) __get_free_page(GFP_KERNEL);
+       while (pages) {
+               order = fls(pages) - 1;
+               b_data = (char *) __get_free_pages(GFP_KERNEL, order);
                if (!b_data)
                        goto abort;
+               b_allocd = (1 << order) * PAGE_SIZE;
+
                if (clear)
-                       memset(b_data, 0, PAGE_SIZE);
-               if (bh->b_data == b_data + PAGE_SIZE) {
-                       bh->b_size += PAGE_SIZE;
-                       bh->b_data -= PAGE_SIZE;
+                       memset(b_data, 0, b_allocd);
+
+               /* newly allocated page frames below buffer header or ...*/
+               if (bh->b_data == b_data + b_allocd) {
+                       bh->b_size += b_allocd;
+                       bh->b_data -= b_allocd;
                        if (full)
-                               atomic_add(PAGE_SIZE, &bh->b_count);
+                               atomic_add(b_allocd, &bh->b_count);
                        continue;
                }
+               /* they are above the header */
                if (b_data == bh->b_data + bh->b_size) {
-                       bh->b_size += PAGE_SIZE;
+                       bh->b_size += b_allocd;
                        if (full)
-                               atomic_add(PAGE_SIZE, &bh->b_count);
+                               atomic_add(b_allocd, &bh->b_count);
                        continue;
                }
                prev_bh = bh;
                bh = kmalloc(sizeof(struct idetape_bh), GFP_KERNEL);
                if (!bh) {
-                       free_page((unsigned long) b_data);
+                       free_pages((unsigned long) b_data, order);
                        goto abort;
                }
                bh->b_reqnext = NULL;
                bh->b_data = b_data;
-               bh->b_size = PAGE_SIZE;
+               bh->b_size = b_allocd;
                atomic_set(&bh->b_count, full ? bh->b_size : 0);
                prev_bh->b_reqnext = bh;
+
+               pages &= (order-1);
        }
+
        bh->b_size -= tape->excess_bh_size;
        if (full)
                atomic_sub(tape->excess_bh_size, &bh->b_count);
-       return stage;
+       return merge_bh;
 abort:
-       __idetape_kfree_stage(stage);
+       ide_tape_kfree_buffer(tape);
        return NULL;
 }
 
-static idetape_stage_t *idetape_kmalloc_stage(idetape_tape_t *tape)
-{
-       idetape_stage_t *cache_stage = tape->cache_stage;
-
-       debug_log(DBG_PROCS, "Enter %s\n", __func__);
-
-       if (tape->nr_stages >= tape->max_stages)
-               return NULL;
-       if (cache_stage != NULL) {
-               tape->cache_stage = NULL;
-               return cache_stage;
-       }
-       return __idetape_kmalloc_stage(tape, 0, 0);
-}
-
 static int idetape_copy_stage_from_user(idetape_tape_t *tape,
-               idetape_stage_t *stage, const char __user *buf, int n)
+                                       const char __user *buf, int n)
 {
        struct idetape_bh *bh = tape->bh;
        int count;
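ide_tape_kmalloc_buffer() above now grabs groups of contiguous pages with __get_free_pages() instead of one page at a time, sizing each allocation by the largest power-of-two order that fits the remaining page count. The order arithmetic in a standalone sketch (not code from this commit; assumes pages >= 1):

#include <linux/bitops.h>
#include <linux/gfp.h>

static char *example_alloc_chunk(unsigned int pages, unsigned int *bytes)
{
	/* largest power-of-two number of pages not exceeding 'pages',
	 * e.g. pages == 6 -> fls(6) - 1 == 2 -> 4 contiguous pages */
	unsigned int order = fls(pages) - 1;
	char *p = (char *)__get_free_pages(GFP_KERNEL, order);

	if (p)
		*bytes = (1 << order) * PAGE_SIZE;
	return p;
}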
@@ -1740,7 +1403,7 @@ static int idetape_copy_stage_from_user(idetape_tape_t *tape,
 }
 
 static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
-               idetape_stage_t *stage, int n)
+                                     int n)
 {
        struct idetape_bh *bh = tape->bh;
        int count;
@@ -1771,11 +1434,11 @@ static int idetape_copy_stage_to_user(idetape_tape_t *tape, char __user *buf,
        return ret;
 }
 
-static void idetape_init_merge_stage(idetape_tape_t *tape)
+static void idetape_init_merge_buffer(idetape_tape_t *tape)
 {
-       struct idetape_bh *bh = tape->merge_stage->bh;
+       struct idetape_bh *bh = tape->merge_bh;
+       tape->bh = tape->merge_bh;
 
-       tape->bh = bh;
        if (tape->chrdev_dir == IDETAPE_DIR_WRITE)
                atomic_set(&bh->b_count, 0);
        else {
@@ -1784,61 +1447,6 @@ static void idetape_init_merge_stage(idetape_tape_t *tape)
        }
 }
 
-static void idetape_switch_buffers(idetape_tape_t *tape, idetape_stage_t *stage)
-{
-       struct idetape_bh *tmp;
-
-       tmp = stage->bh;
-       stage->bh = tape->merge_stage->bh;
-       tape->merge_stage->bh = tmp;
-       idetape_init_merge_stage(tape);
-}
-
-/* Add a new stage at the end of the pipeline. */
-static void idetape_add_stage_tail(ide_drive_t *drive, idetape_stage_t *stage)
-{
-       idetape_tape_t *tape = drive->driver_data;
-       unsigned long flags;
-
-       debug_log(DBG_PROCS, "Enter %s\n", __func__);
-
-       spin_lock_irqsave(&tape->lock, flags);
-       stage->next = NULL;
-       if (tape->last_stage != NULL)
-               tape->last_stage->next = stage;
-       else
-               tape->first_stage = stage;
-               tape->next_stage  = stage;
-       tape->last_stage = stage;
-       if (tape->next_stage == NULL)
-               tape->next_stage = tape->last_stage;
-       tape->nr_stages++;
-       tape->nr_pending_stages++;
-       spin_unlock_irqrestore(&tape->lock, flags);
-}
-
-/* Install a completion in a pending request and sleep until it is serviced. The
- * caller should ensure that the request will not be serviced before we install
- * the completion (usually by disabling interrupts).
- */
-static void idetape_wait_for_request(ide_drive_t *drive, struct request *rq)
-{
-       DECLARE_COMPLETION_ONSTACK(wait);
-       idetape_tape_t *tape = drive->driver_data;
-
-       if (rq == NULL || !blk_special_request(rq)) {
-               printk(KERN_ERR "ide-tape: bug: Trying to sleep on non-valid"
-                                " request\n");
-               return;
-       }
-       rq->end_io_data = &wait;
-       rq->end_io = blk_end_sync_rq;
-       spin_unlock_irq(&tape->lock);
-       wait_for_completion(&wait);
-       /* The stage and its struct request have been deallocated */
-       spin_lock_irq(&tape->lock);
-}
-
 static ide_startstop_t idetape_read_position_callback(ide_drive_t *drive)
 {
        idetape_tape_t *tape = drive->driver_data;
@@ -1907,7 +1515,7 @@ static void idetape_create_test_unit_ready_cmd(struct ide_atapi_pc *pc)
  * to the request list without waiting for it to be serviced! In that case, we
  * usually use idetape_queue_pc_head().
  */
-static int __idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
+static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
 {
        struct ide_tape_obj *tape = drive->driver_data;
        struct request rq;
@@ -1939,7 +1547,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
        timeout += jiffies;
        while (time_before(jiffies, timeout)) {
                idetape_create_test_unit_ready_cmd(&pc);
-               if (!__idetape_queue_pc_tail(drive, &pc))
+               if (!idetape_queue_pc_tail(drive, &pc))
                        return 0;
                if ((tape->sense_key == 2 && tape->asc == 4 && tape->ascq == 2)
                    || (tape->asc == 0x3A)) {
@@ -1948,7 +1556,7 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
                                return -ENOMEDIUM;
                        idetape_create_load_unload_cmd(drive, &pc,
                                                        IDETAPE_LU_LOAD_MASK);
-                       __idetape_queue_pc_tail(drive, &pc);
+                       idetape_queue_pc_tail(drive, &pc);
                        load_attempted = 1;
                /* not about to be ready */
                } else if (!(tape->sense_key == 2 && tape->asc == 4 &&
@@ -1959,11 +1567,6 @@ static int idetape_wait_ready(ide_drive_t *drive, unsigned long timeout)
        return -EIO;
 }
 
-static int idetape_queue_pc_tail(ide_drive_t *drive, struct ide_atapi_pc *pc)
-{
-       return __idetape_queue_pc_tail(drive, pc);
-}
-
 static int idetape_flush_tape_buffers(ide_drive_t *drive)
 {
        struct ide_atapi_pc pc;
@@ -2029,50 +1632,21 @@ static int idetape_create_prevent_cmd(ide_drive_t *drive,
        return 1;
 }
 
-static int __idetape_discard_read_pipeline(ide_drive_t *drive)
+static void __ide_tape_discard_merge_buffer(ide_drive_t *drive)
 {
        idetape_tape_t *tape = drive->driver_data;
-       unsigned long flags;
-       int cnt;
 
        if (tape->chrdev_dir != IDETAPE_DIR_READ)
-               return 0;
+               return;
 
-       /* Remove merge stage. */
-       cnt = tape->merge_stage_size / tape->blk_size;
-       if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
-               ++cnt;          /* Filemarks count as 1 sector */
-       tape->merge_stage_size = 0;
-       if (tape->merge_stage != NULL) {
-               __idetape_kfree_stage(tape->merge_stage);
-               tape->merge_stage = NULL;
+       clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
+       tape->merge_bh_size = 0;
+       if (tape->merge_bh != NULL) {
+               ide_tape_kfree_buffer(tape);
+               tape->merge_bh = NULL;
        }
 
-       /* Clear pipeline flags. */
-       clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
        tape->chrdev_dir = IDETAPE_DIR_NONE;
-
-       /* Remove pipeline stages. */
-       if (tape->first_stage == NULL)
-               return 0;
-
-       spin_lock_irqsave(&tape->lock, flags);
-       tape->next_stage = NULL;
-       if (idetape_pipeline_active(tape))
-               idetape_wait_for_request(drive, tape->active_data_rq);
-       spin_unlock_irqrestore(&tape->lock, flags);
-
-       while (tape->first_stage != NULL) {
-               struct request *rq_ptr = &tape->first_stage->rq;
-
-               cnt += rq_ptr->nr_sectors - rq_ptr->current_nr_sectors;
-               if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
-                       ++cnt;
-               idetape_remove_stage_head(drive);
-       }
-       tape->nr_pending_stages = 0;
-       tape->max_stages = tape->min_pipeline;
-       return cnt;
 }
 
 /*
@@ -2089,7 +1663,7 @@ static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
        struct ide_atapi_pc pc;
 
        if (tape->chrdev_dir == IDETAPE_DIR_READ)
-               __idetape_discard_read_pipeline(drive);
+               __ide_tape_discard_merge_buffer(drive);
        idetape_wait_ready(drive, 60 * 5 * HZ);
        idetape_create_locate_cmd(drive, &pc, block, partition, skip);
        retval = idetape_queue_pc_tail(drive, &pc);
@@ -2100,20 +1674,19 @@ static int idetape_position_tape(ide_drive_t *drive, unsigned int block,
        return (idetape_queue_pc_tail(drive, &pc));
 }
 
-static void idetape_discard_read_pipeline(ide_drive_t *drive,
+static void ide_tape_discard_merge_buffer(ide_drive_t *drive,
                                          int restore_position)
 {
        idetape_tape_t *tape = drive->driver_data;
-       int cnt;
        int seek, position;
 
-       cnt = __idetape_discard_read_pipeline(drive);
+       __ide_tape_discard_merge_buffer(drive);
        if (restore_position) {
                position = idetape_read_position(drive);
-               seek = position > cnt ? position - cnt : 0;
+               seek = position > 0 ? position : 0;
                if (idetape_position_tape(drive, seek, 0, 0)) {
                        printk(KERN_INFO "ide-tape: %s: position_tape failed in"
-                                        " discard_pipeline()\n", tape->name);
+                                        " %s\n", tape->name, __func__);
                        return;
                }
        }
@@ -2131,12 +1704,6 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
 
        debug_log(DBG_SENSE, "%s: cmd=%d\n", __func__, cmd);
 
-       if (idetape_pipeline_active(tape)) {
-               printk(KERN_ERR "ide-tape: bug: the pipeline is active in %s\n",
-                               __func__);
-               return (0);
-       }
-
        idetape_init_rq(&rq, cmd);
        rq.rq_disk = tape->disk;
        rq.special = (void *)bh;
@@ -2148,27 +1715,13 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int blocks,
        if ((cmd & (REQ_IDETAPE_READ | REQ_IDETAPE_WRITE)) == 0)
                return 0;
 
-       if (tape->merge_stage)
-               idetape_init_merge_stage(tape);
+       if (tape->merge_bh)
+               idetape_init_merge_buffer(tape);
        if (rq.errors == IDETAPE_ERROR_GENERAL)
                return -EIO;
        return (tape->blk_size * (blocks-rq.current_nr_sectors));
 }
 
-/* start servicing the pipeline stages, starting from tape->next_stage. */
-static void idetape_plug_pipeline(ide_drive_t *drive)
-{
-       idetape_tape_t *tape = drive->driver_data;
-
-       if (tape->next_stage == NULL)
-               return;
-       if (!idetape_pipeline_active(tape)) {
-               set_bit(IDETAPE_FLAG_PIPELINE_ACTIVE, &tape->flags);
-               idetape_activate_next_stage(drive);
-               (void) ide_do_drive_cmd(drive, tape->active_data_rq, ide_end);
-       }
-}
-
 static void idetape_create_inquiry_cmd(struct ide_atapi_pc *pc)
 {
        idetape_init_pc(pc);
@@ -2206,135 +1759,39 @@ static void idetape_create_space_cmd(struct ide_atapi_pc *pc, int count, u8 cmd)
        pc->idetape_callback = &idetape_pc_callback;
 }
 
-static void idetape_wait_first_stage(ide_drive_t *drive)
-{
-       idetape_tape_t *tape = drive->driver_data;
-       unsigned long flags;
-
-       if (tape->first_stage == NULL)
-               return;
-       spin_lock_irqsave(&tape->lock, flags);
-       if (tape->active_stage == tape->first_stage)
-               idetape_wait_for_request(drive, tape->active_data_rq);
-       spin_unlock_irqrestore(&tape->lock, flags);
-}
-
-/*
- * Try to add a character device originated write request to our pipeline. In
- * case we don't succeed, we revert to non-pipelined operation mode for this
- * request. In order to accomplish that, we
- *
- * 1. Try to allocate a new pipeline stage.
- * 2. If we can't, wait for more and more requests to be serviced and try again
- * each time.
- * 3. If we still can't allocate a stage, fallback to non-pipelined operation
- * mode for this request.
- */
+/* Queue up a character device originated write request. */
 static int idetape_add_chrdev_write_request(ide_drive_t *drive, int blocks)
 {
        idetape_tape_t *tape = drive->driver_data;
-       idetape_stage_t *new_stage;
-       unsigned long flags;
-       struct request *rq;
 
        debug_log(DBG_CHRDEV, "Enter %s\n", __func__);
 
-       /* Attempt to allocate a new stage. Beware possible race conditions. */
-       while ((new_stage = idetape_kmalloc_stage(tape)) == NULL) {
-               spin_lock_irqsave(&tape->lock, flags);
-               if (idetape_pipeline_active(tape)) {
-                       idetape_wait_for_request(drive, tape->active_data_rq);
-                       spin_unlock_irqrestore(&tape->lock, flags);
-               } else {
-                       spin_unlock_irqrestore(&tape->lock, flags);
-                       idetape_plug_pipeline(drive);
-                       if (idetape_pipeline_active(tape))
-                               continue;
-                       /*
-                        * The machine is short on memory. Fallback to non-
-                        * pipelined operation mode for this request.
-                        */
-                       return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
-                                               blocks, tape->merge_stage->bh);
-               }
-       }
-       rq = &new_stage->rq;
-       idetape_init_rq(rq, REQ_IDETAPE_WRITE);
-       /* Doesn't actually matter - We always assume sequential access */
-       rq->sector = tape->first_frame;
-       rq->current_nr_sectors = blocks;
-       rq->nr_sectors = blocks;
-
-       idetape_switch_buffers(tape, new_stage);
-       idetape_add_stage_tail(drive, new_stage);
-       tape->pipeline_head++;
-       idetape_calculate_speeds(drive);
-
-       /*
-        * Estimate whether the tape has stopped writing by checking if our
-        * write pipeline is currently empty. If we are not writing anymore,
-        * wait for the pipeline to be almost completely full (90%) before
-        * starting to service requests, so that we will be able to keep up with
-        * the higher speeds of the tape.
-        */
-       if (!idetape_pipeline_active(tape)) {
-               if (tape->nr_stages >= tape->max_stages * 9 / 10 ||
-                       tape->nr_stages >= tape->max_stages -
-                       tape->uncontrolled_pipeline_head_speed * 3 * 1024 /
-                       tape->blk_size) {
-                       tape->measure_insert_time = 1;
-                       tape->insert_time = jiffies;
-                       tape->insert_size = 0;
-                       tape->insert_speed = 0;
-                       idetape_plug_pipeline(drive);
-               }
-       }
-       if (test_and_clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
-               /* Return a deferred error */
-               return -EIO;
-       return blocks;
+       return idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE,
+                                    blocks, tape->merge_bh);
 }
 
-/*
- * Wait until all pending pipeline requests are serviced. Typically called on
- * device close.
- */
-static void idetape_wait_for_pipeline(ide_drive_t *drive)
-{
-       idetape_tape_t *tape = drive->driver_data;
-       unsigned long flags;
-
-       while (tape->next_stage || idetape_pipeline_active(tape)) {
-               idetape_plug_pipeline(drive);
-               spin_lock_irqsave(&tape->lock, flags);
-               if (idetape_pipeline_active(tape))
-                       idetape_wait_for_request(drive, tape->active_data_rq);
-               spin_unlock_irqrestore(&tape->lock, flags);
-       }
-}
-
-static void idetape_empty_write_pipeline(ide_drive_t *drive)
+static void ide_tape_flush_merge_buffer(ide_drive_t *drive)
 {
        idetape_tape_t *tape = drive->driver_data;
        int blocks, min;
        struct idetape_bh *bh;
 
        if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
-               printk(KERN_ERR "ide-tape: bug: Trying to empty write pipeline,"
+               printk(KERN_ERR "ide-tape: bug: Trying to empty merge buffer"
                                " but we are not writing.\n");
                return;
        }
-       if (tape->merge_stage_size > tape->stage_size) {
+       if (tape->merge_bh_size > tape->buffer_size) {
                printk(KERN_ERR "ide-tape: bug: merge_buffer too big\n");
-               tape->merge_stage_size = tape->stage_size;
+               tape->merge_bh_size = tape->buffer_size;
        }
-       if (tape->merge_stage_size) {
-               blocks = tape->merge_stage_size / tape->blk_size;
-               if (tape->merge_stage_size % tape->blk_size) {
+       if (tape->merge_bh_size) {
+               blocks = tape->merge_bh_size / tape->blk_size;
+               if (tape->merge_bh_size % tape->blk_size) {
                        unsigned int i;
 
                        blocks++;
-                       i = tape->blk_size - tape->merge_stage_size %
+                       i = tape->blk_size - tape->merge_bh_size %
                                tape->blk_size;
                        bh = tape->bh->b_reqnext;
                        while (bh) {
@@ -2358,74 +1815,33 @@ static void idetape_empty_write_pipeline(ide_drive_t *drive)
                        }
                }
                (void) idetape_add_chrdev_write_request(drive, blocks);
-               tape->merge_stage_size = 0;
+               tape->merge_bh_size = 0;
        }
-       idetape_wait_for_pipeline(drive);
-       if (tape->merge_stage != NULL) {
-               __idetape_kfree_stage(tape->merge_stage);
-               tape->merge_stage = NULL;
+       if (tape->merge_bh != NULL) {
+               ide_tape_kfree_buffer(tape);
+               tape->merge_bh = NULL;
        }
-       clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
        tape->chrdev_dir = IDETAPE_DIR_NONE;
-
-       /*
-        * On the next backup, perform the feedback loop again. (I don't want to
-        * keep sense information between backups, as some systems are
-        * constantly on, and the system load can be totally different on the
-        * next backup).
-        */
-       tape->max_stages = tape->min_pipeline;
-       if (tape->first_stage != NULL ||
-           tape->next_stage != NULL ||
-           tape->last_stage != NULL ||
-           tape->nr_stages != 0) {
-               printk(KERN_ERR "ide-tape: ide-tape pipeline bug, "
-                       "first_stage %p, next_stage %p, "
-                       "last_stage %p, nr_stages %d\n",
-                       tape->first_stage, tape->next_stage,
-                       tape->last_stage, tape->nr_stages);
-       }
 }
 
-static void idetape_restart_speed_control(ide_drive_t *drive)
+static int idetape_init_read(ide_drive_t *drive)
 {
        idetape_tape_t *tape = drive->driver_data;
-
-       tape->restart_speed_control_req = 0;
-       tape->pipeline_head = 0;
-       tape->controlled_last_pipeline_head = 0;
-       tape->controlled_previous_pipeline_head = 0;
-       tape->uncontrolled_previous_pipeline_head = 0;
-       tape->controlled_pipeline_head_speed = 5000;
-       tape->pipeline_head_speed = 5000;
-       tape->uncontrolled_pipeline_head_speed = 0;
-       tape->controlled_pipeline_head_time =
-               tape->uncontrolled_pipeline_head_time = jiffies;
-       tape->controlled_previous_head_time =
-               tape->uncontrolled_previous_head_time = jiffies;
-}
-
-static int idetape_init_read(ide_drive_t *drive, int max_stages)
-{
-       idetape_tape_t *tape = drive->driver_data;
-       idetape_stage_t *new_stage;
-       struct request rq;
        int bytes_read;
-       u16 blocks = *(u16 *)&tape->caps[12];
 
        /* Initialize read operation */
        if (tape->chrdev_dir != IDETAPE_DIR_READ) {
                if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
-                       idetape_empty_write_pipeline(drive);
+                       ide_tape_flush_merge_buffer(drive);
                        idetape_flush_tape_buffers(drive);
                }
-               if (tape->merge_stage || tape->merge_stage_size) {
-                       printk(KERN_ERR "ide-tape: merge_stage_size should be"
+               if (tape->merge_bh || tape->merge_bh_size) {
+                       printk(KERN_ERR "ide-tape: merge_bh_size should be"
                                         " 0 now\n");
-                       tape->merge_stage_size = 0;
+                       tape->merge_bh_size = 0;
                }
-               tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
-               if (!tape->merge_stage)
+               tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
+               if (!tape->merge_bh)
                        return -ENOMEM;
                tape->chrdev_dir = IDETAPE_DIR_READ;
 
@@ -2438,54 +1854,23 @@ static int idetape_init_read(ide_drive_t *drive, int max_stages)
                if (drive->dsc_overlap) {
                        bytes_read = idetape_queue_rw_tail(drive,
                                                        REQ_IDETAPE_READ, 0,
-                                                       tape->merge_stage->bh);
+                                                       tape->merge_bh);
                        if (bytes_read < 0) {
-                               __idetape_kfree_stage(tape->merge_stage);
-                               tape->merge_stage = NULL;
+                               ide_tape_kfree_buffer(tape);
+                               tape->merge_bh = NULL;
                                tape->chrdev_dir = IDETAPE_DIR_NONE;
                                return bytes_read;
                        }
                }
        }
-       if (tape->restart_speed_control_req)
-               idetape_restart_speed_control(drive);
-       idetape_init_rq(&rq, REQ_IDETAPE_READ);
-       rq.sector = tape->first_frame;
-       rq.nr_sectors = blocks;
-       rq.current_nr_sectors = blocks;
-       if (!test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags) &&
-           tape->nr_stages < max_stages) {
-               new_stage = idetape_kmalloc_stage(tape);
-               while (new_stage != NULL) {
-                       new_stage->rq = rq;
-                       idetape_add_stage_tail(drive, new_stage);
-                       if (tape->nr_stages >= max_stages)
-                               break;
-                       new_stage = idetape_kmalloc_stage(tape);
-               }
-       }
-       if (!idetape_pipeline_active(tape)) {
-               if (tape->nr_pending_stages >= 3 * max_stages / 4) {
-                       tape->measure_insert_time = 1;
-                       tape->insert_time = jiffies;
-                       tape->insert_size = 0;
-                       tape->insert_speed = 0;
-                       idetape_plug_pipeline(drive);
-               }
-       }
+
        return 0;
 }
 
-/*
- * Called from idetape_chrdev_read() to service a character device read request
- * and add read-ahead requests to our pipeline.
- */
+/* called from idetape_chrdev_read() to service a chrdev read request. */
 static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
 {
        idetape_tape_t *tape = drive->driver_data;
-       unsigned long flags;
-       struct request *rq_ptr;
-       int bytes_read;
 
        debug_log(DBG_PROCS, "Enter %s, %d blocks\n", __func__, blocks);
 
@@ -2493,39 +1878,10 @@ static int idetape_add_chrdev_read_request(ide_drive_t *drive, int blocks)
        if (test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
                return 0;
 
-       /* Wait for the next block to reach the head of the pipeline. */
-       idetape_init_read(drive, tape->max_stages);
-       if (tape->first_stage == NULL) {
-               if (test_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags))
-                       return 0;
-               return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
-                                       tape->merge_stage->bh);
-       }
-       idetape_wait_first_stage(drive);
-       rq_ptr = &tape->first_stage->rq;
-       bytes_read = tape->blk_size * (rq_ptr->nr_sectors -
-                                       rq_ptr->current_nr_sectors);
-       rq_ptr->nr_sectors = 0;
-       rq_ptr->current_nr_sectors = 0;
+       idetape_init_read(drive);
 
-       if (rq_ptr->errors == IDETAPE_ERROR_EOD)
-               return 0;
-       else {
-               idetape_switch_buffers(tape, tape->first_stage);
-               if (rq_ptr->errors == IDETAPE_ERROR_FILEMARK)
-                       set_bit(IDETAPE_FLAG_FILEMARK, &tape->flags);
-               spin_lock_irqsave(&tape->lock, flags);
-               idetape_remove_stage_head(drive);
-               spin_unlock_irqrestore(&tape->lock, flags);
-               tape->pipeline_head++;
-               idetape_calculate_speeds(drive);
-       }
-       if (bytes_read > blocks * tape->blk_size) {
-               printk(KERN_ERR "ide-tape: bug: trying to return more bytes"
-                               " than requested\n");
-               bytes_read = blocks * tape->blk_size;
-       }
-       return (bytes_read);
+       return idetape_queue_rw_tail(drive, REQ_IDETAPE_READ, blocks,
+                                    tape->merge_bh);
 }
 
 static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
@@ -2537,8 +1893,8 @@ static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
        while (bcount) {
                unsigned int count;
 
-               bh = tape->merge_stage->bh;
-               count = min(tape->stage_size, bcount);
+               bh = tape->merge_bh;
+               count = min(tape->buffer_size, bcount);
                bcount -= count;
                blocks = count / tape->blk_size;
                while (count) {
@@ -2549,31 +1905,10 @@ static void idetape_pad_zeros(ide_drive_t *drive, int bcount)
                        bh = bh->b_reqnext;
                }
                idetape_queue_rw_tail(drive, REQ_IDETAPE_WRITE, blocks,
-                                     tape->merge_stage->bh);
+                                     tape->merge_bh);
        }
 }
 
-static int idetape_pipeline_size(ide_drive_t *drive)
-{
-       idetape_tape_t *tape = drive->driver_data;
-       idetape_stage_t *stage;
-       struct request *rq;
-       int size = 0;
-
-       idetape_wait_for_pipeline(drive);
-       stage = tape->first_stage;
-       while (stage != NULL) {
-               rq = &stage->rq;
-               size += tape->blk_size * (rq->nr_sectors -
-                               rq->current_nr_sectors);
-               if (rq->errors == IDETAPE_ERROR_FILEMARK)
-                       size += tape->blk_size;
-               stage = stage->next;
-       }
-       size += tape->merge_stage_size;
-       return size;
-}
-
 /*
  * Rewinds the tape to the Beginning Of the current Partition (BOP). We
  * currently support only one partition.
@@ -2619,11 +1954,10 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
                if (copy_from_user(&config, argp, sizeof(config)))
                        return -EFAULT;
                tape->best_dsc_rw_freq = config.dsc_rw_frequency;
-               tape->max_stages = config.nr_stages;
                break;
        case 0x0350:
                config.dsc_rw_frequency = (int) tape->best_dsc_rw_freq;
-               config.nr_stages = tape->max_stages;
+               config.nr_stages = 1;
                if (copy_to_user(argp, &config, sizeof(config)))
                        return -EFAULT;
                break;
@@ -2633,19 +1967,11 @@ static int idetape_blkdev_ioctl(ide_drive_t *drive, unsigned int cmd,
        return 0;
 }
 
-/*
- * The function below is now a bit more complicated than just passing the
- * command to the tape since we may have crossed some filemarks during our
- * pipelined read-ahead mode. As a minor side effect, the pipeline enables us to
- * support MTFSFM when the filemark is in our internal pipeline even if the tape
- * doesn't support spacing over filemarks in the reverse direction.
- */
 static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
                                        int mt_count)
 {
        idetape_tape_t *tape = drive->driver_data;
        struct ide_atapi_pc pc;
-       unsigned long flags;
        int retval, count = 0;
        int sprev = !!(tape->caps[4] & 0x20);
 
@@ -2658,48 +1984,12 @@ static int idetape_space_over_filemarks(ide_drive_t *drive, short mt_op,
        }
 
        if (tape->chrdev_dir == IDETAPE_DIR_READ) {
-               /* its a read-ahead buffer, scan it for crossed filemarks. */
-               tape->merge_stage_size = 0;
+               tape->merge_bh_size = 0;
                if (test_and_clear_bit(IDETAPE_FLAG_FILEMARK, &tape->flags))
                        ++count;
-               while (tape->first_stage != NULL) {
-                       if (count == mt_count) {
-                               if (mt_op == MTFSFM)
-                                       set_bit(IDETAPE_FLAG_FILEMARK,
-                                               &tape->flags);
-                               return 0;
-                       }
-                       spin_lock_irqsave(&tape->lock, flags);
-                       if (tape->first_stage == tape->active_stage) {
-                               /*
-                                * We have reached the active stage in the read
-                                * pipeline. There is no point in allowing the
-                                * drive to continue reading any farther, so we
-                                * stop the pipeline.
-                                *
-                                * This section should be moved to a separate
-                                * subroutine because similar operations are
-                                * done in __idetape_discard_read_pipeline(),
-                                * for example.
-                                */
-                               tape->next_stage = NULL;
-                               spin_unlock_irqrestore(&tape->lock, flags);
-                               idetape_wait_first_stage(drive);
-                               tape->next_stage = tape->first_stage->next;
-                       } else
-                               spin_unlock_irqrestore(&tape->lock, flags);
-                       if (tape->first_stage->rq.errors ==
-                                       IDETAPE_ERROR_FILEMARK)
-                               ++count;
-                       idetape_remove_stage_head(drive);
-               }
-               idetape_discard_read_pipeline(drive, 0);
+               ide_tape_discard_merge_buffer(drive, 0);
        }
 
-       /*
-        * The filemark was not found in our internal pipeline; now we can issue
-        * the space command.
-        */
        switch (mt_op) {
        case MTFSF:
        case MTBSF:
@@ -2755,27 +2045,25 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
                            (count % tape->blk_size) == 0)
                                tape->user_bs_factor = count / tape->blk_size;
        }
-       rc = idetape_init_read(drive, tape->max_stages);
+       rc = idetape_init_read(drive);
        if (rc < 0)
                return rc;
        if (count == 0)
                return (0);
-       if (tape->merge_stage_size) {
-               actually_read = min((unsigned int)(tape->merge_stage_size),
+       if (tape->merge_bh_size) {
+               actually_read = min((unsigned int)(tape->merge_bh_size),
                                    (unsigned int)count);
-               if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
-                                              actually_read))
+               if (idetape_copy_stage_to_user(tape, buf, actually_read))
                        ret = -EFAULT;
                buf += actually_read;
-               tape->merge_stage_size -= actually_read;
+               tape->merge_bh_size -= actually_read;
                count -= actually_read;
        }
-       while (count >= tape->stage_size) {
+       while (count >= tape->buffer_size) {
                bytes_read = idetape_add_chrdev_read_request(drive, ctl);
                if (bytes_read <= 0)
                        goto finish;
-               if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
-                                              bytes_read))
+               if (idetape_copy_stage_to_user(tape, buf, bytes_read))
                        ret = -EFAULT;
                buf += bytes_read;
                count -= bytes_read;
@@ -2786,11 +2074,10 @@ static ssize_t idetape_chrdev_read(struct file *file, char __user *buf,
                if (bytes_read <= 0)
                        goto finish;
                temp = min((unsigned long)count, (unsigned long)bytes_read);
-               if (idetape_copy_stage_to_user(tape, buf, tape->merge_stage,
-                                              temp))
+               if (idetape_copy_stage_to_user(tape, buf, temp))
                        ret = -EFAULT;
                actually_read += temp;
-               tape->merge_stage_size = bytes_read-temp;
+               tape->merge_bh_size = bytes_read-temp;
        }
 finish:
        if (!actually_read && test_bit(IDETAPE_FLAG_FILEMARK, &tape->flags)) {
@@ -2821,17 +2108,17 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
        /* Initialize write operation */
        if (tape->chrdev_dir != IDETAPE_DIR_WRITE) {
                if (tape->chrdev_dir == IDETAPE_DIR_READ)
-                       idetape_discard_read_pipeline(drive, 1);
-               if (tape->merge_stage || tape->merge_stage_size) {
-                       printk(KERN_ERR "ide-tape: merge_stage_size "
+                       ide_tape_discard_merge_buffer(drive, 1);
+               if (tape->merge_bh || tape->merge_bh_size) {
+                       printk(KERN_ERR "ide-tape: merge_bh_size "
                                "should be 0 now\n");
-                       tape->merge_stage_size = 0;
+                       tape->merge_bh_size = 0;
                }
-               tape->merge_stage = __idetape_kmalloc_stage(tape, 0, 0);
-               if (!tape->merge_stage)
+               tape->merge_bh = ide_tape_kmalloc_buffer(tape, 0, 0);
+               if (!tape->merge_bh)
                        return -ENOMEM;
                tape->chrdev_dir = IDETAPE_DIR_WRITE;
-               idetape_init_merge_stage(tape);
+               idetape_init_merge_buffer(tape);
 
                /*
                 * Issue a write 0 command to ensure that DSC handshake is
@@ -2842,10 +2129,10 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
                if (drive->dsc_overlap) {
                        ssize_t retval = idetape_queue_rw_tail(drive,
                                                        REQ_IDETAPE_WRITE, 0,
-                                                       tape->merge_stage->bh);
+                                                       tape->merge_bh);
                        if (retval < 0) {
-                               __idetape_kfree_stage(tape->merge_stage);
-                               tape->merge_stage = NULL;
+                               ide_tape_kfree_buffer(tape);
+                               tape->merge_bh = NULL;
                                tape->chrdev_dir = IDETAPE_DIR_NONE;
                                return retval;
                        }
@@ -2853,49 +2140,44 @@ static ssize_t idetape_chrdev_write(struct file *file, const char __user *buf,
        }
        if (count == 0)
                return (0);
-       if (tape->restart_speed_control_req)
-               idetape_restart_speed_control(drive);
-       if (tape->merge_stage_size) {
-               if (tape->merge_stage_size >= tape->stage_size) {
+       if (tape->merge_bh_size) {
+               if (tape->merge_bh_size >= tape->buffer_size) {
                        printk(KERN_ERR "ide-tape: bug: merge buf too big\n");
-                       tape->merge_stage_size = 0;
+                       tape->merge_bh_size = 0;
                }
                actually_written = min((unsigned int)
-                               (tape->stage_size - tape->merge_stage_size),
+                               (tape->buffer_size - tape->merge_bh_size),
                                (unsigned int)count);
-               if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
-                                                actually_written))
+               if (idetape_copy_stage_from_user(tape, buf, actually_written))
                                ret = -EFAULT;
                buf += actually_written;
-               tape->merge_stage_size += actually_written;
+               tape->merge_bh_size += actually_written;
                count -= actually_written;
 
-               if (tape->merge_stage_size == tape->stage_size) {
+               if (tape->merge_bh_size == tape->buffer_size) {
                        ssize_t retval;
-                       tape->merge_stage_size = 0;
+                       tape->merge_bh_size = 0;
                        retval = idetape_add_chrdev_write_request(drive, ctl);
                        if (retval <= 0)
                                return (retval);
                }
        }
-       while (count >= tape->stage_size) {
+       while (count >= tape->buffer_size) {
                ssize_t retval;
-               if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
-                                                tape->stage_size))
+               if (idetape_copy_stage_from_user(tape, buf, tape->buffer_size))
                        ret = -EFAULT;
-               buf += tape->stage_size;
-               count -= tape->stage_size;
+               buf += tape->buffer_size;
+               count -= tape->buffer_size;
                retval = idetape_add_chrdev_write_request(drive, ctl);
-               actually_written += tape->stage_size;
+               actually_written += tape->buffer_size;
                if (retval <= 0)
                        return (retval);
        }
        if (count) {
                actually_written += count;
-               if (idetape_copy_stage_from_user(tape, tape->merge_stage, buf,
-                                                count))
+               if (idetape_copy_stage_from_user(tape, buf, count))
                        ret = -EFAULT;
-               tape->merge_stage_size += count;
+               tape->merge_bh_size += count;
        }
        return ret ? ret : actually_written;
 }
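
The rewritten idetape_chrdev_write() above no longer queues pipeline stages: user data is copied into the single merge buffer until buffer_size bytes have accumulated, and each full buffer is pushed out synchronously through idetape_add_chrdev_write_request(). The user-space C sketch below mirrors only that accumulate-and-flush shape; the buffer size, block size and flush routine are illustrative stand-ins, not the driver's actual values.

#include <stdio.h>
#include <string.h>

#define BLK_SIZE   512                  /* illustrative block size        */
#define BUF_BLOCKS 32                   /* illustrative "ctl" block count */
#define BUF_SIZE   (BLK_SIZE * BUF_BLOCKS)

static char merge_buf[BUF_SIZE];        /* stand-in for tape->merge_bh      */
static size_t merge_size;               /* stand-in for tape->merge_bh_size */

/* stand-in for idetape_queue_rw_tail(): write whole blocks downstream */
static void flush_blocks(size_t blocks)
{
	printf("flush %zu blocks (%zu bytes)\n", blocks, blocks * BLK_SIZE);
	merge_size = 0;
}

/* accumulate user data and flush only full buffers, as the chrdev write path does */
static void buffered_write(const char *data, size_t count)
{
	while (count) {
		size_t n = BUF_SIZE - merge_size;

		if (n > count)
			n = count;
		memcpy(merge_buf + merge_size, data, n);
		merge_size += n;
		data += n;
		count -= n;
		if (merge_size == BUF_SIZE)
			flush_blocks(BUF_BLOCKS);
	}
}

int main(void)
{
	char chunk[700] = { 0 };
	int i;

	for (i = 0; i < 30; i++)
		buffered_write(chunk, sizeof(chunk));
	/* a partial buffer stays resident until ide_tape_flush_merge_buffer() */
	printf("%zu bytes still buffered\n", merge_size);
	return 0;
}
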
@@ -2919,8 +2201,7 @@ static int idetape_write_filemark(ide_drive_t *drive)
  *
  * Note: MTBSF and MTBSFM are not supported when the tape doesn't support
  * spacing over filemarks in the reverse direction. In this case, MTFSFM is also
- * usually not supported (it is supported in the rare case in which we crossed
- * the filemark during our read-ahead pipelined operation mode).
+ * usually not supported.
  *
  * The following commands are currently not supported:
  *
@@ -2936,7 +2217,6 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
        debug_log(DBG_ERR, "Handling MTIOCTOP ioctl: mt_op=%d, mt_count=%d\n",
                        mt_op, mt_count);
 
-       /* Commands which need our pipelined read-ahead stages. */
        switch (mt_op) {
        case MTFSF:
        case MTFSFM:
@@ -2953,7 +2233,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
        case MTWEOF:
                if (tape->write_prot)
                        return -EACCES;
-               idetape_discard_read_pipeline(drive, 1);
+               ide_tape_discard_merge_buffer(drive, 1);
                for (i = 0; i < mt_count; i++) {
                        retval = idetape_write_filemark(drive);
                        if (retval)
@@ -2961,12 +2241,12 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
                }
                return 0;
        case MTREW:
-               idetape_discard_read_pipeline(drive, 0);
+               ide_tape_discard_merge_buffer(drive, 0);
                if (idetape_rewind_tape(drive))
                        return -EIO;
                return 0;
        case MTLOAD:
-               idetape_discard_read_pipeline(drive, 0);
+               ide_tape_discard_merge_buffer(drive, 0);
                idetape_create_load_unload_cmd(drive, &pc,
                                               IDETAPE_LU_LOAD_MASK);
                return idetape_queue_pc_tail(drive, &pc);
@@ -2981,7 +2261,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
                                if (!idetape_queue_pc_tail(drive, &pc))
                                        tape->door_locked = DOOR_UNLOCKED;
                }
-               idetape_discard_read_pipeline(drive, 0);
+               ide_tape_discard_merge_buffer(drive, 0);
                idetape_create_load_unload_cmd(drive, &pc,
                                              !IDETAPE_LU_LOAD_MASK);
                retval = idetape_queue_pc_tail(drive, &pc);
@@ -2989,10 +2269,10 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
                        clear_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags);
                return retval;
        case MTNOP:
-               idetape_discard_read_pipeline(drive, 0);
+               ide_tape_discard_merge_buffer(drive, 0);
                return idetape_flush_tape_buffers(drive);
        case MTRETEN:
-               idetape_discard_read_pipeline(drive, 0);
+               ide_tape_discard_merge_buffer(drive, 0);
                idetape_create_load_unload_cmd(drive, &pc,
                        IDETAPE_LU_RETENSION_MASK | IDETAPE_LU_LOAD_MASK);
                return idetape_queue_pc_tail(drive, &pc);
@@ -3014,11 +2294,11 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
                        set_bit(IDETAPE_FLAG_DETECT_BS, &tape->flags);
                return 0;
        case MTSEEK:
-               idetape_discard_read_pipeline(drive, 0);
+               ide_tape_discard_merge_buffer(drive, 0);
                return idetape_position_tape(drive,
                        mt_count * tape->user_bs_factor, tape->partition, 0);
        case MTSETPART:
-               idetape_discard_read_pipeline(drive, 0);
+               ide_tape_discard_merge_buffer(drive, 0);
                return idetape_position_tape(drive, 0, mt_count, 0);
        case MTFSR:
        case MTBSR:
@@ -3063,13 +2343,12 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
 
        debug_log(DBG_CHRDEV, "Enter %s, cmd=%u\n", __func__, cmd);
 
-       tape->restart_speed_control_req = 1;
        if (tape->chrdev_dir == IDETAPE_DIR_WRITE) {
-               idetape_empty_write_pipeline(drive);
+               ide_tape_flush_merge_buffer(drive);
                idetape_flush_tape_buffers(drive);
        }
        if (cmd == MTIOCGET || cmd == MTIOCPOS) {
-               block_offset = idetape_pipeline_size(drive) /
+               block_offset = tape->merge_bh_size /
                        (tape->blk_size * tape->user_bs_factor);
                position = idetape_read_position(drive);
                if (position < 0)
@@ -3101,7 +2380,7 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
                return 0;
        default:
                if (tape->chrdev_dir == IDETAPE_DIR_READ)
-                       idetape_discard_read_pipeline(drive, 1);
+                       ide_tape_discard_merge_buffer(drive, 1);
                return idetape_blkdev_ioctl(drive, cmd, arg);
        }
 }
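
With the pipeline removed, the residual-position correction for MTIOCGET/MTIOCPOS above comes only from data still parked in the merge buffer. A rough worked example with assumed numbers: blk_size = 512, user_bs_factor = 2 and merge_bh_size = 3072 give block_offset = 3072 / (512 * 2) = 3 user-visible blocks still held in the buffer.
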
@@ -3175,9 +2454,6 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
        if (!test_bit(IDETAPE_FLAG_ADDRESS_VALID, &tape->flags))
                (void)idetape_rewind_tape(drive);
 
-       if (tape->chrdev_dir != IDETAPE_DIR_READ)
-               clear_bit(IDETAPE_FLAG_PIPELINE_ERR, &tape->flags);
-
        /* Read block size and write protect status from drive. */
        ide_tape_get_bsize_from_bdesc(drive);
 
@@ -3206,8 +2482,6 @@ static int idetape_chrdev_open(struct inode *inode, struct file *filp)
                        }
                }
        }
-       idetape_restart_speed_control(drive);
-       tape->restart_speed_control_req = 0;
        return 0;
 
 out_put_tape:
@@ -3219,13 +2493,13 @@ static void idetape_write_release(ide_drive_t *drive, unsigned int minor)
 {
        idetape_tape_t *tape = drive->driver_data;
 
-       idetape_empty_write_pipeline(drive);
-       tape->merge_stage = __idetape_kmalloc_stage(tape, 1, 0);
-       if (tape->merge_stage != NULL) {
+       ide_tape_flush_merge_buffer(drive);
+       tape->merge_bh = ide_tape_kmalloc_buffer(tape, 1, 0);
+       if (tape->merge_bh != NULL) {
                idetape_pad_zeros(drive, tape->blk_size *
                                (tape->user_bs_factor - 1));
-               __idetape_kfree_stage(tape->merge_stage);
-               tape->merge_stage = NULL;
+               ide_tape_kfree_buffer(tape);
+               tape->merge_bh = NULL;
        }
        idetape_write_filemark(drive);
        idetape_flush_tape_buffers(drive);
@@ -3248,14 +2522,9 @@ static int idetape_chrdev_release(struct inode *inode, struct file *filp)
                idetape_write_release(drive, minor);
        if (tape->chrdev_dir == IDETAPE_DIR_READ) {
                if (minor < 128)
-                       idetape_discard_read_pipeline(drive, 1);
-               else
-                       idetape_wait_for_pipeline(drive);
-       }
-       if (tape->cache_stage != NULL) {
-               __idetape_kfree_stage(tape->cache_stage);
-               tape->cache_stage = NULL;
+                       ide_tape_discard_merge_buffer(drive, 1);
        }
+
        if (minor < 128 && test_bit(IDETAPE_FLAG_MEDIUM_PRESENT, &tape->flags))
                (void) idetape_rewind_tape(drive);
        if (tape->chrdev_dir == IDETAPE_DIR_NONE) {
@@ -3392,33 +2661,15 @@ static void idetape_add_settings(ide_drive_t *drive)
 
        ide_add_setting(drive, "buffer", SETTING_READ, TYPE_SHORT, 0, 0xffff,
                        1, 2, (u16 *)&tape->caps[16], NULL);
-       ide_add_setting(drive, "pipeline_min", SETTING_RW, TYPE_INT, 1, 0xffff,
-                       tape->stage_size / 1024, 1, &tape->min_pipeline, NULL);
-       ide_add_setting(drive, "pipeline", SETTING_RW, TYPE_INT, 1, 0xffff,
-                       tape->stage_size / 1024, 1, &tape->max_stages, NULL);
-       ide_add_setting(drive, "pipeline_max", SETTING_RW, TYPE_INT, 1, 0xffff,
-                       tape->stage_size / 1024, 1, &tape->max_pipeline, NULL);
-       ide_add_setting(drive, "pipeline_used", SETTING_READ, TYPE_INT, 0,
-                       0xffff, tape->stage_size / 1024, 1, &tape->nr_stages,
-                       NULL);
-       ide_add_setting(drive, "pipeline_pending", SETTING_READ, TYPE_INT, 0,
-                       0xffff, tape->stage_size / 1024, 1,
-                       &tape->nr_pending_stages, NULL);
        ide_add_setting(drive, "speed", SETTING_READ, TYPE_SHORT, 0, 0xffff,
                        1, 1, (u16 *)&tape->caps[14], NULL);
-       ide_add_setting(drive, "stage", SETTING_READ, TYPE_INT, 0, 0xffff, 1,
-                       1024, &tape->stage_size, NULL);
+       ide_add_setting(drive, "buffer_size", SETTING_READ, TYPE_INT, 0, 0xffff,
+                       1, 1024, &tape->buffer_size, NULL);
        ide_add_setting(drive, "tdsc", SETTING_RW, TYPE_INT, IDETAPE_DSC_RW_MIN,
                        IDETAPE_DSC_RW_MAX, 1000, HZ, &tape->best_dsc_rw_freq,
                        NULL);
        ide_add_setting(drive, "dsc_overlap", SETTING_RW, TYPE_BYTE, 0, 1, 1,
                        1, &drive->dsc_overlap, NULL);
-       ide_add_setting(drive, "pipeline_head_speed_c", SETTING_READ, TYPE_INT,
-                       0, 0xffff, 1, 1, &tape->controlled_pipeline_head_speed,
-                       NULL);
-       ide_add_setting(drive, "pipeline_head_speed_u", SETTING_READ, TYPE_INT,
-                       0, 0xffff, 1, 1,
-                       &tape->uncontrolled_pipeline_head_speed, NULL);
        ide_add_setting(drive, "avg_speed", SETTING_READ, TYPE_INT, 0, 0xffff,
                        1, 1, &tape->avg_speed, NULL);
        ide_add_setting(drive, "debug_mask", SETTING_RW, TYPE_INT, 0, 0xffff, 1,
@@ -3441,11 +2692,10 @@ static inline void idetape_add_settings(ide_drive_t *drive) { ; }
  */
 static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
 {
-       unsigned long t1, tmid, tn, t;
+       unsigned long t;
        int speed;
-       int stage_size;
+       int buffer_size;
        u8 gcw[2];
-       struct sysinfo si;
        u16 *ctl = (u16 *)&tape->caps[12];
 
        spin_lock_init(&tape->lock);
@@ -3464,65 +2714,33 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
        tape->name[2] = '0' + minor;
        tape->chrdev_dir = IDETAPE_DIR_NONE;
        tape->pc = tape->pc_stack;
-       tape->max_insert_speed = 10000;
-       tape->speed_control = 1;
        *((unsigned short *) &gcw) = drive->id->config;
 
        /* Command packet DRQ type */
        if (((gcw[0] & 0x60) >> 5) == 1)
                set_bit(IDETAPE_FLAG_DRQ_INTERRUPT, &tape->flags);
 
-       tape->min_pipeline = 10;
-       tape->max_pipeline = 10;
-       tape->max_stages   = 10;
-
        idetape_get_inquiry_results(drive);
        idetape_get_mode_sense_results(drive);
        ide_tape_get_bsize_from_bdesc(drive);
        tape->user_bs_factor = 1;
-       tape->stage_size = *ctl * tape->blk_size;
-       while (tape->stage_size > 0xffff) {
+       tape->buffer_size = *ctl * tape->blk_size;
+       while (tape->buffer_size > 0xffff) {
                printk(KERN_NOTICE "ide-tape: decreasing stage size\n");
                *ctl /= 2;
-               tape->stage_size = *ctl * tape->blk_size;
+               tape->buffer_size = *ctl * tape->blk_size;
        }
-       stage_size = tape->stage_size;
-       tape->pages_per_stage = stage_size / PAGE_SIZE;
-       if (stage_size % PAGE_SIZE) {
-               tape->pages_per_stage++;
-               tape->excess_bh_size = PAGE_SIZE - stage_size % PAGE_SIZE;
+       buffer_size = tape->buffer_size;
+       tape->pages_per_buffer = buffer_size / PAGE_SIZE;
+       if (buffer_size % PAGE_SIZE) {
+               tape->pages_per_buffer++;
+               tape->excess_bh_size = PAGE_SIZE - buffer_size % PAGE_SIZE;
        }
 
-       /* Select the "best" DSC read/write polling freq and pipeline size. */
+       /* select the "best" DSC read/write polling freq */
        speed = max(*(u16 *)&tape->caps[14], *(u16 *)&tape->caps[8]);
 
-       tape->max_stages = speed * 1000 * 10 / tape->stage_size;
-
-       /* Limit memory use for pipeline to 10% of physical memory */
-       si_meminfo(&si);
-       if (tape->max_stages * tape->stage_size >
-                       si.totalram * si.mem_unit / 10)
-               tape->max_stages =
-                       si.totalram * si.mem_unit / (10 * tape->stage_size);
-
-       tape->max_stages   = min(tape->max_stages, IDETAPE_MAX_PIPELINE_STAGES);
-       tape->min_pipeline = min(tape->max_stages, IDETAPE_MIN_PIPELINE_STAGES);
-       tape->max_pipeline =
-               min(tape->max_stages * 2, IDETAPE_MAX_PIPELINE_STAGES);
-       if (tape->max_stages == 0) {
-               tape->max_stages   = 1;
-               tape->min_pipeline = 1;
-               tape->max_pipeline = 1;
-       }
-
-       t1 = (tape->stage_size * HZ) / (speed * 1000);
-       tmid = (*(u16 *)&tape->caps[16] * 32 * HZ) / (speed * 125);
-       tn = (IDETAPE_FIFO_THRESHOLD * tape->stage_size * HZ) / (speed * 1000);
-
-       if (tape->max_stages)
-               t = tn;
-       else
-               t = t1;
+       t = (IDETAPE_FIFO_THRESHOLD * tape->buffer_size * HZ) / (speed * 1000);
 
        /*
         * Ensure that the number we got makes sense; limit it within
@@ -3532,11 +2750,10 @@ static void idetape_setup(ide_drive_t *drive, idetape_tape_t *tape, int minor)
                                min_t(unsigned long, t, IDETAPE_DSC_RW_MAX),
                                IDETAPE_DSC_RW_MIN);
        printk(KERN_INFO "ide-tape: %s <-> %s: %dKBps, %d*%dkB buffer, "
-               "%dkB pipeline, %lums tDSC%s\n",
+               "%lums tDSC%s\n",
                drive->name, tape->name, *(u16 *)&tape->caps[14],
-               (*(u16 *)&tape->caps[16] * 512) / tape->stage_size,
-               tape->stage_size / 1024,
-               tape->max_stages * tape->stage_size / 1024,
+               (*(u16 *)&tape->caps[16] * 512) / tape->buffer_size,
+               tape->buffer_size / 1024,
                tape->best_dsc_rw_freq * 1000 / HZ,
                drive->using_dma ? ", DMA":"");
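
The buffer geometry set up in idetape_setup() above replaces the old stage bookkeeping: the transfer buffer is sized from the drive's ctl value and split into whole pages, with the padding of the last page recorded in excess_bh_size. A standalone sketch of the same arithmetic, assuming a PAGE_SIZE of 4096 and example ctl/blk_size values:

#include <stdio.h>

#define PAGE_SIZE 4096                  /* assumed page size for the example */

int main(void)
{
	unsigned int blk_size = 512, ctl = 52;          /* example values only   */
	unsigned int buffer_size = ctl * blk_size;      /* 26624 bytes           */
	unsigned int pages = buffer_size / PAGE_SIZE;   /* 6 full pages          */
	unsigned int excess = 0;

	if (buffer_size % PAGE_SIZE) {
		pages++;                                /* 7 pages are allocated */
		excess = PAGE_SIZE - buffer_size % PAGE_SIZE;   /* 2048 bytes unused in the last page */
	}
	printf("buffer_size=%u pages_per_buffer=%u excess_bh_size=%u\n",
	       buffer_size, pages, excess);
	return 0;
}
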
 
@@ -3560,7 +2777,7 @@ static void ide_tape_release(struct kref *kref)
        ide_drive_t *drive = tape->drive;
        struct gendisk *g = tape->disk;
 
-       BUG_ON(tape->first_stage != NULL || tape->merge_stage_size);
+       BUG_ON(tape->merge_bh_size);
 
        drive->dsc_overlap = 0;
        drive->driver_data = NULL;
index 155cc904f4eb056948745060c3e704d6a7698d29..0c908ca3ff79654ac3d0bf8fe500f32658d84016 100644
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
-void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
+void ide_tf_dump(const char *s, struct ide_taskfile *tf)
 {
-       ide_hwif_t *hwif = drive->hwif;
-       struct ide_taskfile *tf = &task->tf;
-       u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
-
-       if (task->tf_flags & IDE_TFLAG_FLAGGED)
-               HIHI = 0xFF;
-
 #ifdef DEBUG
        printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
                "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
-               drive->name, tf->feature, tf->nsect, tf->lbal,
+               s, tf->feature, tf->nsect, tf->lbal,
                tf->lbam, tf->lbah, tf->device, tf->command);
        printk("%s: hob: nsect 0x%02x lbal 0x%02x "
                "lbam 0x%02x lbah 0x%02x\n",
-               drive->name, tf->hob_nsect, tf->hob_lbal,
+               s, tf->hob_nsect, tf->hob_lbal,
                tf->hob_lbam, tf->hob_lbah);
 #endif
-
-       ide_set_irq(drive, 1);
-
-       if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0)
-               SELECT_MASK(drive, 0);
-
-       if (task->tf_flags & IDE_TFLAG_OUT_DATA)
-               hwif->OUTW((tf->hob_data << 8) | tf->data,
-                          hwif->io_ports[IDE_DATA_OFFSET]);
-
-       if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
-               hwif->OUTB(tf->hob_feature, hwif->io_ports[IDE_FEATURE_OFFSET]);
-       if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
-               hwif->OUTB(tf->hob_nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]);
-       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
-               hwif->OUTB(tf->hob_lbal, hwif->io_ports[IDE_SECTOR_OFFSET]);
-       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
-               hwif->OUTB(tf->hob_lbam, hwif->io_ports[IDE_LCYL_OFFSET]);
-       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
-               hwif->OUTB(tf->hob_lbah, hwif->io_ports[IDE_HCYL_OFFSET]);
-
-       if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
-               hwif->OUTB(tf->feature, hwif->io_ports[IDE_FEATURE_OFFSET]);
-       if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
-               hwif->OUTB(tf->nsect, hwif->io_ports[IDE_NSECTOR_OFFSET]);
-       if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
-               hwif->OUTB(tf->lbal, hwif->io_ports[IDE_SECTOR_OFFSET]);
-       if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
-               hwif->OUTB(tf->lbam, hwif->io_ports[IDE_LCYL_OFFSET]);
-       if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
-               hwif->OUTB(tf->lbah, hwif->io_ports[IDE_HCYL_OFFSET]);
-
-       if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
-               hwif->OUTB((tf->device & HIHI) | drive->select.all,
-                          hwif->io_ports[IDE_SELECT_OFFSET]);
 }
 
 int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
@@ -135,6 +93,7 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
        ide_hwif_t *hwif        = HWIF(drive);
        struct ide_taskfile *tf = &task->tf;
        ide_handler_t *handler = NULL;
+       const struct ide_dma_ops *dma_ops = hwif->dma_ops;
 
        if (task->data_phase == TASKFILE_MULTI_IN ||
            task->data_phase == TASKFILE_MULTI_OUT) {
@@ -148,14 +107,15 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
        if (task->tf_flags & IDE_TFLAG_FLAGGED)
                task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;
 
-       if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0)
-               ide_tf_load(drive, task);
+       if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
+               ide_tf_dump(drive->name, tf);
+               hwif->tf_load(drive, task);
+       }
 
        switch (task->data_phase) {
        case TASKFILE_MULTI_OUT:
        case TASKFILE_OUT:
-               hwif->OUTBSYNC(drive, tf->command,
-                              hwif->io_ports[IDE_COMMAND_OFFSET]);
+               hwif->OUTBSYNC(drive, tf->command, hwif->io_ports.command_addr);
                ndelay(400);    /* FIXME */
                return pre_task_out_intr(drive, task->rq);
        case TASKFILE_MULTI_IN:
@@ -178,10 +138,10 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
                return ide_started;
        default:
                if (task_dma_ok(task) == 0 || drive->using_dma == 0 ||
-                   hwif->dma_setup(drive))
+                   dma_ops->dma_setup(drive))
                        return ide_stopped;
-               hwif->dma_exec_cmd(drive, tf->command);
-               hwif->dma_start(drive);
+               dma_ops->dma_exec_cmd(drive, tf->command);
+               dma_ops->dma_start(drive);
                return ide_started;
        }
 }
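
do_rw_taskfile() above now reaches the DMA engine only through the hwif->dma_ops method table (dma_setup, dma_exec_cmd, dma_start) instead of per-hwif function pointers, and taskfile register output is delegated to hwif->tf_load() with ide_tf_dump() keeping the debug printout. The snippet below is only a generic, self-contained illustration of that ops-table dispatch pattern; the struct and function names are simplified stand-ins, not the kernel's ide_dma_ops.

#include <stdio.h>

/* a method table grouping the DMA entry points, in the spirit of struct ide_dma_ops */
struct dma_ops {
	int  (*dma_setup)(int channel);
	void (*dma_exec_cmd)(int channel, unsigned char cmd);
	void (*dma_start)(int channel);
};

static int sff_dma_setup(int c)
{
	printf("setup channel %d\n", c);
	return 0;
}

static void sff_dma_exec_cmd(int c, unsigned char cmd)
{
	printf("command 0x%02x on channel %d\n", (unsigned int)cmd, c);
}

static void sff_dma_start(int c)
{
	printf("start channel %d\n", c);
}

static const struct dma_ops sff_dma_ops = {
	.dma_setup    = sff_dma_setup,
	.dma_exec_cmd = sff_dma_exec_cmd,
	.dma_start    = sff_dma_start,
};

/* callers hold a single ops pointer and dispatch through it */
static int issue_dma(const struct dma_ops *ops, int channel, unsigned char cmd)
{
	if (ops == NULL || ops->dma_setup(channel))
		return -1;			/* fall back to PIO */
	ops->dma_exec_cmd(channel, cmd);
	ops->dma_start(channel);
	return 0;
}

int main(void)
{
	return issue_dma(&sff_dma_ops, 0, 0xca) ? 1 : 0;
}
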
@@ -283,7 +243,8 @@ static u8 wait_drive_not_busy(ide_drive_t *drive)
        return stat;
 }
 
-static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
+static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
+                          unsigned int write)
 {
        ide_hwif_t *hwif = drive->hwif;
        struct scatterlist *sg = hwif->sg_table;
@@ -323,9 +284,9 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
 
        /* do the actual data transfer */
        if (write)
-               hwif->ata_output_data(drive, buf, SECTOR_WORDS);
+               hwif->output_data(drive, rq, buf, SECTOR_SIZE);
        else
-               hwif->ata_input_data(drive, buf, SECTOR_WORDS);
+               hwif->input_data(drive, rq, buf, SECTOR_SIZE);
 
        kunmap_atomic(buf, KM_BIO_SRC_IRQ);
 #ifdef CONFIG_HIGHMEM
@@ -333,13 +294,14 @@ static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
 #endif
 }
 
-static void ide_pio_multi(ide_drive_t *drive, unsigned int write)
+static void ide_pio_multi(ide_drive_t *drive, struct request *rq,
+                         unsigned int write)
 {
        unsigned int nsect;
 
        nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
        while (nsect--)
-               ide_pio_sector(drive, write);
+               ide_pio_sector(drive, rq, write);
 }
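
ide_pio_sector() and ide_pio_multi() now take the request explicitly, and the data transfer hooks are the new hwif->input_data()/output_data() methods called with a byte count (SECTOR_SIZE) rather than the old word count (SECTOR_WORDS). A small self-contained sketch of that calling convention; the hook signature and names here are simplified, not the kernel's:

#include <stdio.h>
#include <string.h>

#define SECTOR_SIZE 512

/* new-style transfer hook: opaque request pointer plus a length in bytes */
typedef void (*xfer_fn)(void *rq, void *buf, unsigned int nbytes);

static void pio_output_data(void *rq, void *buf, unsigned int nbytes)
{
	(void)rq;
	memset(buf, 0, nbytes);		/* placeholder for the real port I/O */
	printf("PIO out %u bytes\n", nbytes);
}

/* transfer up to mult sectors, one SECTOR_SIZE chunk at a time */
static void pio_multi(xfer_fn out, void *rq, unsigned int nleft, unsigned int mult)
{
	unsigned char sector[SECTOR_SIZE];
	unsigned int nsect = nleft < mult ? nleft : mult;

	while (nsect--)
		out(rq, sector, SECTOR_SIZE);
}

int main(void)
{
	pio_multi(pio_output_data, NULL, 5, 4);	/* transfers 4 sectors */
	return 0;
}
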
 
 static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
@@ -362,10 +324,10 @@ static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
        switch (drive->hwif->data_phase) {
        case TASKFILE_MULTI_IN:
        case TASKFILE_MULTI_OUT:
-               ide_pio_multi(drive, write);
+               ide_pio_multi(drive, rq, write);
                break;
        default:
-               ide_pio_sector(drive, write);
+               ide_pio_sector(drive, rq, write);
                break;
        }
 
@@ -455,7 +417,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
 
        /* Error? */
        if (stat & ERR_STAT)
-               return task_error(drive, rq, __FUNCTION__, stat);
+               return task_error(drive, rq, __func__, stat);
 
        /* Didn't want any data? Odd. */
        if (!(stat & DRQ_STAT))
@@ -467,7 +429,7 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
        if (!hwif->nleft) {
                stat = wait_drive_not_busy(drive);
                if (!OK_STAT(stat, 0, BAD_STAT))
-                       return task_error(drive, rq, __FUNCTION__, stat);
+                       return task_error(drive, rq, __func__, stat);
                task_end_request(drive, rq, stat);
                return ide_stopped;
        }
@@ -488,11 +450,11 @@ static ide_startstop_t task_out_intr (ide_drive_t *drive)
        u8 stat = ide_read_status(drive);
 
        if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
-               return task_error(drive, rq, __FUNCTION__, stat);
+               return task_error(drive, rq, __func__, stat);
 
        /* Deal with unexpected ATA data phase. */
        if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft)
-               return task_error(drive, rq, __FUNCTION__, stat);
+               return task_error(drive, rq, __func__, stat);
 
        if (!hwif->nleft) {
                task_end_request(drive, rq, stat);
@@ -532,8 +494,7 @@ int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
 {
        struct request rq;
 
-       memset(&rq, 0, sizeof(rq));
-       rq.ref_count = 1;
+       blk_rq_init(NULL, &rq);
        rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
        rq.buffer = buf;
 
@@ -675,7 +636,7 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
                                /* (hs): give up if multcount is not set */
                                printk(KERN_ERR "%s: %s Multimode Write " \
                                        "multcount is not set\n",
-                                       drive->name, __FUNCTION__);
+                                       drive->name, __func__);
                                err = -EPERM;
                                goto abort;
                        }
@@ -692,7 +653,7 @@ int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
                                /* (hs): give up if multcount is not set */
                                printk(KERN_ERR "%s: %s Multimode Read failure " \
                                        "multcount is not set\n",
-                                       drive->name, __FUNCTION__);
+                                       drive->name, __func__);
                                err = -EPERM;
                                goto abort;
                        }
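
The remaining hunks in this file are mechanical: the GCC-specific __FUNCTION__ spelling in error messages is replaced by the standard C99 predefined identifier __func__. A minimal illustration:

#include <stdio.h>

static void report_failure(void)
{
	/* __func__ expands to the name of the enclosing function */
	printf("%s: multcount is not set\n", __func__);
}

int main(void)
{
	report_failure();
	return 0;
}
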
index d868ca44d03376168ab08143ecc4adb93f2cfae0..c758dcb13b14209a97bfdca02ef056ee113712a7 100644
@@ -94,12 +94,6 @@ DEFINE_MUTEX(ide_cfg_mtx);
 
 int noautodma = 0;
 
-#ifdef CONFIG_BLK_DEV_IDEACPI
-int ide_noacpi = 0;
-int ide_noacpitfs = 1;
-int ide_noacpionboot = 1;
-#endif
-
 ide_hwif_t ide_hwifs[MAX_HWIFS];       /* master data repository */
 
 static void ide_port_init_devices_data(ide_hwif_t *);
@@ -227,79 +221,6 @@ static int ide_system_bus_speed(void)
        return pci_dev_present(pci_default) ? 33 : 50;
 }
 
-static struct resource* hwif_request_region(ide_hwif_t *hwif,
-                                           unsigned long addr, int num)
-{
-       struct resource *res = request_region(addr, num, hwif->name);
-
-       if (!res)
-               printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
-                               hwif->name, addr, addr+num-1);
-       return res;
-}
-
-/**
- *     ide_hwif_request_regions - request resources for IDE
- *     @hwif: interface to use
- *
- *     Requests all the needed resources for an interface.
- *     Right now core IDE code does this work which is deeply wrong.
- *     MMIO leaves it to the controller driver,
- *     PIO will migrate this way over time.
- */
-
-int ide_hwif_request_regions(ide_hwif_t *hwif)
-{
-       unsigned long addr;
-
-       if (hwif->mmio)
-               return 0;
-
-       addr = hwif->io_ports[IDE_CONTROL_OFFSET];
-
-       if (addr && !hwif_request_region(hwif, addr, 1))
-               goto control_region_busy;
-
-       addr = hwif->io_ports[IDE_DATA_OFFSET];
-       BUG_ON((addr | 7) != hwif->io_ports[IDE_STATUS_OFFSET]);
-
-       if (!hwif_request_region(hwif, addr, 8))
-               goto data_region_busy;
-
-       return 0;
-
-data_region_busy:
-       addr = hwif->io_ports[IDE_CONTROL_OFFSET];
-       if (addr)
-               release_region(addr, 1);
-control_region_busy:
-       /* If any errors are return, we drop the hwif interface. */
-       return -EBUSY;
-}
-
-/**
- *     ide_hwif_release_regions - free IDE resources
- *
- *     Note that we only release the standard ports,
- *     and do not even try to handle any extra ports
- *     allocated for weird IDE interface chipsets.
- *
- *     Note also that we don't yet handle mmio resources here. More
- *     importantly our caller should be doing this so we need to 
- *     restructure this as a helper function for drivers.
- */
-
-void ide_hwif_release_regions(ide_hwif_t *hwif)
-{
-       if (hwif->mmio)
-               return;
-
-       if (hwif->io_ports[IDE_CONTROL_OFFSET])
-               release_region(hwif->io_ports[IDE_CONTROL_OFFSET], 1);
-
-       release_region(hwif->io_ports[IDE_DATA_OFFSET], 8);
-}
-
 void ide_remove_port_from_hwgroup(ide_hwif_t *hwif)
 {
        ide_hwgroup_t *hwgroup = hwif->hwgroup;
@@ -366,7 +287,7 @@ EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
 
 /**
  *     ide_unregister          -       free an IDE interface
- *     @index: index of interface (will change soon to a pointer)
+ *     @hwif: IDE interface
  *
  *     Perform the final unregister of an IDE interface. At the moment
  *     we don't refcount interfaces so this will also get split up.
@@ -386,19 +307,16 @@ EXPORT_SYMBOL_GPL(ide_port_unregister_devices);
  *     This is raving bonkers.
  */
 
-void ide_unregister(unsigned int index)
+void ide_unregister(ide_hwif_t *hwif)
 {
-       ide_hwif_t *hwif, *g;
+       ide_hwif_t *g;
        ide_hwgroup_t *hwgroup;
        int irq_count = 0;
 
-       BUG_ON(index >= MAX_HWIFS);
-
        BUG_ON(in_interrupt());
        BUG_ON(irqs_disabled());
        mutex_lock(&ide_cfg_mtx);
        spin_lock_irq(&ide_lock);
-       hwif = &ide_hwifs[index];
        if (!hwif->present)
                goto abort;
        __ide_port_unregister_devices(hwif);
@@ -436,12 +354,10 @@ void ide_unregister(unsigned int index)
        spin_lock_irq(&ide_lock);
 
        if (hwif->dma_base)
-               (void)ide_release_dma(hwif);
-
-       ide_hwif_release_regions(hwif);
+               ide_release_dma_engine(hwif);
 
        /* restore hwif data to pristine status */
-       ide_init_port_data(hwif, index);
+       ide_init_port_data(hwif, hwif->index);
 
 abort:
        spin_unlock_irq(&ide_lock);
@@ -452,9 +368,8 @@ EXPORT_SYMBOL(ide_unregister);
 
 void ide_init_port_hw(ide_hwif_t *hwif, hw_regs_t *hw)
 {
-       memcpy(hwif->io_ports, hw->io_ports, sizeof(hwif->io_ports));
+       memcpy(&hwif->io_ports, &hw->io_ports, sizeof(hwif->io_ports));
        hwif->irq = hw->irq;
-       hwif->noprobe = 0;
        hwif->chipset = hw->chipset;
        hwif->gendev.parent = hw->dev;
        hwif->ack_intr = hw->ack_intr;
@@ -545,7 +460,7 @@ int set_using_dma(ide_drive_t *drive, int arg)
        if (!drive->id || !(drive->id->capability & 1))
                goto out;
 
-       if (hwif->dma_host_set == NULL)
+       if (hwif->dma_ops == NULL)
                goto out;
 
        err = -EBUSY;
@@ -585,11 +500,12 @@ int set_pio_mode(ide_drive_t *drive, int arg)
 {
        struct request rq;
        ide_hwif_t *hwif = drive->hwif;
+       const struct ide_port_ops *port_ops = hwif->port_ops;
 
        if (arg < 0 || arg > 255)
                return -EINVAL;
 
-       if (hwif->set_pio_mode == NULL ||
+       if (port_ops == NULL || port_ops->set_pio_mode == NULL ||
            (hwif->host_flags & IDE_HFLAG_NO_SET_MODE))
                return -ENOSYS;
 
@@ -648,7 +564,7 @@ static int generic_ide_suspend(struct device *dev, pm_message_t mesg)
        if (!(drive->dn % 2))
                ide_acpi_get_timing(hwif);
 
-       memset(&rq, 0, sizeof(rq));
+       blk_rq_init(NULL, &rq);
        memset(&rqpm, 0, sizeof(rqpm));
        memset(&args, 0, sizeof(args));
        rq.cmd_type = REQ_TYPE_PM_SUSPEND;
@@ -686,7 +602,7 @@ static int generic_ide_resume(struct device *dev)
 
        ide_acpi_exec_tfs(drive);
 
-       memset(&rq, 0, sizeof(rq));
+       blk_rq_init(NULL, &rq);
        memset(&rqpm, 0, sizeof(rqpm));
        memset(&args, 0, sizeof(args));
        rq.cmd_type = REQ_TYPE_PM_RESUME;
@@ -912,16 +828,6 @@ static int __init match_parm (char *s, const char *keywords[], int vals[], int m
        return 0;       /* zero = nothing matched */
 }
 
-extern int probe_ali14xx;
-extern int probe_umc8672;
-extern int probe_dtc2278;
-extern int probe_ht6560b;
-extern int probe_qd65xx;
-extern int cmd640_vlb;
-extern int probe_4drives;
-
-static int __initdata is_chipset_set;
-
 /*
  * ide_setup() gets called VERY EARLY during initialization,
  * to handle kernel "command line" strings beginning with "hdx=" or "ide".
@@ -930,14 +836,12 @@ static int __initdata is_chipset_set;
  */
 static int __init ide_setup(char *s)
 {
-       int i, vals[3];
        ide_hwif_t *hwif;
        ide_drive_t *drive;
        unsigned int hw, unit;
+       int vals[3];
        const char max_drive = 'a' + ((MAX_HWIFS * MAX_DRIVES) - 1);
-       const char max_hwif  = '0' + (MAX_HWIFS - 1);
 
-       
        if (strncmp(s,"hd",2) == 0 && s[2] == '=')      /* hd= is for hd.c   */
                return 0;                               /* driver and not us */
 
@@ -953,7 +857,7 @@ static int __init ide_setup(char *s)
 
                printk(" : Enabled support for IDE doublers\n");
                ide_doubler = 1;
-               return 1;
+               goto obsolete_option;
        }
 #endif /* CONFIG_BLK_DEV_IDEDOUBLER */
 
@@ -967,17 +871,17 @@ static int __init ide_setup(char *s)
        if (!strcmp(s, "ide=noacpi")) {
                //printk(" : Disable IDE ACPI support.\n");
                ide_noacpi = 1;
-               return 1;
+               goto obsolete_option;
        }
        if (!strcmp(s, "ide=acpigtf")) {
                //printk(" : Enable IDE ACPI _GTF support.\n");
-               ide_noacpitfs = 0;
-               return 1;
+               ide_acpigtf = 1;
+               goto obsolete_option;
        }
        if (!strcmp(s, "ide=acpionboot")) {
                //printk(" : Call IDE ACPI methods on boot.\n");
-               ide_noacpionboot = 0;
-               return 1;
+               ide_acpionboot = 1;
+               goto obsolete_option;
        }
 #endif /* CONFIG_BLK_DEV_IDEACPI */
 
@@ -987,7 +891,7 @@ static int __init ide_setup(char *s)
        if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) {
                const char *hd_words[] = {
                        "none", "noprobe", "nowerr", "cdrom", "nodma",
-                       "autotune", "noautotune", "-8", "-9", "-10",
+                       "-6", "-7", "-8", "-9", "-10",
                        "noflush", "remap", "remap63", "scsi", NULL };
                unit = s[2] - 'a';
                hw   = unit / MAX_DRIVES;
@@ -1002,30 +906,22 @@ static int __init ide_setup(char *s)
                        case -1: /* "none" */
                        case -2: /* "noprobe" */
                                drive->noprobe = 1;
-                               goto done;
+                               goto obsolete_option;
                        case -3: /* "nowerr" */
                                drive->bad_wstat = BAD_R_STAT;
-                               hwif->noprobe = 0;
-                               goto done;
+                               goto obsolete_option;
                        case -4: /* "cdrom" */
                                drive->present = 1;
                                drive->media = ide_cdrom;
                                /* an ATAPI device ignores DRDY */
                                drive->ready_stat = 0;
-                               hwif->noprobe = 0;
-                               goto done;
+                               goto obsolete_option;
                        case -5: /* nodma */
                                drive->nodma = 1;
-                               goto done;
-                       case -6: /* "autotune" */
-                               drive->autotune = IDE_TUNE_AUTO;
-                               goto obsolete_option;
-                       case -7: /* "noautotune" */
-                               drive->autotune = IDE_TUNE_NOAUTO;
                                goto obsolete_option;
                        case -11: /* noflush */
                                drive->noflush = 1;
-                               goto done;
+                               goto obsolete_option;
                        case -12: /* "remap" */
                                drive->remap_0_to_1 = 1;
                                goto obsolete_option;
@@ -1043,8 +939,7 @@ static int __init ide_setup(char *s)
                                drive->sect     = drive->bios_sect = vals[2];
                                drive->present  = 1;
                                drive->forced_geom = 1;
-                               hwif->noprobe = 0;
-                               goto done;
+                               goto obsolete_option;
                        default:
                                goto bad_option;
                }
@@ -1062,126 +957,15 @@ static int __init ide_setup(char *s)
                        idebus_parameter = vals[0];
                } else
                        printk(" -- BAD BUS SPEED! Expected value from 20 to 66");
-               goto done;
+               goto obsolete_option;
        }
-       /*
-        * Look for interface options:  "idex="
-        */
-       if (s[3] >= '0' && s[3] <= max_hwif) {
-               /*
-                * Be VERY CAREFUL changing this: note hardcoded indexes below
-                * (-8, -9, -10) are reserved to ease the hardcoding.
-                */
-               static const char *ide_words[] = {
-                       "minus1", "serialize", "minus3", "minus4",
-                       "reset", "minus6", "ata66", "minus8", "minus9",
-                       "minus10", "four", "qd65xx", "ht6560b", "cmd640_vlb",
-                       "dtc2278", "umc8672", "ali14xx", NULL };
-
-               hw = s[3] - '0';
-               hwif = &ide_hwifs[hw];
-               i = match_parm(&s[4], ide_words, vals, 3);
-
-               /*
-                * Cryptic check to ensure chipset not already set for hwif.
-                * Note: we can't depend on hwif->chipset here.
-                */
-               if (i >= -18 && i <= -11) {
-                       /* chipset already specified */
-                       if (is_chipset_set)
-                               goto bad_option;
-                       /* these drivers are for "ide0=" only */
-                       if (hw != 0)
-                               goto bad_hwif;
-                       is_chipset_set = 1;
-                       printk("\n");
-               }
-
-               switch (i) {
-#ifdef CONFIG_BLK_DEV_ALI14XX
-                       case -17: /* "ali14xx" */
-                               probe_ali14xx = 1;
-                               goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_UMC8672
-                       case -16: /* "umc8672" */
-                               probe_umc8672 = 1;
-                               goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_DTC2278
-                       case -15: /* "dtc2278" */
-                               probe_dtc2278 = 1;
-                               goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_CMD640
-                       case -14: /* "cmd640_vlb" */
-                               cmd640_vlb = 1;
-                               goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_HT6560B
-                       case -13: /* "ht6560b" */
-                               probe_ht6560b = 1;
-                               goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_QD65XX
-                       case -12: /* "qd65xx" */
-                               probe_qd65xx = 1;
-                               goto obsolete_option;
-#endif
-#ifdef CONFIG_BLK_DEV_4DRIVES
-                       case -11: /* "four" drives on one set of ports */
-                               probe_4drives = 1;
-                               goto obsolete_option;
-#endif
-                       case -10: /* minus10 */
-                       case -9: /* minus9 */
-                       case -8: /* minus8 */
-                       case -6:
-                       case -4:
-                       case -3:
-                               goto bad_option;
-                       case -7: /* ata66 */
-#ifdef CONFIG_BLK_DEV_IDEPCI
-                               /*
-                                * Use ATA_CBL_PATA40_SHORT so drive side
-                                * cable detection is also overriden.
-                                */
-                               hwif->cbl = ATA_CBL_PATA40_SHORT;
-                               goto obsolete_option;
-#else
-                               goto bad_hwif;
-#endif
-                       case -5: /* "reset" */
-                               hwif->reset = 1;
-                               goto obsolete_option;
-                       case -2: /* "serialize" */
-                               hwif->mate = &ide_hwifs[hw^1];
-                               hwif->mate->mate = hwif;
-                               hwif->serialized = hwif->mate->serialized = 1;
-                               goto obsolete_option;
 
-                       case -1:
-                       case 0:
-                       case 1:
-                       case 2:
-                       case 3:
-                               goto bad_option;
-                       default:
-                               printk(" -- SUPPORT NOT CONFIGURED IN THIS KERNEL\n");
-                               return 1;
-               }
-       }
 bad_option:
        printk(" -- BAD OPTION\n");
        return 1;
 obsolete_option:
        printk(" -- OBSOLETE OPTION, WILL BE REMOVED SOON!\n");
        return 1;
-bad_hwif:
-       printk("-- NOT SUPPORTED ON ide%d", hw);
-done:
-       printk("\n");
-       return 1;
 }
 
 EXPORT_SYMBOL(ide_lock);
@@ -1317,6 +1101,185 @@ static void ide_port_class_release(struct device *portdev)
        put_device(&hwif->gendev);
 }
 
+int ide_vlb_clk;
+EXPORT_SYMBOL_GPL(ide_vlb_clk);
+
+module_param_named(vlb_clock, ide_vlb_clk, int, 0);
+MODULE_PARM_DESC(vlb_clock, "VLB clock frequency (in MHz)");
+
+int ide_pci_clk;
+EXPORT_SYMBOL_GPL(ide_pci_clk);
+
+module_param_named(pci_clock, ide_pci_clk, int, 0);
+MODULE_PARM_DESC(pci_clock, "PCI bus clock frequency (in MHz)");
+
+static int ide_set_dev_param_mask(const char *s, struct kernel_param *kp)
+{
+       int a, b, i, j = 1;
+       unsigned int *dev_param_mask = (unsigned int *)kp->arg;
+
+       if (sscanf(s, "%d.%d:%d", &a, &b, &j) != 3 &&
+           sscanf(s, "%d.%d", &a, &b) != 2)
+               return -EINVAL;
+
+       i = a * MAX_DRIVES + b;
+
+       if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
+               return -EINVAL;
+
+       if (j)
+               *dev_param_mask |= (1 << i);
+       else
+               *dev_param_mask &= ~(1 << i);
+
+       return 0;
+}
+
+static unsigned int ide_nodma;
+
+module_param_call(nodma, ide_set_dev_param_mask, NULL, &ide_nodma, 0);
+MODULE_PARM_DESC(nodma, "disallow DMA for a device");
+
+static unsigned int ide_noflush;
+
+module_param_call(noflush, ide_set_dev_param_mask, NULL, &ide_noflush, 0);
+MODULE_PARM_DESC(noflush, "disable flush requests for a device");
+
+static unsigned int ide_noprobe;
+
+module_param_call(noprobe, ide_set_dev_param_mask, NULL, &ide_noprobe, 0);
+MODULE_PARM_DESC(noprobe, "skip probing for a device");
+
+static unsigned int ide_nowerr;
+
+module_param_call(nowerr, ide_set_dev_param_mask, NULL, &ide_nowerr, 0);
+MODULE_PARM_DESC(nowerr, "ignore the WRERR_STAT bit for a device");
+
+static unsigned int ide_cdroms;
+
+module_param_call(cdrom, ide_set_dev_param_mask, NULL, &ide_cdroms, 0);
+MODULE_PARM_DESC(cdrom, "force device as a CD-ROM");
+
+struct chs_geom {
+       unsigned int    cyl;
+       u8              head;
+       u8              sect;
+};
+
+static unsigned int ide_disks;
+static struct chs_geom ide_disks_chs[MAX_HWIFS * MAX_DRIVES];
+
+static int ide_set_disk_chs(const char *str, struct kernel_param *kp)
+{
+       int a, b, c = 0, h = 0, s = 0, i, j = 1;
+
+       if (sscanf(str, "%d.%d:%d,%d,%d", &a, &b, &c, &h, &s) != 5 &&
+           sscanf(str, "%d.%d:%d", &a, &b, &j) != 3)
+               return -EINVAL;
+
+       i = a * MAX_DRIVES + b;
+
+       if (i >= MAX_HWIFS * MAX_DRIVES || j < 0 || j > 1)
+               return -EINVAL;
+
+       if (c > INT_MAX || h > 255 || s > 255)
+               return -EINVAL;
+
+       if (j)
+               ide_disks |= (1 << i);
+       else
+               ide_disks &= ~(1 << i);
+
+       ide_disks_chs[i].cyl  = c;
+       ide_disks_chs[i].head = h;
+       ide_disks_chs[i].sect = s;
+
+       return 0;
+}
+
+module_param_call(chs, ide_set_disk_chs, NULL, NULL, 0);
+MODULE_PARM_DESC(chs, "force device as a disk (using CHS)");
+
+static void ide_dev_apply_params(ide_drive_t *drive)
+{
+       int i = drive->hwif->index * MAX_DRIVES + drive->select.b.unit;
+
+       if (ide_nodma & (1 << i)) {
+               printk(KERN_INFO "ide: disallowing DMA for %s\n", drive->name);
+               drive->nodma = 1;
+       }
+       if (ide_noflush & (1 << i)) {
+               printk(KERN_INFO "ide: disabling flush requests for %s\n",
+                                drive->name);
+               drive->noflush = 1;
+       }
+       if (ide_noprobe & (1 << i)) {
+               printk(KERN_INFO "ide: skipping probe for %s\n", drive->name);
+               drive->noprobe = 1;
+       }
+       if (ide_nowerr & (1 << i)) {
+               printk(KERN_INFO "ide: ignoring the WRERR_STAT bit for %s\n",
+                                drive->name);
+               drive->bad_wstat = BAD_R_STAT;
+       }
+       if (ide_cdroms & (1 << i)) {
+               printk(KERN_INFO "ide: forcing %s as a CD-ROM\n", drive->name);
+               drive->present = 1;
+               drive->media = ide_cdrom;
+               /* an ATAPI device ignores DRDY */
+               drive->ready_stat = 0;
+       }
+       if (ide_disks & (1 << i)) {
+               drive->cyl  = drive->bios_cyl  = ide_disks_chs[i].cyl;
+               drive->head = drive->bios_head = ide_disks_chs[i].head;
+               drive->sect = drive->bios_sect = ide_disks_chs[i].sect;
+               drive->forced_geom = 1;
+               printk(KERN_INFO "ide: forcing %s as a disk (%d/%d/%d)\n",
+                                drive->name,
+                                drive->cyl, drive->head, drive->sect);
+               drive->present = 1;
+               drive->media = ide_disk;
+               drive->ready_stat = READY_STAT;
+       }
+}
+
+static unsigned int ide_ignore_cable;
+
+static int ide_set_ignore_cable(const char *s, struct kernel_param *kp)
+{
+       int i, j = 1;
+
+       if (sscanf(s, "%d:%d", &i, &j) != 2 && sscanf(s, "%d", &i) != 1)
+               return -EINVAL;
+
+       if (i >= MAX_HWIFS || j < 0 || j > 1)
+               return -EINVAL;
+
+       if (j)
+               ide_ignore_cable |= (1 << i);
+       else
+               ide_ignore_cable &= ~(1 << i);
+
+       return 0;
+}
+
+module_param_call(ignore_cable, ide_set_ignore_cable, NULL, NULL, 0);
+MODULE_PARM_DESC(ignore_cable, "ignore cable detection");
+
+void ide_port_apply_params(ide_hwif_t *hwif)
+{
+       int i;
+
+       if (ide_ignore_cable & (1 << hwif->index)) {
+               printk(KERN_INFO "ide: ignoring cable detection for %s\n",
+                                hwif->name);
+               hwif->cbl = ATA_CBL_PATA40_SHORT;
+       }
+
+       for (i = 0; i < MAX_DRIVES; i++)
+               ide_dev_apply_params(&hwif->drives[i]);
+}
+
 /*
  * This gets invoked once during initialization, to set *everything* up
  */
@@ -1383,11 +1346,6 @@ int __init init_module (void)
 
 void __exit cleanup_module (void)
 {
-       int index;
-
-       for (index = 0; index < MAX_HWIFS; ++index)
-               ide_unregister(index);
-
        proc_ide_destroy();
 
        class_destroy(ide_port_class);
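
The ide.c hunks above retire the old "hdx="/"ide=" boot options in favour of per-device module parameters parsed by ide_set_dev_param_mask(), which encodes "hwif.unit[:flag]" strings as bits in a mask. A minimal user-space sketch of that encoding, assuming MAX_DRIVES == 2 as in the stock IDE core (the "ide_core.nodma=0.1" spelling below is illustrative only):

#include <stdio.h>

#define MAX_DRIVES 2    /* assumed; matches the stock IDE core */

/* "a.b[:j]" selects hwif a, unit b; j=1 sets the device's bit, j=0 clears it */
static unsigned int dev_bit(int hwif, int unit)
{
        return 1u << (hwif * MAX_DRIVES + unit);
}

int main(void)
{
        /* e.g. ide_core.nodma=0.1 would mark hwif 0, unit 1 (hdb) */
        printf("0.1 -> mask 0x%x\n", dev_bit(0, 1));
        /* e.g. ide_core.cdrom=1.0 would mark hwif 1, unit 0 (hdc) */
        printf("1.0 -> mask 0x%x\n", dev_bit(1, 0));
        return 0;
}
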
index 33bb7b87be5dc539005de3c22c1e0dc7b93e7fa6..90c65cf97448c3fc7e72313f7bb1dbfcd7e4e0fb 100644 (file)
@@ -49,6 +49,8 @@
 
 #include <asm/io.h>
 
+#define DRV_NAME "ali14xx"
+
 /* port addresses for auto-detection */
 #define ALI_NUM_PORTS 4
 static const int ports[ALI_NUM_PORTS] __initdata =
@@ -114,7 +116,7 @@ static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
        int time1, time2;
        u8 param1, param2, param3, param4;
        unsigned long flags;
-       int bus_speed = system_bus_clock();
+       int bus_speed = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
 
        /* calculate timing, according to PIO mode */
        time1 = ide_pio_cycle_time(drive, pio);
@@ -192,18 +194,20 @@ static int __init initRegisters(void)
        return t;
 }
 
+static const struct ide_port_ops ali14xx_port_ops = {
+       .set_pio_mode           = ali14xx_set_pio_mode,
+};
+
 static const struct ide_port_info ali14xx_port_info = {
+       .name                   = DRV_NAME,
        .chipset                = ide_ali14xx,
-       .host_flags             = IDE_HFLAG_NO_DMA | IDE_HFLAG_NO_AUTOTUNE,
+       .port_ops               = &ali14xx_port_ops,
+       .host_flags             = IDE_HFLAG_NO_DMA,
        .pio_mask               = ATA_PIO4,
 };
 
 static int __init ali14xx_probe(void)
 {
-       ide_hwif_t *hwif, *mate;
-       static u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-       hw_regs_t hw[2];
-
        printk(KERN_DEBUG "ali14xx: base=0x%03x, regOn=0x%02x.\n",
                          basePort, regOn);
 
@@ -213,34 +217,10 @@ static int __init ali14xx_probe(void)
                return 1;
        }
 
-       memset(&hw, 0, sizeof(hw));
-
-       ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
-       hw[0].irq = 14;
-
-       ide_std_init_ports(&hw[1], 0x170, 0x376);
-       hw[1].irq = 15;
-
-       hwif = ide_find_port();
-       if (hwif) {
-               ide_init_port_hw(hwif, &hw[0]);
-               hwif->set_pio_mode = &ali14xx_set_pio_mode;
-               idx[0] = hwif->index;
-       }
-
-       mate = ide_find_port();
-       if (mate) {
-               ide_init_port_hw(mate, &hw[1]);
-               mate->set_pio_mode = &ali14xx_set_pio_mode;
-               idx[1] = mate->index;
-       }
-
-       ide_device_add(idx, &ali14xx_port_info);
-
-       return 0;
+       return ide_legacy_device_add(&ali14xx_port_info, 0);
 }
 
-int probe_ali14xx;
+static int probe_ali14xx;
 
 module_param_named(probe, probe_ali14xx, bool, 0);
 MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets");
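
The ali14xx conversion above is the template repeated for the remaining legacy VLB drivers below (dtc2278, ht6560b, qd65xx, umc8672): per-hwif method pointers such as ->set_pio_mode move into a const struct ide_port_ops referenced from the ide_port_info, and the open-coded dual-port probe collapses into ide_legacy_device_add(). A sketch of the resulting shape only; the "myvlb" driver and its hook are hypothetical:

#include <linux/init.h>
#include <linux/ide.h>

/* hypothetical driver used only to illustrate the structure above */
static void myvlb_set_pio_mode(ide_drive_t *drive, const u8 pio)
{
        /* a real driver would program the chipset timing registers here */
}

static const struct ide_port_ops myvlb_port_ops = {
        .set_pio_mode   = myvlb_set_pio_mode,
};

static const struct ide_port_info myvlb_port_info = {
        .name           = "myvlb",
        .port_ops       = &myvlb_port_ops,
        .host_flags     = IDE_HFLAG_NO_DMA,
        .pio_mask       = ATA_PIO4,
};

static int __init myvlb_probe(void)
{
        /* second argument is the driver's extra config word, 0 if unused */
        return ide_legacy_device_add(&myvlb_port_info, 0);
}
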
index 6956eb8f2d5f4fedb12349797bad15ec45ab40f1..5c730e4dd735b9c7f32b9111e037f86762768b88 100644 (file)
@@ -102,7 +102,7 @@ static int buddha_ack_intr(ide_hwif_t *hwif)
 {
     unsigned char ch;
 
-    ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]);
+    ch = z_readb(hwif->io_ports.irq_addr);
     if (!(ch & 0x80))
            return 0;
     return 1;
@@ -112,9 +112,9 @@ static int xsurf_ack_intr(ide_hwif_t *hwif)
 {
     unsigned char ch;
 
-    ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]);
+    ch = z_readb(hwif->io_ports.irq_addr);
     /* X-Surf needs a 0 written to IRQ register to ensure ISA bit A11 stays at 0 */
-    z_writeb(0, hwif->io_ports[IDE_IRQ_OFFSET]); 
+    z_writeb(0, hwif->io_ports.irq_addr);
     if (!(ch & 0x80))
            return 0;
     return 1;
@@ -128,13 +128,13 @@ static void __init buddha_setup_ports(hw_regs_t *hw, unsigned long base,
 
        memset(hw, 0, sizeof(*hw));
 
-       hw->io_ports[IDE_DATA_OFFSET] = base;
+       hw->io_ports.data_addr = base;
 
        for (i = 1; i < 8; i++)
-               hw->io_ports[i] = base + 2 + i * 4;
+               hw->io_ports_array[i] = base + 2 + i * 4;
 
-       hw->io_ports[IDE_CONTROL_OFFSET] = ctl;
-       hw->io_ports[IDE_IRQ_OFFSET] = irq_port;
+       hw->io_ports.ctl_addr = ctl;
+       hw->io_ports.irq_addr = irq_port;
 
        hw->irq = IRQ_AMIGA_PORTS;
        hw->ack_intr = ack_intr;
@@ -228,8 +228,6 @@ fail_base2:
                                ide_init_port_data(hwif, index);
                                ide_init_port_hw(hwif, &hw);
 
-                               hwif->mmio = 1;
-
                                idx[i] = index;
                        }
                }
index 9c6b3249a0045af80f0721fd4daf13db5498e248..af791a02a1201948334c22b2ad3079c2f69762cd 100644 (file)
@@ -16,6 +16,8 @@
 
 #include <asm/io.h>
 
+#define DRV_NAME "dtc2278"
+
 /*
  * Changing this #undef to #define may solve start up problems in some systems.
  */
@@ -86,24 +88,26 @@ static void dtc2278_set_pio_mode(ide_drive_t *drive, const u8 pio)
        }
 }
 
+static const struct ide_port_ops dtc2278_port_ops = {
+       .set_pio_mode           = dtc2278_set_pio_mode,
+};
+
 static const struct ide_port_info dtc2278_port_info __initdata = {
+       .name                   = DRV_NAME,
        .chipset                = ide_dtc2278,
+       .port_ops               = &dtc2278_port_ops,
        .host_flags             = IDE_HFLAG_SERIALIZE |
                                  IDE_HFLAG_NO_UNMASK_IRQS |
                                  IDE_HFLAG_IO_32BIT |
                                  /* disallow ->io_32bit changes */
                                  IDE_HFLAG_NO_IO_32BIT |
-                                 IDE_HFLAG_NO_DMA |
-                                 IDE_HFLAG_NO_AUTOTUNE,
+                                 IDE_HFLAG_NO_DMA,
        .pio_mask               = ATA_PIO4,
 };
 
 static int __init dtc2278_probe(void)
 {
        unsigned long flags;
-       ide_hwif_t *hwif, *mate;
-       static u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-       hw_regs_t hw[2];
 
        local_irq_save(flags);
        /*
@@ -123,33 +127,10 @@ static int __init dtc2278_probe(void)
 #endif
        local_irq_restore(flags);
 
-       memset(&hw, 0, sizeof(hw));
-
-       ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
-       hw[0].irq = 14;
-
-       ide_std_init_ports(&hw[1], 0x170, 0x376);
-       hw[1].irq = 15;
-
-       hwif = ide_find_port();
-       if (hwif) {
-               ide_init_port_hw(hwif, &hw[0]);
-               hwif->set_pio_mode = dtc2278_set_pio_mode;
-               idx[0] = hwif->index;
-       }
-
-       mate = ide_find_port();
-       if (mate) {
-               ide_init_port_hw(mate, &hw[1]);
-               idx[1] = mate->index;
-       }
-
-       ide_device_add(idx, &dtc2278_port_info);
-
-       return 0;
+       return ide_legacy_device_add(&dtc2278_port_info, 0);
 }
 
-int probe_dtc2278 = 0;
+static int probe_dtc2278;
 
 module_param_named(probe, probe_dtc2278, bool, 0);
 MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets");
index 8c9c9f7f54ca5121f4f7a9b02dedf34bad10ff70..83555ca513b58c6b0f9330398431a151bd5ee027 100644 (file)
 int falconide_intr_lock;
 EXPORT_SYMBOL(falconide_intr_lock);
 
+static void falconide_input_data(ide_drive_t *drive, struct request *rq,
+                                void *buf, unsigned int len)
+{
+       unsigned long data_addr = drive->hwif->io_ports.data_addr;
+
+       if (drive->media == ide_disk && rq && rq->cmd_type == REQ_TYPE_FS)
+               return insw(data_addr, buf, (len + 1) / 2);
+
+       insw_swapw(data_addr, buf, (len + 1) / 2);
+}
+
+static void falconide_output_data(ide_drive_t *drive, struct request *rq,
+                                 void *buf, unsigned int len)
+{
+       unsigned long data_addr = drive->hwif->io_ports.data_addr;
+
+       if (drive->media == ide_disk && rq && rq->cmd_type == REQ_TYPE_FS)
+               return outsw(data_addr, buf, (len + 1) / 2);
+
+       outsw_swapw(data_addr, buf, (len + 1) / 2);
+}
+
 static void __init falconide_setup_ports(hw_regs_t *hw)
 {
        int i;
 
        memset(hw, 0, sizeof(*hw));
 
-       hw->io_ports[IDE_DATA_OFFSET] = ATA_HD_BASE;
+       hw->io_ports.data_addr = ATA_HD_BASE;
 
        for (i = 1; i < 8; i++)
-               hw->io_ports[i] = ATA_HD_BASE + 1 + i * 4;
+               hw->io_ports_array[i] = ATA_HD_BASE + 1 + i * 4;
 
-       hw->io_ports[IDE_CONTROL_OFFSET] = ATA_HD_BASE + ATA_HD_CONTROL;
+       hw->io_ports.ctl_addr = ATA_HD_BASE + ATA_HD_CONTROL;
 
        hw->irq = IRQ_MFP_IDE;
        hw->ack_intr = NULL;
@@ -89,7 +111,10 @@ static int __init falconide_init(void)
 
                ide_init_port_data(hwif, index);
                ide_init_port_hw(hwif, &hw);
-               hwif->mmio = 1;
+
+               /* Atari has a byte-swapped IDE interface */
+               hwif->input_data  = falconide_input_data;
+               hwif->output_data = falconide_output_data;
 
                ide_get_lock(NULL, NULL);
                ide_device_add(idx, NULL);
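
The two hooks added above give the Falcon's byte-swapped IDE data bus its own PIO path: plain insw()/outsw() is kept for filesystem requests to disks, while everything else (identify data, ATAPI packets, and so on) goes through the swapping insw_swapw()/outsw_swapw() variants. Conceptually the swapped read amounts to the sketch below; the real insw_swapw() is an m68k helper, not this code:

#include <stdint.h>

static inline uint16_t swab16(uint16_t v)
{
        return (uint16_t)((v << 8) | (v >> 8));
}

/* read 'words' 16-bit values from a memory-mapped data register,
 * storing each one byte-swapped -- a stand-in for insw_swapw() */
static void insw_swapped(const volatile uint16_t *port, uint16_t *buf,
                         unsigned int words)
{
        while (words--)
                *buf++ = swab16(*port);
}
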
index fcc8d52bf2a1acb72e58a040ab6e295d2a61a2b1..a9c2593a898c30a1e39e2d4b910e73e9ae29817e 100644 (file)
@@ -63,6 +63,8 @@
 #define GAYLE_HAS_CONTROL_REG  (!ide_doubler)
 #define GAYLE_IDEREG_SIZE      (ide_doubler ? 0x1000 : 0x2000)
 int ide_doubler = 0;   /* support IDE doublers? */
+module_param_named(doubler, ide_doubler, bool, 0);
+MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
 #endif /* CONFIG_BLK_DEV_IDEDOUBLER */
 
 
@@ -74,7 +76,7 @@ static int gayle_ack_intr_a4000(ide_hwif_t *hwif)
 {
     unsigned char ch;
 
-    ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]);
+    ch = z_readb(hwif->io_ports.irq_addr);
     if (!(ch & GAYLE_IRQ_IDE))
        return 0;
     return 1;
@@ -84,11 +86,11 @@ static int gayle_ack_intr_a1200(ide_hwif_t *hwif)
 {
     unsigned char ch;
 
-    ch = z_readb(hwif->io_ports[IDE_IRQ_OFFSET]);
+    ch = z_readb(hwif->io_ports.irq_addr);
     if (!(ch & GAYLE_IRQ_IDE))
        return 0;
-    (void)z_readb(hwif->io_ports[IDE_STATUS_OFFSET]);
-    z_writeb(0x7c, hwif->io_ports[IDE_IRQ_OFFSET]);
+    (void)z_readb(hwif->io_ports.status_addr);
+    z_writeb(0x7c, hwif->io_ports.irq_addr);
     return 1;
 }
 
@@ -100,13 +102,13 @@ static void __init gayle_setup_ports(hw_regs_t *hw, unsigned long base,
 
        memset(hw, 0, sizeof(*hw));
 
-       hw->io_ports[IDE_DATA_OFFSET] = base;
+       hw->io_ports.data_addr = base;
 
        for (i = 1; i < 8; i++)
-               hw->io_ports[i] = base + 2 + i * 4;
+               hw->io_ports_array[i] = base + 2 + i * 4;
 
-       hw->io_ports[IDE_CONTROL_OFFSET] = ctl;
-       hw->io_ports[IDE_IRQ_OFFSET] = irq_port;
+       hw->io_ports.ctl_addr = ctl;
+       hw->io_ports.irq_addr = irq_port;
 
        hw->irq = IRQ_AMIGA_PORTS;
        hw->ack_intr = ack_intr;
@@ -182,8 +184,6 @@ found:
            ide_init_port_data(hwif, index);
            ide_init_port_hw(hwif, &hw);
 
-           hwif->mmio = 1;
-
            idx[i] = index;
        } else
            release_mem_region(res_start, res_n);
index 60f52f5158c994524beecd90e1f256bbf55d891a..4fe516df9f74f3d40d2cc9abaa44000c80b891fb 100644 (file)
@@ -157,8 +157,8 @@ static void ht6560b_selectproc (ide_drive_t *drive)
                /*
                 * Set timing for this drive:
                 */
-               outb(timing, hwif->io_ports[IDE_SELECT_OFFSET]);
-               (void)inb(hwif->io_ports[IDE_STATUS_OFFSET]);
+               outb(timing, hwif->io_ports.device_addr);
+               (void)inb(hwif->io_ports.status_addr);
 #ifdef DEBUG
                printk("ht6560b: %s: select=%#x timing=%#x\n",
                        drive->name, select, timing);
@@ -212,8 +212,8 @@ static u8 ht_pio2timings(ide_drive_t *drive, const u8 pio)
 {
        int active_time, recovery_time;
        int active_cycles, recovery_cycles;
-       int bus_speed = system_bus_clock();
-       
+       int bus_speed = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
+
         if (pio) {
                unsigned int cycle_time;
 
@@ -323,69 +323,44 @@ static void __init ht6560b_port_init_devs(ide_hwif_t *hwif)
        hwif->drives[1].drive_data = t;
 }
 
-int probe_ht6560b = 0;
+static int probe_ht6560b;
 
 module_param_named(probe, probe_ht6560b, bool, 0);
 MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
 
+static const struct ide_port_ops ht6560b_port_ops = {
+       .port_init_devs         = ht6560b_port_init_devs,
+       .set_pio_mode           = ht6560b_set_pio_mode,
+       .selectproc             = ht6560b_selectproc,
+};
+
 static const struct ide_port_info ht6560b_port_info __initdata = {
+       .name                   = DRV_NAME,
        .chipset                = ide_ht6560b,
+       .port_ops               = &ht6560b_port_ops,
        .host_flags             = IDE_HFLAG_SERIALIZE | /* is this needed? */
                                  IDE_HFLAG_NO_DMA |
-                                 IDE_HFLAG_NO_AUTOTUNE |
                                  IDE_HFLAG_ABUSE_PREFETCH,
        .pio_mask               = ATA_PIO4,
 };
 
 static int __init ht6560b_init(void)
 {
-       ide_hwif_t *hwif, *mate;
-       static u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-       hw_regs_t hw[2];
-
        if (probe_ht6560b == 0)
                return -ENODEV;
 
        if (!request_region(HT_CONFIG_PORT, 1, DRV_NAME)) {
                printk(KERN_NOTICE "%s: HT_CONFIG_PORT not found\n",
-                       __FUNCTION__);
+                       __func__);
                return -ENODEV;
        }
 
        if (!try_to_init_ht6560b()) {
-               printk(KERN_NOTICE "%s: HBA not found\n", __FUNCTION__);
+               printk(KERN_NOTICE "%s: HBA not found\n", __func__);
                goto release_region;
        }
 
-       memset(&hw, 0, sizeof(hw));
-
-       ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
-       hw[0].irq = 14;
-
-       ide_std_init_ports(&hw[1], 0x170, 0x376);
-       hw[1].irq = 15;
-
-       hwif = ide_find_port();
-       if (hwif) {
-               ide_init_port_hw(hwif, &hw[0]);
-               hwif->selectproc     = ht6560b_selectproc;
-               hwif->set_pio_mode   = ht6560b_set_pio_mode;
-               hwif->port_init_devs = ht6560b_port_init_devs;
-               idx[0] = hwif->index;
-       }
-
-       mate = ide_find_port();
-       if (mate) {
-               ide_init_port_hw(mate, &hw[1]);
-               mate->selectproc     = ht6560b_selectproc;
-               mate->set_pio_mode   = ht6560b_set_pio_mode;
-               mate->port_init_devs = ht6560b_port_init_devs;
-               idx[1] = mate->index;
-       }
-
-       ide_device_add(idx, &ht6560b_port_info);
-
-       return 0;
+       return ide_legacy_device_add(&ht6560b_port_info, 0);
 
 release_region:
        release_region(HT_CONFIG_PORT, 1);
index c352f12348af944bbdb512af53e923c950389bc6..ecae916a3385f15c8725199a3eb18fe3733a79e4 100644 (file)
@@ -4,7 +4,9 @@
 #include <linux/module.h>
 #include <linux/ide.h>
 
-int probe_4drives;
+#define DRV_NAME "ide-4drives"
+
+static int probe_4drives;
 
 module_param_named(probe, probe_4drives, bool, 0);
 MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
@@ -12,15 +14,29 @@ MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
 static int __init ide_4drives_init(void)
 {
        ide_hwif_t *hwif, *mate;
+       unsigned long base = 0x1f0, ctl = 0x3f6;
        u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
        hw_regs_t hw;
 
        if (probe_4drives == 0)
                return -ENODEV;
 
+       if (!request_region(base, 8, DRV_NAME)) {
+               printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+                               DRV_NAME, base, base + 7);
+               return -EBUSY;
+       }
+
+       if (!request_region(ctl, 1, DRV_NAME)) {
+               printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+                               DRV_NAME, ctl);
+               release_region(base, 8);
+               return -EBUSY;
+       }
+
        memset(&hw, 0, sizeof(hw));
 
-       ide_std_init_ports(&hw, 0x1f0, 0x3f6);
+       ide_std_init_ports(&hw, base, ctl);
        hw.irq = 14;
        hw.chipset = ide_4drives;
 
index b97b8d51b3eb9b246d5538294203cf1ed51f2989..aa2ea3deac85a14ac724501690fad4f2976975d1 100644 (file)
@@ -51,6 +51,8 @@
 #include <pcmcia/cisreg.h>
 #include <pcmcia/ciscode.h>
 
+#define DRV_NAME "ide-cs"
+
 /*====================================================================*/
 
 /* Module parameters */
@@ -72,16 +74,11 @@ static char *version =
 
 /*====================================================================*/
 
-static const char ide_major[] = {
-    IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR,
-    IDE4_MAJOR, IDE5_MAJOR
-};
-
 typedef struct ide_info_t {
        struct pcmcia_device    *p_dev;
+       ide_hwif_t              *hwif;
     int                ndev;
     dev_node_t node;
-    int                hd;
 } ide_info_t;
 
 static void ide_release(struct pcmcia_device *);
@@ -136,20 +133,44 @@ static int ide_probe(struct pcmcia_device *link)
 
 static void ide_detach(struct pcmcia_device *link)
 {
+    ide_info_t *info = link->priv;
+    ide_hwif_t *hwif = info->hwif;
+
     DEBUG(0, "ide_detach(0x%p)\n", link);
 
     ide_release(link);
 
-    kfree(link->priv);
+    release_region(hwif->io_ports.ctl_addr, 1);
+    release_region(hwif->io_ports.data_addr, 8);
+
+    kfree(info);
 } /* ide_detach */
 
-static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq, struct pcmcia_device *handle)
+static const struct ide_port_ops idecs_port_ops = {
+       .quirkproc              = ide_undecoded_slave,
+};
+
+static ide_hwif_t *idecs_register(unsigned long io, unsigned long ctl,
+                               unsigned long irq, struct pcmcia_device *handle)
 {
     ide_hwif_t *hwif;
     hw_regs_t hw;
     int i;
     u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
 
+    if (!request_region(io, 8, DRV_NAME)) {
+       printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+                       DRV_NAME, io, io + 7);
+       return NULL;
+    }
+
+    if (!request_region(ctl, 1, DRV_NAME)) {
+       printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+                       DRV_NAME, ctl);
+       release_region(io, 8);
+       return NULL;
+    }
+
     memset(&hw, 0, sizeof(hw));
     ide_std_init_ports(&hw, io, ctl);
     hw.irq = irq;
@@ -158,23 +179,25 @@ static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq
 
     hwif = ide_find_port();
     if (hwif == NULL)
-       return -1;
+       goto out_release;
 
     i = hwif->index;
 
-    if (hwif->present)
-       ide_unregister(i);
-    else
-       ide_init_port_data(hwif, i);
-
+    ide_init_port_data(hwif, i);
     ide_init_port_hw(hwif, &hw);
-    hwif->quirkproc = &ide_undecoded_slave;
+    hwif->port_ops = &idecs_port_ops;
 
     idx[0] = i;
 
     ide_device_add(idx, NULL);
 
-    return hwif->present ? i : -1;
+    if (hwif->present)
+       return hwif;
+
+out_release:
+    release_region(ctl, 1);
+    release_region(io, 8);
+    return NULL;
 }
 
 /*======================================================================
@@ -199,8 +222,9 @@ static int ide_config(struct pcmcia_device *link)
        cistpl_cftable_entry_t dflt;
     } *stk = NULL;
     cistpl_cftable_entry_t *cfg;
-    int i, pass, last_ret = 0, last_fn = 0, hd, is_kme = 0;
+    int i, pass, last_ret = 0, last_fn = 0, is_kme = 0;
     unsigned long io_base, ctl_base;
+    ide_hwif_t *hwif;
 
     DEBUG(0, "ide_config(0x%p)\n", link);
 
@@ -296,14 +320,15 @@ static int ide_config(struct pcmcia_device *link)
        outb(0x81, ctl_base+1);
 
     /* retry registration in case device is still spinning up */
-    for (hd = -1, i = 0; i < 10; i++) {
-       hd = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
-       if (hd >= 0) break;
+    for (i = 0; i < 10; i++) {
+       hwif = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
+       if (hwif)
+           break;
        if (link->io.NumPorts1 == 0x20) {
            outb(0x02, ctl_base + 0x10);
-           hd = idecs_register(io_base + 0x10, ctl_base + 0x10,
-                               link->irq.AssignedIRQ, link);
-           if (hd >= 0) {
+           hwif = idecs_register(io_base + 0x10, ctl_base + 0x10,
+                                 link->irq.AssignedIRQ, link);
+           if (hwif) {
                io_base += 0x10;
                ctl_base += 0x10;
                break;
@@ -312,7 +337,7 @@ static int ide_config(struct pcmcia_device *link)
        msleep(100);
     }
 
-    if (hd < 0) {
+    if (hwif == NULL) {
        printk(KERN_NOTICE "ide-cs: ide_register() at 0x%3lx & 0x%3lx"
               ", irq %u failed\n", io_base, ctl_base,
               link->irq.AssignedIRQ);
@@ -320,10 +345,10 @@ static int ide_config(struct pcmcia_device *link)
     }
 
     info->ndev = 1;
-    sprintf(info->node.dev_name, "hd%c", 'a' + (hd * 2));
-    info->node.major = ide_major[hd];
+    sprintf(info->node.dev_name, "hd%c", 'a' + hwif->index * 2);
+    info->node.major = hwif->major;
     info->node.minor = 0;
-    info->hd = hd;
+    info->hwif = hwif;
     link->dev_node = &info->node;
     printk(KERN_INFO "ide-cs: %s: Vpp = %d.%d\n",
           info->node.dev_name, link->conf.Vpp / 10, link->conf.Vpp % 10);
@@ -354,13 +379,14 @@ failed:
 void ide_release(struct pcmcia_device *link)
 {
     ide_info_t *info = link->priv;
+    ide_hwif_t *hwif = info->hwif;
 
     DEBUG(0, "ide_release(0x%p)\n", link);
 
     if (info->ndev) {
        /* FIXME: if this fails we need to queue the cleanup somehow
           -- need to investigate the required PCMCIA magic */
-       ide_unregister(info->hd);
+       ide_unregister(hwif);
     }
     info->ndev = 0;
 
index bf240775531e90d0ba74efd0029e73ee462ecf77..d3bc3f24e05d07e52f079121117a672b11de0146 100644 (file)
@@ -30,14 +30,14 @@ static void __devinit plat_ide_setup_ports(hw_regs_t *hw,
        unsigned long port = (unsigned long)base;
        int i;
 
-       hw->io_ports[IDE_DATA_OFFSET] = port;
+       hw->io_ports.data_addr = port;
 
        port += (1 << pdata->ioport_shift);
-       for (i = IDE_ERROR_OFFSET; i <= IDE_STATUS_OFFSET;
+       for (i = 1; i <= 7;
             i++, port += (1 << pdata->ioport_shift))
-               hw->io_ports[i] = port;
+               hw->io_ports_array[i] = port;
 
-       hw->io_ports[IDE_CONTROL_OFFSET] = (unsigned long)ctrl;
+       hw->io_ports.ctl_addr = (unsigned long)ctrl;
 
        hw->irq = irq;
 
@@ -100,10 +100,11 @@ static int __devinit plat_ide_probe(struct platform_device *pdev)
        hw.dev = &pdev->dev;
 
        ide_init_port_hw(hwif, &hw);
-       hwif->mmio = 1;
 
-       if (mmio)
+       if (mmio) {
+               hwif->host_flags = IDE_HFLAG_MMIO;
                default_hwif_mmiops(hwif);
+       }
 
        idx[0] = hwif->index;
 
@@ -121,7 +122,7 @@ static int __devexit plat_ide_remove(struct platform_device *pdev)
 {
        ide_hwif_t *hwif = pdev->dev.driver_data;
 
-       ide_unregister(hwif->index);
+       ide_unregister(hwif);
 
        return 0;
 }
index 7429b80cb0892c747eb46a38201dde0238f16e43..1f527bbf8d96f9ebb34637223bce6bb07a8db82b 100644 (file)
@@ -72,9 +72,9 @@ static void __init macide_setup_ports(hw_regs_t *hw, unsigned long base,
        memset(hw, 0, sizeof(*hw));
 
        for (i = 0; i < 8; i++)
-               hw->io_ports[i] = base + i * 4;
+               hw->io_ports_array[i] = base + i * 4;
 
-       hw->io_ports[IDE_CONTROL_OFFSET] = base + IDE_CONTROL;
+       hw->io_ports.ctl_addr = base + IDE_CONTROL;
 
        hw->irq = irq;
        hw->ack_intr = ack_intr;
@@ -128,8 +128,6 @@ static int __init macide_init(void)
                ide_init_port_data(hwif, index);
                ide_init_port_hw(hwif, &hw);
 
-               hwif->mmio = 1;
-
                ide_device_add(idx, NULL);
        }
 
index fcbff0eced1b0a80b6595183a7e1b9b3a9c3a329..6f535d00e6389d26ccc1f7187eebe8238dadc498 100644 (file)
@@ -36,23 +36,6 @@ static const unsigned long pcide_bases[Q40IDE_NUM_HWIFS] = {
     PCIDE_BASE6 */
 };
 
-
-    /*
-     *  Offsets from one of the above bases
-     */
-
-/* used to do addr translation here but it is easier to do in setup ports */
-/*#define IDE_OFF_B(x) ((unsigned long)Q40_ISA_IO_B((IDE_##x##_OFFSET)))*/
-
-#define IDE_OFF_B(x)   ((unsigned long)((IDE_##x##_OFFSET)))
-#define IDE_OFF_W(x)   ((unsigned long)((IDE_##x##_OFFSET)))
-
-static const int pcide_offsets[IDE_NR_PORTS] = {
-    IDE_OFF_W(DATA), IDE_OFF_B(ERROR), IDE_OFF_B(NSECTOR), IDE_OFF_B(SECTOR),
-    IDE_OFF_B(LCYL), IDE_OFF_B(HCYL), 6 /*IDE_OFF_B(CURRENT)*/, IDE_OFF_B(STATUS),
-    518/*IDE_OFF(CMD)*/
-};
-
 static int q40ide_default_irq(unsigned long base)
 {
            switch (base) {
@@ -68,29 +51,48 @@ static int q40ide_default_irq(unsigned long base)
 /*
  * Addresses are pretranslated for Q40 ISA access.
  */
-void q40_ide_setup_ports ( hw_regs_t *hw,
-                       unsigned long base, int *offsets,
-                       unsigned long ctrl, unsigned long intr,
+static void q40_ide_setup_ports(hw_regs_t *hw, unsigned long base,
                        ide_ack_intr_t *ack_intr,
                        int irq)
 {
-       int i;
-
        memset(hw, 0, sizeof(hw_regs_t));
-       for (i = 0; i < IDE_NR_PORTS; i++) {
-               /* BIG FAT WARNING: 
-                  assumption: only DATA port is ever used in 16 bit mode */
-               if ( i==0 )
-                       hw->io_ports[i] = Q40_ISA_IO_W(base + offsets[i]);
-               else
-                       hw->io_ports[i] = Q40_ISA_IO_B(base + offsets[i]);
-       }
+       /* BIG FAT WARNING: 
+          assumption: only DATA port is ever used in 16 bit mode */
+       hw->io_ports.data_addr = Q40_ISA_IO_W(base);
+       hw->io_ports.error_addr = Q40_ISA_IO_B(base + 1);
+       hw->io_ports.nsect_addr = Q40_ISA_IO_B(base + 2);
+       hw->io_ports.lbal_addr = Q40_ISA_IO_B(base + 3);
+       hw->io_ports.lbam_addr = Q40_ISA_IO_B(base + 4);
+       hw->io_ports.lbah_addr = Q40_ISA_IO_B(base + 5);
+       hw->io_ports.device_addr = Q40_ISA_IO_B(base + 6);
+       hw->io_ports.status_addr = Q40_ISA_IO_B(base + 7);
+       hw->io_ports.ctl_addr = Q40_ISA_IO_B(base + 0x206);
 
        hw->irq = irq;
        hw->ack_intr = ack_intr;
 }
 
+static void q40ide_input_data(ide_drive_t *drive, struct request *rq,
+                             void *buf, unsigned int len)
+{
+       unsigned long data_addr = drive->hwif->io_ports.data_addr;
+
+       if (drive->media == ide_disk && rq && rq->cmd_type == REQ_TYPE_FS)
+               return insw(data_addr, buf, (len + 1) / 2);
+
+       insw_swapw(data_addr, buf, (len + 1) / 2);
+}
+
+static void q40ide_output_data(ide_drive_t *drive, struct request *rq,
+                              void *buf, unsigned int len)
+{
+       unsigned long data_addr = drive->hwif->io_ports.data_addr;
+
+       if (drive->media == ide_disk && rq && rq->cmd_type == REQ_TYPE_FS)
+               return outsw(data_addr, buf, (len + 1) / 2);
+
+       outsw_swapw(data_addr, buf, (len + 1) / 2);
+}
 
 /* 
  * the static array is needed to have the name reported in /proc/ioports,
@@ -131,9 +133,8 @@ static int __init q40ide_init(void)
                release_region(pcide_bases[i], 8);
                continue;
        }
-       q40_ide_setup_ports(&hw,(unsigned long) pcide_bases[i], (int *)pcide_offsets, 
-                       pcide_bases[i]+0x206, 
-                       0, NULL,
+       q40_ide_setup_ports(&hw, pcide_bases[i],
+                       NULL,
 //                     m68kide_iops,
                        q40ide_default_irq(pcide_bases[i]));
 
@@ -141,7 +142,10 @@ static int __init q40ide_init(void)
        if (hwif) {
                ide_init_port_data(hwif, hwif->index);
                ide_init_port_hw(hwif, &hw);
-               hwif->mmio = 1;
+
+               /* Q40 has a byte-swapped IDE interface */
+               hwif->input_data  = q40ide_input_data;
+               hwif->output_data = q40ide_output_data;
 
                idx[i] = hwif->index;
        }
index 6e820c7c5c6bfae7541362d5a63c017a418074bc..6424af154325d9f1c0776b37f7cb7aac7d932593 100644 (file)
  *
  * QDI QD6500/QD6580 EIDE controller fast support
  *
- * Please set local bus speed using kernel parameter idebus
- *     for example, "idebus=33" stands for 33Mhz VLbus
  * To activate controller support, use "ide0=qd65xx"
- * To enable tuning, use "hda=autotune hdb=autotune"
- * To enable 2nd channel tuning (qd6580 only), use "hdc=autotune hdd=autotune"
  */
 
 /*
@@ -37,6 +33,8 @@
 #include <asm/system.h>
 #include <asm/io.h>
 
+#define DRV_NAME "qd65xx"
+
 #include "qd65xx.h"
 
 /*
@@ -112,17 +110,18 @@ static void qd65xx_select(ide_drive_t *drive)
 
 static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery_time)
 {
-       u8 active_cycle,recovery_cycle;
+       int clk = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
+       u8 act_cyc, rec_cyc;
 
-       if (system_bus_clock()<=33) {
-               active_cycle =   9  - IDE_IN(active_time   * system_bus_clock() / 1000 + 1, 2, 9);
-               recovery_cycle = 15 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 0, 15);
+       if (clk <= 33) {
+               act_cyc =  9 - IDE_IN(active_time   * clk / 1000 + 1, 2,  9);
+               rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 0, 15);
        } else {
-               active_cycle =   8  - IDE_IN(active_time   * system_bus_clock() / 1000 + 1, 1, 8);
-               recovery_cycle = 18 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 3, 18);
+               act_cyc =  8 - IDE_IN(active_time   * clk / 1000 + 1, 1,  8);
+               rec_cyc = 18 - IDE_IN(recovery_time * clk / 1000 + 1, 3, 18);
        }
 
-       return((recovery_cycle<<4) | 0x08 | active_cycle);
+       return (rec_cyc << 4) | 0x08 | act_cyc;
 }
 
 /*
@@ -133,10 +132,13 @@ static u8 qd6500_compute_timing (ide_hwif_t *hwif, int active_time, int recovery
 
 static u8 qd6580_compute_timing (int active_time, int recovery_time)
 {
-       u8 active_cycle   = 17 - IDE_IN(active_time   * system_bus_clock() / 1000 + 1, 2, 17);
-       u8 recovery_cycle = 15 - IDE_IN(recovery_time * system_bus_clock() / 1000 + 1, 2, 15);
+       int clk = ide_vlb_clk ? ide_vlb_clk : system_bus_clock();
+       u8 act_cyc, rec_cyc;
+
+       act_cyc = 17 - IDE_IN(active_time   * clk / 1000 + 1, 2, 17);
+       rec_cyc = 15 - IDE_IN(recovery_time * clk / 1000 + 1, 2, 15);
 
-       return((recovery_cycle<<4) | active_cycle);
+       return (rec_cyc << 4) | act_cyc;
 }
 
 /*
@@ -304,11 +306,23 @@ static void __init qd6580_port_init_devs(ide_hwif_t *hwif)
        hwif->drives[1].drive_data = t2;
 }
 
+static const struct ide_port_ops qd6500_port_ops = {
+       .port_init_devs         = qd6500_port_init_devs,
+       .set_pio_mode           = qd6500_set_pio_mode,
+       .selectproc             = qd65xx_select,
+};
+
+static const struct ide_port_ops qd6580_port_ops = {
+       .port_init_devs         = qd6580_port_init_devs,
+       .set_pio_mode           = qd6580_set_pio_mode,
+       .selectproc             = qd65xx_select,
+};
+
 static const struct ide_port_info qd65xx_port_info __initdata = {
+       .name                   = DRV_NAME,
        .chipset                = ide_qd65xx,
        .host_flags             = IDE_HFLAG_IO_32BIT |
-                                 IDE_HFLAG_NO_DMA |
-                                 IDE_HFLAG_NO_AUTOTUNE,
+                                 IDE_HFLAG_NO_DMA,
        .pio_mask               = ATA_PIO4,
 };
 
@@ -321,10 +335,8 @@ static const struct ide_port_info qd65xx_port_info __initdata = {
 
 static int __init qd_probe(int base)
 {
-       ide_hwif_t *hwif;
-       u8 config, unit;
-       u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-       hw_regs_t hw[2];
+       int rc;
+       u8 config, unit, control;
        struct ide_port_info d = qd65xx_port_info;
 
        config = inb(QD_CONFIG_PORT);
@@ -337,21 +349,11 @@ static int __init qd_probe(int base)
        if (unit)
                d.host_flags |= IDE_HFLAG_QD_2ND_PORT;
 
-       memset(&hw, 0, sizeof(hw));
-
-       ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
-       hw[0].irq = 14;
-
-       ide_std_init_ports(&hw[1], 0x170, 0x376);
-       hw[1].irq = 15;
-
-       if ((config & 0xf0) == QD_CONFIG_QD6500) {
-
+       switch (config & 0xf0) {
+       case QD_CONFIG_QD6500:
                if (qd_testreg(base))
                         return -ENODEV;        /* bad register */
 
-               /* qd6500 found */
-
                if (config & QD_CONFIG_DISABLED) {
                        printk(KERN_WARNING "qd6500 is disabled !\n");
                        return -ENODEV;
@@ -361,37 +363,14 @@ static int __init qd_probe(int base)
                printk(KERN_DEBUG "qd6500: config=%#x, ID3=%u\n",
                        config, QD_ID3);
 
+               d.port_ops = &qd6500_port_ops;
                d.host_flags |= IDE_HFLAG_SINGLE;
-
-               hwif = ide_find_port_slot(&d);
-               if (hwif == NULL)
-                       return -ENOENT;
-
-               ide_init_port_hw(hwif, &hw[unit]);
-
-               hwif->config_data = (base << 8) | config;
-
-               hwif->port_init_devs = qd6500_port_init_devs;
-               hwif->set_pio_mode   = qd6500_set_pio_mode;
-               hwif->selectproc     = qd65xx_select;
-
-               idx[unit] = hwif->index;
-
-               ide_device_add(idx, &d);
-
-               return 1;
-       }
-
-       if (((config & 0xf0) == QD_CONFIG_QD6580_A) ||
-           ((config & 0xf0) == QD_CONFIG_QD6580_B)) {
-
-               u8 control;
-
+               break;
+       case QD_CONFIG_QD6580_A:
+       case QD_CONFIG_QD6580_B:
                if (qd_testreg(base) || qd_testreg(base + 0x02))
                        return -ENODEV; /* bad registers */
 
-               /* qd6580 found */
-
                control = inb(QD_CONTROL_PORT);
 
                printk(KERN_NOTICE "qd6580 at %#x\n", base);
@@ -400,66 +379,26 @@ static int __init qd_probe(int base)
 
                outb(QD_DEF_CONTR, QD_CONTROL_PORT);
 
-               if (control & QD_CONTR_SEC_DISABLED) {
-                       /* secondary disabled */
-
-                       printk(KERN_INFO "qd6580: single IDE board\n");
-
+               d.port_ops = &qd6580_port_ops;
+               if (control & QD_CONTR_SEC_DISABLED)
                        d.host_flags |= IDE_HFLAG_SINGLE;
 
-                       hwif = ide_find_port_slot(&d);
-                       if (hwif == NULL)
-                               return -ENOENT;
-
-                       ide_init_port_hw(hwif, &hw[unit]);
-
-                       hwif->config_data = (base << 8) | config;
-
-                       hwif->port_init_devs = qd6580_port_init_devs;
-                       hwif->set_pio_mode   = qd6580_set_pio_mode;
-                       hwif->selectproc     = qd65xx_select;
+               printk(KERN_INFO "qd6580: %s IDE board\n",
+                       (control & QD_CONTR_SEC_DISABLED) ? "single" : "dual");
+               break;
+       default:
+               return -ENODEV;
+       }
 
-                       idx[unit] = hwif->index;
+       rc = ide_legacy_device_add(&d, (base << 8) | config);
 
-                       ide_device_add(idx, &d);
+       if (d.host_flags & IDE_HFLAG_SINGLE)
+               return (rc == 0) ? 1 : rc;
 
-                       return 1;
-               } else {
-                       ide_hwif_t *mate;
-
-                       /* secondary enabled */
-                       printk(KERN_INFO "qd6580: dual IDE board\n");
-
-                       hwif = ide_find_port();
-                       if (hwif) {
-                               ide_init_port_hw(hwif, &hw[0]);
-                               hwif->config_data = (base << 8) | config;
-                               hwif->port_init_devs = qd6580_port_init_devs;
-                               hwif->set_pio_mode   = qd6580_set_pio_mode;
-                               hwif->selectproc     = qd65xx_select;
-                               idx[0] = hwif->index;
-                       }
-
-                       mate = ide_find_port();
-                       if (mate) {
-                               ide_init_port_hw(mate, &hw[1]);
-                               mate->config_data = (base << 8) | config;
-                               mate->port_init_devs = qd6580_port_init_devs;
-                               mate->set_pio_mode   = qd6580_set_pio_mode;
-                               mate->selectproc     = qd65xx_select;
-                               idx[1] = mate->index;
-                       }
-
-                       ide_device_add(idx, &qd65xx_port_info);
-
-                       return 0; /* no other qd65xx possible */
-               }
-       }
-       /* no qd65xx found */
-       return -ENODEV;
+       return rc;
 }
 
-int probe_qd65xx = 0;
+static int probe_qd65xx;
 
 module_param_named(probe, probe_qd65xx, bool, 0);
 MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
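
The qd65xx hunks above let a user-supplied VLB clock (the new ide_core "vlb_clock" parameter added earlier in this diff) override system_bus_clock() in the timing calculations. A stand-alone sketch of the qd6580 formula, assuming IDE_IN() clamps its first argument into the given range as defined in qd65xx.h; the example values are arbitrary:

#include <stdio.h>

/* clamp v into [lo, hi] -- the role IDE_IN() is assumed to play */
static int clamp_int(int v, int lo, int hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

static unsigned char qd6580_timing(int active_ns, int recovery_ns, int clk_mhz)
{
        int act = 17 - clamp_int(active_ns   * clk_mhz / 1000 + 1, 2, 17);
        int rec = 15 - clamp_int(recovery_ns * clk_mhz / 1000 + 1, 2, 15);

        return (unsigned char)((rec << 4) | act);
}

int main(void)
{
        /* arbitrary cycle times on a 33 MHz VLB */
        printf("timing byte: 0x%02x\n", qd6580_timing(70, 25, 33));
        return 0;
}
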
index 4d90badd2bdab13472f1047e34c07f06b9361f56..b54a14a577550acf3af76bfceaadf3c221433c90 100644 (file)
@@ -51,6 +51,8 @@
 
 #include <asm/io.h>
 
+#define DRV_NAME "umc8672"
+
 /*
  * Default speeds.  These can be changed with "auto-tune" and/or hdparm.
  */
@@ -120,18 +122,21 @@ static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio)
        spin_unlock_irqrestore(&ide_lock, flags);
 }
 
+static const struct ide_port_ops umc8672_port_ops = {
+       .set_pio_mode           = umc_set_pio_mode,
+};
+
 static const struct ide_port_info umc8672_port_info __initdata = {
+       .name                   = DRV_NAME,
        .chipset                = ide_umc8672,
-       .host_flags             = IDE_HFLAG_NO_DMA | IDE_HFLAG_NO_AUTOTUNE,
+       .port_ops               = &umc8672_port_ops,
+       .host_flags             = IDE_HFLAG_NO_DMA,
        .pio_mask               = ATA_PIO4,
 };
 
 static int __init umc8672_probe(void)
 {
-       ide_hwif_t *hwif, *mate;
        unsigned long flags;
-       static u8 idx[4] = { 0xff, 0xff, 0xff, 0xff };
-       hw_regs_t hw[2];
 
        if (!request_region(0x108, 2, "umc8672")) {
                printk(KERN_ERR "umc8672: ports 0x108-0x109 already in use.\n");
@@ -150,34 +155,10 @@ static int __init umc8672_probe(void)
        umc_set_speeds(current_speeds);
        local_irq_restore(flags);
 
-       memset(&hw, 0, sizeof(hw));
-
-       ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
-       hw[0].irq = 14;
-
-       ide_std_init_ports(&hw[1], 0x170, 0x376);
-       hw[1].irq = 15;
-
-       hwif = ide_find_port();
-       if (hwif) {
-               ide_init_port_hw(hwif, &hw[0]);
-               hwif->set_pio_mode = umc_set_pio_mode;
-               idx[0] = hwif->index;
-       }
-
-       mate = ide_find_port();
-       if (mate) {
-               ide_init_port_hw(mate, &hw[1]);
-               mate->set_pio_mode = umc_set_pio_mode;
-               idx[1] = mate->index;
-       }
-
-       ide_device_add(idx, &umc8672_port_info);
-
-       return 0;
+       return ide_legacy_device_add(&umc8672_port_info, 0);
 }
 
-int probe_umc8672;
+static int probe_umc8672;
 
 module_param_named(probe, probe_umc8672, bool, 0);
 MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
index a8cd0035936d18e98edbd58061670b06c0ba1281..1a6c27b324983cc989e4c228b2be6cbd6e9a3027 100644 (file)
@@ -47,9 +47,6 @@
 #define IDE_AU1XXX_BURSTMODE   1
 
 static _auide_hwif auide_hwif;
-static int dbdma_init_done;
-
-static int auide_ddma_init(_auide_hwif *auide);
 
 #if defined(CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA)
 
@@ -61,7 +58,7 @@ void auide_insw(unsigned long port, void *addr, u32 count)
 
        if(!put_dest_flags(ahwif->rx_chan, (void*)addr, count << 1, 
                           DDMA_FLAGS_NOIE)) {
-               printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
+               printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->rx_chan);
@@ -79,7 +76,7 @@ void auide_outsw(unsigned long port, void *addr, u32 count)
 
        if(!put_source_flags(ahwif->tx_chan, (void*)addr,
                             count << 1, DDMA_FLAGS_NOIE)) {
-               printk(KERN_ERR "%s failed %d\n", __FUNCTION__, __LINE__);
+               printk(KERN_ERR "%s failed %d\n", __func__, __LINE__);
                return;
        }
        ctp = *((chan_tab_t **)ahwif->tx_chan);
@@ -89,6 +86,17 @@ void auide_outsw(unsigned long port, void *addr, u32 count)
        ctp->cur_ptr = au1xxx_ddma_get_nextptr_virt(dp);
 }
 
+static void au1xxx_input_data(ide_drive_t *drive, struct request *rq,
+                             void *buf, unsigned int len)
+{
+       auide_insw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
+}
+
+static void au1xxx_output_data(ide_drive_t *drive, struct request *rq,
+                              void *buf, unsigned int len)
+{
+       auide_outsw(drive->hwif->io_ports.data_addr, buf, (len + 1) / 2);
+}
 #endif
 
 static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
@@ -250,7 +258,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
                                                     (void*) sg_virt(sg),
                                                     tc, flags)) { 
                                        printk(KERN_ERR "%s failed %d\n", 
-                                              __FUNCTION__, __LINE__);
+                                              __func__, __LINE__);
                                }
                        } else 
                        {
@@ -258,7 +266,7 @@ static int auide_build_dmatable(ide_drive_t *drive)
                                                   (void*) sg_virt(sg),
                                                   tc, flags)) { 
                                        printk(KERN_ERR "%s failed %d\n", 
-                                              __FUNCTION__, __LINE__);
+                                              __func__, __LINE__);
                                }
                        }
 
@@ -315,35 +323,6 @@ static int auide_dma_setup(ide_drive_t *drive)
        return 0;
 }
 
-static u8 auide_mdma_filter(ide_drive_t *drive)
-{
-       /*
-        * FIXME: ->white_list and ->black_list are based on completely bogus
-        * ->ide_dma_check implementation which didn't set neither the host
-        * controller timings nor the device for the desired transfer mode.
-        *
-        * They should be either removed or 0x00 MWDMA mask should be
-        * returned for devices on the ->black_list.
-        */
-
-       if (dbdma_init_done == 0) {
-               auide_hwif.white_list = ide_in_drive_list(drive->id,
-                                                         dma_white_list);
-               auide_hwif.black_list = ide_in_drive_list(drive->id,
-                                                         dma_black_list);
-               auide_hwif.drive = drive;
-               auide_ddma_init(&auide_hwif);
-               dbdma_init_done = 1;
-       }
-
-       /* Is the drive in our DMA black list? */
-       if (auide_hwif.black_list)
-               printk(KERN_WARNING "%s: Disabling DMA for %s (blacklisted)\n",
-                                   drive->name, drive->id->model);
-
-       return drive->hwif->mwdma_mask;
-}
-
 static int auide_dma_test_irq(ide_drive_t *drive)
 {      
        if (drive->waiting_for_dma == 0)
@@ -389,48 +368,48 @@ static void auide_ddma_rx_callback(int irq, void *param)
 static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 devwidth, u32 flags)
 {
        dev->dev_id          = dev_id;
-       dev->dev_physaddr    = (u32)AU1XXX_ATA_PHYS_ADDR;
+       dev->dev_physaddr    = (u32)IDE_PHYS_ADDR;
        dev->dev_intlevel    = 0;
        dev->dev_intpolarity = 0;
        dev->dev_tsize       = tsize;
        dev->dev_devwidth    = devwidth;
        dev->dev_flags       = flags;
 }
-  
-#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
 
+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
 static void auide_dma_timeout(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = HWIF(drive);
 
        printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
 
-       if (hwif->ide_dma_test_irq(drive))
+       if (auide_dma_test_irq(drive))
                return;
 
-       hwif->ide_dma_end(drive);
+       auide_dma_end(drive);
 }
-                                       
 
-static int auide_ddma_init(_auide_hwif *auide) {
-       
+static const struct ide_dma_ops au1xxx_dma_ops = {
+       .dma_host_set           = auide_dma_host_set,
+       .dma_setup              = auide_dma_setup,
+       .dma_exec_cmd           = auide_dma_exec_cmd,
+       .dma_start              = auide_dma_start,
+       .dma_end                = auide_dma_end,
+       .dma_test_irq           = auide_dma_test_irq,
+       .dma_lost_irq           = auide_dma_lost_irq,
+       .dma_timeout            = auide_dma_timeout,
+};
+
+static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
+{
+       _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
        dbdev_tab_t source_dev_tab, target_dev_tab;
        u32 dev_id, tsize, devwidth, flags;
-       ide_hwif_t *hwif = auide->hwif;
 
-       dev_id   = AU1XXX_ATA_DDMA_REQ;
+       dev_id   = IDE_DDMA_REQ;
 
-       if (auide->white_list || auide->black_list) {
-               tsize    = 8;
-               devwidth = 32;
-       }
-       else { 
-               tsize    = 1;
-               devwidth = 16;
-               
-               printk(KERN_ERR "au1xxx-ide: %s is not on ide driver whitelist.\n",auide_hwif.drive->id->model);
-               printk(KERN_ERR "            please read 'Documentation/mips/AU1xxx_IDE.README'");
-       }
+       tsize    =  8; /*  1 */
+       devwidth = 32; /* 16 */
 
 #ifdef IDE_AU1XXX_BURSTMODE 
        flags = DEV_FLAGS_SYNC | DEV_FLAGS_BURSTABLE;
@@ -482,9 +461,9 @@ static int auide_ddma_init(_auide_hwif *auide) {
        return 0;
 } 
 #else
-static int auide_ddma_init( _auide_hwif *auide )
+static int auide_ddma_init(ide_hwif_t *hwif, const struct ide_port_info *d)
 {
+       _auide_hwif *auide = (_auide_hwif *)hwif->hwif_data;
        dbdev_tab_t source_dev_tab;
        int flags;
 
@@ -532,20 +511,28 @@ static int auide_ddma_init( _auide_hwif *auide )
 static void auide_setup_ports(hw_regs_t *hw, _auide_hwif *ahwif)
 {
        int i;
-       unsigned long *ata_regs = hw->io_ports;
+       unsigned long *ata_regs = hw->io_ports_array;
 
        /* FIXME? */
-       for (i = 0; i < IDE_CONTROL_OFFSET; i++) {
-               *ata_regs++ = ahwif->regbase + (i << AU1XXX_ATA_REG_OFFSET);
-       }
+       for (i = 0; i < 8; i++)
+               *ata_regs++ = ahwif->regbase + (i << IDE_REG_SHIFT);
 
        /* set the Alternative Status register */
-       *ata_regs = ahwif->regbase + (14 << AU1XXX_ATA_REG_OFFSET);
+       *ata_regs = ahwif->regbase + (14 << IDE_REG_SHIFT);
 }
 
+static const struct ide_port_ops au1xxx_port_ops = {
+       .set_pio_mode           = au1xxx_set_pio_mode,
+       .set_dma_mode           = auide_set_dma_mode,
+};
+
 static const struct ide_port_info au1xxx_port_info = {
+       .init_dma               = auide_ddma_init,
+       .port_ops               = &au1xxx_port_ops,
+#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
+       .dma_ops                = &au1xxx_dma_ops,
+#endif
        .host_flags             = IDE_HFLAG_POST_SET_MODE |
-                                 IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
                                  IDE_HFLAG_NO_IO_32BIT |
                                  IDE_HFLAG_UNMASK_IRQS,
        .pio_mask               = ATA_PIO4,
@@ -615,32 +602,13 @@ static int au_ide_probe(struct device *dev)
 
        hwif->dev = dev;
 
-       hwif->mmio  = 1;
-
        /* If the user has selected DDMA assisted copies,
           then set up a few local I/O function entry points 
        */
 
 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA     
-       hwif->INSW                      = auide_insw;
-       hwif->OUTSW                     = auide_outsw;
-#endif
-
-       hwif->set_pio_mode              = &au1xxx_set_pio_mode;
-       hwif->set_dma_mode              = &auide_set_dma_mode;
-
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
-       hwif->dma_timeout               = &auide_dma_timeout;
-
-       hwif->mdma_filter               = &auide_mdma_filter;
-
-       hwif->dma_host_set              = &auide_dma_host_set;
-       hwif->dma_exec_cmd              = &auide_dma_exec_cmd;
-       hwif->dma_start                 = &auide_dma_start;
-       hwif->ide_dma_end               = &auide_dma_end;
-       hwif->dma_setup                 = &auide_dma_setup;
-       hwif->ide_dma_test_irq          = &auide_dma_test_irq;
-       hwif->dma_lost_irq              = &auide_dma_lost_irq;
+       hwif->input_data  = au1xxx_input_data;
+       hwif->output_data = au1xxx_output_data;
 #endif
        hwif->select_data               = 0;    /* no chipset-specific code */
        hwif->config_data               = 0;    /* no chipset-specific code */
@@ -648,11 +616,6 @@ static int au_ide_probe(struct device *dev)
        auide_hwif.hwif                 = hwif;
        hwif->hwif_data                 = &auide_hwif;
 
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA           
-       auide_ddma_init(&auide_hwif);
-       dbdma_init_done = 1;
-#endif
-
        idx[0] = hwif->index;
 
        ide_device_add(idx, &au1xxx_port_info);
@@ -672,7 +635,7 @@ static int au_ide_remove(struct device *dev)
        ide_hwif_t *hwif = dev_get_drvdata(dev);
        _auide_hwif *ahwif = &auide_hwif;
 
-       ide_unregister(hwif->index);
+       ide_unregister(hwif);
 
        iounmap((void *)ahwif->regbase);
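
The au1xxx_input_data()/au1xxx_output_data() hooks added above receive a length in bytes, while the underlying auide_insw()/auide_outsw() helpers move 16-bit words, hence the (len + 1) / 2 rounding. A minimal standalone sketch of that conversion (the helper name below is invented for illustration and is not kernel API):

/*
 * Round a byte count up to the number of 16-bit words a PIO data hook
 * has to transfer, so an odd trailing byte is not dropped (mirrors the
 * (len + 1) / 2 expression in the hunk above).
 */
static unsigned int example_bytes_to_words16(unsigned int len_bytes)
{
        return (len_bytes + 1) / 2;     /* e.g. 511 bytes -> 256 words */
}
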
 
index bbe8d585334891c5e5b40555aed1a6353648b78c..712d17bdd4705f8ff2d07bf20371c702de79a564 100644
@@ -109,16 +109,15 @@ static int __devinit swarm_ide_probe(struct device *dev)
        base = ioremap(offset, size);
 
        /* Setup MMIO ops.  */
+       hwif->host_flags = IDE_HFLAG_MMIO;
        default_hwif_mmiops(hwif);
-       /* Prevent resource map manipulation.  */
-       hwif->mmio = 1;
+
        hwif->chipset = ide_generic;
-       hwif->noprobe = 0;
 
-       for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
-               hwif->io_ports[i] =
+       for (i = 0; i <= 7; i++)
+               hwif->io_ports_array[i] =
                                (unsigned long)(base + ((0x1f0 + i) << 5));
-       hwif->io_ports[IDE_CONTROL_OFFSET] =
+       hwif->io_ports.ctl_addr =
                                (unsigned long)(base + (0x3f6 << 5));
        hwif->irq = K_INT_GB_IDE;
 
index c9ba15afe97dcc6bfed8e0bdb6d5f1cab8802358..7f46c224b7c40f0f2091eea99a0be2f193d44921 100644
@@ -135,12 +135,12 @@ static void aec6260_set_mode(ide_drive_t *drive, const u8 speed)
 
 static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio)
 {
-       drive->hwif->set_dma_mode(drive, pio + XFER_PIO_0);
+       drive->hwif->port_ops->set_dma_mode(drive, pio + XFER_PIO_0);
 }
 
 static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const char *name)
 {
-       int bus_speed = system_bus_clock();
+       int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
 
        if (bus_speed <= 33)
                pci_set_drvdata(dev, (void *) aec6xxx_33_base);
@@ -175,27 +175,23 @@ static u8 __devinit atp86x_cable_detect(ide_hwif_t *hwif)
        return (ata66 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
 }
 
-static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif)
-{
-       struct pci_dev *dev = to_pci_dev(hwif->dev);
-
-       hwif->set_pio_mode = &aec_set_pio_mode;
-
-       if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF)
-               hwif->set_dma_mode = &aec6210_set_mode;
-       else {
-               hwif->set_dma_mode = &aec6260_set_mode;
+static const struct ide_port_ops atp850_port_ops = {
+       .set_pio_mode           = aec_set_pio_mode,
+       .set_dma_mode           = aec6210_set_mode,
+};
 
-               hwif->cable_detect = atp86x_cable_detect;
-       }
-}
+static const struct ide_port_ops atp86x_port_ops = {
+       .set_pio_mode           = aec_set_pio_mode,
+       .set_dma_mode           = aec6260_set_mode,
+       .cable_detect           = atp86x_cable_detect,
+};
 
 static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
        {       /* 0 */
                .name           = "AEC6210",
                .init_chipset   = init_chipset_aec62xx,
-               .init_hwif      = init_hwif_aec62xx,
                .enablebits     = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
+               .port_ops       = &atp850_port_ops,
                .host_flags     = IDE_HFLAG_SERIALIZE |
                                  IDE_HFLAG_NO_ATAPI_DMA |
                                  IDE_HFLAG_NO_DSC |
@@ -207,7 +203,7 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
        },{     /* 1 */
                .name           = "AEC6260",
                .init_chipset   = init_chipset_aec62xx,
-               .init_hwif      = init_hwif_aec62xx,
+               .port_ops       = &atp86x_port_ops,
                .host_flags     = IDE_HFLAG_NO_ATAPI_DMA | IDE_HFLAG_NO_AUTODMA |
                                  IDE_HFLAG_ABUSE_SET_DMA_MODE |
                                  IDE_HFLAG_OFF_BOARD,
@@ -217,8 +213,8 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
        },{     /* 2 */
                .name           = "AEC6260R",
                .init_chipset   = init_chipset_aec62xx,
-               .init_hwif      = init_hwif_aec62xx,
                .enablebits     = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
+               .port_ops       = &atp86x_port_ops,
                .host_flags     = IDE_HFLAG_NO_ATAPI_DMA |
                                  IDE_HFLAG_ABUSE_SET_DMA_MODE |
                                  IDE_HFLAG_NON_BOOTABLE,
@@ -228,7 +224,7 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
        },{     /* 3 */
                .name           = "AEC6280",
                .init_chipset   = init_chipset_aec62xx,
-               .init_hwif      = init_hwif_aec62xx,
+               .port_ops       = &atp86x_port_ops,
                .host_flags     = IDE_HFLAG_NO_ATAPI_DMA |
                                  IDE_HFLAG_ABUSE_SET_DMA_MODE |
                                  IDE_HFLAG_OFF_BOARD,
@@ -238,8 +234,8 @@ static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
        },{     /* 4 */
                .name           = "AEC6280R",
                .init_chipset   = init_chipset_aec62xx,
-               .init_hwif      = init_hwif_aec62xx,
                .enablebits     = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
+               .port_ops       = &atp86x_port_ops,
                .host_flags     = IDE_HFLAG_NO_ATAPI_DMA |
                                  IDE_HFLAG_ABUSE_SET_DMA_MODE |
                                  IDE_HFLAG_OFF_BOARD,
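
The recurring change in these hunks is that callbacks which init_hwif_*() used to copy into every hwif are now grouped into shared, read-only ops tables (port_ops/dma_ops) referenced from the port info. A generic sketch of the const ops-table idiom, with invented names (it shows only the idea, not the kernel structures):

/*
 * All identifiers below are illustrative; the pattern is a const table
 * of function pointers shared by every instance instead of per-instance
 * assignments made at init time.
 */
struct example_port_ops {
        void (*set_pio_mode)(int drive, unsigned char pio);
        void (*set_dma_mode)(int drive, unsigned char speed);
        unsigned char (*cable_detect)(int port);
};

static void example_set_pio(int drive, unsigned char pio)
{
        /* program PIO timings for 'drive' here */
}

static void example_set_dma(int drive, unsigned char speed)
{
        /* program DMA timings for 'drive' here */
}

static unsigned char example_cable_detect(int port)
{
        return 80;      /* pretend an 80-wire cable was found */
}

static const struct example_port_ops example_port_ops = {
        .set_pio_mode   = example_set_pio,
        .set_dma_mode   = example_set_dma,
        .cable_detect   = example_cable_detect,
};
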
index 3fa2d9f7b1b2ee6ab610d49ae884966788745bb2..c1922f9cfe80606e82a1ca2f38ae12e42877fc18 100644
@@ -38,8 +38,6 @@
 
 #include <asm/io.h>
 
-#define DISPLAY_ALI_TIMINGS
-
 /*
  *     ALi devices are not plug in. Otherwise these static values would
  *     need to go. They ought to go away anyway
@@ -49,236 +47,6 @@ static u8 m5229_revision;
 static u8 chip_is_1543c_e;
 static struct pci_dev *isa_dev;
 
-#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
-#include <linux/stat.h>
-#include <linux/proc_fs.h>
-
-static u8 ali_proc = 0;
-
-static struct pci_dev *bmide_dev;
-
-static char *fifo[4] = {
-       "FIFO Off",
-       "FIFO On ",
-       "DMA mode",
-       "PIO mode" };
-
-static char *udmaT[8] = {
-       "1.5T",
-       "  2T",
-       "2.5T",
-       "  3T",
-       "3.5T",
-       "  4T",
-       "  6T",
-       "  8T"
-};
-
-static char *channel_status[8] = {
-       "OK            ",
-       "busy          ",
-       "DRQ           ",
-       "DRQ busy      ",
-       "error         ",
-       "error busy    ",
-       "error DRQ     ",
-       "error DRQ busy"
-};
-
-/**
- *     ali_get_info            -       generate proc file for ALi IDE
- *     @buffer: buffer to fill
- *     @addr: address of user start in buffer
- *     @offset: offset into 'file'
- *     @count: buffer count
- *
- *     Walks the Ali devices and outputs summary data on the tuning and
- *     anything else that will help with debugging
- */
-static int ali_get_info (char *buffer, char **addr, off_t offset, int count)
-{
-       unsigned long bibma;
-       u8 reg53h, reg5xh, reg5yh, reg5xh1, reg5yh1, c0, c1, rev, tmp;
-       char *q, *p = buffer;
-
-       /* fetch rev. */
-       pci_read_config_byte(bmide_dev, 0x08, &rev);
-       if (rev >= 0xc1)        /* M1543C or newer */
-               udmaT[7] = " ???";
-       else
-               fifo[3]  = "   ???  ";
-
-       /* first fetch bibma: */
-       
-       bibma = pci_resource_start(bmide_dev, 4);
-
-       /*
-        * at that point bibma+0x2 et bibma+0xa are byte
-        * registers to investigate:
-        */
-       c0 = inb(bibma + 0x02);
-       c1 = inb(bibma + 0x0a);
-
-       p += sprintf(p,
-               "\n                                Ali M15x3 Chipset.\n");
-       p += sprintf(p,
-               "                                ------------------\n");
-       pci_read_config_byte(bmide_dev, 0x78, &reg53h);
-       p += sprintf(p, "PCI Clock: %d.\n", reg53h);
-
-       pci_read_config_byte(bmide_dev, 0x53, &reg53h);
-       p += sprintf(p,
-               "CD_ROM FIFO:%s, CD_ROM DMA:%s\n",
-               (reg53h & 0x02) ? "Yes" : "No ",
-               (reg53h & 0x01) ? "Yes" : "No " );
-       pci_read_config_byte(bmide_dev, 0x74, &reg53h);
-       p += sprintf(p,
-               "FIFO Status: contains %d Words, runs%s%s\n\n",
-               (reg53h & 0x3f),
-               (reg53h & 0x40) ? " OVERWR" : "",
-               (reg53h & 0x80) ? " OVERRD." : "." );
-
-       p += sprintf(p,
-               "-------------------primary channel"
-               "-------------------secondary channel"
-               "---------\n\n");
-
-       pci_read_config_byte(bmide_dev, 0x09, &reg53h);
-       p += sprintf(p,
-               "channel status:       %s"
-               "                               %s\n",
-               (reg53h & 0x20) ? "On " : "Off",
-               (reg53h & 0x10) ? "On " : "Off" );
-
-       p += sprintf(p,
-               "both channels togth:  %s"
-               "                               %s\n",
-               (c0&0x80) ? "No " : "Yes",
-               (c1&0x80) ? "No " : "Yes" );
-
-       pci_read_config_byte(bmide_dev, 0x76, &reg53h);
-       p += sprintf(p,
-               "Channel state:        %s                    %s\n",
-               channel_status[reg53h & 0x07],
-               channel_status[(reg53h & 0x70) >> 4] );
-
-       pci_read_config_byte(bmide_dev, 0x58, &reg5xh);
-       pci_read_config_byte(bmide_dev, 0x5c, &reg5yh);
-       p += sprintf(p,
-               "Add. Setup Timing:    %dT"
-               "                                %dT\n",
-               (reg5xh & 0x07) ? (reg5xh & 0x07) : 8,
-               (reg5yh & 0x07) ? (reg5yh & 0x07) : 8 );
-
-       pci_read_config_byte(bmide_dev, 0x59, &reg5xh);
-       pci_read_config_byte(bmide_dev, 0x5d, &reg5yh);
-       p += sprintf(p,
-               "Command Act. Count:   %dT"
-               "                                %dT\n"
-               "Command Rec. Count:   %dT"
-               "                               %dT\n\n",
-               (reg5xh & 0x70) ? ((reg5xh & 0x70) >> 4) : 8,
-               (reg5yh & 0x70) ? ((reg5yh & 0x70) >> 4) : 8, 
-               (reg5xh & 0x0f) ? (reg5xh & 0x0f) : 16,
-               (reg5yh & 0x0f) ? (reg5yh & 0x0f) : 16 );
-
-       p += sprintf(p,
-               "----------------drive0-----------drive1"
-               "------------drive0-----------drive1------\n\n");
-       p += sprintf(p,
-               "DMA enabled:      %s              %s"
-               "               %s              %s\n",
-               (c0&0x20) ? "Yes" : "No ",
-               (c0&0x40) ? "Yes" : "No ",
-               (c1&0x20) ? "Yes" : "No ",
-               (c1&0x40) ? "Yes" : "No " );
-
-       pci_read_config_byte(bmide_dev, 0x54, &reg5xh);
-       pci_read_config_byte(bmide_dev, 0x55, &reg5yh);
-       q = "FIFO threshold:   %2d Words         %2d Words"
-               "          %2d Words         %2d Words\n";
-       if (rev < 0xc1) {
-               if ((rev == 0x20) &&
-                   (pci_read_config_byte(bmide_dev, 0x4f, &tmp), (tmp &= 0x20))) {
-                       p += sprintf(p, q, 8, 8, 8, 8);
-               } else {
-                       p += sprintf(p, q,
-                               (reg5xh & 0x03) + 12,
-                               ((reg5xh & 0x30)>>4) + 12,
-                               (reg5yh & 0x03) + 12,
-                               ((reg5yh & 0x30)>>4) + 12 );
-               }
-       } else {
-               int t1 = (tmp = (reg5xh & 0x03)) ? (tmp << 3) : 4;
-               int t2 = (tmp = ((reg5xh & 0x30)>>4)) ? (tmp << 3) : 4;
-               int t3 = (tmp = (reg5yh & 0x03)) ? (tmp << 3) : 4;
-               int t4 = (tmp = ((reg5yh & 0x30)>>4)) ? (tmp << 3) : 4;
-               p += sprintf(p, q, t1, t2, t3, t4);
-       }
-
-#if 0
-       p += sprintf(p, 
-               "FIFO threshold:   %2d Words         %2d Words"
-               "          %2d Words         %2d Words\n",
-               (reg5xh & 0x03) + 12,
-               ((reg5xh & 0x30)>>4) + 12,
-               (reg5yh & 0x03) + 12,
-               ((reg5yh & 0x30)>>4) + 12 );
-#endif
-
-       p += sprintf(p,
-               "FIFO mode:        %s         %s          %s         %s\n",
-               fifo[((reg5xh & 0x0c) >> 2)],
-               fifo[((reg5xh & 0xc0) >> 6)],
-               fifo[((reg5yh & 0x0c) >> 2)],
-               fifo[((reg5yh & 0xc0) >> 6)] );
-
-       pci_read_config_byte(bmide_dev, 0x5a, &reg5xh);
-       pci_read_config_byte(bmide_dev, 0x5b, &reg5xh1);
-       pci_read_config_byte(bmide_dev, 0x5e, &reg5yh);
-       pci_read_config_byte(bmide_dev, 0x5f, &reg5yh1);
-
-       p += sprintf(p,/*
-               "------------------drive0-----------drive1"
-               "------------drive0-----------drive1------\n")*/
-               "Dt RW act. Cnt    %2dT              %2dT"
-               "               %2dT              %2dT\n"
-               "Dt RW rec. Cnt    %2dT              %2dT"
-               "               %2dT              %2dT\n\n",
-               (reg5xh & 0x70) ? ((reg5xh & 0x70) >> 4) : 8,
-               (reg5xh1 & 0x70) ? ((reg5xh1 & 0x70) >> 4) : 8,
-               (reg5yh & 0x70) ? ((reg5yh & 0x70) >> 4) : 8,
-               (reg5yh1 & 0x70) ? ((reg5yh1 & 0x70) >> 4) : 8,
-               (reg5xh & 0x0f) ? (reg5xh & 0x0f) : 16,
-               (reg5xh1 & 0x0f) ? (reg5xh1 & 0x0f) : 16,
-               (reg5yh & 0x0f) ? (reg5yh & 0x0f) : 16,
-               (reg5yh1 & 0x0f) ? (reg5yh1 & 0x0f) : 16 );
-
-       p += sprintf(p,
-               "-----------------------------------UDMA Timings"
-               "--------------------------------\n\n");
-
-       pci_read_config_byte(bmide_dev, 0x56, &reg5xh);
-       pci_read_config_byte(bmide_dev, 0x57, &reg5yh);
-       p += sprintf(p,
-               "UDMA:             %s               %s"
-               "                %s               %s\n"
-               "UDMA timings:     %s             %s"
-               "              %s             %s\n\n",
-               (reg5xh & 0x08) ? "OK" : "No",
-               (reg5xh & 0x80) ? "OK" : "No",
-               (reg5yh & 0x08) ? "OK" : "No",
-               (reg5yh & 0x80) ? "OK" : "No",
-               udmaT[(reg5xh & 0x07)],
-               udmaT[(reg5xh & 0x70) >> 4],
-               udmaT[reg5yh & 0x07],
-               udmaT[(reg5yh & 0x70) >> 4] );
-
-       return p-buffer; /* => must be less than 4k! */
-}
-#endif  /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
-
 /**
  *     ali_set_pio_mode        -       set host controller for PIO mode
  *     @drive: drive
@@ -294,7 +62,7 @@ static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio)
        int s_time, a_time, c_time;
        u8 s_clc, a_clc, r_clc;
        unsigned long flags;
-       int bus_speed = system_bus_clock();
+       int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
        int port = hwif->channel ? 0x5c : 0x58;
        int portFIFO = hwif->channel ? 0x55 : 0x54;
        u8 cd_dma_fifo = 0;
@@ -465,14 +233,6 @@ static unsigned int __devinit init_chipset_ali15x3 (struct pci_dev *dev, const c
 
        isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
 
-#if defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
-       if (!ali_proc) {
-               ali_proc = 1;
-               bmide_dev = dev;
-               ide_pci_create_host_proc("ali", ali_get_info);
-       }
-#endif  /* defined(DISPLAY_ALI_TIMINGS) && defined(CONFIG_IDE_PROC_FS) */
-
        local_irq_save(flags);
 
        if (m5229_revision < 0xC2) {
@@ -610,7 +370,7 @@ static int ali_cable_override(struct pci_dev *pdev)
 }
 
 /**
- *     ata66_ali15x3   -       check for UDMA 66 support
+ *     ali_cable_detect        -       cable detection
  *     @hwif: IDE interface
  *
  *     This checks if the controller and the cable are capable
@@ -620,7 +380,7 @@ static int ali_cable_override(struct pci_dev *pdev)
  *     FIXME: frobs bits that are not defined on newer ALi devicea
  */
 
-static u8 __devinit ata66_ali15x3(ide_hwif_t *hwif)
+static u8 __devinit ali_cable_detect(ide_hwif_t *hwif)
 {
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        unsigned long flags;
@@ -652,34 +412,14 @@ static u8 __devinit ata66_ali15x3(ide_hwif_t *hwif)
        return cbl;
 }
 
-/**
- *     init_hwif_common_ali15x3        -       Set up ALI IDE hardware
- *     @hwif: IDE interface
- *
- *     Initialize the IDE structure side of the ALi 15x3 driver.
- */
-static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
-{
-       hwif->set_pio_mode = &ali_set_pio_mode;
-       hwif->set_dma_mode = &ali_set_dma_mode;
-       hwif->udma_filter = &ali_udma_filter;
-
-       hwif->cable_detect = ata66_ali15x3;
-
-       if (hwif->dma_base == 0)
-               return;
-
-       hwif->dma_setup = &ali15x3_dma_setup;
-}
-
+#if !defined(CONFIG_SPARC64) && !defined(CONFIG_PPC)
 /**
  *     init_hwif_ali15x3       -       Initialize the ALI IDE x86 stuff
  *     @hwif: interface to configure
  *
  *     Obtain the IRQ tables for an ALi based IDE solution on the PC
  *     class platforms. This part of the code isn't applicable to the
- *     Sparc systems
+ *     Sparc and PowerPC systems.
  */
 
 static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
@@ -722,34 +462,66 @@ static void __devinit init_hwif_ali15x3 (ide_hwif_t *hwif)
                if(irq >= 0)
                        hwif->irq = irq;
        }
-
-       init_hwif_common_ali15x3(hwif);
 }
+#else
+#define init_hwif_ali15x3 NULL
+#endif /* !defined(CONFIG_SPARC64) && !defined(CONFIG_PPC) */
 
 /**
  *     init_dma_ali15x3        -       set up DMA on ALi15x3
  *     @hwif: IDE interface
- *     @dmabase: DMA interface base PCI address
+ *     @d: IDE port info
  *
- *     Set up the DMA functionality on the ALi 15x3. For the ALi
- *     controllers this is generic so we can let the generic code do
- *     the actual work.
+ *     Set up the DMA functionality on the ALi 15x3.
  */
 
-static void __devinit init_dma_ali15x3 (ide_hwif_t *hwif, unsigned long dmabase)
+static int __devinit init_dma_ali15x3(ide_hwif_t *hwif,
+                                     const struct ide_port_info *d)
 {
-       if (m5229_revision < 0x20)
-               return;
+       struct pci_dev *dev = to_pci_dev(hwif->dev);
+       unsigned long base = ide_pci_dma_base(hwif, d);
+
+       if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
+               return -1;
+
        if (!hwif->channel)
-               outb(inb(dmabase + 2) & 0x60, dmabase + 2);
-       ide_setup_dma(hwif, dmabase);
+               outb(inb(base + 2) & 0x60, base + 2);
+
+       printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx\n",
+                        hwif->name, base, base + 7);
+
+       if (ide_allocate_dma_engine(hwif))
+               return -1;
+
+       ide_setup_dma(hwif, base);
+
+       return 0;
 }
 
+static const struct ide_port_ops ali_port_ops = {
+       .set_pio_mode           = ali_set_pio_mode,
+       .set_dma_mode           = ali_set_dma_mode,
+       .udma_filter            = ali_udma_filter,
+       .cable_detect           = ali_cable_detect,
+};
+
+static const struct ide_dma_ops ali_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ali15x3_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = ide_dma_start,
+       .dma_end                = __ide_dma_end,
+       .dma_test_irq           = ide_dma_test_irq,
+       .dma_lost_irq           = ide_dma_lost_irq,
+       .dma_timeout            = ide_dma_timeout,
+};
+
 static const struct ide_port_info ali15x3_chipset __devinitdata = {
        .name           = "ALI15X3",
        .init_chipset   = init_chipset_ali15x3,
        .init_hwif      = init_hwif_ali15x3,
        .init_dma       = init_dma_ali15x3,
+       .port_ops       = &ali_port_ops,
        .pio_mask       = ATA_PIO5,
        .swdma_mask     = ATA_SWDMA2,
        .mwdma_mask     = ATA_MWDMA2,
@@ -792,14 +564,17 @@ static int __devinit alim15x3_init_one(struct pci_dev *dev, const struct pci_dev
                        d.udma_mask = ATA_UDMA5;
                else
                        d.udma_mask = ATA_UDMA6;
+
+               d.dma_ops = &ali_dma_ops;
+       } else {
+               d.host_flags |= IDE_HFLAG_NO_DMA;
+
+               d.mwdma_mask = d.swdma_mask = 0;
        }
 
        if (idx == 0)
                d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX;
 
-#if defined(CONFIG_SPARC64)
-       d.init_hwif = init_hwif_common_ali15x3;
-#endif /* CONFIG_SPARC64 */
        return ide_setup_pci_device(dev, &d);
 }
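
alim15x3_init_one() above copies the const ali15x3_chipset template into a local struct and then adjusts it per chip revision (UDMA mask and dma_ops, or IDE_HFLAG_NO_DMA with the DMA masks cleared) before registering. A hedged sketch of that copy-and-customize idiom, with made-up names and revision cut-offs:

/*
 * Illustrative only: a const template describes the common case and the
 * probe routine patches a stack copy based on what the hardware reports.
 * The revision numbers below are invented.
 */
struct example_info {
        unsigned char udma_mask;
        int has_dma;
};

static const struct example_info example_template = {
        .udma_mask = 0x3f,
        .has_dma   = 1,
};

static int example_register(const struct example_info *d)
{
        (void)d;        /* stand-in for the real registration step */
        return 0;
}

static int example_probe(unsigned char revision)
{
        struct example_info d = example_template;       /* struct copy */

        if (revision < 0x20) {
                d.has_dma   = 0;        /* too old: no DMA at all */
                d.udma_mask = 0x00;
        } else if (revision < 0xc2) {
                d.udma_mask = 0x07;     /* e.g. cap at UDMA2 */
        }

        return example_register(&d);
}
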
 
index ff684d3123784853e16332226c5ea1e9975d44a9..efcf54338be7b9c63543f496f3548d71d8825be2 100644
@@ -179,7 +179,7 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev,
  * Determine the system bus clock.
  */
 
-       amd_clock = system_bus_clock() * 1000;
+       amd_clock = (ide_pci_clk ? ide_pci_clk : system_bus_clock()) * 1000;
 
        switch (amd_clock) {
                case 33000: amd_clock = 33333; break;
@@ -210,13 +210,14 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
 
        if (hwif->irq == 0) /* 0 is bogus but will do for now */
                hwif->irq = pci_get_legacy_ide_irq(dev, hwif->channel);
-
-       hwif->set_pio_mode = &amd_set_pio_mode;
-       hwif->set_dma_mode = &amd_set_drive;
-
-       hwif->cable_detect = amd_cable_detect;
 }
 
+static const struct ide_port_ops amd_port_ops = {
+       .set_pio_mode           = amd_set_pio_mode,
+       .set_dma_mode           = amd_set_drive,
+       .cable_detect           = amd_cable_detect,
+};
+
 #define IDE_HFLAGS_AMD \
        (IDE_HFLAG_PIO_NO_BLACKLIST | \
         IDE_HFLAG_ABUSE_SET_DMA_MODE | \
@@ -230,6 +231,7 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
                .init_chipset   = init_chipset_amd74xx,                 \
                .init_hwif      = init_hwif_amd74xx,                    \
                .enablebits     = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, \
+               .port_ops       = &amd_port_ops,                        \
                .host_flags     = IDE_HFLAGS_AMD,                       \
                .pio_mask       = ATA_PIO5,                             \
                .swdma_mask     = swdma,                                \
@@ -243,6 +245,7 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
                .init_chipset   = init_chipset_amd74xx,                 \
                .init_hwif      = init_hwif_amd74xx,                    \
                .enablebits     = {{0x50,0x02,0x02}, {0x50,0x01,0x01}}, \
+               .port_ops       = &amd_port_ops,                        \
                .host_flags     = IDE_HFLAGS_AMD,                       \
                .pio_mask       = ATA_PIO5,                             \
                .swdma_mask     = ATA_SWDMA2,                           \
index 91722f88b7bd8b6cc6295c838e3b8d46af8b6db1..8b637181681ad2d6d4cfcbf30c39d6e6044b9bc6 100644
@@ -130,35 +130,25 @@ static u8 __devinit atiixp_cable_detect(ide_hwif_t *hwif)
                return ATA_CBL_PATA40;
 }
 
-/**
- *     init_hwif_atiixp                -       fill in the hwif for the ATIIXP
- *     @hwif: IDE interface
- *
- *     Set up the ide_hwif_t for the ATIIXP interface according to the
- *     capabilities of the hardware.
- */
-
-static void __devinit init_hwif_atiixp(ide_hwif_t *hwif)
-{
-       hwif->set_pio_mode = &atiixp_set_pio_mode;
-       hwif->set_dma_mode = &atiixp_set_dma_mode;
-
-       hwif->cable_detect = atiixp_cable_detect;
-}
+static const struct ide_port_ops atiixp_port_ops = {
+       .set_pio_mode           = atiixp_set_pio_mode,
+       .set_dma_mode           = atiixp_set_dma_mode,
+       .cable_detect           = atiixp_cable_detect,
+};
 
 static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
        {       /* 0 */
                .name           = "ATIIXP",
-               .init_hwif      = init_hwif_atiixp,
                .enablebits     = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
+               .port_ops       = &atiixp_port_ops,
                .host_flags     = IDE_HFLAG_LEGACY_IRQS,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
                .udma_mask      = ATA_UDMA5,
        },{     /* 1 */
                .name           = "SB600_PATA",
-               .init_hwif      = init_hwif_atiixp,
                .enablebits     = {{0x48,0x01,0x00}, {0x00,0x00,0x00}},
+               .port_ops       = &atiixp_port_ops,
                .host_flags     = IDE_HFLAG_SINGLE | IDE_HFLAG_LEGACY_IRQS,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
index b076dbfc43a7b1639fbff24468b8972db74a2754..aaf38109eaecef95de0bdfd363bc92f514b62b9a 100644
 
 #include <asm/io.h>
 
-/*
- * This flag is set in ide.c by the parameter:  ide0=cmd640_vlb
- */
-int cmd640_vlb;
+#define DRV_NAME "cmd640"
+
+static int cmd640_vlb;
 
 /*
  * CMD640 specific registers definition.
@@ -348,12 +347,12 @@ static int __init secondary_port_responding(void)
 
        spin_lock_irqsave(&cmd640_lock, flags);
 
-       outb_p(0x0a, 0x170 + IDE_SELECT_OFFSET);        /* select drive0 */
+       outb_p(0x0a, 0x176);    /* select drive0 */
        udelay(100);
-       if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x0a) {
-               outb_p(0x1a, 0x170 + IDE_SELECT_OFFSET); /* select drive1 */
+       if ((inb_p(0x176) & 0x1f) != 0x0a) {
+               outb_p(0x1a, 0x176); /* select drive1 */
                udelay(100);
-               if ((inb_p(0x170 + IDE_SELECT_OFFSET) & 0x1f) != 0x1a) {
+               if ((inb_p(0x176) & 0x1f) != 0x1a) {
                        spin_unlock_irqrestore(&cmd640_lock, flags);
                        return 0; /* nothing responded */
                }
@@ -381,6 +380,7 @@ static void cmd640_dump_regs(void)
 }
 #endif
 
+#ifndef CONFIG_BLK_DEV_CMD640_ENHANCED
 /*
  * Check whether prefetch is on for a drive,
  * and initialize the unmask flags for safe operation.
@@ -401,9 +401,7 @@ static void __init check_prefetch(ide_drive_t *drive, unsigned int index)
                drive->no_io_32bit = 0;
        }
 }
-
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
-
+#else
 /*
  * Sets prefetch mode for a drive.
  */
@@ -459,34 +457,6 @@ static inline u8 pack_nibbles(u8 upper, u8 lower)
        return ((upper & 0x0f) << 4) | (lower & 0x0f);
 }
 
-/*
- * This routine retrieves the initial drive timings from the chipset.
- */
-static void __init retrieve_drive_counts(unsigned int index)
-{
-       u8 b;
-
-       /*
-        * Get the internal setup timing, and convert to clock count
-        */
-       b = get_cmd640_reg(arttim_regs[index]) & ~0x3f;
-       switch (b) {
-       case 0x00: b = 4; break;
-       case 0x80: b = 3; break;
-       case 0x40: b = 2; break;
-       default:   b = 5; break;
-       }
-       setup_counts[index] = b;
-
-       /*
-        * Get the active/recovery counts
-        */
-       b = get_cmd640_reg(drwtim_regs[index]);
-       active_counts[index]   = (b >> 4)   ? (b >> 4)   : 0x10;
-       recovery_counts[index] = (b & 0x0f) ? (b & 0x0f) : 0x10;
-}
-
-
 /*
  * This routine writes the prepared setup/active/recovery counts
  * for a drive into the cmd640 chipset registers to active them.
@@ -553,7 +523,14 @@ static void cmd640_set_mode(ide_drive_t *drive, unsigned int index,
 {
        int setup_time, active_time, recovery_time, clock_time;
        u8 setup_count, active_count, recovery_count, recovery_count2, cycle_count;
-       int bus_speed = system_bus_clock();
+       int bus_speed;
+
+       if (cmd640_vlb && ide_vlb_clk)
+               bus_speed = ide_vlb_clk;
+       else if (!cmd640_vlb && ide_pci_clk)
+               bus_speed = ide_pci_clk;
+       else
+               bus_speed = system_bus_clock();
 
        if (pio_mode > 5)
                pio_mode = 5;
@@ -633,6 +610,9 @@ static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
        display_clocks(index);
 }
 
+static const struct ide_port_ops cmd640_port_ops = {
+       .set_pio_mode           = cmd640_set_pio_mode,
+};
 #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
 
 static int pci_conf1(void)
@@ -674,14 +654,32 @@ static const struct ide_port_info cmd640_port_info __initdata = {
        .chipset                = ide_cmd640,
        .host_flags             = IDE_HFLAG_SERIALIZE |
                                  IDE_HFLAG_NO_DMA |
-                                 IDE_HFLAG_NO_AUTOTUNE |
                                  IDE_HFLAG_ABUSE_PREFETCH |
                                  IDE_HFLAG_ABUSE_FAST_DEVSEL,
 #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
+       .port_ops               = &cmd640_port_ops,
        .pio_mask               = ATA_PIO5,
 #endif
 };
 
+static int cmd640x_init_one(unsigned long base, unsigned long ctl)
+{
+       if (!request_region(base, 8, DRV_NAME)) {
+               printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
+                               DRV_NAME, base, base + 7);
+               return -EBUSY;
+       }
+
+       if (!request_region(ctl, 1, DRV_NAME)) {
+               printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
+                               DRV_NAME, ctl);
+               release_region(base, 8);
+               return -EBUSY;
+       }
+
+       return 0;
+}
+
 /*
  * Probe for a cmd640 chipset, and initialize it if found.
  */
@@ -690,7 +688,7 @@ static int __init cmd640x_init(void)
 #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
        int second_port_toggled = 0;
 #endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-       int second_port_cmd640 = 0;
+       int second_port_cmd640 = 0, rc;
        const char *bus_type, *port2;
        unsigned int index;
        u8 b, cfr;
@@ -734,6 +732,17 @@ static int __init cmd640x_init(void)
                return 0;
        }
 
+       rc = cmd640x_init_one(0x1f0, 0x3f6);
+       if (rc)
+               return rc;
+
+       rc = cmd640x_init_one(0x170, 0x376);
+       if (rc) {
+               release_region(0x3f6, 1);
+               release_region(0x1f0, 8);
+               return rc;
+       }
+
        memset(&hw, 0, sizeof(hw));
 
        ide_std_init_ports(&hw[0], 0x1f0, 0x3f6);
@@ -752,10 +761,6 @@ static int __init cmd640x_init(void)
         */
        if (cmd_hwif0) {
                ide_init_port_hw(cmd_hwif0, &hw[0]);
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
-               cmd_hwif0->set_pio_mode = &cmd640_set_pio_mode;
-#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-
                idx[0] = cmd_hwif0->index;
        }
 
@@ -808,10 +813,6 @@ static int __init cmd640x_init(void)
         */
        if (second_port_cmd640 && cmd_hwif1) {
                ide_init_port_hw(cmd_hwif1, &hw[1]);
-#ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
-               cmd_hwif1->set_pio_mode = &cmd640_set_pio_mode;
-#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-
                idx[1] = cmd_hwif1->index;
        }
        printk(KERN_INFO "cmd640: %sserialized, secondary interface %s\n",
@@ -835,29 +836,16 @@ static int __init cmd640x_init(void)
                }
 
 #ifdef CONFIG_BLK_DEV_CMD640_ENHANCED
-               if (drive->autotune || ((index > 1) && second_port_toggled)) {
-                       /*
-                        * Reset timing to the slowest speed and turn off
-                        * prefetch.  This way, the drive identify code has
-                        * a better chance.
-                        */
-                       setup_counts    [index] = 4;    /* max possible */
-                       active_counts   [index] = 16;   /* max possible */
-                       recovery_counts [index] = 16;   /* max possible */
-                       program_drive_counts(drive, index);
-                       set_prefetch_mode(drive, index, 0);
-                       printk("cmd640: drive%d timings/prefetch cleared\n", index);
-               } else {
-                       /*
-                        * Record timings/prefetch without changing them.
-                        * This preserves any prior BIOS setup.
-                        */
-                       retrieve_drive_counts (index);
-                       check_prefetch(drive, index);
-                       printk("cmd640: drive%d timings/prefetch(%s) preserved",
-                               index, drive->no_io_32bit ? "off" : "on");
-                       display_clocks(index);
-               }
+               /*
+                * Reset timing to the slowest speed and turn off prefetch.
+                * This way, the drive identify code has a better chance.
+                */
+               setup_counts    [index] = 4;    /* max possible */
+               active_counts   [index] = 16;   /* max possible */
+               recovery_counts [index] = 16;   /* max possible */
+               program_drive_counts(drive, index);
+               set_prefetch_mode(drive, index, 0);
+               printk("cmd640: drive%d timings/prefetch cleared\n", index);
 #else
                /*
                 * Set the drive unmask flags to match the prefetch setting
index 8baccfef237fab086f95486eb0f226a2d99bff3b..08674711d0895c6fae3476cba5d02a2d1f351541 100644
@@ -68,8 +68,8 @@ static u8 quantize_timing(int timing, int quant)
  */
 static void program_cycle_times (ide_drive_t *drive, int cycle_time, int active_time)
 {
-       struct pci_dev *dev     = to_pci_dev(drive->hwif->dev);
-       int clock_time          = 1000 / system_bus_clock();
+       struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
+       int clock_time = 1000 / (ide_pci_clk ? ide_pci_clk : system_bus_clock());
        u8  cycle_count, active_count, recovery_count, drwtim;
        static const u8 recovery_values[] =
                {15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0};
@@ -128,7 +128,7 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
                            ide_pio_timings[pio].active_time);
 
        setup_count = quantize_timing(ide_pio_timings[pio].setup_time,
-                                     1000 / system_bus_clock());
+                       1000 / (ide_pci_clk ? ide_pci_clk : system_bus_clock()));
 
        /*
         * The primary channel has individual address setup timing registers
@@ -223,7 +223,7 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
                (void) pci_write_config_byte(dev, pciU, regU);
 }
 
-static int cmd648_ide_dma_end (ide_drive_t *drive)
+static int cmd648_dma_end(ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
        unsigned long base      = hwif->dma_base - (hwif->channel * 8);
@@ -239,7 +239,7 @@ static int cmd648_ide_dma_end (ide_drive_t *drive)
        return err;
 }
 
-static int cmd64x_ide_dma_end (ide_drive_t *drive)
+static int cmd64x_dma_end(ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
        struct pci_dev *dev     = to_pci_dev(hwif->dev);
@@ -256,7 +256,7 @@ static int cmd64x_ide_dma_end (ide_drive_t *drive)
        return err;
 }
 
-static int cmd648_ide_dma_test_irq (ide_drive_t *drive)
+static int cmd648_dma_test_irq(ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
        unsigned long base      = hwif->dma_base - (hwif->channel * 8);
@@ -279,7 +279,7 @@ static int cmd648_ide_dma_test_irq (ide_drive_t *drive)
        return 0;
 }
 
-static int cmd64x_ide_dma_test_irq (ide_drive_t *drive)
+static int cmd64x_dma_test_irq(ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
        struct pci_dev *dev     = to_pci_dev(hwif->dev);
@@ -310,7 +310,7 @@ static int cmd64x_ide_dma_test_irq (ide_drive_t *drive)
  * event order for DMA transfers.
  */
 
-static int cmd646_1_ide_dma_end (ide_drive_t *drive)
+static int cmd646_1_dma_end(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = HWIF(drive);
        u8 dma_stat = 0, dma_cmd = 0;
@@ -370,7 +370,7 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
        return 0;
 }
 
-static u8 __devinit ata66_cmd64x(ide_hwif_t *hwif)
+static u8 __devinit cmd64x_cable_detect(ide_hwif_t *hwif)
 {
        struct pci_dev  *dev    = to_pci_dev(hwif->dev);
        u8 bmidecsr = 0, mask   = hwif->channel ? 0x02 : 0x01;
@@ -385,60 +385,52 @@ static u8 __devinit ata66_cmd64x(ide_hwif_t *hwif)
        }
 }
 
-static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
-{
-       struct pci_dev *dev = to_pci_dev(hwif->dev);
-
-       hwif->set_pio_mode = &cmd64x_set_pio_mode;
-       hwif->set_dma_mode = &cmd64x_set_dma_mode;
-
-       hwif->cable_detect = ata66_cmd64x;
+static const struct ide_port_ops cmd64x_port_ops = {
+       .set_pio_mode           = cmd64x_set_pio_mode,
+       .set_dma_mode           = cmd64x_set_dma_mode,
+       .cable_detect           = cmd64x_cable_detect,
+};
 
-       if (!hwif->dma_base)
-               return;
+static const struct ide_dma_ops cmd64x_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ide_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = ide_dma_start,
+       .dma_end                = cmd64x_dma_end,
+       .dma_test_irq           = cmd64x_dma_test_irq,
+       .dma_lost_irq           = ide_dma_lost_irq,
+       .dma_timeout            = ide_dma_timeout,
+};
 
-       /*
-        * UltraDMA only supported on PCI646U and PCI646U2, which
-        * correspond to revisions 0x03, 0x05 and 0x07 respectively.
-        * Actually, although the CMD tech support people won't
-        * tell me the details, the 0x03 revision cannot support
-        * UDMA correctly without hardware modifications, and even
-        * then it only works with Quantum disks due to some
-        * hold time assumptions in the 646U part which are fixed
-        * in the 646U2.
-        *
-        * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
-        */
-       if (dev->device == PCI_DEVICE_ID_CMD_646 && dev->revision < 5)
-               hwif->ultra_mask = 0x00;
+static const struct ide_dma_ops cmd646_rev1_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ide_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = ide_dma_start,
+       .dma_end                = cmd646_1_dma_end,
+       .dma_test_irq           = ide_dma_test_irq,
+       .dma_lost_irq           = ide_dma_lost_irq,
+       .dma_timeout            = ide_dma_timeout,
+};
 
-       switch (dev->device) {
-       case PCI_DEVICE_ID_CMD_648:
-       case PCI_DEVICE_ID_CMD_649:
-       alt_irq_bits:
-               hwif->ide_dma_end       = &cmd648_ide_dma_end;
-               hwif->ide_dma_test_irq  = &cmd648_ide_dma_test_irq;
-               break;
-       case PCI_DEVICE_ID_CMD_646:
-               if (dev->revision == 0x01) {
-                       hwif->ide_dma_end = &cmd646_1_ide_dma_end;
-                       break;
-               } else if (dev->revision >= 0x03)
-                       goto alt_irq_bits;
-               /* fall thru */
-       default:
-               hwif->ide_dma_end       = &cmd64x_ide_dma_end;
-               hwif->ide_dma_test_irq  = &cmd64x_ide_dma_test_irq;
-               break;
-       }
-}
+static const struct ide_dma_ops cmd648_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ide_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = ide_dma_start,
+       .dma_end                = cmd648_dma_end,
+       .dma_test_irq           = cmd648_dma_test_irq,
+       .dma_lost_irq           = ide_dma_lost_irq,
+       .dma_timeout            = ide_dma_timeout,
+};
 
 static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
        {       /* 0 */
                .name           = "CMD643",
                .init_chipset   = init_chipset_cmd64x,
-               .init_hwif      = init_hwif_cmd64x,
                .enablebits     = {{0x00,0x00,0x00}, {0x51,0x08,0x08}},
+               .port_ops       = &cmd64x_port_ops,
+               .dma_ops        = &cmd64x_dma_ops,
                .host_flags     = IDE_HFLAG_CLEAR_SIMPLEX |
                                  IDE_HFLAG_ABUSE_PREFETCH,
                .pio_mask       = ATA_PIO5,
@@ -447,9 +439,10 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
        },{     /* 1 */
                .name           = "CMD646",
                .init_chipset   = init_chipset_cmd64x,
-               .init_hwif      = init_hwif_cmd64x,
                .enablebits     = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
                .chipset        = ide_cmd646,
+               .port_ops       = &cmd64x_port_ops,
+               .dma_ops        = &cmd648_dma_ops,
                .host_flags     = IDE_HFLAG_ABUSE_PREFETCH,
                .pio_mask       = ATA_PIO5,
                .mwdma_mask     = ATA_MWDMA2,
@@ -457,8 +450,9 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
        },{     /* 2 */
                .name           = "CMD648",
                .init_chipset   = init_chipset_cmd64x,
-               .init_hwif      = init_hwif_cmd64x,
                .enablebits     = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
+               .port_ops       = &cmd64x_port_ops,
+               .dma_ops        = &cmd648_dma_ops,
                .host_flags     = IDE_HFLAG_ABUSE_PREFETCH,
                .pio_mask       = ATA_PIO5,
                .mwdma_mask     = ATA_MWDMA2,
@@ -466,8 +460,9 @@ static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
        },{     /* 3 */
                .name           = "CMD649",
                .init_chipset   = init_chipset_cmd64x,
-               .init_hwif      = init_hwif_cmd64x,
                .enablebits     = {{0x51,0x04,0x04}, {0x51,0x08,0x08}},
+               .port_ops       = &cmd64x_port_ops,
+               .dma_ops        = &cmd648_dma_ops,
                .host_flags     = IDE_HFLAG_ABUSE_PREFETCH,
                .pio_mask       = ATA_PIO5,
                .mwdma_mask     = ATA_MWDMA2,
@@ -482,12 +477,35 @@ static int __devinit cmd64x_init_one(struct pci_dev *dev, const struct pci_devic
 
        d = cmd64x_chipsets[idx];
 
-       /*
-        * The original PCI0646 didn't have the primary channel enable bit,
-        * it appeared starting with PCI0646U (i.e. revision ID 3).
-        */
-       if (idx == 1 && dev->revision < 3)
-               d.enablebits[0].reg = 0;
+       if (idx == 1) {
+               /*
+                * UltraDMA only supported on PCI646U and PCI646U2, which
+                * correspond to revisions 0x03, 0x05 and 0x07 respectively.
+                * Actually, although the CMD tech support people won't
+                * tell me the details, the 0x03 revision cannot support
+                * UDMA correctly without hardware modifications, and even
+                * then it only works with Quantum disks due to some
+                * hold time assumptions in the 646U part which are fixed
+                * in the 646U2.
+                *
+                * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
+                */
+               if (dev->revision < 5) {
+                       d.udma_mask = 0x00;
+                       /*
+                        * The original PCI0646 didn't have the primary
+                        * channel enable bit, it appeared starting with
+                        * PCI0646U (i.e. revision ID 3).
+                        */
+                       if (dev->revision < 3) {
+                               d.enablebits[0].reg = 0;
+                               if (dev->revision == 1)
+                                       d.dma_ops = &cmd646_rev1_dma_ops;
+                               else
+                                       d.dma_ops = &cmd64x_dma_ops;
+                       }
+               }
+       }
 
        return ide_setup_pci_device(dev, &d);
 }
index 01b37ecb5a5a70f597e0846027c4e37897eaf3ac..17669a434438bccbd596e23b4fdc51fdfb2f679e 100644
@@ -103,21 +103,27 @@ static void cs5520_dma_host_set(ide_drive_t *drive, int on)
        ide_dma_host_set(drive, on);
 }
 
-static void __devinit init_hwif_cs5520(ide_hwif_t *hwif)
-{
-       hwif->set_pio_mode = &cs5520_set_pio_mode;
-       hwif->set_dma_mode = &cs5520_set_dma_mode;
-
-       if (hwif->dma_base == 0)
-               return;
+static const struct ide_port_ops cs5520_port_ops = {
+       .set_pio_mode           = cs5520_set_pio_mode,
+       .set_dma_mode           = cs5520_set_dma_mode,
+};
 
-       hwif->dma_host_set = &cs5520_dma_host_set;
-}
+static const struct ide_dma_ops cs5520_dma_ops = {
+       .dma_host_set           = cs5520_dma_host_set,
+       .dma_setup              = ide_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = ide_dma_start,
+       .dma_end                = __ide_dma_end,
+       .dma_test_irq           = ide_dma_test_irq,
+       .dma_lost_irq           = ide_dma_lost_irq,
+       .dma_timeout            = ide_dma_timeout,
+};
 
 #define DECLARE_CS_DEV(name_str)                               \
        {                                                       \
                .name           = name_str,                     \
-               .init_hwif      = init_hwif_cs5520,             \
+               .port_ops       = &cs5520_port_ops,             \
+               .dma_ops        = &cs5520_dma_ops,              \
                .host_flags     = IDE_HFLAG_ISA_PORTS |         \
                                  IDE_HFLAG_CS5520 |            \
                                  IDE_HFLAG_VDMA |              \
index 56a369c2a78afdad653d95039a1264652e9168f5..f5534c1ff349841b10ace33856a5b587b2e19860 100644
@@ -228,26 +228,25 @@ static void __devinit init_hwif_cs5530 (ide_hwif_t *hwif)
        unsigned long basereg;
        u32 d0_timings;
 
-       hwif->set_pio_mode = &cs5530_set_pio_mode;
-       hwif->set_dma_mode = &cs5530_set_dma_mode;
-
        basereg = CS5530_BASEREG(hwif);
        d0_timings = inl(basereg + 0);
        if (CS5530_BAD_PIO(d0_timings))
                outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 0);
        if (CS5530_BAD_PIO(inl(basereg + 8)))
                outl(cs5530_pio_timings[(d0_timings >> 31) & 1][0], basereg + 8);
-
-       if (hwif->dma_base == 0)
-               return;
-
-       hwif->udma_filter = cs5530_udma_filter;
 }
 
+static const struct ide_port_ops cs5530_port_ops = {
+       .set_pio_mode           = cs5530_set_pio_mode,
+       .set_dma_mode           = cs5530_set_dma_mode,
+       .udma_filter            = cs5530_udma_filter,
+};
+
 static const struct ide_port_info cs5530_chipset __devinitdata = {
        .name           = "CS5530",
        .init_chipset   = init_chipset_cs5530,
        .init_hwif      = init_hwif_cs5530,
+       .port_ops       = &cs5530_port_ops,
        .host_flags     = IDE_HFLAG_SERIALIZE |
                          IDE_HFLAG_POST_SET_MODE,
        .pio_mask       = ATA_PIO4,
index c9685f239c652a2a5172b4c3456b2ab53ef6c8f9..99fe91a191b8f556abb865876c2e3fcaf0e8a97a 100644
@@ -166,25 +166,15 @@ static u8 __devinit cs5535_cable_detect(ide_hwif_t *hwif)
        return (bit & 1) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
 }
 
-/****
- *     init_hwif_cs5535        -       Initialize one ide cannel
- *     @hwif: Channel descriptor
- *
- *     This gets invoked by the IDE driver once for each channel. It
- *     performs channel-specific pre-initialization before drive probing.
- *
- */
-static void __devinit init_hwif_cs5535(ide_hwif_t *hwif)
-{
-       hwif->set_pio_mode = &cs5535_set_pio_mode;
-       hwif->set_dma_mode = &cs5535_set_dma_mode;
-
-       hwif->cable_detect = cs5535_cable_detect;
-}
+static const struct ide_port_ops cs5535_port_ops = {
+       .set_pio_mode           = cs5535_set_pio_mode,
+       .set_dma_mode           = cs5535_set_dma_mode,
+       .cable_detect           = cs5535_cable_detect,
+};
 
 static const struct ide_port_info cs5535_chipset __devinitdata = {
        .name           = "CS5535",
-       .init_hwif      = init_hwif_cs5535,
+       .port_ops       = &cs5535_port_ops,
        .host_flags     = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE |
                          IDE_HFLAG_ABUSE_SET_DMA_MODE,
        .pio_mask       = ATA_PIO4,
index 08eab7e7f051958da14fd53cc5ddf300d1579f4b..77cc22c2ad457b61092e67056d31cab28d3fcbb1 100644
@@ -18,8 +18,6 @@
  *   hdparm -t reports 8.17 MB/sec at about 6% CPU usage for the DTTA
  * - this is my first linux driver, so there's probably a lot  of room
  *   for optimizations and bug fixing, so feel free to do it.
- * - use idebus=xx parameter to set PCI bus speed - needed to calc
- *   timings for PIO modes (default will be 40)
  * - if using PIO mode it's a good idea to set the PIO mode and
  *   32-bit I/O support (if possible), e.g. hdparm -p2 -c1 /dev/hda
  * - I had some problems with my IBM DHEA with PIO modes < 2
@@ -136,7 +134,7 @@ static int calc_clk(int time, int bus_speed)
 static void compute_clocks(u8 pio, pio_clocks_t *p_pclk)
 {
        int clk1, clk2;
-       int bus_speed = system_bus_clock();     /* get speed of PCI bus */
+       int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
 
        /* we don't check against CY82C693's min and max speed,
         * so you can play with the idebus=xx parameter
@@ -382,15 +380,6 @@ static unsigned int __devinit init_chipset_cy82c693(struct pci_dev *dev, const c
        return 0;
 }
 
-/*
- * the init function - called for each ide channel once
- */
-static void __devinit init_hwif_cy82c693(ide_hwif_t *hwif)
-{
-       hwif->set_pio_mode = &cy82c693_set_pio_mode;
-       hwif->set_dma_mode = &cy82c693_set_dma_mode;
-}
-
 static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
 {
        static ide_hwif_t *primary;
@@ -404,11 +393,16 @@ static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
        }
 }
 
+static const struct ide_port_ops cy82c693_port_ops = {
+       .set_pio_mode           = cy82c693_set_pio_mode,
+       .set_dma_mode           = cy82c693_set_dma_mode,
+};
+
 static const struct ide_port_info cy82c693_chipset __devinitdata = {
        .name           = "CY82C693",
        .init_chipset   = init_chipset_cy82c693,
        .init_iops      = init_iops_cy82c693,
-       .init_hwif      = init_hwif_cy82c693,
+       .port_ops       = &cy82c693_port_ops,
        .chipset        = ide_cy82c693,
        .host_flags     = IDE_HFLAG_SINGLE,
        .pio_mask       = ATA_PIO4,
index 753b86fc66375366192d00f29f37123d811be7a5..b9e457996d0e2f36034f209772f0c58afa8f9a89 100644 (file)
@@ -43,6 +43,10 @@ static const u8 setup[] = {
        0x00, 0x00, 0x00, 0x00, 0xa4, 0x83, 0x02, 0x13,
 };
 
+static const struct ide_port_ops delkin_cb_port_ops = {
+       .quirkproc              = ide_undecoded_slave,
+};
+
 static int __devinit
 delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
 {
@@ -83,14 +87,9 @@ delkin_cb_probe (struct pci_dev *dev, const struct pci_device_id *id)
 
        i = hwif->index;
 
-       if (hwif->present)
-               ide_unregister(i);
-       else
-               ide_init_port_data(hwif, i);
-
+       ide_init_port_data(hwif, i);
        ide_init_port_hw(hwif, &hw);
-       hwif->mmio = 1;
-       hwif->quirkproc = &ide_undecoded_slave;
+       hwif->port_ops = &delkin_cb_port_ops;
 
        idx[0] = i;
 
@@ -120,8 +119,7 @@ delkin_cb_remove (struct pci_dev *dev)
 {
        ide_hwif_t *hwif = pci_get_drvdata(dev);
 
-       if (hwif)
-               ide_unregister(hwif->index);
+       ide_unregister(hwif);
 
        pci_release_regions(dev);
        pci_disable_device(dev);
index 9f2fc309400059cca482de0461ecbda5e64a7ede..84c36c117194cb731e24e2ea60ee7b67e2df5681 100644 (file)
@@ -115,11 +115,10 @@ static unsigned int __devinit init_chipset_hpt34x(struct pci_dev *dev, const cha
        return dev->irq;
 }
 
-static void __devinit init_hwif_hpt34x(ide_hwif_t *hwif)
-{
-       hwif->set_pio_mode = &hpt34x_set_pio_mode;
-       hwif->set_dma_mode = &hpt34x_set_mode;
-}
+static const struct ide_port_ops hpt34x_port_ops = {
+       .set_pio_mode           = hpt34x_set_pio_mode,
+       .set_dma_mode           = hpt34x_set_mode,
+};
 
 #define IDE_HFLAGS_HPT34X \
        (IDE_HFLAG_NO_ATAPI_DMA | \
@@ -131,16 +130,14 @@ static const struct ide_port_info hpt34x_chipsets[] __devinitdata = {
        { /* 0 */
                .name           = "HPT343",
                .init_chipset   = init_chipset_hpt34x,
-               .init_hwif      = init_hwif_hpt34x,
-               .extra          = 16,
+               .port_ops       = &hpt34x_port_ops,
                .host_flags     = IDE_HFLAGS_HPT34X | IDE_HFLAG_NON_BOOTABLE,
                .pio_mask       = ATA_PIO5,
        },
        { /* 1 */
                .name           = "HPT345",
                .init_chipset   = init_chipset_hpt34x,
-               .init_hwif      = init_hwif_hpt34x,
-               .extra          = 16,
+               .port_ops       = &hpt34x_port_ops,
                .host_flags     = IDE_HFLAGS_HPT34X | IDE_HFLAG_OFF_BOARD,
                .pio_mask       = ATA_PIO5,
 #ifdef CONFIG_HPT34X_AUTODMA
index a490906721456b74d310de54dd5651181c35db51..c929dadaaaffb02836cf6e66067f6862cd8c0dae 100644 (file)
@@ -760,7 +760,7 @@ static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
                }
        } else
                outb(mask ? (drive->ctl | 2) : (drive->ctl & ~2),
-                    hwif->io_ports[IDE_CONTROL_OFFSET]);
+                    hwif->io_ports.ctl_addr);
 }
 
 /*
@@ -776,7 +776,7 @@ static void hpt366_dma_lost_irq(ide_drive_t *drive)
        pci_read_config_byte(dev, 0x52, &mcr3);
        pci_read_config_byte(dev, 0x5a, &scr1);
        printk("%s: (%s)  mcr1=0x%02x, mcr3=0x%02x, scr1=0x%02x\n",
-               drive->name, __FUNCTION__, mcr1, mcr3, scr1);
+               drive->name, __func__, mcr1, mcr3, scr1);
        if (scr1 & 0x10)
                pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
        ide_dma_lost_irq(drive);
@@ -808,7 +808,7 @@ static void hpt370_irq_timeout(ide_drive_t *drive)
        hpt370_clear_engine(drive);
 }
 
-static void hpt370_ide_dma_start(ide_drive_t *drive)
+static void hpt370_dma_start(ide_drive_t *drive)
 {
 #ifdef HPT_RESET_STATE_ENGINE
        hpt370_clear_engine(drive);
@@ -816,7 +816,7 @@ static void hpt370_ide_dma_start(ide_drive_t *drive)
        ide_dma_start(drive);
 }
 
-static int hpt370_ide_dma_end(ide_drive_t *drive)
+static int hpt370_dma_end(ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
        u8  dma_stat            = inb(hwif->dma_status);
@@ -838,7 +838,7 @@ static void hpt370_dma_timeout(ide_drive_t *drive)
 }
 
 /* returns 1 if DMA IRQ issued, 0 otherwise */
-static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
+static int hpt374_dma_test_irq(ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
        struct pci_dev *dev     = to_pci_dev(hwif->dev);
@@ -858,11 +858,11 @@ static int hpt374_ide_dma_test_irq(ide_drive_t *drive)
 
        if (!drive->waiting_for_dma)
                printk(KERN_WARNING "%s: (%s) called while not waiting\n",
-                               drive->name, __FUNCTION__);
+                               drive->name, __func__);
        return 0;
 }
 
-static int hpt374_ide_dma_end(ide_drive_t *drive)
+static int hpt374_dma_end(ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
        struct pci_dev *dev     = to_pci_dev(hwif->dev);
@@ -1271,17 +1271,6 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
        /* Cache the channel's MISC. control registers' offset */
        hwif->select_data       = hwif->channel ? 0x54 : 0x50;
 
-       hwif->set_pio_mode      = &hpt3xx_set_pio_mode;
-       hwif->set_dma_mode      = &hpt3xx_set_mode;
-
-       hwif->quirkproc         = &hpt3xx_quirkproc;
-       hwif->maskproc          = &hpt3xx_maskproc;
-
-       hwif->udma_filter       = &hpt3xx_udma_filter;
-       hwif->mdma_filter       = &hpt3xx_mdma_filter;
-
-       hwif->cable_detect      = hpt3xx_cable_detect;
-
        /*
         * HPT3xxN chips have some complications:
         *
@@ -1323,29 +1312,19 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
 
        if (new_mcr != old_mcr)
                pci_write_config_byte(dev, hwif->select_data + 1, new_mcr);
-
-       if (hwif->dma_base == 0)
-               return;
-
-       if (chip_type >= HPT374) {
-               hwif->ide_dma_test_irq  = &hpt374_ide_dma_test_irq;
-               hwif->ide_dma_end       = &hpt374_ide_dma_end;
-       } else if (chip_type >= HPT370) {
-               hwif->dma_start         = &hpt370_ide_dma_start;
-               hwif->ide_dma_end       = &hpt370_ide_dma_end;
-               hwif->dma_timeout       = &hpt370_dma_timeout;
-       } else
-               hwif->dma_lost_irq      = &hpt366_dma_lost_irq;
 }
 
-static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
+static int __devinit init_dma_hpt366(ide_hwif_t *hwif,
+                                    const struct ide_port_info *d)
 {
        struct pci_dev *dev = to_pci_dev(hwif->dev);
-       u8 masterdma    = 0, slavedma   = 0;
-       u8 dma_new      = 0, dma_old    = 0;
-       unsigned long flags;
+       unsigned long flags, base = ide_pci_dma_base(hwif, d);
+       u8 dma_old, dma_new, masterdma = 0, slavedma = 0;
 
-       dma_old = inb(dmabase + 2);
+       if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
+               return -1;
+
+       dma_old = inb(base + 2);
 
        local_irq_save(flags);
 
@@ -1356,11 +1335,21 @@ static void __devinit init_dma_hpt366(ide_hwif_t *hwif, unsigned long dmabase)
        if (masterdma & 0x30)   dma_new |= 0x20;
        if ( slavedma & 0x30)   dma_new |= 0x40;
        if (dma_new != dma_old)
-               outb(dma_new, dmabase + 2);
+               outb(dma_new, base + 2);
 
        local_irq_restore(flags);
 
-       ide_setup_dma(hwif, dmabase);
+       printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx\n",
+                        hwif->name, base, base + 7);
+
+       hwif->extra_base = base + (hwif->channel ? 8 : 16);
+
+       if (ide_allocate_dma_engine(hwif))
+               return -1;
+
+       ide_setup_dma(hwif, base);
+
+       return 0;
 }
 
 static void __devinit hpt374_init(struct pci_dev *dev, struct pci_dev *dev2)
@@ -1416,6 +1405,49 @@ static int __devinit hpt36x_init(struct pci_dev *dev, struct pci_dev *dev2)
         IDE_HFLAG_ABUSE_SET_DMA_MODE | \
         IDE_HFLAG_OFF_BOARD)
 
+static const struct ide_port_ops hpt3xx_port_ops = {
+       .set_pio_mode           = hpt3xx_set_pio_mode,
+       .set_dma_mode           = hpt3xx_set_mode,
+       .quirkproc              = hpt3xx_quirkproc,
+       .maskproc               = hpt3xx_maskproc,
+       .mdma_filter            = hpt3xx_mdma_filter,
+       .udma_filter            = hpt3xx_udma_filter,
+       .cable_detect           = hpt3xx_cable_detect,
+};
+
+static const struct ide_dma_ops hpt37x_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ide_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = ide_dma_start,
+       .dma_end                = hpt374_dma_end,
+       .dma_test_irq           = hpt374_dma_test_irq,
+       .dma_lost_irq           = ide_dma_lost_irq,
+       .dma_timeout            = ide_dma_timeout,
+};
+
+static const struct ide_dma_ops hpt370_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ide_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = hpt370_dma_start,
+       .dma_end                = hpt370_dma_end,
+       .dma_test_irq           = ide_dma_test_irq,
+       .dma_lost_irq           = ide_dma_lost_irq,
+       .dma_timeout            = hpt370_dma_timeout,
+};
+
+static const struct ide_dma_ops hpt36x_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ide_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = ide_dma_start,
+       .dma_end                = __ide_dma_end,
+       .dma_test_irq           = ide_dma_test_irq,
+       .dma_lost_irq           = hpt366_dma_lost_irq,
+       .dma_timeout            = ide_dma_timeout,
+};
+
 static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
        {       /* 0 */
                .name           = "HPT36x",
@@ -1429,7 +1461,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
                 * Bit 4 is for the primary channel, bit 5 for the secondary.
                 */
                .enablebits     = {{0x50,0x10,0x10}, {0x54,0x04,0x04}},
-               .extra          = 240,
+               .port_ops       = &hpt3xx_port_ops,
+               .dma_ops        = &hpt36x_dma_ops,
                .host_flags     = IDE_HFLAGS_HPT3XX | IDE_HFLAG_SINGLE,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
@@ -1439,7 +1472,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
                .init_hwif      = init_hwif_hpt366,
                .init_dma       = init_dma_hpt366,
                .enablebits     = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
-               .extra          = 240,
+               .port_ops       = &hpt3xx_port_ops,
+               .dma_ops        = &hpt37x_dma_ops,
                .host_flags     = IDE_HFLAGS_HPT3XX,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
@@ -1449,7 +1483,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
                .init_hwif      = init_hwif_hpt366,
                .init_dma       = init_dma_hpt366,
                .enablebits     = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
-               .extra          = 240,
+               .port_ops       = &hpt3xx_port_ops,
+               .dma_ops        = &hpt37x_dma_ops,
                .host_flags     = IDE_HFLAGS_HPT3XX,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
@@ -1459,7 +1494,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
                .init_hwif      = init_hwif_hpt366,
                .init_dma       = init_dma_hpt366,
                .enablebits     = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
-               .extra          = 240,
+               .port_ops       = &hpt3xx_port_ops,
+               .dma_ops        = &hpt37x_dma_ops,
                .host_flags     = IDE_HFLAGS_HPT3XX,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
@@ -1470,7 +1506,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
                .init_dma       = init_dma_hpt366,
                .enablebits     = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
                .udma_mask      = ATA_UDMA5,
-               .extra          = 240,
+               .port_ops       = &hpt3xx_port_ops,
+               .dma_ops        = &hpt37x_dma_ops,
                .host_flags     = IDE_HFLAGS_HPT3XX,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
@@ -1480,7 +1517,8 @@ static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
                .init_hwif      = init_hwif_hpt366,
                .init_dma       = init_dma_hpt366,
                .enablebits     = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
-               .extra          = 240,
+               .port_ops       = &hpt3xx_port_ops,
+               .dma_ops        = &hpt37x_dma_ops,
                .host_flags     = IDE_HFLAGS_HPT3XX,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
@@ -1543,6 +1581,10 @@ static int __devinit hpt366_init_one(struct pci_dev *dev, const struct pci_devic
        d.name = info->chip_name;
        d.udma_mask = info->udma_mask;
 
+       /* fixup ->dma_ops for HPT370/HPT370A */
+       if (info == &hpt370 || info == &hpt370a)
+               d.dma_ops = &hpt370_dma_ops;
+
        pci_set_drvdata(dev, (void *)info);
 
        if (info == &hpt36x || info == &hpt374)
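
Editor's note: for HPT36x/370/37x the DMA hooks are likewise gathered into per-family const ide_dma_ops tables, and hpt366_init_one() patches d.dma_ops for HPT370/HPT370A at probe time. Below is a hedged, self-contained sketch of that select-a-table-at-probe idea (the generic_* helpers and table layout here are made up for illustration):

/*
 * Sketch: two const DMA-op tables share default callbacks and differ
 * in only one slot; the right table is picked once, at probe time.
 */
#include <stdio.h>

struct dma_ops {
        void (*dma_start)(int drive);
        int  (*dma_end)(int drive);
};

static void generic_dma_start(int drive) { printf("start %d\n", drive); }
static int  generic_dma_end(int drive)   { printf("end %d\n", drive); return 0; }

/* Family-specific override for a single hook. */
static void hpt370_style_dma_start(int drive)
{
        printf("clear state engine, then start %d\n", drive);
        generic_dma_start(drive);
}

static const struct dma_ops base_dma_ops = {
        .dma_start      = generic_dma_start,
        .dma_end        = generic_dma_end,
};

static const struct dma_ops hpt370_style_dma_ops = {
        .dma_start      = hpt370_style_dma_start,
        .dma_end        = generic_dma_end,      /* shared default */
};

int main(void)
{
        int is_hpt370 = 1;      /* decided at probe time from the chip type */
        const struct dma_ops *ops =
                is_hpt370 ? &hpt370_style_dma_ops : &base_dma_ops;

        ops->dma_start(0);
        ops->dma_end(0);
        return 0;
}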
index 5b5b0cc4b76ad9d13176c834c60d328d0991b71b..9053c8771e6ec792304c49b9e509fd450d994437 100644 (file)
@@ -149,27 +149,17 @@ static u8 __devinit it8213_cable_detect(ide_hwif_t *hwif)
        return (reg42h & 0x02) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
 }
 
-/**
- *     init_hwif_it8213        -       set up hwif structs
- *     @hwif: interface to set up
- *
- *     We do the basic set up of the interface structure.
- */
-
-static void __devinit init_hwif_it8213(ide_hwif_t *hwif)
-{
-       hwif->set_dma_mode = &it8213_set_dma_mode;
-       hwif->set_pio_mode = &it8213_set_pio_mode;
-
-       hwif->cable_detect = it8213_cable_detect;
-}
-
+static const struct ide_port_ops it8213_port_ops = {
+       .set_pio_mode           = it8213_set_pio_mode,
+       .set_dma_mode           = it8213_set_dma_mode,
+       .cable_detect           = it8213_cable_detect,
+};
 
 #define DECLARE_ITE_DEV(name_str)                      \
        {                                               \
                .name           = name_str,             \
-               .init_hwif      = init_hwif_it8213,     \
                .enablebits     = { {0x41, 0x80, 0x80} }, \
+               .port_ops       = &it8213_port_ops,     \
                .host_flags     = IDE_HFLAG_SINGLE,     \
                .pio_mask       = ATA_PIO4,             \
                .swdma_mask     = ATA_SWDMA2_ONLY,      \
index a38ec47423a0e67e08c82b9171ec84190c6447b7..6ab04115286be677e3b550caf201fdf7f12bd3a1 100644 (file)
@@ -418,7 +418,7 @@ static void it821x_set_dma_mode(ide_drive_t *drive, const u8 speed)
 }
 
 /**
- *     ata66_it821x    -       check for 80 pin cable
+ *     it821x_cable_detect     -       cable detection
  *     @hwif: interface to check
  *
  *     Check for the presence of an ATA66 capable cable on the
@@ -426,7 +426,7 @@ static void it821x_set_dma_mode(ide_drive_t *drive, const u8 speed)
  *     the needed logic onboard.
  */
 
-static u8 __devinit ata66_it821x(ide_hwif_t *hwif)
+static u8 __devinit it821x_cable_detect(ide_hwif_t *hwif)
 {
        /* The reference driver also only does disk side */
        return ATA_CBL_PATA80;
@@ -511,6 +511,11 @@ static void __devinit it821x_quirkproc(ide_drive_t *drive)
 
 }
 
+static struct ide_dma_ops it821x_pass_through_dma_ops = {
+       .dma_start              = it821x_dma_start,
+       .dma_end                = it821x_dma_end,
+};
+
 /**
  *     init_hwif_it821x        -       set up hwif structs
  *     @hwif: interface to set up
@@ -527,8 +532,6 @@ static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
        struct it821x_dev *idev = itdevs[hwif->channel];
        u8 conf;
 
-       hwif->quirkproc = &it821x_quirkproc;
-
        ide_set_hwifdata(hwif, idev);
 
        pci_read_config_byte(dev, 0x50, &conf);
@@ -563,17 +566,11 @@ static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
        }
 
        if (idev->smart == 0) {
-               hwif->set_pio_mode = &it821x_set_pio_mode;
-               hwif->set_dma_mode = &it821x_set_dma_mode;
-
                /* MWDMA/PIO clock switching for pass through mode */
-               hwif->dma_start = &it821x_dma_start;
-               hwif->ide_dma_end = &it821x_dma_end;
+               hwif->dma_ops = &it821x_pass_through_dma_ops;
        } else
                hwif->host_flags |= IDE_HFLAG_NO_SET_MODE;
 
-       hwif->cable_detect = ata66_it821x;
-
        if (hwif->dma_base == 0)
                return;
 
@@ -613,12 +610,20 @@ static unsigned int __devinit init_chipset_it821x(struct pci_dev *dev, const cha
        return 0;
 }
 
+static const struct ide_port_ops it821x_port_ops = {
+       /* it821x_set_{pio,dma}_mode() are only used in pass-through mode */
+       .set_pio_mode           = it821x_set_pio_mode,
+       .set_dma_mode           = it821x_set_dma_mode,
+       .quirkproc              = it821x_quirkproc,
+       .cable_detect           = it821x_cable_detect,
+};
 
 #define DECLARE_ITE_DEV(name_str)                      \
        {                                               \
                .name           = name_str,             \
                .init_chipset   = init_chipset_it821x,  \
                .init_hwif      = init_hwif_it821x,     \
+               .port_ops       = &it821x_port_ops,     \
                .pio_mask       = ATA_PIO4,             \
        }
 
index 673f7dc8ba6534092766329319c43a8967ff4bec..96ef7394f28395b5646bee8f3ae9e319be3dcc90 100644 (file)
@@ -19,13 +19,13 @@ typedef enum {
 } port_type;
 
 /**
- *     ata66_jmicron           -       Cable check
+ *     jmicron_cable_detect    -       cable detection
  *     @hwif: IDE port
  *
  *     Returns the cable type.
  */
 
-static u8 __devinit ata66_jmicron(ide_hwif_t *hwif)
+static u8 __devinit jmicron_cable_detect(ide_hwif_t *hwif)
 {
        struct pci_dev *pdev = to_pci_dev(hwif->dev);
 
@@ -95,25 +95,16 @@ static void jmicron_set_dma_mode(ide_drive_t *drive, const u8 mode)
 {
 }
 
-/**
- *     init_hwif_jmicron       -       set up hwif structs
- *     @hwif: interface to set up
- *
- *     Minimal set up is required for the Jmicron hardware.
- */
-
-static void __devinit init_hwif_jmicron(ide_hwif_t *hwif)
-{
-       hwif->set_pio_mode = &jmicron_set_pio_mode;
-       hwif->set_dma_mode = &jmicron_set_dma_mode;
-
-       hwif->cable_detect = ata66_jmicron;
-}
+static const struct ide_port_ops jmicron_port_ops = {
+       .set_pio_mode           = jmicron_set_pio_mode,
+       .set_dma_mode           = jmicron_set_dma_mode,
+       .cable_detect           = jmicron_cable_detect,
+};
 
 static const struct ide_port_info jmicron_chipset __devinitdata = {
        .name           = "JMB",
-       .init_hwif      = init_hwif_jmicron,
        .enablebits     = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
+       .port_ops       = &jmicron_port_ops,
        .pio_mask       = ATA_PIO5,
        .mwdma_mask     = ATA_MWDMA2,
        .udma_mask      = ATA_UDMA6,
index 3015d6916d4c121d500b61a37965dfc61363a87f..fec4955f449b0fa9bcc08ad57a203e81684ad73b 100644 (file)
@@ -63,6 +63,48 @@ static u8 superio_ide_inb (unsigned long port)
        return inb(port);
 }
 
+static void superio_tf_read(ide_drive_t *drive, ide_task_t *task)
+{
+       struct ide_io_ports *io_ports = &drive->hwif->io_ports;
+       struct ide_taskfile *tf = &task->tf;
+
+       if (task->tf_flags & IDE_TFLAG_IN_DATA) {
+               u16 data = inw(io_ports->data_addr);
+
+               tf->data = data & 0xff;
+               tf->hob_data = (data >> 8) & 0xff;
+       }
+
+       /* be sure we're looking at the low order bits */
+       outb(drive->ctl & ~0x80, io_ports->ctl_addr);
+
+       if (task->tf_flags & IDE_TFLAG_IN_NSECT)
+               tf->nsect  = inb(io_ports->nsect_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAL)
+               tf->lbal   = inb(io_ports->lbal_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAM)
+               tf->lbam   = inb(io_ports->lbam_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAH)
+               tf->lbah   = inb(io_ports->lbah_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
+               tf->device = superio_ide_inb(io_ports->device_addr);
+
+       if (task->tf_flags & IDE_TFLAG_LBA48) {
+               outb(drive->ctl | 0x80, io_ports->ctl_addr);
+
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
+                       tf->hob_feature = inb(io_ports->feature_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
+                       tf->hob_nsect   = inb(io_ports->nsect_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
+                       tf->hob_lbal    = inb(io_ports->lbal_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
+                       tf->hob_lbam    = inb(io_ports->lbam_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
+                       tf->hob_lbah    = inb(io_ports->lbah_addr);
+       }
+}
+
 static void __devinit superio_ide_init_iops (struct hwif_s *hwif)
 {
        struct pci_dev *pdev = to_pci_dev(hwif->dev);
@@ -72,14 +114,16 @@ static void __devinit superio_ide_init_iops (struct hwif_s *hwif)
        base = pci_resource_start(pdev, port * 2) & ~3;
        dmabase = pci_resource_start(pdev, 4) & ~3;
 
-       superio_ide_status[port] = base + IDE_STATUS_OFFSET;
-       superio_ide_select[port] = base + IDE_SELECT_OFFSET;
+       superio_ide_status[port] = base + 7;
+       superio_ide_select[port] = base + 6;
        superio_ide_dma_status[port] = dmabase + (!port ? 2 : 0xa);
 
        /* Clear error/interrupt, enable dma */
        tmp = superio_ide_inb(superio_ide_dma_status[port]);
        outb(tmp | 0x66, superio_ide_dma_status[port]);
 
+       hwif->tf_read = superio_tf_read;
+
        /* We need to override inb to workaround a SuperIO errata */
        hwif->INB = superio_ide_inb;
 }
@@ -150,7 +194,7 @@ static void ns87415_selectproc (ide_drive_t *drive)
        ns87415_prepare_drive (drive, drive->using_dma);
 }
 
-static int ns87415_ide_dma_end (ide_drive_t *drive)
+static int ns87415_dma_end(ide_drive_t *drive)
 {
        ide_hwif_t      *hwif = HWIF(drive);
        u8 dma_stat = 0, dma_cmd = 0;
@@ -170,7 +214,7 @@ static int ns87415_ide_dma_end (ide_drive_t *drive)
        return (dma_stat & 7) != 4;
 }
 
-static int ns87415_ide_dma_setup(ide_drive_t *drive)
+static int ns87415_dma_setup(ide_drive_t *drive)
 {
        /* select DMA xfer */
        ns87415_prepare_drive(drive, 1);
@@ -195,8 +239,6 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
        u8 stat;
 #endif
 
-       hwif->selectproc = &ns87415_selectproc;
-
        /*
         * We cannot probe for IRQ: both ports share common IRQ on INTA.
         * Also, leave IRQ masked during drive probing, to prevent infinite
@@ -233,12 +275,12 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
                 *      SELECT_DRIVE() properly during first ide_probe_port().
                 */
                timeout = 10000;
-               outb(12, hwif->io_ports[IDE_CONTROL_OFFSET]);
+               outb(12, hwif->io_ports.ctl_addr);
                udelay(10);
-               outb(8, hwif->io_ports[IDE_CONTROL_OFFSET]);
+               outb(8, hwif->io_ports.ctl_addr);
                do {
                        udelay(50);
-                       stat = hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+                       stat = hwif->INB(hwif->io_ports.status_addr);
                        if (stat == 0xff)
                                break;
                } while ((stat & BUSY_STAT) && --timeout);
@@ -246,7 +288,7 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
        }
 
        if (!using_inta)
-               hwif->irq = ide_default_irq(hwif->io_ports[IDE_DATA_OFFSET]);
+               hwif->irq = ide_default_irq(hwif->io_ports.data_addr);
        else if (!hwif->irq && hwif->mate && hwif->mate->irq)
                hwif->irq = hwif->mate->irq;    /* share IRQ with mate */
 
@@ -254,16 +296,31 @@ static void __devinit init_hwif_ns87415 (ide_hwif_t *hwif)
                return;
 
        outb(0x60, hwif->dma_status);
-       hwif->dma_setup = &ns87415_ide_dma_setup;
-       hwif->ide_dma_end = &ns87415_ide_dma_end;
 }
 
+static const struct ide_port_ops ns87415_port_ops = {
+       .selectproc             = ns87415_selectproc,
+};
+
+static const struct ide_dma_ops ns87415_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ns87415_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = ide_dma_start,
+       .dma_end                = ns87415_dma_end,
+       .dma_test_irq           = ide_dma_test_irq,
+       .dma_lost_irq           = ide_dma_lost_irq,
+       .dma_timeout            = ide_dma_timeout,
+};
+
 static const struct ide_port_info ns87415_chipset __devinitdata = {
        .name           = "NS87415",
 #ifdef CONFIG_SUPERIO
        .init_iops      = init_iops_ns87415,
 #endif
        .init_hwif      = init_hwif_ns87415,
+       .port_ops       = &ns87415_port_ops,
+       .dma_ops        = &ns87415_dma_ops,
        .host_flags     = IDE_HFLAG_TRUST_BIOS_FOR_DMA |
                          IDE_HFLAG_NO_ATAPI_DMA,
 };
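
Editor's note: superio_tf_read() added above reads only the taskfile registers requested via the IDE_TFLAG_IN_* bits. Here is a simplified, compilable sketch of that flag-guarded read (hypothetical flag names and a fake register array stand in for the real port I/O):

/*
 * Sketch: a bitmask tells the reader which registers the caller wants,
 * so only those are touched.
 */
#include <stdio.h>

#define TF_IN_NSECT     (1 << 0)
#define TF_IN_LBAL      (1 << 1)
#define TF_IN_LBAM      (1 << 2)
#define TF_IN_LBAH      (1 << 3)

struct taskfile {
        unsigned char nsect, lbal, lbam, lbah;
};

/* Pretend these are the device's taskfile registers. */
static const unsigned char regs[4] = { 0x08, 0x10, 0x20, 0x40 };

static void tf_read(unsigned int flags, struct taskfile *tf)
{
        if (flags & TF_IN_NSECT)
                tf->nsect = regs[0];
        if (flags & TF_IN_LBAL)
                tf->lbal = regs[1];
        if (flags & TF_IN_LBAM)
                tf->lbam = regs[2];
        if (flags & TF_IN_LBAH)
                tf->lbah = regs[3];
}

int main(void)
{
        struct taskfile tf = { 0 };

        tf_read(TF_IN_NSECT | TF_IN_LBAH, &tf);
        printf("nsect=%d lbah=%d\n", tf.nsect, tf.lbah);
        return 0;
}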
index 88a4dd94eeea9ea4395bfad0610f616ed2cda7ac..6e99080497bfea0633d504d0c4c99ce557c35b5a 100644 (file)
@@ -53,8 +53,7 @@
  * If you then set the second drive to another PIO, the old value
  * (automatically selected) will be overridden by yours.
  * There is a 25/33MHz switch in configuration
- * register, but driver is written for use at any frequency which get
- * (use idebus=xx to select PCI bus speed).
+ * register, but driver is written for use at any frequency.
  *
  * Version 0.1, Nov 8, 1996
  * by Jaromir Koutek, for 2.1.8.
@@ -210,7 +209,7 @@ static void compute_clocks(int pio, pio_clocks_t *clks)
 {
        if (pio != PIO_NOT_EXIST) {
                int adr_setup, data_pls;
-               int bus_speed = system_bus_clock();
+               int bus_speed = ide_pci_clk ? ide_pci_clk : system_bus_clock();
 
                adr_setup = ide_pio_timings[pio].setup_time;
                data_pls = ide_pio_timings[pio].active_time;
@@ -280,7 +279,7 @@ static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio)
 
        spin_lock_irqsave(&opti621_lock, flags);
 
-       reg_base = hwif->io_ports[IDE_DATA_OFFSET];
+       reg_base = hwif->io_ports.data_addr;
 
        /* allow Register-B */
        outb(0xc0, reg_base + CNTRL_REG);
@@ -326,28 +325,24 @@ static void __devinit opti621_port_init_devs(ide_hwif_t *hwif)
        hwif->drives[1].drive_data = PIO_DONT_KNOW;
 }
 
-/*
- * init_hwif_opti621() is called once for each hwif found at boot.
- */
-static void __devinit init_hwif_opti621(ide_hwif_t *hwif)
-{
-       hwif->port_init_devs = opti621_port_init_devs;
-       hwif->set_pio_mode = &opti621_set_pio_mode;
-}
+static const struct ide_port_ops opti621_port_ops = {
+       .port_init_devs         = opti621_port_init_devs,
+       .set_pio_mode           = opti621_set_pio_mode,
+};
 
 static const struct ide_port_info opti621_chipsets[] __devinitdata = {
        {       /* 0 */
                .name           = "OPTI621",
-               .init_hwif      = init_hwif_opti621,
                .enablebits     = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
+               .port_ops       = &opti621_port_ops,
                .host_flags     = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
                .pio_mask       = ATA_PIO3,
                .swdma_mask     = ATA_SWDMA2,
                .mwdma_mask     = ATA_MWDMA2,
        }, {    /* 1 */
                .name           = "OPTI621X",
-               .init_hwif      = init_hwif_opti621,
                .enablebits     = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
+               .port_ops       = &opti621_port_ops,
                .host_flags     = IDE_HFLAG_TRUST_BIOS_FOR_DMA,
                .pio_mask       = ATA_PIO3,
                .swdma_mask     = ATA_SWDMA2,
index 1c8cb7797a4aea2a6800c22da7ab251fe9cb8163..070df8ab3b218946ac69b2804bb9ebafce7a48de 100644 (file)
@@ -34,7 +34,7 @@
 #undef DEBUG
 
 #ifdef DEBUG
-#define DBG(fmt, args...) printk("%s: " fmt, __FUNCTION__, ## args)
+#define DBG(fmt, args...) printk("%s: " fmt, __func__, ## args)
 #else
 #define DBG(fmt, args...)
 #endif
@@ -83,8 +83,8 @@ static u8 get_indexed_reg(ide_hwif_t *hwif, u8 index)
 {
        u8 value;
 
-       outb(index, hwif->dma_vendor1);
-       value = inb(hwif->dma_vendor3);
+       outb(index, hwif->dma_base + 1);
+       value = inb(hwif->dma_base + 3);
 
        DBG("index[%02X] value[%02X]\n", index, value);
        return value;
@@ -97,8 +97,8 @@ static u8 get_indexed_reg(ide_hwif_t *hwif, u8 index)
  */
 static void set_indexed_reg(ide_hwif_t *hwif, u8 index, u8 value)
 {
-       outb(index, hwif->dma_vendor1);
-       outb(value, hwif->dma_vendor3);
+       outb(index, hwif->dma_base + 1);
+       outb(value, hwif->dma_base + 3);
        DBG("index[%02X] value[%02X]\n", index, value);
 }
 
@@ -442,17 +442,6 @@ static unsigned int __devinit init_chipset_pdcnew(struct pci_dev *dev, const cha
        return dev->irq;
 }
 
-static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
-{
-       hwif->set_pio_mode = &pdcnew_set_pio_mode;
-       hwif->set_dma_mode = &pdcnew_set_dma_mode;
-
-       hwif->quirkproc = &pdcnew_quirkproc;
-       hwif->resetproc = &pdcnew_reset;
-
-       hwif->cable_detect = pdcnew_cable_detect;
-}
-
 static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
 {
        struct pci_dev *dev2;
@@ -476,11 +465,19 @@ static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
        return NULL;
 }
 
+static const struct ide_port_ops pdcnew_port_ops = {
+       .set_pio_mode           = pdcnew_set_pio_mode,
+       .set_dma_mode           = pdcnew_set_dma_mode,
+       .quirkproc              = pdcnew_quirkproc,
+       .resetproc              = pdcnew_reset,
+       .cable_detect           = pdcnew_cable_detect,
+};
+
 #define DECLARE_PDCNEW_DEV(name_str, udma) \
        { \
                .name           = name_str, \
                .init_chipset   = init_chipset_pdcnew, \
-               .init_hwif      = init_hwif_pdc202new, \
+               .port_ops       = &pdcnew_port_ops, \
                .host_flags     = IDE_HFLAG_POST_SET_MODE | \
                                  IDE_HFLAG_ERROR_STOPS_FIFO | \
                                  IDE_HFLAG_OFF_BOARD, \
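
Editor's note: DECLARE_PDCNEW_DEV() above stamps out ide_port_info entries that differ only in name and UDMA mask. A small stand-alone sketch of that macro-with-designated-initializers trick follows (demo names, not the driver's actual fields):

/*
 * Sketch: a macro expands to a braced designated initializer so a table
 * of near-identical const entries stays short and consistent.
 */
#include <stdio.h>

struct chip_info {
        const char *name;
        unsigned char udma_mask;
};

#define DECLARE_DEMO_DEV(name_str, udma)        \
        {                                       \
                .name           = name_str,     \
                .udma_mask      = (udma),       \
        }

static const struct chip_info demo_chipsets[] = {
        DECLARE_DEMO_DEV("DEMO-A", 0x3f),
        DECLARE_DEMO_DEV("DEMO-B", 0x7f),
};

int main(void)
{
        unsigned int i;

        for (i = 0; i < sizeof(demo_chipsets) / sizeof(demo_chipsets[0]); i++)
                printf("%s: udma_mask=0x%02x\n", demo_chipsets[i].name,
                       (unsigned int)demo_chipsets[i].udma_mask);
        return 0;
}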
index 150422ec3cfabc6685d8f6a8bf5b82804cf969b4..fca89eda5c022cf8538eeaa0e89dd9344223d6ec 100644 (file)
@@ -115,7 +115,7 @@ static void pdc202xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
        pdc202xx_set_mode(drive, XFER_PIO_0 + pio);
 }
 
-static u8 __devinit pdc2026x_old_cable_detect(ide_hwif_t *hwif)
+static u8 __devinit pdc2026x_cable_detect(ide_hwif_t *hwif)
 {
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        u16 CIS, mask = hwif->channel ? (1 << 11) : (1 << 10);
@@ -163,7 +163,7 @@ static void pdc202xx_quirkproc(ide_drive_t *drive)
        drive->quirk_list = 0;
 }
 
-static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
+static void pdc202xx_dma_start(ide_drive_t *drive)
 {
        if (drive->current_speed > XFER_UDMA_2)
                pdc_old_enable_66MHz_clock(drive->hwif);
@@ -185,7 +185,7 @@ static void pdc202xx_old_ide_dma_start(ide_drive_t *drive)
        ide_dma_start(drive);
 }
 
-static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
+static int pdc202xx_dma_end(ide_drive_t *drive)
 {
        if (drive->media != ide_disk || drive->addressing == 1) {
                ide_hwif_t *hwif        = HWIF(drive);
@@ -202,7 +202,7 @@ static int pdc202xx_old_ide_dma_end(ide_drive_t *drive)
        return __ide_dma_end(drive);
 }
 
-static int pdc202xx_old_ide_dma_test_irq(ide_drive_t *drive)
+static int pdc202xx_dma_test_irq(ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
        unsigned long high_16   = hwif->extra_base - 16;
@@ -226,26 +226,6 @@ somebody_else:
        return (dma_stat & 4) == 4;     /* return 1 if INTR asserted */
 }
 
-static void pdc202xx_dma_lost_irq(ide_drive_t *drive)
-{
-       ide_hwif_t *hwif = HWIF(drive);
-
-       if (hwif->resetproc != NULL)
-               hwif->resetproc(drive);
-
-       ide_dma_lost_irq(drive);
-}
-
-static void pdc202xx_dma_timeout(ide_drive_t *drive)
-{
-       ide_hwif_t *hwif = HWIF(drive);
-
-       if (hwif->resetproc != NULL)
-               hwif->resetproc(drive);
-
-       ide_dma_timeout(drive);
-}
-
 static void pdc202xx_reset_host (ide_hwif_t *hwif)
 {
        unsigned long high_16   = hwif->extra_base - 16;
@@ -271,68 +251,46 @@ static void pdc202xx_reset (ide_drive_t *drive)
        ide_set_max_pio(drive);
 }
 
-static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev,
-                                                       const char *name)
+static void pdc202xx_dma_lost_irq(ide_drive_t *drive)
 {
-       return dev->irq;
+       pdc202xx_reset(drive);
+       ide_dma_lost_irq(drive);
 }
 
-static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif)
+static void pdc202xx_dma_timeout(ide_drive_t *drive)
 {
-       struct pci_dev *dev = to_pci_dev(hwif->dev);
-
-       hwif->set_pio_mode = &pdc202xx_set_pio_mode;
-       hwif->set_dma_mode = &pdc202xx_set_mode;
-
-       hwif->quirkproc = &pdc202xx_quirkproc;
-
-       if (dev->device != PCI_DEVICE_ID_PROMISE_20246) {
-               hwif->resetproc = &pdc202xx_reset;
-
-               hwif->cable_detect = pdc2026x_old_cable_detect;
-       }
-
-       if (hwif->dma_base == 0)
-               return;
-
-       hwif->dma_lost_irq = &pdc202xx_dma_lost_irq;
-       hwif->dma_timeout = &pdc202xx_dma_timeout;
-
-       if (dev->device != PCI_DEVICE_ID_PROMISE_20246) {
-               hwif->dma_start = &pdc202xx_old_ide_dma_start;
-               hwif->ide_dma_end = &pdc202xx_old_ide_dma_end;
-       } 
-       hwif->ide_dma_test_irq = &pdc202xx_old_ide_dma_test_irq;
+       pdc202xx_reset(drive);
+       ide_dma_timeout(drive);
 }
 
-static void __devinit init_dma_pdc202xx(ide_hwif_t *hwif, unsigned long dmabase)
+static unsigned int __devinit init_chipset_pdc202xx(struct pci_dev *dev,
+                                                   const char *name)
 {
+       unsigned long dmabase = pci_resource_start(dev, 4);
        u8 udma_speed_flag = 0, primary_mode = 0, secondary_mode = 0;
 
-       if (hwif->channel) {
-               ide_setup_dma(hwif, dmabase);
-               return;
-       }
+       if (dmabase == 0)
+               goto out;
 
        udma_speed_flag = inb(dmabase | 0x1f);
        primary_mode    = inb(dmabase | 0x1a);
        secondary_mode  = inb(dmabase | 0x1b);
        printk(KERN_INFO "%s: (U)DMA Burst Bit %sABLED " \
                "Primary %s Mode " \
-               "Secondary %s Mode.\n", hwif->cds->name,
+               "Secondary %s Mode.\n", pci_name(dev),
                (udma_speed_flag & 1) ? "EN" : "DIS",
                (primary_mode & 1) ? "MASTER" : "PCI",
                (secondary_mode & 1) ? "MASTER" : "PCI" );
 
        if (!(udma_speed_flag & 1)) {
                printk(KERN_INFO "%s: FORCING BURST BIT 0x%02x->0x%02x ",
-                       hwif->cds->name, udma_speed_flag,
+                       pci_name(dev), udma_speed_flag,
                        (udma_speed_flag|1));
                outb(udma_speed_flag | 1, dmabase | 0x1f);
                printk("%sACTIVE\n", (inb(dmabase | 0x1f) & 1) ? "" : "IN");
        }
-
-       ide_setup_dma(hwif, dmabase);
+out:
+       return dev->irq;
 }
 
 static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
@@ -357,13 +315,48 @@ static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
         IDE_HFLAG_ABUSE_SET_DMA_MODE | \
         IDE_HFLAG_OFF_BOARD)
 
+static const struct ide_port_ops pdc20246_port_ops = {
+       .set_pio_mode           = pdc202xx_set_pio_mode,
+       .set_dma_mode           = pdc202xx_set_mode,
+       .quirkproc              = pdc202xx_quirkproc,
+};
+
+static const struct ide_port_ops pdc2026x_port_ops = {
+       .set_pio_mode           = pdc202xx_set_pio_mode,
+       .set_dma_mode           = pdc202xx_set_mode,
+       .quirkproc              = pdc202xx_quirkproc,
+       .resetproc              = pdc202xx_reset,
+       .cable_detect           = pdc2026x_cable_detect,
+};
+
+static const struct ide_dma_ops pdc20246_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ide_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = ide_dma_start,
+       .dma_end                = __ide_dma_end,
+       .dma_test_irq           = pdc202xx_dma_test_irq,
+       .dma_lost_irq           = pdc202xx_dma_lost_irq,
+       .dma_timeout            = pdc202xx_dma_timeout,
+};
+
+static const struct ide_dma_ops pdc2026x_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ide_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = pdc202xx_dma_start,
+       .dma_end                = pdc202xx_dma_end,
+       .dma_test_irq           = pdc202xx_dma_test_irq,
+       .dma_lost_irq           = pdc202xx_dma_lost_irq,
+       .dma_timeout            = pdc202xx_dma_timeout,
+};
+
 #define DECLARE_PDC2026X_DEV(name_str, udma, extra_flags) \
        { \
                .name           = name_str, \
                .init_chipset   = init_chipset_pdc202xx, \
-               .init_hwif      = init_hwif_pdc202xx, \
-               .init_dma       = init_dma_pdc202xx, \
-               .extra          = 48, \
+               .port_ops       = &pdc2026x_port_ops, \
+               .dma_ops        = &pdc2026x_dma_ops, \
                .host_flags     = IDE_HFLAGS_PDC202XX | extra_flags, \
                .pio_mask       = ATA_PIO4, \
                .mwdma_mask     = ATA_MWDMA2, \
@@ -374,9 +367,8 @@ static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
        {       /* 0 */
                .name           = "PDC20246",
                .init_chipset   = init_chipset_pdc202xx,
-               .init_hwif      = init_hwif_pdc202xx,
-               .init_dma       = init_dma_pdc202xx,
-               .extra          = 16,
+               .port_ops       = &pdc20246_port_ops,
+               .dma_ops        = &pdc20246_dma_ops,
                .host_flags     = IDE_HFLAGS_PDC202XX,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
index 89d74ffdb207f353bedccec0d160c809876bc83a..f04738d14a6f93b8499222dedef55ac582f4961c 100644 (file)
@@ -250,6 +250,7 @@ static const struct ich_laptop ich_laptop[] = {
        { 0x27DF, 0x1043, 0x1267 },     /* ICH7 on Asus W5F */
        { 0x27DF, 0x103C, 0x30A1 },     /* ICH7 on HP Compaq nc2400 */
        { 0x24CA, 0x1025, 0x0061 },     /* ICH4 on Acer Aspire 2023WLMi */
+       { 0x2653, 0x1043, 0x82D8 },     /* ICH6M on Asus Eee 701 */
        /* end marker */
        { 0, }
 };
@@ -285,11 +286,6 @@ static u8 __devinit piix_cable_detect(ide_hwif_t *hwif)
 
 static void __devinit init_hwif_piix(ide_hwif_t *hwif)
 {
-       hwif->set_pio_mode = &piix_set_pio_mode;
-       hwif->set_dma_mode = &piix_set_dma_mode;
-
-       hwif->cable_detect = piix_cable_detect;
-
        if (!hwif->dma_base)
                return;
 
@@ -306,6 +302,12 @@ static void __devinit init_hwif_ich(ide_hwif_t *hwif)
                hwif->ide_dma_clear_irq = &piix_dma_clear_irq;
 }
 
+static const struct ide_port_ops piix_port_ops = {
+       .set_pio_mode           = piix_set_pio_mode,
+       .set_dma_mode           = piix_set_dma_mode,
+       .cable_detect           = piix_cable_detect,
+};
+
 #ifndef CONFIG_IA64
  #define IDE_HFLAGS_PIIX IDE_HFLAG_LEGACY_IRQS
 #else
@@ -317,6 +319,7 @@ static void __devinit init_hwif_ich(ide_hwif_t *hwif)
                .name           = name_str,             \
                .init_hwif      = init_hwif_piix,       \
                .enablebits     = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
+               .port_ops       = &piix_port_ops,       \
                .host_flags     = IDE_HFLAGS_PIIX,      \
                .pio_mask       = ATA_PIO4,             \
                .swdma_mask     = ATA_SWDMA2_ONLY,      \
@@ -330,6 +333,7 @@ static void __devinit init_hwif_ich(ide_hwif_t *hwif)
                .init_chipset   = init_chipset_ich, \
                .init_hwif      = init_hwif_ich, \
                .enablebits     = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \
+               .port_ops       = &piix_port_ops, \
                .host_flags     = IDE_HFLAGS_PIIX, \
                .pio_mask       = ATA_PIO4, \
                .swdma_mask     = ATA_SWDMA2_ONLY, \
index 44985c8f36e7d7f964b9f57dd51587e84dcec91f..14c787b5d95f89c8fd6406d22cd82ef72d4322bb 100644 (file)
@@ -165,7 +165,7 @@ static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode)
  *
  *  returns 1 on error, 0 otherwise
  */
-static int sc1200_ide_dma_end (ide_drive_t *drive)
+static int sc1200_dma_end(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = HWIF(drive);
        unsigned long dma_base = hwif->dma_base;
@@ -214,7 +214,7 @@ static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio)
                printk("SC1200: %s: changing (U)DMA mode\n", drive->name);
                ide_dma_off_quietly(drive);
                if (ide_set_dma_mode(drive, mode) == 0 && drive->using_dma)
-                       hwif->dma_host_set(drive, 1);
+                       hwif->dma_ops->dma_host_set(drive, 1);
                return;
        }
 
@@ -286,25 +286,27 @@ static int sc1200_resume (struct pci_dev *dev)
 }
 #endif
 
-/*
- * This gets invoked by the IDE driver once for each channel,
- * and performs channel-specific pre-initialization before drive probing.
- */
-static void __devinit init_hwif_sc1200 (ide_hwif_t *hwif)
-{
-       hwif->set_pio_mode = &sc1200_set_pio_mode;
-       hwif->set_dma_mode = &sc1200_set_dma_mode;
-
-       if (hwif->dma_base == 0)
-               return;
+static const struct ide_port_ops sc1200_port_ops = {
+       .set_pio_mode           = sc1200_set_pio_mode,
+       .set_dma_mode           = sc1200_set_dma_mode,
+       .udma_filter            = sc1200_udma_filter,
+};
 
-       hwif->udma_filter = sc1200_udma_filter;
-       hwif->ide_dma_end   = &sc1200_ide_dma_end;
-}
+static const struct ide_dma_ops sc1200_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ide_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = ide_dma_start,
+       .dma_end                = sc1200_dma_end,
+       .dma_test_irq           = ide_dma_test_irq,
+       .dma_lost_irq           = ide_dma_lost_irq,
+       .dma_timeout            = ide_dma_timeout,
+};
 
 static const struct ide_port_info sc1200_chipset __devinitdata = {
        .name           = "SC1200",
-       .init_hwif      = init_hwif_sc1200,
+       .port_ops       = &sc1200_port_ops,
+       .dma_ops        = &sc1200_dma_ops,
        .host_flags     = IDE_HFLAG_SERIALIZE |
                          IDE_HFLAG_POST_SET_MODE |
                          IDE_HFLAG_ABUSE_DMA_MODES,
index 52145796f12fadf9d7e3e714a72c6b72a88b4ec0..910fb00deb71486e8d0e7da1a3b922fc5009d4b5 100644 (file)
@@ -126,12 +126,6 @@ static u8 scc_ide_inb(unsigned long port)
        return (u8)data;
 }
 
-static u16 scc_ide_inw(unsigned long port)
-{
-       u32 data = in_be32((void*)port);
-       return (u16)data;
-}
-
 static void scc_ide_insw(unsigned long port, void *addr, u32 count)
 {
        u16 *ptr = (u16 *)addr;
@@ -154,11 +148,6 @@ static void scc_ide_outb(u8 addr, unsigned long port)
        out_be32((void*)port, addr);
 }
 
-static void scc_ide_outw(u16 addr, unsigned long port)
-{
-       out_be32((void*)port, addr);
-}
-
 static void
 scc_ide_outbsync(ide_drive_t * drive, u8 addr, unsigned long port)
 {
@@ -271,6 +260,20 @@ static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
        out_be32((void __iomem *)udenvt_port, reg);
 }
 
+static void scc_dma_host_set(ide_drive_t *drive, int on)
+{
+       ide_hwif_t *hwif = drive->hwif;
+       u8 unit = (drive->select.b.unit & 0x01);
+       u8 dma_stat = scc_ide_inb(hwif->dma_status);
+
+       if (on)
+               dma_stat |= (1 << (5 + unit));
+       else
+               dma_stat &= ~(1 << (5 + unit));
+
+       scc_ide_outb(dma_stat, hwif->dma_status);
+}
+
 /**
  *     scc_ide_dma_setup       -       begin a DMA phase
  *     @drive: target device
@@ -301,7 +304,7 @@ static int scc_dma_setup(ide_drive_t *drive)
        }
 
        /* PRD table */
-       out_be32((void __iomem *)hwif->dma_prdtable, hwif->dmatable_dma);
+       out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);
 
        /* specify r/w */
        out_be32((void __iomem *)hwif->dma_command, reading);
@@ -315,16 +318,48 @@ static int scc_dma_setup(ide_drive_t *drive)
        return 0;
 }
 
+static void scc_dma_start(ide_drive_t *drive)
+{
+       ide_hwif_t *hwif = drive->hwif;
+       u8 dma_cmd = scc_ide_inb(hwif->dma_command);
+
+       /* start DMA */
+       scc_ide_outb(dma_cmd | 1, hwif->dma_command);
+       hwif->dma = 1;
+       wmb();
+}
+
+static int __scc_dma_end(ide_drive_t *drive)
+{
+       ide_hwif_t *hwif = drive->hwif;
+       u8 dma_stat, dma_cmd;
+
+       drive->waiting_for_dma = 0;
+       /* get DMA command mode */
+       dma_cmd = scc_ide_inb(hwif->dma_command);
+       /* stop DMA */
+       scc_ide_outb(dma_cmd & ~1, hwif->dma_command);
+       /* get DMA status */
+       dma_stat = scc_ide_inb(hwif->dma_status);
+       /* clear the INTR & ERROR bits */
+       scc_ide_outb(dma_stat | 6, hwif->dma_status);
+       /* purge DMA mappings */
+       ide_destroy_dmatable(drive);
+       /* verify good DMA status */
+       hwif->dma = 0;
+       wmb();
+       return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
+}
 
 /**
- *     scc_ide_dma_end -       Stop DMA
+ *     scc_dma_end     -       Stop DMA
  *     @drive: IDE drive
  *
  *     Check and clear INT Status register.
- *      Then call __ide_dma_end().
+ *     Then call __scc_dma_end().
  */
 
-static int scc_ide_dma_end(ide_drive_t * drive)
+static int scc_dma_end(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = HWIF(drive);
        unsigned long intsts_port = hwif->dma_base + 0x014;
@@ -334,7 +369,7 @@ static int scc_ide_dma_end(ide_drive_t * drive)
 
        /* errata A308 workaround: Step5 (check data loss) */
        /* We don't check non ide_disk because it is limited to UDMA4 */
-       if (!(in_be32((void __iomem *)hwif->io_ports[IDE_ALTSTATUS_OFFSET])
+       if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
              & ERR_STAT) &&
            drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
                reg = in_be32((void __iomem *)intsts_port);
@@ -425,7 +460,7 @@ static int scc_ide_dma_end(ide_drive_t * drive)
                break;
        }
 
-       dma_stat = __ide_dma_end(drive);
+       dma_stat = __scc_dma_end(drive);
        if (data_loss)
                dma_stat |= 2; /* emulate DMA error (to retry command) */
        return dma_stat;
@@ -438,7 +473,7 @@ static int scc_dma_test_irq(ide_drive_t *drive)
        u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);
 
        /* SCC errata A252,A308 workaround: Step4 */
-       if ((in_be32((void __iomem *)hwif->io_ports[IDE_ALTSTATUS_OFFSET])
+       if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
             & ERR_STAT) &&
            (int_stat & INTSTS_INTRQ))
                return 1;
@@ -449,7 +484,7 @@ static int scc_dma_test_irq(ide_drive_t *drive)
 
        if (!drive->waiting_for_dma)
                printk(KERN_WARNING "%s: (%s) called while not waiting\n",
-                       drive->name, __FUNCTION__);
+                       drive->name, __func__);
        return 0;
 }
 
@@ -483,7 +518,7 @@ static int setup_mmio_scc (struct pci_dev *dev, const char *name)
        unsigned long dma_size = pci_resource_len(dev, 1);
        void __iomem *ctl_addr;
        void __iomem *dma_addr;
-       int i;
+       int i, ret;
 
        for (i = 0; i < MAX_HWIFS; i++) {
                if (scc_ports[i].ctl == 0)
@@ -492,21 +527,17 @@ static int setup_mmio_scc (struct pci_dev *dev, const char *name)
        if (i >= MAX_HWIFS)
                return -ENOMEM;
 
-       if (!request_mem_region(ctl_base, ctl_size, name)) {
-               printk(KERN_WARNING "%s: IDE controller MMIO ports not available.\n", SCC_PATA_NAME);
-               goto fail_0;
-       }
-
-       if (!request_mem_region(dma_base, dma_size, name)) {
-               printk(KERN_WARNING "%s: IDE controller MMIO ports not available.\n", SCC_PATA_NAME);
-               goto fail_1;
+       ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
+       if (ret < 0) {
+               printk(KERN_ERR "%s: can't reserve resources\n", name);
+               return ret;
        }
 
        if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL)
-               goto fail_2;
+               goto fail_0;
 
        if ((dma_addr = ioremap(dma_base, dma_size)) == NULL)
-               goto fail_3;
+               goto fail_1;
 
        pci_set_master(dev);
        scc_ports[i].ctl = (unsigned long)ctl_addr;
@@ -515,12 +546,8 @@ static int setup_mmio_scc (struct pci_dev *dev, const char *name)
 
        return 1;
 
- fail_3:
-       iounmap(ctl_addr);
- fail_2:
-       release_mem_region(dma_base, dma_size);
  fail_1:
-       release_mem_region(ctl_base, ctl_size);
+       iounmap(ctl_addr);
  fail_0:
        return -ENOMEM;
 }
@@ -542,14 +569,13 @@ static int scc_ide_setup_pci_device(struct pci_dev *dev,
        }
 
        memset(&hw, 0, sizeof(hw));
-       for (i = IDE_DATA_OFFSET; i <= IDE_CONTROL_OFFSET; i++)
-               hw.io_ports[i] = ports->dma + 0x20 + i * 4;
+       for (i = 0; i <= 8; i++)
+               hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
        hw.irq = dev->irq;
        hw.dev = &dev->dev;
        hw.chipset = ide_pci;
        ide_init_port_hw(hwif, &hw);
        hwif->dev = &dev->dev;
-       hwif->cds = d;
 
        idx[0] = hwif->index;
 
@@ -627,6 +653,122 @@ static int __devinit init_setup_scc(struct pci_dev *dev,
        return rc;
 }
 
+static void scc_tf_load(ide_drive_t *drive, ide_task_t *task)
+{
+       struct ide_io_ports *io_ports = &drive->hwif->io_ports;
+       struct ide_taskfile *tf = &task->tf;
+       u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
+
+       if (task->tf_flags & IDE_TFLAG_FLAGGED)
+               HIHI = 0xFF;
+
+       ide_set_irq(drive, 1);
+
+       if (task->tf_flags & IDE_TFLAG_OUT_DATA)
+               out_be32((void *)io_ports->data_addr,
+                        (tf->hob_data << 8) | tf->data);
+
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
+               scc_ide_outb(tf->hob_feature, io_ports->feature_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
+               scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
+               scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
+               scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
+               scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr);
+
+       if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
+               scc_ide_outb(tf->feature, io_ports->feature_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
+               scc_ide_outb(tf->nsect, io_ports->nsect_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
+               scc_ide_outb(tf->lbal, io_ports->lbal_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
+               scc_ide_outb(tf->lbam, io_ports->lbam_addr);
+       if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
+               scc_ide_outb(tf->lbah, io_ports->lbah_addr);
+
+       if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
+               scc_ide_outb((tf->device & HIHI) | drive->select.all,
+                            io_ports->device_addr);
+}
+
+static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
+{
+       struct ide_io_ports *io_ports = &drive->hwif->io_ports;
+       struct ide_taskfile *tf = &task->tf;
+
+       if (task->tf_flags & IDE_TFLAG_IN_DATA) {
+               u16 data = (u16)in_be32((void *)io_ports->data_addr);
+
+               tf->data = data & 0xff;
+               tf->hob_data = (data >> 8) & 0xff;
+       }
+
+       /* be sure we're looking at the low order bits */
+       scc_ide_outb(drive->ctl & ~0x80, io_ports->ctl_addr);
+
+       if (task->tf_flags & IDE_TFLAG_IN_NSECT)
+               tf->nsect  = scc_ide_inb(io_ports->nsect_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAL)
+               tf->lbal   = scc_ide_inb(io_ports->lbal_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAM)
+               tf->lbam   = scc_ide_inb(io_ports->lbam_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_LBAH)
+               tf->lbah   = scc_ide_inb(io_ports->lbah_addr);
+       if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
+               tf->device = scc_ide_inb(io_ports->device_addr);
+
+       if (task->tf_flags & IDE_TFLAG_LBA48) {
+               scc_ide_outb(drive->ctl | 0x80, io_ports->ctl_addr);
+
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
+                       tf->hob_feature = scc_ide_inb(io_ports->feature_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
+                       tf->hob_nsect   = scc_ide_inb(io_ports->nsect_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
+                       tf->hob_lbal    = scc_ide_inb(io_ports->lbal_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
+                       tf->hob_lbam    = scc_ide_inb(io_ports->lbam_addr);
+               if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
+                       tf->hob_lbah    = scc_ide_inb(io_ports->lbah_addr);
+       }
+}
+
+static void scc_input_data(ide_drive_t *drive, struct request *rq,
+                          void *buf, unsigned int len)
+{
+       unsigned long data_addr = drive->hwif->io_ports.data_addr;
+
+       len++;
+
+       if (drive->io_32bit) {
+               scc_ide_insl(data_addr, buf, len / 4);
+
+               if ((len & 3) >= 2)
+                       scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
+       } else
+               scc_ide_insw(data_addr, buf, len / 2);
+}
+
+static void scc_output_data(ide_drive_t *drive,  struct request *rq,
+                           void *buf, unsigned int len)
+{
+       unsigned long data_addr = drive->hwif->io_ports.data_addr;
+
+       len++;
+
+       if (drive->io_32bit) {
+               scc_ide_outsl(data_addr, buf, len / 4);
+
+               if ((len & 3) >= 2)
+                       scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
+       } else
+               scc_ide_outsw(data_addr, buf, len / 2);
+}
+
 /**
  *     init_mmio_iops_scc      -       set up the iops for MMIO
  *     @hwif: interface to set up
@@ -641,15 +783,15 @@ static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
 
        ide_set_hwifdata(hwif, ports);
 
+       hwif->tf_load = scc_tf_load;
+       hwif->tf_read = scc_tf_read;
+
+       hwif->input_data  = scc_input_data;
+       hwif->output_data = scc_output_data;
+
        hwif->INB = scc_ide_inb;
-       hwif->INW = scc_ide_inw;
-       hwif->INSW = scc_ide_insw;
-       hwif->INSL = scc_ide_insl;
        hwif->OUTB = scc_ide_outb;
        hwif->OUTBSYNC = scc_ide_outbsync;
-       hwif->OUTW = scc_ide_outw;
-       hwif->OUTSW = scc_ide_outsw;
-       hwif->OUTSL = scc_ide_outsl;
 
        hwif->dma_base = dma_base;
        hwif->config_data = ports->ctl;
@@ -696,31 +838,41 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
 
        hwif->dma_command = hwif->dma_base;
        hwif->dma_status = hwif->dma_base + 0x04;
-       hwif->dma_prdtable = hwif->dma_base + 0x08;
 
        /* PTERADD */
        out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
 
-       hwif->dma_setup = scc_dma_setup;
-       hwif->ide_dma_end = scc_ide_dma_end;
-       hwif->set_pio_mode = scc_set_pio_mode;
-       hwif->set_dma_mode = scc_set_dma_mode;
-       hwif->ide_dma_test_irq = scc_dma_test_irq;
-       hwif->udma_filter = scc_udma_filter;
-
        if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
                hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
        else
                hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
-
-       hwif->cable_detect = scc_cable_detect;
 }
 
+static const struct ide_port_ops scc_port_ops = {
+       .set_pio_mode           = scc_set_pio_mode,
+       .set_dma_mode           = scc_set_dma_mode,
+       .udma_filter            = scc_udma_filter,
+       .cable_detect           = scc_cable_detect,
+};
+
+static const struct ide_dma_ops scc_dma_ops = {
+       .dma_host_set           = scc_dma_host_set,
+       .dma_setup              = scc_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = scc_dma_start,
+       .dma_end                = scc_dma_end,
+       .dma_test_irq           = scc_dma_test_irq,
+       .dma_lost_irq           = ide_dma_lost_irq,
+       .dma_timeout            = ide_dma_timeout,
+};
+
 #define DECLARE_SCC_DEV(name_str)                      \
   {                                                    \
       .name            = name_str,                     \
       .init_iops       = init_iops_scc,                \
       .init_hwif       = init_hwif_scc,                \
+      .port_ops                = &scc_port_ops,                \
+      .dma_ops         = &scc_dma_ops,                 \
       .host_flags      = IDE_HFLAG_SINGLE,             \
       .pio_mask                = ATA_PIO4,                     \
   }
@@ -754,10 +906,6 @@ static void __devexit scc_remove(struct pci_dev *dev)
 {
        struct scc_ports *ports = pci_get_drvdata(dev);
        ide_hwif_t *hwif = ports->hwif;
-       unsigned long ctl_base = pci_resource_start(dev, 0);
-       unsigned long dma_base = pci_resource_start(dev, 1);
-       unsigned long ctl_size = pci_resource_len(dev, 0);
-       unsigned long dma_size = pci_resource_len(dev, 1);
 
        if (hwif->dmatable_cpu) {
                pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
@@ -765,13 +913,11 @@ static void __devexit scc_remove(struct pci_dev *dev)
                hwif->dmatable_cpu = NULL;
        }
 
-       ide_unregister(hwif->index);
+       ide_unregister(hwif);
 
-       hwif->chipset = ide_unknown;
        iounmap((void*)ports->dma);
        iounmap((void*)ports->ctl);
-       release_mem_region(dma_base, dma_size);
-       release_mem_region(ctl_base, ctl_size);
+       pci_release_selected_regions(dev, (1 << 2) - 1);
        memset(ports, 0, sizeof(*ports));
 }
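
The scc_pata hunks above illustrate the conversion pattern that repeats through the remaining drivers in this diff: method pointers that used to be assigned per-hwif in init_hwif()/init_mmio_iops() move into const ide_port_ops/ide_dma_ops tables referenced from the driver's ide_port_info, and taskfile and PIO data transfers go through the new tf_load/tf_read and input_data/output_data hooks. A minimal sketch of the resulting shape follows; it reuses only names visible in the hunks, and every foo_* identifier is a placeholder rather than code from the tree.

#include <linux/ide.h>

/* Placeholder methods standing in for a driver's real implementations. */
static void foo_set_pio_mode(ide_drive_t *drive, u8 pio)         { /* program PIO timings */ }
static void foo_set_dma_mode(ide_drive_t *drive, const u8 speed) { /* program DMA timings */ }
static u8   foo_cable_detect(ide_hwif_t *hwif)                   { return ATA_CBL_PATA40; }
static int  foo_dma_setup(ide_drive_t *drive)                    { return 0; /* build PRD table */ }
static int  foo_dma_end(ide_drive_t *drive)                      { return 0; }
static int  foo_dma_test_irq(ide_drive_t *drive)                 { return 0; }

static const struct ide_port_ops foo_port_ops = {
	.set_pio_mode	= foo_set_pio_mode,	/* was: hwif->set_pio_mode = ... */
	.set_dma_mode	= foo_set_dma_mode,
	.cable_detect	= foo_cable_detect,
};

static const struct ide_dma_ops foo_dma_ops = {
	.dma_setup	= foo_dma_setup,	/* was: hwif->dma_setup = ... */
	.dma_end	= foo_dma_end,		/* was: hwif->ide_dma_end = ... */
	.dma_test_irq	= foo_dma_test_irq,	/* was: hwif->ide_dma_test_irq = ... */
};

static const struct ide_port_info foo_port_info __devinitdata = {
	.name		= "foo",
	.port_ops	= &foo_port_ops,
	.dma_ops	= &foo_dma_ops,
	.host_flags	= IDE_HFLAG_SINGLE,
	.pio_mask	= ATA_PIO4,
};

The per-driver init_hwif() then shrinks to whatever genuinely needs runtime probing (such as the ATA clock check kept in init_hwif_scc() above).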
 
index cfe927469793ba2819e42c523fc56f2b91ed0a7b..a1fb20826a5b817244d500db92c921a054c9ddee 100644 (file)
@@ -312,7 +312,7 @@ static u8 __devinit ata66_svwks_cobalt(ide_hwif_t *hwif)
        return ATA_CBL_PATA40;
 }
 
-static u8 __devinit ata66_svwks(ide_hwif_t *hwif)
+static u8 __devinit svwks_cable_detect(ide_hwif_t *hwif)
 {
        struct pci_dev *dev = to_pci_dev(hwif->dev);
 
@@ -336,17 +336,18 @@ static u8 __devinit ata66_svwks(ide_hwif_t *hwif)
        return ATA_CBL_PATA40;
 }
 
-static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
-{
-       struct pci_dev *dev = to_pci_dev(hwif->dev);
-
-       hwif->set_pio_mode = &svwks_set_pio_mode;
-       hwif->set_dma_mode = &svwks_set_dma_mode;
-       hwif->udma_filter = &svwks_udma_filter;
+static const struct ide_port_ops osb4_port_ops = {
+       .set_pio_mode           = svwks_set_pio_mode,
+       .set_dma_mode           = svwks_set_dma_mode,
+       .udma_filter            = svwks_udma_filter,
+};
 
-       if (dev->device != PCI_DEVICE_ID_SERVERWORKS_OSB4IDE)
-               hwif->cable_detect = ata66_svwks;
-}
+static const struct ide_port_ops svwks_port_ops = {
+       .set_pio_mode           = svwks_set_pio_mode,
+       .set_dma_mode           = svwks_set_dma_mode,
+       .udma_filter            = svwks_udma_filter,
+       .cable_detect           = svwks_cable_detect,
+};
 
 #define IDE_HFLAGS_SVWKS \
        (IDE_HFLAG_LEGACY_IRQS | \
@@ -356,7 +357,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
        {       /* 0 */
                .name           = "SvrWks OSB4",
                .init_chipset   = init_chipset_svwks,
-               .init_hwif      = init_hwif_svwks,
+               .port_ops       = &osb4_port_ops,
                .host_flags     = IDE_HFLAGS_SVWKS,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
@@ -364,7 +365,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
        },{     /* 1 */
                .name           = "SvrWks CSB5",
                .init_chipset   = init_chipset_svwks,
-               .init_hwif      = init_hwif_svwks,
+               .port_ops       = &svwks_port_ops,
                .host_flags     = IDE_HFLAGS_SVWKS,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
@@ -372,7 +373,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
        },{     /* 2 */
                .name           = "SvrWks CSB6",
                .init_chipset   = init_chipset_svwks,
-               .init_hwif      = init_hwif_svwks,
+               .port_ops       = &svwks_port_ops,
                .host_flags     = IDE_HFLAGS_SVWKS,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
@@ -380,7 +381,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
        },{     /* 3 */
                .name           = "SvrWks CSB6",
                .init_chipset   = init_chipset_svwks,
-               .init_hwif      = init_hwif_svwks,
+               .port_ops       = &svwks_port_ops,
                .host_flags     = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
@@ -388,7 +389,7 @@ static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
        },{     /* 4 */
                .name           = "SvrWks HT1000",
                .init_chipset   = init_chipset_svwks,
-               .init_hwif      = init_hwif_svwks,
+               .port_ops       = &svwks_port_ops,
                .host_flags     = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE,
                .pio_mask       = ATA_PIO4,
                .mwdma_mask     = ATA_MWDMA2,
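
The serverworks change above swaps a runtime PCI-ID check in init_hwif_svwks() for two ops tables: the OSB4 entry simply omits .cable_detect, while the other chipset entries point at svwks_port_ops. Selection then happens once, by table index at probe time; a sketch of that, assuming the same id->driver_data indexing used by siimage_init_one() later in this diff (foo_init_one() itself is illustrative):

/* Hypothetical probe entry: picks the ide_port_info (and with it the
 * right ops tables) by PCI id-table index instead of branching on
 * dev->device inside init_hwif(). */
static int __devinit foo_init_one(struct pci_dev *dev,
				  const struct pci_device_id *id)
{
	return ide_setup_pci_device(dev, &serverworks_chipsets[id->driver_data]);
}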
index 6bd9523cf6428bc65815bc72b5d5366758771e4d..16a0bce17d6905b03959ea8817b227be2d70e94b 100644 (file)
@@ -98,28 +98,28 @@ sgiioc4_init_hwif_ports(hw_regs_t * hw, unsigned long data_port,
        int i;
 
        /* Registers are word (32 bit) aligned */
-       for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
-               hw->io_ports[i] = reg + i * 4;
+       for (i = 0; i <= 7; i++)
+               hw->io_ports_array[i] = reg + i * 4;
 
        if (ctrl_port)
-               hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
+               hw->io_ports.ctl_addr = ctrl_port;
 
        if (irq_port)
-               hw->io_ports[IDE_IRQ_OFFSET] = irq_port;
+               hw->io_ports.irq_addr = irq_port;
 }
 
 static void
 sgiioc4_maskproc(ide_drive_t * drive, int mask)
 {
        writeb(mask ? (drive->ctl | 2) : (drive->ctl & ~2),
-              (void __iomem *)drive->hwif->io_ports[IDE_CONTROL_OFFSET]);
+              (void __iomem *)drive->hwif->io_ports.ctl_addr);
 }
 
 static int
 sgiioc4_checkirq(ide_hwif_t * hwif)
 {
        unsigned long intr_addr =
-               hwif->io_ports[IDE_IRQ_OFFSET] + IOC4_INTR_REG * 4;
+               hwif->io_ports.irq_addr + IOC4_INTR_REG * 4;
 
        if ((u8)readl((void __iomem *)intr_addr) & 0x03)
                return 1;
@@ -134,8 +134,8 @@ sgiioc4_clearirq(ide_drive_t * drive)
 {
        u32 intr_reg;
        ide_hwif_t *hwif = HWIF(drive);
-       unsigned long other_ir =
-           hwif->io_ports[IDE_IRQ_OFFSET] + (IOC4_INTR_REG << 2);
+       struct ide_io_ports *io_ports = &hwif->io_ports;
+       unsigned long other_ir = io_ports->irq_addr + (IOC4_INTR_REG << 2);
 
        /* Code to check for PCI error conditions */
        intr_reg = readl((void __iomem *)other_ir);
@@ -147,12 +147,12 @@ sgiioc4_clearirq(ide_drive_t * drive)
                 * a "clear" status if it got cleared.  If not, then spin
                 * for a bit trying to clear it.
                 */
-               u8 stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+               u8 stat = sgiioc4_INB(io_ports->status_addr);
                int count = 0;
-               stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+               stat = sgiioc4_INB(io_ports->status_addr);
                while ((stat & 0x80) && (count++ < 100)) {
                        udelay(1);
-                       stat = sgiioc4_INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+                       stat = sgiioc4_INB(io_ports->status_addr);
                }
 
                if (intr_reg & 0x02) {
@@ -162,18 +162,18 @@ sgiioc4_clearirq(ide_drive_t * drive)
                            pci_stat_cmd_reg;
 
                        pci_err_addr_low =
-                               readl((void __iomem *)hwif->io_ports[IDE_IRQ_OFFSET]);
+                               readl((void __iomem *)io_ports->irq_addr);
                        pci_err_addr_high =
-                               readl((void __iomem *)(hwif->io_ports[IDE_IRQ_OFFSET] + 4));
+                               readl((void __iomem *)(io_ports->irq_addr + 4));
                        pci_read_config_dword(dev, PCI_COMMAND,
                                              &pci_stat_cmd_reg);
                        printk(KERN_ERR
                               "%s(%s) : PCI Bus Error when doing DMA:"
                                   " status-cmd reg is 0x%x\n",
-                              __FUNCTION__, drive->name, pci_stat_cmd_reg);
+                              __func__, drive->name, pci_stat_cmd_reg);
                        printk(KERN_ERR
                               "%s(%s) : PCI Error Address is 0x%x%x\n",
-                              __FUNCTION__, drive->name,
+                              __func__, drive->name,
                               pci_err_addr_high, pci_err_addr_low);
                        /* Clear the PCI Error indicator */
                        pci_write_config_dword(dev, PCI_COMMAND, 0x00000146);
@@ -188,7 +188,7 @@ sgiioc4_clearirq(ide_drive_t * drive)
        return intr_reg & 3;
 }
 
-static void sgiioc4_ide_dma_start(ide_drive_t * drive)
+static void sgiioc4_dma_start(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = HWIF(drive);
        unsigned long ioc4_dma_addr = hwif->dma_base + IOC4_DMA_CTRL * 4;
@@ -215,8 +215,7 @@ sgiioc4_ide_dma_stop(ide_hwif_t *hwif, u64 dma_base)
 }
 
 /* Stops the IOC4 DMA Engine */
-static int
-sgiioc4_ide_dma_end(ide_drive_t * drive)
+static int sgiioc4_dma_end(ide_drive_t *drive)
 {
        u32 ioc4_dma, bc_dev, bc_mem, num, valid = 0, cnt = 0;
        ide_hwif_t *hwif = HWIF(drive);
@@ -232,7 +231,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
                printk(KERN_ERR
                       "%s(%s): IOC4 DMA STOP bit is still 1 :"
                       "ioc4_dma_reg 0x%x\n",
-                      __FUNCTION__, drive->name, ioc4_dma);
+                      __func__, drive->name, ioc4_dma);
                dma_stat = 1;
        }
 
@@ -251,7 +250,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
                udelay(1);
        }
        if (!valid) {
-               printk(KERN_ERR "%s(%s) : DMA incomplete\n", __FUNCTION__,
+               printk(KERN_ERR "%s(%s) : DMA incomplete\n", __func__,
                       drive->name);
                dma_stat = 1;
        }
@@ -264,7 +263,7 @@ sgiioc4_ide_dma_end(ide_drive_t * drive)
                        printk(KERN_ERR
                               "%s(%s): WARNING!! byte_count_dev %d "
                               "!= byte_count_mem %d\n",
-                              __FUNCTION__, drive->name, bc_dev, bc_mem);
+                              __func__, drive->name, bc_dev, bc_mem);
                }
        }
 
@@ -279,8 +278,7 @@ static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed)
 }
 
 /* returns 1 if dma irq issued, 0 otherwise */
-static int
-sgiioc4_ide_dma_test_irq(ide_drive_t * drive)
+static int sgiioc4_dma_test_irq(ide_drive_t *drive)
 {
        return sgiioc4_checkirq(HWIF(drive));
 }
@@ -294,7 +292,7 @@ static void sgiioc4_dma_host_set(ide_drive_t *drive, int on)
 static void
 sgiioc4_resetproc(ide_drive_t * drive)
 {
-       sgiioc4_ide_dma_end(drive);
+       sgiioc4_dma_end(drive);
        sgiioc4_clearirq(drive);
 }
 
@@ -329,13 +327,17 @@ sgiioc4_INB(unsigned long port)
 
 /* Creates a dma map for the scatter-gather list entries */
 static int __devinit
-ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
+ide_dma_sgiioc4(ide_hwif_t *hwif, const struct ide_port_info *d)
 {
        struct pci_dev *dev = to_pci_dev(hwif->dev);
+       unsigned long dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
        void __iomem *virt_dma_base;
        int num_ports = sizeof (ioc4_dma_regs_t);
        void *pad;
 
+       if (dma_base == 0)
+               return -1;
+
        printk(KERN_INFO "%s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name,
               dma_base, dma_base + num_ports - 1);
 
@@ -343,7 +345,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
                printk(KERN_ERR
                       "%s(%s) -- ERROR, Addresses 0x%p to 0x%p "
                       "ALREADY in use\n",
-                      __FUNCTION__, hwif->name, (void *) dma_base,
+                      __func__, hwif->name, (void *) dma_base,
                       (void *) dma_base + num_ports - 1);
                return -1;
        }
@@ -352,7 +354,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
        if (virt_dma_base == NULL) {
                printk(KERN_ERR
                       "%s(%s) -- ERROR, Unable to map addresses 0x%lx to 0x%lx\n",
-                      __FUNCTION__, hwif->name, dma_base, dma_base + num_ports - 1);
+                      __func__, hwif->name, dma_base, dma_base + num_ports - 1);
                goto dma_remap_failure;
        }
        hwif->dma_base = (unsigned long) virt_dma_base;
@@ -378,7 +380,7 @@ ide_dma_sgiioc4(ide_hwif_t * hwif, unsigned long dma_base)
                            hwif->dmatable_cpu, hwif->dmatable_dma);
        printk(KERN_INFO
               "%s() -- Error! Unable to allocate DMA Maps for drive %s\n",
-              __FUNCTION__, hwif->name);
+              __func__, hwif->name);
        printk(KERN_INFO
               "Changing from DMA to PIO mode for Drive %s\n", hwif->name);
 
@@ -406,14 +408,14 @@ sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
        if (ioc4_dma & IOC4_S_DMA_ACTIVE) {
                printk(KERN_WARNING
                        "%s(%s):Warning!! DMA from previous transfer was still active\n",
-                      __FUNCTION__, drive->name);
+                      __func__, drive->name);
                writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
                ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);
 
                if (ioc4_dma & IOC4_S_DMA_STOP)
                        printk(KERN_ERR
                               "%s(%s) : IOC4 Dma STOP bit is still 1\n",
-                              __FUNCTION__, drive->name);
+                              __func__, drive->name);
        }
 
        ioc4_dma = readl((void __iomem *)ioc4_dma_addr);
@@ -421,14 +423,14 @@ sgiioc4_configure_for_dma(int dma_direction, ide_drive_t * drive)
                printk(KERN_WARNING
                       "%s(%s) : Warning!! - DMA Error during Previous"
                       " transfer | status 0x%x\n",
-                      __FUNCTION__, drive->name, ioc4_dma);
+                      __func__, drive->name, ioc4_dma);
                writel(IOC4_S_DMA_STOP, (void __iomem *)ioc4_dma_addr);
                ioc4_dma = sgiioc4_ide_dma_stop(hwif, dma_base);
 
                if (ioc4_dma & IOC4_S_DMA_STOP)
                        printk(KERN_ERR
                               "%s(%s) : IOC4 DMA STOP bit is still 1\n",
-                              __FUNCTION__, drive->name);
+                              __func__, drive->name);
        }
 
        /* Address of the Scatter Gather List */
@@ -519,7 +521,7 @@ use_pio_instead:
        return 0;               /* revert to PIO for this request */
 }
 
-static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
+static int sgiioc4_dma_setup(ide_drive_t *drive)
 {
        struct request *rq = HWGROUP(drive)->rq;
        unsigned int count = 0;
@@ -548,45 +550,37 @@ static int sgiioc4_ide_dma_setup(ide_drive_t *drive)
        return 0;
 }
 
-static void __devinit
-ide_init_sgiioc4(ide_hwif_t * hwif)
-{
-       hwif->mmio = 1;
-       hwif->set_pio_mode = NULL; /* Sets timing for PIO mode */
-       hwif->set_dma_mode = &sgiioc4_set_dma_mode;
-       hwif->selectproc = NULL;/* Use the default routine to select drive */
-       hwif->reset_poll = NULL;/* No HBA specific reset_poll needed */
-       hwif->pre_reset = NULL; /* No HBA specific pre_set needed */
-       hwif->resetproc = &sgiioc4_resetproc;/* Reset DMA engine,
-                                               clear interrupts */
-       hwif->maskproc = &sgiioc4_maskproc;     /* Mask on/off NIEN register */
-       hwif->quirkproc = NULL;
-
-       hwif->INB = &sgiioc4_INB;
-
-       if (hwif->dma_base == 0)
-               return;
+static const struct ide_port_ops sgiioc4_port_ops = {
+       .set_dma_mode           = sgiioc4_set_dma_mode,
+       /* reset DMA engine, clear IRQs */
+       .resetproc              = sgiioc4_resetproc,
+       /* mask on/off NIEN register */
+       .maskproc               = sgiioc4_maskproc,
+};
 
-       hwif->dma_host_set = &sgiioc4_dma_host_set;
-       hwif->dma_setup = &sgiioc4_ide_dma_setup;
-       hwif->dma_start = &sgiioc4_ide_dma_start;
-       hwif->ide_dma_end = &sgiioc4_ide_dma_end;
-       hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq;
-       hwif->dma_lost_irq = &sgiioc4_dma_lost_irq;
-       hwif->dma_timeout = &ide_dma_timeout;
-}
+static const struct ide_dma_ops sgiioc4_dma_ops = {
+       .dma_host_set           = sgiioc4_dma_host_set,
+       .dma_setup              = sgiioc4_dma_setup,
+       .dma_start              = sgiioc4_dma_start,
+       .dma_end                = sgiioc4_dma_end,
+       .dma_test_irq           = sgiioc4_dma_test_irq,
+       .dma_lost_irq           = sgiioc4_dma_lost_irq,
+       .dma_timeout            = ide_dma_timeout,
+};
 
 static const struct ide_port_info sgiioc4_port_info __devinitdata = {
        .chipset                = ide_pci,
-       .host_flags             = IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
-                                 IDE_HFLAG_NO_AUTOTUNE,
+       .init_dma               = ide_dma_sgiioc4,
+       .port_ops               = &sgiioc4_port_ops,
+       .dma_ops                = &sgiioc4_dma_ops,
+       .host_flags             = IDE_HFLAG_MMIO,
        .mwdma_mask             = ATA_MWDMA2_ONLY,
 };
 
 static int __devinit
 sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
 {
-       unsigned long cmd_base, dma_base, irqport;
+       unsigned long cmd_base, irqport;
        unsigned long bar0, cmd_phys_base, ctl;
        void __iomem *virt_base;
        ide_hwif_t *hwif;
@@ -612,7 +606,6 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
        cmd_base = (unsigned long) virt_base + IOC4_CMD_OFFSET;
        ctl = (unsigned long) virt_base + IOC4_CTRL_OFFSET;
        irqport = (unsigned long) virt_base + IOC4_INTR_OFFSET;
-       dma_base = pci_resource_start(dev, 0) + IOC4_DMA_OFFSET;
 
        cmd_phys_base = bar0 + IOC4_CMD_OFFSET;
        if (!request_mem_region(cmd_phys_base, IOC4_CMD_CTL_BLK_SIZE,
@@ -620,7 +613,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
                printk(KERN_ERR
                        "%s : %s -- ERROR, Addresses "
                        "0x%p to 0x%p ALREADY in use\n",
-                      __FUNCTION__, hwif->name, (void *) cmd_phys_base,
+                      __func__, hwif->name, (void *) cmd_phys_base,
                       (void *) cmd_phys_base + IOC4_CMD_CTL_BLK_SIZE);
                return -ENOMEM;
        }
@@ -641,13 +634,7 @@ sgiioc4_ide_setup_pci_device(struct pci_dev *dev)
        /* Initializing chipset IRQ Registers */
        writel(0x03, (void __iomem *)(irqport + IOC4_INTR_SET * 4));
 
-       if (dma_base == 0 || ide_dma_sgiioc4(hwif, dma_base)) {
-               printk(KERN_INFO "%s: %s Bus-Master DMA disabled\n",
-                                hwif->name, DRV_NAME);
-               d.mwdma_mask = 0;
-       }
-
-       ide_init_sgiioc4(hwif);
+       hwif->INB = &sgiioc4_INB;
 
        idx[0] = hwif->index;
 
index c9ecab8aeb61d6be5c67ba3b192cdf65abcea2d7..0006b9e58567b7d595de4253b6d67e4a78f12d8f 100644 (file)
@@ -1,8 +1,8 @@
 /*
  * Copyright (C) 2001-2002     Andre Hedrick <andre@linux-ide.org>
  * Copyright (C) 2003          Red Hat <alan@redhat.com>
- * Copyright (C) 2007          MontaVista Software, Inc.
- * Copyright (C) 2007          Bartlomiej Zolnierkiewicz
+ * Copyright (C) 2007-2008     MontaVista Software, Inc.
+ * Copyright (C) 2007-2008     Bartlomiej Zolnierkiewicz
  *
  *  May be copied or modified under the terms of the GNU General Public License
  *
  *
  *  FAQ Items:
  *     If you are using Marvell SATA-IDE adapters with Maxtor drives
- *     ensure the system is set up for ATA100/UDMA5 not UDMA6.
+ *     ensure the system is set up for ATA100/UDMA5, not UDMA6.
  *
  *     If you are using WD drives with SATA bridges you must set the
- *     drive to "Single". "Master" will hang
+ *     drive to "Single". "Master" will hang.
  *
  *     If you have strange problems with nVidia chipset systems please
  *     see the SI support documentation and update your system BIOS
 #include <linux/hdreg.h>
 #include <linux/ide.h>
 #include <linux/init.h>
-
-#include <asm/io.h>
+#include <linux/io.h>
 
 /**
  *     pdev_is_sata            -       check if device is SATA
  *     @pdev:  PCI device to check
- *     
+ *
  *     Returns true if this is a SATA controller
  */
+
 static int pdev_is_sata(struct pci_dev *pdev)
 {
 #ifdef CONFIG_BLK_DEV_IDE_SATA
-       switch(pdev->device) {
-               case PCI_DEVICE_ID_SII_3112:
-               case PCI_DEVICE_ID_SII_1210SA:
-                       return 1;
-               case PCI_DEVICE_ID_SII_680:
-                       return 0;
+       switch (pdev->device) {
+       case PCI_DEVICE_ID_SII_3112:
+       case PCI_DEVICE_ID_SII_1210SA:
+               return 1;
+       case PCI_DEVICE_ID_SII_680:
+               return 0;
        }
        BUG();
 #endif
@@ -70,10 +69,10 @@ static int pdev_is_sata(struct pci_dev *pdev)
 /**
  *     is_sata                 -       check if hwif is SATA
  *     @hwif:  interface to check
- *     
+ *
  *     Returns true if this is a SATA controller
  */
+
 static inline int is_sata(ide_hwif_t *hwif)
 {
        return pdev_is_sata(to_pci_dev(hwif->dev));
@@ -86,21 +85,22 @@ static inline int is_sata(ide_hwif_t *hwif)
  *
  *     Turn a config register offset into the right address in either
  *     PCI space or MMIO space to access the control register in question
- *     Thankfully this is a configuration operation so isnt performance
- *     criticial. 
+ *     Thankfully this is a configuration operation, so isn't performance
+ *     critical.
  */
+
 static unsigned long siimage_selreg(ide_hwif_t *hwif, int r)
 {
        unsigned long base = (unsigned long)hwif->hwif_data;
+
        base += 0xA0 + r;
-       if(hwif->mmio)
-               base += (hwif->channel << 6);
+       if (hwif->mmio)
+               base += hwif->channel << 6;
        else
-               base += (hwif->channel << 4);
+               base += hwif->channel << 4;
        return base;
 }
-       
+
 /**
  *     siimage_seldev          -       return register base
  *     @hwif: interface
@@ -110,20 +110,69 @@ static unsigned long siimage_selreg(ide_hwif_t *hwif, int r)
  *     PCI space or MMIO space to access the control register in question
  *     including accounting for the unit shift.
  */
+
 static inline unsigned long siimage_seldev(ide_drive_t *drive, int r)
 {
        ide_hwif_t *hwif        = HWIF(drive);
-       unsigned long base = (unsigned long)hwif->hwif_data;
+       unsigned long base      = (unsigned long)hwif->hwif_data;
+
        base += 0xA0 + r;
-       if(hwif->mmio)
-               base += (hwif->channel << 6);
+       if (hwif->mmio)
+               base += hwif->channel << 6;
        else
-               base += (hwif->channel << 4);
+               base += hwif->channel << 4;
        base |= drive->select.b.unit << drive->select.b.unit;
        return base;
 }
 
+static u8 sil_ioread8(struct pci_dev *dev, unsigned long addr)
+{
+       u8 tmp = 0;
+
+       if (pci_get_drvdata(dev))
+               tmp = readb((void __iomem *)addr);
+       else
+               pci_read_config_byte(dev, addr, &tmp);
+
+       return tmp;
+}
+
+static u16 sil_ioread16(struct pci_dev *dev, unsigned long addr)
+{
+       u16 tmp = 0;
+
+       if (pci_get_drvdata(dev))
+               tmp = readw((void __iomem *)addr);
+       else
+               pci_read_config_word(dev, addr, &tmp);
+
+       return tmp;
+}
+
+static void sil_iowrite8(struct pci_dev *dev, u8 val, unsigned long addr)
+{
+       if (pci_get_drvdata(dev))
+               writeb(val, (void __iomem *)addr);
+       else
+               pci_write_config_byte(dev, addr, val);
+}
+
+static void sil_iowrite16(struct pci_dev *dev, u16 val, unsigned long addr)
+{
+       if (pci_get_drvdata(dev))
+               writew(val, (void __iomem *)addr);
+       else
+               pci_write_config_word(dev, addr, val);
+}
+
+static void sil_iowrite32(struct pci_dev *dev, u32 val, unsigned long addr)
+{
+       if (pci_get_drvdata(dev))
+               writel(val, (void __iomem *)addr);
+       else
+               pci_write_config_dword(dev, addr, val);
+}
+
 /**
  *     sil_udma_filter         -       compute UDMA mask
  *     @drive: IDE device
@@ -136,24 +185,26 @@ static inline unsigned long siimage_seldev(ide_drive_t *drive, int r)
 
 static u8 sil_pata_udma_filter(ide_drive_t *drive)
 {
-       ide_hwif_t *hwif = drive->hwif;
-       struct pci_dev *dev = to_pci_dev(hwif->dev);
-       unsigned long base = (unsigned long) hwif->hwif_data;
-       u8 mask = 0, scsc = 0;
+       ide_hwif_t *hwif        = drive->hwif;
+       struct pci_dev *dev     = to_pci_dev(hwif->dev);
+       unsigned long base      = (unsigned long)hwif->hwif_data;
+       u8 scsc, mask           = 0;
 
-       if (hwif->mmio)
-               scsc = hwif->INB(base + 0x4A);
-       else
-               pci_read_config_byte(dev, 0x8A, &scsc);
+       scsc = sil_ioread8(dev, base + (hwif->mmio ? 0x4A : 0x8A));
 
-       if ((scsc & 0x30) == 0x10)      /* 133 */
+       switch (scsc & 0x30) {
+       case 0x10:      /* 133 */
                mask = ATA_UDMA6;
-       else if ((scsc & 0x30) == 0x20) /* 2xPCI */
+               break;
+       case 0x20:      /* 2xPCI */
                mask = ATA_UDMA6;
-       else if ((scsc & 0x30) == 0x00) /* 100 */
+               break;
+       case 0x00:      /* 100 */
                mask = ATA_UDMA5;
-       else    /* Disabled ? */
+               break;
+       default:        /* Disabled ? */
                BUG();
+       }
 
        return mask;
 }
@@ -175,15 +226,16 @@ static u8 sil_sata_udma_filter(ide_drive_t *drive)
 
 static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
 {
-       const u16 tf_speed[]    = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 };
-       const u16 data_speed[]  = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
+       static const u16 tf_speed[]   = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 };
+       static const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
 
        ide_hwif_t *hwif        = HWIF(drive);
+       struct pci_dev *dev     = to_pci_dev(hwif->dev);
        ide_drive_t *pair       = ide_get_paired_drive(drive);
        u32 speedt              = 0;
        u16 speedp              = 0;
        unsigned long addr      = siimage_seldev(drive, 0x04);
-       unsigned long tfaddr    = siimage_selreg(hwif, 0x02);
+       unsigned long tfaddr    = siimage_selreg(hwif,  0x02);
        unsigned long base      = (unsigned long)hwif->hwif_data;
        u8 tf_pio               = pio;
        u8 addr_mask            = hwif->channel ? (hwif->mmio ? 0xF4 : 0x84)
@@ -203,36 +255,20 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
        speedp = data_speed[pio];
        speedt = tf_speed[tf_pio];
 
-       if (hwif->mmio) {
-               hwif->OUTW(speedp, addr);
-               hwif->OUTW(speedt, tfaddr);
-               /* Now set up IORDY */
-               if (pio > 2)
-                       hwif->OUTW(hwif->INW(tfaddr-2)|0x200, tfaddr-2);
-               else
-                       hwif->OUTW(hwif->INW(tfaddr-2)&~0x200, tfaddr-2);
-
-               mode = hwif->INB(base + addr_mask);
-               mode &= ~(unit ? 0x30 : 0x03);
-               mode |= (unit ? 0x10 : 0x01);
-               hwif->OUTB(mode, base + addr_mask);
-       } else {
-               struct pci_dev *dev = to_pci_dev(hwif->dev);
-
-               pci_write_config_word(dev, addr, speedp);
-               pci_write_config_word(dev, tfaddr, speedt);
-               pci_read_config_word(dev, tfaddr - 2, &speedp);
-               speedp &= ~0x200;
-               /* Set IORDY for mode 3 or 4 */
-               if (pio > 2)
-                       speedp |= 0x200;
-               pci_write_config_word(dev, tfaddr - 2, speedp);
-
-               pci_read_config_byte(dev, addr_mask, &mode);
-               mode &= ~(unit ? 0x30 : 0x03);
-               mode |= (unit ? 0x10 : 0x01);
-               pci_write_config_byte(dev, addr_mask, mode);
-       }
+       sil_iowrite16(dev, speedp, addr);
+       sil_iowrite16(dev, speedt, tfaddr);
+
+       /* now set up IORDY */
+       speedp = sil_ioread16(dev, tfaddr - 2);
+       speedp &= ~0x200;
+       if (pio > 2)
+               speedp |= 0x200;
+       sil_iowrite16(dev, speedp, tfaddr - 2);
+
+       mode = sil_ioread8(dev, base + addr_mask);
+       mode &= ~(unit ? 0x30 : 0x03);
+       mode |= unit ? 0x10 : 0x01;
+       sil_iowrite8(dev, mode, base + addr_mask);
 }
 
 /**
@@ -245,63 +281,49 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
 
 static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
 {
-       u8 ultra6[]             = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 };
-       u8 ultra5[]             = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 };
-       u16 dma[]               = { 0x2208, 0x10C2, 0x10C1 };
+       static const u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 };
+       static const u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 };
+       static const u16 dma[]   = { 0x2208, 0x10C2, 0x10C1 };
 
        ide_hwif_t *hwif        = HWIF(drive);
        struct pci_dev *dev     = to_pci_dev(hwif->dev);
        u16 ultra = 0, multi    = 0;
        u8 mode = 0, unit       = drive->select.b.unit;
        unsigned long base      = (unsigned long)hwif->hwif_data;
-       u8 scsc = 0, addr_mask  = ((hwif->channel) ?
-                                   ((hwif->mmio) ? 0xF4 : 0x84) :
-                                   ((hwif->mmio) ? 0xB4 : 0x80));
-                                   
+       u8 scsc = 0, addr_mask  = hwif->channel ?
+                                       (hwif->mmio ? 0xF4 : 0x84) :
+                                       (hwif->mmio ? 0xB4 : 0x80);
        unsigned long ma        = siimage_seldev(drive, 0x08);
        unsigned long ua        = siimage_seldev(drive, 0x0C);
 
-       if (hwif->mmio) {
-               scsc = hwif->INB(base + 0x4A);
-               mode = hwif->INB(base + addr_mask);
-               multi = hwif->INW(ma);
-               ultra = hwif->INW(ua);
-       } else {
-               pci_read_config_byte(dev, 0x8A, &scsc);
-               pci_read_config_byte(dev, addr_mask, &mode);
-               pci_read_config_word(dev, ma, &multi);
-               pci_read_config_word(dev, ua, &ultra);
-       }
+       scsc  = sil_ioread8 (dev, base + (hwif->mmio ? 0x4A : 0x8A));
+       mode  = sil_ioread8 (dev, base + addr_mask);
+       multi = sil_ioread16(dev, ma);
+       ultra = sil_ioread16(dev, ua);
 
-       mode &= ~((unit) ? 0x30 : 0x03);
+       mode  &= ~(unit ? 0x30 : 0x03);
        ultra &= ~0x3F;
        scsc = ((scsc & 0x30) == 0x00) ? 0 : 1;
 
        scsc = is_sata(hwif) ? 1 : scsc;
 
        if (speed >= XFER_UDMA_0) {
-               multi = dma[2];
-               ultra |= (scsc ? ultra6[speed - XFER_UDMA_0] :
-                                ultra5[speed - XFER_UDMA_0]);
-               mode |= (unit ? 0x30 : 0x03);
+               multi  = dma[2];
+               ultra |= scsc ? ultra6[speed - XFER_UDMA_0] :
+                               ultra5[speed - XFER_UDMA_0];
+               mode  |= unit ? 0x30 : 0x03;
        } else {
                multi = dma[speed - XFER_MW_DMA_0];
-               mode |= (unit ? 0x20 : 0x02);
+               mode |= unit ? 0x20 : 0x02;
        }
 
-       if (hwif->mmio) {
-               hwif->OUTB(mode, base + addr_mask);
-               hwif->OUTW(multi, ma);
-               hwif->OUTW(ultra, ua);
-       } else {
-               pci_write_config_byte(dev, addr_mask, mode);
-               pci_write_config_word(dev, ma, multi);
-               pci_write_config_word(dev, ua, ultra);
-       }
+       sil_iowrite8 (dev, mode, base + addr_mask);
+       sil_iowrite16(dev, multi, ma);
+       sil_iowrite16(dev, ultra, ua);
 }
 
 /* returns 1 if dma irq issued, 0 otherwise */
-static int siimage_io_ide_dma_test_irq (ide_drive_t *drive)
+static int siimage_io_dma_test_irq(ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
        struct pci_dev *dev     = to_pci_dev(hwif->dev);
@@ -309,25 +331,26 @@ static int siimage_io_ide_dma_test_irq (ide_drive_t *drive)
        unsigned long addr      = siimage_selreg(hwif, 1);
 
        /* return 1 if INTR asserted */
-       if ((hwif->INB(hwif->dma_status) & 4) == 4)
+       if (hwif->INB(hwif->dma_status) & 4)
                return 1;
 
        /* return 1 if Device INTR asserted */
        pci_read_config_byte(dev, addr, &dma_altstat);
        if (dma_altstat & 8)
-               return 0;       //return 1;
+               return 0;       /* return 1; */
+
        return 0;
 }
 
 /**
- *     siimage_mmio_ide_dma_test_irq   -       check we caused an IRQ
+ *     siimage_mmio_dma_test_irq       -       check we caused an IRQ
  *     @drive: drive we are testing
  *
  *     Check if we caused an IDE DMA interrupt. We may also have caused
  *     SATA status interrupts, if so we clean them up and continue.
  */
-static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive)
+
+static int siimage_mmio_dma_test_irq(ide_drive_t *drive)
 {
        ide_hwif_t *hwif        = HWIF(drive);
        unsigned long addr      = siimage_selreg(hwif, 0x1);
@@ -335,9 +358,9 @@ static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive)
                = (void __iomem *)hwif->sata_scr[SATA_ERROR_OFFSET];
 
        if (sata_error_addr) {
-               unsigned long base = (unsigned long)hwif->hwif_data;
-               u32 ext_stat = readl((void __iomem *)(base + 0x10));
-               u8 watchdog = 0;
+               unsigned long base      = (unsigned long)hwif->hwif_data;
+               u32 ext_stat            = readl((void __iomem *)(base + 0x10));
+               u8 watchdog             = 0;
 
                if (ext_stat & ((hwif->channel) ? 0x40 : 0x10)) {
                        u32 sata_error = readl(sata_error_addr);
@@ -346,29 +369,34 @@ static int siimage_mmio_ide_dma_test_irq (ide_drive_t *drive)
                        watchdog = (sata_error & 0x00680000) ? 1 : 0;
                        printk(KERN_WARNING "%s: sata_error = 0x%08x, "
                                "watchdog = %d, %s\n",
-                               drive->name, sata_error, watchdog,
-                               __FUNCTION__);
-
-               } else {
+                               drive->name, sata_error, watchdog, __func__);
+               } else
                        watchdog = (ext_stat & 0x8000) ? 1 : 0;
-               }
-               ext_stat >>= 16;
 
+               ext_stat >>= 16;
                if (!(ext_stat & 0x0404) && !watchdog)
                        return 0;
        }
 
        /* return 1 if INTR asserted */
-       if ((readb((void __iomem *)hwif->dma_status) & 0x04) == 0x04)
+       if (readb((void __iomem *)hwif->dma_status) & 0x04)
                return 1;
 
        /* return 1 if Device INTR asserted */
-       if ((readb((void __iomem *)addr) & 8) == 8)
-               return 0;       //return 1;
+       if (readb((void __iomem *)addr) & 8)
+               return 0;       /* return 1; */
 
        return 0;
 }
 
+static int siimage_dma_test_irq(ide_drive_t *drive)
+{
+       if (drive->hwif->mmio)
+               return siimage_mmio_dma_test_irq(drive);
+       else
+               return siimage_io_dma_test_irq(drive);
+}
+
 /**
  *     sil_sata_reset_poll     -       wait for SATA reset
  *     @drive: drive we are resetting
@@ -415,63 +443,33 @@ static void sil_sata_pre_reset(ide_drive_t *drive)
 }
 
 /**
- *     proc_reports_siimage            -       add siimage controller to proc
- *     @dev: PCI device
- *     @clocking: SCSC value
- *     @name: controller name
- *
- *     Report the clocking mode of the controller and add it to
- *     the /proc interface layer
- */
-static void proc_reports_siimage (struct pci_dev *dev, u8 clocking, const char *name)
-{
-       if (!pdev_is_sata(dev)) {
-               printk(KERN_INFO "%s: BASE CLOCK ", name);
-               clocking &= 0x03;
-               switch (clocking) {
-                       case 0x03: printk("DISABLED!\n"); break;
-                       case 0x02: printk("== 2X PCI\n"); break;
-                       case 0x01: printk("== 133\n"); break;
-                       case 0x00: printk("== 100\n"); break;
-               }
-       }
-}
-
-/**
- *     setup_mmio_siimage      -       switch an SI controller into MMIO
+ *     setup_mmio_siimage      -       switch controller into MMIO mode
  *     @dev: PCI device we are configuring
  *     @name: device name
  *
- *     Attempt to put the device into mmio mode. There are some slight
- *     complications here with certain systems where the mmio bar isnt
- *     mapped so we have to be sure we can fall back to I/O.
+ *     Attempt to put the device into MMIO mode. There are some slight
+ *     complications here with certain systems where the MMIO BAR isn't
+ *     mapped, so we have to be sure that we can fall back to I/O.
  */
-static unsigned int setup_mmio_siimage (struct pci_dev *dev, const char *name)
+
+static unsigned int setup_mmio_siimage(struct pci_dev *dev, const char *name)
 {
        resource_size_t bar5    = pci_resource_start(dev, 5);
        unsigned long barsize   = pci_resource_len(dev, 5);
-       u8 tmpbyte      = 0;
        void __iomem *ioaddr;
-       u32 tmp, irq_mask;
 
        /*
-        *      Drop back to PIO if we can't map the mmio. Some
-        *      systems seem to get terminally confused in the PCI
-        *      spaces.
+        *      Drop back to PIO if we can't map the MMIO. Some systems
+        *      seem to get terminally confused in the PCI spaces.
         */
-        
-       if(!request_mem_region(bar5, barsize, name))
-       {
-               printk(KERN_WARNING "siimage: IDE controller MMIO ports not available.\n");
+       if (!request_mem_region(bar5, barsize, name)) {
+               printk(KERN_WARNING "siimage: IDE controller MMIO ports not "
+                                   "available.\n");
                return 0;
        }
-               
-       ioaddr = ioremap(bar5, barsize);
 
-       if (ioaddr == NULL)
-       {
+       ioaddr = ioremap(bar5, barsize);
+       if (ioaddr == NULL) {
                release_mem_region(bar5, barsize);
                return 0;
        }
@@ -479,62 +477,6 @@ static unsigned int setup_mmio_siimage (struct pci_dev *dev, const char *name)
        pci_set_master(dev);
        pci_set_drvdata(dev, (void *) ioaddr);
 
-       if (pdev_is_sata(dev)) {
-               /* make sure IDE0/1 interrupts are not masked */
-               irq_mask = (1 << 22) | (1 << 23);
-               tmp = readl(ioaddr + 0x48);
-               if (tmp & irq_mask) {
-                       tmp &= ~irq_mask;
-                       writel(tmp, ioaddr + 0x48);
-                       readl(ioaddr + 0x48); /* flush */
-               }
-               writel(0, ioaddr + 0x148);
-               writel(0, ioaddr + 0x1C8);
-       }
-
-       writeb(0, ioaddr + 0xB4);
-       writeb(0, ioaddr + 0xF4);
-       tmpbyte = readb(ioaddr + 0x4A);
-
-       switch(tmpbyte & 0x30) {
-               case 0x00:
-                       /* In 100 MHz clocking, try and switch to 133 */
-                       writeb(tmpbyte|0x10, ioaddr + 0x4A);
-                       break;
-               case 0x10:
-                       /* On 133Mhz clocking */
-                       break;
-               case 0x20:
-                       /* On PCIx2 clocking */
-                       break;
-               case 0x30:
-                       /* Clocking is disabled */
-                       /* 133 clock attempt to force it on */
-                       writeb(tmpbyte & ~0x20, ioaddr + 0x4A);
-                       break;
-       }
-       
-       writeb(      0x72, ioaddr + 0xA1);
-       writew(    0x328A, ioaddr + 0xA2);
-       writel(0x62DD62DD, ioaddr + 0xA4);
-       writel(0x43924392, ioaddr + 0xA8);
-       writel(0x40094009, ioaddr + 0xAC);
-       writeb(      0x72, ioaddr + 0xE1);
-       writew(    0x328A, ioaddr + 0xE2);
-       writel(0x62DD62DD, ioaddr + 0xE4);
-       writel(0x43924392, ioaddr + 0xE8);
-       writel(0x40094009, ioaddr + 0xEC);
-
-       if (pdev_is_sata(dev)) {
-               writel(0xFFFF0000, ioaddr + 0x108);
-               writel(0xFFFF0000, ioaddr + 0x188);
-               writel(0x00680000, ioaddr + 0x148);
-               writel(0x00680000, ioaddr + 0x1C8);
-       }
-
-       tmpbyte = readb(ioaddr + 0x4A);
-
-       proc_reports_siimage(dev, (tmpbyte>>4), name);
        return 1;
 }
 
@@ -544,55 +486,92 @@ static unsigned int setup_mmio_siimage (struct pci_dev *dev, const char *name)
  *     @name: device name
  *
  *     Perform the initial PCI set up for this device. Attempt to switch
- *     to 133MHz clocking if the system isn't already set up to do it.
+ *     to 133 MHz clocking if the system isn't already set up to do it.
  */
 
-static unsigned int __devinit init_chipset_siimage(struct pci_dev *dev, const char *name)
+static unsigned int __devinit init_chipset_siimage(struct pci_dev *dev,
+                                                  const char *name)
 {
-       u8 rev = dev->revision, tmpbyte = 0, BA5_EN = 0;
+       unsigned long base, scsc_addr;
+       void __iomem *ioaddr = NULL;
+       u8 rev = dev->revision, tmp, BA5_EN;
 
        pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, rev ? 1 : 255);
 
        pci_read_config_byte(dev, 0x8A, &BA5_EN);
-       if ((BA5_EN & 0x01) || (pci_resource_start(dev, 5))) {
-               if (setup_mmio_siimage(dev, name)) {
-                       return 0;
+
+       if ((BA5_EN & 0x01) || pci_resource_start(dev, 5))
+               if (setup_mmio_siimage(dev, name))
+                       ioaddr = pci_get_drvdata(dev);
+
+       base = (unsigned long)ioaddr;
+
+       if (ioaddr && pdev_is_sata(dev)) {
+               u32 tmp32, irq_mask;
+
+               /* make sure IDE0/1 interrupts are not masked */
+               irq_mask = (1 << 22) | (1 << 23);
+               tmp32 = readl(ioaddr + 0x48);
+               if (tmp32 & irq_mask) {
+                       tmp32 &= ~irq_mask;
+                       writel(tmp32, ioaddr + 0x48);
+                       readl(ioaddr + 0x48); /* flush */
                }
+               writel(0, ioaddr + 0x148);
+               writel(0, ioaddr + 0x1C8);
        }
 
-       pci_write_config_byte(dev, 0x80, 0x00);
-       pci_write_config_byte(dev, 0x84, 0x00);
-       pci_read_config_byte(dev, 0x8A, &tmpbyte);
-       switch(tmpbyte & 0x30) {
-               case 0x00:
-                       /* 133 clock attempt to force it on */
-                       pci_write_config_byte(dev, 0x8A, tmpbyte|0x10);
-               case 0x30:
-                       /* if clocking is disabled */
-                       /* 133 clock attempt to force it on */
-                       pci_write_config_byte(dev, 0x8A, tmpbyte & ~0x20);
-               case 0x10:
-                       /* 133 already */
-                       break;
-               case 0x20:
-                       /* BIOS set PCI x2 clocking */
-                       break;
+       sil_iowrite8(dev, 0, base ? (base + 0xB4) : 0x80);
+       sil_iowrite8(dev, 0, base ? (base + 0xF4) : 0x84);
+
+       scsc_addr = base ? (base + 0x4A) : 0x8A;
+       tmp = sil_ioread8(dev, scsc_addr);
+
+       switch (tmp & 0x30) {
+       case 0x00:
+               /* On 100 MHz clocking, try and switch to 133 MHz */
+               sil_iowrite8(dev, tmp | 0x10, scsc_addr);
+               break;
+       case 0x30:
+               /* Clocking is disabled, attempt to force 133 MHz clocking. */
+               sil_iowrite8(dev, tmp & ~0x20, scsc_addr);
+       case 0x10:
+               /* On 133 MHz clocking. */
+               break;
+       case 0x20:
+               /* On PCIx2 clocking. */
+               break;
        }
 
-       pci_read_config_byte(dev,   0x8A, &tmpbyte);
+       tmp = sil_ioread8(dev, scsc_addr);
 
-       pci_write_config_byte(dev,  0xA1, 0x72);
-       pci_write_config_word(dev,  0xA2, 0x328A);
-       pci_write_config_dword(dev, 0xA4, 0x62DD62DD);
-       pci_write_config_dword(dev, 0xA8, 0x43924392);
-       pci_write_config_dword(dev, 0xAC, 0x40094009);
-       pci_write_config_byte(dev,  0xB1, 0x72);
-       pci_write_config_word(dev,  0xB2, 0x328A);
-       pci_write_config_dword(dev, 0xB4, 0x62DD62DD);
-       pci_write_config_dword(dev, 0xB8, 0x43924392);
-       pci_write_config_dword(dev, 0xBC, 0x40094009);
+       sil_iowrite8 (dev,       0x72, base + 0xA1);
+       sil_iowrite16(dev,     0x328A, base + 0xA2);
+       sil_iowrite32(dev, 0x62DD62DD, base + 0xA4);
+       sil_iowrite32(dev, 0x43924392, base + 0xA8);
+       sil_iowrite32(dev, 0x40094009, base + 0xAC);
+       sil_iowrite8 (dev,       0x72, base ? (base + 0xE1) : 0xB1);
+       sil_iowrite16(dev,     0x328A, base ? (base + 0xE2) : 0xB2);
+       sil_iowrite32(dev, 0x62DD62DD, base ? (base + 0xE4) : 0xB4);
+       sil_iowrite32(dev, 0x43924392, base ? (base + 0xE8) : 0xB8);
+       sil_iowrite32(dev, 0x40094009, base ? (base + 0xEC) : 0xBC);
+
+       if (base && pdev_is_sata(dev)) {
+               writel(0xFFFF0000, ioaddr + 0x108);
+               writel(0xFFFF0000, ioaddr + 0x188);
+               writel(0x00680000, ioaddr + 0x148);
+               writel(0x00680000, ioaddr + 0x1C8);
+       }
+
+       /* report the clocking mode of the controller */
+       if (!pdev_is_sata(dev)) {
+               static const char *clk_str[] =
+                       { "== 100", "== 133", "== 2X PCI", "DISABLED!" };
+
+               tmp >>= 4;
+               printk(KERN_INFO "%s: BASE CLOCK %s\n", name, clk_str[tmp & 3]);
+       }
 
-       proc_reports_siimage(dev, (tmpbyte>>4), name);
        return 0;
 }
 
@@ -602,8 +581,7 @@ static unsigned int __devinit init_chipset_siimage(struct pci_dev *dev, const ch
  *
  *     The basic setup here is fairly simple, we can use standard MMIO
  *     operations. However we do have to set the taskfile register offsets
- *     by hand as there isnt a standard defined layout for them this
- *     time.
+ *     by hand as there isn't a standard defined layout for them this time.
  *
  *     The hardware supports buffered taskfiles and also some rather nice
  *     extended PRD tables. For better SI3112 support use the libata driver
@@ -614,23 +592,21 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
        struct pci_dev *dev     = to_pci_dev(hwif->dev);
        void *addr              = pci_get_drvdata(dev);
        u8 ch                   = hwif->channel;
-       hw_regs_t               hw;
-       unsigned long           base;
+       struct ide_io_ports *io_ports = &hwif->io_ports;
+       unsigned long base;
 
        /*
-        *      Fill in the basic HWIF bits
+        *      Fill in the basic hwif bits
         */
-
+       hwif->host_flags |= IDE_HFLAG_MMIO;
        default_hwif_mmiops(hwif);
-       hwif->hwif_data                 = addr;
+       hwif->hwif_data = addr;
 
        /*
-        *      Now set up the hw. We have to do this ourselves as
-        *      the MMIO layout isnt the same as the standard port
-        *      based I/O
+        *      Now set up the hw. We have to do this ourselves as the
+        *      MMIO layout isn't the same as the standard port based I/O.
         */
-
-       memset(&hw, 0, sizeof(hw_regs_t));
+       memset(io_ports, 0, sizeof(*io_ports));
 
        base = (unsigned long)addr;
        if (ch)
@@ -639,21 +615,18 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
                base += 0x80;
 
        /*
-        *      The buffered task file doesn't have status/control
-        *      so we can't currently use it sanely since we want to
-        *      use LBA48 mode.
-        */     
-       hw.io_ports[IDE_DATA_OFFSET]    = base;
-       hw.io_ports[IDE_ERROR_OFFSET]   = base + 1;
-       hw.io_ports[IDE_NSECTOR_OFFSET] = base + 2;
-       hw.io_ports[IDE_SECTOR_OFFSET]  = base + 3;
-       hw.io_ports[IDE_LCYL_OFFSET]    = base + 4;
-       hw.io_ports[IDE_HCYL_OFFSET]    = base + 5;
-       hw.io_ports[IDE_SELECT_OFFSET]  = base + 6;
-       hw.io_ports[IDE_STATUS_OFFSET]  = base + 7;
-       hw.io_ports[IDE_CONTROL_OFFSET] = base + 10;
-
-       hw.io_ports[IDE_IRQ_OFFSET]     = 0;
+        *      The buffered task file doesn't have status/control, so we
+        *      can't currently use it sanely since we want to use LBA48 mode.
+        */
+       io_ports->data_addr     = base;
+       io_ports->error_addr    = base + 1;
+       io_ports->nsect_addr    = base + 2;
+       io_ports->lbal_addr     = base + 3;
+       io_ports->lbam_addr     = base + 4;
+       io_ports->lbah_addr     = base + 5;
+       io_ports->device_addr   = base + 6;
+       io_ports->status_addr   = base + 7;
+       io_ports->ctl_addr      = base + 10;
 
        if (pdev_is_sata(dev)) {
                base = (unsigned long)addr;
@@ -664,8 +637,6 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
                hwif->sata_scr[SATA_CONTROL_OFFSET]     = base + 0x100;
        }
 
-       memcpy(hwif->io_ports, hw.io_ports, sizeof(hwif->io_ports));
-
        hwif->irq = dev->irq;
 
        hwif->dma_base = (unsigned long)addr + (ch ? 0x08 : 0x00);
@@ -675,19 +646,17 @@ static void __devinit init_mmio_iops_siimage(ide_hwif_t *hwif)
 
 static int is_dev_seagate_sata(ide_drive_t *drive)
 {
-       const char *s = &drive->id->model[0];
-       unsigned len;
-
-       len = strnlen(s, sizeof(drive->id->model));
+       const char *s   = &drive->id->model[0];
+       unsigned len    = strnlen(s, sizeof(drive->id->model));
 
-       if ((len > 4) && (!memcmp(s, "ST", 2))) {
+       if ((len > 4) && (!memcmp(s, "ST", 2)))
                if ((!memcmp(s + len - 2, "AS", 2)) ||
                    (!memcmp(s + len - 3, "ASL", 3))) {
                        printk(KERN_INFO "%s: applying pessimistic Seagate "
                                         "errata fix\n", drive->name);
                        return 1;
                }
-       }
+
        return 0;
 }
 
@@ -704,7 +673,7 @@ static void __devinit sil_quirkproc(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
 
-       /* Try and raise the rqsize */
+       /* Try and rise the rqsize */
        if (!is_sata(hwif) || !is_dev_seagate_sata(drive))
                hwif->rqsize = 128;
 }
@@ -735,102 +704,98 @@ static void __devinit init_iops_siimage(ide_hwif_t *hwif)
 }
 
 /**
- *     ata66_siimage   -       check for 80 pin cable
+ *     sil_cable_detect        -       cable detection
  *     @hwif: interface to check
  *
- *     Check for the presence of an ATA66 capable cable on the
- *     interface.
+ *     Check for the presence of an ATA66 capable cable on the interface.
  */
 
-static u8 __devinit ata66_siimage(ide_hwif_t *hwif)
+static u8 __devinit sil_cable_detect(ide_hwif_t *hwif)
 {
-       struct pci_dev *dev = to_pci_dev(hwif->dev);
-       unsigned long addr = siimage_selreg(hwif, 0);
-       u8 ata66 = 0;
-
-       if (pci_get_drvdata(dev) == NULL)
-               pci_read_config_byte(dev, addr, &ata66);
-       else
-               ata66 = hwif->INB(addr);
+       struct pci_dev *dev     = to_pci_dev(hwif->dev);
+       unsigned long addr      = siimage_selreg(hwif, 0);
+       u8 ata66                = sil_ioread8(dev, addr);
 
        return (ata66 & 0x01) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
 }
 
-/**
- *     init_hwif_siimage       -       set up hwif structs
- *     @hwif: interface to set up
- *
- *     We do the basic set up of the interface structure. The SIIMAGE
- *     requires several custom handlers so we override the default
- *     ide DMA handlers appropriately
- */
-
-static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
-{
-       u8 sata = is_sata(hwif);
-
-       hwif->set_pio_mode = &sil_set_pio_mode;
-       hwif->set_dma_mode = &sil_set_dma_mode;
-       hwif->quirkproc = &sil_quirkproc;
-
-       if (sata) {
-               static int first = 1;
-
-               hwif->reset_poll = &sil_sata_reset_poll;
-               hwif->pre_reset = &sil_sata_pre_reset;
-               hwif->udma_filter = &sil_sata_udma_filter;
-
-               if (first) {
-                       printk(KERN_INFO "siimage: For full SATA support you should use the libata sata_sil module.\n");
-                       first = 0;
-               }
-       } else
-               hwif->udma_filter = &sil_pata_udma_filter;
-
-       hwif->cable_detect = ata66_siimage;
-
-       if (hwif->dma_base == 0)
-               return;
+static const struct ide_port_ops sil_pata_port_ops = {
+       .set_pio_mode           = sil_set_pio_mode,
+       .set_dma_mode           = sil_set_dma_mode,
+       .quirkproc              = sil_quirkproc,
+       .udma_filter            = sil_pata_udma_filter,
+       .cable_detect           = sil_cable_detect,
+};
 
-       if (sata)
-               hwif->host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
+static const struct ide_port_ops sil_sata_port_ops = {
+       .set_pio_mode           = sil_set_pio_mode,
+       .set_dma_mode           = sil_set_dma_mode,
+       .reset_poll             = sil_sata_reset_poll,
+       .pre_reset              = sil_sata_pre_reset,
+       .quirkproc              = sil_quirkproc,
+       .udma_filter            = sil_sata_udma_filter,
+       .cable_detect           = sil_cable_detect,
+};
 
-       if (hwif->mmio) {
-               hwif->ide_dma_test_irq = &siimage_mmio_ide_dma_test_irq;
-       } else {
-               hwif->ide_dma_test_irq = & siimage_io_ide_dma_test_irq;
-       }
-}
+static const struct ide_dma_ops sil_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ide_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = ide_dma_start,
+       .dma_end                = __ide_dma_end,
+       .dma_test_irq           = siimage_dma_test_irq,
+       .dma_timeout            = ide_dma_timeout,
+       .dma_lost_irq           = ide_dma_lost_irq,
+};
 
-#define DECLARE_SII_DEV(name_str)                      \
+#define DECLARE_SII_DEV(name_str, p_ops)               \
        {                                               \
                .name           = name_str,             \
                .init_chipset   = init_chipset_siimage, \
                .init_iops      = init_iops_siimage,    \
-               .init_hwif      = init_hwif_siimage,    \
+               .port_ops       = p_ops,                \
+               .dma_ops        = &sil_dma_ops,         \
                .pio_mask       = ATA_PIO4,             \
                .mwdma_mask     = ATA_MWDMA2,           \
                .udma_mask      = ATA_UDMA6,            \
        }
 
 static const struct ide_port_info siimage_chipsets[] __devinitdata = {
-       /* 0 */ DECLARE_SII_DEV("SiI680"),
-       /* 1 */ DECLARE_SII_DEV("SiI3112 Serial ATA"),
-       /* 2 */ DECLARE_SII_DEV("Adaptec AAR-1210SA")
+       /* 0 */ DECLARE_SII_DEV("SiI680",               &sil_pata_port_ops),
+       /* 1 */ DECLARE_SII_DEV("SiI3112 Serial ATA",   &sil_sata_port_ops),
+       /* 2 */ DECLARE_SII_DEV("Adaptec AAR-1210SA",   &sil_sata_port_ops)
 };
 
 /**
- *     siimage_init_one        -       pci layer discovery entry
+ *     siimage_init_one        -       PCI layer discovery entry
  *     @dev: PCI device
  *     @id: ident table entry
  *
- *     Called by the PCI code when it finds an SI680 or SI3112 controller.
+ *     Called by the PCI code when it finds an SiI680 or SiI3112 controller.
  *     We then use the IDE PCI generic helper to do most of the work.
  */
-static int __devinit siimage_init_one(struct pci_dev *dev, const struct pci_device_id *id)
+
+static int __devinit siimage_init_one(struct pci_dev *dev,
+                                     const struct pci_device_id *id)
 {
-       return ide_setup_pci_device(dev, &siimage_chipsets[id->driver_data]);
+       struct ide_port_info d;
+       u8 idx = id->driver_data;
+
+       d = siimage_chipsets[idx];
+
+       if (idx) {
+               static int first = 1;
+
+               if (first) {
+                       printk(KERN_INFO "siimage: For full SATA support you "
+                               "should use the libata sata_sil module.\n");
+                       first = 0;
+               }
+
+               d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA;
+       }
+
+       return ide_setup_pci_device(dev, &d);
 }
 
 static const struct pci_device_id siimage_pci_tbl[] = {
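
The hunks above show the core pattern of this series: per-hwif hook assignments in init_hwif_siimage() become const ide_port_ops/ide_dma_ops tables that the probe path merely points at. Below is a minimal, self-contained C sketch of that pattern; the struct, field, and function names are illustrative stand-ins, not the kernel's.

/* Simplified model of the hook-to-ops-table conversion (names are
 * illustrative): instead of assigning function pointers to each port at
 * init time, the driver declares one const ops table per hardware
 * variant and the core dereferences the table it was given. */
#include <stdio.h>

struct port_ops {
	void (*set_pio_mode)(int drive, int pio);
	int  (*cable_detect)(int port);
};

static void pata_set_pio(int drive, int pio) { printf("PATA PIO%d on drive %d\n", pio, drive); }
static void sata_set_pio(int drive, int pio) { printf("SATA PIO%d on drive %d\n", pio, drive); }
static int  detect_cable(int port)           { (void)port; return 80; /* pretend 80-wire */ }

static const struct port_ops pata_ops = { pata_set_pio, detect_cable };
static const struct port_ops sata_ops = { sata_set_pio, detect_cable };

struct port {
	const struct port_ops *ops;	/* shared, read-only method table */
};

int main(void)
{
	struct port p = { .ops = &sata_ops };	/* chosen once at probe time */

	p.ops->set_pio_mode(0, 4);
	printf("cable: %d-wire\n", p.ops->cable_detect(0));
	return 0;
}

The method tables end up in read-only memory and are shared by every port of the same variant, which is the design point behind dropping init_hwif_siimage().
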
index 181b647e5ca9625d4d91d9875d46894a2a249e1c..4b0b85d8faf5ae2cba38878d7db4a3ded5504aa7 100644 (file)
@@ -347,7 +347,7 @@ static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed)
                sis_program_timings(drive, speed);
 }
 
-static u8 sis5513_ata133_udma_filter(ide_drive_t *drive)
+static u8 sis_ata133_udma_filter(ide_drive_t *drive)
 {
        struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
        u32 regdw = 0;
@@ -514,7 +514,7 @@ static const struct sis_laptop sis_laptop[] = {
        { 0, }
 };
 
-static u8 __devinit ata66_sis5513(ide_hwif_t *hwif)
+static u8 __devinit sis_cable_detect(ide_hwif_t *hwif)
 {
        struct pci_dev *pdev = to_pci_dev(hwif->dev);
        const struct sis_laptop *lap = &sis_laptop[0];
@@ -543,21 +543,22 @@ static u8 __devinit ata66_sis5513(ide_hwif_t *hwif)
        return ata66 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
 }
 
-static void __devinit init_hwif_sis5513(ide_hwif_t *hwif)
-{
-       hwif->set_pio_mode = &sis_set_pio_mode;
-       hwif->set_dma_mode = &sis_set_dma_mode;
-
-       if (chipset_family >= ATA_133)
-               hwif->udma_filter = sis5513_ata133_udma_filter;
+static const struct ide_port_ops sis_port_ops = {
+       .set_pio_mode           = sis_set_pio_mode,
+       .set_dma_mode           = sis_set_dma_mode,
+       .cable_detect           = sis_cable_detect,
+};
 
-       hwif->cable_detect = ata66_sis5513;
-}
+static const struct ide_port_ops sis_ata133_port_ops = {
+       .set_pio_mode           = sis_set_pio_mode,
+       .set_dma_mode           = sis_set_dma_mode,
+       .udma_filter            = sis_ata133_udma_filter,
+       .cable_detect           = sis_cable_detect,
+};
 
 static const struct ide_port_info sis5513_chipset __devinitdata = {
        .name           = "SIS5513",
        .init_chipset   = init_chipset_sis5513,
-       .init_hwif      = init_hwif_sis5513,
        .enablebits     = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
        .host_flags     = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_NO_AUTODMA,
        .pio_mask       = ATA_PIO4,
@@ -572,6 +573,11 @@ static int __devinit sis5513_init_one(struct pci_dev *dev, const struct pci_devi
        if (sis_find_family(dev) == 0)
                return -ENOTSUPP;
 
+       if (chipset_family >= ATA_133)
+               d.port_ops = &sis_ata133_port_ops;
+       else
+               d.port_ops = &sis_port_ops;
+
        d.udma_mask = udma_rates[chipset_family];
 
        return ide_setup_pci_device(dev, &d);
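
The sis5513 conversion above keeps two const ops tables that differ only in the optional udma_filter hook and picks one at probe time from the detected chipset family. A small runnable sketch of that idiom follows; the names, the rate table, and the flag values are made up for the demonstration.

/* Minimal sketch of capability-keyed setup (illustrative names only):
 * probe indexes a rate table by the detected family and selects an ops
 * table whose optional hook may simply be left NULL; callers test for
 * NULL before invoking the hook. */
#include <stdio.h>

typedef unsigned char u8;

struct port_ops {
	u8 (*udma_filter)(int drive);	/* optional hook */
};

static u8 ata133_udma_filter(int drive) { (void)drive; return 0x7f; /* allow UDMA0-6 */ }

static const struct port_ops plain_ops  = { .udma_filter = NULL };
static const struct port_ops ata133_ops = { .udma_filter = ata133_udma_filter };

static const u8 udma_rates[] = { 0x00, 0x07, 0x1f, 0x3f, 0x7f };

int main(void)
{
	int family = 4;				/* pretend probe found an ATA-133 part */
	const struct port_ops *ops = (family >= 4) ? &ata133_ops : &plain_ops;
	u8 mask = udma_rates[family];

	if (ops->udma_filter)			/* optional hook may be absent */
		mask &= ops->udma_filter(0);
	printf("UDMA mask: 0x%02x\n", mask);
	return 0;
}
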
index 40b3eeb2d8479c6486d043fc3d1ec53849bbafe2..ce84fa045d39186a05b08f41a643a00428ff4407 100644 (file)
@@ -179,7 +179,7 @@ static void sl82c105_dma_start(ide_drive_t *drive)
        struct pci_dev *dev     = to_pci_dev(hwif->dev);
        int reg                 = 0x44 + drive->dn * 4;
 
-       DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name));
+       DBG(("%s(drive:%s)\n", __func__, drive->name));
 
        pci_write_config_word(dev, reg, drive->drive_data >> 16);
 
@@ -203,7 +203,7 @@ static int sl82c105_dma_end(ide_drive_t *drive)
        int reg                 = 0x44 + drive->dn * 4;
        int ret;
 
-       DBG(("%s(drive:%s)\n", __FUNCTION__, drive->name));
+       DBG(("%s(drive:%s)\n", __func__, drive->name));
 
        ret = __ide_dma_end(drive);
 
@@ -232,7 +232,7 @@ static void sl82c105_resetproc(ide_drive_t *drive)
  * Return the revision of the Winbond bridge
  * which this function is part of.
  */
-static unsigned int sl82c105_bridge_revision(struct pci_dev *dev)
+static u8 sl82c105_bridge_revision(struct pci_dev *dev)
 {
        struct pci_dev *bridge;
 
@@ -282,63 +282,59 @@ static unsigned int __devinit init_chipset_sl82c105(struct pci_dev *dev, const c
        return dev->irq;
 }
 
-/*
- * Initialise IDE channel
- */
-static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif)
-{
-       struct pci_dev *dev = to_pci_dev(hwif->dev);
-       unsigned int rev;
-
-       DBG(("init_hwif_sl82c105(hwif: ide%d)\n", hwif->index));
-
-       hwif->set_pio_mode      = &sl82c105_set_pio_mode;
-       hwif->set_dma_mode      = &sl82c105_set_dma_mode;
-       hwif->resetproc         = &sl82c105_resetproc;
-
-       if (!hwif->dma_base)
-               return;
-
-       rev = sl82c105_bridge_revision(dev);
-       if (rev <= 5) {
-               /*
-                * Never ever EVER under any circumstances enable
-                * DMA when the bridge is this old.
-                */
-               printk("    %s: Winbond W83C553 bridge revision %d, "
-                      "BM-DMA disabled\n", hwif->name, rev);
-               return;
-       }
-
-       hwif->mwdma_mask = ATA_MWDMA2;
-
-       hwif->dma_lost_irq              = &sl82c105_dma_lost_irq;
-       hwif->dma_start                 = &sl82c105_dma_start;
-       hwif->ide_dma_end               = &sl82c105_dma_end;
-       hwif->dma_timeout               = &sl82c105_dma_timeout;
+static const struct ide_port_ops sl82c105_port_ops = {
+       .set_pio_mode           = sl82c105_set_pio_mode,
+       .set_dma_mode           = sl82c105_set_dma_mode,
+       .resetproc              = sl82c105_resetproc,
+};
 
-       if (hwif->mate)
-               hwif->serialized = hwif->mate->serialized = 1;
-}
+static const struct ide_dma_ops sl82c105_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ide_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = sl82c105_dma_start,
+       .dma_end                = sl82c105_dma_end,
+       .dma_test_irq           = ide_dma_test_irq,
+       .dma_lost_irq           = sl82c105_dma_lost_irq,
+       .dma_timeout            = sl82c105_dma_timeout,
+};
 
 static const struct ide_port_info sl82c105_chipset __devinitdata = {
        .name           = "W82C105",
        .init_chipset   = init_chipset_sl82c105,
-       .init_hwif      = init_hwif_sl82c105,
        .enablebits     = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
+       .port_ops       = &sl82c105_port_ops,
+       .dma_ops        = &sl82c105_dma_ops,
        .host_flags     = IDE_HFLAG_IO_32BIT |
                          IDE_HFLAG_UNMASK_IRQS |
 /* FIXME: check for Compatibility mode in generic IDE PCI code */
 #if defined(CONFIG_LOPEC) || defined(CONFIG_SANDPOINT)
                          IDE_HFLAG_FORCE_LEGACY_IRQS |
 #endif
+                         IDE_HFLAG_SERIALIZE_DMA |
                          IDE_HFLAG_NO_AUTODMA,
        .pio_mask       = ATA_PIO5,
+       .mwdma_mask     = ATA_MWDMA2,
 };
 
 static int __devinit sl82c105_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
-       return ide_setup_pci_device(dev, &sl82c105_chipset);
+       struct ide_port_info d = sl82c105_chipset;
+       u8 rev = sl82c105_bridge_revision(dev);
+
+       if (rev <= 5) {
+               /*
+                * Never ever EVER under any circumstances enable
+                * DMA when the bridge is this old.
+                */
+               printk(KERN_INFO "W82C105_IDE: Winbond W83C553 bridge "
+                                "revision %d, BM-DMA disabled\n", rev);
+               d.dma_ops = NULL;
+               d.mwdma_mask = 0;
+               d.host_flags &= ~IDE_HFLAG_SERIALIZE_DMA;
+       }
+
+       return ide_setup_pci_device(dev, &d);
 }
 
 static const struct pci_device_id sl82c105_pci_tbl[] = {
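
sl82c105_init_one() above illustrates the other half of the pattern: the shared port_info template stays const, and per-device quirks (here, refusing BM-DMA on old Winbond bridge revisions) are applied to a local copy. A self-contained sketch of that idiom, with simplified stand-in types and fields:

/* "Copy the const template, then degrade it" at probe time: quirks only
 * touch the writable per-device copy, never the shared template. */
#include <stdio.h>
#include <stddef.h>

struct port_info {
	const char *name;
	const void *dma_ops;	/* NULL means PIO only */
	unsigned mwdma_mask;
};

static const int fake_dma_ops = 1;	/* placeholder for a real ops table */

static const struct port_info template = {
	.name = "W82C105-model", .dma_ops = &fake_dma_ops, .mwdma_mask = 0x07,
};

int main(void)
{
	int bridge_rev = 4;			/* pretend a revision <= 5 bridge */
	struct port_info d = template;		/* per-device, writable copy */

	if (bridge_rev <= 5) {			/* too old to trust with BM-DMA */
		d.dma_ops = NULL;
		d.mwdma_mask = 0;
	}
	printf("%s: DMA %s\n", d.name, d.dma_ops ? "enabled" : "disabled");
	return 0;
}
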
index eab557c45d1b7d5403c219c0f9ff6baed6800e12..dae6e2c94d865dbdef0d8df0710ccdfe90854f53 100644 (file)
@@ -125,18 +125,16 @@ static u8 __devinit slc90e66_cable_detect(ide_hwif_t *hwif)
        return (reg47 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
 }
 
-static void __devinit init_hwif_slc90e66(ide_hwif_t *hwif)
-{
-       hwif->set_pio_mode = &slc90e66_set_pio_mode;
-       hwif->set_dma_mode = &slc90e66_set_dma_mode;
-
-       hwif->cable_detect = slc90e66_cable_detect;
-}
+static const struct ide_port_ops slc90e66_port_ops = {
+       .set_pio_mode           = slc90e66_set_pio_mode,
+       .set_dma_mode           = slc90e66_set_dma_mode,
+       .cable_detect           = slc90e66_cable_detect,
+};
 
 static const struct ide_port_info slc90e66_chipset __devinitdata = {
        .name           = "SLC90E66",
-       .init_hwif      = init_hwif_slc90e66,
        .enablebits     = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
+       .port_ops       = &slc90e66_port_ops,
        .host_flags     = IDE_HFLAG_LEGACY_IRQS,
        .pio_mask       = ATA_PIO4,
        .swdma_mask     = ATA_SWDMA2_ONLY,
index c15435182e3c33273805ce3ba7cb307a6ad873f2..9b4b27a4c71121c30298052a30c653f5ccffee05 100644 (file)
@@ -157,11 +157,6 @@ static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
        /* Store the system control register base for convenience... */
        hwif->config_data = sc_base;
 
-       hwif->set_pio_mode = &tc86c001_set_pio_mode;
-       hwif->set_dma_mode = &tc86c001_set_mode;
-
-       hwif->cable_detect = tc86c001_cable_detect;
-
        if (!hwif->dma_base)
                return;
 
@@ -173,8 +168,6 @@ static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
 
        /* Sector Count Register limit */
        hwif->rqsize     = 0xffff;
-
-       hwif->dma_start         = &tc86c001_dma_start;
 }
 
 static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev,
@@ -187,10 +180,29 @@ static unsigned int __devinit init_chipset_tc86c001(struct pci_dev *dev,
        return err;
 }
 
+static const struct ide_port_ops tc86c001_port_ops = {
+       .set_pio_mode           = tc86c001_set_pio_mode,
+       .set_dma_mode           = tc86c001_set_mode,
+       .cable_detect           = tc86c001_cable_detect,
+};
+
+static const struct ide_dma_ops tc86c001_dma_ops = {
+       .dma_host_set           = ide_dma_host_set,
+       .dma_setup              = ide_dma_setup,
+       .dma_exec_cmd           = ide_dma_exec_cmd,
+       .dma_start              = tc86c001_dma_start,
+       .dma_end                = __ide_dma_end,
+       .dma_test_irq           = ide_dma_test_irq,
+       .dma_lost_irq           = ide_dma_lost_irq,
+       .dma_timeout            = ide_dma_timeout,
+};
+
 static const struct ide_port_info tc86c001_chipset __devinitdata = {
        .name           = "TC86C001",
        .init_chipset   = init_chipset_tc86c001,
        .init_hwif      = init_hwif_tc86c001,
+       .port_ops       = &tc86c001_port_ops,
+       .dma_ops        = &tc86c001_dma_ops,
        .host_flags     = IDE_HFLAG_SINGLE | IDE_HFLAG_OFF_BOARD |
                          IDE_HFLAG_ABUSE_SET_DMA_MODE,
        .pio_mask       = ATA_PIO4,
index 3316b197c779d366093011753c2e32c3f5998b32..db65a558d4eca62b26bff026d5c6fa3682d69555 100644 (file)
@@ -87,16 +87,15 @@ static void triflex_set_pio_mode(ide_drive_t *drive, const u8 pio)
        triflex_set_mode(drive, XFER_PIO_0 + pio);
 }
 
-static void __devinit init_hwif_triflex(ide_hwif_t *hwif)
-{
-       hwif->set_pio_mode = &triflex_set_pio_mode;
-       hwif->set_dma_mode = &triflex_set_mode;
-}
+static const struct ide_port_ops triflex_port_ops = {
+       .set_pio_mode           = triflex_set_pio_mode,
+       .set_dma_mode           = triflex_set_mode,
+};
 
 static const struct ide_port_info triflex_device __devinitdata = {
        .name           = "TRIFLEX",
-       .init_hwif      = init_hwif_triflex,
        .enablebits     = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
+       .port_ops       = &triflex_port_ops,
        .pio_mask       = ATA_PIO4,
        .swdma_mask     = ATA_SWDMA2,
        .mwdma_mask     = ATA_MWDMA2,
index 2b8f3a2837d7a6afebcb0a3c5ad05bc37ad7db1d..a8a3138682eff4841ac9784a4cce9d46f3ba1a89 100644 (file)
@@ -214,7 +214,7 @@ static void trm290_dma_start(ide_drive_t *drive)
 {
 }
 
-static int trm290_ide_dma_end (ide_drive_t *drive)
+static int trm290_dma_end(ide_drive_t *drive)
 {
        u16 status;
 
@@ -225,7 +225,7 @@ static int trm290_ide_dma_end (ide_drive_t *drive)
        return status != 0x00ff;
 }
 
-static int trm290_ide_dma_test_irq (ide_drive_t *drive)
+static int trm290_dma_test_irq(ide_drive_t *drive)
 {
        u16 status;
 
@@ -254,22 +254,11 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
        hwif->config_data = cfg_base;
        hwif->dma_base = (cfg_base + 4) ^ (hwif->channel ? 0x80 : 0);
 
-       printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx",
+       printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx\n",
               hwif->name, hwif->dma_base, hwif->dma_base + 3);
 
-       if (!request_region(hwif->dma_base, 4, hwif->name)) {
-               printk(KERN_CONT " -- Error, ports in use.\n");
+       if (ide_allocate_dma_engine(hwif))
                return;
-       }
-
-       hwif->dmatable_cpu = pci_alloc_consistent(dev, PRD_ENTRIES * PRD_BYTES,
-                                                 &hwif->dmatable_dma);
-       if (!hwif->dmatable_cpu) {
-               printk(KERN_CONT " -- Error, unable to allocate DMA table.\n");
-               release_region(hwif->dma_base, 4);
-               return;
-       }
-       printk(KERN_CONT "\n");
 
        local_irq_save(flags);
        /* put config reg into first byte of hwif->select_data */
@@ -291,14 +280,6 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
                /* sharing IRQ with mate */
                hwif->irq = hwif->mate->irq;
 
-       hwif->dma_host_set      = &trm290_dma_host_set;
-       hwif->dma_setup         = &trm290_dma_setup;
-       hwif->dma_exec_cmd      = &trm290_dma_exec_cmd;
-       hwif->dma_start         = &trm290_dma_start;
-       hwif->ide_dma_end       = &trm290_ide_dma_end;
-       hwif->ide_dma_test_irq  = &trm290_ide_dma_test_irq;
-
-       hwif->selectproc = &trm290_selectproc;
 #if 1
        {
        /*
@@ -317,7 +298,7 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
                if (old != compat && old_mask == 0xff) {
                        /* leave lower 10 bits untouched */
                        compat += (next_offset += 0x400);
-                       hwif->io_ports[IDE_CONTROL_OFFSET] = compat + 2;
+                       hwif->io_ports.ctl_addr = compat + 2;
                        outw(compat | 1, hwif->config_data);
                        new = inw(hwif->config_data);
                        printk(KERN_INFO "%s: control basereg workaround: "
@@ -328,10 +309,27 @@ static void __devinit init_hwif_trm290(ide_hwif_t *hwif)
 #endif
 }
 
+static const struct ide_port_ops trm290_port_ops = {
+       .selectproc             = trm290_selectproc,
+};
+
+static struct ide_dma_ops trm290_dma_ops = {
+       .dma_host_set           = trm290_dma_host_set,
+       .dma_setup              = trm290_dma_setup,
+       .dma_exec_cmd           = trm290_dma_exec_cmd,
+       .dma_start              = trm290_dma_start,
+       .dma_end                = trm290_dma_end,
+       .dma_test_irq           = trm290_dma_test_irq,
+       .dma_lost_irq           = ide_dma_lost_irq,
+       .dma_timeout            = ide_dma_timeout,
+};
+
 static const struct ide_port_info trm290_chipset __devinitdata = {
        .name           = "TRM290",
        .init_hwif      = init_hwif_trm290,
        .chipset        = ide_trm290,
+       .port_ops       = &trm290_port_ops,
+       .dma_ops        = &trm290_dma_ops,
        .host_flags     = IDE_HFLAG_NO_ATAPI_DMA |
 #if 0 /* play it safe for now */
                          IDE_HFLAG_TRUST_BIOS_FOR_DMA |
index cff3cafedc4740a1080f48f6b5c93bf1d7d57ec0..566e0ecb8db1251afe46065a7a371cdcea2f13b7 100644 (file)
@@ -340,7 +340,7 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const
         * Determine system bus clock.
         */
 
-       via_clock = system_bus_clock() * 1000;
+       via_clock = (ide_pci_clk ? ide_pci_clk : system_bus_clock()) * 1000;
 
        switch (via_clock) {
                case 33000: via_clock = 33333; break;
@@ -415,19 +415,17 @@ static u8 __devinit via82cxxx_cable_detect(ide_hwif_t *hwif)
                return ATA_CBL_PATA40;
 }
 
-static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif)
-{
-       hwif->set_pio_mode = &via_set_pio_mode;
-       hwif->set_dma_mode = &via_set_drive;
-
-       hwif->cable_detect = via82cxxx_cable_detect;
-}
+static const struct ide_port_ops via_port_ops = {
+       .set_pio_mode           = via_set_pio_mode,
+       .set_dma_mode           = via_set_drive,
+       .cable_detect           = via82cxxx_cable_detect,
+};
 
 static const struct ide_port_info via82cxxx_chipset __devinitdata = {
        .name           = "VP_IDE",
        .init_chipset   = init_chipset_via82cxxx,
-       .init_hwif      = init_hwif_via82cxxx,
        .enablebits     = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
+       .port_ops       = &via_port_ops,
        .host_flags     = IDE_HFLAG_PIO_NO_BLACKLIST |
                          IDE_HFLAG_ABUSE_SET_DMA_MODE |
                          IDE_HFLAG_POST_SET_MODE |
index 467656f06ccc15b0a2bb17c81cadcfc7fa81ccca..f0e638dcc3ab5e7dd737abc8799d79179e202dda 100644 (file)
@@ -131,7 +131,7 @@ static int pcmcia_schlvl = PCMCIA_SCHLVL;
 #if defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_DIRECT)
 static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
 {
-       unsigned long *p = hw->io_ports;
+       unsigned long *p = hw->io_ports_array;
        int i;
 
        typedef struct {
@@ -314,7 +314,7 @@ static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
 #if defined(CONFIG_IDE_EXT_DIRECT)
 static int __init m8xx_ide_init_ports(hw_regs_t *hw, unsigned long data_port)
 {
-       unsigned long *p = hw->io_ports;
+       unsigned long *p = hw->io_ports_array;
        int i;
 
        u32 ide_phy_base;
@@ -438,10 +438,14 @@ static void m8xx_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
 #elif defined(CONFIG_IDE_EXT_DIRECT)
 
        printk("%s[%d] %s: not implemented yet!\n",
-               __FILE__,__LINE__,__FUNCTION__);
+               __FILE__, __LINE__, __func__);
 #endif /* defined(CONFIG_IDE_8xx_PCCARD) || defined(CONFIG_IDE_8xx_PCMCIA */
 }
 
+static const struct ide_port_ops m8xx_port_ops = {
+       .set_pio_mode           = m8xx_ide_set_pio_mode,
+};
+
 static void
 ide_interrupt_ack (void *dev)
 {
@@ -807,26 +811,28 @@ static int __init mpc8xx_ide_probe(void)
 #ifdef IDE0_BASE_OFFSET
        memset(&hw, 0, sizeof(hw));
        if (!m8xx_ide_init_ports(&hw, 0)) {
-               ide_hwif_t *hwif = &ide_hwifs[0];
+               ide_hwif_t *hwif = ide_find_port();
 
-               ide_init_port_hw(hwif, &hw);
-               hwif->mmio = 1;
-               hwif->pio_mask = ATA_PIO4;
-               hwif->set_pio_mode = m8xx_ide_set_pio_mode;
+               if (hwif) {
+                       ide_init_port_hw(hwif, &hw);
+                       hwif->pio_mask = ATA_PIO4;
+                       hwif->port_ops = &m8xx_port_ops;
 
-               idx[0] = 0;
+                       idx[0] = hwif->index;
+               }
        }
 #ifdef IDE1_BASE_OFFSET
        memset(&hw, 0, sizeof(hw));
        if (!m8xx_ide_init_ports(&hw, 1)) {
-               ide_hwif_t *mate = &ide_hwifs[1];
+               ide_hwif_t *mate = ide_find_port();
 
-               ide_init_port_hw(mate, &hw);
-               mate->mmio = 1;
-               mate->pio_mask = ATA_PIO4;
-               mate->set_pio_mode = m8xx_ide_set_pio_mode;
+               if (mate) {
+                       ide_init_port_hw(mate, &hw);
+                       mate->pio_mask = ATA_PIO4;
+                       mate->port_ops = &m8xx_port_ops;
 
-               idx[1] = 1;
+                       idx[1] = mate->index;
+               }
        }
 #endif
 #endif
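
The mpc8xx hunks above replace direct indexing of the fixed ide_hwifs[] array with ide_find_port(), which can fail, so each caller now checks for NULL before initializing the port. A tiny runnable model of that change in allocation discipline (all names here are illustrative, not the kernel's):

/* Moving from fixed array slots to a "find a free port" helper that can
 * fail: callers must cope with NULL instead of assuming slot 0/1 are theirs. */
#include <stdio.h>
#include <stddef.h>

#define MAX_PORTS 2

struct port { int in_use; int index; };

static struct port ports[MAX_PORTS];

static struct port *find_free_port(void)
{
	for (int i = 0; i < MAX_PORTS; i++)
		if (!ports[i].in_use) {
			ports[i].in_use = 1;
			ports[i].index = i;
			return &ports[i];
		}
	return NULL;			/* no free slot */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {	/* third request must fail cleanly */
		struct port *p = find_free_port();
		if (p)
			printf("claimed port %d\n", p->index);
		else
			printf("no port available\n");
	}
	return 0;
}
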
index 177961edc430b890609f198c04a2da7bbdf83ac8..48aa019127bc9c35522741bb022381f8ec35ca03 100644 (file)
@@ -409,7 +409,7 @@ kauai_lookup_timing(struct kauai_timing* table, int cycle_time)
  */
 #define IDE_WAKEUP_DELAY       (1*HZ)
 
-static int pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif);
+static int pmac_ide_init_dma(ide_hwif_t *, const struct ide_port_info *);
 static int pmac_ide_build_dmatable(ide_drive_t *drive, struct request *rq);
 static void pmac_ide_selectproc(ide_drive_t *drive);
 static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
@@ -417,7 +417,7 @@ static void pmac_ide_kauai_selectproc(ide_drive_t *drive);
 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
 
 #define PMAC_IDE_REG(x) \
-       ((void __iomem *)((drive)->hwif->io_ports[IDE_DATA_OFFSET] + (x)))
+       ((void __iomem *)((drive)->hwif->io_ports.data_addr + (x)))
 
 /*
  * Apply the timings of the proper unit (master/slave) to the shared
@@ -918,11 +918,30 @@ pmac_ide_do_resume(ide_hwif_t *hwif)
        return 0;
 }
 
+static const struct ide_port_ops pmac_ide_ata6_port_ops = {
+       .set_pio_mode           = pmac_ide_set_pio_mode,
+       .set_dma_mode           = pmac_ide_set_dma_mode,
+       .selectproc             = pmac_ide_kauai_selectproc,
+};
+
+static const struct ide_port_ops pmac_ide_port_ops = {
+       .set_pio_mode           = pmac_ide_set_pio_mode,
+       .set_dma_mode           = pmac_ide_set_dma_mode,
+       .selectproc             = pmac_ide_selectproc,
+};
+
+static const struct ide_dma_ops pmac_dma_ops;
+
 static const struct ide_port_info pmac_port_info = {
+       .init_dma               = pmac_ide_init_dma,
        .chipset                = ide_pmac,
+#ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
+       .dma_ops                = &pmac_dma_ops,
+#endif
+       .port_ops               = &pmac_ide_port_ops,
        .host_flags             = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA |
                                  IDE_HFLAG_POST_SET_MODE |
-                                 IDE_HFLAG_NO_DMA | /* no SFF-style DMA */
+                                 IDE_HFLAG_MMIO |
                                  IDE_HFLAG_UNMASK_IRQS,
        .pio_mask               = ATA_PIO4,
        .mwdma_mask             = ATA_MWDMA2,
@@ -947,12 +966,15 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
        pmif->broken_dma = pmif->broken_dma_warn = 0;
        if (of_device_is_compatible(np, "shasta-ata")) {
                pmif->kind = controller_sh_ata6;
+               d.port_ops = &pmac_ide_ata6_port_ops;
                d.udma_mask = ATA_UDMA6;
        } else if (of_device_is_compatible(np, "kauai-ata")) {
                pmif->kind = controller_un_ata6;
+               d.port_ops = &pmac_ide_ata6_port_ops;
                d.udma_mask = ATA_UDMA5;
        } else if (of_device_is_compatible(np, "K2-UATA")) {
                pmif->kind = controller_k2_ata6;
+               d.port_ops = &pmac_ide_ata6_port_ops;
                d.udma_mask = ATA_UDMA5;
        } else if (of_device_is_compatible(np, "keylargo-ata")) {
                if (strcmp(np->name, "ata-4") == 0) {
@@ -1029,37 +1051,29 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif, hw_regs_t *hw)
        default_hwif_mmiops(hwif);
                hwif->OUTBSYNC = pmac_outbsync;
 
-       /* Tell common code _not_ to mess with resources */
-       hwif->mmio = 1;
        hwif->hwif_data = pmif;
        ide_init_port_hw(hwif, hw);
-       hwif->noprobe = pmif->mediabay;
        hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
-       hwif->set_pio_mode = pmac_ide_set_pio_mode;
-       if (pmif->kind == controller_un_ata6
-           || pmif->kind == controller_k2_ata6
-           || pmif->kind == controller_sh_ata6)
-               hwif->selectproc = pmac_ide_kauai_selectproc;
-       else
-               hwif->selectproc = pmac_ide_selectproc;
-       hwif->set_dma_mode = pmac_ide_set_dma_mode;
 
        printk(KERN_INFO "ide%d: Found Apple %s controller, bus ID %d%s, irq %d\n",
               hwif->index, model_name[pmif->kind], pmif->aapl_bus_id,
               pmif->mediabay ? " (mediabay)" : "", hwif->irq);
-                       
+
+       if (pmif->mediabay) {
 #ifdef CONFIG_PMAC_MEDIABAY
-       if (pmif->mediabay && check_media_bay_by_base(pmif->regbase, MB_CD) == 0)
-               hwif->noprobe = 0;
-#endif /* CONFIG_PMAC_MEDIABAY */
+               if (check_media_bay_by_base(pmif->regbase, MB_CD)) {
+#else
+               if (1) {
+#endif
+                       hwif->drives[0].noprobe = 1;
+                       hwif->drives[1].noprobe = 1;
+               }
+       }
 
 #ifdef CONFIG_BLK_DEV_IDEDMA_PMAC
        if (pmif->cable_80 == 0)
                d.udma_mask &= ATA_UDMA2;
-       /* has a DBDMA controller channel */
-       if (pmif->dma_regs == 0 || pmac_ide_setup_dma(pmif, hwif) < 0)
 #endif
-               d.udma_mask = d.mwdma_mask = 0;
 
        idx[0] = hwif->index;
 
@@ -1073,8 +1087,9 @@ static void __devinit pmac_ide_init_ports(hw_regs_t *hw, unsigned long base)
        int i;
 
        for (i = 0; i < 8; ++i)
-               hw->io_ports[i] = base + i * 0x10;
-       hw->io_ports[8] = base + 0x160;
+               hw->io_ports_array[i] = base + i * 0x10;
+
+       hw->io_ports.ctl_addr = base + 0x160;
 }
 
 /*
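
Several files in this commit switch from hw->io_ports[i] indexing to either hw->io_ports_array[i] or named fields such as hw->io_ports.ctl_addr. The sketch below models the register block as a union of an indexable array and a named-field struct; the layout and field names are simplified guesses for illustration only.

/* Positional loops can still fill the block while other code reads named
 * fields; both views alias the same storage. */
#include <stdio.h>

typedef union {
	unsigned long io_ports_array[9];
	struct {
		unsigned long data_addr;
		unsigned long error_addr;	/* intermediate task-file registers */
		unsigned long nsect_addr;
		unsigned long lbal_addr;
		unsigned long lbam_addr;
		unsigned long lbah_addr;
		unsigned long device_addr;
		unsigned long status_addr;
		unsigned long ctl_addr;
	} io_ports;
} hw_regs;

int main(void)
{
	hw_regs hw = { { 0 } };
	unsigned long base = 0x1f0;

	for (int i = 0; i < 8; i++)		/* positional fill, as before */
		hw.io_ports_array[i] = base + i;
	hw.io_ports.ctl_addr = 0x3f6;		/* named access, as after */

	printf("data %#lx ctl %#lx\n", hw.io_ports.data_addr, hw.io_ports.ctl_addr);
	return 0;
}
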
@@ -1662,18 +1677,31 @@ pmac_ide_dma_lost_irq (ide_drive_t *drive)
        printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status);
 }
 
+static const struct ide_dma_ops pmac_dma_ops = {
+       .dma_host_set           = pmac_ide_dma_host_set,
+       .dma_setup              = pmac_ide_dma_setup,
+       .dma_exec_cmd           = pmac_ide_dma_exec_cmd,
+       .dma_start              = pmac_ide_dma_start,
+       .dma_end                = pmac_ide_dma_end,
+       .dma_test_irq           = pmac_ide_dma_test_irq,
+       .dma_timeout            = ide_dma_timeout,
+       .dma_lost_irq           = pmac_ide_dma_lost_irq,
+};
+
 /*
  * Allocate the data structures needed for using DMA with an interface
  * and fill the proper list of functions pointers
  */
-static int __devinit pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
+static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
+                                      const struct ide_port_info *d)
 {
+       pmac_ide_hwif_t *pmif = (pmac_ide_hwif_t *)hwif->hwif_data;
        struct pci_dev *dev = to_pci_dev(hwif->dev);
 
        /* We won't need pci_dev if we switch to generic consistent
         * DMA routines ...
         */
-       if (dev == NULL)
+       if (dev == NULL || pmif->dma_regs == 0)
                return -ENODEV;
        /*
         * Allocate space for the DBDMA commands.
@@ -1692,18 +1720,14 @@ static int __devinit pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
 
        hwif->sg_max_nents = MAX_DCMDS;
 
-       hwif->dma_host_set = &pmac_ide_dma_host_set;
-       hwif->dma_setup = &pmac_ide_dma_setup;
-       hwif->dma_exec_cmd = &pmac_ide_dma_exec_cmd;
-       hwif->dma_start = &pmac_ide_dma_start;
-       hwif->ide_dma_end = &pmac_ide_dma_end;
-       hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq;
-       hwif->dma_timeout = &ide_dma_timeout;
-       hwif->dma_lost_irq = &pmac_ide_dma_lost_irq;
-
        return 0;
 }
-
+#else
+static int __devinit pmac_ide_init_dma(ide_hwif_t *hwif,
+                                      const struct ide_port_info *d)
+{
+       return -EOPNOTSUPP;
+}
 #endif /* CONFIG_BLK_DEV_IDEDMA_PMAC */
 
 module_init(pmac_ide_probe);
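
The pmac conversion above turns the old setup helper into an init_dma() hook in the port_info, with a stub that returns -EOPNOTSUPP when DMA support is compiled out, so the core can fall back to PIO uniformly. A compilable sketch of that shape, using invented names and a made-up config symbol:

/* Per-host init_dma() hook with a compiled-out stub: the core calls the
 * hook unconditionally and treats a negative return as "run this port in
 * PIO mode". */
#include <stdio.h>
#include <errno.h>

struct host { int dma_regs; };

#ifdef CONFIG_MODEL_DMA
static int model_init_dma(struct host *h)
{
	if (!h->dma_regs)
		return -ENODEV;		/* no DMA channel wired up */
	/* ...allocate descriptor memory here... */
	return 0;
}
#else
static int model_init_dma(struct host *h)
{
	(void)h;
	return -EOPNOTSUPP;		/* DMA support compiled out */
}
#endif

int main(void)
{
	struct host h = { .dma_regs = 0 };

	if (model_init_dma(&h) < 0)
		printf("falling back to PIO\n");
	else
		printf("DMA ready\n");
	return 0;
}
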
index 6302010fd8e232ab17ad71d56f2c4e615aebc9da..5171601fb2556646544587466a14b8e973389a15 100644 (file)
@@ -72,16 +72,16 @@ static void ide_pci_clear_simplex(unsigned long dma_base, const char *name)
 }
 
 /**
- *     ide_get_or_set_dma_base         -       setup BMIBA
- *     @d: IDE port info
+ *     ide_pci_dma_base        -       setup BMIBA
  *     @hwif: IDE interface
+ *     @d: IDE port info
  *
  *     Fetch the DMA Bus-Master-I/O-Base-Address (BMIBA) from PCI space.
  *     Where a device has a partner that is already in DMA mode we check
  *     and enforce IDE simplex rules.
  */
 
-static unsigned long ide_get_or_set_dma_base(const struct ide_port_info *d, ide_hwif_t *hwif)
+unsigned long ide_pci_dma_base(ide_hwif_t *hwif, const struct ide_port_info *d)
 {
        struct pci_dev *dev = to_pci_dev(hwif->dev);
        unsigned long dma_base = 0;
@@ -132,6 +132,31 @@ static unsigned long ide_get_or_set_dma_base(const struct ide_port_info *d, ide_
 out:
        return dma_base;
 }
+EXPORT_SYMBOL_GPL(ide_pci_dma_base);
+
+/*
+ * Set up BM-DMA capability (PnP BIOS should have done this)
+ */
+int ide_pci_set_master(struct pci_dev *dev, const char *name)
+{
+       u16 pcicmd;
+
+       pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
+
+       if ((pcicmd & PCI_COMMAND_MASTER) == 0) {
+               pci_set_master(dev);
+
+               if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) ||
+                   (pcicmd & PCI_COMMAND_MASTER) == 0) {
+                       printk(KERN_ERR "%s: error updating PCICMD on %s\n",
+                                       name, pci_name(dev));
+                       return -EIO;
+               }
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(ide_pci_set_master);
 #endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
 
 void ide_setup_pci_noise(struct pci_dev *dev, const struct ide_port_info *d)
@@ -158,7 +183,7 @@ EXPORT_SYMBOL_GPL(ide_setup_pci_noise);
 
 static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
 {
-       int ret;
+       int ret, bars;
 
        if (pci_enable_device(dev)) {
                ret = pci_enable_device_io(dev);
@@ -181,13 +206,21 @@ static int ide_pci_enable(struct pci_dev *dev, const struct ide_port_info *d)
                goto out;
        }
 
-       /* FIXME: Temporary - until we put in the hotplug interface logic
-          Check that the bits we want are not in use by someone else. */
-       ret = pci_request_region(dev, 4, "ide_tmp");
-       if (ret < 0)
-               goto out;
+       if (d->host_flags & IDE_HFLAG_SINGLE)
+               bars = (1 << 2) - 1;
+       else
+               bars = (1 << 4) - 1;
 
-       pci_release_region(dev, 4);
+       if ((d->host_flags & IDE_HFLAG_NO_DMA) == 0) {
+               if (d->host_flags & IDE_HFLAG_CS5520)
+                       bars |= (1 << 2);
+               else
+                       bars |= (1 << 4);
+       }
+
+       ret = pci_request_selected_regions(dev, bars, d->name);
+       if (ret < 0)
+               printk(KERN_ERR "%s: can't reserve resources\n", d->name);
 out:
        return ret;
 }
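
The bars computation just above decides which PCI BARs ide_pci_enable() reserves via pci_request_selected_regions(). A worked, runnable example of just that bit arithmetic follows; the flag bit values below are made up for the demo, only the arithmetic mirrors the hunk.

/* Single-channel controllers claim BARs 0-1, dual-channel BARs 0-3, and
 * DMA-capable ones add the bus-master BAR (2 for CS5520-style chips,
 * otherwise 4). */
#include <stdio.h>

#define HFLAG_SINGLE  (1 << 0)	/* illustrative flag bits */
#define HFLAG_NO_DMA  (1 << 1)
#define HFLAG_CS5520  (1 << 2)

static int select_bars(unsigned host_flags)
{
	int bars;

	if (host_flags & HFLAG_SINGLE)
		bars = (1 << 2) - 1;		/* 0b0011: BARs 0-1 */
	else
		bars = (1 << 4) - 1;		/* 0b1111: BARs 0-3 */

	if (!(host_flags & HFLAG_NO_DMA)) {
		if (host_flags & HFLAG_CS5520)
			bars |= 1 << 2;		/* BM registers live in BAR 2 */
		else
			bars |= 1 << 4;		/* usual BM-DMA BAR 4 */
	}
	return bars;
}

int main(void)
{
	printf("dual + DMA:   0x%02x\n", select_bars(0));		/* 0x1f */
	printf("single + DMA: 0x%02x\n", select_bars(HFLAG_SINGLE));	/* 0x13 */
	printf("PIO only:     0x%02x\n", select_bars(HFLAG_NO_DMA));	/* 0x0f */
	return 0;
}
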
@@ -314,7 +347,6 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
        ide_init_port_hw(hwif, &hw);
 
        hwif->dev = &dev->dev;
-       hwif->cds = d;
 
        return hwif;
 }
@@ -330,40 +362,33 @@ static ide_hwif_t *ide_hwif_configure(struct pci_dev *dev,
  *     state
  */
 
-void ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
+int ide_hwif_setup_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
 {
        struct pci_dev *dev = to_pci_dev(hwif->dev);
-       u16 pcicmd;
-
-       pci_read_config_word(dev, PCI_COMMAND, &pcicmd);
 
        if ((d->host_flags & IDE_HFLAG_NO_AUTODMA) == 0 ||
            ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE &&
             (dev->class & 0x80))) {
-               unsigned long dma_base = ide_get_or_set_dma_base(d, hwif);
-               if (dma_base && !(pcicmd & PCI_COMMAND_MASTER)) {
-                       /*
-                        * Set up BM-DMA capability
-                        * (PnP BIOS should have done this)
-                        */
-                       pci_set_master(dev);
-                       if (pci_read_config_word(dev, PCI_COMMAND, &pcicmd) || !(pcicmd & PCI_COMMAND_MASTER)) {
-                               printk(KERN_ERR "%s: %s error updating PCICMD\n",
-                                       hwif->name, d->name);
-                               dma_base = 0;
-                       }
-               }
-               if (dma_base) {
-                       if (d->init_dma) {
-                               d->init_dma(hwif, dma_base);
-                       } else {
-                               ide_setup_dma(hwif, dma_base);
-                       }
-               } else {
-                       printk(KERN_INFO "%s: %s Bus-Master DMA disabled "
-                               "(BIOS)\n", hwif->name, d->name);
-               }
+               unsigned long base = ide_pci_dma_base(hwif, d);
+
+               if (base == 0 || ide_pci_set_master(dev, d->name) < 0)
+                       return -1;
+
+               if (hwif->mmio)
+                       printk(KERN_INFO "    %s: MMIO-DMA\n", hwif->name);
+               else
+                       printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx\n",
+                                        hwif->name, base, base + 7);
+
+               hwif->extra_base = base + (hwif->channel ? 8 : 16);
+
+               if (ide_allocate_dma_engine(hwif))
+                       return -1;
+
+               ide_setup_dma(hwif, base);
        }
+
+       return 0;
 }
 #endif /* CONFIG_BLK_DEV_IDEDMA_PCI */
 
index 4e3128ff73c135ce988cdbb9d75fd58981faa26b..fe78f7d250991d120adcbf6c93c50b0ee38078c7 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/sched.h>
 #include <linux/hugetlb.h>
+#include <linux/dma-attrs.h>
 
 #include "uverbs.h"
 
@@ -72,9 +73,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
  * @addr: userspace virtual address to start at
  * @size: length of region to pin
  * @access: IB_ACCESS_xxx flags for memory being pinned
+ * @dmasync: flush in-flight DMA when the memory region is written
  */
 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
-                           size_t size, int access)
+                           size_t size, int access, int dmasync)
 {
        struct ib_umem *umem;
        struct page **page_list;
@@ -87,6 +89,10 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
        int ret;
        int off;
        int i;
+       DEFINE_DMA_ATTRS(attrs);
+
+       if (dmasync)
+               dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
 
        if (!can_do_mlock())
                return ERR_PTR(-EPERM);
@@ -174,10 +180,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
                                sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0);
                        }
 
-                       chunk->nmap = ib_dma_map_sg(context->device,
-                                                   &chunk->page_list[0],
-                                                   chunk->nents,
-                                                   DMA_BIDIRECTIONAL);
+                       chunk->nmap = ib_dma_map_sg_attrs(context->device,
+                                                         &chunk->page_list[0],
+                                                         chunk->nents,
+                                                         DMA_BIDIRECTIONAL,
+                                                         &attrs);
                        if (chunk->nmap <= 0) {
                                for (i = 0; i < chunk->nents; ++i)
                                        put_page(sg_page(&chunk->page_list[i]));
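
The ib_umem_get() change above threads a new dmasync flag from each caller down to the scatterlist mapping as a DMA attribute. The sketch below models only that plumbing in user-space C; the names loosely mirror the kernel ones but this is not the kernel API.

/* A per-call flag is turned into an attribute bit and honoured by the
 * mapping routine; most callers pass 0, ordering-sensitive ones pass 1. */
#include <stdio.h>

#define ATTR_WRITE_BARRIER (1u << 0)

static int map_sg_attrs(int nents, unsigned attrs)
{
	if (attrs & ATTR_WRITE_BARRIER)
		printf("mapping %d entries with write-barrier semantics\n", nents);
	else
		printf("mapping %d entries, relaxed ordering\n", nents);
	return nents;			/* pretend all entries mapped */
}

static int umem_get(int nents, int dmasync)
{
	unsigned attrs = 0;

	if (dmasync)			/* caller wants in-flight DMA flushed */
		attrs |= ATTR_WRITE_BARRIER;
	return map_sg_attrs(nents, attrs);
}

int main(void)
{
	umem_get(4, 0);			/* ordinary MR registration */
	umem_get(4, 1);			/* buffer that needs ordered writes */
	return 0;
}
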
index 6af2c0f79a677ab889685e4b54d6ed53c580850d..2acf9b62cf9936acf72f25f2c15bc8bdee41493d 100644 (file)
@@ -452,7 +452,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                return ERR_PTR(-ENOMEM);
        c2mr->pd = c2pd;
 
-       c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+       c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
        if (IS_ERR(c2mr->umem)) {
                err = PTR_ERR(c2mr->umem);
                kfree(c2mr);
index 66eb7030aea83c1d01f89838621959d28cf191b6..ed2ee4ba4b7c3e9e50ed8d162fbc40b5482945ca 100644 (file)
@@ -456,7 +456,8 @@ void cxio_count_scqes(struct t3_cq *cq, struct t3_wq *wq, int *count)
        ptr = cq->sw_rptr;
        while (!Q_EMPTY(ptr, cq->sw_wptr)) {
                cqe = cq->sw_queue + (Q_PTR2IDX(ptr, cq->size_log2));
-               if ((SQ_TYPE(*cqe) || (CQE_OPCODE(*cqe) == T3_READ_RESP)) &&
+               if ((SQ_TYPE(*cqe) ||
+                    ((CQE_OPCODE(*cqe) == T3_READ_RESP) && wq->oldest_read)) &&
                    (CQE_QPID(*cqe) == wq->qpid))
                        (*count)++;
                ptr++;
@@ -829,7 +830,8 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
        wqe->mpaattrs = attr->mpaattrs;
        wqe->qpcaps = attr->qpcaps;
        wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
-       wqe->flags = cpu_to_be32(attr->flags);
+       wqe->rqe_count = cpu_to_be16(attr->rqe_count);
+       wqe->flags_rtr_type = cpu_to_be16(attr->flags|V_RTR_TYPE(attr->rtr_type));
        wqe->ord = cpu_to_be32(attr->ord);
        wqe->ird = cpu_to_be32(attr->ird);
        wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
@@ -1134,6 +1136,18 @@ int cxio_poll_cq(struct t3_wq *wq, struct t3_cq *cq, struct t3_cqe *cqe,
         */
        if (RQ_TYPE(*hw_cqe) && (CQE_OPCODE(*hw_cqe) == T3_READ_RESP)) {
 
+               /*
+                * If this is an unsolicited read response, then the read
+                * was generated by the kernel driver as part of peer-2-peer
+                * connection setup.  So ignore the completion.
+                */
+               if (!wq->oldest_read) {
+                       if (CQE_STATUS(*hw_cqe))
+                               wq->error = 1;
+                       ret = -1;
+                       goto skip_cqe;
+               }
+
                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
index 99543d634704d93c98b1f4bcfa615a5aee7bfc0d..2bcff7f5046e3a00c10675643f40872319bb51ef 100644 (file)
@@ -53,6 +53,7 @@
 #define T3_MAX_PBL_SIZE 256
 #define T3_MAX_RQ_SIZE 1024
 #define T3_MAX_NUM_STAG (1<<15)
+#define T3_MAX_MR_SIZE 0x100000000ULL
 
 #define T3_STAG_UNSET 0xffffffff
 
index 969d4d928455b237ac97deda8f7a1e1cf38b63df..f1a25a821a45049f6c5688b92ca4d5c14b6e9fcf 100644 (file)
@@ -278,6 +278,17 @@ enum t3_qp_caps {
        uP_RI_QP_STAG0_ENABLE = 0x10
 } __attribute__ ((packed));
 
+enum rdma_init_rtr_types {
+       RTR_READ = 1,
+       RTR_WRITE = 2,
+       RTR_SEND = 3,
+};
+
+#define S_RTR_TYPE     2
+#define M_RTR_TYPE     0x3
+#define V_RTR_TYPE(x)  ((x) << S_RTR_TYPE)
+#define G_RTR_TYPE(x)  ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
+
 struct t3_rdma_init_attr {
        u32 tid;
        u32 qpid;
@@ -293,7 +304,9 @@ struct t3_rdma_init_attr {
        u32 ird;
        u64 qp_dma_addr;
        u32 qp_dma_size;
-       u32 flags;
+       enum rdma_init_rtr_types rtr_type;
+       u16 flags;
+       u16 rqe_count;
        u32 irs;
 };
 
@@ -309,8 +322,8 @@ struct t3_rdma_init_wr {
        u8 mpaattrs;            /* 5 */
        u8 qpcaps;
        __be16 ulpdu_size;
-       __be32 flags;           /* bits 31-1 - reservered */
-                               /* bit     0 - set if RECV posted */
+       __be16 flags_rtr_type;
+       __be16 rqe_count;
        __be32 ord;             /* 6 */
        __be32 ird;
        __be64 qp_dma_addr;     /* 7 */
@@ -324,7 +337,7 @@ struct t3_genbit {
 };
 
 enum rdma_init_wr_flags {
-       RECVS_POSTED = (1<<0),
+       MPA_INITIATOR = (1<<0),
        PRIV_QP = (1<<1),
 };
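
The S_/M_/V_/G_ macros added above pack a 2-bit RTR type next to the work-request flag bits in the 16-bit flags_rtr_type word. A worked, runnable example of that packing and unpacking; the macro and enum values are copied from the hunk, and the cpu_to_be16() byte-order conversion done by the driver is deliberately omitted here.

#include <stdio.h>

enum rdma_init_rtr_types { RTR_READ = 1, RTR_WRITE = 2, RTR_SEND = 3 };
enum rdma_init_wr_flags  { MPA_INITIATOR = (1 << 0), PRIV_QP = (1 << 1) };

#define S_RTR_TYPE	2
#define M_RTR_TYPE	0x3
#define V_RTR_TYPE(x)	((x) << S_RTR_TYPE)
#define G_RTR_TYPE(x)	((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)

int main(void)
{
	/* pack: low bits carry flags, bits 2-3 carry the RTR type */
	unsigned short flags_rtr_type = PRIV_QP | V_RTR_TYPE(RTR_SEND);

	printf("packed:   0x%04x\n", flags_rtr_type);		/* 0x000e */
	printf("rtr type: %u\n", G_RTR_TYPE(flags_rtr_type));	/* 3 (RTR_SEND) */
	printf("priv qp:  %u\n", !!(flags_rtr_type & PRIV_QP));/* 1 */
	return 0;
}
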
 
index 6ba4138c8ec3a8b434526e7f7ceba7f641569d62..71554eacb13ce3f87d19b8bd582f05eabde6a8f9 100644 (file)
@@ -83,6 +83,7 @@ static void rnic_init(struct iwch_dev *rnicp)
        rnicp->attr.max_phys_buf_entries = T3_MAX_PBL_SIZE;
        rnicp->attr.max_pds = T3_MAX_NUM_PD - 1;
        rnicp->attr.mem_pgsizes_bitmask = 0x7FFF;       /* 4KB-128MB */
+       rnicp->attr.max_mr_size = T3_MAX_MR_SIZE;
        rnicp->attr.can_resize_wq = 0;
        rnicp->attr.max_rdma_reads_per_qp = 8;
        rnicp->attr.max_rdma_read_resources =
index 9ad9b1e7c8c1cced39eda9ead95d85e498018a03..d2409a505e8d5d3f6b659b4af0a07d0421cf8672 100644 (file)
@@ -66,6 +66,7 @@ struct iwch_rnic_attributes {
         * size (4k)^i.  Phys block list mode unsupported.
         */
        u32 mem_pgsizes_bitmask;
+       u64 max_mr_size;
        u8 can_resize_wq;
 
        /*
index 72ca360c3dbc12c2b0a36e865b5ad18601eb3bd2..d44a6df9ad8c4a7ebd915b00817a8a980ae2f237 100644 (file)
@@ -63,6 +63,10 @@ static char *states[] = {
        NULL,
 };
 
+int peer2peer = 0;
+module_param(peer2peer, int, 0644);
+MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
+
 static int ep_timeout_secs = 10;
 module_param(ep_timeout_secs, int, 0644);
 MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
@@ -125,6 +129,12 @@ static void start_ep_timer(struct iwch_ep *ep)
 static void stop_ep_timer(struct iwch_ep *ep)
 {
        PDBG("%s ep %p\n", __func__, ep);
+       if (!timer_pending(&ep->timer)) {
+               printk(KERN_ERR "%s timer stopped when its not running!  ep %p state %u\n",
+                       __func__, ep, ep->com.state);
+               WARN_ON(1);
+               return;
+       }
        del_timer_sync(&ep->timer);
        put_ep(&ep->com);
 }
@@ -508,7 +518,7 @@ static void send_mpa_req(struct iwch_ep *ep, struct sk_buff *skb)
        skb_reset_transport_header(skb);
        len = skb->len;
        req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
-       req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
+       req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
        req->wr_lo = htonl(V_WR_TID(ep->hwtid));
        req->len = htonl(len);
        req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
@@ -559,7 +569,7 @@ static int send_mpa_reject(struct iwch_ep *ep, const void *pdata, u8 plen)
        set_arp_failure_handler(skb, arp_failure_discard);
        skb_reset_transport_header(skb);
        req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
-       req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
+       req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
        req->wr_lo = htonl(V_WR_TID(ep->hwtid));
        req->len = htonl(mpalen);
        req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
@@ -611,7 +621,7 @@ static int send_mpa_reply(struct iwch_ep *ep, const void *pdata, u8 plen)
        skb_reset_transport_header(skb);
        len = skb->len;
        req = (struct tx_data_wr *) skb_push(skb, sizeof(*req));
-       req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
+       req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL);
        req->wr_lo = htonl(V_WR_TID(ep->hwtid));
        req->len = htonl(len);
        req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) |
@@ -879,6 +889,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
         * the MPA header is valid.
         */
        state_set(&ep->com, FPDU_MODE);
+       ep->mpa_attr.initiator = 1;
        ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
        ep->mpa_attr.recv_marker_enabled = markers_enabled;
        ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
@@ -901,8 +912,14 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb)
        /* bind QP and TID with INIT_WR */
        err = iwch_modify_qp(ep->com.qp->rhp,
                             ep->com.qp, mask, &attrs, 1);
-       if (!err)
-               goto out;
+       if (err)
+               goto err;
+
+       if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) {
+               iwch_post_zb_read(ep->com.qp);
+       }
+
+       goto out;
 err:
        abort_connection(ep, skb, GFP_KERNEL);
 out:
@@ -995,6 +1012,7 @@ static void process_mpa_request(struct iwch_ep *ep, struct sk_buff *skb)
         * If we get here we have accumulated the entire mpa
         * start reply message including private data.
         */
+       ep->mpa_attr.initiator = 0;
        ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
        ep->mpa_attr.recv_marker_enabled = markers_enabled;
        ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
@@ -1065,17 +1083,33 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 
        PDBG("%s ep %p credits %u\n", __func__, ep, credits);
 
-       if (credits == 0)
+       if (credits == 0) {
+               PDBG(KERN_ERR "%s 0 credit ack  ep %p state %u\n",
+                       __func__, ep, state_read(&ep->com));
                return CPL_RET_BUF_DONE;
+       }
+
        BUG_ON(credits != 1);
-       BUG_ON(ep->mpa_skb == NULL);
-       kfree_skb(ep->mpa_skb);
-       ep->mpa_skb = NULL;
        dst_confirm(ep->dst);
-       if (state_read(&ep->com) == MPA_REP_SENT) {
-               ep->com.rpl_done = 1;
-               PDBG("waking up ep %p\n", ep);
-               wake_up(&ep->com.waitq);
+       if (!ep->mpa_skb) {
+               PDBG("%s rdma_init wr_ack ep %p state %u\n",
+                       __func__, ep, state_read(&ep->com));
+               if (ep->mpa_attr.initiator) {
+                       PDBG("%s initiator ep %p state %u\n",
+                               __func__, ep, state_read(&ep->com));
+                       if (peer2peer)
+                               iwch_post_zb_read(ep->com.qp);
+               } else {
+                       PDBG("%s responder ep %p state %u\n",
+                               __func__, ep, state_read(&ep->com));
+                       ep->com.rpl_done = 1;
+                       wake_up(&ep->com.waitq);
+               }
+       } else {
+               PDBG("%s lsm ack ep %p state %u freeing skb\n",
+                       __func__, ep, state_read(&ep->com));
+               kfree_skb(ep->mpa_skb);
+               ep->mpa_skb = NULL;
        }
        return CPL_RET_BUF_DONE;
 }
@@ -1083,8 +1117,11 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
        struct iwch_ep *ep = ctx;
+       unsigned long flags;
+       int release = 0;
 
        PDBG("%s ep %p\n", __func__, ep);
+       BUG_ON(!ep);
 
        /*
         * We get 2 abort replies from the HW.  The first one must
@@ -1095,9 +1132,22 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                return CPL_RET_BUF_DONE;
        }
 
-       close_complete_upcall(ep);
-       state_set(&ep->com, DEAD);
-       release_ep_resources(ep);
+       spin_lock_irqsave(&ep->com.lock, flags);
+       switch (ep->com.state) {
+       case ABORTING:
+               close_complete_upcall(ep);
+               __state_set(&ep->com, DEAD);
+               release = 1;
+               break;
+       default:
+               printk(KERN_ERR "%s ep %p state %d\n",
+                    __func__, ep, ep->com.state);
+               break;
+       }
+       spin_unlock_irqrestore(&ep->com.lock, flags);
+
+       if (release)
+               release_ep_resources(ep);
        return CPL_RET_BUF_DONE;
 }
 
@@ -1470,7 +1520,8 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
        struct sk_buff *rpl_skb;
        struct iwch_qp_attributes attrs;
        int ret;
-       int state;
+       int release = 0;
+       unsigned long flags;
 
        if (is_neg_adv_abort(req->status)) {
                PDBG("%s neg_adv_abort ep %p tid %d\n", __func__, ep,
@@ -1488,9 +1539,9 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                return CPL_RET_BUF_DONE;
        }
 
-       state = state_read(&ep->com);
-       PDBG("%s ep %p state %u\n", __func__, ep, state);
-       switch (state) {
+       spin_lock_irqsave(&ep->com.lock, flags);
+       PDBG("%s ep %p state %u\n", __func__, ep, ep->com.state);
+       switch (ep->com.state) {
        case CONNECTING:
                break;
        case MPA_REQ_WAIT:
@@ -1536,21 +1587,25 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                break;
        case DEAD:
                PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
+               spin_unlock_irqrestore(&ep->com.lock, flags);
                return CPL_RET_BUF_DONE;
        default:
                BUG_ON(1);
                break;
        }
        dst_confirm(ep->dst);
+       if (ep->com.state != ABORTING) {
+               __state_set(&ep->com, DEAD);
+               release = 1;
+       }
+       spin_unlock_irqrestore(&ep->com.lock, flags);
 
        rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
        if (!rpl_skb) {
                printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
                       __func__);
-               dst_release(ep->dst);
-               l2t_release(L2DATA(ep->com.tdev), ep->l2t);
-               put_ep(&ep->com);
-               return CPL_RET_BUF_DONE;
+               release = 1;
+               goto out;
        }
        rpl_skb->priority = CPL_PRIORITY_DATA;
        rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
@@ -1559,10 +1614,9 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
        OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
        rpl->cmd = CPL_ABORT_NO_RST;
        cxgb3_ofld_send(ep->com.tdev, rpl_skb);
-       if (state != ABORTING) {
-               state_set(&ep->com, DEAD);
+out:
+       if (release)
                release_ep_resources(ep);
-       }
        return CPL_RET_BUF_DONE;
 }
 
@@ -1661,15 +1715,18 @@ static void ep_timeout(unsigned long arg)
        struct iwch_ep *ep = (struct iwch_ep *)arg;
        struct iwch_qp_attributes attrs;
        unsigned long flags;
+       int abort = 1;
 
        spin_lock_irqsave(&ep->com.lock, flags);
        PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
             ep->com.state);
        switch (ep->com.state) {
        case MPA_REQ_SENT:
+               __state_set(&ep->com, ABORTING);
                connect_reply_upcall(ep, -ETIMEDOUT);
                break;
        case MPA_REQ_WAIT:
+               __state_set(&ep->com, ABORTING);
                break;
        case CLOSING:
        case MORIBUND:
@@ -1679,13 +1736,17 @@ static void ep_timeout(unsigned long arg)
                                     ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
                                     &attrs, 1);
                }
+               __state_set(&ep->com, ABORTING);
                break;
        default:
-               BUG();
+               printk(KERN_ERR "%s unexpected state ep %p state %u\n",
+                       __func__, ep, ep->com.state);
+               WARN_ON(1);
+               abort = 0;
        }
-       __state_set(&ep->com, CLOSING);
        spin_unlock_irqrestore(&ep->com.lock, flags);
-       abort_connection(ep, NULL, GFP_ATOMIC);
+       if (abort)
+               abort_connection(ep, NULL, GFP_ATOMIC);
        put_ep(&ep->com);
 }
 
@@ -1762,16 +1823,19 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        if (err)
                goto err;
 
+       /* if needed, wait for wr_ack */
+       if (iwch_rqes_posted(qp)) {
+               wait_event(ep->com.waitq, ep->com.rpl_done);
+               err = ep->com.rpl_err;
+               if (err)
+                       goto err;
+       }
+
        err = send_mpa_reply(ep, conn_param->private_data,
                             conn_param->private_data_len);
        if (err)
                goto err;
 
-       /* wait for wr_ack */
-       wait_event(ep->com.waitq, ep->com.rpl_done);
-       err = ep->com.rpl_err;
-       if (err)
-               goto err;
 
        state_set(&ep->com, FPDU_MODE);
        established_upcall(ep);
@@ -1968,40 +2032,39 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
        PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
             states[ep->com.state], abrupt);
 
-       if (ep->com.state == DEAD) {
-               PDBG("%s already dead ep %p\n", __func__, ep);
-               goto out;
-       }
-
-       if (abrupt) {
-               if (ep->com.state != ABORTING) {
-                       ep->com.state = ABORTING;
-                       close = 1;
-               }
-               goto out;
-       }
-
        switch (ep->com.state) {
        case MPA_REQ_WAIT:
        case MPA_REQ_SENT:
        case MPA_REQ_RCVD:
        case MPA_REP_SENT:
        case FPDU_MODE:
-               start_ep_timer(ep);
-               ep->com.state = CLOSING;
                close = 1;
+               if (abrupt)
+                       ep->com.state = ABORTING;
+               else {
+                       ep->com.state = CLOSING;
+                       start_ep_timer(ep);
+               }
                break;
        case CLOSING:
-               ep->com.state = MORIBUND;
                close = 1;
+               if (abrupt) {
+                       stop_ep_timer(ep);
+                       ep->com.state = ABORTING;
+               } else
+                       ep->com.state = MORIBUND;
                break;
        case MORIBUND:
+       case ABORTING:
+       case DEAD:
+               PDBG("%s ignoring disconnect ep %p state %u\n",
+                    __func__, ep, ep->com.state);
                break;
        default:
                BUG();
                break;
        }
-out:
+
        spin_unlock_irqrestore(&ep->com.lock, flags);
        if (close) {
                if (abrupt)
index 2bb7fbdb3ff44e3136a7e2c0d7edcec9b97e842f..d7c7e09f09963c29fcd593175c92db5cfba582a0 100644 (file)
@@ -56,6 +56,7 @@
 #define put_ep(ep) { \
        PDBG("put_ep (via %s:%u) ep %p refcnt %d\n", __func__, __LINE__,  \
             ep, atomic_read(&((ep)->kref.refcount))); \
+       WARN_ON(atomic_read(&((ep)->kref.refcount)) < 1); \
        kref_put(&((ep)->kref), __free_ep); \
 }
 
@@ -225,5 +226,6 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, st
 
 int __init iwch_cm_init(void);
 void __exit iwch_cm_term(void);
+extern int peer2peer;
 
 #endif                         /* _IWCH_CM_H_ */
index ab4695c1dd5614b3e881ad6d03abca913621ebf6..d07d3a377b5f2b02644e2164db56b858ac898e07 100644 (file)
@@ -602,7 +602,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        if (!mhp)
                return ERR_PTR(-ENOMEM);
 
-       mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+       mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);
        if (IS_ERR(mhp->umem)) {
                err = PTR_ERR(mhp->umem);
                kfree(mhp);
@@ -998,7 +998,7 @@ static int iwch_query_device(struct ib_device *ibdev,
        props->device_cap_flags = dev->device_cap_flags;
        props->vendor_id = (u32)dev->rdev.rnic_info.pdev->vendor;
        props->vendor_part_id = (u32)dev->rdev.rnic_info.pdev->device;
-       props->max_mr_size = ~0ull;
+       props->max_mr_size = dev->attr.max_mr_size;
        props->max_qp = dev->attr.max_qps;
        props->max_qp_wr = dev->attr.max_wrs;
        props->max_sge = dev->attr.max_sge_per_wr;
index 61356f91109d8fd96e611cc55bb40a1b64f20997..db5100d27ca2e9f7746cb26444488c80883be292 100644 (file)
@@ -118,6 +118,7 @@ enum IWCH_QP_FLAGS {
 };
 
 struct iwch_mpa_attributes {
+       u8 initiator;
        u8 recv_marker_enabled;
        u8 xmit_marker_enabled; /* iWARP: enable inbound Read Resp. */
        u8 crc_enabled;
@@ -322,6 +323,7 @@ enum iwch_qp_query_flags {
        IWCH_QP_QUERY_TEST_USERWRITE = 0x32     /* Test special */
 };
 
+u16 iwch_rqes_posted(struct iwch_qp *qhp);
 int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                      struct ib_send_wr **bad_wr);
 int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
@@ -331,6 +333,7 @@ int iwch_bind_mw(struct ib_qp *qp,
                             struct ib_mw_bind *mw_bind);
 int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
 int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg);
+int iwch_post_zb_read(struct iwch_qp *qhp);
 int iwch_register_device(struct iwch_dev *dev);
 void iwch_unregister_device(struct iwch_dev *dev);
 int iwch_quiesce_qps(struct iwch_cq *chp);
index 8891c3b0a3d51d1ecc1e2e47766a27d91a4c8be6..9b4be889c58ea8d9d378200e0297958a337ef16c 100644 (file)
@@ -586,6 +586,36 @@ static inline void build_term_codes(struct respQ_msg_t *rsp_msg,
        }
 }
 
+int iwch_post_zb_read(struct iwch_qp *qhp)
+{
+       union t3_wr *wqe;
+       struct sk_buff *skb;
+       u8 flit_cnt = sizeof(struct t3_rdma_read_wr) >> 3;
+
+       PDBG("%s enter\n", __func__);
+       skb = alloc_skb(40, GFP_KERNEL);
+       if (!skb) {
+               printk(KERN_ERR "%s cannot send zb_read!!\n", __func__);
+               return -ENOMEM;
+       }
+       wqe = (union t3_wr *)skb_put(skb, sizeof(struct t3_rdma_read_wr));
+       memset(wqe, 0, sizeof(struct t3_rdma_read_wr));
+       wqe->read.rdmaop = T3_READ_REQ;
+       wqe->read.reserved[0] = 0;
+       wqe->read.reserved[1] = 0;
+       wqe->read.reserved[2] = 0;
+       wqe->read.rem_stag = cpu_to_be32(1);
+       wqe->read.rem_to = cpu_to_be64(1);
+       wqe->read.local_stag = cpu_to_be32(1);
+       wqe->read.local_len = cpu_to_be32(0);
+       wqe->read.local_to = cpu_to_be64(1);
+       wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ));
+       wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)|
+                                               V_FW_RIWR_LEN(flit_cnt));
+       skb->priority = CPL_PRIORITY_DATA;
+       return cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb);
+}
+
 /*
  * This posts a TERMINATE with layer=RDMA, type=catastrophic.
  */
@@ -671,11 +701,18 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 
 
 /*
- * Return non zero if at least one RECV was pre-posted.
+ * Return count of RECV WRs posted
  */
-static int rqes_posted(struct iwch_qp *qhp)
+u16 iwch_rqes_posted(struct iwch_qp *qhp)
 {
-       return fw_riwrh_opcode((struct fw_riwrh *)qhp->wq.queue) == T3_WR_RCV;
+       union t3_wr *wqe = qhp->wq.queue;
+       u16 count = 0;
+       while ((count+1) != 0 && fw_riwrh_opcode((struct fw_riwrh *)wqe) == T3_WR_RCV) {
+               count++;
+               wqe++;
+       }
+       PDBG("%s qhp %p count %u\n", __func__, qhp, count);
+       return count;
 }
 
 static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
@@ -716,8 +753,17 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
        init_attr.ird = qhp->attr.max_ird;
        init_attr.qp_dma_addr = qhp->wq.dma_addr;
        init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
-       init_attr.flags = rqes_posted(qhp) ? RECVS_POSTED : 0;
+       init_attr.rqe_count = iwch_rqes_posted(qhp);
+       init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
        init_attr.flags |= capable(CAP_NET_BIND_SERVICE) ? PRIV_QP : 0;
+       if (peer2peer) {
+               init_attr.rtr_type = RTR_READ;
+               if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
+                       init_attr.ord = 1;
+               if (init_attr.ird == 0 && !qhp->attr.mpa_attr.initiator)
+                       init_attr.ird = 1;
+       } else
+               init_attr.rtr_type = 0;
        init_attr.irs = qhp->ep->rcv_seq;
        PDBG("%s init_attr.rq_addr 0x%x init_attr.rq_size = %d "
             "flags 0x%x qpcaps 0x%x\n", __func__,
@@ -832,6 +878,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
                                abort=0;
                                disconnect = 1;
                                ep = qhp->ep;
+                               get_ep(&ep->com);
                        }
                        flush_qp(qhp, &flag);
                        break;
@@ -848,6 +895,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
                                abort=1;
                                disconnect = 1;
                                ep = qhp->ep;
+                               get_ep(&ep->com);
                        }
                        goto err;
                        break;
@@ -929,8 +977,10 @@ out:
         * on the EP.  This can be a normal close (RTS->CLOSING) or
         * an abnormal close (RTS/CLOSING->ERROR).
         */
-       if (disconnect)
+       if (disconnect) {
                iwch_ep_disconnect(ep, abort, GFP_KERNEL);
+               put_ep(&ep->com);
+       }
 
        /*
         * If free is 1, then we've disassociated the EP from the QP
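
The get_ep()/put_ep() calls added in the two hunks above pin the endpoint while iwch_modify_qp() drops its lock and calls iwch_ep_disconnect(); without the extra reference, the EP could be freed underneath the disconnect path. Below is a minimal user-space sketch of that pattern only. The struct, the ep_get()/ep_put() helpers and the mutex are illustrative stand-ins for the driver's kref-based get_ep()/put_ep(), not driver code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct endpoint {
        atomic_int refcnt;              /* plays the role of ep->com.kref */
};

static struct endpoint *ep_get(struct endpoint *ep)
{
        atomic_fetch_add(&ep->refcnt, 1);
        return ep;
}

static void ep_put(struct endpoint *ep)
{
        if (atomic_fetch_sub(&ep->refcnt, 1) == 1)
                free(ep);               /* last reference: free the object */
}

int main(void)
{
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        struct endpoint *ep = calloc(1, sizeof(*ep));

        if (!ep)
                return 1;
        atomic_init(&ep->refcnt, 1);    /* creator's reference */

        pthread_mutex_lock(&lock);
        ep_get(ep);                     /* pin ep before dropping the lock */
        pthread_mutex_unlock(&lock);

        /* ... disconnect work runs here without holding the lock ... */
        printf("refcnt during disconnect: %d\n", atomic_load(&ep->refcnt));

        ep_put(ep);                     /* balances ep_get() */
        ep_put(ep);                     /* drops the creator's reference */
        return 0;
}
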
index 0d13fe0a260b182678134ac29acef7ac90bf4648..00bab60f6de474a1ed24e77ee9331f46efe7103f 100644 (file)
@@ -66,6 +66,7 @@ struct ehca_av;
 #include "ehca_irq.h"
 
 #define EHCA_EQE_CACHE_SIZE 20
+#define EHCA_MAX_NUM_QUEUES 0xffff
 
 struct ehca_eqe_cache_entry {
        struct ehca_eqe *eqe;
@@ -127,6 +128,8 @@ struct ehca_shca {
        /* MR pgsize: bit 0-3 means 4K, 64K, 1M, 16M respectively */
        u32 hca_cap_mr_pgsize;
        int max_mtu;
+       atomic_t num_cqs;
+       atomic_t num_qps;
 };
 
 struct ehca_pd {
@@ -160,6 +163,7 @@ struct ehca_qp {
        };
        u32 qp_type;
        enum ehca_ext_qp_type ext_type;
+       enum ib_qp_state state;
        struct ipz_queue ipz_squeue;
        struct ipz_queue ipz_rqueue;
        struct h_galpas galpas;
@@ -343,6 +347,8 @@ extern int ehca_use_hp_mr;
 extern int ehca_scaling_code;
 extern int ehca_lock_hcalls;
 extern int ehca_nr_ports;
+extern int ehca_max_cq;
+extern int ehca_max_qp;
 
 struct ipzu_queue_resp {
        u32 qe_size;      /* queue entry size */
index ec0cfcf3073f305dfd6ac796016f273f4db6f58f..5540b276a33cab4877bdf56833f16fc0c316fb22 100644 (file)
@@ -132,10 +132,19 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
        if (cqe >= 0xFFFFFFFF - 64 - additional_cqe)
                return ERR_PTR(-EINVAL);
 
+       if (!atomic_add_unless(&shca->num_cqs, 1, ehca_max_cq)) {
+               ehca_err(device, "Unable to create CQ, max number of %i "
+                       "CQs reached.", ehca_max_cq);
+               ehca_err(device, "To increase the maximum number of CQs "
+                       "use the number_of_cqs module parameter.\n");
+               return ERR_PTR(-ENOSPC);
+       }
+
        my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL);
        if (!my_cq) {
                ehca_err(device, "Out of memory for ehca_cq struct device=%p",
                         device);
+               atomic_dec(&shca->num_cqs);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -305,6 +314,7 @@ create_cq_exit2:
 create_cq_exit1:
        kmem_cache_free(cq_cache, my_cq);
 
+       atomic_dec(&shca->num_cqs);
        return cq;
 }
 
@@ -359,6 +369,7 @@ int ehca_destroy_cq(struct ib_cq *cq)
        ipz_queue_dtor(NULL, &my_cq->ipz_queue);
        kmem_cache_free(cq_cache, my_cq);
 
+       atomic_dec(&shca->num_cqs);
        return 0;
 }
 
index b4ac617a70e65e3f8200bd6381156eff2408b73b..49660dfa186785f8d73b16b84e0ddcc141967cac 100644 (file)
@@ -54,7 +54,8 @@ int ehca_create_eq(struct ehca_shca *shca,
                   struct ehca_eq *eq,
                   const enum ehca_eq_type type, const u32 length)
 {
-       u64 ret;
+       int ret;
+       u64 h_ret;
        u32 nr_pages;
        u32 i;
        void *vpage;
@@ -73,15 +74,15 @@ int ehca_create_eq(struct ehca_shca *shca,
                return -EINVAL;
        }
 
-       ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
-                                      &eq->pf,
-                                      type,
-                                      length,
-                                      &eq->ipz_eq_handle,
-                                      &eq->length,
-                                      &nr_pages, &eq->ist);
+       h_ret = hipz_h_alloc_resource_eq(shca->ipz_hca_handle,
+                                        &eq->pf,
+                                        type,
+                                        length,
+                                        &eq->ipz_eq_handle,
+                                        &eq->length,
+                                        &nr_pages, &eq->ist);
 
-       if (ret != H_SUCCESS) {
+       if (h_ret != H_SUCCESS) {
                ehca_err(ib_dev, "Can't allocate EQ/NEQ. eq=%p", eq);
                return -EINVAL;
        }
@@ -97,24 +98,22 @@ int ehca_create_eq(struct ehca_shca *shca,
                u64 rpage;
 
                vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
-               if (!vpage) {
-                       ret = H_RESOURCE;
+               if (!vpage)
                        goto create_eq_exit2;
-               }
 
                rpage = virt_to_abs(vpage);
-               ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
-                                              eq->ipz_eq_handle,
-                                              &eq->pf,
-                                              0, 0, rpage, 1);
+               h_ret = hipz_h_register_rpage_eq(shca->ipz_hca_handle,
+                                                eq->ipz_eq_handle,
+                                                &eq->pf,
+                                                0, 0, rpage, 1);
 
                if (i == (nr_pages - 1)) {
                        /* last page */
                        vpage = ipz_qpageit_get_inc(&eq->ipz_queue);
-                       if (ret != H_SUCCESS || vpage)
+                       if (h_ret != H_SUCCESS || vpage)
                                goto create_eq_exit2;
                } else {
-                       if (ret != H_PAGE_REGISTERED || !vpage)
+                       if (h_ret != H_PAGE_REGISTERED || !vpage)
                                goto create_eq_exit2;
                }
        }
index b5ca94c6b8d91a603aebff7f907c42f3234bb31e..ca5eb0cb628cf74cb4df726667cd2f378199448c 100644 (file)
@@ -633,7 +633,7 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool)
        unsigned long flags;
 
        WARN_ON_ONCE(!in_interrupt());
-       if (ehca_debug_level)
+       if (ehca_debug_level >= 3)
                ehca_dmp(&cpu_online_map, sizeof(cpumask_t), "");
 
        spin_lock_irqsave(&pool->last_cpu_lock, flags);
index 65b3362cdb9b5a82321ce56ebaf71d3ce6b3ca4c..482103eb6eacdc699430c7455e9e9b723515443f 100644 (file)
@@ -50,7 +50,7 @@
 #include "ehca_tools.h"
 #include "hcp_if.h"
 
-#define HCAD_VERSION "0025"
+#define HCAD_VERSION "0026"
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -60,7 +60,6 @@ MODULE_VERSION(HCAD_VERSION);
 static int ehca_open_aqp1     = 0;
 static int ehca_hw_level      = 0;
 static int ehca_poll_all_eqs  = 1;
-static int ehca_mr_largepage  = 1;
 
 int ehca_debug_level   = 0;
 int ehca_nr_ports      = 2;
@@ -69,47 +68,52 @@ int ehca_port_act_time = 30;
 int ehca_static_rate   = -1;
 int ehca_scaling_code  = 0;
 int ehca_lock_hcalls   = -1;
-
-module_param_named(open_aqp1,     ehca_open_aqp1,     int, S_IRUGO);
-module_param_named(debug_level,   ehca_debug_level,   int, S_IRUGO);
-module_param_named(hw_level,      ehca_hw_level,      int, S_IRUGO);
-module_param_named(nr_ports,      ehca_nr_ports,      int, S_IRUGO);
-module_param_named(use_hp_mr,     ehca_use_hp_mr,     int, S_IRUGO);
-module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
-module_param_named(poll_all_eqs,  ehca_poll_all_eqs,  int, S_IRUGO);
-module_param_named(static_rate,   ehca_static_rate,   int, S_IRUGO);
-module_param_named(scaling_code,  ehca_scaling_code,  int, S_IRUGO);
-module_param_named(mr_largepage,  ehca_mr_largepage,  int, S_IRUGO);
+int ehca_max_cq        = -1;
+int ehca_max_qp        = -1;
+
+module_param_named(open_aqp1,     ehca_open_aqp1,     bool, S_IRUGO);
+module_param_named(debug_level,   ehca_debug_level,   int,  S_IRUGO);
+module_param_named(hw_level,      ehca_hw_level,      int,  S_IRUGO);
+module_param_named(nr_ports,      ehca_nr_ports,      int,  S_IRUGO);
+module_param_named(use_hp_mr,     ehca_use_hp_mr,     bool, S_IRUGO);
+module_param_named(port_act_time, ehca_port_act_time, int,  S_IRUGO);
+module_param_named(poll_all_eqs,  ehca_poll_all_eqs,  bool, S_IRUGO);
+module_param_named(static_rate,   ehca_static_rate,   int,  S_IRUGO);
+module_param_named(scaling_code,  ehca_scaling_code,  bool, S_IRUGO);
 module_param_named(lock_hcalls,   ehca_lock_hcalls,   bool, S_IRUGO);
+module_param_named(number_of_cqs, ehca_max_cq,        int,  S_IRUGO);
+module_param_named(number_of_qps, ehca_max_qp,        int,  S_IRUGO);
 
 MODULE_PARM_DESC(open_aqp1,
-                "AQP1 on startup (0: no (default), 1: yes)");
+                "Open AQP1 on startup (default: no)");
 MODULE_PARM_DESC(debug_level,
-                "debug level"
-                " (0: no debug traces (default), 1: with debug traces)");
+                "Amount of debug output (0: none (default), 1: traces, "
+                "2: some dumps, 3: lots)");
 MODULE_PARM_DESC(hw_level,
-                "hardware level"
-                " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
+                "Hardware level (0: autosensing (default), "
+                "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
 MODULE_PARM_DESC(nr_ports,
                 "number of connected ports (-1: autodetect, 1: port one only, "
                 "2: two ports (default)");
 MODULE_PARM_DESC(use_hp_mr,
-                "high performance MRs (0: no (default), 1: yes)");
+                "Use high performance MRs (default: no)");
 MODULE_PARM_DESC(port_act_time,
-                "time to wait for port activation (default: 30 sec)");
+                "Time to wait for port activation (default: 30 sec)");
 MODULE_PARM_DESC(poll_all_eqs,
-                "polls all event queues periodically"
-                " (0: no, 1: yes (default))");
+                "Poll all event queues periodically (default: yes)");
 MODULE_PARM_DESC(static_rate,
-                "set permanent static rate (default: disabled)");
+                "Set permanent static rate (default: no static rate)");
 MODULE_PARM_DESC(scaling_code,
-                "set scaling code (0: disabled/default, 1: enabled)");
-MODULE_PARM_DESC(mr_largepage,
-                "use large page for MR (0: use PAGE_SIZE (default), "
-                "1: use large page depending on MR size");
+                "Enable scaling code (default: no)");
 MODULE_PARM_DESC(lock_hcalls,
-                "serialize all hCalls made by the driver "
+                "Serialize all hCalls made by the driver "
                 "(default: autodetect)");
+MODULE_PARM_DESC(number_of_cqs,
+               "Max number of CQs which can be allocated "
+               "(default: autodetect)");
+MODULE_PARM_DESC(number_of_qps,
+               "Max number of QPs which can be allocated "
+               "(default: autodetect)");
 
 DEFINE_RWLOCK(ehca_qp_idr_lock);
 DEFINE_RWLOCK(ehca_cq_idr_lock);
@@ -275,6 +279,7 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
        u64 h_ret;
        struct hipz_query_hca *rblock;
        struct hipz_query_port *port;
+       const char *loc_code;
 
        static const u32 pgsize_map[] = {
                HCA_CAP_MR_PGSIZE_4K,  0x1000,
@@ -283,6 +288,12 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
                HCA_CAP_MR_PGSIZE_16M, 0x1000000,
        };
 
+       ehca_gen_dbg("Probing adapter %s...",
+                    shca->ofdev->node->full_name);
+       loc_code = of_get_property(shca->ofdev->node, "ibm,loc-code", NULL);
+       if (loc_code)
+               ehca_gen_dbg(" ... location code=%s", loc_code);
+
        rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
        if (!rblock) {
                ehca_gen_err("Cannot allocate rblock memory.");
@@ -350,10 +361,27 @@ static int ehca_sense_attributes(struct ehca_shca *shca)
 
        /* translate supported MR page sizes; always support 4K */
        shca->hca_cap_mr_pgsize = EHCA_PAGESIZE;
-       if (ehca_mr_largepage) { /* support extra sizes only if enabled */
-               for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
-                       if (rblock->memory_page_size_supported & pgsize_map[i])
-                               shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
+       for (i = 0; i < ARRAY_SIZE(pgsize_map); i += 2)
+               if (rblock->memory_page_size_supported & pgsize_map[i])
+                       shca->hca_cap_mr_pgsize |= pgsize_map[i + 1];
+
+       /* Set maximum number of CQs and QPs to calculate EQ size */
+       if (ehca_max_qp == -1)
+               ehca_max_qp = min_t(int, rblock->max_qp, EHCA_MAX_NUM_QUEUES);
+       else if (ehca_max_qp < 1 || ehca_max_qp > rblock->max_qp) {
+               ehca_gen_err("Requested number of QPs is out of range (1 - %i) "
+                       "specified by HW", rblock->max_qp);
+               ret = -EINVAL;
+               goto sense_attributes1;
+       }
+
+       if (ehca_max_cq == -1)
+               ehca_max_cq = min_t(int, rblock->max_cq, EHCA_MAX_NUM_QUEUES);
+       else if (ehca_max_cq < 1 || ehca_max_cq > rblock->max_cq) {
+               ehca_gen_err("Requested number of CQs is out of range (1 - %i) "
+                       "specified by HW", rblock->max_cq);
+               ret = -EINVAL;
+               goto sense_attributes1;
        }
 
        /* query max MTU from first port -- it's the same for all ports */
@@ -567,8 +595,7 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)
 
 static ssize_t ehca_show_debug_level(struct device_driver *ddp, char *buf)
 {
-       return snprintf(buf, PAGE_SIZE, "%d\n",
-                       ehca_debug_level);
+       return snprintf(buf, PAGE_SIZE, "%d\n", ehca_debug_level);
 }
 
 static ssize_t ehca_store_debug_level(struct device_driver *ddp,
@@ -657,14 +684,6 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
 }
 static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
 
-static ssize_t ehca_show_mr_largepage(struct device *dev,
-                                     struct device_attribute *attr,
-                                     char *buf)
-{
-       return sprintf(buf, "%d\n", ehca_mr_largepage);
-}
-static DEVICE_ATTR(mr_largepage, S_IRUGO, ehca_show_mr_largepage, NULL);
-
 static struct attribute *ehca_dev_attrs[] = {
        &dev_attr_adapter_handle.attr,
        &dev_attr_num_ports.attr,
@@ -681,7 +700,6 @@ static struct attribute *ehca_dev_attrs[] = {
        &dev_attr_cur_mw.attr,
        &dev_attr_max_pd.attr,
        &dev_attr_max_ah.attr,
-       &dev_attr_mr_largepage.attr,
        NULL
 };
 
@@ -695,7 +713,7 @@ static int __devinit ehca_probe(struct of_device *dev,
        struct ehca_shca *shca;
        const u64 *handle;
        struct ib_pd *ibpd;
-       int ret, i;
+       int ret, i, eq_size;
 
        handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
        if (!handle) {
@@ -716,6 +734,8 @@ static int __devinit ehca_probe(struct of_device *dev,
                return -ENOMEM;
        }
        mutex_init(&shca->modify_mutex);
+       atomic_set(&shca->num_cqs, 0);
+       atomic_set(&shca->num_qps, 0);
        for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
                spin_lock_init(&shca->sport[i].mod_sqp_lock);
 
@@ -735,8 +755,9 @@ static int __devinit ehca_probe(struct of_device *dev,
                goto probe1;
        }
 
+       eq_size = 2 * ehca_max_cq + 4 * ehca_max_qp;
        /* create event queues */
-       ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, 2048);
+       ret = ehca_create_eq(shca, &shca->eq, EHCA_EQ, eq_size);
        if (ret) {
                ehca_err(&shca->ib_device, "Cannot create EQ.");
                goto probe1;
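
For a rough sense of scale, and based only on the values visible in this diff: with both module parameters left at their default of -1, ehca_max_cq and ehca_max_qp autodetect to min(adapter limit, EHCA_MAX_NUM_QUEUES), i.e. at most 0xffff each, so eq_size = 2 * 65535 + 4 * 65535 = 393210 entries, versus the previous fixed size of 2048.
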
index f26997fc00f88b810761aa6c742b254468694e4b..f974367cad4007475a314237768c358d50a236ac 100644 (file)
@@ -323,7 +323,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        }
 
        e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
-                                mr_access_flags);
+                                mr_access_flags, 0);
        if (IS_ERR(e_mr->umem)) {
                ib_mr = (void *)e_mr->umem;
                goto reg_user_mr_exit1;
@@ -1794,8 +1794,9 @@ static int ehca_check_kpages_per_ate(struct scatterlist *page_list,
        int t;
        for (t = start_idx; t <= end_idx; t++) {
                u64 pgaddr = page_to_pfn(sg_page(&page_list[t])) << PAGE_SHIFT;
-               ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
-                            *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
+               if (ehca_debug_level >= 3)
+                       ehca_gen_dbg("chunk_page=%lx value=%016lx", pgaddr,
+                                    *(u64 *)abs_to_virt(phys_to_abs(pgaddr)));
                if (pgaddr - PAGE_SIZE != *prev_pgaddr) {
                        ehca_gen_err("uncontiguous page found pgaddr=%lx "
                                     "prev_pgaddr=%lx page_list_i=%x",
@@ -1862,10 +1863,13 @@ static int ehca_set_pagebuf_user2(struct ehca_mr_pginfo *pginfo,
                                                pgaddr &
                                                ~(pginfo->hwpage_size - 1));
                                }
-                               ehca_gen_dbg("kpage=%lx chunk_page=%lx "
-                                            "value=%016lx", *kpage, pgaddr,
-                                            *(u64 *)abs_to_virt(
-                                                    phys_to_abs(pgaddr)));
+                               if (ehca_debug_level >= 3) {
+                                       u64 val = *(u64 *)abs_to_virt(
+                                               phys_to_abs(pgaddr));
+                                       ehca_gen_dbg("kpage=%lx chunk_page=%lx "
+                                                    "value=%016lx",
+                                                    *kpage, pgaddr, val);
+                               }
                                prev_pgaddr = pgaddr;
                                i++;
                                pginfo->kpage_cnt++;
index 3eb14a52cbf2bc4ebb5a2a46c129861a05ebdb28..18fba92fa7ae107311121b7cc591aa077c7b51eb 100644 (file)
@@ -421,8 +421,18 @@ static struct ehca_qp *internal_create_qp(
        u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
        unsigned long flags;
 
-       if (init_attr->create_flags)
+       if (!atomic_add_unless(&shca->num_qps, 1, ehca_max_qp)) {
+               ehca_err(pd->device, "Unable to create QP, max number of %i "
+                        "QPs reached.", ehca_max_qp);
+               ehca_err(pd->device, "To increase the maximum number of QPs "
+                        "use the number_of_qps module parameter.\n");
+               return ERR_PTR(-ENOSPC);
+       }
+
+       if (init_attr->create_flags) {
+               atomic_dec(&shca->num_qps);
                return ERR_PTR(-EINVAL);
+       }
 
        memset(&parms, 0, sizeof(parms));
        qp_type = init_attr->qp_type;
@@ -431,6 +441,7 @@ static struct ehca_qp *internal_create_qp(
                init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
                ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
                         init_attr->sq_sig_type);
+               atomic_dec(&shca->num_qps);
                return ERR_PTR(-EINVAL);
        }
 
@@ -455,6 +466,7 @@ static struct ehca_qp *internal_create_qp(
 
        if (is_llqp && has_srq) {
                ehca_err(pd->device, "LLQPs can't have an SRQ");
+               atomic_dec(&shca->num_qps);
                return ERR_PTR(-EINVAL);
        }
 
@@ -466,6 +478,7 @@ static struct ehca_qp *internal_create_qp(
                        ehca_err(pd->device, "no more than three SGEs "
                                 "supported for SRQ  pd=%p  max_sge=%x",
                                 pd, init_attr->cap.max_recv_sge);
+                       atomic_dec(&shca->num_qps);
                        return ERR_PTR(-EINVAL);
                }
        }
@@ -477,6 +490,7 @@ static struct ehca_qp *internal_create_qp(
            qp_type != IB_QPT_SMI &&
            qp_type != IB_QPT_GSI) {
                ehca_err(pd->device, "wrong QP Type=%x", qp_type);
+               atomic_dec(&shca->num_qps);
                return ERR_PTR(-EINVAL);
        }
 
@@ -490,6 +504,7 @@ static struct ehca_qp *internal_create_qp(
                                         "or max_rq_wr=%x for RC LLQP",
                                         init_attr->cap.max_send_wr,
                                         init_attr->cap.max_recv_wr);
+                               atomic_dec(&shca->num_qps);
                                return ERR_PTR(-EINVAL);
                        }
                        break;
@@ -497,6 +512,7 @@ static struct ehca_qp *internal_create_qp(
                        if (!EHCA_BMASK_GET(HCA_CAP_UD_LL_QP, shca->hca_cap)) {
                                ehca_err(pd->device, "UD LLQP not supported "
                                         "by this adapter");
+                               atomic_dec(&shca->num_qps);
                                return ERR_PTR(-ENOSYS);
                        }
                        if (!(init_attr->cap.max_send_sge <= 5
@@ -508,20 +524,22 @@ static struct ehca_qp *internal_create_qp(
                                         "or max_recv_sge=%x for UD LLQP",
                                         init_attr->cap.max_send_sge,
                                         init_attr->cap.max_recv_sge);
+                               atomic_dec(&shca->num_qps);
                                return ERR_PTR(-EINVAL);
                        } else if (init_attr->cap.max_send_wr > 255) {
                                ehca_err(pd->device,
                                         "Invalid Number of "
                                         "max_send_wr=%x for UD QP_TYPE=%x",
                                         init_attr->cap.max_send_wr, qp_type);
+                               atomic_dec(&shca->num_qps);
                                return ERR_PTR(-EINVAL);
                        }
                        break;
                default:
                        ehca_err(pd->device, "unsupported LL QP Type=%x",
                                 qp_type);
+                       atomic_dec(&shca->num_qps);
                        return ERR_PTR(-EINVAL);
-                       break;
                }
        } else {
                int max_sge = (qp_type == IB_QPT_UD || qp_type == IB_QPT_SMI
@@ -533,6 +551,7 @@ static struct ehca_qp *internal_create_qp(
                                 "send_sge=%x recv_sge=%x max_sge=%x",
                                 init_attr->cap.max_send_sge,
                                 init_attr->cap.max_recv_sge, max_sge);
+                       atomic_dec(&shca->num_qps);
                        return ERR_PTR(-EINVAL);
                }
        }
@@ -543,6 +562,7 @@ static struct ehca_qp *internal_create_qp(
        my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
        if (!my_qp) {
                ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
+               atomic_dec(&shca->num_qps);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -550,6 +570,7 @@ static struct ehca_qp *internal_create_qp(
        spin_lock_init(&my_qp->spinlock_r);
        my_qp->qp_type = qp_type;
        my_qp->ext_type = parms.ext_type;
+       my_qp->state = IB_QPS_RESET;
 
        if (init_attr->recv_cq)
                my_qp->recv_cq =
@@ -822,6 +843,7 @@ create_qp_exit1:
 
 create_qp_exit0:
        kmem_cache_free(qp_cache, my_qp);
+       atomic_dec(&shca->num_qps);
        return ERR_PTR(ret);
 }
 
@@ -965,7 +987,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
                 qp_num, bad_send_wqe_p);
        /* convert wqe pointer to vadr */
        bad_send_wqe_v = abs_to_virt((u64)bad_send_wqe_p);
-       if (ehca_debug_level)
+       if (ehca_debug_level >= 2)
                ehca_dmp(bad_send_wqe_v, 32, "qp_num=%x bad_wqe", qp_num);
        squeue = &my_qp->ipz_squeue;
        if (ipz_queue_abs_to_offset(squeue, (u64)bad_send_wqe_p, &q_ofs)) {
@@ -978,7 +1000,7 @@ static int prepare_sqe_rts(struct ehca_qp *my_qp, struct ehca_shca *shca,
        wqe = (struct ehca_wqe *)ipz_qeit_calc(squeue, q_ofs);
        *bad_wqe_cnt = 0;
        while (wqe->optype != 0xff && wqe->wqef != 0xff) {
-               if (ehca_debug_level)
+               if (ehca_debug_level >= 2)
                        ehca_dmp(wqe, 32, "qp_num=%x wqe", qp_num);
                wqe->nr_of_data_seg = 0; /* suppress data access */
                wqe->wqef = WQEF_PURGE; /* WQE to be purged */
@@ -1450,7 +1472,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                /* no support for max_send/recv_sge yet */
        }
 
-       if (ehca_debug_level)
+       if (ehca_debug_level >= 2)
                ehca_dmp(mqpcb, 4*70, "qp_num=%x", ibqp->qp_num);
 
        h_ret = hipz_h_modify_qp(shca->ipz_hca_handle,
@@ -1508,6 +1530,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
        if (attr_mask & IB_QP_QKEY)
                my_qp->qkey = attr->qkey;
 
+       my_qp->state = qp_new_state;
+
 modify_qp_exit2:
        if (squeue_locked) { /* this means: sqe -> rts */
                spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
@@ -1763,7 +1787,7 @@ int ehca_query_qp(struct ib_qp *qp,
        if (qp_init_attr)
                *qp_init_attr = my_qp->init_attr;
 
-       if (ehca_debug_level)
+       if (ehca_debug_level >= 2)
                ehca_dmp(qpcb, 4*70, "qp_num=%x", qp->qp_num);
 
 query_qp_exit1:
@@ -1811,7 +1835,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                goto modify_srq_exit0;
        }
 
-       if (ehca_debug_level)
+       if (ehca_debug_level >= 2)
                ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
 
        h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
@@ -1864,7 +1888,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
        srq_attr->srq_limit = EHCA_BMASK_GET(
                MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
 
-       if (ehca_debug_level)
+       if (ehca_debug_level >= 2)
                ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
 
 query_srq_exit1:
@@ -1945,6 +1969,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
        if (HAS_SQ(my_qp))
                ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
        kmem_cache_free(qp_cache, my_qp);
+       atomic_dec(&shca->num_qps);
        return 0;
 }
 
index a20bbf4661881a095a34aab7cf13f5a443b832df..bbe0436f4f75a9b459c0d893deff27fa1e237e03 100644 (file)
@@ -81,7 +81,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
                        recv_wr->sg_list[cnt_ds].length;
        }
 
-       if (ehca_debug_level) {
+       if (ehca_debug_level >= 3) {
                ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
                             ipz_rqueue);
                ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
@@ -281,7 +281,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
                return -EINVAL;
        }
 
-       if (ehca_debug_level) {
+       if (ehca_debug_level >= 3) {
                ehca_gen_dbg("SEND WQE written into queue qp=%p ", qp);
                ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "send wqe");
        }
@@ -421,6 +421,11 @@ int ehca_post_send(struct ib_qp *qp,
        int ret = 0;
        unsigned long flags;
 
+       if (unlikely(my_qp->state != IB_QPS_RTS)) {
+               ehca_err(qp->device, "QP not in RTS state  qpn=%x", qp->qp_num);
+               return -EINVAL;
+       }
+
        /* LOCK the QUEUE */
        spin_lock_irqsave(&my_qp->spinlock_s, flags);
 
@@ -454,13 +459,14 @@ int ehca_post_send(struct ib_qp *qp,
                        goto post_send_exit0;
                }
                wqe_cnt++;
-               ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
-                        my_qp, qp->qp_num, wqe_cnt);
        } /* eof for cur_send_wr */
 
 post_send_exit0:
        iosync(); /* serialize GAL register access */
        hipz_update_sqa(my_qp, wqe_cnt);
+       if (unlikely(ret || ehca_debug_level >= 2))
+               ehca_dbg(qp->device, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
+                        my_qp, qp->qp_num, wqe_cnt, ret);
        my_qp->message_count += wqe_cnt;
        spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
        return ret;
@@ -520,13 +526,14 @@ static int internal_post_recv(struct ehca_qp *my_qp,
                        goto post_recv_exit0;
                }
                wqe_cnt++;
-               ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
-                        my_qp, my_qp->real_qp_num, wqe_cnt);
        } /* eof for cur_recv_wr */
 
 post_recv_exit0:
        iosync(); /* serialize GAL register access */
        hipz_update_rqa(my_qp, wqe_cnt);
+       if (unlikely(ret || ehca_debug_level >= 2))
+           ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d ret=%i",
+                    my_qp, my_qp->real_qp_num, wqe_cnt, ret);
        spin_unlock_irqrestore(&my_qp->spinlock_r, flags);
        return ret;
 }
@@ -570,16 +577,17 @@ static inline int ehca_poll_cq_one(struct ib_cq *cq, struct ib_wc *wc)
        struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
        struct ehca_cqe *cqe;
        struct ehca_qp *my_qp;
-       int cqe_count = 0;
+       int cqe_count = 0, is_error;
 
 poll_cq_one_read_cqe:
        cqe = (struct ehca_cqe *)
                ipz_qeit_get_inc_valid(&my_cq->ipz_queue);
        if (!cqe) {
                ret = -EAGAIN;
-               ehca_dbg(cq->device, "Completion queue is empty ehca_cq=%p "
-                        "cq_num=%x ret=%i", my_cq, my_cq->cq_number, ret);
-               goto  poll_cq_one_exit0;
+               if (ehca_debug_level >= 3)
+                       ehca_dbg(cq->device, "Completion queue is empty  "
+                                "my_cq=%p cq_num=%x", my_cq, my_cq->cq_number);
+               goto poll_cq_one_exit0;
        }
 
        /* prevents loads being reordered across this point */
@@ -609,7 +617,7 @@ poll_cq_one_read_cqe:
                        ehca_dbg(cq->device,
                                 "Got CQE with purged bit qp_num=%x src_qp=%x",
                                 cqe->local_qp_number, cqe->remote_qp_number);
-                       if (ehca_debug_level)
+                       if (ehca_debug_level >= 2)
                                ehca_dmp(cqe, 64, "qp_num=%x src_qp=%x",
                                         cqe->local_qp_number,
                                         cqe->remote_qp_number);
@@ -622,11 +630,13 @@ poll_cq_one_read_cqe:
                }
        }
 
-       /* tracing cqe */
-       if (unlikely(ehca_debug_level)) {
+       is_error = cqe->status & WC_STATUS_ERROR_BIT;
+
+       /* trace error CQEs if debug_level >= 1, trace all CQEs if >= 3 */
+       if (unlikely(ehca_debug_level >= 3 || (ehca_debug_level && is_error))) {
                ehca_dbg(cq->device,
-                        "Received COMPLETION ehca_cq=%p cq_num=%x -----",
-                        my_cq, my_cq->cq_number);
+                        "Received %sCOMPLETION ehca_cq=%p cq_num=%x -----",
+                        is_error ? "ERROR " : "", my_cq, my_cq->cq_number);
                ehca_dmp(cqe, 64, "ehca_cq=%p cq_num=%x",
                         my_cq, my_cq->cq_number);
                ehca_dbg(cq->device,
@@ -649,8 +659,9 @@ poll_cq_one_read_cqe:
                /* update also queue adder to throw away this entry!!! */
                goto poll_cq_one_exit0;
        }
+
        /* eval ib_wc_status */
-       if (unlikely(cqe->status & WC_STATUS_ERROR_BIT)) {
+       if (unlikely(is_error)) {
                /* complete with errors */
                map_ib_wc_status(cqe->status, &wc->status);
                wc->vendor_err = wc->status;
@@ -671,14 +682,6 @@ poll_cq_one_read_cqe:
        wc->imm_data = cpu_to_be32(cqe->immediate_data);
        wc->sl = cqe->service_level;
 
-       if (unlikely(wc->status != IB_WC_SUCCESS))
-               ehca_dbg(cq->device,
-                        "ehca_cq=%p cq_num=%x WARNING unsuccessful cqe "
-                        "OPType=%x status=%x qp_num=%x src_qp=%x wr_id=%lx "
-                        "cqe=%p", my_cq, my_cq->cq_number, cqe->optype,
-                        cqe->status, cqe->local_qp_number,
-                        cqe->remote_qp_number, cqe->work_request_id, cqe);
-
 poll_cq_one_exit0:
        if (cqe_count > 0)
                hipz_update_feca(my_cq, cqe_count);
index 1b07f2beafaf1426713d1475fa2969db4b485141..e43ed8f8a0c84a51b79c52e34590b821fbaf89dd 100644 (file)
@@ -211,8 +211,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
                break;
 
        case 1: /* qp rqueue_addr */
-               ehca_dbg(qp->ib_qp.device, "qp_num=%x rqueue",
-                        qp->ib_qp.qp_num);
+               ehca_dbg(qp->ib_qp.device, "qp_num=%x rq", qp->ib_qp.qp_num);
                ret = ehca_mmap_queue(vma, &qp->ipz_rqueue,
                                      &qp->mm_count_rqueue);
                if (unlikely(ret)) {
@@ -224,8 +223,7 @@ static int ehca_mmap_qp(struct vm_area_struct *vma, struct ehca_qp *qp,
                break;
 
        case 2: /* qp squeue_addr */
-               ehca_dbg(qp->ib_qp.device, "qp_num=%x squeue",
-                        qp->ib_qp.qp_num);
+               ehca_dbg(qp->ib_qp.device, "qp_num=%x sq", qp->ib_qp.qp_num);
                ret = ehca_mmap_queue(vma, &qp->ipz_squeue,
                                      &qp->mm_count_squeue);
                if (unlikely(ret)) {
index 7029aa6537517b043321004a4da114590c5ca04f..5245e13c3a30aac8dc4ebfdc3728499f4c2270f6 100644 (file)
@@ -123,8 +123,9 @@ static long ehca_plpar_hcall_norets(unsigned long opcode,
        int i, sleep_msecs;
        unsigned long flags = 0;
 
-       ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
-                    opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
+       if (unlikely(ehca_debug_level >= 2))
+               ehca_gen_dbg("opcode=%lx " HCALL7_REGS_FORMAT,
+                            opcode, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
 
        for (i = 0; i < 5; i++) {
                /* serialize hCalls to work around firmware issue */
@@ -148,7 +149,8 @@ static long ehca_plpar_hcall_norets(unsigned long opcode,
                                     opcode, ret, arg1, arg2, arg3,
                                     arg4, arg5, arg6, arg7);
                else
-                       ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
+                       if (unlikely(ehca_debug_level >= 2))
+                               ehca_gen_dbg("opcode=%lx ret=%li", opcode, ret);
 
                return ret;
        }
@@ -172,8 +174,10 @@ static long ehca_plpar_hcall9(unsigned long opcode,
        int i, sleep_msecs;
        unsigned long flags = 0;
 
-       ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
-                    arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+       if (unlikely(ehca_debug_level >= 2))
+               ehca_gen_dbg("INPUT -- opcode=%lx " HCALL9_REGS_FORMAT, opcode,
+                            arg1, arg2, arg3, arg4, arg5,
+                            arg6, arg7, arg8, arg9);
 
        for (i = 0; i < 5; i++) {
                /* serialize hCalls to work around firmware issue */
@@ -201,7 +205,7 @@ static long ehca_plpar_hcall9(unsigned long opcode,
                                     ret, outs[0], outs[1], outs[2], outs[3],
                                     outs[4], outs[5], outs[6], outs[7],
                                     outs[8]);
-               } else
+               } else if (unlikely(ehca_debug_level >= 2))
                        ehca_gen_dbg("OUTPUT -- ret=%li " HCALL9_REGS_FORMAT,
                                     ret, outs[0], outs[1], outs[2], outs[3],
                                     outs[4], outs[5], outs[6], outs[7],
@@ -381,7 +385,7 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
                                      r_cb,                  /* r6 */
                                      0, 0, 0, 0);
 
-       if (ehca_debug_level)
+       if (ehca_debug_level >= 2)
                ehca_dmp(query_port_response_block, 64, "response_block");
 
        return ret;
@@ -731,9 +735,6 @@ u64 hipz_h_alloc_resource_mr(const struct ipz_adapter_handle adapter_handle,
        u64 ret;
        u64 outs[PLPAR_HCALL9_BUFSIZE];
 
-       ehca_gen_dbg("kernel PAGE_SIZE=%x access_ctrl=%016x "
-                    "vaddr=%lx length=%lx",
-                    (u32)PAGE_SIZE, access_ctrl, vaddr, length);
        ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
                                adapter_handle.handle,            /* r4 */
                                5,                                /* r5 */
@@ -758,7 +759,7 @@ u64 hipz_h_register_rpage_mr(const struct ipz_adapter_handle adapter_handle,
 {
        u64 ret;
 
-       if (unlikely(ehca_debug_level >= 2)) {
+       if (unlikely(ehca_debug_level >= 3)) {
                if (count > 1) {
                        u64 *kpage;
                        int i;
index db4ba92f79fcd60736a8bd3d1e1752caa0ab3f5d..9d343b7c2f3b64b7729114a98773accab45baec8 100644 (file)
@@ -195,7 +195,8 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                goto bail;
        }
 
-       umem = ib_umem_get(pd->uobject->context, start, length, mr_access_flags);
+       umem = ib_umem_get(pd->uobject->context, start, length,
+                          mr_access_flags, 0);
        if (IS_ERR(umem))
                return (void *) umem;
 
index 3557e7edc9b6590e4973b9f847f3518c2f75c1f1..2f199c5c4a724320a8e517a5a37046d7d740923d 100644 (file)
@@ -137,7 +137,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont
        int err;
 
        *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe),
-                           IB_ACCESS_LOCAL_WRITE);
+                           IB_ACCESS_LOCAL_WRITE, 1);
        if (IS_ERR(*umem))
                return PTR_ERR(*umem);
 
@@ -204,7 +204,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
 
                uar = &to_mucontext(context)->uar;
        } else {
-               err = mlx4_ib_db_alloc(dev, &cq->db, 1);
+               err = mlx4_db_alloc(dev->dev, &cq->db, 1);
                if (err)
                        goto err_cq;
 
@@ -221,7 +221,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
        }
 
        err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,
-                           cq->db.dma, &cq->mcq);
+                           cq->db.dma, &cq->mcq, 0);
        if (err)
                goto err_dbmap;
 
@@ -250,7 +250,7 @@ err_mtt:
 
 err_db:
        if (!context)
-               mlx4_ib_db_free(dev, &cq->db);
+               mlx4_db_free(dev->dev, &cq->db);
 
 err_cq:
        kfree(cq);
@@ -435,7 +435,7 @@ int mlx4_ib_destroy_cq(struct ib_cq *cq)
                ib_umem_release(mcq->umem);
        } else {
                mlx4_ib_free_cq_buf(dev, &mcq->buf, cq->cqe + 1);
-               mlx4_ib_db_free(dev, &mcq->db);
+               mlx4_db_free(dev->dev, &mcq->db);
        }
 
        kfree(mcq);
index 1c36087aef140d0dd1f9e0fa44dc7d2c245c8980..8aee4233b388e2938036567e43f78fa425cf0e30 100644 (file)
 
 #include "mlx4_ib.h"
 
-struct mlx4_ib_db_pgdir {
-       struct list_head        list;
-       DECLARE_BITMAP(order0, MLX4_IB_DB_PER_PAGE);
-       DECLARE_BITMAP(order1, MLX4_IB_DB_PER_PAGE / 2);
-       unsigned long          *bits[2];
-       __be32                 *db_page;
-       dma_addr_t              db_dma;
-};
-
-static struct mlx4_ib_db_pgdir *mlx4_ib_alloc_db_pgdir(struct mlx4_ib_dev *dev)
-{
-       struct mlx4_ib_db_pgdir *pgdir;
-
-       pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
-       if (!pgdir)
-               return NULL;
-
-       bitmap_fill(pgdir->order1, MLX4_IB_DB_PER_PAGE / 2);
-       pgdir->bits[0] = pgdir->order0;
-       pgdir->bits[1] = pgdir->order1;
-       pgdir->db_page = dma_alloc_coherent(dev->ib_dev.dma_device,
-                                           PAGE_SIZE, &pgdir->db_dma,
-                                           GFP_KERNEL);
-       if (!pgdir->db_page) {
-               kfree(pgdir);
-               return NULL;
-       }
-
-       return pgdir;
-}
-
-static int mlx4_ib_alloc_db_from_pgdir(struct mlx4_ib_db_pgdir *pgdir,
-                                      struct mlx4_ib_db *db, int order)
-{
-       int o;
-       int i;
-
-       for (o = order; o <= 1; ++o) {
-               i = find_first_bit(pgdir->bits[o], MLX4_IB_DB_PER_PAGE >> o);
-               if (i < MLX4_IB_DB_PER_PAGE >> o)
-                       goto found;
-       }
-
-       return -ENOMEM;
-
-found:
-       clear_bit(i, pgdir->bits[o]);
-
-       i <<= o;
-
-       if (o > order)
-               set_bit(i ^ 1, pgdir->bits[order]);
-
-       db->u.pgdir = pgdir;
-       db->index   = i;
-       db->db      = pgdir->db_page + db->index;
-       db->dma     = pgdir->db_dma  + db->index * 4;
-       db->order   = order;
-
-       return 0;
-}
-
-int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order)
-{
-       struct mlx4_ib_db_pgdir *pgdir;
-       int ret = 0;
-
-       mutex_lock(&dev->pgdir_mutex);
-
-       list_for_each_entry(pgdir, &dev->pgdir_list, list)
-               if (!mlx4_ib_alloc_db_from_pgdir(pgdir, db, order))
-                       goto out;
-
-       pgdir = mlx4_ib_alloc_db_pgdir(dev);
-       if (!pgdir) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       list_add(&pgdir->list, &dev->pgdir_list);
-
-       /* This should never fail -- we just allocated an empty page: */
-       WARN_ON(mlx4_ib_alloc_db_from_pgdir(pgdir, db, order));
-
-out:
-       mutex_unlock(&dev->pgdir_mutex);
-
-       return ret;
-}
-
-void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db)
-{
-       int o;
-       int i;
-
-       mutex_lock(&dev->pgdir_mutex);
-
-       o = db->order;
-       i = db->index;
-
-       if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
-               clear_bit(i ^ 1, db->u.pgdir->order0);
-               ++o;
-       }
-
-       i >>= o;
-       set_bit(i, db->u.pgdir->bits[o]);
-
-       if (bitmap_full(db->u.pgdir->order1, MLX4_IB_DB_PER_PAGE / 2)) {
-               dma_free_coherent(dev->ib_dev.dma_device, PAGE_SIZE,
-                                 db->u.pgdir->db_page, db->u.pgdir->db_dma);
-               list_del(&db->u.pgdir->list);
-               kfree(db->u.pgdir);
-       }
-
-       mutex_unlock(&dev->pgdir_mutex);
-}
-
 struct mlx4_ib_user_db_page {
        struct list_head        list;
        struct ib_umem         *umem;
@@ -160,7 +42,7 @@ struct mlx4_ib_user_db_page {
 };
 
 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
-                       struct mlx4_ib_db *db)
+                       struct mlx4_db *db)
 {
        struct mlx4_ib_user_db_page *page;
        struct ib_umem_chunk *chunk;
@@ -181,7 +63,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
        page->user_virt = (virt & PAGE_MASK);
        page->refcnt    = 0;
        page->umem      = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,
-                                     PAGE_SIZE, 0);
+                                     PAGE_SIZE, 0, 0);
        if (IS_ERR(page->umem)) {
                err = PTR_ERR(page->umem);
                kfree(page);
@@ -202,7 +84,7 @@ out:
        return err;
 }
 
-void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db)
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db)
 {
        mutex_lock(&context->db_page_mutex);
 
index 4d9b5ac42202cc3df5b81961068f18e65beb00ac..4d61e32866c6029c2dab38f873f6cbbc189df999 100644 (file)
@@ -557,9 +557,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                goto err_uar;
        MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
 
-       INIT_LIST_HEAD(&ibdev->pgdir_list);
-       mutex_init(&ibdev->pgdir_mutex);
-
        ibdev->dev = dev;
 
        strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
index 9e637323c155769ff659439c801a8782aa8fb533..5cf994794d25f2ff3f99828696a990c49d58dd31 100644 (file)
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
 
-enum {
-       MLX4_IB_DB_PER_PAGE     = PAGE_SIZE / 4
-};
-
-struct mlx4_ib_db_pgdir;
-struct mlx4_ib_user_db_page;
-
-struct mlx4_ib_db {
-       __be32                 *db;
-       union {
-               struct mlx4_ib_db_pgdir        *pgdir;
-               struct mlx4_ib_user_db_page    *user_page;
-       }                       u;
-       dma_addr_t              dma;
-       int                     index;
-       int                     order;
-};
-
 struct mlx4_ib_ucontext {
        struct ib_ucontext      ibucontext;
        struct mlx4_uar         uar;
@@ -88,7 +70,7 @@ struct mlx4_ib_cq {
        struct mlx4_cq          mcq;
        struct mlx4_ib_cq_buf   buf;
        struct mlx4_ib_cq_resize *resize_buf;
-       struct mlx4_ib_db       db;
+       struct mlx4_db          db;
        spinlock_t              lock;
        struct mutex            resize_mutex;
        struct ib_umem         *umem;
@@ -127,7 +109,7 @@ struct mlx4_ib_qp {
        struct mlx4_qp          mqp;
        struct mlx4_buf         buf;
 
-       struct mlx4_ib_db       db;
+       struct mlx4_db          db;
        struct mlx4_ib_wq       rq;
 
        u32                     doorbell_qpn;
@@ -154,7 +136,7 @@ struct mlx4_ib_srq {
        struct ib_srq           ibsrq;
        struct mlx4_srq         msrq;
        struct mlx4_buf         buf;
-       struct mlx4_ib_db       db;
+       struct mlx4_db          db;
        u64                    *wrid;
        spinlock_t              lock;
        int                     head;
@@ -175,9 +157,6 @@ struct mlx4_ib_dev {
        struct mlx4_dev        *dev;
        void __iomem           *uar_map;
 
-       struct list_head        pgdir_list;
-       struct mutex            pgdir_mutex;
-
        struct mlx4_uar         priv_uar;
        u32                     priv_pdn;
        MLX4_DECLARE_DOORBELL_LOCK(uar_lock);
@@ -248,11 +227,9 @@ static inline struct mlx4_ib_ah *to_mah(struct ib_ah *ibah)
        return container_of(ibah, struct mlx4_ib_ah, ibah);
 }
 
-int mlx4_ib_db_alloc(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db, int order);
-void mlx4_ib_db_free(struct mlx4_ib_dev *dev, struct mlx4_ib_db *db);
 int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt,
-                       struct mlx4_ib_db *db);
-void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_ib_db *db);
+                       struct mlx4_db *db);
+void mlx4_ib_db_unmap_user(struct mlx4_ib_ucontext *context, struct mlx4_db *db);
 
 struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc);
 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
index fe2c2e94a5f87031de3c68591c299b0c38bf353f..68e92485fc76f132ea079110691b1c283cd337e0 100644 (file)
@@ -132,7 +132,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        if (!mr)
                return ERR_PTR(-ENOMEM);
 
-       mr->umem = ib_umem_get(pd->uobject->context, start, length, access_flags);
+       mr->umem = ib_umem_get(pd->uobject->context, start, length,
+                              access_flags, 0);
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err_free;
index b75efae7e449f0d974b16672dc283a1e8ccb618d..8e02ecfec18872c4d8f5585cd01cf49c5c8b91b6 100644 (file)
@@ -482,7 +482,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                        goto err;
 
                qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
-                                      qp->buf_size, 0);
+                                      qp->buf_size, 0, 0);
                if (IS_ERR(qp->umem)) {
                        err = PTR_ERR(qp->umem);
                        goto err;
@@ -514,7 +514,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                        goto err;
 
                if (!init_attr->srq) {
-                       err = mlx4_ib_db_alloc(dev, &qp->db, 0);
+                       err = mlx4_db_alloc(dev->dev, &qp->db, 0);
                        if (err)
                                goto err;
 
@@ -580,7 +580,7 @@ err_buf:
 
 err_db:
        if (!pd->uobject && !init_attr->srq)
-               mlx4_ib_db_free(dev, &qp->db);
+               mlx4_db_free(dev->dev, &qp->db);
 
 err:
        return err;
@@ -666,7 +666,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
                kfree(qp->rq.wrid);
                mlx4_buf_free(dev->dev, qp->buf_size, &qp->buf);
                if (!qp->ibqp.srq)
-                       mlx4_ib_db_free(dev, &qp->db);
+                       mlx4_db_free(dev->dev, &qp->db);
        }
 }
 
index beaa3b06cf58b1eae91d08c2d6004b83223fcff0..12d6bc6f8007da0ced4158b1a956c7da52bf56c0 100644 (file)
@@ -109,7 +109,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
                }
 
                srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
-                                       buf_size, 0);
+                                       buf_size, 0, 0);
                if (IS_ERR(srq->umem)) {
                        err = PTR_ERR(srq->umem);
                        goto err_srq;
@@ -129,7 +129,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
                if (err)
                        goto err_mtt;
        } else {
-               err = mlx4_ib_db_alloc(dev, &srq->db, 0);
+               err = mlx4_db_alloc(dev->dev, &srq->db, 0);
                if (err)
                        goto err_srq;
 
@@ -200,7 +200,7 @@ err_buf:
 
 err_db:
        if (!pd->uobject)
-               mlx4_ib_db_free(dev, &srq->db);
+               mlx4_db_free(dev->dev, &srq->db);
 
 err_srq:
        kfree(srq);
@@ -267,7 +267,7 @@ int mlx4_ib_destroy_srq(struct ib_srq *srq)
                kfree(msrq->wrid);
                mlx4_buf_free(dev->dev, msrq->msrq.max << msrq->msrq.wqe_shift,
                              &msrq->buf);
-               mlx4_ib_db_free(dev, &msrq->db);
+               mlx4_db_free(dev->dev, &msrq->db);
        }
 
        kfree(msrq);
index 3538da16e3fe9ec4cea862fa7b0100fb3d3225c7..820205dec560954562b22c570f7eb13861ec8824 100644 (file)
@@ -818,15 +818,9 @@ int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
 
 void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
 {
-       u32 key;
-
        if (!fmr->maps)
                return;
 
-       key = tavor_key_to_hw_index(fmr->ibmr.lkey);
-       key &= dev->limits.num_mpts - 1;
-       fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);
-
        fmr->maps = 0;
 
        writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
@@ -834,16 +828,9 @@ void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
 
 void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
 {
-       u32 key;
-
        if (!fmr->maps)
                return;
 
-       key = arbel_key_to_hw_index(fmr->ibmr.lkey);
-       key &= dev->limits.num_mpts - 1;
-       key = adjust_key(dev, key);
-       fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
-
        fmr->maps = 0;
 
        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
index 696e1f30233268a6f69261ca10dd4fb0beadef28..be34f99ca62579b916970d5387110b370b99602d 100644 (file)
@@ -39,6 +39,8 @@
 #include <rdma/ib_smi.h>
 #include <rdma/ib_umem.h>
 #include <rdma/ib_user_verbs.h>
+
+#include <linux/sched.h>
 #include <linux/mm.h>
 
 #include "mthca_dev.h"
@@ -367,6 +369,8 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
                return ERR_PTR(-EFAULT);
        }
 
+       context->reg_mr_warned = 0;
+
        return &context->ibucontext;
 }
 
@@ -1006,17 +1010,31 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        struct mthca_dev *dev = to_mdev(pd->device);
        struct ib_umem_chunk *chunk;
        struct mthca_mr *mr;
+       struct mthca_reg_mr ucmd;
        u64 *pages;
        int shift, n, len;
        int i, j, k;
        int err = 0;
        int write_mtt_size;
 
+       if (udata->inlen - sizeof (struct ib_uverbs_cmd_hdr) < sizeof ucmd) {
+               if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
+                       mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
+                                  current->comm);
+                       mthca_warn(dev, "  Update libmthca to fix this.\n");
+               }
+               ++to_mucontext(pd->uobject->context)->reg_mr_warned;
+               ucmd.mr_attrs = 0;
+       } else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+               return ERR_PTR(-EFAULT);
+
        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);
 
-       mr->umem = ib_umem_get(pd->uobject->context, start, length, acc);
+       mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
+                              ucmd.mr_attrs & MTHCA_MR_DMASYNC);
+
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err;
index 262616c8ebb644d75ab603e5ce12b5f668baa5ac..934bf9544037c582aaeb60754d78ad534aef618c 100644 (file)
@@ -67,6 +67,7 @@ struct mthca_ucontext {
        struct ib_ucontext          ibucontext;
        struct mthca_uar            uar;
        struct mthca_user_db_table *db_tab;
+       int                         reg_mr_warned;
 };
 
 struct mthca_mtt;
index 02cc0a766f3ac767db1aba8333816ec58b402775..e1262c942db8c6ae31c91ec5b006454e239b2331 100644 (file)
@@ -61,6 +61,16 @@ struct mthca_alloc_pd_resp {
        __u32 reserved;
 };
 
+struct mthca_reg_mr {
+/*
+ * Mark the memory region with a DMA attribute that causes
+ * in-flight DMA to be flushed when the region is written to:
+ */
+#define MTHCA_MR_DMASYNC       0x1
+       __u32 mr_attrs;
+       __u32 reserved;
+};
+
 struct mthca_create_cq {
        __u32 lkey;
        __u32 pdn;
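The struct mthca_reg_mr addition above pairs with the udata size check in mthca_reg_user_mr: old libmthca sends a shorter register-MR command, so the driver warns once and treats the missing attribute word as zero. Below is a minimal sketch of that compatibility pattern, not part of the patch; my_reg_mr and parse_reg_mr_cmd are hypothetical stand-ins, while ib_udata, ib_copy_from_udata() and ib_uverbs_cmd_hdr are the real uverbs interfaces used by the hunks above.

#include <linux/errno.h>
#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

/* Hypothetical stand-in for struct mthca_reg_mr above. */
struct my_reg_mr {
	__u32 mr_attrs;		/* e.g. a DMASYNC-style flag */
	__u32 reserved;
};

static int parse_reg_mr_cmd(struct ib_udata *udata, struct my_reg_mr *ucmd)
{
	/* udata->inlen still counts the uverbs command header. */
	if (udata->inlen - sizeof(struct ib_uverbs_cmd_hdr) < sizeof(*ucmd)) {
		/* Old userspace: no attribute word was sent, default to 0. */
		ucmd->mr_attrs = 0;
		return 0;
	}
	/* New userspace: copy the full command, including mr_attrs. */
	return ib_copy_from_udata(ucmd, udata, sizeof(*ucmd)) ? -EFAULT : 0;
}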
index 2aeb7ac972a9137d5d5f8ef47163032fbea418b3..d449eb6ec78e7d439a78cc824c260be16b32f93f 100644 (file)
@@ -2,6 +2,7 @@ config INFINIBAND_NES
        tristate "NetEffect RNIC Driver"
        depends on PCI && INET && INFINIBAND
        select LIBCRC32C
+       select INET_LRO
        ---help---
          This is a low-level driver for NetEffect RDMA enabled
          Network Interface Cards (RNIC).
index b046262ed638114bb8ce8c3d64ff9dd22267a62b..9f7364a9096d3d5b5ab42d07ee39ab4617f188cf 100644 (file)
@@ -91,6 +91,10 @@ unsigned int nes_debug_level = 0;
 module_param_named(debug_level, nes_debug_level, uint, 0644);
 MODULE_PARM_DESC(debug_level, "Enable debug output level");
 
+unsigned int nes_lro_max_aggr = NES_LRO_MAX_AGGR;
+module_param(nes_lro_max_aggr, uint, 0444);
+MODULE_PARM_DESC(nes_lro_max_aggr, "NIC LRO max packet aggregation");
+
 LIST_HEAD(nes_adapter_list);
 static LIST_HEAD(nes_dev_list);
 
@@ -139,8 +143,9 @@ static int nes_inetaddr_event(struct notifier_block *notifier,
 
        addr = ntohl(ifa->ifa_address);
        mask = ntohl(ifa->ifa_mask);
-       nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address %08X, netmask %08X.\n",
-                       addr, mask);
+       nes_debug(NES_DBG_NETDEV, "nes_inetaddr_event: ip address " NIPQUAD_FMT
+                 ", netmask " NIPQUAD_FMT ".\n",
+                 HIPQUAD(addr), HIPQUAD(mask));
        list_for_each_entry(nesdev, &nes_dev_list, list) {
                nes_debug(NES_DBG_NETDEV, "Nesdev list entry = 0x%p. (%s)\n",
                                nesdev, nesdev->netdev[0]->name);
@@ -353,13 +358,11 @@ struct ib_qp *nes_get_qp(struct ib_device *device, int qpn)
  */
 static void nes_print_macaddr(struct net_device *netdev)
 {
-       nes_debug(NES_DBG_INIT, "%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, IRQ %u\n",
-                       netdev->name,
-                       netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
-                       netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
-                       netdev->irq);
-}
+       DECLARE_MAC_BUF(mac);
 
+       nes_debug(NES_DBG_INIT, "%s: %s, IRQ %u\n",
+                 netdev->name, print_mac(mac, netdev->dev_addr), netdev->irq);
+}
 
 /**
  * nes_interrupt - handle interrupts
index cdf2e9ad62f7512b59d601558ac53349f732552f..1f9f7bf73862a44d1cf0a4a2ac7d1e5099ef70bf 100644 (file)
@@ -173,6 +173,7 @@ extern int disable_mpa_crc;
 extern unsigned int send_first;
 extern unsigned int nes_drv_opt;
 extern unsigned int nes_debug_level;
+extern unsigned int nes_lro_max_aggr;
 
 extern struct list_head nes_adapter_list;
 
@@ -535,8 +536,8 @@ int nes_register_ofa_device(struct nes_ib_device *);
 int nes_read_eeprom_values(struct nes_device *, struct nes_adapter *);
 void nes_write_1G_phy_reg(struct nes_device *, u8, u8, u16);
 void nes_read_1G_phy_reg(struct nes_device *, u8, u8, u16 *);
-void nes_write_10G_phy_reg(struct nes_device *, u16, u8, u16);
-void nes_read_10G_phy_reg(struct nes_device *, u16, u8);
+void nes_write_10G_phy_reg(struct nes_device *, u16, u8, u16, u16);
+void nes_read_10G_phy_reg(struct nes_device *, u8, u8, u16);
 struct nes_cqp_request *nes_get_cqp_request(struct nes_device *);
 void nes_post_cqp_request(struct nes_device *, struct nes_cqp_request *, int);
 int nes_arp_table(struct nes_device *, u32, u8 *, u32);
index d0738623bcf3529ecbef5ae6f64f8f7037720e33..9a4b40fae40db3c8994daf5ea66a5cfcfd3b55b5 100644 (file)
@@ -594,7 +594,7 @@ static void nes_cm_timer_tick(unsigned long pass)
                                continue;
                        }
                        /* this seems like the correct place, but leave send entry unprotected */
-                       // spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags);
+                       /* spin_unlock_irqrestore(&cm_node->retrans_list_lock, flags); */
                        atomic_inc(&send_entry->skb->users);
                        cm_packets_retrans++;
                        nes_debug(NES_DBG_CM, "Retransmitting send_entry %p for node %p,"
@@ -852,8 +852,8 @@ static struct nes_cm_node *find_node(struct nes_cm_core *cm_core,
        /* get a handle on the hte */
        hte = &cm_core->connected_nodes;
 
-       nes_debug(NES_DBG_CM, "Searching for an owner node:%x:%x from core %p->%p\n",
-                       loc_addr, loc_port, cm_core, hte);
+       nes_debug(NES_DBG_CM, "Searching for an owner node: " NIPQUAD_FMT ":%x from core %p->%p\n",
+                 HIPQUAD(loc_addr), loc_port, cm_core, hte);
 
        /* walk list and find cm_node associated with this session ID */
        spin_lock_irqsave(&cm_core->ht_lock, flags);
@@ -902,8 +902,8 @@ static struct nes_cm_listener *find_listener(struct nes_cm_core *cm_core,
        }
        spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
 
-       nes_debug(NES_DBG_CM, "Unable to find listener- %x:%x\n",
-                       dst_addr, dst_port);
+       nes_debug(NES_DBG_CM, "Unable to find listener for " NIPQUAD_FMT ":%x\n",
+                 HIPQUAD(dst_addr), dst_port);
 
        /* no listener */
        return NULL;
@@ -1054,6 +1054,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
        int arpindex = 0;
        struct nes_device *nesdev;
        struct nes_adapter *nesadapter;
+       DECLARE_MAC_BUF(mac);
 
        /* create an hte and cm_node for this instance */
        cm_node = kzalloc(sizeof(*cm_node), GFP_ATOMIC);
@@ -1066,8 +1067,9 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
        cm_node->loc_port = cm_info->loc_port;
        cm_node->rem_port = cm_info->rem_port;
        cm_node->send_write0 = send_first;
-       nes_debug(NES_DBG_CM, "Make node addresses : loc = %x:%x, rem = %x:%x\n",
-                       cm_node->loc_addr, cm_node->loc_port, cm_node->rem_addr, cm_node->rem_port);
+       nes_debug(NES_DBG_CM, "Make node addresses : loc = " NIPQUAD_FMT ":%x, rem = " NIPQUAD_FMT ":%x\n",
+                 HIPQUAD(cm_node->loc_addr), cm_node->loc_port,
+                 HIPQUAD(cm_node->rem_addr), cm_node->rem_port);
        cm_node->listener = listener;
        cm_node->netdev = nesvnic->netdev;
        cm_node->cm_id = cm_info->cm_id;
@@ -1116,11 +1118,8 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
 
        /* copy the mac addr to node context */
        memcpy(cm_node->rem_mac, nesadapter->arp_table[arpindex].mac_addr, ETH_ALEN);
-       nes_debug(NES_DBG_CM, "Remote mac addr from arp table:%02x,"
-                       " %02x, %02x, %02x, %02x, %02x\n",
-                       cm_node->rem_mac[0], cm_node->rem_mac[1],
-                       cm_node->rem_mac[2], cm_node->rem_mac[3],
-                       cm_node->rem_mac[4], cm_node->rem_mac[5]);
+       nes_debug(NES_DBG_CM, "Remote mac addr from arp table: %s\n",
+                 print_mac(mac, cm_node->rem_mac));
 
        add_hte_node(cm_core, cm_node);
        atomic_inc(&cm_nodes_created);
@@ -1336,7 +1335,7 @@ static int process_packet(struct nes_cm_node *cm_node, struct sk_buff *skb,
                                                        cm_node->loc_addr, cm_node->loc_port,
                                                        cm_node->rem_addr, cm_node->rem_port,
                                                        cm_node->state, atomic_read(&cm_node->ref_count));
-                               // create event
+                               /* create event */
                                cm_node->state = NES_CM_STATE_CLOSED;
 
                                create_event(cm_node, NES_CM_EVENT_ABORTED);
@@ -1670,7 +1669,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
        if (!cm_node)
                return NULL;
 
-       // set our node side to client (active) side
+       /* set our node side to client (active) side */
        cm_node->tcp_cntxt.client = 1;
        cm_node->tcp_cntxt.rcv_wscale = NES_CM_DEFAULT_RCV_WND_SCALE;
 
@@ -1695,7 +1694,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
                        loopbackremotenode->mpa_frame_size = mpa_frame_size -
                                        sizeof(struct ietf_mpa_frame);
 
-                       // we are done handling this state, set node to a TSA state
+                       /* we are done handling this state, set node to a TSA state */
                        cm_node->state = NES_CM_STATE_TSA;
                        cm_node->tcp_cntxt.rcv_nxt = loopbackremotenode->tcp_cntxt.loc_seq_num;
                        loopbackremotenode->tcp_cntxt.rcv_nxt = cm_node->tcp_cntxt.loc_seq_num;
@@ -1850,8 +1849,10 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core, struct nes_vnic *nesvni
        nfo.rem_addr = ntohl(iph->saddr);
        nfo.rem_port = ntohs(tcph->source);
 
-       nes_debug(NES_DBG_CM, "Received packet: dest=0x%08X:0x%04X src=0x%08X:0x%04X\n",
-                       iph->daddr, tcph->dest, iph->saddr, tcph->source);
+       nes_debug(NES_DBG_CM, "Received packet: dest=" NIPQUAD_FMT
+                 ":0x%04X src=" NIPQUAD_FMT ":0x%04X\n",
+                 NIPQUAD(iph->daddr), tcph->dest,
+                 NIPQUAD(iph->saddr), tcph->source);
 
        /* note: this call is going to increment cm_node ref count */
        cm_node = find_node(cm_core,
index aa53aab91bf8a3341dd3df72d155c4b2201db2c1..8dc70f9bad2f49f828b95dca4544ec46c1aa95b2 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/if_vlan.h>
+#include <linux/inet_lro.h>
 
 #include "nes.h"
 
@@ -636,6 +637,15 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
                        nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
                        return 0;
                }
+
+               i = 0;
+               while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
+                       mdelay(1);
+               if (i >= 10000) {
+                       printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
+                              nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
+                       return 0;
+               }
        }
 
        /* port reset */
@@ -684,17 +694,6 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
                }
        }
 
-
-
-       i = 0;
-       while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
-               mdelay(1);
-       if (i >= 10000) {
-               printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
-                               nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
-               return 0;
-       }
-
        return port_count;
 }
 
@@ -834,7 +833,7 @@ static void nes_init_csr_ne020(struct nes_device *nesdev, u8 hw_rev, u8 port_cou
        nes_write_indexed(nesdev, 0x00000900, 0x20000001);
        nes_write_indexed(nesdev, 0x000060C0, 0x0000028e);
        nes_write_indexed(nesdev, 0x000060C8, 0x00000020);
-                                                                                                               //
+
        nes_write_indexed(nesdev, 0x000001EC, 0x7b2625a0);
        /* nes_write_indexed(nesdev, 0x000001EC, 0x5f2625a0); */
 
@@ -1209,11 +1208,16 @@ int nes_init_phy(struct nes_device *nesdev)
 {
        struct nes_adapter *nesadapter = nesdev->nesadapter;
        u32 counter = 0;
+       u32 sds_common_control0;
        u32 mac_index = nesdev->mac_index;
-       u32 tx_config;
+       u32 tx_config = 0;
        u16 phy_data;
+       u32 temp_phy_data = 0;
+       u32 temp_phy_data2 = 0;
+       u32 i = 0;
 
-       if (nesadapter->OneG_Mode) {
+       if ((nesadapter->OneG_Mode) &&
+           (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
                nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
                if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_1G) {
                        printk(PFX "%s: Programming mdc config for 1G\n", __func__);
@@ -1225,7 +1229,7 @@ int nes_init_phy(struct nes_device *nesdev)
                nes_read_1G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index], &phy_data);
                nes_debug(NES_DBG_PHY, "Phy data from register 1 phy address %u = 0x%X.\n",
                                nesadapter->phy_index[mac_index], phy_data);
-               nes_write_1G_phy_reg(nesdev, 23, nesadapter->phy_index[mac_index],  0xb000);
+               nes_write_1G_phy_reg(nesdev, 23, nesadapter->phy_index[mac_index], 0xb000);
 
                /* Reset the PHY */
                nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], 0x8000);
@@ -1279,12 +1283,126 @@ int nes_init_phy(struct nes_device *nesdev)
                nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], &phy_data);
                nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[mac_index], phy_data | 0x0300);
        } else {
-               if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) {
+               if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) ||
+                   (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) {
                        /* setup 10G MDIO operation */
                        tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
                        tx_config |= 0x14;
                        nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
                }
+               if ((nesadapter->phy_type[mac_index] == NES_PHY_TYPE_ARGUS)) {
+                       nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee);
+
+                       temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+                       mdelay(10);
+                       nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee);
+                       temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+
+                       /*
+                        * If firmware is already running (like after a
+                        * driver unload/reload), don't do anything.
+                        */
+                       if (temp_phy_data == temp_phy_data2) {
+                               /* configure QT2505 AMCC PHY */
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0x0000, 0x8000);
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc300, 0x0000);
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc302, 0x0044);
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc318, 0x0052);
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc319, 0x0008);
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc31a, 0x0098);
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0026, 0x0E00);
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0027, 0x0000);
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0x0028, 0xA528);
+
+                               /*
+                                * remove micro from reset; chip boots from ROM,
+                                * uploads EEPROM f/w image, uC executes f/w
+                                */
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc300, 0x0002);
+
+                               /*
+                                * wait for heart beat to start to
+                                * know loading is done
+                                */
+                               counter = 0;
+                               do {
+                                       nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee);
+                                       temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+                                       if (counter++ > 1000) {
+                                               nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from heartbeat check <this is bad!!!>\n");
+                                               break;
+                                       }
+                                       mdelay(100);
+                                       nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7ee);
+                                       temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+                               } while ((temp_phy_data2 == temp_phy_data));
+
+                               /*
+                                * wait for tracking to start to know
+                                * f/w is good to go
+                                */
+                               counter = 0;
+                               do {
+                                       nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x3, 0xd7fd);
+                                       temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+                                       if (counter++ > 1000) {
+                                               nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from status check <this is bad!!!>\n");
+                                               break;
+                                       }
+                                       mdelay(1000);
+                                       /*
+                                        * nes_debug(NES_DBG_PHY, "AMCC PHY- phy_status not ready yet = 0x%02X\n",
+                                        *                      temp_phy_data);
+                                        */
+                               } while (((temp_phy_data & 0xff) != 0x50) && ((temp_phy_data & 0xff) != 0x70));
+
+                               /* set LOS Control invert RXLOSB_I_PADINV */
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd003, 0x0000);
+                               /* set LOS Control to mask of RXLOSB_I */
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xc314, 0x0042);
+                               /* set LED1 to input mode (LED1 and LED2 share same LED) */
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd006, 0x0007);
+                               /* set LED2 to RX link_status and activity */
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd007, 0x000A);
+                               /* set LED3 to RX link_status */
+                               nes_write_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 0x1, 0xd008, 0x0009);
+
+                               /*
+                                * reset the res-calibration on t2
+                                * serdes; ensures it is stable after
+                                * the amcc phy is stable
+                                */
+
+                               sds_common_control0  = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0);
+                               sds_common_control0 |= 0x1;
+                               nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0);
+
+                               /* release the res-calibration reset */
+                               sds_common_control0 &= 0xfffffffe;
+                               nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds_common_control0);
+
+                               i = 0;
+                               while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
+                                               && (i++ < 5000)) {
+                                       /* mdelay(1); */
+                               }
+
+                               /*
+                                * wait for link train done before moving on,
+                                * or will get an interrupt storm
+                                */
+                               counter = 0;
+                               do {
+                                       temp_phy_data = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
+                                                               (0x200 * (nesdev->mac_index & 1)));
+                                       if (counter++ > 1000) {
+                                               nes_debug(NES_DBG_PHY, "AMCC PHY- breaking from link train wait <this is bad, link didn't train!!!>\n");
+                                               break;
+                                       }
+                                       mdelay(1);
+                               } while (((temp_phy_data & 0x0f1f0000) != 0x0f0f0000));
+                       }
+               }
        }
        return 0;
 }
@@ -1377,6 +1495,25 @@ static void nes_rq_wqes_timeout(unsigned long parm)
 }
 
 
+static int nes_lro_get_skb_hdr(struct sk_buff *skb, void **iphdr,
+                              void **tcph, u64 *hdr_flags, void *priv)
+{
+       unsigned int ip_len;
+       struct iphdr *iph;
+       skb_reset_network_header(skb);
+       iph = ip_hdr(skb);
+       if (iph->protocol != IPPROTO_TCP)
+               return -1;
+       ip_len = ip_hdrlen(skb);
+       skb_set_transport_header(skb, ip_len);
+       *tcph = tcp_hdr(skb);
+
+       *hdr_flags = LRO_IPV4 | LRO_TCP;
+       *iphdr = iph;
+       return 0;
+}
+
+
 /**
  * nes_init_nic_qp
  */
@@ -1522,10 +1659,10 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
        }
 
        u64temp = (u64)nesvnic->nic.sq_pbase;
-       nic_context->context_words[NES_NIC_CTX_SQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
+       nic_context->context_words[NES_NIC_CTX_SQ_LOW_IDX]  = cpu_to_le32((u32)u64temp);
        nic_context->context_words[NES_NIC_CTX_SQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
        u64temp = (u64)nesvnic->nic.rq_pbase;
-       nic_context->context_words[NES_NIC_CTX_RQ_LOW_IDX] = cpu_to_le32((u32)u64temp);
+       nic_context->context_words[NES_NIC_CTX_RQ_LOW_IDX]  = cpu_to_le32((u32)u64temp);
        nic_context->context_words[NES_NIC_CTX_RQ_HIGH_IDX] = cpu_to_le32((u32)(u64temp >> 32));
 
        cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] = cpu_to_le32(NES_CQP_CREATE_QP |
@@ -1577,7 +1714,7 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
                nic_rqe = &nesvnic->nic.rq_vbase[counter];
                nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_1_0_IDX] = cpu_to_le32(nesvnic->max_frame_size);
                nic_rqe->wqe_words[NES_NIC_RQ_WQE_LENGTH_3_2_IDX] = 0;
-               nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX] = cpu_to_le32((u32)pmem);
+               nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]  = cpu_to_le32((u32)pmem);
                nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX] = cpu_to_le32((u32)((u64)pmem >> 32));
                nesvnic->nic.rx_skb[counter] = skb;
        }
@@ -1594,15 +1731,21 @@ int nes_init_nic_qp(struct nes_device *nesdev, struct net_device *netdev)
        nesvnic->rq_wqes_timer.function = nes_rq_wqes_timeout;
        nesvnic->rq_wqes_timer.data = (unsigned long)nesvnic;
        nes_debug(NES_DBG_INIT, "NAPI support Enabled\n");
-
        if (nesdev->nesadapter->et_use_adaptive_rx_coalesce)
        {
                nes_nic_init_timer(nesdev);
                if (netdev->mtu > 1500)
                        jumbomode = 1;
-                nes_nic_init_timer_defaults(nesdev, jumbomode);
-       }
-
+               nes_nic_init_timer_defaults(nesdev, jumbomode);
+       }
+       nesvnic->lro_mgr.max_aggr       = nes_lro_max_aggr;
+       nesvnic->lro_mgr.max_desc       = NES_MAX_LRO_DESCRIPTORS;
+       nesvnic->lro_mgr.lro_arr        = nesvnic->lro_desc;
+       nesvnic->lro_mgr.get_skb_header = nes_lro_get_skb_hdr;
+       nesvnic->lro_mgr.features       = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
+       nesvnic->lro_mgr.dev            = netdev;
+       nesvnic->lro_mgr.ip_summed      = CHECKSUM_UNNECESSARY;
+       nesvnic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;
        return 0;
 }
 
@@ -1622,8 +1765,8 @@ void nes_destroy_nic_qp(struct nes_vnic *nesvnic)
 
        /* Free remaining NIC receive buffers */
        while (nesvnic->nic.rq_head != nesvnic->nic.rq_tail) {
-               nic_rqe = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail];
-               wqe_frag = (u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
+               nic_rqe   = &nesvnic->nic.rq_vbase[nesvnic->nic.rq_tail];
+               wqe_frag  = (u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_LOW_IDX]);
                wqe_frag |= ((u64)le32_to_cpu(nic_rqe->wqe_words[NES_NIC_RQ_WQE_FRAG0_HIGH_IDX])) << 32;
                pci_unmap_single(nesdev->pcidev, (dma_addr_t)wqe_frag,
                                nesvnic->max_frame_size, PCI_DMA_FROMDEVICE);
@@ -1706,17 +1849,17 @@ int nes_napi_isr(struct nes_device *nesdev)
        /* iff NIC, process here, else wait for DPC */
        if ((int_stat) && ((int_stat & 0x0000ff00) == int_stat)) {
                nesdev->napi_isr_ran = 0;
-               nes_write32(nesdev->regs+NES_INT_STAT,
-                               (int_stat &
-                               ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3)));
+               nes_write32(nesdev->regs + NES_INT_STAT,
+                       (int_stat &
+                       ~(NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0 | NES_INT_MAC1 | NES_INT_MAC2 | NES_INT_MAC3)));
 
                /* Process the CEQs */
                nes_process_ceq(nesdev, &nesdev->nesadapter->ceq[nesdev->nic_ceq_index]);
 
                if (unlikely((((nesadapter->et_rx_coalesce_usecs_irq) &&
-                                          (!nesadapter->et_use_adaptive_rx_coalesce)) ||
-                                         ((nesadapter->et_use_adaptive_rx_coalesce) &&
-                                          (nesdev->deepcq_count > nesadapter->et_pkt_rate_low)))) ) {
+                                       (!nesadapter->et_use_adaptive_rx_coalesce)) ||
+                                       ((nesadapter->et_use_adaptive_rx_coalesce) &&
+                                        (nesdev->deepcq_count > nesadapter->et_pkt_rate_low))))) {
                        if ((nesdev->int_req & NES_INT_TIMER) == 0) {
                                /* Enable Periodic timer interrupts */
                                nesdev->int_req |= NES_INT_TIMER;
@@ -1794,12 +1937,12 @@ void nes_dpc(unsigned long param)
                }
 
                if (int_stat) {
-                       if (int_stat & ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|
-                                       NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3)) {
+                       if (int_stat & ~(NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0 |
+                                       NES_INT_MAC1 | NES_INT_MAC2 | NES_INT_MAC3)) {
                                /* Ack the interrupts */
                                nes_write32(nesdev->regs+NES_INT_STAT,
-                                               (int_stat & ~(NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|
-                                               NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3)));
+                                       (int_stat & ~(NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0 |
+                                       NES_INT_MAC1 | NES_INT_MAC2 | NES_INT_MAC3)));
                        }
 
                        temp_int_stat = int_stat;
@@ -1864,8 +2007,8 @@ void nes_dpc(unsigned long param)
                        }
                }
                /* Don't use the interface interrupt bit stay in loop */
-               int_stat &= ~NES_INT_INTF|NES_INT_TIMER|NES_INT_MAC0|
-                               NES_INT_MAC1|NES_INT_MAC2|NES_INT_MAC3;
+               int_stat &= ~NES_INT_INTF | NES_INT_TIMER | NES_INT_MAC0 |
+                               NES_INT_MAC1 | NES_INT_MAC2 | NES_INT_MAC3;
        } while ((int_stat != 0) && (loop_counter++ < MAX_DPC_ITERATIONS));
 
        if (timer_ints == 1) {
@@ -1876,9 +2019,9 @@ void nes_dpc(unsigned long param)
                                        nesdev->timer_only_int_count = 0;
                                        nesdev->int_req &= ~NES_INT_TIMER;
                                        nes_write32(nesdev->regs + NES_INTF_INT_MASK, ~(nesdev->intf_int_req));
-                                       nes_write32(nesdev->regs+NES_INT_MASK, ~nesdev->int_req);
+                                       nes_write32(nesdev->regs + NES_INT_MASK, ~nesdev->int_req);
                                } else {
-                                       nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req));
+                                       nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req));
                                }
                        } else {
                                if (unlikely(nesadapter->et_use_adaptive_rx_coalesce))
@@ -1886,7 +2029,7 @@ void nes_dpc(unsigned long param)
                                        nes_nic_init_timer(nesdev);
                                }
                                nesdev->timer_only_int_count = 0;
-                               nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff|(~nesdev->int_req));
+                               nes_write32(nesdev->regs+NES_INT_MASK, 0x0000ffff | (~nesdev->int_req));
                        }
                } else {
                        nesdev->timer_only_int_count = 0;
@@ -1935,7 +2078,7 @@ static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
        do {
                if (le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX]) &
                                NES_CEQE_VALID) {
-                       u64temp = (((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX])))<<32) |
+                       u64temp = (((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_HIGH_IDX]))) << 32) |
                                                ((u64)(le32_to_cpu(ceq->ceq_vbase[head].ceqe_words[NES_CEQE_CQ_CTX_LOW_IDX])));
                        u64temp <<= 1;
                        cq = *((struct nes_hw_cq **)&u64temp);
@@ -1963,7 +2106,7 @@ static void nes_process_ceq(struct nes_device *nesdev, struct nes_hw_ceq *ceq)
  */
 static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
 {
-//     u64 u64temp;
+       /* u64 u64temp; */
        u32 head;
        u32 aeq_size;
        u32 aeqe_misc;
@@ -1982,8 +2125,10 @@ static void nes_process_aeq(struct nes_device *nesdev, struct nes_hw_aeq *aeq)
                if (aeqe_misc & (NES_AEQE_QP|NES_AEQE_CQ)) {
                        if (aeqe_cq_id >= NES_FIRST_QPN) {
                                /* dealing with an accelerated QP related AE */
-//                             u64temp = (((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])))<<32) |
-//                                     ((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX])));
+                               /*
+                                * u64temp = (((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX]))) << 32) |
+                                *           ((u64)(le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX])));
+                                */
                                nes_process_iwarp_aeqe(nesdev, (struct nes_hw_aeqe *)aeqe);
                        } else {
                                /* TODO: dealing with a CQP related AE */
@@ -2083,6 +2228,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
        u32 u32temp;
        u16 phy_data;
        u16 temp_phy_data;
+       u32 pcs_val  = 0x0f0f0000;
+       u32 pcs_mask = 0x0f1f0000;
 
        spin_lock_irqsave(&nesadapter->phy_lock, flags);
        if (nesadapter->mac_sw_state[mac_number] != NES_MAC_SW_IDLE) {
@@ -2146,13 +2293,30 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
                nes_debug(NES_DBG_PHY, "Eth SERDES Common Status: 0=0x%08X, 1=0x%08X\n",
                                nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0),
                                nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0+0x200));
-               pcs_control_status = nes_read_indexed(nesdev,
-                               NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index&1)*0x200));
-               pcs_control_status = nes_read_indexed(nesdev,
-                               NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index&1)*0x200));
+
+               if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_PUMA_1G) {
+                       switch (mac_index) {
+                       case 1:
+                       case 3:
+                               pcs_control_status = nes_read_indexed(nesdev,
+                                               NES_IDX_PHY_PCS_CONTROL_STATUS0 + 0x200);
+                               break;
+                       default:
+                               pcs_control_status = nes_read_indexed(nesdev,
+                                               NES_IDX_PHY_PCS_CONTROL_STATUS0);
+                               break;
+                       }
+               } else {
+                       pcs_control_status = nes_read_indexed(nesdev,
+                                       NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index & 1) * 0x200));
+                       pcs_control_status = nes_read_indexed(nesdev,
+                                       NES_IDX_PHY_PCS_CONTROL_STATUS0 + ((mac_index & 1) * 0x200));
+               }
+
                nes_debug(NES_DBG_PHY, "PCS PHY Control/Status%u: 0x%08X\n",
                                mac_index, pcs_control_status);
-               if (nesadapter->OneG_Mode) {
+               if ((nesadapter->OneG_Mode) &&
+                               (nesadapter->phy_type[mac_index] != NES_PHY_TYPE_PUMA_1G)) {
                        u32temp = 0x01010000;
                        if (nesadapter->port_count > 2) {
                                u32temp |= 0x02020000;
@@ -2161,24 +2325,59 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
                                phy_data = 0;
                                nes_debug(NES_DBG_PHY, "PCS says the link is down\n");
                        }
-               } else if (nesadapter->phy_type[mac_index] == NES_PHY_TYPE_IRIS) {
-                       nes_read_10G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index]);
-                       temp_phy_data = (u16)nes_read_indexed(nesdev,
-                                                               NES_IDX_MAC_MDIO_CONTROL);
-                       u32temp = 20;
-                       do {
-                               nes_read_10G_phy_reg(nesdev, 1, nesadapter->phy_index[mac_index]);
-                               phy_data = (u16)nes_read_indexed(nesdev,
-                                                               NES_IDX_MAC_MDIO_CONTROL);
-                               if ((phy_data == temp_phy_data) || (!(--u32temp)))
-                                       break;
-                               temp_phy_data = phy_data;
-                       } while (1);
-                       nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
-                               __func__, phy_data, nesadapter->mac_link_down ? "DOWN" : "UP");
-
                } else {
-                       phy_data = (0x0f0f0000 == (pcs_control_status & 0x0f1f0000)) ? 4 : 0;
+                       switch (nesadapter->phy_type[mac_index]) {
+                       case NES_PHY_TYPE_IRIS:
+                               nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
+                               temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+                               u32temp = 20;
+                               do {
+                                       nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
+                                       phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+                                       if ((phy_data == temp_phy_data) || (!(--u32temp)))
+                                               break;
+                                       temp_phy_data = phy_data;
+                               } while (1);
+                               nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
+                                       __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
+                               break;
+
+                       case NES_PHY_TYPE_ARGUS:
+                               /* clear the alarms */
+                               nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008);
+                               nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001);
+                               nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc002);
+                               nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc005);
+                               nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc006);
+                               nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9003);
+                               nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9004);
+                               nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 0x9005);
+                               /* check link status */
+                               nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
+                               temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+                               u32temp = 100;
+                               do {
+                                       nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
+
+                                       phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+                                       if ((phy_data == temp_phy_data) || (!(--u32temp)))
+                                               break;
+                                       temp_phy_data = phy_data;
+                               } while (1);
+                               nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
+                                       __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
+                               break;
+
+                       case NES_PHY_TYPE_PUMA_1G:
+                               if (mac_index < 2)
+                                       pcs_val = pcs_mask = 0x01010000;
+                               else
+                                       pcs_val = pcs_mask = 0x02020000;
+                               /* fall through */
+                       default:
+                               phy_data = (pcs_val == (pcs_control_status & pcs_mask)) ? 0x4 : 0x0;
+                               break;
+                       }
                }
 
                if (phy_data & 0x0004) {
@@ -2187,8 +2386,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
                                nes_debug(NES_DBG_PHY, "The Link is UP!!.  linkup was %d\n",
                                                nesvnic->linkup);
                                if (nesvnic->linkup == 0) {
-                                       printk(PFX "The Link is now up for port %u, netdev %p.\n",
-                                                       mac_index, nesvnic->netdev);
+                                       printk(PFX "The Link is now up for port %s, netdev %p.\n",
+                                                       nesvnic->netdev->name, nesvnic->netdev);
                                        if (netif_queue_stopped(nesvnic->netdev))
                                                netif_start_queue(nesvnic->netdev);
                                        nesvnic->linkup = 1;
@@ -2201,8 +2400,8 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
                                nes_debug(NES_DBG_PHY, "The Link is Down!!. linkup was %d\n",
                                                nesvnic->linkup);
                                if (nesvnic->linkup == 1) {
-                                       printk(PFX "The Link is now down for port %u, netdev %p.\n",
-                                                       mac_index, nesvnic->netdev);
+                                       printk(PFX "The Link is now down for port %s, netdev %p.\n",
+                                                       nesvnic->netdev->name, nesvnic->netdev);
                                        if (!(netif_queue_stopped(nesvnic->netdev)))
                                                netif_stop_queue(nesvnic->netdev);
                                        nesvnic->linkup = 0;
@@ -2256,10 +2455,13 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
        u16 pkt_type;
        u16 rqes_processed = 0;
        u8 sq_cqes = 0;
+       u8 nes_use_lro = 0;
 
        head = cq->cq_head;
        cq_size = cq->cq_size;
        cq->cqes_pending = 1;
+       if (nesvnic->netdev->features & NETIF_F_LRO)
+               nes_use_lro = 1;
        do {
                if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_NIC_CQE_MISC_IDX]) &
                                NES_NIC_CQE_VALID) {
@@ -2274,8 +2476,10 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
                                /* bump past the vlan tag */
                                wqe_fragment_length++;
                                if (le16_to_cpu(wqe_fragment_length[wqe_fragment_index]) != 0) {
-                                       u64temp = (u64) le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+wqe_fragment_index*2]);
-                                       u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+wqe_fragment_index*2]))<<32;
+                                       u64temp = (u64) le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX +
+                                                       wqe_fragment_index * 2]);
+                                       u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX +
+                                                       wqe_fragment_index * 2])) << 32;
                                        bus_address = (dma_addr_t)u64temp;
                                        if (test_and_clear_bit(nesnic->sq_tail, nesnic->first_frag_overflow)) {
                                                pci_unmap_single(nesdev->pcidev,
@@ -2285,8 +2489,10 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
                                        }
                                        for (; wqe_fragment_index < 5; wqe_fragment_index++) {
                                                if (wqe_fragment_length[wqe_fragment_index]) {
-                                                       u64temp = le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX+wqe_fragment_index*2]);
-                                                       u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX+wqe_fragment_index*2]))<<32;
+                                                       u64temp = le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_LOW_IDX +
+                                                                               wqe_fragment_index * 2]);
+                                                       u64temp += ((u64)le32_to_cpu(nic_sqe->wqe_words[NES_NIC_SQ_WQE_FRAG0_HIGH_IDX
+                                                                               + wqe_fragment_index * 2])) << 32;
                                                        bus_address = (dma_addr_t)u64temp;
                                                        pci_unmap_page(nesdev->pcidev,
                                                                        bus_address,
@@ -2333,7 +2539,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
                                if (atomic_read(&nesvnic->rx_skbs_needed) > (nesvnic->nic.rq_size>>1)) {
                                        nes_write32(nesdev->regs+NES_CQE_ALLOC,
                                                        cq->cq_number | (cqe_count << 16));
-//                                     nesadapter->tune_timer.cq_count += cqe_count;
+                                       /* nesadapter->tune_timer.cq_count += cqe_count; */
                                        nesdev->currcq_count += cqe_count;
                                        cqe_count = 0;
                                        nes_replenish_nic_rq(nesvnic);
@@ -2381,9 +2587,16 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
                                                                >> 16);
                                                nes_debug(NES_DBG_CQ, "%s: Reporting stripped VLAN packet. Tag = 0x%04X\n",
                                                                nesvnic->netdev->name, vlan_tag);
-                                               nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag);
+                                               if (nes_use_lro)
+                                                       lro_vlan_hwaccel_receive_skb(&nesvnic->lro_mgr, rx_skb,
+                                                                       nesvnic->vlan_grp, vlan_tag, NULL);
+                                               else
+                                                       nes_vlan_rx(rx_skb, nesvnic->vlan_grp, vlan_tag);
                                        } else {
-                                               nes_netif_rx(rx_skb);
+                                               if (nes_use_lro)
+                                                       lro_receive_skb(&nesvnic->lro_mgr, rx_skb, NULL);
+                                               else
+                                                       nes_netif_rx(rx_skb);
                                        }
                                }
 
@@ -2401,7 +2614,7 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
                                /* Replenish Nic CQ */
                                nes_write32(nesdev->regs+NES_CQE_ALLOC,
                                                cq->cq_number | (cqe_count << 16));
-//                             nesdev->nesadapter->tune_timer.cq_count += cqe_count;
+                               /* nesdev->nesadapter->tune_timer.cq_count += cqe_count; */
                                nesdev->currcq_count += cqe_count;
                                cqe_count = 0;
                        }
@@ -2415,26 +2628,27 @@ void nes_nic_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *cq)
 
        } while (1);
 
+       if (nes_use_lro)
+               lro_flush_all(&nesvnic->lro_mgr);
        if (sq_cqes) {
                barrier();
                /* restart the queue if it had been stopped */
                if (netif_queue_stopped(nesvnic->netdev))
                        netif_wake_queue(nesvnic->netdev);
        }
-
        cq->cq_head = head;
        /* nes_debug(NES_DBG_CQ, "CQ%u Processed = %u cqes, new head = %u.\n",
                        cq->cq_number, cqe_count, cq->cq_head); */
        cq->cqe_allocs_pending = cqe_count;
        if (unlikely(nesadapter->et_use_adaptive_rx_coalesce))
        {
-//             nesdev->nesadapter->tune_timer.cq_count += cqe_count;
+               /* nesdev->nesadapter->tune_timer.cq_count += cqe_count; */
                nesdev->currcq_count += cqe_count;
                nes_nic_tune_timer(nesdev);
        }
        if (atomic_read(&nesvnic->rx_skbs_needed))
                nes_replenish_nic_rq(nesvnic);
-       }
+}
 
 
 /**
@@ -2463,7 +2677,7 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
 
                if (le32_to_cpu(cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
                        u64temp = (((u64)(le32_to_cpu(cq->cq_vbase[head].
-                                       cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
+                                       cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]))) << 32) |
                                        ((u64)(le32_to_cpu(cq->cq_vbase[head].
                                        cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX])));
                        cqp = *((struct nes_hw_cqp **)&u64temp);
@@ -2480,7 +2694,7 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
                        }
 
                        u64temp = (((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail].
-                                       wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX])))<<32) |
+                                       wqe_words[NES_CQP_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
                                        ((u64)(le32_to_cpu(nesdev->cqp.sq_vbase[cqp->sq_tail].
                                        wqe_words[NES_CQP_WQE_COMP_SCRATCH_LOW_IDX])));
                        cqp_request = *((struct nes_cqp_request **)&u64temp);
@@ -2517,7 +2731,7 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
                                } else {
                                        nes_debug(NES_DBG_CQP, "CQP request %p (opcode 0x%02X) freed.\n",
                                                        cqp_request,
-                                                       le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX])&0x3f);
+                                                       le32_to_cpu(cqp_request->cqp_wqe.wqe_words[NES_CQP_WQE_OPCODE_IDX]) & 0x3f);
                                        if (cqp_request->dynamic) {
                                                kfree(cqp_request);
                                        } else {
@@ -2531,7 +2745,7 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
                        }
 
                        cq->cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
-                       nes_write32(nesdev->regs+NES_CQE_ALLOC, cq->cq_number | (1 << 16));
+                       nes_write32(nesdev->regs + NES_CQE_ALLOC, cq->cq_number | (1 << 16));
                        if (++cqp->sq_tail >= cqp->sq_size)
                                cqp->sq_tail = 0;
 
@@ -2600,13 +2814,13 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
        nes_debug(NES_DBG_AEQ, "\n");
        aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
        if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) {
-               context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
+               context  = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
                context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
        } else {
                aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
                aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
                context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
-                                               aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
+                                               aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
                BUG_ON(!context);
        }
 
@@ -2619,7 +2833,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                        le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]), aeqe,
                        nes_tcp_state_str[tcp_state], nes_iwarp_state_str[iwarp_state]);
 
-
        switch (async_event_id) {
                case NES_AEQE_AEID_LLP_FIN_RECEIVED:
                        nesqp = *((struct nes_qp **)&context);
@@ -3023,7 +3236,7 @@ void nes_manage_arp_cache(struct net_device *netdev, unsigned char *mac_addr,
                cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] |= cpu_to_le32(NES_CQP_ARP_VALID);
                cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_ADDR_LOW_IDX] = cpu_to_le32(
                                (((u32)mac_addr[2]) << 24) | (((u32)mac_addr[3]) << 16) |
-                               (((u32)mac_addr[4]) << 8) | (u32)mac_addr[5]);
+                               (((u32)mac_addr[4]) << 8)  | (u32)mac_addr[5]);
                cqp_wqe->wqe_words[NES_CQP_ARP_WQE_MAC_HIGH_IDX] = cpu_to_le32(
                                (((u32)mac_addr[0]) << 16) | (u32)mac_addr[1]);
        } else {
index b7e2844f096b620489c0b338e65e400937c16c4a..745bf94f3f074283f9c6f9dc36547f976d64883e 100644 (file)
 #ifndef __NES_HW_H
 #define __NES_HW_H
 
-#define NES_PHY_TYPE_1G   2
-#define NES_PHY_TYPE_IRIS 3
+#include <linux/inet_lro.h>
+
+#define NES_PHY_TYPE_1G        2
+#define NES_PHY_TYPE_IRIS      3
+#define NES_PHY_TYPE_ARGUS     4
+#define NES_PHY_TYPE_PUMA_1G   5
 #define NES_PHY_TYPE_PUMA_10G  6
 
 #define NES_MULTICAST_PF_MAX 8
@@ -905,7 +909,7 @@ struct nes_hw_qp {
 };
 
 struct nes_hw_cq {
-       struct nes_hw_cqe volatile *cq_vbase;   /* PCI memory for host rings */
+       struct nes_hw_cqe *cq_vbase;    /* PCI memory for host rings */
        void (*ce_handler)(struct nes_device *nesdev, struct nes_hw_cq *cq);
        dma_addr_t cq_pbase;    /* PCI memory for host rings */
        u16 cq_head;
@@ -965,7 +969,7 @@ struct nes_arp_entry {
 #define NES_NIC_CQ_DOWNWARD_TREND   16
 
 struct nes_hw_tune_timer {
-    //u16 cq_count;
+    /* u16 cq_count; */
     u16 threshold_low;
     u16 threshold_target;
     u16 threshold_high;
@@ -982,8 +986,10 @@ struct nes_hw_tune_timer {
 #define NES_TIMER_INT_LIMIT         2
 #define NES_TIMER_INT_LIMIT_DYNAMIC 10
 #define NES_TIMER_ENABLE_LIMIT      4
-#define NES_MAX_LINK_INTERRUPTS                128
-#define NES_MAX_LINK_CHECK             200
+#define NES_MAX_LINK_INTERRUPTS     128
+#define NES_MAX_LINK_CHECK          200
+#define NES_MAX_LRO_DESCRIPTORS     32
+#define NES_LRO_MAX_AGGR            64
 
 struct nes_adapter {
        u64              fw_ver;
@@ -1183,6 +1189,9 @@ struct nes_vnic {
        u8  of_device_registered;
        u8  rdma_enabled;
        u8  rx_checksum_disabled;
+       u32 lro_max_aggr;
+       struct net_lro_mgr lro_mgr;
+       struct net_lro_desc lro_desc[NES_MAX_LRO_DESCRIPTORS];
 };
 
 struct nes_ib_device {
index 01cd0effc492b0712e880ac016117d082a295ae6..1b0938c87774e54017dd22364520778547b40dee 100644 (file)
@@ -185,12 +185,13 @@ static int nes_netdev_open(struct net_device *netdev)
        nic_active |= nic_active_bit;
        nes_write_indexed(nesdev, NES_IDX_NIC_BROADCAST_ON, nic_active);
 
-       macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
+       macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
        macaddr_high += (u16)netdev->dev_addr[1];
-       macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
-       macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
-       macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
-       macaddr_low += (u32)netdev->dev_addr[5];
+
+       macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
+       macaddr_low  += ((u32)netdev->dev_addr[3]) << 16;
+       macaddr_low  += ((u32)netdev->dev_addr[4]) << 8;
+       macaddr_low  += (u32)netdev->dev_addr[5];
 
        /* Program the various MAC regs */
        for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
@@ -451,7 +452,7 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        __le16 *wqe_fragment_length;
        u32 nr_frags;
        u32 original_first_length;
-//     u64 *wqe_fragment_address;
+       /* u64 *wqe_fragment_address; */
        /* first fragment (0) is used by copy buffer */
        u16 wqe_fragment_index=1;
        u16 hoffset;
@@ -461,11 +462,12 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
        u32 old_head;
        u32 wqe_misc;
 
-       /* nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
-                       " (%u frags), tso_size=%u\n",
-                       netdev->name, skb->len, skb_headlen(skb),
-                       skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
-       */
+       /*
+        * nes_debug(NES_DBG_NIC_TX, "%s Request to tx NIC packet length %u, headlen %u,"
+        *              " (%u frags), tso_size=%u\n",
+        *              netdev->name, skb->len, skb_headlen(skb),
+        *              skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
+        */
 
        if (!netif_carrier_ok(netdev))
                return NETDEV_TX_OK;
@@ -787,22 +789,20 @@ static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
        int i;
        u32 macaddr_low;
        u16 macaddr_high;
+       DECLARE_MAC_BUF(mac);
 
        if (!is_valid_ether_addr(mac_addr->sa_data))
                return -EADDRNOTAVAIL;
 
        memcpy(netdev->dev_addr, mac_addr->sa_data, netdev->addr_len);
-       printk(PFX "%s: Address length = %d, Address = %02X%02X%02X%02X%02X%02X..\n",
-                  __func__, netdev->addr_len,
-                  mac_addr->sa_data[0], mac_addr->sa_data[1],
-                  mac_addr->sa_data[2], mac_addr->sa_data[3],
-                  mac_addr->sa_data[4], mac_addr->sa_data[5]);
-       macaddr_high = ((u16)netdev->dev_addr[0]) << 8;
+       printk(PFX "%s: Address length = %d, Address = %s\n",
+              __func__, netdev->addr_len, print_mac(mac, mac_addr->sa_data));
+       macaddr_high  = ((u16)netdev->dev_addr[0]) << 8;
        macaddr_high += (u16)netdev->dev_addr[1];
-       macaddr_low = ((u32)netdev->dev_addr[2]) << 24;
-       macaddr_low += ((u32)netdev->dev_addr[3]) << 16;
-       macaddr_low += ((u32)netdev->dev_addr[4]) << 8;
-       macaddr_low += (u32)netdev->dev_addr[5];
+       macaddr_low   = ((u32)netdev->dev_addr[2]) << 24;
+       macaddr_low  += ((u32)netdev->dev_addr[3]) << 16;
+       macaddr_low  += ((u32)netdev->dev_addr[4]) << 8;
+       macaddr_low  += (u32)netdev->dev_addr[5];
 
        for (i = 0; i < NES_MAX_PORT_COUNT; i++) {
                if (nesvnic->qp_nic_index[i] == 0xf) {
@@ -878,17 +878,17 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
                        if (mc_nic_index < 0)
                                mc_nic_index = nesvnic->nic_index;
                        if (multicast_addr) {
-                               nes_debug(NES_DBG_NIC_RX, "Assigning MC Address = %02X%02X%02X%02X%02X%02X to register 0x%04X nic_idx=%d\n",
-                                                 multicast_addr->dmi_addr[0], multicast_addr->dmi_addr[1],
-                                                 multicast_addr->dmi_addr[2], multicast_addr->dmi_addr[3],
-                                                 multicast_addr->dmi_addr[4], multicast_addr->dmi_addr[5],
-                                                 perfect_filter_register_address+(mc_index * 8), mc_nic_index);
-                               macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8;
+                               DECLARE_MAC_BUF(mac);
+                               nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %s to register 0x%04X nic_idx=%d\n",
+                                         print_mac(mac, multicast_addr->dmi_addr),
+                                         perfect_filter_register_address+(mc_index * 8),
+                                         mc_nic_index);
+                               macaddr_high  = ((u16)multicast_addr->dmi_addr[0]) << 8;
                                macaddr_high += (u16)multicast_addr->dmi_addr[1];
-                               macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24;
-                               macaddr_low += ((u32)multicast_addr->dmi_addr[3]) << 16;
-                               macaddr_low += ((u32)multicast_addr->dmi_addr[4]) << 8;
-                               macaddr_low += (u32)multicast_addr->dmi_addr[5];
+                               macaddr_low   = ((u32)multicast_addr->dmi_addr[2]) << 24;
+                               macaddr_low  += ((u32)multicast_addr->dmi_addr[3]) << 16;
+                               macaddr_low  += ((u32)multicast_addr->dmi_addr[4]) << 8;
+                               macaddr_low  += (u32)multicast_addr->dmi_addr[5];
                                nes_write_indexed(nesdev,
                                                perfect_filter_register_address+(mc_index * 8),
                                                macaddr_low);
@@ -912,23 +912,23 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
 /**
  * nes_netdev_change_mtu
  */
-static int nes_netdev_change_mtu(struct        net_device *netdev,     int     new_mtu)
+static int nes_netdev_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct nes_vnic *nesvnic = netdev_priv(netdev);
-       struct nes_device *nesdev =     nesvnic->nesdev;
-       int     ret     = 0;
-       u8 jumbomode=0;
+       struct nes_device *nesdev = nesvnic->nesdev;
+       int ret = 0;
+       u8 jumbomode = 0;
 
-       if ((new_mtu < ETH_ZLEN) ||     (new_mtu > max_mtu))
+       if ((new_mtu < ETH_ZLEN) || (new_mtu > max_mtu))
                return -EINVAL;
 
-       netdev->mtu     = new_mtu;
+       netdev->mtu = new_mtu;
        nesvnic->max_frame_size = new_mtu + VLAN_ETH_HLEN;
 
        if (netdev->mtu > 1500) {
                jumbomode=1;
        }
-       nes_nic_init_timer_defaults(nesdev,     jumbomode);
+       nes_nic_init_timer_defaults(nesdev, jumbomode);
 
        if (netif_running(netdev)) {
                nes_netdev_stop(netdev);
@@ -938,8 +938,7 @@ static int nes_netdev_change_mtu(struct     net_device *netdev,     int     new_mtu)
        return ret;
 }
 
-#define NES_ETHTOOL_STAT_COUNT 55
-static const char nes_ethtool_stringset[NES_ETHTOOL_STAT_COUNT][ETH_GSTRING_LEN] = {
+static const char nes_ethtool_stringset[][ETH_GSTRING_LEN] = {
        "Link Change Interrupts",
        "Linearized SKBs",
        "T/GSO Requests",
@@ -995,8 +994,12 @@ static const char nes_ethtool_stringset[NES_ETHTOOL_STAT_COUNT][ETH_GSTRING_LEN]
        "CQ Depth 32",
        "CQ Depth 128",
        "CQ Depth 256",
+       "LRO aggregated",
+       "LRO flushed",
+       "LRO no_desc",
 };
 
+#define NES_ETHTOOL_STAT_COUNT  ARRAY_SIZE(nes_ethtool_stringset)
 
 /**
  * nes_netdev_get_rx_csum
@@ -1191,6 +1194,9 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
        target_stat_values[52] = int_mod_cq_depth_32;
        target_stat_values[53] = int_mod_cq_depth_128;
        target_stat_values[54] = int_mod_cq_depth_256;
+       target_stat_values[55] = nesvnic->lro_mgr.stats.aggregated;
+       target_stat_values[56] = nesvnic->lro_mgr.stats.flushed;
+       target_stat_values[57] = nesvnic->lro_mgr.stats.no_desc;
 
 }
 
@@ -1221,14 +1227,14 @@ static int nes_netdev_set_coalesce(struct net_device *netdev,
                struct ethtool_coalesce *et_coalesce)
 {
        struct nes_vnic *nesvnic = netdev_priv(netdev);
-       struct nes_device *nesdev =     nesvnic->nesdev;
+       struct nes_device *nesdev = nesvnic->nesdev;
        struct nes_adapter *nesadapter = nesdev->nesadapter;
        struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
        unsigned long flags;
 
-       spin_lock_irqsave(&nesadapter->periodic_timer_lock,     flags);
+       spin_lock_irqsave(&nesadapter->periodic_timer_lock, flags);
        if (et_coalesce->rx_max_coalesced_frames_low) {
-               shared_timer->threshold_low      = et_coalesce->rx_max_coalesced_frames_low;
+               shared_timer->threshold_low = et_coalesce->rx_max_coalesced_frames_low;
        }
        if (et_coalesce->rx_max_coalesced_frames_irq) {
                shared_timer->threshold_target = et_coalesce->rx_max_coalesced_frames_irq;
@@ -1248,14 +1254,14 @@ static int nes_netdev_set_coalesce(struct net_device *netdev,
        nesadapter->et_rx_coalesce_usecs_irq = et_coalesce->rx_coalesce_usecs_irq;
        if (et_coalesce->use_adaptive_rx_coalesce) {
                nesadapter->et_use_adaptive_rx_coalesce = 1;
-               nesadapter->timer_int_limit     = NES_TIMER_INT_LIMIT_DYNAMIC;
+               nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT_DYNAMIC;
                nesadapter->et_rx_coalesce_usecs_irq = 0;
                if (et_coalesce->pkt_rate_low) {
-                       nesadapter->et_pkt_rate_low     = et_coalesce->pkt_rate_low;
+                       nesadapter->et_pkt_rate_low = et_coalesce->pkt_rate_low;
                }
        } else {
                nesadapter->et_use_adaptive_rx_coalesce = 0;
-               nesadapter->timer_int_limit     = NES_TIMER_INT_LIMIT;
+               nesadapter->timer_int_limit = NES_TIMER_INT_LIMIT;
                if (nesadapter->et_rx_coalesce_usecs_irq) {
                        nes_write32(nesdev->regs+NES_PERIODIC_CONTROL,
                                        0x80000000 | ((u32)(nesadapter->et_rx_coalesce_usecs_irq*8)));
@@ -1272,28 +1278,28 @@ static int nes_netdev_get_coalesce(struct net_device *netdev,
                struct ethtool_coalesce *et_coalesce)
 {
        struct nes_vnic *nesvnic = netdev_priv(netdev);
-       struct nes_device *nesdev =     nesvnic->nesdev;
+       struct nes_device *nesdev = nesvnic->nesdev;
        struct nes_adapter *nesadapter = nesdev->nesadapter;
        struct ethtool_coalesce temp_et_coalesce;
        struct nes_hw_tune_timer *shared_timer = &nesadapter->tune_timer;
        unsigned long flags;
 
        memset(&temp_et_coalesce, 0, sizeof(temp_et_coalesce));
-       temp_et_coalesce.rx_coalesce_usecs_irq = nesadapter->et_rx_coalesce_usecs_irq;
-       temp_et_coalesce.use_adaptive_rx_coalesce =     nesadapter->et_use_adaptive_rx_coalesce;
-       temp_et_coalesce.rate_sample_interval = nesadapter->et_rate_sample_interval;
+       temp_et_coalesce.rx_coalesce_usecs_irq    = nesadapter->et_rx_coalesce_usecs_irq;
+       temp_et_coalesce.use_adaptive_rx_coalesce = nesadapter->et_use_adaptive_rx_coalesce;
+       temp_et_coalesce.rate_sample_interval     = nesadapter->et_rate_sample_interval;
        temp_et_coalesce.pkt_rate_low = nesadapter->et_pkt_rate_low;
        spin_lock_irqsave(&nesadapter->periodic_timer_lock,     flags);
-       temp_et_coalesce.rx_max_coalesced_frames_low = shared_timer->threshold_low;
-       temp_et_coalesce.rx_max_coalesced_frames_irq = shared_timer->threshold_target;
+       temp_et_coalesce.rx_max_coalesced_frames_low  = shared_timer->threshold_low;
+       temp_et_coalesce.rx_max_coalesced_frames_irq  = shared_timer->threshold_target;
        temp_et_coalesce.rx_max_coalesced_frames_high = shared_timer->threshold_high;
-       temp_et_coalesce.rx_coalesce_usecs_low = shared_timer->timer_in_use_min;
+       temp_et_coalesce.rx_coalesce_usecs_low  = shared_timer->timer_in_use_min;
        temp_et_coalesce.rx_coalesce_usecs_high = shared_timer->timer_in_use_max;
        if (nesadapter->et_use_adaptive_rx_coalesce) {
                temp_et_coalesce.rx_coalesce_usecs_irq = shared_timer->timer_in_use;
        }
        spin_unlock_irqrestore(&nesadapter->periodic_timer_lock, flags);
-       memcpy(et_coalesce,     &temp_et_coalesce, sizeof(*et_coalesce));
+       memcpy(et_coalesce, &temp_et_coalesce, sizeof(*et_coalesce));
        return 0;
 }
 
@@ -1372,30 +1378,38 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
        u16 phy_data;
 
        et_cmd->duplex = DUPLEX_FULL;
-       et_cmd->port = PORT_MII;
+       et_cmd->port   = PORT_MII;
+
        if (nesadapter->OneG_Mode) {
-               et_cmd->supported = SUPPORTED_1000baseT_Full|SUPPORTED_Autoneg;
-               et_cmd->advertising = ADVERTISED_1000baseT_Full|ADVERTISED_Autoneg;
                et_cmd->speed = SPEED_1000;
-               nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
-                               &phy_data);
-               if (phy_data&0x1000) {
-                       et_cmd->autoneg = AUTONEG_ENABLE;
+               if (nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) {
+                       et_cmd->supported   = SUPPORTED_1000baseT_Full;
+                       et_cmd->advertising = ADVERTISED_1000baseT_Full;
+                       et_cmd->autoneg     = AUTONEG_DISABLE;
+                       et_cmd->transceiver = XCVR_INTERNAL;
+                       et_cmd->phy_address = nesdev->mac_index;
                } else {
-                       et_cmd->autoneg = AUTONEG_DISABLE;
+                       et_cmd->supported   = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg;
+                       et_cmd->advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg;
+                       nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index], &phy_data);
+                       if (phy_data & 0x1000)
+                               et_cmd->autoneg = AUTONEG_ENABLE;
+                       else
+                               et_cmd->autoneg = AUTONEG_DISABLE;
+                       et_cmd->transceiver = XCVR_EXTERNAL;
+                       et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
                }
-               et_cmd->transceiver = XCVR_EXTERNAL;
-               et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
        } else {
-               if (nesadapter->phy_type[nesvnic->logical_port] == NES_PHY_TYPE_IRIS) {
+               if ((nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_IRIS) ||
+                   (nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_ARGUS)) {
                        et_cmd->transceiver = XCVR_EXTERNAL;
-                       et_cmd->port = PORT_FIBRE;
-                       et_cmd->supported = SUPPORTED_FIBRE;
+                       et_cmd->port        = PORT_FIBRE;
+                       et_cmd->supported   = SUPPORTED_FIBRE;
                        et_cmd->advertising = ADVERTISED_FIBRE;
                        et_cmd->phy_address = nesadapter->phy_index[nesdev->mac_index];
                } else {
                        et_cmd->transceiver = XCVR_INTERNAL;
-                       et_cmd->supported = SUPPORTED_10000baseT_Full;
+                       et_cmd->supported   = SUPPORTED_10000baseT_Full;
                        et_cmd->advertising = ADVERTISED_10000baseT_Full;
                        et_cmd->phy_address = nesdev->mac_index;
                }
@@ -1418,14 +1432,15 @@ static int nes_netdev_set_settings(struct net_device *netdev, struct ethtool_cmd
        struct nes_adapter *nesadapter = nesdev->nesadapter;
        u16 phy_data;
 
-       if (nesadapter->OneG_Mode) {
+       if ((nesadapter->OneG_Mode) &&
+           (nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G)) {
                nes_read_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
                                &phy_data);
                if (et_cmd->autoneg) {
                        /* Turn on Full duplex, Autoneg, and restart autonegotiation */
                        phy_data |= 0x1300;
                } else {
-                       // Turn off autoneg
+                       /* Turn off autoneg */
                        phy_data &= ~0x1000;
                }
                nes_write_1G_phy_reg(nesdev, 0, nesadapter->phy_index[nesdev->mac_index],
@@ -1456,6 +1471,8 @@ static struct ethtool_ops nes_ethtool_ops = {
        .set_sg = ethtool_op_set_sg,
        .get_tso = ethtool_op_get_tso,
        .set_tso = ethtool_op_set_tso,
+       .get_flags = ethtool_op_get_flags,
+       .set_flags = ethtool_op_set_flags,
 };
 
 
@@ -1609,27 +1626,34 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
        list_add_tail(&nesvnic->list, &nesdev->nesadapter->nesvnic_list[nesdev->mac_index]);
 
        if ((nesdev->netdev_count == 0) &&
-           (PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index)) {
-               nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n",
-                               NES_IDX_PHY_PCS_CONTROL_STATUS0+(0x200*(nesvnic->logical_port&1)));
+           ((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) ||
+            ((nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) &&
+             (((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
+              ((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
+               /*
+                * nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n",
+                *              NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesvnic->logical_port & 1)));
+                */
                u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
-                               (0x200*(nesvnic->logical_port&1)));
-               u32temp |= 0x00200000;
-               nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
-                               (0x200*(nesvnic->logical_port&1)), u32temp);
+                               (0x200 * (nesdev->mac_index & 1)));
+               if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G) {
+                       u32temp |= 0x00200000;
+                       nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
+                               (0x200 * (nesdev->mac_index & 1)), u32temp);
+               }
+
                u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
-                               (0x200*(nesvnic->logical_port&1)) );
+                               (0x200 * (nesdev->mac_index & 1)));
+
                if ((u32temp&0x0f1f0000) == 0x0f0f0000) {
-                       if (nesdev->nesadapter->phy_type[nesvnic->logical_port] == NES_PHY_TYPE_IRIS) {
+                       if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_IRIS) {
                                nes_init_phy(nesdev);
-                               nes_read_10G_phy_reg(nesdev, 1,
-                                               nesdev->nesadapter->phy_index[nesvnic->logical_port]);
+                               nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
                                temp_phy_data = (u16)nes_read_indexed(nesdev,
                                                                        NES_IDX_MAC_MDIO_CONTROL);
                                u32temp = 20;
                                do {
-                                       nes_read_10G_phy_reg(nesdev, 1,
-                                                       nesdev->nesadapter->phy_index[nesvnic->logical_port]);
+                                       nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
                                        phy_data = (u16)nes_read_indexed(nesdev,
                                                                        NES_IDX_MAC_MDIO_CONTROL);
                                        if ((phy_data == temp_phy_data) || (!(--u32temp)))
@@ -1646,6 +1670,14 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
                                nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
                                nesvnic->linkup = 1;
                        }
+               } else if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) {
+                       nes_debug(NES_DBG_INIT, "mac_index=%d, logical_port=%d, u32temp=0x%04X, PCI_FUNC=%d\n",
+                               nesdev->mac_index, nesvnic->logical_port, u32temp, PCI_FUNC(nesdev->pcidev->devfn));
+                       if (((nesdev->mac_index < 2) && ((u32temp&0x01010000) == 0x01010000)) ||
+                           ((nesdev->mac_index > 1) && ((u32temp&0x02020000) == 0x02020000)))  {
+                               nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
+                               nesvnic->linkup = 1;
+                       }
                }
                /* clear the MAC interrupt status, assumes direct logical to physical mapping */
                u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
index f9db07c2717d81e7f4e7eb7ecdcccc20d303f3ed..fe83d1b2b177161a395ed826a7521181834ab8c2 100644 (file)
@@ -444,15 +444,13 @@ void nes_read_1G_phy_reg(struct nes_device *nesdev, u8 phy_reg, u8 phy_addr, u16
 /**
  * nes_write_10G_phy_reg
  */
-void nes_write_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg,
-               u8 phy_addr, u16 data)
+void nes_write_10G_phy_reg(struct nes_device *nesdev, u16 phy_addr, u8 dev_addr, u16 phy_reg,
+               u16 data)
 {
-       u32 dev_addr;
        u32 port_addr;
        u32 u32temp;
        u32 counter;
 
-       dev_addr = 1;
        port_addr = phy_addr;
 
        /* set address */
@@ -492,14 +490,12 @@ void nes_write_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg,
  * This routine only issues the read, the data must be read
  * separately.
  */
-void nes_read_10G_phy_reg(struct nes_device *nesdev, u16 phy_reg, u8 phy_addr)
+void nes_read_10G_phy_reg(struct nes_device *nesdev, u8 phy_addr, u8 dev_addr, u16 phy_reg)
 {
-       u32 dev_addr;
        u32 port_addr;
        u32 u32temp;
        u32 counter;
 
-       dev_addr = 1;
        port_addr = phy_addr;
 
        /* set address */
@@ -660,7 +656,9 @@ int nes_arp_table(struct nes_device *nesdev, u32 ip_addr, u8 *mac_addr, u32 acti
 
        /* DELETE or RESOLVE */
        if (arp_index == nesadapter->arp_table_size) {
-               nes_debug(NES_DBG_NETDEV, "mac address not in ARP table - cannot delete or resolve\n");
+               nes_debug(NES_DBG_NETDEV, "MAC for " NIPQUAD_FMT " not in ARP table - cannot %s\n",
+                         HIPQUAD(ip_addr),
+                         action == NES_ARP_RESOLVE ? "resolve" : "delete");
                return -1;
        }
 
index f9a5d43908923081526b50e4eed3a709c5b4b8e0..99b3c4ae86eb0e1c71e0d03c4ebdde85b098155a 100644 (file)
@@ -1266,7 +1266,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
                        sq_size = init_attr->cap.max_send_wr;
                        rq_size = init_attr->cap.max_recv_wr;
 
-                       // check if the encoded sizes are OK or not...
+                       /* check if the encoded sizes are OK or not... */
                        sq_encoded_size = nes_get_encoded_size(&sq_size);
                        rq_encoded_size = nes_get_encoded_size(&rq_size);
 
@@ -1976,7 +1976,7 @@ static int nes_destroy_cq(struct ib_cq *ib_cq)
 
        if (nescq->cq_mem_size)
                pci_free_consistent(nesdev->pcidev, nescq->cq_mem_size,
-                               (void *)nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
+                                   nescq->hw_cq.cq_vbase, nescq->hw_cq.cq_pbase);
        kfree(nescq);
 
        return ret;
@@ -2377,7 +2377,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        u8 single_page = 1;
        u8 stag_key;
 
-       region = ib_umem_get(pd->uobject->context, start, length, acc);
+       region = ib_umem_get(pd->uobject->context, start, length, acc, 0);
        if (IS_ERR(region)) {
                return (struct ib_mr *)region;
        }
@@ -3610,6 +3610,12 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
        while (cqe_count < num_entries) {
                if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
                                NES_CQE_VALID) {
+                       /*
+                        * Make sure we read CQ entry contents *after*
+                        * we've checked the valid bit.
+                        */
+                       rmb();
+
                        cqe = nescq->hw_cq.cq_vbase[head];
                        nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
                        u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
index 73b2b176ad0e45e98adbb257d724fb1f814f507a..9044f8803532e035e264e2b166644b05d84563e4 100644 (file)
 /* constants */
 
 enum {
-       IPOIB_PACKET_SIZE         = 2048,
-       IPOIB_BUF_SIZE            = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
-
        IPOIB_ENCAP_LEN           = 4,
 
+       IPOIB_UD_HEAD_SIZE        = IB_GRH_BYTES + IPOIB_ENCAP_LEN,
+       IPOIB_UD_RX_SG            = 2, /* max buffer needed for 4K mtu */
+
        IPOIB_CM_MTU              = 0x10000 - 0x10, /* padding to align header to 16 */
        IPOIB_CM_BUF_SIZE         = IPOIB_CM_MTU  + IPOIB_ENCAP_LEN,
        IPOIB_CM_HEAD_SIZE        = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
@@ -95,6 +95,8 @@ enum {
        IPOIB_MCAST_FLAG_SENDONLY = 1,
        IPOIB_MCAST_FLAG_BUSY     = 2,  /* joining or already joined */
        IPOIB_MCAST_FLAG_ATTACHED = 3,
+
+       MAX_SEND_CQE              = 16,
 };
 
 #define        IPOIB_OP_RECV   (1ul << 31)
@@ -139,7 +141,7 @@ struct ipoib_mcast {
 
 struct ipoib_rx_buf {
        struct sk_buff *skb;
-       u64             mapping;
+       u64             mapping[IPOIB_UD_RX_SG];
 };
 
 struct ipoib_tx_buf {
@@ -285,7 +287,8 @@ struct ipoib_dev_priv {
        u16               pkey_index;
        struct ib_pd     *pd;
        struct ib_mr     *mr;
-       struct ib_cq     *cq;
+       struct ib_cq     *recv_cq;
+       struct ib_cq     *send_cq;
        struct ib_qp     *qp;
        u32               qkey;
 
@@ -294,6 +297,7 @@ struct ipoib_dev_priv {
 
        unsigned int admin_mtu;
        unsigned int mcast_mtu;
+       unsigned int max_ib_mtu;
 
        struct ipoib_rx_buf *rx_ring;
 
@@ -304,6 +308,10 @@ struct ipoib_dev_priv {
        struct ib_sge        tx_sge[MAX_SKB_FRAGS + 1];
        struct ib_send_wr    tx_wr;
        unsigned             tx_outstanding;
+       struct ib_wc         send_wc[MAX_SEND_CQE];
+
+       struct ib_recv_wr    rx_wr;
+       struct ib_sge        rx_sge[IPOIB_UD_RX_SG];
 
        struct ib_wc ibwc[IPOIB_NUM_WC];
 
@@ -366,6 +374,14 @@ struct ipoib_neigh {
        struct list_head    list;
 };
 
+#define IPOIB_UD_MTU(ib_mtu)           (ib_mtu - IPOIB_ENCAP_LEN)
+#define IPOIB_UD_BUF_SIZE(ib_mtu)      (ib_mtu + IB_GRH_BYTES)
+
+static inline int ipoib_ud_need_sg(unsigned int ib_mtu)
+{
+       return IPOIB_UD_BUF_SIZE(ib_mtu) > PAGE_SIZE;
+}
+
 /*
  * We stash a pointer to our private neighbour information after our
  * hardware address in neigh->ha.  The ALIGN() expression here makes
@@ -650,7 +666,6 @@ static inline int ipoib_register_debugfs(void) { return 0; }
 static inline void ipoib_unregister_debugfs(void) { }
 #endif
 
-
 #define ipoib_printk(level, priv, format, arg...)      \
        printk(level "%s: " format, ((struct ipoib_dev_priv *) priv)->dev->name , ## arg)
 #define ipoib_warn(priv, format, arg...)               \
index 9db7b0bd9134b350befa605a40b122081fe861a4..97e67d36378fa1532cb2224624ae20ea53acefe2 100644 (file)
@@ -249,8 +249,8 @@ static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_init_attr attr = {
                .event_handler = ipoib_cm_rx_event_handler,
-               .send_cq = priv->cq, /* For drain WR */
-               .recv_cq = priv->cq,
+               .send_cq = priv->recv_cq, /* For drain WR */
+               .recv_cq = priv->recv_cq,
                .srq = priv->cm.srq,
                .cap.max_send_wr = 1, /* For drain WR */
                .cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
@@ -951,8 +951,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ib_qp_init_attr attr = {
-               .send_cq                = priv->cq,
-               .recv_cq                = priv->cq,
+               .send_cq                = priv->recv_cq,
+               .recv_cq                = priv->recv_cq,
                .srq                    = priv->cm.srq,
                .cap.max_send_wr        = ipoib_sendq_size,
                .cap.max_send_sge       = 1,
index 9a47428366c91d0f35884ff2cecb0bbf49c41846..10279b79c44d92b9f2384f653fcf9204cf77159d 100644 (file)
@@ -71,7 +71,7 @@ static int ipoib_set_coalesce(struct net_device *dev,
            coal->rx_max_coalesced_frames > 0xffff)
                return -EINVAL;
 
-       ret = ib_modify_cq(priv->cq, coal->rx_max_coalesced_frames,
+       ret = ib_modify_cq(priv->recv_cq, coal->rx_max_coalesced_frames,
                           coal->rx_coalesce_usecs);
        if (ret && ret != -ENOSYS) {
                ipoib_warn(priv, "failed modifying CQ (%d)\n", ret);
index 0205eb7c1bd3328bcefd4e8a5c31d62e01a6c01c..97b815c1a3fc0988a33129e47ace54748d9d9dbb 100644 (file)
@@ -89,28 +89,59 @@ void ipoib_free_ah(struct kref *kref)
        spin_unlock_irqrestore(&priv->lock, flags);
 }
 
+static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv,
+                                 u64 mapping[IPOIB_UD_RX_SG])
+{
+       if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+               ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE,
+                                   DMA_FROM_DEVICE);
+               ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE,
+                                 DMA_FROM_DEVICE);
+       } else
+               ib_dma_unmap_single(priv->ca, mapping[0],
+                                   IPOIB_UD_BUF_SIZE(priv->max_ib_mtu),
+                                   DMA_FROM_DEVICE);
+}
+
+static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv,
+                                  struct sk_buff *skb,
+                                  unsigned int length)
+{
+       if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
+               unsigned int size;
+               /*
+                * There are only two buffers needed for max_payload = 4K,
+                * first buf size is IPOIB_UD_HEAD_SIZE
+                */
+               skb->tail += IPOIB_UD_HEAD_SIZE;
+               skb->len  += length;
+
+               size = length - IPOIB_UD_HEAD_SIZE;
+
+               frag->size     = size;
+               skb->data_len += size;
+               skb->truesize += size;
+       } else
+               skb_put(skb, length);
+
+}
+
 static int ipoib_ib_post_receive(struct net_device *dev, int id)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
-       struct ib_sge list;
-       struct ib_recv_wr param;
        struct ib_recv_wr *bad_wr;
        int ret;
 
-       list.addr     = priv->rx_ring[id].mapping;
-       list.length   = IPOIB_BUF_SIZE;
-       list.lkey     = priv->mr->lkey;
+       priv->rx_wr.wr_id   = id | IPOIB_OP_RECV;
+       priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0];
+       priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1];
 
-       param.next    = NULL;
-       param.wr_id   = id | IPOIB_OP_RECV;
-       param.sg_list = &list;
-       param.num_sge = 1;
 
-       ret = ib_post_recv(priv->qp, &param, &bad_wr);
+       ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret);
-               ib_dma_unmap_single(priv->ca, priv->rx_ring[id].mapping,
-                                   IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+               ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping);
                dev_kfree_skb_any(priv->rx_ring[id].skb);
                priv->rx_ring[id].skb = NULL;
        }
@@ -118,15 +149,21 @@ static int ipoib_ib_post_receive(struct net_device *dev, int id)
        return ret;
 }
 
-static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
+static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct sk_buff *skb;
-       u64 addr;
+       int buf_size;
+       u64 *mapping;
 
-       skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
-       if (!skb)
-               return -ENOMEM;
+       if (ipoib_ud_need_sg(priv->max_ib_mtu))
+               buf_size = IPOIB_UD_HEAD_SIZE;
+       else
+               buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+
+       skb = dev_alloc_skb(buf_size + 4);
+       if (unlikely(!skb))
+               return NULL;
 
        /*
         * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte
@@ -135,17 +172,32 @@ static int ipoib_alloc_rx_skb(struct net_device *dev, int id)
         */
        skb_reserve(skb, 4);
 
-       addr = ib_dma_map_single(priv->ca, skb->data, IPOIB_BUF_SIZE,
-                                DMA_FROM_DEVICE);
-       if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
-               dev_kfree_skb_any(skb);
-               return -EIO;
+       mapping = priv->rx_ring[id].mapping;
+       mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
+                                      DMA_FROM_DEVICE);
+       if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
+               goto error;
+
+       if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+               struct page *page = alloc_page(GFP_ATOMIC);
+               if (!page)
+                       goto partial_error;
+               skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE);
+               mapping[1] =
+                       ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page,
+                                       0, PAGE_SIZE, DMA_FROM_DEVICE);
+               if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1])))
+                       goto partial_error;
        }
 
-       priv->rx_ring[id].skb     = skb;
-       priv->rx_ring[id].mapping = addr;
+       priv->rx_ring[id].skb = skb;
+       return skb;
 
-       return 0;
+partial_error:
+       ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE);
+error:
+       dev_kfree_skb_any(skb);
+       return NULL;
 }
 
 static int ipoib_ib_post_receives(struct net_device *dev)
@@ -154,7 +206,7 @@ static int ipoib_ib_post_receives(struct net_device *dev)
        int i;
 
        for (i = 0; i < ipoib_recvq_size; ++i) {
-               if (ipoib_alloc_rx_skb(dev, i)) {
+               if (!ipoib_alloc_rx_skb(dev, i)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        return -ENOMEM;
                }
@@ -172,7 +224,7 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;
        struct sk_buff *skb;
-       u64 addr;
+       u64 mapping[IPOIB_UD_RX_SG];
 
        ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
                       wr_id, wc->status);
@@ -184,15 +236,13 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        }
 
        skb  = priv->rx_ring[wr_id].skb;
-       addr = priv->rx_ring[wr_id].mapping;
 
        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                if (wc->status != IB_WC_WR_FLUSH_ERR)
                        ipoib_warn(priv, "failed recv event "
                                   "(status=%d, wrid=%d vend_err %x)\n",
                                   wc->status, wr_id, wc->vendor_err);
-               ib_dma_unmap_single(priv->ca, addr,
-                                   IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+               ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);
                dev_kfree_skb_any(skb);
                priv->rx_ring[wr_id].skb = NULL;
                return;
@@ -205,11 +255,14 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num)
                goto repost;
 
+       memcpy(mapping, priv->rx_ring[wr_id].mapping,
+              IPOIB_UD_RX_SG * sizeof *mapping);
+
        /*
         * If we can't allocate a new RX buffer, dump
         * this packet and reuse the old buffer.
         */
-       if (unlikely(ipoib_alloc_rx_skb(dev, wr_id))) {
+       if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) {
                ++dev->stats.rx_dropped;
                goto repost;
        }
@@ -217,9 +270,9 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);
 
-       ib_dma_unmap_single(priv->ca, addr, IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
+       ipoib_ud_dma_unmap_rx(priv, mapping);
+       ipoib_ud_skb_put_frags(priv, skb, wc->byte_len);
 
-       skb_put(skb, wc->byte_len);
        skb_pull(skb, IB_GRH_BYTES);
 
        skb->protocol = ((struct ipoib_header *) skb->data)->proto;
@@ -311,7 +364,6 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        unsigned int wr_id = wc->wr_id;
        struct ipoib_tx_buf *tx_req;
-       unsigned long flags;
 
        ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
                       wr_id, wc->status);
@@ -331,13 +383,11 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 
        dev_kfree_skb_any(tx_req->skb);
 
-       spin_lock_irqsave(&priv->tx_lock, flags);
        ++priv->tx_tail;
        if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
            netif_queue_stopped(dev) &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                netif_wake_queue(dev);
-       spin_unlock_irqrestore(&priv->tx_lock, flags);
 
        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR)
@@ -346,6 +396,17 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
                           wc->status, wr_id, wc->vendor_err);
 }
 
+static int poll_tx(struct ipoib_dev_priv *priv)
+{
+       int n, i;
+
+       n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
+       for (i = 0; i < n; ++i)
+               ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);
+
+       return n == MAX_SEND_CQE;
+}
+
 int ipoib_poll(struct napi_struct *napi, int budget)
 {
        struct ipoib_dev_priv *priv = container_of(napi, struct ipoib_dev_priv, napi);
@@ -361,7 +422,7 @@ poll_more:
                int max = (budget - done);
 
                t = min(IPOIB_NUM_WC, max);
-               n = ib_poll_cq(priv->cq, t, priv->ibwc);
+               n = ib_poll_cq(priv->recv_cq, t, priv->ibwc);
 
                for (i = 0; i < n; i++) {
                        struct ib_wc *wc = priv->ibwc + i;
@@ -372,12 +433,8 @@ poll_more:
                                        ipoib_cm_handle_rx_wc(dev, wc);
                                else
                                        ipoib_ib_handle_rx_wc(dev, wc);
-                       } else {
-                               if (wc->wr_id & IPOIB_OP_CM)
-                                       ipoib_cm_handle_tx_wc(dev, wc);
-                               else
-                                       ipoib_ib_handle_tx_wc(dev, wc);
-                       }
+                       } else
+                               ipoib_cm_handle_tx_wc(priv->dev, wc);
                }
 
                if (n != t)
@@ -386,7 +443,7 @@ poll_more:
 
        if (done < budget) {
                netif_rx_complete(dev, napi);
-               if (unlikely(ib_req_notify_cq(priv->cq,
+               if (unlikely(ib_req_notify_cq(priv->recv_cq,
                                              IB_CQ_NEXT_COMP |
                                              IB_CQ_REPORT_MISSED_EVENTS)) &&
                    netif_rx_reschedule(dev, napi))
@@ -509,12 +566,16 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 
                address->last_send = priv->tx_head;
                ++priv->tx_head;
+               skb_orphan(skb);
 
                if (++priv->tx_outstanding == ipoib_sendq_size) {
                        ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
                        netif_stop_queue(dev);
                }
        }
+
+       if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
+               poll_tx(priv);
 }
 
 static void __ipoib_reap_ah(struct net_device *dev)
@@ -661,7 +722,7 @@ void ipoib_drain_cq(struct net_device *dev)
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int i, n;
        do {
-               n = ib_poll_cq(priv->cq, IPOIB_NUM_WC, priv->ibwc);
+               n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc);
                for (i = 0; i < n; ++i) {
                        /*
                         * Convert any successful completions to flush
@@ -676,14 +737,13 @@ void ipoib_drain_cq(struct net_device *dev)
                                        ipoib_cm_handle_rx_wc(dev, priv->ibwc + i);
                                else
                                        ipoib_ib_handle_rx_wc(dev, priv->ibwc + i);
-                       } else {
-                               if (priv->ibwc[i].wr_id & IPOIB_OP_CM)
-                                       ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
-                               else
-                                       ipoib_ib_handle_tx_wc(dev, priv->ibwc + i);
-                       }
+                       } else
+                               ipoib_cm_handle_tx_wc(dev, priv->ibwc + i);
                }
        } while (n == IPOIB_NUM_WC);
+
+       while (poll_tx(priv))
+               ; /* nothing */
 }
 
 int ipoib_ib_dev_stop(struct net_device *dev, int flush)
@@ -733,10 +793,8 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
                                rx_req = &priv->rx_ring[i];
                                if (!rx_req->skb)
                                        continue;
-                               ib_dma_unmap_single(priv->ca,
-                                                   rx_req->mapping,
-                                                   IPOIB_BUF_SIZE,
-                                                   DMA_FROM_DEVICE);
+                               ipoib_ud_dma_unmap_rx(priv,
+                                                     priv->rx_ring[i].mapping);
                                dev_kfree_skb_any(rx_req->skb);
                                rx_req->skb = NULL;
                        }
@@ -775,7 +833,7 @@ timeout:
                msleep(1);
        }
 
-       ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP);
+       ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP);
 
        return 0;
 }
index bd07f02cf02be6e36864db79fca3494d8a9bed05..2442090ac8d1dbfe083bfca69d46adc2b484c157 100644 (file)
@@ -195,7 +195,7 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
                return 0;
        }
 
-       if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
+       if (new_mtu > IPOIB_UD_MTU(priv->max_ib_mtu))
                return -EINVAL;
 
        priv->admin_mtu = new_mtu;
@@ -971,10 +971,6 @@ static void ipoib_setup(struct net_device *dev)
                                    NETIF_F_LLTX                |
                                    NETIF_F_HIGHDMA);
 
-       /* MTU will be reset when mcast join happens */
-       dev->mtu                 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
-       priv->mcast_mtu          = priv->admin_mtu = dev->mtu;
-
        memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
 
        netif_carrier_off(dev);
@@ -1107,6 +1103,7 @@ static struct net_device *ipoib_add_port(const char *format,
 {
        struct ipoib_dev_priv *priv;
        struct ib_device_attr *device_attr;
+       struct ib_port_attr attr;
        int result = -ENOMEM;
 
        priv = ipoib_intf_alloc(format);
@@ -1115,6 +1112,18 @@ static struct net_device *ipoib_add_port(const char *format,
 
        SET_NETDEV_DEV(priv->dev, hca->dma_device);
 
+       if (!ib_query_port(hca, port, &attr))
+               priv->max_ib_mtu = ib_mtu_enum_to_int(attr.max_mtu);
+       else {
+               printk(KERN_WARNING "%s: ib_query_port %d failed\n",
+                      hca->name, port);
+               goto device_init_failed;
+       }
+
+       /* MTU will be reset when mcast join happens */
+       priv->dev->mtu  = IPOIB_UD_MTU(priv->max_ib_mtu);
+       priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
+
        result = ib_query_pkey(hca, port, 0, &priv->pkey);
        if (result) {
                printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
@@ -1289,7 +1298,8 @@ static int __init ipoib_init_module(void)
 
        ipoib_sendq_size = roundup_pow_of_two(ipoib_sendq_size);
        ipoib_sendq_size = min(ipoib_sendq_size, IPOIB_MAX_QUEUE_SIZE);
-       ipoib_sendq_size = max(ipoib_sendq_size, IPOIB_MIN_QUEUE_SIZE);
+       ipoib_sendq_size = max(ipoib_sendq_size, max(2 * MAX_SEND_CQE,
+                                                    IPOIB_MIN_QUEUE_SIZE));
 #ifdef CONFIG_INFINIBAND_IPOIB_CM
        ipoib_max_conn_qp = min(ipoib_max_conn_qp, IPOIB_CM_MAX_CONN_QP);
 #endif
index 31a53c5bcb1317cd52dc2be80dd3f43238184d6e..d00a2c174aee2c2c3a91453f94b2fb51d3840b2c 100644 (file)
@@ -567,8 +567,7 @@ void ipoib_mcast_join_task(struct work_struct *work)
                return;
        }
 
-       priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
-               IPOIB_ENCAP_LEN;
+       priv->mcast_mtu = IPOIB_UD_MTU(ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu));
 
        if (!ipoib_cm_admin_enabled(dev))
                dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
index 8a20e3742c4356119a1c899514e1520eb049b963..c1e7ece1fd440ffd61efc2260ade377cc87df47e 100644 (file)
@@ -150,7 +150,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
                        .max_send_wr  = ipoib_sendq_size,
                        .max_recv_wr  = ipoib_recvq_size,
                        .max_send_sge = 1,
-                       .max_recv_sge = 1
+                       .max_recv_sge = IPOIB_UD_RX_SG
                },
                .sq_sig_type = IB_SIGNAL_ALL_WR,
                .qp_type     = IB_QPT_UD
@@ -171,26 +171,33 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
                goto out_free_pd;
        }
 
-       size = ipoib_sendq_size + ipoib_recvq_size + 1;
+       size = ipoib_recvq_size + 1;
        ret = ipoib_cm_dev_init(dev);
        if (!ret) {
+               size += ipoib_sendq_size;
                if (ipoib_cm_has_srq(dev))
                        size += ipoib_recvq_size + 1; /* 1 extra for rx_drain_qp */
                else
                        size += ipoib_recvq_size * ipoib_max_conn_qp;
        }
 
-       priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
-       if (IS_ERR(priv->cq)) {
-               printk(KERN_WARNING "%s: failed to create CQ\n", ca->name);
+       priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev, size, 0);
+       if (IS_ERR(priv->recv_cq)) {
+               printk(KERN_WARNING "%s: failed to create receive CQ\n", ca->name);
                goto out_free_mr;
        }
 
-       if (ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP))
-               goto out_free_cq;
+       priv->send_cq = ib_create_cq(priv->ca, NULL, NULL, dev, ipoib_sendq_size, 0);
+       if (IS_ERR(priv->send_cq)) {
+               printk(KERN_WARNING "%s: failed to create send CQ\n", ca->name);
+               goto out_free_recv_cq;
+       }
+
+       if (ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP))
+               goto out_free_send_cq;
 
-       init_attr.send_cq = priv->cq;
-       init_attr.recv_cq = priv->cq;
+       init_attr.send_cq = priv->send_cq;
+       init_attr.recv_cq = priv->recv_cq;
 
        if (priv->hca_caps & IB_DEVICE_UD_TSO)
                init_attr.create_flags = IB_QP_CREATE_IPOIB_UD_LSO;
@@ -201,7 +208,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
        priv->qp = ib_create_qp(priv->pd, &init_attr);
        if (IS_ERR(priv->qp)) {
                printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
-               goto out_free_cq;
+               goto out_free_send_cq;
        }
 
        priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
@@ -215,10 +222,26 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
        priv->tx_wr.sg_list     = priv->tx_sge;
        priv->tx_wr.send_flags  = IB_SEND_SIGNALED;
 
+       priv->rx_sge[0].lkey = priv->mr->lkey;
+       if (ipoib_ud_need_sg(priv->max_ib_mtu)) {
+               priv->rx_sge[0].length = IPOIB_UD_HEAD_SIZE;
+               priv->rx_sge[1].length = PAGE_SIZE;
+               priv->rx_sge[1].lkey = priv->mr->lkey;
+               priv->rx_wr.num_sge = IPOIB_UD_RX_SG;
+       } else {
+               priv->rx_sge[0].length = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu);
+               priv->rx_wr.num_sge = 1;
+       }
+       priv->rx_wr.next = NULL;
+       priv->rx_wr.sg_list = priv->rx_sge;
+
        return 0;
 
-out_free_cq:
-       ib_destroy_cq(priv->cq);
+out_free_send_cq:
+       ib_destroy_cq(priv->send_cq);
+
+out_free_recv_cq:
+       ib_destroy_cq(priv->recv_cq);
 
 out_free_mr:
        ib_dereg_mr(priv->mr);
@@ -241,8 +264,11 @@ void ipoib_transport_dev_cleanup(struct net_device *dev)
                clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
        }
 
-       if (ib_destroy_cq(priv->cq))
-               ipoib_warn(priv, "ib_cq_destroy failed\n");
+       if (ib_destroy_cq(priv->send_cq))
+               ipoib_warn(priv, "ib_cq_destroy (send) failed\n");
+
+       if (ib_destroy_cq(priv->recv_cq))
+               ipoib_warn(priv, "ib_cq_destroy (recv) failed\n");
 
        ipoib_cm_dev_cleanup(dev);
 
index 293f5b892e3f5f20d33ababa2a50f1688254f891..1cdb5cfb0ff12f3cce46c149650cdd3e53f4bb87 100644 (file)
@@ -89,6 +89,10 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
                goto err;
        }
 
+       priv->max_ib_mtu = ppriv->max_ib_mtu;
+       /* MTU will be reset when mcast join happens */
+       priv->dev->mtu   = IPOIB_UD_MTU(priv->max_ib_mtu);
+       priv->mcast_mtu  = priv->admin_mtu = priv->dev->mtu;
        set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
 
        priv->pkey = pkey;
index be1b9fbd416d9cf2a2c70dad4542278704448058..aeb58cae9a3fa4ca8aaec0a949f2cb72f4815aa9 100644 (file)
@@ -473,13 +473,15 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
        stats->r2t_pdus = conn->r2t_pdus_cnt; /* always 0 */
        stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
        stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
-       stats->custom_length = 3;
+       stats->custom_length = 4;
        strcpy(stats->custom[0].desc, "qp_tx_queue_full");
        stats->custom[0].value = 0; /* TB iser_conn->qp_tx_queue_full; */
        strcpy(stats->custom[1].desc, "fmr_map_not_avail");
        stats->custom[1].value = 0; /* TB iser_conn->fmr_map_not_avail */;
        strcpy(stats->custom[2].desc, "eh_abort_cnt");
        stats->custom[2].value = conn->eh_abort_cnt;
+       strcpy(stats->custom[3].desc, "fmr_unalign_cnt");
+       stats->custom[3].value = conn->fmr_unalign_cnt;
 }
 
 static int
index 1ee867b1b3411754c58a814331283944b45b9587..a8c1b300e34d54d76eb18eeb646db24b2c34b598 100644 (file)
 #define DRV_DATE       "May 7th, 2006"
 
 #define iser_dbg(fmt, arg...)                          \
+       do {                                            \
+               if (iser_debug_level > 1)               \
+                       printk(KERN_DEBUG PFX "%s:" fmt,\
+                               __func__ , ## arg);     \
+       } while (0)
+
+#define iser_warn(fmt, arg...)                         \
        do {                                            \
                if (iser_debug_level > 0)               \
                        printk(KERN_DEBUG PFX "%s:" fmt,\
index 4a17743a639f2ae884f4cbe122e8e68f1a6f12b9..cac50c4dc159df8c1202640d6bc0ab13e6b8cc22 100644 (file)
@@ -334,8 +334,11 @@ static void iser_data_buf_dump(struct iser_data_buf *data,
        struct scatterlist *sg;
        int i;
 
+       if (iser_debug_level == 0)
+               return;
+
        for_each_sg(sgl, sg, data->dma_nents, i)
-               iser_err("sg[%d] dma_addr:0x%lX page:0x%p "
+               iser_warn("sg[%d] dma_addr:0x%lX page:0x%p "
                         "off:0x%x sz:0x%x dma_len:0x%x\n",
                         i, (unsigned long)ib_sg_dma_address(ibdev, sg),
                         sg_page(sg), sg->offset,
@@ -420,6 +423,7 @@ void iser_dma_unmap_task_data(struct iscsi_iser_cmd_task *iser_ctask)
 int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
                      enum   iser_data_dir        cmd_dir)
 {
+       struct iscsi_conn    *iscsi_conn = iser_ctask->iser_conn->iscsi_conn;
        struct iser_conn     *ib_conn = iser_ctask->iser_conn->ib_conn;
        struct iser_device   *device = ib_conn->device;
        struct ib_device     *ibdev = device->ib_device;
@@ -434,7 +438,8 @@ int iser_reg_rdma_mem(struct iscsi_iser_cmd_task *iser_ctask,
 
        aligned_len = iser_data_buf_aligned_len(mem, ibdev);
        if (aligned_len != mem->dma_nents) {
-               iser_err("rdma alignment violation %d/%d aligned\n",
+               iscsi_conn->fmr_unalign_cnt++;
+               iser_warn("rdma alignment violation %d/%d aligned\n",
                         aligned_len, mem->size);
                iser_data_buf_dump(mem, ibdev);
 
index f02c242c31145483c0b1836101d0aa27d402c7b2..27006fc1830567f30c87bc16a323cd826bd0ab46 100644 (file)
@@ -898,30 +898,26 @@ static int __init input_proc_init(void)
 {
        struct proc_dir_entry *entry;
 
-       proc_bus_input_dir = proc_mkdir("input", proc_bus);
+       proc_bus_input_dir = proc_mkdir("bus/input", NULL);
        if (!proc_bus_input_dir)
                return -ENOMEM;
 
        proc_bus_input_dir->owner = THIS_MODULE;
 
-       entry = create_proc_entry("devices", 0, proc_bus_input_dir);
+       entry = proc_create("devices", 0, proc_bus_input_dir,
+                           &input_devices_fileops);
        if (!entry)
                goto fail1;
 
-       entry->owner = THIS_MODULE;
-       entry->proc_fops = &input_devices_fileops;
-
-       entry = create_proc_entry("handlers", 0, proc_bus_input_dir);
+       entry = proc_create("handlers", 0, proc_bus_input_dir,
+                           &input_handlers_fileops);
        if (!entry)
                goto fail2;
 
-       entry->owner = THIS_MODULE;
-       entry->proc_fops = &input_handlers_fileops;
-
        return 0;
 
  fail2:        remove_proc_entry("devices", proc_bus_input_dir);
- fail1: remove_proc_entry("input", proc_bus);
+ fail1: remove_proc_entry("bus/input", NULL);
        return -ENOMEM;
 }
 
@@ -929,7 +925,7 @@ static void input_proc_exit(void)
 {
        remove_proc_entry("devices", proc_bus_input_dir);
        remove_proc_entry("handlers", proc_bus_input_dir);
-       remove_proc_entry("input", proc_bus);
+       remove_proc_entry("bus/input", NULL);
 }
 
 #else /* !CONFIG_PROC_FS */
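
This hunk is part of the tree-wide move from create_proc_entry() plus a manual ->proc_fops assignment to proc_create(), which registers the entry together with its file_operations in one step. A minimal sketch under hypothetical "demo" names:

```c
/* Sketch only -- the entry name, show routine and fops are placeholders. */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello\n");
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init demo_proc_init(void)
{
	/* proc_create() wires up the fops at creation time, so the entry is
	 * never visible without its file_operations in place. */
	if (!proc_create("demo_driver", 0, NULL, &demo_fops))
		return -ENOMEM;
	return 0;
}
```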
index 1457b73850e7dd03e41fdb8df932342a3857b281..7fb3cf81cfbffcb234d5954b040d7ff9d24340fd 100644 (file)
@@ -159,7 +159,7 @@ static int iforce_usb_probe(struct usb_interface *intf,
 
        iforce->cr.bRequestType = USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_INTERFACE;
        iforce->cr.wIndex = 0;
-       iforce->cr.wLength = 16;
+       iforce->cr.wLength = cpu_to_le16(16);
 
        usb_fill_int_urb(iforce->irq, dev, usb_rcvintpipe(dev, epirq->bEndpointAddress),
                        iforce->data, 16, iforce_usb_irq, iforce, epirq->bInterval);
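
The wLength field of a USB control request travels on the wire in little-endian byte order, so the constant has to pass through cpu_to_le16(); on a big-endian host a bare 16 would otherwise be sent as 0x1000. A small sketch of filling such a request (the request number and values are arbitrary examples, not iforce's):

```c
/* Sketch: populating a control request with explicit endian conversion. */
#include <linux/usb.h>

static void demo_fill_ctrlrequest(struct usb_ctrlrequest *cr)
{
	cr->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_INTERFACE;
	cr->bRequest     = 0x01;               /* example vendor request */
	cr->wValue       = cpu_to_le16(0);
	cr->wIndex       = cpu_to_le16(0);
	cr->wLength      = cpu_to_le16(16);    /* 16 bytes, little-endian on the wire */
}
```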
index 4b07bdadb81e16d9f7f0392ee6a942a81de0c78b..b29e3affb805a97126ccee39dcc867cd6e40dfe3 100644 (file)
@@ -444,6 +444,23 @@ exit:
                     __FUNCTION__, retval);
 }
 
+static void xpad_bulk_out(struct urb *urb)
+{
+       switch (urb->status) {
+       case 0:
+               /* success */
+               break;
+       case -ECONNRESET:
+       case -ENOENT:
+       case -ESHUTDOWN:
+               /* this urb is terminated, clean up */
+               dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
+               break;
+       default:
+               dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
+       }
+}
+
 #if defined(CONFIG_JOYSTICK_XPAD_FF) || defined(CONFIG_JOYSTICK_XPAD_LEDS)
 static void xpad_irq_out(struct urb *urb)
 {
@@ -475,23 +492,6 @@ exit:
                    __FUNCTION__, retval);
 }
 
-static void xpad_bulk_out(struct urb *urb)
-{
-       switch (urb->status) {
-       case 0:
-               /* success */
-               break;
-       case -ECONNRESET:
-       case -ENOENT:
-       case -ESHUTDOWN:
-               /* this urb is terminated, clean up */
-               dbg("%s - urb shutting down with status: %d", __FUNCTION__, urb->status);
-               break;
-       default:
-               dbg("%s - nonzero urb status received: %d", __FUNCTION__, urb->status);
-       }
-}
-
 static int xpad_init_output(struct usb_interface *intf, struct usb_xpad *xpad)
 {
        struct usb_endpoint_descriptor *ep_irq_out;
index fed3c375ccf39ff34ecb2a23fb720da5538fa153..d8765cc93d27d5daa1f343edea8bee7e75226117 100644 (file)
@@ -2,33 +2,69 @@
  *  Driver for PC-speaker like devices found on various Sparc systems.
  *
  *  Copyright (c) 2002 Vojtech Pavlik
- *  Copyright (c) 2002, 2006 David S. Miller (davem@davemloft.net)
+ *  Copyright (c) 2002, 2006, 2008 David S. Miller (davem@davemloft.net)
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/input.h>
-#include <linux/platform_device.h>
+#include <linux/of_device.h>
 
 #include <asm/io.h>
-#include <asm/ebus.h>
-#include <asm/isa.h>
 
 MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
 MODULE_DESCRIPTION("Sparc Speaker beeper driver");
 MODULE_LICENSE("GPL");
 
+struct grover_beep_info {
+       void __iomem    *freq_regs;
+       void __iomem    *enable_reg;
+};
+
+struct bbc_beep_info {
+       u32             clock_freq;
+       void __iomem    *regs;
+};
+
 struct sparcspkr_state {
        const char              *name;
-       unsigned long           iobase;
        int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value);
        spinlock_t              lock;
        struct input_dev        *input_dev;
+       union {
+               struct grover_beep_info grover;
+               struct bbc_beep_info bbc;
+       } u;
 };
 
-static int ebus_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
+static u32 bbc_count_to_reg(struct bbc_beep_info *info, unsigned int count)
+{
+       u32 val, clock_freq = info->clock_freq;
+       int i;
+
+       if (!count)
+               return 0;
+
+       if (count <= clock_freq >> 20)
+               return 1 << 18;
+
+       if (count >= clock_freq >> 12)
+               return 1 << 10;
+
+       val = 1 << 18;
+       for (i = 19; i >= 11; i--) {
+               val >>= 1;
+               if (count <= clock_freq >> i)
+                       break;
+       }
+
+       return val;
+}
+
+static int bbc_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
 {
        struct sparcspkr_state *state = dev_get_drvdata(dev->dev.parent);
+       struct bbc_beep_info *info = &state->u.bbc;
        unsigned int count = 0;
        unsigned long flags;
 
@@ -44,24 +80,29 @@ static int ebus_spkr_event(struct input_dev *dev, unsigned int type, unsigned in
        if (value > 20 && value < 32767)
                count = 1193182 / value;
 
+       count = bbc_count_to_reg(info, count);
+
        spin_lock_irqsave(&state->lock, flags);
 
-       /* EBUS speaker only has on/off state, the frequency does not
-        * appear to be programmable.
-        */
-       if (state->iobase & 0x2UL)
-               outb(!!count, state->iobase);
-       else
-               outl(!!count, state->iobase);
+       if (count) {
+               outb(0x01,                 info->regs + 0);
+               outb(0x00,                 info->regs + 2);
+               outb((count >> 16) & 0xff, info->regs + 3);
+               outb((count >>  8) & 0xff, info->regs + 4);
+               outb(0x00,                 info->regs + 5);
+       } else {
+               outb(0x00,                 info->regs + 0);
+       }
 
        spin_unlock_irqrestore(&state->lock, flags);
 
        return 0;
 }
 
-static int isa_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
+static int grover_spkr_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
 {
        struct sparcspkr_state *state = dev_get_drvdata(dev->dev.parent);
+       struct grover_beep_info *info = &state->u.grover;
        unsigned int count = 0;
        unsigned long flags;
 
@@ -81,15 +122,15 @@ static int isa_spkr_event(struct input_dev *dev, unsigned int type, unsigned int
 
        if (count) {
                /* enable counter 2 */
-               outb(inb(state->iobase + 0x61) | 3, state->iobase + 0x61);
+               outb(inb(info->enable_reg) | 3, info->enable_reg);
                /* set command for counter 2, 2 byte write */
-               outb(0xB6, state->iobase + 0x43);
+               outb(0xB6, info->freq_regs + 1);
                /* select desired HZ */
-               outb(count & 0xff, state->iobase + 0x42);
-               outb((count >> 8) & 0xff, state->iobase + 0x42);
+               outb(count & 0xff, info->freq_regs + 0);
+               outb((count >> 8) & 0xff, info->freq_regs + 0);
        } else {
                /* disable counter 2 */
-               outb(inb_p(state->iobase + 0x61) & 0xFC, state->iobase + 0x61);
+               outb(inb_p(info->enable_reg) & 0xFC, info->enable_reg);
        }
 
        spin_unlock_irqrestore(&state->lock, flags);
@@ -131,7 +172,7 @@ static int __devinit sparcspkr_probe(struct device *dev)
        return 0;
 }
 
-static int __devexit sparcspkr_remove(struct of_device *dev)
+static int sparcspkr_shutdown(struct of_device *dev)
 {
        struct sparcspkr_state *state = dev_get_drvdata(&dev->dev);
        struct input_dev *input_dev = state->input_dev;
@@ -139,115 +180,180 @@ static int __devexit sparcspkr_remove(struct of_device *dev)
        /* turn off the speaker */
        state->event(input_dev, EV_SND, SND_BELL, 0);
 
-       input_unregister_device(input_dev);
-
-       dev_set_drvdata(&dev->dev, NULL);
-       kfree(state);
-
        return 0;
 }
 
-static int sparcspkr_shutdown(struct of_device *dev)
+static int __devinit bbc_beep_probe(struct of_device *op, const struct of_device_id *match)
 {
-       struct sparcspkr_state *state = dev_get_drvdata(&dev->dev);
-       struct input_dev *input_dev = state->input_dev;
+       struct sparcspkr_state *state;
+       struct bbc_beep_info *info;
+       struct device_node *dp;
+       int err = -ENOMEM;
 
-       /* turn off the speaker */
-       state->event(input_dev, EV_SND, SND_BELL, 0);
+       state = kzalloc(sizeof(*state), GFP_KERNEL);
+       if (!state)
+               goto out_err;
+
+       state->name = "Sparc BBC Speaker";
+       state->event = bbc_spkr_event;
+       spin_lock_init(&state->lock);
+
+       dp = of_find_node_by_path("/");
+       err = -ENODEV;
+       if (!dp)
+               goto out_free;
+
+       info = &state->u.bbc;
+       info->clock_freq = of_getintprop_default(dp, "clock-frequency", 0);
+       if (!info->clock_freq)
+               goto out_free;
+
+       info->regs = of_ioremap(&op->resource[0], 0, 6, "bbc beep");
+       if (!info->regs)
+               goto out_free;
+
+       dev_set_drvdata(&op->dev, state);
+
+       err = sparcspkr_probe(&op->dev);
+       if (err)
+               goto out_clear_drvdata;
 
        return 0;
+
+out_clear_drvdata:
+       dev_set_drvdata(&op->dev, NULL);
+       of_iounmap(&op->resource[0], info->regs, 6);
+
+out_free:
+       kfree(state);
+out_err:
+       return err;
 }
 
-static int __devinit ebus_beep_probe(struct of_device *dev, const struct of_device_id *match)
+static int bbc_remove(struct of_device *op)
 {
-       struct linux_ebus_device *edev = to_ebus_device(&dev->dev);
-       struct sparcspkr_state *state;
-       int err;
+       struct sparcspkr_state *state = dev_get_drvdata(&op->dev);
+       struct input_dev *input_dev = state->input_dev;
+       struct bbc_beep_info *info = &state->u.bbc;
 
-       state = kzalloc(sizeof(*state), GFP_KERNEL);
-       if (!state)
-               return -ENOMEM;
+       /* turn off the speaker */
+       state->event(input_dev, EV_SND, SND_BELL, 0);
 
-       state->name = "Sparc EBUS Speaker";
-       state->iobase = edev->resource[0].start;
-       state->event = ebus_spkr_event;
-       spin_lock_init(&state->lock);
+       input_unregister_device(input_dev);
 
-       dev_set_drvdata(&dev->dev, state);
+       of_iounmap(&op->resource[0], info->regs, 6);
 
-       err = sparcspkr_probe(&dev->dev);
-       if (err) {
-               dev_set_drvdata(&dev->dev, NULL);
-               kfree(state);
-       }
+       dev_set_drvdata(&op->dev, NULL);
+       kfree(state);
 
        return 0;
 }
 
-static struct of_device_id ebus_beep_match[] = {
+static struct of_device_id bbc_beep_match[] = {
        {
                .name = "beep",
+               .compatible = "SUNW,bbc-beep",
        },
        {},
 };
 
-static struct of_platform_driver ebus_beep_driver = {
-       .name           = "beep",
-       .match_table    = ebus_beep_match,
-       .probe          = ebus_beep_probe,
-       .remove         = __devexit_p(sparcspkr_remove),
+static struct of_platform_driver bbc_beep_driver = {
+       .name           = "bbcbeep",
+       .match_table    = bbc_beep_match,
+       .probe          = bbc_beep_probe,
+       .remove         = __devexit_p(bbc_remove),
        .shutdown       = sparcspkr_shutdown,
 };
 
-static int __devinit isa_beep_probe(struct of_device *dev, const struct of_device_id *match)
+static int __devinit grover_beep_probe(struct of_device *op, const struct of_device_id *match)
 {
-       struct sparc_isa_device *idev = to_isa_device(&dev->dev);
        struct sparcspkr_state *state;
-       int err;
+       struct grover_beep_info *info;
+       int err = -ENOMEM;
 
        state = kzalloc(sizeof(*state), GFP_KERNEL);
        if (!state)
-               return -ENOMEM;
+               goto out_err;
 
-       state->name = "Sparc ISA Speaker";
-       state->iobase = idev->resource.start;
-       state->event = isa_spkr_event;
+       state->name = "Sparc Grover Speaker";
+       state->event = grover_spkr_event;
        spin_lock_init(&state->lock);
 
-       dev_set_drvdata(&dev->dev, state);
+       info = &state->u.grover;
+       info->freq_regs = of_ioremap(&op->resource[2], 0, 2, "grover beep freq");
+       if (!info->freq_regs)
+               goto out_free;
 
-       err = sparcspkr_probe(&dev->dev);
-       if (err) {
-               dev_set_drvdata(&dev->dev, NULL);
-               kfree(state);
-       }
+       info->enable_reg = of_ioremap(&op->resource[3], 0, 1, "grover beep enable");
+       if (!info->enable_reg)
+               goto out_unmap_freq_regs;
+
+       dev_set_drvdata(&op->dev, state);
+
+       err = sparcspkr_probe(&op->dev);
+       if (err)
+               goto out_clear_drvdata;
+
+       return 0;
+
+out_clear_drvdata:
+       dev_set_drvdata(&op->dev, NULL);
+       of_iounmap(&op->resource[3], info->enable_reg, 1);
+
+out_unmap_freq_regs:
+       of_iounmap(&op->resource[2], info->freq_regs, 2);
+out_free:
+       kfree(state);
+out_err:
+       return err;
+}
+
+static int grover_remove(struct of_device *op)
+{
+       struct sparcspkr_state *state = dev_get_drvdata(&op->dev);
+       struct grover_beep_info *info = &state->u.grover;
+       struct input_dev *input_dev = state->input_dev;
+
+       /* turn off the speaker */
+       state->event(input_dev, EV_SND, SND_BELL, 0);
+
+       input_unregister_device(input_dev);
+
+       of_iounmap(&op->resource[3], info->enable_reg, 1);
+       of_iounmap(&op->resource[2], info->freq_regs, 2);
+
+       dev_set_drvdata(&op->dev, NULL);
+       kfree(state);
 
        return 0;
 }
 
-static struct of_device_id isa_beep_match[] = {
+static struct of_device_id grover_beep_match[] = {
        {
-               .name = "dma",
+               .name = "beep",
+               .compatible = "SUNW,smbus-beep",
        },
        {},
 };
 
-static struct of_platform_driver isa_beep_driver = {
-       .name           = "beep",
-       .match_table    = isa_beep_match,
-       .probe          = isa_beep_probe,
-       .remove         = __devexit_p(sparcspkr_remove),
+static struct of_platform_driver grover_beep_driver = {
+       .name           = "groverbeep",
+       .match_table    = grover_beep_match,
+       .probe          = grover_beep_probe,
+       .remove         = __devexit_p(grover_remove),
        .shutdown       = sparcspkr_shutdown,
 };
 
 static int __init sparcspkr_init(void)
 {
-       int err = of_register_driver(&ebus_beep_driver, &ebus_bus_type);
+       int err = of_register_driver(&bbc_beep_driver,
+                                    &of_platform_bus_type);
 
        if (!err) {
-               err = of_register_driver(&isa_beep_driver, &isa_bus_type);
+               err = of_register_driver(&grover_beep_driver,
+                                        &of_platform_bus_type);
                if (err)
-                       of_unregister_driver(&ebus_beep_driver);
+                       of_unregister_driver(&bbc_beep_driver);
        }
 
        return err;
@@ -255,8 +361,8 @@ static int __init sparcspkr_init(void)
 
 static void __exit sparcspkr_exit(void)
 {
-       of_unregister_driver(&ebus_beep_driver);
-       of_unregister_driver(&isa_beep_driver);
+       of_unregister_driver(&bbc_beep_driver);
+       of_unregister_driver(&grover_beep_driver);
 }
 
 module_init(sparcspkr_init);
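
bbc_count_to_reg() above maps the classic 1193182/Hz divisor onto the BBC beeper's register range: counts at or below clock_freq >> 20 clamp to 1 << 18, counts at or above clock_freq >> 12 clamp to 1 << 10, and anything in between picks a power of two by comparing against successive clock-frequency shifts. A standalone, user-space restatement of that mapping, using an assumed (not real) 100 MHz board clock as a worked example:

```c
/* Illustration only: same arithmetic as bbc_count_to_reg(), outside the kernel. */
#include <stdio.h>

static unsigned int count_to_reg(unsigned int clock_freq, unsigned int count)
{
	unsigned int val;
	int i;

	if (!count)
		return 0;
	if (count <= clock_freq >> 20)
		return 1 << 18;
	if (count >= clock_freq >> 12)
		return 1 << 10;

	val = 1 << 18;
	for (i = 19; i >= 11; i--) {
		val >>= 1;
		if (count <= clock_freq >> i)
			break;
	}
	return val;
}

int main(void)
{
	unsigned int hz = 440;                 /* example tone */
	unsigned int count = 1193182 / hz;     /* PIT-style divisor: 2711 */

	/* with the assumed 100 MHz clock this prints 8192, i.e. 1 << 13 */
	printf("reg = %u\n", count_to_reg(100000000, count));
	return 0;
}
```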
index e1a3a79ab3f90ca309e92c9cc2bd2565b6c06211..7ff71ba7b7c90265875ed79052c18966a81a0614 100644 (file)
@@ -46,7 +46,7 @@ struct serport {
 static int serport_serio_write(struct serio *serio, unsigned char data)
 {
        struct serport *serport = serio->port_data;
-       return -(serport->tty->driver->write(serport->tty, &data, 1) != 1);
+       return -(serport->tty->ops->write(serport->tty, &data, 1) != 1);
 }
 
 static int serport_serio_open(struct serio *serio)
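
serport now reaches the tty driver's write routine through tty->ops, the shared const tty_operations table, instead of through tty->driver. A minimal sketch of a caller under that convention (function and variable names are placeholders):

```c
/* Sketch: sending one byte through the tty_operations table. */
#include <linux/tty.h>

static int demo_send_byte(struct tty_struct *tty, unsigned char data)
{
	if (!tty || !tty->ops->write)
		return -EINVAL;

	/* ->write() returns the number of bytes it accepted */
	return tty->ops->write(tty, &data, 1) == 1 ? 0 : -EIO;
}
```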
index 1d759f6f807670479171b21bdc20da2d17a46072..55c1134d61372aaf2889bb93f6cc33aef8b656ee 100644 (file)
@@ -528,9 +528,9 @@ static void aiptek_irq(struct urb *urb)
                            (aiptek->curSetting.pointerMode)) {
                                aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED;
                } else {
-                       x = le16_to_cpu(get_unaligned((__le16 *) (data + 1)));
-                       y = le16_to_cpu(get_unaligned((__le16 *) (data + 3)));
-                       z = le16_to_cpu(get_unaligned((__le16 *) (data + 6)));
+                       x = get_unaligned_le16(data + 1);
+                       y = get_unaligned_le16(data + 3);
+                       z = get_unaligned_le16(data + 6);
 
                        dv = (data[5] & 0x01) != 0 ? 1 : 0;
                        p = (data[5] & 0x02) != 0 ? 1 : 0;
@@ -613,8 +613,8 @@ static void aiptek_irq(struct urb *urb)
                        (aiptek->curSetting.pointerMode)) {
                        aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED;
                } else {
-                       x = le16_to_cpu(get_unaligned((__le16 *) (data + 1)));
-                       y = le16_to_cpu(get_unaligned((__le16 *) (data + 3)));
+                       x = get_unaligned_le16(data + 1);
+                       y = get_unaligned_le16(data + 3);
 
                        jitterable = data[5] & 0x1c;
 
@@ -679,7 +679,7 @@ static void aiptek_irq(struct urb *urb)
                pck = (data[1] & aiptek->curSetting.stylusButtonUpper) != 0 ? 1 : 0;
 
                macro = dv && p && tip && !(data[3] & 1) ? (data[3] >> 1) : -1;
-               z = le16_to_cpu(get_unaligned((__le16 *) (data + 4)));
+               z = get_unaligned_le16(data + 4);
 
                if (dv) {
                        /* If the selected tool changed, reset the old
@@ -757,7 +757,7 @@ static void aiptek_irq(struct urb *urb)
         * hat switches (which just so happen to be the macroKeys.)
         */
        else if (data[0] == 6) {
-               macro = le16_to_cpu(get_unaligned((__le16 *) (data + 1)));
+               macro = get_unaligned_le16(data + 1);
                if (macro > 0) {
                        input_report_key(inputdev, macroKeyEvents[macro - 1],
                                         0);
@@ -952,7 +952,7 @@ aiptek_query(struct aiptek *aiptek, unsigned char command, unsigned char data)
                    buf[0], buf[1], buf[2]);
                ret = -EIO;
        } else {
-               ret = le16_to_cpu(get_unaligned((__le16 *) (buf + 1)));
+               ret = get_unaligned_le16(buf + 1);
        }
        kfree(buf);
        return ret;
index d2c6da264722576ddee43d1041ed641fe25af422..c5a8661a1baae242720bdd2c9f0eee248019c1d3 100644 (file)
@@ -245,11 +245,11 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report,
                        data = report[i];
                        break;
                case 2:
-                       data16 = le16_to_cpu(get_unaligned((__le16 *)&report[i]));
+                       data16 = get_unaligned_le16(&report[i]);
                        break;
                case 3:
                        size = 4;
-                       data32 = le32_to_cpu(get_unaligned((__le32 *)&report[i]));
+                       data32 = get_unaligned_le32(&report[i]);
                        break;
                }
 
@@ -695,10 +695,10 @@ static void gtco_urb_callback(struct urb *urbinfo)
                        /*  Fall thru */
                case 1:
                        /* All reports have X and Y coords in the same place */
-                       val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[1]));
+                       val = get_unaligned_le16(&device->buffer[1]);
                        input_report_abs(inputdev, ABS_X, val);
 
-                       val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[3]));
+                       val = get_unaligned_le16(&device->buffer[3]);
                        input_report_abs(inputdev, ABS_Y, val);
 
                        /* Ditto for proximity bit */
@@ -762,7 +762,7 @@ static void gtco_urb_callback(struct urb *urbinfo)
                                le_buffer[1]  = (u8)(device->buffer[4] >> 1);
                                le_buffer[1] |= (u8)((device->buffer[5] & 0x1) << 7);
 
-                               val = le16_to_cpu(get_unaligned((__le16 *)le_buffer));
+                               val = get_unaligned_le16(le_buffer);
                                input_report_abs(inputdev, ABS_Y, val);
 
                                /*
@@ -772,10 +772,10 @@ static void gtco_urb_callback(struct urb *urbinfo)
                                buttonbyte = device->buffer[5] >> 1;
                        } else {
 
-                               val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[1]));
+                               val = get_unaligned_le16(&device->buffer[1]);
                                input_report_abs(inputdev, ABS_X, val);
 
-                               val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[3]));
+                               val = get_unaligned_le16(&device->buffer[3]);
                                input_report_abs(inputdev, ABS_Y, val);
 
                                buttonbyte = device->buffer[5];
@@ -897,7 +897,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
        dbg("Extra descriptor success: type:%d  len:%d",
            hid_desc->bDescriptorType,  hid_desc->wDescriptorLength);
 
-       report = kzalloc(hid_desc->wDescriptorLength, GFP_KERNEL);
+       report = kzalloc(le16_to_cpu(hid_desc->wDescriptorLength), GFP_KERNEL);
        if (!report) {
                err("No more memory for report");
                error = -ENOMEM;
@@ -913,16 +913,16 @@ static int gtco_probe(struct usb_interface *usbinterface,
                                         REPORT_DEVICE_TYPE << 8,
                                         0, /* interface */
                                         report,
-                                        hid_desc->wDescriptorLength,
+                                        le16_to_cpu(hid_desc->wDescriptorLength),
                                         5000); /* 5 secs */
 
-               if (result == hid_desc->wDescriptorLength)
+               if (result == le16_to_cpu(hid_desc->wDescriptorLength))
                        break;
        }
 
        /* If we didn't get the report, fail */
        dbg("usb_control_msg result: :%d", result);
-       if (result != hid_desc->wDescriptorLength) {
+       if (result != le16_to_cpu(hid_desc->wDescriptorLength)) {
                err("Failed to get HID Report Descriptor of size: %d",
                    hid_desc->wDescriptorLength);
                error = -EIO;
index 1182fc133167ac203e2d5e52cc71201ce4108c24..f23f5a97fb3868e58f5f23c730d38fb5c4eabc8d 100644 (file)
@@ -63,8 +63,8 @@ static void kbtab_irq(struct urb *urb)
                goto exit;
        }
 
-       kbtab->x = le16_to_cpu(get_unaligned((__le16 *) &data[1]));
-       kbtab->y = le16_to_cpu(get_unaligned((__le16 *) &data[3]));
+       kbtab->x = get_unaligned_le16(&data[1]);
+       kbtab->y = get_unaligned_le16(&data[3]);
 
        kbtab->pressure = (data[5]);
 
index 23ae66c76d47f2698f5daeabdf548bab5c03ddbe..6ca0bb949ad301a0ba109a73b342a1cb7833075f 100644 (file)
@@ -350,7 +350,7 @@ static void capincci_free(struct capidev *cdev, u32 ncci)
                if (ncci == 0xffffffff || np->ncci == ncci) {
                        *pp = (*pp)->next;
 #ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
-                       if ((mp = np->minorp) != 0) {
+                       if ((mp = np->minorp) != NULL) {
 #if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE)
                                capifs_free_ncci(mp->minor);
 #endif
@@ -366,7 +366,7 @@ static void capincci_free(struct capidev *cdev, u32 ncci)
                        }
 #endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
                        kfree(np);
-                       if (*pp == 0) return;
+                       if (*pp == NULL) return;
                } else {
                        pp = &(*pp)->next;
                }
@@ -483,7 +483,7 @@ static int handle_recv_skb(struct capiminor *mp, struct sk_buff *skb)
 #endif
                goto bad;
        }
-       if ((nskb = gen_data_b3_resp_for(mp, skb)) == 0) {
+       if ((nskb = gen_data_b3_resp_for(mp, skb)) == NULL) {
                printk(KERN_ERR "capi: gen_data_b3_resp failed\n");
                goto bad;
        }
@@ -512,7 +512,7 @@ bad:
 static void handle_minor_recv(struct capiminor *mp)
 {
        struct sk_buff *skb;
-       while ((skb = skb_dequeue(&mp->inqueue)) != 0) {
+       while ((skb = skb_dequeue(&mp->inqueue)) != NULL) {
                unsigned int len = skb->len;
                mp->inbytes -= len;
                if (handle_recv_skb(mp, skb) < 0) {
@@ -538,7 +538,7 @@ static int handle_minor_send(struct capiminor *mp)
                return 0;
        }
 
-       while ((skb = skb_dequeue(&mp->outqueue)) != 0) {
+       while ((skb = skb_dequeue(&mp->outqueue)) != NULL) {
                datahandle = mp->datahandle;
                len = (u16)skb->len;
                skb_push(skb, CAPI_DATA_B3_REQ_LEN);
@@ -689,19 +689,19 @@ capi_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
        if (!cdev->ap.applid)
                return -ENODEV;
 
-       if ((skb = skb_dequeue(&cdev->recvqueue)) == 0) {
+       if ((skb = skb_dequeue(&cdev->recvqueue)) == NULL) {
 
                if (file->f_flags & O_NONBLOCK)
                        return -EAGAIN;
 
                for (;;) {
                        interruptible_sleep_on(&cdev->recvwait);
-                       if ((skb = skb_dequeue(&cdev->recvqueue)) != 0)
+                       if ((skb = skb_dequeue(&cdev->recvqueue)) != NULL)
                                break;
                        if (signal_pending(current))
                                break;
                }
-               if (skb == 0)
+               if (skb == NULL)
                        return -ERESTARTNOHAND;
        }
        if (skb->len > count) {
@@ -940,12 +940,12 @@ capi_ioctl(struct inode *inode, struct file *file,
                                return -EFAULT;
 
                        mutex_lock(&cdev->ncci_list_mtx);
-                       if ((nccip = capincci_find(cdev, (u32) ncci)) == 0) {
+                       if ((nccip = capincci_find(cdev, (u32) ncci)) == NULL) {
                                mutex_unlock(&cdev->ncci_list_mtx);
                                return 0;
                        }
 #ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
-                       if ((mp = nccip->minorp) != 0) {
+                       if ((mp = nccip->minorp) != NULL) {
                                count += atomic_read(&mp->ttyopencount);
                        }
 #endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
@@ -966,7 +966,7 @@ capi_ioctl(struct inode *inode, struct file *file,
                                return -EFAULT;
                        mutex_lock(&cdev->ncci_list_mtx);
                        nccip = capincci_find(cdev, (u32) ncci);
-                       if (!nccip || (mp = nccip->minorp) == 0) {
+                       if (!nccip || (mp = nccip->minorp) == NULL) {
                                mutex_unlock(&cdev->ncci_list_mtx);
                                return -ESRCH;
                        }
@@ -986,7 +986,7 @@ capi_open(struct inode *inode, struct file *file)
        if (file->private_data)
                return -EEXIST;
 
-       if ((file->private_data = capidev_alloc()) == 0)
+       if ((file->private_data = capidev_alloc()) == NULL)
                return -ENOMEM;
 
        return nonseekable_open(inode, file);
@@ -1023,9 +1023,9 @@ static int capinc_tty_open(struct tty_struct * tty, struct file * file)
        struct capiminor *mp;
        unsigned long flags;
 
-       if ((mp = capiminor_find(iminor(file->f_path.dentry->d_inode))) == 0)
+       if ((mp = capiminor_find(iminor(file->f_path.dentry->d_inode))) == NULL)
                return -ENXIO;
-       if (mp->nccip == 0)
+       if (mp->nccip == NULL)
                return -ENXIO;
 
        tty->driver_data = (void *)mp;
@@ -1058,7 +1058,7 @@ static void capinc_tty_close(struct tty_struct * tty, struct file * file)
 #ifdef _DEBUG_REFCOUNT
                printk(KERN_DEBUG "capinc_tty_close ocount=%d\n", atomic_read(&mp->ttyopencount));
 #endif
-               if (mp->nccip == 0)
+               if (mp->nccip == NULL)
                        capiminor_free(mp);
        }
 
@@ -1111,11 +1111,12 @@ static int capinc_tty_write(struct tty_struct * tty,
        return count;
 }
 
-static void capinc_tty_put_char(struct tty_struct *tty, unsigned char ch)
+static int capinc_tty_put_char(struct tty_struct *tty, unsigned char ch)
 {
        struct capiminor *mp = (struct capiminor *)tty->driver_data;
        struct sk_buff *skb;
        unsigned long flags;
+       int ret = 1;
 
 #ifdef _DEBUG_TTYFUNCS
        printk(KERN_DEBUG "capinc_put_char(%u)\n", ch);
@@ -1125,7 +1126,7 @@ static void capinc_tty_put_char(struct tty_struct *tty, unsigned char ch)
 #ifdef _DEBUG_TTYFUNCS
                printk(KERN_DEBUG "capinc_tty_put_char: mp or mp->ncci NULL\n");
 #endif
-               return;
+               return 0;
        }
 
        spin_lock_irqsave(&workaround_lock, flags);
@@ -1134,7 +1135,7 @@ static void capinc_tty_put_char(struct tty_struct *tty, unsigned char ch)
                if (skb_tailroom(skb) > 0) {
                        *(skb_put(skb, 1)) = ch;
                        spin_unlock_irqrestore(&workaround_lock, flags);
-                       return;
+                       return 1;
                }
                mp->ttyskb = NULL;
                skb_queue_tail(&mp->outqueue, skb);
@@ -1148,8 +1149,10 @@ static void capinc_tty_put_char(struct tty_struct *tty, unsigned char ch)
                mp->ttyskb = skb;
        } else {
                printk(KERN_ERR "capinc_put_char: char %u lost\n", ch);
+               ret = 0;
        }
        spin_unlock_irqrestore(&workaround_lock, flags);
+       return ret;
 }
 
 static void capinc_tty_flush_chars(struct tty_struct *tty)
@@ -1526,9 +1529,9 @@ static int __init capi_init(void)
        char *compileinfo;
        int major_ret;
 
-       if ((p = strchr(revision, ':')) != 0 && p[1]) {
+       if ((p = strchr(revision, ':')) != NULL && p[1]) {
                strlcpy(rev, p + 2, sizeof(rev));
-               if ((p = strchr(rev, '$')) != 0 && p > rev)
+               if ((p = strchr(rev, '$')) != NULL && p > rev)
                   *(p-1) = 0;
        } else
                strcpy(rev, "1.0");
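
capinc_tty_put_char() above is adjusted for the tty core's new put_char() prototype, which returns how many characters were actually accepted (0 or 1) instead of void. A minimal sketch of an operation under that prototype, with a hypothetical buffering helper standing in for the driver's real queueing:

```c
/* Sketch only -- demo_queue_byte() is a made-up stand-in for real buffering. */
#include <linux/tty.h>

static int demo_queue_byte(struct tty_struct *tty, unsigned char ch)
{
	/* pretend the byte was buffered successfully */
	return 1;
}

static int demo_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
	/* return 1 if the character was queued, 0 if it had to be dropped */
	return demo_queue_byte(tty, ch) ? 1 : 0;
}
```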
index cb42b690b45ee221b3f77a3ab967ced3864ba619..d5b4cc357a3c77dff03b704070063e2bed0ac11c 100644 (file)
@@ -335,7 +335,7 @@ static capidrv_plci *new_plci(capidrv_contr * card, int chan)
 
        plcip = kzalloc(sizeof(capidrv_plci), GFP_ATOMIC);
 
-       if (plcip == 0)
+       if (plcip == NULL)
                return NULL;
 
        plcip->state = ST_PLCI_NONE;
@@ -404,7 +404,7 @@ static inline capidrv_ncci *new_ncci(capidrv_contr * card,
 
        nccip = kzalloc(sizeof(capidrv_ncci), GFP_ATOMIC);
 
-       if (nccip == 0)
+       if (nccip == NULL)
                return NULL;
 
        nccip->ncci = ncci;
@@ -426,7 +426,7 @@ static inline capidrv_ncci *find_ncci(capidrv_contr * card, u32 ncci)
        capidrv_plci *plcip;
        capidrv_ncci *p;
 
-       if ((plcip = find_plci_by_ncci(card, ncci)) == 0)
+       if ((plcip = find_plci_by_ncci(card, ncci)) == NULL)
                return NULL;
 
        for (p = plcip->ncci_list; p; p = p->next)
@@ -441,7 +441,7 @@ static inline capidrv_ncci *find_ncci_by_msgid(capidrv_contr * card,
        capidrv_plci *plcip;
        capidrv_ncci *p;
 
-       if ((plcip = find_plci_by_ncci(card, ncci)) == 0)
+       if ((plcip = find_plci_by_ncci(card, ncci)) == NULL)
                return NULL;
 
        for (p = plcip->ncci_list; p; p = p->next)
@@ -755,7 +755,7 @@ static inline int new_bchan(capidrv_contr * card)
 {
        int i;
        for (i = 0; i < card->nbchan; i++) {
-               if (card->bchans[i].plcip == 0) {
+               if (card->bchans[i].plcip == NULL) {
                        card->bchans[i].disconnecting = 0;
                        return i;
                }
@@ -877,7 +877,7 @@ static void handle_incoming_call(capidrv_contr * card, _cmsg * cmsg)
                return;
        }
        bchan = &card->bchans[chan];
-       if ((plcip = new_plci(card, chan)) == 0) {
+       if ((plcip = new_plci(card, chan)) == NULL) {
                printk(KERN_ERR "capidrv-%d: incoming call: no memory, sorry.\n", card->contrnr);
                return;
        }
@@ -1388,12 +1388,12 @@ static void capidrv_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
                _cdebbuf *cdb = capi_cmsg2str(&s_cmsg);
 
                if (cdb) {
-                       printk(KERN_DEBUG "%s: applid=%d %s\n", __FUNCTION__,
+                       printk(KERN_DEBUG "%s: applid=%d %s\n", __func__,
                                ap->applid, cdb->buf);
                        cdebbuf_free(cdb);
                } else
                        printk(KERN_DEBUG "%s: applid=%d %s not traced\n",
-                               __FUNCTION__, ap->applid,
+                               __func__, ap->applid,
                                capi_cmd2str(s_cmsg.Command, s_cmsg.Subcommand));
        }
        if (s_cmsg.Command == CAPI_DATA_B3
@@ -1661,7 +1661,7 @@ static int capidrv_command(isdn_ctrl * c, capidrv_contr * card)
                                              NULL,     /* Useruserdata */
                                              NULL      /* Facilitydataarray */
                            );
-                       if ((plcip = new_plci(card, (c->arg % card->nbchan))) == 0) {
+                       if ((plcip = new_plci(card, (c->arg % card->nbchan))) == NULL) {
                                cmd.command = ISDN_STAT_DHUP;
                                cmd.driver = card->myid;
                                cmd.arg = (c->arg % card->nbchan);
@@ -1966,7 +1966,7 @@ static void enable_dchannel_trace(capidrv_contr *card)
                        card->name, errcode);
           return;
        }
-       if (strstr(manufacturer, "AVM") == 0) {
+       if (strstr(manufacturer, "AVM") == NULL) {
           printk(KERN_ERR "%s: not from AVM, no d-channel trace possible (%s)\n",
                        card->name, manufacturer);
           return;
@@ -2291,10 +2291,10 @@ static int __init capidrv_init(void)
        u32 ncontr, contr;
        u16 errcode;
 
-       if ((p = strchr(revision, ':')) != 0 && p[1]) {
+       if ((p = strchr(revision, ':')) != NULL && p[1]) {
                strncpy(rev, p + 2, sizeof(rev));
                rev[sizeof(rev)-1] = 0;
-               if ((p = strchr(rev, '$')) != 0 && p > rev)
+               if ((p = strchr(rev, '$')) != NULL && p > rev)
                   *(p-1) = 0;
        } else
                strcpy(rev, "1.0");
@@ -2335,10 +2335,10 @@ static void __exit capidrv_exit(void)
        char rev[32];
        char *p;
 
-       if ((p = strchr(revision, ':')) != 0) {
+       if ((p = strchr(revision, ':')) != NULL) {
                strncpy(rev, p + 1, sizeof(rev));
                rev[sizeof(rev)-1] = 0;
-               if ((p = strchr(rev, '$')) != 0)
+               if ((p = strchr(rev, '$')) != NULL)
                        *p = 0;
        } else {
                strcpy(rev, " ??? ");
index 6d7c47ec0367aa39026c358ad3948200893fe19c..550e80f390a63f87da84f84e9ec0290f75ca7151 100644 (file)
@@ -69,6 +69,7 @@ static int capifs_remount(struct super_block *s, int *flags, char *data)
                } else if (sscanf(this_char, "mode=%o%c", &n, &dummy) == 1)
                        mode = n & ~S_IFMT;
                else {
+                       kfree(new_opt);
                        printk("capifs: called with bogus options\n");
                        return -EINVAL;
                }
@@ -189,9 +190,9 @@ static int __init capifs_init(void)
        char *p;
        int err;
 
-       if ((p = strchr(revision, ':')) != 0 && p[1]) {
+       if ((p = strchr(revision, ':')) != NULL && p[1]) {
                strlcpy(rev, p + 2, sizeof(rev));
-               if ((p = strchr(rev, '$')) != 0 && p > rev)
+               if ((p = strchr(rev, '$')) != NULL && p > rev)
                   *(p-1) = 0;
        } else
                strcpy(rev, "1.0");
index 68409d971e73a6c86aaf8f116f5eada1e75564c2..fcaa1241ee7714bb835aa7d7b0db648c50e19a45 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/isdn/capilli.h>
 
 #define DBG(format, arg...) do { \
-printk(KERN_DEBUG "%s: " format "\n" , __FUNCTION__ , ## arg); \
+printk(KERN_DEBUG "%s: " format "\n" , __func__ , ## arg); \
 } while (0)
 
 struct capilib_msgidqueue {
@@ -44,7 +44,7 @@ static inline void mq_init(struct capilib_ncci * np)
 static inline int mq_enqueue(struct capilib_ncci * np, u16 msgid)
 {
        struct capilib_msgidqueue *mq;
-       if ((mq = np->msgidfree) == 0)
+       if ((mq = np->msgidfree) == NULL)
                return 0;
        np->msgidfree = mq->next;
        mq->msgid = msgid;
index 22379b94e88f0f0a2bdaec108f3dfbb9a187d749..ebef4ce1b00c1b9b893efa2e5959c545bf4ebffd 100644 (file)
@@ -450,7 +450,7 @@ static void pars_2_message(_cmsg * cmsg)
                        cmsg->l += 4;
                        break;
                case _CSTRUCT:
-                       if (*(u8 **) OFF == 0) {
+                       if (*(u8 **) OFF == NULL) {
                                *(cmsg->m + cmsg->l) = '\0';
                                cmsg->l++;
                        } else if (**(_cstruct *) OFF != 0xff) {
index f55531869313f2422705c79910568982dd476a2e..75726ea0fbbd1615705f68ab58bb914df386cf97 100644 (file)
@@ -10,7 +10,7 @@
  *
  */
 
-#define CONFIG_AVMB1_COMPAT
+#define AVMB1_COMPAT
 
 #include "kcapi.h"
 #include <linux/module.h>
@@ -29,7 +29,7 @@
 #include <asm/uaccess.h>
 #include <linux/isdn/capicmd.h>
 #include <linux/isdn/capiutil.h>
-#ifdef CONFIG_AVMB1_COMPAT
+#ifdef AVMB1_COMPAT
 #include <linux/b1lli.h>
 #endif
 #include <linux/mutex.h>
@@ -154,7 +154,7 @@ static void register_appl(struct capi_ctr *card, u16 applid, capi_register_param
        if (card)
                card->register_appl(card, applid, rparam);
        else
-               printk(KERN_WARNING "%s: cannot get card resources\n", __FUNCTION__);
+               printk(KERN_WARNING "%s: cannot get card resources\n", __func__);
 }
 
 
@@ -178,7 +178,7 @@ static void notify_up(u32 contr)
                printk(KERN_DEBUG "kcapi: notify up contr %d\n", contr);
        }
        if (!card) {
-               printk(KERN_WARNING "%s: invalid contr %d\n", __FUNCTION__, contr);
+               printk(KERN_WARNING "%s: invalid contr %d\n", __func__, contr);
                return;
        }
        for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
@@ -740,7 +740,7 @@ u16 capi20_get_profile(u32 contr, struct capi_profile *profp)
 
 EXPORT_SYMBOL(capi20_get_profile);
 
-#ifdef CONFIG_AVMB1_COMPAT
+#ifdef AVMB1_COMPAT
 static int old_capi_manufacturer(unsigned int cmd, void __user *data)
 {
        avmb1_loadandconfigdef ldef;
@@ -826,7 +826,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
                card = capi_ctr_get(card);
                if (!card)
                        return -ESRCH;
-               if (card->load_firmware == 0) {
+               if (card->load_firmware == NULL) {
                        printk(KERN_DEBUG "kcapi: load: no load function\n");
                        return -ESRCH;
                }
@@ -835,7 +835,7 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
                        printk(KERN_DEBUG "kcapi: load: invalid parameter: length of t4file is %d ?\n", ldef.t4file.len);
                        return -EINVAL;
                }
-               if (ldef.t4file.data == 0) {
+               if (ldef.t4file.data == NULL) {
                        printk(KERN_DEBUG "kcapi: load: invalid parameter: dataptr is 0\n");
                        return -EINVAL;
                }
@@ -904,7 +904,7 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
         struct capi_ctr *card;
 
        switch (cmd) {
-#ifdef CONFIG_AVMB1_COMPAT
+#ifdef AVMB1_COMPAT
        case AVMB1_LOAD:
        case AVMB1_LOAD_AND_CONFIG:
        case AVMB1_RESETCARD:
@@ -951,7 +951,7 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
                        if (strcmp(driver->name, cdef.driver) == 0)
                                break;
                }
-               if (driver == 0) {
+               if (driver == NULL) {
                        printk(KERN_ERR "kcapi: driver \"%s\" not loaded.\n",
                                        cdef.driver);
                        return -ESRCH;
@@ -1004,9 +1004,9 @@ static int __init kcapi_init(void)
                return ret;
         kcapi_proc_init();
 
-       if ((p = strchr(revision, ':')) != 0 && p[1]) {
+       if ((p = strchr(revision, ':')) != NULL && p[1]) {
                strlcpy(rev, p + 2, sizeof(rev));
-               if ((p = strchr(rev, '$')) != 0 && p > rev)
+               if ((p = strchr(rev, '$')) != NULL && p > rev)
                   *(p-1) = 0;
        } else
                strcpy(rev, "1.0");
index 1cb2c40f99219bcdeb2f8b33a62cfc8b0abf672b..244711f7f838c34ed3bd4e364ca861a91e37c3b1 100644 (file)
@@ -17,7 +17,7 @@
 
 #ifdef KCAPI_DEBUG
 #define DBG(format, arg...) do { \
-printk(KERN_DEBUG "%s: " format "\n" , __FUNCTION__ , ## arg); \
+printk(KERN_DEBUG "%s: " format "\n" , __func__ , ## arg); \
 } while (0)
 #else
 #define DBG(format, arg...) /* */
index 845a797b00309e2fb45fb1ce3f9defd5ed943146..c29208bd752131e9d8406c193a48f2a5982df272 100644 (file)
@@ -114,6 +114,7 @@ static int seq_contrstats_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations proc_controller_ops = {
+       .owner          = THIS_MODULE,
        .open           = seq_controller_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -121,6 +122,7 @@ static const struct file_operations proc_controller_ops = {
 };
 
 static const struct file_operations proc_contrstats_ops = {
+       .owner          = THIS_MODULE,
        .open           = seq_contrstats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -219,6 +221,7 @@ seq_applstats_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations proc_applications_ops = {
+       .owner          = THIS_MODULE,
        .open           = seq_applications_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -226,21 +229,13 @@ static const struct file_operations proc_applications_ops = {
 };
 
 static const struct file_operations proc_applstats_ops = {
+       .owner          = THIS_MODULE,
        .open           = seq_applstats_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
 };
 
-static void
-create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
-{
-       struct proc_dir_entry *entry;
-       entry = create_proc_entry(name, mode, NULL);
-       if (entry)
-               entry->proc_fops = f;
-}
-
 // ---------------------------------------------------------------------------
 
 static void *capi_driver_start(struct seq_file *seq, loff_t *pos)
@@ -283,6 +278,7 @@ seq_capi_driver_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations proc_driver_ops = {
+       .owner          = THIS_MODULE,
        .open           = seq_capi_driver_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -296,11 +292,11 @@ kcapi_proc_init(void)
 {
        proc_mkdir("capi",             NULL);
        proc_mkdir("capi/controllers", NULL);
-       create_seq_entry("capi/controller",   0, &proc_controller_ops);
-       create_seq_entry("capi/contrstats",   0, &proc_contrstats_ops);
-       create_seq_entry("capi/applications", 0, &proc_applications_ops);
-       create_seq_entry("capi/applstats",    0, &proc_applstats_ops);
-       create_seq_entry("capi/driver",       0, &proc_driver_ops);
+       proc_create("capi/controller",   0, NULL, &proc_controller_ops);
+       proc_create("capi/contrstats",   0, NULL, &proc_contrstats_ops);
+       proc_create("capi/applications", 0, NULL, &proc_applications_ops);
+       proc_create("capi/applstats",    0, NULL, &proc_applstats_ops);
+       proc_create("capi/driver",       0, NULL, &proc_driver_ops);
 }
 
 void __exit
index 4fd4c46892e3306ef3a18e181a4dc6e0735f4a9a..8b256a617c8a4524cec9f503ef7371ebbf30356b 100644 (file)
@@ -288,13 +288,12 @@ divert_dev_init(void)
        isdn_proc_entry = proc_mkdir("isdn", init_net.proc_net);
        if (!isdn_proc_entry)
                return (-1);
-       isdn_divert_entry = create_proc_entry("divert", S_IFREG | S_IRUGO, isdn_proc_entry);
+       isdn_divert_entry = proc_create("divert", S_IFREG | S_IRUGO,
+                                       isdn_proc_entry, &isdn_fops);
        if (!isdn_divert_entry) {
                remove_proc_entry("isdn", init_net.proc_net);
                return (-1);
        }
-       isdn_divert_entry->proc_fops = &isdn_fops; 
-       isdn_divert_entry->owner = THIS_MODULE; 
 #endif /* CONFIG_PROC_FS */
 
        return (0);
index fceeb1d57682849b3e438e63a265d4fd036efe78..45d1ee93cd39979e0f6788f73f9b7a4571eed4e6 100644 (file)
@@ -68,10 +68,10 @@ static int write_modem(struct cardstate *cs)
        struct tty_struct *tty = cs->hw.ser->tty;
        struct bc_state *bcs = &cs->bcs[0];     /* only one channel */
        struct sk_buff *skb = bcs->tx_skb;
-       int sent;
+       int sent = -EOPNOTSUPP;
 
        if (!tty || !tty->driver || !skb)
-               return -EFAULT;
+               return -EINVAL;
 
        if (!skb->len) {
                dev_kfree_skb_any(skb);
@@ -80,7 +80,8 @@ static int write_modem(struct cardstate *cs)
        }
 
        set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-       sent = tty->driver->write(tty, skb->data, skb->len);
+       if (tty->ops->write)
+               sent = tty->ops->write(tty, skb->data, skb->len);
        gig_dbg(DEBUG_OUTPUT, "write_modem: sent %d", sent);
        if (sent < 0) {
                /* error */
@@ -120,7 +121,7 @@ static int send_cb(struct cardstate *cs)
 
        if (cb->len) {
                set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-               sent = tty->driver->write(tty, cb->buf + cb->offset, cb->len);
+               sent = tty->ops->write(tty, cb->buf + cb->offset, cb->len);
                if (sent < 0) {
                        /* error */
                        gig_dbg(DEBUG_OUTPUT, "send_cb: write error %d", sent);
@@ -440,14 +441,14 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, unsi
        struct tty_struct *tty = cs->hw.ser->tty;
        unsigned int set, clear;
 
-       if (!tty || !tty->driver || !tty->driver->tiocmset)
-               return -EFAULT;
+       if (!tty || !tty->driver || !tty->ops->tiocmset)
+               return -EINVAL;
        set = new_state & ~old_state;
        clear = old_state & ~new_state;
        if (!set && !clear)
                return 0;
        gig_dbg(DEBUG_IF, "tiocmset set %x clear %x", set, clear);
-       return tty->driver->tiocmset(tty, NULL, set, clear);
+       return tty->ops->tiocmset(tty, NULL, set, clear);
 }
 
 static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
index 4484a6417235ec3c87de9cbe36313a7b015bfac2..abf05ec31760544b41928a84cb7286b58c6ac95b 100644 (file)
@@ -661,11 +661,11 @@ int b1ctl_read_proc(char *page, char **start, off_t off,
        len += sprintf(page+len, "%-16s %s\n", "type", s);
        if (card->cardtype == avm_t1isa)
           len += sprintf(page+len, "%-16s %d\n", "cardnr", card->cardnr);
-       if ((s = cinfo->version[VER_DRIVER]) != 0)
+       if ((s = cinfo->version[VER_DRIVER]) != NULL)
           len += sprintf(page+len, "%-16s %s\n", "ver_driver", s);
-       if ((s = cinfo->version[VER_CARDTYPE]) != 0)
+       if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
           len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s);
-       if ((s = cinfo->version[VER_SERIAL]) != 0)
+       if ((s = cinfo->version[VER_SERIAL]) != NULL)
           len += sprintf(page+len, "%-16s %s\n", "ver_serial", s);
 
        if (card->cardtype != avm_m1) {
@@ -788,9 +788,9 @@ static int __init b1_init(void)
        char *p;
        char rev[32];
 
-       if ((p = strchr(revision, ':')) != 0 && p[1]) {
+       if ((p = strchr(revision, ':')) != NULL && p[1]) {
                strlcpy(rev, p + 2, 32);
-               if ((p = strchr(rev, '$')) != 0 && p > rev)
+               if ((p = strchr(rev, '$')) != NULL && p > rev)
                   *(p-1) = 0;
        } else
                strcpy(rev, "1.0");
index 669f6f67449c46f26416a27c82939ad734256306..da34b98e3de7425b0ed647a3224e5b1f4d035195 100644 (file)
@@ -883,11 +883,11 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
        default: s = "???"; break;
        }
        len += sprintf(page+len, "%-16s %s\n", "type", s);
-       if ((s = cinfo->version[VER_DRIVER]) != 0)
+       if ((s = cinfo->version[VER_DRIVER]) != NULL)
           len += sprintf(page+len, "%-16s %s\n", "ver_driver", s);
-       if ((s = cinfo->version[VER_CARDTYPE]) != 0)
+       if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
           len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s);
-       if ((s = cinfo->version[VER_SERIAL]) != 0)
+       if ((s = cinfo->version[VER_SERIAL]) != NULL)
           len += sprintf(page+len, "%-16s %s\n", "ver_serial", s);
 
        if (card->cardtype != avm_m1) {
@@ -970,9 +970,9 @@ static int __init b1dma_init(void)
        char *p;
        char rev[32];
 
-       if ((p = strchr(revision, ':')) != 0 && p[1]) {
+       if ((p = strchr(revision, ':')) != NULL && p[1]) {
                strlcpy(rev, p + 2, sizeof(rev));
-               if ((p = strchr(rev, '$')) != 0 && p > rev)
+               if ((p = strchr(rev, '$')) != NULL && p > rev)
                   *(p-1) = 0;
        } else
                strcpy(rev, "1.0");
index 80fb488848b81ac2cc64e6613e80c21001e5b223..1e288eeb5e2a5679908ca0f9cbf79793b91e18b8 100644 (file)
@@ -203,9 +203,9 @@ static int __init b1isa_init(void)
        char rev[32];
        int i;
 
-       if ((p = strchr(revision, ':')) != 0 && p[1]) {
+       if ((p = strchr(revision, ':')) != NULL && p[1]) {
                strlcpy(rev, p + 2, 32);
-               if ((p = strchr(rev, '$')) != 0 && p > rev)
+               if ((p = strchr(rev, '$')) != NULL && p > rev)
                   *(p-1) = 0;
        } else
                strcpy(rev, "1.0");
index 90e2e6643d197abd44f499ed83efb4cc8a7eef0b..5b314a2c40493eb7e88441b96fa6f0a523e6933f 100644 (file)
@@ -382,9 +382,9 @@ static int __init b1pci_init(void)
        char rev[32];
        int err;
 
-       if ((p = strchr(revision, ':')) != 0 && p[1]) {
+       if ((p = strchr(revision, ':')) != NULL && p[1]) {
                strlcpy(rev, p + 2, 32);
-               if ((p = strchr(rev, '$')) != 0 && p > rev)
+               if ((p = strchr(rev, '$')) != NULL && p > rev)
                   *(p-1) = 0;
        } else
                strcpy(rev, "1.0");
index e479c0aef38d84aad3dc821a45b206b596fd479e..7740403b40e14685638b1d60c5d05a755332e6ae 100644 (file)
@@ -201,9 +201,9 @@ static int __init b1pcmcia_init(void)
        char *p;
        char rev[32];
 
-       if ((p = strchr(revision, ':')) != 0 && p[1]) {
+       if ((p = strchr(revision, ':')) != NULL && p[1]) {
                strlcpy(rev, p + 2, 32);
-               if ((p = strchr(rev, '$')) != 0 && p > rev)
+               if ((p = strchr(rev, '$')) != NULL && p > rev)
                   *(p-1) = 0;
        } else
                strcpy(rev, "1.0");
index 4bbbbe688077558f1793c746e0f95380d8b65f17..9df1d3f66c879e750201e9e16f8d8cbf4d14ccbb 100644 (file)
@@ -1088,11 +1088,11 @@ static int c4_read_proc(char *page, char **start, off_t off,
        default: s = "???"; break;
        }
        len += sprintf(page+len, "%-16s %s\n", "type", s);
-       if ((s = cinfo->version[VER_DRIVER]) != 0)
+       if ((s = cinfo->version[VER_DRIVER]) != NULL)
           len += sprintf(page+len, "%-16s %s\n", "ver_driver", s);
-       if ((s = cinfo->version[VER_CARDTYPE]) != 0)
+       if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
           len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s);
-       if ((s = cinfo->version[VER_SERIAL]) != 0)
+       if ((s = cinfo->version[VER_SERIAL]) != NULL)
           len += sprintf(page+len, "%-16s %s\n", "ver_serial", s);
 
        if (card->cardtype != avm_m1) {
@@ -1167,7 +1167,7 @@ static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
        }
 
        card->mbase = ioremap(card->membase, 128);
-       if (card->mbase == 0) {
+       if (card->mbase == NULL) {
                printk(KERN_NOTICE "c4: can't remap memory at 0x%lx\n",
                       card->membase);
                retval = -EIO;
@@ -1291,9 +1291,9 @@ static int __init c4_init(void)
        char rev[32];
        int err;
 
-       if ((p = strchr(revision, ':')) != 0 && p[1]) {
+       if ((p = strchr(revision, ':')) != NULL && p[1]) {
                strlcpy(rev, p + 2, 32);
-               if ((p = strchr(rev, '$')) != 0 && p > rev)
+               if ((p = strchr(rev, '$')) != NULL && p > rev)
                   *(p-1) = 0;
        } else
                strcpy(rev, "1.0");
index 6130724e46e775a98e9f37621d9aa4533eb2c553..e7724493738c4673f14d49c039e94f28c9e3ada8 100644 (file)
@@ -551,9 +551,9 @@ static int __init t1isa_init(void)
        char *p;
        int i;
 
-       if ((p = strchr(revision, ':')) != 0 && p[1]) {
+       if ((p = strchr(revision, ':')) != NULL && p[1]) {
                strlcpy(rev, p + 2, 32);
-               if ((p = strchr(rev, '$')) != 0 && p > rev)
+               if ((p = strchr(rev, '$')) != NULL && p > rev)
                   *(p-1) = 0;
        } else
                strcpy(rev, "1.0");
index d1e253c94db4b751f353a58587f4378c0adc4666..e6d298d751465af4411143831b7bb4b9d2b3057a 100644 (file)
@@ -233,9 +233,9 @@ static int __init t1pci_init(void)
        char rev[32];
        int err;
 
-       if ((p = strchr(revision, ':')) != 0 && p[1]) {
+       if ((p = strchr(revision, ':')) != NULL && p[1]) {
                strlcpy(rev, p + 2, 32);
-               if ((p = strchr(rev, '$')) != 0 && p > rev)
+               if ((p = strchr(rev, '$')) != NULL && p > rev)
                   *(p-1) = 0;
        } else
                strcpy(rev, "1.0");
index 6d39f9360766a896ef33870d1bcb55f739fd7a2e..5fcbdccd7a5359aeac614524f37506785f47b59d 100644 (file)
@@ -393,7 +393,7 @@ void diva_free_dma_map(void *hdev, struct _diva_dma_map_entry *pmap)
        dma_addr_t dma_handle;
        void *addr_handle;
 
-       for (i = 0; (pmap != 0); i++) {
+       for (i = 0; (pmap != NULL); i++) {
                diva_get_dma_map_entry(pmap, i, &cpu_addr, &phys_addr);
                if (!cpu_addr) {
                        break;
index 0632a260699858cf7bc7a3036d366b34d888eed5..fae895828a1713317a0286d21f17a4fe6b051a38 100644 (file)
@@ -125,15 +125,11 @@ static const struct file_operations divas_fops = {
 
 int create_divas_proc(void)
 {
-       divas_proc_entry = create_proc_entry(divas_proc_name,
-                                            S_IFREG | S_IRUGO,
-                                            proc_net_eicon);
+       proc_create(divas_proc_name, S_IFREG | S_IRUGO, proc_net_eicon,
+                   &divas_fops);
        if (!divas_proc_entry)
                return (0);
 
-       divas_proc_entry->proc_fops = &divas_fops;
-       divas_proc_entry->owner = THIS_MODULE;
-
        return (1);
 }
 
index 1ff98e7eb794cee276872c713672f1d5af951de6..599fed88222d02f82ab4ff2a7d068c02cdafcc13 100644 (file)
@@ -742,7 +742,7 @@ static void start_internal_command (dword Id, PLCI   *plci, t_std_internal_comma
   else
   {
     i = 1;
-    while (plci->internal_command_queue[i] != 0)
+    while (plci->internal_command_queue[i] != NULL)
       i++;
     plci->internal_command_queue[i] = command_function;
   }
@@ -758,7 +758,7 @@ static void next_internal_command (dword Id, PLCI   *plci)
 
   plci->internal_command = 0;
   plci->internal_command_queue[0] = NULL;
-  while (plci->internal_command_queue[1] != 0)
+  while (plci->internal_command_queue[1] != NULL)
   {
     for (i = 0; i < MAX_INTERNAL_COMMAND_LEVELS - 1; i++)
       plci->internal_command_queue[i] = plci->internal_command_queue[i+1];
@@ -9119,7 +9119,7 @@ word AdvCodecSupport(DIVA_CAPI_ADAPTER   *a, PLCI   *plci, APPL   *appl, byte ho
         dbug(1,dprintf("AdvSigPlci=0x%x",a->AdvSignalPLCI));
         return 0x2001; /* codec in use by another application */
       }
-      if(plci!=0)
+      if(plci!=NULL)
       {
         a->AdvSignalPLCI = plci;
         plci->tel=ADV_VOICE;
@@ -9144,7 +9144,7 @@ word AdvCodecSupport(DIVA_CAPI_ADAPTER   *a, PLCI   *plci, APPL   *appl, byte ho
         }
                                                /* indicate D-ch connect if  */
       }                                        /* codec is connected OK     */
-      if(plci!=0)
+      if(plci!=NULL)
       {
         a->AdvSignalPLCI = plci;
         plci->tel=ADV_VOICE;
@@ -9170,7 +9170,7 @@ word AdvCodecSupport(DIVA_CAPI_ADAPTER   *a, PLCI   *plci, APPL   *appl, byte ho
   {
     if(hook_listen) return 0x300B;               /* Facility not supported */
                                                  /* no hook with SCOM      */
-    if(plci!=0) plci->tel = CODEC;
+    if(plci!=NULL) plci->tel = CODEC;
     dbug(1,dprintf("S/SCOM codec"));
     /* first time we use the scom-s codec we must shut down the internal   */
     /* handset application of the card. This can be done by an assign with */
@@ -14604,7 +14604,7 @@ static void channel_xmit_extended_xon (PLCI   * plci) {
   int max_ch = ARRAY_SIZE(a->ch_flow_control);
   int i, one_requested = 0;
 
-  if ((!plci) || (!plci->Id) || ((a = plci->adapter) == 0)) {
+  if ((!plci) || (!plci->Id) || ((a = plci->adapter) == NULL)) {
     return;
   }
 
index b96f3184c2e5cc930232d40d56adbc6ea5cac751..1f879b500d838998969597e8af63b08ce1912fae 100644 (file)
@@ -344,7 +344,7 @@ setup_asuscom(struct IsdnCard *card)
                                        err = pnp_activate_dev(pnp_d);
                                        if (err<0) {
                                                printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
-                                                       __FUNCTION__, err);
+                                                       __func__, err);
                                                return(0);
                                        }
                                        card->para[1] = pnp_port_start(pnp_d, 0);
index 0f1db1f669b28028162fcf205bb69c5bd4749359..7cabc5a19492457e96d5de130f63c9ca352202b3 100644 (file)
@@ -797,7 +797,7 @@ static int __devinit avm_pnp_setup(struct IsdnCardState *cs)
                        err = pnp_activate_dev(pnp_avm_d);
                        if (err<0) {
                                printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
-                                       __FUNCTION__, err);
+                                       __func__, err);
                                return(0);
                        }
                        cs->hw.avm.cfg_reg =
index 2d670856d141bcb0a9668faabba046ca40d0a9f7..018bd293e580829a499bb4929f7513a23801907e 100644 (file)
@@ -1088,7 +1088,7 @@ static int __devinit setup_diva_isapnp(struct IsdnCard *card)
                                err = pnp_activate_dev(pnp_d);
                                if (err<0) {
                                        printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
-                                               __FUNCTION__, err);
+                                               __func__, err);
                                        return(0);
                                }
                                card->para[1] = pnp_port_start(pnp_d, 0);
index 2c3691fda30099d8b3863b2a96e56bb3c30bc855..aa29d1cf16af7031f54365e5f78508e572282c40 100644 (file)
@@ -937,7 +937,7 @@ setup_elsa_isapnp(struct IsdnCard *card)
                                        err = pnp_activate_dev(pnp_d);
                                        if (err<0) {
                                                printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
-                                                       __FUNCTION__, err);
+                                                       __func__, err);
                                                return(0);
                                        }
                                        card->para[1] = pnp_port_start(pnp_d, 0);
index f4a213877e35e35619bbc42286def6a84f7c482c..d92e8d6c2ae22d745b295da73459c3047e710d37 100644 (file)
@@ -1417,7 +1417,7 @@ setup_hfcsx(struct IsdnCard *card)
                                        err = pnp_activate_dev(pnp_d);
                                        if (err<0) {
                                                printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
-                                                       __FUNCTION__, err);
+                                                       __func__, err);
                                                return(0);
                                        }
                                        card->para[1] = pnp_port_start(pnp_d, 0);
index 98b0149bca68a4d36f93139dbd2db11dea52b6b6..8df889b0c1a97b1a6c3546545840bc3279755486 100644 (file)
@@ -905,7 +905,7 @@ rx_int_complete(struct urb *urb)
        if (status) {
                printk(KERN_INFO
                       "HFC-S USB: %s error resubmitting URB fifo(%d)\n",
-                      __FUNCTION__, fifon);
+                      __func__, fifon);
        }
 }
 
@@ -1543,14 +1543,14 @@ hfc_usb_disconnect(struct usb_interface *intf)
                                stop_isoc_chain(&context->fifos[i]);
                                DBG(HFCUSB_DBG_INIT,
                                    "HFC-S USB: %s stopping ISOC chain Fifo(%i)",
-                                   __FUNCTION__, i);
+                                   __func__, i);
                        }
                } else {
                        if (context->fifos[i].active > 0) {
                                context->fifos[i].active = 0;
                                DBG(HFCUSB_DBG_INIT,
                                    "HFC-S USB: %s unlinking URB for Fifo(%i)",
-                                   __FUNCTION__, i);
+                                   __func__, i);
                        }
                        usb_kill_urb(context->fifos[i].urb);
                        usb_free_urb(context->fifos[i].urb);
index 909d6709ec16782e151e64dae64f67cac4055a70..cf082665cc8bb36022cc671c2c57b22234f2aa8e 100644 (file)
@@ -193,7 +193,7 @@ setup_hfcs(struct IsdnCard *card)
                                        err = pnp_activate_dev(pnp_d);
                                        if (err<0) {
                                                printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
-                                                       __FUNCTION__, err);
+                                                       __func__, err);
                                                return(0);
                                        }
                                        card->para[1] = pnp_port_start(pnp_d, 0);
index ceafecdb103742c72f12beb4bce4cad7e80b0a10..5ed3b1c44184efdb271c0c5ed2d63b1cefb4b815 100644 (file)
 
 #define DBG(level, format, arg...) do { \
 if (level & __debug_variable) \
-printk(KERN_DEBUG "%s: " format "\n" , __FUNCTION__ , ## arg); \
+printk(KERN_DEBUG "%s: " format "\n" , __func__ , ## arg); \
 } while (0)
 
 #define DBG_PACKET(level,data,count) \
-  if (level & __debug_variable) dump_packet(__FUNCTION__,data,count)
+  if (level & __debug_variable) dump_packet(__func__,data,count)
 
 #define DBG_SKB(level,skb) \
-  if ((level & __debug_variable) && skb) dump_packet(__FUNCTION__,skb->data,skb->len)
+  if ((level & __debug_variable) && skb) dump_packet(__func__,skb->data,skb->len)
 
 
 static void __attribute__((unused))
index 76043dedba5b442bc0e66e70a59bb1772a63400d..c0b4db2f8364cf186f25f2861bf41af59d411e49 100644 (file)
@@ -68,7 +68,7 @@ static struct pci_device_id fcpci_ids[] = {
 
 MODULE_DEVICE_TABLE(pci, fcpci_ids);
 
-#ifdef __ISAPNP__
+#ifdef CONFIG_PNP
 static struct pnp_device_id fcpnp_ids[] __devinitdata = {
        { 
                .id             = "AVM0900",
@@ -914,7 +914,7 @@ static int __devinit fcpci_probe(struct pci_dev *pdev,
        return retval;
 }
 
-#ifdef __ISAPNP__
+#ifdef CONFIG_PNP
 static int __devinit fcpnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 {
        struct fritz_adapter *adapter;
@@ -935,7 +935,7 @@ static int __devinit fcpnp_probe(struct pnp_dev *pdev, const struct pnp_device_i
        pnp_disable_dev(pdev);
        retval = pnp_activate_dev(pdev);
        if (retval < 0) {
-               printk(KERN_WARNING "%s: pnp_activate_dev(%s) ret(%d)\n", __FUNCTION__,
+               printk(KERN_WARNING "%s: pnp_activate_dev(%s) ret(%d)\n", __func__,
                        (char *)dev_id->driver_data, retval);
                goto err_free;
        }
@@ -974,6 +974,8 @@ static struct pnp_driver fcpnp_driver = {
        .remove         = __devexit_p(fcpnp_remove),
        .id_table       = fcpnp_ids,
 };
+#else
+static struct pnp_driver fcpnp_driver;
 #endif
 
 static void __devexit fcpci_remove(struct pci_dev *pdev)
@@ -1001,7 +1003,7 @@ static int __init hisax_fcpcipnp_init(void)
        retval = pci_register_driver(&fcpci_driver);
        if (retval)
                return retval;
-#ifdef __ISAPNP__
+#ifdef CONFIG_PNP
        retval = pnp_register_driver(&fcpnp_driver);
        if (retval < 0) {
                pci_unregister_driver(&fcpci_driver);
@@ -1013,7 +1015,7 @@ static int __init hisax_fcpcipnp_init(void)
 
 static void __exit hisax_fcpcipnp_exit(void)
 {
-#ifdef __ISAPNP__
+#ifdef CONFIG_PNP
        pnp_unregister_driver(&fcpnp_driver);
 #endif
        pci_unregister_driver(&fcpci_driver);
index 2d18d4f1e57e8f35d15dc388c3448a526a621226..a92bf0d2cab2e6821f940c54221f99223b448d39 100644 (file)
@@ -252,7 +252,7 @@ setup_ix1micro(struct IsdnCard *card)
                                        err = pnp_activate_dev(pnp_d);
                                        if (err<0) {
                                                printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
-                                                       __FUNCTION__, err);
+                                                       __func__, err);
                                                return(0);
                                        }
                                        card->para[1] = pnp_port_start(pnp_d, 0);
index 421b8e6763d7bd7f4bdd30596e589d720120a802..ef00633e1d2a482fea9441f39dea12f90940908b 100644 (file)
@@ -255,7 +255,7 @@ int __devinit setup_niccy(struct IsdnCard *card)
                        err = pnp_activate_dev(pnp_d);
                        if (err < 0) {
                                printk(KERN_WARNING "%s: pnp_activate_dev "
-                                       "ret(%d)\n", __FUNCTION__, err);
+                                       "ret(%d)\n", __func__, err);
                                return 0;
                        }
                        card->para[1] = pnp_port_start(pnp_d, 0);
index 95425f3d22209291620bc57cb2646885f2b71acf..a10dfa82c734d815f7018ab3cf9932fabdee03fd 100644 (file)
@@ -555,7 +555,7 @@ setup_sedlbauer_isapnp(struct IsdnCard *card, int *bytecnt)
                                err = pnp_activate_dev(pnp_d);
                                if (err<0) {
                                        printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
-                                               __FUNCTION__, err);
+                                               __func__, err);
                                        return(0);
                                }
                                card->para[1] = pnp_port_start(pnp_d, 0);
index 04416bad611d5f68b32976f8f37ad8104c011054..2044e7173ab46a9d81f5f1b646c361383c8245a1 100644 (file)
@@ -218,13 +218,13 @@ enum {
 #define L1_EVENT_COUNT (EV_TIMER3 + 1)
 
 #define ERR(format, arg...) \
-printk(KERN_ERR "%s:%s: " format "\n" , __FILE__,  __FUNCTION__ , ## arg)
+printk(KERN_ERR "%s:%s: " format "\n" , __FILE__,  __func__ , ## arg)
 
 #define WARN(format, arg...) \
-printk(KERN_WARNING "%s:%s: " format "\n" , __FILE__,  __FUNCTION__ , ## arg)
+printk(KERN_WARNING "%s:%s: " format "\n" , __FILE__,  __func__ , ## arg)
 
 #define INFO(format, arg...) \
-printk(KERN_INFO "%s:%s: " format "\n" , __FILE__,  __FUNCTION__ , ## arg)
+printk(KERN_INFO "%s:%s: " format "\n" , __FILE__,  __func__ , ## arg)
 
 #include "isdnhdlc.h"
 #include "fsm.h"
@@ -406,7 +406,7 @@ struct st5481_adapter {
 
 /*
  * Submit an URB with error reporting. This is a macro so
- * the __FUNCTION__ returns the caller function name.
+ * the __func__ returns the caller function name.
  */
 #define SUBMIT_URB(urb, mem_flags) \
 ({ \
@@ -470,7 +470,7 @@ extern int st5481_debug;
 #ifdef CONFIG_HISAX_DEBUG
 
 #define DBG_ISO_PACKET(level,urb) \
-  if (level & __debug_variable) dump_iso_packet(__FUNCTION__,urb)
+  if (level & __debug_variable) dump_iso_packet(__func__,urb)
 
 static void __attribute__((unused))
 dump_iso_packet(const char *name, struct urb *urb)
index 4ada66b8b6792e2eb1025479a610fb6fb5d980c3..427a8b0520f5aee14684503f208e5faa6f841d1b 100644 (file)
@@ -342,7 +342,7 @@ void st5481_release_usb(struct st5481_adapter *adapter)
        usb_kill_urb(intr->urb);
        kfree(intr->urb->transfer_buffer);
        usb_free_urb(intr->urb);
-       ctrl->urb = NULL;
+       intr->urb = NULL;
 }
 
 /*
index 6a5e379e077418a385b7aaa453c17e9df9ee8d41..5dc9f1a436291e8c3ddacb42127da72dd4cc3fff 100644 (file)
@@ -301,7 +301,7 @@ setup_teles3(struct IsdnCard *card)
                                        err = pnp_activate_dev(pnp_d);
                                        if (err<0) {
                                                printk(KERN_WARNING "%s: pnp_activate_dev ret(%d)\n",
-                                                       __FUNCTION__, err);
+                                                       __func__, err);
                                                return(0);
                                        }
                                        card->para[3] = pnp_port_start(pnp_d, 2);
index 27d890b48f88a6d7d9fab0e78c6a596870391411..15906d005b05de9f6371f75b6ca5b92617c0f4a3 100644 (file)
@@ -370,6 +370,7 @@ hysdn_conf_close(struct inode *ino, struct file *filep)
 /******************************************************/
 static const struct file_operations conf_fops =
 {
+       .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = hysdn_conf_read,
        .write          = hysdn_conf_write,
@@ -402,11 +403,10 @@ hysdn_procconf_init(void)
        while (card) {
 
                sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
-               if ((card->procconf = (void *) create_proc_entry(conf_name,
-                                            S_IFREG | S_IRUGO | S_IWUSR,
-                                           hysdn_proc_entry)) != NULL) {
-                       ((struct proc_dir_entry *) card->procconf)->proc_fops = &conf_fops;
-                       ((struct proc_dir_entry *) card->procconf)->owner = THIS_MODULE;
+               if ((card->procconf = (void *) proc_create(conf_name,
+                                               S_IFREG | S_IRUGO | S_IWUSR,
+                                               hysdn_proc_entry,
+                                               &conf_fops)) != NULL) {
                        hysdn_proclog_init(card);       /* init the log file entry */
                }
                card = card->next;      /* next entry */
index 27b3991fb0ec600e031ae1f4627b173fea4a0ce9..8991d2c8ee4a9d74b02e15c9f44246c06593eb63 100644 (file)
@@ -380,6 +380,7 @@ hysdn_log_poll(struct file *file, poll_table * wait)
 /**************************************************/
 static const struct file_operations log_fops =
 {
+       .owner          = THIS_MODULE,
        .llseek         = no_llseek,
        .read           = hysdn_log_read,
        .write          = hysdn_log_write,
@@ -402,10 +403,9 @@ hysdn_proclog_init(hysdn_card * card)
 
        if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) {
                sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid);
-               if ((pd->log = create_proc_entry(pd->log_name, S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry)) != NULL) {
-                       pd->log->proc_fops = &log_fops; 
-                       pd->log->owner = THIS_MODULE;
-               }
+               pd->log = proc_create(pd->log_name,
+                               S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry,
+                               &log_fops);
 
                init_waitqueue_head(&(pd->rd_queue));
 
index d4ad6992f776c895d637b72c779b04fd5dd7915e..0f3c66de69bcec511b89b9c72265ada93b2bc3c2 100644 (file)
@@ -1924,7 +1924,7 @@ isdn_free_channel(int di, int ch, int usage)
 
        if ((di < 0) || (ch < 0)) {
                printk(KERN_WARNING "%s: called with invalid drv(%d) or channel(%d)\n",
-                       __FUNCTION__, di, ch);
+                       __func__, di, ch);
                return;
        }
        for (i = 0; i < ISDN_MAX_CHANNELS; i++)
index bc2f0dd962eae6d93a7bf58f95b954df5ca3979d..be4949715d55f605206f99e86430fe0c46ff1f1d 100644 (file)
@@ -108,7 +108,7 @@ static __inline__ void isdn_net_add_to_bundle(isdn_net_dev *nd, isdn_net_local *
 
        lp = nd->queue;
 //     printk(KERN_DEBUG "%s: lp:%s(%p) nlp:%s(%p) last(%p)\n",
-//             __FUNCTION__, lp->name, lp, nlp->name, nlp, lp->last);
+//             __func__, lp->name, lp, nlp->name, nlp, lp->last);
        nlp->last = lp->last;
        lp->last->next = nlp;
        lp->last = nlp;
@@ -129,7 +129,7 @@ static __inline__ void isdn_net_rm_from_bundle(isdn_net_local *lp)
                master_lp = (isdn_net_local *) lp->master->priv;
 
 //     printk(KERN_DEBUG "%s: lp:%s(%p) mlp:%s(%p) last(%p) next(%p) mndq(%p)\n",
-//             __FUNCTION__, lp->name, lp, master_lp->name, master_lp, lp->last, lp->next, master_lp->netdev->queue);
+//             __func__, lp->name, lp, master_lp->name, master_lp, lp->last, lp->next, master_lp->netdev->queue);
        spin_lock_irqsave(&master_lp->netdev->queue_lock, flags);
        lp->last->next = lp->next;
        lp->next->last = lp->last;
@@ -141,7 +141,7 @@ static __inline__ void isdn_net_rm_from_bundle(isdn_net_local *lp)
        }
        lp->next = lp->last = lp;       /* (re)set own pointers */
 //     printk(KERN_DEBUG "%s: mndq(%p)\n",
-//             __FUNCTION__, master_lp->netdev->queue);
+//             __func__, master_lp->netdev->queue);
        spin_unlock_irqrestore(&master_lp->netdev->queue_lock, flags);
 }
 
index 9f5fe372f83ddd5846685358458a977ddbedc425..127cfdad68e7bb8b4ec727acb7e8cab9462e5e8e 100644 (file)
@@ -110,7 +110,7 @@ isdn_ppp_free(isdn_net_local * lp)
 
        if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
                printk(KERN_ERR "%s: ppp_slot(%d) out of range\n",
-                       __FUNCTION__, lp->ppp_slot);
+                       __func__, lp->ppp_slot);
                return 0;
        }
 
@@ -127,7 +127,7 @@ isdn_ppp_free(isdn_net_local * lp)
 #endif /* CONFIG_ISDN_MPP */
        if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
                printk(KERN_ERR "%s: ppp_slot(%d) now invalid\n",
-                       __FUNCTION__, lp->ppp_slot);
+                       __func__, lp->ppp_slot);
                return 0;
        }
        is = ippp_table[lp->ppp_slot];
@@ -226,7 +226,7 @@ isdn_ppp_wakeup_daemon(isdn_net_local * lp)
 {
        if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
                printk(KERN_ERR "%s: ppp_slot(%d) out of range\n",
-                       __FUNCTION__, lp->ppp_slot);
+                       __func__, lp->ppp_slot);
                return;
        }
        ippp_table[lp->ppp_slot]->state = IPPP_OPEN | IPPP_CONNECT | IPPP_NOBLOCK;
@@ -245,7 +245,7 @@ isdn_ppp_closewait(int slot)
 
        if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
                printk(KERN_ERR "%s: slot(%d) out of range\n",
-                       __FUNCTION__, slot);
+                       __func__, slot);
                return 0;
        }
        is = ippp_table[slot];
@@ -343,7 +343,7 @@ isdn_ppp_release(int min, struct file *file)
        is = file->private_data;
 
        if (!is) {
-               printk(KERN_ERR "%s: no file->private_data\n", __FUNCTION__);
+               printk(KERN_ERR "%s: no file->private_data\n", __func__);
                return;
        }
        if (is->debug & 0x1)
@@ -353,7 +353,7 @@ isdn_ppp_release(int min, struct file *file)
                isdn_net_dev *p = is->lp->netdev;
 
                if (!p) {
-                       printk(KERN_ERR "%s: no lp->netdev\n", __FUNCTION__);
+                       printk(KERN_ERR "%s: no lp->netdev\n", __func__);
                        return;
                }
                is->state &= ~IPPP_CONNECT;     /* -> effect: no call of wakeup */
@@ -1080,7 +1080,7 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
                                printk(KERN_DEBUG "isdn_ppp: VJC_UNCOMP\n");
                        if (net_dev->local->ppp_slot < 0) {
                                printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
-                                       __FUNCTION__, net_dev->local->ppp_slot);
+                                       __func__, net_dev->local->ppp_slot);
                                goto drop_packet;
                        }
                        if (slhc_remember(ippp_table[net_dev->local->ppp_slot]->slcomp, skb->data, skb->len) <= 0) {
@@ -1107,7 +1107,7 @@ isdn_ppp_push_higher(isdn_net_dev * net_dev, isdn_net_local * lp, struct sk_buff
                                                          skb_old->len);
                                if (net_dev->local->ppp_slot < 0) {
                                        printk(KERN_ERR "%s: net_dev->local->ppp_slot(%d) out of range\n",
-                                               __FUNCTION__, net_dev->local->ppp_slot);
+                                               __func__, net_dev->local->ppp_slot);
                                        goto drop_packet;
                                }
                                pkt_len = slhc_uncompress(ippp_table[net_dev->local->ppp_slot]->slcomp,
@@ -1553,7 +1553,7 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
 
        if (lp->ppp_slot < 0) {
                printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
-                       __FUNCTION__, lp->ppp_slot);
+                       __func__, lp->ppp_slot);
                return(-EINVAL);
        }
 
@@ -1604,7 +1604,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
        slot = lp->ppp_slot;
        if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
                printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
-                       __FUNCTION__, lp->ppp_slot);
+                       __func__, lp->ppp_slot);
                stats->frame_drops++;
                dev_kfree_skb(skb);
                spin_unlock_irqrestore(&mp->lock, flags);
@@ -1641,7 +1641,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
                slot = lpq->ppp_slot;
                if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
                        printk(KERN_ERR "%s: lpq->ppp_slot(%d)\n",
-                               __FUNCTION__, lpq->ppp_slot);
+                               __func__, lpq->ppp_slot);
                } else {
                        u32 lls = ippp_table[slot]->last_link_seqno;
                        if (MP_LT(lls, minseq))
@@ -1875,7 +1875,7 @@ void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
 
        if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
                printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
-                       __FUNCTION__, lp->ppp_slot);
+                       __func__, lp->ppp_slot);
                return;
        }
        if( MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG) ) {
@@ -2655,7 +2655,7 @@ static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp,
                lp->ppp_slot);
        if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
                printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
-                       __FUNCTION__, lp->ppp_slot);
+                       __func__, lp->ppp_slot);
                return;
        }
        is = ippp_table[lp->ppp_slot];
@@ -2665,7 +2665,7 @@ static void isdn_ppp_receive_ccp(isdn_net_dev *net_dev, isdn_net_local *lp,
                int slot = ((isdn_net_local *) (lp->master->priv))->ppp_slot;
                if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
                        printk(KERN_ERR "%s: slot(%d) out of range\n",
-                               __FUNCTION__, slot);
+                               __func__, slot);
                        return;
                }       
                mis = ippp_table[slot];
@@ -2829,7 +2829,7 @@ static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct
                return;
        if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
                printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
-                       __FUNCTION__, slot);
+                       __func__, slot);
                return;
        }       
        is = ippp_table[slot];
@@ -2852,7 +2852,7 @@ static void isdn_ppp_send_ccp(isdn_net_dev *net_dev, isdn_net_local *lp, struct
                slot = ((isdn_net_local *) (lp->master->priv))->ppp_slot;
                if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
                        printk(KERN_ERR "%s: slot(%d) out of range\n",
-                               __FUNCTION__, slot);
+                               __func__, slot);
                        return;
                }       
                mis = ippp_table[slot];
index 133eb18e65cca989dcd3f0755ae3d0f98f1c9a47..1a2222cbb80541bc492d78cef045f0844a08fad6 100644 (file)
@@ -1347,17 +1347,19 @@ isdn_tty_tiocmget(struct tty_struct *tty, struct file *file)
        modem_info *info = (modem_info *) tty->driver_data;
        u_char control, status;
 
-       if (isdn_tty_paranoia_check(info, tty->name, __FUNCTION__))
+       if (isdn_tty_paranoia_check(info, tty->name, __func__))
                return -ENODEV;
        if (tty->flags & (1 << TTY_IO_ERROR))
                return -EIO;
 
+       lock_kernel();
 #ifdef ISDN_DEBUG_MODEM_IOCTL
        printk(KERN_DEBUG "ttyI%d ioctl TIOCMGET\n", info->line);
 #endif
 
        control = info->mcr;
        status = info->msr;
+       unlock_kernel();
        return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0)
            | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0)
            | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0)
@@ -1372,7 +1374,7 @@ isdn_tty_tiocmset(struct tty_struct *tty, struct file *file,
 {
        modem_info *info = (modem_info *) tty->driver_data;
 
-       if (isdn_tty_paranoia_check(info, tty->name, __FUNCTION__))
+       if (isdn_tty_paranoia_check(info, tty->name, __func__))
                return -ENODEV;
        if (tty->flags & (1 << TTY_IO_ERROR))
                return -EIO;
@@ -1381,6 +1383,7 @@ isdn_tty_tiocmset(struct tty_struct *tty, struct file *file,
        printk(KERN_DEBUG "ttyI%d ioctl TIOCMxxx: %x %x\n", info->line, set, clear);
 #endif
 
+       lock_kernel();
        if (set & TIOCM_RTS)
                info->mcr |= UART_MCR_RTS;
        if (set & TIOCM_DTR) {
@@ -1402,6 +1405,7 @@ isdn_tty_tiocmset(struct tty_struct *tty, struct file *file,
                        isdn_tty_modem_hup(info, 1);
                }
        }
+       unlock_kernel();
        return 0;
 }
 
@@ -1435,21 +1439,6 @@ isdn_tty_ioctl(struct tty_struct *tty, struct file *file,
                                return retval;
                        tty_wait_until_sent(tty, 0);
                        return 0;
-               case TIOCGSOFTCAR:
-#ifdef ISDN_DEBUG_MODEM_IOCTL
-                       printk(KERN_DEBUG "ttyI%d ioctl TIOCGSOFTCAR\n", info->line);
-#endif
-                       return put_user(C_CLOCAL(tty) ? 1 : 0, (ulong __user *) arg);
-               case TIOCSSOFTCAR:
-#ifdef ISDN_DEBUG_MODEM_IOCTL
-                       printk(KERN_DEBUG "ttyI%d ioctl TIOCSSOFTCAR\n", info->line);
-#endif
-                       if (get_user(arg, (ulong __user *) arg))
-                               return -EFAULT;
-                       tty->termios->c_cflag =
-                           ((tty->termios->c_cflag & ~CLOCAL) |
-                            (arg ? CLOCAL : 0));
-                       return 0;
                case TIOCSERGETLSR:     /* Get line status register */
 #ifdef ISDN_DEBUG_MODEM_IOCTL
                        printk(KERN_DEBUG "ttyI%d ioctl TIOCSERGETLSR\n", info->line);
@@ -1472,13 +1461,14 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
        if (!old_termios)
                isdn_tty_change_speed(info);
        else {
-               if (tty->termios->c_cflag == old_termios->c_cflag)
+               if (tty->termios->c_cflag == old_termios->c_cflag &&
+                   tty->termios->c_ispeed == old_termios->c_ispeed &&
+                   tty->termios->c_ospeed == old_termios->c_ospeed)
                        return;
                isdn_tty_change_speed(info);
                if ((old_termios->c_cflag & CRTSCTS) &&
-                   !(tty->termios->c_cflag & CRTSCTS)) {
+                   !(tty->termios->c_cflag & CRTSCTS))
                        tty->hw_stopped = 0;
-               }
        }
 }
 
@@ -1608,7 +1598,7 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
        if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_open"))
                return -ENODEV;
        if (!try_module_get(info->owner)) {
-               printk(KERN_WARNING "%s: cannot reserve module\n", __FUNCTION__);
+               printk(KERN_WARNING "%s: cannot reserve module\n", __func__);
                return -ENODEV;
        }
 #ifdef ISDN_DEBUG_MODEM_OPEN
@@ -1718,9 +1708,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
        }
        dev->modempoll--;
        isdn_tty_shutdown(info);
-       
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       isdn_tty_flush_buffer(tty);
        tty_ldisc_flush(tty);
        info->tty = NULL;
        info->ncarrier = 0;
index ac05a928f7642a54a51a0627e6ca7dc904511d09..b3c54be745568863f8eb5a271ba3e85d4f441f1f 100644 (file)
@@ -105,7 +105,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
 
        led_cdev->dev = device_create(leds_class, parent, 0, "%s",
                                            led_cdev->name);
-       if (unlikely(IS_ERR(led_cdev->dev)))
+       if (IS_ERR(led_cdev->dev))
                return PTR_ERR(led_cdev->dev);
 
        dev_set_drvdata(led_cdev->dev, led_cdev);
index 77f50b63a9702d25bcfc1c89cb05cf7ee9f58f1b..b52659620d509a2ada42d856a05ec3ad05b81d12 100644 (file)
@@ -234,6 +234,14 @@ config WINDFARM_PM112
          which are the recent dual and quad G5 machines using the
          970MP dual-core processor.
 
+config WINDFARM_PM121
+       tristate "Support for thermal management on PowerMac12,1"
+       depends on WINDFARM && I2C && PMAC_SMU
+       select I2C_POWERMAC
+       help
+         This driver provides thermal control for the PowerMac12,1
+         which is the iMac G5 (iSight).
+
 config ANSLCD
        tristate "Support for ANS LCD display"
        depends on ADB_CUDA && PPC_PMAC
index 2dfc3f4eaf42b2055e8b4cddaf6ad6cff989180e..e3132efa17c059a1cd2ef59645468bb1ba2e1286 100644 (file)
@@ -42,4 +42,9 @@ obj-$(CONFIG_WINDFARM_PM112)  += windfarm_pm112.o windfarm_smu_sat.o \
                                   windfarm_smu_sensors.o \
                                   windfarm_max6690_sensor.o \
                                   windfarm_lm75_sensor.o windfarm_pid.o
+obj-$(CONFIG_WINDFARM_PM121)   += windfarm_pm121.o windfarm_smu_sat.o \
+                                  windfarm_smu_controls.o \
+                                  windfarm_smu_sensors.o \
+                                  windfarm_max6690_sensor.o \
+                                  windfarm_lm75_sensor.o windfarm_pid.o
 obj-$(CONFIG_PMAC_RACKMETER)   += rack-meter.o
index 7e10c3ab4d50c2bf62b720dfc6cf8a126d679703..b92b959fe16e3a9d57ad4a610bd137f138a3d25d 100644 (file)
@@ -127,6 +127,12 @@ static struct wf_lm75_sensor *wf_lm75_create(struct i2c_adapter *adapter,
         */
        if (!strcmp(loc, "Hard drive") || !strcmp(loc, "DRIVE BAY"))
                lm->sens.name = "hd-temp";
+       else if (!strcmp(loc, "Incoming Air Temp"))
+               lm->sens.name = "incoming-air-temp";
+       else if (!strcmp(loc, "ODD Temp"))
+               lm->sens.name = "optical-drive-temp";
+       else if (!strcmp(loc, "HD Temp"))
+               lm->sens.name = "hard-drive-temp";
        else
                goto fail;
 
index 5f03aab9fb5da3451b61a4ea8738ac0e647af5f5..e207a90d6b2762fa887bdfdc30344958f00a8373 100644 (file)
@@ -77,18 +77,28 @@ static struct wf_sensor_ops wf_max6690_ops = {
        .owner          = THIS_MODULE,
 };
 
-static void wf_max6690_create(struct i2c_adapter *adapter, u8 addr)
+static void wf_max6690_create(struct i2c_adapter *adapter, u8 addr,
+                             const char *loc)
 {
        struct wf_6690_sensor *max;
-       char *name = "backside-temp";
+       char *name;
 
        max = kzalloc(sizeof(struct wf_6690_sensor), GFP_KERNEL);
        if (max == NULL) {
                printk(KERN_ERR "windfarm: Couldn't create MAX6690 sensor %s: "
-                      "no memory\n", name);
+                      "no memory\n", loc);
                return;
        }
 
+       if (!strcmp(loc, "BACKSIDE"))
+               name = "backside-temp";
+       else if (!strcmp(loc, "NB Ambient"))
+               name = "north-bridge-temp";
+       else if (!strcmp(loc, "GPU Ambient"))
+               name = "gpu-temp";
+       else
+               goto fail;
+
        max->sens.ops = &wf_max6690_ops;
        max->sens.name = name;
        max->i2c.addr = addr >> 1;
@@ -138,9 +148,7 @@ static int wf_max6690_attach(struct i2c_adapter *adapter)
                if (loc == NULL || addr == 0)
                        continue;
                printk("found max6690, loc=%s addr=0x%02x\n", loc, addr);
-               if (strcmp(loc, "BACKSIDE"))
-                       continue;
-               wf_max6690_create(adapter, addr);
+               wf_max6690_create(adapter, addr, loc);
        }
 
        return 0;
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c
new file mode 100644 (file)
index 0000000..66ec4fb
--- /dev/null
@@ -0,0 +1,1040 @@
+/*
+ * Windfarm PowerMac thermal control. iMac G5 iSight
+ *
+ * (c) Copyright 2007 Étienne Bersac <bersace@gmail.com>
+ *
+ * Bits & pieces from windfarm_pm81.c by (c) Copyright 2005 Benjamin
+ * Herrenschmidt, IBM Corp. <benh@kernel.crashing.org>
+ *
+ * Released under the terms of the GNU GPL v2.
+ *
+ *
+ *
+ * PowerMac12,1
+ * ============
+ *
+ *
+ * The algorithm used is the PID control algorithm, used the same way
+ * the published Darwin code does, using the same values that are
+ * present in the Darwin 8.10 snapshot property lists (note however
+ * that none of the code has been re-used; it's a complete
+ * re-implementation).
+ *
+ * There are two models using PowerMac12,1. Model 2 is the iMac G5
+ * iSight 17" while Model 3 is the iMac G5 20". They both have the
+ * same controls, with one tiny difference: the control-ids of
+ * hard-drive-fan and cpu-fan are swapped.
+ *
+ *
+ * Target Correction:
+ *
+ * Controls have a target correction calculated as:
+ *
+ * new_min = ((((average_power * slope) >> 16) + offset) >> 16) + min_value
+ * new_value = max(new_value, max(new_min, 0))
+ *
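+ * As an illustrative sketch (this is what pm121_correct() further
+ * down implements; min_value is the control's minimum value and
+ * average_power the driver-wide s32 defined below):
+ *
+ *   new_min  = (average_power * slope) >> 16;
+ *   new_min += offset;
+ *   new_min  = (new_min >> 16) + min_value;
+ *   new_value = max(new_value, max(new_min, 0));
+ *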
+ * OD Fan control correction.
+ *
+ * # model_id: 2
+ *   offset            : -19563152
+ *   slope             :  1956315
+ *
+ * # model_id: 3
+ *   offset            : -15650652
+ *   slope             :  1565065
+ *
+ * HD Fan control correction.
+ *
+ * # model_id: 2
+ *   offset            : -15650652
+ *   slope             :  1565065
+ *
+ * # model_id: 3
+ *   offset            : -19563152
+ *   slope             :  1956315
+ *
+ * CPU Fan control correction.
+ *
+ * # model_id: 2
+ *   offset            : -25431900
+ *   slope             :  2543190
+ *
+ * # model_id: 3
+ *   offset            : -15650652
+ *   slope             :  1565065
+ *
+ *
+ * Target rubber-banding:
+ *
+ * Some controls have a target correction which depends on another
+ * control value. The correction is computed in the following way:
+ *
+ * new_min = ref_value * slope + offset
+ *
+ * ref_value is the value of the reference control. If new_min is
+ * greater than 0, then we correct the target value using:
+ *
+ * new_target = max (new_target, new_min >> 16)
+ *
+ *
+ * # model_id : 2
+ *   control   : cpu-fan
+ *   ref       : optical-drive-fan
+ *   offset    : -15650652
+ *   slope     : 1565065
+ *
+ * # model_id : 3
+ *   control   : optical-drive-fan
+ *   ref       : hard-drive-fan
+ *   offset    : -32768000
+ *   slope     : 65536
+ *
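+ * As a sketch in C (this is what pm121_connect() below does for the
+ * per-model connection table):
+ *
+ *   new_min = ref_value * slope + offset;
+ *   if (new_min > 0)
+ *           new_target = max(new_target, new_min >> 16);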
+ *
+ * In order to have the most efficient correction with those
+ * dependencies, we must trigger the HD loop before the OD loop, and
+ * the OD loop before the CPU loop.
+ *
+ *
+ * The various control loops found in the Darwin config file are
+ * listed below (a sketch of how the system-fan parameters map onto
+ * wf_pid_param follows the list):
+ *
+ * HD Fan control loop.
+ *
+ * # model_id: 2
+ *   control        : hard-drive-fan
+ *   sensor         : hard-drive-temp
+ *   PID params     : G_d = 0x00000000
+ *                    G_p = 0x002D70A3
+ *                    G_r = 0x00019999
+ *                    History = 2 entries
+ *                    Input target = 0x370000
+ *                    Interval = 5s
+ *
+ * # model_id: 3
+ *   control        : hard-drive-fan
+ *   sensor         : hard-drive-temp
+ *   PID params     : G_d = 0x00000000
+ *                    G_p = 0x002170A3
+ *                    G_r = 0x00019999
+ *                    History = 2 entries
+ *                    Input target = 0x370000
+ *                    Interval = 5s
+ *
+ * OD Fan control loop.
+ *
+ * # model_id: 2
+ *   control        : optical-drive-fan
+ *   sensor         : optical-drive-temp
+ *   PID params     : G_d = 0x00000000
+ *                    G_p = 0x001FAE14
+ *                    G_r = 0x00019999
+ *                    History = 2 entries
+ *                    Input target = 0x320000
+ *                    Interval = 5s
+ *
+ * # model_id: 3
+ *   control        : optical-drive-fan
+ *   sensor         : optical-drive-temp
+ *   PID params     : G_d = 0x00000000
+ *                    G_p = 0x001FAE14
+ *                    G_r = 0x00019999
+ *                    History = 2 entries
+ *                    Input target = 0x320000
+ *                    Interval = 5s
+ *
+ * GPU Fan control loop.
+ *
+ * # model_id: 2
+ *   control        : hard-drive-fan
+ *   sensor         : gpu-temp
+ *   PID params     : G_d = 0x00000000
+ *                    G_p = 0x002A6666
+ *                    G_r = 0x00019999
+ *                    History = 2 entries
+ *                    Input target = 0x5A0000
+ *                    Interval = 5s
+ *
+ * # model_id: 3
+ *   control        : cpu-fan
+ *   sensor         : gpu-temp
+ *   PID params     : G_d = 0x00000000
+ *                    G_p = 0x0010CCCC
+ *                    G_r = 0x00019999
+ *                    History = 2 entries
+ *                    Input target = 0x500000
+ *                    Interval = 5s
+ *
+ * KODIAK (aka northbridge) Fan control loop.
+ *
+ * # model_id: 2
+ *   control        : optical-drive-fan
+ *   sensor         : north-bridge-temp
+ *   PID params     : G_d = 0x00000000
+ *                    G_p = 0x003BD70A
+ *                    G_r = 0x00019999
+ *                    History = 2 entries
+ *                    Input target = 0x550000
+ *                    Interval = 5s
+ *
+ * # model_id: 3
+ *   control        : hard-drive-fan
+ *   sensor         : north-bridge-temp
+ *   PID params     : G_d = 0x00000000
+ *                    G_p = 0x0030F5C2
+ *                    G_r = 0x00019999
+ *                    History = 2 entries
+ *                    Input target = 0x550000
+ *                    Interval = 5s
+ *
+ * CPU Fan control loop.
+ *
+ *   control        : cpu-fan
+ *   sensors        : cpu-temp, cpu-power
+ *   PID params     : from SDB partition
+ *
+ *
+ * CPU Slew control loop.
+ *
+ *   control        : cpufreq-clamp
+ *   sensor         : cpu-temp
+ *
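+ * As a rough sketch, the model 2 HD-loop values above feed the
+ * generic windfarm PID helper the way pm121_create_sys_fans() and
+ * pm121_sys_fans_tick() below do it:
+ *
+ *   pid_param.gd          = 0x00000000;
+ *   pid_param.gp          = 0x002D70A3;
+ *   pid_param.gr          = 0x00019999;
+ *   pid_param.interval    = 5;
+ *   pid_param.history_len = 2;
+ *   pid_param.itarget     = 0x370000;
+ *   pid_param.min         = control->ops->get_min(control);
+ *   pid_param.max         = control->ops->get_max(control);
+ *   wf_pid_init(&pm121_sys_state[LOOP_HD]->pid, &pid_param);
+ *   ...
+ *   new_setpoint = wf_pid_run(&pm121_sys_state[LOOP_HD]->pid, temp);
+ *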
+ */
+
+#undef DEBUG
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/kmod.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <asm/prom.h>
+#include <asm/machdep.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <asm/sections.h>
+#include <asm/smu.h>
+
+#include "windfarm.h"
+#include "windfarm_pid.h"
+
+#define VERSION "0.3"
+
+static int pm121_mach_model;   /* machine model id */
+
+/* Controls & sensors */
+static struct wf_sensor        *sensor_cpu_power;
+static struct wf_sensor        *sensor_cpu_temp;
+static struct wf_sensor        *sensor_cpu_voltage;
+static struct wf_sensor        *sensor_cpu_current;
+static struct wf_sensor        *sensor_gpu_temp;
+static struct wf_sensor        *sensor_north_bridge_temp;
+static struct wf_sensor        *sensor_hard_drive_temp;
+static struct wf_sensor        *sensor_optical_drive_temp;
+static struct wf_sensor        *sensor_incoming_air_temp; /* unused ! */
+
+enum {
+       FAN_CPU,
+       FAN_HD,
+       FAN_OD,
+       CPUFREQ,
+       N_CONTROLS
+};
+static struct wf_control *controls[N_CONTROLS] = {};
+
+/* Set to kick the control loop into life */
+static int pm121_all_controls_ok, pm121_all_sensors_ok, pm121_started;
+
+enum {
+       FAILURE_FAN             = 1 << 0,
+       FAILURE_SENSOR          = 1 << 1,
+       FAILURE_OVERTEMP        = 1 << 2
+};
+
+/* All sys loops. Note that the HD loop comes before the OD loop so
+   that it runs first. */
+enum {
+       LOOP_GPU,               /* control = hd or cpu, but luckily,
+                                  it doesn't matter */
+       LOOP_HD,                /* control = hd */
+       LOOP_KODIAK,            /* control = hd or od */
+       LOOP_OD,                /* control = od */
+       N_LOOPS
+};
+
+static const char *loop_names[N_LOOPS] = {
+       "GPU",
+       "HD",
+       "KODIAK",
+       "OD",
+};
+
+#define        PM121_NUM_CONFIGS       2
+
+static unsigned int pm121_failure_state;
+static int pm121_readjust, pm121_skipping;
+static s32 average_power;
+
+struct pm121_correction {
+       int     offset;
+       int     slope;
+};
+
+static struct pm121_correction corrections[N_CONTROLS][PM121_NUM_CONFIGS] = {
+       /* FAN_OD */
+       {
+               /* MODEL 2 */
+               { .offset       = -19563152,
+                 .slope        =  1956315
+               },
+               /* MODEL 3 */
+               { .offset       = -15650652,
+                 .slope        =  1565065
+               },
+       },
+       /* FAN_HD */
+       {
+               /* MODEL 2 */
+               { .offset       = -15650652,
+                 .slope        =  1565065
+               },
+               /* MODEL 3 */
+               { .offset       = -19563152,
+                 .slope        =  1956315
+               },
+       },
+       /* FAN_CPU */
+       {
+               /* MODEL 2 */
+               { .offset       = -25431900,
+                 .slope        =  2543190
+               },
+               /* MODEL 3 */
+               { .offset       = -15650652,
+                 .slope        =  1565065
+               },
+       },
+       /* CPUFREQ has no correction (and is not implemented at all) */
+};
+
+struct pm121_connection {
+       unsigned int    control_id;
+       unsigned int    ref_id;
+       struct pm121_correction correction;
+};
+
+static struct pm121_connection pm121_connections[] = {
+       /* MODEL 2 */
+       { .control_id   = FAN_CPU,
+         .ref_id       = FAN_OD,
+         { .offset     = -32768000,
+           .slope      =  65536
+         }
+       },
+       /* MODEL 3 */
+       { .control_id   = FAN_OD,
+         .ref_id       = FAN_HD,
+         { .offset     = -32768000,
+           .slope      =  65536
+         }
+       },
+};
+
+/* pointer to the current model connection */
+static struct pm121_connection *pm121_connection;
+
+/*
+ * ****** System Fans Control Loop ******
+ *
+ */
+
+/* Since each loop handles only one control and we want to avoid
+ * writing a virtual control, we store the control correction with the
+ * loop params. Some data are not set here; they are common to all
+ * loops and thus hardcoded.
+ */
+struct pm121_sys_param {
+       /* purely informative since we use mach_model-2 as index */
+       int                     model_id;
+       struct wf_sensor        **sensor; /* use sensor_id instead ? */
+       s32                     gp, itarget;
+       unsigned int            control_id;
+};
+
+static struct pm121_sys_param
+pm121_sys_all_params[N_LOOPS][PM121_NUM_CONFIGS] = {
+       /* GPU Fan control loop */
+       {
+               { .model_id     = 2,
+                 .sensor       = &sensor_gpu_temp,
+                 .gp           = 0x002A6666,
+                 .itarget      = 0x5A0000,
+                 .control_id   = FAN_HD,
+               },
+               { .model_id     = 3,
+                 .sensor       = &sensor_gpu_temp,
+                 .gp           = 0x0010CCCC,
+                 .itarget      = 0x500000,
+                 .control_id   = FAN_CPU,
+               },
+       },
+       /* HD Fan control loop */
+       {
+               { .model_id     = 2,
+                 .sensor       = &sensor_hard_drive_temp,
+                 .gp           = 0x002D70A3,
+                 .itarget      = 0x370000,
+                 .control_id   = FAN_HD,
+               },
+               { .model_id     = 3,
+                 .sensor       = &sensor_hard_drive_temp,
+                 .gp           = 0x002170A3,
+                 .itarget      = 0x370000,
+                 .control_id   = FAN_HD,
+               },
+       },
+       /* KODIAK Fan control loop */
+       {
+               { .model_id     = 2,
+                 .sensor       = &sensor_north_bridge_temp,
+                 .gp           = 0x003BD70A,
+                 .itarget      = 0x550000,
+                 .control_id   = FAN_OD,
+               },
+               { .model_id     = 3,
+                 .sensor       = &sensor_north_bridge_temp,
+                 .gp           = 0x0030F5C2,
+                 .itarget      = 0x550000,
+                 .control_id   = FAN_HD,
+               },
+       },
+       /* OD Fan control loop */
+       {
+               { .model_id     = 2,
+                 .sensor       = &sensor_optical_drive_temp,
+                 .gp           = 0x001FAE14,
+                 .itarget      = 0x320000,
+                 .control_id   = FAN_OD,
+               },
+               { .model_id     = 3,
+                 .sensor       = &sensor_optical_drive_temp,
+                 .gp           = 0x001FAE14,
+                 .itarget      = 0x320000,
+                 .control_id   = FAN_OD,
+               },
+       },
+};
+
+/* the hardcoded values */
+#define        PM121_SYS_GD            0x00000000
+#define        PM121_SYS_GR            0x00019999
+#define        PM121_SYS_HISTORY_SIZE  2
+#define        PM121_SYS_INTERVAL      5
+
+/* State data used by the system fans control loop
+ */
+struct pm121_sys_state {
+       int                     ticks;
+       s32                     setpoint;
+       struct wf_pid_state     pid;
+};
+
+struct pm121_sys_state *pm121_sys_state[N_LOOPS] = {};
+
+/*
+ * ****** CPU Fans Control Loop ******
+ *
+ */
+
+#define PM121_CPU_INTERVAL     1
+
+/* State data used by the cpu fans control loop
+ */
+struct pm121_cpu_state {
+       int                     ticks;
+       s32                     setpoint;
+       struct wf_cpu_pid_state pid;
+};
+
+static struct pm121_cpu_state *pm121_cpu_state;
+
+
+
+/*
+ * ***** Implementation *****
+ *
+ */
+
+/* correct the value using the output-low-bound correction algo */
+static s32 pm121_correct(s32 new_setpoint,
+                        unsigned int control_id,
+                        s32 min)
+{
+       s32 new_min;
+       struct pm121_correction *correction;
+       correction = &corrections[control_id][pm121_mach_model - 2];
+
+       new_min = (average_power * correction->slope) >> 16;
+       new_min += correction->offset;
+       new_min = (new_min >> 16) + min;
+
+       return max(new_setpoint, max(new_min, 0));
+}
+
+static s32 pm121_connect(unsigned int control_id, s32 setpoint)
+{
+       s32 new_min, value, new_setpoint;
+
+       if (pm121_connection->control_id == control_id) {
+               controls[control_id]->ops->get_value(controls[control_id],
+                                                    &value);
+               new_min = value * pm121_connection->correction.slope;
+               new_min += pm121_connection->correction.offset;
+               if (new_min > 0) {
+                       new_setpoint = max(setpoint, (new_min >> 16));
+                       if (new_setpoint != setpoint) {
+                               pr_debug("pm121: %s depending on %s, "
+                                        "corrected from %d to %d RPM\n",
+                                        controls[control_id]->name,
+                                        controls[pm121_connection->ref_id]->name,
+                                        (int) setpoint, (int) new_setpoint);
+                       }
+               } else
+                       new_setpoint = setpoint;
+       }
+       /* no connection */
+       else
+               new_setpoint = setpoint;
+
+       return new_setpoint;
+}
+
+/* FAN LOOPS */
+static void pm121_create_sys_fans(int loop_id)
+{
+       struct pm121_sys_param *param = NULL;
+       struct wf_pid_param pid_param;
+       struct wf_control *control = NULL;
+       int i;
+
+       /* First, locate the params for this model */
+       for (i = 0; i < PM121_NUM_CONFIGS; i++) {
+               if (pm121_sys_all_params[loop_id][i].model_id == pm121_mach_model) {
+                       param = &(pm121_sys_all_params[loop_id][i]);
+                       break;
+               }
+       }
+
+       /* No params found, put fans to max */
+       if (param == NULL) {
+               printk(KERN_WARNING "pm121: %s fan config not found "
+                      " for this machine model\n",
+                      loop_names[loop_id]);
+               goto fail;
+       }
+
+       control = controls[param->control_id];
+
+       /* Alloc & initialize state */
+       pm121_sys_state[loop_id] = kmalloc(sizeof(struct pm121_sys_state),
+                                          GFP_KERNEL);
+       if (pm121_sys_state[loop_id] == NULL) {
+               printk(KERN_WARNING "pm121: Memory allocation error\n");
+               goto fail;
+       }
+       pm121_sys_state[loop_id]->ticks = 1;
+
+       /* Fill PID params */
+       pid_param.gd            = PM121_SYS_GD;
+       pid_param.gp            = param->gp;
+       pid_param.gr            = PM121_SYS_GR;
+       pid_param.interval      = PM121_SYS_INTERVAL;
+       pid_param.history_len   = PM121_SYS_HISTORY_SIZE;
+       pid_param.itarget       = param->itarget;
+       pid_param.min           = control->ops->get_min(control);
+       pid_param.max           = control->ops->get_max(control);
+
+       wf_pid_init(&pm121_sys_state[loop_id]->pid, &pid_param);
+
+       pr_debug("pm121: %s Fan control loop initialized.\n"
+                "       itarged=%d.%03d, min=%d RPM, max=%d RPM\n",
+                loop_names[loop_id], FIX32TOPRINT(pid_param.itarget),
+                pid_param.min, pid_param.max);
+       return;
+
+ fail:
+       /* note that this is not optimal since another loop may still
+          control the same control */
+       printk(KERN_WARNING "pm121: failed to set up %s loop "
+              "setting \"%s\" to max speed.\n",
+              loop_names[loop_id], control->name);
+
+       if (control)
+               wf_control_set_max(control);
+}
+
+static void pm121_sys_fans_tick(int loop_id)
+{
+       struct pm121_sys_param *param;
+       struct pm121_sys_state *st;
+       struct wf_sensor *sensor;
+       struct wf_control *control;
+       s32 temp, new_setpoint;
+       int rc;
+
+       param = &(pm121_sys_all_params[loop_id][pm121_mach_model-2]);
+       st = pm121_sys_state[loop_id];
+       sensor = *(param->sensor);
+       control = controls[param->control_id];
+
+       if (--st->ticks != 0) {
+               if (pm121_readjust)
+                       goto readjust;
+               return;
+       }
+       st->ticks = PM121_SYS_INTERVAL;
+
+       rc = sensor->ops->get_value(sensor, &temp);
+       if (rc) {
+               printk(KERN_WARNING "windfarm: %s sensor error %d\n",
+                      sensor->name, rc);
+               pm121_failure_state |= FAILURE_SENSOR;
+               return;
+       }
+
+       pr_debug("pm121: %s Fan tick ! %s: %d.%03d\n",
+                loop_names[loop_id], sensor->name,
+                FIX32TOPRINT(temp));
+
+       new_setpoint = wf_pid_run(&st->pid, temp);
+
+       /* correction */
+       new_setpoint = pm121_correct(new_setpoint,
+                                    param->control_id,
+                                    st->pid.param.min);
+       /* linked correction */
+       new_setpoint = pm121_connect(param->control_id, new_setpoint);
+
+       if (new_setpoint == st->setpoint)
+               return;
+       st->setpoint = new_setpoint;
+       pr_debug("pm121: %s corrected setpoint: %d RPM\n",
+                control->name, (int)new_setpoint);
+ readjust:
+       if (control && pm121_failure_state == 0) {
+               rc = control->ops->set_value(control, st->setpoint);
+               if (rc) {
+                       printk(KERN_WARNING "windfarm: %s fan error %d\n",
+                              control->name, rc);
+                       pm121_failure_state |= FAILURE_FAN;
+               }
+       }
+}
+
+
+/* CPU LOOP */
+static void pm121_create_cpu_fans(void)
+{
+       struct wf_cpu_pid_param pid_param;
+       const struct smu_sdbp_header *hdr;
+       struct smu_sdbp_cpupiddata *piddata;
+       struct smu_sdbp_fvt *fvt;
+       struct wf_control *fan_cpu;
+       s32 tmax, tdelta, maxpow, powadj;
+
+       fan_cpu = controls[FAN_CPU];
+
+       /* First, locate the PID params in SMU SBD */
+       hdr = smu_get_sdb_partition(SMU_SDB_CPUPIDDATA_ID, NULL);
+       if (hdr == 0) {
+               printk(KERN_WARNING "pm121: CPU PID fan config not found.\n");
+               goto fail;
+       }
+       piddata = (struct smu_sdbp_cpupiddata *)&hdr[1];
+
+       /* Get the FVT params for operating point 0 (the only supported one
+        * for now) in order to get tmax
+        */
+       hdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL);
+       if (hdr) {
+               fvt = (struct smu_sdbp_fvt *)&hdr[1];
+               tmax = ((s32)fvt->maxtemp) << 16;
+       } else
+               tmax = 0x5e0000; /* 94 degree default */
+
+       /* Alloc & initialize state */
+       pm121_cpu_state = kmalloc(sizeof(struct pm121_cpu_state),
+                                 GFP_KERNEL);
+       if (pm121_cpu_state == NULL)
+               goto fail;
+       pm121_cpu_state->ticks = 1;
+
+       /* Fill PID params */
+       pid_param.interval = PM121_CPU_INTERVAL;
+       pid_param.history_len = piddata->history_len;
+       if (pid_param.history_len > WF_CPU_PID_MAX_HISTORY) {
+               printk(KERN_WARNING "pm121: History size overflow on "
+                      "CPU control loop (%d)\n", piddata->history_len);
+               pid_param.history_len = WF_CPU_PID_MAX_HISTORY;
+       }
+       pid_param.gd = piddata->gd;
+       pid_param.gp = piddata->gp;
+       pid_param.gr = piddata->gr / pid_param.history_len;
+
+       tdelta = ((s32)piddata->target_temp_delta) << 16;
+       maxpow = ((s32)piddata->max_power) << 16;
+       powadj = ((s32)piddata->power_adj) << 16;
+
+       pid_param.tmax = tmax;
+       pid_param.ttarget = tmax - tdelta;
+       pid_param.pmaxadj = maxpow - powadj;
+
+       pid_param.min = fan_cpu->ops->get_min(fan_cpu);
+       pid_param.max = fan_cpu->ops->get_max(fan_cpu);
+
+       wf_cpu_pid_init(&pm121_cpu_state->pid, &pid_param);
+
+       pr_debug("pm121: CPU Fan control initialized.\n");
+       pr_debug("       ttarged=%d.%03d, tmax=%d.%03d, min=%d RPM, max=%d RPM,\n",
+                FIX32TOPRINT(pid_param.ttarget), FIX32TOPRINT(pid_param.tmax),
+                pid_param.min, pid_param.max);
+
+       return;
+
+ fail:
+       printk(KERN_WARNING "pm121: CPU fan config not found, max fan speed\n");
+
+       if (controls[CPUFREQ])
+               wf_control_set_max(controls[CPUFREQ]);
+       if (fan_cpu)
+               wf_control_set_max(fan_cpu);
+}
+
+
+static void pm121_cpu_fans_tick(struct pm121_cpu_state *st)
+{
+       s32 new_setpoint, temp, power;
+       struct wf_control *fan_cpu = NULL;
+       int rc;
+
+       if (--st->ticks != 0) {
+               if (pm121_readjust)
+                       goto readjust;
+               return;
+       }
+       st->ticks = PM121_CPU_INTERVAL;
+
+       fan_cpu = controls[FAN_CPU];
+
+       rc = sensor_cpu_temp->ops->get_value(sensor_cpu_temp, &temp);
+       if (rc) {
+               printk(KERN_WARNING "pm121: CPU temp sensor error %d\n",
+                      rc);
+               pm121_failure_state |= FAILURE_SENSOR;
+               return;
+       }
+
+       rc = sensor_cpu_power->ops->get_value(sensor_cpu_power, &power);
+       if (rc) {
+               printk(KERN_WARNING "pm121: CPU power sensor error %d\n",
+                      rc);
+               pm121_failure_state |= FAILURE_SENSOR;
+               return;
+       }
+
+       pr_debug("pm121: CPU Fans tick ! CPU temp: %d.%03d°C, power: %d.%03d\n",
+                FIX32TOPRINT(temp), FIX32TOPRINT(power));
+
+       if (temp > st->pid.param.tmax)
+               pm121_failure_state |= FAILURE_OVERTEMP;
+
+       new_setpoint = wf_cpu_pid_run(&st->pid, power, temp);
+
+       /* correction */
+       new_setpoint = pm121_correct(new_setpoint,
+                                    FAN_CPU,
+                                    st->pid.param.min);
+
+       /* connected correction */
+       new_setpoint = pm121_connect(FAN_CPU, new_setpoint);
+
+       if (st->setpoint == new_setpoint)
+               return;
+       st->setpoint = new_setpoint;
+       pr_debug("pm121: CPU corrected setpoint: %d RPM\n", (int)new_setpoint);
+
+ readjust:
+       if (fan_cpu && pm121_failure_state == 0) {
+               rc = fan_cpu->ops->set_value(fan_cpu, st->setpoint);
+               if (rc) {
+                       printk(KERN_WARNING "pm121: %s fan error %d\n",
+                              fan_cpu->name, rc);
+                       pm121_failure_state |= FAILURE_FAN;
+               }
+       }
+}
+
+/*
+ * ****** Common ******
+ *
+ */
+
+static void pm121_tick(void)
+{
+       unsigned int last_failure = pm121_failure_state;
+       unsigned int new_failure;
+       s32 total_power;
+       int i;
+
+       if (!pm121_started) {
+               pr_debug("pm121: creating control loops !\n");
+               for (i = 0; i < N_LOOPS; i++)
+                       pm121_create_sys_fans(i);
+
+               pm121_create_cpu_fans();
+               pm121_started = 1;
+       }
+
+       /* skipping ticks */
+       if (pm121_skipping && --pm121_skipping)
+               return;
+
+       /* compute average power */
+       total_power = 0;
+       for (i = 0; i < pm121_cpu_state->pid.param.history_len; i++)
+               total_power += pm121_cpu_state->pid.powers[i];
+
+       average_power = total_power / pm121_cpu_state->pid.param.history_len;
+
+
+       pm121_failure_state = 0;
+       for (i = 0 ; i < N_LOOPS; i++) {
+               if (pm121_sys_state[i])
+                       pm121_sys_fans_tick(i);
+       }
+
+       if (pm121_cpu_state)
+               pm121_cpu_fans_tick(pm121_cpu_state);
+
+       pm121_readjust = 0;
+       new_failure = pm121_failure_state & ~last_failure;
+
+       /* If entering failure mode, clamp cpufreq and ramp all
+        * fans to full speed.
+        */
+       if (pm121_failure_state && !last_failure) {
+               for (i = 0; i < N_CONTROLS; i++) {
+                       if (controls[i])
+                               wf_control_set_max(controls[i]);
+               }
+       }
+
+       /* If leaving failure mode, unclamp cpufreq and readjust
+        * all fans on next iteration
+        */
+       if (!pm121_failure_state && last_failure) {
+               if (controls[CPUFREQ])
+                       wf_control_set_min(controls[CPUFREQ]);
+               pm121_readjust = 1;
+       }
+
+       /* Overtemp condition detected, notify and start skipping a couple
+        * ticks to let the temperature go down
+        */
+       if (new_failure & FAILURE_OVERTEMP) {
+               wf_set_overtemp();
+               pm121_skipping = 2;
+       }
+
+       /* We only clear the overtemp condition if overtemp is cleared
+        * _and_ no other failure is present: a sensor error clears the
+        * overtemp condition at the control-loop level (the temperature
+        * can no longer be measured), but in that case we do not want
+        * to clear it here as well.
+        */
+       if (new_failure == 0 && last_failure & FAILURE_OVERTEMP)
+               wf_clear_overtemp();
+}
+
+
+static struct wf_control* pm121_register_control(struct wf_control *ct,
+                                                const char *match,
+                                                unsigned int id)
+{
+       if (controls[id] == NULL && !strcmp(ct->name, match)) {
+               if (wf_get_control(ct) == 0)
+                       controls[id] = ct;
+       }
+       return controls[id];
+}
+
+static void pm121_new_control(struct wf_control *ct)
+{
+       int all = 1;
+
+       if (pm121_all_controls_ok)
+               return;
+
+       all = pm121_register_control(ct, "optical-drive-fan", FAN_OD) && all;
+       all = pm121_register_control(ct, "hard-drive-fan", FAN_HD) && all;
+       all = pm121_register_control(ct, "cpu-fan", FAN_CPU) && all;
+       all = pm121_register_control(ct, "cpufreq-clamp", CPUFREQ) && all;
+
+       if (all)
+               pm121_all_controls_ok = 1;
+}
+
+
+
+
+static struct wf_sensor* pm121_register_sensor(struct wf_sensor *sensor,
+                                              const char *match,
+                                              struct wf_sensor **var)
+{
+       if (*var == NULL && !strcmp(sensor->name, match)) {
+               if (wf_get_sensor(sensor) == 0)
+                       *var = sensor;
+       }
+       return *var;
+}
+
+static void pm121_new_sensor(struct wf_sensor *sr)
+{
+       int all = 1;
+
+       if (pm121_all_sensors_ok)
+               return;
+
+       all = pm121_register_sensor(sr, "cpu-temp",
+                                   &sensor_cpu_temp) && all;
+       all = pm121_register_sensor(sr, "cpu-current",
+                                   &sensor_cpu_current) && all;
+       all = pm121_register_sensor(sr, "cpu-voltage",
+                                   &sensor_cpu_voltage) && all;
+       all = pm121_register_sensor(sr, "cpu-power",
+                                   &sensor_cpu_power) && all;
+       all = pm121_register_sensor(sr, "hard-drive-temp",
+                                   &sensor_hard_drive_temp) && all;
+       all = pm121_register_sensor(sr, "optical-drive-temp",
+                                   &sensor_optical_drive_temp) && all;
+       all = pm121_register_sensor(sr, "incoming-air-temp",
+                                   &sensor_incoming_air_temp) && all;
+       all = pm121_register_sensor(sr, "north-bridge-temp",
+                                   &sensor_north_bridge_temp) && all;
+       all = pm121_register_sensor(sr, "gpu-temp",
+                                   &sensor_gpu_temp) && all;
+
+       if (all)
+               pm121_all_sensors_ok = 1;
+}
+
+
+
+static int pm121_notify(struct notifier_block *self,
+                       unsigned long event, void *data)
+{
+       switch (event) {
+       case WF_EVENT_NEW_CONTROL:
+               pr_debug("pm121: new control %s detected\n",
+                        ((struct wf_control *)data)->name);
+               pm121_new_control(data);
+               break;
+       case WF_EVENT_NEW_SENSOR:
+               pr_debug("pm121: new sensor %s detected\n",
+                        ((struct wf_sensor *)data)->name);
+               pm121_new_sensor(data);
+               break;
+       case WF_EVENT_TICK:
+               if (pm121_all_controls_ok && pm121_all_sensors_ok)
+                       pm121_tick();
+               break;
+       }
+
+       return 0;
+}
+
+static struct notifier_block pm121_events = {
+       .notifier_call  = pm121_notify,
+};
+
+static int pm121_init_pm(void)
+{
+       const struct smu_sdbp_header *hdr;
+
+       hdr = smu_get_sdb_partition(SMU_SDB_SENSORTREE_ID, NULL);
+       if (hdr != 0) {
+               struct smu_sdbp_sensortree *st =
+                       (struct smu_sdbp_sensortree *)&hdr[1];
+               pm121_mach_model = st->model_id;
+       }
+
+       pm121_connection = &pm121_connections[pm121_mach_model - 2];
+
+       printk(KERN_INFO "pm121: Initializing for iMac G5 iSight model ID %d\n",
+              pm121_mach_model);
+
+       return 0;
+}
+
+
+static int pm121_probe(struct platform_device *ddev)
+{
+       wf_register_client(&pm121_events);
+
+       return 0;
+}
+
+static int __devexit pm121_remove(struct platform_device *ddev)
+{
+       wf_unregister_client(&pm121_events);
+       return 0;
+}
+
+static struct platform_driver pm121_driver = {
+       .probe = pm121_probe,
+       .remove = __devexit_p(pm121_remove),
+       .driver = {
+               .name = "windfarm",
+               .bus = &platform_bus_type,
+       },
+};
+
+
+static int __init pm121_init(void)
+{
+       int rc = -ENODEV;
+
+       if (machine_is_compatible("PowerMac12,1"))
+               rc = pm121_init_pm();
+
+       if (rc == 0) {
+               request_module("windfarm_smu_controls");
+               request_module("windfarm_smu_sensors");
+               request_module("windfarm_smu_sat");
+               request_module("windfarm_lm75_sensor");
+               request_module("windfarm_max6690_sensor");
+               request_module("windfarm_cpufreq_clamp");
+               platform_driver_register(&pm121_driver);
+       }
+
+       return rc;
+}
+
+static void __exit pm121_exit(void)
+{
+
+       platform_driver_unregister(&pm121_driver);
+}
+
+
+module_init(pm121_init);
+module_exit(pm121_exit);
+
+MODULE_AUTHOR("Étienne Bersac <bersace@gmail.com>");
+MODULE_DESCRIPTION("Thermal control logic for iMac G5 (iSight)");
+MODULE_LICENSE("GPL");
+
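
A note on the fixed-point convention the driver above relies on: temperatures and targets are 16.16 fixed-point values (hence the "<< 16" shifts when building tmax and tdelta), and the FIX32TOPRINT macro used in the pr_debug calls splits such a value into an integer part and a milli part for "%d.%03d" formatting. The stand-alone user-space sketch below only illustrates that convention; its FIX32TOPRINT definition is a plausible equivalent written for this example, not copied from the driver, whose own definition sits outside the hunks shown here.

#include <stdio.h>
#include <stdint.h>

/* Split a 16.16 fixed-point value into integer and milli parts. */
#define FIX32TOPRINT(f)	((f) >> 16), ((((f) & 0xffff) * 1000) >> 16)

int main(void)
{
	int32_t tmax = 94 << 16;	/* 94 C, same value as the 0x5e0000 default above */
	int32_t tdelta = (3 << 16) / 2;	/* 1.5 C, purely illustrative */
	int32_t ttarget = tmax - tdelta;

	printf("tmax    = %d.%03d\n", FIX32TOPRINT(tmax));	/* 94.000 */
	printf("ttarget = %d.%03d\n", FIX32TOPRINT(ttarget));	/* 92.500 */
	return 0;
}
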
index 58c2590f05ecd587770127ffeb55ee37b7c55cc3..961fa0e7c2cf76b3607fe02707d64275192d6f4d 100644 (file)
@@ -218,6 +218,10 @@ static struct smu_fan_control *smu_fan_create(struct device_node *node,
                fct->ctrl.name = "cpu-fan";
        else if (!strcmp(l, "Hard Drive") || !strcmp(l, "Hard drive"))
                fct->ctrl.name = "drive-bay-fan";
+       else if (!strcmp(l, "HDD Fan")) /* seen on iMac G5 iSight */
+               fct->ctrl.name = "hard-drive-fan";
+       else if (!strcmp(l, "ODD Fan")) /* same */
+               fct->ctrl.name = "optical-drive-fan";
 
        /* Unrecognized fan, bail out */
        if (fct->ctrl.name == NULL)
index 0c7bfa74c8efccc36502b1cc99c38a542956f6ba..494f0c2001f598aa9af888eeb8a1f5bb4e2e9db8 100644 (file)
@@ -281,24 +281,6 @@ void mca_set_adapter_name(int slot, char* name)
 }
 EXPORT_SYMBOL(mca_set_adapter_name);
 
-/**
- *     mca_is_adapter_used - check if claimed by driver
- *     @slot:  slot to check
- *
- *     Returns 1 if the slot has been claimed by a driver
- */
-
-int mca_is_adapter_used(int slot)
-{
-       struct mca_device *mca_dev = mca_find_device_by_slot(slot);
-
-       if(!mca_dev)
-               return 0;
-
-       return mca_device_claimed(mca_dev);
-}
-EXPORT_SYMBOL(mca_is_adapter_used);
-
 /**
  *     mca_mark_as_used - claim an MCA device
  *     @slot:  slot to claim
index 33d5e0820cc503cc21becb73d11c375c759aaa6a..81ea0d377bf447108825c4683e5b2be633c4b24f 100644 (file)
@@ -183,7 +183,7 @@ void __init mca_do_proc_init(void)
        struct proc_dir_entry* node = NULL;
        struct mca_device *mca_dev;
 
-       proc_mca = proc_mkdir("mca", &proc_root);
+       proc_mca = proc_mkdir("mca", NULL);
        create_proc_read_entry("pos",0,proc_mca,get_mca_info,NULL);
        create_proc_read_entry("machine",0,proc_mca,get_mca_machine_info,NULL);
 
index 6b91b9ab1d41f537bc81e703ecffa710f0efcdf8..3ea5ad4b7805f134af23f9d46842e9a657b7c284 100644 (file)
@@ -110,8 +110,6 @@ static struct request *get_failover_req(struct emc_handler *h,
        memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
        rq->sense_len = 0;
 
-       memset(&rq->cmd, 0, BLK_MAX_CDB);
-
        rq->timeout = EMC_FAILOVER_TIMEOUT;
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
index 204bf42c94493575518bc72e3536ddc9b94c27de..b63a0ab37c538736c9eac9869fdf464105ba0025 100644 (file)
@@ -137,7 +137,6 @@ static struct request *hp_sw_get_request(struct dm_path *path)
        req->sense = h->sense;
        memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
 
-       memset(&req->cmd, 0, BLK_MAX_CDB);
        req->cmd[0] = START_STOP;
        req->cmd[4] = 1;
        req->cmd_len = COMMAND_SIZE(req->cmd[0]);
index e04eb5c697fb884081b79c8ad5ed4b378c19abd6..95e77734880a7ea39b4159359d92f9c42316b843 100644 (file)
@@ -284,7 +284,6 @@ static struct request *get_rdac_req(struct rdac_handler *h,
                return NULL;
        }
 
-       memset(&rq->cmd, 0, BLK_MAX_CDB);
        rq->sense = h->sense;
        memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE);
        rq->sense_len = 0;
index 51be53344214906593ec54dec6b6e8e1c018f369..94116eaf47099ccad9f8ca458afd3c4f1e835435 100644 (file)
@@ -873,10 +873,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q)
        q->max_hw_sectors = t->limits.max_hw_sectors;
        q->seg_boundary_mask = t->limits.seg_boundary_mask;
        q->bounce_pfn = t->limits.bounce_pfn;
+
        if (t->limits.no_cluster)
-               q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER);
+               queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
        else
-               q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER);
+               queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, q);
 
 }
 
index 50377e5dc2a36b41bc0838622aafd313478b1163..6f65883aef124828c20a6ba1b29a26c541a941c8 100644 (file)
@@ -78,7 +78,7 @@ static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
 
        event = dm_uevent_alloc(md);
        if (!event) {
-               DMERR("%s: dm_uevent_alloc() failed", __FUNCTION__);
+               DMERR("%s: dm_uevent_alloc() failed", __func__);
                goto err_nomem;
        }
 
@@ -86,32 +86,32 @@ static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
 
        if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
                DMERR("%s: add_uevent_var() for DM_TARGET failed",
-                     __FUNCTION__);
+                     __func__);
                goto err_add;
        }
 
        if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
                DMERR("%s: add_uevent_var() for DM_ACTION failed",
-                     __FUNCTION__);
+                     __func__);
                goto err_add;
        }
 
        if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
                           dm_next_uevent_seq(md))) {
                DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
-                     __FUNCTION__);
+                     __func__);
                goto err_add;
        }
 
        if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
-               DMERR("%s: add_uevent_var() for DM_PATH failed", __FUNCTION__);
+               DMERR("%s: add_uevent_var() for DM_PATH failed", __func__);
                goto err_add;
        }
 
        if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d",
                           nr_valid_paths)) {
                DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
-                     __FUNCTION__);
+                     __func__);
                goto err_add;
        }
 
@@ -146,25 +146,25 @@ void dm_send_uevents(struct list_head *events, struct kobject *kobj)
                if (dm_copy_name_and_uuid(event->md, event->name,
                                          event->uuid)) {
                        DMERR("%s: dm_copy_name_and_uuid() failed",
-                             __FUNCTION__);
+                             __func__);
                        goto uevent_free;
                }
 
                if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
                        DMERR("%s: add_uevent_var() for DM_NAME failed",
-                             __FUNCTION__);
+                             __func__);
                        goto uevent_free;
                }
 
                if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
                        DMERR("%s: add_uevent_var() for DM_UUID failed",
-                             __FUNCTION__);
+                             __func__);
                        goto uevent_free;
                }
 
                r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
                if (r)
-                       DMERR("%s: kobject_uevent_env failed", __FUNCTION__);
+                       DMERR("%s: kobject_uevent_env failed", __func__);
 uevent_free:
                dm_uevent_free(event);
        }
@@ -187,7 +187,7 @@ void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
        struct dm_uevent *event;
 
        if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
-               DMERR("%s: Invalid event_type %d", __FUNCTION__, event_type);
+               DMERR("%s: Invalid event_type %d", __func__, event_type);
                goto out;
        }
 
index 5ebfb4d7990170f3fc40344817d8b11c018eac1c..83eb78b00137e98f6624bf72247d8a20bed1f870 100644 (file)
@@ -276,13 +276,15 @@ static mddev_t * mddev_find(dev_t unit)
        init_waitqueue_head(&new->sb_wait);
        new->reshape_position = MaxSector;
        new->resync_max = MaxSector;
+       new->level = LEVEL_NONE;
 
        new->queue = blk_alloc_queue(GFP_KERNEL);
        if (!new->queue) {
                kfree(new);
                return NULL;
        }
-       set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);
+       /* Can be unlocked because the queue is new: no concurrency */
+       queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue);
 
        blk_queue_make_request(new->queue, md_fail_request);
 
@@ -731,9 +733,9 @@ static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version
        else
                rdev->desc_nr = sb->this_disk.number;
 
-       if (refdev == 0)
+       if (!refdev) {
                ret = 1;
-       else {
+       } else {
                __u64 ev1, ev2;
                mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
                if (!uuid_equal(refsb, sb)) {
@@ -1116,9 +1118,9 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
        else
                rdev->desc_nr = le32_to_cpu(sb->dev_number);
 
-       if (refdev == 0)
+       if (!refdev) {
                ret = 1;
-       else {
+       } else {
                __u64 ev1, ev2;
                struct mdp_superblock_1 *refsb = 
                        (struct mdp_superblock_1*)page_address(refdev->sb_page);
@@ -1368,6 +1370,11 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
                MD_BUG();
                return -EINVAL;
        }
+
+       /* prevent duplicates */
+       if (find_rdev(mddev, rdev->bdev->bd_dev))
+               return -EEXIST;
+
        /* make sure rdev->size exceeds mddev->size */
        if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
                if (mddev->pers) {
@@ -1651,6 +1658,8 @@ static void md_update_sb(mddev_t * mddev, int force_change)
        int sync_req;
        int nospares = 0;
 
+       if (mddev->external)
+               return;
 repeat:
        spin_lock_irq(&mddev->write_lock);
 
@@ -1819,6 +1828,10 @@ state_show(mdk_rdev_t *rdev, char *page)
                len += sprintf(page+len, "%swrite_mostly",sep);
                sep = ",";
        }
+       if (test_bit(Blocked, &rdev->flags)) {
+               len += sprintf(page+len, "%sblocked", sep);
+               sep = ",";
+       }
        if (!test_bit(Faulty, &rdev->flags) &&
            !test_bit(In_sync, &rdev->flags)) {
                len += sprintf(page+len, "%sspare", sep);
@@ -1835,6 +1848,8 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
         *  remove  - disconnects the device
         *  writemostly - sets write_mostly
         *  -writemostly - clears write_mostly
+        *  blocked - sets the Blocked flag
+        *  -blocked - clears the Blocked flag
         */
        int err = -EINVAL;
        if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
@@ -1856,6 +1871,16 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
                err = 0;
        } else if (cmd_match(buf, "-writemostly")) {
                clear_bit(WriteMostly, &rdev->flags);
+               err = 0;
+       } else if (cmd_match(buf, "blocked")) {
+               set_bit(Blocked, &rdev->flags);
+               err = 0;
+       } else if (cmd_match(buf, "-blocked")) {
+               clear_bit(Blocked, &rdev->flags);
+               wake_up(&rdev->blocked_wait);
+               set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
+               md_wakeup_thread(rdev->mddev->thread);
+
                err = 0;
        }
        return err ? err : len;
@@ -2096,7 +2121,7 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
                        rv = -EBUSY;
                else
                        rv = entry->store(rdev, page, length);
-               mddev_unlock(rdev->mddev);
+               mddev_unlock(mddev);
        }
        return rv;
 }
@@ -2185,7 +2210,9 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
                        goto abort_free;
                }
        }
+
        INIT_LIST_HEAD(&rdev->same_set);
+       init_waitqueue_head(&rdev->blocked_wait);
 
        return rdev;
 
@@ -2456,7 +2483,6 @@ resync_start_show(mddev_t *mddev, char *page)
 static ssize_t
 resync_start_store(mddev_t *mddev, const char *buf, size_t len)
 {
-       /* can only set chunk_size if array is not yet active */
        char *e;
        unsigned long long n = simple_strtoull(buf, &e, 10);
 
@@ -2590,15 +2616,20 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
                        err = do_md_stop(mddev, 1);
                else {
                        mddev->ro = 1;
+                       set_disk_ro(mddev->gendisk, 1);
                        err = do_md_run(mddev);
                }
                break;
        case read_auto:
-               /* stopping an active array */
                if (mddev->pers) {
-                       err = do_md_stop(mddev, 1);
-                       if (err == 0)
-                               mddev->ro = 2; /* FIXME mark devices writable */
+                       if (mddev->ro != 1)
+                               err = do_md_stop(mddev, 1);
+                       else
+                               err = restart_array(mddev);
+                       if (err == 0) {
+                               mddev->ro = 2;
+                               set_disk_ro(mddev->gendisk, 0);
+                       }
                } else {
                        mddev->ro = 2;
                        err = do_md_run(mddev);
@@ -2611,6 +2642,8 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
                        if (atomic_read(&mddev->writes_pending) == 0) {
                                if (mddev->in_sync == 0) {
                                        mddev->in_sync = 1;
+                                       if (mddev->safemode == 1)
+                                               mddev->safemode = 0;
                                        if (mddev->persistent)
                                                set_bit(MD_CHANGE_CLEAN,
                                                        &mddev->flags);
@@ -2634,6 +2667,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len)
                        err = 0;
                } else {
                        mddev->ro = 0;
+                       set_disk_ro(mddev->gendisk, 0);
                        err = do_md_run(mddev);
                }
                break;
@@ -3711,6 +3745,30 @@ static int do_md_stop(mddev_t * mddev, int mode)
                mddev->reshape_position = MaxSector;
                mddev->external = 0;
                mddev->persistent = 0;
+               mddev->level = LEVEL_NONE;
+               mddev->clevel[0] = 0;
+               mddev->flags = 0;
+               mddev->ro = 0;
+               mddev->metadata_type[0] = 0;
+               mddev->chunk_size = 0;
+               mddev->ctime = mddev->utime = 0;
+               mddev->layout = 0;
+               mddev->max_disks = 0;
+               mddev->events = 0;
+               mddev->delta_disks = 0;
+               mddev->new_level = LEVEL_NONE;
+               mddev->new_layout = 0;
+               mddev->new_chunk = 0;
+               mddev->curr_resync = 0;
+               mddev->resync_mismatches = 0;
+               mddev->suspend_lo = mddev->suspend_hi = 0;
+               mddev->sync_speed_min = mddev->sync_speed_max = 0;
+               mddev->recovery = 0;
+               mddev->in_sync = 0;
+               mddev->changed = 0;
+               mddev->degraded = 0;
+               mddev->barriers_work = 0;
+               mddev->safemode = 0;
 
        } else if (mddev->pers)
                printk(KERN_INFO "md: %s switched to read-only mode.\n",
@@ -4918,6 +4976,9 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev)
 
        if (!rdev || test_bit(Faulty, &rdev->flags))
                return;
+
+       if (mddev->external)
+               set_bit(Blocked, &rdev->flags);
 /*
        dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n",
                mdname(mddev),
@@ -5364,6 +5425,8 @@ void md_write_start(mddev_t *mddev, struct bio *bi)
                md_wakeup_thread(mddev->sync_thread);
        }
        atomic_inc(&mddev->writes_pending);
+       if (mddev->safemode == 1)
+               mddev->safemode = 0;
        if (mddev->in_sync) {
                spin_lock_irq(&mddev->write_lock);
                if (mddev->in_sync) {
@@ -5718,7 +5781,7 @@ static int remove_and_add_spares(mddev_t *mddev)
 
        rdev_for_each(rdev, rtmp, mddev)
                if (rdev->raid_disk >= 0 &&
-                   !mddev->external &&
+                   !test_bit(Blocked, &rdev->flags) &&
                    (test_bit(Faulty, &rdev->flags) ||
                     ! test_bit(In_sync, &rdev->flags)) &&
                    atomic_read(&rdev->nr_pending)==0) {
@@ -5788,7 +5851,7 @@ void md_check_recovery(mddev_t *mddev)
                return;
 
        if (signal_pending(current)) {
-               if (mddev->pers->sync_request) {
+               if (mddev->pers->sync_request && !mddev->external) {
                        printk(KERN_INFO "md: %s in immediate safe mode\n",
                               mdname(mddev));
                        mddev->safemode = 2;
@@ -5800,7 +5863,7 @@ void md_check_recovery(mddev_t *mddev)
                (mddev->flags && !mddev->external) ||
                test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
                test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
-               (mddev->safemode == 1) ||
+               (mddev->external == 0 && mddev->safemode == 1) ||
                (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
                 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
                ))
@@ -5809,16 +5872,20 @@ void md_check_recovery(mddev_t *mddev)
        if (mddev_trylock(mddev)) {
                int spares = 0;
 
-               spin_lock_irq(&mddev->write_lock);
-               if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
-                   !mddev->in_sync && mddev->recovery_cp == MaxSector) {
-                       mddev->in_sync = 1;
-                       if (mddev->persistent)
-                               set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+               if (!mddev->external) {
+                       spin_lock_irq(&mddev->write_lock);
+                       if (mddev->safemode &&
+                           !atomic_read(&mddev->writes_pending) &&
+                           !mddev->in_sync &&
+                           mddev->recovery_cp == MaxSector) {
+                               mddev->in_sync = 1;
+                               if (mddev->persistent)
+                                       set_bit(MD_CHANGE_CLEAN, &mddev->flags);
+                       }
+                       if (mddev->safemode == 1)
+                               mddev->safemode = 0;
+                       spin_unlock_irq(&mddev->write_lock);
                }
-               if (mddev->safemode == 1)
-                       mddev->safemode = 0;
-               spin_unlock_irq(&mddev->write_lock);
 
                if (mddev->flags)
                        md_update_sb(mddev, 0);
@@ -5913,6 +5980,16 @@ void md_check_recovery(mddev_t *mddev)
        }
 }
 
+void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
+{
+       sysfs_notify(&rdev->kobj, NULL, "state");
+       wait_event_timeout(rdev->blocked_wait,
+                          !test_bit(Blocked, &rdev->flags),
+                          msecs_to_jiffies(5000));
+       rdev_dec_pending(rdev, mddev);
+}
+EXPORT_SYMBOL(md_wait_for_blocked_rdev);
+
 static int md_notify_reboot(struct notifier_block *this,
                            unsigned long code, void *x)
 {
@@ -5947,13 +6024,9 @@ static struct notifier_block md_notifier = {
 
 static void md_geninit(void)
 {
-       struct proc_dir_entry *p;
-
        dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));
 
-       p = create_proc_entry("mdstat", S_IRUGO, NULL);
-       if (p)
-               p->proc_fops = &md_seq_fops;
+       proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
 }
 
 static int __init md_init(void)
index 3f299d835a2b1956e210c0a383e65f2d9c70874f..42ee1a2dc144fc88954c45a3509903633f00028e 100644 (file)
@@ -244,7 +244,8 @@ static void multipath_error (mddev_t *mddev, mdk_rdev_t *rdev)
                        conf->working_disks--;
                        mddev->degraded++;
                        printk(KERN_ALERT "multipath: IO failure on %s,"
-                               " disabling IO path. \n Operation continuing"
+                               " disabling IO path.\n"
+                               "multipath: Operation continuing"
                                " on %d IO paths.\n",
                                bdevname (rdev->bdev,b),
                                conf->working_disks);
index ff61b309129aa8ffa9dbd00987a71c1b6eb35bf5..6778b7cb39bd5dd94eb719683267a6bef926e3cf 100644 (file)
@@ -773,7 +773,6 @@ static int make_request(struct request_queue *q, struct bio * bio)
        r1bio_t *r1_bio;
        struct bio *read_bio;
        int i, targets = 0, disks;
-       mdk_rdev_t *rdev;
        struct bitmap *bitmap = mddev->bitmap;
        unsigned long flags;
        struct bio_list bl;
@@ -781,6 +780,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
        const int rw = bio_data_dir(bio);
        const int do_sync = bio_sync(bio);
        int do_barriers;
+       mdk_rdev_t *blocked_rdev;
 
        /*
         * Register the new request and wait if the reconstruction
@@ -862,10 +862,17 @@ static int make_request(struct request_queue *q, struct bio * bio)
        first = 0;
        }
 #endif
+ retry_write:
+       blocked_rdev = NULL;
        rcu_read_lock();
        for (i = 0;  i < disks; i++) {
-               if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL &&
-                   !test_bit(Faulty, &rdev->flags)) {
+               mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
+               if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+                       atomic_inc(&rdev->nr_pending);
+                       blocked_rdev = rdev;
+                       break;
+               }
+               if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        atomic_inc(&rdev->nr_pending);
                        if (test_bit(Faulty, &rdev->flags)) {
                                rdev_dec_pending(rdev, mddev);
@@ -878,6 +885,20 @@ static int make_request(struct request_queue *q, struct bio * bio)
        }
        rcu_read_unlock();
 
+       if (unlikely(blocked_rdev)) {
+               /* Wait for this device to become unblocked */
+               int j;
+
+               for (j = 0; j < i; j++)
+                       if (r1_bio->bios[j])
+                               rdev_dec_pending(conf->mirrors[j].rdev, mddev);
+
+               allow_barrier(conf);
+               md_wait_for_blocked_rdev(blocked_rdev, mddev);
+               wait_barrier(conf);
+               goto retry_write;
+       }
+
        BUG_ON(targets == 0); /* we never fail the last device */
 
        if (targets < conf->raid_disks) {
@@ -1008,8 +1029,8 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
        } else
                set_bit(Faulty, &rdev->flags);
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
-       printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n"
-               "       Operation continuing on %d devices\n",
+       printk(KERN_ALERT "raid1: Disk failure on %s, disabling device.\n"
+               "raid1: Operation continuing on %d devices.\n",
                bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
 }
 
index 32389d2f18fcdfcadc87137936441e6d51560c05..5938fa9629221145e6b4249d4996ca6c67e08516 100644 (file)
@@ -790,6 +790,7 @@ static int make_request(struct request_queue *q, struct bio * bio)
        const int do_sync = bio_sync(bio);
        struct bio_list bl;
        unsigned long flags;
+       mdk_rdev_t *blocked_rdev;
 
        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, -EOPNOTSUPP);
@@ -879,17 +880,23 @@ static int make_request(struct request_queue *q, struct bio * bio)
        /*
         * WRITE:
         */
-       /* first select target devices under spinlock and
+       /* first select target devices under rcu_read_lock and
         * inc refcount on their rdev.  Record them by setting
         * bios[x] to bio
         */
        raid10_find_phys(conf, r10_bio);
+ retry_write:
+       blocked_rdev = NULL;
        rcu_read_lock();
        for (i = 0;  i < conf->copies; i++) {
                int d = r10_bio->devs[i].devnum;
                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
-               if (rdev &&
-                   !test_bit(Faulty, &rdev->flags)) {
+               if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+                       atomic_inc(&rdev->nr_pending);
+                       blocked_rdev = rdev;
+                       break;
+               }
+               if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        atomic_inc(&rdev->nr_pending);
                        r10_bio->devs[i].bio = bio;
                } else {
@@ -899,6 +906,22 @@ static int make_request(struct request_queue *q, struct bio * bio)
        }
        rcu_read_unlock();
 
+       if (unlikely(blocked_rdev)) {
+               /* Have to wait for this device to get unblocked, then retry */
+               int j;
+               int d;
+
+               for (j = 0; j < i; j++)
+                       if (r10_bio->devs[j].bio) {
+                               d = r10_bio->devs[j].devnum;
+                               rdev_dec_pending(conf->mirrors[d].rdev, mddev);
+                       }
+               allow_barrier(conf);
+               md_wait_for_blocked_rdev(blocked_rdev, mddev);
+               wait_barrier(conf);
+               goto retry_write;
+       }
+
        atomic_set(&r10_bio->remaining, 0);
 
        bio_list_init(&bl);
@@ -1001,8 +1024,8 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
        }
        set_bit(Faulty, &rdev->flags);
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
-       printk(KERN_ALERT "raid10: Disk failure on %s, disabling device. \n"
-               "       Operation continuing on %d devices\n",
+       printk(KERN_ALERT "raid10: Disk failure on %s, disabling device.\n"
+               "raid10: Operation continuing on %d devices.\n",
                bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
 }
 
index b162b839a6620921eb258252ee9ddaf1a1d12810..087eee0cb80913fabc692187eed59e981c5eb95e 100644 (file)
@@ -63,6 +63,7 @@
 #define STRIPE_SHIFT           (PAGE_SHIFT - 9)
 #define STRIPE_SECTORS         (STRIPE_SIZE>>9)
 #define        IO_THRESHOLD            1
+#define BYPASS_THRESHOLD       1
 #define NR_HASH                        (PAGE_SIZE / sizeof(struct hlist_head))
 #define HASH_MASK              (NR_HASH - 1)
 
@@ -398,6 +399,7 @@ static void ops_run_io(struct stripe_head *sh)
 
        might_sleep();
 
+       set_bit(STRIPE_IO_STARTED, &sh->state);
        for (i = disks; i--; ) {
                int rw;
                struct bio *bi;
@@ -433,7 +435,7 @@ static void ops_run_io(struct stripe_head *sh)
 
                        bi->bi_bdev = rdev->bdev;
                        pr_debug("%s: for %llu schedule op %ld on disc %d\n",
-                               __FUNCTION__, (unsigned long long)sh->sector,
+                               __func__, (unsigned long long)sh->sector,
                                bi->bi_rw, i);
                        atomic_inc(&sh->count);
                        bi->bi_sector = sh->sector + rdev->data_offset;
@@ -520,7 +522,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
        raid5_conf_t *conf = sh->raid_conf;
        int i;
 
-       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+       pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
        /* clear completed biofills */
@@ -569,7 +571,7 @@ static void ops_run_biofill(struct stripe_head *sh)
        raid5_conf_t *conf = sh->raid_conf;
        int i;
 
-       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+       pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
        for (i = sh->disks; i--; ) {
@@ -600,7 +602,7 @@ static void ops_complete_compute5(void *stripe_head_ref)
        int target = sh->ops.target;
        struct r5dev *tgt = &sh->dev[target];
 
-       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+       pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
        set_bit(R5_UPTODATE, &tgt->flags);
@@ -625,7 +627,7 @@ ops_run_compute5(struct stripe_head *sh, unsigned long pending)
        int i;
 
        pr_debug("%s: stripe %llu block: %d\n",
-               __FUNCTION__, (unsigned long long)sh->sector, target);
+               __func__, (unsigned long long)sh->sector, target);
        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
 
        for (i = disks; i--; )
@@ -653,7 +655,7 @@ static void ops_complete_prexor(void *stripe_head_ref)
 {
        struct stripe_head *sh = stripe_head_ref;
 
-       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+       pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
        set_bit(STRIPE_OP_PREXOR, &sh->ops.complete);
@@ -670,7 +672,7 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
        /* existing parity data subtracted */
        struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
 
-       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+       pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
        for (i = disks; i--; ) {
@@ -699,7 +701,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
         */
        int prexor = test_bit(STRIPE_OP_PREXOR, &pending);
 
-       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+       pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
        for (i = disks; i--; ) {
@@ -744,7 +746,7 @@ static void ops_complete_postxor(void *stripe_head_ref)
 {
        struct stripe_head *sh = stripe_head_ref;
 
-       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+       pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
        set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete);
@@ -757,7 +759,7 @@ static void ops_complete_write(void *stripe_head_ref)
        struct stripe_head *sh = stripe_head_ref;
        int disks = sh->disks, i, pd_idx = sh->pd_idx;
 
-       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+       pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
        for (i = disks; i--; ) {
@@ -787,7 +789,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx,
        unsigned long flags;
        dma_async_tx_callback callback;
 
-       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+       pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
        /* check if prexor is active which means only process blocks
@@ -837,7 +839,7 @@ static void ops_complete_check(void *stripe_head_ref)
        struct stripe_head *sh = stripe_head_ref;
        int pd_idx = sh->pd_idx;
 
-       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+       pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
        if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) &&
@@ -859,7 +861,7 @@ static void ops_run_check(struct stripe_head *sh)
        int count = 0, pd_idx = sh->pd_idx, i;
        struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
 
-       pr_debug("%s: stripe %llu\n", __FUNCTION__,
+       pr_debug("%s: stripe %llu\n", __func__,
                (unsigned long long)sh->sector);
 
        for (i = disks; i--; ) {
@@ -1260,8 +1262,8 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
                }
                set_bit(Faulty, &rdev->flags);
                printk (KERN_ALERT
-                       "raid5: Disk failure on %s, disabling device."
-                       " Operation continuing on %d devices\n",
+                       "raid5: Disk failure on %s, disabling device.\n"
+                       "raid5: Operation continuing on %d devices.\n",
                        bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded);
        }
 }
@@ -1720,6 +1722,9 @@ handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
                                locked++;
                        }
                }
+               if (locked + 1 == disks)
+                       if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
+                               atomic_inc(&sh->raid_conf->pending_full_writes);
        } else {
                BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) ||
                        test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags)));
@@ -1759,7 +1764,7 @@ handle_write_operations5(struct stripe_head *sh, int rcw, int expand)
        locked++;
 
        pr_debug("%s: stripe %llu locked: %d pending: %lx\n",
-               __FUNCTION__, (unsigned long long)sh->sector,
+               __func__, (unsigned long long)sh->sector,
                locked, sh->ops.pending);
 
        return locked;
@@ -1947,6 +1952,9 @@ handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh,
                                        STRIPE_SECTORS, 0, 0);
        }
 
+       if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
+               if (atomic_dec_and_test(&conf->pending_full_writes))
+                       md_wakeup_thread(conf->mddev->thread);
 }
 
 /* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks
@@ -2149,6 +2157,10 @@ static void handle_completed_write_requests(raid5_conf_t *conf,
                                                        0);
                        }
                }
+
+       if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
+               if (atomic_dec_and_test(&conf->pending_full_writes))
+                       md_wakeup_thread(conf->mddev->thread);
 }
 
 static void handle_issuing_new_write_requests5(raid5_conf_t *conf,
@@ -2333,6 +2345,9 @@ static void handle_issuing_new_write_requests6(raid5_conf_t *conf,
                                s->locked++;
                                set_bit(R5_Wantwrite, &sh->dev[i].flags);
                        }
+               if (s->locked == disks)
+                       if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state))
+                               atomic_inc(&conf->pending_full_writes);
                /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */
                set_bit(STRIPE_INSYNC, &sh->state);
 
@@ -2592,6 +2607,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh,
        }
 }
 
+
 /*
  * handle_stripe - do things to a stripe.
  *
@@ -2617,6 +2633,7 @@ static void handle_stripe5(struct stripe_head *sh)
        struct stripe_head_state s;
        struct r5dev *dev;
        unsigned long pending = 0;
+       mdk_rdev_t *blocked_rdev = NULL;
 
        memset(&s, 0, sizeof(s));
        pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d "
@@ -2676,6 +2693,11 @@ static void handle_stripe5(struct stripe_head *sh)
                if (dev->written)
                        s.written++;
                rdev = rcu_dereference(conf->disks[i].rdev);
+               if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+                       blocked_rdev = rdev;
+                       atomic_inc(&rdev->nr_pending);
+                       break;
+               }
                if (!rdev || !test_bit(In_sync, &rdev->flags)) {
                        /* The ReadError flag will just be confusing now */
                        clear_bit(R5_ReadError, &dev->flags);
@@ -2690,6 +2712,11 @@ static void handle_stripe5(struct stripe_head *sh)
        }
        rcu_read_unlock();
 
+       if (unlikely(blocked_rdev)) {
+               set_bit(STRIPE_HANDLE, &sh->state);
+               goto unlock;
+       }
+
        if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending))
                sh->ops.count++;
 
@@ -2879,8 +2906,13 @@ static void handle_stripe5(struct stripe_head *sh)
        if (sh->ops.count)
                pending = get_stripe_work(sh);
 
+ unlock:
        spin_unlock(&sh->lock);
 
+       /* wait for this device to become unblocked */
+       if (unlikely(blocked_rdev))
+               md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
+
        if (pending)
                raid5_run_ops(sh, pending);
 
@@ -2897,6 +2929,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
        struct stripe_head_state s;
        struct r6_state r6s;
        struct r5dev *dev, *pdev, *qdev;
+       mdk_rdev_t *blocked_rdev = NULL;
 
        r6s.qd_idx = raid6_next_disk(pd_idx, disks);
        pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
@@ -2960,6 +2993,11 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
                if (dev->written)
                        s.written++;
                rdev = rcu_dereference(conf->disks[i].rdev);
+               if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
+                       blocked_rdev = rdev;
+                       atomic_inc(&rdev->nr_pending);
+                       break;
+               }
                if (!rdev || !test_bit(In_sync, &rdev->flags)) {
                        /* The ReadError flag will just be confusing now */
                        clear_bit(R5_ReadError, &dev->flags);
@@ -2974,6 +3012,11 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
                        set_bit(R5_Insync, &dev->flags);
        }
        rcu_read_unlock();
+
+       if (unlikely(blocked_rdev)) {
+               set_bit(STRIPE_HANDLE, &sh->state);
+               goto unlock;
+       }
        pr_debug("locked=%d uptodate=%d to_read=%d"
               " to_write=%d failed=%d failed_num=%d,%d\n",
               s.locked, s.uptodate, s.to_read, s.to_write, s.failed,
@@ -3079,8 +3122,13 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
            !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending))
                handle_stripe_expansion(conf, sh, &r6s);
 
+ unlock:
        spin_unlock(&sh->lock);
 
+       /* wait for this device to become unblocked */
+       if (unlikely(blocked_rdev))
+               md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);
+
        return_io(return_bi);
 
        for (i=disks; i-- ;) {
@@ -3094,6 +3142,8 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
                else
                        continue;
 
+               set_bit(STRIPE_IO_STARTED, &sh->state);
+
                bi = &sh->dev[i].req;
 
                bi->bi_rw = rw;
@@ -3164,7 +3214,7 @@ static void raid5_activate_delayed(raid5_conf_t *conf)
                        clear_bit(STRIPE_DELAYED, &sh->state);
                        if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                atomic_inc(&conf->preread_active_stripes);
-                       list_add_tail(&sh->lru, &conf->handle_list);
+                       list_add_tail(&sh->lru, &conf->hold_list);
                }
        } else
                blk_plug_device(conf->mddev->queue);
@@ -3442,6 +3492,58 @@ static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio)
        }
 }
 
+/* __get_priority_stripe - get the next stripe to process
+ *
+ * Full stripe writes are allowed to pass preread active stripes up until
+ * the bypass_threshold is exceeded.  In general the bypass_count
+ * increments when the handle_list is handled before the hold_list; however, it
+ * will not be incremented if STRIPE_IO_STARTED is set on the sampled stripe,
+ * signifying in-flight i/o.  The bypass_count will be reset when the
+ * head of the hold_list has changed, i.e. the head was promoted to the
+ * handle_list.
+ */
+static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf)
+{
+       struct stripe_head *sh;
+
+       pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
+                 __func__,
+                 list_empty(&conf->handle_list) ? "empty" : "busy",
+                 list_empty(&conf->hold_list) ? "empty" : "busy",
+                 atomic_read(&conf->pending_full_writes), conf->bypass_count);
+
+       if (!list_empty(&conf->handle_list)) {
+               sh = list_entry(conf->handle_list.next, typeof(*sh), lru);
+
+               if (list_empty(&conf->hold_list))
+                       conf->bypass_count = 0;
+               else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) {
+                       if (conf->hold_list.next == conf->last_hold)
+                               conf->bypass_count++;
+                       else {
+                               conf->last_hold = conf->hold_list.next;
+                               conf->bypass_count -= conf->bypass_threshold;
+                               if (conf->bypass_count < 0)
+                                       conf->bypass_count = 0;
+                       }
+               }
+       } else if (!list_empty(&conf->hold_list) &&
+                  ((conf->bypass_threshold &&
+                    conf->bypass_count > conf->bypass_threshold) ||
+                   atomic_read(&conf->pending_full_writes) == 0)) {
+               sh = list_entry(conf->hold_list.next,
+                               typeof(*sh), lru);
+               conf->bypass_count -= conf->bypass_threshold;
+               if (conf->bypass_count < 0)
+                       conf->bypass_count = 0;
+       } else
+               return NULL;
+
+       list_del_init(&sh->lru);
+       atomic_inc(&sh->count);
+       BUG_ON(atomic_read(&sh->count) != 1);
+       return sh;
+}
 
 static int make_request(struct request_queue *q, struct bio * bi)
 {
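
The comment block introducing __get_priority_stripe() above describes when a full-stripe write waiting on the hold_list may bypass the regular handle_list. The user-space sketch below condenses just that decision plus the bypass_count bookkeeping; the names (bypass_state, pick_next and so on) are invented for illustration, and only the counting rules are intended to mirror the kernel code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct bypass_state {
	int count;			/* conf->bypass_count in the kernel code */
	int threshold;			/* conf->bypass_threshold */
	const void *last_hold_head;	/* hold-list head seen on the last check */
};

enum pick { PICK_NONE, PICK_HANDLE, PICK_HOLD };

/* Decide which list to service next and update the bypass accounting. */
static enum pick pick_next(struct bypass_state *s,
			   const void *handle_head,	/* NULL if handle list empty */
			   const void *hold_head,	/* NULL if hold list empty */
			   bool handle_head_io_started,
			   bool pending_full_writes)
{
	if (handle_head) {
		if (!hold_head)
			s->count = 0;
		else if (!handle_head_io_started) {
			if (hold_head == s->last_hold_head)
				s->count++;		/* hold list bypassed once more */
			else {
				s->last_hold_head = hold_head;
				s->count -= s->threshold;
				if (s->count < 0)
					s->count = 0;
			}
		}
		return PICK_HANDLE;
	}
	if (hold_head &&
	    ((s->threshold && s->count > s->threshold) || !pending_full_writes)) {
		s->count -= s->threshold;
		if (s->count < 0)
			s->count = 0;
		return PICK_HOLD;
	}
	return PICK_NONE;
}

int main(void)
{
	struct bypass_state s = { 0, 1, NULL };
	int h1, h2;	/* stand-ins for list heads */
	enum pick p;
	int i;

	/* handle list busy three times while the same hold head keeps waiting */
	for (i = 0; i < 3; i++)
		pick_next(&s, &h1, &h2, false, true);

	/* handle list now empty: count (2) exceeds threshold (1), so bypass */
	p = pick_next(&s, NULL, &h2, false, true);
	printf("pick = %s, count = %d\n", p == PICK_HOLD ? "hold" : "other", s.count);
	return 0;
}
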
@@ -3914,7 +4016,6 @@ static void raid5d(mddev_t *mddev)
        handled = 0;
        spin_lock_irq(&conf->device_lock);
        while (1) {
-               struct list_head *first;
                struct bio *bio;
 
                if (conf->seq_flush != conf->seq_write) {
@@ -3936,17 +4037,12 @@ static void raid5d(mddev_t *mddev)
                        handled++;
                }
 
-               if (list_empty(&conf->handle_list)) {
+               sh = __get_priority_stripe(conf);
+
+               if (!sh) {
                        async_tx_issue_pending_all();
                        break;
                }
-
-               first = conf->handle_list.next;
-               sh = list_entry(first, struct stripe_head, lru);
-
-               list_del_init(first);
-               atomic_inc(&sh->count);
-               BUG_ON(atomic_read(&sh->count)!= 1);
                spin_unlock_irq(&conf->device_lock);
                
                handled++;
@@ -3978,15 +4074,13 @@ static ssize_t
 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
 {
        raid5_conf_t *conf = mddev_to_conf(mddev);
-       char *end;
-       int new;
+       unsigned long new;
        if (len >= PAGE_SIZE)
                return -EINVAL;
        if (!conf)
                return -ENODEV;
 
-       new = simple_strtoul(page, &end, 10);
-       if (!*page || (*end && *end != '\n') )
+       if (strict_strtoul(page, 10, &new))
                return -EINVAL;
        if (new <= 16 || new > 32768)
                return -EINVAL;
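
The parsing change above replaces the open-coded simple_strtoul() plus
trailing-character check with strict_strtoul(), which fails outright on
anything that is not a well-formed number (a trailing newline is tolerated).
A minimal sketch of the resulting store-handler shape, assuming the
2.6.25-era strict_strtoul() API (later kernels renamed it kstrtoul());
my_store and its bounds are illustrative, not from the patch:

    static ssize_t my_store(const char *page, size_t len)
    {
            unsigned long new;

            /* 0 on success, -EINVAL if the string is not a plain base-10 number */
            if (strict_strtoul(page, 10, &new))
                    return -EINVAL;
            if (new <= 16 || new > 32768)   /* same range check as stripe_cache_size */
                    return -EINVAL;
            /* ... apply the new value, then acknowledge the whole write ... */
            return len;
    }
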
@@ -4010,6 +4104,40 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
                                raid5_show_stripe_cache_size,
                                raid5_store_stripe_cache_size);
 
+static ssize_t
+raid5_show_preread_threshold(mddev_t *mddev, char *page)
+{
+       raid5_conf_t *conf = mddev_to_conf(mddev);
+       if (conf)
+               return sprintf(page, "%d\n", conf->bypass_threshold);
+       else
+               return 0;
+}
+
+static ssize_t
+raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
+{
+       raid5_conf_t *conf = mddev_to_conf(mddev);
+       unsigned long new;
+       if (len >= PAGE_SIZE)
+               return -EINVAL;
+       if (!conf)
+               return -ENODEV;
+
+       if (strict_strtoul(page, 10, &new))
+               return -EINVAL;
+       if (new > conf->max_nr_stripes)
+               return -EINVAL;
+       conf->bypass_threshold = new;
+       return len;
+}
+
+static struct md_sysfs_entry
+raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
+                                       S_IRUGO | S_IWUSR,
+                                       raid5_show_preread_threshold,
+                                       raid5_store_preread_threshold);
+
 static ssize_t
 stripe_cache_active_show(mddev_t *mddev, char *page)
 {
@@ -4026,6 +4154,7 @@ raid5_stripecache_active = __ATTR_RO(stripe_cache_active);
 static struct attribute *raid5_attrs[] =  {
        &raid5_stripecache_size.attr,
        &raid5_stripecache_active.attr,
+       &raid5_preread_bypass_threshold.attr,
        NULL,
 };
 static struct attribute_group raid5_attrs_group = {
@@ -4130,12 +4259,14 @@ static int run(mddev_t *mddev)
        init_waitqueue_head(&conf->wait_for_stripe);
        init_waitqueue_head(&conf->wait_for_overlap);
        INIT_LIST_HEAD(&conf->handle_list);
+       INIT_LIST_HEAD(&conf->hold_list);
        INIT_LIST_HEAD(&conf->delayed_list);
        INIT_LIST_HEAD(&conf->bitmap_list);
        INIT_LIST_HEAD(&conf->inactive_list);
        atomic_set(&conf->active_stripes, 0);
        atomic_set(&conf->preread_active_stripes, 0);
        atomic_set(&conf->active_aligned_reads, 0);
+       conf->bypass_threshold = BYPASS_THRESHOLD;
 
        pr_debug("raid5: run(%s) called.\n", mdname(mddev));
 
index 77a6e4bf503d2257ed754ea6072f84263f2654a8..21987e3dbe6c0ce4199fcc25f3e81506c337713f 100644 (file)
@@ -121,7 +121,8 @@ int __init raid6_select_algo(void)
                        j0 = jiffies;
                        while ( (j1 = jiffies) == j0 )
                                cpu_relax();
-                       while ( (jiffies-j1) < (1 << RAID6_TIME_JIFFIES_LG2) ) {
+                       while (time_before(jiffies,
+                                           j1 + (1<<RAID6_TIME_JIFFIES_LG2))) {
                                (*algo)->gen_syndrome(disks, PAGE_SIZE, dptrs);
                                perf++;
                        }
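
The benchmarking loop above now uses time_before() from <linux/jiffies.h>
instead of an open-coded jiffies comparison; the helper compares two jiffies
values through a signed difference, so the test stays correct even when the
counter wraps. A stand-alone sketch of that semantics (the local macro
mirrors the kernel definition; the values are illustrative):

    #include <stdio.h>

    /* same comparison the kernel's time_before() performs */
    #define time_before(a, b)  ((long)((a) - (b)) < 0)

    int main(void)
    {
            unsigned long j1 = (unsigned long)-3;   /* 3 ticks before the counter wraps */
            unsigned long end = j1 + 16;            /* deadline lands after the wrap */
            unsigned long now;

            for (now = j1; time_before(now, end); now++)
                    ;                               /* spins exactly 16 iterations */
            printf("stopped after %lu ticks\n", now - j1);  /* prints 16 */
            return 0;
    }
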
index 128bb9cd5755f57f7a8cab9895149edcb39680aa..ddf57e135c6c61633fa030dd623a67d5072491c0 100644 (file)
@@ -5,16 +5,20 @@
 menu "Multimedia devices"
        depends on HAS_IOMEM
 
+comment "Multimedia core support"
+
+#
+# V4L core and enabled API's
+#
+
 config VIDEO_DEV
        tristate "Video For Linux"
        ---help---
-         Support for audio/video capture and overlay devices and FM radio
-         cards. The exact capabilities of each device vary.
+         V4L core support for video capture and overlay devices, webcams and
+         AM/FM radio cards.
 
          This kernel includes support for the new Video for Linux Two API,
-         (V4L2) as well as the original system. Drivers and applications
-         need to be rewritten to use V4L2, but drivers for popular cards
-         and applications for most video capture functions already exist.
+         (V4L2).
 
          Additional info and docs are available on the web at
          <http://linuxtv.org>
@@ -36,8 +40,11 @@ config VIDEO_ALLOW_V4L1
        default VIDEO_DEV && VIDEO_V4L2_COMMON
        select VIDEO_V4L1_COMPAT
        ---help---
-         Enables a compatibility API used by most V4L2 devices to allow
-         its usage with legacy applications that supports only V4L1 api.
+         Enables drivers based on the legacy V4L1 API.
+
+         This API was developed for kernels 2.2 and 2.4, but it lacks
+         support for several video standards. Several in-kernel drivers
+         still depend on it.
 
          If you are unsure as to whether this is required, answer Y.
 
@@ -46,9 +53,8 @@ config VIDEO_V4L1_COMPAT
        depends on VIDEO_DEV
        default VIDEO_DEV
        ---help---
-         This api were developed to be used at Kernel 2.2 and 2.4, but
-         lacks support for several video standards. There are several
-         drivers at kernel that still depends on it.
+         Enables a compatibility API used by most V4L2 devices so that
+         they can be used with legacy applications that support only the
+         V4L1 API.
 
          Documentation for the original API is included in the file
          <Documentation/video4linux/API.html>.
@@ -58,135 +64,57 @@ config VIDEO_V4L1_COMPAT
 
          If you are unsure as to whether this is required, answer Y.
 
-config VIDEO_V4L2
-       tristate
-       depends on VIDEO_DEV && VIDEO_V4L2_COMMON
-       default VIDEO_DEV && VIDEO_V4L2_COMMON
-
-config VIDEO_V4L1
-       tristate
-       depends on VIDEO_DEV && VIDEO_V4L2_COMMON && VIDEO_ALLOW_V4L1
-       default VIDEO_DEV && VIDEO_V4L2_COMMON && VIDEO_ALLOW_V4L1
-
-source "drivers/media/video/Kconfig"
-
-source "drivers/media/radio/Kconfig"
-
-source "drivers/media/dvb/Kconfig"
-
-source "drivers/media/common/Kconfig"
+#
+# DVB Core
+#
 
-config VIDEO_TUNER
-       tristate
-       depends on I2C
-       select TUNER_XC2028 if !VIDEO_TUNER_CUSTOMIZE
-       select TUNER_MT20XX if !VIDEO_TUNER_CUSTOMIZE
-       select TUNER_TDA8290 if !VIDEO_TUNER_CUSTOMIZE
-       select TUNER_TEA5761 if !VIDEO_TUNER_CUSTOMIZE
-       select TUNER_TEA5767 if !VIDEO_TUNER_CUSTOMIZE
-       select TUNER_SIMPLE if !VIDEO_TUNER_CUSTOMIZE
-       select TUNER_TDA9887 if !VIDEO_TUNER_CUSTOMIZE
-
-menuconfig VIDEO_TUNER_CUSTOMIZE
-       bool "Customize analog tuner modules to build"
-       depends on VIDEO_TUNER
+config DVB_CORE
+       tristate "DVB for Linux"
+       depends on NET && INET
+       select CRC32
        help
-         This allows the user to deselect tuner drivers unnecessary
-         for their hardware from the build. Use this option with care
-         as deselecting tuner drivers which are in fact necessary will
-         result in V4L devices which cannot be tuned due to lack of
-         driver support
+         DVB core utility functions for device handling, software fallbacks etc.
 
-         If unsure say N.
-
-if VIDEO_TUNER_CUSTOMIZE
-
-config TUNER_XC2028
-       tristate "XCeive xc2028/xc3028 tuners"
-       depends on I2C && FW_LOADER
-       default m if VIDEO_TUNER_CUSTOMIZE
-       help
-         Say Y here to include support for the xc2028/xc3028 tuners.
+         Enable this if you own a DVB/ATSC adapter and want to use it, or
+         if you compile Linux for a digital set-top box.
 
-config TUNER_MT20XX
-       tristate "Microtune 2032 / 2050 tuners"
-       depends on I2C
-       default m if VIDEO_TUNER_CUSTOMIZE
-       help
-         Say Y here to include support for the MT2032 / MT2050 tuner.
-
-config TUNER_TDA8290
-       tristate "TDA 8290/8295 + 8275(a)/18271 tuner combo"
-       depends on I2C
-       select DVB_TDA827X
-       select DVB_TDA18271
-       default m if VIDEO_TUNER_CUSTOMIZE
-       help
-         Say Y here to include support for Philips TDA8290+8275(a) tuner.
+         Say Y when you have a DVB or an ATSC card and want to use it.
 
-config TUNER_TEA5761
-       tristate "TEA 5761 radio tuner (EXPERIMENTAL)"
-       depends on I2C && EXPERIMENTAL
-       default m if VIDEO_TUNER_CUSTOMIZE
-       help
-         Say Y here to include support for the Philips TEA5761 radio tuner.
+         API specs and user tools are available from <http://www.linuxtv.org/>.
 
-config TUNER_TEA5767
-       tristate "TEA 5767 radio tuner"
-       depends on I2C
-       default m if VIDEO_TUNER_CUSTOMIZE
-       help
-         Say Y here to include support for the Philips TEA5767 radio tuner.
+         Please report problems regarding this support to the LinuxDVB
+         mailing list.
 
-config TUNER_SIMPLE
-       tristate "Simple tuner support"
-       depends on I2C
-       select TUNER_TDA9887
-       default m if VIDEO_TUNER_CUSTOMIZE
-       help
-         Say Y here to include support for various simple tuners.
+         If unsure say N.
 
-config TUNER_TDA9887
-       tristate "TDA 9885/6/7 analog IF demodulator"
-       depends on I2C
-       default m if VIDEO_TUNER_CUSTOMIZE
-       help
-         Say Y here to include support for Philips TDA9885/6/7
-         analog IF demodulator.
+config VIDEO_MEDIA
+       tristate
+       default DVB_CORE || VIDEO_DEV
+       depends on DVB_CORE || VIDEO_DEV
 
-endif # VIDEO_TUNER_CUSTOMIZE
+comment "Multimedia drivers"
 
-config VIDEOBUF_GEN
-       tristate
+source "drivers/media/common/Kconfig"
 
-config VIDEOBUF_DMA_SG
-       depends on HAS_DMA
-       select VIDEOBUF_GEN
-       tristate
+#
+# Tuner drivers for DVB and V4L
+#
 
-config VIDEOBUF_VMALLOC
-       select VIDEOBUF_GEN
-       tristate
+source "drivers/media/common/tuners/Kconfig"
 
-config VIDEOBUF_DVB
-       tristate
-       select VIDEOBUF_GEN
-       select VIDEOBUF_DMA_SG
+#
+# Video/Radio/Hybrid adapters
+#
 
-config VIDEO_BTCX
-       tristate
+source "drivers/media/video/Kconfig"
 
-config VIDEO_IR_I2C
-       tristate
+source "drivers/media/radio/Kconfig"
 
-config VIDEO_IR
-       tristate
-       depends on INPUT
-       select VIDEO_IR_I2C if I2C
+#
+# DVB adapters
+#
 
-config VIDEO_TVEEPROM
-       tristate
-       depends on I2C
+source "drivers/media/dvb/Kconfig"
 
 config DAB
        boolean "DAB adapters"
index 7b8bb6949f5e877262312666a4991bf46c957c81..73f742c7e818428f1965d880a3c442e71154a957 100644 (file)
@@ -2,10 +2,10 @@
 # Makefile for the kernel multimedia device drivers.
 #
 
-obj-y := common/
-obj-y += video/
+obj-$(CONFIG_VIDEO_MEDIA) += common/
+
+# Hybrid devices live here, so this must be built if DVB and/or V4L is enabled
+obj-$(CONFIG_VIDEO_MEDIA) += video/
+
 obj-$(CONFIG_VIDEO_DEV) += radio/
 obj-$(CONFIG_DVB_CORE)  += dvb/
-ifeq ($(CONFIG_DVB_CORE),)
-  obj-$(CONFIG_VIDEO_TUNER)  += dvb/frontends/
-endif
index 8e744823064382309cbf35a20cfbb41c30842b7e..351b98b9b302a765169ef2aa97050bfa7deeab0d 100644 (file)
@@ -2,6 +2,7 @@ saa7146-objs    := saa7146_i2c.o saa7146_core.o
 saa7146_vv-objs := saa7146_fops.o saa7146_video.o saa7146_hlp.o saa7146_vbi.o
 ir-common-objs  := ir-functions.o ir-keymaps.o
 
+obj-y += tuners/
 obj-$(CONFIG_VIDEO_SAA7146) += saa7146.o
 obj-$(CONFIG_VIDEO_SAA7146_VV) += saa7146_vv.o
 obj-$(CONFIG_VIDEO_IR) += ir-common.o
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
new file mode 100644 (file)
index 0000000..5be85ff
--- /dev/null
@@ -0,0 +1,151 @@
+config MEDIA_ATTACH
+       bool "Load and attach frontend and tuner driver modules as needed"
+       depends on DVB_CORE
+       depends on MODULES
+       help
+         Remove the static dependency of DVB card drivers on all
+         frontend modules for all possible card variants. Instead,
+         allow the card drivers to only load the frontend modules
+         they require.
+
+         Also, the tuner module will automatically load the appropriate
+         tuner driver when needed, for analog mode.
+
+         This saves several kilobytes of memory.
+
+         Note: You will need module-init-tools v3.2 or later for this feature.
+
+         If unsure say Y.
+
+config MEDIA_TUNER
+       tristate
+       default DVB_CORE || VIDEO_DEV
+       depends on DVB_CORE || VIDEO_DEV
+       select MEDIA_TUNER_XC2028 if !MEDIA_TUNER_CUSTOMIZE
+       select MEDIA_TUNER_XC5000 if !MEDIA_TUNER_CUSTOMIZE
+       select MEDIA_TUNER_MT20XX if !MEDIA_TUNER_CUSTOMIZE
+       select MEDIA_TUNER_TDA8290 if !MEDIA_TUNER_CUSTOMIZE
+       select MEDIA_TUNER_TEA5761 if !MEDIA_TUNER_CUSTOMIZE
+       select MEDIA_TUNER_TEA5767 if !MEDIA_TUNER_CUSTOMIZE
+       select MEDIA_TUNER_SIMPLE if !MEDIA_TUNER_CUSTOMIZE
+       select MEDIA_TUNER_TDA9887 if !MEDIA_TUNER_CUSTOMIZE
+
+menuconfig MEDIA_TUNER_CUSTOMIZE
+       bool "Customize analog and hybrid tuner modules to build"
+       depends on MEDIA_TUNER
+       help
+         This allows the user to deselect tuner drivers unnecessary
+         for their hardware from the build. Use this option with care,
+         as deselecting tuner drivers which are in fact necessary will
+         result in V4L/DVB devices which cannot be tuned due to lack of
+         driver support.
+
+         If unsure say N.
+
+if MEDIA_TUNER_CUSTOMIZE
+
+config MEDIA_TUNER_SIMPLE
+       tristate "Simple tuner support"
+       depends on I2C
+       select MEDIA_TUNER_TDA9887
+       default m if MEDIA_TUNER_CUSTOMIZE
+       help
+         Say Y here to include support for various simple tuners.
+
+config MEDIA_TUNER_TDA8290
+       tristate "TDA 8290/8295 + 8275(a)/18271 tuner combo"
+       depends on I2C
+       select MEDIA_TUNER_TDA827X
+       select MEDIA_TUNER_TDA18271
+       default m if MEDIA_TUNER_CUSTOMIZE
+       help
+         Say Y here to include support for Philips TDA8290+8275(a) tuner.
+
+config MEDIA_TUNER_TDA827X
+       tristate "Philips TDA827X silicon tuner"
+       depends on DVB_CORE && I2C
+       default m if DVB_FE_CUSTOMISE
+       help
+         A DVB-T silicon tuner module. Say Y when you want to support this tuner.
+
+config MEDIA_TUNER_TDA18271
+       tristate "NXP TDA18271 silicon tuner"
+       depends on I2C
+       default m if DVB_FE_CUSTOMISE
+       help
+         A silicon tuner module. Say Y when you want to support this tuner.
+
+config MEDIA_TUNER_TDA9887
+       tristate "TDA 9885/6/7 analog IF demodulator"
+       depends on I2C
+       default m if MEDIA_TUNER_CUSTOMIZE
+       help
+         Say Y here to include support for Philips TDA9885/6/7
+         analog IF demodulator.
+
+config MEDIA_TUNER_TEA5761
+       tristate "TEA 5761 radio tuner (EXPERIMENTAL)"
+       depends on I2C && EXPERIMENTAL
+       default m if MEDIA_TUNER_CUSTOMIZE
+       help
+         Say Y here to include support for the Philips TEA5761 radio tuner.
+
+config MEDIA_TUNER_TEA5767
+       tristate "TEA 5767 radio tuner"
+       depends on I2C
+       default m if MEDIA_TUNER_CUSTOMIZE
+       help
+         Say Y here to include support for the Philips TEA5767 radio tuner.
+
+config MEDIA_TUNER_MT20XX
+       tristate "Microtune 2032 / 2050 tuners"
+       depends on I2C
+       default m if MEDIA_TUNER_CUSTOMIZE
+       help
+         Say Y here to include support for the MT2032 / MT2050 tuner.
+
+config MEDIA_TUNER_MT2060
+       tristate "Microtune MT2060 silicon IF tuner"
+       depends on I2C
+       default m if DVB_FE_CUSTOMISE
+       help
+         A driver for the silicon IF tuner MT2060 from Microtune.
+
+config MEDIA_TUNER_MT2266
+       tristate "Microtune MT2266 silicon tuner"
+       depends on I2C
+       default m if DVB_FE_CUSTOMISE
+       help
+         A driver for the silicon baseband tuner MT2266 from Microtune.
+
+config MEDIA_TUNER_MT2131
+       tristate "Microtune MT2131 silicon tuner"
+       depends on I2C
+       default m if DVB_FE_CUSTOMISE
+       help
+         A driver for the silicon baseband tuner MT2131 from Microtune.
+
+config MEDIA_TUNER_QT1010
+       tristate "Quantek QT1010 silicon tuner"
+       depends on DVB_CORE && I2C
+       default m if DVB_FE_CUSTOMISE
+       help
+         A driver for the silicon tuner QT1010 from Quantek.
+
+config MEDIA_TUNER_XC2028
+       tristate "XCeive xc2028/xc3028 tuners"
+       depends on I2C && FW_LOADER
+       default m if MEDIA_TUNER_CUSTOMIZE
+       help
+         Say Y here to include support for the xc2028/xc3028 tuners.
+
+config MEDIA_TUNER_XC5000
+       tristate "Xceive XC5000 silicon tuner"
+       depends on I2C
+       default m if DVB_FE_CUSTOMISE
+       help
+         A driver for the silicon tuner XC5000 from Xceive.
+         This device is only used inside a SiP called together with a
+         demodulator for now.
+
+endif # MEDIA_TUNER_CUSTOMIZE
diff --git a/drivers/media/common/tuners/Makefile b/drivers/media/common/tuners/Makefile
new file mode 100644 (file)
index 0000000..236d993
--- /dev/null
@@ -0,0 +1,25 @@
+#
+# Makefile for common V4L/DVB tuners
+#
+
+tda18271-objs := tda18271-maps.o tda18271-common.o tda18271-fe.o
+
+obj-$(CONFIG_MEDIA_TUNER_XC2028) += tuner-xc2028.o
+obj-$(CONFIG_MEDIA_TUNER_SIMPLE) += tuner-simple.o
+# tuner-types will be merged into tuner-simple, in the future
+obj-$(CONFIG_MEDIA_TUNER_SIMPLE) += tuner-types.o
+obj-$(CONFIG_MEDIA_TUNER_MT20XX) += mt20xx.o
+obj-$(CONFIG_MEDIA_TUNER_TDA8290) += tda8290.o
+obj-$(CONFIG_MEDIA_TUNER_TEA5767) += tea5767.o
+obj-$(CONFIG_MEDIA_TUNER_TEA5761) += tea5761.o
+obj-$(CONFIG_MEDIA_TUNER_TDA9887) += tda9887.o
+obj-$(CONFIG_MEDIA_TUNER_TDA827X) += tda827x.o
+obj-$(CONFIG_MEDIA_TUNER_TDA18271) += tda18271.o
+obj-$(CONFIG_MEDIA_TUNER_XC5000) += xc5000.o
+obj-$(CONFIG_MEDIA_TUNER_MT2060) += mt2060.o
+obj-$(CONFIG_MEDIA_TUNER_MT2266) += mt2266.o
+obj-$(CONFIG_MEDIA_TUNER_QT1010) += qt1010.o
+obj-$(CONFIG_MEDIA_TUNER_MT2131) += mt2131.o
+
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
+EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
similarity index 90%
rename from drivers/media/dvb/frontends/mt2060.h
rename to drivers/media/common/tuners/mt2060.h
index acba0058f519bf00403a78b56b369de56f8c6a9c..cb60caffb6b6ae2c1ad4a10bf7eb95a474ef458a 100644 (file)
@@ -30,7 +30,7 @@ struct mt2060_config {
        u8 clock_out; /* 0 = off, 1 = CLK/4, 2 = CLK/2, 3 = CLK/1 */
 };
 
-#if defined(CONFIG_DVB_TUNER_MT2060) || (defined(CONFIG_DVB_TUNER_MT2060_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_MT2060) || (defined(CONFIG_MEDIA_TUNER_MT2060_MODULE) && defined(MODULE))
 extern struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2060_config *cfg, u16 if1);
 #else
 static inline struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2060_config *cfg, u16 if1)
@@ -38,6 +38,6 @@ static inline struct dvb_frontend * mt2060_attach(struct dvb_frontend *fe, struc
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
        return NULL;
 }
-#endif // CONFIG_DVB_TUNER_MT2060
+#endif // CONFIG_MEDIA_TUNER_MT2060
 
 #endif
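
The header keeps this #if/#else pair so bridge drivers can call
mt2060_attach() unconditionally: when the tuner driver is not built, the
static inline stub above only warns and returns NULL. A hypothetical caller
might look like the sketch below (my_bridge, its fields and the IF1 value
are invented for illustration):

    static int my_bridge_attach_tuner(struct my_bridge *dev,
                                      struct mt2060_config *cfg)
    {
            /* works whether the real driver or the Kconfig stub is compiled in */
            if (mt2060_attach(dev->fe, dev->i2c_adap, cfg, 1220) == NULL)
                    return -ENODEV; /* no tuner found, or driver disabled by Kconfig */
            return 0;
    }
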
similarity index 91%
rename from drivers/media/video/mt20xx.h
rename to drivers/media/common/tuners/mt20xx.h
index aa848e14ce5e75bbf43b8edecc057fdc209af33b..259553a24903c3a9a1b59ca40ab7a8f936149fc3 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
-#if defined(CONFIG_TUNER_MT20XX) || (defined(CONFIG_TUNER_MT20XX_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_MT20XX) || (defined(CONFIG_MEDIA_TUNER_MT20XX_MODULE) && defined(MODULE))
 extern struct dvb_frontend *microtune_attach(struct dvb_frontend *fe,
                                             struct i2c_adapter* i2c_adap,
                                             u8 i2c_addr);
similarity index 91%
rename from drivers/media/dvb/frontends/mt2131.h
rename to drivers/media/common/tuners/mt2131.h
index 606d8576bc9885d2a7d8a9c504d41efcb860be76..cd8376f6f7b48ac3fd812e7ee7f06eb8cf037a3e 100644 (file)
@@ -30,7 +30,7 @@ struct mt2131_config {
        u8 clock_out; /* 0 = off, 1 = CLK/4, 2 = CLK/2, 3 = CLK/1 */
 };
 
-#if defined(CONFIG_DVB_TUNER_MT2131) || (defined(CONFIG_DVB_TUNER_MT2131_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_MT2131) || (defined(CONFIG_MEDIA_TUNER_MT2131_MODULE) && defined(MODULE))
 extern struct dvb_frontend* mt2131_attach(struct dvb_frontend *fe,
                                          struct i2c_adapter *i2c,
                                          struct mt2131_config *cfg,
@@ -44,7 +44,7 @@ static inline struct dvb_frontend* mt2131_attach(struct dvb_frontend *fe,
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
        return NULL;
 }
-#endif /* CONFIG_DVB_TUNER_MT2131 */
+#endif /* CONFIG_MEDIA_TUNER_MT2131 */
 
 #endif /* __MT2131_H__ */
 
similarity index 88%
rename from drivers/media/dvb/frontends/mt2266.h
rename to drivers/media/common/tuners/mt2266.h
index c5113efe333c745a0e66919827a88b42073f3719..4d083882d044a6a6dcd96bb115651c7ad956ba50 100644 (file)
@@ -24,7 +24,7 @@ struct mt2266_config {
        u8 i2c_address;
 };
 
-#if defined(CONFIG_DVB_TUNER_MT2266) || (defined(CONFIG_DVB_TUNER_MT2266_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_MT2266) || (defined(CONFIG_MEDIA_TUNER_MT2266_MODULE) && defined(MODULE))
 extern struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2266_config *cfg);
 #else
 static inline struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struct i2c_adapter *i2c, struct mt2266_config *cfg)
@@ -32,6 +32,6 @@ static inline struct dvb_frontend * mt2266_attach(struct dvb_frontend *fe, struc
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
        return NULL;
 }
-#endif // CONFIG_DVB_TUNER_MT2266
+#endif // CONFIG_MEDIA_TUNER_MT2266
 
 #endif
similarity index 91%
rename from drivers/media/dvb/frontends/qt1010.h
rename to drivers/media/common/tuners/qt1010.h
index cff6a7ca53801cce4335b41a24c8f29f7cf93a0a..807fb7b6146b94984fc597298705e7b04e7cb678 100644 (file)
@@ -36,7 +36,7 @@ struct qt1010_config {
  * @param cfg  tuner hw based configuration
  * @return fe  pointer on success, NULL on failure
  */
-#if defined(CONFIG_DVB_TUNER_QT1010) || (defined(CONFIG_DVB_TUNER_QT1010_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_QT1010) || (defined(CONFIG_MEDIA_TUNER_QT1010_MODULE) && defined(MODULE))
 extern struct dvb_frontend *qt1010_attach(struct dvb_frontend *fe,
                                          struct i2c_adapter *i2c,
                                          struct qt1010_config *cfg);
@@ -48,6 +48,6 @@ static inline struct dvb_frontend *qt1010_attach(struct dvb_frontend *fe,
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
        return NULL;
 }
-#endif // CONFIG_DVB_TUNER_QT1010
+#endif // CONFIG_MEDIA_TUNER_QT1010
 
 #endif
similarity index 96%
rename from drivers/media/dvb/frontends/tda18271.h
rename to drivers/media/common/tuners/tda18271.h
index 0e7af8d05a38cb91c074b7310bad34b93ad33073..7db9831c0cb073a0b75bf04d7bd0e745ec92f10b 100644 (file)
@@ -81,7 +81,7 @@ struct tda18271_config {
        unsigned int small_i2c:1;
 };
 
-#if defined(CONFIG_DVB_TDA18271) || (defined(CONFIG_DVB_TDA18271_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_TDA18271) || (defined(CONFIG_MEDIA_TUNER_TDA18271_MODULE) && defined(MODULE))
 extern struct dvb_frontend *tda18271_attach(struct dvb_frontend *fe, u8 addr,
                                            struct i2c_adapter *i2c,
                                            struct tda18271_config *cfg);
similarity index 93%
rename from drivers/media/dvb/frontends/tda827x.h
rename to drivers/media/common/tuners/tda827x.h
index b73c23570dab67e094482ebf31b77dcea29d4fe3..7850a9a1dc8f42a04a0e2c19496b894aeb8611a6 100644 (file)
@@ -51,7 +51,7 @@ struct tda827x_config
  * @param cfg optional callback function pointers.
  * @return FE pointer on success, NULL on failure.
  */
-#if defined(CONFIG_DVB_TDA827X) || (defined(CONFIG_DVB_TDA827X_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_TDA827X) || (defined(CONFIG_MEDIA_TUNER_TDA827X_MODULE) && defined(MODULE))
 extern struct dvb_frontend* tda827x_attach(struct dvb_frontend *fe, int addr,
                                           struct i2c_adapter *i2c,
                                           struct tda827x_config *cfg);
@@ -64,6 +64,6 @@ static inline struct dvb_frontend* tda827x_attach(struct dvb_frontend *fe,
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
        return NULL;
 }
-#endif // CONFIG_DVB_TDA827X
+#endif // CONFIG_MEDIA_TUNER_TDA827X
 
 #endif // __DVB_TDA827X_H__
similarity index 99%
rename from drivers/media/video/tda8290.c
rename to drivers/media/common/tuners/tda8290.c
index 0ebb5b525e576da86f021b74b669f5493e4886db..91204d3f282dfc5de1c02c7c9b9ad6c62959ff13 100644 (file)
@@ -578,16 +578,16 @@ static int tda829x_find_tuner(struct dvb_frontend *fe)
 
        if ((data == 0x83) || (data == 0x84)) {
                priv->ver |= TDA18271;
-               tda18271_attach(fe, priv->tda827x_addr,
-                               priv->i2c_props.adap,
-                               &tda829x_tda18271_config);
+               dvb_attach(tda18271_attach, fe, priv->tda827x_addr,
+                          priv->i2c_props.adap, &tda829x_tda18271_config);
        } else {
                if ((data & 0x3c) == 0)
                        priv->ver |= TDA8275;
                else
                        priv->ver |= TDA8275A;
 
-               tda827x_attach(fe, priv->tda827x_addr, priv->i2c_props.adap, &priv->cfg);
+               dvb_attach(tda827x_attach, fe, priv->tda827x_addr,
+                          priv->i2c_props.adap, &priv->cfg);
                priv->cfg.switch_addr = priv->i2c_props.addr;
        }
        if (fe->ops.tuner_ops.init)
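
The hunk above switches the direct tda18271_attach()/tda827x_attach() calls
to dvb_attach(). With CONFIG_MEDIA_ATTACH set, that wrapper resolves the
attach symbol via symbol_request(), loading the module that exports it on
demand, and returns NULL if the symbol or the hardware is unavailable; with
the option off it reduces to a plain function call. The calling pattern,
sketched with placeholder fe/i2c/cfg variables and an illustrative I2C
address:

    if (dvb_attach(tda18271_attach, fe, 0x60, i2c, &cfg) == NULL)
            printk(KERN_WARNING "tda18271: tuner not attached\n");
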
similarity index 94%
rename from drivers/media/video/tda8290.h
rename to drivers/media/common/tuners/tda8290.h
index d3bbf276a469165354e82770764feec9d619bec6..aa074f3f0c07f247db9e74a6fe4e636488ad0d47 100644 (file)
@@ -29,7 +29,7 @@ struct tda829x_config {
 #define TDA829X_DONT_PROBE  1
 };
 
-#if defined(CONFIG_TUNER_TDA8290) || (defined(CONFIG_TUNER_TDA8290_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_TDA8290) || (defined(CONFIG_MEDIA_TUNER_TDA8290_MODULE) && defined(MODULE))
 extern int tda829x_probe(struct i2c_adapter *i2c_adap, u8 i2c_addr);
 
 extern struct dvb_frontend *tda829x_attach(struct dvb_frontend *fe,
similarity index 92%
rename from drivers/media/video/tda9887.h
rename to drivers/media/common/tuners/tda9887.h
index be49dcbfc70e1473056823b24e7a7f0d7bec56d2..acc419e8c4fcfb858e66b7e5c00449d9d80f40e8 100644 (file)
@@ -21,7 +21,7 @@
 #include "dvb_frontend.h"
 
 /* ------------------------------------------------------------------------ */
-#if defined(CONFIG_TUNER_TDA9887) || (defined(CONFIG_TUNER_TDA9887_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_TDA9887) || (defined(CONFIG_MEDIA_TUNER_TDA9887_MODULE) && defined(MODULE))
 extern struct dvb_frontend *tda9887_attach(struct dvb_frontend *fe,
                                           struct i2c_adapter *i2c_adap,
                                           u8 i2c_addr);
similarity index 93%
rename from drivers/media/video/tea5761.h
rename to drivers/media/common/tuners/tea5761.h
index 8eb62722b9885f7d4253200a128401b416736c65..2e2ff82c95a4f7dd81a337980dcfda235aa1c669 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
-#if defined(CONFIG_TUNER_TEA5761) || (defined(CONFIG_TUNER_TEA5761_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_TEA5761) || (defined(CONFIG_MEDIA_TUNER_TEA5761_MODULE) && defined(MODULE))
 extern int tea5761_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr);
 
 extern struct dvb_frontend *tea5761_attach(struct dvb_frontend *fe,
similarity index 94%
rename from drivers/media/video/tea5767.h
rename to drivers/media/common/tuners/tea5767.h
index 7b547c092e25215bbd81505b8635c7ec907611bc..d30ab1b483de68daeb70944bc748400ecac0f25f 100644 (file)
@@ -39,7 +39,7 @@ struct tea5767_ctrl {
        enum tea5767_xtal       xtal_freq;
 };
 
-#if defined(CONFIG_TUNER_TEA5767) || (defined(CONFIG_TUNER_TEA5767_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_TEA5767) || (defined(CONFIG_MEDIA_TUNER_TEA5767_MODULE) && defined(MODULE))
 extern int tea5767_autodetection(struct i2c_adapter* i2c_adap, u8 i2c_addr);
 
 extern struct dvb_frontend *tea5767_attach(struct dvb_frontend *fe,
similarity index 92%
rename from drivers/media/video/tuner-simple.h
rename to drivers/media/common/tuners/tuner-simple.h
index e46cf0121e030a23e314190116567550f9c7ff7e..381fa5d35a9bb6617b6d610b780552c3da442af1 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/i2c.h>
 #include "dvb_frontend.h"
 
-#if defined(CONFIG_TUNER_SIMPLE) || (defined(CONFIG_TUNER_SIMPLE_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_SIMPLE) || (defined(CONFIG_MEDIA_TUNER_SIMPLE_MODULE) && defined(MODULE))
 extern struct dvb_frontend *simple_tuner_attach(struct dvb_frontend *fe,
                                                struct i2c_adapter *i2c_adap,
                                                u8 i2c_addr,
similarity index 93%
rename from drivers/media/video/tuner-xc2028.h
rename to drivers/media/common/tuners/tuner-xc2028.h
index fc2f132a5541279e9c1b6089535dd42fd21fbb69..216025cf5d4bfdbe430851636818c69789987cab 100644 (file)
@@ -47,7 +47,7 @@ struct xc2028_config {
 #define XC2028_TUNER_RESET     0
 #define XC2028_RESET_CLK       1
 
-#if defined(CONFIG_TUNER_XC2028) || (defined(CONFIG_TUNER_XC2028_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_XC2028) || (defined(CONFIG_MEDIA_TUNER_XC2028_MODULE) && defined(MODULE))
 extern struct dvb_frontend *xc2028_attach(struct dvb_frontend *fe,
                                          struct xc2028_config *cfg);
 #else
similarity index 92%
rename from drivers/media/dvb/frontends/xc5000.h
rename to drivers/media/common/tuners/xc5000.h
index b890883a0cdc224a8f888000564b7eb4f63635bc..0ee80f9d19b8fa8621acb2f919650cc266925ae2 100644 (file)
@@ -45,8 +45,8 @@ struct xc5000_config {
 /* xc5000 callback command */
 #define XC5000_TUNER_RESET             0
 
-#if defined(CONFIG_DVB_TUNER_XC5000) || \
-    (defined(CONFIG_DVB_TUNER_XC5000_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_XC5000) || \
+    (defined(CONFIG_MEDIA_TUNER_XC5000_MODULE) && defined(MODULE))
 extern struct dvb_frontend* xc5000_attach(struct dvb_frontend *fe,
                                          struct i2c_adapter *i2c,
                                          struct xc5000_config *cfg);
@@ -58,6 +58,6 @@ static inline struct dvb_frontend* xc5000_attach(struct dvb_frontend *fe,
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
        return NULL;
 }
-#endif // CONFIG_DVB_TUNER_XC5000
+#endif // CONFIG_MEDIA_TUNER_XC5000
 
 #endif // __XC5000_H__
index 03ef88acd9b826bbafc60ccad5a12be6e0302ceb..7b21b49f1945ad56430f6bf16ac99bdaed89f52b 100644 (file)
@@ -1,9 +1,7 @@
 #
-# Multimedia device configuration
+# DVB device configuration
 #
 
-source "drivers/media/dvb/dvb-core/Kconfig"
-
 menuconfig DVB_CAPTURE_DRIVERS
        bool "DVB/ATSC adapters"
        depends on DVB_CORE
index 6ec5afba1ca7ee061779067f22b36ad352d43b96..73dc2ee9b014021a5681d83be1badd96802010bc 100644 (file)
@@ -9,7 +9,7 @@ config DVB_B2C2_FLEXCOP
        select DVB_STV0297 if !DVB_FE_CUSTOMISE
        select DVB_BCM3510 if !DVB_FE_CUSTOMISE
        select DVB_LGDT330X if !DVB_FE_CUSTOMISE
-       select TUNER_SIMPLE if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE
        select DVB_S5H1420 if !DVB_FE_CUSTOMISE
        select DVB_TUNER_ITD1000 if !DVB_FE_CUSTOMISE
        select DVB_ISL6421 if !DVB_FE_CUSTOMISE
index 870e2848c2962418b343ef2ae4ae5fcc561c0a5d..d9db066f9854f19a7572848b8834c0c1c29ee6a0 100644 (file)
@@ -14,4 +14,4 @@ b2c2-flexcop-usb-objs = flexcop-usb.o
 obj-$(CONFIG_DVB_B2C2_FLEXCOP_USB) += b2c2-flexcop-usb.o
 
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
-EXTRA_CFLAGS += -Idrivers/media/video/
+EXTRA_CFLAGS += -Idrivers/media/common/tuners/
index 902c762e0b7fb8867bc1d62ea65f2e2a4ee5e9a3..d1239b8342f81cdb13b5642fedcf463655fc1cb6 100644 (file)
@@ -8,7 +8,7 @@ config DVB_BT8XX
        select DVB_OR51211 if !DVB_FE_CUSTOMISE
        select DVB_LGDT330X if !DVB_FE_CUSTOMISE
        select DVB_ZL10353 if !DVB_FE_CUSTOMISE
-       select TUNER_SIMPLE if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE
        select FW_LOADER
        help
          Support for PCI cards based on the Bt8xx PCI bridge. Examples are
index 9d3e68b5d6ebc22e9b8fb4adfdbf42aa50784d20..d98f1d49ffa89bab91f5c936189b7adc445f1c80 100644 (file)
@@ -3,4 +3,4 @@ obj-$(CONFIG_DVB_BT8XX) += bt878.o dvb-bt8xx.o dst.o dst_ca.o
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
 EXTRA_CFLAGS += -Idrivers/media/video/bt8xx
-EXTRA_CFLAGS += -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
index 75711bde23ad3675fe82f89bf6f4edf68d7db644..a7637562e74200943bb846dddf81a8318b8b9dc4 100644 (file)
@@ -1714,7 +1714,7 @@ static void dst_release(struct dvb_frontend *fe)
        struct dst_state *state = fe->demodulator_priv;
        if (state->dst_ca) {
                dvb_unregister_device(state->dst_ca);
-#ifdef CONFIG_DVB_CORE_ATTACH
+#ifdef CONFIG_MEDIA_ATTACH
                symbol_put(dst_ca_attach);
 #endif
        }
diff --git a/drivers/media/dvb/dvb-core/Kconfig b/drivers/media/dvb/dvb-core/Kconfig
deleted file mode 100644 (file)
index e3e6839..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-config DVB_CORE
-       tristate "DVB for Linux"
-       depends on NET && INET
-       select CRC32
-       help
-         Support Digital Video Broadcasting hardware.  Enable this if you
-         own a DVB adapter and want to use it or if you compile Linux for
-         a digital SetTopBox.
-
-         DVB core utility functions for device handling, software fallbacks etc.
-         Say Y when you have a DVB card and want to use it. Say Y if your want
-         to build your drivers outside the kernel, but need the DVB core. All
-         in-kernel drivers will select this automatically if needed.
-
-         API specs and user tools are available from <http://www.linuxtv.org/>.
-
-         Please report problems regarding this driver to the LinuxDVB
-         mailing list.
-
-         If unsure say N.
-
-config DVB_CORE_ATTACH
-       bool "Load and attach frontend modules as needed"
-       depends on DVB_CORE
-       depends on MODULES
-       help
-         Remove the static dependency of DVB card drivers on all
-         frontend modules for all possible card variants. Instead,
-         allow the card drivers to only load the frontend modules
-         they require. This saves several KBytes of memory.
-
-         Note: You will need module-init-tools v3.2 or later for this feature.
-
-         If unsure say Y.
index 2dddd08c5445383cfe1b94352c94bda403147130..8cbdb218952f768fc9128552e64c8a3327370132 100644 (file)
@@ -1189,7 +1189,7 @@ int dvb_unregister_frontend(struct dvb_frontend* fe)
 }
 EXPORT_SYMBOL(dvb_unregister_frontend);
 
-#ifdef CONFIG_DVB_CORE_ATTACH
+#ifdef CONFIG_MEDIA_ATTACH
 void dvb_frontend_detach(struct dvb_frontend* fe)
 {
        void *ptr;
index 5f9a737c6de194065d3e5f7e4de06e0407d8c322..89d12dc477a7b4f4017803a86d1bfaa6ef46b7ce 100644 (file)
@@ -115,7 +115,7 @@ extern int dvb_usercopy(struct inode *inode, struct file *file,
                            unsigned int cmd, void *arg));
 
 /** generic DVB attach function. */
-#ifdef CONFIG_DVB_CORE_ATTACH
+#ifdef CONFIG_MEDIA_ATTACH
 #define dvb_attach(FUNCTION, ARGS...) ({ \
        void *__r = NULL; \
        typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
index 3c8493d2026dd329a9e20c1c6faaf2d9581b94b3..4c1cff9feb2e69ed8e3acc57b536aecc5e5804e1 100644 (file)
@@ -25,7 +25,7 @@ config DVB_USB_A800
        tristate "AVerMedia AverTV DVB-T USB 2.0 (A800)"
        depends on DVB_USB
        select DVB_DIB3000MC
-       select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE
        select DVB_PLL if !DVB_FE_CUSTOMISE
        help
          Say Y here to support the AVerMedia AverTV DVB-T USB 2.0 (A800) receiver.
@@ -35,7 +35,7 @@ config DVB_USB_DIBUSB_MB
        depends on DVB_USB
        select DVB_PLL if !DVB_FE_CUSTOMISE
        select DVB_DIB3000MB
-       select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE
        help
          Support for USB 1.1 and 2.0 DVB-T receivers based on reference designs made by
          DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-B demodulator.
@@ -56,7 +56,7 @@ config DVB_USB_DIBUSB_MC
        tristate "DiBcom USB DVB-T devices (based on the DiB3000M-C/P) (see help for device list)"
        depends on DVB_USB
        select DVB_DIB3000MC
-       select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE
        help
          Support for USB2.0 DVB-T receivers based on reference designs made by
          DiBcom (<http://www.dibcom.fr>) equipped with a DiB3000M-C/P demodulator.
@@ -73,8 +73,8 @@ config DVB_USB_DIB0700
        select DVB_DIB7000P
        select DVB_DIB7000M
        select DVB_DIB3000MC
-       select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
-       select DVB_TUNER_MT2266 if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_MT2266 if !DVB_FE_CUSTOMISE
        select DVB_TUNER_DIB0070
        help
          Support for USB2.0/1.1 DVB receivers based on the DiB0700 USB bridge. The
@@ -93,7 +93,7 @@ config DVB_USB_UMT_010
        depends on DVB_USB
        select DVB_PLL if !DVB_FE_CUSTOMISE
        select DVB_DIB3000MC
-       select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE
        help
          Say Y here to support the HanfTek UMT-010 USB2.0 stick-sized DVB-T receiver.
 
@@ -105,7 +105,7 @@ config DVB_USB_CXUSB
        select DVB_LGDT330X if !DVB_FE_CUSTOMISE
        select DVB_MT352 if !DVB_FE_CUSTOMISE
        select DVB_ZL10353 if !DVB_FE_CUSTOMISE
-       select TUNER_SIMPLE if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE
        help
          Say Y here to support the Conexant USB2.0 hybrid reference design.
          Currently, only DVB and ATSC modes are supported, analog mode
@@ -118,7 +118,7 @@ config DVB_USB_M920X
        tristate "Uli m920x DVB-T USB2.0 support"
        depends on DVB_USB
        select DVB_MT352 if !DVB_FE_CUSTOMISE
-       select DVB_TUNER_QT1010 if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE
        help
          Say Y here to support the MSI Mega Sky 580 USB2.0 DVB-T receiver.
          Currently, only devices with a product id of
@@ -129,7 +129,7 @@ config DVB_USB_GL861
        tristate "Genesys Logic GL861 USB2.0 support"
        depends on DVB_USB
        select DVB_ZL10353 if !DVB_FE_CUSTOMISE
-       select DVB_TUNER_QT1010 if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE
        help
          Say Y here to support the MSI Megasky 580 (55801) DVB-T USB2.0
          receiver with USB ID 0db0:5581.
@@ -138,7 +138,7 @@ config DVB_USB_AU6610
        tristate "Alcor Micro AU6610 USB2.0 support"
        depends on DVB_USB
        select DVB_ZL10353 if !DVB_FE_CUSTOMISE
-       select DVB_TUNER_QT1010 if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE
        help
          Say Y here to support the Sigmatek DVB-110 DVB-T USB2.0 receiver.
 
@@ -190,7 +190,7 @@ config DVB_USB_NOVA_T_USB2
        tristate "Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 support"
        depends on DVB_USB
        select DVB_DIB3000MC
-       select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE
        select DVB_PLL if !DVB_FE_CUSTOMISE
        help
          Say Y here to support the Hauppauge WinTV-NOVA-T usb2 DVB-T USB2.0 receiver.
@@ -227,8 +227,8 @@ config DVB_USB_OPERA1
 config DVB_USB_AF9005
        tristate "Afatech AF9005 DVB-T USB1.1 support"
        depends on DVB_USB && EXPERIMENTAL
-       select DVB_TUNER_MT2060 if !DVB_FE_CUSTOMISE
-       select DVB_TUNER_QT1010 if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_MT2060 if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_QT1010 if !DVB_FE_CUSTOMISE
        help
          Say Y here to support the Afatech AF9005 based DVB-T USB1.1 receiver
          and the TerraTec Cinergy T USB XE (Rev.1)
index 60a910052c16fa0c0488214b5daadd36782e38a5..c6511a6c0ab82dbd9d429d31991c72657ff74a18 100644 (file)
@@ -63,5 +63,5 @@ obj-$(CONFIG_DVB_USB_AF9005_REMOTE) += dvb-usb-af9005-remote.o
 
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/ -Idrivers/media/dvb/frontends/
 # due to tuner-xc3028
-EXTRA_CFLAGS += -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
 
index f5fceb3cdb3cb0d71b34e7fa8a1bc272cc04f65e..6d2384605927190abaa5394672fd191fb9a6a6e6 100644 (file)
@@ -15,22 +15,36 @@ config DVB_FE_CUSTOMISE
 comment "DVB-S (satellite) frontends"
        depends on DVB_CORE
 
-config DVB_STV0299
-       tristate "ST STV0299 based"
+config DVB_CX24110
+       tristate "Conexant CX24110 based"
        depends on DVB_CORE && I2C
        default m if DVB_FE_CUSTOMISE
        help
          A DVB-S tuner module. Say Y when you want to support this frontend.
 
-config DVB_CX24110
-       tristate "Conexant CX24110 based"
+config DVB_CX24123
+       tristate "Conexant CX24123 based"
        depends on DVB_CORE && I2C
        default m if DVB_FE_CUSTOMISE
        help
          A DVB-S tuner module. Say Y when you want to support this frontend.
 
-config DVB_CX24123
-       tristate "Conexant CX24123 based"
+config DVB_MT312
+       tristate "Zarlink VP310/MT312 based"
+       depends on DVB_CORE && I2C
+       default m if DVB_FE_CUSTOMISE
+       help
+         A DVB-S tuner module. Say Y when you want to support this frontend.
+
+config DVB_S5H1420
+       tristate "Samsung S5H1420 based"
+       depends on DVB_CORE && I2C
+       default m if DVB_FE_CUSTOMISE
+       help
+         A DVB-S tuner module. Say Y when you want to support this frontend.
+
+config DVB_STV0299
+       tristate "ST STV0299 based"
        depends on DVB_CORE && I2C
        default m if DVB_FE_CUSTOMISE
        help
@@ -43,8 +57,8 @@ config DVB_TDA8083
        help
          A DVB-S tuner module. Say Y when you want to support this frontend.
 
-config DVB_MT312
-       tristate "Zarlink VP310/MT312 based"
+config DVB_TDA10086
+       tristate "Philips TDA10086 based"
        depends on DVB_CORE && I2C
        default m if DVB_FE_CUSTOMISE
        help
@@ -57,19 +71,26 @@ config DVB_VES1X93
        help
          A DVB-S tuner module. Say Y when you want to support this frontend.
 
-config DVB_S5H1420
-       tristate "Samsung S5H1420 based"
+config DVB_TUNER_ITD1000
+       tristate "Integrant ITD1000 Zero IF tuner for DVB-S/DSS"
        depends on DVB_CORE && I2C
        default m if DVB_FE_CUSTOMISE
        help
          A DVB-S tuner module. Say Y when you want to support this frontend.
 
-config DVB_TDA10086
-       tristate "Philips TDA10086 based"
+config DVB_TDA826X
+       tristate "Philips TDA826X silicon tuner"
        depends on DVB_CORE && I2C
        default m if DVB_FE_CUSTOMISE
        help
-         A DVB-S tuner module. Say Y when you want to support this frontend.
+         A DVB-S silicon tuner module. Say Y when you want to support this tuner.
+
+config DVB_TUA6100
+       tristate "Infineon TUA6100 PLL"
+       depends on DVB_CORE && I2C
+       default m if DVB_FE_CUSTOMISE
+       help
+         A DVB-S PLL chip.
 
 comment "DVB-T (terrestrial) frontends"
        depends on DVB_CORE
@@ -315,7 +336,7 @@ config DVB_S5H1411
          An ATSC 8VSB and QAM64/256 tuner module. Say Y when you want
          to support this frontend.
 
-comment "Tuners/PLL support"
+comment "Digital terrestrial only tuners/PLL"
        depends on DVB_CORE
 
 config DVB_PLL
@@ -326,55 +347,6 @@ config DVB_PLL
          This module drives a number of tuners based on PLL chips with a
          common I2C interface. Say Y when you want to support these tuners.
 
-config DVB_TDA826X
-       tristate "Philips TDA826X silicon tuner"
-       depends on DVB_CORE && I2C
-       default m if DVB_FE_CUSTOMISE
-       help
-         A DVB-S silicon tuner module. Say Y when you want to support this tuner.
-
-config DVB_TDA827X
-       tristate "Philips TDA827X silicon tuner"
-       depends on DVB_CORE && I2C
-       default m if DVB_FE_CUSTOMISE
-       help
-         A DVB-T silicon tuner module. Say Y when you want to support this tuner.
-
-config DVB_TDA18271
-       tristate "NXP TDA18271 silicon tuner"
-       depends on I2C
-       default m if DVB_FE_CUSTOMISE
-       help
-         A silicon tuner module. Say Y when you want to support this tuner.
-
-config DVB_TUNER_QT1010
-       tristate "Quantek QT1010 silicon tuner"
-       depends on DVB_CORE && I2C
-       default m if DVB_FE_CUSTOMISE
-       help
-         A driver for the silicon tuner QT1010 from Quantek.
-
-config DVB_TUNER_MT2060
-       tristate "Microtune MT2060 silicon IF tuner"
-       depends on I2C
-       default m if DVB_FE_CUSTOMISE
-       help
-         A driver for the silicon IF tuner MT2060 from Microtune.
-
-config DVB_TUNER_MT2266
-       tristate "Microtune MT2266 silicon tuner"
-       depends on I2C
-       default m if DVB_FE_CUSTOMISE
-       help
-         A driver for the silicon baseband tuner MT2266 from Microtune.
-
-config DVB_TUNER_MT2131
-       tristate "Microtune MT2131 silicon tuner"
-       depends on I2C
-       default m if DVB_FE_CUSTOMISE
-       help
-         A driver for the silicon baseband tuner MT2131 from Microtune.
-
 config DVB_TUNER_DIB0070
        tristate "DiBcom DiB0070 silicon base-band tuner"
        depends on I2C
@@ -384,21 +356,7 @@ config DVB_TUNER_DIB0070
          This device is only used inside a SiP called togther with a
          demodulator for now.
 
-config DVB_TUNER_XC5000
-       tristate "Xceive XC5000 silicon tuner"
-       depends on I2C
-       default m if DVB_FE_CUSTOMISE
-       help
-         A driver for the silicon tuner XC5000 from Xceive.
-         This device is only used inside a SiP called togther with a
-         demodulator for now.
-
-config DVB_TUNER_ITD1000
-       tristate "Integrant ITD1000 Zero IF tuner for DVB-S/DSS"
-       depends on DVB_CORE && I2C
-       default m if DVB_FE_CUSTOMISE
-
-comment "Miscellaneous devices"
+comment "SEC control devices for DVB-S"
        depends on DVB_CORE
 
 config DVB_LNBP21
@@ -422,11 +380,4 @@ config DVB_ISL6421
        help
          An SEC control chip.
 
-config DVB_TUA6100
-       tristate "TUA6100 PLL"
-       depends on DVB_CORE && I2C
-       default m if DVB_FE_CUSTOMISE
-       help
-         A DVBS PLL chip.
-
 endmenu
index 9747c73dc8269c78c9d8c4e6e7aecbb0a6dded15..a89dc0fc4c6fe90dbb97d5fd22368a66b0c65000 100644 (file)
@@ -3,9 +3,7 @@
 #
 
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core/
-EXTRA_CFLAGS += -Idrivers/media/video/
-
-tda18271-objs := tda18271-tables.o tda18271-common.o tda18271-fe.o
+EXTRA_CFLAGS += -Idrivers/media/common/tuners/
 
 obj-$(CONFIG_DVB_PLL) += dvb-pll.o
 obj-$(CONFIG_DVB_STV0299) += stv0299.o
@@ -42,16 +40,9 @@ obj-$(CONFIG_DVB_ISL6405) += isl6405.o
 obj-$(CONFIG_DVB_ISL6421) += isl6421.o
 obj-$(CONFIG_DVB_TDA10086) += tda10086.o
 obj-$(CONFIG_DVB_TDA826X) += tda826x.o
-obj-$(CONFIG_DVB_TDA827X) += tda827x.o
-obj-$(CONFIG_DVB_TDA18271) += tda18271.o
-obj-$(CONFIG_DVB_TUNER_MT2060) += mt2060.o
-obj-$(CONFIG_DVB_TUNER_MT2266) += mt2266.o
 obj-$(CONFIG_DVB_TUNER_DIB0070) += dib0070.o
-obj-$(CONFIG_DVB_TUNER_QT1010) += qt1010.o
 obj-$(CONFIG_DVB_TUA6100) += tua6100.o
-obj-$(CONFIG_DVB_TUNER_MT2131) += mt2131.o
 obj-$(CONFIG_DVB_S5H1409) += s5h1409.o
-obj-$(CONFIG_DVB_TUNER_XC5000) += xc5000.o
 obj-$(CONFIG_DVB_TUNER_ITD1000) += itd1000.o
 obj-$(CONFIG_DVB_AU8522) += au8522.o
 obj-$(CONFIG_DVB_TDA10048) += tda10048.o
index 281e1cb2edc67d516361a4114b8864a7c3c716b6..720ed9ff7c5fda6a43b0b5074e16ee28c487eaf0 100644 (file)
@@ -481,7 +481,7 @@ static void s5h1420_setsymbolrate(struct s5h1420_state* state,
                val *= 2;
        do_div(val, (state->fclk / 1000));
 
-       dprintk("symbol rate register: %06llx\n", val);
+       dprintk("symbol rate register: %06llx\n", (unsigned long long)val);
 
        v = s5h1420_readreg(state, Loop01);
        s5h1420_writereg(state, Loop01, v & 0x7f);
index fe9a4cc141414c18523cb11c1b97dd0138b8c111..fe743aa7f645fb611af3058775ad2b22bbd38081 100644 (file)
@@ -1,3 +1,49 @@
+#
+# Generic video config states
+#
+
+config VIDEO_V4L2
+       tristate
+       depends on VIDEO_DEV && VIDEO_V4L2_COMMON
+       default VIDEO_DEV && VIDEO_V4L2_COMMON
+
+config VIDEO_V4L1
+       tristate
+       depends on VIDEO_DEV && VIDEO_V4L2_COMMON && VIDEO_ALLOW_V4L1
+       default VIDEO_DEV && VIDEO_V4L2_COMMON && VIDEO_ALLOW_V4L1
+
+config VIDEOBUF_GEN
+       tristate
+
+config VIDEOBUF_DMA_SG
+       depends on HAS_DMA
+       select VIDEOBUF_GEN
+       tristate
+
+config VIDEOBUF_VMALLOC
+       select VIDEOBUF_GEN
+       tristate
+
+config VIDEOBUF_DVB
+       tristate
+       select VIDEOBUF_GEN
+       select VIDEOBUF_DMA_SG
+
+config VIDEO_BTCX
+       tristate
+
+config VIDEO_IR_I2C
+       tristate
+
+config VIDEO_IR
+       tristate
+       depends on INPUT
+       select VIDEO_IR_I2C if I2C
+
+config VIDEO_TVEEPROM
+       tristate
+       depends on I2C
+
 #
 # Multimedia Video device configuration
 #
@@ -644,7 +690,7 @@ config VIDEO_MXB
        tristate "Siemens-Nixdorf 'Multimedia eXtension Board'"
        depends on PCI && VIDEO_V4L1 && I2C
        select VIDEO_SAA7146_VV
-       select VIDEO_TUNER
+       select MEDIA_TUNER
        select VIDEO_SAA7111 if VIDEO_HELPER_CHIPS_AUTO
        select VIDEO_TDA9840 if VIDEO_HELPER_CHIPS_AUTO
        select VIDEO_TEA6415C if VIDEO_HELPER_CHIPS_AUTO
@@ -702,6 +748,8 @@ source "drivers/media/video/au0828/Kconfig"
 
 source "drivers/media/video/ivtv/Kconfig"
 
+source "drivers/media/video/cx18/Kconfig"
+
 config VIDEO_M32R_AR
        tristate "AR devices"
        depends on M32R && VIDEO_V4L1
index be14227f37269adb548577df17407aa52c6905ae..a352c6e31f0cbf96496e68d8f6c86e12b7089c0a 100644 (file)
@@ -84,17 +84,7 @@ obj-$(CONFIG_VIDEO_HEXIUM_GEMINI) += hexium_gemini.o
 obj-$(CONFIG_VIDEO_DPC) += dpc7146.o
 obj-$(CONFIG_TUNER_3036) += tuner-3036.o
 
-obj-$(CONFIG_VIDEO_TUNER) += tuner.o
-
-obj-$(CONFIG_TUNER_XC2028) += tuner-xc2028.o
-obj-$(CONFIG_TUNER_SIMPLE) += tuner-simple.o
-# tuner-types will be merged into tuner-simple, in the future
-obj-$(CONFIG_TUNER_SIMPLE) += tuner-types.o
-obj-$(CONFIG_TUNER_MT20XX) += mt20xx.o
-obj-$(CONFIG_TUNER_TDA8290) += tda8290.o
-obj-$(CONFIG_TUNER_TEA5767) += tea5767.o
-obj-$(CONFIG_TUNER_TEA5761) += tea5761.o
-obj-$(CONFIG_TUNER_TDA9887) += tda9887.o
+obj-$(CONFIG_MEDIA_TUNER) += tuner.o
 
 obj-$(CONFIG_VIDEOBUF_GEN) += videobuf-core.o
 obj-$(CONFIG_VIDEOBUF_DMA_SG) += videobuf-dma-sg.o
@@ -134,6 +124,7 @@ obj-$(CONFIG_USB_VICAM)         += usbvideo/
 obj-$(CONFIG_USB_QUICKCAM_MESSENGER)   += usbvideo/
 
 obj-$(CONFIG_VIDEO_IVTV) += ivtv/
+obj-$(CONFIG_VIDEO_CX18) += cx18/
 
 obj-$(CONFIG_VIDEO_VIVI) += vivi.o
 obj-$(CONFIG_VIDEO_CX23885) += cx23885/
@@ -147,3 +138,4 @@ obj-$(CONFIG_VIDEO_AU0828) += au0828/
 
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
index 41708267e7a4f64a2b5548014eb8f3678871c51f..cab277fafa63a97831567d5c61db98b3d149f574 100644 (file)
@@ -4,7 +4,7 @@ config VIDEO_AU0828
        depends on VIDEO_DEV && I2C && INPUT && DVB_CORE
        select I2C_ALGOBIT
        select DVB_AU8522 if !DVB_FE_CUSTOMIZE
-       select DVB_TUNER_XC5000 if !DVB_FE_CUSTOMIZE
+       select MEDIA_TUNER_XC5000 if !DVB_FE_CUSTOMIZE
        ---help---
          This is a video4linux driver for Auvitek's USB device.
 
index 9f4f572c89c588c55412a0b5f617b098a7b39cae..cd2c58281b4eda4c26ac46483f10208389a881d5 100644 (file)
@@ -2,7 +2,7 @@ au0828-objs     := au0828-core.o au0828-i2c.o au0828-cards.o au0828-dvb.o
 
 obj-$(CONFIG_VIDEO_AU0828) += au0828.o
 
-EXTRA_CFLAGS += -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
 
index 5040d7fc4af56ea14f4307488915a6cac3764498..1371b4e4b5f15230fd8025fb01db690c53e0dccd 100644 (file)
@@ -119,7 +119,7 @@ static int start_urb_transfer(struct au0828_dev *dev)
                purb->transfer_buffer = kzalloc(URB_BUFSIZE, GFP_KERNEL);
                if (!purb->transfer_buffer) {
                        usb_free_urb(purb);
-                       dev->urbs[i] = 0;
+                       dev->urbs[i] = NULL;
                        goto err;
                }
 
index cfc822bb502a92b9cd2c6b66fe3c52d508ec91ef..7431ef6de9f1284f9d475678956fa8fe5ea6c7e1 100644 (file)
@@ -6,7 +6,7 @@ config VIDEO_BT848
        select VIDEO_BTCX
        select VIDEOBUF_DMA_SG
        select VIDEO_IR
-       select VIDEO_TUNER
+       select MEDIA_TUNER
        select VIDEO_TVEEPROM
        select VIDEO_MSP3400 if VIDEO_HELPER_CHIPS_AUTO
        select VIDEO_TVAUDIO if VIDEO_HELPER_CHIPS_AUTO
index 924d216d95705c1bb30dfc562294fb3c69255734..e415f6fc447c5905ffd87ac96e611a1c6bc9cf86 100644 (file)
@@ -9,4 +9,5 @@ bttv-objs      :=      bttv-driver.o bttv-cards.o bttv-if.o \
 obj-$(CONFIG_VIDEO_BT848) += bttv.o
 
 EXTRA_CFLAGS += -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
index 03816b73f847560571a1bccd3693bdc5502d1666..27da7b4232754745c0127e61fc39014fe30fa887 100644 (file)
@@ -81,8 +81,6 @@
 /* Limits scaled width, which must be a multiple of 4. */
 #define MAX_HACTIVE (0x3FF & -4)
 
-#define clamp(x, low, high) min (max (low, x), high)
-
 #define BTTV_NORMS    (\
                V4L2_STD_PAL    | V4L2_STD_PAL_N | \
                V4L2_STD_PAL_Nc | V4L2_STD_SECAM | \
index fae469ce16f5da0356bd04705884e536340dbdf5..2a429f9e32cde03068e7ea72a0ddeb93316496e1 100644 (file)
@@ -142,7 +142,8 @@ static int cs5345_command(struct i2c_client *client, unsigned cmd, void *arg)
 
 /* ----------------------------------------------------------------------- */
 
-static int cs5345_probe(struct i2c_client *client)
+static int cs5345_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
 {
        /* Check if the adapter supports the needed features */
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
index f41bfde045fe9dcb667e1373ac6e8424599db459..2dfd0afc62db2a72f29c180e11a291646049a3be 100644 (file)
@@ -135,7 +135,8 @@ static int cs53l32a_command(struct i2c_client *client, unsigned cmd, void *arg)
  * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
  */
 
-static int cs53l32a_probe(struct i2c_client *client)
+static int cs53l32a_probe(struct i2c_client *client,
+                         const struct i2c_device_id *id)
 {
        int i;
 
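
The cs5345/cs53l32a hunks above adapt these helpers to the newer i2c binding
model, in which probe() also receives the i2c_device_id entry that matched.
A minimal sketch of that shape for a made-up "mychip" driver (the name, the
id table and the empty probe body are illustrative, not from the patch):

    static int mychip_probe(struct i2c_client *client,
                            const struct i2c_device_id *id)
    {
            /* id->name identifies which table entry matched this client */
            return 0;
    }

    static const struct i2c_device_id mychip_id[] = {
            { "mychip", 0 },
            { }
    };
    MODULE_DEVICE_TABLE(i2c, mychip_id);
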
diff --git a/drivers/media/video/cx18/Kconfig b/drivers/media/video/cx18/Kconfig
new file mode 100644 (file)
index 0000000..acc4b47
--- /dev/null
@@ -0,0 +1,20 @@
+config VIDEO_CX18
+       tristate "Conexant cx23418 MPEG encoder support"
+       depends on VIDEO_V4L2 && DVB_CORE && PCI && I2C && EXPERIMENTAL
+       select I2C_ALGOBIT
+       select FW_LOADER
+       select VIDEO_IR
+       select MEDIA_TUNER
+       select VIDEO_TVEEPROM
+       select VIDEO_CX2341X
+       select VIDEO_CS5345
+       select DVB_S5H1409
+       ---help---
+         This is a video4linux driver for Conexant cx23418 based
+         PCI combo video recorder devices.
+
+         This is used in devices such as the Hauppauge HVR-1600
+         cards.
+
+         To compile this driver as a module, choose M here: the
+         module will be called cx18.
diff --git a/drivers/media/video/cx18/Makefile b/drivers/media/video/cx18/Makefile
new file mode 100644 (file)
index 0000000..b23d2e2
--- /dev/null
@@ -0,0 +1,11 @@
+cx18-objs    := cx18-driver.o cx18-cards.o cx18-i2c.o cx18-firmware.o cx18-gpio.o \
+       cx18-queue.o cx18-streams.o cx18-fileops.o cx18-ioctl.o cx18-controls.o \
+       cx18-mailbox.o cx18-vbi.o cx18-audio.o cx18-video.o cx18-irq.o \
+       cx18-av-core.o cx18-av-audio.o cx18-av-firmware.o cx18-av-vbi.o cx18-scb.o \
+       cx18-dvb.o
+
+obj-$(CONFIG_VIDEO_CX18) += cx18.o
+
+EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
+EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
diff --git a/drivers/media/video/cx18/cx18-audio.c b/drivers/media/video/cx18/cx18-audio.c
new file mode 100644 (file)
index 0000000..1adc404
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ *  cx18 audio-related functions
+ *
+ *  Derived from ivtv-audio.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-i2c.h"
+#include "cx18-cards.h"
+#include "cx18-audio.h"
+
+/* Selects the audio input and output according to the current
+   settings. */
+int cx18_audio_set_io(struct cx18 *cx)
+{
+       struct v4l2_routing route;
+       u32 audio_input;
+       int mux_input;
+
+       /* Determine which input to use */
+       if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
+               audio_input = cx->card->radio_input.audio_input;
+               mux_input = cx->card->radio_input.muxer_input;
+       } else {
+               audio_input =
+                       cx->card->audio_inputs[cx->audio_input].audio_input;
+               mux_input =
+                       cx->card->audio_inputs[cx->audio_input].muxer_input;
+       }
+
+       /* handle muxer chips */
+       route.input = mux_input;
+       route.output = 0;
+       cx18_i2c_hw(cx, cx->card->hw_muxer, VIDIOC_INT_S_AUDIO_ROUTING, &route);
+
+       route.input = audio_input;
+       return cx18_i2c_hw(cx, cx->card->hw_audio_ctrl,
+                       VIDIOC_INT_S_AUDIO_ROUTING, &route);
+}
+
+void cx18_audio_set_route(struct cx18 *cx, struct v4l2_routing *route)
+{
+       cx18_i2c_hw(cx, cx->card->hw_audio_ctrl,
+                       VIDIOC_INT_S_AUDIO_ROUTING, route);
+}
+
+void cx18_audio_set_audio_clock_freq(struct cx18 *cx, u8 freq)
+{
+       static u32 freqs[3] = { 44100, 48000, 32000 };
+
+       /* The audio clock of the digitizer must match the codec sample
+          rate, otherwise you get some very strange effects. */
+       if (freq > 2)
+               return;
+       cx18_call_i2c_clients(cx, VIDIOC_INT_AUDIO_CLOCK_FREQ, &freqs[freq]);
+}
diff --git a/drivers/media/video/cx18/cx18-audio.h b/drivers/media/video/cx18/cx18-audio.h
new file mode 100644 (file)
index 0000000..cb569a6
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ *  cx18 audio-related functions
+ *
+ *  Derived from ivtv-audio.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+int cx18_audio_set_io(struct cx18 *cx);
+void cx18_audio_set_route(struct cx18 *cx, struct v4l2_routing *route);
+void cx18_audio_set_audio_clock_freq(struct cx18 *cx, u8 freq);
diff --git a/drivers/media/video/cx18/cx18-av-audio.c b/drivers/media/video/cx18/cx18-av-audio.c
new file mode 100644 (file)
index 0000000..2dc3a5d
--- /dev/null
@@ -0,0 +1,361 @@
+/*
+ *  cx18 ADEC audio functions
+ *
+ *  Derived from cx25840-audio.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ *  02110-1301, USA.
+ */
+
+#include "cx18-driver.h"
+
+static int set_audclk_freq(struct cx18 *cx, u32 freq)
+{
+       struct cx18_av_state *state = &cx->av_state;
+
+       if (freq != 32000 && freq != 44100 && freq != 48000)
+               return -EINVAL;
+
+       /* common for all inputs and rates */
+       /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x10 */
+       cx18_av_write(cx, 0x127, 0x50);
+
+       if (state->aud_input != CX18_AV_AUDIO_SERIAL) {
+               switch (freq) {
+               case 32000:
+                       /* VID_PLL and AUX_PLL */
+                       cx18_av_write4(cx, 0x108, 0x1006040f);
+
+                       /* AUX_PLL_FRAC */
+                       cx18_av_write4(cx, 0x110, 0x01bb39ee);
+
+                       /* src3/4/6_ctl = 0x0801f77f */
+                       cx18_av_write4(cx, 0x900, 0x0801f77f);
+                       cx18_av_write4(cx, 0x904, 0x0801f77f);
+                       cx18_av_write4(cx, 0x90c, 0x0801f77f);
+                       break;
+
+               case 44100:
+                       /* VID_PLL and AUX_PLL */
+                       cx18_av_write4(cx, 0x108, 0x1009040f);
+
+                       /* AUX_PLL_FRAC */
+                       cx18_av_write4(cx, 0x110, 0x00ec6bd6);
+
+                       /* src3/4/6_ctl = 0x08016d59 */
+                       cx18_av_write4(cx, 0x900, 0x08016d59);
+                       cx18_av_write4(cx, 0x904, 0x08016d59);
+                       cx18_av_write4(cx, 0x90c, 0x08016d59);
+                       break;
+
+               case 48000:
+                       /* VID_PLL and AUX_PLL */
+                       cx18_av_write4(cx, 0x108, 0x100a040f);
+
+                       /* AUX_PLL_FRAC */
+                       cx18_av_write4(cx, 0x110, 0x0098d6e5);
+
+                       /* src3/4/6_ctl = 0x08014faa */
+                       cx18_av_write4(cx, 0x900, 0x08014faa);
+                       cx18_av_write4(cx, 0x904, 0x08014faa);
+                       cx18_av_write4(cx, 0x90c, 0x08014faa);
+                       break;
+               }
+       } else {
+               switch (freq) {
+               case 32000:
+                       /* VID_PLL and AUX_PLL */
+                       cx18_av_write4(cx, 0x108, 0x1e08040f);
+
+                       /* AUX_PLL_FRAC */
+                       cx18_av_write4(cx, 0x110, 0x012a0869);
+
+                       /* src1_ctl = 0x08010000 */
+                       cx18_av_write4(cx, 0x8f8, 0x08010000);
+
+                       /* src3/4/6_ctl = 0x08020000 */
+                       cx18_av_write4(cx, 0x900, 0x08020000);
+                       cx18_av_write4(cx, 0x904, 0x08020000);
+                       cx18_av_write4(cx, 0x90c, 0x08020000);
+
+                       /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x14 */
+                       cx18_av_write(cx, 0x127, 0x54);
+                       break;
+
+               case 44100:
+                       /* VID_PLL and AUX_PLL */
+                       cx18_av_write4(cx, 0x108, 0x1809040f);
+
+                       /* AUX_PLL_FRAC */
+                       cx18_av_write4(cx, 0x110, 0x00ec6bd6);
+
+                       /* src1_ctl = 0x080160cd */
+                       cx18_av_write4(cx, 0x8f8, 0x080160cd);
+
+                       /* src3/4/6_ctl = 0x08017385 */
+                       cx18_av_write4(cx, 0x900, 0x08017385);
+                       cx18_av_write4(cx, 0x904, 0x08017385);
+                       cx18_av_write4(cx, 0x90c, 0x08017385);
+                       break;
+
+               case 48000:
+                       /* VID_PLL and AUX_PLL */
+                       cx18_av_write4(cx, 0x108, 0x180a040f);
+
+                       /* AUX_PLL_FRAC */
+                       cx18_av_write4(cx, 0x110, 0x0098d6e5);
+
+                       /* src1_ctl = 0x08018000 */
+                       cx18_av_write4(cx, 0x8f8, 0x08018000);
+
+                       /* src3/4/6_ctl = 0x08015555 */
+                       cx18_av_write4(cx, 0x900, 0x08015555);
+                       cx18_av_write4(cx, 0x904, 0x08015555);
+                       cx18_av_write4(cx, 0x90c, 0x08015555);
+                       break;
+               }
+       }
+
+       state->audclk_freq = freq;
+
+       return 0;
+}
+
+void cx18_av_audio_set_path(struct cx18 *cx)
+{
+       struct cx18_av_state *state = &cx->av_state;
+
+       /* stop microcontroller */
+       cx18_av_and_or(cx, 0x803, ~0x10, 0);
+
+       /* assert soft reset */
+       cx18_av_and_or(cx, 0x810, ~0x1, 0x01);
+
+       /* Mute everything to prevent the PFFT! */
+       cx18_av_write(cx, 0x8d3, 0x1f);
+
+       if (state->aud_input == CX18_AV_AUDIO_SERIAL) {
+               /* Set Path1 to Serial Audio Input */
+               cx18_av_write4(cx, 0x8d0, 0x01011012);
+
+               /* The microcontroller should not be started for the
+                * non-tuner inputs: autodetection is specific to
+                * TV audio. */
+       } else {
+               /* Set Path1 to Analog Demod Main Channel */
+               cx18_av_write4(cx, 0x8d0, 0x1f063870);
+       }
+
+       set_audclk_freq(cx, state->audclk_freq);
+
+       /* deassert soft reset */
+       cx18_av_and_or(cx, 0x810, ~0x1, 0x00);
+
+       if (state->aud_input != CX18_AV_AUDIO_SERIAL) {
+               /* When the microcontroller detects the
+                * audio format, it will unmute the lines */
+               cx18_av_and_or(cx, 0x803, ~0x10, 0x10);
+       }
+}
+
+static int get_volume(struct cx18 *cx)
+{
+       /* Volume runs +18dB to -96dB in 1/2dB steps
+        * change to fit the msp3400 -114dB to +12dB range */
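+       /* Worked example (illustrative, not part of the original driver):
+        * a PATH1_VOLUME register value of 228 (the minimum volume that
+        * set_volume() writes) gives vol = 0, so (0 / 2) + 23 = 23 and
+        * 23 << 9 = 11776 on the 0..65535 V4L2 volume scale; a register
+        * value of 20 gives 127 << 9 = 65024. */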
+
+       /* check PATH1_VOLUME */
+       int vol = 228 - cx18_av_read(cx, 0x8d4);
+       vol = (vol / 2) + 23;
+       return vol << 9;
+}
+
+static void set_volume(struct cx18 *cx, int volume)
+{
+       /* First convert the volume to msp3400 values (0-127) */
+       int vol = volume >> 9;
+       /* now scale it up to cx18_av values
+        * -114dB to -96dB maps to 0
+        * this should be 19, but in my testing that was 4dB too loud */
+       if (vol <= 23)
+               vol = 0;
+       else
+               vol -= 23;
+
+       /* PATH1_VOLUME */
+       cx18_av_write(cx, 0x8d4, 228 - (vol * 2));
+}
+
+static int get_bass(struct cx18 *cx)
+{
+       /* bass is 49 steps +12dB to -12dB */
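+       /* Illustration (not in the original source): a register value of 0
+        * maps to 0xffff, 48 maps to 0, and the midpoint 24 maps to 32768
+        * on the 0..65535 V4L2 control scale. */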
+
+       /* check PATH1_EQ_BASS_VOL */
+       int bass = cx18_av_read(cx, 0x8d9) & 0x3f;
+       bass = (((48 - bass) * 0xffff) + 47) / 48;
+       return bass;
+}
+
+static void set_bass(struct cx18 *cx, int bass)
+{
+       /* PATH1_EQ_BASS_VOL */
+       cx18_av_and_or(cx, 0x8d9, ~0x3f, 48 - (bass * 48 / 0xffff));
+}
+
+static int get_treble(struct cx18 *cx)
+{
+       /* treble is 49 steps +12dB to -12dB */
+
+       /* check PATH1_EQ_TREBLE_VOL */
+       int treble = cx18_av_read(cx, 0x8db) & 0x3f;
+       treble = (((48 - treble) * 0xffff) + 47) / 48;
+       return treble;
+}
+
+static void set_treble(struct cx18 *cx, int treble)
+{
+       /* PATH1_EQ_TREBLE_VOL */
+       cx18_av_and_or(cx, 0x8db, ~0x3f, 48 - (treble * 48 / 0xffff));
+}
+
+static int get_balance(struct cx18 *cx)
+{
+       /* balance is 7 bit, 0 to -96dB */
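+       /* Illustration (not in the original source): a PATH1_BAL_LEVEL of 0
+        * reads back as 0x80 << 8 = 32768 (centered) regardless of the
+        * PATH1_BAL_LEFT bit; a level of 0x7f reads back as 256 or 65280
+        * depending on that bit. */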
+
+       /* check PATH1_BAL_LEVEL */
+       int balance = cx18_av_read(cx, 0x8d5) & 0x7f;
+       /* check PATH1_BAL_LEFT */
+       if ((cx18_av_read(cx, 0x8d5) & 0x80) == 0)
+               balance = 0x80 - balance;
+       else
+               balance = 0x80 + balance;
+       return balance << 8;
+}
+
+static void set_balance(struct cx18 *cx, int balance)
+{
+       int bal = balance >> 8;
+       if (bal > 0x80) {
+               /* PATH1_BAL_LEFT */
+               cx18_av_and_or(cx, 0x8d5, 0x7f, 0x80);
+               /* PATH1_BAL_LEVEL */
+               cx18_av_and_or(cx, 0x8d5, ~0x7f, bal & 0x7f);
+       } else {
+               /* PATH1_BAL_LEFT */
+               cx18_av_and_or(cx, 0x8d5, 0x7f, 0x00);
+               /* PATH1_BAL_LEVEL */
+               cx18_av_and_or(cx, 0x8d5, ~0x7f, 0x80 - bal);
+       }
+}
+
+static int get_mute(struct cx18 *cx)
+{
+       /* check SRC1_MUTE_EN */
+       return cx18_av_read(cx, 0x8d3) & 0x2 ? 1 : 0;
+}
+
+static void set_mute(struct cx18 *cx, int mute)
+{
+       struct cx18_av_state *state = &cx->av_state;
+
+       if (state->aud_input != CX18_AV_AUDIO_SERIAL) {
+               /* Must turn off microcontroller in order to mute sound.
+                * Not sure if this is the best method, but it does work.
+                * If the microcontroller is running, then it will undo any
+                * changes to the mute register. */
+               if (mute) {
+                       /* disable microcontroller */
+                       cx18_av_and_or(cx, 0x803, ~0x10, 0x00);
+                       cx18_av_write(cx, 0x8d3, 0x1f);
+               } else {
+                       /* enable microcontroller */
+                       cx18_av_and_or(cx, 0x803, ~0x10, 0x10);
+               }
+       } else {
+               /* SRC1_MUTE_EN */
+               cx18_av_and_or(cx, 0x8d3, ~0x2, mute ? 0x02 : 0x00);
+       }
+}
+
+int cx18_av_audio(struct cx18 *cx, unsigned int cmd, void *arg)
+{
+       struct cx18_av_state *state = &cx->av_state;
+       struct v4l2_control *ctrl = arg;
+       int retval;
+
+       switch (cmd) {
+       case VIDIOC_INT_AUDIO_CLOCK_FREQ:
+               if (state->aud_input != CX18_AV_AUDIO_SERIAL) {
+                       cx18_av_and_or(cx, 0x803, ~0x10, 0);
+                       cx18_av_write(cx, 0x8d3, 0x1f);
+               }
+               cx18_av_and_or(cx, 0x810, ~0x1, 1);
+               retval = set_audclk_freq(cx, *(u32 *)arg);
+               cx18_av_and_or(cx, 0x810, ~0x1, 0);
+               if (state->aud_input != CX18_AV_AUDIO_SERIAL)
+                       cx18_av_and_or(cx, 0x803, ~0x10, 0x10);
+               return retval;
+
+       case VIDIOC_G_CTRL:
+               switch (ctrl->id) {
+               case V4L2_CID_AUDIO_VOLUME:
+                       ctrl->value = get_volume(cx);
+                       break;
+               case V4L2_CID_AUDIO_BASS:
+                       ctrl->value = get_bass(cx);
+                       break;
+               case V4L2_CID_AUDIO_TREBLE:
+                       ctrl->value = get_treble(cx);
+                       break;
+               case V4L2_CID_AUDIO_BALANCE:
+                       ctrl->value = get_balance(cx);
+                       break;
+               case V4L2_CID_AUDIO_MUTE:
+                       ctrl->value = get_mute(cx);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+
+       case VIDIOC_S_CTRL:
+               switch (ctrl->id) {
+               case V4L2_CID_AUDIO_VOLUME:
+                       set_volume(cx, ctrl->value);
+                       break;
+               case V4L2_CID_AUDIO_BASS:
+                       set_bass(cx, ctrl->value);
+                       break;
+               case V4L2_CID_AUDIO_TREBLE:
+                       set_treble(cx, ctrl->value);
+                       break;
+               case V4L2_CID_AUDIO_BALANCE:
+                       set_balance(cx, ctrl->value);
+                       break;
+               case V4L2_CID_AUDIO_MUTE:
+                       set_mute(cx, ctrl->value);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
diff --git a/drivers/media/video/cx18/cx18-av-core.c b/drivers/media/video/cx18/cx18-av-core.c
new file mode 100644 (file)
index 0000000..6686490
--- /dev/null
@@ -0,0 +1,879 @@
+/*
+ *  cx18 ADEC core functions
+ *
+ *  Derived from cx25840-core.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ *  02110-1301, USA.
+ */
+
+#include "cx18-driver.h"
+
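+/*
+ * The A/V decoder registers sit at offset 0xc40000 of the cx18 register
+ * space (cx->reg_mem) and are accessed as 32-bit words, so the byte-wide
+ * helpers below do a read-modify-write of the enclosing word.  Illustrative
+ * example (not part of the original driver): cx18_av_write(cx, 0x127, 0x50)
+ * reads the word at offset 0xc40124 and replaces bits 31:24 of it, since
+ * the shift is (0x127 & 3) * 8 = 24.
+ */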
+int cx18_av_write(struct cx18 *cx, u16 addr, u8 value)
+{
+       u32 x = readl(cx->reg_mem + 0xc40000 + (addr & ~3));
+       u32 mask = 0xff;
+       int shift = (addr & 3) * 8;
+
+       x = (x & ~(mask << shift)) | ((u32)value << shift);
+       writel(x, cx->reg_mem + 0xc40000 + (addr & ~3));
+       return 0;
+}
+
+int cx18_av_write4(struct cx18 *cx, u16 addr, u32 value)
+{
+       writel(value, cx->reg_mem + 0xc40000 + addr);
+       return 0;
+}
+
+u8 cx18_av_read(struct cx18 *cx, u16 addr)
+{
+       u32 x = readl(cx->reg_mem + 0xc40000 + (addr & ~3));
+       int shift = (addr & 3) * 8;
+
+       return (x >> shift) & 0xff;
+}
+
+u32 cx18_av_read4(struct cx18 *cx, u16 addr)
+{
+       return readl(cx->reg_mem + 0xc40000 + addr);
+}
+
+int cx18_av_and_or(struct cx18 *cx, u16 addr, unsigned and_mask,
+                  u8 or_value)
+{
+       return cx18_av_write(cx, addr,
+                            (cx18_av_read(cx, addr) & and_mask) |
+                            or_value);
+}
+
+int cx18_av_and_or4(struct cx18 *cx, u16 addr, u32 and_mask,
+                  u32 or_value)
+{
+       return cx18_av_write4(cx, addr,
+                            (cx18_av_read4(cx, addr) & and_mask) |
+                            or_value);
+}
+
+/* ----------------------------------------------------------------------- */
+
+static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
+                                       enum cx18_av_audio_input aud_input);
+static void log_audio_status(struct cx18 *cx);
+static void log_video_status(struct cx18 *cx);
+
+/* ----------------------------------------------------------------------- */
+
+static void cx18_av_initialize(struct cx18 *cx)
+{
+       u32 v;
+
+       cx18_av_loadfw(cx);
+       /* Stop 8051 code execution */
+       cx18_av_write4(cx, CXADEC_DL_CTL, 0x03000000);
+
+       /* initialize the PLL by toggling the sleep bit */
+       v = cx18_av_read4(cx, CXADEC_HOST_REG1);
+       /* enable sleep mode */
+       cx18_av_write4(cx, CXADEC_HOST_REG1, v | 1);
+       /* disable sleep mode */
+       cx18_av_write4(cx, CXADEC_HOST_REG1, v & 0xfffe);
+
+       /* initialize DLLs */
+       v = cx18_av_read4(cx, CXADEC_DLL1_DIAG_CTRL) & 0xE1FFFEFF;
+       /* disable FLD */
+       cx18_av_write4(cx, CXADEC_DLL1_DIAG_CTRL, v);
+       /* enable FLD */
+       cx18_av_write4(cx, CXADEC_DLL1_DIAG_CTRL, v | 0x10000100);
+
+       v = cx18_av_read4(cx, CXADEC_DLL2_DIAG_CTRL) & 0xE1FFFEFF;
+       /* disable FLD */
+       cx18_av_write4(cx, CXADEC_DLL2_DIAG_CTRL, v);
+       /* enable FLD */
+       cx18_av_write4(cx, CXADEC_DLL2_DIAG_CTRL, v | 0x06000100);
+
+       /* set analog bias currents. Set Vreg to 1.20V. */
+       cx18_av_write4(cx, CXADEC_AFE_DIAG_CTRL1, 0x000A1802);
+
+       v = cx18_av_read4(cx, CXADEC_AFE_DIAG_CTRL3) | 1;
+       /* enable TUNE_FIL_RST */
+       cx18_av_write4(cx, CXADEC_AFE_DIAG_CTRL3, v);
+       /* disable TUNE_FIL_RST */
+       cx18_av_write4(cx, CXADEC_AFE_DIAG_CTRL3, v & 0xFFFFFFFE);
+
+       /* enable 656 output */
+       cx18_av_and_or4(cx, CXADEC_PIN_CTRL1, ~0, 0x040C00);
+
+       /* video output drive strength */
+       cx18_av_and_or4(cx, CXADEC_PIN_CTRL2, ~0, 0x2);
+
+       /* reset video */
+       cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0x8000);
+       cx18_av_write4(cx, CXADEC_SOFT_RST_CTRL, 0);
+
+       /* set video to auto-detect */
+       /* Clear bits 11-12 to enable slow locking mode.  Set autodetect mode */
+       /* set the comb notch = 1 */
+       cx18_av_and_or4(cx, CXADEC_MODE_CTRL, 0xFFF7E7F0, 0x02040800);
+
+       /* Enable wtw_en in CRUSH_CTRL (Set bit 22) */
+       /* Enable maj_sel in CRUSH_CTRL (Set bit 20) */
+       cx18_av_and_or4(cx, CXADEC_CRUSH_CTRL, ~0, 0x00500000);
+
+       /* Set VGA_TRACK_RANGE to 0x20 */
+       cx18_av_and_or4(cx, CXADEC_DFE_CTRL2, 0xFFFF00FF, 0x00002000);
+
+       /* Enable VBI capture */
+       cx18_av_write4(cx, CXADEC_OUT_CTRL1, 0x4010253F);
+       /* cx18_av_write4(cx, CXADEC_OUT_CTRL1, 0x4010253E); */
+
+       /* Set the video input.
+          The setting in MODE_CTRL gets lost when we do the above setup */
+       /* EncSetSignalStd(dwDevNum, pEnc->dwSigStd); */
+       /* EncSetVideoInput(dwDevNum, pEnc->VidIndSelection); */
+
+       v = cx18_av_read4(cx, CXADEC_AFE_CTRL);
+       v &= 0xFFFBFFFF;            /* turn OFF bit 18 for droop_comp_ch1 */
+       v &= 0xFFFF7FFF;            /* turn OFF bit 9 for clamp_sel_ch1 */
+       v &= 0xFFFFFFFE;            /* turn OFF bit 0 for 12db_ch1 */
+       /* v |= 0x00000001;*/            /* turn ON bit 0 for 12db_ch1 */
+       cx18_av_write4(cx, CXADEC_AFE_CTRL, v);
+
+/*     if(dwEnable && dw3DCombAvailable) { */
+/*             CxDevWrReg(CXADEC_SRC_COMB_CFG, 0x7728021F); */
+/*    } else { */
+/*             CxDevWrReg(CXADEC_SRC_COMB_CFG, 0x6628021F); */
+/*    } */
+       cx18_av_write4(cx, CXADEC_SRC_COMB_CFG, 0x6628021F);
+}
+
+/* ----------------------------------------------------------------------- */
+
+static void input_change(struct cx18 *cx)
+{
+       struct cx18_av_state *state = &cx->av_state;
+       v4l2_std_id std = state->std;
+
+       /* Follow steps 8c and 8d of section 3.16 in the cx18_av datasheet */
+       if (std & V4L2_STD_SECAM)
+               cx18_av_write(cx, 0x402, 0);
+       else {
+               cx18_av_write(cx, 0x402, 0x04);
+               cx18_av_write(cx, 0x49f, (std & V4L2_STD_NTSC) ? 0x14 : 0x11);
+       }
+       cx18_av_and_or(cx, 0x401, ~0x60, 0);
+       cx18_av_and_or(cx, 0x401, ~0x60, 0x60);
+
+       if (std & V4L2_STD_525_60) {
+               if (std == V4L2_STD_NTSC_M_JP) {
+                       /* Japan uses EIAJ audio standard */
+                       cx18_av_write(cx, 0x808, 0xf7);
+               } else if (std == V4L2_STD_NTSC_M_KR) {
+                       /* South Korea uses A2 audio standard */
+                       cx18_av_write(cx, 0x808, 0xf8);
+               } else {
+                       /* Others use the BTSC audio standard */
+                       cx18_av_write(cx, 0x808, 0xf6);
+               }
+               cx18_av_write(cx, 0x80b, 0x00);
+       } else if (std & V4L2_STD_PAL) {
+               /* Follow tuner change procedure for PAL */
+               cx18_av_write(cx, 0x808, 0xff);
+               cx18_av_write(cx, 0x80b, 0x03);
+       } else if (std & V4L2_STD_SECAM) {
+               /* Select autodetect for SECAM */
+               cx18_av_write(cx, 0x808, 0xff);
+               cx18_av_write(cx, 0x80b, 0x03);
+       }
+
+       if (cx18_av_read(cx, 0x803) & 0x10) {
+               /* restart audio decoder microcontroller */
+               cx18_av_and_or(cx, 0x803, ~0x10, 0x00);
+               cx18_av_and_or(cx, 0x803, ~0x10, 0x10);
+       }
+}
+
+static int set_input(struct cx18 *cx, enum cx18_av_video_input vid_input,
+                                       enum cx18_av_audio_input aud_input)
+{
+       struct cx18_av_state *state = &cx->av_state;
+       u8 is_composite = (vid_input >= CX18_AV_COMPOSITE1 &&
+                          vid_input <= CX18_AV_COMPOSITE8);
+       u8 reg;
+
+       CX18_DEBUG_INFO("decoder set video input %d, audio input %d\n",
+                       vid_input, aud_input);
+
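+       /* For illustration (not in the original source): CX18_AV_COMPOSITE3
+        * selects reg = 0xf0 + 2 = 0xf2 in the composite branch below. */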
+       if (is_composite) {
+               reg = 0xf0 + (vid_input - CX18_AV_COMPOSITE1);
+       } else {
+               int luma = vid_input & 0xf0;
+               int chroma = vid_input & 0xf00;
+
+               if ((vid_input & ~0xff0) ||
+                   luma < CX18_AV_SVIDEO_LUMA1 ||
+                   luma > CX18_AV_SVIDEO_LUMA4 ||
+                   chroma < CX18_AV_SVIDEO_CHROMA4 ||
+                   chroma > CX18_AV_SVIDEO_CHROMA8) {
+                       CX18_ERR("0x%04x is not a valid video input!\n",
+                                       vid_input);
+                       return -EINVAL;
+               }
+               reg = 0xf0 + ((luma - CX18_AV_SVIDEO_LUMA1) >> 4);
+               if (chroma >= CX18_AV_SVIDEO_CHROMA7) {
+                       reg &= 0x3f;
+                       reg |= (chroma - CX18_AV_SVIDEO_CHROMA7) >> 2;
+               } else {
+                       reg &= 0xcf;
+                       reg |= (chroma - CX18_AV_SVIDEO_CHROMA4) >> 4;
+               }
+       }
+
+       switch (aud_input) {
+       case CX18_AV_AUDIO_SERIAL:
+               /* do nothing, use serial audio input */
+               break;
+       case CX18_AV_AUDIO4: reg &= ~0x30; break;
+       case CX18_AV_AUDIO5: reg &= ~0x30; reg |= 0x10; break;
+       case CX18_AV_AUDIO6: reg &= ~0x30; reg |= 0x20; break;
+       case CX18_AV_AUDIO7: reg &= ~0xc0; break;
+       case CX18_AV_AUDIO8: reg &= ~0xc0; reg |= 0x40; break;
+
+       default:
+               CX18_ERR("0x%04x is not a valid audio input!\n", aud_input);
+               return -EINVAL;
+       }
+
+       cx18_av_write(cx, 0x103, reg);
+       /* Set INPUT_MODE to Composite (0) or S-Video (1) */
+       cx18_av_and_or(cx, 0x401, ~0x6, is_composite ? 0 : 0x02);
+       /* Set CH_SEL_ADC2 to 1 if input comes from CH3 */
+       cx18_av_and_or(cx, 0x102, ~0x2, (reg & 0x80) == 0 ? 2 : 0);
+       /* Set DUAL_MODE_ADC2 to 1 if input comes from both CH2 and CH3 */
+       if ((reg & 0xc0) != 0xc0 && (reg & 0x30) != 0x30)
+               cx18_av_and_or(cx, 0x102, ~0x4, 4);
+       else
+               cx18_av_and_or(cx, 0x102, ~0x4, 0);
+       /*cx18_av_and_or4(cx, 0x104, ~0x001b4180, 0x00004180);*/
+
+       state->vid_input = vid_input;
+       state->aud_input = aud_input;
+       cx18_av_audio_set_path(cx);
+       input_change(cx);
+       return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static int set_v4lstd(struct cx18 *cx)
+{
+       struct cx18_av_state *state = &cx->av_state;
+       u8 fmt = 0;     /* zero is autodetect */
+       u8 pal_m = 0;
+
+       /* First tests should be against specific std */
+       if (state->std == V4L2_STD_NTSC_M_JP) {
+               fmt = 0x2;
+       } else if (state->std == V4L2_STD_NTSC_443) {
+               fmt = 0x3;
+       } else if (state->std == V4L2_STD_PAL_M) {
+               pal_m = 1;
+               fmt = 0x5;
+       } else if (state->std == V4L2_STD_PAL_N) {
+               fmt = 0x6;
+       } else if (state->std == V4L2_STD_PAL_Nc) {
+               fmt = 0x7;
+       } else if (state->std == V4L2_STD_PAL_60) {
+               fmt = 0x8;
+       } else {
+               /* Then, test against generic ones */
+               if (state->std & V4L2_STD_NTSC)
+                       fmt = 0x1;
+               else if (state->std & V4L2_STD_PAL)
+                       fmt = 0x4;
+               else if (state->std & V4L2_STD_SECAM)
+                       fmt = 0xc;
+       }
+
+       CX18_DEBUG_INFO("changing video std to fmt %i\n", fmt);
+
+       /* Follow step 9 of section 3.16 in the cx18_av datasheet.
+          Without this PAL may display a vertical ghosting effect.
+          This happens for example with the Yuan MPC622. */
+       if (fmt >= 4 && fmt < 8) {
+               /* Set format to NTSC-M */
+               cx18_av_and_or(cx, 0x400, ~0xf, 1);
+               /* Turn off LCOMB */
+               cx18_av_and_or(cx, 0x47b, ~6, 0);
+       }
+       cx18_av_and_or(cx, 0x400, ~0xf, fmt);
+       cx18_av_and_or(cx, 0x403, ~0x3, pal_m);
+       cx18_av_vbi_setup(cx);
+       input_change(cx);
+       return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static int set_v4lctrl(struct cx18 *cx, struct v4l2_control *ctrl)
+{
+       switch (ctrl->id) {
+       case V4L2_CID_BRIGHTNESS:
+               if (ctrl->value < 0 || ctrl->value > 255) {
+                       CX18_ERR("invalid brightness setting %d\n",
+                                   ctrl->value);
+                       return -ERANGE;
+               }
+
+               cx18_av_write(cx, 0x414, ctrl->value - 128);
+               break;
+
+       case V4L2_CID_CONTRAST:
+               if (ctrl->value < 0 || ctrl->value > 127) {
+                       CX18_ERR("invalid contrast setting %d\n",
+                                   ctrl->value);
+                       return -ERANGE;
+               }
+
+               cx18_av_write(cx, 0x415, ctrl->value << 1);
+               break;
+
+       case V4L2_CID_SATURATION:
+               if (ctrl->value < 0 || ctrl->value > 127) {
+                       CX18_ERR("invalid saturation setting %d\n",
+                                   ctrl->value);
+                       return -ERANGE;
+               }
+
+               cx18_av_write(cx, 0x420, ctrl->value << 1);
+               cx18_av_write(cx, 0x421, ctrl->value << 1);
+               break;
+
+       case V4L2_CID_HUE:
+               if (ctrl->value < -127 || ctrl->value > 127) {
+                       CX18_ERR("invalid hue setting %d\n", ctrl->value);
+                       return -ERANGE;
+               }
+
+               cx18_av_write(cx, 0x422, ctrl->value);
+               break;
+
+       case V4L2_CID_AUDIO_VOLUME:
+       case V4L2_CID_AUDIO_BASS:
+       case V4L2_CID_AUDIO_TREBLE:
+       case V4L2_CID_AUDIO_BALANCE:
+       case V4L2_CID_AUDIO_MUTE:
+               return cx18_av_audio(cx, VIDIOC_S_CTRL, ctrl);
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int get_v4lctrl(struct cx18 *cx, struct v4l2_control *ctrl)
+{
+       switch (ctrl->id) {
+       case V4L2_CID_BRIGHTNESS:
+               ctrl->value = (s8)cx18_av_read(cx, 0x414) + 128;
+               break;
+       case V4L2_CID_CONTRAST:
+               ctrl->value = cx18_av_read(cx, 0x415) >> 1;
+               break;
+       case V4L2_CID_SATURATION:
+               ctrl->value = cx18_av_read(cx, 0x420) >> 1;
+               break;
+       case V4L2_CID_HUE:
+               ctrl->value = (s8)cx18_av_read(cx, 0x422);
+               break;
+       case V4L2_CID_AUDIO_VOLUME:
+       case V4L2_CID_AUDIO_BASS:
+       case V4L2_CID_AUDIO_TREBLE:
+       case V4L2_CID_AUDIO_BALANCE:
+       case V4L2_CID_AUDIO_MUTE:
+               return cx18_av_audio(cx, VIDIOC_G_CTRL, ctrl);
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+static int get_v4lfmt(struct cx18 *cx, struct v4l2_format *fmt)
+{
+       switch (fmt->type) {
+       case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+               return cx18_av_vbi(cx, VIDIOC_G_FMT, fmt);
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int set_v4lfmt(struct cx18 *cx, struct v4l2_format *fmt)
+{
+       struct cx18_av_state *state = &cx->av_state;
+       struct v4l2_pix_format *pix;
+       int HSC, VSC, Vsrc, Hsrc, filter, Vlines;
+       int is_50Hz = !(state->std & V4L2_STD_525_60);
+
+       switch (fmt->type) {
+       case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+               pix = &(fmt->fmt.pix);
+
+               Vsrc = (cx18_av_read(cx, 0x476) & 0x3f) << 4;
+               Vsrc |= (cx18_av_read(cx, 0x475) & 0xf0) >> 4;
+
+               Hsrc = (cx18_av_read(cx, 0x472) & 0x3f) << 4;
+               Hsrc |= (cx18_av_read(cx, 0x471) & 0xf0) >> 4;
+
+               Vlines = pix->height + (is_50Hz ? 4 : 7);
+
+               if ((pix->width * 16 < Hsrc) || (Hsrc < pix->width) ||
+                   (Vlines * 8 < Vsrc) || (Vsrc < Vlines)) {
+                       CX18_ERR("%dx%d is not a valid size!\n",
+                                   pix->width, pix->height);
+                       return -ERANGE;
+               }
+
+               HSC = (Hsrc * (1 << 20)) / pix->width - (1 << 20);
+               VSC = (1 << 16) - (Vsrc * (1 << 9) / Vlines - (1 << 9));
+               VSC &= 0x1fff;
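+               /* Example (illustrative, not from the original source): with
+                * Hsrc = 720 and a requested width of 360, HSC becomes
+                * (720 << 20) / 360 - (1 << 20) = 1 << 20, i.e. a 2x
+                * horizontal downscale; an unscaled width of 720 gives
+                * HSC = 0. */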
+
+               if (pix->width >= 385)
+                       filter = 0;
+               else if (pix->width > 192)
+                       filter = 1;
+               else if (pix->width > 96)
+                       filter = 2;
+               else
+                       filter = 3;
+
+               CX18_DEBUG_INFO("decoder set size %dx%d -> scale  %ux%u\n",
+                           pix->width, pix->height, HSC, VSC);
+
+               /* HSCALE=HSC */
+               cx18_av_write(cx, 0x418, HSC & 0xff);
+               cx18_av_write(cx, 0x419, (HSC >> 8) & 0xff);
+               cx18_av_write(cx, 0x41a, HSC >> 16);
+               /* VSCALE=VSC */
+               cx18_av_write(cx, 0x41c, VSC & 0xff);
+               cx18_av_write(cx, 0x41d, VSC >> 8);
+               /* VS_INTRLACE=1 VFILT=filter */
+               cx18_av_write(cx, 0x41e, 0x8 | filter);
+               break;
+
+       case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+               return cx18_av_vbi(cx, VIDIOC_S_FMT, fmt);
+
+       case V4L2_BUF_TYPE_VBI_CAPTURE:
+               return cx18_av_vbi(cx, VIDIOC_S_FMT, fmt);
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+int cx18_av_cmd(struct cx18 *cx, unsigned int cmd, void *arg)
+{
+       struct cx18_av_state *state = &cx->av_state;
+       struct v4l2_tuner *vt = arg;
+       struct v4l2_routing *route = arg;
+
+       /* ignore these commands */
+       switch (cmd) {
+       case TUNER_SET_TYPE_ADDR:
+               return 0;
+       }
+
+       if (!state->is_initialized) {
+               CX18_DEBUG_INFO("cmd %08x triggered fw load\n", cmd);
+               /* initialize on first use */
+               state->is_initialized = 1;
+               cx18_av_initialize(cx);
+       }
+
+       switch (cmd) {
+       case VIDIOC_INT_DECODE_VBI_LINE:
+               return cx18_av_vbi(cx, cmd, arg);
+
+       case VIDIOC_INT_AUDIO_CLOCK_FREQ:
+               return cx18_av_audio(cx, cmd, arg);
+
+       case VIDIOC_STREAMON:
+               CX18_DEBUG_INFO("enable output\n");
+               cx18_av_write(cx, 0x115, 0x8c);
+               cx18_av_write(cx, 0x116, 0x07);
+               break;
+
+       case VIDIOC_STREAMOFF:
+               CX18_DEBUG_INFO("disable output\n");
+               cx18_av_write(cx, 0x115, 0x00);
+               cx18_av_write(cx, 0x116, 0x00);
+               break;
+
+       case VIDIOC_LOG_STATUS:
+               log_video_status(cx);
+               log_audio_status(cx);
+               break;
+
+       case VIDIOC_G_CTRL:
+               return get_v4lctrl(cx, (struct v4l2_control *)arg);
+
+       case VIDIOC_S_CTRL:
+               return set_v4lctrl(cx, (struct v4l2_control *)arg);
+
+       case VIDIOC_QUERYCTRL:
+       {
+               struct v4l2_queryctrl *qc = arg;
+
+               switch (qc->id) {
+               case V4L2_CID_BRIGHTNESS:
+               case V4L2_CID_CONTRAST:
+               case V4L2_CID_SATURATION:
+               case V4L2_CID_HUE:
+                       return v4l2_ctrl_query_fill_std(qc);
+               default:
+                       break;
+               }
+
+               switch (qc->id) {
+               case V4L2_CID_AUDIO_VOLUME:
+               case V4L2_CID_AUDIO_MUTE:
+               case V4L2_CID_AUDIO_BALANCE:
+               case V4L2_CID_AUDIO_BASS:
+               case V4L2_CID_AUDIO_TREBLE:
+                       return v4l2_ctrl_query_fill_std(qc);
+               default:
+                       return -EINVAL;
+               }
+               return -EINVAL;
+       }
+
+       case VIDIOC_G_STD:
+               *(v4l2_std_id *)arg = state->std;
+               break;
+
+       case VIDIOC_S_STD:
+               if (state->radio == 0 && state->std == *(v4l2_std_id *)arg)
+                       return 0;
+               state->radio = 0;
+               state->std = *(v4l2_std_id *)arg;
+               return set_v4lstd(cx);
+
+       case AUDC_SET_RADIO:
+               state->radio = 1;
+               break;
+
+       case VIDIOC_INT_G_VIDEO_ROUTING:
+               route->input = state->vid_input;
+               route->output = 0;
+               break;
+
+       case VIDIOC_INT_S_VIDEO_ROUTING:
+               return set_input(cx, route->input, state->aud_input);
+
+       case VIDIOC_INT_G_AUDIO_ROUTING:
+               route->input = state->aud_input;
+               route->output = 0;
+               break;
+
+       case VIDIOC_INT_S_AUDIO_ROUTING:
+               return set_input(cx, state->vid_input, route->input);
+
+       case VIDIOC_S_FREQUENCY:
+               input_change(cx);
+               break;
+
+       case VIDIOC_G_TUNER:
+       {
+               u8 vpres = cx18_av_read(cx, 0x40e) & 0x20;
+               u8 mode;
+               int val = 0;
+
+               if (state->radio)
+                       break;
+
+               vt->signal = vpres ? 0xffff : 0x0;
+
+               vt->capability |=
+                   V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 |
+                   V4L2_TUNER_CAP_LANG2 | V4L2_TUNER_CAP_SAP;
+
+               mode = cx18_av_read(cx, 0x804);
+
+               /* get rxsubchans and audmode */
+               if ((mode & 0xf) == 1)
+                       val |= V4L2_TUNER_SUB_STEREO;
+               else
+                       val |= V4L2_TUNER_SUB_MONO;
+
+               if (mode == 2 || mode == 4)
+                       val = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
+
+               if (mode & 0x10)
+                       val |= V4L2_TUNER_SUB_SAP;
+
+               vt->rxsubchans = val;
+               vt->audmode = state->audmode;
+               break;
+       }
+
+       case VIDIOC_S_TUNER:
+               if (state->radio)
+                       break;
+
+               switch (vt->audmode) {
+               case V4L2_TUNER_MODE_MONO:
+                       /* mono      -> mono
+                          stereo    -> mono
+                          bilingual -> lang1 */
+                       cx18_av_and_or(cx, 0x809, ~0xf, 0x00);
+                       break;
+               case V4L2_TUNER_MODE_STEREO:
+               case V4L2_TUNER_MODE_LANG1:
+                       /* mono      -> mono
+                          stereo    -> stereo
+                          bilingual -> lang1 */
+                       cx18_av_and_or(cx, 0x809, ~0xf, 0x04);
+                       break;
+               case V4L2_TUNER_MODE_LANG1_LANG2:
+                       /* mono      -> mono
+                          stereo    -> stereo
+                          bilingual -> lang1/lang2 */
+                       cx18_av_and_or(cx, 0x809, ~0xf, 0x07);
+                       break;
+               case V4L2_TUNER_MODE_LANG2:
+                       /* mono      -> mono
+                          stereo    -> stereo
+                          bilingual -> lang2 */
+                       cx18_av_and_or(cx, 0x809, ~0xf, 0x01);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               state->audmode = vt->audmode;
+               break;
+
+       case VIDIOC_G_FMT:
+               return get_v4lfmt(cx, (struct v4l2_format *)arg);
+
+       case VIDIOC_S_FMT:
+               return set_v4lfmt(cx, (struct v4l2_format *)arg);
+
+       case VIDIOC_INT_RESET:
+               cx18_av_initialize(cx);
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* ----------------------------------------------------------------------- */
+
+/* ----------------------------------------------------------------------- */
+
+static void log_video_status(struct cx18 *cx)
+{
+       static const char *const fmt_strs[] = {
+               "0x0",
+               "NTSC-M", "NTSC-J", "NTSC-4.43",
+               "PAL-BDGHI", "PAL-M", "PAL-N", "PAL-Nc", "PAL-60",
+               "0x9", "0xA", "0xB",
+               "SECAM",
+               "0xD", "0xE", "0xF"
+       };
+
+       struct cx18_av_state *state = &cx->av_state;
+       u8 vidfmt_sel = cx18_av_read(cx, 0x400) & 0xf;
+       u8 gen_stat1 = cx18_av_read(cx, 0x40d);
+       u8 gen_stat2 = cx18_av_read(cx, 0x40e);
+       int vid_input = state->vid_input;
+
+       CX18_INFO("Video signal:              %spresent\n",
+                   (gen_stat2 & 0x20) ? "" : "not ");
+       CX18_INFO("Detected format:           %s\n",
+                   fmt_strs[gen_stat1 & 0xf]);
+
+       CX18_INFO("Specified standard:        %s\n",
+                   vidfmt_sel ? fmt_strs[vidfmt_sel] : "automatic detection");
+
+       if (vid_input >= CX18_AV_COMPOSITE1 &&
+           vid_input <= CX18_AV_COMPOSITE8) {
+               CX18_INFO("Specified video input:     Composite %d\n",
+                       vid_input - CX18_AV_COMPOSITE1 + 1);
+       } else {
+               CX18_INFO("Specified video input:     S-Video (Luma In%d, Chroma In%d)\n",
+                       (vid_input & 0xf0) >> 4, (vid_input & 0xf00) >> 8);
+       }
+
+       CX18_INFO("Specified audioclock freq: %d Hz\n", state->audclk_freq);
+}
+
+/* ----------------------------------------------------------------------- */
+
+static void log_audio_status(struct cx18 *cx)
+{
+       struct cx18_av_state *state = &cx->av_state;
+       u8 download_ctl = cx18_av_read(cx, 0x803);
+       u8 mod_det_stat0 = cx18_av_read(cx, 0x805);
+       u8 mod_det_stat1 = cx18_av_read(cx, 0x804);
+       u8 audio_config = cx18_av_read(cx, 0x808);
+       u8 pref_mode = cx18_av_read(cx, 0x809);
+       u8 afc0 = cx18_av_read(cx, 0x80b);
+       u8 mute_ctl = cx18_av_read(cx, 0x8d3);
+       int aud_input = state->aud_input;
+       char *p;
+
+       switch (mod_det_stat0) {
+       case 0x00: p = "mono"; break;
+       case 0x01: p = "stereo"; break;
+       case 0x02: p = "dual"; break;
+       case 0x04: p = "tri"; break;
+       case 0x10: p = "mono with SAP"; break;
+       case 0x11: p = "stereo with SAP"; break;
+       case 0x12: p = "dual with SAP"; break;
+       case 0x14: p = "tri with SAP"; break;
+       case 0xfe: p = "forced mode"; break;
+       default: p = "not defined";
+       }
+       CX18_INFO("Detected audio mode:       %s\n", p);
+
+       switch (mod_det_stat1) {
+       case 0x00: p = "BTSC"; break;
+       case 0x01: p = "EIAJ"; break;
+       case 0x02: p = "A2-M"; break;
+       case 0x03: p = "A2-BG"; break;
+       case 0x04: p = "A2-DK1"; break;
+       case 0x05: p = "A2-DK2"; break;
+       case 0x06: p = "A2-DK3"; break;
+       case 0x07: p = "A1 (6.0 MHz FM Mono)"; break;
+       case 0x08: p = "AM-L"; break;
+       case 0x09: p = "NICAM-BG"; break;
+       case 0x0a: p = "NICAM-DK"; break;
+       case 0x0b: p = "NICAM-I"; break;
+       case 0x0c: p = "NICAM-L"; break;
+       case 0x0d: p = "BTSC/EIAJ/A2-M Mono (4.5 MHz FM Mono)"; break;
+       case 0xff: p = "no detected audio standard"; break;
+       default: p = "not defined";
+       }
+       CX18_INFO("Detected audio standard:   %s\n", p);
+       CX18_INFO("Audio muted:               %s\n",
+                   (mute_ctl & 0x2) ? "yes" : "no");
+       CX18_INFO("Audio microcontroller:     %s\n",
+                   (download_ctl & 0x10) ? "running" : "stopped");
+
+       switch (audio_config >> 4) {
+       case 0x00: p = "BTSC"; break;
+       case 0x01: p = "EIAJ"; break;
+       case 0x02: p = "A2-M"; break;
+       case 0x03: p = "A2-BG"; break;
+       case 0x04: p = "A2-DK1"; break;
+       case 0x05: p = "A2-DK2"; break;
+       case 0x06: p = "A2-DK3"; break;
+       case 0x07: p = "A1 (6.0 MHz FM Mono)"; break;
+       case 0x08: p = "AM-L"; break;
+       case 0x09: p = "NICAM-BG"; break;
+       case 0x0a: p = "NICAM-DK"; break;
+       case 0x0b: p = "NICAM-I"; break;
+       case 0x0c: p = "NICAM-L"; break;
+       case 0x0d: p = "FM radio"; break;
+       case 0x0f: p = "automatic detection"; break;
+       default: p = "undefined";
+       }
+       CX18_INFO("Configured audio standard: %s\n", p);
+
+       if ((audio_config >> 4) < 0xF) {
+               switch (audio_config & 0xF) {
+               case 0x00: p = "MONO1 (LANGUAGE A/Mono L+R channel for BTSC, EIAJ, A2)"; break;
+               case 0x01: p = "MONO2 (LANGUAGE B)"; break;
+               case 0x02: p = "MONO3 (STEREO forced MONO)"; break;
+               case 0x03: p = "MONO4 (NICAM ANALOG-Language C/Analog Fallback)"; break;
+               case 0x04: p = "STEREO"; break;
+               case 0x05: p = "DUAL1 (AB)"; break;
+               case 0x06: p = "DUAL2 (AC) (FM)"; break;
+               case 0x07: p = "DUAL3 (BC) (FM)"; break;
+               case 0x08: p = "DUAL4 (AC) (AM)"; break;
+               case 0x09: p = "DUAL5 (BC) (AM)"; break;
+               case 0x0a: p = "SAP"; break;
+               default: p = "undefined";
+               }
+               CX18_INFO("Configured audio mode:     %s\n", p);
+       } else {
+               switch (audio_config & 0xF) {
+               case 0x00: p = "BG"; break;
+               case 0x01: p = "DK1"; break;
+               case 0x02: p = "DK2"; break;
+               case 0x03: p = "DK3"; break;
+               case 0x04: p = "I"; break;
+               case 0x05: p = "L"; break;
+               case 0x06: p = "BTSC"; break;
+               case 0x07: p = "EIAJ"; break;
+               case 0x08: p = "A2-M"; break;
+               case 0x09: p = "FM Radio"; break;
+               case 0x0f: p = "automatic standard and mode detection"; break;
+               default: p = "undefined";
+               }
+               CX18_INFO("Configured audio system:   %s\n", p);
+       }
+
+       if (aud_input)
+               CX18_INFO("Specified audio input:     Tuner (In%d)\n",
+                               aud_input);
+       else
+               CX18_INFO("Specified audio input:     External\n");
+
+       switch (pref_mode & 0xf) {
+       case 0: p = "mono/language A"; break;
+       case 1: p = "language B"; break;
+       case 2: p = "language C"; break;
+       case 3: p = "analog fallback"; break;
+       case 4: p = "stereo"; break;
+       case 5: p = "language AC"; break;
+       case 6: p = "language BC"; break;
+       case 7: p = "language AB"; break;
+       default: p = "undefined";
+       }
+       CX18_INFO("Preferred audio mode:      %s\n", p);
+
+       if ((audio_config & 0xf) == 0xf) {
+               switch ((afc0 >> 2) & 0x1) {
+               case 0: p = "system DK"; break;
+               case 1: p = "system L"; break;
+               }
+               CX18_INFO("Selected 65 MHz format:    %s\n", p);
+
+               switch (afc0 & 0x3) {
+               case 0: p = "BTSC"; break;
+               case 1: p = "EIAJ"; break;
+               case 2: p = "A2-M"; break;
+               default: p = "undefined";
+               }
+               CX18_INFO("Selected 45 MHz format:    %s\n", p);
+       }
+}
diff --git a/drivers/media/video/cx18/cx18-av-core.h b/drivers/media/video/cx18/cx18-av-core.h
new file mode 100644 (file)
index 0000000..786901d
--- /dev/null
@@ -0,0 +1,318 @@
+/*
+ *  cx18 ADEC header
+ *
+ *  Derived from cx25840-core.h
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ *  02110-1301, USA.
+ */
+
+#ifndef _CX18_AV_CORE_H_
+#define _CX18_AV_CORE_H_
+
+struct cx18;
+
+enum cx18_av_video_input {
+       /* Composite video inputs In1-In8 */
+       CX18_AV_COMPOSITE1 = 1,
+       CX18_AV_COMPOSITE2,
+       CX18_AV_COMPOSITE3,
+       CX18_AV_COMPOSITE4,
+       CX18_AV_COMPOSITE5,
+       CX18_AV_COMPOSITE6,
+       CX18_AV_COMPOSITE7,
+       CX18_AV_COMPOSITE8,
+
+       /* S-Video inputs consist of one luma input (In1-In4) ORed with one
+          chroma input (In5-In8) */
+       CX18_AV_SVIDEO_LUMA1 = 0x10,
+       CX18_AV_SVIDEO_LUMA2 = 0x20,
+       CX18_AV_SVIDEO_LUMA3 = 0x30,
+       CX18_AV_SVIDEO_LUMA4 = 0x40,
+       CX18_AV_SVIDEO_CHROMA4 = 0x400,
+       CX18_AV_SVIDEO_CHROMA5 = 0x500,
+       CX18_AV_SVIDEO_CHROMA6 = 0x600,
+       CX18_AV_SVIDEO_CHROMA7 = 0x700,
+       CX18_AV_SVIDEO_CHROMA8 = 0x800,
+
+       /* S-Video aliases for common luma/chroma combinations */
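+       /* For illustration: CX18_AV_SVIDEO2 (0x620) is
+          CX18_AV_SVIDEO_CHROMA6 | CX18_AV_SVIDEO_LUMA2. */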
+       CX18_AV_SVIDEO1 = 0x510,
+       CX18_AV_SVIDEO2 = 0x620,
+       CX18_AV_SVIDEO3 = 0x730,
+       CX18_AV_SVIDEO4 = 0x840,
+};
+
+enum cx18_av_audio_input {
+       /* Audio inputs: serial or In4-In8 */
+       CX18_AV_AUDIO_SERIAL,
+       CX18_AV_AUDIO4 = 4,
+       CX18_AV_AUDIO5,
+       CX18_AV_AUDIO6,
+       CX18_AV_AUDIO7,
+       CX18_AV_AUDIO8,
+};
+
+struct cx18_av_state {
+       int radio;
+       v4l2_std_id std;
+       enum cx18_av_video_input vid_input;
+       enum cx18_av_audio_input aud_input;
+       u32 audclk_freq;
+       int audmode;
+       int vbi_line_offset;
+       u32 id;
+       u32 rev;
+       int is_initialized;
+};
+
+
+/* Registers */
+#define CXADEC_CHIP_TYPE_TIGER     0x837
+#define CXADEC_CHIP_TYPE_MAKO      0x843
+
+#define CXADEC_HOST_REG1           0x000
+#define CXADEC_HOST_REG2           0x001
+
+#define CXADEC_CHIP_CTRL           0x100
+#define CXADEC_AFE_CTRL            0x104
+#define CXADEC_PLL_CTRL1           0x108
+#define CXADEC_VID_PLL_FRAC        0x10C
+#define CXADEC_AUX_PLL_FRAC        0x110
+#define CXADEC_PIN_CTRL1           0x114
+#define CXADEC_PIN_CTRL2           0x118
+#define CXADEC_PIN_CFG1            0x11C
+#define CXADEC_PIN_CFG2            0x120
+
+#define CXADEC_PIN_CFG3            0x124
+#define CXADEC_I2S_MCLK            0x127
+
+#define CXADEC_AUD_LOCK1           0x128
+#define CXADEC_AUD_LOCK2           0x12C
+#define CXADEC_POWER_CTRL          0x130
+#define CXADEC_AFE_DIAG_CTRL1      0x134
+#define CXADEC_AFE_DIAG_CTRL2      0x138
+#define CXADEC_AFE_DIAG_CTRL3      0x13C
+#define CXADEC_PLL_DIAG_CTRL       0x140
+#define CXADEC_TEST_CTRL1          0x144
+#define CXADEC_TEST_CTRL2          0x148
+#define CXADEC_BIST_STAT           0x14C
+#define CXADEC_DLL1_DIAG_CTRL      0x158
+#define CXADEC_DLL2_DIAG_CTRL      0x15C
+
+/* IR registers */
+#define CXADEC_IR_CTRL_REG         0x200
+#define CXADEC_IR_TXCLK_REG        0x204
+#define CXADEC_IR_RXCLK_REG        0x208
+#define CXADEC_IR_CDUTY_REG        0x20C
+#define CXADEC_IR_STAT_REG         0x210
+#define CXADEC_IR_IRQEN_REG        0x214
+#define CXADEC_IR_FILTER_REG       0x218
+#define CXADEC_IR_FIFO_REG         0x21C
+
+/* Video Registers */
+#define CXADEC_MODE_CTRL           0x400
+#define CXADEC_OUT_CTRL1           0x404
+#define CXADEC_OUT_CTRL2           0x408
+#define CXADEC_GEN_STAT            0x40C
+#define CXADEC_INT_STAT_MASK       0x410
+#define CXADEC_LUMA_CTRL           0x414
+
+#define CXADEC_BRIGHTNESS_CTRL_BYTE 0x414
+#define CXADEC_CONTRAST_CTRL_BYTE  0x415
+#define CXADEC_LUMA_CTRL_BYTE_3    0x416
+
+#define CXADEC_HSCALE_CTRL         0x418
+#define CXADEC_VSCALE_CTRL         0x41C
+
+#define CXADEC_CHROMA_CTRL         0x420
+
+#define CXADEC_USAT_CTRL_BYTE      0x420
+#define CXADEC_VSAT_CTRL_BYTE      0x421
+#define CXADEC_HUE_CTRL_BYTE       0x422
+
+#define CXADEC_VBI_LINE_CTRL1      0x424
+#define CXADEC_VBI_LINE_CTRL2      0x428
+#define CXADEC_VBI_LINE_CTRL3      0x42C
+#define CXADEC_VBI_LINE_CTRL4      0x430
+#define CXADEC_VBI_LINE_CTRL5      0x434
+#define CXADEC_VBI_FC_CFG          0x438
+#define CXADEC_VBI_MISC_CFG1       0x43C
+#define CXADEC_VBI_MISC_CFG2       0x440
+#define CXADEC_VBI_PAY1            0x444
+#define CXADEC_VBI_PAY2            0x448
+#define CXADEC_VBI_CUST1_CFG1      0x44C
+#define CXADEC_VBI_CUST1_CFG2      0x450
+#define CXADEC_VBI_CUST1_CFG3      0x454
+#define CXADEC_VBI_CUST2_CFG1      0x458
+#define CXADEC_VBI_CUST2_CFG2      0x45C
+#define CXADEC_VBI_CUST2_CFG3      0x460
+#define CXADEC_VBI_CUST3_CFG1      0x464
+#define CXADEC_VBI_CUST3_CFG2      0x468
+#define CXADEC_VBI_CUST3_CFG3      0x46C
+#define CXADEC_HORIZ_TIM_CTRL      0x470
+#define CXADEC_VERT_TIM_CTRL       0x474
+#define CXADEC_SRC_COMB_CFG        0x478
+#define CXADEC_CHROMA_VBIOFF_CFG   0x47C
+#define CXADEC_FIELD_COUNT         0x480
+#define CXADEC_MISC_TIM_CTRL       0x484
+#define CXADEC_DFE_CTRL1           0x488
+#define CXADEC_DFE_CTRL2           0x48C
+#define CXADEC_DFE_CTRL3           0x490
+#define CXADEC_PLL_CTRL2           0x494
+#define CXADEC_HTL_CTRL            0x498
+#define CXADEC_COMB_CTRL           0x49C
+#define CXADEC_CRUSH_CTRL          0x4A0
+#define CXADEC_SOFT_RST_CTRL       0x4A4
+#define CXADEC_MV_DT_CTRL2         0x4A8
+#define CXADEC_MV_DT_CTRL3         0x4AC
+#define CXADEC_MISC_DIAG_CTRL      0x4B8
+
+#define CXADEC_DL_CTL              0x800
+#define CXADEC_DL_CTL_ADDRESS_LOW  0x800   /* Byte 1 in DL_CTL */
+#define CXADEC_DL_CTL_ADDRESS_HIGH 0x801   /* Byte 2 in DL_CTL */
+#define CXADEC_DL_CTL_DATA         0x802   /* Byte 3 in DL_CTL */
+#define CXADEC_DL_CTL_CONTROL      0x803   /* Byte 4 in DL_CTL */
+
+#define CXADEC_STD_DET_STATUS      0x804
+
+#define CXADEC_STD_DET_CTL         0x808
+#define CXADEC_STD_DET_CTL_AUD_CTL   0x808 /* Byte 1 in STD_DET_CTL */
+#define CXADEC_STD_DET_CTL_PREF_MODE 0x809 /* Byte 2 in STD_DET_CTL */
+
+#define CXADEC_DW8051_INT          0x80C
+#define CXADEC_GENERAL_CTL         0x810
+#define CXADEC_AAGC_CTL            0x814
+#define CXADEC_IF_SRC_CTL          0x818
+#define CXADEC_ANLOG_DEMOD_CTL     0x81C
+#define CXADEC_ROT_FREQ_CTL        0x820
+#define CXADEC_FM1_CTL             0x824
+#define CXADEC_PDF_CTL             0x828
+#define CXADEC_DFT1_CTL1           0x82C
+#define CXADEC_DFT1_CTL2           0x830
+#define CXADEC_DFT_STATUS          0x834
+#define CXADEC_DFT2_CTL1           0x838
+#define CXADEC_DFT2_CTL2           0x83C
+#define CXADEC_DFT2_STATUS         0x840
+#define CXADEC_DFT3_CTL1           0x844
+#define CXADEC_DFT3_CTL2           0x848
+#define CXADEC_DFT3_STATUS         0x84C
+#define CXADEC_DFT4_CTL1           0x850
+#define CXADEC_DFT4_CTL2           0x854
+#define CXADEC_DFT4_STATUS         0x858
+#define CXADEC_AM_MTS_DET          0x85C
+#define CXADEC_ANALOG_MUX_CTL      0x860
+#define CXADEC_DIG_PLL_CTL1        0x864
+#define CXADEC_DIG_PLL_CTL2        0x868
+#define CXADEC_DIG_PLL_CTL3        0x86C
+#define CXADEC_DIG_PLL_CTL4        0x870
+#define CXADEC_DIG_PLL_CTL5        0x874
+#define CXADEC_DEEMPH_GAIN_CTL     0x878
+#define CXADEC_DEEMPH_COEF1        0x87C
+#define CXADEC_DEEMPH_COEF2        0x880
+#define CXADEC_DBX1_CTL1           0x884
+#define CXADEC_DBX1_CTL2           0x888
+#define CXADEC_DBX1_STATUS         0x88C
+#define CXADEC_DBX2_CTL1           0x890
+#define CXADEC_DBX2_CTL2           0x894
+#define CXADEC_DBX2_STATUS         0x898
+#define CXADEC_AM_FM_DIFF          0x89C
+
+/* NICAM registers go here */
+#define CXADEC_NICAM_STATUS        0x8C8
+#define CXADEC_DEMATRIX_CTL        0x8CC
+
+#define CXADEC_PATH1_CTL1          0x8D0
+#define CXADEC_PATH1_VOL_CTL       0x8D4
+#define CXADEC_PATH1_EQ_CTL        0x8D8
+#define CXADEC_PATH1_SC_CTL        0x8DC
+
+#define CXADEC_PATH2_CTL1          0x8E0
+#define CXADEC_PATH2_VOL_CTL       0x8E4
+#define CXADEC_PATH2_EQ_CTL        0x8E8
+#define CXADEC_PATH2_SC_CTL        0x8EC
+
+#define CXADEC_SRC_CTL             0x8F0
+#define CXADEC_SRC_LF_COEF         0x8F4
+#define CXADEC_SRC1_CTL            0x8F8
+#define CXADEC_SRC2_CTL            0x8FC
+#define CXADEC_SRC3_CTL            0x900
+#define CXADEC_SRC4_CTL            0x904
+#define CXADEC_SRC5_CTL            0x908
+#define CXADEC_SRC6_CTL            0x90C
+
+#define CXADEC_BASEBAND_OUT_SEL    0x910
+#define CXADEC_I2S_IN_CTL          0x914
+#define CXADEC_I2S_OUT_CTL         0x918
+#define CXADEC_AC97_CTL            0x91C
+#define CXADEC_QAM_PDF             0x920
+#define CXADEC_QAM_CONST_DEC       0x924
+#define CXADEC_QAM_ROTATOR_FREQ    0x948
+
+/* Bit definitions / settings used in Mako Audio */
+#define CXADEC_PREF_MODE_MONO_LANGA        0
+#define CXADEC_PREF_MODE_MONO_LANGB        1
+#define CXADEC_PREF_MODE_MONO_LANGC        2
+#define CXADEC_PREF_MODE_FALLBACK          3
+#define CXADEC_PREF_MODE_STEREO            4
+#define CXADEC_PREF_MODE_DUAL_LANG_AC      5
+#define CXADEC_PREF_MODE_DUAL_LANG_BC      6
+#define CXADEC_PREF_MODE_DUAL_LANG_AB      7
+
+
+#define CXADEC_DETECT_STEREO               1
+#define CXADEC_DETECT_DUAL                 2
+#define CXADEC_DETECT_TRI                  4
+#define CXADEC_DETECT_SAP                  0x10
+#define CXADEC_DETECT_NO_SIGNAL            0xFF
+
+#define CXADEC_SELECT_AUDIO_STANDARD_BG    0xF0  /* NICAM BG and A2 BG */
+#define CXADEC_SELECT_AUDIO_STANDARD_DK1   0xF1  /* NICAM DK and A2 DK */
+#define CXADEC_SELECT_AUDIO_STANDARD_DK2   0xF2
+#define CXADEC_SELECT_AUDIO_STANDARD_DK3   0xF3
+#define CXADEC_SELECT_AUDIO_STANDARD_I     0xF4  /* NICAM I and A1 */
+#define CXADEC_SELECT_AUDIO_STANDARD_L     0xF5  /* NICAM L and System L AM */
+#define CXADEC_SELECT_AUDIO_STANDARD_BTSC  0xF6
+#define CXADEC_SELECT_AUDIO_STANDARD_EIAJ  0xF7
+#define CXADEC_SELECT_AUDIO_STANDARD_A2_M  0xF8  /* A2 M */
+#define CXADEC_SELECT_AUDIO_STANDARD_FM    0xF9  /* FM radio */
+#define CXADEC_SELECT_AUDIO_STANDARD_AUTO  0xFF  /* Auto detect */
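+/* For example, cx18_av_loadfw() below writes 0xF6 to CXADEC_STD_DET_CTL,
+   which matches the BTSC selection above; writing 0xFF instead requests
+   automatic standard detection. */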
+
+/* ----------------------------------------------------------------------- */
+/* cx18-av-core.c                                                         */
+int cx18_av_write(struct cx18 *cx, u16 addr, u8 value);
+int cx18_av_write4(struct cx18 *cx, u16 addr, u32 value);
+u8 cx18_av_read(struct cx18 *cx, u16 addr);
+u32 cx18_av_read4(struct cx18 *cx, u16 addr);
+int cx18_av_and_or(struct cx18 *cx, u16 addr, unsigned mask, u8 value);
+int cx18_av_and_or4(struct cx18 *cx, u16 addr, u32 mask, u32 value);
+int cx18_av_cmd(struct cx18 *cx, unsigned int cmd, void *arg);
+
+/* ----------------------------------------------------------------------- */
+/* cx18-av-firmware.c                                                     */
+int cx18_av_loadfw(struct cx18 *cx);
+
+/* ----------------------------------------------------------------------- */
+/* cx18-av-audio.c                                                        */
+int cx18_av_audio(struct cx18 *cx, unsigned int cmd, void *arg);
+void cx18_av_audio_set_path(struct cx18 *cx);
+
+/* ----------------------------------------------------------------------- */
+/* cx18-av-vbi.c                                                          */
+void cx18_av_vbi_setup(struct cx18 *cx);
+int cx18_av_vbi(struct cx18 *cx, unsigned int cmd, void *arg);
+
+#endif
diff --git a/drivers/media/video/cx18/cx18-av-firmware.c b/drivers/media/video/cx18/cx18-av-firmware.c
new file mode 100644 (file)
index 0000000..526e142
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ *  cx18 ADEC firmware functions
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ *  02110-1301, USA.
+ */
+
+#include "cx18-driver.h"
+#include <linux/firmware.h>
+
+#define FWFILE "v4l-cx23418-dig.fw"
+
+int cx18_av_loadfw(struct cx18 *cx)
+{
+       const struct firmware *fw = NULL;
+       u32 size;
+       u32 v;
+       u8 *ptr;
+       int i;
+
+       if (request_firmware(&fw, FWFILE, &cx->dev->dev) != 0) {
+               CX18_ERR("unable to open firmware %s\n", FWFILE);
+               return -EINVAL;
+       }
+
+       cx18_av_write4(cx, CXADEC_CHIP_CTRL, 0x00010000);
+       cx18_av_write(cx, CXADEC_STD_DET_CTL, 0xf6); /* Byte 0 */
+
+       /* Reset the Mako core (Register is undocumented.) */
+       cx18_av_write4(cx, 0x8100, 0x00010000);
+
+       /* Put the 8051 in reset and enable firmware upload */
+       cx18_av_write4(cx, CXADEC_DL_CTL, 0x0F000000);
+
+       ptr = fw->data;
+       size = fw->size;
+
+       for (i = 0; i < size; i++) {
+               u32 dl_control = 0x0F000000 | ((u32)ptr[i] << 16);
+               u32 value = 0;
+               int retries;
+
+               for (retries = 0; retries < 5; retries++) {
+                       cx18_av_write4(cx, CXADEC_DL_CTL, dl_control);
+                       value = cx18_av_read4(cx, CXADEC_DL_CTL);
+                       if ((value & 0x3F00) == (dl_control & 0x3F00))
+                               break;
+               }
+               if (retries >= 5) {
+                       CX18_ERR("unable to load firmware %s\n", FWFILE);
+                       release_firmware(fw);
+                       return -EIO;
+               }
+       }
+
+       cx18_av_write4(cx, CXADEC_DL_CTL, 0x13000000 | fw->size);
+
+       /* Output to the 416 */
+       cx18_av_and_or4(cx, CXADEC_PIN_CTRL1, ~0, 0x78000);
+
+       /* Audio input control 1 is set to Sony mode */
+       /* Audio input control 2 is set to 0 for slave operation */
+       /* 0xC4000914[5]: 0 = left sample on WS=0, 1 = left sample on WS=1 */
+       /* 0xC4000914[7]: 0 = Philips mode, 1 = Sony mode (1st SCK rising edge
+          after WS transition for first bit of audio word.) */
+       cx18_av_write4(cx, CXADEC_I2S_IN_CTL, 0x000000A0);
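+       /* i.e. 0xA0 sets bits 7 and 5: Sony I2S timing, left sample on WS=1 */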
+
+       /* Audio output control 1 is set to Sony mode */
+       /* Audio output control 2 is set to 1 for master mode */
+       /* 0xC4000918[5]: 0 = left sample on WS=0, 1 = left sample on WS=1 */
+       /* 0xC4000918[7]: 0 = Philips mode, 1 = Sony mode (1st SCK rising edge
+          after WS transition for first bit of audio word.) */
+       /* 0xC4000918[8]: 0 = slave operation, 1 = master (SCK_OUT and WS_OUT
+          are generated) */
+       cx18_av_write4(cx, CXADEC_I2S_OUT_CTL, 0x000001A0);
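+       /* i.e. 0x1A0 sets bits 8, 7 and 5: master mode, Sony I2S timing,
+          left sample on WS=1 */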
+
+       /* set alt I2s master clock to /16 and enable alt divider i2s
+          passthrough */
+       cx18_av_write4(cx, CXADEC_PIN_CFG3, 0x5000B687);
+
+       cx18_av_write4(cx, CXADEC_STD_DET_CTL, 0x000000F6);
+       /* CxDevWrReg(CXADEC_STD_DET_CTL, 0x000000FF); */
+
+       /* Set bit 0 in register 0x9CC to signify that this is MiniMe. */
+       /* Register 0x09CC is defined by the Merlin firmware, and doesn't
+          have a name in the spec. */
+       cx18_av_write4(cx, 0x09CC, 1);
+
+#define CX18_AUDIO_ENABLE              0xc72014
+       v = read_reg(CX18_AUDIO_ENABLE);
+       /* If bit 11 is 1 */
+       if (v & 0x800)
+               write_reg(v & 0xFFFFFBFF, CX18_AUDIO_ENABLE); /* Clear bit 10 */
+
+       /* Enable WW auto audio standard detection */
+       v = cx18_av_read4(cx, CXADEC_STD_DET_CTL);
+       v |= 0xFF;   /* Auto by default */
+       v |= 0x400;  /* Stereo by default */
+       v |= 0x14000000;
+       cx18_av_write4(cx, CXADEC_STD_DET_CTL, v);
+
+       release_firmware(fw);
+
+       CX18_INFO("loaded %s firmware (%d bytes)\n", FWFILE, size);
+       return 0;
+}
diff --git a/drivers/media/video/cx18/cx18-av-vbi.c b/drivers/media/video/cx18/cx18-av-vbi.c
new file mode 100644 (file)
index 0000000..d09f1da
--- /dev/null
@@ -0,0 +1,413 @@
+/*
+ *  cx18 ADEC VBI functions
+ *
+ *  Derived from cx25840-vbi.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version 2
+ *  of the License, or (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ *  02110-1301, USA.
+ */
+
+
+#include "cx18-driver.h"
+
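+/* Returns 1 if the 8-bit value has odd parity, by folding the byte onto
+   itself; used below to validate closed caption data bytes. */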
+static int odd_parity(u8 c)
+{
+       c ^= (c >> 4);
+       c ^= (c >> 2);
+       c ^= (c >> 1);
+
+       return c & 1;
+}
+
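+/* Decodes 13 VPS data bytes from 26 bytes of raw biphase-coded input.  Each
+   table entry holds the decoded nibble in its low bits and error flags in
+   its high bits; a non-zero return value means the biphase code was invalid. */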
+static int decode_vps(u8 *dst, u8 *p)
+{
+       static const u8 biphase_tbl[] = {
+               0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
+               0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
+               0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96,
+               0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2,
+               0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94,
+               0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0,
+               0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
+               0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
+               0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5,
+               0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1,
+               0xc3, 0x4b, 0x43, 0xc3, 0x87, 0x0f, 0x07, 0x87,
+               0x83, 0x0b, 0x03, 0x83, 0xc3, 0x4b, 0x43, 0xc3,
+               0xc1, 0x49, 0x41, 0xc1, 0x85, 0x0d, 0x05, 0x85,
+               0x81, 0x09, 0x01, 0x81, 0xc1, 0x49, 0x41, 0xc1,
+               0xe1, 0x69, 0x61, 0xe1, 0xa5, 0x2d, 0x25, 0xa5,
+               0xa1, 0x29, 0x21, 0xa1, 0xe1, 0x69, 0x61, 0xe1,
+               0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4,
+               0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0,
+               0xc2, 0x4a, 0x42, 0xc2, 0x86, 0x0e, 0x06, 0x86,
+               0x82, 0x0a, 0x02, 0x82, 0xc2, 0x4a, 0x42, 0xc2,
+               0xc0, 0x48, 0x40, 0xc0, 0x84, 0x0c, 0x04, 0x84,
+               0x80, 0x08, 0x00, 0x80, 0xc0, 0x48, 0x40, 0xc0,
+               0xe0, 0x68, 0x60, 0xe0, 0xa4, 0x2c, 0x24, 0xa4,
+               0xa0, 0x28, 0x20, 0xa0, 0xe0, 0x68, 0x60, 0xe0,
+               0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
+               0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
+               0xd2, 0x5a, 0x52, 0xd2, 0x96, 0x1e, 0x16, 0x96,
+               0x92, 0x1a, 0x12, 0x92, 0xd2, 0x5a, 0x52, 0xd2,
+               0xd0, 0x58, 0x50, 0xd0, 0x94, 0x1c, 0x14, 0x94,
+               0x90, 0x18, 0x10, 0x90, 0xd0, 0x58, 0x50, 0xd0,
+               0xf0, 0x78, 0x70, 0xf0, 0xb4, 0x3c, 0x34, 0xb4,
+               0xb0, 0x38, 0x30, 0xb0, 0xf0, 0x78, 0x70, 0xf0,
+       };
+
+       u8 c, err = 0;
+       int i;
+
+       for (i = 0; i < 2 * 13; i += 2) {
+               err |= biphase_tbl[p[i]] | biphase_tbl[p[i + 1]];
+               c = (biphase_tbl[p[i + 1]] & 0xf) |
+                   ((biphase_tbl[p[i]] & 0xf) << 4);
+               dst[i / 2] = c;
+       }
+
+       return err & 0xf0;
+}
+
+void cx18_av_vbi_setup(struct cx18 *cx)
+{
+       struct cx18_av_state *state = &cx->av_state;
+       v4l2_std_id std = state->std;
+       int hblank, hactive, burst, vblank, vactive, sc;
+       int vblank656, src_decimation;
+       int luma_lpf, uv_lpf, comb;
+       u32 pll_int, pll_frac, pll_post;
+
+       /* datasheet startup, step 8d */
+       if (std & ~V4L2_STD_NTSC)
+               cx18_av_write(cx, 0x49f, 0x11);
+       else
+               cx18_av_write(cx, 0x49f, 0x14);
+
+       if (std & V4L2_STD_625_50) {
+               hblank = 0x084;
+               hactive = 0x2d0;
+               burst = 0x5d;
+               vblank = 0x024;
+               vactive = 0x244;
+               vblank656 = 0x28;
+               src_decimation = 0x21f;
+
+               luma_lpf = 2;
+               if (std & V4L2_STD_SECAM) {
+                       uv_lpf = 0;
+                       comb = 0;
+                       sc = 0x0a425f;
+               } else if (std == V4L2_STD_PAL_Nc) {
+                       uv_lpf = 1;
+                       comb = 0x20;
+                       sc = 556453;
+               } else {
+                       uv_lpf = 1;
+                       comb = 0x20;
+                       sc = 0x0a8263;
+               }
+       } else {
+               hactive = 720;
+               hblank = 122;
+               vactive = 487;
+               luma_lpf = 1;
+               uv_lpf = 1;
+
+               src_decimation = 0x21f;
+               if (std == V4L2_STD_PAL_60) {
+                       vblank = 26;
+                       vblank656 = 26;
+                       burst = 0x5b;
+                       luma_lpf = 2;
+                       comb = 0x20;
+                       sc = 0x0a8263;
+               } else if (std == V4L2_STD_PAL_M) {
+                       vblank = 20;
+                       vblank656 = 24;
+                       burst = 0x61;
+                       comb = 0x20;
+
+                       sc = 555452;
+               } else {
+                       vblank = 26;
+                       vblank656 = 26;
+                       burst = 0x5b;
+                       comb = 0x66;
+                       sc = 556063;
+               }
+       }
+
+       /* DEBUG: Displays configured PLL frequency */
+       pll_int = cx18_av_read(cx, 0x108);
+       pll_frac = cx18_av_read4(cx, 0x10c) & 0x1ffffff;
+       pll_post = cx18_av_read(cx, 0x109);
+       CX18_DEBUG_INFO("PLL regs = int: %u, frac: %u, post: %u\n",
+                       pll_int, pll_frac, pll_post);
+
+       if (pll_post) {
+               int fin, fsc;
+               int pll = 28636363L * ((((u64)pll_int) << 25) + pll_frac);
+
+               pll >>= 25;
+               pll /= pll_post;
+               CX18_DEBUG_INFO("PLL = %d.%06d MHz\n",
+                                       pll / 1000000, pll % 1000000);
+               CX18_DEBUG_INFO("PLL/8 = %d.%06d MHz\n",
+                                       pll / 8000000, (pll / 8) % 1000000);
+
+               fin = ((u64)src_decimation * pll) >> 12;
+               CX18_DEBUG_INFO("ADC Sampling freq = %d.%06d MHz\n",
+                                       fin / 1000000, fin % 1000000);
+
+               fsc = (((u64)sc) * pll) >> 24L;
+               CX18_DEBUG_INFO("Chroma sub-carrier freq = %d.%06d MHz\n",
+                                       fsc / 1000000, fsc % 1000000);
+
+               CX18_DEBUG_INFO("hblank %i, hactive %i, "
+                       "vblank %i, vactive %i, vblank656 %i, src_dec %i, "
+                       "burst 0x%02x, luma_lpf %i, uv_lpf %i, comb 0x%02x, "
+                       "sc 0x%06x\n",
+                       hblank, hactive, vblank, vactive, vblank656,
+                       src_decimation, burst, luma_lpf, uv_lpf, comb, sc);
+       }
+
+       /* Sets horizontal blanking delay and active lines */
+       cx18_av_write(cx, 0x470, hblank);
+       cx18_av_write(cx, 0x471, 0xff & (((hblank >> 8) & 0x3) |
+                                               (hactive << 4)));
+       cx18_av_write(cx, 0x472, hactive >> 4);
+
+       /* Sets burst gate delay */
+       cx18_av_write(cx, 0x473, burst);
+
+       /* Sets vertical blanking delay and active duration */
+       cx18_av_write(cx, 0x474, vblank);
+       cx18_av_write(cx, 0x475, 0xff & (((vblank >> 8) & 0x3) |
+                                               (vactive << 4)));
+       cx18_av_write(cx, 0x476, vactive >> 4);
+       cx18_av_write(cx, 0x477, vblank656);
+
+       /* Sets src decimation rate */
+       cx18_av_write(cx, 0x478, 0xff & src_decimation);
+       cx18_av_write(cx, 0x479, 0xff & (src_decimation >> 8));
+
+       /* Sets Luma and UV Low pass filters */
+       cx18_av_write(cx, 0x47a, luma_lpf << 6 | ((uv_lpf << 4) & 0x30));
+
+       /* Enables comb filters */
+       cx18_av_write(cx, 0x47b, comb);
+
+       /* Sets SC Step */
+       cx18_av_write(cx, 0x47c, sc);
+       cx18_av_write(cx, 0x47d, 0xff & sc >> 8);
+       cx18_av_write(cx, 0x47e, 0xff & sc >> 16);
+
+       /* Sets VBI parameters */
+       if (std & V4L2_STD_625_50) {
+               cx18_av_write(cx, 0x47f, 0x01);
+               state->vbi_line_offset = 5;
+       } else {
+               cx18_av_write(cx, 0x47f, 0x00);
+               state->vbi_line_offset = 8;
+       }
+}
+
+int cx18_av_vbi(struct cx18 *cx, unsigned int cmd, void *arg)
+{
+       struct cx18_av_state *state = &cx->av_state;
+       struct v4l2_format *fmt;
+       struct v4l2_sliced_vbi_format *svbi;
+
+       switch (cmd) {
+       case VIDIOC_G_FMT:
+       {
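+               /* Maps the per-line 4-bit codes programmed into registers
+                  0x424 and up back to V4L2 sliced services; the same codes
+                  (1, 4, 6, 9) are written in the VIDIOC_S_FMT case below. */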
+               static u16 lcr2vbi[] = {
+                       0, V4L2_SLICED_TELETEXT_B, 0,   /* 1 */
+                       0, V4L2_SLICED_WSS_625, 0,      /* 4 */
+                       V4L2_SLICED_CAPTION_525,        /* 6 */
+                       0, 0, V4L2_SLICED_VPS, 0, 0,    /* 9 */
+                       0, 0, 0, 0
+               };
+               int is_pal = !(state->std & V4L2_STD_525_60);
+               int i;
+
+               fmt = arg;
+               if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+                       return -EINVAL;
+               svbi = &fmt->fmt.sliced;
+               memset(svbi, 0, sizeof(*svbi));
+               /* we're done if raw VBI is active */
+               if ((cx18_av_read(cx, 0x404) & 0x10) == 0)
+                       break;
+
+               if (is_pal) {
+                       for (i = 7; i <= 23; i++) {
+                               u8 v = cx18_av_read(cx, 0x424 + i - 7);
+
+                               svbi->service_lines[0][i] = lcr2vbi[v >> 4];
+                               svbi->service_lines[1][i] = lcr2vbi[v & 0xf];
+                               svbi->service_set |= svbi->service_lines[0][i] |
+                                       svbi->service_lines[1][i];
+                       }
+               } else {
+                       for (i = 10; i <= 21; i++) {
+                               u8 v = cx18_av_read(cx, 0x424 + i - 10);
+
+                               svbi->service_lines[0][i] = lcr2vbi[v >> 4];
+                               svbi->service_lines[1][i] = lcr2vbi[v & 0xf];
+                               svbi->service_set |= svbi->service_lines[0][i] |
+                                       svbi->service_lines[1][i];
+                       }
+               }
+               break;
+       }
+
+       case VIDIOC_S_FMT:
+       {
+               int is_pal = !(state->std & V4L2_STD_525_60);
+               int vbi_offset = is_pal ? 1 : 0;
+               int i, x;
+               u8 lcr[24];
+
+               fmt = arg;
+               if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+                       return -EINVAL;
+               svbi = &fmt->fmt.sliced;
+               if (svbi->service_set == 0) {
+                       /* raw VBI */
+                       memset(svbi, 0, sizeof(*svbi));
+
+                       /* Setup VBI */
+                       cx18_av_vbi_setup(cx);
+
+                       /* VBI Offset */
+                       cx18_av_write(cx, 0x47f, vbi_offset);
+                       cx18_av_write(cx, 0x404, 0x2e);
+                       break;
+               }
+
+               for (x = 0; x <= 23; x++)
+                       lcr[x] = 0x00;
+
+               /* Setup VBI */
+               cx18_av_vbi_setup(cx);
+
+               /* Sliced VBI */
+               cx18_av_write(cx, 0x404, 0x32); /* Ancillary data */
+               cx18_av_write(cx, 0x406, 0x13);
+               cx18_av_write(cx, 0x47f, vbi_offset);
+
+               if (is_pal) {
+                       for (i = 0; i <= 6; i++)
+                               svbi->service_lines[0][i] =
+                                       svbi->service_lines[1][i] = 0;
+               } else {
+                       for (i = 0; i <= 9; i++)
+                               svbi->service_lines[0][i] =
+                                       svbi->service_lines[1][i] = 0;
+
+                       for (i = 22; i <= 23; i++)
+                               svbi->service_lines[0][i] =
+                                       svbi->service_lines[1][i] = 0;
+               }
+
+               for (i = 7; i <= 23; i++) {
+                       for (x = 0; x <= 1; x++) {
+                               switch (svbi->service_lines[1-x][i]) {
+                               case V4L2_SLICED_TELETEXT_B:
+                                       lcr[i] |= 1 << (4 * x);
+                                       break;
+                               case V4L2_SLICED_WSS_625:
+                                       lcr[i] |= 4 << (4 * x);
+                                       break;
+                               case V4L2_SLICED_CAPTION_525:
+                                       lcr[i] |= 6 << (4 * x);
+                                       break;
+                               case V4L2_SLICED_VPS:
+                                       lcr[i] |= 9 << (4 * x);
+                                       break;
+                               }
+                       }
+               }
+
+               if (is_pal) {
+                       for (x = 1, i = 0x424; i <= 0x434; i++, x++)
+                               cx18_av_write(cx, i, lcr[6 + x]);
+               } else {
+                       for (x = 1, i = 0x424; i <= 0x430; i++, x++)
+                               cx18_av_write(cx, i, lcr[9 + x]);
+                       for (i = 0x431; i <= 0x434; i++)
+                               cx18_av_write(cx, i, 0);
+               }
+
+               cx18_av_write(cx, 0x43c, 0x16);
+               cx18_av_write(cx, 0x474, is_pal ? 0x2a : 0x22);
+               break;
+       }
+
+       case VIDIOC_INT_DECODE_VBI_LINE:
+       {
+               struct v4l2_decode_vbi_line *vbi = arg;
+               u8 *p = vbi->p;
+               int id1, id2, l, err = 0;
+
+               if (p[0] || p[1] != 0xff || p[2] != 0xff ||
+                   (p[3] != 0x55 && p[3] != 0x91)) {
+                       vbi->line = vbi->type = 0;
+                       break;
+               }
+
+               p += 4;
+               id1 = p[-1];
+               id2 = p[0] & 0xf;
+               l = p[2] & 0x3f;
+               l += state->vbi_line_offset;
+               p += 4;
+
+               switch (id2) {
+               case 1:
+                       id2 = V4L2_SLICED_TELETEXT_B;
+                       break;
+               case 4:
+                       id2 = V4L2_SLICED_WSS_625;
+                       break;
+               case 6:
+                       id2 = V4L2_SLICED_CAPTION_525;
+                       err = !odd_parity(p[0]) || !odd_parity(p[1]);
+                       break;
+               case 9:
+                       id2 = V4L2_SLICED_VPS;
+                       if (decode_vps(p, p) != 0)
+                               err = 1;
+                       break;
+               default:
+                       id2 = 0;
+                       err = 1;
+                       break;
+               }
+
+               vbi->type = err ? 0 : id2;
+               vbi->line = err ? 0 : l;
+               vbi->is_second_field = err ? 0 : (id1 == 0x55);
+               vbi->p = p;
+               break;
+       }
+       }
+
+       return 0;
+}
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
new file mode 100644 (file)
index 0000000..f5e3ba1
--- /dev/null
@@ -0,0 +1,277 @@
+/*
+ *  cx18 functions to query card hardware
+ *
+ *  Derived from ivtv-cards.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-cards.h"
+#include "cx18-i2c.h"
+#include <media/cs5345.h>
+
+/********************** card configuration *******************************/
+
+/* usual i2c tuner addresses to probe */
+static struct cx18_card_tuner_i2c cx18_i2c_std = {
+       .radio = { I2C_CLIENT_END },
+       .demod = { 0x43, I2C_CLIENT_END },
+       .tv    = { 0x61, 0x60, I2C_CLIENT_END },
+};
+
+/* Please add new PCI IDs to: http://pci-ids.ucw.cz/iii
+   This keeps the PCI ID database up to date. Note that the entries
+   must be added under vendor 0x4444 (Conexant) as subsystem IDs.
+   New vendor IDs should still be added to the vendor ID list. */
+
+/* Hauppauge HVR-1600 cards */
+
+/* Note: for Hauppauge cards the tveeprom information is used instead
+   of PCI IDs */
+static const struct cx18_card cx18_card_hvr1600_esmt = {
+       .type = CX18_CARD_HVR_1600_ESMT,
+       .name = "Hauppauge HVR-1600",
+       .comment = "DVB & VBI are not yet supported\n",
+       .v4l2_capabilities = CX18_CAP_ENCODER,
+       .hw_audio_ctrl = CX18_HW_CX23418,
+       .hw_muxer = CX18_HW_CS5345,
+       .hw_all = CX18_HW_TVEEPROM | CX18_HW_TUNER | CX18_HW_CS5345,
+       .video_inputs = {
+               { CX18_CARD_INPUT_VID_TUNER,  0, CX23418_COMPOSITE7 },
+               { CX18_CARD_INPUT_SVIDEO1,    1, CX23418_SVIDEO1    },
+               { CX18_CARD_INPUT_COMPOSITE1, 1, CX23418_COMPOSITE3 },
+               { CX18_CARD_INPUT_SVIDEO2,    2, CX23418_SVIDEO2    },
+               { CX18_CARD_INPUT_COMPOSITE2, 2, CX23418_COMPOSITE4 },
+       },
+       .audio_inputs = {
+               { CX18_CARD_INPUT_AUD_TUNER,
+                 CX23418_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 },
+               { CX18_CARD_INPUT_LINE_IN1,
+                 CX23418_AUDIO_SERIAL, CS5345_IN_2 },
+               { CX18_CARD_INPUT_LINE_IN2,
+                 CX23418_AUDIO_SERIAL, CS5345_IN_2 },
+       },
+       .radio_input = { CX18_CARD_INPUT_AUD_TUNER,
+                        CX23418_AUDIO_SERIAL, 0 },
+       .ddr = {
+               /* ESMT M13S128324A-5B memory */
+               .chip_config = 0x003,
+               .refresh = 0x30c,
+               .timing1 = 0x44220e82,
+               .timing2 = 0x08,
+               .tune_lane = 0,
+               .initial_emrs = 0,
+       },
+       .gpio_init.initial_value = 0x3001,
+       .gpio_init.direction = 0x3001,
+       .i2c = &cx18_i2c_std,
+};
+
+static const struct cx18_card cx18_card_hvr1600_samsung = {
+       .type = CX18_CARD_HVR_1600_SAMSUNG,
+       .name = "Hauppauge HVR-1600 (Preproduction)",
+       .comment = "DVB & VBI are not yet supported\n",
+       .v4l2_capabilities = CX18_CAP_ENCODER,
+       .hw_audio_ctrl = CX18_HW_CX23418,
+       .hw_muxer = CX18_HW_CS5345,
+       .hw_all = CX18_HW_TVEEPROM | CX18_HW_TUNER | CX18_HW_CS5345,
+       .video_inputs = {
+               { CX18_CARD_INPUT_VID_TUNER,  0, CX23418_COMPOSITE7 },
+               { CX18_CARD_INPUT_SVIDEO1,    1, CX23418_SVIDEO1    },
+               { CX18_CARD_INPUT_COMPOSITE1, 1, CX23418_COMPOSITE3 },
+               { CX18_CARD_INPUT_SVIDEO2,    2, CX23418_SVIDEO2    },
+               { CX18_CARD_INPUT_COMPOSITE2, 2, CX23418_COMPOSITE4 },
+       },
+       .audio_inputs = {
+               { CX18_CARD_INPUT_AUD_TUNER,
+                 CX23418_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 },
+               { CX18_CARD_INPUT_LINE_IN1,
+                 CX23418_AUDIO_SERIAL, CS5345_IN_2 },
+               { CX18_CARD_INPUT_LINE_IN2,
+                 CX23418_AUDIO_SERIAL, CS5345_IN_2 },
+       },
+       .radio_input = { CX18_CARD_INPUT_AUD_TUNER,
+                        CX23418_AUDIO_SERIAL, 0 },
+       .ddr = {
+               /* Samsung K4D263238G-VC33 memory */
+               .chip_config = 0x003,
+               .refresh = 0x30c,
+               .timing1 = 0x23230b73,
+               .timing2 = 0x08,
+               .tune_lane = 0,
+               .initial_emrs = 2,
+       },
+       .gpio_init.initial_value = 0x3001,
+       .gpio_init.direction = 0x3001,
+       .i2c = &cx18_i2c_std,
+};
+
+/* ------------------------------------------------------------------------- */
+
+/* Compro VideoMate H900: not working at the moment! */
+
+static const struct cx18_card_pci_info cx18_pci_h900[] = {
+       { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_COMPRO, 0xe100 },
+       { 0, 0, 0 }
+};
+
+static const struct cx18_card cx18_card_h900 = {
+       .type = CX18_CARD_COMPRO_H900,
+       .name = "Compro VideoMate H900",
+       .comment = "Not yet supported!\n",
+       .v4l2_capabilities = 0,
+       .hw_audio_ctrl = CX18_HW_CX23418,
+       .hw_all = CX18_HW_TUNER,
+       .video_inputs = {
+               { CX18_CARD_INPUT_VID_TUNER,  0, CX23418_COMPOSITE7 },
+               { CX18_CARD_INPUT_SVIDEO1,    1, CX23418_SVIDEO1    },
+               { CX18_CARD_INPUT_COMPOSITE1, 1, CX23418_COMPOSITE3 },
+       },
+       .audio_inputs = {
+               { CX18_CARD_INPUT_AUD_TUNER,
+                 CX23418_AUDIO8, 0 },
+               { CX18_CARD_INPUT_LINE_IN1,
+                 CX23418_AUDIO_SERIAL, 0 },
+       },
+       .radio_input = { CX18_CARD_INPUT_AUD_TUNER,
+                        CX23418_AUDIO_SERIAL, 0 },
+       .tuners = {
+               { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
+       },
+       .ddr = {
+               /* EtronTech EM6A9160TS-5G memory */
+               .chip_config = 0x50003,
+               .refresh = 0x753,
+               .timing1 = 0x24330e84,
+               .timing2 = 0x1f,
+               .tune_lane = 0,
+               .initial_emrs = 0,
+       },
+       .pci_list = cx18_pci_h900,
+       .i2c = &cx18_i2c_std,
+};
+
+/* ------------------------------------------------------------------------- */
+
+/* Yuan MPC718: not working at the moment! */
+
+static const struct cx18_card_pci_info cx18_pci_mpc718[] = {
+       { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_YUAN, 0x0718 },
+       { 0, 0, 0 }
+};
+
+static const struct cx18_card cx18_card_mpc718 = {
+       .type = CX18_CARD_YUAN_MPC718,
+       .name = "Yuan MPC718",
+       .comment = "Not yet supported!\n",
+       .v4l2_capabilities = 0,
+       .hw_audio_ctrl = CX18_HW_CX23418,
+       .hw_all = CX18_HW_TUNER,
+       .video_inputs = {
+               { CX18_CARD_INPUT_VID_TUNER,  0, CX23418_COMPOSITE7 },
+               { CX18_CARD_INPUT_SVIDEO1,    1, CX23418_SVIDEO1    },
+               { CX18_CARD_INPUT_COMPOSITE1, 1, CX23418_COMPOSITE3 },
+       },
+       .audio_inputs = {
+               { CX18_CARD_INPUT_AUD_TUNER,
+                 CX23418_AUDIO8, 0 },
+               { CX18_CARD_INPUT_LINE_IN1,
+                 CX23418_AUDIO_SERIAL, 0 },
+       },
+       .radio_input = { CX18_CARD_INPUT_AUD_TUNER,
+                        CX23418_AUDIO_SERIAL, 0 },
+       .tuners = {
+               /* XC3028 tuner */
+               { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
+       },
+       /* tuner reset */
+       .gpio_init = { .direction = 0x1000, .initial_value = 0x1000 },
+       .ddr = {
+               /* Probably Samsung K4D263238G-VC33 memory */
+               .chip_config = 0x003,
+               .refresh = 0x30c,
+               .timing1 = 0x23230b73,
+               .timing2 = 0x08,
+               .tune_lane = 0,
+               .initial_emrs = 2,
+       },
+       .pci_list = cx18_pci_mpc718,
+       .i2c = &cx18_i2c_std,
+};
+
+static const struct cx18_card *cx18_card_list[] = {
+       &cx18_card_hvr1600_esmt,
+       &cx18_card_hvr1600_samsung,
+       &cx18_card_h900,
+       &cx18_card_mpc718,
+};
+
+const struct cx18_card *cx18_get_card(u16 index)
+{
+       if (index >= ARRAY_SIZE(cx18_card_list))
+               return NULL;
+       return cx18_card_list[index];
+}
+
+int cx18_get_input(struct cx18 *cx, u16 index, struct v4l2_input *input)
+{
+       const struct cx18_card_video_input *card_input =
+               cx->card->video_inputs + index;
+       static const char * const input_strs[] = {
+               "Tuner 1",
+               "S-Video 1",
+               "S-Video 2",
+               "Composite 1",
+               "Composite 2",
+               "Composite 3"
+       };
+
+       memset(input, 0, sizeof(*input));
+       if (index >= cx->nof_inputs)
+               return -EINVAL;
+       input->index = index;
+       strlcpy(input->name, input_strs[card_input->video_type - 1],
+                       sizeof(input->name));
+       input->type = (card_input->video_type == CX18_CARD_INPUT_VID_TUNER ?
+                       V4L2_INPUT_TYPE_TUNER : V4L2_INPUT_TYPE_CAMERA);
+       input->audioset = (1 << cx->nof_audio_inputs) - 1;
+       input->std = (input->type == V4L2_INPUT_TYPE_TUNER) ?
+                               cx->tuner_std : V4L2_STD_ALL;
+       return 0;
+}
+
+int cx18_get_audio_input(struct cx18 *cx, u16 index, struct v4l2_audio *audio)
+{
+       const struct cx18_card_audio_input *aud_input =
+               cx->card->audio_inputs + index;
+       static const char * const input_strs[] = {
+               "Tuner 1",
+               "Line In 1",
+               "Line In 2"
+       };
+
+       memset(audio, 0, sizeof(*audio));
+       if (index >= cx->nof_audio_inputs)
+               return -EINVAL;
+       strlcpy(audio->name, input_strs[aud_input->audio_type - 1],
+                       sizeof(audio->name));
+       audio->index = index;
+       audio->capability = V4L2_AUDCAP_STEREO;
+       return 0;
+}
diff --git a/drivers/media/video/cx18/cx18-cards.h b/drivers/media/video/cx18/cx18-cards.h
new file mode 100644 (file)
index 0000000..bca249b
--- /dev/null
@@ -0,0 +1,170 @@
+/*
+ *  cx18 functions to query card hardware
+ *
+ *  Derived from ivtv-cards.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/* hardware flags */
+#define CX18_HW_TUNER     (1 << 0)
+#define CX18_HW_TVEEPROM  (1 << 1)
+#define CX18_HW_CS5345    (1 << 2)
+#define CX18_HW_GPIO      (1 << 3)
+#define CX18_HW_CX23418   (1 << 4)
+#define CX18_HW_DVB      (1 << 5)
+
+/* video inputs */
+#define        CX18_CARD_INPUT_VID_TUNER       1
+#define        CX18_CARD_INPUT_SVIDEO1         2
+#define        CX18_CARD_INPUT_SVIDEO2         3
+#define        CX18_CARD_INPUT_COMPOSITE1      4
+#define        CX18_CARD_INPUT_COMPOSITE2      5
+#define        CX18_CARD_INPUT_COMPOSITE3      6
+
+enum cx23418_video_input {
+       /* Composite video inputs In1-In8 */
+       CX23418_COMPOSITE1 = 1,
+       CX23418_COMPOSITE2,
+       CX23418_COMPOSITE3,
+       CX23418_COMPOSITE4,
+       CX23418_COMPOSITE5,
+       CX23418_COMPOSITE6,
+       CX23418_COMPOSITE7,
+       CX23418_COMPOSITE8,
+
+       /* S-Video inputs consist of one luma input (In1-In4) ORed with one
+          chroma input (In5-In8) */
+       CX23418_SVIDEO_LUMA1 = 0x10,
+       CX23418_SVIDEO_LUMA2 = 0x20,
+       CX23418_SVIDEO_LUMA3 = 0x30,
+       CX23418_SVIDEO_LUMA4 = 0x40,
+       CX23418_SVIDEO_CHROMA4 = 0x400,
+       CX23418_SVIDEO_CHROMA5 = 0x500,
+       CX23418_SVIDEO_CHROMA6 = 0x600,
+       CX23418_SVIDEO_CHROMA7 = 0x700,
+       CX23418_SVIDEO_CHROMA8 = 0x800,
+
+       /* S-Video aliases for common luma/chroma combinations */
+       CX23418_SVIDEO1 = 0x510,
+       CX23418_SVIDEO2 = 0x620,
+       CX23418_SVIDEO3 = 0x730,
+       CX23418_SVIDEO4 = 0x840,
+};
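+/* e.g. CX23418_SVIDEO1 (0x510) == CX23418_SVIDEO_CHROMA5 | CX23418_SVIDEO_LUMA1 */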
+
+/* audio inputs */
+#define        CX18_CARD_INPUT_AUD_TUNER       1
+#define        CX18_CARD_INPUT_LINE_IN1        2
+#define        CX18_CARD_INPUT_LINE_IN2        3
+
+#define CX18_CARD_MAX_VIDEO_INPUTS 6
+#define CX18_CARD_MAX_AUDIO_INPUTS 3
+#define CX18_CARD_MAX_TUNERS      2
+
+enum cx23418_audio_input {
+       /* Audio inputs: serial or In4-In8 */
+       CX23418_AUDIO_SERIAL,
+       CX23418_AUDIO4 = 4,
+       CX23418_AUDIO5,
+       CX23418_AUDIO6,
+       CX23418_AUDIO7,
+       CX23418_AUDIO8,
+};
+
+/* V4L2 capability aliases */
+#define CX18_CAP_ENCODER (V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER | \
+                         V4L2_CAP_AUDIO | V4L2_CAP_READWRITE)
+/* | V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE) not yet */
+
+struct cx18_card_video_input {
+       u8  video_type;         /* video input type */
+       u8  audio_index;        /* index in cx18_card_audio_input array */
+       u16 video_input;        /* hardware video input */
+};
+
+struct cx18_card_audio_input {
+       u8  audio_type;         /* audio input type */
+       u32 audio_input;        /* hardware audio input */
+       u16 muxer_input;        /* hardware muxer input for boards with a
+                                  multiplexer chip */
+};
+
+struct cx18_card_pci_info {
+       u16 device;
+       u16 subsystem_vendor;
+       u16 subsystem_device;
+};
+
+/* GPIO definitions */
+
+/* The mask is the set of bits used by the operation */
+
+struct cx18_gpio_init { /* set initial GPIO DIR and OUT values */
+       u16 direction;  /* DIR setting. Leave to 0 if no init is needed */
+       u16 initial_value;
+};
+
+struct cx18_card_tuner {
+       v4l2_std_id std;        /* standard for which the tuner is suitable */
+       int         tuner;      /* tuner ID (from tuner.h) */
+};
+
+struct cx18_card_tuner_i2c {
+       unsigned short radio[2];/* radio tuner i2c address to probe */
+       unsigned short demod[2];/* demodulator i2c address to probe */
+       unsigned short tv[4];   /* tv tuner i2c addresses to probe */
+};
+
+struct cx18_ddr {              /* DDR config data */
+       u32 chip_config;
+       u32 refresh;
+       u32 timing1;
+       u32 timing2;
+       u32 tune_lane;
+       u32 initial_emrs;
+};
+
+/* for card information/parameters */
+struct cx18_card {
+       int type;
+       char *name;
+       char *comment;
+       u32 v4l2_capabilities;
+       u32 hw_audio_ctrl;      /* hardware used for the V4L2 controls (only
+                                  1 dev allowed) */
+       u32 hw_muxer;           /* hardware used to multiplex audio input */
+       u32 hw_all;             /* all hardware used by the board */
+       struct cx18_card_video_input video_inputs[CX18_CARD_MAX_VIDEO_INPUTS];
+       struct cx18_card_audio_input audio_inputs[CX18_CARD_MAX_AUDIO_INPUTS];
+       struct cx18_card_audio_input radio_input;
+
+       /* GPIO card-specific settings */
+       struct cx18_gpio_init           gpio_init;
+
+       struct cx18_card_tuner tuners[CX18_CARD_MAX_TUNERS];
+       struct cx18_card_tuner_i2c *i2c;
+
+       struct cx18_ddr ddr;
+
+       /* list of device and subsystem vendor/devices that
+          correspond to this card type. */
+       const struct cx18_card_pci_info *pci_list;
+};
+
+int cx18_get_input(struct cx18 *cx, u16 index, struct v4l2_input *input);
+int cx18_get_audio_input(struct cx18 *cx, u16 index, struct v4l2_audio *input);
+const struct cx18_card *cx18_get_card(u16 index);
diff --git a/drivers/media/video/cx18/cx18-controls.c b/drivers/media/video/cx18/cx18-controls.c
new file mode 100644 (file)
index 0000000..2bdac5e
--- /dev/null
@@ -0,0 +1,306 @@
+/*
+ *  cx18 ioctl control functions
+ *
+ *  Derived from ivtv-controls.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-av-core.h"
+#include "cx18-cards.h"
+#include "cx18-ioctl.h"
+#include "cx18-audio.h"
+#include "cx18-i2c.h"
+#include "cx18-mailbox.h"
+#include "cx18-controls.h"
+
+static const u32 user_ctrls[] = {
+       V4L2_CID_USER_CLASS,
+       V4L2_CID_BRIGHTNESS,
+       V4L2_CID_CONTRAST,
+       V4L2_CID_SATURATION,
+       V4L2_CID_HUE,
+       V4L2_CID_AUDIO_VOLUME,
+       V4L2_CID_AUDIO_BALANCE,
+       V4L2_CID_AUDIO_BASS,
+       V4L2_CID_AUDIO_TREBLE,
+       V4L2_CID_AUDIO_MUTE,
+       V4L2_CID_AUDIO_LOUDNESS,
+       0
+};
+
+static const u32 *ctrl_classes[] = {
+       user_ctrls,
+       cx2341x_mpeg_ctrls,
+       NULL
+};
+
+static int cx18_queryctrl(struct cx18 *cx, struct v4l2_queryctrl *qctrl)
+{
+       CX18_DEBUG_IOCTL("VIDIOC_QUERYCTRL(%08x)\n", qctrl->id);
+
+       qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
+       if (qctrl->id == 0)
+               return -EINVAL;
+
+       switch (qctrl->id) {
+       /* Standard V4L2 controls */
+       case V4L2_CID_BRIGHTNESS:
+       case V4L2_CID_HUE:
+       case V4L2_CID_SATURATION:
+       case V4L2_CID_CONTRAST:
+               if (cx18_av_cmd(cx, VIDIOC_QUERYCTRL, qctrl))
+                       qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
+               return 0;
+
+       case V4L2_CID_AUDIO_VOLUME:
+       case V4L2_CID_AUDIO_MUTE:
+       case V4L2_CID_AUDIO_BALANCE:
+       case V4L2_CID_AUDIO_BASS:
+       case V4L2_CID_AUDIO_TREBLE:
+       case V4L2_CID_AUDIO_LOUDNESS:
+               if (cx18_i2c_hw(cx, cx->card->hw_audio_ctrl, VIDIOC_QUERYCTRL, qctrl))
+                       qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
+               return 0;
+
+       default:
+               if (cx2341x_ctrl_query(&cx->params, qctrl))
+                       qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;
+               return 0;
+       }
+       return 0;
+}
+
+static int cx18_querymenu(struct cx18 *cx, struct v4l2_querymenu *qmenu)
+{
+       struct v4l2_queryctrl qctrl;
+
+       qctrl.id = qmenu->id;
+       cx18_queryctrl(cx, &qctrl);
+       return v4l2_ctrl_query_menu(qmenu, &qctrl, cx2341x_ctrl_get_menu(qmenu->id));
+}
+
+static int cx18_s_ctrl(struct cx18 *cx, struct v4l2_control *vctrl)
+{
+       s32 v = vctrl->value;
+
+       CX18_DEBUG_IOCTL("VIDIOC_S_CTRL(%08x, %x)\n", vctrl->id, v);
+
+       switch (vctrl->id) {
+               /* Standard V4L2 controls */
+       case V4L2_CID_BRIGHTNESS:
+       case V4L2_CID_HUE:
+       case V4L2_CID_SATURATION:
+       case V4L2_CID_CONTRAST:
+               return cx18_av_cmd(cx, VIDIOC_S_CTRL, vctrl);
+
+       case V4L2_CID_AUDIO_VOLUME:
+       case V4L2_CID_AUDIO_MUTE:
+       case V4L2_CID_AUDIO_BALANCE:
+       case V4L2_CID_AUDIO_BASS:
+       case V4L2_CID_AUDIO_TREBLE:
+       case V4L2_CID_AUDIO_LOUDNESS:
+               return cx18_i2c_hw(cx, cx->card->hw_audio_ctrl, VIDIOC_S_CTRL, vctrl);
+
+       default:
+               CX18_DEBUG_IOCTL("invalid control %x\n", vctrl->id);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int cx18_g_ctrl(struct cx18 *cx, struct v4l2_control *vctrl)
+{
+       CX18_DEBUG_IOCTL("VIDIOC_G_CTRL(%08x)\n", vctrl->id);
+
+       switch (vctrl->id) {
+               /* Standard V4L2 controls */
+       case V4L2_CID_BRIGHTNESS:
+       case V4L2_CID_HUE:
+       case V4L2_CID_SATURATION:
+       case V4L2_CID_CONTRAST:
+               return cx18_av_cmd(cx, VIDIOC_G_CTRL, vctrl);
+
+       case V4L2_CID_AUDIO_VOLUME:
+       case V4L2_CID_AUDIO_MUTE:
+       case V4L2_CID_AUDIO_BALANCE:
+       case V4L2_CID_AUDIO_BASS:
+       case V4L2_CID_AUDIO_TREBLE:
+       case V4L2_CID_AUDIO_LOUDNESS:
+               return cx18_i2c_hw(cx, cx->card->hw_audio_ctrl, VIDIOC_G_CTRL, vctrl);
+       default:
+               CX18_DEBUG_IOCTL("invalid control %x\n", vctrl->id);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int cx18_setup_vbi_fmt(struct cx18 *cx, enum v4l2_mpeg_stream_vbi_fmt fmt)
+{
+       if (!(cx->v4l2_cap & V4L2_CAP_SLICED_VBI_CAPTURE))
+               return -EINVAL;
+       if (atomic_read(&cx->capturing) > 0)
+               return -EBUSY;
+
+       /* First try to allocate sliced VBI buffers if needed. */
+       if (fmt && cx->vbi.sliced_mpeg_data[0] == NULL) {
+               int i;
+
+               for (i = 0; i < CX18_VBI_FRAMES; i++) {
+                       /* Yuck, hardcoded. Needs to be a define */
+                       cx->vbi.sliced_mpeg_data[i] = kmalloc(2049, GFP_KERNEL);
+                       if (cx->vbi.sliced_mpeg_data[i] == NULL) {
+                               while (--i >= 0) {
+                                       kfree(cx->vbi.sliced_mpeg_data[i]);
+                                       cx->vbi.sliced_mpeg_data[i] = NULL;
+                               }
+                               return -ENOMEM;
+                       }
+               }
+       }
+
+       cx->vbi.insert_mpeg = fmt;
+
+       if (cx->vbi.insert_mpeg == 0)
+               return 0;
+       /* Need sliced data for mpeg insertion */
+       if (cx18_get_service_set(cx->vbi.sliced_in) == 0) {
+               if (cx->is_60hz)
+                       cx->vbi.sliced_in->service_set = V4L2_SLICED_CAPTION_525;
+               else
+                       cx->vbi.sliced_in->service_set = V4L2_SLICED_WSS_625;
+               cx18_expand_service_set(cx->vbi.sliced_in, cx->is_50hz);
+       }
+       return 0;
+}
+
+int cx18_control_ioctls(struct cx18 *cx, unsigned int cmd, void *arg)
+{
+       struct v4l2_control ctrl;
+
+       switch (cmd) {
+       case VIDIOC_QUERYMENU:
+               CX18_DEBUG_IOCTL("VIDIOC_QUERYMENU\n");
+               return cx18_querymenu(cx, arg);
+
+       case VIDIOC_QUERYCTRL:
+               return cx18_queryctrl(cx, arg);
+
+       case VIDIOC_S_CTRL:
+               return cx18_s_ctrl(cx, arg);
+
+       case VIDIOC_G_CTRL:
+               return cx18_g_ctrl(cx, arg);
+
+       case VIDIOC_S_EXT_CTRLS:
+       {
+               struct v4l2_ext_controls *c = arg;
+
+               if (c->ctrl_class == V4L2_CTRL_CLASS_USER) {
+                       int i;
+                       int err = 0;
+
+                       for (i = 0; i < c->count; i++) {
+                               ctrl.id = c->controls[i].id;
+                               ctrl.value = c->controls[i].value;
+                               err = cx18_s_ctrl(cx, &ctrl);
+                               c->controls[i].value = ctrl.value;
+                               if (err) {
+                                       c->error_idx = i;
+                                       break;
+                               }
+                       }
+                       return err;
+               }
+               CX18_DEBUG_IOCTL("VIDIOC_S_EXT_CTRLS\n");
+               if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG) {
+                       struct cx2341x_mpeg_params p = cx->params;
+                       int err = cx2341x_ext_ctrls(&p, atomic_read(&cx->capturing), arg, cmd);
+
+                       if (err)
+                               return err;
+
+                       if (p.video_encoding != cx->params.video_encoding) {
+                               int is_mpeg1 = p.video_encoding ==
+                                               V4L2_MPEG_VIDEO_ENCODING_MPEG_1;
+                               struct v4l2_format fmt;
+
+                               /* fix videodecoder resolution */
+                               fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+                               fmt.fmt.pix.width = cx->params.width / (is_mpeg1 ? 2 : 1);
+                               fmt.fmt.pix.height = cx->params.height;
+                               cx18_av_cmd(cx, VIDIOC_S_FMT, &fmt);
+                       }
+                       err = cx2341x_update(cx, cx18_api_func, &cx->params, &p);
+                       if (!err && cx->params.stream_vbi_fmt != p.stream_vbi_fmt)
+                               err = cx18_setup_vbi_fmt(cx, p.stream_vbi_fmt);
+                       cx->params = p;
+                       cx->dualwatch_stereo_mode = p.audio_properties & 0x0300;
+                       cx18_audio_set_audio_clock_freq(cx, p.audio_properties & 0x03);
+                       return err;
+               }
+               return -EINVAL;
+       }
+
+       case VIDIOC_G_EXT_CTRLS:
+       {
+               struct v4l2_ext_controls *c = arg;
+
+               if (c->ctrl_class == V4L2_CTRL_CLASS_USER) {
+                       int i;
+                       int err = 0;
+
+                       for (i = 0; i < c->count; i++) {
+                               ctrl.id = c->controls[i].id;
+                               ctrl.value = c->controls[i].value;
+                               err = cx18_g_ctrl(cx, &ctrl);
+                               c->controls[i].value = ctrl.value;
+                               if (err) {
+                                       c->error_idx = i;
+                                       break;
+                               }
+                       }
+                       return err;
+               }
+               CX18_DEBUG_IOCTL("VIDIOC_G_EXT_CTRLS\n");
+               if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG)
+                       return cx2341x_ext_ctrls(&cx->params, 0, arg, cmd);
+               return -EINVAL;
+       }
+
+       case VIDIOC_TRY_EXT_CTRLS:
+       {
+               struct v4l2_ext_controls *c = arg;
+
+               CX18_DEBUG_IOCTL("VIDIOC_TRY_EXT_CTRLS\n");
+               if (c->ctrl_class == V4L2_CTRL_CLASS_MPEG)
+                       return cx2341x_ext_ctrls(&cx->params,
+                                       atomic_read(&cx->capturing), arg, cmd);
+               return -EINVAL;
+       }
+
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
diff --git a/drivers/media/video/cx18/cx18-controls.h b/drivers/media/video/cx18/cx18-controls.h
new file mode 100644 (file)
index 0000000..6e985cf
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ *  cx18 ioctl control functions
+ *
+ *  Derived from ivtv-controls.h
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+int cx18_control_ioctls(struct cx18 *cx, unsigned int cmd, void *arg);
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
new file mode 100644 (file)
index 0000000..8f5ed9b
--- /dev/null
@@ -0,0 +1,971 @@
+/*
+ *  cx18 driver initialization and card probing
+ *
+ *  Derived from ivtv-driver.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-version.h"
+#include "cx18-cards.h"
+#include "cx18-i2c.h"
+#include "cx18-irq.h"
+#include "cx18-gpio.h"
+#include "cx18-firmware.h"
+#include "cx18-streams.h"
+#include "cx18-av-core.h"
+#include "cx18-scb.h"
+#include "cx18-mailbox.h"
+#include "cx18-ioctl.h"
+#include "tuner-xc2028.h"
+
+#include <media/tveeprom.h>
+
+
+/* var to keep track of the number of array elements in use */
+int cx18_cards_active;
+
+/* If you already have X v4l cards in the system, set this to X. This way
+   the device numbers stay matched. Example: you have a WinTV card without
+   radio and a Compro H900 with radio. Normally this would give a video1
+   device together with a radio0 device for the Compro. By setting this to 1
+   you ensure that radio0 is now also radio1. */
+int cx18_first_minor;
+
+/* Master variable for all cx18 info */
+struct cx18 *cx18_cards[CX18_MAX_CARDS];
+
+/* Protects cx18_cards_active */
+DEFINE_SPINLOCK(cx18_cards_lock);
+
+/* add your revision and whatnot here */
+static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
+       {PCI_VENDOR_ID_CX, PCI_DEVICE_ID_CX23418,
+        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+       {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
+
+/* Parameter declarations */
+static int cardtype[CX18_MAX_CARDS];
+static int tuner[CX18_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
+                                    -1, -1, -1, -1, -1, -1, -1, -1,
+                                    -1, -1, -1, -1, -1, -1, -1, -1,
+                                    -1, -1, -1, -1, -1, -1, -1, -1 };
+static int radio[CX18_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
+                                    -1, -1, -1, -1, -1, -1, -1, -1,
+                                    -1, -1, -1, -1, -1, -1, -1, -1,
+                                    -1, -1, -1, -1, -1, -1, -1, -1 };
+
+static int cardtype_c = 1;
+static int tuner_c = 1;
+static int radio_c = 1;
+static char pal[] = "--";
+static char secam[] = "--";
+static char ntsc[] = "-";
+
+/* Buffers */
+static int enc_mpg_buffers = CX18_DEFAULT_ENC_MPG_BUFFERS;
+static int enc_ts_buffers = CX18_DEFAULT_ENC_TS_BUFFERS;
+static int enc_yuv_buffers = CX18_DEFAULT_ENC_YUV_BUFFERS;
+static int enc_vbi_buffers = CX18_DEFAULT_ENC_VBI_BUFFERS;
+static int enc_pcm_buffers = CX18_DEFAULT_ENC_PCM_BUFFERS;
+
+static int cx18_pci_latency = 1;
+
+int cx18_debug;
+
+module_param_array(tuner, int, &tuner_c, 0644);
+module_param_array(radio, bool, &radio_c, 0644);
+module_param_array(cardtype, int, &cardtype_c, 0644);
+module_param_string(pal, pal, sizeof(pal), 0644);
+module_param_string(secam, secam, sizeof(secam), 0644);
+module_param_string(ntsc, ntsc, sizeof(ntsc), 0644);
+module_param_named(debug, cx18_debug, int, 0644);
+module_param(cx18_pci_latency, int, 0644);
+module_param(cx18_first_minor, int, 0644);
+
+module_param(enc_mpg_buffers, int, 0644);
+module_param(enc_ts_buffers, int, 0644);
+module_param(enc_yuv_buffers, int, 0644);
+module_param(enc_vbi_buffers, int, 0644);
+module_param(enc_pcm_buffers, int, 0644);
+
+MODULE_PARM_DESC(tuner, "Tuner type selection,\n"
+                       "\t\t\tsee tuner.h for values");
+MODULE_PARM_DESC(radio,
+                "Enable or disable the radio. Use only if autodetection\n"
+                "\t\t\tfails. 0 = disable, 1 = enable");
+MODULE_PARM_DESC(cardtype,
+                "Only use this option if your card is not detected properly.\n"
+                "\t\tSpecify card type:\n"
+                "\t\t\t 1 = Hauppauge HVR 1600 (ESMT memory)\n"
+                "\t\t\t 2 = Hauppauge HVR 1600 (Samsung memory)\n"
+                "\t\t\t 3 = Compro VideoMate H900\n"
+                "\t\t\t 4 = Yuan MPC718\n"
+                "\t\t\t 0 = Autodetect (default)\n"
+                "\t\t\t-1 = Ignore this card\n\t\t");
+MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60");
+MODULE_PARM_DESC(secam, "Set SECAM standard: B, G, H, D, K, L, LC");
+MODULE_PARM_DESC(ntsc, "Set NTSC standard: M, J, K");
+MODULE_PARM_DESC(debug,
+                "Debug level (bitmask). Default: 0\n"
+                "\t\t\t  1/0x0001: warning\n"
+                "\t\t\t  2/0x0002: info\n"
+                "\t\t\t  4/0x0004: mailbox\n"
+                "\t\t\t  8/0x0008: dma\n"
+                "\t\t\t 16/0x0010: ioctl\n"
+                "\t\t\t 32/0x0020: file\n"
+                "\t\t\t 64/0x0040: i2c\n"
+                "\t\t\t128/0x0080: irq\n"
+                "\t\t\t256/0x0100: high volume\n");
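+/* Illustrative only: the debug bits combine by OR-ing, so loading with
+   debug=0x14 (0x0004 mailbox | 0x0010 ioctl) enables the mailbox and ioctl
+   messages, while debug=3 enables warning plus info. */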
+MODULE_PARM_DESC(cx18_pci_latency,
+                "Change the PCI latency to 64 if lower: 0 = No, 1 = Yes,\n"
+                "\t\t\tDefault: Yes");
+MODULE_PARM_DESC(enc_mpg_buffers,
+                "Encoder MPG Buffers (in MB)\n"
+                "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_MPG_BUFFERS));
+MODULE_PARM_DESC(enc_ts_buffers,
+                "Encoder TS Buffers (in MB)\n"
+                "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_TS_BUFFERS));
+MODULE_PARM_DESC(enc_yuv_buffers,
+                "Encoder YUV Buffers (in MB)\n"
+                "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_YUV_BUFFERS));
+MODULE_PARM_DESC(enc_vbi_buffers,
+                "Encoder VBI Buffers (in MB)\n"
+                "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_VBI_BUFFERS));
+MODULE_PARM_DESC(enc_pcm_buffers,
+                "Encoder PCM buffers (in MB)\n"
+                "\t\t\tDefault: " __stringify(CX18_DEFAULT_ENC_PCM_BUFFERS));
+
+MODULE_PARM_DESC(cx18_first_minor, "Set minor assigned to first card");
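+
+/* A hypothetical invocation combining the options above (values are
+   illustrative only):
+
+       modprobe cx18 cardtype=1 radio=1 cx18_first_minor=1
+
+   This forces the first card to the HVR-1600 (ESMT) entry, enables its
+   radio and starts device numbering at 1, as described for
+   cx18_first_minor above. */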
+
+MODULE_AUTHOR("Hans Verkuil");
+MODULE_DESCRIPTION("CX23418 driver");
+MODULE_SUPPORTED_DEVICE("CX23418 MPEG2 encoder");
+MODULE_LICENSE("GPL");
+
+MODULE_VERSION(CX18_VERSION);
+
+int cx18_waitq(wait_queue_head_t *waitq)
+{
+       DEFINE_WAIT(wait);
+
+       prepare_to_wait(waitq, &wait, TASK_INTERRUPTIBLE);
+       schedule();
+       finish_wait(waitq, &wait);
+       return signal_pending(current) ? -EINTR : 0;
+}
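+
+/* Note: cx18_waitq() sleeps exactly once: it returns after any wakeup on
+   the queue, or -EINTR if a signal is pending, so callers waiting for a
+   condition are expected to re-check it and call again if necessary. */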
+
+/* Generic utility functions */
+int cx18_msleep_timeout(unsigned int msecs, int intr)
+{
+       int timeout = msecs_to_jiffies(msecs);
+       int sig;
+
+       do {
+               set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
+               timeout = schedule_timeout(timeout);
+               sig = intr ? signal_pending(current) : 0;
+       } while (!sig && timeout);
+       return sig;
+}
+
+/* Release ioremapped memory */
+static void cx18_iounmap(struct cx18 *cx)
+{
+       if (cx == NULL)
+               return;
+
+       /* Release io memory */
+       if (cx->enc_mem != NULL) {
+               CX18_DEBUG_INFO("releasing enc_mem\n");
+               iounmap(cx->enc_mem);
+               cx->enc_mem = NULL;
+       }
+}
+
+/* Hauppauge card? get values from tveeprom */
+void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
+{
+       u8 eedata[256];
+
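+       /* 0xA0 >> 1 == 0x50, the usual 7-bit address of the Hauppauge EEPROM */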
+       cx->i2c_client[0].addr = 0xA0 >> 1;
+       tveeprom_read(&cx->i2c_client[0], eedata, sizeof(eedata));
+       tveeprom_hauppauge_analog(&cx->i2c_client[0], tv, eedata);
+}
+
+static void cx18_process_eeprom(struct cx18 *cx)
+{
+       struct tveeprom tv;
+
+       cx18_read_eeprom(cx, &tv);
+
+       /* Many thanks to Steven Toth from Hauppauge for providing the
+          model numbers */
+       switch (tv.model) {
+       case 74000 ... 74099:
+               cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
+               break;
+       case 74700 ... 74799:
+               cx->card = cx18_get_card(CX18_CARD_HVR_1600_SAMSUNG);
+               break;
+       case 0:
+               CX18_ERR("Invalid EEPROM\n");
+               return;
+       default:
+               CX18_ERR("Unknown model %d, defaulting to HVR-1600\n", tv.model);
+               cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
+               break;
+       }
+
+       cx->v4l2_cap = cx->card->v4l2_capabilities;
+       cx->card_name = cx->card->name;
+       cx->card_i2c = cx->card->i2c;
+
+       CX18_INFO("Autodetected %s\n", cx->card_name);
+
+       if (tv.tuner_type == TUNER_ABSENT)
+               CX18_ERR("tveeprom cannot autodetect tuner!\n");
+
+       if (cx->options.tuner == -1)
+               cx->options.tuner = tv.tuner_type;
+       if (cx->options.radio == -1)
+               cx->options.radio = (tv.has_radio != 0);
+
+       if (cx->std != 0)
+               /* user specified tuner standard */
+               return;
+
+       /* autodetect tuner standard */
+       if (tv.tuner_formats & V4L2_STD_PAL) {
+               CX18_DEBUG_INFO("PAL tuner detected\n");
+               cx->std |= V4L2_STD_PAL_BG | V4L2_STD_PAL_H;
+       } else if (tv.tuner_formats & V4L2_STD_NTSC) {
+               CX18_DEBUG_INFO("NTSC tuner detected\n");
+               cx->std |= V4L2_STD_NTSC_M;
+       } else if (tv.tuner_formats & V4L2_STD_SECAM) {
+               CX18_DEBUG_INFO("SECAM tuner detected\n");
+               cx->std |= V4L2_STD_SECAM_L;
+       } else {
+               CX18_INFO("No tuner detected, default to NTSC-M\n");
+               cx->std |= V4L2_STD_NTSC_M;
+       }
+}
+
+static v4l2_std_id cx18_parse_std(struct cx18 *cx)
+{
+       switch (pal[0]) {
+       case '6':
+               return V4L2_STD_PAL_60;
+       case 'b':
+       case 'B':
+       case 'g':
+       case 'G':
+               return V4L2_STD_PAL_BG;
+       case 'h':
+       case 'H':
+               return V4L2_STD_PAL_H;
+       case 'n':
+       case 'N':
+               if (pal[1] == 'c' || pal[1] == 'C')
+                       return V4L2_STD_PAL_Nc;
+               return V4L2_STD_PAL_N;
+       case 'i':
+       case 'I':
+               return V4L2_STD_PAL_I;
+       case 'd':
+       case 'D':
+       case 'k':
+       case 'K':
+               return V4L2_STD_PAL_DK;
+       case 'M':
+       case 'm':
+               return V4L2_STD_PAL_M;
+       case '-':
+               break;
+       default:
+               CX18_WARN("pal= argument not recognised\n");
+               return 0;
+       }
+
+       switch (secam[0]) {
+       case 'b':
+       case 'B':
+       case 'g':
+       case 'G':
+       case 'h':
+       case 'H':
+               return V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H;
+       case 'd':
+       case 'D':
+       case 'k':
+       case 'K':
+               return V4L2_STD_SECAM_DK;
+       case 'l':
+       case 'L':
+               if (secam[1] == 'C' || secam[1] == 'c')
+                       return V4L2_STD_SECAM_LC;
+               return V4L2_STD_SECAM_L;
+       case '-':
+               break;
+       default:
+               CX18_WARN("secam= argument not recognised\n");
+               return 0;
+       }
+
+       switch (ntsc[0]) {
+       case 'm':
+       case 'M':
+               return V4L2_STD_NTSC_M;
+       case 'j':
+       case 'J':
+               return V4L2_STD_NTSC_M_JP;
+       case 'k':
+       case 'K':
+               return V4L2_STD_NTSC_M_KR;
+       case '-':
+               break;
+       default:
+               CX18_WARN("ntsc= argument not recognised\n");
+               return 0;
+       }
+
+       /* no match found */
+       return 0;
+}
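+
+/* Illustrative behaviour of the parser above: the pal= string is consulted
+   first, then secam=, then ntsc=, so pal=G yields V4L2_STD_PAL_BG no matter
+   what secam= and ntsc= are set to, while pal=- secam=- ntsc=J yields
+   V4L2_STD_NTSC_M_JP. Leaving all three at their defaults returns 0, in
+   which case the driver falls back to autodetection (or NTSC-M). */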
+
+static void cx18_process_options(struct cx18 *cx)
+{
+       int i, j;
+
+       cx->options.megabytes[CX18_ENC_STREAM_TYPE_MPG] = enc_mpg_buffers;
+       cx->options.megabytes[CX18_ENC_STREAM_TYPE_TS] = enc_ts_buffers;
+       cx->options.megabytes[CX18_ENC_STREAM_TYPE_YUV] = enc_yuv_buffers;
+       cx->options.megabytes[CX18_ENC_STREAM_TYPE_VBI] = enc_vbi_buffers;
+       cx->options.megabytes[CX18_ENC_STREAM_TYPE_PCM] = enc_pcm_buffers;
+       cx->options.cardtype = cardtype[cx->num];
+       cx->options.tuner = tuner[cx->num];
+       cx->options.radio = radio[cx->num];
+
+       cx->std = cx18_parse_std(cx);
+       if (cx->options.cardtype == -1) {
+               CX18_INFO("Ignore card\n");
+               return;
+       }
+       cx->card = cx18_get_card(cx->options.cardtype - 1);
+       if (cx->card)
+               CX18_INFO("User specified %s card\n", cx->card->name);
+       else if (cx->options.cardtype != 0)
+               CX18_ERR("Unknown user specified type, trying to autodetect card\n");
+       if (cx->card == NULL) {
+               if (cx->dev->subsystem_vendor == CX18_PCI_ID_HAUPPAUGE) {
+                       cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
+                       CX18_INFO("Autodetected Hauppauge card\n");
+               }
+       }
+       if (cx->card == NULL) {
+               for (i = 0; (cx->card = cx18_get_card(i)); i++) {
+                       if (cx->card->pci_list == NULL)
+                               continue;
+                       for (j = 0; cx->card->pci_list[j].device; j++) {
+                               if (cx->dev->device !=
+                                   cx->card->pci_list[j].device)
+                                       continue;
+                               if (cx->dev->subsystem_vendor !=
+                                   cx->card->pci_list[j].subsystem_vendor)
+                                       continue;
+                               if (cx->dev->subsystem_device !=
+                                   cx->card->pci_list[j].subsystem_device)
+                                       continue;
+                               CX18_INFO("Autodetected %s card\n", cx->card->name);
+                               goto done;
+                       }
+               }
+       }
+done:
+
+       if (cx->card == NULL) {
+               cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
+               CX18_ERR("Unknown card: vendor/device: %04x/%04x\n",
+                    cx->dev->vendor, cx->dev->device);
+               CX18_ERR("              subsystem vendor/device: %04x/%04x\n",
+                    cx->dev->subsystem_vendor, cx->dev->subsystem_device);
+               CX18_ERR("Defaulting to %s card\n", cx->card->name);
+               CX18_ERR("Please mail the vendor/device and subsystem vendor/device IDs and what kind of\n");
+               CX18_ERR("card you have to the ivtv-devel mailinglist (www.ivtvdriver.org)\n");
+               CX18_ERR("Prefix your subject line with [UNKNOWN CX18 CARD].\n");
+       }
+       cx->v4l2_cap = cx->card->v4l2_capabilities;
+       cx->card_name = cx->card->name;
+       cx->card_i2c = cx->card->i2c;
+}
+
+/* Precondition: the cx18 structure has been memset to 0. Only
+   the dev and num fields have been filled in.
+   No assumptions on the card type may be made here (see cx18_init_struct2
+   for that).
+ */
+static int __devinit cx18_init_struct1(struct cx18 *cx)
+{
+       cx->base_addr = pci_resource_start(cx->dev, 0);
+
+       mutex_init(&cx->serialize_lock);
+       mutex_init(&cx->i2c_bus_lock[0]);
+       mutex_init(&cx->i2c_bus_lock[1]);
+
+       spin_lock_init(&cx->lock);
+       spin_lock_init(&cx->dma_reg_lock);
+
+       /* start counting open_id at 1 */
+       cx->open_id = 1;
+
+       /* Initial settings */
+       cx2341x_fill_defaults(&cx->params);
+       cx->temporal_strength = cx->params.video_temporal_filter;
+       cx->spatial_strength = cx->params.video_spatial_filter;
+       cx->filter_mode = cx->params.video_spatial_filter_mode |
+               (cx->params.video_temporal_filter_mode << 1) |
+               (cx->params.video_median_filter_type << 2);
+       cx->params.port = CX2341X_PORT_MEMORY;
+       cx->params.capabilities = CX2341X_CAP_HAS_SLICED_VBI;
+       init_waitqueue_head(&cx->cap_w);
+       init_waitqueue_head(&cx->mb_apu_waitq);
+       init_waitqueue_head(&cx->mb_cpu_waitq);
+       init_waitqueue_head(&cx->mb_epu_waitq);
+       init_waitqueue_head(&cx->mb_hpu_waitq);
+       init_waitqueue_head(&cx->dma_waitq);
+
+       /* VBI */
+       cx->vbi.in.type = V4L2_BUF_TYPE_SLICED_VBI_CAPTURE;
+       cx->vbi.sliced_in = &cx->vbi.in.fmt.sliced;
+       cx->vbi.raw_size = 1456;
+       cx->vbi.raw_decoder_line_size = 1456;
+       cx->vbi.raw_decoder_sav_odd_field = 0x20;
+       cx->vbi.raw_decoder_sav_even_field = 0x60;
+       cx->vbi.sliced_decoder_line_size = 272;
+       cx->vbi.sliced_decoder_sav_odd_field = 0xB0;
+       cx->vbi.sliced_decoder_sav_even_field = 0xF0;
+       return 0;
+}
+
+/* Second initialization part. Here the card type has been
+   autodetected. */
+static void __devinit cx18_init_struct2(struct cx18 *cx)
+{
+       int i;
+
+       for (i = 0; i < CX18_CARD_MAX_VIDEO_INPUTS; i++)
+               if (cx->card->video_inputs[i].video_type == 0)
+                       break;
+       cx->nof_inputs = i;
+       for (i = 0; i < CX18_CARD_MAX_AUDIO_INPUTS; i++)
+               if (cx->card->audio_inputs[i].audio_type == 0)
+                       break;
+       cx->nof_audio_inputs = i;
+
+       /* Find tuner input */
+       for (i = 0; i < cx->nof_inputs; i++) {
+               if (cx->card->video_inputs[i].video_type ==
+                               CX18_CARD_INPUT_VID_TUNER)
+                       break;
+       }
+       if (i == cx->nof_inputs)
+               i = 0;
+       cx->active_input = i;
+       cx->audio_input = cx->card->video_inputs[i].audio_index;
+       cx->av_state.vid_input = CX18_AV_COMPOSITE7;
+       cx->av_state.aud_input = CX18_AV_AUDIO8;
+       cx->av_state.audclk_freq = 48000;
+       cx->av_state.audmode = V4L2_TUNER_MODE_LANG1;
+       cx->av_state.vbi_line_offset = 8;
+}
+
+static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *dev,
+                         const struct pci_device_id *pci_id)
+{
+       u16 cmd;
+       unsigned char pci_latency;
+
+       CX18_DEBUG_INFO("Enabling pci device\n");
+
+       if (pci_enable_device(dev)) {
+               CX18_ERR("Can't enable device %d!\n", cx->num);
+               return -EIO;
+       }
+       if (pci_set_dma_mask(dev, 0xffffffff)) {
+               CX18_ERR("No suitable DMA available on card %d.\n", cx->num);
+               return -EIO;
+       }
+       if (!request_mem_region(cx->base_addr, CX18_MEM_SIZE, "cx18 encoder")) {
+               CX18_ERR("Cannot request encoder memory region on card %d.\n", cx->num);
+               return -EIO;
+       }
+
+       /* Check for bus mastering */
+       pci_read_config_word(dev, PCI_COMMAND, &cmd);
+       cmd |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
+       pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+       pci_read_config_byte(dev, PCI_CLASS_REVISION, &cx->card_rev);
+       pci_read_config_byte(dev, PCI_LATENCY_TIMER, &pci_latency);
+
+       if (pci_latency < 64 && cx18_pci_latency) {
+               CX18_INFO("Unreasonably low latency timer, "
+                              "setting to 64 (was %d)\n", pci_latency);
+               pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
+               pci_read_config_byte(dev, PCI_LATENCY_TIMER, &pci_latency);
+       }
+       /* This config space value relates to DMA latencies. The default
+          value of 0x8080 is too low, however, and will lead to DMA
+          errors. 0xffff is the maximum value, which avoids these
+          problems. */
+       pci_write_config_dword(dev, 0x40, 0xffff);
+
+       CX18_DEBUG_INFO("cx%d (rev %d) at %02x:%02x.%x, "
+                  "irq: %d, latency: %d, memory: 0x%lx\n",
+                  cx->dev->device, cx->card_rev, dev->bus->number,
+                  PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn),
+                  cx->dev->irq, pci_latency, (unsigned long)cx->base_addr);
+
+       return 0;
+}
+
+static u32 cx18_request_module(struct cx18 *cx, u32 hw,
+               const char *name, u32 id)
+{
+       if ((hw & id) == 0)
+               return hw;
+       if (request_module(name) != 0) {
+               CX18_ERR("Failed to load module %s\n", name);
+               return hw & ~id;
+       }
+       CX18_DEBUG_INFO("Loaded module %s\n", name);
+       return hw;
+}
+
+static void cx18_load_and_init_modules(struct cx18 *cx)
+{
+       u32 hw = cx->card->hw_all;
+       int i;
+
+       /* load modules */
+#ifndef CONFIG_MEDIA_TUNER
+       hw = cx18_request_module(cx, hw, "tuner", CX18_HW_TUNER);
+#endif
+#ifndef CONFIG_VIDEO_CS5345
+       hw = cx18_request_module(cx, hw, "cs5345", CX18_HW_CS5345);
+#endif
+
+       /* check which i2c devices are actually found */
+       for (i = 0; i < 32; i++) {
+               u32 device = 1 << i;
+
+               if (!(device & hw))
+                       continue;
+               if (device == CX18_HW_GPIO || device == CX18_HW_TVEEPROM ||
+                   device == CX18_HW_CX23418 || device == CX18_HW_DVB) {
+                       /* These 'devices' do not use i2c probing */
+                       cx->hw_flags |= device;
+                       continue;
+               }
+               cx18_i2c_register(cx, i);
+               if (cx18_i2c_hw_addr(cx, device) > 0)
+                       cx->hw_flags |= device;
+       }
+
+       hw = cx->hw_flags;
+}
+
+static int __devinit cx18_probe(struct pci_dev *dev,
+                               const struct pci_device_id *pci_id)
+{
+       int retval = 0;
+       int vbi_buf_size;
+       u32 devtype;
+       struct cx18 *cx;
+
+       spin_lock(&cx18_cards_lock);
+
+       /* Make sure we've got a place for this card */
+       if (cx18_cards_active == CX18_MAX_CARDS) {
+               printk(KERN_ERR "cx18:  Maximum number of cards detected (%d).\n",
+                             cx18_cards_active);
+               spin_unlock(&cx18_cards_lock);
+               return -ENOMEM;
+       }
+
+       cx = kzalloc(sizeof(struct cx18), GFP_ATOMIC);
+       if (cx == NULL) {
+               spin_unlock(&cx18_cards_lock);
+               return -ENOMEM;
+       }
+       cx18_cards[cx18_cards_active] = cx;
+       cx->dev = dev;
+       cx->num = cx18_cards_active++;
+       snprintf(cx->name, sizeof(cx->name) - 1, "cx18-%d", cx->num);
+       CX18_INFO("Initializing card #%d\n", cx->num);
+
+       spin_unlock(&cx18_cards_lock);
+
+       cx18_process_options(cx);
+       if (cx->options.cardtype == -1) {
+               retval = -ENODEV;
+               goto err;
+       }
+       if (cx18_init_struct1(cx)) {
+               retval = -ENOMEM;
+               goto err;
+       }
+
+       CX18_DEBUG_INFO("base addr: 0x%08x\n", cx->base_addr);
+
+       /* PCI Device Setup */
+       retval = cx18_setup_pci(cx, dev, pci_id);
+       if (retval != 0) {
+               if (retval == -EIO)
+                       goto free_workqueue;
+               else if (retval == -ENXIO)
+                       goto free_mem;
+       }
+       /* save cx in the pci struct for later use */
+       pci_set_drvdata(dev, cx);
+
+       /* map io memory */
+       CX18_DEBUG_INFO("attempting ioremap at 0x%08x len 0x%08x\n",
+                  cx->base_addr + CX18_MEM_OFFSET, CX18_MEM_SIZE);
+       cx->enc_mem = ioremap_nocache(cx->base_addr + CX18_MEM_OFFSET,
+                                      CX18_MEM_SIZE);
+       if (!cx->enc_mem) {
+               CX18_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n");
+               CX18_ERR("or disabling CONFIG_HIGHMEM4G in the kernel config would help\n");
+               retval = -ENOMEM;
+               goto free_mem;
+       }
+       cx->reg_mem = cx->enc_mem + CX18_REG_OFFSET;
+       devtype = read_reg(0xC72028);
+       switch (devtype & 0xff000000) {
+       case 0xff000000:
+               CX18_INFO("cx23418 revision %08x (A)\n", devtype);
+               break;
+       case 0x01000000:
+               CX18_INFO("cx23418 revision %08x (B)\n", devtype);
+               break;
+       default:
+               CX18_INFO("cx23418 revision %08x (Unknown)\n", devtype);
+               break;
+       }
+
+       cx18_init_power(cx, 1);
+       cx18_init_memory(cx);
+
+       cx->scb = (struct cx18_scb *)(cx->enc_mem + SCB_OFFSET);
+       cx18_init_scb(cx);
+
+       cx18_gpio_init(cx);
+
+       /* activate i2c */
+       CX18_DEBUG_INFO("activating i2c...\n");
+       if (init_cx18_i2c(cx)) {
+               CX18_ERR("Could not initialize i2c\n");
+               goto free_map;
+       }
+
+       CX18_DEBUG_INFO("Active card count: %d.\n", cx18_cards_active);
+
+       if (cx->card->hw_all & CX18_HW_TVEEPROM) {
+               /* Based on the model number the cardtype may be changed.
+                  The PCI IDs are not always reliable. */
+               cx18_process_eeprom(cx);
+       }
+       if (cx->card->comment)
+               CX18_INFO("%s", cx->card->comment);
+       if (cx->card->v4l2_capabilities == 0) {
+               retval = -ENODEV;
+               goto free_i2c;
+       }
+       cx18_init_memory(cx);
+
+       /* Register IRQ */
+       retval = request_irq(cx->dev->irq, cx18_irq_handler,
+                            IRQF_SHARED | IRQF_DISABLED, cx->name, (void *)cx);
+       if (retval) {
+               CX18_ERR("Failed to register irq %d\n", retval);
+               goto free_i2c;
+       }
+
+       if (cx->std == 0)
+               cx->std = V4L2_STD_NTSC_M;
+
+       if (cx->options.tuner == -1) {
+               int i;
+
+               for (i = 0; i < CX18_CARD_MAX_TUNERS; i++) {
+                       if ((cx->std & cx->card->tuners[i].std) == 0)
+                               continue;
+                       cx->options.tuner = cx->card->tuners[i].tuner;
+                       break;
+               }
+       }
+       /* if no tuner was found, then pick the first tuner in the card list */
+       if (cx->options.tuner == -1 && cx->card->tuners[0].std) {
+               cx->std = cx->card->tuners[0].std;
+               cx->options.tuner = cx->card->tuners[0].tuner;
+       }
+       if (cx->options.radio == -1)
+               cx->options.radio = (cx->card->radio_input.audio_type != 0);
+
+       /* The card is now fully identified, continue with card-specific
+          initialization. */
+       cx18_init_struct2(cx);
+
+       cx18_load_and_init_modules(cx);
+
+       if (cx->std & V4L2_STD_525_60) {
+               cx->is_60hz = 1;
+               cx->is_out_60hz = 1;
+       } else {
+               cx->is_50hz = 1;
+               cx->is_out_50hz = 1;
+       }
+       cx->params.video_gop_size = cx->is_60hz ? 15 : 12;
+
+       cx->stream_buf_size[CX18_ENC_STREAM_TYPE_MPG] = 0x08000;
+       cx->stream_buf_size[CX18_ENC_STREAM_TYPE_TS] = 0x08000;
+       cx->stream_buf_size[CX18_ENC_STREAM_TYPE_PCM] = 0x01200;
+       cx->stream_buf_size[CX18_ENC_STREAM_TYPE_YUV] = 0x20000;
+       vbi_buf_size = cx->vbi.raw_size * (cx->is_60hz ? 24 : 36) / 2;
+       cx->stream_buf_size[CX18_ENC_STREAM_TYPE_VBI] = vbi_buf_size;
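+       /* The sizes above work out to 32 KiB buffers for MPG and TS, 4608
+          bytes for PCM, 128 KiB for YUV, and 1456 * 24 / 2 = 17472 (60 Hz)
+          or 1456 * 36 / 2 = 26208 (50 Hz) bytes for VBI. */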
+
+       if (cx->options.radio > 0)
+               cx->v4l2_cap |= V4L2_CAP_RADIO;
+
+       retval = cx18_streams_setup(cx);
+       if (retval) {
+               CX18_ERR("Error %d setting up streams\n", retval);
+               goto free_irq;
+       }
+       retval = cx18_streams_register(cx);
+       if (retval) {
+               CX18_ERR("Error %d registering devices\n", retval);
+               goto free_streams;
+       }
+
+       if (cx->options.tuner > -1) {
+               struct tuner_setup setup;
+
+               setup.addr = ADDR_UNSET;
+               setup.type = cx->options.tuner;
+               setup.mode_mask = T_ANALOG_TV;  /* matches TV tuners */
+               setup.tuner_callback = (setup.type == TUNER_XC2028) ?
+                       cx18_reset_tuner_gpio : NULL;
+               cx18_call_i2c_clients(cx, TUNER_SET_TYPE_ADDR, &setup);
+               if (setup.type == TUNER_XC2028) {
+                       static struct xc2028_ctrl ctrl = {
+                               .fname = XC2028_DEFAULT_FIRMWARE,
+                               .max_len = 64,
+                       };
+                       struct v4l2_priv_tun_config cfg = {
+                               .tuner = cx->options.tuner,
+                               .priv = &ctrl,
+                       };
+                       cx18_call_i2c_clients(cx, TUNER_SET_CONFIG, &cfg);
+               }
+       }
+
+       /* The tuner is fixed to the standard. The other inputs (e.g. S-Video)
+          are not. */
+       cx->tuner_std = cx->std;
+
+       cx18_init_on_first_open(cx);
+
+       CX18_INFO("Initialized card #%d: %s\n", cx->num, cx->card_name);
+
+       return 0;
+
+free_streams:
+       cx18_streams_cleanup(cx);
+free_irq:
+       free_irq(cx->dev->irq, (void *)cx);
+free_i2c:
+       exit_cx18_i2c(cx);
+free_map:
+       cx18_iounmap(cx);
+free_mem:
+       release_mem_region(cx->base_addr, CX18_MEM_SIZE);
+free_workqueue:
+err:
+       if (retval == 0)
+               retval = -ENODEV;
+       CX18_ERR("Error %d on initialization\n", retval);
+
+       kfree(cx18_cards[cx18_cards_active]);
+       cx18_cards[cx18_cards_active] = NULL;
+       return retval;
+}
+
+int cx18_init_on_first_open(struct cx18 *cx)
+{
+       int video_input;
+       int fw_retry_count = 3;
+       struct v4l2_frequency vf;
+
+       if (test_bit(CX18_F_I_FAILED, &cx->i_flags))
+               return -ENXIO;
+
+       if (test_and_set_bit(CX18_F_I_INITED, &cx->i_flags))
+               return 0;
+
+       while (--fw_retry_count > 0) {
+               /* load firmware */
+               if (cx18_firmware_init(cx) == 0)
+                       break;
+               if (fw_retry_count > 1)
+                       CX18_WARN("Retry loading firmware\n");
+       }
+
+       if (fw_retry_count == 0) {
+               set_bit(CX18_F_I_FAILED, &cx->i_flags);
+               return -ENXIO;
+       }
+       set_bit(CX18_F_I_LOADED_FW, &cx->i_flags);
+
+       /* Initialize the firmware twice to work around a
+        * transport-related silicon bug. */
+
+       fw_retry_count = 3;
+       while (--fw_retry_count > 0) {
+               /* load firmware */
+               if (cx18_firmware_init(cx) == 0)
+                       break;
+               if (fw_retry_count > 1)
+                       CX18_WARN("Retry loading firmware\n");
+       }
+
+       if (fw_retry_count == 0) {
+               set_bit(CX18_F_I_FAILED, &cx->i_flags);
+               return -ENXIO;
+       }
+
+       vf.tuner = 0;
+       vf.type = V4L2_TUNER_ANALOG_TV;
+       vf.frequency = 6400; /* the tuner 'baseline' frequency */
+
+       /* Set initial frequency. For PAL/SECAM broadcasts no
+          'default' channel exists AFAIK. */
+       if (cx->std == V4L2_STD_NTSC_M_JP)
+               vf.frequency = 1460;    /* ch. 1 91250*16/1000 */
+       else if (cx->std & V4L2_STD_NTSC_M)
+               vf.frequency = 1076;    /* ch. 4 67250*16/1000 */
+
+       video_input = cx->active_input;
+       cx->active_input++;     /* Force update of input */
+       cx18_v4l2_ioctls(cx, NULL, VIDIOC_S_INPUT, &video_input);
+
+       /* Let the VIDIOC_S_STD ioctl do all the work; that keeps the code
+          in one place. */
+       cx->std++;              /* Force full standard initialization */
+       cx18_v4l2_ioctls(cx, NULL, VIDIOC_S_STD, &cx->tuner_std);
+       cx18_v4l2_ioctls(cx, NULL, VIDIOC_S_FREQUENCY, &vf);
+       return 0;
+}
+
+static void cx18_remove(struct pci_dev *pci_dev)
+{
+       struct cx18 *cx = pci_get_drvdata(pci_dev);
+
+       CX18_DEBUG_INFO("Removing Card #%d\n", cx->num);
+
+       /* Stop all captures */
+       CX18_DEBUG_INFO("Stopping all streams\n");
+       if (atomic_read(&cx->capturing) > 0)
+               cx18_stop_all_captures(cx);
+
+       /* Interrupts */
+       sw1_irq_disable(IRQ_CPU_TO_EPU | IRQ_APU_TO_EPU);
+       sw2_irq_disable(IRQ_CPU_TO_EPU_ACK | IRQ_APU_TO_EPU_ACK);
+
+       cx18_halt_firmware(cx);
+
+       cx18_streams_cleanup(cx);
+
+       exit_cx18_i2c(cx);
+
+       free_irq(cx->dev->irq, (void *)cx);
+
+       if (cx->dev)
+               cx18_iounmap(cx);
+
+       release_mem_region(cx->base_addr, CX18_MEM_SIZE);
+
+       pci_disable_device(cx->dev);
+
+       CX18_INFO("Removed %s, card #%d\n", cx->card_name, cx->num);
+}
+
+/* define a pci_driver for card detection */
+static struct pci_driver cx18_pci_driver = {
+      .name =     "cx18",
+      .id_table = cx18_pci_tbl,
+      .probe =    cx18_probe,
+      .remove =   cx18_remove,
+};
+
+static int module_start(void)
+{
+       printk(KERN_INFO "cx18:  Start initialization, version %s\n", CX18_VERSION);
+
+       memset(cx18_cards, 0, sizeof(cx18_cards));
+
+       /* Validate parameters */
+       if (cx18_first_minor < 0 || cx18_first_minor >= CX18_MAX_CARDS) {
+               printk(KERN_ERR "cx18:  Exiting, cx18_first_minor must be between 0 and %d\n",
+                    CX18_MAX_CARDS - 1);
+               return -1;
+       }
+
+       if (cx18_debug < 0 || cx18_debug > 511) {
+               cx18_debug = 0;
+               printk(KERN_INFO "cx18:   Debug value must be >= 0 and <= 511!\n");
+       }
+
+       if (pci_register_driver(&cx18_pci_driver)) {
+               printk(KERN_ERR "cx18:   Error detecting PCI card\n");
+               return -ENODEV;
+       }
+       printk(KERN_INFO "cx18:  End initialization\n");
+       return 0;
+}
+
+static void module_cleanup(void)
+{
+       int i;
+
+       pci_unregister_driver(&cx18_pci_driver);
+
+       for (i = 0; i < cx18_cards_active; i++) {
+               if (cx18_cards[i] == NULL)
+                       continue;
+               kfree(cx18_cards[i]);
+       }
+}
+
+module_init(module_start);
+module_exit(module_cleanup);
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
new file mode 100644 (file)
index 0000000..2ee9391
--- /dev/null
@@ -0,0 +1,500 @@
+/*
+ *  cx18 driver internal defines and structures
+ *
+ *  Derived from ivtv-driver.h
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#ifndef CX18_DRIVER_H
+#define CX18_DRIVER_H
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/list.h>
+#include <linux/unistd.h>
+#include <linux/byteorder/swab.h>
+#include <linux/pagemap.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+
+#include <linux/dvb/video.h>
+#include <linux/dvb/audio.h>
+#include <media/v4l2-common.h>
+#include <media/tuner.h>
+#include "cx18-mailbox.h"
+#include "cx18-av-core.h"
+#include "cx23418.h"
+
+/* DVB */
+#include "demux.h"
+#include "dmxdev.h"
+#include "dvb_demux.h"
+#include "dvb_frontend.h"
+#include "dvb_net.h"
+#include "dvbdev.h"
+
+#ifndef CONFIG_PCI
+#  error "This driver requires kernel PCI support."
+#endif
+
+#define CX18_MEM_OFFSET        0x00000000
+#define CX18_MEM_SIZE  0x04000000
+#define CX18_REG_OFFSET        0x02000000
+
+/* Maximum cx18 driver instances. */
+#define CX18_MAX_CARDS 32
+
+/* Supported cards */
+#define CX18_CARD_HVR_1600_ESMT              0 /* Hauppauge HVR 1600 (ESMT memory) */
+#define CX18_CARD_HVR_1600_SAMSUNG    1        /* Hauppauge HVR 1600 (Samsung memory) */
+#define CX18_CARD_COMPRO_H900        2 /* Compro VideoMate H900 */
+#define CX18_CARD_YUAN_MPC718        3 /* Yuan MPC718 */
+#define CX18_CARD_LAST                       3
+
+#define CX18_ENC_STREAM_TYPE_MPG  0
+#define CX18_ENC_STREAM_TYPE_TS   1
+#define CX18_ENC_STREAM_TYPE_YUV  2
+#define CX18_ENC_STREAM_TYPE_VBI  3
+#define CX18_ENC_STREAM_TYPE_PCM  4
+#define CX18_ENC_STREAM_TYPE_IDX  5
+#define CX18_ENC_STREAM_TYPE_RAD  6
+#define CX18_MAX_STREAMS         7
+
+/* system vendor and device IDs */
+#define PCI_VENDOR_ID_CX      0x14f1
+#define PCI_DEVICE_ID_CX23418 0x5b7a
+
+/* subsystem vendor ID */
+#define CX18_PCI_ID_HAUPPAUGE          0x0070
+#define CX18_PCI_ID_COMPRO             0x185b
+#define CX18_PCI_ID_YUAN               0x12ab
+
+/* ======================================================================== */
+/* ========================== START USER SETTABLE DMA VARIABLES =========== */
+/* ======================================================================== */
+
+/* DMA Buffers, Default size in MB allocated */
+#define CX18_DEFAULT_ENC_TS_BUFFERS  1
+#define CX18_DEFAULT_ENC_MPG_BUFFERS 2
+#define CX18_DEFAULT_ENC_IDX_BUFFERS 1
+#define CX18_DEFAULT_ENC_YUV_BUFFERS 2
+#define CX18_DEFAULT_ENC_VBI_BUFFERS 1
+#define CX18_DEFAULT_ENC_PCM_BUFFERS 1
+
+/* i2c stuff */
+#define I2C_CLIENTS_MAX 16
+
+/* debugging */
+
+/* Debug message category flags, combined into the 'debug' bitmask */
+#define CX18_DBGFLG_WARN  (1 << 0)
+#define CX18_DBGFLG_INFO  (1 << 1)
+#define CX18_DBGFLG_API   (1 << 2)
+#define CX18_DBGFLG_DMA   (1 << 3)
+#define CX18_DBGFLG_IOCTL (1 << 4)
+#define CX18_DBGFLG_FILE  (1 << 5)
+#define CX18_DBGFLG_I2C   (1 << 6)
+#define CX18_DBGFLG_IRQ   (1 << 7)
+/* Flag to turn on high volume debugging */
+#define CX18_DBGFLG_HIGHVOL (1 << 8)
+
+/* NOTE: extra space before comma in 'cx->num , ## args' is required for
+   gcc-2.95, otherwise it won't compile. */
+#define CX18_DEBUG(x, type, fmt, args...) \
+       do { \
+               if ((x) & cx18_debug) \
+                       printk(KERN_INFO "cx18-%d " type ": " fmt, cx->num , ## args); \
+       } while (0)
+#define CX18_DEBUG_WARN(fmt, args...)  CX18_DEBUG(CX18_DBGFLG_WARN, "warning", fmt , ## args)
+#define CX18_DEBUG_INFO(fmt, args...)  CX18_DEBUG(CX18_DBGFLG_INFO, "info", fmt , ## args)
+#define CX18_DEBUG_API(fmt, args...)   CX18_DEBUG(CX18_DBGFLG_API, "api", fmt , ## args)
+#define CX18_DEBUG_DMA(fmt, args...)   CX18_DEBUG(CX18_DBGFLG_DMA, "dma", fmt , ## args)
+#define CX18_DEBUG_IOCTL(fmt, args...) CX18_DEBUG(CX18_DBGFLG_IOCTL, "ioctl", fmt , ## args)
+#define CX18_DEBUG_FILE(fmt, args...)  CX18_DEBUG(CX18_DBGFLG_FILE, "file", fmt , ## args)
+#define CX18_DEBUG_I2C(fmt, args...)   CX18_DEBUG(CX18_DBGFLG_I2C, "i2c", fmt , ## args)
+#define CX18_DEBUG_IRQ(fmt, args...)   CX18_DEBUG(CX18_DBGFLG_IRQ, "irq", fmt , ## args)
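+
+/* Example (hypothetical message): with the info bit (0x0002) set in the
+   debug mask, CX18_DEBUG_INFO("mapped %d buffers\n", n) on card 0 prints
+   "cx18-0 info: mapped <n> buffers" at KERN_INFO level; with the bit
+   clear, nothing is printed. */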
+
+#define CX18_DEBUG_HIGH_VOL(x, type, fmt, args...) \
+       do { \
+               if (((x) & cx18_debug) && (cx18_debug & CX18_DBGFLG_HIGHVOL)) \
+                       printk(KERN_INFO "cx18-%d " type ": " fmt, cx->num , ## args); \
+       } while (0)
+#define CX18_DEBUG_HI_WARN(fmt, args...)  CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_WARN, "warning", fmt , ## args)
+#define CX18_DEBUG_HI_INFO(fmt, args...)  CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_INFO, "info", fmt , ## args)
+#define CX18_DEBUG_HI_API(fmt, args...)   CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_API, "api", fmt , ## args)
+#define CX18_DEBUG_HI_DMA(fmt, args...)   CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_DMA, "dma", fmt , ## args)
+#define CX18_DEBUG_HI_IOCTL(fmt, args...) CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_IOCTL, "ioctl", fmt , ## args)
+#define CX18_DEBUG_HI_FILE(fmt, args...)  CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_FILE, "file", fmt , ## args)
+#define CX18_DEBUG_HI_I2C(fmt, args...)   CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_I2C, "i2c", fmt , ## args)
+#define CX18_DEBUG_HI_IRQ(fmt, args...)   CX18_DEBUG_HIGH_VOL(CX18_DBGFLG_IRQ, "irq", fmt , ## args)
+
+/* Standard kernel messages */
+#define CX18_ERR(fmt, args...)      printk(KERN_ERR  "cx18-%d: " fmt, cx->num , ## args)
+#define CX18_WARN(fmt, args...)     printk(KERN_WARNING "cx18-%d: " fmt, cx->num , ## args)
+#define CX18_INFO(fmt, args...)     printk(KERN_INFO "cx18-%d: " fmt, cx->num , ## args)
+
+/* Values for CX18_API_DEC_PLAYBACK_SPEED mpeg_frame_type_mask parameter: */
+#define MPEG_FRAME_TYPE_IFRAME 1
+#define MPEG_FRAME_TYPE_IFRAME_PFRAME 3
+#define MPEG_FRAME_TYPE_ALL 7
+
+#define CX18_MAX_PGM_INDEX (400)
+
+extern int cx18_debug;
+
+
+struct cx18_options {
+       int megabytes[CX18_MAX_STREAMS]; /* Size in megabytes of each stream */
+       int cardtype;           /* force card type on load */
+       int tuner;              /* set tuner on load */
+       int radio;              /* enable/disable radio */
+};
+
+/* per-buffer bit flags */
+#define CX18_F_B_NEED_BUF_SWAP  0      /* this buffer should be byte swapped */
+
+/* per-stream, s_flags */
+#define CX18_F_S_CLAIMED       3       /* this stream is claimed */
+#define CX18_F_S_STREAMING      4      /* the fw is decoding/encoding this stream */
+#define CX18_F_S_INTERNAL_USE  5       /* this stream is used internally (sliced VBI processing) */
+#define CX18_F_S_STREAMOFF     7       /* signal end of stream EOS */
+#define CX18_F_S_APPL_IO        8      /* this stream is used read/written by an application */
+
+/* per-cx18, i_flags */
+#define CX18_F_I_LOADED_FW     0       /* Loaded the firmware the first time */
+#define CX18_F_I_EOS           4       /* End of encoder stream reached */
+#define CX18_F_I_RADIO_USER    5       /* The radio tuner is selected */
+#define CX18_F_I_ENC_PAUSED    13      /* the encoder is paused */
+#define CX18_F_I_INITED                21      /* set after first open */
+#define CX18_F_I_FAILED                22      /* set if first open failed */
+
+/* These are the VBI types as they appear in the embedded VBI private packets. */
+#define CX18_SLICED_TYPE_TELETEXT_B     (1)
+#define CX18_SLICED_TYPE_CAPTION_525    (4)
+#define CX18_SLICED_TYPE_WSS_625        (5)
+#define CX18_SLICED_TYPE_VPS            (7)
+
+struct cx18_buffer {
+       struct list_head list;
+       dma_addr_t dma_handle;
+       u32 id;
+       unsigned long b_flags;
+       char *buf;
+
+       u32 bytesused;
+       u32 readpos;
+};
+
+struct cx18_queue {
+       struct list_head list;
+       u32 buffers;
+       u32 length;
+       u32 bytesused;
+};
+
+struct cx18_dvb {
+       struct dmx_frontend hw_frontend;
+       struct dmx_frontend mem_frontend;
+       struct dmxdev dmxdev;
+       struct dvb_adapter dvb_adapter;
+       struct dvb_demux demux;
+       struct dvb_frontend *fe;
+       struct dvb_net dvbnet;
+       int enabled;
+       int feeding;
+
+       struct mutex feedlock;
+
+};
+
+struct cx18;    /* forward reference */
+struct cx18_scb; /* forward reference */
+
+struct cx18_stream {
+       /* These first four fields are always set, even if the stream
+          is not actually created. */
+       struct video_device *v4l2dev;   /* NULL when stream not created */
+       struct cx18 *cx;                /* for ease of use */
+       const char *name;               /* name of the stream */
+       int type;                       /* stream type */
+       u32 handle;                     /* task handle */
+       unsigned mdl_offset;
+
+       u32 id;
+       spinlock_t qlock;       /* locks access to the queues */
+       unsigned long s_flags;  /* status flags, see above */
+       int dma;                /* can be PCI_DMA_TODEVICE,
+                                  PCI_DMA_FROMDEVICE or
+                                  PCI_DMA_NONE */
+       u64 dma_pts;
+       wait_queue_head_t waitq;
+
+       /* Buffer Stats */
+       u32 buffers;
+       u32 buf_size;
+       u32 buffers_stolen;
+
+       /* Buffer Queues */
+       struct cx18_queue q_free;       /* free buffers */
+       struct cx18_queue q_full;       /* full buffers */
+       struct cx18_queue q_io;         /* waiting for I/O */
+
+       /* DVB / Digital Transport */
+       struct cx18_dvb dvb;
+};
+
+struct cx18_open_id {
+       u32 open_id;
+       int type;
+       enum v4l2_priority prio;
+       struct cx18 *cx;
+};
+
+/* forward declaration of struct defined in cx18-cards.h */
+struct cx18_card;
+
+
+#define CX18_VBI_FRAMES 32
+
+/* VBI data */
+struct vbi_info {
+       u32 enc_size;
+       u32 frame;
+       u8 cc_data_odd[256];
+       u8 cc_data_even[256];
+       int cc_pos;
+       u8 cc_no_update;
+       u8 vps[5];
+       u8 vps_found;
+       int wss;
+       u8 wss_found;
+       u8 wss_no_update;
+       u32 raw_decoder_line_size;
+       u8 raw_decoder_sav_odd_field;
+       u8 raw_decoder_sav_even_field;
+       u32 sliced_decoder_line_size;
+       u8 sliced_decoder_sav_odd_field;
+       u8 sliced_decoder_sav_even_field;
+       struct v4l2_format in;
+       /* convenience pointer to sliced struct in vbi_in union */
+       struct v4l2_sliced_vbi_format *sliced_in;
+       u32 service_set_in;
+       int insert_mpeg;
+
+       /* Buffer for the maximum of 2 * 18 * packet_size sliced VBI lines.
+          One for /dev/vbi0 and one for /dev/vbi8 */
+       struct v4l2_sliced_vbi_data sliced_data[36];
+
+       /* Buffer for VBI data inserted into MPEG stream.
+          The first byte is a dummy byte that's never used.
+          The next 16 bytes contain the MPEG header for the VBI data,
+          the remainder is the actual VBI data.
+          The max size accepted by the MPEG VBI reinsertion turns out
+          to be 1552 bytes, which happens to be 4 + (1 + 42) * (2 * 18) bytes,
+          where 4 is a four byte header, 42 is the max sliced VBI payload, 1 is
+          a single line header byte and 2 * 18 is the number of VBI lines per frame.
+
+          However, it seems that the data must be 1K aligned, so we have to
+          pad the data out to the next 1 or 2 K boundary.
+
+          Each entry in this pointer array is allocated 2049 bytes to store
+          one VBI frame. */
+       u8 *sliced_mpeg_data[CX18_VBI_FRAMES];
+       u32 sliced_mpeg_size[CX18_VBI_FRAMES];
+       struct cx18_buffer sliced_mpeg_buf;
+       u32 inserted_frame;
+
+       u32 start[2], count;
+       u32 raw_size;
+       u32 sliced_size;
+};
+
+/* Per cx23418, per I2C bus private algo callback data */
+struct cx18_i2c_algo_callback_data {
+       struct cx18 *cx;
+       int bus_index;   /* 0 or 1 for the cx23418's 1st or 2nd I2C bus */
+};
+
+/* Struct to hold info about cx18 cards */
+struct cx18 {
+       int num;                /* board number, -1 during init! */
+       char name[8];           /* board name for printk and interrupts (e.g. 'cx18-0') */
+       struct pci_dev *dev;    /* PCI device */
+       const struct cx18_card *card;   /* card information */
+       const char *card_name;  /* full name of the card */
+       const struct cx18_card_tuner_i2c *card_i2c; /* i2c addresses to probe for tuner */
+       u8 is_50hz;
+       u8 is_60hz;
+       u8 is_out_50hz;
+       u8 is_out_60hz;
+       u8 nof_inputs;          /* number of video inputs */
+       u8 nof_audio_inputs;    /* number of audio inputs */
+       u16 buffer_id;          /* buffer ID counter */
+       u32 v4l2_cap;           /* V4L2 capabilities of card */
+       u32 hw_flags;           /* Hardware description of the board */
+       unsigned mdl_offset;
+       struct cx18_scb *scb;   /* pointer to SCB */
+
+       struct cx18_av_state av_state;
+
+       /* codec settings */
+       struct cx2341x_mpeg_params params;
+       u32 filter_mode;
+       u32 temporal_strength;
+       u32 spatial_strength;
+
+       /* dualwatch */
+       unsigned long dualwatch_jiffies;
+       u16 dualwatch_stereo_mode;
+
+       /* Digitizer type */
+       int digitizer;          /* 0x00EF = saa7114, 0x00F0 = saa7115, 0x0106 = mic */
+
+       struct mutex serialize_lock;    /* mutex used to serialize open/close/start/stop/ioctl operations */
+       struct cx18_options options;    /* User options */
+       int stream_buf_size[CX18_MAX_STREAMS]; /* Stream buffer size */
+       struct cx18_stream streams[CX18_MAX_STREAMS];   /* Stream data */
+       unsigned long i_flags;  /* global cx18 flags */
+       atomic_t capturing;     /* count number of active capture streams */
+       spinlock_t lock;        /* lock access to this struct */
+       int search_pack_header;
+
+       spinlock_t dma_reg_lock; /* lock access to DMA engine registers */
+
+       int open_id;            /* incremented each time an open occurs, used as
+                                  unique ID. Starts at 1, so 0 can be used as
+                                  uninitialized value in the stream->id. */
+
+       u32 base_addr;
+       struct v4l2_prio_state prio;
+
+       u8 card_rev;
+       void __iomem *enc_mem, *reg_mem;
+
+       struct vbi_info vbi;
+
+       u32 pgm_info_offset;
+       u32 pgm_info_num;
+       u32 pgm_info_write_idx;
+       u32 pgm_info_read_idx;
+       struct v4l2_enc_idx_entry pgm_info[CX18_MAX_PGM_INDEX];
+
+       u64 mpg_data_received;
+       u64 vbi_data_inserted;
+
+       wait_queue_head_t mb_apu_waitq;
+       wait_queue_head_t mb_cpu_waitq;
+       wait_queue_head_t mb_epu_waitq;
+       wait_queue_head_t mb_hpu_waitq;
+       wait_queue_head_t cap_w;
+       /* when the current DMA is finished this queue is woken up */
+       wait_queue_head_t dma_waitq;
+
+       /* i2c */
+       struct i2c_adapter i2c_adap[2];
+       struct i2c_algo_bit_data i2c_algo[2];
+       struct cx18_i2c_algo_callback_data i2c_algo_cb_data[2];
+       struct i2c_client i2c_client[2];
+       struct mutex i2c_bus_lock[2];
+       struct i2c_client *i2c_clients[I2C_CLIENTS_MAX];
+
+       /* v4l2 and User settings */
+
+       /* codec settings */
+       u32 audio_input;
+       u32 active_input;
+       u32 active_output;
+       v4l2_std_id std;
+       v4l2_std_id tuner_std;  /* The norm of the tuner (fixed) */
+};
+
+/* Globals */
+extern struct cx18 *cx18_cards[];
+extern int cx18_cards_active;
+extern int cx18_first_minor;
+extern spinlock_t cx18_cards_lock;
+
+/*==============Prototypes==================*/
+
+/* Return non-zero if a signal is pending */
+int cx18_msleep_timeout(unsigned int msecs, int intr);
+
+/* Wait on queue, returns -EINTR if interrupted */
+int cx18_waitq(wait_queue_head_t *waitq);
+
+/* Read Hauppauge eeprom */
+struct tveeprom; /* forward reference */
+void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv);
+
+/* First-open initialization: load firmware, etc. */
+int cx18_init_on_first_open(struct cx18 *cx);
+
+/* PCI writes are posted: if the register is not read back, the write does
+   not always take effect right away. Reading the register back forces any
+   pending PCI writes to complete (in order), so you can be sure the write
+   has reached the device.
+
+   Rarely needed, only in some timing-sensitive cases. Apparently, if this
+   is not done, some motherboards kill the firmware and leave it in a
+   broken state until the computer is rebooted. */
+#define write_sync(val, reg) \
+       do { writel(val, reg); readl(reg); } while (0)
+
+#define read_reg(reg) readl(cx->reg_mem + (reg))
+#define write_reg(val, reg) writel(val, cx->reg_mem + (reg))
+#define write_reg_sync(val, reg) \
+       do { write_reg(val, reg); read_reg(reg); } while (0)
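+
+/* Minimal usage sketch (register name borrowed from the interrupt helpers
+   below, 'mask' is a hypothetical variable, and 'cx' must be in scope as
+   these macros assume): update an interrupt enable register and flush the
+   posted write before relying on it:
+
+       write_reg_sync(read_reg(SW1_INT_ENABLE_PCI) | mask, SW1_INT_ENABLE_PCI);
+
+   The read-back in the _sync variants is what forces the write through. */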
+
+#define read_enc(addr) readl(cx->enc_mem + (u32)(addr))
+#define write_enc(val, addr) writel(val, cx->enc_mem + (u32)(addr))
+#define write_enc_sync(val, addr) \
+       do { write_enc(val, addr); read_enc(addr); } while (0)
+
+#define sw1_irq_enable(val) do { \
+       write_reg(val, SW1_INT_STATUS); \
+       write_reg(read_reg(SW1_INT_ENABLE_PCI) | (val), SW1_INT_ENABLE_PCI); \
+} while (0)
+
+#define sw1_irq_disable(val) \
+       write_reg(read_reg(SW1_INT_ENABLE_PCI) & ~(val), SW1_INT_ENABLE_PCI);
+
+#define sw2_irq_enable(val) do { \
+       write_reg(val, SW2_INT_STATUS); \
+       write_reg(read_reg(SW2_INT_ENABLE_PCI) | (val), SW2_INT_ENABLE_PCI); \
+} while (0)
+
+#define sw2_irq_disable(val) \
+       write_reg(read_reg(SW2_INT_ENABLE_PCI) & ~(val), SW2_INT_ENABLE_PCI);
+
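+/* setup_page() programs bits [12:8] of register 0xD000F8 with bits [29:25]
+   of the supplied address, leaving the register's other bits untouched.
+   The register itself is not documented here, but as the name suggests it
+   appears to select the active page/window of CX23418 memory. */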
+#define setup_page(addr) do { \
+    u32 val = read_reg(0xD000F8) & ~0x1f00; \
+    write_reg(val | (((addr) >> 17) & 0x1f00), 0xD000F8); \
+} while (0)
+
+#endif /* CX18_DRIVER_H */
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
new file mode 100644 (file)
index 0000000..65efe69
--- /dev/null
@@ -0,0 +1,288 @@
+/*
+ *  cx18 functions for DVB support
+ *
+ *  Copyright (c) 2008 Steven Toth <stoth@hauppauge.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "cx18-version.h"
+#include "cx18-dvb.h"
+#include "cx18-streams.h"
+#include "cx18-cards.h"
+#include "s5h1409.h"
+
+/* Wait until the MXL500X driver is merged */
+#ifdef HAVE_MXL500X
+#include "mxl500x.h"
+#endif
+
+DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);
+
+#define CX18_REG_DMUX_NUM_PORT_0_CONTROL 0xd5a000
+
+#ifdef HAVE_MXL500X
+static struct mxl500x_config hauppauge_hvr1600_tuner = {
+       .delsys    = MXL500x_MODE_ATSC,
+       .octf      = MXL500x_OCTF_CH,
+       .xtal_freq = 16000000,
+       .iflo_freq = 5380000,
+       .ref_freq  = 322800000,
+       .rssi_ena  = MXL_RSSI_ENABLE,
+       .addr      = 0xC6 >> 1,
+};
+
+static struct s5h1409_config hauppauge_hvr1600_config = {
+       .demod_address = 0x32 >> 1,
+       .output_mode   = S5H1409_SERIAL_OUTPUT,
+       .gpio          = S5H1409_GPIO_ON,
+       .qam_if        = 44000,
+       .inversion     = S5H1409_INVERSION_OFF,
+       .status_mode   = S5H1409_DEMODLOCKING,
+       .mpeg_timing   = S5H1409_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK
+
+};
+#endif
+
+static int dvb_register(struct cx18_stream *stream);
+
+/* Kernel DVB framework calls this when the feed needs to start.
+ * The CX18 framework should enable the transport DMA handling
+ * and queue processing.
+ */
+static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
+{
+       struct dvb_demux *demux = feed->demux;
+       struct cx18_stream *stream = (struct cx18_stream *) demux->priv;
+       struct cx18 *cx = stream->cx;
+       int ret = -EINVAL;
+       u32 v;
+
+       CX18_DEBUG_INFO("Start feed: pid = 0x%x index = %d\n",
+                       feed->pid, feed->index);
+       switch (cx->card->type) {
+       case CX18_CARD_HVR_1600_ESMT:
+       case CX18_CARD_HVR_1600_SAMSUNG:
+               v = read_reg(CX18_REG_DMUX_NUM_PORT_0_CONTROL);
+               v |= 0x00400000; /* Serial Mode */
+               v |= 0x00002000; /* Data Length - Byte */
+               v |= 0x00010000; /* Error - Polarity */
+               v |= 0x00020000; /* Error - Passthru */
+               v |= 0x000c0000; /* Error - Ignore */
+               write_reg(v, CX18_REG_DMUX_NUM_PORT_0_CONTROL);
+               break;
+
+       default:
+               /* Assumption - Parallel transport - Signalling
+                * undefined or default.
+                */
+               break;
+       }
+
+       if (!demux->dmx.frontend)
+               return -EINVAL;
+
+       if (stream) {
+               mutex_lock(&stream->dvb.feedlock);
+               if (stream->dvb.feeding++ == 0) {
+                       CX18_DEBUG_INFO("Starting Transport DMA\n");
+                       ret = cx18_start_v4l2_encode_stream(stream);
+               } else
+                       ret = 0;
+               mutex_unlock(&stream->dvb.feedlock);
+       }
+
+       return ret;
+}
+
+/* Kernel DVB framework calls this when the feed needs to stop. */
+static int cx18_dvb_stop_feed(struct dvb_demux_feed *feed)
+{
+       struct dvb_demux *demux = feed->demux;
+       struct cx18_stream *stream = (struct cx18_stream *)demux->priv;
+       struct cx18 *cx = stream->cx;
+       int ret = -EINVAL;
+
+       CX18_DEBUG_INFO("Stop feed: pid = 0x%x index = %d\n",
+                       feed->pid, feed->index);
+
+       if (stream) {
+               mutex_lock(&stream->dvb.feedlock);
+               if (--stream->dvb.feeding == 0) {
+                       CX18_DEBUG_INFO("Stopping Transport DMA\n");
+                       ret = cx18_stop_v4l2_encode_stream(stream, 0);
+               } else
+                       ret = 0;
+               mutex_unlock(&stream->dvb.feedlock);
+       }
+
+       return ret;
+}
+
+int cx18_dvb_register(struct cx18_stream *stream)
+{
+       struct cx18 *cx = stream->cx;
+       struct cx18_dvb *dvb = &stream->dvb;
+       struct dvb_adapter *dvb_adapter;
+       struct dvb_demux *dvbdemux;
+       struct dmx_demux *dmx;
+       int ret;
+
+       if (!dvb)
+               return -EINVAL;
+
+       ret = dvb_register_adapter(&dvb->dvb_adapter,
+                       CX18_DRIVER_NAME,
+                       THIS_MODULE, &cx->dev->dev, adapter_nr);
+       if (ret < 0)
+               goto err_out;
+
+       dvb_adapter = &dvb->dvb_adapter;
+
+       dvbdemux = &dvb->demux;
+
+       dvbdemux->priv = (void *)stream;
+
+       dvbdemux->filternum = 256;
+       dvbdemux->feednum = 256;
+       dvbdemux->start_feed = cx18_dvb_start_feed;
+       dvbdemux->stop_feed = cx18_dvb_stop_feed;
+       dvbdemux->dmx.capabilities = (DMX_TS_FILTERING |
+               DMX_SECTION_FILTERING | DMX_MEMORY_BASED_FILTERING);
+       ret = dvb_dmx_init(dvbdemux);
+       if (ret < 0)
+               goto err_dvb_unregister_adapter;
+
+       dmx = &dvbdemux->dmx;
+
+       dvb->hw_frontend.source = DMX_FRONTEND_0;
+       dvb->mem_frontend.source = DMX_MEMORY_FE;
+       dvb->dmxdev.filternum = 256;
+       dvb->dmxdev.demux = dmx;
+
+       ret = dvb_dmxdev_init(&dvb->dmxdev, dvb_adapter);
+       if (ret < 0)
+               goto err_dvb_dmx_release;
+
+       ret = dmx->add_frontend(dmx, &dvb->hw_frontend);
+       if (ret < 0)
+               goto err_dvb_dmxdev_release;
+
+       ret = dmx->add_frontend(dmx, &dvb->mem_frontend);
+       if (ret < 0)
+               goto err_remove_hw_frontend;
+
+       ret = dmx->connect_frontend(dmx, &dvb->hw_frontend);
+       if (ret < 0)
+               goto err_remove_mem_frontend;
+
+       ret = dvb_register(stream);
+       if (ret < 0)
+               goto err_disconnect_frontend;
+
+       dvb_net_init(dvb_adapter, &dvb->dvbnet, dmx);
+
+       CX18_INFO("DVB Frontend registered\n");
+       mutex_init(&dvb->feedlock);
+       dvb->enabled = 1;
+       return ret;
+
+err_disconnect_frontend:
+       dmx->disconnect_frontend(dmx);
+err_remove_mem_frontend:
+       dmx->remove_frontend(dmx, &dvb->mem_frontend);
+err_remove_hw_frontend:
+       dmx->remove_frontend(dmx, &dvb->hw_frontend);
+err_dvb_dmxdev_release:
+       dvb_dmxdev_release(&dvb->dmxdev);
+err_dvb_dmx_release:
+       dvb_dmx_release(dvbdemux);
+err_dvb_unregister_adapter:
+       dvb_unregister_adapter(dvb_adapter);
+err_out:
+       return ret;
+}
+
+void cx18_dvb_unregister(struct cx18_stream *stream)
+{
+       struct cx18 *cx = stream->cx;
+       struct cx18_dvb *dvb = &stream->dvb;
+       struct dvb_adapter *dvb_adapter;
+       struct dvb_demux *dvbdemux;
+       struct dmx_demux *dmx;
+
+       CX18_INFO("unregister DVB\n");
+
+       dvb_adapter = &dvb->dvb_adapter;
+       dvbdemux = &dvb->demux;
+       dmx = &dvbdemux->dmx;
+
+       dmx->close(dmx);
+       dvb_net_release(&dvb->dvbnet);
+       dmx->remove_frontend(dmx, &dvb->mem_frontend);
+       dmx->remove_frontend(dmx, &dvb->hw_frontend);
+       dvb_dmxdev_release(&dvb->dmxdev);
+       dvb_dmx_release(dvbdemux);
+       dvb_unregister_frontend(dvb->fe);
+       dvb_frontend_detach(dvb->fe);
+       dvb_unregister_adapter(dvb_adapter);
+}
+
+/* All the DVB attach calls go here; this function gets modified
+ * for each new card. No other function in this file needs
+ * to change.
+ */
+static int dvb_register(struct cx18_stream *stream)
+{
+       struct cx18_dvb *dvb = &stream->dvb;
+       struct cx18 *cx = stream->cx;
+       int ret = 0;
+
+       switch (cx->card->type) {
+/* Wait until the MXL500X driver is merged */
+#ifdef HAVE_MXL500X
+       case CX18_CARD_HVR_1600_ESMT:
+       case CX18_CARD_HVR_1600_SAMSUNG:
+               dvb->fe = dvb_attach(s5h1409_attach,
+                       &hauppauge_hvr1600_config,
+                       &cx->i2c_adap[0]);
+               if (dvb->fe != NULL) {
+                       dvb_attach(mxl500x_attach, dvb->fe,
+                               &hauppauge_hvr1600_tuner,
+                               &cx->i2c_adap[0]);
+                       ret = 0;
+               }
+               break;
+#endif
+       default:
+               /* No digital TV support */
+               break;
+       }
+
+       if (dvb->fe == NULL) {
+               CX18_ERR("frontend initialization failed\n");
+               return -1;
+       }
+
+       ret = dvb_register_frontend(&dvb->dvb_adapter, dvb->fe);
+       if (ret < 0) {
+               if (dvb->fe->ops.release)
+                       dvb->fe->ops.release(dvb->fe);
+               return ret;
+       }
+
+       return ret;
+}
diff --git a/drivers/media/video/cx18/cx18-dvb.h b/drivers/media/video/cx18/cx18-dvb.h
new file mode 100644 (file)
index 0000000..d6a6ccd
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ *  cx18 functions for DVB support
+ *
+ *  Copyright (c) 2008 Steven Toth <stoth@hauppauge.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include "cx18-driver.h"
+
+int cx18_dvb_register(struct cx18_stream *stream);
+void cx18_dvb_unregister(struct cx18_stream *stream);
diff --git a/drivers/media/video/cx18/cx18-fileops.c b/drivers/media/video/cx18/cx18-fileops.c
new file mode 100644 (file)
index 0000000..6930306
--- /dev/null
@@ -0,0 +1,711 @@
+/*
+ *  cx18 file operation functions
+ *
+ *  Derived from ivtv-fileops.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-fileops.h"
+#include "cx18-i2c.h"
+#include "cx18-queue.h"
+#include "cx18-vbi.h"
+#include "cx18-audio.h"
+#include "cx18-mailbox.h"
+#include "cx18-scb.h"
+#include "cx18-streams.h"
+#include "cx18-controls.h"
+#include "cx18-ioctl.h"
+#include "cx18-cards.h"
+
+/* This function tries to claim the stream for a specific file descriptor.
+   If no one else is using this stream then the stream is claimed and
+   associated VBI streams are also automatically claimed.
+   Returns 0 on success, or -EBUSY if someone else has already
+   claimed the stream. */
+int cx18_claim_stream(struct cx18_open_id *id, int type)
+{
+       struct cx18 *cx = id->cx;
+       struct cx18_stream *s = &cx->streams[type];
+       struct cx18_stream *s_vbi;
+       int vbi_type;
+
+       if (test_and_set_bit(CX18_F_S_CLAIMED, &s->s_flags)) {
+               /* someone already claimed this stream */
+               if (s->id == id->open_id) {
+                       /* yes, this file descriptor did. So that's OK. */
+                       return 0;
+               }
+               if (s->id == -1 && type == CX18_ENC_STREAM_TYPE_VBI) {
+                       /* VBI is already handled internally; now also assign
+                          the file descriptor to this stream for external
+                          reading of the stream. */
+                       s->id = id->open_id;
+                       CX18_DEBUG_INFO("Start Read VBI\n");
+                       return 0;
+               }
+               /* someone else is using this stream already */
+               CX18_DEBUG_INFO("Stream %d is busy\n", type);
+               return -EBUSY;
+       }
+       s->id = id->open_id;
+
+       /* CX18_DEC_STREAM_TYPE_MPG needs to claim CX18_DEC_STREAM_TYPE_VBI,
+          CX18_ENC_STREAM_TYPE_MPG needs to claim CX18_ENC_STREAM_TYPE_VBI
+          (provided VBI insertion is on and sliced VBI is selected), for all
+          other streams we're done */
+       if (type == CX18_ENC_STREAM_TYPE_MPG &&
+                  cx->vbi.insert_mpeg && cx->vbi.sliced_in->service_set) {
+               vbi_type = CX18_ENC_STREAM_TYPE_VBI;
+       } else {
+               return 0;
+       }
+       s_vbi = &cx->streams[vbi_type];
+
+       set_bit(CX18_F_S_CLAIMED, &s_vbi->s_flags);
+
+       /* mark that it is used internally */
+       set_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags);
+       return 0;
+}
+
+/* This function releases a previously claimed stream. It will take into
+   account associated VBI streams. */
+void cx18_release_stream(struct cx18_stream *s)
+{
+       struct cx18 *cx = s->cx;
+       struct cx18_stream *s_vbi;
+
+       s->id = -1;
+       if (s->type == CX18_ENC_STREAM_TYPE_VBI &&
+               test_bit(CX18_F_S_INTERNAL_USE, &s->s_flags)) {
+               /* this stream is still in use internally */
+               return;
+       }
+       if (!test_and_clear_bit(CX18_F_S_CLAIMED, &s->s_flags)) {
+               CX18_DEBUG_WARN("Release stream %s not in use!\n", s->name);
+               return;
+       }
+
+       cx18_flush_queues(s);
+
+       /* CX18_ENC_STREAM_TYPE_MPG needs to release CX18_ENC_STREAM_TYPE_VBI,
+          for all other streams we're done */
+       if (s->type == CX18_ENC_STREAM_TYPE_MPG)
+               s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI];
+       else
+               return;
+
+       /* clear internal use flag */
+       if (!test_and_clear_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags)) {
+               /* was already cleared */
+               return;
+       }
+       if (s_vbi->id != -1) {
+               /* VBI stream still claimed by a file descriptor */
+               return;
+       }
+       clear_bit(CX18_F_S_CLAIMED, &s_vbi->s_flags);
+       cx18_flush_queues(s_vbi);
+}
+
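+/* Check the tuner's current audio mode and, when a bilingual (dual-language)
+   broadcast is detected, switch the encoder's audio parameters to dual so
+   both languages end up in the recorded stream. */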
+static void cx18_dualwatch(struct cx18 *cx)
+{
+       struct v4l2_tuner vt;
+       u16 new_bitmap;
+       u16 new_stereo_mode;
+       const u16 stereo_mask = 0x0300;
+       const u16 dual = 0x0200;
+
+       new_stereo_mode = cx->params.audio_properties & stereo_mask;
+       memset(&vt, 0, sizeof(vt));
+       cx18_call_i2c_clients(cx, VIDIOC_G_TUNER, &vt);
+       if (vt.audmode == V4L2_TUNER_MODE_LANG1_LANG2 &&
+                       (vt.rxsubchans & V4L2_TUNER_SUB_LANG2))
+               new_stereo_mode = dual;
+
+       if (new_stereo_mode == cx->dualwatch_stereo_mode)
+               return;
+
+       new_bitmap = new_stereo_mode | (cx->params.audio_properties & ~stereo_mask);
+
+       CX18_DEBUG_INFO("dualwatch: change stereo flag from 0x%x to 0x%x. new audio_bitmask=0x%ux\n",
+                          cx->dualwatch_stereo_mode, new_stereo_mode, new_bitmap);
+
+       if (cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2,
+                               cx18_find_handle(cx), new_bitmap) == 0) {
+               cx->dualwatch_stereo_mode = new_stereo_mode;
+               return;
+       }
+       CX18_DEBUG_INFO("dualwatch: changing stereo flag failed\n");
+}
+
+
+static struct cx18_buffer *cx18_get_buffer(struct cx18_stream *s, int non_block, int *err)
+{
+       struct cx18 *cx = s->cx;
+       struct cx18_stream *s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI];
+       struct cx18_buffer *buf;
+       DEFINE_WAIT(wait);
+
+       *err = 0;
+       while (1) {
+               if (s->type == CX18_ENC_STREAM_TYPE_MPG) {
+
+                       if (time_after(jiffies, cx->dualwatch_jiffies + msecs_to_jiffies(1000))) {
+                               cx->dualwatch_jiffies = jiffies;
+                               cx18_dualwatch(cx);
+                       }
+                       if (test_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
+                           !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) {
+                               while ((buf = cx18_dequeue(s_vbi, &s_vbi->q_full))) {
+                                       /* byteswap and process VBI data */
+/*                                     cx18_process_vbi_data(cx, buf, s_vbi->dma_pts, s_vbi->type); */
+                                       cx18_enqueue(s_vbi, buf, &s_vbi->q_free);
+                               }
+                       }
+                       buf = &cx->vbi.sliced_mpeg_buf;
+                       if (buf->readpos != buf->bytesused)
+                               return buf;
+               }
+
+               /* do we have leftover data? */
+               buf = cx18_dequeue(s, &s->q_io);
+               if (buf)
+                       return buf;
+
+               /* do we have new data? */
+               buf = cx18_dequeue(s, &s->q_full);
+               if (buf) {
+                       if (!test_and_clear_bit(CX18_F_B_NEED_BUF_SWAP,
+                                               &buf->b_flags))
+                               return buf;
+                       if (s->type == CX18_ENC_STREAM_TYPE_MPG)
+                               /* byteswap MPG data */
+                               cx18_buf_swap(buf);
+                       else {
+                               /* byteswap and process VBI data */
+                               cx18_process_vbi_data(cx, buf,
+                                               s->dma_pts, s->type);
+                       }
+                       return buf;
+               }
+
+               /* return if end of stream */
+               if (!test_bit(CX18_F_S_STREAMING, &s->s_flags)) {
+                       CX18_DEBUG_INFO("EOS %s\n", s->name);
+                       return NULL;
+               }
+
+               /* return if file was opened with O_NONBLOCK */
+               if (non_block) {
+                       *err = -EAGAIN;
+                       return NULL;
+               }
+
+               /* wait for more data to arrive */
+               prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
+               /* New buffers might have become available before we were added
+                  to the waitqueue */
+               if (!s->q_full.buffers)
+                       schedule();
+               finish_wait(&s->waitq, &wait);
+               if (signal_pending(current)) {
+                       /* return if a signal was received */
+                       CX18_DEBUG_INFO("User stopped %s\n", s->name);
+                       *err = -EINTR;
+                       return NULL;
+               }
+       }
+}
+
+static void cx18_setup_sliced_vbi_buf(struct cx18 *cx)
+{
+       int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES;
+
+       cx->vbi.sliced_mpeg_buf.buf = cx->vbi.sliced_mpeg_data[idx];
+       cx->vbi.sliced_mpeg_buf.bytesused = cx->vbi.sliced_mpeg_size[idx];
+       cx->vbi.sliced_mpeg_buf.readpos = 0;
+}
+
+static size_t cx18_copy_buf_to_user(struct cx18_stream *s,
+               struct cx18_buffer *buf, char __user *ubuf, size_t ucount)
+{
+       struct cx18 *cx = s->cx;
+       size_t len = buf->bytesused - buf->readpos;
+
+       if (len > ucount)
+               len = ucount;
+       if (cx->vbi.insert_mpeg && s->type == CX18_ENC_STREAM_TYPE_MPG &&
+           cx->vbi.sliced_in->service_set && buf != &cx->vbi.sliced_mpeg_buf) {
+               const char *start = buf->buf + buf->readpos;
+               const char *p = start + 1;
+               const u8 *q;
+               u8 ch = cx->search_pack_header ? 0xba : 0xe0;
+               int stuffing, i;
+
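+               /* Scan the MPEG data for a video PES header that carries a
+                * PTS and then for the pack header (00 00 01 ba) that follows
+                * it; the copy to user space stops at that pack header so the
+                * sliced VBI data can be inserted at that point.
+                */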
+               while (start + len > p) {
+                       q = memchr(p, 0, start + len - p);
+                       if (q == NULL)
+                               break;
+                       p = q + 1;
+                       if ((char *)q + 15 >= buf->buf + buf->bytesused ||
+                           q[1] != 0 || q[2] != 1 || q[3] != ch)
+                               continue;
+                       if (!cx->search_pack_header) {
+                               if ((q[6] & 0xc0) != 0x80)
+                                       continue;
+                               if (((q[7] & 0xc0) == 0x80 &&
+                                    (q[9] & 0xf0) == 0x20) ||
+                                   ((q[7] & 0xc0) == 0xc0 &&
+                                    (q[9] & 0xf0) == 0x30)) {
+                                       ch = 0xba;
+                                       cx->search_pack_header = 1;
+                                       p = q + 9;
+                               }
+                               continue;
+                       }
+                       stuffing = q[13] & 7;
+                       /* all stuffing bytes must be 0xff */
+                       for (i = 0; i < stuffing; i++)
+                               if (q[14 + i] != 0xff)
+                                       break;
+                       if (i == stuffing &&
+                           (q[4] & 0xc4) == 0x44 &&
+                           (q[12] & 3) == 3 &&
+                           q[14 + stuffing] == 0 &&
+                           q[15 + stuffing] == 0 &&
+                           q[16 + stuffing] == 1) {
+                               cx->search_pack_header = 0;
+                               len = (char *)q - start;
+                               cx18_setup_sliced_vbi_buf(cx);
+                               break;
+                       }
+               }
+       }
+       if (copy_to_user(ubuf, (u8 *)buf->buf + buf->readpos, len)) {
+               CX18_DEBUG_WARN("copy %zd bytes to user failed for %s\n",
+                               len, s->name);
+               return -EFAULT;
+       }
+       buf->readpos += len;
+       if (s->type == CX18_ENC_STREAM_TYPE_MPG &&
+           buf != &cx->vbi.sliced_mpeg_buf)
+               cx->mpg_data_received += len;
+       return len;
+}
+
+static ssize_t cx18_read(struct cx18_stream *s, char __user *ubuf,
+               size_t tot_count, int non_block)
+{
+       struct cx18 *cx = s->cx;
+       size_t tot_written = 0;
+       int single_frame = 0;
+
+       if (atomic_read(&cx->capturing) == 0 && s->id == -1) {
+               /* shouldn't happen */
+               CX18_DEBUG_WARN("Stream %s not initialized before read\n",
+                               s->name);
+               return -EIO;
+       }
+
+       /* Each VBI buffer is one frame; the V4L2 API says that VBI frames
+          should arrive one by one, so make sure we never output more than
+          one VBI frame at a time */
+       if (s->type == CX18_ENC_STREAM_TYPE_VBI &&
+           cx->vbi.sliced_in->service_set)
+               single_frame = 1;
+
+       for (;;) {
+               struct cx18_buffer *buf;
+               int rc;
+
+               buf = cx18_get_buffer(s, non_block, &rc);
+               /* if there is no data available... */
+               if (buf == NULL) {
+                       /* if we got data, then return that regardless */
+                       if (tot_written)
+                               break;
+                       /* EOS condition */
+                       if (rc == 0) {
+                               clear_bit(CX18_F_S_STREAMOFF, &s->s_flags);
+                               clear_bit(CX18_F_S_APPL_IO, &s->s_flags);
+                               cx18_release_stream(s);
+                       }
+                       /* set errno */
+                       return rc;
+               }
+
+               rc = cx18_copy_buf_to_user(s, buf, ubuf + tot_written,
+                               tot_count - tot_written);
+
+               if (buf != &cx->vbi.sliced_mpeg_buf) {
+                       if (buf->readpos == buf->bytesused) {
+                               cx18_buf_sync_for_device(s, buf);
+                               cx18_enqueue(s, buf, &s->q_free);
+                               cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5,
+                                       s->handle,
+                                       (void *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
+                                       1, buf->id, s->buf_size);
+                       } else
+                               cx18_enqueue(s, buf, &s->q_io);
+               } else if (buf->readpos == buf->bytesused) {
+                       int idx = cx->vbi.inserted_frame % CX18_VBI_FRAMES;
+
+                       cx->vbi.sliced_mpeg_size[idx] = 0;
+                       cx->vbi.inserted_frame++;
+                       cx->vbi_data_inserted += buf->bytesused;
+               }
+               if (rc < 0)
+                       return rc;
+               tot_written += rc;
+
+               if (tot_written == tot_count || single_frame)
+                       break;
+       }
+       return tot_written;
+}
+
+static ssize_t cx18_read_pos(struct cx18_stream *s, char __user *ubuf,
+               size_t count, loff_t *pos, int non_block)
+{
+       ssize_t rc = count ? cx18_read(s, ubuf, count, non_block) : 0;
+       struct cx18 *cx = s->cx;
+
+       CX18_DEBUG_HI_FILE("read %zd from %s, got %zd\n", count, s->name, rc);
+       if (rc > 0)
+               *pos += rc;
+       return rc;
+}
+
+int cx18_start_capture(struct cx18_open_id *id)
+{
+       struct cx18 *cx = id->cx;
+       struct cx18_stream *s = &cx->streams[id->type];
+       struct cx18_stream *s_vbi;
+
+       if (s->type == CX18_ENC_STREAM_TYPE_RAD) {
+               /* you cannot read from these stream types. */
+               return -EPERM;
+       }
+
+       /* Try to claim this stream. */
+       if (cx18_claim_stream(id, s->type))
+               return -EBUSY;
+
+       /* If capture is already in progress, then we don't have to
+          do anything extra. */
+       if (test_bit(CX18_F_S_STREAMOFF, &s->s_flags) ||
+           test_and_set_bit(CX18_F_S_STREAMING, &s->s_flags)) {
+               set_bit(CX18_F_S_APPL_IO, &s->s_flags);
+               return 0;
+       }
+
+       /* Start VBI capture if required */
+       s_vbi = &cx->streams[CX18_ENC_STREAM_TYPE_VBI];
+       if (s->type == CX18_ENC_STREAM_TYPE_MPG &&
+           test_bit(CX18_F_S_INTERNAL_USE, &s_vbi->s_flags) &&
+           !test_and_set_bit(CX18_F_S_STREAMING, &s_vbi->s_flags)) {
+               /* Note: the CX18_ENC_STREAM_TYPE_VBI is claimed
+                  automatically when the MPG stream is claimed.
+                  We only need to start the VBI capturing. */
+               if (cx18_start_v4l2_encode_stream(s_vbi)) {
+                       CX18_DEBUG_WARN("VBI capture start failed\n");
+
+                       /* Failure, clean up and return an error */
+                       clear_bit(CX18_F_S_STREAMING, &s_vbi->s_flags);
+                       clear_bit(CX18_F_S_STREAMING, &s->s_flags);
+                       /* also releases the associated VBI stream */
+                       cx18_release_stream(s);
+                       return -EIO;
+               }
+               CX18_DEBUG_INFO("VBI insertion started\n");
+       }
+
+       /* Tell the card to start capturing */
+       if (!cx18_start_v4l2_encode_stream(s)) {
+               /* We're done */
+               set_bit(CX18_F_S_APPL_IO, &s->s_flags);
+               /* Resume a possibly paused encoder */
+               if (test_and_clear_bit(CX18_F_I_ENC_PAUSED, &cx->i_flags))
+                       cx18_vapi(cx, CX18_CPU_CAPTURE_PAUSE, 1, s->handle);
+               return 0;
+       }
+
+       /* failure, clean up */
+       CX18_DEBUG_WARN("Failed to start capturing for stream %s\n", s->name);
+
+       /* Note: the CX18_ENC_STREAM_TYPE_VBI is released
+          automatically when the MPG stream is released.
+          We only need to stop the VBI capturing. */
+       if (s->type == CX18_ENC_STREAM_TYPE_MPG &&
+           test_bit(CX18_F_S_STREAMING, &s_vbi->s_flags)) {
+               cx18_stop_v4l2_encode_stream(s_vbi, 0);
+               clear_bit(CX18_F_S_STREAMING, &s_vbi->s_flags);
+       }
+       clear_bit(CX18_F_S_STREAMING, &s->s_flags);
+       cx18_release_stream(s);
+       return -EIO;
+}
+
+ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count,
+               loff_t *pos)
+{
+       struct cx18_open_id *id = filp->private_data;
+       struct cx18 *cx = id->cx;
+       struct cx18_stream *s = &cx->streams[id->type];
+       int rc;
+
+       CX18_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name);
+
+       mutex_lock(&cx->serialize_lock);
+       rc = cx18_start_capture(id);
+       mutex_unlock(&cx->serialize_lock);
+       if (rc)
+               return rc;
+       return cx18_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
+}
+
+unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait)
+{
+       struct cx18_open_id *id = filp->private_data;
+       struct cx18 *cx = id->cx;
+       struct cx18_stream *s = &cx->streams[id->type];
+       int eof = test_bit(CX18_F_S_STREAMOFF, &s->s_flags);
+
+       /* Start a capture if there is none */
+       if (!eof && !test_bit(CX18_F_S_STREAMING, &s->s_flags)) {
+               int rc;
+
+               mutex_lock(&cx->serialize_lock);
+               rc = cx18_start_capture(id);
+               mutex_unlock(&cx->serialize_lock);
+               if (rc) {
+                       CX18_DEBUG_INFO("Could not start capture for %s (%d)\n",
+                                       s->name, rc);
+                       return POLLERR;
+               }
+               CX18_DEBUG_FILE("Encoder poll started capture\n");
+       }
+
+       /* add stream's waitq to the poll list */
+       CX18_DEBUG_HI_FILE("Encoder poll\n");
+       poll_wait(filp, &s->waitq, wait);
+
+       if (s->q_full.length || s->q_io.length)
+               return POLLIN | POLLRDNORM;
+       if (eof)
+               return POLLHUP;
+       return 0;
+}
+
+void cx18_stop_capture(struct cx18_open_id *id, int gop_end)
+{
+       struct cx18 *cx = id->cx;
+       struct cx18_stream *s = &cx->streams[id->type];
+
+       CX18_DEBUG_IOCTL("close() of %s\n", s->name);
+
+       /* 'Unclaim' this stream */
+
+       /* Stop capturing */
+       if (test_bit(CX18_F_S_STREAMING, &s->s_flags)) {
+               struct cx18_stream *s_vbi =
+                       &cx->streams[CX18_ENC_STREAM_TYPE_VBI];
+
+               CX18_DEBUG_INFO("close stopping capture\n");
+               /* Special case: a running VBI capture for VBI insertion
+                  in the mpeg stream. Need to stop that too. */
+               if (id->type == CX18_ENC_STREAM_TYPE_MPG &&
+                   test_bit(CX18_F_S_STREAMING, &s_vbi->s_flags) &&
+                   !test_bit(CX18_F_S_APPL_IO, &s_vbi->s_flags)) {
+                       CX18_DEBUG_INFO("close stopping embedded VBI capture\n");
+                       cx18_stop_v4l2_encode_stream(s_vbi, 0);
+               }
+               if (id->type == CX18_ENC_STREAM_TYPE_VBI &&
+                   test_bit(CX18_F_S_INTERNAL_USE, &s->s_flags))
+                       /* Also used internally, don't stop capturing */
+                       s->id = -1;
+               else
+                       cx18_stop_v4l2_encode_stream(s, gop_end);
+       }
+       if (!gop_end) {
+               clear_bit(CX18_F_S_APPL_IO, &s->s_flags);
+               clear_bit(CX18_F_S_STREAMOFF, &s->s_flags);
+               cx18_release_stream(s);
+       }
+}
+
+int cx18_v4l2_close(struct inode *inode, struct file *filp)
+{
+       struct cx18_open_id *id = filp->private_data;
+       struct cx18 *cx = id->cx;
+       struct cx18_stream *s = &cx->streams[id->type];
+
+       CX18_DEBUG_IOCTL("close() of %s\n", s->name);
+
+       v4l2_prio_close(&cx->prio, &id->prio);
+
+       /* Easy case first: this stream was never claimed by us */
+       if (s->id != id->open_id) {
+               kfree(id);
+               return 0;
+       }
+
+       /* 'Unclaim' this stream */
+
+       /* Stop radio */
+       mutex_lock(&cx->serialize_lock);
+       if (id->type == CX18_ENC_STREAM_TYPE_RAD) {
+               /* Closing radio device, return to TV mode */
+               cx18_mute(cx);
+               /* Mark that the radio is no longer in use */
+               clear_bit(CX18_F_I_RADIO_USER, &cx->i_flags);
+               /* Switch tuner to TV */
+               cx18_call_i2c_clients(cx, VIDIOC_S_STD, &cx->std);
+               /* Select correct audio input (i.e. TV tuner or Line in) */
+               cx18_audio_set_io(cx);
+               if (atomic_read(&cx->capturing) > 0) {
+                       /* Undo video mute */
+                       cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2, s->handle,
+                               cx->params.video_mute |
+                                       (cx->params.video_mute_yuv << 8));
+               }
+               /* Done! Unmute and continue. */
+               cx18_unmute(cx);
+               cx18_release_stream(s);
+       } else {
+               cx18_stop_capture(id, 0);
+       }
+       kfree(id);
+       mutex_unlock(&cx->serialize_lock);
+       return 0;
+}
+
+static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
+{
+       struct cx18 *cx = s->cx;
+       struct cx18_open_id *item;
+
+       CX18_DEBUG_FILE("open %s\n", s->name);
+
+       /* Allocate memory */
+       item = kmalloc(sizeof(struct cx18_open_id), GFP_KERNEL);
+       if (NULL == item) {
+               CX18_DEBUG_WARN("nomem on v4l2 open\n");
+               return -ENOMEM;
+       }
+       item->cx = cx;
+       item->type = s->type;
+       v4l2_prio_open(&cx->prio, &item->prio);
+
+       item->open_id = cx->open_id++;
+       filp->private_data = item;
+
+       if (item->type == CX18_ENC_STREAM_TYPE_RAD) {
+               /* Try to claim this stream */
+               if (cx18_claim_stream(item, item->type)) {
+                       /* No, it's already in use */
+                       kfree(item);
+                       return -EBUSY;
+               }
+
+               if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
+                       if (atomic_read(&cx->capturing) > 0) {
+                               /* switching to radio while capture is
+                                  in progress is not polite */
+                               cx18_release_stream(s);
+                               kfree(item);
+                               return -EBUSY;
+                       }
+               }
+
+               /* Mark that the radio is being used. */
+               set_bit(CX18_F_I_RADIO_USER, &cx->i_flags);
+               /* We have the radio */
+               cx18_mute(cx);
+               /* Switch tuner to radio */
+               cx18_call_i2c_clients(cx, AUDC_SET_RADIO, NULL);
+               /* Select the correct audio input (i.e. radio tuner) */
+               cx18_audio_set_io(cx);
+               /* Done! Unmute and continue. */
+               cx18_unmute(cx);
+       }
+       return 0;
+}
+
+int cx18_v4l2_open(struct inode *inode, struct file *filp)
+{
+       int res, x, y = 0;
+       struct cx18 *cx = NULL;
+       struct cx18_stream *s = NULL;
+       int minor = iminor(inode);
+
+       /* Find which card this open was on */
+       spin_lock(&cx18_cards_lock);
+       for (x = 0; cx == NULL && x < cx18_cards_active; x++) {
+               /* find out which stream this open was on */
+               for (y = 0; y < CX18_MAX_STREAMS; y++) {
+                       s = &cx18_cards[x]->streams[y];
+                       if (s->v4l2dev && s->v4l2dev->minor == minor) {
+                               cx = cx18_cards[x];
+                               break;
+                       }
+               }
+       }
+       spin_unlock(&cx18_cards_lock);
+
+       if (cx == NULL) {
+               /* Couldn't find a device registered
+                  on that minor, shouldn't happen! */
+               printk(KERN_WARNING "No cx18 device found on minor %d\n",
+                               minor);
+               return -ENXIO;
+       }
+
+       mutex_lock(&cx->serialize_lock);
+       if (cx18_init_on_first_open(cx)) {
+               CX18_ERR("Failed to initialize on minor %d\n", minor);
+               mutex_unlock(&cx->serialize_lock);
+               return -ENXIO;
+       }
+       res = cx18_serialized_open(s, filp);
+       mutex_unlock(&cx->serialize_lock);
+       return res;
+}
+
+void cx18_mute(struct cx18 *cx)
+{
+       if (atomic_read(&cx->capturing))
+               cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
+                               cx18_find_handle(cx), 1);
+       CX18_DEBUG_INFO("Mute\n");
+}
+
+void cx18_unmute(struct cx18 *cx)
+{
+       if (atomic_read(&cx->capturing)) {
+               cx18_msleep_timeout(100, 0);
+               cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 2,
+                               cx18_find_handle(cx), 12);
+               cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
+                               cx18_find_handle(cx), 0);
+       }
+       CX18_DEBUG_INFO("Unmute\n");
+}
diff --git a/drivers/media/video/cx18/cx18-fileops.h b/drivers/media/video/cx18/cx18-fileops.h
new file mode 100644 (file)
index 0000000..16cdafb
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ *  cx18 file operation functions
+ *
+ *  Derived from ivtv-fileops.h
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+/* Testing/Debugging */
+int cx18_v4l2_open(struct inode *inode, struct file *filp);
+ssize_t cx18_v4l2_read(struct file *filp, char __user *buf, size_t count,
+                     loff_t *pos);
+ssize_t cx18_v4l2_write(struct file *filp, const char __user *buf, size_t count,
+                      loff_t *pos);
+int cx18_v4l2_close(struct inode *inode, struct file *filp);
+unsigned int cx18_v4l2_enc_poll(struct file *filp, poll_table *wait);
+int cx18_start_capture(struct cx18_open_id *id);
+void cx18_stop_capture(struct cx18_open_id *id, int gop_end);
+void cx18_mute(struct cx18 *cx);
+void cx18_unmute(struct cx18 *cx);
+
+/* Utilities */
+
+/* Try to claim a stream for the filehandle. Return 0 on success,
+   -EBUSY if stream already claimed. Once a stream is claimed, it
+   remains claimed until the associated filehandle is closed. */
+int cx18_claim_stream(struct cx18_open_id *id, int type);
+
+/* Release a previously claimed stream. */
+void cx18_release_stream(struct cx18_stream *s);
diff --git a/drivers/media/video/cx18/cx18-firmware.c b/drivers/media/video/cx18/cx18-firmware.c
new file mode 100644 (file)
index 0000000..2694ce3
--- /dev/null
@@ -0,0 +1,373 @@
+/*
+ *  cx18 firmware functions
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
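+       /* dvb.feeding counts the active feeds; the transport DMA is started
+        * for the first feed and stopped again when the last feed is removed
+        * in cx18_dvb_stop_feed().
+        */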
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-scb.h"
+#include "cx18-irq.h"
+#include "cx18-firmware.h"
+#include "cx18-cards.h"
+#include <linux/firmware.h>
+
+#define CX18_PROC_SOFT_RESET           0xc70010
+#define CX18_DDR_SOFT_RESET            0xc70014
+#define CX18_CLOCK_SELECT1             0xc71000
+#define CX18_CLOCK_SELECT2             0xc71004
+#define CX18_HALF_CLOCK_SELECT1        0xc71008
+#define CX18_HALF_CLOCK_SELECT2        0xc7100C
+#define CX18_CLOCK_POLARITY1           0xc71010
+#define CX18_CLOCK_POLARITY2           0xc71014
+#define CX18_ADD_DELAY_ENABLE1         0xc71018
+#define CX18_ADD_DELAY_ENABLE2         0xc7101C
+#define CX18_CLOCK_ENABLE1             0xc71020
+#define CX18_CLOCK_ENABLE2             0xc71024
+
+#define CX18_REG_BUS_TIMEOUT_EN        0xc72024
+
+#define CX18_AUDIO_ENABLE              0xc72014
+#define CX18_REG_BUS_TIMEOUT_EN        0xc72024
+
+#define CX18_FAST_CLOCK_PLL_INT        0xc78000
+#define CX18_FAST_CLOCK_PLL_FRAC       0xc78004
+#define CX18_FAST_CLOCK_PLL_POST       0xc78008
+#define CX18_FAST_CLOCK_PLL_PRESCALE   0xc7800C
+#define CX18_FAST_CLOCK_PLL_ADJUST_BANDWIDTH 0xc78010
+
+#define CX18_SLOW_CLOCK_PLL_INT        0xc78014
+#define CX18_SLOW_CLOCK_PLL_FRAC       0xc78018
+#define CX18_SLOW_CLOCK_PLL_POST       0xc7801C
+#define CX18_MPEG_CLOCK_PLL_INT                0xc78040
+#define CX18_MPEG_CLOCK_PLL_FRAC       0xc78044
+#define CX18_MPEG_CLOCK_PLL_POST       0xc78048
+#define CX18_PLL_POWER_DOWN            0xc78088
+#define CX18_SW1_INT_STATUS             0xc73104
+#define CX18_SW1_INT_ENABLE_PCI         0xc7311C
+#define CX18_SW2_INT_SET                0xc73140
+#define CX18_SW2_INT_STATUS             0xc73144
+#define CX18_ADEC_CONTROL              0xc78120
+
+#define CX18_DDR_REQUEST_ENABLE        0xc80000
+#define CX18_DDR_CHIP_CONFIG           0xc80004
+#define CX18_DDR_REFRESH               0xc80008
+#define CX18_DDR_TIMING1               0xc8000C
+#define CX18_DDR_TIMING2               0xc80010
+#define CX18_DDR_POWER_REG             0xc8001C
+
+#define CX18_DDR_TUNE_LANE             0xc80048
+#define CX18_DDR_INITIAL_EMRS          0xc80054
+#define CX18_DDR_MB_PER_ROW_7          0xc8009C
+#define CX18_DDR_BASE_63_ADDR          0xc804FC
+
+#define CX18_WMB_CLIENT02              0xc90108
+#define CX18_WMB_CLIENT05              0xc90114
+#define CX18_WMB_CLIENT06              0xc90118
+#define CX18_WMB_CLIENT07              0xc9011C
+#define CX18_WMB_CLIENT08              0xc90120
+#define CX18_WMB_CLIENT09              0xc90124
+#define CX18_WMB_CLIENT10              0xc90128
+#define CX18_WMB_CLIENT11              0xc9012C
+#define CX18_WMB_CLIENT12              0xc90130
+#define CX18_WMB_CLIENT13              0xc90134
+#define CX18_WMB_CLIENT14              0xc90138
+
+#define CX18_DSP0_INTERRUPT_MASK       0xd0004C
+
+/* Encoder/decoder firmware sizes */
+#define CX18_FW_CPU_SIZE               (174716)
+#define CX18_FW_APU_SIZE               (141200)
+
+#define APU_ROM_SYNC1 0x6D676553 /* "mgeS" */
+#define APU_ROM_SYNC2 0x72646548 /* "rdeH" */
+
+struct cx18_apu_rom_seghdr {
+       u32 sync1;
+       u32 sync2;
+       u32 addr;
+       u32 size;
+};
+
+static int load_cpu_fw_direct(const char *fn, u8 __iomem *mem, struct cx18 *cx, long size)
+{
+       const struct firmware *fw = NULL;
+       int retries = 3;
+       int i, j;
+       u32 __iomem *dst = (u32 __iomem *)mem;
+       const u32 *src;
+
+retry:
+       if (!retries || request_firmware(&fw, fn, &cx->dev->dev)) {
+               CX18_ERR("Unable to open firmware %s (must be %ld bytes)\n",
+                               fn, size);
+               CX18_ERR("Did you put the firmware in the hotplug firmware directory?\n");
+               return -ENOMEM;
+       }
+
+       src = (const u32 *)fw->data;
+
+       if (fw->size != size) {
+               /* Due to race conditions in firmware loading (esp. with
+                  udev <0.95) the wrong file was sometimes loaded. So we check
+                  filesizes to see if at least the right-sized file was
+                  loaded. If not, then we retry. */
+               CX18_INFO("retry: file loaded was not %s (expected size %ld, got %zd)\n",
+                               fn, size, fw->size);
+               release_firmware(fw);
+               retries--;
+               goto retry;
+       }
+       for (i = 0; i < fw->size; i += 4096) {
+               setup_page(i);
+               for (j = i; j < fw->size && j < i + 4096; j += 4) {
+                       /* no need for endianness conversion on the ppc */
+                       __raw_writel(*src, dst);
+                       if (__raw_readl(dst) != *src) {
+                               CX18_ERR("Mismatch at offset %x\n", i);
+                               release_firmware(fw);
+                               return -EIO;
+                       }
+                       dst++;
+                       src++;
+               }
+       }
+       if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags))
+               CX18_INFO("loaded %s firmware (%zd bytes)\n", fn, fw->size);
+       release_firmware(fw);
+       return size;
+}
+
+static int load_apu_fw_direct(const char *fn, u8 __iomem *dst, struct cx18 *cx, long size)
+{
+       const struct firmware *fw = NULL;
+       int retries = 3;
+       int i, j;
+       const u32 *src;
+       struct cx18_apu_rom_seghdr seghdr;
+       const u8 *vers;
+       u32 offset = 0;
+       u32 apu_version = 0;
+       int sz;
+
+retry:
+       if (!retries || request_firmware(&fw, fn, &cx->dev->dev)) {
+               CX18_ERR("unable to open firmware %s (must be %ld bytes)\n",
+                               fn, size);
+               CX18_ERR("did you put the firmware in the hotplug firmware directory?\n");
+               return -ENOMEM;
+       }
+
+       src = (const u32 *)fw->data;
+       vers = fw->data + sizeof(seghdr);
+       sz = fw->size;
+
+       if (fw->size != size) {
+               /* Due to race conditions in firmware loading (esp. with
+                  udev <0.95) the wrong file was sometimes loaded. So we check
+                  filesizes to see if at least the right-sized file was
+                  loaded. If not, then we retry. */
+               CX18_INFO("retry: file loaded was not %s (expected size %ld, got %zd)\n",
+                              fn, size, fw->size);
+               release_firmware(fw);
+               retries--;
+               goto retry;
+       }
+       apu_version = (vers[0] << 24) | (vers[4] << 16) | vers[32];
+       while (offset + sizeof(seghdr) < size) {
+               /* TODO: byteswapping */
+               memcpy(&seghdr, src + offset / 4, sizeof(seghdr));
+               offset += sizeof(seghdr);
+               if (seghdr.sync1 != APU_ROM_SYNC1 ||
+                   seghdr.sync2 != APU_ROM_SYNC2) {
+                       offset += seghdr.size;
+                       continue;
+               }
+               CX18_DEBUG_INFO("load segment %x-%x\n", seghdr.addr,
+                               seghdr.addr + seghdr.size - 1);
+               if (offset + seghdr.size > sz)
+                       break;
+               for (i = 0; i < seghdr.size; i += 4096) {
+                       setup_page(offset + i);
+                       for (j = i; j < seghdr.size && j < i + 4096; j += 4) {
+                               /* no need for endianness conversion on the ppc */
+                               __raw_writel(src[(offset + j) / 4], dst + seghdr.addr + j);
+                               if (__raw_readl(dst + seghdr.addr + j) != src[(offset + j) / 4]) {
+                                       CX18_ERR("Mismatch at offset %x\n", offset + j);
+                                       release_firmware(fw);
+                                       return -EIO;
+                               }
+                       }
+               }
+               offset += seghdr.size;
+       }
+       if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags))
+               CX18_INFO("loaded %s firmware V%08x (%zd bytes)\n",
+                               fn, apu_version, fw->size);
+       release_firmware(fw);
+       /* Clear bit0 for APU to start from 0 */
+       write_reg(read_reg(0xc72030) & ~1, 0xc72030);
+       return size;
+}
+
+void cx18_halt_firmware(struct cx18 *cx)
+{
+       CX18_DEBUG_INFO("Preparing for firmware halt.\n");
+       write_reg(0x000F000F, CX18_PROC_SOFT_RESET); /* stop the fw */
+       write_reg(0x00020002, CX18_ADEC_CONTROL);
+}
+
+void cx18_init_power(struct cx18 *cx, int lowpwr)
+{
+       /* power-down Spare and AOM PLLs */
+       /* power-up fast, slow and mpeg PLLs */
+       write_reg(0x00000008, CX18_PLL_POWER_DOWN);
+
+       /* ADEC out of sleep */
+       write_reg(0x00020000, CX18_ADEC_CONTROL);
+
+       /* The fast clock is at 200/245 MHz */
+       write_reg(lowpwr ? 0xD : 0x11, CX18_FAST_CLOCK_PLL_INT);
+       write_reg(lowpwr ? 0x1EFBF37 : 0x038E3D7, CX18_FAST_CLOCK_PLL_FRAC);
+
+       write_reg(2, CX18_FAST_CLOCK_PLL_POST);
+       write_reg(1, CX18_FAST_CLOCK_PLL_PRESCALE);
+       write_reg(4, CX18_FAST_CLOCK_PLL_ADJUST_BANDWIDTH);
+
+       /* set slow clock to 125/120 MHz */
+       write_reg(lowpwr ? 0x11 : 0x10, CX18_SLOW_CLOCK_PLL_INT);
+       write_reg(lowpwr ? 0xEBAF05 : 0x18618A8, CX18_SLOW_CLOCK_PLL_FRAC);
+       write_reg(4, CX18_SLOW_CLOCK_PLL_POST);
+
+       /* mpeg clock pll 54MHz */
+       write_reg(0xF, CX18_MPEG_CLOCK_PLL_INT);
+       write_reg(0x2BCFEF, CX18_MPEG_CLOCK_PLL_FRAC);
+       write_reg(8, CX18_MPEG_CLOCK_PLL_POST);
+
+       /* Defaults */
+       /* APU = SC or SC/2 = 125/62.5 */
+       /* EPU = SC = 125 */
+       /* DDR = FC = 180 */
+       /* ENC = SC = 125 */
+       /* AI1 = SC = 125 */
+       /* VIM2 = disabled */
+       /* PCI = FC/2 = 90 */
+       /* AI2 = disabled */
+       /* DEMUX = disabled */
+       /* AO = SC/2 = 62.5 */
+       /* SER = 54MHz */
+       /* VFC = disabled */
+       /* USB = disabled */
+
+       write_reg(lowpwr ? 0xFFFF0020 : 0x00060004, CX18_CLOCK_SELECT1);
+       write_reg(lowpwr ? 0xFFFF0004 : 0x00060006, CX18_CLOCK_SELECT2);
+
+       write_reg(0xFFFF0002, CX18_HALF_CLOCK_SELECT1);
+       write_reg(0xFFFF0104, CX18_HALF_CLOCK_SELECT2);
+
+       write_reg(0xFFFF9026, CX18_CLOCK_ENABLE1);
+       write_reg(0xFFFF3105, CX18_CLOCK_ENABLE2);
+}
+
+void cx18_init_memory(struct cx18 *cx)
+{
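+       /* Take the DDR controller through a soft reset and program the
+        * card-specific chip config, refresh, timing and DQS tuning values
+        * from the card definition.
+        */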
+       cx18_msleep_timeout(10, 0);
+       write_reg(0x10000, CX18_DDR_SOFT_RESET);
+       cx18_msleep_timeout(10, 0);
+
+       write_reg(cx->card->ddr.chip_config, CX18_DDR_CHIP_CONFIG);
+
+       cx18_msleep_timeout(10, 0);
+
+       write_reg(cx->card->ddr.refresh, CX18_DDR_REFRESH);
+       write_reg(cx->card->ddr.timing1, CX18_DDR_TIMING1);
+       write_reg(cx->card->ddr.timing2, CX18_DDR_TIMING2);
+
+       cx18_msleep_timeout(10, 0);
+
+       /* Initialize DQS pad time */
+       write_reg(cx->card->ddr.tune_lane, CX18_DDR_TUNE_LANE);
+       write_reg(cx->card->ddr.initial_emrs, CX18_DDR_INITIAL_EMRS);
+
+       cx18_msleep_timeout(10, 0);
+
+       write_reg(0x20000, CX18_DDR_SOFT_RESET);
+       cx18_msleep_timeout(10, 0);
+
+       /* use power-down mode when idle */
+       write_reg(0x00000010, CX18_DDR_POWER_REG);
+
+       write_reg(0x10001, CX18_REG_BUS_TIMEOUT_EN);
+
+       write_reg(0x48, CX18_DDR_MB_PER_ROW_7);
+       write_reg(0xE0000, CX18_DDR_BASE_63_ADDR);
+
+       write_reg(0x00000101, CX18_WMB_CLIENT02);  /* AO */
+       write_reg(0x00000101, CX18_WMB_CLIENT09);  /* AI2 */
+       write_reg(0x00000101, CX18_WMB_CLIENT05);  /* VIM1 */
+       write_reg(0x00000101, CX18_WMB_CLIENT06);  /* AI1 */
+       write_reg(0x00000101, CX18_WMB_CLIENT07);  /* 3D comb */
+       write_reg(0x00000101, CX18_WMB_CLIENT10);  /* ME */
+       write_reg(0x00000101, CX18_WMB_CLIENT12);  /* ENC */
+       write_reg(0x00000101, CX18_WMB_CLIENT13);  /* PK */
+       write_reg(0x00000101, CX18_WMB_CLIENT11);  /* RC */
+       write_reg(0x00000101, CX18_WMB_CLIENT14);  /* AVO */
+}
+
+int cx18_firmware_init(struct cx18 *cx)
+{
+       /* Allow chip to control CLKRUN */
+       write_reg(0x5, CX18_DSP0_INTERRUPT_MASK);
+
+       write_reg(0x000F000F, CX18_PROC_SOFT_RESET); /* stop the fw */
+
+       cx18_msleep_timeout(1, 0);
+
+       sw1_irq_enable(IRQ_CPU_TO_EPU | IRQ_APU_TO_EPU);
+       sw2_irq_enable(IRQ_CPU_TO_EPU_ACK | IRQ_APU_TO_EPU_ACK);
+
+       /* Only if the processor is not running */
+       if (read_reg(CX18_PROC_SOFT_RESET) & 8) {
+               int sz = load_apu_fw_direct("v4l-cx23418-apu.fw",
+                              cx->enc_mem, cx, CX18_FW_APU_SIZE);
+
+               sz = sz <= 0 ? sz : load_cpu_fw_direct("v4l-cx23418-cpu.fw",
+                                       cx->enc_mem, cx, CX18_FW_CPU_SIZE);
+
+               if (sz > 0) {
+                       int retries = 0;
+
+                       /* start the CPU */
+                       write_reg(0x00080000, CX18_PROC_SOFT_RESET);
+                       while (retries++ < 50) { /* Loop for max 500mS */
+                               if ((read_reg(CX18_PROC_SOFT_RESET) & 1) == 0)
+                                       break;
+                               cx18_msleep_timeout(10, 0);
+                       }
+                       cx18_msleep_timeout(200, 0);
+                       if (retries == 51) {
+                               CX18_ERR("Could not start the CPU\n");
+                               return -EIO;
+                       }
+               }
+               if (sz <= 0)
+                       return -EIO;
+       }
+       /* initialize GPIO */
+       write_reg(0x14001400, 0xC78110);
+       return 0;
+}
diff --git a/drivers/media/video/cx18/cx18-firmware.h b/drivers/media/video/cx18/cx18-firmware.h
new file mode 100644 (file)
index 0000000..38d4c05
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ *  cx18 firmware functions
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+int cx18_firmware_init(struct cx18 *cx);
+void cx18_halt_firmware(struct cx18 *cx);
+void cx18_init_memory(struct cx18 *cx);
+void cx18_init_power(struct cx18 *cx, int lowpwr);
diff --git a/drivers/media/video/cx18/cx18-gpio.c b/drivers/media/video/cx18/cx18-gpio.c
new file mode 100644 (file)
index 0000000..19253e6
--- /dev/null
@@ -0,0 +1,74 @@
+/*
+ *  cx18 gpio functions
+ *
+ *  Derived from ivtv-gpio.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-cards.h"
+#include "cx18-gpio.h"
+#include "tuner-xc2028.h"
+
+/********************* GPIO stuffs *********************/
+
+/* GPIO registers */
+#define CX18_REG_GPIO_IN     0xc72010
+#define CX18_REG_GPIO_OUT1   0xc78100
+#define CX18_REG_GPIO_DIR1   0xc78108
+#define CX18_REG_GPIO_OUT2   0xc78104
+#define CX18_REG_GPIO_DIR2   0xc7810c
+
+/*
+ * HVR-1600 GPIO pins, courtesy of Hauppauge:
+ *
+ * gpio0: zilog ir process reset pin
+ * gpio1: zilog programming pin (you should never use this)
+ * gpio12: cx24227 reset pin
+ * gpio13: cs5345 reset pin
+*/
+
+void cx18_gpio_init(struct cx18 *cx)
+{
+       if (cx->card->gpio_init.direction == 0)
+               return;
+
+       CX18_DEBUG_INFO("GPIO initial dir: %08x out: %08x\n",
+                  read_reg(CX18_REG_GPIO_DIR1), read_reg(CX18_REG_GPIO_OUT1));
+
+       /* init output data then direction */
+       write_reg(cx->card->gpio_init.direction << 16, CX18_REG_GPIO_DIR1);
+       write_reg(0, CX18_REG_GPIO_DIR2);
+       write_reg((cx->card->gpio_init.direction << 16) |
+                       cx->card->gpio_init.initial_value, CX18_REG_GPIO_OUT1);
+       write_reg(0, CX18_REG_GPIO_OUT2);
+}
+
+/* Xceive tuner reset function */
+int cx18_reset_tuner_gpio(void *dev, int cmd, int value)
+{
+       struct i2c_algo_bit_data *algo = dev;
+       struct cx18 *cx = algo->data;
+/*     int curdir, curout;*/
+
+       if (cmd != XC2028_TUNER_RESET)
+               return 0;
+       CX18_DEBUG_INFO("Resetting tuner\n");
+       return 0;
+}
diff --git a/drivers/media/video/cx18/cx18-gpio.h b/drivers/media/video/cx18/cx18-gpio.h
new file mode 100644 (file)
index 0000000..41bac88
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ *  cx18 gpio functions
+ *
+ *  Derived from ivtv-gpio.h
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+void cx18_gpio_init(struct cx18 *cx);
+int cx18_reset_tuner_gpio(void *dev, int cmd, int value);
diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c
new file mode 100644 (file)
index 0000000..18c88d1
--- /dev/null
@@ -0,0 +1,431 @@
+/*
+ *  cx18 I2C functions
+ *
+ *  Derived from ivtv-i2c.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-cards.h"
+#include "cx18-gpio.h"
+#include "cx18-av-core.h"
+
+#include <media/ir-kbd-i2c.h>
+
+#define CX18_REG_I2C_1_WR   0xf15000
+#define CX18_REG_I2C_1_RD   0xf15008
+#define CX18_REG_I2C_2_WR   0xf25100
+#define CX18_REG_I2C_2_RD   0xf25108
+
+#define SETSCL_BIT      0x0001
+#define SETSDL_BIT      0x0002
+#define GETSCL_BIT      0x0004
+#define GETSDL_BIT      0x0008
+
+#ifndef I2C_ADAP_CLASS_TV_ANALOG
+#define I2C_ADAP_CLASS_TV_ANALOG I2C_CLASS_TV_ANALOG
+#endif
+
+#define CX18_CS5345_I2C_ADDR           0x4c
+
+/* This array should match the CX18_HW_ defines */
+static const u8 hw_driverids[] = {
+       I2C_DRIVERID_TUNER,
+       I2C_DRIVERID_TVEEPROM,
+       I2C_DRIVERID_CS5345,
+       0,              /* CX18_HW_GPIO dummy driver ID */
+       0               /* CX18_HW_CX23418 dummy driver ID */
+};
+
+/* This array should match the CX18_HW_ defines */
+static const u8 hw_addrs[] = {
+       0,
+       0,
+       CX18_CS5345_I2C_ADDR,
+       0,              /* CX18_HW_GPIO dummy driver ID */
+       0,              /* CX18_HW_CX23418 dummy driver ID */
+};
+
+/* This array should match the CX18_HW_ defines */
+/* This might well become a card-specific array */
+static const u8 hw_bus[] = {
+       0,
+       0,
+       0,
+       0,              /* CX18_HW_GPIO dummy driver ID */
+       0,              /* CX18_HW_CX23418 dummy driver ID */
+};
+
+/* This array should match the CX18_HW_ defines */
+static const char * const hw_drivernames[] = {
+       "tuner",
+       "tveeprom",
+       "cs5345",
+       "gpio",
+       "cx23418",
+};
+
+int cx18_i2c_register(struct cx18 *cx, unsigned idx)
+{
+       struct i2c_board_info info;
+       struct i2c_client *c;
+       u8 id, bus;
+       int i;
+
+       CX18_DEBUG_I2C("i2c client register\n");
+       if (idx >= ARRAY_SIZE(hw_driverids) || hw_driverids[idx] == 0)
+               return -1;
+       id = hw_driverids[idx];
+       bus = hw_bus[idx];
+       memset(&info, 0, sizeof(info));
+       strlcpy(info.driver_name, hw_drivernames[idx],
+                       sizeof(info.driver_name));
+       info.addr = hw_addrs[idx];
+       for (i = 0; i < I2C_CLIENTS_MAX; i++)
+               if (cx->i2c_clients[i] == NULL)
+                       break;
+
+       if (i == I2C_CLIENTS_MAX) {
+               CX18_ERR("insufficient room for new I2C client!\n");
+               return -ENOMEM;
+       }
+
+       if (id != I2C_DRIVERID_TUNER) {
+               c = i2c_new_device(&cx->i2c_adap[bus], &info);
+               if (c == NULL)
+                       return -ENODEV;
+               if (c->driver == NULL)
+                       i2c_unregister_device(c);
+               else
+                       cx->i2c_clients[i] = c;
+               return cx->i2c_clients[i] ? 0 : -ENODEV;
+       }
+
+       /* special tuner handling */
+       c = i2c_new_probed_device(&cx->i2c_adap[1], &info, cx->card_i2c->radio);
+       if (c && c->driver == NULL)
+               i2c_unregister_device(c);
+       else if (c)
+               cx->i2c_clients[i++] = c;
+       c = i2c_new_probed_device(&cx->i2c_adap[1], &info, cx->card_i2c->demod);
+       if (c && c->driver == NULL)
+               i2c_unregister_device(c);
+       else if (c)
+               cx->i2c_clients[i++] = c;
+       c = i2c_new_probed_device(&cx->i2c_adap[1], &info, cx->card_i2c->tv);
+       if (c && c->driver == NULL)
+               i2c_unregister_device(c);
+       else if (c)
+               cx->i2c_clients[i++] = c;
+       return 0;
+}
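For orientation, a minimal sketch (not part of this patch) of how a card-setup path could walk the CX18_HW_ bit flags and register each subdevice through cx18_i2c_register(). The loop and the hw_all bitmask are illustrative assumptions; the real caller lives outside this hunk.

/* Hypothetical caller: one CX18_HW_ bit per index into the hw_* tables above. */
static void sketch_register_i2c_clients(struct cx18 *cx, u32 hw_all)
{
        unsigned idx;

        for (idx = 0; idx < 32; idx++) {
                if (!(hw_all & (1 << idx)))
                        continue;
                /* cx18_i2c_register() rejects out-of-range indices and the
                   dummy entries (GPIO, CX23418) by returning -1. */
                cx18_i2c_register(cx, idx);
        }
}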
+
+static int attach_inform(struct i2c_client *client)
+{
+       return 0;
+}
+
+static int detach_inform(struct i2c_client *client)
+{
+       int i;
+       struct cx18 *cx = (struct cx18 *)i2c_get_adapdata(client->adapter);
+
+       CX18_DEBUG_I2C("i2c client detach\n");
+       for (i = 0; i < I2C_CLIENTS_MAX; i++) {
+               if (cx->i2c_clients[i] == client) {
+                       cx->i2c_clients[i] = NULL;
+                       break;
+               }
+       }
+       CX18_DEBUG_I2C("i2c detach [client=%s,%s]\n",
+                  client->name, (i < I2C_CLIENTS_MAX) ? "ok" : "failed");
+
+       return 0;
+}
+
+static void cx18_setscl(void *data, int state)
+{
+       struct cx18 *cx = ((struct cx18_i2c_algo_callback_data *)data)->cx;
+       int bus_index = ((struct cx18_i2c_algo_callback_data *)data)->bus_index;
+       u32 addr = bus_index ? CX18_REG_I2C_2_WR : CX18_REG_I2C_1_WR;
+       u32 r = read_reg(addr);
+
+       if (state)
+               write_reg_sync(r | SETSCL_BIT, addr);
+       else
+               write_reg_sync(r & ~SETSCL_BIT, addr);
+}
+
+static void cx18_setsda(void *data, int state)
+{
+       struct cx18 *cx = ((struct cx18_i2c_algo_callback_data *)data)->cx;
+       int bus_index = ((struct cx18_i2c_algo_callback_data *)data)->bus_index;
+       u32 addr = bus_index ? CX18_REG_I2C_2_WR : CX18_REG_I2C_1_WR;
+       u32 r = read_reg(addr);
+
+       if (state)
+               write_reg_sync(r | SETSDL_BIT, addr);
+       else
+               write_reg_sync(r & ~SETSDL_BIT, addr);
+}
+
+static int cx18_getscl(void *data)
+{
+       struct cx18 *cx = ((struct cx18_i2c_algo_callback_data *)data)->cx;
+       int bus_index = ((struct cx18_i2c_algo_callback_data *)data)->bus_index;
+       u32 addr = bus_index ? CX18_REG_I2C_2_RD : CX18_REG_I2C_1_RD;
+
+       return read_reg(addr) & GETSCL_BIT;
+}
+
+static int cx18_getsda(void *data)
+{
+       struct cx18 *cx = ((struct cx18_i2c_algo_callback_data *)data)->cx;
+       int bus_index = ((struct cx18_i2c_algo_callback_data *)data)->bus_index;
+       u32 addr = bus_index ? CX18_REG_I2C_2_RD : CX18_REG_I2C_1_RD;
+
+       return read_reg(addr) & GETSDL_BIT;
+}
+
+/* template for i2c-bit-algo */
+static struct i2c_adapter cx18_i2c_adap_template = {
+       .name = "cx18 i2c driver",
+       .id = I2C_HW_B_CX2341X,
+       .algo = NULL,                   /* set by i2c-algo-bit */
+       .algo_data = NULL,              /* filled from template */
+       .client_register = attach_inform,
+       .client_unregister = detach_inform,
+       .owner = THIS_MODULE,
+};
+
+#define CX18_SCL_PERIOD (10) /* usecs; 10 usec is the period of a 100 kHz clock */
+#define CX18_ALGO_BIT_TIMEOUT (2) /* seconds */
+
+static struct i2c_algo_bit_data cx18_i2c_algo_template = {
+       .setsda         = cx18_setsda,
+       .setscl         = cx18_setscl,
+       .getsda         = cx18_getsda,
+       .getscl         = cx18_getscl,
+       .udelay         = CX18_SCL_PERIOD/2,       /* 1/2 clock period in usec*/
+       .timeout        = CX18_ALGO_BIT_TIMEOUT*HZ /* jiffies */
+};
+
+static struct i2c_client cx18_i2c_client_template = {
+       .name = "cx18 internal",
+};
+
+int cx18_call_i2c_client(struct cx18 *cx, int addr, unsigned cmd, void *arg)
+{
+       struct i2c_client *client;
+       int retval;
+       int i;
+
+       CX18_DEBUG_I2C("call_i2c_client addr=%02x\n", addr);
+       for (i = 0; i < I2C_CLIENTS_MAX; i++) {
+               client = cx->i2c_clients[i];
+               if (client == NULL || client->driver == NULL ||
+                               client->driver->command == NULL)
+                       continue;
+               if (addr == client->addr) {
+                       retval = client->driver->command(client, cmd, arg);
+                       return retval;
+               }
+       }
+       if (cmd != VIDIOC_G_CHIP_IDENT)
+               CX18_ERR("i2c addr 0x%02x not found for cmd 0x%x!\n",
+                              addr, cmd);
+       return -ENODEV;
+}
+
+/* Find the i2c device based on the driver ID and return
+   its i2c address or -ENODEV if no matching device was found. */
+static int cx18_i2c_id_addr(struct cx18 *cx, u32 id)
+{
+       struct i2c_client *client;
+       int retval = -ENODEV;
+       int i;
+
+       for (i = 0; i < I2C_CLIENTS_MAX; i++) {
+               client = cx->i2c_clients[i];
+               if (client == NULL || client->driver == NULL)
+                       continue;
+               if (id == client->driver->id) {
+                       retval = client->addr;
+                       break;
+               }
+       }
+       return retval;
+}
+
+/* Find the i2c device name matching the DRIVERID */
+static const char *cx18_i2c_id_name(u32 id)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(hw_driverids); i++)
+               if (hw_driverids[i] == id)
+                       return hw_drivernames[i];
+       return "unknown device";
+}
+
+/* Find the i2c device name matching the CX18_HW_ flag */
+static const char *cx18_i2c_hw_name(u32 hw)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(hw_driverids); i++)
+               if (1 << i == hw)
+                       return hw_drivernames[i];
+       return "unknown device";
+}
+
+/* Find the i2c device matching the CX18_HW_ flag and return
+   its i2c address or -ENODEV if no matching device was found. */
+int cx18_i2c_hw_addr(struct cx18 *cx, u32 hw)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(hw_driverids); i++)
+               if (1 << i == hw)
+                       return cx18_i2c_id_addr(cx, hw_driverids[i]);
+       return -ENODEV;
+}
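A worked trace of the flag-to-address lookup above, assuming (for illustration only, since the CX18_HW_ defines are in another header) that CX18_HW_CS5345 is bit 2, matching the third slot of the hw_* tables:

/* Assuming CX18_HW_CS5345 == (1 << 2):
 *   cx18_i2c_hw_addr(cx, 1 << 2)
 *     -> hw_driverids[2] == I2C_DRIVERID_CS5345
 *     -> cx18_i2c_id_addr() scans cx->i2c_clients[] for that driver ID
 *     -> returns 0x4c (CX18_CS5345_I2C_ADDR) once the cs5345 client is bound,
 *        or -ENODEV if it is not.
 */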
+
+/* Calls the i2c device matching the CX18_HW_ flag. hw == 0 and
+   hw == CX18_HW_GPIO are no-ops here; CX18_HW_CX23418 is routed to the
+   A/V core via cx18_av_cmd(). */
+int cx18_i2c_hw(struct cx18 *cx, u32 hw, unsigned int cmd, void *arg)
+{
+       int addr;
+
+       if (hw == CX18_HW_GPIO || hw == 0)
+               return 0;
+       if (hw == CX18_HW_CX23418)
+               return cx18_av_cmd(cx, cmd, arg);
+
+       addr = cx18_i2c_hw_addr(cx, hw);
+       if (addr < 0) {
+               CX18_ERR("i2c hardware 0x%08x (%s) not found for cmd 0x%x!\n",
+                              hw, cx18_i2c_hw_name(hw), cmd);
+               return addr;
+       }
+       return cx18_call_i2c_client(cx, addr, cmd, arg);
+}
+
+/* Calls i2c device based on I2C driver ID. */
+int cx18_i2c_id(struct cx18 *cx, u32 id, unsigned int cmd, void *arg)
+{
+       int addr;
+
+       addr = cx18_i2c_id_addr(cx, id);
+       if (addr < 0) {
+               if (cmd != VIDIOC_G_CHIP_IDENT)
+                       CX18_ERR("i2c ID 0x%08x (%s) not found for cmd 0x%x!\n",
+                               id, cx18_i2c_id_name(id), cmd);
+               return addr;
+       }
+       return cx18_call_i2c_client(cx, addr, cmd, arg);
+}
+
+/* broadcast cmd for all I2C clients and for the gpio subsystem */
+void cx18_call_i2c_clients(struct cx18 *cx, unsigned int cmd, void *arg)
+{
+       if (cx->i2c_adap[0].algo == NULL || cx->i2c_adap[1].algo == NULL) {
+               CX18_ERR("adapter is not set\n");
+               return;
+       }
+       cx18_av_cmd(cx, cmd, arg);
+       i2c_clients_command(&cx->i2c_adap[0], cmd, arg);
+       i2c_clients_command(&cx->i2c_adap[1], cmd, arg);
+}
+
+/* init + register i2c algo-bit adapter */
+int init_cx18_i2c(struct cx18 *cx)
+{
+       int i;
+       CX18_DEBUG_I2C("i2c init\n");
+
+       for (i = 0; i < 2; i++) {
+               memcpy(&cx->i2c_adap[i], &cx18_i2c_adap_template,
+                       sizeof(struct i2c_adapter));
+               memcpy(&cx->i2c_algo[i], &cx18_i2c_algo_template,
+                       sizeof(struct i2c_algo_bit_data));
+               cx->i2c_algo_cb_data[i].cx = cx;
+               cx->i2c_algo_cb_data[i].bus_index = i;
+               cx->i2c_algo[i].data = &cx->i2c_algo_cb_data[i];
+               cx->i2c_adap[i].algo_data = &cx->i2c_algo[i];
+
+               sprintf(cx->i2c_adap[i].name + strlen(cx->i2c_adap[i].name),
+                               " #%d-%d", cx->num, i);
+               i2c_set_adapdata(&cx->i2c_adap[i], cx);
+
+               memcpy(&cx->i2c_client[i], &cx18_i2c_client_template,
+                       sizeof(struct i2c_client));
+               sprintf(cx->i2c_client[i].name +
+                               strlen(cx->i2c_client[i].name), "%d", i);
+               cx->i2c_client[i].adapter = &cx->i2c_adap[i];
+               cx->i2c_adap[i].dev.parent = &cx->dev->dev;
+       }
+
+       if (read_reg(CX18_REG_I2C_2_WR) != 0x0003c02f) {
+               /* Reset/Unreset I2C hardware block */
+               write_reg(0x10000000, 0xc71004); /* Clock select 220MHz */
+               write_reg_sync(0x10001000, 0xc71024); /* Clock Enable */
+       }
+       /* courtesy of Steven Toth <stoth@hauppauge.com> */
+       write_reg_sync(0x00c00000, 0xc7001c);
+       mdelay(10);
+       write_reg_sync(0x00c000c0, 0xc7001c);
+       mdelay(10);
+       write_reg_sync(0x00c00000, 0xc7001c);
+
+       write_reg_sync(0x00c00000, 0xc730c8); /* Set to edge-triggered intrs. */
+       write_reg_sync(0x00c00000, 0xc730c4); /* Clear any stale intrs */
+
+       /* Hw I2C1 Clock Freq ~100kHz */
+       write_reg_sync(0x00021c0f & ~4, CX18_REG_I2C_1_WR);
+       cx18_setscl(&cx->i2c_algo_cb_data[0], 1);
+       cx18_setsda(&cx->i2c_algo_cb_data[0], 1);
+
+       /* Hw I2C2 Clock Freq ~100kHz */
+       write_reg_sync(0x00021c0f & ~4, CX18_REG_I2C_2_WR);
+       cx18_setscl(&cx->i2c_algo_cb_data[1], 1);
+       cx18_setsda(&cx->i2c_algo_cb_data[1], 1);
+
+       return i2c_bit_add_bus(&cx->i2c_adap[0]) ||
+               i2c_bit_add_bus(&cx->i2c_adap[1]);
+}
+
+void exit_cx18_i2c(struct cx18 *cx)
+{
+       int i;
+       CX18_DEBUG_I2C("i2c exit\n");
+       write_reg(read_reg(CX18_REG_I2C_1_WR) | 4, CX18_REG_I2C_1_WR);
+       write_reg(read_reg(CX18_REG_I2C_2_WR) | 4, CX18_REG_I2C_2_WR);
+
+       for (i = 0; i < 2; i++) {
+               i2c_del_adapter(&cx->i2c_adap[i]);
+       }
+}
+
+/*
+   Hauppauge HVR1600 should have:
+   32 cx24227
+   98 unknown
+   a0 eeprom
+   c2 tuner
+   e? zilog ir
+   */
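A reading aid for the address list above: these are conventional 8-bit bus values, while Linux I2C clients use 7-bit addresses, i.e. each value shifted right by one.

/*  8-bit  ->  7-bit (struct i2c_client.addr)
 *   0x32  ->  0x19   cx24227 demodulator
 *   0xa0  ->  0x50   eeprom
 *   0xc2  ->  0x61   tuner
 */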
diff --git a/drivers/media/video/cx18/cx18-i2c.h b/drivers/media/video/cx18/cx18-i2c.h
new file mode 100644 (file)
index 0000000..113c3f9
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ *  cx18 I2C functions
+ *
+ *  Derived from ivtv-i2c.h
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+int cx18_i2c_hw_addr(struct cx18 *cx, u32 hw);
+int cx18_i2c_hw(struct cx18 *cx, u32 hw, unsigned int cmd, void *arg);
+int cx18_i2c_id(struct cx18 *cx, u32 id, unsigned int cmd, void *arg);
+int cx18_call_i2c_client(struct cx18 *cx, int addr, unsigned cmd, void *arg);
+void cx18_call_i2c_clients(struct cx18 *cx, unsigned int cmd, void *arg);
+int cx18_i2c_register(struct cx18 *cx, unsigned idx);
+
+/* init + register i2c algo-bit adapter */
+int init_cx18_i2c(struct cx18 *cx);
+void exit_cx18_i2c(struct cx18 *cx);
diff --git a/drivers/media/video/cx18/cx18-ioctl.c b/drivers/media/video/cx18/cx18-ioctl.c
new file mode 100644 (file)
index 0000000..dbdcb86
--- /dev/null
@@ -0,0 +1,851 @@
+/*
+ *  cx18 ioctl system call
+ *
+ *  Derived from ivtv-ioctl.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-version.h"
+#include "cx18-mailbox.h"
+#include "cx18-i2c.h"
+#include "cx18-queue.h"
+#include "cx18-fileops.h"
+#include "cx18-vbi.h"
+#include "cx18-audio.h"
+#include "cx18-video.h"
+#include "cx18-streams.h"
+#include "cx18-ioctl.h"
+#include "cx18-gpio.h"
+#include "cx18-controls.h"
+#include "cx18-cards.h"
+#include "cx18-av-core.h"
+#include <media/tveeprom.h>
+#include <media/v4l2-chip-ident.h>
+#include <linux/i2c-id.h>
+
+u16 cx18_service2vbi(int type)
+{
+       switch (type) {
+       case V4L2_SLICED_TELETEXT_B:
+               return CX18_SLICED_TYPE_TELETEXT_B;
+       case V4L2_SLICED_CAPTION_525:
+               return CX18_SLICED_TYPE_CAPTION_525;
+       case V4L2_SLICED_WSS_625:
+               return CX18_SLICED_TYPE_WSS_625;
+       case V4L2_SLICED_VPS:
+               return CX18_SLICED_TYPE_VPS;
+       default:
+               return 0;
+       }
+}
+
+static int valid_service_line(int field, int line, int is_pal)
+{
+       return (is_pal && line >= 6 && (line != 23 || field == 0)) ||
+              (!is_pal && line >= 10 && line < 22);
+}
+
+static u16 select_service_from_set(int field, int line, u16 set, int is_pal)
+{
+       u16 valid_set = (is_pal ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525);
+       int i;
+
+       set = set & valid_set;
+       if (set == 0 || !valid_service_line(field, line, is_pal))
+               return 0;
+       if (!is_pal) {
+               if (line == 21 && (set & V4L2_SLICED_CAPTION_525))
+                       return V4L2_SLICED_CAPTION_525;
+       } else {
+               if (line == 16 && field == 0 && (set & V4L2_SLICED_VPS))
+                       return V4L2_SLICED_VPS;
+               if (line == 23 && field == 0 && (set & V4L2_SLICED_WSS_625))
+                       return V4L2_SLICED_WSS_625;
+               if (line == 23)
+                       return 0;
+       }
+       for (i = 0; i < 32; i++) {
+               if ((1 << i) & set)
+                       return 1 << i;
+       }
+       return 0;
+}
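Two literal traces of select_service_from_set(), as a sanity check of the line rules above:

/* 60 Hz, field 0, line 21, closed captions requested:
 *   select_service_from_set(0, 21, V4L2_SLICED_CAPTION_525, 0)
 *     == V4L2_SLICED_CAPTION_525   (line 21 carries CC on 525-line systems)
 *
 * 50 Hz, field 0, line 23, teletext requested:
 *   select_service_from_set(0, 23, V4L2_SLICED_TELETEXT_B, 1)
 *     == 0                         (line 23 is reserved for WSS on 625-line systems)
 */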
+
+void cx18_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
+{
+       u16 set = fmt->service_set;
+       int f, l;
+
+       fmt->service_set = 0;
+       for (f = 0; f < 2; f++) {
+               for (l = 0; l < 24; l++)
+                       fmt->service_lines[f][l] = select_service_from_set(f, l, set, is_pal);
+       }
+}
+
+static int check_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal)
+{
+       int f, l;
+       u16 set = 0;
+
+       for (f = 0; f < 2; f++) {
+               for (l = 0; l < 24; l++) {
+                       fmt->service_lines[f][l] = select_service_from_set(f, l, fmt->service_lines[f][l], is_pal);
+                       set |= fmt->service_lines[f][l];
+               }
+       }
+       return set != 0;
+}
+
+u16 cx18_get_service_set(struct v4l2_sliced_vbi_format *fmt)
+{
+       int f, l;
+       u16 set = 0;
+
+       for (f = 0; f < 2; f++) {
+               for (l = 0; l < 24; l++)
+                       set |= fmt->service_lines[f][l];
+       }
+       return set;
+}
+
+static const struct {
+       v4l2_std_id  std;
+       char        *name;
+} enum_stds[] = {
+       { V4L2_STD_PAL_BG | V4L2_STD_PAL_H, "PAL-BGH" },
+       { V4L2_STD_PAL_DK,    "PAL-DK"    },
+       { V4L2_STD_PAL_I,     "PAL-I"     },
+       { V4L2_STD_PAL_M,     "PAL-M"     },
+       { V4L2_STD_PAL_N,     "PAL-N"     },
+       { V4L2_STD_PAL_Nc,    "PAL-Nc"    },
+       { V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H, "SECAM-BGH" },
+       { V4L2_STD_SECAM_DK,  "SECAM-DK"  },
+       { V4L2_STD_SECAM_L,   "SECAM-L"   },
+       { V4L2_STD_SECAM_LC,  "SECAM-L'"  },
+       { V4L2_STD_NTSC_M,    "NTSC-M"    },
+       { V4L2_STD_NTSC_M_JP, "NTSC-J"    },
+       { V4L2_STD_NTSC_M_KR, "NTSC-K"    },
+};
+
+static const struct v4l2_standard cx18_std_60hz = {
+       .frameperiod = {.numerator = 1001, .denominator = 30000},
+       .framelines = 525,
+};
+
+static const struct v4l2_standard cx18_std_50hz = {
+       .frameperiod = { .numerator = 1, .denominator = 25 },
+       .framelines = 625,
+};
+
+static int cx18_cxc(struct cx18 *cx, unsigned int cmd, void *arg)
+{
+       struct v4l2_register *regs = arg;
+       unsigned long flags;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+       if (regs->reg >= CX18_MEM_OFFSET + CX18_MEM_SIZE)
+               return -EINVAL;
+
+       spin_lock_irqsave(&cx18_cards_lock, flags);
+       if (cmd == VIDIOC_DBG_G_REGISTER)
+               regs->val = read_enc(regs->reg);
+       else
+               write_enc(regs->val, regs->reg);
+       spin_unlock_irqrestore(&cx18_cards_lock, flags);
+       return 0;
+}
+
+static int cx18_get_fmt(struct cx18 *cx, int streamtype, struct v4l2_format *fmt)
+{
+       switch (fmt->type) {
+       case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+               fmt->fmt.pix.width = cx->params.width;
+               fmt->fmt.pix.height = cx->params.height;
+               fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
+               fmt->fmt.pix.field = V4L2_FIELD_INTERLACED;
+               if (streamtype == CX18_ENC_STREAM_TYPE_YUV) {
+                       fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_HM12;
+                       /* YUV size is (Y=(h*w) + UV=(h*(w/2))) */
+                       fmt->fmt.pix.sizeimage =
+                               fmt->fmt.pix.height * fmt->fmt.pix.width +
+                               fmt->fmt.pix.height * (fmt->fmt.pix.width / 2);
+               } else {
+                       fmt->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
+                       fmt->fmt.pix.sizeimage = 128 * 1024;
+               }
+               break;
+
+       case V4L2_BUF_TYPE_VBI_CAPTURE:
+               fmt->fmt.vbi.sampling_rate = 27000000;
+               fmt->fmt.vbi.offset = 248;
+               fmt->fmt.vbi.samples_per_line = cx->vbi.raw_decoder_line_size - 4;
+               fmt->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
+               fmt->fmt.vbi.start[0] = cx->vbi.start[0];
+               fmt->fmt.vbi.start[1] = cx->vbi.start[1];
+               fmt->fmt.vbi.count[0] = fmt->fmt.vbi.count[1] = cx->vbi.count;
+               break;
+
+       case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+       {
+               struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
+
+               vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
+               memset(vbifmt->reserved, 0, sizeof(vbifmt->reserved));
+               memset(vbifmt->service_lines, 0, sizeof(vbifmt->service_lines));
+
+               cx18_av_cmd(cx, VIDIOC_G_FMT, fmt);
+               vbifmt->service_set = cx18_get_service_set(vbifmt);
+               break;
+       }
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
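A quick arithmetic check of the HM12 sizeimage formula above, for the common 720x480 case:

/* 720x480 HM12 (4:2:0):
 *   Y  plane: 720 * 480       = 345600 bytes
 *   UV plane: (720 / 2) * 480 = 172800 bytes
 *   sizeimage                 = 518400 bytes, i.e. 1.5 bytes per pixel
 */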
+
+static int cx18_try_or_set_fmt(struct cx18 *cx, int streamtype,
+               struct v4l2_format *fmt, int set_fmt)
+{
+       struct v4l2_sliced_vbi_format *vbifmt = &fmt->fmt.sliced;
+       u16 set;
+
+       /* set window size */
+       if (fmt->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
+               int w = fmt->fmt.pix.width;
+               int h = fmt->fmt.pix.height;
+
+               if (w > 720)
+                       w = 720;
+               else if (w < 1)
+                       w = 1;
+               if (h > (cx->is_50hz ? 576 : 480))
+                       h = (cx->is_50hz ? 576 : 480);
+               else if (h < 2)
+                       h = 2;
+               cx18_get_fmt(cx, streamtype, fmt);
+               fmt->fmt.pix.width = w;
+               fmt->fmt.pix.height = h;
+
+               if (!set_fmt || (cx->params.width == w && cx->params.height == h))
+                       return 0;
+               if (atomic_read(&cx->capturing) > 0)
+                       return -EBUSY;
+
+               cx->params.width = w;
+               cx->params.height = h;
+               if (w != 720 || h != (cx->is_50hz ? 576 : 480))
+                       cx->params.video_temporal_filter = 0;
+               else
+                       cx->params.video_temporal_filter = 8;
+               cx18_av_cmd(cx, VIDIOC_S_FMT, fmt);
+               return cx18_get_fmt(cx, streamtype, fmt);
+       }
+
+       /* set raw VBI format */
+       if (fmt->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
+               if (set_fmt && streamtype == CX18_ENC_STREAM_TYPE_VBI &&
+                   cx->vbi.sliced_in->service_set &&
+                   atomic_read(&cx->capturing) > 0)
+                       return -EBUSY;
+               if (set_fmt) {
+                       cx->vbi.sliced_in->service_set = 0;
+                       cx18_av_cmd(cx, VIDIOC_S_FMT, &cx->vbi.in);
+               }
+               return cx18_get_fmt(cx, streamtype, fmt);
+       }
+
+       /* anything other than sliced VBI capture is an error */
+       if (fmt->type != V4L2_BUF_TYPE_SLICED_VBI_CAPTURE)
+               return -EINVAL;
+
+       /* TODO: implement sliced VBI; for now silently return 0.
+          The code below is kept for that work but is currently unreachable. */
+       return 0;
+
+       /* set sliced VBI capture format */
+       vbifmt->io_size = sizeof(struct v4l2_sliced_vbi_data) * 36;
+       memset(vbifmt->reserved, 0, sizeof(vbifmt->reserved));
+
+       if (vbifmt->service_set)
+               cx18_expand_service_set(vbifmt, cx->is_50hz);
+       set = check_service_set(vbifmt, cx->is_50hz);
+       vbifmt->service_set = cx18_get_service_set(vbifmt);
+
+       if (!set_fmt)
+               return 0;
+       if (set == 0)
+               return -EINVAL;
+       if (atomic_read(&cx->capturing) > 0 && cx->vbi.sliced_in->service_set == 0)
+               return -EBUSY;
+       cx18_av_cmd(cx, VIDIOC_S_FMT, fmt);
+       memcpy(cx->vbi.sliced_in, vbifmt, sizeof(*cx->vbi.sliced_in));
+       return 0;
+}
+
+static int cx18_debug_ioctls(struct file *filp, unsigned int cmd, void *arg)
+{
+       struct cx18_open_id *id = (struct cx18_open_id *)filp->private_data;
+       struct cx18 *cx = id->cx;
+       struct v4l2_register *reg = arg;
+
+       switch (cmd) {
+       /* ioctls to allow direct access to the encoder registers for testing */
+       case VIDIOC_DBG_G_REGISTER:
+               if (v4l2_chip_match_host(reg->match_type, reg->match_chip))
+                       return cx18_cxc(cx, cmd, arg);
+               if (reg->match_type == V4L2_CHIP_MATCH_I2C_DRIVER)
+                       return cx18_i2c_id(cx, reg->match_chip, cmd, arg);
+               return cx18_call_i2c_client(cx, reg->match_chip, cmd, arg);
+
+       case VIDIOC_DBG_S_REGISTER:
+               if (v4l2_chip_match_host(reg->match_type, reg->match_chip))
+                       return cx18_cxc(cx, cmd, arg);
+               if (reg->match_type == V4L2_CHIP_MATCH_I2C_DRIVER)
+                       return cx18_i2c_id(cx, reg->match_chip, cmd, arg);
+               return cx18_call_i2c_client(cx, reg->match_chip, cmd, arg);
+
+       case VIDIOC_G_CHIP_IDENT: {
+               struct v4l2_chip_ident *chip = arg;
+
+               chip->ident = V4L2_IDENT_NONE;
+               chip->revision = 0;
+               if (reg->match_type == V4L2_CHIP_MATCH_HOST) {
+                       if (v4l2_chip_match_host(reg->match_type, reg->match_chip))
+                               chip->ident = V4L2_IDENT_CX23418;
+                       return 0;
+               }
+               if (reg->match_type == V4L2_CHIP_MATCH_I2C_DRIVER)
+                       return cx18_i2c_id(cx, reg->match_chip, cmd, arg);
+               if (reg->match_type == V4L2_CHIP_MATCH_I2C_ADDR)
+                       return cx18_call_i2c_client(cx, reg->match_chip, cmd, arg);
+               return -EINVAL;
+       }
+
+       case VIDIOC_INT_S_AUDIO_ROUTING: {
+               struct v4l2_routing *route = arg;
+
+               cx18_audio_set_route(cx, route);
+               break;
+       }
+
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+int cx18_v4l2_ioctls(struct cx18 *cx, struct file *filp, unsigned cmd, void *arg)
+{
+       struct cx18_open_id *id = NULL;
+
+       if (filp)
+               id = (struct cx18_open_id *)filp->private_data;
+
+       switch (cmd) {
+       case VIDIOC_G_PRIORITY:
+       {
+               enum v4l2_priority *p = arg;
+
+               *p = v4l2_prio_max(&cx->prio);
+               break;
+       }
+
+       case VIDIOC_S_PRIORITY:
+       {
+               enum v4l2_priority *prio = arg;
+
+               return v4l2_prio_change(&cx->prio, &id->prio, *prio);
+       }
+
+       case VIDIOC_QUERYCAP:{
+               struct v4l2_capability *vcap = arg;
+
+               memset(vcap, 0, sizeof(*vcap));
+               strlcpy(vcap->driver, CX18_DRIVER_NAME, sizeof(vcap->driver));
+               strlcpy(vcap->card, cx->card_name, sizeof(vcap->card));
+               strlcpy(vcap->bus_info, pci_name(cx->dev), sizeof(vcap->bus_info));
+               vcap->version = CX18_DRIVER_VERSION;        /* version */
+               vcap->capabilities = cx->v4l2_cap;          /* capabilities */
+
+               /* reserved fields must be set to 0 */
+               vcap->reserved[0] = vcap->reserved[1] =
+                       vcap->reserved[2] = vcap->reserved[3] = 0;
+               break;
+       }
+
+       case VIDIOC_ENUMAUDIO:{
+               struct v4l2_audio *vin = arg;
+
+               return cx18_get_audio_input(cx, vin->index, vin);
+       }
+
+       case VIDIOC_G_AUDIO:{
+               struct v4l2_audio *vin = arg;
+
+               vin->index = cx->audio_input;
+               return cx18_get_audio_input(cx, vin->index, vin);
+       }
+
+       case VIDIOC_S_AUDIO:{
+               struct v4l2_audio *vout = arg;
+
+               if (vout->index >= cx->nof_audio_inputs)
+                       return -EINVAL;
+               cx->audio_input = vout->index;
+               cx18_audio_set_io(cx);
+               break;
+       }
+
+       case VIDIOC_ENUMINPUT:{
+               struct v4l2_input *vin = arg;
+
+               /* set it to defaults from our table */
+               return cx18_get_input(cx, vin->index, vin);
+       }
+
+       case VIDIOC_TRY_FMT:
+       case VIDIOC_S_FMT: {
+               struct v4l2_format *fmt = arg;
+
+               return cx18_try_or_set_fmt(cx, id->type, fmt, cmd == VIDIOC_S_FMT);
+       }
+
+       case VIDIOC_G_FMT: {
+               struct v4l2_format *fmt = arg;
+               int type = fmt->type;
+
+               memset(fmt, 0, sizeof(*fmt));
+               fmt->type = type;
+               return cx18_get_fmt(cx, id->type, fmt);
+       }
+
+       case VIDIOC_CROPCAP: {
+               struct v4l2_cropcap *cropcap = arg;
+
+               if (cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+                       return -EINVAL;
+               cropcap->bounds.top = cropcap->bounds.left = 0;
+               cropcap->bounds.width = 720;
+               cropcap->bounds.height = cx->is_50hz ? 576 : 480;
+               cropcap->pixelaspect.numerator = cx->is_50hz ? 59 : 10;
+               cropcap->pixelaspect.denominator = cx->is_50hz ? 54 : 11;
+               cropcap->defrect = cropcap->bounds;
+               return 0;
+       }
+
+       case VIDIOC_S_CROP: {
+               struct v4l2_crop *crop = arg;
+
+               if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+                       return -EINVAL;
+               return cx18_av_cmd(cx, VIDIOC_S_CROP, arg);
+       }
+
+       case VIDIOC_G_CROP: {
+               struct v4l2_crop *crop = arg;
+
+               if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+                       return -EINVAL;
+               return cx18_av_cmd(cx, VIDIOC_G_CROP, arg);
+       }
+
+       case VIDIOC_ENUM_FMT: {
+               static struct v4l2_fmtdesc formats[] = {
+                       { 0, 0, 0,
+                         "HM12 (YUV 4:1:1)", V4L2_PIX_FMT_HM12,
+                         { 0, 0, 0, 0 }
+                       },
+                       { 1, 0, V4L2_FMT_FLAG_COMPRESSED,
+                         "MPEG", V4L2_PIX_FMT_MPEG,
+                         { 0, 0, 0, 0 }
+                       }
+               };
+               struct v4l2_fmtdesc *fmt = arg;
+               enum v4l2_buf_type type = fmt->type;
+
+               switch (type) {
+               case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               if (fmt->index > 1)
+                       return -EINVAL;
+               *fmt = formats[fmt->index];
+               fmt->type = type;
+               return 0;
+       }
+
+       case VIDIOC_G_INPUT:{
+               *(int *)arg = cx->active_input;
+               break;
+       }
+
+       case VIDIOC_S_INPUT:{
+               int inp = *(int *)arg;
+
+               if (inp < 0 || inp >= cx->nof_inputs)
+                       return -EINVAL;
+
+               if (inp == cx->active_input) {
+                       CX18_DEBUG_INFO("Input unchanged\n");
+                       break;
+               }
+               CX18_DEBUG_INFO("Changing input from %d to %d\n",
+                               cx->active_input, inp);
+
+               cx->active_input = inp;
+               /* Set the audio input to whatever is appropriate for the
+                  input type. */
+               cx->audio_input = cx->card->video_inputs[inp].audio_index;
+
+               /* prevent others from messing with the streams until
+                  we're finished changing inputs. */
+               cx18_mute(cx);
+               cx18_video_set_io(cx);
+               cx18_audio_set_io(cx);
+               cx18_unmute(cx);
+               break;
+       }
+
+       case VIDIOC_G_FREQUENCY:{
+               struct v4l2_frequency *vf = arg;
+
+               if (vf->tuner != 0)
+                       return -EINVAL;
+               cx18_call_i2c_clients(cx, cmd, arg);
+               break;
+       }
+
+       case VIDIOC_S_FREQUENCY:{
+               struct v4l2_frequency vf = *(struct v4l2_frequency *)arg;
+
+               if (vf.tuner != 0)
+                       return -EINVAL;
+
+               cx18_mute(cx);
+               CX18_DEBUG_INFO("v4l2 ioctl: set frequency %d\n", vf.frequency);
+               cx18_call_i2c_clients(cx, cmd, &vf);
+               cx18_unmute(cx);
+               break;
+       }
+
+       case VIDIOC_ENUMSTD:{
+               struct v4l2_standard *vs = arg;
+               int idx = vs->index;
+
+               if (idx < 0 || idx >= ARRAY_SIZE(enum_stds))
+                       return -EINVAL;
+
+               *vs = (enum_stds[idx].std & V4L2_STD_525_60) ?
+                               cx18_std_60hz : cx18_std_50hz;
+               vs->index = idx;
+               vs->id = enum_stds[idx].std;
+               strlcpy(vs->name, enum_stds[idx].name, sizeof(vs->name));
+               break;
+       }
+
+       case VIDIOC_G_STD:{
+               *(v4l2_std_id *) arg = cx->std;
+               break;
+       }
+
+       case VIDIOC_S_STD: {
+               v4l2_std_id std = *(v4l2_std_id *) arg;
+
+               if ((std & V4L2_STD_ALL) == 0)
+                       return -EINVAL;
+
+               if (std == cx->std)
+                       break;
+
+               if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags) ||
+                   atomic_read(&cx->capturing) > 0) {
+                       /* Switching standard would turn off the radio or mess
+                          with already running streams, prevent that by
+                          returning EBUSY. */
+                       return -EBUSY;
+               }
+
+               cx->std = std;
+               cx->is_60hz = (std & V4L2_STD_525_60) ? 1 : 0;
+               cx->params.is_50hz = cx->is_50hz = !cx->is_60hz;
+               cx->params.width = 720;
+               cx->params.height = cx->is_50hz ? 576 : 480;
+               cx->vbi.count = cx->is_50hz ? 18 : 12;
+               cx->vbi.start[0] = cx->is_50hz ? 6 : 10;
+               cx->vbi.start[1] = cx->is_50hz ? 318 : 273;
+               cx->vbi.sliced_decoder_line_size = cx->is_60hz ? 272 : 284;
+               CX18_DEBUG_INFO("Switching standard to %llx.\n", (unsigned long long)cx->std);
+
+               /* Tuner */
+               cx18_call_i2c_clients(cx, VIDIOC_S_STD, &cx->std);
+               break;
+       }
+
+       case VIDIOC_S_TUNER: {  /* Setting tuner can only set audio mode */
+               struct v4l2_tuner *vt = arg;
+
+               if (vt->index != 0)
+                       return -EINVAL;
+
+               cx18_call_i2c_clients(cx, VIDIOC_S_TUNER, vt);
+               break;
+       }
+
+       case VIDIOC_G_TUNER: {
+               struct v4l2_tuner *vt = arg;
+
+               if (vt->index != 0)
+                       return -EINVAL;
+
+               memset(vt, 0, sizeof(*vt));
+               cx18_call_i2c_clients(cx, VIDIOC_G_TUNER, vt);
+
+               if (test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
+                       strlcpy(vt->name, "cx18 Radio Tuner", sizeof(vt->name));
+                       vt->type = V4L2_TUNER_RADIO;
+               } else {
+                       strlcpy(vt->name, "cx18 TV Tuner", sizeof(vt->name));
+                       vt->type = V4L2_TUNER_ANALOG_TV;
+               }
+               break;
+       }
+
+       case VIDIOC_G_SLICED_VBI_CAP: {
+               struct v4l2_sliced_vbi_cap *cap = arg;
+               int set = cx->is_50hz ? V4L2_SLICED_VBI_625 : V4L2_SLICED_VBI_525;
+               int f, l;
+               enum v4l2_buf_type type = cap->type;
+
+               memset(cap, 0, sizeof(*cap));
+               cap->type = type;
+               if (type == V4L2_BUF_TYPE_SLICED_VBI_CAPTURE) {
+                       for (f = 0; f < 2; f++) {
+                               for (l = 0; l < 24; l++) {
+                                       if (valid_service_line(f, l, cx->is_50hz))
+                                               cap->service_lines[f][l] = set;
+                               }
+                       }
+                       return 0;
+               }
+               return -EINVAL;
+       }
+
+       case VIDIOC_ENCODER_CMD:
+       case VIDIOC_TRY_ENCODER_CMD: {
+               struct v4l2_encoder_cmd *enc = arg;
+               int try = cmd == VIDIOC_TRY_ENCODER_CMD;
+
+               memset(&enc->raw, 0, sizeof(enc->raw));
+               switch (enc->cmd) {
+               case V4L2_ENC_CMD_START:
+                       enc->flags = 0;
+                       if (try)
+                               return 0;
+                       return cx18_start_capture(id);
+
+               case V4L2_ENC_CMD_STOP:
+                       enc->flags &= V4L2_ENC_CMD_STOP_AT_GOP_END;
+                       if (try)
+                               return 0;
+                       cx18_stop_capture(id, enc->flags & V4L2_ENC_CMD_STOP_AT_GOP_END);
+                       return 0;
+
+               case V4L2_ENC_CMD_PAUSE:
+                       enc->flags = 0;
+                       if (try)
+                               return 0;
+                       if (!atomic_read(&cx->capturing))
+                               return -EPERM;
+                       if (test_and_set_bit(CX18_F_I_ENC_PAUSED, &cx->i_flags))
+                               return 0;
+                       cx18_mute(cx);
+                       cx18_vapi(cx, CX18_CPU_CAPTURE_PAUSE, 1, cx18_find_handle(cx));
+                       break;
+
+               case V4L2_ENC_CMD_RESUME:
+                       enc->flags = 0;
+                       if (try)
+                               return 0;
+                       if (!atomic_read(&cx->capturing))
+                               return -EPERM;
+                       if (!test_and_clear_bit(CX18_F_I_ENC_PAUSED, &cx->i_flags))
+                               return 0;
+                       cx18_vapi(cx, CX18_CPU_CAPTURE_RESUME, 1, cx18_find_handle(cx));
+                       cx18_unmute(cx);
+                       break;
+               default:
+                       return -EINVAL;
+               }
+               break;
+       }
+
+       case VIDIOC_LOG_STATUS:
+       {
+               struct v4l2_input vidin;
+               struct v4l2_audio audin;
+               int i;
+
+               CX18_INFO("=================  START STATUS CARD #%d  =================\n", cx->num);
+               if (cx->hw_flags & CX18_HW_TVEEPROM) {
+                       struct tveeprom tv;
+
+                       cx18_read_eeprom(cx, &tv);
+               }
+               cx18_call_i2c_clients(cx, VIDIOC_LOG_STATUS, NULL);
+               cx18_get_input(cx, cx->active_input, &vidin);
+               cx18_get_audio_input(cx, cx->audio_input, &audin);
+               CX18_INFO("Video Input: %s\n", vidin.name);
+               CX18_INFO("Audio Input: %s\n", audin.name);
+               CX18_INFO("Tuner: %s\n",
+                       test_bit(CX18_F_I_RADIO_USER, &cx->i_flags) ?
+                       "Radio" : "TV");
+               cx2341x_log_status(&cx->params, cx->name);
+               CX18_INFO("Status flags: 0x%08lx\n", cx->i_flags);
+               for (i = 0; i < CX18_MAX_STREAMS; i++) {
+                       struct cx18_stream *s = &cx->streams[i];
+
+                       if (s->v4l2dev == NULL || s->buffers == 0)
+                               continue;
+                       CX18_INFO("Stream %s: status 0x%04lx, %d%% of %d KiB (%d buffers) in use\n",
+                               s->name, s->s_flags,
+                               (s->buffers - s->q_free.buffers) * 100 / s->buffers,
+                               (s->buffers * s->buf_size) / 1024, s->buffers);
+               }
+               CX18_INFO("Read MPEG/VBI: %lld/%lld bytes\n",
+                               (long long)cx->mpg_data_received,
+                               (long long)cx->vbi_data_inserted);
+               CX18_INFO("==================  END STATUS CARD #%d  ==================\n", cx->num);
+               break;
+       }
+
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int cx18_v4l2_do_ioctl(struct inode *inode, struct file *filp,
+                             unsigned int cmd, void *arg)
+{
+       struct cx18_open_id *id = (struct cx18_open_id *)filp->private_data;
+       struct cx18 *cx = id->cx;
+       int ret;
+
+       /* check priority */
+       switch (cmd) {
+       case VIDIOC_S_CTRL:
+       case VIDIOC_S_STD:
+       case VIDIOC_S_INPUT:
+       case VIDIOC_S_TUNER:
+       case VIDIOC_S_FREQUENCY:
+       case VIDIOC_S_FMT:
+       case VIDIOC_S_CROP:
+       case VIDIOC_S_EXT_CTRLS:
+               ret = v4l2_prio_check(&cx->prio, &id->prio);
+               if (ret)
+                       return ret;
+       }
+
+       switch (cmd) {
+       case VIDIOC_DBG_G_REGISTER:
+       case VIDIOC_DBG_S_REGISTER:
+       case VIDIOC_G_CHIP_IDENT:
+       case VIDIOC_INT_S_AUDIO_ROUTING:
+       case VIDIOC_INT_RESET:
+               if (cx18_debug & CX18_DBGFLG_IOCTL) {
+                       printk(KERN_INFO "cx18%d ioctl: ", cx->num);
+                       v4l_printk_ioctl(cmd);
+               }
+               return cx18_debug_ioctls(filp, cmd, arg);
+
+       case VIDIOC_G_PRIORITY:
+       case VIDIOC_S_PRIORITY:
+       case VIDIOC_QUERYCAP:
+       case VIDIOC_ENUMINPUT:
+       case VIDIOC_G_INPUT:
+       case VIDIOC_S_INPUT:
+       case VIDIOC_G_FMT:
+       case VIDIOC_S_FMT:
+       case VIDIOC_TRY_FMT:
+       case VIDIOC_ENUM_FMT:
+       case VIDIOC_CROPCAP:
+       case VIDIOC_G_CROP:
+       case VIDIOC_S_CROP:
+       case VIDIOC_G_FREQUENCY:
+       case VIDIOC_S_FREQUENCY:
+       case VIDIOC_ENUMSTD:
+       case VIDIOC_G_STD:
+       case VIDIOC_S_STD:
+       case VIDIOC_S_TUNER:
+       case VIDIOC_G_TUNER:
+       case VIDIOC_ENUMAUDIO:
+       case VIDIOC_S_AUDIO:
+       case VIDIOC_G_AUDIO:
+       case VIDIOC_G_SLICED_VBI_CAP:
+       case VIDIOC_LOG_STATUS:
+       case VIDIOC_G_ENC_INDEX:
+       case VIDIOC_ENCODER_CMD:
+       case VIDIOC_TRY_ENCODER_CMD:
+               if (cx18_debug & CX18_DBGFLG_IOCTL) {
+                       printk(KERN_INFO "cx18%d ioctl: ", cx->num);
+                       v4l_printk_ioctl(cmd);
+               }
+               return cx18_v4l2_ioctls(cx, filp, cmd, arg);
+
+       case VIDIOC_QUERYMENU:
+       case VIDIOC_QUERYCTRL:
+       case VIDIOC_S_CTRL:
+       case VIDIOC_G_CTRL:
+       case VIDIOC_S_EXT_CTRLS:
+       case VIDIOC_G_EXT_CTRLS:
+       case VIDIOC_TRY_EXT_CTRLS:
+               if (cx18_debug & CX18_DBGFLG_IOCTL) {
+                       printk(KERN_INFO "cx18%d ioctl: ", cx->num);
+                       v4l_printk_ioctl(cmd);
+               }
+               return cx18_control_ioctls(cx, cmd, arg);
+
+       case 0x00005401:        /* TCGETS: reject isatty() probes */
+               return -EINVAL;
+       default:
+               return v4l_compat_translate_ioctl(inode, filp, cmd, arg,
+                                                  cx18_v4l2_do_ioctl);
+       }
+       return 0;
+}
+
+int cx18_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+                   unsigned long arg)
+{
+       struct cx18_open_id *id = (struct cx18_open_id *)filp->private_data;
+       struct cx18 *cx = id->cx;
+       int res;
+
+       mutex_lock(&cx->serialize_lock);
+       res = video_usercopy(inode, filp, cmd, arg, cx18_v4l2_do_ioctl);
+       mutex_unlock(&cx->serialize_lock);
+       return res;
+}
diff --git a/drivers/media/video/cx18/cx18-ioctl.h b/drivers/media/video/cx18/cx18-ioctl.h
new file mode 100644 (file)
index 0000000..9f4c7eb
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ *  cx18 ioctl system call
+ *
+ *  Derived from ivtv-ioctl.h
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+u16 cx18_service2vbi(int type);
+void cx18_expand_service_set(struct v4l2_sliced_vbi_format *fmt, int is_pal);
+u16 cx18_get_service_set(struct v4l2_sliced_vbi_format *fmt);
+int cx18_v4l2_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+                   unsigned long arg);
+int cx18_v4l2_ioctls(struct cx18 *cx, struct file *filp, unsigned cmd,
+                    void *arg);
diff --git a/drivers/media/video/cx18/cx18-irq.c b/drivers/media/video/cx18/cx18-irq.c
new file mode 100644 (file)
index 0000000..6e14f8b
--- /dev/null
@@ -0,0 +1,179 @@
+/*
+ *  cx18 interrupt handling
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-firmware.h"
+#include "cx18-fileops.h"
+#include "cx18-queue.h"
+#include "cx18-irq.h"
+#include "cx18-ioctl.h"
+#include "cx18-mailbox.h"
+#include "cx18-vbi.h"
+#include "cx18-scb.h"
+
+#define DMA_MAGIC_COOKIE 0x000001fe
+
+static void epu_dma_done(struct cx18 *cx, struct cx18_mailbox *mb)
+{
+       u32 handle = mb->args[0];
+       struct cx18_stream *s = NULL;
+       struct cx18_buffer *buf;
+       u32 off;
+       int i;
+       int id;
+
+       for (i = 0; i < CX18_MAX_STREAMS; i++) {
+               s = &cx->streams[i];
+               if ((handle == s->handle) && (s->dvb.enabled))
+                       break;
+               if (s->v4l2dev && handle == s->handle)
+                       break;
+       }
+       if (i == CX18_MAX_STREAMS) {
+               CX18_WARN("DMA done for unknown handle %d\n", handle);
+               mb->error = CXERR_NOT_OPEN;
+               mb->cmd = 0;
+               cx18_mb_ack(cx, mb);
+               return;
+       }
+
+       off = mb->args[1];
+       if (mb->args[2] != 1)
+               CX18_WARN("Ack struct = %d for %s\n",
+                       mb->args[2], s->name);
+       id = read_enc(off);
+       buf = cx18_queue_find_buf(s, id, read_enc(off + 4));
+       CX18_DEBUG_HI_DMA("DMA DONE for %s (buffer %d)\n", s->name, id);
+       if (buf) {
+               cx18_buf_sync_for_cpu(s, buf);
+               if (s->type == CX18_ENC_STREAM_TYPE_TS && s->dvb.enabled) {
+                       /* process the buffer here */
+                       CX18_DEBUG_HI_DMA("TS recv and sent bytesused=%d\n",
+                                       buf->bytesused);
+
+                       dvb_dmx_swfilter(&s->dvb.demux, buf->buf,
+                                       buf->bytesused);
+
+                       cx18_buf_sync_for_device(s, buf);
+                       cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
+                           (void *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem,
+                           1, buf->id, s->buf_size);
+               } else
+                       set_bit(CX18_F_B_NEED_BUF_SWAP, &buf->b_flags);
+       } else {
+               CX18_WARN("Could not find buf %d for stream %s\n",
+                               read_enc(off), s->name);
+       }
+       mb->error = 0;
+       mb->cmd = 0;
+       cx18_mb_ack(cx, mb);
+       wake_up(&cx->dma_waitq);
+       if (s->id != -1)
+               wake_up(&s->waitq);
+}
+
+static void epu_debug(struct cx18 *cx, struct cx18_mailbox *mb)
+{
+       char str[256] = { 0 };
+       char *p;
+
+       if (mb->args[1]) {
+               setup_page(mb->args[1]);
+               memcpy_fromio(str, cx->enc_mem + mb->args[1], 252);
+               str[252] = 0;
+       }
+       cx18_mb_ack(cx, mb);
+       CX18_DEBUG_INFO("%x %s\n", mb->args[0], str);
+       p = strchr(str, '.');
+       if (!test_bit(CX18_F_I_LOADED_FW, &cx->i_flags) && p && p > str)
+               CX18_INFO("FW version: %s\n", p - 1);
+}
+
+static void hpu_cmd(struct cx18 *cx, u32 sw1)
+{
+       struct cx18_mailbox mb;
+
+       if (sw1 & IRQ_CPU_TO_EPU) {
+               memcpy_fromio(&mb, &cx->scb->cpu2epu_mb, sizeof(mb));
+               mb.error = 0;
+
+               switch (mb.cmd) {
+               case CX18_EPU_DMA_DONE:
+                       epu_dma_done(cx, &mb);
+                       break;
+               case CX18_EPU_DEBUG:
+                       epu_debug(cx, &mb);
+                       break;
+               default:
+                       CX18_WARN("Unexpected mailbox command %08x\n", mb.cmd);
+                       break;
+               }
+       }
+       if (sw1 & (IRQ_APU_TO_EPU | IRQ_HPU_TO_EPU))
+               CX18_WARN("Unexpected interrupt %08x\n", sw1);
+}
+
+irqreturn_t cx18_irq_handler(int irq, void *dev_id)
+{
+       struct cx18 *cx = (struct cx18 *)dev_id;
+       u32 sw1, sw1_mask;
+       u32 sw2, sw2_mask;
+       u32 hw2, hw2_mask;
+
+       spin_lock(&cx->dma_reg_lock);
+
+       hw2_mask = read_reg(HW2_INT_MASK5_PCI);
+       hw2 = read_reg(HW2_INT_CLR_STATUS) & hw2_mask;
+       sw2_mask = read_reg(SW2_INT_ENABLE_PCI) | IRQ_EPU_TO_HPU_ACK;
+       sw2 = read_reg(SW2_INT_STATUS) & sw2_mask;
+       sw1_mask = read_reg(SW1_INT_ENABLE_PCI) | IRQ_EPU_TO_HPU;
+       sw1 = read_reg(SW1_INT_STATUS) & sw1_mask;
+
+       write_reg(sw2&sw2_mask, SW2_INT_STATUS);
+       write_reg(sw1&sw1_mask, SW1_INT_STATUS);
+       write_reg(hw2&hw2_mask, HW2_INT_CLR_STATUS);
+
+       if (sw1 || sw2 || hw2)
+               CX18_DEBUG_HI_IRQ("SW1: %x  SW2: %x  HW2: %x\n", sw1, sw2, hw2);
+
+       /* To do: interrupt-based I2C handling
+       if (hw2 & 0x00c00000) {
+       }
+       */
+
+       if (sw2) {
+               if (sw2 & (cx->scb->cpu2hpu_irq_ack | cx->scb->cpu2epu_irq_ack))
+                       wake_up(&cx->mb_cpu_waitq);
+               if (sw2 & (cx->scb->apu2hpu_irq_ack | cx->scb->apu2epu_irq_ack))
+                       wake_up(&cx->mb_apu_waitq);
+               if (sw2 & cx->scb->epu2hpu_irq_ack)
+                       wake_up(&cx->mb_epu_waitq);
+               if (sw2 & cx->scb->hpu2epu_irq_ack)
+                       wake_up(&cx->mb_hpu_waitq);
+       }
+
+       if (sw1)
+               hpu_cmd(cx, sw1);
+       spin_unlock(&cx->dma_reg_lock);
+
+       return (hw2 | sw1 | sw2) ? IRQ_HANDLED : IRQ_NONE;
+}
diff --git a/drivers/media/video/cx18/cx18-irq.h b/drivers/media/video/cx18/cx18-irq.h
new file mode 100644 (file)
index 0000000..379f704
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ *  cx18 interrupt handling
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#define HW2_I2C1_INT                   (1 << 22)
+#define HW2_I2C2_INT                   (1 << 23)
+#define HW2_INT_CLR_STATUS             0xc730c4
+#define HW2_INT_MASK5_PCI              0xc730e4
+#define SW1_INT_SET                     0xc73100
+#define SW1_INT_STATUS                  0xc73104
+#define SW1_INT_ENABLE_PCI              0xc7311c
+#define SW2_INT_SET                     0xc73140
+#define SW2_INT_STATUS                  0xc73144
+#define SW2_INT_ENABLE_PCI              0xc7315c
+
+irqreturn_t cx18_irq_handler(int irq, void *dev_id);
+
+void cx18_irq_work_handler(struct work_struct *work);
+void cx18_dma_stream_dec_prepare(struct cx18_stream *s, u32 offset, int lock);
+void cx18_unfinished_dma(unsigned long arg);
diff --git a/drivers/media/video/cx18/cx18-mailbox.c b/drivers/media/video/cx18/cx18-mailbox.c
new file mode 100644 (file)
index 0000000..0c5f328
--- /dev/null
@@ -0,0 +1,372 @@
+/*
+ *  cx18 mailbox functions
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include <stdarg.h>
+
+#include "cx18-driver.h"
+#include "cx18-scb.h"
+#include "cx18-irq.h"
+#include "cx18-mailbox.h"
+
+#define API_FAST (1 << 2) /* Short timeout */
+#define API_SLOW (1 << 3) /* Additional 300ms timeout */
+
+#define APU 0
+#define CPU 1
+#define EPU 2
+#define HPU 3
+
+struct cx18_api_info {
+       u32 cmd;
+       u8 flags;               /* Flags, see above */
+       u8 rpu;                 /* Processing unit */
+       const char *name;       /* The name of the command */
+};
+
+#define API_ENTRY(rpu, x, f) { (x), (f), (rpu), #x }
+
+static const struct cx18_api_info api_info[] = {
+       /* MPEG encoder API */
+       API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE,               0),
+       API_ENTRY(CPU, CX18_EPU_DEBUG,                          0),
+       API_ENTRY(CPU, CX18_CREATE_TASK,                        0),
+       API_ENTRY(CPU, CX18_DESTROY_TASK,                       0),
+       API_ENTRY(CPU, CX18_CPU_CAPTURE_START,                  API_SLOW),
+       API_ENTRY(CPU, CX18_CPU_CAPTURE_STOP,                   API_SLOW),
+       API_ENTRY(CPU, CX18_CPU_CAPTURE_PAUSE,                  0),
+       API_ENTRY(CPU, CX18_CPU_CAPTURE_RESUME,                 0),
+       API_ENTRY(CPU, CX18_CPU_SET_CHANNEL_TYPE,               0),
+       API_ENTRY(CPU, CX18_CPU_SET_STREAM_OUTPUT_TYPE,         0),
+       API_ENTRY(CPU, CX18_CPU_SET_VIDEO_IN,                   0),
+       API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RATE,                 0),
+       API_ENTRY(CPU, CX18_CPU_SET_VIDEO_RESOLUTION,           0),
+       API_ENTRY(CPU, CX18_CPU_SET_FILTER_PARAM,               0),
+       API_ENTRY(CPU, CX18_CPU_SET_SPATIAL_FILTER_TYPE,        0),
+       API_ENTRY(CPU, CX18_CPU_SET_MEDIAN_CORING,              0),
+       API_ENTRY(CPU, CX18_CPU_SET_INDEXTABLE,                 0),
+       API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PARAMETERS,           0),
+       API_ENTRY(CPU, CX18_CPU_SET_VIDEO_MUTE,                 0),
+       API_ENTRY(CPU, CX18_CPU_SET_AUDIO_MUTE,                 0),
+       API_ENTRY(CPU, CX18_CPU_SET_MISC_PARAMETERS,            0),
+       API_ENTRY(CPU, CX18_CPU_SET_RAW_VBI_PARAM,              API_SLOW),
+       API_ENTRY(CPU, CX18_CPU_SET_CAPTURE_LINE_NO,            0),
+       API_ENTRY(CPU, CX18_CPU_SET_COPYRIGHT,                  0),
+       API_ENTRY(CPU, CX18_CPU_SET_AUDIO_PID,                  0),
+       API_ENTRY(CPU, CX18_CPU_SET_VIDEO_PID,                  0),
+       API_ENTRY(CPU, CX18_CPU_SET_VER_CROP_LINE,              0),
+       API_ENTRY(CPU, CX18_CPU_SET_GOP_STRUCTURE,              0),
+       API_ENTRY(CPU, CX18_CPU_SET_SCENE_CHANGE_DETECTION,     0),
+       API_ENTRY(CPU, CX18_CPU_SET_ASPECT_RATIO,               0),
+       API_ENTRY(CPU, CX18_CPU_SET_SKIP_INPUT_FRAME,           0),
+       API_ENTRY(CPU, CX18_CPU_SET_SLICED_VBI_PARAM,           0),
+       API_ENTRY(CPU, CX18_CPU_SET_USERDATA_PLACE_HOLDER,      0),
+       API_ENTRY(CPU, CX18_CPU_GET_ENC_PTS,                    0),
+       API_ENTRY(CPU, CX18_CPU_DE_SET_MDL_ACK,                 0),
+       API_ENTRY(CPU, CX18_CPU_DE_SET_MDL,                     API_FAST),
+       API_ENTRY(0, 0,                                         0),
+};
+
+static const struct cx18_api_info *find_api_info(u32 cmd)
+{
+       int i;
+
+       for (i = 0; api_info[i].cmd; i++)
+               if (api_info[i].cmd == cmd)
+                       return &api_info[i];
+       return NULL;
+}
+
+static struct cx18_mailbox *cx18_mb_is_complete(struct cx18 *cx, int rpu,
+               u32 *state, u32 *irq, u32 *req)
+{
+       struct cx18_mailbox *mb = NULL;
+       int wait_count = 0;
+       u32 ack;
+
+       switch (rpu) {
+       case APU:
+               mb = &cx->scb->epu2apu_mb;
+               *state = readl(&cx->scb->apu_state);
+               *irq = readl(&cx->scb->epu2apu_irq);
+               break;
+
+       case CPU:
+               mb = &cx->scb->epu2cpu_mb;
+               *state = readl(&cx->scb->cpu_state);
+               *irq = readl(&cx->scb->epu2cpu_irq);
+               break;
+
+       case HPU:
+               mb = &cx->scb->epu2hpu_mb;
+               *state = readl(&cx->scb->hpu_state);
+               *irq = readl(&cx->scb->epu2hpu_irq);
+               break;
+       }
+
+       if (mb == NULL)
+               return mb;
+
+       do {
+               *req = readl(&mb->request);
+               ack = readl(&mb->ack);
+               wait_count++;
+       } while (*req != ack && wait_count < 600);
+
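+       /* The peer has caught up once 'ack' equals 'request'. The next
+          request handle is the old one plus one, skipping 0 and 0xffffffff
+          (the MB_RESERVED_HANDLE_0/1 values from cx18-mailbox.h). */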
+       if (*req == ack) {
+               (*req)++;
+               if (*req == 0 || *req == 0xffffffff)
+                       *req = 1;
+               return mb;
+       }
+       return NULL;
+}
+
+long cx18_mb_ack(struct cx18 *cx, const struct cx18_mailbox *mb)
+{
+       const struct cx18_api_info *info = find_api_info(mb->cmd);
+       struct cx18_mailbox *ack_mb;
+       u32 ack_irq;
+       u8 rpu = CPU;
+
+       if (info == NULL && mb->cmd) {
+               CX18_WARN("Cannot ack unknown command %x\n", mb->cmd);
+               return -EINVAL;
+       }
+       if (info)
+               rpu = info->rpu;
+
+       switch (rpu) {
+       case HPU:
+               ack_irq = IRQ_EPU_TO_HPU_ACK;
+               ack_mb = &cx->scb->hpu2epu_mb;
+               break;
+       case APU:
+               ack_irq = IRQ_EPU_TO_APU_ACK;
+               ack_mb = &cx->scb->apu2epu_mb;
+               break;
+       case CPU:
+               ack_irq = IRQ_EPU_TO_CPU_ACK;
+               ack_mb = &cx->scb->cpu2epu_mb;
+               break;
+       default:
+               CX18_WARN("Unknown RPU for command %x\n", mb->cmd);
+               return -EINVAL;
+       }
+
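+       /* Acknowledge by echoing the received request handle into the 'ack'
+          field of the mailbox the command arrived in, then raise the
+          matching ack bit in the SW2 interrupt register. */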
+       setup_page(SCB_OFFSET);
+       write_sync(mb->request, &ack_mb->ack);
+       write_reg(ack_irq, SW2_INT_SET);
+       return 0;
+}
+
+
+static int cx18_api_call(struct cx18 *cx, u32 cmd, int args, u32 data[])
+{
+       const struct cx18_api_info *info = find_api_info(cmd);
+       u32 state = 0, irq = 0, req, oldreq, err;
+       struct cx18_mailbox *mb;
+       wait_queue_head_t *waitq;
+       int timeout = 100;
+       int cnt = 0;
+       int sig = 0;
+       int i;
+
+       if (info == NULL) {
+               CX18_WARN("unknown cmd %x\n", cmd);
+               return -EINVAL;
+       }
+
+       if (cmd == CX18_CPU_DE_SET_MDL)
+               CX18_DEBUG_HI_API("%s\n", info->name);
+       else
+               CX18_DEBUG_API("%s\n", info->name);
+       setup_page(SCB_OFFSET);
+       mb = cx18_mb_is_complete(cx, info->rpu, &state, &irq, &req);
+
+       if (mb == NULL) {
+               CX18_ERR("mb %s busy\n", info->name);
+               return -EBUSY;
+       }
+
+       oldreq = req - 1;
+       writel(cmd, &mb->cmd);
+       for (i = 0; i < args; i++)
+               writel(data[i], &mb->args[i]);
+       writel(0, &mb->error);
+       writel(req, &mb->request);
+
+       switch (info->rpu) {
+       case APU: waitq = &cx->mb_apu_waitq; break;
+       case CPU: waitq = &cx->mb_cpu_waitq; break;
+       case EPU: waitq = &cx->mb_epu_waitq; break;
+       case HPU: waitq = &cx->mb_hpu_waitq; break;
+       default: return -EINVAL;
+       }
+       if (info->flags & API_FAST)
+               timeout /= 2;
+       write_reg(irq, SW1_INT_SET);
+
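+       /* Poll until the peer copies 'request' into 'ack'. After the first
+          200 iterations, sleep 10 ms per iteration (interruptibly) unless
+          in atomic context; give up after 660 iterations. */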
+       while (!sig && readl(&mb->ack) != readl(&mb->request) && cnt < 660) {
+               if (cnt > 200 && !in_atomic())
+                       sig = cx18_msleep_timeout(10, 1);
+               cnt++;
+       }
+       if (sig)
+               return -EINTR;
+       if (cnt == 660) {
+               writel(oldreq, &mb->request);
+               CX18_ERR("mb %s failed\n", info->name);
+               return -EINVAL;
+       }
+       for (i = 0; i < MAX_MB_ARGUMENTS; i++)
+               data[i] = readl(&mb->args[i]);
+       err = readl(&mb->error);
+       if (!in_atomic() && (info->flags & API_SLOW))
+               cx18_msleep_timeout(300, 0);
+       if (err)
+               CX18_DEBUG_API("mailbox error %08x for command %s\n", err,
+                               info->name);
+       return err ? -EIO : 0;
+}
+
+int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[])
+{
+       int res = cx18_api_call(cx, cmd, args, data);
+
+       /* Allow a single retry, probably already too late though.
+          If there is no free mailbox then that is usually an indication
+          of a more serious problem. */
+       return (res == -EBUSY) ? cx18_api_call(cx, cmd, args, data) : res;
+}
+
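+/* cx->filter_mode packs the cx2341x DNR settings: bit 0 enables the spatial
+   filter, bit 1 the temporal filter, and bits 2 and up carry data[1] of
+   CX2341X_ENC_SET_DNR_FILTER_MODE (see cx18_api_func() below), programmed
+   here as filter type 2. */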
+static int cx18_set_filter_param(struct cx18_stream *s)
+{
+       struct cx18 *cx = s->cx;
+       u32 mode;
+       int ret;
+
+       mode = (cx->filter_mode & 1) ? 2 : (cx->spatial_strength ? 1 : 0);
+       ret = cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
+                       s->handle, 1, mode, cx->spatial_strength);
+       mode = (cx->filter_mode & 2) ? 2 : (cx->temporal_strength ? 1 : 0);
+       ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
+                       s->handle, 0, mode, cx->temporal_strength);
+       ret = ret ? ret : cx18_vapi(cx, CX18_CPU_SET_FILTER_PARAM, 4,
+                       s->handle, 2, cx->filter_mode >> 2, 0);
+       return ret;
+}
+
+int cx18_api_func(void *priv, u32 cmd, int in, int out,
+               u32 data[CX2341X_MBOX_MAX_DATA])
+{
+       struct cx18 *cx = priv;
+       struct cx18_stream *s = &cx->streams[CX18_ENC_STREAM_TYPE_MPG];
+
+       switch (cmd) {
+       case CX2341X_ENC_SET_OUTPUT_PORT:
+               return 0;
+       case CX2341X_ENC_SET_FRAME_RATE:
+               return cx18_vapi(cx, CX18_CPU_SET_VIDEO_IN, 6,
+                               s->handle, 0, 0, 0, 0, data[0]);
+       case CX2341X_ENC_SET_FRAME_SIZE:
+               return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RESOLUTION, 3,
+                               s->handle, data[1], data[0]);
+       case CX2341X_ENC_SET_STREAM_TYPE:
+               return cx18_vapi(cx, CX18_CPU_SET_STREAM_OUTPUT_TYPE, 2,
+                               s->handle, data[0]);
+       case CX2341X_ENC_SET_ASPECT_RATIO:
+               return cx18_vapi(cx, CX18_CPU_SET_ASPECT_RATIO, 2,
+                               s->handle, data[0]);
+
+       case CX2341X_ENC_SET_GOP_PROPERTIES:
+               return cx18_vapi(cx, CX18_CPU_SET_GOP_STRUCTURE, 3,
+                               s->handle, data[0], data[1]);
+       case CX2341X_ENC_SET_GOP_CLOSURE:
+               return 0;
+       case CX2341X_ENC_SET_AUDIO_PROPERTIES:
+               return cx18_vapi(cx, CX18_CPU_SET_AUDIO_PARAMETERS, 2,
+                               s->handle, data[0]);
+       case CX2341X_ENC_MUTE_AUDIO:
+               return cx18_vapi(cx, CX18_CPU_SET_AUDIO_MUTE, 2,
+                               s->handle, data[0]);
+       case CX2341X_ENC_SET_BIT_RATE:
+               return cx18_vapi(cx, CX18_CPU_SET_VIDEO_RATE, 5,
+                               s->handle, data[0], data[1], data[2], data[3]);
+       case CX2341X_ENC_MUTE_VIDEO:
+               return cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2,
+                               s->handle, data[0]);
+       case CX2341X_ENC_SET_FRAME_DROP_RATE:
+               return cx18_vapi(cx, CX18_CPU_SET_SKIP_INPUT_FRAME, 2,
+                               s->handle, data[0]);
+       case CX2341X_ENC_MISC:
+               return cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 4,
+                               s->handle, data[0], data[1], data[2]);
+       case CX2341X_ENC_SET_DNR_FILTER_MODE:
+               cx->filter_mode = (data[0] & 3) | (data[1] << 2);
+               return cx18_set_filter_param(s);
+       case CX2341X_ENC_SET_DNR_FILTER_PROPS:
+               cx->spatial_strength = data[0];
+               cx->temporal_strength = data[1];
+               return cx18_set_filter_param(s);
+       case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
+               return cx18_vapi(cx, CX18_CPU_SET_SPATIAL_FILTER_TYPE, 3,
+                               s->handle, data[0], data[1]);
+       case CX2341X_ENC_SET_CORING_LEVELS:
+               return cx18_vapi(cx, CX18_CPU_SET_MEDIAN_CORING, 5,
+                               s->handle, data[0], data[1], data[2], data[3]);
+       }
+       CX18_WARN("Unknown cmd %x\n", cmd);
+       return 0;
+}
+
+int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS],
+               u32 cmd, int args, ...)
+{
+       va_list ap;
+       int i;
+
+       va_start(ap, args);
+       for (i = 0; i < args; i++)
+               data[i] = va_arg(ap, u32);
+       va_end(ap);
+       return cx18_api(cx, cmd, args, data);
+}
+
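+/* Varargs wrapper around cx18_api() for commands whose returned arguments
+   are not needed, e.g. cx18_vapi(cx, CX18_CPU_CAPTURE_START, 1, s->handle).
+   Use cx18_vapi_result() when the mailbox return arguments must be read. */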
+int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...)
+{
+       u32 data[MAX_MB_ARGUMENTS];
+       va_list ap;
+       int i;
+
+       if (cx == NULL) {
+               CX18_ERR("cx == NULL (cmd=%x)\n", cmd);
+               return 0;
+       }
+       if (args > MAX_MB_ARGUMENTS) {
+               CX18_ERR("args too big (cmd=%x)\n", cmd);
+               args = MAX_MB_ARGUMENTS;
+       }
+       va_start(ap, args);
+       for (i = 0; i < args; i++)
+               data[i] = va_arg(ap, u32);
+       va_end(ap);
+       return cx18_api(cx, cmd, args, data);
+}
diff --git a/drivers/media/video/cx18/cx18-mailbox.h b/drivers/media/video/cx18/cx18-mailbox.h
new file mode 100644 (file)
index 0000000..d995641
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ *  cx18 mailbox functions
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#ifndef _CX18_MAILBOX_H_
+#define _CX18_MAILBOX_H_
+
+/* mailbox max args */
+#define MAX_MB_ARGUMENTS 6
+/* compatibility, should be same as the define in cx2341x.h */
+#define CX2341X_MBOX_MAX_DATA 16
+
+#define MB_RESERVED_HANDLE_0 0
+#define MB_RESERVED_HANDLE_1 0xFFFFFFFF
+
+struct cx18;
+
+/* The cx18_mailbox struct is the mailbox layout used for passing messages
+   between processors. */
+struct cx18_mailbox {
+    /* The sender sets a handle in 'request' after it fills in the command.
+       'request' must differ from 'ack'. The sender also generates an
+       interrupt on XPU2YPU_irq, where XPU is the sender and YPU is the
+       receiver. */
+    u32       request;
+    /* The receiver detects a new command when 'request' differs from 'ack'.
+       It sets 'ack' to the same value as 'request' to clear the command, and
+       also generates an interrupt on YPU2XPU_irq, where XPU is the sender
+       and YPU is the receiver. */
+    u32       ack;
+    u32       reserved[6];
+    /* 'cmd' identifies the command. The list of these commands are in
+       cx23418.h */
+    u32       cmd;
+    /* Each command can have up to 6 arguments */
+    u32       args[MAX_MB_ARGUMENTS];
+    /* The return code can be one of the codes in cx23418.h. If the command
+       completed successfully, the error will be ERR_SYS_SUCCESS. If it is
+       pending, the code is ERR_SYS_PENDING. If it failed, the error code
+       indicates the task from which the error originated and will be one of
+       the errors in cx23418.h; in that case ((error & 0xff) != 0) holds.
+       If the command is pending, the result will be passed in a mailbox from
+       the receiver to the sender, and 'request' will be returned in args[0]. */
+    u32       error;
+};
+
+int cx18_api(struct cx18 *cx, u32 cmd, int args, u32 data[]);
+int cx18_vapi_result(struct cx18 *cx, u32 data[MAX_MB_ARGUMENTS], u32 cmd,
+               int args, ...);
+int cx18_vapi(struct cx18 *cx, u32 cmd, int args, ...);
+int cx18_api_func(void *priv, u32 cmd, int in, int out,
+               u32 data[CX2341X_MBOX_MAX_DATA]);
+long cx18_mb_ack(struct cx18 *cx, const struct cx18_mailbox *mb);
+
+#endif
diff --git a/drivers/media/video/cx18/cx18-queue.c b/drivers/media/video/cx18/cx18-queue.c
new file mode 100644 (file)
index 0000000..65af1bb
--- /dev/null
@@ -0,0 +1,282 @@
+/*
+ *  cx18 buffer queues
+ *
+ *  Derived from ivtv-queue.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-streams.h"
+#include "cx18-queue.h"
+#include "cx18-scb.h"
+
+int cx18_buf_copy_from_user(struct cx18_stream *s, struct cx18_buffer *buf,
+               const char __user *src, int copybytes)
+{
+       if (s->buf_size - buf->bytesused < copybytes)
+               copybytes = s->buf_size - buf->bytesused;
+       if (copy_from_user(buf->buf + buf->bytesused, src, copybytes))
+               return -EFAULT;
+       buf->bytesused += copybytes;
+       return copybytes;
+}
+
+void cx18_buf_swap(struct cx18_buffer *buf)
+{
+       int i;
+
+       for (i = 0; i < buf->bytesused; i += 4)
+               swab32s((u32 *)(buf->buf + i));
+}
+
+void cx18_queue_init(struct cx18_queue *q)
+{
+       INIT_LIST_HEAD(&q->list);
+       q->buffers = 0;
+       q->length = 0;
+       q->bytesused = 0;
+}
+
+void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
+               struct cx18_queue *q)
+{
+       unsigned long flags = 0;
+
+       /* clear the buffer if it is going to be enqueued to the free queue */
+       if (q == &s->q_free) {
+               buf->bytesused = 0;
+               buf->readpos = 0;
+               buf->b_flags = 0;
+       }
+       spin_lock_irqsave(&s->qlock, flags);
+       list_add_tail(&buf->list, &q->list);
+       q->buffers++;
+       q->length += s->buf_size;
+       q->bytesused += buf->bytesused - buf->readpos;
+       spin_unlock_irqrestore(&s->qlock, flags);
+}
+
+struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
+{
+       struct cx18_buffer *buf = NULL;
+       unsigned long flags = 0;
+
+       spin_lock_irqsave(&s->qlock, flags);
+       if (!list_empty(&q->list)) {
+               buf = list_entry(q->list.next, struct cx18_buffer, list);
+               list_del_init(q->list.next);
+               q->buffers--;
+               q->length -= s->buf_size;
+               q->bytesused -= buf->bytesused - buf->readpos;
+       }
+       spin_unlock_irqrestore(&s->qlock, flags);
+       return buf;
+}
+
+struct cx18_buffer *cx18_queue_find_buf(struct cx18_stream *s, u32 id,
+       u32 bytesused)
+{
+       struct cx18 *cx = s->cx;
+       struct list_head *p;
+
+       list_for_each(p, &s->q_free.list) {
+               struct cx18_buffer *buf =
+                       list_entry(p, struct cx18_buffer, list);
+
+               if (buf->id != id)
+                       continue;
+               buf->bytesused = bytesused;
+               /* the transport buffers are handled differently,
+                  so there is no need to move them to the full queue */
+               if (s->type == CX18_ENC_STREAM_TYPE_TS)
+                       return buf;
+               s->q_free.buffers--;
+               s->q_free.length -= s->buf_size;
+               s->q_full.buffers++;
+               s->q_full.length += s->buf_size;
+               s->q_full.bytesused += buf->bytesused;
+               list_move_tail(&buf->list, &s->q_full.list);
+               return buf;
+       }
+       CX18_ERR("Cannot find buffer %d for stream %s\n", id, s->name);
+       return NULL;
+}
+
+static void cx18_queue_move_buf(struct cx18_stream *s, struct cx18_queue *from,
+               struct cx18_queue *to, int clear, int full)
+{
+       struct cx18_buffer *buf =
+               list_entry(from->list.next, struct cx18_buffer, list);
+
+       list_move_tail(from->list.next, &to->list);
+       from->buffers--;
+       from->length -= s->buf_size;
+       from->bytesused -= buf->bytesused - buf->readpos;
+       /* special handling for q_free */
+       if (clear)
+               buf->bytesused = buf->readpos = buf->b_flags = 0;
+       else if (full) {
+               /* special handling for stolen buffers, assume
+                  all bytes are used. */
+               buf->bytesused = s->buf_size;
+               buf->readpos = buf->b_flags = 0;
+       }
+       to->buffers++;
+       to->length += s->buf_size;
+       to->bytesused += buf->bytesused - buf->readpos;
+}
+
+/* Move 'needed_bytes' worth of buffers from queue 'from' into queue 'to'.
+   If 'needed_bytes' == 0, then move all buffers from 'from' into 'to'.
+   If 'steal' != NULL, then buffers may also be taken from that queue if
+   needed.
+
+   A buffer is automatically cleared if it goes to the free queue. It is
+   also cleared if buffers need to be taken from the 'steal' queue and
+   the 'from' queue is the free queue.
+
+   When 'from' is q_free, needed_bytes is compared against the total
+   available buffer length; otherwise it is compared against the bytesused
+   value. For the 'steal' queue the total available buffer length is
+   always used.
+
+   Returns -ENOMEM if the buffers could not be obtained, 0 if all buffers
+   were obtained from the 'from' list, and otherwise the (non-zero) number
+   of stolen buffers. */
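+/* For example, cx18_flush_queues() below uses needed_bytes == 0 to return
+   everything queued on q_io and q_full to q_free. */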
+int cx18_queue_move(struct cx18_stream *s, struct cx18_queue *from,
+       struct cx18_queue *steal, struct cx18_queue *to, int needed_bytes)
+{
+       unsigned long flags;
+       int rc = 0;
+       int from_free = from == &s->q_free;
+       int to_free = to == &s->q_free;
+       int bytes_available;
+
+       spin_lock_irqsave(&s->qlock, flags);
+       if (needed_bytes == 0) {
+               from_free = 1;
+               needed_bytes = from->length;
+       }
+
+       bytes_available = from_free ? from->length : from->bytesused;
+       bytes_available += steal ? steal->length : 0;
+
+       if (bytes_available < needed_bytes) {
+               spin_unlock_irqrestore(&s->qlock, flags);
+               return -ENOMEM;
+       }
+       if (from_free) {
+               u32 old_length = to->length;
+
+               while (to->length - old_length < needed_bytes) {
+                       if (list_empty(&from->list))
+                               from = steal;
+                       if (from == steal)
+                               rc++;   /* keep track of 'stolen' buffers */
+                       cx18_queue_move_buf(s, from, to, 1, 0);
+               }
+       } else {
+               u32 old_bytesused = to->bytesused;
+
+               while (to->bytesused - old_bytesused < needed_bytes) {
+                       if (list_empty(&from->list))
+                               from = steal;
+                       if (from == steal)
+                               rc++;   /* keep track of 'stolen' buffers */
+                       cx18_queue_move_buf(s, from, to, to_free, rc);
+               }
+       }
+       spin_unlock_irqrestore(&s->qlock, flags);
+       return rc;
+}
+
+void cx18_flush_queues(struct cx18_stream *s)
+{
+       cx18_queue_move(s, &s->q_io, NULL, &s->q_free, 0);
+       cx18_queue_move(s, &s->q_full, NULL, &s->q_free, 0);
+}
+
+int cx18_stream_alloc(struct cx18_stream *s)
+{
+       struct cx18 *cx = s->cx;
+       int i;
+
+       if (s->buffers == 0)
+               return 0;
+
+       CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers (%dkB total)\n",
+               s->name, s->buffers, s->buf_size,
+               s->buffers * s->buf_size / 1024);
+
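+       /* The MDL entries for all streams live in the reserved SCB area;
+          verify that mdl_offset + buffers entries still fit within
+          SCB_RESERVED_SIZE bytes of the start of the SCB. */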
+       if (((char *)&cx->scb->cpu_mdl[cx->mdl_offset + s->buffers] -
+                               (char *)cx->scb) > SCB_RESERVED_SIZE) {
+               unsigned bufsz = (((char *)cx->scb) + SCB_RESERVED_SIZE -
+                                       ((char *)cx->scb->cpu_mdl));
+
+               CX18_ERR("Too many buffers, cannot fit in SCB area\n");
+               CX18_ERR("Max buffers = %zd\n",
+                       bufsz / sizeof(struct cx18_mdl));
+               return -ENOMEM;
+       }
+
+       s->mdl_offset = cx->mdl_offset;
+
+       /* allocate stream buffers. Initially all buffers are in q_free. */
+       for (i = 0; i < s->buffers; i++) {
+               struct cx18_buffer *buf =
+                       kzalloc(sizeof(struct cx18_buffer), GFP_KERNEL);
+
+               if (buf == NULL)
+                       break;
+               buf->buf = kmalloc(s->buf_size, GFP_KERNEL);
+               if (buf->buf == NULL) {
+                       kfree(buf);
+                       break;
+               }
+               buf->id = cx->buffer_id++;
+               INIT_LIST_HEAD(&buf->list);
+               buf->dma_handle = pci_map_single(s->cx->dev,
+                               buf->buf, s->buf_size, s->dma);
+               cx18_buf_sync_for_cpu(s, buf);
+               cx18_enqueue(s, buf, &s->q_free);
+       }
+       if (i == s->buffers) {
+               cx->mdl_offset += s->buffers;
+               return 0;
+       }
+       CX18_ERR("Couldn't allocate buffers for %s stream\n", s->name);
+       cx18_stream_free(s);
+       return -ENOMEM;
+}
+
+void cx18_stream_free(struct cx18_stream *s)
+{
+       struct cx18_buffer *buf;
+
+       /* move all buffers to q_free */
+       cx18_flush_queues(s);
+
+       /* empty q_free */
+       while ((buf = cx18_dequeue(s, &s->q_free))) {
+               pci_unmap_single(s->cx->dev, buf->dma_handle,
+                               s->buf_size, s->dma);
+               kfree(buf->buf);
+               kfree(buf);
+       }
+}
diff --git a/drivers/media/video/cx18/cx18-queue.h b/drivers/media/video/cx18/cx18-queue.h
new file mode 100644 (file)
index 0000000..f86c8a6
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ *  cx18 buffer queues
+ *
+ *  Derived from ivtv-queue.h
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#define CX18_DMA_UNMAPPED      ((u32) -1)
+
+/* cx18_buffer utility functions */
+
+static inline void cx18_buf_sync_for_cpu(struct cx18_stream *s,
+       struct cx18_buffer *buf)
+{
+       pci_dma_sync_single_for_cpu(s->cx->dev, buf->dma_handle,
+                               s->buf_size, s->dma);
+}
+
+static inline void cx18_buf_sync_for_device(struct cx18_stream *s,
+       struct cx18_buffer *buf)
+{
+       pci_dma_sync_single_for_device(s->cx->dev, buf->dma_handle,
+                               s->buf_size, s->dma);
+}
+
+int cx18_buf_copy_from_user(struct cx18_stream *s, struct cx18_buffer *buf,
+       const char __user *src, int copybytes);
+void cx18_buf_swap(struct cx18_buffer *buf);
+
+/* cx18_queue utility functions */
+void cx18_queue_init(struct cx18_queue *q);
+void cx18_enqueue(struct cx18_stream *s, struct cx18_buffer *buf,
+       struct cx18_queue *q);
+struct cx18_buffer *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q);
+int cx18_queue_move(struct cx18_stream *s, struct cx18_queue *from,
+       struct cx18_queue *steal, struct cx18_queue *to, int needed_bytes);
+struct cx18_buffer *cx18_queue_find_buf(struct cx18_stream *s, u32 id,
+       u32 bytesused);
+void cx18_flush_queues(struct cx18_stream *s);
+
+/* cx18_stream utility functions */
+int cx18_stream_alloc(struct cx18_stream *s);
+void cx18_stream_free(struct cx18_stream *s);
diff --git a/drivers/media/video/cx18/cx18-scb.c b/drivers/media/video/cx18/cx18-scb.c
new file mode 100644 (file)
index 0000000..30bc803
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ *  cx18 System Control Block initialization
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-scb.h"
+
+void cx18_init_scb(struct cx18 *cx)
+{
+       setup_page(SCB_OFFSET);
+       memset_io(cx->scb, 0, 0x10000);
+
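+       /* Publish, for each processor pair, the SW1 bit used to signal a new
+          command and the SW2 bit used to acknowledge it (see the IRQ_*
+          defines in cx18-scb.h). */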
+       writel(IRQ_APU_TO_CPU,     &cx->scb->apu2cpu_irq);
+       writel(IRQ_CPU_TO_APU_ACK, &cx->scb->cpu2apu_irq_ack);
+       writel(IRQ_HPU_TO_CPU,     &cx->scb->hpu2cpu_irq);
+       writel(IRQ_CPU_TO_HPU_ACK, &cx->scb->cpu2hpu_irq_ack);
+       writel(IRQ_PPU_TO_CPU,     &cx->scb->ppu2cpu_irq);
+       writel(IRQ_CPU_TO_PPU_ACK, &cx->scb->cpu2ppu_irq_ack);
+       writel(IRQ_EPU_TO_CPU,     &cx->scb->epu2cpu_irq);
+       writel(IRQ_CPU_TO_EPU_ACK, &cx->scb->cpu2epu_irq_ack);
+
+       writel(IRQ_CPU_TO_APU,     &cx->scb->cpu2apu_irq);
+       writel(IRQ_APU_TO_CPU_ACK, &cx->scb->apu2cpu_irq_ack);
+       writel(IRQ_HPU_TO_APU,     &cx->scb->hpu2apu_irq);
+       writel(IRQ_APU_TO_HPU_ACK, &cx->scb->apu2hpu_irq_ack);
+       writel(IRQ_PPU_TO_APU,     &cx->scb->ppu2apu_irq);
+       writel(IRQ_APU_TO_PPU_ACK, &cx->scb->apu2ppu_irq_ack);
+       writel(IRQ_EPU_TO_APU,     &cx->scb->epu2apu_irq);
+       writel(IRQ_APU_TO_EPU_ACK, &cx->scb->apu2epu_irq_ack);
+
+       writel(IRQ_CPU_TO_HPU,     &cx->scb->cpu2hpu_irq);
+       writel(IRQ_HPU_TO_CPU_ACK, &cx->scb->hpu2cpu_irq_ack);
+       writel(IRQ_APU_TO_HPU,     &cx->scb->apu2hpu_irq);
+       writel(IRQ_HPU_TO_APU_ACK, &cx->scb->hpu2apu_irq_ack);
+       writel(IRQ_PPU_TO_HPU,     &cx->scb->ppu2hpu_irq);
+       writel(IRQ_HPU_TO_PPU_ACK, &cx->scb->hpu2ppu_irq_ack);
+       writel(IRQ_EPU_TO_HPU,     &cx->scb->epu2hpu_irq);
+       writel(IRQ_HPU_TO_EPU_ACK, &cx->scb->hpu2epu_irq_ack);
+
+       writel(IRQ_CPU_TO_PPU,     &cx->scb->cpu2ppu_irq);
+       writel(IRQ_PPU_TO_CPU_ACK, &cx->scb->ppu2cpu_irq_ack);
+       writel(IRQ_APU_TO_PPU,     &cx->scb->apu2ppu_irq);
+       writel(IRQ_PPU_TO_APU_ACK, &cx->scb->ppu2apu_irq_ack);
+       writel(IRQ_HPU_TO_PPU,     &cx->scb->hpu2ppu_irq);
+       writel(IRQ_PPU_TO_HPU_ACK, &cx->scb->ppu2hpu_irq_ack);
+       writel(IRQ_EPU_TO_PPU,     &cx->scb->epu2ppu_irq);
+       writel(IRQ_PPU_TO_EPU_ACK, &cx->scb->ppu2epu_irq_ack);
+
+       writel(IRQ_CPU_TO_EPU,     &cx->scb->cpu2epu_irq);
+       writel(IRQ_EPU_TO_CPU_ACK, &cx->scb->epu2cpu_irq_ack);
+       writel(IRQ_APU_TO_EPU,     &cx->scb->apu2epu_irq);
+       writel(IRQ_EPU_TO_APU_ACK, &cx->scb->epu2apu_irq_ack);
+       writel(IRQ_HPU_TO_EPU,     &cx->scb->hpu2epu_irq);
+       writel(IRQ_EPU_TO_HPU_ACK, &cx->scb->epu2hpu_irq_ack);
+       writel(IRQ_PPU_TO_EPU,     &cx->scb->ppu2epu_irq);
+       writel(IRQ_EPU_TO_PPU_ACK, &cx->scb->epu2ppu_irq_ack);
+
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, apu2cpu_mb),
+                       &cx->scb->apu2cpu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, hpu2cpu_mb),
+                       &cx->scb->hpu2cpu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, ppu2cpu_mb),
+                       &cx->scb->ppu2cpu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, epu2cpu_mb),
+                       &cx->scb->epu2cpu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, cpu2apu_mb),
+                       &cx->scb->cpu2apu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, hpu2apu_mb),
+                       &cx->scb->hpu2apu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, ppu2apu_mb),
+                       &cx->scb->ppu2apu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, epu2apu_mb),
+                       &cx->scb->epu2apu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, cpu2hpu_mb),
+                       &cx->scb->cpu2hpu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, apu2hpu_mb),
+                       &cx->scb->apu2hpu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, ppu2hpu_mb),
+                       &cx->scb->ppu2hpu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, epu2hpu_mb),
+                       &cx->scb->epu2hpu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, cpu2ppu_mb),
+                       &cx->scb->cpu2ppu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, apu2ppu_mb),
+                       &cx->scb->apu2ppu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, hpu2ppu_mb),
+                       &cx->scb->hpu2ppu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, epu2ppu_mb),
+                       &cx->scb->epu2ppu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, cpu2epu_mb),
+                       &cx->scb->cpu2epu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, apu2epu_mb),
+                       &cx->scb->apu2epu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, hpu2epu_mb),
+                       &cx->scb->hpu2epu_mb_offset);
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, ppu2epu_mb),
+                       &cx->scb->ppu2epu_mb_offset);
+
+       writel(SCB_OFFSET + offsetof(struct cx18_scb, cpu_state),
+                       &cx->scb->ipc_offset);
+
+       writel(1, &cx->scb->hpu_state);
+       writel(1, &cx->scb->epu_state);
+}
diff --git a/drivers/media/video/cx18/cx18-scb.h b/drivers/media/video/cx18/cx18-scb.h
new file mode 100644 (file)
index 0000000..86b4cb1
--- /dev/null
@@ -0,0 +1,285 @@
+/*
+ *  cx18 System Control Block initialization
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#ifndef CX18_SCB_H
+#define CX18_SCB_H
+
+#include "cx18-mailbox.h"
+
+/* NOTE: All ACK interrupts are in the SW2 register.  All non-ACK interrupts
+   are in the SW1 register. */
+
+#define IRQ_APU_TO_CPU         0x00000001
+#define IRQ_CPU_TO_APU_ACK     0x00000001
+#define IRQ_HPU_TO_CPU         0x00000002
+#define IRQ_CPU_TO_HPU_ACK     0x00000002
+#define IRQ_PPU_TO_CPU         0x00000004
+#define IRQ_CPU_TO_PPU_ACK     0x00000004
+#define IRQ_EPU_TO_CPU         0x00000008
+#define IRQ_CPU_TO_EPU_ACK     0x00000008
+
+#define IRQ_CPU_TO_APU         0x00000010
+#define IRQ_APU_TO_CPU_ACK     0x00000010
+#define IRQ_HPU_TO_APU         0x00000020
+#define IRQ_APU_TO_HPU_ACK     0x00000020
+#define IRQ_PPU_TO_APU         0x00000040
+#define IRQ_APU_TO_PPU_ACK     0x00000040
+#define IRQ_EPU_TO_APU         0x00000080
+#define IRQ_APU_TO_EPU_ACK     0x00000080
+
+#define IRQ_CPU_TO_HPU         0x00000100
+#define IRQ_HPU_TO_CPU_ACK     0x00000100
+#define IRQ_APU_TO_HPU         0x00000200
+#define IRQ_HPU_TO_APU_ACK     0x00000200
+#define IRQ_PPU_TO_HPU         0x00000400
+#define IRQ_HPU_TO_PPU_ACK     0x00000400
+#define IRQ_EPU_TO_HPU         0x00000800
+#define IRQ_HPU_TO_EPU_ACK     0x00000800
+
+#define IRQ_CPU_TO_PPU         0x00001000
+#define IRQ_PPU_TO_CPU_ACK     0x00001000
+#define IRQ_APU_TO_PPU         0x00002000
+#define IRQ_PPU_TO_APU_ACK     0x00002000
+#define IRQ_HPU_TO_PPU         0x00004000
+#define IRQ_PPU_TO_HPU_ACK     0x00004000
+#define IRQ_EPU_TO_PPU         0x00008000
+#define IRQ_PPU_TO_EPU_ACK     0x00008000
+
+#define IRQ_CPU_TO_EPU         0x00010000
+#define IRQ_EPU_TO_CPU_ACK     0x00010000
+#define IRQ_APU_TO_EPU         0x00020000
+#define IRQ_EPU_TO_APU_ACK     0x00020000
+#define IRQ_HPU_TO_EPU         0x00040000
+#define IRQ_EPU_TO_HPU_ACK     0x00040000
+#define IRQ_PPU_TO_EPU         0x00080000
+#define IRQ_EPU_TO_PPU_ACK     0x00080000
+
+#define SCB_OFFSET  0xDC0000
+
+/* If the firmware uses a fixed memory map, it shall not allocate the area
+   between SCB_OFFSET and SCB_OFFSET+SCB_RESERVED_SIZE-1 inclusive. */
+#define SCB_RESERVED_SIZE 0x10000
+
+
+/* This structure is used by the EPU to provide memory descriptors in its memory */
+struct cx18_mdl {
+    u32 paddr;  /* Physical address of a buffer segment */
+    u32 length; /* Length of the buffer segment */
+};
+
+/* This structure is used by the CPU to provide information on completed buffers */
+struct cx18_mdl_ack {
+    u32 id;        /* ID of a completed MDL */
+    u32 data_used; /* Total data filled in the MDL for buffer 'id' */
+};
+
+struct cx18_scb {
+       /* These fields form the System Control Block, which is used at boot
+          time for locating the IPC data as well as the code positions for all
+          processors. The offsets are relative to the start of this struct. */
+
+       /* Offset where to find the Inter-Processor Communication data */
+       u32 ipc_offset;
+       u32 reserved01[7];
+       /* Offset where to find the start of the CPU code */
+       u32 cpu_code_offset;
+       u32 reserved02[3];
+       /* Offset where to find the start of the APU code */
+       u32 apu_code_offset;
+       u32 reserved03[3];
+       /* Offset where to find the start of the HPU code */
+       u32 hpu_code_offset;
+       u32 reserved04[3];
+       /* Offset where to find the start of the PPU code */
+       u32 ppu_code_offset;
+       u32 reserved05[3];
+
+       /* These fields form the Inter-Processor Communication data, which is
+          used by all processors to locate the information needed to
+          communicate with the other processors. */
+
+       /* Fields for CPU: */
+
+       /* bit 0: 1/0 processor ready/not ready. Set other bits to 0. */
+       u32 cpu_state;
+       u32 reserved1[7];
+       /* Offset to the mailbox used for sending commands from APU to CPU */
+       u32 apu2cpu_mb_offset;
+       /* Value to write to the SW1 register set (0xC7003100) once the
+          command is ready */
+       u32 apu2cpu_irq;
+       /* Value to write to the SW2 register set (0xC7003140) once the
+          command has been cleared */
+       u32 apu2cpu_irq_ack;
+       u32 reserved2[13];
+
+       u32 hpu2cpu_mb_offset;
+       u32 hpu2cpu_irq;
+       u32 hpu2cpu_irq_ack;
+       u32 reserved3[13];
+
+       u32 ppu2cpu_mb_offset;
+       u32 ppu2cpu_irq;
+       u32 ppu2cpu_irq_ack;
+       u32 reserved4[13];
+
+       u32 epu2cpu_mb_offset;
+       u32 epu2cpu_irq;
+       u32 epu2cpu_irq_ack;
+       u32 reserved5[13];
+       u32 reserved6[8];
+
+       /* Fields for APU: */
+
+       u32 apu_state;
+       u32 reserved11[7];
+       u32 cpu2apu_mb_offset;
+       u32 cpu2apu_irq;
+       u32 cpu2apu_irq_ack;
+       u32 reserved12[13];
+
+       u32 hpu2apu_mb_offset;
+       u32 hpu2apu_irq;
+       u32 hpu2apu_irq_ack;
+       u32 reserved13[13];
+
+       u32 ppu2apu_mb_offset;
+       u32 ppu2apu_irq;
+       u32 ppu2apu_irq_ack;
+       u32 reserved14[13];
+
+       u32 epu2apu_mb_offset;
+       u32 epu2apu_irq;
+       u32 epu2apu_irq_ack;
+       u32 reserved15[13];
+       u32 reserved16[8];
+
+       /* Fields for HPU: */
+
+       u32 hpu_state;
+       u32 reserved21[7];
+       u32 cpu2hpu_mb_offset;
+       u32 cpu2hpu_irq;
+       u32 cpu2hpu_irq_ack;
+       u32 reserved22[13];
+
+       u32 apu2hpu_mb_offset;
+       u32 apu2hpu_irq;
+       u32 apu2hpu_irq_ack;
+       u32 reserved23[13];
+
+       u32 ppu2hpu_mb_offset;
+       u32 ppu2hpu_irq;
+       u32 ppu2hpu_irq_ack;
+       u32 reserved24[13];
+
+       u32 epu2hpu_mb_offset;
+       u32 epu2hpu_irq;
+       u32 epu2hpu_irq_ack;
+       u32 reserved25[13];
+       u32 reserved26[8];
+
+       /* Fields for PPU: */
+
+       u32 ppu_state;
+       u32 reserved31[7];
+       u32 cpu2ppu_mb_offset;
+       u32 cpu2ppu_irq;
+       u32 cpu2ppu_irq_ack;
+       u32 reserved32[13];
+
+       u32 apu2ppu_mb_offset;
+       u32 apu2ppu_irq;
+       u32 apu2ppu_irq_ack;
+       u32 reserved33[13];
+
+       u32 hpu2ppu_mb_offset;
+       u32 hpu2ppu_irq;
+       u32 hpu2ppu_irq_ack;
+       u32 reserved34[13];
+
+       u32 epu2ppu_mb_offset;
+       u32 epu2ppu_irq;
+       u32 epu2ppu_irq_ack;
+       u32 reserved35[13];
+       u32 reserved36[8];
+
+       /* Fields for EPU: */
+
+       u32 epu_state;
+       u32 reserved41[7];
+       u32 cpu2epu_mb_offset;
+       u32 cpu2epu_irq;
+       u32 cpu2epu_irq_ack;
+       u32 reserved42[13];
+
+       u32 apu2epu_mb_offset;
+       u32 apu2epu_irq;
+       u32 apu2epu_irq_ack;
+       u32 reserved43[13];
+
+       u32 hpu2epu_mb_offset;
+       u32 hpu2epu_irq;
+       u32 hpu2epu_irq_ack;
+       u32 reserved44[13];
+
+       u32 ppu2epu_mb_offset;
+       u32 ppu2epu_irq;
+       u32 ppu2epu_irq_ack;
+       u32 reserved45[13];
+       u32 reserved46[8];
+
+       u32 semaphores[8];  /* Semaphores */
+
+       u32 reserved50[32]; /* Reserved for future use */
+
+       struct cx18_mailbox  apu2cpu_mb;
+       struct cx18_mailbox  hpu2cpu_mb;
+       struct cx18_mailbox  ppu2cpu_mb;
+       struct cx18_mailbox  epu2cpu_mb;
+
+       struct cx18_mailbox  cpu2apu_mb;
+       struct cx18_mailbox  hpu2apu_mb;
+       struct cx18_mailbox  ppu2apu_mb;
+       struct cx18_mailbox  epu2apu_mb;
+
+       struct cx18_mailbox  cpu2hpu_mb;
+       struct cx18_mailbox  apu2hpu_mb;
+       struct cx18_mailbox  ppu2hpu_mb;
+       struct cx18_mailbox  epu2hpu_mb;
+
+       struct cx18_mailbox  cpu2ppu_mb;
+       struct cx18_mailbox  apu2ppu_mb;
+       struct cx18_mailbox  hpu2ppu_mb;
+       struct cx18_mailbox  epu2ppu_mb;
+
+       struct cx18_mailbox  cpu2epu_mb;
+       struct cx18_mailbox  apu2epu_mb;
+       struct cx18_mailbox  hpu2epu_mb;
+       struct cx18_mailbox  ppu2epu_mb;
+
+       struct cx18_mdl_ack  cpu_mdl_ack[CX18_MAX_STREAMS][2];
+       struct cx18_mdl      cpu_mdl[1];
+};
+
+void cx18_init_scb(struct cx18 *cx);
+
+#endif
diff --git a/drivers/media/video/cx18/cx18-streams.c b/drivers/media/video/cx18/cx18-streams.c
new file mode 100644 (file)
index 0000000..afb141b
--- /dev/null
@@ -0,0 +1,566 @@
+/*
+ *  cx18 init/start/stop/exit stream functions
+ *
+ *  Derived from ivtv-streams.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-fileops.h"
+#include "cx18-mailbox.h"
+#include "cx18-i2c.h"
+#include "cx18-queue.h"
+#include "cx18-ioctl.h"
+#include "cx18-streams.h"
+#include "cx18-cards.h"
+#include "cx18-scb.h"
+#include "cx18-av-core.h"
+#include "cx18-dvb.h"
+
+#define CX18_DSP0_INTERRUPT_MASK       0xd0004C
+
+static struct file_operations cx18_v4l2_enc_fops = {
+      .owner = THIS_MODULE,
+      .read = cx18_v4l2_read,
+      .open = cx18_v4l2_open,
+      .ioctl = cx18_v4l2_ioctl,
+      .release = cx18_v4l2_close,
+      .poll = cx18_v4l2_enc_poll,
+};
+
+/* offset from 0 to register ts v4l2 minors on */
+#define CX18_V4L2_ENC_TS_OFFSET   16
+/* offset from 0 to register pcm v4l2 minors on */
+#define CX18_V4L2_ENC_PCM_OFFSET  24
+/* offset from 0 to register yuv v4l2 minors on */
+#define CX18_V4L2_ENC_YUV_OFFSET  32
+
+static struct {
+       const char *name;
+       int vfl_type;
+       int minor_offset;
+       int dma;
+       enum v4l2_buf_type buf_type;
+       struct file_operations *fops;
+} cx18_stream_info[] = {
+       {       /* CX18_ENC_STREAM_TYPE_MPG */
+               "encoder MPEG",
+               VFL_TYPE_GRABBER, 0,
+               PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+               &cx18_v4l2_enc_fops
+       },
+       {       /* CX18_ENC_STREAM_TYPE_TS */
+               "TS",
+               VFL_TYPE_GRABBER, -1,
+               PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+               &cx18_v4l2_enc_fops
+       },
+       {       /* CX18_ENC_STREAM_TYPE_YUV */
+               "encoder YUV",
+               VFL_TYPE_GRABBER, CX18_V4L2_ENC_YUV_OFFSET,
+               PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+               &cx18_v4l2_enc_fops
+       },
+       {       /* CX18_ENC_STREAM_TYPE_VBI */
+               "encoder VBI",
+               VFL_TYPE_VBI, 0,
+               PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VBI_CAPTURE,
+               &cx18_v4l2_enc_fops
+       },
+       {       /* CX18_ENC_STREAM_TYPE_PCM */
+               "encoder PCM audio",
+               VFL_TYPE_GRABBER, CX18_V4L2_ENC_PCM_OFFSET,
+               PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_PRIVATE,
+               &cx18_v4l2_enc_fops
+       },
+       {       /* CX18_ENC_STREAM_TYPE_IDX */
+               "encoder IDX",
+               VFL_TYPE_GRABBER, -1,
+               PCI_DMA_FROMDEVICE, V4L2_BUF_TYPE_VIDEO_CAPTURE,
+               &cx18_v4l2_enc_fops
+       },
+       {       /* CX18_ENC_STREAM_TYPE_RAD */
+               "encoder radio",
+               VFL_TYPE_RADIO, 0,
+               PCI_DMA_NONE, V4L2_BUF_TYPE_PRIVATE,
+               &cx18_v4l2_enc_fops
+       },
+};
+
+static void cx18_stream_init(struct cx18 *cx, int type)
+{
+       struct cx18_stream *s = &cx->streams[type];
+       struct video_device *dev = s->v4l2dev;
+       u32 max_size = cx->options.megabytes[type] * 1024 * 1024;
+
+       /* we need to keep v4l2dev, so restore it afterwards */
+       memset(s, 0, sizeof(*s));
+       s->v4l2dev = dev;
+
+       /* initialize cx18_stream fields */
+       s->cx = cx;
+       s->type = type;
+       s->name = cx18_stream_info[type].name;
+       s->handle = 0xffffffff;
+
+       s->dma = cx18_stream_info[type].dma;
+       s->buf_size = cx->stream_buf_size[type];
+       if (s->buf_size)
+               s->buffers = max_size / s->buf_size;
+       if (s->buffers > 63) {
+               /* Each stream has a maximum of 63 buffers,
+                  ensure we do not exceed that. */
+               s->buffers = 63;
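+               /* Recompute the buffer size, rounded down to a 4 kB multiple */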
+               s->buf_size = (max_size / s->buffers) & ~0xfff;
+       }
+       spin_lock_init(&s->qlock);
+       init_waitqueue_head(&s->waitq);
+       s->id = -1;
+       cx18_queue_init(&s->q_free);
+       cx18_queue_init(&s->q_full);
+       cx18_queue_init(&s->q_io);
+}
+
+static int cx18_prep_dev(struct cx18 *cx, int type)
+{
+       struct cx18_stream *s = &cx->streams[type];
+       u32 cap = cx->v4l2_cap;
+       int minor_offset = cx18_stream_info[type].minor_offset;
+       int minor;
+
+       /* These four fields are always initialized. If v4l2dev == NULL, then
+          this stream is not in use; in that case only these four fields may
+          be used. */
+       s->v4l2dev = NULL;
+       s->cx = cx;
+       s->type = type;
+       s->name = cx18_stream_info[type].name;
+
+       /* Check whether the radio is supported */
+       if (type == CX18_ENC_STREAM_TYPE_RAD && !(cap & V4L2_CAP_RADIO))
+               return 0;
+
+       /* Check whether VBI is supported */
+       if (type == CX18_ENC_STREAM_TYPE_VBI &&
+           !(cap & (V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE)))
+               return 0;
+
+       /* card number + user defined offset + device offset */
+       minor = cx->num + cx18_first_minor + minor_offset;
+
+       /* User explicitly selected 0 buffers for these streams, so don't
+          create them. */
+       if (cx18_stream_info[type].dma != PCI_DMA_NONE &&
+           cx->options.megabytes[type] == 0) {
+               CX18_INFO("Disabled %s device\n", cx18_stream_info[type].name);
+               return 0;
+       }
+
+       cx18_stream_init(cx, type);
+
+       if (minor_offset == -1)
+               return 0;
+
+       /* allocate and initialize the v4l2 video device structure */
+       s->v4l2dev = video_device_alloc();
+       if (s->v4l2dev == NULL) {
+               CX18_ERR("Couldn't allocate v4l2 video_device for %s\n",
+                               s->name);
+               return -ENOMEM;
+       }
+
+       s->v4l2dev->type =
+               VID_TYPE_CAPTURE | VID_TYPE_TUNER | VID_TYPE_TELETEXT |
+               VID_TYPE_CLIPPING | VID_TYPE_SCALES | VID_TYPE_MPEG_ENCODER;
+       snprintf(s->v4l2dev->name, sizeof(s->v4l2dev->name), "cx18%d %s",
+                       cx->num, s->name);
+
+       s->v4l2dev->minor = minor;
+       s->v4l2dev->dev = &cx->dev->dev;
+       s->v4l2dev->fops = cx18_stream_info[type].fops;
+       s->v4l2dev->release = video_device_release;
+
+       return 0;
+}
+
+/* Initialize v4l2 variables and register v4l2 devices */
+int cx18_streams_setup(struct cx18 *cx)
+{
+       int type;
+
+       /* Setup V4L2 Devices */
+       for (type = 0; type < CX18_MAX_STREAMS; type++) {
+               /* Prepare device */
+               if (cx18_prep_dev(cx, type))
+                       break;
+
+               /* Allocate Stream */
+               if (cx18_stream_alloc(&cx->streams[type]))
+                       break;
+       }
+       if (type == CX18_MAX_STREAMS)
+               return 0;
+
+       /* One or more streams could not be initialized. Clean 'em all up. */
+       cx18_streams_cleanup(cx);
+       return -ENOMEM;
+}
+
+static int cx18_reg_dev(struct cx18 *cx, int type)
+{
+       struct cx18_stream *s = &cx->streams[type];
+       int vfl_type = cx18_stream_info[type].vfl_type;
+       int minor;
+
+       /* TODO: Shouldn't this be a VFL_TYPE_TRANSPORT or something?
+        * We need a VFL_TYPE_TS defined.
+        */
+       if (strcmp("TS", s->name) == 0) {
+               /* just return if no DVB is supported */
+               if ((cx->card->hw_all & CX18_HW_DVB) == 0)
+                       return 0;
+               if (cx18_dvb_register(s) < 0) {
+                       CX18_ERR("DVB failed to register\n");
+                       return -EINVAL;
+               }
+       }
+
+       if (s->v4l2dev == NULL)
+               return 0;
+
+       minor = s->v4l2dev->minor;
+
+       /* Register device. First try the desired minor, then any free one. */
+       if (video_register_device(s->v4l2dev, vfl_type, minor) &&
+                       video_register_device(s->v4l2dev, vfl_type, -1)) {
+               CX18_ERR("Couldn't register v4l2 device for %s minor %d\n",
+                       s->name, minor);
+               video_device_release(s->v4l2dev);
+               s->v4l2dev = NULL;
+               return -ENOMEM;
+       }
+       minor = s->v4l2dev->minor;
+
+       switch (vfl_type) {
+       case VFL_TYPE_GRABBER:
+               CX18_INFO("Registered device video%d for %s (%d MB)\n",
+                       minor, s->name, cx->options.megabytes[type]);
+               break;
+
+       case VFL_TYPE_RADIO:
+               CX18_INFO("Registered device radio%d for %s\n",
+                       minor - MINOR_VFL_TYPE_RADIO_MIN, s->name);
+               break;
+
+       case VFL_TYPE_VBI:
+               if (cx->options.megabytes[type])
+                       CX18_INFO("Registered device vbi%d for %s (%d MB)\n",
+                               minor - MINOR_VFL_TYPE_VBI_MIN,
+                               s->name, cx->options.megabytes[type]);
+               else
+                       CX18_INFO("Registered device vbi%d for %s\n",
+                               minor - MINOR_VFL_TYPE_VBI_MIN, s->name);
+               break;
+       }
+
+       return 0;
+}
+
+/* Register v4l2 devices */
+int cx18_streams_register(struct cx18 *cx)
+{
+       int type;
+       int err = 0;
+
+       /* Register V4L2 devices */
+       for (type = 0; type < CX18_MAX_STREAMS; type++)
+               err |= cx18_reg_dev(cx, type);
+
+       if (err == 0)
+               return 0;
+
+       /* One or more streams could not be initialized. Clean 'em all up. */
+       cx18_streams_cleanup(cx);
+       return -ENOMEM;
+}
+
+/* Unregister v4l2 devices */
+void cx18_streams_cleanup(struct cx18 *cx)
+{
+       struct video_device *vdev;
+       int type;
+
+       /* Teardown all streams */
+       for (type = 0; type < CX18_MAX_STREAMS; type++) {
+               if (cx->streams[type].dvb.enabled)
+                       cx18_dvb_unregister(&cx->streams[type]);
+
+               vdev = cx->streams[type].v4l2dev;
+
+               cx->streams[type].v4l2dev = NULL;
+               if (vdev == NULL)
+                       continue;
+
+               cx18_stream_free(&cx->streams[type]);
+
+               /* Unregister device */
+               video_unregister_device(vdev);
+       }
+}
+
+static void cx18_vbi_setup(struct cx18_stream *s)
+{
+       struct cx18 *cx = s->cx;
+       int raw = cx->vbi.sliced_in->service_set == 0;
+       u32 data[CX2341X_MBOX_MAX_DATA];
+       int lines;
+
+       if (cx->is_60hz) {
+               cx->vbi.count = 12;
+               cx->vbi.start[0] = 10;
+               cx->vbi.start[1] = 273;
+       } else {        /* PAL/SECAM */
+               cx->vbi.count = 18;
+               cx->vbi.start[0] = 6;
+               cx->vbi.start[1] = 318;
+       }
+
+       /* setup VBI registers */
+       cx18_av_cmd(cx, VIDIOC_S_FMT, &cx->vbi.in);
+
+       /* Determine the number of lines and the total number of VBI bytes.
+          A raw line takes 1443 bytes: 2 * 720 + 4-byte frame header - 1;
+          the '- 1' byte is probably an unused U or V byte.
+          A sliced line takes 51 bytes: 4-byte frame header, 4-byte internal
+          header, 42 data bytes + checksum (to be confirmed). */
+       if (raw) {
+               lines = cx->vbi.count * 2;
+       } else {
+               lines = cx->is_60hz ? 24 : 38;
+               if (cx->is_60hz)
+                       lines += 2;
+       }
+
+       cx->vbi.enc_size = lines *
+               (raw ? cx->vbi.raw_size : cx->vbi.sliced_size);
+
+       data[0] = s->handle;
+       /* Lines per field */
+       data[1] = (lines / 2) | ((lines / 2) << 16);
+       /* bytes per line */
+       data[2] = (raw ? cx->vbi.raw_size : cx->vbi.sliced_size);
+       /* A VBI interrupt arrives every X frames
+          (frames as in 25 or 30 fps) */
+       data[3] = 1;
+       /* Setup VBI for the cx25840 digitizer */
+       if (raw) {
+               data[4] = 0x20602060;
+               data[5] = 0x30703070;
+       } else {
+               data[4] = 0xB0F0B0F0;
+               data[5] = 0xA0E0A0E0;
+       }
+
+       CX18_DEBUG_INFO("Setup VBI h: %d lines %x bpl %d fr %d %x %x\n",
+                       data[0], data[1], data[2], data[3], data[4], data[5]);
+
+       if (s->type == CX18_ENC_STREAM_TYPE_VBI)
+               cx18_api(cx, CX18_CPU_SET_RAW_VBI_PARAM, 6, data);
+}
+
+int cx18_start_v4l2_encode_stream(struct cx18_stream *s)
+{
+       u32 data[MAX_MB_ARGUMENTS];
+       struct cx18 *cx = s->cx;
+       struct list_head *p;
+       int ts = 0;
+       int captype = 0;
+
+       if (s->v4l2dev == NULL && s->dvb.enabled == 0)
+               return -EINVAL;
+
+       CX18_DEBUG_INFO("Start encoder stream %s\n", s->name);
+
+       switch (s->type) {
+       case CX18_ENC_STREAM_TYPE_MPG:
+               captype = CAPTURE_CHANNEL_TYPE_MPEG;
+               cx->mpg_data_received = cx->vbi_data_inserted = 0;
+               cx->dualwatch_jiffies = jiffies;
+               cx->dualwatch_stereo_mode = cx->params.audio_properties & 0x300;
+               cx->search_pack_header = 0;
+               break;
+
+       case CX18_ENC_STREAM_TYPE_TS:
+               captype = CAPTURE_CHANNEL_TYPE_TS;
+               ts = 1;
+               break;
+       case CX18_ENC_STREAM_TYPE_YUV:
+               captype = CAPTURE_CHANNEL_TYPE_YUV;
+               break;
+       case CX18_ENC_STREAM_TYPE_PCM:
+               captype = CAPTURE_CHANNEL_TYPE_PCM;
+               break;
+       case CX18_ENC_STREAM_TYPE_VBI:
+               captype = cx->vbi.sliced_in->service_set ?
+                   CAPTURE_CHANNEL_TYPE_SLICED_VBI : CAPTURE_CHANNEL_TYPE_VBI;
+               cx->vbi.frame = 0;
+               cx->vbi.inserted_frame = 0;
+               memset(cx->vbi.sliced_mpeg_size,
+                       0, sizeof(cx->vbi.sliced_mpeg_size));
+               break;
+       default:
+               return -EINVAL;
+       }
+       s->buffers_stolen = 0;
+
+       /* mute/unmute video */
+       cx18_vapi(cx, CX18_CPU_SET_VIDEO_MUTE, 2,
+                 s->handle, !!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags));
+
+       /* Clear Streamoff flags in case left from last capture */
+       clear_bit(CX18_F_S_STREAMOFF, &s->s_flags);
+
+       cx18_vapi_result(cx, data, CX18_CREATE_TASK, 1, CPU_CMD_MASK_CAPTURE);
+       s->handle = data[0];
+       cx18_vapi(cx, CX18_CPU_SET_CHANNEL_TYPE, 2, s->handle, captype);
+
+       if (atomic_read(&cx->capturing) == 0 && !ts) {
+               /* Stuff from Windows, we don't know what it is */
+               cx18_vapi(cx, CX18_CPU_SET_VER_CROP_LINE, 2, s->handle, 0);
+               cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 3, s->handle, 3, 1);
+               cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 3, s->handle, 8, 0);
+               cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 3, s->handle, 4, 1);
+               cx18_vapi(cx, CX18_CPU_SET_MISC_PARAMETERS, 2, s->handle, 12);
+
+               cx18_vapi(cx, CX18_CPU_SET_CAPTURE_LINE_NO, 3,
+                              s->handle, cx->digitizer, cx->digitizer);
+
+               /* Setup VBI */
+               if (cx->v4l2_cap & V4L2_CAP_VBI_CAPTURE)
+                       cx18_vbi_setup(s);
+
+               /* assign program index info.
+                  Mask 7: select I/P/B, Num_req: 400 max */
+               cx18_vapi_result(cx, data, CX18_CPU_SET_INDEXTABLE, 1, 0);
+
+               /* Setup API for Stream */
+               cx2341x_update(cx, cx18_api_func, NULL, &cx->params);
+       }
+
+       if (atomic_read(&cx->capturing) == 0) {
+               clear_bit(CX18_F_I_EOS, &cx->i_flags);
+               write_reg(7, CX18_DSP0_INTERRUPT_MASK);
+       }
+
+       cx18_vapi(cx, CX18_CPU_DE_SET_MDL_ACK, 3, s->handle,
+               (void *)&cx->scb->cpu_mdl_ack[s->type][0] - cx->enc_mem,
+               (void *)&cx->scb->cpu_mdl_ack[s->type][1] - cx->enc_mem);
+
+       list_for_each(p, &s->q_free.list) {
+               struct cx18_buffer *buf = list_entry(p, struct cx18_buffer, list);
+
+               writel(buf->dma_handle, &cx->scb->cpu_mdl[buf->id].paddr);
+               writel(s->buf_size, &cx->scb->cpu_mdl[buf->id].length);
+               cx18_vapi(cx, CX18_CPU_DE_SET_MDL, 5, s->handle,
+                       (void *)&cx->scb->cpu_mdl[buf->id] - cx->enc_mem, 1,
+                       buf->id, s->buf_size);
+       }
+       /* begin_capture */
+       if (cx18_vapi(cx, CX18_CPU_CAPTURE_START, 1, s->handle)) {
+               CX18_DEBUG_WARN("Error starting capture!\n");
+               cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
+               return -EINVAL;
+       }
+
+       /* you're live! sit back and await interrupts :) */
+       atomic_inc(&cx->capturing);
+       return 0;
+}
+
+void cx18_stop_all_captures(struct cx18 *cx)
+{
+       int i;
+
+       for (i = CX18_MAX_STREAMS - 1; i >= 0; i--) {
+               struct cx18_stream *s = &cx->streams[i];
+
+               if (s->v4l2dev == NULL && s->dvb.enabled == 0)
+                       continue;
+               if (test_bit(CX18_F_S_STREAMING, &s->s_flags))
+                       cx18_stop_v4l2_encode_stream(s, 0);
+       }
+}
+
+int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end)
+{
+       struct cx18 *cx = s->cx;
+       unsigned long then;
+
+       if (s->v4l2dev == NULL && s->dvb.enabled == 0)
+               return -EINVAL;
+
+       /* This function assumes that you are allowed to stop the capture
+          and that we are actually capturing */
+
+       CX18_DEBUG_INFO("Stop Capture\n");
+
+       if (atomic_read(&cx->capturing) == 0)
+               return 0;
+
+       if (s->type == CX18_ENC_STREAM_TYPE_MPG)
+               cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 2, s->handle, !gop_end);
+       else
+               cx18_vapi(cx, CX18_CPU_CAPTURE_STOP, 1, s->handle);
+
+       then = jiffies;
+
+       if (s->type == CX18_ENC_STREAM_TYPE_MPG && gop_end) {
+               CX18_INFO("ignoring gop_end: not (yet?) supported by the firmware\n");
+       }
+
+       atomic_dec(&cx->capturing);
+
+       /* Clear capture and no-read bits */
+       clear_bit(CX18_F_S_STREAMING, &s->s_flags);
+
+       cx18_vapi(cx, CX18_DESTROY_TASK, 1, s->handle);
+       s->handle = 0xffffffff;
+
+       if (atomic_read(&cx->capturing) > 0)
+               return 0;
+
+       write_reg(5, CX18_DSP0_INTERRUPT_MASK);
+       wake_up(&s->waitq);
+
+       return 0;
+}
+
+u32 cx18_find_handle(struct cx18 *cx)
+{
+       int i;
+
+       /* find first available handle to be used for global settings */
+       for (i = 0; i < CX18_MAX_STREAMS; i++) {
+               struct cx18_stream *s = &cx->streams[i];
+
+               if (s->v4l2dev && s->handle)
+                       return s->handle;
+       }
+       return 0;
+}
diff --git a/drivers/media/video/cx18/cx18-streams.h b/drivers/media/video/cx18/cx18-streams.h
new file mode 100644 (file)
index 0000000..8c7ba7d
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ *  cx18 init/start/stop/exit stream functions
+ *
+ *  Derived from ivtv-streams.h
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+u32 cx18_find_handle(struct cx18 *cx);
+int cx18_streams_setup(struct cx18 *cx);
+int cx18_streams_register(struct cx18 *cx);
+void cx18_streams_cleanup(struct cx18 *cx);
+
+/* Capture related */
+int cx18_start_v4l2_encode_stream(struct cx18_stream *s);
+int cx18_stop_v4l2_encode_stream(struct cx18_stream *s, int gop_end);
+
+void cx18_stop_all_captures(struct cx18 *cx);
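
The start/stop pair declared above relies on cx->capturing as a plain reference count: the first stream to start does the one-time global setup (interrupt mask, VBI setup, cx2341x parameters) and the last one to stop re-arms the interrupt mask. A standalone C11 model of that counting pattern, for illustration only, not driver code:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int capturing;    /* stands in for cx->capturing */

    static void start_stream(const char *name)
    {
        if (atomic_load(&capturing) == 0)
            printf("%s: first capture - one-time global setup\n", name);
        atomic_fetch_add(&capturing, 1);
    }

    static void stop_stream(const char *name)
    {
        /* fetch_sub() returns the old value; 1 means we were the last user */
        if (atomic_fetch_sub(&capturing, 1) == 1)
            printf("%s: last capture stopped - global teardown\n", name);
    }

    int main(void)
    {
        start_stream("MPG");
        start_stream("VBI");
        stop_stream("VBI");
        stop_stream("MPG");
        return 0;
    }
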
diff --git a/drivers/media/video/cx18/cx18-vbi.c b/drivers/media/video/cx18/cx18-vbi.c
new file mode 100644 (file)
index 0000000..22e76ee
--- /dev/null
@@ -0,0 +1,208 @@
+/*
+ *  cx18 Vertical Blank Interval support functions
+ *
+ *  Derived from ivtv-vbi.c
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-vbi.h"
+#include "cx18-ioctl.h"
+#include "cx18-queue.h"
+#include "cx18-av-core.h"
+
+static void copy_vbi_data(struct cx18 *cx, int lines, u32 pts_stamp)
+{
+       int line = 0;
+       int i;
+       u32 linemask[2] = { 0, 0 };
+       unsigned short size;
+       static const u8 mpeg_hdr_data[] = {
+               0x00, 0x00, 0x01, 0xba, 0x44, 0x00, 0x0c, 0x66,
+               0x24, 0x01, 0x01, 0xd1, 0xd3, 0xfa, 0xff, 0xff,
+               0x00, 0x00, 0x01, 0xbd, 0x00, 0x1a, 0x84, 0x80,
+               0x07, 0x21, 0x00, 0x5d, 0x63, 0xa7, 0xff, 0xff
+       };
+       const int sd = sizeof(mpeg_hdr_data);   /* start of vbi data */
+       int idx = cx->vbi.frame % CX18_VBI_FRAMES;
+       u8 *dst = &cx->vbi.sliced_mpeg_data[idx][0];
+
+       for (i = 0; i < lines; i++) {
+               struct v4l2_sliced_vbi_data *sdata = cx->vbi.sliced_data + i;
+               int f, l;
+
+               if (sdata->id == 0)
+                       continue;
+
+               l = sdata->line - 6;
+               f = sdata->field;
+               if (f)
+                       l += 18;
+               if (l < 32)
+                       linemask[0] |= (1 << l);
+               else
+                       linemask[1] |= (1 << (l - 32));
+               dst[sd + 12 + line * 43] = cx18_service2vbi(sdata->id);
+               memcpy(dst + sd + 12 + line * 43 + 1, sdata->data, 42);
+               line++;
+       }
+       memcpy(dst, mpeg_hdr_data, sizeof(mpeg_hdr_data));
+       if (line == 36) {
+               /* All lines are used, so there is no space for the linemask
+                  (the max size of the VBI data is 36 * 43 + 4 bytes).
+                  So in this case we use the magic number 'ITV0'. */
+               memcpy(dst + sd, "ITV0", 4);
+               memcpy(dst + sd + 4, dst + sd + 12, line * 43);
+               size = 4 + ((43 * line + 3) & ~3);
+       } else {
+               memcpy(dst + sd, "cx0", 4);
+               memcpy(dst + sd + 4, &linemask[0], 8);
+               size = 12 + ((43 * line + 3) & ~3);
+       }
+       dst[4+16] = (size + 10) >> 8;
+       dst[5+16] = (size + 10) & 0xff;
+       dst[9+16] = 0x21 | ((pts_stamp >> 29) & 0x6);
+       dst[10+16] = (pts_stamp >> 22) & 0xff;
+       dst[11+16] = 1 | ((pts_stamp >> 14) & 0xff);
+       dst[12+16] = (pts_stamp >> 7) & 0xff;
+       dst[13+16] = 1 | ((pts_stamp & 0x7f) << 1);
+       cx->vbi.sliced_mpeg_size[idx] = sd + size;
+}
+
+/* Compress raw VBI format, removes leading SAV codes and surplus space
+   after the field.
+   Returns new compressed size. */
+static u32 compress_raw_buf(struct cx18 *cx, u8 *buf, u32 size)
+{
+       u32 line_size = cx->vbi.raw_decoder_line_size;
+       u32 lines = cx->vbi.count;
+       u8 sav1 = cx->vbi.raw_decoder_sav_odd_field;
+       u8 sav2 = cx->vbi.raw_decoder_sav_even_field;
+       u8 *q = buf;
+       u8 *p;
+       int i;
+
+       for (i = 0; i < lines; i++) {
+               p = buf + i * line_size;
+
+               /* Look for SAV code */
+               if (p[0] != 0xff || p[1] || p[2] ||
+                   (p[3] != sav1 && p[3] != sav2))
+                       break;
+               memcpy(q, p + 4, line_size - 4);
+               q += line_size - 4;
+       }
+       return lines * (line_size - 4);
+}
+
+
+/* Compress sliced VBI data: pack all found sliced blocks next to one another.
+   Returns the updated number of sliced lines */
+static u32 compress_sliced_buf(struct cx18 *cx, u32 line, u8 *buf,
+                              u32 size, u8 sav)
+{
+       u32 line_size = cx->vbi.sliced_decoder_line_size;
+       struct v4l2_decode_vbi_line vbi;
+       int i;
+
+       /* find the first valid line */
+       for (i = 0; i < size; i++, buf++) {
+               if (buf[0] == 0xff && !buf[1] && !buf[2] && buf[3] == sav)
+                       break;
+       }
+
+       size -= i;
+       if (size < line_size)
+               return line;
+       for (i = 0; i < size / line_size; i++) {
+               u8 *p = buf + i * line_size;
+
+               /* Look for SAV code  */
+               if (p[0] != 0xff || p[1] || p[2] || p[3] != sav)
+                       continue;
+               vbi.p = p + 4;
+               cx18_av_cmd(cx, VIDIOC_INT_DECODE_VBI_LINE, &vbi);
+               if (vbi.type) {
+                       cx->vbi.sliced_data[line].id = vbi.type;
+                       cx->vbi.sliced_data[line].field = vbi.is_second_field;
+                       cx->vbi.sliced_data[line].line = vbi.line;
+                       memcpy(cx->vbi.sliced_data[line].data, vbi.p, 42);
+                       line++;
+               }
+       }
+       return line;
+}
+
+void cx18_process_vbi_data(struct cx18 *cx, struct cx18_buffer *buf,
+                          u64 pts_stamp, int streamtype)
+{
+       u8 *p = (u8 *) buf->buf;
+       u32 size = buf->bytesused;
+       int lines;
+
+       if (streamtype != CX18_ENC_STREAM_TYPE_VBI)
+               return;
+
+       /* Raw VBI data */
+       if (cx->vbi.sliced_in->service_set == 0) {
+               u8 type;
+
+               cx18_buf_swap(buf);
+
+               type = p[3];
+
+               size = buf->bytesused = compress_raw_buf(cx, p, size);
+
+               /* second field of the frame? */
+               if (type == cx->vbi.raw_decoder_sav_even_field) {
+                       /* Dirty hack needed for backwards
+                          compatibility of old VBI software. */
+                       p += size - 4;
+                       memcpy(p, &cx->vbi.frame, 4);
+                       cx->vbi.frame++;
+               }
+               return;
+       }
+
+       /* Sliced VBI data with data insertion */
+       cx18_buf_swap(buf);
+
+       /* first field */
+       lines = compress_sliced_buf(cx, 0, p, size / 2,
+                       cx->vbi.sliced_decoder_sav_odd_field);
+       /* second field */
+       /* experimentation shows that the second half does not always
+          begin at the exact address. So start a bit earlier
+          (hence 32). */
+       lines = compress_sliced_buf(cx, lines, p + size / 2 - 32,
+                       size / 2 + 32, cx->vbi.sliced_decoder_sav_even_field);
+       /* always return at least one empty line */
+       if (lines == 0) {
+               cx->vbi.sliced_data[0].id = 0;
+               cx->vbi.sliced_data[0].line = 0;
+               cx->vbi.sliced_data[0].field = 0;
+               lines = 1;
+       }
+       buf->bytesused = size = lines * sizeof(cx->vbi.sliced_data[0]);
+       memcpy(p, &cx->vbi.sliced_data[0], size);
+
+       if (cx->vbi.insert_mpeg)
+               copy_vbi_data(cx, lines, pts_stamp);
+       cx->vbi.frame++;
+}
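
The dst[9+16]..dst[13+16] assignments in copy_vbi_data() above spread a PTS over five bytes in the usual MPEG PES layout ('0010' prefix plus marker bits). A standalone sketch of that byte layout, using the same shifts and masks as the driver (which passes a 32-bit pts_stamp, so the top PTS bit is effectively zero):

    #include <stdint.h>
    #include <stdio.h>

    static void encode_pes_pts(uint8_t out[5], uint64_t pts)
    {
        out[0] = 0x21 | ((pts >> 29) & 0x6);    /* '0010' prefix, PTS[31:30], marker */
        out[1] = (pts >> 22) & 0xff;            /* PTS[29:22] */
        out[2] = 1 | ((pts >> 14) & 0xff);      /* PTS[21:14], low bit forced to marker */
        out[3] = (pts >> 7) & 0xff;             /* PTS[14:7] */
        out[4] = 1 | ((pts & 0x7f) << 1);       /* PTS[6:0], marker */
    }

    int main(void)
    {
        uint8_t hdr[5];

        encode_pes_pts(hdr, 90000);     /* one second at the 90 kHz PTS clock */
        printf("%02x %02x %02x %02x %02x\n",
               hdr[0], hdr[1], hdr[2], hdr[3], hdr[4]);
        return 0;
    }
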
diff --git a/drivers/media/video/cx18/cx18-vbi.h b/drivers/media/video/cx18/cx18-vbi.h
new file mode 100644 (file)
index 0000000..c56ff7d
--- /dev/null
@@ -0,0 +1,26 @@
+/*
+ *  cx18 Vertical Blank Interval support functions
+ *
+ *  Derived from ivtv-vbi.h
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+void cx18_process_vbi_data(struct cx18 *cx, struct cx18_buffer *buf,
+                          u64 pts_stamp, int streamtype);
+int cx18_used_line(struct cx18 *cx, int line, int field);
diff --git a/drivers/media/video/cx18/cx18-version.h b/drivers/media/video/cx18/cx18-version.h
new file mode 100644 (file)
index 0000000..d5c7a6f
--- /dev/null
@@ -0,0 +1,34 @@
+/*
+ *  cx18 driver version information
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#ifndef CX18_VERSION_H
+#define CX18_VERSION_H
+
+#define CX18_DRIVER_NAME "cx18"
+#define CX18_DRIVER_VERSION_MAJOR 1
+#define CX18_DRIVER_VERSION_MINOR 0
+#define CX18_DRIVER_VERSION_PATCHLEVEL 0
+
+#define CX18_VERSION __stringify(CX18_DRIVER_VERSION_MAJOR) "." __stringify(CX18_DRIVER_VERSION_MINOR) "." __stringify(CX18_DRIVER_VERSION_PATCHLEVEL)
+#define CX18_DRIVER_VERSION KERNEL_VERSION(CX18_DRIVER_VERSION_MAJOR, \
+       CX18_DRIVER_VERSION_MINOR, CX18_DRIVER_VERSION_PATCHLEVEL)
+
+#endif
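
For reference, a user-space sketch of what the two version macros above evaluate to, with local stand-ins for __stringify() and KERNEL_VERSION() (assumed to match the kernel's definitions):

    #include <stdio.h>

    #define __stringify_1(x)        #x
    #define __stringify(x)          __stringify_1(x)
    #define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

    #define CX18_DRIVER_VERSION_MAJOR 1
    #define CX18_DRIVER_VERSION_MINOR 0
    #define CX18_DRIVER_VERSION_PATCHLEVEL 0

    #define CX18_VERSION __stringify(CX18_DRIVER_VERSION_MAJOR) "." \
            __stringify(CX18_DRIVER_VERSION_MINOR) "." \
            __stringify(CX18_DRIVER_VERSION_PATCHLEVEL)

    int main(void)
    {
        printf("CX18_VERSION        = %s\n", CX18_VERSION);      /* "1.0.0" */
        printf("CX18_DRIVER_VERSION = 0x%06x\n",
               KERNEL_VERSION(CX18_DRIVER_VERSION_MAJOR,
                              CX18_DRIVER_VERSION_MINOR,
                              CX18_DRIVER_VERSION_PATCHLEVEL)); /* 0x010000 */
        return 0;
    }
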
diff --git a/drivers/media/video/cx18/cx18-video.c b/drivers/media/video/cx18/cx18-video.c
new file mode 100644 (file)
index 0000000..2e5c419
--- /dev/null
@@ -0,0 +1,45 @@
+/*
+ *  cx18 video interface functions
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#include "cx18-driver.h"
+#include "cx18-video.h"
+#include "cx18-av-core.h"
+#include "cx18-cards.h"
+
+void cx18_video_set_io(struct cx18 *cx)
+{
+       struct v4l2_routing route;
+       int inp = cx->active_input;
+       u32 type;
+
+       route.input = cx->card->video_inputs[inp].video_input;
+       route.output = 0;
+       cx18_av_cmd(cx, VIDIOC_INT_S_VIDEO_ROUTING, &route);
+
+       type = cx->card->video_inputs[inp].video_type;
+
+       if (type == CX18_CARD_INPUT_VID_TUNER)
+               route.input = 0;  /* Tuner */
+       else if (type < CX18_CARD_INPUT_COMPOSITE1)
+               route.input = 2;  /* S-Video */
+       else
+               route.input = 1;  /* Composite */
+}
diff --git a/drivers/media/video/cx18/cx18-video.h b/drivers/media/video/cx18/cx18-video.h
new file mode 100644 (file)
index 0000000..529006a
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ *  cx18 video interface functions
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+void cx18_video_set_io(struct cx18 *cx);
diff --git a/drivers/media/video/cx18/cx23418.h b/drivers/media/video/cx18/cx23418.h
new file mode 100644 (file)
index 0000000..33f78da
--- /dev/null
@@ -0,0 +1,458 @@
+/*
+ *  cx18 header containing common defines.
+ *
+ *  Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ *  02111-1307  USA
+ */
+
+#ifndef CX23418_H
+#define CX23418_H
+
+#include <media/cx2341x.h>
+
+#define MGR_CMD_MASK                           0x40000000
+/* The MSB of the command code indicates that this is the completion of a
+   command */
+#define MGR_CMD_MASK_ACK                       (MGR_CMD_MASK | 0x80000000)
+
+/* Description: This command creates a new instance of a certain task
+   IN[0]  - Task ID. This is one of the XPU_CMD_MASK_YYY where XPU is
+           the processor on which the task YYY will be created
+   OUT[0] - Task handle. This handle is passed along with commands to
+           dispatch to the right instance of the task
+   ReturnCode - One of the ERR_SYS_... */
+#define CX18_CREATE_TASK                       (MGR_CMD_MASK | 0x0001)
+
+/* Description: This command destroys an instance of a task
+   IN[0] - Task handle. Handle of the task to destroy
+   ReturnCode - One of the ERR_SYS_... */
+#define CX18_DESTROY_TASK                      (MGR_CMD_MASK | 0x0002)
+
+/* All commands for CPU have the following mask set */
+#define CPU_CMD_MASK                           0x20000000
+#define CPU_CMD_MASK_ACK                       (CPU_CMD_MASK | 0x80000000)
+#define CPU_CMD_MASK_CAPTURE                   (CPU_CMD_MASK | 0x00020000)
+#define CPU_CMD_MASK_TS                        (CPU_CMD_MASK | 0x00040000)
+
+#define EPU_CMD_MASK                           0x02000000
+#define EPU_CMD_MASK_DEBUG                     (EPU_CMD_MASK | 0x000000)
+#define EPU_CMD_MASK_DE                        (EPU_CMD_MASK | 0x040000)
+
+/* Description: This command indicates that a Memory Descriptor List has been
+   filled with the requested channel type
+   IN[0] - Task handle. Handle of the task
+   IN[1] - Offset of the MDL_ACK from the beginning of the local DDR.
+   IN[2] - Number of CNXT_MDL_ACK structures in the array pointed to by IN[1]
+   ReturnCode - One of the ERR_DE_... */
+#define CX18_EPU_DMA_DONE                              (EPU_CMD_MASK_DE | 0x0001)
+
+/* Something interesting happened
+   IN[0] - A value to log
+   IN[1] - An offset of a string in the MiniMe memory;
+          0/zero/NULL means "I have nothing to say" */
+#define CX18_EPU_DEBUG                                 (EPU_CMD_MASK_DEBUG | 0x0003)
+
+/* Description: This command starts streaming with the set channel type
+   IN[0] - Task handle. Handle of the task to start
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_CAPTURE_START                 (CPU_CMD_MASK_CAPTURE | 0x0002)
+
+/* Description: This command stops streaming with the set channel type
+   IN[0] - Task handle. Handle of the task to stop
+   IN[1] - 0 = stop at end of GOP, 1 = stop at end of frame (MPEG only)
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_CAPTURE_STOP                  (CPU_CMD_MASK_CAPTURE | 0x0003)
+
+/* Description: This command pauses streaming with the set channel type
+   IN[0] - Task handle. Handle of the task to pause
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_CAPTURE_PAUSE                 (CPU_CMD_MASK_CAPTURE | 0x0007)
+
+/* Description: This command resumes streaming with the set channel type
+   IN[0] - Task handle. Handle of the task to resume
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_CAPTURE_RESUME                (CPU_CMD_MASK_CAPTURE | 0x0008)
+
+#define CAPTURE_CHANNEL_TYPE_NONE              0
+#define CAPTURE_CHANNEL_TYPE_MPEG              1
+#define CAPTURE_CHANNEL_TYPE_INDEX             2
+#define CAPTURE_CHANNEL_TYPE_YUV               3
+#define CAPTURE_CHANNEL_TYPE_PCM               4
+#define CAPTURE_CHANNEL_TYPE_VBI               5
+#define CAPTURE_CHANNEL_TYPE_SLICED_VBI                6
+#define CAPTURE_CHANNEL_TYPE_TS                        7
+#define CAPTURE_CHANNEL_TYPE_MAX               15
+
+/* Description: This command sets the channel type. This can only be done
+   when stopped.
+   IN[0] - Task handle. Handle of the task to start
+   IN[1] - Channel Type. See above.
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_CHANNEL_TYPE                      (CPU_CMD_MASK_CAPTURE + 1)
+
+/* Description: Set stream output type
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - type
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_STREAM_OUTPUT_TYPE                (CPU_CMD_MASK_CAPTURE | 0x0012)
+
+/* Description: Set video input resolution and frame rate
+   IN[0] - task handle
+   IN[1] - reserved
+   IN[2] - reserved
+   IN[3] - reserved
+   IN[4] - reserved
+   IN[5] - frame rate, 0 - 29.97f/s, 1 - 25f/s
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_VIDEO_IN                  (CPU_CMD_MASK_CAPTURE | 0x0004)
+
+/* Description: Set video bit rate and mux rate
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - video bit rate mode
+   IN[2] - video average rate
+   IN[3] - video peak rate
+   IN[4] - system mux rate
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_VIDEO_RATE                (CPU_CMD_MASK_CAPTURE | 0x0005)
+
+/* Description: Set video output resolution
+   IN[0] - task handle
+   IN[1] - horizontal size
+   IN[2] - vertical size
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_VIDEO_RESOLUTION          (CPU_CMD_MASK_CAPTURE | 0x0006)
+
+/* Description: This command sets filter parameters
+   IN[0] - Task handle. Handle of the task
+   IN[1] - type, 0 - temporal, 1 - spatial, 2 - median
+   IN[2] - mode,  temporal/spatial: 0 - disable, 1 - static, 2 - dynamic
+                       median: 0 = disable, 1 = horizontal, 2 = vertical,
+                               3 = horizontal/vertical, 4 = diagonal
+   IN[3] - strength, temporal 0 - 31, spatial 0 - 15
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_FILTER_PARAM              (CPU_CMD_MASK_CAPTURE | 0x0009)
+
+/* Description: This command sets the spatial filter type
+   IN[0] - Task handle.
+   IN[1] - luma type: 0 = disable, 1 = 1D horizontal only, 2 = 1D vertical only,
+                     3 = 2D H/V separable, 4 = 2D symmetric non-separable
+   IN[2] - chroma type: 0 = disable, 1 = 1D horizontal
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_SPATIAL_FILTER_TYPE       (CPU_CMD_MASK_CAPTURE | 0x000C)
+
+/* Description: This command sets coring levels for the median filter
+   IN[0] - Task handle.
+   IN[1] - luma_high
+   IN[2] - luma_low
+   IN[3] - chroma_high
+   IN[4] - chroma_low
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_MEDIAN_CORING             (CPU_CMD_MASK_CAPTURE | 0x000E)
+
+/* Description: This command sets the picture type mask for the index file
+   IN[0] -     0 = disable index file output
+                       1 = output I picture
+                       2 = P picture
+                       4 = B picture
+                       other = illegal */
+#define CX18_CPU_SET_INDEXTABLE                (CPU_CMD_MASK_CAPTURE | 0x0010)
+
+/* Description: Set audio parameters
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - audio parameter
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_AUDIO_PARAMETERS          (CPU_CMD_MASK_CAPTURE | 0x0011)
+
+/* Description: Set video mute
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - bit31-24: muteYvalue
+          bit23-16: muteUvalue
+          bit15-8:  muteVvalue
+          bit0:     1:mute, 0: unmute
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_VIDEO_MUTE                        (CPU_CMD_MASK_CAPTURE | 0x0013)
+
+/* Description: Set audio mute
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - mute/unmute
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_AUDIO_MUTE                        (CPU_CMD_MASK_CAPTURE | 0x0014)
+
+/* Description: Set miscellaneous encoder parameters
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - subType
+           SET_INITIAL_SCR                     1
+           SET_QUALITY_MODE            2
+           SET_VIM_PROTECT_MODE        3
+           SET_PTS_CORRECTION          4
+           SET_USB_FLUSH_MODE          5
+           SET_MERAQPAR_ENABLE         6
+           SET_NAV_PACK_INSERTION      7
+           SET_SCENE_CHANGE_ENABLE     8
+   IN[2] - parameter 1
+   IN[3] - parameter 2
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_MISC_PARAMETERS           (CPU_CMD_MASK_CAPTURE | 0x0015)
+
+/* Description: Set raw VBI parameters
+   IN[0] - Task handle
+   IN[1] - No. of input lines per field:
+                               bit[15:0]: field 1,
+                               bit[31:16]: field 2
+   IN[2] - No. of input bytes per line
+   IN[3] - No. of output frames per transfer
+   IN[4] - start code
+   IN[5] - stop code
+   ReturnCode */
+#define CX18_CPU_SET_RAW_VBI_PARAM             (CPU_CMD_MASK_CAPTURE | 0x0016)
+
+/* Description: Set capture line No.
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - height1
+   IN[2] - height2
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_CAPTURE_LINE_NO           (CPU_CMD_MASK_CAPTURE | 0x0017)
+
+/* Description: Set copyright
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - copyright
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_COPYRIGHT                 (CPU_CMD_MASK_CAPTURE | 0x0018)
+
+/* Description: Set audio PID
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - PID
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_AUDIO_PID                 (CPU_CMD_MASK_CAPTURE | 0x0019)
+
+/* Description: Set video PID
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - PID
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_VIDEO_PID                 (CPU_CMD_MASK_CAPTURE | 0x001A)
+
+/* Description: Set Vertical Crop Line
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - Line
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_VER_CROP_LINE             (CPU_CMD_MASK_CAPTURE | 0x001B)
+
+/* Description: Set GOP structure
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - M
+   IN[2] - N
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_GOP_STRUCTURE             (CPU_CMD_MASK_CAPTURE | 0x001C)
+
+/* Description: Set Scene Change Detection
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - scene change
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_SCENE_CHANGE_DETECTION    (CPU_CMD_MASK_CAPTURE | 0x001D)
+
+/* Description: Set Aspect Ratio
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - AspectRatio
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_ASPECT_RATIO              (CPU_CMD_MASK_CAPTURE | 0x001E)
+
+/* Description: Set Skip Input Frame
+   IN[0] - task handle. Handle of the task to start
+   IN[1] - skip input frames
+   ReturnCode - One of the ERR_CAPTURE_... */
+#define CX18_CPU_SET_SKIP_INPUT_FRAME          (CPU_CMD_MASK_CAPTURE | 0x001F)
+
+/* Description: Set sliced VBI parameters -
+   Note This API will only apply to MPEG and Sliced VBI Channels
+   IN[0] - Task handle
+   IN[1] - output type, 0 - CC, 1 - Moji, 2 - Teletext
+   IN[2] - start / stop line
+                       bit[15:0] start line number
+                       bit[31:16] stop line number
+   IN[3] - number of output frames per interrupt
+   IN[4] - VBI insertion mode
+                       bit 0:  output user data, 1 - enable
+                       bit 1:  output private stream, 1 - enable
+                       bit 2:  mux option, 0 - in GOP, 1 - in picture
+                       bit[7:0]        private stream ID
+   IN[5] - insertion period while mux option is in picture
+   ReturnCode - VBI data offset */
+#define CX18_CPU_SET_SLICED_VBI_PARAM          (CPU_CMD_MASK_CAPTURE | 0x0020)
+
+/* Description: Set the user data place holder
+   IN[0] - type of data (0 for user)
+   IN[1] - Stuffing period
+   IN[2] - ID data size in word (less than 10)
+   IN[3] - Pointer to ID buffer */
+#define CX18_CPU_SET_USERDATA_PLACE_HOLDER     (CPU_CMD_MASK_CAPTURE | 0x0021)
+
+
+/* Description:
+   In[0] Task Handle
+   return parameter:
+   Out[0]  Reserved
+   Out[1]  Video PTS bit[32:2] of last output video frame.
+   Out[2]  Video PTS bit[ 1:0] of last output video frame.
+   Out[3]  Hardware Video PTS counter bit[31:0],
+            these bits get incremented on every 90kHz clock tick.
+   Out[4]  Hardware Video PTS counter bit 32,
+            the top bit of the same 90 kHz PTS counter.
+   ReturnCode */
+#define CX18_CPU_GET_ENC_PTS                   (CPU_CMD_MASK_CAPTURE | 0x0022)
+
+/* Below is the list of commands related to the data exchange */
+#define CPU_CMD_MASK_DE                        (CPU_CMD_MASK | 0x040000)
+
+/* Description: This command provides the physical base address of the local
+   DDR as viewed by EPU
+   IN[0] - Physical offset where EPU has the local DDR mapped
+   ReturnCode - One of the ERR_DE_... */
+#define CPU_CMD_DE_SetBase                     (CPU_CMD_MASK_DE | 0x0001)
+
+/* Description: This command provides the offsets in the device memory where
+   the 2 cx18_mdl_ack blocks reside
+   IN[0] - Task handle. Handle of the task to start
+   IN[1] - Offset of the first cx18_mdl_ack from the beginning of the
+          local DDR.
+   IN[2] - Offset of the second cx18_mdl_ack from the beginning of the
+          local DDR.
+   ReturnCode - One of the ERR_DE_... */
+#define CX18_CPU_DE_SET_MDL_ACK                        (CPU_CMD_MASK_DE | 0x0002)
+
+/* Description: This command provides the offset to a Memory Descriptor List
+   IN[0] - Task handle. Handle of the task to start
+   IN[1] - Offset of the MDL from the beginning of the local DDR.
+   IN[2] - Number of cx18_mdl structures in the array pointed to by IN[1]
+   IN[3] - Buffer ID
+   IN[4] - Total buffer length
+   ReturnCode - One of the ERR_DE_... */
+#define CX18_CPU_DE_SET_MDL                    (CPU_CMD_MASK_DE | 0x0005)
+
+/* Description: This command requests return of all current Memory
+   Descriptor Lists to the driver
+   IN[0] - Task handle. Handle of the task to start
+   ReturnCode - One of the ERR_DE_... */
+/* #define CX18_CPU_DE_ReleaseMDL               (CPU_CMD_MASK_DE | 0x0006) */
+
+/* Description: This command signals the cpu that the data buffer has been
+   consumed and is ready for re-use.
+   IN[0] - Task handle. Handle of the task
+   IN[1] - Offset of the data block from the beginning of the local DDR.
+   IN[2] - Number of bytes in the data block
+   ReturnCode - One of the ERR_DE_... */
+/* #define CX18_CPU_DE_RELEASE_BUFFER           (CPU_CMD_MASK_DE | 0x0007) */
+
+/* No Error / Success */
+#define CNXT_OK                 0x000000
+
+/* Received unknown command */
+#define CXERR_UNK_CMD           0x000001
+
+/* First parameter in the command is invalid */
+#define CXERR_INVALID_PARAM1    0x000002
+
+/* Second parameter in the command is invalid */
+#define CXERR_INVALID_PARAM2    0x000003
+
+/* Device interface is not open/found */
+#define CXERR_DEV_NOT_FOUND     0x000004
+
+/* Requested function is not implemented/available */
+#define CXERR_NOTSUPPORTED      0x000005
+
+/* Invalid pointer is provided */
+#define CXERR_BADPTR            0x000006
+
+/* Unable to allocate memory */
+#define CXERR_NOMEM             0x000007
+
+/* Object/Link not found */
+#define CXERR_LINK              0x000008
+
+/* Device busy, command cannot be executed */
+#define CXERR_BUSY              0x000009
+
+/* File/device/handle is not open. */
+#define CXERR_NOT_OPEN          0x00000A
+
+/* Value is out of range */
+#define CXERR_OUTOFRANGE        0x00000B
+
+/* Buffer overflow */
+#define CXERR_OVERFLOW          0x00000C
+
+/* Version mismatch */
+#define CXERR_BADVER            0x00000D
+
+/* Operation timed out */
+#define CXERR_TIMEOUT           0x00000E
+
+/* Operation aborted */
+#define CXERR_ABORT             0x00000F
+
+/* Specified I2C device not found for read/write */
+#define CXERR_I2CDEV_NOTFOUND   0x000010
+
+/* Error in I2C data xfer (but I2C device is present) */
+#define CXERR_I2CDEV_XFERERR    0x000011
+
+/* Channel changing component not ready */
+#define CXERR_CHANNELNOTREADY   0x000012
+
+/* PPU (Presentation/Decoder) mail box is corrupted */
+#define CXERR_PPU_MB_CORRUPT    0x000013
+
+/* CPU (Capture/Encoder) mail box is corrupted */
+#define CXERR_CPU_MB_CORRUPT    0x000014
+
+/* APU (Audio) mail box is corrupted */
+#define CXERR_APU_MB_CORRUPT    0x000015
+
+/* Unable to open file for reading */
+#define CXERR_FILE_OPEN_READ    0x000016
+
+/* Unable to open file for writing */
+#define CXERR_FILE_OPEN_WRITE   0x000017
+
+/* Unable to find the I2C section specified */
+#define CXERR_I2C_BADSECTION    0x000018
+
+/* Error in I2C data xfer (but I2C device is present) */
+#define CXERR_I2CDEV_DATALOW    0x000019
+
+/* Error in I2C data xfer (but I2C device is present) */
+#define CXERR_I2CDEV_CLOCKLOW   0x00001A
+
+/* No Interrupt received from HW (for I2C access) */
+#define CXERR_NO_HW_I2C_INTR    0x00001B
+
+/* RPU is not ready to accept commands! */
+#define CXERR_RPU_NOT_READY     0x00001C
+
+/* RPU is not ready to accept commands! */
+#define CXERR_RPU_NO_ACK        0x00001D
+
+/* There are no buffers ready. Try again soon! */
+#define CXERR_NODATA_AGAIN      0x00001E
+
+/* The stream is stopping. Function not allowed now! */
+#define CXERR_STOPPING_STATUS   0x00001F
+
+/* Trying to access hardware when the power is turned OFF */
+#define CXERR_DEVPOWER_OFF      0x000020
+
+#endif /* CX23418_H */
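
Most of the commands documented above take their arguments as packed 32-bit words. As a standalone illustration (not part of the patch), the sketch below builds the IN[1] word for CX18_CPU_SET_VIDEO_MUTE from the documented bit layout; the driver in this series only ever sets the mute flag bit (see cx18_start_v4l2_encode_stream), and the Y/U/V values below are hypothetical.

    #include <assert.h>
    #include <stdint.h>

    /* bits 31:24 = mute Y value, 23:16 = mute U value,
     * bits 15:8  = mute V value, bit 0 = mute/unmute flag */
    static uint32_t cx18_video_mute_arg(uint8_t y, uint8_t u, uint8_t v, int mute)
    {
        return ((uint32_t)y << 24) | ((uint32_t)u << 16) |
               ((uint32_t)v << 8) | (mute ? 1 : 0);
    }

    int main(void)
    {
        /* Mute to black: Y=16, U=V=128, mute flag set */
        assert(cx18_video_mute_arg(16, 128, 128, 1) == 0x10808001);
        return 0;
    }
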
index ca5fbce3a9095bc07dca912ce74e19387f5a4f02..cadf936c367351fb249bef07ac1a8c42a547c987 100644 (file)
@@ -4,19 +4,19 @@ config VIDEO_CX23885
        select I2C_ALGOBIT
        select FW_LOADER
        select VIDEO_BTCX
-       select VIDEO_TUNER
+       select MEDIA_TUNER
        select VIDEO_TVEEPROM
        select VIDEO_IR
        select VIDEOBUF_DVB
        select VIDEO_CX25840
-       select DVB_TUNER_MT2131 if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_MT2131 if !DVB_FE_CUSTOMISE
        select DVB_S5H1409 if !DVB_FE_CUSTOMISE
        select DVB_LGDT330X if !DVB_FE_CUSTOMISE
        select DVB_PLL if !DVB_FE_CUSTOMISE
-       select TUNER_XC2028 if !DVB_FE_CUSTOMIZE
-       select TUNER_TDA8290 if !DVB_FE_CUSTOMIZE
-       select DVB_TDA18271 if !DVB_FE_CUSTOMIZE
-       select DVB_TUNER_XC5000 if !DVB_FE_CUSTOMIZE
+       select MEDIA_TUNER_XC2028 if !DVB_FE_CUSTOMIZE
+       select MEDIA_TUNER_TDA8290 if !DVB_FE_CUSTOMIZE
+       select MEDIA_TUNER_TDA18271 if !DVB_FE_CUSTOMIZE
+       select MEDIA_TUNER_XC5000 if !DVB_FE_CUSTOMIZE
        select DVB_TDA10048 if !DVB_FE_CUSTOMIZE
        ---help---
          This is a video4linux driver for Conexant 23885 based
index d7b0721af062611215e6b516ec5c86e970911414..29c23b44c13c95bd498c705c024024297f885914 100644 (file)
@@ -3,6 +3,7 @@ cx23885-objs    := cx23885-cards.o cx23885-video.o cx23885-vbi.o cx23885-core.o cx2
 obj-$(CONFIG_VIDEO_CX23885) += cx23885.o
 
 EXTRA_CFLAGS += -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
 
index 7fde678b2c4a5fbb0785af63a77cfbee86cb22d2..88823810497ca3c002fa53e25801551f911c30a4 100644 (file)
@@ -1209,7 +1209,8 @@ static int cx25840_command(struct i2c_client *client, unsigned int cmd,
 
 /* ----------------------------------------------------------------------- */
 
-static int cx25840_probe(struct i2c_client *client)
+static int cx25840_probe(struct i2c_client *client,
+                        const struct i2c_device_id *did)
 {
        struct cx25840_state *state;
        u32 id;
index 27635cdcbaf22d5457f419ff7e9b621dd77826ef..b0d7d6a7a4cc359dc725ee27b1ed3c5d87264d4e 100644 (file)
@@ -5,7 +5,7 @@ config VIDEO_CX88
        select FW_LOADER
        select VIDEO_BTCX
        select VIDEOBUF_DMA_SG
-       select VIDEO_TUNER
+       select MEDIA_TUNER
        select VIDEO_TVEEPROM
        select VIDEO_IR
        select VIDEO_WM8775 if VIDEO_HELPER_CHIPS_AUTO
@@ -57,7 +57,7 @@ config VIDEO_CX88_DVB
        select DVB_NXT200X if !DVB_FE_CUSTOMISE
        select DVB_CX24123 if !DVB_FE_CUSTOMISE
        select DVB_ISL6421 if !DVB_FE_CUSTOMISE
-       select TUNER_SIMPLE if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE
        select DVB_S5H1411 if !DVB_FE_CUSTOMISE
        ---help---
          This adds support for DVB/ATSC cards based on the
index 532cee35eb3c4f49962cc4b627792f18798a113a..6ec30f242578984a4563c7ce5cc789c57e266502 100644 (file)
@@ -10,5 +10,6 @@ obj-$(CONFIG_VIDEO_CX88_DVB) += cx88-dvb.o
 obj-$(CONFIG_VIDEO_CX88_VP3054) += cx88-vp3054-i2c.o
 
 EXTRA_CFLAGS += -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
index 2b6b283cda15b658c08d1841328350c80aa069f2..aeba26dc0a37ce69a8da26cd7fff176b139da54a 100644 (file)
@@ -57,6 +57,9 @@ MODULE_PARM_DESC(latency,"pci latency timer");
 /* ------------------------------------------------------------------ */
 /* board config info                                                  */
 
+/* If radio_type !=UNSET, radio_addr should be specified
+ */
+
 static const struct cx88_board cx88_boards[] = {
        [CX88_BOARD_UNKNOWN] = {
                .name           = "UNKNOWN/GENERIC",
@@ -2446,25 +2449,31 @@ EXPORT_SYMBOL_GPL(cx88_setup_xc3028);
 static void cx88_card_setup(struct cx88_core *core)
 {
        static u8 eeprom[256];
+       struct tuner_setup tun_setup;
+       unsigned int mode_mask = T_RADIO     |
+                                T_ANALOG_TV |
+                                T_DIGITAL_TV;
+
+       memset(&tun_setup, 0, sizeof(tun_setup));
 
        if (0 == core->i2c_rc) {
                core->i2c_client.addr = 0xa0 >> 1;
-               tveeprom_read(&core->i2c_client,eeprom,sizeof(eeprom));
+               tveeprom_read(&core->i2c_client, eeprom, sizeof(eeprom));
        }
 
        switch (core->boardnr) {
        case CX88_BOARD_HAUPPAUGE:
        case CX88_BOARD_HAUPPAUGE_ROSLYN:
                if (0 == core->i2c_rc)
-                       hauppauge_eeprom(core,eeprom+8);
+                       hauppauge_eeprom(core, eeprom+8);
                break;
        case CX88_BOARD_GDI:
                if (0 == core->i2c_rc)
-                       gdi_eeprom(core,eeprom);
+                       gdi_eeprom(core, eeprom);
                break;
        case CX88_BOARD_WINFAST2000XP_EXPERT:
                if (0 == core->i2c_rc)
-                       leadtek_eeprom(core,eeprom);
+                       leadtek_eeprom(core, eeprom);
                break;
        case CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1:
        case CX88_BOARD_HAUPPAUGE_NOVASE2_S1:
@@ -2474,7 +2483,7 @@ static void cx88_card_setup(struct cx88_core *core)
        case CX88_BOARD_HAUPPAUGE_HVR3000:
        case CX88_BOARD_HAUPPAUGE_HVR1300:
                if (0 == core->i2c_rc)
-                       hauppauge_eeprom(core,eeprom);
+                       hauppauge_eeprom(core, eeprom);
                break;
        case CX88_BOARD_KWORLD_DVBS_100:
                cx_write(MO_GP0_IO, 0x000007f8);
@@ -2555,6 +2564,35 @@ static void cx88_card_setup(struct cx88_core *core)
 
                cx88_call_i2c_clients(core, TUNER_SET_CONFIG, &tea5767_cfg);
        }
+       } /*end switch() */
+
+
+       /* Setup tuners */
+       if ((core->board.radio_type != UNSET)) {
+               tun_setup.mode_mask      = T_RADIO;
+               tun_setup.type           = core->board.radio_type;
+               tun_setup.addr           = core->board.radio_addr;
+               tun_setup.tuner_callback = cx88_tuner_callback;
+               cx88_call_i2c_clients(core, TUNER_SET_TYPE_ADDR, &tun_setup);
+               mode_mask &= ~T_RADIO;
+       }
+
+       if (core->board.tuner_type != TUNER_ABSENT) {
+               tun_setup.mode_mask      = mode_mask;
+               tun_setup.type           = core->board.tuner_type;
+               tun_setup.addr           = core->board.tuner_addr;
+               tun_setup.tuner_callback = cx88_tuner_callback;
+
+               cx88_call_i2c_clients(core, TUNER_SET_TYPE_ADDR, &tun_setup);
+       }
+
+       if (core->board.tda9887_conf) {
+               struct v4l2_priv_tun_config tda9887_cfg;
+
+               tda9887_cfg.tuner = TUNER_TDA9887;
+               tda9887_cfg.priv  = &core->board.tda9887_conf;
+
+               cx88_call_i2c_clients(core, TUNER_SET_CONFIG, &tda9887_cfg);
        }
 
        if (core->board.tuner_type == TUNER_XC2028) {
@@ -2572,6 +2610,7 @@ static void cx88_card_setup(struct cx88_core *core)
                            ctl.fname);
                cx88_call_i2c_clients(core, TUNER_SET_CONFIG, &xc2028_cfg);
        }
+       cx88_call_i2c_clients (core, TUNER_SET_STANDBY, NULL);
 }
 
 /* ------------------------------------------------------------------ */
@@ -2710,7 +2749,6 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
        if (TUNER_ABSENT != core->board.tuner_type)
                request_module("tuner");
 
-       cx88_call_i2c_clients (core, TUNER_SET_STANDBY, NULL);
        cx88_card_setup(core);
        cx88_ir_init(core, pci);
 
index c6b44732a082fd743b6fbbc3ac39f4ab511e514f..cb6a096069c7b0fc7a0fcc093dc225d018948ba5 100644 (file)
@@ -99,42 +99,11 @@ static int cx8800_bit_getsda(void *data)
 
 static int attach_inform(struct i2c_client *client)
 {
-       struct tuner_setup tun_setup;
        struct cx88_core *core = i2c_get_adapdata(client->adapter);
 
        dprintk(1, "%s i2c attach [addr=0x%x,client=%s]\n",
                client->driver->driver.name, client->addr, client->name);
-       if (!client->driver->command)
-               return 0;
-
-       if (core->board.radio_type != UNSET) {
-               if ((core->board.radio_addr==ADDR_UNSET)||(core->board.radio_addr==client->addr)) {
-                       tun_setup.mode_mask      = T_RADIO;
-                       tun_setup.type           = core->board.radio_type;
-                       tun_setup.addr           = core->board.radio_addr;
-                       tun_setup.tuner_callback = cx88_tuner_callback;
-                       client->driver->command (client, TUNER_SET_TYPE_ADDR, &tun_setup);
-               }
-       }
-       if (core->board.tuner_type != UNSET) {
-               if ((core->board.tuner_addr==ADDR_UNSET)||(core->board.tuner_addr==client->addr)) {
-
-                       tun_setup.mode_mask      = T_ANALOG_TV;
-                       tun_setup.type           = core->board.tuner_type;
-                       tun_setup.addr           = core->board.tuner_addr;
-                       tun_setup.tuner_callback = cx88_tuner_callback;
-                       client->driver->command (client,TUNER_SET_TYPE_ADDR, &tun_setup);
-               }
-       }
-
-       if (core->board.tda9887_conf) {
-               struct v4l2_priv_tun_config tda9887_cfg;
 
-               tda9887_cfg.tuner = TUNER_TDA9887;
-               tda9887_cfg.priv  = &core->board.tda9887_conf;
-
-               client->driver->command(client, TUNER_SET_CONFIG, &tda9887_cfg);
-       }
        return 0;
 }
 
index 9caffed2b6b8302637cde0ad128c791cc5ce364e..c7c2896bbd8b312823d0abd9898c3ed8ee8e455f 100644 (file)
@@ -1,7 +1,7 @@
 config VIDEO_EM28XX
        tristate "Empia EM28xx USB video capture support"
        depends on VIDEO_DEV && I2C && INPUT
-       select VIDEO_TUNER
+       select MEDIA_TUNER
        select VIDEO_TVEEPROM
        select VIDEO_IR
        select VIDEOBUF_VMALLOC
index 3d1c3cc337fe65c7407d728afab04990bdcd4e98..8137a8c94bfc365a1d63ffd677cf8cd1ed236d60 100644 (file)
@@ -8,6 +8,7 @@ obj-$(CONFIG_VIDEO_EM28XX_ALSA) += em28xx-alsa.o
 obj-$(CONFIG_VIDEO_EM28XX_DVB) += em28xx-dvb.o
 
 EXTRA_CFLAGS += -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
 
index b6171702c4d051824c5215f5b93b9ca8f339cbc2..eec115bf9517a54342ea749323f14208020cd05f 100644 (file)
@@ -4,7 +4,7 @@ config VIDEO_IVTV
        select I2C_ALGOBIT
        select FW_LOADER
        select VIDEO_IR
-       select VIDEO_TUNER
+       select MEDIA_TUNER
        select VIDEO_TVEEPROM
        select VIDEO_CX2341X
        select VIDEO_CX25840
index a0389014fa88dc8dc156263e140232794b156e45..26ce0d6eaee1a3c15a918783130b26eb713827da 100644 (file)
@@ -8,6 +8,7 @@ obj-$(CONFIG_VIDEO_IVTV) += ivtv.o
 obj-$(CONFIG_VIDEO_FB_IVTV) += ivtvfb.o
 
 EXTRA_CFLAGS += -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
 
index e908649ea37cc738fc2611fb4cb14cbfe3a90ea0..4fb8faefe2ced9f54d6c08bca9d8cdff42e10000 100644 (file)
@@ -40,6 +40,8 @@
 #define MSP_MONO   MSP_INPUT(MSP_IN_MONO, MSP_IN_TUNER1, \
                                MSP_DSP_IN_SCART, MSP_DSP_IN_SCART)
 
+#define V4L2_STD_NOT_MN (V4L2_STD_PAL|V4L2_STD_SECAM)
+
 /* usual i2c tuner addresses to probe */
 static struct ivtv_card_tuner_i2c ivtv_i2c_std = {
        .radio = { I2C_CLIENT_END },
@@ -298,7 +300,7 @@ static const struct ivtv_card ivtv_card_mpg600 = {
        .gpio_audio_detect = { .mask = 0x0900, .stereo = 0x0100 },
        .tuners = {
                /* The PAL tuner is confirmed */
-               { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FQ1216ME },
+               { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FQ1216ME },
                { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 },
        },
        .pci_list = ivtv_pci_mpg600,
@@ -339,7 +341,7 @@ static const struct ivtv_card ivtv_card_mpg160 = {
                              .lang1 = 0x0004, .lang2  = 0x0000, .both   = 0x0008 },
        .gpio_audio_detect = { .mask = 0x0900, .stereo = 0x0100 },
        .tuners = {
-               { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FQ1216ME },
+               { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FQ1216ME },
                { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 },
        },
        .pci_list = ivtv_pci_mpg160,
@@ -375,7 +377,7 @@ static const struct ivtv_card ivtv_card_pg600 = {
                { IVTV_CARD_INPUT_LINE_IN1,   CX25840_AUDIO_SERIAL },
        },
        .tuners = {
-               { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FQ1216ME },
+               { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FQ1216ME },
                { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FQ1286 },
        },
        .pci_list = ivtv_pci_pg600,
@@ -416,7 +418,7 @@ static const struct ivtv_card ivtv_card_avc2410 = {
           on the country/region setting of the user to decide which tuner
           is available. */
        .tuners = {
-               { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
+               { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
                { .std = V4L2_STD_ALL - V4L2_STD_NTSC_M_JP,
                        .tuner = TUNER_PHILIPS_FM1236_MK3 },
                { .std = V4L2_STD_NTSC_M_JP, .tuner = TUNER_PHILIPS_FQ1286 },
@@ -490,7 +492,7 @@ static const struct ivtv_card ivtv_card_tg5000tv = {
        .gpio_video_input  = { .mask = 0x0030, .tuner  = 0x0000,
                          .composite = 0x0010, .svideo = 0x0020 },
        .tuners = {
-               { .std = V4L2_STD_525_60, .tuner = TUNER_PHILIPS_FQ1286 },
+               { .std = V4L2_STD_525_60|V4L2_STD_MN, .tuner = TUNER_PHILIPS_FQ1286 },
        },
        .pci_list = ivtv_pci_tg5000tv,
        .i2c = &ivtv_i2c_std,
@@ -521,7 +523,7 @@ static const struct ivtv_card ivtv_card_va2000 = {
                { IVTV_CARD_INPUT_AUD_TUNER, MSP_TUNER },
        },
        .tuners = {
-               { .std = V4L2_STD_525_60, .tuner = TUNER_PHILIPS_FQ1286 },
+               { .std = V4L2_STD_525_60|V4L2_STD_MN, .tuner = TUNER_PHILIPS_FQ1286 },
        },
        .pci_list = ivtv_pci_va2000,
        .i2c = &ivtv_i2c_std,
@@ -565,7 +567,7 @@ static const struct ivtv_card ivtv_card_cx23416gyc = {
        .gpio_audio_freq   = { .mask = 0xc000, .f32000 = 0x0000,
                             .f44100 = 0x4000, .f48000 = 0x8000 },
        .tuners = {
-               { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
+               { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
                { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 },
        },
        .pci_list = ivtv_pci_cx23416gyc,
@@ -597,7 +599,7 @@ static const struct ivtv_card ivtv_card_cx23416gyc_nogr = {
        .gpio_audio_freq   = { .mask = 0xc000, .f32000 = 0x0000,
                             .f44100 = 0x4000, .f48000 = 0x8000 },
        .tuners = {
-               { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
+               { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
                { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 },
        },
        .i2c = &ivtv_i2c_std,
@@ -627,7 +629,7 @@ static const struct ivtv_card ivtv_card_cx23416gyc_nogrycs = {
        .gpio_audio_freq   = { .mask = 0xc000, .f32000 = 0x0000,
                             .f44100 = 0x4000, .f48000 = 0x8000 },
        .tuners = {
-               { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
+               { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
                { .std = V4L2_STD_ALL, .tuner = TUNER_PHILIPS_FM1236_MK3 },
        },
        .i2c = &ivtv_i2c_std,
@@ -667,7 +669,7 @@ static const struct ivtv_card ivtv_card_gv_mvprx = {
        .gpio_audio_input  = { .mask = 0xffff, .tuner  = 0x0200, .linein = 0x0300 },
        .tuners = {
                /* This card has the Panasonic VP27 tuner */
-               { .std = V4L2_STD_525_60, .tuner = TUNER_PANASONIC_VP27 },
+               { .std = V4L2_STD_525_60|V4L2_STD_MN, .tuner = TUNER_PANASONIC_VP27 },
        },
        .pci_list = ivtv_pci_gv_mvprx,
        .i2c = &ivtv_i2c_std,
@@ -704,7 +706,7 @@ static const struct ivtv_card ivtv_card_gv_mvprx2e = {
        .gpio_audio_input  = { .mask = 0xffff, .tuner  = 0x0200, .linein = 0x0300 },
        .tuners = {
                /* This card has the Panasonic VP27 tuner */
-               { .std = V4L2_STD_525_60, .tuner = TUNER_PANASONIC_VP27 },
+               { .std = V4L2_STD_525_60|V4L2_STD_MN, .tuner = TUNER_PANASONIC_VP27 },
        },
        .pci_list = ivtv_pci_gv_mvprx2e,
        .i2c = &ivtv_i2c_std,
@@ -739,7 +741,7 @@ static const struct ivtv_card ivtv_card_gotview_pci_dvd = {
        .gpio_init = { .direction = 0xf000, .initial_value = 0xA000 },
        .tuners = {
                /* This card has a Philips FQ1216ME MK3 tuner */
-               { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
+               { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
        },
        .pci_list = ivtv_pci_gotview_pci_dvd,
        .i2c = &ivtv_i2c_std,
@@ -778,7 +780,7 @@ static const struct ivtv_card ivtv_card_gotview_pci_dvd2 = {
        .gpio_audio_input  = { .mask = 0x0800, .tuner = 0, .linein = 0, .radio = 0x0800 },
        .tuners = {
                /* This card has a Philips FQ1216ME MK5 tuner */
-               { .std = V4L2_STD_625_50, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
+               { .std = V4L2_STD_NOT_MN, .tuner = TUNER_PHILIPS_FM1216ME_MK3 },
        },
        .pci_list = ivtv_pci_gotview_pci_dvd2,
        .i2c = &ivtv_i2c_std,
@@ -856,7 +858,7 @@ static const struct ivtv_card ivtv_card_dctmvtvp1 = {
        .gpio_video_input  = { .mask = 0x0030, .tuner  = 0x0000,
                               .composite = 0x0010, .svideo = 0x0020},
        .tuners = {
-               { .std = V4L2_STD_525_60, .tuner = TUNER_PHILIPS_FQ1286 },
+               { .std = V4L2_STD_525_60|V4L2_STD_MN, .tuner = TUNER_PHILIPS_FQ1286 },
        },
        .pci_list = ivtv_pci_dctmvtvp1,
        .i2c = &ivtv_i2c_std,
@@ -875,6 +877,7 @@ static const struct ivtv_card_pci_info ivtv_pci_pg600v2[] = {
 static const struct ivtv_card ivtv_card_pg600v2 = {
        .type = IVTV_CARD_PG600V2,
        .name = "Yuan PG600-2, GotView PCI DVD Lite",
+       .comment = "only Composite and S-Video inputs are supported, not the tuner\n",
        .v4l2_capabilities = IVTV_CAP_ENCODER,
        .hw_video = IVTV_HW_CX25840,
        .hw_audio = IVTV_HW_CX25840,
@@ -921,6 +924,7 @@ static const struct ivtv_card ivtv_card_club3d = {
        },
        .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO5 },
        .gpio_init = { .direction = 0x1000, .initial_value = 0x1000 }, /* tuner reset */
+       .xceive_pin = 12,
        .tuners = {
                { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
        },
@@ -944,15 +948,22 @@ static const struct ivtv_card ivtv_card_avertv_mce116 = {
        .hw_video = IVTV_HW_CX25840,
        .hw_audio = IVTV_HW_CX25840,
        .hw_audio_ctrl = IVTV_HW_CX25840,
-       .hw_all = IVTV_HW_CX25840 | IVTV_HW_WM8739,
+       .hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER | IVTV_HW_WM8739,
        .video_inputs = {
-               { IVTV_CARD_INPUT_SVIDEO1,    0, CX25840_SVIDEO3    },
-               { IVTV_CARD_INPUT_COMPOSITE1, 0, CX25840_COMPOSITE1 },
+               { IVTV_CARD_INPUT_VID_TUNER,  0, CX25840_COMPOSITE2 },
+               { IVTV_CARD_INPUT_SVIDEO1,    1, CX25840_SVIDEO3    },
+               { IVTV_CARD_INPUT_COMPOSITE1, 1, CX25840_COMPOSITE1 },
        },
        .audio_inputs = {
+               { IVTV_CARD_INPUT_AUD_TUNER,  CX25840_AUDIO5       },
                { IVTV_CARD_INPUT_LINE_IN1,   CX25840_AUDIO_SERIAL, 1 },
        },
-       .gpio_init = { .direction = 0xe000, .initial_value = 0x4000 }, /* enable line-in */
+       /* enable line-in */
+       .gpio_init = { .direction = 0xe400, .initial_value = 0x4400 },
+       .xceive_pin = 10,
+       .tuners = {
+               { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
+       },
        .pci_list = ivtv_pci_avertv_mce116,
        .i2c = &ivtv_i2c_std,
 };
@@ -990,7 +1001,7 @@ static const struct ivtv_card ivtv_card_aver_pvr150 = {
        .gpio_audio_input  = { .mask = 0x0800, .tuner = 0, .linein = 0, .radio = 0x0800 },
        .tuners = {
                /* This card has a Partsnic PTI-5NF05 tuner */
-               { .std = V4L2_STD_525_60, .tuner = TUNER_TCL_2002N },
+               { .std = V4L2_STD_525_60|V4L2_STD_MN, .tuner = TUNER_TCL_2002N },
        },
        .pci_list = ivtv_pci_aver_pvr150,
        .i2c = &ivtv_i2c_radio,
@@ -1058,12 +1069,48 @@ static const struct ivtv_card ivtv_card_asus_falcon2 = {
        },
        .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO_SERIAL, M52790_IN_TUNER },
        .tuners = {
-               { .std = V4L2_STD_525_60, .tuner = TUNER_PHILIPS_FM1236_MK3 },
+               { .std = V4L2_STD_525_60|V4L2_STD_MN, .tuner = TUNER_PHILIPS_FM1236_MK3 },
        },
        .pci_list = ivtv_pci_asus_falcon2,
        .i2c = &ivtv_i2c_std,
 };
 
+/* ------------------------------------------------------------------------- */
+
+/* AVerMedia M104 miniPCI card */
+
+static const struct ivtv_card_pci_info ivtv_pci_aver_m104[] = {
+       { PCI_DEVICE_ID_IVTV16, IVTV_PCI_ID_AVERMEDIA, 0xc136 },
+       { 0, 0, 0 }
+};
+
+static const struct ivtv_card ivtv_card_aver_m104 = {
+       .type = IVTV_CARD_AVER_M104,
+       .name = "AVerMedia M104",
+       .comment = "Not yet supported!\n",
+       .v4l2_capabilities = 0, /*IVTV_CAP_ENCODER,*/
+       .hw_video = IVTV_HW_CX25840,
+       .hw_audio = IVTV_HW_CX25840,
+       .hw_audio_ctrl = IVTV_HW_CX25840,
+       .hw_all = IVTV_HW_CX25840 | IVTV_HW_TUNER | IVTV_HW_WM8739,
+       .video_inputs = {
+               { IVTV_CARD_INPUT_SVIDEO1,    0, CX25840_SVIDEO3    },
+               { IVTV_CARD_INPUT_COMPOSITE1, 0, CX25840_COMPOSITE1 },
+       },
+       .audio_inputs = {
+               { IVTV_CARD_INPUT_LINE_IN1,   CX25840_AUDIO_SERIAL, 1 },
+       },
+       .radio_input = { IVTV_CARD_INPUT_AUD_TUNER, CX25840_AUDIO_SERIAL, 2 },
+       /* enable line-in + reset tuner */
+       .gpio_init = { .direction = 0xe400, .initial_value = 0x4000 },
+       .xceive_pin = 10,
+       .tuners = {
+               { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 },
+       },
+       .pci_list = ivtv_pci_aver_m104,
+       .i2c = &ivtv_i2c_std,
+};
+
 static const struct ivtv_card *ivtv_card_list[] = {
        &ivtv_card_pvr250,
        &ivtv_card_pvr350,
@@ -1089,6 +1136,7 @@ static const struct ivtv_card *ivtv_card_list[] = {
        &ivtv_card_asus_falcon2,
        &ivtv_card_aver_pvr150,
        &ivtv_card_aver_ezmaker,
+       &ivtv_card_aver_m104,
 
        /* Variations of standard cards but with the same PCI IDs.
           These cards must come last in this list. */
@@ -1120,7 +1168,8 @@ int ivtv_get_input(struct ivtv *itv, u16 index, struct v4l2_input *input)
        if (index >= itv->nof_inputs)
                return -EINVAL;
        input->index = index;
-       strcpy(input->name, input_strs[card_input->video_type - 1]);
+       strlcpy(input->name, input_strs[card_input->video_type - 1],
+                       sizeof(input->name));
        input->type = (card_input->video_type == IVTV_CARD_INPUT_VID_TUNER ?
                        V4L2_INPUT_TYPE_TUNER : V4L2_INPUT_TYPE_CAMERA);
        input->audioset = (1 << itv->nof_audio_inputs) - 1;
@@ -1137,7 +1186,7 @@ int ivtv_get_output(struct ivtv *itv, u16 index, struct v4l2_output *output)
        if (index >= itv->card->nof_outputs)
                return -EINVAL;
        output->index = index;
-       strcpy(output->name, card_output->name);
+       strlcpy(output->name, card_output->name, sizeof(output->name));
        output->type = V4L2_OUTPUT_TYPE_ANALOG;
        output->audioset = 1;
        output->std = V4L2_STD_ALL;
@@ -1156,7 +1205,8 @@ int ivtv_get_audio_input(struct ivtv *itv, u16 index, struct v4l2_audio *audio)
        memset(audio, 0, sizeof(*audio));
        if (index >= itv->nof_audio_inputs)
                return -EINVAL;
-       strcpy(audio->name, input_strs[aud_input->audio_type - 1]);
+       strlcpy(audio->name, input_strs[aud_input->audio_type - 1],
+                       sizeof(audio->name));
        audio->index = index;
        audio->capability = V4L2_AUDCAP_STEREO;
        return 0;
@@ -1167,6 +1217,6 @@ int ivtv_get_audio_output(struct ivtv *itv, u16 index, struct v4l2_audioout *aud
        memset(aud_output, 0, sizeof(*aud_output));
        if (itv->card->video_outputs == NULL || index != 0)
                return -EINVAL;
-       strcpy(aud_output->name, "A/V Audio Out");
+       strlcpy(aud_output->name, "A/V Audio Out", sizeof(aud_output->name));
        return 0;
 }
index 9186fa2ee5fc01bff46a451df77569cbe593eba6..748485dcebbdab220009e3ac05c307b9adbf2d71 100644 (file)
@@ -48,7 +48,8 @@
 #define IVTV_CARD_ASUS_FALCON2      21 /* ASUS Falcon2 */
 #define IVTV_CARD_AVER_PVR150PLUS    22 /* AVerMedia PVR-150 Plus */
 #define IVTV_CARD_AVER_EZMAKER       23 /* AVerMedia EZMaker PCI Deluxe */
-#define IVTV_CARD_LAST                      23
+#define IVTV_CARD_AVER_M104          24 /* AverMedia M104 miniPCI card */
+#define IVTV_CARD_LAST                      24
 
 /* Variants of existing cards but with the same PCI IDs. The driver
    detects these based on other device information.
@@ -244,6 +245,7 @@ struct ivtv_card_tuner_i2c {
 struct ivtv_card {
        int type;
        char *name;
+       char *comment;
        u32 v4l2_capabilities;
        u32 hw_video;           /* hardware used to process video */
        u32 hw_audio;           /* hardware used to process audio */
@@ -256,6 +258,7 @@ struct ivtv_card {
        int nof_outputs;
        const struct ivtv_card_output *video_outputs;
        u8 gr_config;           /* config byte for the ghost reduction device */
+       u8 xceive_pin;          /* XCeive tuner GPIO reset pin */
 
        /* GPIO card-specific settings */
        struct ivtv_gpio_init           gpio_init;
index 065df53f80fdcbf543486a9c1fb5ce739e440841..ed020f722b05ec840bada9ecdd776406c8ef18f1 100644 (file)
@@ -190,6 +190,7 @@ MODULE_PARM_DESC(cardtype,
                 "\t\t\t22 = ASUS Falcon2\n"
                 "\t\t\t23 = AverMedia PVR-150 Plus\n"
                 "\t\t\t24 = AverMedia EZMaker PCI Deluxe\n"
+                "\t\t\t25 = AverMedia M104 (not yet working)\n"
                 "\t\t\t 0 = Autodetect (default)\n"
                 "\t\t\t-1 = Ignore this card\n\t\t");
 MODULE_PARM_DESC(pal, "Set PAL standard: BGH, DK, I, M, N, Nc, 60");
@@ -871,7 +872,7 @@ static void ivtv_load_and_init_modules(struct ivtv *itv)
        unsigned i;
 
        /* load modules */
-#ifndef CONFIG_VIDEO_TUNER
+#ifndef CONFIG_MEDIA_TUNER
        hw = ivtv_request_module(itv, hw, "tuner", IVTV_HW_TUNER);
 #endif
 #ifndef CONFIG_VIDEO_CX25840
@@ -1048,7 +1049,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
                                       IVTV_ENCODER_SIZE);
        if (!itv->enc_mem) {
                IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n");
-               IVTV_ERR("or disabling CONFIG_HIMEM4G into the kernel would help\n");
+               IVTV_ERR("or disabling CONFIG_HIGHMEM4G into the kernel would help\n");
                retval = -ENOMEM;
                goto free_mem;
        }
@@ -1060,7 +1061,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
                                IVTV_DECODER_SIZE);
                if (!itv->dec_mem) {
                        IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n");
-                       IVTV_ERR("or disabling CONFIG_HIMEM4G into the kernel would help\n");
+                       IVTV_ERR("or disabling CONFIG_HIGHMEM4G into the kernel would help\n");
                        retval = -ENOMEM;
                        goto free_mem;
                }
@@ -1076,7 +1077,7 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
            ioremap_nocache(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
        if (!itv->reg_mem) {
                IVTV_ERR("ioremap failed, perhaps increasing __VMALLOC_RESERVE in page.h\n");
-               IVTV_ERR("or disabling CONFIG_HIMEM4G into the kernel would help\n");
+               IVTV_ERR("or disabling CONFIG_HIGHMEM4G into the kernel would help\n");
                retval = -ENOMEM;
                goto free_io;
        }
@@ -1097,6 +1098,13 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
                   The PCI IDs are not always reliable. */
                ivtv_process_eeprom(itv);
        }
+       if (itv->card->comment)
+               IVTV_INFO("%s", itv->card->comment);
+       if (itv->card->v4l2_capabilities == 0) {
+               /* card was detected but is not supported */
+               retval = -ENODEV;
+               goto free_i2c;
+       }
 
        if (itv->std == 0) {
                itv->std = V4L2_STD_NTSC_M;
@@ -1195,13 +1203,6 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
                ivtv_call_i2c_clients(itv, VIDIOC_INT_S_STD_OUTPUT, &itv->std);
        }
 
-       retval = ivtv_streams_setup(itv);
-       if (retval) {
-               IVTV_ERR("Error %d setting up streams\n", retval);
-               goto free_i2c;
-       }
-
-       IVTV_DEBUG_IRQ("Masking interrupts\n");
        /* clear interrupt mask, effectively disabling interrupts */
        ivtv_set_irq_mask(itv, 0xffffffff);
 
@@ -1210,32 +1211,38 @@ static int __devinit ivtv_probe(struct pci_dev *dev,
                             IRQF_SHARED | IRQF_DISABLED, itv->name, (void *)itv);
        if (retval) {
                IVTV_ERR("Failed to register irq %d\n", retval);
-               goto free_streams;
+               goto free_i2c;
+       }
+
+       retval = ivtv_streams_setup(itv);
+       if (retval) {
+               IVTV_ERR("Error %d setting up streams\n", retval);
+               goto free_irq;
        }
        retval = ivtv_streams_register(itv);
        if (retval) {
                IVTV_ERR("Error %d registering devices\n", retval);
-               goto free_irq;
+               goto free_streams;
        }
        IVTV_INFO("Initialized card #%d: %s\n", itv->num, itv->card_name);
        return 0;
 
-      free_irq:
-       free_irq(itv->dev->irq, (void *)itv);
-      free_streams:
+free_streams:
        ivtv_streams_cleanup(itv);
-      free_i2c:
+free_irq:
+       free_irq(itv->dev->irq, (void *)itv);
+free_i2c:
        exit_ivtv_i2c(itv);
-      free_io:
+free_io:
        ivtv_iounmap(itv);
-      free_mem:
+free_mem:
        release_mem_region(itv->base_addr, IVTV_ENCODER_SIZE);
        release_mem_region(itv->base_addr + IVTV_REG_OFFSET, IVTV_REG_SIZE);
        if (itv->has_cx23415)
                release_mem_region(itv->base_addr + IVTV_DECODER_OFFSET, IVTV_DECODER_SIZE);
-      free_workqueue:
+free_workqueue:
        destroy_workqueue(itv->irq_work_queues);
-      err:
+err:
        if (retval == 0)
                retval = -ENODEV;
        IVTV_ERR("Error %d on initialization\n", retval);
index a7640c49f1d84aa6f89e8f81e67b291d62a67350..2b74b0ab147788c7942d5af34dfe1beeb954f904 100644 (file)
@@ -755,8 +755,10 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
        IVTV_DEBUG_HI_FILE("Encoder poll\n");
        poll_wait(filp, &s->waitq, wait);
 
-       if (eof || s->q_full.length || s->q_io.length)
+       if (s->q_full.length || s->q_io.length)
                return POLLIN | POLLRDNORM;
+       if (eof)
+               return POLLHUP;
        return 0;
 }
 
index 688cd3856685f09a0374fa91f42f20efbf1fdc92..d8ac09f3cce6ffc17506c6bc7d7d4021e8bacf07 100644 (file)
@@ -128,20 +128,17 @@ int ivtv_reset_tuner_gpio(void *dev, int cmd, int value)
 {
        struct i2c_algo_bit_data *algo = dev;
        struct ivtv *itv = algo->data;
-       int curdir, curout;
+       u32 curout;
 
        if (cmd != XC2028_TUNER_RESET)
                return 0;
        IVTV_DEBUG_INFO("Resetting tuner\n");
        curout = read_reg(IVTV_REG_GPIO_OUT);
-       curdir = read_reg(IVTV_REG_GPIO_DIR);
-       curdir |= (1 << 12);  /* GPIO bit 12 */
-
-       curout &= ~(1 << 12);
+       curout &= ~(1 << itv->card->xceive_pin);
        write_reg(curout, IVTV_REG_GPIO_OUT);
        schedule_timeout_interruptible(msecs_to_jiffies(1));
 
-       curout |= (1 << 12);
+       curout |= 1 << itv->card->xceive_pin;
        write_reg(curout, IVTV_REG_GPIO_OUT);
        schedule_timeout_interruptible(msecs_to_jiffies(1));
        return 0;
index 9824eafee02131eb88b67045de9ce32de8f0e93e..771adf47e944ff1379f4264db37f4a17b4dc210b 100644 (file)
@@ -167,7 +167,8 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
                return -1;
        id = hw_driverids[idx];
        memset(&info, 0, sizeof(info));
-       strcpy(info.driver_name, hw_drivernames[idx]);
+       strlcpy(info.driver_name, hw_drivernames[idx],
+                       sizeof(info.driver_name));
        info.addr = hw_addrs[idx];
        for (i = 0; itv->i2c_clients[i] && i < I2C_CLIENTS_MAX; i++) {}
 
index 15cac1812122e22c11805e3fe006c5111f2581ae..d508b5d0538cfb71bfa9b7b2c137eebfcbdaafd4 100644 (file)
@@ -243,20 +243,31 @@ static int ivtv_validate_speed(int cur_speed, int new_speed)
        int fact = new_speed < 0 ? -1 : 1;
        int s;
 
-       if (new_speed < 0) new_speed = -new_speed;
-       if (cur_speed < 0) cur_speed = -cur_speed;
+       if (cur_speed == 0)
+               cur_speed = 1000;
+       if (new_speed < 0)
+               new_speed = -new_speed;
+       if (cur_speed < 0)
+               cur_speed = -cur_speed;
 
        if (cur_speed <= new_speed) {
-               if (new_speed > 1500) return fact * 2000;
-               if (new_speed > 1000) return fact * 1500;
+               if (new_speed > 1500)
+                       return fact * 2000;
+               if (new_speed > 1000)
+                       return fact * 1500;
        }
        else {
-               if (new_speed >= 2000) return fact * 2000;
-               if (new_speed >= 1500) return fact * 1500;
-               if (new_speed >= 1000) return fact * 1000;
-       }
-       if (new_speed == 0) return 1000;
-       if (new_speed == 1 || new_speed == 1000) return fact * new_speed;
+               if (new_speed >= 2000)
+                       return fact * 2000;
+               if (new_speed >= 1500)
+                       return fact * 1500;
+               if (new_speed >= 1000)
+                       return fact * 1000;
+       }
+       if (new_speed == 0)
+               return 1000;
+       if (new_speed == 1 || new_speed == 1000)
+               return fact * new_speed;
 
        s = new_speed;
        new_speed = 1000 / new_speed;
@@ -741,10 +752,9 @@ int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void
                struct v4l2_capability *vcap = arg;
 
                memset(vcap, 0, sizeof(*vcap));
-               strcpy(vcap->driver, IVTV_DRIVER_NAME);     /* driver name */
-               strncpy(vcap->card, itv->card_name,
-                               sizeof(vcap->card)-1);      /* card type */
-               strcpy(vcap->bus_info, pci_name(itv->dev)); /* bus info... */
+               strlcpy(vcap->driver, IVTV_DRIVER_NAME, sizeof(vcap->driver));
+               strlcpy(vcap->card, itv->card_name, sizeof(vcap->card));
+               strlcpy(vcap->bus_info, pci_name(itv->dev), sizeof(vcap->bus_info));
                vcap->version = IVTV_DRIVER_VERSION;        /* version */
                vcap->capabilities = itv->v4l2_cap;         /* capabilities */
 
@@ -1018,7 +1028,7 @@ int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void
                                ivtv_std_60hz : ivtv_std_50hz;
                vs->index = idx;
                vs->id = enum_stds[idx].std;
-               strcpy(vs->name, enum_stds[idx].name);
+               strlcpy(vs->name, enum_stds[idx].name, sizeof(vs->name));
                break;
        }
 
@@ -1102,10 +1112,10 @@ int ivtv_v4l2_ioctls(struct ivtv *itv, struct file *filp, unsigned int cmd, void
                ivtv_call_i2c_clients(itv, VIDIOC_G_TUNER, vt);
 
                if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
-                       strcpy(vt->name, "ivtv Radio Tuner");
+                       strlcpy(vt->name, "ivtv Radio Tuner", sizeof(vt->name));
                        vt->type = V4L2_TUNER_RADIO;
                } else {
-                       strcpy(vt->name, "ivtv TV Tuner");
+                       strlcpy(vt->name, "ivtv TV Tuner", sizeof(vt->name));
                        vt->type = V4L2_TUNER_ANALOG_TV;
                }
                break;
index a329c4689dbf71de5b56a307c3256b6d8979aa76..d8ba3a4a8761fb0c66993ae5a2ae998aa068c58f 100644 (file)
@@ -384,7 +384,7 @@ static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
        ivtv_stream_sync_for_device(s);
        write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
-       itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
+       itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
        add_timer(&itv->dma_timer);
 }
 
@@ -400,7 +400,7 @@ static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
        ivtv_stream_sync_for_device(s);
        write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
        write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
-       itv->dma_timer.expires = jiffies + msecs_to_jiffies(100);
+       itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
        add_timer(&itv->dma_timer);
 }
 
index 0f1d4cc4b4d9b49dd95f2aa7b46411f0a9769fd9..02c5ab071d1b3518eec07500f78af9df25f0ea0f 100644 (file)
@@ -23,7 +23,7 @@
 #define IVTV_DRIVER_NAME "ivtv"
 #define IVTV_DRIVER_VERSION_MAJOR 1
 #define IVTV_DRIVER_VERSION_MINOR 2
-#define IVTV_DRIVER_VERSION_PATCHLEVEL 0
+#define IVTV_DRIVER_VERSION_PATCHLEVEL 1
 
 #define IVTV_VERSION __stringify(IVTV_DRIVER_VERSION_MAJOR) "." __stringify(IVTV_DRIVER_VERSION_MINOR) "." __stringify(IVTV_DRIVER_VERSION_PATCHLEVEL)
 #define IVTV_DRIVER_VERSION KERNEL_VERSION(IVTV_DRIVER_VERSION_MAJOR,IVTV_DRIVER_VERSION_MINOR,IVTV_DRIVER_VERSION_PATCHLEVEL)
index 393d917cd67265367aa0280395bf521b4a57902e..62f70bd5e3cb149f47ec1199ba3381d9574735a9 100644 (file)
@@ -1098,8 +1098,8 @@ void ivtv_yuv_setup_stream_frame(struct ivtv *itv)
        ivtv_yuv_next_free(itv);
 
        /* Copy V4L2 parameters to an ivtv_dma_frame struct... */
-       dma_args.y_source = 0L;
-       dma_args.uv_source = 0L;
+       dma_args.y_source = NULL;
+       dma_args.uv_source = NULL;
        dma_args.src.left = 0;
        dma_args.src.top = 0;
        dma_args.src.width = yi->v4l2_src_w;
index 3b23fc05f7c4331dc1d15b61af39a3cb1d3133da..df789f683e63ce1402f34725f0fe16ccb11e824a 100644 (file)
@@ -532,7 +532,7 @@ static int ivtvfb_get_fix(struct ivtv *itv, struct fb_fix_screeninfo *fix)
 
        IVTVFB_DEBUG_INFO("ivtvfb_get_fix\n");
        memset(fix, 0, sizeof(struct fb_fix_screeninfo));
-       strcpy(fix->id, "cx23415 TV out");
+       strlcpy(fix->id, "cx23415 TV out", sizeof(fix->id));
        fix->smem_start = oi->video_pbase;
        fix->smem_len = oi->video_buffer_size;
        fix->type = FB_TYPE_PACKED_PIXELS;
index d4bf14c284ef2c967d59c3fe4b588d27170d0b49..5b9dfa2c51b43874e8b9045379ee685260982720 100644 (file)
@@ -126,7 +126,8 @@ static int m52790_command(struct i2c_client *client, unsigned int cmd,
 
 /* i2c implementation */
 
-static int m52790_probe(struct i2c_client *client)
+static int m52790_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
 {
        struct m52790_state *state;
 
index b73c740f7fb2697a5e128feee6de2f49abeec192..e6273162e12379e4ae054dfeb410458267f3c83a 100644 (file)
@@ -805,7 +805,7 @@ static int msp_resume(struct i2c_client *client)
 
 /* ----------------------------------------------------------------------- */
 
-static int msp_probe(struct i2c_client *client)
+static int msp_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
        struct msp_state *state;
        int (*thread_func)(void *data) = NULL;
index 3fb5f63df1e6392e000cd7e3b94180e5bec2705d..179e47049a452ff1619d977d4bc44779a1b00847 100644 (file)
@@ -372,7 +372,7 @@ static int mt9m001_set_register(struct soc_camera_device *icd,
 }
 #endif
 
-const struct v4l2_queryctrl mt9m001_controls[] = {
+static const struct v4l2_queryctrl mt9m001_controls[] = {
        {
                .id             = V4L2_CID_VFLIP,
                .type           = V4L2_CTRL_TYPE_BOOLEAN,
@@ -620,7 +620,8 @@ static void mt9m001_video_remove(struct soc_camera_device *icd)
        soc_camera_video_stop(&mt9m001->icd);
 }
 
-static int mt9m001_probe(struct i2c_client *client)
+static int mt9m001_probe(struct i2c_client *client,
+                        const struct i2c_device_id *did)
 {
        struct mt9m001 *mt9m001;
        struct soc_camera_device *icd;
@@ -696,12 +697,19 @@ static int mt9m001_remove(struct i2c_client *client)
        return 0;
 }
 
+static const struct i2c_device_id mt9m001_id[] = {
+       { "mt9m001", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, mt9m001_id);
+
 static struct i2c_driver mt9m001_i2c_driver = {
        .driver = {
                .name = "mt9m001",
        },
        .probe          = mt9m001_probe,
        .remove         = mt9m001_remove,
+       .id_table       = mt9m001_id,
 };
 
 static int __init mt9m001_mod_init(void)
index d4b9e2744343dd0dc7f48049deb2d61a9f6d0e2b..d1391ac550963b14ebeb2e52e9470c5215501bfd 100644 (file)
@@ -452,7 +452,7 @@ static int mt9v022_set_register(struct soc_camera_device *icd,
 }
 #endif
 
-const struct v4l2_queryctrl mt9v022_controls[] = {
+static const struct v4l2_queryctrl mt9v022_controls[] = {
        {
                .id             = V4L2_CID_VFLIP,
                .type           = V4L2_CTRL_TYPE_BOOLEAN,
@@ -745,7 +745,8 @@ static void mt9v022_video_remove(struct soc_camera_device *icd)
        soc_camera_video_stop(&mt9v022->icd);
 }
 
-static int mt9v022_probe(struct i2c_client *client)
+static int mt9v022_probe(struct i2c_client *client,
+                        const struct i2c_device_id *did)
 {
        struct mt9v022 *mt9v022;
        struct soc_camera_device *icd;
@@ -818,12 +819,19 @@ static int mt9v022_remove(struct i2c_client *client)
        return 0;
 }
 
+static const struct i2c_device_id mt9v022_id[] = {
+       { "mt9v022", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, mt9v022_id);
+
 static struct i2c_driver mt9v022_i2c_driver = {
        .driver = {
                .name = "mt9v022",
        },
        .probe          = mt9v022_probe,
        .remove         = mt9v022_remove,
+       .id_table       = mt9v022_id,
 };
 
 static int __init mt9v022_mod_init(void)
index 158b3d0c653282bf27ca31352be43a8a46ddf173..9620c67fae77050f442dd024f186915affda8353 100644 (file)
@@ -1,14 +1,15 @@
 config VIDEO_PVRUSB2
        tristate "Hauppauge WinTV-PVR USB2 support"
-       depends on VIDEO_V4L2 && I2C && EXPERIMENTAL
+       depends on VIDEO_V4L2 && I2C
        select FW_LOADER
-       select VIDEO_TUNER
+       select MEDIA_TUNER
        select VIDEO_TVEEPROM
        select VIDEO_CX2341X
        select VIDEO_SAA711X
        select VIDEO_CX25840
        select VIDEO_MSP3400
        select VIDEO_WM8775
+       select VIDEO_CS53L32A
        ---help---
          This is a video4linux driver for Conexant 23416 based
          usb2 personal video recorder devices.
@@ -16,32 +17,6 @@ config VIDEO_PVRUSB2
          To compile this driver as a module, choose M here: the
          module will be called pvrusb2
 
-config VIDEO_PVRUSB2_ONAIR_CREATOR
-       bool "pvrusb2 driver support for OnAir Creator model"
-       depends on VIDEO_PVRUSB2 && EXPERIMENTAL
-       select VIDEO_SAA711X
-       select VIDEO_CS53L32A
-       ---help---
-
-         This option enables support for the OnAir Creator USB tuner
-         device.  This is a hybrid device, however currently only
-         analog mode is supported.
-
-         If you are in doubt, say Y.
-
-config VIDEO_PVRUSB2_ONAIR_USB2
-       bool "pvrusb2 driver support for OnAir USB2 model"
-       depends on VIDEO_PVRUSB2 && EXPERIMENTAL
-       select VIDEO_SAA711X
-       select VIDEO_CS53L32A
-       ---help---
-
-         This option enables support for the OnAir USB2 tuner device
-         (also known as the Sasem tuner).  This is a hybrid device,
-         however currently only analog mode is supported.
-
-         If you are in doubt, say Y.
-
 config VIDEO_PVRUSB2_SYSFS
        bool "pvrusb2 sysfs support (EXPERIMENTAL)"
        default y
@@ -59,29 +34,23 @@ config VIDEO_PVRUSB2_SYSFS
          Note: This feature is experimental and subject to change.
 
 config VIDEO_PVRUSB2_DVB
-       bool "pvrusb2 DVB support (EXPERIMENTAL)"
-       default n
+       bool "pvrusb2 ATSC/DVB support (EXPERIMENTAL)"
+       default y
        depends on VIDEO_PVRUSB2 && DVB_CORE && EXPERIMENTAL
        select DVB_LGDT330X if !DVB_FE_CUSTOMISE
        select DVB_S5H1409 if !DVB_FE_CUSTOMISE
        select DVB_S5H1411 if !DVB_FE_CUSTOMISE
        select DVB_TDA10048 if !DVB_FE_CUSTOMIZE
-       select DVB_TDA18271 if !DVB_FE_CUSTOMIZE
-       select TUNER_SIMPLE if !DVB_FE_CUSTOMISE
-       select TUNER_TDA8290 if !DVB_FE_CUSTOMIZE
+       select MEDIA_TUNER_TDA18271 if !DVB_FE_CUSTOMIZE
+       select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_TDA8290 if !DVB_FE_CUSTOMIZE
        ---help---
 
-         This option enables compilation of a DVB interface for the
-         pvrusb2 driver.  Currently this is very very experimental.
-         It is also limiting - the DVB interface can only access the
-         digital side of hybrid devices, and there are going to be
-         issues if you attempt to mess with the V4L side at the same
-         time.  Don't turn this on unless you know what you are
-         doing.
-
-         If you are in doubt, say N.
+         This option enables a DVB interface for the pvrusb2 driver.
+         If your device does not support digital television, this
+         feature will have no effect on the driver's operation.
 
-         Note: This feature is very experimental and might break
+         If you are in doubt, say Y.
 
 config VIDEO_PVRUSB2_DEBUGIFC
        bool "pvrusb2 debug interface"
index 5b3083c89aa9bcd1841c41d76c029d3ae682d4dc..4fda2de69ab7240022856bd4a2c3064b41c136d4 100644 (file)
@@ -16,5 +16,6 @@ pvrusb2-objs  := pvrusb2-i2c-core.o pvrusb2-i2c-cmd-v4l2.o \
 obj-$(CONFIG_VIDEO_PVRUSB2) += pvrusb2.o
 
 EXTRA_CFLAGS += -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
index 9a7c8e9c3e8ba3ef33ab19b4f449e64791c43a79..8d859ccd48ecc0b0da9a59d030a988f37528e629 100644 (file)
@@ -75,7 +75,7 @@ static void set_stereo(struct pvr2_msp3400_handler *ctxt)
        pvr2_trace(PVR2_TRACE_CHIPS,"i2c msp3400 v4l2 set_stereo");
 
        if ((sid < ARRAY_SIZE(routing_schemes)) &&
-           ((sp = routing_schemes + sid) != 0) &&
+           ((sp = routing_schemes + sid) != NULL) &&
            (hdw->input_val >= 0) &&
            (hdw->input_val < sp->cnt)) {
                route.input = sp->def[hdw->input_val];
index b5db6a5bab31e94b5338feb1b788ee14b129eb6e..73dcb1c57ae66c8b520558cfc0fb3efe60ded5e0 100644 (file)
@@ -195,7 +195,7 @@ static int pvr2_context_thread_func(void *foo)
 int pvr2_context_global_init(void)
 {
        pvr2_context_thread_ptr = kthread_run(pvr2_context_thread_func,
-                                             0,
+                                             NULL,
                                              "pvrusb2-context");
        return (pvr2_context_thread_ptr ? 0 : -ENOMEM);
 }
index 97350b048b8deeb394fba59265467215f3930b45..29d50597c88a5aeb2cd9ce14daac7fa5c238afa1 100644 (file)
@@ -123,7 +123,7 @@ static void set_input(struct pvr2_v4l_cx2584x *ctxt)
        memset(&route,0,sizeof(route));
 
        if ((sid < ARRAY_SIZE(routing_schemes)) &&
-           ((sp = routing_schemes + sid) != 0) &&
+           ((sp = routing_schemes + sid) != NULL) &&
            (hdw->input_val >= 0) &&
            (hdw->input_val < sp->cnt)) {
                vid_input = sp->def[hdw->input_val].vid;
index 11537ddf8aa3b0d6b81a44f069b83fb1feec220c..707d2d9635d7a9e2f68f8ecd5078f0b2e184f257 100644 (file)
@@ -54,6 +54,7 @@ extern int pvrusb2_debug;
 #define PVR2_TRACE_DATA_FLOW  (1 << 25) /* Track data flow */
 #define PVR2_TRACE_DEBUGIFC   (1 << 26) /* Debug interface actions */
 #define PVR2_TRACE_GPIO       (1 << 27) /* GPIO state bit changes */
+#define PVR2_TRACE_DVB_FEED   (1 << 28) /* DVB transport feed debug */
 
 
 #endif /* __PVRUSB2_HDW_INTERNAL_H */
index 3a141d93e1a9f2cff4792545bc697aedd5e60b29..5bf6d8fda1f9f284667093337b1ad851f09f9412 100644 (file)
@@ -153,7 +153,6 @@ static const struct pvr2_device_desc pvr2_device_gotview_2d = {
 
 
 
-#ifdef CONFIG_VIDEO_PVRUSB2_ONAIR_CREATOR
 /*------------------------------------------------------------------------*/
 /* OnAir Creator */
 
@@ -212,11 +211,9 @@ static const struct pvr2_device_desc pvr2_device_onair_creator = {
                .dvb_props = &pvr2_onair_creator_fe_props,
 #endif
 };
-#endif
 
 
 
-#ifdef CONFIG_VIDEO_PVRUSB2_ONAIR_USB2
 /*------------------------------------------------------------------------*/
 /* OnAir USB 2.0 */
 
@@ -274,7 +271,6 @@ static const struct pvr2_device_desc pvr2_device_onair_usb2 = {
                .dvb_props = &pvr2_onair_usb2_fe_props,
 #endif
 };
-#endif
 
 
 
@@ -497,14 +493,10 @@ struct usb_device_id pvr2_device_table[] = {
          .driver_info = (kernel_ulong_t)&pvr2_device_gotview_2},
        { USB_DEVICE(0x1164, 0x0602),
          .driver_info = (kernel_ulong_t)&pvr2_device_gotview_2d},
-#ifdef CONFIG_VIDEO_PVRUSB2_ONAIR_CREATOR
        { USB_DEVICE(0x11ba, 0x1003),
          .driver_info = (kernel_ulong_t)&pvr2_device_onair_creator},
-#endif
-#ifdef CONFIG_VIDEO_PVRUSB2_ONAIR_USB2
        { USB_DEVICE(0x11ba, 0x1001),
          .driver_info = (kernel_ulong_t)&pvr2_device_onair_usb2},
-#endif
        { USB_DEVICE(0x2040, 0x7300),
          .driver_info = (kernel_ulong_t)&pvr2_device_73xxx},
        { USB_DEVICE(0x2040, 0x7500),
index 2e64f98d124149404ba76d6f5ab3131e14910662..6ec4bf81fc7f3e0d65c2f3308e0ae93fdff0aa82 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include "dvbdev.h"
+#include "pvrusb2-debug.h"
 #include "pvrusb2-hdw-internal.h"
 #include "pvrusb2-hdw.h"
 #include "pvrusb2-io.h"
@@ -35,7 +36,7 @@ static int pvr2_dvb_feed_func(struct pvr2_dvb_adapter *adap)
        struct pvr2_buffer *bp;
        struct pvr2_stream *stream;
 
-       printk(KERN_DEBUG "dvb thread started\n");
+       pvr2_trace(PVR2_TRACE_DVB_FEED, "dvb feed thread started");
        set_freezable();
 
        stream = adap->channel.stream->stream;
@@ -82,7 +83,7 @@ static int pvr2_dvb_feed_func(struct pvr2_dvb_adapter *adap)
        /* If we get here and ret is < 0, then an error has occurred.
           Probably would be a good idea to communicate that to DVB core... */
 
-       printk(KERN_DEBUG "dvb thread stopped\n");
+       pvr2_trace(PVR2_TRACE_DVB_FEED, "dvb feed thread stopped");
 
        return 0;
 }
@@ -130,7 +131,7 @@ static void pvr2_dvb_stream_end(struct pvr2_dvb_adapter *adap)
                for (idx = 0; idx < PVR2_DVB_BUFFER_COUNT; idx++) {
                        if (!(adap->buffer_storage[idx])) continue;
                        kfree(adap->buffer_storage[idx]);
-                       adap->buffer_storage[idx] = 0;
+                       adap->buffer_storage[idx] = NULL;
                }
                adap->stream_run = 0;
        }
@@ -142,7 +143,7 @@ static int pvr2_dvb_stream_do_start(struct pvr2_dvb_adapter *adap)
        unsigned int idx;
        int ret;
        struct pvr2_buffer *bp;
-       struct pvr2_stream *stream = 0;
+       struct pvr2_stream *stream = NULL;
 
        if (adap->stream_run) return -EIO;
 
@@ -174,7 +175,7 @@ static int pvr2_dvb_stream_do_start(struct pvr2_dvb_adapter *adap)
        ret = pvr2_hdw_set_streaming(adap->channel.hdw, 1);
        if (ret < 0) return ret;
 
-       while ((bp = pvr2_stream_get_idle_buffer(stream)) != 0) {
+       while ((bp = pvr2_stream_get_idle_buffer(stream)) != NULL) {
                ret = pvr2_buffer_queue(bp);
                if (ret < 0) return ret;
        }
@@ -210,7 +211,8 @@ static int pvr2_dvb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
        do {
                if (onoff) {
                        if (!adap->feedcount) {
-                               printk(KERN_DEBUG "start feeding\n");
+                               pvr2_trace(PVR2_TRACE_DVB_FEED,
+                                          "start feeding demux");
                                ret = pvr2_dvb_stream_start(adap);
                                if (ret < 0) break;
                        }
@@ -218,7 +220,8 @@ static int pvr2_dvb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
                } else if (adap->feedcount > 0) {
                        (adap->feedcount)--;
                        if (!adap->feedcount) {
-                               printk(KERN_DEBUG "stop feeding\n");
+                               pvr2_trace(PVR2_TRACE_DVB_FEED,
+                                          "stop feeding demux");
                                pvr2_dvb_stream_end(adap);
                        }
                }
@@ -230,15 +233,13 @@ static int pvr2_dvb_ctrl_feed(struct dvb_demux_feed *dvbdmxfeed, int onoff)
 
 static int pvr2_dvb_start_feed(struct dvb_demux_feed *dvbdmxfeed)
 {
-       printk(KERN_DEBUG "start pid: 0x%04x, feedtype: %d\n",
-              dvbdmxfeed->pid, dvbdmxfeed->type);
+       pvr2_trace(PVR2_TRACE_DVB_FEED, "start pid: 0x%04x", dvbdmxfeed->pid);
        return pvr2_dvb_ctrl_feed(dvbdmxfeed, 1);
 }
 
 static int pvr2_dvb_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
 {
-       printk(KERN_DEBUG "stop pid: 0x%04x, feedtype: %d\n",
-              dvbdmxfeed->pid, dvbdmxfeed->type);
+       pvr2_trace(PVR2_TRACE_DVB_FEED, "stop pid: 0x%04x", dvbdmxfeed->pid);
        return pvr2_dvb_ctrl_feed(dvbdmxfeed, 0);
 }
 
@@ -259,7 +260,8 @@ static int pvr2_dvb_adapter_init(struct pvr2_dvb_adapter *adap)
                                   &adap->channel.hdw->usb_dev->dev,
                                   adapter_nr);
        if (ret < 0) {
-               err("dvb_register_adapter failed: error %d", ret);
+               pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+                          "dvb_register_adapter failed: error %d", ret);
                goto err;
        }
        adap->dvb_adap.priv = adap;
@@ -276,7 +278,8 @@ static int pvr2_dvb_adapter_init(struct pvr2_dvb_adapter *adap)
 
        ret = dvb_dmx_init(&adap->demux);
        if (ret < 0) {
-               err("dvb_dmx_init failed: error %d", ret);
+               pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+                          "dvb_dmx_init failed: error %d", ret);
                goto err_dmx;
        }
 
@@ -286,7 +289,8 @@ static int pvr2_dvb_adapter_init(struct pvr2_dvb_adapter *adap)
 
        ret = dvb_dmxdev_init(&adap->dmxdev, &adap->dvb_adap);
        if (ret < 0) {
-               err("dvb_dmxdev_init failed: error %d", ret);
+               pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+                          "dvb_dmxdev_init failed: error %d", ret);
                goto err_dmx_dev;
        }
 
@@ -304,7 +308,7 @@ err:
 
 static int pvr2_dvb_adapter_exit(struct pvr2_dvb_adapter *adap)
 {
-       printk(KERN_DEBUG "unregistering DVB devices\n");
+       pvr2_trace(PVR2_TRACE_INFO, "unregistering DVB devices");
        dvb_net_release(&adap->dvb_net);
        adap->demux.dmx.close(&adap->demux.dmx);
        dvb_dmxdev_release(&adap->dmxdev);
@@ -320,7 +324,7 @@ static int pvr2_dvb_frontend_init(struct pvr2_dvb_adapter *adap)
        int ret = 0;
 
        if (dvb_props == NULL) {
-               err("fe_props not defined!");
+               pvr2_trace(PVR2_TRACE_ERROR_LEGS, "fe_props not defined!");
                return -EINVAL;
        }
 
@@ -328,13 +332,15 @@ static int pvr2_dvb_frontend_init(struct pvr2_dvb_adapter *adap)
            &adap->channel,
            (1 << PVR2_CVAL_INPUT_DTV));
        if (ret) {
-               err("failed to grab control of dtv input (code=%d)",
+               pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+                          "failed to grab control of dtv input (code=%d)",
                    ret);
                return ret;
        }
 
        if (dvb_props->frontend_attach == NULL) {
-               err("frontend_attach not defined!");
+               pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+                          "frontend_attach not defined!");
                ret = -EINVAL;
                goto done;
        }
@@ -342,7 +348,8 @@ static int pvr2_dvb_frontend_init(struct pvr2_dvb_adapter *adap)
        if ((dvb_props->frontend_attach(adap) == 0) && (adap->fe)) {
 
                if (dvb_register_frontend(&adap->dvb_adap, adap->fe)) {
-                       err("frontend registration failed!");
+                       pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+                                  "frontend registration failed!");
                        dvb_frontend_detach(adap->fe);
                        adap->fe = NULL;
                        ret = -ENODEV;
@@ -359,7 +366,8 @@ static int pvr2_dvb_frontend_init(struct pvr2_dvb_adapter *adap)
                adap->fe->ops.ts_bus_ctrl = pvr2_dvb_bus_ctrl;
 
        } else {
-               err("no frontend was attached!");
+               pvr2_trace(PVR2_TRACE_ERROR_LEGS,
+                          "no frontend was attached!");
                ret = -ENODEV;
                return ret;
        }
index 087a18245560f467fb059590a91531c4840a4d0d..e9b5d4e913277ebc25348c8f2c71edd231df5fb3 100644 (file)
@@ -1261,7 +1261,7 @@ struct pvr2_v4l2 *pvr2_v4l2_create(struct pvr2_context *mnp)
  fail:
        pvr2_trace(PVR2_TRACE_STRUCT,"Failure creating pvr2_v4l2 id=%p",vp);
        pvr2_v4l2_destroy_no_lock(vp);
-       return 0;
+       return NULL;
 }
 
 /*
index 7c47345501b610ea988f349f935de29666415193..2433a31600413cbf40f6276acd5d83dd50b8094b 100644 (file)
@@ -81,7 +81,7 @@ static void set_input(struct pvr2_v4l_decoder *ctxt)
        pvr2_trace(PVR2_TRACE_CHIPS,"i2c v4l2 set_input(%d)",hdw->input_val);
 
        if ((sid < ARRAY_SIZE(routing_schemes)) &&
-           ((sp = routing_schemes + sid) != 0) &&
+           ((sp = routing_schemes + sid) != NULL) &&
            (hdw->input_val >= 0) &&
            (hdw->input_val < sp->cnt)) {
                route.input = sp->def[hdw->input_val];
index 416d05d4a969088d02d2f1c5a319de4fb215fe68..e684108637add93351b42fb005f27cc9da5be7da 100644 (file)
@@ -1450,7 +1450,8 @@ static int saa7115_command(struct i2c_client *client, unsigned int cmd, void *ar
 
 /* ----------------------------------------------------------------------- */
 
-static int saa7115_probe(struct i2c_client *client)
+static int saa7115_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
 {
        struct saa711x_state *state;
        int     i;
index 06c88db656b41d4a8a94dc67ef9814c4fe852e50..e750cd65c1c37676612c1955c9674ad5b232ebb5 100644 (file)
@@ -661,7 +661,8 @@ static int saa7127_command(struct i2c_client *client,
 
 /* ----------------------------------------------------------------------- */
 
-static int saa7127_probe(struct i2c_client *client)
+static int saa7127_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
 {
        struct saa7127_state *state;
        struct v4l2_sliced_vbi_data vbi = { 0, 0, 0, 0 };  /* set to disabled */
index e086f14d56639ef458e1c69be6dd83eaa7d50f57..40e4c3bd2cb9b39a6b764793f3286801454e508c 100644 (file)
@@ -3,7 +3,7 @@ config VIDEO_SAA7134
        depends on VIDEO_DEV && PCI && I2C && INPUT
        select VIDEOBUF_DMA_SG
        select VIDEO_IR
-       select VIDEO_TUNER
+       select MEDIA_TUNER
        select VIDEO_TVEEPROM
        select CRC32
        ---help---
@@ -35,9 +35,9 @@ config VIDEO_SAA7134_DVB
        select DVB_NXT200X if !DVB_FE_CUSTOMISE
        select DVB_TDA10086 if !DVB_FE_CUSTOMISE
        select DVB_TDA826X if !DVB_FE_CUSTOMISE
-       select DVB_TDA827X if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_TDA827X if !DVB_FE_CUSTOMISE
        select DVB_ISL6421 if !DVB_FE_CUSTOMISE
-       select TUNER_SIMPLE if !DVB_FE_CUSTOMISE
+       select MEDIA_TUNER_SIMPLE if !DVB_FE_CUSTOMISE
        ---help---
          This adds support for DVB cards based on the
          Philips saa7134 chip.
index 9aff937ba7a5232ab506d8f1e0617f5089be3726..3dbaa19a6d00d7f3dbc81f7769681c91d5fff41e 100644 (file)
@@ -11,5 +11,6 @@ obj-$(CONFIG_VIDEO_SAA7134_ALSA) += saa7134-alsa.o
 obj-$(CONFIG_VIDEO_SAA7134_DVB) += saa7134-dvb.o
 
 EXTRA_CFLAGS += -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
 EXTRA_CFLAGS += -Idrivers/media/dvb/dvb-core
 EXTRA_CFLAGS += -Idrivers/media/dvb/frontends
index 98375955a84b7c42e62b34a83cf76dc358d0c52f..b111903aa322cd287a4d5ca0041396242b7e6c43 100644 (file)
@@ -47,6 +47,9 @@ static char name_svideo[]  = "S-Video";
 /* ------------------------------------------------------------------ */
 /* board config info                                                  */
 
+/* If radio_type !=UNSET, radio_addr should be specified
+ */
+
 struct saa7134_board saa7134_boards[] = {
        [SAA7134_BOARD_UNKNOWN] = {
                .name           = "UNKNOWN/GENERIC",
@@ -3087,7 +3090,7 @@ struct saa7134_board saa7134_boards[] = {
                .tuner_type     = TUNER_PHILIPS_TD1316, /* untested */
                .radio_type     = TUNER_TEA5767, /* untested */
                .tuner_addr     = ADDR_UNSET,
-               .radio_addr     = ADDR_UNSET,
+               .radio_addr     = 0x60,
                .tda9887_conf   = TDA9887_PRESENT,
                .mpeg           = SAA7134_MPEG_DVB,
                .inputs         = {{
@@ -4247,6 +4250,36 @@ struct saa7134_board saa7134_boards[] = {
                        .amux = LINE1,
                } },
        },
+       [SAA7134_BOARD_BEHOLD_H6] = {
+               /* Igor Kuznetsov <igk@igk.ru> */
+               .name           = "Beholder BeholdTV H6",
+               .audio_clock    = 0x00187de7,
+               .tuner_type     = TUNER_PHILIPS_FMD1216ME_MK3,
+               .radio_type     = UNSET,
+               .tuner_addr     = ADDR_UNSET,
+               .radio_addr     = ADDR_UNSET,
+               .tda9887_conf   = TDA9887_PRESENT,
+               .inputs         = {{
+                       .name = name_tv,
+                       .vmux = 3,
+                       .amux = TV,
+                       .tv   = 1,
+               }, {
+                       .name = name_comp1,
+                       .vmux = 1,
+                       .amux = LINE1,
+               }, {
+                       .name = name_svideo,
+                       .vmux = 8,
+                       .amux = LINE1,
+               } },
+               .radio = {
+                       .name = name_radio,
+                       .amux = LINE2,
+               },
+               /* no DVB support for now */
+               /* .mpeg           = SAA7134_MPEG_DVB, */
+       },
 };
 
 const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -5197,6 +5230,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
                .subvendor    = 0x5ace,
                .subdevice    = 0x6193,
                .driver_data  = SAA7134_BOARD_BEHOLD_M6,
+       }, {
+               .vendor       = PCI_VENDOR_ID_PHILIPS,
+               .device       = PCI_DEVICE_ID_PHILIPS_SAA7133,
+               .subvendor    = 0x5ace,
+               .subdevice    = 0x6191,
+               .driver_data  = SAA7134_BOARD_BEHOLD_M6,
        },{
                .vendor       = PCI_VENDOR_ID_PHILIPS,
                .device       = PCI_DEVICE_ID_PHILIPS_SAA7133,
@@ -5245,6 +5284,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
                .subvendor    = 0x185b,
                .subdevice    = 0xc900,
                .driver_data  = SAA7134_BOARD_VIDEOMATE_T750,
+       }, {
+               .vendor       = PCI_VENDOR_ID_PHILIPS,
+               .device       = PCI_DEVICE_ID_PHILIPS_SAA7133,
+               .subvendor    = 0x5ace,
+               .subdevice    = 0x6290,
+               .driver_data  = SAA7134_BOARD_BEHOLD_H6,
        }, {
                /* --- boards without eeprom + subsystem ID --- */
                .vendor       = PCI_VENDOR_ID_PHILIPS,
@@ -5577,20 +5622,87 @@ int saa7134_board_init1(struct saa7134_dev *dev)
        return 0;
 }
 
+static void saa7134_tuner_setup(struct saa7134_dev *dev)
+{
+       struct tuner_setup tun_setup;
+       unsigned int mode_mask = T_RADIO     |
+                                T_ANALOG_TV |
+                                T_DIGITAL_TV;
+
+       memset(&tun_setup, 0, sizeof(tun_setup));
+       tun_setup.tuner_callback = saa7134_tuner_callback;
+
+       if (saa7134_boards[dev->board].radio_type != UNSET) {
+               tun_setup.type = saa7134_boards[dev->board].radio_type;
+               tun_setup.addr = saa7134_boards[dev->board].radio_addr;
+
+               tun_setup.mode_mask = T_RADIO;
+
+               saa7134_i2c_call_clients(dev, TUNER_SET_TYPE_ADDR, &tun_setup);
+               mode_mask &= ~T_RADIO;
+       }
+
+       if ((dev->tuner_type != TUNER_ABSENT) && (dev->tuner_type != UNSET)) {
+               tun_setup.type = dev->tuner_type;
+               tun_setup.addr = dev->tuner_addr;
+               tun_setup.config = saa7134_boards[dev->board].tuner_config;
+               tun_setup.tuner_callback = saa7134_tuner_callback;
+
+               tun_setup.mode_mask = mode_mask;
+
+               saa7134_i2c_call_clients(dev, TUNER_SET_TYPE_ADDR, &tun_setup);
+       }
+
+       if (dev->tda9887_conf) {
+               struct v4l2_priv_tun_config tda9887_cfg;
+
+               tda9887_cfg.tuner = TUNER_TDA9887;
+               tda9887_cfg.priv = &dev->tda9887_conf;
+
+               saa7134_i2c_call_clients(dev, TUNER_SET_CONFIG,
+                                        &tda9887_cfg);
+       }
+
+       if (dev->tuner_type == TUNER_XC2028) {
+               struct v4l2_priv_tun_config  xc2028_cfg;
+               struct xc2028_ctrl           ctl;
+
+               memset(&xc2028_cfg, 0, sizeof(ctl));
+               memset(&ctl, 0, sizeof(ctl));
+
+               ctl.fname   = XC2028_DEFAULT_FIRMWARE;
+               ctl.max_len = 64;
+
+               switch (dev->board) {
+               case SAA7134_BOARD_AVERMEDIA_A16D:
+                       ctl.demod = XC3028_FE_ZARLINK456;
+                       break;
+               default:
+                       ctl.demod = XC3028_FE_OREN538;
+                       ctl.mts = 1;
+               }
+
+               xc2028_cfg.tuner = TUNER_XC2028;
+               xc2028_cfg.priv  = &ctl;
+
+               saa7134_i2c_call_clients(dev, TUNER_SET_CONFIG, &xc2028_cfg);
+       }
+}
+
 /* stuff which needs working i2c */
 int saa7134_board_init2(struct saa7134_dev *dev)
 {
        unsigned char buf;
        int board;
-       struct tuner_setup tun_setup;
-       tun_setup.config = 0;
-       tun_setup.tuner_callback = saa7134_tuner_callback;
+
+       dev->tuner_type = saa7134_boards[dev->board].tuner_type;
+       dev->tuner_addr = saa7134_boards[dev->board].tuner_addr;
 
        switch (dev->board) {
        case SAA7134_BOARD_BMK_MPEX_NOTUNER:
        case SAA7134_BOARD_BMK_MPEX_TUNER:
                dev->i2c_client.addr = 0x60;
-               board = (i2c_master_recv(&dev->i2c_client,&buf,0) < 0)
+               board = (i2c_master_recv(&dev->i2c_client, &buf, 0) < 0)
                        ? SAA7134_BOARD_BMK_MPEX_NOTUNER
                        : SAA7134_BOARD_BMK_MPEX_TUNER;
                if (board == dev->board)
@@ -5600,21 +5712,9 @@ int saa7134_board_init2(struct saa7134_dev *dev)
                saa7134_boards[dev->board].name);
                dev->tuner_type = saa7134_boards[dev->board].tuner_type;
 
-               if (TUNER_ABSENT != dev->tuner_type) {
-                       tun_setup.mode_mask = T_RADIO     |
-                                             T_ANALOG_TV |
-                                             T_DIGITAL_TV;
-                       tun_setup.type = dev->tuner_type;
-                       tun_setup.addr = ADDR_UNSET;
-                       tun_setup.tuner_callback = saa7134_tuner_callback;
-
-                       saa7134_i2c_call_clients(dev,
-                                                TUNER_SET_TYPE_ADDR,
-                                                &tun_setup);
-               }
                break;
        case SAA7134_BOARD_MD7134:
-               {
+       {
                u8 subaddr;
                u8 data[3];
                int ret, tuner_t;
@@ -5667,30 +5767,8 @@ int saa7134_board_init2(struct saa7134_dev *dev)
                }
 
                printk(KERN_INFO "%s Tuner type is %d\n", dev->name, dev->tuner_type);
-               if (dev->tuner_type == TUNER_PHILIPS_FMD1216ME_MK3) {
-                       struct v4l2_priv_tun_config tda9887_cfg;
-
-                       tda9887_cfg.tuner = TUNER_TDA9887;
-                       tda9887_cfg.priv  = &dev->tda9887_conf;
-
-                       dev->tda9887_conf = TDA9887_PRESENT      |
-                                           TDA9887_PORT1_ACTIVE |
-                                           TDA9887_PORT2_ACTIVE;
-
-                       saa7134_i2c_call_clients(dev, TUNER_SET_CONFIG,
-                                                &tda9887_cfg);
-               }
-
-               tun_setup.mode_mask = T_RADIO     |
-                                     T_ANALOG_TV |
-                                     T_DIGITAL_TV;
-               tun_setup.type = dev->tuner_type;
-               tun_setup.addr = ADDR_UNSET;
-
-               saa7134_i2c_call_clients(dev,
-                                        TUNER_SET_TYPE_ADDR, &tun_setup);
-               }
                break;
+       }
        case SAA7134_BOARD_PHILIPS_EUROPA:
                if (dev->autodetected && (dev->eedata[0x41] == 0x1c)) {
                        /* Reconfigure board as Snake reference design */
@@ -5702,43 +5780,43 @@ int saa7134_board_init2(struct saa7134_dev *dev)
                }
        case SAA7134_BOARD_VIDEOMATE_DVBT_300:
        case SAA7134_BOARD_ASUS_EUROPA2_HYBRID:
+       {
+
                /* The Philips EUROPA based hybrid boards have the tuner connected through
                 * the channel decoder. We have to make it transparent to find it
                 */
-               {
                u8 data[] = { 0x07, 0x02};
                struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
                i2c_transfer(&dev->i2c_adap, &msg, 1);
 
-               tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV;
-               tun_setup.type = dev->tuner_type;
-               tun_setup.addr = dev->tuner_addr;
-
-               saa7134_i2c_call_clients (dev, TUNER_SET_TYPE_ADDR,&tun_setup);
-               }
                break;
+       }
        case SAA7134_BOARD_PHILIPS_TIGER:
        case SAA7134_BOARD_PHILIPS_TIGER_S:
-               {
+       {
                u8 data[] = { 0x3c, 0x33, 0x60};
                struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
-               if(dev->autodetected && (dev->eedata[0x49] == 0x50)) {
+               if (dev->autodetected && (dev->eedata[0x49] == 0x50)) {
                        dev->board = SAA7134_BOARD_PHILIPS_TIGER_S;
                        printk(KERN_INFO "%s: Reconfigured board as %s\n",
                                dev->name, saa7134_boards[dev->board].name);
                }
-               if(dev->board == SAA7134_BOARD_PHILIPS_TIGER_S) {
-                       tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV;
-                       tun_setup.type = TUNER_PHILIPS_TDA8290;
-                       tun_setup.addr = 0x4b;
-                       tun_setup.config = 2;
+               if (dev->board == SAA7134_BOARD_PHILIPS_TIGER_S) {
+                       dev->tuner_type = TUNER_PHILIPS_TDA8290;
+
+                       saa7134_tuner_setup(dev);
 
-                       saa7134_i2c_call_clients (dev, TUNER_SET_TYPE_ADDR,&tun_setup);
                        data[2] = 0x68;
+                       i2c_transfer(&dev->i2c_adap, &msg, 1);
+
+                       /* Tuner setup is handled before I2C transfer.
+                          Due to that, there's no need to do it later
+                        */
+                       return 0;
                }
                i2c_transfer(&dev->i2c_adap, &msg, 1);
-               }
                break;
+       }
        case SAA7134_BOARD_HAUPPAUGE_HVR1110:
                hauppauge_eeprom(dev, dev->eedata+0x80);
                /* break intentionally omitted */
@@ -5751,52 +5829,55 @@ int saa7134_board_init2(struct saa7134_dev *dev)
        case SAA7134_BOARD_AVERMEDIA_SUPER_007:
        case SAA7134_BOARD_TWINHAN_DTV_DVB_3056:
        case SAA7134_BOARD_CREATIX_CTX953:
+       {
                /* this is a hybrid board, initialize to analog mode
                 * and configure firmware eeprom address
                 */
-               {
                u8 data[] = { 0x3c, 0x33, 0x60};
                struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
                i2c_transfer(&dev->i2c_adap, &msg, 1);
-               }
                break;
+       }
        case SAA7134_BOARD_FLYDVB_TRIO:
-               {
+       {
                u8 data[] = { 0x3c, 0x33, 0x62};
                struct i2c_msg msg = {.addr=0x09, .flags=0, .buf=data, .len = sizeof(data)};
                i2c_transfer(&dev->i2c_adap, &msg, 1);
-               }
                break;
+       }
        case SAA7134_BOARD_ADS_DUO_CARDBUS_PTV331:
        case SAA7134_BOARD_FLYDVBT_HYBRID_CARDBUS:
+       {
                /* initialize analog mode  */
-               {
                u8 data[] = { 0x3c, 0x33, 0x6a};
                struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
                i2c_transfer(&dev->i2c_adap, &msg, 1);
-               }
                break;
+       }
        case SAA7134_BOARD_CINERGY_HT_PCMCIA:
        case SAA7134_BOARD_CINERGY_HT_PCI:
+       {
                /* initialize analog mode */
-               {
                u8 data[] = { 0x3c, 0x33, 0x68};
                struct i2c_msg msg = {.addr=0x08, .flags=0, .buf=data, .len = sizeof(data)};
                i2c_transfer(&dev->i2c_adap, &msg, 1);
-               }
                break;
+       }
        case SAA7134_BOARD_KWORLD_ATSC110:
-               {
-                       /* enable tuner */
-                       int i;
-                       static const u8 buffer [] = { 0x10,0x12,0x13,0x04,0x16,0x00,0x14,0x04,0x017,0x00 };
-                       dev->i2c_client.addr = 0x0a;
-                       for (i = 0; i < 5; i++)
-                               if (2 != i2c_master_send(&dev->i2c_client,&buffer[i*2],2))
-                                       printk(KERN_WARNING "%s: Unable to enable tuner(%i).\n",
-                                              dev->name, i);
-               }
+       {
+               /* enable tuner */
+               int i;
+               static const u8 buffer [] = { 0x10, 0x12, 0x13, 0x04, 0x16,
+                                             0x00, 0x14, 0x04, 0x17, 0x00 };
+               dev->i2c_client.addr = 0x0a;
+               for (i = 0; i < 5; i++)
+                       if (2 != i2c_master_send(&dev->i2c_client,
+                                                &buffer[i*2], 2))
+                               printk(KERN_WARNING
+                                      "%s: Unable to enable tuner(%i).\n",
+                                      dev->name, i);
                break;
+       }
        case SAA7134_BOARD_VIDEOMATE_DVBT_200:
        case SAA7134_BOARD_VIDEOMATE_DVBT_200A:
                /* The T200 and the T200A share the same pci id.  Consequently,
@@ -5821,7 +5902,7 @@ int saa7134_board_init2(struct saa7134_dev *dev)
                }
                break;
        case SAA7134_BOARD_BEHOLD_COLUMBUS_TVFM:
-               {
+       {
                struct v4l2_priv_tun_config tea5767_cfg;
                struct tea5767_ctrl ctl;
 
@@ -5832,34 +5913,11 @@ int saa7134_board_init2(struct saa7134_dev *dev)
                tea5767_cfg.tuner = TUNER_TEA5767;
                tea5767_cfg.priv  = &ctl;
                saa7134_i2c_call_clients(dev, TUNER_SET_CONFIG, &tea5767_cfg);
-               }
                break;
        }
+       } /* switch() */
 
-       if (dev->tuner_type == TUNER_XC2028) {
-               struct v4l2_priv_tun_config  xc2028_cfg;
-               struct xc2028_ctrl           ctl;
-
-               memset(&xc2028_cfg, 0, sizeof(ctl));
-               memset(&ctl, 0, sizeof(ctl));
-
-               ctl.fname   = XC2028_DEFAULT_FIRMWARE;
-               ctl.max_len = 64;
-
-               switch (dev->board) {
-               case SAA7134_BOARD_AVERMEDIA_A16D:
-                       ctl.demod = XC3028_FE_ZARLINK456;
-                       break;
-               default:
-                       ctl.demod = XC3028_FE_OREN538;
-                       ctl.mts = 1;
-               }
-
-               xc2028_cfg.tuner = TUNER_XC2028;
-               xc2028_cfg.priv  = &ctl;
-
-               saa7134_i2c_call_clients(dev, TUNER_SET_CONFIG, &xc2028_cfg);
-       }
+       saa7134_tuner_setup(dev);
 
        return 0;
 }
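
The hunks above mostly move the opening brace of each per-board block up to the case label, so the temporary data[]/msg locals are scoped to that case and the break sits inside the braces. A standalone sketch of that construct in plain C, not driver code, with the sample bytes borrowed from the hunks purely as example values:

#include <stdio.h>

static void init_board(int board)
{
        switch (board) {
        case 1:
        {
                /* locals live only inside this case's braces */
                unsigned char data[] = { 0x3c, 0x33, 0x60 };

                printf("board 1: %u init bytes, last 0x%02x\n",
                       (unsigned int)sizeof(data), data[2]);
                break;
        }
        case 2:
        {
                unsigned char data[] = { 0x3c, 0x33, 0x62 };

                printf("board 2: last byte 0x%02x\n", data[2]);
                break;
        }
        default:
                puts("no init needed");
        }
}

int main(void)
{
        init_board(1);
        init_board(2);
        return 0;
}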
index 2ccfaba0c490bfc6d42da43722a8ab6f665d42c9..d8af3863f2d3de675492e34d8a84ec2269d1e5ff 100644 (file)
@@ -324,8 +324,6 @@ static u32 functionality(struct i2c_adapter *adap)
 static int attach_inform(struct i2c_client *client)
 {
        struct saa7134_dev *dev = client->adapter->algo_data;
-       int tuner = dev->tuner_type;
-       struct tuner_setup tun_setup;
 
        d1printk( "%s i2c attach [addr=0x%x,client=%s]\n",
                client->driver->driver.name, client->addr, client->name);
@@ -346,46 +344,6 @@ static int attach_inform(struct i2c_client *client)
                }
        }
 
-       if (!client->driver->command)
-               return 0;
-
-       if (saa7134_boards[dev->board].radio_type != UNSET) {
-
-               tun_setup.type = saa7134_boards[dev->board].radio_type;
-               tun_setup.addr = saa7134_boards[dev->board].radio_addr;
-
-               if ((tun_setup.addr == ADDR_UNSET) || (tun_setup.addr == client->addr)) {
-                       tun_setup.mode_mask = T_RADIO;
-
-                       client->driver->command(client, TUNER_SET_TYPE_ADDR, &tun_setup);
-               }
-       }
-
-       if (tuner != UNSET) {
-               tun_setup.type = tuner;
-               tun_setup.addr = saa7134_boards[dev->board].tuner_addr;
-               tun_setup.config = saa7134_boards[dev->board].tuner_config;
-               tun_setup.tuner_callback = saa7134_tuner_callback;
-
-               if ((tun_setup.addr == ADDR_UNSET)||(tun_setup.addr == client->addr)) {
-
-                       tun_setup.mode_mask = T_ANALOG_TV;
-
-                       client->driver->command(client,TUNER_SET_TYPE_ADDR, &tun_setup);
-               }
-
-               if (tuner == TUNER_TDA9887) {
-                       struct v4l2_priv_tun_config tda9887_cfg;
-
-                       tda9887_cfg.tuner = TUNER_TDA9887;
-                       tda9887_cfg.priv = &dev->tda9887_conf;
-
-                       client->driver->command(client, TUNER_SET_CONFIG,
-                                               &tda9887_cfg);
-               }
-       }
-
-
        return 0;
 }
 
index 767ff30832f27d9e613ca3883c9f174438a59064..919632b10aae0ed20510fc68c2d89e944117b00c 100644 (file)
@@ -531,6 +531,7 @@ void saa7134_set_i2c_ir(struct saa7134_dev *dev, struct IR_i2c *ir)
                break;
        case SAA7134_BOARD_BEHOLD_607_9FM:
        case SAA7134_BOARD_BEHOLD_M6:
+       case SAA7134_BOARD_BEHOLD_H6:
                snprintf(ir->c.name, sizeof(ir->c.name), "BeholdTV");
                ir->get_key   = get_key_beholdm6xx;
                ir->ir_codes  = ir_codes_behold;
index 924ffd13637ead8b5e66d058b037ae7b9cbb9bd1..34ff0d4998f382ea53e68cae6e4c22a787b76b1a 100644 (file)
@@ -263,6 +263,7 @@ struct saa7134_format {
 #define SAA7134_BOARD_VIDEOMATE_T750       139
 #define SAA7134_BOARD_AVERMEDIA_A700_PRO    140
 #define SAA7134_BOARD_AVERMEDIA_A700_HYBRID 141
+#define SAA7134_BOARD_BEHOLD_H6      142
 
 
 #define SAA7134_MAXBOARDS 8
index 53c5edbcf7ea1add2ce7ea1730ef0fd0332df3c3..72c4081feff5e4d8af049bacbc5d62ab07181b3b 100644 (file)
@@ -1418,7 +1418,8 @@ static int saa717x_command(struct i2c_client *client, unsigned cmd, void *arg)
 /* i2c implementation */
 
 /* ----------------------------------------------------------------------- */
-static int saa717x_probe(struct i2c_client *client)
+static int saa717x_probe(struct i2c_client *client,
+                        const struct i2c_device_id *did)
 {
        struct saa717x_state *decoder;
        u8 id = 0;
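
This conversion, and the matching ones further down (tcm825x, tlv320aic23b, tuner-core, tvaudio, upd6408x, vp27smpx, wm87xx), add the second argument of the new-style i2c probe() callback: a const struct i2c_device_id * naming the table entry that matched, which can also be NULL when the legacy v4l2_i2c_attach() helper calls probe() directly (see the v4l2-common hunk below). A minimal, hedged sketch of a driver built on that binding model; the chip and driver names here are invented:

#include <linux/module.h>
#include <linux/i2c.h>

static int example_probe(struct i2c_client *client,
                         const struct i2c_device_id *id)
{
        /* id may be NULL when probed through a legacy attach helper */
        dev_info(&client->dev, "bound as %s\n", id ? id->name : "legacy");
        return 0;
}

static int example_remove(struct i2c_client *client)
{
        return 0;
}

static const struct i2c_device_id example_id[] = {
        { "examplechip", 0 },
        { }
};
MODULE_DEVICE_TABLE(i2c, example_id);

static struct i2c_driver example_driver = {
        .driver   = { .name = "examplechip" },
        .probe    = example_probe,
        .remove   = example_remove,
        .id_table = example_id,
};

static int __init example_init(void)
{
        return i2c_add_driver(&example_driver);
}
module_init(example_init);

static void __exit example_exit(void)
{
        i2c_del_driver(&example_driver);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");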
index 6943b447a1bd7f4067e4a6f97a92c9e6d67ef83e..e57a646057785eb51dd331febc4db1772284204c 100644 (file)
@@ -840,7 +840,8 @@ static struct v4l2_int_device tcm825x_int_device = {
        },
 };
 
-static int tcm825x_probe(struct i2c_client *client)
+static int tcm825x_probe(struct i2c_client *client,
+                        const struct i2c_device_id *did)
 {
        struct tcm825x_sensor *sensor = &tcm825x;
        int rval;
index dc7b9c220b90f5f7ab3e25b18dd00c571daa912f..f1db54202deaebb36138b119279bdf18424644aa 100644 (file)
@@ -125,7 +125,8 @@ static int tlv320aic23b_command(struct i2c_client *client,
  * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
  */
 
-static int tlv320aic23b_probe(struct i2c_client *client)
+static int tlv320aic23b_probe(struct i2c_client *client,
+                             const struct i2c_device_id *id)
 {
        struct tlv320aic23b_state *state;
 
index 2b72e10e6b9f3274c1f6408f0625fc437706a10d..6bf104ea051da24c06c8b7d7bfe470555bd2f964 100644 (file)
 
 #define PREFIX t->i2c->driver->driver.name
 
+/* This macro allows us to probe for a symbol dynamically, avoiding a
+ * static link to the module that provides it.
+ */
+#ifdef CONFIG_MEDIA_ATTACH
+#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \
+       int __r = -EINVAL; \
+       typeof(&FUNCTION) __a = symbol_request(FUNCTION); \
+       if (__a) { \
+               __r = (int) __a(ARGS); \
+       } else { \
+               printk(KERN_ERR "TUNER: Unable to find " \
+                               "symbol "#FUNCTION"()\n"); \
+       } \
+       symbol_put(FUNCTION); \
+       __r; \
+})
+
+static void tuner_detach(struct dvb_frontend *fe)
+{
+       if (fe->ops.tuner_ops.release) {
+               fe->ops.tuner_ops.release(fe);
+               symbol_put_addr(fe->ops.tuner_ops.release);
+       }
+       if (fe->ops.analog_ops.release) {
+               fe->ops.analog_ops.release(fe);
+               symbol_put_addr(fe->ops.analog_ops.release);
+       }
+}
+#else
+#define tuner_symbol_probe(FUNCTION, ARGS...) ({ \
+       FUNCTION(ARGS); \
+})
+
+static void tuner_detach(struct dvb_frontend *fe)
+{
+       if (fe->ops.tuner_ops.release)
+               fe->ops.tuner_ops.release(fe);
+       if (fe->ops.analog_ops.release)
+               fe->ops.analog_ops.release(fe);
+}
+#endif
+
 struct tuner {
        /* device */
        struct dvb_frontend fe;
@@ -56,7 +96,7 @@ struct tuner {
 
 /* standard i2c insmod options */
 static unsigned short normal_i2c[] = {
-#if defined(CONFIG_TUNER_TEA5761) || (defined(CONFIG_TUNER_TEA5761_MODULE) && defined(MODULE))
+#if defined(CONFIG_MEDIA_TUNER_TEA5761) || (defined(CONFIG_MEDIA_TUNER_TEA5761_MODULE) && defined(MODULE))
        0x10,
 #endif
        0x42, 0x43, 0x4a, 0x4b,                 /* tda8290 */
@@ -139,22 +179,6 @@ static void fe_set_params(struct dvb_frontend *fe,
        fe_tuner_ops->set_analog_params(fe, params);
 }
 
-static void fe_release(struct dvb_frontend *fe)
-{
-       if (fe->ops.tuner_ops.release)
-               fe->ops.tuner_ops.release(fe);
-
-       /* DO NOT kfree(fe->analog_demod_priv)
-        *
-        * If we are in this function, analog_demod_priv contains a pointer
-        * to struct tuner *t.  This will be kfree'd in tuner_detach().
-        *
-        * Otherwise, fe->ops.analog_demod_ops->release will
-        * handle the cleanup for analog demodulator modules.
-        */
-       fe->analog_demod_priv = NULL;
-}
-
 static void fe_standby(struct dvb_frontend *fe)
 {
        struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
@@ -191,7 +215,6 @@ static void tuner_status(struct dvb_frontend *fe);
 static struct analog_demod_ops tuner_core_ops = {
        .set_params     = fe_set_params,
        .standby        = fe_standby,
-       .release        = fe_release,
        .has_signal     = fe_has_signal,
        .set_config     = fe_set_config,
        .tuner_status   = tuner_status
@@ -323,7 +346,8 @@ static void attach_tda829x(struct tuner *t)
                .lna_cfg        = t->config,
                .tuner_callback = t->tuner_callback,
        };
-       tda829x_attach(&t->fe, t->i2c->adapter, t->i2c->addr, &cfg);
+       dvb_attach(tda829x_attach,
+                  &t->fe, t->i2c->adapter, t->i2c->addr, &cfg);
 }
 
 static struct xc5000_config xc5000_cfg;
@@ -356,12 +380,13 @@ static void set_type(struct i2c_client *c, unsigned int type,
        }
 
        /* discard private data, in case set_type() was previously called */
-       if (analog_ops->release)
-               analog_ops->release(&t->fe);
+       tuner_detach(&t->fe);
+       t->fe.analog_demod_priv = NULL;
 
        switch (t->type) {
        case TUNER_MT2032:
-               microtune_attach(&t->fe, t->i2c->adapter, t->i2c->addr);
+               dvb_attach(microtune_attach,
+                          &t->fe, t->i2c->adapter, t->i2c->addr);
                break;
        case TUNER_PHILIPS_TDA8290:
        {
@@ -369,12 +394,14 @@ static void set_type(struct i2c_client *c, unsigned int type,
                break;
        }
        case TUNER_TEA5767:
-               if (!tea5767_attach(&t->fe, t->i2c->adapter, t->i2c->addr))
+               if (!dvb_attach(tea5767_attach, &t->fe,
+                               t->i2c->adapter, t->i2c->addr))
                        goto attach_failed;
                t->mode_mask = T_RADIO;
                break;
        case TUNER_TEA5761:
-               if (!tea5761_attach(&t->fe, t->i2c->adapter, t->i2c->addr))
+               if (!dvb_attach(tea5761_attach, &t->fe,
+                               t->i2c->adapter, t->i2c->addr))
                        goto attach_failed;
                t->mode_mask = T_RADIO;
                break;
@@ -388,8 +415,8 @@ static void set_type(struct i2c_client *c, unsigned int type,
                buffer[2] = 0x86;
                buffer[3] = 0x54;
                i2c_master_send(c, buffer, 4);
-               if (!simple_tuner_attach(&t->fe, t->i2c->adapter, t->i2c->addr,
-                                       t->type))
+               if (!dvb_attach(simple_tuner_attach, &t->fe,
+                               t->i2c->adapter, t->i2c->addr, t->type))
                        goto attach_failed;
                break;
        case TUNER_PHILIPS_TD1316:
@@ -397,9 +424,9 @@ static void set_type(struct i2c_client *c, unsigned int type,
                buffer[1] = 0xdc;
                buffer[2] = 0x86;
                buffer[3] = 0xa4;
-               i2c_master_send(c,buffer,4);
-               if (!simple_tuner_attach(&t->fe, t->i2c->adapter,
-                                       t->i2c->addr, t->type))
+               i2c_master_send(c, buffer, 4);
+               if (!dvb_attach(simple_tuner_attach, &t->fe,
+                               t->i2c->adapter, t->i2c->addr, t->type))
                        goto attach_failed;
                break;
        case TUNER_XC2028:
@@ -409,12 +436,13 @@ static void set_type(struct i2c_client *c, unsigned int type,
                        .i2c_addr  = t->i2c->addr,
                        .callback  = t->tuner_callback,
                };
-               if (!xc2028_attach(&t->fe, &cfg))
+               if (!dvb_attach(xc2028_attach, &t->fe, &cfg))
                        goto attach_failed;
                break;
        }
        case TUNER_TDA9887:
-               tda9887_attach(&t->fe, t->i2c->adapter, t->i2c->addr);
+               dvb_attach(tda9887_attach,
+                          &t->fe, t->i2c->adapter, t->i2c->addr);
                break;
        case TUNER_XC5000:
        {
@@ -424,7 +452,8 @@ static void set_type(struct i2c_client *c, unsigned int type,
                xc5000_cfg.if_khz         = 5380;
                xc5000_cfg.priv           = c->adapter->algo_data;
                xc5000_cfg.tuner_callback = t->tuner_callback;
-               if (!xc5000_attach(&t->fe, t->i2c->adapter, &xc5000_cfg))
+               if (!dvb_attach(xc5000_attach,
+                               &t->fe, t->i2c->adapter, &xc5000_cfg))
                        goto attach_failed;
 
                xc_tuner_ops = &t->fe.ops.tuner_ops;
@@ -433,8 +462,8 @@ static void set_type(struct i2c_client *c, unsigned int type,
                break;
        }
        default:
-               if (!simple_tuner_attach(&t->fe, t->i2c->adapter,
-                                       t->i2c->addr, t->type))
+               if (!dvb_attach(simple_tuner_attach, &t->fe,
+                               t->i2c->adapter, t->i2c->addr, t->type))
                        goto attach_failed;
 
                break;
@@ -442,12 +471,14 @@ static void set_type(struct i2c_client *c, unsigned int type,
 
        if ((NULL == analog_ops->set_params) &&
            (fe_tuner_ops->set_analog_params)) {
+
                strlcpy(t->i2c->name, fe_tuner_ops->info.name,
                        sizeof(t->i2c->name));
 
                t->fe.analog_demod_priv = t;
                memcpy(analog_ops, &tuner_core_ops,
                       sizeof(struct analog_demod_ops));
+
        } else {
                strlcpy(t->i2c->name, analog_ops->info.name,
                        sizeof(t->i2c->name));
@@ -645,8 +676,8 @@ static void tuner_status(struct dvb_frontend *fe)
 {
        struct tuner *t = fe->analog_demod_priv;
        unsigned long freq, freq_fraction;
-       struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
-       struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
+       struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops;
+       struct analog_demod_ops *analog_ops = &fe->ops.analog_ops;
        const char *p;
 
        switch (t->mode) {
@@ -730,8 +761,10 @@ static int tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
        struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops;
        struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
 
-       if (tuner_debug>1)
+       if (tuner_debug > 1) {
                v4l_i2c_print_ioctl(client,cmd);
+               printk("\n");
+       }
 
        switch (cmd) {
        /* --- configuration --- */
@@ -1073,7 +1106,8 @@ static void tuner_lookup(struct i2c_adapter *adap,
 /* During client attach, set_type is called by adapter's attach_inform callback.
    set_type must then be completed by tuner_probe.
  */
-static int tuner_probe(struct i2c_client *client)
+static int tuner_probe(struct i2c_client *client,
+                      const struct i2c_device_id *id)
 {
        struct tuner *t;
        struct tuner *radio;
@@ -1111,8 +1145,9 @@ static int tuner_probe(struct i2c_client *client)
        if (!no_autodetect) {
                switch (client->addr) {
                case 0x10:
-                       if (tea5761_autodetection(t->i2c->adapter,
-                                                 t->i2c->addr) >= 0) {
+                       if (tuner_symbol_probe(tea5761_autodetection,
+                                              t->i2c->adapter,
+                                              t->i2c->addr) >= 0) {
                                t->type = TUNER_TEA5761;
                                t->mode_mask = T_RADIO;
                                t->mode = T_STANDBY;
@@ -1131,8 +1166,8 @@ static int tuner_probe(struct i2c_client *client)
                case 0x4b:
                        /* If chip is not tda8290, don't register.
                           since it can be tda9887*/
-                       if (tda829x_probe(t->i2c->adapter,
-                                         t->i2c->addr) == 0) {
+                       if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter,
+                                              t->i2c->addr) == 0) {
                                tuner_dbg("tda829x detected\n");
                        } else {
                                /* Default is being tda9887 */
@@ -1144,7 +1179,8 @@ static int tuner_probe(struct i2c_client *client)
                        }
                        break;
                case 0x60:
-                       if (tea5767_autodetection(t->i2c->adapter, t->i2c->addr)
+                       if (tuner_symbol_probe(tea5767_autodetection,
+                                              t->i2c->adapter, t->i2c->addr)
                                        != EINVAL) {
                                t->type = TUNER_TEA5767;
                                t->mode_mask = T_RADIO;
@@ -1233,10 +1269,9 @@ static int tuner_legacy_probe(struct i2c_adapter *adap)
 static int tuner_remove(struct i2c_client *client)
 {
        struct tuner *t = i2c_get_clientdata(client);
-       struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops;
 
-       if (analog_ops->release)
-               analog_ops->release(&t->fe);
+       tuner_detach(&t->fe);
+       t->fe.analog_demod_priv = NULL;
 
        list_del(&t->list);
        kfree(t);
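
tuner_symbol_probe() above is built on the GNU statement-expression extension: a ({ ... }) block runs its statements and evaluates to the value of its last expression, so the macro can wrap symbol_request()/symbol_put() around the call and still hand an int back to the caller. A plain userspace illustration of just that construct; the wrapped function and the macro name are hypothetical:

#include <stdio.h>

/* Run a call, log its result, and make the macro itself evaluate to it. */
#define call_logged(FUNCTION, ARGS...) ({                       \
        int __r = FUNCTION(ARGS);                               \
        fprintf(stderr, #FUNCTION "() returned %d\n", __r);     \
        __r;                                                    \
})

static int add(int a, int b)
{
        return a + b;
}

int main(void)
{
        int sum = call_logged(add, 2, 3);

        printf("sum=%d\n", sum);
        return 0;
}

The kernel macro does the same thing, except that the lookup step is symbol_request(), which may load and pin the module providing the symbol, and the pin is dropped again with symbol_put().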
index f29a2cd0f2f27c3db0afead3d5a7f65ba7f8fd56..6f9945b04e1f2bcd676f0ed8dc910994b29ed300 100644 (file)
@@ -1461,7 +1461,7 @@ static struct CHIPDESC chiplist[] = {
 /* ---------------------------------------------------------------------- */
 /* i2c registration                                                       */
 
-static int chip_probe(struct i2c_client *client)
+static int chip_probe(struct i2c_client *client, const struct i2c_device_id *id)
 {
        struct CHIPSTATE *chip;
        struct CHIPDESC  *desc;
index bd201397a2acdb350824f7062119dfb93e5ee1b7..93bfd19dec7d6fdad7da5d4d6cf111e82327248e 100644 (file)
@@ -195,7 +195,8 @@ static int upd64031a_command(struct i2c_client *client, unsigned cmd, void *arg)
 
 /* i2c implementation */
 
-static int upd64031a_probe(struct i2c_client *client)
+static int upd64031a_probe(struct i2c_client *client,
+                          const struct i2c_device_id *id)
 {
        struct upd64031a_state *state;
        int i;
index 2d9a88f70c85ba2ac2efdebb9ac02d895ddea50c..9ab712a56ce0a9e1e4b11940a6770c3672c7b1a6 100644 (file)
@@ -172,7 +172,8 @@ static int upd64083_command(struct i2c_client *client, unsigned cmd, void *arg)
 
 /* i2c implementation */
 
-static int upd64083_probe(struct i2c_client *client)
+static int upd64083_probe(struct i2c_client *client,
+                         const struct i2c_device_id *id)
 {
        struct upd64083_state *state;
        int i;
index 64819353276a6a2f2540175625ab0bf42e4fac23..17f542dfb366c6588aeba260f4ce3b599405d6d9 100644 (file)
 
 #define VICAM_HEADER_SIZE       64
 
-#define clamp( x, l, h )        max_t( __typeof__( x ),         \
-                                      ( l ),                   \
-                                      min_t( __typeof__( x ),  \
-                                             ( h ),            \
-                                             ( x ) ) )
-
 /* Not sure what all the bytes in these char
  * arrays do, but they're necessary to make
  * the camera work.
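
The private clamp() macro removed above is the usual typeof-based construction; the hunk only shows the deletion, presumably because an equivalent helper is now available from <linux/kernel.h>. For reference, a standalone userspace rendering of the same idea, with simplified min_t/max_t stand-ins that may evaluate their arguments twice:

#include <stdio.h>

#define max_t(type, a, b)  ((type)(a) > (type)(b) ? (type)(a) : (type)(b))
#define min_t(type, a, b)  ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

#define clamp(x, l, h)     max_t(__typeof__(x), (l),            \
                                 min_t(__typeof__(x), (h), (x)))

int main(void)
{
        int v = 300;

        printf("%d clamped to 0..255 -> %d\n", v, clamp(v, 0, 255));
        return 0;
}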
index fc24ef05b3f31cf527b6fc850423cae317ad224b..74e1d3075a20c28d34f27f171bd09019e230373c 100644 (file)
@@ -1,7 +1,7 @@
 config VIDEO_USBVISION
        tristate "USB video devices based on Nogatech NT1003/1004/1005"
        depends on I2C && VIDEO_V4L2
-       select VIDEO_TUNER
+       select MEDIA_TUNER
        select VIDEO_SAA711X if VIDEO_HELPER_CHIPS_AUTO
        ---help---
          There are more than 50 different USB video devices based on
index 9ac92a80c645442ecba5d038e75b9479bebb05d6..338718750945bf8feb63e3d00a8e254b6781a3cc 100644 (file)
@@ -3,3 +3,4 @@ usbvision-objs  := usbvision-core.o usbvision-video.o usbvision-i2c.o usbvision-
 obj-$(CONFIG_VIDEO_USBVISION) += usbvision.o
 
 EXTRA_CFLAGS += -Idrivers/media/video
+EXTRA_CFLAGS += -Idrivers/media/common/tuners
index 34deb68ae568fdb910f2d833b1e146a08cad9fdd..e9dd996fd5df1f6084930345b076e95a12d16941 100644 (file)
@@ -710,13 +710,14 @@ EXPORT_SYMBOL(v4l2_chip_ident_i2c_client);
 /* Helper function for I2C legacy drivers */
 
 int v4l2_i2c_attach(struct i2c_adapter *adapter, int address, struct i2c_driver *driver,
-               const char *name, int (*probe)(struct i2c_client *))
+               const char *name,
+               int (*probe)(struct i2c_client *, const struct i2c_device_id *))
 {
        struct i2c_client *client;
        int err;
 
        client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
-       if (client == 0)
+       if (!client)
                return -ENOMEM;
 
        client->addr = address;
@@ -724,7 +725,7 @@ int v4l2_i2c_attach(struct i2c_adapter *adapter, int address, struct i2c_driver
        client->driver = driver;
        strlcpy(client->name, name, sizeof(client->name));
 
-       err = probe(client);
+       err = probe(client, NULL);
        if (err == 0) {
                i2c_attach_client(client);
        } else {
index fc51e4918bbf790d05ee8d93160a31ddd994aefb..982f4463896c68ed74361dacc3c57918a9fb1212 100644 (file)
@@ -97,7 +97,10 @@ int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
 void *videobuf_queue_to_vmalloc (struct videobuf_queue *q,
                           struct videobuf_buffer *buf)
 {
-       return CALL(q, vmalloc, buf);
+       if (q->int_ops->vmalloc)
+               return q->int_ops->vmalloc(buf);
+       else
+               return NULL;
 }
 EXPORT_SYMBOL_GPL(videobuf_queue_to_vmalloc);
 
index d545c98dd5e7e3eb1a85c12ff64adfa977ba9345..01ea99c9bc1aa5db2d208bff7e23aa54463ab7c6 100644 (file)
@@ -13,7 +13,7 @@
 /*
  * TODO:
  * - remove "mark pages reserved-hacks" from memory allocation code
- *   and implement nopage()
+ *   and implement fault()
  * - check decimation, calculating and reporting image size when
  *   using decimation
  * - implement read(), user mode buffers and overlay (?)
index 282c81403c976a2c1ecfefc693e9019aa24613b0..fac0deba24afc69b67d07963c557ecc748e24c84 100644 (file)
@@ -121,7 +121,8 @@ static int vp27smpx_command(struct i2c_client *client, unsigned cmd, void *arg)
  * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
  */
 
-static int vp27smpx_probe(struct i2c_client *client)
+static int vp27smpx_probe(struct i2c_client *client,
+                         const struct i2c_device_id *id)
 {
        struct vp27smpx_state *state;
 
index 31795b4f8b6316e2e480ac838a53d52840660de4..0f8ed8461fba25f7c70f0a3bd9573f4698569a03 100644 (file)
@@ -261,7 +261,8 @@ static int wm8739_command(struct i2c_client *client, unsigned cmd, void *arg)
 
 /* i2c implementation */
 
-static int wm8739_probe(struct i2c_client *client)
+static int wm8739_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
 {
        struct wm8739_state *state;
 
index 869f9e7946b6082bb9df221ccc056d13e808e0d9..67a409e60c46559b4aa76fe820ac60b2a88e07b5 100644 (file)
@@ -159,7 +159,8 @@ static int wm8775_command(struct i2c_client *client, unsigned cmd, void *arg)
  * concerning the addresses: i2c wants 7 bit (without the r/w bit), so '>>1'
  */
 
-static int wm8775_probe(struct i2c_client *client)
+static int wm8775_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
 {
        struct wm8775_state *state;
 
index 328ed6e7ac6abe5388a30a90b3b9752e01c9c842..870bc5a70e3fdf7fb41f12b5142bba4c050722c4 100644 (file)
@@ -180,6 +180,7 @@ static ssize_t zoran_write(struct file *file, const char __user *buffer,
 }
 
 static const struct file_operations zoran_operations = {
+       .owner          = THIS_MODULE,
        .open           = zoran_open,
        .read           = seq_read,
        .write          = zoran_write,
@@ -195,10 +196,8 @@ zoran_proc_init (struct zoran *zr)
        char name[8];
 
        snprintf(name, 7, "zoran%d", zr->id);
-       if ((zr->zoran_proc = create_proc_entry(name, 0, NULL))) {
-               zr->zoran_proc->data = zr;
-               zr->zoran_proc->owner = THIS_MODULE;
-               zr->zoran_proc->proc_fops = &zoran_operations;
+       zr->zoran_proc = proc_create_data(name, 0, NULL, &zoran_operations, zr);
+       if (zr->zoran_proc != NULL) {
                dprintk(2,
                        KERN_INFO
                        "%s: procfs entry /proc/%s allocated. data=%p\n",
index a95314897402b53cd7d677e0eff4c8a27966668e..81483de8c0fdfeae0262dc745a1147ca5c28f966 100644 (file)
@@ -371,7 +371,7 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
        /* connect the i2o_block_request to the request */
        if (!req->special) {
                ireq = i2o_block_request_alloc();
-               if (unlikely(IS_ERR(ireq))) {
+               if (IS_ERR(ireq)) {
                        osm_debug("unable to allocate i2o_block_request!\n");
                        return BLKPREP_DEFER;
                }
index 6fdd072201f990e4b975724bef3668b08f44dfac..54a3016ff45de24afd2430ff538c6d3bb9bf6484 100644 (file)
@@ -1893,13 +1893,11 @@ static int i2o_proc_create_entries(struct proc_dir_entry *dir,
        struct proc_dir_entry *tmp;
 
        while (i2o_pe->name) {
-               tmp = create_proc_entry(i2o_pe->name, i2o_pe->mode, dir);
+               tmp = proc_create_data(i2o_pe->name, i2o_pe->mode, dir,
+                                      i2o_pe->fops, data);
                if (!tmp)
                        return -1;
 
-               tmp->data = data;
-               tmp->proc_fops = i2o_pe->fops;
-
                i2o_pe++;
        }
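
This hunk and the zoran conversion further up follow the same pattern: instead of create_proc_entry() followed by assignments to ->data and ->proc_fops, proc_create_data() registers the entry already carrying its file_operations and private data, so it is never visible half-initialized. A minimal, hedged sketch of that call as a read-only seq_file entry against the kernel version this patch targets (PDE() was the accessor of that era); all names are hypothetical:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
        seq_printf(m, "private data at %p\n", m->private);
        return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
        /* PDE(inode)->data is the pointer handed to proc_create_data() */
        return single_open(file, example_show, PDE(inode)->data);
}

static const struct file_operations example_fops = {
        .owner   = THIS_MODULE,
        .open    = example_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static int example_value = 42;

static int __init example_init(void)
{
        if (!proc_create_data("example", 0444, NULL,
                              &example_fops, &example_value))
                return -ENOMEM;
        return 0;
}
module_init(example_init);

static void __exit example_exit(void)
{
        remove_proc_entry("example", NULL);
}
module_exit(example_exit);

MODULE_LICENSE("GPL");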
 
index f6f2d960cadbef6e883842b4b43b09a85947f4ea..ef8a492766a72b5d13076bd8f8d7ab9cf4a0e6f5 100644 (file)
@@ -132,7 +132,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
 
        if (iter >= MAX_ASIC_ISR_LOOPS)
                printk(KERN_ERR "%s: interrupt processing overrun\n",
-                      __FUNCTION__);
+                      __func__);
 }
 
 static inline int asic3_irq_to_bank(struct asic3 *asic, int irq)
@@ -409,7 +409,7 @@ int asic3_gpio_get_value(struct asic3 *asic, unsigned gpio)
                return asic3_get_gpio_d(asic, Status) & mask;
        default:
                printk(KERN_ERR "%s: invalid GPIO value 0x%x",
-                      __FUNCTION__, gpio);
+                      __func__, gpio);
                return -EINVAL;
        }
 }
@@ -437,7 +437,7 @@ void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val)
                return;
        default:
                printk(KERN_ERR "%s: invalid GPIO value 0x%x",
-                      __FUNCTION__, gpio);
+                      __func__, gpio);
                return;
        }
 }
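
The substitutions here, and in the sm501 hunks below, swap the GCC-specific __FUNCTION__ for the C99-standard __func__; both expand to the name of the enclosing function. A trivial standalone illustration:

#include <stdio.h>

static void report(void)
{
        printf("%s: called\n", __func__);       /* prints "report: called" */
}

int main(void)
{
        report();
        return 0;
}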
index 4edc120a6359490a816eca96f30dbbc3a5eda624..633cbba072f024f013a17f42cc8b3181c63ef8a4 100644 (file)
@@ -132,8 +132,9 @@ static struct ds1wm_platform_data ds1wm_pdata = {
        .disable   = ds1wm_disable,
 };
 
-static int ds1wm_device_add(struct device *pasic3_dev, int bus_shift)
+static int ds1wm_device_add(struct platform_device *pasic3_pdev, int bus_shift)
 {
+       struct device *pasic3_dev = &pasic3_pdev->dev;
        struct pasic3_data *asic = pasic3_dev->driver_data;
        struct platform_device *pdev;
        int ret;
@@ -144,8 +145,8 @@ static int ds1wm_device_add(struct device *pasic3_dev, int bus_shift)
                return -ENOMEM;
        }
 
-       ret = platform_device_add_resources(pdev, pdev->resource,
-                                               pdev->num_resources);
+       ret = platform_device_add_resources(pdev, pasic3_pdev->resource,
+                                               pasic3_pdev->num_resources);
        if (ret < 0) {
                dev_dbg(pasic3_dev, "failed to add DS1WM resources\n");
                goto exit_pdev_put;
@@ -207,7 +208,7 @@ static int __init pasic3_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       ret = ds1wm_device_add(dev, asic->bus_shift);
+       ret = ds1wm_device_add(pdev, asic->bus_shift);
        if (ret < 0)
                dev_warn(dev, "failed to register DS1WM\n");
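
The fix above hands the parent platform_device into ds1wm_device_add() so that the child copies the parent's resources rather than its own, still empty, resource table. A hedged sketch of that parent-to-child resource handoff; the names are invented and this is not how the driver itself is laid out:

#include <linux/platform_device.h>
#include <linux/errno.h>

static int example_add_child(struct platform_device *parent)
{
        struct platform_device *child;
        int ret;

        child = platform_device_alloc("example-child", -1);
        if (!child)
                return -ENOMEM;

        /* copy the parent's resources, not child->resource (still empty) */
        ret = platform_device_add_resources(child, parent->resource,
                                            parent->num_resources);
        if (ret)
                goto err_put;

        child->dev.parent = &parent->dev;

        ret = platform_device_add(child);
        if (ret)
                goto err_put;

        return 0;

err_put:
        platform_device_put(child);
        return ret;
}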
 
index 13bac53db69a1ffe58726720a461ee0bbac9edd0..2fe64734d8afd2d6e893e40224ee56ff6b935706 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <linux/sm501.h>
 #include <linux/sm501-regs.h>
+#include <linux/serial_8250.h>
 
 #include <asm/io.h>
 
@@ -348,11 +349,11 @@ int sm501_unit_power(struct device *dev, unsigned int unit, unsigned int to)
        mode &= 3;              /* get current power mode */
 
        if (unit >= ARRAY_SIZE(sm->unit_power)) {
-               dev_err(dev, "%s: bad unit %d\n", __FUNCTION__, unit);
+               dev_err(dev, "%s: bad unit %d\n", __func__, unit);
                goto already;
        }
 
-       dev_dbg(sm->dev, "%s: unit %d, cur %d, to %d\n", __FUNCTION__, unit,
+       dev_dbg(sm->dev, "%s: unit %d, cur %d, to %d\n", __func__, unit,
                sm->unit_power[unit], to);
 
        if (to == 0 && sm->unit_power[unit] == 0) {
@@ -723,13 +724,14 @@ static void sm501_device_release(struct device *dev)
 */
 
 static struct platform_device *
-sm501_create_subdev(struct sm501_devdata *sm,
-                   char *name, unsigned int res_count)
+sm501_create_subdev(struct sm501_devdata *sm, char *name,
+                   unsigned int res_count, unsigned int platform_data_size)
 {
        struct sm501_device *smdev;
 
        smdev = kzalloc(sizeof(struct sm501_device) +
-                       sizeof(struct resource) * res_count, GFP_KERNEL);
+                       (sizeof(struct resource) * res_count) +
+                       platform_data_size, GFP_KERNEL);
        if (!smdev)
                return NULL;
 
@@ -737,11 +739,15 @@ sm501_create_subdev(struct sm501_devdata *sm,
 
        smdev->pdev.name = name;
        smdev->pdev.id = sm->pdev_id;
-       smdev->pdev.resource = (struct resource *)(smdev+1);
-       smdev->pdev.num_resources = res_count;
-
        smdev->pdev.dev.parent = sm->dev;
 
+       if (res_count) {
+               smdev->pdev.resource = (struct resource *)(smdev+1);
+               smdev->pdev.num_resources = res_count;
+       }
+       if (platform_data_size)
+               smdev->pdev.dev.platform_data = (void *)(smdev+1);
+
        return &smdev->pdev;
 }
 
@@ -829,7 +835,7 @@ static int sm501_register_usbhost(struct sm501_devdata *sm,
 {
        struct platform_device *pdev;
 
-       pdev = sm501_create_subdev(sm, "sm501-usb", 3);
+       pdev = sm501_create_subdev(sm, "sm501-usb", 3, 0);
        if (!pdev)
                return -ENOMEM;
 
@@ -840,12 +846,55 @@ static int sm501_register_usbhost(struct sm501_devdata *sm,
        return sm501_register_device(sm, pdev);
 }
 
+static void sm501_setup_uart_data(struct sm501_devdata *sm,
+                                 struct plat_serial8250_port *uart_data,
+                                 unsigned int offset)
+{
+       uart_data->membase = sm->regs + offset;
+       uart_data->mapbase = sm->io_res->start + offset;
+       uart_data->iotype = UPIO_MEM;
+       uart_data->irq = sm->irq;
+       uart_data->flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ;
+       uart_data->regshift = 2;
+       uart_data->uartclk = (9600 * 16);
+}
+
+static int sm501_register_uart(struct sm501_devdata *sm, int devices)
+{
+       struct platform_device *pdev;
+       struct plat_serial8250_port *uart_data;
+
+       pdev = sm501_create_subdev(sm, "serial8250", 0,
+                                  sizeof(struct plat_serial8250_port) * 3);
+       if (!pdev)
+               return -ENOMEM;
+
+       uart_data = pdev->dev.platform_data;
+
+       if (devices & SM501_USE_UART0) {
+               sm501_setup_uart_data(sm, uart_data++, 0x30000);
+               sm501_unit_power(sm->dev, SM501_GATE_UART0, 1);
+               sm501_modify_reg(sm->dev, SM501_IRQ_MASK, 1 << 12, 0);
+               sm501_modify_reg(sm->dev, SM501_GPIO63_32_CONTROL, 0x01e0, 0);
+       }
+       if (devices & SM501_USE_UART1) {
+               sm501_setup_uart_data(sm, uart_data++, 0x30020);
+               sm501_unit_power(sm->dev, SM501_GATE_UART1, 1);
+               sm501_modify_reg(sm->dev, SM501_IRQ_MASK, 1 << 13, 0);
+               sm501_modify_reg(sm->dev, SM501_GPIO63_32_CONTROL, 0x1e00, 0);
+       }
+
+       pdev->id = PLAT8250_DEV_SM501;
+
+       return sm501_register_device(sm, pdev);
+}
+
 static int sm501_register_display(struct sm501_devdata *sm,
                                  resource_size_t *mem_avail)
 {
        struct platform_device *pdev;
 
-       pdev = sm501_create_subdev(sm, "sm501-fb", 4);
+       pdev = sm501_create_subdev(sm, "sm501-fb", 4, 0);
        if (!pdev)
                return -ENOMEM;
 
@@ -963,6 +1012,7 @@ static unsigned int sm501_mem_local[] = {
 
 static int sm501_init_dev(struct sm501_devdata *sm)
 {
+       struct sm501_initdata *idata;
        resource_size_t mem_avail;
        unsigned long dramctrl;
        unsigned long devid;
@@ -980,6 +1030,9 @@ static int sm501_init_dev(struct sm501_devdata *sm)
                return -EINVAL;
        }
 
+       /* disable irqs */
+       writel(0, sm->regs + SM501_IRQ_MASK);
+
        dramctrl = readl(sm->regs + SM501_DRAM_CONTROL);
        mem_avail = sm501_mem_local[(dramctrl >> 13) & 0x7];
 
@@ -998,15 +1051,14 @@ static int sm501_init_dev(struct sm501_devdata *sm)
 
        /* check to see if we have some device initialisation */
 
-       if (sm->platdata) {
-               struct sm501_platdata *pdata = sm->platdata;
+       idata = sm->platdata ? sm->platdata->init : NULL;
+       if (idata) {
+               sm501_init_regs(sm, idata);
 
-               if (pdata->init) {
-                       sm501_init_regs(sm, sm->platdata->init);
-
-                       if (pdata->init->devices & SM501_USE_USB_HOST)
-                               sm501_register_usbhost(sm, &mem_avail);
-               }
+               if (idata->devices & SM501_USE_USB_HOST)
+                       sm501_register_usbhost(sm, &mem_avail);
+               if (idata->devices & (SM501_USE_UART0 | SM501_USE_UART1))
+                       sm501_register_uart(sm, idata->devices);
        }
 
        ret = sm501_check_clocks(sm);
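
sm501_register_uart() above packs plat_serial8250_port records into the sub-device's platform_data and lets the generic "serial8250" platform driver pick them up. A hedged, board-file style sketch of the same pattern with static data: the address, IRQ and clock are invented, and UPF_IOREMAP is used so the 8250 core maps the registers itself, whereas the sm501 code fills in membase directly:

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>

static struct plat_serial8250_port example_uart_data[] = {
        {
                .mapbase  = 0x10030000,
                .irq      = 42,
                .iotype   = UPIO_MEM,
                .regshift = 2,
                .uartclk  = 9600 * 16,
                .flags    = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
                            UPF_SHARE_IRQ | UPF_IOREMAP,
        },
        { }     /* zeroed terminator ends the list */
};

static struct platform_device example_uart_device = {
        .name = "serial8250",
        .id   = PLAT8250_DEV_PLATFORM,
        .dev  = {
                .platform_data = example_uart_data,
        },
};

static int __init example_register_uart(void)
{
        return platform_device_register(&example_uart_device);
}
device_initcall(example_register_uart);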
index 5e859486eaf851ea077501abf02d987a16d5a379..ad34e2d22524f5a7d96d4acd609c2b1c59dd454e 100644 (file)
@@ -204,8 +204,7 @@ static inline int ucb1x00_ts_pen_down(struct ucb1x00_ts *ts)
 static int ucb1x00_thread(void *_ts)
 {
        struct ucb1x00_ts *ts = _ts;
-       struct task_struct *tsk = current;
-       DECLARE_WAITQUEUE(wait, tsk);
+       DECLARE_WAITQUEUE(wait, current);
        int valid = 0;
 
        set_freezable();
@@ -234,7 +233,7 @@ static int ucb1x00_thread(void *_ts)
 
 
                if (ucb1x00_ts_pen_down(ts)) {
-                       set_task_state(tsk, TASK_INTERRUPTIBLE);
+                       set_current_state(TASK_INTERRUPTIBLE);
 
                        ucb1x00_enable_irq(ts->ucb, UCB_IRQ_TSPX, machine_is_collie() ? UCB_RISING : UCB_FALLING);
                        ucb1x00_disable(ts->ucb);
@@ -262,7 +261,7 @@ static int ucb1x00_thread(void *_ts)
                                valid = 1;
                        }
 
-                       set_task_state(tsk, TASK_INTERRUPTIBLE);
+                       set_current_state(TASK_INTERRUPTIBLE);
                        timeout = HZ / 100;
                }
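
The conversion above drops the cached task_struct pointer and calls set_current_state() on the running task directly. For reference, a hedged sketch of the classic open-coded wait this thread is built around: the state is set before the condition is re-checked so a wakeup arriving in between is not lost. The queue, flag and function names are hypothetical:

#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);

static void example_wait_for(int *event_flag)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&example_wq, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (*event_flag || signal_pending(current))
                        break;
                schedule();
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&example_wq, &wait);
}

A waker would set the flag and then call wake_up(&example_wq).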
 
index 297a48f8544679dda2af4bd435cb32feff65b661..636af2862308fab614b4f9f9b2395ce0a24c7e4f 100644 (file)
@@ -140,6 +140,7 @@ config ACER_WMI
        depends on EXPERIMENTAL
        depends on ACPI
        depends on LEDS_CLASS
+       depends on NEW_LEDS
        depends on BACKLIGHT_CLASS_DEVICE
        depends on SERIO_I8042
        select ACPI_WMI
@@ -160,6 +161,7 @@ config ASUS_LAPTOP
         depends on ACPI
        depends on EXPERIMENTAL && !ACPI_ASUS
        depends on LEDS_CLASS
+       depends on NEW_LEDS
        depends on BACKLIGHT_CLASS_DEVICE
         ---help---
          This is the new Linux driver for Asus laptops. It may also support some
@@ -241,10 +243,13 @@ config SONYPI_COMPAT
 config THINKPAD_ACPI
        tristate "ThinkPad ACPI Laptop Extras"
        depends on X86 && ACPI
+       select BACKLIGHT_LCD_SUPPORT
        select BACKLIGHT_CLASS_DEVICE
        select HWMON
        select NVRAM
-       depends on INPUT
+       select INPUT
+       select NEW_LEDS
+       select LEDS_CLASS
        ---help---
          This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
          support for Fn-Fx key combinations, Bluetooth control, video
@@ -344,6 +349,7 @@ config ATMEL_SSC
 config INTEL_MENLOW
        tristate "Thermal Management driver for Intel menlow platform"
        depends on ACPI_THERMAL
+       select THERMAL
        depends on X86
        ---help---
          ACPI thermal management enhancement driver on
@@ -351,6 +357,19 @@ config INTEL_MENLOW
 
          If unsure, say N.
 
+config EEEPC_LAPTOP
+       tristate "Eee PC Hotkey Driver (EXPERIMENTAL)"
+       depends on X86
+       depends on ACPI
+       depends on BACKLIGHT_CLASS_DEVICE
+       depends on HWMON
+       depends on EXPERIMENTAL
+       ---help---
+         This driver supports the Fn-Fx keys on Eee PC laptops.
+         It also adds the ability to switch camera/wlan on/off.
+
+         If you have an Eee PC laptop, say Y or M here.
+
 config ENCLOSURE_SERVICES
        tristate "Enclosure Services"
        default n
index 5914da434854a226cf1df15e8cac1508ddb2aeef..1952875a272ec406dfcbc8566a606002eebee128 100644 (file)
@@ -7,7 +7,8 @@ obj-$(CONFIG_IBM_ASM)           += ibmasm/
 obj-$(CONFIG_HDPU_FEATURES)    += hdpuftrs/
 obj-$(CONFIG_MSI_LAPTOP)     += msi-laptop.o
 obj-$(CONFIG_ACER_WMI)     += acer-wmi.o
-obj-$(CONFIG_ASUS_LAPTOP)     += asus-laptop.o
+obj-$(CONFIG_ASUS_LAPTOP)      += asus-laptop.o
+obj-$(CONFIG_EEEPC_LAPTOP)     += eeepc-laptop.o
 obj-$(CONFIG_ATMEL_PWM)                += atmel_pwm.o
 obj-$(CONFIG_ATMEL_SSC)                += atmel-ssc.o
 obj-$(CONFIG_ATMEL_TCLIB)      += atmel_tclib.o
diff --git a/drivers/misc/eeepc-laptop.c b/drivers/misc/eeepc-laptop.c
new file mode 100644 (file)
index 0000000..6d72760
--- /dev/null
@@ -0,0 +1,666 @@
+/*
+ *  eeepc-laptop.c - Asus Eee PC extras
+ *
+ *  Based on asus_acpi.c as patched for the Eee PC by Asus:
+ *  ftp://ftp.asus.com/pub/ASUS/EeePC/701/ASUS_ACPI_071126.rar
+ *  Based on eee.c from eeepc-linux
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/backlight.h>
+#include <linux/fb.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+#include <linux/uaccess.h>
+
+#define EEEPC_LAPTOP_VERSION   "0.1"
+
+#define EEEPC_HOTK_NAME                "Eee PC Hotkey Driver"
+#define EEEPC_HOTK_FILE                "eeepc"
+#define EEEPC_HOTK_CLASS       "hotkey"
+#define EEEPC_HOTK_DEVICE_NAME "Hotkey"
+#define EEEPC_HOTK_HID         "ASUS010"
+
+#define EEEPC_LOG      EEEPC_HOTK_FILE ": "
+#define EEEPC_ERR      KERN_ERR        EEEPC_LOG
+#define EEEPC_WARNING  KERN_WARNING    EEEPC_LOG
+#define EEEPC_NOTICE   KERN_NOTICE     EEEPC_LOG
+#define EEEPC_INFO     KERN_INFO       EEEPC_LOG
+
+/*
+ * Definitions for Asus EeePC
+ */
+#define NOTIFY_WLAN_ON 0x10
+#define NOTIFY_BRN_MIN 0x20
+#define NOTIFY_BRN_MAX 0x2f
+
+enum {
+       DISABLE_ASL_WLAN = 0x0001,
+       DISABLE_ASL_BLUETOOTH = 0x0002,
+       DISABLE_ASL_IRDA = 0x0004,
+       DISABLE_ASL_CAMERA = 0x0008,
+       DISABLE_ASL_TV = 0x0010,
+       DISABLE_ASL_GPS = 0x0020,
+       DISABLE_ASL_DISPLAYSWITCH = 0x0040,
+       DISABLE_ASL_MODEM = 0x0080,
+       DISABLE_ASL_CARDREADER = 0x0100
+};
+
+enum {
+       CM_ASL_WLAN = 0,
+       CM_ASL_BLUETOOTH,
+       CM_ASL_IRDA,
+       CM_ASL_1394,
+       CM_ASL_CAMERA,
+       CM_ASL_TV,
+       CM_ASL_GPS,
+       CM_ASL_DVDROM,
+       CM_ASL_DISPLAYSWITCH,
+       CM_ASL_PANELBRIGHT,
+       CM_ASL_BIOSFLASH,
+       CM_ASL_ACPIFLASH,
+       CM_ASL_CPUFV,
+       CM_ASL_CPUTEMPERATURE,
+       CM_ASL_FANCPU,
+       CM_ASL_FANCHASSIS,
+       CM_ASL_USBPORT1,
+       CM_ASL_USBPORT2,
+       CM_ASL_USBPORT3,
+       CM_ASL_MODEM,
+       CM_ASL_CARDREADER,
+       CM_ASL_LID
+};
+
+const char *cm_getv[] = {
+       "WLDG", NULL, NULL, NULL,
+       "CAMG", NULL, NULL, NULL,
+       NULL, "PBLG", NULL, NULL,
+       "CFVG", NULL, NULL, NULL,
+       "USBG", NULL, NULL, "MODG",
+       "CRDG", "LIDG"
+};
+
+const char *cm_setv[] = {
+       "WLDS", NULL, NULL, NULL,
+       "CAMS", NULL, NULL, NULL,
+       "SDSP", "PBLS", "HDPS", NULL,
+       "CFVS", NULL, NULL, NULL,
+       "USBG", NULL, NULL, "MODS",
+       "CRDS", NULL
+};
+
+#define EEEPC_EC       "\\_SB.PCI0.SBRG.EC0."
+
+#define EEEPC_EC_FAN_PWM       EEEPC_EC "SC02" /* Fan PWM duty cycle (%) */
+#define EEEPC_EC_SC02          0x63
+#define EEEPC_EC_FAN_HRPM      EEEPC_EC "SC05" /* High byte, fan speed (RPM) */
+#define EEEPC_EC_FAN_LRPM      EEEPC_EC "SC06" /* Low byte, fan speed (RPM) */
+#define EEEPC_EC_FAN_CTRL      EEEPC_EC "SFB3" /* Byte containing SF25  */
+#define EEEPC_EC_SFB3          0xD3
+
+/*
+ * This is the main structure; it stores useful information about
+ * the hotk device.
+ */
+struct eeepc_hotk {
+       struct acpi_device *device;     /* the device we are in */
+       acpi_handle handle;             /* the handle of the hotk device */
+       u32 cm_supported;               /* the control methods supported
+                                          by this BIOS */
+       uint init_flag;                 /* Init flags */
+       u16 event_count[128];           /* count for each event */
+};
+
+/* The actual device the driver binds to */
+static struct eeepc_hotk *ehotk;
+
+/* Platform device/driver */
+static struct platform_driver platform_driver = {
+       .driver = {
+               .name = EEEPC_HOTK_FILE,
+               .owner = THIS_MODULE,
+       }
+};
+
+static struct platform_device *platform_device;
+
+/*
+ * The hotkey driver declaration
+ */
+static int eeepc_hotk_add(struct acpi_device *device);
+static int eeepc_hotk_remove(struct acpi_device *device, int type);
+
+static const struct acpi_device_id eeepc_device_ids[] = {
+       {EEEPC_HOTK_HID, 0},
+       {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, eeepc_device_ids);
+
+static struct acpi_driver eeepc_hotk_driver = {
+       .name = EEEPC_HOTK_NAME,
+       .class = EEEPC_HOTK_CLASS,
+       .ids = eeepc_device_ids,
+       .ops = {
+               .add = eeepc_hotk_add,
+               .remove = eeepc_hotk_remove,
+       },
+};
+
+/* The backlight device /sys/class/backlight */
+static struct backlight_device *eeepc_backlight_device;
+
+/* The hwmon device */
+static struct device *eeepc_hwmon_device;
+
+/*
+ * The backlight class declaration
+ */
+static int read_brightness(struct backlight_device *bd);
+static int update_bl_status(struct backlight_device *bd);
+static struct backlight_ops eeepcbl_ops = {
+       .get_brightness = read_brightness,
+       .update_status = update_bl_status,
+};
+
+MODULE_AUTHOR("Corentin Chary, Eric Cooper");
+MODULE_DESCRIPTION(EEEPC_HOTK_NAME);
+MODULE_LICENSE("GPL");
+
+/*
+ * ACPI Helpers
+ */
+static int write_acpi_int(acpi_handle handle, const char *method, int val,
+                         struct acpi_buffer *output)
+{
+       struct acpi_object_list params;
+       union acpi_object in_obj;
+       acpi_status status;
+
+       params.count = 1;
+       params.pointer = &in_obj;
+       in_obj.type = ACPI_TYPE_INTEGER;
+       in_obj.integer.value = val;
+
+       status = acpi_evaluate_object(handle, (char *)method, &params, output);
+       return (status == AE_OK ? 0 : -1);
+}
+
+static int read_acpi_int(acpi_handle handle, const char *method, int *val)
+{
+       acpi_status status;
+       ulong result;
+
+       status = acpi_evaluate_integer(handle, (char *)method, NULL, &result);
+       if (ACPI_FAILURE(status)) {
+               *val = -1;
+               return -1;
+       } else {
+               *val = result;
+               return 0;
+       }
+}
+
+static int set_acpi(int cm, int value)
+{
+       if (ehotk->cm_supported & (0x1 << cm)) {
+               const char *method = cm_setv[cm];
+               if (method == NULL)
+                       return -ENODEV;
+               if (write_acpi_int(ehotk->handle, method, value, NULL))
+                       printk(EEEPC_WARNING "Error writing %s\n", method);
+       }
+       return 0;
+}
+
+static int get_acpi(int cm)
+{
+       int value = -1;
+       if ((ehotk->cm_supported & (0x1 << cm))) {
+               const char *method = cm_getv[cm];
+               if (method == NULL)
+                       return -ENODEV;
+               if (read_acpi_int(ehotk->handle, method, &value))
+                       printk(EEEPC_WARNING "Error reading %s\n", method);
+       }
+       return value;
+}
+
+/*
+ * Backlight
+ */
+static int read_brightness(struct backlight_device *bd)
+{
+       return get_acpi(CM_ASL_PANELBRIGHT);
+}
+
+static int set_brightness(struct backlight_device *bd, int value)
+{
+       value = max(0, min(15, value));
+       return set_acpi(CM_ASL_PANELBRIGHT, value);
+}
+
+static int update_bl_status(struct backlight_device *bd)
+{
+       return set_brightness(bd, bd->props.brightness);
+}
+
+/*
+ * Sys helpers
+ */
+static int parse_arg(const char *buf, unsigned long count, int *val)
+{
+       if (!count)
+               return 0;
+       if (sscanf(buf, "%i", val) != 1)
+               return -EINVAL;
+       return count;
+}
+
+static ssize_t store_sys_acpi(int cm, const char *buf, size_t count)
+{
+       int rv, value;
+
+       rv = parse_arg(buf, count, &value);
+       if (rv > 0)
+               set_acpi(cm, value);
+       return rv;
+}
+
+static ssize_t show_sys_acpi(int cm, char *buf)
+{
+       return sprintf(buf, "%d\n", get_acpi(cm));
+}
+
+#define EEEPC_CREATE_DEVICE_ATTR(_name, _cm)                           \
+       static ssize_t show_##_name(struct device *dev,                 \
+                                   struct device_attribute *attr,      \
+                                   char *buf)                          \
+       {                                                               \
+               return show_sys_acpi(_cm, buf);                         \
+       }                                                               \
+       static ssize_t store_##_name(struct device *dev,                \
+                                    struct device_attribute *attr,     \
+                                    const char *buf, size_t count)     \
+       {                                                               \
+               return store_sys_acpi(_cm, buf, count);                 \
+       }                                                               \
+       static struct device_attribute dev_attr_##_name = {             \
+               .attr = {                                               \
+                       .name = __stringify(_name),                     \
+                       .mode = 0644 },                                 \
+               .show   = show_##_name,                                 \
+               .store  = store_##_name,                                \
+       }
+
+EEEPC_CREATE_DEVICE_ATTR(camera, CM_ASL_CAMERA);
+EEEPC_CREATE_DEVICE_ATTR(cardr, CM_ASL_CARDREADER);
+EEEPC_CREATE_DEVICE_ATTR(disp, CM_ASL_DISPLAYSWITCH);
+EEEPC_CREATE_DEVICE_ATTR(wlan, CM_ASL_WLAN);
+
+static struct attribute *platform_attributes[] = {
+       &dev_attr_camera.attr,
+       &dev_attr_cardr.attr,
+       &dev_attr_disp.attr,
+       &dev_attr_wlan.attr,
+       NULL
+};
+
+static struct attribute_group platform_attribute_group = {
+       .attrs = platform_attributes
+};
+
+/*
+ * Hotkey functions
+ */
+static int eeepc_hotk_check(void)
+{
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       int result;
+
+       result = acpi_bus_get_status(ehotk->device);
+       if (result)
+               return result;
+       if (ehotk->device->status.present) {
+               if (write_acpi_int(ehotk->handle, "INIT", ehotk->init_flag,
+                                   &buffer)) {
+                       printk(EEEPC_ERR "Hotkey initialization failed\n");
+                       return -ENODEV;
+               } else {
+                       printk(EEEPC_NOTICE "Hotkey init flags 0x%x\n",
+                              ehotk->init_flag);
+               }
+               /* get control methods supported */
+               if (read_acpi_int(ehotk->handle, "CMSG",
+                                 &ehotk->cm_supported)) {
+                       printk(EEEPC_ERR
+                              "Failed to get supported control methods\n");
+                       return -ENODEV;
+               } else {
+                       printk(EEEPC_INFO
+                              "Supported control methods: 0x%x\n",
+                              ehotk->cm_supported);
+               }
+       } else {
+               printk(EEEPC_ERR "Hotkey device not present, aborting\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void notify_wlan(u32 *event)
+{
+       /* if DISABLE_ASL_WLAN is set, the notify code for fn+f2
+          will always be 0x10 */
+       if (ehotk->cm_supported & (0x1 << CM_ASL_WLAN)) {
+               const char *method = cm_getv[CM_ASL_WLAN];
+               int value;
+               if (read_acpi_int(ehotk->handle, method, &value))
+                       printk(EEEPC_WARNING "Error reading %s\n",
+                              method);
+               else if (value == 1)
+                       *event = 0x11;
+       }
+}
+
+static void notify_brn(void)
+{
+       struct backlight_device *bd = eeepc_backlight_device;
+       bd->props.brightness = read_brightness(bd);
+}
+
+static void eeepc_hotk_notify(acpi_handle handle, u32 event, void *data)
+{
+       if (!ehotk)
+               return;
+       if (event == NOTIFY_WLAN_ON && (DISABLE_ASL_WLAN & ehotk->init_flag))
+               notify_wlan(&event);
+       if (event >= NOTIFY_BRN_MIN && event <= NOTIFY_BRN_MAX)
+               notify_brn();
+       acpi_bus_generate_proc_event(ehotk->device, event,
+                                    ehotk->event_count[event % 128]++);
+}
+
+static int eeepc_hotk_add(struct acpi_device *device)
+{
+       acpi_status status = AE_OK;
+       int result;
+
+       if (!device)
+                return -EINVAL;
+       printk(EEEPC_NOTICE EEEPC_HOTK_NAME "\n");
+       ehotk = kzalloc(sizeof(struct eeepc_hotk), GFP_KERNEL);
+       if (!ehotk)
+               return -ENOMEM;
+       ehotk->init_flag = DISABLE_ASL_WLAN | DISABLE_ASL_DISPLAYSWITCH;
+       ehotk->handle = device->handle;
+       strcpy(acpi_device_name(device), EEEPC_HOTK_DEVICE_NAME);
+       strcpy(acpi_device_class(device), EEEPC_HOTK_CLASS);
+       acpi_driver_data(device) = ehotk;
+       ehotk->device = device;
+       result = eeepc_hotk_check();
+       if (result)
+               goto end;
+       status = acpi_install_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
+                                            eeepc_hotk_notify, ehotk);
+       if (ACPI_FAILURE(status))
+               printk(EEEPC_ERR "Error installing notify handler\n");
+ end:
+       if (result) {
+               kfree(ehotk);
+               ehotk = NULL;
+       }
+       return result;
+}
+
+static int eeepc_hotk_remove(struct acpi_device *device, int type)
+{
+       acpi_status status = 0;
+
+       if (!device || !acpi_driver_data(device))
+                return -EINVAL;
+       status = acpi_remove_notify_handler(ehotk->handle, ACPI_SYSTEM_NOTIFY,
+                                           eeepc_hotk_notify);
+       if (ACPI_FAILURE(status))
+               printk(EEEPC_ERR "Error removing notify handler\n");
+       kfree(ehotk);
+       return 0;
+}
+
+/*
+ * Hwmon
+ */
+static int eeepc_get_fan_pwm(void)
+{
+       int value = 0;
+
+       read_acpi_int(NULL, EEEPC_EC_FAN_PWM, &value);
+       return (value);
+}
+
+static void eeepc_set_fan_pwm(int value)
+{
+       value = SENSORS_LIMIT(value, 0, 100);
+       ec_write(EEEPC_EC_SC02, value);
+}
+
+static int eeepc_get_fan_rpm(void)
+{
+       int high = 0;
+       int low = 0;
+
+       read_acpi_int(NULL, EEEPC_EC_FAN_HRPM, &high);
+       read_acpi_int(NULL, EEEPC_EC_FAN_LRPM, &low);
+       return (high << 8 | low);
+}
+
+static int eeepc_get_fan_ctrl(void)
+{
+       int value = 0;
+
+       read_acpi_int(NULL, EEEPC_EC_FAN_CTRL, &value);
+       return ((value & 0x02 ? 1 : 0));
+}
+
+static void eeepc_set_fan_ctrl(int manual)
+{
+       int value = 0;
+
+       read_acpi_int(NULL, EEEPC_EC_FAN_CTRL, &value);
+       if (manual)
+               value |= 0x02;
+       else
+               value &= ~0x02;
+       ec_write(EEEPC_EC_SFB3, value);
+}
+
+static ssize_t store_sys_hwmon(void (*set)(int), const char *buf, size_t count)
+{
+       int rv, value;
+
+       rv = parse_arg(buf, count, &value);
+       if (rv > 0)
+               set(value);
+       return rv;
+}
+
+static ssize_t show_sys_hwmon(int (*get)(void), char *buf)
+{
+       return sprintf(buf, "%d\n", get());
+}
+
+#define EEEPC_CREATE_SENSOR_ATTR(_name, _mode, _get, _set)             \
+       static ssize_t show_##_name(struct device *dev,                 \
+                                   struct device_attribute *attr,      \
+                                   char *buf)                          \
+       {                                                               \
+               return show_sys_hwmon(_get, buf);                       \
+       }                                                               \
+       static ssize_t store_##_name(struct device *dev,                \
+                                    struct device_attribute *attr,     \
+                                    const char *buf, size_t count)     \
+       {                                                               \
+               return store_sys_hwmon(_set, buf, count);               \
+       }                                                               \
+       static SENSOR_DEVICE_ATTR(_name, _mode, show_##_name, store_##_name, 0);
+
+EEEPC_CREATE_SENSOR_ATTR(fan1_input, S_IRUGO, eeepc_get_fan_rpm, NULL);
+EEEPC_CREATE_SENSOR_ATTR(fan1_pwm, S_IRUGO | S_IWUSR,
+                        eeepc_get_fan_pwm, eeepc_set_fan_pwm);
+EEEPC_CREATE_SENSOR_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
+                        eeepc_get_fan_ctrl, eeepc_set_fan_ctrl);
+
+static struct attribute *hwmon_attributes[] = {
+       &sensor_dev_attr_fan1_pwm.dev_attr.attr,
+       &sensor_dev_attr_fan1_input.dev_attr.attr,
+       &sensor_dev_attr_pwm1_enable.dev_attr.attr,
+       NULL
+};
+
+static struct attribute_group hwmon_attribute_group = {
+       .attrs = hwmon_attributes
+};
+
+/*
+ * exit/init
+ */
+static void eeepc_backlight_exit(void)
+{
+       if (eeepc_backlight_device)
+               backlight_device_unregister(eeepc_backlight_device);
+       eeepc_backlight_device = NULL;
+}
+
+static void eeepc_hwmon_exit(void)
+{
+       struct device *hwmon;
+
+       hwmon = eeepc_hwmon_device;
+       if (!hwmon)
+               return;
+       hwmon_device_unregister(hwmon);
+       sysfs_remove_group(&hwmon->kobj,
+                          &hwmon_attribute_group);
+       eeepc_hwmon_device = NULL;
+}
+
+static void __exit eeepc_laptop_exit(void)
+{
+       eeepc_backlight_exit();
+       eeepc_hwmon_exit();
+       acpi_bus_unregister_driver(&eeepc_hotk_driver);
+       sysfs_remove_group(&platform_device->dev.kobj,
+                          &platform_attribute_group);
+       platform_device_unregister(platform_device);
+       platform_driver_unregister(&platform_driver);
+}
+
+static int eeepc_backlight_init(struct device *dev)
+{
+       struct backlight_device *bd;
+
+       bd = backlight_device_register(EEEPC_HOTK_FILE, dev,
+                                      NULL, &eeepcbl_ops);
+       if (IS_ERR(bd)) {
+               printk(EEEPC_ERR
+                      "Could not register eeepc backlight device\n");
+               eeepc_backlight_device = NULL;
+               return PTR_ERR(bd);
+       }
+       eeepc_backlight_device = bd;
+       bd->props.max_brightness = 15;
+       bd->props.brightness = read_brightness(NULL);
+       bd->props.power = FB_BLANK_UNBLANK;
+       backlight_update_status(bd);
+       return 0;
+}
+
+static int eeepc_hwmon_init(struct device *dev)
+{
+       struct device *hwmon;
+       int result;
+
+       hwmon = hwmon_device_register(dev);
+       if (IS_ERR(hwmon)) {
+               printk(EEEPC_ERR
+                      "Could not register eeepc hwmon device\n");
+               eeepc_hwmon_device = NULL;
+               return PTR_ERR(hwmon);
+       }
+       eeepc_hwmon_device = hwmon;
+       result = sysfs_create_group(&hwmon->kobj,
+                                   &hwmon_attribute_group);
+       if (result)
+               eeepc_hwmon_exit();
+       return result;
+}
+
+static int __init eeepc_laptop_init(void)
+{
+       struct device *dev;
+       int result;
+
+       if (acpi_disabled)
+               return -ENODEV;
+       result = acpi_bus_register_driver(&eeepc_hotk_driver);
+       if (result < 0)
+               return result;
+       if (!ehotk) {
+               acpi_bus_unregister_driver(&eeepc_hotk_driver);
+               return -ENODEV;
+       }
+       dev = acpi_get_physical_device(ehotk->device->handle);
+       result = eeepc_backlight_init(dev);
+       if (result)
+               goto fail_backlight;
+       result = eeepc_hwmon_init(dev);
+       if (result)
+               goto fail_hwmon;
+       /* Register platform stuff */
+       result = platform_driver_register(&platform_driver);
+       if (result)
+               goto fail_platform_driver;
+       platform_device = platform_device_alloc(EEEPC_HOTK_FILE, -1);
+       if (!platform_device) {
+               result = -ENOMEM;
+               goto fail_platform_device1;
+       }
+       result = platform_device_add(platform_device);
+       if (result)
+               goto fail_platform_device2;
+       result = sysfs_create_group(&platform_device->dev.kobj,
+                                   &platform_attribute_group);
+       if (result)
+               goto fail_sysfs;
+       return 0;
+fail_sysfs:
+       platform_device_del(platform_device);
+fail_platform_device2:
+       platform_device_put(platform_device);
+fail_platform_device1:
+       platform_driver_unregister(&platform_driver);
+fail_platform_driver:
+       eeepc_hwmon_exit();
+fail_hwmon:
+       eeepc_backlight_exit();
+fail_backlight:
+       return result;
+}
+
+module_init(eeepc_laptop_init);
+module_exit(eeepc_laptop_exit);
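
The tail of eeepc_laptop_init() above follows the usual goto-based unwind pattern: every registration step gets a matching fail_* label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A minimal sketch of the pattern, using hypothetical foo_* helpers rather than the driver's real symbols:

#include <linux/init.h>

/* Hypothetical helpers, declared only to keep the sketch self-contained. */
int foo_register_driver(void);
void foo_unregister_driver(void);
int foo_add_device(void);
void foo_del_device(void);
int foo_create_sysfs(void);

static int __init foo_init(void)
{
	int err;

	err = foo_register_driver();		/* step 1 */
	if (err)
		return err;

	err = foo_add_device();			/* step 2 */
	if (err)
		goto fail_device;

	err = foo_create_sysfs();		/* step 3 */
	if (err)
		goto fail_sysfs;

	return 0;

fail_sysfs:
	foo_del_device();			/* undo step 2 */
fail_device:
	foo_unregister_driver();		/* undo step 1 */
	return err;
}
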
index fafb57fed761c8f1e6fa5c91f136c139593e0211..0736cff9d97a2c7499b4c91eaf30beac046ad9b6 100644 (file)
@@ -31,7 +31,6 @@
 static LIST_HEAD(container_list);
 static DEFINE_MUTEX(container_list_lock);
 static struct class enclosure_class;
-static struct class enclosure_component_class;
 
 /**
  * enclosure_find - find an enclosure given a device
@@ -166,6 +165,40 @@ void enclosure_unregister(struct enclosure_device *edev)
 }
 EXPORT_SYMBOL_GPL(enclosure_unregister);
 
+#define ENCLOSURE_NAME_SIZE    64
+
+static void enclosure_link_name(struct enclosure_component *cdev, char *name)
+{
+       strcpy(name, "enclosure_device:");
+       strcat(name, cdev->cdev.bus_id);
+}
+
+static void enclosure_remove_links(struct enclosure_component *cdev)
+{
+       char name[ENCLOSURE_NAME_SIZE];
+
+       enclosure_link_name(cdev, name);
+       sysfs_remove_link(&cdev->dev->kobj, name);
+       sysfs_remove_link(&cdev->cdev.kobj, "device");
+}
+
+static int enclosure_add_links(struct enclosure_component *cdev)
+{
+       int error;
+       char name[ENCLOSURE_NAME_SIZE];
+
+       error = sysfs_create_link(&cdev->cdev.kobj, &cdev->dev->kobj, "device");
+       if (error)
+               return error;
+
+       enclosure_link_name(cdev, name);
+       error = sysfs_create_link(&cdev->dev->kobj, &cdev->cdev.kobj, name);
+       if (error)
+               sysfs_remove_link(&cdev->cdev.kobj, "device");
+
+       return error;
+}
+
 static void enclosure_release(struct device *cdev)
 {
        struct enclosure_device *edev = to_enclosure_device(cdev);
@@ -178,10 +211,15 @@ static void enclosure_component_release(struct device *dev)
 {
        struct enclosure_component *cdev = to_enclosure_component(dev);
 
-       put_device(cdev->dev);
+       if (cdev->dev) {
+               enclosure_remove_links(cdev);
+               put_device(cdev->dev);
+       }
        put_device(dev->parent);
 }
 
+static struct attribute_group *enclosure_groups[];
+
 /**
  * enclosure_component_register - add a particular component to an enclosure
  * @edev:      the enclosure to add the component
@@ -217,12 +255,14 @@ enclosure_component_register(struct enclosure_device *edev,
        ecomp->number = number;
        cdev = &ecomp->cdev;
        cdev->parent = get_device(&edev->edev);
-       cdev->class = &enclosure_component_class;
        if (name)
                snprintf(cdev->bus_id, BUS_ID_SIZE, "%s", name);
        else
                snprintf(cdev->bus_id, BUS_ID_SIZE, "%u", number);
 
+       cdev->release = enclosure_component_release;
+       cdev->groups = enclosure_groups;
+
        err = device_register(cdev);
        if (err)
                ERR_PTR(err);
@@ -255,10 +295,12 @@ int enclosure_add_device(struct enclosure_device *edev, int component,
 
        cdev = &edev->component[component];
 
-       device_del(&cdev->cdev);
+       if (cdev->dev)
+               enclosure_remove_links(cdev);
+
        put_device(cdev->dev);
        cdev->dev = get_device(dev);
-       return device_add(&cdev->cdev);
+       return enclosure_add_links(cdev);
 }
 EXPORT_SYMBOL_GPL(enclosure_add_device);
 
@@ -442,24 +484,32 @@ static ssize_t get_component_type(struct device *cdev,
 }
 
 
-static struct device_attribute enclosure_component_attrs[] = {
-       __ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
-              set_component_fault),
-       __ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
-              set_component_status),
-       __ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
-              set_component_active),
-       __ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
-              set_component_locate),
-       __ATTR(type, S_IRUGO, get_component_type, NULL),
-       __ATTR_NULL
+static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
+                   set_component_fault);
+static DEVICE_ATTR(status, S_IRUGO | S_IWUSR, get_component_status,
+                  set_component_status);
+static DEVICE_ATTR(active, S_IRUGO | S_IWUSR, get_component_active,
+                  set_component_active);
+static DEVICE_ATTR(locate, S_IRUGO | S_IWUSR, get_component_locate,
+                  set_component_locate);
+static DEVICE_ATTR(type, S_IRUGO, get_component_type, NULL);
+
+static struct attribute *enclosure_component_attrs[] = {
+       &dev_attr_fault.attr,
+       &dev_attr_status.attr,
+       &dev_attr_active.attr,
+       &dev_attr_locate.attr,
+       &dev_attr_type.attr,
+       NULL
 };
 
-static struct class enclosure_component_class =  {
-       .name                   = "enclosure_component",
-       .owner                  = THIS_MODULE,
-       .dev_attrs      = enclosure_component_attrs,
-       .dev_release            = enclosure_component_release,
+static struct attribute_group enclosure_group = {
+       .attrs = enclosure_component_attrs,
+};
+
+static struct attribute_group *enclosure_groups[] = {
+       &enclosure_group,
+       NULL
 };
 
 static int __init enclosure_init(void)
@@ -469,20 +519,12 @@ static int __init enclosure_init(void)
        err = class_register(&enclosure_class);
        if (err)
                return err;
-       err = class_register(&enclosure_component_class);
-       if (err)
-               goto err_out;
 
        return 0;
- err_out:
-       class_unregister(&enclosure_class);
-
-       return err;
 }
 
 static void __exit enclosure_exit(void)
 {
-       class_unregister(&enclosure_component_class);
        class_unregister(&enclosure_class);
 }
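
The enclosure change above drops the separate enclosure_component class, whose dev_attrs array used to create the per-component sysfs files, and instead collects DEVICE_ATTR definitions into an attribute_group hung off each component device via its groups pointer, so the driver core creates and removes the files itself. A minimal sketch of that pattern for a hypothetical device with a single read-only attribute:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

/* Hypothetical attribute; not part of the enclosure driver. */
static ssize_t example_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR(example, S_IRUGO, example_show, NULL);

static struct attribute *demo_attrs[] = {
	&dev_attr_example.attr,
	NULL
};

static struct attribute_group demo_group = {
	.attrs = demo_attrs,
};

static struct attribute_group *demo_groups[] = {
	&demo_group,
	NULL
};

/* Hypothetical registration helper: hook the table up before the device
 * is added, and the core handles file creation and removal. */
static int demo_register(struct device *dev)
{
	dev->groups = demo_groups;
	return device_register(dev);
}
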
 
index 302e92418bbef2464e0e3f8fdf85edbb47729a20..ff51ab67231c95a9605496ced2ab104cd9ed5105 100644 (file)
@@ -210,13 +210,10 @@ static int hdpu_cpustate_probe(struct platform_device *pdev)
                return ret;
        }
 
-       proc_de = create_proc_entry("sky_cpustate", 0666, &proc_root);
+       proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate);
        if (!proc_de) {
                printk(KERN_WARNING "sky_cpustate: "
                       "Unable to create proc entry\n");
-       } else {
-               proc_de->proc_fops = &proc_cpustate;
-               proc_de->owner = THIS_MODULE;
        }
 
        printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n");
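
This hunk is part of the conversion from create_proc_entry() followed by a separate proc_fops assignment, which briefly leaves the entry visible with no operations, to proc_create(), which installs the file_operations atomically at creation time. A minimal sketch of the new style, with hypothetical foo_* names and a seq_file-based read method:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int foo_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "hello\n");
	return 0;
}

static int foo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, foo_proc_show, NULL);
}

static const struct file_operations foo_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = foo_proc_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release,
};

static int __init foo_proc_init(void)
{
	/* NULL parent places the entry directly under /proc */
	if (!proc_create("foo_example", 0444, NULL, &foo_proc_fops))
		return -ENOMEM;
	return 0;
}

static void __exit foo_proc_exit(void)
{
	remove_proc_entry("foo_example", NULL);
}

module_init(foo_proc_init);
module_exit(foo_proc_exit);
MODULE_LICENSE("GPL");
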
index 2fa36f7a6eb388ec02db087560ec3dca4c8b2d1d..08e26beefe64182f78f509343bcdbc5156d81aca 100644 (file)
@@ -102,22 +102,17 @@ static int hdpu_nexus_probe(struct platform_device *pdev)
                printk(KERN_ERR "sky_nexus: Could not map slot id\n");
        }
 
-       hdpu_slot_id = create_proc_entry("sky_slot_id", 0666, &proc_root);
+       hdpu_slot_id = proc_create("sky_slot_id", 0666, NULL, &proc_slot_id);
        if (!hdpu_slot_id) {
                printk(KERN_WARNING "sky_nexus: "
                       "Unable to create proc dir entry: sky_slot_id\n");
-       } else {
-               hdpu_slot_id->proc_fops = &proc_slot_id;
-               hdpu_slot_id->owner = THIS_MODULE;
        }
 
-       hdpu_chassis_id = create_proc_entry("sky_chassis_id", 0666, &proc_root);
-       if (!hdpu_chassis_id) {
+       hdpu_chassis_id = proc_create("sky_chassis_id", 0666, NULL,
+                                     &proc_chassis_id);
+       if (!hdpu_chassis_id)
                printk(KERN_WARNING "sky_nexus: "
                       "Unable to create proc dir entry: sky_chassis_id\n");
-       } else {
-               hdpu_chassis_id->proc_fops = &proc_chassis_id;
-               hdpu_chassis_id->owner = THIS_MODULE;
        }
 
        return 0;
@@ -128,8 +123,8 @@ static int hdpu_nexus_remove(struct platform_device *pdev)
        slot_id = -1;
        chassis_id = -1;
 
-       remove_proc_entry("sky_slot_id", &proc_root);
-       remove_proc_entry("sky_chassis_id", &proc_root);
+       remove_proc_entry("sky_slot_id", NULL);
+       remove_proc_entry("sky_chassis_id", NULL);
 
        hdpu_slot_id = 0;
        hdpu_chassis_id = 0;
index 1a0e7978226a6b593e048e31ef00e4e40dd13d41..276d3fb68094a53d6815979688b89cd412e8b489 100644 (file)
@@ -96,7 +96,7 @@ static inline void do_exec_command(struct service_processor *sp)
 {
        char tsbuf[32];
 
-       dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf));
+       dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
 
        if (ibmasm_send_i2o_message(sp)) {
                sp->current_command->status = IBMASM_CMD_FAILED;
@@ -119,7 +119,7 @@ void ibmasm_exec_command(struct service_processor *sp, struct command *cmd)
        unsigned long flags;
        char tsbuf[32];
 
-       dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf));
+       dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
 
        spin_lock_irqsave(&sp->lock, flags);
 
@@ -139,7 +139,7 @@ static void exec_next_command(struct service_processor *sp)
        unsigned long flags;
        char tsbuf[32];
 
-       dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf));
+       dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
 
        spin_lock_irqsave(&sp->lock, flags);
        sp->current_command = dequeue_command(sp);
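
This file, and the ioc4 and sony-laptop hunks further down, make the same mechanical substitution: the GCC-specific __FUNCTION__ extension is replaced by the standard C99 __func__ identifier, which likewise evaluates to the name of the enclosing function. A trivial illustration with a hypothetical helper:

#include <linux/kernel.h>

static void foo_trace(void)
{
	/* __func__ (C99) prints "foo_trace", just as __FUNCTION__ did */
	pr_debug("%s: called\n", __func__);
}
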
index 3036e785b3e4c8e6ab8c4cfb1a9b1e29e89caf39..1bc4306572a477aa6d440187c5bb7db4a23e7a26 100644 (file)
@@ -75,9 +75,9 @@ void ibmasm_heartbeat_exit(struct service_processor *sp)
 {
        char tsbuf[32];
 
-       dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf));
+       dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
        ibmasm_wait_for_response(sp->heartbeat, IBMASM_CMD_TIMEOUT_NORMAL);
-       dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf));
+       dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
        suspend_heartbeats = 1;
        command_put(sp->heartbeat);
 }
@@ -88,7 +88,7 @@ void ibmasm_receive_heartbeat(struct service_processor *sp,  void *message, size
        struct dot_command_header *header = (struct dot_command_header *)cmd->buffer;
        char tsbuf[32];
 
-       dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf));
+       dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf));
        if (suspend_heartbeats)
                return;
 
index 0c0bb3093e07e7ab1afea5c37acfb38ac83394dc..80a1363524087a1e66e575058360d071bbca352f 100644 (file)
@@ -175,19 +175,17 @@ static int intel_menlow_memory_add(struct acpi_device *device)
                goto end;
        }
 
-       if (cdev) {
-               acpi_driver_data(device) = cdev;
-               result = sysfs_create_link(&device->dev.kobj,
-                                       &cdev->device.kobj, "thermal_cooling");
-               if (result)
-                       goto unregister;
-
-               result = sysfs_create_link(&cdev->device.kobj,
-                                       &device->dev.kobj, "device");
-               if (result) {
-                       sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
-                       goto unregister;
-               }
+       acpi_driver_data(device) = cdev;
+       result = sysfs_create_link(&device->dev.kobj,
+                               &cdev->device.kobj, "thermal_cooling");
+       if (result)
+               goto unregister;
+
+       result = sysfs_create_link(&cdev->device.kobj,
+                               &device->dev.kobj, "device");
+       if (result) {
+               sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+               goto unregister;
        }
 
  end:
index 05172d2613d66d3f980c2b2c148bd40992c4e64d..6f76573e7c8a244bdac0be865a1a1df72bfee94c 100644 (file)
@@ -75,7 +75,7 @@ ioc4_register_submodule(struct ioc4_submodule *is)
                        printk(KERN_WARNING
                               "%s: IOC4 submodule %s probe failed "
                               "for pci_dev %s",
-                              __FUNCTION__, module_name(is->is_owner),
+                              __func__, module_name(is->is_owner),
                               pci_name(idd->idd_pdev));
                }
        }
@@ -102,7 +102,7 @@ ioc4_unregister_submodule(struct ioc4_submodule *is)
                        printk(KERN_WARNING
                               "%s: IOC4 submodule %s remove failed "
                               "for pci_dev %s.\n",
-                              __FUNCTION__, module_name(is->is_owner),
+                              __func__, module_name(is->is_owner),
                               pci_name(idd->idd_pdev));
                }
        }
@@ -282,7 +282,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
        if ((ret = pci_enable_device(pdev))) {
                printk(KERN_WARNING
                       "%s: Failed to enable IOC4 device for pci_dev %s.\n",
-                      __FUNCTION__, pci_name(pdev));
+                      __func__, pci_name(pdev));
                goto out;
        }
        pci_set_master(pdev);
@@ -292,7 +292,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
        if (!idd) {
                printk(KERN_WARNING
                       "%s: Failed to allocate IOC4 data for pci_dev %s.\n",
-                      __FUNCTION__, pci_name(pdev));
+                      __func__, pci_name(pdev));
                ret = -ENODEV;
                goto out_idd;
        }
@@ -307,7 +307,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
                printk(KERN_WARNING
                       "%s: Unable to find IOC4 misc resource "
                       "for pci_dev %s.\n",
-                      __FUNCTION__, pci_name(idd->idd_pdev));
+                      __func__, pci_name(idd->idd_pdev));
                ret = -ENODEV;
                goto out_pci;
        }
@@ -316,7 +316,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
                printk(KERN_WARNING
                       "%s: Unable to request IOC4 misc region "
                       "for pci_dev %s.\n",
-                      __FUNCTION__, pci_name(idd->idd_pdev));
+                      __func__, pci_name(idd->idd_pdev));
                ret = -ENODEV;
                goto out_pci;
        }
@@ -326,7 +326,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
                printk(KERN_WARNING
                       "%s: Unable to remap IOC4 misc region "
                       "for pci_dev %s.\n",
-                      __FUNCTION__, pci_name(idd->idd_pdev));
+                      __func__, pci_name(idd->idd_pdev));
                ret = -ENODEV;
                goto out_misc_region;
        }
@@ -372,7 +372,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id)
                        printk(KERN_WARNING
                               "%s: IOC4 submodule 0x%s probe failed "
                               "for pci_dev %s.\n",
-                              __FUNCTION__, module_name(is->is_owner),
+                              __func__, module_name(is->is_owner),
                               pci_name(idd->idd_pdev));
                }
        }
@@ -406,7 +406,7 @@ ioc4_remove(struct pci_dev *pdev)
                        printk(KERN_WARNING
                               "%s: IOC4 submodule 0x%s remove failed "
                               "for pci_dev %s.\n",
-                              __FUNCTION__, module_name(is->is_owner),
+                              __func__, module_name(is->is_owner),
                               pci_name(idd->idd_pdev));
                }
        }
@@ -418,7 +418,7 @@ ioc4_remove(struct pci_dev *pdev)
                printk(KERN_WARNING
                       "%s: Unable to get IOC4 misc mapping for pci_dev %s. "
                       "Device removal may be incomplete.\n",
-                      __FUNCTION__, pci_name(idd->idd_pdev));
+                      __func__, pci_name(idd->idd_pdev));
        }
        release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs));
 
index 6d6286c4eeacaeadfaa8ad57cd8d4d64a4252ed7..30a1af857c7a9345d254d118ea756acc84559fb4 100644 (file)
@@ -132,7 +132,7 @@ static int send_ack;
 static int final_ack;
 static int hw_break_val;
 static int hw_break_val2;
-#if defined(CONFIG_ARM) || defined(CONFIG_MIPS)
+#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC)
 static int arch_needs_sstep_emulation = 1;
 #else
 static int arch_needs_sstep_emulation;
index 7fa61e907e1c115b16f14936ffea65f865816f4e..71d1c84e2fa8e4ed918461a702d2e50e2d4b3ccd 100644 (file)
@@ -12,6 +12,7 @@
  *  or alternatively, you might use OpenHaptics provided by Sensable.
  */
 
+#include <linux/compat.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/device.h>
@@ -91,11 +92,8 @@ static long phantom_ioctl(struct file *file, unsigned int cmd,
        unsigned long flags;
        unsigned int i;
 
-       if (_IOC_TYPE(cmd) != PH_IOC_MAGIC ||
-                       _IOC_NR(cmd) > PH_IOC_MAXNR)
-               return -ENOTTY;
-
        switch (cmd) {
+       case PHN_SETREG:
        case PHN_SET_REG:
                if (copy_from_user(&r, argp, sizeof(r)))
                        return -EFAULT;
@@ -126,6 +124,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd,
                        phantom_status(dev, dev->status & ~PHB_RUNNING);
                spin_unlock_irqrestore(&dev->regs_lock, flags);
                break;
+       case PHN_SETREGS:
        case PHN_SET_REGS:
                if (copy_from_user(&rs, argp, sizeof(rs)))
                        return -EFAULT;
@@ -143,6 +142,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd,
                }
                spin_unlock_irqrestore(&dev->regs_lock, flags);
                break;
+       case PHN_GETREG:
        case PHN_GET_REG:
                if (copy_from_user(&r, argp, sizeof(r)))
                        return -EFAULT;
@@ -155,6 +155,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd,
                if (copy_to_user(argp, &r, sizeof(r)))
                        return -EFAULT;
                break;
+       case PHN_GETREGS:
        case PHN_GET_REGS: {
                u32 m;
 
@@ -168,6 +169,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd,
                for (i = 0; i < m; i++)
                        if (rs.mask & BIT(i))
                                rs.values[i] = ioread32(dev->iaddr + i);
+               atomic_set(&dev->counter, 0);
                spin_unlock_irqrestore(&dev->regs_lock, flags);
 
                if (copy_to_user(argp, &rs, sizeof(rs)))
@@ -191,6 +193,20 @@ static long phantom_ioctl(struct file *file, unsigned int cmd,
        return 0;
 }
 
+#ifdef CONFIG_COMPAT
+static long phantom_compat_ioctl(struct file *filp, unsigned int cmd,
+               unsigned long arg)
+{
+       if (_IOC_NR(cmd) <= 3 && _IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
+               cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT);
+               cmd |= sizeof(void *) << _IOC_SIZESHIFT;
+       }
+       return phantom_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define phantom_compat_ioctl NULL
+#endif
+
 static int phantom_open(struct inode *inode, struct file *file)
 {
        struct phantom_device *dev = container_of(inode->i_cdev,
@@ -239,11 +255,12 @@ static unsigned int phantom_poll(struct file *file, poll_table *wait)
 
        pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter));
        poll_wait(file, &dev->wait, wait);
-       if (atomic_read(&dev->counter)) {
+
+       if (!(dev->status & PHB_RUNNING))
+               mask = POLLERR;
+       else if (atomic_read(&dev->counter))
                mask = POLLIN | POLLRDNORM;
-               atomic_dec(&dev->counter);
-       } else if ((dev->status & PHB_RUNNING) == 0)
-               mask = POLLIN | POLLRDNORM | POLLERR;
+
        pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter));
 
        return mask;
@@ -253,6 +270,7 @@ static struct file_operations phantom_file_ops = {
        .open = phantom_open,
        .release = phantom_release,
        .unlocked_ioctl = phantom_ioctl,
+       .compat_ioctl = phantom_compat_ioctl,
        .poll = phantom_poll,
 };
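
The compat handler added above exists because the original PHN_*_REG(S) ioctl numbers encode the size of a user pointer in the command word, so the value computed by a 32-bit process differs from the one a 64-bit kernel expects; phantom_compat_ioctl() rewrites the size field from sizeof(compat_uptr_t) to sizeof(void *) and converts the argument with compat_ptr(). A minimal sketch of the same rewrite for a hypothetical driver (the foo_* symbols are illustrative, not real ones):

#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/ioctl.h>

/* Hypothetical native handler; a real driver would do the work here. */
static long foo_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}

#ifdef CONFIG_COMPAT
static long foo_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg)
{
	/* 32-bit callers encoded sizeof(compat_uptr_t) in the command;
	 * patch in the native pointer size before forwarding. */
	if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
		cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT);
		cmd |= sizeof(void *) << _IOC_SIZESHIFT;
	}
	return foo_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
}
#else
#define foo_compat_ioctl NULL
#endif

static const struct file_operations foo_fops = {
	.unlocked_ioctl	= foo_ioctl,
	.compat_ioctl	= foo_compat_ioctl,
};
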
 
index 27e200ec582692e42b5d9186a15b953a3a323bf5..acd3fd4285d7c954c824286a804c48c6a75d39ce 100644 (file)
@@ -211,7 +211,7 @@ xpc_rsvd_page_init(void)
         */
        amos_page = xpc_vars->amos_page;
        if (amos_page == NULL) {
-               amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0));
+               amos_page = (AMO_t *)TO_AMO(uncached_alloc_page(0, 1));
                if (amos_page == NULL) {
                        dev_err(xpc_part, "can't allocate page of AMOs\n");
                        return NULL;
@@ -230,7 +230,7 @@ xpc_rsvd_page_init(void)
                                dev_err(xpc_part, "can't change memory "
                                        "protections\n");
                                uncached_free_page(__IA64_UNCACHED_OFFSET |
-                                                  TO_PHYS((u64)amos_page));
+                                                  TO_PHYS((u64)amos_page), 1);
                                return NULL;
                        }
                }
index 02ff3d19b1ccaf40981c861e9977c43eea634f3f..00e48e2a9c11229da5576897d566fead0ac2a8f4 100644 (file)
@@ -961,7 +961,7 @@ static int sony_nc_resume(struct acpi_device *device)
                ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset,
                                       item->value, NULL);
                if (ret < 0) {
-                       printk("%s: %d\n", __FUNCTION__, ret);
+                       printk("%s: %d\n", __func__, ret);
                        break;
                }
        }
@@ -1453,7 +1453,7 @@ static struct sonypi_eventtypes type4_events[] = {
                udelay(1);                                              \
        if (!n)                                                         \
                dprintk("command failed at %s : %s (line %d)\n",        \
-                               __FILE__, __FUNCTION__, __LINE__);      \
+                               __FILE__, __func__, __LINE__);  \
 }
 
 static u8 sony_pic_call1(u8 dev)
index 6cb781262f947611a20916bf6b996c9ff3e713cb..3f28f6eabdbffcca3868b0b688b4a0516da91347 100644 (file)
@@ -21,7 +21,7 @@
  *  02110-1301, USA.
  */
 
-#define TPACPI_VERSION "0.19"
+#define TPACPI_VERSION "0.20"
 #define TPACPI_SYSFS_VERSION 0x020200
 
 /*
@@ -67,6 +67,7 @@
 #include <linux/hwmon.h>
 #include <linux/hwmon-sysfs.h>
 #include <linux/input.h>
+#include <linux/leds.h>
 #include <asm/uaccess.h>
 
 #include <linux/dmi.h>
@@ -85,6 +86,8 @@
 #define TP_CMOS_VOLUME_MUTE    2
 #define TP_CMOS_BRIGHTNESS_UP  4
 #define TP_CMOS_BRIGHTNESS_DOWN        5
+#define TP_CMOS_THINKLIGHT_ON  12
+#define TP_CMOS_THINKLIGHT_OFF 13
 
 /* NVRAM Addresses */
 enum tp_nvram_addr {
@@ -133,8 +136,12 @@ enum {
 #define TPACPI_PROC_DIR "ibm"
 #define TPACPI_ACPI_EVENT_PREFIX "ibm"
 #define TPACPI_DRVR_NAME TPACPI_FILE
+#define TPACPI_DRVR_SHORTNAME "tpacpi"
 #define TPACPI_HWMON_DRVR_NAME TPACPI_NAME "_hwmon"
 
+#define TPACPI_NVRAM_KTHREAD_NAME "ktpacpi_nvramd"
+#define TPACPI_WORKQUEUE_NAME "ktpacpid"
+
 #define TPACPI_MAX_ACPI_ARGS 3
 
 /* Debugging */
@@ -225,6 +232,7 @@ static struct {
        u32 light:1;
        u32 light_status:1;
        u32 bright_16levels:1;
+       u32 bright_acpimode:1;
        u32 wan:1;
        u32 fan_ctrl_status_undef:1;
        u32 input_device_registered:1;
@@ -236,6 +244,11 @@ static struct {
        u32 hotkey_poll_active:1;
 } tp_features;
 
+static struct {
+       u16 hotkey_mask_ff:1;
+       u16 bright_cmos_ec_unsync:1;
+} tp_warned;
+
 struct thinkpad_id_data {
        unsigned int vendor;    /* ThinkPad vendor:
                                 * PCI_VENDOR_ID_IBM/PCI_VENDOR_ID_LENOVO */
@@ -246,7 +259,8 @@ struct thinkpad_id_data {
        u16 bios_model;         /* Big Endian, TP-1Y = 0x5931, 0 = unknown */
        u16 ec_model;
 
-       char *model_str;
+       char *model_str;        /* ThinkPad T43 */
+       char *nummodel_str;     /* 9384A9C for a 9384-A9C model */
 };
 static struct thinkpad_id_data thinkpad_id;
 
@@ -259,6 +273,16 @@ static enum {
 static int experimental;
 static u32 dbg_level;
 
+static struct workqueue_struct *tpacpi_wq;
+
+/* Special LED class that can defer work */
+struct tpacpi_led_classdev {
+       struct led_classdev led_classdev;
+       struct work_struct work;
+       enum led_brightness new_brightness;
+       unsigned int led;
+};
+
 /****************************************************************************
  ****************************************************************************
  *
@@ -807,6 +831,80 @@ static int parse_strtoul(const char *buf,
        return 0;
 }
 
+static int __init tpacpi_query_bcl_levels(acpi_handle handle)
+{
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *obj;
+       int rc;
+
+       if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) {
+               obj = (union acpi_object *)buffer.pointer;
+               if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
+                       printk(TPACPI_ERR "Unknown _BCL data, "
+                              "please report this to %s\n", TPACPI_MAIL);
+                       rc = 0;
+               } else {
+                       rc = obj->package.count;
+               }
+       } else {
+               return 0;
+       }
+
+       kfree(buffer.pointer);
+       return rc;
+}
+
+static acpi_status __init tpacpi_acpi_walk_find_bcl(acpi_handle handle,
+                                       u32 lvl, void *context, void **rv)
+{
+       char name[ACPI_PATH_SEGMENT_LENGTH];
+       struct acpi_buffer buffer = { sizeof(name), &name };
+
+       if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) &&
+           !strncmp("_BCL", name, sizeof(name) - 1)) {
+               BUG_ON(!rv || !*rv);
+               **(int **)rv = tpacpi_query_bcl_levels(handle);
+               return AE_CTRL_TERMINATE;
+       } else {
+               return AE_OK;
+       }
+}
+
+/*
+ * Returns 0 (no ACPI _BCL or _BCL invalid), or size of brightness map
+ */
+static int __init tpacpi_check_std_acpi_brightness_support(void)
+{
+       int status;
+       int bcl_levels = 0;
+       void *bcl_ptr = &bcl_levels;
+
+       if (!vid_handle) {
+               TPACPI_ACPIHANDLE_INIT(vid);
+       }
+       if (!vid_handle)
+               return 0;
+
+       /*
+        * Search for a _BCL method, and execute it.  This is safe on all
+        * ThinkPads, and as a side-effect, _BCL will place a Lenovo Vista
+        * BIOS in ACPI backlight control mode.  We do NOT need to call
+        * _BCL on an enabled video device; any one will do for our
+        * purposes.
+        */
+
+       status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3,
+                                    tpacpi_acpi_walk_find_bcl, NULL,
+                                    &bcl_ptr);
+
+       if (ACPI_SUCCESS(status) && bcl_levels > 2) {
+               tp_features.bright_acpimode = 1;
+               return (bcl_levels - 2);
+       }
+
+       return 0;
+}
+
 /*************************************************************************
  * thinkpad-acpi driver attributes
  */
@@ -909,12 +1007,14 @@ static int __init thinkpad_acpi_driver_init(struct ibm_init_struct *iibm)
                        thinkpad_id.ec_version_str : "unknown");
 
        if (thinkpad_id.vendor && thinkpad_id.model_str)
-               printk(TPACPI_INFO "%s %s\n",
+               printk(TPACPI_INFO "%s %s, model %s\n",
                        (thinkpad_id.vendor == PCI_VENDOR_ID_IBM) ?
                                "IBM" : ((thinkpad_id.vendor ==
                                                PCI_VENDOR_ID_LENOVO) ?
                                        "Lenovo" : "Unknown vendor"),
-                       thinkpad_id.model_str);
+                       thinkpad_id.model_str,
+                       (thinkpad_id.nummodel_str) ?
+                               thinkpad_id.nummodel_str : "unknown");
 
        return 0;
 }
@@ -1107,6 +1207,19 @@ static int hotkey_mask_set(u32 mask)
        int rc = 0;
 
        if (tp_features.hotkey_mask) {
+               if (!tp_warned.hotkey_mask_ff &&
+                   (mask == 0xffff || mask == 0xffffff ||
+                    mask == 0xffffffff)) {
+                       tp_warned.hotkey_mask_ff = 1;
+                       printk(TPACPI_NOTICE
+                              "setting the hotkey mask to 0x%08x is likely "
+                              "not the best way to go about it\n", mask);
+                       printk(TPACPI_NOTICE
+                              "please consider using the driver defaults, "
+                              "and refer to up-to-date thinkpad-acpi "
+                              "documentation\n");
+               }
+
                HOTKEY_CONFIG_CRITICAL_START
                for (i = 0; i < 32; i++) {
                        u32 m = 1 << i;
@@ -1427,8 +1540,7 @@ static void hotkey_poll_setup(int may_warn)
            (tpacpi_inputdev->users > 0 || hotkey_report_mode < 2)) {
                if (!tpacpi_hotkey_task) {
                        tpacpi_hotkey_task = kthread_run(hotkey_kthread,
-                                                        NULL,
-                                                        TPACPI_FILE "d");
+                                       NULL, TPACPI_NVRAM_KTHREAD_NAME);
                        if (IS_ERR(tpacpi_hotkey_task)) {
                                tpacpi_hotkey_task = NULL;
                                printk(TPACPI_ERR
@@ -1887,6 +1999,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
                KEY_UNKNOWN,    /* 0x0D: FN+INSERT */
                KEY_UNKNOWN,    /* 0x0E: FN+DELETE */
 
+               /* These either have to go through ACPI video, or
+                * behave as they do on IBM ThinkPads, so don't ever
+                * enable them by default */
                KEY_RESERVED,   /* 0x0F: FN+HOME (brightness up) */
                KEY_RESERVED,   /* 0x10: FN+END (brightness down) */
 
@@ -2091,6 +2206,32 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
                        set_bit(SW_TABLET_MODE, tpacpi_inputdev->swbit);
                }
 
+               /* Do not issue duplicate brightness change events to
+                * userspace */
+               if (!tp_features.bright_acpimode)
+                       /* update bright_acpimode... */
+                       tpacpi_check_std_acpi_brightness_support();
+
+               if (tp_features.bright_acpimode) {
+                       printk(TPACPI_INFO
+                              "This ThinkPad has standard ACPI backlight "
+                              "brightness control, supported by the ACPI "
+                              "video driver\n");
+                       printk(TPACPI_NOTICE
+                              "Disabling thinkpad-acpi brightness events "
+                              "by default...\n");
+
+                       /* The hotkey_reserved_mask change below is not
+                        * necessary while the keys are at KEY_RESERVED in the
+                        * default map, but better safe than sorry, leave it
+                        * here as a marker of what we have to do, especially
+                        * when we finally become able to set this at runtime
+                        * in response to X.org requests */
+                       hotkey_reserved_mask |=
+                               (1 << TP_ACPI_HOTKEYSCAN_FNHOME)
+                               | (1 << TP_ACPI_HOTKEYSCAN_FNEND);
+               }
+
                dbg_printk(TPACPI_DBG_INIT,
                                "enabling hot key handling\n");
                res = hotkey_status_set(1);
@@ -3110,13 +3251,82 @@ static struct ibm_struct video_driver_data = {
 TPACPI_HANDLE(lght, root, "\\LGHT");   /* A21e, A2xm/p, T20-22, X20-21 */
 TPACPI_HANDLE(ledb, ec, "LEDB");               /* G4x */
 
+static int light_get_status(void)
+{
+       int status = 0;
+
+       if (tp_features.light_status) {
+               if (!acpi_evalf(ec_handle, &status, "KBLT", "d"))
+                       return -EIO;
+               return (!!status);
+       }
+
+       return -ENXIO;
+}
+
+static int light_set_status(int status)
+{
+       int rc;
+
+       if (tp_features.light) {
+               if (cmos_handle) {
+                       rc = acpi_evalf(cmos_handle, NULL, NULL, "vd",
+                                       (status)?
+                                               TP_CMOS_THINKLIGHT_ON :
+                                               TP_CMOS_THINKLIGHT_OFF);
+               } else {
+                       rc = acpi_evalf(lght_handle, NULL, NULL, "vd",
+                                       (status)? 1 : 0);
+               }
+               return (rc)? 0 : -EIO;
+       }
+
+       return -ENXIO;
+}
+
+static void light_set_status_worker(struct work_struct *work)
+{
+       struct tpacpi_led_classdev *data =
+                       container_of(work, struct tpacpi_led_classdev, work);
+
+       if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
+               light_set_status((data->new_brightness != LED_OFF));
+}
+
+static void light_sysfs_set(struct led_classdev *led_cdev,
+                       enum led_brightness brightness)
+{
+       struct tpacpi_led_classdev *data =
+               container_of(led_cdev,
+                            struct tpacpi_led_classdev,
+                            led_classdev);
+       data->new_brightness = brightness;
+       queue_work(tpacpi_wq, &data->work);
+}
+
+static enum led_brightness light_sysfs_get(struct led_classdev *led_cdev)
+{
+       return (light_get_status() == 1)? LED_FULL : LED_OFF;
+}
+
+static struct tpacpi_led_classdev tpacpi_led_thinklight = {
+       .led_classdev = {
+               .name           = "tpacpi::thinklight",
+               .brightness_set = &light_sysfs_set,
+               .brightness_get = &light_sysfs_get,
+       }
+};
+
 static int __init light_init(struct ibm_init_struct *iibm)
 {
+       int rc = 0;
+
        vdbg_printk(TPACPI_DBG_INIT, "initializing light subdriver\n");
 
        TPACPI_ACPIHANDLE_INIT(ledb);
        TPACPI_ACPIHANDLE_INIT(lght);
        TPACPI_ACPIHANDLE_INIT(cmos);
+       INIT_WORK(&tpacpi_led_thinklight.work, light_set_status_worker);
 
        /* light not supported on 570, 600e/x, 770e, 770x, G4x, R30, R31 */
        tp_features.light = (cmos_handle || lght_handle) && !ledb_handle;
@@ -3130,13 +3340,31 @@ static int __init light_init(struct ibm_init_struct *iibm)
        vdbg_printk(TPACPI_DBG_INIT, "light is %s\n",
                str_supported(tp_features.light));
 
-       return (tp_features.light)? 0 : 1;
+       if (tp_features.light) {
+               rc = led_classdev_register(&tpacpi_pdev->dev,
+                                          &tpacpi_led_thinklight.led_classdev);
+       }
+
+       if (rc < 0) {
+               tp_features.light = 0;
+               tp_features.light_status = 0;
+       } else {
+               rc = (tp_features.light)? 0 : 1;
+       }
+       return rc;
+}
+
+static void light_exit(void)
+{
+       led_classdev_unregister(&tpacpi_led_thinklight.led_classdev);
+       if (work_pending(&tpacpi_led_thinklight.work))
+               flush_workqueue(tpacpi_wq);
 }
 
 static int light_read(char *p)
 {
        int len = 0;
-       int status = 0;
+       int status;
 
        if (!tp_features.light) {
                len += sprintf(p + len, "status:\t\tnot supported\n");
@@ -3144,8 +3372,9 @@ static int light_read(char *p)
                len += sprintf(p + len, "status:\t\tunknown\n");
                len += sprintf(p + len, "commands:\ton, off\n");
        } else {
-               if (!acpi_evalf(ec_handle, &status, "KBLT", "d"))
-                       return -EIO;
+               status = light_get_status();
+               if (status < 0)
+                       return status;
                len += sprintf(p + len, "status:\t\t%s\n", onoff(status, 0));
                len += sprintf(p + len, "commands:\ton, off\n");
        }
@@ -3155,37 +3384,29 @@ static int light_read(char *p)
 
 static int light_write(char *buf)
 {
-       int cmos_cmd, lght_cmd;
        char *cmd;
-       int success;
+       int newstatus = 0;
 
        if (!tp_features.light)
                return -ENODEV;
 
        while ((cmd = next_cmd(&buf))) {
                if (strlencmp(cmd, "on") == 0) {
-                       cmos_cmd = 0x0c;
-                       lght_cmd = 1;
+                       newstatus = 1;
                } else if (strlencmp(cmd, "off") == 0) {
-                       cmos_cmd = 0x0d;
-                       lght_cmd = 0;
+                       newstatus = 0;
                } else
                        return -EINVAL;
-
-               success = cmos_handle ?
-                   acpi_evalf(cmos_handle, NULL, NULL, "vd", cmos_cmd) :
-                   acpi_evalf(lght_handle, NULL, NULL, "vd", lght_cmd);
-               if (!success)
-                       return -EIO;
        }
 
-       return 0;
+       return light_set_status(newstatus);
 }
 
 static struct ibm_struct light_driver_data = {
        .name = "light",
        .read = light_read,
        .write = light_write,
+       .exit = light_exit,
 };
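
Both the thinklight above and the status LEDs added below are exposed through the LED class, whose brightness_set callback may be invoked from contexts that cannot sleep, while the CMOS/EC/ACPI accesses behind these LEDs can. The driver therefore only records the requested brightness and defers the hardware access to the tpacpi_wq workqueue. A minimal sketch of that deferral with a hypothetical foo LED (the workqueue would be created at init time, e.g. with create_singlethread_workqueue(), and the work initialized with INIT_WORK()):

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/workqueue.h>

struct foo_led {
	struct led_classdev cdev;
	struct work_struct work;
	enum led_brightness new_brightness;
};

static struct workqueue_struct *foo_wq;	/* created during module init */

/* Runs in process context: safe to sleep and talk to the firmware here. */
static void foo_led_work(struct work_struct *work)
{
	struct foo_led *led = container_of(work, struct foo_led, work);

	/* a real driver would issue the slow EC/ACPI request here */
	pr_debug("foo led -> %d\n", led->new_brightness);
}

/* brightness_set hook: may run in atomic context, so record and defer. */
static void foo_led_set(struct led_classdev *cdev,
			enum led_brightness brightness)
{
	struct foo_led *led = container_of(cdev, struct foo_led, cdev);

	led->new_brightness = brightness;
	queue_work(foo_wq, &led->work);
}
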
 
 /*************************************************************************
@@ -3583,6 +3804,12 @@ enum {   /* For TPACPI_LED_OLD */
        TPACPI_LED_EC_HLMS = 0x0e,      /* EC reg to select led to command */
 };
 
+enum led_status_t {
+       TPACPI_LED_OFF = 0,
+       TPACPI_LED_ON,
+       TPACPI_LED_BLINK,
+};
+
 static enum led_access_mode led_supported;
 
 TPACPI_HANDLE(led, ec, "SLED", /* 570 */
@@ -3591,8 +3818,174 @@ TPACPI_HANDLE(led, ec, "SLED",  /* 570 */
           "LED",               /* all others */
           );                   /* R30, R31 */
 
+#define TPACPI_LED_NUMLEDS 8
+static struct tpacpi_led_classdev *tpacpi_leds;
+static enum led_status_t tpacpi_led_state_cache[TPACPI_LED_NUMLEDS];
+static const char * const tpacpi_led_names[TPACPI_LED_NUMLEDS] = {
+       /* there's a limit of 19 chars + NULL before 2.6.26 */
+       "tpacpi::power",
+       "tpacpi:orange:batt",
+       "tpacpi:green:batt",
+       "tpacpi::dock_active",
+       "tpacpi::bay_active",
+       "tpacpi::dock_batt",
+       "tpacpi::unknown_led",
+       "tpacpi::standby",
+};
+
+static int led_get_status(unsigned int led)
+{
+       int status;
+       enum led_status_t led_s;
+
+       switch (led_supported) {
+       case TPACPI_LED_570:
+               if (!acpi_evalf(ec_handle,
+                               &status, "GLED", "dd", 1 << led))
+                       return -EIO;
+               led_s = (status == 0)?
+                               TPACPI_LED_OFF :
+                               ((status == 1)?
+                                       TPACPI_LED_ON :
+                                       TPACPI_LED_BLINK);
+               tpacpi_led_state_cache[led] = led_s;
+               return led_s;
+       default:
+               return -ENXIO;
+       }
+
+       /* not reached */
+}
+
+static int led_set_status(unsigned int led, enum led_status_t ledstatus)
+{
+       /* off, on, blink. Index is led_status_t */
+       static const int led_sled_arg1[] = { 0, 1, 3 };
+       static const int led_exp_hlbl[] = { 0, 0, 1 };  /* led# * */
+       static const int led_exp_hlcl[] = { 0, 1, 1 };  /* led# * */
+       static const int led_led_arg1[] = { 0, 0x80, 0xc0 };
+
+       int rc = 0;
+
+       switch (led_supported) {
+       case TPACPI_LED_570:
+               /* 570; leave 'led' unshifted, it indexes the state cache */
+               if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
+                               1 << led, led_sled_arg1[ledstatus]))
+                       rc = -EIO;
+               break;
+       case TPACPI_LED_OLD:
+               /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20 */
+               rc = ec_write(TPACPI_LED_EC_HLMS, 1 << led);
+               if (rc >= 0)
+                       rc = ec_write(TPACPI_LED_EC_HLBL,
+                                     (1 << led) * led_exp_hlbl[ledstatus]);
+               if (rc >= 0)
+                       rc = ec_write(TPACPI_LED_EC_HLCL,
+                                     (1 << led) * led_exp_hlcl[ledstatus]);
+               break;
+       case TPACPI_LED_NEW:
+               /* all others */
+               if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
+                               led, led_led_arg1[ledstatus]))
+                       rc = -EIO;
+               break;
+       default:
+               rc = -ENXIO;
+       }
+
+       if (!rc)
+               tpacpi_led_state_cache[led] = ledstatus;
+
+       return rc;
+}
+
+static void led_sysfs_set_status(unsigned int led,
+                                enum led_brightness brightness)
+{
+       led_set_status(led,
+                       (brightness == LED_OFF) ?
+                       TPACPI_LED_OFF :
+                       (tpacpi_led_state_cache[led] == TPACPI_LED_BLINK) ?
+                               TPACPI_LED_BLINK : TPACPI_LED_ON);
+}
+
+static void led_set_status_worker(struct work_struct *work)
+{
+       struct tpacpi_led_classdev *data =
+               container_of(work, struct tpacpi_led_classdev, work);
+
+       if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
+               led_sysfs_set_status(data->led, data->new_brightness);
+}
+
+static void led_sysfs_set(struct led_classdev *led_cdev,
+                       enum led_brightness brightness)
+{
+       struct tpacpi_led_classdev *data = container_of(led_cdev,
+                            struct tpacpi_led_classdev, led_classdev);
+
+       data->new_brightness = brightness;
+       queue_work(tpacpi_wq, &data->work);
+}
+
+static int led_sysfs_blink_set(struct led_classdev *led_cdev,
+                       unsigned long *delay_on, unsigned long *delay_off)
+{
+       struct tpacpi_led_classdev *data = container_of(led_cdev,
+                            struct tpacpi_led_classdev, led_classdev);
+
+       /* Can we choose the flash rate? */
+       if (*delay_on == 0 && *delay_off == 0) {
+               /* yes. set them to the hardware blink rate (1 Hz) */
+               *delay_on = 500; /* ms */
+               *delay_off = 500; /* ms */
+       } else if ((*delay_on != 500) || (*delay_off != 500))
+               return -EINVAL;
+
+       data->new_brightness = TPACPI_LED_BLINK;
+       queue_work(tpacpi_wq, &data->work);
+
+       return 0;
+}
+
+static enum led_brightness led_sysfs_get(struct led_classdev *led_cdev)
+{
+       int rc;
+
+       struct tpacpi_led_classdev *data = container_of(led_cdev,
+                            struct tpacpi_led_classdev, led_classdev);
+
+       rc = led_get_status(data->led);
+
+       if (rc == TPACPI_LED_OFF || rc < 0)
+               rc = LED_OFF;   /* no error handling in led class :( */
+       else
+               rc = LED_FULL;
+
+       return rc;
+}
+
+static void led_exit(void)
+{
+       unsigned int i;
+
+       for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
+               if (tpacpi_leds[i].led_classdev.name)
+                       led_classdev_unregister(&tpacpi_leds[i].led_classdev);
+       }
+
+       kfree(tpacpi_leds);
+       tpacpi_leds = NULL;
+}
+
 static int __init led_init(struct ibm_init_struct *iibm)
 {
+       unsigned int i;
+       int rc;
+
        vdbg_printk(TPACPI_DBG_INIT, "initializing LED subdriver\n");
 
        TPACPI_ACPIHANDLE_INIT(led);
@@ -3613,10 +4006,41 @@ static int __init led_init(struct ibm_init_struct *iibm)
        vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n",
                str_supported(led_supported), led_supported);
 
+       tpacpi_leds = kzalloc(sizeof(*tpacpi_leds) * TPACPI_LED_NUMLEDS,
+                             GFP_KERNEL);
+       if (!tpacpi_leds) {
+               printk(TPACPI_ERR "Out of memory for LED data\n");
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < TPACPI_LED_NUMLEDS; i++) {
+               tpacpi_leds[i].led = i;
+
+               tpacpi_leds[i].led_classdev.brightness_set = &led_sysfs_set;
+               tpacpi_leds[i].led_classdev.blink_set = &led_sysfs_blink_set;
+               if (led_supported == TPACPI_LED_570)
+                       tpacpi_leds[i].led_classdev.brightness_get =
+                                                       &led_sysfs_get;
+
+               tpacpi_leds[i].led_classdev.name = tpacpi_led_names[i];
+
+               INIT_WORK(&tpacpi_leds[i].work, led_set_status_worker);
+
+               rc = led_classdev_register(&tpacpi_pdev->dev,
+                                          &tpacpi_leds[i].led_classdev);
+               if (rc < 0) {
+                       tpacpi_leds[i].led_classdev.name = NULL;
+                       led_exit();
+                       return rc;
+               }
+       }
+
        return (led_supported != TPACPI_LED_NONE)? 0 : 1;
 }
 
-#define led_status(s) ((s) == 0 ? "off" : ((s) == 1 ? "on" : "blinking"))
+#define str_led_status(s) \
+       ((s) == TPACPI_LED_OFF ? "off" : \
+               ((s) == TPACPI_LED_ON ? "on" : "blinking"))
 
 static int led_read(char *p)
 {
@@ -3632,11 +4056,11 @@ static int led_read(char *p)
                /* 570 */
                int i, status;
                for (i = 0; i < 8; i++) {
-                       if (!acpi_evalf(ec_handle,
-                                       &status, "GLED", "dd", 1 << i))
+                       status = led_get_status(i);
+                       if (status < 0)
                                return -EIO;
                        len += sprintf(p + len, "%d:\t\t%s\n",
-                                      i, led_status(status));
+                                      i, str_led_status(status));
                }
        }
 
@@ -3646,16 +4070,11 @@ static int led_read(char *p)
        return len;
 }
 
-/* off, on, blink */
-static const int led_sled_arg1[] = { 0, 1, 3 };
-static const int led_exp_hlbl[] = { 0, 0, 1 }; /* led# * */
-static const int led_exp_hlcl[] = { 0, 1, 1 }; /* led# * */
-static const int led_led_arg1[] = { 0, 0x80, 0xc0 };
-
 static int led_write(char *buf)
 {
        char *cmd;
-       int led, ind, ret;
+       int led, rc;
+       enum led_status_t s;
 
        if (!led_supported)
                return -ENODEV;
@@ -3665,38 +4084,18 @@ static int led_write(char *buf)
                        return -EINVAL;
 
                if (strstr(cmd, "off")) {
-                       ind = 0;
+                       s = TPACPI_LED_OFF;
                } else if (strstr(cmd, "on")) {
-                       ind = 1;
+                       s = TPACPI_LED_ON;
                } else if (strstr(cmd, "blink")) {
-                       ind = 2;
-               } else
-                       return -EINVAL;
-
-               if (led_supported == TPACPI_LED_570) {
-                       /* 570 */
-                       led = 1 << led;
-                       if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
-                                       led, led_sled_arg1[ind]))
-                               return -EIO;
-               } else if (led_supported == TPACPI_LED_OLD) {
-                       /* 600e/x, 770e, 770x, A21e, A2xm/p, T20-22, X20 */
-                       led = 1 << led;
-                       ret = ec_write(TPACPI_LED_EC_HLMS, led);
-                       if (ret >= 0)
-                               ret = ec_write(TPACPI_LED_EC_HLBL,
-                                               led * led_exp_hlbl[ind]);
-                       if (ret >= 0)
-                               ret = ec_write(TPACPI_LED_EC_HLCL,
-                                               led * led_exp_hlcl[ind]);
-                       if (ret < 0)
-                               return ret;
+                       s = TPACPI_LED_BLINK;
                } else {
-                       /* all others */
-                       if (!acpi_evalf(led_handle, NULL, NULL, "vdd",
-                                       led, led_led_arg1[ind]))
-                               return -EIO;
+                       return -EINVAL;
                }
+
+               rc = led_set_status(led, s);
+               if (rc < 0)
+                       return rc;
        }
 
        return 0;
@@ -3706,6 +4105,7 @@ static struct ibm_struct led_driver_data = {
        .name = "led",
        .read = led_read,
        .write = led_write,
+       .exit = led_exit,
 };
 
 /*************************************************************************
@@ -4170,8 +4570,16 @@ static struct ibm_struct ecdump_driver_data = {
 
 #define TPACPI_BACKLIGHT_DEV_NAME "thinkpad_screen"
 
+enum {
+       TP_EC_BACKLIGHT = 0x31,
+
+       /* TP_EC_BACKLIGHT bitmasks */
+       TP_EC_BACKLIGHT_LVLMSK = 0x1F,
+       TP_EC_BACKLIGHT_CMDMSK = 0xE0,
+       TP_EC_BACKLIGHT_MAPSW = 0x20,
+};
+
 static struct backlight_device *ibm_backlight_device;
-static int brightness_offset = 0x31;
 static int brightness_mode;
 static unsigned int brightness_enable = 2; /* 2 = auto, 0 = no, 1 = yes */
 
@@ -4180,16 +4588,24 @@ static struct mutex brightness_mutex;
 /*
  * ThinkPads can read brightness from two places: EC 0x31, or
  * CMOS NVRAM byte 0x5E, bits 0-3.
+ *
+ * EC 0x31 has the following layout
+ *   Bit 7: unknown function
+ *   Bit 6: unknown function
+ *   Bit 5: Z: honour scale changes, NZ: ignore scale changes
+ *   Bit 4: must be set to zero to avoid problems
+ *   Bit 3-0: backlight brightness level
+ *
+ * brightness_get_raw returns status data in the EC 0x31 layout
  */
-static int brightness_get(struct backlight_device *bd)
+static int brightness_get_raw(int *status)
 {
        u8 lec = 0, lcmos = 0, level = 0;
 
        if (brightness_mode & 1) {
-               if (!acpi_ec_read(brightness_offset, &lec))
+               if (!acpi_ec_read(TP_EC_BACKLIGHT, &lec))
                        return -EIO;
-               lec &= (tp_features.bright_16levels)? 0x0f : 0x07;
-               level = lec;
+               level = lec & TP_EC_BACKLIGHT_LVLMSK;
        };
        if (brightness_mode & 2) {
                lcmos = (nvram_read_byte(TP_NVRAM_ADDR_BRIGHTNESS)
@@ -4199,16 +4615,27 @@ static int brightness_get(struct backlight_device *bd)
                level = lcmos;
        }
 
-       if (brightness_mode == 3 && lec != lcmos) {
-               printk(TPACPI_ERR
-                       "CMOS NVRAM (%u) and EC (%u) do not agree "
-                       "on display brightness level\n",
-                       (unsigned int) lcmos,
-                       (unsigned int) lec);
-               return -EIO;
+       if (brightness_mode == 3) {
+               *status = lec;  /* Prefer EC, CMOS is just a backing store */
+               lec &= TP_EC_BACKLIGHT_LVLMSK;
+               if (lec == lcmos)
+                       tp_warned.bright_cmos_ec_unsync = 0;
+               else {
+                       if (!tp_warned.bright_cmos_ec_unsync) {
+                               printk(TPACPI_ERR
+                                       "CMOS NVRAM (%u) and EC (%u) do not "
+                                       "agree on display brightness level\n",
+                                       (unsigned int) lcmos,
+                                       (unsigned int) lec);
+                               tp_warned.bright_cmos_ec_unsync = 1;
+                       }
+                       return -EIO;
+               }
+       } else {
+               *status = level;
        }
 
-       return level;
+       return 0;
 }
 
 /* May return EINTR which can always be mapped to ERESTARTSYS */
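
brightness_get_raw() above deliberately hands back the whole EC register 0x31 byte: the low bits (TP_EC_BACKLIGHT_LVLMSK) are the brightness level, while the high bits (TP_EC_BACKLIGHT_CMDMSK) carry mode/command flags that brightness_set() below must preserve when writing a new level. A minimal sketch of that read-modify-write, using the generic ec_read()/ec_write() helpers and hypothetical foo_* names:

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/types.h>

#define FOO_EC_BACKLIGHT	0x31	/* same EC register as TP_EC_BACKLIGHT */
#define FOO_EC_LVLMSK		0x1f	/* level field, as TP_EC_BACKLIGHT_LVLMSK */
#define FOO_EC_CMDMSK		0xe0	/* bits that must survive a write */

/* Hypothetical wrapper: set a new level without touching the command bits. */
static int foo_set_backlight_level(u8 level)
{
	u8 cur;

	if (ec_read(FOO_EC_BACKLIGHT, &cur) < 0)
		return -EIO;

	cur = (cur & FOO_EC_CMDMSK) | (level & FOO_EC_LVLMSK);

	if (ec_write(FOO_EC_BACKLIGHT, cur) < 0)
		return -EIO;

	return 0;
}
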
@@ -4216,19 +4643,22 @@ static int brightness_set(int value)
 {
        int cmos_cmd, inc, i, res;
        int current_value;
+       int command_bits;
 
-       if (value > ((tp_features.bright_16levels)? 15 : 7))
+       if (value > ((tp_features.bright_16levels)? 15 : 7) ||
+           value < 0)
                return -EINVAL;
 
        res = mutex_lock_interruptible(&brightness_mutex);
        if (res < 0)
                return res;
 
-       current_value = brightness_get(NULL);
-       if (current_value < 0) {
-               res = current_value;
+       res = brightness_get_raw(&current_value);
+       if (res < 0)
                goto errout;
-       }
+
+       command_bits = current_value & TP_EC_BACKLIGHT_CMDMSK;
+       current_value &= TP_EC_BACKLIGHT_LVLMSK;
 
        cmos_cmd = value > current_value ?
                        TP_CMOS_BRIGHTNESS_UP :
@@ -4243,7 +4673,8 @@ static int brightness_set(int value)
                        goto errout;
                }
                if ((brightness_mode & 1) &&
-                   !acpi_ec_write(brightness_offset, i + inc)) {
+                   !acpi_ec_write(TP_EC_BACKLIGHT,
+                                  (i + inc) | command_bits)) {
                        res = -EIO;
                        goto errout;
                }
@@ -4266,106 +4697,23 @@ static int brightness_update_status(struct backlight_device *bd)
                                bd->props.brightness : 0);
 }
 
-static struct backlight_ops ibm_backlight_data = {
-       .get_brightness = brightness_get,
-       .update_status  = brightness_update_status,
-};
-
-/* --------------------------------------------------------------------- */
-
-static int __init tpacpi_query_bcll_levels(acpi_handle handle)
-{
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *obj;
-       int rc;
-
-       if (ACPI_SUCCESS(acpi_evaluate_object(handle, NULL, NULL, &buffer))) {
-               obj = (union acpi_object *)buffer.pointer;
-               if (!obj || (obj->type != ACPI_TYPE_PACKAGE)) {
-                       printk(TPACPI_ERR "Unknown BCLL data, "
-                              "please report this to %s\n", TPACPI_MAIL);
-                       rc = 0;
-               } else {
-                       rc = obj->package.count;
-               }
-       } else {
-               return 0;
-       }
-
-       kfree(buffer.pointer);
-       return rc;
-}
-
-static acpi_status __init brightness_find_bcll(acpi_handle handle, u32 lvl,
-                                       void *context, void **rv)
-{
-       char name[ACPI_PATH_SEGMENT_LENGTH];
-       struct acpi_buffer buffer = { sizeof(name), &name };
-
-       if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) &&
-           !strncmp("BCLL", name, sizeof(name) - 1)) {
-               if (tpacpi_query_bcll_levels(handle) == 16) {
-                       *rv = handle;
-                       return AE_CTRL_TERMINATE;
-               } else {
-                       return AE_OK;
-               }
-       } else {
-               return AE_OK;
-       }
-}
-
-static int __init brightness_check_levels(void)
+static int brightness_get(struct backlight_device *bd)
 {
-       int status;
-       void *found_node = NULL;
+       int status, res;
 
-       if (!vid_handle) {
-               TPACPI_ACPIHANDLE_INIT(vid);
-       }
-       if (!vid_handle)
-               return 0;
-
-       /* Search for a BCLL package with 16 levels */
-       status = acpi_walk_namespace(ACPI_TYPE_PACKAGE, vid_handle, 3,
-                                       brightness_find_bcll, NULL,
-                                       &found_node);
-
-       return (ACPI_SUCCESS(status) && found_node != NULL);
-}
-
-static acpi_status __init brightness_find_bcl(acpi_handle handle, u32 lvl,
-                                       void *context, void **rv)
-{
-       char name[ACPI_PATH_SEGMENT_LENGTH];
-       struct acpi_buffer buffer = { sizeof(name), &name };
+       res = brightness_get_raw(&status);
+       if (res < 0)
+               return 0; /* FIXME: teach backlight about error handling */
 
-       if (ACPI_SUCCESS(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)) &&
-           !strncmp("_BCL", name, sizeof(name) - 1)) {
-               *rv = handle;
-               return AE_CTRL_TERMINATE;
-       } else {
-               return AE_OK;
-       }
+       return status & TP_EC_BACKLIGHT_LVLMSK;
 }
 
-static int __init brightness_check_std_acpi_support(void)
-{
-       int status;
-       void *found_node = NULL;
-
-       if (!vid_handle) {
-               TPACPI_ACPIHANDLE_INIT(vid);
-       }
-       if (!vid_handle)
-               return 0;
-
-       /* Search for a _BCL method, but don't execute it */
-       status = acpi_walk_namespace(ACPI_TYPE_METHOD, vid_handle, 3,
-                                    brightness_find_bcl, NULL, &found_node);
+static struct backlight_ops ibm_backlight_data = {
+       .get_brightness = brightness_get,
+       .update_status  = brightness_update_status,
+};
 
-       return (ACPI_SUCCESS(status) && found_node != NULL);
-}
+/* --------------------------------------------------------------------- */
 
 static int __init brightness_init(struct ibm_init_struct *iibm)
 {
@@ -4375,13 +4723,19 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
 
        mutex_init(&brightness_mutex);
 
-       if (!brightness_enable) {
-               dbg_printk(TPACPI_DBG_INIT,
-                          "brightness support disabled by "
-                          "module parameter\n");
-               return 1;
-       } else if (brightness_enable > 1) {
-               if (brightness_check_std_acpi_support()) {
+       /*
+        * We always attempt to detect ACPI support, so as to switch the
+        * Lenovo Vista BIOS to ACPI brightness mode even if we are not
+        * going to publish a backlight interface.
+        */
+       b = tpacpi_check_std_acpi_brightness_support();
+       if (b > 0) {
+               if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO) {
+                       printk(TPACPI_NOTICE
+                              "Lenovo BIOS switched to ACPI backlight "
+                              "control mode\n");
+               }
+               if (brightness_enable > 1) {
                        printk(TPACPI_NOTICE
                               "standard ACPI backlight interface "
                               "available, not loading native one...\n");
@@ -4389,6 +4743,22 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
                }
        }
 
+       if (!brightness_enable) {
+               dbg_printk(TPACPI_DBG_INIT,
+                          "brightness support disabled by "
+                          "module parameter\n");
+               return 1;
+       }
+
+       if (b > 16) {
+               printk(TPACPI_ERR
+                      "Unsupported brightness interface, "
+                      "please contact %s\n", TPACPI_MAIL);
+               return 1;
+       }
+       if (b == 16)
+               tp_features.bright_16levels = 1;
+
        if (!brightness_mode) {
                if (thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO)
                        brightness_mode = 2;
@@ -4402,12 +4772,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
        if (brightness_mode > 3)
                return -EINVAL;
 
-       tp_features.bright_16levels =
-                       thinkpad_id.vendor == PCI_VENDOR_ID_LENOVO &&
-                       brightness_check_levels();
-
-       b = brightness_get(NULL);
-       if (b < 0)
+       if (brightness_get_raw(&b) < 0)
                return 1;
 
        if (tp_features.bright_16levels)
@@ -4425,7 +4790,7 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
 
        ibm_backlight_device->props.max_brightness =
                                (tp_features.bright_16levels)? 15 : 7;
-       ibm_backlight_device->props.brightness = b;
+       ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
        backlight_update_status(ibm_backlight_device);
 
        return 0;
@@ -5046,11 +5411,11 @@ static void fan_watchdog_reset(void)
        if (fan_watchdog_maxinterval > 0 &&
            tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
                fan_watchdog_active = 1;
-               if (!schedule_delayed_work(&fan_watchdog_task,
+               if (!queue_delayed_work(tpacpi_wq, &fan_watchdog_task,
                                msecs_to_jiffies(fan_watchdog_maxinterval
                                                 * 1000))) {
                        printk(TPACPI_ERR
-                              "failed to schedule the fan watchdog, "
+                              "failed to queue the fan watchdog, "
                               "watchdog will not trigger\n");
                }
        } else
@@ -5420,7 +5785,7 @@ static void fan_exit(void)
                           &driver_attr_fan_watchdog);
 
        cancel_delayed_work(&fan_watchdog_task);
-       flush_scheduled_work();
+       flush_workqueue(tpacpi_wq);
 }
 
 static int fan_read(char *p)
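
(The fan watchdog above now runs on the driver's own workqueue instead of the shared kernel one. A self-contained sketch of that lifecycle, using the same 2.6 workqueue API; the names my_wq, my_work and the 1000 ms interval are illustrative only.)

	#include <linux/module.h>
	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	static struct workqueue_struct *my_wq;
	static void my_work_fn(struct work_struct *work);
	static DECLARE_DELAYED_WORK(my_work, my_work_fn);

	static void my_work_fn(struct work_struct *work)
	{
		/* periodic job: re-arm on the private queue, never the shared one */
		queue_delayed_work(my_wq, &my_work, msecs_to_jiffies(1000));
	}

	static int __init my_init(void)
	{
		my_wq = create_singlethread_workqueue("my_wq");
		if (!my_wq)
			return -ENOMEM;
		queue_delayed_work(my_wq, &my_work, msecs_to_jiffies(1000));
		return 0;
	}

	static void __exit my_exit(void)
	{
		cancel_delayed_work(&my_work);	/* stop re-arming */
		flush_workqueue(my_wq);		/* wait for any run already queued */
		destroy_workqueue(my_wq);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");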
@@ -5826,10 +6191,13 @@ static void __init get_thinkpad_model_data(struct thinkpad_id_data *tp)
 
        tp->model_str = kstrdup(dmi_get_system_info(DMI_PRODUCT_VERSION),
                                        GFP_KERNEL);
-       if (strnicmp(tp->model_str, "ThinkPad", 8) != 0) {
+       if (tp->model_str && strnicmp(tp->model_str, "ThinkPad", 8) != 0) {
                kfree(tp->model_str);
                tp->model_str = NULL;
        }
+
+       tp->nummodel_str = kstrdup(dmi_get_system_info(DMI_PRODUCT_NAME),
+                                       GFP_KERNEL);
 }
 
 static int __init probe_for_thinkpad(void)
@@ -6071,6 +6439,9 @@ static void thinkpad_acpi_module_exit(void)
        if (proc_dir)
                remove_proc_entry(TPACPI_PROC_DIR, acpi_root_dir);
 
+       if (tpacpi_wq)
+               destroy_workqueue(tpacpi_wq);
+
        kfree(thinkpad_id.bios_version_str);
        kfree(thinkpad_id.ec_version_str);
        kfree(thinkpad_id.model_str);
@@ -6101,6 +6472,12 @@ static int __init thinkpad_acpi_module_init(void)
        TPACPI_ACPIHANDLE_INIT(ecrd);
        TPACPI_ACPIHANDLE_INIT(ecwr);
 
+       tpacpi_wq = create_singlethread_workqueue(TPACPI_WORKQUEUE_NAME);
+       if (!tpacpi_wq) {
+               thinkpad_acpi_module_exit();
+               return -ENOMEM;
+       }
+
        proc_dir = proc_mkdir(TPACPI_PROC_DIR, acpi_root_dir);
        if (!proc_dir) {
                printk(TPACPI_ERR
@@ -6223,6 +6600,8 @@ static int __init thinkpad_acpi_module_init(void)
 /* Please remove this in year 2009 */
 MODULE_ALIAS("ibm_acpi");
 
+MODULE_ALIAS(TPACPI_DRVR_SHORTNAME);
+
 /*
  * DMI matching for module autoloading
  *
index 365024b83d3da9df9b6e4f7a9d4cf6d216ba523d..35508584ac2ae55ffae78479877fe67b32a3a7d2 100644 (file)
@@ -340,7 +340,7 @@ checkstatus:
 
        /* SPI R3, R4, or R7 == R1 + 4 bytes */
        case MMC_RSP_SPI_R3:
-               cmd->resp[1] = be32_to_cpu(get_unaligned((u32 *)cp));
+               cmd->resp[1] = get_unaligned_be32(cp);
                break;
 
        /* SPI R1 == just one status byte */
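
(The one-line mmc_spi change above swaps an open-coded unaligned big-endian load for the generic helper; the two forms below read the same 32-bit value. A fragment only, assuming cp points at four valid bytes and <asm/unaligned.h> is included.)

	u32 old_way = be32_to_cpu(get_unaligned((u32 *)cp));	/* open-coded */
	u32 new_way = get_unaligned_be32(cp);			/* helper, same result */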
index 95244a7e73531ad0fdf2d451d907edabcfbf5a4c..626ac083f4e09c294c4d465dd3c85e886082867d 100644 (file)
@@ -213,9 +213,10 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema
        void __iomem *base = host->base;
        char *ptr = buffer;
        u32 status;
+       int host_remain = host->size;
 
        do {
-               int count = host->size - (readl(base + MMCIFIFOCNT) << 2);
+               int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);
 
                if (count > remain)
                        count = remain;
@@ -227,6 +228,7 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema
 
                ptr += count;
                remain -= count;
+               host_remain -= count;
 
                if (remain == 0)
                        break;
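
(The mmci hunk above makes the PIO read loop track the outstanding byte count in a local variable instead of re-reading host->size, which does not change inside the loop. The pattern in isolation, with names mirroring the hunk:)

	int host_remain = host->size;		/* snapshot once before the loop */
	do {
		int count = host_remain - (readl(base + MMCIFIFOCNT) << 2);
		if (count > remain)
			count = remain;
		/* ... drain count bytes from the FIFO into ptr ... */
		remain      -= count;
		host_remain -= count;		/* keep the local view in sync */
	} while (remain);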
index f0b10ca0502920240b9fee8e3ff728df1808d451..3eb2643b232875cea389ed0cfb45a55147eb898b 100644 (file)
@@ -209,7 +209,7 @@ static int platram_probe(struct platform_device *pdev)
        /* probe for the right mtd map driver
         * supplied by the platform_data struct */
 
-       if (pdata->map_probes != 0) {
+       if (pdata->map_probes) {
                const char **map_probes = pdata->map_probes;
 
                for ( ; !info->mtd && *map_probes; map_probes++)
index 9c6573419f5a3174d67b0ec92462983c3abcb315..fdfb2b2cb734e3fe31455a34dcb7e8eaef495749 100644 (file)
@@ -670,7 +670,7 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
                                        memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length);
                                        }
                                        skb->protocol = eth_type_trans(skb,dev);
-                                       adapter->stats.rx_bytes += skb->len;
+                                       dev->stats.rx_bytes += skb->len;
                                        netif_rx(skb);
                                        dev->last_rx = jiffies;
                                }
@@ -773,12 +773,12 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
                                         * received board statistics
                                         */
                                case CMD_NETWORK_STATISTICS_RESPONSE:
-                                       adapter->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
-                                       adapter->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
-                                       adapter->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
-                                       adapter->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
-                                       adapter->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
-                                       adapter->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
+                                       dev->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
+                                       dev->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
+                                       dev->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
+                                       dev->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
+                                       dev->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
+                                       dev->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
                                        adapter->got[CMD_NETWORK_STATISTICS] = 1;
                                        if (elp_debug >= 3)
                                                printk(KERN_DEBUG "%s: interrupt - statistics response received\n", dev->name);
@@ -794,11 +794,11 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id)
                                                break;
                                        switch (adapter->irx_pcb.data.xmit_resp.c_stat) {
                                        case 0xffff:
-                                               adapter->stats.tx_aborted_errors++;
+                                               dev->stats.tx_aborted_errors++;
                                                printk(KERN_INFO "%s: transmit timed out, network cable problem?\n", dev->name);
                                                break;
                                        case 0xfffe:
-                                               adapter->stats.tx_fifo_errors++;
+                                               dev->stats.tx_fifo_errors++;
                                                printk(KERN_INFO "%s: transmit timed out, FIFO underrun\n", dev->name);
                                                break;
                                        }
@@ -986,7 +986,7 @@ static bool send_packet(struct net_device *dev, struct sk_buff *skb)
                return false;
        }
 
-       adapter->stats.tx_bytes += nlen;
+       dev->stats.tx_bytes += nlen;
 
        /*
         * send the adapter a transmit packet command. Ignore segment and offset
@@ -1041,7 +1041,6 @@ static bool send_packet(struct net_device *dev, struct sk_buff *skb)
 
 static void elp_timeout(struct net_device *dev)
 {
-       elp_device *adapter = dev->priv;
        int stat;
 
        stat = inb_status(dev->base_addr);
@@ -1049,7 +1048,7 @@ static void elp_timeout(struct net_device *dev)
        if (elp_debug >= 1)
                printk(KERN_DEBUG "%s: status %#02x\n", dev->name, stat);
        dev->trans_start = jiffies;
-       adapter->stats.tx_dropped++;
+       dev->stats.tx_dropped++;
        netif_wake_queue(dev);
 }
 
@@ -1113,7 +1112,7 @@ static struct net_device_stats *elp_get_stats(struct net_device *dev)
        /* If the device is closed, just return the latest stats we have,
           - we cannot ask from the adapter without interrupts */
        if (!netif_running(dev))
-               return &adapter->stats;
+               return &dev->stats;
 
        /* send a get statistics command to the board */
        adapter->tx_pcb.command = CMD_NETWORK_STATISTICS;
@@ -1126,12 +1125,12 @@ static struct net_device_stats *elp_get_stats(struct net_device *dev)
                while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout));
                if (time_after_eq(jiffies, timeout)) {
                        TIMEOUT_MSG(__LINE__);
-                       return &adapter->stats;
+                       return &dev->stats;
                }
        }
 
        /* statistics are now up to date */
-       return &adapter->stats;
+       return &dev->stats;
 }
 
 
@@ -1571,7 +1570,6 @@ static int __init elplus_setup(struct net_device *dev)
        dev->set_multicast_list = elp_set_mc_list;      /* local */
        dev->ethtool_ops = &netdev_ethtool_ops;         /* local */
 
-       memset(&(adapter->stats), 0, sizeof(struct net_device_stats));
        dev->mem_start = dev->mem_end = 0;
 
        err = register_netdev(dev);
index 1910cb1dc787c099bd59781d0f5ccb6083a81b0b..04df2a9002b62f00078fc6f5227ad80fdc8af2fc 100644 (file)
@@ -264,7 +264,6 @@ typedef struct {
        pcb_struct rx_pcb;      /* PCB for foreground receiving */
        pcb_struct itx_pcb;     /* PCB for background sending */
        pcb_struct irx_pcb;     /* PCB for background receiving */
-       struct net_device_stats stats;
 
        void *dma_buffer;
 
index 54dac0696d9117de35338e5dc5bbb61048d970f9..e6c545fe5f58025ae4a581a91622957b362ce3ad 100644 (file)
@@ -167,7 +167,6 @@ enum RxFilter {
 enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA };
 
 struct el3_private {
-       struct net_device_stats stats;
        spinlock_t lock;
        /* skb send-queue */
        int head, size;
@@ -794,7 +793,6 @@ el3_open(struct net_device *dev)
 static void
 el3_tx_timeout (struct net_device *dev)
 {
-       struct el3_private *lp = netdev_priv(dev);
        int ioaddr = dev->base_addr;
 
        /* Transmitter timeout, serious problems. */
@@ -802,7 +800,7 @@ el3_tx_timeout (struct net_device *dev)
                   "Tx FIFO room %d.\n",
                   dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
                   inw(ioaddr + TX_FREE));
-       lp->stats.tx_errors++;
+       dev->stats.tx_errors++;
        dev->trans_start = jiffies;
        /* Issue TX_RESET and TX_START commands. */
        outw(TxReset, ioaddr + EL3_CMD);
@@ -820,7 +818,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        netif_stop_queue (dev);
 
-       lp->stats.tx_bytes += skb->len;
+       dev->stats.tx_bytes += skb->len;
 
        if (el3_debug > 4) {
                printk(KERN_DEBUG "%s: el3_start_xmit(length = %u) called, status %4.4x.\n",
@@ -881,7 +879,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                int i = 4;
 
                while (--i > 0  &&      (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
-                       if (tx_status & 0x38) lp->stats.tx_aborted_errors++;
+                       if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
                        if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
                        if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
                        outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
@@ -931,12 +929,11 @@ el3_interrupt(int irq, void *dev_id)
                                outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
                        }
                        if (status & TxComplete) {                      /* Really Tx error. */
-                               struct el3_private *lp = netdev_priv(dev);
                                short tx_status;
                                int i = 4;
 
                                while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) {
-                                       if (tx_status & 0x38) lp->stats.tx_aborted_errors++;
+                                       if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
                                        if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD);
                                        if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD);
                                        outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
@@ -1002,7 +999,7 @@ el3_get_stats(struct net_device *dev)
        spin_lock_irqsave(&lp->lock, flags);
        update_stats(dev);
        spin_unlock_irqrestore(&lp->lock, flags);
-       return &lp->stats;
+       return &dev->stats;
 }
 
 /*  Update statistics.  We change to register window 6, so this should be run
@@ -1012,7 +1009,6 @@ el3_get_stats(struct net_device *dev)
        */
 static void update_stats(struct net_device *dev)
 {
-       struct el3_private *lp = netdev_priv(dev);
        int ioaddr = dev->base_addr;
 
        if (el3_debug > 5)
@@ -1021,13 +1017,13 @@ static void update_stats(struct net_device *dev)
        outw(StatsDisable, ioaddr + EL3_CMD);
        /* Switch to the stats window, and read everything. */
        EL3WINDOW(6);
-       lp->stats.tx_carrier_errors     += inb(ioaddr + 0);
-       lp->stats.tx_heartbeat_errors   += inb(ioaddr + 1);
+       dev->stats.tx_carrier_errors    += inb(ioaddr + 0);
+       dev->stats.tx_heartbeat_errors  += inb(ioaddr + 1);
        /* Multiple collisions. */         inb(ioaddr + 2);
-       lp->stats.collisions            += inb(ioaddr + 3);
-       lp->stats.tx_window_errors      += inb(ioaddr + 4);
-       lp->stats.rx_fifo_errors        += inb(ioaddr + 5);
-       lp->stats.tx_packets            += inb(ioaddr + 6);
+       dev->stats.collisions           += inb(ioaddr + 3);
+       dev->stats.tx_window_errors     += inb(ioaddr + 4);
+       dev->stats.rx_fifo_errors       += inb(ioaddr + 5);
+       dev->stats.tx_packets           += inb(ioaddr + 6);
        /* Rx packets   */                 inb(ioaddr + 7);
        /* Tx deferrals */                 inb(ioaddr + 8);
        inw(ioaddr + 10);       /* Total Rx and Tx octets. */
@@ -1042,7 +1038,6 @@ static void update_stats(struct net_device *dev)
 static int
 el3_rx(struct net_device *dev)
 {
-       struct el3_private *lp = netdev_priv(dev);
        int ioaddr = dev->base_addr;
        short rx_status;
 
@@ -1054,21 +1049,21 @@ el3_rx(struct net_device *dev)
                        short error = rx_status & 0x3800;
 
                        outw(RxDiscard, ioaddr + EL3_CMD);
-                       lp->stats.rx_errors++;
+                       dev->stats.rx_errors++;
                        switch (error) {
-                       case 0x0000:            lp->stats.rx_over_errors++; break;
-                       case 0x0800:            lp->stats.rx_length_errors++; break;
-                       case 0x1000:            lp->stats.rx_frame_errors++; break;
-                       case 0x1800:            lp->stats.rx_length_errors++; break;
-                       case 0x2000:            lp->stats.rx_frame_errors++; break;
-                       case 0x2800:            lp->stats.rx_crc_errors++; break;
+                       case 0x0000:            dev->stats.rx_over_errors++; break;
+                       case 0x0800:            dev->stats.rx_length_errors++; break;
+                       case 0x1000:            dev->stats.rx_frame_errors++; break;
+                       case 0x1800:            dev->stats.rx_length_errors++; break;
+                       case 0x2000:            dev->stats.rx_frame_errors++; break;
+                       case 0x2800:            dev->stats.rx_crc_errors++; break;
                        }
                } else {
                        short pkt_len = rx_status & 0x7ff;
                        struct sk_buff *skb;
 
                        skb = dev_alloc_skb(pkt_len+5);
-                       lp->stats.rx_bytes += pkt_len;
+                       dev->stats.rx_bytes += pkt_len;
                        if (el3_debug > 4)
                                printk("Receiving packet size %d status %4.4x.\n",
                                           pkt_len, rx_status);
@@ -1083,11 +1078,11 @@ el3_rx(struct net_device *dev)
                                skb->protocol = eth_type_trans(skb,dev);
                                netif_rx(skb);
                                dev->last_rx = jiffies;
-                               lp->stats.rx_packets++;
+                               dev->stats.rx_packets++;
                                continue;
                        }
                        outw(RxDiscard, ioaddr + EL3_CMD);
-                       lp->stats.rx_dropped++;
+                       dev->stats.rx_dropped++;
                        if (el3_debug)
                                printk("%s: Couldn't allocate a sk_buff of size %d.\n",
                                           dev->name, pkt_len);
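
(The 3c505 and 3c509 hunks above, and the 3c515 "corkscrew" hunks that follow, all apply one conversion: the driver-private copy of struct net_device_stats is dropped in favour of the counters already embedded in struct net_device. In shorthand, with my_priv as a stand-in for the driver's private struct:)

	/* before: each driver carried its own counters */
	struct my_priv {
		struct net_device_stats stats;	/* removed by this patch */
	};
	priv->stats.rx_packets++;
	return &priv->stats;			/* from the get_stats hook */

	/* after: use the counters built into struct net_device */
	dev->stats.rx_packets++;
	return &dev->stats;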
index 6ab84b661d70b34549d954ff9f173e07be06bd41..105a8c7ca7e9dce46ab3aae935db671df7949b8f 100644 (file)
@@ -310,7 +310,6 @@ struct corkscrew_private {
        struct sk_buff *tx_skbuff[TX_RING_SIZE];
        unsigned int cur_rx, cur_tx;    /* The next free ring entry */
        unsigned int dirty_rx, dirty_tx;/* The ring entries to be free()ed. */
-       struct net_device_stats stats;
        struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl.  */
        struct timer_list timer;        /* Media selection timer. */
        int capabilities        ;       /* Adapter capabilities word. */
@@ -983,8 +982,8 @@ static void corkscrew_timeout(struct net_device *dev)
                        break;
        outw(TxEnable, ioaddr + EL3_CMD);
        dev->trans_start = jiffies;
-       vp->stats.tx_errors++;
-       vp->stats.tx_dropped++;
+       dev->stats.tx_errors++;
+       dev->stats.tx_dropped++;
        netif_wake_queue(dev);
 }
 
@@ -1050,7 +1049,7 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
        }
        /* Put out the doubleword header... */
        outl(skb->len, ioaddr + TX_FIFO);
-       vp->stats.tx_bytes += skb->len;
+       dev->stats.tx_bytes += skb->len;
 #ifdef VORTEX_BUS_MASTER
        if (vp->bus_master) {
                /* Set the bus-master controller to transfer the packet. */
@@ -1094,9 +1093,9 @@ static int corkscrew_start_xmit(struct sk_buff *skb,
                                        printk("%s: Tx error, status %2.2x.\n",
                                                dev->name, tx_status);
                                if (tx_status & 0x04)
-                                       vp->stats.tx_fifo_errors++;
+                                       dev->stats.tx_fifo_errors++;
                                if (tx_status & 0x38)
-                                       vp->stats.tx_aborted_errors++;
+                                       dev->stats.tx_aborted_errors++;
                                if (tx_status & 0x30) {
                                        int j;
                                        outw(TxReset, ioaddr + EL3_CMD);
@@ -1257,7 +1256,6 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id)
 
 static int corkscrew_rx(struct net_device *dev)
 {
-       struct corkscrew_private *vp = netdev_priv(dev);
        int ioaddr = dev->base_addr;
        int i;
        short rx_status;
@@ -1271,17 +1269,17 @@ static int corkscrew_rx(struct net_device *dev)
                        if (corkscrew_debug > 2)
                                printk(" Rx error: status %2.2x.\n",
                                       rx_error);
-                       vp->stats.rx_errors++;
+                       dev->stats.rx_errors++;
                        if (rx_error & 0x01)
-                               vp->stats.rx_over_errors++;
+                               dev->stats.rx_over_errors++;
                        if (rx_error & 0x02)
-                               vp->stats.rx_length_errors++;
+                               dev->stats.rx_length_errors++;
                        if (rx_error & 0x04)
-                               vp->stats.rx_frame_errors++;
+                               dev->stats.rx_frame_errors++;
                        if (rx_error & 0x08)
-                               vp->stats.rx_crc_errors++;
+                               dev->stats.rx_crc_errors++;
                        if (rx_error & 0x10)
-                               vp->stats.rx_length_errors++;
+                               dev->stats.rx_length_errors++;
                } else {
                        /* The packet length: up to 4.5K!. */
                        short pkt_len = rx_status & 0x1fff;
@@ -1301,8 +1299,8 @@ static int corkscrew_rx(struct net_device *dev)
                                skb->protocol = eth_type_trans(skb, dev);
                                netif_rx(skb);
                                dev->last_rx = jiffies;
-                               vp->stats.rx_packets++;
-                               vp->stats.rx_bytes += pkt_len;
+                               dev->stats.rx_packets++;
+                               dev->stats.rx_bytes += pkt_len;
                                /* Wait a limited time to go to next packet. */
                                for (i = 200; i >= 0; i--)
                                        if (! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
@@ -1312,7 +1310,7 @@ static int corkscrew_rx(struct net_device *dev)
                                printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len);
                }
                outw(RxDiscard, ioaddr + EL3_CMD);
-               vp->stats.rx_dropped++;
+               dev->stats.rx_dropped++;
                /* Wait a limited time to skip this packet. */
                for (i = 200; i >= 0; i--)
                        if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
@@ -1337,23 +1335,23 @@ static int boomerang_rx(struct net_device *dev)
                        if (corkscrew_debug > 2)
                                printk(" Rx error: status %2.2x.\n",
                                       rx_error);
-                       vp->stats.rx_errors++;
+                       dev->stats.rx_errors++;
                        if (rx_error & 0x01)
-                               vp->stats.rx_over_errors++;
+                               dev->stats.rx_over_errors++;
                        if (rx_error & 0x02)
-                               vp->stats.rx_length_errors++;
+                               dev->stats.rx_length_errors++;
                        if (rx_error & 0x04)
-                               vp->stats.rx_frame_errors++;
+                               dev->stats.rx_frame_errors++;
                        if (rx_error & 0x08)
-                               vp->stats.rx_crc_errors++;
+                               dev->stats.rx_crc_errors++;
                        if (rx_error & 0x10)
-                               vp->stats.rx_length_errors++;
+                               dev->stats.rx_length_errors++;
                } else {
                        /* The packet length: up to 4.5K!. */
                        short pkt_len = rx_status & 0x1fff;
                        struct sk_buff *skb;
 
-                       vp->stats.rx_bytes += pkt_len;
+                       dev->stats.rx_bytes += pkt_len;
                        if (corkscrew_debug > 4)
                                printk("Receiving packet size %d status %4.4x.\n",
                                     pkt_len, rx_status);
@@ -1388,7 +1386,7 @@ static int boomerang_rx(struct net_device *dev)
                        skb->protocol = eth_type_trans(skb, dev);
                        netif_rx(skb);
                        dev->last_rx = jiffies;
-                       vp->stats.rx_packets++;
+                       dev->stats.rx_packets++;
                }
                entry = (++vp->cur_rx) % RX_RING_SIZE;
        }
@@ -1475,7 +1473,7 @@ static struct net_device_stats *corkscrew_get_stats(struct net_device *dev)
                update_stats(dev->base_addr, dev);
                spin_unlock_irqrestore(&vp->lock, flags);
        }
-       return &vp->stats;
+       return &dev->stats;
 }
 
 /*  Update statistics.
@@ -1487,19 +1485,17 @@ static struct net_device_stats *corkscrew_get_stats(struct net_device *dev)
        */
 static void update_stats(int ioaddr, struct net_device *dev)
 {
-       struct corkscrew_private *vp = netdev_priv(dev);
-
        /* Unlike the 3c5x9 we need not turn off stats updates while reading. */
        /* Switch to the stats window, and read everything. */
        EL3WINDOW(6);
-       vp->stats.tx_carrier_errors += inb(ioaddr + 0);
-       vp->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+       dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+       dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
        /* Multiple collisions. */ inb(ioaddr + 2);
-       vp->stats.collisions += inb(ioaddr + 3);
-       vp->stats.tx_window_errors += inb(ioaddr + 4);
-       vp->stats.rx_fifo_errors += inb(ioaddr + 5);
-       vp->stats.tx_packets += inb(ioaddr + 6);
-       vp->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4;
+       dev->stats.collisions += inb(ioaddr + 3);
+       dev->stats.tx_window_errors += inb(ioaddr + 4);
+       dev->stats.rx_fifo_errors += inb(ioaddr + 5);
+       dev->stats.tx_packets += inb(ioaddr + 6);
+       dev->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4;
                                                /* Rx packets   */ inb(ioaddr + 7);
                                                /* Must read to clear */
        /* Tx deferrals */ inb(ioaddr + 8);
index 0697aa8ea77459d4c36a7f991007c56b7bd967d2..af46341827f27b2220ad63216e38fd7e1ae07366 100644 (file)
@@ -469,7 +469,7 @@ config SNI_82596
 
 config KORINA
        tristate "Korina (IDT RC32434) Ethernet support"
-       depends on NET_ETHERNET && MIKROTIK_RB500
+       depends on NET_ETHERNET && MIKROTIK_RB532
        help
          If you have a Mikrotik RouterBoard 500 or IDT RC32434
          based system say Y. Otherwise say N.
@@ -2011,7 +2011,7 @@ config E1000_DISABLE_PACKET_SPLIT
 
 config E1000E
        tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support"
-       depends on PCI
+       depends on PCI && (!SPARC32 || BROKEN)
        ---help---
          This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
          ethernet family of adapters. For PCI or PCI-X e1000 adapters,
@@ -2593,6 +2593,7 @@ config BNX2X
          To compile this driver as a module, choose M here: the module
          will be called bnx2x.  This is recommended.
 
+source "drivers/net/sfc/Kconfig"
 
 endif # NETDEV_10000
 
index 2f1f3f2739fdf32dc088a883775491da4f3c161e..dcbfe8421154b25a83fd50ab7bf946da955eb65a 100644 (file)
@@ -253,3 +253,5 @@ obj-$(CONFIG_FS_ENET) += fs_enet/
 obj-$(CONFIG_NETXEN_NIC) += netxen/
 obj-$(CONFIG_NIU) += niu.o
 obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
+obj-$(CONFIG_SFC) += sfc/
+
index f9cc2b621fe200d6a4c588f2e075f4641c9d1ad1..8eda6eeb43b70ffbbc39463d0c9161cb89585fd1 100644 (file)
@@ -47,3 +47,11 @@ config EP93XX_ETH
        help
          This is a driver for the ethernet hardware included in EP93xx CPUs.
          Say Y if you are building a kernel for EP93xx based devices.
+
+config IXP4XX_ETH
+       tristate "Intel IXP4xx Ethernet support"
+       depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR
+       select MII
+       help
+         Say Y here if you want to use built-in Ethernet ports
+         on the IXP4xx processor.
index a4c868278e115c5d1c5ca1fbf263fdd682e6d513..7c812ac2b6a51c7305f8b5e69b51b219959f20df 100644 (file)
@@ -9,3 +9,4 @@ obj-$(CONFIG_ARM_ETHER3)        += ether3.o
 obj-$(CONFIG_ARM_ETHER1)       += ether1.o
 obj-$(CONFIG_ARM_AT91_ETHER)   += at91_ether.o
 obj-$(CONFIG_EP93XX_ETH)       += ep93xx_eth.o
+obj-$(CONFIG_IXP4XX_ETH)       += ixp4xx_eth.o
index ba6bd03a015ff6afad60ae65d8c614b35a6671d4..a637910b02dd74766f4c79ebed4385dea8fb18f4 100644 (file)
@@ -693,11 +693,15 @@ static int __init am79c961_probe(struct platform_device *pdev)
         * done by the ether bootp loader.
         */
        dev->base_addr = res->start;
-       dev->irq = platform_get_irq(pdev, 0);
+       ret = platform_get_irq(pdev, 0);
 
-       ret = -ENODEV;
-       if (dev->irq < 0)
+       if (ret < 0) {
+               ret = -ENODEV;
                goto nodev;
+       }
+       dev->irq = ret;
+
+       ret = -ENODEV;
        if (!request_region(dev->base_addr, 0x18, dev->name))
                goto nodev;
 
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
new file mode 100644 (file)
index 0000000..c617b64
--- /dev/null
@@ -0,0 +1,1265 @@
+/*
+ * Intel IXP4xx Ethernet driver for Linux
+ *
+ * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ *
+ * Ethernet port config (0x00 is not present on IXP42X):
+ *
+ * logical port                0x00            0x10            0x20
+ * NPE                 0 (NPE-A)       1 (NPE-B)       2 (NPE-C)
+ * physical PortId     2               0               1
+ * TX queue            23              24              25
+ * RX-free queue       26              27              28
+ * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
+ *
+ *
+ * Queue entries:
+ * bits 0 -> 1 - NPE ID (RX and TX-done)
+ * bits 0 -> 2 - priority (TX, per 802.1D)
+ * bits 3 -> 4 - port ID (user-set?)
+ * bits 5 -> 31        - physical descriptor address
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/platform_device.h>
+#include <asm/arch/npe.h>
+#include <asm/arch/qmgr.h>
+
+#define DEBUG_QUEUES           0
+#define DEBUG_DESC             0
+#define DEBUG_RX               0
+#define DEBUG_TX               0
+#define DEBUG_PKT_BYTES                0
+#define DEBUG_MDIO             0
+#define DEBUG_CLOSE            0
+
+#define DRV_NAME               "ixp4xx_eth"
+
+#define MAX_NPES               3
+
+#define RX_DESCS               64 /* also length of all RX queues */
+#define TX_DESCS               16 /* also length of all TX queues */
+#define TXDONE_QUEUE_LEN       64 /* dwords */
+
+#define POOL_ALLOC_SIZE                (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
+#define REGS_SIZE              0x1000
+#define MAX_MRU                        1536 /* 0x600 */
+#define RX_BUFF_SIZE           ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)
+
+#define NAPI_WEIGHT            16
+#define MDIO_INTERVAL          (3 * HZ)
+#define MAX_MDIO_RETRIES       100 /* microseconds, typically 30 cycles */
+#define MAX_MII_RESET_RETRIES  100 /* mdio_read() cycles, typically 4 */
+#define MAX_CLOSE_WAIT         1000 /* microseconds, typically 2-3 cycles */
+
+#define NPE_ID(port_id)                ((port_id) >> 4)
+#define PHYSICAL_ID(port_id)   ((NPE_ID(port_id) + 2) % 3)
+#define TX_QUEUE(port_id)      (NPE_ID(port_id) + 23)
+#define RXFREE_QUEUE(port_id)  (NPE_ID(port_id) + 26)
+#define TXDONE_QUEUE           31
+
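
(A worked example of the port-ID macros above; the values follow directly from the definitions and match the table in the file's header comment.)

	port 0x00:  NPE_ID = 0   PHYSICAL_ID = 2   TX_QUEUE = 23   RXFREE_QUEUE = 26
	port 0x10:  NPE_ID = 1   PHYSICAL_ID = 0   TX_QUEUE = 24   RXFREE_QUEUE = 27
	port 0x20:  NPE_ID = 2   PHYSICAL_ID = 1   TX_QUEUE = 25   RXFREE_QUEUE = 28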
+/* TX Control Registers */
+#define TX_CNTRL0_TX_EN                0x01
+#define TX_CNTRL0_HALFDUPLEX   0x02
+#define TX_CNTRL0_RETRY                0x04
+#define TX_CNTRL0_PAD_EN       0x08
+#define TX_CNTRL0_APPEND_FCS   0x10
+#define TX_CNTRL0_2DEFER       0x20
+#define TX_CNTRL0_RMII         0x40 /* reduced MII */
+#define TX_CNTRL1_RETRIES      0x0F /* 4 bits */
+
+/* RX Control Registers */
+#define RX_CNTRL0_RX_EN                0x01
+#define RX_CNTRL0_PADSTRIP_EN  0x02
+#define RX_CNTRL0_SEND_FCS     0x04
+#define RX_CNTRL0_PAUSE_EN     0x08
+#define RX_CNTRL0_LOOP_EN      0x10
+#define RX_CNTRL0_ADDR_FLTR_EN 0x20
+#define RX_CNTRL0_RX_RUNT_EN   0x40
+#define RX_CNTRL0_BCAST_DIS    0x80
+#define RX_CNTRL1_DEFER_EN     0x01
+
+/* Core Control Register */
+#define CORE_RESET             0x01
+#define CORE_RX_FIFO_FLUSH     0x02
+#define CORE_TX_FIFO_FLUSH     0x04
+#define CORE_SEND_JAM          0x08
+#define CORE_MDC_EN            0x10 /* MDIO using NPE-B ETH-0 only */
+
+#define DEFAULT_TX_CNTRL0      (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |    \
+                                TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
+                                TX_CNTRL0_2DEFER)
+#define DEFAULT_RX_CNTRL0      RX_CNTRL0_RX_EN
+#define DEFAULT_CORE_CNTRL     CORE_MDC_EN
+
+
+/* NPE message codes */
+#define NPE_GETSTATUS                  0x00
+#define NPE_EDB_SETPORTADDRESS         0x01
+#define NPE_EDB_GETMACADDRESSDATABASE  0x02
+#define NPE_EDB_SETMACADDRESSSDATABASE 0x03
+#define NPE_GETSTATS                   0x04
+#define NPE_RESETSTATS                 0x05
+#define NPE_SETMAXFRAMELENGTHS         0x06
+#define NPE_VLAN_SETRXTAGMODE          0x07
+#define NPE_VLAN_SETDEFAULTRXVID       0x08
+#define NPE_VLAN_SETPORTVLANTABLEENTRY 0x09
+#define NPE_VLAN_SETPORTVLANTABLERANGE 0x0A
+#define NPE_VLAN_SETRXQOSENTRY         0x0B
+#define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C
+#define NPE_STP_SETBLOCKINGSTATE       0x0D
+#define NPE_FW_SETFIREWALLMODE         0x0E
+#define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F
+#define NPE_PC_SETAPMACTABLE           0x11
+#define NPE_SETLOOPBACK_MODE           0x12
+#define NPE_PC_SETBSSIDTABLE           0x13
+#define NPE_ADDRESS_FILTER_CONFIG      0x14
+#define NPE_APPENDFCSCONFIG            0x15
+#define NPE_NOTIFY_MAC_RECOVERY_DONE   0x16
+#define NPE_MAC_RECOVERY_START         0x17
+
+
+#ifdef __ARMEB__
+typedef struct sk_buff buffer_t;
+#define free_buffer dev_kfree_skb
+#define free_buffer_irq dev_kfree_skb_irq
+#else
+typedef void buffer_t;
+#define free_buffer kfree
+#define free_buffer_irq kfree
+#endif
+
+struct eth_regs {
+       u32 tx_control[2], __res1[2];           /* 000 */
+       u32 rx_control[2], __res2[2];           /* 010 */
+       u32 random_seed, __res3[3];             /* 020 */
+       u32 partial_empty_threshold, __res4;    /* 030 */
+       u32 partial_full_threshold, __res5;     /* 038 */
+       u32 tx_start_bytes, __res6[3];          /* 040 */
+       u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
+       u32 tx_2part_deferral[2], __res8[2];    /* 060 */
+       u32 slot_time, __res9[3];               /* 070 */
+       u32 mdio_command[4];                    /* 080 */
+       u32 mdio_status[4];                     /* 090 */
+       u32 mcast_mask[6], __res10[2];          /* 0A0 */
+       u32 mcast_addr[6], __res11[2];          /* 0C0 */
+       u32 int_clock_threshold, __res12[3];    /* 0E0 */
+       u32 hw_addr[6], __res13[61];            /* 0F0 */
+       u32 core_control;                       /* 1FC */
+};
+
+struct port {
+       struct resource *mem_res;
+       struct eth_regs __iomem *regs;
+       struct npe *npe;
+       struct net_device *netdev;
+       struct napi_struct napi;
+       struct net_device_stats stat;
+       struct mii_if_info mii;
+       struct delayed_work mdio_thread;
+       struct eth_plat_info *plat;
+       buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
+       struct desc *desc_tab;  /* coherent */
+       u32 desc_tab_phys;
+       int id;                 /* logical port ID */
+       u16 mii_bmcr;
+};
+
+/* NPE message structure */
+struct msg {
+#ifdef __ARMEB__
+       u8 cmd, eth_id, byte2, byte3;
+       u8 byte4, byte5, byte6, byte7;
+#else
+       u8 byte3, byte2, eth_id, cmd;
+       u8 byte7, byte6, byte5, byte4;
+#endif
+};
+
+/* Ethernet packet descriptor */
+struct desc {
+       u32 next;               /* pointer to next buffer, unused */
+
+#ifdef __ARMEB__
+       u16 buf_len;            /* buffer length */
+       u16 pkt_len;            /* packet length */
+       u32 data;               /* pointer to data buffer in RAM */
+       u8 dest_id;
+       u8 src_id;
+       u16 flags;
+       u8 qos;
+       u8 padlen;
+       u16 vlan_tci;
+#else
+       u16 pkt_len;            /* packet length */
+       u16 buf_len;            /* buffer length */
+       u32 data;               /* pointer to data buffer in RAM */
+       u16 flags;
+       u8 src_id;
+       u8 dest_id;
+       u16 vlan_tci;
+       u8 padlen;
+       u8 qos;
+#endif
+
+#ifdef __ARMEB__
+       u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
+       u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
+       u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
+#else
+       u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
+       u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
+       u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
+#endif
+};
+
+
+#define rx_desc_phys(port, n)  ((port)->desc_tab_phys +                \
+                                (n) * sizeof(struct desc))
+#define rx_desc_ptr(port, n)   (&(port)->desc_tab[n])
+
+#define tx_desc_phys(port, n)  ((port)->desc_tab_phys +                \
+                                ((n) + RX_DESCS) * sizeof(struct desc))
+#define tx_desc_ptr(port, n)   (&(port)->desc_tab[(n) + RX_DESCS])
+
+#ifndef __ARMEB__
+static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
+{
+       int i;
+       for (i = 0; i < cnt; i++)
+               dest[i] = swab32(src[i]);
+}
+#endif
+
+static spinlock_t mdio_lock;
+static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
+static int ports_open;
+static struct port *npe_port_tab[MAX_NPES];
+static struct dma_pool *dma_pool;
+
+
+static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
+                   int write, u16 cmd)
+{
+       int cycles = 0;
+
+       if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
+               printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name);
+               return 0;
+       }
+
+       if (write) {
+               __raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
+               __raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
+       }
+       __raw_writel(((phy_id << 5) | location) & 0xFF,
+                    &mdio_regs->mdio_command[2]);
+       __raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
+                    &mdio_regs->mdio_command[3]);
+
+       while ((cycles < MAX_MDIO_RETRIES) &&
+              (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
+               udelay(1);
+               cycles++;
+       }
+
+       if (cycles == MAX_MDIO_RETRIES) {
+               printk(KERN_ERR "%s: MII write failed\n", dev->name);
+               return 0;
+       }
+
+#if DEBUG_MDIO
+       printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name,
+              cycles);
+#endif
+
+       if (write)
+               return 0;
+
+       if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
+               printk(KERN_ERR "%s: MII read failed\n", dev->name);
+               return 0;
+       }
+
+       return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
+               (__raw_readl(&mdio_regs->mdio_status[1]) << 8);
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+       unsigned long flags;
+       u16 val;
+
+       spin_lock_irqsave(&mdio_lock, flags);
+       val = mdio_cmd(dev, phy_id, location, 0, 0);
+       spin_unlock_irqrestore(&mdio_lock, flags);
+       return val;
+}
+
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+                      int val)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&mdio_lock, flags);
+       mdio_cmd(dev, phy_id, location, 1, val);
+       spin_unlock_irqrestore(&mdio_lock, flags);
+}
+
+static void phy_reset(struct net_device *dev, int phy_id)
+{
+       struct port *port = netdev_priv(dev);
+       int cycles = 0;
+
+       mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET);
+
+       while (cycles < MAX_MII_RESET_RETRIES) {
+               if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) {
+#if DEBUG_MDIO
+                       printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n",
+                              dev->name, cycles);
+#endif
+                       return;
+               }
+               udelay(1);
+               cycles++;
+       }
+
+       printk(KERN_ERR "%s: MII reset failed\n", dev->name);
+}
+
+static void eth_set_duplex(struct port *port)
+{
+       if (port->mii.full_duplex)
+               __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
+                            &port->regs->tx_control[0]);
+       else
+               __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
+                            &port->regs->tx_control[0]);
+}
+
+
+static void phy_check_media(struct port *port, int init)
+{
+       if (mii_check_media(&port->mii, 1, init))
+               eth_set_duplex(port);
+       if (port->mii.force_media) { /* mii_check_media() doesn't work */
+               struct net_device *dev = port->netdev;
+               int cur_link = mii_link_ok(&port->mii);
+               int prev_link = netif_carrier_ok(dev);
+
+               if (!prev_link && cur_link) {
+                       printk(KERN_INFO "%s: link up\n", dev->name);
+                       netif_carrier_on(dev);
+               } else if (prev_link && !cur_link) {
+                       printk(KERN_INFO "%s: link down\n", dev->name);
+                       netif_carrier_off(dev);
+               }
+       }
+}
+
+
+static void mdio_thread(struct work_struct *work)
+{
+       struct port *port = container_of(work, struct port, mdio_thread.work);
+
+       phy_check_media(port, 0);
+       schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
+}
+
+
+static inline void debug_pkt(struct net_device *dev, const char *func,
+                            u8 *data, int len)
+{
+#if DEBUG_PKT_BYTES
+       int i;
+
+       printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
+       for (i = 0; i < len; i++) {
+               if (i >= DEBUG_PKT_BYTES)
+                       break;
+               printk("%s%02X",
+                      ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
+                      data[i]);
+       }
+       printk("\n");
+#endif
+}
+
+
+static inline void debug_desc(u32 phys, struct desc *desc)
+{
+#if DEBUG_DESC
+       printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
+              " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
+              phys, desc->next, desc->buf_len, desc->pkt_len,
+              desc->data, desc->dest_id, desc->src_id, desc->flags,
+              desc->qos, desc->padlen, desc->vlan_tci,
+              desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
+              desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
+              desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
+              desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
+#endif
+}
+
+static inline void debug_queue(unsigned int queue, int is_get, u32 phys)
+{
+#if DEBUG_QUEUES
+       static struct {
+               int queue;
+               char *name;
+       } names[] = {
+               { TX_QUEUE(0x10), "TX#0 " },
+               { TX_QUEUE(0x20), "TX#1 " },
+               { TX_QUEUE(0x00), "TX#2 " },
+               { RXFREE_QUEUE(0x10), "RX-free#0 " },
+               { RXFREE_QUEUE(0x20), "RX-free#1 " },
+               { RXFREE_QUEUE(0x00), "RX-free#2 " },
+               { TXDONE_QUEUE, "TX-done " },
+       };
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(names); i++)
+               if (names[i].queue == queue)
+                       break;
+
+       printk(KERN_DEBUG "Queue %i %s%s %X\n", queue,
+              i < ARRAY_SIZE(names) ? names[i].name : "",
+              is_get ? "->" : "<-", phys);
+#endif
+}
+
+static inline u32 queue_get_entry(unsigned int queue)
+{
+       u32 phys = qmgr_get_entry(queue);
+       debug_queue(queue, 1, phys);
+       return phys;
+}
+
+static inline int queue_get_desc(unsigned int queue, struct port *port,
+                                int is_tx)
+{
+       u32 phys, tab_phys, n_desc;
+       struct desc *tab;
+
+       if (!(phys = queue_get_entry(queue)))
+               return -1;
+
+       phys &= ~0x1F; /* mask out non-address bits */
+       tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
+       tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
+       n_desc = (phys - tab_phys) / sizeof(struct desc);
+       BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
+       debug_desc(phys, &tab[n_desc]);
+       BUG_ON(tab[n_desc].next);
+       return n_desc;
+}
+
+static inline void queue_put_desc(unsigned int queue, u32 phys,
+                                 struct desc *desc)
+{
+       debug_queue(queue, 0, phys);
+       debug_desc(phys, desc);
+       BUG_ON(phys & 0x1F);
+       qmgr_put_entry(queue, phys);
+       BUG_ON(qmgr_stat_overflow(queue));
+}
+
+
+static inline void dma_unmap_tx(struct port *port, struct desc *desc)
+{
+#ifdef __ARMEB__
+       dma_unmap_single(&port->netdev->dev, desc->data,
+                        desc->buf_len, DMA_TO_DEVICE);
+#else
+       dma_unmap_single(&port->netdev->dev, desc->data & ~3,
+                        ALIGN((desc->data & 3) + desc->buf_len, 4),
+                        DMA_TO_DEVICE);
+#endif
+}
+
+
+static void eth_rx_irq(void *pdev)
+{
+       struct net_device *dev = pdev;
+       struct port *port = netdev_priv(dev);
+
+#if DEBUG_RX
+       printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
+#endif
+       qmgr_disable_irq(port->plat->rxq);
+       netif_rx_schedule(dev, &port->napi);
+}
+
+static int eth_poll(struct napi_struct *napi, int budget)
+{
+       struct port *port = container_of(napi, struct port, napi);
+       struct net_device *dev = port->netdev;
+       unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
+       int received = 0;
+
+#if DEBUG_RX
+       printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
+#endif
+
+       while (received < budget) {
+               struct sk_buff *skb;
+               struct desc *desc;
+               int n;
+#ifdef __ARMEB__
+               struct sk_buff *temp;
+               u32 phys;
+#endif
+
+               if ((n = queue_get_desc(rxq, port, 0)) < 0) {
+                       received = 0; /* No packet received */
+#if DEBUG_RX
+                       printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
+                              dev->name);
+#endif
+                       netif_rx_complete(dev, napi);
+                       qmgr_enable_irq(rxq);
+                       if (!qmgr_stat_empty(rxq) &&
+                           netif_rx_reschedule(dev, napi)) {
+#if DEBUG_RX
+                               printk(KERN_DEBUG "%s: eth_poll"
+                                      " netif_rx_reschedule succeeded\n",
+                                      dev->name);
+#endif
+                               qmgr_disable_irq(rxq);
+                               continue;
+                       }
+#if DEBUG_RX
+                       printk(KERN_DEBUG "%s: eth_poll all done\n",
+                              dev->name);
+#endif
+                       return 0; /* all work done */
+               }
+
+               desc = rx_desc_ptr(port, n);
+
+#ifdef __ARMEB__
+               if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
+                       phys = dma_map_single(&dev->dev, skb->data,
+                                             RX_BUFF_SIZE, DMA_FROM_DEVICE);
+                       if (dma_mapping_error(phys)) {
+                               dev_kfree_skb(skb);
+                               skb = NULL;
+                       }
+               }
+#else
+               skb = netdev_alloc_skb(dev,
+                                      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
+#endif
+
+               if (!skb) {
+                       port->stat.rx_dropped++;
+                       /* put the desc back on the RX-free queue */
+                       desc->buf_len = MAX_MRU;
+                       desc->pkt_len = 0;
+                       queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
+                       continue;
+               }
+
+               /* process received frame */
+#ifdef __ARMEB__
+               temp = skb;
+               skb = port->rx_buff_tab[n];
+               dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
+                                RX_BUFF_SIZE, DMA_FROM_DEVICE);
+#else
+               dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN,
+                               RX_BUFF_SIZE, DMA_FROM_DEVICE);
+               memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
+                             ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
+#endif
+               skb_reserve(skb, NET_IP_ALIGN);
+               skb_put(skb, desc->pkt_len);
+
+               debug_pkt(dev, "eth_poll", skb->data, skb->len);
+
+               skb->protocol = eth_type_trans(skb, dev);
+               dev->last_rx = jiffies;
+               port->stat.rx_packets++;
+               port->stat.rx_bytes += skb->len;
+               netif_receive_skb(skb);
+
+               /* put the new buffer on RX-free queue */
+#ifdef __ARMEB__
+               port->rx_buff_tab[n] = temp;
+               desc->data = phys + NET_IP_ALIGN;
+#endif
+               desc->buf_len = MAX_MRU;
+               desc->pkt_len = 0;
+               queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
+               received++;
+       }
+
+#if DEBUG_RX
+       printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
+#endif
+       return received;                /* not all work done */
+}
+
+
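+/*
+ * TX-done queue IRQ, shared by all ports: the NPE ID is encoded in the low
+ * bits of each returned descriptor address.  Completed buffers are unmapped
+ * and freed, the descriptor is put back on the port's TX-ready queue, and
+ * the netif queue is woken if it had been stopped.
+ */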
+static void eth_txdone_irq(void *unused)
+{
+       u32 phys;
+
+#if DEBUG_TX
+       printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
+#endif
+       while ((phys = queue_get_entry(TXDONE_QUEUE)) != 0) {
+               u32 npe_id, n_desc;
+               struct port *port;
+               struct desc *desc;
+               int start;
+
+               npe_id = phys & 3;
+               BUG_ON(npe_id >= MAX_NPES);
+               port = npe_port_tab[npe_id];
+               BUG_ON(!port);
+               phys &= ~0x1F; /* mask out non-address bits */
+               n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
+               BUG_ON(n_desc >= TX_DESCS);
+               desc = tx_desc_ptr(port, n_desc);
+               debug_desc(phys, desc);
+
+               if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
+                       port->stat.tx_packets++;
+                       port->stat.tx_bytes += desc->pkt_len;
+
+                       dma_unmap_tx(port, desc);
+#if DEBUG_TX
+                       printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
+                              port->netdev->name, port->tx_buff_tab[n_desc]);
+#endif
+                       free_buffer_irq(port->tx_buff_tab[n_desc]);
+                       port->tx_buff_tab[n_desc] = NULL;
+               }
+
+               start = qmgr_stat_empty(port->plat->txreadyq);
+               queue_put_desc(port->plat->txreadyq, phys, desc);
+               if (start) {
+#if DEBUG_TX
+                       printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
+                              port->netdev->name);
+#endif
+                       netif_wake_queue(port->netdev);
+               }
+       }
+}
+
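+/*
+ * hard_start_xmit: on little-endian CPUs the frame is copied into a
+ * 32-bit-aligned bounce buffer with byte swapping and the skb is freed
+ * right away; on big-endian the skb data is DMA-mapped in place.  The
+ * descriptor is pushed onto the port's TX queue, and the netif queue is
+ * stopped when the TX-ready queue runs empty (re-checked afterwards to
+ * close the race with the TX-done interrupt).
+ */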
+static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       unsigned int txreadyq = port->plat->txreadyq;
+       int len, offset, bytes, n;
+       void *mem;
+       u32 phys;
+       struct desc *desc;
+
+#if DEBUG_TX
+       printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
+#endif
+
+       if (unlikely(skb->len > MAX_MRU)) {
+               dev_kfree_skb(skb);
+               port->stat.tx_errors++;
+               return NETDEV_TX_OK;
+       }
+
+       debug_pkt(dev, "eth_xmit", skb->data, skb->len);
+
+       len = skb->len;
+#ifdef __ARMEB__
+       offset = 0; /* no need to keep alignment */
+       bytes = len;
+       mem = skb->data;
+#else
+       offset = (int)skb->data & 3; /* keep 32-bit alignment */
+       bytes = ALIGN(offset + len, 4);
+       if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
+               dev_kfree_skb(skb);
+               port->stat.tx_dropped++;
+               return NETDEV_TX_OK;
+       }
+       memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
+       dev_kfree_skb(skb);
+#endif
+
+       phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(phys)) {
+#ifdef __ARMEB__
+               dev_kfree_skb(skb);
+#else
+               kfree(mem);
+#endif
+               port->stat.tx_dropped++;
+               return NETDEV_TX_OK;
+       }
+
+       n = queue_get_desc(txreadyq, port, 1);
+       BUG_ON(n < 0);
+       desc = tx_desc_ptr(port, n);
+
+#ifdef __ARMEB__
+       port->tx_buff_tab[n] = skb;
+#else
+       port->tx_buff_tab[n] = mem;
+#endif
+       desc->data = phys + offset;
+       desc->buf_len = desc->pkt_len = len;
+
+       /* NPE firmware pads short frames with zeros internally */
+       wmb();
+       queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
+       dev->trans_start = jiffies;
+
+       if (qmgr_stat_empty(txreadyq)) {
+#if DEBUG_TX
+               printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
+#endif
+               netif_stop_queue(dev);
+               /* we may have missed the TX-ready interrupt */
+               if (!qmgr_stat_empty(txreadyq)) {
+#if DEBUG_TX
+                       printk(KERN_DEBUG "%s: eth_xmit ready again\n",
+                              dev->name);
+#endif
+                       netif_wake_queue(dev);
+               }
+       }
+
+#if DEBUG_TX
+       printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
+#endif
+       return NETDEV_TX_OK;
+}
+
+
+static struct net_device_stats *eth_stats(struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       return &port->stat;
+}
+
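+/*
+ * The MAC has a single address/mask filter: program the bits common to
+ * all requested multicast addresses and mask out the bits that differ.
+ * Promiscuous mode or an empty list disables address filtering.
+ */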
+static void eth_set_mcast_list(struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       struct dev_mc_list *mclist = dev->mc_list;
+       u8 diffs[ETH_ALEN], *addr;
+       int cnt = dev->mc_count, i;
+
+       if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) {
+               __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
+                            &port->regs->rx_control[0]);
+               return;
+       }
+
+       memset(diffs, 0, ETH_ALEN);
+       addr = mclist->dmi_addr; /* first MAC address */
+
+       while (--cnt && (mclist = mclist->next))
+               for (i = 0; i < ETH_ALEN; i++)
+                       diffs[i] |= addr[i] ^ mclist->dmi_addr[i];
+
+       for (i = 0; i < ETH_ALEN; i++) {
+               __raw_writel(addr[i], &port->regs->mcast_addr[i]);
+               __raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
+       }
+
+       __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
+                    &port->regs->rx_control[0]);
+}
+
+
+static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+       struct port *port = netdev_priv(dev);
+       unsigned int duplex_chg;
+       int err;
+
+       if (!netif_running(dev))
+               return -EINVAL;
+       err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg);
+       if (duplex_chg)
+               eth_set_duplex(port);
+       return err;
+}
+
+
+static int request_queues(struct port *port)
+{
+       int err;
+
+       err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0);
+       if (err)
+               return err;
+
+       err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0);
+       if (err)
+               goto rel_rxfree;
+
+       err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0);
+       if (err)
+               goto rel_rx;
+
+       err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0);
+       if (err)
+               goto rel_tx;
+
+       /* TX-done queue handles skbs sent out by the NPEs */
+       if (!ports_open) {
+               err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0);
+               if (err)
+                       goto rel_txready;
+       }
+       return 0;
+
+rel_txready:
+       qmgr_release_queue(port->plat->txreadyq);
+rel_tx:
+       qmgr_release_queue(TX_QUEUE(port->id));
+rel_rx:
+       qmgr_release_queue(port->plat->rxq);
+rel_rxfree:
+       qmgr_release_queue(RXFREE_QUEUE(port->id));
+       printk(KERN_DEBUG "%s: unable to request hardware queues\n",
+              port->netdev->name);
+       return err;
+}
+
+static void release_queues(struct port *port)
+{
+       qmgr_release_queue(RXFREE_QUEUE(port->id));
+       qmgr_release_queue(port->plat->rxq);
+       qmgr_release_queue(TX_QUEUE(port->id));
+       qmgr_release_queue(port->plat->txreadyq);
+
+       if (!ports_open)
+               qmgr_release_queue(TXDONE_QUEUE);
+}
+
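+/*
+ * Allocate the per-port descriptor table from a shared DMA pool and
+ * pre-allocate and DMA-map one receive buffer per RX descriptor (an skb
+ * on big-endian, a kmalloc()ed bounce buffer on little-endian).
+ */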
+static int init_queues(struct port *port)
+{
+       int i;
+
+       if (!ports_open)
+               if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
+                                                POOL_ALLOC_SIZE, 32, 0)))
+                       return -ENOMEM;
+
+       if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
+                                             &port->desc_tab_phys)))
+               return -ENOMEM;
+       memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
+       memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
+       memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
+
+       /* Setup RX buffers */
+       for (i = 0; i < RX_DESCS; i++) {
+               struct desc *desc = rx_desc_ptr(port, i);
+               buffer_t *buff; /* skb or kmalloc()ated memory */
+               void *data;
+#ifdef __ARMEB__
+               if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
+                       return -ENOMEM;
+               data = buff->data;
+#else
+               if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
+                       return -ENOMEM;
+               data = buff;
+#endif
+               desc->buf_len = MAX_MRU;
+               desc->data = dma_map_single(&port->netdev->dev, data,
+                                           RX_BUFF_SIZE, DMA_FROM_DEVICE);
+               if (dma_mapping_error(desc->data)) {
+                       free_buffer(buff);
+                       return -EIO;
+               }
+               desc->data += NET_IP_ALIGN;
+               port->rx_buff_tab[i] = buff;
+       }
+
+       return 0;
+}
+
+static void destroy_queues(struct port *port)
+{
+       int i;
+
+       if (port->desc_tab) {
+               for (i = 0; i < RX_DESCS; i++) {
+                       struct desc *desc = rx_desc_ptr(port, i);
+                       buffer_t *buff = port->rx_buff_tab[i];
+                       if (buff) {
+                               dma_unmap_single(&port->netdev->dev,
+                                                desc->data - NET_IP_ALIGN,
+                                                RX_BUFF_SIZE, DMA_FROM_DEVICE);
+                               free_buffer(buff);
+                       }
+               }
+               for (i = 0; i < TX_DESCS; i++) {
+                       struct desc *desc = tx_desc_ptr(port, i);
+                       buffer_t *buff = port->tx_buff_tab[i];
+                       if (buff) {
+                               dma_unmap_tx(port, desc);
+                               free_buffer(buff);
+                       }
+               }
+               dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
+               port->desc_tab = NULL;
+       }
+
+       if (!ports_open && dma_pool) {
+               dma_pool_destroy(dma_pool);
+               dma_pool = NULL;
+       }
+}
+
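+/*
+ * Open: load the NPE firmware if it isn't running, configure the RX queue
+ * mapping, MAC address and firewall mode via NPE messages, program the MAC
+ * registers, fill the TX-ready and RX-free queues with descriptors, then
+ * enable NAPI and the queue manager interrupts.
+ */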
+static int eth_open(struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       struct npe *npe = port->npe;
+       struct msg msg;
+       int i, err;
+
+       if (!npe_running(npe)) {
+               err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
+               if (err)
+                       return err;
+
+               if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
+                       printk(KERN_ERR "%s: %s not responding\n", dev->name,
+                              npe_name(npe));
+                       return -EIO;
+               }
+       }
+
+       mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr);
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd = NPE_VLAN_SETRXQOSENTRY;
+       msg.eth_id = port->id;
+       msg.byte5 = port->plat->rxq | 0x80;
+       msg.byte7 = port->plat->rxq << 4;
+       for (i = 0; i < 8; i++) {
+               msg.byte3 = i;
+               if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
+                       return -EIO;
+       }
+
+       msg.cmd = NPE_EDB_SETPORTADDRESS;
+       msg.eth_id = PHYSICAL_ID(port->id);
+       msg.byte2 = dev->dev_addr[0];
+       msg.byte3 = dev->dev_addr[1];
+       msg.byte4 = dev->dev_addr[2];
+       msg.byte5 = dev->dev_addr[3];
+       msg.byte6 = dev->dev_addr[4];
+       msg.byte7 = dev->dev_addr[5];
+       if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
+               return -EIO;
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd = NPE_FW_SETFIREWALLMODE;
+       msg.eth_id = port->id;
+       if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
+               return -EIO;
+
+       if ((err = request_queues(port)) != 0)
+               return err;
+
+       if ((err = init_queues(port)) != 0) {
+               destroy_queues(port);
+               release_queues(port);
+               return err;
+       }
+
+       for (i = 0; i < ETH_ALEN; i++)
+               __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
+       __raw_writel(0x08, &port->regs->random_seed);
+       __raw_writel(0x12, &port->regs->partial_empty_threshold);
+       __raw_writel(0x30, &port->regs->partial_full_threshold);
+       __raw_writel(0x08, &port->regs->tx_start_bytes);
+       __raw_writel(0x15, &port->regs->tx_deferral);
+       __raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
+       __raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
+       __raw_writel(0x80, &port->regs->slot_time);
+       __raw_writel(0x01, &port->regs->int_clock_threshold);
+
+       /* Populate queues with buffers, no failure after this point */
+       for (i = 0; i < TX_DESCS; i++)
+               queue_put_desc(port->plat->txreadyq,
+                              tx_desc_phys(port, i), tx_desc_ptr(port, i));
+
+       for (i = 0; i < RX_DESCS; i++)
+               queue_put_desc(RXFREE_QUEUE(port->id),
+                              rx_desc_phys(port, i), rx_desc_ptr(port, i));
+
+       __raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
+       __raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
+       __raw_writel(0, &port->regs->rx_control[1]);
+       __raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);
+
+       napi_enable(&port->napi);
+       phy_check_media(port, 1);
+       eth_set_mcast_list(dev);
+       netif_start_queue(dev);
+       schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
+
+       qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
+                    eth_rx_irq, dev);
+       if (!ports_open) {
+               qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
+                            eth_txdone_irq, NULL);
+               qmgr_enable_irq(TXDONE_QUEUE);
+       }
+       ports_open++;
+       /* we may already have RX data; the poll handler re-enables the IRQ */
+       netif_rx_schedule(dev, &port->napi);
+       return 0;
+}
+
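+/*
+ * Close: the NPE keeps buffers queued internally, so the port is switched
+ * to loopback and dummy frames are injected until all RX buffers drain
+ * back; TX descriptors are then reclaimed, the PHY is powered down and
+ * the queues are torn down.
+ */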
+static int eth_close(struct net_device *dev)
+{
+       struct port *port = netdev_priv(dev);
+       struct msg msg;
+       int buffs = RX_DESCS; /* allocated RX buffers */
+       int i;
+
+       ports_open--;
+       qmgr_disable_irq(port->plat->rxq);
+       napi_disable(&port->napi);
+       netif_stop_queue(dev);
+
+       while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
+               buffs--;
+
+       memset(&msg, 0, sizeof(msg));
+       msg.cmd = NPE_SETLOOPBACK_MODE;
+       msg.eth_id = port->id;
+       msg.byte3 = 1;
+       if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
+               printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);
+
+       i = 0;
+       do {                    /* drain RX buffers */
+               while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
+                       buffs--;
+               if (!buffs)
+                       break;
+               if (qmgr_stat_empty(TX_QUEUE(port->id))) {
+                       /* we have to inject a packet */
+                       struct desc *desc;
+                       u32 phys;
+                       int n = queue_get_desc(port->plat->txreadyq, port, 1);
+                       BUG_ON(n < 0);
+                       desc = tx_desc_ptr(port, n);
+                       phys = tx_desc_phys(port, n);
+                       desc->buf_len = desc->pkt_len = 1;
+                       wmb();
+                       queue_put_desc(TX_QUEUE(port->id), phys, desc);
+               }
+               udelay(1);
+       } while (++i < MAX_CLOSE_WAIT);
+
+       if (buffs)
+               printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
+                      " left in NPE\n", dev->name, buffs);
+#if DEBUG_CLOSE
+       if (!buffs)
+               printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
+#endif
+
+       buffs = TX_DESCS;
+       while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
+               buffs--; /* cancel TX */
+
+       i = 0;
+       do {
+               while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
+                       buffs--;
+               if (!buffs)
+                       break;
+       } while (++i < MAX_CLOSE_WAIT);
+
+       if (buffs)
+               printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
+                      "left in NPE\n", dev->name, buffs);
+#if DEBUG_CLOSE
+       if (!buffs)
+               printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
+#endif
+
+       msg.byte3 = 0;
+       if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
+               printk(KERN_CRIT "%s: unable to disable loopback\n",
+                      dev->name);
+
+       port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) &
+               ~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */
+       mdio_write(dev, port->plat->phy, MII_BMCR,
+                  port->mii_bmcr | BMCR_PDOWN);
+
+       if (!ports_open)
+               qmgr_disable_irq(TXDONE_QUEUE);
+       cancel_rearming_delayed_work(&port->mdio_thread);
+       destroy_queues(port);
+       release_queues(port);
+       return 0;
+}
+
+static int __devinit eth_init_one(struct platform_device *pdev)
+{
+       struct port *port;
+       struct net_device *dev;
+       struct eth_plat_info *plat = pdev->dev.platform_data;
+       u32 regs_phys;
+       int err;
+
+       if (!(dev = alloc_etherdev(sizeof(struct port))))
+               return -ENOMEM;
+
+       SET_NETDEV_DEV(dev, &pdev->dev);
+       port = netdev_priv(dev);
+       port->netdev = dev;
+       port->id = pdev->id;
+
+       switch (port->id) {
+       case IXP4XX_ETH_NPEA:
+               port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
+               regs_phys  = IXP4XX_EthA_BASE_PHYS;
+               break;
+       case IXP4XX_ETH_NPEB:
+               port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
+               regs_phys  = IXP4XX_EthB_BASE_PHYS;
+               break;
+       case IXP4XX_ETH_NPEC:
+               port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
+               regs_phys  = IXP4XX_EthC_BASE_PHYS;
+               break;
+       default:
+               err = -ENOSYS;
+               goto err_free;
+       }
+
+       dev->open = eth_open;
+       dev->hard_start_xmit = eth_xmit;
+       dev->stop = eth_close;
+       dev->get_stats = eth_stats;
+       dev->do_ioctl = eth_ioctl;
+       dev->set_multicast_list = eth_set_mcast_list;
+       dev->tx_queue_len = 100;
+
+       netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);
+
+       if (!(port->npe = npe_request(NPE_ID(port->id)))) {
+               err = -EIO;
+               goto err_free;
+       }
+
+       if (register_netdev(dev)) {
+               err = -EIO;
+               goto err_npe_rel;
+       }
+
+       port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
+       if (!port->mem_res) {
+               err = -EBUSY;
+               goto err_unreg;
+       }
+
+       port->plat = plat;
+       npe_port_tab[NPE_ID(port->id)] = port;
+       memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);
+
+       platform_set_drvdata(pdev, dev);
+
+       __raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
+                    &port->regs->core_control);
+       udelay(50);
+       __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
+       udelay(50);
+
+       port->mii.dev = dev;
+       port->mii.mdio_read = mdio_read;
+       port->mii.mdio_write = mdio_write;
+       port->mii.phy_id = plat->phy;
+       port->mii.phy_id_mask = 0x1F;
+       port->mii.reg_num_mask = 0x1F;
+
+       printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
+              npe_name(port->npe));
+
+       phy_reset(dev, plat->phy);
+       port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) &
+               ~(BMCR_RESET | BMCR_PDOWN);
+       mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN);
+
+       INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread);
+       return 0;
+
+err_unreg:
+       unregister_netdev(dev);
+err_npe_rel:
+       npe_release(port->npe);
+err_free:
+       free_netdev(dev);
+       return err;
+}
+
+static int __devexit eth_remove_one(struct platform_device *pdev)
+{
+       struct net_device *dev = platform_get_drvdata(pdev);
+       struct port *port = netdev_priv(dev);
+
+       unregister_netdev(dev);
+       npe_port_tab[NPE_ID(port->id)] = NULL;
+       platform_set_drvdata(pdev, NULL);
+       npe_release(port->npe);
+       release_resource(port->mem_res);
+       free_netdev(dev);
+       return 0;
+}
+
+static struct platform_driver drv = {
+       .driver.name    = DRV_NAME,
+       .probe          = eth_init_one,
+       .remove         = eth_remove_one,
+};
+
+static int __init eth_init_module(void)
+{
+       if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
+               return -ENOSYS;
+
+       /* All MII PHY accesses use NPE-B Ethernet registers */
+       spin_lock_init(&mdio_lock);
+       mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
+       __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);
+
+       return platform_driver_register(&drv);
+}
+
+static void __exit eth_cleanup_module(void)
+{
+       platform_driver_unregister(&drv);
+}
+
+MODULE_AUTHOR("Krzysztof Halasa");
+MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:ixp4xx_eth");
+module_init(eth_init_module);
+module_exit(eth_cleanup_module);
index 4fec8581bfd7bb80e5c7a49ae7c548532f263cf7..89c0018132ec7e8137a4877da7166d7758b5e93b 100644
@@ -27,6 +27,7 @@
 #include <linux/phy.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/ethtool.h>
 #include <linux/skbuff.h>
 #include <linux/platform_device.h>
 
@@ -42,7 +43,7 @@
 #define DRV_NAME       "bfin_mac"
 #define DRV_VERSION    "1.1"
 #define DRV_AUTHOR     "Bryan Wu, Luke Yang"
-#define DRV_DESC       "Blackfin BF53[67] BF527 on-chip Ethernet MAC driver"
+#define DRV_DESC       "Blackfin on-chip Ethernet MAC driver"
 
 MODULE_AUTHOR(DRV_AUTHOR);
 MODULE_LICENSE("GPL");
@@ -73,8 +74,14 @@ static struct net_dma_desc_tx *current_tx_ptr;
 static struct net_dma_desc_tx *tx_desc;
 static struct net_dma_desc_rx *rx_desc;
 
-static void bf537mac_disable(void);
-static void bf537mac_enable(void);
+#if defined(CONFIG_BFIN_MAC_RMII)
+static u16 pin_req[] = P_RMII0;
+#else
+static u16 pin_req[] = P_MII0;
+#endif
+
+static void bfin_mac_disable(void);
+static void bfin_mac_enable(void);
 
 static void desc_list_free(void)
 {
@@ -243,27 +250,6 @@ init_error:
 
 /*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
 
-/* Set FER regs to MUX in Ethernet pins */
-static int setup_pin_mux(int action)
-{
-#if defined(CONFIG_BFIN_MAC_RMII)
-       u16 pin_req[] = P_RMII0;
-#else
-       u16 pin_req[] = P_MII0;
-#endif
-
-       if (action) {
-               if (peripheral_request_list(pin_req, DRV_NAME)) {
-                       printk(KERN_ERR DRV_NAME
-                       ": Requesting Peripherals failed\n");
-                       return -EFAULT;
-               }
-       } else
-               peripheral_free_list(pin_req);
-
-       return 0;
-}
-
 /*
  * MII operations
  */
@@ -322,9 +308,9 @@ static int mdiobus_reset(struct mii_bus *bus)
        return 0;
 }
 
-static void bf537_adjust_link(struct net_device *dev)
+static void bfin_mac_adjust_link(struct net_device *dev)
 {
-       struct bf537mac_local *lp = netdev_priv(dev);
+       struct bfin_mac_local *lp = netdev_priv(dev);
        struct phy_device *phydev = lp->phydev;
        unsigned long flags;
        int new_state = 0;
@@ -395,7 +381,7 @@ static void bf537_adjust_link(struct net_device *dev)
 
 static int mii_probe(struct net_device *dev)
 {
-       struct bf537mac_local *lp = netdev_priv(dev);
+       struct bfin_mac_local *lp = netdev_priv(dev);
        struct phy_device *phydev = NULL;
        unsigned short sysctl;
        int i;
@@ -431,10 +417,10 @@ static int mii_probe(struct net_device *dev)
        }
 
 #if defined(CONFIG_BFIN_MAC_RMII)
-       phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0,
+       phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0,
                        PHY_INTERFACE_MODE_RMII);
 #else
-       phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0,
+       phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0,
                        PHY_INTERFACE_MODE_MII);
 #endif
 
@@ -469,6 +455,51 @@ static int mii_probe(struct net_device *dev)
        return 0;
 }
 
+/*
+ * Ethtool support
+ */
+
+static int
+bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct bfin_mac_local *lp = netdev_priv(dev);
+
+       if (lp->phydev)
+               return phy_ethtool_gset(lp->phydev, cmd);
+
+       return -EINVAL;
+}
+
+static int
+bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+       struct bfin_mac_local *lp = netdev_priv(dev);
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       if (lp->phydev)
+               return phy_ethtool_sset(lp->phydev, cmd);
+
+       return -EINVAL;
+}
+
+static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
+                                       struct ethtool_drvinfo *info)
+{
+       strcpy(info->driver, DRV_NAME);
+       strcpy(info->version, DRV_VERSION);
+       strcpy(info->fw_version, "N/A");
+       strcpy(info->bus_info, dev->dev.bus_id);
+}
+
+static struct ethtool_ops bfin_mac_ethtool_ops = {
+       .get_settings = bfin_mac_ethtool_getsettings,
+       .set_settings = bfin_mac_ethtool_setsettings,
+       .get_link = ethtool_op_get_link,
+       .get_drvinfo = bfin_mac_ethtool_getdrvinfo,
+};
+
 /**************************************************************************/
 void setup_system_regs(struct net_device *dev)
 {
@@ -511,7 +542,7 @@ static void setup_mac_addr(u8 *mac_addr)
        bfin_write_EMAC_ADDRHI(addr_hi);
 }
 
-static int bf537mac_set_mac_address(struct net_device *dev, void *p)
+static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
 {
        struct sockaddr *addr = p;
        if (netif_running(dev))
@@ -573,7 +604,7 @@ adjust_head:
 
 }
 
-static int bf537mac_hard_start_xmit(struct sk_buff *skb,
+static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
                                struct net_device *dev)
 {
        unsigned int data;
@@ -631,7 +662,7 @@ out:
        return 0;
 }
 
-static void bf537mac_rx(struct net_device *dev)
+static void bfin_mac_rx(struct net_device *dev)
 {
        struct sk_buff *skb, *new_skb;
        unsigned short len;
@@ -680,7 +711,7 @@ out:
 }
 
 /* interrupt routine to handle rx and error signal */
-static irqreturn_t bf537mac_interrupt(int irq, void *dev_id)
+static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
 {
        struct net_device *dev = dev_id;
        int number = 0;
@@ -700,21 +731,21 @@ get_one_packet:
        }
 
 real_rx:
-       bf537mac_rx(dev);
+       bfin_mac_rx(dev);
        number++;
        goto get_one_packet;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static void bf537mac_poll(struct net_device *dev)
+static void bfin_mac_poll(struct net_device *dev)
 {
        disable_irq(IRQ_MAC_RX);
-       bf537mac_interrupt(IRQ_MAC_RX, dev);
+       bfin_mac_interrupt(IRQ_MAC_RX, dev);
        enable_irq(IRQ_MAC_RX);
 }
 #endif                         /* CONFIG_NET_POLL_CONTROLLER */
 
-static void bf537mac_disable(void)
+static void bfin_mac_disable(void)
 {
        unsigned int opmode;
 
@@ -728,7 +759,7 @@ static void bf537mac_disable(void)
 /*
  * Enable Interrupts, Receive, and Transmit
  */
-static void bf537mac_enable(void)
+static void bfin_mac_enable(void)
 {
        u32 opmode;
 
@@ -766,23 +797,23 @@ static void bf537mac_enable(void)
 }
 
 /* Our watchdog timed out. Called by the networking layer */
-static void bf537mac_timeout(struct net_device *dev)
+static void bfin_mac_timeout(struct net_device *dev)
 {
        pr_debug("%s: %s\n", dev->name, __FUNCTION__);
 
-       bf537mac_disable();
+       bfin_mac_disable();
 
        /* reset tx queue */
        tx_list_tail = tx_list_head->next;
 
-       bf537mac_enable();
+       bfin_mac_enable();
 
        /* We can accept TX packets again */
        dev->trans_start = jiffies;
        netif_wake_queue(dev);
 }
 
-static void bf537mac_multicast_hash(struct net_device *dev)
+static void bfin_mac_multicast_hash(struct net_device *dev)
 {
        u32 emac_hashhi, emac_hashlo;
        struct dev_mc_list *dmi = dev->mc_list;
@@ -821,7 +852,7 @@ static void bf537mac_multicast_hash(struct net_device *dev)
  * promiscuous mode (for TCPDUMP and cousins) or accept
  * a select set of multicast packets
  */
-static void bf537mac_set_multicast_list(struct net_device *dev)
+static void bfin_mac_set_multicast_list(struct net_device *dev)
 {
        u32 sysctl;
 
@@ -840,7 +871,7 @@ static void bf537mac_set_multicast_list(struct net_device *dev)
                sysctl = bfin_read_EMAC_OPMODE();
                sysctl |= HM;
                bfin_write_EMAC_OPMODE(sysctl);
-               bf537mac_multicast_hash(dev);
+               bfin_mac_multicast_hash(dev);
        } else {
                /* clear promisc or multicast mode */
                sysctl = bfin_read_EMAC_OPMODE();
@@ -852,7 +883,7 @@ static void bf537mac_set_multicast_list(struct net_device *dev)
 /*
  * this puts the device in an inactive state
  */
-static void bf537mac_shutdown(struct net_device *dev)
+static void bfin_mac_shutdown(struct net_device *dev)
 {
        /* Turn off the EMAC */
        bfin_write_EMAC_OPMODE(0x00000000);
@@ -866,9 +897,9 @@ static void bf537mac_shutdown(struct net_device *dev)
  *
  * Set up everything, reset the card, etc..
  */
-static int bf537mac_open(struct net_device *dev)
+static int bfin_mac_open(struct net_device *dev)
 {
-       struct bf537mac_local *lp = netdev_priv(dev);
+       struct bfin_mac_local *lp = netdev_priv(dev);
        int retval;
        pr_debug("%s: %s\n", dev->name, __FUNCTION__);
 
@@ -891,8 +922,8 @@ static int bf537mac_open(struct net_device *dev)
        phy_start(lp->phydev);
        phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
        setup_system_regs(dev);
-       bf537mac_disable();
-       bf537mac_enable();
+       bfin_mac_disable();
+       bfin_mac_enable();
        pr_debug("hardware init finished\n");
        netif_start_queue(dev);
        netif_carrier_on(dev);
@@ -906,9 +937,9 @@ static int bf537mac_open(struct net_device *dev)
  * and not talk to the outside world.   Caused by
  * an 'ifconfig ethX down'
  */
-static int bf537mac_close(struct net_device *dev)
+static int bfin_mac_close(struct net_device *dev)
 {
-       struct bf537mac_local *lp = netdev_priv(dev);
+       struct bfin_mac_local *lp = netdev_priv(dev);
        pr_debug("%s: %s\n", dev->name, __FUNCTION__);
 
        netif_stop_queue(dev);
@@ -918,7 +949,7 @@ static int bf537mac_close(struct net_device *dev)
        phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);
 
        /* clear everything */
-       bf537mac_shutdown(dev);
+       bfin_mac_shutdown(dev);
 
        /* free the rx/tx buffers */
        desc_list_free();
@@ -926,46 +957,59 @@ static int bf537mac_close(struct net_device *dev)
        return 0;
 }
 
-static int __init bf537mac_probe(struct net_device *dev)
+static int __init bfin_mac_probe(struct platform_device *pdev)
 {
-       struct bf537mac_local *lp = netdev_priv(dev);
-       int retval;
-       int i;
+       struct net_device *ndev;
+       struct bfin_mac_local *lp;
+       int rc, i;
+
+       ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
+       if (!ndev) {
+               dev_err(&pdev->dev, "Cannot allocate net device!\n");
+               return -ENOMEM;
+       }
+
+       SET_NETDEV_DEV(ndev, &pdev->dev);
+       platform_set_drvdata(pdev, ndev);
+       lp = netdev_priv(ndev);
 
        /* Grab the MAC address in the MAC */
-       *(__le32 *) (&(dev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
-       *(__le16 *) (&(dev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
+       *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
+       *(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());
 
        /* probe mac */
        /*todo: how to probe? which one is the revision register? */
        bfin_write_EMAC_ADDRLO(0x12345678);
        if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
-               pr_debug("can't detect bf537 mac!\n");
-               retval = -ENODEV;
-               goto err_out;
+               dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
+               rc = -ENODEV;
+               goto out_err_probe_mac;
        }
 
        /* set the GPIO pins to Ethernet mode */
-       retval = setup_pin_mux(1);
-       if (retval)
-               return retval;
-
-       /*Is it valid? (Did bootloader initialize it?) */
-       if (!is_valid_ether_addr(dev->dev_addr)) {
-               /* Grab the MAC from the board somehow - this is done in the
-                  arch/blackfin/mach-bf537/boards/eth_mac.c */
-               bfin_get_ether_addr(dev->dev_addr);
+       rc = peripheral_request_list(pin_req, DRV_NAME);
+       if (rc) {
+               dev_err(&pdev->dev, "Requesting peripherals failed!\n");
+               rc = -EFAULT;
+               goto out_err_setup_pin_mux;
        }
 
+       /*
+        * Is it valid? (Did the bootloader initialize it?)
+        * If not, grab the MAC from the board somehow; this is done in
+        * arch/blackfin/mach-bfxxx/boards/eth_mac.c
+        */
+       if (!is_valid_ether_addr(ndev->dev_addr))
+               bfin_get_ether_addr(ndev->dev_addr);
+
        /* If still not valid, get a random one */
-       if (!is_valid_ether_addr(dev->dev_addr)) {
-               random_ether_addr(dev->dev_addr);
-       }
+       if (!is_valid_ether_addr(ndev->dev_addr))
+               random_ether_addr(ndev->dev_addr);
 
-       setup_mac_addr(dev->dev_addr);
+       setup_mac_addr(ndev->dev_addr);
 
        /* MDIO bus initialization */
-       lp->mii_bus.priv = dev;
+       lp->mii_bus.priv = ndev;
        lp->mii_bus.read = mdiobus_read;
        lp->mii_bus.write = mdiobus_write;
        lp->mii_bus.reset = mdiobus_reset;
@@ -975,86 +1019,86 @@ static int __init bf537mac_probe(struct net_device *dev)
        for (i = 0; i < PHY_MAX_ADDR; ++i)
                lp->mii_bus.irq[i] = PHY_POLL;
 
-       mdiobus_register(&lp->mii_bus);
+       rc = mdiobus_register(&lp->mii_bus);
+       if (rc) {
+               dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
+               goto out_err_mdiobus_register;
+       }
 
-       retval = mii_probe(dev);
-       if (retval)
-               return retval;
+       rc = mii_probe(ndev);
+       if (rc) {
+               dev_err(&pdev->dev, "MII Probe failed!\n");
+               goto out_err_mii_probe;
+       }
 
        /* Fill in the fields of the device structure with ethernet values. */
-       ether_setup(dev);
-
-       dev->open = bf537mac_open;
-       dev->stop = bf537mac_close;
-       dev->hard_start_xmit = bf537mac_hard_start_xmit;
-       dev->set_mac_address = bf537mac_set_mac_address;
-       dev->tx_timeout = bf537mac_timeout;
-       dev->set_multicast_list = bf537mac_set_multicast_list;
+       ether_setup(ndev);
+
+       ndev->open = bfin_mac_open;
+       ndev->stop = bfin_mac_close;
+       ndev->hard_start_xmit = bfin_mac_hard_start_xmit;
+       ndev->set_mac_address = bfin_mac_set_mac_address;
+       ndev->tx_timeout = bfin_mac_timeout;
+       ndev->set_multicast_list = bfin_mac_set_multicast_list;
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       dev->poll_controller = bf537mac_poll;
+       ndev->poll_controller = bfin_mac_poll;
 #endif
+       ndev->ethtool_ops = &bfin_mac_ethtool_ops;
 
        spin_lock_init(&lp->lock);
 
        /* now, enable interrupts */
        /* register irq handler */
-       if (request_irq
-           (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED,
-            "EMAC_RX", dev)) {
-               printk(KERN_WARNING DRV_NAME
-                      ": Unable to attach BlackFin MAC RX interrupt\n");
-               return -EBUSY;
+       rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
+                       IRQF_DISABLED | IRQF_SHARED, "EMAC_RX", ndev);
+       if (rc) {
+               dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
+               rc = -EBUSY;
+               goto out_err_request_irq;
        }
 
-
-       retval = register_netdev(dev);
-       if (retval == 0) {
-               /* now, print out the card info, in a short format.. */
-               printk(KERN_INFO "%s: Version %s, %s\n",
-                        DRV_NAME, DRV_VERSION, DRV_DESC);
-       }
-
-err_out:
-       return retval;
-}
-
-static int bfin_mac_probe(struct platform_device *pdev)
-{
-       struct net_device *ndev;
-
-       ndev = alloc_etherdev(sizeof(struct bf537mac_local));
-       if (!ndev) {
-               printk(KERN_WARNING DRV_NAME ": could not allocate device\n");
-               return -ENOMEM;
+       rc = register_netdev(ndev);
+       if (rc) {
+               dev_err(&pdev->dev, "Cannot register net device!\n");
+               goto out_err_reg_ndev;
        }
 
-       SET_NETDEV_DEV(ndev, &pdev->dev);
+       /* now print out the card info in a short format */
+       dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
 
-       platform_set_drvdata(pdev, ndev);
+       return 0;
 
-       if (bf537mac_probe(ndev) != 0) {
-               platform_set_drvdata(pdev, NULL);
-               free_netdev(ndev);
-               printk(KERN_WARNING DRV_NAME ": not found\n");
-               return -ENODEV;
-       }
+out_err_reg_ndev:
+       free_irq(IRQ_MAC_RX, ndev);
+out_err_request_irq:
+out_err_mii_probe:
+       mdiobus_unregister(&lp->mii_bus);
+out_err_mdiobus_register:
+       peripheral_free_list(pin_req);
+out_err_setup_pin_mux:
+out_err_probe_mac:
+       platform_set_drvdata(pdev, NULL);
+       free_netdev(ndev);
 
-       return 0;
+       return rc;
 }
 
 static int bfin_mac_remove(struct platform_device *pdev)
 {
        struct net_device *ndev = platform_get_drvdata(pdev);
+       struct bfin_mac_local *lp = netdev_priv(ndev);
 
        platform_set_drvdata(pdev, NULL);
 
+       mdiobus_unregister(&lp->mii_bus);
+
        unregister_netdev(ndev);
 
        free_irq(IRQ_MAC_RX, ndev);
 
        free_netdev(ndev);
 
-       setup_pin_mux(0);
+       peripheral_free_list(pin_req);
 
        return 0;
 }
@@ -1065,7 +1109,7 @@ static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
        struct net_device *net_dev = platform_get_drvdata(pdev);
 
        if (netif_running(net_dev))
-               bf537mac_close(net_dev);
+               bfin_mac_close(net_dev);
 
        return 0;
 }
@@ -1075,7 +1119,7 @@ static int bfin_mac_resume(struct platform_device *pdev)
        struct net_device *net_dev = platform_get_drvdata(pdev);
 
        if (netif_running(net_dev))
-               bf537mac_open(net_dev);
+               bfin_mac_open(net_dev);
 
        return 0;
 }
index f774d5a36942aa4dcaf6f1e6078b9aa84d521ebf..beff51064ff4e1f115c18609c1066be7a0812b54 100644
@@ -49,7 +49,7 @@ struct net_dma_desc_tx {
        struct status_area_tx status;
 };
 
-struct bf537mac_local {
+struct bfin_mac_local {
        /*
         * these are things that the kernel wants me to keep, so users
         * can find out semi-useless statistics of how well the card is
index 6e91b4b7aabb3402629571288f43754a3b67f104..6425603bc37976bebbec44283b37f4d377325c65 100644
@@ -3282,17 +3282,14 @@ static int bond_create_proc_entry(struct bonding *bond)
        struct net_device *bond_dev = bond->dev;
 
        if (bond_proc_dir) {
-               bond->proc_entry = create_proc_entry(bond_dev->name,
-                                                    S_IRUGO,
-                                                    bond_proc_dir);
+               bond->proc_entry = proc_create_data(bond_dev->name,
+                                                   S_IRUGO, bond_proc_dir,
+                                                   &bond_info_fops, bond);
                if (bond->proc_entry == NULL) {
                        printk(KERN_WARNING DRV_NAME
                               ": Warning: Cannot create /proc/net/%s/%s\n",
                               DRV_NAME, bond_dev->name);
                } else {
-                       bond->proc_entry->data = bond;
-                       bond->proc_entry->proc_fops = &bond_info_fops;
-                       bond->proc_entry->owner = THIS_MODULE;
                        memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
                }
        }
index 229303ff6a39016c0fd3823d7e5212ecc77fcaff..a0177fc55e28a9b241dbfdbd39fa5df1f24c268e 100644
@@ -38,7 +38,7 @@
 #define DRV_VERSION "1.0-ko"
 
 /* Firmware version */
-#define FW_VERSION_MAJOR 5
+#define FW_VERSION_MAJOR 6
 #define FW_VERSION_MINOR 0
 #define FW_VERSION_MICRO 0
 #endif                         /* __CHELSIO_VERSION_H */
index 2d139ec7977768e78112dd52513dcae57d5b5698..f3cba5e24ec511a7a08ea6c0659155850a165b00 100644
@@ -1802,7 +1802,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
         * it is protected by the second-to-last buffer's el bit being set */
        if (rx->prev->skb) {
                struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data;
-               put_unaligned(cpu_to_le32(rx->dma_addr), &prev_rfd->link);
+               put_unaligned_le32(rx->dma_addr, &prev_rfd->link);
        }
 
        return 0;
index 83bda6ccde98ef1fcb855be404cfd8fa7445afaf..56f50491a453f444412f15ee25b6179b3fd17684 100644
@@ -633,7 +633,7 @@ static void __init printEEPROMInfo(struct net_device *dev)
                printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC));
                printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI));
                printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber));
-               printk(KERN_DEBUG " AutoPort: %d\n", GetBit(!Word,ee_Jabber));
+               printk(KERN_DEBUG " AutoPort: %d\n", !GetBit(Word,ee_AutoPort));
                printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex));
        }
 
index d7a3ea88eddb562be4f632387d30776da84f4ef7..32a4f17d35fc5f1387ca232bc7100761cc7bcafe 100644
 #define        FEC_MAX_PORTS   1
 #endif
 
+#if defined(CONFIG_FADS) || defined(CONFIG_RPXCLASSIC) || defined(CONFIG_M5272)
+#define HAVE_mii_link_interrupt
+#endif
+
 /*
  * Define the fixed address of the FEC hardware.
  */
@@ -205,7 +209,10 @@ struct fec_enet_private {
        cbd_t   *cur_rx, *cur_tx;               /* The next free ring entry */
        cbd_t   *dirty_tx;      /* The ring entries to be free()ed. */
        uint    tx_full;
-       spinlock_t lock;
+       /* held while accessing HW such as the TX/RX ring buffers, not the MAC */
+       spinlock_t hw_lock;
+       /* held while accessing the mii_list_t elements */
+       spinlock_t mii_lock;
 
        uint    phy_id;
        uint    phy_id_done;
@@ -309,6 +316,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        volatile fec_t  *fecp;
        volatile cbd_t  *bdp;
        unsigned short  status;
+       unsigned long flags;
 
        fep = netdev_priv(dev);
        fecp = (volatile fec_t*)dev->base_addr;
@@ -318,6 +326,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                return 1;
        }
 
+       spin_lock_irqsave(&fep->hw_lock, flags);
        /* Fill in a Tx ring entry */
        bdp = fep->cur_tx;
 
@@ -328,6 +337,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 * This should not happen, since dev->tbusy should be set.
                 */
                printk("%s: tx queue full!.\n", dev->name);
+               spin_unlock_irqrestore(&fep->hw_lock, flags);
                return 1;
        }
 #endif
@@ -366,8 +376,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        flush_dcache_range((unsigned long)skb->data,
                           (unsigned long)skb->data + skb->len);
 
-       spin_lock_irq(&fep->lock);
-
        /* Send it on its way.  Tell FEC it's ready, interrupt when done,
         * it's the last BD of the frame, and to put the CRC on the end.
         */
@@ -396,7 +404,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
        fep->cur_tx = (cbd_t *)bdp;
 
-       spin_unlock_irq(&fep->lock);
+       spin_unlock_irqrestore(&fep->hw_lock, flags);
 
        return 0;
 }
@@ -454,19 +462,20 @@ fec_enet_interrupt(int irq, void * dev_id)
        struct  net_device *dev = dev_id;
        volatile fec_t  *fecp;
        uint    int_events;
-       int handled = 0;
+       irqreturn_t ret = IRQ_NONE;
 
        fecp = (volatile fec_t*)dev->base_addr;
 
        /* Get the interrupt events that caused us to be here.
        */
-       while ((int_events = fecp->fec_ievent) != 0) {
+       do {
+               int_events = fecp->fec_ievent;
                fecp->fec_ievent = int_events;
 
                /* Handle receive event in its own function.
                 */
                if (int_events & FEC_ENET_RXF) {
-                       handled = 1;
+                       ret = IRQ_HANDLED;
                        fec_enet_rx(dev);
                }
 
@@ -475,17 +484,18 @@ fec_enet_interrupt(int irq, void * dev_id)
                   them as part of the transmit process.
                */
                if (int_events & FEC_ENET_TXF) {
-                       handled = 1;
+                       ret = IRQ_HANDLED;
                        fec_enet_tx(dev);
                }
 
                if (int_events & FEC_ENET_MII) {
-                       handled = 1;
+                       ret = IRQ_HANDLED;
                        fec_enet_mii(dev);
                }
 
-       }
-       return IRQ_RETVAL(handled);
+       } while (int_events);
+
+       return ret;
 }
 
 
@@ -498,7 +508,7 @@ fec_enet_tx(struct net_device *dev)
        struct  sk_buff *skb;
 
        fep = netdev_priv(dev);
-       spin_lock(&fep->lock);
+       spin_lock_irq(&fep->hw_lock);
        bdp = fep->dirty_tx;
 
        while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
@@ -557,7 +567,7 @@ fec_enet_tx(struct net_device *dev)
                }
        }
        fep->dirty_tx = (cbd_t *)bdp;
-       spin_unlock(&fep->lock);
+       spin_unlock_irq(&fep->hw_lock);
 }
 
 
@@ -584,6 +594,8 @@ fec_enet_rx(struct net_device *dev)
        fep = netdev_priv(dev);
        fecp = (volatile fec_t*)dev->base_addr;
 
+       spin_lock_irq(&fep->hw_lock);
+
        /* First, grab all of the stats for the incoming packet.
         * These get messed up if we get called due to a busy condition.
         */
@@ -689,6 +701,8 @@ while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
         */
        fecp->fec_r_des_active = 0;
 #endif
+
+       spin_unlock_irq(&fep->hw_lock);
 }
 
 
@@ -702,11 +716,11 @@ fec_enet_mii(struct net_device *dev)
        uint            mii_reg;
 
        fep = netdev_priv(dev);
+       spin_lock_irq(&fep->mii_lock);
+
        ep = fep->hwp;
        mii_reg = ep->fec_mii_data;
 
-       spin_lock(&fep->lock);
-
        if ((mip = mii_head) == NULL) {
                printk("MII and no head!\n");
                goto unlock;
@@ -723,7 +737,7 @@ fec_enet_mii(struct net_device *dev)
                ep->fec_mii_data = mip->mii_regval;
 
 unlock:
-       spin_unlock(&fep->lock);
+       spin_unlock_irq(&fep->mii_lock);
 }
 
 static int
@@ -737,12 +751,11 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
        /* Add PHY address to register command.
        */
        fep = netdev_priv(dev);
-       regval |= fep->phy_addr << 23;
+       spin_lock_irqsave(&fep->mii_lock, flags);
 
+       regval |= fep->phy_addr << 23;
        retval = 0;
 
-       spin_lock_irqsave(&fep->lock,flags);
-
        if ((mip = mii_free) != NULL) {
                mii_free = mip->mii_next;
                mip->mii_regval = regval;
@@ -759,9 +772,8 @@ mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_devi
                retval = 1;
        }
 
-       spin_unlock_irqrestore(&fep->lock,flags);
-
-       return(retval);
+       spin_unlock_irqrestore(&fep->mii_lock, flags);
+       return retval;
 }
 
 static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
@@ -1222,7 +1234,7 @@ static phy_info_t const * const phy_info[] = {
 };
 
 /* ------------------------------------------------------------------------- */
-#if !defined(CONFIG_M532x)
+#ifdef HAVE_mii_link_interrupt
 #ifdef CONFIG_RPXCLASSIC
 static void
 mii_link_interrupt(void *dev_id);
@@ -1362,18 +1374,8 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
                unsigned short irq;
        } *idp, id[] = {
                { "fec(TXF)", 23 },
-               { "fec(TXB)", 24 },
-               { "fec(TXFIFO)", 25 },
-               { "fec(TXCR)", 26 },
                { "fec(RXF)", 27 },
-               { "fec(RXB)", 28 },
                { "fec(MII)", 29 },
-               { "fec(LC)", 30 },
-               { "fec(HBERR)", 31 },
-               { "fec(GRA)", 32 },
-               { "fec(EBERR)", 33 },
-               { "fec(BABT)", 34 },
-               { "fec(BABR)", 35 },
                { NULL },
        };
 
@@ -1533,18 +1535,8 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
                unsigned short irq;
        } *idp, id[] = {
                { "fec(TXF)", 23 },
-               { "fec(TXB)", 24 },
-               { "fec(TXFIFO)", 25 },
-               { "fec(TXCR)", 26 },
                { "fec(RXF)", 27 },
-               { "fec(RXB)", 28 },
                { "fec(MII)", 29 },
-               { "fec(LC)", 30 },
-               { "fec(HBERR)", 31 },
-               { "fec(GRA)", 32 },
-               { "fec(EBERR)", 33 },
-               { "fec(BABT)", 34 },
-               { "fec(BABR)", 35 },
                { NULL },
        };
 
@@ -1660,18 +1652,8 @@ static void __inline__ fec_request_intrs(struct net_device *dev)
                unsigned short irq;
        } *idp, id[] = {
            { "fec(TXF)", 36 },
-           { "fec(TXB)", 37 },
-           { "fec(TXFIFO)", 38 },
-           { "fec(TXCR)", 39 },
            { "fec(RXF)", 40 },
-           { "fec(RXB)", 41 },
            { "fec(MII)", 42 },
-           { "fec(LC)", 43 },
-           { "fec(HBERR)", 44 },
-           { "fec(GRA)", 45 },
-           { "fec(EBERR)", 46 },
-           { "fec(BABT)", 47 },
-           { "fec(BABR)", 48 },
            { NULL },
        };
 
@@ -2126,6 +2108,7 @@ mii_discover_phy(uint mii_reg, struct net_device *dev)
 
 /* This interrupt occurs when the PHY detects a link change.
 */
+#ifdef HAVE_mii_link_interrupt
 #ifdef CONFIG_RPXCLASSIC
 static void
 mii_link_interrupt(void *dev_id)
@@ -2148,6 +2131,7 @@ mii_link_interrupt(int irq, void * dev_id)
 
        return IRQ_HANDLED;
 }
+#endif
 
 static int
 fec_enet_open(struct net_device *dev)
@@ -2243,13 +2227,13 @@ static void set_multicast_list(struct net_device *dev)
                        /* Catch all multicast addresses, so set the
                         * filter to all 1's.
                         */
-                       ep->fec_hash_table_high = 0xffffffff;
-                       ep->fec_hash_table_low = 0xffffffff;
+                       ep->fec_grp_hash_table_high = 0xffffffff;
+                       ep->fec_grp_hash_table_low = 0xffffffff;
                } else {
                        /* Clear filter and add the addresses in hash register.
                        */
-                       ep->fec_hash_table_high = 0;
-                       ep->fec_hash_table_low = 0;
+                       ep->fec_grp_hash_table_high = 0;
+                       ep->fec_grp_hash_table_low = 0;
 
                        dmi = dev->mc_list;
 
@@ -2280,9 +2264,9 @@ static void set_multicast_list(struct net_device *dev)
                                hash = (crc >> (32 - HASH_BITS)) & 0x3f;
 
                                if (hash > 31)
-                                       ep->fec_hash_table_high |= 1 << (hash - 32);
+                                       ep->fec_grp_hash_table_high |= 1 << (hash - 32);
                                else
-                                       ep->fec_hash_table_low |= 1 << hash;
+                                       ep->fec_grp_hash_table_low |= 1 << hash;
                        }
                }
        }
@@ -2332,6 +2316,9 @@ int __init fec_enet_init(struct net_device *dev)
                return -ENOMEM;
        }
 
+       spin_lock_init(&fep->hw_lock);
+       spin_lock_init(&fep->mii_lock);
+
        /* Create an Ethernet device instance.
        */
        fecp = (volatile fec_t *) fec_hw[index];
@@ -2430,11 +2417,15 @@ int __init fec_enet_init(struct net_device *dev)
        */
        fec_request_intrs(dev);
 
-       fecp->fec_hash_table_high = 0;
-       fecp->fec_hash_table_low = 0;
+       fecp->fec_grp_hash_table_high = 0;
+       fecp->fec_grp_hash_table_low = 0;
        fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
        fecp->fec_ecntrl = 2;
        fecp->fec_r_des_active = 0;
+#ifndef CONFIG_M5272
+       fecp->fec_hash_table_high = 0;
+       fecp->fec_hash_table_low = 0;
+#endif
 
        dev->base_addr = (unsigned long)fecp;
 
@@ -2455,8 +2446,7 @@ int __init fec_enet_init(struct net_device *dev)
 
        /* Clear and enable interrupts */
        fecp->fec_ievent = 0xffc00000;
-       fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
-               FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
+       fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
 
        /* Queue up command to detect the PHY and initialize the
         * remainder of the interface.
@@ -2500,8 +2490,8 @@ fec_restart(struct net_device *dev, int duplex)
 
        /* Reset all multicast.
        */
-       fecp->fec_hash_table_high = 0;
-       fecp->fec_hash_table_low = 0;
+       fecp->fec_grp_hash_table_high = 0;
+       fecp->fec_grp_hash_table_low = 0;
 
        /* Set maximum receive buffer size.
        */
@@ -2583,8 +2573,7 @@ fec_restart(struct net_device *dev, int duplex)
 
        /* Enable interrupts we wish to service.
        */
-       fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_TXB |
-               FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII);
+       fecp->fec_imask = (FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII);
 }
 
 static void
@@ -2624,7 +2613,7 @@ fec_stop(struct net_device *dev)
 static int __init fec_enet_module_init(void)
 {
        struct net_device *dev;
-       int i, j, err;
+       int i, err;
        DECLARE_MAC_BUF(mac);
 
        printk("FEC ENET Version 0.2\n");
index 1d421606984fd4925e0a058ffb6bbc57e1332b06..292719daceff0e8060aa7ae6ddef7fda0a0bcf31 100644 (file)
@@ -88,8 +88,8 @@ typedef struct fec {
        unsigned long   fec_reserved7[158];
        unsigned long   fec_addr_low;           /* Low 32bits MAC address */
        unsigned long   fec_addr_high;          /* High 16bits MAC address */
-       unsigned long   fec_hash_table_high;    /* High 32bits hash table */
-       unsigned long   fec_hash_table_low;     /* Low 32bits hash table */
+       unsigned long   fec_grp_hash_table_high;/* High 32bits hash table */
+       unsigned long   fec_grp_hash_table_low; /* Low 32bits hash table */
        unsigned long   fec_r_des_start;        /* Receive descriptor ring */
        unsigned long   fec_x_des_start;        /* Transmit descriptor ring */
        unsigned long   fec_r_buff_size;        /* Maximum receive buff size */
index e5e6352556fa5391b4c0223a0fc949343e563526..d21b7ab64bd13224fc96e5b53a3c47a83285b50b 100644 (file)
@@ -491,20 +491,23 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
 
        out_be32(&fec->ievent, ievent);         /* clear pending events */
 
-       if (ievent & ~(FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) {
-               if (ievent & ~FEC_IEVENT_TFINT)
-                       dev_dbg(&dev->dev, "ievent: %08x\n", ievent);
+       /* on fifo error, soft-reset fec */
+       if (ievent & (FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) {
+
+               if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR))
+                       dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n");
+               if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
+                       dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
+
+               mpc52xx_fec_reset(dev);
+
+               netif_wake_queue(dev);
                return IRQ_HANDLED;
        }
 
-       if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR))
-               dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n");
-       if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
-               dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
+       if (ievent & ~FEC_IEVENT_TFINT)
+               dev_dbg(&dev->dev, "ievent: %08x\n", ievent);
 
-       mpc52xx_fec_reset(dev);
-
-       netif_wake_queue(dev);
        return IRQ_HANDLED;
 }
 
index 99a4b990939f7720780e02da49d822dbdf2b9abd..587afe7be689d174f6f36f0967fd0cfd40abc611 100644 (file)
@@ -131,8 +131,6 @@ static void free_skb_resources(struct gfar_private *priv);
 static void gfar_set_multi(struct net_device *dev);
 static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
 static void gfar_configure_serdes(struct net_device *dev);
-extern int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, int regnum, u16 value);
-extern int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
 #ifdef CONFIG_GFAR_NAPI
 static int gfar_poll(struct napi_struct *napi, int budget);
 #endif
@@ -477,24 +475,30 @@ static int init_phy(struct net_device *dev)
        return 0;
 }
 
+/*
+ * Initialize TBI PHY interface for communicating with the
+ * SERDES lynx PHY on the chip.  We communicate with this PHY
+ * through the MDIO bus on each controller, treating it as a
+ * "normal" PHY at the address found in the TBIPA register.  We assume
+ * that the TBIPA register is valid.  Either the MDIO bus code will set
+ * it to a value that doesn't conflict with other PHYs on the bus, or the
+ * value doesn't matter, as there are no other PHYs on the bus.
+ */
 static void gfar_configure_serdes(struct net_device *dev)
 {
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar_mii __iomem *regs =
                        (void __iomem *)&priv->regs->gfar_mii_regs;
+       int tbipa = gfar_read(&priv->regs->tbipa);
 
-       /* Initialise TBI i/f to communicate with serdes (lynx phy) */
+       /* Single clk mode, mii mode off(for serdes communication) */
+       gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT);
 
-       /* Single clk mode, mii mode off(for aerdes communication) */
-       gfar_local_mdio_write(regs, TBIPA_VALUE, MII_TBICON, TBICON_CLK_SELECT);
-
-       /* Supported pause and full-duplex, no half-duplex */
-       gfar_local_mdio_write(regs, TBIPA_VALUE, MII_ADVERTISE,
+       gfar_local_mdio_write(regs, tbipa, MII_ADVERTISE,
                        ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
                        ADVERTISE_1000XPSE_ASYM);
 
-       /* ANEG enable, restart ANEG, full duplex mode, speed[1] set */
-       gfar_local_mdio_write(regs, TBIPA_VALUE, MII_BMCR, BMCR_ANENABLE |
+       gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE |
                        BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
 }
 
@@ -541,9 +545,6 @@ static void init_registers(struct net_device *dev)
 
        /* Initialize the Minimum Frame Length Register */
        gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS);
-
-       /* Assign the TBI an address which won't conflict with the PHYs */
-       gfar_write(&priv->regs->tbipa, TBIPA_VALUE);
 }
 
 
index 0d0883609469ed02549af321f5ea4febbb229b3f..fd487be3993eabc2409816a747de46132fc223bb 100644 (file)
@@ -137,7 +137,6 @@ extern const char gfar_driver_version[];
 #define DEFAULT_RXCOUNT        0
 #endif /* CONFIG_GFAR_NAPI */
 
-#define TBIPA_VALUE            0x1f
 #define MIIMCFG_INIT_VALUE     0x00000007
 #define MIIMCFG_RESET           0x80000000
 #define MIIMIND_BUSY            0x00000001
index b8898927236a69fa5723c1d1c0bc10faec7db2b3..ebcfb27a904eae72e7ca5ae5ee869acf5969496e 100644 (file)
@@ -78,7 +78,6 @@ int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
  * same as system mdio bus, used for controlling the external PHYs, for eg.
  */
 int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum)
-
 {
        u16 value;
 
@@ -122,7 +121,7 @@ int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 }
 
 /* Reset the MIIM registers, and wait for the bus to free */
-int gfar_mdio_reset(struct mii_bus *bus)
+static int gfar_mdio_reset(struct mii_bus *bus)
 {
        struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
        unsigned int timeout = PHY_INIT_TIMEOUT;
@@ -152,14 +151,15 @@ int gfar_mdio_reset(struct mii_bus *bus)
 }
 
 
-int gfar_mdio_probe(struct device *dev)
+static int gfar_mdio_probe(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct gianfar_mdio_data *pdata;
        struct gfar_mii __iomem *regs;
+       struct gfar __iomem *enet_regs;
        struct mii_bus *new_bus;
        struct resource *r;
-       int err = 0;
+       int i, err = 0;
 
        if (NULL == dev)
                return -EINVAL;
@@ -199,6 +199,34 @@ int gfar_mdio_probe(struct device *dev)
        new_bus->dev = dev;
        dev_set_drvdata(dev, new_bus);
 
+       /*
+        * This is mildly evil, but so is our hardware for doing this.
+        * Also, we have to cast back to struct gfar_mii because of
+        * definition weirdness done in gianfar.h.
+        */
+       enet_regs = (struct gfar __iomem *)
+               ((char *)regs - offsetof(struct gfar, gfar_mii_regs));
+
+       /* Scan the bus, looking for an empty spot for TBIPA */
+       gfar_write(&enet_regs->tbipa, 0);
+       for (i = PHY_MAX_ADDR; i > 0; i--) {
+               u32 phy_id;
+               int r;
+
+               r = get_phy_id(new_bus, i, &phy_id);
+               if (r)
+                       return r;
+
+               if (phy_id == 0xffffffff)
+                       break;
+       }
+
+       /* The bus is full.  We don't support using 31 PHYs, sorry */
+       if (i == 0)
+               return -EBUSY;
+
+       gfar_write(&enet_regs->tbipa, i);
+
        err = mdiobus_register(new_bus);
 
        if (0 != err) {
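
The scan above probes MDIO addresses and programs the first silent one into TBIPA. A self-contained, hypothetical helper expressing the same idea (not part of the patch; it relies only on get_phy_id() reporting 0xffffffff when no PHY answers, as the probe code does):

#include <linux/errno.h>
#include <linux/phy.h>

static int find_free_phy_addr(struct mii_bus *bus)
{
	int addr;

	for (addr = PHY_MAX_ADDR - 1; addr > 0; addr--) {
		u32 phy_id;
		int err = get_phy_id(bus, addr, &phy_id);

		if (err)
			return err;
		if (phy_id == 0xffffffff)	/* nothing responded here */
			return addr;		/* safe to hand to the TBI */
	}

	return -EBUSY;				/* every address is taken */
}
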
@@ -218,7 +246,7 @@ reg_map_fail:
 }
 
 
-int gfar_mdio_remove(struct device *dev)
+static int gfar_mdio_remove(struct device *dev)
 {
        struct mii_bus *bus = dev_get_drvdata(dev);
 
index b373091c703184f78ad7d1cfa6ffa58e3d702631..2af28b16a0e278054d11f4cf5873bf76b222e932 100644 (file)
@@ -41,6 +41,9 @@ struct gfar_mii {
 
 int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
 int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
+int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id,
+                         int regnum, u16 value);
+int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum);
 int __init gfar_mdio_init(void);
 void gfar_mdio_exit(void);
 #endif /* GIANFAR_PHY_H */
index b53f6b6491b3f049bc69562dfbadf0536d47e409..e5c2380f50ca0ef8c45d371a4eb69c69f671dbe4 100644 (file)
@@ -1508,7 +1508,7 @@ static int hamachi_rx(struct net_device *dev)
                                            hmp->rx_buf_sz,
                                            PCI_DMA_FROMDEVICE);
                buf_addr = (u8 *) hmp->rx_skbuff[entry]->data;
-               frame_status = le32_to_cpu(get_unaligned((__le32*)&(buf_addr[data_size - 12])));
+               frame_status = get_unaligned_le32(&(buf_addr[data_size - 12]));
                if (hamachi_debug > 4)
                        printk(KERN_DEBUG "  hamachi_rx() status was %8.8x.\n",
                                frame_status);
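
The same substitution recurs in the irda drivers further down: get_unaligned_le32() folds the unaligned load and the le32_to_cpu() conversion into one call. A minimal sketch using the hamachi layout from the hunk above (the function name is illustrative):

#include <asm/unaligned.h>

/* The status word is stored little-endian 12 bytes before the end of the frame. */
static u32 read_frame_status(const u8 *buf, int data_size)
{
	return get_unaligned_le32(buf + data_size - 12);
}
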
index 1da55dd2a5a0a07b4e05340a050e618bbf805166..9d5721287d6f50d7030499e4470b41a36f4cb6b5 100644 (file)
@@ -148,13 +148,13 @@ static void sp_xmit_on_air(unsigned long channel)
 
        if (((sp->status1 & SIXP_DCD_MASK) == 0) && (random < sp->persistence)) {
                sp->led_state = 0x70;
-               sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+               sp->tty->ops->write(sp->tty, &sp->led_state, 1);
                sp->tx_enable = 1;
-               actual = sp->tty->driver->write(sp->tty, sp->xbuff, sp->status2);
+               actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2);
                sp->xleft -= actual;
                sp->xhead += actual;
                sp->led_state = 0x60;
-               sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+               sp->tty->ops->write(sp->tty, &sp->led_state, 1);
                sp->status2 = 0;
        } else
                mod_timer(&sp->tx_t, jiffies + ((when + 1) * HZ) / 100);
@@ -220,13 +220,13 @@ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len)
         */
        if (sp->duplex == 1) {
                sp->led_state = 0x70;
-               sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+               sp->tty->ops->write(sp->tty, &sp->led_state, 1);
                sp->tx_enable = 1;
-               actual = sp->tty->driver->write(sp->tty, sp->xbuff, count);
+               actual = sp->tty->ops->write(sp->tty, sp->xbuff, count);
                sp->xleft = count - actual;
                sp->xhead = sp->xbuff + actual;
                sp->led_state = 0x60;
-               sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+               sp->tty->ops->write(sp->tty, &sp->led_state, 1);
        } else {
                sp->xleft = count;
                sp->xhead = sp->xbuff;
@@ -444,7 +444,7 @@ static void sixpack_write_wakeup(struct tty_struct *tty)
        }
 
        if (sp->tx_enable) {
-               actual = tty->driver->write(tty, sp->xhead, sp->xleft);
+               actual = tty->ops->write(tty, sp->xhead, sp->xleft);
                sp->xleft -= actual;
                sp->xhead += actual;
        }
@@ -491,9 +491,7 @@ static void sixpack_receive_buf(struct tty_struct *tty,
        sixpack_decode(sp, buf, count1);
 
        sp_put(sp);
-       if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
-           && tty->driver->unthrottle)
-               tty->driver->unthrottle(tty);
+       tty_unthrottle(tty);
 }
 
 /*
@@ -554,8 +552,8 @@ static void resync_tnc(unsigned long channel)
        /* resync the TNC */
 
        sp->led_state = 0x60;
-       sp->tty->driver->write(sp->tty, &sp->led_state, 1);
-       sp->tty->driver->write(sp->tty, &resync_cmd, 1);
+       sp->tty->ops->write(sp->tty, &sp->led_state, 1);
+       sp->tty->ops->write(sp->tty, &resync_cmd, 1);
 
 
        /* Start resync timer again -- the TNC might be still absent */
@@ -573,7 +571,7 @@ static inline int tnc_init(struct sixpack *sp)
 
        tnc_set_sync_state(sp, TNC_UNSYNC_STARTUP);
 
-       sp->tty->driver->write(sp->tty, &inbyte, 1);
+       sp->tty->ops->write(sp->tty, &inbyte, 1);
 
        del_timer(&sp->resync_t);
        sp->resync_t.data = (unsigned long) sp;
@@ -601,6 +599,8 @@ static int sixpack_open(struct tty_struct *tty)
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
+       if (tty->ops->write == NULL)
+               return -EOPNOTSUPP;
 
        dev = alloc_netdev(sizeof(struct sixpack), "sp%d", sp_setup);
        if (!dev) {
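
The hamradio, irda and ppp line disciplines in this merge all follow the same conversion: tty method pointers are reached through tty->ops instead of tty->driver, and open-coded sequences give way to helpers such as tty_unthrottle(), tty_driver_flush_buffer() and tty_write_room(). A hedged sketch of a transmit path written against the new interface (names are illustrative, not taken from any of these drivers):

#include <linux/errno.h>
#include <linux/tty.h>

static int ldisc_send(struct tty_struct *tty, const unsigned char *buf, int len)
{
	int room;

	if (tty->ops->write == NULL)		/* some ttys cannot transmit at all */
		return -EOPNOTSUPP;

	room = tty_write_room(tty);		/* helper replaces ->driver->write_room() */
	if (len > room)
		len = room;

	return tty->ops->write(tty, buf, len);	/* ops table replaces ->driver->write() */
}
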
@@ -914,9 +914,9 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
        } else { /* output watchdog char if idle */
                if ((sp->status2 != 0) && (sp->duplex == 1)) {
                        sp->led_state = 0x70;
-                       sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+                       sp->tty->ops->write(sp->tty, &sp->led_state, 1);
                        sp->tx_enable = 1;
-                       actual = sp->tty->driver->write(sp->tty, sp->xbuff, sp->status2);
+                       actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2);
                        sp->xleft -= actual;
                        sp->xhead += actual;
                        sp->led_state = 0x60;
@@ -926,7 +926,7 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd)
        }
 
        /* needed to trigger the TNC watchdog */
-       sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+       sp->tty->ops->write(sp->tty, &sp->led_state, 1);
 
         /* if the state byte has been received, the TNC is present,
            so the resync timer can be reset. */
@@ -956,12 +956,12 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd)
                        if ((sp->status & SIXP_RX_DCD_MASK) ==
                                SIXP_RX_DCD_MASK) {
                                sp->led_state = 0x68;
-                               sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+                               sp->tty->ops->write(sp->tty, &sp->led_state, 1);
                        }
                } else {
                        sp->led_state = 0x60;
                        /* fill trailing bytes with zeroes */
-                       sp->tty->driver->write(sp->tty, &sp->led_state, 1);
+                       sp->tty->ops->write(sp->tty, &sp->led_state, 1);
                        rest = sp->rx_count;
                        if (rest != 0)
                                 for (i = rest; i <= 3; i++)
index 30c9b3b0d1319fe0c18b4906594748ee3ddaa6e0..65166035aca0c81b5bea15d608bb88cf945ef24c 100644 (file)
@@ -516,7 +516,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len)
        spin_unlock_bh(&ax->buflock);
 
        set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags);
-       actual = ax->tty->driver->write(ax->tty, ax->xbuff, count);
+       actual = ax->tty->ops->write(ax->tty, ax->xbuff, count);
        ax->stats.tx_packets++;
        ax->stats.tx_bytes += actual;
 
@@ -546,7 +546,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev)
                }
 
                printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name,
-                      (ax->tty->driver->chars_in_buffer(ax->tty) || ax->xleft) ?
+                      (ax->tty->ops->chars_in_buffer(ax->tty) || ax->xleft) ?
                       "bad line quality" : "driver error");
 
                ax->xleft = 0;
@@ -736,6 +736,8 @@ static int mkiss_open(struct tty_struct *tty)
 
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
+       if (tty->ops->write == NULL)
+               return -EOPNOTSUPP;
 
        dev = alloc_netdev(sizeof(struct mkiss), "ax%d", ax_setup);
        if (!dev) {
@@ -754,8 +756,7 @@ static int mkiss_open(struct tty_struct *tty)
        tty->disc_data = ax;
        tty->receive_room = 65535;
 
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       tty_driver_flush_buffer(tty);
 
        /* Restore default settings */
        dev->type = ARPHRD_AX25;
@@ -935,9 +936,7 @@ static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
        }
 
        mkiss_put(ax);
-       if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
-           && tty->driver->unthrottle)
-               tty->driver->unthrottle(tty);
+       tty_unthrottle(tty);
 }
 
 /*
@@ -962,7 +961,7 @@ static void mkiss_write_wakeup(struct tty_struct *tty)
                goto out;
        }
 
-       actual = tty->driver->write(tty, ax->xhead, ax->xleft);
+       actual = tty->ops->write(tty, ax->xhead, ax->xleft);
        ax->xleft -= actual;
        ax->xhead += actual;
 
index ce4fc2ec2fe4650af4fe4ee614ca9503e0060f86..00527805e4f170a8a1bdb6888aa2a30829210d6b 100644 (file)
@@ -1302,13 +1302,10 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
        if (ibmveth_proc_dir) {
                char u_addr[10];
                sprintf(u_addr, "%x", adapter->vdev->unit_address);
-               entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
-               if (!entry) {
+               entry = proc_create_data(u_addr, S_IFREG, ibmveth_proc_dir,
+                                        &ibmveth_proc_fops, adapter);
+               if (!entry)
                        ibmveth_error_printk("Cannot create adapter proc entry");
-               } else {
-                       entry->data = (void *) adapter;
-                       entry->proc_fops = &ibmveth_proc_fops;
-               }
        }
        return;
 }
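
Several proc registrations in this merge (ibmveth above; vlsi_ir, pppoe and pppol2tp below) switch from create_proc_entry() followed by manual ->data and ->proc_fops assignment to a single proc_create_data() call, which publishes the entry only after its file_operations and private data are set. A minimal sketch of the new idiom (directory, name, fops and data are placeholders):

#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>

static int register_stats_entry(struct proc_dir_entry *dir, const char *name,
				const struct file_operations *fops, void *data)
{
	struct proc_dir_entry *entry;

	entry = proc_create_data(name, S_IFREG | S_IRUGO, dir, fops, data);

	return entry ? 0 : -ENOMEM;
}
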
index fc753d7f674e41b0a231ac45663806c7b84ab06a..e6f40b7f9041d48e8c8b0d2020175dbff20e1331 100644 (file)
@@ -64,7 +64,7 @@ static int irtty_chars_in_buffer(struct sir_dev *dev)
        IRDA_ASSERT(priv != NULL, return -1;);
        IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
 
-       return priv->tty->driver->chars_in_buffer(priv->tty);
+       return tty_chars_in_buffer(priv->tty);
 }
 
 /* Wait (sleep) until underlaying hardware finished transmission
@@ -93,10 +93,8 @@ static void irtty_wait_until_sent(struct sir_dev *dev)
        IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
 
        tty = priv->tty;
-       if (tty->driver->wait_until_sent) {
-               lock_kernel();
-               tty->driver->wait_until_sent(tty, msecs_to_jiffies(100));
-               unlock_kernel();
+       if (tty->ops->wait_until_sent) {
+               tty->ops->wait_until_sent(tty, msecs_to_jiffies(100));
        }
        else {
                msleep(USBSERIAL_TX_DONE_DELAY);
@@ -125,48 +123,14 @@ static int irtty_change_speed(struct sir_dev *dev, unsigned speed)
 
        tty = priv->tty;
 
-       lock_kernel();
+       mutex_lock(&tty->termios_mutex);
        old_termios = *(tty->termios);
        cflag = tty->termios->c_cflag;
-
-       cflag &= ~CBAUD;
-
-       IRDA_DEBUG(2, "%s(), Setting speed to %d\n", __FUNCTION__, speed);
-
-       switch (speed) {
-       case 1200:
-               cflag |= B1200;
-               break;
-       case 2400:
-               cflag |= B2400;
-               break;
-       case 4800:
-               cflag |= B4800;
-               break;
-       case 19200:
-               cflag |= B19200;
-               break;
-       case 38400:
-               cflag |= B38400;
-               break;
-       case 57600:
-               cflag |= B57600;
-               break;
-       case 115200:
-               cflag |= B115200;
-               break;
-       case 9600:
-       default:
-               cflag |= B9600;
-               break;
-       }       
-
-       tty->termios->c_cflag = cflag;
-       if (tty->driver->set_termios)
-               tty->driver->set_termios(tty, &old_termios);
-       unlock_kernel();
-
+       tty_encode_baud_rate(tty, speed, speed);
+       if (tty->ops->set_termios)
+               tty->ops->set_termios(tty, &old_termios);
        priv->io.speed = speed;
+       mutex_unlock(&tty->termios_mutex);
 
        return 0;
 }
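
The removed switch hard-coded a Bxxxx cflag per speed; the replacement encodes the rate with tty_encode_baud_rate() and calls the driver's set_termios op under termios_mutex. Condensed into a stand-alone sketch (illustrative only, mirroring the hunk):

#include <linux/tty.h>

static void set_line_speed(struct tty_struct *tty, unsigned int speed)
{
	struct ktermios old;

	mutex_lock(&tty->termios_mutex);
	old = *tty->termios;
	tty_encode_baud_rate(tty, speed, speed);	/* same input and output rate */
	if (tty->ops->set_termios)
		tty->ops->set_termios(tty, &old);	/* let the driver apply it */
	mutex_unlock(&tty->termios_mutex);
}
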
@@ -202,8 +166,8 @@ static int irtty_set_dtr_rts(struct sir_dev *dev, int dtr, int rts)
         * This function is not yet defined for all tty driver, so
         * let's be careful... Jean II
         */
-       IRDA_ASSERT(priv->tty->driver->tiocmset != NULL, return -1;);
-       priv->tty->driver->tiocmset(priv->tty, NULL, set, clear);
+       IRDA_ASSERT(priv->tty->ops->tiocmset != NULL, return -1;);
+       priv->tty->ops->tiocmset(priv->tty, NULL, set, clear);
 
        return 0;
 }
@@ -225,17 +189,13 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t
        IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;);
 
        tty = priv->tty;
-       if (!tty->driver->write)
+       if (!tty->ops->write)
                return 0;
        tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
-       if (tty->driver->write_room) {
-               writelen = tty->driver->write_room(tty);
-               if (writelen > len)
-                       writelen = len;
-       }
-       else
+       writelen = tty_write_room(tty);
+       if (writelen > len)
                writelen = len;
-       return tty->driver->write(tty, ptr, writelen);
+       return tty->ops->write(tty, ptr, writelen);
 }
 
 /* ------------------------------------------------------- */
@@ -321,7 +281,7 @@ static inline void irtty_stop_receiver(struct tty_struct *tty, int stop)
        struct ktermios old_termios;
        int cflag;
 
-       lock_kernel();
+       mutex_lock(&tty->termios_mutex);
        old_termios = *(tty->termios);
        cflag = tty->termios->c_cflag;
        
@@ -331,9 +291,9 @@ static inline void irtty_stop_receiver(struct tty_struct *tty, int stop)
                cflag |= CREAD;
 
        tty->termios->c_cflag = cflag;
-       if (tty->driver->set_termios)
-               tty->driver->set_termios(tty, &old_termios);
-       unlock_kernel();
+       if (tty->ops->set_termios)
+               tty->ops->set_termios(tty, &old_termios);
+       mutex_unlock(&tty->termios_mutex);
 }
 
 /*****************************************************************/
@@ -359,8 +319,8 @@ static int irtty_start_dev(struct sir_dev *dev)
 
        tty = priv->tty;
 
-       if (tty->driver->start)
-               tty->driver->start(tty);
+       if (tty->ops->start)
+               tty->ops->start(tty);
        /* Make sure we can receive more data */
        irtty_stop_receiver(tty, FALSE);
 
@@ -388,8 +348,8 @@ static int irtty_stop_dev(struct sir_dev *dev)
 
        /* Make sure we don't receive more data */
        irtty_stop_receiver(tty, TRUE);
-       if (tty->driver->stop)
-               tty->driver->stop(tty);
+       if (tty->ops->stop)
+               tty->ops->stop(tty);
 
        mutex_unlock(&irtty_mutex);
 
@@ -483,11 +443,10 @@ static int irtty_open(struct tty_struct *tty)
 
        /* stop the underlying  driver */
        irtty_stop_receiver(tty, TRUE);
-       if (tty->driver->stop)
-               tty->driver->stop(tty);
+       if (tty->ops->stop)
+               tty->ops->stop(tty);
 
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       tty_driver_flush_buffer(tty);
        
        /* apply mtt override */
        sir_tty_drv.qos_mtt_bits = qos_mtt_bits;
@@ -564,8 +523,8 @@ static void irtty_close(struct tty_struct *tty)
        /* Stop tty */
        irtty_stop_receiver(tty, TRUE);
        tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
-       if (tty->driver->stop)
-               tty->driver->stop(tty);
+       if (tty->ops->stop)
+               tty->ops->stop(tty);
 
        kfree(priv);
 
index 93916cf33f295604e939d02d2b7a1f0bfef28e66..ad92d3ff1c4093ff6ea8281c430374ab26ff3ac2 100644 (file)
@@ -464,7 +464,7 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len)
        }
 
        fcs = ~(crc32_le(~0, buf, new_len));
-       if(fcs != le32_to_cpu(get_unaligned((__le32 *)(buf+new_len)))) {
+       if(fcs != get_unaligned_le32(buf + new_len)) {
                IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len);
                mcs->stats.rx_errors++;
                mcs->stats.rx_crc_errors++;
index e59c485bc497f0f98054b6fea0424c320c3a3dfb..0519637827497c0e3e978a09ac5b0628df0d7ba3 100644 (file)
@@ -329,7 +329,7 @@ static void fir_eof(struct stir_cb *stir)
        }
 
        fcs = ~(crc32_le(~0, rx_buff->data, len));
-       if (fcs != le32_to_cpu(get_unaligned((__le32 *)(rx_buff->data+len)))) {
+       if (fcs != get_unaligned_le32(rx_buff->data + len)) {
                pr_debug("crc error calc 0x%x len %d\n", fcs, len);
                stir->stats.rx_errors++;
                stir->stats.rx_crc_errors++;
index acd082a96a4f2b6dd986c956552adae410d48065..d15e00b8591e173aeb55a08623a773ced53b6d8f 100644 (file)
@@ -1674,13 +1674,12 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        if (vlsi_proc_root != NULL) {
                struct proc_dir_entry *ent;
 
-               ent = create_proc_entry(ndev->name, S_IFREG|S_IRUGO, vlsi_proc_root);
+               ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO,
+                                      vlsi_proc_root, VLSI_PROC_FOPS, ndev);
                if (!ent) {
                        IRDA_WARNING("%s: failed to create proc entry\n",
                                     __FUNCTION__);
                } else {
-                       ent->data = ndev;
-                       ent->proc_fops = VLSI_PROC_FOPS;
                        ent->size = 0;
                }
                idev->proc_entry = ent;
index 75ef9d0d974dc165e8b5630c28a0affc017a6969..f9d6b4dca180e49f6d16fc4ebfe594831cc0cf29 100644 (file)
@@ -196,3 +196,160 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
        }
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_free);
+
+static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
+{
+       struct mlx4_db_pgdir *pgdir;
+
+       pgdir = kzalloc(sizeof *pgdir, GFP_KERNEL);
+       if (!pgdir)
+               return NULL;
+
+       bitmap_fill(pgdir->order1, MLX4_DB_PER_PAGE / 2);
+       pgdir->bits[0] = pgdir->order0;
+       pgdir->bits[1] = pgdir->order1;
+       pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
+                                           &pgdir->db_dma, GFP_KERNEL);
+       if (!pgdir->db_page) {
+               kfree(pgdir);
+               return NULL;
+       }
+
+       return pgdir;
+}
+
+static int mlx4_alloc_db_from_pgdir(struct mlx4_db_pgdir *pgdir,
+                                   struct mlx4_db *db, int order)
+{
+       int o;
+       int i;
+
+       for (o = order; o <= 1; ++o) {
+               i = find_first_bit(pgdir->bits[o], MLX4_DB_PER_PAGE >> o);
+               if (i < MLX4_DB_PER_PAGE >> o)
+                       goto found;
+       }
+
+       return -ENOMEM;
+
+found:
+       clear_bit(i, pgdir->bits[o]);
+
+       i <<= o;
+
+       if (o > order)
+               set_bit(i ^ 1, pgdir->bits[order]);
+
+       db->u.pgdir = pgdir;
+       db->index   = i;
+       db->db      = pgdir->db_page + db->index;
+       db->dma     = pgdir->db_dma  + db->index * 4;
+       db->order   = order;
+
+       return 0;
+}
+
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       struct mlx4_db_pgdir *pgdir;
+       int ret = 0;
+
+       mutex_lock(&priv->pgdir_mutex);
+
+       list_for_each_entry(pgdir, &priv->pgdir_list, list)
+               if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
+                       goto out;
+
+       pgdir = mlx4_alloc_db_pgdir(&(dev->pdev->dev));
+       if (!pgdir) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       list_add(&pgdir->list, &priv->pgdir_list);
+
+       /* This should never fail -- we just allocated an empty page: */
+       WARN_ON(mlx4_alloc_db_from_pgdir(pgdir, db, order));
+
+out:
+       mutex_unlock(&priv->pgdir_mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(mlx4_db_alloc);
+
+void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int o;
+       int i;
+
+       mutex_lock(&priv->pgdir_mutex);
+
+       o = db->order;
+       i = db->index;
+
+       if (db->order == 0 && test_bit(i ^ 1, db->u.pgdir->order0)) {
+               clear_bit(i ^ 1, db->u.pgdir->order0);
+               ++o;
+       }
+       i >>= o;
+       set_bit(i, db->u.pgdir->bits[o]);
+
+       if (bitmap_full(db->u.pgdir->order1, MLX4_DB_PER_PAGE / 2)) {
+               dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+                                 db->u.pgdir->db_page, db->u.pgdir->db_dma);
+               list_del(&db->u.pgdir->list);
+               kfree(db->u.pgdir);
+       }
+
+       mutex_unlock(&priv->pgdir_mutex);
+}
+EXPORT_SYMBOL_GPL(mlx4_db_free);
+
+int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+                      int size, int max_direct)
+{
+       int err;
+
+       err = mlx4_db_alloc(dev, &wqres->db, 1);
+       if (err)
+               return err;
+
+       *wqres->db.db = 0;
+
+       err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf);
+       if (err)
+               goto err_db;
+
+       err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
+                           &wqres->mtt);
+       if (err)
+               goto err_buf;
+
+       err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
+       if (err)
+               goto err_mtt;
+
+       return 0;
+
+err_mtt:
+       mlx4_mtt_cleanup(dev, &wqres->mtt);
+err_buf:
+       mlx4_buf_free(dev, size, &wqres->buf);
+err_db:
+       mlx4_db_free(dev, &wqres->db);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_alloc_hwq_res);
+
+void mlx4_free_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+                      int size)
+{
+       mlx4_mtt_cleanup(dev, &wqres->mtt);
+       mlx4_buf_free(dev, size, &wqres->buf);
+       mlx4_db_free(dev, &wqres->db);
+}
+EXPORT_SYMBOL_GPL(mlx4_free_hwq_res);
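
A hypothetical consumer of the new helpers (not part of the patch): mlx4_alloc_hwq_res() hands back a doorbell record, a HW buffer and an initialized MTT in one call, and mlx4_free_hwq_res() releases them symmetrically. The 2 * PAGE_SIZE max_direct value below is an arbitrary choice for illustration.

static int create_queue(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
			int buf_size)
{
	int err;

	err = mlx4_alloc_hwq_res(dev, wqres, buf_size, 2 * PAGE_SIZE);
	if (err)
		return err;	/* partial allocations already rolled back */

	/* ... program the HW with wqres->db.dma, wqres->mtt and wqres->buf ... */

	return 0;
}

static void destroy_queue(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
			  int buf_size)
{
	mlx4_free_hwq_res(dev, wqres, buf_size);
}
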
index caa5bcf54e35a6ee4ab1f033c88a41fb75ead1f3..95e87a2f8896f97ca5110613fdcd2a6527ccfc5f 100644 (file)
@@ -180,7 +180,7 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
        cq_context->mtt_base_addr_h = mtt_addr >> 32;
        cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
 
-       err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);
+       err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);
 
        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
@@ -188,7 +188,8 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
 EXPORT_SYMBOL_GPL(mlx4_cq_resize);
 
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
-                 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq)
+                 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
+                 int collapsed)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cq_table *cq_table = &priv->cq_table;
@@ -224,6 +225,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
        cq_context = mailbox->buf;
        memset(cq_context, 0, sizeof *cq_context);
 
+       cq_context->flags           = cpu_to_be32(!!collapsed << 18);
        cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
        cq_context->comp_eqn        = priv->eq_table.eq[MLX4_EQ_COMP].eqn;
        cq_context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
index 49a4acab5e82ab1f5d8f2075af39d953fa414f6f..a6aa49fc1d68a6654d449bcc6fa90a7ad8f40f23 100644 (file)
@@ -798,6 +798,9 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
        INIT_LIST_HEAD(&priv->ctx_list);
        spin_lock_init(&priv->ctx_lock);
 
+       INIT_LIST_HEAD(&priv->pgdir_list);
+       mutex_init(&priv->pgdir_mutex);
+
        /*
         * Now reset the HCA before we touch the PCI capabilities or
         * attempt a firmware command, since a boot ROM may have left
index 73336810e6528124005cc576fd74d04b0aeed26e..a4023c2dd05092762ba3f8be32ef9fac5b1a7652 100644 (file)
@@ -257,6 +257,9 @@ struct mlx4_priv {
        struct list_head        ctx_list;
        spinlock_t              ctx_lock;
 
+       struct list_head        pgdir_list;
+       struct mutex            pgdir_mutex;
+
        struct mlx4_fw          fw;
        struct mlx4_cmd         cmd;
 
index 79b317b88c86b13105124b64a4754330839b753e..cb46446b2691b88a7db70ce08a482f9a09696e5a 100644 (file)
@@ -607,15 +607,9 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
 void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
                    u32 *lkey, u32 *rkey)
 {
-       u32 key;
-
        if (!fmr->maps)
                return;
 
-       key = key_to_hw_index(fmr->mr.key);
-       key &= dev->caps.num_mpts - 1;
-       *lkey = *rkey = fmr->mr.key = hw_index_to_key(key);
-
        fmr->maps = 0;
 
        *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
index fa24e6597591fc0f10ab11158ead02051f884a6d..ee5484c44a1833781887dbbdc42775f5d661bc60 100644 (file)
@@ -299,3 +299,34 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
 }
 EXPORT_SYMBOL_GPL(mlx4_qp_query);
 
+int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                    struct mlx4_qp_context *context,
+                    struct mlx4_qp *qp, enum mlx4_qp_state *qp_state)
+{
+       int err;
+       int i;
+       enum mlx4_qp_state states[] = {
+               MLX4_QP_STATE_RST,
+               MLX4_QP_STATE_INIT,
+               MLX4_QP_STATE_RTR,
+               MLX4_QP_STATE_RTS
+       };
+
+       for (i = 0; i < ARRAY_SIZE(states) - 1; i++) {
+               context->flags &= cpu_to_be32(~(0xf << 28));
+               context->flags |= cpu_to_be32(states[i + 1] << 28);
+               err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
+                                    context, 0, 0, qp);
+               if (err) {
+                       mlx4_err(dev, "Failed to bring QP to state: "
+                                "%d with error: %d\n",
+                                states[i + 1], err);
+                       return err;
+               }
+
+               *qp_state = states[i + 1];
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_qp_to_ready);
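
A hypothetical caller of mlx4_qp_to_ready() (not from the patch): the helper walks the QP through RST -> INIT -> RTR -> RTS using the supplied context and reports the state reached through the last argument.

static int bring_up_qp(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
		       struct mlx4_qp *qp, struct mlx4_qp_context *ctx)
{
	enum mlx4_qp_state state;
	int err;

	err = mlx4_qp_to_ready(dev, mtt, ctx, qp, &state);
	if (err)
		return err;	/* QP did not reach RTS */

	WARN_ON(state != MLX4_QP_STATE_RTS);
	return 0;
}
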
index cead81e80f0c84dfcc443e0eb1466f09664f0f71..ef63c8d2bd7e4ae550a2f2add91b9327b1e6a40a 100644 (file)
@@ -2437,7 +2437,7 @@ static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev)
        int status;
 
        segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6);
-       if (unlikely(IS_ERR(segs)))
+       if (IS_ERR(segs))
                goto drop;
 
        while (segs) {
index 3ac8529bb92ca9915b76b2cb1026f5308f05c107..6bf9e76b0a008be5f4eaac35411714e2eed3b811 100644 (file)
@@ -48,7 +48,7 @@ config VITESSE_PHY
 config SMSC_PHY
        tristate "Drivers for SMSC PHYs"
        ---help---
-         Currently supports the LAN83C185 PHY
+         Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs
 
 config BROADCOM_PHY
        tristate "Drivers for Broadcom PHYs"
index ddf8d51832a679dcd7b2767bd8c9793d5b2a8a23..ac3c01d28fdfa8407068289907346d8853dc8b9a 100644 (file)
@@ -256,7 +256,7 @@ void phy_prepare_link(struct phy_device *phydev,
 /**
  * phy_connect - connect an ethernet device to a PHY device
  * @dev: the network device to connect
- * @phy_id: the PHY device to connect
+ * @bus_id: the id string of the PHY device to connect
  * @handler: callback function for state change notifications
  * @flags: PHY device's dev_flags
  * @interface: PHY device's interface
index b1d8ed40ad98e541120c6c1e00ff87741b4d8040..73baa7a3bb0ee3c9d43e69bb01d45908ef3340f0 100644 (file)
@@ -12,6 +12,8 @@
  * Free Software Foundation;  either version 2 of the  License, or (at your
  * option) any later version.
  *
+ * Support added for SMSC LAN8187 and LAN8700 by steve.glendinning@smsc.com
+ *
  */
 
 #include <linux/kernel.h>
@@ -38,7 +40,7 @@
        (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4)
 
 
-static int lan83c185_config_intr(struct phy_device *phydev)
+static int smsc_phy_config_intr(struct phy_device *phydev)
 {
        int rc = phy_write (phydev, MII_LAN83C185_IM,
                        ((PHY_INTERRUPT_ENABLED == phydev->interrupts)
@@ -48,16 +50,16 @@ static int lan83c185_config_intr(struct phy_device *phydev)
        return rc < 0 ? rc : 0;
 }
 
-static int lan83c185_ack_interrupt(struct phy_device *phydev)
+static int smsc_phy_ack_interrupt(struct phy_device *phydev)
 {
        int rc = phy_read (phydev, MII_LAN83C185_ISF);
 
        return rc < 0 ? rc : 0;
 }
 
-static int lan83c185_config_init(struct phy_device *phydev)
+static int smsc_phy_config_init(struct phy_device *phydev)
 {
-       return lan83c185_ack_interrupt (phydev);
+       return smsc_phy_ack_interrupt (phydev);
 }
 
 
@@ -73,22 +75,87 @@ static struct phy_driver lan83c185_driver = {
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
-       .config_init    = lan83c185_config_init,
+       .config_init    = smsc_phy_config_init,
 
        /* IRQ related */
-       .ack_interrupt  = lan83c185_ack_interrupt,
-       .config_intr    = lan83c185_config_intr,
+       .ack_interrupt  = smsc_phy_ack_interrupt,
+       .config_intr    = smsc_phy_config_intr,
+
+       .driver         = { .owner = THIS_MODULE, }
+};
+
+static struct phy_driver lan8187_driver = {
+       .phy_id         = 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "SMSC LAN8187",
+
+       .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause
+                               | SUPPORTED_Asym_Pause),
+       .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+
+       /* basic functions */
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+       .config_init    = smsc_phy_config_init,
+
+       /* IRQ related */
+       .ack_interrupt  = smsc_phy_ack_interrupt,
+       .config_intr    = smsc_phy_config_intr,
+
+       .driver         = { .owner = THIS_MODULE, }
+};
+
+static struct phy_driver lan8700_driver = {
+       .phy_id         = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "SMSC LAN8700",
+
+       .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause
+                               | SUPPORTED_Asym_Pause),
+       .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+
+       /* basic functions */
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+       .config_init    = smsc_phy_config_init,
+
+       /* IRQ related */
+       .ack_interrupt  = smsc_phy_ack_interrupt,
+       .config_intr    = smsc_phy_config_intr,
 
        .driver         = { .owner = THIS_MODULE, }
 };
 
 static int __init smsc_init(void)
 {
-       return phy_driver_register (&lan83c185_driver);
+       int ret;
+
+       ret = phy_driver_register (&lan83c185_driver);
+       if (ret)
+               goto err1;
+
+       ret = phy_driver_register (&lan8187_driver);
+       if (ret)
+               goto err2;
+
+       ret = phy_driver_register (&lan8700_driver);
+       if (ret)
+               goto err3;
+
+       return 0;
+
+err3:
+       phy_driver_unregister (&lan8187_driver);
+err2:
+       phy_driver_unregister (&lan83c185_driver);
+err1:
+       return ret;
 }
 
 static void __exit smsc_exit(void)
 {
+       phy_driver_unregister (&lan8700_driver);
+       phy_driver_unregister (&lan8187_driver);
        phy_driver_unregister (&lan83c185_driver);
 }
 
index f023d5b67e6efcc38d1673ef56b9de8536371392..f1a52def1241a387c28ed6f5a045b839df8f994f 100644 (file)
@@ -158,6 +158,9 @@ ppp_asynctty_open(struct tty_struct *tty)
        struct asyncppp *ap;
        int err;
 
+       if (tty->ops->write == NULL)
+               return -EOPNOTSUPP;
+
        err = -ENOMEM;
        ap = kzalloc(sizeof(*ap), GFP_KERNEL);
        if (!ap)
@@ -358,9 +361,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
        if (!skb_queue_empty(&ap->rqueue))
                tasklet_schedule(&ap->tsk);
        ap_put(ap);
-       if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
-           && tty->driver->unthrottle)
-               tty->driver->unthrottle(tty);
+       tty_unthrottle(tty);
 }
 
 static void
@@ -676,7 +677,7 @@ ppp_async_push(struct asyncppp *ap)
                if (!tty_stuffed && ap->optr < ap->olim) {
                        avail = ap->olim - ap->optr;
                        set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-                       sent = tty->driver->write(tty, ap->optr, avail);
+                       sent = tty->ops->write(tty, ap->optr, avail);
                        if (sent < 0)
                                goto flush;     /* error, e.g. loss of CD */
                        ap->optr += sent;
index 0d80fa54671963a002540d29eefdcaf042c7aa8d..b8f0369a71e777a30b8a52897184a6cca3a2cb4a 100644 (file)
@@ -207,6 +207,9 @@ ppp_sync_open(struct tty_struct *tty)
        struct syncppp *ap;
        int err;
 
+       if (tty->ops->write == NULL)
+               return -EOPNOTSUPP;
+
        ap = kzalloc(sizeof(*ap), GFP_KERNEL);
        err = -ENOMEM;
        if (!ap)
@@ -398,9 +401,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
        if (!skb_queue_empty(&ap->rqueue))
                tasklet_schedule(&ap->tsk);
        sp_put(ap);
-       if (test_and_clear_bit(TTY_THROTTLED, &tty->flags)
-           && tty->driver->unthrottle)
-               tty->driver->unthrottle(tty);
+       tty_unthrottle(tty);
 }
 
 static void
@@ -653,7 +654,7 @@ ppp_sync_push(struct syncppp *ap)
                        tty_stuffed = 0;
                if (!tty_stuffed && ap->tpkt) {
                        set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-                       sent = tty->driver->write(tty, ap->tpkt->data, ap->tpkt->len);
+                       sent = tty->ops->write(tty, ap->tpkt->data, ap->tpkt->len);
                        if (sent < 0)
                                goto flush;     /* error, e.g. loss of CD */
                        if (sent < ap->tpkt->len) {
index 4fad4ddb35048419beb3484ea4d14acd4ac6b241..58a26a47af29a1ee3764968dda5e7497752c9e7a 100644 (file)
@@ -1052,11 +1052,9 @@ static int __init pppoe_proc_init(void)
 {
        struct proc_dir_entry *p;
 
-       p = create_proc_entry("pppoe", S_IRUGO, init_net.proc_net);
+       p = proc_net_fops_create(&init_net, "pppoe", S_IRUGO, &pppoe_seq_fops);
        if (!p)
                return -ENOMEM;
-
-       p->proc_fops = &pppoe_seq_fops;
        return 0;
 }
 #else /* CONFIG_PROC_FS */
index 3d10ca050b7991b9f53cf4a6cc3d5adb873137aa..244d7830c92aac88486981f7ec52b3d3b141631f 100644 (file)
@@ -2469,12 +2469,12 @@ static int __init pppol2tp_init(void)
                goto out_unregister_pppol2tp_proto;
 
 #ifdef CONFIG_PROC_FS
-       pppol2tp_proc = create_proc_entry("pppol2tp", 0, init_net.proc_net);
+       pppol2tp_proc = proc_net_fops_create(&init_net, "pppol2tp", 0,
+                                            &pppol2tp_proc_fops);
        if (!pppol2tp_proc) {
                err = -ENOMEM;
                goto out_unregister_pppox_proto;
        }
-       pppol2tp_proc->proc_fops = &pppol2tp_proc_fops;
 #endif /* CONFIG_PROC_FS */
        printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
               PPPOL2TP_DRV_VERSION);
index 3acfeeabdee1ee44cc842695b0d0ddb905e2d044..6572425046210a1037c2c62b0e8d13e89ad62513 100644 (file)
@@ -1617,6 +1617,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        SET_NETDEV_DEV(dev, &pdev->dev);
        tp = netdev_priv(dev);
        tp->dev = dev;
+       tp->pci_dev = pdev;
        tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
 
        /* enable device (incl. PCI PM wakeup and hotplug setup) */
@@ -1705,18 +1706,18 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        rtl8169_print_mac_version(tp);
 
-       for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) {
+       for (i = 0; i < ARRAY_SIZE(rtl_chip_info); i++) {
                if (tp->mac_version == rtl_chip_info[i].mac_version)
                        break;
        }
-       if (i < 0) {
+       if (i == ARRAY_SIZE(rtl_chip_info)) {
                /* Unknown chip: assume array element #0, original RTL-8169 */
                if (netif_msg_probe(tp)) {
                        dev_printk(KERN_DEBUG, &pdev->dev,
                                "unknown chip version, assuming %s\n",
                                rtl_chip_info[0].name);
                }
-               i++;
+               i = 0;
        }
        tp->chipset = i;
 
@@ -1777,7 +1778,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 #endif
 
        tp->intr_mask = 0xffff;
-       tp->pci_dev = pdev;
        tp->mmio_addr = ioaddr;
        tp->align = cfg->align;
        tp->hw_start = cfg->hw_start;
index e7fd08adbbacbfe307d81a47623cf5d627b7053b..2b8fd68bc5163b49a09b373fb1913d6793bc051e 100644 (file)
@@ -77,7 +77,7 @@ static int rionet_capable = 1;
  * could be made into a hash table to save memory depending
  * on system trade-offs.
  */
-static struct rio_dev *rionet_active[RIO_MAX_ROUTE_ENTRIES];
+static struct rio_dev **rionet_active;
 
 #define is_rionet_capable(pef, src_ops, dst_ops)               \
                        ((pef & RIO_PEF_INB_MBOX) &&            \
@@ -195,7 +195,8 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
        }
 
        if (eth->h_dest[0] & 0x01) {
-               for (i = 0; i < RIO_MAX_ROUTE_ENTRIES; i++)
+               for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rnet->mport->sys_size);
+                               i++)
                        if (rionet_active[i])
                                rionet_queue_tx_msg(skb, ndev,
                                                    rionet_active[i]);
@@ -385,6 +386,8 @@ static void rionet_remove(struct rio_dev *rdev)
        struct net_device *ndev = NULL;
        struct rionet_peer *peer, *tmp;
 
+       free_pages((unsigned long)rionet_active, rdev->net->hport->sys_size ?
+                                       __ilog2(sizeof(void *)) + 4 : 0);
        unregister_netdev(ndev);
        kfree(ndev);
 
@@ -443,6 +446,15 @@ static int rionet_setup_netdev(struct rio_mport *mport)
                goto out;
        }
 
+       rionet_active = (struct rio_dev **)__get_free_pages(GFP_KERNEL,
+                       mport->sys_size ? __ilog2(sizeof(void *)) + 4 : 0);
+       if (!rionet_active) {
+               rc = -ENOMEM;
+               goto out;
+       }
+       memset((void *)rionet_active, 0, sizeof(void *) *
+                               RIO_MAX_ROUTE_ENTRIES(mport->sys_size));
+
        /* Set up private area */
        rnet = (struct rionet_private *)ndev->priv;
        rnet->mport = mport;
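
The peer table is now allocated at runtime and sized from sys_size. As a quick sanity check on the page order used above (an illustrative computation, assuming RIO_MAX_ROUTE_ENTRIES is 64K entries for large, 16-bit sys_size systems and 256 otherwise, with 4 KiB pages):

static unsigned int rionet_table_order(int sys_size)
{
	/*
	 * Large systems: 65536 pointers * sizeof(void *) bytes.  For 8-byte
	 * pointers that is 512 KiB = 2^7 pages, and __ilog2(8) + 4 == 7.
	 * Small systems: 256 pointers fit comfortably in a single page.
	 */
	return sys_size ? __ilog2(sizeof(void *)) + 4 : 0;
}
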
index 157fd932e95140ce54936e3d0a79bdb30cd25048..523478ebfd6922096cd8d9de828a9e8dd6903f9b 100644 (file)
@@ -86,7 +86,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "2.0.26.22"
+#define DRV_VERSION "2.0.26.23"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -809,6 +809,7 @@ static int init_shared_mem(struct s2io_nic *nic)
                    config->rx_cfg[i].num_rxd - 1;
                mac_control->rings[i].nic = nic;
                mac_control->rings[i].ring_no = i;
+               mac_control->rings[i].lro = lro_enable;
 
                blk_cnt = config->rx_cfg[i].num_rxd /
                                (rxd_count[nic->rxd_mode] + 1);
@@ -1560,113 +1561,112 @@ static int init_nic(struct s2io_nic *nic)
        writeq(val64, &bar0->tx_fifo_partition_0);
 
        /* Filling the Rx round robin registers as per the
-        * number of Rings and steering based on QoS.
-         */
+        * number of Rings and steering based on QoS with
+        * equal priority.
+        */
        switch (config->rx_ring_num) {
        case 1:
+               val64 = 0x0;
+               writeq(val64, &bar0->rx_w_round_robin_0);
+               writeq(val64, &bar0->rx_w_round_robin_1);
+               writeq(val64, &bar0->rx_w_round_robin_2);
+               writeq(val64, &bar0->rx_w_round_robin_3);
+               writeq(val64, &bar0->rx_w_round_robin_4);
+
                val64 = 0x8080808080808080ULL;
                writeq(val64, &bar0->rts_qos_steering);
                break;
        case 2:
-               val64 = 0x0000010000010000ULL;
+               val64 = 0x0001000100010001ULL;
                writeq(val64, &bar0->rx_w_round_robin_0);
-               val64 = 0x0100000100000100ULL;
                writeq(val64, &bar0->rx_w_round_robin_1);
-               val64 = 0x0001000001000001ULL;
                writeq(val64, &bar0->rx_w_round_robin_2);
-               val64 = 0x0000010000010000ULL;
                writeq(val64, &bar0->rx_w_round_robin_3);
-               val64 = 0x0100000000000000ULL;
+               val64 = 0x0001000100000000ULL;
                writeq(val64, &bar0->rx_w_round_robin_4);
 
                val64 = 0x8080808040404040ULL;
                writeq(val64, &bar0->rts_qos_steering);
                break;
        case 3:
-               val64 = 0x0001000102000001ULL;
+               val64 = 0x0001020001020001ULL;
                writeq(val64, &bar0->rx_w_round_robin_0);
-               val64 = 0x0001020000010001ULL;
+               val64 = 0x0200010200010200ULL;
                writeq(val64, &bar0->rx_w_round_robin_1);
-               val64 = 0x0200000100010200ULL;
+               val64 = 0x0102000102000102ULL;
                writeq(val64, &bar0->rx_w_round_robin_2);
-               val64 = 0x0001000102000001ULL;
+               val64 = 0x0001020001020001ULL;
                writeq(val64, &bar0->rx_w_round_robin_3);
-               val64 = 0x0001020000000000ULL;
+               val64 = 0x0200010200000000ULL;
                writeq(val64, &bar0->rx_w_round_robin_4);
 
                val64 = 0x8080804040402020ULL;
                writeq(val64, &bar0->rts_qos_steering);
                break;
        case 4:
-               val64 = 0x0001020300010200ULL;
+               val64 = 0x0001020300010203ULL;
                writeq(val64, &bar0->rx_w_round_robin_0);
-               val64 = 0x0100000102030001ULL;
                writeq(val64, &bar0->rx_w_round_robin_1);
-               val64 = 0x0200010000010203ULL;
                writeq(val64, &bar0->rx_w_round_robin_2);
-               val64 = 0x0001020001000001ULL;
                writeq(val64, &bar0->rx_w_round_robin_3);
-               val64 = 0x0203000100000000ULL;
+               val64 = 0x0001020300000000ULL;
                writeq(val64, &bar0->rx_w_round_robin_4);
 
                val64 = 0x8080404020201010ULL;
                writeq(val64, &bar0->rts_qos_steering);
                break;
        case 5:
-               val64 = 0x0001000203000102ULL;
+               val64 = 0x0001020304000102ULL;
                writeq(val64, &bar0->rx_w_round_robin_0);
-               val64 = 0x0001020001030004ULL;
+               val64 = 0x0304000102030400ULL;
                writeq(val64, &bar0->rx_w_round_robin_1);
-               val64 = 0x0001000203000102ULL;
+               val64 = 0x0102030400010203ULL;
                writeq(val64, &bar0->rx_w_round_robin_2);
-               val64 = 0x0001020001030004ULL;
+               val64 = 0x0400010203040001ULL;
                writeq(val64, &bar0->rx_w_round_robin_3);
-               val64 = 0x0001000000000000ULL;
+               val64 = 0x0203040000000000ULL;
                writeq(val64, &bar0->rx_w_round_robin_4);
 
                val64 = 0x8080404020201008ULL;
                writeq(val64, &bar0->rts_qos_steering);
                break;
        case 6:
-               val64 = 0x0001020304000102ULL;
+               val64 = 0x0001020304050001ULL;
                writeq(val64, &bar0->rx_w_round_robin_0);
-               val64 = 0x0304050001020001ULL;
+               val64 = 0x0203040500010203ULL;
                writeq(val64, &bar0->rx_w_round_robin_1);
-               val64 = 0x0203000100000102ULL;
+               val64 = 0x0405000102030405ULL;
                writeq(val64, &bar0->rx_w_round_robin_2);
-               val64 = 0x0304000102030405ULL;
+               val64 = 0x0001020304050001ULL;
                writeq(val64, &bar0->rx_w_round_robin_3);
-               val64 = 0x0001000200000000ULL;
+               val64 = 0x0203040500000000ULL;
                writeq(val64, &bar0->rx_w_round_robin_4);
 
                val64 = 0x8080404020100804ULL;
                writeq(val64, &bar0->rts_qos_steering);
                break;
        case 7:
-               val64 = 0x0001020001020300ULL;
+               val64 = 0x0001020304050600ULL;
                writeq(val64, &bar0->rx_w_round_robin_0);
-               val64 = 0x0102030400010203ULL;
+               val64 = 0x0102030405060001ULL;
                writeq(val64, &bar0->rx_w_round_robin_1);
-               val64 = 0x0405060001020001ULL;
+               val64 = 0x0203040506000102ULL;
                writeq(val64, &bar0->rx_w_round_robin_2);
-               val64 = 0x0304050000010200ULL;
+               val64 = 0x0304050600010203ULL;
                writeq(val64, &bar0->rx_w_round_robin_3);
-               val64 = 0x0102030000000000ULL;
+               val64 = 0x0405060000000000ULL;
                writeq(val64, &bar0->rx_w_round_robin_4);
 
                val64 = 0x8080402010080402ULL;
                writeq(val64, &bar0->rts_qos_steering);
                break;
        case 8:
-               val64 = 0x0001020300040105ULL;
+               val64 = 0x0001020304050607ULL;
                writeq(val64, &bar0->rx_w_round_robin_0);
-               val64 = 0x0200030106000204ULL;
                writeq(val64, &bar0->rx_w_round_robin_1);
-               val64 = 0x0103000502010007ULL;
                writeq(val64, &bar0->rx_w_round_robin_2);
-               val64 = 0x0304010002060500ULL;
                writeq(val64, &bar0->rx_w_round_robin_3);
-               val64 = 0x0103020400000000ULL;
+               val64 = 0x0001020300000000ULL;
                writeq(val64, &bar0->rx_w_round_robin_4);
 
                val64 = 0x8040201008040201ULL;
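
One way to read the new values (an interpretation of the hunk, not something the driver states): each byte of rx_w_round_robin_0..4 names the ring that wins the next arbitration slot, the table is 36 slots long, and "equal priority" for N rings is simply the sequence 0,1,...,N-1 repeated, with the unused tail of register 4 left zero. A sketch that reproduces the register values under that reading:

/* Build one 64-bit round-robin register (8 slots) for 'nrings' rings,
 * starting at arbitration slot 'start'.  For example rr_reg_value(3, 8)
 * yields 0x0200010200010200ULL, matching rx_w_round_robin_1 in case 3. */
static u64 rr_reg_value(unsigned int nrings, unsigned int start)
{
	u64 val = 0;
	unsigned int slot;

	for (slot = 0; slot < 8; slot++)
		val |= (u64)((start + slot) % nrings) << (56 - 8 * slot);

	return val;
}
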
@@ -2499,8 +2499,7 @@ static void stop_nic(struct s2io_nic *nic)
 
 /**
  *  fill_rx_buffers - Allocates the Rx side skbs
- *  @nic:  device private variable
- *  @ring_no: ring number
+ *  @ring_info: per ring structure
  *  Description:
  *  The function allocates Rx side skbs and puts the physical
  *  address of these buffers into the RxD buffer pointers, so that the NIC
@@ -2518,103 +2517,94 @@ static void stop_nic(struct s2io_nic *nic)
  *  SUCCESS on success or an appropriate -ve value on failure.
  */
 
-static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
+static int fill_rx_buffers(struct ring_info *ring)
 {
-       struct net_device *dev = nic->dev;
        struct sk_buff *skb;
        struct RxD_t *rxdp;
-       int off, off1, size, block_no, block_no1;
+       int off, size, block_no, block_no1;
        u32 alloc_tab = 0;
        u32 alloc_cnt;
-       struct mac_info *mac_control;
-       struct config_param *config;
        u64 tmp;
        struct buffAdd *ba;
        struct RxD_t *first_rxdp = NULL;
        u64 Buffer0_ptr = 0, Buffer1_ptr = 0;
+       int rxd_index = 0;
        struct RxD1 *rxdp1;
        struct RxD3 *rxdp3;
-       struct swStat *stats = &nic->mac_control.stats_info->sw_stat;
+       struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat;
 
-       mac_control = &nic->mac_control;
-       config = &nic->config;
-       alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
-           atomic_read(&nic->rx_bufs_left[ring_no]);
+       alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left;
 
-       block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
-       off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
+       block_no1 = ring->rx_curr_get_info.block_index;
        while (alloc_tab < alloc_cnt) {
-               block_no = mac_control->rings[ring_no].rx_curr_put_info.
-                   block_index;
-               off = mac_control->rings[ring_no].rx_curr_put_info.offset;
+               block_no = ring->rx_curr_put_info.block_index;
 
-               rxdp = mac_control->rings[ring_no].
-                               rx_blocks[block_no].rxds[off].virt_addr;
+               off = ring->rx_curr_put_info.offset;
+
+               rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr;
+
+               rxd_index = off + 1;
+               if (block_no)
+                       rxd_index += (block_no * ring->rxd_count);
 
-               if ((block_no == block_no1) && (off == off1) &&
-                                       (rxdp->Host_Control)) {
+               if ((block_no == block_no1) && 
+                       (off == ring->rx_curr_get_info.offset) &&
+                       (rxdp->Host_Control)) {
                        DBG_PRINT(INTR_DBG, "%s: Get and Put",
-                                 dev->name);
+                               ring->dev->name);
                        DBG_PRINT(INTR_DBG, " info equated\n");
                        goto end;
                }
-               if (off && (off == rxd_count[nic->rxd_mode])) {
-                       mac_control->rings[ring_no].rx_curr_put_info.
-                           block_index++;
-                       if (mac_control->rings[ring_no].rx_curr_put_info.
-                           block_index == mac_control->rings[ring_no].
-                                       block_count)
-                               mac_control->rings[ring_no].rx_curr_put_info.
-                                       block_index = 0;
-                       block_no = mac_control->rings[ring_no].
-                                       rx_curr_put_info.block_index;
-                       if (off == rxd_count[nic->rxd_mode])
-                               off = 0;
-                       mac_control->rings[ring_no].rx_curr_put_info.
-                               offset = off;
-                       rxdp = mac_control->rings[ring_no].
-                               rx_blocks[block_no].block_virt_addr;
+               if (off && (off == ring->rxd_count)) {
+                       ring->rx_curr_put_info.block_index++;
+                       if (ring->rx_curr_put_info.block_index ==
+                                                       ring->block_count)
+                               ring->rx_curr_put_info.block_index = 0;
+                       block_no = ring->rx_curr_put_info.block_index;
+                       off = 0;
+                       ring->rx_curr_put_info.offset = off;
+                       rxdp = ring->rx_blocks[block_no].block_virt_addr;
                        DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
-                                 dev->name, rxdp);
+                                 ring->dev->name, rxdp);
+
                }
 
                if ((rxdp->Control_1 & RXD_OWN_XENA) &&
-                       ((nic->rxd_mode == RXD_MODE_3B) &&
+                       ((ring->rxd_mode == RXD_MODE_3B) &&
                                (rxdp->Control_2 & s2BIT(0)))) {
-                       mac_control->rings[ring_no].rx_curr_put_info.
-                                       offset = off;
+                       ring->rx_curr_put_info.offset = off;
                        goto end;
                }
                /* calculate size of skb based on ring mode */
-               size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
+               size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE +
                                HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
-               if (nic->rxd_mode == RXD_MODE_1)
+               if (ring->rxd_mode == RXD_MODE_1)
                        size += NET_IP_ALIGN;
                else
-                       size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
+                       size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4;
 
                /* allocate skb */
                skb = dev_alloc_skb(size);
                if(!skb) {
-                       DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name);
+                       DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name);
                        DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n");
                        if (first_rxdp) {
                                wmb();
                                first_rxdp->Control_1 |= RXD_OWN_XENA;
                        }
-                       nic->mac_control.stats_info->sw_stat. \
-                               mem_alloc_fail_cnt++;
+                       stats->mem_alloc_fail_cnt++;
+
                        return -ENOMEM ;
                }
-               nic->mac_control.stats_info->sw_stat.mem_allocated
-                       += skb->truesize;
-               if (nic->rxd_mode == RXD_MODE_1) {
+               stats->mem_allocated += skb->truesize;
+
+               if (ring->rxd_mode == RXD_MODE_1) {
                        /* 1 buffer mode - normal operation mode */
                        rxdp1 = (struct RxD1*)rxdp;
                        memset(rxdp, 0, sizeof(struct RxD1));
                        skb_reserve(skb, NET_IP_ALIGN);
                        rxdp1->Buffer0_ptr = pci_map_single
-                           (nic->pdev, skb->data, size - NET_IP_ALIGN,
+                           (ring->pdev, skb->data, size - NET_IP_ALIGN,
                                PCI_DMA_FROMDEVICE);
                        if( (rxdp1->Buffer0_ptr == 0) ||
                                (rxdp1->Buffer0_ptr ==
@@ -2623,8 +2613,8 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 
                        rxdp->Control_2 =
                                SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
-
-               } else if (nic->rxd_mode == RXD_MODE_3B) {
+                       rxdp->Host_Control = (unsigned long) (skb);
+               } else if (ring->rxd_mode == RXD_MODE_3B) {
                        /*
                         * 2 buffer mode -
                         * 2 buffer mode provides 128
@@ -2640,7 +2630,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
                        rxdp3->Buffer0_ptr = Buffer0_ptr;
                        rxdp3->Buffer1_ptr = Buffer1_ptr;
 
-                       ba = &mac_control->rings[ring_no].ba[block_no][off];
+                       ba = &ring->ba[block_no][off];
                        skb_reserve(skb, BUF0_LEN);
                        tmp = (u64)(unsigned long) skb->data;
                        tmp += ALIGN_SIZE;
@@ -2650,10 +2640,10 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 
                        if (!(rxdp3->Buffer0_ptr))
                                rxdp3->Buffer0_ptr =
-                                  pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
-                                          PCI_DMA_FROMDEVICE);
+                                  pci_map_single(ring->pdev, ba->ba_0,
+                                       BUF0_LEN, PCI_DMA_FROMDEVICE);
                        else
-                               pci_dma_sync_single_for_device(nic->pdev,
+                               pci_dma_sync_single_for_device(ring->pdev,
                                (dma_addr_t) rxdp3->Buffer0_ptr,
                                    BUF0_LEN, PCI_DMA_FROMDEVICE);
                        if( (rxdp3->Buffer0_ptr == 0) ||
@@ -2661,7 +2651,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
                                goto pci_map_failed;
 
                        rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
-                       if (nic->rxd_mode == RXD_MODE_3B) {
+                       if (ring->rxd_mode == RXD_MODE_3B) {
                                /* Two buffer mode */
 
                                /*
@@ -2669,39 +2659,42 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
                                 * L4 payload
                                 */
                                rxdp3->Buffer2_ptr = pci_map_single
-                               (nic->pdev, skb->data, dev->mtu + 4,
+                               (ring->pdev, skb->data, ring->mtu + 4,
                                                PCI_DMA_FROMDEVICE);
 
                                if( (rxdp3->Buffer2_ptr == 0) ||
                                        (rxdp3->Buffer2_ptr == DMA_ERROR_CODE))
                                        goto pci_map_failed;
 
-                               rxdp3->Buffer1_ptr =
-                                               pci_map_single(nic->pdev,
+                               if (!rxdp3->Buffer1_ptr)
+                                       rxdp3->Buffer1_ptr =
+                                               pci_map_single(ring->pdev,
                                                ba->ba_1, BUF1_LEN,
                                                PCI_DMA_FROMDEVICE);
+
                                if( (rxdp3->Buffer1_ptr == 0) ||
                                        (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) {
                                        pci_unmap_single
-                                               (nic->pdev,
-                                               (dma_addr_t)rxdp3->Buffer2_ptr,
-                                               dev->mtu + 4,
+                                               (ring->pdev,
+                                               (dma_addr_t)(unsigned long)
+                                               skb->data,
+                                               ring->mtu + 4,
                                                PCI_DMA_FROMDEVICE);
                                        goto pci_map_failed;
                                }
                                rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
                                rxdp->Control_2 |= SET_BUFFER2_SIZE_3
-                                                               (dev->mtu + 4);
+                                                               (ring->mtu + 4);
                        }
                        rxdp->Control_2 |= s2BIT(0);
+                       rxdp->Host_Control = (unsigned long) (skb);
                }
-               rxdp->Host_Control = (unsigned long) (skb);
                if (alloc_tab & ((1 << rxsync_frequency) - 1))
                        rxdp->Control_1 |= RXD_OWN_XENA;
                off++;
-               if (off == (rxd_count[nic->rxd_mode] + 1))
+               if (off == (ring->rxd_count + 1))
                        off = 0;
-               mac_control->rings[ring_no].rx_curr_put_info.offset = off;
+               ring->rx_curr_put_info.offset = off;
 
                rxdp->Control_2 |= SET_RXD_MARKER;
                if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
@@ -2711,7 +2704,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
                        }
                        first_rxdp = rxdp;
                }
-               atomic_inc(&nic->rx_bufs_left[ring_no]);
+               ring->rx_bufs_left += 1;
                alloc_tab++;
        }
 
@@ -2783,7 +2776,7 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
                }
                sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize;
                dev_kfree_skb(skb);
-               atomic_dec(&sp->rx_bufs_left[ring_no]);
+               mac_control->rings[ring_no].rx_bufs_left -= 1;
        }
 }
 
@@ -2814,7 +2807,7 @@ static void free_rx_buffers(struct s2io_nic *sp)
                mac_control->rings[i].rx_curr_get_info.block_index = 0;
                mac_control->rings[i].rx_curr_put_info.offset = 0;
                mac_control->rings[i].rx_curr_get_info.offset = 0;
-               atomic_set(&sp->rx_bufs_left[i], 0);
+               mac_control->rings[i].rx_bufs_left = 0;
                DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
                          dev->name, buf_cnt, i);
        }
@@ -2864,7 +2857,7 @@ static int s2io_poll(struct napi_struct *napi, int budget)
        netif_rx_complete(dev, napi);
 
        for (i = 0; i < config->rx_ring_num; i++) {
-               if (fill_rx_buffers(nic, i) == -ENOMEM) {
+               if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
                        DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
                        break;
@@ -2877,7 +2870,7 @@ static int s2io_poll(struct napi_struct *napi, int budget)
 
 no_rx:
        for (i = 0; i < config->rx_ring_num; i++) {
-               if (fill_rx_buffers(nic, i) == -ENOMEM) {
+               if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
                        DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(INFO_DBG, " in Rx Poll!!\n");
                        break;
@@ -2928,7 +2921,7 @@ static void s2io_netpoll(struct net_device *dev)
                rx_intr_handler(&mac_control->rings[i]);
 
        for (i = 0; i < config->rx_ring_num; i++) {
-               if (fill_rx_buffers(nic, i) == -ENOMEM) {
+               if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) {
                        DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
                        break;
@@ -2953,8 +2946,6 @@ static void s2io_netpoll(struct net_device *dev)
  */
 static void rx_intr_handler(struct ring_info *ring_data)
 {
-       struct s2io_nic *nic = ring_data->nic;
-       struct net_device *dev = (struct net_device *) nic->dev;
        int get_block, put_block;
        struct rx_curr_get_info get_info, put_info;
        struct RxD_t *rxdp;
@@ -2977,33 +2968,34 @@ static void rx_intr_handler(struct ring_info *ring_data)
                 */
                if ((get_block == put_block) &&
                    (get_info.offset + 1) == put_info.offset) {
-                       DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
+                       DBG_PRINT(INTR_DBG, "%s: Ring Full\n",
+                               ring_data->dev->name);
                        break;
                }
                skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
                if (skb == NULL) {
                        DBG_PRINT(ERR_DBG, "%s: The skb is ",
-                                 dev->name);
+                                 ring_data->dev->name);
                        DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
                        return;
                }
-               if (nic->rxd_mode == RXD_MODE_1) {
+               if (ring_data->rxd_mode == RXD_MODE_1) {
                        rxdp1 = (struct RxD1*)rxdp;
-                       pci_unmap_single(nic->pdev, (dma_addr_t)
+                       pci_unmap_single(ring_data->pdev, (dma_addr_t)
                                rxdp1->Buffer0_ptr,
-                               dev->mtu +
+                               ring_data->mtu +
                                HEADER_ETHERNET_II_802_3_SIZE +
                                HEADER_802_2_SIZE +
                                HEADER_SNAP_SIZE,
                                PCI_DMA_FROMDEVICE);
-               } else if (nic->rxd_mode == RXD_MODE_3B) {
+               } else if (ring_data->rxd_mode == RXD_MODE_3B) {
                        rxdp3 = (struct RxD3*)rxdp;
-                       pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
+                       pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t)
                                rxdp3->Buffer0_ptr,
                                BUF0_LEN, PCI_DMA_FROMDEVICE);
-                       pci_unmap_single(nic->pdev, (dma_addr_t)
+                       pci_unmap_single(ring_data->pdev, (dma_addr_t)
                                rxdp3->Buffer2_ptr,
-                               dev->mtu + 4,
+                               ring_data->mtu + 4,
                                PCI_DMA_FROMDEVICE);
                }
                prefetch(skb->data);
@@ -3012,7 +3004,7 @@ static void rx_intr_handler(struct ring_info *ring_data)
                ring_data->rx_curr_get_info.offset = get_info.offset;
                rxdp = ring_data->rx_blocks[get_block].
                                rxds[get_info.offset].virt_addr;
-               if (get_info.offset == rxd_count[nic->rxd_mode]) {
+               if (get_info.offset == rxd_count[ring_data->rxd_mode]) {
                        get_info.offset = 0;
                        ring_data->rx_curr_get_info.offset = get_info.offset;
                        get_block++;
@@ -3022,19 +3014,21 @@ static void rx_intr_handler(struct ring_info *ring_data)
                        rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
                }
 
-               nic->pkts_to_process -= 1;
-               if ((napi) && (!nic->pkts_to_process))
-                       break;
+               if (ring_data->nic->config.napi) {
+                       ring_data->nic->pkts_to_process -= 1;
+                       if (!ring_data->nic->pkts_to_process)
+                               break;
+               }
                pkt_cnt++;
                if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
                        break;
        }
-       if (nic->lro) {
+       if (ring_data->lro) {
                /* Clear all LRO sessions before exiting */
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
-                       struct lro *lro = &nic->lro0_n[i];
+                       struct lro *lro = &ring_data->lro0_n[i];
                        if (lro->in_use) {
-                               update_L3L4_header(nic, lro);
+                               update_L3L4_header(ring_data->nic, lro);
                                queue_rx_frame(lro->parent, lro->vlan_tag);
                                clear_lro_session(lro);
                        }
@@ -4333,10 +4327,10 @@ s2io_alarm_handle(unsigned long data)
        mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
 }
 
-static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
+static int s2io_chk_rx_buffers(struct ring_info *ring)
 {
-       if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
-               DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name);
+       if (fill_rx_buffers(ring) == -ENOMEM) {
+               DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
                DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
        }
        return 0;
@@ -4351,7 +4345,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
                return IRQ_HANDLED;
 
        rx_intr_handler(ring);
-       s2io_chk_rx_buffers(sp, ring->ring_no);
+       s2io_chk_rx_buffers(ring);
 
        return IRQ_HANDLED;
 }
@@ -4809,7 +4803,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
                 */
                if (!config->napi) {
                        for (i = 0; i < config->rx_ring_num; i++)
-                               s2io_chk_rx_buffers(sp, i);
+                               s2io_chk_rx_buffers(&mac_control->rings[i]);
                }
                writeq(sp->general_int_mask, &bar0->general_int_mask);
                readl(&bar0->general_int_status);
@@ -4866,6 +4860,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
        struct s2io_nic *sp = dev->priv;
        struct mac_info *mac_control;
        struct config_param *config;
+       int i;
 
 
        mac_control = &sp->mac_control;
@@ -4885,6 +4880,13 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
        sp->stats.rx_length_errors =
                le64_to_cpu(mac_control->stats_info->rmac_long_frms);
 
+       /* collect per-ring rx_packets and rx_bytes */
+       sp->stats.rx_packets = sp->stats.rx_bytes = 0;
+       for (i = 0; i < config->rx_ring_num; i++) {
+               sp->stats.rx_packets += mac_control->rings[i].rx_packets;
+               sp->stats.rx_bytes += mac_control->rings[i].rx_bytes;
+       }
+
        return (&sp->stats);
 }
 
@@ -7157,7 +7159,9 @@ static int s2io_card_up(struct s2io_nic * sp)
        config = &sp->config;
 
        for (i = 0; i < config->rx_ring_num; i++) {
-               if ((ret = fill_rx_buffers(sp, i))) {
+               mac_control->rings[i].mtu = dev->mtu;
+               ret = fill_rx_buffers(&mac_control->rings[i]);
+               if (ret) {
                        DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
                                  dev->name);
                        s2io_reset(sp);
@@ -7165,7 +7169,7 @@ static int s2io_card_up(struct s2io_nic * sp)
                        return -ENOMEM;
                }
                DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
-                         atomic_read(&sp->rx_bufs_left[i]));
+                         mac_control->rings[i].rx_bufs_left);
        }
 
        /* Initialise napi */
@@ -7300,7 +7304,7 @@ static void s2io_tx_watchdog(struct net_device *dev)
 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
 {
        struct s2io_nic *sp = ring_data->nic;
-       struct net_device *dev = (struct net_device *) sp->dev;
+       struct net_device *dev = (struct net_device *) ring_data->dev;
        struct sk_buff *skb = (struct sk_buff *)
                ((unsigned long) rxdp->Host_Control);
        int ring_no = ring_data->ring_no;
@@ -7377,19 +7381,19 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
                        sp->mac_control.stats_info->sw_stat.mem_freed
                                += skb->truesize;
                        dev_kfree_skb(skb);
-                       atomic_dec(&sp->rx_bufs_left[ring_no]);
+                       ring_data->rx_bufs_left -= 1;
                        rxdp->Host_Control = 0;
                        return 0;
                }
        }
 
        /* Updating statistics */
-       sp->stats.rx_packets++;
+       ring_data->rx_packets++;
        rxdp->Host_Control = 0;
        if (sp->rxd_mode == RXD_MODE_1) {
                int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
 
-               sp->stats.rx_bytes += len;
+               ring_data->rx_bytes += len;
                skb_put(skb, len);
 
        } else if (sp->rxd_mode == RXD_MODE_3B) {
@@ -7400,13 +7404,13 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
                unsigned char *buff = skb_push(skb, buf0_len);
 
                struct buffAdd *ba = &ring_data->ba[get_block][get_off];
-               sp->stats.rx_bytes += buf0_len + buf2_len;
+               ring_data->rx_bytes += buf0_len + buf2_len;
                memcpy(buff, ba->ba_0, buf0_len);
                skb_put(skb, buf2_len);
        }
 
-       if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
-           (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
+       if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) ||
+           (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
            (sp->rx_csum)) {
                l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
                l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
@@ -7417,14 +7421,14 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
                         * a flag in the RxD.
                         */
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
-                       if (sp->lro) {
+                       if (ring_data->lro) {
                                u32 tcp_len;
                                u8 *tcp;
                                int ret = 0;
 
-                               ret = s2io_club_tcp_session(skb->data, &tcp,
-                                                           &tcp_len, &lro,
-                                                           rxdp, sp);
+                               ret = s2io_club_tcp_session(ring_data,
+                                       skb->data, &tcp, &tcp_len, &lro,
+                                       rxdp, sp);
                                switch (ret) {
                                        case 3: /* Begin anew */
                                                lro->parent = skb;
@@ -7486,7 +7490,7 @@ send_up:
        queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2));
        dev->last_rx = jiffies;
 aggregate:
-       atomic_dec(&sp->rx_bufs_left[ring_no]);
+       sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
        return SUCCESS;
 }
 
@@ -7603,12 +7607,14 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
                tx_steering_type = NO_STEERING;
        }
 
-       if ( rx_ring_num > 8) {
-               DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
+       if (rx_ring_num > MAX_RX_RINGS) {
+               DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not "
                         "supported\n");
-               DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
-               rx_ring_num = 8;
+               DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n",
+                       MAX_RX_RINGS);
+               rx_ring_num = MAX_RX_RINGS;
        }
+
        if (*dev_intr_type != INTA)
                napi = 0;
 
@@ -7836,10 +7842,15 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
 
        /* Rx side parameters. */
        config->rx_ring_num = rx_ring_num;
-       for (i = 0; i < MAX_RX_RINGS; i++) {
+       for (i = 0; i < config->rx_ring_num; i++) {
                config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
                    (rxd_count[sp->rxd_mode] + 1);
                config->rx_cfg[i].ring_priority = i;
+               mac_control->rings[i].rx_bufs_left = 0;
+               mac_control->rings[i].rxd_mode = sp->rxd_mode;
+               mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode];
+               mac_control->rings[i].pdev = sp->pdev;
+               mac_control->rings[i].dev = sp->dev;
        }
 
        for (i = 0; i < rx_ring_num; i++) {
@@ -7854,10 +7865,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
        mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
 
 
-       /* Initialize Ring buffer parameters. */
-       for (i = 0; i < config->rx_ring_num; i++)
-               atomic_set(&sp->rx_bufs_left[i], 0);
-
        /*  initialize the shared memory used by the NIC and the host */
        if (init_shared_mem(sp)) {
                DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
@@ -8077,6 +8084,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
        DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name,
                sp->config.tx_fifo_num);
 
+       DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name,
+                 sp->config.rx_ring_num);
+
        switch(sp->config.intr_type) {
                case INTA:
                    DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
@@ -8391,8 +8401,9 @@ static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
 }
 
 static int
-s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
-                     struct RxD_t *rxdp, struct s2io_nic *sp)
+s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp,
+       u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
+       struct s2io_nic *sp)
 {
        struct iphdr *ip;
        struct tcphdr *tcph;
@@ -8410,7 +8421,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
        tcph = (struct tcphdr *)*tcp;
        *tcp_len = get_l4_pyld_length(ip, tcph);
        for (i=0; i<MAX_LRO_SESSIONS; i++) {
-               struct lro *l_lro = &sp->lro0_n[i];
+               struct lro *l_lro = &ring_data->lro0_n[i];
                if (l_lro->in_use) {
                        if (check_for_socket_match(l_lro, ip, tcph))
                                continue;
@@ -8448,7 +8459,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
                }
 
                for (i=0; i<MAX_LRO_SESSIONS; i++) {
-                       struct lro *l_lro = &sp->lro0_n[i];
+                       struct lro *l_lro = &ring_data->lro0_n[i];
                        if (!(l_lro->in_use)) {
                                *lro = l_lro;
                                ret = 3; /* Begin anew */
index ce53a02105f25ab99aee4e017c74b4c04a2e2600..0709ebae91397326cb20fe554e002d35428d8c12 100644 (file)
@@ -678,11 +678,53 @@ struct rx_block_info {
        struct rxd_info *rxds;
 };
 
+/* Data structure to represent a LRO session */
+struct lro {
+       struct sk_buff  *parent;
+       struct sk_buff  *last_frag;
+       u8              *l2h;
+       struct iphdr    *iph;
+       struct tcphdr   *tcph;
+       u32             tcp_next_seq;
+       __be32          tcp_ack;
+       int             total_len;
+       int             frags_len;
+       int             sg_num;
+       int             in_use;
+       __be16          window;
+       u16             vlan_tag;
+       u32             cur_tsval;
+       __be32          cur_tsecr;
+       u8              saw_ts;
+} ____cacheline_aligned;
+
 /* Ring specific structure */
 struct ring_info {
        /* The ring number */
        int ring_no;
 
+       /* per-ring buffer counter */
+       u32 rx_bufs_left;
+
+       #define MAX_LRO_SESSIONS        32
+       struct lro lro0_n[MAX_LRO_SESSIONS];
+       u8              lro;
+
+       /* copy of sp->rxd_mode flag */
+       int rxd_mode;
+
+       /* Number of rxds per block for the rxd_mode */
+       int rxd_count;
+
+       /* copy of sp pointer */
+       struct s2io_nic *nic;
+
+       /* copy of sp->dev pointer */
+       struct net_device *dev;
+
+       /* copy of sp->pdev pointer */
+       struct pci_dev *pdev;
+
        /*
         *  Place holders for the virtual and physical addresses of
         *  all the Rx Blocks
@@ -703,10 +745,16 @@ struct ring_info {
         */
        struct rx_curr_get_info rx_curr_get_info;
 
+       /* interface MTU value */
+       unsigned mtu;
+
        /* Buffer Address store. */
        struct buffAdd **ba;
-       struct s2io_nic *nic;
-};
+
+       /* per-Ring statistics */
+       unsigned long rx_packets;
+       unsigned long rx_bytes;
+} ____cacheline_aligned;
 
 /* Fifo specific structure */
 struct fifo_info {
@@ -813,26 +861,6 @@ struct msix_info_st {
        u64 data;
 };
 
-/* Data structure to represent a LRO session */
-struct lro {
-       struct sk_buff  *parent;
-       struct sk_buff  *last_frag;
-       u8              *l2h;
-       struct iphdr    *iph;
-       struct tcphdr   *tcph;
-       u32             tcp_next_seq;
-       __be32          tcp_ack;
-       int             total_len;
-       int             frags_len;
-       int             sg_num;
-       int             in_use;
-       __be16          window;
-       u16             vlan_tag;
-       u32             cur_tsval;
-       __be32          cur_tsecr;
-       u8              saw_ts;
-} ____cacheline_aligned;
-
 /* These flags represent the devices temporary state */
 enum s2io_device_state_t
 {
@@ -872,8 +900,6 @@ struct s2io_nic {
        /* Space to back up the PCI config space */
        u32 config_space[256 / sizeof(u32)];
 
-       atomic_t rx_bufs_left[MAX_RX_RINGS];
-
 #define PROMISC     1
 #define ALL_MULTI   2
 
@@ -950,8 +976,6 @@ struct s2io_nic {
 #define XFRAME_II_DEVICE       2
        u8 device_type;
 
-#define MAX_LRO_SESSIONS       32
-       struct lro lro0_n[MAX_LRO_SESSIONS];
        unsigned long   clubbed_frms_cnt;
        unsigned long   sending_both;
        u8              lro;
@@ -1118,9 +1142,9 @@ static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr);
 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset);
 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr);
 
-static int
-s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
-                     struct RxD_t *rxdp, struct s2io_nic *sp);
+static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer,
+       u8 **tcp, u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp,
+       struct s2io_nic *sp);
 static void clear_lro_session(struct lro *lro);
 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag);
 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);
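With dev, pdev, mtu, rxd_mode, rxd_count and the per-ring counters folded into struct ring_info, rx-path code can be written against the ring alone, as the reworked fill_rx_buffers() and s2io_msix_ring_handle() above show. A minimal sketch in that spirit (refill_one_ring is a hypothetical helper, not something the patch adds):

static void refill_one_ring(struct ring_info *ring)
{
	/* Everything the refill path needs now hangs off the ring itself. */
	if (fill_rx_buffers(ring) == -ENOMEM)
		DBG_PRINT(INFO_DBG, "%s: out of memory on ring %d (%u buffers left)\n",
			  ring->dev->name, ring->ring_no, ring->rx_bufs_left);
}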
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig
new file mode 100644 (file)
index 0000000..dbad95c
--- /dev/null
@@ -0,0 +1,12 @@
+config SFC
+       tristate "Solarflare Solarstorm SFC4000 support"
+       depends on PCI && INET
+       select MII
+       select INET_LRO
+       select CRC32
+       help
+         This driver supports 10-gigabit Ethernet cards based on
+         the Solarflare Communications Solarstorm SFC4000 controller.
+
+         To compile this driver as a module, choose M here.  The module
+         will be called sfc.
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
new file mode 100644 (file)
index 0000000..0f02344
--- /dev/null
@@ -0,0 +1,5 @@
+sfc-y                  += efx.o falcon.o tx.o rx.o falcon_xmac.o \
+                          i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \
+                          tenxpress.o boards.o sfe4001.o
+
+obj-$(CONFIG_SFC)      += sfc.o
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
new file mode 100644 (file)
index 0000000..2806201
--- /dev/null
@@ -0,0 +1,508 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_BITFIELD_H
+#define EFX_BITFIELD_H
+
+/*
+ * Efx bitfield access
+ *
+ * Efx NICs make extensive use of bitfields up to 128 bits
+ * wide.  Since there is no native 128-bit datatype on most systems,
+ * and since 64-bit datatypes are inefficient on 32-bit systems and
+ * vice versa, we wrap accesses in a way that uses the most efficient
+ * datatype.
+ *
+ * The NICs are PCI devices and therefore little-endian.  Since most
+ * of the quantities that we deal with are DMAed to/from host memory,
+ * we define our datatypes (efx_oword_t, efx_qword_t and
+ * efx_dword_t) to be little-endian.
+ */
+
+/* Lowest bit numbers and widths */
+#define EFX_DUMMY_FIELD_LBN 0
+#define EFX_DUMMY_FIELD_WIDTH 0
+#define EFX_DWORD_0_LBN 0
+#define EFX_DWORD_0_WIDTH 32
+#define EFX_DWORD_1_LBN 32
+#define EFX_DWORD_1_WIDTH 32
+#define EFX_DWORD_2_LBN 64
+#define EFX_DWORD_2_WIDTH 32
+#define EFX_DWORD_3_LBN 96
+#define EFX_DWORD_3_WIDTH 32
+
+/* Specified attribute (e.g. LBN) of the specified field */
+#define EFX_VAL(field, attribute) field ## _ ## attribute
+/* Low bit number of the specified field */
+#define EFX_LOW_BIT(field) EFX_VAL(field, LBN)
+/* Bit width of the specified field */
+#define EFX_WIDTH(field) EFX_VAL(field, WIDTH)
+/* High bit number of the specified field */
+#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1)
+/* Mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ *
+ * The maximum width mask that can be generated is 64 bits.
+ */
+#define EFX_MASK64(field)                                      \
+       (EFX_WIDTH(field) == 64 ? ~((u64) 0) :          \
+        (((((u64) 1) << EFX_WIDTH(field))) - 1))
+
+/* Mask equal in width to the specified field.
+ *
+ * For example, a field with width 5 would have a mask of 0x1f.
+ *
+ * The maximum width mask that can be generated is 32 bits.  Use
+ * EFX_MASK64 for higher width fields.
+ */
+#define EFX_MASK32(field)                                      \
+       (EFX_WIDTH(field) == 32 ? ~((u32) 0) :          \
+        (((((u32) 1) << EFX_WIDTH(field))) - 1))
+
+/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
+typedef union efx_dword {
+       __le32 u32[1];
+} efx_dword_t;
+
+/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
+typedef union efx_qword {
+       __le64 u64[1];
+       __le32 u32[2];
+       efx_dword_t dword[2];
+} efx_qword_t;
+
+/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
+typedef union efx_oword {
+       __le64 u64[2];
+       efx_qword_t qword[2];
+       __le32 u32[4];
+       efx_dword_t dword[4];
+} efx_oword_t;
+
+/* Format string and value expanders for printk */
+#define EFX_DWORD_FMT "%08x"
+#define EFX_QWORD_FMT "%08x:%08x"
+#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x"
+#define EFX_DWORD_VAL(dword)                           \
+       ((unsigned int) le32_to_cpu((dword).u32[0]))
+#define EFX_QWORD_VAL(qword)                           \
+       ((unsigned int) le32_to_cpu((qword).u32[1])),   \
+       ((unsigned int) le32_to_cpu((qword).u32[0]))
+#define EFX_OWORD_VAL(oword)                           \
+       ((unsigned int) le32_to_cpu((oword).u32[3])),   \
+       ((unsigned int) le32_to_cpu((oword).u32[2])),   \
+       ((unsigned int) le32_to_cpu((oword).u32[1])),   \
+       ((unsigned int) le32_to_cpu((oword).u32[0]))
+
+/*
+ * Extract bit field portion [low,high) from the native-endian element
+ * which contains bits [min,max).
+ *
+ * For example, suppose "element" represents the high 32 bits of a
+ * 64-bit value, and we wish to extract the bits belonging to the bit
+ * field occupying bits 28-45 of this 64-bit value.
+ *
+ * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give
+ *
+ *   ( element ) << 4
+ *
+ * The result will contain the relevant bits filled in over the range
+ * [0,high-low), with garbage in bits [high-low+1,...).
+ */
+#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high)                \
+       (((low > max) || (high < min)) ? 0 :                            \
+        ((low > min) ?                                                 \
+         ((native_element) >> (low - min)) :                           \
+         ((native_element) << (min - low))))
+
+/*
+ * Extract bit field portion [low,high) from the 64-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT64(element, min, max, low, high)                    \
+       EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
+
+/*
+ * Extract bit field portion [low,high) from the 32-bit little-endian
+ * element which contains bits [min,max)
+ */
+#define EFX_EXTRACT32(element, min, max, low, high)                    \
+       EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
+
+#define EFX_EXTRACT_OWORD64(oword, low, high)                          \
+       (EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) |              \
+        EFX_EXTRACT64((oword).u64[1], 64, 127, low, high))
+
+#define EFX_EXTRACT_QWORD64(qword, low, high)                          \
+       EFX_EXTRACT64((qword).u64[0], 0, 63, low, high)
+
+#define EFX_EXTRACT_OWORD32(oword, low, high)                          \
+       (EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) |              \
+        EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) |             \
+        EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) |             \
+        EFX_EXTRACT32((oword).u32[3], 96, 127, low, high))
+
+#define EFX_EXTRACT_QWORD32(qword, low, high)                          \
+       (EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) |              \
+        EFX_EXTRACT32((qword).u32[1], 32, 63, low, high))
+
+#define EFX_EXTRACT_DWORD(dword, low, high)                            \
+       EFX_EXTRACT32((dword).u32[0], 0, 31, low, high)
+
+#define EFX_OWORD_FIELD64(oword, field)                                        \
+       (EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
+        & EFX_MASK64(field))
+
+#define EFX_QWORD_FIELD64(qword, field)                                        \
+       (EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
+        & EFX_MASK64(field))
+
+#define EFX_OWORD_FIELD32(oword, field)                                        \
+       (EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
+        & EFX_MASK32(field))
+
+#define EFX_QWORD_FIELD32(qword, field)                                        \
+       (EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
+        & EFX_MASK32(field))
+
+#define EFX_DWORD_FIELD(dword, field)                                     \
+       (EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \
+        & EFX_MASK32(field))
+
+#define EFX_OWORD_IS_ZERO64(oword)                                     \
+       (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
+
+#define EFX_QWORD_IS_ZERO64(qword)                                     \
+       (((qword).u64[0]) == (__force __le64) 0)
+
+#define EFX_OWORD_IS_ZERO32(oword)                                          \
+       (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
+        == (__force __le32) 0)
+
+#define EFX_QWORD_IS_ZERO32(qword)                                     \
+       (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
+
+#define EFX_DWORD_IS_ZERO(dword)                                       \
+       (((dword).u32[0]) == (__force __le32) 0)
+
+#define EFX_OWORD_IS_ALL_ONES64(oword)                                 \
+       (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
+
+#define EFX_QWORD_IS_ALL_ONES64(qword)                                 \
+       ((qword).u64[0] == ~((__force __le64) 0))
+
+#define EFX_OWORD_IS_ALL_ONES32(oword)                                 \
+       (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
+        == ~((__force __le32) 0))
+
+#define EFX_QWORD_IS_ALL_ONES32(qword)                                 \
+       (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
+
+#define EFX_DWORD_IS_ALL_ONES(dword)                                   \
+       ((dword).u32[0] == ~((__force __le32) 0))
+
+#if BITS_PER_LONG == 64
+#define EFX_OWORD_FIELD                EFX_OWORD_FIELD64
+#define EFX_QWORD_FIELD                EFX_QWORD_FIELD64
+#define EFX_OWORD_IS_ZERO      EFX_OWORD_IS_ZERO64
+#define EFX_QWORD_IS_ZERO      EFX_QWORD_IS_ZERO64
+#define EFX_OWORD_IS_ALL_ONES  EFX_OWORD_IS_ALL_ONES64
+#define EFX_QWORD_IS_ALL_ONES  EFX_QWORD_IS_ALL_ONES64
+#else
+#define EFX_OWORD_FIELD                EFX_OWORD_FIELD32
+#define EFX_QWORD_FIELD                EFX_QWORD_FIELD32
+#define EFX_OWORD_IS_ZERO      EFX_OWORD_IS_ZERO32
+#define EFX_QWORD_IS_ZERO      EFX_QWORD_IS_ZERO32
+#define EFX_OWORD_IS_ALL_ONES  EFX_OWORD_IS_ALL_ONES32
+#define EFX_QWORD_IS_ALL_ONES  EFX_QWORD_IS_ALL_ONES32
+#endif
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the bit field [low,high) that lies within
+ * the range [min,max).
+ */
+#define EFX_INSERT_NATIVE64(min, max, low, high, value)                \
+       (((low > max) || (high < min)) ? 0 :                    \
+        ((low > min) ?                                         \
+         (((u64) (value)) << (low - min)) :            \
+         (((u64) (value)) >> (min - low))))
+
+#define EFX_INSERT_NATIVE32(min, max, low, high, value)                \
+       (((low > max) || (high < min)) ? 0 :                    \
+        ((low > min) ?                                         \
+         (((u32) (value)) << (low - min)) :            \
+         (((u32) (value)) >> (min - low))))
+
+#define EFX_INSERT_NATIVE(min, max, low, high, value)          \
+       ((((max - min) >= 32) || ((high - low) >= 32)) ?        \
+        EFX_INSERT_NATIVE64(min, max, low, high, value) :      \
+        EFX_INSERT_NATIVE32(min, max, low, high, value))
+
+/*
+ * Construct bit field portion
+ *
+ * Creates the portion of the named bit field that lies within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELD_NATIVE(min, max, field, value)                \
+       EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field),         \
+                         EFX_HIGH_BIT(field), value)
+
+/*
+ * Construct bit field
+ *
+ * Creates the portion of the named bit fields that lie within the
+ * range [min,max).
+ */
+#define EFX_INSERT_FIELDS_NATIVE(min, max,                             \
+                                field1, value1,                        \
+                                field2, value2,                        \
+                                field3, value3,                        \
+                                field4, value4,                        \
+                                field5, value5,                        \
+                                field6, value6,                        \
+                                field7, value7,                        \
+                                field8, value8,                        \
+                                field9, value9,                        \
+                                field10, value10)                      \
+       (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) |      \
+        EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) |      \
+        EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) |      \
+        EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) |      \
+        EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) |      \
+        EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) |      \
+        EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) |      \
+        EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) |      \
+        EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) |      \
+        EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)))
+
+#define EFX_INSERT_FIELDS64(...)                               \
+       cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
+
+#define EFX_INSERT_FIELDS32(...)                               \
+       cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
+
+#define EFX_POPULATE_OWORD64(oword, ...) do {                          \
+       (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__);       \
+       (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__);     \
+       } while (0)
+
+#define EFX_POPULATE_QWORD64(qword, ...) do {                          \
+       (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__);       \
+       } while (0)
+
+#define EFX_POPULATE_OWORD32(oword, ...) do {                          \
+       (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__);       \
+       (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__);      \
+       (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__);      \
+       (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__);     \
+       } while (0)
+
+#define EFX_POPULATE_QWORD32(qword, ...) do {                          \
+       (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__);       \
+       (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__);      \
+       } while (0)
+
+#define EFX_POPULATE_DWORD(dword, ...) do {                            \
+       (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__);       \
+       } while (0)
+
+#if BITS_PER_LONG == 64
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
+#else
+#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
+#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
+#endif
+
+/* Populate an octword field with various numbers of arguments */
+#define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD
+#define EFX_POPULATE_OWORD_9(oword, ...) \
+       EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_8(oword, ...) \
+       EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_7(oword, ...) \
+       EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_6(oword, ...) \
+       EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_5(oword, ...) \
+       EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_4(oword, ...) \
+       EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_3(oword, ...) \
+       EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_2(oword, ...) \
+       EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_OWORD_1(oword, ...) \
+       EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_OWORD(oword) \
+       EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_OWORD(oword) \
+       EFX_POPULATE_OWORD_4(oword, \
+                            EFX_DWORD_0, 0xffffffff, \
+                            EFX_DWORD_1, 0xffffffff, \
+                            EFX_DWORD_2, 0xffffffff, \
+                            EFX_DWORD_3, 0xffffffff)
+
+/* Populate a quadword field with various numbers of arguments */
+#define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD
+#define EFX_POPULATE_QWORD_9(qword, ...) \
+       EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_8(qword, ...) \
+       EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_7(qword, ...) \
+       EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_6(qword, ...) \
+       EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_5(qword, ...) \
+       EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_4(qword, ...) \
+       EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_3(qword, ...) \
+       EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_2(qword, ...) \
+       EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_QWORD_1(qword, ...) \
+       EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_QWORD(qword) \
+       EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_QWORD(qword) \
+       EFX_POPULATE_QWORD_2(qword, \
+                            EFX_DWORD_0, 0xffffffff, \
+                            EFX_DWORD_1, 0xffffffff)
+
+/* Populate a dword field with various numbers of arguments */
+#define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD
+#define EFX_POPULATE_DWORD_9(dword, ...) \
+       EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_8(dword, ...) \
+       EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_7(dword, ...) \
+       EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_6(dword, ...) \
+       EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_5(dword, ...) \
+       EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_4(dword, ...) \
+       EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_3(dword, ...) \
+       EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_2(dword, ...) \
+       EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_POPULATE_DWORD_1(dword, ...) \
+       EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
+#define EFX_ZERO_DWORD(dword) \
+       EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0)
+#define EFX_SET_DWORD(dword) \
+       EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff)
+
+/*
+ * Modify a named field within an already-populated structure.  Used
+ * for read-modify-write operations.
+ */
+
+#define EFX_INVERT_OWORD(oword) do {           \
+       (oword).u64[0] = ~((oword).u64[0]);     \
+       (oword).u64[1] = ~((oword).u64[1]);     \
+       } while (0)
+
+#define EFX_INSERT_FIELD64(...)                                        \
+       cpu_to_le64(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
+
+#define EFX_INSERT_FIELD32(...)                                        \
+       cpu_to_le32(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__))
+
+#define EFX_INPLACE_MASK64(min, max, field)                    \
+       EFX_INSERT_FIELD64(min, max, field, EFX_MASK64(field))
+
+#define EFX_INPLACE_MASK32(min, max, field)                    \
+       EFX_INSERT_FIELD32(min, max, field, EFX_MASK32(field))
+
+#define EFX_SET_OWORD_FIELD64(oword, field, value) do {                        \
+       (oword).u64[0] = (((oword).u64[0]                               \
+                          & ~EFX_INPLACE_MASK64(0,  63, field))        \
+                         | EFX_INSERT_FIELD64(0,  63, field, value));  \
+       (oword).u64[1] = (((oword).u64[1]                               \
+                          & ~EFX_INPLACE_MASK64(64, 127, field))       \
+                         | EFX_INSERT_FIELD64(64, 127, field, value)); \
+       } while (0)
+
+#define EFX_SET_QWORD_FIELD64(qword, field, value) do {                        \
+       (qword).u64[0] = (((qword).u64[0]                               \
+                          & ~EFX_INPLACE_MASK64(0, 63, field))         \
+                         | EFX_INSERT_FIELD64(0, 63, field, value));   \
+       } while (0)
+
+#define EFX_SET_OWORD_FIELD32(oword, field, value) do {                        \
+       (oword).u32[0] = (((oword).u32[0]                               \
+                          & ~EFX_INPLACE_MASK32(0, 31, field))         \
+                         | EFX_INSERT_FIELD32(0, 31, field, value));   \
+       (oword).u32[1] = (((oword).u32[1]                               \
+                          & ~EFX_INPLACE_MASK32(32, 63, field))        \
+                         | EFX_INSERT_FIELD32(32, 63, field, value));  \
+       (oword).u32[2] = (((oword).u32[2]                               \
+                          & ~EFX_INPLACE_MASK32(64, 95, field))        \
+                         | EFX_INSERT_FIELD32(64, 95, field, value));  \
+       (oword).u32[3] = (((oword).u32[3]                               \
+                          & ~EFX_INPLACE_MASK32(96, 127, field))       \
+                         | EFX_INSERT_FIELD32(96, 127, field, value)); \
+       } while (0)
+
+#define EFX_SET_QWORD_FIELD32(qword, field, value) do {                        \
+       (qword).u32[0] = (((qword).u32[0]                               \
+                          & ~EFX_INPLACE_MASK32(0, 31, field))         \
+                         | EFX_INSERT_FIELD32(0, 31, field, value));   \
+       (qword).u32[1] = (((qword).u32[1]                               \
+                          & ~EFX_INPLACE_MASK32(32, 63, field))        \
+                         | EFX_INSERT_FIELD32(32, 63, field, value));  \
+       } while (0)
+
+#define EFX_SET_DWORD_FIELD(dword, field, value) do {                  \
+       (dword).u32[0] = (((dword).u32[0]                               \
+                          & ~EFX_INPLACE_MASK32(0, 31, field))         \
+                         | EFX_INSERT_FIELD32(0, 31, field, value));   \
+       } while (0)
+
+#if BITS_PER_LONG == 64
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
+#else
+#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
+#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
+#endif
+
+#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
+       if (FALCON_REV(efx) >= FALCON_REV_B0) {                    \
+               EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
+       } else { \
+               EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
+       } \
+} while (0)
+
+#define EFX_QWORD_FIELD_VER(efx, qword, field) \
+       (FALCON_REV(efx) >= FALCON_REV_B0 ?     \
+        EFX_QWORD_FIELD((qword), field##_B0) : \
+        EFX_QWORD_FIELD((qword), field##_A1))
+
+/* Used to avoid compiler warnings about shift range exceeding width
+ * of the data types when dma_addr_t is only 32 bits wide.
+ */
+#define DMA_ADDR_T_WIDTH       (8 * sizeof(dma_addr_t))
+#define EFX_DMA_TYPE_WIDTH(width) \
+       (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
+#define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \
+                         ~((u64) 0) : ~((u32) 0))
+#define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK)
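+/* e.g. with a 32-bit dma_addr_t, EFX_DMA_MAX_MASK is 0xffffffff and
+ * EFX_DMA_MASK() truncates wider masks to fit. */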
+
+#endif /* EFX_BITFIELD_H */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
new file mode 100644 (file)
index 0000000..eecaa6d
--- /dev/null
@@ -0,0 +1,167 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2007 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include "net_driver.h"
+#include "phy.h"
+#include "boards.h"
+#include "efx.h"
+
+/* Macros for unpacking the board revision */
+/* The revision info is in host byte order. */
+#define BOARD_TYPE(_rev) (_rev >> 8)
+#define BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
+#define BOARD_MINOR(_rev) (_rev & 0xf)
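+/* e.g. revision_info 0x0215 unpacks to board type 0x02, major 1, minor 5 */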
+
+/* Blink support. If the PHY has no auto-blink mode, we hang it off a timer */
+#define BLINK_INTERVAL (HZ/2)
+
+static void blink_led_timer(unsigned long context)
+{
+       struct efx_nic *efx = (struct efx_nic *)context;
+       struct efx_blinker *bl = &efx->board_info.blinker;
+       efx->board_info.set_fault_led(efx, bl->state);
+       bl->state = !bl->state;
+       if (bl->resubmit) {
+               bl->timer.expires = jiffies + BLINK_INTERVAL;
+               add_timer(&bl->timer);
+       }
+}
+
+static void board_blink(struct efx_nic *efx, int blink)
+{
+       struct efx_blinker *blinker = &efx->board_info.blinker;
+
+       /* The rtnl mutex serialises all ethtool ioctls, so
+        * nothing special needs doing here. */
+       if (blink) {
+               blinker->resubmit = 1;
+               blinker->state = 0;
+               setup_timer(&blinker->timer, blink_led_timer,
+                           (unsigned long)efx);
+               blinker->timer.expires = jiffies + BLINK_INTERVAL;
+               add_timer(&blinker->timer);
+       } else {
+               blinker->resubmit = 0;
+               if (blinker->timer.function)
+                       del_timer_sync(&blinker->timer);
+               efx->board_info.set_fault_led(efx, 0);
+       }
+}
+
+/*****************************************************************************
+ * Support for the SFE4002
+ *
+ */
+/****************************************************************************/
+/* LED allocations. Note that on rev A0 boards the schematic and the reality
+ * differ: red and green are swapped. Below is the fixed (A1) layout (there
+ * are only 3 A0 boards in existence, so no real reason to make this
+ * conditional).
+ */
+#define SFE4002_FAULT_LED (2)  /* Red */
+#define SFE4002_RX_LED    (0)  /* Green */
+#define SFE4002_TX_LED    (1)  /* Amber */
+
+static int sfe4002_init_leds(struct efx_nic *efx)
+{
+       /* Set the TX and RX LEDs to reflect status and activity, and
+        * turn the fault LED off */
+       xfp_set_led(efx, SFE4002_TX_LED,
+                   QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
+       xfp_set_led(efx, SFE4002_RX_LED,
+                   QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
+       xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
+       efx->board_info.blinker.led_num = SFE4002_FAULT_LED;
+       return 0;
+}
+
+static void sfe4002_fault_led(struct efx_nic *efx, int state)
+{
+       xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
+                       QUAKE_LED_OFF);
+}
+
+static int sfe4002_init(struct efx_nic *efx)
+{
+       efx->board_info.init_leds = sfe4002_init_leds;
+       efx->board_info.set_fault_led = sfe4002_fault_led;
+       efx->board_info.blink = board_blink;
+       return 0;
+}
+
+/* This will get expanded as board-specific details get moved out of the
+ * PHY drivers. */
+struct efx_board_data {
+       const char *ref_model;
+       const char *gen_type;
+       int (*init) (struct efx_nic *nic);
+};
+
+static int dummy_init(struct efx_nic *nic)
+{
+       return 0;
+}
+
+static struct efx_board_data board_data[] = {
+       [EFX_BOARD_INVALID] =
+       {NULL,      NULL,                  dummy_init},
+       [EFX_BOARD_SFE4001] =
+       {"SFE4001", "10GBASE-T adapter",   sfe4001_poweron},
+       [EFX_BOARD_SFE4002] =
+       {"SFE4002", "XFP adapter",         sfe4002_init},
+};
+
+int efx_set_board_info(struct efx_nic *efx, u16 revision_info)
+{
+       int rc = 0;
+       struct efx_board_data *data;
+
+       if (BOARD_TYPE(revision_info) >= EFX_BOARD_MAX) {
+               EFX_ERR(efx, "squashing unknown board type %d\n",
+                       BOARD_TYPE(revision_info));
+               revision_info = 0;
+       }
+
+       if (BOARD_TYPE(revision_info) == 0) {
+               efx->board_info.major = 0;
+               efx->board_info.minor = 0;
+               /* For early boards that don't have revision info, there is
+                * only 1 board for each PHY type, so we can work it out, with
+                * the exception of the PHY-less boards. */
+               switch (efx->phy_type) {
+               case PHY_TYPE_10XPRESS:
+                       efx->board_info.type = EFX_BOARD_SFE4001;
+                       break;
+               case PHY_TYPE_XFP:
+                       efx->board_info.type = EFX_BOARD_SFE4002;
+                       break;
+               default:
+                       efx->board_info.type = 0;
+                       break;
+               }
+       } else {
+               efx->board_info.type = BOARD_TYPE(revision_info);
+               efx->board_info.major = BOARD_MAJOR(revision_info);
+               efx->board_info.minor = BOARD_MINOR(revision_info);
+       }
+
+       data = &board_data[efx->board_info.type];
+
+       /* Report the board model number or generic type for recognisable
+        * boards. */
+       if (efx->board_info.type != 0)
+               EFX_INFO(efx, "board is %s rev %c%d\n",
+                        (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
+                        ? data->ref_model : data->gen_type,
+                        'A' + efx->board_info.major, efx->board_info.minor);
+
+       efx->board_info.init = data->init;
+
+       return rc;
+}
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
new file mode 100644 (file)
index 0000000..f56341d
--- /dev/null
@@ -0,0 +1,26 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2007 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_BOARDS_H
+#define EFX_BOARDS_H
+
+/* Board IDs (must fit in 8 bits) */
+enum efx_board_type {
+       EFX_BOARD_INVALID = 0,
+       EFX_BOARD_SFE4001 = 1,   /* SFE4001 (10GBASE-T) */
+       EFX_BOARD_SFE4002 = 2,
+       /* Insert new types before here */
+       EFX_BOARD_MAX
+};
+
+extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info);
+extern int sfe4001_poweron(struct efx_nic *efx);
+extern void sfe4001_poweroff(struct efx_nic *efx);
+
+#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
new file mode 100644 (file)
index 0000000..59edcf7
--- /dev/null
@@ -0,0 +1,2208 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/notifier.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include "net_driver.h"
+#include "gmii.h"
+#include "ethtool.h"
+#include "tx.h"
+#include "rx.h"
+#include "efx.h"
+#include "mdio_10g.h"
+#include "falcon.h"
+#include "workarounds.h"
+#include "mac.h"
+
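+/* The maximum MTU we accept in efx_change_mtu(): 9 * 1024 = 9216 bytes */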
+#define EFX_MAX_MTU (9 * 1024)
+
+/* RX slow fill workqueue. If memory allocation fails in the fast path,
+ * a work item is pushed onto this work queue to retry the allocation later,
+ * to avoid the NIC being starved of RX buffers. Since this is a per cpu
+ * workqueue, there is nothing to be gained in making it per NIC
+ */
+static struct workqueue_struct *refill_workqueue;
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ *************************************************************************/
+
+/*
+ * Enable large receive offload (LRO) aka soft segment reassembly (SSR)
+ *
+ * This sets the default for new devices.  It can be controlled later
+ * using ethtool.
+ */
+static int lro = 1;
+module_param(lro, int, 0644);
+MODULE_PARM_DESC(lro, "Large receive offload acceleration");
+
+/*
+ * Use separate channels for TX and RX events
+ *
+ * Set this to 1 to use separate channels for TX and RX. It allows us to
+ * apply a higher level of interrupt moderation to TX events.
+ *
+ * This is forced to 0 for MSI interrupt mode as the interrupt vector
+ * is not written
+ */
+static unsigned int separate_tx_and_rx_channels = 1;
+
+/* This is the weight assigned to each of the (per-channel) virtual
+ * NAPI devices.
+ */
+static int napi_weight = 64;
+
+/* This is the time (in jiffies) between invocations of the hardware
+ * monitor, which checks for known hardware bugs and resets the
+ * hardware and driver as necessary.
+ */
+unsigned int efx_monitor_interval = 1 * HZ;
+
+/* This controls whether or not the hardware monitor will trigger a
+ * reset when it detects an error condition.
+ */
+static unsigned int monitor_reset = 1;
+
+/* This controls whether or not the driver will initialise devices
+ * with invalid MAC addresses stored in the EEPROM or flash.  If true,
+ * such devices will be initialised with a random locally-generated
+ * MAC address.  This allows for loading the sfc_mtd driver to
+ * reprogram the flash, even if the flash contents (including the MAC
+ * address) have previously been erased.
+ */
+static unsigned int allow_bad_hwaddr;
+
+/* Initial interrupt moderation settings.  They can be modified after
+ * module load with ethtool.
+ *
+ * The default for RX should strike a balance between increasing the
+ * round-trip latency and reducing overhead.
+ */
+static unsigned int rx_irq_mod_usec = 60;
+
+/* Initial interrupt moderation settings.  They can be modified after
+ * module load with ethtool.
+ *
+ * This default is chosen to ensure that a 10G link does not go idle
+ * while a TX queue is stopped after it has become full.  A queue is
+ * restarted when it drops below half full.  The time this takes (assuming
+ * worst case 3 descriptors per packet and 1024 descriptors) is
+ *   512 / 3 * 1.2 = 205 usec.
+ */
+static unsigned int tx_irq_mod_usec = 150;
+
+/* This is the first interrupt mode to try out of:
+ * 0 => MSI-X
+ * 1 => MSI
+ * 2 => legacy
+ */
+static unsigned int interrupt_mode;
+
+/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
+ * i.e. the number of CPUs among which we may distribute simultaneous
+ * interrupt handling.
+ *
+ * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
+ * The default (0) means to assign an interrupt to each package (level II cache)
+ */
+static unsigned int rss_cpus;
+module_param(rss_cpus, uint, 0444);
+MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling");
+
+/**************************************************************************
+ *
+ * Utility functions and prototypes
+ *
+ *************************************************************************/
+static void efx_remove_channel(struct efx_channel *channel);
+static void efx_remove_port(struct efx_nic *efx);
+static void efx_fini_napi(struct efx_nic *efx);
+static void efx_fini_channels(struct efx_nic *efx);
+
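+/* Assert that the rtnl lock is held whenever the device is in a state
+ * (RUNNING or RESETTING) where a reset could race with the caller. */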
+#define EFX_ASSERT_RESET_SERIALISED(efx)               \
+       do {                                            \
+               if ((efx->state == STATE_RUNNING) ||    \
+                   (efx->state == STATE_RESETTING))    \
+                       ASSERT_RTNL();                  \
+       } while (0)
+
+/**************************************************************************
+ *
+ * Event queue processing
+ *
+ *************************************************************************/
+
+/* Process channel's event queue
+ *
+ * This function is responsible for processing the event queue of a
+ * single channel.  The caller must guarantee that this function will
+ * never be concurrently called more than once on the same channel,
+ * though different channels may be being processed concurrently.
+ */
+static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
+{
+       int rxdmaqs;
+       struct efx_rx_queue *rx_queue;
+
+       if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE ||
+                    !channel->enabled))
+               return rx_quota;
+
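+       /* rxdmaqs is returned as a bitmask of RX DMA queues; each set bit
+        * selects an RX queue whose descriptor ring is refilled below. */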
+       rxdmaqs = falcon_process_eventq(channel, &rx_quota);
+
+       /* Deliver last RX packet. */
+       if (channel->rx_pkt) {
+               __efx_rx_packet(channel, channel->rx_pkt,
+                               channel->rx_pkt_csummed);
+               channel->rx_pkt = NULL;
+       }
+
+       efx_flush_lro(channel);
+       efx_rx_strategy(channel);
+
+       /* Refill descriptor rings as necessary */
+       rx_queue = &channel->efx->rx_queue[0];
+       while (rxdmaqs) {
+               if (rxdmaqs & 0x01)
+                       efx_fast_push_rx_descriptors(rx_queue);
+               rx_queue++;
+               rxdmaqs >>= 1;
+       }
+
+       return rx_quota;
+}
+
+/* Mark channel as finished processing
+ *
+ * Note that since we will not receive further interrupts for this
+ * channel before we finish processing and call the eventq_read_ack()
+ * method, there is no need to use the interrupt hold-off timers.
+ */
+static inline void efx_channel_processed(struct efx_channel *channel)
+{
+       /* Write to EVQ_RPTR_REG.  If a new event arrived in a race
+        * with finishing processing, a new interrupt will be raised.
+        */
+       channel->work_pending = 0;
+       smp_wmb(); /* Ensure channel updated before any new interrupt. */
+       falcon_eventq_read_ack(channel);
+}
+
+/* NAPI poll handler
+ *
+ * NAPI guarantees serialisation of polls of the same device, which
+ * provides the guarantee required by efx_process_channel().
+ */
+static int efx_poll(struct napi_struct *napi, int budget)
+{
+       struct efx_channel *channel =
+               container_of(napi, struct efx_channel, napi_str);
+       struct net_device *napi_dev = channel->napi_dev;
+       int unused;
+       int rx_packets;
+
+       EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n",
+                 channel->channel, raw_smp_processor_id());
+
+       unused = efx_process_channel(channel, budget);
+       rx_packets = (budget - unused);
+
+       if (rx_packets < budget) {
+               /* There is no race here; although napi_disable() will
+                * only wait for netif_rx_complete(), this isn't a problem
+                * since efx_channel_processed() will have no effect if
+                * interrupts have already been disabled.
+                */
+               netif_rx_complete(napi_dev, napi);
+               efx_channel_processed(channel);
+       }
+
+       return rx_packets;
+}
+
+/* Process the eventq of the specified channel immediately on this CPU
+ *
+ * Disable hardware generated interrupts, wait for any existing
+ * processing to finish, then directly poll (and ack) the eventq.
+ * Finally reenable NAPI and interrupts.
+ *
+ * Since we are touching interrupts, the caller should hold the suspend lock.
+ */
+void efx_process_channel_now(struct efx_channel *channel)
+{
+       struct efx_nic *efx = channel->efx;
+
+       BUG_ON(!channel->used_flags);
+       BUG_ON(!channel->enabled);
+
+       /* Disable interrupts and wait for ISRs to complete */
+       falcon_disable_interrupts(efx);
+       if (efx->legacy_irq)
+               synchronize_irq(efx->legacy_irq);
+       if (channel->has_interrupt && channel->irq)
+               synchronize_irq(channel->irq);
+
+       /* Wait for any NAPI processing to complete */
+       napi_disable(&channel->napi_str);
+
+       /* Poll the channel */
+       (void) efx_process_channel(channel, efx->type->evq_size);
+
+       /* Ack the eventq. This may cause an interrupt to be generated
+        * when interrupts are re-enabled */
+       efx_channel_processed(channel);
+
+       napi_enable(&channel->napi_str);
+       falcon_enable_interrupts(efx);
+}
+
+/* Create event queue
+ * Event queue memory allocations are done only once.  If the channel
+ * is reset, the memory buffer will be reused; this guards against
+ * errors during channel reset and also simplifies interrupt handling.
+ */
+static int efx_probe_eventq(struct efx_channel *channel)
+{
+       EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel);
+
+       return falcon_probe_eventq(channel);
+}
+
+/* Prepare channel's event queue */
+static int efx_init_eventq(struct efx_channel *channel)
+{
+       EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel);
+
+       channel->eventq_read_ptr = 0;
+
+       return falcon_init_eventq(channel);
+}
+
+static void efx_fini_eventq(struct efx_channel *channel)
+{
+       EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel);
+
+       falcon_fini_eventq(channel);
+}
+
+static void efx_remove_eventq(struct efx_channel *channel)
+{
+       EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel);
+
+       falcon_remove_eventq(channel);
+}
+
+/**************************************************************************
+ *
+ * Channel handling
+ *
+ *************************************************************************/
+
+/* Setup per-NIC RX buffer parameters.
+ * Calculate the rx buffer allocation parameters required to support
+ * the current MTU, including padding for header alignment and overruns.
+ */
+static void efx_calc_rx_buffer_params(struct efx_nic *efx)
+{
+       unsigned int order, len;
+
+       len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
+              EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+              efx->type->rx_buffer_padding);
+
+       /* Calculate page-order */
+       for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
+               ;
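+       /* order is now the smallest n such that (1 << n) * PAGE_SIZE >= len */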
+
+       efx->rx_buffer_len = len;
+       efx->rx_buffer_order = order;
+}
+
+static int efx_probe_channel(struct efx_channel *channel)
+{
+       struct efx_tx_queue *tx_queue;
+       struct efx_rx_queue *rx_queue;
+       int rc;
+
+       EFX_LOG(channel->efx, "creating channel %d\n", channel->channel);
+
+       rc = efx_probe_eventq(channel);
+       if (rc)
+               goto fail1;
+
+       efx_for_each_channel_tx_queue(tx_queue, channel) {
+               rc = efx_probe_tx_queue(tx_queue);
+               if (rc)
+                       goto fail2;
+       }
+
+       efx_for_each_channel_rx_queue(rx_queue, channel) {
+               rc = efx_probe_rx_queue(rx_queue);
+               if (rc)
+                       goto fail3;
+       }
+
+       channel->n_rx_frm_trunc = 0;
+
+       return 0;
+
+ fail3:
+       efx_for_each_channel_rx_queue(rx_queue, channel)
+               efx_remove_rx_queue(rx_queue);
+ fail2:
+       efx_for_each_channel_tx_queue(tx_queue, channel)
+               efx_remove_tx_queue(tx_queue);
+ fail1:
+       return rc;
+}
+
+
+/* Channels are shut down and reinitialised whilst the NIC is running
+ * to propagate configuration changes (mtu, checksum offload), or
+ * to clear hardware error conditions
+ */
+static int efx_init_channels(struct efx_nic *efx)
+{
+       struct efx_tx_queue *tx_queue;
+       struct efx_rx_queue *rx_queue;
+       struct efx_channel *channel;
+       int rc = 0;
+
+       efx_calc_rx_buffer_params(efx);
+
+       /* Initialise the channels */
+       efx_for_each_channel(channel, efx) {
+               EFX_LOG(channel->efx, "init chan %d\n", channel->channel);
+
+               rc = efx_init_eventq(channel);
+               if (rc)
+                       goto err;
+
+               efx_for_each_channel_tx_queue(tx_queue, channel) {
+                       rc = efx_init_tx_queue(tx_queue);
+                       if (rc)
+                               goto err;
+               }
+
+               /* The rx buffer allocation strategy is MTU dependent */
+               efx_rx_strategy(channel);
+
+               efx_for_each_channel_rx_queue(rx_queue, channel) {
+                       rc = efx_init_rx_queue(rx_queue);
+                       if (rc)
+                               goto err;
+               }
+
+               WARN_ON(channel->rx_pkt != NULL);
+               efx_rx_strategy(channel);
+       }
+
+       return 0;
+
+ err:
+       EFX_ERR(efx, "failed to initialise channel %d\n",
+               channel ? channel->channel : -1);
+       efx_fini_channels(efx);
+       return rc;
+}
+
+/* This enables event queue processing and packet transmission.
+ *
+ * Note that this function is not allowed to fail, since that would
+ * introduce too much complexity into the suspend/resume path.
+ */
+static void efx_start_channel(struct efx_channel *channel)
+{
+       struct efx_rx_queue *rx_queue;
+
+       EFX_LOG(channel->efx, "starting chan %d\n", channel->channel);
+
+       if (!(channel->efx->net_dev->flags & IFF_UP))
+               netif_napi_add(channel->napi_dev, &channel->napi_str,
+                              efx_poll, napi_weight);
+
+       channel->work_pending = 0;
+       channel->enabled = 1;
+       smp_wmb(); /* ensure channel updated before first interrupt */
+
+       napi_enable(&channel->napi_str);
+
+       /* Load up RX descriptors */
+       efx_for_each_channel_rx_queue(rx_queue, channel)
+               efx_fast_push_rx_descriptors(rx_queue);
+}
+
+/* This disables event queue processing and packet transmission.
+ * This function does not guarantee that all queue processing
+ * (e.g. RX refill) is complete.
+ */
+static void efx_stop_channel(struct efx_channel *channel)
+{
+       struct efx_rx_queue *rx_queue;
+
+       if (!channel->enabled)
+               return;
+
+       EFX_LOG(channel->efx, "stop chan %d\n", channel->channel);
+
+       channel->enabled = 0;
+       napi_disable(&channel->napi_str);
+
+       /* Ensure that any worker threads have exited or will be no-ops */
+       efx_for_each_channel_rx_queue(rx_queue, channel) {
+               spin_lock_bh(&rx_queue->add_lock);
+               spin_unlock_bh(&rx_queue->add_lock);
+       }
+}
+
+static void efx_fini_channels(struct efx_nic *efx)
+{
+       struct efx_channel *channel;
+       struct efx_tx_queue *tx_queue;
+       struct efx_rx_queue *rx_queue;
+
+       EFX_ASSERT_RESET_SERIALISED(efx);
+       BUG_ON(efx->port_enabled);
+
+       efx_for_each_channel(channel, efx) {
+               EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel);
+
+               efx_for_each_channel_rx_queue(rx_queue, channel)
+                       efx_fini_rx_queue(rx_queue);
+               efx_for_each_channel_tx_queue(tx_queue, channel)
+                       efx_fini_tx_queue(tx_queue);
+       }
+
+       /* Do the event queues last so that we can handle flush events
+        * for all DMA queues. */
+       efx_for_each_channel(channel, efx) {
+               EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel);
+
+               efx_fini_eventq(channel);
+       }
+}
+
+static void efx_remove_channel(struct efx_channel *channel)
+{
+       struct efx_tx_queue *tx_queue;
+       struct efx_rx_queue *rx_queue;
+
+       EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel);
+
+       efx_for_each_channel_rx_queue(rx_queue, channel)
+               efx_remove_rx_queue(rx_queue);
+       efx_for_each_channel_tx_queue(tx_queue, channel)
+               efx_remove_tx_queue(tx_queue);
+       efx_remove_eventq(channel);
+
+       channel->used_flags = 0;
+}
+
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
+{
+       queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
+}
+
+/**************************************************************************
+ *
+ * Port handling
+ *
+ **************************************************************************/
+
+/* This ensures that the kernel is kept informed (via
+ * netif_carrier_on/off) of the link status, and also keeps the
+ * port's TX queue started or stopped in line with the link status.
+ */
+static void efx_link_status_changed(struct efx_nic *efx)
+{
+       int carrier_ok;
+
+       /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure
+        * that no events are triggered between unregister_netdev() and the
+        * driver unloading. A more general condition is that NETDEV_CHANGE
+        * can only be generated between NETDEV_UP and NETDEV_DOWN */
+       if (!netif_running(efx->net_dev))
+               return;
+
+       carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0;
+       if (efx->link_up != carrier_ok) {
+               efx->n_link_state_changes++;
+
+               if (efx->link_up)
+                       netif_carrier_on(efx->net_dev);
+               else
+                       netif_carrier_off(efx->net_dev);
+       }
+
+       /* Status message for kernel log */
+       if (efx->link_up) {
+               struct mii_if_info *gmii = &efx->mii;
+               unsigned adv, lpa;
+               /* NONE here means direct XAUI from the controller, with no
+                * MDIO-attached device we can query. */
+               if (efx->phy_type != PHY_TYPE_NONE) {
+                       adv = gmii_advertised(gmii);
+                       lpa = gmii_lpa(gmii);
+               } else {
+                       lpa = GM_LPA_10000 | LPA_DUPLEX;
+                       adv = lpa;
+               }
+               EFX_INFO(efx, "link up at %dMbps %s-duplex "
+                        "(adv %04x lpa %04x) (MTU %d)%s\n",
+                        (efx->link_options & GM_LPA_10000 ? 10000 :
+                         (efx->link_options & GM_LPA_1000 ? 1000 :
+                          (efx->link_options & GM_LPA_100 ? 100 :
+                           10))),
+                        (efx->link_options & GM_LPA_DUPLEX ?
+                         "full" : "half"),
+                        adv, lpa,
+                        efx->net_dev->mtu,
+                        (efx->promiscuous ? " [PROMISC]" : ""));
+       } else {
+               EFX_INFO(efx, "link down\n");
+       }
+
+}
+
+/* This call reinitialises the MAC to pick up new PHY settings. The
+ * caller must hold the mac_lock */
+static void __efx_reconfigure_port(struct efx_nic *efx)
+{
+       WARN_ON(!mutex_is_locked(&efx->mac_lock));
+
+       EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n",
+               raw_smp_processor_id());
+
+       falcon_reconfigure_xmac(efx);
+
+       /* Inform kernel of loss/gain of carrier */
+       efx_link_status_changed(efx);
+}
+
+/* Reinitialise the MAC to pick up new PHY settings, even if the port is
+ * disabled. */
+void efx_reconfigure_port(struct efx_nic *efx)
+{
+       EFX_ASSERT_RESET_SERIALISED(efx);
+
+       mutex_lock(&efx->mac_lock);
+       __efx_reconfigure_port(efx);
+       mutex_unlock(&efx->mac_lock);
+}
+
+/* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all()
+ * we don't call efx_reconfigure_port() if the port is disabled. Care is taken
+ * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */
+static void efx_reconfigure_work(struct work_struct *data)
+{
+       struct efx_nic *efx = container_of(data, struct efx_nic,
+                                          reconfigure_work);
+
+       mutex_lock(&efx->mac_lock);
+       if (efx->port_enabled)
+               __efx_reconfigure_port(efx);
+       mutex_unlock(&efx->mac_lock);
+}
+
+static int efx_probe_port(struct efx_nic *efx)
+{
+       int rc;
+
+       EFX_LOG(efx, "create port\n");
+
+       /* Connect up MAC/PHY operations table and read MAC address */
+       rc = falcon_probe_port(efx);
+       if (rc)
+               goto err;
+
+       /* Sanity check MAC address */
+       if (is_valid_ether_addr(efx->mac_address)) {
+               memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
+       } else {
+               DECLARE_MAC_BUF(mac);
+
+               EFX_ERR(efx, "invalid MAC address %s\n",
+                       print_mac(mac, efx->mac_address));
+               if (!allow_bad_hwaddr) {
+                       rc = -EINVAL;
+                       goto err;
+               }
+               random_ether_addr(efx->net_dev->dev_addr);
+               EFX_INFO(efx, "using locally-generated MAC %s\n",
+                        print_mac(mac, efx->net_dev->dev_addr));
+       }
+
+       return 0;
+
+ err:
+       efx_remove_port(efx);
+       return rc;
+}
+
+static int efx_init_port(struct efx_nic *efx)
+{
+       int rc;
+
+       EFX_LOG(efx, "init port\n");
+
+       /* Initialise the MAC and PHY */
+       rc = falcon_init_xmac(efx);
+       if (rc)
+               return rc;
+
+       efx->port_initialized = 1;
+
+       /* Reconfigure port to program MAC registers */
+       falcon_reconfigure_xmac(efx);
+
+       return 0;
+}
+
+/* Allow efx_reconfigure_port() to be scheduled, and close the window
+ * between efx_stop_port and efx_flush_all whereby a previously scheduled
+ * efx_reconfigure_port() may have been cancelled */
+static void efx_start_port(struct efx_nic *efx)
+{
+       EFX_LOG(efx, "start port\n");
+       BUG_ON(efx->port_enabled);
+
+       mutex_lock(&efx->mac_lock);
+       efx->port_enabled = 1;
+       __efx_reconfigure_port(efx);
+       mutex_unlock(&efx->mac_lock);
+}
+
+/* Prevent efx_reconfigure_work and efx_monitor() from executing, and
+ * efx_set_multicast_list() from scheduling efx_reconfigure_work.
+ * efx_reconfigure_work can still be scheduled via NAPI processing
+ * until efx_flush_all() is called */
+static void efx_stop_port(struct efx_nic *efx)
+{
+       EFX_LOG(efx, "stop port\n");
+
+       mutex_lock(&efx->mac_lock);
+       efx->port_enabled = 0;
+       mutex_unlock(&efx->mac_lock);
+
+       /* Serialise against efx_set_multicast_list() */
+       if (NET_DEV_REGISTERED(efx)) {
+               netif_tx_lock_bh(efx->net_dev);
+               netif_tx_unlock_bh(efx->net_dev);
+       }
+}
+
+static void efx_fini_port(struct efx_nic *efx)
+{
+       EFX_LOG(efx, "shut down port\n");
+
+       if (!efx->port_initialized)
+               return;
+
+       falcon_fini_xmac(efx);
+       efx->port_initialized = 0;
+
+       efx->link_up = 0;
+       efx_link_status_changed(efx);
+}
+
+static void efx_remove_port(struct efx_nic *efx)
+{
+       EFX_LOG(efx, "destroying port\n");
+
+       falcon_remove_port(efx);
+}
+
+/**************************************************************************
+ *
+ * NIC handling
+ *
+ **************************************************************************/
+
+/* This configures the PCI device to enable I/O and DMA. */
+static int efx_init_io(struct efx_nic *efx)
+{
+       struct pci_dev *pci_dev = efx->pci_dev;
+       dma_addr_t dma_mask = efx->type->max_dma_mask;
+       int rc;
+
+       EFX_LOG(efx, "initialising I/O\n");
+
+       rc = pci_enable_device(pci_dev);
+       if (rc) {
+               EFX_ERR(efx, "failed to enable PCI device\n");
+               goto fail1;
+       }
+
+       pci_set_master(pci_dev);
+
+       /* Set the PCI DMA mask.  Try all possibilities from our
+        * genuine mask down to 32 bits, because some architectures
+        * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit
+        * masks even though they reject 46 bit masks.
+        */
+       while (dma_mask > 0x7fffffffUL) {
+               if (pci_dma_supported(pci_dev, dma_mask) &&
+                   ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0))
+                       break;
+               dma_mask >>= 1;
+       }
+       if (rc) {
+               EFX_ERR(efx, "could not find a suitable DMA mask\n");
+               goto fail2;
+       }
+       EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask);
+       rc = pci_set_consistent_dma_mask(pci_dev, dma_mask);
+       if (rc) {
+               /* pci_set_consistent_dma_mask() is not *allowed* to
+                * fail with a mask that pci_set_dma_mask() accepted,
+                * but just in case...
+                */
+               EFX_ERR(efx, "failed to set consistent DMA mask\n");
+               goto fail2;
+       }
+
+       efx->membase_phys = pci_resource_start(efx->pci_dev,
+                                              efx->type->mem_bar);
+       rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
+       if (rc) {
+               EFX_ERR(efx, "request for memory BAR failed\n");
+               rc = -EIO;
+               goto fail3;
+       }
+       efx->membase = ioremap_nocache(efx->membase_phys,
+                                      efx->type->mem_map_size);
+       if (!efx->membase) {
+               EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n",
+                       efx->type->mem_bar, efx->membase_phys,
+                       efx->type->mem_map_size);
+               rc = -ENOMEM;
+               goto fail4;
+       }
+       EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n",
+               efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size,
+               efx->membase);
+
+       return 0;
+
+ fail4:
+       release_mem_region(efx->membase_phys, efx->type->mem_map_size);
+ fail3:
+       efx->membase_phys = 0UL;
+ fail2:
+       pci_disable_device(efx->pci_dev);
+ fail1:
+       return rc;
+}
+
+static void efx_fini_io(struct efx_nic *efx)
+{
+       EFX_LOG(efx, "shutting down I/O\n");
+
+       if (efx->membase) {
+               iounmap(efx->membase);
+               efx->membase = NULL;
+       }
+
+       if (efx->membase_phys) {
+               pci_release_region(efx->pci_dev, efx->type->mem_bar);
+               efx->membase_phys = 0UL;
+       }
+
+       pci_disable_device(efx->pci_dev);
+}
+
+/* Probe the number and type of interrupts we are able to obtain. */
+static void efx_probe_interrupts(struct efx_nic *efx)
+{
+       int max_channel = efx->type->phys_addr_channels - 1;
+       struct msix_entry xentries[EFX_MAX_CHANNELS];
+       int rc, i;
+
+       if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+               BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX));
+
+               efx->rss_queues = rss_cpus ? rss_cpus : num_online_cpus();
+               efx->rss_queues = min(efx->rss_queues, max_channel + 1);
+               efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS);
+
+               /* Request the maximum number of MSI-X interrupts, and fill out
+                * the channel interrupt information from the allowed allocation */
+               for (i = 0; i < efx->rss_queues; i++)
+                       xentries[i].entry = i;
+               rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues);
+               if (rc > 0) {
+                       EFX_BUG_ON_PARANOID(rc >= efx->rss_queues);
+                       efx->rss_queues = rc;
+                       rc = pci_enable_msix(efx->pci_dev, xentries,
+                                            efx->rss_queues);
+               }
+
+               if (rc == 0) {
+                       for (i = 0; i < efx->rss_queues; i++) {
+                               efx->channel[i].has_interrupt = 1;
+                               efx->channel[i].irq = xentries[i].vector;
+                       }
+               } else {
+                       /* Fall back to single channel MSI */
+                       efx->interrupt_mode = EFX_INT_MODE_MSI;
+                       EFX_ERR(efx, "could not enable MSI-X\n");
+               }
+       }
+
+       /* Try single interrupt MSI */
+       if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
+               efx->rss_queues = 1;
+               rc = pci_enable_msi(efx->pci_dev);
+               if (rc == 0) {
+                       efx->channel[0].irq = efx->pci_dev->irq;
+                       efx->channel[0].has_interrupt = 1;
+               } else {
+                       EFX_ERR(efx, "could not enable MSI\n");
+                       efx->interrupt_mode = EFX_INT_MODE_LEGACY;
+               }
+       }
+
+       /* Assume legacy interrupts */
+       if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
+               efx->rss_queues = 1;
+               /* Every channel is interruptible */
+               for (i = 0; i < EFX_MAX_CHANNELS; i++)
+                       efx->channel[i].has_interrupt = 1;
+               efx->legacy_irq = efx->pci_dev->irq;
+       }
+}
+
+static void efx_remove_interrupts(struct efx_nic *efx)
+{
+       struct efx_channel *channel;
+
+       /* Remove MSI/MSI-X interrupts */
+       efx_for_each_channel_with_interrupt(channel, efx)
+               channel->irq = 0;
+       pci_disable_msi(efx->pci_dev);
+       pci_disable_msix(efx->pci_dev);
+
+       /* Remove legacy interrupt */
+       efx->legacy_irq = 0;
+}
+
+/* Select number of used resources
+ * Should be called after probe_interrupts()
+ */
+static void efx_select_used(struct efx_nic *efx)
+{
+       struct efx_tx_queue *tx_queue;
+       struct efx_rx_queue *rx_queue;
+       int i;
+
+       /* TX queues.  One per port per channel with TX capability
+        * (more than one per port won't work on Linux, due to out
+        *  of order issues... but will be fine on Solaris)
+        */
+       tx_queue = &efx->tx_queue[0];
+
+       /* Perform this for each channel with TX capabilities.
+        * At the moment, we only support a single TX queue
+        */
+       tx_queue->used = 1;
+       if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels)
+               tx_queue->channel = &efx->channel[1];
+       else
+               tx_queue->channel = &efx->channel[0];
+       tx_queue->channel->used_flags |= EFX_USED_BY_TX;
+       tx_queue++;
+
+       /* RX queues.  Each has a dedicated channel. */
+       for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
+               rx_queue = &efx->rx_queue[i];
+
+               if (i < efx->rss_queues) {
+                       rx_queue->used = 1;
+                       /* If we allow multiple RX queues per channel
+                        * we need to decide that here
+                        */
+                       rx_queue->channel = &efx->channel[rx_queue->queue];
+                       rx_queue->channel->used_flags |= EFX_USED_BY_RX;
+                       rx_queue++;
+               }
+       }
+}
+
+static int efx_probe_nic(struct efx_nic *efx)
+{
+       int rc;
+
+       EFX_LOG(efx, "creating NIC\n");
+
+       /* Carry out hardware-type specific initialisation */
+       rc = falcon_probe_nic(efx);
+       if (rc)
+               return rc;
+
+       /* Determine the number of channels and RX queues by trying to hook
+        * in MSI-X interrupts. */
+       efx_probe_interrupts(efx);
+
+       /* Determine number of RX queues and TX queues */
+       efx_select_used(efx);
+
+       /* Initialise the interrupt moderation settings */
+       efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec);
+
+       return 0;
+}
+
+static void efx_remove_nic(struct efx_nic *efx)
+{
+       EFX_LOG(efx, "destroying NIC\n");
+
+       efx_remove_interrupts(efx);
+       falcon_remove_nic(efx);
+}
+
+/**************************************************************************
+ *
+ * NIC startup/shutdown
+ *
+ *************************************************************************/
+
+static int efx_probe_all(struct efx_nic *efx)
+{
+       struct efx_channel *channel;
+       int rc;
+
+       /* Create NIC */
+       rc = efx_probe_nic(efx);
+       if (rc) {
+               EFX_ERR(efx, "failed to create NIC\n");
+               goto fail1;
+       }
+
+       /* Create port */
+       rc = efx_probe_port(efx);
+       if (rc) {
+               EFX_ERR(efx, "failed to create port\n");
+               goto fail2;
+       }
+
+       /* Create channels */
+       efx_for_each_channel(channel, efx) {
+               rc = efx_probe_channel(channel);
+               if (rc) {
+                       EFX_ERR(efx, "failed to create channel %d\n",
+                               channel->channel);
+                       goto fail3;
+               }
+       }
+
+       return 0;
+
+ fail3:
+       efx_for_each_channel(channel, efx)
+               efx_remove_channel(channel);
+       efx_remove_port(efx);
+ fail2:
+       efx_remove_nic(efx);
+ fail1:
+       return rc;
+}
+
+/* Called after previous invocation(s) of efx_stop_all, restarts the
+ * port, kernel transmit queue, NAPI processing and hardware interrupts,
+ * and ensures that the port is scheduled to be reconfigured.
+ * This function is safe to call multiple times when the NIC is in any
+ * state. */
+static void efx_start_all(struct efx_nic *efx)
+{
+       struct efx_channel *channel;
+
+       EFX_ASSERT_RESET_SERIALISED(efx);
+
+       /* Check that it is appropriate to restart the interface. All
+        * of these flags are safe to read under just the rtnl lock */
+       if (efx->port_enabled)
+               return;
+       if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
+               return;
+       if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev))
+               return;
+
+       /* Mark the port as enabled so port reconfigurations can start, then
+        * restart the transmit interface early so the watchdog timer stops */
+       efx_start_port(efx);
+       efx_wake_queue(efx);
+
+       efx_for_each_channel(channel, efx)
+               efx_start_channel(channel);
+
+       falcon_enable_interrupts(efx);
+
+       /* Start hardware monitor if we're in RUNNING */
+       if (efx->state == STATE_RUNNING)
+               queue_delayed_work(efx->workqueue, &efx->monitor_work,
+                                  efx_monitor_interval);
+}
+
+/* Flush all delayed work. Should only be called when no more delayed work
+ * will be scheduled. This doesn't flush pending online resets (efx_reset),
+ * since we're holding the rtnl_lock at this point. */
+static void efx_flush_all(struct efx_nic *efx)
+{
+       struct efx_rx_queue *rx_queue;
+
+       /* Make sure the hardware monitor is stopped */
+       cancel_delayed_work_sync(&efx->monitor_work);
+
+       /* Ensure that all RX slow refills are complete. */
+       efx_for_each_rx_queue(rx_queue, efx) {
+               cancel_delayed_work_sync(&rx_queue->work);
+       }
+
+       /* Stop scheduled port reconfigurations */
+       cancel_work_sync(&efx->reconfigure_work);
+
+}
+
+/* Quiesce hardware and software without bringing the link down.
+ * Safe to call multiple times when the NIC and interface are in any
+ * state. The caller is guaranteed to subsequently be in a position
+ * to modify any hardware and software state they see fit without
+ * taking locks. */
+static void efx_stop_all(struct efx_nic *efx)
+{
+       struct efx_channel *channel;
+
+       EFX_ASSERT_RESET_SERIALISED(efx);
+
+       /* port_enabled can be read safely under the rtnl lock */
+       if (!efx->port_enabled)
+               return;
+
+       /* Disable interrupts and wait for ISR to complete */
+       falcon_disable_interrupts(efx);
+       if (efx->legacy_irq)
+               synchronize_irq(efx->legacy_irq);
+       efx_for_each_channel_with_interrupt(channel, efx)
+               if (channel->irq)
+                       synchronize_irq(channel->irq);
+
+       /* Stop all NAPI processing and synchronous rx refills */
+       efx_for_each_channel(channel, efx)
+               efx_stop_channel(channel);
+
+       /* Stop all asynchronous port reconfigurations. Since all
+        * event processing has already been stopped, there is no
+        * window to lose PHY events */
+       efx_stop_port(efx);
+
+       /* Flush reconfigure_work, refill_workqueue, monitor_work */
+       efx_flush_all(efx);
+
+       /* Isolate the MAC from the TX and RX engines, so that queue
+        * flushes will complete in a timely fashion. */
+       falcon_deconfigure_mac_wrapper(efx);
+       falcon_drain_tx_fifo(efx);
+
+       /* Stop the kernel transmit interface late, so the watchdog
+        * timer isn't ticking over the flush */
+       efx_stop_queue(efx);
+       if (NET_DEV_REGISTERED(efx)) {
+               netif_tx_lock_bh(efx->net_dev);
+               netif_tx_unlock_bh(efx->net_dev);
+       }
+}
+
+static void efx_remove_all(struct efx_nic *efx)
+{
+       struct efx_channel *channel;
+
+       efx_for_each_channel(channel, efx)
+               efx_remove_channel(channel);
+       efx_remove_port(efx);
+       efx_remove_nic(efx);
+}
+
+/* A convenience function to safely flush all the queues */
+int efx_flush_queues(struct efx_nic *efx)
+{
+       int rc;
+
+       EFX_ASSERT_RESET_SERIALISED(efx);
+
+       efx_stop_all(efx);
+
+       efx_fini_channels(efx);
+       rc = efx_init_channels(efx);
+       if (rc) {
+               efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+               return rc;
+       }
+
+       efx_start_all(efx);
+
+       return 0;
+}
+
+/**************************************************************************
+ *
+ * Interrupt moderation
+ *
+ **************************************************************************/
+
+/* Set interrupt moderation parameters */
+void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs)
+{
+       struct efx_tx_queue *tx_queue;
+       struct efx_rx_queue *rx_queue;
+
+       EFX_ASSERT_RESET_SERIALISED(efx);
+
+       efx_for_each_tx_queue(tx_queue, efx)
+               tx_queue->channel->irq_moderation = tx_usecs;
+
+       efx_for_each_rx_queue(rx_queue, efx)
+               rx_queue->channel->irq_moderation = rx_usecs;
+}
+
+/**************************************************************************
+ *
+ * Hardware monitor
+ *
+ **************************************************************************/
+
+/* Run periodically off the general workqueue. Serialised against
+ * efx_reconfigure_port via the mac_lock */
+static void efx_monitor(struct work_struct *data)
+{
+       struct efx_nic *efx = container_of(data, struct efx_nic,
+                                          monitor_work.work);
+       int rc = 0;
+
+       EFX_TRACE(efx, "hardware monitor executing on CPU %d\n",
+                 raw_smp_processor_id());
+
+
+       /* If the mac_lock is already held then a port reconfiguration is
+        * likely already in progress, which will do
+        * most of the work of check_hw() anyway. */
+       if (!mutex_trylock(&efx->mac_lock)) {
+               queue_delayed_work(efx->workqueue, &efx->monitor_work,
+                                  efx_monitor_interval);
+               return;
+       }
+
+       if (efx->port_enabled)
+               rc = falcon_check_xmac(efx);
+       mutex_unlock(&efx->mac_lock);
+
+       if (rc) {
+               if (monitor_reset) {
+                       EFX_ERR(efx, "hardware monitor detected a fault: "
+                               "triggering reset\n");
+                       efx_schedule_reset(efx, RESET_TYPE_MONITOR);
+               } else {
+                       EFX_ERR(efx, "hardware monitor detected a fault, "
+                               "skipping reset\n");
+               }
+       }
+
+       queue_delayed_work(efx->workqueue, &efx->monitor_work,
+                          efx_monitor_interval);
+}
+
+/**************************************************************************
+ *
+ * ioctls
+ *
+ *************************************************************************/
+
+/* Net device ioctl
+ * Context: process, rtnl_lock() held.
+ */
+static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
+{
+       struct efx_nic *efx = net_dev->priv;
+
+       EFX_ASSERT_RESET_SERIALISED(efx);
+
+       return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL);
+}
+
+/**************************************************************************
+ *
+ * NAPI interface
+ *
+ **************************************************************************/
+
+static int efx_init_napi(struct efx_nic *efx)
+{
+       struct efx_channel *channel;
+       int rc;
+
+       efx_for_each_channel(channel, efx) {
+               channel->napi_dev = efx->net_dev;
+               rc = efx_lro_init(&channel->lro_mgr, efx);
+               if (rc)
+                       goto err;
+       }
+       return 0;
+ err:
+       efx_fini_napi(efx);
+       return rc;
+}
+
+static void efx_fini_napi(struct efx_nic *efx)
+{
+       struct efx_channel *channel;
+
+       efx_for_each_channel(channel, efx) {
+               efx_lro_fini(&channel->lro_mgr);
+               channel->napi_dev = NULL;
+       }
+}
+
+/**************************************************************************
+ *
+ * Kernel netpoll interface
+ *
+ *************************************************************************/
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+
+/* Although in the common case interrupts will be disabled, this is not
+ * guaranteed. However, all our work happens inside the NAPI callback,
+ * so no locking is required.
+ */
+static void efx_netpoll(struct net_device *net_dev)
+{
+       struct efx_nic *efx = net_dev->priv;
+       struct efx_channel *channel;
+
+       efx_for_each_channel_with_interrupt(channel, efx)
+               efx_schedule_channel(channel);
+}
+
+#endif
+
+/**************************************************************************
+ *
+ * Kernel net device interface
+ *
+ *************************************************************************/
+
+/* Context: process, rtnl_lock() held. */
+static int efx_net_open(struct net_device *net_dev)
+{
+       struct efx_nic *efx = net_dev->priv;
+       EFX_ASSERT_RESET_SERIALISED(efx);
+
+       EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name,
+               raw_smp_processor_id());
+
+       efx_start_all(efx);
+       return 0;
+}
+
+/* Context: process, rtnl_lock() held.
+ * Note that the kernel will ignore our return code; this method
+ * should really be a void.
+ */
+static int efx_net_stop(struct net_device *net_dev)
+{
+       struct efx_nic *efx = net_dev->priv;
+       int rc;
+
+       EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name,
+               raw_smp_processor_id());
+
+       /* Stop the device and flush all the channels */
+       efx_stop_all(efx);
+       efx_fini_channels(efx);
+       rc = efx_init_channels(efx);
+       if (rc)
+               efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+
+       return 0;
+}
+
+/* Context: process, dev_base_lock held, non-blocking. */
+static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
+{
+       struct efx_nic *efx = net_dev->priv;
+       struct efx_mac_stats *mac_stats = &efx->mac_stats;
+       struct net_device_stats *stats = &net_dev->stats;
+
+       if (!spin_trylock(&efx->stats_lock))
+               return stats;
+       if (efx->state == STATE_RUNNING) {
+               falcon_update_stats_xmac(efx);
+               falcon_update_nic_stats(efx);
+       }
+       spin_unlock(&efx->stats_lock);
+
+       stats->rx_packets = mac_stats->rx_packets;
+       stats->tx_packets = mac_stats->tx_packets;
+       stats->rx_bytes = mac_stats->rx_bytes;
+       stats->tx_bytes = mac_stats->tx_bytes;
+       stats->multicast = mac_stats->rx_multicast;
+       stats->collisions = mac_stats->tx_collision;
+       stats->rx_length_errors = (mac_stats->rx_gtjumbo +
+                                  mac_stats->rx_length_error);
+       stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt;
+       stats->rx_crc_errors = mac_stats->rx_bad;
+       stats->rx_frame_errors = mac_stats->rx_align_error;
+       stats->rx_fifo_errors = mac_stats->rx_overflow;
+       stats->rx_missed_errors = mac_stats->rx_missed;
+       stats->tx_window_errors = mac_stats->tx_late_collision;
+
+       stats->rx_errors = (stats->rx_length_errors +
+                           stats->rx_over_errors +
+                           stats->rx_crc_errors +
+                           stats->rx_frame_errors +
+                           stats->rx_fifo_errors +
+                           stats->rx_missed_errors +
+                           mac_stats->rx_symbol_error);
+       stats->tx_errors = (stats->tx_window_errors +
+                           mac_stats->tx_bad);
+
+       return stats;
+}
+
+/* Context: netif_tx_lock held, BHs disabled. */
+static void efx_watchdog(struct net_device *net_dev)
+{
+       struct efx_nic *efx = net_dev->priv;
+
+       EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n",
+               atomic_read(&efx->netif_stop_count), efx->port_enabled,
+               monitor_reset ? "resetting channels" : "skipping reset");
+
+       if (monitor_reset)
+               efx_schedule_reset(efx, RESET_TYPE_MONITOR);
+}
+
+
+/* Context: process, rtnl_lock() held. */
+static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+       struct efx_nic *efx = net_dev->priv;
+       int rc = 0;
+
+       EFX_ASSERT_RESET_SERIALISED(efx);
+
+       if (new_mtu > EFX_MAX_MTU)
+               return -EINVAL;
+
+       efx_stop_all(efx);
+
+       EFX_LOG(efx, "changing MTU to %d\n", new_mtu);
+
+       efx_fini_channels(efx);
+       net_dev->mtu = new_mtu;
+       rc = efx_init_channels(efx);
+       if (rc)
+               goto fail;
+
+       efx_start_all(efx);
+       return rc;
+
+ fail:
+       efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+       return rc;
+}
+
+static int efx_set_mac_address(struct net_device *net_dev, void *data)
+{
+       struct efx_nic *efx = net_dev->priv;
+       struct sockaddr *addr = data;
+       char *new_addr = addr->sa_data;
+
+       EFX_ASSERT_RESET_SERIALISED(efx);
+
+       if (!is_valid_ether_addr(new_addr)) {
+               DECLARE_MAC_BUF(mac);
+               EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n",
+                       print_mac(mac, new_addr));
+               return -EINVAL;
+       }
+
+       memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len);
+
+       /* Reconfigure the MAC */
+       efx_reconfigure_port(efx);
+
+       return 0;
+}
+
+/* Context: netif_tx_lock held, BHs disabled. */
+static void efx_set_multicast_list(struct net_device *net_dev)
+{
+       struct efx_nic *efx = net_dev->priv;
+       struct dev_mc_list *mc_list = net_dev->mc_list;
+       union efx_multicast_hash *mc_hash = &efx->multicast_hash;
+       int promiscuous;
+       u32 crc;
+       int bit;
+       int i;
+
+       /* Set per-MAC promiscuity flag and reconfigure MAC if necessary */
+       promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0;
+       if (efx->promiscuous != promiscuous) {
+               efx->promiscuous = promiscuous;
+               /* Close the window between efx_stop_port() and efx_flush_all()
+                * by only queuing work when the port is enabled. */
+               if (efx->port_enabled)
+                       queue_work(efx->workqueue, &efx->reconfigure_work);
+       }
+
+       /* Build multicast hash table */
+       if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) {
+               memset(mc_hash, 0xff, sizeof(*mc_hash));
+       } else {
+               memset(mc_hash, 0x00, sizeof(*mc_hash));
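+               /* Hash each multicast address and set the matching bit
+                * in the hash table */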
+               for (i = 0; i < net_dev->mc_count; i++) {
+                       crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
+                       bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
+                       set_bit_le(bit, mc_hash->byte);
+                       mc_list = mc_list->next;
+               }
+       }
+
+       /* Create and activate new global multicast hash table */
+       falcon_set_multicast_hash(efx);
+}
+
+static int efx_netdev_event(struct notifier_block *this,
+                           unsigned long event, void *ptr)
+{
+       struct net_device *net_dev = (struct net_device *)ptr;
+
+       if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
+               struct efx_nic *efx = net_dev->priv;
+
+               strcpy(efx->name, net_dev->name);
+       }
+
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block efx_netdev_notifier = {
+       .notifier_call = efx_netdev_event,
+};
+
+static int efx_register_netdev(struct efx_nic *efx)
+{
+       struct net_device *net_dev = efx->net_dev;
+       int rc;
+
+       net_dev->watchdog_timeo = 5 * HZ;
+       net_dev->irq = efx->pci_dev->irq;
+       net_dev->open = efx_net_open;
+       net_dev->stop = efx_net_stop;
+       net_dev->get_stats = efx_net_stats;
+       net_dev->tx_timeout = &efx_watchdog;
+       net_dev->hard_start_xmit = efx_hard_start_xmit;
+       net_dev->do_ioctl = efx_ioctl;
+       net_dev->change_mtu = efx_change_mtu;
+       net_dev->set_mac_address = efx_set_mac_address;
+       net_dev->set_multicast_list = efx_set_multicast_list;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       net_dev->poll_controller = efx_netpoll;
+#endif
+       SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev);
+       SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+
+       /* Always start with carrier off; PHY events will detect the link */
+       netif_carrier_off(efx->net_dev);
+
+       /* Clear MAC statistics */
+       falcon_update_stats_xmac(efx);
+       memset(&efx->mac_stats, 0, sizeof(efx->mac_stats));
+
+       rc = register_netdev(net_dev);
+       if (rc) {
+               EFX_ERR(efx, "could not register net dev\n");
+               return rc;
+       }
+       strcpy(efx->name, net_dev->name);
+
+       return 0;
+}
+
+static void efx_unregister_netdev(struct efx_nic *efx)
+{
+       struct efx_tx_queue *tx_queue;
+
+       if (!efx->net_dev)
+               return;
+
+       BUG_ON(efx->net_dev->priv != efx);
+
+       /* Free up any skbs still remaining.  This has to happen before
+        * we try to unregister the netdev, because running their
+        * destructors may be needed to get the device refcount to 0. */
+       efx_for_each_tx_queue(tx_queue, efx)
+               efx_release_tx_buffers(tx_queue);
+
+       if (NET_DEV_REGISTERED(efx)) {
+               strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+               unregister_netdev(efx->net_dev);
+       }
+}
+
+/**************************************************************************
+ *
+ * Device reset and suspend
+ *
+ **************************************************************************/
+
+/* The final hardware and software teardown before a reset. */
+static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+       int rc;
+
+       EFX_ASSERT_RESET_SERIALISED(efx);
+
+       rc = falcon_xmac_get_settings(efx, ecmd);
+       if (rc) {
+               EFX_ERR(efx, "could not back up PHY settings\n");
+               goto fail;
+       }
+
+       efx_fini_channels(efx);
+       return 0;
+
+ fail:
+       return rc;
+}
+
+/* The first part of software initialisation after a hardware reset.
+ * This function does not handle serialisation with the kernel; it
+ * assumes the caller has done this. */
+static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+       int rc;
+
+       rc = efx_init_channels(efx);
+       if (rc)
+               goto fail1;
+
+       /* Restore MAC and PHY settings. */
+       rc = falcon_xmac_set_settings(efx, ecmd);
+       if (rc) {
+               EFX_ERR(efx, "could not restore PHY settings\n");
+               goto fail2;
+       }
+
+       return 0;
+
+ fail2:
+       efx_fini_channels(efx);
+ fail1:
+       return rc;
+}
+
+/* Reset the NIC as transparently as possible. Do not reset the PHY.
+ * Note that the reset may fail, in which case the card will be left
+ * in a most-probably-unusable state.
+ *
+ * This function will sleep.  You cannot reset from within an atomic
+ * state; use efx_schedule_reset() instead.
+ *
+ * Grabs the rtnl_lock.
+ */
+static int efx_reset(struct efx_nic *efx)
+{
+       struct ethtool_cmd ecmd;
+       enum reset_type method = efx->reset_pending;
+       int rc;
+
+       /* Serialise with kernel interfaces */
+       rtnl_lock();
+
+       /* If we're not RUNNING then don't reset. Leave the reset_pending
+        * flag set so that efx_pci_probe_main will be retried */
+       if (efx->state != STATE_RUNNING) {
+               EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n");
+               goto unlock_rtnl;
+       }
+
+       efx->state = STATE_RESETTING;
+       EFX_INFO(efx, "resetting (%d)\n", method);
+
+       /* The net_dev->get_stats handler is quite slow, and will fail
+        * if a fetch is pending over reset. Serialise against it. */
+       spin_lock(&efx->stats_lock);
+       spin_unlock(&efx->stats_lock);
+
+       efx_stop_all(efx);
+       mutex_lock(&efx->mac_lock);
+
+       rc = efx_reset_down(efx, &ecmd);
+       if (rc)
+               goto fail1;
+
+       rc = falcon_reset_hw(efx, method);
+       if (rc) {
+               EFX_ERR(efx, "failed to reset hardware\n");
+               goto fail2;
+       }
+
+       /* Allow resets to be rescheduled. */
+       efx->reset_pending = RESET_TYPE_NONE;
+
+       /* Reinitialise bus-mastering, which may have been turned off before
+        * the reset was scheduled. This is still appropriate, even in the
+        * RESET_TYPE_DISABLE since this driver generally assumes the hardware
+        * can respond to requests. */
+       pci_set_master(efx->pci_dev);
+
+       /* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE
+        * case so the driver can talk to external SRAM */
+       rc = falcon_init_nic(efx);
+       if (rc) {
+               EFX_ERR(efx, "failed to initialise NIC\n");
+               goto fail3;
+       }
+
+       /* Leave device stopped if necessary */
+       if (method == RESET_TYPE_DISABLE) {
+               /* Reinitialise the device anyway so the driver unload sequence
+                * can talk to the external SRAM */
+               (void) falcon_init_nic(efx);
+               rc = -EIO;
+               goto fail4;
+       }
+
+       rc = efx_reset_up(efx, &ecmd);
+       if (rc)
+               goto fail5;
+
+       mutex_unlock(&efx->mac_lock);
+       EFX_LOG(efx, "reset complete\n");
+
+       efx->state = STATE_RUNNING;
+       efx_start_all(efx);
+
+ unlock_rtnl:
+       rtnl_unlock();
+       return 0;
+
+ fail5:
+ fail4:
+ fail3:
+ fail2:
+ fail1:
+       EFX_ERR(efx, "has been disabled\n");
+       efx->state = STATE_DISABLED;
+
+       mutex_unlock(&efx->mac_lock);
+       rtnl_unlock();
+       efx_unregister_netdev(efx);
+       efx_fini_port(efx);
+       return rc;
+}
+
+/* The worker thread exists so that code that cannot sleep can
+ * schedule a reset for later.
+ */
+static void efx_reset_work(struct work_struct *data)
+{
+       struct efx_nic *nic = container_of(data, struct efx_nic, reset_work);
+
+       efx_reset(nic);
+}
+
+void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
+{
+       enum reset_type method;
+
+       if (efx->reset_pending != RESET_TYPE_NONE) {
+               EFX_INFO(efx, "quenching already scheduled reset\n");
+               return;
+       }
+
+       switch (type) {
+       case RESET_TYPE_INVISIBLE:
+       case RESET_TYPE_ALL:
+       case RESET_TYPE_WORLD:
+       case RESET_TYPE_DISABLE:
+               method = type;
+               break;
+       case RESET_TYPE_RX_RECOVERY:
+       case RESET_TYPE_RX_DESC_FETCH:
+       case RESET_TYPE_TX_DESC_FETCH:
+       case RESET_TYPE_TX_SKIP:
+               method = RESET_TYPE_INVISIBLE;
+               break;
+       default:
+               method = RESET_TYPE_ALL;
+               break;
+       }
+
+       if (method != type)
+               EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method);
+       else
+               EFX_LOG(efx, "scheduling reset (%d)\n", method);
+
+       efx->reset_pending = method;
+
+       queue_work(efx->workqueue, &efx->reset_work);
+}
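
efx_schedule_reset() above is the only reset entry point that is safe from atomic context; efx_reset() itself sleeps and takes rtnl_lock, as its comment notes. A minimal sketch of how an error path might request a reset (the handler name is invented for illustration and is not part of this patch):

    /* Illustrative only: a fatal-error path running in atomic context
     * passes a "reason" code to efx_schedule_reset() and lets the work
     * item choose the method.  RESET_TYPE_INT_ERROR is not listed in the
     * switch above, so it falls through to the default case and becomes
     * RESET_TYPE_ALL. */
    static void example_handle_fatal_error(struct efx_nic *efx)
    {
            efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
    }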
+
+/**************************************************************************
+ *
+ * List of NICs we support
+ *
+ **************************************************************************/
+
+/* PCI device ID table */
+static struct pci_device_id efx_pci_table[] __devinitdata = {
+       {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
+        .driver_data = (unsigned long) &falcon_a_nic_type},
+       {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
+        .driver_data = (unsigned long) &falcon_b_nic_type},
+       {0}                     /* end of list */
+};
+
+/**************************************************************************
+ *
+ * Dummy PHY/MAC/Board operations
+ *
+ * Can be used where the PHY/MAC/board does not implement an operation.
+ * Needed so all function pointers are valid and do not have to be
+ * tested before use.
+ *
+ **************************************************************************/
+int efx_port_dummy_op_int(struct efx_nic *efx)
+{
+       return 0;
+}
+void efx_port_dummy_op_void(struct efx_nic *efx) {}
+void efx_port_dummy_op_blink(struct efx_nic *efx, int blink) {}
+
+static struct efx_phy_operations efx_dummy_phy_operations = {
+       .init            = efx_port_dummy_op_int,
+       .reconfigure     = efx_port_dummy_op_void,
+       .check_hw        = efx_port_dummy_op_int,
+       .fini            = efx_port_dummy_op_void,
+       .clear_interrupt = efx_port_dummy_op_void,
+       .reset_xaui      = efx_port_dummy_op_void,
+};
+
+/* Dummy board operations */
+static int efx_nic_dummy_op_int(struct efx_nic *nic)
+{
+       return 0;
+}
+
+static struct efx_board efx_dummy_board_info = {
+       .init    = efx_nic_dummy_op_int,
+       .init_leds = efx_port_dummy_op_int,
+       .set_fault_led = efx_port_dummy_op_blink,
+};
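
These dummy hooks implement a null-object pattern: efx_init_struct() below installs efx_dummy_phy_operations and efx_dummy_board_info by default, so call sites can invoke the hooks unconditionally. A hedged sketch of what that buys at a call site (the function name is invented, not part of the patch):

    /* Illustrative only: with the dummy operations installed, no NULL
     * checks are needed before calling through the operation tables. */
    static void example_shutdown_phy(struct efx_nic *efx)
    {
            efx->phy_op->fini(efx);         /* always safe to call */

            /* Without the dummies, every call site would need:
             *      if (efx->phy_op && efx->phy_op->fini)
             *              efx->phy_op->fini(efx);
             */
    }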
+
+/**************************************************************************
+ *
+ * Data housekeeping
+ *
+ **************************************************************************/
+
+/* This zeroes out and then fills in the invariants in a struct
+ * efx_nic (including all sub-structures).
+ */
+static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
+                          struct pci_dev *pci_dev, struct net_device *net_dev)
+{
+       struct efx_channel *channel;
+       struct efx_tx_queue *tx_queue;
+       struct efx_rx_queue *rx_queue;
+       int i, rc;
+
+       /* Initialise common structures */
+       memset(efx, 0, sizeof(*efx));
+       spin_lock_init(&efx->biu_lock);
+       spin_lock_init(&efx->phy_lock);
+       INIT_WORK(&efx->reset_work, efx_reset_work);
+       INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor);
+       efx->pci_dev = pci_dev;
+       efx->state = STATE_INIT;
+       efx->reset_pending = RESET_TYPE_NONE;
+       strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name));
+       efx->board_info = efx_dummy_board_info;
+
+       efx->net_dev = net_dev;
+       efx->rx_checksum_enabled = 1;
+       spin_lock_init(&efx->netif_stop_lock);
+       spin_lock_init(&efx->stats_lock);
+       mutex_init(&efx->mac_lock);
+       efx->phy_op = &efx_dummy_phy_operations;
+       efx->mii.dev = net_dev;
+       INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work);
+       atomic_set(&efx->netif_stop_count, 1);
+
+       for (i = 0; i < EFX_MAX_CHANNELS; i++) {
+               channel = &efx->channel[i];
+               channel->efx = efx;
+               channel->channel = i;
+               channel->evqnum = i;
+               channel->work_pending = 0;
+       }
+       for (i = 0; i < EFX_MAX_TX_QUEUES; i++) {
+               tx_queue = &efx->tx_queue[i];
+               tx_queue->efx = efx;
+               tx_queue->queue = i;
+               tx_queue->buffer = NULL;
+               tx_queue->channel = &efx->channel[0]; /* for safety */
+       }
+       for (i = 0; i < EFX_MAX_RX_QUEUES; i++) {
+               rx_queue = &efx->rx_queue[i];
+               rx_queue->efx = efx;
+               rx_queue->queue = i;
+               rx_queue->channel = &efx->channel[0]; /* for safety */
+               rx_queue->buffer = NULL;
+               spin_lock_init(&rx_queue->add_lock);
+               INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
+       }
+
+       efx->type = type;
+
+       /* Sanity-check NIC type */
+       EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
+                           (efx->type->txd_ring_mask + 1));
+       EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
+                           (efx->type->rxd_ring_mask + 1));
+       EFX_BUG_ON_PARANOID(efx->type->evq_size &
+                           (efx->type->evq_size - 1));
+       /* As close as we can get to guaranteeing that we don't overflow */
+       EFX_BUG_ON_PARANOID(efx->type->evq_size <
+                           (efx->type->txd_ring_mask + 1 +
+                            efx->type->rxd_ring_mask + 1));
+       EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
+
+       /* Higher numbered interrupt modes are less capable! */
+       efx->interrupt_mode = max(efx->type->max_interrupt_mode,
+                                 interrupt_mode);
+
+       efx->workqueue = create_singlethread_workqueue("sfc_work");
+       if (!efx->workqueue) {
+               rc = -ENOMEM;
+               goto fail1;
+       }
+
+       return 0;
+
+ fail1:
+       return rc;
+}
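
The EFX_BUG_ON_PARANOID checks near the end of efx_init_struct() rely on a bit trick: a value m is a valid ring mask (of the form 2^n - 1) exactly when m & (m + 1) == 0, and a size s is a power of two exactly when s & (s - 1) == 0. A small illustration of the two predicates (names invented, not part of the patch):

    /* Illustrative only: the predicates behind the sanity checks above. */
    static inline int example_is_ring_mask(unsigned int m)
    {
            return (m & (m + 1)) == 0;      /* 0x3ff -> true, 0x400 -> false */
    }

    static inline int example_is_power_of_two(unsigned int s)
    {
            return s != 0 && (s & (s - 1)) == 0;    /* 4096 -> true */
    }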
+
+static void efx_fini_struct(struct efx_nic *efx)
+{
+       if (efx->workqueue) {
+               destroy_workqueue(efx->workqueue);
+               efx->workqueue = NULL;
+       }
+}
+
+/**************************************************************************
+ *
+ * PCI interface
+ *
+ **************************************************************************/
+
+/* Main body of final NIC shutdown code
+ * This is called only at module unload (or hotplug removal).
+ */
+static void efx_pci_remove_main(struct efx_nic *efx)
+{
+       EFX_ASSERT_RESET_SERIALISED(efx);
+
+       /* Skip everything if we never obtained a valid membase */
+       if (!efx->membase)
+               return;
+
+       efx_fini_channels(efx);
+       efx_fini_port(efx);
+
+       /* Shutdown the board, then the NIC and board state */
+       falcon_fini_interrupt(efx);
+
+       efx_fini_napi(efx);
+       efx_remove_all(efx);
+}
+
+/* Final NIC shutdown
+ * This is called only at module unload (or hotplug removal).
+ */
+static void efx_pci_remove(struct pci_dev *pci_dev)
+{
+       struct efx_nic *efx;
+
+       efx = pci_get_drvdata(pci_dev);
+       if (!efx)
+               return;
+
+       /* Mark the NIC as fini, then stop the interface */
+       rtnl_lock();
+       efx->state = STATE_FINI;
+       dev_close(efx->net_dev);
+
+       /* Allow any queued efx_resets() to complete */
+       rtnl_unlock();
+
+       if (efx->membase == NULL)
+               goto out;
+
+       efx_unregister_netdev(efx);
+
+       /* Wait for any scheduled resets to complete. No more will be
+        * scheduled from this point because efx_stop_all() has been
+        * called, we are no longer registered with driverlink, and
+        * the net_devices have been removed. */
+       flush_workqueue(efx->workqueue);
+
+       efx_pci_remove_main(efx);
+
+out:
+       efx_fini_io(efx);
+       EFX_LOG(efx, "shutdown successful\n");
+
+       pci_set_drvdata(pci_dev, NULL);
+       efx_fini_struct(efx);
+       free_netdev(efx->net_dev);
+};
+
+/* Main body of NIC initialisation
+ * This is called at module load (or hotplug insertion, theoretically).
+ */
+static int efx_pci_probe_main(struct efx_nic *efx)
+{
+       int rc;
+
+       /* Do start-of-day initialisation */
+       rc = efx_probe_all(efx);
+       if (rc)
+               goto fail1;
+
+       rc = efx_init_napi(efx);
+       if (rc)
+               goto fail2;
+
+       /* Initialise the board */
+       rc = efx->board_info.init(efx);
+       if (rc) {
+               EFX_ERR(efx, "failed to initialise board\n");
+               goto fail3;
+       }
+
+       rc = falcon_init_nic(efx);
+       if (rc) {
+               EFX_ERR(efx, "failed to initialise NIC\n");
+               goto fail4;
+       }
+
+       rc = efx_init_port(efx);
+       if (rc) {
+               EFX_ERR(efx, "failed to initialise port\n");
+               goto fail5;
+       }
+
+       rc = efx_init_channels(efx);
+       if (rc)
+               goto fail6;
+
+       rc = falcon_init_interrupt(efx);
+       if (rc)
+               goto fail7;
+
+       return 0;
+
+ fail7:
+       efx_fini_channels(efx);
+ fail6:
+       efx_fini_port(efx);
+ fail5:
+ fail4:
+ fail3:
+       efx_fini_napi(efx);
+ fail2:
+       efx_remove_all(efx);
+ fail1:
+       return rc;
+}
+
+/* NIC initialisation
+ *
+ * This is called at module load (or hotplug insertion,
+ * theoretically).  It sets up PCI mappings, tests and resets the NIC,
+ * sets up and registers the network devices with the kernel and hooks
+ * the interrupt service routine.  It does not prepare the device for
+ * transmission; this is left to the first time one of the network
+ * interfaces is brought up (i.e. efx_net_open).
+ */
+static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
+                                  const struct pci_device_id *entry)
+{
+       struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data;
+       struct net_device *net_dev;
+       struct efx_nic *efx;
+       int i, rc;
+
+       /* Allocate and initialise a struct net_device and struct efx_nic */
+       net_dev = alloc_etherdev(sizeof(*efx));
+       if (!net_dev)
+               return -ENOMEM;
+       net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
+       if (lro)
+               net_dev->features |= NETIF_F_LRO;
+       efx = net_dev->priv;
+       pci_set_drvdata(pci_dev, efx);
+       rc = efx_init_struct(efx, type, pci_dev, net_dev);
+       if (rc)
+               goto fail1;
+
+       EFX_INFO(efx, "Solarflare Communications NIC detected\n");
+
+       /* Set up basic I/O (BAR mappings etc) */
+       rc = efx_init_io(efx);
+       if (rc)
+               goto fail2;
+
+       /* No serialisation is required with the reset path because
+        * we're in STATE_INIT. */
+       for (i = 0; i < 5; i++) {
+               rc = efx_pci_probe_main(efx);
+               if (rc == 0)
+                       break;
+
+               /* Serialise against efx_reset(). No more resets will be
+                * scheduled since efx_stop_all() has been called, and we
+                * have not and never have been registered with either
+                * the rtnetlink or driverlink layers. */
+               cancel_work_sync(&efx->reset_work);
+
+               /* Retry if a recoverable reset event has been scheduled */
+               if ((efx->reset_pending != RESET_TYPE_INVISIBLE) &&
+                   (efx->reset_pending != RESET_TYPE_ALL))
+                       goto fail3;
+
+               efx->reset_pending = RESET_TYPE_NONE;
+       }
+
+       if (rc) {
+               EFX_ERR(efx, "Could not reset NIC\n");
+               goto fail4;
+       }
+
+       /* Switch to the running state before we expose the device to
+        * the OS.  This is to ensure that the initial gathering of
+        * MAC stats succeeds. */
+       rtnl_lock();
+       efx->state = STATE_RUNNING;
+       rtnl_unlock();
+
+       rc = efx_register_netdev(efx);
+       if (rc)
+               goto fail5;
+
+       EFX_LOG(efx, "initialisation successful\n");
+
+       return 0;
+
+ fail5:
+       efx_pci_remove_main(efx);
+ fail4:
+ fail3:
+       efx_fini_io(efx);
+ fail2:
+       efx_fini_struct(efx);
+ fail1:
+       EFX_LOG(efx, "initialisation failed. rc=%d\n", rc);
+       free_netdev(net_dev);
+       return rc;
+}
+
+static struct pci_driver efx_pci_driver = {
+       .name           = EFX_DRIVER_NAME,
+       .id_table       = efx_pci_table,
+       .probe          = efx_pci_probe,
+       .remove         = efx_pci_remove,
+};
+
+/**************************************************************************
+ *
+ * Kernel module interface
+ *
+ *************************************************************************/
+
+module_param(interrupt_mode, uint, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+                "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)");
+
+static int __init efx_init_module(void)
+{
+       int rc;
+
+       printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n");
+
+       rc = register_netdevice_notifier(&efx_netdev_notifier);
+       if (rc)
+               goto err_notifier;
+
+       refill_workqueue = create_workqueue("sfc_refill");
+       if (!refill_workqueue) {
+               rc = -ENOMEM;
+               goto err_refill;
+       }
+
+       rc = pci_register_driver(&efx_pci_driver);
+       if (rc < 0)
+               goto err_pci;
+
+       return 0;
+
+ err_pci:
+       destroy_workqueue(refill_workqueue);
+ err_refill:
+       unregister_netdevice_notifier(&efx_netdev_notifier);
+ err_notifier:
+       return rc;
+}
+
+static void __exit efx_exit_module(void)
+{
+       printk(KERN_INFO "Solarflare NET driver unloading\n");
+
+       pci_unregister_driver(&efx_pci_driver);
+       destroy_workqueue(refill_workqueue);
+       unregister_netdevice_notifier(&efx_netdev_notifier);
+
+}
+
+module_init(efx_init_module);
+module_exit(efx_exit_module);
+
+MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and "
+             "Solarflare Communications");
+MODULE_DESCRIPTION("Solarflare Communications network driver");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(pci, efx_pci_table);
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
new file mode 100644 (file)
index 0000000..3b2f69f
--- /dev/null
@@ -0,0 +1,67 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_EFX_H
+#define EFX_EFX_H
+
+#include "net_driver.h"
+
+/* PCI IDs */
+#define EFX_VENDID_SFC         0x1924
+#define FALCON_A_P_DEVID       0x0703
+#define FALCON_A_S_DEVID        0x6703
+#define FALCON_B_P_DEVID        0x0710
+
+/* TX */
+extern int efx_xmit(struct efx_nic *efx,
+                   struct efx_tx_queue *tx_queue, struct sk_buff *skb);
+extern void efx_stop_queue(struct efx_nic *efx);
+extern void efx_wake_queue(struct efx_nic *efx);
+
+/* RX */
+extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
+extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+                         unsigned int len, int checksummed, int discard);
+extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
+
+/* Channels */
+extern void efx_process_channel_now(struct efx_channel *channel);
+extern int efx_flush_queues(struct efx_nic *efx);
+
+/* Ports */
+extern void efx_reconfigure_port(struct efx_nic *efx);
+
+/* Global */
+extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
+extern void efx_suspend(struct efx_nic *efx);
+extern void efx_resume(struct efx_nic *efx);
+extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
+                                   int rx_usecs);
+extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
+extern void efx_hex_dump(const u8 *, unsigned int, const char *);
+
+/* Dummy PHY ops for PHY drivers */
+extern int efx_port_dummy_op_int(struct efx_nic *efx);
+extern void efx_port_dummy_op_void(struct efx_nic *efx);
+extern void efx_port_dummy_op_blink(struct efx_nic *efx, int blink);
+
+
+extern unsigned int efx_monitor_interval;
+
+static inline void efx_schedule_channel(struct efx_channel *channel)
+{
+       EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n",
+                 channel->channel, raw_smp_processor_id());
+       channel->work_pending = 1;
+
+       netif_rx_schedule(channel->napi_dev, &channel->napi_str);
+}
+
+#endif /* EFX_EFX_H */
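
The inline efx_schedule_channel() helper above marks the channel as having work pending and hands it to NAPI. A hypothetical fragment, not part of this patch, showing how a per-channel interrupt handler might use it (assumes <linux/interrupt.h> and that dev_id was registered as the struct efx_channel pointer):

    static irqreturn_t example_channel_isr(int irq, void *dev_id)
    {
            struct efx_channel *channel = dev_id;

            /* Defer all event processing to the channel's NAPI poll routine */
            efx_schedule_channel(channel);
            return IRQ_HANDLED;
    }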
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h
new file mode 100644 (file)
index 0000000..43663a4
--- /dev/null
@@ -0,0 +1,50 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2007 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_ENUM_H
+#define EFX_ENUM_H
+
+/*****************************************************************************/
+
+/**
+ * enum reset_type - reset types
+ *
+ * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
+ * %RESET_TYPE_DISABLE specify the method/scope of the reset.  The
+ * other values specify reasons, which efx_schedule_reset() will choose
+ * a method for.
+ *
+ * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts
+ * @RESET_TYPE_ALL: reset everything but PCI core blocks
+ * @RESET_TYPE_WORLD: reset everything, save & restore PCI config
+ * @RESET_TYPE_DISABLE: disable NIC
+ * @RESET_TYPE_MONITOR: reset due to hardware monitor
+ * @RESET_TYPE_INT_ERROR: reset due to internal error
+ * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors
+ * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch
+ * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch
+ * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
+ */
+enum reset_type {
+       RESET_TYPE_NONE = -1,
+       RESET_TYPE_INVISIBLE = 0,
+       RESET_TYPE_ALL = 1,
+       RESET_TYPE_WORLD = 2,
+       RESET_TYPE_DISABLE = 3,
+       RESET_TYPE_MAX_METHOD,
+       RESET_TYPE_MONITOR,
+       RESET_TYPE_INT_ERROR,
+       RESET_TYPE_RX_RECOVERY,
+       RESET_TYPE_RX_DESC_FETCH,
+       RESET_TYPE_TX_DESC_FETCH,
+       RESET_TYPE_TX_SKIP,
+       RESET_TYPE_MAX,
+};
+
+#endif /* EFX_ENUM_H */
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
new file mode 100644 (file)
index 0000000..ad541ba
--- /dev/null
@@ -0,0 +1,460 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "ethtool.h"
+#include "falcon.h"
+#include "gmii.h"
+#include "mac.h"
+
+static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable);
+
+struct ethtool_string {
+       char name[ETH_GSTRING_LEN];
+};
+
+struct efx_ethtool_stat {
+       const char *name;
+       enum {
+               EFX_ETHTOOL_STAT_SOURCE_mac_stats,
+               EFX_ETHTOOL_STAT_SOURCE_nic,
+               EFX_ETHTOOL_STAT_SOURCE_channel
+       } source;
+       unsigned offset;
+       u64(*get_stat) (void *field); /* Reader function */
+};
+
+/* Initialiser for a struct #efx_ethtool_stat with type-checking */
+#define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \
+                               get_stat_function) {                    \
+       .name = #stat_name,                                             \
+       .source = EFX_ETHTOOL_STAT_SOURCE_##source_name,                \
+       .offset = ((((field_type *) 0) ==                               \
+                     &((struct efx_##source_name *)0)->field) ?        \
+                   offsetof(struct efx_##source_name, field) :         \
+                   offsetof(struct efx_##source_name, field)),         \
+       .get_stat = get_stat_function,                                  \
+}
+
+static u64 efx_get_uint_stat(void *field)
+{
+       return *(unsigned int *)field;
+}
+
+static u64 efx_get_ulong_stat(void *field)
+{
+       return *(unsigned long *)field;
+}
+
+static u64 efx_get_u64_stat(void *field)
+{
+       return *(u64 *) field;
+}
+
+static u64 efx_get_atomic_stat(void *field)
+{
+       return atomic_read((atomic_t *) field);
+}
+
+#define EFX_ETHTOOL_ULONG_MAC_STAT(field)                      \
+       EFX_ETHTOOL_STAT(field, mac_stats, field,               \
+                         unsigned long, efx_get_ulong_stat)
+
+#define EFX_ETHTOOL_U64_MAC_STAT(field)                                \
+       EFX_ETHTOOL_STAT(field, mac_stats, field,               \
+                         u64, efx_get_u64_stat)
+
+#define EFX_ETHTOOL_UINT_NIC_STAT(name)                                \
+       EFX_ETHTOOL_STAT(name, nic, n_##name,                   \
+                        unsigned int, efx_get_uint_stat)
+
+#define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field)               \
+       EFX_ETHTOOL_STAT(field, nic, field,                     \
+                        atomic_t, efx_get_atomic_stat)
+
+#define EFX_ETHTOOL_UINT_CHANNEL_STAT(field)                   \
+       EFX_ETHTOOL_STAT(field, channel, n_##field,             \
+                        unsigned int, efx_get_uint_stat)
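
The ternary in EFX_ETHTOOL_STAT above is a compile-time type check: both arms evaluate to the same offsetof(), but the pointer comparison ((field_type *) 0) == &((struct efx_##source_name *) 0)->field makes the compiler warn about distinct pointer types whenever the named field does not actually have the declared type. As a sketch, EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets) expands to roughly:

    {
            .name = "tx_packets",
            .source = EFX_ETHTOOL_STAT_SOURCE_mac_stats,
            /* Warns at compile time unless tx_packets is an unsigned long;
             * either way the value is just the field's offset. */
            .offset = ((((unsigned long *) 0) ==
                          &((struct efx_mac_stats *) 0)->tx_packets) ?
                        offsetof(struct efx_mac_stats, tx_packets) :
                        offsetof(struct efx_mac_stats, tx_packets)),
            .get_stat = efx_get_ulong_stat,
    }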
+
+static struct efx_ethtool_stat efx_ethtool_stats[] = {
+       EFX_ETHTOOL_U64_MAC_STAT(tx_bytes),
+       EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes),
+       EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_control),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_64),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error),
+       EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error),
+       EFX_ETHTOOL_U64_MAC_STAT(rx_bytes),
+       EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes),
+       EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_good),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_control),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_64),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error),
+       EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error),
+       EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt),
+       EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset),
+       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc),
+       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err),
+       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
+       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
+};
+
+/* Number of ethtool statistics */
+#define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats)
+
+/**************************************************************************
+ *
+ * Ethtool operations
+ *
+ **************************************************************************
+ */
+
+/* Identify device by flashing LEDs */
+static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds)
+{
+       struct efx_nic *efx = net_dev->priv;
+
+       efx->board_info.blink(efx, 1);
+       schedule_timeout_interruptible(seconds * HZ);
+       efx->board_info.blink(efx, 0);
+       return 0;
+}
+
+/* This must be called with rtnl_lock held. */
+int efx_ethtool_get_settings(struct net_device *net_dev,
+                            struct ethtool_cmd *ecmd)
+{
+       struct efx_nic *efx = net_dev->priv;
+       int rc;
+
+       mutex_lock(&efx->mac_lock);
+       rc = falcon_xmac_get_settings(efx, ecmd);
+       mutex_unlock(&efx->mac_lock);
+
+       return rc;
+}
+
+/* This must be called with rtnl_lock held. */
+int efx_ethtool_set_settings(struct net_device *net_dev,
+                            struct ethtool_cmd *ecmd)
+{
+       struct efx_nic *efx = net_dev->priv;
+       int rc;
+
+       mutex_lock(&efx->mac_lock);
+       rc = falcon_xmac_set_settings(efx, ecmd);
+       mutex_unlock(&efx->mac_lock);
+       if (!rc)
+               efx_reconfigure_port(efx);
+
+       return rc;
+}
+
+static void efx_ethtool_get_drvinfo(struct net_device *net_dev,
+                                   struct ethtool_drvinfo *info)
+{
+       struct efx_nic *efx = net_dev->priv;
+
+       strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver));
+       strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info));
+}
+
+static int efx_ethtool_get_stats_count(struct net_device *net_dev)
+{
+       return EFX_ETHTOOL_NUM_STATS;
+}
+
+static void efx_ethtool_get_strings(struct net_device *net_dev,
+                                   u32 string_set, u8 *strings)
+{
+       struct ethtool_string *ethtool_strings =
+               (struct ethtool_string *)strings;
+       int i;
+
+       if (string_set == ETH_SS_STATS)
+               for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++)
+                       strncpy(ethtool_strings[i].name,
+                               efx_ethtool_stats[i].name,
+                               sizeof(ethtool_strings[i].name));
+}
+
+static void efx_ethtool_get_stats(struct net_device *net_dev,
+                                 struct ethtool_stats *stats,
+                                 u64 *data)
+{
+       struct efx_nic *efx = net_dev->priv;
+       struct efx_mac_stats *mac_stats = &efx->mac_stats;
+       struct efx_ethtool_stat *stat;
+       struct efx_channel *channel;
+       int i;
+
+       EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS);
+
+       /* Update MAC and NIC statistics */
+       net_dev->get_stats(net_dev);
+
+       /* Fill detailed statistics buffer */
+       for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) {
+               stat = &efx_ethtool_stats[i];
+               switch (stat->source) {
+               case EFX_ETHTOOL_STAT_SOURCE_mac_stats:
+                       data[i] = stat->get_stat((void *)mac_stats +
+                                                stat->offset);
+                       break;
+               case EFX_ETHTOOL_STAT_SOURCE_nic:
+                       data[i] = stat->get_stat((void *)efx + stat->offset);
+                       break;
+               case EFX_ETHTOOL_STAT_SOURCE_channel:
+                       data[i] = 0;
+                       efx_for_each_channel(channel, efx)
+                               data[i] += stat->get_stat((void *)channel +
+                                                         stat->offset);
+                       break;
+               }
+       }
+}
+
+static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable)
+{
+       struct efx_nic *efx = net_dev->priv;
+       int rc;
+
+       rc = ethtool_op_set_tx_csum(net_dev, enable);
+       if (rc)
+               return rc;
+
+       efx_flush_queues(efx);
+
+       return 0;
+}
+
+static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable)
+{
+       struct efx_nic *efx = net_dev->priv;
+
+       /* No way to stop the hardware doing the checks; we just
+        * ignore the result.
+        */
+       efx->rx_checksum_enabled = (enable ? 1 : 0);
+
+       return 0;
+}
+
+static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev)
+{
+       struct efx_nic *efx = net_dev->priv;
+
+       return efx->rx_checksum_enabled;
+}
+
+/* Restart autonegotiation */
+static int efx_ethtool_nway_reset(struct net_device *net_dev)
+{
+       struct efx_nic *efx = net_dev->priv;
+
+       return mii_nway_restart(&efx->mii);
+}
+
+static u32 efx_ethtool_get_link(struct net_device *net_dev)
+{
+       struct efx_nic *efx = net_dev->priv;
+
+       return efx->link_up;
+}
+
+static int efx_ethtool_get_coalesce(struct net_device *net_dev,
+                                   struct ethtool_coalesce *coalesce)
+{
+       struct efx_nic *efx = net_dev->priv;
+       struct efx_tx_queue *tx_queue;
+       struct efx_rx_queue *rx_queue;
+       struct efx_channel *channel;
+
+       memset(coalesce, 0, sizeof(*coalesce));
+
+       /* Find lowest IRQ moderation across all used TX queues */
+       coalesce->tx_coalesce_usecs_irq = ~((u32) 0);
+       efx_for_each_tx_queue(tx_queue, efx) {
+               channel = tx_queue->channel;
+               if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) {
+                       if (channel->used_flags != EFX_USED_BY_RX_TX)
+                               coalesce->tx_coalesce_usecs_irq =
+                                       channel->irq_moderation;
+                       else
+                               coalesce->tx_coalesce_usecs_irq = 0;
+               }
+       }
+
+       /* Find lowest IRQ moderation across all used RX queues */
+       coalesce->rx_coalesce_usecs_irq = ~((u32) 0);
+       efx_for_each_rx_queue(rx_queue, efx) {
+               channel = rx_queue->channel;
+               if (channel->irq_moderation < coalesce->rx_coalesce_usecs_irq)
+                       coalesce->rx_coalesce_usecs_irq =
+                               channel->irq_moderation;
+       }
+
+       return 0;
+}
+
+/* Set coalescing parameters.
+ * The difficult case is a channel shared between RX and TX queues.
+ */
+static int efx_ethtool_set_coalesce(struct net_device *net_dev,
+                                   struct ethtool_coalesce *coalesce)
+{
+       struct efx_nic *efx = net_dev->priv;
+       struct efx_channel *channel;
+       struct efx_tx_queue *tx_queue;
+       unsigned tx_usecs, rx_usecs;
+
+       if (coalesce->use_adaptive_rx_coalesce ||
+           coalesce->use_adaptive_tx_coalesce)
+               return -EOPNOTSUPP;
+
+       if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) {
+               EFX_ERR(efx, "invalid coalescing setting. "
+                       "Only rx/tx_coalesce_usecs_irq are supported\n");
+               return -EOPNOTSUPP;
+       }
+
+       rx_usecs = coalesce->rx_coalesce_usecs_irq;
+       tx_usecs = coalesce->tx_coalesce_usecs_irq;
+
+       /* If the channel is shared only allow RX parameters to be set */
+       efx_for_each_tx_queue(tx_queue, efx) {
+               if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) &&
+                   tx_usecs) {
+                       EFX_ERR(efx, "Channel is shared. "
+                               "Only RX coalescing may be set\n");
+                       return -EOPNOTSUPP;
+               }
+       }
+
+       efx_init_irq_moderation(efx, tx_usecs, rx_usecs);
+
+       /* Reset channel to pick up new moderation value.  Note that
+        * this may change the value of the irq_moderation field
+        * (e.g. to allow for hardware timer granularity).
+        */
+       efx_for_each_channel(channel, efx)
+               falcon_set_int_moderation(channel);
+
+       return 0;
+}
+
+static int efx_ethtool_set_pauseparam(struct net_device *net_dev,
+                                     struct ethtool_pauseparam *pause)
+{
+       struct efx_nic *efx = net_dev->priv;
+       enum efx_fc_type flow_control = efx->flow_control;
+       int rc;
+
+       flow_control &= ~(EFX_FC_RX | EFX_FC_TX | EFX_FC_AUTO);
+       flow_control |= pause->rx_pause ? EFX_FC_RX : 0;
+       flow_control |= pause->tx_pause ? EFX_FC_TX : 0;
+       flow_control |= pause->autoneg ? EFX_FC_AUTO : 0;
+
+       /* Try to push the pause parameters */
+       mutex_lock(&efx->mac_lock);
+       rc = falcon_xmac_set_pause(efx, flow_control);
+       mutex_unlock(&efx->mac_lock);
+
+       if (!rc)
+               efx_reconfigure_port(efx);
+
+       return rc;
+}
+
+static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
+                                      struct ethtool_pauseparam *pause)
+{
+       struct efx_nic *efx = net_dev->priv;
+
+       pause->rx_pause = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
+       pause->tx_pause = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
+       pause->autoneg = (efx->flow_control & EFX_FC_AUTO) ? 1 : 0;
+}
+
+
+struct ethtool_ops efx_ethtool_ops = {
+       .get_settings           = efx_ethtool_get_settings,
+       .set_settings           = efx_ethtool_set_settings,
+       .get_drvinfo            = efx_ethtool_get_drvinfo,
+       .nway_reset             = efx_ethtool_nway_reset,
+       .get_link               = efx_ethtool_get_link,
+       .get_coalesce           = efx_ethtool_get_coalesce,
+       .set_coalesce           = efx_ethtool_set_coalesce,
+       .get_pauseparam         = efx_ethtool_get_pauseparam,
+       .set_pauseparam         = efx_ethtool_set_pauseparam,
+       .get_rx_csum            = efx_ethtool_get_rx_csum,
+       .set_rx_csum            = efx_ethtool_set_rx_csum,
+       .get_tx_csum            = ethtool_op_get_tx_csum,
+       .set_tx_csum            = efx_ethtool_set_tx_csum,
+       .get_sg                 = ethtool_op_get_sg,
+       .set_sg                 = ethtool_op_set_sg,
+       .get_flags              = ethtool_op_get_flags,
+       .set_flags              = ethtool_op_set_flags,
+       .get_strings            = efx_ethtool_get_strings,
+       .phys_id                = efx_ethtool_phys_id,
+       .get_stats_count        = efx_ethtool_get_stats_count,
+       .get_ethtool_stats      = efx_ethtool_get_stats,
+};
diff --git a/drivers/net/sfc/ethtool.h b/drivers/net/sfc/ethtool.h
new file mode 100644 (file)
index 0000000..3628e43
--- /dev/null
@@ -0,0 +1,27 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005 Fen Systems Ltd.
+ * Copyright 2006 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_ETHTOOL_H
+#define EFX_ETHTOOL_H
+
+#include "net_driver.h"
+
+/*
+ * Ethtool support
+ */
+
+extern int efx_ethtool_get_settings(struct net_device *net_dev,
+                                   struct ethtool_cmd *ecmd);
+extern int efx_ethtool_set_settings(struct net_device *net_dev,
+                                   struct ethtool_cmd *ecmd);
+
+extern struct ethtool_ops efx_ethtool_ops;
+
+#endif /* EFX_ETHTOOL_H */
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
new file mode 100644 (file)
index 0000000..46db549
--- /dev/null
@@ -0,0 +1,2722 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include "net_driver.h"
+#include "bitfield.h"
+#include "efx.h"
+#include "mac.h"
+#include "gmii.h"
+#include "spi.h"
+#include "falcon.h"
+#include "falcon_hwdefs.h"
+#include "falcon_io.h"
+#include "mdio_10g.h"
+#include "phy.h"
+#include "boards.h"
+#include "workarounds.h"
+
+/* Falcon hardware control.
+ * Falcon is the internal codename for the SFC4000 controller that is
+ * present in SFE400X evaluation boards
+ */
+
+/**
+ * struct falcon_nic_data - Falcon NIC state
+ * @next_buffer_table: First available buffer table id
+ * @pci_dev2: The secondary PCI device if present
+ */
+struct falcon_nic_data {
+       unsigned next_buffer_table;
+       struct pci_dev *pci_dev2;
+};
+
+/**************************************************************************
+ *
+ * Configurable values
+ *
+ **************************************************************************
+ */
+
+static int disable_dma_stats;
+
+/* This is set to 16 for a good reason.  In summary, if larger than
+ * 16, the descriptor cache holds more than a default socket
+ * buffer's worth of packets (for UDP we can only have at most one
+ * socket buffer's worth outstanding).  This combined with the fact
+ * that we only get 1 TX event per descriptor cache means the NIC
+ * goes idle.
+ */
+#define TX_DC_ENTRIES 16
+#define TX_DC_ENTRIES_ORDER 0
+#define TX_DC_BASE 0x130000
+
+#define RX_DC_ENTRIES 64
+#define RX_DC_ENTRIES_ORDER 2
+#define RX_DC_BASE 0x100000
+
+/* RX FIFO XOFF watermark
+ *
+ * When the amount of the RX FIFO used increases past this watermark,
+ * send XOFF. Only used if RX flow control is enabled (ethtool -A).
+ * This also has an effect on RX/TX arbitration.
+ */
+static int rx_xoff_thresh_bytes = -1;
+module_param(rx_xoff_thresh_bytes, int, 0644);
+MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold");
+
+/* RX FIFO XON watermark
+ *
+ * When the amount of the RX FIFO used decreases below this watermark,
+ * send XON. Only used if TX flow control is enabled (ethtool -A).
+ * This also has an effect on RX/TX arbitration.
+ */
+static int rx_xon_thresh_bytes = -1;
+module_param(rx_xon_thresh_bytes, int, 0644);
+MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
+
+/* TX descriptor ring size - min 512 max 4k */
+#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
+#define FALCON_TXD_RING_SIZE 1024
+#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
+
+/* RX descriptor ring size - min 512 max 4k */
+#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
+#define FALCON_RXD_RING_SIZE 1024
+#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
+
+/* Event queue size - max 32k */
+#define FALCON_EVQ_ORDER EVQ_SIZE_4K
+#define FALCON_EVQ_SIZE 4096
+#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
+
+/* Max number of internal errors. After this resets will not be performed */
+#define FALCON_MAX_INT_ERRORS 4
+
+/* Maximum period that we wait for flush events. If the flush event
+ * doesn't arrive in this period of time then we check if the queue
+ * was disabled anyway. */
+#define FALCON_FLUSH_TIMEOUT 10 /* 10ms */
+
+/**************************************************************************
+ *
+ * Falcon constants
+ *
+ **************************************************************************
+ */
+
+/* DMA address mask (up to 46-bit, avoiding compiler warnings)
+ *
+ * Note that it is possible to have a platform with 64-bit longs and
+ * 32-bit DMA addresses, or vice versa.  EFX_DMA_MASK takes care of the
+ * platform DMA mask.
+ */
+#if BITS_PER_LONG == 64
+#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
+#else
+#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
+#endif
+
+/* TX DMA length mask (13-bit) */
+#define FALCON_TX_DMA_MASK (4096 - 1)
+
+/* Size and alignment of special buffers (4KB) */
+#define FALCON_BUF_SIZE 4096
+
+/* Dummy SRAM size code */
+#define SRM_NB_BSZ_ONCHIP_ONLY (-1)
+
+/* Be nice if these (or equiv.) were in linux/pci_regs.h, but they're not. */
+#define PCI_EXP_DEVCAP_PWR_VAL_LBN     18
+#define PCI_EXP_DEVCAP_PWR_SCL_LBN     26
+#define PCI_EXP_DEVCTL_PAYLOAD_LBN     5
+#define PCI_EXP_LNKSTA_LNK_WID         0x3f0
+#define PCI_EXP_LNKSTA_LNK_WID_LBN     4
+
+#define FALCON_IS_DUAL_FUNC(efx)               \
+       (FALCON_REV(efx) < FALCON_REV_B0)
+
+/**************************************************************************
+ *
+ * Falcon hardware access
+ *
+ **************************************************************************/
+
+/* Read the current event from the event queue */
+static inline efx_qword_t *falcon_event(struct efx_channel *channel,
+                                       unsigned int index)
+{
+       return (((efx_qword_t *) (channel->eventq.addr)) + index);
+}
+
+/* See if an event is present
+ *
+ * We check both the high and low dword of the event for all ones.  We
+ * wrote all ones when we cleared the event, and no valid event can
+ * have all ones in either its high or low dwords.  This approach is
+ * robust against reordering.
+ *
+ * Note that using a single 64-bit comparison is incorrect; even
+ * though the CPU read will be atomic, the DMA write may not be.
+ */
+static inline int falcon_event_present(efx_qword_t *event)
+{
+       return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
+                 EFX_DWORD_IS_ALL_ONES(event->dword[1])));
+}
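
The all-ones convention works because, per the comment above, events are cleared by writing all-ones back, and the queue memory starts out filled with 0xff (see the memset in falcon_alloc_special_buffer() later in this file), so no live event can have either dword entirely set. A simplified consumer-loop sketch under those assumptions; the function and its parameters are invented for illustration and are not the driver's real event handler:

    static void example_drain_eventq(struct efx_channel *channel,
                                     unsigned int *read_ptr, unsigned int mask)
    {
            efx_qword_t *event;

            for (;;) {
                    event = falcon_event(channel, *read_ptr & mask);
                    if (!falcon_event_present(event))
                            break;
                    /* ... decode and dispatch the event here ... */
                    memset(event, 0xff, sizeof(*event));    /* mark consumed */
                    (*read_ptr)++;
            }
    }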
+
+/**************************************************************************
+ *
+ * I2C bus - this is a bit-bashing interface using GPIO pins.
+ * Note that it uses the output enables to tristate the outputs.
+ * SDA is the data pin and SCL is the clock.
+ *
+ **************************************************************************
+ */
+static void falcon_setsdascl(struct efx_i2c_interface *i2c)
+{
+       efx_oword_t reg;
+
+       falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
+       EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, (i2c->scl ? 0 : 1));
+       EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, (i2c->sda ? 0 : 1));
+       falcon_write(i2c->efx, &reg, GPIO_CTL_REG_KER);
+}
+
+static int falcon_getsda(struct efx_i2c_interface *i2c)
+{
+       efx_oword_t reg;
+
+       falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
+       return EFX_OWORD_FIELD(reg, GPIO3_IN);
+}
+
+static int falcon_getscl(struct efx_i2c_interface *i2c)
+{
+       efx_oword_t reg;
+
+       falcon_read(i2c->efx, &reg, GPIO_CTL_REG_KER);
+       return EFX_DWORD_FIELD(reg, GPIO0_IN);
+}
+
+static struct efx_i2c_bit_operations falcon_i2c_bit_operations = {
+       .setsda         = falcon_setsdascl,
+       .setscl         = falcon_setsdascl,
+       .getsda         = falcon_getsda,
+       .getscl         = falcon_getscl,
+       .udelay         = 100,
+       .mdelay         = 10,
+};
+
+/**************************************************************************
+ *
+ * Falcon special buffer handling
+ * Special buffers are used for event queues and the TX and RX
+ * descriptor rings.
+ *
+ *************************************************************************/
+
+/*
+ * Initialise a Falcon special buffer
+ *
+ * This will define a buffer (previously allocated via
+ * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing
+ * it to be used for event queues, descriptor rings etc.
+ */
+static int
+falcon_init_special_buffer(struct efx_nic *efx,
+                          struct efx_special_buffer *buffer)
+{
+       efx_qword_t buf_desc;
+       int index;
+       dma_addr_t dma_addr;
+       int i;
+
+       EFX_BUG_ON_PARANOID(!buffer->addr);
+
+       /* Write buffer descriptors to NIC */
+       for (i = 0; i < buffer->entries; i++) {
+               index = buffer->index + i;
+               dma_addr = buffer->dma_addr + (i * 4096);
+               EFX_LOG(efx, "mapping special buffer %d at %llx\n",
+                       index, (unsigned long long)dma_addr);
+               EFX_POPULATE_QWORD_4(buf_desc,
+                                    IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K,
+                                    BUF_ADR_REGION, 0,
+                                    BUF_ADR_FBUF, (dma_addr >> 12),
+                                    BUF_OWNER_ID_FBUF, 0);
+               falcon_write_sram(efx, &buf_desc, index);
+       }
+
+       return 0;
+}
+
+/* Unmaps a buffer from Falcon and clears the buffer table entries */
+static void
+falcon_fini_special_buffer(struct efx_nic *efx,
+                          struct efx_special_buffer *buffer)
+{
+       efx_oword_t buf_tbl_upd;
+       unsigned int start = buffer->index;
+       unsigned int end = (buffer->index + buffer->entries - 1);
+
+       if (!buffer->entries)
+               return;
+
+       EFX_LOG(efx, "unmapping special buffers %d-%d\n",
+               buffer->index, buffer->index + buffer->entries - 1);
+
+       EFX_POPULATE_OWORD_4(buf_tbl_upd,
+                            BUF_UPD_CMD, 0,
+                            BUF_CLR_CMD, 1,
+                            BUF_CLR_END_ID, end,
+                            BUF_CLR_START_ID, start);
+       falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER);
+}
+
+/*
+ * Allocate a new Falcon special buffer
+ *
+ * This allocates memory for a new buffer, clears it and allocates a
+ * new buffer ID range.  It does not write into Falcon's buffer table.
+ *
+ * This call will allocate 4KB buffers, since Falcon can't use 8KB
+ * buffers for event queues and descriptor rings.
+ */
+static int falcon_alloc_special_buffer(struct efx_nic *efx,
+                                      struct efx_special_buffer *buffer,
+                                      unsigned int len)
+{
+       struct falcon_nic_data *nic_data = efx->nic_data;
+
+       len = ALIGN(len, FALCON_BUF_SIZE);
+
+       buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
+                                           &buffer->dma_addr);
+       if (!buffer->addr)
+               return -ENOMEM;
+       buffer->len = len;
+       buffer->entries = len / FALCON_BUF_SIZE;
+       BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1));
+
+       /* All zeros is a potentially valid event so memset to 0xff */
+       memset(buffer->addr, 0xff, len);
+
+       /* Select new buffer ID */
+       buffer->index = nic_data->next_buffer_table;
+       nic_data->next_buffer_table += buffer->entries;
+
+       EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
+               "(virt %p phys %lx)\n", buffer->index,
+               buffer->index + buffer->entries - 1,
+               (unsigned long long)buffer->dma_addr, len,
+               buffer->addr, virt_to_phys(buffer->addr));
+
+       return 0;
+}
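
As a rough sizing note: FALCON_EVQ_SIZE (4096) events of 8 bytes each come to 32KB, so an event queue consumes eight 4KB buffer-table entries, while a 1024-entry descriptor ring needs two. A minimal, purely illustrative caller (not part of the patch):

    static int example_alloc_eventq(struct efx_nic *efx,
                                    struct efx_special_buffer *eventq)
    {
            return falcon_alloc_special_buffer(efx, eventq,
                                               FALCON_EVQ_SIZE *
                                               sizeof(efx_qword_t));
    }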
+
+static void falcon_free_special_buffer(struct efx_nic *efx,
+                                      struct efx_special_buffer *buffer)
+{
+       if (!buffer->addr)
+               return;
+
+       EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x "
+               "(virt %p phys %lx)\n", buffer->index,
+               buffer->index + buffer->entries - 1,
+               (unsigned long long)buffer->dma_addr, buffer->len,
+               buffer->addr, virt_to_phys(buffer->addr));
+
+       pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr,
+                           buffer->dma_addr);
+       buffer->addr = NULL;
+       buffer->entries = 0;
+}
+
+/**************************************************************************
+ *
+ * Falcon generic buffer handling
+ * These buffers are used for interrupt status and MAC stats
+ *
+ **************************************************************************/
+
+static int falcon_alloc_buffer(struct efx_nic *efx,
+                              struct efx_buffer *buffer, unsigned int len)
+{
+       buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
+                                           &buffer->dma_addr);
+       if (!buffer->addr)
+               return -ENOMEM;
+       buffer->len = len;
+       memset(buffer->addr, 0, len);
+       return 0;
+}
+
+static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
+{
+       if (buffer->addr) {
+               pci_free_consistent(efx->pci_dev, buffer->len,
+                                   buffer->addr, buffer->dma_addr);
+               buffer->addr = NULL;
+       }
+}
+
+/**************************************************************************
+ *
+ * Falcon TX path
+ *
+ **************************************************************************/
+
+/* Returns a pointer to the specified transmit descriptor in the TX
+ * descriptor queue belonging to the specified channel.
+ */
+static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue,
+                                              unsigned int index)
+{
+       return (((efx_qword_t *) (tx_queue->txd.addr)) + index);
+}
+
+/* Write to TX_DESC_WPTR, the write pointer for the TX descriptor ring */
+static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
+{
+       unsigned write_ptr;
+       efx_dword_t reg;
+
+       write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
+       EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr);
+       falcon_writel_page(tx_queue->efx, &reg,
+                          TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue);
+}
+
+
+/* For each entry inserted into the software descriptor ring, create a
+ * descriptor in the hardware TX descriptor ring (in host memory), and
+ * write a doorbell.
+ */
+void falcon_push_buffers(struct efx_tx_queue *tx_queue)
+{
+       struct efx_tx_buffer *buffer;
+       efx_qword_t *txd;
+       unsigned write_ptr;
+
+       BUG_ON(tx_queue->write_count == tx_queue->insert_count);
+
+       do {
+               write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK;
+               buffer = &tx_queue->buffer[write_ptr];
+               txd = falcon_tx_desc(tx_queue, write_ptr);
+               ++tx_queue->write_count;
+
+               /* Create TX descriptor ring entry */
+               EFX_POPULATE_QWORD_5(*txd,
+                                    TX_KER_PORT, 0,
+                                    TX_KER_CONT, buffer->continuation,
+                                    TX_KER_BYTE_CNT, buffer->len,
+                                    TX_KER_BUF_REGION, 0,
+                                    TX_KER_BUF_ADR, buffer->dma_addr);
+       } while (tx_queue->write_count != tx_queue->insert_count);
+
+       wmb(); /* Ensure descriptors are written before they are fetched */
+       falcon_notify_tx_desc(tx_queue);
+}
+
+/* Allocate hardware resources for a TX queue */
+int falcon_probe_tx(struct efx_tx_queue *tx_queue)
+{
+       struct efx_nic *efx = tx_queue->efx;
+       return falcon_alloc_special_buffer(efx, &tx_queue->txd,
+                                          FALCON_TXD_RING_SIZE *
+                                          sizeof(efx_qword_t));
+}
+
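+/* Pin the TX descriptor ring in the buffer table and push its base
+ * buffer ID, size and owning event queue to the hardware.
+ */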
+int falcon_init_tx(struct efx_tx_queue *tx_queue)
+{
+       efx_oword_t tx_desc_ptr;
+       struct efx_nic *efx = tx_queue->efx;
+       int rc;
+
+       /* Pin TX descriptor ring */
+       rc = falcon_init_special_buffer(efx, &tx_queue->txd);
+       if (rc)
+               return rc;
+
+       /* Push TX descriptor ring to card */
+       EFX_POPULATE_OWORD_10(tx_desc_ptr,
+                             TX_DESCQ_EN, 1,
+                             TX_ISCSI_DDIG_EN, 0,
+                             TX_ISCSI_HDIG_EN, 0,
+                             TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
+                             TX_DESCQ_EVQ_ID, tx_queue->channel->evqnum,
+                             TX_DESCQ_OWNER_ID, 0,
+                             TX_DESCQ_LABEL, tx_queue->queue,
+                             TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER,
+                             TX_DESCQ_TYPE, 0,
+                             TX_NON_IP_DROP_DIS_B0, 1);
+
+       if (FALCON_REV(efx) >= FALCON_REV_B0) {
+               int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
+               EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
+               EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
+       }
+
+       falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+                          tx_queue->queue);
+
+       if (FALCON_REV(efx) < FALCON_REV_B0) {
+               efx_oword_t reg;
+
+               BUG_ON(tx_queue->queue >= 128); /* HW limit */
+
+               falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
+               if (efx->net_dev->features & NETIF_F_IP_CSUM)
+                       clear_bit_le(tx_queue->queue, (void *)&reg);
+               else
+                       set_bit_le(tx_queue->queue, (void *)&reg);
+               falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1);
+       }
+
+       return 0;
+}
+
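+/* Post a TX queue flush command and wait for the corresponding
+ * flush-done driver event (or for the queue to read back as disabled).
+ */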
+static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
+{
+       struct efx_nic *efx = tx_queue->efx;
+       struct efx_channel *channel = &efx->channel[0];
+       efx_oword_t tx_flush_descq;
+       unsigned int read_ptr, i;
+
+       /* Post a flush command */
+       EFX_POPULATE_OWORD_2(tx_flush_descq,
+                            TX_FLUSH_DESCQ_CMD, 1,
+                            TX_FLUSH_DESCQ, tx_queue->queue);
+       falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER);
+       msleep(FALCON_FLUSH_TIMEOUT);
+
+       if (EFX_WORKAROUND_7803(efx))
+               return 0;
+
+       /* Look for a flush completed event */
+       read_ptr = channel->eventq_read_ptr;
+       for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
+               efx_qword_t *event = falcon_event(channel, read_ptr);
+               int ev_code, ev_sub_code, ev_queue;
+               if (!falcon_event_present(event))
+                       break;
+
+               ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
+               ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
+               ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID);
+               if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) &&
+                   (ev_queue == tx_queue->queue)) {
+                       EFX_LOG(efx, "tx queue %d flush command successful\n",
+                               tx_queue->queue);
+                       return 0;
+               }
+
+               read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
+       }
+
+       if (EFX_WORKAROUND_11557(efx)) {
+               efx_oword_t reg;
+               int enabled;
+
+               falcon_read_table(efx, &reg, efx->type->txd_ptr_tbl_base,
+                                 tx_queue->queue);
+               enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN);
+               if (!enabled) {
+                       EFX_LOG(efx, "tx queue %d disabled without a "
+                               "flush event seen\n", tx_queue->queue);
+                       return 0;
+               }
+       }
+
+       EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue);
+       return -ETIMEDOUT;
+}
+
+void falcon_fini_tx(struct efx_tx_queue *tx_queue)
+{
+       struct efx_nic *efx = tx_queue->efx;
+       efx_oword_t tx_desc_ptr;
+
+       /* Stop the hardware using the queue */
+       if (falcon_flush_tx_queue(tx_queue))
+               EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue);
+
+       /* Remove TX descriptor ring from card */
+       EFX_ZERO_OWORD(tx_desc_ptr);
+       falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
+                          tx_queue->queue);
+
+       /* Unpin TX descriptor ring */
+       falcon_fini_special_buffer(efx, &tx_queue->txd);
+}
+
+/* Free buffers backing TX queue */
+void falcon_remove_tx(struct efx_tx_queue *tx_queue)
+{
+       falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd);
+}
+
+/**************************************************************************
+ *
+ * Falcon RX path
+ *
+ **************************************************************************/
+
+/* Returns a pointer to the specified descriptor in the RX descriptor queue */
+static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue,
+                                              unsigned int index)
+{
+       return (((efx_qword_t *) (rx_queue->rxd.addr)) + index);
+}
+
+/* This creates an entry in the RX descriptor queue */
+static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
+                                       unsigned index)
+{
+       struct efx_rx_buffer *rx_buf;
+       efx_qword_t *rxd;
+
+       rxd = falcon_rx_desc(rx_queue, index);
+       rx_buf = efx_rx_buffer(rx_queue, index);
+       EFX_POPULATE_QWORD_3(*rxd,
+                            RX_KER_BUF_SIZE,
+                            rx_buf->len -
+                            rx_queue->efx->type->rx_buffer_padding,
+                            RX_KER_BUF_REGION, 0,
+                            RX_KER_BUF_ADR, rx_buf->dma_addr);
+}
+
+/* This writes to the RX_DESC_WPTR register for the specified receive
+ * descriptor ring.
+ */
+void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
+{
+       efx_dword_t reg;
+       unsigned write_ptr;
+
+       while (rx_queue->notified_count != rx_queue->added_count) {
+               falcon_build_rx_desc(rx_queue,
+                                    rx_queue->notified_count &
+                                    FALCON_RXD_RING_MASK);
+               ++rx_queue->notified_count;
+       }
+
+       wmb();
+       write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK;
+       EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr);
+       falcon_writel_page(rx_queue->efx, &reg,
+                          RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue);
+}
+
+int falcon_probe_rx(struct efx_rx_queue *rx_queue)
+{
+       struct efx_nic *efx = rx_queue->efx;
+       return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
+                                          FALCON_RXD_RING_SIZE *
+                                          sizeof(efx_qword_t));
+}
+
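+/* Pin the RX descriptor ring in the buffer table and push its base
+ * buffer ID, size and owning event queue to the hardware.
+ */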
+int falcon_init_rx(struct efx_rx_queue *rx_queue)
+{
+       efx_oword_t rx_desc_ptr;
+       struct efx_nic *efx = rx_queue->efx;
+       int rc;
+       int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0;
+       int iscsi_digest_en = is_b0;
+
+       EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
+               rx_queue->queue, rx_queue->rxd.index,
+               rx_queue->rxd.index + rx_queue->rxd.entries - 1);
+
+       /* Pin RX descriptor ring */
+       rc = falcon_init_special_buffer(efx, &rx_queue->rxd);
+       if (rc)
+               return rc;
+
+       /* Push RX descriptor ring to card */
+       EFX_POPULATE_OWORD_10(rx_desc_ptr,
+                             RX_ISCSI_DDIG_EN, iscsi_digest_en,
+                             RX_ISCSI_HDIG_EN, iscsi_digest_en,
+                             RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
+                             RX_DESCQ_EVQ_ID, rx_queue->channel->evqnum,
+                             RX_DESCQ_OWNER_ID, 0,
+                             RX_DESCQ_LABEL, rx_queue->queue,
+                             RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER,
+                             RX_DESCQ_TYPE, 0 /* kernel queue */,
+                             /* For >=B0 this is scatter so disable */
+                             RX_DESCQ_JUMBO, !is_b0,
+                             RX_DESCQ_EN, 1);
+       falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+                          rx_queue->queue);
+       return 0;
+}
+
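+/* Post an RX queue flush command and wait for the flush-done driver
+ * event.  Returns -EAGAIN if the hardware reports that the flush failed.
+ */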
+static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
+{
+       struct efx_nic *efx = rx_queue->efx;
+       struct efx_channel *channel = &efx->channel[0];
+       unsigned int read_ptr, i;
+       efx_oword_t rx_flush_descq;
+
+       /* Post a flush command */
+       EFX_POPULATE_OWORD_2(rx_flush_descq,
+                            RX_FLUSH_DESCQ_CMD, 1,
+                            RX_FLUSH_DESCQ, rx_queue->queue);
+       falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER);
+       msleep(FALCON_FLUSH_TIMEOUT);
+
+       if (EFX_WORKAROUND_7803(efx))
+               return 0;
+
+       /* Look for a flush completed event */
+       read_ptr = channel->eventq_read_ptr;
+       for (i = 0; i < FALCON_EVQ_SIZE; ++i) {
+               efx_qword_t *event = falcon_event(channel, read_ptr);
+               int ev_code, ev_sub_code, ev_queue, ev_failed;
+               if (!falcon_event_present(event))
+                       break;
+
+               ev_code = EFX_QWORD_FIELD(*event, EV_CODE);
+               ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
+               ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID);
+               ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL);
+
+               if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) &&
+                   (ev_queue == rx_queue->queue)) {
+                       if (ev_failed) {
+                               EFX_INFO(efx, "rx queue %d flush command "
+                                        "failed\n", rx_queue->queue);
+                               return -EAGAIN;
+                       } else {
+                               EFX_LOG(efx, "rx queue %d flush command "
+                                       "successful\n", rx_queue->queue);
+                               return 0;
+                       }
+               }
+
+               read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
+       }
+
+       if (EFX_WORKAROUND_11557(efx)) {
+               efx_oword_t reg;
+               int enabled;
+
+               falcon_read_table(efx, &reg, efx->type->rxd_ptr_tbl_base,
+                                 rx_queue->queue);
+               enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN);
+               if (!enabled) {
+                       EFX_LOG(efx, "rx queue %d disabled without a "
+                               "flush event seen\n", rx_queue->queue);
+                       return 0;
+               }
+       }
+
+       EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue);
+       return -ETIMEDOUT;
+}
+
+void falcon_fini_rx(struct efx_rx_queue *rx_queue)
+{
+       efx_oword_t rx_desc_ptr;
+       struct efx_nic *efx = rx_queue->efx;
+       int i, rc;
+
+       /* Try to flush the RX queue; this may need to be repeated. */
+       for (i = 0; i < 5; i++) {
+               rc = falcon_flush_rx_queue(rx_queue);
+               if (rc == -EAGAIN)
+                       continue;
+               break;
+       }
+       if (rc)
+               EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue);
+
+       /* Remove RX descriptor ring from card */
+       EFX_ZERO_OWORD(rx_desc_ptr);
+       falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
+                          rx_queue->queue);
+
+       /* Unpin RX descriptor ring */
+       falcon_fini_special_buffer(efx, &rx_queue->rxd);
+}
+
+/* Free buffers backing RX queue */
+void falcon_remove_rx(struct efx_rx_queue *rx_queue)
+{
+       falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
+}
+
+/**************************************************************************
+ *
+ * Falcon event queue processing
+ * Event queues are processed by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Update a channel's event queue's read pointer (RPTR) register
+ *
+ * This writes the EVQ_RPTR_REG register for the specified channel's
+ * event queue.
+ *
+ * Note that EVQ_RPTR_REG contains the index of the "last read" event,
+ * whereas channel->eventq_read_ptr contains the index of the "next to
+ * read" event.
+ */
+void falcon_eventq_read_ack(struct efx_channel *channel)
+{
+       efx_dword_t reg;
+       struct efx_nic *efx = channel->efx;
+
+       EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr);
+       falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base,
+                           channel->evqnum);
+}
+
+/* Use HW to insert a SW defined event */
+void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
+{
+       efx_oword_t drv_ev_reg;
+
+       EFX_POPULATE_OWORD_2(drv_ev_reg,
+                            DRV_EV_QID, channel->evqnum,
+                            DRV_EV_DATA,
+                            EFX_QWORD_FIELD64(*event, WHOLE_EVENT));
+       falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER);
+}
+
+/* Handle a transmit completion event
+ *
+ * Falcon batches TX completion events; the message we receive is of
+ * the form "complete all TX events up to this index".
+ */
+static inline void falcon_handle_tx_event(struct efx_channel *channel,
+                                         efx_qword_t *event)
+{
+       unsigned int tx_ev_desc_ptr;
+       unsigned int tx_ev_q_label;
+       struct efx_tx_queue *tx_queue;
+       struct efx_nic *efx = channel->efx;
+
+       if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) {
+               /* Transmit completion */
+               tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR);
+               tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
+               tx_queue = &efx->tx_queue[tx_ev_q_label];
+               efx_xmit_done(tx_queue, tx_ev_desc_ptr);
+       } else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) {
+               /* Rewrite the FIFO write pointer */
+               tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
+               tx_queue = &efx->tx_queue[tx_ev_q_label];
+
+               if (NET_DEV_REGISTERED(efx))
+                       netif_tx_lock(efx->net_dev);
+               falcon_notify_tx_desc(tx_queue);
+               if (NET_DEV_REGISTERED(efx))
+                       netif_tx_unlock(efx->net_dev);
+       } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
+                  EFX_WORKAROUND_10727(efx)) {
+               efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
+       } else {
+               EFX_ERR(efx, "channel %d unexpected TX event "
+                       EFX_QWORD_FMT"\n", channel->channel,
+                       EFX_QWORD_VAL(*event));
+       }
+}
+
+/* Check received packet's destination MAC address. */
+static int check_dest_mac(struct efx_rx_queue *rx_queue,
+                         const efx_qword_t *event)
+{
+       struct efx_rx_buffer *rx_buf;
+       struct efx_nic *efx = rx_queue->efx;
+       int rx_ev_desc_ptr;
+       struct ethhdr *eh;
+
+       if (efx->promiscuous)
+               return 1;
+
+       rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
+       rx_buf = efx_rx_buffer(rx_queue, rx_ev_desc_ptr);
+       eh = (struct ethhdr *)rx_buf->data;
+       if (memcmp(eh->h_dest, efx->net_dev->dev_addr, ETH_ALEN))
+               return 0;
+       return 1;
+}
+
+/* Detect errors included in the rx_evt_pkt_ok bit. */
+static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
+                                   const efx_qword_t *event,
+                                   unsigned *rx_ev_pkt_ok,
+                                   int *discard, int byte_count)
+{
+       struct efx_nic *efx = rx_queue->efx;
+       unsigned rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
+       unsigned rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
+       unsigned rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
+       unsigned rx_ev_pkt_type, rx_ev_other_err, rx_ev_pause_frm;
+       unsigned rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
+       int snap, non_ip;
+
+       rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
+       rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
+       rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC);
+       rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE);
+       rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
+                                                RX_EV_BUF_OWNER_ID_ERR);
+       rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR);
+       rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
+                                                 RX_EV_IP_HDR_CHKSUM_ERR);
+       rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
+                                                  RX_EV_TCP_UDP_CHKSUM_ERR);
+       rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
+       rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
+       rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ?
+                         0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
+       rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
+
+       /* Every error apart from tobe_disc and pause_frm */
+       rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
+                          rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
+                          rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);
+
+       snap = (rx_ev_pkt_type == RX_EV_PKT_TYPE_LLC_DECODE) ||
+               (rx_ev_pkt_type == RX_EV_PKT_TYPE_VLAN_LLC_DECODE);
+       non_ip = (rx_ev_hdr_type == RX_EV_HDR_TYPE_NON_IP_DECODE);
+
+       /* SFC bug 5475/8970: The Falcon XMAC incorrectly calculates the
+        * length field of an LLC frame, which sets TOBE_DISC. We could set
+        * PASS_LEN_ERR, but we want the MAC to filter out short frames (to
+        * protect the RX block).
+        *
+        * bug5475 - LLC/SNAP: Falcon identifies SNAP packets.
+        * bug8970 - LLC/noSNAP: Falcon does not provide an LLC flag.
+        *                       LLC can't encapsulate IP, so by definition
+        *                       these packets are NON_IP.
+        *
+        * Unicast mismatch will also cause TOBE_DISC, so the driver needs
+        * to check this.
+        */
+       if (EFX_WORKAROUND_5475(efx) && rx_ev_tobe_disc && (snap || non_ip)) {
+               /* If all the other flags are zero then we can state the
+                * entire packet is ok, which will flag to the kernel not
+                * to recalculate checksums.
+                */
+               if (!(non_ip | rx_ev_other_err | rx_ev_pause_frm))
+                       *rx_ev_pkt_ok = 1;
+
+               rx_ev_tobe_disc = 0;
+
+               /* TOBE_DISC is set for unicast mismatch.  But given that
+                * we can't trust TOBE_DISC here, we must validate the dest
+                * MAC address ourselves.
+                */
+               if (!rx_ev_mcast_pkt && !check_dest_mac(rx_queue, event))
+                       rx_ev_tobe_disc = 1;
+       }
+
+       /* Count errors that are not in MAC stats. */
+       if (rx_ev_frm_trunc)
+               ++rx_queue->channel->n_rx_frm_trunc;
+       else if (rx_ev_tobe_disc)
+               ++rx_queue->channel->n_rx_tobe_disc;
+       else if (rx_ev_ip_hdr_chksum_err)
+               ++rx_queue->channel->n_rx_ip_hdr_chksum_err;
+       else if (rx_ev_tcp_udp_chksum_err)
+               ++rx_queue->channel->n_rx_tcp_udp_chksum_err;
+       if (rx_ev_ip_frag_err)
+               ++rx_queue->channel->n_rx_ip_frag_err;
+
+       /* The frame must be discarded if any of these are true. */
+       *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
+                   rx_ev_tobe_disc | rx_ev_pause_frm);
+
+       /* TOBE_DISC is expected on unicast mismatches; don't print out an
+        * error message.  FRM_TRUNC indicates RXDP dropped the packet due
+        * to a FIFO overflow.
+        */
+#ifdef EFX_ENABLE_DEBUG
+       if (rx_ev_other_err) {
+               EFX_INFO_RL(efx, " RX queue %d unexpected RX event "
+                           EFX_QWORD_FMT "%s%s%s%s%s%s%s%s%s\n",
+                           rx_queue->queue, EFX_QWORD_VAL(*event),
+                           rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
+                           rx_ev_ip_hdr_chksum_err ?
+                           " [IP_HDR_CHKSUM_ERR]" : "",
+                           rx_ev_tcp_udp_chksum_err ?
+                           " [TCP_UDP_CHKSUM_ERR]" : "",
+                           rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
+                           rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
+                           rx_ev_drib_nib ? " [DRIB_NIB]" : "",
+                           rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
+                           rx_ev_pause_frm ? " [PAUSE]" : "",
+                           snap ? " [SNAP/LLC]" : "");
+       }
+#endif
+
+       if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) &&
+                    efx->phy_type == PHY_TYPE_10XPRESS))
+               tenxpress_crc_err(efx);
+}
+
+/* Handle receive events that are not in-order. */
+static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
+                                      unsigned index)
+{
+       struct efx_nic *efx = rx_queue->efx;
+       unsigned expected, dropped;
+
+       expected = rx_queue->removed_count & FALCON_RXD_RING_MASK;
+       dropped = ((index + FALCON_RXD_RING_SIZE - expected) &
+                  FALCON_RXD_RING_MASK);
+       EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
+               dropped, index, expected);
+
+       efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
+                          RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+}
+
+/* Handle a packet received event
+ *
+ * Falcon silicon gives a "discard" flag if it's a unicast packet with the
+ * wrong destination address.
+ * Also "is multicast" and "matches multicast filter" flags can be used to
+ * discard non-matching multicast packets.
+ */
+static inline int falcon_handle_rx_event(struct efx_channel *channel,
+                                        const efx_qword_t *event)
+{
+       unsigned int rx_ev_q_label, rx_ev_desc_ptr, rx_ev_byte_cnt;
+       unsigned int rx_ev_pkt_ok, rx_ev_hdr_type, rx_ev_mcast_pkt;
+       unsigned expected_ptr;
+       int discard = 0, checksummed;
+       struct efx_rx_queue *rx_queue;
+       struct efx_nic *efx = channel->efx;
+
+       /* Basic packet information */
+       rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT);
+       rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK);
+       rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE);
+       WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT));
+       WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1);
+
+       rx_ev_q_label = EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL);
+       rx_queue = &efx->rx_queue[rx_ev_q_label];
+
+       rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR);
+       expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK;
+       if (unlikely(rx_ev_desc_ptr != expected_ptr)) {
+               falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
+               return rx_ev_q_label;
+       }
+
+       if (likely(rx_ev_pkt_ok)) {
+               /* If packet is marked as OK and packet type is TCP/IPv4 or
+                * UDP/IPv4, then we can rely on the hardware checksum.
+                */
+               checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type);
+       } else {
+               falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
+                                       &discard, rx_ev_byte_cnt);
+               checksummed = 0;
+       }
+
+       /* Detect multicast packets that didn't match the filter */
+       rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT);
+       if (rx_ev_mcast_pkt) {
+               unsigned int rx_ev_mcast_hash_match =
+                       EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH);
+
+               if (unlikely(!rx_ev_mcast_hash_match))
+                       discard = 1;
+       }
+
+       /* Handle received packet */
+       efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt,
+                     checksummed, discard);
+
+       return rx_ev_q_label;
+}
+
+/* Global events are basically PHY events */
+static void falcon_handle_global_event(struct efx_channel *channel,
+                                      efx_qword_t *event)
+{
+       struct efx_nic *efx = channel->efx;
+       int is_phy_event = 0, handled = 0;
+
+       /* Check for interrupt on either port.  Some boards have a
+        * single PHY wired to the interrupt line for port 1. */
+       if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) ||
+           EFX_QWORD_FIELD(*event, G_PHY1_INTR) ||
+           EFX_QWORD_FIELD(*event, XG_PHY_INTR))
+               is_phy_event = 1;
+
+       if ((FALCON_REV(efx) >= FALCON_REV_B0) &&
+           EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
+               is_phy_event = 1;
+
+       if (is_phy_event) {
+               efx->phy_op->clear_interrupt(efx);
+               queue_work(efx->workqueue, &efx->reconfigure_work);
+               handled = 1;
+       }
+
+       if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) {
+               EFX_ERR(efx, "channel %d seen global RX_RESET "
+                       "event. Resetting.\n", channel->channel);
+
+               atomic_inc(&efx->rx_reset);
+               efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
+                                  RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+               handled = 1;
+       }
+
+       if (!handled)
+               EFX_ERR(efx, "channel %d unknown global event "
+                       EFX_QWORD_FMT "\n", channel->channel,
+                       EFX_QWORD_VAL(*event));
+}
+
+static void falcon_handle_driver_event(struct efx_channel *channel,
+                                      efx_qword_t *event)
+{
+       struct efx_nic *efx = channel->efx;
+       unsigned int ev_sub_code;
+       unsigned int ev_sub_data;
+
+       ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE);
+       ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA);
+
+       switch (ev_sub_code) {
+       case TX_DESCQ_FLS_DONE_EV_DECODE:
+               EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
+                         channel->channel, ev_sub_data);
+               break;
+       case RX_DESCQ_FLS_DONE_EV_DECODE:
+               EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
+                         channel->channel, ev_sub_data);
+               break;
+       case EVQ_INIT_DONE_EV_DECODE:
+               EFX_LOG(efx, "channel %d EVQ %d initialised\n",
+                       channel->channel, ev_sub_data);
+               break;
+       case SRM_UPD_DONE_EV_DECODE:
+               EFX_TRACE(efx, "channel %d SRAM update done\n",
+                         channel->channel);
+               break;
+       case WAKE_UP_EV_DECODE:
+               EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
+                         channel->channel, ev_sub_data);
+               break;
+       case TIMER_EV_DECODE:
+               EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
+                         channel->channel, ev_sub_data);
+               break;
+       case RX_RECOVERY_EV_DECODE:
+               EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
+                       "Resetting.\n", channel->channel);
+               efx_schedule_reset(efx,
+                                  EFX_WORKAROUND_6555(efx) ?
+                                  RESET_TYPE_RX_RECOVERY :
+                                  RESET_TYPE_DISABLE);
+               break;
+       case RX_DSC_ERROR_EV_DECODE:
+               EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
+                       " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+               efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
+               break;
+       case TX_DSC_ERROR_EV_DECODE:
+               EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
+                       " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
+               efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
+               break;
+       default:
+               EFX_TRACE(efx, "channel %d unknown driver event code %d "
+                         "data %04x\n", channel->channel, ev_sub_code,
+                         ev_sub_data);
+               break;
+       }
+}
+
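+/* Process events on a channel's event queue until it is empty or the
+ * RX quota is exhausted.  Returns a bitmask of the RX DMA queues for
+ * which receive events were handled.
+ */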
+int falcon_process_eventq(struct efx_channel *channel, int *rx_quota)
+{
+       unsigned int read_ptr;
+       efx_qword_t event, *p_event;
+       int ev_code;
+       int rxq;
+       int rxdmaqs = 0;
+
+       read_ptr = channel->eventq_read_ptr;
+
+       do {
+               p_event = falcon_event(channel, read_ptr);
+               event = *p_event;
+
+               if (!falcon_event_present(&event))
+                       /* End of events */
+                       break;
+
+               EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n",
+                         channel->channel, EFX_QWORD_VAL(event));
+
+               /* Clear this event by marking it all ones */
+               EFX_SET_QWORD(*p_event);
+
+               ev_code = EFX_QWORD_FIELD(event, EV_CODE);
+
+               switch (ev_code) {
+               case RX_IP_EV_DECODE:
+                       rxq = falcon_handle_rx_event(channel, &event);
+                       rxdmaqs |= (1 << rxq);
+                       (*rx_quota)--;
+                       break;
+               case TX_IP_EV_DECODE:
+                       falcon_handle_tx_event(channel, &event);
+                       break;
+               case DRV_GEN_EV_DECODE:
+                       channel->eventq_magic
+                               = EFX_QWORD_FIELD(event, EVQ_MAGIC);
+                       EFX_LOG(channel->efx, "channel %d received generated "
+                               "event "EFX_QWORD_FMT"\n", channel->channel,
+                               EFX_QWORD_VAL(event));
+                       break;
+               case GLOBAL_EV_DECODE:
+                       falcon_handle_global_event(channel, &event);
+                       break;
+               case DRIVER_EV_DECODE:
+                       falcon_handle_driver_event(channel, &event);
+                       break;
+               default:
+                       EFX_ERR(channel->efx, "channel %d unknown event type %d"
+                               " (data " EFX_QWORD_FMT ")\n", channel->channel,
+                               ev_code, EFX_QWORD_VAL(event));
+               }
+
+               /* Increment read pointer */
+               read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK;
+
+       } while (*rx_quota);
+
+       channel->eventq_read_ptr = read_ptr;
+       return rxdmaqs;
+}
+
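+/* Program the event queue timer register to reflect the channel's
+ * interrupt moderation setting, or disable the timer if moderation is off.
+ */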
+void falcon_set_int_moderation(struct efx_channel *channel)
+{
+       efx_dword_t timer_cmd;
+       struct efx_nic *efx = channel->efx;
+
+       /* Set timer register */
+       if (channel->irq_moderation) {
+               /* Round to resolution supported by hardware.  The value we
+                * program is based at 0.  So actual interrupt moderation
+                * achieved is ((x + 1) * res).
+                */
+               unsigned int res = 5;
+               channel->irq_moderation -= (channel->irq_moderation % res);
+               if (channel->irq_moderation < res)
+                       channel->irq_moderation = res;
+               EFX_POPULATE_DWORD_2(timer_cmd,
+                                    TIMER_MODE, TIMER_MODE_INT_HLDOFF,
+                                    TIMER_VAL,
+                                    (channel->irq_moderation / res) - 1);
+       } else {
+               EFX_POPULATE_DWORD_2(timer_cmd,
+                                    TIMER_MODE, TIMER_MODE_DIS,
+                                    TIMER_VAL, 0);
+       }
+       falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER,
+                                 channel->evqnum);
+}
+
+/* Allocate buffer table entries for event queue */
+int falcon_probe_eventq(struct efx_channel *channel)
+{
+       struct efx_nic *efx = channel->efx;
+       unsigned int evq_size;
+
+       evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t);
+       return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size);
+}
+
+int falcon_init_eventq(struct efx_channel *channel)
+{
+       efx_oword_t evq_ptr;
+       struct efx_nic *efx = channel->efx;
+       int rc;
+
+       EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n",
+               channel->channel, channel->eventq.index,
+               channel->eventq.index + channel->eventq.entries - 1);
+
+       /* Pin event queue buffer */
+       rc = falcon_init_special_buffer(efx, &channel->eventq);
+       if (rc)
+               return rc;
+
+       /* Fill event queue with all ones (i.e. empty events) */
+       memset(channel->eventq.addr, 0xff, channel->eventq.len);
+
+       /* Push event queue to card */
+       EFX_POPULATE_OWORD_3(evq_ptr,
+                            EVQ_EN, 1,
+                            EVQ_SIZE, FALCON_EVQ_ORDER,
+                            EVQ_BUF_BASE_ID, channel->eventq.index);
+       falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
+                          channel->evqnum);
+
+       falcon_set_int_moderation(channel);
+
+       return 0;
+}
+
+void falcon_fini_eventq(struct efx_channel *channel)
+{
+       efx_oword_t eventq_ptr;
+       struct efx_nic *efx = channel->efx;
+
+       /* Remove event queue from card */
+       EFX_ZERO_OWORD(eventq_ptr);
+       falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
+                          channel->evqnum);
+
+       /* Unpin event queue */
+       falcon_fini_special_buffer(efx, &channel->eventq);
+}
+
+/* Free buffers backing event queue */
+void falcon_remove_eventq(struct efx_channel *channel)
+{
+       falcon_free_special_buffer(channel->efx, &channel->eventq);
+}
+
+
+/* Generates a test event on the event queue.  A subsequent call to
+ * process_eventq() should pick up the event and place the value of
+ * "magic" into channel->eventq_magic.
+ */
+void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
+{
+       efx_qword_t test_event;
+
+       EFX_POPULATE_QWORD_2(test_event,
+                            EV_CODE, DRV_GEN_EV_DECODE,
+                            EVQ_MAGIC, magic);
+       falcon_generate_event(channel, &test_event);
+}
+
+
+/**************************************************************************
+ *
+ * Falcon hardware interrupts
+ * The hardware interrupt handler does very little work; all the event
+ * queue processing is carried out by per-channel tasklets.
+ *
+ **************************************************************************/
+
+/* Enable/disable/generate Falcon interrupts */
+static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
+                                    int force)
+{
+       efx_oword_t int_en_reg_ker;
+
+       EFX_POPULATE_OWORD_2(int_en_reg_ker,
+                            KER_INT_KER, force,
+                            DRV_INT_EN_KER, enabled);
+       falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER);
+}
+
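+/* Enable interrupts: clear the interrupt status buffer, program its DMA
+ * address into the NIC and then turn on interrupt generation.
+ */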
+void falcon_enable_interrupts(struct efx_nic *efx)
+{
+       efx_oword_t int_adr_reg_ker;
+       struct efx_channel *channel;
+
+       EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr));
+       wmb(); /* Ensure interrupt vector is clear before interrupts enabled */
+
+       /* Program address */
+       EFX_POPULATE_OWORD_2(int_adr_reg_ker,
+                            NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx),
+                            INT_ADR_KER, efx->irq_status.dma_addr);
+       falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER);
+
+       /* Enable interrupts */
+       falcon_interrupts(efx, 1, 0);
+
+       /* Force processing of all the channels to get the EVQ RPTRs up to
+        * date */
+       efx_for_each_channel_with_interrupt(channel, efx)
+               efx_schedule_channel(channel);
+}
+
+void falcon_disable_interrupts(struct efx_nic *efx)
+{
+       /* Disable interrupts */
+       falcon_interrupts(efx, 0, 0);
+}
+
+/* Generate a Falcon test interrupt
+ * Interrupt must already have been enabled, otherwise nasty things
+ * may happen.
+ */
+void falcon_generate_interrupt(struct efx_nic *efx)
+{
+       falcon_interrupts(efx, 1, 1);
+}
+
+/* Acknowledge a legacy interrupt from Falcon
+ *
+ * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG.
+ *
+ * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the
+ * BIU. Interrupt acknowledge is read sensitive so must write instead
+ * (then read to ensure the BIU collector is flushed)
+ *
+ * NB most hardware supports MSI interrupts
+ */
+static inline void falcon_irq_ack_a1(struct efx_nic *efx)
+{
+       efx_dword_t reg;
+
+       EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e);
+       falcon_writel(efx, &reg, INT_ACK_REG_KER_A1);
+       falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1);
+}
+
+/* Process a fatal interrupt
+ * Disable bus mastering ASAP and schedule a reset
+ */
+static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
+{
+       struct falcon_nic_data *nic_data = efx->nic_data;
+       efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+       efx_oword_t fatal_intr;
+       int error, mem_perr;
+       static int n_int_errors;
+
+       falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER);
+       error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR);
+
+       EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
+               EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
+               EFX_OWORD_VAL(fatal_intr),
+               error ? "disabling bus mastering" : "no recognised error");
+       if (error == 0)
+               goto out;
+
+       /* If this is a memory parity error dump which blocks are offending */
+       mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER);
+       if (mem_perr) {
+               efx_oword_t reg;
+               falcon_read(efx, &reg, MEM_STAT_REG_KER);
+               EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
+                       EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
+       }
+
+       /* Disable DMA bus mastering on both devices */
+       pci_disable_device(efx->pci_dev);
+       if (FALCON_IS_DUAL_FUNC(efx))
+               pci_disable_device(nic_data->pci_dev2);
+
+       if (++n_int_errors < FALCON_MAX_INT_ERRORS) {
+               EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
+               efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
+       } else {
+               EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen. "
+                       "NIC will be disabled\n");
+               efx_schedule_reset(efx, RESET_TYPE_DISABLE);
+       }
+out:
+       return IRQ_HANDLED;
+}
+
+/* Handle a legacy interrupt from Falcon
+ * Acknowledges the interrupt and schedules event queue processing.
+ */
+static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
+{
+       struct efx_nic *efx = (struct efx_nic *)dev_id;
+       efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+       struct efx_channel *channel;
+       efx_dword_t reg;
+       u32 queues;
+       int syserr;
+
+       /* Read the ISR which also ACKs the interrupts */
+       falcon_readl(efx, &reg, INT_ISR0_B0);
+       queues = EFX_EXTRACT_DWORD(reg, 0, 31);
+
+       /* Check to see if we have a serious error condition */
+       syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
+       if (unlikely(syserr))
+               return falcon_fatal_interrupt(efx);
+
+       if (queues == 0)
+               return IRQ_NONE;
+
+       efx->last_irq_cpu = raw_smp_processor_id();
+       EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
+                 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+
+       /* Schedule processing of any interrupting queues */
+       channel = &efx->channel[0];
+       while (queues) {
+               if (queues & 0x01)
+                       efx_schedule_channel(channel);
+               channel++;
+               queues >>= 1;
+       }
+
+       return IRQ_HANDLED;
+}
+
+
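+/* Handle a legacy interrupt from Falcon A1 hardware.  The interrupting
+ * event queues are read from the interrupt status buffer, which is then
+ * cleared before the interrupt is acknowledged.
+ */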
+static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
+{
+       struct efx_nic *efx = (struct efx_nic *)dev_id;
+       efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+       struct efx_channel *channel;
+       int syserr;
+       int queues;
+
+       /* Check to see if this is our interrupt.  If it isn't, we
+        * exit without having touched the hardware.
+        */
+       if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) {
+               EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq,
+                         raw_smp_processor_id());
+               return IRQ_NONE;
+       }
+       efx->last_irq_cpu = raw_smp_processor_id();
+       EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+                 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+
+       /* Check to see if we have a serious error condition */
+       syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
+       if (unlikely(syserr))
+               return falcon_fatal_interrupt(efx);
+
+       /* Determine interrupting queues, clear interrupt status
+        * register and acknowledge the device interrupt.
+        */
+       BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS);
+       queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS);
+       EFX_ZERO_OWORD(*int_ker);
+       wmb(); /* Ensure the vector is cleared before interrupt ack */
+       falcon_irq_ack_a1(efx);
+
+       /* Schedule processing of any interrupting queues */
+       channel = &efx->channel[0];
+       while (queues) {
+               if (queues & 0x01)
+                       efx_schedule_channel(channel);
+               channel++;
+               queues >>= 1;
+       }
+
+       return IRQ_HANDLED;
+}
+
+/* Handle an MSI interrupt from Falcon
+ *
+ * Handle an MSI hardware interrupt.  This routine schedules event
+ * queue processing.  No interrupt acknowledgement cycle is necessary.
+ * Also, we never need to check that the interrupt is for us, since
+ * MSI interrupts cannot be shared.
+ */
+static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
+{
+       struct efx_channel *channel = (struct efx_channel *)dev_id;
+       struct efx_nic *efx = channel->efx;
+       efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+       int syserr;
+
+       efx->last_irq_cpu = raw_smp_processor_id();
+       EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n",
+                 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
+
+       /* Check to see if we have a serious error condition */
+       syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT);
+       if (unlikely(syserr))
+               return falcon_fatal_interrupt(efx);
+
+       /* Schedule processing of the channel */
+       efx_schedule_channel(channel);
+
+       return IRQ_HANDLED;
+}
+
+
+/* Set up the RSS indirection table.
+ * This maps from the hash value of the packet to the RX queue.
+ */
+static void falcon_setup_rss_indir_table(struct efx_nic *efx)
+{
+       int i = 0;
+       unsigned long offset;
+       efx_dword_t dword;
+
+       if (FALCON_REV(efx) < FALCON_REV_B0)
+               return;
+
+       for (offset = RX_RSS_INDIR_TBL_B0;
+            offset < RX_RSS_INDIR_TBL_B0 + 0x800;
+            offset += 0x10) {
+               EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0,
+                                    i % efx->rss_queues);
+               falcon_writel(efx, &dword, offset);
+               i++;
+       }
+}
+
+/* Hook interrupt handler(s)
+ * Try MSI and then legacy interrupts.
+ */
+int falcon_init_interrupt(struct efx_nic *efx)
+{
+       struct efx_channel *channel;
+       int rc;
+
+       if (!EFX_INT_MODE_USE_MSI(efx)) {
+               irq_handler_t handler;
+               if (FALCON_REV(efx) >= FALCON_REV_B0)
+                       handler = falcon_legacy_interrupt_b0;
+               else
+                       handler = falcon_legacy_interrupt_a1;
+
+               rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED,
+                                efx->name, efx);
+               if (rc) {
+                       EFX_ERR(efx, "failed to hook legacy IRQ %d\n",
+                               efx->pci_dev->irq);
+                       goto fail1;
+               }
+               return 0;
+       }
+
+       /* Hook MSI or MSI-X interrupt */
+       efx_for_each_channel_with_interrupt(channel, efx) {
+               rc = request_irq(channel->irq, falcon_msi_interrupt,
+                                IRQF_PROBE_SHARED, /* Not shared */
+                                efx->name, channel);
+               if (rc) {
+                       EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq);
+                       goto fail2;
+               }
+       }
+
+       return 0;
+
+ fail2:
+       efx_for_each_channel_with_interrupt(channel, efx)
+               free_irq(channel->irq, channel);
+ fail1:
+       return rc;
+}
+
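+/* Unhook interrupt handlers: free any MSI/MSI-X IRQs, then acknowledge
+ * and free the legacy interrupt.
+ */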
+void falcon_fini_interrupt(struct efx_nic *efx)
+{
+       struct efx_channel *channel;
+       efx_oword_t reg;
+
+       /* Disable MSI/MSI-X interrupts */
+       efx_for_each_channel_with_interrupt(channel, efx)
+               if (channel->irq)
+                       free_irq(channel->irq, channel);
+
+       /* ACK legacy interrupt */
+       if (FALCON_REV(efx) >= FALCON_REV_B0)
+               falcon_read(efx, &reg, INT_ISR0_B0);
+       else
+               falcon_irq_ack_a1(efx);
+
+       /* Disable legacy interrupt */
+       if (efx->legacy_irq)
+               free_irq(efx->legacy_irq, efx);
+}
+
+/**************************************************************************
+ *
+ * EEPROM/flash
+ *
+ **************************************************************************
+ */
+
+#define FALCON_SPI_MAX_LEN sizeof(efx_oword_t)
+
+/* Wait for SPI command completion */
+static int falcon_spi_wait(struct efx_nic *efx)
+{
+       efx_oword_t reg;
+       int cmd_en, timer_active;
+       int count;
+
+       count = 0;
+       do {
+               falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER);
+               cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN);
+               timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE);
+               if (!cmd_en && !timer_active)
+                       return 0;
+               udelay(10);
+       } while (++count < 10000); /* wait up to 100ms */
+       EFX_ERR(efx, "timed out waiting for SPI\n");
+       return -ETIMEDOUT;
+}
+
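+/* Issue a read command on the SPI interface (EEPROM or flash) and copy
+ * up to FALCON_SPI_MAX_LEN bytes of returned data into the caller's buffer.
+ */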
+static int
+falcon_spi_read(struct efx_nic *efx, int device_id, unsigned int command,
+               unsigned int address, unsigned int addr_len,
+               void *data, unsigned int len)
+{
+       efx_oword_t reg;
+       int rc;
+
+       BUG_ON(len > FALCON_SPI_MAX_LEN);
+
+       /* Check SPI not currently being accessed */
+       rc = falcon_spi_wait(efx);
+       if (rc)
+               return rc;
+
+       /* Program address register */
+       EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address);
+       falcon_write(efx, &reg, EE_SPI_HADR_REG_KER);
+
+       /* Issue read command */
+       EFX_POPULATE_OWORD_7(reg,
+                            EE_SPI_HCMD_CMD_EN, 1,
+                            EE_SPI_HCMD_SF_SEL, device_id,
+                            EE_SPI_HCMD_DABCNT, len,
+                            EE_SPI_HCMD_READ, EE_SPI_READ,
+                            EE_SPI_HCMD_DUBCNT, 0,
+                            EE_SPI_HCMD_ADBCNT, addr_len,
+                            EE_SPI_HCMD_ENC, command);
+       falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER);
+
+       /* Wait for read to complete */
+       rc = falcon_spi_wait(efx);
+       if (rc)
+               return rc;
+
+       /* Read data */
+       falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER);
+       memcpy(data, &reg, len);
+       return 0;
+}
+
+/**************************************************************************
+ *
+ * MAC wrapper
+ *
+ **************************************************************************
+ */
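+
+/* Drain the TX FIFO on B0 by resetting the MAC and EM blocks.  The drain
+ * sequence is serialised against MAC statistics fetches via stats_lock.
+ */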
+void falcon_drain_tx_fifo(struct efx_nic *efx)
+{
+       efx_oword_t temp;
+       int count;
+
+       if (FALCON_REV(efx) < FALCON_REV_B0)
+               return;
+
+       falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
+       /* There is no point in draining more than once */
+       if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
+               return;
+
+       /* MAC stats will fail whilst the TX fifo is draining. Serialise
+        * the drain sequence with the statistics fetch */
+       spin_lock(&efx->stats_lock);
+
+       EFX_SET_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0, 1);
+       falcon_write(efx, &temp, MAC0_CTRL_REG_KER);
+
+       /* Reset the MAC and EM block. */
+       falcon_read(efx, &temp, GLB_CTL_REG_KER);
+       EFX_SET_OWORD_FIELD(temp, RST_XGTX, 1);
+       EFX_SET_OWORD_FIELD(temp, RST_XGRX, 1);
+       EFX_SET_OWORD_FIELD(temp, RST_EM, 1);
+       falcon_write(efx, &temp, GLB_CTL_REG_KER);
+
+       count = 0;
+       while (1) {
+               falcon_read(efx, &temp, GLB_CTL_REG_KER);
+               if (!EFX_OWORD_FIELD(temp, RST_XGTX) &&
+                   !EFX_OWORD_FIELD(temp, RST_XGRX) &&
+                   !EFX_OWORD_FIELD(temp, RST_EM)) {
+                       EFX_LOG(efx, "Completed MAC reset after %d loops\n",
+                               count);
+                       break;
+               }
+               if (count > 20) {
+                       EFX_ERR(efx, "MAC reset failed\n");
+                       break;
+               }
+               count++;
+               udelay(10);
+       }
+
+       spin_unlock(&efx->stats_lock);
+
+       /* If we've reset the EM block and the link is up, then
+        * we'll have to kick the XAUI link so the PHY can recover */
+       if (efx->link_up && EFX_WORKAROUND_5147(efx))
+               falcon_reset_xaui(efx);
+}
+
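+/* Isolate the MAC from the RX datapath (B0 only) and drain the TX FIFO
+ * if the link is down.
+ */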
+void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
+{
+       efx_oword_t temp;
+
+       if (FALCON_REV(efx) < FALCON_REV_B0)
+               return;
+
+       /* Isolate the MAC -> RX */
+       falcon_read(efx, &temp, RX_CFG_REG_KER);
+       EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 0);
+       falcon_write(efx, &temp, RX_CFG_REG_KER);
+
+       if (!efx->link_up)
+               falcon_drain_tx_fifo(efx);
+}
+
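+/* Reconfigure the MAC wrapper for the current link speed, promiscuity,
+ * flow control settings and multicast hash.
+ */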
+void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
+{
+       efx_oword_t reg;
+       int link_speed;
+       unsigned int tx_fc;
+
+       if (efx->link_options & GM_LPA_10000)
+               link_speed = 0x3;
+       else if (efx->link_options & GM_LPA_1000)
+               link_speed = 0x2;
+       else if (efx->link_options & GM_LPA_100)
+               link_speed = 0x1;
+       else
+               link_speed = 0x0;
+       /* MAC_LINK_STATUS controls MAC backpressure but doesn't work
+        * as advertised.  Disable to ensure packets are not
+        * indefinitely held and TX queue can be flushed at any point
+        * while the link is down. */
+       EFX_POPULATE_OWORD_5(reg,
+                            MAC_XOFF_VAL, 0xffff /* max pause time */,
+                            MAC_BCAD_ACPT, 1,
+                            MAC_UC_PROM, efx->promiscuous,
+                            MAC_LINK_STATUS, 1, /* always set */
+                            MAC_SPEED, link_speed);
+       /* On B0, MAC backpressure can be disabled and packets get
+        * discarded. */
+       if (FALCON_REV(efx) >= FALCON_REV_B0) {
+               EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
+                                   !efx->link_up);
+       }
+
+       falcon_write(efx, &reg, MAC0_CTRL_REG_KER);
+
+       /* Restore the multicast hash registers. */
+       falcon_set_multicast_hash(efx);
+
+       /* Transmission of pause frames when RX crosses the threshold is
+        * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
+        * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */
+       tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0;
+       falcon_read(efx, &reg, RX_CFG_REG_KER);
+       EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);
+
+       /* Unisolate the MAC -> RX */
+       if (FALCON_REV(efx) >= FALCON_REV_B0)
+               EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
+       falcon_write(efx, &reg, RX_CFG_REG_KER);
+}
+
+int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
+{
+       efx_oword_t reg;
+       u32 *dma_done;
+       int i;
+
+       if (disable_dma_stats)
+               return 0;
+
+       /* Statistics fetch will fail if the MAC is in TX drain */
+       if (FALCON_REV(efx) >= FALCON_REV_B0) {
+               efx_oword_t temp;
+               falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
+               if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
+                       return 0;
+       }
+
+       dma_done = (efx->stats_buffer.addr + done_offset);
+       *dma_done = FALCON_STATS_NOT_DONE;
+       wmb(); /* ensure done flag is clear */
+
+       /* Initiate DMA transfer of stats */
+       EFX_POPULATE_OWORD_2(reg,
+                            MAC_STAT_DMA_CMD, 1,
+                            MAC_STAT_DMA_ADR,
+                            efx->stats_buffer.dma_addr);
+       falcon_write(efx, &reg, MAC0_STAT_DMA_REG_KER);
+
+       /* Wait for transfer to complete */
+       for (i = 0; i < 400; i++) {
+               if (*(volatile u32 *)dma_done == FALCON_STATS_DONE)
+                       return 0;
+               udelay(10);
+       }
+
+       EFX_ERR(efx, "timed out waiting for statistics\n");
+       return -ETIMEDOUT;
+}
+
+/**************************************************************************
+ *
+ * PHY access via GMII
+ *
+ **************************************************************************
+ */
+
+/* Use the top bit of the MII PHY id to indicate the PHY type
+ * (1G/10G), with the remaining bits as the actual PHY id.
+ *
+ * This allows us to avoid leaking information from the mii_if_info
+ * structure into other data structures.
+ */
+#define FALCON_PHY_ID_ID_WIDTH  EFX_WIDTH(MD_PRT_DEV_ADR)
+#define FALCON_PHY_ID_ID_MASK   ((1 << FALCON_PHY_ID_ID_WIDTH) - 1)
+#define FALCON_PHY_ID_WIDTH     (FALCON_PHY_ID_ID_WIDTH + 1)
+#define FALCON_PHY_ID_MASK      ((1 << FALCON_PHY_ID_WIDTH) - 1)
+#define FALCON_PHY_ID_10G       (1 << (FALCON_PHY_ID_WIDTH - 1))
+
+
+/* Packing the clause 45 port and device fields into a single value */
+#define MD_PRT_ADR_COMP_LBN   (MD_PRT_ADR_LBN - MD_DEV_ADR_LBN)
+#define MD_PRT_ADR_COMP_WIDTH  MD_PRT_ADR_WIDTH
+#define MD_DEV_ADR_COMP_LBN    0
+#define MD_DEV_ADR_COMP_WIDTH  MD_DEV_ADR_WIDTH
+
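+/* Illustrative sketch (not part of the driver): given the MD_PRT_ADR /
+ * MD_DEV_ADR layout in falcon_hwdefs.h, the packed id carries the device
+ * address in bits [4:0] and the port address in bits [9:5].  The helper
+ * name below is hypothetical and the block is not compiled.
+ */
+#if 0
+static inline unsigned int example_falcon_phy_id_10g(unsigned int prt,
+                                                     unsigned int dev)
+{
+       /* e.g. port 2, device 5 -> 0x400 | (2 << 5) | 5 == 0x445 */
+       return FALCON_PHY_ID_10G | ((prt & 0x1f) << 5) | (dev & 0x1f);
+}
+#endif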
+
+/* Wait for GMII access to complete */
+static int falcon_gmii_wait(struct efx_nic *efx)
+{
+       efx_dword_t md_stat;
+       int count;
+
+       for (count = 0; count < 1000; count++) {       /* wait up to 10ms */
+               falcon_readl(efx, &md_stat, MD_STAT_REG_KER);
+               if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) {
+                       if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 ||
+                           EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) {
+                               EFX_ERR(efx, "error from GMII access "
+                                       EFX_DWORD_FMT"\n",
+                                       EFX_DWORD_VAL(md_stat));
+                               return -EIO;
+                       }
+                       return 0;
+               }
+               udelay(10);
+       }
+       EFX_ERR(efx, "timed out waiting for GMII\n");
+       return -ETIMEDOUT;
+}
+
+/* Writes a GMII register of a PHY connected to Falcon using MDIO. */
+static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
+                             int addr, int value)
+{
+       struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+       unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
+       efx_oword_t reg;
+
+       /* The 'generic' prt/dev packing in mdio_10g.h is conveniently
+        * chosen so that the only current user, Falcon, can take the
+        * packed value and use it directly.
+        * Fail to build if this assumption is broken.
+        */
+       BUILD_BUG_ON(FALCON_PHY_ID_10G != MDIO45_XPRT_ID_IS10G);
+       BUILD_BUG_ON(FALCON_PHY_ID_ID_WIDTH != MDIO45_PRT_DEV_WIDTH);
+       BUILD_BUG_ON(MD_PRT_ADR_COMP_LBN != MDIO45_PRT_ID_COMP_LBN);
+       BUILD_BUG_ON(MD_DEV_ADR_COMP_LBN != MDIO45_DEV_ID_COMP_LBN);
+
+       if (phy_id2 == PHY_ADDR_INVALID)
+               return;
+
+       /* See falcon_mdio_read for an explanation. */
+       if (!(phy_id & FALCON_PHY_ID_10G)) {
+               int mmd = ffs(efx->phy_op->mmds) - 1;
+               EFX_TRACE(efx, "Fixing erroneous clause22 write\n");
+               phy_id2 = mdio_clause45_pack(phy_id2, mmd)
+                       & FALCON_PHY_ID_ID_MASK;
+       }
+
+       EFX_REGDUMP(efx, "writing GMII %d register %02x with %04x\n", phy_id,
+                   addr, value);
+
+       spin_lock_bh(&efx->phy_lock);
+
+       /* Check MII not currently being accessed */
+       if (falcon_gmii_wait(efx) != 0)
+               goto out;
+
+       /* Write the address/ID register */
+       EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
+       falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
+
+       EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_id2);
+       falcon_write(efx, &reg, MD_ID_REG_KER);
+
+       /* Write data */
+       EFX_POPULATE_OWORD_1(reg, MD_TXD, value);
+       falcon_write(efx, &reg, MD_TXD_REG_KER);
+
+       EFX_POPULATE_OWORD_2(reg,
+                            MD_WRC, 1,
+                            MD_GC, 0);
+       falcon_write(efx, &reg, MD_CS_REG_KER);
+
+       /* Wait for data to be written */
+       if (falcon_gmii_wait(efx) != 0) {
+               /* Abort the write operation */
+               EFX_POPULATE_OWORD_2(reg,
+                                    MD_WRC, 0,
+                                    MD_GC, 1);
+               falcon_write(efx, &reg, MD_CS_REG_KER);
+               udelay(10);
+       }
+
+ out:
+       spin_unlock_bh(&efx->phy_lock);
+}
+
+/* Reads a GMII register from a PHY connected to Falcon.  If no value
+ * could be read, -1 will be returned. */
+static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
+{
+       struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+       unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
+       efx_oword_t reg;
+       int value = -1;
+
+       if (phy_addr == PHY_ADDR_INVALID)
+               return -1;
+
+       /* Our PHY code knows whether it needs to talk clause 22(1G) or 45(10G)
+        * but the generic Linux code does not make any distinction or have
+        * any state for this.
+        * We spot the case where someone tried to talk 22 to a 45 PHY and
+        * redirect the request to the lowest numbered MMD as a clause45
+        * request. This is enough to allow simple queries like id and link
+        * state to succeed. TODO: We may need to do more in future.
+        */
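+       /* Illustrative example with a hypothetical value: if
+        * efx->phy_op->mmds were 0x1a (MMDs 1, 3 and 4 present) then
+        * ffs(0x1a) - 1 == 1, so the access would be redirected to MMD 1.
+        */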
+       if (!(phy_id & FALCON_PHY_ID_10G)) {
+               int mmd = ffs(efx->phy_op->mmds) - 1;
+               EFX_TRACE(efx, "Fixing erroneous clause22 read\n");
+               phy_addr = mdio_clause45_pack(phy_addr, mmd)
+                       & FALCON_PHY_ID_ID_MASK;
+       }
+
+       spin_lock_bh(&efx->phy_lock);
+
+       /* Check MII not currently being accessed */
+       if (falcon_gmii_wait(efx) != 0)
+               goto out;
+
+       EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr);
+       falcon_write(efx, &reg, MD_PHY_ADR_REG_KER);
+
+       EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_addr);
+       falcon_write(efx, &reg, MD_ID_REG_KER);
+
+       /* Request data to be read */
+       EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0);
+       falcon_write(efx, &reg, MD_CS_REG_KER);
+
+       /* Wait for data to become available */
+       value = falcon_gmii_wait(efx);
+       if (value == 0) {
+               falcon_read(efx, &reg, MD_RXD_REG_KER);
+               value = EFX_OWORD_FIELD(reg, MD_RXD);
+               EFX_REGDUMP(efx, "read from GMII %d register %02x, got %04x\n",
+                           phy_id, addr, value);
+       } else {
+               /* Abort the read operation */
+               EFX_POPULATE_OWORD_2(reg,
+                                    MD_RIC, 0,
+                                    MD_GC, 1);
+               falcon_write(efx, &reg, MD_CS_REG_KER);
+
+               EFX_LOG(efx, "read from GMII 0x%x register %02x, got "
+                       "error %d\n", phy_id, addr, value);
+       }
+
+ out:
+       spin_unlock_bh(&efx->phy_lock);
+
+       return value;
+}
+
+static void falcon_init_mdio(struct mii_if_info *gmii)
+{
+       gmii->mdio_read = falcon_mdio_read;
+       gmii->mdio_write = falcon_mdio_write;
+       gmii->phy_id_mask = FALCON_PHY_ID_MASK;
+       gmii->reg_num_mask = ((1 << EFX_WIDTH(MD_PHY_ADR)) - 1);
+}
+
+static int falcon_probe_phy(struct efx_nic *efx)
+{
+       switch (efx->phy_type) {
+       case PHY_TYPE_10XPRESS:
+               efx->phy_op = &falcon_tenxpress_phy_ops;
+               break;
+       case PHY_TYPE_XFP:
+               efx->phy_op = &falcon_xfp_phy_ops;
+               break;
+       default:
+               EFX_ERR(efx, "Unknown PHY type %d\n",
+                       efx->phy_type);
+               return -1;
+       }
+       return 0;
+}
+
+/* This call is responsible for hooking in the MAC and PHY operations */
+int falcon_probe_port(struct efx_nic *efx)
+{
+       int rc;
+
+       /* Hook in PHY operations table */
+       rc = falcon_probe_phy(efx);
+       if (rc)
+               return rc;
+
+       /* Set up GMII structure for PHY */
+       efx->mii.supports_gmii = 1;
+       falcon_init_mdio(&efx->mii);
+
+       /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
+       if (FALCON_REV(efx) >= FALCON_REV_B0)
+               efx->flow_control = EFX_FC_RX | EFX_FC_TX;
+       else
+               efx->flow_control = EFX_FC_RX;
+
+       /* Allocate buffer for stats */
+       rc = falcon_alloc_buffer(efx, &efx->stats_buffer,
+                                FALCON_MAC_STATS_SIZE);
+       if (rc)
+               return rc;
+       EFX_LOG(efx, "stats buffer at %llx (virt %p phys %lx)\n",
+               (unsigned long long)efx->stats_buffer.dma_addr,
+               efx->stats_buffer.addr,
+               virt_to_phys(efx->stats_buffer.addr));
+
+       return 0;
+}
+
+void falcon_remove_port(struct efx_nic *efx)
+{
+       falcon_free_buffer(efx, &efx->stats_buffer);
+}
+
+/**************************************************************************
+ *
+ * Multicast filtering
+ *
+ **************************************************************************
+ */
+
+void falcon_set_multicast_hash(struct efx_nic *efx)
+{
+       union efx_multicast_hash *mc_hash = &efx->multicast_hash;
+
+       /* Broadcast packets go through the multicast hash filter.
+        * ether_crc_le() of the broadcast address is 0xbe2612ff
+        * so we always add bit 0xff to the mask.
+        */
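+       /* Illustrative note: assuming the hash index is simply the low
+        * eight bits of that CRC, ff:ff:ff:ff:ff:ff maps to
+        * 0xbe2612ff & 0xff == 0xff, which is the bit set below.
+        */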
+       set_bit_le(0xff, mc_hash->byte);
+
+       falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER);
+       falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER);
+}
+
+/**************************************************************************
+ *
+ * Device reset
+ *
+ **************************************************************************
+ */
+
+/* Resets NIC to known state.  This routine must be called in process
+ * context and is allowed to sleep. */
+int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
+{
+       struct falcon_nic_data *nic_data = efx->nic_data;
+       efx_oword_t glb_ctl_reg_ker;
+       int rc;
+
+       EFX_LOG(efx, "performing hardware reset (%d)\n", method);
+
+       /* Initiate device reset */
+       if (method == RESET_TYPE_WORLD) {
+               rc = pci_save_state(efx->pci_dev);
+               if (rc) {
+                       EFX_ERR(efx, "failed to backup PCI state of primary "
+                               "function prior to hardware reset\n");
+                       goto fail1;
+               }
+               if (FALCON_IS_DUAL_FUNC(efx)) {
+                       rc = pci_save_state(nic_data->pci_dev2);
+                       if (rc) {
+                               EFX_ERR(efx, "failed to backup PCI state of "
+                                       "secondary function prior to "
+                                       "hardware reset\n");
+                               goto fail2;
+                       }
+               }
+
+               EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
+                                    EXT_PHY_RST_DUR, 0x7,
+                                    SWRST, 1);
+       } else {
+               int reset_phy = (method == RESET_TYPE_INVISIBLE ?
+                                EXCLUDE_FROM_RESET : 0);
+
+               EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
+                                    EXT_PHY_RST_CTL, reset_phy,
+                                    PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET,
+                                    PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET,
+                                    PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET,
+                                    EE_RST_CTL, EXCLUDE_FROM_RESET,
+                                    EXT_PHY_RST_DUR, 0x7 /* 10ms */,
+                                    SWRST, 1);
+       }
+       falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
+
+       EFX_LOG(efx, "waiting for hardware reset\n");
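+       /* HZ / 20 jiffies gives the hardware roughly 50ms to reset */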
+       schedule_timeout_uninterruptible(HZ / 20);
+
+       /* Restore PCI configuration if needed */
+       if (method == RESET_TYPE_WORLD) {
+               if (FALCON_IS_DUAL_FUNC(efx)) {
+                       rc = pci_restore_state(nic_data->pci_dev2);
+                       if (rc) {
+                               EFX_ERR(efx, "failed to restore PCI config for "
+                                       "the secondary function\n");
+                               goto fail3;
+                       }
+               }
+               rc = pci_restore_state(efx->pci_dev);
+               if (rc) {
+                       EFX_ERR(efx, "failed to restore PCI config for the "
+                               "primary function\n");
+                       goto fail4;
+               }
+               EFX_LOG(efx, "successfully restored PCI config\n");
+       }
+
+       /* Assert that the reset has completed */
+       falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER);
+       if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) {
+               rc = -ETIMEDOUT;
+               EFX_ERR(efx, "timed out waiting for hardware reset\n");
+               goto fail5;
+       }
+       EFX_LOG(efx, "hardware reset complete\n");
+
+       return 0;
+
+       /* pci_save_state() and pci_restore_state() MUST be called in pairs */
+fail2:
+fail3:
+       pci_restore_state(efx->pci_dev);
+fail1:
+fail4:
+fail5:
+       return rc;
+}
+
+/* Zeroes out the SRAM contents.  This routine must be called in
+ * process context and is allowed to sleep.
+ */
+static int falcon_reset_sram(struct efx_nic *efx)
+{
+       efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker;
+       int count;
+
+       /* Set the SRAM wake/sleep GPIO appropriately. */
+       falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
+       EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1);
+       EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1);
+       falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER);
+
+       /* Initiate SRAM reset */
+       EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
+                            SRAM_OOB_BT_INIT_EN, 1,
+                            SRM_NUM_BANKS_AND_BANK_SIZE, 0);
+       falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
+
+       /* Wait for SRAM reset to complete */
+       count = 0;
+       do {
+               EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count);
+
+               /* SRAM reset is slow; expect around 16ms */
+               schedule_timeout_uninterruptible(HZ / 50);
+
+               /* Check for reset complete */
+               falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER);
+               if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) {
+                       EFX_LOG(efx, "SRAM reset complete\n");
+
+                       return 0;
+               }
+       } while (++count < 20); /* wait up to 0.4 sec */
+
+       EFX_ERR(efx, "timed out waiting for SRAM reset\n");
+       return -ETIMEDOUT;
+}
+
+/* Extract non-volatile configuration */
+static int falcon_probe_nvconfig(struct efx_nic *efx)
+{
+       struct falcon_nvconfig *nvconfig;
+       efx_oword_t nic_stat;
+       int device_id;
+       unsigned addr_len;
+       size_t offset, len;
+       int magic_num, struct_ver, board_rev;
+       int rc;
+
+       /* Find the boot device. */
+       falcon_read(efx, &nic_stat, NIC_STAT_REG);
+       if (EFX_OWORD_FIELD(nic_stat, SF_PRST)) {
+               device_id = EE_SPI_FLASH;
+               addr_len = 3;
+       } else if (EFX_OWORD_FIELD(nic_stat, EE_PRST)) {
+               device_id = EE_SPI_EEPROM;
+               addr_len = 2;
+       } else {
+               return -ENODEV;
+       }
+
+       nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
+       if (!nvconfig)
+               return -ENOMEM;
+
+       /* Read the whole configuration structure into memory. */
+       for (offset = 0; offset < sizeof(*nvconfig); offset += len) {
+               len = min(sizeof(*nvconfig) - offset,
+                         (size_t) FALCON_SPI_MAX_LEN);
+               rc = falcon_spi_read(efx, device_id, SPI_READ,
+                                    NVCONFIG_BASE + offset, addr_len,
+                                    (char *)nvconfig + offset, len);
+               if (rc)
+                       goto out;
+       }
+
+       /* Read the MAC addresses */
+       memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
+
+       /* Read the board configuration. */
+       magic_num = le16_to_cpu(nvconfig->board_magic_num);
+       struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
+
+       if (magic_num != NVCONFIG_BOARD_MAGIC_NUM || struct_ver < 2) {
+               EFX_ERR(efx, "Non-volatile memory bad magic=%x ver=%x, "
+                       "therefore using defaults\n", magic_num, struct_ver);
+               efx->phy_type = PHY_TYPE_NONE;
+               efx->mii.phy_id = PHY_ADDR_INVALID;
+               board_rev = 0;
+       } else {
+               struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
+
+               efx->phy_type = v2->port0_phy_type;
+               efx->mii.phy_id = v2->port0_phy_addr;
+               board_rev = le16_to_cpu(v2->board_revision);
+       }
+
+       EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id);
+
+       efx_set_board_info(efx, board_rev);
+
+ out:
+       kfree(nvconfig);
+       return rc;
+}
+
+/* Probe the NIC variant (revision, ASIC vs FPGA, function count, port
+ * count, port speed).  Set workaround and feature flags accordingly.
+ */
+static int falcon_probe_nic_variant(struct efx_nic *efx)
+{
+       efx_oword_t altera_build;
+
+       falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER);
+       if (EFX_OWORD_FIELD(altera_build, VER_ALL)) {
+               EFX_ERR(efx, "Falcon FPGA not supported\n");
+               return -ENODEV;
+       }
+
+       switch (FALCON_REV(efx)) {
+       case FALCON_REV_A0:
+       case 0xff:
+               EFX_ERR(efx, "Falcon rev A0 not supported\n");
+               return -ENODEV;
+
+       case FALCON_REV_A1:{
+               efx_oword_t nic_stat;
+
+               falcon_read(efx, &nic_stat, NIC_STAT_REG);
+
+               if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) {
+                       EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
+                       return -ENODEV;
+               }
+               if (!EFX_OWORD_FIELD(nic_stat, STRAP_10G)) {
+                       EFX_ERR(efx, "1G mode not supported\n");
+                       return -ENODEV;
+               }
+               break;
+       }
+
+       case FALCON_REV_B0:
+               break;
+
+       default:
+               EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx));
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+int falcon_probe_nic(struct efx_nic *efx)
+{
+       struct falcon_nic_data *nic_data;
+       int rc;
+
+       /* Initialise I2C interface state */
+       efx->i2c.efx = efx;
+       efx->i2c.op = &falcon_i2c_bit_operations;
+       efx->i2c.sda = 1;
+       efx->i2c.scl = 1;
+
+       /* Allocate storage for hardware specific data */
+       nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
+       if (!nic_data)
+               return -ENOMEM;
+       efx->nic_data = (void *) nic_data;
+
+       /* Determine number of ports etc. */
+       rc = falcon_probe_nic_variant(efx);
+       if (rc)
+               goto fail1;
+
+       /* Probe secondary function if expected */
+       if (FALCON_IS_DUAL_FUNC(efx)) {
+               struct pci_dev *dev = pci_dev_get(efx->pci_dev);
+
+               while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID,
+                                            dev))) {
+                       if (dev->bus == efx->pci_dev->bus &&
+                           dev->devfn == efx->pci_dev->devfn + 1) {
+                               nic_data->pci_dev2 = dev;
+                               break;
+                       }
+               }
+               if (!nic_data->pci_dev2) {
+                       EFX_ERR(efx, "failed to find secondary function\n");
+                       rc = -ENODEV;
+                       goto fail2;
+               }
+       }
+
+       /* Now we can reset the NIC */
+       rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
+       if (rc) {
+               EFX_ERR(efx, "failed to reset NIC\n");
+               goto fail3;
+       }
+
+       /* Allocate memory for INT_KER */
+       rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t));
+       if (rc)
+               goto fail4;
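+       /* The interrupt status buffer must be 16-byte aligned
+        * (asserted by the BUG_ON below) */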
+       BUG_ON(efx->irq_status.dma_addr & 0x0f);
+
+       EFX_LOG(efx, "INT_KER at %llx (virt %p phys %lx)\n",
+               (unsigned long long)efx->irq_status.dma_addr,
+               efx->irq_status.addr, virt_to_phys(efx->irq_status.addr));
+
+       /* Read in the non-volatile configuration */
+       rc = falcon_probe_nvconfig(efx);
+       if (rc)
+               goto fail5;
+
+       return 0;
+
+ fail5:
+       falcon_free_buffer(efx, &efx->irq_status);
+ fail4:
+       /* fall-thru */
+ fail3:
+       if (nic_data->pci_dev2) {
+               pci_dev_put(nic_data->pci_dev2);
+               nic_data->pci_dev2 = NULL;
+       }
+ fail2:
+       /* fall-thru */
+ fail1:
+       kfree(efx->nic_data);
+       return rc;
+}
+
+/* This call performs hardware-specific global initialisation, such as
+ * defining the descriptor cache sizes and number of RSS channels.
+ * It does not set up any buffers, descriptor rings or event queues.
+ */
+int falcon_init_nic(struct efx_nic *efx)
+{
+       struct falcon_nic_data *data;
+       efx_oword_t temp;
+       unsigned thresh;
+       int rc;
+
+       data = (struct falcon_nic_data *)efx->nic_data;
+
+       /* Set up the address region register. This is only needed
+        * for the B0 FPGA, but since we are just pushing in the
+        * reset defaults this may as well be unconditional. */
+       EFX_POPULATE_OWORD_4(temp, ADR_REGION0, 0,
+                                  ADR_REGION1, (1 << 16),
+                                  ADR_REGION2, (2 << 16),
+                                  ADR_REGION3, (3 << 16));
+       falcon_write(efx, &temp, ADR_REGION_REG_KER);
+
+       /* Use on-chip SRAM */
+       falcon_read(efx, &temp, NIC_STAT_REG);
+       EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1);
+       falcon_write(efx, &temp, NIC_STAT_REG);
+
+       /* Set buffer table mode */
+       EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL);
+       falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);
+
+       rc = falcon_reset_sram(efx);
+       if (rc)
+               return rc;
+
+       /* Set positions of descriptor caches in SRAM. */
+       EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
+       falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER);
+       EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
+       falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER);
+
+       /* Set TX descriptor cache size. */
+       BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
+       EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
+       falcon_write(efx, &temp, TX_DC_CFG_REG_KER);
+
+       /* Set RX descriptor cache size.  Set low watermark to size-8, as
+        * this allows most efficient prefetching.
+        */
+       BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
+       EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
+       falcon_write(efx, &temp, RX_DC_CFG_REG_KER);
+       EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
+       falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER);
+
+       /* Clear the parity enables on the TX data fifos as
+        * they produce false parity errors because of timing issues
+        */
+       if (EFX_WORKAROUND_5129(efx)) {
+               falcon_read(efx, &temp, SPARE_REG_KER);
+               EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0);
+               falcon_write(efx, &temp, SPARE_REG_KER);
+       }
+
+       /* Enable all the genuinely fatal interrupts.  (They are still
+        * masked by the overall interrupt mask, controlled by
+        * falcon_interrupts()).
+        *
+        * Note: All other fatal interrupts are enabled
+        */
+       EFX_POPULATE_OWORD_3(temp,
+                            ILL_ADR_INT_KER_EN, 1,
+                            RBUF_OWN_INT_KER_EN, 1,
+                            TBUF_OWN_INT_KER_EN, 1);
+       EFX_INVERT_OWORD(temp);
+       falcon_write(efx, &temp, FATAL_INTR_REG_KER);
+
+       /* Set number of RSS queues for receive path. */
+       falcon_read(efx, &temp, RX_FILTER_CTL_REG);
+       if (FALCON_REV(efx) >= FALCON_REV_B0)
+               EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
+       else
+               EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
+       if (EFX_WORKAROUND_7244(efx)) {
+               EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8);
+               EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8);
+               EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8);
+               EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8);
+       }
+       falcon_write(efx, &temp, RX_FILTER_CTL_REG);
+
+       falcon_setup_rss_indir_table(efx);
+
+       /* Set up RX.  The "wait for descriptor" mechanism is broken and
+        * must be disabled.  RXDP recovery shouldn't be needed, but is.
+        */
+       falcon_read(efx, &temp, RX_SELF_RST_REG_KER);
+       EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1);
+       EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1);
+       if (EFX_WORKAROUND_5583(efx))
+               EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1);
+       falcon_write(efx, &temp, RX_SELF_RST_REG_KER);
+
+       /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
+        * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
+        */
+       falcon_read(efx, &temp, TX_CFG2_REG_KER);
+       EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe);
+       EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1);
+       EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1);
+       EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0);
+       EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1);
+       /* Enable SW_EV to inherit in char driver - assume harmless here */
+       EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1);
+       /* Prefetch threshold 2 => fetch when descriptor cache half empty */
+       EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
+       /* Squash TX of packets of 16 bytes or less */
+       if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
+               EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
+       falcon_write(efx, &temp, TX_CFG2_REG_KER);
+
+       /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
+        * descriptors (which is bad).
+        */
+       falcon_read(efx, &temp, TX_CFG_REG_KER);
+       EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0);
+       falcon_write(efx, &temp, TX_CFG_REG_KER);
+
+       /* RX config */
+       falcon_read(efx, &temp, RX_CFG_REG_KER);
+       EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
+       if (EFX_WORKAROUND_7575(efx))
+               EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
+                                       (3 * 4096) / 32);
+       if (FALCON_REV(efx) >= FALCON_REV_B0)
+               EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
+
+       /* RX FIFO flow control thresholds, programmed in units of
+        * 256 bytes */
+       thresh = ((rx_xon_thresh_bytes >= 0) ?
+                 rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
+       EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
+       thresh = ((rx_xoff_thresh_bytes >= 0) ?
+                 rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
+       EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
+       /* RX control FIFO thresholds [32 entries] */
+       EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 25);
+       EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 20);
+       falcon_write(efx, &temp, RX_CFG_REG_KER);
+
+       /* Set destination of both TX and RX Flush events */
+       if (FALCON_REV(efx) >= FALCON_REV_B0) {
+               EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
+               falcon_write(efx, &temp, DP_CTRL_REG);
+       }
+
+       return 0;
+}
+
+void falcon_remove_nic(struct efx_nic *efx)
+{
+       struct falcon_nic_data *nic_data = efx->nic_data;
+
+       falcon_free_buffer(efx, &efx->irq_status);
+
+       (void) falcon_reset_hw(efx, RESET_TYPE_ALL);
+
+       /* Release the second function after the reset */
+       if (nic_data->pci_dev2) {
+               pci_dev_put(nic_data->pci_dev2);
+               nic_data->pci_dev2 = NULL;
+       }
+
+       /* Tear down the private nic state */
+       kfree(efx->nic_data);
+       efx->nic_data = NULL;
+}
+
+void falcon_update_nic_stats(struct efx_nic *efx)
+{
+       efx_oword_t cnt;
+
+       falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER);
+       efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT);
+}
+
+/**************************************************************************
+ *
+ * Revision-dependent attributes used by efx.c
+ *
+ **************************************************************************
+ */
+
+struct efx_nic_type falcon_a_nic_type = {
+       .mem_bar = 2,
+       .mem_map_size = 0x20000,
+       .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1,
+       .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1,
+       .buf_tbl_base = BUF_TBL_KER_A1,
+       .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1,
+       .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1,
+       .txd_ring_mask = FALCON_TXD_RING_MASK,
+       .rxd_ring_mask = FALCON_RXD_RING_MASK,
+       .evq_size = FALCON_EVQ_SIZE,
+       .max_dma_mask = FALCON_DMA_MASK,
+       .tx_dma_mask = FALCON_TX_DMA_MASK,
+       .bug5391_mask = 0xf,
+       .rx_xoff_thresh = 2048,
+       .rx_xon_thresh = 512,
+       .rx_buffer_padding = 0x24,
+       .max_interrupt_mode = EFX_INT_MODE_MSI,
+       .phys_addr_channels = 4,
+};
+
+struct efx_nic_type falcon_b_nic_type = {
+       .mem_bar = 2,
+       /* Map everything up to and including the RSS indirection
+        * table.  Don't map MSI-X table, MSI-X PBA since Linux
+        * requires that they not be mapped.  */
+       .mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800,
+       .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0,
+       .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0,
+       .buf_tbl_base = BUF_TBL_KER_B0,
+       .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0,
+       .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0,
+       .txd_ring_mask = FALCON_TXD_RING_MASK,
+       .rxd_ring_mask = FALCON_RXD_RING_MASK,
+       .evq_size = FALCON_EVQ_SIZE,
+       .max_dma_mask = FALCON_DMA_MASK,
+       .tx_dma_mask = FALCON_TX_DMA_MASK,
+       .bug5391_mask = 0,
+       .rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */
+       .rx_xon_thresh = 27648,  /* ~3*max MTU */
+       .rx_buffer_padding = 0,
+       .max_interrupt_mode = EFX_INT_MODE_MSIX,
+       .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
+                                  * interrupt handler only supports 32
+                                  * channels */
+};
+
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
new file mode 100644 (file)
index 0000000..6117403
--- /dev/null
@@ -0,0 +1,130 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_FALCON_H
+#define EFX_FALCON_H
+
+#include "net_driver.h"
+
+/*
+ * Falcon hardware control
+ */
+
+enum falcon_revision {
+       FALCON_REV_A0 = 0,
+       FALCON_REV_A1 = 1,
+       FALCON_REV_B0 = 2,
+};
+
+#define FALCON_REV(efx) ((efx)->pci_dev->revision)
+
+extern struct efx_nic_type falcon_a_nic_type;
+extern struct efx_nic_type falcon_b_nic_type;
+
+/**************************************************************************
+ *
+ * Externs
+ *
+ **************************************************************************
+ */
+
+/* TX data path */
+extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
+extern int falcon_init_tx(struct efx_tx_queue *tx_queue);
+extern void falcon_fini_tx(struct efx_tx_queue *tx_queue);
+extern void falcon_remove_tx(struct efx_tx_queue *tx_queue);
+extern void falcon_push_buffers(struct efx_tx_queue *tx_queue);
+
+/* RX data path */
+extern int falcon_probe_rx(struct efx_rx_queue *rx_queue);
+extern int falcon_init_rx(struct efx_rx_queue *rx_queue);
+extern void falcon_fini_rx(struct efx_rx_queue *rx_queue);
+extern void falcon_remove_rx(struct efx_rx_queue *rx_queue);
+extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue);
+
+/* Event data path */
+extern int falcon_probe_eventq(struct efx_channel *channel);
+extern int falcon_init_eventq(struct efx_channel *channel);
+extern void falcon_fini_eventq(struct efx_channel *channel);
+extern void falcon_remove_eventq(struct efx_channel *channel);
+extern int falcon_process_eventq(struct efx_channel *channel, int *rx_quota);
+extern void falcon_eventq_read_ack(struct efx_channel *channel);
+
+/* Ports */
+extern int falcon_probe_port(struct efx_nic *efx);
+extern void falcon_remove_port(struct efx_nic *efx);
+
+/* MAC/PHY */
+extern int falcon_xaui_link_ok(struct efx_nic *efx);
+extern int falcon_dma_stats(struct efx_nic *efx,
+                           unsigned int done_offset);
+extern void falcon_drain_tx_fifo(struct efx_nic *efx);
+extern void falcon_deconfigure_mac_wrapper(struct efx_nic *efx);
+extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx);
+
+/* Interrupts and test events */
+extern int falcon_init_interrupt(struct efx_nic *efx);
+extern void falcon_enable_interrupts(struct efx_nic *efx);
+extern void falcon_generate_test_event(struct efx_channel *channel,
+                                      unsigned int magic);
+extern void falcon_generate_interrupt(struct efx_nic *efx);
+extern void falcon_set_int_moderation(struct efx_channel *channel);
+extern void falcon_disable_interrupts(struct efx_nic *efx);
+extern void falcon_fini_interrupt(struct efx_nic *efx);
+
+/* Global Resources */
+extern int falcon_probe_nic(struct efx_nic *efx);
+extern int falcon_probe_resources(struct efx_nic *efx);
+extern int falcon_init_nic(struct efx_nic *efx);
+extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
+extern void falcon_remove_resources(struct efx_nic *efx);
+extern void falcon_remove_nic(struct efx_nic *efx);
+extern void falcon_update_nic_stats(struct efx_nic *efx);
+extern void falcon_set_multicast_hash(struct efx_nic *efx);
+extern int falcon_reset_xaui(struct efx_nic *efx);
+
+/**************************************************************************
+ *
+ * Falcon MAC stats
+ *
+ **************************************************************************
+ */
+
+#define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset)
+#define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH)
+
+/* Retrieve statistic from statistics block */
+#define FALCON_STAT(efx, falcon_stat, efx_stat) do {           \
+       if (FALCON_STAT_WIDTH(falcon_stat) == 16)               \
+               (efx)->mac_stats.efx_stat += le16_to_cpu(       \
+                       *((__force __le16 *)                            \
+                         (efx->stats_buffer.addr +             \
+                          FALCON_STAT_OFFSET(falcon_stat))));  \
+       else if (FALCON_STAT_WIDTH(falcon_stat) == 32)          \
+               (efx)->mac_stats.efx_stat += le32_to_cpu(       \
+                       *((__force __le32 *)                            \
+                         (efx->stats_buffer.addr +             \
+                          FALCON_STAT_OFFSET(falcon_stat))));  \
+       else                                                    \
+               (efx)->mac_stats.efx_stat += le64_to_cpu(       \
+                       *((__force __le64 *)                            \
+                         (efx->stats_buffer.addr +             \
+                          FALCON_STAT_OFFSET(falcon_stat))));  \
+       } while (0)
+
+#define FALCON_MAC_STATS_SIZE 0x100
+
+#define MAC_DATA_LBN 0
+#define MAC_DATA_WIDTH 32
+
+extern void falcon_generate_event(struct efx_channel *channel,
+                                 efx_qword_t *event);
+
+#endif /* EFX_FALCON_H */
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
new file mode 100644 (file)
index 0000000..0485a63
--- /dev/null
@@ -0,0 +1,1135 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_FALCON_HWDEFS_H
+#define EFX_FALCON_HWDEFS_H
+
+/*
+ * Falcon hardware value definitions.
+ * Falcon is the internal codename for the SFC4000 controller that is
+ * present in SFE400X evaluation boards
+ */
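+
+/* Register fields throughout this file are described by a <NAME>_LBN
+ * definition (low bit number, i.e. the least significant bit of the field)
+ * paired with a <NAME>_WIDTH definition (the field width in bits).
+ */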
+
+/**************************************************************************
+ *
+ * Falcon registers
+ *
+ **************************************************************************
+ */
+
+/* Address region register */
+#define ADR_REGION_REG_KER     0x00
+#define ADR_REGION0_LBN        0
+#define ADR_REGION0_WIDTH      18
+#define ADR_REGION1_LBN        32
+#define ADR_REGION1_WIDTH      18
+#define ADR_REGION2_LBN        64
+#define ADR_REGION2_WIDTH      18
+#define ADR_REGION3_LBN        96
+#define ADR_REGION3_WIDTH      18
+
+/* Interrupt enable register */
+#define INT_EN_REG_KER 0x0010
+#define KER_INT_KER_LBN 3
+#define KER_INT_KER_WIDTH 1
+#define DRV_INT_EN_KER_LBN 0
+#define DRV_INT_EN_KER_WIDTH 1
+
+/* Interrupt status address register */
+#define INT_ADR_REG_KER        0x0030
+#define NORM_INT_VEC_DIS_KER_LBN 64
+#define NORM_INT_VEC_DIS_KER_WIDTH 1
+#define INT_ADR_KER_LBN 0
+#define INT_ADR_KER_WIDTH EFX_DMA_TYPE_WIDTH(64) /* not 46 for this one */
+
+/* Interrupt status register (B0 only) */
+#define INT_ISR0_B0 0x90
+#define INT_ISR1_B0 0xA0
+
+/* Interrupt acknowledge register (A0/A1 only) */
+#define INT_ACK_REG_KER_A1 0x0050
+#define INT_ACK_DUMMY_DATA_LBN 0
+#define INT_ACK_DUMMY_DATA_WIDTH 32
+
+/* Interrupt acknowledge work-around register (A0/A1 only) */
+#define WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1 0x0070
+
+/* SPI host command register */
+#define EE_SPI_HCMD_REG_KER 0x0100
+#define EE_SPI_HCMD_CMD_EN_LBN 31
+#define EE_SPI_HCMD_CMD_EN_WIDTH 1
+#define EE_WR_TIMER_ACTIVE_LBN 28
+#define EE_WR_TIMER_ACTIVE_WIDTH 1
+#define EE_SPI_HCMD_SF_SEL_LBN 24
+#define EE_SPI_HCMD_SF_SEL_WIDTH 1
+#define EE_SPI_EEPROM 0
+#define EE_SPI_FLASH 1
+#define EE_SPI_HCMD_DABCNT_LBN 16
+#define EE_SPI_HCMD_DABCNT_WIDTH 5
+#define EE_SPI_HCMD_READ_LBN 15
+#define EE_SPI_HCMD_READ_WIDTH 1
+#define EE_SPI_READ 1
+#define EE_SPI_WRITE 0
+#define EE_SPI_HCMD_DUBCNT_LBN 12
+#define EE_SPI_HCMD_DUBCNT_WIDTH 2
+#define EE_SPI_HCMD_ADBCNT_LBN 8
+#define EE_SPI_HCMD_ADBCNT_WIDTH 2
+#define EE_SPI_HCMD_ENC_LBN 0
+#define EE_SPI_HCMD_ENC_WIDTH 8
+
+/* SPI host address register */
+#define EE_SPI_HADR_REG_KER 0x0110
+#define EE_SPI_HADR_ADR_LBN 0
+#define EE_SPI_HADR_ADR_WIDTH 24
+
+/* SPI host data register */
+#define EE_SPI_HDATA_REG_KER 0x0120
+
+/* PCIE CORE ACCESS REG */
+#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
+#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
+#define PCIE_CORE_ADDR_ACK_RPL_TIMER 0x700
+#define PCIE_CORE_ADDR_ACK_FREQ 0x70C
+
+/* NIC status register */
+#define NIC_STAT_REG 0x0200
+#define ONCHIP_SRAM_LBN 16
+#define ONCHIP_SRAM_WIDTH 1
+#define SF_PRST_LBN 9
+#define SF_PRST_WIDTH 1
+#define EE_PRST_LBN 8
+#define EE_PRST_WIDTH 1
+/* See pic_mode_t for decoding of this field */
+/* These bit definitions are extrapolated from the list of numerical
+ * values for STRAP_PINS.
+ */
+#define STRAP_10G_LBN 2
+#define STRAP_10G_WIDTH 1
+#define STRAP_PCIE_LBN 0
+#define STRAP_PCIE_WIDTH 1
+
+/* GPIO control register */
+#define GPIO_CTL_REG_KER 0x0210
+#define GPIO_OUTPUTS_LBN   (16)
+#define GPIO_OUTPUTS_WIDTH (4)
+#define GPIO_INPUTS_LBN (8)
+#define GPIO_DIRECTION_LBN (24)
+#define GPIO_DIRECTION_WIDTH (4)
+#define GPIO_DIRECTION_OUT (1)
+#define GPIO_SRAM_SLEEP (1 << 1)
+
+#define GPIO3_OEN_LBN (GPIO_DIRECTION_LBN + 3)
+#define        GPIO3_OEN_WIDTH 1
+#define        GPIO2_OEN_LBN (GPIO_DIRECTION_LBN + 2)
+#define        GPIO2_OEN_WIDTH 1
+#define        GPIO1_OEN_LBN (GPIO_DIRECTION_LBN + 1)
+#define        GPIO1_OEN_WIDTH 1
+#define GPIO0_OEN_LBN (GPIO_DIRECTION_LBN + 0)
+#define        GPIO0_OEN_WIDTH 1
+
+#define        GPIO3_OUT_LBN (GPIO_OUTPUTS_LBN + 3)
+#define        GPIO3_OUT_WIDTH 1
+#define        GPIO2_OUT_LBN (GPIO_OUTPUTS_LBN + 2)
+#define        GPIO2_OUT_WIDTH 1
+#define        GPIO1_OUT_LBN (GPIO_OUTPUTS_LBN + 1)
+#define        GPIO1_OUT_WIDTH 1
+#define        GPIO0_OUT_LBN (GPIO_OUTPUTS_LBN + 0)
+#define        GPIO0_OUT_WIDTH 1
+
+#define GPIO3_IN_LBN (GPIO_INPUTS_LBN + 3)
+#define        GPIO3_IN_WIDTH 1
+#define        GPIO2_IN_WIDTH 1
+#define        GPIO1_IN_WIDTH 1
+#define GPIO0_IN_LBN (GPIO_INPUTS_LBN + 0)
+#define        GPIO0_IN_WIDTH 1
+
+/* Global control register */
+#define GLB_CTL_REG_KER        0x0220
+#define EXT_PHY_RST_CTL_LBN 63
+#define EXT_PHY_RST_CTL_WIDTH 1
+#define PCIE_SD_RST_CTL_LBN 61
+#define PCIE_SD_RST_CTL_WIDTH 1
+
+#define PCIE_NSTCK_RST_CTL_LBN 58
+#define PCIE_NSTCK_RST_CTL_WIDTH 1
+#define PCIE_CORE_RST_CTL_LBN 57
+#define PCIE_CORE_RST_CTL_WIDTH 1
+#define EE_RST_CTL_LBN 49
+#define EE_RST_CTL_WIDTH 1
+#define RST_XGRX_LBN 24
+#define RST_XGRX_WIDTH 1
+#define RST_XGTX_LBN 23
+#define RST_XGTX_WIDTH 1
+#define RST_EM_LBN 22
+#define RST_EM_WIDTH 1
+#define EXT_PHY_RST_DUR_LBN 1
+#define EXT_PHY_RST_DUR_WIDTH 3
+#define SWRST_LBN 0
+#define SWRST_WIDTH 1
+#define INCLUDE_IN_RESET 0
+#define EXCLUDE_FROM_RESET 1
+
+/* Fatal interrupt register */
+#define FATAL_INTR_REG_KER 0x0230
+#define RBUF_OWN_INT_KER_EN_LBN 39
+#define RBUF_OWN_INT_KER_EN_WIDTH 1
+#define TBUF_OWN_INT_KER_EN_LBN 38
+#define TBUF_OWN_INT_KER_EN_WIDTH 1
+#define ILL_ADR_INT_KER_EN_LBN 33
+#define ILL_ADR_INT_KER_EN_WIDTH 1
+#define MEM_PERR_INT_KER_LBN 8
+#define MEM_PERR_INT_KER_WIDTH 1
+#define INT_KER_ERROR_LBN 0
+#define INT_KER_ERROR_WIDTH 12
+
+#define DP_CTRL_REG 0x250
+#define FLS_EVQ_ID_LBN 0
+#define FLS_EVQ_ID_WIDTH 11
+
+#define MEM_STAT_REG_KER 0x260
+
+/* Debug probe register */
+#define DEBUG_BLK_SEL_MISC 7
+#define DEBUG_BLK_SEL_SERDES 6
+#define DEBUG_BLK_SEL_EM 5
+#define DEBUG_BLK_SEL_SR 4
+#define DEBUG_BLK_SEL_EV 3
+#define DEBUG_BLK_SEL_RX 2
+#define DEBUG_BLK_SEL_TX 1
+#define DEBUG_BLK_SEL_BIU 0
+
+/* FPGA build version */
+#define ALTERA_BUILD_REG_KER 0x0300
+#define VER_ALL_LBN 0
+#define VER_ALL_WIDTH 32
+
+/* Spare EEPROM bits register (flash 0x390) */
+#define SPARE_REG_KER 0x310
+#define MEM_PERR_EN_TX_DATA_LBN 72
+#define MEM_PERR_EN_TX_DATA_WIDTH 2
+
+/* Timer table for kernel access */
+#define TIMER_CMD_REG_KER 0x420
+#define TIMER_MODE_LBN 12
+#define TIMER_MODE_WIDTH 2
+#define TIMER_MODE_DIS 0
+#define TIMER_MODE_INT_HLDOFF 2
+#define TIMER_VAL_LBN 0
+#define TIMER_VAL_WIDTH 12
+
+/* Driver generated event register */
+#define DRV_EV_REG_KER 0x440
+#define DRV_EV_QID_LBN 64
+#define DRV_EV_QID_WIDTH 12
+#define DRV_EV_DATA_LBN 0
+#define DRV_EV_DATA_WIDTH 64
+
+/* Buffer table configuration register */
+#define BUF_TBL_CFG_REG_KER 0x600
+#define BUF_TBL_MODE_LBN 3
+#define BUF_TBL_MODE_WIDTH 1
+#define BUF_TBL_MODE_HALF 0
+#define BUF_TBL_MODE_FULL 1
+
+/* SRAM receive descriptor cache configuration register */
+#define SRM_RX_DC_CFG_REG_KER 0x610
+#define SRM_RX_DC_BASE_ADR_LBN 0
+#define SRM_RX_DC_BASE_ADR_WIDTH 21
+
+/* SRAM transmit descriptor cache configuration register */
+#define SRM_TX_DC_CFG_REG_KER 0x620
+#define SRM_TX_DC_BASE_ADR_LBN 0
+#define SRM_TX_DC_BASE_ADR_WIDTH 21
+
+/* SRAM configuration register */
+#define SRM_CFG_REG_KER 0x630
+#define SRAM_OOB_BT_INIT_EN_LBN 3
+#define SRAM_OOB_BT_INIT_EN_WIDTH 1
+#define SRM_NUM_BANKS_AND_BANK_SIZE_LBN 0
+#define SRM_NUM_BANKS_AND_BANK_SIZE_WIDTH 3
+#define SRM_NB_BSZ_1BANKS_2M 0
+#define SRM_NB_BSZ_1BANKS_4M 1
+#define SRM_NB_BSZ_1BANKS_8M 2
+#define SRM_NB_BSZ_DEFAULT 3 /* char driver will set the default */
+#define SRM_NB_BSZ_2BANKS_4M 4
+#define SRM_NB_BSZ_2BANKS_8M 5
+#define SRM_NB_BSZ_2BANKS_16M 6
+#define SRM_NB_BSZ_RESERVED 7
+
+/* Special buffer table update register */
+#define BUF_TBL_UPD_REG_KER 0x0650
+#define BUF_UPD_CMD_LBN 63
+#define BUF_UPD_CMD_WIDTH 1
+#define BUF_CLR_CMD_LBN 62
+#define BUF_CLR_CMD_WIDTH 1
+#define BUF_CLR_END_ID_LBN 32
+#define BUF_CLR_END_ID_WIDTH 20
+#define BUF_CLR_START_ID_LBN 0
+#define BUF_CLR_START_ID_WIDTH 20
+
+/* Receive configuration register */
+#define RX_CFG_REG_KER 0x800
+
+/* B0 */
+#define RX_INGR_EN_B0_LBN 47
+#define RX_INGR_EN_B0_WIDTH 1
+#define RX_DESC_PUSH_EN_B0_LBN 43
+#define RX_DESC_PUSH_EN_B0_WIDTH 1
+#define RX_XON_TX_TH_B0_LBN 33
+#define RX_XON_TX_TH_B0_WIDTH 5
+#define RX_XOFF_TX_TH_B0_LBN 28
+#define RX_XOFF_TX_TH_B0_WIDTH 5
+#define RX_USR_BUF_SIZE_B0_LBN 19
+#define RX_USR_BUF_SIZE_B0_WIDTH 9
+#define RX_XON_MAC_TH_B0_LBN 10
+#define RX_XON_MAC_TH_B0_WIDTH 9
+#define RX_XOFF_MAC_TH_B0_LBN 1
+#define RX_XOFF_MAC_TH_B0_WIDTH 9
+#define RX_XOFF_MAC_EN_B0_LBN 0
+#define RX_XOFF_MAC_EN_B0_WIDTH 1
+
+/* A1 */
+#define RX_DESC_PUSH_EN_A1_LBN 35
+#define RX_DESC_PUSH_EN_A1_WIDTH 1
+#define RX_XON_TX_TH_A1_LBN 25
+#define RX_XON_TX_TH_A1_WIDTH 5
+#define RX_XOFF_TX_TH_A1_LBN 20
+#define RX_XOFF_TX_TH_A1_WIDTH 5
+#define RX_USR_BUF_SIZE_A1_LBN 11
+#define RX_USR_BUF_SIZE_A1_WIDTH 9
+#define RX_XON_MAC_TH_A1_LBN 6
+#define RX_XON_MAC_TH_A1_WIDTH 5
+#define RX_XOFF_MAC_TH_A1_LBN 1
+#define RX_XOFF_MAC_TH_A1_WIDTH 5
+#define RX_XOFF_MAC_EN_A1_LBN 0
+#define RX_XOFF_MAC_EN_A1_WIDTH 1
+
+/* Receive filter control register */
+#define RX_FILTER_CTL_REG 0x810
+#define UDP_FULL_SRCH_LIMIT_LBN 32
+#define UDP_FULL_SRCH_LIMIT_WIDTH 8
+#define NUM_KER_LBN 24
+#define NUM_KER_WIDTH 2
+#define UDP_WILD_SRCH_LIMIT_LBN 16
+#define UDP_WILD_SRCH_LIMIT_WIDTH 8
+#define TCP_WILD_SRCH_LIMIT_LBN 8
+#define TCP_WILD_SRCH_LIMIT_WIDTH 8
+#define TCP_FULL_SRCH_LIMIT_LBN 0
+#define TCP_FULL_SRCH_LIMIT_WIDTH 8
+
+/* RX queue flush register */
+#define RX_FLUSH_DESCQ_REG_KER 0x0820
+#define RX_FLUSH_DESCQ_CMD_LBN 24
+#define RX_FLUSH_DESCQ_CMD_WIDTH 1
+#define RX_FLUSH_DESCQ_LBN 0
+#define RX_FLUSH_DESCQ_WIDTH 12
+
+/* Receive descriptor update register */
+#define RX_DESC_UPD_REG_KER_DWORD (0x830 + 12)
+#define RX_DESC_WPTR_DWORD_LBN 0
+#define RX_DESC_WPTR_DWORD_WIDTH 12
+
+/* Receive descriptor cache configuration register */
+#define RX_DC_CFG_REG_KER 0x840
+#define RX_DC_SIZE_LBN 0
+#define RX_DC_SIZE_WIDTH 2
+
+#define RX_DC_PF_WM_REG_KER 0x850
+#define RX_DC_PF_LWM_LBN 0
+#define RX_DC_PF_LWM_WIDTH 6
+
+/* RX no descriptor drop counter */
+#define RX_NODESC_DROP_REG_KER 0x880
+#define RX_NODESC_DROP_CNT_LBN 0
+#define RX_NODESC_DROP_CNT_WIDTH 16
+
+/* RX black magic register */
+#define RX_SELF_RST_REG_KER 0x890
+#define RX_ISCSI_DIS_LBN 17
+#define RX_ISCSI_DIS_WIDTH 1
+#define RX_NODESC_WAIT_DIS_LBN 9
+#define RX_NODESC_WAIT_DIS_WIDTH 1
+#define RX_RECOVERY_EN_LBN 8
+#define RX_RECOVERY_EN_WIDTH 1
+
+/* TX queue flush register */
+#define TX_FLUSH_DESCQ_REG_KER 0x0a00
+#define TX_FLUSH_DESCQ_CMD_LBN 12
+#define TX_FLUSH_DESCQ_CMD_WIDTH 1
+#define TX_FLUSH_DESCQ_LBN 0
+#define TX_FLUSH_DESCQ_WIDTH 12
+
+/* Transmit descriptor update register */
+#define TX_DESC_UPD_REG_KER_DWORD (0xa10 + 12)
+#define TX_DESC_WPTR_DWORD_LBN 0
+#define TX_DESC_WPTR_DWORD_WIDTH 12
+
+/* Transmit descriptor cache configuration register */
+#define TX_DC_CFG_REG_KER 0xa20
+#define TX_DC_SIZE_LBN 0
+#define TX_DC_SIZE_WIDTH 2
+
+/* Transmit checksum configuration register (A0/A1 only) */
+#define TX_CHKSM_CFG_REG_KER_A1 0xa30
+
+/* Transmit configuration register */
+#define TX_CFG_REG_KER 0xa50
+#define TX_NO_EOP_DISC_EN_LBN 5
+#define TX_NO_EOP_DISC_EN_WIDTH 1
+
+/* Transmit configuration register 2 */
+#define TX_CFG2_REG_KER 0xa80
+#define TX_CSR_PUSH_EN_LBN 89
+#define TX_CSR_PUSH_EN_WIDTH 1
+#define TX_RX_SPACER_LBN 64
+#define TX_RX_SPACER_WIDTH 8
+#define TX_SW_EV_EN_LBN 59
+#define TX_SW_EV_EN_WIDTH 1
+#define TX_RX_SPACER_EN_LBN 57
+#define TX_RX_SPACER_EN_WIDTH 1
+#define TX_PREF_THRESHOLD_LBN 19
+#define TX_PREF_THRESHOLD_WIDTH 2
+#define TX_ONE_PKT_PER_Q_LBN 18
+#define TX_ONE_PKT_PER_Q_WIDTH 1
+#define TX_DIS_NON_IP_EV_LBN 17
+#define TX_DIS_NON_IP_EV_WIDTH 1
+#define TX_FLUSH_MIN_LEN_EN_B0_LBN 7
+#define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1
+
+/* PHY management transmit data register */
+#define MD_TXD_REG_KER 0xc00
+#define MD_TXD_LBN 0
+#define MD_TXD_WIDTH 16
+
+/* PHY management receive data register */
+#define MD_RXD_REG_KER 0xc10
+#define MD_RXD_LBN 0
+#define MD_RXD_WIDTH 16
+
+/* PHY management configuration & status register */
+#define MD_CS_REG_KER 0xc20
+#define MD_GC_LBN 4
+#define MD_GC_WIDTH 1
+#define MD_RIC_LBN 2
+#define MD_RIC_WIDTH 1
+#define MD_RDC_LBN 1
+#define MD_RDC_WIDTH 1
+#define MD_WRC_LBN 0
+#define MD_WRC_WIDTH 1
+
+/* PHY management PHY address register */
+#define MD_PHY_ADR_REG_KER 0xc30
+#define MD_PHY_ADR_LBN 0
+#define MD_PHY_ADR_WIDTH 16
+
+/* PHY management ID register */
+#define MD_ID_REG_KER 0xc40
+#define MD_PRT_ADR_LBN 11
+#define MD_PRT_ADR_WIDTH 5
+#define MD_DEV_ADR_LBN 6
+#define MD_DEV_ADR_WIDTH 5
+/* Used for writing both at once */
+#define MD_PRT_DEV_ADR_LBN 6
+#define MD_PRT_DEV_ADR_WIDTH 10
+
+/* PHY management status & mask register (DWORD read only) */
+#define MD_STAT_REG_KER 0xc50
+#define MD_BSERR_LBN 2
+#define MD_BSERR_WIDTH 1
+#define MD_LNFL_LBN 1
+#define MD_LNFL_WIDTH 1
+#define MD_BSY_LBN 0
+#define MD_BSY_WIDTH 1
+
+/* Port 0 and 1 MAC stats registers */
+#define MAC0_STAT_DMA_REG_KER 0xc60
+#define MAC_STAT_DMA_CMD_LBN 48
+#define MAC_STAT_DMA_CMD_WIDTH 1
+#define MAC_STAT_DMA_ADR_LBN 0
+#define MAC_STAT_DMA_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
+
+/* Port 0 and 1 MAC control registers */
+#define MAC0_CTRL_REG_KER 0xc80
+#define MAC_XOFF_VAL_LBN 16
+#define MAC_XOFF_VAL_WIDTH 16
+#define TXFIFO_DRAIN_EN_B0_LBN 7
+#define TXFIFO_DRAIN_EN_B0_WIDTH 1
+#define MAC_BCAD_ACPT_LBN 4
+#define MAC_BCAD_ACPT_WIDTH 1
+#define MAC_UC_PROM_LBN 3
+#define MAC_UC_PROM_WIDTH 1
+#define MAC_LINK_STATUS_LBN 2
+#define MAC_LINK_STATUS_WIDTH 1
+#define MAC_SPEED_LBN 0
+#define MAC_SPEED_WIDTH 2
+
+/* 10G XAUI XGXS default values */
+#define XX_TXDRV_DEQ_DEFAULT 0xe /* deq=.6 */
+#define XX_TXDRV_DTX_DEFAULT 0x5 /* 1.25 */
+#define XX_SD_CTL_DRV_DEFAULT 0  /* 20mA */
+
+/* Multicast address hash table */
+#define MAC_MCAST_HASH_REG0_KER 0xca0
+#define MAC_MCAST_HASH_REG1_KER 0xcb0
+
+/* GMAC registers */
+#define FALCON_GMAC_REGBANK 0xe00
+#define FALCON_GMAC_REGBANK_SIZE 0x200
+#define FALCON_GMAC_REG_SIZE 0x10
+
+/* XMAC registers */
+#define FALCON_XMAC_REGBANK 0x1200
+#define FALCON_XMAC_REGBANK_SIZE 0x200
+#define FALCON_XMAC_REG_SIZE 0x10
+
+/* XGMAC address register low */
+#define XM_ADR_LO_REG_MAC 0x00
+#define XM_ADR_3_LBN 24
+#define XM_ADR_3_WIDTH 8
+#define XM_ADR_2_LBN 16
+#define XM_ADR_2_WIDTH 8
+#define XM_ADR_1_LBN 8
+#define XM_ADR_1_WIDTH 8
+#define XM_ADR_0_LBN 0
+#define XM_ADR_0_WIDTH 8
+
+/* XGMAC address register high */
+#define XM_ADR_HI_REG_MAC 0x01
+#define XM_ADR_5_LBN 8
+#define XM_ADR_5_WIDTH 8
+#define XM_ADR_4_LBN 0
+#define XM_ADR_4_WIDTH 8
+
+/* XGMAC global configuration */
+#define XM_GLB_CFG_REG_MAC 0x02
+#define XM_RX_STAT_EN_LBN 11
+#define XM_RX_STAT_EN_WIDTH 1
+#define XM_TX_STAT_EN_LBN 10
+#define XM_TX_STAT_EN_WIDTH 1
+#define XM_RX_JUMBO_MODE_LBN 6
+#define XM_RX_JUMBO_MODE_WIDTH 1
+#define XM_INTCLR_MODE_LBN 3
+#define XM_INTCLR_MODE_WIDTH 1
+#define XM_CORE_RST_LBN 0
+#define XM_CORE_RST_WIDTH 1
+
+/* XGMAC transmit configuration */
+#define XM_TX_CFG_REG_MAC 0x03
+#define XM_IPG_LBN 16
+#define XM_IPG_WIDTH 4
+#define XM_FCNTL_LBN 10
+#define XM_FCNTL_WIDTH 1
+#define XM_TXCRC_LBN 8
+#define XM_TXCRC_WIDTH 1
+#define XM_AUTO_PAD_LBN 5
+#define XM_AUTO_PAD_WIDTH 1
+#define XM_TX_PRMBL_LBN 2
+#define XM_TX_PRMBL_WIDTH 1
+#define XM_TXEN_LBN 1
+#define XM_TXEN_WIDTH 1
+
+/* XGMAC receive configuration */
+#define XM_RX_CFG_REG_MAC 0x04
+#define XM_PASS_CRC_ERR_LBN 25
+#define XM_PASS_CRC_ERR_WIDTH 1
+#define XM_ACPT_ALL_MCAST_LBN 11
+#define XM_ACPT_ALL_MCAST_WIDTH 1
+#define XM_ACPT_ALL_UCAST_LBN 9
+#define XM_ACPT_ALL_UCAST_WIDTH 1
+#define XM_AUTO_DEPAD_LBN 8
+#define XM_AUTO_DEPAD_WIDTH 1
+#define XM_RXEN_LBN 1
+#define XM_RXEN_WIDTH 1
+
+/* XGMAC management interrupt mask register */
+#define XM_MGT_INT_MSK_REG_MAC_B0 0x5
+#define XM_MSK_PRMBLE_ERR_LBN 2
+#define XM_MSK_PRMBLE_ERR_WIDTH 1
+#define XM_MSK_RMTFLT_LBN 1
+#define XM_MSK_RMTFLT_WIDTH 1
+#define XM_MSK_LCLFLT_LBN 0
+#define XM_MSK_LCLFLT_WIDTH 1
+
+/* XGMAC flow control register */
+#define XM_FC_REG_MAC 0x7
+#define XM_PAUSE_TIME_LBN 16
+#define XM_PAUSE_TIME_WIDTH 16
+#define XM_DIS_FCNTL_LBN 0
+#define XM_DIS_FCNTL_WIDTH 1
+
+/* XGMAC pause time count register */
+#define XM_PAUSE_TIME_REG_MAC 0x9
+
+/* XGMAC transmit parameter register */
+#define XM_TX_PARAM_REG_MAC 0x0d
+#define XM_TX_JUMBO_MODE_LBN 31
+#define XM_TX_JUMBO_MODE_WIDTH 1
+#define XM_MAX_TX_FRM_SIZE_LBN 16
+#define XM_MAX_TX_FRM_SIZE_WIDTH 14
+
+/* XGMAC receive parameter register */
+#define XM_RX_PARAM_REG_MAC 0x0e
+#define XM_MAX_RX_FRM_SIZE_LBN 0
+#define XM_MAX_RX_FRM_SIZE_WIDTH 14
+
+/* XGMAC management interrupt status register */
+#define XM_MGT_INT_REG_MAC_B0 0x0f
+#define XM_PRMBLE_ERR 2
+#define XM_PRMBLE_WIDTH 1
+#define XM_RMTFLT_LBN 1
+#define XM_RMTFLT_WIDTH 1
+#define XM_LCLFLT_LBN 0
+#define XM_LCLFLT_WIDTH 1
+
+/* XGXS/XAUI powerdown/reset register */
+#define XX_PWR_RST_REG_MAC 0x10
+
+#define XX_PWRDND_EN_LBN 15
+#define XX_PWRDND_EN_WIDTH 1
+#define XX_PWRDNC_EN_LBN 14
+#define XX_PWRDNC_EN_WIDTH 1
+#define XX_PWRDNB_EN_LBN 13
+#define XX_PWRDNB_EN_WIDTH 1
+#define XX_PWRDNA_EN_LBN 12
+#define XX_PWRDNA_EN_WIDTH 1
+#define XX_RSTPLLCD_EN_LBN 9
+#define XX_RSTPLLCD_EN_WIDTH 1
+#define XX_RSTPLLAB_EN_LBN 8
+#define XX_RSTPLLAB_EN_WIDTH 1
+#define XX_RESETD_EN_LBN 7
+#define XX_RESETD_EN_WIDTH 1
+#define XX_RESETC_EN_LBN 6
+#define XX_RESETC_EN_WIDTH 1
+#define XX_RESETB_EN_LBN 5
+#define XX_RESETB_EN_WIDTH 1
+#define XX_RESETA_EN_LBN 4
+#define XX_RESETA_EN_WIDTH 1
+#define XX_RSTXGXSRX_EN_LBN 2
+#define XX_RSTXGXSRX_EN_WIDTH 1
+#define XX_RSTXGXSTX_EN_LBN 1
+#define XX_RSTXGXSTX_EN_WIDTH 1
+#define XX_RST_XX_EN_LBN 0
+#define XX_RST_XX_EN_WIDTH 1
+
+/* XGXS/XAUI powerdown/reset control register */
+#define XX_SD_CTL_REG_MAC 0x11
+#define XX_HIDRVD_LBN 15
+#define XX_HIDRVD_WIDTH 1
+#define XX_LODRVD_LBN 14
+#define XX_LODRVD_WIDTH 1
+#define XX_HIDRVC_LBN 13
+#define XX_HIDRVC_WIDTH 1
+#define XX_LODRVC_LBN 12
+#define XX_LODRVC_WIDTH 1
+#define XX_HIDRVB_LBN 11
+#define XX_HIDRVB_WIDTH 1
+#define XX_LODRVB_LBN 10
+#define XX_LODRVB_WIDTH 1
+#define XX_HIDRVA_LBN 9
+#define XX_HIDRVA_WIDTH 1
+#define XX_LODRVA_LBN 8
+#define XX_LODRVA_WIDTH 1
+
+#define XX_TXDRV_CTL_REG_MAC 0x12
+#define XX_DEQD_LBN 28
+#define XX_DEQD_WIDTH 4
+#define XX_DEQC_LBN 24
+#define XX_DEQC_WIDTH 4
+#define XX_DEQB_LBN 20
+#define XX_DEQB_WIDTH 4
+#define XX_DEQA_LBN 16
+#define XX_DEQA_WIDTH 4
+#define XX_DTXD_LBN 12
+#define XX_DTXD_WIDTH 4
+#define XX_DTXC_LBN 8
+#define XX_DTXC_WIDTH 4
+#define XX_DTXB_LBN 4
+#define XX_DTXB_WIDTH 4
+#define XX_DTXA_LBN 0
+#define XX_DTXA_WIDTH 4
+
+/* XAUI XGXS core status register */
+#define XX_FORCE_SIG_DECODE_FORCED 0xff
+#define XX_CORE_STAT_REG_MAC 0x16
+#define XX_ALIGN_DONE_LBN 20
+#define XX_ALIGN_DONE_WIDTH 1
+#define XX_SYNC_STAT_LBN 16
+#define XX_SYNC_STAT_WIDTH 4
+#define XX_SYNC_STAT_DECODE_SYNCED 0xf
+#define XX_COMMA_DET_LBN 12
+#define XX_COMMA_DET_WIDTH 4
+#define XX_COMMA_DET_DECODE_DETECTED 0xf
+#define XX_COMMA_DET_RESET 0xf
+#define XX_CHARERR_LBN 4
+#define XX_CHARERR_WIDTH 4
+#define XX_CHARERR_RESET 0xf
+#define XX_DISPERR_LBN 0
+#define XX_DISPERR_WIDTH 4
+#define XX_DISPERR_RESET 0xf
+
+/* Receive filter table */
+#define RX_FILTER_TBL0 0xF00000
+
+/* Receive descriptor pointer table */
+#define RX_DESC_PTR_TBL_KER_A1 0x11800
+#define RX_DESC_PTR_TBL_KER_B0 0xF40000
+#define RX_DESC_PTR_TBL_KER_P0 0x900
+#define RX_ISCSI_DDIG_EN_LBN 88
+#define RX_ISCSI_DDIG_EN_WIDTH 1
+#define RX_ISCSI_HDIG_EN_LBN 87
+#define RX_ISCSI_HDIG_EN_WIDTH 1
+#define RX_DESCQ_BUF_BASE_ID_LBN 36
+#define RX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define RX_DESCQ_EVQ_ID_LBN 24
+#define RX_DESCQ_EVQ_ID_WIDTH 12
+#define RX_DESCQ_OWNER_ID_LBN 10
+#define RX_DESCQ_OWNER_ID_WIDTH 14
+#define RX_DESCQ_LABEL_LBN 5
+#define RX_DESCQ_LABEL_WIDTH 5
+#define RX_DESCQ_SIZE_LBN 3
+#define RX_DESCQ_SIZE_WIDTH 2
+#define RX_DESCQ_SIZE_4K 3
+#define RX_DESCQ_SIZE_2K 2
+#define RX_DESCQ_SIZE_1K 1
+#define RX_DESCQ_SIZE_512 0
+#define RX_DESCQ_TYPE_LBN 2
+#define RX_DESCQ_TYPE_WIDTH 1
+#define RX_DESCQ_JUMBO_LBN 1
+#define RX_DESCQ_JUMBO_WIDTH 1
+#define RX_DESCQ_EN_LBN 0
+#define RX_DESCQ_EN_WIDTH 1
+
+/* Transmit descriptor pointer table */
+#define TX_DESC_PTR_TBL_KER_A1 0x11900
+#define TX_DESC_PTR_TBL_KER_B0 0xF50000
+#define TX_DESC_PTR_TBL_KER_P0 0xa40
+#define TX_NON_IP_DROP_DIS_B0_LBN 91
+#define TX_NON_IP_DROP_DIS_B0_WIDTH 1
+#define TX_IP_CHKSM_DIS_B0_LBN 90
+#define TX_IP_CHKSM_DIS_B0_WIDTH 1
+#define TX_TCP_CHKSM_DIS_B0_LBN 89
+#define TX_TCP_CHKSM_DIS_B0_WIDTH 1
+#define TX_DESCQ_EN_LBN 88
+#define TX_DESCQ_EN_WIDTH 1
+#define TX_ISCSI_DDIG_EN_LBN 87
+#define TX_ISCSI_DDIG_EN_WIDTH 1
+#define TX_ISCSI_HDIG_EN_LBN 86
+#define TX_ISCSI_HDIG_EN_WIDTH 1
+#define TX_DESCQ_BUF_BASE_ID_LBN 36
+#define TX_DESCQ_BUF_BASE_ID_WIDTH 20
+#define TX_DESCQ_EVQ_ID_LBN 24
+#define TX_DESCQ_EVQ_ID_WIDTH 12
+#define TX_DESCQ_OWNER_ID_LBN 10
+#define TX_DESCQ_OWNER_ID_WIDTH 14
+#define TX_DESCQ_LABEL_LBN 5
+#define TX_DESCQ_LABEL_WIDTH 5
+#define TX_DESCQ_SIZE_LBN 3
+#define TX_DESCQ_SIZE_WIDTH 2
+#define TX_DESCQ_SIZE_4K 3
+#define TX_DESCQ_SIZE_2K 2
+#define TX_DESCQ_SIZE_1K 1
+#define TX_DESCQ_SIZE_512 0
+#define TX_DESCQ_TYPE_LBN 1
+#define TX_DESCQ_TYPE_WIDTH 2
+
+/* Event queue pointer */
+#define EVQ_PTR_TBL_KER_A1 0x11a00
+#define EVQ_PTR_TBL_KER_B0 0xf60000
+#define EVQ_PTR_TBL_KER_P0 0x500
+#define EVQ_EN_LBN 23
+#define EVQ_EN_WIDTH 1
+#define EVQ_SIZE_LBN 20
+#define EVQ_SIZE_WIDTH 3
+#define EVQ_SIZE_32K 6
+#define EVQ_SIZE_16K 5
+#define EVQ_SIZE_8K 4
+#define EVQ_SIZE_4K 3
+#define EVQ_SIZE_2K 2
+#define EVQ_SIZE_1K 1
+#define EVQ_SIZE_512 0
+#define EVQ_BUF_BASE_ID_LBN 0
+#define EVQ_BUF_BASE_ID_WIDTH 20
+
+/* Event queue read pointer */
+#define EVQ_RPTR_REG_KER_A1 0x11b00
+#define EVQ_RPTR_REG_KER_B0 0xfa0000
+#define EVQ_RPTR_REG_KER_DWORD (EVQ_RPTR_REG_KER + 0)
+#define EVQ_RPTR_DWORD_LBN 0
+#define EVQ_RPTR_DWORD_WIDTH 14
+
+/* RSS indirection table */
+#define RX_RSS_INDIR_TBL_B0 0xFB0000
+#define RX_RSS_INDIR_ENT_B0_LBN 0
+#define RX_RSS_INDIR_ENT_B0_WIDTH 6
+
+/* Special buffer descriptors (full-mode) */
+#define BUF_FULL_TBL_KER_A1 0x8000
+#define BUF_FULL_TBL_KER_B0 0x800000
+#define IP_DAT_BUF_SIZE_LBN 50
+#define IP_DAT_BUF_SIZE_WIDTH 1
+#define IP_DAT_BUF_SIZE_8K 1
+#define IP_DAT_BUF_SIZE_4K 0
+#define BUF_ADR_REGION_LBN 48
+#define BUF_ADR_REGION_WIDTH 2
+#define BUF_ADR_FBUF_LBN 14
+#define BUF_ADR_FBUF_WIDTH 34
+#define BUF_OWNER_ID_FBUF_LBN 0
+#define BUF_OWNER_ID_FBUF_WIDTH 14
+
+/* Transmit descriptor */
+#define TX_KER_PORT_LBN 63
+#define TX_KER_PORT_WIDTH 1
+#define TX_KER_CONT_LBN 62
+#define TX_KER_CONT_WIDTH 1
+#define TX_KER_BYTE_CNT_LBN 48
+#define TX_KER_BYTE_CNT_WIDTH 14
+#define TX_KER_BUF_REGION_LBN 46
+#define TX_KER_BUF_REGION_WIDTH 2
+#define TX_KER_BUF_REGION0_DECODE 0
+#define TX_KER_BUF_REGION1_DECODE 1
+#define TX_KER_BUF_REGION2_DECODE 2
+#define TX_KER_BUF_REGION3_DECODE 3
+#define TX_KER_BUF_ADR_LBN 0
+#define TX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
+
+/* Receive descriptor */
+#define RX_KER_BUF_SIZE_LBN 48
+#define RX_KER_BUF_SIZE_WIDTH 14
+#define RX_KER_BUF_REGION_LBN 46
+#define RX_KER_BUF_REGION_WIDTH 2
+#define RX_KER_BUF_REGION0_DECODE 0
+#define RX_KER_BUF_REGION1_DECODE 1
+#define RX_KER_BUF_REGION2_DECODE 2
+#define RX_KER_BUF_REGION3_DECODE 3
+#define RX_KER_BUF_ADR_LBN 0
+#define RX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
+
+/**************************************************************************
+ *
+ * Falcon events
+ *
+ **************************************************************************
+ */
+
+/* Event queue entries */
+#define EV_CODE_LBN 60
+#define EV_CODE_WIDTH 4
+#define RX_IP_EV_DECODE 0
+#define TX_IP_EV_DECODE 2
+#define DRIVER_EV_DECODE 5
+#define GLOBAL_EV_DECODE 6
+#define DRV_GEN_EV_DECODE 7
+#define WHOLE_EVENT_LBN 0
+#define WHOLE_EVENT_WIDTH 64
+
+/* Receive events */
+#define RX_EV_PKT_OK_LBN 56
+#define RX_EV_PKT_OK_WIDTH 1
+#define RX_EV_PAUSE_FRM_ERR_LBN 55
+#define RX_EV_PAUSE_FRM_ERR_WIDTH 1
+#define RX_EV_BUF_OWNER_ID_ERR_LBN 54
+#define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
+#define RX_EV_IF_FRAG_ERR_LBN 53
+#define RX_EV_IF_FRAG_ERR_WIDTH 1
+#define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
+#define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
+#define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
+#define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
+#define RX_EV_ETH_CRC_ERR_LBN 50
+#define RX_EV_ETH_CRC_ERR_WIDTH 1
+#define RX_EV_FRM_TRUNC_LBN 49
+#define RX_EV_FRM_TRUNC_WIDTH 1
+#define RX_EV_DRIB_NIB_LBN 48
+#define RX_EV_DRIB_NIB_WIDTH 1
+#define RX_EV_TOBE_DISC_LBN 47
+#define RX_EV_TOBE_DISC_WIDTH 1
+#define RX_EV_PKT_TYPE_LBN 44
+#define RX_EV_PKT_TYPE_WIDTH 3
+#define RX_EV_PKT_TYPE_ETH_DECODE 0
+#define RX_EV_PKT_TYPE_LLC_DECODE 1
+#define RX_EV_PKT_TYPE_JUMBO_DECODE 2
+#define RX_EV_PKT_TYPE_VLAN_DECODE 3
+#define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4
+#define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5
+#define RX_EV_HDR_TYPE_LBN 42
+#define RX_EV_HDR_TYPE_WIDTH 2
+#define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0
+#define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1
+#define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2
+#define RX_EV_HDR_TYPE_NON_IP_DECODE 3
+#define RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type) \
+       ((hdr_type) <= RX_EV_HDR_TYPE_UDP_IPV4_DECODE)
+#define RX_EV_MCAST_HASH_MATCH_LBN 40
+#define RX_EV_MCAST_HASH_MATCH_WIDTH 1
+#define RX_EV_MCAST_PKT_LBN 39
+#define RX_EV_MCAST_PKT_WIDTH 1
+#define RX_EV_Q_LABEL_LBN 32
+#define RX_EV_Q_LABEL_WIDTH 5
+#define RX_EV_JUMBO_CONT_LBN 31
+#define RX_EV_JUMBO_CONT_WIDTH 1
+#define RX_EV_BYTE_CNT_LBN 16
+#define RX_EV_BYTE_CNT_WIDTH 14
+#define RX_EV_SOP_LBN 15
+#define RX_EV_SOP_WIDTH 1
+#define RX_EV_DESC_PTR_LBN 0
+#define RX_EV_DESC_PTR_WIDTH 12
+
+/* Transmit events */
+#define TX_EV_PKT_ERR_LBN 38
+#define TX_EV_PKT_ERR_WIDTH 1
+#define TX_EV_Q_LABEL_LBN 32
+#define TX_EV_Q_LABEL_WIDTH 5
+#define TX_EV_WQ_FF_FULL_LBN 15
+#define TX_EV_WQ_FF_FULL_WIDTH 1
+#define TX_EV_COMP_LBN 12
+#define TX_EV_COMP_WIDTH 1
+#define TX_EV_DESC_PTR_LBN 0
+#define TX_EV_DESC_PTR_WIDTH 12
+
+/* Driver events */
+#define DRIVER_EV_SUB_CODE_LBN 56
+#define DRIVER_EV_SUB_CODE_WIDTH 4
+#define DRIVER_EV_SUB_DATA_LBN 0
+#define DRIVER_EV_SUB_DATA_WIDTH 14
+#define TX_DESCQ_FLS_DONE_EV_DECODE 0
+#define RX_DESCQ_FLS_DONE_EV_DECODE 1
+#define EVQ_INIT_DONE_EV_DECODE 2
+#define EVQ_NOT_EN_EV_DECODE 3
+#define RX_DESCQ_FLSFF_OVFL_EV_DECODE 4
+#define SRM_UPD_DONE_EV_DECODE 5
+#define WAKE_UP_EV_DECODE 6
+#define TX_PKT_NON_TCP_UDP_DECODE 9
+#define TIMER_EV_DECODE 10
+#define RX_RECOVERY_EV_DECODE 11
+#define RX_DSC_ERROR_EV_DECODE 14
+#define TX_DSC_ERROR_EV_DECODE 15
+#define DRIVER_EV_TX_DESCQ_ID_LBN 0
+#define DRIVER_EV_TX_DESCQ_ID_WIDTH 12
+#define DRIVER_EV_RX_FLUSH_FAIL_LBN 12
+#define DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
+#define DRIVER_EV_RX_DESCQ_ID_LBN 0
+#define DRIVER_EV_RX_DESCQ_ID_WIDTH 12
+#define SRM_CLR_EV_DECODE 0
+#define SRM_UPD_EV_DECODE 1
+#define SRM_ILLCLR_EV_DECODE 2
+
+/* Global events */
+#define RX_RECOVERY_B0_LBN 12
+#define RX_RECOVERY_B0_WIDTH 1
+#define XG_MNT_INTR_B0_LBN 11
+#define XG_MNT_INTR_B0_WIDTH 1
+#define RX_RECOVERY_A1_LBN 11
+#define RX_RECOVERY_A1_WIDTH 1
+#define XG_PHY_INTR_LBN 9
+#define XG_PHY_INTR_WIDTH 1
+#define G_PHY1_INTR_LBN 8
+#define G_PHY1_INTR_WIDTH 1
+#define G_PHY0_INTR_LBN 7
+#define G_PHY0_INTR_WIDTH 1
+
+/* Driver-generated test events */
+#define EVQ_MAGIC_LBN 0
+#define EVQ_MAGIC_WIDTH 32
+
+/**************************************************************************
+ *
+ * Falcon MAC stats
+ *
+ **************************************************************************
+ *
+ */
+#define GRxGoodOct_offset 0x0
+#define GRxBadOct_offset 0x8
+#define GRxMissPkt_offset 0x10
+#define GRxFalseCRS_offset 0x14
+#define GRxPausePkt_offset 0x18
+#define GRxBadPkt_offset 0x1C
+#define GRxUcastPkt_offset 0x20
+#define GRxMcastPkt_offset 0x24
+#define GRxBcastPkt_offset 0x28
+#define GRxGoodLt64Pkt_offset 0x2C
+#define GRxBadLt64Pkt_offset 0x30
+#define GRx64Pkt_offset 0x34
+#define GRx65to127Pkt_offset 0x38
+#define GRx128to255Pkt_offset 0x3C
+#define GRx256to511Pkt_offset 0x40
+#define GRx512to1023Pkt_offset 0x44
+#define GRx1024to15xxPkt_offset 0x48
+#define GRx15xxtoJumboPkt_offset 0x4C
+#define GRxGtJumboPkt_offset 0x50
+#define GRxFcsErr64to15xxPkt_offset 0x54
+#define GRxFcsErr15xxtoJumboPkt_offset 0x58
+#define GRxFcsErrGtJumboPkt_offset 0x5C
+#define GTxGoodBadOct_offset 0x80
+#define GTxGoodOct_offset 0x88
+#define GTxSglColPkt_offset 0x90
+#define GTxMultColPkt_offset 0x94
+#define GTxExColPkt_offset 0x98
+#define GTxDefPkt_offset 0x9C
+#define GTxLateCol_offset 0xA0
+#define GTxExDefPkt_offset 0xA4
+#define GTxPausePkt_offset 0xA8
+#define GTxBadPkt_offset 0xAC
+#define GTxUcastPkt_offset 0xB0
+#define GTxMcastPkt_offset 0xB4
+#define GTxBcastPkt_offset 0xB8
+#define GTxLt64Pkt_offset 0xBC
+#define GTx64Pkt_offset 0xC0
+#define GTx65to127Pkt_offset 0xC4
+#define GTx128to255Pkt_offset 0xC8
+#define GTx256to511Pkt_offset 0xCC
+#define GTx512to1023Pkt_offset 0xD0
+#define GTx1024to15xxPkt_offset 0xD4
+#define GTx15xxtoJumboPkt_offset 0xD8
+#define GTxGtJumboPkt_offset 0xDC
+#define GTxNonTcpUdpPkt_offset 0xE0
+#define GTxMacSrcErrPkt_offset 0xE4
+#define GTxIpSrcErrPkt_offset 0xE8
+#define GDmaDone_offset 0xEC
+
+#define XgRxOctets_offset 0x0
+#define XgRxOctets_WIDTH 48
+#define XgRxOctetsOK_offset 0x8
+#define XgRxOctetsOK_WIDTH 48
+#define XgRxPkts_offset 0x10
+#define XgRxPkts_WIDTH 32
+#define XgRxPktsOK_offset 0x14
+#define XgRxPktsOK_WIDTH 32
+#define XgRxBroadcastPkts_offset 0x18
+#define XgRxBroadcastPkts_WIDTH 32
+#define XgRxMulticastPkts_offset 0x1C
+#define XgRxMulticastPkts_WIDTH 32
+#define XgRxUnicastPkts_offset 0x20
+#define XgRxUnicastPkts_WIDTH 32
+#define XgRxUndersizePkts_offset 0x24
+#define XgRxUndersizePkts_WIDTH 32
+#define XgRxOversizePkts_offset 0x28
+#define XgRxOversizePkts_WIDTH 32
+#define XgRxJabberPkts_offset 0x2C
+#define XgRxJabberPkts_WIDTH 32
+#define XgRxUndersizeFCSerrorPkts_offset 0x30
+#define XgRxUndersizeFCSerrorPkts_WIDTH 32
+#define XgRxDropEvents_offset 0x34
+#define XgRxDropEvents_WIDTH 32
+#define XgRxFCSerrorPkts_offset 0x38
+#define XgRxFCSerrorPkts_WIDTH 32
+#define XgRxAlignError_offset 0x3C
+#define XgRxAlignError_WIDTH 32
+#define XgRxSymbolError_offset 0x40
+#define XgRxSymbolError_WIDTH 32
+#define XgRxInternalMACError_offset 0x44
+#define XgRxInternalMACError_WIDTH 32
+#define XgRxControlPkts_offset 0x48
+#define XgRxControlPkts_WIDTH 32
+#define XgRxPausePkts_offset 0x4C
+#define XgRxPausePkts_WIDTH 32
+#define XgRxPkts64Octets_offset 0x50
+#define XgRxPkts64Octets_WIDTH 32
+#define XgRxPkts65to127Octets_offset 0x54
+#define XgRxPkts65to127Octets_WIDTH 32
+#define XgRxPkts128to255Octets_offset 0x58
+#define XgRxPkts128to255Octets_WIDTH 32
+#define XgRxPkts256to511Octets_offset 0x5C
+#define XgRxPkts256to511Octets_WIDTH 32
+#define XgRxPkts512to1023Octets_offset 0x60
+#define XgRxPkts512to1023Octets_WIDTH 32
+#define XgRxPkts1024to15xxOctets_offset 0x64
+#define XgRxPkts1024to15xxOctets_WIDTH 32
+#define XgRxPkts15xxtoMaxOctets_offset 0x68
+#define XgRxPkts15xxtoMaxOctets_WIDTH 32
+#define XgRxLengthError_offset 0x6C
+#define XgRxLengthError_WIDTH 32
+#define XgTxPkts_offset 0x80
+#define XgTxPkts_WIDTH 32
+#define XgTxOctets_offset 0x88
+#define XgTxOctets_WIDTH 48
+#define XgTxMulticastPkts_offset 0x90
+#define XgTxMulticastPkts_WIDTH 32
+#define XgTxBroadcastPkts_offset 0x94
+#define XgTxBroadcastPkts_WIDTH 32
+#define XgTxUnicastPkts_offset 0x98
+#define XgTxUnicastPkts_WIDTH 32
+#define XgTxControlPkts_offset 0x9C
+#define XgTxControlPkts_WIDTH 32
+#define XgTxPausePkts_offset 0xA0
+#define XgTxPausePkts_WIDTH 32
+#define XgTxPkts64Octets_offset 0xA4
+#define XgTxPkts64Octets_WIDTH 32
+#define XgTxPkts65to127Octets_offset 0xA8
+#define XgTxPkts65to127Octets_WIDTH 32
+#define XgTxPkts128to255Octets_offset 0xAC
+#define XgTxPkts128to255Octets_WIDTH 32
+#define XgTxPkts256to511Octets_offset 0xB0
+#define XgTxPkts256to511Octets_WIDTH 32
+#define XgTxPkts512to1023Octets_offset 0xB4
+#define XgTxPkts512to1023Octets_WIDTH 32
+#define XgTxPkts1024to15xxOctets_offset 0xB8
+#define XgTxPkts1024to15xxOctets_WIDTH 32
+#define XgTxPkts1519toMaxOctets_offset 0xBC
+#define XgTxPkts1519toMaxOctets_WIDTH 32
+#define XgTxUndersizePkts_offset 0xC0
+#define XgTxUndersizePkts_WIDTH 32
+#define XgTxOversizePkts_offset 0xC4
+#define XgTxOversizePkts_WIDTH 32
+#define XgTxNonTcpUdpPkt_offset 0xC8
+#define XgTxNonTcpUdpPkt_WIDTH 16
+#define XgTxMacSrcErrPkt_offset 0xCC
+#define XgTxMacSrcErrPkt_WIDTH 16
+#define XgTxIpSrcErrPkt_offset 0xD0
+#define XgTxIpSrcErrPkt_WIDTH 16
+#define XgDmaDone_offset 0xD4
+
+#define FALCON_STATS_NOT_DONE 0x00000000
+#define FALCON_STATS_DONE 0xffffffff
+
+/* Interrupt status register bits */
+#define FATAL_INT_LBN 64
+#define FATAL_INT_WIDTH 1
+#define INT_EVQS_LBN 40
+#define INT_EVQS_WIDTH 4
+
+/**************************************************************************
+ *
+ * Falcon non-volatile configuration
+ *
+ **************************************************************************
+ */
+
+/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
+struct falcon_nvconfig_board_v2 {
+       __le16 nports;
+       u8 port0_phy_addr;
+       u8 port0_phy_type;
+       u8 port1_phy_addr;
+       u8 port1_phy_type;
+       __le16 asic_sub_revision;
+       __le16 board_revision;
+} __attribute__ ((packed));
+
+#define NVCONFIG_BASE 0x300
+#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
+struct falcon_nvconfig {
+       efx_oword_t ee_vpd_cfg_reg;                     /* 0x300 */
+       u8 mac_address[2][8];                   /* 0x310 */
+       efx_oword_t pcie_sd_ctl0123_reg;                /* 0x320 */
+       efx_oword_t pcie_sd_ctl45_reg;                  /* 0x330 */
+       efx_oword_t pcie_pcs_ctl_stat_reg;              /* 0x340 */
+       efx_oword_t hw_init_reg;                        /* 0x350 */
+       efx_oword_t nic_stat_reg;                       /* 0x360 */
+       efx_oword_t glb_ctl_reg;                        /* 0x370 */
+       efx_oword_t srm_cfg_reg;                        /* 0x380 */
+       efx_oword_t spare_reg;                          /* 0x390 */
+       __le16 board_magic_num;                 /* 0x3A0 */
+       __le16 board_struct_ver;
+       __le16 board_checksum;
+       struct falcon_nvconfig_board_v2 board_v2;
+} __attribute__ ((packed));
+
+#endif /* EFX_FALCON_HWDEFS_H */
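The register and event definitions above come in _LBN/_WIDTH pairs, where _LBN looks like the lowest (least-significant) bit number of a field and _WIDTH its width in bits. A minimal sketch of how such a pair selects a field, assuming plain C on a 64-bit value and ignoring the driver's own bitfield helpers (field_extract and rx_event are hypothetical names, not part of this patch):

#include <stdint.h>

/* Extract the field described by an (LBN, WIDTH) pair from a 64-bit value.
 * Assumes width < 64. */
static inline uint64_t field_extract(uint64_t value, unsigned int lbn,
                                     unsigned int width)
{
        return (value >> lbn) & ((1ULL << width) - 1);
}

/* e.g. the byte count of a receive event:
 *   field_extract(rx_event, RX_EV_BYTE_CNT_LBN, RX_EV_BYTE_CNT_WIDTH); */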
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
new file mode 100644 (file)
index 0000000..ea08184
--- /dev/null
@@ -0,0 +1,243 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_FALCON_IO_H
+#define EFX_FALCON_IO_H
+
+#include <linux/io.h>
+#include <linux/spinlock.h>
+#include "net_driver.h"
+
+/**************************************************************************
+ *
+ * Falcon hardware access
+ *
+ **************************************************************************
+ *
+ * Notes on locking strategy:
+ *
+ * Most Falcon registers require 16-byte (or 8-byte, for SRAM
+ * registers) atomic writes, which necessitates locking.
+ * Under normal operation few writes to the Falcon BAR are made and these
+ * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are
+ * special-cased to allow 4-byte (hence lockless) accesses.
+ *
+ * It *is* safe to write to these 4-byte registers in the middle of an
+ * access to an 8-byte or 16-byte register.  We therefore use a
+ * spinlock to protect accesses to the larger registers, but no locks
+ * for the 4-byte registers.
+ *
+ * A write barrier is needed to ensure that DW3 is written after DW0/1/2,
+ * due to the way the 16-byte registers are "collected" in the Falcon BIU.
+ *
+ * We also lock when carrying out reads, to ensure consistency of the
+ * data (made possible since the BIU reads all 128 bits into a cache).
+ * Reads are very rare, so this isn't a significant performance
+ * impact.  (Most data transferred from NIC to host is DMAed directly
+ * into host memory).
+ *
+ * I/O BAR access uses locks for both reads and writes (but is only provided
+ * for testing purposes).
+ */
+
+/* Special buffer descriptors (Falcon SRAM) */
+#define BUF_TBL_KER_A1 0x18000
+#define BUF_TBL_KER_B0 0x800000
+
+
+#if BITS_PER_LONG == 64
+#define FALCON_USE_QWORD_IO 1
+#endif
+
+#define _falcon_writeq(efx, value, reg) \
+       __raw_writeq((__force u64) (value), (efx)->membase + (reg))
+#define _falcon_writel(efx, value, reg) \
+       __raw_writel((__force u32) (value), (efx)->membase + (reg))
+#define _falcon_readq(efx, reg) \
+       ((__force __le64) __raw_readq((efx)->membase + (reg)))
+#define _falcon_readl(efx, reg) \
+       ((__force __le32) __raw_readl((efx)->membase + (reg)))
+
+/* Writes to a normal 16-byte Falcon register, locking as appropriate. */
+static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
+                               unsigned int reg)
+{
+       unsigned long flags;
+
+       EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
+                   EFX_OWORD_VAL(*value));
+
+       spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef FALCON_USE_QWORD_IO
+       _falcon_writeq(efx, value->u64[0], reg + 0);
+       wmb();
+       _falcon_writeq(efx, value->u64[1], reg + 8);
+#else
+       _falcon_writel(efx, value->u32[0], reg + 0);
+       _falcon_writel(efx, value->u32[1], reg + 4);
+       _falcon_writel(efx, value->u32[2], reg + 8);
+       wmb();
+       _falcon_writel(efx, value->u32[3], reg + 12);
+#endif
+       mmiowb();
+       spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Writes to an 8-byte Falcon SRAM register, locking as appropriate. */
+static inline void falcon_write_sram(struct efx_nic *efx, efx_qword_t *value,
+                                    unsigned int index)
+{
+       unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
+       unsigned long flags;
+
+       EFX_REGDUMP(efx, "writing SRAM register %x with " EFX_QWORD_FMT "\n",
+                   reg, EFX_QWORD_VAL(*value));
+
+       spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef FALCON_USE_QWORD_IO
+       _falcon_writeq(efx, value->u64[0], reg + 0);
+#else
+       _falcon_writel(efx, value->u32[0], reg + 0);
+       wmb();
+       _falcon_writel(efx, value->u32[1], reg + 4);
+#endif
+       mmiowb();
+       spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+/* Write dword to Falcon register that allows partial writes
+ *
+ * Some Falcon registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
+ * TX_DESC_UPD_REG) can be written to as a single dword.  This allows
+ * for lockless writes.
+ */
+static inline void falcon_writel(struct efx_nic *efx, efx_dword_t *value,
+                                unsigned int reg)
+{
+       EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
+                   reg, EFX_DWORD_VAL(*value));
+
+       /* No lock required */
+       _falcon_writel(efx, value->u32[0], reg);
+}
+
+/* Read from a Falcon register
+ *
+ * This reads an entire 16-byte Falcon register in one go, locking as
+ * appropriate.  It is essential to read the first dword first, as this
+ * prompts Falcon to load the current value into the shadow register.
+ */
+static inline void falcon_read(struct efx_nic *efx, efx_oword_t *value,
+                              unsigned int reg)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&efx->biu_lock, flags);
+       value->u32[0] = _falcon_readl(efx, reg + 0);
+       rmb();
+       value->u32[1] = _falcon_readl(efx, reg + 4);
+       value->u32[2] = _falcon_readl(efx, reg + 8);
+       value->u32[3] = _falcon_readl(efx, reg + 12);
+       spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+       EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
+                   EFX_OWORD_VAL(*value));
+}
+
+/* This reads an 8-byte Falcon SRAM entry in one go. */
+static inline void falcon_read_sram(struct efx_nic *efx, efx_qword_t *value,
+                                   unsigned int index)
+{
+       unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
+       unsigned long flags;
+
+       spin_lock_irqsave(&efx->biu_lock, flags);
+#ifdef FALCON_USE_QWORD_IO
+       value->u64[0] = _falcon_readq(efx, reg + 0);
+#else
+       value->u32[0] = _falcon_readl(efx, reg + 0);
+       rmb();
+       value->u32[1] = _falcon_readl(efx, reg + 4);
+#endif
+       spin_unlock_irqrestore(&efx->biu_lock, flags);
+
+       EFX_REGDUMP(efx, "read from SRAM register %x, got "EFX_QWORD_FMT"\n",
+                   reg, EFX_QWORD_VAL(*value));
+}
+
+/* Read dword from Falcon register that allows partial writes (sic) */
+static inline void falcon_readl(struct efx_nic *efx, efx_dword_t *value,
+                               unsigned int reg)
+{
+       value->u32[0] = _falcon_readl(efx, reg);
+       EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
+                   reg, EFX_DWORD_VAL(*value));
+}
+
+/* Write to a register forming part of a table */
+static inline void falcon_write_table(struct efx_nic *efx, efx_oword_t *value,
+                                     unsigned int reg, unsigned int index)
+{
+       falcon_write(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Read from a register forming part of a table */
+static inline void falcon_read_table(struct efx_nic *efx, efx_oword_t *value,
+                                    unsigned int reg, unsigned int index)
+{
+       falcon_read(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Write to a dword register forming part of a table */
+static inline void falcon_writel_table(struct efx_nic *efx, efx_dword_t *value,
+                                      unsigned int reg, unsigned int index)
+{
+       falcon_writel(efx, value, reg + index * sizeof(efx_oword_t));
+}
+
+/* Page-mapped register block size */
+#define FALCON_PAGE_BLOCK_SIZE 0x2000
+
+/* Calculate offset to page-mapped register block */
+#define FALCON_PAGED_REG(page, reg) \
+       ((page) * FALCON_PAGE_BLOCK_SIZE + (reg))
+
+/* As for falcon_write(), but for a page-mapped register. */
+static inline void falcon_write_page(struct efx_nic *efx, efx_oword_t *value,
+                                    unsigned int reg, unsigned int page)
+{
+       falcon_write(efx, value, FALCON_PAGED_REG(page, reg));
+}
+
+/* As for falcon_writel(), but for a page-mapped register. */
+static inline void falcon_writel_page(struct efx_nic *efx, efx_dword_t *value,
+                                     unsigned int reg, unsigned int page)
+{
+       falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
+}
+
+/* Write dword to Falcon page-mapped register with an extra lock.
+ *
+ * As for falcon_writel_page(), but for a register that suffers from
+ * SFC bug 3181. Take out a lock so the BIU collector cannot be
+ * confused. */
+static inline void falcon_writel_page_locked(struct efx_nic *efx,
+                                            efx_dword_t *value,
+                                            unsigned int reg,
+                                            unsigned int page)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&efx->biu_lock, flags);
+       falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
+       spin_unlock_irqrestore(&efx->biu_lock, flags);
+}
+
+#endif /* EFX_FALCON_IO_H */
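These accessors are normally used together with the driver's bitfield macros (the EFX_POPULATE_*/EFX_*_FIELD family seen in falcon_xmac.c below). A rough sketch of the locked read-modify-write pattern they support; SOME_REG_KER and SOME_FIELD are placeholders, and EFX_SET_OWORD_FIELD is assumed to exist alongside the DWORD variants used later in this patch:

/* Sketch only: read a 16-byte register, change one field, write it back.
 * Both accesses take efx->biu_lock internally, as described above. */
static void example_rmw(struct efx_nic *efx)
{
        efx_oword_t reg;

        falcon_read(efx, &reg, SOME_REG_KER);      /* locked 16-byte read */
        EFX_SET_OWORD_FIELD(reg, SOME_FIELD, 1);   /* modify one bitfield */
        falcon_write(efx, &reg, SOME_REG_KER);     /* locked 16-byte write */
}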
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
new file mode 100644 (file)
index 0000000..aa7521b
--- /dev/null
@@ -0,0 +1,585 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/delay.h>
+#include "net_driver.h"
+#include "efx.h"
+#include "falcon.h"
+#include "falcon_hwdefs.h"
+#include "falcon_io.h"
+#include "mac.h"
+#include "gmii.h"
+#include "mdio_10g.h"
+#include "phy.h"
+#include "boards.h"
+#include "workarounds.h"
+
+/**************************************************************************
+ *
+ * MAC register access
+ *
+ **************************************************************************/
+
+/* Offset of an XMAC register within Falcon */
+#define FALCON_XMAC_REG(mac_reg)                                       \
+       (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE))
+
+void falcon_xmac_writel(struct efx_nic *efx,
+                       efx_dword_t *value, unsigned int mac_reg)
+{
+       efx_oword_t temp;
+
+       EFX_POPULATE_OWORD_1(temp, MAC_DATA, EFX_DWORD_FIELD(*value, MAC_DATA));
+       falcon_write(efx, &temp, FALCON_XMAC_REG(mac_reg));
+}
+
+void falcon_xmac_readl(struct efx_nic *efx,
+                      efx_dword_t *value, unsigned int mac_reg)
+{
+       efx_oword_t temp;
+
+       falcon_read(efx, &temp, FALCON_XMAC_REG(mac_reg));
+       EFX_POPULATE_DWORD_1(*value, MAC_DATA, EFX_OWORD_FIELD(temp, MAC_DATA));
+}
+
+/**************************************************************************
+ *
+ * MAC operations
+ *
+ *************************************************************************/
+static int falcon_reset_xmac(struct efx_nic *efx)
+{
+       efx_dword_t reg;
+       int count;
+
+       EFX_POPULATE_DWORD_1(reg, XM_CORE_RST, 1);
+       falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC);
+
+       for (count = 0; count < 10000; count++) {       /* wait up to 100ms */
+               falcon_xmac_readl(efx, &reg, XM_GLB_CFG_REG_MAC);
+               if (EFX_DWORD_FIELD(reg, XM_CORE_RST) == 0)
+                       return 0;
+               udelay(10);
+       }
+
+       EFX_ERR(efx, "timed out waiting for XMAC core reset\n");
+       return -ETIMEDOUT;
+}
+
+/* Configure the XAUI driver that is an output from Falcon */
+static void falcon_setup_xaui(struct efx_nic *efx)
+{
+       efx_dword_t sdctl, txdrv;
+
+       /* Move the XAUI into low power, unless there is no PHY, in
+        * which case the XAUI will have to drive a cable. */
+       if (efx->phy_type == PHY_TYPE_NONE)
+               return;
+
+       falcon_xmac_readl(efx, &sdctl, XX_SD_CTL_REG_MAC);
+       EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT);
+       EFX_SET_DWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT);
+       EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT);
+       EFX_SET_DWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT);
+       EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT);
+       EFX_SET_DWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT);
+       EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT);
+       EFX_SET_DWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT);
+       falcon_xmac_writel(efx, &sdctl, XX_SD_CTL_REG_MAC);
+
+       EFX_POPULATE_DWORD_8(txdrv,
+                            XX_DEQD, XX_TXDRV_DEQ_DEFAULT,
+                            XX_DEQC, XX_TXDRV_DEQ_DEFAULT,
+                            XX_DEQB, XX_TXDRV_DEQ_DEFAULT,
+                            XX_DEQA, XX_TXDRV_DEQ_DEFAULT,
+                            XX_DTXD, XX_TXDRV_DTX_DEFAULT,
+                            XX_DTXC, XX_TXDRV_DTX_DEFAULT,
+                            XX_DTXB, XX_TXDRV_DTX_DEFAULT,
+                            XX_DTXA, XX_TXDRV_DTX_DEFAULT);
+       falcon_xmac_writel(efx, &txdrv, XX_TXDRV_CTL_REG_MAC);
+}
+
+static void falcon_hold_xaui_in_rst(struct efx_nic *efx)
+{
+       efx_dword_t reg;
+
+       EFX_ZERO_DWORD(reg);
+       EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 1);
+       EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 1);
+       EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 1);
+       EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 1);
+       EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 1);
+       EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 1);
+       EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 1);
+       EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 1);
+       EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 1);
+       EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 1);
+       EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1);
+       EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1);
+       falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+       udelay(10);
+}
+
+static int _falcon_reset_xaui_a(struct efx_nic *efx)
+{
+       efx_dword_t reg;
+
+       falcon_hold_xaui_in_rst(efx);
+       falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
+
+       /* Follow the RAMBUS XAUI data reset sequencing
+        * Channels A and B first: power down, reset PLL, reset, clear
+        */
+       EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 0);
+       EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 0);
+       falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+       udelay(10);
+
+       EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 0);
+       falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+       udelay(10);
+
+       EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 0);
+       EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 0);
+       falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+       udelay(10);
+
+       /* Channels C and D: power down, reset PLL, reset, clear */
+       EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 0);
+       EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 0);
+       falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+       udelay(10);
+
+       EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 0);
+       falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+       udelay(10);
+
+       EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 0);
+       EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 0);
+       falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+       udelay(10);
+
+       /* Setup XAUI */
+       falcon_setup_xaui(efx);
+       udelay(10);
+
+       /* Take XGXS out of reset */
+       EFX_ZERO_DWORD(reg);
+       falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+       udelay(10);
+
+       return 0;
+}
+
+static int _falcon_reset_xaui_b(struct efx_nic *efx)
+{
+       efx_dword_t reg;
+       int count;
+
+       EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1);
+       falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+
+       /* Give some time for the link to establish */
+       for (count = 0; count < 1000; count++) { /* wait up to 10ms */
+               falcon_xmac_readl(efx, &reg, XX_PWR_RST_REG_MAC);
+               if (EFX_DWORD_FIELD(reg, XX_RST_XX_EN) == 0) {
+                       falcon_setup_xaui(efx);
+                       return 0;
+               }
+               udelay(10);
+       }
+       EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n");
+       return -ETIMEDOUT;
+}
+
+int falcon_reset_xaui(struct efx_nic *efx)
+{
+       int rc;
+
+       if (EFX_WORKAROUND_9388(efx)) {
+               falcon_hold_xaui_in_rst(efx);
+               efx->phy_op->reset_xaui(efx);
+               rc = _falcon_reset_xaui_a(efx);
+       } else {
+               rc = _falcon_reset_xaui_b(efx);
+       }
+       return rc;
+}
+
+static int falcon_xgmii_status(struct efx_nic *efx)
+{
+       efx_dword_t reg;
+
+       if (FALCON_REV(efx) < FALCON_REV_B0)
+               return 1;
+
+       /* The ISR latches, so clear it and re-read */
+       falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
+       falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
+
+       if (EFX_DWORD_FIELD(reg, XM_LCLFLT) ||
+           EFX_DWORD_FIELD(reg, XM_RMTFLT)) {
+               EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg));
+               return 0;
+       }
+
+       return 1;
+}
+
+static void falcon_mask_status_intr(struct efx_nic *efx, int enable)
+{
+       efx_dword_t reg;
+
+       if (FALCON_REV(efx) < FALCON_REV_B0)
+               return;
+
+       /* Flush the ISR */
+       if (enable)
+               falcon_xmac_readl(efx, &reg, XM_MGT_INT_REG_MAC_B0);
+
+       EFX_POPULATE_DWORD_2(reg,
+                            XM_MSK_RMTFLT, !enable,
+                            XM_MSK_LCLFLT, !enable);
+       falcon_xmac_writel(efx, &reg, XM_MGT_INT_MSK_REG_MAC_B0);
+}
+
+int falcon_init_xmac(struct efx_nic *efx)
+{
+       int rc;
+
+       /* Initialize the PHY first so the clock is around */
+       rc = efx->phy_op->init(efx);
+       if (rc)
+               goto fail1;
+
+       rc = falcon_reset_xaui(efx);
+       if (rc)
+               goto fail2;
+
+       /* Wait again. Give the PHY and MAC time to come back */
+       schedule_timeout_uninterruptible(HZ / 10);
+
+       rc = falcon_reset_xmac(efx);
+       if (rc)
+               goto fail2;
+
+       falcon_mask_status_intr(efx, 1);
+       return 0;
+
+ fail2:
+       efx->phy_op->fini(efx);
+ fail1:
+       return rc;
+}
+
+int falcon_xaui_link_ok(struct efx_nic *efx)
+{
+       efx_dword_t reg;
+       int align_done, sync_status, link_ok = 0;
+
+       /* Read link status */
+       falcon_xmac_readl(efx, &reg, XX_CORE_STAT_REG_MAC);
+
+       align_done = EFX_DWORD_FIELD(reg, XX_ALIGN_DONE);
+       sync_status = EFX_DWORD_FIELD(reg, XX_SYNC_STAT);
+       if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED))
+               link_ok = 1;
+
+       /* Clear link status ready for next read */
+       EFX_SET_DWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET);
+       EFX_SET_DWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET);
+       EFX_SET_DWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET);
+       falcon_xmac_writel(efx, &reg, XX_CORE_STAT_REG_MAC);
+
+       /* If the link is up, then check the phy side of the xaui link
+        * (error conditions from the wire side propagate back through
+        * the phy to the xaui side). */
+       if (efx->link_up && link_ok) {
+               int has_phyxs = efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS);
+               if (has_phyxs)
+                       link_ok = mdio_clause45_phyxgxs_lane_sync(efx);
+       }
+
+       /* If the PHY and XAUI links are up, then check the mac's xgmii
+        * fault state */
+       if (efx->link_up && link_ok)
+               link_ok = falcon_xgmii_status(efx);
+
+       return link_ok;
+}
+
+static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
+{
+       unsigned int max_frame_len;
+       efx_dword_t reg;
+       int rx_fc = (efx->flow_control & EFX_FC_RX) ? 1 : 0;
+
+       /* Configure MAC - cut-thru mode is hard-wired on */
+       EFX_POPULATE_DWORD_3(reg,
+                            XM_RX_JUMBO_MODE, 1,
+                            XM_TX_STAT_EN, 1,
+                            XM_RX_STAT_EN, 1);
+       falcon_xmac_writel(efx, &reg, XM_GLB_CFG_REG_MAC);
+
+       /* Configure TX */
+       EFX_POPULATE_DWORD_6(reg,
+                            XM_TXEN, 1,
+                            XM_TX_PRMBL, 1,
+                            XM_AUTO_PAD, 1,
+                            XM_TXCRC, 1,
+                            XM_FCNTL, 1,
+                            XM_IPG, 0x3);
+       falcon_xmac_writel(efx, &reg, XM_TX_CFG_REG_MAC);
+
+       /* Configure RX */
+       EFX_POPULATE_DWORD_5(reg,
+                            XM_RXEN, 1,
+                            XM_AUTO_DEPAD, 0,
+                            XM_ACPT_ALL_MCAST, 1,
+                            XM_ACPT_ALL_UCAST, efx->promiscuous,
+                            XM_PASS_CRC_ERR, 1);
+       falcon_xmac_writel(efx, &reg, XM_RX_CFG_REG_MAC);
+
+       /* Set frame length */
+       max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
+       EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len);
+       falcon_xmac_writel(efx, &reg, XM_RX_PARAM_REG_MAC);
+       EFX_POPULATE_DWORD_2(reg,
+                            XM_MAX_TX_FRM_SIZE, max_frame_len,
+                            XM_TX_JUMBO_MODE, 1);
+       falcon_xmac_writel(efx, &reg, XM_TX_PARAM_REG_MAC);
+
+       EFX_POPULATE_DWORD_2(reg,
+                            XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
+                            XM_DIS_FCNTL, rx_fc ? 0 : 1);
+       falcon_xmac_writel(efx, &reg, XM_FC_REG_MAC);
+
+       /* Set MAC address */
+       EFX_POPULATE_DWORD_4(reg,
+                            XM_ADR_0, efx->net_dev->dev_addr[0],
+                            XM_ADR_1, efx->net_dev->dev_addr[1],
+                            XM_ADR_2, efx->net_dev->dev_addr[2],
+                            XM_ADR_3, efx->net_dev->dev_addr[3]);
+       falcon_xmac_writel(efx, &reg, XM_ADR_LO_REG_MAC);
+       EFX_POPULATE_DWORD_2(reg,
+                            XM_ADR_4, efx->net_dev->dev_addr[4],
+                            XM_ADR_5, efx->net_dev->dev_addr[5]);
+       falcon_xmac_writel(efx, &reg, XM_ADR_HI_REG_MAC);
+}
+
+/* Try to bring up the Falcon side of the Falcon-Phy XAUI link if it
+ * fails to come back up. Bash it until it does. */
+static int falcon_check_xaui_link_up(struct efx_nic *efx)
+{
+       int max_tries, tries;
+       tries = EFX_WORKAROUND_5147(efx) ? 5 : 1;
+       max_tries = tries;
+
+       if (efx->phy_type == PHY_TYPE_NONE)
+               return 0;
+
+       while (tries) {
+               if (falcon_xaui_link_ok(efx))
+                       return 1;
+
+               EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n",
+                       __func__, tries);
+               (void) falcon_reset_xaui(efx);
+               udelay(200);
+               tries--;
+       }
+
+       EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n",
+               max_tries);
+       return 0;
+}
+
+void falcon_reconfigure_xmac(struct efx_nic *efx)
+{
+       int xaui_link_ok;
+
+       falcon_mask_status_intr(efx, 0);
+
+       falcon_deconfigure_mac_wrapper(efx);
+       efx->phy_op->reconfigure(efx);
+       falcon_reconfigure_xmac_core(efx);
+       falcon_reconfigure_mac_wrapper(efx);
+
+       /* Ensure XAUI link is up */
+       xaui_link_ok = falcon_check_xaui_link_up(efx);
+
+       if (xaui_link_ok && efx->link_up)
+               falcon_mask_status_intr(efx, 1);
+}
+
+void falcon_fini_xmac(struct efx_nic *efx)
+{
+       /* Isolate the MAC - PHY */
+       falcon_deconfigure_mac_wrapper(efx);
+
+       /* Potentially power down the PHY */
+       efx->phy_op->fini(efx);
+}
+
+void falcon_update_stats_xmac(struct efx_nic *efx)
+{
+       struct efx_mac_stats *mac_stats = &efx->mac_stats;
+       int rc;
+
+       rc = falcon_dma_stats(efx, XgDmaDone_offset);
+       if (rc)
+               return;
+
+       /* Update MAC stats from DMAed values */
+       FALCON_STAT(efx, XgRxOctets, rx_bytes);
+       FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes);
+       FALCON_STAT(efx, XgRxPkts, rx_packets);
+       FALCON_STAT(efx, XgRxPktsOK, rx_good);
+       FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast);
+       FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast);
+       FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast);
+       FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64);
+       FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo);
+       FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo);
+       FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64);
+       FALCON_STAT(efx, XgRxDropEvents, rx_overflow);
+       FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad);
+       FALCON_STAT(efx, XgRxAlignError, rx_align_error);
+       FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error);
+       FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error);
+       FALCON_STAT(efx, XgRxControlPkts, rx_control);
+       FALCON_STAT(efx, XgRxPausePkts, rx_pause);
+       FALCON_STAT(efx, XgRxPkts64Octets, rx_64);
+       FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127);
+       FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255);
+       FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511);
+       FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023);
+       FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx);
+       FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo);
+       FALCON_STAT(efx, XgRxLengthError, rx_length_error);
+       FALCON_STAT(efx, XgTxPkts, tx_packets);
+       FALCON_STAT(efx, XgTxOctets, tx_bytes);
+       FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast);
+       FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast);
+       FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast);
+       FALCON_STAT(efx, XgTxControlPkts, tx_control);
+       FALCON_STAT(efx, XgTxPausePkts, tx_pause);
+       FALCON_STAT(efx, XgTxPkts64Octets, tx_64);
+       FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127);
+       FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255);
+       FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511);
+       FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023);
+       FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx);
+       FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo);
+       FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64);
+       FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo);
+       FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp);
+       FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error);
+       FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error);
+
+       /* Update derived statistics */
+       mac_stats->tx_good_bytes =
+               (mac_stats->tx_bytes - mac_stats->tx_bad_bytes);
+       mac_stats->rx_bad_bytes =
+               (mac_stats->rx_bytes - mac_stats->rx_good_bytes);
+}
+
+#define EFX_XAUI_RETRAIN_MAX 8
+
+int falcon_check_xmac(struct efx_nic *efx)
+{
+       unsigned xaui_link_ok;
+       int rc;
+
+       falcon_mask_status_intr(efx, 0);
+       xaui_link_ok = falcon_xaui_link_ok(efx);
+
+       if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok)
+               (void) falcon_reset_xaui(efx);
+
+       /* Call the PHY check_hw routine */
+       rc = efx->phy_op->check_hw(efx);
+
+       /* Unmask interrupt if everything was (and still is) ok */
+       if (xaui_link_ok && efx->link_up)
+               falcon_mask_status_intr(efx, 1);
+
+       return rc;
+}
+
+/* Simulate a PHY event */
+void falcon_xmac_sim_phy_event(struct efx_nic *efx)
+{
+       efx_qword_t phy_event;
+
+       EFX_POPULATE_QWORD_2(phy_event,
+                            EV_CODE, GLOBAL_EV_DECODE,
+                            XG_PHY_INTR, 1);
+       falcon_generate_event(&efx->channel[0], &phy_event);
+}
+
+int falcon_xmac_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+       mdio_clause45_get_settings(efx, ecmd);
+       ecmd->transceiver = XCVR_INTERNAL;
+       ecmd->phy_address = efx->mii.phy_id;
+       ecmd->autoneg = AUTONEG_DISABLE;
+       ecmd->duplex = DUPLEX_FULL;
+       return 0;
+}
+
+int falcon_xmac_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
+{
+       if (ecmd->transceiver != XCVR_INTERNAL)
+               return -EINVAL;
+       if (ecmd->autoneg != AUTONEG_DISABLE)
+               return -EINVAL;
+       if (ecmd->duplex != DUPLEX_FULL)
+               return -EINVAL;
+
+       return mdio_clause45_set_settings(efx, ecmd);
+}
+
+
+int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control)
+{
+       int reset;
+
+       if (flow_control & EFX_FC_AUTO) {
+               EFX_LOG(efx, "10G does not support flow control "
+                       "autonegotiation\n");
+               return -EINVAL;
+       }
+
+       if ((flow_control & EFX_FC_TX) && !(flow_control & EFX_FC_RX))
+               return -EINVAL;
+
+       /* TX flow control may automatically turn itself off if the
+        * link partner (intermittently) stops responding to pause
+        * frames. There isn't any indication that this has happened,
+        * so the best we can do is leave it up to the user to spot this
+        * and fix it by cycling transmit flow control on this end. */
+       reset = ((flow_control & EFX_FC_TX) &&
+                !(efx->flow_control & EFX_FC_TX));
+       if (EFX_WORKAROUND_11482(efx) && reset) {
+               if (FALCON_REV(efx) >= FALCON_REV_B0) {
+                       /* Recover by resetting the EM block */
+                       if (efx->link_up)
+                               falcon_drain_tx_fifo(efx);
+               } else {
+                       /* Schedule a reset to recover */
+                       efx_schedule_reset(efx, RESET_TYPE_INVISIBLE);
+               }
+       }
+
+       efx->flow_control = flow_control;
+
+       return 0;
+}
diff --git a/drivers/net/sfc/gmii.h b/drivers/net/sfc/gmii.h
new file mode 100644 (file)
index 0000000..d25bbd1
--- /dev/null
@@ -0,0 +1,195 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_GMII_H
+#define EFX_GMII_H
+
+/*
+ * GMII interface
+ */
+
+#include <linux/mii.h>
+
+/* GMII registers, excluding registers already defined as MII
+ * registers in mii.h
+ */
+#define GMII_IER               0x12    /* Interrupt enable register */
+#define GMII_ISR               0x13    /* Interrupt status register */
+
+/* Interrupt enable register */
+#define IER_ANEG_ERR           0x8000  /* Bit 15 - autonegotiation error */
+#define IER_SPEED_CHG          0x4000  /* Bit 14 - speed changed */
+#define IER_DUPLEX_CHG         0x2000  /* Bit 13 - duplex changed */
+#define IER_PAGE_RCVD          0x1000  /* Bit 12 - page received */
+#define IER_ANEG_DONE          0x0800  /* Bit 11 - autonegotiation complete */
+#define IER_LINK_CHG           0x0400  /* Bit 10 - link status changed */
+#define IER_SYM_ERR            0x0200  /* Bit 9 - symbol error */
+#define IER_FALSE_CARRIER      0x0100  /* Bit 8 - false carrier */
+#define IER_FIFO_ERR           0x0080  /* Bit 7 - FIFO over/underflow */
+#define IER_MDIX_CHG           0x0040  /* Bit 6 - MDI crossover changed */
+#define IER_DOWNSHIFT          0x0020  /* Bit 5 - downshift */
+#define IER_ENERGY             0x0010  /* Bit 4 - energy detect */
+#define IER_DTE_POWER          0x0004  /* Bit 2 - DTE power detect */
+#define IER_POLARITY_CHG       0x0002  /* Bit 1 - polarity changed */
+#define IER_JABBER             0x0001  /* Bit 0 - jabber */
+
+/* Interrupt status register */
+#define ISR_ANEG_ERR           0x8000  /* Bit 15 - autonegotiation error */
+#define ISR_SPEED_CHG          0x4000  /* Bit 14 - speed changed */
+#define ISR_DUPLEX_CHG         0x2000  /* Bit 13 - duplex changed */
+#define ISR_PAGE_RCVD          0x1000  /* Bit 12 - page received */
+#define ISR_ANEG_DONE          0x0800  /* Bit 11 - autonegotiation complete */
+#define ISR_LINK_CHG           0x0400  /* Bit 10 - link status changed */
+#define ISR_SYM_ERR            0x0200  /* Bit 9 - symbol error */
+#define ISR_FALSE_CARRIER      0x0100  /* Bit 8 - false carrier */
+#define ISR_FIFO_ERR           0x0080  /* Bit 7 - FIFO over/underflow */
+#define ISR_MDIX_CHG           0x0040  /* Bit 6 - MDI crossover changed */
+#define ISR_DOWNSHIFT          0x0020  /* Bit 5 - downshift */
+#define ISR_ENERGY             0x0010  /* Bit 4 - energy detect */
+#define ISR_DTE_POWER          0x0004  /* Bit 2 - DTE power detect */
+#define ISR_POLARITY_CHG       0x0002  /* Bit 1 - polarity changed */
+#define ISR_JABBER             0x0001  /* Bit 0 - jabber */
+
+/* Logically extended advertisement register */
+#define GM_ADVERTISE_SLCT              ADVERTISE_SLCT
+#define GM_ADVERTISE_CSMA              ADVERTISE_CSMA
+#define GM_ADVERTISE_10HALF            ADVERTISE_10HALF
+#define GM_ADVERTISE_1000XFULL         ADVERTISE_1000XFULL
+#define GM_ADVERTISE_10FULL            ADVERTISE_10FULL
+#define GM_ADVERTISE_1000XHALF         ADVERTISE_1000XHALF
+#define GM_ADVERTISE_100HALF           ADVERTISE_100HALF
+#define GM_ADVERTISE_1000XPAUSE                ADVERTISE_1000XPAUSE
+#define GM_ADVERTISE_100FULL           ADVERTISE_100FULL
+#define GM_ADVERTISE_1000XPSE_ASYM     ADVERTISE_1000XPSE_ASYM
+#define GM_ADVERTISE_100BASE4          ADVERTISE_100BASE4
+#define GM_ADVERTISE_PAUSE_CAP         ADVERTISE_PAUSE_CAP
+#define GM_ADVERTISE_PAUSE_ASYM                ADVERTISE_PAUSE_ASYM
+#define GM_ADVERTISE_RESV              ADVERTISE_RESV
+#define GM_ADVERTISE_RFAULT            ADVERTISE_RFAULT
+#define GM_ADVERTISE_LPACK             ADVERTISE_LPACK
+#define GM_ADVERTISE_NPAGE             ADVERTISE_NPAGE
+#define GM_ADVERTISE_1000FULL          (ADVERTISE_1000FULL << 8)
+#define GM_ADVERTISE_1000HALF          (ADVERTISE_1000HALF << 8)
+#define GM_ADVERTISE_1000              (GM_ADVERTISE_1000FULL | \
+                                        GM_ADVERTISE_1000HALF)
+#define GM_ADVERTISE_FULL              (GM_ADVERTISE_1000FULL | \
+                                        ADVERTISE_FULL)
+#define GM_ADVERTISE_ALL               (GM_ADVERTISE_1000FULL | \
+                                        GM_ADVERTISE_1000HALF | \
+                                        ADVERTISE_ALL)
+
+/* Logically extended link partner ability register */
+#define GM_LPA_SLCT                    LPA_SLCT
+#define GM_LPA_10HALF                  LPA_10HALF
+#define GM_LPA_1000XFULL               LPA_1000XFULL
+#define GM_LPA_10FULL                  LPA_10FULL
+#define GM_LPA_1000XHALF               LPA_1000XHALF
+#define GM_LPA_100HALF                 LPA_100HALF
+#define GM_LPA_1000XPAUSE              LPA_1000XPAUSE
+#define GM_LPA_100FULL                 LPA_100FULL
+#define GM_LPA_1000XPAUSE_ASYM         LPA_1000XPAUSE_ASYM
+#define GM_LPA_100BASE4                        LPA_100BASE4
+#define GM_LPA_PAUSE_CAP               LPA_PAUSE_CAP
+#define GM_LPA_PAUSE_ASYM              LPA_PAUSE_ASYM
+#define GM_LPA_RESV                    LPA_RESV
+#define GM_LPA_RFAULT                  LPA_RFAULT
+#define GM_LPA_LPACK                   LPA_LPACK
+#define GM_LPA_NPAGE                   LPA_NPAGE
+#define GM_LPA_1000FULL                        (LPA_1000FULL << 6)
+#define GM_LPA_1000HALF                        (LPA_1000HALF << 6)
+#define GM_LPA_10000FULL               0x00040000
+#define GM_LPA_10000HALF               0x00080000
+#define GM_LPA_DUPLEX                  (GM_LPA_1000FULL | GM_LPA_10000FULL \
+                                        | LPA_DUPLEX)
+#define GM_LPA_10                      (LPA_10FULL | LPA_10HALF)
+#define GM_LPA_100                     LPA_100
+#define GM_LPA_1000                    (GM_LPA_1000FULL | GM_LPA_1000HALF)
+#define GM_LPA_10000                   (GM_LPA_10000FULL | GM_LPA_10000HALF)
+
+/* Retrieve GMII autonegotiation advertised abilities
+ *
+ * The MII advertisement register (MII_ADVERTISE) is logically extended
+ * to include advertisement bits ADVERTISE_1000FULL and
+ * ADVERTISE_1000HALF from MII_CTRL1000.  The result can be tested
+ * against the GM_ADVERTISE_xxx constants.
+ */
+static inline unsigned int gmii_advertised(struct mii_if_info *gmii)
+{
+       unsigned int advertise;
+       unsigned int ctrl1000;
+
+       advertise = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_ADVERTISE);
+       ctrl1000 = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_CTRL1000);
+       return (((ctrl1000 << 8) & GM_ADVERTISE_1000) | advertise);
+}
+
+/* Retrieve GMII autonegotiation link partner abilities
+ *
+ * The MII link partner ability register (MII_LPA) is logically
+ * extended by adding bits LPA_1000HALF and LPA_1000FULL from
+ * MII_STAT1000.  The result can be tested against the GM_LPA_xxx
+ * constants.
+ */
+static inline unsigned int gmii_lpa(struct mii_if_info *gmii)
+{
+       unsigned int lpa;
+       unsigned int stat1000;
+
+       lpa = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_LPA);
+       stat1000 = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_STAT1000);
+       return (((stat1000 << 6) & GM_LPA_1000) | lpa);
+}
+
+/* Calculate GMII autonegotiated link technology
+ *
+ * "negotiated" should be the result of gmii_advertised() logically
+ * ANDed with the result of gmii_lpa().
+ *
+ * "tech" will be negotiated with the unused bits masked out.  For
+ * example, if both ends of the link are capable of both
+ * GM_LPA_1000FULL and GM_LPA_100FULL, GM_LPA_100FULL will be masked
+ * out.
+ */
+static inline unsigned int gmii_nway_result(unsigned int negotiated)
+{
+       unsigned int other_bits;
+
+       /* Mask out the speed and duplex bits */
+       other_bits = negotiated & ~(GM_LPA_10 | GM_LPA_100 | GM_LPA_1000);
+
+       if (negotiated & GM_LPA_1000FULL)
+               return (other_bits | GM_LPA_1000FULL);
+       else if (negotiated & GM_LPA_1000HALF)
+               return (other_bits | GM_LPA_1000HALF);
+       else
+               return (other_bits | mii_nway_result(negotiated));
+}
+
+/* Calculate GMII non-autonegotiated link technology
+ *
+ * This provides an equivalent to gmii_nway_result for the case when
+ * autonegotiation is disabled.
+ */
+static inline unsigned int gmii_forced_result(unsigned int bmcr)
+{
+       unsigned int result;
+       int full_duplex;
+
+       full_duplex = bmcr & BMCR_FULLDPLX;
+       if (bmcr & BMCR_SPEED1000)
+               result = full_duplex ? GM_LPA_1000FULL : GM_LPA_1000HALF;
+       else if (bmcr & BMCR_SPEED100)
+               result = full_duplex ? GM_LPA_100FULL : GM_LPA_100HALF;
+       else
+               result = full_duplex ? GM_LPA_10FULL : GM_LPA_10HALF;
+       return result;
+}
+
+#endif /* EFX_GMII_H */
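Putting the three helpers above together as their comments describe (advertised abilities ANDed with link partner abilities, then resolved to a single technology). A sketch only, assuming a populated struct mii_if_info and that autonegotiation has completed; example_gmii_result is a hypothetical name, not part of this patch:

/* Resolve the autonegotiated GMII link technology. */
static unsigned int example_gmii_result(struct mii_if_info *gmii)
{
        /* Abilities common to both ends of the link, in GM_LPA_xxx terms */
        unsigned int negotiated = gmii_advertised(gmii) & gmii_lpa(gmii);

        /* Highest common speed/duplex; other bits are passed through */
        return gmii_nway_result(negotiated);
}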
diff --git a/drivers/net/sfc/i2c-direct.c b/drivers/net/sfc/i2c-direct.c
new file mode 100644 (file)
index 0000000..b6c62d0
--- /dev/null
@@ -0,0 +1,381 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/delay.h>
+#include "net_driver.h"
+#include "i2c-direct.h"
+
+/*
+ * I2C data (SDA) and clock (SCL) line read/writes with appropriate
+ * delays.
+ */
+
+static inline void setsda(struct efx_i2c_interface *i2c, int state)
+{
+       udelay(i2c->op->udelay);
+       i2c->sda = state;
+       i2c->op->setsda(i2c);
+       udelay(i2c->op->udelay);
+}
+
+static inline void setscl(struct efx_i2c_interface *i2c, int state)
+{
+       udelay(i2c->op->udelay);
+       i2c->scl = state;
+       i2c->op->setscl(i2c);
+       udelay(i2c->op->udelay);
+}
+
+static inline int getsda(struct efx_i2c_interface *i2c)
+{
+       int sda;
+
+       udelay(i2c->op->udelay);
+       sda = i2c->op->getsda(i2c);
+       udelay(i2c->op->udelay);
+       return sda;
+}
+
+static inline int getscl(struct efx_i2c_interface *i2c)
+{
+       int scl;
+
+       udelay(i2c->op->udelay);
+       scl = i2c->op->getscl(i2c);
+       udelay(i2c->op->udelay);
+       return scl;
+}
+
+/*
+ * I2C low-level protocol operations
+ *
+ */
+
+static inline void i2c_release(struct efx_i2c_interface *i2c)
+{
+       EFX_WARN_ON_PARANOID(!i2c->scl);
+       EFX_WARN_ON_PARANOID(!i2c->sda);
+       /* Devices may time out if operations do not end */
+       setscl(i2c, 1);
+       setsda(i2c, 1);
+       EFX_BUG_ON_PARANOID(getsda(i2c) != 1);
+       EFX_BUG_ON_PARANOID(getscl(i2c) != 1);
+}
+
+static inline void i2c_start(struct efx_i2c_interface *i2c)
+{
+       /* We may be restarting immediately after a {send,recv}_bit,
+        * so SCL will not necessarily already be high.
+        */
+       EFX_WARN_ON_PARANOID(!i2c->sda);
+       setscl(i2c, 1);
+       setsda(i2c, 0);
+       setscl(i2c, 0);
+       setsda(i2c, 1);
+}
+
+static inline void i2c_send_bit(struct efx_i2c_interface *i2c, int bit)
+{
+       EFX_WARN_ON_PARANOID(i2c->scl != 0);
+       setsda(i2c, bit);
+       setscl(i2c, 1);
+       setscl(i2c, 0);
+       setsda(i2c, 1);
+}
+
+static inline int i2c_recv_bit(struct efx_i2c_interface *i2c)
+{
+       int bit;
+
+       EFX_WARN_ON_PARANOID(i2c->scl != 0);
+       EFX_WARN_ON_PARANOID(!i2c->sda);
+       setscl(i2c, 1);
+       bit = getsda(i2c);
+       setscl(i2c, 0);
+       return bit;
+}
+
+static inline void i2c_stop(struct efx_i2c_interface *i2c)
+{
+       EFX_WARN_ON_PARANOID(i2c->scl != 0);
+       setsda(i2c, 0);
+       setscl(i2c, 1);
+       setsda(i2c, 1);
+}
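+
+/* In bus terms: i2c_start() pulls SDA low while SCL is high (a START
+ * condition), i2c_stop() releases SDA while SCL is high (a STOP
+ * condition), and each data bit is driven or sampled while SCL is high.
+ */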
+
+/*
+ * I2C mid-level protocol operations
+ *
+ */
+
+/* Sends a byte via the I2C bus and checks for an acknowledgement from
+ * the slave device.
+ */
+static int i2c_send_byte(struct efx_i2c_interface *i2c, u8 byte)
+{
+       int i;
+
+       /* Send byte */
+       for (i = 0; i < 8; i++) {
+               i2c_send_bit(i2c, !!(byte & 0x80));
+               byte <<= 1;
+       }
+
+       /* Check for acknowledgement from slave */
+       return (i2c_recv_bit(i2c) == 0 ? 0 : -EIO);
+}
+
+/* Receives a byte via the I2C bus and sends ACK/NACK to the slave device. */
+static u8 i2c_recv_byte(struct efx_i2c_interface *i2c, int ack)
+{
+       u8 value = 0;
+       int i;
+
+       /* Receive byte */
+       for (i = 0; i < 8; i++)
+               value = (value << 1) | i2c_recv_bit(i2c);
+
+       /* Send ACK/NACK */
+       i2c_send_bit(i2c, (ack ? 0 : 1));
+
+       return value;
+}
+
+/* Calculate command byte for a read operation */
+static inline u8 i2c_read_cmd(u8 device_id)
+{
+       return ((device_id << 1) | 1);
+}
+
+/* Calculate command byte for a write operation */
+static inline u8 i2c_write_cmd(u8 device_id)
+{
+       return ((device_id << 1) | 0);
+}
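+
+/* For example, for a device at 7-bit address 0x50 these give a write
+ * command byte of 0xa0 and a read command byte of 0xa1.
+ */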
+
+int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id)
+{
+       int rc;
+
+       /* If someone is driving the bus low we just give up. */
+       if (getsda(i2c) == 0 || getscl(i2c) == 0) {
+               EFX_ERR(i2c->efx, "%s someone is holding the I2C bus low."
+                       " Giving up.\n", __func__);
+               return -EFAULT;
+       }
+
+       /* Pretend to initiate a device write */
+       i2c_start(i2c);
+       rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
+       if (rc)
+               goto out;
+
+ out:
+       i2c_stop(i2c);
+       i2c_release(i2c);
+
+       return rc;
+}
+
+/* This performs a fast read of one or more consecutive bytes from an
+ * I2C device.  Not all devices support consecutive reads of more than
+ * one byte; for these devices use efx_i2c_read() instead.
+ */
+int efx_i2c_fast_read(struct efx_i2c_interface *i2c,
+                     u8 device_id, u8 offset, u8 *data, unsigned int len)
+{
+       int i;
+       int rc;
+
+       EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
+       EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
+       EFX_WARN_ON_PARANOID(data == NULL);
+       EFX_WARN_ON_PARANOID(len < 1);
+
+       /* Select device and starting offset */
+       i2c_start(i2c);
+       rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
+       if (rc)
+               goto out;
+       rc = i2c_send_byte(i2c, offset);
+       if (rc)
+               goto out;
+
+       /* Read data from device */
+       i2c_start(i2c);
+       rc = i2c_send_byte(i2c, i2c_read_cmd(device_id));
+       if (rc)
+               goto out;
+       for (i = 0; i < (len - 1); i++)
+               /* Read and acknowledge all but the last byte */
+               data[i] = i2c_recv_byte(i2c, 1);
+       /* Read last byte with no acknowledgement */
+       data[i] = i2c_recv_byte(i2c, 0);
+
+ out:
+       i2c_stop(i2c);
+       i2c_release(i2c);
+
+       return rc;
+}
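+
+/* On the wire the transaction above is: START, <device_id|W>, offset,
+ * repeated START, <device_id|R>, then len-1 data bytes each followed by
+ * an ACK, a final data byte followed by a NACK, and STOP.
+ */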
+
+/* This performs a fast write of one or more consecutive bytes to an
+ * I2C device.  Not all devices support consecutive writes of more
+ * than one byte; for these devices use efx_i2c_write() instead.
+ */
+int efx_i2c_fast_write(struct efx_i2c_interface *i2c,
+                      u8 device_id, u8 offset,
+                      const u8 *data, unsigned int len)
+{
+       int i;
+       int rc;
+
+       EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
+       EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
+       EFX_WARN_ON_PARANOID(len < 1);
+
+       /* Select device and starting offset */
+       i2c_start(i2c);
+       rc = i2c_send_byte(i2c, i2c_write_cmd(device_id));
+       if (rc)
+               goto out;
+       rc = i2c_send_byte(i2c, offset);
+       if (rc)
+               goto out;
+
+       /* Write data to device */
+       for (i = 0; i < len; i++) {
+               rc = i2c_send_byte(i2c, data[i]);
+               if (rc)
+                       goto out;
+       }
+
+ out:
+       i2c_stop(i2c);
+       i2c_release(i2c);
+
+       return rc;
+}
+
+/* I2C byte-by-byte read */
+int efx_i2c_read(struct efx_i2c_interface *i2c,
+                u8 device_id, u8 offset, u8 *data, unsigned int len)
+{
+       int rc;
+
+       /* i2c_fast_read with length 1 is a single byte read */
+       for (; len > 0; offset++, data++, len--) {
+               rc = efx_i2c_fast_read(i2c, device_id, offset, data, 1);
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
+
+/* I2C byte-by-byte write */
+int efx_i2c_write(struct efx_i2c_interface *i2c,
+                 u8 device_id, u8 offset, const u8 *data, unsigned int len)
+{
+       int rc;
+
+       /* i2c_fast_write with length 1 is a single byte write */
+       for (; len > 0; offset++, data++, len--) {
+               rc = efx_i2c_fast_write(i2c, device_id, offset, data, 1);
+               if (rc)
+                       return rc;
+               mdelay(i2c->op->mdelay);
+       }
+
+       return 0;
+}
+
+
+/* This is just a slightly neater wrapper round efx_i2c_fast_write
+ * in the case where the target doesn't take an offset
+ */
+int efx_i2c_send_bytes(struct efx_i2c_interface *i2c,
+                      u8 device_id, const u8 *data, unsigned int len)
+{
+       return efx_i2c_fast_write(i2c, device_id, data[0], data + 1, len - 1);
+}
+
+/* I2C receiving of bytes - does not send an offset byte */
+int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id,
+                      u8 *bytes, unsigned int len)
+{
+       int i;
+       int rc;
+
+       EFX_WARN_ON_PARANOID(getsda(i2c) != 1);
+       EFX_WARN_ON_PARANOID(getscl(i2c) != 1);
+       EFX_WARN_ON_PARANOID(len < 1);
+
+       /* Select device */
+       i2c_start(i2c);
+
+       /* Read data from device */
+       rc = i2c_send_byte(i2c, i2c_read_cmd(device_id));
+       if (rc)
+               goto out;
+
+       for (i = 0; i < (len - 1); i++)
+               /* Read and acknowledge all but the last byte */
+               bytes[i] = i2c_recv_byte(i2c, 1);
+       /* Read last byte with no acknowledgement */
+       bytes[i] = i2c_recv_byte(i2c, 0);
+
+ out:
+       i2c_stop(i2c);
+       i2c_release(i2c);
+
+       return rc;
+}
+
+/* SMBus and some I2C devices will time out if the I2C clock is
+ * held low for too long. This is most likely to happen in virtualised
+ * systems (when the entire domain is descheduled) but could in
+ * principle happen due to preemption on any busy system (and given the
+ * potential length of an I2C operation turning preemption off is not
+ * a sensible option). The following functions deal with the failure by
+ * retrying up to a fixed number of times.
+ */
+
+#define I2C_MAX_RETRIES        (10)
+
+/* The timeout problem will result in -EIO. If the wrapped function
+ * returns any other error, pass this up and do not retry. */
+#define RETRY_WRAPPER(_f) \
+       int retries = I2C_MAX_RETRIES; \
+       int rc; \
+       while (retries) { \
+               rc = _f; \
+               if (rc != -EIO) \
+                       return rc; \
+               retries--; \
+       } \
+       return rc; \
+
+int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c, u8 device_id)
+{
+       RETRY_WRAPPER(efx_i2c_check_presence(i2c, device_id))
+}
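+
+/* For illustration, the wrapper above expands to roughly:
+ *
+ *     int retries = I2C_MAX_RETRIES;
+ *     int rc;
+ *     while (retries) {
+ *             rc = efx_i2c_check_presence(i2c, device_id);
+ *             if (rc != -EIO)
+ *                     return rc;
+ *             retries--;
+ *     }
+ *     return rc;
+ */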
+
+int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
+                u8 device_id, u8 offset, u8 *data, unsigned int len)
+{
+       RETRY_WRAPPER(efx_i2c_read(i2c, device_id, offset, data, len))
+}
+
+int efx_i2c_write_retry(struct efx_i2c_interface *i2c,
+                 u8 device_id, u8 offset, const u8 *data, unsigned int len)
+{
+       RETRY_WRAPPER(efx_i2c_write(i2c, device_id, offset, data, len))
+}
diff --git a/drivers/net/sfc/i2c-direct.h b/drivers/net/sfc/i2c-direct.h
new file mode 100644 (file)
index 0000000..291e561
--- /dev/null
@@ -0,0 +1,91 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005 Fen Systems Ltd.
+ * Copyright 2006 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_I2C_DIRECT_H
+#define EFX_I2C_DIRECT_H
+
+#include "net_driver.h"
+
+/*
+ * Direct control of an I2C bus
+ */
+
+struct efx_i2c_interface;
+
+/**
+ * struct efx_i2c_bit_operations - I2C bus direct control methods
+ *
+ * I2C bus direct control methods.
+ *
+ * @setsda: Set state of SDA line
+ * @setscl: Set state of SCL line
+ * @getsda: Get state of SDA line
+ * @getscl: Get state of SCL line
+ * @udelay: Delay between each bit operation
+ * @mdelay: Delay between each byte write
+ */
+struct efx_i2c_bit_operations {
+       void (*setsda) (struct efx_i2c_interface *i2c);
+       void (*setscl) (struct efx_i2c_interface *i2c);
+       int (*getsda) (struct efx_i2c_interface *i2c);
+       int (*getscl) (struct efx_i2c_interface *i2c);
+       unsigned int udelay;
+       unsigned int mdelay;
+};
+
+/**
+ * struct efx_i2c_interface - an I2C interface
+ *
+ * An I2C interface.
+ *
+ * @efx: Attached Efx NIC
+ * @op: I2C bus control methods
+ * @sda: Current output state of SDA line
+ * @scl: Current output state of SCL line
+ */
+struct efx_i2c_interface {
+       struct efx_nic *efx;
+       struct efx_i2c_bit_operations *op;
+       unsigned int sda:1;
+       unsigned int scl:1;
+};
+
+extern int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id);
+extern int efx_i2c_fast_read(struct efx_i2c_interface *i2c,
+                            u8 device_id, u8 offset,
+                            u8 *data, unsigned int len);
+extern int efx_i2c_fast_write(struct efx_i2c_interface *i2c,
+                             u8 device_id, u8 offset,
+                             const u8 *data, unsigned int len);
+extern int efx_i2c_read(struct efx_i2c_interface *i2c,
+                       u8 device_id, u8 offset, u8 *data, unsigned int len);
+extern int efx_i2c_write(struct efx_i2c_interface *i2c,
+                        u8 device_id, u8 offset,
+                        const u8 *data, unsigned int len);
+
+extern int efx_i2c_send_bytes(struct efx_i2c_interface *i2c, u8 device_id,
+                             const u8 *bytes, unsigned int len);
+
+extern int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id,
+                             u8 *bytes, unsigned int len);
+
+
+/* Versions of the API that retry on failure. */
+extern int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c,
+                                       u8 device_id);
+
+extern int efx_i2c_read_retry(struct efx_i2c_interface *i2c,
+                       u8 device_id, u8 offset, u8 *data, unsigned int len);
+
+extern int efx_i2c_write_retry(struct efx_i2c_interface *i2c,
+                        u8 device_id, u8 offset,
+                        const u8 *data, unsigned int len);
+
+#endif /* EFX_I2C_DIRECT_H */
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h
new file mode 100644 (file)
index 0000000..edd07d4
--- /dev/null
@@ -0,0 +1,33 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2006-2007 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_MAC_H
+#define EFX_MAC_H
+
+#include "net_driver.h"
+
+extern void falcon_xmac_writel(struct efx_nic *efx,
+                              efx_dword_t *value, unsigned int mac_reg);
+extern void falcon_xmac_readl(struct efx_nic *efx,
+                             efx_dword_t *value, unsigned int mac_reg);
+extern int falcon_init_xmac(struct efx_nic *efx);
+extern void falcon_reconfigure_xmac(struct efx_nic *efx);
+extern void falcon_update_stats_xmac(struct efx_nic *efx);
+extern void falcon_fini_xmac(struct efx_nic *efx);
+extern int falcon_check_xmac(struct efx_nic *efx);
+extern void falcon_xmac_sim_phy_event(struct efx_nic *efx);
+extern int falcon_xmac_get_settings(struct efx_nic *efx,
+                                   struct ethtool_cmd *ecmd);
+extern int falcon_xmac_set_settings(struct efx_nic *efx,
+                                   struct ethtool_cmd *ecmd);
+extern int falcon_xmac_set_pause(struct efx_nic *efx,
+                                enum efx_fc_type pause_params);
+
+#endif
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
new file mode 100644 (file)
index 0000000..dc06bb0
--- /dev/null
@@ -0,0 +1,282 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+/*
+ * Useful functions for working with MDIO clause 45 PHYs
+ */
+#include <linux/types.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include "net_driver.h"
+#include "mdio_10g.h"
+#include "boards.h"
+
+int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd,
+                           int spins, int spintime)
+{
+       u32 ctrl;
+       int phy_id = port->mii.phy_id;
+
+       /* Catch callers passing values in the wrong units (or just silly) */
+       EFX_BUG_ON_PARANOID(spins * spintime >= 5000);
+
+       mdio_clause45_write(port, phy_id, mmd, MDIO_MMDREG_CTRL1,
+                           (1 << MDIO_MMDREG_CTRL1_RESET_LBN));
+       /* Wait for the reset bit to clear. */
+       do {
+               msleep(spintime);
+               ctrl = mdio_clause45_read(port, phy_id, mmd, MDIO_MMDREG_CTRL1);
+               spins--;
+
+       } while (spins && (ctrl & (1 << MDIO_MMDREG_CTRL1_RESET_LBN)));
+
+       return spins ? spins : -ETIMEDOUT;
+}
+
+static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd,
+                                  int fault_fatal)
+{
+       int status;
+       int phy_id = efx->mii.phy_id;
+
+       /* Read MMD STATUS2 to check it is responding. */
+       status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2);
+       if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) &
+            ((1 << MDIO_MMDREG_STAT2_PRESENT_WIDTH) - 1)) !=
+           MDIO_MMDREG_STAT2_PRESENT_VAL) {
+               EFX_ERR(efx, "PHY MMD %d not responding.\n", mmd);
+               return -EIO;
+       }
+
+       /* Read MMD STATUS 1 to check for fault. */
+       status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT1);
+       if ((status & (1 << MDIO_MMDREG_STAT1_FAULT_LBN)) != 0) {
+               if (fault_fatal) {
+                       EFX_ERR(efx, "PHY MMD %d reporting fatal"
+                               " fault: status %x\n", mmd, status);
+                       return -EIO;
+               } else {
+                       EFX_LOG(efx, "PHY MMD %d reporting status"
+                               " %x (expected)\n", mmd, status);
+               }
+       }
+       return 0;
+}
+
+/* This ought to be ridiculous overkill. We expect it to fail rarely */
+#define MDIO45_RESET_TIME      1000 /* ms */
+#define MDIO45_RESET_ITERS     100
+
+int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
+                                 unsigned int mmd_mask)
+{
+       const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS;
+       int tries = MDIO45_RESET_ITERS;
+       int rc = 0;
+       int in_reset;
+
+       while (tries) {
+               int mask = mmd_mask;
+               int mmd = 0;
+               int stat;
+               in_reset = 0;
+               while (mask) {
+                       if (mask & 1) {
+                               stat = mdio_clause45_read(efx,
+                                                         efx->mii.phy_id,
+                                                         mmd,
+                                                         MDIO_MMDREG_CTRL1);
+                               if (stat < 0) {
+                                       EFX_ERR(efx, "failed to read status of"
+                                               " MMD %d\n", mmd);
+                                       return -EIO;
+                               }
+                               if (stat & (1 << MDIO_MMDREG_CTRL1_RESET_LBN))
+                                       in_reset |= (1 << mmd);
+                       }
+                       mask = mask >> 1;
+                       mmd++;
+               }
+               if (!in_reset)
+                       break;
+               tries--;
+               msleep(spintime);
+       }
+       if (in_reset != 0) {
+               EFX_ERR(efx, "not all MMDs came out of reset in time."
+                       " MMDs still in reset: %x\n", in_reset);
+               rc = -ETIMEDOUT;
+       }
+       return rc;
+}
+
+int mdio_clause45_check_mmds(struct efx_nic *efx,
+                            unsigned int mmd_mask, unsigned int fatal_mask)
+{
+       int devices, mmd = 0;
+       int probe_mmd;
+
+       /* Historically we have probed the PHYXS to find out what devices are
+        * present, but that doesn't work so well if the PHYXS isn't expected
+        * to exist; in that case just probe the first MMD set in the mask supplied. */
+       probe_mmd = (mmd_mask & MDIO_MMDREG_DEVS0_PHYXS) ? MDIO_MMD_PHYXS :
+           __ffs(mmd_mask);
+       devices = mdio_clause45_read(efx, efx->mii.phy_id,
+                                    probe_mmd, MDIO_MMDREG_DEVS0);
+
+       /* Check all the expected MMDs are present */
+       if (devices < 0) {
+               EFX_ERR(efx, "failed to read devices present\n");
+               return -EIO;
+       }
+       if ((devices & mmd_mask) != mmd_mask) {
+               EFX_ERR(efx, "required MMDs not present: got %x, "
+                       "wanted %x\n", devices, mmd_mask);
+               return -ENODEV;
+       }
+       EFX_TRACE(efx, "Devices present: %x\n", devices);
+
+       /* Check all required MMDs are responding and happy. */
+       while (mmd_mask) {
+               if (mmd_mask & 1) {
+                       int fault_fatal = fatal_mask & 1;
+                       if (mdio_clause45_check_mmd(efx, mmd, fault_fatal))
+                               return -EIO;
+               }
+               mmd_mask = mmd_mask >> 1;
+               fatal_mask = fatal_mask >> 1;
+               mmd++;
+       }
+
+       return 0;
+}
+
+int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask)
+{
+       int phy_id = efx->mii.phy_id;
+       int status;
+       int ok = 1;
+       int mmd = 0;
+       int good;
+
+       while (mmd_mask) {
+               if (mmd_mask & 1) {
+                       /* Double reads because link state is latched, and a
+                        * read moves the current state into the register */
+                       status = mdio_clause45_read(efx, phy_id,
+                                                   mmd, MDIO_MMDREG_STAT1);
+                       status = mdio_clause45_read(efx, phy_id,
+                                                   mmd, MDIO_MMDREG_STAT1);
+
+                       good = status & (1 << MDIO_MMDREG_STAT1_LINK_LBN);
+                       ok = ok && good;
+               }
+               mmd_mask = (mmd_mask >> 1);
+               mmd++;
+       }
+       return ok;
+}
+
+/**
+ * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO.
+ * @efx:               Efx NIC
+ * @ecmd:              Buffer for settings
+ *
+ * On return the 'port', 'speed', 'supported' and 'advertising' fields of
+ * ecmd have been filled out based on the PMA type.
+ */
+void mdio_clause45_get_settings(struct efx_nic *efx,
+                               struct ethtool_cmd *ecmd)
+{
+       int pma_type;
+
+       /* If no PMA is present we are presumably talking something XAUI-ish
+        * like CX4, which we report as FIBRE (see below) */
+       if ((efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_PMAPMD)) == 0) {
+               ecmd->speed = SPEED_10000;
+               ecmd->port = PORT_FIBRE;
+               ecmd->supported = SUPPORTED_FIBRE;
+               ecmd->advertising = ADVERTISED_FIBRE;
+               return;
+       }
+
+       pma_type = mdio_clause45_read(efx, efx->mii.phy_id,
+                                     MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL2);
+       pma_type &= MDIO_PMAPMD_CTRL2_TYPE_MASK;
+
+       switch (pma_type) {
+               /* We represent CX4 as fibre in the absence of anything
+                  better. */
+       case MDIO_PMAPMD_CTRL2_10G_CX4:
+               ecmd->speed = SPEED_10000;
+               ecmd->port = PORT_FIBRE;
+               ecmd->supported = SUPPORTED_FIBRE;
+               ecmd->advertising = ADVERTISED_FIBRE;
+               break;
+               /* 10G Base-T */
+       case MDIO_PMAPMD_CTRL2_10G_BT:
+               ecmd->speed = SPEED_10000;
+               ecmd->port = PORT_TP;
+               ecmd->supported = SUPPORTED_TP | SUPPORTED_10000baseT_Full;
+               ecmd->advertising = (ADVERTISED_FIBRE
+                                    | ADVERTISED_10000baseT_Full);
+               break;
+       case MDIO_PMAPMD_CTRL2_1G_BT:
+               ecmd->speed = SPEED_1000;
+               ecmd->port = PORT_TP;
+               ecmd->supported = SUPPORTED_TP | SUPPORTED_1000baseT_Full;
+               ecmd->advertising = (ADVERTISED_FIBRE
+                                    | ADVERTISED_1000baseT_Full);
+               break;
+       case MDIO_PMAPMD_CTRL2_100_BT:
+               ecmd->speed = SPEED_100;
+               ecmd->port = PORT_TP;
+               ecmd->supported = SUPPORTED_TP | SUPPORTED_100baseT_Full;
+               ecmd->advertising = (ADVERTISED_FIBRE
+                                    | ADVERTISED_100baseT_Full);
+               break;
+       case MDIO_PMAPMD_CTRL2_10_BT:
+               ecmd->speed = SPEED_10;
+               ecmd->port = PORT_TP;
+               ecmd->supported = SUPPORTED_TP | SUPPORTED_10baseT_Full;
+               ecmd->advertising = ADVERTISED_FIBRE | ADVERTISED_10baseT_Full;
+               break;
+       /* All the other defined modes are flavours of
+        * 10G optical */
+       default:
+               ecmd->speed = SPEED_10000;
+               ecmd->port = PORT_FIBRE;
+               ecmd->supported = SUPPORTED_FIBRE;
+               ecmd->advertising = ADVERTISED_FIBRE;
+               break;
+       }
+}
+
+/**
+ * mdio_clause45_set_settings - Set (some of) the PHY settings over MDIO.
+ * @efx:               Efx NIC
+ * @ecmd:              New settings
+ *
+ * Currently this just enforces that we are _not_ changing the
+ * 'port', 'speed', 'supported' or 'advertising' settings as these
+ * cannot be changed on any currently supported PHY.
+ */
+int mdio_clause45_set_settings(struct efx_nic *efx,
+                              struct ethtool_cmd *ecmd)
+{
+       struct ethtool_cmd tmpcmd;
+       mdio_clause45_get_settings(efx, &tmpcmd);
+       /* None of the current PHYs support more than one mode
+        * of operation (and only 10GBT ever will), so keep things
+        * simple for now */
+       if ((ecmd->speed == tmpcmd.speed) && (ecmd->port == tmpcmd.port) &&
+           (ecmd->supported == tmpcmd.supported) &&
+           (ecmd->advertising == tmpcmd.advertising))
+               return 0;
+       return -EOPNOTSUPP;
+}
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
new file mode 100644 (file)
index 0000000..2214b6d
--- /dev/null
@@ -0,0 +1,232 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_MDIO_10G_H
+#define EFX_MDIO_10G_H
+
+/*
+ * Definitions needed for doing 10G MDIO as specified in clause 45
+ * MDIO, which do not appear in Linux yet. Also some helper functions.
+ */
+
+#include "efx.h"
+#include "boards.h"
+
+/* Numbering of the MDIO Manageable Devices (MMDs) */
+/* Physical Medium Attachment / Physical Medium Dependent sublayer */
+#define MDIO_MMD_PMAPMD        (1)
+/* WAN Interface Sublayer */
+#define MDIO_MMD_WIS   (2)
+/* Physical Coding Sublayer */
+#define MDIO_MMD_PCS   (3)
+/* PHY Extender Sublayer */
+#define MDIO_MMD_PHYXS (4)
+/* Extender Sublayer */
+#define MDIO_MMD_DTEXS (5)
+/* Transmission convergence */
+#define MDIO_MMD_TC    (6)
+/* Auto negotiation */
+#define MDIO_MMD_AN    (7)
+
+/* Generic register locations */
+#define MDIO_MMDREG_CTRL1      (0)
+#define MDIO_MMDREG_STAT1      (1)
+#define MDIO_MMDREG_IDHI       (2)
+#define MDIO_MMDREG_IDLOW      (3)
+#define MDIO_MMDREG_SPEED      (4)
+#define MDIO_MMDREG_DEVS0      (5)
+#define MDIO_MMDREG_DEVS1      (6)
+#define MDIO_MMDREG_CTRL2      (7)
+#define MDIO_MMDREG_STAT2      (8)
+
+/* Bits in MMDREG_CTRL1 */
+/* Reset */
+#define MDIO_MMDREG_CTRL1_RESET_LBN    (15)
+#define MDIO_MMDREG_CTRL1_RESET_WIDTH  (1)
+
+/* Bits in MMDREG_STAT1 */
+#define MDIO_MMDREG_STAT1_FAULT_LBN    (7)
+#define MDIO_MMDREG_STAT1_FAULT_WIDTH  (1)
+/* Link state */
+#define MDIO_MMDREG_STAT1_LINK_LBN     (2)
+#define MDIO_MMDREG_STAT1_LINK_WIDTH   (1)
+
+/* Bits in ID reg */
+#define MDIO_ID_REV(_id32)     (_id32 & 0xf)
+#define MDIO_ID_MODEL(_id32)   ((_id32 >> 4) & 0x3f)
+#define MDIO_ID_OUI(_id32)     (_id32 >> 10)
+
+/* Bits in MMDREG_DEVS0. Someone thoughtfully laid things out
+ * so the 'bit present' bit number of an MMD is the number of
+ * that MMD */
+#define DEV_PRESENT_BIT(_b) (1 << _b)
+
+#define MDIO_MMDREG_DEVS0_PHYXS         DEV_PRESENT_BIT(MDIO_MMD_PHYXS)
+#define MDIO_MMDREG_DEVS0_PCS   DEV_PRESENT_BIT(MDIO_MMD_PCS)
+#define MDIO_MMDREG_DEVS0_PMAPMD DEV_PRESENT_BIT(MDIO_MMD_PMAPMD)
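+
+/* For example, a MDIO_MMDREG_DEVS0 readback of 0x1a indicates that the
+ * PMA/PMD (bit 1), PCS (bit 3) and PHYXS (bit 4) MMDs are all present. */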
+
+/* Bits in MMDREG_STAT2 */
+#define MDIO_MMDREG_STAT2_PRESENT_VAL  (2)
+#define MDIO_MMDREG_STAT2_PRESENT_LBN  (14)
+#define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2)
+
+/* PMA type (4 bits) */
+#define MDIO_PMAPMD_CTRL2_10G_CX4      (0x0)
+#define MDIO_PMAPMD_CTRL2_10G_EW       (0x1)
+#define MDIO_PMAPMD_CTRL2_10G_LW       (0x2)
+#define MDIO_PMAPMD_CTRL2_10G_SW       (0x3)
+#define MDIO_PMAPMD_CTRL2_10G_LX4      (0x4)
+#define MDIO_PMAPMD_CTRL2_10G_ER       (0x5)
+#define MDIO_PMAPMD_CTRL2_10G_LR       (0x6)
+#define MDIO_PMAPMD_CTRL2_10G_SR       (0x7)
+/* Reserved */
+#define MDIO_PMAPMD_CTRL2_10G_BT       (0x9)
+/* Reserved */
+/* Reserved */
+#define MDIO_PMAPMD_CTRL2_1G_BT                (0xc)
+/* Reserved */
+#define MDIO_PMAPMD_CTRL2_100_BT       (0xe)
+#define MDIO_PMAPMD_CTRL2_10_BT                (0xf)
+#define MDIO_PMAPMD_CTRL2_TYPE_MASK    (0xf)
+
+/* PHY XGXS lane state */
+#define MDIO_PHYXS_LANE_STATE          (0x18)
+#define MDIO_PHYXS_LANE_ALIGNED_LBN    (12)
+
+/* AN registers */
+#define MDIO_AN_STATUS                 (1)
+#define MDIO_AN_STATUS_XNP_LBN         (7)
+#define MDIO_AN_STATUS_PAGE_LBN                (6)
+#define MDIO_AN_STATUS_AN_DONE_LBN     (5)
+#define MDIO_AN_STATUS_LP_AN_CAP_LBN   (0)
+
+#define MDIO_AN_10GBT_STATUS           (33)
+#define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */
+#define MDIO_AN_10GBT_STATUS_MS_LBN     (14) /* MASTER/SLAVE config */
+#define MDIO_AN_10GBT_STATUS_LOC_OK_LBN (13) /* Local OK */
+#define MDIO_AN_10GBT_STATUS_REM_OK_LBN (12) /* Remote OK */
+#define MDIO_AN_10GBT_STATUS_LP_10G_LBN (11) /* Link partner is 10GBT capable */
+#define MDIO_AN_10GBT_STATUS_LP_LTA_LBN (10) /* LP loop timing ability */
+#define MDIO_AN_10GBT_STATUS_LP_TRR_LBN (9)  /* LP Training Reset Request */
+
+
+/* Packing of the prt and dev arguments of clause 45 style MDIO into a
+ * single int so they can be passed into the mdio_read/write functions
+ * that currently exist. Note that as Falcon is the only current user,
+ * the packed form is chosen to match what Falcon needs to write into
+ * a register. This is checked at compile-time so do not change it. If
+ * your target chip needs things laid out differently you will need
+ * to unpack the arguments in your chip-specific mdio functions.
+ */
+ /* These are defined by the standard. */
+#define MDIO45_PRT_ID_WIDTH  (5)
+#define MDIO45_DEV_ID_WIDTH  (5)
+
+/* The prt ID is just packed in immediately to the left of the dev ID */
+#define MDIO45_PRT_DEV_WIDTH (MDIO45_PRT_ID_WIDTH + MDIO45_DEV_ID_WIDTH)
+
+#define MDIO45_PRT_ID_MASK   ((1 << MDIO45_PRT_DEV_WIDTH) - 1)
+/* This is the prt + dev extended by 1 bit to hold the 'is clause 45' flag. */
+#define MDIO45_XPRT_ID_WIDTH   (MDIO45_PRT_DEV_WIDTH + 1)
+#define MDIO45_XPRT_ID_MASK   ((1 << MDIO45_XPRT_ID_WIDTH) - 1)
+#define MDIO45_XPRT_ID_IS10G   (1 << (MDIO45_XPRT_ID_WIDTH - 1))
+
+
+#define MDIO45_PRT_ID_COMP_LBN   MDIO45_DEV_ID_WIDTH
+#define MDIO45_PRT_ID_COMP_WIDTH  MDIO45_PRT_ID_WIDTH
+#define MDIO45_DEV_ID_COMP_LBN    0
+#define MDIO45_DEV_ID_COMP_WIDTH  MDIO45_DEV_ID_WIDTH
+
+/* Compose port and device into a phy_id */
+static inline int mdio_clause45_pack(u8 prt, u8 dev)
+{
+       efx_dword_t phy_id;
+       EFX_POPULATE_DWORD_2(phy_id, MDIO45_PRT_ID_COMP, prt,
+                            MDIO45_DEV_ID_COMP, dev);
+       return MDIO45_XPRT_ID_IS10G | EFX_DWORD_VAL(phy_id);
+}
+
+static inline void mdio_clause45_unpack(u32 val, u8 *prt, u8 *dev)
+{
+       efx_dword_t phy_id;
+       EFX_POPULATE_DWORD_1(phy_id, EFX_DWORD_0, val);
+       *prt = EFX_DWORD_FIELD(phy_id, MDIO45_PRT_ID_COMP);
+       *dev = EFX_DWORD_FIELD(phy_id, MDIO45_DEV_ID_COMP);
+}
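+
+/* For example, mdio_clause45_pack(1, MDIO_MMD_PCS) yields
+ * 0x400 | (1 << 5) | 3 == 0x423: the device ID in bits 4:0, the port ID
+ * in bits 9:5 and bit 10 flagging a clause 45 access. */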
+
+static inline int mdio_clause45_read(struct efx_nic *efx,
+                                    u8 prt, u8 dev, u16 addr)
+{
+       return efx->mii.mdio_read(efx->net_dev,
+                                 mdio_clause45_pack(prt, dev), addr);
+}
+
+static inline void mdio_clause45_write(struct efx_nic *efx,
+                                      u8 prt, u8 dev, u16 addr, int value)
+{
+       efx->mii.mdio_write(efx->net_dev,
+                           mdio_clause45_pack(prt, dev), addr, value);
+}
+
+
+static inline u32 mdio_clause45_read_id(struct efx_nic *efx, int mmd)
+{
+       int phy_id = efx->mii.phy_id;
+       u16 id_low = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_IDLOW);
+       u16 id_hi = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_IDHI);
+       return (id_hi << 16) | (id_low);
+}
+
+static inline int mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx)
+{
+       int i, sync, lane_status;
+
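+       /* Read twice: the lane state is presumably latched in the same
+        * way as MDIO_MMDREG_STAT1 (compare the double read in
+        * mdio_clause45_links_ok() above), so the second read reflects
+        * the current state.
+        */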
+       for (i = 0; i < 2; ++i)
+               lane_status = mdio_clause45_read(efx, efx->mii.phy_id,
+                                                MDIO_MMD_PHYXS,
+                                                MDIO_PHYXS_LANE_STATE);
+
+       sync = (lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN)) != 0;
+       if (!sync)
+               EFX_INFO(efx, "XGXS lane status: %x\n", lane_status);
+       return sync;
+}
+
+extern const char *mdio_clause45_mmd_name(int mmd);
+
+/*
+ * Reset a specific MMD and wait for reset to clear.
+ * Return number of spins left (>0) on success, -%ETIMEDOUT on failure.
+ *
+ * This function will sleep
+ */
+extern int mdio_clause45_reset_mmd(struct efx_nic *efx, int mmd,
+                                  int spins, int spintime);
+
+/* As mdio_clause45_check_mmd but for multiple MMDs */
+int mdio_clause45_check_mmds(struct efx_nic *efx,
+                            unsigned int mmd_mask, unsigned int fatal_mask);
+
+/* Check the link status of specified mmds in bit mask */
+extern int mdio_clause45_links_ok(struct efx_nic *efx,
+                                 unsigned int mmd_mask);
+
+/* Read (some of) the PHY settings over MDIO */
+extern void mdio_clause45_get_settings(struct efx_nic *efx,
+                                      struct ethtool_cmd *ecmd);
+
+/* Set (some of) the PHY settings over MDIO */
+extern int mdio_clause45_set_settings(struct efx_nic *efx,
+                                     struct ethtool_cmd *ecmd);
+
+/* Wait for specified MMDs to exit reset within a timeout */
+extern int mdio_clause45_wait_reset_mmds(struct efx_nic *efx,
+                                        unsigned int mmd_mask);
+
+#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
new file mode 100644 (file)
index 0000000..c505482
--- /dev/null
@@ -0,0 +1,883 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/* Common definitions for all Efx net driver code */
+
+#ifndef EFX_NET_DRIVER_H
+#define EFX_NET_DRIVER_H
+
+#include <linux/version.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/timer.h>
+#include <linux/mii.h>
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/highmem.h>
+#include <linux/workqueue.h>
+#include <linux/inet_lro.h>
+
+#include "enum.h"
+#include "bitfield.h"
+#include "i2c-direct.h"
+
+#define EFX_MAX_LRO_DESCRIPTORS 8
+#define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS
+
+/**************************************************************************
+ *
+ * Build definitions
+ *
+ **************************************************************************/
+#ifndef EFX_DRIVER_NAME
+#define EFX_DRIVER_NAME        "sfc"
+#endif
+#define EFX_DRIVER_VERSION     "2.2.0136"
+
+#ifdef EFX_ENABLE_DEBUG
+#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
+#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
+#else
+#define EFX_BUG_ON_PARANOID(x) do {} while (0)
+#define EFX_WARN_ON_PARANOID(x) do {} while (0)
+#endif
+
+#define NET_DEV_REGISTERED(efx)                                        \
+       ((efx)->net_dev->reg_state == NETREG_REGISTERED)
+
+/* Include net device name in log messages if it has been registered.
+ * Use efx->name not efx->net_dev->name so that races with (un)registration
+ * are harmless.
+ */
+#define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "")
+
+/* Un-rate-limited logging */
+#define EFX_ERR(efx, fmt, args...) \
+dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args)
+
+#define EFX_INFO(efx, fmt, args...) \
+dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args)
+
+#ifdef EFX_ENABLE_DEBUG
+#define EFX_LOG(efx, fmt, args...) \
+dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
+#else
+#define EFX_LOG(efx, fmt, args...) \
+dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args)
+#endif
+
+#define EFX_TRACE(efx, fmt, args...) do {} while (0)
+
+#define EFX_REGDUMP(efx, fmt, args...) do {} while (0)
+
+/* Rate-limited logging */
+#define EFX_ERR_RL(efx, fmt, args...) \
+do {if (net_ratelimit()) EFX_ERR(efx, fmt, ##args); } while (0)
+
+#define EFX_INFO_RL(efx, fmt, args...) \
+do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0)
+
+#define EFX_LOG_RL(efx, fmt, args...) \
+do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
+
+/* Kernel headers may redefine inline anyway */
+#ifndef inline
+#define inline inline __attribute__ ((always_inline))
+#endif
+
+/**************************************************************************
+ *
+ * Efx data structures
+ *
+ **************************************************************************/
+
+#define EFX_MAX_CHANNELS 32
+#define EFX_MAX_TX_QUEUES 1
+#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
+
+/**
+ * struct efx_special_buffer - An Efx special buffer
+ * @addr: CPU base address of the buffer
+ * @dma_addr: DMA base address of the buffer
+ * @len: Buffer length, in bytes
+ * @index: Buffer index within controller's buffer table
+ * @entries: Number of buffer table entries
+ *
+ * Special buffers are used for the event queues and the TX and RX
+ * descriptor queues for each channel.  They are *not* used for the
+ * actual transmit and receive buffers.
+ *
+ * Note that for Falcon, TX and RX descriptor queues live in host memory.
+ * Allocation and freeing procedures must take this into account.
+ */
+struct efx_special_buffer {
+       void *addr;
+       dma_addr_t dma_addr;
+       unsigned int len;
+       int index;
+       int entries;
+};
+
+/**
+ * struct efx_tx_buffer - An Efx TX buffer
+ * @skb: The associated socket buffer.
+ *     Set only on the final fragment of a packet; %NULL for all other
+ *     fragments.  When this fragment completes, then we can free this
+ *     skb.
+ * @dma_addr: DMA address of the fragment.
+ * @len: Length of this fragment.
+ *     This field is zero when the queue slot is empty.
+ * @continuation: True if this fragment is not the end of a packet.
+ * @unmap_single: True if pci_unmap_single should be used.
+ * @unmap_addr: DMA address to unmap
+ * @unmap_len: Length of this fragment to unmap
+ */
+struct efx_tx_buffer {
+       const struct sk_buff *skb;
+       dma_addr_t dma_addr;
+       unsigned short len;
+       unsigned char continuation;
+       unsigned char unmap_single;
+       dma_addr_t unmap_addr;
+       unsigned short unmap_len;
+};
+
+/**
+ * struct efx_tx_queue - An Efx TX queue
+ *
+ * This is a ring buffer of TX fragments.
+ * Since the TX completion path always executes on the same
+ * CPU and the xmit path can operate on different CPUs,
+ * performance is increased by ensuring that the completion
+ * path and the xmit path operate on different cache lines.
+ * This is particularly important if the xmit path is always
+ * executing on one CPU which is different from the completion
+ * path.  There is also a cache line for members which are
+ * read but not written on the fast path.
+ *
+ * @efx: The associated Efx NIC
+ * @queue: DMA queue number
+ * @used: Queue is used by net driver
+ * @channel: The associated channel
+ * @buffer: The software buffer ring
+ * @txd: The hardware descriptor ring
+ * @read_count: Current read pointer.
+ *     This is the number of buffers that have been removed from both rings.
+ * @stopped: Stopped flag.
+ *     Set if this TX queue is currently stopping its port.
+ * @insert_count: Current insert pointer
+ *     This is the number of buffers that have been added to the
+ *     software ring.
+ * @write_count: Current write pointer
+ *     This is the number of buffers that have been added to the
+ *     hardware ring.
+ * @old_read_count: The value of read_count when last checked.
+ *     This is here for performance reasons.  The xmit path will
+ *     only get the up-to-date value of read_count if this
+ *     variable indicates that the queue is full.  This is to
+ *     avoid cache-line ping-pong between the xmit path and the
+ *     completion path.
+ */
+struct efx_tx_queue {
+       /* Members which don't change on the fast path */
+       struct efx_nic *efx ____cacheline_aligned_in_smp;
+       int queue;
+       int used;
+       struct efx_channel *channel;
+       struct efx_nic *nic;
+       struct efx_tx_buffer *buffer;
+       struct efx_special_buffer txd;
+
+       /* Members used mainly on the completion path */
+       unsigned int read_count ____cacheline_aligned_in_smp;
+       int stopped;
+
+       /* Members used only on the xmit path */
+       unsigned int insert_count ____cacheline_aligned_in_smp;
+       unsigned int write_count;
+       unsigned int old_read_count;
+};
+
+/**
+ * struct efx_rx_buffer - An Efx RX data buffer
+ * @dma_addr: DMA base address of the buffer
+ * @skb: The associated socket buffer, if any.
+ *     If both this and page are %NULL, the buffer slot is currently free.
+ * @page: The associated page buffer, if any.
+ *     If both this and skb are %NULL, the buffer slot is currently free.
+ * @data: Pointer to ethernet header
+ * @len: Buffer length, in bytes.
+ * @unmap_addr: DMA address to unmap
+ */
+struct efx_rx_buffer {
+       dma_addr_t dma_addr;
+       struct sk_buff *skb;
+       struct page *page;
+       char *data;
+       unsigned int len;
+       dma_addr_t unmap_addr;
+};
+
+/**
+ * struct efx_rx_queue - An Efx RX queue
+ * @efx: The associated Efx NIC
+ * @queue: DMA queue number
+ * @used: Queue is used by net driver
+ * @channel: The associated channel
+ * @buffer: The software buffer ring
+ * @rxd: The hardware descriptor ring
+ * @added_count: Number of buffers added to the receive queue.
+ * @notified_count: Number of buffers given to NIC (<= @added_count).
+ * @removed_count: Number of buffers removed from the receive queue.
+ * @add_lock: Receive queue descriptor add spin lock.
+ *     This lock must be held in order to add buffers to the RX
+ *     descriptor ring (rxd and buffer) and to update added_count (but
+ *     not removed_count).
+ * @max_fill: RX descriptor maximum fill level (<= ring size)
+ * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
+ *     (<= @max_fill)
+ * @fast_fill_limit: The level to which a fast fill will fill
+ *     (@fast_fill_trigger <= @fast_fill_limit <= @max_fill)
+ * @min_fill: RX descriptor minimum non-zero fill level.
+ *     This records the minimum fill level observed when a ring
+ *     refill was triggered.
+ * @min_overfill: RX descriptor minimum overflow fill level.
+ *     This records the minimum fill level at which RX queue
+ *     overflow was observed.  It should never be set.
+ * @alloc_page_count: RX allocation strategy counter.
+ * @alloc_skb_count: RX allocation strategy counter.
+ * @work: Descriptor push work thread
+ * @buf_page: Page for next RX buffer.
+ *     We can use a single page for multiple RX buffers. This tracks
+ *     the remaining space in the allocation.
+ * @buf_dma_addr: Page's DMA address.
+ * @buf_data: Page's host address.
+ */
+struct efx_rx_queue {
+       struct efx_nic *efx;
+       int queue;
+       int used;
+       struct efx_channel *channel;
+       struct efx_rx_buffer *buffer;
+       struct efx_special_buffer rxd;
+
+       int added_count;
+       int notified_count;
+       int removed_count;
+       spinlock_t add_lock;
+       unsigned int max_fill;
+       unsigned int fast_fill_trigger;
+       unsigned int fast_fill_limit;
+       unsigned int min_fill;
+       unsigned int min_overfill;
+       unsigned int alloc_page_count;
+       unsigned int alloc_skb_count;
+       struct delayed_work work;
+       unsigned int slow_fill_count;
+
+       struct page *buf_page;
+       dma_addr_t buf_dma_addr;
+       char *buf_data;
+};
+
+/**
+ * struct efx_buffer - An Efx general-purpose buffer
+ * @addr: host base address of the buffer
+ * @dma_addr: DMA base address of the buffer
+ * @len: Buffer length, in bytes
+ *
+ * Falcon uses these buffers for its interrupt status registers and
+ * MAC stats dumps.
+ */
+struct efx_buffer {
+       void *addr;
+       dma_addr_t dma_addr;
+       unsigned int len;
+};
+
+
+/* Flags for channel->used_flags */
+#define EFX_USED_BY_RX 1
+#define EFX_USED_BY_TX 2
+#define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX)
+
+enum efx_rx_alloc_method {
+       RX_ALLOC_METHOD_AUTO = 0,
+       RX_ALLOC_METHOD_SKB = 1,
+       RX_ALLOC_METHOD_PAGE = 2,
+};
+
+/**
+ * struct efx_channel - An Efx channel
+ *
+ * A channel comprises an event queue, at least one TX queue, at least
+ * one RX queue, and an associated tasklet for processing the event
+ * queue.
+ *
+ * @efx: Associated Efx NIC
+ * @evqnum: Event queue number
+ * @channel: Channel instance number
+ * @used_flags: Channel is used by net driver
+ * @enabled: Channel enabled indicator
+ * @irq: IRQ number (MSI and MSI-X only)
+ * @has_interrupt: Channel has an interrupt
+ * @irq_moderation: IRQ moderation value (in us)
+ * @napi_dev: Net device used with NAPI
+ * @napi_str: NAPI control structure
+ * @reset_work: Scheduled reset work thread
+ * @work_pending: Is work pending via NAPI?
+ * @eventq: Event queue buffer
+ * @eventq_read_ptr: Event queue read pointer
+ * @last_eventq_read_ptr: Last event queue read pointer value.
+ * @eventq_magic: Event queue magic value for driver-generated test events
+ * @lro_mgr: LRO state
+ * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
+ *     and diagnostic counters
+ * @rx_alloc_push_pages: RX allocation method currently in use for pushing
+ *     descriptors
+ * @rx_alloc_pop_pages: RX allocation method currently in use for popping
+ *     descriptors
+ * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
+ * @n_rx_ip_frag_err: Count of RX IP fragment errors
+ * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
+ * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
+ * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
+ * @n_rx_overlength: Count of RX_OVERLENGTH errors
+ * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
+ */
+struct efx_channel {
+       struct efx_nic *efx;
+       int evqnum;
+       int channel;
+       int used_flags;
+       int enabled;
+       int irq;
+       unsigned int has_interrupt;
+       unsigned int irq_moderation;
+       struct net_device *napi_dev;
+       struct napi_struct napi_str;
+       struct work_struct reset_work;
+       int work_pending;
+       struct efx_special_buffer eventq;
+       unsigned int eventq_read_ptr;
+       unsigned int last_eventq_read_ptr;
+       unsigned int eventq_magic;
+
+       struct net_lro_mgr lro_mgr;
+       int rx_alloc_level;
+       int rx_alloc_push_pages;
+       int rx_alloc_pop_pages;
+
+       unsigned n_rx_tobe_disc;
+       unsigned n_rx_ip_frag_err;
+       unsigned n_rx_ip_hdr_chksum_err;
+       unsigned n_rx_tcp_udp_chksum_err;
+       unsigned n_rx_frm_trunc;
+       unsigned n_rx_overlength;
+       unsigned n_skbuff_leaks;
+
+       /* Used to pipeline received packets in order to optimise memory
+        * access with prefetches.
+        */
+       struct efx_rx_buffer *rx_pkt;
+       int rx_pkt_csummed;
+
+};
+
+/**
+ * struct efx_blinker - S/W LED blinking context
+ * @led_num: LED ID (board-specific meaning)
+ * @state: Current state - on or off
+ * @resubmit: Timer resubmission flag
+ * @timer: Control timer for blinking
+ */
+struct efx_blinker {
+       int led_num;
+       int state;
+       int resubmit;
+       struct timer_list timer;
+};
+
+
+/**
+ * struct efx_board - board information
+ * @type: Board model type
+ * @major: Major rev. ('A', 'B' ...)
+ * @minor: Minor rev. (0, 1, ...)
+ * @init: Initialisation function
+ * @init_leds: Sets up board LEDs
+ * @set_fault_led: Turns the fault LED on or off
+ * @blink: Starts/stops blinking
+ * @blinker: used to blink LEDs in software
+ */
+struct efx_board {
+       int type;
+       int major;
+       int minor;
+       int (*init) (struct efx_nic *nic);
+       /* As the LEDs are typically attached to the PHY, LEDs
+        * have a separate init callback that happens later than
+        * board init. */
+       int (*init_leds)(struct efx_nic *efx);
+       void (*set_fault_led) (struct efx_nic *efx, int state);
+       void (*blink) (struct efx_nic *efx, int start);
+       struct efx_blinker blinker;
+};
+
+enum efx_int_mode {
+       /* Be careful if altering to correct macro below */
+       EFX_INT_MODE_MSIX = 0,
+       EFX_INT_MODE_MSI = 1,
+       EFX_INT_MODE_LEGACY = 2,
+       EFX_INT_MODE_MAX        /* Insert any new items before this */
+};
+#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)
+
+enum phy_type {
+       PHY_TYPE_NONE = 0,
+       PHY_TYPE_CX4_RTMR = 1,
+       PHY_TYPE_1G_ALASKA = 2,
+       PHY_TYPE_10XPRESS = 3,
+       PHY_TYPE_XFP = 4,
+       PHY_TYPE_PM8358 = 6,
+       PHY_TYPE_MAX    /* Insert any new items before this */
+};
+
+#define PHY_ADDR_INVALID 0xff
+
+enum nic_state {
+       STATE_INIT = 0,
+       STATE_RUNNING = 1,
+       STATE_FINI = 2,
+       STATE_RESETTING = 3, /* rtnl_lock always held */
+       STATE_DISABLED = 4,
+       STATE_MAX,
+};
+
+/*
+ * Alignment of page-allocated RX buffers
+ *
+ * Controls the number of bytes inserted at the start of an RX buffer.
+ * This is the equivalent of NET_IP_ALIGN [which controls the alignment
+ * of the skb->head for hardware DMA].
+ */
+#if defined(__i386__) || defined(__x86_64__)
+#define EFX_PAGE_IP_ALIGN 0
+#else
+#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
+#endif
+
+/*
+ * Alignment of the skb->head which wraps a page-allocated RX buffer
+ *
+ * The skb allocated to wrap an rx_buffer can have this alignment. Since
+ * the data is memcpy'd from the rx_buf, it does not need to be equal to
+ * EFX_PAGE_IP_ALIGN.
+ */
+#define EFX_PAGE_SKB_ALIGN 2
+
+/* Forward declaration */
+struct efx_nic;
+
+/* Pseudo bit-mask flow control field */
+enum efx_fc_type {
+       EFX_FC_RX = 1,
+       EFX_FC_TX = 2,
+       EFX_FC_AUTO = 4,
+};
+
+/**
+ * struct efx_phy_operations - Efx PHY operations table
+ * @init: Initialise PHY
+ * @fini: Shut down PHY
+ * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
+ * @clear_interrupt: Clear down interrupt
+ * @blink: Blink LEDs
+ * @check_hw: Check hardware
+ * @reset_xaui: Reset XAUI side of PHY (for software sequenced reset)
+ * @mmds: MMD presence mask
+ */
+struct efx_phy_operations {
+       int (*init) (struct efx_nic *efx);
+       void (*fini) (struct efx_nic *efx);
+       void (*reconfigure) (struct efx_nic *efx);
+       void (*clear_interrupt) (struct efx_nic *efx);
+       int (*check_hw) (struct efx_nic *efx);
+       void (*reset_xaui) (struct efx_nic *efx);
+       int mmds;
+};
+
+/*
+ * Efx extended statistics
+ *
+ * Not all statistics are provided by all supported MACs.  The purpose
+ * is this structure is to contain the raw statistics provided by each
+ * MAC.
+ */
+struct efx_mac_stats {
+       u64 tx_bytes;
+       u64 tx_good_bytes;
+       u64 tx_bad_bytes;
+       unsigned long tx_packets;
+       unsigned long tx_bad;
+       unsigned long tx_pause;
+       unsigned long tx_control;
+       unsigned long tx_unicast;
+       unsigned long tx_multicast;
+       unsigned long tx_broadcast;
+       unsigned long tx_lt64;
+       unsigned long tx_64;
+       unsigned long tx_65_to_127;
+       unsigned long tx_128_to_255;
+       unsigned long tx_256_to_511;
+       unsigned long tx_512_to_1023;
+       unsigned long tx_1024_to_15xx;
+       unsigned long tx_15xx_to_jumbo;
+       unsigned long tx_gtjumbo;
+       unsigned long tx_collision;
+       unsigned long tx_single_collision;
+       unsigned long tx_multiple_collision;
+       unsigned long tx_excessive_collision;
+       unsigned long tx_deferred;
+       unsigned long tx_late_collision;
+       unsigned long tx_excessive_deferred;
+       unsigned long tx_non_tcpudp;
+       unsigned long tx_mac_src_error;
+       unsigned long tx_ip_src_error;
+       u64 rx_bytes;
+       u64 rx_good_bytes;
+       u64 rx_bad_bytes;
+       unsigned long rx_packets;
+       unsigned long rx_good;
+       unsigned long rx_bad;
+       unsigned long rx_pause;
+       unsigned long rx_control;
+       unsigned long rx_unicast;
+       unsigned long rx_multicast;
+       unsigned long rx_broadcast;
+       unsigned long rx_lt64;
+       unsigned long rx_64;
+       unsigned long rx_65_to_127;
+       unsigned long rx_128_to_255;
+       unsigned long rx_256_to_511;
+       unsigned long rx_512_to_1023;
+       unsigned long rx_1024_to_15xx;
+       unsigned long rx_15xx_to_jumbo;
+       unsigned long rx_gtjumbo;
+       unsigned long rx_bad_lt64;
+       unsigned long rx_bad_64_to_15xx;
+       unsigned long rx_bad_15xx_to_jumbo;
+       unsigned long rx_bad_gtjumbo;
+       unsigned long rx_overflow;
+       unsigned long rx_missed;
+       unsigned long rx_false_carrier;
+       unsigned long rx_symbol_error;
+       unsigned long rx_align_error;
+       unsigned long rx_length_error;
+       unsigned long rx_internal_error;
+       unsigned long rx_good_lt64;
+};
+
+/* Number of bits used in a multicast filter hash address */
+#define EFX_MCAST_HASH_BITS 8
+
+/* Number of (single-bit) entries in a multicast filter hash */
+#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)
+
+/* An Efx multicast filter hash */
+union efx_multicast_hash {
+       u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
+       efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
+};
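+
+/* With EFX_MCAST_HASH_BITS == 8 this is a 256-bit (32-byte) bitmap,
+ * i.e. two owords, assuming efx_oword_t is 128 bits wide. */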
+
+/**
+ * struct efx_nic - an Efx NIC
+ * @name: Device name (net device name or bus id before net device registered)
+ * @pci_dev: The PCI device
+ * @type: Controller type attributes
+ * @legacy_irq: IRQ number
+ * @workqueue: Workqueue for resets, port reconfigures and the HW monitor
+ * @reset_work: Scheduled reset workitem
+ * @monitor_work: Hardware monitor workitem
+ * @membase_phys: Memory BAR value as physical address
+ * @membase: Memory BAR value
+ * @biu_lock: BIU (bus interface unit) lock
+ * @interrupt_mode: Interrupt mode
+ * @i2c: I2C interface
+ * @board_info: Board-level information
+ * @state: Device state flag. Serialised by the rtnl_lock.
+ * @reset_pending: Pending reset method (normally RESET_TYPE_NONE)
+ * @tx_queue: TX DMA queues
+ * @rx_queue: RX DMA queues
+ * @channel: Channels
+ * @rss_queues: Number of RSS queues
+ * @rx_buffer_len: RX buffer length
+ * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @irq_status: Interrupt status buffer
+ * @last_irq_cpu: Last CPU to handle interrupt.
+ *     This register is written with the SMP processor ID whenever an
+ *     interrupt is handled.  It is used by falcon_test_interrupt()
+ *     to verify that an interrupt has occurred.
+ * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
+ * @nic_data: Hardware-dependent state
+ * @mac_lock: MAC access lock. Protects @port_enabled, efx_monitor() and
+ *     efx_reconfigure_port()
+ * @port_enabled: Port enabled indicator.
+ *     Serialises efx_stop_all(), efx_start_all() and efx_monitor() and
+ *     efx_reconfigure_work with kernel interfaces. Safe to read under any
+ *     one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
+ *     be held to modify it.
+ * @port_initialized: Port initialized?
+ * @net_dev: Operating system network device. Consider holding the rtnl lock
+ * @rx_checksum_enabled: RX checksumming enabled
+ * @netif_stop_count: Port stop count
+ * @netif_stop_lock: Port stop lock
+ * @mac_stats: MAC statistics. These include all statistics the MACs
+ *     can provide.  Generic code converts these into a standard
+ *     &struct net_device_stats.
+ * @stats_buffer: DMA buffer for statistics
+ * @stats_lock: Statistics update lock
+ * @mac_address: Permanent MAC address
+ * @phy_type: PHY type
+ * @phy_lock: PHY access lock
+ * @phy_op: PHY interface
+ * @phy_data: PHY private data (including PHY-specific stats)
+ * @mii: PHY interface
+ * @phy_powered: PHY power state
+ * @tx_disabled: PHY transmitter turned off
+ * @link_up: Link status
+ * @link_options: Link options (MII/GMII format)
+ * @n_link_state_changes: Number of times the link has changed state
+ * @promiscuous: Promiscuous flag. Protected by netif_tx_lock.
+ * @multicast_hash: Multicast hash table
+ * @flow_control: Flow control flags - separate RX/TX so can't use link_options
+ * @reconfigure_work: work item for dealing with PHY events
+ *
+ * The @priv field of the corresponding &struct net_device points to
+ * this.
+ */
+struct efx_nic {
+       char name[IFNAMSIZ];
+       struct pci_dev *pci_dev;
+       const struct efx_nic_type *type;
+       int legacy_irq;
+       struct workqueue_struct *workqueue;
+       struct work_struct reset_work;
+       struct delayed_work monitor_work;
+       unsigned long membase_phys;
+       void __iomem *membase;
+       spinlock_t biu_lock;
+       enum efx_int_mode interrupt_mode;
+
+       struct efx_i2c_interface i2c;
+       struct efx_board board_info;
+
+       enum nic_state state;
+       enum reset_type reset_pending;
+
+       struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES];
+       struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
+       struct efx_channel channel[EFX_MAX_CHANNELS];
+
+       int rss_queues;
+       unsigned int rx_buffer_len;
+       unsigned int rx_buffer_order;
+
+       struct efx_buffer irq_status;
+       volatile signed int last_irq_cpu;
+
+       unsigned n_rx_nodesc_drop_cnt;
+
+       void *nic_data;
+
+       struct mutex mac_lock;
+       int port_enabled;
+
+       int port_initialized;
+       struct net_device *net_dev;
+       int rx_checksum_enabled;
+
+       atomic_t netif_stop_count;
+       spinlock_t netif_stop_lock;
+
+       struct efx_mac_stats mac_stats;
+       struct efx_buffer stats_buffer;
+       spinlock_t stats_lock;
+
+       unsigned char mac_address[ETH_ALEN];
+
+       enum phy_type phy_type;
+       spinlock_t phy_lock;
+       struct efx_phy_operations *phy_op;
+       void *phy_data;
+       struct mii_if_info mii;
+
+       int link_up;
+       unsigned int link_options;
+       unsigned int n_link_state_changes;
+
+       int promiscuous;
+       union efx_multicast_hash multicast_hash;
+       enum efx_fc_type flow_control;
+       struct work_struct reconfigure_work;
+
+       atomic_t rx_reset;
+};
+
+/**
+ * struct efx_nic_type - Efx device type definition
+ * @mem_bar: Memory BAR number
+ * @mem_map_size: Memory BAR mapped size
+ * @txd_ptr_tbl_base: TX descriptor ring base address
+ * @rxd_ptr_tbl_base: RX descriptor ring base address
+ * @buf_tbl_base: Buffer table base address
+ * @evq_ptr_tbl_base: Event queue pointer table base address
+ * @evq_rptr_tbl_base: Event queue read-pointer table base address
+ * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1)
+ * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1)
+ * @evq_size: Event queue size (must be a power of two)
+ * @max_dma_mask: Maximum possible DMA mask
+ * @tx_dma_mask: TX DMA mask
+ * @bug5391_mask: Address mask for bug 5391 workaround
+ * @rx_xoff_thresh: RX FIFO XOFF watermark (bytes)
+ * @rx_xon_thresh: RX FIFO XON watermark (bytes)
+ * @rx_buffer_padding: Padding added to each RX buffer
+ * @max_interrupt_mode: Highest capability interrupt mode supported
+ *     from &enum efx_int_mode.
+ * @phys_addr_channels: Number of channels with physically addressed
+ *     descriptors
+ */
+struct efx_nic_type {
+       unsigned int mem_bar;
+       unsigned int mem_map_size;
+       unsigned int txd_ptr_tbl_base;
+       unsigned int rxd_ptr_tbl_base;
+       unsigned int buf_tbl_base;
+       unsigned int evq_ptr_tbl_base;
+       unsigned int evq_rptr_tbl_base;
+
+       unsigned int txd_ring_mask;
+       unsigned int rxd_ring_mask;
+       unsigned int evq_size;
+       dma_addr_t max_dma_mask;
+       unsigned int tx_dma_mask;
+       unsigned bug5391_mask;
+
+       int rx_xoff_thresh;
+       int rx_xon_thresh;
+       unsigned int rx_buffer_padding;
+       unsigned int max_interrupt_mode;
+       unsigned int phys_addr_channels;
+};
+
+/**************************************************************************
+ *
+ * Prototypes and inline functions
+ *
+ *************************************************************************/
+
+/* Iterate over all used channels */
+#define efx_for_each_channel(_channel, _efx)                           \
+       for (_channel = &_efx->channel[0];                              \
+            _channel < &_efx->channel[EFX_MAX_CHANNELS];               \
+            _channel++)                                                \
+               if (!_channel->used_flags)                              \
+                       continue;                                       \
+               else
+
+/* Iterate over all used channels with interrupts */
+#define efx_for_each_channel_with_interrupt(_channel, _efx)            \
+       for (_channel = &_efx->channel[0];                              \
+            _channel < &_efx->channel[EFX_MAX_CHANNELS];               \
+            _channel++)                                                \
+               if (!(_channel->used_flags && _channel->has_interrupt)) \
+                       continue;                                       \
+               else
+
+/* Iterate over all used TX queues */
+#define efx_for_each_tx_queue(_tx_queue, _efx)                         \
+       for (_tx_queue = &_efx->tx_queue[0];                            \
+            _tx_queue < &_efx->tx_queue[EFX_MAX_TX_QUEUES];            \
+            _tx_queue++)                                               \
+               if (!_tx_queue->used)                                   \
+                       continue;                                       \
+               else
+
+/* Iterate over all TX queues belonging to a channel */
+#define efx_for_each_channel_tx_queue(_tx_queue, _channel)             \
+       for (_tx_queue = &_channel->efx->tx_queue[0];                   \
+            _tx_queue < &_channel->efx->tx_queue[EFX_MAX_TX_QUEUES];   \
+            _tx_queue++)                                               \
+               if ((!_tx_queue->used) ||                               \
+                   (_tx_queue->channel != _channel))                   \
+                       continue;                                       \
+               else
+
+/* Iterate over all used RX queues */
+#define efx_for_each_rx_queue(_rx_queue, _efx)                         \
+       for (_rx_queue = &_efx->rx_queue[0];                            \
+            _rx_queue < &_efx->rx_queue[EFX_MAX_RX_QUEUES];            \
+            _rx_queue++)                                               \
+               if (!_rx_queue->used)                                   \
+                       continue;                                       \
+               else
+
+/* Iterate over all RX queues belonging to a channel */
+#define efx_for_each_channel_rx_queue(_rx_queue, _channel)             \
+       for (_rx_queue = &_channel->efx->rx_queue[0];                   \
+            _rx_queue < &_channel->efx->rx_queue[EFX_MAX_RX_QUEUES];   \
+            _rx_queue++)                                               \
+               if ((!_rx_queue->used) ||                               \
+                   (_rx_queue->channel != _channel))                   \
+                       continue;                                       \
+               else
+
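+/*
+ * Usage sketch for the iterator macros above (illustrative only; the
+ * queue fields referenced are the ones declared earlier in this
+ * header):
+ *
+ *     static unsigned int example_count_rx_fill(struct efx_nic *efx)
+ *     {
+ *             struct efx_channel *channel;
+ *             struct efx_rx_queue *rx_queue;
+ *             unsigned int fill = 0;
+ *
+ *             efx_for_each_channel(channel, efx)
+ *                     efx_for_each_channel_rx_queue(rx_queue, channel)
+ *                             fill += (rx_queue->added_count -
+ *                                      rx_queue->removed_count);
+ *             return fill;
+ *     }
+ */
+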
+/* Returns a pointer to the specified receive buffer in the RX
+ * descriptor queue.
+ */
+static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
+                                                 unsigned int index)
+{
+       return (&rx_queue->buffer[index]);
+}
+
+/* Set bit in a little-endian bitfield */
+static inline void set_bit_le(int nr, unsigned char *addr)
+{
+       addr[nr / 8] |= (1 << (nr % 8));
+}
+
+/* Clear bit in a little-endian bitfield */
+static inline void clear_bit_le(int nr, unsigned char *addr)
+{
+       addr[nr / 8] &= ~(1 << (nr % 8));
+}
+
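+/*
+ * Worked example: set_bit_le(10, addr) touches addr[10 / 8] = addr[1]
+ * and ORs in (1 << (10 % 8)) = 0x04, i.e. bit 2 of the second byte.
+ * This byte-wise little-endian numbering is what the multicast hash
+ * above expects, with filter bit N living in byte N / 8.
+ */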
+
+/**
+ * EFX_MAX_FRAME_LEN - calculate maximum frame length
+ *
+ * This calculates the maximum frame length that will be used for a
+ * given MTU.  The frame length will be equal to the MTU plus a
+ * constant amount of header space and padding.  This is the quantity
+ * that the net driver will program into the MAC as the maximum frame
+ * length.
+ *
+ * The 10G MAC used in Falcon requires 8-byte alignment on the frame
+ * length, so we round up to the nearest 8.
+ */
+#define EFX_MAX_FRAME_LEN(mtu) \
+       ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */) + 7) & ~7)
+
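+/*
+ * Worked example: for the standard MTU of 1500 this gives
+ * 1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 4 (FCS) = 1522 bytes, and the
+ * "+ 7 ... & ~7" step rounds that up to 1528, the 8-byte aligned value
+ * programmed into the MAC.
+ */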
+
+#endif /* EFX_NET_DRIVER_H */
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
new file mode 100644 (file)
index 0000000..9d02c84
--- /dev/null
@@ -0,0 +1,48 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2007 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_PHY_H
+#define EFX_PHY_H
+
+/****************************************************************************
+ * 10Xpress (SFX7101) PHY
+ */
+extern struct efx_phy_operations falcon_tenxpress_phy_ops;
+
+enum tenxpress_state {
+       TENXPRESS_STATUS_OFF = 0,
+       TENXPRESS_STATUS_OTEMP = 1,
+       TENXPRESS_STATUS_NORMAL = 2,
+};
+
+extern void tenxpress_set_state(struct efx_nic *efx,
+                               enum tenxpress_state state);
+extern void tenxpress_phy_blink(struct efx_nic *efx, int blink);
+extern void tenxpress_crc_err(struct efx_nic *efx);
+
+/****************************************************************************
+ * Exported functions from the driver for XFP optical PHYs
+ */
+extern struct efx_phy_operations falcon_xfp_phy_ops;
+
+/* The QUAKE XFP PHY provides various H/W control states for LEDs */
+#define QUAKE_LED_LINK_INVAL   (0)
+#define QUAKE_LED_LINK_STAT    (1)
+#define QUAKE_LED_LINK_ACT     (2)
+#define QUAKE_LED_LINK_ACTSTAT (3)
+#define QUAKE_LED_OFF          (4)
+#define QUAKE_LED_ON           (5)
+#define QUAKE_LED_LINK_INPUT   (6)     /* Pin is an input. */
+/* What link the LED tracks */
+#define QUAKE_LED_TXLINK       (0)
+#define QUAKE_LED_RXLINK       (8)
+
+extern void xfp_set_led(struct efx_nic *p, int led, int state);
+
+#endif
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
new file mode 100644 (file)
index 0000000..551299b
--- /dev/null
@@ -0,0 +1,875 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include "net_driver.h"
+#include "rx.h"
+#include "efx.h"
+#include "falcon.h"
+#include "workarounds.h"
+
+/* Number of RX descriptors pushed at once. */
+#define EFX_RX_BATCH  8
+
+/* Size of buffer allocated for skb header area. */
+#define EFX_SKB_HEADERS  64u
+
+/*
+ * rx_alloc_method - RX buffer allocation method
+ *
+ * This driver supports two methods for allocating and using RX buffers:
+ * each RX buffer may be backed by an skb or by an order-n page.
+ *
+ * When LRO is in use then the second method has a lower overhead,
+ * since we don't have to allocate then free skbs on reassembled frames.
+ *
+ * Values:
+ *   - RX_ALLOC_METHOD_AUTO = 0
+ *   - RX_ALLOC_METHOD_SKB  = 1
+ *   - RX_ALLOC_METHOD_PAGE = 2
+ *
+ * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
+ * controlled by the parameters below.
+ *
+ *   - Pushing and popping descriptors are separated by the rx_queue
+ *     size, so the watermarks should be ~rxd_size.
+ *   - The performance win from using page-based allocation for LRO is
+ *     less than the performance hit of using page-based allocation for
+ *     non-LRO traffic, so the watermarks should reflect this.
+ *
+ * Per channel we maintain a single variable, updated by each channel:
+ *
+ *   rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
+ *                      RX_ALLOC_FACTOR_SKB)
+ * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
+ * limits the hysteresis), and update the allocation strategy:
+ *
+ *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
+ *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
+ */
+static int rx_alloc_method = RX_ALLOC_METHOD_PAGE;
+
+#define RX_ALLOC_LEVEL_LRO 0x2000
+#define RX_ALLOC_LEVEL_MAX 0x3000
+#define RX_ALLOC_FACTOR_LRO 1
+#define RX_ALLOC_FACTOR_SKB (-2)
+
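+/*
+ * Worked example of the hysteresis: every frame accepted by the LRO
+ * header checks adds RX_ALLOC_FACTOR_LRO (+1) to rx_alloc_level and
+ * every frame delivered as a plain skb adds RX_ALLOC_FACTOR_SKB (-2).
+ * Once the level climbs above RX_ALLOC_LEVEL_LRO (0x2000) the channel
+ * switches to page-based allocation; a sustained run of non-LRO
+ * traffic drags the level back towards zero and flips it back to skb
+ * allocation.  Clamping the level to [0, RX_ALLOC_LEVEL_MAX] each NAPI
+ * poll bounds how far behind a change in traffic the switch can lag.
+ */
+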
+/* This is the percentage fill level below which new RX descriptors
+ * will be added to the RX descriptor ring.
+ */
+static unsigned int rx_refill_threshold = 90;
+
+/* This is the percentage fill level to which an RX queue will be refilled
+ * when the "RX refill threshold" is reached.
+ */
+static unsigned int rx_refill_limit = 95;
+
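+/*
+ * Worked example, assuming a 4096-entry descriptor ring: after the
+ * EFX_RXD_HEAD_ROOM slots reserved below are subtracted,
+ * efx_init_rx_queue() computes max_fill = 4094, a fast-fill trigger of
+ * 4094 * 90 / 100 = 3684 and a fast-fill limit of
+ * 4094 * 95 / 100 = 3889.  A refill therefore starts once fewer than
+ * 3684 buffers are outstanding and tops the ring back up to 3889.
+ */
+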
+/*
+ * RX maximum head room required.
+ *
+ * This must be at least 1 to prevent overflow and at least 2 to allow
+ * pipelined receives.
+ */
+#define EFX_RXD_HEAD_ROOM 2
+
+/* Macros for zero-order pages (potentially) containing multiple RX buffers */
+#define RX_DATA_OFFSET(_data)                          \
+       (((unsigned long) (_data)) & (PAGE_SIZE-1))
+#define RX_BUF_OFFSET(_rx_buf)                         \
+       RX_DATA_OFFSET((_rx_buf)->data)
+
+#define RX_PAGE_SIZE(_efx)                             \
+       (PAGE_SIZE * (1u << (_efx)->rx_buffer_order))
+
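+/*
+ * Example: with 4 KB pages and rx_buffer_order = 1, RX_PAGE_SIZE()
+ * evaluates to 4096 * (1 << 1) = 8192 bytes, while RX_DATA_OFFSET()
+ * masks a data pointer ending in 0x1a40 with (PAGE_SIZE - 1) to give
+ * an offset of 0xa40 within its 4 KB page.
+ */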
+
+/**************************************************************************
+ *
+ * Linux generic LRO handling
+ *
+ **************************************************************************
+ */
+
+static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr,
+                              void **tcpudp_hdr, u64 *hdr_flags, void *priv)
+{
+       struct efx_channel *channel = (struct efx_channel *)priv;
+       struct iphdr *iph;
+       struct tcphdr *th;
+
+       iph = (struct iphdr *)skb->data;
+       if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP)
+               goto fail;
+
+       th = (struct tcphdr *)(skb->data + iph->ihl * 4);
+
+       *tcpudp_hdr = th;
+       *ip_hdr = iph;
+       *hdr_flags = LRO_IPV4 | LRO_TCP;
+
+       channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
+       return 0;
+fail:
+       channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
+       return -1;
+}
+
+static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr,
+                           void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags,
+                           void *priv)
+{
+       struct efx_channel *channel = (struct efx_channel *)priv;
+       struct ethhdr *eh;
+       struct iphdr *iph;
+
+       /* We support EtherII and VLAN encapsulated IPv4 */
+       eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset);
+       *mac_hdr = eh;
+
+       if (eh->h_proto == htons(ETH_P_IP)) {
+               iph = (struct iphdr *)(eh + 1);
+       } else {
+               struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh;
+               if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP))
+                       goto fail;
+
+               iph = (struct iphdr *)(veh + 1);
+       }
+       *ip_hdr = iph;
+
+       /* We can only do LRO over TCP */
+       if (iph->protocol != IPPROTO_TCP)
+               goto fail;
+
+       *hdr_flags = LRO_IPV4 | LRO_TCP;
+       *tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4);
+
+       channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
+       return 0;
+ fail:
+       channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
+       return -1;
+}
+
+int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx)
+{
+       size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS;
+       struct net_lro_desc *lro_arr;
+
+       /* Allocate the LRO descriptors structure */
+       lro_arr = kzalloc(s, GFP_KERNEL);
+       if (lro_arr == NULL)
+               return -ENOMEM;
+
+       lro_mgr->lro_arr = lro_arr;
+       lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS;
+       lro_mgr->max_aggr = EFX_MAX_LRO_AGGR;
+       lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN;
+
+       lro_mgr->get_skb_header = efx_lro_get_skb_hdr;
+       lro_mgr->get_frag_header = efx_get_frag_hdr;
+       lro_mgr->dev = efx->net_dev;
+
+       lro_mgr->features = LRO_F_NAPI;
+
+       /* We can pass packets up with the checksum intact */
+       lro_mgr->ip_summed = CHECKSUM_UNNECESSARY;
+
+       lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY;
+
+       return 0;
+}
+
+void efx_lro_fini(struct net_lro_mgr *lro_mgr)
+{
+       kfree(lro_mgr->lro_arr);
+       lro_mgr->lro_arr = NULL;
+}
+
+/**
+ * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation
+ *
+ * @rx_queue:          Efx RX queue
+ * @rx_buf:            RX buffer structure to populate
+ *
+ * This allocates memory for a new receive buffer, maps it for DMA,
+ * and populates a struct efx_rx_buffer with the relevant
+ * information.  Return a negative error code or 0 on success.
+ */
+static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
+                                        struct efx_rx_buffer *rx_buf)
+{
+       struct efx_nic *efx = rx_queue->efx;
+       struct net_device *net_dev = efx->net_dev;
+       int skb_len = efx->rx_buffer_len;
+
+       rx_buf->skb = netdev_alloc_skb(net_dev, skb_len);
+       if (unlikely(!rx_buf->skb))
+               return -ENOMEM;
+
+       /* Adjust the SKB for padding and checksum */
+       skb_reserve(rx_buf->skb, NET_IP_ALIGN);
+       rx_buf->len = skb_len - NET_IP_ALIGN;
+       rx_buf->data = (char *)rx_buf->skb->data;
+       rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+       rx_buf->dma_addr = pci_map_single(efx->pci_dev,
+                                         rx_buf->data, rx_buf->len,
+                                         PCI_DMA_FROMDEVICE);
+
+       if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
+               dev_kfree_skb_any(rx_buf->skb);
+               rx_buf->skb = NULL;
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/**
+ * efx_init_rx_buffer_page - create new RX buffer using page-based allocation
+ *
+ * @rx_queue:          Efx RX queue
+ * @rx_buf:            RX buffer structure to populate
+ *
+ * This allocates memory for a new receive buffer, maps it for DMA,
+ * and populates a struct efx_rx_buffer with the relevant
+ * information.  Return a negative error code or 0 on success.
+ */
+static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
+                                         struct efx_rx_buffer *rx_buf)
+{
+       struct efx_nic *efx = rx_queue->efx;
+       int bytes, space, offset;
+
+       bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
+
+       /* If there is space left in the previously allocated page,
+        * then use it. Otherwise allocate a new one */
+       rx_buf->page = rx_queue->buf_page;
+       if (rx_buf->page == NULL) {
+               dma_addr_t dma_addr;
+
+               rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
+                                          efx->rx_buffer_order);
+               if (unlikely(rx_buf->page == NULL))
+                       return -ENOMEM;
+
+               dma_addr = pci_map_page(efx->pci_dev, rx_buf->page,
+                                       0, RX_PAGE_SIZE(efx),
+                                       PCI_DMA_FROMDEVICE);
+
+               if (unlikely(pci_dma_mapping_error(dma_addr))) {
+                       __free_pages(rx_buf->page, efx->rx_buffer_order);
+                       rx_buf->page = NULL;
+                       return -EIO;
+               }
+
+               rx_queue->buf_page = rx_buf->page;
+               rx_queue->buf_dma_addr = dma_addr;
+               rx_queue->buf_data = ((char *) page_address(rx_buf->page) +
+                                     EFX_PAGE_IP_ALIGN);
+       }
+
+       offset = RX_DATA_OFFSET(rx_queue->buf_data);
+       rx_buf->len = bytes;
+       rx_buf->dma_addr = rx_queue->buf_dma_addr + offset;
+       rx_buf->data = rx_queue->buf_data;
+
+       /* Try to pack multiple buffers per page */
+       if (efx->rx_buffer_order == 0) {
+               /* The next buffer starts on the next 512 byte boundary */
+               rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff);
+               offset += ((bytes + 0x1ff) & ~0x1ff);
+
+               space = RX_PAGE_SIZE(efx) - offset;
+               if (space >= bytes) {
+                       /* Refs dropped on kernel releasing each skb */
+                       get_page(rx_queue->buf_page);
+                       goto out;
+               }
+       }
+
+       /* This is the final RX buffer for this page, so mark it for
+        * unmapping */
+       rx_queue->buf_page = NULL;
+       rx_buf->unmap_addr = rx_queue->buf_dma_addr;
+
+ out:
+       return 0;
+}
+
+/* This allocates memory for a new receive buffer, maps it for DMA,
+ * and populates a struct efx_rx_buffer with the relevant
+ * information.
+ */
+static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue,
+                                    struct efx_rx_buffer *new_rx_buf)
+{
+       int rc = 0;
+
+       if (rx_queue->channel->rx_alloc_push_pages) {
+               new_rx_buf->skb = NULL;
+               rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf);
+               rx_queue->alloc_page_count++;
+       } else {
+               new_rx_buf->page = NULL;
+               rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf);
+               rx_queue->alloc_skb_count++;
+       }
+
+       if (unlikely(rc < 0))
+               EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__,
+                          rx_queue->queue, rc);
+       return rc;
+}
+
+static inline void efx_unmap_rx_buffer(struct efx_nic *efx,
+                                      struct efx_rx_buffer *rx_buf)
+{
+       if (rx_buf->page) {
+               EFX_BUG_ON_PARANOID(rx_buf->skb);
+               if (rx_buf->unmap_addr) {
+                       pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr,
+                                      RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE);
+                       rx_buf->unmap_addr = 0;
+               }
+       } else if (likely(rx_buf->skb)) {
+               pci_unmap_single(efx->pci_dev, rx_buf->dma_addr,
+                                rx_buf->len, PCI_DMA_FROMDEVICE);
+       }
+}
+
+static inline void efx_free_rx_buffer(struct efx_nic *efx,
+                                     struct efx_rx_buffer *rx_buf)
+{
+       if (rx_buf->page) {
+               __free_pages(rx_buf->page, efx->rx_buffer_order);
+               rx_buf->page = NULL;
+       } else if (likely(rx_buf->skb)) {
+               dev_kfree_skb_any(rx_buf->skb);
+               rx_buf->skb = NULL;
+       }
+}
+
+static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
+                                     struct efx_rx_buffer *rx_buf)
+{
+       efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+       efx_free_rx_buffer(rx_queue->efx, rx_buf);
+}
+
+/**
+ * __efx_fast_push_rx_descriptors - push new RX descriptors quickly
+ * @rx_queue:          RX descriptor queue
+ * @retry:              Recheck the fill level
+ * This will aim to fill the RX descriptor queue up to
+ * @rx_queue->fast_fill_limit. If there is insufficient atomic
+ * memory to do so, the caller should retry.
+ */
+static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
+                                         int retry)
+{
+       struct efx_rx_buffer *rx_buf;
+       unsigned fill_level, index;
+       int i, space, rc = 0;
+
+       /* Calculate current fill level.  Do this outside the lock,
+        * because most of the time we'll end up not wanting to do the
+        * fill anyway.
+        */
+       fill_level = (rx_queue->added_count - rx_queue->removed_count);
+       EFX_BUG_ON_PARANOID(fill_level >
+                           rx_queue->efx->type->rxd_ring_mask + 1);
+
+       /* Don't fill if we don't need to */
+       if (fill_level >= rx_queue->fast_fill_trigger)
+               return 0;
+
+       /* Record minimum fill level */
+       if (unlikely(fill_level < rx_queue->min_fill))
+               if (fill_level)
+                       rx_queue->min_fill = fill_level;
+
+       /* Acquire RX add lock.  If this lock is contended, then a fast
+        * fill must already be in progress (e.g. in the refill
+        * tasklet), so we don't need to do anything
+        */
+       if (!spin_trylock_bh(&rx_queue->add_lock))
+               return -1;
+
+ retry:
+       /* Recalculate current fill level now that we have the lock */
+       fill_level = (rx_queue->added_count - rx_queue->removed_count);
+       EFX_BUG_ON_PARANOID(fill_level >
+                           rx_queue->efx->type->rxd_ring_mask + 1);
+       space = rx_queue->fast_fill_limit - fill_level;
+       if (space < EFX_RX_BATCH)
+               goto out_unlock;
+
+       EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
+                 " level %d to level %d using %s allocation\n",
+                 rx_queue->queue, fill_level, rx_queue->fast_fill_limit,
+                 rx_queue->channel->rx_alloc_push_pages ? "page" : "skb");
+
+       do {
+               for (i = 0; i < EFX_RX_BATCH; ++i) {
+                       index = (rx_queue->added_count &
+                                rx_queue->efx->type->rxd_ring_mask);
+                       rx_buf = efx_rx_buffer(rx_queue, index);
+                       rc = efx_init_rx_buffer(rx_queue, rx_buf);
+                       if (unlikely(rc))
+                               goto out;
+                       ++rx_queue->added_count;
+               }
+       } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
+
+       EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring "
+                 "to level %d\n", rx_queue->queue,
+                 rx_queue->added_count - rx_queue->removed_count);
+
+ out:
+       /* Send write pointer to card. */
+       falcon_notify_rx_desc(rx_queue);
+
+       /* If the fast fill is running from inside the refill tasklet, then
+        * for SMP systems it may be running on a different CPU to
+        * RX event processing, which means that the fill level may now be
+        * out of date. */
+       if (unlikely(retry && (rc == 0)))
+               goto retry;
+
+ out_unlock:
+       spin_unlock_bh(&rx_queue->add_lock);
+
+       return rc;
+}
+
+/**
+ * efx_fast_push_rx_descriptors - push new RX descriptors quickly
+ * @rx_queue:          RX descriptor queue
+ *
+ * This will aim to fill the RX descriptor queue up to
+ * @rx_queue->fast_fill_limit.  If there is insufficient memory to do so,
+ * it will schedule a work item to immediately continue the fast fill
+ */
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
+{
+       int rc;
+
+       rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
+       if (unlikely(rc)) {
+               /* Schedule the work item to run immediately. The hope is
+                * that work is immediately pending to free some memory
+                * (e.g. an RX event or TX completion)
+                */
+               efx_schedule_slow_fill(rx_queue, 0);
+       }
+}
+
+void efx_rx_work(struct work_struct *data)
+{
+       struct efx_rx_queue *rx_queue;
+       int rc;
+
+       rx_queue = container_of(data, struct efx_rx_queue, work.work);
+
+       if (unlikely(!rx_queue->channel->enabled))
+               return;
+
+       EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
+                 "%d\n", rx_queue->queue, raw_smp_processor_id());
+
+       ++rx_queue->slow_fill_count;
+       /* Push new RX descriptors, allowing at least 1 jiffy for
+        * the kernel to free some more memory. */
+       rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
+       if (rc)
+               efx_schedule_slow_fill(rx_queue, 1);
+}
+
+static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
+                                           struct efx_rx_buffer *rx_buf,
+                                           int len, int *discard,
+                                           int *leak_packet)
+{
+       struct efx_nic *efx = rx_queue->efx;
+       unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
+
+       if (likely(len <= max_len))
+               return;
+
+       /* The packet must be discarded, but this is only a fatal error
+        * if the caller indicated it was
+        */
+       *discard = 1;
+
+       if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
+               EFX_ERR_RL(efx, " RX queue %d seriously overlength "
+                          "RX event (0x%x > 0x%x+0x%x). Leaking\n",
+                          rx_queue->queue, len, max_len,
+                          efx->type->rx_buffer_padding);
+               /* If this buffer was skb-allocated, then the meta
+                * data at the end of the skb will be trashed. So
+                * we have no choice but to leak the fragment.
+                */
+               *leak_packet = (rx_buf->skb != NULL);
+               efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
+       } else {
+               EFX_ERR_RL(efx, " RX queue %d overlength RX event "
+                          "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len);
+       }
+
+       rx_queue->channel->n_rx_overlength++;
+}
+
+/* Pass a received packet up through the generic LRO stack
+ *
+ * Handles driverlink veto, and passes the fragment up via
+ * the appropriate LRO method
+ */
+static inline void efx_rx_packet_lro(struct efx_channel *channel,
+                                    struct efx_rx_buffer *rx_buf)
+{
+       struct net_lro_mgr *lro_mgr = &channel->lro_mgr;
+       void *priv = channel;
+
+       /* Pass the skb/page into the LRO engine */
+       if (rx_buf->page) {
+               struct skb_frag_struct frags;
+
+               frags.page = rx_buf->page;
+               frags.page_offset = RX_BUF_OFFSET(rx_buf);
+               frags.size = rx_buf->len;
+
+               lro_receive_frags(lro_mgr, &frags, rx_buf->len,
+                                 rx_buf->len, priv, 0);
+
+               EFX_BUG_ON_PARANOID(rx_buf->skb);
+               rx_buf->page = NULL;
+       } else {
+               EFX_BUG_ON_PARANOID(!rx_buf->skb);
+
+               lro_receive_skb(lro_mgr, rx_buf->skb, priv);
+               rx_buf->skb = NULL;
+       }
+}
+
+/* Allocate and construct an SKB around a struct page.*/
+static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf,
+                                           struct efx_nic *efx,
+                                           int hdr_len)
+{
+       struct sk_buff *skb;
+
+       /* Allocate an SKB to store the headers */
+       skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN);
+       if (unlikely(skb == NULL)) {
+               EFX_ERR_RL(efx, "RX out of memory for skb\n");
+               return NULL;
+       }
+
+       EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags);
+       EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
+
+       skb->ip_summed = CHECKSUM_UNNECESSARY;
+       skb_reserve(skb, EFX_PAGE_SKB_ALIGN);
+
+       skb->len = rx_buf->len;
+       skb->truesize = rx_buf->len + sizeof(struct sk_buff);
+       memcpy(skb->data, rx_buf->data, hdr_len);
+       skb->tail += hdr_len;
+
+       /* Append the remaining page onto the frag list */
+       if (unlikely(rx_buf->len > hdr_len)) {
+               struct skb_frag_struct *frag = skb_shinfo(skb)->frags;
+               frag->page = rx_buf->page;
+               frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len;
+               frag->size = skb->len - hdr_len;
+               skb_shinfo(skb)->nr_frags = 1;
+               skb->data_len = frag->size;
+       } else {
+               __free_pages(rx_buf->page, efx->rx_buffer_order);
+               skb->data_len = 0;
+       }
+
+       /* Ownership has transferred from the rx_buf to skb */
+       rx_buf->page = NULL;
+
+       /* Move past the ethernet header */
+       skb->protocol = eth_type_trans(skb, efx->net_dev);
+
+       return skb;
+}
+
+void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
+                  unsigned int len, int checksummed, int discard)
+{
+       struct efx_nic *efx = rx_queue->efx;
+       struct efx_rx_buffer *rx_buf;
+       int leak_packet = 0;
+
+       rx_buf = efx_rx_buffer(rx_queue, index);
+       EFX_BUG_ON_PARANOID(!rx_buf->data);
+       EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page);
+       EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page));
+
+       /* This allows the refill path to post another buffer.
+        * EFX_RXD_HEAD_ROOM ensures that the slot we are using
+        * isn't overwritten yet.
+        */
+       rx_queue->removed_count++;
+
+       /* Validate the length encoded in the event vs the descriptor pushed */
+       efx_rx_packet__check_len(rx_queue, rx_buf, len,
+                                &discard, &leak_packet);
+
+       EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n",
+                 rx_queue->queue, index,
+                 (unsigned long long)rx_buf->dma_addr, len,
+                 (checksummed ? " [SUMMED]" : ""),
+                 (discard ? " [DISCARD]" : ""));
+
+       /* Discard packet, if instructed to do so */
+       if (unlikely(discard)) {
+               if (unlikely(leak_packet))
+                       rx_queue->channel->n_skbuff_leaks++;
+               else
+                       /* We haven't called efx_unmap_rx_buffer yet,
+                        * so fini the entire rx_buffer here */
+                       efx_fini_rx_buffer(rx_queue, rx_buf);
+               return;
+       }
+
+       /* Release card resources - assumes all RX buffers consumed in-order
+        * per RX queue
+        */
+       efx_unmap_rx_buffer(efx, rx_buf);
+
+       /* Prefetch nice and early so data will (hopefully) be in cache by
+        * the time we look at it.
+        */
+       prefetch(rx_buf->data);
+
+       /* Pipeline receives so that we give time for packet headers to be
+        * prefetched into cache.
+        */
+       rx_buf->len = len;
+       if (rx_queue->channel->rx_pkt)
+               __efx_rx_packet(rx_queue->channel,
+                               rx_queue->channel->rx_pkt,
+                               rx_queue->channel->rx_pkt_csummed);
+       rx_queue->channel->rx_pkt = rx_buf;
+       rx_queue->channel->rx_pkt_csummed = checksummed;
+}
+
+/* Handle a received packet.  Second half: Touches packet payload. */
+void __efx_rx_packet(struct efx_channel *channel,
+                    struct efx_rx_buffer *rx_buf, int checksummed)
+{
+       struct efx_nic *efx = channel->efx;
+       struct sk_buff *skb;
+       int lro = efx->net_dev->features & NETIF_F_LRO;
+
+       if (rx_buf->skb) {
+               prefetch(skb_shinfo(rx_buf->skb));
+
+               skb_put(rx_buf->skb, rx_buf->len);
+
+               /* Move past the ethernet header. rx_buf->data still points
+                * at the ethernet header */
+               rx_buf->skb->protocol = eth_type_trans(rx_buf->skb,
+                                                      efx->net_dev);
+       }
+
+       /* Both our generic-LRO and SFC-SSR support skb and page based
+        * allocation, but neither support switching from one to the
+        * other on the fly. If we spot that the allocation mode has
+        * changed, then flush the LRO state.
+        */
+       if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) {
+               efx_flush_lro(channel);
+               channel->rx_alloc_pop_pages = (rx_buf->page != NULL);
+       }
+       if (likely(checksummed && lro)) {
+               efx_rx_packet_lro(channel, rx_buf);
+               goto done;
+       }
+
+       /* Form an skb if required */
+       if (rx_buf->page) {
+               int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS);
+               skb = efx_rx_mk_skb(rx_buf, efx, hdr_len);
+               if (unlikely(skb == NULL)) {
+                       efx_free_rx_buffer(efx, rx_buf);
+                       goto done;
+               }
+       } else {
+               /* We now own the SKB */
+               skb = rx_buf->skb;
+               rx_buf->skb = NULL;
+       }
+
+       EFX_BUG_ON_PARANOID(rx_buf->page);
+       EFX_BUG_ON_PARANOID(rx_buf->skb);
+       EFX_BUG_ON_PARANOID(!skb);
+
+       /* Set the SKB flags */
+       if (unlikely(!checksummed || !efx->rx_checksum_enabled))
+               skb->ip_summed = CHECKSUM_NONE;
+
+       /* Pass the packet up */
+       netif_receive_skb(skb);
+
+       /* Update allocation strategy method */
+       channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
+
+       /* fall-thru */
+done:
+       efx->net_dev->last_rx = jiffies;
+}
+
+void efx_rx_strategy(struct efx_channel *channel)
+{
+       enum efx_rx_alloc_method method = rx_alloc_method;
+
+       /* Only makes sense to use page based allocation if LRO is enabled */
+       if (!(channel->efx->net_dev->features & NETIF_F_LRO)) {
+               method = RX_ALLOC_METHOD_SKB;
+       } else if (method == RX_ALLOC_METHOD_AUTO) {
+               /* Constrain the rx_alloc_level */
+               if (channel->rx_alloc_level < 0)
+                       channel->rx_alloc_level = 0;
+               else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
+                       channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
+
+               /* Decide on the allocation method */
+               method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
+                         RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
+       }
+
+       /* Push the option */
+       channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
+}
+
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
+{
+       struct efx_nic *efx = rx_queue->efx;
+       unsigned int rxq_size;
+       int rc;
+
+       EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
+
+       /* Allocate RX buffers */
+       rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer);
+       rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
+       if (!rx_queue->buffer) {
+               rc = -ENOMEM;
+               goto fail1;
+       }
+
+       rc = falcon_probe_rx(rx_queue);
+       if (rc)
+               goto fail2;
+
+       return 0;
+
+ fail2:
+       kfree(rx_queue->buffer);
+       rx_queue->buffer = NULL;
+ fail1:
+       rx_queue->used = 0;
+
+       return rc;
+}
+
+int efx_init_rx_queue(struct efx_rx_queue *rx_queue)
+{
+       struct efx_nic *efx = rx_queue->efx;
+       unsigned int max_fill, trigger, limit;
+
+       EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
+
+       /* Initialise ptr fields */
+       rx_queue->added_count = 0;
+       rx_queue->notified_count = 0;
+       rx_queue->removed_count = 0;
+       rx_queue->min_fill = -1U;
+       rx_queue->min_overfill = -1U;
+
+       /* Initialise limit fields */
+       max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM;
+       trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
+       limit = max_fill * min(rx_refill_limit, 100U) / 100U;
+
+       rx_queue->max_fill = max_fill;
+       rx_queue->fast_fill_trigger = trigger;
+       rx_queue->fast_fill_limit = limit;
+
+       /* Set up RX descriptor ring */
+       return falcon_init_rx(rx_queue);
+}
+
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
+{
+       int i;
+       struct efx_rx_buffer *rx_buf;
+
+       EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
+
+       falcon_fini_rx(rx_queue);
+
+       /* Release RX buffers.  NB: start at index 0, not the current HW ptr */
+       if (rx_queue->buffer) {
+               for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) {
+                       rx_buf = efx_rx_buffer(rx_queue, i);
+                       efx_fini_rx_buffer(rx_queue, rx_buf);
+               }
+       }
+
+       /* For a page that is part-way through splitting into RX buffers */
+       if (rx_queue->buf_page != NULL) {
+               pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr,
+                              RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE);
+               __free_pages(rx_queue->buf_page,
+                            rx_queue->efx->rx_buffer_order);
+               rx_queue->buf_page = NULL;
+       }
+}
+
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
+{
+       EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue);
+
+       falcon_remove_rx(rx_queue);
+
+       kfree(rx_queue->buffer);
+       rx_queue->buffer = NULL;
+       rx_queue->used = 0;
+}
+
+void efx_flush_lro(struct efx_channel *channel)
+{
+       lro_flush_all(&channel->lro_mgr);
+}
+
+
+module_param(rx_alloc_method, int, 0644);
+MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
+
+module_param(rx_refill_threshold, uint, 0444);
+MODULE_PARM_DESC(rx_refill_threshold,
+                "RX descriptor ring fast/slow fill threshold (%)");
+
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h
new file mode 100644 (file)
index 0000000..f35e377
--- /dev/null
@@ -0,0 +1,29 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2006 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_RX_H
+#define EFX_RX_H
+
+#include "net_driver.h"
+
+int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
+int efx_init_rx_queue(struct efx_rx_queue *rx_queue);
+void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
+
+int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx);
+void efx_lro_fini(struct net_lro_mgr *lro_mgr);
+void efx_flush_lro(struct efx_channel *channel);
+void efx_rx_strategy(struct efx_channel *channel);
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
+void efx_rx_work(struct work_struct *data);
+void __efx_rx_packet(struct efx_channel *channel,
+                    struct efx_rx_buffer *rx_buf, int checksummed);
+
+#endif /* EFX_RX_H */
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c
new file mode 100644 (file)
index 0000000..11fa9fb
--- /dev/null
@@ -0,0 +1,252 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2007 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+/*****************************************************************************
+ * Support for the SFE4001 NIC: driver code for the PCA9539 I/O expander that
+ * controls the PHY power rails, and for the MAX6647 temp. sensor used to check
+ * the PHY
+ */
+#include <linux/delay.h>
+#include "efx.h"
+#include "phy.h"
+#include "boards.h"
+#include "falcon.h"
+#include "falcon_hwdefs.h"
+#include "mac.h"
+
+/**************************************************************************
+ *
+ * I2C IO Expander device
+ *
+ **************************************************************************/
+#define        PCA9539 0x74
+
+#define        P0_IN 0x00
+#define        P0_OUT 0x02
+#define        P0_INVERT 0x04
+#define        P0_CONFIG 0x06
+
+#define        P0_EN_1V0X_LBN 0
+#define        P0_EN_1V0X_WIDTH 1
+#define        P0_EN_1V2_LBN 1
+#define        P0_EN_1V2_WIDTH 1
+#define        P0_EN_2V5_LBN 2
+#define        P0_EN_2V5_WIDTH 1
+#define        P0_EN_3V3X_LBN 3
+#define        P0_EN_3V3X_WIDTH 1
+#define        P0_EN_5V_LBN 4
+#define        P0_EN_5V_WIDTH 1
+#define        P0_SHORTEN_JTAG_LBN 5
+#define        P0_SHORTEN_JTAG_WIDTH 1
+#define        P0_X_TRST_LBN 6
+#define        P0_X_TRST_WIDTH 1
+#define        P0_DSP_RESET_LBN 7
+#define        P0_DSP_RESET_WIDTH 1
+
+#define        P1_IN 0x01
+#define        P1_OUT 0x03
+#define        P1_INVERT 0x05
+#define        P1_CONFIG 0x07
+
+#define        P1_AFE_PWD_LBN 0
+#define        P1_AFE_PWD_WIDTH 1
+#define        P1_DSP_PWD25_LBN 1
+#define        P1_DSP_PWD25_WIDTH 1
+#define        P1_RESERVED_LBN 2
+#define        P1_RESERVED_WIDTH 2
+#define        P1_SPARE_LBN 4
+#define        P1_SPARE_WIDTH 4
+
+
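+/*
+ * The *_LBN/*_WIDTH pairs above name the lowest bit number and width
+ * of each field in the expander's port registers.  Illustrative sketch
+ * of composing a port 0 output value with them; as the power-on/off
+ * code below suggests, the enables appear to be active-low, so
+ * clearing a bit turns the corresponding rail on:
+ *
+ *     u8 out = 0xff & ~((1 << P0_EN_1V2_LBN) |       1.2V rail on
+ *                       (1 << P0_EN_2V5_LBN));       2.5V rail on
+ */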
+/**************************************************************************
+ *
+ * Temperature Sensor
+ *
+ **************************************************************************/
+#define        MAX6647 0x4e
+
+#define        RLTS    0x00
+#define        RLTE    0x01
+#define        RSL     0x02
+#define        RCL     0x03
+#define        RCRA    0x04
+#define        RLHN    0x05
+#define        RLLI    0x06
+#define        RRHI    0x07
+#define        RRLS    0x08
+#define        WCRW    0x0a
+#define        WLHO    0x0b
+#define        WRHA    0x0c
+#define        WRLN    0x0e
+#define        OSHT    0x0f
+#define        REET    0x10
+#define        RIET    0x11
+#define        RWOE    0x19
+#define        RWOI    0x20
+#define        HYS     0x21
+#define        QUEUE   0x22
+#define        MFID    0xfe
+#define        REVID   0xff
+
+/* Status bits */
+#define MAX6647_BUSY   (1 << 7)        /* ADC is converting */
+#define MAX6647_LHIGH  (1 << 6)        /* Local high temp. alarm */
+#define MAX6647_LLOW   (1 << 5)        /* Local low temp. alarm */
+#define MAX6647_RHIGH  (1 << 4)        /* Remote high temp. alarm */
+#define MAX6647_RLOW   (1 << 3)        /* Remote low temp. alarm */
+#define MAX6647_FAULT  (1 << 2)        /* DXN/DXP short/open circuit */
+#define MAX6647_EOT    (1 << 1)        /* Remote junction overtemp. */
+#define MAX6647_IOT    (1 << 0)        /* Local junction overtemp. */
+
+static const u8 xgphy_max_temperature = 90;
+
+void sfe4001_poweroff(struct efx_nic *efx)
+{
+       struct efx_i2c_interface *i2c = &efx->i2c;
+
+       u8 cfg, out, in;
+
+       EFX_INFO(efx, "%s\n", __func__);
+
+       /* Turn off all power rails */
+       out = 0xff;
+       (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
+
+       /* Disable port 1 outputs on IO expander */
+       cfg = 0xff;
+       (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
+
+       /* Disable port 0 outputs on IO expander */
+       cfg = 0xff;
+       (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
+
+       /* Clear any over-temperature alert */
+       (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
+}
+
+/* This board uses an I2C expander to provide power to the PHY, which needs to
+ * be turned on before the PHY can be used.
+ * Context: Process context, rtnl lock held
+ */
+int sfe4001_poweron(struct efx_nic *efx)
+{
+       struct efx_i2c_interface *i2c = &efx->i2c;
+       unsigned int count;
+       int rc;
+       u8 out, in, cfg;
+       efx_dword_t reg;
+
+       /* 10Xpress has fixed-function LED pins, so there is no board-specific
+        * blink code. */
+       efx->board_info.blink = tenxpress_phy_blink;
+
+       /* Ensure that XGXS and XAUI SerDes are held in reset */
+       EFX_POPULATE_DWORD_7(reg, XX_PWRDNA_EN, 1,
+                            XX_PWRDNB_EN, 1,
+                            XX_RSTPLLAB_EN, 1,
+                            XX_RESETA_EN, 1,
+                            XX_RESETB_EN, 1,
+                            XX_RSTXGXSRX_EN, 1,
+                            XX_RSTXGXSTX_EN, 1);
+       falcon_xmac_writel(efx, &reg, XX_PWR_RST_REG_MAC);
+       udelay(10);
+
+       /* Set DSP over-temperature alert threshold */
+       EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature);
+       rc = efx_i2c_write(i2c, MAX6647, WLHO,
+                          &xgphy_max_temperature, 1);
+       if (rc)
+               goto fail1;
+
+       /* Read it back and verify */
+       rc = efx_i2c_read(i2c, MAX6647, RLHN, &in, 1);
+       if (rc)
+               goto fail1;
+       if (in != xgphy_max_temperature) {
+               rc = -EFAULT;
+               goto fail1;
+       }
+
+       /* Clear any previous over-temperature alert */
+       rc = efx_i2c_read(i2c, MAX6647, RSL, &in, 1);
+       if (rc)
+               goto fail1;
+
+       /* Enable port 0 and port 1 outputs on IO expander */
+       cfg = 0x00;
+       rc = efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1);
+       if (rc)
+               goto fail1;
+       cfg = 0xff & ~(1 << P1_SPARE_LBN);
+       rc = efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1);
+       if (rc)
+               goto fail2;
+
+       /* Turn all power off then wait 1 sec. This ensures PHY is reset */
+       out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) |
+                      (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) |
+                      (0 << P0_EN_1V0X_LBN));
+       rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
+       if (rc)
+               goto fail3;
+
+       schedule_timeout_uninterruptible(HZ);
+       count = 0;
+       do {
+               /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */
+               out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) |
+                              (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) |
+                              (1 << P0_X_TRST_LBN));
+
+               rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
+               if (rc)
+                       goto fail3;
+               msleep(10);
+
+               /* Turn on 1V power rail */
+               out &= ~(1 << P0_EN_1V0X_LBN);
+               rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
+               if (rc)
+                       goto fail3;
+
+               EFX_INFO(efx, "waiting for power (attempt %d)...\n", count);
+
+               schedule_timeout_uninterruptible(HZ);
+
+               /* Check DSP is powered */
+               rc = efx_i2c_read(i2c, PCA9539, P1_IN, &in, 1);
+               if (rc)
+                       goto fail3;
+               if (in & (1 << P1_AFE_PWD_LBN))
+                       goto done;
+
+       } while (++count < 20);
+
+       EFX_INFO(efx, "timed out waiting for power\n");
+       rc = -ETIMEDOUT;
+       goto fail3;
+
+done:
+       EFX_INFO(efx, "PHY is powered on\n");
+       return 0;
+
+fail3:
+       /* Turn off all power rails */
+       out = 0xff;
+       (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1);
+       /* Disable port 1 outputs on IO expander */
+       out = 0xff;
+       (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1);
+fail2:
+       /* Disable port 0 outputs on IO expander */
+       out = 0xff;
+       (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1);
+fail1:
+       return rc;
+}
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
new file mode 100644 (file)
index 0000000..34412f3
--- /dev/null
@@ -0,0 +1,71 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005 Fen Systems Ltd.
+ * Copyright 2006 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_SPI_H
+#define EFX_SPI_H
+
+#include "net_driver.h"
+
+/**************************************************************************
+ *
+ * Basic SPI command set and bit definitions
+ *
+ *************************************************************************/
+
+/*
+ * Commands common to all known devices.
+ *
+ */
+
+/* Write status register */
+#define SPI_WRSR 0x01
+
+/* Write data to memory array */
+#define SPI_WRITE 0x02
+
+/* Read data from memory array */
+#define SPI_READ 0x03
+
+/* Reset write enable latch */
+#define SPI_WRDI 0x04
+
+/* Read status register */
+#define SPI_RDSR 0x05
+
+/* Set write enable latch */
+#define SPI_WREN 0x06
+
+/* SST: Enable write to status register */
+#define SPI_SST_EWSR 0x50
+
+/*
+ * Status register bits.  Not all bits are supported on all devices.
+ *
+ */
+
+/* Write-protect pin enabled */
+#define SPI_STATUS_WPEN 0x80
+
+/* Block protection bit 2 */
+#define SPI_STATUS_BP2 0x10
+
+/* Block protection bit 1 */
+#define SPI_STATUS_BP1 0x08
+
+/* Block protection bit 0 */
+#define SPI_STATUS_BP0 0x04
+
+/* State of the write enable latch */
+#define SPI_STATUS_WEN 0x02
+
+/* Device busy flag */
+#define SPI_STATUS_NRDY 0x01
+
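+/*
+ * Typical command sequence for the serial flash/EEPROM parts these
+ * opcodes address (illustrative; the transfer helpers that actually
+ * issue the commands live elsewhere in the driver):
+ *
+ *   1. SPI_WREN                        set the write enable latch
+ *   2. SPI_WRITE + address + data      program up to one page
+ *   3. poll SPI_RDSR until SPI_STATUS_NRDY clears
+ *
+ * Reads are simpler: SPI_READ + address, then clock out data bytes.
+ */
+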
+#endif /* EFX_SPI_H */
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
new file mode 100644 (file)
index 0000000..a2e9f79
--- /dev/null
@@ -0,0 +1,434 @@
+/****************************************************************************
+ * Driver for Solarflare 802.3an compliant PHY
+ * Copyright 2007 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include "efx.h"
+#include "gmii.h"
+#include "mdio_10g.h"
+#include "falcon.h"
+#include "phy.h"
+#include "falcon_hwdefs.h"
+#include "boards.h"
+#include "mac.h"
+
+/* We expect these MMDs to be in the package */
+/* AN not here as mdio_check_mmds() requires STAT2 support */
+#define TENXPRESS_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PMAPMD | \
+                                MDIO_MMDREG_DEVS0_PCS    | \
+                                MDIO_MMDREG_DEVS0_PHYXS)
+
+/* We complain if we fail to see the link partner as 10G capable this many
+ * times in a row (must be > 1 as sampling the autoneg. registers is racy)
+ */
+#define MAX_BAD_LP_TRIES       (5)
+
+/* Extended control register */
+#define        PMA_PMD_XCONTROL_REG 0xc000
+#define        PMA_PMD_LNPGA_POWERDOWN_LBN 8
+#define        PMA_PMD_LNPGA_POWERDOWN_WIDTH 1
+
+/* extended status register */
+#define PMA_PMD_XSTATUS_REG 0xc001
+#define PMA_PMD_XSTAT_FLP_LBN   (12)
+
+/* LED control register */
+#define PMA_PMD_LED_CTRL_REG   (0xc007)
+#define PMA_PMA_LED_ACTIVITY_LBN       (3)
+
+/* LED function override register */
+#define PMA_PMD_LED_OVERR_REG  (0xc009)
+/* Bit positions for different LEDs (there are more but not wired on SFE4001)*/
+#define PMA_PMD_LED_LINK_LBN   (0)
+#define PMA_PMD_LED_SPEED_LBN  (2)
+#define PMA_PMD_LED_TX_LBN     (4)
+#define PMA_PMD_LED_RX_LBN     (6)
+/* Override settings */
+#define        PMA_PMD_LED_AUTO        (0)     /* H/W control */
+#define        PMA_PMD_LED_ON          (1)
+#define        PMA_PMD_LED_OFF         (2)
+#define PMA_PMD_LED_FLASH      (3)
+/* All LEDs under hardware control */
+#define PMA_PMD_LED_FULL_AUTO  (0)
+/* Green and Amber under hardware control, Red off */
+#define PMA_PMD_LED_DEFAULT    (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN)
+
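Each LED occupies a two-bit field in the override register, so a full override value is built by shifting one of the settings above to each LED's LBN. For example (illustration only, derived directly from the defines above):

/* Illustration only: flash the link LED, force RX/TX LEDs off, leave
 * the speed LED under hardware control. */
int reg = (PMA_PMD_LED_FLASH << PMA_PMD_LED_LINK_LBN) |
          (PMA_PMD_LED_OFF   << PMA_PMD_LED_RX_LBN)   |
          (PMA_PMD_LED_OFF   << PMA_PMD_LED_TX_LBN)   |
          (PMA_PMD_LED_AUTO  << PMA_PMD_LED_SPEED_LBN);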
+
+/* Self test (BIST) control register */
+#define PMA_PMD_BIST_CTRL_REG  (0xc014)
+#define PMA_PMD_BIST_BER_LBN   (2)     /* Run BER test */
+#define PMA_PMD_BIST_CONT_LBN  (1)     /* Run continuous BIST until cleared */
+#define PMA_PMD_BIST_SINGLE_LBN        (0)     /* Run 1 BIST iteration (self clears) */
+/* Self test status register */
+#define PMA_PMD_BIST_STAT_REG  (0xc015)
+#define PMA_PMD_BIST_ENX_LBN   (3)
+#define PMA_PMD_BIST_PMA_LBN   (2)
+#define PMA_PMD_BIST_RXD_LBN   (1)
+#define PMA_PMD_BIST_AFE_LBN   (0)
+
+#define BIST_MAX_DELAY (1000)
+#define BIST_POLL_DELAY        (10)
+
+/* Misc register defines */
+#define PCS_CLOCK_CTRL_REG 0xd801
+#define PLL312_RST_N_LBN 2
+
+#define PCS_SOFT_RST2_REG 0xd806
+#define SERDES_RST_N_LBN 13
+#define XGXS_RST_N_LBN 12
+
+#define        PCS_TEST_SELECT_REG 0xd807      /* PRM 10.5.8 */
+#define        CLK312_EN_LBN 3
+
+/* Boot status register */
+#define PCS_BOOT_STATUS_REG    (0xd000)
+#define PCS_BOOT_FATAL_ERR_LBN (0)
+#define PCS_BOOT_PROGRESS_LBN  (1)
+#define PCS_BOOT_PROGRESS_WIDTH        (2)
+#define PCS_BOOT_COMPLETE_LBN  (3)
+#define PCS_BOOT_MAX_DELAY     (100)
+#define PCS_BOOT_POLL_DELAY    (10)
+
+/* Time to wait between powering down the LNPGA and turning off the power
+ * rails */
+#define LNPGA_PDOWN_WAIT       (HZ / 5)
+
+static int crc_error_reset_threshold = 100;
+module_param(crc_error_reset_threshold, int, 0644);
+MODULE_PARM_DESC(crc_error_reset_threshold,
+                "Max number of CRC errors before XAUI reset");
+
+struct tenxpress_phy_data {
+       enum tenxpress_state state;
+       atomic_t bad_crc_count;
+       int bad_lp_tries;
+};
+
+static int tenxpress_state_is(struct efx_nic *efx, int state)
+{
+       struct tenxpress_phy_data *phy_data = efx->phy_data;
+       return (phy_data != NULL) && (state == phy_data->state);
+}
+
+void tenxpress_set_state(struct efx_nic *efx,
+                               enum tenxpress_state state)
+{
+       struct tenxpress_phy_data *phy_data = efx->phy_data;
+       if (phy_data != NULL)
+               phy_data->state = state;
+}
+
+void tenxpress_crc_err(struct efx_nic *efx)
+{
+       struct tenxpress_phy_data *phy_data = efx->phy_data;
+       if (phy_data != NULL)
+               atomic_inc(&phy_data->bad_crc_count);
+}
+
+/* Check that the C166 has booted successfully */
+static int tenxpress_phy_check(struct efx_nic *efx)
+{
+       int phy_id = efx->mii.phy_id;
+       int count = PCS_BOOT_MAX_DELAY / PCS_BOOT_POLL_DELAY;
+       int boot_stat;
+
+       /* Wait for the boot to complete (or not) */
+       while (count) {
+               boot_stat = mdio_clause45_read(efx, phy_id,
+                                              MDIO_MMD_PCS,
+                                              PCS_BOOT_STATUS_REG);
+               if (boot_stat & (1 << PCS_BOOT_COMPLETE_LBN))
+                       break;
+               count--;
+               udelay(PCS_BOOT_POLL_DELAY);
+       }
+
+       if (!count) {
+               EFX_ERR(efx, "%s: PHY boot timed out. Last status "
+                       "%x\n", __func__,
+                       (boot_stat >> PCS_BOOT_PROGRESS_LBN) &
+                       ((1 << PCS_BOOT_PROGRESS_WIDTH) - 1));
+               return -ETIMEDOUT;
+       }
+
+       return 0;
+}
+
+static void tenxpress_reset_xaui(struct efx_nic *efx);
+
+static int tenxpress_init(struct efx_nic *efx)
+{
+       int rc, reg;
+
+       /* Turn on the clock  */
+       reg = (1 << CLK312_EN_LBN);
+       mdio_clause45_write(efx, efx->mii.phy_id,
+                           MDIO_MMD_PCS, PCS_TEST_SELECT_REG, reg);
+
+       rc = tenxpress_phy_check(efx);
+       if (rc < 0)
+               return rc;
+
+       /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */
+       reg = mdio_clause45_read(efx, efx->mii.phy_id,
+                                MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG);
+       reg |= (1 << PMA_PMA_LED_ACTIVITY_LBN);
+       mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
+                           PMA_PMD_LED_CTRL_REG, reg);
+
+       reg = PMA_PMD_LED_DEFAULT;
+       mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
+                           PMA_PMD_LED_OVERR_REG, reg);
+
+       return rc;
+}
+
+static int tenxpress_phy_init(struct efx_nic *efx)
+{
+       struct tenxpress_phy_data *phy_data;
+       int rc = 0;
+
+       phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
+       efx->phy_data = phy_data;
+
+       tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL);
+
+       rc = mdio_clause45_wait_reset_mmds(efx,
+                                          TENXPRESS_REQUIRED_DEVS);
+       if (rc < 0)
+               goto fail;
+
+       rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0);
+       if (rc < 0)
+               goto fail;
+
+       rc = tenxpress_init(efx);
+       if (rc < 0)
+               goto fail;
+
+       schedule_timeout_uninterruptible(HZ / 5); /* 200ms */
+
+       /* Take XGXS and SerDes out of reset, and reset the 10Xpress */
+       falcon_reset_xaui(efx);
+
+       return 0;
+
+ fail:
+       kfree(efx->phy_data);
+       efx->phy_data = NULL;
+       return rc;
+}
+
+static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp)
+{
+       struct tenxpress_phy_data *pd = efx->phy_data;
+       int reg;
+
+       /* Nothing to do if all is well and was previously so. */
+       if (!(bad_lp || pd->bad_lp_tries))
+               return;
+
+       reg = mdio_clause45_read(efx, efx->mii.phy_id,
+                                MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG);
+
+       if (bad_lp)
+               pd->bad_lp_tries++;
+       else
+               pd->bad_lp_tries = 0;
+
+       if (pd->bad_lp_tries == MAX_BAD_LP_TRIES) {
+               pd->bad_lp_tries = 0;   /* Restart count */
+               reg &= ~(PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN);
+               reg |= (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN);
+               EFX_ERR(efx, "This NIC appears to be plugged into"
+                       " a port that is not 10GBASE-T capable.\n"
+                       " This PHY is 10GBASE-T ONLY, so no link can"
+                       " be established.\n");
+       } else {
+               reg |= (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN);
+       }
+       mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
+                           PMA_PMD_LED_OVERR_REG, reg);
+}
+
+/* Check link status and return a boolean OK value.  If the link is not
+ * OK, have a quick look to see whether we appear to be plugged into a
+ * non-10GBT port; if so, warn the user that they won't get a link any
+ * time soon, since we are 10GBT only - unless the caller asked us not
+ * to do this check (it isn't useful in loopback) */
+static int tenxpress_link_ok(struct efx_nic *efx, int check_lp)
+{
+       int ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS);
+
+       if (ok) {
+               tenxpress_set_bad_lp(efx, 0);
+       } else if (check_lp) {
+               /* Are we plugged into the wrong sort of link? */
+               int bad_lp = 0;
+               int phy_id = efx->mii.phy_id;
+               int an_stat = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN,
+                                                MDIO_AN_STATUS);
+               int xphy_stat = mdio_clause45_read(efx, phy_id,
+                                                  MDIO_MMD_PMAPMD,
+                                                  PMA_PMD_XSTATUS_REG);
+               /* Are we plugged into anything that sends FLPs? If
+                * not we can't distinguish between not being plugged
+                * in and being plugged into a non-AN antique. The FLP
+                * bit has the advantage of not clearing when autoneg
+                * restarts. */
+               if (!(xphy_stat & (1 << PMA_PMD_XSTAT_FLP_LBN))) {
+                       tenxpress_set_bad_lp(efx, 0);
+                       return ok;
+               }
+
+               /* If it can do 10GBT it must be XNP capable */
+               bad_lp = !(an_stat & (1 << MDIO_AN_STATUS_XNP_LBN));
+               if (!bad_lp && (an_stat & (1 << MDIO_AN_STATUS_PAGE_LBN))) {
+                       bad_lp = !(mdio_clause45_read(efx, phy_id,
+                                       MDIO_MMD_AN, MDIO_AN_10GBT_STATUS) &
+                                       (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN));
+               }
+               tenxpress_set_bad_lp(efx, bad_lp);
+       }
+       return ok;
+}
+
+static void tenxpress_phy_reconfigure(struct efx_nic *efx)
+{
+       if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL))
+               return;
+
+       efx->link_up = tenxpress_link_ok(efx, 0);
+       efx->link_options = GM_LPA_10000FULL;
+}
+
+static void tenxpress_phy_clear_interrupt(struct efx_nic *efx)
+{
+       /* Nothing done here - LASI interrupts aren't reliable, so we poll */
+}
+
+
+/* Poll PHY for interrupt */
+static int tenxpress_phy_check_hw(struct efx_nic *efx)
+{
+       struct tenxpress_phy_data *phy_data = efx->phy_data;
+       int phy_up = tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL);
+       int link_ok;
+
+       link_ok = phy_up && tenxpress_link_ok(efx, 1);
+
+       if (link_ok != efx->link_up)
+               falcon_xmac_sim_phy_event(efx);
+
+       /* Nothing to check if we've already shut down the PHY */
+       if (!phy_up)
+               return 0;
+
+       if (atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) {
+               EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n");
+               falcon_reset_xaui(efx);
+               atomic_set(&phy_data->bad_crc_count, 0);
+       }
+
+       return 0;
+}
+
+static void tenxpress_phy_fini(struct efx_nic *efx)
+{
+       int reg;
+
+       /* Power down the LNPGA */
+       reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN);
+       mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
+                           PMA_PMD_XCONTROL_REG, reg);
+
+       /* Waiting here ensures that the board fini, which can turn off the
+        * power to the PHY, won't get run until the LNPGA powerdown has been
+        * given long enough to complete. */
+       schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */
+
+       kfree(efx->phy_data);
+       efx->phy_data = NULL;
+}
+
+
+/* Set the RX, TX and Link LEDs flashing.  The other LEDs
+ * (which probably aren't wired anyway) are left in AUTO mode */
+void tenxpress_phy_blink(struct efx_nic *efx, int blink)
+{
+       int reg;
+
+       if (blink)
+               reg = (PMA_PMD_LED_FLASH << PMA_PMD_LED_TX_LBN) |
+                       (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN) |
+                       (PMA_PMD_LED_FLASH << PMA_PMD_LED_LINK_LBN);
+       else
+               reg = PMA_PMD_LED_DEFAULT;
+
+       mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
+                           PMA_PMD_LED_OVERR_REG, reg);
+}
+
+static void tenxpress_reset_xaui(struct efx_nic *efx)
+{
+       int phy = efx->mii.phy_id;
+       int clk_ctrl, test_select, soft_rst2;
+
+       /* The real work is done on clock_ctrl; the other resets are thought
+        * to be optional, but they make the reset more reliable.
+        */
+
+       /* Read */
+       clk_ctrl = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
+                                     PCS_CLOCK_CTRL_REG);
+       test_select = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
+                                        PCS_TEST_SELECT_REG);
+       soft_rst2 = mdio_clause45_read(efx, phy, MDIO_MMD_PCS,
+                                      PCS_SOFT_RST2_REG);
+
+       /* Put in reset */
+       test_select &= ~(1 << CLK312_EN_LBN);
+       mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
+                           PCS_TEST_SELECT_REG, test_select);
+
+       soft_rst2 &= ~((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
+       mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
+                           PCS_SOFT_RST2_REG, soft_rst2);
+
+       clk_ctrl &= ~(1 << PLL312_RST_N_LBN);
+       mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
+                           PCS_CLOCK_CTRL_REG, clk_ctrl);
+       udelay(10);
+
+       /* Remove reset */
+       clk_ctrl |= (1 << PLL312_RST_N_LBN);
+       mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
+                           PCS_CLOCK_CTRL_REG, clk_ctrl);
+       udelay(10);
+
+       soft_rst2 |= ((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN));
+       mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
+                           PCS_SOFT_RST2_REG, soft_rst2);
+       udelay(10);
+
+       test_select |= (1 << CLK312_EN_LBN);
+       mdio_clause45_write(efx, phy, MDIO_MMD_PCS,
+                           PCS_TEST_SELECT_REG, test_select);
+       udelay(10);
+}
+
+struct efx_phy_operations falcon_tenxpress_phy_ops = {
+       .init             = tenxpress_phy_init,
+       .reconfigure      = tenxpress_phy_reconfigure,
+       .check_hw         = tenxpress_phy_check_hw,
+       .fini             = tenxpress_phy_fini,
+       .clear_interrupt  = tenxpress_phy_clear_interrupt,
+       .reset_xaui       = tenxpress_reset_xaui,
+       .mmds             = TENXPRESS_REQUIRED_DEVS,
+};
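
The struct above is this PHY's entry in the driver's PHY operations table; callers are expected to go through these hooks rather than call the tenxpress functions directly. A rough sketch of how the core might drive it (the phy_op field name and the call sites are assumptions based on the ops struct, not copied from the driver core):

/* Sketch only: field name and call sequence are assumptions. */
static int example_bring_up_phy(struct efx_nic *efx)
{
        int rc;

        efx->phy_op = &falcon_tenxpress_phy_ops; /* selected by board/PHY type */
        rc = efx->phy_op->init(efx);
        if (rc)
                return rc;
        efx->phy_op->reconfigure(efx);           /* sets efx->link_up/link_options */
        return 0;
}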
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
new file mode 100644 (file)
index 0000000..fbb866b
--- /dev/null
@@ -0,0 +1,452 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2005-2006 Fen Systems Ltd.
+ * Copyright 2005-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#include <linux/pci.h>
+#include <linux/tcp.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/highmem.h>
+#include "net_driver.h"
+#include "tx.h"
+#include "efx.h"
+#include "falcon.h"
+#include "workarounds.h"
+
+/*
+ * TX descriptor ring full threshold
+ *
+ * The tx_queue descriptor ring fill-level must fall below this value
+ * before we restart the netif queue
+ */
+#define EFX_NETDEV_TX_THRESHOLD(_tx_queue)     \
+       (_tx_queue->efx->type->txd_ring_mask / 2u)
+
+/* We want to be able to nest calls to netif_stop_queue(), since each
+ * channel can have an individual stop on the queue.
+ */
+void efx_stop_queue(struct efx_nic *efx)
+{
+       spin_lock_bh(&efx->netif_stop_lock);
+       EFX_TRACE(efx, "stop TX queue\n");
+
+       atomic_inc(&efx->netif_stop_count);
+       netif_stop_queue(efx->net_dev);
+
+       spin_unlock_bh(&efx->netif_stop_lock);
+}
+
+/* Wake netif's TX queue
+ * We want to be able to nest calls to netif_stop_queue(), since each
+ * channel can have an individual stop on the queue.
+ */
+inline void efx_wake_queue(struct efx_nic *efx)
+{
+       local_bh_disable();
+       if (atomic_dec_and_lock(&efx->netif_stop_count,
+                               &efx->netif_stop_lock)) {
+               EFX_TRACE(efx, "waking TX queue\n");
+               netif_wake_queue(efx->net_dev);
+               spin_unlock(&efx->netif_stop_lock);
+       }
+       local_bh_enable();
+}
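
Because stops are counted in netif_stop_count, the queue only restarts once every stop has been matched by a wake. Illustration only:

/* Illustration only: two independent stops must both be released
 * before the netif queue actually wakes. */
efx_stop_queue(efx);    /* e.g. channel 0 ran out of descriptors */
efx_stop_queue(efx);    /* e.g. channel 1 did too */
efx_wake_queue(efx);    /* count 2 -> 1: queue stays stopped */
efx_wake_queue(efx);    /* count 1 -> 0: netif_wake_queue() runs */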
+
+static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
+                                     struct efx_tx_buffer *buffer)
+{
+       if (buffer->unmap_len) {
+               struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
+               if (buffer->unmap_single)
+                       pci_unmap_single(pci_dev, buffer->unmap_addr,
+                                        buffer->unmap_len, PCI_DMA_TODEVICE);
+               else
+                       pci_unmap_page(pci_dev, buffer->unmap_addr,
+                                      buffer->unmap_len, PCI_DMA_TODEVICE);
+               buffer->unmap_len = 0;
+               buffer->unmap_single = 0;
+       }
+
+       if (buffer->skb) {
+               dev_kfree_skb_any((struct sk_buff *) buffer->skb);
+               buffer->skb = NULL;
+               EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x "
+                         "complete\n", tx_queue->queue, tx_queue->read_count);
+       }
+}
+
+
+/*
+ * Add a socket buffer to a TX queue
+ *
+ * This maps all fragments of a socket buffer for DMA and adds them to
+ * the TX queue.  The queue's insert pointer will be incremented by
+ * the number of fragments in the socket buffer.
+ *
+ * If any DMA mapping fails, any mapped fragments will be unmapped and
+ * the queue's insert pointer will be restored to its original value.
+ *
+ * Returns NETDEV_TX_OK or NETDEV_TX_BUSY
+ * You must hold netif_tx_lock() to call this function.
+ */
+static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
+                                 const struct sk_buff *skb)
+{
+       struct efx_nic *efx = tx_queue->efx;
+       struct pci_dev *pci_dev = efx->pci_dev;
+       struct efx_tx_buffer *buffer;
+       skb_frag_t *fragment;
+       struct page *page;
+       int page_offset;
+       unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign;
+       dma_addr_t dma_addr, unmap_addr = 0;
+       unsigned int dma_len;
+       unsigned unmap_single;
+       int q_space, i = 0;
+       int rc = NETDEV_TX_OK;
+
+       EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count);
+
+       /* Get size of the initial fragment */
+       len = skb_headlen(skb);
+
+       fill_level = tx_queue->insert_count - tx_queue->old_read_count;
+       q_space = efx->type->txd_ring_mask - 1 - fill_level;
+
+       /* Map for DMA.  Use pci_map_single rather than pci_map_page
+        * since this is more efficient on machines with sparse
+        * memory.
+        */
+       unmap_single = 1;
+       dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+
+       /* Process all fragments */
+       while (1) {
+               if (unlikely(pci_dma_mapping_error(dma_addr)))
+                       goto pci_err;
+
+               /* Store fields for marking in the per-fragment final
+                * descriptor */
+               unmap_len = len;
+               unmap_addr = dma_addr;
+
+               /* Add to TX queue, splitting across DMA boundaries */
+               do {
+                       if (unlikely(q_space-- <= 0)) {
+                               /* It might be that completions have
+                                * happened since the xmit path last
+                                * checked.  Update the xmit path's
+                                * copy of read_count.
+                                */
+                               ++tx_queue->stopped;
+                               /* This memory barrier protects the
+                                * change of stopped from the access
+                                * of read_count. */
+                               smp_mb();
+                               tx_queue->old_read_count =
+                                       *(volatile unsigned *)
+                                       &tx_queue->read_count;
+                               fill_level = (tx_queue->insert_count
+                                             - tx_queue->old_read_count);
+                               q_space = (efx->type->txd_ring_mask - 1 -
+                                          fill_level);
+                               if (unlikely(q_space-- <= 0))
+                                       goto stop;
+                               smp_mb();
+                               --tx_queue->stopped;
+                       }
+
+                       insert_ptr = (tx_queue->insert_count &
+                                     efx->type->txd_ring_mask);
+                       buffer = &tx_queue->buffer[insert_ptr];
+                       EFX_BUG_ON_PARANOID(buffer->skb);
+                       EFX_BUG_ON_PARANOID(buffer->len);
+                       EFX_BUG_ON_PARANOID(buffer->continuation != 1);
+                       EFX_BUG_ON_PARANOID(buffer->unmap_len);
+
+                       dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1);
+                       if (likely(dma_len > len))
+                               dma_len = len;
+
+                       misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
+                       if (misalign && dma_len + misalign > 512)
+                               dma_len = 512 - misalign;
+
+                       /* Fill out per descriptor fields */
+                       buffer->len = dma_len;
+                       buffer->dma_addr = dma_addr;
+                       len -= dma_len;
+                       dma_addr += dma_len;
+                       ++tx_queue->insert_count;
+               } while (len);
+
+               /* Transfer ownership of the unmapping to the final buffer */
+               buffer->unmap_addr = unmap_addr;
+               buffer->unmap_single = unmap_single;
+               buffer->unmap_len = unmap_len;
+               unmap_len = 0;
+
+               /* Get address and size of next fragment */
+               if (i >= skb_shinfo(skb)->nr_frags)
+                       break;
+               fragment = &skb_shinfo(skb)->frags[i];
+               len = fragment->size;
+               page = fragment->page;
+               page_offset = fragment->page_offset;
+               i++;
+               /* Map for DMA */
+               unmap_single = 0;
+               dma_addr = pci_map_page(pci_dev, page, page_offset, len,
+                                       PCI_DMA_TODEVICE);
+       }
+
+       /* Transfer ownership of the skb to the final buffer */
+       buffer->skb = skb;
+       buffer->continuation = 0;
+
+       /* Pass off to hardware */
+       falcon_push_buffers(tx_queue);
+
+       return NETDEV_TX_OK;
+
+ pci_err:
+       EFX_ERR_RL(efx, "TX queue %d could not map skb with %d bytes %d "
+                  "fragments for DMA\n", tx_queue->queue, skb->len,
+                  skb_shinfo(skb)->nr_frags + 1);
+
+       /* Mark the packet as transmitted, and free the SKB ourselves */
+       dev_kfree_skb_any((struct sk_buff *)skb);
+       goto unwind;
+
+ stop:
+       rc = NETDEV_TX_BUSY;
+
+       if (tx_queue->stopped == 1)
+               efx_stop_queue(efx);
+
+ unwind:
+       /* Work backwards until we hit the original insert pointer value */
+       while (tx_queue->insert_count != tx_queue->write_count) {
+               --tx_queue->insert_count;
+               insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask;
+               buffer = &tx_queue->buffer[insert_ptr];
+               efx_dequeue_buffer(tx_queue, buffer);
+               buffer->len = 0;
+       }
+
+       /* Free the fragment we were mid-way through pushing */
+       if (unmap_len)
+               pci_unmap_page(pci_dev, unmap_addr, unmap_len,
+                              PCI_DMA_TODEVICE);
+
+       return rc;
+}
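
The fragment-splitting step above limits each descriptor to the distance from the current DMA address to the next hardware boundary: dma_len = ((~dma_addr) & tx_dma_mask) + 1. A worked example (the boundary size is chosen purely for illustration):

/* Illustration: with a 4KB DMA boundary, tx_dma_mask would be 0xfff.
 * For dma_addr = 0x12345f00 and len = 0x400:
 *   ((~0x12345f00) & 0xfff) + 1 = 0x0ff + 1 = 0x100
 * so the first descriptor covers 0x100 bytes (up to the 0x12346000
 * boundary) and the remaining 0x300 bytes go into the next descriptor. */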
+
+/* Remove packets from the TX queue
+ *
+ * This removes packets from the TX queue, up to and including the
+ * specified index.
+ */
+static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
+                                      unsigned int index)
+{
+       struct efx_nic *efx = tx_queue->efx;
+       unsigned int stop_index, read_ptr;
+       unsigned int mask = tx_queue->efx->type->txd_ring_mask;
+
+       stop_index = (index + 1) & mask;
+       read_ptr = tx_queue->read_count & mask;
+
+       while (read_ptr != stop_index) {
+               struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
+               if (unlikely(buffer->len == 0)) {
+                       EFX_ERR(tx_queue->efx, "TX queue %d spurious TX "
+                               "completion id %x\n", tx_queue->queue,
+                               read_ptr);
+                       efx_schedule_reset(efx, RESET_TYPE_TX_SKIP);
+                       return;
+               }
+
+               efx_dequeue_buffer(tx_queue, buffer);
+               buffer->continuation = 1;
+               buffer->len = 0;
+
+               ++tx_queue->read_count;
+               read_ptr = tx_queue->read_count & mask;
+       }
+}
+
+/* Initiate a packet transmission on the specified TX queue.
+ * Note that returning anything other than NETDEV_TX_OK will cause the
+ * OS to free the skb.
+ *
+ * This function is split out from efx_hard_start_xmit to allow the
+ * loopback test to direct packets via specific TX queues.  It is
+ * therefore a non-static inline, so as not to penalise performance
+ * for non-loopback transmissions.
+ *
+ * Context: netif_tx_lock held
+ */
+inline int efx_xmit(struct efx_nic *efx,
+                   struct efx_tx_queue *tx_queue, struct sk_buff *skb)
+{
+       int rc;
+
+       /* Map fragments for DMA and add to TX queue */
+       rc = efx_enqueue_skb(tx_queue, skb);
+       if (unlikely(rc != NETDEV_TX_OK))
+               goto out;
+
+       /* Update last TX timer */
+       efx->net_dev->trans_start = jiffies;
+
+ out:
+       return rc;
+}
+
+/* Initiate a packet transmission.  We use one channel per CPU
+ * (sharing when we have more CPUs than channels).  On Falcon, the TX
+ * completion events will be directed back to the CPU that transmitted
+ * the packet, which should be cache-efficient.
+ *
+ * Context: non-blocking.
+ * Note that returning anything other than NETDEV_TX_OK will cause the
+ * OS to free the skb.
+ */
+int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
+{
+       struct efx_nic *efx = net_dev->priv;
+       return efx_xmit(efx, &efx->tx_queue[0], skb);
+}
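
In kernels of this era the transmit handler is hooked up through plain function-pointer fields on the net_device rather than an ops struct. A sketch of the wiring (the surrounding setup is assumed, not copied from the driver's probe code):

/* Sketch, assuming a 2.6.2x-style net_device; the watchdog value is
 * illustrative. */
net_dev->hard_start_xmit = efx_hard_start_xmit;
net_dev->watchdog_timeo  = 5 * HZ;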
+
+void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
+{
+       unsigned fill_level;
+       struct efx_nic *efx = tx_queue->efx;
+
+       EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask);
+
+       efx_dequeue_buffers(tx_queue, index);
+
+       /* See if we need to restart the netif queue.  This barrier
+        * separates the update of read_count from the test of
+        * stopped. */
+       smp_mb();
+       if (unlikely(tx_queue->stopped)) {
+               fill_level = tx_queue->insert_count - tx_queue->read_count;
+               if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) {
+                       EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx));
+
+                       /* Do this under netif_tx_lock(), to avoid racing
+                        * with efx_xmit(). */
+                       netif_tx_lock(efx->net_dev);
+                       if (tx_queue->stopped) {
+                               tx_queue->stopped = 0;
+                               efx_wake_queue(efx);
+                       }
+                       netif_tx_unlock(efx->net_dev);
+               }
+       }
+}
+
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
+{
+       struct efx_nic *efx = tx_queue->efx;
+       unsigned int txq_size;
+       int i, rc;
+
+       EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
+
+       /* Allocate software ring */
+       txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer);
+       tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
+       if (!tx_queue->buffer) {
+               rc = -ENOMEM;
+               goto fail1;
+       }
+       for (i = 0; i <= efx->type->txd_ring_mask; ++i)
+               tx_queue->buffer[i].continuation = 1;
+
+       /* Allocate hardware ring */
+       rc = falcon_probe_tx(tx_queue);
+       if (rc)
+               goto fail2;
+
+       return 0;
+
+ fail2:
+       kfree(tx_queue->buffer);
+       tx_queue->buffer = NULL;
+ fail1:
+       tx_queue->used = 0;
+
+       return rc;
+}
+
+int efx_init_tx_queue(struct efx_tx_queue *tx_queue)
+{
+       EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue);
+
+       tx_queue->insert_count = 0;
+       tx_queue->write_count = 0;
+       tx_queue->read_count = 0;
+       tx_queue->old_read_count = 0;
+       BUG_ON(tx_queue->stopped);
+
+       /* Set up TX descriptor ring */
+       return falcon_init_tx(tx_queue);
+}
+
+void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
+{
+       struct efx_tx_buffer *buffer;
+
+       if (!tx_queue->buffer)
+               return;
+
+       /* Free any buffers left in the ring */
+       while (tx_queue->read_count != tx_queue->write_count) {
+               buffer = &tx_queue->buffer[tx_queue->read_count &
+                                          tx_queue->efx->type->txd_ring_mask];
+               efx_dequeue_buffer(tx_queue, buffer);
+               buffer->continuation = 1;
+               buffer->len = 0;
+
+               ++tx_queue->read_count;
+       }
+}
+
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
+{
+       EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue);
+
+       /* Flush TX queue, remove descriptor ring */
+       falcon_fini_tx(tx_queue);
+
+       efx_release_tx_buffers(tx_queue);
+
+       /* Release queue's stop on port, if any */
+       if (tx_queue->stopped) {
+               tx_queue->stopped = 0;
+               efx_wake_queue(tx_queue->efx);
+       }
+}
+
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
+{
+       EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue);
+       falcon_remove_tx(tx_queue);
+
+       kfree(tx_queue->buffer);
+       tx_queue->buffer = NULL;
+       tx_queue->used = 0;
+}
+
+
diff --git a/drivers/net/sfc/tx.h b/drivers/net/sfc/tx.h
new file mode 100644 (file)
index 0000000..1526a73
--- /dev/null
@@ -0,0 +1,24 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2006 Fen Systems Ltd.
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_TX_H
+#define EFX_TX_H
+
+#include "net_driver.h"
+
+int efx_probe_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_remove_tx_queue(struct efx_tx_queue *tx_queue);
+int efx_init_tx_queue(struct efx_tx_queue *tx_queue);
+void efx_fini_tx_queue(struct efx_tx_queue *tx_queue);
+
+int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
+void efx_release_tx_buffers(struct efx_tx_queue *tx_queue);
+
+#endif /* EFX_TX_H */
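
These entry points follow the driver's probe/init/fini/remove split: probe allocates the software and hardware rings, init resets the ring pointers and programs the hardware, fini flushes and releases buffers, and remove frees the rings. A sketch of the expected call order (the enclosing caller is hypothetical):

/* Sketch of the lifecycle for one TX queue; the caller is hypothetical.
 * On success this runs the whole lifecycle and returns 0. */
static int example_tx_queue_lifecycle(struct efx_tx_queue *tx_queue)
{
        int rc;

        rc = efx_probe_tx_queue(tx_queue);      /* allocate rings */
        if (rc)
                return rc;
        rc = efx_init_tx_queue(tx_queue);       /* reset pointers, program hardware */
        if (rc)
                goto out_remove;

        /* ... queue is usable; efx_xmit()/efx_xmit_done() run here ... */

        efx_fini_tx_queue(tx_queue);            /* flush and release buffers */
out_remove:
        efx_remove_tx_queue(tx_queue);          /* free rings */
        return rc;
}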
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
new file mode 100644 (file)
index 0000000..dca62f1
--- /dev/null
@@ -0,0 +1,56 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_WORKAROUNDS_H
+#define EFX_WORKAROUNDS_H
+
+/*
+ * Hardware workarounds.
+ * Bug numbers are from Solarflare's Bugzilla.
+ */
+
+#define EFX_WORKAROUND_ALWAYS(efx) 1
+#define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1)
+
+/* XAUI resets if link not detected */
+#define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
+/* SNAP frames have TOBE_DISC set */
+#define EFX_WORKAROUND_5475 EFX_WORKAROUND_ALWAYS
+/* RX PCIe double split performance issue */
+#define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS
+/* TX pkt parser problem with <= 16 byte TXes */
+#define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS
+/* XGXS and XAUI reset sequencing in SW */
+#define EFX_WORKAROUND_9388 EFX_WORKAROUND_ALWAYS
+/* Low rate CRC errors require XAUI reset */
+#define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS
+/* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
+ * or a PCIe error (bug 11028) */
+#define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS
+/* Transmit flow control may get disabled */
+#define EFX_WORKAROUND_11482 EFX_WORKAROUND_ALWAYS
+/* Flush events can take a very long time to appear */
+#define EFX_WORKAROUND_11557 EFX_WORKAROUND_ALWAYS
+
+/* Spurious parity errors in TSORT buffers */
+#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
+/* iSCSI parsing errors */
+#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
+/* RX events go missing */
+#define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A
+/* RX_RESET on A1 */
+#define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A
+/* Increase filter depth to avoid RX_RESET */
+#define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A
+/* Flushes may never complete */
+#define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_A
+/* Leak overlength packets rather than free */
+#define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A
+
+#endif /* EFX_WORKAROUNDS_H */
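
Each workaround macro expands to a predicate taking the NIC, so quirk handling can be gated on the affected silicon at run time. Illustration only (the handler named here is hypothetical):

/* Illustration only: gate a Falcon-A-only quirk at run time. */
if (EFX_WORKAROUND_5676(efx))
        handle_missing_rx_events(efx);  /* hypothetical handler */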
diff --git a/drivers/net/sfc/xenpack.h b/drivers/net/sfc/xenpack.h
new file mode 100644 (file)
index 0000000..b0d1f22
--- /dev/null
@@ -0,0 +1,62 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2006 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef EFX_XENPACK_H
+#define EFX_XENPACK_H
+
+/* Exported functions from Xenpack standard PHY control */
+
+#include "mdio_10g.h"
+
+/****************************************************************************/
+/* XENPACK MDIO register extensions */
+#define MDIO_XP_LASI_RX_CTRL   (0x9000)
+#define MDIO_XP_LASI_TX_CTRL   (0x9001)
+#define MDIO_XP_LASI_CTRL      (0x9002)
+#define MDIO_XP_LASI_RX_STAT   (0x9003)
+#define MDIO_XP_LASI_TX_STAT   (0x9004)
+#define MDIO_XP_LASI_STAT      (0x9005)
+
+/* Control/Status bits */
+#define XP_LASI_LS_ALARM       (1 << 0)
+#define XP_LASI_TX_ALARM       (1 << 1)
+#define XP_LASI_RX_ALARM       (1 << 2)
+/* These two are Quake vendor extensions to the standard XENPACK defines */
+#define XP_LASI_LS_INTB                (1 << 3)
+#define XP_LASI_TEST           (1 << 7)
+
+/* Enable LASI interrupts for PHY */
+static inline void xenpack_enable_lasi_irqs(struct efx_nic *efx)
+{
+       int reg;
+       int phy_id = efx->mii.phy_id;
+       /* Read to clear LASI status register */
+       reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD,
+                                MDIO_XP_LASI_STAT);
+
+       mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD,
+                           MDIO_XP_LASI_CTRL, XP_LASI_LS_ALARM);
+}
+
+/* Read the LASI interrupt status to clear the interrupt. */
+static inline int xenpack_clear_lasi_irqs(struct efx_nic *efx)
+{
+       /* Read to clear link status alarm */
+       return mdio_clause45_read(efx, efx->mii.phy_id,
+                                 MDIO_MMD_PMAPMD, MDIO_XP_LASI_STAT);
+}
+
+/* Turn off LASI interrupts */
+static inline void xenpack_disable_lasi_irqs(struct efx_nic *efx)
+{
+       mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD,
+                           MDIO_XP_LASI_CTRL, 0);
+}
+
+#endif /* EFX_XENPACK_H */
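
A PHY driver that does rely on LASI would typically arm the alarms at init, clear them from its interrupt or poll path, and disarm them on fini. Sketch only (the enclosing PHY functions are hypothetical):

/* Sketch only: the enclosing PHY is hypothetical. */
static int example_lasi_phy_init(struct efx_nic *efx)
{
        xenpack_enable_lasi_irqs(efx);          /* arm link-status alarms */
        return 0;
}

static int example_lasi_phy_check_hw(struct efx_nic *efx)
{
        int stat = xenpack_clear_lasi_irqs(efx); /* read-to-clear */

        if (stat & XP_LASI_LS_ALARM) {
                /* link status changed - re-check the link here */
        }
        return 0;
}

static void example_lasi_phy_fini(struct efx_nic *efx)
{
        xenpack_disable_lasi_irqs(efx);
}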
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c
new file mode 100644 (file)
index 0000000..66dd5bf
--- /dev/null
@@ -0,0 +1,132 @@
+/****************************************************************************
+ * Driver for Solarflare Solarstorm network controllers and boards
+ * Copyright 2006-2008 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+/*
+ * Driver for XFP optical PHYs (plus some support specific to the Quake 2032)
+ * See www.amcc.com for details (search for qt2032)
+ */
+
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include "efx.h"
+#include "gmii.h"
+#include "mdio_10g.h"
+#include "xenpack.h"
+#include "phy.h"
+#include "mac.h"
+
+#define XFP_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PCS |     \
+                          MDIO_MMDREG_DEVS0_PMAPMD |   \
+                          MDIO_MMDREG_DEVS0_PHYXS)
+
+/****************************************************************************/
+/* Quake-specific MDIO registers */
+#define MDIO_QUAKE_LED0_REG    (0xD006)
+
+void xfp_set_led(struct efx_nic *p, int led, int mode)
+{
+       int addr = MDIO_QUAKE_LED0_REG + led;
+       mdio_clause45_write(p, p->mii.phy_id, MDIO_MMD_PMAPMD, addr,
+                           mode);
+}
+
+#define XFP_MAX_RESET_TIME 500
+#define XFP_RESET_WAIT 10
+
+/* Reset the PHYXS MMD. This is documented (for the Quake PHY) as doing
+ * a complete soft reset.
+ */
+static int xfp_reset_phy(struct efx_nic *efx)
+{
+       int rc;
+
+       rc = mdio_clause45_reset_mmd(efx, MDIO_MMD_PHYXS,
+                                    XFP_MAX_RESET_TIME / XFP_RESET_WAIT,
+                                    XFP_RESET_WAIT);
+       if (rc < 0)
+               goto fail;
+
+       /* Wait 250ms for the PHY to complete bootup */
+       msleep(250);
+
+       /* Check that all the MMDs we expect are present and responding. We
+        * expect faults on some if the link is down, but not on the PHY XS */
+       rc = mdio_clause45_check_mmds(efx, XFP_REQUIRED_DEVS,
+                                     MDIO_MMDREG_DEVS0_PHYXS);
+       if (rc < 0)
+               goto fail;
+
+       efx->board_info.init_leds(efx);
+
+       return rc;
+
+ fail:
+       EFX_ERR(efx, "XFP: reset timed out!\n");
+       return rc;
+}
+
+static int xfp_phy_init(struct efx_nic *efx)
+{
+       u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS);
+       int rc;
+
+       EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision"
+                " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
+                MDIO_ID_REV(devid));
+
+       rc = xfp_reset_phy(efx);
+
+       EFX_INFO(efx, "XFP: PHY init %s.\n",
+                rc ? "failed" : "successful");
+
+       return rc;
+}
+
+static void xfp_phy_clear_interrupt(struct efx_nic *efx)
+{
+       xenpack_clear_lasi_irqs(efx);
+}
+
+static int xfp_link_ok(struct efx_nic *efx)
+{
+       return mdio_clause45_links_ok(efx, XFP_REQUIRED_DEVS);
+}
+
+static int xfp_phy_check_hw(struct efx_nic *efx)
+{
+       int rc = 0;
+       int link_up = xfp_link_ok(efx);
+       /* Simulate a PHY event if link state has changed */
+       if (link_up != efx->link_up)
+               falcon_xmac_sim_phy_event(efx);
+
+       return rc;
+}
+
+static void xfp_phy_reconfigure(struct efx_nic *efx)
+{
+       efx->link_up = xfp_link_ok(efx);
+       efx->link_options = GM_LPA_10000FULL;
+}
+
+
+static void xfp_phy_fini(struct efx_nic *efx)
+{
+       /* Clobber the LED if it was blinking */
+       efx->board_info.blink(efx, 0);
+}
+
+struct efx_phy_operations falcon_xfp_phy_ops = {
+       .init            = xfp_phy_init,
+       .reconfigure     = xfp_phy_reconfigure,
+       .check_hw        = xfp_phy_check_hw,
+       .fini            = xfp_phy_fini,
+       .clear_interrupt = xfp_phy_clear_interrupt,
+       .reset_xaui      = efx_port_dummy_op_void,
+       .mmds            = XFP_REQUIRED_DEVS,
+};
index 20745fd4e9738e92050db6d70d9658a2b4e78c1a..abc63b0663be9ec83854da1a5b79feeefe6d4e66 100644 (file)
@@ -212,6 +212,12 @@ enum _DescStatusBit {
        THOL2           = 0x20000000,
        THOL1           = 0x10000000,
        THOL0           = 0x00000000,
+
+       WND             = 0x00080000,
+       TABRT           = 0x00040000,
+       FIFO            = 0x00020000,
+       LINK            = 0x00010000,
+       ColCountMask    = 0x0000ffff,
        /* RxDesc.status */
        IPON            = 0x20000000,
        TCPON           = 0x10000000,
@@ -480,30 +486,23 @@ static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
        desc->status = 0x0;
 }
 
-static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
-                              struct RxDesc *desc, u32 rx_buf_sz)
+static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
+                                          struct RxDesc *desc)
 {
+       u32 rx_buf_sz = tp->rx_buf_sz;
        struct sk_buff *skb;
-       dma_addr_t mapping;
-       int ret = 0;
-
-       skb = dev_alloc_skb(rx_buf_sz);
-       if (!skb)
-               goto err_out;
-
-       *sk_buff = skb;
 
-       mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
-                                PCI_DMA_FROMDEVICE);
+       skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
+       if (likely(skb)) {
+               dma_addr_t mapping;
 
-       sis190_map_to_asic(desc, mapping, rx_buf_sz);
-out:
-       return ret;
+               mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
+                                        PCI_DMA_FROMDEVICE);
+               sis190_map_to_asic(desc, mapping, rx_buf_sz);
+       } else
+               sis190_make_unusable_by_asic(desc);
 
-err_out:
-       ret = -ENOMEM;
-       sis190_make_unusable_by_asic(desc);
-       goto out;
+       return skb;
 }
 
 static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
@@ -512,37 +511,41 @@ static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
        u32 cur;
 
        for (cur = start; cur < end; cur++) {
-               int ret, i = cur % NUM_RX_DESC;
+               unsigned int i = cur % NUM_RX_DESC;
 
                if (tp->Rx_skbuff[i])
                        continue;
 
-               ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
-                                         tp->RxDescRing + i, tp->rx_buf_sz);
-               if (ret < 0)
+               tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);
+
+               if (!tp->Rx_skbuff[i])
                        break;
        }
        return cur - start;
 }
 
-static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
-                                    struct RxDesc *desc, int rx_buf_sz)
+static bool sis190_try_rx_copy(struct sis190_private *tp,
+                              struct sk_buff **sk_buff, int pkt_size,
+                              dma_addr_t addr)
 {
-       int ret = -1;
+       struct sk_buff *skb;
+       bool done = false;
 
-       if (pkt_size < rx_copybreak) {
-               struct sk_buff *skb;
+       if (pkt_size >= rx_copybreak)
+               goto out;
 
-               skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
-               if (skb) {
-                       skb_reserve(skb, NET_IP_ALIGN);
-                       skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
-                       *sk_buff = skb;
-                       sis190_give_to_asic(desc, rx_buf_sz);
-                       ret = 0;
-               }
-       }
-       return ret;
+       skb = netdev_alloc_skb(tp->dev, pkt_size + 2);
+       if (!skb)
+               goto out;
+
+       pci_dma_sync_single_for_device(tp->pci_dev, addr, pkt_size,
+                                      PCI_DMA_FROMDEVICE);
+       skb_reserve(skb, 2);
+       skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
+       *sk_buff = skb;
+       done = true;
+out:
+       return done;
 }
 
 static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
@@ -592,9 +595,9 @@ static int sis190_rx_interrupt(struct net_device *dev,
                        sis190_give_to_asic(desc, tp->rx_buf_sz);
                else {
                        struct sk_buff *skb = tp->Rx_skbuff[entry];
+                       dma_addr_t addr = le32_to_cpu(desc->addr);
                        int pkt_size = (status & RxSizeMask) - 4;
-                       void (*pci_action)(struct pci_dev *, dma_addr_t,
-                               size_t, int) = pci_dma_sync_single_for_device;
+                       struct pci_dev *pdev = tp->pci_dev;
 
                        if (unlikely(pkt_size > tp->rx_buf_sz)) {
                                net_intr(tp, KERN_INFO
@@ -606,20 +609,18 @@ static int sis190_rx_interrupt(struct net_device *dev,
                                continue;
                        }
 
-                       pci_dma_sync_single_for_cpu(tp->pci_dev,
-                               le32_to_cpu(desc->addr), tp->rx_buf_sz,
-                               PCI_DMA_FROMDEVICE);
 
-                       if (sis190_try_rx_copy(&skb, pkt_size, desc,
-                                              tp->rx_buf_sz)) {
-                               pci_action = pci_unmap_single;
+                       if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
+                               pci_dma_sync_single_for_device(pdev, addr,
+                                       tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+                               sis190_give_to_asic(desc, tp->rx_buf_sz);
+                       } else {
+                               pci_unmap_single(pdev, addr, tp->rx_buf_sz,
+                                                PCI_DMA_FROMDEVICE);
                                tp->Rx_skbuff[entry] = NULL;
                                sis190_make_unusable_by_asic(desc);
                        }
 
-                       pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
-                                  tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-
                        skb_put(skb, pkt_size);
                        skb->protocol = eth_type_trans(skb, dev);
 
@@ -658,9 +659,31 @@ static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
        memset(desc, 0x00, sizeof(*desc));
 }
 
+static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
+{
+#define TxErrMask      (WND | TABRT | FIFO | LINK)
+
+       if (!unlikely(status & TxErrMask))
+               return 0;
+
+       if (status & WND)
+               stats->tx_window_errors++;
+       if (status & TABRT)
+               stats->tx_aborted_errors++;
+       if (status & FIFO)
+               stats->tx_fifo_errors++;
+       if (status & LINK)
+               stats->tx_carrier_errors++;
+
+       stats->tx_errors++;
+
+       return -1;
+}
+
 static void sis190_tx_interrupt(struct net_device *dev,
                                struct sis190_private *tp, void __iomem *ioaddr)
 {
+       struct net_device_stats *stats = &dev->stats;
        u32 pending, dirty_tx = tp->dirty_tx;
        /*
         * It would not be needed if queueing was allowed to be enabled
@@ -675,15 +698,19 @@ static void sis190_tx_interrupt(struct net_device *dev,
        for (; pending; pending--, dirty_tx++) {
                unsigned int entry = dirty_tx % NUM_TX_DESC;
                struct TxDesc *txd = tp->TxDescRing + entry;
+               u32 status = le32_to_cpu(txd->status);
                struct sk_buff *skb;
 
-               if (le32_to_cpu(txd->status) & OWNbit)
+               if (status & OWNbit)
                        break;
 
                skb = tp->Tx_skbuff[entry];
 
-               dev->stats.tx_packets++;
-               dev->stats.tx_bytes += skb->len;
+               if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
+                       stats->tx_packets++;
+                       stats->tx_bytes += skb->len;
+                       stats->collisions += ((status & ColCountMask) - 1);
+               }
 
                sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
                tp->Tx_skbuff[entry] = NULL;
@@ -904,10 +931,9 @@ static void sis190_phy_task(struct work_struct *work)
                mod_timer(&tp->timer, jiffies + HZ/10);
        } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
                     BMSR_ANEGCOMPLETE)) {
-               net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
-                        dev->name);
                netif_carrier_off(dev);
-               mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
+               net_link(tp, KERN_WARNING "%s: auto-negotiating...\n",
+                        dev->name);
                mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
        } else {
                /* Rejoice ! */
index 5a55ede352f4bc79dbceb5ee73bc2a6c0afb59d6..84af68fdb6c2b08ca53bc2357a5db17959825cee 100644 (file)
@@ -396,14 +396,14 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
 
        /* Order of next two lines is *very* important.
         * When we are sending a little amount of data,
-        * the transfer may be completed inside driver.write()
+        * the transfer may be completed inside the ops->write()
         * routine, because it's running with interrupts enabled.
         * In this case we *never* got WRITE_WAKEUP event,
         * if we did not request it before write operation.
         *       14 Oct 1994  Dmitry Gorodchanin.
         */
        sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
-       actual = sl->tty->driver->write(sl->tty, sl->xbuff, count);
+       actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
 #ifdef SL_CHECK_TRANSMIT
        sl->dev->trans_start = jiffies;
 #endif
@@ -437,7 +437,7 @@ static void slip_write_wakeup(struct tty_struct *tty)
                return;
        }
 
-       actual = tty->driver->write(tty, sl->xhead, sl->xleft);
+       actual = tty->ops->write(tty, sl->xhead, sl->xleft);
        sl->xleft -= actual;
        sl->xhead += actual;
 }
@@ -462,7 +462,7 @@ static void sl_tx_timeout(struct net_device *dev)
                }
                printk(KERN_WARNING "%s: transmit timed out, %s?\n",
                        dev->name,
-                       (sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ?
+                       (tty_chars_in_buffer(sl->tty) || sl->xleft) ?
                                "bad line quality" : "driver error");
                sl->xleft = 0;
                sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
@@ -830,6 +830,9 @@ static int slip_open(struct tty_struct *tty)
        if (!capable(CAP_NET_ADMIN))
                return -EPERM;
 
+       if (tty->ops->write == NULL)
+               return -EOPNOTSUPP;
+
        /* RTnetlink lock is misused here to serialize concurrent
           opens of slip channels. There are better ways, but it is
           the simplest one.
@@ -1432,7 +1435,7 @@ static void sl_outfill(unsigned long sls)
                        /* put END into tty queue. Is it right ??? */
                        if (!netif_queue_stopped(sl->dev)) {
                                /* if device busy no outfill */
-                               sl->tty->driver->write(sl->tty, &s, 1);
+                               sl->tty->ops->write(sl->tty, &s, 1);
                        }
                } else
                        set_bit(SLF_OUTWAIT, &sl->flags);
index e83b166aa6b9a53aeeb00a70224d10e46660f638..432e837a17607e323021da0d98dccdc310b1705e 100644 (file)
@@ -649,7 +649,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
                DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
        }
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!capable(CAP_SYS_RAWIO))
                return -EPERM;
 
        switch (data[0]) {
index e3f74c9f78bd848f1e61aa6647f7109e7282fd7c..b66c75e3b8a1f6428b4d9f0409e6f611421bb923 100644 (file)
@@ -4361,7 +4361,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
        }
 
        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
-       if (unlikely(IS_ERR(segs)))
+       if (IS_ERR(segs))
                goto tg3_tso_bug_end;
 
        do {
index 6c6fc325c8f96856a7a511a17b163bce3b54d5ab..bc30c6e8fea2c69779746c67b250dc767918453f 100644 (file)
 static char version[] __devinitdata = "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n";
 
 #define c_char const char
-#define TWIDDLE(a) (u_short)le16_to_cpu(get_unaligned((__le16 *)(a)))
 
 /*
 ** MII Information
@@ -4405,7 +4404,7 @@ srom_infoleaf_info(struct net_device *dev)
        }
     }
 
-    lp->infoleaf_offset = TWIDDLE(p+1);
+       lp->infoleaf_offset = get_unaligned_le16(p + 1);
 
     return 0;
 }
@@ -4476,7 +4475,7 @@ srom_exec(struct net_device *dev, u_char *p)
 
     while (count--) {
        gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
-                                                  *p++ : TWIDDLE(w++)), dev);
+                                                  *p++ : get_unaligned_le16(w++)), dev);
        mdelay(2);                          /* 2ms per action */
     }
 
@@ -4711,10 +4710,10 @@ type1_infoblock(struct net_device *dev, u_char count, u_char *p)
        lp->active = *p++;
        lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1);
        lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1);
-       lp->phy[lp->active].mc  = TWIDDLE(p); p += 2;
-       lp->phy[lp->active].ana = TWIDDLE(p); p += 2;
-       lp->phy[lp->active].fdx = TWIDDLE(p); p += 2;
-       lp->phy[lp->active].ttm = TWIDDLE(p);
+       lp->phy[lp->active].mc  = get_unaligned_le16(p); p += 2;
+       lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
+       lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
+       lp->phy[lp->active].ttm = get_unaligned_le16(p);
        return 0;
     } else if ((lp->media == INIT) && (lp->timeout < 0)) {
         lp->ibn = 1;
@@ -4751,16 +4750,16 @@ type2_infoblock(struct net_device *dev, u_char count, u_char *p)
        lp->infoblock_media = (*p) & MEDIA_CODE;
 
         if ((*p++) & EXT_FIELD) {
-           lp->cache.csr13 = TWIDDLE(p); p += 2;
-           lp->cache.csr14 = TWIDDLE(p); p += 2;
-           lp->cache.csr15 = TWIDDLE(p); p += 2;
+           lp->cache.csr13 = get_unaligned_le16(p); p += 2;
+           lp->cache.csr14 = get_unaligned_le16(p); p += 2;
+           lp->cache.csr15 = get_unaligned_le16(p); p += 2;
        } else {
            lp->cache.csr13 = CSR13;
            lp->cache.csr14 = CSR14;
            lp->cache.csr15 = CSR15;
        }
-        lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2;
-        lp->cache.gep  = ((s32)(TWIDDLE(p)) << 16);
+        lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
+        lp->cache.gep  = ((s32)(get_unaligned_le16(p)) << 16);
        lp->infoblock_csr6 = OMR_SIA;
        lp->useMII = false;
 
@@ -4792,10 +4791,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
        if (MOTO_SROM_BUG) lp->active = 0;
        lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1);
        lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1);
-       lp->phy[lp->active].mc  = TWIDDLE(p); p += 2;
-       lp->phy[lp->active].ana = TWIDDLE(p); p += 2;
-       lp->phy[lp->active].fdx = TWIDDLE(p); p += 2;
-       lp->phy[lp->active].ttm = TWIDDLE(p); p += 2;
+       lp->phy[lp->active].mc  = get_unaligned_le16(p); p += 2;
+       lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2;
+       lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2;
+       lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2;
        lp->phy[lp->active].mci = *p;
        return 0;
     } else if ((lp->media == INIT) && (lp->timeout < 0)) {
@@ -4835,8 +4834,8 @@ type4_infoblock(struct net_device *dev, u_char count, u_char *p)
         lp->cache.csr13 = CSR13;              /* Hard coded defaults */
        lp->cache.csr14 = CSR14;
        lp->cache.csr15 = CSR15;
-        lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2;
-        lp->cache.gep  = ((s32)(TWIDDLE(p)) << 16); p += 2;
+        lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
+        lp->cache.gep  = ((s32)(get_unaligned_le16(p)) << 16); p += 2;
        csr6 = *p++;
        flags = *p++;
 
index 9fb8d7f079943c07c9bca5f982a7cc26ea2c4663..f5f33b3eb067ad98a4105c41b5a7adea6739ded4 100644 (file)
@@ -1017,4 +1017,4 @@ struct de4x5_ioctl {
 #define DE4X5_SET_OMR           0x0d /* Set the OMR Register contents */
 #define DE4X5_GET_REG           0x0e /* Get the DE4X5 Registers */
 
-#define MOTO_SROM_BUG    ((lp->active == 8) && (((le32_to_cpu(get_unaligned(((__le32 *)dev->dev_addr))))&0x00ffffff)==0x3e0008))
+#define MOTO_SROM_BUG    (lp->active == 8 && (get_unaligned_le32(dev->dev_addr) & 0x00ffffff) == 0x3e0008)
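
The TWIDDLE() macro was this driver's private unaligned little-endian 16-bit load, and the old MOTO_SROM_BUG test open-coded the 32-bit variant; get_unaligned_le16()/get_unaligned_le32() from <asm/unaligned.h> read the bytes LSB-first at any address and return a host-order value, which gives the same result on every architecture. A minimal sketch of the replacement pattern (the helper names here are illustrative, not from the driver):

    #include <asm/unaligned.h>
    #include <linux/types.h>

    /* Read SROM fields that are stored little-endian and may be unaligned. */
    static u16 srom_word(const u8 *p)
    {
            return get_unaligned_le16(p);   /* bytes p[0] (LSB) and p[1] (MSB) */
    }

    static u32 srom_dword(const u8 *p)
    {
            return get_unaligned_le32(p);   /* replaces le32_to_cpu(get_unaligned(...)) */
    }
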
index 908422f2f320369e5523ae015cb3dac280fa8782..92c68a22f16b50ab66d59a4fc7ac4e0fce3a1c2b 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/pci.h>
 #include <asm/io.h>
 #include <asm/irq.h>
+#include <asm/unaligned.h>
 
 
 
@@ -304,11 +305,7 @@ enum t21143_csr6_bits {
 
 #define RUN_AT(x) (jiffies + (x))
 
-#if defined(__i386__)                  /* AKA get_unaligned() */
-#define get_u16(ptr) (*(u16 *)(ptr))
-#else
-#define get_u16(ptr) (((u8*)(ptr))[0] + (((u8*)(ptr))[1]<<8))
-#endif
+#define get_u16(ptr) get_unaligned_le16((ptr))
 
 struct medialeaf {
        u8 type;
index fa1c1c329a2d890a19d1b225a7dac503ac5e740e..f9d13fa05d64dc010fcce435b237db7360df60af 100644 (file)
@@ -327,8 +327,8 @@ static void tulip_up(struct net_device *dev)
        tp->dirty_rx = tp->dirty_tx = 0;
 
        if (tp->flags & MC_HASH_ONLY) {
-               u32 addr_low = le32_to_cpu(get_unaligned((__le32 *)dev->dev_addr));
-               u32 addr_high = le16_to_cpu(get_unaligned((__le16 *)(dev->dev_addr+4)));
+               u32 addr_low = get_unaligned_le32(dev->dev_addr);
+               u32 addr_high = get_unaligned_le16(dev->dev_addr + 4);
                if (tp->chip_id == AX88140) {
                        iowrite32(0, ioaddr + CSR13);
                        iowrite32(addr_low,  ioaddr + CSR14);
@@ -1437,13 +1437,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
                        do
                                value = ioread32(ioaddr + CSR9);
                        while (value < 0  && --boguscnt > 0);
-                       put_unaligned(cpu_to_le16(value), ((__le16*)dev->dev_addr) + i);
+                       put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
                        sum += value & 0xffff;
                }
        } else if (chip_idx == COMET) {
                /* No need to read the EEPROM. */
-               put_unaligned(cpu_to_le32(ioread32(ioaddr + 0xA4)), (__le32 *)dev->dev_addr);
-               put_unaligned(cpu_to_le16(ioread32(ioaddr + 0xA8)), (__le16 *)(dev->dev_addr + 4));
+               put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
+               put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
                for (i = 0; i < 6; i ++)
                        sum += dev->dev_addr[i];
        } else {
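
put_unaligned_le16()/put_unaligned_le32() are the matching store helpers: they take a host-order value and write it out LSB-first at a possibly unaligned address, which is what the removed put_unaligned(cpu_to_le*(...)) pairs did, minus the casts. A rough sketch of filling a station address this way (the values stand in for the EEPROM/register reads above):

    #include <asm/unaligned.h>
    #include <linux/types.h>

    /* Store a 6-byte MAC address: low 32 bits, then the high 16 bits,
     * each written little-endian with no alignment requirement. */
    static void store_mac(u8 *dev_addr, u32 lo, u16 hi)
    {
            put_unaligned_le32(lo, dev_addr);        /* bytes 0..3 */
            put_unaligned_le16(hi, dev_addr + 4);    /* bytes 4..5 */
    }
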
index 63abfd72542d118c7a8b93f25cbd5ed3098337ca..e03eef2f2282088c3366356e80979ad52d538661 100644 (file)
@@ -178,6 +178,20 @@ static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char signal)
        CPC_TTY_UNLOCK(card,flags); 
 }
 
+
+static const struct tty_operations pc300_ops = {
+       .open = cpc_tty_open,
+       .close = cpc_tty_close,
+       .write = cpc_tty_write,
+       .write_room = cpc_tty_write_room,
+       .chars_in_buffer = cpc_tty_chars_in_buffer,
+       .tiocmset = pc300_tiocmset,
+       .tiocmget = pc300_tiocmget,
+       .flush_buffer = cpc_tty_flush_buffer,
+       .hangup = cpc_tty_hangup,
+};
+
+
 /*
  * PC300 TTY initialization routine
  *
@@ -225,15 +239,7 @@ void cpc_tty_init(pc300dev_t *pc300dev)
                serial_drv.flags = TTY_DRIVER_REAL_RAW;
 
                /* interface routines from the upper tty layer to the tty driver */
-               serial_drv.open = cpc_tty_open;
-               serial_drv.close = cpc_tty_close;
-               serial_drv.write = cpc_tty_write; 
-               serial_drv.write_room = cpc_tty_write_room; 
-               serial_drv.chars_in_buffer = cpc_tty_chars_in_buffer; 
-               serial_drv.tiocmset = pc300_tiocmset;
-               serial_drv.tiocmget = pc300_tiocmget;
-               serial_drv.flush_buffer = cpc_tty_flush_buffer; 
-               serial_drv.hangup = cpc_tty_hangup;
+               tty_set_operations(&serial_drv, &pc300_ops);
 
                /* register the TTY driver */
                if (tty_register_driver(&serial_drv)) { 
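
The per-field assignments on serial_drv are replaced by a single const struct tty_operations installed with tty_set_operations(), which is the current way for a tty driver to publish its methods. A minimal sketch of the pattern (driver and callback names are placeholders):

    #include <linux/tty.h>
    #include <linux/tty_driver.h>

    static int my_tty_open(struct tty_struct *tty, struct file *filp)
    {
            return 0;
    }

    static void my_tty_close(struct tty_struct *tty, struct file *filp)
    {
    }

    static const struct tty_operations my_tty_ops = {
            .open  = my_tty_open,
            .close = my_tty_close,
    };

    static void my_tty_setup(struct tty_driver *drv)
    {
            tty_set_operations(drv, &my_tty_ops);   /* one call, read-only ops table */
    }
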
index 0f8aca8a4d4331b484226f749f6f33012c1a9cf3..249e18053d5fa4a2d6e29a60e33878bbe1f78c90 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/module.h>
 
 #include <asm/system.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <linux/bitops.h>
 #include <linux/string.h>
 #include <linux/mm.h>
@@ -95,7 +95,7 @@ static struct x25_asy *x25_asy_alloc(void)
                        x25_asy_devs[i] = dev;
                        return sl;
                } else {
-                       printk("x25_asy_alloc() - register_netdev() failure.\n");
+                       printk(KERN_WARNING "x25_asy_alloc() - register_netdev() failure.\n");
                        free_netdev(dev);
                }
        }
@@ -112,23 +112,22 @@ static void x25_asy_free(struct x25_asy *sl)
        kfree(sl->xbuff);
        sl->xbuff = NULL;
 
-       if (!test_and_clear_bit(SLF_INUSE, &sl->flags)) {
-               printk("%s: x25_asy_free for already free unit.\n", sl->dev->name);
-       }
+       if (!test_and_clear_bit(SLF_INUSE, &sl->flags))
+               printk(KERN_ERR "%s: x25_asy_free for already free unit.\n",
+                       sl->dev->name);
 }
 
 static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
 {
        struct x25_asy *sl = dev->priv;
        unsigned char *xbuff, *rbuff;
-       int len = 2* newmtu;
+       int len = 2 * newmtu;
 
        xbuff = kmalloc(len + 4, GFP_ATOMIC);
        rbuff = kmalloc(len + 4, GFP_ATOMIC);
 
-       if (xbuff == NULL || rbuff == NULL)  
-       {
-               printk("%s: unable to grow X.25 buffers, MTU change cancelled.\n",
+       if (xbuff == NULL || rbuff == NULL) {
+               printk(KERN_WARNING "%s: unable to grow X.25 buffers, MTU change cancelled.\n",
                       dev->name);
                kfree(xbuff);
                kfree(rbuff);
@@ -193,25 +192,23 @@ static void x25_asy_bump(struct x25_asy *sl)
        int err;
 
        count = sl->rcount;
-       sl->stats.rx_bytes+=count;
-       
+       sl->stats.rx_bytes += count;
+
        skb = dev_alloc_skb(count+1);
-       if (skb == NULL)  
-       {
-               printk("%s: memory squeeze, dropping packet.\n", sl->dev->name);
+       if (skb == NULL) {
+               printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n",
+                       sl->dev->name);
                sl->stats.rx_dropped++;
                return;
        }
-       skb_push(skb,1);        /* LAPB internal control */
-       memcpy(skb_put(skb,count), sl->rbuff, count);
+       skb_push(skb, 1);       /* LAPB internal control */
+       memcpy(skb_put(skb, count), sl->rbuff, count);
        skb->protocol = x25_type_trans(skb, sl->dev);
-       if((err=lapb_data_received(skb->dev, skb))!=LAPB_OK)
-       {
+       err = lapb_data_received(skb->dev, skb);
+       if (err != LAPB_OK) {
                kfree_skb(skb);
-               printk(KERN_DEBUG "x25_asy: data received err - %d\n",err);
-       }
-       else
-       {
+               printk(KERN_DEBUG "x25_asy: data received err - %d\n", err);
+       } else {
                netif_rx(skb);
                sl->dev->last_rx = jiffies;
                sl->stats.rx_packets++;
@@ -224,10 +221,11 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
        unsigned char *p;
        int actual, count, mtu = sl->dev->mtu;
 
-       if (len > mtu) 
-       {               /* Sigh, shouldn't occur BUT ... */
+       if (len > mtu) {
+               /* Sigh, shouldn't occur BUT ... */
                len = mtu;
-               printk ("%s: truncating oversized transmit packet!\n", sl->dev->name);
+               printk(KERN_DEBUG "%s: truncating oversized transmit packet!\n",
+                                       sl->dev->name);
                sl->stats.tx_dropped++;
                x25_asy_unlock(sl);
                return;
@@ -245,7 +243,7 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len)
         *       14 Oct 1994  Dmitry Gorodchanin.
         */
        sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
-       actual = sl->tty->driver->write(sl->tty, sl->xbuff, count);
+       actual = sl->tty->ops->write(sl->tty, sl->xbuff, count);
        sl->xleft = count - actual;
        sl->xhead = sl->xbuff + actual;
        /* VSV */
@@ -265,8 +263,7 @@ static void x25_asy_write_wakeup(struct tty_struct *tty)
        if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
                return;
 
-       if (sl->xleft <= 0)  
-       {
+       if (sl->xleft <= 0) {
                /* Now serial buffer is almost free & we can start
                 * transmission of another packet */
                sl->stats.tx_packets++;
@@ -275,14 +272,14 @@ static void x25_asy_write_wakeup(struct tty_struct *tty)
                return;
        }
 
-       actual = tty->driver->write(tty, sl->xhead, sl->xleft);
+       actual = tty->ops->write(tty, sl->xhead, sl->xleft);
        sl->xleft -= actual;
        sl->xhead += actual;
 }
 
 static void x25_asy_timeout(struct net_device *dev)
 {
-       struct x25_asy *sl = (struct x25_asy*)(dev->priv);
+       struct x25_asy *sl = dev->priv;
 
        spin_lock(&sl->lock);
        if (netif_queue_stopped(dev)) {
@@ -290,7 +287,7 @@ static void x25_asy_timeout(struct net_device *dev)
                 *      14 Oct 1994 Dmitry Gorodchanin.
                 */
                printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
-                      (sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ?
+                      (tty_chars_in_buffer(sl->tty) || sl->xleft) ?
                       "bad line quality" : "driver error");
                sl->xleft = 0;
                sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
@@ -303,31 +300,34 @@ static void x25_asy_timeout(struct net_device *dev)
 
 static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       struct x25_asy *sl = (struct x25_asy*)(dev->priv);
+       struct x25_asy *sl = dev->priv;
        int err;
 
        if (!netif_running(sl->dev)) {
-               printk("%s: xmit call when iface is down\n", dev->name);
+               printk(KERN_ERR "%s: xmit call when iface is down\n",
+                       dev->name);
                kfree_skb(skb);
                return 0;
        }
-       
-       switch(skb->data[0])
-       {
-               case 0x00:break;
-               case 0x01: /* Connection request .. do nothing */
-                       if((err=lapb_connect_request(dev))!=LAPB_OK)
-                               printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err);
-                       kfree_skb(skb);
-                       return 0;
-               case 0x02: /* Disconnect request .. do nothing - hang up ?? */
-                       if((err=lapb_disconnect_request(dev))!=LAPB_OK)
-                               printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err);
-               default:
-                       kfree_skb(skb);
-                       return  0;
+
+       switch (skb->data[0]) {
+       case 0x00:
+               break;
+       case 0x01: /* Connection request .. do nothing */
+               err = lapb_connect_request(dev);
+               if (err != LAPB_OK)
+                       printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err);
+               kfree_skb(skb);
+               return 0;
+       case 0x02: /* Disconnect request .. do nothing - hang up ?? */
+               err = lapb_disconnect_request(dev);
+               if (err != LAPB_OK)
+                       printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err);
+       default:
+               kfree_skb(skb);
+               return  0;
        }
-       skb_pull(skb,1);        /* Remove control byte */
+       skb_pull(skb, 1);       /* Remove control byte */
        /*
         * If we are busy already- too bad.  We ought to be able
         * to queue things at this point, to allow for a little
@@ -338,10 +338,10 @@ static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev)
         * So, no queues !
         *        14 Oct 1994  Dmitry Gorodchanin.
         */
-       
-       if((err=lapb_data_request(dev,skb))!=LAPB_OK)
-       {
-               printk(KERN_ERR "lapbeth: lapb_data_request error - %d\n", err);
+
+       err = lapb_data_request(dev, skb);
+       if (err != LAPB_OK) {
+               printk(KERN_ERR "x25_asy: lapb_data_request error - %d\n", err);
                kfree_skb(skb);
                return 0;
        }
@@ -357,7 +357,7 @@ static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev)
  *     Called when I frame data arrives. We did the work above - throw it
  *     at the net layer.
  */
-  
+
 static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
 {
        skb->dev->last_rx = jiffies;
@@ -369,24 +369,22 @@ static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb)
  *     busy cases too well. Its tricky to see how to do this nicely -
  *     perhaps lapb should allow us to bounce this ?
  */
+
 static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
 {
-       struct x25_asy *sl=dev->priv;
-       
+       struct x25_asy *sl = dev->priv;
+
        spin_lock(&sl->lock);
-       if (netif_queue_stopped(sl->dev) || sl->tty == NULL)
-       {
+       if (netif_queue_stopped(sl->dev) || sl->tty == NULL) {
                spin_unlock(&sl->lock);
                printk(KERN_ERR "x25_asy: tbusy drop\n");
                kfree_skb(skb);
                return;
        }
        /* We were not busy, so we are now... :-) */
-       if (skb != NULL) 
-       {
+       if (skb != NULL) {
                x25_asy_lock(sl);
-               sl->stats.tx_bytes+=skb->len;
+               sl->stats.tx_bytes += skb->len;
                x25_asy_encaps(sl, skb->data, skb->len);
                dev_kfree_skb(skb);
        }
@@ -396,15 +394,16 @@ static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb)
 /*
  *     LAPB connection establish/down information.
  */
+
 static void x25_asy_connected(struct net_device *dev, int reason)
 {
        struct x25_asy *sl = dev->priv;
        struct sk_buff *skb;
        unsigned char *ptr;
 
-       if ((skb = dev_alloc_skb(1)) == NULL) {
-               printk(KERN_ERR "lapbeth: out of memory\n");
+       skb = dev_alloc_skb(1);
+       if (skb == NULL) {
+               printk(KERN_ERR "x25_asy: out of memory\n");
                return;
        }
 
@@ -422,7 +421,8 @@ static void x25_asy_disconnected(struct net_device *dev, int reason)
        struct sk_buff *skb;
        unsigned char *ptr;
 
-       if ((skb = dev_alloc_skb(1)) == NULL) {
+       skb = dev_alloc_skb(1);
+       if (skb == NULL) {
                printk(KERN_ERR "x25_asy: out of memory\n");
                return;
        }
@@ -449,7 +449,7 @@ static struct lapb_register_struct x25_asy_callbacks = {
 /* Open the low-level part of the X.25 channel. Easy! */
 static int x25_asy_open(struct net_device *dev)
 {
-       struct x25_asy *sl = (struct x25_asy*)(dev->priv);
+       struct x25_asy *sl = dev->priv;
        unsigned long len;
        int err;
 
@@ -466,13 +466,11 @@ static int x25_asy_open(struct net_device *dev)
        len = dev->mtu * 2;
 
        sl->rbuff = kmalloc(len + 4, GFP_KERNEL);
-       if (sl->rbuff == NULL)   {
+       if (sl->rbuff == NULL)
                goto norbuff;
-       }
        sl->xbuff = kmalloc(len + 4, GFP_KERNEL);
-       if (sl->xbuff == NULL)   {
+       if (sl->xbuff == NULL)
                goto noxbuff;
-       }
 
        sl->buffsize = len;
        sl->rcount   = 0;
@@ -480,11 +478,12 @@ static int x25_asy_open(struct net_device *dev)
        sl->flags   &= (1 << SLF_INUSE);      /* Clear ESCAPE & ERROR flags */
 
        netif_start_queue(dev);
-                       
+
        /*
         *      Now attach LAPB
         */
-       if((err=lapb_register(dev, &x25_asy_callbacks))==LAPB_OK)
+       err = lapb_register(dev, &x25_asy_callbacks);
+       if (err == LAPB_OK)
                return 0;
 
        /* Cleanup */
@@ -499,18 +498,20 @@ norbuff:
 /* Close the low-level part of the X.25 channel. Easy! */
 static int x25_asy_close(struct net_device *dev)
 {
-       struct x25_asy *sl = (struct x25_asy*)(dev->priv);
+       struct x25_asy *sl = dev->priv;
        int err;
 
        spin_lock(&sl->lock);
-       if (sl->tty) 
+       if (sl->tty)
                sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
 
        netif_stop_queue(dev);
        sl->rcount = 0;
        sl->xleft  = 0;
-       if((err=lapb_unregister(dev))!=LAPB_OK)
-               printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",err);
+       err = lapb_unregister(dev);
+       if (err != LAPB_OK)
+               printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
+                       err);
        spin_unlock(&sl->lock);
        return 0;
 }
@@ -521,8 +522,9 @@ static int x25_asy_close(struct net_device *dev)
  * a block of X.25 data has been received, which can now be decapsulated
  * and sent on to some IP layer for further processing.
  */
-static void x25_asy_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
+
+static void x25_asy_receive_buf(struct tty_struct *tty,
+                               const unsigned char *cp, char *fp, int count)
 {
        struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
 
@@ -533,9 +535,8 @@ static void x25_asy_receive_buf(struct tty_struct *tty, const unsigned char *cp,
        /* Read the characters out of the buffer */
        while (count--) {
                if (fp && *fp++) {
-                       if (!test_and_set_bit(SLF_ERROR, &sl->flags))  {
+                       if (!test_and_set_bit(SLF_ERROR, &sl->flags))
                                sl->stats.rx_errors++;
-                       }
                        cp++;
                        continue;
                }
@@ -556,31 +557,31 @@ static int x25_asy_open_tty(struct tty_struct *tty)
        struct x25_asy *sl = (struct x25_asy *) tty->disc_data;
        int err;
 
+       if (tty->ops->write == NULL)
+               return -EOPNOTSUPP;
+
        /* First make sure we're not already connected. */
-       if (sl && sl->magic == X25_ASY_MAGIC) {
+       if (sl && sl->magic == X25_ASY_MAGIC)
                return -EEXIST;
-       }
 
        /* OK.  Find a free X.25 channel to use. */
-       if ((sl = x25_asy_alloc()) == NULL) {
+       sl = x25_asy_alloc();
+       if (sl == NULL)
                return -ENFILE;
-       }
 
        sl->tty = tty;
        tty->disc_data = sl;
        tty->receive_room = 65536;
-       if (tty->driver->flush_buffer)  {
-               tty->driver->flush_buffer(tty);
-       }
+       tty_driver_flush_buffer(tty);
        tty_ldisc_flush(tty);
 
        /* Restore default settings */
        sl->dev->type = ARPHRD_X25;
-       
+
        /* Perform the low-level X.25 async init */
-       if ((err = x25_asy_open(sl->dev)))
+       err = x25_asy_open(sl->dev);
+       if (err)
                return err;
-
        /* Done.  We have linked the TTY line to a channel. */
        return sl->dev->base_addr;
 }
@@ -601,9 +602,7 @@ static void x25_asy_close_tty(struct tty_struct *tty)
                return;
 
        if (sl->dev->flags & IFF_UP)
-       {
-               (void) dev_close(sl->dev);
-       }
+               dev_close(sl->dev);
 
        tty->disc_data = NULL;
        sl->tty = NULL;
@@ -613,8 +612,7 @@ static void x25_asy_close_tty(struct tty_struct *tty)
 
 static struct net_device_stats *x25_asy_get_stats(struct net_device *dev)
 {
-       struct x25_asy *sl = (struct x25_asy*)(dev->priv);
-
+       struct x25_asy *sl = dev->priv;
        return &sl->stats;
 }
 
@@ -641,21 +639,19 @@ int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
         * character sequence, according to the X.25 protocol.
         */
 
-       while (len-- > 0) 
-       {
-               switch(c = *s++) 
-               {
-                       case X25_END:
-                               *ptr++ = X25_ESC;
-                               *ptr++ = X25_ESCAPE(X25_END);
-                               break;
-                       case X25_ESC:
-                               *ptr++ = X25_ESC;
-                               *ptr++ = X25_ESCAPE(X25_ESC);
-                               break;
-                        default:
-                               *ptr++ = c;
-                               break;
+       while (len-- > 0) {
+               switch (c = *s++) {
+               case X25_END:
+                       *ptr++ = X25_ESC;
+                       *ptr++ = X25_ESCAPE(X25_END);
+                       break;
+               case X25_ESC:
+                       *ptr++ = X25_ESC;
+                       *ptr++ = X25_ESCAPE(X25_ESC);
+                       break;
+               default:
+                       *ptr++ = c;
+                       break;
                }
        }
        *ptr++ = X25_END;
@@ -665,31 +661,25 @@ int x25_asy_esc(unsigned char *s, unsigned char *d, int len)
 static void x25_asy_unesc(struct x25_asy *sl, unsigned char s)
 {
 
-       switch(s) 
-       {
-               case X25_END:
-                       if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && (sl->rcount > 2))  
-                       {
-                               x25_asy_bump(sl);
-                       }
-                       clear_bit(SLF_ESCAPE, &sl->flags);
-                       sl->rcount = 0;
-                       return;
-
-               case X25_ESC:
-                       set_bit(SLF_ESCAPE, &sl->flags);
-                       return;
-                       
-               case X25_ESCAPE(X25_ESC):
-               case X25_ESCAPE(X25_END):
-                       if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
-                               s = X25_UNESCAPE(s);
-                       break;
-       }
-       if (!test_bit(SLF_ERROR, &sl->flags))  
-       {
-               if (sl->rcount < sl->buffsize)  
-               {
+       switch (s) {
+       case X25_END:
+               if (!test_and_clear_bit(SLF_ERROR, &sl->flags)
+                       && sl->rcount > 2)
+                       x25_asy_bump(sl);
+               clear_bit(SLF_ESCAPE, &sl->flags);
+               sl->rcount = 0;
+               return;
+       case X25_ESC:
+               set_bit(SLF_ESCAPE, &sl->flags);
+               return;
+       case X25_ESCAPE(X25_ESC):
+       case X25_ESCAPE(X25_END):
+               if (test_and_clear_bit(SLF_ESCAPE, &sl->flags))
+                       s = X25_UNESCAPE(s);
+               break;
+       }
+       if (!test_bit(SLF_ERROR, &sl->flags)) {
+               if (sl->rcount < sl->buffsize) {
                        sl->rbuff[sl->rcount++] = s;
                        return;
                }
@@ -709,7 +699,7 @@ static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
        if (!sl || sl->magic != X25_ASY_MAGIC)
                return -EINVAL;
 
-       switch(cmd) {
+       switch (cmd) {
        case SIOCGIFNAME:
                if (copy_to_user((void __user *)arg, sl->dev->name,
                                        strlen(sl->dev->name) + 1))
@@ -724,8 +714,8 @@ static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
 
 static int x25_asy_open_dev(struct net_device *dev)
 {
-       struct x25_asy *sl = (struct x25_asy*)(dev->priv);
-       if(sl->tty==NULL)
+       struct x25_asy *sl = dev->priv;
+       if (sl->tty == NULL)
                return -ENODEV;
        return 0;
 }
@@ -741,9 +731,9 @@ static void x25_asy_setup(struct net_device *dev)
        set_bit(SLF_INUSE, &sl->flags);
 
        /*
-        *      Finish setting up the DEVICE info. 
+        *      Finish setting up the DEVICE info.
         */
-        
+
        dev->mtu                = SL_MTU;
        dev->hard_start_xmit    = x25_asy_xmit;
        dev->tx_timeout         = x25_asy_timeout;
@@ -778,9 +768,10 @@ static int __init init_x25_asy(void)
                x25_asy_maxdev = 4; /* Sanity */
 
        printk(KERN_INFO "X.25 async: version 0.00 ALPHA "
-                       "(dynamic channels, max=%d).\n", x25_asy_maxdev );
+                       "(dynamic channels, max=%d).\n", x25_asy_maxdev);
 
-       x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device*), GFP_KERNEL);
+       x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device *),
+                               GFP_KERNEL);
        if (!x25_asy_devs) {
                printk(KERN_WARNING "X25 async: Can't allocate x25_asy_ctrls[] "
                                "array! Uaargh! (-> No X.25 available)\n");
@@ -802,7 +793,7 @@ static void __exit exit_x25_asy(void)
                        struct x25_asy *sl = dev->priv;
 
                        spin_lock_bh(&sl->lock);
-                       if (sl->tty) 
+                       if (sl->tty)
                                tty_hangup(sl->tty);
 
                        spin_unlock_bh(&sl->lock);
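
Besides the printk log levels and CodingStyle cleanups, the recurring functional change in this file is that the line discipline no longer reaches through tty->driver: writes go via tty->ops->write(), and buffer state is queried through the tty_driver_flush_buffer() and tty_chars_in_buffer() wrappers, which also behave sanely when a driver leaves the optional hooks NULL. A small sketch of the access pattern (the function is illustrative, not from the driver):

    #include <linux/tty.h>
    #include <linux/errno.h>

    static int ldisc_push(struct tty_struct *tty, const unsigned char *buf, int len)
    {
            if (tty->ops->write == NULL)    /* refuse ttys that cannot write at all */
                    return -EOPNOTSUPP;

            tty_driver_flush_buffer(tty);   /* wrapper: no-op if the driver has no flush */
            return tty->ops->write(tty, buf, len);   /* bytes actually accepted */
    }
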
index c2642bc1d49bdbb02d09f1424939ab6439926508..2c343aae38d4c95676f92b57974c589a8df4c804 100644 (file)
@@ -56,7 +56,7 @@ obj-$(CONFIG_RTL8187) += rtl8187.o
 
 obj-$(CONFIG_ADM8211)  += adm8211.o
 
-obj-$(CONFIG_IWLCORE)  += iwlwifi/
+obj-$(CONFIG_IWLWIFI)  += iwlwifi/
 obj-$(CONFIG_RT2X00)   += rt2x00/
 
 obj-$(CONFIG_P54_COMMON)       += p54/
index 932d6b1c9d0bad70b45a285faafedd602426c5e1..45f47c1c0a353ec148a7ac60828954df9b042e90 100644 (file)
@@ -3657,7 +3657,7 @@ void mpi_receive_802_11 (struct airo_info *ai)
        ptr += hdrlen;
        if (hdrlen == 24)
                ptr += 6;
-       gap = le16_to_cpu(get_unaligned((__le16 *)ptr));
+       gap = get_unaligned_le16(ptr);
        ptr += sizeof(__le16);
        if (gap) {
                if (gap <= 8)
@@ -4347,24 +4347,28 @@ static int proc_config_open( struct inode *inode, struct file *file );
 static int proc_wepkey_open( struct inode *inode, struct file *file );
 
 static const struct file_operations proc_statsdelta_ops = {
+       .owner          = THIS_MODULE,
        .read           = proc_read,
        .open           = proc_statsdelta_open,
        .release        = proc_close
 };
 
 static const struct file_operations proc_stats_ops = {
+       .owner          = THIS_MODULE,
        .read           = proc_read,
        .open           = proc_stats_open,
        .release        = proc_close
 };
 
 static const struct file_operations proc_status_ops = {
+       .owner          = THIS_MODULE,
        .read           = proc_read,
        .open           = proc_status_open,
        .release        = proc_close
 };
 
 static const struct file_operations proc_SSID_ops = {
+       .owner          = THIS_MODULE,
        .read           = proc_read,
        .write          = proc_write,
        .open           = proc_SSID_open,
@@ -4372,6 +4376,7 @@ static const struct file_operations proc_SSID_ops = {
 };
 
 static const struct file_operations proc_BSSList_ops = {
+       .owner          = THIS_MODULE,
        .read           = proc_read,
        .write          = proc_write,
        .open           = proc_BSSList_open,
@@ -4379,6 +4384,7 @@ static const struct file_operations proc_BSSList_ops = {
 };
 
 static const struct file_operations proc_APList_ops = {
+       .owner          = THIS_MODULE,
        .read           = proc_read,
        .write          = proc_write,
        .open           = proc_APList_open,
@@ -4386,6 +4392,7 @@ static const struct file_operations proc_APList_ops = {
 };
 
 static const struct file_operations proc_config_ops = {
+       .owner          = THIS_MODULE,
        .read           = proc_read,
        .write          = proc_write,
        .open           = proc_config_open,
@@ -4393,6 +4400,7 @@ static const struct file_operations proc_config_ops = {
 };
 
 static const struct file_operations proc_wepkey_ops = {
+       .owner          = THIS_MODULE,
        .read           = proc_read,
        .write          = proc_write,
        .open           = proc_wepkey_open,
@@ -4411,10 +4419,6 @@ struct proc_data {
        void (*on_close) (struct inode *, struct file *);
 };
 
-#ifndef SETPROC_OPS
-#define SETPROC_OPS(entry, ops) (entry)->proc_fops = &(ops)
-#endif
-
 static int setup_proc_entry( struct net_device *dev,
                             struct airo_info *apriv ) {
        struct proc_dir_entry *entry;
@@ -4430,100 +4434,76 @@ static int setup_proc_entry( struct net_device *dev,
        apriv->proc_entry->owner = THIS_MODULE;
 
        /* Setup the StatsDelta */
-       entry = create_proc_entry("StatsDelta",
-                                 S_IFREG | (S_IRUGO&proc_perm),
-                                 apriv->proc_entry);
+       entry = proc_create_data("StatsDelta",
+                                S_IFREG | (S_IRUGO&proc_perm),
+                                apriv->proc_entry, &proc_statsdelta_ops, dev);
        if (!entry)
                goto fail_stats_delta;
        entry->uid = proc_uid;
        entry->gid = proc_gid;
-       entry->data = dev;
-       entry->owner = THIS_MODULE;
-       SETPROC_OPS(entry, proc_statsdelta_ops);
 
        /* Setup the Stats */
-       entry = create_proc_entry("Stats",
-                                 S_IFREG | (S_IRUGO&proc_perm),
-                                 apriv->proc_entry);
+       entry = proc_create_data("Stats",
+                                S_IFREG | (S_IRUGO&proc_perm),
+                                apriv->proc_entry, &proc_stats_ops, dev);
        if (!entry)
                goto fail_stats;
        entry->uid = proc_uid;
        entry->gid = proc_gid;
-       entry->data = dev;
-       entry->owner = THIS_MODULE;
-       SETPROC_OPS(entry, proc_stats_ops);
 
        /* Setup the Status */
-       entry = create_proc_entry("Status",
-                                 S_IFREG | (S_IRUGO&proc_perm),
-                                 apriv->proc_entry);
+       entry = proc_create_data("Status",
+                                S_IFREG | (S_IRUGO&proc_perm),
+                                apriv->proc_entry, &proc_status_ops, dev);
        if (!entry)
                goto fail_status;
        entry->uid = proc_uid;
        entry->gid = proc_gid;
-       entry->data = dev;
-       entry->owner = THIS_MODULE;
-       SETPROC_OPS(entry, proc_status_ops);
 
        /* Setup the Config */
-       entry = create_proc_entry("Config",
-                                 S_IFREG | proc_perm,
-                                 apriv->proc_entry);
+       entry = proc_create_data("Config",
+                                S_IFREG | proc_perm,
+                                apriv->proc_entry, &proc_config_ops, dev);
        if (!entry)
                goto fail_config;
        entry->uid = proc_uid;
        entry->gid = proc_gid;
-       entry->data = dev;
-       entry->owner = THIS_MODULE;
-       SETPROC_OPS(entry, proc_config_ops);
 
        /* Setup the SSID */
-       entry = create_proc_entry("SSID",
-                                 S_IFREG | proc_perm,
-                                 apriv->proc_entry);
+       entry = proc_create_data("SSID",
+                                S_IFREG | proc_perm,
+                                apriv->proc_entry, &proc_SSID_ops, dev);
        if (!entry)
                goto fail_ssid;
        entry->uid = proc_uid;
        entry->gid = proc_gid;
-       entry->data = dev;
-       entry->owner = THIS_MODULE;
-       SETPROC_OPS(entry, proc_SSID_ops);
 
        /* Setup the APList */
-       entry = create_proc_entry("APList",
-                                 S_IFREG | proc_perm,
-                                 apriv->proc_entry);
+       entry = proc_create_data("APList",
+                                S_IFREG | proc_perm,
+                                apriv->proc_entry, &proc_APList_ops, dev);
        if (!entry)
                goto fail_aplist;
        entry->uid = proc_uid;
        entry->gid = proc_gid;
-       entry->data = dev;
-       entry->owner = THIS_MODULE;
-       SETPROC_OPS(entry, proc_APList_ops);
 
        /* Setup the BSSList */
-       entry = create_proc_entry("BSSList",
-                                 S_IFREG | proc_perm,
-                                 apriv->proc_entry);
+       entry = proc_create_data("BSSList",
+                                S_IFREG | proc_perm,
+                                apriv->proc_entry, &proc_BSSList_ops, dev);
        if (!entry)
                goto fail_bsslist;
        entry->uid = proc_uid;
        entry->gid = proc_gid;
-       entry->data = dev;
-       entry->owner = THIS_MODULE;
-       SETPROC_OPS(entry, proc_BSSList_ops);
 
        /* Setup the WepKey */
-       entry = create_proc_entry("WepKey",
-                                 S_IFREG | proc_perm,
-                                 apriv->proc_entry);
+       entry = proc_create_data("WepKey",
+                                S_IFREG | proc_perm,
+                                apriv->proc_entry, &proc_wepkey_ops, dev);
        if (!entry)
                goto fail_wepkey;
        entry->uid = proc_uid;
        entry->gid = proc_gid;
-       entry->data = dev;
-       entry->owner = THIS_MODULE;
-       SETPROC_OPS(entry, proc_wepkey_ops);
 
        return 0;
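
proc_create_data() folds the old sequence of create_proc_entry() followed by assignments to entry->data, entry->owner and the fops (the job of the removed SETPROC_OPS macro) into one call, so the entry never exists half-initialised, and the added .owner = THIS_MODULE fields pin the module while one of these files is open. A minimal sketch of the new-style registration (all names are placeholders):

    #include <linux/fs.h>
    #include <linux/proc_fs.h>
    #include <linux/stat.h>
    #include <linux/module.h>
    #include <linux/errno.h>

    static ssize_t my_status_read(struct file *file, char __user *buf,
                                  size_t count, loff_t *ppos)
    {
            return 0;                        /* placeholder: reads as an empty file */
    }

    static const struct file_operations my_status_fops = {
            .owner = THIS_MODULE,
            .read  = my_status_read,
    };

    static int my_proc_register(struct proc_dir_entry *parent, void *priv)
    {
            /* Entry, fops and private data are wired up in one step; handlers
             * can retrieve priv later via PDE(inode)->data. */
            if (!proc_create_data("Status", S_IFREG | S_IRUGO, parent,
                                  &my_status_fops, priv))
                    return -ENOMEM;
            return 0;
    }
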
 
@@ -5625,9 +5605,9 @@ static int __init airo_init_module( void )
        int have_isa_dev = 0;
 #endif
 
-       airo_entry = create_proc_entry("aironet",
+       airo_entry = create_proc_entry("driver/aironet",
                                       S_IFDIR | airo_perm,
-                                      proc_root_driver);
+                                      NULL);
 
        if (airo_entry) {
                airo_entry->uid = proc_uid;
@@ -5651,7 +5631,7 @@ static int __init airo_init_module( void )
        airo_print_info("", "Finished probing for PCI adapters");
 
        if (i) {
-               remove_proc_entry("aironet", proc_root_driver);
+               remove_proc_entry("driver/aironet", NULL);
                return i;
        }
 #endif
@@ -5673,7 +5653,7 @@ static void __exit airo_cleanup_module( void )
 #ifdef CONFIG_PCI
        pci_unregister_driver(&airo_driver);
 #endif
-       remove_proc_entry("aironet", proc_root_driver);
+       remove_proc_entry("driver/aironet", NULL);
 }
 
 /*
index e18305b781c91163deef0912c8a1927897f960ce..4e5c8fc35200a8b0abcaaefe4e8148cf85cb20b5 100644 (file)
 #include "reg.h"
 #include "debug.h"
 
-/* unaligned little endian access */
-#define LE_READ_2(_p) (le16_to_cpu(get_unaligned((__le16 *)(_p))))
-#define LE_READ_4(_p) (le32_to_cpu(get_unaligned((__le32 *)(_p))))
-
 enum {
        ATH_LED_TX,
        ATH_LED_RX,
@@ -2909,9 +2905,9 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw,
                        if (!mclist)
                                break;
                        /* calculate XOR of eight 6-bit values */
-                       val = LE_READ_4(mclist->dmi_addr + 0);
+                       val = get_unaligned_le32(mclist->dmi_addr + 0);
                        pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
-                       val = LE_READ_4(mclist->dmi_addr + 3);
+                       val = get_unaligned_le32(mclist->dmi_addr + 3);
                        pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
                        pos &= 0x3f;
                        mfilt[pos / 32] |= (1 << (pos % 32));
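
The loop above hashes each multicast address into a 6-bit position in the hardware's 64-bit filter: two overlapping 32-bit little-endian loads are each XOR-folded down, combined, masked to 0..63, and the corresponding bit is set in one of two 32-bit filter words. A standalone sketch of the same computation (the function name is made up; addr must have at least 7 readable bytes, as dmi_addr does inside its containing struct):

    #include <asm/unaligned.h>
    #include <linux/types.h>

    static void mc_hash_set(const u8 *addr, u32 mfilt[2])
    {
            u32 val, pos;

            val  = get_unaligned_le32(addr + 0);                  /* bytes 0..3 */
            pos  = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
            val  = get_unaligned_le32(addr + 3);                  /* bytes 3..6 */
            pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
            pos &= 0x3f;                                          /* 0..63 */

            mfilt[pos / 32] |= 1 << (pos % 32);
    }
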
index 4bf8a99099fee7713f357b5f3f450148441c19a6..8c24cd72aaca711ae9852fcdd0b0e0c2bb44b325 100644 (file)
@@ -2171,7 +2171,7 @@ static int b43_write_initvals(struct b43_wldev *dev,
                                goto err_format;
                        array_size -= sizeof(iv->data.d32);
 
-                       value = be32_to_cpu(get_unaligned(&iv->data.d32));
+                       value = get_unaligned_be32(&iv->data.d32);
                        b43_write32(dev, offset, value);
 
                        iv = (const struct b43_iv *)((const uint8_t *)iv +
index ef829ee8ffd47f3bd538c74964de21425d056a3f..14a5eea2573e8a01e28f2b9cb23cda7f4450bc8e 100644 (file)
@@ -1720,7 +1720,7 @@ static int b43legacy_write_initvals(struct b43legacy_wldev *dev,
                                goto err_format;
                        array_size -= sizeof(iv->data.d32);
 
-                       value = be32_to_cpu(get_unaligned(&iv->data.d32));
+                       value = get_unaligned_be32(&iv->data.d32);
                        b43legacy_write32(dev, offset, value);
 
                        iv = (const struct b43legacy_iv *)((const uint8_t *)iv +
index c4e631d14bfe12b5950e3aa3ef5e7d9017ab52db..d5b7a76fcaad62b3d684ad9bf697b4f27d9b3c59 100644 (file)
@@ -1,6 +1,15 @@
+config IWLWIFI
+       bool
+       default n
+
 config IWLCORE
        tristate "Intel Wireless Wifi Core"
        depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
+       select IWLWIFI
+       select MAC80211_LEDS if IWLWIFI_LEDS
+       select LEDS_CLASS if IWLWIFI_LEDS
+       select RFKILL if IWLWIFI_RFKILL
+       select RFKILL_INPUT if IWLWIFI_RFKILL
 
 config IWLWIFI_LEDS
        bool
@@ -9,8 +18,6 @@ config IWLWIFI_LEDS
 config IWLWIFI_RFKILL
        boolean "IWLWIFI RF kill support"
        depends on IWLCORE
-       select RFKILL
-       select RFKILL_INPUT
 
 config IWL4965
        tristate "Intel Wireless WiFi 4965AGN"
@@ -50,8 +57,6 @@ config IWL4965_HT
 config IWL4965_LEDS
        bool "Enable LEDS features in iwl4965 driver"
        depends on IWL4965
-       select MAC80211_LEDS
-       select LEDS_CLASS
        select IWLWIFI_LEDS
        ---help---
          This option enables LEDS for the iwlwifi drivers
@@ -106,6 +111,9 @@ config IWL3945
        tristate "Intel PRO/Wireless 3945ABG/BG Network Connection"
        depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL
        select FW_LOADER
+       select IWLWIFI
+       select MAC80211_LEDS if IWL3945_LEDS
+       select LEDS_CLASS if IWL3945_LEDS
        ---help---
          Select to build the driver supporting the:
 
@@ -137,8 +145,6 @@ config IWL3945_SPECTRUM_MEASUREMENT
 config IWL3945_LEDS
        bool "Enable LEDS features in iwl3945 driver"
        depends on IWL3945
-       select MAC80211_LEDS
-       select LEDS_CLASS
        ---help---
          This option enables LEDS for the iwl3945 driver.
 
index 598e4eef4f4080181a3cd089a617ba62ed453356..d3406830c8e3591231d7234680effc342d4ef69a 100644 (file)
@@ -554,40 +554,36 @@ static void iwl3945_add_radiotap(struct iwl3945_priv *priv,
        iwl3945_rt->rt_hdr.it_pad = 0;
 
        /* total header + data */
-       put_unaligned(cpu_to_le16(sizeof(*iwl3945_rt)),
-                     &iwl3945_rt->rt_hdr.it_len);
+       put_unaligned_le16(sizeof(*iwl3945_rt), &iwl3945_rt->rt_hdr.it_len);
 
        /* Indicate all the fields we add to the radiotap header */
-       put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) |
-                                 (1 << IEEE80211_RADIOTAP_FLAGS) |
-                                 (1 << IEEE80211_RADIOTAP_RATE) |
-                                 (1 << IEEE80211_RADIOTAP_CHANNEL) |
-                                 (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
-                                 (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
-                                 (1 << IEEE80211_RADIOTAP_ANTENNA)),
-                     &iwl3945_rt->rt_hdr.it_present);
+       put_unaligned_le32((1 << IEEE80211_RADIOTAP_TSFT) |
+                          (1 << IEEE80211_RADIOTAP_FLAGS) |
+                          (1 << IEEE80211_RADIOTAP_RATE) |
+                          (1 << IEEE80211_RADIOTAP_CHANNEL) |
+                          (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) |
+                          (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) |
+                          (1 << IEEE80211_RADIOTAP_ANTENNA),
+                       &iwl3945_rt->rt_hdr.it_present);
 
        /* Zero the flags, we'll add to them as we go */
        iwl3945_rt->rt_flags = 0;
 
-       put_unaligned(cpu_to_le64(tsf), &iwl3945_rt->rt_tsf);
+       put_unaligned_le64(tsf, &iwl3945_rt->rt_tsf);
 
        iwl3945_rt->rt_dbmsignal = signal;
        iwl3945_rt->rt_dbmnoise = noise;
 
        /* Convert the channel frequency and set the flags */
-       put_unaligned(cpu_to_le16(stats->freq), &iwl3945_rt->rt_channelMHz);
+       put_unaligned_le16(stats->freq, &iwl3945_rt->rt_channelMHz);
        if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK))
-               put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
-                                         IEEE80211_CHAN_5GHZ),
+               put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ,
                              &iwl3945_rt->rt_chbitmask);
        else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK)
-               put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK |
-                                         IEEE80211_CHAN_2GHZ),
+               put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ,
                              &iwl3945_rt->rt_chbitmask);
        else    /* 802.11g */
-               put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM |
-                                         IEEE80211_CHAN_2GHZ),
+               put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ,
                              &iwl3945_rt->rt_chbitmask);
 
        if (rate == -1)
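
Radiotap header fields are defined as little-endian and sit at unaligned offsets, so every put_unaligned(cpu_to_leNN(...), ...) pair above becomes the sized put_unaligned_leNN() helper; the same helpers cover 16-, 32- and 64-bit fields. A tiny sketch of the 64-bit case (the struct here is a cut-down stand-in, not the real radiotap header):

    #include <asm/unaligned.h>
    #include <linux/types.h>

    struct rt_stub {
            u8 it_version;
            u8 it_pad;
            u8 it_len[2];    /* little-endian u16, deliberately unaligned */
            u8 rt_tsf[8];    /* little-endian u64 timestamp */
    } __attribute__((packed));

    static void rt_stub_fill(struct rt_stub *rt, u64 tsf)
    {
            put_unaligned_le16(sizeof(*rt), rt->it_len);   /* host value in, LE bytes out */
            put_unaligned_le64(tsf, rt->rt_tsf);
    }
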
index e72c97a0d6c1f36bbed6edbf2d50f38e8f57a1a0..1a409fcc80d3fdb30f65a703a2938bcba58af8d2 100644 (file)
@@ -522,7 +522,7 @@ static int lbs_process_bss(struct bss_descriptor *bss,
 
        if (*bytesleft >= sizeof(beaconsize)) {
                /* Extract & convert beacon size from the command buffer */
-               beaconsize = le16_to_cpu(get_unaligned((__le16 *)*pbeaconinfo));
+               beaconsize = get_unaligned_le16(*pbeaconinfo);
                *bytesleft -= sizeof(beaconsize);
                *pbeaconinfo += sizeof(beaconsize);
        }
index bced3fe1cf8a412968ebc46375b297349b5fcc8d..5dd23c93497db90163365b84a7e62917571cd122 100644 (file)
@@ -767,42 +767,18 @@ static __u8 *UnStuffData(__u8 * src, __u8 * end, __u8 * dst,
 /************************************************************************/
 /* General routines for STRIP                                          */
 
-/*
- * get_baud returns the current baud rate, as one of the constants defined in
- * termbits.h
- * If the user has issued a baud rate override using the 'setserial' command
- * and the logical current rate is set to 38.4, then the true baud rate
- * currently in effect (57.6 or 115.2) is returned.
- */
-static unsigned int get_baud(struct tty_struct *tty)
-{
-       if (!tty || !tty->termios)
-               return (0);
-       if ((tty->termios->c_cflag & CBAUD) == B38400 && tty->driver_data) {
-               struct async_struct *info =
-                   (struct async_struct *) tty->driver_data;
-               if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
-                       return (B57600);
-               if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
-                       return (B115200);
-       }
-       return (tty->termios->c_cflag & CBAUD);
-}
-
 /*
  * set_baud sets the baud rate to the rate defined by baudcode
- * Note: The rate B38400 should be avoided, because the user may have
- * issued a 'setserial' speed override to map that to a different speed.
- * We could achieve a true rate of 38400 if we needed to by cancelling
- * any user speed override that is in place, but that might annoy the
- * user, so it is simplest to just avoid using 38400.
  */
-static void set_baud(struct tty_struct *tty, unsigned int baudcode)
+static void set_baud(struct tty_struct *tty, speed_t baudrate)
 {
-       struct ktermios old_termios = *(tty->termios);
-       tty->termios->c_cflag &= ~CBAUD;        /* Clear the old baud setting */
-       tty->termios->c_cflag |= baudcode;      /* Set the new baud setting */
-       tty->driver->set_termios(tty, &old_termios);
+       struct ktermios old_termios;
+
+       mutex_lock(&tty->termios_mutex);
+       old_termios =*(tty->termios);
+       old_termios = *(tty->termios);
+       tty_encode_baud_rate(tty, baudrate, baudrate);
+       tty->ops->set_termios(tty, &old_termios);
+       mutex_unlock(&tty->termios_mutex);
 }
 
 /*
@@ -1217,7 +1193,7 @@ static void ResetRadio(struct strip *strip_info)
        strip_info->watchdog_doreset = jiffies + 1 * HZ;
 
        /* If the user has selected a baud rate above 38.4 see what magic we have to do */
-       if (strip_info->user_baud > B38400) {
+       if (strip_info->user_baud > 38400) {
                /*
                 * Subtle stuff: Pay attention :-)
                 * If the serial port is currently at the user's selected (>38.4) rate,
@@ -1227,17 +1203,17 @@ static void ResetRadio(struct strip *strip_info)
                 * issued the ATS304 command last time through, so this time we restore
                 * the user's selected rate and issue the normal starmode reset string.
                 */
-               if (strip_info->user_baud == get_baud(tty)) {
+               if (strip_info->user_baud == tty_get_baud_rate(tty)) {
                        static const char b0[] = "ate0q1s304=57600\r";
                        static const char b1[] = "ate0q1s304=115200\r";
                        static const StringDescriptor baudstring[2] =
                            { {b0, sizeof(b0) - 1}
                        , {b1, sizeof(b1) - 1}
                        };
-                       set_baud(tty, B19200);
-                       if (strip_info->user_baud == B57600)
+                       set_baud(tty, 19200);
+                       if (strip_info->user_baud == 57600)
                                s = baudstring[0];
-                       else if (strip_info->user_baud == B115200)
+                       else if (strip_info->user_baud == 115200)
                                s = baudstring[1];
                        else
                                s = baudstring[1];      /* For now */
@@ -1245,7 +1221,7 @@ static void ResetRadio(struct strip *strip_info)
                        set_baud(tty, strip_info->user_baud);
        }
 
-       tty->driver->write(tty, s.string, s.length);
+       tty->ops->write(tty, s.string, s.length);
 #ifdef EXT_COUNTERS
        strip_info->tx_ebytes += s.length;
 #endif
@@ -1267,7 +1243,7 @@ static void strip_write_some_more(struct tty_struct *tty)
 
        if (strip_info->tx_left > 0) {
                int num_written =
-                   tty->driver->write(tty, strip_info->tx_head,
+                   tty->ops->write(tty, strip_info->tx_head,
                                      strip_info->tx_left);
                strip_info->tx_left -= num_written;
                strip_info->tx_head += num_written;
@@ -2457,7 +2433,7 @@ static int strip_open_low(struct net_device *dev)
        strip_info->working = FALSE;
        strip_info->firmware_level = NoStructure;
        strip_info->next_command = CompatibilityCommand;
-       strip_info->user_baud = get_baud(strip_info->tty);
+       strip_info->user_baud = tty_get_baud_rate(strip_info->tty);
 
        printk(KERN_INFO "%s: Initializing Radio.\n",
               strip_info->dev->name);
@@ -2631,6 +2607,13 @@ static int strip_open(struct tty_struct *tty)
        if (strip_info && strip_info->magic == STRIP_MAGIC)
                return -EEXIST;
 
+       /*
+        * We need a write method.
+        */
+
+       if (tty->ops->write == NULL)
+               return -EOPNOTSUPP;
+
        /*
         * OK.  Find a free STRIP channel to use.
         */
@@ -2652,8 +2635,7 @@ static int strip_open(struct tty_struct *tty)
        tty->disc_data = strip_info;
        tty->receive_room = 65536;
 
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       tty_driver_flush_buffer(tty);
 
        /*
         * Restore default settings
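
The removed get_baud() and the old set_baud() worked in termios B* constants and hand-patched c_cflag; the replacement uses plain numeric speeds: tty_get_baud_rate() reports the current rate as an integer (so comparisons against 57600 or 115200 just work), and tty_encode_baud_rate() stores a numeric rate back into the termios, with termios_mutex held around the update. A minimal sketch of the new-style speed change (illustrative only):

    #include <linux/tty.h>

    static void set_line_speed(struct tty_struct *tty, speed_t baud)
    {
            struct ktermios old;

            mutex_lock(&tty->termios_mutex);
            old = *tty->termios;                    /* keep a copy for the driver hook */
            tty_encode_baud_rate(tty, baud, baud);  /* same rate in both directions */
            tty->ops->set_termios(tty, &old);       /* let the driver apply the change */
            mutex_unlock(&tty->termios_mutex);
    }
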
index e34675c2f8fc9cb1c5e6394f450c4fde7120b2d9..5316074f39f0b0c28e73ace87813ecb0e3883ef6 100644 (file)
@@ -545,11 +545,11 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
         * be padded. Unaligned access might also happen if the length_info
         * structure is not present.
         */
-       if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG))
+       if (get_unaligned_le16(&length_info->tag) == RX_LENGTH_INFO_TAG)
        {
                unsigned int l, k, n;
                for (i = 0, l = 0;; i++) {
-                       k = le16_to_cpu(get_unaligned(&length_info->length[i]));
+                       k = get_unaligned_le16(&length_info->length[i]);
                        if (k == 0)
                                return;
                        n = l+k;
index 24640726f8bbde0b0d7ee0d1517abade187dfe7a..57e1f495b9fc0456db009a9540ec0f7111295b51 100644 (file)
@@ -1062,7 +1062,7 @@ static int yellowfin_rx(struct net_device *dev)
                buf_addr = rx_skb->data;
                data_size = (le32_to_cpu(desc->dbdma_cmd) -
                        le32_to_cpu(desc->result_status)) & 0xffff;
-               frame_status = le16_to_cpu(get_unaligned((__le16*)&(buf_addr[data_size - 2])));
+               frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
                if (yellowfin_debug > 4)
                        printk(KERN_DEBUG "  yellowfin_rx() status was %4.4x.\n",
                                   frame_status);
index e07492be1f4ac36a418896f77f45a4a25fffdaa7..208dd12825bc4314be4bbba70a9c898d8b99ae92 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/kernel.h>
 #include <linux/nubus.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/module.h>
 
 #include <asm/byteorder.h>
 
 static int
-get_nubus_dev_info(char *buf, char **start, off_t pos, int count)
+nubus_devices_proc_show(struct seq_file *m, void *v)
 {
        struct nubus_dev *dev = nubus_devices;
-       off_t at = 0;
-       int len, cnt;
 
-       cnt = 0;
-       while (dev && count > cnt) {
-               len = sprintf(buf, "%x\t%04x %04x %04x %04x",
+       while (dev) {
+               seq_printf(m, "%x\t%04x %04x %04x %04x",
                              dev->board->slot,
                              dev->category,
                              dev->type,
                              dev->dr_sw,
                              dev->dr_hw);
-               len += sprintf(buf+len,
-                              "\t%08lx",
-                              dev->board->slot_addr);
-               buf[len++] = '\n';
-               at += len;
-               if (at >= pos) {
-                       if (!*start) {
-                               *start = buf + (pos - (at - len));
-                               cnt = at - pos;
-                       } else
-                               cnt += len;
-                       buf += len;
-               }
+               seq_printf(m, "\t%08lx\n", dev->board->slot_addr);
                dev = dev->next;
        }
-       return (count > cnt) ? cnt : count;
+       return 0;
+}
+
+static int nubus_devices_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, nubus_devices_proc_show, NULL);
 }
 
+static const struct file_operations nubus_devices_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = nubus_devices_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
 static struct proc_dir_entry *proc_bus_nubus_dir;
 
 static void nubus_proc_subdir(struct nubus_dev* dev,
@@ -171,8 +170,7 @@ void __init nubus_proc_init(void)
 {
        if (!MACH_IS_MAC)
                return;
-       proc_bus_nubus_dir = proc_mkdir("nubus", proc_bus);
-       create_proc_info_entry("devices", 0, proc_bus_nubus_dir,
-                               get_nubus_dev_info);
+       proc_bus_nubus_dir = proc_mkdir("bus/nubus", NULL);
+       proc_create("devices", 0, proc_bus_nubus_dir, &nubus_devices_proc_fops);
        proc_bus_nubus_add_devices();
 }
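
The /proc/bus/nubus/devices file moves from the old get_info callback, with its manual offset/count bookkeeping, to the seq_file single_open() pattern: one show() routine prints the whole report with seq_printf() and the read/llseek/release plumbing comes from seq_file. A minimal sketch of that pattern (names are placeholders):

    #include <linux/fs.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>
    #include <linux/module.h>

    static int my_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "one line per device goes here\n");
            return 0;
    }

    static int my_open(struct inode *inode, struct file *file)
    {
            return single_open(file, my_show, NULL);   /* NULL: no private data */
    }

    static const struct file_operations my_proc_fops = {
            .owner   = THIS_MODULE,
            .open    = my_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    /* Registered with: proc_create("devices", 0, parent_dir, &my_proc_fops); */
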
index b07ba2a1411949cf7751d0830617a1ac34013abe..9304c45550790c7e2f1f53833d4370ac051b3f4f 100644 (file)
@@ -491,7 +491,7 @@ typedef enum {
  */
 void sync_buffer(int cpu)
 {
-       struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[cpu];
+       struct oprofile_cpu_buffer *cpu_buf = &per_cpu(cpu_buffer, cpu);
        struct mm_struct *mm = NULL;
        struct task_struct * new;
        unsigned long cookie = 0;
index c93d3d2640ab884b2a4d43a4339afd2613cd149d..efcbf4b4579f275bccea1b00fda95891925e4be6 100644 (file)
@@ -27,7 +27,7 @@
 #include "buffer_sync.h"
 #include "oprof.h"
 
-struct oprofile_cpu_buffer cpu_buffer[NR_CPUS] __cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(struct oprofile_cpu_buffer, cpu_buffer);
 
 static void wq_sync_buffer(struct work_struct *work);
 
@@ -39,7 +39,7 @@ void free_cpu_buffers(void)
        int i;
  
        for_each_online_cpu(i)
-               vfree(cpu_buffer[i].buffer);
+               vfree(per_cpu(cpu_buffer, i).buffer);
 }
 
 int alloc_cpu_buffers(void)
@@ -49,7 +49,7 @@ int alloc_cpu_buffers(void)
        unsigned long buffer_size = fs_cpu_buffer_size;
  
        for_each_online_cpu(i) {
-               struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+               struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
  
                b->buffer = vmalloc_node(sizeof(struct op_sample) * buffer_size,
                        cpu_to_node(i));
@@ -83,7 +83,7 @@ void start_cpu_work(void)
        work_enabled = 1;
 
        for_each_online_cpu(i) {
-               struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+               struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
 
                /*
                 * Spread the work by 1 jiffy per cpu so they dont all
@@ -100,7 +100,7 @@ void end_cpu_work(void)
        work_enabled = 0;
 
        for_each_online_cpu(i) {
-               struct oprofile_cpu_buffer * b = &cpu_buffer[i];
+               struct oprofile_cpu_buffer *b = &per_cpu(cpu_buffer, i);
 
                cancel_delayed_work(&b->work);
        }
@@ -227,7 +227,7 @@ static void oprofile_end_trace(struct oprofile_cpu_buffer * cpu_buf)
 void oprofile_add_ext_sample(unsigned long pc, struct pt_regs * const regs,
                                unsigned long event, int is_kernel)
 {
-       struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
 
        if (!backtrace_depth) {
                log_sample(cpu_buf, pc, is_kernel, event);
@@ -254,13 +254,13 @@ void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
 
 void oprofile_add_pc(unsigned long pc, int is_kernel, unsigned long event)
 {
-       struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
        log_sample(cpu_buf, pc, is_kernel, event);
 }
 
 void oprofile_add_trace(unsigned long pc)
 {
-       struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
+       struct oprofile_cpu_buffer *cpu_buf = &__get_cpu_var(cpu_buffer);
 
        if (!cpu_buf->tracing)
                return;
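
The NR_CPUS-sized cpu_buffer[] array becomes a true per-CPU variable: DEFINE_PER_CPU_SHARED_ALIGNED() gives each possible CPU its own cacheline-aligned instance, per_cpu(var, i) names a particular CPU's copy, and __get_cpu_var(var) is the running CPU's copy (the caller must not be migrated while using it). A minimal sketch of the idiom with a stand-in structure:

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    struct demo_buf {
            unsigned long head;
    };

    static DEFINE_PER_CPU_SHARED_ALIGNED(struct demo_buf, demo_buf);

    static void demo_reset_all(void)
    {
            int cpu;

            for_each_possible_cpu(cpu)
                    per_cpu(demo_buf, cpu).head = 0;   /* any CPU's copy, by index */
    }

    static void demo_bump_local(void)
    {
            /* Caller must have preemption disabled (e.g. runs in IRQ context). */
            __get_cpu_var(demo_buf).head++;
    }
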
index c66c025abe757804f3cea7cfc389103293762c58..13588174311d452fc17108717907f23737f0d15d 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/spinlock.h>
 #include <linux/workqueue.h>
 #include <linux/cache.h>
+#include <linux/sched.h>
  
 struct task_struct;
  
@@ -47,7 +48,7 @@ struct oprofile_cpu_buffer {
        struct delayed_work work;
 } ____cacheline_aligned;
 
-extern struct oprofile_cpu_buffer cpu_buffer[];
+DECLARE_PER_CPU(struct oprofile_cpu_buffer, cpu_buffer);
 
 void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
 
index d1f6d776e9e441ea31272d4b3b4bd58a6a927c6a..f99b28e7b79a424cb4aa52506f9ac6e0109e1391 100644 (file)
@@ -23,7 +23,7 @@ void oprofile_reset_stats(void)
        int i;
  
        for_each_possible_cpu(i) {
-               cpu_buf = &cpu_buffer[i]; 
+               cpu_buf = &per_cpu(cpu_buffer, i);
                cpu_buf->sample_received = 0;
                cpu_buf->sample_lost_overflow = 0;
                cpu_buf->backtrace_aborted = 0;
@@ -49,7 +49,7 @@ void oprofile_create_stats_files(struct super_block * sb, struct dentry * root)
                return;
 
        for_each_possible_cpu(i) {
-               cpu_buf = &cpu_buffer[i]; 
+               cpu_buf = &per_cpu(cpu_buffer, i);
                snprintf(buf, 10, "cpu%d", i);
                cpudir = oprofilefs_mkdir(sb, dir, buf);
  
index 62db3c3fe4dcbd35dcd6181c4061954e4b0b0d87..07d2a8d4498f395f52e822b9ff493555e1f3d600 100644 (file)
@@ -1551,8 +1551,7 @@ static int __init ccio_probe(struct parisc_device *dev)
 {
        int i;
        struct ioc *ioc, **ioc_p = &ioc_list;
-       struct proc_dir_entry *info_entry, *bitmap_entry;
-       
+
        ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL);
        if (ioc == NULL) {
                printk(KERN_ERR MODULE_NAME ": memory allocation failure\n");
@@ -1580,13 +1579,10 @@ static int __init ccio_probe(struct parisc_device *dev)
        HBA_DATA(dev->dev.platform_data)->iommu = ioc;
        
        if (ioc_count == 0) {
-               info_entry = create_proc_entry(MODULE_NAME, 0, proc_runway_root);
-               if (info_entry)
-                       info_entry->proc_fops = &ccio_proc_info_fops;
-
-               bitmap_entry = create_proc_entry(MODULE_NAME"-bitmap", 0, proc_runway_root);
-               if (bitmap_entry)
-                       bitmap_entry->proc_fops = &ccio_proc_bitmap_fops;
+               proc_create(MODULE_NAME, 0, proc_runway_root,
+                           &ccio_proc_info_fops);
+               proc_create(MODULE_NAME"-bitmap", 0, proc_runway_root,
+                           &ccio_proc_bitmap_fops);
        }
 
        ioc_count++;
index 8c4d2c13d5f246057a42d73ac518637973fca79e..afc849bd3f58a9ebaf9a0a00785c7a1f068e935d 100644 (file)
@@ -1895,7 +1895,9 @@ sba_driver_callback(struct parisc_device *dev)
        int i;
        char *version;
        void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE);
-       struct proc_dir_entry *info_entry, *bitmap_entry, *root;
+#ifdef CONFIG_PROC_FS
+       struct proc_dir_entry *root;
+#endif
 
        sba_dump_ranges(sba_addr);
 
@@ -1973,14 +1975,8 @@ sba_driver_callback(struct parisc_device *dev)
                break;
        }
 
-       info_entry = create_proc_entry("sba_iommu", 0, root);
-       bitmap_entry = create_proc_entry("sba_iommu-bitmap", 0, root);
-
-       if (info_entry)
-               info_entry->proc_fops = &sba_proc_fops;
-
-       if (bitmap_entry)
-               bitmap_entry->proc_fops = &sba_proc_bitmap_fops;
+       proc_create("sba_iommu", 0, root, &sba_proc_fops);
+       proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops);
 #endif
 
        parisc_vmerge_boundary = IOVP_SIZE;
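
Editor's note: both parisc IOMMU drivers above replace create_proc_entry() plus a later ->proc_fops assignment with proc_create(), which attaches the file_operations before the entry becomes visible and so closes the window in which the file could be opened with no fops. A hedged sketch of the new pattern, with a made-up entry name and seq_file show routine:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int foo_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "example output\n");
        return 0;
}

static int foo_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, foo_proc_show, NULL);
}

static const struct file_operations foo_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = foo_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static int __init foo_proc_init(void)
{
        /* fops are in place before the entry is published */
        if (!proc_create("foo", 0, NULL, &foo_proc_fops))
                return -ENOMEM;
        return 0;
}
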
index 54a6ef72906ed741e2a0ba4ddbeb343de5418480..0338b09126742300efd226e4fce332a0e2c0bbad 100644 (file)
@@ -76,7 +76,7 @@ int parport_wait_event (struct parport *port, signed long timeout)
                   semaphore. */
                return 1;
 
-       init_timer (&timer);
+       init_timer_on_stack(&timer);
        timer.expires = jiffies + timeout;
        timer.function = timeout_waiting_on_port;
        port_from_cookie[port->number % PARPORT_MAX] = port;
@@ -88,6 +88,8 @@ int parport_wait_event (struct parport *port, signed long timeout)
                /* Timed out. */
                ret = 1;
 
+       destroy_timer_on_stack(&timer);
+
        return ret;
 }
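
Editor's note: parport_wait_event() keeps its timer on the stack, so it now uses init_timer_on_stack()/destroy_timer_on_stack(); these register and unregister the object with the debugobjects infrastructure, which would otherwise raise a false positive once the stack frame is reused. A rough sketch of the same idea, with illustrative names:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static void wake_owner(unsigned long data)
{
        wake_up_process((struct task_struct *)data);
}

static void sleep_with_stack_timer(signed long timeout)
{
        struct timer_list timer;

        init_timer_on_stack(&timer);    /* tell debugobjects about the on-stack timer */
        timer.expires  = jiffies + timeout;
        timer.function = wake_owner;
        timer.data     = (unsigned long)current;
        add_timer(&timer);

        set_current_state(TASK_INTERRUPTIBLE);
        schedule();                     /* woken by the timer or by another event */

        del_timer_sync(&timer);
        destroy_timer_on_stack(&timer); /* unregister before the frame goes away */
}
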
 
index 0e77ae2b71a0ed408fd4399ff154940005efcd4d..e6a7e847ee805cbdd24384c08bc532560097c398 100644 (file)
@@ -365,11 +365,11 @@ static int __devinit parport_init_chip(struct parisc_device *dev)
        if (boot_cpu_data.cpu_type > pcxt && !pdc_add_valid(port+4)) {
 
                /* Initialize bidirectional-mode (0x10) & data-tranfer-mode #1 (0x20) */
-               printk("%s: initialize bidirectional-mode.\n", __FUNCTION__);
+               printk("%s: initialize bidirectional-mode.\n", __func__);
                parport_writeb ( (0x10 + 0x20), port + 4);
 
        } else {
-               printk("%s: enhanced parport-modes not supported.\n", __FUNCTION__);
+               printk("%s: enhanced parport-modes not supported.\n", __func__);
        }
        
        p = parport_gsc_probe_port(port, 0, dev->irq,
index a8580893820513adfd07bd72e1c6d3404d886777..e0c2a4584ec61f22cf3be8d2cae19d6259dd0ef5 100644 (file)
@@ -1415,7 +1415,7 @@ static void __devinit winbond_check(int io, int key)
 {
        int devid,devrev,oldid,x_devid,x_devrev,x_oldid;
 
-       if (!request_region(io, 3, __FUNCTION__))
+       if (!request_region(io, 3, __func__))
                return;
 
        /* First probe without key */
@@ -1449,7 +1449,7 @@ static void __devinit winbond_check2(int io,int key)
 {
         int devid,devrev,oldid,x_devid,x_devrev,x_oldid;
 
-       if (!request_region(io, 3, __FUNCTION__))
+       if (!request_region(io, 3, __func__))
                return;
 
        /* First probe without the key */
@@ -1482,7 +1482,7 @@ static void __devinit smsc_check(int io, int key)
 {
         int id,rev,oldid,oldrev,x_id,x_rev,x_oldid,x_oldrev;
 
-       if (!request_region(io, 3, __FUNCTION__))
+       if (!request_region(io, 3, __func__))
                return;
 
        /* First probe without the key */
@@ -1547,7 +1547,7 @@ static void __devinit detect_and_report_it87(void)
        u8 r;
        if (verbose_probing)
                printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n");
-       if (!request_region(0x2e, 1, __FUNCTION__))
+       if (!request_region(0x2e, 1, __func__))
                return;
        outb(0x87, 0x2e);
        outb(0x01, 0x2e);
@@ -3082,6 +3082,7 @@ static struct pci_driver parport_pc_pci_driver;
 static int __init parport_pc_init_superio(int autoirq, int autodma) {return 0;}
 #endif /* CONFIG_PCI */
 
+#ifdef CONFIG_PNP
 
 static const struct pnp_device_id parport_pc_pnp_tbl[] = {
        /* Standard LPT Printer Port */
@@ -3148,6 +3149,9 @@ static struct pnp_driver parport_pc_pnp_driver = {
        .remove         = parport_pc_pnp_remove,
 };
 
+#else
+static struct pnp_driver parport_pc_pnp_driver;
+#endif /* CONFIG_PNP */
 
 static int __devinit parport_pc_platform_probe(struct platform_device *pdev)
 {
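
Editor's note: the parport_pc hunk wraps the PnP id table and driver in #ifdef CONFIG_PNP and, in the #else branch, supplies an empty struct pnp_driver so the registration call sites build unchanged. A hedged sketch of that stub pattern, with invented names and a single illustrative id:

#include <linux/pnp.h>

#ifdef CONFIG_PNP
static int foo_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
{
        return 0;                               /* a real driver would claim resources here */
}

static void foo_pnp_remove(struct pnp_dev *dev)
{
}

static const struct pnp_device_id foo_pnp_tbl[] = {
        { .id = "PNP0400", .driver_data = 0 },  /* illustrative standard LPT id */
        { }
};

static struct pnp_driver foo_pnp_driver = {
        .name     = "foo",
        .id_table = foo_pnp_tbl,
        .probe    = foo_pnp_probe,
        .remove   = foo_pnp_remove,
};
#else
/* empty definition keeps pnp_register_driver(&foo_pnp_driver) call sites compiling */
static struct pnp_driver foo_pnp_driver;
#endif /* CONFIG_PNP */
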
index f14267e197dd7677ca3d70bc9293f970fa35a3fe..8264a76804354ffb011a8abc8ce6dddc19cdd975 100644 (file)
@@ -93,11 +93,10 @@ struct controller {
        u8 slot_device_offset;
        u32 first_slot;         /* First physical slot number */  /* PCIE only has 1 slot */
        u8 slot_bus;            /* Bus where the slots handled by this controller sit */
-       u8 ctrlcap;
+       u32 slot_cap;
        u8 cap_base;
        struct timer_list poll_timer;
        volatile int cmd_busy;
-       spinlock_t lock;
 };
 
 #define INT_BUTTON_IGNORE              0
@@ -137,13 +136,13 @@ struct controller {
 #define HP_SUPR_RM_SUP 0x00000020
 #define EMI_PRSN       0x00020000
 
-#define ATTN_BUTTN(cap)                (cap & ATTN_BUTTN_PRSN)
-#define POWER_CTRL(cap)                (cap & PWR_CTRL_PRSN)
-#define MRL_SENS(cap)          (cap & MRL_SENS_PRSN)
-#define ATTN_LED(cap)          (cap & ATTN_LED_PRSN)
-#define PWR_LED(cap)           (cap & PWR_LED_PRSN) 
-#define HP_SUPR_RM(cap)                (cap & HP_SUPR_RM_SUP)
-#define EMI(cap)               (cap & EMI_PRSN)
+#define ATTN_BUTTN(ctrl)       ((ctrl)->slot_cap & ATTN_BUTTN_PRSN)
+#define POWER_CTRL(ctrl)       ((ctrl)->slot_cap & PWR_CTRL_PRSN)
+#define MRL_SENS(ctrl)         ((ctrl)->slot_cap & MRL_SENS_PRSN)
+#define ATTN_LED(ctrl)         ((ctrl)->slot_cap & ATTN_LED_PRSN)
+#define PWR_LED(ctrl)          ((ctrl)->slot_cap & PWR_LED_PRSN)
+#define HP_SUPR_RM(ctrl)       ((ctrl)->slot_cap & HP_SUPR_RM_SUP)
+#define EMI(ctrl)              ((ctrl)->slot_cap & EMI_PRSN)
 
 extern int pciehp_sysfs_enable_slot(struct slot *slot);
 extern int pciehp_sysfs_disable_slot(struct slot *slot);
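
Editor's note: pciehp.h now keeps the whole 32-bit Slot Capabilities register in the controller (slot_cap replaces the truncated u8 ctrlcap), and the feature-test macros take the controller pointer directly, as the call-site hunks further down show. A tiny sketch of the resulting style, with invented bit names:

#include <linux/types.h>
#include <linux/kernel.h>

#define CAP_ATTN_BUTTON 0x00000001      /* illustrative bit definitions */
#define CAP_POWER_CTRL  0x00000002

struct demo_ctrl {
        u32 slot_cap;                   /* full Slot Capabilities word */
};

#define HAS_ATTN_BUTTON(c)      ((c)->slot_cap & CAP_ATTN_BUTTON)
#define HAS_POWER_CTRL(c)       ((c)->slot_cap & CAP_POWER_CTRL)

static void report_caps(struct demo_ctrl *ctrl)
{
        if (HAS_ATTN_BUTTON(ctrl))
                printk(KERN_DEBUG "attention button present\n");
        if (HAS_POWER_CTRL(ctrl))
                printk(KERN_DEBUG "power controller present\n");
}
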
index aee19f013d84bd379e0b19bdbbc3c0855c808e66..43d8ddb2d6796a3fa29ffa675506fe7ca4975ed9 100644 (file)
@@ -41,6 +41,7 @@ int pciehp_debug;
 int pciehp_poll_mode;
 int pciehp_poll_time;
 int pciehp_force;
+int pciehp_slot_with_bus;
 struct workqueue_struct *pciehp_wq;
 
 #define DRIVER_VERSION "0.4"
@@ -55,10 +56,12 @@ module_param(pciehp_debug, bool, 0644);
 module_param(pciehp_poll_mode, bool, 0644);
 module_param(pciehp_poll_time, int, 0644);
 module_param(pciehp_force, bool, 0644);
+module_param(pciehp_slot_with_bus, bool, 0644);
 MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
 MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
 MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
 MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing");
+MODULE_PARM_DESC(pciehp_slot_with_bus, "Use bus number in the slot name");
 
 #define PCIE_MODULE_NAME "pciehp"
 
@@ -193,8 +196,12 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
 
 static void make_slot_name(struct slot *slot)
 {
-       snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d",
-                slot->bus, slot->number);
+       if (pciehp_slot_with_bus)
+               snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d",
+                        slot->bus, slot->number);
+       else
+               snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d",
+                        slot->number);
 }
 
 static int init_slots(struct controller *ctrl)
@@ -251,7 +258,7 @@ static int init_slots(struct controller *ctrl)
                        goto error_info;
                }
                /* create additional sysfs entries */
-               if (EMI(ctrl->ctrlcap)) {
+               if (EMI(ctrl)) {
                        retval = sysfs_create_file(&hotplug_slot->kobj,
                                &hotplug_slot_attr_lock.attr);
                        if (retval) {
@@ -284,7 +291,7 @@ static void cleanup_slots(struct controller *ctrl)
        list_for_each_safe(tmp, next, &ctrl->slot_list) {
                slot = list_entry(tmp, struct slot, slot_list);
                list_del(&slot->slot_list);
-               if (EMI(ctrl->ctrlcap))
+               if (EMI(ctrl))
                        sysfs_remove_file(&slot->hotplug_slot->kobj,
                                &hotplug_slot_attr_lock.attr);
                cancel_delayed_work(&slot->work);
@@ -305,7 +312,7 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
 
        hotplug_slot->info->attention_status = status;
 
-       if (ATTN_LED(slot->ctrl->ctrlcap))
+       if (ATTN_LED(slot->ctrl))
                slot->hpc_ops->set_attention_status(slot, status);
 
        return 0;
@@ -472,7 +479,7 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
                if (rc) /* -ENODEV: shouldn't happen, but deal with it */
                        value = 0;
        }
-       if ((POWER_CTRL(ctrl->ctrlcap)) && !value) {
+       if ((POWER_CTRL(ctrl)) && !value) {
                rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/
                if (rc)
                        goto err_out_free_ctrl_slot;
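
Editor's note: pciehp (and shpchp further down) gains a *_slot_with_bus module parameter: by default the sysfs slot directory is named after the physical slot number alone, and the old %04d_%04d bus_slot form stays available behind the flag. A minimal sketch of the pattern, with made-up names:

#include <linux/module.h>
#include <linux/kernel.h>

static int use_bus_in_name;             /* 0 by default: plain slot number */
module_param(use_bus_in_name, bool, 0644);
MODULE_PARM_DESC(use_bus_in_name, "Use bus number in the slot name");

static void format_slot_name(char *buf, int len, int bus, int number)
{
        if (use_bus_in_name)
                snprintf(buf, len, "%04d_%04d", bus, number);
        else
                snprintf(buf, len, "%d", number);
}
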
index 0c481f7d2ab32b34db251344c43db67bc4d898a2..0a7aa628e95511910408a3bcb3a81ae023a16782 100644 (file)
@@ -178,7 +178,7 @@ u8 pciehp_handle_power_fault(u8 hp_slot, struct controller *ctrl)
 static void set_slot_off(struct controller *ctrl, struct slot * pslot)
 {
        /* turn off slot, turn on Amber LED, turn off Green LED if supported*/
-       if (POWER_CTRL(ctrl->ctrlcap)) {
+       if (POWER_CTRL(ctrl)) {
                if (pslot->hpc_ops->power_off_slot(pslot)) {
                        err("%s: Issue of Slot Power Off command failed\n",
                            __func__);
@@ -186,10 +186,10 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
                }
        }
 
-       if (PWR_LED(ctrl->ctrlcap))
+       if (PWR_LED(ctrl))
                pslot->hpc_ops->green_led_off(pslot);
 
-       if (ATTN_LED(ctrl->ctrlcap)) {
+       if (ATTN_LED(ctrl)) {
                if (pslot->hpc_ops->set_attention_status(pslot, 1)) {
                        err("%s: Issue of Set Attention Led command failed\n",
                            __func__);
@@ -214,14 +214,14 @@ static int board_added(struct slot *p_slot)
                        __func__, p_slot->device,
                        ctrl->slot_device_offset, p_slot->hp_slot);
 
-       if (POWER_CTRL(ctrl->ctrlcap)) {
+       if (POWER_CTRL(ctrl)) {
                /* Power on slot */
                retval = p_slot->hpc_ops->power_on_slot(p_slot);
                if (retval)
                        return retval;
        }
 
-       if (PWR_LED(ctrl->ctrlcap))
+       if (PWR_LED(ctrl))
                p_slot->hpc_ops->green_led_blink(p_slot);
 
        /* Wait for ~1 second */
@@ -254,7 +254,7 @@ static int board_added(struct slot *p_slot)
         */
        if (pcie_mch_quirk)
                pci_fixup_device(pci_fixup_final, ctrl->pci_dev);
-       if (PWR_LED(ctrl->ctrlcap))
+       if (PWR_LED(ctrl))
                p_slot->hpc_ops->green_led_on(p_slot);
 
        return 0;
@@ -279,7 +279,7 @@ static int remove_board(struct slot *p_slot)
 
        dbg("In %s, hp_slot = %d\n", __func__, p_slot->hp_slot);
 
-       if (POWER_CTRL(ctrl->ctrlcap)) {
+       if (POWER_CTRL(ctrl)) {
                /* power off slot */
                retval = p_slot->hpc_ops->power_off_slot(p_slot);
                if (retval) {
@@ -289,7 +289,7 @@ static int remove_board(struct slot *p_slot)
                }
        }
 
-       if (PWR_LED(ctrl->ctrlcap))
+       if (PWR_LED(ctrl))
                /* turn off Green LED */
                p_slot->hpc_ops->green_led_off(p_slot);
 
@@ -327,7 +327,7 @@ static void pciehp_power_thread(struct work_struct *work)
        case POWERON_STATE:
                mutex_unlock(&p_slot->lock);
                if (pciehp_enable_slot(p_slot) &&
-                   PWR_LED(p_slot->ctrl->ctrlcap))
+                   PWR_LED(p_slot->ctrl))
                        p_slot->hpc_ops->green_led_off(p_slot);
                mutex_lock(&p_slot->lock);
                p_slot->state = STATIC_STATE;
@@ -409,9 +409,9 @@ static void handle_button_press_event(struct slot *p_slot)
                             "press.\n", p_slot->name);
                }
                /* blink green LED and turn off amber */
-               if (PWR_LED(ctrl->ctrlcap))
+               if (PWR_LED(ctrl))
                        p_slot->hpc_ops->green_led_blink(p_slot);
-               if (ATTN_LED(ctrl->ctrlcap))
+               if (ATTN_LED(ctrl))
                        p_slot->hpc_ops->set_attention_status(p_slot, 0);
 
                schedule_delayed_work(&p_slot->work, 5*HZ);
@@ -427,13 +427,13 @@ static void handle_button_press_event(struct slot *p_slot)
                dbg("%s: button cancel\n", __func__);
                cancel_delayed_work(&p_slot->work);
                if (p_slot->state == BLINKINGOFF_STATE) {
-                       if (PWR_LED(ctrl->ctrlcap))
+                       if (PWR_LED(ctrl))
                                p_slot->hpc_ops->green_led_on(p_slot);
                } else {
-                       if (PWR_LED(ctrl->ctrlcap))
+                       if (PWR_LED(ctrl))
                                p_slot->hpc_ops->green_led_off(p_slot);
                }
-               if (ATTN_LED(ctrl->ctrlcap))
+               if (ATTN_LED(ctrl))
                        p_slot->hpc_ops->set_attention_status(p_slot, 0);
                info("PCI slot #%s - action canceled due to button press\n",
                     p_slot->name);
@@ -492,16 +492,16 @@ static void interrupt_event_handler(struct work_struct *work)
                handle_button_press_event(p_slot);
                break;
        case INT_POWER_FAULT:
-               if (!POWER_CTRL(ctrl->ctrlcap))
+               if (!POWER_CTRL(ctrl))
                        break;
-               if (ATTN_LED(ctrl->ctrlcap))
+               if (ATTN_LED(ctrl))
                        p_slot->hpc_ops->set_attention_status(p_slot, 1);
-               if (PWR_LED(ctrl->ctrlcap))
+               if (PWR_LED(ctrl))
                        p_slot->hpc_ops->green_led_off(p_slot);
                break;
        case INT_PRESENCE_ON:
        case INT_PRESENCE_OFF:
-               if (!HP_SUPR_RM(ctrl->ctrlcap))
+               if (!HP_SUPR_RM(ctrl))
                        break;
                dbg("Surprise Removal\n");
                update_slot_info(p_slot);
@@ -531,7 +531,7 @@ int pciehp_enable_slot(struct slot *p_slot)
                mutex_unlock(&p_slot->ctrl->crit_sect);
                return -ENODEV;
        }
-       if (MRL_SENS(p_slot->ctrl->ctrlcap)) {
+       if (MRL_SENS(p_slot->ctrl)) {
                rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
                if (rc || getstatus) {
                        info("%s: latch open on slot(%s)\n", __func__,
@@ -541,7 +541,7 @@ int pciehp_enable_slot(struct slot *p_slot)
                }
        }
 
-       if (POWER_CTRL(p_slot->ctrl->ctrlcap)) {
+       if (POWER_CTRL(p_slot->ctrl)) {
                rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
                if (rc || getstatus) {
                        info("%s: already enabled on slot(%s)\n", __func__,
@@ -576,7 +576,7 @@ int pciehp_disable_slot(struct slot *p_slot)
        /* Check to see if (latch closed, card present, power on) */
        mutex_lock(&p_slot->ctrl->crit_sect);
 
-       if (!HP_SUPR_RM(p_slot->ctrl->ctrlcap)) {
+       if (!HP_SUPR_RM(p_slot->ctrl)) {
                ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
                if (ret || !getstatus) {
                        info("%s: no adapter on slot(%s)\n", __func__,
@@ -586,7 +586,7 @@ int pciehp_disable_slot(struct slot *p_slot)
                }
        }
 
-       if (MRL_SENS(p_slot->ctrl->ctrlcap)) {
+       if (MRL_SENS(p_slot->ctrl)) {
                ret = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
                if (ret || getstatus) {
                        info("%s: latch open on slot(%s)\n", __func__,
@@ -596,7 +596,7 @@ int pciehp_disable_slot(struct slot *p_slot)
                }
        }
 
-       if (POWER_CTRL(p_slot->ctrl->ctrlcap)) {
+       if (POWER_CTRL(p_slot->ctrl)) {
                ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
                if (ret || !getstatus) {
                        info("%s: already disabled slot(%s)\n", __func__,
index b4bbd07d1e3902ff756a2280c01480c2ffb73320..891f81a0400c92cb434dcdbbee95dbfa96c0e213 100644 (file)
@@ -221,6 +221,32 @@ static void start_int_poll_timer(struct controller *ctrl, int sec)
        add_timer(&ctrl->poll_timer);
 }
 
+static inline int pciehp_request_irq(struct controller *ctrl)
+{
+       int retval, irq = ctrl->pci_dev->irq;
+
+       /* Install interrupt polling timer. Start with 10 sec delay */
+       if (pciehp_poll_mode) {
+               init_timer(&ctrl->poll_timer);
+               start_int_poll_timer(ctrl, 10);
+               return 0;
+       }
+
+       /* Installs the interrupt handler */
+       retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl);
+       if (retval)
+               err("Cannot get irq %d for the hotplug controller\n", irq);
+       return retval;
+}
+
+static inline void pciehp_free_irq(struct controller *ctrl)
+{
+       if (pciehp_poll_mode)
+               del_timer_sync(&ctrl->poll_timer);
+       else
+               free_irq(ctrl->pci_dev->irq, ctrl);
+}
+
 static inline int pcie_wait_cmd(struct controller *ctrl)
 {
        int retval = 0;
@@ -242,17 +268,15 @@ static inline int pcie_wait_cmd(struct controller *ctrl)
 
 /**
  * pcie_write_cmd - Issue controller command
- * @slot: slot to which the command is issued
+ * @ctrl: controller to which the command is issued
  * @cmd:  command value written to slot control register
  * @mask: bitmask of slot control register to be modified
  */
-static int pcie_write_cmd(struct slot *slot, u16 cmd, u16 mask)
+static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
 {
-       struct controller *ctrl = slot->ctrl;
        int retval = 0;
        u16 slot_status;
        u16 slot_ctrl;
-       unsigned long flags;
 
        mutex_lock(&ctrl->ctrl_lock);
 
@@ -270,24 +294,24 @@ static int pcie_write_cmd(struct slot *slot, u16 cmd, u16 mask)
                    __func__);
        }
 
-       spin_lock_irqsave(&ctrl->lock, flags);
        retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
        if (retval) {
                err("%s: Cannot read SLOTCTRL register\n", __func__);
-               goto out_spin_unlock;
+               goto out;
        }
 
        slot_ctrl &= ~mask;
-       slot_ctrl |= ((cmd & mask) | CMD_CMPL_INTR_ENABLE);
+       slot_ctrl |= (cmd & mask);
+       /* Don't enable command completed if caller is changing it. */
+       if (!(mask & CMD_CMPL_INTR_ENABLE))
+               slot_ctrl |= CMD_CMPL_INTR_ENABLE;
 
        ctrl->cmd_busy = 1;
+       smp_mb();
        retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl);
        if (retval)
                err("%s: Cannot write to SLOTCTRL register\n", __func__);
 
- out_spin_unlock:
-       spin_unlock_irqrestore(&ctrl->lock, flags);
-
        /*
         * Wait for command completion.
         */
@@ -467,12 +491,7 @@ static int hpc_toggle_emi(struct slot *slot)
 
        slot_cmd = EMI_CTRL;
        cmd_mask = EMI_CTRL;
-       if (!pciehp_poll_mode) {
-               slot_cmd = slot_cmd | HP_INTR_ENABLE;
-               cmd_mask = cmd_mask | HP_INTR_ENABLE;
-       }
-
-       rc = pcie_write_cmd(slot, slot_cmd, cmd_mask);
+       rc = pcie_write_cmd(slot->ctrl, slot_cmd, cmd_mask);
        slot->last_emi_toggle = get_seconds();
 
        return rc;
@@ -499,12 +518,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
                default:
                        return -1;
        }
-       if (!pciehp_poll_mode) {
-               slot_cmd = slot_cmd | HP_INTR_ENABLE;
-               cmd_mask = cmd_mask | HP_INTR_ENABLE;
-       }
-
-       rc = pcie_write_cmd(slot, slot_cmd, cmd_mask);
+       rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
        dbg("%s: SLOTCTRL %x write cmd %x\n",
            __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
 
@@ -519,13 +533,7 @@ static void hpc_set_green_led_on(struct slot *slot)
 
        slot_cmd = 0x0100;
        cmd_mask = PWR_LED_CTRL;
-       if (!pciehp_poll_mode) {
-               slot_cmd = slot_cmd | HP_INTR_ENABLE;
-               cmd_mask = cmd_mask | HP_INTR_ENABLE;
-       }
-
-       pcie_write_cmd(slot, slot_cmd, cmd_mask);
-
+       pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
        dbg("%s: SLOTCTRL %x write cmd %x\n",
            __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
 }
@@ -538,12 +546,7 @@ static void hpc_set_green_led_off(struct slot *slot)
 
        slot_cmd = 0x0300;
        cmd_mask = PWR_LED_CTRL;
-       if (!pciehp_poll_mode) {
-               slot_cmd = slot_cmd | HP_INTR_ENABLE;
-               cmd_mask = cmd_mask | HP_INTR_ENABLE;
-       }
-
-       pcie_write_cmd(slot, slot_cmd, cmd_mask);
+       pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
        dbg("%s: SLOTCTRL %x write cmd %x\n",
            __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
 }
@@ -556,23 +559,19 @@ static void hpc_set_green_led_blink(struct slot *slot)
 
        slot_cmd = 0x0200;
        cmd_mask = PWR_LED_CTRL;
-       if (!pciehp_poll_mode) {
-               slot_cmd = slot_cmd | HP_INTR_ENABLE;
-               cmd_mask = cmd_mask | HP_INTR_ENABLE;
-       }
-
-       pcie_write_cmd(slot, slot_cmd, cmd_mask);
-
+       pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
        dbg("%s: SLOTCTRL %x write cmd %x\n",
            __func__, ctrl->cap_base + SLOTCTRL, slot_cmd);
 }
 
 static void hpc_release_ctlr(struct controller *ctrl)
 {
-       if (pciehp_poll_mode)
-               del_timer(&ctrl->poll_timer);
-       else
-               free_irq(ctrl->pci_dev->irq, ctrl);
+       /* Mask Hot-plug Interrupt Enable */
+       if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE))
+               err("%s: Cannot mask hotplug interrupt enable\n", __func__);
+
+       /* Free interrupt handler or interrupt polling timer */
+       pciehp_free_irq(ctrl);
 
        /*
         * If this is the last controller to be released, destroy the
@@ -612,19 +611,13 @@ static int hpc_power_on_slot(struct slot * slot)
        cmd_mask = PWR_CTRL;
        /* Enable detection that we turned off at slot power-off time */
        if (!pciehp_poll_mode) {
-               slot_cmd = slot_cmd |
-                          PWR_FAULT_DETECT_ENABLE |
-                          MRL_DETECT_ENABLE |
-                          PRSN_DETECT_ENABLE |
-                          HP_INTR_ENABLE;
-               cmd_mask = cmd_mask |
-                          PWR_FAULT_DETECT_ENABLE |
-                          MRL_DETECT_ENABLE |
-                          PRSN_DETECT_ENABLE |
-                          HP_INTR_ENABLE;
+               slot_cmd |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
+                            PRSN_DETECT_ENABLE);
+               cmd_mask |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
+                            PRSN_DETECT_ENABLE);
        }
 
-       retval = pcie_write_cmd(slot, slot_cmd, cmd_mask);
+       retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
 
        if (retval) {
                err("%s: Write %x command failed!\n", __func__, slot_cmd);
@@ -697,18 +690,13 @@ static int hpc_power_off_slot(struct slot * slot)
         * till the slot is powered on again.
         */
        if (!pciehp_poll_mode) {
-               slot_cmd = (slot_cmd &
-                           ~PWR_FAULT_DETECT_ENABLE &
-                           ~MRL_DETECT_ENABLE &
-                           ~PRSN_DETECT_ENABLE) | HP_INTR_ENABLE;
-               cmd_mask = cmd_mask |
-                          PWR_FAULT_DETECT_ENABLE |
-                          MRL_DETECT_ENABLE |
-                          PRSN_DETECT_ENABLE |
-                          HP_INTR_ENABLE;
+               slot_cmd &= ~(PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
+                             PRSN_DETECT_ENABLE);
+               cmd_mask |= (PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE |
+                            PRSN_DETECT_ENABLE);
        }
 
-       retval = pcie_write_cmd(slot, slot_cmd, cmd_mask);
+       retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask);
        if (retval) {
                err("%s: Write command failed!\n", __func__);
                retval = -1;
@@ -733,139 +721,56 @@ static int hpc_power_off_slot(struct slot * slot)
 static irqreturn_t pcie_isr(int irq, void *dev_id)
 {
        struct controller *ctrl = (struct controller *)dev_id;
-       u16 slot_status, intr_detect, intr_loc;
-       u16 temp_word;
-       int hp_slot = 0;        /* only 1 slot per PCI Express port */
-       int rc = 0;
-       unsigned long flags;
-
-       rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
-       if (rc) {
-               err("%s: Cannot read SLOTSTATUS register\n", __func__);
-               return IRQ_NONE;
-       }
-
-       intr_detect = (ATTN_BUTTN_PRESSED | PWR_FAULT_DETECTED |
-                      MRL_SENS_CHANGED | PRSN_DETECT_CHANGED | CMD_COMPLETED);
-
-       intr_loc = slot_status & intr_detect;
-
-       /* Check to see if it was our interrupt */
-       if ( !intr_loc )
-               return IRQ_NONE;
+       u16 detected, intr_loc;
 
-       dbg("%s: intr_loc %x\n", __func__, intr_loc);
-       /* Mask Hot-plug Interrupt Enable */
-       if (!pciehp_poll_mode) {
-               spin_lock_irqsave(&ctrl->lock, flags);
-               rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
-               if (rc) {
-                       err("%s: Cannot read SLOT_CTRL register\n",
-                           __func__);
-                       spin_unlock_irqrestore(&ctrl->lock, flags);
+       /*
+        * In order to guarantee that all interrupt events are
+        * serviced, we need to re-inspect Slot Status register after
+        * clearing what is presumed to be the last pending interrupt.
+        */
+       intr_loc = 0;
+       do {
+               if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) {
+                       err("%s: Cannot read SLOTSTATUS\n", __func__);
                        return IRQ_NONE;
                }
 
-               dbg("%s: pciehp_readw(SLOTCTRL) with value %x\n",
-                   __func__, temp_word);
-               temp_word = (temp_word & ~HP_INTR_ENABLE &
-                            ~CMD_CMPL_INTR_ENABLE) | 0x00;
-               rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
-               if (rc) {
-                       err("%s: Cannot write to SLOTCTRL register\n",
-                           __func__);
-                       spin_unlock_irqrestore(&ctrl->lock, flags);
+               detected &= (ATTN_BUTTN_PRESSED | PWR_FAULT_DETECTED |
+                            MRL_SENS_CHANGED | PRSN_DETECT_CHANGED |
+                            CMD_COMPLETED);
+               intr_loc |= detected;
+               if (!intr_loc)
                        return IRQ_NONE;
-               }
-               spin_unlock_irqrestore(&ctrl->lock, flags);
-
-               rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
-               if (rc) {
-                       err("%s: Cannot read SLOT_STATUS register\n",
-                           __func__);
+               if (pciehp_writew(ctrl, SLOTSTATUS, detected)) {
+                       err("%s: Cannot write to SLOTSTATUS\n", __func__);
                        return IRQ_NONE;
                }
-               dbg("%s: pciehp_readw(SLOTSTATUS) with value %x\n",
-                   __func__, slot_status);
+       } while (detected);
 
-               /* Clear command complete interrupt caused by this write */
-               temp_word = 0x1f;
-               rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
-               if (rc) {
-                       err("%s: Cannot write to SLOTSTATUS register\n",
-                           __func__);
-                       return IRQ_NONE;
-               }
-       }
+       dbg("%s: intr_loc %x\n", __func__, intr_loc);
 
+       /* Check Command Complete Interrupt Pending */
        if (intr_loc & CMD_COMPLETED) {
-               /*
-                * Command Complete Interrupt Pending
-                */
                ctrl->cmd_busy = 0;
+               smp_mb();
                wake_up_interruptible(&ctrl->queue);
        }
 
+       /* Check MRL Sensor Changed */
        if (intr_loc & MRL_SENS_CHANGED)
-               pciehp_handle_switch_change(hp_slot, ctrl);
+               pciehp_handle_switch_change(0, ctrl);
 
+       /* Check Attention Button Pressed */
        if (intr_loc & ATTN_BUTTN_PRESSED)
-               pciehp_handle_attention_button(hp_slot, ctrl);
+               pciehp_handle_attention_button(0, ctrl);
 
+       /* Check Presence Detect Changed */
        if (intr_loc & PRSN_DETECT_CHANGED)
-               pciehp_handle_presence_change(hp_slot, ctrl);
+               pciehp_handle_presence_change(0, ctrl);
 
+       /* Check Power Fault Detected */
        if (intr_loc & PWR_FAULT_DETECTED)
-               pciehp_handle_power_fault(hp_slot, ctrl);
-
-       /* Clear all events after serving them */
-       temp_word = 0x1F;
-       rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
-       if (rc) {
-               err("%s: Cannot write to SLOTSTATUS register\n", __func__);
-               return IRQ_NONE;
-       }
-       /* Unmask Hot-plug Interrupt Enable */
-       if (!pciehp_poll_mode) {
-               spin_lock_irqsave(&ctrl->lock, flags);
-               rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
-               if (rc) {
-                       err("%s: Cannot read SLOTCTRL register\n",
-                           __func__);
-                       spin_unlock_irqrestore(&ctrl->lock, flags);
-                       return IRQ_NONE;
-               }
-
-               dbg("%s: Unmask Hot-plug Interrupt Enable\n", __func__);
-               temp_word = (temp_word & ~HP_INTR_ENABLE) | HP_INTR_ENABLE;
-
-               rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
-               if (rc) {
-                       err("%s: Cannot write to SLOTCTRL register\n",
-                           __func__);
-                       spin_unlock_irqrestore(&ctrl->lock, flags);
-                       return IRQ_NONE;
-               }
-               spin_unlock_irqrestore(&ctrl->lock, flags);
-
-               rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
-               if (rc) {
-                       err("%s: Cannot read SLOT_STATUS register\n",
-                           __func__);
-                       return IRQ_NONE;
-               }
-
-               /* Clear command complete interrupt caused by this write */
-               temp_word = 0x1F;
-               rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
-               if (rc) {
-                       err("%s: Cannot write to SLOTSTATUS failed\n",
-                           __func__);
-                       return IRQ_NONE;
-               }
-               dbg("%s: pciehp_writew(SLOTSTATUS) with value %x\n",
-                   __func__, temp_word);
-       }
+               pciehp_handle_power_fault(0, ctrl);
 
        return IRQ_HANDLED;
 }
@@ -1052,7 +957,7 @@ static struct hpc_ops pciehp_hpc_ops = {
 };
 
 #ifdef CONFIG_ACPI
-int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
+static int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
 {
        acpi_status status;
        acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev));
@@ -1112,7 +1017,7 @@ int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
                        break;
        }
 
-       err("Cannot get control of hotplug hardware for pci %s\n",
+       dbg("Cannot get control of hotplug hardware for pci %s\n",
                        pci_name(dev));
 
        kfree(string.pointer);
@@ -1123,45 +1028,9 @@ int pciehp_acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev)
 static int pcie_init_hardware_part1(struct controller *ctrl,
                                    struct pcie_device *dev)
 {
-       int rc;
-       u16 temp_word;
-       u32 slot_cap;
-       u16 slot_status;
-
-       rc = pciehp_readl(ctrl, SLOTCAP, &slot_cap);
-       if (rc) {
-               err("%s: Cannot read SLOTCAP register\n", __func__);
-               return -1;
-       }
-
        /* Mask Hot-plug Interrupt Enable */
-       rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
-       if (rc) {
-               err("%s: Cannot read SLOTCTRL register\n", __func__);
-               return -1;
-       }
-
-       dbg("%s: SLOTCTRL %x value read %x\n",
-           __func__, ctrl->cap_base + SLOTCTRL, temp_word);
-       temp_word = (temp_word & ~HP_INTR_ENABLE & ~CMD_CMPL_INTR_ENABLE) |
-               0x00;
-
-       rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
-       if (rc) {
-               err("%s: Cannot write to SLOTCTRL register\n", __func__);
-               return -1;
-       }
-
-       rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
-       if (rc) {
-               err("%s: Cannot read SLOTSTATUS register\n", __func__);
-               return -1;
-       }
-
-       temp_word = 0x1F; /* Clear all events */
-       rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
-       if (rc) {
-               err("%s: Cannot write to SLOTSTATUS register\n", __func__);
+       if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE)) {
+               err("%s: Cannot mask hotplug interrupt enable\n", __func__);
                return -1;
        }
        return 0;
@@ -1169,205 +1038,125 @@ static int pcie_init_hardware_part1(struct controller *ctrl,
 
 int pcie_init_hardware_part2(struct controller *ctrl, struct pcie_device *dev)
 {
-       int rc;
-       u16 temp_word;
-       u16 intr_enable = 0;
-       u32 slot_cap;
-       u16 slot_status;
+       u16 cmd, mask;
 
-       rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
-       if (rc) {
-               err("%s: Cannot read SLOTCTRL register\n", __func__);
-               goto abort;
-       }
-
-       intr_enable = intr_enable | PRSN_DETECT_ENABLE;
-
-       rc = pciehp_readl(ctrl, SLOTCAP, &slot_cap);
-       if (rc) {
-               err("%s: Cannot read SLOTCAP register\n", __func__);
-               goto abort;
+       /*
+        * We need to clear all events before enabling hotplug interrupt
+        * notification mechanism in order for hotplug controller to
+        * generate interrupts.
+        */
+       if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) {
+               err("%s: Cannot write to SLOTSTATUS register\n", __func__);
+               return -1;
        }
 
-       if (ATTN_BUTTN(slot_cap))
-               intr_enable = intr_enable | ATTN_BUTTN_ENABLE;
-
-       if (POWER_CTRL(slot_cap))
-               intr_enable = intr_enable | PWR_FAULT_DETECT_ENABLE;
-
-       if (MRL_SENS(slot_cap))
-               intr_enable = intr_enable | MRL_DETECT_ENABLE;
+       cmd = PRSN_DETECT_ENABLE;
+       if (ATTN_BUTTN(ctrl))
+               cmd |= ATTN_BUTTN_ENABLE;
+       if (POWER_CTRL(ctrl))
+               cmd |= PWR_FAULT_DETECT_ENABLE;
+       if (MRL_SENS(ctrl))
+               cmd |= MRL_DETECT_ENABLE;
+       if (!pciehp_poll_mode)
+               cmd |= HP_INTR_ENABLE;
 
-       temp_word = (temp_word & ~intr_enable) | intr_enable;
+       mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE |
+               PWR_FAULT_DETECT_ENABLE | MRL_DETECT_ENABLE | HP_INTR_ENABLE;
 
-       if (pciehp_poll_mode) {
-               temp_word = (temp_word & ~HP_INTR_ENABLE) | 0x0;
-       } else {
-               temp_word = (temp_word & ~HP_INTR_ENABLE) | HP_INTR_ENABLE;
-       }
-
-       /*
-        * Unmask Hot-plug Interrupt Enable for the interrupt
-        * notification mechanism case.
-        */
-       rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
-       if (rc) {
-               err("%s: Cannot write to SLOTCTRL register\n", __func__);
+       if (pcie_write_cmd(ctrl, cmd, mask)) {
+               err("%s: Cannot enable software notification\n", __func__);
                goto abort;
        }
-       rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
-       if (rc) {
-               err("%s: Cannot read SLOTSTATUS register\n", __func__);
-               goto abort_disable_intr;
-       }
-
-       temp_word =  0x1F; /* Clear all events */
-       rc = pciehp_writew(ctrl, SLOTSTATUS, temp_word);
-       if (rc) {
-               err("%s: Cannot write to SLOTSTATUS register\n", __func__);
-               goto abort_disable_intr;
-       }
 
-       if (pciehp_force) {
+       if (pciehp_force)
                dbg("Bypassing BIOS check for pciehp use on %s\n",
                                pci_name(ctrl->pci_dev));
-       } else {
-               rc = pciehp_get_hp_hw_control_from_firmware(ctrl->pci_dev);
-               if (rc)
-                       goto abort_disable_intr;
-       }
+       else if (pciehp_get_hp_hw_control_from_firmware(ctrl->pci_dev))
+               goto abort_disable_intr;
 
        return 0;
 
        /* We end up here for the many possible ways to fail this API. */
 abort_disable_intr:
-       rc = pciehp_readw(ctrl, SLOTCTRL, &temp_word);
-       if (!rc) {
-               temp_word &= ~(intr_enable | HP_INTR_ENABLE);
-               rc = pciehp_writew(ctrl, SLOTCTRL, temp_word);
-       }
-       if (rc)
+       if (pcie_write_cmd(ctrl, 0, HP_INTR_ENABLE))
                err("%s : disabling interrupts failed\n", __func__);
 abort:
        return -1;
 }
 
-int pcie_init(struct controller *ctrl, struct pcie_device *dev)
+static inline void dbg_ctrl(struct controller *ctrl)
 {
-       int rc;
-       u16 cap_reg;
-       u32 slot_cap;
-       int cap_base;
-       u16 slot_status, slot_ctrl;
-       struct pci_dev *pdev;
-
-       pdev = dev->port;
-       ctrl->pci_dev = pdev;   /* save pci_dev in context */
-
-       dbg("%s: hotplug controller vendor id 0x%x device id 0x%x\n",
-                       __func__, pdev->vendor, pdev->device);
+       int i;
+       u16 reg16;
+       struct pci_dev *pdev = ctrl->pci_dev;
 
-       cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-       if (cap_base == 0) {
-               dbg("%s: Can't find PCI_CAP_ID_EXP (0x10)\n", __func__);
-               goto abort;
-       }
+       if (!pciehp_debug)
+               return;
 
-       ctrl->cap_base = cap_base;
+       dbg("Hotplug Controller:\n");
+       dbg("  Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n", pci_name(pdev), pdev->irq);
+       dbg("  Vendor ID            : 0x%04x\n", pdev->vendor);
+       dbg("  Device ID            : 0x%04x\n", pdev->device);
+       dbg("  Subsystem ID         : 0x%04x\n", pdev->subsystem_device);
+       dbg("  Subsystem Vendor ID  : 0x%04x\n", pdev->subsystem_vendor);
+       dbg("  PCIe Cap offset      : 0x%02x\n", ctrl->cap_base);
+       for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+               if (!pci_resource_len(pdev, i))
+                       continue;
+               dbg("  PCI resource [%d]     : 0x%llx@0x%llx\n", i,
+                   (unsigned long long)pci_resource_len(pdev, i),
+                   (unsigned long long)pci_resource_start(pdev, i));
+       }
+       dbg("Slot Capabilities      : 0x%08x\n", ctrl->slot_cap);
+       dbg("  Physical Slot Number : %d\n", ctrl->first_slot);
+       dbg("  Attention Button     : %3s\n", ATTN_BUTTN(ctrl) ? "yes" : "no");
+       dbg("  Power Controller     : %3s\n", POWER_CTRL(ctrl) ? "yes" : "no");
+       dbg("  MRL Sensor           : %3s\n", MRL_SENS(ctrl)   ? "yes" : "no");
+       dbg("  Attention Indicator  : %3s\n", ATTN_LED(ctrl)   ? "yes" : "no");
+       dbg("  Power Indicator      : %3s\n", PWR_LED(ctrl)    ? "yes" : "no");
+       dbg("  Hot-Plug Surprise    : %3s\n", HP_SUPR_RM(ctrl) ? "yes" : "no");
+       dbg("  EMI Present          : %3s\n", EMI(ctrl)        ? "yes" : "no");
+       pciehp_readw(ctrl, SLOTSTATUS, &reg16);
+       dbg("Slot Status            : 0x%04x\n", reg16);
+       pciehp_readw(ctrl, SLOTSTATUS, &reg16);
+       dbg("Slot Control           : 0x%04x\n", reg16);
+}
 
-       dbg("%s: pcie_cap_base %x\n", __func__, cap_base);
+int pcie_init(struct controller *ctrl, struct pcie_device *dev)
+{
+       u32 slot_cap;
+       struct pci_dev *pdev = dev->port;
 
-       rc = pciehp_readw(ctrl, CAPREG, &cap_reg);
-       if (rc) {
-               err("%s: Cannot read CAPREG register\n", __func__);
-               goto abort;
-       }
-       dbg("%s: CAPREG offset %x cap_reg %x\n",
-           __func__, ctrl->cap_base + CAPREG, cap_reg);
-
-       if (((cap_reg & SLOT_IMPL) == 0) ||
-           (((cap_reg & DEV_PORT_TYPE) != 0x0040)
-               && ((cap_reg & DEV_PORT_TYPE) != 0x0060))) {
-               dbg("%s : This is not a root port or the port is not "
-                   "connected to a slot\n", __func__);
+       ctrl->pci_dev = pdev;
+       ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+       if (!ctrl->cap_base) {
+               err("%s: Cannot find PCI Express capability\n", __func__);
                goto abort;
        }
-
-       rc = pciehp_readl(ctrl, SLOTCAP, &slot_cap);
-       if (rc) {
+       if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) {
                err("%s: Cannot read SLOTCAP register\n", __func__);
                goto abort;
        }
-       dbg("%s: SLOTCAP offset %x slot_cap %x\n",
-           __func__, ctrl->cap_base + SLOTCAP, slot_cap);
-
-       if (!(slot_cap & HP_CAP)) {
-               dbg("%s : This slot is not hot-plug capable\n", __func__);
-               goto abort;
-       }
-       /* For debugging purpose */
-       rc = pciehp_readw(ctrl, SLOTSTATUS, &slot_status);
-       if (rc) {
-               err("%s: Cannot read SLOTSTATUS register\n", __func__);
-               goto abort;
-       }
-       dbg("%s: SLOTSTATUS offset %x slot_status %x\n",
-           __func__, ctrl->cap_base + SLOTSTATUS, slot_status);
-
-       rc = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl);
-       if (rc) {
-               err("%s: Cannot read SLOTCTRL register\n", __func__);
-               goto abort;
-       }
-       dbg("%s: SLOTCTRL offset %x slot_ctrl %x\n",
-           __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl);
-
-       for (rc = 0; rc < DEVICE_COUNT_RESOURCE; rc++)
-               if (pci_resource_len(pdev, rc) > 0)
-                       dbg("pci resource[%d] start=0x%llx(len=0x%llx)\n", rc,
-                           (unsigned long long)pci_resource_start(pdev, rc),
-                           (unsigned long long)pci_resource_len(pdev, rc));
-
-       info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
-            pdev->vendor, pdev->device,
-            pdev->subsystem_vendor, pdev->subsystem_device);
 
+       ctrl->slot_cap = slot_cap;
+       ctrl->first_slot = slot_cap >> 19;
+       ctrl->slot_device_offset = 0;
+       ctrl->num_slots = 1;
+       ctrl->hpc_ops = &pciehp_hpc_ops;
        mutex_init(&ctrl->crit_sect);
        mutex_init(&ctrl->ctrl_lock);
-       spin_lock_init(&ctrl->lock);
-
-       /* setup wait queue */
        init_waitqueue_head(&ctrl->queue);
+       dbg_ctrl(ctrl);
 
-       /* return PCI Controller Info */
-       ctrl->slot_device_offset = 0;
-       ctrl->num_slots = 1;
-       ctrl->first_slot = slot_cap >> 19;
-       ctrl->ctrlcap = slot_cap & 0x0000007f;
+       info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n",
+            pdev->vendor, pdev->device,
+            pdev->subsystem_vendor, pdev->subsystem_device);
 
-       rc = pcie_init_hardware_part1(ctrl, dev);
-       if (rc)
+       if (pcie_init_hardware_part1(ctrl, dev))
                goto abort;
 
-       if (pciehp_poll_mode) {
-               /* Install interrupt polling timer. Start with 10 sec delay */
-               init_timer(&ctrl->poll_timer);
-               start_int_poll_timer(ctrl, 10);
-       } else {
-               /* Installs the interrupt handler */
-               rc = request_irq(ctrl->pci_dev->irq, pcie_isr, IRQF_SHARED,
-                                MY_NAME, (void *)ctrl);
-               dbg("%s: request_irq %d for hpc%d (returns %d)\n",
-                   __func__, ctrl->pci_dev->irq,
-                   atomic_read(&pciehp_num_controllers), rc);
-               if (rc) {
-                       err("Can't get irq %d for the hotplug controller\n",
-                           ctrl->pci_dev->irq);
-                       goto abort;
-               }
-       }
-       dbg("pciehp ctrl b:d:f:irq=0x%x:%x:%x:%x\n", pdev->bus->number,
-               PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), dev->irq);
+       if (pciehp_request_irq(ctrl))
+               goto abort;
 
        /*
         * If this is the first controller to be initialized,
@@ -1376,21 +1165,17 @@ int pcie_init(struct controller *ctrl, struct pcie_device *dev)
        if (atomic_add_return(1, &pciehp_num_controllers) == 1) {
                pciehp_wq = create_singlethread_workqueue("pciehpd");
                if (!pciehp_wq) {
-                       rc = -ENOMEM;
                        goto abort_free_irq;
                }
        }
 
-       rc = pcie_init_hardware_part2(ctrl, dev);
-       if (rc == 0) {
-               ctrl->hpc_ops = &pciehp_hpc_ops;
-               return 0;
-       }
+       if (pcie_init_hardware_part2(ctrl, dev))
+               goto abort_free_irq;
+
+       return 0;
+
 abort_free_irq:
-       if (pciehp_poll_mode)
-               del_timer_sync(&ctrl->poll_timer);
-       else
-               free_irq(ctrl->pci_dev->irq, ctrl);
+       pciehp_free_irq(ctrl);
 abort:
        return -1;
 }
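
Editor's note: the interesting parts of the pciehp_hpc.c rework above are (a) pcie_write_cmd() sets ctrl->cmd_busy before writing SLOTCTRL and the ISR clears it, with smp_mb() ordering the flag against the wake-up, and (b) pcie_isr() now loops re-reading Slot Status until no further events have latched, so nothing queued between the read and the write-one-to-clear is lost. A hedged sketch of just the re-inspection loop; the accessors below are stand-ins for pciehp_readw()/pciehp_writew(), not real helpers:

#include <linux/interrupt.h>
#include <linux/types.h>

#define EVENT_MASK 0x001f               /* illustrative: the five slot event bits */

/* hypothetical register accessors standing in for pciehp_readw()/pciehp_writew() */
static u16 read_slot_status(void *ctrl)                { return 0; }
static void clear_slot_status(void *ctrl, u16 bits)    { }
static void dispatch_events(void *ctrl, u16 events)    { }

static irqreturn_t sample_isr(int irq, void *dev_id)
{
        u16 detected, pending = 0;

        /* re-read Slot Status until quiet, so events that latch between the
         * read and the write-one-to-clear are still picked up */
        do {
                detected = read_slot_status(dev_id) & EVENT_MASK;
                pending |= detected;
                if (!pending)
                        return IRQ_NONE;        /* not our interrupt */
                clear_slot_status(dev_id, detected);
        } while (detected);

        dispatch_events(dev_id, pending);
        return IRQ_HANDLED;
}
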
index 43816d4b3c435bf37a23740e4855551827059520..1648076600fc195a1c6a71a3f9ae444423400194 100644 (file)
@@ -39,6 +39,7 @@
 int shpchp_debug;
 int shpchp_poll_mode;
 int shpchp_poll_time;
+int shpchp_slot_with_bus;
 struct workqueue_struct *shpchp_wq;
 
 #define DRIVER_VERSION "0.4"
@@ -52,9 +53,11 @@ MODULE_LICENSE("GPL");
 module_param(shpchp_debug, bool, 0644);
 module_param(shpchp_poll_mode, bool, 0644);
 module_param(shpchp_poll_time, int, 0644);
+module_param(shpchp_slot_with_bus, bool, 0644);
 MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not");
 MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not");
 MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds");
+MODULE_PARM_DESC(shpchp_slot_with_bus, "Use bus number in the slot name");
 
 #define SHPC_MODULE_NAME "shpchp"
 
@@ -100,8 +103,12 @@ static void release_slot(struct hotplug_slot *hotplug_slot)
 
 static void make_slot_name(struct slot *slot)
 {
-       snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d",
-                slot->bus, slot->number);
+       if (shpchp_slot_with_bus)
+               snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%04d_%04d",
+                        slot->bus, slot->number);
+       else
+               snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d",
+                        slot->number);
 }
 
 static int init_slots(struct controller *ctrl)
index 26938da8f4380c5fd0bdb1786c71f8adb8418046..8c61304cbb37e8b9f32343c0fb93cd73bd276d27 100644 (file)
@@ -123,7 +123,7 @@ static void msix_flush_writes(unsigned int irq)
        }
 }
 
-static void msi_set_mask_bit(unsigned int irq, int flag)
+static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag)
 {
        struct msi_desc *entry;
 
@@ -137,8 +137,8 @@ static void msi_set_mask_bit(unsigned int irq, int flag)
 
                        pos = (long)entry->mask_base;
                        pci_read_config_dword(entry->dev, pos, &mask_bits);
-                       mask_bits &= ~(1);
-                       mask_bits |= flag;
+                       mask_bits &= ~(mask);
+                       mask_bits |= flag & mask;
                        pci_write_config_dword(entry->dev, pos, mask_bits);
                } else {
                        msi_set_enable(entry->dev, !flag);
@@ -241,13 +241,13 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
 
 void mask_msi_irq(unsigned int irq)
 {
-       msi_set_mask_bit(irq, 1);
+       msi_set_mask_bits(irq, 1, 1);
        msix_flush_writes(irq);
 }
 
 void unmask_msi_irq(unsigned int irq)
 {
-       msi_set_mask_bit(irq, 0);
+       msi_set_mask_bits(irq, 1, 0);
        msix_flush_writes(irq);
 }
 
@@ -291,7 +291,8 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
        msi_set_enable(dev, 0);
        write_msi_msg(dev->irq, &entry->msg);
        if (entry->msi_attrib.maskbit)
-               msi_set_mask_bit(dev->irq, entry->msi_attrib.masked);
+               msi_set_mask_bits(dev->irq, entry->msi_attrib.maskbits_mask,
+                                 entry->msi_attrib.masked);
 
        pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
        control &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE);
@@ -315,7 +316,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
 
        list_for_each_entry(entry, &dev->msi_list, list) {
                write_msi_msg(entry->irq, &entry->msg);
-               msi_set_mask_bit(entry->irq, entry->msi_attrib.masked);
+               msi_set_mask_bits(entry->irq, 1, entry->msi_attrib.masked);
        }
 
        BUG_ON(list_empty(&dev->msi_list));
@@ -382,6 +383,7 @@ static int msi_capability_init(struct pci_dev *dev)
                pci_write_config_dword(dev,
                        msi_mask_bits_reg(pos, is_64bit_address(control)),
                        maskbits);
+               entry->msi_attrib.maskbits_mask = temp;
        }
        list_add_tail(&entry->list, &dev->msi_list);
 
@@ -569,10 +571,9 @@ int pci_enable_msi(struct pci_dev* dev)
 }
 EXPORT_SYMBOL(pci_enable_msi);
 
-void pci_disable_msi(struct pci_dev* dev)
+void pci_msi_shutdown(struct pci_dev* dev)
 {
        struct msi_desc *entry;
-       int default_irq;
 
        if (!pci_msi_enable || !dev || !dev->msi_enabled)
                return;
@@ -583,15 +584,31 @@ void pci_disable_msi(struct pci_dev* dev)
 
        BUG_ON(list_empty(&dev->msi_list));
        entry = list_entry(dev->msi_list.next, struct msi_desc, list);
-       if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI) {
-               return;
+       /* Return to the pci reset state with msi irqs unmasked */
+       if (entry->msi_attrib.maskbit) {
+               u32 mask = entry->msi_attrib.maskbits_mask;
+               msi_set_mask_bits(dev->irq, mask, ~mask);
        }
-
-       default_irq = entry->msi_attrib.default_irq;
-       msi_free_irqs(dev);
+       if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
+               return;
 
        /* Restore dev->irq to its default pin-assertion irq */
-       dev->irq = default_irq;
+       dev->irq = entry->msi_attrib.default_irq;
+}
+void pci_disable_msi(struct pci_dev* dev)
+{
+       struct msi_desc *entry;
+
+       if (!pci_msi_enable || !dev || !dev->msi_enabled)
+               return;
+
+       pci_msi_shutdown(dev);
+
+       entry = list_entry(dev->msi_list.next, struct msi_desc, list);
+       if (!entry->dev || entry->msi_attrib.type != PCI_CAP_ID_MSI)
+               return;
+
+       msi_free_irqs(dev);
 }
 EXPORT_SYMBOL(pci_disable_msi);
 
@@ -684,7 +701,7 @@ static void msix_free_all_irqs(struct pci_dev *dev)
        msi_free_irqs(dev);
 }
 
-void pci_disable_msix(struct pci_dev* dev)
+void pci_msix_shutdown(struct pci_dev* dev)
 {
        if (!pci_msi_enable || !dev || !dev->msix_enabled)
                return;
@@ -692,6 +709,13 @@ void pci_disable_msix(struct pci_dev* dev)
        msix_set_enable(dev, 0);
        pci_intx_for_msi(dev, 1);
        dev->msix_enabled = 0;
+}
+void pci_disable_msix(struct pci_dev* dev)
+{
+       if (!pci_msi_enable || !dev || !dev->msix_enabled)
+               return;
+
+       pci_msix_shutdown(dev);
 
        msix_free_all_irqs(dev);
 }
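
Editor's note: two things happen in the msi.c hunks above: msi_set_mask_bit() becomes msi_set_mask_bits(irq, mask, flag), a read-modify-write that only touches the bits selected by mask, and pci_disable_msi()/pci_disable_msix() are split so a new shutdown half can quiesce the device without freeing its vectors. A small sketch of the mask arithmetic in isolation (the helper name is invented):

#include <linux/types.h>

/* replace only the bits selected by 'mask' with the matching bits of 'flag' */
static u32 apply_mask_bits(u32 mask_bits, u32 mask, u32 flag)
{
        mask_bits &= ~mask;
        mask_bits |= flag & mask;
        return mask_bits;
}

Masking a single vector is then apply_mask_bits(cur, 1, 1), unmasking it is apply_mask_bits(cur, 1, 0), and restoring a saved per-vector state is apply_mask_bits(cur, mask, saved), which is what the restore and shutdown paths above rely on.
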
index e8d94fafc2804e74fddd9c66eee9fac29a11883b..72cf61ed8f966d20f99c32d6acd77424e419ee69 100644 (file)
@@ -360,6 +360,8 @@ static void pci_device_shutdown(struct device *dev)
 
        if (drv && drv->shutdown)
                drv->shutdown(pci_dev);
+       pci_msi_shutdown(pci_dev);
+       pci_msix_shutdown(pci_dev);
 }
 
 /**
index 25b04fb2517d486137b18cdc619de14075e57ad2..5a0c6ad53f8ee0f8b8ce9ee978394b850b895c59 100644 (file)
@@ -33,7 +33,7 @@ source "drivers/pci/pcie/aer/Kconfig"
 config PCIEASPM
        bool "PCI Express ASPM support(Experimental)"
        depends on PCI && EXPERIMENTAL && PCIEPORTBUS
-       default y
+       default n
        help
          This enables PCI Express ASPM (Active State Power Management) and
          Clock Power Management. ASPM supports state L0/L0s/L1.
index 96ac54072f6f310e713c6c577361871c28db7ada..d39a78dbd026b03b4d58d450d6efc11ccd3ca652 100644 (file)
@@ -31,7 +31,7 @@ int aer_osc_setup(struct pcie_device *pciedev)
 {
        acpi_status status = AE_NOT_FOUND;
        struct pci_dev *pdev = pciedev->port;
-       acpi_handle handle = 0;
+       acpi_handle handle = NULL;
 
        if (acpi_pci_disabled)
                return -1;
index f991359f0c3689d292e749b798ef2888f8af7934..4a55bf380957fd5142f0dd0a8dfa9c39af6d73e8 100644 (file)
@@ -842,11 +842,14 @@ static void set_pcie_port_type(struct pci_dev *pdev)
  * reading the dword at 0x100 which must either be 0 or a valid extended
  * capability header.
  */
-int pci_cfg_space_size(struct pci_dev *dev)
+int pci_cfg_space_size_ext(struct pci_dev *dev, unsigned check_exp_pcix)
 {
        int pos;
        u32 status;
 
+       if (!check_exp_pcix)
+               goto skip;
+
        pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
        if (!pos) {
                pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
@@ -858,6 +861,7 @@ int pci_cfg_space_size(struct pci_dev *dev)
                        goto fail;
        }
 
+ skip:
        if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL)
                goto fail;
        if (status == 0xffffffff)
@@ -869,6 +873,11 @@ int pci_cfg_space_size(struct pci_dev *dev)
        return PCI_CFG_SPACE_SIZE;
 }
 
+int pci_cfg_space_size(struct pci_dev *dev)
+{
+       return pci_cfg_space_size_ext(dev, 1);
+}
+
 static void pci_release_bus_bridge_dev(struct device *dev)
 {
        kfree(dev);
@@ -964,7 +973,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
        dev->dev.release = pci_release_dev;
        pci_dev_get(dev);
 
-       set_dev_node(&dev->dev, pcibus_to_node(bus));
        dev->dev.dma_mask = &dev->dma_mask;
        dev->dev.dma_parms = &dev->dma_parms;
        dev->dev.coherent_dma_mask = 0xffffffffull;
@@ -1080,6 +1088,10 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
        return max;
 }
 
+void __attribute__((weak)) set_pci_bus_resources_arch_default(struct pci_bus *b)
+{
+}
+
 struct pci_bus * pci_create_bus(struct device *parent,
                int bus, struct pci_ops *ops, void *sysdata)
 {
@@ -1119,6 +1131,9 @@ struct pci_bus * pci_create_bus(struct device *parent,
                goto dev_reg_err;
        b->bridge = get_device(dev);
 
+       if (!parent)
+               set_dev_node(b->bridge, pcibus_to_node(b));
+
        b->dev.class = &pcibus_class;
        b->dev.parent = b->bridge;
        sprintf(b->dev.bus_id, "%04x:%02x", pci_domain_nr(b), bus);
@@ -1136,6 +1151,8 @@ struct pci_bus * pci_create_bus(struct device *parent,
        b->resource[0] = &ioport_resource;
        b->resource[1] = &iomem_resource;
 
+       set_pci_bus_resources_arch_default(b);
+
        return b;
 
 dev_create_file_err:
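
Two patterns stand out in this probe.c section: pci_cfg_space_size() becomes a thin wrapper around a new pci_cfg_space_size_ext() that can skip the Express/PCI-X capability check, and pci_create_bus() now calls set_pci_bus_resources_arch_default(), declared weak with an empty body, so an architecture can override the default bus resources without touching generic code. A minimal sketch of the weak-hook pattern (the arch-side body and resource names are hypothetical):

#include <linux/pci.h>
#include <linux/ioport.h>

/* Generic code: weak no-op default, as in the hunk above. */
void __attribute__((weak)) set_pci_bus_resources_arch_default(struct pci_bus *b)
{
}

/* Arch code (separate file): a strong definition with the same name
 * overrides the weak default at link time. */
static struct resource my_arch_io_window = {
        .name = "arch PIO", .start = 0x0000, .end = 0xffff, .flags = IORESOURCE_IO,
};
static struct resource my_arch_mem_window = {
        .name = "arch MMIO", .start = 0x80000000, .end = 0x8fffffff, .flags = IORESOURCE_MEM,
};

void set_pci_bus_resources_arch_default(struct pci_bus *b)
{
        b->resource[0] = &my_arch_io_window;
        b->resource[1] = &my_arch_mem_window;
}
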
index ef18fcd641e2c478927e5e6f29505d31380b75ae..963a97642ae9101a7454c7b0b3c0dbcc95f8240e 100644 (file)
@@ -293,6 +293,7 @@ static int proc_bus_pci_release(struct inode *inode, struct file *file)
 #endif /* HAVE_PCI_MMAP */
 
 static const struct file_operations proc_bus_pci_operations = {
+       .owner          = THIS_MODULE,
        .llseek         = proc_bus_pci_lseek,
        .read           = proc_bus_pci_read,
        .write          = proc_bus_pci_write,
@@ -406,11 +407,10 @@ int pci_proc_attach_device(struct pci_dev *dev)
        }
 
        sprintf(name, "%02x.%x", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
-       e = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir);
+       e = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir,
+                            &proc_bus_pci_operations, dev);
        if (!e)
                return -ENOMEM;
-       e->proc_fops = &proc_bus_pci_operations;
-       e->data = dev;
        e->size = dev->cfg_size;
        dev->procent = e;
 
@@ -462,6 +462,7 @@ static int proc_bus_pci_dev_open(struct inode *inode, struct file *file)
        return seq_open(file, &proc_bus_pci_devices_op);
 }
 static const struct file_operations proc_bus_pci_dev_operations = {
+       .owner          = THIS_MODULE,
        .open           = proc_bus_pci_dev_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -470,12 +471,10 @@ static const struct file_operations proc_bus_pci_dev_operations = {
 
 static int __init pci_proc_init(void)
 {
-       struct proc_dir_entry *entry;
        struct pci_dev *dev = NULL;
-       proc_bus_pci_dir = proc_mkdir("pci", proc_bus);
-       entry = create_proc_entry("devices", 0, proc_bus_pci_dir);
-       if (entry)
-               entry->proc_fops = &proc_bus_pci_dev_operations;
+       proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
+       proc_create("devices", 0, proc_bus_pci_dir,
+                   &proc_bus_pci_dev_operations);
        proc_initialized = 1;
        while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
                pci_proc_attach_device(dev);
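
Both proc hunks above replace create_proc_entry() plus manual assignment of ->proc_fops and ->data with proc_create()/proc_create_data(), which publish the entry only after its file_operations and private data are in place. A minimal sketch of the new idiom (every my_* name is hypothetical):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char my_tag[] = "mydev";

static int my_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%s\n", (char *)m->private);
        return 0;
}

static int my_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, my_proc_show, PDE(inode)->data);
}

static const struct file_operations my_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = my_proc_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static int __init my_proc_init(void)
{
        struct proc_dir_entry *e;

        /* Private data is handed over at creation time instead of being
         * patched into e->data after the entry is already visible. */
        e = proc_create_data("mydev", S_IFREG | S_IRUGO, NULL,
                             &my_proc_fops, my_tag);
        return e ? 0 : -ENOMEM;
}
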
index 8d8852651fd2a7cc705877d661c1bcca90d2d545..1b0eb5aaf650738a57300890941136d33ec06c62 100644 (file)
@@ -38,7 +38,6 @@ config PCMCIA_DEBUG
 config PCMCIA
        tristate "16-bit PCMCIA support"
        select CRC32
-       select HAVE_IDE
        default y
        ---help---
           This option enables support for 16-bit PCMCIA cards. Most older
index 74e051535d6c0419cad92901dabb9c0737929deb..c78d77fd7e3bcea2a7a448c979130be99f609f0e 100644 (file)
@@ -194,7 +194,7 @@ db1x00_pcmcia_configure_socket(struct au1000_pcmcia_socket *skt, struct socket_s
                                default:
                                        pwr |= SET_VCC_VPP(0,0,sock);
                                        printk("%s: bad Vcc/Vpp (%d:%d)\n",
-                                                       __FUNCTION__,
+                                                       __func__,
                                                        state->Vcc,
                                                        state->Vpp);
                                        break;
@@ -215,7 +215,7 @@ db1x00_pcmcia_configure_socket(struct au1000_pcmcia_socket *skt, struct socket_s
                                default:
                                        pwr |= SET_VCC_VPP(0,0,sock);
                                        printk("%s: bad Vcc/Vpp (%d:%d)\n",
-                                                       __FUNCTION__,
+                                                       __func__,
                                                        state->Vcc,
                                                        state->Vpp);
                                        break;
@@ -224,7 +224,7 @@ db1x00_pcmcia_configure_socket(struct au1000_pcmcia_socket *skt, struct socket_s
                default: /* what's this ? */
                        pwr |= SET_VCC_VPP(0,0,sock);
                        printk(KERN_ERR "%s: bad Vcc %d\n",
-                                       __FUNCTION__, state->Vcc);
+                                       __func__, state->Vcc);
                        break;
        }
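
The change running through this and the following PCMCIA hunks is purely s/__FUNCTION__/__func__/: both expand to the enclosing function name, but __func__ is the C99 predefined identifier while __FUNCTION__ is a GCC extension. An illustrative one-liner (function and argument are made up):

#include <linux/kernel.h>

static void my_report_bad_vcc(int vcc)
{
        /* __func__ (standard C99) replaces the GCC-specific __FUNCTION__ */
        printk(KERN_ERR "%s: bad Vcc %d\n", __func__, vcc);
}
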
 
index b693367d38cdbb3d907e14b1dd8275fed044ffe3..75e8f8505e47230156103ccb784a606ed43b8698 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/notifier.h>
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/platform_device.h>
 
 #include <asm/io.h>
@@ -71,7 +72,7 @@ extern struct au1000_pcmcia_socket au1000_pcmcia_socket[];
 u32 *pcmcia_base_vaddrs[2];
 extern const unsigned long mips_io_port_base;
 
-DECLARE_MUTEX(pcmcia_sockets_lock);
+static DEFINE_MUTEX(pcmcia_sockets_lock);
 
 static int (*au1x00_pcmcia_hw_init[])(struct device *dev) = {
        au1x_board_init,
@@ -472,7 +473,7 @@ int au1x00_drv_pcmcia_remove(struct device *dev)
        struct skt_dev_info *sinfo = dev_get_drvdata(dev);
        int i;
 
-       down(&pcmcia_sockets_lock);
+       mutex_lock(&pcmcia_sockets_lock);
        dev_set_drvdata(dev, NULL);
 
        for (i = 0; i < sinfo->nskt; i++) {
@@ -488,7 +489,7 @@ int au1x00_drv_pcmcia_remove(struct device *dev)
        }
 
        kfree(sinfo);
-       up(&pcmcia_sockets_lock);
+       mutex_unlock(&pcmcia_sockets_lock);
        return 0;
 }
 
@@ -501,13 +502,13 @@ static int au1x00_drv_pcmcia_probe(struct device *dev)
 {
        int i, ret = -ENODEV;
 
-       down(&pcmcia_sockets_lock);
+       mutex_lock(&pcmcia_sockets_lock);
        for (i=0; i < ARRAY_SIZE(au1x00_pcmcia_hw_init); i++) {
                ret = au1x00_pcmcia_hw_init[i](dev);
                if (ret == 0)
                        break;
        }
-       up(&pcmcia_sockets_lock);
+       mutex_unlock(&pcmcia_sockets_lock);
        return ret;
 }
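
Besides the __func__ cleanup, au1000_generic.c converts a semaphore that was only ever used for mutual exclusion (DECLARE_MUTEX with down()/up()) into a real mutex, which is lighter and gets lockdep coverage, and makes it static now that no other file needs it. The general pattern, as a minimal sketch:

#include <linux/mutex.h>

static DEFINE_MUTEX(my_sockets_lock);           /* was: DECLARE_MUTEX(my_sockets_lock) */

static void my_walk_sockets(void)
{
        mutex_lock(&my_sockets_lock);           /* was: down(&my_sockets_lock) */
        /* ... traverse or modify the shared socket list ... */
        mutex_unlock(&my_sockets_lock);         /* was: up(&my_sockets_lock) */
}
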
 
index 86c0808d6a057920bfe07412cf1e73c9efff7c2a..157e41423a0a5390e42bb5106eccc5be489fb16f 100644 (file)
@@ -244,7 +244,7 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
                                        pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
                                                        configure->sock);
                                        printk("%s: bad Vcc/Vpp (%d:%d)\n", 
-                                                       __FUNCTION__, 
+                                                       __func__,
                                                        configure->vcc, 
                                                        configure->vpp);
                                        break;
@@ -272,7 +272,7 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
                                        pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
                                                        configure->sock);
                                        printk("%s: bad Vcc/Vpp (%d:%d)\n", 
-                                                       __FUNCTION__, 
+                                                       __func__,
                                                        configure->vcc, 
                                                        configure->vpp);
                                        break;
@@ -300,7 +300,7 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
                                        pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
                                                        configure->sock);
                                        printk("%s: bad Vcc/Vpp (%d:%d)\n", 
-                                                       __FUNCTION__, 
+                                                       __func__,
                                                        configure->vcc, 
                                                        configure->vpp);
                                        break;
@@ -309,7 +309,7 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
                default: /* what's this ? */
                        pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,configure->sock);
                        printk(KERN_ERR "%s: bad Vcc %d\n", 
-                                       __FUNCTION__, configure->vcc);
+                                       __func__, configure->vcc);
                        break;
        }
 
@@ -353,7 +353,7 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
                                default:
                                        pcr |= SET_VCC_VPP(0,0);
                                        printk("%s: bad Vcc/Vpp (%d:%d)\n", 
-                                                       __FUNCTION__, 
+                                                       __func__,
                                                        configure->vcc, 
                                                        configure->vpp);
                                        break;
@@ -374,7 +374,7 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
                                default:
                                        pcr |= SET_VCC_VPP(0,0);
                                        printk("%s: bad Vcc/Vpp (%d:%d)\n", 
-                                                       __FUNCTION__, 
+                                                       __func__,
                                                        configure->vcc, 
                                                        configure->vpp);
                                        break;
@@ -383,7 +383,7 @@ pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
                default: /* what's this ? */
                        pcr |= SET_VCC_VPP(0,0);
                        printk(KERN_ERR "%s: bad Vcc %d\n", 
-                                       __FUNCTION__, configure->vcc);
+                                       __func__, configure->vcc);
                        break;
        }
 
index ce9d5c44a7b5a8253dc8d553f20b9a246cf38830..c78ed53475107c7371b503aa68c6d6ff96c6c4e6 100644 (file)
@@ -56,7 +56,7 @@
 #define PCMCIA_IRQ             AU1000_GPIO_4
 
 #if 0
-#define DEBUG(x,args...)       printk(__FUNCTION__ ": " x,##args)
+#define DEBUG(x, args...)      printk(__func__ ": " x, ##args)
 #else
 #define DEBUG(x,args...)
 #endif
index 714baaeb6da1257a264c903b4b36d67570c15486..fb2f38dc92c560de378ddab5b688657e82d8661a 100644 (file)
@@ -209,7 +209,7 @@ static void cardbus_assign_irqs(struct pci_bus *bus, int irq)
        }
 }
 
-int cb_alloc(struct pcmcia_socket * s)
+int __ref cb_alloc(struct pcmcia_socket * s)
 {
        struct pci_bus *bus = s->cb_dev->subordinate;
        struct pci_dev *dev;
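
cb_alloc() gains a __ref annotation: it legitimately calls code placed in init/devinit sections, and the marker tells modpost the cross-section reference is intentional, silencing the section-mismatch warning. A schematic example (both functions are hypothetical):

#include <linux/init.h>

static int __devinit my_scan_card(void)         /* placed in .devinit.text */
{
        return 0;
}

/* __ref: the reference from regular text to __devinit code is deliberate,
 * so modpost should not warn about a section mismatch here. */
int __ref my_hotplug_rescan(void)
{
        return my_scan_card();
}
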
index 06a85d7d5aa2fecb63edd53fb528aeb2d7ad828c..36379535f9daf17f8cb86a779cab491203afa9c1 100644 (file)
@@ -402,15 +402,6 @@ EXPORT_SYMBOL(pcmcia_replace_cis);
     
 ======================================================================*/
 
-static inline u16 cis_get_u16(void *ptr)
-{
-       return le16_to_cpu(get_unaligned((__le16 *) ptr));
-}
-static inline u32 cis_get_u32(void *ptr)
-{
-       return le32_to_cpu(get_unaligned((__le32 *) ptr));
-}
-
 typedef struct tuple_flags {
     u_int              link_space:4;
     u_int              has_link:1;
@@ -471,7 +462,7 @@ static int follow_link(struct pcmcia_socket *s, tuple_t *tuple)
        /* Get indirect link from the MFC tuple */
        read_cis_cache(s, LINK_SPACE(tuple->Flags),
                       tuple->LinkOffset, 5, link);
-       ofs = cis_get_u32(link + 1);
+       ofs = get_unaligned_le32(link + 1);
        SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR);
        /* Move to the next indirect link */
        tuple->LinkOffset += 5;
@@ -679,8 +670,8 @@ static int parse_checksum(tuple_t *tuple, cistpl_checksum_t *csum)
     if (tuple->TupleDataLen < 5)
        return CS_BAD_TUPLE;
     p = (u_char *) tuple->TupleData;
-    csum->addr = tuple->CISOffset + cis_get_u16(p) - 2;
-    csum->len = cis_get_u16(p + 2);
+    csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2;
+    csum->len = get_unaligned_le16(p + 2);
     csum->sum = *(p + 4);
     return CS_SUCCESS;
 }
@@ -691,7 +682,7 @@ static int parse_longlink(tuple_t *tuple, cistpl_longlink_t *link)
 {
     if (tuple->TupleDataLen < 4)
        return CS_BAD_TUPLE;
-    link->addr = cis_get_u32(tuple->TupleData);
+    link->addr = get_unaligned_le32(tuple->TupleData);
     return CS_SUCCESS;
 }
 
@@ -710,7 +701,7 @@ static int parse_longlink_mfc(tuple_t *tuple,
        return CS_BAD_TUPLE;
     for (i = 0; i < link->nfn; i++) {
        link->fn[i].space = *p; p++;
-       link->fn[i].addr = cis_get_u32(p);
+       link->fn[i].addr = get_unaligned_le32(p);
        p += 4;
     }
     return CS_SUCCESS;
@@ -800,8 +791,8 @@ static int parse_manfid(tuple_t *tuple, cistpl_manfid_t *m)
 {
     if (tuple->TupleDataLen < 4)
        return CS_BAD_TUPLE;
-    m->manf = cis_get_u16(tuple->TupleData);
-    m->card = cis_get_u16(tuple->TupleData + 2);
+    m->manf = get_unaligned_le16(tuple->TupleData);
+    m->card = get_unaligned_le16(tuple->TupleData + 2);
     return CS_SUCCESS;
 }
 
@@ -1100,7 +1091,7 @@ static int parse_cftable_entry(tuple_t *tuple,
        break;
     case 0x20:
        entry->mem.nwin = 1;
-       entry->mem.win[0].len = cis_get_u16(p) << 8;
+       entry->mem.win[0].len = get_unaligned_le16(p) << 8;
        entry->mem.win[0].card_addr = 0;
        entry->mem.win[0].host_addr = 0;
        p += 2;
@@ -1108,8 +1099,8 @@ static int parse_cftable_entry(tuple_t *tuple,
        break;
     case 0x40:
        entry->mem.nwin = 1;
-       entry->mem.win[0].len = cis_get_u16(p) << 8;
-       entry->mem.win[0].card_addr = cis_get_u16(p + 2) << 8;
+       entry->mem.win[0].len = get_unaligned_le16(p) << 8;
+       entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8;
        entry->mem.win[0].host_addr = 0;
        p += 4;
        if (p > q) return CS_BAD_TUPLE;
@@ -1146,7 +1137,7 @@ static int parse_bar(tuple_t *tuple, cistpl_bar_t *bar)
     p = (u_char *)tuple->TupleData;
     bar->attr = *p;
     p += 2;
-    bar->size = cis_get_u32(p);
+    bar->size = get_unaligned_le32(p);
     return CS_SUCCESS;
 }
 
@@ -1159,7 +1150,7 @@ static int parse_config_cb(tuple_t *tuple, cistpl_config_t *config)
        return CS_BAD_TUPLE;
     config->last_idx = *(++p);
     p++;
-    config->base = cis_get_u32(p);
+    config->base = get_unaligned_le32(p);
     config->subtuples = tuple->TupleDataLen - 6;
     return CS_SUCCESS;
 }
@@ -1275,7 +1266,7 @@ static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2)
 
     v2->vers = p[0];
     v2->comply = p[1];
-    v2->dindex = cis_get_u16(p +2 );
+    v2->dindex = get_unaligned_le16(p +2 );
     v2->vspec8 = p[6];
     v2->vspec9 = p[7];
     v2->nhdr = p[8];
@@ -1316,8 +1307,8 @@ static int parse_format(tuple_t *tuple, cistpl_format_t *fmt)
 
     fmt->type = p[0];
     fmt->edc = p[1];
-    fmt->offset = cis_get_u32(p + 2);
-    fmt->length = cis_get_u32(p + 6);
+    fmt->offset = get_unaligned_le32(p + 2);
+    fmt->length = get_unaligned_le32(p + 6);
 
     return CS_SUCCESS;
 }
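
The cistpl.c hunks drop the driver-local cis_get_u16()/cis_get_u32() wrappers in favour of the generic get_unaligned_le16()/get_unaligned_le32() helpers, which fold the unaligned load and the little-endian conversion into one call. A minimal sketch (function and field names are made up):

#include <linux/types.h>
#include <asm/unaligned.h>

static void my_parse_tuple(const u8 *p, u16 *manf, u32 *base)
{
        /* Safe on architectures that fault on unaligned access, and does
         * the le16/le32 byte-swap as part of the load. */
        *manf = get_unaligned_le16(p);
        *base = get_unaligned_le32(p + 2);
}
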
index 56230dbd347aee88f9d6eb9fc1a32ec9642d7799..29276bd28295007212a73e89c4bc44cb6f0a0a2a 100644 (file)
@@ -652,6 +652,9 @@ static int pccardd(void *__skt)
                complete(&skt->thread_done);
                return 0;
        }
+       ret = pccard_sysfs_add_socket(&skt->dev);
+       if (ret)
+               dev_warn(&skt->dev, "err %d adding socket attributes\n", ret);
 
        add_wait_queue(&skt->thread_wait, &wait);
        complete(&skt->thread_done);
@@ -694,6 +697,7 @@ static int pccardd(void *__skt)
        remove_wait_queue(&skt->thread_wait, &wait);
 
        /* remove from the device core */
+       pccard_sysfs_remove_socket(&skt->dev);
        device_unregister(&skt->dev);
 
        return 0;
@@ -940,20 +944,13 @@ EXPORT_SYMBOL(pcmcia_socket_class);
 
 static int __init init_pcmcia_cs(void)
 {
-       int ret;
-
        init_completion(&pcmcia_unload);
-       ret = class_register(&pcmcia_socket_class);
-       if (ret)
-               return (ret);
-       return class_interface_register(&pccard_sysfs_interface);
+       return class_register(&pcmcia_socket_class);
 }
 
 static void __exit exit_pcmcia_cs(void)
 {
-       class_interface_unregister(&pccard_sysfs_interface);
        class_unregister(&pcmcia_socket_class);
-
        wait_for_completion(&pcmcia_unload);
 }
 
index 9fa207e3c7b35757dcd394cb1478191b266d3ed6..e7d5d141f24dc9b661f9ed4412752c53a04b7764 100644 (file)
@@ -121,7 +121,8 @@ struct resource *pcmcia_find_mem_region(u_long base, u_long num, u_long align,
 void release_resource_db(struct pcmcia_socket *s);
 
 /* In socket_sysfs.c */
-extern struct class_interface pccard_sysfs_interface;
+extern int pccard_sysfs_add_socket(struct device *dev);
+extern void pccard_sysfs_remove_socket(struct device *dev);
 
 /* In cs.c */
 extern struct rw_semaphore pcmcia_socket_list_rwsem;
index 5a85871f5ee919ff3bdb18f10be8715132b037de..e40775443d04c73233735c61b0802eaa4936a751 100644 (file)
@@ -1520,7 +1520,7 @@ static void pcmcia_bus_remove_socket(struct device *dev,
 
 
 /* the pcmcia_bus_interface is used to handle pcmcia socket devices */
-static struct class_interface pcmcia_bus_interface = {
+static struct class_interface pcmcia_bus_interface __refdata = {
        .class = &pcmcia_socket_class,
        .add_dev = &pcmcia_bus_add_socket,
        .remove_dev = &pcmcia_bus_remove_socket,
index e54ecc580d9ed328cc887cb843ab58542d5f5b34..e13618656ff7ee7807618c9a52e0398334e04505 100644 (file)
@@ -53,7 +53,7 @@ static int i82092aa_socket_resume (struct pci_dev *dev)
 }
 #endif
 
-static struct pci_driver i82092aa_pci_drv = {
+static struct pci_driver i82092aa_pci_driver = {
        .name           = "i82092aa",
        .id_table       = i82092aa_pci_ids,
        .probe          = i82092aa_pci_probe,
@@ -714,13 +714,13 @@ static int i82092aa_set_mem_map(struct pcmcia_socket *socket, struct pccard_mem_
 
 static int i82092aa_module_init(void)
 {
-       return pci_register_driver(&i82092aa_pci_drv);
+       return pci_register_driver(&i82092aa_pci_driver);
 }
 
 static void i82092aa_module_exit(void)
 {
        enter("i82092aa_module_exit");
-       pci_unregister_driver(&i82092aa_pci_drv);
+       pci_unregister_driver(&i82092aa_pci_driver);
        if (sockets[0].io_base>0)
                         release_region(sockets[0].io_base, 2);
        leave("i82092aa_module_exit");
index bb6db3a582b2292d1dbe7777e87c545a4b00d737..46314b420765e6dffc63d3a5f4ce31241e3bccf6 100644 (file)
@@ -153,7 +153,7 @@ omap_cf_set_socket(struct pcmcia_socket *sock, struct socket_state_t *s)
 
 static int omap_cf_ss_suspend(struct pcmcia_socket *s)
 {
-       pr_debug("%s: %s\n", driver_name, __FUNCTION__);
+       pr_debug("%s: %s\n", driver_name, __func__);
        return omap_cf_set_socket(s, &dead_socket);
 }
 
index 27523c5f4dad50a9fd9de15ff768257945716d99..5f186abca1082843a7b12e30865b3bb7c8b83f34 100644 (file)
@@ -787,7 +787,7 @@ void __init pcmcia_setup_ioctl(void) {
                major_dev = i;
 
 #ifdef CONFIG_PROC_FS
-       proc_pccard = proc_mkdir("pccard", proc_bus);
+       proc_pccard = proc_mkdir("bus/pccard", NULL);
        if (proc_pccard)
                create_proc_read_entry("drivers",0,proc_pccard,proc_read_drivers,NULL);
 #endif
@@ -798,7 +798,7 @@ void __exit pcmcia_cleanup_ioctl(void) {
 #ifdef CONFIG_PROC_FS
        if (proc_pccard) {
                remove_proc_entry("drivers", proc_pccard);
-               remove_proc_entry("pccard", proc_bus);
+               remove_proc_entry("bus/pccard", NULL);
        }
 #endif
        if (major_dev != -1)
index abc10fe49bd85889fa813aba43c703eeacba4678..8bed1dab903983e4f5d8a11f238b59567931cec0 100644 (file)
@@ -778,7 +778,7 @@ static struct pci_device_id pd6729_pci_ids[] = {
 };
 MODULE_DEVICE_TABLE(pci, pd6729_pci_ids);
 
-static struct pci_driver pd6729_pci_drv = {
+static struct pci_driver pd6729_pci_driver = {
        .name           = "pd6729",
        .id_table       = pd6729_pci_ids,
        .probe          = pd6729_pci_probe,
@@ -791,12 +791,12 @@ static struct pci_driver pd6729_pci_drv = {
 
 static int pd6729_module_init(void)
 {
-       return pci_register_driver(&pd6729_pci_drv);
+       return pci_register_driver(&pd6729_pci_driver);
 }
 
 static void pd6729_module_exit(void)
 {
-       pci_unregister_driver(&pd6729_pci_drv);
+       pci_unregister_driver(&pd6729_pci_driver);
 }
 
 module_init(pd6729_module_init);
index 4a05802213c8bdae09138a359bb92ec025d41d2f..881ec8a8e3896bf969213bcc9433d4b9b9a95e3a 100644 (file)
@@ -87,7 +87,7 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
 
                default:
                        printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
-                              __FUNCTION__, state->Vcc);
+                              __func__, state->Vcc);
                        ret = -1;
                }
 
@@ -104,7 +104,7 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
                                pa_dwr_set |= GPIO_A0;
                        else {
                                printk(KERN_ERR "%s(): unrecognized Vpp %u\n",
-                                      __FUNCTION__, state->Vpp);
+                                      __func__, state->Vpp);
                                ret = -1;
                                break;
                        }
@@ -128,14 +128,14 @@ lubbock_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
 
                default:
                        printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
-                              __FUNCTION__, state->Vcc);
+                              __func__, state->Vcc);
                        ret = -1;
                        break;
                }
 
                if (state->Vpp != state->Vcc && state->Vpp != 0) {
                        printk(KERN_ERR "%s(): CF slot cannot support Vpp %u\n",
-                              __FUNCTION__, state->Vpp);
+                              __func__, state->Vpp);
                        ret = -1;
                        break;
                }
index 6fa5eaaab8afcf4188fd27f04bb95aa9c5746d16..145b85e0f02c10a84690d6aa0ccf8dd64bc39304 100644 (file)
@@ -99,7 +99,7 @@ static int mst_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
        case 50: power |= MST_PCMCIA_PWR_VCC_50; break;
        default:
                 printk(KERN_ERR "%s(): bad Vcc %u\n",
-                                __FUNCTION__, state->Vcc);
+                                __func__, state->Vcc);
                 ret = -1;
        }
 
@@ -111,7 +111,7 @@ static int mst_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
                          power |= MST_PCMCIA_PWR_VPP_VCC;
                  } else {
                          printk(KERN_ERR "%s(): bad Vpp %u\n",
-                                         __FUNCTION__, state->Vpp);
+                                         __func__, state->Vpp);
                          ret = -1;
                  }
        }
index a8d1007077213c96cdc6fe02b7c75d7b198fcde3..0fcf763b9175a077768d6e85b9a933e24ef52967 100644 (file)
@@ -1045,7 +1045,7 @@ static void __devexit pccard_sysfs_remove_rsrc(struct device *dev,
                device_remove_file(dev, *attr);
 }
 
-static struct class_interface pccard_rsrc_interface = {
+static struct class_interface pccard_rsrc_interface __refdata = {
        .class = &pcmcia_socket_class,
        .add_dev = &pccard_sysfs_add_rsrc,
        .remove_dev = __devexit_p(&pccard_sysfs_remove_rsrc),
index 7c57fdd3c8d77ae03985175243c72f1f42bf5ba7..ce133ce81c107c30670e5542777a1ec8c44ede87 100644 (file)
@@ -66,14 +66,14 @@ assabet_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_stat
 
        case 50:
                printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V...\n",
-                       __FUNCTION__);
+                       __func__);
 
        case 33:  /* Can only apply 3.3V to the CF slot. */
                mask = ASSABET_BCR_CF_PWR;
                break;
 
        default:
-               printk(KERN_ERR "%s(): unrecognized Vcc %u\n", __FUNCTION__,
+               printk(KERN_ERR "%s(): unrecognized Vcc %u\n", __func__,
                        state->Vcc);
                return -1;
        }
index 62bfc7566ec2a927396bbe48f993b5502fd63d16..607c3f326eca493809022e9c72232cdaddf7a2b8 100644 (file)
@@ -82,14 +82,14 @@ badge4_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state
        case 0:
                if ((state->Vcc != 0) &&
                    (state->Vcc != badge4_pcmvcc)) {
-                       complain_about_jumpering(__FUNCTION__, "pcmvcc",
+                       complain_about_jumpering(__func__, "pcmvcc",
                                                 badge4_pcmvcc, state->Vcc);
                        // Apply power regardless of the jumpering.
                        // return -1;
                }
                if ((state->Vpp != 0) &&
                    (state->Vpp != badge4_pcmvpp)) {
-                       complain_about_jumpering(__FUNCTION__, "pcmvpp",
+                       complain_about_jumpering(__func__, "pcmvpp",
                                                 badge4_pcmvpp, state->Vpp);
                        return -1;
                }
@@ -98,7 +98,7 @@ badge4_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_state
        case 1:
                if ((state->Vcc != 0) &&
                    (state->Vcc != badge4_cfvcc)) {
-                       complain_about_jumpering(__FUNCTION__, "cfvcc",
+                       complain_about_jumpering(__func__, "cfvcc",
                                                 badge4_cfvcc, state->Vcc);
                        return -1;
                }
@@ -143,7 +143,7 @@ int pcmcia_badge4_init(struct device *dev)
        if (machine_is_badge4()) {
                printk(KERN_INFO
                       "%s: badge4_pcmvcc=%d, badge4_pcmvpp=%d, badge4_cfvcc=%d\n",
-                      __FUNCTION__,
+                      __func__,
                       badge4_pcmvcc, badge4_pcmvpp, badge4_cfvcc);
 
                ret = sa11xx_drv_pcmcia_probe(dev, &badge4_pcmcia_ops, 0, 2);
index 549a1529fe35800e9c6034a6a4940dcf3460caab..7c3951a2675dd2c5ba8ff3b0194a8ac865294a7d 100644 (file)
@@ -63,7 +63,7 @@ cerf_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
 
        default:
                printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
-                       __FUNCTION__, state->Vcc);
+                       __func__, state->Vcc);
                return -1;
        }
 
index 6284c35dabc687f6ee1fc5afbf89bda6276ffd46..2167e6714d2dcbee063097e92fbee4411c1fbfe6 100644 (file)
@@ -42,7 +42,7 @@ jornada720_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_s
   unsigned int pa_dwr_mask, pa_dwr_set;
   int ret;
 
-printk("%s(): config socket %d vcc %d vpp %d\n", __FUNCTION__,
+printk("%s(): config socket %d vcc %d vpp %d\n", __func__,
        skt->nr, state->Vcc, state->Vpp);
 
   switch (skt->nr) {
@@ -74,7 +74,7 @@ printk("%s(): config socket %d vcc %d vpp %d\n", __FUNCTION__,
 
   if (state->Vpp != state->Vcc && state->Vpp != 0) {
     printk(KERN_ERR "%s(): slot cannot support VPP %u\n",
-          __FUNCTION__, state->Vpp);
+          __func__, state->Vpp);
     return -1;
   }
 
index 5bc9e9532b9d78f48f00ecd94fdcd5211118bd46..687492fcd5b44254913edd61508c83099e467232 100644 (file)
@@ -59,7 +59,7 @@ neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_sta
                        ncr_set = NCR_A0VPP;
                else {
                        printk(KERN_ERR "%s(): unrecognized VPP %u\n",
-                              __FUNCTION__, state->Vpp);
+                              __func__, state->Vpp);
                        return -1;
                }
                break;
@@ -71,7 +71,7 @@ neponset_pcmcia_configure_socket(struct soc_pcmcia_socket *skt, const socket_sta
 
                if (state->Vpp != state->Vcc && state->Vpp != 0) {
                        printk(KERN_ERR "%s(): CF slot cannot support VPP %u\n",
-                              __FUNCTION__, state->Vpp);
+                              __func__, state->Vpp);
                        return -1;
                }
                break;
index 9456f5478d09251178534d354a947ffd38032be3..494912fccc0d27dd4cafd93247cc82c4fd3ad52a 100644 (file)
@@ -73,19 +73,19 @@ shannon_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
 {
        switch (state->Vcc) {
        case 0: /* power off */
-               printk(KERN_WARNING "%s(): CS asked for 0V, still applying 3.3V..\n", __FUNCTION__);
+               printk(KERN_WARNING "%s(): CS asked for 0V, still applying 3.3V..\n", __func__);
                break;
        case 50:
-               printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V..\n", __FUNCTION__);
+               printk(KERN_WARNING "%s(): CS asked for 5V, applying 3.3V..\n", __func__);
        case 33:
                break;
        default:
                printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
-                      __FUNCTION__, state->Vcc);
+                      __func__, state->Vcc);
                return -1;
        }
 
-       printk(KERN_WARNING "%s(): Warning, Can't perform reset\n", __FUNCTION__);
+       printk(KERN_WARNING "%s(): Warning, Can't perform reset\n", __func__);
        
        /* Silently ignore Vpp, output enable, speaker enable. */
 
index 04d6f7f75f7806b6ea8fc66f4bc34b4163bbb3c3..42567de894b942916e3dbfeafe1015f4896adb8d 100644 (file)
@@ -90,7 +90,7 @@ simpad_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
 
        default:
                printk(KERN_ERR "%s(): unrecognized Vcc %u\n",
-                       __FUNCTION__, state->Vcc);
+                       __func__, state->Vcc);
                clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1);
                local_irq_restore(flags);
                return -1;
index aa7779d89752681a05e5d10d0aa76c19ff08a648..420a77540f412758979e72b300d7aeac1801b145 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/kernel.h>
 #include <linux/timer.h>
 #include <linux/mm.h>
+#include <linux/mutex.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/spinlock.h>
@@ -353,7 +354,7 @@ soc_common_pcmcia_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *m
                (map->flags&MAP_PREFETCH)?"PREFETCH ":"");
 
        if (map->map >= MAX_IO_WIN) {
-               printk(KERN_ERR "%s(): map (%d) out of range\n", __FUNCTION__,
+               printk(KERN_ERR "%s(): map (%d) out of range\n", __func__,
                       map->map);
                return -1;
        }
@@ -578,7 +579,7 @@ EXPORT_SYMBOL(soc_pcmcia_enable_irqs);
 
 
 LIST_HEAD(soc_pcmcia_sockets);
-DECLARE_MUTEX(soc_pcmcia_sockets_lock);
+static DEFINE_MUTEX(soc_pcmcia_sockets_lock);
 
 static const char *skt_names[] = {
        "PCMCIA socket 0",
@@ -601,11 +602,11 @@ soc_pcmcia_notifier(struct notifier_block *nb, unsigned long val, void *data)
        struct cpufreq_freqs *freqs = data;
        int ret = 0;
 
-       down(&soc_pcmcia_sockets_lock);
+       mutex_lock(&soc_pcmcia_sockets_lock);
        list_for_each_entry(skt, &soc_pcmcia_sockets, node)
                if ( skt->ops->frequency_change )
                        ret += skt->ops->frequency_change(skt, val, freqs);
-       up(&soc_pcmcia_sockets_lock);
+       mutex_unlock(&soc_pcmcia_sockets_lock);
 
        return ret;
 }
@@ -642,7 +643,7 @@ int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops
        struct soc_pcmcia_socket *skt;
        int ret, i;
 
-       down(&soc_pcmcia_sockets_lock);
+       mutex_lock(&soc_pcmcia_sockets_lock);
 
        sinfo = kzalloc(SKT_DEV_INFO_SIZE(nr), GFP_KERNEL);
        if (!sinfo) {
@@ -782,7 +783,7 @@ int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops
        kfree(sinfo);
 
  out:
-       up(&soc_pcmcia_sockets_lock);
+       mutex_unlock(&soc_pcmcia_sockets_lock);
        return ret;
 }
 
@@ -793,7 +794,7 @@ int soc_common_drv_pcmcia_remove(struct device *dev)
 
        dev_set_drvdata(dev, NULL);
 
-       down(&soc_pcmcia_sockets_lock);
+       mutex_lock(&soc_pcmcia_sockets_lock);
        for (i = 0; i < sinfo->nskt; i++) {
                struct soc_pcmcia_socket *skt = &sinfo->skt[i];
 
@@ -818,7 +819,7 @@ int soc_common_drv_pcmcia_remove(struct device *dev)
        if (list_empty(&soc_pcmcia_sockets))
                soc_pcmcia_cpufreq_unregister();
 
-       up(&soc_pcmcia_sockets_lock);
+       mutex_unlock(&soc_pcmcia_sockets_lock);
 
        kfree(sinfo);
 
index 6f14126889b3457e6c8d7055e9994918e594b793..1edc1da9d3537b208c3af78e8de487e752477f91 100644 (file)
@@ -133,7 +133,6 @@ extern void soc_common_pcmcia_get_timing(struct soc_pcmcia_socket *, struct soc_
 
 
 extern struct list_head soc_pcmcia_sockets;
-extern struct semaphore soc_pcmcia_sockets_lock;
 
 extern int soc_common_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr);
 extern int soc_common_drv_pcmcia_remove(struct device *dev);
index b4409002b7f8415a88c4a8a823dbcd0ffa96f530..562384d6f3213b71e0f9f83999b4579b3d3b16e5 100644 (file)
@@ -356,19 +356,23 @@ static ssize_t pccard_store_cis(struct kobject *kobj,
 }
 
 
-static struct device_attribute *pccard_socket_attributes[] = {
-       &dev_attr_card_type,
-       &dev_attr_card_voltage,
-       &dev_attr_card_vpp,
-       &dev_attr_card_vcc,
-       &dev_attr_card_insert,
-       &dev_attr_card_pm_state,
-       &dev_attr_card_eject,
-       &dev_attr_card_irq_mask,
-       &dev_attr_available_resources_setup_done,
+static struct attribute *pccard_socket_attributes[] = {
+       &dev_attr_card_type.attr,
+       &dev_attr_card_voltage.attr,
+       &dev_attr_card_vpp.attr,
+       &dev_attr_card_vcc.attr,
+       &dev_attr_card_insert.attr,
+       &dev_attr_card_pm_state.attr,
+       &dev_attr_card_eject.attr,
+       &dev_attr_card_irq_mask.attr,
+       &dev_attr_available_resources_setup_done.attr,
        NULL,
 };
 
+static const struct attribute_group socket_attrs = {
+       .attrs = pccard_socket_attributes,
+};
+
 static struct bin_attribute pccard_cis_attr = {
        .attr = { .name = "cis", .mode = S_IRUGO | S_IWUSR },
        .size = 0x200,
@@ -376,35 +380,21 @@ static struct bin_attribute pccard_cis_attr = {
        .write = pccard_store_cis,
 };
 
-static int __devinit pccard_sysfs_add_socket(struct device *dev,
-                                            struct class_interface *class_intf)
+int pccard_sysfs_add_socket(struct device *dev)
 {
-       struct device_attribute **attr;
        int ret = 0;
 
-       for (attr = pccard_socket_attributes; *attr; attr++) {
-               ret = device_create_file(dev, *attr);
+       ret = sysfs_create_group(&dev->kobj, &socket_attrs);
+       if (!ret) {
+               ret = sysfs_create_bin_file(&dev->kobj, &pccard_cis_attr);
                if (ret)
-                       break;
+                       sysfs_remove_group(&dev->kobj, &socket_attrs);
        }
-       if (!ret)
-               ret = sysfs_create_bin_file(&dev->kobj, &pccard_cis_attr);
-
        return ret;
 }
 
-static void __devexit pccard_sysfs_remove_socket(struct device *dev,
-                                                struct class_interface *class_intf)
+void pccard_sysfs_remove_socket(struct device *dev)
 {
-       struct device_attribute **attr;
-
        sysfs_remove_bin_file(&dev->kobj, &pccard_cis_attr);
-       for (attr = pccard_socket_attributes; *attr; attr++)
-               device_remove_file(dev, *attr);
+       sysfs_remove_group(&dev->kobj, &socket_attrs);
 }
-
-struct class_interface pccard_sysfs_interface = {
-       .class = &pcmcia_socket_class,
-       .add_dev = &pccard_sysfs_add_socket,
-       .remove_dev = __devexit_p(&pccard_sysfs_remove_socket),
-};
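
socket_sysfs.c now gathers the per-socket device attributes into a struct attribute_group and creates or removes the whole set with sysfs_create_group()/sysfs_remove_group() instead of looping over device_create_file(); the error path also unwinds the group if the binary "cis" file cannot be added. A sketch of the idiom with a single hypothetical attribute:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t my_show_state(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "ok\n");
}
static DEVICE_ATTR(state, S_IRUGO, my_show_state, NULL);

static struct attribute *my_attrs[] = {
        &dev_attr_state.attr,
        NULL,
};

static const struct attribute_group my_group = {
        .attrs = my_attrs,
};

static int my_add_sysfs(struct device *dev)
{
        return sysfs_create_group(&dev->kobj, &my_group);  /* one call for the whole set */
}

static void my_remove_sysfs(struct device *dev)
{
        sysfs_remove_group(&dev->kobj, &my_group);
}
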
index 31a633f6554769c46cd5676d13a758a104137738..4fe7c58f57e92e624b00bb7f5c0f6869b7ed250a 100644 (file)
@@ -1,12 +1,78 @@
 extern spinlock_t pnp_lock;
 void *pnp_alloc(long size);
+
+int pnp_register_protocol(struct pnp_protocol *protocol);
+void pnp_unregister_protocol(struct pnp_protocol *protocol);
+
+#define PNP_EISA_ID_MASK 0x7fffffff
+void pnp_eisa_id_to_string(u32 id, char *str);
+struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *, int id, char *pnpid);
+struct pnp_card *pnp_alloc_card(struct pnp_protocol *, int id, char *pnpid);
+
+int pnp_add_device(struct pnp_dev *dev);
+struct pnp_id *pnp_add_id(struct pnp_dev *dev, char *id);
 int pnp_interface_attach_device(struct pnp_dev *dev);
+
+int pnp_add_card(struct pnp_card *card);
+struct pnp_id *pnp_add_card_id(struct pnp_card *card, char *id);
+void pnp_remove_card(struct pnp_card *card);
+int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev);
+void pnp_remove_card_device(struct pnp_dev *dev);
+
+struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev);
+struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev,
+                                                int priority);
+int pnp_register_irq_resource(struct pnp_dev *dev, struct pnp_option *option,
+                             struct pnp_irq *data);
+int pnp_register_dma_resource(struct pnp_dev *dev, struct pnp_option *option,
+                             struct pnp_dma *data);
+int pnp_register_port_resource(struct pnp_dev *dev, struct pnp_option *option,
+                              struct pnp_port *data);
+int pnp_register_mem_resource(struct pnp_dev *dev, struct pnp_option *option,
+                             struct pnp_mem *data);
+void pnp_init_resources(struct pnp_dev *dev);
+
 void pnp_fixup_device(struct pnp_dev *dev);
 void pnp_free_option(struct pnp_option *option);
 int __pnp_add_device(struct pnp_dev *dev);
 void __pnp_remove_device(struct pnp_dev *dev);
 
-int pnp_check_port(struct pnp_dev * dev, int idx);
-int pnp_check_mem(struct pnp_dev * dev, int idx);
-int pnp_check_irq(struct pnp_dev * dev, int idx);
-int pnp_check_dma(struct pnp_dev * dev, int idx);
+int pnp_check_port(struct pnp_dev *dev, struct resource *res);
+int pnp_check_mem(struct pnp_dev *dev, struct resource *res);
+int pnp_check_irq(struct pnp_dev *dev, struct resource *res);
+int pnp_check_dma(struct pnp_dev *dev, struct resource *res);
+
+void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc);
+
+void pnp_init_resource(struct resource *res);
+
+struct pnp_resource *pnp_get_pnp_resource(struct pnp_dev *dev,
+                                         unsigned int type, unsigned int num);
+
+#define PNP_MAX_PORT           40
+#define PNP_MAX_MEM            24
+#define PNP_MAX_IRQ             2
+#define PNP_MAX_DMA             2
+
+struct pnp_resource {
+       struct resource res;
+       unsigned int index;             /* ISAPNP config register index */
+};
+
+struct pnp_resource_table {
+       struct pnp_resource port[PNP_MAX_PORT];
+       struct pnp_resource mem[PNP_MAX_MEM];
+       struct pnp_resource dma[PNP_MAX_DMA];
+       struct pnp_resource irq[PNP_MAX_IRQ];
+};
+
+struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
+                                         int flags);
+struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
+                                         int flags);
+struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
+                                        resource_size_t start,
+                                        resource_size_t end, int flags);
+struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
+                                         resource_size_t start,
+                                         resource_size_t end, int flags);
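
The expanded base.h turns the PNP core's per-device resources into a table of struct pnp_resource entries and gives the protocol back-ends (ISAPNP, PNPBIOS, PNPACPI) a small constructor API: pnp_init_resources() clears the table and pnp_add_io/mem/irq/dma_resource() append what the firmware reported. These are core-internal declarations, so the following is only a hedged sketch of how a protocol ->get() handler might use them; the handler shape and the values are illustrative, not taken from any real back-end:

#include <linux/pnp.h>
#include "base.h"                               /* drivers/pnp/base.h, as above */

/* Illustrative only: report one fixed I/O window and one IRQ. */
static int my_protocol_get(struct pnp_dev *dev)
{
        pnp_init_resources(dev);                        /* reset the table */
        pnp_add_io_resource(dev, 0x3f8, 0x3ff, 0);      /* start, end, flags */
        pnp_add_irq_resource(dev, 4, 0);
        dbg_pnp_show_resources(dev, "after get");       /* silent unless built with -DDEBUG */
        return 0;
}
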
index da1c9909eb44abbd5ed59602e3be4faf9fe99718..a762a4176736fcc5aa3e5d3ce4016ca921bcabbd 100644 (file)
@@ -5,6 +5,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/ctype.h>
 #include <linux/slab.h>
 #include <linux/pnp.h>
 #include "base.h"
@@ -100,19 +101,33 @@ static int card_probe(struct pnp_card *card, struct pnp_card_driver *drv)
  * @id: pointer to a pnp_id structure
  * @card: pointer to the desired card
  */
-int pnp_add_card_id(struct pnp_id *id, struct pnp_card *card)
+struct pnp_id *pnp_add_card_id(struct pnp_card *card, char *id)
 {
-       struct pnp_id *ptr;
+       struct pnp_id *dev_id, *ptr;
 
-       id->next = NULL;
+       dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
+       if (!dev_id)
+               return NULL;
+
+       dev_id->id[0] = id[0];
+       dev_id->id[1] = id[1];
+       dev_id->id[2] = id[2];
+       dev_id->id[3] = tolower(id[3]);
+       dev_id->id[4] = tolower(id[4]);
+       dev_id->id[5] = tolower(id[5]);
+       dev_id->id[6] = tolower(id[6]);
+       dev_id->id[7] = '\0';
+
+       dev_id->next = NULL;
        ptr = card->id;
        while (ptr && ptr->next)
                ptr = ptr->next;
        if (ptr)
-               ptr->next = id;
+               ptr->next = dev_id;
        else
-               card->id = id;
-       return 0;
+               card->id = dev_id;
+
+       return dev_id;
 }
 
 static void pnp_free_card_ids(struct pnp_card *card)
@@ -136,6 +151,31 @@ static void pnp_release_card(struct device *dmdev)
        kfree(card);
 }
 
+struct pnp_card *pnp_alloc_card(struct pnp_protocol *protocol, int id, char *pnpid)
+{
+       struct pnp_card *card;
+       struct pnp_id *dev_id;
+
+       card = kzalloc(sizeof(struct pnp_card), GFP_KERNEL);
+       if (!card)
+               return NULL;
+
+       card->protocol = protocol;
+       card->number = id;
+
+       card->dev.parent = &card->protocol->dev;
+       sprintf(card->dev.bus_id, "%02x:%02x", card->protocol->number,
+               card->number);
+
+       dev_id = pnp_add_card_id(card, pnpid);
+       if (!dev_id) {
+               kfree(card);
+               return NULL;
+       }
+
+       return card;
+}
+
 static ssize_t pnp_show_card_name(struct device *dmdev,
                                  struct device_attribute *attr, char *buf)
 {
@@ -191,9 +231,6 @@ int pnp_add_card(struct pnp_card *card)
        int error;
        struct list_head *pos, *temp;
 
-       sprintf(card->dev.bus_id, "%02x:%02x", card->protocol->number,
-               card->number);
-       card->dev.parent = &card->protocol->dev;
        card->dev.bus = NULL;
        card->dev.release = &pnp_release_card;
        error = device_register(&card->dev);
index 7d366ca672d34ea7b6d07afa04fbcfbec849ad10..20771b7d4482562d18536aca252f9e870b08f907 100644 (file)
@@ -106,18 +106,53 @@ static void pnp_release_device(struct device *dmdev)
        pnp_free_option(dev->independent);
        pnp_free_option(dev->dependent);
        pnp_free_ids(dev);
+       kfree(dev->res);
        kfree(dev);
 }
 
-int __pnp_add_device(struct pnp_dev *dev)
+struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, char *pnpid)
 {
-       int ret;
+       struct pnp_dev *dev;
+       struct pnp_id *dev_id;
 
-       pnp_fixup_device(dev);
+       dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
+       if (!dev)
+               return NULL;
+
+       dev->res = kzalloc(sizeof(struct pnp_resource_table), GFP_KERNEL);
+       if (!dev->res) {
+               kfree(dev);
+               return NULL;
+       }
+
+       dev->protocol = protocol;
+       dev->number = id;
+       dev->dma_mask = DMA_24BIT_MASK;
+
+       dev->dev.parent = &dev->protocol->dev;
        dev->dev.bus = &pnp_bus_type;
        dev->dev.dma_mask = &dev->dma_mask;
-       dev->dma_mask = dev->dev.coherent_dma_mask = DMA_24BIT_MASK;
+       dev->dev.coherent_dma_mask = dev->dma_mask;
        dev->dev.release = &pnp_release_device;
+
+       sprintf(dev->dev.bus_id, "%02x:%02x", dev->protocol->number,
+               dev->number);
+
+       dev_id = pnp_add_id(dev, pnpid);
+       if (!dev_id) {
+               kfree(dev->res);
+               kfree(dev);
+               return NULL;
+       }
+
+       return dev;
+}
+
+int __pnp_add_device(struct pnp_dev *dev)
+{
+       int ret;
+
+       pnp_fixup_device(dev);
        dev->status = PNP_READY;
        spin_lock(&pnp_lock);
        list_add_tail(&dev->global_list, &pnp_global);
@@ -145,9 +180,6 @@ int pnp_add_device(struct pnp_dev *dev)
        if (dev->card)
                return -EINVAL;
 
-       dev->dev.parent = &dev->protocol->dev;
-       sprintf(dev->dev.bus_id, "%02x:%02x", dev->protocol->number,
-               dev->number);
        ret = __pnp_add_device(dev);
        if (ret)
                return ret;
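
core.c gains pnp_alloc_dev(), which centralises allocation of the struct pnp_dev, its resource table, the bus_id and the first EISA id, so the protocol back-ends no longer open-code that setup; pnp_alloc_card() in card.c plays the same role for cards. A hedged sketch of how a back-end would now instantiate a device (the protocol pointer and the id string are placeholders):

#include <linux/pnp.h>
#include "base.h"

/* Illustrative: create logical device 0 of a protocol, with a 7-character EISA id. */
static int my_register_one(struct pnp_protocol *proto)
{
        struct pnp_dev *dev;

        dev = pnp_alloc_dev(proto, 0, "PNP0501");       /* kzalloc + id + bus_id setup */
        if (!dev)
                return -ENOMEM;

        return pnp_add_device(dev);                     /* hook it into the PNP core */
}
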
index 12a1645a2e4352b3a3cc8e07941a9888dd5567cb..d3f869ee1d92a5b8e90919b6df880efb7ec73ee8 100644 (file)
@@ -167,7 +167,7 @@ static int pnp_bus_suspend(struct device *dev, pm_message_t state)
                        return error;
        }
 
-       if (pnp_dev->protocol && pnp_dev->protocol->suspend)
+       if (pnp_dev->protocol->suspend)
                pnp_dev->protocol->suspend(pnp_dev, state);
        return 0;
 }
@@ -181,7 +181,7 @@ static int pnp_bus_resume(struct device *dev)
        if (!pnp_drv)
                return 0;
 
-       if (pnp_dev->protocol && pnp_dev->protocol->resume)
+       if (pnp_dev->protocol->resume)
                pnp_dev->protocol->resume(pnp_dev);
 
        if (pnp_can_write(pnp_dev)) {
@@ -226,22 +226,36 @@ void pnp_unregister_driver(struct pnp_driver *drv)
 
 /**
  * pnp_add_id - adds an EISA id to the specified device
- * @id: pointer to a pnp_id structure
  * @dev: pointer to the desired device
+ * @id: pointer to an EISA id string
  */
-int pnp_add_id(struct pnp_id *id, struct pnp_dev *dev)
+struct pnp_id *pnp_add_id(struct pnp_dev *dev, char *id)
 {
-       struct pnp_id *ptr;
+       struct pnp_id *dev_id, *ptr;
 
-       id->next = NULL;
+       dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
+       if (!dev_id)
+               return NULL;
+
+       dev_id->id[0] = id[0];
+       dev_id->id[1] = id[1];
+       dev_id->id[2] = id[2];
+       dev_id->id[3] = tolower(id[3]);
+       dev_id->id[4] = tolower(id[4]);
+       dev_id->id[5] = tolower(id[5]);
+       dev_id->id[6] = tolower(id[6]);
+       dev_id->id[7] = '\0';
+
+       dev_id->next = NULL;
        ptr = dev->id;
        while (ptr && ptr->next)
                ptr = ptr->next;
        if (ptr)
-               ptr->next = id;
+               ptr->next = dev_id;
        else
-               dev->id = id;
-       return 0;
+               dev->id = dev_id;
+
+       return dev_id;
 }
 
 EXPORT_SYMBOL(pnp_register_driver);
index 982658477a58b4695df2ac979b00090cba9b1101..5d9301de17786bd35613e4eecda23ef48f5c35d6 100644 (file)
@@ -248,6 +248,7 @@ static ssize_t pnp_show_current_resources(struct device *dmdev,
                                          char *buf)
 {
        struct pnp_dev *dev = to_pnp_dev(dmdev);
+       struct resource *res;
        int i, ret;
        pnp_info_buffer_t *buffer;
 
@@ -267,50 +268,46 @@ static ssize_t pnp_show_current_resources(struct device *dmdev,
        else
                pnp_printf(buffer, "disabled\n");
 
-       for (i = 0; i < PNP_MAX_PORT; i++) {
-               if (pnp_port_valid(dev, i)) {
+       for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) {
+               if (pnp_resource_valid(res)) {
                        pnp_printf(buffer, "io");
-                       if (pnp_port_flags(dev, i) & IORESOURCE_DISABLED)
+                       if (res->flags & IORESOURCE_DISABLED)
                                pnp_printf(buffer, " disabled\n");
                        else
                                pnp_printf(buffer, " 0x%llx-0x%llx\n",
-                                          (unsigned long long)
-                                          pnp_port_start(dev, i),
-                                          (unsigned long long)pnp_port_end(dev,
-                                                                           i));
+                                          (unsigned long long) res->start,
+                                          (unsigned long long) res->end);
                }
        }
-       for (i = 0; i < PNP_MAX_MEM; i++) {
-               if (pnp_mem_valid(dev, i)) {
+       for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) {
+               if (pnp_resource_valid(res)) {
                        pnp_printf(buffer, "mem");
-                       if (pnp_mem_flags(dev, i) & IORESOURCE_DISABLED)
+                       if (res->flags & IORESOURCE_DISABLED)
                                pnp_printf(buffer, " disabled\n");
                        else
                                pnp_printf(buffer, " 0x%llx-0x%llx\n",
-                                          (unsigned long long)
-                                          pnp_mem_start(dev, i),
-                                          (unsigned long long)pnp_mem_end(dev,
-                                                                          i));
+                                          (unsigned long long) res->start,
+                                          (unsigned long long) res->end);
                }
        }
-       for (i = 0; i < PNP_MAX_IRQ; i++) {
-               if (pnp_irq_valid(dev, i)) {
+       for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IRQ, i)); i++) {
+               if (pnp_resource_valid(res)) {
                        pnp_printf(buffer, "irq");
-                       if (pnp_irq_flags(dev, i) & IORESOURCE_DISABLED)
+                       if (res->flags & IORESOURCE_DISABLED)
                                pnp_printf(buffer, " disabled\n");
                        else
                                pnp_printf(buffer, " %lld\n",
-                                          (unsigned long long)pnp_irq(dev, i));
+                                          (unsigned long long) res->start);
                }
        }
-       for (i = 0; i < PNP_MAX_DMA; i++) {
-               if (pnp_dma_valid(dev, i)) {
+       for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_DMA, i)); i++) {
+               if (pnp_resource_valid(res)) {
                        pnp_printf(buffer, "dma");
-                       if (pnp_dma_flags(dev, i) & IORESOURCE_DISABLED)
+                       if (res->flags & IORESOURCE_DISABLED)
                                pnp_printf(buffer, " disabled\n");
                        else
                                pnp_printf(buffer, " %lld\n",
-                                          (unsigned long long)pnp_dma(dev, i));
+                                          (unsigned long long) res->start);
                }
        }
        ret = (buffer->curr - buf);
@@ -323,8 +320,10 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
                          const char *ubuf, size_t count)
 {
        struct pnp_dev *dev = to_pnp_dev(dmdev);
+       struct pnp_resource *pnp_res;
        char *buf = (void *)ubuf;
        int retval = 0;
+       resource_size_t start, end;
 
        if (dev->status & PNP_ATTACHED) {
                retval = -EBUSY;
@@ -351,20 +350,20 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
        if (!strnicmp(buf, "auto", 4)) {
                if (dev->active)
                        goto done;
-               pnp_init_resource_table(&dev->res);
+               pnp_init_resources(dev);
                retval = pnp_auto_config_dev(dev);
                goto done;
        }
        if (!strnicmp(buf, "clear", 5)) {
                if (dev->active)
                        goto done;
-               pnp_init_resource_table(&dev->res);
+               pnp_init_resources(dev);
                goto done;
        }
        if (!strnicmp(buf, "get", 3)) {
                mutex_lock(&pnp_res_mutex);
                if (pnp_can_read(dev))
-                       dev->protocol->get(dev, &dev->res);
+                       dev->protocol->get(dev);
                mutex_unlock(&pnp_res_mutex);
                goto done;
        }
@@ -373,7 +372,7 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
                if (dev->active)
                        goto done;
                buf += 3;
-               pnp_init_resource_table(&dev->res);
+               pnp_init_resources(dev);
                mutex_lock(&pnp_res_mutex);
                while (1) {
                        while (isspace(*buf))
@@ -382,76 +381,60 @@ pnp_set_current_resources(struct device *dmdev, struct device_attribute *attr,
                                buf += 2;
                                while (isspace(*buf))
                                        ++buf;
-                               dev->res.port_resource[nport].start =
-                                   simple_strtoul(buf, &buf, 0);
+                               start = simple_strtoul(buf, &buf, 0);
                                while (isspace(*buf))
                                        ++buf;
                                if (*buf == '-') {
                                        buf += 1;
                                        while (isspace(*buf))
                                                ++buf;
-                                       dev->res.port_resource[nport].end =
-                                           simple_strtoul(buf, &buf, 0);
+                                       end = simple_strtoul(buf, &buf, 0);
                                } else
-                                       dev->res.port_resource[nport].end =
-                                           dev->res.port_resource[nport].start;
-                               dev->res.port_resource[nport].flags =
-                                   IORESOURCE_IO;
-                               nport++;
-                               if (nport >= PNP_MAX_PORT)
-                                       break;
+                                       end = start;
+                               pnp_res = pnp_add_io_resource(dev, start, end,
+                                                             0);
+                               if (pnp_res)
+                                       pnp_res->index = nport++;
                                continue;
                        }
                        if (!strnicmp(buf, "mem", 3)) {
                                buf += 3;
                                while (isspace(*buf))
                                        ++buf;
-                               dev->res.mem_resource[nmem].start =
-                                   simple_strtoul(buf, &buf, 0);
+                               start = simple_strtoul(buf, &buf, 0);
                                while (isspace(*buf))
                                        ++buf;
                                if (*buf == '-') {
                                        buf += 1;
                                        while (isspace(*buf))
                                                ++buf;
-                                       dev->res.mem_resource[nmem].end =
-                                           simple_strtoul(buf, &buf, 0);
+                                       end = simple_strtoul(buf, &buf, 0);
                                } else
-                                       dev->res.mem_resource[nmem].end =
-                                           dev->res.mem_resource[nmem].start;
-                               dev->res.mem_resource[nmem].flags =
-                                   IORESOURCE_MEM;
-                               nmem++;
-                               if (nmem >= PNP_MAX_MEM)
-                                       break;
+                                       end = start;
+                               pnp_res = pnp_add_mem_resource(dev, start, end,
+                                                              0);
+                               if (pnp_res)
+                                       pnp_res->index = nmem++;
                                continue;
                        }
                        if (!strnicmp(buf, "irq", 3)) {
                                buf += 3;
                                while (isspace(*buf))
                                        ++buf;
-                               dev->res.irq_resource[nirq].start =
-                                   dev->res.irq_resource[nirq].end =
-                                   simple_strtoul(buf, &buf, 0);
-                               dev->res.irq_resource[nirq].flags =
-                                   IORESOURCE_IRQ;
-                               nirq++;
-                               if (nirq >= PNP_MAX_IRQ)
-                                       break;
+                               start = simple_strtoul(buf, &buf, 0);
+                               pnp_res = pnp_add_irq_resource(dev, start, 0);
+                               if (pnp_res)
+                                       nirq++;
                                continue;
                        }
                        if (!strnicmp(buf, "dma", 3)) {
                                buf += 3;
                                while (isspace(*buf))
                                        ++buf;
-                               dev->res.dma_resource[ndma].start =
-                                   dev->res.dma_resource[ndma].end =
-                                   simple_strtoul(buf, &buf, 0);
-                               dev->res.dma_resource[ndma].flags =
-                                   IORESOURCE_DMA;
-                               ndma++;
-                               if (ndma >= PNP_MAX_DMA)
-                                       break;
+                               start = simple_strtoul(buf, &buf, 0);
+                               pnp_res = pnp_add_dma_resource(dev, start, 0);
+                               if (pnp_res)
+                                       pnp_res->index = ndma++;
                                continue;
                        }
                        break;
index cac18bbfb817b1e042d820fbf7eace2adbdc6f9f..3e38f06f8d785b64d77a76a392d78c2593e44205 100644 (file)
@@ -5,3 +5,7 @@
 isapnp-proc-$(CONFIG_PROC_FS) = proc.o
 
 obj-y := core.o compat.o $(isapnp-proc-y)
+
+ifeq ($(CONFIG_PNP_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
index 257f5d827d8365ac1563f7c986f0a74504acaebf..f1bccdbdeb0841f282bd1c0ea70d1ee06278c303 100644 (file)
@@ -44,6 +44,8 @@
 #include <linux/mutex.h>
 #include <asm/io.h>
 
+#include "../base.h"
+
 #if 0
 #define ISAPNP_REGION_OK
 #endif
@@ -88,6 +90,14 @@ MODULE_LICENSE("GPL");
 #define _LTAG_MEM32RANGE       0x85
 #define _LTAG_FIXEDMEM32RANGE  0x86
 
+/* Logical device control and configuration registers */
+
+#define ISAPNP_CFG_ACTIVATE    0x30    /* byte */
+#define ISAPNP_CFG_MEM         0x40    /* 4 * dword */
+#define ISAPNP_CFG_PORT                0x60    /* 8 * word */
+#define ISAPNP_CFG_IRQ         0x70    /* 2 * word */
+#define ISAPNP_CFG_DMA         0x74    /* 2 * byte */
+
 /*
  * Sizes of ISAPNP logical device configuration register sets.
  * See PNP-ISA-v1.0a.pdf, Appendix A.
@@ -387,28 +397,6 @@ static void __init isapnp_skip_bytes(int count)
        isapnp_peek(NULL, count);
 }
 
-/*
- *  Parse EISA id.
- */
-static void isapnp_parse_id(struct pnp_dev *dev, unsigned short vendor,
-                           unsigned short device)
-{
-       struct pnp_id *id;
-
-       if (!dev)
-               return;
-       id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
-       if (!id)
-               return;
-       sprintf(id->id, "%c%c%c%x%x%x%x",
-               'A' + ((vendor >> 2) & 0x3f) - 1,
-               'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
-               'A' + ((vendor >> 8) & 0x1f) - 1,
-               (device >> 4) & 0x0f,
-               device & 0x0f, (device >> 12) & 0x0f, (device >> 8) & 0x0f);
-       pnp_add_id(id, dev);
-}
-
 /*
  *  Parse logical device tag.
  */
@@ -417,30 +405,31 @@ static struct pnp_dev *__init isapnp_parse_device(struct pnp_card *card,
 {
        unsigned char tmp[6];
        struct pnp_dev *dev;
+       u32 eisa_id;
+       char id[8];
 
        isapnp_peek(tmp, size);
-       dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
+       eisa_id = tmp[0] | tmp[1] << 8 | tmp[2] << 16 | tmp[3] << 24;
+       pnp_eisa_id_to_string(eisa_id, id);
+
+       dev = pnp_alloc_dev(&isapnp_protocol, number, id);
        if (!dev)
                return NULL;
-       dev->number = number;
-       isapnp_parse_id(dev, (tmp[1] << 8) | tmp[0], (tmp[3] << 8) | tmp[2]);
-       dev->regs = tmp[4];
+
        dev->card = card;
-       if (size > 5)
-               dev->regs |= tmp[5] << 8;
-       dev->protocol = &isapnp_protocol;
        dev->capabilities |= PNP_CONFIGURABLE;
        dev->capabilities |= PNP_READ;
        dev->capabilities |= PNP_WRITE;
        dev->capabilities |= PNP_DISABLE;
-       pnp_init_resource_table(&dev->res);
+       pnp_init_resources(dev);
        return dev;
 }
 
 /*
  *  Add IRQ resource to resources list.
  */
-static void __init isapnp_parse_irq_resource(struct pnp_option *option,
+static void __init isapnp_parse_irq_resource(struct pnp_dev *dev,
+                                            struct pnp_option *option,
                                             int size)
 {
        unsigned char tmp[3];
@@ -457,13 +446,14 @@ static void __init isapnp_parse_irq_resource(struct pnp_option *option,
                irq->flags = tmp[2];
        else
                irq->flags = IORESOURCE_IRQ_HIGHEDGE;
-       pnp_register_irq_resource(option, irq);
+       pnp_register_irq_resource(dev, option, irq);
 }
 
 /*
  *  Add DMA resource to resources list.
  */
-static void __init isapnp_parse_dma_resource(struct pnp_option *option,
+static void __init isapnp_parse_dma_resource(struct pnp_dev *dev,
+                                            struct pnp_option *option,
                                             int size)
 {
        unsigned char tmp[2];
@@ -475,13 +465,14 @@ static void __init isapnp_parse_dma_resource(struct pnp_option *option,
                return;
        dma->map = tmp[0];
        dma->flags = tmp[1];
-       pnp_register_dma_resource(option, dma);
+       pnp_register_dma_resource(dev, option, dma);
 }
 
 /*
  *  Add port resource to resources list.
  */
-static void __init isapnp_parse_port_resource(struct pnp_option *option,
+static void __init isapnp_parse_port_resource(struct pnp_dev *dev,
+                                             struct pnp_option *option,
                                              int size)
 {
        unsigned char tmp[7];
@@ -496,13 +487,14 @@ static void __init isapnp_parse_port_resource(struct pnp_option *option,
        port->align = tmp[5];
        port->size = tmp[6];
        port->flags = tmp[0] ? PNP_PORT_FLAG_16BITADDR : 0;
-       pnp_register_port_resource(option, port);
+       pnp_register_port_resource(dev, option, port);
 }
 
 /*
  *  Add fixed port resource to resources list.
  */
-static void __init isapnp_parse_fixed_port_resource(struct pnp_option *option,
+static void __init isapnp_parse_fixed_port_resource(struct pnp_dev *dev,
+                                                   struct pnp_option *option,
                                                    int size)
 {
        unsigned char tmp[3];
@@ -516,13 +508,14 @@ static void __init isapnp_parse_fixed_port_resource(struct pnp_option *option,
        port->size = tmp[2];
        port->align = 0;
        port->flags = PNP_PORT_FLAG_FIXED;
-       pnp_register_port_resource(option, port);
+       pnp_register_port_resource(dev, option, port);
 }
 
 /*
  *  Add memory resource to resources list.
  */
-static void __init isapnp_parse_mem_resource(struct pnp_option *option,
+static void __init isapnp_parse_mem_resource(struct pnp_dev *dev,
+                                            struct pnp_option *option,
                                             int size)
 {
        unsigned char tmp[9];
@@ -537,13 +530,14 @@ static void __init isapnp_parse_mem_resource(struct pnp_option *option,
        mem->align = (tmp[6] << 8) | tmp[5];
        mem->size = ((tmp[8] << 8) | tmp[7]) << 8;
        mem->flags = tmp[0];
-       pnp_register_mem_resource(option, mem);
+       pnp_register_mem_resource(dev, option, mem);
 }
 
 /*
  *  Add 32-bit memory resource to resources list.
  */
-static void __init isapnp_parse_mem32_resource(struct pnp_option *option,
+static void __init isapnp_parse_mem32_resource(struct pnp_dev *dev,
+                                              struct pnp_option *option,
                                               int size)
 {
        unsigned char tmp[17];
@@ -560,13 +554,14 @@ static void __init isapnp_parse_mem32_resource(struct pnp_option *option,
        mem->size =
            (tmp[16] << 24) | (tmp[15] << 16) | (tmp[14] << 8) | tmp[13];
        mem->flags = tmp[0];
-       pnp_register_mem_resource(option, mem);
+       pnp_register_mem_resource(dev, option, mem);
 }
 
 /*
  *  Add 32-bit fixed memory resource to resources list.
  */
-static void __init isapnp_parse_fixed_mem32_resource(struct pnp_option *option,
+static void __init isapnp_parse_fixed_mem32_resource(struct pnp_dev *dev,
+                                                    struct pnp_option *option,
                                                     int size)
 {
        unsigned char tmp[9];
@@ -581,7 +576,7 @@ static void __init isapnp_parse_fixed_mem32_resource(struct pnp_option *option,
        mem->size = (tmp[8] << 24) | (tmp[7] << 16) | (tmp[6] << 8) | tmp[5];
        mem->align = 0;
        mem->flags = tmp[0];
-       pnp_register_mem_resource(option, mem);
+       pnp_register_mem_resource(dev, option, mem);
 }
 
 /*
@@ -613,6 +608,8 @@ static int __init isapnp_create_device(struct pnp_card *card,
        unsigned char type, tmp[17];
        struct pnp_option *option;
        struct pnp_dev *dev;
+       u32 eisa_id;
+       char id[8];
 
        if ((dev = isapnp_parse_device(card, size, number++)) == NULL)
                return 1;
@@ -652,8 +649,10 @@ static int __init isapnp_create_device(struct pnp_card *card,
                case _STAG_COMPATDEVID:
                        if (size == 4 && compat < DEVICE_COUNT_COMPATIBLE) {
                                isapnp_peek(tmp, 4);
-                               isapnp_parse_id(dev, (tmp[1] << 8) | tmp[0],
-                                               (tmp[3] << 8) | tmp[2]);
+                               eisa_id = tmp[0] | tmp[1] << 8 |
+                                         tmp[2] << 16 | tmp[3] << 24;
+                               pnp_eisa_id_to_string(eisa_id, id);
+                               pnp_add_id(dev, id);
                                compat++;
                                size = 0;
                        }
@@ -661,13 +660,13 @@ static int __init isapnp_create_device(struct pnp_card *card,
                case _STAG_IRQ:
                        if (size < 2 || size > 3)
                                goto __skip;
-                       isapnp_parse_irq_resource(option, size);
+                       isapnp_parse_irq_resource(dev, option, size);
                        size = 0;
                        break;
                case _STAG_DMA:
                        if (size != 2)
                                goto __skip;
-                       isapnp_parse_dma_resource(option, size);
+                       isapnp_parse_dma_resource(dev, option, size);
                        size = 0;
                        break;
                case _STAG_STARTDEP:
@@ -687,17 +686,18 @@ static int __init isapnp_create_device(struct pnp_card *card,
                        if (size != 0)
                                goto __skip;
                        priority = 0;
+                       dev_dbg(&dev->dev, "end dependent options\n");
                        break;
                case _STAG_IOPORT:
                        if (size != 7)
                                goto __skip;
-                       isapnp_parse_port_resource(option, size);
+                       isapnp_parse_port_resource(dev, option, size);
                        size = 0;
                        break;
                case _STAG_FIXEDIO:
                        if (size != 3)
                                goto __skip;
-                       isapnp_parse_fixed_port_resource(option, size);
+                       isapnp_parse_fixed_port_resource(dev, option, size);
                        size = 0;
                        break;
                case _STAG_VENDOR:
@@ -705,7 +705,7 @@ static int __init isapnp_create_device(struct pnp_card *card,
                case _LTAG_MEMRANGE:
                        if (size != 9)
                                goto __skip;
-                       isapnp_parse_mem_resource(option, size);
+                       isapnp_parse_mem_resource(dev, option, size);
                        size = 0;
                        break;
                case _LTAG_ANSISTR:
@@ -720,13 +720,13 @@ static int __init isapnp_create_device(struct pnp_card *card,
                case _LTAG_MEM32RANGE:
                        if (size != 17)
                                goto __skip;
-                       isapnp_parse_mem32_resource(option, size);
+                       isapnp_parse_mem32_resource(dev, option, size);
                        size = 0;
                        break;
                case _LTAG_FIXEDMEM32RANGE:
                        if (size != 9)
                                goto __skip;
-                       isapnp_parse_fixed_mem32_resource(option, size);
+                       isapnp_parse_fixed_mem32_resource(dev, option, size);
                        size = 0;
                        break;
                case _STAG_END:
@@ -734,9 +734,8 @@ static int __init isapnp_create_device(struct pnp_card *card,
                                isapnp_skip_bytes(size);
                        return 1;
                default:
-                       printk(KERN_ERR
-                              "isapnp: unexpected or unknown tag type 0x%x for logical device %i (device %i), ignored\n",
-                              type, dev->number, card->number);
+                       dev_err(&dev->dev, "unknown tag %#x (card %i), "
+                               "ignored\n", type, card->number);
                }
 __skip:
                if (size > 0)
@@ -789,9 +788,8 @@ static void __init isapnp_parse_resource_map(struct pnp_card *card)
                                isapnp_skip_bytes(size);
                        return;
                default:
-                       printk(KERN_ERR
-                              "isapnp: unexpected or unknown tag type 0x%x for device %i, ignored\n",
-                              type, card->number);
+                       dev_err(&card->dev, "unknown tag %#x, ignored\n",
+                              type);
                }
 __skip:
                if (size > 0)
@@ -821,25 +819,6 @@ static unsigned char __init isapnp_checksum(unsigned char *data)
        return checksum;
 }
 
-/*
- *  Parse EISA id for ISA PnP card.
- */
-static void isapnp_parse_card_id(struct pnp_card *card, unsigned short vendor,
-                                unsigned short device)
-{
-       struct pnp_id *id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
-
-       if (!id)
-               return;
-       sprintf(id->id, "%c%c%c%x%x%x%x",
-               'A' + ((vendor >> 2) & 0x3f) - 1,
-               'A' + (((vendor & 3) << 3) | ((vendor >> 13) & 7)) - 1,
-               'A' + ((vendor >> 8) & 0x1f) - 1,
-               (device >> 4) & 0x0f,
-               device & 0x0f, (device >> 12) & 0x0f, (device >> 8) & 0x0f);
-       pnp_add_card_id(id, card);
-}
-
 /*
  *  Build device list for all present ISA PnP devices.
  */
@@ -848,6 +827,8 @@ static int __init isapnp_build_device_list(void)
        int csn;
        unsigned char header[9], checksum;
        struct pnp_card *card;
+       u32 eisa_id;
+       char id[8];
 
        isapnp_wait();
        isapnp_key();
@@ -855,32 +836,30 @@ static int __init isapnp_build_device_list(void)
                isapnp_wake(csn);
                isapnp_peek(header, 9);
                checksum = isapnp_checksum(header);
+               eisa_id = header[0] | header[1] << 8 |
+                         header[2] << 16 | header[3] << 24;
+               pnp_eisa_id_to_string(eisa_id, id);
+               card = pnp_alloc_card(&isapnp_protocol, csn, id);
+               if (!card)
+                       continue;
+
 #if 0
-               printk(KERN_DEBUG
+               dev_info(&card->dev,
                       "vendor: %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
                       header[0], header[1], header[2], header[3], header[4],
                       header[5], header[6], header[7], header[8]);
-               printk(KERN_DEBUG "checksum = 0x%x\n", checksum);
+               dev_info(&card->dev, "checksum = %#x\n", checksum);
 #endif
-               if ((card =
-                    kzalloc(sizeof(struct pnp_card), GFP_KERNEL)) == NULL)
-                       continue;
-
-               card->number = csn;
                INIT_LIST_HEAD(&card->devices);
-               isapnp_parse_card_id(card, (header[1] << 8) | header[0],
-                                    (header[3] << 8) | header[2]);
                card->serial =
                    (header[7] << 24) | (header[6] << 16) | (header[5] << 8) |
                    header[4];
                isapnp_checksum_value = 0x00;
                isapnp_parse_resource_map(card);
                if (isapnp_checksum_value != 0x00)
-                       printk(KERN_ERR
-                              "isapnp: checksum for device %i is not valid (0x%x)\n",
-                              csn, isapnp_checksum_value);
+                       dev_err(&card->dev, "invalid checksum %#x\n",
+                               isapnp_checksum_value);
                card->checksum = isapnp_checksum_value;
-               card->protocol = &isapnp_protocol;
 
                pnp_add_card(card);
        }
@@ -947,100 +926,117 @@ EXPORT_SYMBOL(isapnp_cfg_begin);
 EXPORT_SYMBOL(isapnp_cfg_end);
 EXPORT_SYMBOL(isapnp_write_byte);
 
-static int isapnp_read_resources(struct pnp_dev *dev,
-                                struct pnp_resource_table *res)
+static int isapnp_get_resources(struct pnp_dev *dev)
 {
-       int tmp, ret;
+       struct pnp_resource *pnp_res;
+       int i, ret;
 
+       dev_dbg(&dev->dev, "get resources\n");
+       pnp_init_resources(dev);
+       isapnp_cfg_begin(dev->card->number, dev->number);
        dev->active = isapnp_read_byte(ISAPNP_CFG_ACTIVATE);
-       if (dev->active) {
-               for (tmp = 0; tmp < ISAPNP_MAX_PORT; tmp++) {
-                       ret = isapnp_read_word(ISAPNP_CFG_PORT + (tmp << 1));
-                       if (!ret)
-                               continue;
-                       res->port_resource[tmp].start = ret;
-                       res->port_resource[tmp].flags = IORESOURCE_IO;
+       if (!dev->active)
+               goto __end;
+
+       for (i = 0; i < ISAPNP_MAX_PORT; i++) {
+               ret = isapnp_read_word(ISAPNP_CFG_PORT + (i << 1));
+               if (ret) {
+                       pnp_res = pnp_add_io_resource(dev, ret, ret, 0);
+                       if (pnp_res)
+                               pnp_res->index = i;
                }
-               for (tmp = 0; tmp < ISAPNP_MAX_MEM; tmp++) {
-                       ret =
-                           isapnp_read_word(ISAPNP_CFG_MEM + (tmp << 3)) << 8;
-                       if (!ret)
-                               continue;
-                       res->mem_resource[tmp].start = ret;
-                       res->mem_resource[tmp].flags = IORESOURCE_MEM;
+       }
+       for (i = 0; i < ISAPNP_MAX_MEM; i++) {
+               ret = isapnp_read_word(ISAPNP_CFG_MEM + (i << 3)) << 8;
+               if (ret) {
+                       pnp_res = pnp_add_mem_resource(dev, ret, ret, 0);
+                       if (pnp_res)
+                               pnp_res->index = i;
                }
-               for (tmp = 0; tmp < ISAPNP_MAX_IRQ; tmp++) {
-                       ret =
-                           (isapnp_read_word(ISAPNP_CFG_IRQ + (tmp << 1)) >>
-                            8);
-                       if (!ret)
-                               continue;
-                       res->irq_resource[tmp].start =
-                           res->irq_resource[tmp].end = ret;
-                       res->irq_resource[tmp].flags = IORESOURCE_IRQ;
+       }
+       for (i = 0; i < ISAPNP_MAX_IRQ; i++) {
+               ret = isapnp_read_word(ISAPNP_CFG_IRQ + (i << 1)) >> 8;
+               if (ret) {
+                       pnp_res = pnp_add_irq_resource(dev, ret, 0);
+                       if (pnp_res)
+                               pnp_res->index = i;
                }
-               for (tmp = 0; tmp < ISAPNP_MAX_DMA; tmp++) {
-                       ret = isapnp_read_byte(ISAPNP_CFG_DMA + tmp);
-                       if (ret == 4)
-                               continue;
-                       res->dma_resource[tmp].start =
-                           res->dma_resource[tmp].end = ret;
-                       res->dma_resource[tmp].flags = IORESOURCE_DMA;
+       }
+       for (i = 0; i < ISAPNP_MAX_DMA; i++) {
+               ret = isapnp_read_byte(ISAPNP_CFG_DMA + i);
+               if (ret != 4) {
+                       pnp_res = pnp_add_dma_resource(dev, ret, 0);
+                       if (pnp_res)
+                               pnp_res->index = i;
                }
        }
-       return 0;
-}
-
-static int isapnp_get_resources(struct pnp_dev *dev,
-                               struct pnp_resource_table *res)
-{
-       int ret;
 
-       pnp_init_resource_table(res);
-       isapnp_cfg_begin(dev->card->number, dev->number);
-       ret = isapnp_read_resources(dev, res);
+__end:
        isapnp_cfg_end();
-       return ret;
+       return 0;
 }
 
-static int isapnp_set_resources(struct pnp_dev *dev,
-                               struct pnp_resource_table *res)
+static int isapnp_set_resources(struct pnp_dev *dev)
 {
-       int tmp;
+       struct pnp_resource *pnp_res;
+       struct resource *res;
+       int tmp, index;
 
+       dev_dbg(&dev->dev, "set resources\n");
        isapnp_cfg_begin(dev->card->number, dev->number);
        dev->active = 1;
-       for (tmp = 0;
-            tmp < ISAPNP_MAX_PORT
-            && (res->port_resource[tmp].
-                flags & (IORESOURCE_IO | IORESOURCE_UNSET)) == IORESOURCE_IO;
-            tmp++)
-               isapnp_write_word(ISAPNP_CFG_PORT + (tmp << 1),
-                                 res->port_resource[tmp].start);
-       for (tmp = 0;
-            tmp < ISAPNP_MAX_IRQ
-            && (res->irq_resource[tmp].
-                flags & (IORESOURCE_IRQ | IORESOURCE_UNSET)) == IORESOURCE_IRQ;
-            tmp++) {
-               int irq = res->irq_resource[tmp].start;
-               if (irq == 2)
-                       irq = 9;
-               isapnp_write_byte(ISAPNP_CFG_IRQ + (tmp << 1), irq);
+       for (tmp = 0; tmp < ISAPNP_MAX_PORT; tmp++) {
+               pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, tmp);
+               if (!pnp_res)
+                       continue;
+               res = &pnp_res->res;
+               if (pnp_resource_valid(res)) {
+                       index = pnp_res->index;
+                       dev_dbg(&dev->dev, "  set io  %d to %#llx\n",
+                               index, (unsigned long long) res->start);
+                       isapnp_write_word(ISAPNP_CFG_PORT + (index << 1),
+                                         res->start);
+               }
+       }
+       for (tmp = 0; tmp < ISAPNP_MAX_IRQ; tmp++) {
+               pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, tmp);
+               if (!pnp_res)
+                       continue;
+               res = &pnp_res->res;
+               if (pnp_resource_valid(res)) {
+                       int irq = res->start;
+                       if (irq == 2)
+                               irq = 9;
+                       index = pnp_res->index;
+                       dev_dbg(&dev->dev, "  set irq %d to %d\n", index, irq);
+                       isapnp_write_byte(ISAPNP_CFG_IRQ + (index << 1), irq);
+               }
+       }
+       for (tmp = 0; tmp < ISAPNP_MAX_DMA; tmp++) {
+               pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, tmp);
+               if (!pnp_res)
+                       continue;
+               res = &pnp_res->res;
+               if (pnp_resource_valid(res)) {
+                       index = pnp_res->index;
+                       dev_dbg(&dev->dev, "  set dma %d to %lld\n",
+                               index, (unsigned long long) res->start);
+                       isapnp_write_byte(ISAPNP_CFG_DMA + index, res->start);
+               }
+       }
+       for (tmp = 0; tmp < ISAPNP_MAX_MEM; tmp++) {
+               pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, tmp);
+               if (!pnp_res)
+                       continue;
+               res = &pnp_res->res;
+               if (pnp_resource_valid(res)) {
+                       index = pnp_res->index;
+                       dev_dbg(&dev->dev, "  set mem %d to %#llx\n",
+                               index, (unsigned long long) res->start);
+                       isapnp_write_word(ISAPNP_CFG_MEM + (index << 3),
+                                         (res->start >> 8) & 0xffff);
+               }
        }
-       for (tmp = 0;
-            tmp < ISAPNP_MAX_DMA
-            && (res->dma_resource[tmp].
-                flags & (IORESOURCE_DMA | IORESOURCE_UNSET)) == IORESOURCE_DMA;
-            tmp++)
-               isapnp_write_byte(ISAPNP_CFG_DMA + tmp,
-                                 res->dma_resource[tmp].start);
-       for (tmp = 0;
-            tmp < ISAPNP_MAX_MEM
-            && (res->mem_resource[tmp].
-                flags & (IORESOURCE_MEM | IORESOURCE_UNSET)) == IORESOURCE_MEM;
-            tmp++)
-               isapnp_write_word(ISAPNP_CFG_MEM + (tmp << 3),
-                                 (res->mem_resource[tmp].start >> 8) & 0xffff);
        /* FIXME: We aren't handling 32bit mems properly here */
        isapnp_activate(dev->number);
        isapnp_cfg_end();
@@ -1138,13 +1134,13 @@ static int __init isapnp_init(void)
        protocol_for_each_card(&isapnp_protocol, card) {
                cards++;
                if (isapnp_verbose) {
-                       printk(KERN_INFO "isapnp: Card '%s'\n",
-                              card->name[0] ? card->name : "Unknown");
+                       dev_info(&card->dev, "card '%s'\n",
+                              card->name[0] ? card->name : "unknown");
                        if (isapnp_verbose < 2)
                                continue;
                        card_for_each_dev(card, dev) {
-                               printk(KERN_INFO "isapnp:   Device '%s'\n",
-                                      dev->name[0] ? dev->name : "Unknown");
+                               dev_info(&card->dev, "device '%s'\n",
+                                      dev->name[0] ? dev->name : "unknown");
                        }
                }
        }
index 2b8266c3d40f63aced5da403e66d745970d18107..3f94edab25fa7f8551d456b2d8df34e1fc0d3ed4 100644 (file)
@@ -85,6 +85,7 @@ static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf,
 }
 
 static const struct file_operations isapnp_proc_bus_file_operations = {
+       .owner  = THIS_MODULE,
        .llseek = isapnp_proc_bus_lseek,
        .read = isapnp_proc_bus_read,
 };
@@ -102,12 +103,10 @@ static int isapnp_proc_attach_device(struct pnp_dev *dev)
                        return -ENOMEM;
        }
        sprintf(name, "%02x", dev->number);
-       e = dev->procent = create_proc_entry(name, S_IFREG | S_IRUGO, de);
+       e = dev->procent = proc_create_data(name, S_IFREG | S_IRUGO, de,
+                       &isapnp_proc_bus_file_operations, dev);
        if (!e)
                return -ENOMEM;
-       e->proc_fops = &isapnp_proc_bus_file_operations;
-       e->owner = THIS_MODULE;
-       e->data = dev;
        e->size = 256;
        return 0;
 }
@@ -116,7 +115,7 @@ int __init isapnp_proc_init(void)
 {
        struct pnp_dev *dev;
 
-       isapnp_proc_bus_dir = proc_mkdir("isapnp", proc_bus);
+       isapnp_proc_bus_dir = proc_mkdir("bus/isapnp", NULL);
        protocol_for_each_dev(&isapnp_protocol, dev) {
                isapnp_proc_attach_device(dev);
        }
index c28caf272c1167f13c761da916af4a0983d878da..bea0914ff947a337dcf45d2a58bf7b6fc0f38644 100644 (file)
@@ -19,100 +19,118 @@ DEFINE_MUTEX(pnp_res_mutex);
 
 static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
 {
-       resource_size_t *start, *end;
-       unsigned long *flags;
+       struct pnp_resource *pnp_res;
+       struct resource *res;
 
-       if (idx >= PNP_MAX_PORT) {
+       pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, idx);
+       if (!pnp_res) {
                dev_err(&dev->dev, "too many I/O port resources\n");
                /* pretend we were successful so at least the manager won't try again */
                return 1;
        }
 
+       res = &pnp_res->res;
+
        /* check if this resource has been manually set, if so skip */
-       if (!(dev->res.port_resource[idx].flags & IORESOURCE_AUTO))
+       if (!(res->flags & IORESOURCE_AUTO)) {
+               dev_dbg(&dev->dev, "  io %d already set to %#llx-%#llx "
+                       "flags %#lx\n", idx, (unsigned long long) res->start,
+                       (unsigned long long) res->end, res->flags);
                return 1;
-
-       start = &dev->res.port_resource[idx].start;
-       end = &dev->res.port_resource[idx].end;
-       flags = &dev->res.port_resource[idx].flags;
+       }
 
        /* set the initial values */
-       *flags |= rule->flags | IORESOURCE_IO;
-       *flags &= ~IORESOURCE_UNSET;
+       pnp_res->index = idx;
+       res->flags |= rule->flags | IORESOURCE_IO;
+       res->flags &= ~IORESOURCE_UNSET;
 
        if (!rule->size) {
-               *flags |= IORESOURCE_DISABLED;
+               res->flags |= IORESOURCE_DISABLED;
+               dev_dbg(&dev->dev, "  io %d disabled\n", idx);
                return 1;       /* skip disabled resource requests */
        }
 
-       *start = rule->min;
-       *end = *start + rule->size - 1;
+       res->start = rule->min;
+       res->end = res->start + rule->size - 1;
 
        /* run through until pnp_check_port is happy */
-       while (!pnp_check_port(dev, idx)) {
-               *start += rule->align;
-               *end = *start + rule->size - 1;
-               if (*start > rule->max || !rule->align)
+       while (!pnp_check_port(dev, res)) {
+               res->start += rule->align;
+               res->end = res->start + rule->size - 1;
+               if (res->start > rule->max || !rule->align) {
+                       dev_dbg(&dev->dev, "  couldn't assign io %d\n", idx);
                        return 0;
+               }
        }
+       dev_dbg(&dev->dev, "  assign io  %d %#llx-%#llx\n", idx,
+               (unsigned long long) res->start, (unsigned long long) res->end);
        return 1;
 }
 
 static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
 {
-       resource_size_t *start, *end;
-       unsigned long *flags;
+       struct pnp_resource *pnp_res;
+       struct resource *res;
 
-       if (idx >= PNP_MAX_MEM) {
+       pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, idx);
+       if (!pnp_res) {
                dev_err(&dev->dev, "too many memory resources\n");
                /* pretend we were successful so at least the manager won't try again */
                return 1;
        }
 
+       res = &pnp_res->res;
+
        /* check if this resource has been manually set, if so skip */
-       if (!(dev->res.mem_resource[idx].flags & IORESOURCE_AUTO))
+       if (!(res->flags & IORESOURCE_AUTO)) {
+               dev_dbg(&dev->dev, "  mem %d already set to %#llx-%#llx "
+                       "flags %#lx\n", idx, (unsigned long long) res->start,
+                       (unsigned long long) res->end, res->flags);
                return 1;
-
-       start = &dev->res.mem_resource[idx].start;
-       end = &dev->res.mem_resource[idx].end;
-       flags = &dev->res.mem_resource[idx].flags;
+       }
 
        /* set the initial values */
-       *flags |= rule->flags | IORESOURCE_MEM;
-       *flags &= ~IORESOURCE_UNSET;
+       pnp_res->index = idx;
+       res->flags |= rule->flags | IORESOURCE_MEM;
+       res->flags &= ~IORESOURCE_UNSET;
 
        /* convert pnp flags to standard Linux flags */
        if (!(rule->flags & IORESOURCE_MEM_WRITEABLE))
-               *flags |= IORESOURCE_READONLY;
+               res->flags |= IORESOURCE_READONLY;
        if (rule->flags & IORESOURCE_MEM_CACHEABLE)
-               *flags |= IORESOURCE_CACHEABLE;
+               res->flags |= IORESOURCE_CACHEABLE;
        if (rule->flags & IORESOURCE_MEM_RANGELENGTH)
-               *flags |= IORESOURCE_RANGELENGTH;
+               res->flags |= IORESOURCE_RANGELENGTH;
        if (rule->flags & IORESOURCE_MEM_SHADOWABLE)
-               *flags |= IORESOURCE_SHADOWABLE;
+               res->flags |= IORESOURCE_SHADOWABLE;
 
        if (!rule->size) {
-               *flags |= IORESOURCE_DISABLED;
+               res->flags |= IORESOURCE_DISABLED;
+               dev_dbg(&dev->dev, "  mem %d disabled\n", idx);
                return 1;       /* skip disabled resource requests */
        }
 
-       *start = rule->min;
-       *end = *start + rule->size - 1;
+       res->start = rule->min;
+       res->end = res->start + rule->size - 1;
 
        /* run through until pnp_check_mem is happy */
-       while (!pnp_check_mem(dev, idx)) {
-               *start += rule->align;
-               *end = *start + rule->size - 1;
-               if (*start > rule->max || !rule->align)
+       while (!pnp_check_mem(dev, res)) {
+               res->start += rule->align;
+               res->end = res->start + rule->size - 1;
+               if (res->start > rule->max || !rule->align) {
+                       dev_dbg(&dev->dev, "  couldn't assign mem %d\n", idx);
                        return 0;
+               }
        }
+       dev_dbg(&dev->dev, "  assign mem %d %#llx-%#llx\n", idx,
+               (unsigned long long) res->start, (unsigned long long) res->end);
        return 1;
 }
 
 static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
 {
-       resource_size_t *start, *end;
-       unsigned long *flags;
+       struct pnp_resource *pnp_res;
+       struct resource *res;
        int i;
 
        /* IRQ priority: this table is good for i386 */
@@ -120,49 +138,59 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
                5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2
        };
 
-       if (idx >= PNP_MAX_IRQ) {
+       pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, idx);
+       if (!pnp_res) {
                dev_err(&dev->dev, "too many IRQ resources\n");
                /* pretend we were successful so at least the manager won't try again */
                return 1;
        }
 
+       res = &pnp_res->res;
+
        /* check if this resource has been manually set, if so skip */
-       if (!(dev->res.irq_resource[idx].flags & IORESOURCE_AUTO))
+       if (!(res->flags & IORESOURCE_AUTO)) {
+               dev_dbg(&dev->dev, "  irq %d already set to %d flags %#lx\n",
+                       idx, (int) res->start, res->flags);
                return 1;
-
-       start = &dev->res.irq_resource[idx].start;
-       end = &dev->res.irq_resource[idx].end;
-       flags = &dev->res.irq_resource[idx].flags;
+       }
 
        /* set the initial values */
-       *flags |= rule->flags | IORESOURCE_IRQ;
-       *flags &= ~IORESOURCE_UNSET;
+       pnp_res->index = idx;
+       res->flags |= rule->flags | IORESOURCE_IRQ;
+       res->flags &= ~IORESOURCE_UNSET;
 
        if (bitmap_empty(rule->map, PNP_IRQ_NR)) {
-               *flags |= IORESOURCE_DISABLED;
+               res->flags |= IORESOURCE_DISABLED;
+               dev_dbg(&dev->dev, "  irq %d disabled\n", idx);
                return 1;       /* skip disabled resource requests */
        }
 
        /* TBD: need check for >16 IRQ */
-       *start = find_next_bit(rule->map, PNP_IRQ_NR, 16);
-       if (*start < PNP_IRQ_NR) {
-               *end = *start;
+       res->start = find_next_bit(rule->map, PNP_IRQ_NR, 16);
+       if (res->start < PNP_IRQ_NR) {
+               res->end = res->start;
+               dev_dbg(&dev->dev, "  assign irq %d %d\n", idx,
+                       (int) res->start);
                return 1;
        }
        for (i = 0; i < 16; i++) {
                if (test_bit(xtab[i], rule->map)) {
-                       *start = *end = xtab[i];
-                       if (pnp_check_irq(dev, idx))
+                       res->start = res->end = xtab[i];
+                       if (pnp_check_irq(dev, res)) {
+                               dev_dbg(&dev->dev, "  assign irq %d %d\n", idx,
+                                       (int) res->start);
                                return 1;
+                       }
                }
        }
+       dev_dbg(&dev->dev, "  couldn't assign irq %d\n", idx);
        return 0;
 }
 
 static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
 {
-       resource_size_t *start, *end;
-       unsigned long *flags;
+       struct pnp_resource *pnp_res;
+       struct resource *res;
        int i;
 
        /* DMA priority: this table is good for i386 */
@@ -170,71 +198,89 @@ static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
                1, 3, 5, 6, 7, 0, 2, 4
        };
 
-       if (idx >= PNP_MAX_DMA) {
+       pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, idx);
+       if (!pnp_res) {
                dev_err(&dev->dev, "too many DMA resources\n");
                return;
        }
 
+       res = &pnp_res->res;
+
        /* check if this resource has been manually set, if so skip */
-       if (!(dev->res.dma_resource[idx].flags & IORESOURCE_AUTO))
+       if (!(res->flags & IORESOURCE_AUTO)) {
+               dev_dbg(&dev->dev, "  dma %d already set to %d flags %#lx\n",
+                       idx, (int) res->start, res->flags);
                return;
-
-       start = &dev->res.dma_resource[idx].start;
-       end = &dev->res.dma_resource[idx].end;
-       flags = &dev->res.dma_resource[idx].flags;
+       }
 
        /* set the initial values */
-       *flags |= rule->flags | IORESOURCE_DMA;
-       *flags &= ~IORESOURCE_UNSET;
+       pnp_res->index = idx;
+       res->flags |= rule->flags | IORESOURCE_DMA;
+       res->flags &= ~IORESOURCE_UNSET;
 
        for (i = 0; i < 8; i++) {
                if (rule->map & (1 << xtab[i])) {
-                       *start = *end = xtab[i];
-                       if (pnp_check_dma(dev, idx))
+                       res->start = res->end = xtab[i];
+                       if (pnp_check_dma(dev, res)) {
+                               dev_dbg(&dev->dev, "  assign dma %d %d\n", idx,
+                                       (int) res->start);
                                return;
+                       }
                }
        }
 #ifdef MAX_DMA_CHANNELS
-       *start = *end = MAX_DMA_CHANNELS;
+       res->start = res->end = MAX_DMA_CHANNELS;
 #endif
-       *flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+       res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+       dev_dbg(&dev->dev, "  disable dma %d\n", idx);
+}
+
+void pnp_init_resource(struct resource *res)
+{
+       unsigned long type;
+
+       type = res->flags & (IORESOURCE_IO  | IORESOURCE_MEM |
+                            IORESOURCE_IRQ | IORESOURCE_DMA);
+
+       res->name = NULL;
+       res->flags = type | IORESOURCE_AUTO | IORESOURCE_UNSET;
+       if (type == IORESOURCE_IRQ || type == IORESOURCE_DMA) {
+               res->start = -1;
+               res->end = -1;
+       } else {
+               res->start = 0;
+               res->end = 0;
+       }
 }
 
 /**
  * pnp_init_resources - Resets a resource table to default values.
  * @table: pointer to the desired resource table
  */
-void pnp_init_resource_table(struct pnp_resource_table *table)
+void pnp_init_resources(struct pnp_dev *dev)
 {
+       struct resource *res;
        int idx;
 
        for (idx = 0; idx < PNP_MAX_IRQ; idx++) {
-               table->irq_resource[idx].name = NULL;
-               table->irq_resource[idx].start = -1;
-               table->irq_resource[idx].end = -1;
-               table->irq_resource[idx].flags =
-                   IORESOURCE_IRQ | IORESOURCE_AUTO | IORESOURCE_UNSET;
+               res = &dev->res->irq[idx].res;
+               res->flags = IORESOURCE_IRQ;
+               pnp_init_resource(res);
        }
        for (idx = 0; idx < PNP_MAX_DMA; idx++) {
-               table->dma_resource[idx].name = NULL;
-               table->dma_resource[idx].start = -1;
-               table->dma_resource[idx].end = -1;
-               table->dma_resource[idx].flags =
-                   IORESOURCE_DMA | IORESOURCE_AUTO | IORESOURCE_UNSET;
+               res = &dev->res->dma[idx].res;
+               res->flags = IORESOURCE_DMA;
+               pnp_init_resource(res);
        }
        for (idx = 0; idx < PNP_MAX_PORT; idx++) {
-               table->port_resource[idx].name = NULL;
-               table->port_resource[idx].start = 0;
-               table->port_resource[idx].end = 0;
-               table->port_resource[idx].flags =
-                   IORESOURCE_IO | IORESOURCE_AUTO | IORESOURCE_UNSET;
+               res = &dev->res->port[idx].res;
+               res->flags = IORESOURCE_IO;
+               pnp_init_resource(res);
        }
        for (idx = 0; idx < PNP_MAX_MEM; idx++) {
-               table->mem_resource[idx].name = NULL;
-               table->mem_resource[idx].start = 0;
-               table->mem_resource[idx].end = 0;
-               table->mem_resource[idx].flags =
-                   IORESOURCE_MEM | IORESOURCE_AUTO | IORESOURCE_UNSET;
+               res = &dev->res->mem[idx].res;
+               res->flags = IORESOURCE_MEM;
+               pnp_init_resource(res);
        }
 }
 
@@ -242,41 +288,38 @@ void pnp_init_resource_table(struct pnp_resource_table *table)
  * pnp_clean_resources - clears resources that were not manually set
  * @res: the resources to clean
  */
-static void pnp_clean_resource_table(struct pnp_resource_table *res)
+static void pnp_clean_resource_table(struct pnp_dev *dev)
 {
+       struct resource *res;
        int idx;
 
        for (idx = 0; idx < PNP_MAX_IRQ; idx++) {
-               if (!(res->irq_resource[idx].flags & IORESOURCE_AUTO))
-                       continue;
-               res->irq_resource[idx].start = -1;
-               res->irq_resource[idx].end = -1;
-               res->irq_resource[idx].flags =
-                   IORESOURCE_IRQ | IORESOURCE_AUTO | IORESOURCE_UNSET;
+               res = &dev->res->irq[idx].res;
+               if (res->flags & IORESOURCE_AUTO) {
+                       res->flags = IORESOURCE_IRQ;
+                       pnp_init_resource(res);
+               }
        }
        for (idx = 0; idx < PNP_MAX_DMA; idx++) {
-               if (!(res->dma_resource[idx].flags & IORESOURCE_AUTO))
-                       continue;
-               res->dma_resource[idx].start = -1;
-               res->dma_resource[idx].end = -1;
-               res->dma_resource[idx].flags =
-                   IORESOURCE_DMA | IORESOURCE_AUTO | IORESOURCE_UNSET;
+               res = &dev->res->dma[idx].res;
+               if (res->flags & IORESOURCE_AUTO) {
+                       res->flags = IORESOURCE_DMA;
+                       pnp_init_resource(res);
+               }
        }
        for (idx = 0; idx < PNP_MAX_PORT; idx++) {
-               if (!(res->port_resource[idx].flags & IORESOURCE_AUTO))
-                       continue;
-               res->port_resource[idx].start = 0;
-               res->port_resource[idx].end = 0;
-               res->port_resource[idx].flags =
-                   IORESOURCE_IO | IORESOURCE_AUTO | IORESOURCE_UNSET;
+               res = &dev->res->port[idx].res;
+               if (res->flags & IORESOURCE_AUTO) {
+                       res->flags = IORESOURCE_IO;
+                       pnp_init_resource(res);
+               }
        }
        for (idx = 0; idx < PNP_MAX_MEM; idx++) {
-               if (!(res->mem_resource[idx].flags & IORESOURCE_AUTO))
-                       continue;
-               res->mem_resource[idx].start = 0;
-               res->mem_resource[idx].end = 0;
-               res->mem_resource[idx].flags =
-                   IORESOURCE_MEM | IORESOURCE_AUTO | IORESOURCE_UNSET;
+               res = &dev->res->mem[idx].res;
+               if (res->flags & IORESOURCE_AUTO) {
+                       res->flags = IORESOURCE_MEM;
+                       pnp_init_resource(res);
+               }
        }
 }
 
@@ -298,9 +341,11 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
        if (!pnp_can_configure(dev))
                return -ENODEV;
 
+       dbg_pnp_show_resources(dev, "before pnp_assign_resources");
        mutex_lock(&pnp_res_mutex);
-       pnp_clean_resource_table(&dev->res);    /* start with a fresh slate */
+       pnp_clean_resource_table(dev);
        if (dev->independent) {
+               dev_dbg(&dev->dev, "assigning independent options\n");
                port = dev->independent->port;
                mem = dev->independent->mem;
                irq = dev->independent->irq;
@@ -333,6 +378,8 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
        if (depnum) {
                struct pnp_option *dep;
                int i;
+
+               dev_dbg(&dev->dev, "assigning dependent option %d\n", depnum);
                for (i = 1, dep = dev->dependent; i < depnum;
                     i++, dep = dep->next)
                        if (!dep)
@@ -368,67 +415,16 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
                goto fail;
 
        mutex_unlock(&pnp_res_mutex);
+       dbg_pnp_show_resources(dev, "after pnp_assign_resources");
        return 1;
 
 fail:
-       pnp_clean_resource_table(&dev->res);
+       pnp_clean_resource_table(dev);
        mutex_unlock(&pnp_res_mutex);
+       dbg_pnp_show_resources(dev, "after pnp_assign_resources (failed)");
        return 0;
 }
 
-/**
- * pnp_manual_config_dev - Disables Auto Config and Manually sets the resource table
- * @dev: pointer to the desired device
- * @res: pointer to the new resource config
- * @mode: 0 or PNP_CONFIG_FORCE
- *
- * This function can be used by drivers that want to manually set thier resources.
- */
-int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table *res,
-                         int mode)
-{
-       int i;
-       struct pnp_resource_table *bak;
-
-       if (!pnp_can_configure(dev))
-               return -ENODEV;
-       bak = pnp_alloc(sizeof(struct pnp_resource_table));
-       if (!bak)
-               return -ENOMEM;
-       *bak = dev->res;
-
-       mutex_lock(&pnp_res_mutex);
-       dev->res = *res;
-       if (!(mode & PNP_CONFIG_FORCE)) {
-               for (i = 0; i < PNP_MAX_PORT; i++) {
-                       if (!pnp_check_port(dev, i))
-                               goto fail;
-               }
-               for (i = 0; i < PNP_MAX_MEM; i++) {
-                       if (!pnp_check_mem(dev, i))
-                               goto fail;
-               }
-               for (i = 0; i < PNP_MAX_IRQ; i++) {
-                       if (!pnp_check_irq(dev, i))
-                               goto fail;
-               }
-               for (i = 0; i < PNP_MAX_DMA; i++) {
-                       if (!pnp_check_dma(dev, i))
-                               goto fail;
-               }
-       }
-       mutex_unlock(&pnp_res_mutex);
-
-       kfree(bak);
-       return 0;
-
-fail:
-       dev->res = *bak;
-       mutex_unlock(&pnp_res_mutex);
-       kfree(bak);
-       return -EINVAL;
-}
-
 /**
  * pnp_auto_config_dev - automatically assigns resources to a device
  * @dev: pointer to the desired device
@@ -473,7 +469,8 @@ int pnp_start_dev(struct pnp_dev *dev)
                return -EINVAL;
        }
 
-       if (dev->protocol->set(dev, &dev->res) < 0) {
+       dbg_pnp_show_resources(dev, "pnp_start_dev");
+       if (dev->protocol->set(dev) < 0) {
                dev_err(&dev->dev, "activation failed\n");
                return -EIO;
        }
@@ -549,30 +546,13 @@ int pnp_disable_dev(struct pnp_dev *dev)
 
        /* release the resources so that other devices can use them */
        mutex_lock(&pnp_res_mutex);
-       pnp_clean_resource_table(&dev->res);
+       pnp_clean_resource_table(dev);
        mutex_unlock(&pnp_res_mutex);
 
        return 0;
 }
 
-/**
- * pnp_resource_change - change one resource
- * @resource: pointer to resource to be changed
- * @start: start of region
- * @size: size of region
- */
-void pnp_resource_change(struct resource *resource, resource_size_t start,
-                        resource_size_t size)
-{
-       resource->flags &= ~(IORESOURCE_AUTO | IORESOURCE_UNSET);
-       resource->start = start;
-       resource->end = start + size - 1;
-}
-
-EXPORT_SYMBOL(pnp_manual_config_dev);
 EXPORT_SYMBOL(pnp_start_dev);
 EXPORT_SYMBOL(pnp_stop_dev);
 EXPORT_SYMBOL(pnp_activate_dev);
 EXPORT_SYMBOL(pnp_disable_dev);
-EXPORT_SYMBOL(pnp_resource_change);
-EXPORT_SYMBOL(pnp_init_resource_table);
index 905326fcca85021bd14083aee684610527c7f8b0..2d7a1e6908be7df6ba535fb8e18ad7c3701e4816 100644 (file)
@@ -3,3 +3,7 @@
 #
 
 obj-y := core.o rsparser.o
+
+ifeq ($(CONFIG_PNP_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
index c283a9a70d8321063d92bd4a3c40607255f4459d..50902773beaf9092fd0a241d535835a8839c843b 100644 (file)
@@ -25,6 +25,7 @@
 #include <acpi/acpi_bus.h>
 #include <acpi/actypes.h>
 
+#include "../base.h"
 #include "pnpacpi.h"
 
 static int num = 0;
@@ -44,7 +45,7 @@ static struct acpi_device_id excluded_id_list[] __initdata = {
        {"", 0},
 };
 
-static inline int is_exclusive_device(struct acpi_device *dev)
+static inline int __init is_exclusive_device(struct acpi_device *dev)
 {
        return (!acpi_match_device_ids(dev, excluded_id_list));
 }
@@ -72,40 +73,24 @@ static int __init ispnpidacpi(char *id)
        return 1;
 }
 
-static void __init pnpidacpi_to_pnpid(char *id, char *str)
+static int pnpacpi_get_resources(struct pnp_dev *dev)
 {
-       str[0] = id[0];
-       str[1] = id[1];
-       str[2] = id[2];
-       str[3] = tolower(id[3]);
-       str[4] = tolower(id[4]);
-       str[5] = tolower(id[5]);
-       str[6] = tolower(id[6]);
-       str[7] = '\0';
+       dev_dbg(&dev->dev, "get resources\n");
+       return pnpacpi_parse_allocated_resource(dev);
 }
 
-static int pnpacpi_get_resources(struct pnp_dev *dev,
-                                struct pnp_resource_table *res)
-{
-       acpi_status status;
-
-       status = pnpacpi_parse_allocated_resource((acpi_handle) dev->data,
-                                                 &dev->res);
-       return ACPI_FAILURE(status) ? -ENODEV : 0;
-}
-
-static int pnpacpi_set_resources(struct pnp_dev *dev,
-                                struct pnp_resource_table *res)
+static int pnpacpi_set_resources(struct pnp_dev *dev)
 {
        acpi_handle handle = dev->data;
        struct acpi_buffer buffer;
-       int ret = 0;
+       int ret;
        acpi_status status;
 
-       ret = pnpacpi_build_resource_template(handle, &buffer);
+       dev_dbg(&dev->dev, "set resources\n");
+       ret = pnpacpi_build_resource_template(dev, &buffer);
        if (ret)
                return ret;
-       ret = pnpacpi_encode_resources(res, &buffer);
+       ret = pnpacpi_encode_resources(dev, &buffer);
        if (ret) {
                kfree(buffer.pointer);
                return ret;
@@ -163,7 +148,6 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
 {
        acpi_handle temp = NULL;
        acpi_status status;
-       struct pnp_id *dev_id;
        struct pnp_dev *dev;
 
        status = acpi_get_handle(device->handle, "_CRS", &temp);
@@ -171,11 +155,10 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
            is_exclusive_device(device))
                return 0;
 
-       dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
-       if (!dev) {
-               pnp_err("Out of memory");
+       dev = pnp_alloc_dev(&pnpacpi_protocol, num, acpi_device_hid(device));
+       if (!dev)
                return -ENOMEM;
-       }
+
        dev->data = device->handle;
        /* .enabled means the device can decode the resources */
        dev->active = device->status.enabled;
@@ -191,44 +174,17 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
        if (ACPI_SUCCESS(status))
                dev->capabilities |= PNP_DISABLE;
 
-       dev->protocol = &pnpacpi_protocol;
-
        if (strlen(acpi_device_name(device)))
                strncpy(dev->name, acpi_device_name(device), sizeof(dev->name));
        else
                strncpy(dev->name, acpi_device_bid(device), sizeof(dev->name));
 
-       dev->number = num;
-
-       /* set the initial values for the PnP device */
-       dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
-       if (!dev_id)
-               goto err;
-       pnpidacpi_to_pnpid(acpi_device_hid(device), dev_id->id);
-       pnp_add_id(dev_id, dev);
-
-       if (dev->active) {
-               /* parse allocated resource */
-               status = pnpacpi_parse_allocated_resource(device->handle,
-                                                         &dev->res);
-               if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
-                       pnp_err("PnPACPI: METHOD_NAME__CRS failure for %s",
-                               dev_id->id);
-                       goto err1;
-               }
-       }
+       if (dev->active)
+               pnpacpi_parse_allocated_resource(dev);
 
-       if (dev->capabilities & PNP_CONFIGURABLE) {
-               status = pnpacpi_parse_resource_option_data(device->handle,
-                                                           dev);
-               if (ACPI_FAILURE(status) && (status != AE_NOT_FOUND)) {
-                       pnp_err("PnPACPI: METHOD_NAME__PRS failure for %s",
-                               dev_id->id);
-                       goto err1;
-               }
-       }
+       if (dev->capabilities & PNP_CONFIGURABLE)
+               pnpacpi_parse_resource_option_data(dev);
 
-       /* parse compatible ids */
        if (device->flags.compatible_ids) {
                struct acpi_compatible_id_list *cid_list = device->pnp.cid_list;
                int i;
@@ -236,27 +192,17 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
                for (i = 0; i < cid_list->count; i++) {
                        if (!ispnpidacpi(cid_list->id[i].value))
                                continue;
-                       dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
-                       if (!dev_id)
-                               continue;
-
-                       pnpidacpi_to_pnpid(cid_list->id[i].value, dev_id->id);
-                       pnp_add_id(dev_id, dev);
+                       pnp_add_id(dev, cid_list->id[i].value);
                }
        }
 
        /* clear out the damaged flags */
        if (!dev->active)
-               pnp_init_resource_table(&dev->res);
+               pnp_init_resources(dev);
        pnp_add_device(dev);
        num++;
 
        return AE_OK;
-err1:
-       kfree(dev_id);
-err:
-       kfree(dev);
-       return -EINVAL;
 }
 
 static acpi_status __init pnpacpi_add_device_handler(acpi_handle handle,
index f28e2ed66fa34e6e1d2de9fa8ea1262ad703103d..3e60225b0227b2d6ff8f0c2ad863f195256c4b2e 100644 (file)
@@ -5,8 +5,8 @@
 #include <linux/acpi.h>
 #include <linux/pnp.h>
 
-acpi_status pnpacpi_parse_allocated_resource(acpi_handle, struct pnp_resource_table*);
-acpi_status pnpacpi_parse_resource_option_data(acpi_handle, struct pnp_dev*);
-int pnpacpi_encode_resources(struct pnp_resource_table *, struct acpi_buffer *);
-int pnpacpi_build_resource_template(acpi_handle, struct acpi_buffer*);
+int pnpacpi_parse_allocated_resource(struct pnp_dev *);
+int pnpacpi_parse_resource_option_data(struct pnp_dev *);
+int pnpacpi_encode_resources(struct pnp_dev *, struct acpi_buffer *);
+int pnpacpi_build_resource_template(struct pnp_dev *, struct acpi_buffer *);
 #endif
index 98cbc9f18eed36f281a00893e3ad0df58cfb0873..0201c8adfda748e7e74b153dd17f53b8e61c00f0 100644 (file)
@@ -21,6 +21,8 @@
 #include <linux/kernel.h>
 #include <linux/acpi.h>
 #include <linux/pci.h>
+#include <linux/pnp.h>
+#include "../base.h"
 #include "pnpacpi.h"
 
 #ifdef CONFIG_IA64
 /*
  * Allocated Resources
  */
-static int irq_flags(int triggering, int polarity)
+static int irq_flags(int triggering, int polarity, int shareable)
 {
+       int flags;
+
        if (triggering == ACPI_LEVEL_SENSITIVE) {
                if (polarity == ACPI_ACTIVE_LOW)
-                       return IORESOURCE_IRQ_LOWLEVEL;
+                       flags = IORESOURCE_IRQ_LOWLEVEL;
                else
-                       return IORESOURCE_IRQ_HIGHLEVEL;
+                       flags = IORESOURCE_IRQ_HIGHLEVEL;
        } else {
                if (polarity == ACPI_ACTIVE_LOW)
-                       return IORESOURCE_IRQ_LOWEDGE;
+                       flags = IORESOURCE_IRQ_LOWEDGE;
                else
-                       return IORESOURCE_IRQ_HIGHEDGE;
+                       flags = IORESOURCE_IRQ_HIGHEDGE;
        }
+
+       if (shareable)
+               flags |= IORESOURCE_IRQ_SHAREABLE;
+
+       return flags;
 }
 
 static void decode_irq_flags(int flag, int *triggering, int *polarity)
@@ -69,29 +78,16 @@ static void decode_irq_flags(int flag, int *triggering, int *polarity)
        }
 }
 
-static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res,
+static void pnpacpi_parse_allocated_irqresource(struct pnp_dev *dev,
                                                u32 gsi, int triggering,
                                                int polarity, int shareable)
 {
-       int i = 0;
-       int irq;
+       int irq, flags;
        int p, t;
-       static unsigned char warned;
 
        if (!valid_IRQ(gsi))
                return;
 
-       while (!(res->irq_resource[i].flags & IORESOURCE_UNSET) &&
-              i < PNP_MAX_IRQ)
-               i++;
-       if (i >= PNP_MAX_IRQ) {
-               if (!warned) {
-                       printk(KERN_WARNING "pnpacpi: exceeded the max number"
-                               " of IRQ resources: %d\n", PNP_MAX_IRQ);
-                       warned = 1;
-               }
-               return;
-       }
        /*
         * in IO-APIC mode, use the overridden attribute. Two reasons:
         * 1. BIOS bug in DSDT
@@ -102,27 +98,21 @@ static void pnpacpi_parse_allocated_irqresource(struct pnp_resource_table *res,
                p = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
 
                if (triggering != t || polarity != p) {
-                       pnp_warn("IRQ %d override to %s, %s",
+                       dev_warn(&dev->dev, "IRQ %d override to %s, %s\n",
                                gsi, t ? "edge":"level", p ? "low":"high");
                        triggering = t;
                        polarity = p;
                }
        }
 
-       res->irq_resource[i].flags = IORESOURCE_IRQ;    // Also clears _UNSET flag
-       res->irq_resource[i].flags |= irq_flags(triggering, polarity);
+       flags = irq_flags(triggering, polarity, shareable);
        irq = acpi_register_gsi(gsi, triggering, polarity);
-       if (irq < 0) {
-               res->irq_resource[i].flags |= IORESOURCE_DISABLED;
-               return;
-       }
-
-       if (shareable)
-               res->irq_resource[i].flags |= IORESOURCE_IRQ_SHAREABLE;
+       if (irq >= 0)
+               pcibios_penalize_isa_irq(irq, 1);
+       else
+               flags |= IORESOURCE_DISABLED;
 
-       res->irq_resource[i].start = irq;
-       res->irq_resource[i].end = irq;
-       pcibios_penalize_isa_irq(irq, 1);
+       pnp_add_irq_resource(dev, irq, flags);
 }
 
 static int dma_flags(int type, int bus_master, int transfer)
@@ -168,88 +158,36 @@ static int dma_flags(int type, int bus_master, int transfer)
        return flags;
 }
 
-static void pnpacpi_parse_allocated_dmaresource(struct pnp_resource_table *res,
-                                               u32 dma, int type,
-                                               int bus_master, int transfer)
+static void pnpacpi_parse_allocated_ioresource(struct pnp_dev *dev, u64 start,
+                                              u64 len, int io_decode)
 {
-       int i = 0;
-       static unsigned char warned;
-
-       while (i < PNP_MAX_DMA &&
-              !(res->dma_resource[i].flags & IORESOURCE_UNSET))
-               i++;
-       if (i < PNP_MAX_DMA) {
-               res->dma_resource[i].flags = IORESOURCE_DMA;    // Also clears _UNSET flag
-               res->dma_resource[i].flags |=
-                   dma_flags(type, bus_master, transfer);
-               if (dma == -1) {
-                       res->dma_resource[i].flags |= IORESOURCE_DISABLED;
-                       return;
-               }
-               res->dma_resource[i].start = dma;
-               res->dma_resource[i].end = dma;
-       } else if (!warned) {
-               printk(KERN_WARNING "pnpacpi: exceeded the max number of DMA "
-                               "resources: %d \n", PNP_MAX_DMA);
-               warned = 1;
-       }
-}
+       int flags = 0;
+       u64 end = start + len - 1;
 
-static void pnpacpi_parse_allocated_ioresource(struct pnp_resource_table *res,
-                                              u64 io, u64 len, int io_decode)
-{
-       int i = 0;
-       static unsigned char warned;
+       if (io_decode == ACPI_DECODE_16)
+               flags |= PNP_PORT_FLAG_16BITADDR;
+       if (len == 0 || end >= 0x10003)
+               flags |= IORESOURCE_DISABLED;
 
-       while (!(res->port_resource[i].flags & IORESOURCE_UNSET) &&
-              i < PNP_MAX_PORT)
-               i++;
-       if (i < PNP_MAX_PORT) {
-               res->port_resource[i].flags = IORESOURCE_IO;    // Also clears _UNSET flag
-               if (io_decode == ACPI_DECODE_16)
-                       res->port_resource[i].flags |= PNP_PORT_FLAG_16BITADDR;
-               if (len <= 0 || (io + len - 1) >= 0x10003) {
-                       res->port_resource[i].flags |= IORESOURCE_DISABLED;
-                       return;
-               }
-               res->port_resource[i].start = io;
-               res->port_resource[i].end = io + len - 1;
-       } else if (!warned) {
-               printk(KERN_WARNING "pnpacpi: exceeded the max number of IO "
-                               "resources: %d \n", PNP_MAX_PORT);
-               warned = 1;
-       }
+       pnp_add_io_resource(dev, start, end, flags);
 }
 
-static void pnpacpi_parse_allocated_memresource(struct pnp_resource_table *res,
-                                               u64 mem, u64 len,
+static void pnpacpi_parse_allocated_memresource(struct pnp_dev *dev,
+                                               u64 start, u64 len,
                                                int write_protect)
 {
-       int i = 0;
-       static unsigned char warned;
+       int flags = 0;
+       u64 end = start + len - 1;
 
-       while (!(res->mem_resource[i].flags & IORESOURCE_UNSET) &&
-              (i < PNP_MAX_MEM))
-               i++;
-       if (i < PNP_MAX_MEM) {
-               res->mem_resource[i].flags = IORESOURCE_MEM;    // Also clears _UNSET flag
-               if (len <= 0) {
-                       res->mem_resource[i].flags |= IORESOURCE_DISABLED;
-                       return;
-               }
-               if (write_protect == ACPI_READ_WRITE_MEMORY)
-                       res->mem_resource[i].flags |= IORESOURCE_MEM_WRITEABLE;
-
-               res->mem_resource[i].start = mem;
-               res->mem_resource[i].end = mem + len - 1;
-       } else if (!warned) {
-               printk(KERN_WARNING "pnpacpi: exceeded the max number of mem "
-                               "resources: %d\n", PNP_MAX_MEM);
-               warned = 1;
-       }
+       if (len == 0)
+               flags |= IORESOURCE_DISABLED;
+       if (write_protect == ACPI_READ_WRITE_MEMORY)
+               flags |= IORESOURCE_MEM_WRITEABLE;
+
+       pnp_add_mem_resource(dev, start, end, flags);
 }
 
-static void pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res_table,
+static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev,
                                                  struct acpi_resource *res)
 {
        struct acpi_resource_address64 addr, *p = &addr;
@@ -257,7 +195,7 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res
 
        status = acpi_resource_to_address64(res, p);
        if (!ACPI_SUCCESS(status)) {
-               pnp_warn("PnPACPI: failed to convert resource type %d",
+               dev_warn(&dev->dev, "failed to convert resource type %d\n",
                         res->type);
                return;
        }
@@ -266,11 +204,11 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res
                return;
 
        if (p->resource_type == ACPI_MEMORY_RANGE)
-               pnpacpi_parse_allocated_memresource(res_table,
+               pnpacpi_parse_allocated_memresource(dev,
                        p->minimum, p->address_length,
                        p->info.mem.write_protect);
        else if (p->resource_type == ACPI_IO_RANGE)
-               pnpacpi_parse_allocated_ioresource(res_table,
+               pnpacpi_parse_allocated_ioresource(dev,
                        p->minimum, p->address_length,
                        p->granularity == 0xfff ? ACPI_DECODE_10 :
                                ACPI_DECODE_16);
@@ -279,8 +217,16 @@ static void pnpacpi_parse_allocated_address_space(struct pnp_resource_table *res
 static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
                                              void *data)
 {
-       struct pnp_resource_table *res_table = data;
-       int i;
+       struct pnp_dev *dev = data;
+       struct acpi_resource_irq *irq;
+       struct acpi_resource_dma *dma;
+       struct acpi_resource_io *io;
+       struct acpi_resource_fixed_io *fixed_io;
+       struct acpi_resource_memory24 *memory24;
+       struct acpi_resource_memory32 *memory32;
+       struct acpi_resource_fixed_memory32 *fixed_memory32;
+       struct acpi_resource_extended_irq *extended_irq;
+       int i, flags;
 
        switch (res->type) {
        case ACPI_RESOURCE_TYPE_IRQ:
@@ -288,29 +234,33 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
                 * Per spec, only one interrupt per descriptor is allowed in
                 * _CRS, but some firmware violates this, so parse them all.
                 */
-               for (i = 0; i < res->data.irq.interrupt_count; i++) {
-                       pnpacpi_parse_allocated_irqresource(res_table,
-                               res->data.irq.interrupts[i],
-                               res->data.irq.triggering,
-                               res->data.irq.polarity,
-                               res->data.irq.sharable);
+               irq = &res->data.irq;
+               for (i = 0; i < irq->interrupt_count; i++) {
+                       pnpacpi_parse_allocated_irqresource(dev,
+                               irq->interrupts[i],
+                               irq->triggering,
+                               irq->polarity,
+                               irq->sharable);
                }
                break;
 
        case ACPI_RESOURCE_TYPE_DMA:
-               if (res->data.dma.channel_count > 0)
-                       pnpacpi_parse_allocated_dmaresource(res_table,
-                               res->data.dma.channels[0],
-                               res->data.dma.type,
-                               res->data.dma.bus_master,
-                               res->data.dma.transfer);
+               dma = &res->data.dma;
+               if (dma->channel_count > 0) {
+                       flags = dma_flags(dma->type, dma->bus_master,
+                                         dma->transfer);
+                       if (dma->channels[0] == (u8) -1)
+                               flags |= IORESOURCE_DISABLED;
+                       pnp_add_dma_resource(dev, dma->channels[0], flags);
+               }
                break;
 
        case ACPI_RESOURCE_TYPE_IO:
-               pnpacpi_parse_allocated_ioresource(res_table,
-                       res->data.io.minimum,
-                       res->data.io.address_length,
-                       res->data.io.io_decode);
+               io = &res->data.io;
+               pnpacpi_parse_allocated_ioresource(dev,
+                       io->minimum,
+                       io->address_length,
+                       io->io_decode);
                break;
 
        case ACPI_RESOURCE_TYPE_START_DEPENDENT:
@@ -318,9 +268,10 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
                break;
 
        case ACPI_RESOURCE_TYPE_FIXED_IO:
-               pnpacpi_parse_allocated_ioresource(res_table,
-                       res->data.fixed_io.address,
-                       res->data.fixed_io.address_length,
+               fixed_io = &res->data.fixed_io;
+               pnpacpi_parse_allocated_ioresource(dev,
+                       fixed_io->address,
+                       fixed_io->address_length,
                        ACPI_DECODE_10);
                break;
 
@@ -331,27 +282,30 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
                break;
 
        case ACPI_RESOURCE_TYPE_MEMORY24:
-               pnpacpi_parse_allocated_memresource(res_table,
-                       res->data.memory24.minimum,
-                       res->data.memory24.address_length,
-                       res->data.memory24.write_protect);
+               memory24 = &res->data.memory24;
+               pnpacpi_parse_allocated_memresource(dev,
+                       memory24->minimum,
+                       memory24->address_length,
+                       memory24->write_protect);
                break;
        case ACPI_RESOURCE_TYPE_MEMORY32:
-               pnpacpi_parse_allocated_memresource(res_table,
-                       res->data.memory32.minimum,
-                       res->data.memory32.address_length,
-                       res->data.memory32.write_protect);
+               memory32 = &res->data.memory32;
+               pnpacpi_parse_allocated_memresource(dev,
+                       memory32->minimum,
+                       memory32->address_length,
+                       memory32->write_protect);
                break;
        case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
-               pnpacpi_parse_allocated_memresource(res_table,
-                       res->data.fixed_memory32.address,
-                       res->data.fixed_memory32.address_length,
-                       res->data.fixed_memory32.write_protect);
+               fixed_memory32 = &res->data.fixed_memory32;
+               pnpacpi_parse_allocated_memresource(dev,
+                       fixed_memory32->address,
+                       fixed_memory32->address_length,
+                       fixed_memory32->write_protect);
                break;
        case ACPI_RESOURCE_TYPE_ADDRESS16:
        case ACPI_RESOURCE_TYPE_ADDRESS32:
        case ACPI_RESOURCE_TYPE_ADDRESS64:
-               pnpacpi_parse_allocated_address_space(res_table, res);
+               pnpacpi_parse_allocated_address_space(dev, res);
                break;
 
        case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
@@ -360,15 +314,16 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
                break;
 
        case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
-               if (res->data.extended_irq.producer_consumer == ACPI_PRODUCER)
+               extended_irq = &res->data.extended_irq;
+               if (extended_irq->producer_consumer == ACPI_PRODUCER)
                        return AE_OK;
 
-               for (i = 0; i < res->data.extended_irq.interrupt_count; i++) {
-                       pnpacpi_parse_allocated_irqresource(res_table,
-                               res->data.extended_irq.interrupts[i],
-                               res->data.extended_irq.triggering,
-                               res->data.extended_irq.polarity,
-                               res->data.extended_irq.sharable);
+               for (i = 0; i < extended_irq->interrupt_count; i++) {
+                       pnpacpi_parse_allocated_irqresource(dev,
+                               extended_irq->interrupts[i],
+                               extended_irq->triggering,
+                               extended_irq->polarity,
+                               extended_irq->sharable);
                }
                break;
 
@@ -376,24 +331,36 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
                break;
 
        default:
-               pnp_warn("PnPACPI: unknown resource type %d", res->type);
+               dev_warn(&dev->dev, "unknown resource type %d in _CRS\n",
+                        res->type);
                return AE_ERROR;
        }
 
        return AE_OK;
 }
 
-acpi_status pnpacpi_parse_allocated_resource(acpi_handle handle,
-                                            struct pnp_resource_table * res)
+int pnpacpi_parse_allocated_resource(struct pnp_dev *dev)
 {
-       /* Blank the resource table values */
-       pnp_init_resource_table(res);
+       acpi_handle handle = dev->data;
+       acpi_status status;
+
+       dev_dbg(&dev->dev, "parse allocated resources\n");
 
-       return acpi_walk_resources(handle, METHOD_NAME__CRS,
-                                  pnpacpi_allocated_resource, res);
+       pnp_init_resources(dev);
+
+       status = acpi_walk_resources(handle, METHOD_NAME__CRS,
+                                    pnpacpi_allocated_resource, dev);
+
+       if (ACPI_FAILURE(status)) {
+               if (status != AE_NOT_FOUND)
+                       dev_err(&dev->dev, "can't evaluate _CRS: %d", status);
+               return -EPERM;
+       }
+       return 0;
 }
 
-static __init void pnpacpi_parse_dma_option(struct pnp_option *option,
+static __init void pnpacpi_parse_dma_option(struct pnp_dev *dev,
+                                           struct pnp_option *option,
                                            struct acpi_resource_dma *p)
 {
        int i;
@@ -410,10 +377,11 @@ static __init void pnpacpi_parse_dma_option(struct pnp_option *option,
 
        dma->flags = dma_flags(p->type, p->bus_master, p->transfer);
 
-       pnp_register_dma_resource(option, dma);
+       pnp_register_dma_resource(dev, option, dma);
 }
 
-static __init void pnpacpi_parse_irq_option(struct pnp_option *option,
+static __init void pnpacpi_parse_irq_option(struct pnp_dev *dev,
+                                           struct pnp_option *option,
                                            struct acpi_resource_irq *p)
 {
        int i;
@@ -428,12 +396,13 @@ static __init void pnpacpi_parse_irq_option(struct pnp_option *option,
        for (i = 0; i < p->interrupt_count; i++)
                if (p->interrupts[i])
                        __set_bit(p->interrupts[i], irq->map);
-       irq->flags = irq_flags(p->triggering, p->polarity);
+       irq->flags = irq_flags(p->triggering, p->polarity, p->sharable);
 
-       pnp_register_irq_resource(option, irq);
+       pnp_register_irq_resource(dev, option, irq);
 }
 
-static __init void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
+static __init void pnpacpi_parse_ext_irq_option(struct pnp_dev *dev,
+                                               struct pnp_option *option,
                                        struct acpi_resource_extended_irq *p)
 {
        int i;
@@ -448,12 +417,13 @@ static __init void pnpacpi_parse_ext_irq_option(struct pnp_option *option,
        for (i = 0; i < p->interrupt_count; i++)
                if (p->interrupts[i])
                        __set_bit(p->interrupts[i], irq->map);
-       irq->flags = irq_flags(p->triggering, p->polarity);
+       irq->flags = irq_flags(p->triggering, p->polarity, p->sharable);
 
-       pnp_register_irq_resource(option, irq);
+       pnp_register_irq_resource(dev, option, irq);
 }
 
-static __init void pnpacpi_parse_port_option(struct pnp_option *option,
+static __init void pnpacpi_parse_port_option(struct pnp_dev *dev,
+                                            struct pnp_option *option,
                                             struct acpi_resource_io *io)
 {
        struct pnp_port *port;
@@ -469,10 +439,11 @@ static __init void pnpacpi_parse_port_option(struct pnp_option *option,
        port->size = io->address_length;
        port->flags = ACPI_DECODE_16 == io->io_decode ?
            PNP_PORT_FLAG_16BITADDR : 0;
-       pnp_register_port_resource(option, port);
+       pnp_register_port_resource(dev, option, port);
 }
 
-static __init void pnpacpi_parse_fixed_port_option(struct pnp_option *option,
+static __init void pnpacpi_parse_fixed_port_option(struct pnp_dev *dev,
+                                                  struct pnp_option *option,
                                        struct acpi_resource_fixed_io *io)
 {
        struct pnp_port *port;
@@ -486,10 +457,11 @@ static __init void pnpacpi_parse_fixed_port_option(struct pnp_option *option,
        port->size = io->address_length;
        port->align = 0;
        port->flags = PNP_PORT_FLAG_FIXED;
-       pnp_register_port_resource(option, port);
+       pnp_register_port_resource(dev, option, port);
 }
 
-static __init void pnpacpi_parse_mem24_option(struct pnp_option *option,
+static __init void pnpacpi_parse_mem24_option(struct pnp_dev *dev,
+                                             struct pnp_option *option,
                                              struct acpi_resource_memory24 *p)
 {
        struct pnp_mem *mem;
@@ -507,10 +479,11 @@ static __init void pnpacpi_parse_mem24_option(struct pnp_option *option,
        mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
            IORESOURCE_MEM_WRITEABLE : 0;
 
-       pnp_register_mem_resource(option, mem);
+       pnp_register_mem_resource(dev, option, mem);
 }
 
-static __init void pnpacpi_parse_mem32_option(struct pnp_option *option,
+static __init void pnpacpi_parse_mem32_option(struct pnp_dev *dev,
+                                             struct pnp_option *option,
                                              struct acpi_resource_memory32 *p)
 {
        struct pnp_mem *mem;
@@ -528,10 +501,11 @@ static __init void pnpacpi_parse_mem32_option(struct pnp_option *option,
        mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
            IORESOURCE_MEM_WRITEABLE : 0;
 
-       pnp_register_mem_resource(option, mem);
+       pnp_register_mem_resource(dev, option, mem);
 }
 
-static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
+static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_dev *dev,
+                                                   struct pnp_option *option,
                                        struct acpi_resource_fixed_memory32 *p)
 {
        struct pnp_mem *mem;
@@ -548,10 +522,11 @@ static __init void pnpacpi_parse_fixed_mem32_option(struct pnp_option *option,
        mem->flags = (ACPI_READ_WRITE_MEMORY == p->write_protect) ?
            IORESOURCE_MEM_WRITEABLE : 0;
 
-       pnp_register_mem_resource(option, mem);
+       pnp_register_mem_resource(dev, option, mem);
 }
 
-static __init void pnpacpi_parse_address_option(struct pnp_option *option,
+static __init void pnpacpi_parse_address_option(struct pnp_dev *dev,
+                                               struct pnp_option *option,
                                                struct acpi_resource *r)
 {
        struct acpi_resource_address64 addr, *p = &addr;
@@ -579,7 +554,7 @@ static __init void pnpacpi_parse_address_option(struct pnp_option *option,
                mem->flags = (p->info.mem.write_protect ==
                              ACPI_READ_WRITE_MEMORY) ? IORESOURCE_MEM_WRITEABLE
                    : 0;
-               pnp_register_mem_resource(option, mem);
+               pnp_register_mem_resource(dev, option, mem);
        } else if (p->resource_type == ACPI_IO_RANGE) {
                port = kzalloc(sizeof(struct pnp_port), GFP_KERNEL);
                if (!port)
@@ -588,7 +563,7 @@ static __init void pnpacpi_parse_address_option(struct pnp_option *option,
                port->size = p->address_length;
                port->align = 0;
                port->flags = PNP_PORT_FLAG_FIXED;
-               pnp_register_port_resource(option, port);
+               pnp_register_port_resource(dev, option, port);
        }
 }
 
@@ -608,11 +583,11 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
 
        switch (res->type) {
        case ACPI_RESOURCE_TYPE_IRQ:
-               pnpacpi_parse_irq_option(option, &res->data.irq);
+               pnpacpi_parse_irq_option(dev, option, &res->data.irq);
                break;
 
        case ACPI_RESOURCE_TYPE_DMA:
-               pnpacpi_parse_dma_option(option, &res->data.dma);
+               pnpacpi_parse_dma_option(dev, option, &res->data.dma);
                break;
 
        case ACPI_RESOURCE_TYPE_START_DEPENDENT:
@@ -642,19 +617,22 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
        case ACPI_RESOURCE_TYPE_END_DEPENDENT:
                /*only one EndDependentFn is allowed */
                if (!parse_data->option_independent) {
-                       pnp_warn("PnPACPI: more than one EndDependentFn");
+                       dev_warn(&dev->dev, "more than one EndDependentFn "
+                                "in _PRS\n");
                        return AE_ERROR;
                }
                parse_data->option = parse_data->option_independent;
                parse_data->option_independent = NULL;
+               dev_dbg(&dev->dev, "end dependent options\n");
                break;
 
        case ACPI_RESOURCE_TYPE_IO:
-               pnpacpi_parse_port_option(option, &res->data.io);
+               pnpacpi_parse_port_option(dev, option, &res->data.io);
                break;
 
        case ACPI_RESOURCE_TYPE_FIXED_IO:
-               pnpacpi_parse_fixed_port_option(option, &res->data.fixed_io);
+               pnpacpi_parse_fixed_port_option(dev, option,
+                                               &res->data.fixed_io);
                break;
 
        case ACPI_RESOURCE_TYPE_VENDOR:
@@ -662,57 +640,67 @@ static __init acpi_status pnpacpi_option_resource(struct acpi_resource *res,
                break;
 
        case ACPI_RESOURCE_TYPE_MEMORY24:
-               pnpacpi_parse_mem24_option(option, &res->data.memory24);
+               pnpacpi_parse_mem24_option(dev, option, &res->data.memory24);
                break;
 
        case ACPI_RESOURCE_TYPE_MEMORY32:
-               pnpacpi_parse_mem32_option(option, &res->data.memory32);
+               pnpacpi_parse_mem32_option(dev, option, &res->data.memory32);
                break;
 
        case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
-               pnpacpi_parse_fixed_mem32_option(option,
+               pnpacpi_parse_fixed_mem32_option(dev, option,
                                                 &res->data.fixed_memory32);
                break;
 
        case ACPI_RESOURCE_TYPE_ADDRESS16:
        case ACPI_RESOURCE_TYPE_ADDRESS32:
        case ACPI_RESOURCE_TYPE_ADDRESS64:
-               pnpacpi_parse_address_option(option, res);
+               pnpacpi_parse_address_option(dev, option, res);
                break;
 
        case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
                break;
 
        case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
-               pnpacpi_parse_ext_irq_option(option, &res->data.extended_irq);
+               pnpacpi_parse_ext_irq_option(dev, option,
+                                            &res->data.extended_irq);
                break;
 
        case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
                break;
 
        default:
-               pnp_warn("PnPACPI: unknown resource type %d", res->type);
+               dev_warn(&dev->dev, "unknown resource type %d in _PRS\n",
+                        res->type);
                return AE_ERROR;
        }
 
        return AE_OK;
 }
 
-acpi_status __init pnpacpi_parse_resource_option_data(acpi_handle handle,
-                                                     struct pnp_dev *dev)
+int __init pnpacpi_parse_resource_option_data(struct pnp_dev *dev)
 {
+       acpi_handle handle = dev->data;
        acpi_status status;
        struct acpipnp_parse_option_s parse_data;
 
+       dev_dbg(&dev->dev, "parse resource options\n");
+
        parse_data.option = pnp_register_independent_option(dev);
        if (!parse_data.option)
-               return AE_ERROR;
+               return -ENOMEM;
+
        parse_data.option_independent = parse_data.option;
        parse_data.dev = dev;
        status = acpi_walk_resources(handle, METHOD_NAME__PRS,
                                     pnpacpi_option_resource, &parse_data);
 
-       return status;
+       if (ACPI_FAILURE(status)) {
+               if (status != AE_NOT_FOUND)
+                       dev_err(&dev->dev, "can't evaluate _PRS: %d", status);
+               return -EPERM;
+       }
+       return 0;
 }
 
 static int pnpacpi_supported_resource(struct acpi_resource *res)
@@ -760,9 +748,10 @@ static acpi_status pnpacpi_type_resources(struct acpi_resource *res, void *data)
        return AE_OK;
 }
 
-int pnpacpi_build_resource_template(acpi_handle handle,
+int pnpacpi_build_resource_template(struct pnp_dev *dev,
                                    struct acpi_buffer *buffer)
 {
+       acpi_handle handle = dev->data;
        struct acpi_resource *resource;
        int res_cnt = 0;
        acpi_status status;
@@ -770,7 +759,7 @@ int pnpacpi_build_resource_template(acpi_handle handle,
        status = acpi_walk_resources(handle, METHOD_NAME__CRS,
                                     pnpacpi_count_resources, &res_cnt);
        if (ACPI_FAILURE(status)) {
-               pnp_err("Evaluate _CRS failed");
+               dev_err(&dev->dev, "can't evaluate _CRS: %d\n", status);
                return -EINVAL;
        }
        if (!res_cnt)
@@ -779,13 +768,13 @@ int pnpacpi_build_resource_template(acpi_handle handle,
        buffer->pointer = kzalloc(buffer->length - 1, GFP_KERNEL);
        if (!buffer->pointer)
                return -ENOMEM;
-       pnp_dbg("Res cnt %d", res_cnt);
+
        resource = (struct acpi_resource *)buffer->pointer;
        status = acpi_walk_resources(handle, METHOD_NAME__CRS,
                                     pnpacpi_type_resources, &resource);
        if (ACPI_FAILURE(status)) {
                kfree(buffer->pointer);
-               pnp_err("Evaluate _CRS failed");
+               dev_err(&dev->dev, "can't evaluate _CRS: %d\n", status);
                return -EINVAL;
        }
        /* resource will point to the end resource now */
@@ -794,129 +783,184 @@ int pnpacpi_build_resource_template(acpi_handle handle,
        return 0;
 }
 
-static void pnpacpi_encode_irq(struct acpi_resource *resource,
+static void pnpacpi_encode_irq(struct pnp_dev *dev,
+                              struct acpi_resource *resource,
                               struct resource *p)
 {
+       struct acpi_resource_irq *irq = &resource->data.irq;
        int triggering, polarity;
 
        decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering, &polarity);
-       resource->data.irq.triggering = triggering;
-       resource->data.irq.polarity = polarity;
+       irq->triggering = triggering;
+       irq->polarity = polarity;
        if (triggering == ACPI_EDGE_SENSITIVE)
-               resource->data.irq.sharable = ACPI_EXCLUSIVE;
+               irq->sharable = ACPI_EXCLUSIVE;
        else
-               resource->data.irq.sharable = ACPI_SHARED;
-       resource->data.irq.interrupt_count = 1;
-       resource->data.irq.interrupts[0] = p->start;
+               irq->sharable = ACPI_SHARED;
+       irq->interrupt_count = 1;
+       irq->interrupts[0] = p->start;
+
+       dev_dbg(&dev->dev, "  encode irq %d %s %s %s\n", (int) p->start,
+               triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge",
+               polarity == ACPI_ACTIVE_LOW ? "low" : "high",
+               irq->sharable == ACPI_SHARED ? "shared" : "exclusive");
 }
 
-static void pnpacpi_encode_ext_irq(struct acpi_resource *resource,
+static void pnpacpi_encode_ext_irq(struct pnp_dev *dev,
+                                  struct acpi_resource *resource,
                                   struct resource *p)
 {
+       struct acpi_resource_extended_irq *extended_irq = &resource->data.extended_irq;
        int triggering, polarity;
 
        decode_irq_flags(p->flags & IORESOURCE_BITS, &triggering, &polarity);
-       resource->data.extended_irq.producer_consumer = ACPI_CONSUMER;
-       resource->data.extended_irq.triggering = triggering;
-       resource->data.extended_irq.polarity = polarity;
+       extended_irq->producer_consumer = ACPI_CONSUMER;
+       extended_irq->triggering = triggering;
+       extended_irq->polarity = polarity;
        if (triggering == ACPI_EDGE_SENSITIVE)
-               resource->data.irq.sharable = ACPI_EXCLUSIVE;
+               extended_irq->sharable = ACPI_EXCLUSIVE;
        else
-               resource->data.irq.sharable = ACPI_SHARED;
-       resource->data.extended_irq.interrupt_count = 1;
-       resource->data.extended_irq.interrupts[0] = p->start;
+               extended_irq->sharable = ACPI_SHARED;
+       extended_irq->interrupt_count = 1;
+       extended_irq->interrupts[0] = p->start;
+
+       dev_dbg(&dev->dev, "  encode irq %d %s %s %s\n", (int) p->start,
+               triggering == ACPI_LEVEL_SENSITIVE ? "level" : "edge",
+               polarity == ACPI_ACTIVE_LOW ? "low" : "high",
+               extended_irq->sharable == ACPI_SHARED ? "shared" : "exclusive");
 }
 
-static void pnpacpi_encode_dma(struct acpi_resource *resource,
+static void pnpacpi_encode_dma(struct pnp_dev *dev,
+                              struct acpi_resource *resource,
                               struct resource *p)
 {
+       struct acpi_resource_dma *dma = &resource->data.dma;
+
        /* Note: pnp_assign_dma will copy pnp_dma->flags into p->flags */
        switch (p->flags & IORESOURCE_DMA_SPEED_MASK) {
        case IORESOURCE_DMA_TYPEA:
-               resource->data.dma.type = ACPI_TYPE_A;
+               dma->type = ACPI_TYPE_A;
                break;
        case IORESOURCE_DMA_TYPEB:
-               resource->data.dma.type = ACPI_TYPE_B;
+               dma->type = ACPI_TYPE_B;
                break;
        case IORESOURCE_DMA_TYPEF:
-               resource->data.dma.type = ACPI_TYPE_F;
+               dma->type = ACPI_TYPE_F;
                break;
        default:
-               resource->data.dma.type = ACPI_COMPATIBILITY;
+               dma->type = ACPI_COMPATIBILITY;
        }
 
        switch (p->flags & IORESOURCE_DMA_TYPE_MASK) {
        case IORESOURCE_DMA_8BIT:
-               resource->data.dma.transfer = ACPI_TRANSFER_8;
+               dma->transfer = ACPI_TRANSFER_8;
                break;
        case IORESOURCE_DMA_8AND16BIT:
-               resource->data.dma.transfer = ACPI_TRANSFER_8_16;
+               dma->transfer = ACPI_TRANSFER_8_16;
                break;
        default:
-               resource->data.dma.transfer = ACPI_TRANSFER_16;
+               dma->transfer = ACPI_TRANSFER_16;
        }
 
-       resource->data.dma.bus_master = !!(p->flags & IORESOURCE_DMA_MASTER);
-       resource->data.dma.channel_count = 1;
-       resource->data.dma.channels[0] = p->start;
+       dma->bus_master = !!(p->flags & IORESOURCE_DMA_MASTER);
+       dma->channel_count = 1;
+       dma->channels[0] = p->start;
+
+       dev_dbg(&dev->dev, "  encode dma %d "
+               "type %#x transfer %#x master %d\n",
+               (int) p->start, dma->type, dma->transfer, dma->bus_master);
 }
 
-static void pnpacpi_encode_io(struct acpi_resource *resource,
+static void pnpacpi_encode_io(struct pnp_dev *dev,
+                             struct acpi_resource *resource,
                              struct resource *p)
 {
+       struct acpi_resource_io *io = &resource->data.io;
+
        /* Note: pnp_assign_port will copy pnp_port->flags into p->flags */
-       resource->data.io.io_decode = (p->flags & PNP_PORT_FLAG_16BITADDR) ?
+       io->io_decode = (p->flags & PNP_PORT_FLAG_16BITADDR) ?
            ACPI_DECODE_16 : ACPI_DECODE_10;
-       resource->data.io.minimum = p->start;
-       resource->data.io.maximum = p->end;
-       resource->data.io.alignment = 0;        /* Correct? */
-       resource->data.io.address_length = p->end - p->start + 1;
+       io->minimum = p->start;
+       io->maximum = p->end;
+       io->alignment = 0;      /* Correct? */
+       io->address_length = p->end - p->start + 1;
+
+       dev_dbg(&dev->dev, "  encode io %#llx-%#llx decode %#x\n",
+               (unsigned long long) p->start, (unsigned long long) p->end,
+               io->io_decode);
 }
 
-static void pnpacpi_encode_fixed_io(struct acpi_resource *resource,
+static void pnpacpi_encode_fixed_io(struct pnp_dev *dev,
+                                   struct acpi_resource *resource,
                                    struct resource *p)
 {
-       resource->data.fixed_io.address = p->start;
-       resource->data.fixed_io.address_length = p->end - p->start + 1;
+       struct acpi_resource_fixed_io *fixed_io = &resource->data.fixed_io;
+
+       fixed_io->address = p->start;
+       fixed_io->address_length = p->end - p->start + 1;
+
+       dev_dbg(&dev->dev, "  encode fixed_io %#llx-%#llx\n",
+               (unsigned long long) p->start, (unsigned long long) p->end);
 }
 
-static void pnpacpi_encode_mem24(struct acpi_resource *resource,
+static void pnpacpi_encode_mem24(struct pnp_dev *dev,
+                                struct acpi_resource *resource,
                                 struct resource *p)
 {
+       struct acpi_resource_memory24 *memory24 = &resource->data.memory24;
+
        /* Note: pnp_assign_mem will copy pnp_mem->flags into p->flags */
-       resource->data.memory24.write_protect =
+       memory24->write_protect =
            (p->flags & IORESOURCE_MEM_WRITEABLE) ?
            ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
-       resource->data.memory24.minimum = p->start;
-       resource->data.memory24.maximum = p->end;
-       resource->data.memory24.alignment = 0;
-       resource->data.memory24.address_length = p->end - p->start + 1;
+       memory24->minimum = p->start;
+       memory24->maximum = p->end;
+       memory24->alignment = 0;
+       memory24->address_length = p->end - p->start + 1;
+
+       dev_dbg(&dev->dev, "  encode mem24 %#llx-%#llx write_protect %#x\n",
+               (unsigned long long) p->start, (unsigned long long) p->end,
+               memory24->write_protect);
 }
 
-static void pnpacpi_encode_mem32(struct acpi_resource *resource,
+static void pnpacpi_encode_mem32(struct pnp_dev *dev,
+                                struct acpi_resource *resource,
                                 struct resource *p)
 {
-       resource->data.memory32.write_protect =
+       struct acpi_resource_memory32 *memory32 = &resource->data.memory32;
+
+       memory32->write_protect =
            (p->flags & IORESOURCE_MEM_WRITEABLE) ?
            ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
-       resource->data.memory32.minimum = p->start;
-       resource->data.memory32.maximum = p->end;
-       resource->data.memory32.alignment = 0;
-       resource->data.memory32.address_length = p->end - p->start + 1;
+       memory32->minimum = p->start;
+       memory32->maximum = p->end;
+       memory32->alignment = 0;
+       memory32->address_length = p->end - p->start + 1;
+
+       dev_dbg(&dev->dev, "  encode mem32 %#llx-%#llx write_protect %#x\n",
+               (unsigned long long) p->start, (unsigned long long) p->end,
+               memory32->write_protect);
 }
 
-static void pnpacpi_encode_fixed_mem32(struct acpi_resource *resource,
+static void pnpacpi_encode_fixed_mem32(struct pnp_dev *dev,
+                                      struct acpi_resource *resource,
                                       struct resource *p)
 {
-       resource->data.fixed_memory32.write_protect =
+       struct acpi_resource_fixed_memory32 *fixed_memory32 = &resource->data.fixed_memory32;
+
+       fixed_memory32->write_protect =
            (p->flags & IORESOURCE_MEM_WRITEABLE) ?
            ACPI_READ_WRITE_MEMORY : ACPI_READ_ONLY_MEMORY;
-       resource->data.fixed_memory32.address = p->start;
-       resource->data.fixed_memory32.address_length = p->end - p->start + 1;
+       fixed_memory32->address = p->start;
+       fixed_memory32->address_length = p->end - p->start + 1;
+
+       dev_dbg(&dev->dev, "  encode fixed_mem32 %#llx-%#llx "
+               "write_protect %#x\n",
+               (unsigned long long) p->start, (unsigned long long) p->end,
+               fixed_memory32->write_protect);
 }
 
-int pnpacpi_encode_resources(struct pnp_resource_table *res_table,
-                            struct acpi_buffer *buffer)
+int pnpacpi_encode_resources(struct pnp_dev *dev, struct acpi_buffer *buffer)
 {
        int i = 0;
        /* pnpacpi_build_resource_template allocates extra mem */
@@ -924,58 +968,48 @@ int pnpacpi_encode_resources(struct pnp_resource_table *res_table,
        struct acpi_resource *resource = buffer->pointer;
        int port = 0, irq = 0, dma = 0, mem = 0;
 
-       pnp_dbg("res cnt %d", res_cnt);
+       dev_dbg(&dev->dev, "encode %d resources\n", res_cnt);
        while (i < res_cnt) {
                switch (resource->type) {
                case ACPI_RESOURCE_TYPE_IRQ:
-                       pnp_dbg("Encode irq");
-                       pnpacpi_encode_irq(resource,
-                                          &res_table->irq_resource[irq]);
+                       pnpacpi_encode_irq(dev, resource,
+                              pnp_get_resource(dev, IORESOURCE_IRQ, irq));
                        irq++;
                        break;
 
                case ACPI_RESOURCE_TYPE_DMA:
-                       pnp_dbg("Encode dma");
-                       pnpacpi_encode_dma(resource,
-                                          &res_table->dma_resource[dma]);
+                       pnpacpi_encode_dma(dev, resource,
+                               pnp_get_resource(dev, IORESOURCE_DMA, dma));
                        dma++;
                        break;
                case ACPI_RESOURCE_TYPE_IO:
-                       pnp_dbg("Encode io");
-                       pnpacpi_encode_io(resource,
-                                         &res_table->port_resource[port]);
+                       pnpacpi_encode_io(dev, resource,
+                               pnp_get_resource(dev, IORESOURCE_IO, port));
                        port++;
                        break;
                case ACPI_RESOURCE_TYPE_FIXED_IO:
-                       pnp_dbg("Encode fixed io");
-                       pnpacpi_encode_fixed_io(resource,
-                                               &res_table->
-                                               port_resource[port]);
+                       pnpacpi_encode_fixed_io(dev, resource,
+                               pnp_get_resource(dev, IORESOURCE_IO, port));
                        port++;
                        break;
                case ACPI_RESOURCE_TYPE_MEMORY24:
-                       pnp_dbg("Encode mem24");
-                       pnpacpi_encode_mem24(resource,
-                                            &res_table->mem_resource[mem]);
+                       pnpacpi_encode_mem24(dev, resource,
+                               pnp_get_resource(dev, IORESOURCE_MEM, mem));
                        mem++;
                        break;
                case ACPI_RESOURCE_TYPE_MEMORY32:
-                       pnp_dbg("Encode mem32");
-                       pnpacpi_encode_mem32(resource,
-                                            &res_table->mem_resource[mem]);
+                       pnpacpi_encode_mem32(dev, resource,
+                               pnp_get_resource(dev, IORESOURCE_MEM, mem));
                        mem++;
                        break;
                case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
-                       pnp_dbg("Encode fixed mem32");
-                       pnpacpi_encode_fixed_mem32(resource,
-                                                  &res_table->
-                                                  mem_resource[mem]);
+                       pnpacpi_encode_fixed_mem32(dev, resource,
+                               pnp_get_resource(dev, IORESOURCE_MEM, mem));
                        mem++;
                        break;
                case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
-                       pnp_dbg("Encode ext irq");
-                       pnpacpi_encode_ext_irq(resource,
-                                              &res_table->irq_resource[irq]);
+                       pnpacpi_encode_ext_irq(dev, resource,
+                               pnp_get_resource(dev, IORESOURCE_IRQ, irq));
                        irq++;
                        break;
                case ACPI_RESOURCE_TYPE_START_DEPENDENT:
@@ -988,7 +1022,8 @@ int pnpacpi_encode_resources(struct pnp_resource_table *res_table,
                case ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64:
                case ACPI_RESOURCE_TYPE_GENERIC_REGISTER:
                default:        /* other type */
-                       pnp_warn("unknown resource type %d", resource->type);
+                       dev_warn(&dev->dev, "can't encode unknown resource "
+                                "type %d\n", resource->type);
                        return -EINVAL;
                }
                resource++;
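
The pattern repeated through this file is the heart of the change: parsers no longer hunt for a free slot in a fixed-size pnp_resource_table and warn when PNP_MAX_* runs out; they compute flags and hand start/end to pnp_add_io/mem/irq/dma_resource(), and the encode loop reads resources back positionally with pnp_get_resource(dev, type, n). The core-side helpers are not in this excerpt; a simplified sketch of what one of them has to do, with the slot lookup left abstract (find_free_mem_slot() is invented for illustration, and the real helper may well return the slot rather than void):

    /* hypothetical, simplified core-side counterpart of pnp_add_mem_resource() */
    void pnp_add_mem_resource(struct pnp_dev *dev, resource_size_t start,
                              resource_size_t end, int flags)
    {
            struct resource *res = find_free_mem_slot(dev);  /* replaces the old open-coded UNSET scan */

            if (!res) {
                    dev_warn(&dev->dev, "can't add MEM resource %#llx-%#llx\n",
                             (unsigned long long) start, (unsigned long long) end);
                    return;
            }

            res->flags = IORESOURCE_MEM | flags;    /* clears IORESOURCE_UNSET, as the old code noted */
            res->start = start;
            res->end = end;
    }

Centralising that logic is also what removes the per-file "static unsigned char warned" rate limiting visible in the deleted lines.
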
index 3cd3ed760605fbc7c6f5046bc4f714345aa3b01c..310e2b3a7710458e4e25518bfdb6f89ff713e9dd 100644 (file)
@@ -5,3 +5,7 @@
 pnpbios-proc-$(CONFIG_PNPBIOS_PROC_FS) = proc.o
 
 obj-y := core.o bioscalls.o rsparser.o $(pnpbios-proc-y)
+
+ifeq ($(CONFIG_PNP_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
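
This Makefile hunk is what gives the dev_dbg() calls added throughout the commit any effect: dev_dbg() only expands to a real dev_printk(KERN_DEBUG, ...) when DEBUG is defined at compile time, so CONFIG_PNP_DEBUG=y now switches on the per-device debug output for this directory. Roughly, paraphrasing the dev_dbg() definition in <linux/device.h> of this era (the no-op branch below is a stand-in, not the kernel's exact wording):

    #ifdef DEBUG    /* supplied here via EXTRA_CFLAGS += -DDEBUG */
    #define dev_dbg(dev, format, arg...) \
            dev_printk(KERN_DEBUG, dev, format, ## arg)
    #else
    #define dev_dbg(dev, format, arg...) \
            do { if (0) dev_printk(KERN_DEBUG, dev, format, ## arg); } while (0)
    #endif
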
index a8364d8152225e43960c6a20e5139bdf8dcc9b94..7ff824496b397b236eda67c1a39b178b6ea22024 100644 (file)
@@ -7,7 +7,6 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/kernel.h>
-#include <linux/pnpbios.h>
 #include <linux/device.h>
 #include <linux/pnp.h>
 #include <linux/mm.h>
index a8a51500e1e9aae008cfc1cddc9ef547f4b6e10e..19a4be1a9a31afd73567724ccdf3b56a03445bc7 100644 (file)
@@ -50,7 +50,6 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/kernel.h>
-#include <linux/pnpbios.h>
 #include <linux/device.h>
 #include <linux/pnp.h>
 #include <linux/mm.h>
@@ -69,6 +68,7 @@
 #include <asm/system.h>
 #include <asm/byteorder.h>
 
+#include "../base.h"
 #include "pnpbios.h"
 
 /*
@@ -203,8 +203,7 @@ static int pnp_dock_thread(void *unused)
 
 #endif                         /* CONFIG_HOTPLUG */
 
-static int pnpbios_get_resources(struct pnp_dev *dev,
-                                struct pnp_resource_table *res)
+static int pnpbios_get_resources(struct pnp_dev *dev)
 {
        u8 nodenum = dev->number;
        struct pnp_bios_node *node;
@@ -212,6 +211,7 @@ static int pnpbios_get_resources(struct pnp_dev *dev,
        if (!pnpbios_is_dynamic(dev))
                return -EPERM;
 
+       dev_dbg(&dev->dev, "get resources\n");
        node = kzalloc(node_info.max_node_size, GFP_KERNEL);
        if (!node)
                return -1;
@@ -219,14 +219,13 @@ static int pnpbios_get_resources(struct pnp_dev *dev,
                kfree(node);
                return -ENODEV;
        }
-       pnpbios_read_resources_from_node(res, node);
+       pnpbios_read_resources_from_node(dev, node);
        dev->active = pnp_is_active(dev);
        kfree(node);
        return 0;
 }
 
-static int pnpbios_set_resources(struct pnp_dev *dev,
-                                struct pnp_resource_table *res)
+static int pnpbios_set_resources(struct pnp_dev *dev)
 {
        u8 nodenum = dev->number;
        struct pnp_bios_node *node;
@@ -235,6 +234,7 @@ static int pnpbios_set_resources(struct pnp_dev *dev,
        if (!pnpbios_is_dynamic(dev))
                return -EPERM;
 
+       dev_dbg(&dev->dev, "set resources\n");
        node = kzalloc(node_info.max_node_size, GFP_KERNEL);
        if (!node)
                return -1;
@@ -242,7 +242,7 @@ static int pnpbios_set_resources(struct pnp_dev *dev,
                kfree(node);
                return -ENODEV;
        }
-       if (pnpbios_write_resources_to_node(res, node) < 0) {
+       if (pnpbios_write_resources_to_node(dev, node) < 0) {
                kfree(node);
                return -1;
        }
@@ -317,7 +317,6 @@ static int __init insert_device(struct pnp_bios_node *node)
 {
        struct list_head *pos;
        struct pnp_dev *dev;
-       struct pnp_id *dev_id;
        char id[8];
 
        /* check if the device is already added */
@@ -327,20 +326,11 @@ static int __init insert_device(struct pnp_bios_node *node)
                        return -1;
        }
 
-       dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
+       pnp_eisa_id_to_string(node->eisa_id & PNP_EISA_ID_MASK, id);
+       dev = pnp_alloc_dev(&pnpbios_protocol, node->handle, id);
        if (!dev)
                return -1;
 
-       dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
-       if (!dev_id) {
-               kfree(dev);
-               return -1;
-       }
-
-       dev->number = node->handle;
-       pnpid32_to_pnpid(node->eisa_id, id);
-       memcpy(dev_id->id, id, 7);
-       pnp_add_id(dev_id, dev);
        pnpbios_parse_data_stream(dev, node);
        dev->active = pnp_is_active(dev);
        dev->flags = node->flags;
@@ -353,11 +343,10 @@ static int __init insert_device(struct pnp_bios_node *node)
                dev->capabilities |= PNP_WRITE;
        if (dev->flags & PNPBIOS_REMOVABLE)
                dev->capabilities |= PNP_REMOVABLE;
-       dev->protocol = &pnpbios_protocol;
 
        /* clear out the damaged flags */
        if (!dev->active)
-               pnp_init_resource_table(&dev->res);
+               pnp_init_resources(dev);
 
        pnp_add_device(dev);
        pnpbios_interface_attach_device(node);
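
insert_device() gets the same treatment as the ACPI path: the open-coded kzalloc of pnp_dev and pnp_id, the ID conversion and the dev->number/dev->protocol assignments all collapse into pnp_eisa_id_to_string() plus pnp_alloc_dev(). The allocator itself is core-side code outside this excerpt; under the assumption that it simply absorbs the deleted boilerplate, it plausibly looks like:

    /* hypothetical sketch of pnp_alloc_dev(); the real helper lives in the PnP core */
    struct pnp_dev *pnp_alloc_dev(struct pnp_protocol *protocol, int id, char *pnpid)
    {
            struct pnp_dev *dev;

            dev = kzalloc(sizeof(struct pnp_dev), GFP_KERNEL);
            if (!dev)
                    return NULL;

            dev->protocol = protocol;
            dev->number = id;
            if (!pnp_add_id(dev, pnpid)) {          /* same helper the pnpacpi hunks now call */
                    kfree(dev);
                    return NULL;
            }
            /* ... plus device-model initialisation (dev->dev name, bus type, release hook) ... */
            return dev;
    }

Note that the ID handling also changes representation: the caller now masks with PNP_EISA_ID_MASK and converts via pnp_eisa_id_to_string(), where the old code passed the raw node->eisa_id to pnpid32_to_pnpid().
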
index d8cb2fd1f127a98a10673e937889802cf7d6b79b..b09cf6dc2075e1d5a673d4bf0119f5fdfbfe8cb6 100644 (file)
@@ -2,6 +2,142 @@
  * pnpbios.h - contains local definitions
  */
 
+/*
+ * Include file for the interface to a PnP BIOS
+ *
+ * Original BIOS code (C) 1998 Christian Schmidt (chr.schmidt@tu-bs.de)
+ * PnP handler parts (c) 1998 Tom Lees <tom@lpsg.demon.co.uk>
+ * Minor reorganizations by David Hinds <dahinds@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+/*
+ * Return codes
+ */
+#define PNP_SUCCESS                     0x00
+#define PNP_NOT_SET_STATICALLY          0x7f
+#define PNP_UNKNOWN_FUNCTION            0x81
+#define PNP_FUNCTION_NOT_SUPPORTED      0x82
+#define PNP_INVALID_HANDLE              0x83
+#define PNP_BAD_PARAMETER               0x84
+#define PNP_SET_FAILED                  0x85
+#define PNP_EVENTS_NOT_PENDING          0x86
+#define PNP_SYSTEM_NOT_DOCKED           0x87
+#define PNP_NO_ISA_PNP_CARDS            0x88
+#define PNP_UNABLE_TO_DETERMINE_DOCK_CAPABILITIES 0x89
+#define PNP_CONFIG_CHANGE_FAILED_NO_BATTERY 0x8a
+#define PNP_CONFIG_CHANGE_FAILED_RESOURCE_CONFLICT 0x8b
+#define PNP_BUFFER_TOO_SMALL            0x8c
+#define PNP_USE_ESCD_SUPPORT            0x8d
+#define PNP_MESSAGE_NOT_SUPPORTED       0x8e
+#define PNP_HARDWARE_ERROR              0x8f
+
+#define ESCD_SUCCESS                    0x00
+#define ESCD_IO_ERROR_READING           0x55
+#define ESCD_INVALID                    0x56
+#define ESCD_BUFFER_TOO_SMALL           0x59
+#define ESCD_NVRAM_TOO_SMALL            0x5a
+#define ESCD_FUNCTION_NOT_SUPPORTED     0x81
+
+/*
+ * Events that can be received by "get event"
+ */
+#define PNPEV_ABOUT_TO_CHANGE_CONFIG   0x0001
+#define PNPEV_DOCK_CHANGED             0x0002
+#define PNPEV_SYSTEM_DEVICE_CHANGED    0x0003
+#define PNPEV_CONFIG_CHANGED_FAILED    0x0004
+#define PNPEV_UNKNOWN_SYSTEM_EVENT     0xffff
+/* 0x8000 through 0xfffe are OEM defined */
+
+/*
+ * Messages that should be sent through "send message"
+ */
+#define PNPMSG_OK                      0x00
+#define PNPMSG_ABORT                   0x01
+#define PNPMSG_UNDOCK_DEFAULT_ACTION   0x40
+#define PNPMSG_POWER_OFF               0x41
+#define PNPMSG_PNP_OS_ACTIVE           0x42
+#define PNPMSG_PNP_OS_INACTIVE         0x43
+
+/*
+ * Plug and Play BIOS flags
+ */
+#define PNPBIOS_NO_DISABLE             0x0001
+#define PNPBIOS_NO_CONFIG              0x0002
+#define PNPBIOS_OUTPUT                 0x0004
+#define PNPBIOS_INPUT                  0x0008
+#define PNPBIOS_BOOTABLE               0x0010
+#define PNPBIOS_DOCK                   0x0020
+#define PNPBIOS_REMOVABLE              0x0040
+#define pnpbios_is_static(x) (((x)->flags & 0x0100) == 0x0000)
+#define pnpbios_is_dynamic(x) ((x)->flags & 0x0080)
+
+/*
+ * Function Parameters
+ */
+#define PNPMODE_STATIC 1
+#define PNPMODE_DYNAMIC 0
+
+/* 0x8000 through 0xffff are OEM defined */
+
+#pragma pack(1)
+struct pnp_dev_node_info {
+       __u16 no_nodes;
+       __u16 max_node_size;
+};
+struct pnp_docking_station_info {
+       __u32 location_id;
+       __u32 serial;
+       __u16 capabilities;
+};
+struct pnp_isa_config_struc {
+       __u8 revision;
+       __u8 no_csns;
+       __u16 isa_rd_data_port;
+       __u16 reserved;
+};
+struct escd_info_struc {
+       __u16 min_escd_write_size;
+       __u16 escd_size;
+       __u32 nv_storage_base;
+};
+struct pnp_bios_node {
+       __u16 size;
+       __u8 handle;
+       __u32 eisa_id;
+       __u8 type_code[3];
+       __u16 flags;
+       __u8 data[0];
+};
+#pragma pack()
+
+/* non-exported */
+extern struct pnp_dev_node_info node_info;
+
+extern int pnp_bios_dev_node_info(struct pnp_dev_node_info *data);
+extern int pnp_bios_get_dev_node(u8 *nodenum, char config,
+                                struct pnp_bios_node *data);
+extern int pnp_bios_set_dev_node(u8 nodenum, char config,
+                                struct pnp_bios_node *data);
+extern int pnp_bios_get_stat_res(char *info);
+extern int pnp_bios_isapnp_config(struct pnp_isa_config_struc *data);
+extern int pnp_bios_escd_info(struct escd_info_struc *data);
+extern int pnp_bios_read_escd(char *data, u32 nvram_base);
+extern int pnp_bios_dock_station_info(struct pnp_docking_station_info *data);
+
 #pragma pack(1)
 union pnp_bios_install_struct {
        struct {
@@ -28,8 +164,8 @@ extern int pnp_bios_present(void);
 extern int  pnpbios_dont_use_current_config;
 
 extern int pnpbios_parse_data_stream(struct pnp_dev *dev, struct pnp_bios_node * node);
-extern int pnpbios_read_resources_from_node(struct pnp_resource_table *res, struct pnp_bios_node * node);
-extern int pnpbios_write_resources_to_node(struct pnp_resource_table *res, struct pnp_bios_node * node);
+extern int pnpbios_read_resources_from_node(struct pnp_dev *dev, struct pnp_bios_node *node);
+extern int pnpbios_write_resources_to_node(struct pnp_dev *dev, struct pnp_bios_node *node);
 extern void pnpid32_to_pnpid(u32 id, char *str);
 
 extern void pnpbios_print_status(const char * module, u16 status);
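
Nearly all of the material added to this header is not new code: it is the public content of <linux/pnpbios.h> pulled into a driver-local header, which is why the "#include <linux/pnpbios.h>" lines vanish from bioscalls.c, core.c, proc.c and rsparser.c elsewhere in this commit. The declared interface is the one pnpbios_get_resources()/pnpbios_set_resources() above already follow; condensed here for reference (a sketch built only from the prototypes and defines declared above):

    static int example_read_current_config(struct pnp_dev *dev)
    {
            struct pnp_bios_node *node;
            u8 nodenum = dev->number;

            node = kzalloc(node_info.max_node_size, GFP_KERNEL);    /* sized via pnp_bios_dev_node_info() */
            if (!node)
                    return -ENOMEM;

            if (pnp_bios_get_dev_node(&nodenum, PNPMODE_DYNAMIC, node)) {
                    kfree(node);            /* non-zero is one of the PNP_* status codes above */
                    return -ENODEV;
            }

            pnpbios_read_resources_from_node(dev, node);    /* new dev-based prototype */
            kfree(node);
            return 0;
    }
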
index bb19bc957bad9274457930b8b75b7753e62a7ac4..b35d921bac6e47a0bc79b5d99e3dbbe14a31e6df 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/proc_fs.h>
-#include <linux/pnpbios.h>
+#include <linux/pnp.h>
 #include <linux/init.h>
 
 #include <asm/uaccess.h>
@@ -256,7 +256,7 @@ int pnpbios_interface_attach_device(struct pnp_bios_node *node)
  */
 int __init pnpbios_proc_init(void)
 {
-       proc_pnp = proc_mkdir("pnp", proc_bus);
+       proc_pnp = proc_mkdir("bus/pnp", NULL);
        if (!proc_pnp)
                return -EIO;
        proc_pnp_boot = proc_mkdir("boot", proc_pnp);
@@ -294,5 +294,5 @@ void __exit pnpbios_proc_exit(void)
        remove_proc_entry("configuration_info", proc_pnp);
        remove_proc_entry("devices", proc_pnp);
        remove_proc_entry("boot", proc_pnp);
-       remove_proc_entry("pnp", proc_bus);
+       remove_proc_entry("bus/pnp", NULL);
 }
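The proc entry moves from a proc_bus-relative name to the path "bus/pnp" with a NULL parent; proc_mkdir() resolves slash-separated names against the /proc root, so the directory still ends up at /proc/bus/pnp. A one-line sketch with a hypothetical function name:

/* NULL parent: the name is looked up from the /proc root, so this
 * creates /proc/bus/pnp just like the old proc_bus-based call did */
static struct proc_dir_entry *pnp_proc_dir_example(void)
{
        return proc_mkdir("bus/pnp", NULL);
}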
index caade3531416f6a7392595f7b5fd432fa2a05660..5ff9a4c0447ec8052e0de9a433255051826e1298 100644 (file)
@@ -4,7 +4,6 @@
 
 #include <linux/ctype.h>
 #include <linux/pnp.h>
-#include <linux/pnpbios.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 
@@ -16,6 +15,7 @@ inline void pcibios_penalize_isa_irq(int irq, int active)
 }
 #endif                         /* CONFIG_PCI */
 
+#include "../base.h"
 #include "pnpbios.h"
 
 /* standard resource tags */
@@ -53,97 +53,43 @@ inline void pcibios_penalize_isa_irq(int irq, int active)
  * Allocated Resources
  */
 
-static void pnpbios_parse_allocated_irqresource(struct pnp_resource_table *res,
-                                               int irq)
+static void pnpbios_parse_allocated_ioresource(struct pnp_dev *dev,
+                                              int start, int len)
 {
-       int i = 0;
-
-       while (!(res->irq_resource[i].flags & IORESOURCE_UNSET)
-              && i < PNP_MAX_IRQ)
-               i++;
-       if (i < PNP_MAX_IRQ) {
-               res->irq_resource[i].flags = IORESOURCE_IRQ;    // Also clears _UNSET flag
-               if (irq == -1) {
-                       res->irq_resource[i].flags |= IORESOURCE_DISABLED;
-                       return;
-               }
-               res->irq_resource[i].start =
-                   res->irq_resource[i].end = (unsigned long)irq;
-               pcibios_penalize_isa_irq(irq, 1);
-       }
-}
+       int flags = 0;
+       int end = start + len - 1;
 
-static void pnpbios_parse_allocated_dmaresource(struct pnp_resource_table *res,
-                                               int dma)
-{
-       int i = 0;
-
-       while (i < PNP_MAX_DMA &&
-              !(res->dma_resource[i].flags & IORESOURCE_UNSET))
-               i++;
-       if (i < PNP_MAX_DMA) {
-               res->dma_resource[i].flags = IORESOURCE_DMA;    // Also clears _UNSET flag
-               if (dma == -1) {
-                       res->dma_resource[i].flags |= IORESOURCE_DISABLED;
-                       return;
-               }
-               res->dma_resource[i].start =
-                   res->dma_resource[i].end = (unsigned long)dma;
-       }
-}
+       if (len <= 0 || end >= 0x10003)
+               flags |= IORESOURCE_DISABLED;
 
-static void pnpbios_parse_allocated_ioresource(struct pnp_resource_table *res,
-                                              int io, int len)
-{
-       int i = 0;
-
-       while (!(res->port_resource[i].flags & IORESOURCE_UNSET)
-              && i < PNP_MAX_PORT)
-               i++;
-       if (i < PNP_MAX_PORT) {
-               res->port_resource[i].flags = IORESOURCE_IO;    // Also clears _UNSET flag
-               if (len <= 0 || (io + len - 1) >= 0x10003) {
-                       res->port_resource[i].flags |= IORESOURCE_DISABLED;
-                       return;
-               }
-               res->port_resource[i].start = (unsigned long)io;
-               res->port_resource[i].end = (unsigned long)(io + len - 1);
-       }
+       pnp_add_io_resource(dev, start, end, flags);
 }
 
-static void pnpbios_parse_allocated_memresource(struct pnp_resource_table *res,
-                                               int mem, int len)
+static void pnpbios_parse_allocated_memresource(struct pnp_dev *dev,
+                                               int start, int len)
 {
-       int i = 0;
-
-       while (!(res->mem_resource[i].flags & IORESOURCE_UNSET)
-              && i < PNP_MAX_MEM)
-               i++;
-       if (i < PNP_MAX_MEM) {
-               res->mem_resource[i].flags = IORESOURCE_MEM;    // Also clears _UNSET flag
-               if (len <= 0) {
-                       res->mem_resource[i].flags |= IORESOURCE_DISABLED;
-                       return;
-               }
-               res->mem_resource[i].start = (unsigned long)mem;
-               res->mem_resource[i].end = (unsigned long)(mem + len - 1);
-       }
+       int flags = 0;
+       int end = start + len - 1;
+
+       if (len <= 0)
+               flags |= IORESOURCE_DISABLED;
+
+       pnp_add_mem_resource(dev, start, end, flags);
 }
 
-static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p,
-                                                           unsigned char *end,
-                                                           struct
-                                                           pnp_resource_table
-                                                           *res)
+static unsigned char *pnpbios_parse_allocated_resource_data(struct pnp_dev *dev,
+                                                           unsigned char *p,
+                                                           unsigned char *end)
 {
        unsigned int len, tag;
-       int io, size, mask, i;
+       int io, size, mask, i, flags;
 
        if (!p)
                return NULL;
 
-       /* Blank the resource table values */
-       pnp_init_resource_table(res);
+       dev_dbg(&dev->dev, "parse allocated resources\n");
+
+       pnp_init_resources(dev);
 
        while ((char *)p < (char *)end) {
 
@@ -163,7 +109,7 @@ static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p,
                                goto len_err;
                        io = *(short *)&p[4];
                        size = *(short *)&p[10];
-                       pnpbios_parse_allocated_memresource(res, io, size);
+                       pnpbios_parse_allocated_memresource(dev, io, size);
                        break;
 
                case LARGE_TAG_ANSISTR:
@@ -179,7 +125,7 @@ static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p,
                                goto len_err;
                        io = *(int *)&p[4];
                        size = *(int *)&p[16];
-                       pnpbios_parse_allocated_memresource(res, io, size);
+                       pnpbios_parse_allocated_memresource(dev, io, size);
                        break;
 
                case LARGE_TAG_FIXEDMEM32:
@@ -187,29 +133,37 @@ static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p,
                                goto len_err;
                        io = *(int *)&p[4];
                        size = *(int *)&p[8];
-                       pnpbios_parse_allocated_memresource(res, io, size);
+                       pnpbios_parse_allocated_memresource(dev, io, size);
                        break;
 
                case SMALL_TAG_IRQ:
                        if (len < 2 || len > 3)
                                goto len_err;
+                       flags = 0;
                        io = -1;
                        mask = p[1] + p[2] * 256;
                        for (i = 0; i < 16; i++, mask = mask >> 1)
                                if (mask & 0x01)
                                        io = i;
-                       pnpbios_parse_allocated_irqresource(res, io);
+                       if (io != -1)
+                               pcibios_penalize_isa_irq(io, 1);
+                       else
+                               flags = IORESOURCE_DISABLED;
+                       pnp_add_irq_resource(dev, io, flags);
                        break;
 
                case SMALL_TAG_DMA:
                        if (len != 2)
                                goto len_err;
+                       flags = 0;
                        io = -1;
                        mask = p[1];
                        for (i = 0; i < 8; i++, mask = mask >> 1)
                                if (mask & 0x01)
                                        io = i;
-                       pnpbios_parse_allocated_dmaresource(res, io);
+                       if (io == -1)
+                               flags = IORESOURCE_DISABLED;
+                       pnp_add_dma_resource(dev, io, flags);
                        break;
 
                case SMALL_TAG_PORT:
@@ -217,7 +171,7 @@ static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p,
                                goto len_err;
                        io = p[2] + p[3] * 256;
                        size = p[7];
-                       pnpbios_parse_allocated_ioresource(res, io, size);
+                       pnpbios_parse_allocated_ioresource(dev, io, size);
                        break;
 
                case SMALL_TAG_VENDOR:
@@ -229,7 +183,7 @@ static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p,
                                goto len_err;
                        io = p[1] + p[2] * 256;
                        size = p[3];
-                       pnpbios_parse_allocated_ioresource(res, io, size);
+                       pnpbios_parse_allocated_ioresource(dev, io, size);
                        break;
 
                case SMALL_TAG_END:
@@ -239,9 +193,8 @@ static unsigned char *pnpbios_parse_allocated_resource_data(unsigned char *p,
 
                default:        /* an unkown tag */
 len_err:
-                       printk(KERN_ERR
-                              "PnPBIOS: Unknown tag '0x%x', length '%d'.\n",
-                              tag, len);
+                       dev_err(&dev->dev, "unknown tag %#x length %d\n",
+                               tag, len);
                        break;
                }
 
@@ -252,8 +205,7 @@ len_err:
                        p += len + 1;
        }
 
-       printk(KERN_ERR
-              "PnPBIOS: Resource structure does not contain an end tag.\n");
+       dev_err(&dev->dev, "no end tag in resource structure\n");
 
        return NULL;
 }
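To make the SMALL_TAG_IRQ decode above concrete, here is a stand-alone user-space rendering of the same bit scan; the three descriptor bytes are invented for the example, and only the arithmetic mirrors the code above. The two mask bytes are little-endian, and the loop keeps the highest set bit as the allocated IRQ:

#include <stdio.h>

int main(void)
{
        /* hypothetical allocated-IRQ descriptor: tag byte 0x22 is a small
         * IRQ descriptor of length 2, mask 0x0020 = bit 5 set */
        unsigned char p[] = { 0x22, 0x20, 0x00 };
        int mask = p[1] + p[2] * 256;
        int io = -1, i;

        for (i = 0; i < 16; i++, mask = mask >> 1)
                if (mask & 0x01)
                        io = i;

        printf("allocated IRQ: %d\n", io);      /* prints "allocated IRQ: 5" */
        return 0;
}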
@@ -262,7 +214,8 @@ len_err:
  * Resource Configuration Options
  */
 
-static __init void pnpbios_parse_mem_option(unsigned char *p, int size,
+static __init void pnpbios_parse_mem_option(struct pnp_dev *dev,
+                                           unsigned char *p, int size,
                                            struct pnp_option *option)
 {
        struct pnp_mem *mem;
@@ -275,10 +228,11 @@ static __init void pnpbios_parse_mem_option(unsigned char *p, int size,
        mem->align = (p[9] << 8) | p[8];
        mem->size = ((p[11] << 8) | p[10]) << 8;
        mem->flags = p[3];
-       pnp_register_mem_resource(option, mem);
+       pnp_register_mem_resource(dev, option, mem);
 }
 
-static __init void pnpbios_parse_mem32_option(unsigned char *p, int size,
+static __init void pnpbios_parse_mem32_option(struct pnp_dev *dev,
+                                             unsigned char *p, int size,
                                              struct pnp_option *option)
 {
        struct pnp_mem *mem;
@@ -291,10 +245,11 @@ static __init void pnpbios_parse_mem32_option(unsigned char *p, int size,
        mem->align = (p[15] << 24) | (p[14] << 16) | (p[13] << 8) | p[12];
        mem->size = (p[19] << 24) | (p[18] << 16) | (p[17] << 8) | p[16];
        mem->flags = p[3];
-       pnp_register_mem_resource(option, mem);
+       pnp_register_mem_resource(dev, option, mem);
 }
 
-static __init void pnpbios_parse_fixed_mem32_option(unsigned char *p, int size,
+static __init void pnpbios_parse_fixed_mem32_option(struct pnp_dev *dev,
+                                                   unsigned char *p, int size,
                                                    struct pnp_option *option)
 {
        struct pnp_mem *mem;
@@ -306,11 +261,12 @@ static __init void pnpbios_parse_fixed_mem32_option(unsigned char *p, int size,
        mem->size = (p[11] << 24) | (p[10] << 16) | (p[9] << 8) | p[8];
        mem->align = 0;
        mem->flags = p[3];
-       pnp_register_mem_resource(option, mem);
+       pnp_register_mem_resource(dev, option, mem);
 }
 
-static __init void pnpbios_parse_irq_option(unsigned char *p, int size,
-                                    struct pnp_option *option)
+static __init void pnpbios_parse_irq_option(struct pnp_dev *dev,
+                                           unsigned char *p, int size,
+                                           struct pnp_option *option)
 {
        struct pnp_irq *irq;
        unsigned long bits;
@@ -324,11 +280,12 @@ static __init void pnpbios_parse_irq_option(unsigned char *p, int size,
                irq->flags = p[3];
        else
                irq->flags = IORESOURCE_IRQ_HIGHEDGE;
-       pnp_register_irq_resource(option, irq);
+       pnp_register_irq_resource(dev, option, irq);
 }
 
-static __init void pnpbios_parse_dma_option(unsigned char *p, int size,
-                                    struct pnp_option *option)
+static __init void pnpbios_parse_dma_option(struct pnp_dev *dev,
+                                           unsigned char *p, int size,
+                                           struct pnp_option *option)
 {
        struct pnp_dma *dma;
 
@@ -337,10 +294,11 @@ static __init void pnpbios_parse_dma_option(unsigned char *p, int size,
                return;
        dma->map = p[1];
        dma->flags = p[2];
-       pnp_register_dma_resource(option, dma);
+       pnp_register_dma_resource(dev, option, dma);
 }
 
-static __init void pnpbios_parse_port_option(unsigned char *p, int size,
+static __init void pnpbios_parse_port_option(struct pnp_dev *dev,
+                                            unsigned char *p, int size,
                                             struct pnp_option *option)
 {
        struct pnp_port *port;
@@ -353,10 +311,11 @@ static __init void pnpbios_parse_port_option(unsigned char *p, int size,
        port->align = p[6];
        port->size = p[7];
        port->flags = p[1] ? PNP_PORT_FLAG_16BITADDR : 0;
-       pnp_register_port_resource(option, port);
+       pnp_register_port_resource(dev, option, port);
 }
 
-static __init void pnpbios_parse_fixed_port_option(unsigned char *p, int size,
+static __init void pnpbios_parse_fixed_port_option(struct pnp_dev *dev,
+                                                  unsigned char *p, int size,
                                                   struct pnp_option *option)
 {
        struct pnp_port *port;
@@ -368,7 +327,7 @@ static __init void pnpbios_parse_fixed_port_option(unsigned char *p, int size,
        port->size = p[3];
        port->align = 0;
        port->flags = PNP_PORT_FLAG_FIXED;
-       pnp_register_port_resource(option, port);
+       pnp_register_port_resource(dev, option, port);
 }
 
 static __init unsigned char *
@@ -382,6 +341,8 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
        if (!p)
                return NULL;
 
+       dev_dbg(&dev->dev, "parse resource options\n");
+
        option_independent = option = pnp_register_independent_option(dev);
        if (!option)
                return NULL;
@@ -402,37 +363,37 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
                case LARGE_TAG_MEM:
                        if (len != 9)
                                goto len_err;
-                       pnpbios_parse_mem_option(p, len, option);
+                       pnpbios_parse_mem_option(dev, p, len, option);
                        break;
 
                case LARGE_TAG_MEM32:
                        if (len != 17)
                                goto len_err;
-                       pnpbios_parse_mem32_option(p, len, option);
+                       pnpbios_parse_mem32_option(dev, p, len, option);
                        break;
 
                case LARGE_TAG_FIXEDMEM32:
                        if (len != 9)
                                goto len_err;
-                       pnpbios_parse_fixed_mem32_option(p, len, option);
+                       pnpbios_parse_fixed_mem32_option(dev, p, len, option);
                        break;
 
                case SMALL_TAG_IRQ:
                        if (len < 2 || len > 3)
                                goto len_err;
-                       pnpbios_parse_irq_option(p, len, option);
+                       pnpbios_parse_irq_option(dev, p, len, option);
                        break;
 
                case SMALL_TAG_DMA:
                        if (len != 2)
                                goto len_err;
-                       pnpbios_parse_dma_option(p, len, option);
+                       pnpbios_parse_dma_option(dev, p, len, option);
                        break;
 
                case SMALL_TAG_PORT:
                        if (len != 7)
                                goto len_err;
-                       pnpbios_parse_port_option(p, len, option);
+                       pnpbios_parse_port_option(dev, p, len, option);
                        break;
 
                case SMALL_TAG_VENDOR:
@@ -442,7 +403,7 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
                case SMALL_TAG_FIXEDPORT:
                        if (len != 3)
                                goto len_err;
-                       pnpbios_parse_fixed_port_option(p, len, option);
+                       pnpbios_parse_fixed_port_option(dev, p, len, option);
                        break;
 
                case SMALL_TAG_STARTDEP:
@@ -460,9 +421,10 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
                        if (len != 0)
                                goto len_err;
                        if (option_independent == option)
-                               printk(KERN_WARNING
-                                      "PnPBIOS: Missing SMALL_TAG_STARTDEP tag\n");
+                               dev_warn(&dev->dev, "missing "
+                                        "SMALL_TAG_STARTDEP tag\n");
                        option = option_independent;
+                       dev_dbg(&dev->dev, "end dependent options\n");
                        break;
 
                case SMALL_TAG_END:
@@ -470,9 +432,8 @@ pnpbios_parse_resource_option_data(unsigned char *p, unsigned char *end,
 
                default:        /* an unkown tag */
 len_err:
-                       printk(KERN_ERR
-                              "PnPBIOS: Unknown tag '0x%x', length '%d'.\n",
-                              tag, len);
+                       dev_err(&dev->dev, "unknown tag %#x length %d\n",
+                               tag, len);
                        break;
                }
 
@@ -483,8 +444,7 @@ len_err:
                        p += len + 1;
        }
 
-       printk(KERN_ERR
-              "PnPBIOS: Resource structure does not contain an end tag.\n");
+       dev_err(&dev->dev, "no end tag in resource structure\n");
 
        return NULL;
 }
@@ -493,32 +453,12 @@ len_err:
  * Compatible Device IDs
  */
 
-#define HEX(id,a) hex[((id)>>a) & 15]
-#define CHAR(id,a) (0x40 + (((id)>>a) & 31))
-
-void pnpid32_to_pnpid(u32 id, char *str)
-{
-       const char *hex = "0123456789abcdef";
-
-       id = be32_to_cpu(id);
-       str[0] = CHAR(id, 26);
-       str[1] = CHAR(id, 21);
-       str[2] = CHAR(id, 16);
-       str[3] = HEX(id, 12);
-       str[4] = HEX(id, 8);
-       str[5] = HEX(id, 4);
-       str[6] = HEX(id, 0);
-       str[7] = '\0';
-}
-
-#undef CHAR
-#undef HEX
-
 static unsigned char *pnpbios_parse_compatible_ids(unsigned char *p,
                                                   unsigned char *end,
                                                   struct pnp_dev *dev)
 {
        int len, tag;
+       u32 eisa_id;
        char id[8];
        struct pnp_id *dev_id;
 
@@ -548,13 +488,11 @@ static unsigned char *pnpbios_parse_compatible_ids(unsigned char *p,
                case SMALL_TAG_COMPATDEVID:     /* compatible ID */
                        if (len != 4)
                                goto len_err;
-                       dev_id = kzalloc(sizeof(struct pnp_id), GFP_KERNEL);
+                       eisa_id = p[1] | p[2] << 8 | p[3] << 16 | p[4] << 24;
+                       pnp_eisa_id_to_string(eisa_id & PNP_EISA_ID_MASK, id);
+                       dev_id = pnp_add_id(dev, id);
                        if (!dev_id)
                                return NULL;
-                       pnpid32_to_pnpid(p[1] | p[2] << 8 | p[3] << 16 | p[4] <<
-                                        24, id);
-                       memcpy(&dev_id->id, id, 7);
-                       pnp_add_id(dev_id, dev);
                        break;
 
                case SMALL_TAG_END:
@@ -564,9 +502,8 @@ static unsigned char *pnpbios_parse_compatible_ids(unsigned char *p,
 
                default:        /* an unkown tag */
 len_err:
-                       printk(KERN_ERR
-                              "PnPBIOS: Unknown tag '0x%x', length '%d'.\n",
-                              tag, len);
+                       dev_err(&dev->dev, "unknown tag %#x length %d\n",
+                               tag, len);
                        break;
                }
 
@@ -577,8 +514,7 @@ len_err:
                        p += len + 1;
        }
 
-       printk(KERN_ERR
-              "PnPBIOS: Resource structure does not contain an end tag.\n");
+       dev_err(&dev->dev, "no end tag in resource structure\n");
 
        return NULL;
 }
@@ -587,7 +523,8 @@ len_err:
  * Allocated Resource Encoding
  */
 
-static void pnpbios_encode_mem(unsigned char *p, struct resource *res)
+static void pnpbios_encode_mem(struct pnp_dev *dev, unsigned char *p,
+                              struct resource *res)
 {
        unsigned long base = res->start;
        unsigned long len = res->end - res->start + 1;
@@ -598,9 +535,13 @@ static void pnpbios_encode_mem(unsigned char *p, struct resource *res)
        p[7] = ((base >> 8) >> 8) & 0xff;
        p[10] = (len >> 8) & 0xff;
        p[11] = ((len >> 8) >> 8) & 0xff;
+
+       dev_dbg(&dev->dev, "  encode mem %#llx-%#llx\n",
+               (unsigned long long) res->start, (unsigned long long) res->end);
 }
 
-static void pnpbios_encode_mem32(unsigned char *p, struct resource *res)
+static void pnpbios_encode_mem32(struct pnp_dev *dev, unsigned char *p,
+                                struct resource *res)
 {
        unsigned long base = res->start;
        unsigned long len = res->end - res->start + 1;
@@ -617,9 +558,13 @@ static void pnpbios_encode_mem32(unsigned char *p, struct resource *res)
        p[17] = (len >> 8) & 0xff;
        p[18] = (len >> 16) & 0xff;
        p[19] = (len >> 24) & 0xff;
+
+       dev_dbg(&dev->dev, "  encode mem32 %#llx-%#llx\n",
+               (unsigned long long) res->start, (unsigned long long) res->end);
 }
 
-static void pnpbios_encode_fixed_mem32(unsigned char *p, struct resource *res)
+static void pnpbios_encode_fixed_mem32(struct pnp_dev *dev, unsigned char *p,
+                                      struct resource *res)
 {
        unsigned long base = res->start;
        unsigned long len = res->end - res->start + 1;
@@ -632,26 +577,38 @@ static void pnpbios_encode_fixed_mem32(unsigned char *p, struct resource *res)
        p[9] = (len >> 8) & 0xff;
        p[10] = (len >> 16) & 0xff;
        p[11] = (len >> 24) & 0xff;
+
+       dev_dbg(&dev->dev, "  encode fixed_mem32 %#llx-%#llx\n",
+               (unsigned long long) res->start, (unsigned long long) res->end);
 }
 
-static void pnpbios_encode_irq(unsigned char *p, struct resource *res)
+static void pnpbios_encode_irq(struct pnp_dev *dev, unsigned char *p,
+                              struct resource *res)
 {
        unsigned long map = 0;
 
        map = 1 << res->start;
        p[1] = map & 0xff;
        p[2] = (map >> 8) & 0xff;
+
+       dev_dbg(&dev->dev, "  encode irq %llu\n",
+               (unsigned long long)res->start);
 }
 
-static void pnpbios_encode_dma(unsigned char *p, struct resource *res)
+static void pnpbios_encode_dma(struct pnp_dev *dev, unsigned char *p,
+                              struct resource *res)
 {
        unsigned long map = 0;
 
        map = 1 << res->start;
        p[1] = map & 0xff;
+
+       dev_dbg(&dev->dev, "  encode dma %llu\n",
+               (unsigned long long)res->start);
 }
 
-static void pnpbios_encode_port(unsigned char *p, struct resource *res)
+static void pnpbios_encode_port(struct pnp_dev *dev, unsigned char *p,
+                               struct resource *res)
 {
        unsigned long base = res->start;
        unsigned long len = res->end - res->start + 1;
@@ -661,9 +618,13 @@ static void pnpbios_encode_port(unsigned char *p, struct resource *res)
        p[4] = base & 0xff;
        p[5] = (base >> 8) & 0xff;
        p[7] = len & 0xff;
+
+       dev_dbg(&dev->dev, "  encode io %#llx-%#llx\n",
+               (unsigned long long) res->start, (unsigned long long) res->end);
 }
 
-static void pnpbios_encode_fixed_port(unsigned char *p, struct resource *res)
+static void pnpbios_encode_fixed_port(struct pnp_dev *dev, unsigned char *p,
+                                     struct resource *res)
 {
        unsigned long base = res->start;
        unsigned long len = res->end - res->start + 1;
@@ -671,13 +632,15 @@ static void pnpbios_encode_fixed_port(unsigned char *p, struct resource *res)
        p[1] = base & 0xff;
        p[2] = (base >> 8) & 0xff;
        p[3] = len & 0xff;
+
+       dev_dbg(&dev->dev, "  encode fixed_io %#llx-%#llx\n",
+               (unsigned long long) res->start, (unsigned long long) res->end);
 }
 
-static unsigned char *pnpbios_encode_allocated_resource_data(unsigned char *p,
-                                                            unsigned char *end,
-                                                            struct
-                                                            pnp_resource_table
-                                                            *res)
+static unsigned char *pnpbios_encode_allocated_resource_data(struct pnp_dev
+                                                               *dev,
+                                                            unsigned char *p,
+                                                            unsigned char *end)
 {
        unsigned int len, tag;
        int port = 0, irq = 0, dma = 0, mem = 0;
@@ -701,42 +664,48 @@ static unsigned char *pnpbios_encode_allocated_resource_data(unsigned char *p,
                case LARGE_TAG_MEM:
                        if (len != 9)
                                goto len_err;
-                       pnpbios_encode_mem(p, &res->mem_resource[mem]);
+                       pnpbios_encode_mem(dev, p,
+                               pnp_get_resource(dev, IORESOURCE_MEM, mem));
                        mem++;
                        break;
 
                case LARGE_TAG_MEM32:
                        if (len != 17)
                                goto len_err;
-                       pnpbios_encode_mem32(p, &res->mem_resource[mem]);
+                       pnpbios_encode_mem32(dev, p,
+                               pnp_get_resource(dev, IORESOURCE_MEM, mem));
                        mem++;
                        break;
 
                case LARGE_TAG_FIXEDMEM32:
                        if (len != 9)
                                goto len_err;
-                       pnpbios_encode_fixed_mem32(p, &res->mem_resource[mem]);
+                       pnpbios_encode_fixed_mem32(dev, p,
+                               pnp_get_resource(dev, IORESOURCE_MEM, mem));
                        mem++;
                        break;
 
                case SMALL_TAG_IRQ:
                        if (len < 2 || len > 3)
                                goto len_err;
-                       pnpbios_encode_irq(p, &res->irq_resource[irq]);
+                       pnpbios_encode_irq(dev, p,
+                               pnp_get_resource(dev, IORESOURCE_IRQ, irq));
                        irq++;
                        break;
 
                case SMALL_TAG_DMA:
                        if (len != 2)
                                goto len_err;
-                       pnpbios_encode_dma(p, &res->dma_resource[dma]);
+                       pnpbios_encode_dma(dev, p,
+                               pnp_get_resource(dev, IORESOURCE_DMA, dma));
                        dma++;
                        break;
 
                case SMALL_TAG_PORT:
                        if (len != 7)
                                goto len_err;
-                       pnpbios_encode_port(p, &res->port_resource[port]);
+                       pnpbios_encode_port(dev, p,
+                               pnp_get_resource(dev, IORESOURCE_IO, port));
                        port++;
                        break;
 
@@ -747,7 +716,8 @@ static unsigned char *pnpbios_encode_allocated_resource_data(unsigned char *p,
                case SMALL_TAG_FIXEDPORT:
                        if (len != 3)
                                goto len_err;
-                       pnpbios_encode_fixed_port(p, &res->port_resource[port]);
+                       pnpbios_encode_fixed_port(dev, p,
+                               pnp_get_resource(dev, IORESOURCE_IO, port));
                        port++;
                        break;
 
@@ -758,9 +728,8 @@ static unsigned char *pnpbios_encode_allocated_resource_data(unsigned char *p,
 
                default:        /* an unkown tag */
 len_err:
-                       printk(KERN_ERR
-                              "PnPBIOS: Unknown tag '0x%x', length '%d'.\n",
-                              tag, len);
+                       dev_err(&dev->dev, "unknown tag %#x length %d\n",
+                               tag, len);
                        break;
                }
 
@@ -771,8 +740,7 @@ len_err:
                        p += len + 1;
        }
 
-       printk(KERN_ERR
-              "PnPBIOS: Resource structure does not contain an end tag.\n");
+       dev_err(&dev->dev, "no end tag in resource structure\n");
 
        return NULL;
 }
@@ -787,7 +755,7 @@ int __init pnpbios_parse_data_stream(struct pnp_dev *dev,
        unsigned char *p = (char *)node->data;
        unsigned char *end = (char *)(node->data + node->size);
 
-       p = pnpbios_parse_allocated_resource_data(p, end, &dev->res);
+       p = pnpbios_parse_allocated_resource_data(dev, p, end);
        if (!p)
                return -EIO;
        p = pnpbios_parse_resource_option_data(p, end, dev);
@@ -799,25 +767,25 @@ int __init pnpbios_parse_data_stream(struct pnp_dev *dev,
        return 0;
 }
 
-int pnpbios_read_resources_from_node(struct pnp_resource_table *res,
+int pnpbios_read_resources_from_node(struct pnp_dev *dev,
                                     struct pnp_bios_node *node)
 {
        unsigned char *p = (char *)node->data;
        unsigned char *end = (char *)(node->data + node->size);
 
-       p = pnpbios_parse_allocated_resource_data(p, end, res);
+       p = pnpbios_parse_allocated_resource_data(dev, p, end);
        if (!p)
                return -EIO;
        return 0;
 }
 
-int pnpbios_write_resources_to_node(struct pnp_resource_table *res,
+int pnpbios_write_resources_to_node(struct pnp_dev *dev,
                                    struct pnp_bios_node *node)
 {
        unsigned char *p = (char *)node->data;
        unsigned char *end = (char *)(node->data + node->size);
 
-       p = pnpbios_encode_allocated_resource_data(p, end, res);
+       p = pnpbios_encode_allocated_resource_data(dev, p, end);
        if (!p)
                return -EIO;
        return 0;
index 37993206ae5dd149d98672345ef5d16c6c21743c..d049a2279feac460d0dda5e4a21f2471c1b059b0 100644 (file)
@@ -49,8 +49,11 @@ static void quirk_awe32_resources(struct pnp_dev *dev)
                port2->max += 0x400;
                port3->min += 0x800;
                port3->max += 0x800;
+               dev_info(&dev->dev,
+                       "AWE32 quirk - added ioports 0x%lx and 0x%lx\n",
+                       (unsigned long)port2->min,
+                       (unsigned long)port3->min);
        }
-       printk(KERN_INFO "pnp: AWE32 quirk - adding two ports\n");
 }
 
 static void quirk_cmi8330_resources(struct pnp_dev *dev)
@@ -73,7 +76,8 @@ static void quirk_cmi8330_resources(struct pnp_dev *dev)
                            IORESOURCE_DMA_8BIT)
                                dma->map = 0x000A;
        }
-       printk(KERN_INFO "pnp: CMI8330 quirk - fixing interrupts and dma\n");
+       dev_info(&dev->dev, "CMI8330 quirk - forced possible IRQs to 5, 7, 10 "
+               "and DMA channels to 1, 3\n");
 }
 
 static void quirk_sb16audio_resources(struct pnp_dev *dev)
@@ -104,8 +108,7 @@ static void quirk_sb16audio_resources(struct pnp_dev *dev)
                changed = 1;
        }
        if (changed)
-               printk(KERN_INFO
-                      "pnp: SB audio device quirk - increasing port range\n");
+               dev_info(&dev->dev, "SB audio device quirk - increased port range\n");
 }
 
 
@@ -114,6 +117,7 @@ static void quirk_sb16audio_resources(struct pnp_dev *dev)
 static void quirk_system_pci_resources(struct pnp_dev *dev)
 {
        struct pci_dev *pdev = NULL;
+       struct resource *res;
        resource_size_t pnp_start, pnp_end, pci_start, pci_end;
        int i, j;
 
@@ -134,13 +138,15 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
 
                        pci_start = pci_resource_start(pdev, i);
                        pci_end = pci_resource_end(pdev, i);
-                       for (j = 0; j < PNP_MAX_MEM; j++) {
-                               if (!pnp_mem_valid(dev, j) ||
-                                   pnp_mem_len(dev, j) == 0)
+                       for (j = 0;
+                            (res = pnp_get_resource(dev, IORESOURCE_MEM, j));
+                            j++) {
+                               if (res->flags & IORESOURCE_UNSET ||
+                                   (res->start == 0 && res->end == 0))
                                        continue;
 
-                               pnp_start = pnp_mem_start(dev, j);
-                               pnp_end = pnp_mem_end(dev, j);
+                               pnp_start = res->start;
+                               pnp_end = res->end;
 
                                /*
                                 * If the PNP region doesn't overlap the PCI
@@ -173,7 +179,7 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
                                        pci_name(pdev), i,
                                        (unsigned long long) pci_start,
                                        (unsigned long long) pci_end);
-                               pnp_mem_flags(dev, j) = 0;
+                               res->flags = 0;
                        }
                }
        }
@@ -214,8 +220,8 @@ void pnp_fixup_device(struct pnp_dev *dev)
                        quirk = pnp_fixups[i].quirk_function;
 
 #ifdef DEBUG
-                       dev_dbg(&dev->dev, "calling quirk 0x%p", quirk);
-                       print_fn_descriptor_symbol("%s()\n",
+                       dev_dbg(&dev->dev, "calling ");
+                       print_fn_descriptor_symbol("%s()\n",
                                (unsigned long) *quirk);
 #endif
                        (*quirk)(dev);
index e50ebcffb96240937e3f43f4474857f6cdee8030..2041620d5682828a1fd872d9e769e92f76dbedb0 100644 (file)
@@ -53,6 +53,8 @@ struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev)
        if (dev->independent)
                dev_err(&dev->dev, "independent resource already registered\n");
        dev->independent = option;
+
+       dev_dbg(&dev->dev, "new independent option\n");
        return option;
 }
 
@@ -70,12 +72,18 @@ struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev,
                parent->next = option;
        } else
                dev->dependent = option;
+
+       dev_dbg(&dev->dev, "new dependent option (priority %#x)\n", priority);
        return option;
 }
 
-int pnp_register_irq_resource(struct pnp_option *option, struct pnp_irq *data)
+int pnp_register_irq_resource(struct pnp_dev *dev, struct pnp_option *option,
+                             struct pnp_irq *data)
 {
        struct pnp_irq *ptr;
+#ifdef DEBUG
+       char buf[PNP_IRQ_NR];   /* hex-encoded, so this is overkill but safe */
+#endif
 
        ptr = option->irq;
        while (ptr && ptr->next)
@@ -94,10 +102,17 @@ int pnp_register_irq_resource(struct pnp_option *option, struct pnp_irq *data)
                                pcibios_penalize_isa_irq(i, 0);
        }
 #endif
+
+#ifdef DEBUG
+       bitmap_scnprintf(buf, sizeof(buf), data->map, PNP_IRQ_NR);
+       dev_dbg(&dev->dev, "  irq bitmask %s flags %#x\n", buf,
+               data->flags);
+#endif
        return 0;
 }
 
-int pnp_register_dma_resource(struct pnp_option *option, struct pnp_dma *data)
+int pnp_register_dma_resource(struct pnp_dev *dev, struct pnp_option *option,
+                             struct pnp_dma *data)
 {
        struct pnp_dma *ptr;
 
@@ -109,10 +124,13 @@ int pnp_register_dma_resource(struct pnp_option *option, struct pnp_dma *data)
        else
                option->dma = data;
 
+       dev_dbg(&dev->dev, "  dma bitmask %#x flags %#x\n", data->map,
+               data->flags);
        return 0;
 }
 
-int pnp_register_port_resource(struct pnp_option *option, struct pnp_port *data)
+int pnp_register_port_resource(struct pnp_dev *dev, struct pnp_option *option,
+                              struct pnp_port *data)
 {
        struct pnp_port *ptr;
 
@@ -124,10 +142,14 @@ int pnp_register_port_resource(struct pnp_option *option, struct pnp_port *data)
        else
                option->port = data;
 
+       dev_dbg(&dev->dev, "  io  "
+               "min %#x max %#x align %d size %d flags %#x\n",
+               data->min, data->max, data->align, data->size, data->flags);
        return 0;
 }
 
-int pnp_register_mem_resource(struct pnp_option *option, struct pnp_mem *data)
+int pnp_register_mem_resource(struct pnp_dev *dev, struct pnp_option *option,
+                             struct pnp_mem *data)
 {
        struct pnp_mem *ptr;
 
@@ -138,6 +160,10 @@ int pnp_register_mem_resource(struct pnp_option *option, struct pnp_mem *data)
                ptr->next = data;
        else
                option->mem = data;
+
+       dev_dbg(&dev->dev, "  mem "
+               "min %#x max %#x align %d size %d flags %#x\n",
+               data->min, data->max, data->align, data->size, data->flags);
        return 0;
 }
 
@@ -213,17 +239,18 @@ void pnp_free_option(struct pnp_option *option)
 #define cannot_compare(flags) \
 ((flags) & (IORESOURCE_UNSET | IORESOURCE_DISABLED))
 
-int pnp_check_port(struct pnp_dev *dev, int idx)
+int pnp_check_port(struct pnp_dev *dev, struct resource *res)
 {
-       int tmp;
+       int i;
        struct pnp_dev *tdev;
+       struct resource *tres;
        resource_size_t *port, *end, *tport, *tend;
 
-       port = &dev->res.port_resource[idx].start;
-       end = &dev->res.port_resource[idx].end;
+       port = &res->start;
+       end = &res->end;
 
        /* if the resource doesn't exist, don't complain about it */
-       if (cannot_compare(dev->res.port_resource[idx].flags))
+       if (cannot_compare(res->flags))
                return 1;
 
        /* check if the resource is already in use, skip if the
@@ -234,18 +261,18 @@ int pnp_check_port(struct pnp_dev *dev, int idx)
        }
 
        /* check if the resource is reserved */
-       for (tmp = 0; tmp < 8; tmp++) {
-               int rport = pnp_reserve_io[tmp << 1];
-               int rend = pnp_reserve_io[(tmp << 1) + 1] + rport - 1;
+       for (i = 0; i < 8; i++) {
+               int rport = pnp_reserve_io[i << 1];
+               int rend = pnp_reserve_io[(i << 1) + 1] + rport - 1;
                if (ranged_conflict(port, end, &rport, &rend))
                        return 0;
        }
 
        /* check for internal conflicts */
-       for (tmp = 0; tmp < PNP_MAX_PORT && tmp != idx; tmp++) {
-               if (dev->res.port_resource[tmp].flags & IORESOURCE_IO) {
-                       tport = &dev->res.port_resource[tmp].start;
-                       tend = &dev->res.port_resource[tmp].end;
+       for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) {
+               if (tres != res && tres->flags & IORESOURCE_IO) {
+                       tport = &tres->start;
+                       tend = &tres->end;
                        if (ranged_conflict(port, end, tport, tend))
                                return 0;
                }
@@ -255,13 +282,14 @@ int pnp_check_port(struct pnp_dev *dev, int idx)
        pnp_for_each_dev(tdev) {
                if (tdev == dev)
                        continue;
-               for (tmp = 0; tmp < PNP_MAX_PORT; tmp++) {
-                       if (tdev->res.port_resource[tmp].flags & IORESOURCE_IO) {
-                               if (cannot_compare
-                                   (tdev->res.port_resource[tmp].flags))
+               for (i = 0;
+                    (tres = pnp_get_resource(tdev, IORESOURCE_IO, i));
+                    i++) {
+                       if (tres->flags & IORESOURCE_IO) {
+                               if (cannot_compare(tres->flags))
                                        continue;
-                               tport = &tdev->res.port_resource[tmp].start;
-                               tend = &tdev->res.port_resource[tmp].end;
+                               tport = &tres->start;
+                               tend = &tres->end;
                                if (ranged_conflict(port, end, tport, tend))
                                        return 0;
                        }
@@ -271,17 +299,18 @@ int pnp_check_port(struct pnp_dev *dev, int idx)
        return 1;
 }
 
-int pnp_check_mem(struct pnp_dev *dev, int idx)
+int pnp_check_mem(struct pnp_dev *dev, struct resource *res)
 {
-       int tmp;
+       int i;
        struct pnp_dev *tdev;
+       struct resource *tres;
        resource_size_t *addr, *end, *taddr, *tend;
 
-       addr = &dev->res.mem_resource[idx].start;
-       end = &dev->res.mem_resource[idx].end;
+       addr = &res->start;
+       end = &res->end;
 
        /* if the resource doesn't exist, don't complain about it */
-       if (cannot_compare(dev->res.mem_resource[idx].flags))
+       if (cannot_compare(res->flags))
                return 1;
 
        /* check if the resource is already in use, skip if the
@@ -292,18 +321,18 @@ int pnp_check_mem(struct pnp_dev *dev, int idx)
        }
 
        /* check if the resource is reserved */
-       for (tmp = 0; tmp < 8; tmp++) {
-               int raddr = pnp_reserve_mem[tmp << 1];
-               int rend = pnp_reserve_mem[(tmp << 1) + 1] + raddr - 1;
+       for (i = 0; i < 8; i++) {
+               int raddr = pnp_reserve_mem[i << 1];
+               int rend = pnp_reserve_mem[(i << 1) + 1] + raddr - 1;
                if (ranged_conflict(addr, end, &raddr, &rend))
                        return 0;
        }
 
        /* check for internal conflicts */
-       for (tmp = 0; tmp < PNP_MAX_MEM && tmp != idx; tmp++) {
-               if (dev->res.mem_resource[tmp].flags & IORESOURCE_MEM) {
-                       taddr = &dev->res.mem_resource[tmp].start;
-                       tend = &dev->res.mem_resource[tmp].end;
+       for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) {
+               if (tres != res && tres->flags & IORESOURCE_MEM) {
+                       taddr = &tres->start;
+                       tend = &tres->end;
                        if (ranged_conflict(addr, end, taddr, tend))
                                return 0;
                }
@@ -313,13 +342,14 @@ int pnp_check_mem(struct pnp_dev *dev, int idx)
        pnp_for_each_dev(tdev) {
                if (tdev == dev)
                        continue;
-               for (tmp = 0; tmp < PNP_MAX_MEM; tmp++) {
-                       if (tdev->res.mem_resource[tmp].flags & IORESOURCE_MEM) {
-                               if (cannot_compare
-                                   (tdev->res.mem_resource[tmp].flags))
+               for (i = 0;
+                    (tres = pnp_get_resource(tdev, IORESOURCE_MEM, i));
+                    i++) {
+                       if (tres->flags & IORESOURCE_MEM) {
+                               if (cannot_compare(tres->flags))
                                        continue;
-                               taddr = &tdev->res.mem_resource[tmp].start;
-                               tend = &tdev->res.mem_resource[tmp].end;
+                               taddr = &tres->start;
+                               tend = &tres->end;
                                if (ranged_conflict(addr, end, taddr, tend))
                                        return 0;
                        }
@@ -334,14 +364,17 @@ static irqreturn_t pnp_test_handler(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-int pnp_check_irq(struct pnp_dev *dev, int idx)
+int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
 {
-       int tmp;
+       int i;
        struct pnp_dev *tdev;
-       resource_size_t *irq = &dev->res.irq_resource[idx].start;
+       struct resource *tres;
+       resource_size_t *irq;
+
+       irq = &res->start;
 
        /* if the resource doesn't exist, don't complain about it */
-       if (cannot_compare(dev->res.irq_resource[idx].flags))
+       if (cannot_compare(res->flags))
                return 1;
 
        /* check if the resource is valid */
@@ -349,15 +382,15 @@ int pnp_check_irq(struct pnp_dev *dev, int idx)
                return 0;
 
        /* check if the resource is reserved */
-       for (tmp = 0; tmp < 16; tmp++) {
-               if (pnp_reserve_irq[tmp] == *irq)
+       for (i = 0; i < 16; i++) {
+               if (pnp_reserve_irq[i] == *irq)
                        return 0;
        }
 
        /* check for internal conflicts */
-       for (tmp = 0; tmp < PNP_MAX_IRQ && tmp != idx; tmp++) {
-               if (dev->res.irq_resource[tmp].flags & IORESOURCE_IRQ) {
-                       if (dev->res.irq_resource[tmp].start == *irq)
+       for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_IRQ, i)); i++) {
+               if (tres != res && tres->flags & IORESOURCE_IRQ) {
+                       if (tres->start == *irq)
                                return 0;
                }
        }
@@ -388,12 +421,13 @@ int pnp_check_irq(struct pnp_dev *dev, int idx)
        pnp_for_each_dev(tdev) {
                if (tdev == dev)
                        continue;
-               for (tmp = 0; tmp < PNP_MAX_IRQ; tmp++) {
-                       if (tdev->res.irq_resource[tmp].flags & IORESOURCE_IRQ) {
-                               if (cannot_compare
-                                   (tdev->res.irq_resource[tmp].flags))
+               for (i = 0;
+                    (tres = pnp_get_resource(tdev, IORESOURCE_IRQ, i));
+                    i++) {
+                       if (tres->flags & IORESOURCE_IRQ) {
+                               if (cannot_compare(tres->flags))
                                        continue;
-                               if ((tdev->res.irq_resource[tmp].start == *irq))
+                               if (tres->start == *irq)
                                        return 0;
                        }
                }
@@ -402,15 +436,18 @@ int pnp_check_irq(struct pnp_dev *dev, int idx)
        return 1;
 }
 
-int pnp_check_dma(struct pnp_dev *dev, int idx)
+int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
 {
 #ifndef CONFIG_IA64
-       int tmp;
+       int i;
        struct pnp_dev *tdev;
-       resource_size_t *dma = &dev->res.dma_resource[idx].start;
+       struct resource *tres;
+       resource_size_t *dma;
+
+       dma = &res->start;
 
        /* if the resource doesn't exist, don't complain about it */
-       if (cannot_compare(dev->res.dma_resource[idx].flags))
+       if (cannot_compare(res->flags))
                return 1;
 
        /* check if the resource is valid */
@@ -418,15 +455,15 @@ int pnp_check_dma(struct pnp_dev *dev, int idx)
                return 0;
 
        /* check if the resource is reserved */
-       for (tmp = 0; tmp < 8; tmp++) {
-               if (pnp_reserve_dma[tmp] == *dma)
+       for (i = 0; i < 8; i++) {
+               if (pnp_reserve_dma[i] == *dma)
                        return 0;
        }
 
        /* check for internal conflicts */
-       for (tmp = 0; tmp < PNP_MAX_DMA && tmp != idx; tmp++) {
-               if (dev->res.dma_resource[tmp].flags & IORESOURCE_DMA) {
-                       if (dev->res.dma_resource[tmp].start == *dma)
+       for (i = 0; (tres = pnp_get_resource(dev, IORESOURCE_DMA, i)); i++) {
+               if (tres != res && tres->flags & IORESOURCE_DMA) {
+                       if (tres->start == *dma)
                                return 0;
                }
        }
@@ -443,12 +480,13 @@ int pnp_check_dma(struct pnp_dev *dev, int idx)
        pnp_for_each_dev(tdev) {
                if (tdev == dev)
                        continue;
-               for (tmp = 0; tmp < PNP_MAX_DMA; tmp++) {
-                       if (tdev->res.dma_resource[tmp].flags & IORESOURCE_DMA) {
-                               if (cannot_compare
-                                   (tdev->res.dma_resource[tmp].flags))
+               for (i = 0;
+                    (tres = pnp_get_resource(tdev, IORESOURCE_DMA, i));
+                    i++) {
+                       if (tres->flags & IORESOURCE_DMA) {
+                               if (cannot_compare(tres->flags))
                                        continue;
-                               if ((tdev->res.dma_resource[tmp].start == *dma))
+                               if (tres->start == *dma)
                                        return 0;
                        }
                }
@@ -461,6 +499,193 @@ int pnp_check_dma(struct pnp_dev *dev, int idx)
 #endif
 }
 
+struct pnp_resource *pnp_get_pnp_resource(struct pnp_dev *dev,
+                                         unsigned int type, unsigned int num)
+{
+       struct pnp_resource_table *res = dev->res;
+
+       switch (type) {
+       case IORESOURCE_IO:
+               if (num >= PNP_MAX_PORT)
+                       return NULL;
+               return &res->port[num];
+       case IORESOURCE_MEM:
+               if (num >= PNP_MAX_MEM)
+                       return NULL;
+               return &res->mem[num];
+       case IORESOURCE_IRQ:
+               if (num >= PNP_MAX_IRQ)
+                       return NULL;
+               return &res->irq[num];
+       case IORESOURCE_DMA:
+               if (num >= PNP_MAX_DMA)
+                       return NULL;
+               return &res->dma[num];
+       }
+       return NULL;
+}
+
+struct resource *pnp_get_resource(struct pnp_dev *dev,
+                                 unsigned int type, unsigned int num)
+{
+       struct pnp_resource *pnp_res;
+
+       pnp_res = pnp_get_pnp_resource(dev, type, num);
+       if (pnp_res)
+               return &pnp_res->res;
+
+       return NULL;
+}
+EXPORT_SYMBOL(pnp_get_resource);
+
+static struct pnp_resource *pnp_new_resource(struct pnp_dev *dev, int type)
+{
+       struct pnp_resource *pnp_res;
+       int i;
+
+       switch (type) {
+       case IORESOURCE_IO:
+               for (i = 0; i < PNP_MAX_PORT; i++) {
+                       pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, i);
+                       if (pnp_res && !pnp_resource_valid(&pnp_res->res))
+                               return pnp_res;
+               }
+               break;
+       case IORESOURCE_MEM:
+               for (i = 0; i < PNP_MAX_MEM; i++) {
+                       pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, i);
+                       if (pnp_res && !pnp_resource_valid(&pnp_res->res))
+                               return pnp_res;
+               }
+               break;
+       case IORESOURCE_IRQ:
+               for (i = 0; i < PNP_MAX_IRQ; i++) {
+                       pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, i);
+                       if (pnp_res && !pnp_resource_valid(&pnp_res->res))
+                               return pnp_res;
+               }
+               break;
+       case IORESOURCE_DMA:
+               for (i = 0; i < PNP_MAX_DMA; i++) {
+                       pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, i);
+                       if (pnp_res && !pnp_resource_valid(&pnp_res->res))
+                               return pnp_res;
+               }
+               break;
+       }
+       return NULL;
+}
+
+struct pnp_resource *pnp_add_irq_resource(struct pnp_dev *dev, int irq,
+                                         int flags)
+{
+       struct pnp_resource *pnp_res;
+       struct resource *res;
+       static unsigned char warned;
+
+       pnp_res = pnp_new_resource(dev, IORESOURCE_IRQ);
+       if (!pnp_res) {
+               if (!warned) {
+                       dev_err(&dev->dev, "can't add resource for IRQ %d\n",
+                               irq);
+                       warned = 1;
+               }
+               return NULL;
+       }
+
+       res = &pnp_res->res;
+       res->flags = IORESOURCE_IRQ | flags;
+       res->start = irq;
+       res->end = irq;
+
+       dev_dbg(&dev->dev, "  add irq %d flags %#x\n", irq, flags);
+       return pnp_res;
+}
+
+struct pnp_resource *pnp_add_dma_resource(struct pnp_dev *dev, int dma,
+                                         int flags)
+{
+       struct pnp_resource *pnp_res;
+       struct resource *res;
+       static unsigned char warned;
+
+       pnp_res = pnp_new_resource(dev, IORESOURCE_DMA);
+       if (!pnp_res) {
+               if (!warned) {
+                       dev_err(&dev->dev, "can't add resource for DMA %d\n",
+                               dma);
+                       warned = 1;
+               }
+               return NULL;
+       }
+
+       res = &pnp_res->res;
+       res->flags = IORESOURCE_DMA | flags;
+       res->start = dma;
+       res->end = dma;
+
+       dev_dbg(&dev->dev, "  add dma %d flags %#x\n", dma, flags);
+       return pnp_res;
+}
+
+struct pnp_resource *pnp_add_io_resource(struct pnp_dev *dev,
+                                        resource_size_t start,
+                                        resource_size_t end, int flags)
+{
+       struct pnp_resource *pnp_res;
+       struct resource *res;
+       static unsigned char warned;
+
+       pnp_res = pnp_new_resource(dev, IORESOURCE_IO);
+       if (!pnp_res) {
+               if (!warned) {
+                       dev_err(&dev->dev, "can't add resource for IO "
+                               "%#llx-%#llx\n",(unsigned long long) start,
+                               (unsigned long long) end);
+                       warned = 1;
+               }
+               return NULL;
+       }
+
+       res = &pnp_res->res;
+       res->flags = IORESOURCE_IO | flags;
+       res->start = start;
+       res->end = end;
+
+       dev_dbg(&dev->dev, "  add io  %#llx-%#llx flags %#x\n",
+               (unsigned long long) start, (unsigned long long) end, flags);
+       return pnp_res;
+}
+
+struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev,
+                                         resource_size_t start,
+                                         resource_size_t end, int flags)
+{
+       struct pnp_resource *pnp_res;
+       struct resource *res;
+       static unsigned char warned;
+
+       pnp_res = pnp_new_resource(dev, IORESOURCE_MEM);
+       if (!pnp_res) {
+               if (!warned) {
+                       dev_err(&dev->dev, "can't add resource for MEM "
+                               "%#llx-%#llx\n",(unsigned long long) start,
+                               (unsigned long long) end);
+                       warned = 1;
+               }
+               return NULL;
+       }
+
+       res = &pnp_res->res;
+       res->flags = IORESOURCE_MEM | flags;
+       res->start = start;
+       res->end = end;
+
+       dev_dbg(&dev->dev, "  add mem %#llx-%#llx flags %#x\n",
+               (unsigned long long) start, (unsigned long long) end, flags);
+       return pnp_res;
+}
+
 /* format is: pnp_reserve_irq=irq1[,irq2] .... */
 static int __init pnp_setup_reserve_irq(char *str)
 {
index 13c608f5fb302864b7bae16defaf2d95365ea815..3eba85ed729cfb28634e9bdc65f91adb066080c8 100644 (file)
@@ -25,3 +25,66 @@ int pnp_is_active(struct pnp_dev *dev)
 }
 
 EXPORT_SYMBOL(pnp_is_active);
+
+/*
+ * Functionally similar to acpi_ex_eisa_id_to_string(), but that's
+ * buried in the ACPI CA, and we can't depend on it being present.
+ */
+void pnp_eisa_id_to_string(u32 id, char *str)
+{
+       id = be32_to_cpu(id);
+
+       /*
+        * According to the specs, the first three characters are five-bit
+        * compressed ASCII, and the left-over high order bit should be zero.
+        * However, the Linux ISAPNP code historically used six bits for the
+        * first character, and there seem to be IDs that depend on that,
+        * e.g., "nEC8241" in the Linux 8250_pnp serial driver and the
+        * FreeBSD sys/pc98/cbus/sio_cbus.c driver.
+        */
+       str[0] = 'A' + ((id >> 26) & 0x3f) - 1;
+       str[1] = 'A' + ((id >> 21) & 0x1f) - 1;
+       str[2] = 'A' + ((id >> 16) & 0x1f) - 1;
+       str[3] = hex_asc((id >> 12) & 0xf);
+       str[4] = hex_asc((id >>  8) & 0xf);
+       str[5] = hex_asc((id >>  4) & 0xf);
+       str[6] = hex_asc((id >>  0) & 0xf);
+       str[7] = '\0';
+}
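Editor's aside: the decode above can be checked in plain user-space C. The sketch below mirrors the arithmetic after be32_to_cpu(), using sprintf() in place of hex_asc(); the ID value is constructed for illustration (vendor letters encode as 'A' = 1 in 5-bit fields, with the quirky 6-bit first field noted in the comment).

#include <stdio.h>

int main(void)
{
        /* "PNP0C02": P = 16, N = 14, P = 16, product 0x0c02 */
        unsigned int id = (16u << 26) | (14u << 21) | (16u << 16) | 0x0c02;
        char str[8];

        str[0] = 'A' + ((id >> 26) & 0x3f) - 1;
        str[1] = 'A' + ((id >> 21) & 0x1f) - 1;
        str[2] = 'A' + ((id >> 16) & 0x1f) - 1;
        sprintf(str + 3, "%04X", id & 0xffff);
        printf("%s\n", str);    /* prints PNP0C02 */
        return 0;
}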
+
+void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
+{
+#ifdef DEBUG
+       struct resource *res;
+       int i;
+
+       dev_dbg(&dev->dev, "current resources: %s\n", desc);
+
+       for (i = 0; i < PNP_MAX_IRQ; i++) {
+               res = pnp_get_resource(dev, IORESOURCE_IRQ, i);
+               if (res && !(res->flags & IORESOURCE_UNSET))
+                       dev_dbg(&dev->dev, "  irq %lld flags %#lx\n",
+                               (unsigned long long) res->start, res->flags);
+       }
+       for (i = 0; i < PNP_MAX_DMA; i++) {
+               res = pnp_get_resource(dev, IORESOURCE_DMA, i);
+               if (res && !(res->flags & IORESOURCE_UNSET))
+                       dev_dbg(&dev->dev, "  dma %lld flags %#lx\n",
+                               (unsigned long long) res->start, res->flags);
+       }
+       for (i = 0; i < PNP_MAX_PORT; i++) {
+               res = pnp_get_resource(dev, IORESOURCE_IO, i);
+               if (res && !(res->flags & IORESOURCE_UNSET))
+                       dev_dbg(&dev->dev, "  io  %#llx-%#llx flags %#lx\n",
+                               (unsigned long long) res->start,
+                               (unsigned long long) res->end, res->flags);
+       }
+       for (i = 0; i < PNP_MAX_MEM; i++) {
+               res = pnp_get_resource(dev, IORESOURCE_MEM, i);
+               if (res && !(res->flags & IORESOURCE_UNSET))
+                       dev_dbg(&dev->dev, "  mem %#llx-%#llx flags %#lx\n",
+                               (unsigned long long) res->start,
+                               (unsigned long long) res->end, res->flags);
+       }
+#endif
+}
index 55c4563986b38aebc36b158eb7c9a649d7105d5c..9c2496dbeee45d0042ff3d814402a212b5f22d9b 100644 (file)
@@ -56,14 +56,15 @@ static void reserve_range(struct pnp_dev *dev, resource_size_t start,
 
 static void reserve_resources_of_dev(struct pnp_dev *dev)
 {
+       struct resource *res;
        int i;
 
-       for (i = 0; i < PNP_MAX_PORT; i++) {
-               if (!pnp_port_valid(dev, i))
+       for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_IO, i)); i++) {
+               if (res->flags & IORESOURCE_UNSET)
                        continue;
-               if (pnp_port_start(dev, i) == 0)
+               if (res->start == 0)
                        continue;       /* disabled */
-               if (pnp_port_start(dev, i) < 0x100)
+               if (res->start < 0x100)
                        /*
                         * Below 0x100 is only standard PC hardware
                         * (pics, kbd, timer, dma, ...)
@@ -73,19 +74,17 @@ static void reserve_resources_of_dev(struct pnp_dev *dev)
                         * So, do nothing
                         */
                        continue;
-               if (pnp_port_end(dev, i) < pnp_port_start(dev, i))
+               if (res->end < res->start)
                        continue;       /* invalid */
 
-               reserve_range(dev, pnp_port_start(dev, i),
-                             pnp_port_end(dev, i), 1);
+               reserve_range(dev, res->start, res->end, 1);
        }
 
-       for (i = 0; i < PNP_MAX_MEM; i++) {
-               if (!pnp_mem_valid(dev, i))
+       for (i = 0; (res = pnp_get_resource(dev, IORESOURCE_MEM, i)); i++) {
+               if (res->flags & IORESOURCE_UNSET)
                        continue;
 
-               reserve_range(dev, pnp_mem_start(dev, i),
-                             pnp_mem_end(dev, i), 0);
+               reserve_range(dev, res->start, res->end, 0);
        }
 }
 
index bdb9b7285b3d7b6b0afc744f3be084d01c1ca11b..71be36f18709e80771719103941e40c9f653f922 100644 (file)
@@ -262,7 +262,7 @@ static void ds2760_battery_work(struct work_struct *work)
                struct ds2760_device_info, monitor_work.work);
        const int interval = HZ * 60;
 
-       dev_dbg(di->dev, "%s\n", __FUNCTION__);
+       dev_dbg(di->dev, "%s\n", __func__);
 
        ds2760_battery_update_status(di);
        queue_delayed_work(di->monitor_wqueue, &di->monitor_work, interval);
@@ -275,7 +275,7 @@ static void ds2760_battery_external_power_changed(struct power_supply *psy)
 {
        struct ds2760_device_info *di = to_ds2760_device_info(psy);
 
-       dev_dbg(di->dev, "%s\n", __FUNCTION__);
+       dev_dbg(di->dev, "%s\n", __func__);
 
        cancel_delayed_work(&di->monitor_work);
        queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10);
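Editor's aside: the __FUNCTION__ to __func__ conversions here (and in the other drivers below) swap the GCC-specific spelling for the identifier standardized in C99; both expand to the enclosing function's name. A trivial user-space illustration:

#include <stdio.h>

/* __func__ (C99) replaces the GCC extension __FUNCTION__ */
static void show_where(void)
{
        printf("in %s\n", __func__);    /* prints "in show_where" */
}

int main(void)
{
        show_where();
        return 0;
}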
index af7a231092a461052af4408f6e127901622e4f29..ab1e8289f07f4a23ca08899635e2f3e4dc02330d 100644 (file)
@@ -315,7 +315,6 @@ static int __init olpc_bat_init(void)
        if (ret)
                goto battery_failed;
 
-       olpc_register_battery_callback(&olpc_battery_trigger_uevent);
        goto success;
 
 battery_failed:
@@ -328,7 +327,6 @@ success:
 
 static void __exit olpc_bat_exit(void)
 {
-       olpc_deregister_battery_callback();
        power_supply_unregister(&olpc_bat);
        power_supply_unregister(&olpc_ac);
        platform_device_unregister(bat_pdev);
index 03d6a38464ef8ca8d27c589b35735b0452d1aa97..138dd76ee347b005aaf21b01f3b68a0869731045 100644 (file)
@@ -39,7 +39,7 @@ static void power_supply_changed_work(struct work_struct *work)
        struct power_supply *psy = container_of(work, struct power_supply,
                                                changed_work);
 
-       dev_dbg(psy->dev, "%s\n", __FUNCTION__);
+       dev_dbg(psy->dev, "%s\n", __func__);
 
        class_for_each_device(power_supply_class, psy,
                              __power_supply_changed_work);
@@ -51,7 +51,7 @@ static void power_supply_changed_work(struct work_struct *work)
 
 void power_supply_changed(struct power_supply *psy)
 {
-       dev_dbg(psy->dev, "%s\n", __FUNCTION__);
+       dev_dbg(psy->dev, "%s\n", __func__);
 
        schedule_work(&psy->changed_work);
 }
@@ -82,7 +82,7 @@ int power_supply_am_i_supplied(struct power_supply *psy)
        error = class_for_each_device(power_supply_class, psy,
                                      __power_supply_am_i_supplied);
 
-       dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, error);
+       dev_dbg(psy->dev, "%s %d\n", __func__, error);
 
        return error;
 }
index fa3034f85c382d4a3dc3b5bdb6ed36469c46042a..2dece40c544f2098dae9b94ae110f31c8f50be31 100644 (file)
@@ -24,7 +24,7 @@ static void power_supply_update_bat_leds(struct power_supply *psy)
        if (psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &status))
                return;
 
-       dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, status.intval);
+       dev_dbg(psy->dev, "%s %d\n", __func__, status.intval);
 
        switch (status.intval) {
        case POWER_SUPPLY_STATUS_FULL:
@@ -101,7 +101,7 @@ static void power_supply_update_gen_leds(struct power_supply *psy)
        if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &online))
                return;
 
-       dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, online.intval);
+       dev_dbg(psy->dev, "%s %d\n", __func__, online.intval);
 
        if (online.intval)
                led_trigger_event(psy->online_trig, LED_FULL);
index 4142115d298ef9a4e0cbd16190fa7ae039201a91..c32822ad84a40b6e0611557121b9aa1793104c16 100644 (file)
@@ -1,14 +1,6 @@
 #
 # RapidIO configuration
 #
-config RAPIDIO_8_BIT_TRANSPORT
-       bool "8-bit transport addressing"
-       depends on RAPIDIO
-       ---help---
-         By default, the kernel assumes a 16-bit addressed RapidIO
-         network. By selecting this option, the kernel will support
-         an 8-bit addressed network.
-
 config RAPIDIO_DISC_TIMEOUT
        int "Discovery timeout duration (seconds)"
        depends on RAPIDIO
index 8b56bbdd011ec0d5bf9870ca36abeda25caff49e..a3824baca2e51c920958731a9daf4ade575c1397 100644 (file)
@@ -48,7 +48,7 @@ int __rio_local_read_config_##size \
        u32 data = 0;                                                   \
        if (RIO_##size##_BAD) return RIO_BAD_SIZE;                      \
        spin_lock_irqsave(&rio_config_lock, flags);                     \
-       res = mport->ops->lcread(mport->id, offset, len, &data);        \
+       res = mport->ops->lcread(mport, mport->id, offset, len, &data); \
        *value = (type)data;                                            \
        spin_unlock_irqrestore(&rio_config_lock, flags);                \
        return res;                                                     \
@@ -71,7 +71,7 @@ int __rio_local_write_config_##size \
        unsigned long flags;                                            \
        if (RIO_##size##_BAD) return RIO_BAD_SIZE;                      \
        spin_lock_irqsave(&rio_config_lock, flags);                     \
-       res = mport->ops->lcwrite(mport->id, offset, len, value);       \
+       res = mport->ops->lcwrite(mport, mport->id, offset, len, value);\
        spin_unlock_irqrestore(&rio_config_lock, flags);                \
        return res;                                                     \
 }
@@ -108,7 +108,7 @@ int rio_mport_read_config_##size \
        u32 data = 0;                                                   \
        if (RIO_##size##_BAD) return RIO_BAD_SIZE;                      \
        spin_lock_irqsave(&rio_config_lock, flags);                     \
-       res = mport->ops->cread(mport->id, destid, hopcount, offset, len, &data); \
+       res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \
        *value = (type)data;                                            \
        spin_unlock_irqrestore(&rio_config_lock, flags);                \
        return res;                                                     \
@@ -131,7 +131,7 @@ int rio_mport_write_config_##size \
        unsigned long flags;                                            \
        if (RIO_##size##_BAD) return RIO_BAD_SIZE;                      \
        spin_lock_irqsave(&rio_config_lock, flags);                     \
-       res = mport->ops->cwrite(mport->id, destid, hopcount, offset, len, value); \
+       res = mport->ops->cwrite(mport, mport->id, destid, hopcount, offset, len, value); \
        spin_unlock_irqrestore(&rio_config_lock, flags);                \
        return res;                                                     \
 }
@@ -166,7 +166,7 @@ int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data)
        unsigned long flags;
 
        spin_lock_irqsave(&rio_doorbell_lock, flags);
-       res = mport->ops->dsend(mport->id, destid, data);
+       res = mport->ops->dsend(mport, mport->id, destid, data);
        spin_unlock_irqrestore(&rio_doorbell_lock, flags);
 
        return res;
index 44420723a359fb578934c05715dc2b9c58a3f480..a926c896475ebc3839ef63e92b1bffd20c62ee82 100644 (file)
@@ -73,7 +73,7 @@ static u16 rio_get_device_id(struct rio_mport *port, u16 destid, u8 hopcount)
 
        rio_mport_read_config_32(port, destid, hopcount, RIO_DID_CSR, &result);
 
-       return RIO_GET_DID(result);
+       return RIO_GET_DID(port->sys_size, result);
 }
 
 /**
@@ -88,7 +88,7 @@ static u16 rio_get_device_id(struct rio_mport *port, u16 destid, u8 hopcount)
 static void rio_set_device_id(struct rio_mport *port, u16 destid, u8 hopcount, u16 did)
 {
        rio_mport_write_config_32(port, destid, hopcount, RIO_DID_CSR,
-                                 RIO_SET_DID(did));
+                                 RIO_SET_DID(port->sys_size, did));
 }
 
 /**
@@ -100,7 +100,8 @@ static void rio_set_device_id(struct rio_mport *port, u16 destid, u8 hopcount, u
  */
 static void rio_local_set_device_id(struct rio_mport *port, u16 did)
 {
-       rio_local_write_config_32(port, RIO_DID_CSR, RIO_SET_DID(did));
+       rio_local_write_config_32(port, RIO_DID_CSR, RIO_SET_DID(port->sys_size,
+                               did));
 }
 
 /**
@@ -350,8 +351,18 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
                rswitch->switchid = next_switchid;
                rswitch->hopcount = hopcount;
                rswitch->destid = destid;
+               rswitch->route_table = kzalloc(sizeof(u8)*
+                                       RIO_MAX_ROUTE_ENTRIES(port->sys_size),
+                                       GFP_KERNEL);
+               if (!rswitch->route_table) {
+                       kfree(rdev);
+                       rdev = NULL;
+                       kfree(rswitch);
+                       goto out;
+               }
                /* Initialize switch route table */
-               for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES; rdid++)
+               for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES(port->sys_size);
+                               rdid++)
                        rswitch->route_table[rdid] = RIO_INVALID_ROUTE;
                rdev->rswitch = rswitch;
                sprintf(rio_name(rdev), "%02x:s:%04x", rdev->net->id,
@@ -480,7 +491,7 @@ static u16 rio_get_host_deviceid_lock(struct rio_mport *port, u8 hopcount)
 {
        u32 result;
 
-       rio_mport_read_config_32(port, RIO_ANY_DESTID, hopcount,
+       rio_mport_read_config_32(port, RIO_ANY_DESTID(port->sys_size), hopcount,
                                 RIO_HOST_DID_LOCK_CSR, &result);
 
        return (u16) (result & 0xffff);
@@ -571,14 +582,16 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
        }
 
        /* Attempt to acquire device lock */
-       rio_mport_write_config_32(port, RIO_ANY_DESTID, hopcount,
+       rio_mport_write_config_32(port, RIO_ANY_DESTID(port->sys_size),
+                                 hopcount,
                                  RIO_HOST_DID_LOCK_CSR, port->host_deviceid);
        while ((tmp = rio_get_host_deviceid_lock(port, hopcount))
               < port->host_deviceid) {
                /* Delay a bit */
                mdelay(1);
                /* Attempt to acquire device lock again */
-               rio_mport_write_config_32(port, RIO_ANY_DESTID, hopcount,
+               rio_mport_write_config_32(port, RIO_ANY_DESTID(port->sys_size),
+                                         hopcount,
                                          RIO_HOST_DID_LOCK_CSR,
                                          port->host_deviceid);
        }
@@ -590,7 +603,9 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
        }
 
        /* Setup new RIO device */
-       if ((rdev = rio_setup_device(net, port, RIO_ANY_DESTID, hopcount, 1))) {
+       rdev = rio_setup_device(net, port, RIO_ANY_DESTID(port->sys_size),
+                                       hopcount, 1);
+       if (rdev) {
                /* Add device to the global and bus/net specific list. */
                list_add_tail(&rdev->net_list, &net->devices);
        } else
@@ -598,7 +613,8 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
 
        if (rio_is_switch(rdev)) {
                next_switchid++;
-               sw_inport = rio_get_swpinfo_inport(port, RIO_ANY_DESTID, hopcount);
+               sw_inport = rio_get_swpinfo_inport(port,
+                               RIO_ANY_DESTID(port->sys_size), hopcount);
                rio_route_add_entry(port, rdev->rswitch, RIO_GLOBAL_TABLE,
                                    port->host_deviceid, sw_inport);
                rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
@@ -612,7 +628,8 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
                }
 
                num_ports =
-                   rio_get_swpinfo_tports(port, RIO_ANY_DESTID, hopcount);
+                   rio_get_swpinfo_tports(port, RIO_ANY_DESTID(port->sys_size),
+                                               hopcount);
                pr_debug(
                    "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
                    rio_name(rdev), rdev->vid, rdev->did, num_ports);
@@ -624,13 +641,15 @@ static int rio_enum_peer(struct rio_net *net, struct rio_mport *port,
                        cur_destid = next_destid;
 
                        if (rio_sport_is_active
-                           (port, RIO_ANY_DESTID, hopcount, port_num)) {
+                           (port, RIO_ANY_DESTID(port->sys_size), hopcount,
+                            port_num)) {
                                pr_debug(
                                    "RIO: scanning device on port %d\n",
                                    port_num);
                                rio_route_add_entry(port, rdev->rswitch,
-                                                   RIO_GLOBAL_TABLE,
-                                                   RIO_ANY_DESTID, port_num);
+                                               RIO_GLOBAL_TABLE,
+                                               RIO_ANY_DESTID(port->sys_size),
+                                               port_num);
 
                                if (rio_enum_peer(net, port, hopcount + 1) < 0)
                                        return -1;
@@ -735,7 +754,8 @@ rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid,
                                pr_debug(
                                    "RIO: scanning device on port %d\n",
                                    port_num);
-                               for (ndestid = 0; ndestid < RIO_ANY_DESTID;
+                               for (ndestid = 0;
+                                    ndestid < RIO_ANY_DESTID(port->sys_size);
                                     ndestid++) {
                                        rio_route_get_entry(port, rdev->rswitch,
                                                            RIO_GLOBAL_TABLE,
@@ -917,7 +937,9 @@ static void rio_build_route_tables(void)
 
        list_for_each_entry(rdev, &rio_devices, global_list)
            if (rio_is_switch(rdev))
-               for (i = 0; i < RIO_MAX_ROUTE_ENTRIES; i++) {
+               for (i = 0;
+                    i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
+                    i++) {
                        if (rio_route_get_entry
                            (rdev->net->hport, rdev->rswitch, RIO_GLOBAL_TABLE,
                             i, &sport) < 0)
@@ -981,7 +1003,8 @@ int rio_disc_mport(struct rio_mport *mport)
                del_timer_sync(&rio_enum_timer);
 
                pr_debug("done\n");
-               if (rio_disc_peer(net, mport, RIO_ANY_DESTID, 0) < 0) {
+               if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size),
+                                       0) < 0) {
                        printk(KERN_INFO
                               "RIO: master port %d device has failed discovery\n",
                               mport->id);
index 659e31164cf08b3c94cb6b6d09821f79e9f97793..97a147f050d623addeed0927f93fa46ee2153b9b 100644 (file)
@@ -43,7 +43,8 @@ static ssize_t routes_show(struct device *dev, struct device_attribute *attr, ch
        if (!rdev->rswitch)
                goto out;
 
-       for (i = 0; i < RIO_MAX_ROUTE_ENTRIES; i++) {
+       for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size);
+                       i++) {
                if (rdev->rswitch->route_table[i] == RIO_INVALID_ROUTE)
                        continue;
                str +=
index 80c5f1ba2e49c4045269085b33155239f8d7cd9f..680661abbc4b700ce537a8ad828124be26821187 100644 (file)
@@ -43,7 +43,7 @@ u16 rio_local_get_device_id(struct rio_mport *port)
 
        rio_local_read_config_32(port, RIO_DID_CSR, &result);
 
-       return (RIO_GET_DID(result));
+       return (RIO_GET_DID(port->sys_size, result));
 }
 
 /**
index 80e3f03b5041e0348f766b14caca2fa601efa82f..7786d02581f2adf78dd086fec096e78beba41e43 100644 (file)
@@ -51,10 +51,5 @@ extern struct rio_route_ops __end_rio_route_ops[];
        DECLARE_RIO_ROUTE_SECTION(.rio_route_ops,                       \
                        vid, did, add_hook, get_hook)
 
-#ifdef CONFIG_RAPIDIO_8_BIT_TRANSPORT
-#define RIO_GET_DID(x) ((x & 0x00ff0000) >> 16)
-#define RIO_SET_DID(x) ((x & 0x000000ff) << 16)
-#else
-#define RIO_GET_DID(x) (x & 0xffff)
-#define RIO_SET_DID(x) (x & 0xffff)
-#endif
+#define RIO_GET_DID(size, x)   (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
+#define RIO_SET_DID(size, x)   (size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
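Editor's aside: with CONFIG_RAPIDIO_8_BIT_TRANSPORT removed (see the Kconfig hunk earlier), the transport width is decided at run time from the master port's sys_size: per the macro above, 0 selects the small-system layout with the 8-bit destination ID in bits 23..16 of the CSR, non-zero the large-system layout with a 16-bit ID in the low half. A quick check with a made-up CSR value:

#include <stdint.h>

#define RIO_GET_DID(size, x)   (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))

int main(void)
{
        uint32_t csr = 0x00ab1234;      /* made-up DID CSR value */

        uint16_t did_small = RIO_GET_DID(0, csr);   /* 8-bit IDs:  0xab   */
        uint16_t did_large = RIO_GET_DID(1, csr);   /* 16-bit IDs: 0x1234 */

        return (did_small == 0xab && did_large == 0x1234) ? 0 : 1;
}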
index 02a4c8cf2b2d8c5d240e2349d190b0ef620ad9f7..6cc2c0330230fefc372193e275b7748aee32c112 100644 (file)
@@ -20,10 +20,6 @@ menuconfig RTC_CLASS
 
 if RTC_CLASS
 
-if GEN_RTC || RTC
-comment "Conflicting RTC option has been selected, check GEN_RTC and RTC"
-endif
-
 config RTC_HCTOSYS
        bool "Set system time from RTC on startup and resume"
        depends on RTC_CLASS = y
@@ -304,6 +300,7 @@ comment "Platform RTC drivers"
 config RTC_DRV_CMOS
        tristate "PC-style 'CMOS'"
        depends on X86 || ALPHA || ARM || M32R || ATARI || PPC || MIPS
+       default y if X86
        help
          Say "yes" here to get direct support for the real time clock
          found in every PC or ACPI-based system, and some other boards.
index 52abffc86bcd0a2d24fcbadbe47185007752f19a..39e64ab1ecb71b139c336fae6ba4bb637ea61ffb 100644 (file)
@@ -83,7 +83,7 @@ static int at91_rtc_readtime(struct device *dev, struct rtc_time *tm)
        tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
        tm->tm_year = tm->tm_year - 1900;
 
-       pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
+       pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
                1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
                tm->tm_hour, tm->tm_min, tm->tm_sec);
 
@@ -97,7 +97,7 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
 {
        unsigned long cr;
 
-       pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
+       pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
                1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
                tm->tm_hour, tm->tm_min, tm->tm_sec);
 
@@ -142,7 +142,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
        alrm->enabled = (at91_sys_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
                        ? 1 : 0;
 
-       pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
+       pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
                1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
                tm->tm_hour, tm->tm_min, tm->tm_sec);
 
@@ -178,7 +178,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
        if (alrm->enabled)
                at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
 
-       pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __FUNCTION__,
+       pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
                at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
                tm.tm_min, tm.tm_sec);
 
@@ -193,7 +193,7 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
 {
        int ret = 0;
 
-       pr_debug("%s(): cmd=%08x, arg=%08lx.\n", __FUNCTION__, cmd, arg);
+       pr_debug("%s(): cmd=%08x, arg=%08lx.\n", __func__, cmd, arg);
 
        switch (cmd) {
        case RTC_AIE_OFF:       /* alarm off */
@@ -265,7 +265,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
 
                rtc_update_irq(rtc, 1, events);
 
-               pr_debug("%s(): num=%ld, events=0x%02lx\n", __FUNCTION__,
+               pr_debug("%s(): num=%ld, events=0x%02lx\n", __func__,
                        events >> 8, events & 0x000000FF);
 
                return IRQ_HANDLED;
index 56728a2a3385f84b8ac9dffe35f9501066a58a7a..38d8742a4bdfb4554649cfa37496ebd66a3e027c 100644 (file)
@@ -288,7 +288,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *_rtc)
 
        rtc_update_irq(rtc->rtcdev, 1, events);
 
-       pr_debug("%s: num=%ld, events=0x%02lx\n", __FUNCTION__,
+       pr_debug("%s: num=%ld, events=0x%02lx\n", __func__,
                events >> 8, events & 0x000000FF);
 
        return IRQ_HANDLED;
index 4f28045d9ef201706e0aff6f7e330f564c758ab5..8624f55d05608cabe99e6f91a9e18220be699a13 100644 (file)
@@ -419,7 +419,7 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &bfin_rtc_ops, THIS_MODULE);
-       if (unlikely(IS_ERR(rtc))) {
+       if (IS_ERR(rtc)) {
                ret = PTR_ERR(rtc->rtc_dev);
                goto err;
        }
index dcdc142a3441114ccc9a5446496af48a4746d704..d060a06ce05b67c7351a73e3a76bbd34444eb54c 100644 (file)
@@ -854,11 +854,12 @@ cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id)
                 * don't define the IRQ. It should always be safe to
                 * hardcode it in these cases
                 */
-               return cmos_do_probe(&pnp->dev, &pnp->res.port_resource[0], 8);
+               return cmos_do_probe(&pnp->dev,
+                               pnp_get_resource(pnp, IORESOURCE_IO, 0), 8);
        else
                return cmos_do_probe(&pnp->dev,
-                                    &pnp->res.port_resource[0],
-                                    pnp->res.irq_resource[0].start);
+                               pnp_get_resource(pnp, IORESOURCE_IO, 0),
+                               pnp_irq(pnp, 0));
 }
 
 static void __exit cmos_pnp_remove(struct pnp_dev *pnp)
index 7b002ceeaa7dab6a6d8336b601cbc20e8d888566..b9397818f73a6fd22b118a5643fd768a15b4d5ee 100644 (file)
@@ -122,7 +122,7 @@ static int ds1302_rtc_read_time(struct device *dev, struct rtc_time *tm)
 
        dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
                "mday=%d, mon=%d, year=%d, wday=%d\n",
-               __FUNCTION__,
+               __func__,
                tm->tm_sec, tm->tm_min, tm->tm_hour,
                tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday);
 
index f389a28720d2da4302c05a1ef56e2fc5530272a7..bbf97e65202a24327b57beafbba98d6daca738a2 100644 (file)
@@ -99,45 +99,38 @@ struct ds1307 {
 };
 
 struct chip_desc {
-       char                    name[9];
        unsigned                nvram56:1;
        unsigned                alarm:1;
-       enum ds_type            type;
 };
 
-static const struct chip_desc chips[] = { {
-       .name           = "ds1307",
-       .type           = ds_1307,
+static const struct chip_desc chips[] = {
+[ds_1307] = {
        .nvram56        = 1,
-}, {
-       .name           = "ds1337",
-       .type           = ds_1337,
+},
+[ds_1337] = {
        .alarm          = 1,
-}, {
-       .name           = "ds1338",
-       .type           = ds_1338,
+},
+[ds_1338] = {
        .nvram56        = 1,
-}, {
-       .name           = "ds1339",
-       .type           = ds_1339,
+},
+[ds_1339] = {
        .alarm          = 1,
-}, {
-       .name           = "ds1340",
-       .type           = ds_1340,
-}, {
-       .name           = "m41t00",
-       .type           = m41t00,
+},
+[ds_1340] = {
+},
+[m41t00] = {
 }, };
 
-static inline const struct chip_desc *find_chip(const char *s)
-{
-       unsigned i;
-
-       for (i = 0; i < ARRAY_SIZE(chips); i++)
-               if (strnicmp(s, chips[i].name, sizeof chips[i].name) == 0)
-                       return &chips[i];
-       return NULL;
-}
+static const struct i2c_device_id ds1307_id[] = {
+       { "ds1307", ds_1307 },
+       { "ds1337", ds_1337 },
+       { "ds1338", ds_1338 },
+       { "ds1339", ds_1339 },
+       { "ds1340", ds_1340 },
+       { "m41t00", m41t00 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, ds1307_id);
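Editor's aside: the new-style binding used here (and in rtc-ds1374, rtc-isl1208 and rtc-m41t80 below) attaches per-chip data to the i2c_device_id entry: driver_data carries the ds_type value, and probe() indexes the designated-initializer chips[] array with id->driver_data instead of matching client->name via strnicmp(). A minimal sketch of the same pattern with hypothetical names:

#include <linux/i2c.h>

enum example_type { chip_a, chip_b };

struct example_desc {
        unsigned has_nvram:1;
        unsigned has_alarm:1;
};

static const struct example_desc example_chips[] = {
        [chip_a] = { .has_nvram = 1 },
        [chip_b] = { .has_alarm = 1 },
};

static const struct i2c_device_id example_ids[] = {
        { "chip-a", chip_a },
        { "chip-b", chip_b },
        { }     /* terminator */
};

/* in probe(client, id):
 *      const struct example_desc *desc = &example_chips[id->driver_data];
 */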
 
 static int ds1307_get_time(struct device *dev, struct rtc_time *t)
 {
@@ -326,21 +319,15 @@ static struct bin_attribute nvram = {
 
 static struct i2c_driver ds1307_driver;
 
-static int __devinit ds1307_probe(struct i2c_client *client)
+static int __devinit ds1307_probe(struct i2c_client *client,
+                                 const struct i2c_device_id *id)
 {
        struct ds1307           *ds1307;
        int                     err = -ENODEV;
        int                     tmp;
-       const struct chip_desc  *chip;
+       const struct chip_desc  *chip = &chips[id->driver_data];
        struct i2c_adapter      *adapter = to_i2c_adapter(client->dev.parent);
 
-       chip = find_chip(client->name);
-       if (!chip) {
-               dev_err(&client->dev, "unknown chip type '%s'\n",
-                               client->name);
-               return -ENODEV;
-       }
-
        if (!i2c_check_functionality(adapter,
                        I2C_FUNC_I2C | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
                return -EIO;
@@ -361,7 +348,7 @@ static int __devinit ds1307_probe(struct i2c_client *client)
        ds1307->msg[1].len = sizeof(ds1307->regs);
        ds1307->msg[1].buf = ds1307->regs;
 
-       ds1307->type = chip->type;
+       ds1307->type = id->driver_data;
 
        switch (ds1307->type) {
        case ds_1337:
@@ -550,6 +537,7 @@ static struct i2c_driver ds1307_driver = {
        },
        .probe          = ds1307_probe,
        .remove         = __devexit_p(ds1307_remove),
+       .id_table       = ds1307_id,
 };
 
 static int __init ds1307_init(void)
index 45bda186befc2e1bfaf8bc3ff71f7cf02eec1d6a..fa2d2f8b3f4d071bca31834a46c1ac58c1367fe5 100644 (file)
 #define DS1374_REG_SR_AF       0x01 /* Alarm Flag */
 #define DS1374_REG_TCR         0x09 /* Trickle Charge */
 
+static const struct i2c_device_id ds1374_id[] = {
+       { "rtc-ds1374", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, ds1374_id);
+
 struct ds1374 {
        struct i2c_client *client;
        struct rtc_device *rtc;
@@ -355,7 +361,8 @@ static const struct rtc_class_ops ds1374_rtc_ops = {
        .ioctl = ds1374_ioctl,
 };
 
-static int ds1374_probe(struct i2c_client *client)
+static int ds1374_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
 {
        struct ds1374 *ds1374;
        int ret;
@@ -429,6 +436,7 @@ static struct i2c_driver ds1374_driver = {
        },
        .probe = ds1374_probe,
        .remove = __devexit_p(ds1374_remove),
+       .id_table = ds1374_id,
 };
 
 static int __init ds1374_init(void)
index d08912f18dddb58cee89dbfbd5a4ab9b9228e3f4..a83a40b3ebaaf9ad67b08c97a52a481645a0f4fa 100644 (file)
@@ -181,8 +181,7 @@ ds1511_wdog_disable(void)
  * stupidly, some callers call with year unmolested;
  * and some call with  year = year - 1900.  thanks.
  */
- int
-ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
+static int ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
 {
        u8 mon, day, dow, hrs, min, sec, yrs, cen;
        unsigned int flags;
@@ -245,8 +244,7 @@ ds1511_rtc_set_time(struct device *dev, struct rtc_time *rtc_tm)
        return 0;
 }
 
- int
-ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
+static int ds1511_rtc_read_time(struct device *dev, struct rtc_time *rtc_tm)
 {
        unsigned int century;
        unsigned int flags;
index e0900ca678ecc54ad7c50276d3cc4a580dcf2c40..6fa4556f5f5c06b4d469e4a8c553d0d47357ebe0 100644 (file)
@@ -50,13 +50,13 @@ static int ds1672_get_datetime(struct i2c_client *client, struct rtc_time *tm)
 
        /* read date registers */
        if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
-               dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
+               dev_err(&client->dev, "%s: read error\n", __func__);
                return -EIO;
        }
 
        dev_dbg(&client->dev,
                "%s: raw read data - counters=%02x,%02x,%02x,%02x\n",
-               __FUNCTION__, buf[0], buf[1], buf[2], buf[3]);
+               __func__, buf[0], buf[1], buf[2], buf[3]);
 
        time = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
 
@@ -64,7 +64,7 @@ static int ds1672_get_datetime(struct i2c_client *client, struct rtc_time *tm)
 
        dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
                "mday=%d, mon=%d, year=%d, wday=%d\n",
-               __FUNCTION__, tm->tm_sec, tm->tm_min, tm->tm_hour,
+               __func__, tm->tm_sec, tm->tm_min, tm->tm_hour,
                tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
        return 0;
@@ -84,7 +84,7 @@ static int ds1672_set_mmss(struct i2c_client *client, unsigned long secs)
 
        xfer = i2c_master_send(client, buf, 6);
        if (xfer != 6) {
-               dev_err(&client->dev, "%s: send: %d\n", __FUNCTION__, xfer);
+               dev_err(&client->dev, "%s: send: %d\n", __func__, xfer);
                return -EIO;
        }
 
@@ -98,7 +98,7 @@ static int ds1672_set_datetime(struct i2c_client *client, struct rtc_time *tm)
        dev_dbg(&client->dev,
                "%s: secs=%d, mins=%d, hours=%d, "
                "mday=%d, mon=%d, year=%d, wday=%d\n",
-               __FUNCTION__,
+               __func__,
                tm->tm_sec, tm->tm_min, tm->tm_hour,
                tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
@@ -133,7 +133,7 @@ static int ds1672_get_control(struct i2c_client *client, u8 *status)
 
        /* read control register */
        if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
-               dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
+               dev_err(&client->dev, "%s: read error\n", __func__);
                return -EIO;
        }
 
@@ -199,7 +199,7 @@ static int ds1672_probe(struct i2c_adapter *adapter, int address, int kind)
        struct i2c_client *client;
        struct rtc_device *rtc;
 
-       dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
+       dev_dbg(&adapter->dev, "%s\n", __func__);
 
        if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
                err = -ENODEV;
index 725b0c73c33399e1ccf9000fb0066af25914a15b..fbb90b1e40981f6fadfd4542e468ad35a3d70f4d 100644 (file)
 #include <linux/bcd.h>
 #include <linux/rtc.h>
 
-#define DRV_NAME "isl1208"
-#define DRV_VERSION "0.2"
+#define DRV_VERSION "0.3"
 
 /* Register map */
 /* rtc section */
 #define ISL1208_REG_SC  0x00
 #define ISL1208_REG_MN  0x01
 #define ISL1208_REG_HR  0x02
-#define ISL1208_REG_HR_MIL     (1<<7) /* 24h/12h mode */
-#define ISL1208_REG_HR_PM      (1<<5) /* PM/AM bit in 12h mode */
+#define ISL1208_REG_HR_MIL     (1<<7)  /* 24h/12h mode */
+#define ISL1208_REG_HR_PM      (1<<5)  /* PM/AM bit in 12h mode */
 #define ISL1208_REG_DT  0x03
 #define ISL1208_REG_MO  0x04
 #define ISL1208_REG_YR  0x05
 
 /* control/status section */
 #define ISL1208_REG_SR  0x07
-#define ISL1208_REG_SR_ARST    (1<<7) /* auto reset */
-#define ISL1208_REG_SR_XTOSCB  (1<<6) /* crystal oscillator */
-#define ISL1208_REG_SR_WRTC    (1<<4) /* write rtc */
-#define ISL1208_REG_SR_ALM     (1<<2) /* alarm */
-#define ISL1208_REG_SR_BAT     (1<<1) /* battery */
-#define ISL1208_REG_SR_RTCF    (1<<0) /* rtc fail */
+#define ISL1208_REG_SR_ARST    (1<<7)  /* auto reset */
+#define ISL1208_REG_SR_XTOSCB  (1<<6)  /* crystal oscillator */
+#define ISL1208_REG_SR_WRTC    (1<<4)  /* write rtc */
+#define ISL1208_REG_SR_ALM     (1<<2)  /* alarm */
+#define ISL1208_REG_SR_BAT     (1<<1)  /* battery */
+#define ISL1208_REG_SR_RTCF    (1<<0)  /* rtc fail */
 #define ISL1208_REG_INT 0x08
-#define ISL1208_REG_09  0x09 /* reserved */
+#define ISL1208_REG_09  0x09   /* reserved */
 #define ISL1208_REG_ATR 0x0a
 #define ISL1208_REG_DTR 0x0b
 
 #define ISL1208_REG_USR2 0x13
 #define ISL1208_USR_SECTION_LEN 2
 
-/* i2c configuration */
-#define ISL1208_I2C_ADDR 0xde
-
-static const unsigned short normal_i2c[] = {
-       ISL1208_I2C_ADDR>>1, I2C_CLIENT_END
-};
-I2C_CLIENT_INSMOD; /* defines addr_data */
-
-static int isl1208_attach_adapter(struct i2c_adapter *adapter);
-static int isl1208_detach_client(struct i2c_client *client);
-
-static struct i2c_driver isl1208_driver = {
-       .driver         = {
-               .name   = DRV_NAME,
-       },
-       .id             = I2C_DRIVERID_ISL1208,
-       .attach_adapter = &isl1208_attach_adapter,
-       .detach_client  = &isl1208_detach_client,
-};
+static struct i2c_driver isl1208_driver;
 
 /* block read */
 static int
 isl1208_i2c_read_regs(struct i2c_client *client, u8 reg, u8 buf[],
-                      unsigned len)
+                     unsigned len)
 {
        u8 reg_addr[1] = { reg };
        struct i2c_msg msgs[2] = {
-               { client->addr, client->flags, sizeof(reg_addr), reg_addr },
-               { client->addr, client->flags | I2C_M_RD, len, buf }
+               {client->addr, 0, sizeof(reg_addr), reg_addr}
+               ,
+               {client->addr, I2C_M_RD, len, buf}
        };
        int ret;
 
-       BUG_ON(len == 0);
        BUG_ON(reg > ISL1208_REG_USR2);
        BUG_ON(reg + len > ISL1208_REG_USR2 + 1);
 
@@ -103,15 +84,14 @@ isl1208_i2c_read_regs(struct i2c_client *client, u8 reg, u8 buf[],
 /* block write */
 static int
 isl1208_i2c_set_regs(struct i2c_client *client, u8 reg, u8 const buf[],
-                      unsigned len)
+                    unsigned len)
 {
        u8 i2c_buf[ISL1208_REG_USR2 + 2];
        struct i2c_msg msgs[1] = {
-               { client->addr, client->flags, len + 1, i2c_buf }
+               {client->addr, 0, len + 1, i2c_buf}
        };
        int ret;
 
-       BUG_ON(len == 0);
        BUG_ON(reg > ISL1208_REG_USR2);
        BUG_ON(reg + len > ISL1208_REG_USR2 + 1);
 
@@ -125,7 +105,8 @@ isl1208_i2c_set_regs(struct i2c_client *client, u8 reg, u8 const buf[],
 }
 
 /* simple check to see whether we have an isl1208 */
-static int isl1208_i2c_validate_client(struct i2c_client *client)
+static int
+isl1208_i2c_validate_client(struct i2c_client *client)
 {
        u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, };
        u8 zero_mask[ISL1208_RTC_SECTION_LEN] = {
@@ -139,24 +120,29 @@ static int isl1208_i2c_validate_client(struct i2c_client *client)
                return ret;
 
        for (i = 0; i < ISL1208_RTC_SECTION_LEN; ++i) {
-               if (regs[i] & zero_mask[i]) /* check if bits are cleared */
+               if (regs[i] & zero_mask[i])     /* check if bits are cleared */
                        return -ENODEV;
        }
 
        return 0;
 }
 
-static int isl1208_i2c_get_sr(struct i2c_client *client)
+static int
+isl1208_i2c_get_sr(struct i2c_client *client)
 {
-       return i2c_smbus_read_byte_data(client, ISL1208_REG_SR) == -1 ? -EIO:0;
+       int sr = i2c_smbus_read_byte_data(client, ISL1208_REG_SR);
+       if (sr < 0)
+               return -EIO;
+
+       return sr;
 }
 
-static int isl1208_i2c_get_atr(struct i2c_client *client)
+static int
+isl1208_i2c_get_atr(struct i2c_client *client)
 {
        int atr = i2c_smbus_read_byte_data(client, ISL1208_REG_ATR);
-
        if (atr < 0)
-               return -EIO;
+               return atr;
 
        /* The 6-bit value in the ATR register controls the load
         * capacitance C_load in steps of 0.25pF
@@ -169,51 +155,54 @@ static int isl1208_i2c_get_atr(struct i2c_client *client)
         *
         */
 
-       atr &= 0x3f; /* mask out lsb */
-       atr ^= 1<<5; /* invert 6th bit */
-       atr += 2*9; /* add offset of 4.5pF; unit[atr] = 0.25pF */
+       atr &= 0x3f;            /* mask out lsb */
+       atr ^= 1 << 5;          /* invert 6th bit */
+       atr += 2 * 9;           /* add offset of 4.5pF; unit[atr] = 0.25pF */
 
        return atr;
 }
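Editor's aside: for orientation, a worked trace of that adjustment with a made-up raw register value (not from the patch):

        int atr = 0x00;         /* hypothetical raw ISL1208_REG_ATR reading */

        atr &= 0x3f;            /* -> 0                                     */
        atr ^= 1 << 5;          /* -> 32                                    */
        atr += 2 * 9;           /* -> 50, i.e. 50 * 0.25pF = 12.5pF         */

        /* the proc/sysfs code then prints (50 >> 2) and (50 & 0x3) * 25,
         * i.e. "12.50 pF" */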
 
-static int isl1208_i2c_get_dtr(struct i2c_client *client)
+static int
+isl1208_i2c_get_dtr(struct i2c_client *client)
 {
        int dtr = i2c_smbus_read_byte_data(client, ISL1208_REG_DTR);
-
        if (dtr < 0)
                return -EIO;
 
        /* dtr encodes adjustments of {-60,-40,-20,0,20,40,60} ppm */
-       dtr = ((dtr & 0x3) * 20) * (dtr & (1<<2) ? -1 : 1);
+       dtr = ((dtr & 0x3) * 20) * (dtr & (1 << 2) ? -1 : 1);
 
        return dtr;
 }
 
-static int isl1208_i2c_get_usr(struct i2c_client *client)
+static int
+isl1208_i2c_get_usr(struct i2c_client *client)
 {
        u8 buf[ISL1208_USR_SECTION_LEN] = { 0, };
        int ret;
 
-       ret = isl1208_i2c_read_regs (client, ISL1208_REG_USR1, buf,
-                                  ISL1208_USR_SECTION_LEN);
+       ret = isl1208_i2c_read_regs(client, ISL1208_REG_USR1, buf,
+                                   ISL1208_USR_SECTION_LEN);
        if (ret < 0)
                return ret;
 
        return (buf[1] << 8) | buf[0];
 }
 
-static int isl1208_i2c_set_usr(struct i2c_client *client, u16 usr)
+static int
+isl1208_i2c_set_usr(struct i2c_client *client, u16 usr)
 {
        u8 buf[ISL1208_USR_SECTION_LEN];
 
        buf[0] = usr & 0xff;
        buf[1] = (usr >> 8) & 0xff;
 
-       return isl1208_i2c_set_regs (client, ISL1208_REG_USR1, buf,
-                                    ISL1208_USR_SECTION_LEN);
+       return isl1208_i2c_set_regs(client, ISL1208_REG_USR1, buf,
+                                   ISL1208_USR_SECTION_LEN);
 }
 
-static int isl1208_rtc_proc(struct device *dev, struct seq_file *seq)
+static int
+isl1208_rtc_proc(struct device *dev, struct seq_file *seq)
 {
        struct i2c_client *const client = to_i2c_client(dev);
        int sr, dtr, atr, usr;
@@ -230,20 +219,19 @@ static int isl1208_rtc_proc(struct device *dev, struct seq_file *seq)
                   (sr & ISL1208_REG_SR_ALM) ? " ALM" : "",
                   (sr & ISL1208_REG_SR_WRTC) ? " WRTC" : "",
                   (sr & ISL1208_REG_SR_XTOSCB) ? " XTOSCB" : "",
-                  (sr & ISL1208_REG_SR_ARST) ? " ARST" : "",
-                  sr);
+                  (sr & ISL1208_REG_SR_ARST) ? " ARST" : "", sr);
 
        seq_printf(seq, "batt_status\t: %s\n",
                   (sr & ISL1208_REG_SR_RTCF) ? "bad" : "okay");
 
        dtr = isl1208_i2c_get_dtr(client);
-       if (dtr >= 0 -1)
+       if (dtr >= 0 - 1)
                seq_printf(seq, "digital_trim\t: %d ppm\n", dtr);
 
        atr = isl1208_i2c_get_atr(client);
        if (atr >= 0)
                seq_printf(seq, "analog_trim\t: %d.%.2d pF\n",
-                          atr>>2, (atr&0x3)*25);
+                          atr >> 2, (atr & 0x3) * 25);
 
        usr = isl1208_i2c_get_usr(client);
        if (usr >= 0)
@@ -252,9 +240,8 @@ static int isl1208_rtc_proc(struct device *dev, struct seq_file *seq)
        return 0;
 }
 
-
-static int isl1208_i2c_read_time(struct i2c_client *client,
-                                struct rtc_time *tm)
+static int
+isl1208_i2c_read_time(struct i2c_client *client, struct rtc_time *tm)
 {
        int sr;
        u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, };
@@ -274,27 +261,30 @@ static int isl1208_i2c_read_time(struct i2c_client *client,
 
        tm->tm_sec = BCD2BIN(regs[ISL1208_REG_SC]);
        tm->tm_min = BCD2BIN(regs[ISL1208_REG_MN]);
-       { /* HR field has a more complex interpretation */
+
+       /* HR field has a more complex interpretation */
+       {
                const u8 _hr = regs[ISL1208_REG_HR];
-               if (_hr & ISL1208_REG_HR_MIL) /* 24h format */
+               if (_hr & ISL1208_REG_HR_MIL)   /* 24h format */
                        tm->tm_hour = BCD2BIN(_hr & 0x3f);
-               else { // 12h format
+               else {
+                       /* 12h format */
                        tm->tm_hour = BCD2BIN(_hr & 0x1f);
-                       if (_hr & ISL1208_REG_HR_PM) /* PM flag set */
+                       if (_hr & ISL1208_REG_HR_PM)    /* PM flag set */
                                tm->tm_hour += 12;
                }
        }
 
        tm->tm_mday = BCD2BIN(regs[ISL1208_REG_DT]);
-       tm->tm_mon = BCD2BIN(regs[ISL1208_REG_MO]) - 1; /* rtc starts at 1 */
+       tm->tm_mon = BCD2BIN(regs[ISL1208_REG_MO]) - 1; /* rtc starts at 1 */
        tm->tm_year = BCD2BIN(regs[ISL1208_REG_YR]) + 100;
        tm->tm_wday = BCD2BIN(regs[ISL1208_REG_DW]);
 
        return 0;
 }
 
-static int isl1208_i2c_read_alarm(struct i2c_client *client,
-                                 struct rtc_wkalrm *alarm)
+static int
+isl1208_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm)
 {
        struct rtc_time *const tm = &alarm->time;
        u8 regs[ISL1208_ALARM_SECTION_LEN] = { 0, };
@@ -307,7 +297,7 @@ static int isl1208_i2c_read_alarm(struct i2c_client *client,
        }
 
        sr = isl1208_i2c_read_regs(client, ISL1208_REG_SCA, regs,
-                                 ISL1208_ALARM_SECTION_LEN);
+                                  ISL1208_ALARM_SECTION_LEN);
        if (sr < 0) {
                dev_err(&client->dev, "%s: reading alarm section failed\n",
                        __func__);
@@ -315,23 +305,25 @@ static int isl1208_i2c_read_alarm(struct i2c_client *client,
        }
 
        /* MSB of each alarm register is an enable bit */
-       tm->tm_sec  = BCD2BIN(regs[ISL1208_REG_SCA-ISL1208_REG_SCA] & 0x7f);
-       tm->tm_min  = BCD2BIN(regs[ISL1208_REG_MNA-ISL1208_REG_SCA] & 0x7f);
-       tm->tm_hour = BCD2BIN(regs[ISL1208_REG_HRA-ISL1208_REG_SCA] & 0x3f);
-       tm->tm_mday = BCD2BIN(regs[ISL1208_REG_DTA-ISL1208_REG_SCA] & 0x3f);
-       tm->tm_mon  = BCD2BIN(regs[ISL1208_REG_MOA-ISL1208_REG_SCA] & 0x1f)-1;
-       tm->tm_wday = BCD2BIN(regs[ISL1208_REG_DWA-ISL1208_REG_SCA] & 0x03);
+       tm->tm_sec = BCD2BIN(regs[ISL1208_REG_SCA - ISL1208_REG_SCA] & 0x7f);
+       tm->tm_min = BCD2BIN(regs[ISL1208_REG_MNA - ISL1208_REG_SCA] & 0x7f);
+       tm->tm_hour = BCD2BIN(regs[ISL1208_REG_HRA - ISL1208_REG_SCA] & 0x3f);
+       tm->tm_mday = BCD2BIN(regs[ISL1208_REG_DTA - ISL1208_REG_SCA] & 0x3f);
+       tm->tm_mon =
+               BCD2BIN(regs[ISL1208_REG_MOA - ISL1208_REG_SCA] & 0x1f) - 1;
+       tm->tm_wday = BCD2BIN(regs[ISL1208_REG_DWA - ISL1208_REG_SCA] & 0x03);
 
        return 0;
 }
 
-static int isl1208_rtc_read_time(struct device *dev, struct rtc_time *tm)
+static int
+isl1208_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
        return isl1208_i2c_read_time(to_i2c_client(dev), tm);
 }
 
-static int isl1208_i2c_set_time(struct i2c_client *client,
-                               struct rtc_time const *tm)
+static int
+isl1208_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm)
 {
        int sr;
        u8 regs[ISL1208_RTC_SECTION_LEN] = { 0, };
@@ -353,7 +345,7 @@ static int isl1208_i2c_set_time(struct i2c_client *client,
        }
 
        /* set WRTC */
-       sr = i2c_smbus_write_byte_data (client, ISL1208_REG_SR,
+       sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR,
                                       sr | ISL1208_REG_SR_WRTC);
        if (sr < 0) {
                dev_err(&client->dev, "%s: writing SR failed\n", __func__);
@@ -369,7 +361,7 @@ static int isl1208_i2c_set_time(struct i2c_client *client,
        }
 
        /* clear WRTC again */
-       sr = i2c_smbus_write_byte_data (client, ISL1208_REG_SR,
+       sr = i2c_smbus_write_byte_data(client, ISL1208_REG_SR,
                                       sr & ~ISL1208_REG_SR_WRTC);
        if (sr < 0) {
                dev_err(&client->dev, "%s: writing SR failed\n", __func__);
@@ -380,70 +372,69 @@ static int isl1208_i2c_set_time(struct i2c_client *client,
 }
 
 
-static int isl1208_rtc_set_time(struct device *dev, struct rtc_time *tm)
+static int
+isl1208_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
        return isl1208_i2c_set_time(to_i2c_client(dev), tm);
 }
 
-static int isl1208_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
+static int
+isl1208_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
 {
        return isl1208_i2c_read_alarm(to_i2c_client(dev), alarm);
 }
 
 static const struct rtc_class_ops isl1208_rtc_ops = {
-       .proc           = isl1208_rtc_proc,
-       .read_time      = isl1208_rtc_read_time,
-       .set_time       = isl1208_rtc_set_time,
-       .read_alarm     = isl1208_rtc_read_alarm,
-       //.set_alarm    = isl1208_rtc_set_alarm,
+       .proc = isl1208_rtc_proc,
+       .read_time = isl1208_rtc_read_time,
+       .set_time = isl1208_rtc_set_time,
+       .read_alarm = isl1208_rtc_read_alarm,
+       /*.set_alarm    = isl1208_rtc_set_alarm, */
 };
 
 /* sysfs interface */
 
-static ssize_t isl1208_sysfs_show_atrim(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
+static ssize_t
+isl1208_sysfs_show_atrim(struct device *dev,
+                        struct device_attribute *attr, char *buf)
 {
-       int atr;
-
-       atr = isl1208_i2c_get_atr(to_i2c_client(dev));
+       int atr = isl1208_i2c_get_atr(to_i2c_client(dev));
        if (atr < 0)
                return atr;
 
-       return sprintf(buf, "%d.%.2d pF\n", atr>>2, (atr&0x3)*25);
+       return sprintf(buf, "%d.%.2d pF\n", atr >> 2, (atr & 0x3) * 25);
 }
+
 static DEVICE_ATTR(atrim, S_IRUGO, isl1208_sysfs_show_atrim, NULL);
 
-static ssize_t isl1208_sysfs_show_dtrim(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
+static ssize_t
+isl1208_sysfs_show_dtrim(struct device *dev,
+                        struct device_attribute *attr, char *buf)
 {
-       int dtr;
-
-       dtr = isl1208_i2c_get_dtr(to_i2c_client(dev));
+       int dtr = isl1208_i2c_get_dtr(to_i2c_client(dev));
        if (dtr < 0)
                return dtr;
 
        return sprintf(buf, "%d ppm\n", dtr);
 }
+
 static DEVICE_ATTR(dtrim, S_IRUGO, isl1208_sysfs_show_dtrim, NULL);
 
-static ssize_t isl1208_sysfs_show_usr(struct device *dev,
-                                      struct device_attribute *attr,
-                                      char *buf)
+static ssize_t
+isl1208_sysfs_show_usr(struct device *dev,
+                      struct device_attribute *attr, char *buf)
 {
-       int usr;
-
-       usr = isl1208_i2c_get_usr(to_i2c_client(dev));
+       int usr = isl1208_i2c_get_usr(to_i2c_client(dev));
        if (usr < 0)
                return usr;
 
        return sprintf(buf, "0x%.4x\n", usr);
 }
 
-static ssize_t isl1208_sysfs_store_usr(struct device *dev,
-                                      struct device_attribute *attr,
-                                      const char *buf, size_t count)
+static ssize_t
+isl1208_sysfs_store_usr(struct device *dev,
+                       struct device_attribute *attr,
+                       const char *buf, size_t count)
 {
        int usr = -1;
 
@@ -460,124 +451,123 @@ static ssize_t isl1208_sysfs_store_usr(struct device *dev,
 
        return isl1208_i2c_set_usr(to_i2c_client(dev), usr) ? -EIO : count;
 }
+
 static DEVICE_ATTR(usr, S_IRUGO | S_IWUSR, isl1208_sysfs_show_usr,
                   isl1208_sysfs_store_usr);
 
 static int
-isl1208_probe(struct i2c_adapter *adapter, int addr, int kind)
+isl1208_sysfs_register(struct device *dev)
 {
-       int rc = 0;
-       struct i2c_client *new_client = NULL;
-       struct rtc_device *rtc = NULL;
+       int err;
 
-       if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
-               rc = -ENODEV;
-               goto failout;
+       err = device_create_file(dev, &dev_attr_atrim);
+       if (err)
+               return err;
+
+       err = device_create_file(dev, &dev_attr_dtrim);
+       if (err) {
+               device_remove_file(dev, &dev_attr_atrim);
+               return err;
        }
 
-       new_client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL);
-       if (new_client == NULL) {
-               rc = -ENOMEM;
-               goto failout;
+       err = device_create_file(dev, &dev_attr_usr);
+       if (err) {
+               device_remove_file(dev, &dev_attr_atrim);
+               device_remove_file(dev, &dev_attr_dtrim);
        }
 
-       new_client->addr = addr;
-       new_client->adapter = adapter;
-       new_client->driver = &isl1208_driver;
-       new_client->flags = 0;
-       strcpy(new_client->name, DRV_NAME);
+       return 0;
+}
 
-       if (kind < 0) {
-               rc = isl1208_i2c_validate_client(new_client);
-               if (rc < 0)
-                       goto failout;
-       }
+static int
+isl1208_sysfs_unregister(struct device *dev)
+{
+       device_remove_file(dev, &dev_attr_atrim);
+       device_remove_file(dev, &dev_attr_atrim);
+       device_remove_file(dev, &dev_attr_usr);
+
+       return 0;
+}
+
+static int
+isl1208_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+       int rc = 0;
+       struct rtc_device *rtc;
+
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+               return -ENODEV;
 
-       rc = i2c_attach_client(new_client);
-       if (rc < 0)
-               goto failout;
+       if (isl1208_i2c_validate_client(client) < 0)
+               return -ENODEV;
 
-       dev_info(&new_client->dev,
+       dev_info(&client->dev,
                 "chip found, driver version " DRV_VERSION "\n");
 
        rtc = rtc_device_register(isl1208_driver.driver.name,
-                                 &new_client->dev,
-                                 &isl1208_rtc_ops, THIS_MODULE);
+                                 &client->dev, &isl1208_rtc_ops,
+                                 THIS_MODULE);
+       if (IS_ERR(rtc))
+               return PTR_ERR(rtc);
 
-       if (IS_ERR(rtc)) {
-               rc = PTR_ERR(rtc);
-               goto failout_detach;
-       }
-
-       i2c_set_clientdata(new_client, rtc);
+       i2c_set_clientdata(client, rtc);
 
-       rc = isl1208_i2c_get_sr(new_client);
+       rc = isl1208_i2c_get_sr(client);
        if (rc < 0) {
-               dev_err(&new_client->dev, "reading status failed\n");
-               goto failout_unregister;
+               dev_err(&client->dev, "reading status failed\n");
+               goto exit_unregister;
        }
 
        if (rc & ISL1208_REG_SR_RTCF)
-               dev_warn(&new_client->dev, "rtc power failure detected, "
+               dev_warn(&client->dev, "rtc power failure detected, "
                         "please set clock.\n");
 
-       rc = device_create_file(&new_client->dev, &dev_attr_atrim);
-       if (rc < 0)
-               goto failout_unregister;
-       rc = device_create_file(&new_client->dev, &dev_attr_dtrim);
-       if (rc < 0)
-               goto failout_atrim;
-       rc = device_create_file(&new_client->dev, &dev_attr_usr);
-       if (rc < 0)
-               goto failout_dtrim;
+       rc = isl1208_sysfs_register(&client->dev);
+       if (rc)
+               goto exit_unregister;
 
        return 0;
 
- failout_dtrim:
-       device_remove_file(&new_client->dev, &dev_attr_dtrim);
- failout_atrim:
-       device_remove_file(&new_client->dev, &dev_attr_atrim);
- failout_unregister:
+exit_unregister:
        rtc_device_unregister(rtc);
- failout_detach:
-       i2c_detach_client(new_client);
- failout:
-       kfree(new_client);
-       return rc;
-}
 
-static int
-isl1208_attach_adapter (struct i2c_adapter *adapter)
-{
-       return i2c_probe(adapter, &addr_data, isl1208_probe);
+       return rc;
 }
 
 static int
-isl1208_detach_client(struct i2c_client *client)
+isl1208_remove(struct i2c_client *client)
 {
-       int rc;
-       struct rtc_device *const rtc = i2c_get_clientdata(client);
+       struct rtc_device *rtc = i2c_get_clientdata(client);
 
-       if (rtc)
-               rtc_device_unregister(rtc); /* do we need to kfree? */
-
-       rc = i2c_detach_client(client);
-       if (rc)
-               return rc;
-
-       kfree(client);
+       isl1208_sysfs_unregister(&client->dev);
+       rtc_device_unregister(rtc);
 
        return 0;
 }
 
-/* module management */
+static const struct i2c_device_id isl1208_id[] = {
+       { "isl1208", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, isl1208_id);
+
+static struct i2c_driver isl1208_driver = {
+       .driver = {
+                  .name = "rtc-isl1208",
+                  },
+       .probe = isl1208_probe,
+       .remove = isl1208_remove,
+       .id_table = isl1208_id,
+};
 
-static int __init isl1208_init(void)
+static int __init
+isl1208_init(void)
 {
        return i2c_add_driver(&isl1208_driver);
 }
 
-static void __exit isl1208_exit(void)
+static void __exit
+isl1208_exit(void)
 {
        i2c_del_driver(&isl1208_driver);
 }
index 1cb33cac12377095fc075e07e4341306dcda4bcc..316bfaa80872195badfeda9ec87de81265d77d96 100644 (file)
 
 #define DRV_VERSION "0.05"
 
-struct m41t80_chip_info {
-       const char *name;
-       u8 features;
-};
-
-static const struct m41t80_chip_info m41t80_chip_info_tbl[] = {
-       {
-               .name           = "m41t80",
-               .features       = 0,
-       },
-       {
-               .name           = "m41t81",
-               .features       = M41T80_FEATURE_HT,
-       },
-       {
-               .name           = "m41t81s",
-               .features       = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
-       },
-       {
-               .name           = "m41t82",
-               .features       = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
-       },
-       {
-               .name           = "m41t83",
-               .features       = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
-       },
-       {
-               .name           = "m41st84",
-               .features       = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
-       },
-       {
-               .name           = "m41st85",
-               .features       = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
-       },
-       {
-               .name           = "m41st87",
-               .features       = M41T80_FEATURE_HT | M41T80_FEATURE_BL,
-       },
+static const struct i2c_device_id m41t80_id[] = {
+       { "m41t80", 0 },
+       { "m41t81", M41T80_FEATURE_HT },
+       { "m41t81s", M41T80_FEATURE_HT | M41T80_FEATURE_BL },
+       { "m41t82", M41T80_FEATURE_HT | M41T80_FEATURE_BL },
+       { "m41t83", M41T80_FEATURE_HT | M41T80_FEATURE_BL },
+       { "m41st84", M41T80_FEATURE_HT | M41T80_FEATURE_BL },
+       { "m41st85", M41T80_FEATURE_HT | M41T80_FEATURE_BL },
+       { "m41st87", M41T80_FEATURE_HT | M41T80_FEATURE_BL },
+       { }
 };
+MODULE_DEVICE_TABLE(i2c, m41t80_id);
 
 struct m41t80_data {
-       const struct m41t80_chip_info *chip;
+       u8 features;
        struct rtc_device *rtc;
 };
 
@@ -208,7 +181,7 @@ static int m41t80_rtc_proc(struct device *dev, struct seq_file *seq)
        struct m41t80_data *clientdata = i2c_get_clientdata(client);
        u8 reg;
 
-       if (clientdata->chip->features & M41T80_FEATURE_BL) {
+       if (clientdata->features & M41T80_FEATURE_BL) {
                reg = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS);
                seq_printf(seq, "battery\t\t: %s\n",
                           (reg & M41T80_FLAGS_BATT_LOW) ? "exhausted" : "ok");
@@ -756,12 +729,12 @@ static struct notifier_block wdt_notifier = {
  *
  *****************************************************************************
  */
-static int m41t80_probe(struct i2c_client *client)
+static int m41t80_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
 {
-       int i, rc = 0;
+       int rc = 0;
        struct rtc_device *rtc = NULL;
        struct rtc_time tm;
-       const struct m41t80_chip_info *chip;
        struct m41t80_data *clientdata = NULL;
 
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C
@@ -773,19 +746,6 @@ static int m41t80_probe(struct i2c_client *client)
        dev_info(&client->dev,
                 "chip found, driver version " DRV_VERSION "\n");
 
-       chip = NULL;
-       for (i = 0; i < ARRAY_SIZE(m41t80_chip_info_tbl); i++) {
-               if (!strcmp(m41t80_chip_info_tbl[i].name, client->name)) {
-                       chip = &m41t80_chip_info_tbl[i];
-                       break;
-               }
-       }
-       if (!chip) {
-               dev_err(&client->dev, "%s is not supported\n", client->name);
-               rc = -ENODEV;
-               goto exit;
-       }
-
        clientdata = kzalloc(sizeof(*clientdata), GFP_KERNEL);
        if (!clientdata) {
                rc = -ENOMEM;
@@ -801,7 +761,7 @@ static int m41t80_probe(struct i2c_client *client)
        }
 
        clientdata->rtc = rtc;
-       clientdata->chip = chip;
+       clientdata->features = id->driver_data;
        i2c_set_clientdata(client, clientdata);
 
        /* Make sure HT (Halt Update) bit is cleared */
@@ -810,7 +770,7 @@ static int m41t80_probe(struct i2c_client *client)
                goto ht_err;
 
        if (rc & M41T80_ALHOUR_HT) {
-               if (chip->features & M41T80_FEATURE_HT) {
+               if (clientdata->features & M41T80_FEATURE_HT) {
                        m41t80_get_datetime(client, &tm);
                        dev_info(&client->dev, "HT bit was set!\n");
                        dev_info(&client->dev,
@@ -842,7 +802,7 @@ static int m41t80_probe(struct i2c_client *client)
                goto exit;
 
 #ifdef CONFIG_RTC_DRV_M41T80_WDT
-       if (chip->features & M41T80_FEATURE_HT) {
+       if (clientdata->features & M41T80_FEATURE_HT) {
                rc = misc_register(&wdt_dev);
                if (rc)
                        goto exit;
@@ -878,7 +838,7 @@ static int m41t80_remove(struct i2c_client *client)
        struct rtc_device *rtc = clientdata->rtc;
 
 #ifdef CONFIG_RTC_DRV_M41T80_WDT
-       if (clientdata->chip->features & M41T80_FEATURE_HT) {
+       if (clientdata->features & M41T80_FEATURE_HT) {
                misc_deregister(&wdt_dev);
                unregister_reboot_notifier(&wdt_notifier);
        }
@@ -896,6 +856,7 @@ static struct i2c_driver m41t80_driver = {
        },
        .probe = m41t80_probe,
        .remove = m41t80_remove,
+       .id_table = m41t80_id,
 };
 
 static int __init m41t80_rtc_init(void)
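
Replacing the m41t80 chip-info table with i2c_device_id entries works because each entry carries a kernel_ulong_t driver_data that the core hands back to probe() along with the matched entry, so per-variant feature bits can ride in the table instead of being looked up by strcmp() on client->name. A hedged fragment showing that flow (made-up "foo" chip names and feature macros; the registration boilerplate is the same as in the skeleton after the isl1208 hunks):

#include <linux/module.h>
#include <linux/i2c.h>

#define FOO_FEATURE_A   (1 << 0)        /* hypothetical capability bits */
#define FOO_FEATURE_B   (1 << 1)

static const struct i2c_device_id foo_id[] = {
        { "foo100", 0 },
        { "foo200", FOO_FEATURE_A },
        { "foo300", FOO_FEATURE_A | FOO_FEATURE_B },
        { }
};
MODULE_DEVICE_TABLE(i2c, foo_id);

static int foo_probe(struct i2c_client *client,
                     const struct i2c_device_id *id)
{
        /* The core already matched client->name against foo_id[],
         * so the variant's flags arrive in id->driver_data. */
        unsigned long features = id->driver_data;

        if (features & FOO_FEATURE_A)
                dev_info(&client->dev, "variant has feature A\n");

        return 0;
}
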
index 7683412970c4d234d9abe2bc323f020a17f111c0..ded3c0abad835736152c093d9ae5dc0402afec58 100644 (file)
@@ -98,7 +98,7 @@ static int max6900_i2c_read_regs(struct i2c_client *client, u8 *buf)
        rc = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
        if (rc != ARRAY_SIZE(msgs)) {
                dev_err(&client->dev, "%s: register read failed\n",
-                       __FUNCTION__);
+                       __func__);
                return -EIO;
        }
        return 0;
@@ -150,7 +150,7 @@ static int max6900_i2c_write_regs(struct i2c_client *client, u8 const *buf)
 
 write_failed:
        dev_err(&client->dev, "%s: register write failed\n",
-               __FUNCTION__);
+               __func__);
        return -EIO;
 }
 
@@ -214,7 +214,7 @@ static int max6900_i2c_clear_write_protect(struct i2c_client *client)
        rc = i2c_smbus_write_byte_data (client, MAX6900_REG_CONTROL_WRITE, 0);
        if (rc < 0) {
                dev_err(&client->dev, "%s: control register write failed\n",
-                       __FUNCTION__);
+                       __func__);
                return -EIO;
        }
        return 0;
index 1f956dc5d56eeb57806bb66b471422ae5734ad37..12f0310ae89c661621c6d9db3c993db084adacd6 100644 (file)
@@ -140,7 +140,7 @@ static int max6902_get_datetime(struct device *dev, struct rtc_time *dt)
        dt->tm_year -= 1900;
 
 #ifdef MAX6902_DEBUG
-       printk("\n%s : Read RTC values\n",__FUNCTION__);
+       printk("\n%s : Read RTC values\n",__func__);
        printk("tm_hour: %i\n",dt->tm_hour);
        printk("tm_min : %i\n",dt->tm_min);
        printk("tm_sec : %i\n",dt->tm_sec);
@@ -158,7 +158,7 @@ static int max6902_set_datetime(struct device *dev, struct rtc_time *dt)
        dt->tm_year = dt->tm_year+1900;
 
 #ifdef MAX6902_DEBUG
-       printk("\n%s : Setting RTC values\n",__FUNCTION__);
+       printk("\n%s : Setting RTC values\n",__func__);
        printk("tm_sec : %i\n",dt->tm_sec);
        printk("tm_min : %i\n",dt->tm_min);
        printk("tm_hour: %i\n",dt->tm_hour);
index b3317fcc16c378c62525ad8d29035ec24b7b04a1..0fc4c363078003e6d189f39a8b3f6f5a4ddd790a 100644 (file)
 #include <linux/bcd.h>
 #include <linux/rtc.h>
 
-#define DRV_VERSION "0.4.2"
-
-/* Addresses to scan: none
- * This chip cannot be reliably autodetected. An empty eeprom
- * located at 0x51 will pass the validation routine due to
- * the way the registers are implemented.
- */
-static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
-
-/* Module parameters */
-I2C_CLIENT_INSMOD;
+#define DRV_VERSION "0.4.3"
 
 #define PCF8563_REG_ST1                0x00 /* status */
 #define PCF8563_REG_ST2                0x01
@@ -53,8 +43,10 @@ I2C_CLIENT_INSMOD;
 #define PCF8563_SC_LV          0x80 /* low voltage */
 #define PCF8563_MO_C           0x80 /* century */
 
+static struct i2c_driver pcf8563_driver;
+
 struct pcf8563 {
-       struct i2c_client client;
+       struct rtc_device *rtc;
        /*
         * The meaning of MO_C bit varies by the chip type.
         * From PCF8563 datasheet: this bit is toggled when the years
@@ -72,16 +64,13 @@ struct pcf8563 {
        int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
 };
 
-static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind);
-static int pcf8563_detach(struct i2c_client *client);
-
 /*
  * In the routines that deal directly with the pcf8563 hardware, we use
  * rtc_time -- month 0-11, hour 0-23, yr = calendar year-epoch.
  */
 static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
 {
-       struct pcf8563 *pcf8563 = container_of(client, struct pcf8563, client);
+       struct pcf8563 *pcf8563 = i2c_get_clientdata(client);
        unsigned char buf[13] = { PCF8563_REG_ST1 };
 
        struct i2c_msg msgs[] = {
@@ -91,7 +80,7 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
 
        /* read registers */
        if ((i2c_transfer(client->adapter, msgs, 2)) != 2) {
-               dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
+               dev_err(&client->dev, "%s: read error\n", __func__);
                return -EIO;
        }
 
@@ -102,7 +91,7 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
        dev_dbg(&client->dev,
                "%s: raw data is st1=%02x, st2=%02x, sec=%02x, min=%02x, hr=%02x, "
                "mday=%02x, wday=%02x, mon=%02x, year=%02x\n",
-               __FUNCTION__,
+               __func__,
                buf[0], buf[1], buf[2], buf[3],
                buf[4], buf[5], buf[6], buf[7],
                buf[8]);
@@ -123,7 +112,7 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
 
        dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
                "mday=%d, mon=%d, year=%d, wday=%d\n",
-               __FUNCTION__,
+               __func__,
                tm->tm_sec, tm->tm_min, tm->tm_hour,
                tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
@@ -138,13 +127,13 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
 
 static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
 {
-       struct pcf8563 *pcf8563 = container_of(client, struct pcf8563, client);
+       struct pcf8563 *pcf8563 = i2c_get_clientdata(client);
        int i, err;
        unsigned char buf[9];
 
        dev_dbg(&client->dev, "%s: secs=%d, mins=%d, hours=%d, "
                "mday=%d, mon=%d, year=%d, wday=%d\n",
-               __FUNCTION__,
+               __func__,
                tm->tm_sec, tm->tm_min, tm->tm_hour,
                tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
@@ -174,7 +163,7 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
                if (err != sizeof(data)) {
                        dev_err(&client->dev,
                                "%s: err=%d addr=%02x, data=%02x\n",
-                               __FUNCTION__, err, data[0], data[1]);
+                               __func__, err, data[0], data[1]);
                        return -EIO;
                }
        };
@@ -219,7 +208,7 @@ static int pcf8563_validate_client(struct i2c_client *client)
                if (xfer != ARRAY_SIZE(msgs)) {
                        dev_err(&client->dev,
                                "%s: could not read register 0x%02X\n",
-                               __FUNCTION__, pattern[i].reg);
+                               __func__, pattern[i].reg);
 
                        return -EIO;
                }
@@ -231,7 +220,7 @@ static int pcf8563_validate_client(struct i2c_client *client)
                        dev_dbg(&client->dev,
                                "%s: pattern=%d, reg=%x, mask=0x%02x, min=%d, "
                                "max=%d, value=%d, raw=0x%02X\n",
-                               __FUNCTION__, i, pattern[i].reg, pattern[i].mask,
+                               __func__, i, pattern[i].reg, pattern[i].mask,
                                pattern[i].min, pattern[i].max,
                                value, buf);
 
@@ -257,100 +246,75 @@ static const struct rtc_class_ops pcf8563_rtc_ops = {
        .set_time       = pcf8563_rtc_set_time,
 };
 
-static int pcf8563_attach(struct i2c_adapter *adapter)
-{
-       return i2c_probe(adapter, &addr_data, pcf8563_probe);
-}
-
-static struct i2c_driver pcf8563_driver = {
-       .driver         = {
-               .name   = "pcf8563",
-       },
-       .id             = I2C_DRIVERID_PCF8563,
-       .attach_adapter = &pcf8563_attach,
-       .detach_client  = &pcf8563_detach,
-};
-
-static int pcf8563_probe(struct i2c_adapter *adapter, int address, int kind)
+static int pcf8563_probe(struct i2c_client *client,
+                               const struct i2c_device_id *id)
 {
        struct pcf8563 *pcf8563;
-       struct i2c_client *client;
-       struct rtc_device *rtc;
 
        int err = 0;
 
-       dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
+       dev_dbg(&client->dev, "%s\n", __func__);
 
-       if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
-               err = -ENODEV;
-               goto exit;
-       }
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+               return -ENODEV;
 
-       if (!(pcf8563 = kzalloc(sizeof(struct pcf8563), GFP_KERNEL))) {
-               err = -ENOMEM;
-               goto exit;
-       }
-
-       client = &pcf8563->client;
-       client->addr = address;
-       client->driver = &pcf8563_driver;
-       client->adapter = adapter;
-
-       strlcpy(client->name, pcf8563_driver.driver.name, I2C_NAME_SIZE);
+       pcf8563 = kzalloc(sizeof(struct pcf8563), GFP_KERNEL);
+       if (!pcf8563)
+               return -ENOMEM;
 
        /* Verify the chip is really an PCF8563 */
-       if (kind < 0) {
-               if (pcf8563_validate_client(client) < 0) {
-                       err = -ENODEV;
-                       goto exit_kfree;
-               }
-       }
-
-       /* Inform the i2c layer */
-       if ((err = i2c_attach_client(client)))
+       if (pcf8563_validate_client(client) < 0) {
+               err = -ENODEV;
                goto exit_kfree;
+       }
 
        dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
 
-       rtc = rtc_device_register(pcf8563_driver.driver.name, &client->dev,
-                               &pcf8563_rtc_ops, THIS_MODULE);
+       pcf8563->rtc = rtc_device_register(pcf8563_driver.driver.name,
+                               &client->dev, &pcf8563_rtc_ops, THIS_MODULE);
 
-       if (IS_ERR(rtc)) {
-               err = PTR_ERR(rtc);
-               goto exit_detach;
+       if (IS_ERR(pcf8563->rtc)) {
+               err = PTR_ERR(pcf8563->rtc);
+               goto exit_kfree;
        }
 
-       i2c_set_clientdata(client, rtc);
+       i2c_set_clientdata(client, pcf8563);
 
        return 0;
 
-exit_detach:
-       i2c_detach_client(client);
-
 exit_kfree:
        kfree(pcf8563);
 
-exit:
        return err;
 }
 
-static int pcf8563_detach(struct i2c_client *client)
+static int pcf8563_remove(struct i2c_client *client)
 {
-       struct pcf8563 *pcf8563 = container_of(client, struct pcf8563, client);
-       int err;
-       struct rtc_device *rtc = i2c_get_clientdata(client);
+       struct pcf8563 *pcf8563 = i2c_get_clientdata(client);
 
-       if (rtc)
-               rtc_device_unregister(rtc);
-
-       if ((err = i2c_detach_client(client)))
-               return err;
+       if (pcf8563->rtc)
+               rtc_device_unregister(pcf8563->rtc);
 
        kfree(pcf8563);
 
        return 0;
 }
 
+static const struct i2c_device_id pcf8563_id[] = {
+       { "pcf8563", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, pcf8563_id);
+
+static struct i2c_driver pcf8563_driver = {
+       .driver         = {
+               .name   = "rtc-pcf8563",
+       },
+       .probe          = pcf8563_probe,
+       .remove         = pcf8563_remove,
+       .id_table       = pcf8563_id,
+};
+
 static int __init pcf8563_init(void)
 {
        return i2c_add_driver(&pcf8563_driver);
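
The pcf8563 conversion also drops the struct i2c_client that used to be embedded in the driver's private data, so the container_of() lookups become i2c_get_clientdata() on a core-owned client. A small sketch of the access pattern, again with hypothetical names:

#include <linux/i2c.h>
#include <linux/rtc.h>

struct foo {
        struct rtc_device *rtc;
        int c_polarity;                 /* example per-device state */
};

/* Old style recovered the private struct with
 *      container_of(client, struct foo, client)
 * because the client was embedded in it; with a core-owned client the
 * lookup goes through clientdata set at probe() time instead. */
static int foo_read_time(struct i2c_client *client, struct rtc_time *tm)
{
        struct foo *foo = i2c_get_clientdata(client);

        if (foo->c_polarity)
                tm->tm_year += 100;     /* placeholder use of the state */
        return 0;
}
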
index 8b3997007506bbc44a0205e586acfd8f75e042ae..3d09d8f0b1f0244220bcd1562d109664fa87f266 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/mc146818rtc.h>
+#include <linux/rtc.h>
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/bcd.h>
index 8d300e6d0d9e92eedc96a87cc2055cc4062d7180..0c6257a034ff07b3dc85e9ce1db3847571c97fc4 100644 (file)
@@ -108,12 +108,10 @@ void rtc_proc_add_device(struct rtc_device *rtc)
        if (rtc->id == 0) {
                struct proc_dir_entry *ent;
 
-               ent = create_proc_entry("driver/rtc", 0, NULL);
-               if (ent) {
-                       ent->proc_fops = &rtc_proc_fops;
+               ent = proc_create_data("driver/rtc", 0, NULL,
+                                      &rtc_proc_fops, rtc);
+               if (ent)
                        ent->owner = rtc->owner;
-                       ent->data = rtc;
-               }
        }
 }
 
index 664e89a817eddbce2baeff7aa07ebbf792a5bf70..1c14d4497c4db8bffc37148b138e8b17070eaf28 100644 (file)
@@ -228,7 +228,7 @@ static int rs5c313_rtc_read_time(struct device *dev, struct rtc_time *tm)
                ndelay(700);    /* CE:L */
 
                if (cnt++ > 100) {
-                       dev_err(dev, "%s: timeout error\n", __FUNCTION__);
+                       dev_err(dev, "%s: timeout error\n", __func__);
                        return -EIO;
                }
        }
@@ -289,7 +289,7 @@ static int rs5c313_rtc_set_time(struct device *dev, struct rtc_time *tm)
                ndelay(700);    /* CE:L */
 
                if (cnt++ > 100) {
-                       dev_err(dev, "%s: timeout error\n", __FUNCTION__);
+                       dev_err(dev, "%s: timeout error\n", __func__);
                        return -EIO;
                }
        }
index 6b67b50979278df5c790ed2b950841e9843af621..56caf6b2c3e5d2fceface127d7dc197a839e0dac 100644 (file)
@@ -69,6 +69,15 @@ enum rtc_type {
        rtc_rv5c387a,
 };
 
+static const struct i2c_device_id rs5c372_id[] = {
+       { "rs5c372a", rtc_rs5c372a },
+       { "rs5c372b", rtc_rs5c372b },
+       { "rv5c386", rtc_rv5c386 },
+       { "rv5c387a", rtc_rv5c387a },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, rs5c372_id);
+
 /* REVISIT:  this assumes that:
  *  - we're in the 21st century, so it's safe to ignore the century
  *    bit for rv5c38[67] (REG_MONTH bit 7);
@@ -99,7 +108,7 @@ static int rs5c_get_regs(struct rs5c372 *rs5c)
         * least 80219 chips; this works around that bug.
         */
        if ((i2c_transfer(client->adapter, msgs, 1)) != 1) {
-               pr_debug("%s: can't read registers\n", rs5c->rtc->name);
+               dev_warn(&client->dev, "can't read registers\n");
                return -EIO;
        }
 
@@ -166,7 +175,7 @@ static int rs5c372_get_datetime(struct i2c_client *client, struct rtc_time *tm)
 
        dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
                "mday=%d, mon=%d, year=%d, wday=%d\n",
-               __FUNCTION__,
+               __func__,
                tm->tm_sec, tm->tm_min, tm->tm_hour,
                tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
@@ -181,7 +190,7 @@ static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
 
        dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d "
                "mday=%d, mon=%d, year=%d, wday=%d\n",
-               __FUNCTION__,
+               __func__,
                tm->tm_sec, tm->tm_min, tm->tm_hour,
                tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
@@ -195,7 +204,7 @@ static int rs5c372_set_datetime(struct i2c_client *client, struct rtc_time *tm)
        buf[7] = BIN2BCD(tm->tm_year - 100);
 
        if ((i2c_master_send(client, buf, 8)) != 8) {
-               dev_err(&client->dev, "%s: write error\n", __FUNCTION__);
+               dev_err(&client->dev, "%s: write error\n", __func__);
                return -EIO;
        }
 
@@ -220,7 +229,7 @@ static int rs5c372_get_trim(struct i2c_client *client, int *osc, int *trim)
                *osc = (tmp & RS5C372_TRIM_XSL) ? 32000 : 32768;
 
        if (trim) {
-               dev_dbg(&client->dev, "%s: raw trim=%x\n", __FUNCTION__, tmp);
+               dev_dbg(&client->dev, "%s: raw trim=%x\n", __func__, tmp);
                tmp &= RS5C372_TRIM_MASK;
                if (tmp & 0x3e) {
                        int t = tmp & 0x3f;
@@ -494,13 +503,14 @@ static void rs5c_sysfs_unregister(struct device *dev)
 
 static struct i2c_driver rs5c372_driver;
 
-static int rs5c372_probe(struct i2c_client *client)
+static int rs5c372_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
 {
        int err = 0;
        struct rs5c372 *rs5c372;
        struct rtc_time tm;
 
-       dev_dbg(&client->dev, "%s\n", __FUNCTION__);
+       dev_dbg(&client->dev, "%s\n", __func__);
 
        if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
                err = -ENODEV;
@@ -512,29 +522,17 @@ static int rs5c372_probe(struct i2c_client *client)
                goto exit;
        }
 
-       /* we read registers 0x0f then 0x00-0x0f; skip the first one */
-       rs5c372->regs=&rs5c372->buf[1];
-
        rs5c372->client = client;
        i2c_set_clientdata(client, rs5c372);
+       rs5c372->type = id->driver_data;
+
+       /* we read registers 0x0f then 0x00-0x0f; skip the first one */
+       rs5c372->regs = &rs5c372->buf[1];
 
        err = rs5c_get_regs(rs5c372);
        if (err < 0)
                goto exit_kfree;
 
-       if (strcmp(client->name, "rs5c372a") == 0)
-               rs5c372->type = rtc_rs5c372a;
-       else if (strcmp(client->name, "rs5c372b") == 0)
-               rs5c372->type = rtc_rs5c372b;
-       else if (strcmp(client->name, "rv5c386") == 0)
-               rs5c372->type = rtc_rv5c386;
-       else if (strcmp(client->name, "rv5c387a") == 0)
-               rs5c372->type = rtc_rv5c387a;
-       else {
-               rs5c372->type = rtc_rs5c372b;
-               dev_warn(&client->dev, "assuming rs5c372b\n");
-       }
-
        /* clock may be set for am/pm or 24 hr time */
        switch (rs5c372->type) {
        case rtc_rs5c372a:
@@ -651,6 +649,7 @@ static struct i2c_driver rs5c372_driver = {
        },
        .probe          = rs5c372_probe,
        .remove         = rs5c372_remove,
+       .id_table       = rs5c372_id,
 };
 
 static __init int rs5c372_init(void)
index e8abc90c32c5fe0e82938ad7d4b711807a8c7779..29f47bacfc77b5503086473f858fb1cc6124f33e 100644 (file)
 #define S35390A_FLAG_RESET     0x80
 #define S35390A_FLAG_TEST      0x01
 
+static const struct i2c_device_id s35390a_id[] = {
+       { "s35390a", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, s35390a_id);
+
 struct s35390a {
        struct i2c_client *client[8];
        struct rtc_device *rtc;
@@ -195,7 +201,8 @@ static const struct rtc_class_ops s35390a_rtc_ops = {
 
 static struct i2c_driver s35390a_driver;
 
-static int s35390a_probe(struct i2c_client *client)
+static int s35390a_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
 {
        int err;
        unsigned int i;
@@ -296,6 +303,7 @@ static struct i2c_driver s35390a_driver = {
        },
        .probe          = s35390a_probe,
        .remove         = s35390a_remove,
+       .id_table       = s35390a_id,
 };
 
 static int __init s35390a_rtc_init(void)
index 9f4d5129a496a445bce3ae90c89bfc9e0746f875..f26e0cad8f167c5221fd5a480c42e7f2fcdb2baa 100644 (file)
@@ -68,7 +68,7 @@ static void s3c_rtc_setaie(int to)
 {
        unsigned int tmp;
 
-       pr_debug("%s: aie=%d\n", __FUNCTION__, to);
+       pr_debug("%s: aie=%d\n", __func__, to);
 
        tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
 
@@ -82,7 +82,7 @@ static void s3c_rtc_setpie(int to)
 {
        unsigned int tmp;
 
-       pr_debug("%s: pie=%d\n", __FUNCTION__, to);
+       pr_debug("%s: pie=%d\n", __func__, to);
 
        spin_lock_irq(&s3c_rtc_pie_lock);
        tmp = readb(s3c_rtc_base + S3C2410_TICNT) & ~S3C2410_TICNT_ENABLE;
@@ -457,7 +457,7 @@ static int s3c_rtc_probe(struct platform_device *pdev)
        struct resource *res;
        int ret;
 
-       pr_debug("%s: probe=%p\n", __FUNCTION__, pdev);
+       pr_debug("%s: probe=%p\n", __func__, pdev);
 
        /* find the IRQs */
 
index c594b34c67679e3f1d7df1ba143854217bc38455..110699bb478751ac619b1e0a6869059dc99318a8 100644 (file)
@@ -361,7 +361,7 @@ static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
 
        dev_dbg(dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
                "mday=%d, mon=%d, year=%d, wday=%d\n",
-               __FUNCTION__,
+               __func__,
                tm->tm_sec, tm->tm_min, tm->tm_hour,
                tm->tm_mday, tm->tm_mon + 1, tm->tm_year, tm->tm_wday);
 
index 4d27ccc4fc069c174d5785ddfa7ad27aa55b35b4..2531ce4c9db082d94cd3b1afba7ac7f4acf93f85 100644 (file)
@@ -145,6 +145,8 @@ rtc_sysfs_set_wakealarm(struct device *dev, struct device_attribute *attr,
        unsigned long now, alarm;
        struct rtc_wkalrm alm;
        struct rtc_device *rtc = to_rtc_device(dev);
+       char *buf_ptr;
+       int adjust = 0;
 
        /* Only request alarms that trigger in the future.  Disable them
         * by writing another time, e.g. 0 meaning Jan 1 1970 UTC.
@@ -154,7 +156,15 @@ rtc_sysfs_set_wakealarm(struct device *dev, struct device_attribute *attr,
                return retval;
        rtc_tm_to_time(&alm.time, &now);
 
-       alarm = simple_strtoul(buf, NULL, 0);
+       buf_ptr = (char *)buf;
+       if (*buf_ptr == '+') {
+               buf_ptr++;
+               adjust = 1;
+       }
+       alarm = simple_strtoul(buf_ptr, NULL, 0);
+       if (adjust) {
+               alarm += now;
+       }
        if (alarm > now) {
                /* Avoid accidentally clobbering active alarms; we can't
                 * entirely prevent that here, without even the minimal
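
The wakealarm change accepts either an absolute time in seconds since the epoch or, with a leading '+', an offset that is added to the current RTC time. The parsing is plain string handling; a standalone user-space sketch of the same idea (illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* "1234567890" sets an absolute alarm; "+600" means now + 600 seconds,
 * mirroring the sysfs wakealarm parsing in the hunk above. */
static unsigned long parse_wakealarm(const char *buf, unsigned long now)
{
        int relative = 0;

        if (*buf == '+') {
                buf++;
                relative = 1;
        }
        return strtoul(buf, NULL, 0) + (relative ? now : 0);
}

int main(void)
{
        unsigned long now = (unsigned long)time(NULL);

        printf("absolute: %lu\n", parse_wakealarm("1230768000", now));
        printf("relative: %lu\n", parse_wakealarm("+600", now));
        return 0;
}
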
index 254c9fce27dabf324c388859363f76614bdde2bb..bc930022004a58fc2dcb344c86bf9da180abd317 100644 (file)
@@ -147,7 +147,7 @@ static int __devexit test_remove(struct platform_device *plat_dev)
        return 0;
 }
 
-static struct platform_driver test_drv = {
+static struct platform_driver test_driver = {
        .probe  = test_probe,
        .remove = __devexit_p(test_remove),
        .driver = {
@@ -160,7 +160,7 @@ static int __init test_init(void)
 {
        int err;
 
-       if ((err = platform_driver_register(&test_drv)))
+       if ((err = platform_driver_register(&test_driver)))
                return err;
 
        if ((test0 = platform_device_alloc("rtc-test", 0)) == NULL) {
@@ -191,7 +191,7 @@ exit_free_test0:
        platform_device_put(test0);
 
 exit_driver_unregister:
-       platform_driver_unregister(&test_drv);
+       platform_driver_unregister(&test_driver);
        return err;
 }
 
@@ -199,7 +199,7 @@ static void __exit test_exit(void)
 {
        platform_device_unregister(test0);
        platform_device_unregister(test1);
-       platform_driver_unregister(&test_drv);
+       platform_driver_unregister(&test_driver);
 }
 
 MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
index 24203a06051a98f2e1d4a8110129d454f5e8690a..10025d84026812e0eab2fe1f3fa5179e411b2aca 100644 (file)
@@ -107,7 +107,7 @@ static int v3020_read_time(struct device *dev, struct rtc_time *dt)
        dt->tm_year = BCD2BIN(tmp)+100;
 
 #ifdef DEBUG
-       printk("\n%s : Read RTC values\n",__FUNCTION__);
+       printk("\n%s : Read RTC values\n",__func__);
        printk("tm_hour: %i\n",dt->tm_hour);
        printk("tm_min : %i\n",dt->tm_min);
        printk("tm_sec : %i\n",dt->tm_sec);
@@ -126,7 +126,7 @@ static int v3020_set_time(struct device *dev, struct rtc_time *dt)
        struct v3020 *chip = dev_get_drvdata(dev);
 
 #ifdef DEBUG
-       printk("\n%s : Setting RTC values\n",__FUNCTION__);
+       printk("\n%s : Setting RTC values\n",__func__);
        printk("tm_sec : %i\n",dt->tm_sec);
        printk("tm_min : %i\n",dt->tm_min);
        printk("tm_hour: %i\n",dt->tm_hour);
index b90fb1866ce98735eb329c7a266dd32e902b4ee3..eaf55945f21bbf860318a082b24463caa8648ebc 100644 (file)
 #include <linux/rtc.h>
 #include <linux/delay.h>
 
-#define DRV_VERSION "1.0.7"
-
-/* Addresses to scan: none. This chip is located at
- * 0x6f and uses a two bytes register addressing.
- * Two bytes need to be written to read a single register,
- * while most other chips just require one and take the second
- * one as the data to be written. To prevent corrupting
- * unknown chips, the user must explicitly set the probe parameter.
- */
-
-static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
-
-/* Insmod parameters */
-I2C_CLIENT_INSMOD;
+#define DRV_VERSION "1.0.8"
 
 /* offsets into CCR area */
 
@@ -91,19 +78,7 @@ I2C_CLIENT_INSMOD;
 
 #define X1205_HR_MIL           0x80    /* Set in ccr.hour for 24 hr mode */
 
-/* Prototypes */
-static int x1205_attach(struct i2c_adapter *adapter);
-static int x1205_detach(struct i2c_client *client);
-static int x1205_probe(struct i2c_adapter *adapter, int address, int kind);
-
-static struct i2c_driver x1205_driver = {
-       .driver         = {
-               .name   = "x1205",
-       },
-       .id             = I2C_DRIVERID_X1205,
-       .attach_adapter = &x1205_attach,
-       .detach_client  = &x1205_detach,
-};
+static struct i2c_driver x1205_driver;
 
 /*
  * In the routines that deal directly with the x1205 hardware, we use
@@ -124,14 +99,14 @@ static int x1205_get_datetime(struct i2c_client *client, struct rtc_time *tm,
 
        /* read date registers */
        if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
-               dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
+               dev_err(&client->dev, "%s: read error\n", __func__);
                return -EIO;
        }
 
        dev_dbg(&client->dev,
                "%s: raw read data - sec=%02x, min=%02x, hr=%02x, "
                "mday=%02x, mon=%02x, year=%02x, wday=%02x, y2k=%02x\n",
-               __FUNCTION__,
+               __func__,
                buf[0], buf[1], buf[2], buf[3],
                buf[4], buf[5], buf[6], buf[7]);
 
@@ -146,7 +121,7 @@ static int x1205_get_datetime(struct i2c_client *client, struct rtc_time *tm,
 
        dev_dbg(&client->dev, "%s: tm is secs=%d, mins=%d, hours=%d, "
                "mday=%d, mon=%d, year=%d, wday=%d\n",
-               __FUNCTION__,
+               __func__,
                tm->tm_sec, tm->tm_min, tm->tm_hour,
                tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
@@ -164,7 +139,7 @@ static int x1205_get_status(struct i2c_client *client, unsigned char *sr)
 
        /* read status register */
        if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
-               dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
+               dev_err(&client->dev, "%s: read error\n", __func__);
                return -EIO;
        }
 
@@ -187,7 +162,7 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
 
        dev_dbg(&client->dev,
                "%s: secs=%d, mins=%d, hours=%d\n",
-               __FUNCTION__,
+               __func__,
                tm->tm_sec, tm->tm_min, tm->tm_hour);
 
        buf[CCR_SEC] = BIN2BCD(tm->tm_sec);
@@ -200,7 +175,7 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
        if (datetoo) {
                dev_dbg(&client->dev,
                        "%s: mday=%d, mon=%d, year=%d, wday=%d\n",
-                       __FUNCTION__,
+                       __func__,
                        tm->tm_mday, tm->tm_mon, tm->tm_year, tm->tm_wday);
 
                buf[CCR_MDAY] = BIN2BCD(tm->tm_mday);
@@ -216,12 +191,12 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
 
        /* this sequence is required to unlock the chip */
        if ((xfer = i2c_master_send(client, wel, 3)) != 3) {
-               dev_err(&client->dev, "%s: wel - %d\n", __FUNCTION__, xfer);
+               dev_err(&client->dev, "%s: wel - %d\n", __func__, xfer);
                return -EIO;
        }
 
        if ((xfer = i2c_master_send(client, rwel, 3)) != 3) {
-               dev_err(&client->dev, "%s: rwel - %d\n", __FUNCTION__, xfer);
+               dev_err(&client->dev, "%s: rwel - %d\n", __func__, xfer);
                return -EIO;
        }
 
@@ -233,7 +208,7 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
                if (xfer != 3) {
                        dev_err(&client->dev,
                                "%s: xfer=%d addr=%02x, data=%02x\n",
-                               __FUNCTION__,
+                               __func__,
                                 xfer, rdata[1], rdata[2]);
                        return -EIO;
                }
@@ -241,7 +216,7 @@ static int x1205_set_datetime(struct i2c_client *client, struct rtc_time *tm,
 
        /* disable further writes */
        if ((xfer = i2c_master_send(client, diswe, 3)) != 3) {
-               dev_err(&client->dev, "%s: diswe - %d\n", __FUNCTION__, xfer);
+               dev_err(&client->dev, "%s: diswe - %d\n", __func__, xfer);
                return -EIO;
        }
 
@@ -274,11 +249,11 @@ static int x1205_get_dtrim(struct i2c_client *client, int *trim)
 
        /* read dtr register */
        if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
-               dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
+               dev_err(&client->dev, "%s: read error\n", __func__);
                return -EIO;
        }
 
-       dev_dbg(&client->dev, "%s: raw dtr=%x\n", __FUNCTION__, dtr);
+       dev_dbg(&client->dev, "%s: raw dtr=%x\n", __func__, dtr);
 
        *trim = 0;
 
@@ -306,11 +281,11 @@ static int x1205_get_atrim(struct i2c_client *client, int *trim)
 
        /* read atr register */
        if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) {
-               dev_err(&client->dev, "%s: read error\n", __FUNCTION__);
+               dev_err(&client->dev, "%s: read error\n", __func__);
                return -EIO;
        }
 
-       dev_dbg(&client->dev, "%s: raw atr=%x\n", __FUNCTION__, atr);
+       dev_dbg(&client->dev, "%s: raw atr=%x\n", __func__, atr);
 
        /* atr is a two's complement value on 6 bits,
         * perform sign extension. The formula is
@@ -319,11 +294,11 @@ static int x1205_get_atrim(struct i2c_client *client, int *trim)
        if (atr & 0x20)
                atr |= 0xC0;
 
-       dev_dbg(&client->dev, "%s: raw atr=%x (%d)\n", __FUNCTION__, atr, atr);
+       dev_dbg(&client->dev, "%s: raw atr=%x (%d)\n", __func__, atr, atr);
 
        *trim = (atr * 250) + 11000;
 
-       dev_dbg(&client->dev, "%s: real=%d\n", __FUNCTION__, *trim);
+       dev_dbg(&client->dev, "%s: real=%d\n", __func__, *trim);
 
        return 0;
 }
@@ -377,7 +352,7 @@ static int x1205_validate_client(struct i2c_client *client)
                if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) {
                        dev_err(&client->dev,
                                "%s: could not read register %x\n",
-                               __FUNCTION__, probe_zero_pattern[i]);
+                               __func__, probe_zero_pattern[i]);
 
                        return -EIO;
                }
@@ -385,7 +360,7 @@ static int x1205_validate_client(struct i2c_client *client)
                if ((buf & probe_zero_pattern[i+1]) != 0) {
                        dev_err(&client->dev,
                                "%s: register=%02x, zero pattern=%d, value=%x\n",
-                               __FUNCTION__, probe_zero_pattern[i], i, buf);
+                               __func__, probe_zero_pattern[i], i, buf);
 
                        return -ENODEV;
                }
@@ -405,7 +380,7 @@ static int x1205_validate_client(struct i2c_client *client)
                if ((xfer = i2c_transfer(client->adapter, msgs, 2)) != 2) {
                        dev_err(&client->dev,
                                "%s: could not read register %x\n",
-                               __FUNCTION__, probe_limits_pattern[i].reg);
+                               __func__, probe_limits_pattern[i].reg);
 
                        return -EIO;
                }
@@ -416,7 +391,7 @@ static int x1205_validate_client(struct i2c_client *client)
                        value < probe_limits_pattern[i].min) {
                        dev_dbg(&client->dev,
                                "%s: register=%x, lim pattern=%d, value=%d\n",
-                               __FUNCTION__, probe_limits_pattern[i].reg,
+                               __func__, probe_limits_pattern[i].reg,
                                i, value);
 
                        return -ENODEV;
@@ -497,58 +472,50 @@ static ssize_t x1205_sysfs_show_dtrim(struct device *dev,
 }
 static DEVICE_ATTR(dtrim, S_IRUGO, x1205_sysfs_show_dtrim, NULL);
 
-static int x1205_attach(struct i2c_adapter *adapter)
+static int x1205_sysfs_register(struct device *dev)
 {
-       return i2c_probe(adapter, &addr_data, x1205_probe);
+       int err;
+
+       err = device_create_file(dev, &dev_attr_atrim);
+       if (err)
+               return err;
+
+       err = device_create_file(dev, &dev_attr_dtrim);
+       if (err)
+               device_remove_file(dev, &dev_attr_atrim);
+
+       return err;
 }
 
-static int x1205_probe(struct i2c_adapter *adapter, int address, int kind)
+static void x1205_sysfs_unregister(struct device *dev)
+{
+       device_remove_file(dev, &dev_attr_atrim);
+       device_remove_file(dev, &dev_attr_dtrim);
+}
+
+
+static int x1205_probe(struct i2c_client *client,
+                       const struct i2c_device_id *id)
 {
        int err = 0;
        unsigned char sr;
-       struct i2c_client *client;
        struct rtc_device *rtc;
 
-       dev_dbg(&adapter->dev, "%s\n", __FUNCTION__);
-
-       if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
-               err = -ENODEV;
-               goto exit;
-       }
-
-       if (!(client = kzalloc(sizeof(struct i2c_client), GFP_KERNEL))) {
-               err = -ENOMEM;
-               goto exit;
-       }
-
-       /* I2C client */
-       client->addr = address;
-       client->driver = &x1205_driver;
-       client->adapter = adapter;
-
-       strlcpy(client->name, x1205_driver.driver.name, I2C_NAME_SIZE);
+       dev_dbg(&client->dev, "%s\n", __func__);
 
-       /* Verify the chip is really an X1205 */
-       if (kind < 0) {
-               if (x1205_validate_client(client) < 0) {
-                       err = -ENODEV;
-                       goto exit_kfree;
-               }
-       }
+       if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+               return -ENODEV;
 
-       /* Inform the i2c layer */
-       if ((err = i2c_attach_client(client)))
-               goto exit_kfree;
+       if (x1205_validate_client(client) < 0)
+               return -ENODEV;
 
        dev_info(&client->dev, "chip found, driver version " DRV_VERSION "\n");
 
        rtc = rtc_device_register(x1205_driver.driver.name, &client->dev,
                                &x1205_rtc_ops, THIS_MODULE);
 
-       if (IS_ERR(rtc)) {
-               err = PTR_ERR(rtc);
-               goto exit_detach;
-       }
+       if (IS_ERR(rtc))
+               return PTR_ERR(rtc);
 
        i2c_set_clientdata(client, rtc);
 
@@ -565,45 +532,42 @@ static int x1205_probe(struct i2c_adapter *adapter, int address, int kind)
        else
                dev_err(&client->dev, "couldn't read status\n");
 
-       err = device_create_file(&client->dev, &dev_attr_atrim);
-       if (err) goto exit_devreg;
-       err = device_create_file(&client->dev, &dev_attr_dtrim);
-       if (err) goto exit_atrim;
+       err = x1205_sysfs_register(&client->dev);
+       if (err)
+               goto exit_devreg;
 
        return 0;
 
-exit_atrim:
-       device_remove_file(&client->dev, &dev_attr_atrim);
-
 exit_devreg:
        rtc_device_unregister(rtc);
 
-exit_detach:
-       i2c_detach_client(client);
-
-exit_kfree:
-       kfree(client);
-
-exit:
        return err;
 }
 
-static int x1205_detach(struct i2c_client *client)
+static int x1205_remove(struct i2c_client *client)
 {
-       int err;
        struct rtc_device *rtc = i2c_get_clientdata(client);
 
-       if (rtc)
-               rtc_device_unregister(rtc);
-
-       if ((err = i2c_detach_client(client)))
-               return err;
-
-       kfree(client);
-
+       rtc_device_unregister(rtc);
+       x1205_sysfs_unregister(&client->dev);
        return 0;
 }
 
+static const struct i2c_device_id x1205_id[] = {
+       { "x1205", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, x1205_id);
+
+static struct i2c_driver x1205_driver = {
+       .driver         = {
+               .name   = "rtc-x1205",
+       },
+       .probe          = x1205_probe,
+       .remove         = x1205_remove,
+       .id_table       = x1205_id,
+};
+
 static int __init x1205_init(void)
 {
        return i2c_add_driver(&x1205_driver);
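
Like isl1208 above, x1205 now funnels its sysfs files through small register/unregister helpers so probe() has a single rollback point. An alternative that this patch does not use, but that the same kernel provides, is to group the attributes and let sysfs_create_group()/sysfs_remove_group() handle the partial-failure cleanup:

#include <linux/device.h>
#include <linux/sysfs.h>

/* dev_attr_atrim / dev_attr_dtrim as declared by DEVICE_ATTR() above. */
static struct attribute *foo_attrs[] = {
        &dev_attr_atrim.attr,
        &dev_attr_dtrim.attr,
        NULL
};

static const struct attribute_group foo_attr_group = {
        .attrs = foo_attrs,
};

/* sysfs_create_group() removes any files it already created if a later
 * one fails, so no manual device_remove_file() rollback is needed. */
static int foo_sysfs_register(struct device *dev)
{
        return sysfs_create_group(&dev->kobj, &foo_attr_group);
}

static void foo_sysfs_unregister(struct device *dev)
{
        sysfs_remove_group(&dev->kobj, &foo_attr_group);
}
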
index 5a888704a8d002ca0d846308f51e052ec4a3301b..4f4e7cf105d4985c425aedf30f79a72ed3fb3ab6 100644 (file)
@@ -5,7 +5,7 @@
 CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w
 
 obj-y += s390mach.o sysinfo.o s390_rdev.o
-obj-y += cio/ block/ char/ crypto/ net/ scsi/
+obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/
 
 drivers-y += drivers/s390/built-in.o
 
index 556063e8f7a91d1163450cc63d1cd9128e6a77b4..03c0e40a92ff9fafc20694ee0127822d03a4db4f 100644 (file)
@@ -157,6 +157,7 @@ static int dasd_devices_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations dasd_devices_file_ops = {
+       .owner          = THIS_MODULE,
        .open           = dasd_devices_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -311,17 +312,16 @@ out_error:
 int
 dasd_proc_init(void)
 {
-       dasd_proc_root_entry = proc_mkdir("dasd", &proc_root);
+       dasd_proc_root_entry = proc_mkdir("dasd", NULL);
        if (!dasd_proc_root_entry)
                goto out_nodasd;
        dasd_proc_root_entry->owner = THIS_MODULE;
-       dasd_devices_entry = create_proc_entry("devices",
-                                              S_IFREG | S_IRUGO | S_IWUSR,
-                                              dasd_proc_root_entry);
+       dasd_devices_entry = proc_create("devices",
+                                        S_IFREG | S_IRUGO | S_IWUSR,
+                                        dasd_proc_root_entry,
+                                        &dasd_devices_file_ops);
        if (!dasd_devices_entry)
                goto out_nodevices;
-       dasd_devices_entry->proc_fops = &dasd_devices_file_ops;
-       dasd_devices_entry->owner = THIS_MODULE;
        dasd_statistics_entry = create_proc_entry("statistics",
                                                  S_IFREG | S_IRUGO | S_IWUSR,
                                                  dasd_proc_root_entry);
@@ -335,7 +335,7 @@ dasd_proc_init(void)
  out_nostatistics:
        remove_proc_entry("devices", dasd_proc_root_entry);
  out_nodevices:
-       remove_proc_entry("dasd", &proc_root);
+       remove_proc_entry("dasd", NULL);
  out_nodasd:
        return -ENOENT;
 }
@@ -345,5 +345,5 @@ dasd_proc_exit(void)
 {
        remove_proc_entry("devices", dasd_proc_root_entry);
        remove_proc_entry("statistics", dasd_proc_root_entry);
-       remove_proc_entry("dasd", &proc_root);
+       remove_proc_entry("dasd", NULL);
 }
index 04787eab10168f20465725e8a0da0e36563addba..bb52d2fbac183b7a98e4e7872ace113a92a8cecc 100644 (file)
@@ -36,7 +36,7 @@ static int dcssblk_open(struct inode *inode, struct file *filp);
 static int dcssblk_release(struct inode *inode, struct file *filp);
 static int dcssblk_make_request(struct request_queue *q, struct bio *bio);
 static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum,
-                                unsigned long *data);
+                                void **kaddr, unsigned long *pfn);
 
 static char dcssblk_segments[DCSSBLK_PARM_LEN] = "\0";
 
@@ -636,7 +636,7 @@ fail:
 
 static int
 dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
-                       unsigned long *data)
+                       void **kaddr, unsigned long *pfn)
 {
        struct dcssblk_dev_info *dev_info;
        unsigned long pgoff;
@@ -649,7 +649,9 @@ dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
        pgoff = secnum / (PAGE_SIZE / 512);
        if ((pgoff+1)*PAGE_SIZE-1 > dev_info->end - dev_info->start)
                return -ERANGE;
-       *data = (unsigned long) (dev_info->start+pgoff*PAGE_SIZE);
+       *kaddr = (void *) (dev_info->start+pgoff*PAGE_SIZE);
+       *pfn = virt_to_phys(*kaddr) >> PAGE_SHIFT;
+
        return 0;
 }
 
index 0e1f35c9ed9d4e0be6dc928d0d3bf69182799149..3e5653c92f4b5e5b6d1c5b1edd14b5f6eaa23af6 100644 (file)
@@ -982,15 +982,16 @@ tty3215_write(struct tty_struct * tty,
 /*
  * Put character routine for 3215 ttys
  */
-static void
+static int
 tty3215_put_char(struct tty_struct *tty, unsigned char ch)
 {
        struct raw3215_info *raw;
 
        if (!tty)
-               return;
+               return 0;
        raw = (struct raw3215_info *) tty->driver_data;
        raw3215_putchar(raw, ch);
+       return 1;
 }
 
 static void
index b8f35bc52b7b9d5cda1528ed1952b160a7dfa329..9e784d5f7f576460b69324361801bb9e749f81be 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/cpu.h>
 #include <linux/sysdev.h>
 #include <linux/workqueue.h>
+#include <asm/smp.h>
 #include "sclp.h"
 
 #define TAG    "sclp_config: "
@@ -19,9 +20,11 @@ struct conf_mgm_data {
        u8 ev_qualifier;
 } __attribute__((packed));
 
+#define EV_QUAL_CPU_CHANGE     1
 #define EV_QUAL_CAP_CHANGE     3
 
 static struct work_struct sclp_cpu_capability_work;
+static struct work_struct sclp_cpu_change_work;
 
 static void sclp_cpu_capability_notify(struct work_struct *work)
 {
@@ -37,13 +40,24 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
        put_online_cpus();
 }
 
+static void sclp_cpu_change_notify(struct work_struct *work)
+{
+       smp_rescan_cpus();
+}
+
 static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
 {
        struct conf_mgm_data *cdata;
 
        cdata = (struct conf_mgm_data *)(evbuf + 1);
-       if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE)
+       switch (cdata->ev_qualifier) {
+       case EV_QUAL_CPU_CHANGE:
+               schedule_work(&sclp_cpu_change_work);
+               break;
+       case EV_QUAL_CAP_CHANGE:
                schedule_work(&sclp_cpu_capability_work);
+               break;
+       }
 }
 
 static struct sclp_register sclp_conf_register =
@@ -57,6 +71,7 @@ static int __init sclp_conf_init(void)
        int rc;
 
        INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify);
+       INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify);
 
        rc = sclp_register(&sclp_conf_register);
        if (rc) {
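
The sclp_config change adds a second work item and dispatches on the event qualifier because the receiver callback runs in a context that must not sleep, while smp_rescan_cpus() may. The pattern is the usual INIT_WORK() once, schedule_work() per event; a generic hedged sketch (hypothetical event codes, not the sclp ones):

#include <linux/module.h>
#include <linux/workqueue.h>

#define FOO_EV_ADD      1               /* hypothetical event qualifiers */
#define FOO_EV_RESCAN   2

static void foo_add_worker(struct work_struct *work)
{
        /* runs later in process context, may sleep */
}

static void foo_rescan_worker(struct work_struct *work)
{
        /* likewise */
}

static struct work_struct foo_add_work;
static struct work_struct foo_rescan_work;

/* Called from the (atomic) event receiver: only queue, never sleep. */
static void foo_event(u8 qualifier)
{
        switch (qualifier) {
        case FOO_EV_ADD:
                schedule_work(&foo_add_work);
                break;
        case FOO_EV_RESCAN:
                schedule_work(&foo_rescan_work);
                break;
        }
}

static int __init foo_init(void)
{
        INIT_WORK(&foo_add_work, foo_add_worker);
        INIT_WORK(&foo_rescan_work, foo_rescan_worker);
        return 0;
}
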
index e3b3d390b4a3b2294f23428c6e5b70b688298f23..40b11521cd20246ee308a34c92909f7c849058e9 100644 (file)
@@ -412,14 +412,15 @@ sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
  * - including previous characters from sclp_tty_put_char() and strings from
  * sclp_write() without final '\n' - will be written.
  */
-static void
+static int
 sclp_tty_put_char(struct tty_struct *tty, unsigned char ch)
 {
        sclp_tty_chars[sclp_tty_chars_count++] = ch;
        if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) {
                sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count);
                sclp_tty_chars_count = 0;
-       }
+       }
+       return 1;
 }
 
 /*
index ed507594e62ba0adf12fde30a1ecd23a91509180..35707c04e613fd7708acea79ccbbc4119d3d4851 100644 (file)
@@ -524,11 +524,15 @@ sclp_vt220_close(struct tty_struct *tty, struct file *filp)
  * NOTE: include/linux/tty_driver.h specifies that a character should be
  * ignored if there is no room in the queue. This driver implements a different
  * semantic in that it will block when there is no more room left.
+ *
+ * FIXME: putchar can currently be called from BH and other non-blocking
+ * handlers, so this semantic isn't a good idea.
  */
-static void
+static int
 sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
 {
        __sclp_vt220_write(&ch, 1, 0, 0, 1);
+       return 1;
 }
 
 /*
index c9b96d51b28f25d36ab9fa85fbc34e60648fc3e0..e7c888c14e71f847b6ba8aa05e2797c700a2ca6a 100644 (file)
@@ -111,6 +111,7 @@ static int tape_proc_open(struct inode *inode, struct file *file)
 
 static const struct file_operations tape_proc_ops =
 {
+       .owner          = THIS_MODULE,
        .open           = tape_proc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -124,14 +125,12 @@ void
 tape_proc_init(void)
 {
        tape_proc_devices =
-               create_proc_entry ("tapedevices", S_IFREG | S_IRUGO | S_IWUSR,
-                                  &proc_root);
+               proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL,
+                           &tape_proc_ops);
        if (tape_proc_devices == NULL) {
                PRINT_WARN("tape: Cannot register procfs entry tapedevices\n");
                return;
        }
-       tape_proc_devices->proc_fops = &tape_proc_ops;
-       tape_proc_devices->owner = THIS_MODULE;
 }
 
 /*
@@ -141,5 +140,5 @@ void
 tape_proc_cleanup(void)
 {
        if (tape_proc_devices != NULL)
-               remove_proc_entry ("tapedevices", &proc_root);
+               remove_proc_entry ("tapedevices", NULL);
 }
index 70b1980a08b66380c075355d81101c90a784032f..c1f2adefad410287fe170ae77a8baa47a8611187 100644 (file)
@@ -965,7 +965,7 @@ tty3270_write_room(struct tty_struct *tty)
  * Insert character into the screen at the current position with the
  * current color and highlight. This function does NOT do cursor movement.
  */
-static void
+static int
 tty3270_put_character(struct tty3270 *tp, char ch)
 {
        struct tty3270_line *line;
@@ -986,6 +986,7 @@ tty3270_put_character(struct tty3270 *tp, char ch)
        cell->character = tp->view.ascebc[(unsigned int) ch];
        cell->highlight = tp->highlight;
        cell->f_color = tp->f_color;
+       return 1;
 }
 
 /*
index e8597ec92247b2eb9127f32448b24c47b12a50e1..40ef948fcb3a5e9b2441bacb6a78fd65f9a693cf 100644 (file)
@@ -374,13 +374,10 @@ cio_ignore_proc_init (void)
 {
        struct proc_dir_entry *entry;
 
-       entry = create_proc_entry ("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR,
-                                  &proc_root);
+       entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL,
+                           &cio_ignore_proc_fops);
        if (!entry)
                return -ENOENT;
-
-       entry->proc_fops = &cio_ignore_proc_fops;
-
        return 0;
 }
 
index fe1ad1722158818f9ba594a455bb7f3b542e7fbe..26a930e832bdab58c7f649c866124200e1f3f8c8 100644 (file)
@@ -152,44 +152,89 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
        return 0;
 }
 
+static int __get_next_bus_id(const char **buf, char *bus_id)
+{
+       int rc, len;
+       char *start, *end;
+
+       start = (char *)*buf;
+       end = strchr(start, ',');
+       if (!end) {
+               /* Last entry. Strip trailing newline, if applicable. */
+               end = strchr(start, '\n');
+               if (end)
+                       *end = '\0';
+               len = strlen(start) + 1;
+       } else {
+               len = end - start + 1;
+               end++;
+       }
+       if (len < BUS_ID_SIZE) {
+               strlcpy(bus_id, start, len);
+               rc = 0;
+       } else
+               rc = -EINVAL;
+       *buf = end;
+       return rc;
+}
+
+static int __is_valid_bus_id(char bus_id[BUS_ID_SIZE])
+{
+       int cssid, ssid, devno;
+
+       /* Must be of form %x.%x.%04x */
+       if (sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) != 3)
+               return 0;
+       return 1;
+}
+
 /**
- * ccwgroup_create() - create and register a ccw group device
+ * ccwgroup_create_from_string() - create and register a ccw group device
  * @root: parent device for the new device
  * @creator_id: identifier of creating driver
  * @cdrv: ccw driver of slave devices
- * @argc: number of slave devices
- * @argv: bus ids of slave devices
+ * @num_devices: number of slave devices
+ * @buf: buffer containing comma separated bus ids of slave devices
  *
  * Create and register a new ccw group device as a child of @root. Slave
- * devices are obtained from the list of bus ids given in @argv[] and must all
+ * devices are obtained from the list of bus ids given in @buf and must all
  * belong to @cdrv.
  * Returns:
  *  %0 on success and an error code on failure.
  * Context:
  *  non-atomic
  */
-int ccwgroup_create(struct device *root, unsigned int creator_id,
-                   struct ccw_driver *cdrv, int argc, char *argv[])
+int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
+                               struct ccw_driver *cdrv, int num_devices,
+                               const char *buf)
 {
        struct ccwgroup_device *gdev;
-       int i;
-       int rc;
+       int rc, i;
+       char tmp_bus_id[BUS_ID_SIZE];
+       const char *curr_buf;
 
-       if (argc > 256) /* disallow dumb users */
-               return -EINVAL;
-
-       gdev = kzalloc(sizeof(*gdev) + argc*sizeof(gdev->cdev[0]), GFP_KERNEL);
+       gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]),
+                      GFP_KERNEL);
        if (!gdev)
                return -ENOMEM;
 
        atomic_set(&gdev->onoff, 0);
        mutex_init(&gdev->reg_mutex);
        mutex_lock(&gdev->reg_mutex);
-       for (i = 0; i < argc; i++) {
-               gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
-
-               /* all devices have to be of the same type in
-                * order to be grouped */
+       curr_buf = buf;
+       for (i = 0; i < num_devices && curr_buf; i++) {
+               rc = __get_next_bus_id(&curr_buf, tmp_bus_id);
+               if (rc != 0)
+                       goto error;
+               if (!__is_valid_bus_id(tmp_bus_id)) {
+                       rc = -EINVAL;
+                       goto error;
+               }
+               gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id);
+               /*
+                * All devices have to be of the same type in
+                * order to be grouped.
+                */
                if (!gdev->cdev[i]
                    || gdev->cdev[i]->id.driver_info !=
                    gdev->cdev[0]->id.driver_info) {
@@ -203,9 +248,18 @@ int ccwgroup_create(struct device *root, unsigned int creator_id,
                }
                dev_set_drvdata(&gdev->cdev[i]->dev, gdev);
        }
-
+       /* Check for sufficient number of bus ids. */
+       if (i < num_devices && !curr_buf) {
+               rc = -EINVAL;
+               goto error;
+       }
+       /* Check for trailing stuff. */
+       if (i == num_devices && strlen(curr_buf) > 0) {
+               rc = -EINVAL;
+               goto error;
+       }
        gdev->creator_id = creator_id;
-       gdev->count = argc;
+       gdev->count = num_devices;
        gdev->dev.bus = &ccwgroup_bus_type;
        gdev->dev.parent = root;
        gdev->dev.release = ccwgroup_release;
@@ -233,7 +287,7 @@ int ccwgroup_create(struct device *root, unsigned int creator_id,
        device_remove_file(&gdev->dev, &dev_attr_ungroup);
        device_unregister(&gdev->dev);
 error:
-       for (i = 0; i < argc; i++)
+       for (i = 0; i < num_devices; i++)
                if (gdev->cdev[i]) {
                        if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
                                dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
@@ -243,6 +297,7 @@ error:
        put_device(&gdev->dev);
        return rc;
 }
+EXPORT_SYMBOL(ccwgroup_create_from_string);
 
 static int __init
 init_ccwgroup (void)
@@ -318,7 +373,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
 {
        struct ccwgroup_device *gdev;
        struct ccwgroup_driver *gdrv;
-       unsigned int value;
+       unsigned long value;
        int ret;
 
        gdev = to_ccwgroupdev(dev);
@@ -329,7 +384,9 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
        if (!try_module_get(gdrv->owner))
                return -EINVAL;
 
-       value = simple_strtoul(buf, NULL, 0);
+       ret = strict_strtoul(buf, 0, &value);
+       if (ret)
+               goto out;
        ret = count;
        if (value == 1)
                ccwgroup_set_online(gdev);
@@ -337,6 +394,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
                ccwgroup_set_offline(gdev);
        else
                ret = -EINVAL;
+out:
        module_put(gdrv->owner);
        return ret;
 }
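The simple_strtoul to strict_strtoul conversion above recurs in several hunks of this merge (cmb_enable_store, css_cm_enable_store, online_store, qdio_performance_stats_store). The common pattern, shown as a sketch rather than a quote from any one hunk, is:

+       unsigned long value;
+       int ret;
+
+       ret = strict_strtoul(buf, 0, &value);
+       if (ret)
+               return ret;     /* malformed input is rejected with -EINVAL
+                                * instead of being silently parsed as 0 */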
@@ -518,6 +576,5 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
 MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(ccwgroup_driver_register);
 EXPORT_SYMBOL(ccwgroup_driver_unregister);
-EXPORT_SYMBOL(ccwgroup_create);
 EXPORT_SYMBOL(ccwgroup_probe_ccwdev);
 EXPORT_SYMBOL(ccwgroup_remove_ccwdev);
index 23ffcc4768a7dba3a30cf6935ef261bb97c00b2a..08a578161306e1088f4fe6786ea85b518cfd7a12 100644 (file)
@@ -407,8 +407,7 @@ cio_modify (struct subchannel *sch)
 /*
  * Enable subchannel.
  */
-int cio_enable_subchannel(struct subchannel *sch, unsigned int isc,
-                         u32 intparm)
+int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
 {
        char dbf_txt[15];
        int ccode;
@@ -426,7 +425,7 @@ int cio_enable_subchannel(struct subchannel *sch, unsigned int isc,
 
        for (retry = 5, ret = 0; retry > 0; retry--) {
                sch->schib.pmcw.ena = 1;
-               sch->schib.pmcw.isc = isc;
+               sch->schib.pmcw.isc = sch->isc;
                sch->schib.pmcw.intparm = intparm;
                ret = cio_modify(sch);
                if (ret == -ENODEV)
@@ -600,6 +599,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
        else
                sch->opm = chp_get_sch_opm(sch);
        sch->lpm = sch->schib.pmcw.pam & sch->opm;
+       sch->isc = 3;
 
        CIO_DEBUG(KERN_INFO, 0,
                  "Detected device %04x on subchannel 0.%x.%04X"
@@ -610,13 +610,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
 
        /*
         * We now have to initially ...
-        *  ... set "interruption subclass"
         *  ... enable "concurrent sense"
         *  ... enable "multipath mode" if more than one
         *        CHPID is available. This is done regardless
         *        whether multiple paths are available for us.
         */
-       sch->schib.pmcw.isc = 3;        /* could be smth. else */
        sch->schib.pmcw.csense = 1;     /* concurrent sense */
        sch->schib.pmcw.ena = 0;
        if ((sch->lpm & (sch->lpm - 1)) != 0)
@@ -812,6 +810,7 @@ cio_probe_console(void)
         * enable console I/O-interrupt subclass 7
         */
        ctl_set_bit(6, 24);
+       console_subchannel.isc = 7;
        console_subchannel.schib.pmcw.isc = 7;
        console_subchannel.schib.pmcw.intparm =
                (u32)(addr_t)&console_subchannel;
index 08f2235c5a6fa0f38dc05f36f8e19bafec75986f..3c75412904dc495eade9f6627b148adc730e6e82 100644 (file)
@@ -74,6 +74,7 @@ struct subchannel {
        __u8 lpm;               /* logical path mask */
        __u8 opm;               /* operational path mask */
        struct schib schib;     /* subchannel information block */
+       int isc; /* desired interruption subclass */
        struct chsc_ssd_info ssd_info;  /* subchannel description */
        struct device dev;      /* entry in device tree */
        struct css_driver *driver;
@@ -85,7 +86,7 @@ struct subchannel {
 #define to_subchannel(n) container_of(n, struct subchannel, dev)
 
 extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
-extern int cio_enable_subchannel(struct subchannel *, unsigned int, u32);
+extern int cio_enable_subchannel(struct subchannel *, u32);
 extern int cio_disable_subchannel (struct subchannel *);
 extern int cio_cancel (struct subchannel *);
 extern int cio_clear (struct subchannel *);
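With the desired interruption subclass now stored in the subchannel itself, the isc argument disappears from cio_enable_subchannel(): callers set sch->isc once (cio_validate_subchannel defaults it to 3, the console probe uses 7) and then enable with the shorter call. A sketch of the new calling convention:

+       sch->isc = 3;           /* desired interruption subclass; console uses 7 */
+       ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
+       if (ret)
+               return ret;     /* could not enable the subchannel for I/O */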
index f4c132ab39edef112c72faad0c492dd3971d0ed7..2808b6833b9edea7fceb7263017316e61dedbbd3 100644 (file)
@@ -1219,16 +1219,21 @@ static ssize_t cmb_enable_store(struct device *dev,
 {
        struct ccw_device *cdev;
        int ret;
+       unsigned long val;
+
+       ret = strict_strtoul(buf, 16, &val);
+       if (ret)
+               return ret;
 
        cdev = to_ccwdev(dev);
 
-       switch (buf[0]) {
-       case '0':
+       switch (val) {
+       case 0:
                ret = disable_cmf(cdev);
                if (ret)
                        dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret);
                break;
-       case '1':
+       case 1:
                ret = enable_cmf(cdev);
                if (ret && ret != -EBUSY)
                        dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret);
index c1afab5f72d6d0e57fcd13ed77501c1ca6b4fb71..595e327d2f76cc4f47d7f7682fb7e99e88647c3e 100644 (file)
@@ -705,13 +705,17 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
 {
        struct channel_subsystem *css = to_css(dev);
        int ret;
+       unsigned long val;
 
+       ret = strict_strtoul(buf, 16, &val);
+       if (ret)
+               return ret;
        mutex_lock(&css->mutex);
-       switch (buf[0]) {
-       case '0':
+       switch (val) {
+       case 0:
                ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
                break;
-       case '1':
+       case 1:
                ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
                break;
        default:
index e0c7adb8958e25b61379f0b2cb5d6dd5a0b17abd..abfd601d237a1a07c9aa852e671a8bde507f6cc4 100644 (file)
@@ -512,8 +512,8 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
 {
        struct ccw_device *cdev = to_ccwdev(dev);
-       int i, force;
-       char *tmp;
+       int force, ret;
+       unsigned long i;
 
        if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
                return -EAGAIN;
@@ -525,25 +525,30 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
        if (!strncmp(buf, "force\n", count)) {
                force = 1;
                i = 1;
+               ret = 0;
        } else {
                force = 0;
-               i = simple_strtoul(buf, &tmp, 16);
+               ret = strict_strtoul(buf, 16, &i);
        }
-
+       if (ret)
+               goto out;
        switch (i) {
        case 0:
                online_store_handle_offline(cdev);
+               ret = count;
                break;
        case 1:
                online_store_handle_online(cdev, force);
+               ret = count;
                break;
        default:
-               count = -EINVAL;
+               ret = -EINVAL;
        }
+out:
        if (cdev->drv)
                module_put(cdev->drv->owner);
        atomic_set(&cdev->private->onoff, 0);
-       return count;
+       return ret;
 }
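From user space the online attribute keeps its three accepted inputs; only the parsing of the numeric case became strict. For an illustrative device bus id 0.0.4711:

+       /*
+        * echo 0     > /sys/bus/ccw/devices/0.0.4711/online   -> offline
+        * echo 1     > /sys/bus/ccw/devices/0.0.4711/online   -> online
+        * echo force > /sys/bus/ccw/devices/0.0.4711/online   -> forced online
+        * Any other string now fails with -EINVAL rather than being
+        * interpreted as 0 by simple_strtoul().
+        */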
 
 static ssize_t
index 4b92c84fb438440874eeb9ea4cb42b5f72965739..99403b0a97a7737a4865f72d41453048d1fb2600 100644 (file)
@@ -555,8 +555,7 @@ ccw_device_recognition(struct ccw_device *cdev)
            (cdev->private->state != DEV_STATE_BOXED))
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
-       ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc,
-                                   (u32)(addr_t)sch);
+       ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
        if (ret != 0)
                /* Couldn't enable the subchannel for i/o. Sick device. */
                return ret;
@@ -667,8 +666,7 @@ ccw_device_online(struct ccw_device *cdev)
        sch = to_subchannel(cdev->dev.parent);
        if (css_init_done && !get_device(&cdev->dev))
                return -ENODEV;
-       ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc,
-                                   (u32)(addr_t)sch);
+       ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
        if (ret != 0) {
                /* Couldn't enable the subchannel for i/o. Sick device. */
                if (ret == -ENODEV)
@@ -1048,8 +1046,7 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
        struct subchannel *sch;
 
        sch = to_subchannel(cdev->dev.parent);
-       if (cio_enable_subchannel(sch, sch->schib.pmcw.isc,
-                                 (u32)(addr_t)sch) != 0)
+       if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0)
                /* Couldn't enable the subchannel for i/o. Sick device. */
                return;
 
@@ -1082,7 +1079,6 @@ device_trigger_reprobe(struct subchannel *sch)
         */
        sch->lpm = sch->schib.pmcw.pam & sch->opm;
        /* Re-set some bits in the pmcw that were lost. */
-       sch->schib.pmcw.isc = 3;
        sch->schib.pmcw.csense = 1;
        sch->schib.pmcw.ena = 0;
        if ((sch->lpm & (sch->lpm - 1)) != 0)
index a1718a0aa539d07bb5f9852597b25f18ae3b3c75..f308ad55a6d5bed173b2fc5ee71197069bd764cc 100644 (file)
@@ -508,7 +508,7 @@ ccw_device_stlck(struct ccw_device *cdev)
                return -ENOMEM;
        }
        spin_lock_irqsave(sch->lock, flags);
-       ret = cio_enable_subchannel(sch, 3, (u32)(addr_t)sch);
+       ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
        if (ret)
                goto out_unlock;
        /*
index 10aa1e780801f42db88b0cf2006ad2e194744763..445cf364e461f353cf8dee05b6d98a3eb15e756c 100644 (file)
@@ -3632,7 +3632,7 @@ qdio_add_procfs_entry(void)
 {
         proc_perf_file_registration=0;
        qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
-                                             S_IFREG|0444,&proc_root);
+                                             S_IFREG|0444,NULL);
        if (qdio_perf_proc_file) {
                qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read;
        } else proc_perf_file_registration=-1;
@@ -3647,7 +3647,7 @@ static void
 qdio_remove_procfs_entry(void)
 {
         if (!proc_perf_file_registration) /* means if it went ok earlier */
-               remove_proc_entry(QDIO_PERF,&proc_root);
+               remove_proc_entry(QDIO_PERF,NULL);
 }
 
 /**
@@ -3663,11 +3663,11 @@ qdio_performance_stats_show(struct bus_type *bus, char *buf)
 static ssize_t
 qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count)
 {
-       char *tmp;
-       int i;
+       unsigned long i;
+       int ret;
 
-       i = simple_strtoul(buf, &tmp, 16);
-       if ((i == 0) || (i == 1)) {
+       ret = strict_strtoul(buf, 16, &i);
+       if (!ret && ((i == 0) || (i == 1))) {
                if (i == qdio_performance_stats)
                        return count;
                qdio_performance_stats = i;
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/kvm/Makefile
new file mode 100644 (file)
index 0000000..4a5ec39
--- /dev/null
@@ -0,0 +1,9 @@
+# Makefile for kvm guest drivers on s390
+#
+# Copyright IBM Corp. 2008
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (version 2 only)
+# as published by the Free Software Foundation.
+
+obj-$(CONFIG_VIRTIO) += kvm_virtio.o
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
new file mode 100644 (file)
index 0000000..47a7e62
--- /dev/null
@@ -0,0 +1,339 @@
+/*
+ * kvm_virtio.c - virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/err.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/interrupt.h>
+#include <linux/virtio_ring.h>
+#include <linux/pfn.h>
+#include <asm/io.h>
+#include <asm/kvm_para.h>
+#include <asm/kvm_virtio.h>
+#include <asm/setup.h>
+#include <asm/s390_ext.h>
+
+#define VIRTIO_SUBCODE_64 0x0D00
+
+/*
+ * The pointer to our (page) of device descriptions.
+ */
+static void *kvm_devices;
+
+/*
+ * Unique numbering for kvm devices.
+ */
+static unsigned int dev_index;
+
+struct kvm_device {
+       struct virtio_device vdev;
+       struct kvm_device_desc *desc;
+};
+
+#define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev)
+
+/*
+ * memory layout:
+ * - kvm_device_descriptor
+ *        struct kvm_device_desc
+ * - configuration
+ *        struct kvm_vqconfig
+ * - feature bits
+ * - config space
+ */
+static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc)
+{
+       return (struct kvm_vqconfig *)(desc + 1);
+}
+
+static u8 *kvm_vq_features(const struct kvm_device_desc *desc)
+{
+       return (u8 *)(kvm_vq_config(desc) + desc->num_vq);
+}
+
+static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc)
+{
+       return kvm_vq_features(desc) + desc->feature_len * 2;
+}
+
+/*
+ * The total size of the config page used by this device (incl. desc)
+ */
+static unsigned desc_size(const struct kvm_device_desc *desc)
+{
+       return sizeof(*desc)
+               + desc->num_vq * sizeof(struct kvm_vqconfig)
+               + desc->feature_len * 2
+               + desc->config_len;
+}
+
+/*
+ * This tests (and acknowledges) a feature bit.
+ */
+static bool kvm_feature(struct virtio_device *vdev, unsigned fbit)
+{
+       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+       u8 *features;
+
+       if (fbit / 8 > desc->feature_len)
+               return false;
+
+       features = kvm_vq_features(desc);
+       if (!(features[fbit / 8] & (1 << (fbit % 8))))
+               return false;
+
+       /*
+        * We set the matching bit in the other half of the bitmap to tell the
+        * Host we want to use this feature.
+        */
+       features[desc->feature_len + fbit / 8] |= (1 << (fbit % 8));
+       return true;
+}
+
+/*
+ * Reading and writing elements in config space
+ */
+static void kvm_get(struct virtio_device *vdev, unsigned int offset,
+                  void *buf, unsigned len)
+{
+       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+
+       BUG_ON(offset + len > desc->config_len);
+       memcpy(buf, kvm_vq_configspace(desc) + offset, len);
+}
+
+static void kvm_set(struct virtio_device *vdev, unsigned int offset,
+                  const void *buf, unsigned len)
+{
+       struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+
+       BUG_ON(offset + len > desc->config_len);
+       memcpy(kvm_vq_configspace(desc) + offset, buf, len);
+}
+
+/*
+ * The operations to get and set the status word just access
+ * the status field of the device descriptor. set_status will also
+ * make a hypercall to the host, to tell it about status changes.
+ */
+static u8 kvm_get_status(struct virtio_device *vdev)
+{
+       return to_kvmdev(vdev)->desc->status;
+}
+
+static void kvm_set_status(struct virtio_device *vdev, u8 status)
+{
+       BUG_ON(!status);
+       to_kvmdev(vdev)->desc->status = status;
+       kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS,
+                      (unsigned long) to_kvmdev(vdev)->desc);
+}
+
+/*
+ * To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the
+ * descriptor address. The Host will zero the status and all the
+ * features.
+ */
+static void kvm_reset(struct virtio_device *vdev)
+{
+       kvm_hypercall1(KVM_S390_VIRTIO_RESET,
+                      (unsigned long) to_kvmdev(vdev)->desc);
+}
+
+/*
+ * When the virtio_ring code wants to notify the Host, it calls us here and we
+ * make a hypercall.  We hand the address of the virtqueue so the Host
+ * knows which virtqueue we're talking about.
+ */
+static void kvm_notify(struct virtqueue *vq)
+{
+       struct kvm_vqconfig *config = vq->priv;
+
+       kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
+}
+
+/*
+ * This routine finds the virtqueue with the given index in the configuration
+ * of this device and sets it up.
+ */
+static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
+                                   unsigned index,
+                                   void (*callback)(struct virtqueue *vq))
+{
+       struct kvm_device *kdev = to_kvmdev(vdev);
+       struct kvm_vqconfig *config;
+       struct virtqueue *vq;
+       int err;
+
+       if (index >= kdev->desc->num_vq)
+               return ERR_PTR(-ENOENT);
+
+       config = kvm_vq_config(kdev->desc)+index;
+
+       err = vmem_add_mapping(config->address,
+                              vring_size(config->num, PAGE_SIZE));
+       if (err)
+               goto out;
+
+       vq = vring_new_virtqueue(config->num, vdev, (void *) config->address,
+                                kvm_notify, callback);
+       if (!vq) {
+               err = -ENOMEM;
+               goto unmap;
+       }
+
+       /*
+        * register a callback token
+        * The host will send this via the external interrupt parameter
+        */
+       config->token = (u64) vq;
+
+       vq->priv = config;
+       return vq;
+unmap:
+       vmem_remove_mapping(config->address,
+                           vring_size(config->num, PAGE_SIZE));
+out:
+       return ERR_PTR(err);
+}
+
+static void kvm_del_vq(struct virtqueue *vq)
+{
+       struct kvm_vqconfig *config = vq->priv;
+
+       vring_del_virtqueue(vq);
+       vmem_remove_mapping(config->address,
+                           vring_size(config->num, PAGE_SIZE));
+}
+
+/*
+ * The config ops structure as defined by virtio config
+ */
+static struct virtio_config_ops kvm_vq_configspace_ops = {
+       .feature = kvm_feature,
+       .get = kvm_get,
+       .set = kvm_set,
+       .get_status = kvm_get_status,
+       .set_status = kvm_set_status,
+       .reset = kvm_reset,
+       .find_vq = kvm_find_vq,
+       .del_vq = kvm_del_vq,
+};
+
+/*
+ * The root device for the kvm virtio devices.
+ * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2.
+ */
+static struct device kvm_root = {
+       .parent = NULL,
+       .bus_id = "kvm_s390",
+};
+
+/*
+ * adds a new device and registers it with virtio;
+ * appropriate drivers are loaded by the device model
+ */
+static void add_kvm_device(struct kvm_device_desc *d)
+{
+       struct kvm_device *kdev;
+
+       kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+       if (!kdev) {
+               printk(KERN_EMERG "Cannot allocate kvm dev %u\n",
+                      dev_index++);
+               return;
+       }
+
+       kdev->vdev.dev.parent = &kvm_root;
+       kdev->vdev.index = dev_index++;
+       kdev->vdev.id.device = d->type;
+       kdev->vdev.config = &kvm_vq_configspace_ops;
+       kdev->desc = d;
+
+       if (register_virtio_device(&kdev->vdev) != 0) {
+               printk(KERN_ERR "Failed to register kvm device %u\n",
+                      kdev->vdev.index);
+               kfree(kdev);
+       }
+}
+
+/*
+ * scan_devices() simply iterates through the device page.
+ * The type 0 is reserved to mean "end of devices".
+ */
+static void scan_devices(void)
+{
+       unsigned int i;
+       struct kvm_device_desc *d;
+
+       for (i = 0; i < PAGE_SIZE; i += desc_size(d)) {
+               d = kvm_devices + i;
+
+               if (d->type == 0)
+                       break;
+
+               add_kvm_device(d);
+       }
+}
+
+/*
+ * we emulate the request_irq behaviour on top of s390 extints
+ */
+static void kvm_extint_handler(u16 code)
+{
+       void *data = (void *) *(long *) __LC_PFAULT_INTPARM;
+       u16 subcode = S390_lowcore.cpu_addr;
+
+       if ((subcode & 0xff00) != VIRTIO_SUBCODE_64)
+               return;
+
+       vring_interrupt(0, data);
+}
+
+/*
+ * Init function for virtio
+ * devices are in a single page above top of "normal" mem
+ */
+static int __init kvm_devices_init(void)
+{
+       int rc;
+
+       if (!MACHINE_IS_KVM)
+               return -ENODEV;
+
+       rc = device_register(&kvm_root);
+       if (rc) {
+               printk(KERN_ERR "Could not register kvm_s390 root device");
+               return rc;
+       }
+
+       rc = vmem_add_mapping(PFN_PHYS(max_pfn), PAGE_SIZE);
+       if (rc) {
+               device_unregister(&kvm_root);
+               return rc;
+       }
+
+       kvm_devices = (void *) PFN_PHYS(max_pfn);
+
+       ctl_set_bit(0, 9);
+       register_external_interrupt(0x2603, kvm_extint_handler);
+
+       scan_devices();
+       return 0;
+}
+
+/*
+ * We do this after core stuff, but before the drivers.
+ */
+postcore_initcall(kvm_devices_init);
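To make the descriptor-page layout described above concrete, here is a worked size calculation for a hypothetical descriptor with one virtqueue, one byte of feature bits and eight bytes of config space (the struct sizes themselves come from <asm/kvm_virtio.h> and are not spelled out here):

+       /*
+        * offset 0              struct kvm_device_desc        sizeof(*desc)
+        * then   num_vq (= 1)   struct kvm_vqconfig entries   1 * sizeof(struct kvm_vqconfig)
+        * then   feature bits   feature_len (= 1) bytes
+        * then   feature acks   feature_len (= 1) bytes       (the "other half" kvm_feature() sets)
+        * then   config space   config_len  (= 8) bytes
+        *
+        * desc_size() = sizeof(*desc) + 1 * sizeof(struct kvm_vqconfig) + 2 * 1 + 8
+        * scan_devices() steps through the page in these increments until it
+        * hits a descriptor with type == 0.
+        */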
index 76728ae4b843c669795b346e4b1a1cfb903bd4a1..8e7697305a4cd9ab6d871f1d569d6d1e9ef31b4b 100644 (file)
@@ -62,30 +62,14 @@ static struct device *cu3088_root_dev;
 static ssize_t
 group_write(struct device_driver *drv, const char *buf, size_t count)
 {
-       const char *start, *end;
-       char bus_ids[2][BUS_ID_SIZE], *argv[2];
-       int i;
        int ret;
        struct ccwgroup_driver *cdrv;
 
        cdrv = to_ccwgroupdrv(drv);
        if (!cdrv)
                return -EINVAL;
-       start = buf;
-       for (i=0; i<2; i++) {
-               static const char delim[] = {',', '\n'};
-               int len;
-
-               if (!(end = strchr(start, delim[i])))
-                       return -EINVAL;
-               len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start + 1);
-               strlcpy (bus_ids[i], start, len);
-               argv[i] = bus_ids[i];
-               start = end + 1;
-       }
-
-       ret = ccwgroup_create(cu3088_root_dev, cdrv->driver_id,
-                             &cu3088_driver, 2, argv);
+       ret = ccwgroup_create_from_string(cu3088_root_dev, cdrv->driver_id,
+                                         &cu3088_driver, 2, buf);
 
        return (ret == 0) ? count : ret;
 }
index f51ed997258793b71ea49db3301270eac70f726b..dd22f4b37037ebaa25e25c49bda17de8670dddd6 100644 (file)
@@ -1793,7 +1793,8 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
        skb->protocol = card->lan_type_trans(skb, card->dev);
        card->stats.rx_bytes += skb_len;
        card->stats.rx_packets++;
-       *((__u32 *)skb->cb) = ++card->pkt_seq;
+       if (skb->protocol == htons(ETH_P_802_2))
+               *((__u32 *)skb->cb) = ++card->pkt_seq;
        netif_rx(skb);
 }
 
index 8f876f6ab367c7c1aef89cfb4efa30b4e03bf4c1..e4ba6a0372acb6360124839afe6e651c1a6cec87 100644 (file)
@@ -1313,8 +1313,6 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
         * and throw away packet.
         */
        if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
-               if (!in_atomic())
-                       fsm_event(privptr->fsm, DEV_EVENT_START, dev);
                dev_kfree_skb(skb);
                privptr->stats.tx_dropped++;
                privptr->stats.tx_errors++;
@@ -2147,6 +2145,7 @@ static int __init netiucv_init(void)
        if (rc)
                goto out_dbf;
        IUCV_DBF_TEXT(trace, 3, __func__);
+       netiucv_driver.groups = netiucv_drv_attr_groups;
        rc = driver_register(&netiucv_driver);
        if (rc) {
                PRINT_ERR("NETIUCV: failed to register driver.\n");
index 66f4f12503c9d6287b8a45ebfe82d32208258391..699ac11debd84c49c0bbdbd21a2f70f9914deedc 100644 (file)
@@ -72,22 +72,7 @@ struct qeth_dbf_info {
        debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text)
 
 #define QETH_DBF_TEXT_(name, level, text...) \
-       do { \
-               if (qeth_dbf_passes(qeth_dbf[QETH_DBF_##name].id, level)) { \
-                       char *dbf_txt_buf = \
-                               get_cpu_var(QETH_DBF_TXT_BUF); \
-                       sprintf(dbf_txt_buf, text); \
-                       debug_text_event(qeth_dbf[QETH_DBF_##name].id, \
-                                       level, dbf_txt_buf); \
-                       put_cpu_var(QETH_DBF_TXT_BUF); \
-               } \
-       } while (0)
-
-/* Allow to sort out low debug levels early to avoid wasted sprints */
-static inline int qeth_dbf_passes(debug_info_t *dbf_grp, int level)
-{
-       return (level <= dbf_grp->level);
-}
+       qeth_dbf_longtext(QETH_DBF_##name, level, text)
 
 /**
  * some more debug stuff
@@ -773,27 +758,6 @@ static inline int qeth_get_micros(void)
        return (int) (get_clock() >> 12);
 }
 
-static inline void *qeth_push_skb(struct qeth_card *card, struct sk_buff *skb,
-               int size)
-{
-       void *hdr;
-
-       hdr = (void *) skb_push(skb, size);
-       /*
-        * sanity check, the Linux memory allocation scheme should
-        * never present us cases like this one (the qdio header size plus
-        * the first 40 bytes of the paket cross a 4k boundary)
-        */
-       if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) !=
-           (((unsigned long) hdr + size +
-           QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) {
-               PRINT_ERR("Misaligned packet on interface %s. Discarded.",
-                       QETH_CARD_IFNAME(card));
-               return NULL;
-       }
-       return hdr;
-}
-
 static inline int qeth_get_ip_version(struct sk_buff *skb)
 {
        switch (skb->protocol) {
@@ -806,6 +770,12 @@ static inline int qeth_get_ip_version(struct sk_buff *skb)
        }
 }
 
+static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
+               struct qeth_buffer_pool_entry *entry)
+{
+       list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
+}
+
 struct qeth_eddp_context;
 extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
 extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
@@ -843,8 +813,6 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
 int qeth_query_setadapterparms(struct qeth_card *);
 int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int,
                       unsigned int, const char *);
-void qeth_put_buffer_pool_entry(struct qeth_card *,
-                          struct qeth_buffer_pool_entry *);
 void qeth_queue_input_buffer(struct qeth_card *, int);
 struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
                struct qdio_buffer *, struct qdio_buffer_element **, int *,
@@ -880,8 +848,6 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
        void *reply_param);
 int qeth_get_cast_type(struct qeth_card *, struct sk_buff *);
 int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int);
-struct sk_buff *qeth_prepare_skb(struct qeth_card *, struct sk_buff *,
-                struct qeth_hdr **);
 int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int);
 int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
                        struct sk_buff *, struct qeth_hdr *, int,
@@ -894,6 +860,8 @@ void qeth_core_get_ethtool_stats(struct net_device *,
                                struct ethtool_stats *, u64 *);
 void qeth_core_get_strings(struct net_device *, u32, u8 *);
 void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
+void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...);
+int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
 
 /* exports for OSN */
 int qeth_osn_assist(struct net_device *, void *, int);
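Since QETH_DBF_TEXT_() is now a thin wrapper, a formatted debug call such as the one in qeth_core_hardsetup_card() becomes a plain varargs call; the level check and the formatting move into qeth_dbf_longtext(). A sketch of the expansion:

+       QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+       /* expands to */
+       qeth_dbf_longtext(QETH_DBF_SETUP, 2, "2err%d", rc);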
index 055f5c3e7b563fc8f73e65dc141ef91915375008..436bf1f6d4a602935dd94776e95909a450737bc3 100644 (file)
@@ -26,9 +26,6 @@
 #include "qeth_core.h"
 #include "qeth_core_offl.h"
 
-static DEFINE_PER_CPU(char[256], qeth_core_dbf_txt_buf);
-#define QETH_DBF_TXT_BUF qeth_core_dbf_txt_buf
-
 struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
        /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
        /*                   N  P  A    M  L  V                      H  */
@@ -2255,14 +2252,6 @@ void qeth_print_status_message(struct qeth_card *card)
 }
 EXPORT_SYMBOL_GPL(qeth_print_status_message);
 
-void qeth_put_buffer_pool_entry(struct qeth_card *card,
-               struct qeth_buffer_pool_entry *entry)
-{
-       QETH_DBF_TEXT(TRACE, 6, "ptbfplen");
-       list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
-}
-EXPORT_SYMBOL_GPL(qeth_put_buffer_pool_entry);
-
 static void qeth_initialize_working_pool_list(struct qeth_card *card)
 {
        struct qeth_buffer_pool_entry *entry;
@@ -2603,7 +2592,6 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index)
        int rc;
        int newcount = 0;
 
-       QETH_DBF_TEXT(TRACE, 6, "queinbuf");
        count = (index < queue->next_buf_to_init)?
                card->qdio.in_buf_pool.buf_count -
                (queue->next_buf_to_init - index) :
@@ -2792,8 +2780,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int,
        int i;
        unsigned int qdio_flags;
 
-       QETH_DBF_TEXT(TRACE, 6, "flushbuf");
-
        for (i = index; i < index + count; ++i) {
                buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
                buf->buffer->element[buf->next_element_to_fill - 1].flags |=
@@ -3037,49 +3023,6 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb,
 }
 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
 
-static void __qeth_free_new_skb(struct sk_buff *orig_skb,
-               struct sk_buff *new_skb)
-{
-       if (orig_skb != new_skb)
-               dev_kfree_skb_any(new_skb);
-}
-
-static inline struct sk_buff *qeth_realloc_headroom(struct qeth_card *card,
-               struct sk_buff *skb, int size)
-{
-       struct sk_buff *new_skb = skb;
-
-       if (skb_headroom(skb) >= size)
-               return skb;
-       new_skb = skb_realloc_headroom(skb, size);
-       if (!new_skb)
-               PRINT_ERR("Could not realloc headroom for qeth_hdr "
-                         "on interface %s", QETH_CARD_IFNAME(card));
-       return new_skb;
-}
-
-struct sk_buff *qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb,
-                struct qeth_hdr **hdr)
-{
-       struct sk_buff *new_skb;
-
-       QETH_DBF_TEXT(TRACE, 6, "prepskb");
-
-       new_skb = qeth_realloc_headroom(card, skb,
-                       sizeof(struct qeth_hdr));
-       if (!new_skb)
-               return NULL;
-
-       *hdr = ((struct qeth_hdr *)qeth_push_skb(card, new_skb,
-                       sizeof(struct qeth_hdr)));
-       if (*hdr == NULL) {
-               __qeth_free_new_skb(skb, new_skb);
-               return NULL;
-       }
-       return new_skb;
-}
-EXPORT_SYMBOL_GPL(qeth_prepare_skb);
-
 int qeth_get_elements_no(struct qeth_card *card, void *hdr,
                     struct sk_buff *skb, int elems)
 {
@@ -3100,8 +3043,8 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr,
 }
 EXPORT_SYMBOL_GPL(qeth_get_elements_no);
 
-static void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
-               int is_tso, int *next_element_to_fill)
+static inline void __qeth_fill_buffer(struct sk_buff *skb,
+       struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill)
 {
        int length = skb->len;
        int length_here;
@@ -3143,15 +3086,13 @@ static void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer,
        *next_element_to_fill = element;
 }
 
-static int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
+static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
                struct qeth_qdio_out_buffer *buf, struct sk_buff *skb)
 {
        struct qdio_buffer *buffer;
        struct qeth_hdr_tso *hdr;
        int flush_cnt = 0, hdr_len, large_send = 0;
 
-       QETH_DBF_TEXT(TRACE, 6, "qdfillbf");
-
        buffer = buf->buffer;
        atomic_inc(&skb->users);
        skb_queue_tail(&buf->skb_list, skb);
@@ -3210,8 +3151,6 @@ int qeth_do_send_packet_fast(struct qeth_card *card,
        int flush_cnt = 0;
        int index;
 
-       QETH_DBF_TEXT(TRACE, 6, "dosndpfa");
-
        /* spin until we get the queue ... */
        while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
                              QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
@@ -3263,8 +3202,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
        int tmp;
        int rc = 0;
 
-       QETH_DBF_TEXT(TRACE, 6, "dosndpkt");
-
        /* spin until we get the queue ... */
        while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
                              QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
@@ -3827,27 +3764,8 @@ static struct ccw_driver qeth_ccw_driver = {
 static int qeth_core_driver_group(const char *buf, struct device *root_dev,
                                unsigned long driver_id)
 {
-       const char *start, *end;
-       char bus_ids[3][BUS_ID_SIZE], *argv[3];
-       int i;
-
-       start = buf;
-       for (i = 0; i < 3; i++) {
-               static const char delim[] = { ',', ',', '\n' };
-               int len;
-
-               end = strchr(start, delim[i]);
-               if (!end)
-                       return -EINVAL;
-               len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start);
-               strncpy(bus_ids[i], start, len);
-               bus_ids[i][len] = '\0';
-               start = end + 1;
-               argv[i] = bus_ids[i];
-       }
-
-       return (ccwgroup_create(root_dev, driver_id,
-                               &qeth_ccw_driver, 3, argv));
+       return ccwgroup_create_from_string(root_dev, driver_id,
+                                          &qeth_ccw_driver, 3, buf);
 }
 
 int qeth_core_hardsetup_card(struct qeth_card *card)
@@ -3885,8 +3803,9 @@ retry:
                QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
                return rc;
        }
-
-       mpno = QETH_MAX_PORTNO;
+       mpno = qdio_get_ssqd_pct(CARD_DDEV(card));
+       if (mpno)
+               mpno = min(mpno - 1, QETH_MAX_PORTNO);
        if (card->info.portno > mpno) {
                PRINT_ERR("Device %s does not offer port number %d \n.",
                        CARD_BUS_ID(card), card->info.portno);
@@ -3980,7 +3899,6 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
        int use_rx_sg = 0;
        int frag = 0;
 
-       QETH_DBF_TEXT(TRACE, 6, "nextskb");
        /* qeth_hdr must not cross element boundaries */
        if (element->length < offset + sizeof(struct qeth_hdr)) {
                if (qeth_is_last_sbale(element))
@@ -4086,6 +4004,18 @@ static void qeth_unregister_dbf_views(void)
        }
 }
 
+void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...)
+{
+       char dbf_txt_buf[32];
+       va_list args;
+
+       if (level > (qeth_dbf[dbf_nix].id)->level)
+               return;
+       va_start(args, text);
+       vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), text, args);
+       va_end(args);
+       debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf);
+}
+EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
+
 static int qeth_register_dbf_views(void)
 {
        int ret;
@@ -4433,6 +4363,96 @@ void qeth_core_get_drvinfo(struct net_device *dev,
 }
 EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo);
 
+int qeth_core_ethtool_get_settings(struct net_device *netdev,
+                                       struct ethtool_cmd *ecmd)
+{
+       struct qeth_card *card = netdev_priv(netdev);
+       enum qeth_link_types link_type;
+
+       if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan))
+               link_type = QETH_LINK_TYPE_10GBIT_ETH;
+       else
+               link_type = card->info.link_type;
+
+       ecmd->transceiver = XCVR_INTERNAL;
+       ecmd->supported = SUPPORTED_Autoneg;
+       ecmd->advertising = ADVERTISED_Autoneg;
+       ecmd->duplex = DUPLEX_FULL;
+       ecmd->autoneg = AUTONEG_ENABLE;
+
+       switch (link_type) {
+       case QETH_LINK_TYPE_FAST_ETH:
+       case QETH_LINK_TYPE_LANE_ETH100:
+               ecmd->supported |= SUPPORTED_10baseT_Half |
+                                       SUPPORTED_10baseT_Full |
+                                       SUPPORTED_100baseT_Half |
+                                       SUPPORTED_100baseT_Full |
+                                       SUPPORTED_TP;
+               ecmd->advertising |= ADVERTISED_10baseT_Half |
+                                       ADVERTISED_10baseT_Full |
+                                       ADVERTISED_100baseT_Half |
+                                       ADVERTISED_100baseT_Full |
+                                       ADVERTISED_TP;
+               ecmd->speed = SPEED_100;
+               ecmd->port = PORT_TP;
+               break;
+
+       case QETH_LINK_TYPE_GBIT_ETH:
+       case QETH_LINK_TYPE_LANE_ETH1000:
+               ecmd->supported |= SUPPORTED_10baseT_Half |
+                                       SUPPORTED_10baseT_Full |
+                                       SUPPORTED_100baseT_Half |
+                                       SUPPORTED_100baseT_Full |
+                                       SUPPORTED_1000baseT_Half |
+                                       SUPPORTED_1000baseT_Full |
+                                       SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_10baseT_Half |
+                                       ADVERTISED_10baseT_Full |
+                                       ADVERTISED_100baseT_Half |
+                                       ADVERTISED_100baseT_Full |
+                                       ADVERTISED_1000baseT_Half |
+                                       ADVERTISED_1000baseT_Full |
+                                       ADVERTISED_FIBRE;
+               ecmd->speed = SPEED_1000;
+               ecmd->port = PORT_FIBRE;
+               break;
+
+       case QETH_LINK_TYPE_10GBIT_ETH:
+               ecmd->supported |= SUPPORTED_10baseT_Half |
+                                       SUPPORTED_10baseT_Full |
+                                       SUPPORTED_100baseT_Half |
+                                       SUPPORTED_100baseT_Full |
+                                       SUPPORTED_1000baseT_Half |
+                                       SUPPORTED_1000baseT_Full |
+                                       SUPPORTED_10000baseT_Full |
+                                       SUPPORTED_FIBRE;
+               ecmd->advertising |= ADVERTISED_10baseT_Half |
+                                       ADVERTISED_10baseT_Full |
+                                       ADVERTISED_100baseT_Half |
+                                       ADVERTISED_100baseT_Full |
+                                       ADVERTISED_1000baseT_Half |
+                                       ADVERTISED_1000baseT_Full |
+                                       ADVERTISED_10000baseT_Full |
+                                       ADVERTISED_FIBRE;
+               ecmd->speed = SPEED_10000;
+               ecmd->port = PORT_FIBRE;
+               break;
+
+       default:
+               ecmd->supported |= SUPPORTED_10baseT_Half |
+                                       SUPPORTED_10baseT_Full |
+                                       SUPPORTED_TP;
+               ecmd->advertising |= ADVERTISED_10baseT_Half |
+                                       ADVERTISED_10baseT_Full |
+                                       ADVERTISED_TP;
+               ecmd->speed = SPEED_10;
+               ecmd->port = PORT_TP;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
+
 static int __init qeth_core_init(void)
 {
        int rc;
index 3921d1631a78928067fc9b5e90f0f1e676036ee0..86ec50ddae139b602aba92216b1ee45dfdba3f64 100644 (file)
@@ -22,9 +22,6 @@
 #include "qeth_core.h"
 #include "qeth_core_offl.h"
 
-#define QETH_DBF_TXT_BUF qeth_l2_dbf_txt_buf
-static DEFINE_PER_CPU(char[256], qeth_l2_dbf_txt_buf);
-
 static int qeth_l2_set_offline(struct ccwgroup_device *);
 static int qeth_l2_stop(struct net_device *);
 static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
@@ -635,8 +632,6 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
        struct qeth_eddp_context *ctx = NULL;
 
-       QETH_DBF_TEXT(TRACE, 6, "l2xmit");
-
        if ((card->state != CARD_STATE_UP) || !card->lan_online) {
                card->stats.tx_carrier_errors++;
                goto tx_drop;
@@ -658,9 +653,12 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        if (card->info.type == QETH_CARD_TYPE_OSN)
                hdr = (struct qeth_hdr *)skb->data;
        else {
-               new_skb = qeth_prepare_skb(card, skb, &hdr);
+               /* create a clone with writeable headroom */
+               new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr));
                if (!new_skb)
                        goto tx_drop;
+               hdr = (struct qeth_hdr *)skb_push(new_skb,
+                                               sizeof(struct qeth_hdr));
                qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type);
        }
 
@@ -747,7 +745,6 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
        int index;
        int i;
 
-       QETH_DBF_TEXT(TRACE, 6, "qdinput");
        card = (struct qeth_card *) card_ptr;
        net_dev = card->dev;
        if (card->options.performance_stats) {
@@ -852,6 +849,22 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
        return;
 }
 
+static int qeth_l2_ethtool_set_tso(struct net_device *dev, u32 data)
+{
+       struct qeth_card *card = netdev_priv(dev);
+
+       if (data) {
+               if (card->options.large_send == QETH_LARGE_SEND_NO) {
+                       card->options.large_send = QETH_LARGE_SEND_EDDP;
+                       dev->features |= NETIF_F_TSO;
+               }
+       } else {
+               dev->features &= ~NETIF_F_TSO;
+               card->options.large_send = QETH_LARGE_SEND_NO;
+       }
+       return 0;
+}
+
 static struct ethtool_ops qeth_l2_ethtool_ops = {
        .get_link = ethtool_op_get_link,
        .get_tx_csum = ethtool_op_get_tx_csum,
@@ -859,11 +872,12 @@ static struct ethtool_ops qeth_l2_ethtool_ops = {
        .get_sg = ethtool_op_get_sg,
        .set_sg = ethtool_op_set_sg,
        .get_tso = ethtool_op_get_tso,
-       .set_tso = ethtool_op_set_tso,
+       .set_tso = qeth_l2_ethtool_set_tso,
        .get_strings = qeth_core_get_strings,
        .get_ethtool_stats = qeth_core_get_ethtool_stats,
        .get_stats_count = qeth_core_get_stats_count,
        .get_drvinfo = qeth_core_get_drvinfo,
+       .get_settings = qeth_core_ethtool_get_settings,
 };
 
 static struct ethtool_ops qeth_l2_osn_ops = {
index 1be353593a59f5a4caa47ab2a1f13f048abeb59d..9f143c83bba39fe63e7759486accb5d26e35e3f9 100644 (file)
@@ -13,9 +13,6 @@
 
 #include "qeth_core.h"
 
-#define QETH_DBF_TXT_BUF qeth_l3_dbf_txt_buf
-DECLARE_PER_CPU(char[256], qeth_l3_dbf_txt_buf);
-
 struct qeth_ipaddr {
        struct list_head entry;
        enum qeth_ip_types type;
index e1bfe56087d6bf36ecb10745bb4692f526828e74..94a8ead64ed4d3ea428d6da0eaaa810e82146ded 100644 (file)
@@ -28,8 +28,6 @@
 #include "qeth_l3.h"
 #include "qeth_core_offl.h"
 
-DEFINE_PER_CPU(char[256], qeth_l3_dbf_txt_buf);
-
 static int qeth_l3_set_offline(struct ccwgroup_device *);
 static int qeth_l3_recover(void *);
 static int qeth_l3_stop(struct net_device *);
@@ -2093,6 +2091,11 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
            (card->state == CARD_STATE_UP)) {
                if (recovery_mode)
                        qeth_l3_stop(card->dev);
+               else {
+                       rtnl_lock();
+                       dev_close(card->dev);
+                       rtnl_unlock();
+               }
                if (!card->use_hard_stop) {
                        rc = qeth_send_stoplan(card);
                        if (rc)
@@ -2559,8 +2562,6 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
                struct sk_buff *skb, int ipv, int cast_type)
 {
-       QETH_DBF_TEXT(TRACE, 6, "fillhdr");
-
        memset(hdr, 0, sizeof(struct qeth_hdr));
        hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3;
        hdr->hdr.l3.ext_flags = 0;
@@ -2570,9 +2571,10 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
         * v6 uses passthrough, v4 sets the tag in the QDIO header.
         */
        if (card->vlangrp && vlan_tx_tag_present(skb)) {
-               hdr->hdr.l3.ext_flags = (ipv == 4) ?
-                       QETH_HDR_EXT_VLAN_FRAME :
-                       QETH_HDR_EXT_INCLUDE_VLAN_TAG;
+               if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD))
+                       hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME;
+               else
+                       hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG;
                hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb);
        }
 
@@ -2638,8 +2640,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO;
        struct qeth_eddp_context *ctx = NULL;
 
-       QETH_DBF_TEXT(TRACE, 6, "l3xmit");
-
        if ((card->info.type == QETH_CARD_TYPE_IQD) &&
            (skb->protocol != htons(ETH_P_IPV6)) &&
            (skb->protocol != htons(ETH_P_IP)))
@@ -2890,6 +2890,7 @@ static struct ethtool_ops qeth_l3_ethtool_ops = {
        .get_ethtool_stats = qeth_core_get_ethtool_stats,
        .get_stats_count = qeth_core_get_stats_count,
        .get_drvinfo = qeth_core_get_drvinfo,
+       .get_settings = qeth_core_ethtool_get_settings,
 };
 
 /*
@@ -2982,7 +2983,6 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
        int index;
        int i;
 
-       QETH_DBF_TEXT(TRACE, 6, "qdinput");
        card = (struct qeth_card *) card_ptr;
        net_dev = card->dev;
        if (card->options.performance_stats) {
@@ -3140,9 +3140,15 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
        netif_carrier_on(card->dev);
 
        qeth_set_allowed_threads(card, 0xffffffff, 0);
-       if ((recover_flag == CARD_STATE_RECOVER) && recovery_mode) {
+       if (recover_flag == CARD_STATE_RECOVER) {
+               if (recovery_mode)
                        qeth_l3_open(card->dev);
-                       qeth_l3_set_multicast_list(card->dev);
+               else {
+                       rtnl_lock();
+                       dev_open(card->dev);
+                       rtnl_unlock();
+               }
+               qeth_l3_set_multicast_list(card->dev);
        }
        /* let user_space know that device is online */
        kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
index 7c3f02816e95c35b8ed6005c9f0c7d7fa2f9fa21..9af2330f07a21c0019b0df6d5495427ae4ba38b3 100644 (file)
@@ -1927,7 +1927,8 @@ zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
 
        /* setup new FSF request */
        retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
-                                    0, NULL, &lock_flags, &fsf_req);
+                                    ZFCP_WAIT_FOR_SBAL, NULL, &lock_flags,
+                                    &fsf_req);
        if (retval) {
                ZFCP_LOG_INFO("error: Could not create exchange configuration "
                              "data request for adapter %s.\n",
@@ -2035,21 +2036,21 @@ zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *fsf_req, int xchg_ok)
                       min(FC_SERIAL_NUMBER_SIZE, 17));
        }
 
-       ZFCP_LOG_NORMAL("The adapter %s reported the following "
-                       "characteristics:\n"
-                       "WWNN 0x%016Lx, "
-                       "WWPN 0x%016Lx, "
-                       "S_ID 0x%06x,\n"
-                       "adapter version 0x%x, "
-                       "LIC version 0x%x, "
-                       "FC link speed %d Gb/s\n",
-                       zfcp_get_busid_by_adapter(adapter),
-                       (wwn_t) fc_host_node_name(shost),
-                       (wwn_t) fc_host_port_name(shost),
-                       fc_host_port_id(shost),
-                       adapter->hydra_version,
-                       adapter->fsf_lic_version,
-                       fc_host_speed(shost));
+       if (fsf_req->erp_action)
+               ZFCP_LOG_NORMAL("The adapter %s reported the following "
+                               "characteristics:\n"
+                               "WWNN 0x%016Lx, WWPN 0x%016Lx, "
+                               "S_ID 0x%06x,\n"
+                               "adapter version 0x%x, "
+                               "LIC version 0x%x, "
+                               "FC link speed %d Gb/s\n",
+                               zfcp_get_busid_by_adapter(adapter),
+                               (wwn_t) fc_host_node_name(shost),
+                               (wwn_t) fc_host_port_name(shost),
+                               fc_host_port_id(shost),
+                               adapter->hydra_version,
+                               adapter->fsf_lic_version,
+                               fc_host_speed(shost));
        if (ZFCP_QTCB_VERSION < bottom->low_qtcb_version) {
                ZFCP_LOG_NORMAL("error: the adapter %s "
                                "only supports newer control block "
@@ -2114,8 +2115,10 @@ zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *fsf_req)
                        zfcp_erp_adapter_shutdown(adapter, 0, 127, fsf_req);
                        return -EIO;
                case FC_PORTTYPE_NPORT:
-                       ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
-                                       "network detected at adapter %s.\n",
+                       if (fsf_req->erp_action)
+                               ZFCP_LOG_NORMAL("Switched fabric fibrechannel "
+                                               "network detected at adapter "
+                                               "%s.\n",
                                        zfcp_get_busid_by_adapter(adapter));
                        break;
                default:
index 8cce5cc11d50c23e0df383ad08d65ee5ab67136f..099970b2700175f01bbbad8de08e96871f108a7e 100644 (file)
 #define FSF_FEATURE_HBAAPI_MANAGEMENT           0x00000010
 #define FSF_FEATURE_ELS_CT_CHAINED_SBALS        0x00000020
 #define FSF_FEATURE_UPDATE_ALERT               0x00000100
+#define FSF_FEATURE_MEASUREMENT_DATA           0x00000200
 
 /* host connection features */
 #define FSF_FEATURE_NPIV_MODE                  0x00000001
@@ -340,6 +341,15 @@ struct fsf_qtcb_prefix {
        u8  res1[20];
 } __attribute__ ((packed));
 
+struct fsf_statistics_info {
+       u64 input_req;
+       u64 output_req;
+       u64 control_req;
+       u64 input_mb;
+       u64 output_mb;
+       u64 seconds_act;
+} __attribute__ ((packed));
+
 union fsf_status_qual {
        u8  byte[FSF_STATUS_QUALIFIER_SIZE];
        u16 halfword[FSF_STATUS_QUALIFIER_SIZE / sizeof (u16)];
@@ -436,7 +446,8 @@ struct fsf_qtcb_bottom_config {
        u32 hardware_version;
        u8 serial_number[32];
        struct fsf_nport_serv_param plogi_payload;
-       u8 res4[160];
+       struct fsf_statistics_info stat_info;
+       u8 res4[112];
 } __attribute__ ((packed));
 
 struct fsf_qtcb_bottom_port {
@@ -469,7 +480,10 @@ struct fsf_qtcb_bottom_port {
        u64 control_requests;
        u64 input_mb;           /* where 1 MByte == 1.000.000 Bytes */
        u64 output_mb;          /* where 1 MByte == 1.000.000 Bytes */
-       u8 res2[256];
+       u8 cp_util;
+       u8 cb_util;
+       u8 a_util;
+       u8 res2[253];
 } __attribute__ ((packed));
 
 union fsf_qtcb_bottom {
index f81850624eedc5b5ca0e33119275e4964acaaebb..01687559dc06094fc4f81d89ab6256e2717a56a7 100644 (file)
@@ -40,6 +40,7 @@ static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *, int,
                                          unsigned int, unsigned int);
 
 static struct device_attribute *zfcp_sysfs_sdev_attrs[];
+static struct device_attribute *zfcp_a_stats_attrs[];
 
 struct zfcp_data zfcp_data = {
        .scsi_host_template = {
@@ -61,6 +62,7 @@ struct zfcp_data zfcp_data = {
                .use_clustering         = 1,
                .sdev_attrs             = zfcp_sysfs_sdev_attrs,
                .max_sectors            = ZFCP_MAX_SECTORS,
+               .shost_attrs            = zfcp_a_stats_attrs,
        },
        .driver_version = ZFCP_VERSION,
 };
@@ -809,4 +811,116 @@ static struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
        NULL
 };
 
+static ssize_t zfcp_sysfs_adapter_util_show(struct device *dev,
+                                           struct device_attribute *attr,
+                                           char *buf)
+{
+       struct Scsi_Host *scsi_host = dev_to_shost(dev);
+       struct fsf_qtcb_bottom_port *qtcb_port;
+       int retval;
+       struct zfcp_adapter *adapter;
+
+       adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
+       if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
+               return -EOPNOTSUPP;
+
+       qtcb_port = kzalloc(sizeof(struct fsf_qtcb_bottom_port), GFP_KERNEL);
+       if (!qtcb_port)
+               return -ENOMEM;
+
+       retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
+       if (!retval)
+               retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
+                                qtcb_port->cb_util, qtcb_port->a_util);
+       kfree(qtcb_port);
+       return retval;
+}
+
+static int zfcp_sysfs_adapter_ex_config(struct device *dev,
+                                       struct fsf_statistics_info *stat_inf)
+{
+       int retval;
+       struct fsf_qtcb_bottom_config *qtcb_config;
+       struct Scsi_Host *scsi_host = dev_to_shost(dev);
+       struct zfcp_adapter *adapter;
+
+       adapter = (struct zfcp_adapter *) scsi_host->hostdata[0];
+       if (!(adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA))
+               return -EOPNOTSUPP;
+
+       qtcb_config = kzalloc(sizeof(struct fsf_qtcb_bottom_config),
+                              GFP_KERNEL);
+       if (!qtcb_config)
+               return -ENOMEM;
+
+       retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
+       if (!retval)
+               *stat_inf = qtcb_config->stat_info;
+
+       kfree(qtcb_config);
+       return retval;
+}
+
+static ssize_t zfcp_sysfs_adapter_request_show(struct device *dev,
+                                              struct device_attribute *attr,
+                                              char *buf)
+{
+       struct fsf_statistics_info stat_info;
+       int retval;
+
+       retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
+       if (retval)
+               return retval;
+
+       return sprintf(buf, "%llu %llu %llu\n",
+                      (unsigned long long) stat_info.input_req,
+                      (unsigned long long) stat_info.output_req,
+                      (unsigned long long) stat_info.control_req);
+}
+
+static ssize_t zfcp_sysfs_adapter_mb_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
+{
+       struct fsf_statistics_info stat_info;
+       int retval;
+
+       retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
+       if (retval)
+               return retval;
+
+       return sprintf(buf, "%llu %llu\n",
+                      (unsigned long long) stat_info.input_mb,
+                      (unsigned long long) stat_info.output_mb);
+}
+
+static ssize_t zfcp_sysfs_adapter_sec_active_show(struct device *dev,
+                                                 struct device_attribute *attr,
+                                                 char *buf)
+{
+       struct fsf_statistics_info stat_info;
+       int retval;
+
+       retval = zfcp_sysfs_adapter_ex_config(dev, &stat_info);
+       if (retval)
+               return retval;
+
+       return sprintf(buf, "%llu\n",
+                      (unsigned long long) stat_info.seconds_act);
+}
+
+static DEVICE_ATTR(utilization, S_IRUGO, zfcp_sysfs_adapter_util_show, NULL);
+static DEVICE_ATTR(requests, S_IRUGO, zfcp_sysfs_adapter_request_show, NULL);
+static DEVICE_ATTR(megabytes, S_IRUGO, zfcp_sysfs_adapter_mb_show, NULL);
+static DEVICE_ATTR(seconds_active, S_IRUGO,
+                  zfcp_sysfs_adapter_sec_active_show, NULL);
+
+static struct device_attribute *zfcp_a_stats_attrs[] = {
+       &dev_attr_utilization,
+       &dev_attr_requests,
+       &dev_attr_megabytes,
+       &dev_attr_seconds_active,
+       NULL
+};
+
 #undef ZFCP_LOG_AREA
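The array above only wires up the four new statistics attributes; how they are consumed afterwards is not part of this hunk. Purely as an illustrative user-space sketch, assuming the attributes end up under the usual SCSI host sysfs directory (e.g. /sys/class/scsi_host/host0/) and that "host0" happens to be the zfcp adapter in question:

#include <stdio.h>

int main(void)
{
	char line[128];
	/* "host0" is a placeholder; substitute the zfcp adapter's SCSI host. */
	FILE *f = fopen("/sys/class/scsi_host/host0/utilization", "r");

	if (!f)
		return 1;
	if (fgets(line, sizeof(line), f))
		/* three counters: cp_util, cb_util and a_util, in that order */
		printf("utilization: %s", line);
	fclose(f);
	return 0;
}

The requests, megabytes and seconds_active files can be read the same way; each read fails with EOPNOTSUPP when the adapter lacks FSF_FEATURE_MEASUREMENT_DATA, as the show routines above check.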
index a4e758143665aa2427aa97ecf6f86dcfbb20d4c2..235703414370b8aac9329e783319dc92f49b7546 100644 (file)
@@ -637,7 +637,7 @@ static int wd_inittimer(int whichdog)
                        break;
                default:
                        printk("%s: %s: invalid watchdog id: %i\n",
-                               WD_OBPNAME, __FUNCTION__, whichdog);
+                               WD_OBPNAME, __func__, whichdog);
                        return(1);
        }
        if(0 != misc_register(whichmisc))
index 44d2ef906ac732aa7b0a4c92467602ad128ab9e6..383f32c1d347c33b9ed63bf9e479b8ae237a94e7 100644 (file)
@@ -393,13 +393,13 @@ static int __init ts102_uctrl_init(void)
        err = request_irq(driver->irq, uctrl_interrupt, 0, "uctrl", driver);
        if (err) {
                printk("%s: unable to register irq %d\n",
-                      __FUNCTION__, driver->irq);
+                      __func__, driver->irq);
                return err;
        }
 
        if (misc_register(&uctrl_dev)) {
                printk("%s: unable to get misc minor %d\n",
-                      __FUNCTION__, uctrl_dev.minor);
+                      __func__, uctrl_dev.minor);
                free_irq(driver->irq, driver);
                return -ENODEV;
        }
index b374e457e5e2ded4401697ce2e63e88252376f41..b898d382b7b0768d93ed3250bb0782ba1d38c095 100644 (file)
@@ -1499,7 +1499,7 @@ static void FlashPoint_StartCCB(unsigned long pCurrCard, struct sccb *p_Sccb)
        thisCard = ((struct sccb_card *)pCurrCard)->cardIndex;
        ioport = ((struct sccb_card *)pCurrCard)->ioPort;
 
-       if ((p_Sccb->TargID > MAX_SCSI_TAR) || (p_Sccb->Lun > MAX_LUN)) {
+       if ((p_Sccb->TargID >= MAX_SCSI_TAR) || (p_Sccb->Lun >= MAX_LUN)) {
 
                p_Sccb->HostStatus = SCCB_COMPLETE;
                p_Sccb->SccbStatus = SCCB_ERROR;
index 7f78e3ea517d7e9afa272af07d017c8ae26758bf..99c57b0c1d540c5d8e7d63138ebd76bc7dc4db4f 100644 (file)
@@ -1677,6 +1677,16 @@ config MAC_SCSI
          SCSI-HOWTO, available from
          <http://www.tldp.org/docs.html#howto>.
 
+config SCSI_MAC_ESP
+       tristate "Macintosh NCR53c9[46] SCSI"
+       depends on MAC && SCSI
+       help
+         This is the NCR 53c9x SCSI controller found on most of the 68040
+         based Macintoshes.
+
+         To compile this driver as a module, choose M here: the module
+         will be called mac_esp.
+
 config MVME147_SCSI
        bool "WD33C93 SCSI driver for MVME147"
        depends on MVME147 && SCSI=y
index 23e6ecbd47780bcc59a0042727e710065306b7b5..6c775e350c9842718283cbd9a6340c139e3e4914 100644 (file)
@@ -46,6 +46,7 @@ obj-$(CONFIG_MVME147_SCSI)    += mvme147.o    wd33c93.o
 obj-$(CONFIG_SGIWD93_SCSI)     += sgiwd93.o    wd33c93.o
 obj-$(CONFIG_ATARI_SCSI)       += atari_scsi.o
 obj-$(CONFIG_MAC_SCSI)         += mac_scsi.o
+obj-$(CONFIG_SCSI_MAC_ESP)     += esp_scsi.o   mac_esp.o
 obj-$(CONFIG_SUN3_SCSI)                += sun3_scsi.o  sun3_scsi_vme.o
 obj-$(CONFIG_MVME16x_SCSI)     += 53c700.o     mvme16x_scsi.o
 obj-$(CONFIG_BVME6000_SCSI)    += 53c700.o     bvme6000_scsi.o
index 6ccdc96cc480559c4e259a79d004e4589d316300..f5215fd4b73d30bac4603dd7deab26adccb11971 100644 (file)
@@ -994,13 +994,13 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
        SCpnt->SCp.sent_command = 0;
 
        if(SCpnt->SCp.phase & (resetting|check_condition)) {
-               if(SCpnt->host_scribble==0 || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
+               if (!SCpnt->host_scribble || SCSEM(SCpnt) || SCNEXT(SCpnt)) {
                        printk(ERR_LEAD "cannot reuse command\n", CMDINFO(SCpnt));
                        return FAILED;
                }
        } else {
                SCpnt->host_scribble = kmalloc(sizeof(struct aha152x_scdata), GFP_ATOMIC);
-               if(SCpnt->host_scribble==0) {
+               if(!SCpnt->host_scribble) {
                        printk(ERR_LEAD "allocation failed\n", CMDINFO(SCpnt));
                        return FAILED;
                }
@@ -1162,7 +1162,7 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
        }
 
        DO_LOCK(flags);
-       issued       = remove_SC(&ISSUE_SC, SCpnt)==0;
+       issued       = remove_SC(&ISSUE_SC, SCpnt) == NULL;
        disconnected = issued && remove_SC(&DISCONNECTED_SC, SCpnt);
        DO_UNLOCK(flags);
 
@@ -1432,15 +1432,10 @@ static void run(struct work_struct *work)
  */
 static irqreturn_t intr(int irqno, void *dev_id)
 {
-       struct Scsi_Host *shpnt = (struct Scsi_Host *)dev_id;
+       struct Scsi_Host *shpnt = dev_id;
        unsigned long flags;
        unsigned char rev, dmacntrl0;
 
-       if (!shpnt) {
-               printk(KERN_ERR "aha152x: catched interrupt %d for unknown controller.\n", irqno);
-               return IRQ_NONE;
-       }
-
        /*
         * Read a couple of registers that are known to not be all 1's. If
         * we read all 1's (-1), that means that either:
index 5a1471c370fa70f601a845fb66e762701bc2d1a8..80594947c6f6c1a97117be2bedde8fa9b83c75a8 100644 (file)
@@ -153,8 +153,6 @@ struct aha1542_hostdata {
 
 #define HOSTDATA(host) ((struct aha1542_hostdata *) &host->hostdata)
 
-static struct Scsi_Host *aha_host[7];  /* One for each IRQ level (9-15) */
-
 static DEFINE_SPINLOCK(aha1542_lock);
 
 
@@ -163,8 +161,7 @@ static DEFINE_SPINLOCK(aha1542_lock);
 
 static void setup_mailboxes(int base_io, struct Scsi_Host *shpnt);
 static int aha1542_restart(struct Scsi_Host *shost);
-static void aha1542_intr_handle(struct Scsi_Host *shost, void *dev_id);
-static irqreturn_t do_aha1542_intr_handle(int irq, void *dev_id);
+static void aha1542_intr_handle(struct Scsi_Host *shost);
 
 #define aha1542_intr_reset(base)  outb(IRST, CONTROL(base))
 
@@ -404,23 +401,19 @@ fail:
 }
 
 /* A quick wrapper for do_aha1542_intr_handle to grab the spin lock */
-static irqreturn_t do_aha1542_intr_handle(int irq, void *dev_id)
+static irqreturn_t do_aha1542_intr_handle(int dummy, void *dev_id)
 {
        unsigned long flags;
-       struct Scsi_Host *shost;
-
-       shost = aha_host[irq - 9];
-       if (!shost)
-               panic("Splunge!");
+       struct Scsi_Host *shost = dev_id;
 
        spin_lock_irqsave(shost->host_lock, flags);
-       aha1542_intr_handle(shost, dev_id);
+       aha1542_intr_handle(shost);
        spin_unlock_irqrestore(shost->host_lock, flags);
        return IRQ_HANDLED;
 }
 
 /* A "high" level interrupt handler */
-static void aha1542_intr_handle(struct Scsi_Host *shost, void *dev_id)
+static void aha1542_intr_handle(struct Scsi_Host *shost)
 {
        void (*my_done) (Scsi_Cmnd *) = NULL;
        int errstatus, mbi, mbo, mbistatus;
@@ -1197,7 +1190,8 @@ fail:
 
                        DEB(printk("aha1542_detect: enable interrupt channel %d\n", irq_level));
                        spin_lock_irqsave(&aha1542_lock, flags);
-                       if (request_irq(irq_level, do_aha1542_intr_handle, 0, "aha1542", NULL)) {
+                       if (request_irq(irq_level, do_aha1542_intr_handle, 0,
+                                       "aha1542", shpnt)) {
                                printk(KERN_ERR "Unable to allocate IRQ for adaptec controller.\n");
                                spin_unlock_irqrestore(&aha1542_lock, flags);
                                goto unregister;
@@ -1205,7 +1199,7 @@ fail:
                        if (dma_chan != 0xFF) {
                                if (request_dma(dma_chan, "aha1542")) {
                                        printk(KERN_ERR "Unable to allocate DMA channel for Adaptec.\n");
-                                       free_irq(irq_level, NULL);
+                                       free_irq(irq_level, shpnt);
                                        spin_unlock_irqrestore(&aha1542_lock, flags);
                                        goto unregister;
                                }
@@ -1214,7 +1208,7 @@ fail:
                                        enable_dma(dma_chan);
                                }
                        }
-                       aha_host[irq_level - 9] = shpnt;
+
                        shpnt->this_id = scsi_id;
                        shpnt->unique_id = base_io;
                        shpnt->io_port = base_io;
@@ -1276,7 +1270,7 @@ unregister:
 static int aha1542_release(struct Scsi_Host *shost)
 {
        if (shost->irq)
-               free_irq(shost->irq, NULL);
+               free_irq(shost->irq, shost);
        if (shost->dma_channel != 0xff)
                free_dma(shost->dma_channel);
        if (shost->io_port && shost->n_io_port)
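These aha1542 hunks replace the global aha_host[] lookup with the standard dev_id cookie: the Scsi_Host pointer passed as the last argument of request_irq() is handed back to the handler, and the same pointer must be given to free_irq(). A generic sketch of that pattern follows; my_intr/my_probe/my_remove and "my_hba" are made-up illustrative names, not part of the driver.

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

static irqreturn_t my_intr(int irq, void *dev_id)
{
	struct Scsi_Host *shost = dev_id;	/* the cookie from request_irq() */

	/* ... service the controller via shost->hostdata ... */
	return IRQ_HANDLED;
}

static int my_probe(struct Scsi_Host *shost, int irq)
{
	/* the last argument becomes dev_id in my_intr() */
	if (request_irq(irq, my_intr, 0, "my_hba", shost))
		return -EBUSY;
	return 0;
}

static void my_remove(struct Scsi_Host *shost, int irq)
{
	free_irq(irq, shost);	/* must match the cookie used at request time */
}

Handing the host pointer to the handler also removes the need for any per-IRQ lookup table, which is exactly what dropping aha_host[] achieves.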
index 1ac119733bac3b2ceef4fea3a228931037366664..f220e5e436ab9b762f42bda40365be70580da4ad 100644 (file)
@@ -50,7 +50,7 @@ aic7770_map_registers(struct ahc_softc *ahc, u_int port)
        /*
         * Lock out other contenders for our i/o space.
         */
-       if (request_region(port, AHC_EISA_IOSIZE, "aic7xxx") == 0)
+       if (!request_region(port, AHC_EISA_IOSIZE, "aic7xxx"))
                return (ENOMEM);
        ahc->tag = BUS_SPACE_PIO;
        ahc->bsh.ioport = port;
index 2f00467b6b8c8bd12418614d186e475f2fd24590..be5558ab84ea06622e51f3a7f176d3efcba2e3f6 100644 (file)
@@ -815,7 +815,7 @@ struct ahd_tmode_tstate {
 struct ahd_phase_table_entry {
         uint8_t phase;
         uint8_t mesg_out; /* Message response to parity errors */
-       char *phasemsg;
+       const char *phasemsg;
 };
 
 /************************** Serial EEPROM Format ******************************/
@@ -1314,7 +1314,7 @@ typedef int (ahd_device_setup_t)(struct ahd_softc *);
 struct ahd_pci_identity {
        uint64_t                 full_id;
        uint64_t                 id_mask;
-       char                    *name;
+       const char              *name;
        ahd_device_setup_t      *setup;
 };
 
@@ -1322,7 +1322,7 @@ struct ahd_pci_identity {
 struct aic7770_identity {
        uint32_t                 full_id;
        uint32_t                 id_mask;
-       char                    *name;
+       const char              *name;
        ahd_device_setup_t      *setup;
 };
 extern struct aic7770_identity aic7770_ident_table [];
@@ -1333,12 +1333,11 @@ extern const int ahd_num_aic7770_devs;
 
 /*************************** Function Declarations ****************************/
 /******************************************************************************/
-void                   ahd_reset_cmds_pending(struct ahd_softc *ahd);
 
 /***************************** PCI Front End *********************************/
-struct ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t);
+const struct   ahd_pci_identity *ahd_find_pci_device(ahd_dev_softc_t);
 int                      ahd_pci_config(struct ahd_softc *,
-                                        struct ahd_pci_identity *);
+                                        const struct ahd_pci_identity *);
 int    ahd_pci_test_register_access(struct ahd_softc *);
 #ifdef CONFIG_PM
 void   ahd_pci_suspend(struct ahd_softc *);
@@ -1376,16 +1375,6 @@ int                       ahd_write_flexport(struct ahd_softc *ahd,
 int                     ahd_read_flexport(struct ahd_softc *ahd, u_int addr,
                                           uint8_t *value);
 
-/*************************** Interrupt Services *******************************/
-void                   ahd_run_qoutfifo(struct ahd_softc *ahd);
-#ifdef AHD_TARGET_MODE
-void                   ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
-#endif
-void                   ahd_handle_hwerrint(struct ahd_softc *ahd);
-void                   ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
-void                   ahd_handle_scsiint(struct ahd_softc *ahd,
-                                          u_int intstat);
-
 /***************************** Error Recovery *********************************/
 typedef enum {
        SEARCH_COMPLETE,
@@ -1479,7 +1468,7 @@ extern uint32_t ahd_debug;
 void                   ahd_print_devinfo(struct ahd_softc *ahd,
                                          struct ahd_devinfo *devinfo);
 void                   ahd_dump_card_state(struct ahd_softc *ahd);
-int                    ahd_print_register(ahd_reg_parse_entry_t *table,
+int                    ahd_print_register(const ahd_reg_parse_entry_t *table,
                                           u_int num_entries,
                                           const char *name,
                                           u_int address,
index be14e2ecb8f796b6bdaebe3c4926481160a238bd..cca16fc5b4ad746a0ec02fedb575eea6dbc7df90 100644 (file)
@@ -198,6 +198,7 @@ register SEQINTCODE {
 register CLRINT {
        address                 0x003
        access_mode     WO
+       count           19
        field   CLRHWERRINT     0x80 /* Rev B or greater */
        field   CLRBRKADRINT    0x40
        field   CLRSWTMINT      0x20
@@ -245,6 +246,7 @@ register CLRERR {
 register HCNTRL {
        address                 0x005
        access_mode     RW
+       count           12
        field   SEQ_RESET       0x80 /* Rev B or greater */
        field   POWRDN          0x40
        field   SWINT           0x10
@@ -262,6 +264,7 @@ register HNSCB_QOFF {
        address                 0x006
        access_mode     RW
        size            2
+       count           2
 }
 
 /*
@@ -270,6 +273,7 @@ register HNSCB_QOFF {
 register HESCB_QOFF {
        address                 0x008
        access_mode     RW
+       count           2
 }
 
 /*
@@ -287,6 +291,7 @@ register HS_MAILBOX {
  */
 register SEQINTSTAT {
        address                 0x00C
+       count           1
        access_mode     RO
        field   SEQ_SWTMRTO     0x10
        field   SEQ_SEQINT      0x08
@@ -332,6 +337,7 @@ register SNSCB_QOFF {
  */
 register SESCB_QOFF {
        address                 0x012
+       count           2
        access_mode     RW
        modes           M_CCHAN
 }
@@ -397,6 +403,7 @@ register DFCNTRL {
        address                 0x019
        access_mode     RW
        modes           M_DFF0, M_DFF1
+       count           11
        field   PRELOADEN       0x80
        field   SCSIENWRDIS     0x40    /* Rev B only. */
        field   SCSIEN          0x20
@@ -415,6 +422,7 @@ register DFCNTRL {
  */
 register DSCOMMAND0 {
        address                 0x019
+       count           1
        access_mode     RW
        modes           M_CFG
        field   CACHETHEN       0x80    /* Cache Threshold enable */
@@ -580,6 +588,7 @@ register DFF_THRSH {
        address                 0x088
        access_mode     RW
        modes           M_CFG
+       count           1
        field   WR_DFTHRSH      0x70 {
                WR_DFTHRSH_MIN,
                WR_DFTHRSH_25,
@@ -800,6 +809,7 @@ register PCIXCTL {
        address                 0x093
        access_mode     RW
        modes           M_CFG
+       count           1
        field   SERRPULSE       0x80
        field   UNEXPSCIEN      0x20
        field   SPLTSMADIS      0x10
@@ -844,6 +854,7 @@ register DCHSPLTSTAT0 {
        address                 0x096
        access_mode     RW
        modes           M_DFF0, M_DFF1
+       count           2
        field   STAETERM        0x80
        field   SCBCERR         0x40
        field   SCADERR         0x20
@@ -895,6 +906,7 @@ register DCHSPLTSTAT1 {
        address                 0x097
        access_mode     RW
        modes           M_DFF0, M_DFF1
+       count           2
        field   RXDATABUCKET    0x01
 }
 
@@ -1048,6 +1060,7 @@ register SGSPLTSTAT0 {
        address                 0x09E
        access_mode     RW
        modes           M_DFF0, M_DFF1
+       count           2
        field   STAETERM        0x80
        field   SCBCERR         0x40
        field   SCADERR         0x20
@@ -1065,6 +1078,7 @@ register SGSPLTSTAT1 {
        address                 0x09F
        access_mode     RW
        modes           M_DFF0, M_DFF1
+       count           2
        field   RXDATABUCKET    0x01
 }
 
@@ -1086,6 +1100,7 @@ register DF0PCISTAT {
        address                 0x0A0
        access_mode     RW
        modes           M_CFG
+       count           1
        field   DPE             0x80
        field   SSE             0x40
        field   RMA             0x20
@@ -1184,6 +1199,7 @@ register TARGPCISTAT {
        address                 0x0A7
        access_mode     RW
        modes           M_CFG
+       count           5
        field   DPE             0x80
        field   SSE             0x40
        field   STA             0x08
@@ -1198,6 +1214,7 @@ register LQIN {
        address                 0x020
        access_mode     RW
        size            20
+       count           2
        modes           M_DFF0, M_DFF1, M_SCSI
 }
 
@@ -1229,6 +1246,7 @@ register LUNPTR {
        address                 0x022
        access_mode     RW
        modes           M_CFG
+       count           2
 }
 
 /*
@@ -1259,6 +1277,7 @@ register CMDLENPTR {
        address                 0x025
        access_mode     RW
        modes           M_CFG
+       count           1
 }
 
 /*
@@ -1270,6 +1289,7 @@ register ATTRPTR {
        address                 0x026
        access_mode     RW
        modes           M_CFG
+       count           1
 }
 
 /*
@@ -1281,6 +1301,7 @@ register FLAGPTR {
        address                 0x027
        access_mode     RW
        modes           M_CFG
+       count           1
 }
 
 /*
@@ -1291,6 +1312,7 @@ register CMDPTR {
        address                 0x028
        access_mode     RW
        modes           M_CFG
+       count           1
 }
 
 /*
@@ -1301,6 +1323,7 @@ register QNEXTPTR {
        address                 0x029
        access_mode     RW
        modes           M_CFG
+       count           1
 }
 
 /*
@@ -1323,6 +1346,7 @@ register ABRTBYTEPTR {
        address                 0x02B
        access_mode     RW
        modes           M_CFG
+       count           1
 }
 
 /*
@@ -1333,6 +1357,7 @@ register ABRTBITPTR {
        address                 0x02C
        access_mode     RW
        modes           M_CFG
+       count           1
 }
 
 /*
@@ -1370,6 +1395,7 @@ register LUNLEN {
        address                 0x030
        access_mode     RW
        modes           M_CFG
+       count           2
        mask            ILUNLEN 0x0F
        mask            TLUNLEN 0xF0
 }
@@ -1383,6 +1409,7 @@ register CDBLIMIT {
        address                 0x031
        access_mode     RW
        modes           M_CFG
+       count           1
 }
 
 /*
@@ -1394,6 +1421,7 @@ register MAXCMD {
        address                 0x032
        access_mode     RW
        modes           M_CFG
+       count           9
 }
 
 /*
@@ -1458,6 +1486,7 @@ register LQCTL1 {
        address                 0x038
        access_mode     RW
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           2
        field   PCI2PCI         0x04
        field   SINGLECMD       0x02
        field   ABORTPENDING    0x01
@@ -1470,6 +1499,7 @@ register LQCTL2 {
        address                 0x039
        access_mode     RW
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           5
        field   LQIRETRY        0x80
        field   LQICONTINUE     0x40
        field   LQITOIDLE       0x20
@@ -1528,6 +1558,7 @@ register SCSISEQ1 {
        address                 0x03B
        access_mode     RW
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           8
        field   MANUALCTL       0x40
        field   ENSELI          0x20
        field   ENRSELI         0x10
@@ -1667,6 +1698,9 @@ register SCSISIGO {
        }
 }
 
+/*
+ * SCSI Control Signal In
+ */
 register SCSISIGI {
        address                 0x041
        access_mode     RO
@@ -1703,6 +1737,7 @@ register MULTARGID {
        access_mode     RW
        modes           M_CFG
        size            2
+       count           2
 }
 
 /*
@@ -1758,6 +1793,7 @@ register TARGIDIN {
        address                 0x048
        access_mode     RO
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           2
        field   CLKOUT          0x80
        field   TARGID          0x0F
 }
@@ -1798,6 +1834,7 @@ register OPTIONMODE {
        address                 0x04A
        access_mode     RW
        modes           M_CFG
+       count           4
        field   BIOSCANCTL              0x80
        field   AUTOACKEN               0x40
        field   BIASCANCTL              0x20
@@ -1850,6 +1887,7 @@ register SIMODE0 {
        address                 0x04B
        access_mode     RW
        modes           M_CFG
+       count           8
        field   ENSELDO         0x40
        field   ENSELDI         0x20
        field   ENSELINGO       0x10
@@ -1945,6 +1983,7 @@ register PERRDIAG {
        address                 0x04E
        access_mode     RO
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           3
        field   HIZERO          0x80
        field   HIPERR          0x40
        field   PREVPHASE       0x20
@@ -1962,6 +2001,7 @@ register LQISTATE {
        address                 0x04E
        access_mode     RO
        modes           M_CFG
+       count           6
 }
 
 /*
@@ -1971,6 +2011,7 @@ register SOFFCNT {
        address                 0x04F
        access_mode     RO
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           1
 }
 
 /*
@@ -1980,6 +2021,7 @@ register LQOSTATE {
        address                 0x04F
        access_mode     RO
        modes           M_CFG
+       count           2
 }
 
 /*
@@ -1989,6 +2031,7 @@ register LQISTAT0 {
        address                 0x050
        access_mode     RO
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           2
        field   LQIATNQAS       0x20
        field   LQICRCT1        0x10
        field   LQICRCT2        0x08
@@ -2004,6 +2047,7 @@ register CLRLQIINT0 {
        address                 0x050
        access_mode     WO
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           1
        field   CLRLQIATNQAS    0x20
        field   CLRLQICRCT1     0x10
        field   CLRLQICRCT2     0x08
@@ -2019,6 +2063,7 @@ register LQIMODE0 {
        address                 0x050
        access_mode     RW
        modes           M_CFG
+       count           3
        field   ENLQIATNQASK    0x20
        field   ENLQICRCT1      0x10
        field   ENLQICRCT2      0x08
@@ -2034,6 +2079,7 @@ register LQISTAT1 {
        address                 0x051
        access_mode     RO
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           3
        field   LQIPHASE_LQ     0x80
        field   LQIPHASE_NLQ    0x40
        field   LQIABORT        0x20
@@ -2051,6 +2097,7 @@ register CLRLQIINT1 {
        address                 0x051
        access_mode     WO
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           4
        field   CLRLQIPHASE_LQ  0x80
        field   CLRLQIPHASE_NLQ 0x40
        field   CLRLIQABORT     0x20
@@ -2068,6 +2115,7 @@ register LQIMODE1 {
        address                 0x051
        access_mode     RW
        modes           M_CFG
+       count           4
        field   ENLQIPHASE_LQ   0x80    /* LQIPHASE1 */
        field   ENLQIPHASE_NLQ  0x40    /* LQIPHASE2 */
        field   ENLIQABORT      0x20
@@ -2102,6 +2150,7 @@ register SSTAT3 {
        address                 0x053
        access_mode     RO
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           3
        field   NTRAMPERR       0x02
        field   OSRAMPERR       0x01
 }
@@ -2113,6 +2162,7 @@ register CLRSINT3 {
        address                 0x053
        access_mode     WO
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           3
        field   CLRNTRAMPERR    0x02
        field   CLROSRAMPERR    0x01
 }
@@ -2124,6 +2174,7 @@ register SIMODE3 {
        address                 0x053
        access_mode     RW
        modes           M_CFG
+       count           4
        field   ENNTRAMPERR     0x02
        field   ENOSRAMPERR     0x01
 }
@@ -2135,6 +2186,7 @@ register LQOSTAT0 {
        address                 0x054
        access_mode     RO
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           2
        field   LQOTARGSCBPERR  0x10
        field   LQOSTOPT2       0x08
        field   LQOATNLQ        0x04
@@ -2149,6 +2201,7 @@ register CLRLQOINT0 {
        address                 0x054
        access_mode     WO
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           3
        field   CLRLQOTARGSCBPERR       0x10
        field   CLRLQOSTOPT2            0x08
        field   CLRLQOATNLQ             0x04
@@ -2163,6 +2216,7 @@ register LQOMODE0 {
        address                 0x054
        access_mode     RW
        modes           M_CFG
+       count           4
        field   ENLQOTARGSCBPERR        0x10
        field   ENLQOSTOPT2             0x08
        field   ENLQOATNLQ              0x04
@@ -2191,6 +2245,7 @@ register CLRLQOINT1 {
        address                 0x055
        access_mode     WO
        modes           M_DFF0, M_DFF1, M_SCSI
+       count           7
        field   CLRLQOINITSCBPERR       0x10
        field   CLRLQOSTOPI2            0x08
        field   CLRLQOBADQAS            0x04
@@ -2205,6 +2260,7 @@ register LQOMODE1 {
        address                 0x055
        access_mode     RW
        modes           M_CFG
+       count           4
        field   ENLQOINITSCBPERR        0x10
        field   ENLQOSTOPI2             0x08
        field   ENLQOBADQAS             0x04
@@ -2232,6 +2288,7 @@ register OS_SPACE_CNT {
        address                 0x056
        access_mode     RO
        modes           M_CFG
+       count           2
 }
 
 /*
@@ -2286,13 +2343,19 @@ register NEXTSCB {
        modes           M_SCSI
 }
 
-/* Rev B only. */
+/*
+ * LQO SCSI Control
+ * (Rev B only.)
+ */
 register LQOSCSCTL {
        address                 0x05A
        access_mode     RW
        size            1
        modes           M_CFG
+       count           1
        field           LQOH2A_VERSION  0x80
+       field           LQOBUSETDLY     0x40
+       field           LQONOHOLDLACK   0x02
        field           LQONOCHKOVER    0x01
 }
 
@@ -2459,6 +2522,7 @@ register NEGPERIOD {
        address                 0x061
        access_mode     RW
        modes           M_SCSI
+       count           1
 }
 
 /*
@@ -2478,6 +2542,7 @@ register NEGOFFSET {
        address                 0x062
        access_mode     RW
        modes           M_SCSI
+       count           1
 }
 
 /*
@@ -2487,6 +2552,7 @@ register NEGPPROPTS {
        address                 0x063
        access_mode     RW
        modes           M_SCSI
+       count           1
        field   PPROPT_PACE     0x08
        field   PPROPT_QAS      0x04
        field   PPROPT_DT       0x02
@@ -2516,12 +2582,19 @@ register ANNEXCOL {
        address                 0x065
        access_mode     RW
        modes           M_SCSI
+       count           7
 }
 
+/*
+ * SCSI Check
+ * (Rev. B only)
+ */
 register SCSCHKN {
        address                 0x066
        access_mode     RW
        modes           M_CFG
+       count           1
+       field   BIDICHKDIS      0x80
        field   STSELSKIDDIS    0x40
        field   CURRFIFODEF     0x20
        field   WIDERESEN       0x10
@@ -2561,6 +2634,7 @@ register ANNEXDAT {
        address                 0x066
        access_mode     RW
        modes           M_SCSI
+       count           3
 }
 
 /*
@@ -2596,6 +2670,7 @@ register TOWNID {
        address                 0x069
        access_mode     RW
        modes           M_SCSI
+       count           2
 }
 
 /*
@@ -2737,6 +2812,7 @@ register SCBAUTOPTR {
        address                 0x0AB
        access_mode     RW
        modes           M_CFG
+       count           1
        field   AUSCBPTR_EN     0x80
        field   SCBPTR_ADDR     0x38
        field   SCBPTR_OFF      0x07
@@ -2881,6 +2957,7 @@ register BRDDAT {
        address                 0x0B8
        access_mode     RW
        modes           M_SCSI
+       count           2
 }
 
 /*
@@ -2890,6 +2967,7 @@ register BRDCTL {
        address                 0x0B9
        access_mode     RW
        modes           M_SCSI
+       count           7
        field   FLXARBACK       0x80
        field   FLXARBREQ       0x40
        field   BRDADDR         0x38
@@ -2905,6 +2983,7 @@ register SEEADR {
        address                 0x0BA
        access_mode     RW
        modes           M_SCSI
+       count           4
 }
 
 /*
@@ -2915,6 +2994,7 @@ register SEEDAT {
        access_mode     RW
        size            2
        modes           M_SCSI
+       count           4
 }
 
 /*
@@ -2924,6 +3004,7 @@ register SEESTAT {
        address                 0x0BE
        access_mode     RO
        modes           M_SCSI
+       count           1
        field   INIT_DONE       0x80
        field   SEEOPCODE       0x70
        field   LDALTID_L       0x08
@@ -2939,6 +3020,7 @@ register SEECTL {
        address                 0x0BE
        access_mode     RW
        modes           M_SCSI
+       count           4
        field   SEEOPCODE       0x70 {
                SEEOP_ERASE     0x70,
                SEEOP_READ      0x60,
@@ -3000,6 +3082,7 @@ register DSPDATACTL {
        address                 0x0C1
        access_mode     RW
        modes           M_CFG
+       count           3
        field   BYPASSENAB      0x80
        field   DESQDIS         0x10
        field   RCVROFFSTDIS    0x04
@@ -3058,6 +3141,7 @@ register DSPSELECT {
        address                 0x0C4
        access_mode     RW
        modes           M_CFG
+       count           1
        field   AUTOINCEN       0x80
        field   DSPSEL          0x1F
 }
@@ -3071,6 +3155,7 @@ register WRTBIASCTL {
        address                 0x0C5
        access_mode     WO
        modes           M_CFG
+       count           3
        field   AUTOXBCDIS      0x80
        field   XMITMANVAL      0x3F
 }
@@ -3196,7 +3281,8 @@ register OVLYADDR {
  */
 register SEQCTL0 {
        address                 0x0D6
-       access_mode RW
+       access_mode     RW
+       count           11
        field   PERRORDIS       0x80
        field   PAUSEDIS        0x40
        field   FAILDIS         0x20
@@ -3226,7 +3312,8 @@ register SEQCTL1 {
  */
 register FLAGS {
        address                 0x0D8
-       access_mode RO
+       access_mode     RO
+       count           23
        field   ZERO            0x02
        field   CARRY           0x01
 }
@@ -3255,7 +3342,8 @@ register SEQINTCTL {
  */
 register SEQRAM {
        address                 0x0DA
-       access_mode RW
+       access_mode     RW
+       count           2
 }
 
 /*
@@ -3266,6 +3354,7 @@ register PRGMCNT {
        address                 0x0DE
        access_mode     RW
        size            2
+       count           5
 }
 
 /*
@@ -3273,7 +3362,7 @@ register PRGMCNT {
  */
 register ACCUM {
        address                 0x0E0
-       access_mode RW
+       access_mode     RW
        accumulator
 }
 
@@ -3401,6 +3490,7 @@ register INTVEC1_ADDR {
        access_mode     RW
        size            2
        modes           M_CFG
+       count           1
 }
 
 /*
@@ -3412,6 +3502,7 @@ register CURADDR {
        access_mode     RW
        size            2
        modes           M_SCSI
+       count           2
 }
 
 /*
@@ -3423,6 +3514,7 @@ register INTVEC2_ADDR {
        access_mode     RW
        size            2
        modes           M_CFG
+       count           1
 }
 
 /*
@@ -3579,6 +3671,7 @@ scratch_ram {
        /* Parameters for DMA Logic */
        DMAPARAMS {
                size            1
+               count           8
                field   PRELOADEN       0x80
                field   WIDEODD         0x40
                field   SCSIEN          0x20
@@ -3648,9 +3741,11 @@ scratch_ram {
         */
        KERNEL_TQINPOS {
                size            1
+               count           1
        }
-       TQINPOS {                
+       TQINPOS {
                size            1
+               count           8
        }
        /*
         * Base address of our shared data with the kernel driver in host
@@ -3681,6 +3776,7 @@ scratch_ram {
        }
        ARG_2 {
                size            1
+               count           1
                alias   RETURN_2
        }
 
@@ -3698,6 +3794,7 @@ scratch_ram {
         */
        SCSISEQ_TEMPLATE {
                size            1
+               count           7
                field   MANUALCTL       0x40
                field   ENSELI          0x20
                field   ENRSELI         0x10
@@ -3711,6 +3808,7 @@ scratch_ram {
         */
        INITIATOR_TAG {
                size            1
+               count           1
        }
 
        SEQ_FLAGS2 {
@@ -3777,6 +3875,7 @@ scratch_ram {
         */
        CMDSIZE_TABLE {
                size            8
+               count           8
        }
        /*
         * When an SCB with the MK_MESSAGE flag is
@@ -3803,8 +3902,8 @@ scratch_ram {
 /************************* Hardware SCB Definition ****************************/
 scb {
        address                 0x180
-       size    64
-       modes   0, 1, 2, 3
+       size            64
+       modes           0, 1, 2, 3
        SCB_RESIDUAL_DATACNT {
                size    4
                alias   SCB_CDB_STORE
index ade0fb8fbdb23fa9264174e9b6b2c08a972e42f2..55508b0fcec4020df71de8236e854f712a952393 100644 (file)
@@ -52,7 +52,7 @@
 
 
 /***************************** Lookup Tables **********************************/
-static char *ahd_chip_names[] =
+static const char *const ahd_chip_names[] =
 {
        "NONE",
        "aic7901",
@@ -66,10 +66,10 @@ static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names);
  */
 struct ahd_hard_error_entry {
         uint8_t errno;
-       char *errmesg;
+       const char *errmesg;
 };
 
-static struct ahd_hard_error_entry ahd_hard_errors[] = {
+static const struct ahd_hard_error_entry ahd_hard_errors[] = {
        { DSCTMOUT,     "Discard Timer has timed out" },
        { ILLOPCODE,    "Illegal Opcode in sequencer program" },
        { SQPARERR,     "Sequencer Parity Error" },
@@ -79,7 +79,7 @@ static struct ahd_hard_error_entry ahd_hard_errors[] = {
 };
 static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors);
 
-static struct ahd_phase_table_entry ahd_phase_table[] =
+static const struct ahd_phase_table_entry ahd_phase_table[] =
 {
        { P_DATAOUT,    MSG_NOOP,               "in Data-out phase"     },
        { P_DATAIN,     MSG_INITIATOR_DET_ERR,  "in Data-in phase"      },
@@ -213,7 +213,7 @@ static void         ahd_dumpseq(struct ahd_softc *ahd);
 #endif
 static void            ahd_loadseq(struct ahd_softc *ahd);
 static int             ahd_check_patch(struct ahd_softc *ahd,
-                                       struct patch **start_patch,
+                                       const struct patch **start_patch,
                                        u_int start_instr, u_int *skip_addr);
 static u_int           ahd_resolve_seqaddr(struct ahd_softc *ahd,
                                            u_int address);
@@ -254,7 +254,7 @@ static void         ahd_freeze_devq(struct ahd_softc *ahd,
                                        struct scb *scb);
 static void            ahd_handle_scb_status(struct ahd_softc *ahd,
                                              struct scb *scb);
-static struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase);
+static const struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase);
 static void            ahd_shutdown(void *arg);
 static void            ahd_update_coalescing_values(struct ahd_softc *ahd,
                                                     u_int timer,
@@ -266,8 +266,774 @@ static int                ahd_match_scb(struct ahd_softc *ahd, struct scb *scb,
                                      int target, char channel, int lun,
                                      u_int tag, role_t role);
 
-/******************************** Private Inlines *****************************/
+static void            ahd_reset_cmds_pending(struct ahd_softc *ahd);
+
+/*************************** Interrupt Services *******************************/
+static void            ahd_run_qoutfifo(struct ahd_softc *ahd);
+#ifdef AHD_TARGET_MODE
+static void            ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
+#endif
+static void            ahd_handle_hwerrint(struct ahd_softc *ahd);
+static void            ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
+static void            ahd_handle_scsiint(struct ahd_softc *ahd,
+                                          u_int intstat);
+
+/************************ Sequencer Execution Control *************************/
+void
+ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
+{
+       if (ahd->src_mode == src && ahd->dst_mode == dst)
+               return;
+#ifdef AHD_DEBUG
+       if (ahd->src_mode == AHD_MODE_UNKNOWN
+        || ahd->dst_mode == AHD_MODE_UNKNOWN)
+               panic("Setting mode prior to saving it.\n");
+       if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
+               printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
+                      ahd_build_mode_state(ahd, src, dst));
+#endif
+       ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
+       ahd->src_mode = src;
+       ahd->dst_mode = dst;
+}
+
+static void
+ahd_update_modes(struct ahd_softc *ahd)
+{
+       ahd_mode_state mode_ptr;
+       ahd_mode src;
+       ahd_mode dst;
+
+       mode_ptr = ahd_inb(ahd, MODE_PTR);
+#ifdef AHD_DEBUG
+       if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
+               printf("Reading mode 0x%x\n", mode_ptr);
+#endif
+       ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
+       ahd_known_modes(ahd, src, dst);
+}
+
+static void
+ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
+                ahd_mode dstmode, const char *file, int line)
+{
+#ifdef AHD_DEBUG
+       if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
+        || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
+               panic("%s:%s:%d: Mode assertion failed.\n",
+                      ahd_name(ahd), file, line);
+       }
+#endif
+}
+
+#define AHD_ASSERT_MODES(ahd, source, dest) \
+       ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
+
+ahd_mode_state
+ahd_save_modes(struct ahd_softc *ahd)
+{
+       if (ahd->src_mode == AHD_MODE_UNKNOWN
+        || ahd->dst_mode == AHD_MODE_UNKNOWN)
+               ahd_update_modes(ahd);
+
+       return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
+}
+
+void
+ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
+{
+       ahd_mode src;
+       ahd_mode dst;
+
+       ahd_extract_mode_state(ahd, state, &src, &dst);
+       ahd_set_modes(ahd, src, dst);
+}
+
+/*
+ * Determine whether the sequencer has halted code execution.
+ * Returns non-zero status if the sequencer is stopped.
+ */
+int
+ahd_is_paused(struct ahd_softc *ahd)
+{
+       return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
+}
+
+/*
+ * Request that the sequencer stop and wait, indefinitely, for it
+ * to stop.  The sequencer will only acknowledge that it is paused
+ * once it has reached an instruction boundary and PAUSEDIS is
+ * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
+ * for critical sections.
+ */
+void
+ahd_pause(struct ahd_softc *ahd)
+{
+       ahd_outb(ahd, HCNTRL, ahd->pause);
+
+       /*
+        * Since the sequencer can disable pausing in a critical section, we
+        * must loop until it actually stops.
+        */
+       while (ahd_is_paused(ahd) == 0)
+               ;
+}
+
+/*
+ * Allow the sequencer to continue program execution.
+ * We check here to ensure that no additional interrupt
+ * sources that would cause the sequencer to halt have been
+ * asserted.  If, for example, a SCSI bus reset is detected
+ * while we are fielding a different, pausing, interrupt type,
+ * we don't want to release the sequencer before going back
+ * into our interrupt handler and dealing with this new
+ * condition.
+ */
+void
+ahd_unpause(struct ahd_softc *ahd)
+{
+       /*
+        * Automatically restore our modes to those saved
+        * prior to the first change of the mode.
+        */
+       if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
+        && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
+               if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
+                       ahd_reset_cmds_pending(ahd);
+               ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
+       }
+
+       if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
+               ahd_outb(ahd, HCNTRL, ahd->unpause);
+
+       ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
+}
+
+/*********************** Scatter Gather List Handling *************************/
+void *
+ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
+            void *sgptr, dma_addr_t addr, bus_size_t len, int last)
+{
+       scb->sg_count++;
+       if (sizeof(dma_addr_t) > 4
+        && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+               struct ahd_dma64_seg *sg;
+
+               sg = (struct ahd_dma64_seg *)sgptr;
+               sg->addr = ahd_htole64(addr);
+               sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
+               return (sg + 1);
+       } else {
+               struct ahd_dma_seg *sg;
 
+               sg = (struct ahd_dma_seg *)sgptr;
+               sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
+               sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
+                                   | (last ? AHD_DMA_LAST_SEG : 0));
+               return (sg + 1);
+       }
+}
+
+static void
+ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
+{
+       /* XXX Handle target mode SCBs. */
+       scb->crc_retry_count = 0;
+       if ((scb->flags & SCB_PACKETIZED) != 0) {
+               /* XXX what about ACA??  It is type 4, but TAG_TYPE == 0x3. */
+               scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
+       } else {
+               if (ahd_get_transfer_length(scb) & 0x01)
+                       scb->hscb->task_attribute = SCB_XFERLEN_ODD;
+               else
+                       scb->hscb->task_attribute = 0;
+       }
+
+       if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
+        || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
+               scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
+                   ahd_htole32(scb->sense_busaddr);
+}
+
+static void
+ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+       /*
+        * Copy the first SG into the "current" data pointer area.
+        */
+       if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
+               struct ahd_dma64_seg *sg;
+
+               sg = (struct ahd_dma64_seg *)scb->sg_list;
+               scb->hscb->dataptr = sg->addr;
+               scb->hscb->datacnt = sg->len;
+       } else {
+               struct ahd_dma_seg *sg;
+               uint32_t *dataptr_words;
+
+               sg = (struct ahd_dma_seg *)scb->sg_list;
+               dataptr_words = (uint32_t*)&scb->hscb->dataptr;
+               dataptr_words[0] = sg->addr;
+               dataptr_words[1] = 0;
+               if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
+                       uint64_t high_addr;
+
+                       high_addr = ahd_le32toh(sg->len) & 0x7F000000;
+                       scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
+               }
+               scb->hscb->datacnt = sg->len;
+       }
+       /*
+        * Note where to find the SG entries in bus space.
+        * We also set the full residual flag which the
+        * sequencer will clear as soon as a data transfer
+        * occurs.
+        */
+       scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
+}
+
+static void
+ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+       scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
+       scb->hscb->dataptr = 0;
+       scb->hscb->datacnt = 0;
+}
+
+/************************** Memory mapping routines ***************************/
+static void *
+ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
+{
+       dma_addr_t sg_offset;
+
+       /* sg_list_phys points to entry 1, not 0 */
+       sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
+       return ((uint8_t *)scb->sg_list + sg_offset);
+}
+
+static uint32_t
+ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
+{
+       dma_addr_t sg_offset;
+
+       /* sg_list_phys points to entry 1, not 0 */
+       sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
+                 - ahd_sg_size(ahd);
+
+       return (scb->sg_list_busaddr + sg_offset);
+}
+
+static void
+ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+       ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
+                       scb->hscb_map->dmamap,
+                       /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
+                       /*len*/sizeof(*scb->hscb), op);
+}
+
+void
+ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+       if (scb->sg_count == 0)
+               return;
+
+       ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
+                       scb->sg_map->dmamap,
+                       /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
+                       /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
+}
+
+static void
+ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
+{
+       ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
+                       scb->sense_map->dmamap,
+                       /*offset*/scb->sense_busaddr,
+                       /*len*/AHD_SENSE_BUFSIZE, op);
+}
+
+#ifdef AHD_TARGET_MODE
+static uint32_t
+ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
+{
+       return (((uint8_t *)&ahd->targetcmds[index])
+              - (uint8_t *)ahd->qoutfifo);
+}
+#endif
+
+/*********************** Miscellaneous Support Functions ***********************/
+/*
+ * Return pointers to the transfer negotiation information
+ * for the specified our_id/remote_id pair.
+ */
+struct ahd_initiator_tinfo *
+ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
+                   u_int remote_id, struct ahd_tmode_tstate **tstate)
+{
+       /*
+        * Transfer data structures are stored from the perspective
+        * of the target role.  Since the parameters for a connection
+        * in the initiator role to a given target are the same as
+        * when the roles are reversed, we pretend we are the target.
+        */
+       if (channel == 'B')
+               our_id += 8;
+       *tstate = ahd->enabled_targets[our_id];
+       return (&(*tstate)->transinfo[remote_id]);
+}
+
+uint16_t
+ahd_inw(struct ahd_softc *ahd, u_int port)
+{
+       /*
+        * Read high byte first as some registers increment
+        * or have other side effects when the low byte is
+        * read.
+        */
+       uint16_t r = ahd_inb(ahd, port+1) << 8;
+       return r | ahd_inb(ahd, port);
+}
+
+void
+ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
+{
+       /*
+        * Write low byte first to accommodate registers
+        * such as PRGMCNT where the order matters.
+        */
+       ahd_outb(ahd, port, value & 0xFF);
+       ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
+}
+
+uint32_t
+ahd_inl(struct ahd_softc *ahd, u_int port)
+{
+       return ((ahd_inb(ahd, port))
+             | (ahd_inb(ahd, port+1) << 8)
+             | (ahd_inb(ahd, port+2) << 16)
+             | (ahd_inb(ahd, port+3) << 24));
+}
+
+void
+ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
+{
+       ahd_outb(ahd, port, (value) & 0xFF);
+       ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
+       ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
+       ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
+}
+
+uint64_t
+ahd_inq(struct ahd_softc *ahd, u_int port)
+{
+       return ((ahd_inb(ahd, port))
+             | (ahd_inb(ahd, port+1) << 8)
+             | (ahd_inb(ahd, port+2) << 16)
+             | (ahd_inb(ahd, port+3) << 24)
+             | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
+             | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
+             | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
+             | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
+}
+
+void
+ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
+{
+       ahd_outb(ahd, port, value & 0xFF);
+       ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
+       ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
+       ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
+       ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
+       ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
+       ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
+       ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
+}
+
+u_int
+ahd_get_scbptr(struct ahd_softc *ahd)
+{
+       AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+                        ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+       return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
+}
+
+void
+ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
+{
+       AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
+                        ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
+       ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
+       ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_hnscb_qoff(struct ahd_softc *ahd)
+{
+       return (ahd_inw_atomic(ahd, HNSCB_QOFF));
+}
+#endif
+
+static void
+ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+       ahd_outw_atomic(ahd, HNSCB_QOFF, value);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_hescb_qoff(struct ahd_softc *ahd)
+{
+       return (ahd_inb(ahd, HESCB_QOFF));
+}
+#endif
+
+static void
+ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
+{
+       ahd_outb(ahd, HESCB_QOFF, value);
+}
+
+static u_int
+ahd_get_snscb_qoff(struct ahd_softc *ahd)
+{
+       u_int oldvalue;
+
+       AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+       oldvalue = ahd_inw(ahd, SNSCB_QOFF);
+       ahd_outw(ahd, SNSCB_QOFF, oldvalue);
+       return (oldvalue);
+}
+
+static void
+ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+       AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+       ahd_outw(ahd, SNSCB_QOFF, value);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_sescb_qoff(struct ahd_softc *ahd)
+{
+       AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+       return (ahd_inb(ahd, SESCB_QOFF));
+}
+#endif
+
+static void
+ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
+{
+       AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+       ahd_outb(ahd, SESCB_QOFF, value);
+}
+
+#if 0 /* unused */
+static u_int
+ahd_get_sdscb_qoff(struct ahd_softc *ahd)
+{
+       AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+       return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
+}
+#endif
+
+static void
+ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
+{
+       AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
+       ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
+       ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
+}
+
+u_int
+ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
+{
+       u_int value;
+
+       /*
+        * Workaround PCI-X Rev A. hardware bug.
+        * After a host read of SCB memory, the chip
+        * may become confused into thinking prefetch
+        * was required.  This starts the discard timer
+        * running and can cause an unexpected discard
+        * timer interrupt.  The work around is to read
+        * a normal register prior to the exhaustion of
+        * the discard timer.  The mode pointer register
+        * has no side effects and so serves well for
+        * this purpose.
+        *
+        * Razor #528
+        */
+       value = ahd_inb(ahd, offset);
+       if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
+               ahd_inb(ahd, MODE_PTR);
+       return (value);
+}
+
+u_int
+ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
+{
+       return (ahd_inb_scbram(ahd, offset)
+             | (ahd_inb_scbram(ahd, offset+1) << 8));
+}
+
+static uint32_t
+ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
+{
+       return (ahd_inw_scbram(ahd, offset)
+             | (ahd_inw_scbram(ahd, offset+2) << 16));
+}
+
+static uint64_t
+ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
+{
+       return (ahd_inl_scbram(ahd, offset)
+             | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
+}
+
+struct scb *
+ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
+{
+       struct scb* scb;
+
+       if (tag >= AHD_SCB_MAX)
+               return (NULL);
+       scb = ahd->scb_data.scbindex[tag];
+       if (scb != NULL)
+               ahd_sync_scb(ahd, scb,
+                            BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+       return (scb);
+}
+
+static void
+ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
+{
+       struct   hardware_scb *q_hscb;
+       struct   map_node *q_hscb_map;
+       uint32_t saved_hscb_busaddr;
+
+       /*
+        * Our queuing method is a bit tricky.  The card
+        * knows in advance which HSCB (by address) to download,
+        * and we can't disappoint it.  To achieve this, the next
+        * HSCB to download is saved off in ahd->next_queued_hscb.
+        * When we are called to queue "an arbitrary scb",
+        * we copy the contents of the incoming HSCB to the one
+        * the sequencer knows about, swap HSCB pointers and
+        * finally assign the SCB to the tag indexed location
+        * in the scb_array.  This makes sure that we can still
+        * locate the correct SCB by SCB_TAG.
+        */
+       q_hscb = ahd->next_queued_hscb;
+       q_hscb_map = ahd->next_queued_hscb_map;
+       saved_hscb_busaddr = q_hscb->hscb_busaddr;
+       memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
+       q_hscb->hscb_busaddr = saved_hscb_busaddr;
+       q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
+
+       /* Now swap HSCB pointers. */
+       ahd->next_queued_hscb = scb->hscb;
+       ahd->next_queued_hscb_map = scb->hscb_map;
+       scb->hscb = q_hscb;
+       scb->hscb_map = q_hscb_map;
+
+       /* Now define the mapping from tag to SCB in the scbindex */
+       ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
+}
+
+/*
+ * Tell the sequencer about a new transaction to execute.
+ */
+void
+ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
+{
+       ahd_swap_with_next_hscb(ahd, scb);
+
+       if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
+               panic("Attempt to queue invalid SCB tag %x\n",
+                     SCB_GET_TAG(scb));
+
+       /*
+        * Keep a history of SCBs we've downloaded in the qinfifo.
+        */
+       ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
+       ahd->qinfifonext++;
+
+       if (scb->sg_count != 0)
+               ahd_setup_data_scb(ahd, scb);
+       else
+               ahd_setup_noxfer_scb(ahd, scb);
+       ahd_setup_scb_common(ahd, scb);
+
+       /*
+        * Make sure our data is consistent from the
+        * perspective of the adapter.
+        */
+       ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+#ifdef AHD_DEBUG
+       if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
+               uint64_t host_dataptr;
+
+               host_dataptr = ahd_le64toh(scb->hscb->dataptr);
+               printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
+                      ahd_name(ahd),
+                      SCB_GET_TAG(scb), scb->hscb->scsiid,
+                      ahd_le32toh(scb->hscb->hscb_busaddr),
+                      (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
+                      (u_int)(host_dataptr & 0xFFFFFFFF),
+                      ahd_le32toh(scb->hscb->datacnt));
+       }
+#endif
+       /* Tell the adapter about the newly queued SCB */
+       ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
+}
+
+/************************** Interrupt Processing ******************************/
+static void
+ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
+{
+       ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
+                       /*offset*/0,
+                       /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
+}
+
+static void
+ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
+{
+#ifdef AHD_TARGET_MODE
+       if ((ahd->flags & AHD_TARGETROLE) != 0) {
+               ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+                               ahd->shared_data_map.dmamap,
+                               ahd_targetcmd_offset(ahd, 0),
+                               sizeof(struct target_cmd) * AHD_TMODE_CMDS,
+                               op);
+       }
+#endif
+}
+
+/*
+ * See if the firmware has posted any completed commands
+ * into our in-core command complete fifos.
+ */
+#define AHD_RUN_QOUTFIFO 0x1
+#define AHD_RUN_TQINFIFO 0x2
+static u_int
+ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
+{
+       u_int retval;
+
+       retval = 0;
+       ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
+                       /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
+                       /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
+       if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
+         == ahd->qoutfifonext_valid_tag)
+               retval |= AHD_RUN_QOUTFIFO;
+#ifdef AHD_TARGET_MODE
+       if ((ahd->flags & AHD_TARGETROLE) != 0
+        && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
+               ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
+                               ahd->shared_data_map.dmamap,
+                               ahd_targetcmd_offset(ahd, ahd->tqinfifofnext),
+                               /*len*/sizeof(struct target_cmd),
+                               BUS_DMASYNC_POSTREAD);
+               if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
+                       retval |= AHD_RUN_TQINFIFO;
+       }
+#endif
+       return (retval);
+}
+
+/*
+ * Catch an interrupt from the adapter
+ */
+int
+ahd_intr(struct ahd_softc *ahd)
+{
+       u_int   intstat;
+
+       if ((ahd->pause & INTEN) == 0) {
+               /*
+                * Our interrupt is not enabled on the chip
+                * and may be disabled for re-entrancy reasons,
+                * so just return.  This is likely just a shared
+                * interrupt.
+                */
+               return (0);
+       }
+
+       /*
+        * Instead of directly reading the interrupt status register,
+        * infer the cause of the interrupt by checking our in-core
+        * completion queues.  This avoids a costly PCI bus read in
+        * most cases.
+        */
+       if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
+        && (ahd_check_cmdcmpltqueues(ahd) != 0))
+               intstat = CMDCMPLT;
+       else
+               intstat = ahd_inb(ahd, INTSTAT);
+
+       if ((intstat & INT_PEND) == 0)
+               return (0);
+
+       if (intstat & CMDCMPLT) {
+               ahd_outb(ahd, CLRINT, CLRCMDINT);
+
+               /*
+                * Ensure that the chip sees that we've cleared
+                * this interrupt before we walk the output fifo.
+                * Otherwise, we may, due to posted bus writes,
+                * clear the interrupt after we finish the scan,
+                * and after the sequencer has added new entries
+                * and asserted the interrupt again.
+                */
+               if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
+                       if (ahd_is_paused(ahd)) {
+                               /*
+                                * Potentially lost SEQINT.
+                                * If SEQINTCODE is non-zero,
+                                * simulate the SEQINT.
+                                */
+                               if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
+                                       intstat |= SEQINT;
+                       }
+               } else {
+                       ahd_flush_device_writes(ahd);
+               }
+               ahd_run_qoutfifo(ahd);
+               ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
+               ahd->cmdcmplt_total++;
+#ifdef AHD_TARGET_MODE
+               if ((ahd->flags & AHD_TARGETROLE) != 0)
+                       ahd_run_tqinfifo(ahd, /*paused*/FALSE);
+#endif
+       }
+
+       /*
+        * Handle statuses that may invalidate our cached
+        * copy of INTSTAT separately.
+        */
+       if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
+               /* Hot eject.  Do nothing */
+       } else if (intstat & HWERRINT) {
+               ahd_handle_hwerrint(ahd);
+       } else if ((intstat & (PCIINT|SPLTINT)) != 0) {
+               ahd->bus_intr(ahd);
+       } else {
+
+               if ((intstat & SEQINT) != 0)
+                       ahd_handle_seqint(ahd, intstat);
+
+               if ((intstat & SCSIINT) != 0)
+                       ahd_handle_scsiint(ahd, intstat);
+       }
+       return (1);
+}
+
+/******************************** Private Inlines *****************************/
 static __inline void
 ahd_assert_atn(struct ahd_softc *ahd)
 {
@@ -280,7 +1046,7 @@ ahd_assert_atn(struct ahd_softc *ahd)
  * are currently in a packetized transfer.  We could
  * just as easily be sending or receiving a message.
  */
-static __inline int
+static int
 ahd_currently_packetized(struct ahd_softc *ahd)
 {
        ahd_mode_state   saved_modes;
@@ -896,7 +1662,7 @@ clrchn:
  * a copy of the first byte (little endian) of the sgptr
  * hscb field.
  */
-void
+static void
 ahd_run_qoutfifo(struct ahd_softc *ahd)
 {
        struct ahd_completion *completion;
@@ -935,7 +1701,7 @@ ahd_run_qoutfifo(struct ahd_softc *ahd)
 }
 
 /************************* Interrupt Handling *********************************/
-void
+static void
 ahd_handle_hwerrint(struct ahd_softc *ahd)
 {
        /*
@@ -1009,7 +1775,7 @@ ahd_dump_sglist(struct scb *scb)
 }
 #endif  /*  AHD_DEBUG  */
 
-void
+static void
 ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
 {
        u_int seqintcode;
@@ -1621,7 +2387,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
        ahd_unpause(ahd);
 }
 
-void
+static void
 ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
 {
        struct scb      *scb;
@@ -3571,11 +4337,11 @@ ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
               devinfo->target, devinfo->lun);
 }
 
-static struct ahd_phase_table_entry*
+static const struct ahd_phase_table_entry*
 ahd_lookup_phase_entry(int phase)
 {
-       struct ahd_phase_table_entry *entry;
-       struct ahd_phase_table_entry *last_entry;
+       const struct ahd_phase_table_entry *entry;
+       const struct ahd_phase_table_entry *last_entry;
 
        /*
         * num_phases doesn't include the default entry which
@@ -3941,7 +4707,7 @@ ahd_clear_msg_state(struct ahd_softc *ahd)
  */
 static void
 ahd_handle_message_phase(struct ahd_softc *ahd)
-{ 
+{
        struct  ahd_devinfo devinfo;
        u_int   bus_phase;
        int     end_session;
@@ -5983,8 +6749,7 @@ found:
  */
 void
 ahd_free_scb(struct ahd_softc *ahd, struct scb *scb)
-{       
-
+{
        /* Clean up for the next user */
        scb->flags = SCB_FLAG_NONE;
        scb->hscb->control = 0;
@@ -6272,6 +7037,24 @@ static const char *termstat_strings[] = {
        "Not Configured"
 };
 
+/***************************** Timer Facilities *******************************/
+#define ahd_timer_init init_timer
+#define ahd_timer_stop del_timer_sync
+typedef void ahd_linux_callback_t (u_long);
+
+static void
+ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
+{
+       struct ahd_softc *ahd;
+
+       ahd = (struct ahd_softc *)arg;
+       del_timer(timer);
+       timer->data = (u_long)arg;
+       timer->expires = jiffies + (usec * HZ)/1000000;
+       timer->function = (ahd_linux_callback_t*)func;
+       add_timer(timer);
+}
+
 /*
  * Start the board, ready for normal operation
  */
@@ -7370,7 +8153,7 @@ ahd_qinfifo_count(struct ahd_softc *ahd)
                      + ARRAY_SIZE(ahd->qinfifo) - wrap_qinpos);
 }
 
-void
+static void
 ahd_reset_cmds_pending(struct ahd_softc *ahd)
 {
        struct          scb *scb;
@@ -8571,7 +9354,7 @@ ahd_loadseq(struct ahd_softc *ahd)
        struct  cs cs_table[num_critical_sections];
        u_int   begin_set[num_critical_sections];
        u_int   end_set[num_critical_sections];
-       struct  patch *cur_patch;
+       const struct patch *cur_patch;
        u_int   cs_count;
        u_int   cur_cs;
        u_int   i;
@@ -8726,11 +9509,11 @@ ahd_loadseq(struct ahd_softc *ahd)
 }
 
 static int
-ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch,
+ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch,
                u_int start_instr, u_int *skip_addr)
 {
-       struct  patch *cur_patch;
-       struct  patch *last_patch;
+       const struct patch *cur_patch;
+       const struct patch *last_patch;
        u_int   num_patches;
 
        num_patches = ARRAY_SIZE(patches);
@@ -8764,7 +9547,7 @@ ahd_check_patch(struct ahd_softc *ahd, struct patch **start_patch,
 static u_int
 ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address)
 {
-       struct patch *cur_patch;
+       const struct patch *cur_patch;
        int address_offset;
        u_int skip_addr;
        u_int i;
@@ -8895,7 +9678,7 @@ sized:
 }
 
 int
-ahd_print_register(ahd_reg_parse_entry_t *table, u_int num_entries,
+ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries,
                   const char *name, u_int address, u_int value,
                   u_int *cur_column, u_int wrap_point)
 {
@@ -9886,7 +10669,7 @@ ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask)
 #endif
 }
 
-void
+static void
 ahd_run_tqinfifo(struct ahd_softc *ahd, int paused)
 {
        struct target_cmd *cmd;
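
The block comment in ahd_swap_with_next_hscb() above describes the driver's queuing trick: the controller is told the bus address of its next HSCB ahead of time, so an arbitrary SCB is queued by copying its contents into that pre-advertised slot and then swapping pointers. The following is a minimal stand-alone sketch of the same pattern; the structure and field names are illustrative only, not the driver's types.

    #include <stdint.h>
    #include <string.h>

    struct hscb { uint32_t busaddr; uint32_t next_busaddr; uint8_t payload[56]; };
    struct scb  { struct hscb *hscb; };
    struct qstate { struct hscb *next_queued; };  /* slot the hardware already expects */

    /* Queue "scb": move its contents into the pre-advertised slot, then let
     * scb take ownership of the slot that will be advertised next. */
    static void swap_with_next_hscb(struct qstate *q, struct scb *scb)
    {
            struct hscb *slot = q->next_queued;
            uint32_t saved_busaddr = slot->busaddr;

            memcpy(slot, scb->hscb, sizeof(*slot));
            slot->busaddr = saved_busaddr;            /* slot keeps its own bus address */
            slot->next_busaddr = scb->hscb->busaddr;  /* advertise the new "next" slot */

            q->next_queued = scb->hscb;               /* hardware downloads this one next */
            scb->hscb = slot;                         /* scb now owns the copied-into slot */
    }

In the driver the same swap also carries the DMA map pointer and records the SCB in the tag-indexed scbindex table so it can still be located by SCB_TAG.
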
index 45e55575a0fad6c997f725d1489e815af31c4fff..5f12cf9d99d071344b6929beeb11c3fb11edbe33 100644 (file)
@@ -63,18 +63,15 @@ static __inline ahd_mode_state ahd_build_mode_state(struct ahd_softc *ahd,
 static __inline void ahd_extract_mode_state(struct ahd_softc *ahd,
                                            ahd_mode_state state,
                                            ahd_mode *src, ahd_mode *dst);
-static __inline void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src,
-                                  ahd_mode dst);
-static __inline void ahd_update_modes(struct ahd_softc *ahd);
-static __inline void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
-                                     ahd_mode dstmode, const char *file,
-                                     int line);
-static __inline ahd_mode_state ahd_save_modes(struct ahd_softc *ahd);
-static __inline void ahd_restore_modes(struct ahd_softc *ahd,
-                                      ahd_mode_state state);
-static __inline int  ahd_is_paused(struct ahd_softc *ahd);
-static __inline void ahd_pause(struct ahd_softc *ahd);
-static __inline void ahd_unpause(struct ahd_softc *ahd);
+
+void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src,
+                  ahd_mode dst);
+ahd_mode_state ahd_save_modes(struct ahd_softc *ahd);
+void ahd_restore_modes(struct ahd_softc *ahd,
+                      ahd_mode_state state);
+int  ahd_is_paused(struct ahd_softc *ahd);
+void ahd_pause(struct ahd_softc *ahd);
+void ahd_unpause(struct ahd_softc *ahd);
 
 static __inline void
 ahd_known_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
@@ -99,256 +96,16 @@ ahd_extract_mode_state(struct ahd_softc *ahd, ahd_mode_state state,
        *dst = (state & DST_MODE) >> DST_MODE_SHIFT;
 }
 
-static __inline void
-ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst)
-{
-       if (ahd->src_mode == src && ahd->dst_mode == dst)
-               return;
-#ifdef AHD_DEBUG
-       if (ahd->src_mode == AHD_MODE_UNKNOWN
-        || ahd->dst_mode == AHD_MODE_UNKNOWN)
-               panic("Setting mode prior to saving it.\n");
-       if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
-               printf("%s: Setting mode 0x%x\n", ahd_name(ahd),
-                      ahd_build_mode_state(ahd, src, dst));
-#endif
-       ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst));
-       ahd->src_mode = src;
-       ahd->dst_mode = dst;
-}
-
-static __inline void
-ahd_update_modes(struct ahd_softc *ahd)
-{
-       ahd_mode_state mode_ptr;
-       ahd_mode src;
-       ahd_mode dst;
-
-       mode_ptr = ahd_inb(ahd, MODE_PTR);
-#ifdef AHD_DEBUG
-       if ((ahd_debug & AHD_SHOW_MODEPTR) != 0)
-               printf("Reading mode 0x%x\n", mode_ptr);
-#endif
-       ahd_extract_mode_state(ahd, mode_ptr, &src, &dst);
-       ahd_known_modes(ahd, src, dst);
-}
-
-static __inline void
-ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode,
-                ahd_mode dstmode, const char *file, int line)
-{
-#ifdef AHD_DEBUG
-       if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0
-        || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) {
-               panic("%s:%s:%d: Mode assertion failed.\n",
-                      ahd_name(ahd), file, line);
-       }
-#endif
-}
-
-static __inline ahd_mode_state
-ahd_save_modes(struct ahd_softc *ahd)
-{
-       if (ahd->src_mode == AHD_MODE_UNKNOWN
-        || ahd->dst_mode == AHD_MODE_UNKNOWN)
-               ahd_update_modes(ahd);
-
-       return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode));
-}
-
-static __inline void
-ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state)
-{
-       ahd_mode src;
-       ahd_mode dst;
-
-       ahd_extract_mode_state(ahd, state, &src, &dst);
-       ahd_set_modes(ahd, src, dst);
-}
-
-#define AHD_ASSERT_MODES(ahd, source, dest) \
-       ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__);
-
-/*
- * Determine whether the sequencer has halted code execution.
- * Returns non-zero status if the sequencer is stopped.
- */
-static __inline int
-ahd_is_paused(struct ahd_softc *ahd)
-{
-       return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0);
-}
-
-/*
- * Request that the sequencer stop and wait, indefinitely, for it
- * to stop.  The sequencer will only acknowledge that it is paused
- * once it has reached an instruction boundary and PAUSEDIS is
- * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
- * for critical sections.
- */
-static __inline void
-ahd_pause(struct ahd_softc *ahd)
-{
-       ahd_outb(ahd, HCNTRL, ahd->pause);
-
-       /*
-        * Since the sequencer can disable pausing in a critical section, we
-        * must loop until it actually stops.
-        */
-       while (ahd_is_paused(ahd) == 0)
-               ;
-}
-
-/*
- * Allow the sequencer to continue program execution.
- * We check here to ensure that no additional interrupt
- * sources that would cause the sequencer to halt have been
- * asserted.  If, for example, a SCSI bus reset is detected
- * while we are fielding a different, pausing, interrupt type,
- * we don't want to release the sequencer before going back
- * into our interrupt handler and dealing with this new
- * condition.
- */
-static __inline void
-ahd_unpause(struct ahd_softc *ahd)
-{
-       /*
-        * Automatically restore our modes to those saved
-        * prior to the first change of the mode.
-        */
-       if (ahd->saved_src_mode != AHD_MODE_UNKNOWN
-        && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) {
-               if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0)
-                       ahd_reset_cmds_pending(ahd);
-               ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
-       }
-
-       if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0)
-               ahd_outb(ahd, HCNTRL, ahd->unpause);
-
-       ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN);
-}
-
 /*********************** Scatter Gather List Handling *************************/
-static __inline void   *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
-                                     void *sgptr, dma_addr_t addr,
-                                     bus_size_t len, int last);
-static __inline void    ahd_setup_scb_common(struct ahd_softc *ahd,
-                                             struct scb *scb);
-static __inline void    ahd_setup_data_scb(struct ahd_softc *ahd,
-                                           struct scb *scb);
-static __inline void    ahd_setup_noxfer_scb(struct ahd_softc *ahd,
-                                             struct scb *scb);
-
-static __inline void *
-ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
-            void *sgptr, dma_addr_t addr, bus_size_t len, int last)
-{
-       scb->sg_count++;
-       if (sizeof(dma_addr_t) > 4
-        && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
-               struct ahd_dma64_seg *sg;
-
-               sg = (struct ahd_dma64_seg *)sgptr;
-               sg->addr = ahd_htole64(addr);
-               sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0));
-               return (sg + 1);
-       } else {
-               struct ahd_dma_seg *sg;
-
-               sg = (struct ahd_dma_seg *)sgptr;
-               sg->addr = ahd_htole32(addr & 0xFFFFFFFF);
-               sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000)
-                                   | (last ? AHD_DMA_LAST_SEG : 0));
-               return (sg + 1);
-       }
-}
-
-static __inline void
-ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb)
-{
-       /* XXX Handle target mode SCBs. */
-       scb->crc_retry_count = 0;
-       if ((scb->flags & SCB_PACKETIZED) != 0) {
-               /* XXX what about ACA??  It is type 4, but TAG_TYPE == 0x3. */
-               scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE;
-       } else {
-               if (ahd_get_transfer_length(scb) & 0x01)
-                       scb->hscb->task_attribute = SCB_XFERLEN_ODD;
-               else
-                       scb->hscb->task_attribute = 0;
-       }
-
-       if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR
-        || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0)
-               scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr =
-                   ahd_htole32(scb->sense_busaddr);
-}
-
-static __inline void
-ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb)
-{
-       /*
-        * Copy the first SG into the "current" data pointer area.
-        */
-       if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
-               struct ahd_dma64_seg *sg;
-
-               sg = (struct ahd_dma64_seg *)scb->sg_list;
-               scb->hscb->dataptr = sg->addr;
-               scb->hscb->datacnt = sg->len;
-       } else {
-               struct ahd_dma_seg *sg;
-               uint32_t *dataptr_words;
-
-               sg = (struct ahd_dma_seg *)scb->sg_list;
-               dataptr_words = (uint32_t*)&scb->hscb->dataptr;
-               dataptr_words[0] = sg->addr;
-               dataptr_words[1] = 0;
-               if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) {
-                       uint64_t high_addr;
-
-                       high_addr = ahd_le32toh(sg->len) & 0x7F000000;
-                       scb->hscb->dataptr |= ahd_htole64(high_addr << 8);
-               }
-               scb->hscb->datacnt = sg->len;
-       }
-       /*
-        * Note where to find the SG entries in bus space.
-        * We also set the full residual flag which the 
-        * sequencer will clear as soon as a data transfer
-        * occurs.
-        */
-       scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID);
-}
-
-static __inline void
-ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb)
-{
-       scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL);
-       scb->hscb->dataptr = 0;
-       scb->hscb->datacnt = 0;
-}
+void   *ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb,
+                     void *sgptr, dma_addr_t addr,
+                     bus_size_t len, int last);
 
 /************************** Memory mapping routines ***************************/
 static __inline size_t ahd_sg_size(struct ahd_softc *ahd);
-static __inline void *
-                       ahd_sg_bus_to_virt(struct ahd_softc *ahd,
-                                          struct scb *scb,
-                                          uint32_t sg_busaddr);
-static __inline uint32_t
-                       ahd_sg_virt_to_bus(struct ahd_softc *ahd,
-                                          struct scb *scb,
-                                          void *sg);
-static __inline void   ahd_sync_scb(struct ahd_softc *ahd,
-                                    struct scb *scb, int op);
-static __inline void   ahd_sync_sglist(struct ahd_softc *ahd,
-                                       struct scb *scb, int op);
-static __inline void   ahd_sync_sense(struct ahd_softc *ahd,
-                                      struct scb *scb, int op);
-static __inline uint32_t
-                       ahd_targetcmd_offset(struct ahd_softc *ahd,
-                                            u_int index);
+
+void   ahd_sync_sglist(struct ahd_softc *ahd,
+                       struct scb *scb, int op);
 
 static __inline size_t
 ahd_sg_size(struct ahd_softc *ahd)
@@ -358,104 +115,32 @@ ahd_sg_size(struct ahd_softc *ahd)
        return (sizeof(struct ahd_dma_seg));
 }
 
-static __inline void *
-ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr)
-{
-       dma_addr_t sg_offset;
-
-       /* sg_list_phys points to entry 1, not 0 */
-       sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd));
-       return ((uint8_t *)scb->sg_list + sg_offset);
-}
-
-static __inline uint32_t
-ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg)
-{
-       dma_addr_t sg_offset;
-
-       /* sg_list_phys points to entry 1, not 0 */
-       sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list)
-                 - ahd_sg_size(ahd);
-
-       return (scb->sg_list_busaddr + sg_offset);
-}
-
-static __inline void
-ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op)
-{
-       ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat,
-                       scb->hscb_map->dmamap,
-                       /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr,
-                       /*len*/sizeof(*scb->hscb), op);
-}
-
-static __inline void
-ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op)
-{
-       if (scb->sg_count == 0)
-               return;
-
-       ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat,
-                       scb->sg_map->dmamap,
-                       /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd),
-                       /*len*/ahd_sg_size(ahd) * scb->sg_count, op);
-}
-
-static __inline void
-ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op)
-{
-       ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat,
-                       scb->sense_map->dmamap,
-                       /*offset*/scb->sense_busaddr,
-                       /*len*/AHD_SENSE_BUFSIZE, op);
-}
-
-static __inline uint32_t
-ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index)
-{
-       return (((uint8_t *)&ahd->targetcmds[index])
-              - (uint8_t *)ahd->qoutfifo);
-}
-
 /*********************** Miscellaneous Support Functions ***********************/
-static __inline struct ahd_initiator_tinfo *
-                       ahd_fetch_transinfo(struct ahd_softc *ahd,
-                                           char channel, u_int our_id,
-                                           u_int remote_id,
-                                           struct ahd_tmode_tstate **tstate);
-static __inline uint16_t
-                       ahd_inw(struct ahd_softc *ahd, u_int port);
-static __inline void   ahd_outw(struct ahd_softc *ahd, u_int port,
-                                u_int value);
-static __inline uint32_t
-                       ahd_inl(struct ahd_softc *ahd, u_int port);
-static __inline void   ahd_outl(struct ahd_softc *ahd, u_int port,
-                                uint32_t value);
-static __inline uint64_t
-                       ahd_inq(struct ahd_softc *ahd, u_int port);
-static __inline void   ahd_outq(struct ahd_softc *ahd, u_int port,
-                                uint64_t value);
-static __inline u_int  ahd_get_scbptr(struct ahd_softc *ahd);
-static __inline void   ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr);
-static __inline u_int  ahd_get_hnscb_qoff(struct ahd_softc *ahd);
-static __inline void   ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int  ahd_get_hescb_qoff(struct ahd_softc *ahd);
-static __inline void   ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int  ahd_get_snscb_qoff(struct ahd_softc *ahd);
-static __inline void   ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int  ahd_get_sescb_qoff(struct ahd_softc *ahd);
-static __inline void   ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int  ahd_get_sdscb_qoff(struct ahd_softc *ahd);
-static __inline void   ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value);
-static __inline u_int  ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
-static __inline u_int  ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
-static __inline uint32_t
-                       ahd_inl_scbram(struct ahd_softc *ahd, u_int offset);
-static __inline uint64_t
-                       ahd_inq_scbram(struct ahd_softc *ahd, u_int offset);
-static __inline void   ahd_swap_with_next_hscb(struct ahd_softc *ahd,
-                                               struct scb *scb);
-static __inline void   ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
+struct ahd_initiator_tinfo *
+       ahd_fetch_transinfo(struct ahd_softc *ahd,
+                           char channel, u_int our_id,
+                           u_int remote_id,
+                           struct ahd_tmode_tstate **tstate);
+uint16_t
+       ahd_inw(struct ahd_softc *ahd, u_int port);
+void   ahd_outw(struct ahd_softc *ahd, u_int port,
+                u_int value);
+uint32_t
+       ahd_inl(struct ahd_softc *ahd, u_int port);
+void   ahd_outl(struct ahd_softc *ahd, u_int port,
+                uint32_t value);
+uint64_t
+       ahd_inq(struct ahd_softc *ahd, u_int port);
+void   ahd_outq(struct ahd_softc *ahd, u_int port,
+                uint64_t value);
+u_int  ahd_get_scbptr(struct ahd_softc *ahd);
+void   ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr);
+u_int  ahd_inb_scbram(struct ahd_softc *ahd, u_int offset);
+u_int  ahd_inw_scbram(struct ahd_softc *ahd, u_int offset);
+struct scb *
+       ahd_lookup_scb(struct ahd_softc *ahd, u_int tag);
+void   ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb);
+
 static __inline uint8_t *
                        ahd_get_sense_buf(struct ahd_softc *ahd,
                                          struct scb *scb);
@@ -463,25 +148,7 @@ static __inline uint32_t
                        ahd_get_sense_bufaddr(struct ahd_softc *ahd,
                                              struct scb *scb);
 
-/*
- * Return pointers to the transfer negotiation information
- * for the specified our_id/remote_id pair.
- */
-static __inline struct ahd_initiator_tinfo *
-ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id,
-                   u_int remote_id, struct ahd_tmode_tstate **tstate)
-{
-       /*
-        * Transfer data structures are stored from the perspective
-        * of the target role.  Since the parameters for a connection
-        * in the initiator role to a given target are the same as
-        * when the roles are reversed, we pretend we are the target.
-        */
-       if (channel == 'B')
-               our_id += 8;
-       *tstate = ahd->enabled_targets[our_id];
-       return (&(*tstate)->transinfo[remote_id]);
-}
+#if 0 /* unused */
 
 #define AHD_COPY_COL_IDX(dst, src)                             \
 do {                                                           \
@@ -489,304 +156,7 @@ do {                                                              \
        dst->hscb->lun = src->hscb->lun;                        \
 } while (0)
 
-static __inline uint16_t
-ahd_inw(struct ahd_softc *ahd, u_int port)
-{
-       /*
-        * Read high byte first as some registers increment
-        * or have other side effects when the low byte is
-        * read.
-        */
-       uint16_t r = ahd_inb(ahd, port+1) << 8;
-       return r | ahd_inb(ahd, port);
-}
-
-static __inline void
-ahd_outw(struct ahd_softc *ahd, u_int port, u_int value)
-{
-       /*
-        * Write low byte first to accommodate registers
-        * such as PRGMCNT where the order matters.
-        */
-       ahd_outb(ahd, port, value & 0xFF);
-       ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
-}
-
-static __inline uint32_t
-ahd_inl(struct ahd_softc *ahd, u_int port)
-{
-       return ((ahd_inb(ahd, port))
-             | (ahd_inb(ahd, port+1) << 8)
-             | (ahd_inb(ahd, port+2) << 16)
-             | (ahd_inb(ahd, port+3) << 24));
-}
-
-static __inline void
-ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value)
-{
-       ahd_outb(ahd, port, (value) & 0xFF);
-       ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF);
-       ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF);
-       ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF);
-}
-
-static __inline uint64_t
-ahd_inq(struct ahd_softc *ahd, u_int port)
-{
-       return ((ahd_inb(ahd, port))
-             | (ahd_inb(ahd, port+1) << 8)
-             | (ahd_inb(ahd, port+2) << 16)
-             | (ahd_inb(ahd, port+3) << 24)
-             | (((uint64_t)ahd_inb(ahd, port+4)) << 32)
-             | (((uint64_t)ahd_inb(ahd, port+5)) << 40)
-             | (((uint64_t)ahd_inb(ahd, port+6)) << 48)
-             | (((uint64_t)ahd_inb(ahd, port+7)) << 56));
-}
-
-static __inline void
-ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value)
-{
-       ahd_outb(ahd, port, value & 0xFF);
-       ahd_outb(ahd, port+1, (value >> 8) & 0xFF);
-       ahd_outb(ahd, port+2, (value >> 16) & 0xFF);
-       ahd_outb(ahd, port+3, (value >> 24) & 0xFF);
-       ahd_outb(ahd, port+4, (value >> 32) & 0xFF);
-       ahd_outb(ahd, port+5, (value >> 40) & 0xFF);
-       ahd_outb(ahd, port+6, (value >> 48) & 0xFF);
-       ahd_outb(ahd, port+7, (value >> 56) & 0xFF);
-}
-
-static __inline u_int
-ahd_get_scbptr(struct ahd_softc *ahd)
-{
-       AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
-                        ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
-       return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8));
-}
-
-static __inline void
-ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr)
-{
-       AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
-                        ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
-       ahd_outb(ahd, SCBPTR, scbptr & 0xFF);
-       ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF);
-}
-
-static __inline u_int
-ahd_get_hnscb_qoff(struct ahd_softc *ahd)
-{
-       return (ahd_inw_atomic(ahd, HNSCB_QOFF));
-}
-
-static __inline void
-ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value)
-{
-       ahd_outw_atomic(ahd, HNSCB_QOFF, value);
-}
-
-static __inline u_int
-ahd_get_hescb_qoff(struct ahd_softc *ahd)
-{
-       return (ahd_inb(ahd, HESCB_QOFF));
-}
-
-static __inline void
-ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value)
-{
-       ahd_outb(ahd, HESCB_QOFF, value);
-}
-
-static __inline u_int
-ahd_get_snscb_qoff(struct ahd_softc *ahd)
-{
-       u_int oldvalue;
-
-       AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
-       oldvalue = ahd_inw(ahd, SNSCB_QOFF);
-       ahd_outw(ahd, SNSCB_QOFF, oldvalue);
-       return (oldvalue);
-}
-
-static __inline void
-ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value)
-{
-       AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
-       ahd_outw(ahd, SNSCB_QOFF, value);
-}
-
-static __inline u_int
-ahd_get_sescb_qoff(struct ahd_softc *ahd)
-{
-       AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
-       return (ahd_inb(ahd, SESCB_QOFF));
-}
-
-static __inline void
-ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value)
-{
-       AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
-       ahd_outb(ahd, SESCB_QOFF, value);
-}
-
-static __inline u_int
-ahd_get_sdscb_qoff(struct ahd_softc *ahd)
-{
-       AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
-       return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8));
-}
-
-static __inline void
-ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value)
-{
-       AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
-       ahd_outb(ahd, SDSCB_QOFF, value & 0xFF);
-       ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF);
-}
-
-static __inline u_int
-ahd_inb_scbram(struct ahd_softc *ahd, u_int offset)
-{
-       u_int value;
-
-       /*
-        * Workaround PCI-X Rev A. hardware bug.
-        * After a host read of SCB memory, the chip
-        * may become confused into thinking prefetch
-        * was required.  This starts the discard timer
-        * running and can cause an unexpected discard
-        * timer interrupt.  The work around is to read
-        * a normal register prior to the exhaustion of
-        * the discard timer.  The mode pointer register
-        * has no side effects and so serves well for
-        * this purpose.
-        *
-        * Razor #528
-        */
-       value = ahd_inb(ahd, offset);
-       if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0)
-               ahd_inb(ahd, MODE_PTR);
-       return (value);
-}
-
-static __inline u_int
-ahd_inw_scbram(struct ahd_softc *ahd, u_int offset)
-{
-       return (ahd_inb_scbram(ahd, offset)
-             | (ahd_inb_scbram(ahd, offset+1) << 8));
-}
-
-static __inline uint32_t
-ahd_inl_scbram(struct ahd_softc *ahd, u_int offset)
-{
-       return (ahd_inw_scbram(ahd, offset)
-             | (ahd_inw_scbram(ahd, offset+2) << 16));
-}
-
-static __inline uint64_t
-ahd_inq_scbram(struct ahd_softc *ahd, u_int offset)
-{
-       return (ahd_inl_scbram(ahd, offset)
-             | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32);
-}
-
-static __inline struct scb *
-ahd_lookup_scb(struct ahd_softc *ahd, u_int tag)
-{
-       struct scb* scb;
-
-       if (tag >= AHD_SCB_MAX)
-               return (NULL);
-       scb = ahd->scb_data.scbindex[tag];
-       if (scb != NULL)
-               ahd_sync_scb(ahd, scb,
-                            BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
-       return (scb);
-}
-
-static __inline void
-ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb)
-{
-       struct   hardware_scb *q_hscb;
-       struct   map_node *q_hscb_map;
-       uint32_t saved_hscb_busaddr;
-
-       /*
-        * Our queuing method is a bit tricky.  The card
-        * knows in advance which HSCB (by address) to download,
-        * and we can't disappoint it.  To achieve this, the next
-        * HSCB to download is saved off in ahd->next_queued_hscb.
-        * When we are called to queue "an arbitrary scb",
-        * we copy the contents of the incoming HSCB to the one
-        * the sequencer knows about, swap HSCB pointers and
-        * finally assign the SCB to the tag indexed location
-        * in the scb_array.  This makes sure that we can still
-        * locate the correct SCB by SCB_TAG.
-        */
-       q_hscb = ahd->next_queued_hscb;
-       q_hscb_map = ahd->next_queued_hscb_map;
-       saved_hscb_busaddr = q_hscb->hscb_busaddr;
-       memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
-       q_hscb->hscb_busaddr = saved_hscb_busaddr;
-       q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
-
-       /* Now swap HSCB pointers. */
-       ahd->next_queued_hscb = scb->hscb;
-       ahd->next_queued_hscb_map = scb->hscb_map;
-       scb->hscb = q_hscb;
-       scb->hscb_map = q_hscb_map;
-
-       /* Now define the mapping from tag to SCB in the scbindex */
-       ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb;
-}
-
-/*
- * Tell the sequencer about a new transaction to execute.
- */
-static __inline void
-ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb)
-{
-       ahd_swap_with_next_hscb(ahd, scb);
-
-       if (SCBID_IS_NULL(SCB_GET_TAG(scb)))
-               panic("Attempt to queue invalid SCB tag %x\n",
-                     SCB_GET_TAG(scb));
-
-       /*
-        * Keep a history of SCBs we've downloaded in the qinfifo.
-        */
-       ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
-       ahd->qinfifonext++;
-
-       if (scb->sg_count != 0)
-               ahd_setup_data_scb(ahd, scb);
-       else
-               ahd_setup_noxfer_scb(ahd, scb);
-       ahd_setup_scb_common(ahd, scb);
-
-       /*
-        * Make sure our data is consistent from the
-        * perspective of the adapter.
-        */
-       ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
-
-#ifdef AHD_DEBUG
-       if ((ahd_debug & AHD_SHOW_QUEUE) != 0) {
-               uint64_t host_dataptr;
-
-               host_dataptr = ahd_le64toh(scb->hscb->dataptr);
-               printf("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n",
-                      ahd_name(ahd),
-                      SCB_GET_TAG(scb), scb->hscb->scsiid,
-                      ahd_le32toh(scb->hscb->hscb_busaddr),
-                      (u_int)((host_dataptr >> 32) & 0xFFFFFFFF),
-                      (u_int)(host_dataptr & 0xFFFFFFFF),
-                      ahd_le32toh(scb->hscb->datacnt));
-       }
 #endif
-       /* Tell the adapter about the newly queued SCB */
-       ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
-}
 
 static __inline uint8_t *
 ahd_get_sense_buf(struct ahd_softc *ahd, struct scb *scb)
@@ -801,151 +171,6 @@ ahd_get_sense_bufaddr(struct ahd_softc *ahd, struct scb *scb)
 }
 
 /************************** Interrupt Processing ******************************/
-static __inline void   ahd_sync_qoutfifo(struct ahd_softc *ahd, int op);
-static __inline void   ahd_sync_tqinfifo(struct ahd_softc *ahd, int op);
-static __inline u_int  ahd_check_cmdcmpltqueues(struct ahd_softc *ahd);
-static __inline int    ahd_intr(struct ahd_softc *ahd);
-
-static __inline void
-ahd_sync_qoutfifo(struct ahd_softc *ahd, int op)
-{
-       ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
-                       /*offset*/0,
-                       /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op);
-}
-
-static __inline void
-ahd_sync_tqinfifo(struct ahd_softc *ahd, int op)
-{
-#ifdef AHD_TARGET_MODE
-       if ((ahd->flags & AHD_TARGETROLE) != 0) {
-               ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
-                               ahd->shared_data_map.dmamap,
-                               ahd_targetcmd_offset(ahd, 0),
-                               sizeof(struct target_cmd) * AHD_TMODE_CMDS,
-                               op);
-       }
-#endif
-}
-
-/*
- * See if the firmware has posted any completed commands
- * into our in-core command complete fifos.
- */
-#define AHD_RUN_QOUTFIFO 0x1
-#define AHD_RUN_TQINFIFO 0x2
-static __inline u_int
-ahd_check_cmdcmpltqueues(struct ahd_softc *ahd)
-{
-       u_int retval;
-
-       retval = 0;
-       ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap,
-                       /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo),
-                       /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD);
-       if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag
-         == ahd->qoutfifonext_valid_tag)
-               retval |= AHD_RUN_QOUTFIFO;
-#ifdef AHD_TARGET_MODE
-       if ((ahd->flags & AHD_TARGETROLE) != 0
-        && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) {
-               ahd_dmamap_sync(ahd, ahd->shared_data_dmat,
-                               ahd->shared_data_map.dmamap,
-                               ahd_targetcmd_offset(ahd, ahd->tqinfifofnext),
-                               /*len*/sizeof(struct target_cmd),
-                               BUS_DMASYNC_POSTREAD);
-               if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0)
-                       retval |= AHD_RUN_TQINFIFO;
-       }
-#endif
-       return (retval);
-}
-
-/*
- * Catch an interrupt from the adapter
- */
-static __inline int
-ahd_intr(struct ahd_softc *ahd)
-{
-       u_int   intstat;
-
-       if ((ahd->pause & INTEN) == 0) {
-               /*
-                * Our interrupt is not enabled on the chip
-                * and may be disabled for re-entrancy reasons,
-                * so just return.  This is likely just a shared
-                * interrupt.
-                */
-               return (0);
-       }
-
-       /*
-        * Instead of directly reading the interrupt status register,
-        * infer the cause of the interrupt by checking our in-core
-        * completion queues.  This avoids a costly PCI bus read in
-        * most cases.
-        */
-       if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0
-        && (ahd_check_cmdcmpltqueues(ahd) != 0))
-               intstat = CMDCMPLT;
-       else
-               intstat = ahd_inb(ahd, INTSTAT);
-
-       if ((intstat & INT_PEND) == 0)
-               return (0);
-
-       if (intstat & CMDCMPLT) {
-               ahd_outb(ahd, CLRINT, CLRCMDINT);
-
-               /*
-                * Ensure that the chip sees that we've cleared
-                * this interrupt before we walk the output fifo.
-                * Otherwise, we may, due to posted bus writes,
-                * clear the interrupt after we finish the scan,
-                * and after the sequencer has added new entries
-                * and asserted the interrupt again.
-                */
-               if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
-                       if (ahd_is_paused(ahd)) {
-                               /*
-                                * Potentially lost SEQINT.
-                                * If SEQINTCODE is non-zero,
-                                * simulate the SEQINT.
-                                */
-                               if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
-                                       intstat |= SEQINT;
-                       }
-               } else {
-                       ahd_flush_device_writes(ahd);
-               }
-               ahd_run_qoutfifo(ahd);
-               ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
-               ahd->cmdcmplt_total++;
-#ifdef AHD_TARGET_MODE
-               if ((ahd->flags & AHD_TARGETROLE) != 0)
-                       ahd_run_tqinfifo(ahd, /*paused*/FALSE);
-#endif
-       }
-
-       /*
-        * Handle statuses that may invalidate our cached
-        * copy of INTSTAT separately.
-        */
-       if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
-               /* Hot eject.  Do nothing */
-       } else if (intstat & HWERRINT) {
-               ahd_handle_hwerrint(ahd);
-       } else if ((intstat & (PCIINT|SPLTINT)) != 0) {
-               ahd->bus_intr(ahd);
-       } else {
-
-               if ((intstat & SEQINT) != 0)
-                       ahd_handle_seqint(ahd, intstat);
-
-               if ((intstat & SCSIINT) != 0)
-                       ahd_handle_scsiint(ahd, intstat);
-       }
-       return (1);
-}
+int    ahd_intr(struct ahd_softc *ahd);
 
 #endif  /* _AIC79XX_INLINE_H_ */
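
The comments in the ahd_inw()/ahd_outw() pair removed above record why multi-byte register access is built from single-byte operations in a fixed order: reading the low byte of some registers has side effects, and registers such as PRGMCNT care about write order. Below is a self-contained sketch of that composition, using hypothetical read_reg8()/write_reg8() helpers backed by a fake register file rather than the driver's ahd_inb()/ahd_outb().

    #include <stdint.h>

    /* Stand-ins for ahd_inb()/ahd_outb(): a fake 256-byte register file keeps
     * the sketch self-contained; the real driver does MMIO or port I/O. */
    static uint8_t fake_regs[256];
    static uint8_t read_reg8(unsigned port)             { return fake_regs[port & 0xFF]; }
    static void    write_reg8(unsigned port, uint8_t v) { fake_regs[port & 0xFF] = v; }

    /* 16-bit read: high byte first, since reading the low byte of some
     * registers has side effects (see the ahd_inw() comment above). */
    static uint16_t read_reg16(unsigned port)
    {
            uint16_t r = (uint16_t)read_reg8(port + 1) << 8;
            return r | read_reg8(port);
    }

    /* 16-bit write: low byte first, for registers such as PRGMCNT where the
     * order matters (see the ahd_outw() comment above). */
    static void write_reg16(unsigned port, uint16_t val)
    {
            write_reg8(port, val & 0xFF);
            write_reg8(port + 1, (val >> 8) & 0xFF);
    }

ahd_inl() and ahd_inq() in the same removed block extend the identical byte-at-a-time pattern to 32 and 64 bits.
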
index 0081aa357c8b09c7f5f939d76c4bddfd2b8de65f..0f829b3b8ab75e0f990e420045bd7007b4f6855f 100644 (file)
@@ -193,7 +193,7 @@ struct ahd_linux_iocell_opts
 #define AIC79XX_PRECOMP_INDEX  0
 #define AIC79XX_SLEWRATE_INDEX 1
 #define AIC79XX_AMPLITUDE_INDEX        2
-static struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
+static const struct ahd_linux_iocell_opts aic79xx_iocell_info[] =
 {
        AIC79XX_DEFAULT_IOOPTS,
        AIC79XX_DEFAULT_IOOPTS,
@@ -369,10 +369,167 @@ static void ahd_release_simq(struct ahd_softc *ahd);
 static int ahd_linux_unit;
 
 
+/************************** OS Utility Wrappers *******************************/
+void ahd_delay(long);
+void
+ahd_delay(long usec)
+{
+       /*
+        * udelay on Linux can have problems for
+        * multi-millisecond waits.  Wait at most
+        * 1024us per call.
+        */
+       while (usec > 0) {
+               udelay(usec % 1024);
+               usec -= 1024;
+       }
+}
+
+
+/***************************** Low Level I/O **********************************/
+uint8_t ahd_inb(struct ahd_softc * ahd, long port);
+void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
+void ahd_outw_atomic(struct ahd_softc * ahd,
+                                    long port, uint16_t val);
+void ahd_outsb(struct ahd_softc * ahd, long port,
+                              uint8_t *, int count);
+void ahd_insb(struct ahd_softc * ahd, long port,
+                              uint8_t *, int count);
+
+uint8_t
+ahd_inb(struct ahd_softc * ahd, long port)
+{
+       uint8_t x;
+
+       if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+               x = readb(ahd->bshs[0].maddr + port);
+       } else {
+               x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
+       }
+       mb();
+       return (x);
+}
+
+#if 0 /* unused */
+static uint16_t
+ahd_inw_atomic(struct ahd_softc * ahd, long port)
+{
+       uint8_t x;
+
+       if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+               x = readw(ahd->bshs[0].maddr + port);
+       } else {
+               x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
+       }
+       mb();
+       return (x);
+}
+#endif
+
+void
+ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
+{
+       if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+               writeb(val, ahd->bshs[0].maddr + port);
+       } else {
+               outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
+       }
+       mb();
+}
+
+void
+ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
+{
+       if (ahd->tags[0] == BUS_SPACE_MEMIO) {
+               writew(val, ahd->bshs[0].maddr + port);
+       } else {
+               outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
+       }
+       mb();
+}
+
+void
+ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
+{
+       int i;
+
+       /*
+        * There is probably a more efficient way to do this on Linux
+        * but we don't use this for anything speed critical and this
+        * should work.
+        */
+       for (i = 0; i < count; i++)
+               ahd_outb(ahd, port, *array++);
+}
+
+void
+ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
+{
+       int i;
+
+       /*
+        * There is probably a more efficient way to do this on Linux
+        * but we don't use this for anything speed critical and this
+        * should work.
+        */
+       for (i = 0; i < count; i++)
+               *array++ = ahd_inb(ahd, port);
+}
+
+/******************************* PCI Routines *********************************/
+uint32_t
+ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
+{
+       switch (width) {
+       case 1:
+       {
+               uint8_t retval;
+
+               pci_read_config_byte(pci, reg, &retval);
+               return (retval);
+       }
+       case 2:
+       {
+               uint16_t retval;
+               pci_read_config_word(pci, reg, &retval);
+               return (retval);
+       }
+       case 4:
+       {
+               uint32_t retval;
+               pci_read_config_dword(pci, reg, &retval);
+               return (retval);
+       }
+       default:
+               panic("ahd_pci_read_config: Read size too big");
+               /* NOTREACHED */
+               return (0);
+       }
+}
+
+void
+ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
+{
+       switch (width) {
+       case 1:
+               pci_write_config_byte(pci, reg, value);
+               break;
+       case 2:
+               pci_write_config_word(pci, reg, value);
+               break;
+       case 4:
+               pci_write_config_dword(pci, reg, value);
+               break;
+       default:
+               panic("ahd_pci_write_config: Write size too big");
+               /* NOTREACHED */
+       }
+}
+
 /****************************** Inlines ***************************************/
-static __inline void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
+static void ahd_linux_unmap_scb(struct ahd_softc*, struct scb*);
 
-static __inline void
+static void
 ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
 {
        struct scsi_cmnd *cmd;
@@ -400,13 +557,11 @@ ahd_linux_info(struct Scsi_Host *host)
        bp = &buffer[0];
        ahd = *(struct ahd_softc **)host->hostdata;
        memset(bp, 0, sizeof(buffer));
-       strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev ");
-       strcat(bp, AIC79XX_DRIVER_VERSION);
-       strcat(bp, "\n");
-       strcat(bp, "        <");
+       strcpy(bp, "Adaptec AIC79XX PCI-X SCSI HBA DRIVER, Rev " AIC79XX_DRIVER_VERSION "\n"
+                       "        <");
        strcat(bp, ahd->description);
-       strcat(bp, ">\n");
-       strcat(bp, "        ");
+       strcat(bp, ">\n"
+                       "        ");
        ahd_controller_info(ahd, ahd_info);
        strcat(bp, ahd_info);
 
@@ -432,7 +587,7 @@ ahd_linux_queue(struct scsi_cmnd * cmd, void (*scsi_done) (struct scsi_cmnd *))
        return rtn;
 }
 
-static inline struct scsi_target **
+static struct scsi_target **
 ahd_linux_target_in_softc(struct scsi_target *starget)
 {
        struct  ahd_softc *ahd =
@@ -991,7 +1146,7 @@ aic79xx_setup(char *s)
        char   *p;
        char   *end;
 
-       static struct {
+       static const struct {
                const char *name;
                uint32_t *flag;
        } options[] = {
@@ -1223,7 +1378,7 @@ ahd_platform_init(struct ahd_softc *ahd)
         * Lookup and commit any modified IO Cell options.
         */
        if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
-               struct ahd_linux_iocell_opts *iocell_opts;
+               const struct ahd_linux_iocell_opts *iocell_opts;
 
                iocell_opts = &aic79xx_iocell_info[ahd->unit];
                if (iocell_opts->precomp != AIC79XX_DEFAULT_PRECOMP)
@@ -2613,7 +2768,7 @@ static void ahd_linux_set_pcomp_en(struct scsi_target *starget, int pcomp)
                uint8_t precomp;
 
                if (ahd->unit < ARRAY_SIZE(aic79xx_iocell_info)) {
-                       struct ahd_linux_iocell_opts *iocell_opts;
+                       const struct ahd_linux_iocell_opts *iocell_opts;
 
                        iocell_opts = &aic79xx_iocell_info[ahd->unit];
                        precomp = iocell_opts->precomp;
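
ahd_delay(), added above, works around the fact that udelay() is only reliable for short waits by issuing a long delay in chunks of at most 1024 microseconds. The sketch below shows the same chunking idea in user-space form; usleep() stands in for udelay(), and the chunk arithmetic is simplified to a plain minimum rather than the driver's exact loop.

    #include <unistd.h>

    /* Illustration of the ahd_delay() idea: the underlying primitive (udelay()
     * in the driver, usleep() here) is only trusted for short waits, so a long
     * delay is issued in chunks of at most 1024 microseconds. */
    static void chunked_delay(long usec)
    {
            while (usec > 0) {
                    long chunk = usec > 1024 ? 1024 : usec;

                    usleep(chunk);      /* the driver calls udelay() instead */
                    usec -= chunk;
            }
    }
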
index 853998be1474f4059deb2251bef1ec7bb10a182c..8d6612c19922fe29cfd340992550627131b9897c 100644 (file)
@@ -222,22 +222,6 @@ typedef struct timer_list ahd_timer_t;
 /***************************** Timer Facilities *******************************/
 #define ahd_timer_init init_timer
 #define ahd_timer_stop del_timer_sync
-typedef void ahd_linux_callback_t (u_long);  
-static __inline void ahd_timer_reset(ahd_timer_t *timer, int usec,
-                                    ahd_callback_t *func, void *arg);
-
-static __inline void
-ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg)
-{
-       struct ahd_softc *ahd;
-
-       ahd = (struct ahd_softc *)arg;
-       del_timer(timer);
-       timer->data = (u_long)arg;
-       timer->expires = jiffies + (usec * HZ)/1000000;
-       timer->function = (ahd_linux_callback_t*)func;
-       add_timer(timer);
-}
 
 /***************************** SMP support ************************************/
 #include <linux/spinlock.h>
@@ -376,7 +360,7 @@ struct ahd_platform_data {
 #define AHD_LINUX_NOIRQ        ((uint32_t)~0)
        uint32_t                 irq;           /* IRQ for this adapter */
        uint32_t                 bios_address;
-       uint32_t                 mem_busaddr;   /* Mem Base Addr */
+       resource_size_t          mem_busaddr;   /* Mem Base Addr */
 };
 
 /************************** OS Utility Wrappers *******************************/
@@ -386,111 +370,18 @@ struct ahd_platform_data {
 #define malloc(size, type, flags) kmalloc(size, flags)
 #define free(ptr, type) kfree(ptr)
 
-static __inline void ahd_delay(long);
-static __inline void
-ahd_delay(long usec)
-{
-       /*
-        * udelay on Linux can have problems for
-        * multi-millisecond waits.  Wait at most
-        * 1024us per call.
-        */
-       while (usec > 0) {
-               udelay(usec % 1024);
-               usec -= 1024;
-       }
-}
-
+void ahd_delay(long);
 
 /***************************** Low Level I/O **********************************/
-static __inline uint8_t ahd_inb(struct ahd_softc * ahd, long port);
-static __inline uint16_t ahd_inw_atomic(struct ahd_softc * ahd, long port);
-static __inline void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
-static __inline void ahd_outw_atomic(struct ahd_softc * ahd,
+uint8_t ahd_inb(struct ahd_softc * ahd, long port);
+void ahd_outb(struct ahd_softc * ahd, long port, uint8_t val);
+void ahd_outw_atomic(struct ahd_softc * ahd,
                                     long port, uint16_t val);
-static __inline void ahd_outsb(struct ahd_softc * ahd, long port,
+void ahd_outsb(struct ahd_softc * ahd, long port,
                               uint8_t *, int count);
-static __inline void ahd_insb(struct ahd_softc * ahd, long port,
+void ahd_insb(struct ahd_softc * ahd, long port,
                               uint8_t *, int count);
 
-static __inline uint8_t
-ahd_inb(struct ahd_softc * ahd, long port)
-{
-       uint8_t x;
-
-       if (ahd->tags[0] == BUS_SPACE_MEMIO) {
-               x = readb(ahd->bshs[0].maddr + port);
-       } else {
-               x = inb(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
-       }
-       mb();
-       return (x);
-}
-
-static __inline uint16_t
-ahd_inw_atomic(struct ahd_softc * ahd, long port)
-{
-       uint8_t x;
-
-       if (ahd->tags[0] == BUS_SPACE_MEMIO) {
-               x = readw(ahd->bshs[0].maddr + port);
-       } else {
-               x = inw(ahd->bshs[(port) >> 8].ioport + ((port) & 0xFF));
-       }
-       mb();
-       return (x);
-}
-
-static __inline void
-ahd_outb(struct ahd_softc * ahd, long port, uint8_t val)
-{
-       if (ahd->tags[0] == BUS_SPACE_MEMIO) {
-               writeb(val, ahd->bshs[0].maddr + port);
-       } else {
-               outb(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
-       }
-       mb();
-}
-
-static __inline void
-ahd_outw_atomic(struct ahd_softc * ahd, long port, uint16_t val)
-{
-       if (ahd->tags[0] == BUS_SPACE_MEMIO) {
-               writew(val, ahd->bshs[0].maddr + port);
-       } else {
-               outw(val, ahd->bshs[(port) >> 8].ioport + (port & 0xFF));
-       }
-       mb();
-}
-
-static __inline void
-ahd_outsb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
-{
-       int i;
-
-       /*
-        * There is probably a more efficient way to do this on Linux
-        * but we don't use this for anything speed critical and this
-        * should work.
-        */
-       for (i = 0; i < count; i++)
-               ahd_outb(ahd, port, *array++);
-}
-
-static __inline void
-ahd_insb(struct ahd_softc * ahd, long port, uint8_t *array, int count)
-{
-       int i;
-
-       /*
-        * There is probably a more efficient way to do this on Linux
-        * but we don't use this for anything speed critical and this
-        * should work.
-        */
-       for (i = 0; i < count; i++)
-               *array++ = ahd_inb(ahd, port);
-}
-
 /**************************** Initialization **********************************/
 int            ahd_linux_register_host(struct ahd_softc *,
                                        struct scsi_host_template *);
@@ -593,62 +484,12 @@ void                       ahd_linux_pci_exit(void);
 int                     ahd_pci_map_registers(struct ahd_softc *ahd);
 int                     ahd_pci_map_int(struct ahd_softc *ahd);
 
-static __inline uint32_t ahd_pci_read_config(ahd_dev_softc_t pci,
+uint32_t                ahd_pci_read_config(ahd_dev_softc_t pci,
                                             int reg, int width);
-
-static __inline uint32_t
-ahd_pci_read_config(ahd_dev_softc_t pci, int reg, int width)
-{
-       switch (width) {
-       case 1:
-       {
-               uint8_t retval;
-
-               pci_read_config_byte(pci, reg, &retval);
-               return (retval);
-       }
-       case 2:
-       {
-               uint16_t retval;
-               pci_read_config_word(pci, reg, &retval);
-               return (retval);
-       }
-       case 4:
-       {
-               uint32_t retval;
-               pci_read_config_dword(pci, reg, &retval);
-               return (retval);
-       }
-       default:
-               panic("ahd_pci_read_config: Read size too big");
-               /* NOTREACHED */
-               return (0);
-       }
-}
-
-static __inline void ahd_pci_write_config(ahd_dev_softc_t pci,
+void                    ahd_pci_write_config(ahd_dev_softc_t pci,
                                          int reg, uint32_t value,
                                          int width);
 
-static __inline void
-ahd_pci_write_config(ahd_dev_softc_t pci, int reg, uint32_t value, int width)
-{
-       switch (width) {
-       case 1:
-               pci_write_config_byte(pci, reg, value);
-               break;
-       case 2:
-               pci_write_config_word(pci, reg, value);
-               break;
-       case 4:
-               pci_write_config_dword(pci, reg, value);
-               break;
-       default:
-               panic("ahd_pci_write_config: Write size too big");
-               /* NOTREACHED */
-       }
-}
-
 static __inline int ahd_get_pci_function(ahd_dev_softc_t);
 static __inline int
 ahd_get_pci_function(ahd_dev_softc_t pci)
index dfaaae5e73aebdee4a8fa873117f1ce23db4c3f7..6593056867f6d33b9ac472b49f19ff56d9dcd96f 100644
@@ -49,7 +49,7 @@
        ID2C(x),         \
        ID2C(IDIROC(x))
 
-static struct pci_device_id ahd_linux_pci_id_table[] = {
+static const struct pci_device_id ahd_linux_pci_id_table[] = {
        /* aic7901 based controllers */
        ID(ID_AHA_29320A),
        ID(ID_AHA_29320ALP),
@@ -159,7 +159,7 @@ ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        char             buf[80];
        struct           ahd_softc *ahd;
        ahd_dev_softc_t  pci;
-       struct           ahd_pci_identity *entry;
+       const struct ahd_pci_identity *entry;
        char            *name;
        int              error;
        struct device   *dev = &pdev->dev;
@@ -249,8 +249,8 @@ ahd_linux_pci_exit(void)
 }
 
 static int
-ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, u_long *base,
-                                u_long *base2)
+ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, resource_size_t *base,
+                                resource_size_t *base2)
 {
        *base = pci_resource_start(ahd->dev_softc, 0);
        /*
@@ -272,11 +272,11 @@ ahd_linux_pci_reserve_io_regions(struct ahd_softc *ahd, u_long *base,
 
 static int
 ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
-                                u_long *bus_addr,
+                                resource_size_t *bus_addr,
                                 uint8_t __iomem **maddr)
 {
-       u_long  start;
-       u_long  base_page;
+       resource_size_t start;
+       resource_size_t base_page;
        u_long  base_offset;
        int     error = 0;
 
@@ -310,7 +310,7 @@ int
 ahd_pci_map_registers(struct ahd_softc *ahd)
 {
        uint32_t command;
-       u_long   base;
+       resource_size_t base;
        uint8_t __iomem *maddr;
        int      error;
 
@@ -346,31 +346,32 @@ ahd_pci_map_registers(struct ahd_softc *ahd)
                } else
                        command |= PCIM_CMD_MEMEN;
        } else if (bootverbose) {
-               printf("aic79xx: PCI%d:%d:%d MEM region 0x%lx "
+               printf("aic79xx: PCI%d:%d:%d MEM region 0x%llx "
                       "unavailable. Cannot memory map device.\n",
                       ahd_get_pci_bus(ahd->dev_softc),
                       ahd_get_pci_slot(ahd->dev_softc),
                       ahd_get_pci_function(ahd->dev_softc),
-                      base);
+                      (unsigned long long)base);
        }
 
        if (maddr == NULL) {
-               u_long   base2;
+               resource_size_t base2;
 
                error = ahd_linux_pci_reserve_io_regions(ahd, &base, &base2);
                if (error == 0) {
                        ahd->tags[0] = BUS_SPACE_PIO;
                        ahd->tags[1] = BUS_SPACE_PIO;
-                       ahd->bshs[0].ioport = base;
-                       ahd->bshs[1].ioport = base2;
+                       ahd->bshs[0].ioport = (u_long)base;
+                       ahd->bshs[1].ioport = (u_long)base2;
                        command |= PCIM_CMD_PORTEN;
                } else {
-                       printf("aic79xx: PCI%d:%d:%d IO regions 0x%lx and 0x%lx"
-                              "unavailable. Cannot map device.\n",
+                       printf("aic79xx: PCI%d:%d:%d IO regions 0x%llx and "
+                              "0x%llx unavailable. Cannot map device.\n",
                               ahd_get_pci_bus(ahd->dev_softc),
                               ahd_get_pci_slot(ahd->dev_softc),
                               ahd_get_pci_function(ahd->dev_softc),
-                              base, base2);
+                              (unsigned long long)base,
+                              (unsigned long long)base2);
                }
        }
        ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND, command, 4);
index c9f79fdf913105d77046db61619c6432a4123dd6..c25b6adffbf94f50fd5684063e7add6b04984895 100644
@@ -97,7 +97,7 @@ static ahd_device_setup_t ahd_aic7901A_setup;
 static ahd_device_setup_t ahd_aic7902_setup;
 static ahd_device_setup_t ahd_aic790X_setup;
 
-static struct ahd_pci_identity ahd_pci_ident_table [] =
+static const struct ahd_pci_identity ahd_pci_ident_table[] =
 {
        /* aic7901 based controllers */
        {
@@ -253,7 +253,7 @@ static void ahd_configure_termination(struct ahd_softc *ahd,
 static void    ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat);
 static void    ahd_pci_intr(struct ahd_softc *ahd);
 
-struct ahd_pci_identity *
+const struct ahd_pci_identity *
 ahd_find_pci_device(ahd_dev_softc_t pci)
 {
        uint64_t  full_id;
@@ -261,7 +261,7 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
        uint16_t  vendor;
        uint16_t  subdevice;
        uint16_t  subvendor;
-       struct    ahd_pci_identity *entry;
+       const struct ahd_pci_identity *entry;
        u_int     i;
 
        vendor = ahd_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
@@ -292,7 +292,7 @@ ahd_find_pci_device(ahd_dev_softc_t pci)
 }
 
 int
-ahd_pci_config(struct ahd_softc *ahd, struct ahd_pci_identity *entry)
+ahd_pci_config(struct ahd_softc *ahd, const struct ahd_pci_identity *entry)
 {
        struct scb_data *shared_scb_data;
        u_int            command;
index 6b28bebcbca0fffb4021fd517e5cf93254be6d07..014bed716e7c9e349e51af90a8ec739927d7cc6c 100644
@@ -57,7 +57,7 @@ static int    ahd_proc_write_seeprom(struct ahd_softc *ahd,
  * Table of syncrates that don't follow the "divisible by 4"
  * rule. This table will be expanded in future SCSI specs.
  */
-static struct {
+static const struct {
        u_int period_factor;
        u_int period;   /* in 100ths of ns */
 } scsi_syncrates[] = {
index 2068e00d2c750a09c8b733aff1fa01af385a395e..c21ceab8e9134f699af09203b6dada11acd1d132 100644
@@ -47,13 +47,6 @@ ahd_reg_print_t ahd_error_print;
     ahd_print_register(NULL, 0, "ERROR", 0x04, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrerr_print;
-#else
-#define ahd_clrerr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRERR", 0x04, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_hcntrl_print;
 #else
@@ -166,13 +159,6 @@ ahd_reg_print_t ahd_sg_cache_shadow_print;
     ahd_print_register(NULL, 0, "SG_CACHE_SHADOW", 0x1b, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_arbctl_print;
-#else
-#define ahd_arbctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ARBCTL", 0x1b, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_sg_cache_pre_print;
 #else
@@ -187,20 +173,6 @@ ahd_reg_print_t ahd_lqin_print;
     ahd_print_register(NULL, 0, "LQIN", 0x20, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_typeptr_print;
-#else
-#define ahd_typeptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "TYPEPTR", 0x20, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_tagptr_print;
-#else
-#define ahd_tagptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "TAGPTR", 0x21, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_lunptr_print;
 #else
@@ -208,20 +180,6 @@ ahd_reg_print_t ahd_lunptr_print;
     ahd_print_register(NULL, 0, "LUNPTR", 0x22, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_datalenptr_print;
-#else
-#define ahd_datalenptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DATALENPTR", 0x23, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_statlenptr_print;
-#else
-#define ahd_statlenptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "STATLENPTR", 0x24, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_cmdlenptr_print;
 #else
@@ -257,13 +215,6 @@ ahd_reg_print_t ahd_qnextptr_print;
     ahd_print_register(NULL, 0, "QNEXTPTR", 0x29, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_idptr_print;
-#else
-#define ahd_idptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "IDPTR", 0x2a, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_abrtbyteptr_print;
 #else
@@ -278,27 +229,6 @@ ahd_reg_print_t ahd_abrtbitptr_print;
     ahd_print_register(NULL, 0, "ABRTBITPTR", 0x2c, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_maxcmdbytes_print;
-#else
-#define ahd_maxcmdbytes_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MAXCMDBYTES", 0x2d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_maxcmd2rcv_print;
-#else
-#define ahd_maxcmd2rcv_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MAXCMD2RCV", 0x2e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_shortthresh_print;
-#else
-#define ahd_shortthresh_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SHORTTHRESH", 0x2f, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_lunlen_print;
 #else
@@ -327,41 +257,6 @@ ahd_reg_print_t ahd_maxcmdcnt_print;
     ahd_print_register(NULL, 0, "MAXCMDCNT", 0x33, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqrsvd01_print;
-#else
-#define ahd_lqrsvd01_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQRSVD01", 0x34, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqrsvd16_print;
-#else
-#define ahd_lqrsvd16_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQRSVD16", 0x35, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqrsvd17_print;
-#else
-#define ahd_lqrsvd17_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQRSVD17", 0x36, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmdrsvd0_print;
-#else
-#define ahd_cmdrsvd0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMDRSVD0", 0x37, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqctl0_print;
-#else
-#define ahd_lqctl0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQCTL0", 0x38, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_lqctl1_print;
 #else
@@ -369,13 +264,6 @@ ahd_reg_print_t ahd_lqctl1_print;
     ahd_print_register(NULL, 0, "LQCTL1", 0x38, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsbist0_print;
-#else
-#define ahd_scsbist0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCSBIST0", 0x39, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_lqctl2_print;
 #else
@@ -383,13 +271,6 @@ ahd_reg_print_t ahd_lqctl2_print;
     ahd_print_register(NULL, 0, "LQCTL2", 0x39, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsbist1_print;
-#else
-#define ahd_scsbist1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCSBIST1", 0x3a, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_scsiseq0_print;
 #else
@@ -411,20 +292,6 @@ ahd_reg_print_t ahd_sxfrctl0_print;
     ahd_print_register(NULL, 0, "SXFRCTL0", 0x3c, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dlcount_print;
-#else
-#define ahd_dlcount_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DLCOUNT", 0x3c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_businitid_print;
-#else
-#define ahd_businitid_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "BUSINITID", 0x3c, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_sxfrctl1_print;
 #else
@@ -432,20 +299,6 @@ ahd_reg_print_t ahd_sxfrctl1_print;
     ahd_print_register(NULL, 0, "SXFRCTL1", 0x3d, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_bustargid_print;
-#else
-#define ahd_bustargid_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "BUSTARGID", 0x3e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sxfrctl2_print;
-#else
-#define ahd_sxfrctl2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SXFRCTL2", 0x3e, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_dffstat_print;
 #else
@@ -454,17 +307,17 @@ ahd_reg_print_t ahd_dffstat_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsisigo_print;
+ahd_reg_print_t ahd_multargid_print;
 #else
-#define ahd_scsisigo_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCSISIGO", 0x40, regvalue, cur_col, wrap)
+#define ahd_multargid_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_multargid_print;
+ahd_reg_print_t ahd_scsisigo_print;
 #else
-#define ahd_multargid_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MULTARGID", 0x40, regvalue, cur_col, wrap)
+#define ahd_scsisigo_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCSISIGO", 0x40, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
@@ -481,13 +334,6 @@ ahd_reg_print_t ahd_scsiphase_print;
     ahd_print_register(NULL, 0, "SCSIPHASE", 0x42, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsidat0_img_print;
-#else
-#define ahd_scsidat0_img_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCSIDAT0_IMG", 0x43, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_scsidat_print;
 #else
@@ -530,13 +376,6 @@ ahd_reg_print_t ahd_sblkctl_print;
     ahd_print_register(NULL, 0, "SBLKCTL", 0x4a, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrsint0_print;
-#else
-#define ahd_clrsint0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRSINT0", 0x4b, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_sstat0_print;
 #else
@@ -552,10 +391,10 @@ ahd_reg_print_t ahd_simode0_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrsint1_print;
+ahd_reg_print_t ahd_clrsint0_print;
 #else
-#define ahd_clrsint1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRSINT1", 0x4c, regvalue, cur_col, wrap)
+#define ahd_clrsint0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRSINT0", 0x4b, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
@@ -566,17 +405,17 @@ ahd_reg_print_t ahd_sstat1_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sstat2_print;
+ahd_reg_print_t ahd_clrsint1_print;
 #else
-#define ahd_sstat2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap)
+#define ahd_clrsint1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRSINT1", 0x4c, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_simode2_print;
+ahd_reg_print_t ahd_sstat2_print;
 #else
-#define ahd_simode2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SIMODE2", 0x4d, regvalue, cur_col, wrap)
+#define ahd_sstat2_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SSTAT2", 0x4d, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
@@ -622,17 +461,17 @@ ahd_reg_print_t ahd_lqistat0_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_clrlqiint0_print;
+ahd_reg_print_t ahd_lqimode0_print;
 #else
-#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
+#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lqimode0_print;
+ahd_reg_print_t ahd_clrlqiint0_print;
 #else
-#define ahd_lqimode0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LQIMODE0", 0x50, regvalue, cur_col, wrap)
+#define ahd_clrlqiint0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CLRLQIINT0", 0x50, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
@@ -789,13 +628,6 @@ ahd_reg_print_t ahd_seqintsrc_print;
     ahd_print_register(NULL, 0, "SEQINTSRC", 0x5b, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_currscb_print;
-#else
-#define ahd_currscb_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_seqimode_print;
 #else
@@ -804,24 +636,17 @@ ahd_reg_print_t ahd_seqimode_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_mdffstat_print;
-#else
-#define ahd_mdffstat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_crccontrol_print;
+ahd_reg_print_t ahd_currscb_print;
 #else
-#define ahd_crccontrol_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CRCCONTROL", 0x5d, regvalue, cur_col, wrap)
+#define ahd_currscb_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "CURRSCB", 0x5c, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfftag_print;
+ahd_reg_print_t ahd_mdffstat_print;
 #else
-#define ahd_dfftag_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DFFTAG", 0x5e, regvalue, cur_col, wrap)
+#define ahd_mdffstat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "MDFFSTAT", 0x5d, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
@@ -831,20 +656,6 @@ ahd_reg_print_t ahd_lastscb_print;
     ahd_print_register(NULL, 0, "LASTSCB", 0x5e, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scsitest_print;
-#else
-#define ahd_scsitest_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCSITEST", 0x5e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_iopdnctl_print;
-#else
-#define ahd_iopdnctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "IOPDNCTL", 0x5f, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_shaddr_print;
 #else
@@ -859,13 +670,6 @@ ahd_reg_print_t ahd_negoaddr_print;
     ahd_print_register(NULL, 0, "NEGOADDR", 0x60, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dgrpcrci_print;
-#else
-#define ahd_dgrpcrci_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DGRPCRCI", 0x60, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_negperiod_print;
 #else
@@ -873,13 +677,6 @@ ahd_reg_print_t ahd_negperiod_print;
     ahd_print_register(NULL, 0, "NEGPERIOD", 0x61, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_packcrci_print;
-#else
-#define ahd_packcrci_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "PACKCRCI", 0x62, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_negoffset_print;
 #else
@@ -929,13 +726,6 @@ ahd_reg_print_t ahd_iownid_print;
     ahd_print_register(NULL, 0, "IOWNID", 0x67, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll960ctl0_print;
-#else
-#define ahd_pll960ctl0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "PLL960CTL0", 0x68, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_shcnt_print;
 #else
@@ -950,27 +740,6 @@ ahd_reg_print_t ahd_townid_print;
     ahd_print_register(NULL, 0, "TOWNID", 0x69, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll960ctl1_print;
-#else
-#define ahd_pll960ctl1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "PLL960CTL1", 0x69, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll960cnt0_print;
-#else
-#define ahd_pll960cnt0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "PLL960CNT0", 0x6a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_xsig_print;
-#else
-#define ahd_xsig_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "XSIG", 0x6a, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_seloid_print;
 #else
@@ -978,41 +747,6 @@ ahd_reg_print_t ahd_seloid_print;
     ahd_print_register(NULL, 0, "SELOID", 0x6b, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll400ctl0_print;
-#else
-#define ahd_pll400ctl0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "PLL400CTL0", 0x6c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_fairness_print;
-#else
-#define ahd_fairness_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "FAIRNESS", 0x6c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll400ctl1_print;
-#else
-#define ahd_pll400ctl1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "PLL400CTL1", 0x6d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_unfairness_print;
-#else
-#define ahd_unfairness_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "UNFAIRNESS", 0x6e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pll400cnt0_print;
-#else
-#define ahd_pll400cnt0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "PLL400CNT0", 0x6e, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_haddr_print;
 #else
@@ -1020,27 +754,6 @@ ahd_reg_print_t ahd_haddr_print;
     ahd_print_register(NULL, 0, "HADDR", 0x70, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_plldelay_print;
-#else
-#define ahd_plldelay_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "PLLDELAY", 0x70, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_hodmaadr_print;
-#else
-#define ahd_hodmaadr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "HODMAADR", 0x70, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_hodmacnt_print;
-#else
-#define ahd_hodmacnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "HODMACNT", 0x78, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_hcnt_print;
 #else
@@ -1049,10 +762,10 @@ ahd_reg_print_t ahd_hcnt_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_hodmaen_print;
+ahd_reg_print_t ahd_sghaddr_print;
 #else
-#define ahd_hodmaen_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "HODMAEN", 0x7a, regvalue, cur_col, wrap)
+#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
@@ -1063,10 +776,10 @@ ahd_reg_print_t ahd_scbhaddr_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sghaddr_print;
+ahd_reg_print_t ahd_sghcnt_print;
 #else
-#define ahd_sghaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGHADDR", 0x7c, regvalue, cur_col, wrap)
+#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
@@ -1076,13 +789,6 @@ ahd_reg_print_t ahd_scbhcnt_print;
     ahd_print_register(NULL, 0, "SCBHCNT", 0x84, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sghcnt_print;
-#else
-#define ahd_sghcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGHCNT", 0x84, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_dff_thrsh_print;
 #else
@@ -1091,357 +797,56 @@ ahd_reg_print_t ahd_dff_thrsh_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_romaddr_print;
+ahd_reg_print_t ahd_pcixctl_print;
 #else
-#define ahd_romaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ROMADDR", 0x8a, regvalue, cur_col, wrap)
+#define ahd_pcixctl_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "PCIXCTL", 0x93, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_romcntrl_print;
+ahd_reg_print_t ahd_dchspltstat0_print;
 #else
-#define ahd_romcntrl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ROMCNTRL", 0x8d, regvalue, cur_col, wrap)
+#define ahd_dchspltstat0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DCHSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_romdata_print;
+ahd_reg_print_t ahd_dchspltstat1_print;
 #else
-#define ahd_romdata_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ROMDATA", 0x8e, regvalue, cur_col, wrap)
+#define ahd_dchspltstat1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DCHSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcrxmsg0_print;
+ahd_reg_print_t ahd_sgspltstat0_print;
 #else
-#define ahd_cmcrxmsg0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMCRXMSG0", 0x90, regvalue, cur_col, wrap)
+#define ahd_sgspltstat0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SGSPLTSTAT0", 0x9e, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_roenable_print;
+ahd_reg_print_t ahd_sgspltstat1_print;
 #else
-#define ahd_roenable_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "ROENABLE", 0x90, regvalue, cur_col, wrap)
+#define ahd_sgspltstat1_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SGSPLTSTAT1", 0x9f, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyrxmsg0_print;
+ahd_reg_print_t ahd_df0pcistat_print;
 #else
-#define ahd_ovlyrxmsg0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "OVLYRXMSG0", 0x90, regvalue, cur_col, wrap)
+#define ahd_df0pcistat_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "DF0PCISTAT", 0xa0, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchrxmsg0_print;
+ahd_reg_print_t ahd_reg0_print;
 #else
-#define ahd_dchrxmsg0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DCHRXMSG0", 0x90, regvalue, cur_col, wrap)
+#define ahd_reg0_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "REG0", 0xa0, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyrxmsg1_print;
-#else
-#define ahd_ovlyrxmsg1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "OVLYRXMSG1", 0x91, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_nsenable_print;
-#else
-#define ahd_nsenable_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "NSENABLE", 0x91, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcrxmsg1_print;
-#else
-#define ahd_cmcrxmsg1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMCRXMSG1", 0x91, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchrxmsg1_print;
-#else
-#define ahd_dchrxmsg1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DCHRXMSG1", 0x91, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchrxmsg2_print;
-#else
-#define ahd_dchrxmsg2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DCHRXMSG2", 0x92, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcrxmsg2_print;
-#else
-#define ahd_cmcrxmsg2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMCRXMSG2", 0x92, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ost_print;
-#else
-#define ahd_ost_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "OST", 0x92, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyrxmsg2_print;
-#else
-#define ahd_ovlyrxmsg2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "OVLYRXMSG2", 0x92, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchrxmsg3_print;
-#else
-#define ahd_dchrxmsg3_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DCHRXMSG3", 0x93, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyrxmsg3_print;
-#else
-#define ahd_ovlyrxmsg3_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "OVLYRXMSG3", 0x93, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcrxmsg3_print;
-#else
-#define ahd_cmcrxmsg3_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMCRXMSG3", 0x93, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_pcixctl_print;
-#else
-#define ahd_pcixctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "PCIXCTL", 0x93, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyseqbcnt_print;
-#else
-#define ahd_ovlyseqbcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "OVLYSEQBCNT", 0x94, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchseqbcnt_print;
-#else
-#define ahd_dchseqbcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DCHSEQBCNT", 0x94, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcseqbcnt_print;
-#else
-#define ahd_cmcseqbcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMCSEQBCNT", 0x94, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcspltstat0_print;
-#else
-#define ahd_cmcspltstat0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMCSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchspltstat0_print;
-#else
-#define ahd_dchspltstat0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DCHSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyspltstat0_print;
-#else
-#define ahd_ovlyspltstat0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "OVLYSPLTSTAT0", 0x96, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcspltstat1_print;
-#else
-#define ahd_cmcspltstat1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMCSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyspltstat1_print;
-#else
-#define ahd_ovlyspltstat1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "OVLYSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dchspltstat1_print;
-#else
-#define ahd_dchspltstat1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DCHSPLTSTAT1", 0x97, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgrxmsg0_print;
-#else
-#define ahd_sgrxmsg0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGRXMSG0", 0x98, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutadr0_print;
-#else
-#define ahd_slvspltoutadr0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SLVSPLTOUTADR0", 0x98, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgrxmsg1_print;
-#else
-#define ahd_sgrxmsg1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGRXMSG1", 0x99, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutadr1_print;
-#else
-#define ahd_slvspltoutadr1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SLVSPLTOUTADR1", 0x99, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgrxmsg2_print;
-#else
-#define ahd_sgrxmsg2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGRXMSG2", 0x9a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutadr2_print;
-#else
-#define ahd_slvspltoutadr2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SLVSPLTOUTADR2", 0x9a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgrxmsg3_print;
-#else
-#define ahd_sgrxmsg3_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGRXMSG3", 0x9b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutadr3_print;
-#else
-#define ahd_slvspltoutadr3_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SLVSPLTOUTADR3", 0x9b, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgseqbcnt_print;
-#else
-#define ahd_sgseqbcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGSEQBCNT", 0x9c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutattr0_print;
-#else
-#define ahd_slvspltoutattr0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SLVSPLTOUTATTR0", 0x9c, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutattr1_print;
-#else
-#define ahd_slvspltoutattr1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SLVSPLTOUTATTR1", 0x9d, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_slvspltoutattr2_print;
-#else
-#define ahd_slvspltoutattr2_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SLVSPLTOUTATTR2", 0x9e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgspltstat0_print;
-#else
-#define ahd_sgspltstat0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGSPLTSTAT0", 0x9e, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgspltstat1_print;
-#else
-#define ahd_sgspltstat1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGSPLTSTAT1", 0x9f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sfunct_print;
-#else
-#define ahd_sfunct_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SFUNCT", 0x9f, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_df0pcistat_print;
-#else
-#define ahd_df0pcistat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DF0PCISTAT", 0xa0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_reg0_print;
-#else
-#define ahd_reg0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "REG0", 0xa0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_df1pcistat_print;
-#else
-#define ahd_df1pcistat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DF1PCISTAT", 0xa1, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sgpcistat_print;
-#else
-#define ahd_sgpcistat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SGPCISTAT", 0xa2, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_reg1_print;
-#else
-#define ahd_reg1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "REG1", 0xa2, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmcpcistat_print;
-#else
-#define ahd_cmcpcistat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMCPCISTAT", 0xa3, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlypcistat_print;
-#else
-#define ahd_ovlypcistat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "OVLYPCISTAT", 0xa4, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_reg_isr_print;
+ahd_reg_print_t ahd_reg_isr_print;
 #else
 #define ahd_reg_isr_print(regvalue, cur_col, wrap) \
     ahd_print_register(NULL, 0, "REG_ISR", 0xa4, regvalue, cur_col, wrap)
@@ -1454,13 +859,6 @@ ahd_reg_print_t ahd_sg_state_print;
     ahd_print_register(NULL, 0, "SG_STATE", 0xa6, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_msipcistat_print;
-#else
-#define ahd_msipcistat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "MSIPCISTAT", 0xa6, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_targpcistat_print;
 #else
@@ -1468,13 +866,6 @@ ahd_reg_print_t ahd_targpcistat_print;
     ahd_print_register(NULL, 0, "TARGPCISTAT", 0xa7, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_data_count_odd_print;
-#else
-#define ahd_data_count_odd_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DATA_COUNT_ODD", 0xa7, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_scbptr_print;
 #else
@@ -1482,13 +873,6 @@ ahd_reg_print_t ahd_scbptr_print;
     ahd_print_register(NULL, 0, "SCBPTR", 0xa8, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ccscbacnt_print;
-#else
-#define ahd_ccscbacnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CCSCBACNT", 0xab, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_scbautoptr_print;
 #else
@@ -1503,13 +887,6 @@ ahd_reg_print_t ahd_ccsgaddr_print;
     ahd_print_register(NULL, 0, "CCSGADDR", 0xac, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ccscbadr_bk_print;
-#else
-#define ahd_ccscbadr_bk_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CCSCBADR_BK", 0xac, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_ccscbaddr_print;
 #else
@@ -1517,13 +894,6 @@ ahd_reg_print_t ahd_ccscbaddr_print;
     ahd_print_register(NULL, 0, "CCSCBADDR", 0xac, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_cmc_rambist_print;
-#else
-#define ahd_cmc_rambist_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "CMC_RAMBIST", 0xad, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_ccscbctl_print;
 #else
@@ -1545,13 +915,6 @@ ahd_reg_print_t ahd_ccsgram_print;
     ahd_print_register(NULL, 0, "CCSGRAM", 0xb0, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flexadr_print;
-#else
-#define ahd_flexadr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "FLEXADR", 0xb0, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_ccscbram_print;
 #else
@@ -1559,27 +922,6 @@ ahd_reg_print_t ahd_ccscbram_print;
     ahd_print_register(NULL, 0, "CCSCBRAM", 0xb0, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flexcnt_print;
-#else
-#define ahd_flexcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "FLEXCNT", 0xb3, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flexdmastat_print;
-#else
-#define ahd_flexdmastat_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "FLEXDMASTAT", 0xb5, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_flexdata_print;
-#else
-#define ahd_flexdata_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "FLEXDATA", 0xb6, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_brddat_print;
 #else
@@ -1622,27 +964,6 @@ ahd_reg_print_t ahd_seestat_print;
     ahd_print_register(NULL, 0, "SEESTAT", 0xbe, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scbcnt_print;
-#else
-#define ahd_scbcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCBCNT", 0xbf, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfwaddr_print;
-#else
-#define ahd_dfwaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DFWADDR", 0xc0, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dspfltrctl_print;
-#else
-#define ahd_dspfltrctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DSPFLTRCTL", 0xc0, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_dspdatactl_print;
 #else
@@ -1650,27 +971,6 @@ ahd_reg_print_t ahd_dspdatactl_print;
     ahd_print_register(NULL, 0, "DSPDATACTL", 0xc1, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfraddr_print;
-#else
-#define ahd_dfraddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DFRADDR", 0xc2, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dspreqctl_print;
-#else
-#define ahd_dspreqctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DSPREQCTL", 0xc2, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dspackctl_print;
-#else
-#define ahd_dspackctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DSPACKCTL", 0xc3, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_dfdat_print;
 #else
@@ -1692,76 +992,6 @@ ahd_reg_print_t ahd_wrtbiasctl_print;
     ahd_print_register(NULL, 0, "WRTBIASCTL", 0xc5, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_rcvrbiosctl_print;
-#else
-#define ahd_rcvrbiosctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "RCVRBIOSCTL", 0xc6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_wrtbiascalc_print;
-#else
-#define ahd_wrtbiascalc_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "WRTBIASCALC", 0xc7, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_rcvrbiascalc_print;
-#else
-#define ahd_rcvrbiascalc_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "RCVRBIASCALC", 0xc8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfptrs_print;
-#else
-#define ahd_dfptrs_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DFPTRS", 0xc8, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_skewcalc_print;
-#else
-#define ahd_skewcalc_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SKEWCALC", 0xc9, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfbkptr_print;
-#else
-#define ahd_dfbkptr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DFBKPTR", 0xc9, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfdbctl_print;
-#else
-#define ahd_dfdbctl_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DFDBCTL", 0xcb, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfscnt_print;
-#else
-#define ahd_dfscnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DFSCNT", 0xcc, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_dfbcnt_print;
-#else
-#define ahd_dfbcnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "DFBCNT", 0xce, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ovlyaddr_print;
-#else
-#define ahd_ovlyaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "OVLYADDR", 0xd4, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_seqctl0_print;
 #else
@@ -1769,13 +999,6 @@ ahd_reg_print_t ahd_seqctl0_print;
     ahd_print_register(NULL, 0, "SEQCTL0", 0xd6, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_seqctl1_print;
-#else
-#define ahd_seqctl1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SEQCTL1", 0xd7, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_flags_print;
 #else
@@ -1825,20 +1048,6 @@ ahd_reg_print_t ahd_dindex_print;
     ahd_print_register(NULL, 0, "DINDEX", 0xe4, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_brkaddr0_print;
-#else
-#define ahd_brkaddr0_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "BRKADDR0", 0xe6, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_brkaddr1_print;
-#else
-#define ahd_brkaddr1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "BRKADDR1", 0xe6, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_allones_print;
 #else
@@ -1874,13 +1083,6 @@ ahd_reg_print_t ahd_dindir_print;
     ahd_print_register(NULL, 0, "DINDIR", 0xed, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_function1_print;
-#else
-#define ahd_function1_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "FUNCTION1", 0xf0, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_stack_print;
 #else
@@ -1902,13 +1104,6 @@ ahd_reg_print_t ahd_curaddr_print;
     ahd_print_register(NULL, 0, "CURADDR", 0xf4, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_lastaddr_print;
-#else
-#define ahd_lastaddr_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "LASTADDR", 0xf6, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_intvec2_addr_print;
 #else
@@ -1931,24 +1126,17 @@ ahd_reg_print_t ahd_accum_save_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_waiting_scb_tails_print;
-#else
-#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_ahd_pci_config_base_print;
+ahd_reg_print_t ahd_sram_base_print;
 #else
-#define ahd_ahd_pci_config_base_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "AHD_PCI_CONFIG_BASE", 0x100, regvalue, cur_col, wrap)
+#define ahd_sram_base_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_sram_base_print;
+ahd_reg_print_t ahd_waiting_scb_tails_print;
 #else
-#define ahd_sram_base_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SRAM_BASE", 0x100, regvalue, cur_col, wrap)
+#define ahd_waiting_scb_tails_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "WAITING_SCB_TAILS", 0x100, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
@@ -2218,17 +1406,17 @@ ahd_reg_print_t ahd_mk_message_scsiid_print;
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_base_print;
+ahd_reg_print_t ahd_scb_residual_datacnt_print;
 #else
-#define ahd_scb_base_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap)
+#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_residual_datacnt_print;
+ahd_reg_print_t ahd_scb_base_print;
 #else
-#define ahd_scb_residual_datacnt_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT", 0x180, regvalue, cur_col, wrap)
+#define ahd_scb_base_print(regvalue, cur_col, wrap) \
+    ahd_print_register(NULL, 0, "SCB_BASE", 0x180, regvalue, cur_col, wrap)
 #endif
 
 #if AIC_DEBUG_REGISTERS
@@ -2245,27 +1433,6 @@ ahd_reg_print_t ahd_scb_scsi_status_print;
     ahd_print_register(NULL, 0, "SCB_SCSI_STATUS", 0x188, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_target_phases_print;
-#else
-#define ahd_scb_target_phases_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_TARGET_PHASES", 0x189, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_target_data_dir_print;
-#else
-#define ahd_scb_target_data_dir_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_TARGET_DATA_DIR", 0x18a, regvalue, cur_col, wrap)
-#endif
-
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_target_itag_print;
-#else
-#define ahd_scb_target_itag_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_TARGET_ITAG", 0x18b, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_scb_sense_busaddr_print;
 #else
@@ -2364,13 +1531,6 @@ ahd_reg_print_t ahd_scb_next2_print;
     ahd_print_register(NULL, 0, "SCB_NEXT2", 0x1ae, regvalue, cur_col, wrap)
 #endif
 
-#if AIC_DEBUG_REGISTERS
-ahd_reg_print_t ahd_scb_spare_print;
-#else
-#define ahd_scb_spare_print(regvalue, cur_col, wrap) \
-    ahd_print_register(NULL, 0, "SCB_SPARE", 0x1b0, regvalue, cur_col, wrap)
-#endif
-
 #if AIC_DEBUG_REGISTERS
 ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #else
@@ -2557,10 +1717,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define        SG_CACHE_PRE                    0x1b
 
-#define        LQIN                            0x20
-
 #define        TYPEPTR                         0x20
 
+#define        LQIN                            0x20
+
 #define        TAGPTR                          0x21
 
 #define        LUNPTR                          0x22
@@ -2620,14 +1780,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                SINGLECMD               0x02
 #define                ABORTPENDING            0x01
 
-#define        SCSBIST0                        0x39
-#define                GSBISTERR               0x40
-#define                GSBISTDONE              0x20
-#define                GSBISTRUN               0x10
-#define                OSBISTERR               0x04
-#define                OSBISTDONE              0x02
-#define                OSBISTRUN               0x01
-
 #define        LQCTL2                          0x39
 #define                LQIRETRY                0x80
 #define                LQICONTINUE             0x40
@@ -2638,10 +1790,13 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                LQOTOIDLE               0x02
 #define                LQOPAUSE                0x01
 
-#define        SCSBIST1                        0x3a
-#define                NTBISTERR               0x04
-#define                NTBISTDONE              0x02
-#define                NTBISTRUN               0x01
+#define        SCSBIST0                        0x39
+#define                GSBISTERR               0x40
+#define                GSBISTDONE              0x20
+#define                GSBISTRUN               0x10
+#define                OSBISTERR               0x04
+#define                OSBISTDONE              0x02
+#define                OSBISTRUN               0x01
 
 #define        SCSISEQ0                        0x3a
 #define                TEMODEO                 0x80
@@ -2650,8 +1805,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                FORCEBUSFREE            0x10
 #define                SCSIRSTO                0x01
 
+#define        SCSBIST1                        0x3a
+#define                NTBISTERR               0x04
+#define                NTBISTDONE              0x02
+#define                NTBISTRUN               0x01
+
 #define        SCSISEQ1                        0x3b
 
+#define        BUSINITID                       0x3c
+
 #define        SXFRCTL0                        0x3c
 #define                DFON                    0x80
 #define                DFPEXP                  0x40
@@ -2660,8 +1822,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define        DLCOUNT                         0x3c
 
-#define        BUSINITID                       0x3c
-
 #define        SXFRCTL1                        0x3d
 #define                BITBUCKET               0x80
 #define                ENSACHK                 0x40
@@ -2686,6 +1846,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                CURRFIFO_1              0x01
 #define                CURRFIFO_0              0x00
 
+#define        MULTARGID                       0x40
+
 #define        SCSISIGO                        0x40
 #define                CDO                     0x80
 #define                IOO                     0x40
@@ -2696,8 +1858,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                REQO                    0x02
 #define                ACKO                    0x01
 
-#define        MULTARGID                       0x40
-
 #define        SCSISIGI                        0x41
 #define                ATNI                    0x10
 #define                SELI                    0x08
@@ -2744,15 +1904,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                ENAB20                  0x04
 #define                SELWIDE                 0x02
 
-#define        CLRSINT0                        0x4b
-#define                CLRSELDO                0x40
-#define                CLRSELDI                0x20
-#define                CLRSELINGO              0x10
-#define                CLRIOERR                0x08
-#define                CLROVERRUN              0x04
-#define                CLRSPIORDY              0x02
-#define                CLRARBDO                0x01
-
 #define        SSTAT0                          0x4b
 #define                TARGET                  0x80
 #define                SELDO                   0x40
@@ -2772,14 +1923,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                ENSPIORDY               0x02
 #define                ENARBDO                 0x01
 
-#define        CLRSINT1                        0x4c
-#define                CLRSELTIMEO             0x80
-#define                CLRATNO                 0x40
-#define                CLRSCSIRSTI             0x20
-#define                CLRBUSFREE              0x08
-#define                CLRSCSIPERR             0x04
-#define                CLRSTRB2FAST            0x02
-#define                CLRREQINIT              0x01
+#define        CLRSINT0                        0x4b
+#define                CLRSELDO                0x40
+#define                CLRSELDI                0x20
+#define                CLRSELINGO              0x10
+#define                CLRIOERR                0x08
+#define                CLROVERRUN              0x04
+#define                CLRSPIORDY              0x02
+#define                CLRARBDO                0x01
 
 #define        SSTAT1                          0x4c
 #define                SELTO                   0x80
@@ -2791,6 +1942,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                STRB2FAST               0x02
 #define                REQINIT                 0x01
 
+#define        CLRSINT1                        0x4c
+#define                CLRSELTIMEO             0x80
+#define                CLRATNO                 0x40
+#define                CLRSCSIRSTI             0x20
+#define                CLRBUSFREE              0x08
+#define                CLRSCSIPERR             0x04
+#define                CLRSTRB2FAST            0x02
+#define                CLRREQINIT              0x01
+
 #define        SSTAT2                          0x4d
 #define                BUSFREETIME             0xc0
 #define                NONPACKREQ              0x20
@@ -2838,14 +1998,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                LQIATNLQ                0x02
 #define                LQIATNCMD               0x01
 
-#define        CLRLQIINT0                      0x50
-#define                CLRLQIATNQAS            0x20
-#define                CLRLQICRCT1             0x10
-#define                CLRLQICRCT2             0x08
-#define                CLRLQIBADLQT            0x04
-#define                CLRLQIATNLQ             0x02
-#define                CLRLQIATNCMD            0x01
-
 #define        LQIMODE0                        0x50
 #define                ENLQIATNQASK            0x20
 #define                ENLQICRCT1              0x10
@@ -2854,6 +2006,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                ENLQIATNLQ              0x02
 #define                ENLQIATNCMD             0x01
 
+#define        CLRLQIINT0                      0x50
+#define                CLRLQIATNQAS            0x20
+#define                CLRLQICRCT1             0x10
+#define                CLRLQICRCT2             0x08
+#define                CLRLQIBADLQT            0x04
+#define                CLRLQIATNLQ             0x02
+#define                CLRLQIATNCMD            0x01
+
 #define        LQIMODE1                        0x51
 #define                ENLQIPHASE_LQ           0x80
 #define                ENLQIPHASE_NLQ          0x40
@@ -2976,6 +2136,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define        LQOSCSCTL                       0x5a
 #define                LQOH2A_VERSION          0x80
+#define                LQOBUSETDLY             0x40
+#define                LQONOHOLDLACK           0x02
 #define                LQONOCHKOVER            0x01
 
 #define        NEXTSCB                         0x5a
@@ -2998,8 +2160,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                CFG4ICMD                0x02
 #define                CFG4TCMD                0x01
 
-#define        CURRSCB                         0x5c
-
 #define        SEQIMODE                        0x5c
 #define                ENCTXTDONE              0x40
 #define                ENSAVEPTRS              0x20
@@ -3009,6 +2169,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                ENCFG4ICMD              0x02
 #define                ENCFG4TCMD              0x01
 
+#define        CURRSCB                         0x5c
+
 #define        MDFFSTAT                        0x5d
 #define                SHCNTNEGATIVE           0x40
 #define                SHCNTMINUS1             0x20
@@ -3023,29 +2185,29 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define        DFFTAG                          0x5e
 
-#define        LASTSCB                         0x5e
-
 #define        SCSITEST                        0x5e
 #define                CNTRTEST                0x08
 #define                SEL_TXPLL_DEBUG         0x04
 
+#define        LASTSCB                         0x5e
+
 #define        IOPDNCTL                        0x5f
 #define                DISABLE_OE              0x80
 #define                PDN_IDIST               0x04
 #define                PDN_DIFFSENSE           0x01
 
+#define        DGRPCRCI                        0x60
+
 #define        SHADDR                          0x60
 
 #define        NEGOADDR                        0x60
 
-#define        DGRPCRCI                        0x60
-
 #define        NEGPERIOD                       0x61
 
-#define        PACKCRCI                        0x62
-
 #define        NEGOFFSET                       0x62
 
+#define        PACKCRCI                        0x62
+
 #define        NEGPPROPTS                      0x63
 #define                PPROPT_PACE             0x08
 #define                PPROPT_QAS              0x04
@@ -3066,6 +2228,7 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define        ANNEXDAT                        0x66
 
 #define        SCSCHKN                         0x66
+#define                BIDICHKDIS              0x80
 #define                STSELSKIDDIS            0x40
 #define                CURRFIFODEF             0x20
 #define                WIDERESEN               0x10
@@ -3090,6 +2253,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define        SELOID                          0x6b
 
+#define        FAIRNESS                        0x6c
+
 #define        PLL400CTL0                      0x6c
 #define                PLL_VCOSEL              0x80
 #define                PLL_PWDN                0x40
@@ -3099,8 +2264,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                PLL_DLPF                0x02
 #define                PLL_ENFBM               0x01
 
-#define        FAIRNESS                        0x6c
-
 #define        PLL400CTL1                      0x6d
 #define                PLL_CNTEN               0x80
 #define                PLL_CNTCLR              0x40
@@ -3112,25 +2275,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define        HADDR                           0x70
 
+#define        HODMAADR                        0x70
+
 #define        PLLDELAY                        0x70
 #define                SPLIT_DROP_REQ          0x80
 
-#define        HODMAADR                        0x70
+#define        HCNT                            0x78
 
 #define        HODMACNT                        0x78
 
-#define        HCNT                            0x78
-
 #define        HODMAEN                         0x7a
 
-#define        SCBHADDR                        0x7c
-
 #define        SGHADDR                         0x7c
 
-#define        SCBHCNT                         0x84
+#define        SCBHADDR                        0x7c
 
 #define        SGHCNT                          0x84
 
+#define        SCBHCNT                         0x84
+
 #define        DFF_THRSH                       0x88
 #define                WR_DFTHRSH              0x70
 #define                RD_DFTHRSH              0x07
@@ -3163,6 +2326,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define        CMCRXMSG0                       0x90
 
+#define        OVLYRXMSG0                      0x90
+
+#define        DCHRXMSG0                       0x90
+
 #define        ROENABLE                        0x90
 #define                MSIROEN                 0x20
 #define                OVLYROEN                0x10
@@ -3171,11 +2338,11 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                DCH1ROEN                0x02
 #define                DCH0ROEN                0x01
 
-#define        OVLYRXMSG0                      0x90
+#define        OVLYRXMSG1                      0x91
 
-#define        DCHRXMSG0                       0x90
+#define        CMCRXMSG1                       0x91
 
-#define        OVLYRXMSG1                      0x91
+#define        DCHRXMSG1                       0x91
 
 #define        NSENABLE                        0x91
 #define                MSINSEN                 0x20
@@ -3185,10 +2352,6 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                DCH1NSEN                0x02
 #define                DCH0NSEN                0x01
 
-#define        CMCRXMSG1                       0x91
-
-#define        DCHRXMSG1                       0x91
-
 #define        DCHRXMSG2                       0x92
 
 #define        CMCRXMSG2                       0x92
@@ -3212,24 +2375,24 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                TSCSERREN               0x02
 #define                CMPABCDIS               0x01
 
+#define        CMCSEQBCNT                      0x94
+
 #define        OVLYSEQBCNT                     0x94
 
 #define        DCHSEQBCNT                      0x94
 
-#define        CMCSEQBCNT                      0x94
-
-#define        CMCSPLTSTAT0                    0x96
-
 #define        DCHSPLTSTAT0                    0x96
 
 #define        OVLYSPLTSTAT0                   0x96
 
-#define        CMCSPLTSTAT1                    0x97
+#define        CMCSPLTSTAT0                    0x96
 
 #define        OVLYSPLTSTAT1                   0x97
 
 #define        DCHSPLTSTAT1                    0x97
 
+#define        CMCSPLTSTAT1                    0x97
+
 #define        SGRXMSG0                        0x98
 #define                CDNUM                   0xf8
 #define                CFNUM                   0x07
@@ -3257,18 +2420,15 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                TAG_NUM                 0x1f
 #define                RLXORD                  0x10
 
-#define        SGSEQBCNT                       0x9c
-
 #define        SLVSPLTOUTATTR0                 0x9c
 #define                LOWER_BCNT              0xff
 
+#define        SGSEQBCNT                       0x9c
+
 #define        SLVSPLTOUTATTR1                 0x9d
 #define                CMPLT_DNUM              0xf8
 #define                CMPLT_FNUM              0x07
 
-#define        SLVSPLTOUTATTR2                 0x9e
-#define                CMPLT_BNUM              0xff
-
 #define        SGSPLTSTAT0                     0x9e
 #define                STAETERM                0x80
 #define                SCBCERR                 0x40
@@ -3279,6 +2439,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                RXSCEMSG                0x02
 #define                RXSPLTRSP               0x01
 
+#define        SLVSPLTOUTATTR2                 0x9e
+#define                CMPLT_BNUM              0xff
+
 #define        SGSPLTSTAT1                     0x9f
 #define                RXDATABUCKET            0x01
 
@@ -3334,10 +2497,10 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define        CCSGADDR                        0xac
 
-#define        CCSCBADR_BK                     0xac
-
 #define        CCSCBADDR                       0xac
 
+#define        CCSCBADR_BK                     0xac
+
 #define        CMC_RAMBIST                     0xad
 #define                SG_ELEMENT_SIZE         0x80
 #define                SCBRAMBIST_FAIL         0x40
@@ -3391,9 +2554,9 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define        SEEDAT                          0xbc
 
 #define        SEECTL                          0xbe
+#define                SEEOP_EWDS              0x40
 #define                SEEOP_WALL              0x40
 #define                SEEOP_EWEN              0x40
-#define                SEEOP_EWDS              0x40
 #define                SEEOPCODE               0x70
 #define                SEERST                  0x02
 #define                SEESTART                0x01
@@ -3410,25 +2573,25 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define        SCBCNT                          0xbf
 
-#define        DFWADDR                         0xc0
-
 #define        DSPFLTRCTL                      0xc0
 #define                FLTRDISABLE             0x20
 #define                EDGESENSE               0x10
 #define                DSPFCNTSEL              0x0f
 
+#define        DFWADDR                         0xc0
+
 #define        DSPDATACTL                      0xc1
 #define                BYPASSENAB              0x80
 #define                DESQDIS                 0x10
 #define                RCVROFFSTDIS            0x04
 #define                XMITOFFSTDIS            0x02
 
-#define        DFRADDR                         0xc2
-
 #define        DSPREQCTL                       0xc2
 #define                MANREQCTL               0xc0
 #define                MANREQDLY               0x3f
 
+#define        DFRADDR                         0xc2
+
 #define        DSPACKCTL                       0xc3
 #define                MANACKCTL               0xc0
 #define                MANACKDLY               0x3f
@@ -3449,14 +2612,14 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define        WRTBIASCALC                     0xc7
 
-#define        RCVRBIASCALC                    0xc8
-
 #define        DFPTRS                          0xc8
 
-#define        SKEWCALC                        0xc9
+#define        RCVRBIASCALC                    0xc8
 
 #define        DFBKPTR                         0xc9
 
+#define        SKEWCALC                        0xc9
+
 #define        DFDBCTL                         0xcb
 #define                DFF_CIO_WR_RDY          0x20
 #define                DFF_CIO_RD_RDY          0x10
@@ -3541,12 +2704,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define        ACCUM_SAVE                      0xfa
 
-#define        WAITING_SCB_TAILS               0x100
-
 #define        AHD_PCI_CONFIG_BASE             0x100
 
 #define        SRAM_BASE                       0x100
 
+#define        WAITING_SCB_TAILS               0x100
+
 #define        WAITING_TID_HEAD                0x120
 
 #define        WAITING_TID_TAIL                0x122
@@ -3575,8 +2738,8 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define                PRELOADEN               0x80
 #define                WIDEODD                 0x40
 #define                SCSIEN                  0x20
-#define                SDMAEN                  0x10
 #define                SDMAENACK               0x10
+#define                SDMAEN                  0x10
 #define                HDMAEN                  0x08
 #define                HDMAENACK               0x08
 #define                DIRECTION               0x04
@@ -3674,12 +2837,12 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 
 #define        MK_MESSAGE_SCSIID               0x162
 
-#define        SCB_BASE                        0x180
-
 #define        SCB_RESIDUAL_DATACNT            0x180
 #define        SCB_CDB_STORE                   0x180
 #define        SCB_HOST_CDB_PTR                0x180
 
+#define        SCB_BASE                        0x180
+
 #define        SCB_RESIDUAL_SGPTR              0x184
 #define                SG_ADDR_MASK            0xf8
 #define                SG_OVERRUN_RESID        0x02
@@ -3747,6 +2910,17 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define        SCB_DISCONNECTED_LISTS          0x1b8
 
 
+#define        CMD_GROUP_CODE_SHIFT    0x05
+#define        STIMESEL_MIN    0x18
+#define        STIMESEL_SHIFT  0x03
+#define        INVALID_ADDR    0x80
+#define        AHD_PRECOMP_MASK        0x07
+#define        TARGET_DATA_IN  0x01
+#define        CCSCBADDR_MAX   0x80
+#define        NUMDSPS         0x14
+#define        SEEOP_EWEN_ADDR 0xc0
+#define        AHD_ANNEXCOL_PER_DEV0   0x04
+#define        DST_MODE_SHIFT  0x04
 #define        AHD_TIMER_MAX_US        0x18ffe7
 #define        AHD_TIMER_MAX_TICKS     0xffff
 #define        AHD_SENSE_BUFSIZE       0x100
@@ -3781,43 +2955,32 @@ ahd_reg_print_t ahd_scb_disconnected_lists_print;
 #define        LUNLEN_SINGLE_LEVEL_LUN 0x0f
 #define        NVRAM_SCB_OFFSET        0x2c
 #define        STATUS_PKT_SENSE        0xff
-#define        CMD_GROUP_CODE_SHIFT    0x05
 #define        MAX_OFFSET_PACED_BUG    0x7f
 #define        STIMESEL_BUG_ADJ        0x08
-#define        STIMESEL_MIN    0x18
-#define        STIMESEL_SHIFT  0x03
 #define        CCSGRAM_MAXSEGS 0x10
-#define        INVALID_ADDR    0x80
 #define        SEEOP_ERAL_ADDR 0x80
 #define        AHD_SLEWRATE_DEF_REVB   0x08
 #define        AHD_PRECOMP_CUTBACK_17  0x04
-#define        AHD_PRECOMP_MASK        0x07
 #define        SRC_MODE_SHIFT  0x00
 #define        PKT_OVERRUN_BUFSIZE     0x200
 #define        SCB_TRANSFER_SIZE_1BYTE_LUN     0x30
-#define        TARGET_DATA_IN  0x01
 #define        HOST_MSG        0xff
 #define        MAX_OFFSET      0xfe
 #define        BUS_16_BIT      0x01
-#define        CCSCBADDR_MAX   0x80
-#define        NUMDSPS         0x14
-#define        SEEOP_EWEN_ADDR 0xc0
-#define        AHD_ANNEXCOL_PER_DEV0   0x04
-#define        DST_MODE_SHIFT  0x04
 
 
 /* Downloaded Constant Definitions */
+#define        SG_SIZEOF       0x04
+#define        SG_PREFETCH_ALIGN_MASK  0x02
+#define        SG_PREFETCH_CNT_LIMIT   0x01
 #define        CACHELINE_MASK  0x07
 #define        SCB_TRANSFER_SIZE       0x06
 #define        PKT_OVERRUN_BUFOFFSET   0x05
-#define        SG_SIZEOF       0x04
 #define        SG_PREFETCH_ADDR_MASK   0x03
-#define        SG_PREFETCH_ALIGN_MASK  0x02
-#define        SG_PREFETCH_CNT_LIMIT   0x01
 #define        SG_PREFETCH_CNT 0x00
 #define        DOWNLOAD_CONST_COUNT    0x08
 
 
 /* Exported Labels */
-#define        LABEL_seq_isr   0x28f
 #define        LABEL_timer_isr 0x28b
+#define        LABEL_seq_isr   0x28f
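
For orientation, the file that follows pairs each register with a table of { name, value, mask } entries which ahd_print_register() walks to render register dumps symbolically. The sketch below is illustrative only: the entry layout, helper names, and print loop are assumptions made for this example and are not the driver's actual implementation.

/*
 * Minimal, self-contained sketch of how a parse table of the form seen in
 * this diff could be consumed.  The struct layout and print_register()
 * are hypothetical stand-ins for ahd_reg_parse_entry_t / ahd_print_register().
 */
#include <stdio.h>

typedef struct {
	const char	*name;	/* symbolic name of the field            */
	unsigned	 value;	/* value the field takes when asserted   */
	unsigned	 mask;	/* bits the field occupies               */
} reg_parse_entry;

static const reg_parse_entry sstat0_table[] = {
	{ "ARBDO",   0x01, 0x01 },
	{ "SPIORDY", 0x02, 0x02 },
	{ "SELDO",   0x40, 0x40 },
	{ "TARGET",  0x80, 0x80 }
};

/* Print the register name, its raw value, and every field whose masked
 * bits match that field's defining value. */
static void
print_register(const reg_parse_entry *tbl, int n, const char *reg, unsigned val)
{
	int i;

	printf("%s[0x%02x]", reg, val);
	for (i = 0; i < n; i++)
		if ((val & tbl[i].mask) == tbl[i].value)
			printf(" %s", tbl[i].name);
	printf("\n");
}

int
main(void)
{
	/* Prints: SSTAT0[0x41] ARBDO SELDO */
	print_register(sstat0_table, 4, "SSTAT0", 0x41);
	return 0;
}

Under this reading, the table reshuffling in the diff below only changes declaration order and const-qualification; the name/value/mask contents each printer consumes stay the same except where new bits (e.g. BIDICHKDIS, LQOBUSETDLY) are added and the entry counts passed to the print helper are bumped to match.
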
index db38a61a8cb4f2cf15160f4406a4749fcb716ad9..c4c8a96bf5a36b53003f8ce15549760f94dfd134 100644 (file)
@@ -8,7 +8,7 @@
 
 #include "aic79xx_osm.h"
 
-static ahd_reg_parse_entry_t MODE_PTR_parse_table[] = {
+static const ahd_reg_parse_entry_t MODE_PTR_parse_table[] = {
        { "SRC_MODE",           0x07, 0x07 },
        { "DST_MODE",           0x70, 0x70 }
 };
@@ -20,7 +20,7 @@ ahd_mode_ptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x00, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t INTSTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t INTSTAT_parse_table[] = {
        { "SPLTINT",            0x01, 0x01 },
        { "CMDCMPLT",           0x02, 0x02 },
        { "SEQINT",             0x04, 0x04 },
@@ -39,7 +39,7 @@ ahd_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x01, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SEQINTCODE_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQINTCODE_parse_table[] = {
        { "NO_SEQINT",          0x00, 0xff },
        { "BAD_PHASE",          0x01, 0xff },
        { "SEND_REJECT",        0x02, 0xff },
@@ -76,7 +76,7 @@ ahd_seqintcode_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x02, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRINT_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRINT_parse_table[] = {
        { "CLRSPLTINT",         0x01, 0x01 },
        { "CLRCMDINT",          0x02, 0x02 },
        { "CLRSEQINT",          0x04, 0x04 },
@@ -94,7 +94,7 @@ ahd_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x03, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t ERROR_parse_table[] = {
+static const ahd_reg_parse_entry_t ERROR_parse_table[] = {
        { "DSCTMOUT",           0x02, 0x02 },
        { "ILLOPCODE",          0x04, 0x04 },
        { "SQPARERR",           0x08, 0x08 },
@@ -111,24 +111,7 @@ ahd_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x04, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRERR_parse_table[] = {
-       { "CLRDSCTMOUT",        0x02, 0x02 },
-       { "CLRILLOPCODE",       0x04, 0x04 },
-       { "CLRSQPARERR",        0x08, 0x08 },
-       { "CLRDPARERR",         0x10, 0x10 },
-       { "CLRMPARERR",         0x20, 0x20 },
-       { "CLRCIOACCESFAIL",    0x40, 0x40 },
-       { "CLRCIOPARERR",       0x80, 0x80 }
-};
-
-int
-ahd_clrerr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(CLRERR_parse_table, 7, "CLRERR",
-           0x04, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t HCNTRL_parse_table[] = {
+static const ahd_reg_parse_entry_t HCNTRL_parse_table[] = {
        { "CHIPRST",            0x01, 0x01 },
        { "CHIPRSTACK",         0x01, 0x01 },
        { "INTEN",              0x02, 0x02 },
@@ -160,7 +143,7 @@ ahd_hescb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x08, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
+static const ahd_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
        { "ENINT_COALESCE",     0x40, 0x40 },
        { "HOST_TQINPOS",       0x80, 0x80 }
 };
@@ -172,7 +155,7 @@ ahd_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x0b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SEQINTSTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQINTSTAT_parse_table[] = {
        { "SEQ_SPLTINT",        0x01, 0x01 },
        { "SEQ_PCIINT",         0x02, 0x02 },
        { "SEQ_SCSIINT",        0x04, 0x04 },
@@ -187,7 +170,7 @@ ahd_seqintstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x0c, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRSEQINTSTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSEQINTSTAT_parse_table[] = {
        { "CLRSEQ_SPLTINT",     0x01, 0x01 },
        { "CLRSEQ_PCIINT",      0x02, 0x02 },
        { "CLRSEQ_SCSIINT",     0x04, 0x04 },
@@ -230,7 +213,7 @@ ahd_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x14, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
+static const ahd_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
        { "SCB_QSIZE_4",        0x00, 0x0f },
        { "SCB_QSIZE_8",        0x01, 0x0f },
        { "SCB_QSIZE_16",       0x02, 0x0f },
@@ -258,7 +241,7 @@ ahd_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x16, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t INTCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t INTCTL_parse_table[] = {
        { "SPLTINTEN",          0x01, 0x01 },
        { "SEQINTEN",           0x02, 0x02 },
        { "SCSIINTEN",          0x04, 0x04 },
@@ -276,7 +259,7 @@ ahd_intctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x18, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t DFCNTRL_parse_table[] = {
+static const ahd_reg_parse_entry_t DFCNTRL_parse_table[] = {
        { "DIRECTIONEN",        0x01, 0x01 },
        { "FIFOFLUSH",          0x02, 0x02 },
        { "FIFOFLUSHACK",       0x02, 0x02 },
@@ -297,7 +280,7 @@ ahd_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x19, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
+static const ahd_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
        { "CIOPARCKEN",         0x01, 0x01 },
        { "DISABLE_TWATE",      0x02, 0x02 },
        { "EXTREQLCK",          0x10, 0x10 },
@@ -313,7 +296,7 @@ ahd_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x19, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t DFSTATUS_parse_table[] = {
+static const ahd_reg_parse_entry_t DFSTATUS_parse_table[] = {
        { "FIFOEMP",            0x01, 0x01 },
        { "FIFOFULL",           0x02, 0x02 },
        { "DFTHRESH",           0x04, 0x04 },
@@ -330,7 +313,7 @@ ahd_dfstatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x1a, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
+static const ahd_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
        { "LAST_SEG_DONE",      0x01, 0x01 },
        { "LAST_SEG",           0x02, 0x02 },
        { "ODD_SEG",            0x04, 0x04 },
@@ -344,20 +327,7 @@ ahd_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x1b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t ARBCTL_parse_table[] = {
-       { "USE_TIME",           0x07, 0x07 },
-       { "RETRY_SWEN",         0x08, 0x08 },
-       { "RESET_HARB",         0x80, 0x80 }
-};
-
-int
-ahd_arbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(ARBCTL_parse_table, 3, "ARBCTL",
-           0x1b, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
+static const ahd_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
        { "LAST_SEG",           0x02, 0x02 },
        { "ODD_SEG",            0x04, 0x04 },
        { "SG_ADDR_MASK",       0xf8, 0xf8 }
@@ -377,20 +347,6 @@ ahd_lqin_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x20, regvalue, cur_col, wrap));
 }
 
-int
-ahd_typeptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "TYPEPTR",
-           0x20, regvalue, cur_col, wrap));
-}
-
-int
-ahd_tagptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "TAGPTR",
-           0x21, regvalue, cur_col, wrap));
-}
-
 int
 ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -398,20 +354,6 @@ ahd_lunptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x22, regvalue, cur_col, wrap));
 }
 
-int
-ahd_datalenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "DATALENPTR",
-           0x23, regvalue, cur_col, wrap));
-}
-
-int
-ahd_statlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "STATLENPTR",
-           0x24, regvalue, cur_col, wrap));
-}
-
 int
 ahd_cmdlenptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -447,13 +389,6 @@ ahd_qnextptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x29, regvalue, cur_col, wrap));
 }
 
-int
-ahd_idptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "IDPTR",
-           0x2a, regvalue, cur_col, wrap));
-}
-
 int
 ahd_abrtbyteptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -468,28 +403,7 @@ ahd_abrtbitptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x2c, regvalue, cur_col, wrap));
 }
 
-int
-ahd_maxcmdbytes_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "MAXCMDBYTES",
-           0x2d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_maxcmd2rcv_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "MAXCMD2RCV",
-           0x2e, regvalue, cur_col, wrap));
-}
-
-int
-ahd_shortthresh_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "SHORTTHRESH",
-           0x2f, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LUNLEN_parse_table[] = {
+static const ahd_reg_parse_entry_t LUNLEN_parse_table[] = {
        { "ILUNLEN",            0x0f, 0x0f },
        { "TLUNLEN",            0xf0, 0xf0 }
 };
@@ -522,49 +436,7 @@ ahd_maxcmdcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x33, regvalue, cur_col, wrap));
 }
 
-int
-ahd_lqrsvd01_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "LQRSVD01",
-           0x34, regvalue, cur_col, wrap));
-}
-
-int
-ahd_lqrsvd16_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "LQRSVD16",
-           0x35, regvalue, cur_col, wrap));
-}
-
-int
-ahd_lqrsvd17_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "LQRSVD17",
-           0x36, regvalue, cur_col, wrap));
-}
-
-int
-ahd_cmdrsvd0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "CMDRSVD0",
-           0x37, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LQCTL0_parse_table[] = {
-       { "LQ0INITGCLT",        0x03, 0x03 },
-       { "LQ0TARGCLT",         0x0c, 0x0c },
-       { "LQIINITGCLT",        0x30, 0x30 },
-       { "LQITARGCLT",         0xc0, 0xc0 }
-};
-
-int
-ahd_lqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(LQCTL0_parse_table, 4, "LQCTL0",
-           0x38, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LQCTL1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQCTL1_parse_table[] = {
        { "ABORTPENDING",       0x01, 0x01 },
        { "SINGLECMD",          0x02, 0x02 },
        { "PCI2PCI",            0x04, 0x04 }
@@ -577,23 +449,7 @@ ahd_lqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x38, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSBIST0_parse_table[] = {
-       { "OSBISTRUN",          0x01, 0x01 },
-       { "OSBISTDONE",         0x02, 0x02 },
-       { "OSBISTERR",          0x04, 0x04 },
-       { "GSBISTRUN",          0x10, 0x10 },
-       { "GSBISTDONE",         0x20, 0x20 },
-       { "GSBISTERR",          0x40, 0x40 }
-};
-
-int
-ahd_scsbist0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SCSBIST0_parse_table, 6, "SCSBIST0",
-           0x39, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LQCTL2_parse_table[] = {
+static const ahd_reg_parse_entry_t LQCTL2_parse_table[] = {
        { "LQOPAUSE",           0x01, 0x01 },
        { "LQOTOIDLE",          0x02, 0x02 },
        { "LQOCONTINUE",        0x04, 0x04 },
@@ -611,20 +467,7 @@ ahd_lqctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x39, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSBIST1_parse_table[] = {
-       { "NTBISTRUN",          0x01, 0x01 },
-       { "NTBISTDONE",         0x02, 0x02 },
-       { "NTBISTERR",          0x04, 0x04 }
-};
-
-int
-ahd_scsbist1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SCSBIST1_parse_table, 3, "SCSBIST1",
-           0x3a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISEQ0_parse_table[] = {
        { "SCSIRSTO",           0x01, 0x01 },
        { "FORCEBUSFREE",       0x10, 0x10 },
        { "ENARBO",             0x20, 0x20 },
@@ -639,7 +482,7 @@ ahd_scsiseq0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x3a, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSISEQ1_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISEQ1_parse_table[] = {
        { "ALTSTIM",            0x01, 0x01 },
        { "ENAUTOATNP",         0x02, 0x02 },
        { "MANUALP",            0x0c, 0x0c },
@@ -655,7 +498,7 @@ ahd_scsiseq1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x3b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SXFRCTL0_parse_table[] = {
+static const ahd_reg_parse_entry_t SXFRCTL0_parse_table[] = {
        { "SPIOEN",             0x08, 0x08 },
        { "BIOSCANCELEN",       0x10, 0x10 },
        { "DFPEXP",             0x40, 0x40 },
@@ -669,21 +512,7 @@ ahd_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x3c, regvalue, cur_col, wrap));
 }
 
-int
-ahd_dlcount_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "DLCOUNT",
-           0x3c, regvalue, cur_col, wrap));
-}
-
-int
-ahd_businitid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "BUSINITID",
-           0x3c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SXFRCTL1_parse_table[] = {
+static const ahd_reg_parse_entry_t SXFRCTL1_parse_table[] = {
        { "STPWEN",             0x01, 0x01 },
        { "ACTNEGEN",           0x02, 0x02 },
        { "ENSTIMER",           0x04, 0x04 },
@@ -700,27 +529,7 @@ ahd_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x3d, regvalue, cur_col, wrap));
 }
 
-int
-ahd_bustargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "BUSTARGID",
-           0x3e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SXFRCTL2_parse_table[] = {
-       { "ASU",                0x07, 0x07 },
-       { "CMDDMAEN",           0x08, 0x08 },
-       { "AUTORSTDIS",         0x10, 0x10 }
-};
-
-int
-ahd_sxfrctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SXFRCTL2_parse_table, 3, "SXFRCTL2",
-           0x3e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t DFFSTAT_parse_table[] = {
        { "CURRFIFO_0",         0x00, 0x03 },
        { "CURRFIFO_1",         0x01, 0x03 },
        { "CURRFIFO_NONE",      0x03, 0x03 },
@@ -736,7 +545,14 @@ ahd_dffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x3f, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSISIGO_parse_table[] = {
+int
+ahd_multargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+       return (ahd_print_register(NULL, 0, "MULTARGID",
+           0x40, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SCSISIGO_parse_table[] = {
        { "P_DATAOUT",          0x00, 0xe0 },
        { "P_DATAOUT_DT",       0x20, 0xe0 },
        { "P_DATAIN",           0x40, 0xe0 },
@@ -763,14 +579,7 @@ ahd_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x40, regvalue, cur_col, wrap));
 }
 
-int
-ahd_multargid_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "MULTARGID",
-           0x40, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISIGI_parse_table[] = {
        { "P_DATAOUT",          0x00, 0xe0 },
        { "P_DATAOUT_DT",       0x20, 0xe0 },
        { "P_DATAIN",           0x40, 0xe0 },
@@ -797,7 +606,7 @@ ahd_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x41, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSIPHASE_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSIPHASE_parse_table[] = {
        { "DATA_OUT_PHASE",     0x01, 0x03 },
        { "DATA_IN_PHASE",      0x02, 0x03 },
        { "DATA_PHASE_MASK",    0x03, 0x03 },
@@ -814,13 +623,6 @@ ahd_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x42, regvalue, cur_col, wrap));
 }
 
-int
-ahd_scsidat0_img_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "SCSIDAT0_IMG",
-           0x43, regvalue, cur_col, wrap));
-}
-
 int
 ahd_scsidat_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -835,7 +637,7 @@ ahd_scsibus_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x46, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t TARGIDIN_parse_table[] = {
+static const ahd_reg_parse_entry_t TARGIDIN_parse_table[] = {
        { "TARGID",             0x0f, 0x0f },
        { "CLKOUT",             0x80, 0x80 }
 };
@@ -847,7 +649,7 @@ ahd_targidin_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x48, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SELID_parse_table[] = {
+static const ahd_reg_parse_entry_t SELID_parse_table[] = {
        { "ONEBIT",             0x08, 0x08 },
        { "SELID_MASK",         0xf0, 0xf0 }
 };
@@ -859,7 +661,7 @@ ahd_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x49, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t OPTIONMODE_parse_table[] = {
+static const ahd_reg_parse_entry_t OPTIONMODE_parse_table[] = {
        { "AUTO_MSGOUT_DE",     0x02, 0x02 },
        { "ENDGFORMCHK",        0x04, 0x04 },
        { "BUSFREEREV",         0x10, 0x10 },
@@ -876,7 +678,7 @@ ahd_optionmode_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x4a, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SBLKCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t SBLKCTL_parse_table[] = {
        { "SELWIDE",            0x02, 0x02 },
        { "ENAB20",             0x04, 0x04 },
        { "ENAB40",             0x08, 0x08 },
@@ -891,24 +693,7 @@ ahd_sblkctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x4a, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRSINT0_parse_table[] = {
-       { "CLRARBDO",           0x01, 0x01 },
-       { "CLRSPIORDY",         0x02, 0x02 },
-       { "CLROVERRUN",         0x04, 0x04 },
-       { "CLRIOERR",           0x08, 0x08 },
-       { "CLRSELINGO",         0x10, 0x10 },
-       { "CLRSELDI",           0x20, 0x20 },
-       { "CLRSELDO",           0x40, 0x40 }
-};
-
-int
-ahd_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(CLRSINT0_parse_table, 7, "CLRSINT0",
-           0x4b, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t SSTAT0_parse_table[] = {
        { "ARBDO",              0x01, 0x01 },
        { "SPIORDY",            0x02, 0x02 },
        { "OVERRUN",            0x04, 0x04 },
@@ -926,7 +711,7 @@ ahd_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x4b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
+static const ahd_reg_parse_entry_t SIMODE0_parse_table[] = {
        { "ENARBDO",            0x01, 0x01 },
        { "ENSPIORDY",          0x02, 0x02 },
        { "ENOVERRUN",          0x04, 0x04 },
@@ -943,24 +728,24 @@ ahd_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x4b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRSINT1_parse_table[] = {
-       { "CLRREQINIT",         0x01, 0x01 },
-       { "CLRSTRB2FAST",       0x02, 0x02 },
-       { "CLRSCSIPERR",        0x04, 0x04 },
-       { "CLRBUSFREE",         0x08, 0x08 },
-       { "CLRSCSIRSTI",        0x20, 0x20 },
-       { "CLRATNO",            0x40, 0x40 },
-       { "CLRSELTIMEO",        0x80, 0x80 }
+static const ahd_reg_parse_entry_t CLRSINT0_parse_table[] = {
+       { "CLRARBDO",           0x01, 0x01 },
+       { "CLRSPIORDY",         0x02, 0x02 },
+       { "CLROVERRUN",         0x04, 0x04 },
+       { "CLRIOERR",           0x08, 0x08 },
+       { "CLRSELINGO",         0x10, 0x10 },
+       { "CLRSELDI",           0x20, 0x20 },
+       { "CLRSELDO",           0x40, 0x40 }
 };
 
 int
-ahd_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-       return (ahd_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
-           0x4c, regvalue, cur_col, wrap));
+       return (ahd_print_register(CLRSINT0_parse_table, 7, "CLRSINT0",
+           0x4b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t SSTAT1_parse_table[] = {
        { "REQINIT",            0x01, 0x01 },
        { "STRB2FAST",          0x02, 0x02 },
        { "SCSIPERR",           0x04, 0x04 },
@@ -978,7 +763,24 @@ ahd_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x4c, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SSTAT2_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSINT1_parse_table[] = {
+       { "CLRREQINIT",         0x01, 0x01 },
+       { "CLRSTRB2FAST",       0x02, 0x02 },
+       { "CLRSCSIPERR",        0x04, 0x04 },
+       { "CLRBUSFREE",         0x08, 0x08 },
+       { "CLRSCSIRSTI",        0x20, 0x20 },
+       { "CLRATNO",            0x40, 0x40 },
+       { "CLRSELTIMEO",        0x80, 0x80 }
+};
+
+int
+ahd_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+       return (ahd_print_register(CLRSINT1_parse_table, 7, "CLRSINT1",
+           0x4c, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t SSTAT2_parse_table[] = {
        { "BUSFREE_LQO",        0x40, 0xc0 },
        { "BUSFREE_DFF0",       0x80, 0xc0 },
        { "BUSFREE_DFF1",       0xc0, 0xc0 },
@@ -998,20 +800,7 @@ ahd_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x4d, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SIMODE2_parse_table[] = {
-       { "ENDMADONE",          0x01, 0x01 },
-       { "ENSDONE",            0x02, 0x02 },
-       { "ENWIDE_RES",         0x04, 0x04 }
-};
-
-int
-ahd_simode2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SIMODE2_parse_table, 3, "SIMODE2",
-           0x4d, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CLRSINT2_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSINT2_parse_table[] = {
        { "CLRDMADONE",         0x01, 0x01 },
        { "CLRSDONE",           0x02, 0x02 },
        { "CLRWIDE_RES",        0x04, 0x04 },
@@ -1025,7 +814,7 @@ ahd_clrsint2_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x4d, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t PERRDIAG_parse_table[] = {
+static const ahd_reg_parse_entry_t PERRDIAG_parse_table[] = {
        { "DTERR",              0x01, 0x01 },
        { "DGFORMERR",          0x02, 0x02 },
        { "CRCERR",             0x04, 0x04 },
@@ -1064,7 +853,7 @@ ahd_lqostate_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x4f, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQISTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQISTAT0_parse_table[] = {
        { "LQIATNCMD",          0x01, 0x01 },
        { "LQIATNLQ",           0x02, 0x02 },
        { "LQIBADLQT",          0x04, 0x04 },
@@ -1080,23 +869,7 @@ ahd_lqistat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x50, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRLQIINT0_parse_table[] = {
-       { "CLRLQIATNCMD",       0x01, 0x01 },
-       { "CLRLQIATNLQ",        0x02, 0x02 },
-       { "CLRLQIBADLQT",       0x04, 0x04 },
-       { "CLRLQICRCT2",        0x08, 0x08 },
-       { "CLRLQICRCT1",        0x10, 0x10 },
-       { "CLRLQIATNQAS",       0x20, 0x20 }
-};
-
-int
-ahd_clrlqiint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(CLRLQIINT0_parse_table, 6, "CLRLQIINT0",
-           0x50, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t LQIMODE0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQIMODE0_parse_table[] = {
        { "ENLQIATNCMD",        0x01, 0x01 },
        { "ENLQIATNLQ",         0x02, 0x02 },
        { "ENLQIBADLQT",        0x04, 0x04 },
@@ -1112,7 +885,23 @@ ahd_lqimode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x50, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQIMODE1_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQIINT0_parse_table[] = {
+       { "CLRLQIATNCMD",       0x01, 0x01 },
+       { "CLRLQIATNLQ",        0x02, 0x02 },
+       { "CLRLQIBADLQT",       0x04, 0x04 },
+       { "CLRLQICRCT2",        0x08, 0x08 },
+       { "CLRLQICRCT1",        0x10, 0x10 },
+       { "CLRLQIATNQAS",       0x20, 0x20 }
+};
+
+int
+ahd_clrlqiint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+       return (ahd_print_register(CLRLQIINT0_parse_table, 6, "CLRLQIINT0",
+           0x50, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t LQIMODE1_parse_table[] = {
        { "ENLQIOVERI_NLQ",     0x01, 0x01 },
        { "ENLQIOVERI_LQ",      0x02, 0x02 },
        { "ENLQIBADLQI",        0x04, 0x04 },
@@ -1130,7 +919,7 @@ ahd_lqimode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x51, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQISTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQISTAT1_parse_table[] = {
        { "LQIOVERI_NLQ",       0x01, 0x01 },
        { "LQIOVERI_LQ",        0x02, 0x02 },
        { "LQIBADLQI",          0x04, 0x04 },
@@ -1148,7 +937,7 @@ ahd_lqistat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x51, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRLQIINT1_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQIINT1_parse_table[] = {
        { "CLRLQIOVERI_NLQ",    0x01, 0x01 },
        { "CLRLQIOVERI_LQ",     0x02, 0x02 },
        { "CLRLQIBADLQI",       0x04, 0x04 },
@@ -1166,7 +955,7 @@ ahd_clrlqiint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x51, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQISTAT2_parse_table[] = {
+static const ahd_reg_parse_entry_t LQISTAT2_parse_table[] = {
        { "LQIGSAVAIL",         0x01, 0x01 },
        { "LQISTOPCMD",         0x02, 0x02 },
        { "LQISTOPLQ",          0x04, 0x04 },
@@ -1184,7 +973,7 @@ ahd_lqistat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x52, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SSTAT3_parse_table[] = {
+static const ahd_reg_parse_entry_t SSTAT3_parse_table[] = {
        { "OSRAMPERR",          0x01, 0x01 },
        { "NTRAMPERR",          0x02, 0x02 }
 };
@@ -1196,7 +985,7 @@ ahd_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x53, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SIMODE3_parse_table[] = {
+static const ahd_reg_parse_entry_t SIMODE3_parse_table[] = {
        { "ENOSRAMPERR",        0x01, 0x01 },
        { "ENNTRAMPERR",        0x02, 0x02 }
 };
@@ -1208,7 +997,7 @@ ahd_simode3_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x53, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRSINT3_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSINT3_parse_table[] = {
        { "CLROSRAMPERR",       0x01, 0x01 },
        { "CLRNTRAMPERR",       0x02, 0x02 }
 };
@@ -1220,7 +1009,7 @@ ahd_clrsint3_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x53, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSTAT0_parse_table[] = {
        { "LQOTCRC",            0x01, 0x01 },
        { "LQOATNPKT",          0x02, 0x02 },
        { "LQOATNLQ",           0x04, 0x04 },
@@ -1235,7 +1024,7 @@ ahd_lqostat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x54, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRLQOINT0_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQOINT0_parse_table[] = {
        { "CLRLQOTCRC",         0x01, 0x01 },
        { "CLRLQOATNPKT",       0x02, 0x02 },
        { "CLRLQOATNLQ",        0x04, 0x04 },
@@ -1250,7 +1039,7 @@ ahd_clrlqoint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x54, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQOMODE0_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOMODE0_parse_table[] = {
        { "ENLQOTCRC",          0x01, 0x01 },
        { "ENLQOATNPKT",        0x02, 0x02 },
        { "ENLQOATNLQ",         0x04, 0x04 },
@@ -1265,7 +1054,7 @@ ahd_lqomode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x54, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQOMODE1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOMODE1_parse_table[] = {
        { "ENLQOPHACHGINPKT",   0x01, 0x01 },
        { "ENLQOBUSFREE",       0x02, 0x02 },
        { "ENLQOBADQAS",        0x04, 0x04 },
@@ -1280,7 +1069,7 @@ ahd_lqomode1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x55, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSTAT1_parse_table[] = {
        { "LQOPHACHGINPKT",     0x01, 0x01 },
        { "LQOBUSFREE",         0x02, 0x02 },
        { "LQOBADQAS",          0x04, 0x04 },
@@ -1295,7 +1084,7 @@ ahd_lqostat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x55, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRLQOINT1_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRLQOINT1_parse_table[] = {
        { "CLRLQOPHACHGINPKT",  0x01, 0x01 },
        { "CLRLQOBUSFREE",      0x02, 0x02 },
        { "CLRLQOBADQAS",       0x04, 0x04 },
@@ -1310,7 +1099,7 @@ ahd_clrlqoint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x55, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSTAT2_parse_table[] = {
        { "LQOSTOP0",           0x01, 0x01 },
        { "LQOPHACHGOUTPKT",    0x02, 0x02 },
        { "LQOWAITFIFO",        0x10, 0x10 },
@@ -1331,7 +1120,7 @@ ahd_os_space_cnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x56, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SIMODE1_parse_table[] = {
+static const ahd_reg_parse_entry_t SIMODE1_parse_table[] = {
        { "ENREQINIT",          0x01, 0x01 },
        { "ENSTRB2FAST",        0x02, 0x02 },
        { "ENSCSIPERR",         0x04, 0x04 },
@@ -1356,7 +1145,7 @@ ahd_gsfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x58, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t DFFSXFRCTL_parse_table[] = {
        { "RSTCHN",             0x01, 0x01 },
        { "CLRCHN",             0x02, 0x02 },
        { "CLRSHCNT",           0x04, 0x04 },
@@ -1370,15 +1159,17 @@ ahd_dffsxfrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x5a, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LQOSCSCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t LQOSCSCTL_parse_table[] = {
        { "LQONOCHKOVER",       0x01, 0x01 },
+       { "LQONOHOLDLACK",      0x02, 0x02 },
+       { "LQOBUSETDLY",        0x40, 0x40 },
        { "LQOH2A_VERSION",     0x80, 0x80 }
 };
 
 int
 ahd_lqoscsctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-       return (ahd_print_register(LQOSCSCTL_parse_table, 2, "LQOSCSCTL",
+       return (ahd_print_register(LQOSCSCTL_parse_table, 4, "LQOSCSCTL",
            0x5a, regvalue, cur_col, wrap));
 }
 
@@ -1389,7 +1180,7 @@ ahd_nextscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x5a, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CLRSEQINTSRC_parse_table[] = {
+static const ahd_reg_parse_entry_t CLRSEQINTSRC_parse_table[] = {
        { "CLRCFG4TCMD",        0x01, 0x01 },
        { "CLRCFG4ICMD",        0x02, 0x02 },
        { "CLRCFG4TSTAT",       0x04, 0x04 },
@@ -1406,7 +1197,7 @@ ahd_clrseqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x5b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQINTSRC_parse_table[] = {
        { "CFG4TCMD",           0x01, 0x01 },
        { "CFG4ICMD",           0x02, 0x02 },
        { "CFG4TSTAT",          0x04, 0x04 },
@@ -1423,14 +1214,7 @@ ahd_seqintsrc_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x5b, regvalue, cur_col, wrap));
 }
 
-int
-ahd_currscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "CURRSCB",
-           0x5c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SEQIMODE_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQIMODE_parse_table[] = {
        { "ENCFG4TCMD",         0x01, 0x01 },
        { "ENCFG4ICMD",         0x02, 0x02 },
        { "ENCFG4TSTAT",        0x04, 0x04 },
@@ -1447,7 +1231,14 @@ ahd_seqimode_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x5c, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
+int
+ahd_currscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
+{
+       return (ahd_print_register(NULL, 0, "CURRSCB",
+           0x5c, regvalue, cur_col, wrap));
+}
+
+static const ahd_reg_parse_entry_t MDFFSTAT_parse_table[] = {
        { "FIFOFREE",           0x01, 0x01 },
        { "DATAINFIFO",         0x02, 0x02 },
        { "DLZERO",             0x04, 0x04 },
@@ -1464,24 +1255,6 @@ ahd_mdffstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x5d, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CRCCONTROL_parse_table[] = {
-       { "CRCVALCHKEN",        0x40, 0x40 }
-};
-
-int
-ahd_crccontrol_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(CRCCONTROL_parse_table, 1, "CRCCONTROL",
-           0x5d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfftag_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "DFFTAG",
-           0x5e, regvalue, cur_col, wrap));
-}
-
 int
 ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1489,31 +1262,6 @@ ahd_lastscb_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x5e, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSITEST_parse_table[] = {
-       { "SEL_TXPLL_DEBUG",    0x04, 0x04 },
-       { "CNTRTEST",           0x08, 0x08 }
-};
-
-int
-ahd_scsitest_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SCSITEST_parse_table, 2, "SCSITEST",
-           0x5e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t IOPDNCTL_parse_table[] = {
-       { "PDN_DIFFSENSE",      0x01, 0x01 },
-       { "PDN_IDIST",          0x04, 0x04 },
-       { "DISABLE_OE",         0x80, 0x80 }
-};
-
-int
-ahd_iopdnctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(IOPDNCTL_parse_table, 3, "IOPDNCTL",
-           0x5f, regvalue, cur_col, wrap));
-}
-
 int
 ahd_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1528,13 +1276,6 @@ ahd_negoaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x60, regvalue, cur_col, wrap));
 }
 
-int
-ahd_dgrpcrci_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "DGRPCRCI",
-           0x60, regvalue, cur_col, wrap));
-}
-
 int
 ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1543,20 +1284,13 @@ ahd_negperiod_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 int
-ahd_packcrci_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "PACKCRCI",
-           0x62, regvalue, cur_col, wrap));
-}
-
-int
-ahd_negoffset_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_negoffset_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
        return (ahd_print_register(NULL, 0, "NEGOFFSET",
            0x62, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t NEGPPROPTS_parse_table[] = {
+static const ahd_reg_parse_entry_t NEGPPROPTS_parse_table[] = {
        { "PPROPT_IUT",         0x01, 0x01 },
        { "PPROPT_DT",          0x02, 0x02 },
        { "PPROPT_QAS",         0x04, 0x04 },
@@ -1570,7 +1304,7 @@ ahd_negppropts_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x63, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t NEGCONOPTS_parse_table[] = {
+static const ahd_reg_parse_entry_t NEGCONOPTS_parse_table[] = {
        { "WIDEXFER",           0x01, 0x01 },
        { "ENAUTOATNO",         0x02, 0x02 },
        { "ENAUTOATNI",         0x04, 0x04 },
@@ -1601,20 +1335,21 @@ ahd_annexdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x66, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSCHKN_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSCHKN_parse_table[] = {
        { "LSTSGCLRDIS",        0x01, 0x01 },
        { "SHVALIDSTDIS",       0x02, 0x02 },
        { "DFFACTCLR",          0x04, 0x04 },
        { "SDONEMSKDIS",        0x08, 0x08 },
        { "WIDERESEN",          0x10, 0x10 },
        { "CURRFIFODEF",        0x20, 0x20 },
-       { "STSELSKIDDIS",       0x40, 0x40 }
+       { "STSELSKIDDIS",       0x40, 0x40 },
+       { "BIDICHKDIS",         0x80, 0x80 }
 };
 
 int
 ahd_scschkn_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-       return (ahd_print_register(SCSCHKN_parse_table, 7, "SCSCHKN",
+       return (ahd_print_register(SCSCHKN_parse_table, 8, "SCSCHKN",
            0x66, regvalue, cur_col, wrap));
 }
 
@@ -1625,23 +1360,6 @@ ahd_iownid_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x67, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t PLL960CTL0_parse_table[] = {
-       { "PLL_ENFBM",          0x01, 0x01 },
-       { "PLL_DLPF",           0x02, 0x02 },
-       { "PLL_ENLPF",          0x04, 0x04 },
-       { "PLL_ENLUD",          0x08, 0x08 },
-       { "PLL_NS",             0x30, 0x30 },
-       { "PLL_PWDN",           0x40, 0x40 },
-       { "PLL_VCOSEL",         0x80, 0x80 }
-};
-
-int
-ahd_pll960ctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(PLL960CTL0_parse_table, 7, "PLL960CTL0",
-           0x68, regvalue, cur_col, wrap));
-}
-
 int
 ahd_shcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1656,33 +1374,6 @@ ahd_townid_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x69, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t PLL960CTL1_parse_table[] = {
-       { "PLL_RST",            0x01, 0x01 },
-       { "PLL_CNTCLR",         0x40, 0x40 },
-       { "PLL_CNTEN",          0x80, 0x80 }
-};
-
-int
-ahd_pll960ctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(PLL960CTL1_parse_table, 3, "PLL960CTL1",
-           0x69, regvalue, cur_col, wrap));
-}
-
-int
-ahd_pll960cnt0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "PLL960CNT0",
-           0x6a, regvalue, cur_col, wrap));
-}
-
-int
-ahd_xsig_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "XSIG",
-           0x6a, regvalue, cur_col, wrap));
-}
-
 int
 ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1690,57 +1381,6 @@ ahd_seloid_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x6b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t PLL400CTL0_parse_table[] = {
-       { "PLL_ENFBM",          0x01, 0x01 },
-       { "PLL_DLPF",           0x02, 0x02 },
-       { "PLL_ENLPF",          0x04, 0x04 },
-       { "PLL_ENLUD",          0x08, 0x08 },
-       { "PLL_NS",             0x30, 0x30 },
-       { "PLL_PWDN",           0x40, 0x40 },
-       { "PLL_VCOSEL",         0x80, 0x80 }
-};
-
-int
-ahd_pll400ctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(PLL400CTL0_parse_table, 7, "PLL400CTL0",
-           0x6c, regvalue, cur_col, wrap));
-}
-
-int
-ahd_fairness_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "FAIRNESS",
-           0x6c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t PLL400CTL1_parse_table[] = {
-       { "PLL_RST",            0x01, 0x01 },
-       { "PLL_CNTCLR",         0x40, 0x40 },
-       { "PLL_CNTEN",          0x80, 0x80 }
-};
-
-int
-ahd_pll400ctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(PLL400CTL1_parse_table, 3, "PLL400CTL1",
-           0x6d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_unfairness_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "UNFAIRNESS",
-           0x6e, regvalue, cur_col, wrap));
-}
-
-int
-ahd_pll400cnt0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "PLL400CNT0",
-           0x6e, regvalue, cur_col, wrap));
-}
-
 int
 ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1748,31 +1388,6 @@ ahd_haddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x70, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t PLLDELAY_parse_table[] = {
-       { "SPLIT_DROP_REQ",     0x80, 0x80 }
-};
-
-int
-ahd_plldelay_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(PLLDELAY_parse_table, 1, "PLLDELAY",
-           0x70, regvalue, cur_col, wrap));
-}
-
-int
-ahd_hodmaadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "HODMAADR",
-           0x70, regvalue, cur_col, wrap));
-}
-
-int
-ahd_hodmacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "HODMACNT",
-           0x78, regvalue, cur_col, wrap));
-}
-
 int
 ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1781,10 +1396,10 @@ ahd_hcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 int
-ahd_hodmaen_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_sghaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-       return (ahd_print_register(NULL, 0, "HODMAEN",
-           0x7a, regvalue, cur_col, wrap));
+       return (ahd_print_register(NULL, 0, "SGHADDR",
+           0x7c, regvalue, cur_col, wrap));
 }
 
 int
@@ -1795,10 +1410,10 @@ ahd_scbhaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 int
-ahd_sghaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_sghcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-       return (ahd_print_register(NULL, 0, "SGHADDR",
-           0x7c, regvalue, cur_col, wrap));
+       return (ahd_print_register(NULL, 0, "SGHCNT",
+           0x84, regvalue, cur_col, wrap));
 }
 
 int
@@ -1808,14 +1423,7 @@ ahd_scbhcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x84, regvalue, cur_col, wrap));
 }
 
-int
-ahd_sghcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "SGHCNT",
-           0x84, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DFF_THRSH_parse_table[] = {
+static const ahd_reg_parse_entry_t DFF_THRSH_parse_table[] = {
        { "WR_DFTHRSH_MIN",     0x00, 0x70 },
        { "RD_DFTHRSH_MIN",     0x00, 0x07 },
        { "RD_DFTHRSH_25",      0x01, 0x07 },
@@ -1843,209 +1451,7 @@ ahd_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x88, regvalue, cur_col, wrap));
 }
 
-int
-ahd_romaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "ROMADDR",
-           0x8a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t ROMCNTRL_parse_table[] = {
-       { "RDY",                0x01, 0x01 },
-       { "REPEAT",             0x02, 0x02 },
-       { "ROMSPD",             0x18, 0x18 },
-       { "ROMOP",              0xe0, 0xe0 }
-};
-
-int
-ahd_romcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(ROMCNTRL_parse_table, 4, "ROMCNTRL",
-           0x8d, regvalue, cur_col, wrap));
-}
-
-int
-ahd_romdata_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "ROMDATA",
-           0x8e, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG0_parse_table[] = {
-       { "CFNUM",              0x07, 0x07 },
-       { "CDNUM",              0xf8, 0xf8 }
-};
-
-int
-ahd_cmcrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(CMCRXMSG0_parse_table, 2, "CMCRXMSG0",
-           0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t ROENABLE_parse_table[] = {
-       { "DCH0ROEN",           0x01, 0x01 },
-       { "DCH1ROEN",           0x02, 0x02 },
-       { "SGROEN",             0x04, 0x04 },
-       { "CMCROEN",            0x08, 0x08 },
-       { "OVLYROEN",           0x10, 0x10 },
-       { "MSIROEN",            0x20, 0x20 }
-};
-
-int
-ahd_roenable_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(ROENABLE_parse_table, 6, "ROENABLE",
-           0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG0_parse_table[] = {
-       { "CFNUM",              0x07, 0x07 },
-       { "CDNUM",              0xf8, 0xf8 }
-};
-
-int
-ahd_ovlyrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(OVLYRXMSG0_parse_table, 2, "OVLYRXMSG0",
-           0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG0_parse_table[] = {
-       { "CFNUM",              0x07, 0x07 },
-       { "CDNUM",              0xf8, 0xf8 }
-};
-
-int
-ahd_dchrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(DCHRXMSG0_parse_table, 2, "DCHRXMSG0",
-           0x90, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG1_parse_table[] = {
-       { "CBNUM",              0xff, 0xff }
-};
-
-int
-ahd_ovlyrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(OVLYRXMSG1_parse_table, 1, "OVLYRXMSG1",
-           0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t NSENABLE_parse_table[] = {
-       { "DCH0NSEN",           0x01, 0x01 },
-       { "DCH1NSEN",           0x02, 0x02 },
-       { "SGNSEN",             0x04, 0x04 },
-       { "CMCNSEN",            0x08, 0x08 },
-       { "OVLYNSEN",           0x10, 0x10 },
-       { "MSINSEN",            0x20, 0x20 }
-};
-
-int
-ahd_nsenable_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NSENABLE_parse_table, 6, "NSENABLE",
-           0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG1_parse_table[] = {
-       { "CBNUM",              0xff, 0xff }
-};
-
-int
-ahd_cmcrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(CMCRXMSG1_parse_table, 1, "CMCRXMSG1",
-           0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG1_parse_table[] = {
-       { "CBNUM",              0xff, 0xff }
-};
-
-int
-ahd_dchrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(DCHRXMSG1_parse_table, 1, "DCHRXMSG1",
-           0x91, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG2_parse_table[] = {
-       { "MINDEX",             0xff, 0xff }
-};
-
-int
-ahd_dchrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(DCHRXMSG2_parse_table, 1, "DCHRXMSG2",
-           0x92, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG2_parse_table[] = {
-       { "MINDEX",             0xff, 0xff }
-};
-
-int
-ahd_cmcrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(CMCRXMSG2_parse_table, 1, "CMCRXMSG2",
-           0x92, regvalue, cur_col, wrap));
-}
-
-int
-ahd_ost_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "OST",
-           0x92, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG2_parse_table[] = {
-       { "MINDEX",             0xff, 0xff }
-};
-
-int
-ahd_ovlyrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(OVLYRXMSG2_parse_table, 1, "OVLYRXMSG2",
-           0x92, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHRXMSG3_parse_table[] = {
-       { "MCLASS",             0x0f, 0x0f }
-};
-
-int
-ahd_dchrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(DCHRXMSG3_parse_table, 1, "DCHRXMSG3",
-           0x93, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYRXMSG3_parse_table[] = {
-       { "MCLASS",             0x0f, 0x0f }
-};
-
-int
-ahd_ovlyrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(OVLYRXMSG3_parse_table, 1, "OVLYRXMSG3",
-           0x93, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCRXMSG3_parse_table[] = {
-       { "MCLASS",             0x0f, 0x0f }
-};
-
-int
-ahd_cmcrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(CMCRXMSG3_parse_table, 1, "CMCRXMSG3",
-           0x93, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t PCIXCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t PCIXCTL_parse_table[] = {
        { "CMPABCDIS",          0x01, 0x01 },
        { "TSCSERREN",          0x02, 0x02 },
        { "SRSPDPEEN",          0x04, 0x04 },
@@ -2062,46 +1468,7 @@ ahd_pcixctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x93, regvalue, cur_col, wrap));
 }
 
-int
-ahd_ovlyseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "OVLYSEQBCNT",
-           0x94, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dchseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "DCHSEQBCNT",
-           0x94, regvalue, cur_col, wrap));
-}
-
-int
-ahd_cmcseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "CMCSEQBCNT",
-           0x94, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCSPLTSTAT0_parse_table[] = {
-       { "RXSPLTRSP",          0x01, 0x01 },
-       { "RXSCEMSG",           0x02, 0x02 },
-       { "RXOVRUN",            0x04, 0x04 },
-       { "CNTNOTCMPLT",        0x08, 0x08 },
-       { "SCDATBUCKET",        0x10, 0x10 },
-       { "SCADERR",            0x20, 0x20 },
-       { "SCBCERR",            0x40, 0x40 },
-       { "STAETERM",           0x80, 0x80 }
-};
-
-int
-ahd_cmcspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(CMCSPLTSTAT0_parse_table, 8, "CMCSPLTSTAT0",
-           0x96, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHSPLTSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t DCHSPLTSTAT0_parse_table[] = {
        { "RXSPLTRSP",          0x01, 0x01 },
        { "RXSCEMSG",           0x02, 0x02 },
        { "RXOVRUN",            0x04, 0x04 },
@@ -2119,190 +1486,18 @@ ahd_dchspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x96, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t OVLYSPLTSTAT0_parse_table[] = {
-       { "RXSPLTRSP",          0x01, 0x01 },
-       { "RXSCEMSG",           0x02, 0x02 },
-       { "RXOVRUN",            0x04, 0x04 },
-       { "CNTNOTCMPLT",        0x08, 0x08 },
-       { "SCDATBUCKET",        0x10, 0x10 },
-       { "SCADERR",            0x20, 0x20 },
-       { "SCBCERR",            0x40, 0x40 },
-       { "STAETERM",           0x80, 0x80 }
-};
-
-int
-ahd_ovlyspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(OVLYSPLTSTAT0_parse_table, 8, "OVLYSPLTSTAT0",
-           0x96, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCSPLTSTAT1_parse_table[] = {
-       { "RXDATABUCKET",       0x01, 0x01 }
-};
-
-int
-ahd_cmcspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(CMCSPLTSTAT1_parse_table, 1, "CMCSPLTSTAT1",
-           0x97, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYSPLTSTAT1_parse_table[] = {
-       { "RXDATABUCKET",       0x01, 0x01 }
-};
-
-int
-ahd_ovlyspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(OVLYSPLTSTAT1_parse_table, 1, "OVLYSPLTSTAT1",
-           0x97, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DCHSPLTSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t DCHSPLTSTAT1_parse_table[] = {
        { "RXDATABUCKET",       0x01, 0x01 }
 };
 
 int
 ahd_dchspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-       return (ahd_print_register(DCHSPLTSTAT1_parse_table, 1, "DCHSPLTSTAT1",
-           0x97, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGRXMSG0_parse_table[] = {
-       { "CFNUM",              0x07, 0x07 },
-       { "CDNUM",              0xf8, 0xf8 }
-};
-
-int
-ahd_sgrxmsg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SGRXMSG0_parse_table, 2, "SGRXMSG0",
-           0x98, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR0_parse_table[] = {
-       { "LOWER_ADDR",         0x7f, 0x7f }
-};
-
-int
-ahd_slvspltoutadr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SLVSPLTOUTADR0_parse_table, 1, "SLVSPLTOUTADR0",
-           0x98, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGRXMSG1_parse_table[] = {
-       { "CBNUM",              0xff, 0xff }
-};
-
-int
-ahd_sgrxmsg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SGRXMSG1_parse_table, 1, "SGRXMSG1",
-           0x99, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR1_parse_table[] = {
-       { "REQ_FNUM",           0x07, 0x07 },
-       { "REQ_DNUM",           0xf8, 0xf8 }
-};
-
-int
-ahd_slvspltoutadr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SLVSPLTOUTADR1_parse_table, 2, "SLVSPLTOUTADR1",
-           0x99, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGRXMSG2_parse_table[] = {
-       { "MINDEX",             0xff, 0xff }
-};
-
-int
-ahd_sgrxmsg2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SGRXMSG2_parse_table, 1, "SGRXMSG2",
-           0x9a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR2_parse_table[] = {
-       { "REQ_BNUM",           0xff, 0xff }
-};
-
-int
-ahd_slvspltoutadr2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SLVSPLTOUTADR2_parse_table, 1, "SLVSPLTOUTADR2",
-           0x9a, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGRXMSG3_parse_table[] = {
-       { "MCLASS",             0x0f, 0x0f }
-};
-
-int
-ahd_sgrxmsg3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SGRXMSG3_parse_table, 1, "SGRXMSG3",
-           0x9b, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTADR3_parse_table[] = {
-       { "RLXORD",             0x10, 0x10 },
-       { "TAG_NUM",            0x1f, 0x1f }
-};
-
-int
-ahd_slvspltoutadr3_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SLVSPLTOUTADR3_parse_table, 2, "SLVSPLTOUTADR3",
-           0x9b, regvalue, cur_col, wrap));
-}
-
-int
-ahd_sgseqbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "SGSEQBCNT",
-           0x9c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTATTR0_parse_table[] = {
-       { "LOWER_BCNT",         0xff, 0xff }
-};
-
-int
-ahd_slvspltoutattr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SLVSPLTOUTATTR0_parse_table, 1, "SLVSPLTOUTATTR0",
-           0x9c, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTATTR1_parse_table[] = {
-       { "CMPLT_FNUM",         0x07, 0x07 },
-       { "CMPLT_DNUM",         0xf8, 0xf8 }
-};
-
-int
-ahd_slvspltoutattr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SLVSPLTOUTATTR1_parse_table, 2, "SLVSPLTOUTATTR1",
-           0x9d, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SLVSPLTOUTATTR2_parse_table[] = {
-       { "CMPLT_BNUM",         0xff, 0xff }
-};
-
-int
-ahd_slvspltoutattr2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SLVSPLTOUTATTR2_parse_table, 1, "SLVSPLTOUTATTR2",
-           0x9e, regvalue, cur_col, wrap));
+       return (ahd_print_register(DCHSPLTSTAT1_parse_table, 1, "DCHSPLTSTAT1",
+           0x97, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SGSPLTSTAT0_parse_table[] = {
+static const ahd_reg_parse_entry_t SGSPLTSTAT0_parse_table[] = {
        { "RXSPLTRSP",          0x01, 0x01 },
        { "RXSCEMSG",           0x02, 0x02 },
        { "RXOVRUN",            0x04, 0x04 },
@@ -2320,7 +1515,7 @@ ahd_sgspltstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x9e, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SGSPLTSTAT1_parse_table[] = {
+static const ahd_reg_parse_entry_t SGSPLTSTAT1_parse_table[] = {
        { "RXDATABUCKET",       0x01, 0x01 }
 };
 
@@ -2331,19 +1526,7 @@ ahd_sgspltstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x9f, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SFUNCT_parse_table[] = {
-       { "TEST_NUM",           0x0f, 0x0f },
-       { "TEST_GROUP",         0xf0, 0xf0 }
-};
-
-int
-ahd_sfunct_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SFUNCT_parse_table, 2, "SFUNCT",
-           0x9f, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DF0PCISTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t DF0PCISTAT_parse_table[] = {
        { "DPR",                0x01, 0x01 },
        { "TWATERR",            0x02, 0x02 },
        { "RDPERR",             0x04, 0x04 },
@@ -2368,83 +1551,6 @@ ahd_reg0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xa0, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t DF1PCISTAT_parse_table[] = {
-       { "DPR",                0x01, 0x01 },
-       { "TWATERR",            0x02, 0x02 },
-       { "RDPERR",             0x04, 0x04 },
-       { "SCAAPERR",           0x08, 0x08 },
-       { "RTA",                0x10, 0x10 },
-       { "RMA",                0x20, 0x20 },
-       { "SSE",                0x40, 0x40 },
-       { "DPE",                0x80, 0x80 }
-};
-
-int
-ahd_df1pcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(DF1PCISTAT_parse_table, 8, "DF1PCISTAT",
-           0xa1, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SGPCISTAT_parse_table[] = {
-       { "DPR",                0x01, 0x01 },
-       { "RDPERR",             0x04, 0x04 },
-       { "SCAAPERR",           0x08, 0x08 },
-       { "RTA",                0x10, 0x10 },
-       { "RMA",                0x20, 0x20 },
-       { "SSE",                0x40, 0x40 },
-       { "DPE",                0x80, 0x80 }
-};
-
-int
-ahd_sgpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SGPCISTAT_parse_table, 7, "SGPCISTAT",
-           0xa2, regvalue, cur_col, wrap));
-}
-
-int
-ahd_reg1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "REG1",
-           0xa2, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CMCPCISTAT_parse_table[] = {
-       { "DPR",                0x01, 0x01 },
-       { "TWATERR",            0x02, 0x02 },
-       { "RDPERR",             0x04, 0x04 },
-       { "SCAAPERR",           0x08, 0x08 },
-       { "RTA",                0x10, 0x10 },
-       { "RMA",                0x20, 0x20 },
-       { "SSE",                0x40, 0x40 },
-       { "DPE",                0x80, 0x80 }
-};
-
-int
-ahd_cmcpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(CMCPCISTAT_parse_table, 8, "CMCPCISTAT",
-           0xa3, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t OVLYPCISTAT_parse_table[] = {
-       { "DPR",                0x01, 0x01 },
-       { "RDPERR",             0x04, 0x04 },
-       { "SCAAPERR",           0x08, 0x08 },
-       { "RTA",                0x10, 0x10 },
-       { "RMA",                0x20, 0x20 },
-       { "SSE",                0x40, 0x40 },
-       { "DPE",                0x80, 0x80 }
-};
-
-int
-ahd_ovlypcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(OVLYPCISTAT_parse_table, 7, "OVLYPCISTAT",
-           0xa4, regvalue, cur_col, wrap));
-}
-
 int
 ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -2452,7 +1558,7 @@ ahd_reg_isr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xa4, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SG_STATE_parse_table[] = {
+static const ahd_reg_parse_entry_t SG_STATE_parse_table[] = {
        { "SEGS_AVAIL",         0x01, 0x01 },
        { "LOADING_NEEDED",     0x02, 0x02 },
        { "FETCH_INPROG",       0x04, 0x04 }
@@ -2465,23 +1571,7 @@ ahd_sg_state_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xa6, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t MSIPCISTAT_parse_table[] = {
-       { "DPR",                0x01, 0x01 },
-       { "TWATERR",            0x02, 0x02 },
-       { "CLRPENDMSI",         0x08, 0x08 },
-       { "RTA",                0x10, 0x10 },
-       { "RMA",                0x20, 0x20 },
-       { "SSE",                0x40, 0x40 }
-};
-
-int
-ahd_msipcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(MSIPCISTAT_parse_table, 6, "MSIPCISTAT",
-           0xa6, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t TARGPCISTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t TARGPCISTAT_parse_table[] = {
        { "TWATERR",            0x02, 0x02 },
        { "STA",                0x08, 0x08 },
        { "SSE",                0x40, 0x40 },
@@ -2495,13 +1585,6 @@ ahd_targpcistat_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xa7, regvalue, cur_col, wrap));
 }
 
-int
-ahd_data_count_odd_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "DATA_COUNT_ODD",
-           0xa7, regvalue, cur_col, wrap));
-}
-
 int
 ahd_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -2509,14 +1592,7 @@ ahd_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xa8, regvalue, cur_col, wrap));
 }
 
-int
-ahd_ccscbacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "CCSCBACNT",
-           0xab, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SCBAUTOPTR_parse_table[] = {
+static const ahd_reg_parse_entry_t SCBAUTOPTR_parse_table[] = {
        { "SCBPTR_OFF",         0x07, 0x07 },
        { "SCBPTR_ADDR",        0x38, 0x38 },
        { "AUSCBPTR_EN",        0x80, 0x80 }
@@ -2536,13 +1612,6 @@ ahd_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xac, regvalue, cur_col, wrap));
 }
 
-int
-ahd_ccscbadr_bk_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "CCSCBADR_BK",
-           0xac, regvalue, cur_col, wrap));
-}
-
 int
 ahd_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -2550,23 +1619,7 @@ ahd_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xac, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CMC_RAMBIST_parse_table[] = {
-       { "CMC_BUFFER_BIST_EN", 0x01, 0x01 },
-       { "CMC_BUFFER_BIST_FAIL",0x02, 0x02 },
-       { "SG_BIST_EN",         0x10, 0x10 },
-       { "SG_BIST_FAIL",       0x20, 0x20 },
-       { "SCBRAMBIST_FAIL",    0x40, 0x40 },
-       { "SG_ELEMENT_SIZE",    0x80, 0x80 }
-};
-
-int
-ahd_cmc_rambist_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(CMC_RAMBIST_parse_table, 6, "CMC_RAMBIST",
-           0xad, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t CCSCBCTL_parse_table[] = {
        { "CCSCBRESET",         0x01, 0x01 },
        { "CCSCBDIR",           0x04, 0x04 },
        { "CCSCBEN",            0x08, 0x08 },
@@ -2582,7 +1635,7 @@ ahd_ccscbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xad, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t CCSGCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t CCSGCTL_parse_table[] = {
        { "CCSGRESET",          0x01, 0x01 },
        { "SG_FETCH_REQ",       0x02, 0x02 },
        { "CCSGENACK",          0x08, 0x08 },
@@ -2605,13 +1658,6 @@ ahd_ccsgram_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xb0, regvalue, cur_col, wrap));
 }
 
-int
-ahd_flexadr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "FLEXADR",
-           0xb0, regvalue, cur_col, wrap));
-}
-
 int
 ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -2619,32 +1665,6 @@ ahd_ccscbram_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xb0, regvalue, cur_col, wrap));
 }
 
-int
-ahd_flexcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "FLEXCNT",
-           0xb3, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t FLEXDMASTAT_parse_table[] = {
-       { "FLEXDMADONE",        0x01, 0x01 },
-       { "FLEXDMAERR",         0x02, 0x02 }
-};
-
-int
-ahd_flexdmastat_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(FLEXDMASTAT_parse_table, 2, "FLEXDMASTAT",
-           0xb5, regvalue, cur_col, wrap));
-}
-
-int
-ahd_flexdata_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "FLEXDATA",
-           0xb6, regvalue, cur_col, wrap));
-}
-
 int
 ahd_brddat_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -2652,7 +1672,7 @@ ahd_brddat_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xb8, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t BRDCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t BRDCTL_parse_table[] = {
        { "BRDSTB",             0x01, 0x01 },
        { "BRDRW",              0x02, 0x02 },
        { "BRDEN",              0x04, 0x04 },
@@ -2682,7 +1702,7 @@ ahd_seedat_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xbc, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SEECTL_parse_table[] = {
+static const ahd_reg_parse_entry_t SEECTL_parse_table[] = {
        { "SEEOP_ERAL",         0x40, 0x70 },
        { "SEEOP_WRITE",        0x50, 0x70 },
        { "SEEOP_READ",         0x60, 0x70 },
@@ -2702,7 +1722,7 @@ ahd_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xbe, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SEESTAT_parse_table[] = {
+static const ahd_reg_parse_entry_t SEESTAT_parse_table[] = {
        { "SEESTART",           0x01, 0x01 },
        { "SEEBUSY",            0x02, 0x02 },
        { "SEEARBACK",          0x04, 0x04 },
@@ -2718,34 +1738,7 @@ ahd_seestat_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xbe, regvalue, cur_col, wrap));
 }
 
-int
-ahd_scbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "SCBCNT",
-           0xbf, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "DFWADDR",
-           0xc0, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DSPFLTRCTL_parse_table[] = {
-       { "DSPFCNTSEL",         0x0f, 0x0f },
-       { "EDGESENSE",          0x10, 0x10 },
-       { "FLTRDISABLE",        0x20, 0x20 }
-};
-
-int
-ahd_dspfltrctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(DSPFLTRCTL_parse_table, 3, "DSPFLTRCTL",
-           0xc0, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DSPDATACTL_parse_table[] = {
+static const ahd_reg_parse_entry_t DSPDATACTL_parse_table[] = {
        { "XMITOFFSTDIS",       0x02, 0x02 },
        { "RCVROFFSTDIS",       0x04, 0x04 },
        { "DESQDIS",            0x10, 0x10 },
@@ -2759,37 +1752,6 @@ ahd_dspdatactl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xc1, regvalue, cur_col, wrap));
 }
 
-int
-ahd_dfraddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "DFRADDR",
-           0xc2, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DSPREQCTL_parse_table[] = {
-       { "MANREQDLY",          0x3f, 0x3f },
-       { "MANREQCTL",          0xc0, 0xc0 }
-};
-
-int
-ahd_dspreqctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(DSPREQCTL_parse_table, 2, "DSPREQCTL",
-           0xc2, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DSPACKCTL_parse_table[] = {
-       { "MANACKDLY",          0x3f, 0x3f },
-       { "MANACKCTL",          0xc0, 0xc0 }
-};
-
-int
-ahd_dspackctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(DSPACKCTL_parse_table, 2, "DSPACKCTL",
-           0xc3, regvalue, cur_col, wrap));
-}
-
 int
 ahd_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -2797,7 +1759,7 @@ ahd_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xc4, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t DSPSELECT_parse_table[] = {
+static const ahd_reg_parse_entry_t DSPSELECT_parse_table[] = {
        { "DSPSEL",             0x1f, 0x1f },
        { "AUTOINCEN",          0x80, 0x80 }
 };
@@ -2809,7 +1771,7 @@ ahd_dspselect_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xc4, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t WRTBIASCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t WRTBIASCTL_parse_table[] = {
        { "XMITMANVAL",         0x3f, 0x3f },
        { "AUTOXBCDIS",         0x80, 0x80 }
 };
@@ -2821,91 +1783,7 @@ ahd_wrtbiasctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xc5, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t RCVRBIOSCTL_parse_table[] = {
-       { "RCVRMANVAL",         0x3f, 0x3f },
-       { "AUTORBCDIS",         0x80, 0x80 }
-};
-
-int
-ahd_rcvrbiosctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(RCVRBIOSCTL_parse_table, 2, "RCVRBIOSCTL",
-           0xc6, regvalue, cur_col, wrap));
-}
-
-int
-ahd_wrtbiascalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "WRTBIASCALC",
-           0xc7, regvalue, cur_col, wrap));
-}
-
-int
-ahd_rcvrbiascalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "RCVRBIASCALC",
-           0xc8, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfptrs_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "DFPTRS",
-           0xc8, regvalue, cur_col, wrap));
-}
-
-int
-ahd_skewcalc_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "SKEWCALC",
-           0xc9, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfbkptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "DFBKPTR",
-           0xc9, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t DFDBCTL_parse_table[] = {
-       { "DFF_RAMBIST_EN",     0x01, 0x01 },
-       { "DFF_RAMBIST_DONE",   0x02, 0x02 },
-       { "DFF_RAMBIST_FAIL",   0x04, 0x04 },
-       { "DFF_DIR_ERR",        0x08, 0x08 },
-       { "DFF_CIO_RD_RDY",     0x10, 0x10 },
-       { "DFF_CIO_WR_RDY",     0x20, 0x20 }
-};
-
-int
-ahd_dfdbctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(DFDBCTL_parse_table, 6, "DFDBCTL",
-           0xcb, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfscnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "DFSCNT",
-           0xcc, regvalue, cur_col, wrap));
-}
-
-int
-ahd_dfbcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "DFBCNT",
-           0xce, regvalue, cur_col, wrap));
-}
-
-int
-ahd_ovlyaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "OVLYADDR",
-           0xd4, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQCTL0_parse_table[] = {
        { "LOADRAM",            0x01, 0x01 },
        { "SEQRESET",           0x02, 0x02 },
        { "STEP",               0x04, 0x04 },
@@ -2923,21 +1801,7 @@ ahd_seqctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xd6, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SEQCTL1_parse_table[] = {
-       { "RAMBIST_EN",         0x01, 0x01 },
-       { "RAMBIST_FAIL",       0x02, 0x02 },
-       { "RAMBIST_DONE",       0x04, 0x04 },
-       { "OVRLAY_DATA_CHK",    0x08, 0x08 }
-};
-
-int
-ahd_seqctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(SEQCTL1_parse_table, 4, "SEQCTL1",
-           0xd7, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t FLAGS_parse_table[] = {
+static const ahd_reg_parse_entry_t FLAGS_parse_table[] = {
        { "CARRY",              0x01, 0x01 },
        { "ZERO",               0x02, 0x02 }
 };
@@ -2949,7 +1813,7 @@ ahd_flags_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xd8, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQINTCTL_parse_table[] = {
        { "IRET",               0x01, 0x01 },
        { "INTMASK1",           0x02, 0x02 },
        { "INTMASK2",           0x04, 0x04 },
@@ -3001,24 +1865,6 @@ ahd_dindex_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xe4, regvalue, cur_col, wrap));
 }
 
-int
-ahd_brkaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "BRKADDR0",
-           0xe6, regvalue, cur_col, wrap));
-}
-
-static ahd_reg_parse_entry_t BRKADDR1_parse_table[] = {
-       { "BRKDIS",             0x80, 0x80 }
-};
-
-int
-ahd_brkaddr1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(BRKADDR1_parse_table, 1, "BRKADDR1",
-           0xe6, regvalue, cur_col, wrap));
-}
-
 int
 ahd_allones_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -3054,13 +1900,6 @@ ahd_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xed, regvalue, cur_col, wrap));
 }
 
-int
-ahd_function1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "FUNCTION1",
-           0xf0, regvalue, cur_col, wrap));
-}
-
 int
 ahd_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -3082,13 +1921,6 @@ ahd_curaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xf4, regvalue, cur_col, wrap));
 }
 
-int
-ahd_lastaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "LASTADDR",
-           0xf6, regvalue, cur_col, wrap));
-}
-
 int
 ahd_intvec2_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -3111,23 +1943,16 @@ ahd_accum_save_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 int
-ahd_waiting_scb_tails_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "WAITING_SCB_TAILS",
-           0x100, regvalue, cur_col, wrap));
-}
-
-int
-ahd_ahd_pci_config_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-       return (ahd_print_register(NULL, 0, "AHD_PCI_CONFIG_BASE",
+       return (ahd_print_register(NULL, 0, "SRAM_BASE",
            0x100, regvalue, cur_col, wrap));
 }
 
 int
-ahd_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_waiting_scb_tails_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-       return (ahd_print_register(NULL, 0, "SRAM_BASE",
+       return (ahd_print_register(NULL, 0, "WAITING_SCB_TAILS",
            0x100, regvalue, cur_col, wrap));
 }
 
@@ -3215,7 +2040,7 @@ ahd_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x137, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t DMAPARAMS_parse_table[] = {
+static const ahd_reg_parse_entry_t DMAPARAMS_parse_table[] = {
        { "FIFORESET",          0x01, 0x01 },
        { "FIFOFLUSH",          0x02, 0x02 },
        { "DIRECTION",          0x04, 0x04 },
@@ -3235,7 +2060,7 @@ ahd_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x138, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
        { "NO_DISCONNECT",      0x01, 0x01 },
        { "SPHASE_PENDING",     0x02, 0x02 },
        { "DPHASE_PENDING",     0x04, 0x04 },
@@ -3268,7 +2093,7 @@ ahd_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x13b, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t LASTPHASE_parse_table[] = {
+static const ahd_reg_parse_entry_t LASTPHASE_parse_table[] = {
        { "P_DATAOUT",          0x00, 0xe0 },
        { "P_DATAOUT_DT",       0x20, 0xe0 },
        { "P_DATAIN",           0x40, 0xe0 },
@@ -3326,7 +2151,7 @@ ahd_qoutfifo_next_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x144, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t ARG_1_parse_table[] = {
+static const ahd_reg_parse_entry_t ARG_1_parse_table[] = {
        { "CONT_MSG_LOOP_TARG", 0x02, 0x02 },
        { "CONT_MSG_LOOP_READ", 0x03, 0x03 },
        { "CONT_MSG_LOOP_WRITE",0x04, 0x04 },
@@ -3358,7 +2183,7 @@ ahd_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x14a, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
+static const ahd_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
        { "ALTSTIM",            0x01, 0x01 },
        { "ENAUTOATNP",         0x02, 0x02 },
        { "MANUALP",            0x0c, 0x0c },
@@ -3381,7 +2206,7 @@ ahd_initiator_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x14c, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
+static const ahd_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
        { "PENDING_MK_MESSAGE", 0x01, 0x01 },
        { "TARGET_MSG_PENDING", 0x02, 0x02 },
        { "SELECTOUT_QFROZEN",  0x04, 0x04 }
@@ -3465,20 +2290,20 @@ ahd_mk_message_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
 }
 
 int
-ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_scb_residual_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-       return (ahd_print_register(NULL, 0, "SCB_BASE",
+       return (ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT",
            0x180, regvalue, cur_col, wrap));
 }
 
 int
-ahd_scb_residual_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
+ahd_scb_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
-       return (ahd_print_register(NULL, 0, "SCB_RESIDUAL_DATACNT",
+       return (ahd_print_register(NULL, 0, "SCB_BASE",
            0x180, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCB_RESIDUAL_SGPTR_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_RESIDUAL_SGPTR_parse_table[] = {
        { "SG_LIST_NULL",       0x01, 0x01 },
        { "SG_OVERRUN_RESID",   0x02, 0x02 },
        { "SG_ADDR_MASK",       0xf8, 0xf8 }
@@ -3498,27 +2323,6 @@ ahd_scb_scsi_status_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x188, regvalue, cur_col, wrap));
 }
 
-int
-ahd_scb_target_phases_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "SCB_TARGET_PHASES",
-           0x189, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_target_data_dir_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "SCB_TARGET_DATA_DIR",
-           0x18a, regvalue, cur_col, wrap));
-}
-
-int
-ahd_scb_target_itag_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "SCB_TARGET_ITAG",
-           0x18b, regvalue, cur_col, wrap));
-}
-
 int
 ahd_scb_sense_busaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -3533,7 +2337,7 @@ ahd_scb_tag_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x190, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
        { "SCB_TAG_TYPE",       0x03, 0x03 },
        { "DISCONNECTED",       0x04, 0x04 },
        { "STATUS_RCVD",        0x08, 0x08 },
@@ -3550,7 +2354,7 @@ ahd_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x192, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
        { "OID",                0x0f, 0x0f },
        { "TID",                0xf0, 0xf0 }
 };
@@ -3562,7 +2366,7 @@ ahd_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x193, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCB_LUN_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_LUN_parse_table[] = {
        { "LID",                0xff, 0xff }
 };
 
@@ -3573,7 +2377,7 @@ ahd_scb_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x194, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCB_TASK_ATTRIBUTE_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_TASK_ATTRIBUTE_parse_table[] = {
        { "SCB_XFERLEN_ODD",    0x01, 0x01 }
 };
 
@@ -3584,7 +2388,7 @@ ahd_scb_task_attribute_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x195, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCB_CDB_LEN_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_CDB_LEN_parse_table[] = {
        { "SCB_CDB_LEN_PTR",    0x80, 0x80 }
 };
 
@@ -3609,7 +2413,7 @@ ahd_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x198, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
        { "SG_HIGH_ADDR_BITS",  0x7f, 0x7f },
        { "SG_LAST_SEG",        0x80, 0x80 }
 };
@@ -3621,7 +2425,7 @@ ahd_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x1a0, regvalue, cur_col, wrap));
 }
 
-static ahd_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
+static const ahd_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
        { "SG_LIST_NULL",       0x01, 0x01 },
        { "SG_FULL_RESID",      0x02, 0x02 },
        { "SG_STATUS_VALID",    0x04, 0x04 }
@@ -3655,13 +2459,6 @@ ahd_scb_next2_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x1ae, regvalue, cur_col, wrap));
 }
 
-int
-ahd_scb_spare_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahd_print_register(NULL, 0, "SCB_SPARE",
-           0x1b0, regvalue, cur_col, wrap));
-}
-
 int
 ahd_scb_disconnected_lists_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
index 11bed07e90b7ef8573ae0d5fc5afcb7f3211ab07..4b51e232392ff8b3278a34c7c7e810e3ebf84d57 100644
@@ -5,7 +5,7 @@
  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.seq#120 $
  * $Id: //depot/aic7xxx/aic7xxx/aic79xx.reg#77 $
  */
-static uint8_t seqprog[] = {
+static const uint8_t seqprog[] = {
        0xff, 0x02, 0x06, 0x78,
        0x00, 0xea, 0x6e, 0x59,
        0x01, 0xea, 0x04, 0x30,
@@ -1027,7 +1027,7 @@ ahd_patch0_func(struct ahd_softc *ahd)
        return (0);
 }
 
-static struct patch {
+static const struct patch {
        ahd_patch_func_t                *patch_func;
        uint32_t                 begin          :10,
                                 skip_instr     :10,
@@ -1166,7 +1166,7 @@ static struct patch {
        { ahd_patch23_func, 815, 11, 1 }
 };
 
-static struct cs {
+static const struct cs {
        uint16_t        begin;
        uint16_t        end;
 } critical_sections[] = {
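
In the file above, the sequencer program, the patch list and the critical-section table all become const. For a file-scope table this typically moves the data into the read-only section and turns any accidental store through it into a compile-time error. A small stand-alone C illustration, not driver code and with made-up names and values:

#include <stdio.h>

struct patch_range {			/* shape loosely modelled on struct cs above */
	unsigned short begin;
	unsigned short end;
};

static const struct patch_range ranges[] = {	/* typically placed in .rodata */
	{ 11, 18 },
	{ 26, 29 },
};

int main(void)
{
	/* ranges[0].begin = 0;  -- rejected: assignment of read-only member */
	printf("%zu ranges\n", sizeof(ranges) / sizeof(ranges[0]));
	return 0;
}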
index c0344e61765112195bb711ad25137e34da58fc61..e4e651cca3e448185cefb4a9474f9cf7baad12f0 100644
@@ -736,7 +736,7 @@ struct ahc_syncrate {
 #define                ST_SXFR    0x010        /* Rate Single Transition Only */
 #define                DT_SXFR    0x040        /* Rate Double Transition Only */
        uint8_t period; /* Period to send to SCSI target */
-       char *rate;
+       const char *rate;
 };
 
 /* Safe and valid period for async negotiations. */
@@ -1114,7 +1114,7 @@ typedef int (ahc_device_setup_t)(struct ahc_softc *);
 struct ahc_pci_identity {
        uint64_t                 full_id;
        uint64_t                 id_mask;
-       char                    *name;
+       const char              *name;
        ahc_device_setup_t      *setup;
 };
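
The two hunks above change string members that only ever point at string literals (rate in struct ahc_syncrate, name in struct ahc_pci_identity) to const char *. A stand-alone sketch of why that matters, with made-up values:

struct rate_entry {
	unsigned char period;	/* period to send to the target */
	const char *rate;	/* points at a string literal, never written */
};

static const struct rate_entry example = { 25, "10.0" };

/*
 * With a plain "char *rate" the statement below would compile but
 * invoke undefined behaviour at run time (writing into a string
 * literal); with "const char *" the compiler rejects it outright:
 *
 *	example.rate[0] = 'X';
 */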
 
@@ -1133,15 +1133,11 @@ extern const int ahc_num_aic7770_devs;
 
 /*************************** Function Declarations ****************************/
 /******************************************************************************/
-u_int                  ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl);
-void                   ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl);
-void                   ahc_busy_tcl(struct ahc_softc *ahc,
-                                    u_int tcl, u_int busyid);
 
 /***************************** PCI Front End *********************************/
-struct ahc_pci_identity        *ahc_find_pci_device(ahc_dev_softc_t);
+const struct ahc_pci_identity  *ahc_find_pci_device(ahc_dev_softc_t);
 int                     ahc_pci_config(struct ahc_softc *,
-                                       struct ahc_pci_identity *);
+                                       const struct ahc_pci_identity *);
 int                     ahc_pci_test_register_access(struct ahc_softc *);
 #ifdef CONFIG_PM
 void                    ahc_pci_resume(struct ahc_softc *ahc);
@@ -1155,9 +1151,6 @@ int                        aic7770_config(struct ahc_softc *ahc,
 
 /************************** SCB and SCB queue management **********************/
 int            ahc_probe_scbs(struct ahc_softc *);
-void           ahc_run_untagged_queues(struct ahc_softc *ahc);
-void           ahc_run_untagged_queue(struct ahc_softc *ahc,
-                                      struct scb_tailq *queue);
 void           ahc_qinfifo_requeue_tail(struct ahc_softc *ahc,
                                         struct scb *scb);
 int            ahc_match_scb(struct ahc_softc *ahc, struct scb *scb,
@@ -1178,22 +1171,8 @@ int                       ahc_resume(struct ahc_softc *ahc);
 #endif
 void                    ahc_set_unit(struct ahc_softc *, int);
 void                    ahc_set_name(struct ahc_softc *, char *);
-void                    ahc_alloc_scbs(struct ahc_softc *ahc);
 void                    ahc_free(struct ahc_softc *ahc);
 int                     ahc_reset(struct ahc_softc *ahc, int reinit);
-void                    ahc_shutdown(void *arg);
-
-/*************************** Interrupt Services *******************************/
-void                   ahc_clear_intstat(struct ahc_softc *ahc);
-void                   ahc_run_qoutfifo(struct ahc_softc *ahc);
-#ifdef AHC_TARGET_MODE
-void                   ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
-#endif
-void                   ahc_handle_brkadrint(struct ahc_softc *ahc);
-void                   ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
-void                   ahc_handle_scsiint(struct ahc_softc *ahc,
-                                          u_int intstat);
-void                   ahc_clear_critical_section(struct ahc_softc *ahc);
 
 /***************************** Error Recovery *********************************/
 typedef enum {
@@ -1214,36 +1193,19 @@ int                     ahc_search_disc_list(struct ahc_softc *ahc, int target,
                                             char channel, int lun, u_int tag,
                                             int stop_on_first, int remove,
                                             int save_state);
-void                   ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
 int                    ahc_reset_channel(struct ahc_softc *ahc, char channel,
                                          int initiate_reset);
-int                    ahc_abort_scbs(struct ahc_softc *ahc, int target,
-                                      char channel, int lun, u_int tag,
-                                      role_t role, uint32_t status);
-void                   ahc_restart(struct ahc_softc *ahc);
-void                   ahc_calc_residual(struct ahc_softc *ahc,
-                                         struct scb *scb);
+
 /*************************** Utility Functions ********************************/
-struct ahc_phase_table_entry*
-                       ahc_lookup_phase_entry(int phase);
 void                   ahc_compile_devinfo(struct ahc_devinfo *devinfo,
                                            u_int our_id, u_int target,
                                            u_int lun, char channel,
                                            role_t role);
 /************************** Transfer Negotiation ******************************/
-struct ahc_syncrate*   ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
+const struct ahc_syncrate*     ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
                                          u_int *ppr_options, u_int maxsync);
 u_int                  ahc_find_period(struct ahc_softc *ahc,
                                        u_int scsirate, u_int maxsync);
-void                   ahc_validate_offset(struct ahc_softc *ahc,
-                                           struct ahc_initiator_tinfo *tinfo,
-                                           struct ahc_syncrate *syncrate,
-                                           u_int *offset, int wide,
-                                           role_t role);
-void                   ahc_validate_width(struct ahc_softc *ahc,
-                                          struct ahc_initiator_tinfo *tinfo,
-                                          u_int *bus_width,
-                                          role_t role);
 /*
  * Negotiation types.  These are used to qualify if we should renegotiate
  * even if our goal and current transport parameters are identical.
@@ -1263,7 +1225,7 @@ void                      ahc_set_width(struct ahc_softc *ahc,
                                      u_int width, u_int type, int paused);
 void                   ahc_set_syncrate(struct ahc_softc *ahc,
                                         struct ahc_devinfo *devinfo,
-                                        struct ahc_syncrate *syncrate,
+                                        const struct ahc_syncrate *syncrate,
                                         u_int period, u_int offset,
                                         u_int ppr_options,
                                         u_int type, int paused);
@@ -1305,11 +1267,10 @@ extern uint32_t ahc_debug;
 #define AHC_SHOW_MASKED_ERRORS 0x1000
 #define AHC_DEBUG_SEQUENCER    0x2000
 #endif
-void                   ahc_print_scb(struct scb *scb);
 void                   ahc_print_devinfo(struct ahc_softc *ahc,
                                          struct ahc_devinfo *dev);
 void                   ahc_dump_card_state(struct ahc_softc *ahc);
-int                    ahc_print_register(ahc_reg_parse_entry_t *table,
+int                    ahc_print_register(const ahc_reg_parse_entry_t *table,
                                           u_int num_entries,
                                           const char *name,
                                           u_int address,
index e196d83b93c7a49fc8fcf1828afbc1da2c16a7b0..0d2f763c3427c820300303fb8bec461a4a66548b 100644
@@ -238,6 +238,7 @@ register SXFRCTL2 {
 register OPTIONMODE {
        address                 0x008
        access_mode RW
+       count           2
        field   AUTORATEEN              0x80
        field   AUTOACKEN               0x40
        field   ATNMGMNTEN              0x20
@@ -254,6 +255,7 @@ register TARGCRCCNT {
        address                 0x00a
        size    2
        access_mode RW
+       count           2
 }
 
 /*
@@ -344,6 +346,7 @@ register SSTAT2 {
 register SSTAT3 {
        address                 0x00e
        access_mode RO
+       count           2
        mask    SCSICNT         0xf0
        mask    OFFCNT          0x0f
        mask    U2OFFCNT        0x7f
@@ -367,6 +370,7 @@ register SCSIID_ULTRA2 {
 register SIMODE0 {
        address                 0x010
        access_mode RW
+       count           2
        field   ENSELDO         0x40
        field   ENSELDI         0x20
        field   ENSELINGO       0x10
@@ -429,6 +433,7 @@ register SHADDR {
 register SELTIMER {
        address                 0x018
        access_mode RW
+       count           1
        field   STAGE6          0x20
        field   STAGE5          0x10
        field   STAGE4          0x08
@@ -467,6 +472,7 @@ register TARGID {
        address                 0x01b
        size                    2
        access_mode RW
+       count           14
 }
 
 /*
@@ -480,6 +486,7 @@ register TARGID {
 register SPIOCAP {
        address                 0x01b
        access_mode RW
+       count           10
        field   SOFT1           0x80
        field   SOFT0           0x40
        field   SOFTCMDEN       0x20    
@@ -492,6 +499,7 @@ register SPIOCAP {
 
 register BRDCTL        {
        address                 0x01d
+       count           11
        field   BRDDAT7         0x80
        field   BRDDAT6         0x40
        field   BRDDAT5         0x20
@@ -534,6 +542,7 @@ register BRDCTL     {
  */
 register SEECTL {
        address                 0x01e
+       count           11
        field   EXTARBACK       0x80
        field   EXTARBREQ       0x40
        field   SEEMS           0x20
@@ -570,6 +579,7 @@ register SBLKCTL {
 register SEQCTL {
        address                 0x060
        access_mode RW
+       count           15
        field   PERRORDIS       0x80
        field   PAUSEDIS        0x40
        field   FAILDIS         0x20
@@ -590,6 +600,7 @@ register SEQCTL {
 register SEQRAM {
        address                 0x061
        access_mode RW
+       count           2
 }
 
 /*
@@ -604,6 +615,7 @@ register SEQADDR0 {
 register SEQADDR1 {
        address                 0x063
        access_mode RW
+       count           8
        mask    SEQADDR1_MASK   0x01
 }
 
@@ -649,6 +661,7 @@ register NONE {
 register FLAGS {
        address                 0x06b
        access_mode RO
+       count           18
        field   ZERO            0x02
        field   CARRY           0x01
 }
@@ -671,6 +684,7 @@ register FUNCTION1 {
 register STACK {
        address                 0x06f
        access_mode RO
+       count           5
 }
 
 const  STACK_SIZE      4
@@ -692,6 +706,7 @@ register BCTL {
 register DSCOMMAND0 {
        address                 0x084
        access_mode RW
+       count           7
        field   CACHETHEN       0x80    /* Cache Threshold enable */
        field   DPARCKEN        0x40    /* Data Parity Check Enable */
        field   MPARCKEN        0x20    /* Memory Parity Check Enable */
@@ -717,6 +732,7 @@ register DSCOMMAND1 {
 register BUSTIME {
        address                 0x085
        access_mode RW
+       count           2
        mask    BOFF            0xf0
        mask    BON             0x0f
 }
@@ -727,6 +743,7 @@ register BUSTIME {
 register BUSSPD {
        address                 0x086
        access_mode RW
+       count           2
        mask    DFTHRSH         0xc0
        mask    STBOFF          0x38
        mask    STBON           0x07
@@ -737,6 +754,7 @@ register BUSSPD {
 /* aic7850/55/60/70/80/95 only */
 register DSPCISTATUS {
        address                 0x086
+       count           4
        mask    DFTHRSH_100     0xc0
 }
 
@@ -758,6 +776,7 @@ const       SEQ_MAILBOX_SHIFT       0
 register HCNTRL {
        address                 0x087
        access_mode RW
+       count           14
        field   POWRDN          0x40
        field   SWINT           0x10
        field   IRQMS           0x08
@@ -869,6 +888,7 @@ register INTSTAT {
 register ERROR {
        address                 0x092
        access_mode RO
+       count           26
        field   CIOPARERR       0x80    /* Ultra2 only */
        field   PCIERRSTAT      0x40    /* PCI only */
        field   MPARERR         0x20    /* PCI only */
@@ -885,6 +905,7 @@ register ERROR {
 register CLRINT {
        address                 0x092
        access_mode WO
+       count           24
        field   CLRPARERR       0x10    /* PCI only */
        field   CLRBRKADRINT    0x08
        field   CLRSCSIINT      0x04
@@ -943,6 +964,7 @@ register DFDAT {
 register SCBCNT {
        address                 0x09a
        access_mode RW
+       count           1
        field   SCBAUTO         0x80
        mask    SCBCNT_MASK     0x1f
 }
@@ -954,6 +976,7 @@ register SCBCNT {
 register QINFIFO {
        address                 0x09b
        access_mode RW
+       count           12
 }
 
 /*
@@ -972,11 +995,13 @@ register QINCNT   {
 register QOUTFIFO {
        address                 0x09d
        access_mode WO
+       count           7
 }
 
 register CRCCONTROL1 {
        address                 0x09d
        access_mode RW
+       count           3
        field   CRCONSEEN               0x80
        field   CRCVALCHKEN             0x40
        field   CRCENDCHKEN             0x20
@@ -1013,6 +1038,7 @@ register SCSIPHASE {
 register SFUNCT {
        address                 0x09f
        access_mode RW
+       count       4
        field   ALT_MODE        0x80
 }
 
@@ -1095,6 +1121,7 @@ scb {
        }
        SCB_SCSIOFFSET {
                size    1
+               count   1
        }
        SCB_NEXT {
                size    1
@@ -1118,6 +1145,7 @@ const     SG_SIZEOF       0x08            /* sizeof(struct ahc_dma) */
 register SEECTL_2840 {
        address                 0x0c0
        access_mode RW
+       count           2
        field   CS_2840         0x04
        field   CK_2840         0x02
        field   DO_2840         0x01
@@ -1126,6 +1154,7 @@ register SEECTL_2840 {
 register STATUS_2840 {
        address                 0x0c1
        access_mode RW
+       count           4
        field   EEPROM_TF       0x80
        mask    BIOS_SEL        0x60
        mask    ADSEL           0x1e
@@ -1161,6 +1190,7 @@ register CCSGCTL {
 
 register CCSCBCNT {
        address                 0xEF
+       count           1
 }
 
 register CCSCBCTL {
@@ -1187,6 +1217,7 @@ register CCSCBRAM {
 register SCBBADDR {
        address                 0x0F0
        access_mode RW
+       count           3
 }
 
 register CCSCBPTR {
@@ -1195,6 +1226,7 @@ register CCSCBPTR {
 
 register HNSCB_QOFF {
        address                 0x0F4
+       count           4
 }
 
 register SNSCB_QOFF {
@@ -1234,6 +1266,7 @@ register DFF_THRSH {
        mask    WR_DFTHRSH_85   0x50
        mask    WR_DFTHRSH_90   0x60
        mask    WR_DFTHRSH_MAX  0x70
+       count   4
 }
 
 register SG_CACHE_PRE {
@@ -1287,6 +1320,7 @@ scratch_ram {
        ULTRA_ENB {
                alias           CMDSIZE_TABLE
                size            2
+               count           2
        }
        /*
         * Bit vector of targets that have disconnection disabled as set by
@@ -1296,6 +1330,7 @@ scratch_ram {
         */
        DISC_DSB {
                size            2
+               count           6
        }
        CMDSIZE_TABLE_TAIL {
                size            4
@@ -1323,6 +1358,7 @@ scratch_ram {
        /* Parameters for DMA Logic */
        DMAPARAMS {
                size            1
+               count           12
                field   PRELOADEN       0x80
                field   WIDEODD         0x40
                field   SCSIEN          0x20
@@ -1436,11 +1472,12 @@ scratch_ram {
        KERNEL_TQINPOS {
                size            1
        }
-       TQINPOS {                
+       TQINPOS {
                size            1
        }
        ARG_1 {
                size            1
+               count           1
                mask    SEND_MSG                0x80
                mask    SEND_SENSE              0x40
                mask    SEND_REJ                0x20
@@ -1495,6 +1532,7 @@ scratch_ram {
                size    1
                field   HA_274_EXTENDED_TRANS   0x01
                alias   INITIATOR_TAG
+               count           1
        }
 
        SEQ_FLAGS2 {
@@ -1518,6 +1556,7 @@ scratch_ram {
         */
        SCSICONF {
                size            1
+               count           12
                field   TERM_ENB        0x80
                field   RESET_SCSI      0x40
                field   ENSPCHK         0x20
@@ -1527,16 +1566,19 @@ scratch_ram {
        INTDEF {
                address         0x05c
                size            1
+               count           1
                field   EDGE_TRIG       0x80
                mask    VECTOR          0x0f
        }
        HOSTCONF {
                address         0x05d
                size            1
+               count           1
        }
        HA_274_BIOSCTRL {
                address         0x05f
                size            1
+               count           1
                mask    BIOSMODE                0x30
                mask    BIOSDISABLED            0x30    
                field   CHANNEL_B_PRIMARY       0x08
@@ -1552,6 +1594,7 @@ scratch_ram {
         */
        TARG_OFFSET {
                size            16
+               count           1
        }
 }
 
index 3cb07e114e89cd6e83e9753594dc741dd63fe03c..dd11999b77b6dd590992671c062ad508b3bcf546 100644 (file)
@@ -84,16 +84,16 @@ struct seeprom_cmd {
 };
 
 /* Short opcodes for the c46 */
-static struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
-static struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
+static const struct seeprom_cmd seeprom_ewen = {9, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
+static const struct seeprom_cmd seeprom_ewds = {9, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
 
 /* Long opcodes for the C56/C66 */
-static struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
-static struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
+static const struct seeprom_cmd seeprom_long_ewen = {11, {1, 0, 0, 1, 1, 0, 0, 0, 0}};
+static const struct seeprom_cmd seeprom_long_ewds = {11, {1, 0, 0, 0, 0, 0, 0, 0, 0}};
 
 /* Common opcodes */
-static struct seeprom_cmd seeprom_write = {3, {1, 0, 1}};
-static struct seeprom_cmd seeprom_read  = {3, {1, 1, 0}};
+static const struct seeprom_cmd seeprom_write = {3, {1, 0, 1}};
+static const struct seeprom_cmd seeprom_read  = {3, {1, 1, 0}};
 
 /*
  * Wait for the SEERDY to go high; about 800 ns.
@@ -108,7 +108,7 @@ static struct seeprom_cmd seeprom_read  = {3, {1, 1, 0}};
  * Send a START condition and the given command
  */
 static void
-send_seeprom_cmd(struct seeprom_descriptor *sd, struct seeprom_cmd *cmd)
+send_seeprom_cmd(struct seeprom_descriptor *sd, const struct seeprom_cmd *cmd)
 {
        uint8_t temp;
        int i = 0;
@@ -227,7 +227,7 @@ int
 ahc_write_seeprom(struct seeprom_descriptor *sd, uint16_t *buf,
                  u_int start_addr, u_int count)
 {
-       struct seeprom_cmd *ewen, *ewds;
+       const struct seeprom_cmd *ewen, *ewds;
        uint16_t v;
        uint8_t temp;
        int i, k;
index 64e62ce59c152fbedf5495021d0b7d3a6f9e536e..0ae2b4605d09aa9068765486903aa19929ed7e9d 100644 (file)
@@ -51,8 +51,7 @@
 #endif
 
 /***************************** Lookup Tables **********************************/
-char *ahc_chip_names[] =
-{
+static const char *const ahc_chip_names[] = {
        "NONE",
        "aic7770",
        "aic7850",
@@ -75,10 +74,10 @@ static const u_int num_chip_names = ARRAY_SIZE(ahc_chip_names);
  */
 struct ahc_hard_error_entry {
         uint8_t errno;
-       char *errmesg;
+       const char *errmesg;
 };
 
-static struct ahc_hard_error_entry ahc_hard_errors[] = {
+static const struct ahc_hard_error_entry ahc_hard_errors[] = {
        { ILLHADDR,     "Illegal Host Access" },
        { ILLSADDR,     "Illegal Sequencer Address referrenced" },
        { ILLOPCODE,    "Illegal Opcode in sequencer program" },
@@ -90,7 +89,7 @@ static struct ahc_hard_error_entry ahc_hard_errors[] = {
 };
 static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors);
 
-static struct ahc_phase_table_entry ahc_phase_table[] =
+static const struct ahc_phase_table_entry ahc_phase_table[] =
 {
        { P_DATAOUT,    MSG_NOOP,               "in Data-out phase"     },
        { P_DATAIN,     MSG_INITIATOR_DET_ERR,  "in Data-in phase"      },
@@ -115,7 +114,7 @@ static const u_int num_phases = ARRAY_SIZE(ahc_phase_table) - 1;
  * Provides a mapping of tranfer periods in ns to the proper value to
  * stick in the scsixfer reg.
  */
-static struct ahc_syncrate ahc_syncrates[] =
+static const struct ahc_syncrate ahc_syncrates[] =
 {
       /* ultra2    fast/ultra  period     rate */
        { 0x42,      0x000,      9,      "80.0" },
@@ -148,7 +147,7 @@ static struct ahc_tmode_tstate*
 static void            ahc_free_tstate(struct ahc_softc *ahc,
                                        u_int scsi_id, char channel, int force);
 #endif
-static struct ahc_syncrate*
+static const struct ahc_syncrate*
                        ahc_devlimited_syncrate(struct ahc_softc *ahc,
                                                struct ahc_initiator_tinfo *,
                                                u_int *period,
@@ -204,9 +203,9 @@ static void         ahc_setup_target_msgin(struct ahc_softc *ahc,
 #endif
 
 static bus_dmamap_callback_t   ahc_dmamap_cb; 
-static void                    ahc_build_free_scb_list(struct ahc_softc *ahc);
-static int                     ahc_init_scbdata(struct ahc_softc *ahc);
-static void                    ahc_fini_scbdata(struct ahc_softc *ahc);
+static void            ahc_build_free_scb_list(struct ahc_softc *ahc);
+static int             ahc_init_scbdata(struct ahc_softc *ahc);
+static void            ahc_fini_scbdata(struct ahc_softc *ahc);
 static void            ahc_qinfifo_requeue(struct ahc_softc *ahc,
                                            struct scb *prev_scb,
                                            struct scb *scb);
@@ -222,7 +221,7 @@ static void         ahc_dumpseq(struct ahc_softc *ahc);
 #endif
 static int             ahc_loadseq(struct ahc_softc *ahc);
 static int             ahc_check_patch(struct ahc_softc *ahc,
-                                       struct patch **start_patch,
+                                       const struct patch **start_patch,
                                        u_int start_instr, u_int *skip_addr);
 static void            ahc_download_instr(struct ahc_softc *ahc,
                                           u_int instrptr, uint8_t *dconsts);
@@ -237,11 +236,582 @@ static void              ahc_update_scsiid(struct ahc_softc *ahc,
 static int             ahc_handle_target_cmd(struct ahc_softc *ahc,
                                              struct target_cmd *cmd);
 #endif
+
+static u_int           ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl);
+static void            ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl);
+static void            ahc_busy_tcl(struct ahc_softc *ahc,
+                                    u_int tcl, u_int busyid);
+
+/************************** SCB and SCB queue management **********************/
+static void            ahc_run_untagged_queues(struct ahc_softc *ahc);
+static void            ahc_run_untagged_queue(struct ahc_softc *ahc,
+                                              struct scb_tailq *queue);
+
+/****************************** Initialization ********************************/
+static void             ahc_alloc_scbs(struct ahc_softc *ahc);
+static void             ahc_shutdown(void *arg);
+
+/*************************** Interrupt Services *******************************/
+static void            ahc_clear_intstat(struct ahc_softc *ahc);
+static void            ahc_run_qoutfifo(struct ahc_softc *ahc);
+#ifdef AHC_TARGET_MODE
+static void            ahc_run_tqinfifo(struct ahc_softc *ahc, int paused);
+#endif
+static void            ahc_handle_brkadrint(struct ahc_softc *ahc);
+static void            ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat);
+static void            ahc_handle_scsiint(struct ahc_softc *ahc,
+                                          u_int intstat);
+static void            ahc_clear_critical_section(struct ahc_softc *ahc);
+
+/***************************** Error Recovery *********************************/
+static void            ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb);
+static int             ahc_abort_scbs(struct ahc_softc *ahc, int target,
+                                      char channel, int lun, u_int tag,
+                                      role_t role, uint32_t status);
+static void            ahc_calc_residual(struct ahc_softc *ahc,
+                                         struct scb *scb);
+
+/*********************** Untagged Transaction Routines ************************/
+static inline void     ahc_freeze_untagged_queues(struct ahc_softc *ahc);
+static inline void     ahc_release_untagged_queues(struct ahc_softc *ahc);
+
+/*
+ * Block our completion routine from starting the next untagged
+ * transaction for this target or target lun.
+ */
+static inline void
+ahc_freeze_untagged_queues(struct ahc_softc *ahc)
+{
+       if ((ahc->flags & AHC_SCB_BTT) == 0)
+               ahc->untagged_queue_lock++;
+}
+
+/*
+ * Allow the next untagged transaction for this target or target lun
+ * to be executed.  We use a counting semaphore to allow the lock
+ * to be acquired recursively.  Once the count drops to zero, the
+ * transaction queues will be run.
+ */
+static inline void
+ahc_release_untagged_queues(struct ahc_softc *ahc)
+{
+       if ((ahc->flags & AHC_SCB_BTT) == 0) {
+               ahc->untagged_queue_lock--;
+               if (ahc->untagged_queue_lock == 0)
+                       ahc_run_untagged_queues(ahc);
+       }
+}
+
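The freeze/release pair above behaves like a counting semaphore: the untagged queues only run again once every nested freeze has been matched by a release. A minimal stand-alone model of that behaviour (hypothetical names, not part of this patch) looks like:

#include <stdio.h>

/* Model of the untagged-queue lock: queues run only when the count returns to zero. */
struct model {
	int lock_count;
};

static void run_queues(struct model *m)
{
	printf("running untagged queues\n");
}

static void freeze(struct model *m)
{
	m->lock_count++;
}

static void release(struct model *m)
{
	if (--m->lock_count == 0)
		run_queues(m);
}

int main(void)
{
	struct model m = { 0 };

	freeze(&m);
	freeze(&m);	/* nested acquisition is allowed */
	release(&m);	/* count is still 1: queues stay blocked */
	release(&m);	/* count reaches 0: queues run here */
	return 0;
}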
 /************************* Sequencer Execution Control ************************/
 /*
- * Restart the sequencer program from address zero
+ * Work around any chip bugs related to halting sequencer execution.
+ * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
+ * reading a register that will set this signal and deassert it.
+ * Without this workaround, if the chip is paused, by an interrupt or
+ * manual pause while accessing scb ram, accesses to certain registers
+ * will hang the system (infinite pci retries).
+ */
+static void
+ahc_pause_bug_fix(struct ahc_softc *ahc)
+{
+       if ((ahc->features & AHC_ULTRA2) != 0)
+               (void)ahc_inb(ahc, CCSCBCTL);
+}
+
+/*
+ * Determine whether the sequencer has halted code execution.
+ * Returns non-zero status if the sequencer is stopped.
+ */
+int
+ahc_is_paused(struct ahc_softc *ahc)
+{
+       return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
+}
+
+/*
+ * Request that the sequencer stop and wait, indefinitely, for it
+ * to stop.  The sequencer will only acknowledge that it is paused
+ * once it has reached an instruction boundary and PAUSEDIS is
+ * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
+ * for critical sections.
+ */
+void
+ahc_pause(struct ahc_softc *ahc)
+{
+       ahc_outb(ahc, HCNTRL, ahc->pause);
+
+       /*
+        * Since the sequencer can disable pausing in a critical section, we
+        * must loop until it actually stops.
+        */
+       while (ahc_is_paused(ahc) == 0)
+               ;
+
+       ahc_pause_bug_fix(ahc);
+}
+
+/*
+ * Allow the sequencer to continue program execution.
+ * We check here to ensure that no additional interrupt
+ * sources that would cause the sequencer to halt have been
+ * asserted.  If, for example, a SCSI bus reset is detected
+ * while we are fielding a different, pausing, interrupt type,
+ * we don't want to release the sequencer before going back
+ * into our interrupt handler and dealing with this new
+ * condition.
+ */
+void
+ahc_unpause(struct ahc_softc *ahc)
+{
+       if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
+               ahc_outb(ahc, HCNTRL, ahc->unpause);
+}
+
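A typical caller pauses the sequencer, touches chip registers, and then lets it run again; the loop in ahc_pause() guarantees the chip has really stopped before the register access happens. A hedged sketch of that pattern (error handling omitted, and the particular register written is only illustrative):

/* Sketch only: the usual pause / poke registers / unpause pattern. */
static void example_quiesce_and_poke(struct ahc_softc *ahc)
{
	ahc_pause(ahc);			/* spins until HCNTRL reports PAUSE */
	ahc_outb(ahc, SCSISEQ, 0);	/* illustrative register write while halted */
	ahc_unpause(ahc);		/* resumes unless another pausing interrupt is pending */
}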
+/************************** Memory mapping routines ***************************/
+static struct ahc_dma_seg *
+ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
+{
+       int sg_index;
+
+       sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
+       /* sg_list_phys points to entry 1, not 0 */
+       sg_index++;
+
+       return (&scb->sg_list[sg_index]);
+}
+
+static uint32_t
+ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
+{
+       int sg_index;
+
+       /* sg_list_phys points to entry 1, not 0 */
+       sg_index = sg - &scb->sg_list[1];
+
+       return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
+}
+
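Because sg_list_phys points at element 1 of the scatter-gather array rather than element 0, the two helpers above add and subtract one when converting. A small stand-alone model with made-up addresses (8-byte segments assumed, matching the two 32-bit fields of struct ahc_dma_seg) shows the round trip:

#include <assert.h>
#include <stdint.h>

#define SEG_SIZE 8			/* assumed sizeof(struct ahc_dma_seg) */

int main(void)
{
	uint32_t sg_list_phys = 0x1008;	/* bus address of element 1, not element 0 */

	/* virtual index 3 -> bus address: distances are measured from element 1 */
	uint32_t bus = sg_list_phys + (3 - 1) * SEG_SIZE;
	assert(bus == 0x1018);

	/* bus address -> virtual index: divide, then step past the skipped element 0 */
	uint32_t index = (bus - sg_list_phys) / SEG_SIZE + 1;
	assert(index == 3);
	return 0;
}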
+static uint32_t
+ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
+{
+       return (ahc->scb_data->hscb_busaddr
+               + (sizeof(struct hardware_scb) * index));
+}
+
+static void
+ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
+{
+       ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
+                       ahc->scb_data->hscb_dmamap,
+                       /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
+                       /*len*/sizeof(*scb->hscb), op);
+}
+
+void
+ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
+{
+       if (scb->sg_count == 0)
+               return;
+
+       ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
+                       /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
+                               * sizeof(struct ahc_dma_seg),
+                       /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
+}
+
+#ifdef AHC_TARGET_MODE
+static uint32_t
+ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
+{
+       return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
+}
+#endif
+
+/*********************** Miscelaneous Support Functions ***********************/
+/*
+ * Determine whether the sequencer reported a residual
+ * for this SCB/transaction.
+ */
+static void
+ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
+{
+       uint32_t sgptr;
+
+       sgptr = ahc_le32toh(scb->hscb->sgptr);
+       if ((sgptr & SG_RESID_VALID) != 0)
+               ahc_calc_residual(ahc, scb);
+}
+
+/*
+ * Return pointers to the transfer negotiation information
+ * for the specified our_id/remote_id pair.
+ */
+struct ahc_initiator_tinfo *
+ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
+                   u_int remote_id, struct ahc_tmode_tstate **tstate)
+{
+       /*
+        * Transfer data structures are stored from the perspective
+        * of the target role.  Since the parameters for a connection
+        * in the initiator role to a given target are the same as
+        * when the roles are reversed, we pretend we are the target.
+        */
+       if (channel == 'B')
+               our_id += 8;
+       *tstate = ahc->enabled_targets[our_id];
+       return (&(*tstate)->transinfo[remote_id]);
+}
+
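The "pretend we are the target" indexing means both channels share one flat negotiation table: channel 'A' IDs occupy the low half and channel 'B' IDs the high half. A stand-alone model of just the index computation (a 16-slot table is assumed here, matching the twin-channel controllers this driver supports):

#include <assert.h>

/* Model: one flat array of 16 tstate slots, channel 'A' IDs 0-7 in the
 * low half, channel 'B' IDs 0-7 in the high half. */
static int tstate_index(char channel, int our_id)
{
	return (channel == 'B') ? our_id + 8 : our_id;
}

int main(void)
{
	assert(tstate_index('A', 3) == 3);
	assert(tstate_index('B', 3) == 11);	/* second channel maps to slots 8-15 */
	return 0;
}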
+uint16_t
+ahc_inw(struct ahc_softc *ahc, u_int port)
+{
+       uint16_t r = ahc_inb(ahc, port+1) << 8;
+       return r | ahc_inb(ahc, port);
+}
+
+void
+ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
+{
+       ahc_outb(ahc, port, value & 0xFF);
+       ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
+}
+
+uint32_t
+ahc_inl(struct ahc_softc *ahc, u_int port)
+{
+       return ((ahc_inb(ahc, port))
+             | (ahc_inb(ahc, port+1) << 8)
+             | (ahc_inb(ahc, port+2) << 16)
+             | (ahc_inb(ahc, port+3) << 24));
+}
+
+void
+ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
+{
+       ahc_outb(ahc, port, (value) & 0xFF);
+       ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
+       ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
+       ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
+}
+
+uint64_t
+ahc_inq(struct ahc_softc *ahc, u_int port)
+{
+       return ((ahc_inb(ahc, port))
+             | (ahc_inb(ahc, port+1) << 8)
+             | (ahc_inb(ahc, port+2) << 16)
+             | (ahc_inb(ahc, port+3) << 24)
+             | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
+             | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
+             | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
+             | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
+}
+
+void
+ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
+{
+       ahc_outb(ahc, port, value & 0xFF);
+       ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
+       ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
+       ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
+       ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
+       ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
+       ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
+       ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
+}
+
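All of the wide accessors above are built from single-byte ahc_inb()/ahc_outb() calls, least-significant byte at the lowest port address. A stand-alone model over a fake 8-bit port array (purely illustrative, not driver code) behaves the same way:

#include <assert.h>
#include <stdint.h>

static uint8_t ports[8];			/* fake 8-bit register window */

static void fake_outb(unsigned port, uint8_t v)	{ ports[port] = v; }
static uint8_t fake_inb(unsigned port)		{ return ports[port]; }

static void fake_outw(unsigned port, uint16_t v)
{
	fake_outb(port,     v & 0xFF);		/* low byte first, like ahc_outw() */
	fake_outb(port + 1, (v >> 8) & 0xFF);
}

static uint16_t fake_inw(unsigned port)
{
	return fake_inb(port) | (fake_inb(port + 1) << 8);
}

int main(void)
{
	fake_outw(0, 0xBEEF);
	assert(ports[0] == 0xEF && ports[1] == 0xBE);	/* little-endian byte layout */
	assert(fake_inw(0) == 0xBEEF);
	return 0;
}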
+/*
+ * Get a free scb. If there are none, see if we can allocate a new SCB.
+ */
+struct scb *
+ahc_get_scb(struct ahc_softc *ahc)
+{
+       struct scb *scb;
+
+       if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
+               ahc_alloc_scbs(ahc);
+               scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
+               if (scb == NULL)
+                       return (NULL);
+       }
+       SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
+       return (scb);
+}
+
+/*
+ * Return an SCB resource to the free list.
+ */
+void
+ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
+{
+       struct hardware_scb *hscb;
+
+       hscb = scb->hscb;
+       /* Clean up for the next user */
+       ahc->scb_data->scbindex[hscb->tag] = NULL;
+       scb->flags = SCB_FREE;
+       hscb->control = 0;
+
+       SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
+
+       /* Notify the OSM that a resource is now available. */
+       ahc_platform_scb_free(ahc, scb);
+}
+
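Together these two helpers give SCBs the usual allocate/submit/recycle life cycle. A hedged sketch of how a caller would typically use them (locking and real SCB setup elided; the field names follow the structures used in this file):

/* Sketch only: the allocate -> queue -> (completion frees) life cycle. */
static int example_submit(struct ahc_softc *ahc)
{
	struct scb *scb;

	scb = ahc_get_scb(ahc);
	if (scb == NULL)
		return -1;		/* free list empty and no more SCBs could be allocated */

	scb->hscb->control = 0;		/* caller fills in the hardware SCB here */
	ahc_queue_scb(ahc, scb);	/* completion path hands it back via ahc_free_scb() */
	return 0;
}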
+struct scb *
+ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
+{
+       struct scb* scb;
+
+       scb = ahc->scb_data->scbindex[tag];
+       if (scb != NULL)
+               ahc_sync_scb(ahc, scb,
+                            BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
+       return (scb);
+}
+
+static void
+ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
+{
+       struct hardware_scb *q_hscb;
+       u_int  saved_tag;
+
+       /*
+        * Our queuing method is a bit tricky.  The card
+        * knows in advance which HSCB to download, and we
+        * can't disappoint it.  To achieve this, the next
+        * SCB to download is saved off in ahc->next_queued_scb.
+        * When we are called to queue "an arbitrary scb",
+        * we copy the contents of the incoming HSCB to the one
+        * the sequencer knows about, swap HSCB pointers and
+        * finally assign the SCB to the tag indexed location
+        * in the scb_array.  This makes sure that we can still
+        * locate the correct SCB by SCB_TAG.
+        */
+       q_hscb = ahc->next_queued_scb->hscb;
+       saved_tag = q_hscb->tag;
+       memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
+       if ((scb->flags & SCB_CDB32_PTR) != 0) {
+               q_hscb->shared_data.cdb_ptr =
+                   ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
+                             + offsetof(struct hardware_scb, cdb32));
+       }
+       q_hscb->tag = saved_tag;
+       q_hscb->next = scb->hscb->tag;
+
+       /* Now swap HSCB pointers. */
+       ahc->next_queued_scb->hscb = scb->hscb;
+       scb->hscb = q_hscb;
+
+       /* Now define the mapping from tag to SCB in the scbindex */
+       ahc->scb_data->scbindex[scb->hscb->tag] = scb;
+}
+
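The swap above preserves two invariants: the tag stored in each hardware SCB never changes, and the tag-to-SCB index always points at the software SCB that currently owns that hardware slot. A miniature stand-alone model of the copy-keep-tag-then-swap step (two fake HSCBs, illustrative only):

#include <assert.h>
#include <string.h>

struct fake_hscb { int tag; char cmd[16]; };
struct fake_scb  { struct fake_hscb *hscb; };

int main(void)
{
	struct fake_hscb h0 = { 0, "" }, h1 = { 1, "" };
	struct fake_scb next_queued = { &h0 };	/* the HSCB the card expects to download next */
	struct fake_scb incoming    = { &h1 };
	strcpy(incoming.hscb->cmd, "READ");

	/* Copy the payload into the card's expected HSCB, keep that HSCB's tag,
	 * then swap the pointers, as ahc_swap_with_next_hscb() does. */
	int saved_tag = next_queued.hscb->tag;
	memcpy(next_queued.hscb, incoming.hscb, sizeof(struct fake_hscb));
	next_queued.hscb->tag = saved_tag;

	struct fake_hscb *tmp = next_queued.hscb;
	next_queued.hscb = incoming.hscb;
	incoming.hscb = tmp;

	assert(incoming.hscb->tag == 0);		/* incoming SCB now owns the downloaded slot */
	assert(strcmp(incoming.hscb->cmd, "READ") == 0);
	assert(next_queued.hscb->tag == 1);		/* spare HSCB becomes the next one to queue */
	return 0;
}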
+/*
+ * Tell the sequencer about a new transaction to execute.
  */
 void
+ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
+{
+       ahc_swap_with_next_hscb(ahc, scb);
+
+       if (scb->hscb->tag == SCB_LIST_NULL
+        || scb->hscb->next == SCB_LIST_NULL)
+               panic("Attempt to queue invalid SCB tag %x:%x\n",
+                     scb->hscb->tag, scb->hscb->next);
+
+       /*
+        * Setup data "oddness".
+        */
+       scb->hscb->lun &= LID;
+       if (ahc_get_transfer_length(scb) & 0x1)
+               scb->hscb->lun |= SCB_XFERLEN_ODD;
+
+       /*
+        * Keep a history of SCBs we've downloaded in the qinfifo.
+        */
+       ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
+
+       /*
+        * Make sure our data is consistent from the
+        * perspective of the adapter.
+        */
+       ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+       /* Tell the adapter about the newly queued SCB */
+       if ((ahc->features & AHC_QUEUE_REGS) != 0) {
+               ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
+       } else {
+               if ((ahc->features & AHC_AUTOPAUSE) == 0)
+                       ahc_pause(ahc);
+               ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
+               if ((ahc->features & AHC_AUTOPAUSE) == 0)
+                       ahc_unpause(ahc);
+       }
+}
+
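One detail worth noting in ahc_queue_scb(): the qinfifo history index is a single byte in this driver's softc, so the post-increment implements a 256-entry ring with no explicit modulo; the index simply wraps from 255 back to 0. A one-assertion model of that property (assuming the byte-wide index, as declared here):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t qinfifo[256];
	uint8_t qinfifonext = 255;

	qinfifo[qinfifonext++] = 42;	/* writes slot 255 ... */
	assert(qinfifonext == 0);	/* ... and the byte-wide index wraps to 0 */
	return 0;
}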
+struct scsi_sense_data *
+ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
+{
+       int offset;
+
+       offset = scb - ahc->scb_data->scbarray;
+       return (&ahc->scb_data->sense[offset]);
+}
+
+static uint32_t
+ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
+{
+       int offset;
+
+       offset = scb - ahc->scb_data->scbarray;
+       return (ahc->scb_data->sense_busaddr
+             + (offset * sizeof(struct scsi_sense_data)));
+}
+
+/************************** Interrupt Processing ******************************/
+static void
+ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
+{
+       ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
+                       /*offset*/0, /*len*/256, op);
+}
+
+static void
+ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
+{
+#ifdef AHC_TARGET_MODE
+       if ((ahc->flags & AHC_TARGETROLE) != 0) {
+               ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
+                               ahc->shared_data_dmamap,
+                               ahc_targetcmd_offset(ahc, 0),
+                               sizeof(struct target_cmd) * AHC_TMODE_CMDS,
+                               op);
+       }
+#endif
+}
+
+/*
+ * See if the firmware has posted any completed commands
+ * into our in-core command complete fifos.
+ */
+#define AHC_RUN_QOUTFIFO 0x1
+#define AHC_RUN_TQINFIFO 0x2
+static u_int
+ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
+{
+       u_int retval;
+
+       retval = 0;
+       ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
+                       /*offset*/ahc->qoutfifonext, /*len*/1,
+                       BUS_DMASYNC_POSTREAD);
+       if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
+               retval |= AHC_RUN_QOUTFIFO;
+#ifdef AHC_TARGET_MODE
+       if ((ahc->flags & AHC_TARGETROLE) != 0
+        && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
+               ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
+                               ahc->shared_data_dmamap,
+                               ahc_targetcmd_offset(ahc, ahc->tqinfifofnext),
+                               /*len*/sizeof(struct target_cmd),
+                               BUS_DMASYNC_POSTREAD);
+               if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
+                       retval |= AHC_RUN_TQINFIFO;
+       }
+#endif
+       return (retval);
+}
+
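The check above avoids a slow PCI read of INTSTAT by looking at the next slot of the in-core completion ring: the sequencer writes valid tags there and the host writes SCB_LIST_NULL back as it consumes them, so a non-NULL entry means completions are pending. A stand-alone model of that handshake (SCB_LIST_NULL assumed to be 0xFF, as this driver defines it):

#include <assert.h>
#include <stdint.h>

#define SCB_LIST_NULL 0xFF		/* assumed "empty slot" marker */

int main(void)
{
	uint8_t qoutfifo[256];
	uint8_t next = 0;

	for (int i = 0; i < 256; i++)
		qoutfifo[i] = SCB_LIST_NULL;

	/* Nothing posted yet: the host sees no completions pending. */
	assert(qoutfifo[next] == SCB_LIST_NULL);

	/* The sequencer completes SCB tag 7 by writing it into the ring. */
	qoutfifo[0] = 7;
	assert(qoutfifo[next] != SCB_LIST_NULL);	/* cheap "is there work?" check */

	/* The host consumes the entry and marks the slot empty again. */
	qoutfifo[next++] = SCB_LIST_NULL;
	return 0;
}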
+/*
+ * Catch an interrupt from the adapter
+ */
+int
+ahc_intr(struct ahc_softc *ahc)
+{
+       u_int   intstat;
+
+       if ((ahc->pause & INTEN) == 0) {
+               /*
+                * Our interrupt is not enabled on the chip
+                * and may be disabled for re-entrancy reasons,
+                * so just return.  This is likely just a shared
+                * interrupt.
+                */
+               return (0);
+       }
+       /*
+        * Instead of directly reading the interrupt status register,
+        * infer the cause of the interrupt by checking our in-core
+        * completion queues.  This avoids a costly PCI bus read in
+        * most cases.
+        */
+       if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
+        && (ahc_check_cmdcmpltqueues(ahc) != 0))
+               intstat = CMDCMPLT;
+       else {
+               intstat = ahc_inb(ahc, INTSTAT);
+       }
+
+       if ((intstat & INT_PEND) == 0) {
+#if AHC_PCI_CONFIG > 0
+               if (ahc->unsolicited_ints > 500) {
+                       ahc->unsolicited_ints = 0;
+                       if ((ahc->chip & AHC_PCI) != 0
+                        && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
+                               ahc->bus_intr(ahc);
+               }
+#endif
+               ahc->unsolicited_ints++;
+               return (0);
+       }
+       ahc->unsolicited_ints = 0;
+
+       if (intstat & CMDCMPLT) {
+               ahc_outb(ahc, CLRINT, CLRCMDINT);
+
+               /*
+                * Ensure that the chip sees that we've cleared
+                * this interrupt before we walk the output fifo.
+                * Otherwise, we may, due to posted bus writes,
+                * clear the interrupt after we finish the scan,
+                * and after the sequencer has added new entries
+                * and asserted the interrupt again.
+                */
+               ahc_flush_device_writes(ahc);
+               ahc_run_qoutfifo(ahc);
+#ifdef AHC_TARGET_MODE
+               if ((ahc->flags & AHC_TARGETROLE) != 0)
+                       ahc_run_tqinfifo(ahc, /*paused*/FALSE);
+#endif
+       }
+
+       /*
+        * Handle statuses that may invalidate our cached
+        * copy of INTSTAT separately.
+        */
+       if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
+               /* Hot eject.  Do nothing */
+       } else if (intstat & BRKADRINT) {
+               ahc_handle_brkadrint(ahc);
+       } else if ((intstat & (SEQINT|SCSIINT)) != 0) {
+
+               ahc_pause_bug_fix(ahc);
+
+               if ((intstat & SEQINT) != 0)
+                       ahc_handle_seqint(ahc, intstat);
+
+               if ((intstat & SCSIINT) != 0)
+                       ahc_handle_scsiint(ahc, intstat);
+       }
+       return (1);
+}
+
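ahc_intr() returns 0 when the chip raised no interrupt, which lets the OS-specific layer report a shared interrupt line as not-ours. A hedged sketch of a Linux-style wrapper (the real OSM glue lives elsewhere in this driver; the handler name here is illustrative and the driver's own headers are assumed to be included):

#include <linux/interrupt.h>

/* Sketch only: translate ahc_intr()'s 0/1 result into IRQ_NONE/IRQ_HANDLED
 * so a shared line can be passed on to the other devices using it. */
static irqreturn_t example_ahc_isr(int irq, void *dev_id)
{
	struct ahc_softc *ahc = dev_id;

	return ahc_intr(ahc) ? IRQ_HANDLED : IRQ_NONE;
}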
+/************************* Sequencer Execution Control ************************/
+/*
+ * Restart the sequencer program from address zero
+ */
+static void
 ahc_restart(struct ahc_softc *ahc)
 {
 
@@ -302,7 +872,7 @@ ahc_restart(struct ahc_softc *ahc)
 }
 
 /************************* Input/Output Queues ********************************/
-void
+static void
 ahc_run_qoutfifo(struct ahc_softc *ahc)
 {
        struct scb *scb;
@@ -349,7 +919,7 @@ ahc_run_qoutfifo(struct ahc_softc *ahc)
        }
 }
 
-void
+static void
 ahc_run_untagged_queues(struct ahc_softc *ahc)
 {
        int i;
@@ -358,7 +928,7 @@ ahc_run_untagged_queues(struct ahc_softc *ahc)
                ahc_run_untagged_queue(ahc, &ahc->untagged_queues[i]);
 }
 
-void
+static void
 ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
 {
        struct scb *scb;
@@ -374,7 +944,7 @@ ahc_run_untagged_queue(struct ahc_softc *ahc, struct scb_tailq *queue)
 }
 
 /************************* Interrupt Handling *********************************/
-void
+static void
 ahc_handle_brkadrint(struct ahc_softc *ahc)
 {
        /*
@@ -403,7 +973,7 @@ ahc_handle_brkadrint(struct ahc_softc *ahc)
        ahc_shutdown(ahc);
 }
 
-void
+static void
 ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
 {
        struct scb *scb;
@@ -954,7 +1524,7 @@ unpause:
        ahc_unpause(ahc);
 }
 
-void
+static void
 ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
 {
        u_int   scb_index;
@@ -1407,7 +1977,7 @@ ahc_force_renegotiation(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
 }
 
 #define AHC_MAX_STEPS 2000
-void
+static void
 ahc_clear_critical_section(struct ahc_softc *ahc)
 {
        int     stepping;
@@ -1500,7 +2070,7 @@ ahc_clear_critical_section(struct ahc_softc *ahc)
 /*
  * Clear any pending interrupt status.
  */
-void
+static void
 ahc_clear_intstat(struct ahc_softc *ahc)
 {
        /* Clear any interrupt conditions this may have caused */
@@ -1519,7 +2089,8 @@ ahc_clear_intstat(struct ahc_softc *ahc)
 uint32_t ahc_debug = AHC_DEBUG_OPTS;
 #endif
 
-void
+#if 0 /* unused */
+static void
 ahc_print_scb(struct scb *scb)
 {
        int i;
@@ -1551,6 +2122,7 @@ ahc_print_scb(struct scb *scb)
                }
        }
 }
+#endif
 
 /************************* Transfer Negotiation *******************************/
 /*
@@ -1634,7 +2206,7 @@ ahc_free_tstate(struct ahc_softc *ahc, u_int scsi_id, char channel, int force)
  * by the capabilities of the bus connectivity of and sync settings for
  * the target.
  */
-struct ahc_syncrate *
+const struct ahc_syncrate *
 ahc_devlimited_syncrate(struct ahc_softc *ahc,
                        struct ahc_initiator_tinfo *tinfo,
                        u_int *period, u_int *ppr_options, role_t role)
@@ -1689,11 +2261,11 @@ ahc_devlimited_syncrate(struct ahc_softc *ahc,
  * Return the period and offset that should be sent to the target
  * if this was the beginning of an SDTR.
  */
-struct ahc_syncrate *
+const struct ahc_syncrate *
 ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
                  u_int *ppr_options, u_int maxsync)
 {
-       struct ahc_syncrate *syncrate;
+       const struct ahc_syncrate *syncrate;
 
        if ((ahc->features & AHC_DT) == 0)
                *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
@@ -1768,7 +2340,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
 u_int
 ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
 {
-       struct ahc_syncrate *syncrate;
+       const struct ahc_syncrate *syncrate;
 
        if ((ahc->features & AHC_ULTRA2) != 0)
                scsirate &= SXFR_ULTRA2;
@@ -1806,10 +2378,10 @@ ahc_find_period(struct ahc_softc *ahc, u_int scsirate, u_int maxsync)
  * Truncate the given synchronous offset to a value the
  * current adapter type and syncrate are capable of.
  */
-void
+static void
 ahc_validate_offset(struct ahc_softc *ahc,
                    struct ahc_initiator_tinfo *tinfo,
-                   struct ahc_syncrate *syncrate,
+                   const struct ahc_syncrate *syncrate,
                    u_int *offset, int wide, role_t role)
 {
        u_int maxoffset;
@@ -1838,7 +2410,7 @@ ahc_validate_offset(struct ahc_softc *ahc,
  * Truncate the given transfer width parameter to a value the
  * current adapter type is capable of.
  */
-void
+static void
 ahc_validate_width(struct ahc_softc *ahc, struct ahc_initiator_tinfo *tinfo,
                   u_int *bus_width, role_t role)
 {
@@ -1913,7 +2485,7 @@ ahc_update_neg_request(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
  */
 void
 ahc_set_syncrate(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
-                struct ahc_syncrate *syncrate, u_int period,
+                const struct ahc_syncrate *syncrate, u_int period,
                 u_int offset, u_int ppr_options, u_int type, int paused)
 {
        struct  ahc_initiator_tinfo *tinfo;
@@ -2220,11 +2792,11 @@ ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
                            role);
 }
 
-struct ahc_phase_table_entry*
+static const struct ahc_phase_table_entry*
 ahc_lookup_phase_entry(int phase)
 {
-       struct ahc_phase_table_entry *entry;
-       struct ahc_phase_table_entry *last_entry;
+       const struct ahc_phase_table_entry *entry;
+       const struct ahc_phase_table_entry *last_entry;
 
        /*
         * num_phases doesn't include the default entry which
@@ -2390,7 +2962,7 @@ ahc_build_transfer_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
         */
        struct  ahc_initiator_tinfo *tinfo;
        struct  ahc_tmode_tstate *tstate;
-       struct  ahc_syncrate *rate;
+       const struct ahc_syncrate *rate;
        int     dowide;
        int     dosync;
        int     doppr;
@@ -2655,7 +3227,7 @@ proto_violation_reset:
  */
 static void
 ahc_handle_message_phase(struct ahc_softc *ahc)
-{ 
+{
        struct  ahc_devinfo devinfo;
        u_int   bus_phase;
        int     end_session;
@@ -3056,7 +3628,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
                switch (ahc->msgin_buf[2]) {
                case MSG_EXT_SDTR:
                {
-                       struct   ahc_syncrate *syncrate;
+                       const struct ahc_syncrate *syncrate;
                        u_int    period;
                        u_int    ppr_options;
                        u_int    offset;
@@ -3231,7 +3803,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
                }
                case MSG_EXT_PPR:
                {
-                       struct  ahc_syncrate *syncrate;
+                       const struct ahc_syncrate *syncrate;
                        u_int   period;
                        u_int   offset;
                        u_int   bus_width;
@@ -3984,7 +4556,7 @@ ahc_free(struct ahc_softc *ahc)
        return;
 }
 
-void
+static void
 ahc_shutdown(void *arg)
 {
        struct  ahc_softc *ahc;
@@ -4388,7 +4960,7 @@ ahc_fini_scbdata(struct ahc_softc *ahc)
                free(scb_data->scbarray, M_DEVBUF);
 }
 
-void
+static void
 ahc_alloc_scbs(struct ahc_softc *ahc)
 {
        struct scb_data *scb_data;
@@ -5121,7 +5693,7 @@ ahc_resume(struct ahc_softc *ahc)
  * Return the untagged transaction id for a given target/channel lun.
  * Optionally, clear the entry.
  */
-u_int
+static u_int
 ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
 {
        u_int scbid;
@@ -5142,7 +5714,7 @@ ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
        return (scbid);
 }
 
-void
+static void
 ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
 {
        u_int target_offset;
@@ -5160,7 +5732,7 @@ ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
        }
 }
 
-void
+static void
 ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
 {
        u_int target_offset;
@@ -5215,7 +5787,7 @@ ahc_match_scb(struct ahc_softc *ahc, struct scb *scb, int target,
        return match;
 }
 
-void
+static void
 ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
 {
        int     target;
@@ -5707,7 +6279,7 @@ ahc_add_curscb_to_free_list(struct ahc_softc *ahc)
  */
 static u_int
 ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
-{       
+{
        u_int curscb, next;
 
        /*
@@ -5756,7 +6328,7 @@ ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
  * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
  * is paused before it is called.
  */
-int
+static int
 ahc_abort_scbs(struct ahc_softc *ahc, int target, char channel,
               int lun, u_int tag, role_t role, uint32_t status)
 {
@@ -6078,7 +6650,7 @@ ahc_reset_channel(struct ahc_softc *ahc, char channel, int initiate_reset)
 /*
  * Calculate the residual for a just completed SCB.
  */
-void
+static void
 ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
 {
        struct hardware_scb *hscb;
@@ -6279,7 +6851,7 @@ ahc_loadseq(struct ahc_softc *ahc)
        struct  cs cs_table[num_critical_sections];
        u_int   begin_set[num_critical_sections];
        u_int   end_set[num_critical_sections];
-       struct  patch *cur_patch;
+       const struct patch *cur_patch;
        u_int   cs_count;
        u_int   cur_cs;
        u_int   i;
@@ -6384,11 +6956,11 @@ ahc_loadseq(struct ahc_softc *ahc)
 }
 
 static int
-ahc_check_patch(struct ahc_softc *ahc, struct patch **start_patch,
+ahc_check_patch(struct ahc_softc *ahc, const struct patch **start_patch,
                u_int start_instr, u_int *skip_addr)
 {
-       struct  patch *cur_patch;
-       struct  patch *last_patch;
+       const struct patch *cur_patch;
+       const struct patch *last_patch;
        u_int   num_patches;
 
        num_patches = ARRAY_SIZE(patches);
@@ -6447,7 +7019,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
        case AIC_OP_JE:
        case AIC_OP_JZ:
        {
-               struct patch *cur_patch;
+               const struct patch *cur_patch;
                int address_offset;
                u_int address;
                u_int skip_addr;
@@ -6545,7 +7117,7 @@ ahc_download_instr(struct ahc_softc *ahc, u_int instrptr, uint8_t *dconsts)
 }
 
 int
-ahc_print_register(ahc_reg_parse_entry_t *table, u_int num_entries,
+ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
                   const char *name, u_int address, u_int value,
                   u_int *cur_column, u_int wrap_point)
 {
@@ -7229,7 +7801,7 @@ ahc_update_scsiid(struct ahc_softc *ahc, u_int targid_mask)
                ahc_outb(ahc, SCSIID, scsiid);
 }
 
-void
+static void
 ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
 {
        struct target_cmd *cmd;
index cba2f23bbe7954974aab44408b480269d16bae92..09bf2f4d78d58d953a5b025bf3201aaf8fd76441 100644 (file)
 #define _AIC7XXX_INLINE_H_
 
 /************************* Sequencer Execution Control ************************/
-static __inline void ahc_pause_bug_fix(struct ahc_softc *ahc);
-static __inline int  ahc_is_paused(struct ahc_softc *ahc);
-static __inline void ahc_pause(struct ahc_softc *ahc);
-static __inline void ahc_unpause(struct ahc_softc *ahc);
-
-/*
- * Work around any chip bugs related to halting sequencer execution.
- * On Ultra2 controllers, we must clear the CIOBUS stretch signal by
- * reading a register that will set this signal and deassert it.
- * Without this workaround, if the chip is paused, by an interrupt or
- * manual pause while accessing scb ram, accesses to certain registers
- * will hang the system (infinite pci retries).
- */
-static __inline void
-ahc_pause_bug_fix(struct ahc_softc *ahc)
-{
-       if ((ahc->features & AHC_ULTRA2) != 0)
-               (void)ahc_inb(ahc, CCSCBCTL);
-}
-
-/*
- * Determine whether the sequencer has halted code execution.
- * Returns non-zero status if the sequencer is stopped.
- */
-static __inline int
-ahc_is_paused(struct ahc_softc *ahc)
-{
-       return ((ahc_inb(ahc, HCNTRL) & PAUSE) != 0);
-}
-
-/*
- * Request that the sequencer stop and wait, indefinitely, for it
- * to stop.  The sequencer will only acknowledge that it is paused
- * once it has reached an instruction boundary and PAUSEDIS is
- * cleared in the SEQCTL register.  The sequencer may use PAUSEDIS
- * for critical sections.
- */
-static __inline void
-ahc_pause(struct ahc_softc *ahc)
-{
-       ahc_outb(ahc, HCNTRL, ahc->pause);
-
-       /*
-        * Since the sequencer can disable pausing in a critical section, we
-        * must loop until it actually stops.
-        */
-       while (ahc_is_paused(ahc) == 0)
-               ;
-
-       ahc_pause_bug_fix(ahc);
-}
-
-/*
- * Allow the sequencer to continue program execution.
- * We check here to ensure that no additional interrupt
- * sources that would cause the sequencer to halt have been
- * asserted.  If, for example, a SCSI bus reset is detected
- * while we are fielding a different, pausing, interrupt type,
- * we don't want to release the sequencer before going back
- * into our interrupt handler and dealing with this new
- * condition.
- */
-static __inline void
-ahc_unpause(struct ahc_softc *ahc)
-{
-       if ((ahc_inb(ahc, INTSTAT) & (SCSIINT | SEQINT | BRKADRINT)) == 0)
-               ahc_outb(ahc, HCNTRL, ahc->unpause);
-}
-
-/*********************** Untagged Transaction Routines ************************/
-static __inline void   ahc_freeze_untagged_queues(struct ahc_softc *ahc);
-static __inline void   ahc_release_untagged_queues(struct ahc_softc *ahc);
-
-/*
- * Block our completion routine from starting the next untagged
- * transaction for this target or target lun.
- */
-static __inline void
-ahc_freeze_untagged_queues(struct ahc_softc *ahc)
-{
-       if ((ahc->flags & AHC_SCB_BTT) == 0)
-               ahc->untagged_queue_lock++;
-}
-
-/*
- * Allow the next untagged transaction for this target or target lun
- * to be executed.  We use a counting semaphore to allow the lock
- * to be acquired recursively.  Once the count drops to zero, the
- * transaction queues will be run.
- */
-static __inline void
-ahc_release_untagged_queues(struct ahc_softc *ahc)
-{
-       if ((ahc->flags & AHC_SCB_BTT) == 0) {
-               ahc->untagged_queue_lock--;
-               if (ahc->untagged_queue_lock == 0)
-                       ahc_run_untagged_queues(ahc);
-       }
-}
+int  ahc_is_paused(struct ahc_softc *ahc);
+void ahc_pause(struct ahc_softc *ahc);
+void ahc_unpause(struct ahc_softc *ahc);
 
 /************************** Memory mapping routines ***************************/
-static __inline struct ahc_dma_seg *
-                       ahc_sg_bus_to_virt(struct scb *scb,
-                                          uint32_t sg_busaddr);
-static __inline uint32_t
-                       ahc_sg_virt_to_bus(struct scb *scb,
-                                          struct ahc_dma_seg *sg);
-static __inline uint32_t
-                       ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index);
-static __inline void   ahc_sync_scb(struct ahc_softc *ahc,
-                                    struct scb *scb, int op);
-static __inline void   ahc_sync_sglist(struct ahc_softc *ahc,
-                                       struct scb *scb, int op);
-static __inline uint32_t
-                       ahc_targetcmd_offset(struct ahc_softc *ahc,
-                                            u_int index);
-
-static __inline struct ahc_dma_seg *
-ahc_sg_bus_to_virt(struct scb *scb, uint32_t sg_busaddr)
-{
-       int sg_index;
-
-       sg_index = (sg_busaddr - scb->sg_list_phys)/sizeof(struct ahc_dma_seg);
-       /* sg_list_phys points to entry 1, not 0 */
-       sg_index++;
-
-       return (&scb->sg_list[sg_index]);
-}
-
-static __inline uint32_t
-ahc_sg_virt_to_bus(struct scb *scb, struct ahc_dma_seg *sg)
-{
-       int sg_index;
-
-       /* sg_list_phys points to entry 1, not 0 */
-       sg_index = sg - &scb->sg_list[1];
-
-       return (scb->sg_list_phys + (sg_index * sizeof(*scb->sg_list)));
-}
-
-static __inline uint32_t
-ahc_hscb_busaddr(struct ahc_softc *ahc, u_int index)
-{
-       return (ahc->scb_data->hscb_busaddr
-               + (sizeof(struct hardware_scb) * index));
-}
-
-static __inline void
-ahc_sync_scb(struct ahc_softc *ahc, struct scb *scb, int op)
-{
-       ahc_dmamap_sync(ahc, ahc->scb_data->hscb_dmat,
-                       ahc->scb_data->hscb_dmamap,
-                       /*offset*/(scb->hscb - ahc->hscbs) * sizeof(*scb->hscb),
-                       /*len*/sizeof(*scb->hscb), op);
-}
-
-static __inline void
-ahc_sync_sglist(struct ahc_softc *ahc, struct scb *scb, int op)
-{
-       if (scb->sg_count == 0)
-               return;
-
-       ahc_dmamap_sync(ahc, ahc->scb_data->sg_dmat, scb->sg_map->sg_dmamap,
-                       /*offset*/(scb->sg_list - scb->sg_map->sg_vaddr)
-                               * sizeof(struct ahc_dma_seg),
-                       /*len*/sizeof(struct ahc_dma_seg) * scb->sg_count, op);
-}
-
-static __inline uint32_t
-ahc_targetcmd_offset(struct ahc_softc *ahc, u_int index)
-{
-       return (((uint8_t *)&ahc->targetcmds[index]) - ahc->qoutfifo);
-}
+void   ahc_sync_sglist(struct ahc_softc *ahc,
+                       struct scb *scb, int op);
 
 /******************************** Debugging ***********************************/
 static __inline char *ahc_name(struct ahc_softc *ahc);
@@ -231,420 +65,34 @@ ahc_name(struct ahc_softc *ahc)
 
 /*********************** Miscellaneous Support Functions ***********************/
 
-static __inline void   ahc_update_residual(struct ahc_softc *ahc,
-                                           struct scb *scb);
-static __inline struct ahc_initiator_tinfo *
-                       ahc_fetch_transinfo(struct ahc_softc *ahc,
-                                           char channel, u_int our_id,
-                                           u_int remote_id,
-                                           struct ahc_tmode_tstate **tstate);
-static __inline uint16_t
-                       ahc_inw(struct ahc_softc *ahc, u_int port);
-static __inline void   ahc_outw(struct ahc_softc *ahc, u_int port,
-                                u_int value);
-static __inline uint32_t
-                       ahc_inl(struct ahc_softc *ahc, u_int port);
-static __inline void   ahc_outl(struct ahc_softc *ahc, u_int port,
-                                uint32_t value);
-static __inline uint64_t
-                       ahc_inq(struct ahc_softc *ahc, u_int port);
-static __inline void   ahc_outq(struct ahc_softc *ahc, u_int port,
-                                uint64_t value);
-static __inline struct scb*
-                       ahc_get_scb(struct ahc_softc *ahc);
-static __inline void   ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
-static __inline void   ahc_swap_with_next_hscb(struct ahc_softc *ahc,
-                                               struct scb *scb);
-static __inline void   ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
-static __inline struct scsi_sense_data *
-                       ahc_get_sense_buf(struct ahc_softc *ahc,
-                                         struct scb *scb);
-static __inline uint32_t
-                       ahc_get_sense_bufaddr(struct ahc_softc *ahc,
-                                             struct scb *scb);
-
-/*
- * Determine whether the sequencer reported a residual
- * for this SCB/transaction.
- */
-static __inline void
-ahc_update_residual(struct ahc_softc *ahc, struct scb *scb)
-{
-       uint32_t sgptr;
-
-       sgptr = ahc_le32toh(scb->hscb->sgptr);
-       if ((sgptr & SG_RESID_VALID) != 0)
-               ahc_calc_residual(ahc, scb);
-}
-
-/*
- * Return pointers to the transfer negotiation information
- * for the specified our_id/remote_id pair.
- */
-static __inline struct ahc_initiator_tinfo *
-ahc_fetch_transinfo(struct ahc_softc *ahc, char channel, u_int our_id,
-                   u_int remote_id, struct ahc_tmode_tstate **tstate)
-{
-       /*
-        * Transfer data structures are stored from the perspective
-        * of the target role.  Since the parameters for a connection
-        * in the initiator role to a given target are the same as
-        * when the roles are reversed, we pretend we are the target.
-        */
-       if (channel == 'B')
-               our_id += 8;
-       *tstate = ahc->enabled_targets[our_id];
-       return (&(*tstate)->transinfo[remote_id]);
-}
-
-static __inline uint16_t
-ahc_inw(struct ahc_softc *ahc, u_int port)
-{
-       uint16_t r = ahc_inb(ahc, port+1) << 8;
-       return r | ahc_inb(ahc, port);
-}
-
-static __inline void
-ahc_outw(struct ahc_softc *ahc, u_int port, u_int value)
-{
-       ahc_outb(ahc, port, value & 0xFF);
-       ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
-}
-
-static __inline uint32_t
-ahc_inl(struct ahc_softc *ahc, u_int port)
-{
-       return ((ahc_inb(ahc, port))
-             | (ahc_inb(ahc, port+1) << 8)
-             | (ahc_inb(ahc, port+2) << 16)
-             | (ahc_inb(ahc, port+3) << 24));
-}
-
-static __inline void
-ahc_outl(struct ahc_softc *ahc, u_int port, uint32_t value)
-{
-       ahc_outb(ahc, port, (value) & 0xFF);
-       ahc_outb(ahc, port+1, ((value) >> 8) & 0xFF);
-       ahc_outb(ahc, port+2, ((value) >> 16) & 0xFF);
-       ahc_outb(ahc, port+3, ((value) >> 24) & 0xFF);
-}
-
-static __inline uint64_t
-ahc_inq(struct ahc_softc *ahc, u_int port)
-{
-       return ((ahc_inb(ahc, port))
-             | (ahc_inb(ahc, port+1) << 8)
-             | (ahc_inb(ahc, port+2) << 16)
-             | (ahc_inb(ahc, port+3) << 24)
-             | (((uint64_t)ahc_inb(ahc, port+4)) << 32)
-             | (((uint64_t)ahc_inb(ahc, port+5)) << 40)
-             | (((uint64_t)ahc_inb(ahc, port+6)) << 48)
-             | (((uint64_t)ahc_inb(ahc, port+7)) << 56));
-}
-
-static __inline void
-ahc_outq(struct ahc_softc *ahc, u_int port, uint64_t value)
-{
-       ahc_outb(ahc, port, value & 0xFF);
-       ahc_outb(ahc, port+1, (value >> 8) & 0xFF);
-       ahc_outb(ahc, port+2, (value >> 16) & 0xFF);
-       ahc_outb(ahc, port+3, (value >> 24) & 0xFF);
-       ahc_outb(ahc, port+4, (value >> 32) & 0xFF);
-       ahc_outb(ahc, port+5, (value >> 40) & 0xFF);
-       ahc_outb(ahc, port+6, (value >> 48) & 0xFF);
-       ahc_outb(ahc, port+7, (value >> 56) & 0xFF);
-}
-
-/*
- * Get a free scb. If there are none, see if we can allocate a new SCB.
- */
-static __inline struct scb *
-ahc_get_scb(struct ahc_softc *ahc)
-{
-       struct scb *scb;
-
-       if ((scb = SLIST_FIRST(&ahc->scb_data->free_scbs)) == NULL) {
-               ahc_alloc_scbs(ahc);
-               scb = SLIST_FIRST(&ahc->scb_data->free_scbs);
-               if (scb == NULL)
-                       return (NULL);
-       }
-       SLIST_REMOVE_HEAD(&ahc->scb_data->free_scbs, links.sle);
-       return (scb);
-}
-
-/*
- * Return an SCB resource to the free list.
- */
-static __inline void
-ahc_free_scb(struct ahc_softc *ahc, struct scb *scb)
-{       
-       struct hardware_scb *hscb;
-
-       hscb = scb->hscb;
-       /* Clean up for the next user */
-       ahc->scb_data->scbindex[hscb->tag] = NULL;
-       scb->flags = SCB_FREE;
-       hscb->control = 0;
-
-       SLIST_INSERT_HEAD(&ahc->scb_data->free_scbs, scb, links.sle);
-
-       /* Notify the OSM that a resource is now available. */
-       ahc_platform_scb_free(ahc, scb);
-}
-
-static __inline struct scb *
-ahc_lookup_scb(struct ahc_softc *ahc, u_int tag)
-{
-       struct scb* scb;
-
-       scb = ahc->scb_data->scbindex[tag];
-       if (scb != NULL)
-               ahc_sync_scb(ahc, scb,
-                            BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
-       return (scb);
-}
-
-static __inline void
-ahc_swap_with_next_hscb(struct ahc_softc *ahc, struct scb *scb)
-{
-       struct hardware_scb *q_hscb;
-       u_int  saved_tag;
-
-       /*
-        * Our queuing method is a bit tricky.  The card
-        * knows in advance which HSCB to download, and we
-        * can't disappoint it.  To achieve this, the next
-        * SCB to download is saved off in ahc->next_queued_scb.
-        * When we are called to queue "an arbitrary scb",
-        * we copy the contents of the incoming HSCB to the one
-        * the sequencer knows about, swap HSCB pointers and
-        * finally assign the SCB to the tag indexed location
-        * in the scb_array.  This makes sure that we can still
-        * locate the correct SCB by SCB_TAG.
-        */
-       q_hscb = ahc->next_queued_scb->hscb;
-       saved_tag = q_hscb->tag;
-       memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb));
-       if ((scb->flags & SCB_CDB32_PTR) != 0) {
-               q_hscb->shared_data.cdb_ptr =
-                   ahc_htole32(ahc_hscb_busaddr(ahc, q_hscb->tag)
-                             + offsetof(struct hardware_scb, cdb32));
-       }
-       q_hscb->tag = saved_tag;
-       q_hscb->next = scb->hscb->tag;
-
-       /* Now swap HSCB pointers. */
-       ahc->next_queued_scb->hscb = scb->hscb;
-       scb->hscb = q_hscb;
-
-       /* Now define the mapping from tag to SCB in the scbindex */
-       ahc->scb_data->scbindex[scb->hscb->tag] = scb;
-}
-
-/*
- * Tell the sequencer about a new transaction to execute.
- */
-static __inline void
-ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb)
-{
-       ahc_swap_with_next_hscb(ahc, scb);
-
-       if (scb->hscb->tag == SCB_LIST_NULL
-        || scb->hscb->next == SCB_LIST_NULL)
-               panic("Attempt to queue invalid SCB tag %x:%x\n",
-                     scb->hscb->tag, scb->hscb->next);
-
-       /*
-        * Setup data "oddness".
-        */
-       scb->hscb->lun &= LID;
-       if (ahc_get_transfer_length(scb) & 0x1)
-               scb->hscb->lun |= SCB_XFERLEN_ODD;
-
-       /*
-        * Keep a history of SCBs we've downloaded in the qinfifo.
-        */
-       ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
-
-       /*
-        * Make sure our data is consistent from the
-        * perspective of the adapter.
-        */
-       ahc_sync_scb(ahc, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
-
-       /* Tell the adapter about the newly queued SCB */
-       if ((ahc->features & AHC_QUEUE_REGS) != 0) {
-               ahc_outb(ahc, HNSCB_QOFF, ahc->qinfifonext);
-       } else {
-               if ((ahc->features & AHC_AUTOPAUSE) == 0)
-                       ahc_pause(ahc);
-               ahc_outb(ahc, KERNEL_QINPOS, ahc->qinfifonext);
-               if ((ahc->features & AHC_AUTOPAUSE) == 0)
-                       ahc_unpause(ahc);
-       }
-}
-
-static __inline struct scsi_sense_data *
-ahc_get_sense_buf(struct ahc_softc *ahc, struct scb *scb)
-{
-       int offset;
-
-       offset = scb - ahc->scb_data->scbarray;
-       return (&ahc->scb_data->sense[offset]);
-}
-
-static __inline uint32_t
-ahc_get_sense_bufaddr(struct ahc_softc *ahc, struct scb *scb)
-{
-       int offset;
-
-       offset = scb - ahc->scb_data->scbarray;
-       return (ahc->scb_data->sense_busaddr
-             + (offset * sizeof(struct scsi_sense_data)));
-}
+struct ahc_initiator_tinfo *
+       ahc_fetch_transinfo(struct ahc_softc *ahc,
+                           char channel, u_int our_id,
+                           u_int remote_id,
+                           struct ahc_tmode_tstate **tstate);
+uint16_t
+       ahc_inw(struct ahc_softc *ahc, u_int port);
+void   ahc_outw(struct ahc_softc *ahc, u_int port,
+                u_int value);
+uint32_t
+       ahc_inl(struct ahc_softc *ahc, u_int port);
+void   ahc_outl(struct ahc_softc *ahc, u_int port,
+                uint32_t value);
+uint64_t
+       ahc_inq(struct ahc_softc *ahc, u_int port);
+void   ahc_outq(struct ahc_softc *ahc, u_int port,
+                uint64_t value);
+struct scb*
+       ahc_get_scb(struct ahc_softc *ahc);
+void   ahc_free_scb(struct ahc_softc *ahc, struct scb *scb);
+struct scb *
+       ahc_lookup_scb(struct ahc_softc *ahc, u_int tag);
+void   ahc_queue_scb(struct ahc_softc *ahc, struct scb *scb);
+struct scsi_sense_data *
+       ahc_get_sense_buf(struct ahc_softc *ahc,
+                         struct scb *scb);
 
 /************************** Interrupt Processing ******************************/
-static __inline void   ahc_sync_qoutfifo(struct ahc_softc *ahc, int op);
-static __inline void   ahc_sync_tqinfifo(struct ahc_softc *ahc, int op);
-static __inline u_int  ahc_check_cmdcmpltqueues(struct ahc_softc *ahc);
-static __inline int    ahc_intr(struct ahc_softc *ahc);
-
-static __inline void
-ahc_sync_qoutfifo(struct ahc_softc *ahc, int op)
-{
-       ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
-                       /*offset*/0, /*len*/256, op);
-}
-
-static __inline void
-ahc_sync_tqinfifo(struct ahc_softc *ahc, int op)
-{
-#ifdef AHC_TARGET_MODE
-       if ((ahc->flags & AHC_TARGETROLE) != 0) {
-               ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
-                               ahc->shared_data_dmamap,
-                               ahc_targetcmd_offset(ahc, 0),
-                               sizeof(struct target_cmd) * AHC_TMODE_CMDS,
-                               op);
-       }
-#endif
-}
-
-/*
- * See if the firmware has posted any completed commands
- * into our in-core command complete fifos.
- */
-#define AHC_RUN_QOUTFIFO 0x1
-#define AHC_RUN_TQINFIFO 0x2
-static __inline u_int
-ahc_check_cmdcmpltqueues(struct ahc_softc *ahc)
-{
-       u_int retval;
-
-       retval = 0;
-       ahc_dmamap_sync(ahc, ahc->shared_data_dmat, ahc->shared_data_dmamap,
-                       /*offset*/ahc->qoutfifonext, /*len*/1,
-                       BUS_DMASYNC_POSTREAD);
-       if (ahc->qoutfifo[ahc->qoutfifonext] != SCB_LIST_NULL)
-               retval |= AHC_RUN_QOUTFIFO;
-#ifdef AHC_TARGET_MODE
-       if ((ahc->flags & AHC_TARGETROLE) != 0
-        && (ahc->flags & AHC_TQINFIFO_BLOCKED) == 0) {
-               ahc_dmamap_sync(ahc, ahc->shared_data_dmat,
-                               ahc->shared_data_dmamap,
-                               ahc_targetcmd_offset(ahc, ahc->tqinfifofnext),
-                               /*len*/sizeof(struct target_cmd),
-                               BUS_DMASYNC_POSTREAD);
-               if (ahc->targetcmds[ahc->tqinfifonext].cmd_valid != 0)
-                       retval |= AHC_RUN_TQINFIFO;
-       }
-#endif
-       return (retval);
-}
-
-/*
- * Catch an interrupt from the adapter
- */
-static __inline int
-ahc_intr(struct ahc_softc *ahc)
-{
-       u_int   intstat;
-
-       if ((ahc->pause & INTEN) == 0) {
-               /*
-                * Our interrupt is not enabled on the chip
-                * and may be disabled for re-entrancy reasons,
-                * so just return.  This is likely just a shared
-                * interrupt.
-                */
-               return (0);
-       }
-       /*
-        * Instead of directly reading the interrupt status register,
-        * infer the cause of the interrupt by checking our in-core
-        * completion queues.  This avoids a costly PCI bus read in
-        * most cases.
-        */
-       if ((ahc->flags & (AHC_ALL_INTERRUPTS|AHC_EDGE_INTERRUPT)) == 0
-        && (ahc_check_cmdcmpltqueues(ahc) != 0))
-               intstat = CMDCMPLT;
-       else {
-               intstat = ahc_inb(ahc, INTSTAT);
-       }
-
-       if ((intstat & INT_PEND) == 0) {
-#if AHC_PCI_CONFIG > 0
-               if (ahc->unsolicited_ints > 500) {
-                       ahc->unsolicited_ints = 0;
-                       if ((ahc->chip & AHC_PCI) != 0
-                        && (ahc_inb(ahc, ERROR) & PCIERRSTAT) != 0)
-                               ahc->bus_intr(ahc);
-               }
-#endif
-               ahc->unsolicited_ints++;
-               return (0);
-       }
-       ahc->unsolicited_ints = 0;
-
-       if (intstat & CMDCMPLT) {
-               ahc_outb(ahc, CLRINT, CLRCMDINT);
-
-               /*
-                * Ensure that the chip sees that we've cleared
-                * this interrupt before we walk the output fifo.
-                * Otherwise, we may, due to posted bus writes,
-                * clear the interrupt after we finish the scan,
-                * and after the sequencer has added new entries
-                * and asserted the interrupt again.
-                */
-               ahc_flush_device_writes(ahc);
-               ahc_run_qoutfifo(ahc);
-#ifdef AHC_TARGET_MODE
-               if ((ahc->flags & AHC_TARGETROLE) != 0)
-                       ahc_run_tqinfifo(ahc, /*paused*/FALSE);
-#endif
-       }
-
-       /*
-        * Handle statuses that may invalidate our cached
-        * copy of INTSTAT separately.
-        */
-       if (intstat == 0xFF && (ahc->features & AHC_REMOVABLE) != 0) {
-               /* Hot eject.  Do nothing */
-       } else if (intstat & BRKADRINT) {
-               ahc_handle_brkadrint(ahc);
-       } else if ((intstat & (SEQINT|SCSIINT)) != 0) {
-
-               ahc_pause_bug_fix(ahc);
-
-               if ((intstat & SEQINT) != 0)
-                       ahc_handle_seqint(ahc, intstat);
-
-               if ((intstat & SCSIINT) != 0)
-                       ahc_handle_scsiint(ahc, intstat);
-       }
-       return (1);
-}
+int    ahc_intr(struct ahc_softc *ahc);
 
 #endif  /* _AIC7XXX_INLINE_H_ */
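The long comment in the deleted ahc_swap_with_next_hscb() above describes the "swap with the next queued HSCB" trick: the card already knows which hardware SCB it will fetch next, so the host copies the new command into that pre-agreed slot, swaps the HSCB pointers, and re-registers the tag-to-SCB mapping. Below is a minimal, self-contained sketch of the same idea; every type and name in it is a hypothetical simplification for illustration, not the driver's real structures.

#include <stdio.h>

#define NUM_DESC 4

struct hw_desc {		/* stand-in for struct hardware_scb (what the card reads) */
	unsigned tag;
	unsigned payload;
};

struct sw_desc {		/* stand-in for struct scb (host-side bookkeeping) */
	struct hw_desc *hw;
};

static struct hw_desc hw_pool[NUM_DESC + 1];
static struct sw_desc *desc_index[NUM_DESC + 1];	/* tag -> descriptor lookup */
static struct sw_desc next_queued = { &hw_pool[NUM_DESC] };	/* slot the "card" fetches next */

static void queue_desc(struct sw_desc *d)
{
	struct hw_desc *q = next_queued.hw;	/* pre-agreed slot */
	unsigned saved_tag = q->tag;

	*q = *d->hw;				/* copy the new command into that slot */
	q->tag = saved_tag;			/* but keep the slot's own identity */

	next_queued.hw = d->hw;			/* swap: the old hw_desc becomes the next slot */
	d->hw = q;

	desc_index[d->hw->tag] = d;		/* tag still resolves to this descriptor */
}

int main(void)
{
	struct sw_desc d = { &hw_pool[0] };

	hw_pool[NUM_DESC].tag = NUM_DESC;	/* tag of the pre-agreed "next" slot */
	hw_pool[0].payload = 0xabcd;

	queue_desc(&d);
	printf("queued under tag %u, index lookup ok: %d\n",
	       d.hw->tag, desc_index[d.hw->tag] == &d);
	return 0;
}

As in the driver, the point of the swap is that a completion reported by tag still finds the right descriptor even though the underlying hardware slot changed hands.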
index 42ad48e09f022ae4409338a225a818c111527611..fd2b9785ff4fe9aa452cf86cb21ab14f31beb072 100644 (file)
@@ -388,14 +388,83 @@ static int  aic7xxx_setup(char *s);
 static int ahc_linux_unit;
 
 
+/************************** OS Utility Wrappers *******************************/
+void
+ahc_delay(long usec)
+{
+       /*
+        * udelay on Linux can have problems for
+        * multi-millisecond waits.  Wait at most
+        * 1024us per call.
+        */
+       while (usec > 0) {
+               udelay(usec % 1024);
+               usec -= 1024;
+       }
+}
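For reference, the same idea in isolation: split a long busy-wait into udelay()-sized pieces so no single udelay() call has to cover a multi-millisecond span. This is an illustrative stand-alone helper, not the driver's code.

#include <linux/delay.h>

/* Illustrative only: keep each individual udelay() call short (here at
 * most 1024us) while still covering the requested total wait. */
static void example_delay_us(unsigned long usec)
{
	while (usec > 1024) {
		udelay(1024);
		usec -= 1024;
	}
	udelay(usec);
}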
+
+/***************************** Low Level I/O **********************************/
+uint8_t
+ahc_inb(struct ahc_softc * ahc, long port)
+{
+       uint8_t x;
+
+       if (ahc->tag == BUS_SPACE_MEMIO) {
+               x = readb(ahc->bsh.maddr + port);
+       } else {
+               x = inb(ahc->bsh.ioport + port);
+       }
+       mb();
+       return (x);
+}
+
+void
+ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
+{
+       if (ahc->tag == BUS_SPACE_MEMIO) {
+               writeb(val, ahc->bsh.maddr + port);
+       } else {
+               outb(val, ahc->bsh.ioport + port);
+       }
+       mb();
+}
+
+void
+ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
+{
+       int i;
+
+       /*
+        * There is probably a more efficient way to do this on Linux
+        * but we don't use this for anything speed critical and this
+        * should work.
+        */
+       for (i = 0; i < count; i++)
+               ahc_outb(ahc, port, *array++);
+}
+
+void
+ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
+{
+       int i;
+
+       /*
+        * There is probably a more efficient way to do this on Linux
+        * but we don't use this for anything speed critical and this
+        * should work.
+        */
+       for (i = 0; i < count; i++)
+               *array++ = ahc_inb(ahc, port);
+}
+
 /********************************* Inlines ************************************/
-static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
+static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
 
-static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
+static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
                                      struct ahc_dma_seg *sg,
                                      dma_addr_t addr, bus_size_t len);
 
-static __inline void
+static void
 ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
 {
        struct scsi_cmnd *cmd;
@@ -406,7 +475,7 @@ ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
        scsi_dma_unmap(cmd);
 }
 
-static __inline int
+static int
 ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
                  struct ahc_dma_seg *sg, dma_addr_t addr, bus_size_t len)
 {
@@ -442,13 +511,11 @@ ahc_linux_info(struct Scsi_Host *host)
        bp = &buffer[0];
        ahc = *(struct ahc_softc **)host->hostdata;
        memset(bp, 0, sizeof(buffer));
-       strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev ");
-       strcat(bp, AIC7XXX_DRIVER_VERSION);
-       strcat(bp, "\n");
-       strcat(bp, "        <");
+       strcpy(bp, "Adaptec AIC7XXX EISA/VLB/PCI SCSI HBA DRIVER, Rev " AIC7XXX_DRIVER_VERSION "\n"
+                       "        <");
        strcat(bp, ahc->description);
-       strcat(bp, ">\n");
-       strcat(bp, "        ");
+       strcat(bp, ">\n"
+                       "        ");
        ahc_controller_info(ahc, ahc_info);
        strcat(bp, ahc_info);
        strcat(bp, "\n");
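The rewrite above works because C concatenates adjacent string literals at compile time, so the revision macro and the surrounding text collapse into a single constant string and the extra strcat() calls become unnecessary. A trivial illustration, using a hypothetical macro value:

#define EXAMPLE_DRIVER_VERSION "7.0"	/* hypothetical value for illustration */

static const char example_banner[] =
	"Adaptec AIC7XXX driver, Rev " EXAMPLE_DRIVER_VERSION "\n"
	"        <";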
@@ -964,7 +1031,7 @@ aic7xxx_setup(char *s)
        char   *p;
        char   *end;
 
-       static struct {
+       static const struct {
                const char *name;
                uint32_t *flag;
        } options[] = {
@@ -2317,7 +2384,7 @@ static void ahc_linux_set_period(struct scsi_target *starget, int period)
        unsigned int ppr_options = tinfo->goal.ppr_options;
        unsigned long flags;
        unsigned long offset = tinfo->goal.offset;
-       struct ahc_syncrate *syncrate;
+       const struct ahc_syncrate *syncrate;
 
        if (offset == 0)
                offset = MAX_OFFSET;
@@ -2361,7 +2428,7 @@ static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
        unsigned int ppr_options = 0;
        unsigned int period = 0;
        unsigned long flags;
-       struct ahc_syncrate *syncrate = NULL;
+       const struct ahc_syncrate *syncrate = NULL;
 
        ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
                            starget->channel + 'A', ROLE_INITIATOR);
@@ -2391,7 +2458,7 @@ static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
        unsigned int period = tinfo->goal.period;
        unsigned int width = tinfo->goal.width;
        unsigned long flags;
-       struct ahc_syncrate *syncrate;
+       const struct ahc_syncrate *syncrate;
 
        if (dt && spi_max_width(starget)) {
                ppr_options |= MSG_EXT_PPR_DT_REQ;
index b48dab447bde6cdc185fef846bba132677c3af1d..3f7238db35e5f129b693ed356e33d37cb6f9d49f 100644 (file)
@@ -365,7 +365,7 @@ struct ahc_platform_data {
 #define AHC_LINUX_NOIRQ        ((uint32_t)~0)
        uint32_t                 irq;           /* IRQ for this adapter */
        uint32_t                 bios_address;
-       uint32_t                 mem_busaddr;   /* Mem Base Addr */
+       resource_size_t          mem_busaddr;   /* Mem Base Addr */
 };
 
 /************************** OS Utility Wrappers *******************************/
@@ -375,82 +375,16 @@ struct ahc_platform_data {
 #define malloc(size, type, flags) kmalloc(size, flags)
 #define free(ptr, type) kfree(ptr)
 
-static __inline void ahc_delay(long);
-static __inline void
-ahc_delay(long usec)
-{
-       /*
-        * udelay on Linux can have problems for
-        * multi-millisecond waits.  Wait at most
-        * 1024us per call.
-        */
-       while (usec > 0) {
-               udelay(usec % 1024);
-               usec -= 1024;
-       }
-}
+void ahc_delay(long);
 
 
 /***************************** Low Level I/O **********************************/
-static __inline uint8_t ahc_inb(struct ahc_softc * ahc, long port);
-static __inline void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);
-static __inline void ahc_outsb(struct ahc_softc * ahc, long port,
-                              uint8_t *, int count);
-static __inline void ahc_insb(struct ahc_softc * ahc, long port,
-                              uint8_t *, int count);
-
-static __inline uint8_t
-ahc_inb(struct ahc_softc * ahc, long port)
-{
-       uint8_t x;
-
-       if (ahc->tag == BUS_SPACE_MEMIO) {
-               x = readb(ahc->bsh.maddr + port);
-       } else {
-               x = inb(ahc->bsh.ioport + port);
-       }
-       mb();
-       return (x);
-}
-
-static __inline void
-ahc_outb(struct ahc_softc * ahc, long port, uint8_t val)
-{
-       if (ahc->tag == BUS_SPACE_MEMIO) {
-               writeb(val, ahc->bsh.maddr + port);
-       } else {
-               outb(val, ahc->bsh.ioport + port);
-       }
-       mb();
-}
-
-static __inline void
-ahc_outsb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
-{
-       int i;
-
-       /*
-        * There is probably a more efficient way to do this on Linux
-        * but we don't use this for anything speed critical and this
-        * should work.
-        */
-       for (i = 0; i < count; i++)
-               ahc_outb(ahc, port, *array++);
-}
-
-static __inline void
-ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
-{
-       int i;
-
-       /*
-        * There is probably a more efficient way to do this on Linux
-        * but we don't use this for anything speed critical and this
-        * should work.
-        */
-       for (i = 0; i < count; i++)
-               *array++ = ahc_inb(ahc, port);
-}
+uint8_t ahc_inb(struct ahc_softc * ahc, long port);
+void ahc_outb(struct ahc_softc * ahc, long port, uint8_t val);
+void ahc_outsb(struct ahc_softc * ahc, long port,
+              uint8_t *, int count);
+void ahc_insb(struct ahc_softc * ahc, long port,
+              uint8_t *, int count);
 
 /**************************** Initialization **********************************/
 int            ahc_linux_register_host(struct ahc_softc *,
@@ -464,9 +398,6 @@ struct info_str {
        int pos;
 };
 
-void   ahc_format_transinfo(struct info_str *info,
-                            struct ahc_transinfo *tinfo);
-
 /******************************** Locking *************************************/
 /* Lock protecting internal data structures */
 
@@ -555,61 +486,12 @@ void                       ahc_linux_pci_exit(void);
 int                     ahc_pci_map_registers(struct ahc_softc *ahc);
 int                     ahc_pci_map_int(struct ahc_softc *ahc);
 
-static __inline uint32_t ahc_pci_read_config(ahc_dev_softc_t pci,
+uint32_t                ahc_pci_read_config(ahc_dev_softc_t pci,
                                             int reg, int width);
 
-static __inline uint32_t
-ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width)
-{
-       switch (width) {
-       case 1:
-       {
-               uint8_t retval;
-
-               pci_read_config_byte(pci, reg, &retval);
-               return (retval);
-       }
-       case 2:
-       {
-               uint16_t retval;
-               pci_read_config_word(pci, reg, &retval);
-               return (retval);
-       }
-       case 4:
-       {
-               uint32_t retval;
-               pci_read_config_dword(pci, reg, &retval);
-               return (retval);
-       }
-       default:
-               panic("ahc_pci_read_config: Read size too big");
-               /* NOTREACHED */
-               return (0);
-       }
-}
-
-static __inline void ahc_pci_write_config(ahc_dev_softc_t pci,
-                                         int reg, uint32_t value,
-                                         int width);
-
-static __inline void
-ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
-{
-       switch (width) {
-       case 1:
-               pci_write_config_byte(pci, reg, value);
-               break;
-       case 2:
-               pci_write_config_word(pci, reg, value);
-               break;
-       case 4:
-               pci_write_config_dword(pci, reg, value);
-               break;
-       default:
-               panic("ahc_pci_write_config: Write size too big");
-               /* NOTREACHED */
-       }
-}
+void                    ahc_pci_write_config(ahc_dev_softc_t pci,
+                                             int reg, uint32_t value,
+                                             int width);
 
 static __inline int ahc_get_pci_function(ahc_dev_softc_t);
 static __inline int
index 3d3eaef65fb3e6ded30a548eb7e1d25eab1e5300..00f5b98685749ae567f2e63c444407f6fe644c61 100644 (file)
@@ -46,7 +46,7 @@
 */
 #define ID(x)  ID_C(x, PCI_CLASS_STORAGE_SCSI)
 
-static struct pci_device_id ahc_linux_pci_id_table[] = {
+static const struct pci_device_id ahc_linux_pci_id_table[] = {
        /* aic7850 based controllers */
        ID(ID_AHA_2902_04_10_15_20C_30C),
        /* aic7860 based controllers */
@@ -206,7 +206,7 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        const uint64_t   mask_39bit = 0x7FFFFFFFFFULL;
        struct           ahc_softc *ahc;
        ahc_dev_softc_t  pci;
-       struct           ahc_pci_identity *entry;
+       const struct ahc_pci_identity *entry;
        char            *name;
        int              error;
        struct device   *dev = &pdev->dev;
@@ -269,6 +269,57 @@ ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        return (0);
 }
 
+/******************************* PCI Routines *********************************/
+uint32_t
+ahc_pci_read_config(ahc_dev_softc_t pci, int reg, int width)
+{
+       switch (width) {
+       case 1:
+       {
+               uint8_t retval;
+
+               pci_read_config_byte(pci, reg, &retval);
+               return (retval);
+       }
+       case 2:
+       {
+               uint16_t retval;
+               pci_read_config_word(pci, reg, &retval);
+               return (retval);
+       }
+       case 4:
+       {
+               uint32_t retval;
+               pci_read_config_dword(pci, reg, &retval);
+               return (retval);
+       }
+       default:
+               panic("ahc_pci_read_config: Read size too big");
+               /* NOTREACHED */
+               return (0);
+       }
+}
+
+void
+ahc_pci_write_config(ahc_dev_softc_t pci, int reg, uint32_t value, int width)
+{
+       switch (width) {
+       case 1:
+               pci_write_config_byte(pci, reg, value);
+               break;
+       case 2:
+               pci_write_config_word(pci, reg, value);
+               break;
+       case 4:
+               pci_write_config_dword(pci, reg, value);
+               break;
+       default:
+               panic("ahc_pci_write_config: Write size too big");
+               /* NOTREACHED */
+       }
+}
+
+
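For illustration, this is how the width-switched wrappers are typically called; the first line mirrors ahc_find_pci_device() further down in this diff, and `pci` is assumed to be an ahc_dev_softc_t handle obtained during probe.

uint16_t vendor  = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
uint32_t command = ahc_pci_read_config(pci, PCIR_COMMAND, /*bytes*/4);

/* Flip a bit in the command register using the matching write wrapper. */
ahc_pci_write_config(pci, PCIR_COMMAND, command | PCIM_CMD_MEMEN, /*bytes*/4);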
 static struct pci_driver aic7xxx_pci_driver = {
        .name           = "aic7xxx",
        .probe          = ahc_linux_pci_dev_probe,
@@ -293,7 +344,7 @@ ahc_linux_pci_exit(void)
 }
 
 static int
-ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, u_long *base)
+ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, resource_size_t *base)
 {
        if (aic7xxx_allow_memio == 0)
                return (ENOMEM);
@@ -301,24 +352,24 @@ ahc_linux_pci_reserve_io_region(struct ahc_softc *ahc, u_long *base)
        *base = pci_resource_start(ahc->dev_softc, 0);
        if (*base == 0)
                return (ENOMEM);
-       if (request_region(*base, 256, "aic7xxx") == 0)
+       if (!request_region(*base, 256, "aic7xxx"))
                return (ENOMEM);
        return (0);
 }
 
 static int
 ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
-                                u_long *bus_addr,
+                                resource_size_t *bus_addr,
                                 uint8_t __iomem **maddr)
 {
-       u_long  start;
+       resource_size_t start;
        int     error;
 
        error = 0;
        start = pci_resource_start(ahc->dev_softc, 1);
        if (start != 0) {
                *bus_addr = start;
-               if (request_mem_region(start, 0x1000, "aic7xxx") == 0)
+               if (!request_mem_region(start, 0x1000, "aic7xxx"))
                        error = ENOMEM;
                if (error == 0) {
                        *maddr = ioremap_nocache(start, 256);
@@ -336,7 +387,7 @@ int
 ahc_pci_map_registers(struct ahc_softc *ahc)
 {
        uint32_t command;
-       u_long   base;
+       resource_size_t base;
        uint8_t __iomem *maddr;
        int      error;
 
@@ -374,12 +425,12 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
                } else
                        command |= PCIM_CMD_MEMEN;
        } else {
-               printf("aic7xxx: PCI%d:%d:%d MEM region 0x%lx "
+               printf("aic7xxx: PCI%d:%d:%d MEM region 0x%llx "
                       "unavailable. Cannot memory map device.\n",
                       ahc_get_pci_bus(ahc->dev_softc),
                       ahc_get_pci_slot(ahc->dev_softc),
                       ahc_get_pci_function(ahc->dev_softc),
-                      base);
+                      (unsigned long long)base);
        }
 
        /*
@@ -390,15 +441,15 @@ ahc_pci_map_registers(struct ahc_softc *ahc)
                error = ahc_linux_pci_reserve_io_region(ahc, &base);
                if (error == 0) {
                        ahc->tag = BUS_SPACE_PIO;
-                       ahc->bsh.ioport = base;
+                       ahc->bsh.ioport = (u_long)base;
                        command |= PCIM_CMD_PORTEN;
                } else {
-                       printf("aic7xxx: PCI%d:%d:%d IO region 0x%lx[0..255] "
+                       printf("aic7xxx: PCI%d:%d:%d IO region 0x%llx[0..255] "
                               "unavailable. Cannot map device.\n",
                               ahc_get_pci_bus(ahc->dev_softc),
                               ahc_get_pci_slot(ahc->dev_softc),
                               ahc_get_pci_function(ahc->dev_softc),
-                              base);
+                              (unsigned long long)base);
                }
        }
        ahc_pci_write_config(ahc->dev_softc, PCIR_COMMAND, command, 4);
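The format-string changes in this file follow the usual pattern for printing resource_size_t, whose width is configuration dependent (32 or 64 bits). A minimal sketch of the pattern, assuming a struct pci_dev *pdev:

resource_size_t base = pci_resource_start(pdev, 0);

/* Cast to unsigned long long and use %llx so the format is correct
 * regardless of how wide resource_size_t is on this configuration. */
printk(KERN_INFO "aic7xxx: MEM region at 0x%llx\n",
       (unsigned long long)base);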
index 56848f41e4f961f7a70d66964a8d770671ff0663..c07cb6eebb0294cac8ae9e93820e14562719c9fd 100644 (file)
@@ -168,8 +168,7 @@ static ahc_device_setup_t ahc_aha394XX_setup;
 static ahc_device_setup_t ahc_aha494XX_setup;
 static ahc_device_setup_t ahc_aha398XX_setup;
 
-static struct ahc_pci_identity ahc_pci_ident_table [] =
-{
+static const struct ahc_pci_identity ahc_pci_ident_table[] = {
        /* aic7850 based controllers */
        {
                ID_AHA_2902_04_10_15_20C_30C,
@@ -668,7 +667,7 @@ ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor,
        return (result);
 }
 
-struct ahc_pci_identity *
+const struct ahc_pci_identity *
 ahc_find_pci_device(ahc_dev_softc_t pci)
 {
        uint64_t  full_id;
@@ -676,7 +675,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
        uint16_t  vendor;
        uint16_t  subdevice;
        uint16_t  subvendor;
-       struct    ahc_pci_identity *entry;
+       const struct ahc_pci_identity *entry;
        u_int     i;
 
        vendor = ahc_pci_read_config(pci, PCIR_DEVVENDOR, /*bytes*/2);
@@ -710,7 +709,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci)
 }
 
 int
-ahc_pci_config(struct ahc_softc *ahc, struct ahc_pci_identity *entry)
+ahc_pci_config(struct ahc_softc *ahc, const struct ahc_pci_identity *entry)
 {
        u_int    command;
        u_int    our_id;
index 99e5443e75352cecf9383c247b5d8107dfd109fe..e92991a7c48554314877231870ff38cec3821bcd 100644 (file)
@@ -58,7 +58,7 @@ static int    ahc_proc_write_seeprom(struct ahc_softc *ahc,
  * Table of syncrates that don't follow the "divisible by 4"
  * rule. This table will be expanded in future SCSI specs.
  */
-static struct {
+static const struct {
        u_int period_factor;
        u_int period;   /* in 100ths of ns */
 } scsi_syncrates[] = {
@@ -137,7 +137,7 @@ copy_info(struct info_str *info, char *fmt, ...)
        return (len);
 }
 
-void
+static void
 ahc_format_transinfo(struct info_str *info, struct ahc_transinfo *tinfo)
 {
        u_int speed;
index 88bfd767c51c2e7be9f2fa757b5e2362c8f9b720..309a562b009ef284294ff27fbbe6ea7950dc2cab 100644 (file)
@@ -8,7 +8,7 @@
 
 #include "aic7xxx_osm.h"
 
-static ahc_reg_parse_entry_t SCSISEQ_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSISEQ_parse_table[] = {
        { "SCSIRSTO",           0x01, 0x01 },
        { "ENAUTOATNP",         0x02, 0x02 },
        { "ENAUTOATNI",         0x04, 0x04 },
@@ -26,7 +26,7 @@ ahc_scsiseq_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x00, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SXFRCTL0_parse_table[] = {
+static const ahc_reg_parse_entry_t SXFRCTL0_parse_table[] = {
        { "CLRCHN",             0x02, 0x02 },
        { "SCAMEN",             0x04, 0x04 },
        { "SPIOEN",             0x08, 0x08 },
@@ -43,7 +43,7 @@ ahc_sxfrctl0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x01, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SXFRCTL1_parse_table[] = {
+static const ahc_reg_parse_entry_t SXFRCTL1_parse_table[] = {
        { "STPWEN",             0x01, 0x01 },
        { "ACTNEGEN",           0x02, 0x02 },
        { "ENSTIMER",           0x04, 0x04 },
@@ -60,7 +60,7 @@ ahc_sxfrctl1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x02, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SCSISIGO_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSISIGO_parse_table[] = {
        { "ACKO",               0x01, 0x01 },
        { "REQO",               0x02, 0x02 },
        { "BSYO",               0x04, 0x04 },
@@ -85,7 +85,7 @@ ahc_scsisigo_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x03, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SCSISIGI_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSISIGI_parse_table[] = {
        { "ACKI",               0x01, 0x01 },
        { "REQI",               0x02, 0x02 },
        { "BSYI",               0x04, 0x04 },
@@ -112,7 +112,7 @@ ahc_scsisigi_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x03, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SCSIRATE_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSIRATE_parse_table[] = {
        { "SINGLE_EDGE",        0x10, 0x10 },
        { "ENABLE_CRC",         0x40, 0x40 },
        { "WIDEXFER",           0x80, 0x80 },
@@ -128,7 +128,7 @@ ahc_scsirate_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x04, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SCSIID_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSIID_parse_table[] = {
        { "TWIN_CHNLB",         0x80, 0x80 },
        { "OID",                0x0f, 0x0f },
        { "TWIN_TID",           0x70, 0x70 },
@@ -150,13 +150,6 @@ ahc_scsidatl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x06, regvalue, cur_col, wrap));
 }
 
-int
-ahc_scsidath_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahc_print_register(NULL, 0, "SCSIDATH",
-           0x07, regvalue, cur_col, wrap));
-}
-
 int
 ahc_stcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -164,7 +157,7 @@ ahc_stcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x08, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t OPTIONMODE_parse_table[] = {
+static const ahc_reg_parse_entry_t OPTIONMODE_parse_table[] = {
        { "DIS_MSGIN_DUALEDGE", 0x01, 0x01 },
        { "AUTO_MSGOUT_DE",     0x02, 0x02 },
        { "SCSIDATL_IMGEN",     0x04, 0x04 },
@@ -190,7 +183,7 @@ ahc_targcrccnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x0a, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t CLRSINT0_parse_table[] = {
+static const ahc_reg_parse_entry_t CLRSINT0_parse_table[] = {
        { "CLRSPIORDY",         0x02, 0x02 },
        { "CLRSWRAP",           0x08, 0x08 },
        { "CLRIOERR",           0x08, 0x08 },
@@ -206,7 +199,7 @@ ahc_clrsint0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x0b, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SSTAT0_parse_table[] = {
+static const ahc_reg_parse_entry_t SSTAT0_parse_table[] = {
        { "DMADONE",            0x01, 0x01 },
        { "SPIORDY",            0x02, 0x02 },
        { "SDONE",              0x04, 0x04 },
@@ -225,7 +218,7 @@ ahc_sstat0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x0b, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t CLRSINT1_parse_table[] = {
+static const ahc_reg_parse_entry_t CLRSINT1_parse_table[] = {
        { "CLRREQINIT",         0x01, 0x01 },
        { "CLRPHASECHG",        0x02, 0x02 },
        { "CLRSCSIPERR",        0x04, 0x04 },
@@ -242,7 +235,7 @@ ahc_clrsint1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x0c, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SSTAT1_parse_table[] = {
+static const ahc_reg_parse_entry_t SSTAT1_parse_table[] = {
        { "REQINIT",            0x01, 0x01 },
        { "PHASECHG",           0x02, 0x02 },
        { "SCSIPERR",           0x04, 0x04 },
@@ -260,7 +253,7 @@ ahc_sstat1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x0c, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SSTAT2_parse_table[] = {
+static const ahc_reg_parse_entry_t SSTAT2_parse_table[] = {
        { "DUAL_EDGE_ERR",      0x01, 0x01 },
        { "CRCREQERR",          0x02, 0x02 },
        { "CRCENDERR",          0x04, 0x04 },
@@ -278,7 +271,7 @@ ahc_sstat2_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x0d, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SSTAT3_parse_table[] = {
+static const ahc_reg_parse_entry_t SSTAT3_parse_table[] = {
        { "OFFCNT",             0x0f, 0x0f },
        { "U2OFFCNT",           0x7f, 0x7f },
        { "SCSICNT",            0xf0, 0xf0 }
@@ -291,7 +284,7 @@ ahc_sstat3_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x0e, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SCSIID_ULTRA2_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSIID_ULTRA2_parse_table[] = {
        { "OID",                0x0f, 0x0f },
        { "TID",                0xf0, 0xf0 }
 };
@@ -303,7 +296,7 @@ ahc_scsiid_ultra2_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x0f, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SIMODE0_parse_table[] = {
+static const ahc_reg_parse_entry_t SIMODE0_parse_table[] = {
        { "ENDMADONE",          0x01, 0x01 },
        { "ENSPIORDY",          0x02, 0x02 },
        { "ENSDONE",            0x04, 0x04 },
@@ -321,7 +314,7 @@ ahc_simode0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x10, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SIMODE1_parse_table[] = {
+static const ahc_reg_parse_entry_t SIMODE1_parse_table[] = {
        { "ENREQINIT",          0x01, 0x01 },
        { "ENPHASECHG",         0x02, 0x02 },
        { "ENSCSIPERR",         0x04, 0x04 },
@@ -346,26 +339,6 @@ ahc_scsibusl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x12, regvalue, cur_col, wrap));
 }
 
-int
-ahc_scsibush_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahc_print_register(NULL, 0, "SCSIBUSH",
-           0x13, regvalue, cur_col, wrap));
-}
-
-static ahc_reg_parse_entry_t SXFRCTL2_parse_table[] = {
-       { "CMDDMAEN",           0x08, 0x08 },
-       { "AUTORSTDIS",         0x10, 0x10 },
-       { "ASYNC_SETUP",        0x07, 0x07 }
-};
-
-int
-ahc_sxfrctl2_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahc_print_register(SXFRCTL2_parse_table, 3, "SXFRCTL2",
-           0x13, regvalue, cur_col, wrap));
-}
-
 int
 ahc_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -373,7 +346,7 @@ ahc_shaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x14, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SELTIMER_parse_table[] = {
+static const ahc_reg_parse_entry_t SELTIMER_parse_table[] = {
        { "STAGE1",             0x01, 0x01 },
        { "STAGE2",             0x02, 0x02 },
        { "STAGE3",             0x04, 0x04 },
@@ -389,7 +362,7 @@ ahc_seltimer_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x18, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SELID_parse_table[] = {
+static const ahc_reg_parse_entry_t SELID_parse_table[] = {
        { "ONEBIT",             0x08, 0x08 },
        { "SELID_MASK",         0xf0, 0xf0 }
 };
@@ -401,21 +374,6 @@ ahc_selid_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x19, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SCAMCTL_parse_table[] = {
-       { "DFLTTID",            0x10, 0x10 },
-       { "ALTSTIM",            0x20, 0x20 },
-       { "CLRSCAMSELID",       0x40, 0x40 },
-       { "ENSCAMSELO",         0x80, 0x80 },
-       { "SCAMLVL",            0x03, 0x03 }
-};
-
-int
-ahc_scamctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahc_print_register(SCAMCTL_parse_table, 5, "SCAMCTL",
-           0x1a, regvalue, cur_col, wrap));
-}
-
 int
 ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -423,7 +381,7 @@ ahc_targid_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x1b, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SPIOCAP_parse_table[] = {
+static const ahc_reg_parse_entry_t SPIOCAP_parse_table[] = {
        { "SSPIOCPS",           0x01, 0x01 },
        { "ROM",                0x02, 0x02 },
        { "EEPROM",             0x04, 0x04 },
@@ -441,7 +399,7 @@ ahc_spiocap_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x1b, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t BRDCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t BRDCTL_parse_table[] = {
        { "BRDCTL0",            0x01, 0x01 },
        { "BRDSTB_ULTRA2",      0x01, 0x01 },
        { "BRDCTL1",            0x02, 0x02 },
@@ -464,7 +422,7 @@ ahc_brdctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x1d, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SEECTL_parse_table[] = {
+static const ahc_reg_parse_entry_t SEECTL_parse_table[] = {
        { "SEEDI",              0x01, 0x01 },
        { "SEEDO",              0x02, 0x02 },
        { "SEECK",              0x04, 0x04 },
@@ -482,7 +440,7 @@ ahc_seectl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x1e, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SBLKCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t SBLKCTL_parse_table[] = {
        { "XCVR",               0x01, 0x01 },
        { "SELWIDE",            0x02, 0x02 },
        { "ENAB20",             0x04, 0x04 },
@@ -521,13 +479,6 @@ ahc_disc_dsb_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x32, regvalue, cur_col, wrap));
 }
 
-int
-ahc_cmdsize_table_tail_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahc_print_register(NULL, 0, "CMDSIZE_TABLE_TAIL",
-           0x34, regvalue, cur_col, wrap));
-}
-
 int
 ahc_mwi_residual_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -549,7 +500,7 @@ ahc_msg_out_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x3a, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t DMAPARAMS_parse_table[] = {
+static const ahc_reg_parse_entry_t DMAPARAMS_parse_table[] = {
        { "FIFORESET",          0x01, 0x01 },
        { "FIFOFLUSH",          0x02, 0x02 },
        { "DIRECTION",          0x04, 0x04 },
@@ -569,7 +520,7 @@ ahc_dmaparams_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x3b, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
+static const ahc_reg_parse_entry_t SEQ_FLAGS_parse_table[] = {
        { "NO_DISCONNECT",      0x01, 0x01 },
        { "SPHASE_PENDING",     0x02, 0x02 },
        { "DPHASE_PENDING",     0x04, 0x04 },
@@ -602,7 +553,7 @@ ahc_saved_lun_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x3e, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t LASTPHASE_parse_table[] = {
+static const ahc_reg_parse_entry_t LASTPHASE_parse_table[] = {
        { "MSGI",               0x20, 0x20 },
        { "IOI",                0x40, 0x40 },
        { "CDI",                0x80, 0x80 },
@@ -644,13 +595,6 @@ ahc_free_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x42, regvalue, cur_col, wrap));
 }
 
-int
-ahc_complete_scbh_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahc_print_register(NULL, 0, "COMPLETE_SCBH",
-           0x43, regvalue, cur_col, wrap));
-}
-
 int
 ahc_hscb_addr_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -700,7 +644,7 @@ ahc_tqinpos_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x50, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t ARG_1_parse_table[] = {
+static const ahc_reg_parse_entry_t ARG_1_parse_table[] = {
        { "CONT_TARG_SESSION",  0x02, 0x02 },
        { "CONT_MSG_LOOP",      0x04, 0x04 },
        { "EXIT_MSG_LOOP",      0x08, 0x08 },
@@ -731,7 +675,7 @@ ahc_last_msg_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x53, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSISEQ_TEMPLATE_parse_table[] = {
        { "ENAUTOATNP",         0x02, 0x02 },
        { "ENAUTOATNI",         0x04, 0x04 },
        { "ENAUTOATNO",         0x08, 0x08 },
@@ -747,7 +691,7 @@ ahc_scsiseq_template_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x54, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t HA_274_BIOSGLOBAL_parse_table[] = {
+static const ahc_reg_parse_entry_t HA_274_BIOSGLOBAL_parse_table[] = {
        { "HA_274_EXTENDED_TRANS",0x01, 0x01 }
 };
 
@@ -758,7 +702,7 @@ ahc_ha_274_biosglobal_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x56, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
+static const ahc_reg_parse_entry_t SEQ_FLAGS2_parse_table[] = {
        { "SCB_DMA",            0x01, 0x01 },
        { "TARGET_MSG_PENDING", 0x02, 0x02 }
 };
@@ -770,7 +714,7 @@ ahc_seq_flags2_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x57, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SCSICONF_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSICONF_parse_table[] = {
        { "ENSPCHK",            0x20, 0x20 },
        { "RESET_SCSI",         0x40, 0x40 },
        { "TERM_ENB",           0x80, 0x80 },
@@ -785,7 +729,7 @@ ahc_scsiconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x5a, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t INTDEF_parse_table[] = {
+static const ahc_reg_parse_entry_t INTDEF_parse_table[] = {
        { "EDGE_TRIG",          0x80, 0x80 },
        { "VECTOR",             0x0f, 0x0f }
 };
@@ -804,7 +748,7 @@ ahc_hostconf_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x5d, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t HA_274_BIOSCTRL_parse_table[] = {
+static const ahc_reg_parse_entry_t HA_274_BIOSCTRL_parse_table[] = {
        { "CHANNEL_B_PRIMARY",  0x08, 0x08 },
        { "BIOSMODE",           0x30, 0x30 },
        { "BIOSDISABLED",       0x30, 0x30 }
@@ -817,7 +761,7 @@ ahc_ha_274_biosctrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x5f, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SEQCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t SEQCTL_parse_table[] = {
        { "LOADRAM",            0x01, 0x01 },
        { "SEQRESET",           0x02, 0x02 },
        { "STEP",               0x04, 0x04 },
@@ -849,7 +793,7 @@ ahc_seqaddr0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x62, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SEQADDR1_parse_table[] = {
+static const ahc_reg_parse_entry_t SEQADDR1_parse_table[] = {
        { "SEQADDR1_MASK",      0x01, 0x01 }
 };
 
@@ -902,7 +846,7 @@ ahc_none_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x6a, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t FLAGS_parse_table[] = {
+static const ahc_reg_parse_entry_t FLAGS_parse_table[] = {
        { "CARRY",              0x01, 0x01 },
        { "ZERO",               0x02, 0x02 }
 };
@@ -928,13 +872,6 @@ ahc_dindir_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x6d, regvalue, cur_col, wrap));
 }
 
-int
-ahc_function1_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahc_print_register(NULL, 0, "FUNCTION1",
-           0x6e, regvalue, cur_col, wrap));
-}
-
 int
 ahc_stack_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -956,19 +893,7 @@ ahc_sram_base_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x70, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t BCTL_parse_table[] = {
-       { "ENABLE",             0x01, 0x01 },
-       { "ACE",                0x08, 0x08 }
-};
-
-int
-ahc_bctl_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahc_print_register(BCTL_parse_table, 2, "BCTL",
-           0x84, regvalue, cur_col, wrap));
-}
-
-static ahc_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
+static const ahc_reg_parse_entry_t DSCOMMAND0_parse_table[] = {
        { "CIOPARCKEN",         0x01, 0x01 },
        { "USCBSIZE32",         0x02, 0x02 },
        { "RAMPS",              0x04, 0x04 },
@@ -986,7 +911,7 @@ ahc_dscommand0_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x84, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t BUSTIME_parse_table[] = {
+static const ahc_reg_parse_entry_t BUSTIME_parse_table[] = {
        { "BON",                0x0f, 0x0f },
        { "BOFF",               0xf0, 0xf0 }
 };
@@ -998,7 +923,7 @@ ahc_bustime_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x85, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t DSCOMMAND1_parse_table[] = {
+static const ahc_reg_parse_entry_t DSCOMMAND1_parse_table[] = {
        { "HADDLDSEL0",         0x01, 0x01 },
        { "HADDLDSEL1",         0x02, 0x02 },
        { "DSLATT",             0xfc, 0xfc }
@@ -1011,7 +936,7 @@ ahc_dscommand1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x85, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t BUSSPD_parse_table[] = {
+static const ahc_reg_parse_entry_t BUSSPD_parse_table[] = {
        { "STBON",              0x07, 0x07 },
        { "STBOFF",             0x38, 0x38 },
        { "DFTHRSH_75",         0x80, 0x80 },
@@ -1026,7 +951,7 @@ ahc_busspd_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x86, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
+static const ahc_reg_parse_entry_t HS_MAILBOX_parse_table[] = {
        { "SEQ_MAILBOX",        0x0f, 0x0f },
        { "HOST_TQINPOS",       0x80, 0x80 },
        { "HOST_MAILBOX",       0xf0, 0xf0 }
@@ -1039,7 +964,7 @@ ahc_hs_mailbox_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x86, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t DSPCISTATUS_parse_table[] = {
+static const ahc_reg_parse_entry_t DSPCISTATUS_parse_table[] = {
        { "DFTHRSH_100",        0xc0, 0xc0 }
 };
 
@@ -1050,7 +975,7 @@ ahc_dspcistatus_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x86, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t HCNTRL_parse_table[] = {
+static const ahc_reg_parse_entry_t HCNTRL_parse_table[] = {
        { "CHIPRST",            0x01, 0x01 },
        { "CHIPRSTACK",         0x01, 0x01 },
        { "INTEN",              0x02, 0x02 },
@@ -1088,7 +1013,7 @@ ahc_scbptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x90, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t INTSTAT_parse_table[] = {
+static const ahc_reg_parse_entry_t INTSTAT_parse_table[] = {
        { "SEQINT",             0x01, 0x01 },
        { "CMDCMPLT",           0x02, 0x02 },
        { "SCSIINT",            0x04, 0x04 },
@@ -1119,7 +1044,7 @@ ahc_intstat_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x91, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t CLRINT_parse_table[] = {
+static const ahc_reg_parse_entry_t CLRINT_parse_table[] = {
        { "CLRSEQINT",          0x01, 0x01 },
        { "CLRCMDINT",          0x02, 0x02 },
        { "CLRSCSIINT",         0x04, 0x04 },
@@ -1134,7 +1059,7 @@ ahc_clrint_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x92, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t ERROR_parse_table[] = {
+static const ahc_reg_parse_entry_t ERROR_parse_table[] = {
        { "ILLHADDR",           0x01, 0x01 },
        { "ILLSADDR",           0x02, 0x02 },
        { "ILLOPCODE",          0x04, 0x04 },
@@ -1152,7 +1077,7 @@ ahc_error_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x92, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t DFCNTRL_parse_table[] = {
+static const ahc_reg_parse_entry_t DFCNTRL_parse_table[] = {
        { "FIFORESET",          0x01, 0x01 },
        { "FIFOFLUSH",          0x02, 0x02 },
        { "DIRECTION",          0x04, 0x04 },
@@ -1172,7 +1097,7 @@ ahc_dfcntrl_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x93, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t DFSTATUS_parse_table[] = {
+static const ahc_reg_parse_entry_t DFSTATUS_parse_table[] = {
        { "FIFOEMP",            0x01, 0x01 },
        { "FIFOFULL",           0x02, 0x02 },
        { "DFTHRESH",           0x04, 0x04 },
@@ -1197,13 +1122,6 @@ ahc_dfwaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x95, regvalue, cur_col, wrap));
 }
 
-int
-ahc_dfraddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahc_print_register(NULL, 0, "DFRADDR",
-           0x97, regvalue, cur_col, wrap));
-}
-
 int
 ahc_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1211,7 +1129,7 @@ ahc_dfdat_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x99, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SCBCNT_parse_table[] = {
+static const ahc_reg_parse_entry_t SCBCNT_parse_table[] = {
        { "SCBAUTO",            0x80, 0x80 },
        { "SCBCNT_MASK",        0x1f, 0x1f }
 };
@@ -1230,13 +1148,6 @@ ahc_qinfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x9b, regvalue, cur_col, wrap));
 }
 
-int
-ahc_qincnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahc_print_register(NULL, 0, "QINCNT",
-           0x9c, regvalue, cur_col, wrap));
-}
-
 int
 ahc_qoutfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
 {
@@ -1244,7 +1155,7 @@ ahc_qoutfifo_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x9d, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t CRCCONTROL1_parse_table[] = {
+static const ahc_reg_parse_entry_t CRCCONTROL1_parse_table[] = {
        { "TARGCRCCNTEN",       0x04, 0x04 },
        { "TARGCRCENDEN",       0x08, 0x08 },
        { "CRCREQCHKEN",        0x10, 0x10 },
@@ -1260,14 +1171,7 @@ ahc_crccontrol1_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x9d, regvalue, cur_col, wrap));
 }
 
-int
-ahc_qoutcnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahc_print_register(NULL, 0, "QOUTCNT",
-           0x9e, regvalue, cur_col, wrap));
-}
-
-static ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
+static const ahc_reg_parse_entry_t SCSIPHASE_parse_table[] = {
        { "DATA_OUT_PHASE",     0x01, 0x01 },
        { "DATA_IN_PHASE",      0x02, 0x02 },
        { "MSG_OUT_PHASE",      0x04, 0x04 },
@@ -1284,7 +1188,7 @@ ahc_scsiphase_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0x9e, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SFUNCT_parse_table[] = {
+static const ahc_reg_parse_entry_t SFUNCT_parse_table[] = {
        { "ALT_MODE",           0x80, 0x80 }
 };
 
@@ -1351,7 +1255,7 @@ ahc_scb_dataptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xac, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_DATACNT_parse_table[] = {
        { "SG_LAST_SEG",        0x80, 0x80 },
        { "SG_HIGH_ADDR_BITS",  0x7f, 0x7f }
 };
@@ -1363,7 +1267,7 @@ ahc_scb_datacnt_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xb0, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_SGPTR_parse_table[] = {
        { "SG_LIST_NULL",       0x01, 0x01 },
        { "SG_FULL_RESID",      0x02, 0x02 },
        { "SG_RESID_VALID",     0x04, 0x04 }
@@ -1376,7 +1280,7 @@ ahc_scb_sgptr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xb4, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_CONTROL_parse_table[] = {
        { "DISCONNECTED",       0x04, 0x04 },
        { "ULTRAENB",           0x08, 0x08 },
        { "MK_MESSAGE",         0x10, 0x10 },
@@ -1394,7 +1298,7 @@ ahc_scb_control_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xb8, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_SCSIID_parse_table[] = {
        { "TWIN_CHNLB",         0x80, 0x80 },
        { "OID",                0x0f, 0x0f },
        { "TWIN_TID",           0x70, 0x70 },
@@ -1408,7 +1312,7 @@ ahc_scb_scsiid_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xb9, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SCB_LUN_parse_table[] = {
+static const ahc_reg_parse_entry_t SCB_LUN_parse_table[] = {
        { "SCB_XFERLEN_ODD",    0x80, 0x80 },
        { "LID",                0x3f, 0x3f }
 };
@@ -1455,14 +1359,7 @@ ahc_scb_next_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xbf, regvalue, cur_col, wrap));
 }
 
-int
-ahc_scb_64_spare_print(u_int regvalue, u_int *cur_col, u_int wrap)
-{
-       return (ahc_print_register(NULL, 0, "SCB_64_SPARE",
-           0xc0, regvalue, cur_col, wrap));
-}
-
-static ahc_reg_parse_entry_t SEECTL_2840_parse_table[] = {
+static const ahc_reg_parse_entry_t SEECTL_2840_parse_table[] = {
        { "DO_2840",            0x01, 0x01 },
        { "CK_2840",            0x02, 0x02 },
        { "CS_2840",            0x04, 0x04 }
@@ -1475,7 +1372,7 @@ ahc_seectl_2840_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xc0, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t STATUS_2840_parse_table[] = {
+static const ahc_reg_parse_entry_t STATUS_2840_parse_table[] = {
        { "DI_2840",            0x01, 0x01 },
        { "EEPROM_TF",          0x80, 0x80 },
        { "ADSEL",              0x1e, 0x1e },
@@ -1524,7 +1421,7 @@ ahc_ccsgaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xea, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t CCSGCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t CCSGCTL_parse_table[] = {
        { "CCSGRESET",          0x01, 0x01 },
        { "SG_FETCH_NEEDED",    0x02, 0x02 },
        { "CCSGEN",             0x08, 0x08 },
@@ -1552,7 +1449,7 @@ ahc_ccscbaddr_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xed, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t CCSCBCTL_parse_table[] = {
+static const ahc_reg_parse_entry_t CCSCBCTL_parse_table[] = {
        { "CCSCBRESET",         0x01, 0x01 },
        { "CCSCBDIR",           0x04, 0x04 },
        { "CCSCBEN",            0x08, 0x08 },
@@ -1610,7 +1507,7 @@ ahc_sdscb_qoff_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xf8, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
+static const ahc_reg_parse_entry_t QOFF_CTLSTA_parse_table[] = {
        { "SDSCB_ROLLOVER",     0x10, 0x10 },
        { "SNSCB_ROLLOVER",     0x20, 0x20 },
        { "SCB_AVAIL",          0x40, 0x40 },
@@ -1625,7 +1522,7 @@ ahc_qoff_ctlsta_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xfa, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t DFF_THRSH_parse_table[] = {
+static const ahc_reg_parse_entry_t DFF_THRSH_parse_table[] = {
        { "RD_DFTHRSH_MIN",     0x00, 0x00 },
        { "WR_DFTHRSH_MIN",     0x00, 0x00 },
        { "RD_DFTHRSH_25",      0x01, 0x01 },
@@ -1653,7 +1550,7 @@ ahc_dff_thrsh_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xfb, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
+static const ahc_reg_parse_entry_t SG_CACHE_SHADOW_parse_table[] = {
        { "LAST_SEG_DONE",      0x01, 0x01 },
        { "LAST_SEG",           0x02, 0x02 },
        { "SG_ADDR_MASK",       0xf8, 0xf8 }
@@ -1666,7 +1563,7 @@ ahc_sg_cache_shadow_print(u_int regvalue, u_int *cur_col, u_int wrap)
            0xfc, regvalue, cur_col, wrap));
 }
 
-static ahc_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
+static const ahc_reg_parse_entry_t SG_CACHE_PRE_parse_table[] = {
        { "LAST_SEG_DONE",      0x01, 0x01 },
        { "LAST_SEG",           0x02, 0x02 },
        { "SG_ADDR_MASK",       0xf8, 0xf8 }
index 4cee08521e75f6cb0c9f1a00580ce9ac683958c2..07e93fbae70661b2eee2518b0fcaf70e61241c9c 100644 (file)
@@ -5,7 +5,7 @@
  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.seq#58 $
  * $Id: //depot/aic7xxx/aic7xxx/aic7xxx.reg#40 $
  */
-static uint8_t seqprog[] = {
+static const uint8_t seqprog[] = {
        0xb2, 0x00, 0x00, 0x08,
        0xf7, 0x11, 0x22, 0x08,
        0x00, 0x65, 0xee, 0x59,
@@ -1081,7 +1081,7 @@ ahc_patch0_func(struct ahc_softc *ahc)
        return (0);
 }
 
-static struct patch {
+static const struct patch {
        ahc_patch_func_t                *patch_func;
        uint32_t                 begin          :10,
                                 skip_instr     :10,
@@ -1291,7 +1291,7 @@ static struct patch {
        { ahc_patch4_func, 865, 12, 1 }
 };
 
-static struct cs {
+static const struct cs {
        uint16_t        begin;
        uint16_t        end;
 } critical_sections[] = {
index 924102720b141fe969f0891924fa1384e8269a2f..e4a778720301d4f5bcae7a5ed50b2785ad8ccf3b 100644 (file)
@@ -362,7 +362,7 @@ output_code()
 " *\n"
 "%s */\n", versions);
 
-       fprintf(ofile, "static uint8_t seqprog[] = {\n");
+       fprintf(ofile, "static const uint8_t seqprog[] = {\n");
        for (cur_instr = STAILQ_FIRST(&seq_program);
             cur_instr != NULL;
             cur_instr = STAILQ_NEXT(cur_instr, links)) {
@@ -415,7 +415,7 @@ output_code()
        }
 
        fprintf(ofile,
-"static struct patch {\n"
+"static const struct patch {\n"
 "      %spatch_func_t          *patch_func;\n"
 "      uint32_t                 begin          :10,\n"
 "                               skip_instr     :10,\n"
@@ -435,7 +435,7 @@ output_code()
        fprintf(ofile, "\n};\n\n");
 
        fprintf(ofile,
-"static struct cs {\n"
+"static const struct cs {\n"
 "      uint16_t        begin;\n"
 "      uint16_t        end;\n"
 "} critical_sections[] = {\n");
index 702e2dbd11fb5b2feecfc701b52520fa76aade15..81be6a261cc8503a0853699b9e6719139cb9b5f6 100644 (file)
@@ -101,11 +101,12 @@ static void format_3_instr(int opcode, symbol_ref_t *src,
                           expression_t *immed, symbol_ref_t *address);
 static void test_readable_symbol(symbol_t *symbol);
 static void test_writable_symbol(symbol_t *symbol);
-static void type_check(symbol_t *symbol, expression_t *expression, int and_op);
+static void type_check(symbol_ref_t *sym, expression_t *expression, int and_op);
 static void make_expression(expression_t *immed, int value);
 static void add_conditional(symbol_t *symbol);
 static void add_version(const char *verstring);
 static int  is_download_const(expression_t *immed);
+static int  is_location_address(symbol_t *symbol);
 void yyerror(const char *string);
 
 #define SRAM_SYMNAME "SRAM_BASE"
@@ -142,6 +143,8 @@ void yyerror(const char *string);
 
 %token <value> T_ADDRESS
 
+%token T_COUNT
+
 %token T_ACCESS_MODE
 
 %token T_MODES
@@ -192,10 +195,10 @@ void yyerror(const char *string);
 
 %token <value> T_OR
 
-/* 16 bit extensions */
-%token <value> T_OR16 T_AND16 T_XOR16 T_ADD16
-%token <value> T_ADC16 T_MVI16 T_TEST16 T_CMP16 T_CMPXCHG
-
+/* 16 bit extensions, not implemented
+ * %token <value> T_OR16 T_AND16 T_XOR16 T_ADD16
+ * %token <value> T_ADC16 T_MVI16 T_TEST16 T_CMP16 T_CMPXCHG
+ */
 %token T_RET
 
 %token T_NOP
@@ -214,7 +217,7 @@ void yyerror(const char *string);
 
 %type <expression> expression immediate immediate_or_a
 
-%type <value> export ret f1_opcode f2_opcode f4_opcode jmp_jc_jnc_call jz_jnz je_jne
+%type <value> export ret f1_opcode f2_opcode jmp_jc_jnc_call jz_jnz je_jne
 
 %type <value> mode_value mode_list macro_arglist
 
@@ -313,13 +316,13 @@ reg_definition:
                                stop("Register multiply defined", EX_DATAERR);
                                /* NOTREACHED */
                        }
-                       cur_symbol = $1; 
+                       cur_symbol = $1;
                        cur_symbol->type = cur_symtype;
                        initialize_symbol(cur_symbol);
                }
                reg_attribute_list
        '}'
-               {                    
+               {
                        /*
                         * Default to allowing everything in for registers
                         * with no bit or mask definitions.
@@ -349,9 +352,10 @@ reg_attribute_list:
 |      reg_attribute_list reg_attribute
 ;
 
-reg_attribute:         
+reg_attribute:
        reg_address
 |      size
+|      count
 |      access_mode
 |      modes
 |      field_defn
@@ -392,6 +396,13 @@ size:
        }
 ;
 
+count:
+       T_COUNT T_NUMBER
+       {
+               cur_symbol->count += $2;
+       }
+;
+
 access_mode:
        T_ACCESS_MODE T_MODE
        {
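The new count attribute is deliberately small: its semantic action only adds the operand to the current symbol's reference count, giving .reg files a way to mark registers that are referenced solely from C code so the unused-register pruning added in aicasm_symbol.c does not drop their dump tables. A rough sketch of that bookkeeping, with a hypothetical register block shown in the comment:

struct symbol {
	const char	*name;
	int		count;	/* references seen so far; starts at 1 */
};

/*
 * Hypothetical register definition using the new attribute:
 *
 *	register FOO {
 *		address		0x0a0
 *		access_mode	RW
 *		count		2
 *	}
 *
 * The action below is all the parser does with "count <N>".
 */
static void handle_count_attribute(struct symbol *cur_symbol, int n)
{
	cur_symbol->count += n;
}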
@@ -641,14 +652,14 @@ expression:
                               &($1.referenced_syms),
                               &($3.referenced_syms));
        }
-|      expression T_EXPR_LSHIFT expression
+|      expression T_EXPR_LSHIFT expression
        {
                $$.value = $1.value << $3.value;
                symlist_merge(&$$.referenced_syms,
                               &$1.referenced_syms,
                               &$3.referenced_syms);
        }
-|      expression T_EXPR_RSHIFT expression
+|      expression T_EXPR_RSHIFT expression
        {
                $$.value = $1.value >> $3.value;
                symlist_merge(&$$.referenced_syms,
@@ -714,7 +725,7 @@ expression:
 ;
 
 constant:
-       T_CONST T_SYMBOL expression 
+       T_CONST T_SYMBOL expression
        {
                if ($2->type != UNINITIALIZED) {
                        stop("Re-definition of symbol as a constant",
@@ -800,6 +811,7 @@ scratch_ram:
                        cur_symtype = SRAMLOC;
                        cur_symbol->type = SRAMLOC;
                        initialize_symbol(cur_symbol);
+                       cur_symbol->count += 1;
                }
                reg_address
                {
@@ -831,6 +843,7 @@ scb:
                        initialize_symbol(cur_symbol);
                        /* 64 bytes of SCB space */
                        cur_symbol->info.rinfo->size = 64;
+                       cur_symbol->count += 1;
                }
                reg_address
                {
@@ -1311,14 +1324,18 @@ f2_opcode:
 |      T_ROR { $$ = AIC_OP_ROR; }
 ;
 
-f4_opcode:
-       T_OR16  { $$ = AIC_OP_OR16; }
-|      T_AND16 { $$ = AIC_OP_AND16; }
-|      T_XOR16 { $$ = AIC_OP_XOR16; }
-|      T_ADD16 { $$ = AIC_OP_ADD16; }
-|      T_ADC16 { $$ = AIC_OP_ADC16; }
-|      T_MVI16 { $$ = AIC_OP_MVI16; }
-;
+/*
+ * 16bit opcodes, not used
+ *
+ *f4_opcode:
+ *     T_OR16  { $$ = AIC_OP_OR16; }
+ *|    T_AND16 { $$ = AIC_OP_AND16; }
+ *|    T_XOR16 { $$ = AIC_OP_XOR16; }
+ *|    T_ADD16 { $$ = AIC_OP_ADD16; }
+ *|    T_ADC16 { $$ = AIC_OP_ADC16; }
+ *|    T_MVI16 { $$ = AIC_OP_MVI16; }
+ *;
+ */
 
 code:
        f2_opcode destination ',' expression opt_source ret ';'
@@ -1357,6 +1374,7 @@ code:
 code:
        T_OR reg_symbol ',' immediate jmp_jc_jnc_call address ';'
        {
+               type_check(&$2, &$4, AIC_OP_OR);
                format_3_instr($5, &$2, &$4, &$6);
        }
 ;
@@ -1528,7 +1546,7 @@ initialize_symbol(symbol_t *symbol)
                       sizeof(struct cond_info));
                break;
        case MACRO:
-               symbol->info.macroinfo = 
+               symbol->info.macroinfo =
                    (struct macro_info *)malloc(sizeof(struct macro_info));
                if (symbol->info.macroinfo == NULL) {
                        stop("Can't create macro info", EX_SOFTWARE);
@@ -1552,7 +1570,6 @@ add_macro_arg(const char *argtext, int argnum)
        struct macro_arg *marg;
        int i;
        int retval;
-               
 
        if (cur_symbol == NULL || cur_symbol->type != MACRO) {
                stop("Invalid current symbol for adding macro arg",
@@ -1633,8 +1650,10 @@ format_1_instr(int opcode, symbol_ref_t *dest, expression_t *immed,
        test_writable_symbol(dest->symbol);
        test_readable_symbol(src->symbol);
 
-       /* Ensure that immediate makes sense for this destination */
-       type_check(dest->symbol, immed, opcode);
+       if (!is_location_address(dest->symbol)) {
+               /* Ensure that immediate makes sense for this destination */
+               type_check(dest, immed, opcode);
+       }
 
        /* Allocate sequencer space for the instruction and fill it out */
        instr = seq_alloc();
@@ -1766,9 +1785,6 @@ format_3_instr(int opcode, symbol_ref_t *src,
        /* Test register permissions */
        test_readable_symbol(src->symbol);
 
-       /* Ensure that immediate makes sense for this source */
-       type_check(src->symbol, immed, opcode);
-
        /* Allocate sequencer space for the instruction and fill it out */
        instr = seq_alloc();
        f3_instr = &instr->format.format3;
@@ -1797,7 +1813,6 @@ format_3_instr(int opcode, symbol_ref_t *src,
 static void
 test_readable_symbol(symbol_t *symbol)
 {
-       
        if ((symbol->info.rinfo->modes & (0x1 << src_mode)) == 0) {
                snprintf(errbuf, sizeof(errbuf),
                        "Register %s unavailable in source reg mode %d",
@@ -1815,7 +1830,6 @@ test_readable_symbol(symbol_t *symbol)
 static void
 test_writable_symbol(symbol_t *symbol)
 {
-       
        if ((symbol->info.rinfo->modes & (0x1 << dst_mode)) == 0) {
                snprintf(errbuf, sizeof(errbuf),
                        "Register %s unavailable in destination reg mode %d",
@@ -1831,25 +1845,34 @@ test_writable_symbol(symbol_t *symbol)
 }
 
 static void
-type_check(symbol_t *symbol, expression_t *expression, int opcode)
+type_check(symbol_ref_t *sym, expression_t *expression, int opcode)
 {
+       symbol_t *symbol = sym->symbol;
        symbol_node_t *node;
        int and_op;
+       int8_t value, mask;
 
        and_op = FALSE;
-       if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ || opcode == AIC_OP_JZ)
-               and_op = TRUE;
-
        /*
         * Make sure that we aren't attempting to write something
         * that hasn't been defined.  If this is an and operation,
         * this is a mask, so "undefined" bits are okay.
         */
-       if (and_op == FALSE
-        && (expression->value & ~symbol->info.rinfo->valid_bitmask) != 0) {
+       if (opcode == AIC_OP_AND || opcode == AIC_OP_JNZ ||
+           opcode == AIC_OP_JZ  || opcode == AIC_OP_JNE ||
+           opcode == AIC_OP_BMOV)
+               and_op = TRUE;
+
+       /*
+        * Defaulting to 8 bit logic
+        */
+       mask = (int8_t)~symbol->info.rinfo->valid_bitmask;
+       value = (int8_t)expression->value;
+
+       if (and_op == FALSE && (mask & value) != 0 ) {
                snprintf(errbuf, sizeof(errbuf),
                         "Invalid bit(s) 0x%x in immediate written to %s",
-                        expression->value & ~symbol->info.rinfo->valid_bitmask,
+                        (mask & value),
                         symbol->name);
                stop(errbuf, EX_DATAERR);
                /* NOTREACHED */
@@ -1959,3 +1982,13 @@ is_download_const(expression_t *immed)
 
        return (FALSE);
 }
+
+static int
+is_location_address(symbol_t *sym)
+{
+       if (sym->type == SCBLOC ||
+           sym->type == SRAMLOC)
+               return (TRUE);
+       return (FALSE);
+}
+
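type_check() now receives the symbol reference rather than the bare symbol, treats JNE and BMOV as mask-style operations alongside AND/JNZ/JZ, and truncates both the immediate and the invalid-bit mask to 8 bits before comparing, matching the width of the sequencer registers. A standalone sketch of that final check, with the error reporting reduced to a return value:

#include <stdint.h>

/*
 * Return nonzero if 'value' sets bits outside 'valid_bitmask'.
 * Both operands are truncated to 8 bits first, so a sign-extended
 * immediate such as ~SOME_BIT no longer trips the check spuriously.
 */
static int invalid_immediate_bits(uint8_t valid_bitmask, int value)
{
	int8_t mask = (int8_t)~valid_bitmask;
	int8_t val  = (int8_t)value;

	return (mask & val) != 0;
}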
index 7c3983f868a9a8c171d9d165438abf5f187be6fd..2c7f02daf88d21387967be473c660dcc9a2c1a0e 100644 (file)
@@ -162,6 +162,7 @@ register            { return T_REGISTER; }
 const                  { yylval.value = FALSE; return T_CONST; }
 download               { return T_DOWNLOAD; }
 address                        { return T_ADDRESS; }
+count                  { return T_COUNT; }
 access_mode            { return T_ACCESS_MODE; }
 modes                  { return T_MODES; }
 RW|RO|WO               {
@@ -228,15 +229,15 @@ ret                       { return T_RET; }
 nop                    { return T_NOP; }
 
        /* ARP2 16bit extensions */
-or16                   { return T_OR16; }
-and16                  { return T_AND16; }
-xor16                  { return T_XOR16; }
-add16                  { return T_ADD16; }
-adc16                  { return T_ADC16; }
-mvi16                  { return T_MVI16; }
-test16                 { return T_TEST16; }
-cmp16                  { return T_CMP16; }
-cmpxchg                        { return T_CMPXCHG; }
+       /* or16                 { return T_OR16; } */
+       /* and16                        { return T_AND16; }*/
+       /* xor16                        { return T_XOR16; }*/
+       /* add16                        { return T_ADD16; }*/
+       /* adc16                        { return T_ADC16; }*/
+       /* mvi16                        { return T_MVI16; }*/
+       /* test16                       { return T_TEST16; }*/
+       /* cmp16                        { return T_CMP16; }*/
+       /* cmpxchg                      { return T_CMPXCHG; }*/
 
        /* Allowed Symbols */
 \<\<                   { return T_EXPR_LSHIFT; }
index f1f448dff569fef218055b4ecc6ad5aba0b70137..fcd357872b43d0e360456e732dbdb868f7eb013c 100644 (file)
@@ -77,6 +77,7 @@ symbol_create(char *name)
        if (new_symbol->name == NULL)
                 stop("Unable to strdup symbol name", EX_SOFTWARE);
        new_symbol->type = UNINITIALIZED;
+       new_symbol->count = 1;
        return (new_symbol);
 }
 
@@ -198,6 +199,12 @@ symtable_get(char *name)
                }
        }
        memcpy(&stored_ptr, data.data, sizeof(stored_ptr));
+       stored_ptr->count++;
+       data.data = &stored_ptr;
+       if (symtable->put(symtable, &key, &data, /*flags*/0) !=0) {
+               perror("Symtable put failed");
+               exit(EX_SOFTWARE);
+       }
        return (stored_ptr);
 }
 
@@ -256,7 +263,7 @@ symlist_add(symlist_t *symlist, symbol_t *symbol, int how)
                    && (curnode->symbol->info.finfo->value >
                        newnode->symbol->info.finfo->value))))
                 || (!field && (curnode->symbol->info.rinfo->address >
-                              newnode->symbol->info.rinfo->address))) {
+                               newnode->symbol->info.rinfo->address))) {
                        SLIST_INSERT_HEAD(symlist, newnode, links);
                        return;
                }
@@ -271,7 +278,7 @@ symlist_add(symlist_t *symlist, symbol_t *symbol, int how)
 
                                cursymbol = SLIST_NEXT(curnode, links)->symbol;
                                if ((field
-                                 && (cursymbol->type > symbol->type
+                                 && (cursymbol->type > symbol->type
                                   || (cursymbol->type == symbol->type
                                    && (cursymbol->info.finfo->value >
                                        symbol->info.finfo->value))))
@@ -351,7 +358,7 @@ aic_print_reg_dump_types(FILE *ofile)
 {
        if (ofile == NULL)
                return;
-               
+
        fprintf(ofile,
 "typedef int (%sreg_print_t)(u_int, u_int *, u_int);\n"
 "typedef struct %sreg_parse_entry {\n"
@@ -370,7 +377,7 @@ aic_print_reg_dump_start(FILE *dfile, symbol_node_t *regnode)
                return;
 
        fprintf(dfile,
-"static %sreg_parse_entry_t %s_parse_table[] = {\n",
+"static const %sreg_parse_entry_t %s_parse_table[] = {\n",
                prefix,
                regnode->symbol->name);
 }
@@ -385,7 +392,7 @@ aic_print_reg_dump_end(FILE *ofile, FILE *dfile,
        lower_name = strdup(regnode->symbol->name);
        if (lower_name == NULL)
                 stop("Unable to strdup symbol name", EX_SOFTWARE);
-       
+
        for (letter = lower_name; *letter != '\0'; letter++)
                *letter = tolower(*letter);
 
@@ -472,6 +479,7 @@ symtable_dump(FILE *ofile, FILE *dfile)
        DBT              key;
        DBT              data;
        int              flag;
+       int              reg_count = 0, reg_used = 0;
        u_int            i;
 
        if (symtable == NULL)
@@ -541,6 +549,9 @@ symtable_dump(FILE *ofile, FILE *dfile)
                        int              num_entries;
 
                        num_entries = 0;
+                       reg_count++;
+                       if (curnode->symbol->count == 1)
+                               break;
                        fields = &curnode->symbol->info.rinfo->fields;
                        SLIST_FOREACH(fieldnode, fields, links) {
                                if (num_entries == 0)
@@ -553,11 +564,14 @@ symtable_dump(FILE *ofile, FILE *dfile)
                        }
                        aic_print_reg_dump_end(ofile, dfile,
                                               curnode, num_entries);
+                       reg_used++;
                }
                default:
                        break;
                }
        }
+       fprintf(stderr, "%s: %d of %d register definitions used\n", appname,
+               reg_used, reg_count);
 
        /* Fold in the masks and bits */
        while (SLIST_FIRST(&masks) != NULL) {
@@ -646,7 +660,6 @@ symtable_dump(FILE *ofile, FILE *dfile)
                free(curnode);
        }
 
-       
        fprintf(ofile, "\n\n/* Downloaded Constant Definitions */\n");
 
        for (i = 0; SLIST_FIRST(&download_constants) != NULL; i++) {
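The other half of the count mechanism lives here: symbol_create() starts every symbol at a count of 1, symtable_get() bumps the count on each lookup and writes the record back to the database, and symtable_dump() skips any register whose count never rose above 1, reporting the used/total ratio on stderr. A simplified sketch of that pruning pass, assuming a plain array instead of the Berkeley DB table:

#include <stdio.h>

struct reg_symbol {
	const char	*name;
	int		count;	/* 1 == defined but never referenced */
};

/*
 * Emit dump tables only for registers that were actually referenced
 * (or explicitly bumped with the "count" attribute), and report how
 * many of the definitions ended up being used.
 */
static void dump_used_registers(const struct reg_symbol *regs, int nregs,
				FILE *ofile)
{
	int reg_count = 0, reg_used = 0;
	int i;

	for (i = 0; i < nregs; i++) {
		reg_count++;
		if (regs[i].count == 1)
			continue;	/* unused: no parse table emitted */
		fprintf(ofile, "/* parse table for %s */\n", regs[i].name);
		reg_used++;
	}
	fprintf(stderr, "%d of %d register definitions used\n",
		reg_used, reg_count);
}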
index afc22e8b4903cfc3caf81290636e13293989fe70..05190c1a2fb779b880efbb2cc672b65da2058e4c 100644 (file)
@@ -128,6 +128,7 @@ typedef struct expression_info {
 typedef struct symbol {
        char    *name;
        symtype type;
+       int     count;
        union   {
                struct reg_info   *rinfo;
                struct field_info *finfo;
index c9dd8392aab20f138916d6b46150aba96b336483..ac92ac143b46783bc9df6323bcfee84680189e8f 100644 (file)
@@ -153,7 +153,7 @@ static DEFINE_SPINLOCK(adpt_post_wait_lock);
 
 static u8 adpt_read_blink_led(adpt_hba* host)
 {
-       if(host->FwDebugBLEDflag_P != 0) {
+       if (host->FwDebugBLEDflag_P) {
                if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
                        return readb(host->FwDebugBLEDvalue_P);
                }
index 8be3d76656faab6fdbfa92548febc77bb4db03e5..a73a6bbb1b2bb37d2426c19987992b4f6f1ef49c 100644 (file)
@@ -2286,17 +2286,14 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec,
        }
 }
 
-static irqreturn_t ihdlr(int irq, struct Scsi_Host *shost)
+static irqreturn_t ihdlr(struct Scsi_Host *shost)
 {
        struct scsi_cmnd *SCpnt;
        unsigned int i, k, c, status, tstatus, reg;
        struct mssp *spp;
        struct mscp *cpp;
        struct hostdata *ha = (struct hostdata *)shost->hostdata;
-
-       if (shost->irq != irq)
-               panic("%s: ihdlr, irq %d, shost->irq %d.\n", ha->board_name, irq,
-                     shost->irq);
+       int irq = shost->irq;
 
        /* Check if this board need to be serviced */
        if (!(inb(shost->io_port + REG_AUX_STATUS) & IRQ_ASSERTED))
@@ -2535,7 +2532,7 @@ static irqreturn_t ihdlr(int irq, struct Scsi_Host *shost)
        return IRQ_NONE;
 }
 
-static irqreturn_t do_interrupt_handler(int irq, void *shap)
+static irqreturn_t do_interrupt_handler(int dummy, void *shap)
 {
        struct Scsi_Host *shost;
        unsigned int j;
@@ -2548,7 +2545,7 @@ static irqreturn_t do_interrupt_handler(int irq, void *shap)
        shost = sh[j];
 
        spin_lock_irqsave(shost->host_lock, spin_flags);
-       ret = ihdlr(irq, shost);
+       ret = ihdlr(shost);
        spin_unlock_irqrestore(shost->host_lock, spin_flags);
        return ret;
 }
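The irq argument to ihdlr() existed only to be checked against shost->irq; since do_interrupt_handler() already recovers the right Scsi_Host from the dev_id cookie handed to request_irq(), the check (and its panic) disappear and the outer handler's first parameter becomes a dummy. A hedged sketch of the registration side this relies on; example_register_irq and board_cookie are illustrative names, while do_interrupt_handler is the handler from the hunk above:

#include <linux/interrupt.h>
#include <scsi/scsi_host.h>

/*
 * Whatever cookie is registered here comes back verbatim as the
 * void * argument of do_interrupt_handler(), so the handler can
 * identify the board without trusting the irq number at all.
 */
static int example_register_irq(struct Scsi_Host *shost, void *board_cookie)
{
	return request_irq(shost->irq, do_interrupt_handler,
			   IRQF_SHARED, "eata", board_cookie);
}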
index bfdee596889296f3c4ef3a6db1f7201c3f30e34a..a0b6d414953dda3128acc7477827a7dcfbdd429b 100644 (file)
@@ -978,7 +978,7 @@ static int esp_check_spur_intr(struct esp *esp)
                         */
                        if (!esp->ops->dma_error(esp)) {
                                printk(KERN_ERR PFX "esp%d: Spurious irq, "
-                                      "sreg=%x.\n",
+                                      "sreg=%02x.\n",
                                       esp->host->unique_id, esp->sreg);
                                return -1;
                        }
@@ -1447,6 +1447,9 @@ static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
        if (offset > 15)
                goto do_reject;
 
+       if (esp->flags & ESP_FLAG_DISABLE_SYNC)
+               offset = 0;
+
        if (offset) {
                int rounded_up, one_clock;
 
@@ -1697,7 +1700,12 @@ again:
                else
                        ent->flags &= ~ESP_CMD_FLAG_WRITE;
 
-               dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
+               if (esp->ops->dma_length_limit)
+                       dma_len = esp->ops->dma_length_limit(esp, dma_addr,
+                                                            dma_len);
+               else
+                       dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
+
                esp->data_dma_len = dma_len;
 
                if (!dma_len) {
@@ -1761,7 +1769,6 @@ again:
                esp_advance_dma(esp, ent, cmd, bytes_sent);
                esp_event(esp, ESP_EVENT_CHECK_PHASE);
                goto again;
-               break;
        }
 
        case ESP_EVENT_STATUS: {
@@ -2235,7 +2242,7 @@ static void esp_bootup_reset(struct esp *esp)
 
 static void esp_set_clock_params(struct esp *esp)
 {
-       int fmhz;
+       int fhz;
        u8 ccf;
 
        /* This is getting messy but it has to be done correctly or else
@@ -2270,9 +2277,9 @@ static void esp_set_clock_params(struct esp *esp)
         *    This entails the smallest and largest sync period we could ever
         *    handle on this ESP.
         */
-       fmhz = esp->cfreq;
+       fhz = esp->cfreq;
 
-       ccf = ((fmhz / 1000000) + 4) / 5;
+       ccf = ((fhz / 1000000) + 4) / 5;
        if (ccf == 1)
                ccf = 2;
 
@@ -2281,16 +2288,16 @@ static void esp_set_clock_params(struct esp *esp)
         * been unable to find the clock-frequency PROM property.  All
         * other machines provide useful values it seems.
         */
-       if (fmhz <= 5000000 || ccf < 1 || ccf > 8) {
-               fmhz = 20000000;
+       if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
+               fhz = 20000000;
                ccf = 4;
        }
 
        esp->cfact = (ccf == 8 ? 0 : ccf);
-       esp->cfreq = fmhz;
-       esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz);
+       esp->cfreq = fhz;
+       esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
        esp->ctick = ESP_TICK(ccf, esp->ccycle);
-       esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf);
+       esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
        esp->sync_defp = SYNC_DEFP_SLOW;
 }
 
@@ -2382,6 +2389,12 @@ static int esp_slave_configure(struct scsi_device *dev)
        struct esp_target_data *tp = &esp->target[dev->id];
        int goal_tags, queue_depth;
 
+       if (esp->flags & ESP_FLAG_DISABLE_SYNC) {
+               /* Bypass async domain validation */
+               dev->ppr  = 0;
+               dev->sdtr = 0;
+       }
+
        goal_tags = 0;
 
        if (dev->tagged_supported) {
index d5576d54ce768f6e801e174c29f69901f36bc111..bb43a138818805d6d3ca683c2c75ce27cef4d87e 100644 (file)
 #define ESP_TIMEO_CONST       8192
 #define ESP_NEG_DEFP(mhz, cfact) \
         ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact)))
-#define ESP_MHZ_TO_CYCLE(mhertz)  ((1000000000) / ((mhertz) / 1000))
+#define ESP_HZ_TO_CYCLE(hertz)  ((1000000000) / ((hertz) / 1000))
 #define ESP_TICK(ccf, cycle)  ((7682 * (ccf) * (cycle) / 1000))
 
 /* For slow to medium speed input clock rates we shoot for 5mb/s, but for high
@@ -240,9 +240,9 @@ struct esp_cmd_priv {
                int             num_sg;
        } u;
 
-       unsigned int            cur_residue;
+       int                     cur_residue;
        struct scatterlist      *cur_sg;
-       unsigned int            tot_residue;
+       int                     tot_residue;
 };
 #define ESP_CMD_PRIV(CMD)      ((struct esp_cmd_priv *)(&(CMD)->SCp))
 
@@ -368,6 +368,12 @@ struct esp_driver_ops {
         */
        int (*irq_pending)(struct esp *esp);
 
+       /* Return the maximum allowable size of a DMA transfer for a
+        * given buffer.
+        */
+       u32 (*dma_length_limit)(struct esp *esp, u32 dma_addr,
+                               u32 dma_len);
+
        /* Reset the DMA engine entirely.  On return, ESP interrupts
         * should be enabled.  Often the interrupt enabling is
         * controlled in the DMA engine.
@@ -471,6 +477,7 @@ struct esp {
 #define ESP_FLAG_DOING_SLOWCMD 0x00000004
 #define ESP_FLAG_WIDE_CAPABLE  0x00000008
 #define ESP_FLAG_QUICKIRQ_CHECK        0x00000010
+#define ESP_FLAG_DISABLE_SYNC  0x00000020
 
        u8                      select_state;
 #define ESP_SELECT_NONE                0x00 /* Not selecting */
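dma_length_limit is a new, optional front-end hook: when a driver supplies it, the core uses it to cap each transfer, otherwise it falls back to the generic esp_dma_length_limit() as shown in the esp_scsi.c hunk earlier in this diff. The mac_esp driver added below provides one to keep transfers within the chip's 16-bit transfer counter; a sketch of such an implementation, with an illustrative function name:

/*
 * Cap a single transfer at 64 KiB - 1: the ESP transfer counter
 * programmed through ESP_TCLOW/ESP_TCMED is only 16 bits wide.
 */
static u32 example_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	return dma_len > 0xFFFF ? 0xFFFF : dma_len;
}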
index 2cd6b4959eb2da2740ae1b1cd86bebb5995c251c..c33bcb284df7d465051d8682a16447e16eb40bdf 100644 (file)
@@ -1443,7 +1443,7 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
           current_SC->SCp.this_residual    = current_SC->SCp.buffer->length;
           current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
    } else {
-          current_SC->SCp.ptr              = 0;
+          current_SC->SCp.ptr              = NULL;
           current_SC->SCp.this_residual    = 0;
           current_SC->SCp.buffer           = NULL;
           current_SC->SCp.buffers_residual = 0;
index c264a8c5f01e1f362284476f1513a38f38b56ed1..3690360d7a798ec98f73e58bb83880fe01e99de3 100644 (file)
@@ -199,9 +199,13 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
        if (!shost->can_queue) {
                printk(KERN_ERR "%s: can_queue = 0 no longer supported\n",
                                sht->name);
-               goto out;
+               goto fail;
        }
 
+       error = scsi_setup_command_freelist(shost);
+       if (error)
+               goto fail;
+
        if (!shost->shost_gendev.parent)
                shost->shost_gendev.parent = dev ? dev : &platform_bus;
 
@@ -255,6 +259,8 @@ int scsi_add_host(struct Scsi_Host *shost, struct device *dev)
  out_del_gendev:
        device_del(&shost->shost_gendev);
  out:
+       scsi_destroy_command_freelist(shost);
+ fail:
        return error;
 }
 EXPORT_SYMBOL(scsi_add_host);
@@ -284,6 +290,11 @@ static void scsi_host_dev_release(struct device *dev)
        kfree(shost);
 }
 
+struct device_type scsi_host_type = {
+       .name =         "scsi_host",
+       .release =      scsi_host_dev_release,
+};
+
 /**
  * scsi_host_alloc - register a scsi host adapter instance.
  * @sht:       pointer to scsi host template
@@ -376,33 +387,31 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
        else
                shost->dma_boundary = 0xffffffff;
 
-       rval = scsi_setup_command_freelist(shost);
-       if (rval)
-               goto fail_kfree;
-
        device_initialize(&shost->shost_gendev);
        snprintf(shost->shost_gendev.bus_id, BUS_ID_SIZE, "host%d",
                shost->host_no);
-       shost->shost_gendev.release = scsi_host_dev_release;
+#ifndef CONFIG_SYSFS_DEPRECATED
+       shost->shost_gendev.bus = &scsi_bus_type;
+#endif
+       shost->shost_gendev.type = &scsi_host_type;
 
        device_initialize(&shost->shost_dev);
        shost->shost_dev.parent = &shost->shost_gendev;
        shost->shost_dev.class = &shost_class;
        snprintf(shost->shost_dev.bus_id, BUS_ID_SIZE, "host%d",
                 shost->host_no);
+       shost->shost_dev.groups = scsi_sysfs_shost_attr_groups;
 
        shost->ehandler = kthread_run(scsi_error_handler, shost,
                        "scsi_eh_%d", shost->host_no);
        if (IS_ERR(shost->ehandler)) {
                rval = PTR_ERR(shost->ehandler);
-               goto fail_destroy_freelist;
+               goto fail_kfree;
        }
 
        scsi_proc_hostdir_add(shost->hostt);
        return shost;
 
- fail_destroy_freelist:
-       scsi_destroy_command_freelist(shost);
  fail_kfree:
        kfree(shost);
        return NULL;
@@ -496,7 +505,7 @@ void scsi_exit_hosts(void)
 
 int scsi_is_host_device(const struct device *dev)
 {
-       return dev->release == scsi_host_dev_release;
+       return dev->type == &scsi_host_type;
 }
 EXPORT_SYMBOL(scsi_is_host_device);
 
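Tagging every host with the shared scsi_host_type lets scsi_is_host_device() do a pointer comparison on dev->type instead of matching a release callback, and it gives the host's release handling and sysfs attribute groups one home. The same device_type idiom in isolation, with hypothetical names:

#include <linux/device.h>
#include <linux/slab.h>

struct example_obj {
	struct device	dev;
	/* driver-private state would follow */
};

static void example_dev_release(struct device *dev)
{
	kfree(container_of(dev, struct example_obj, dev));
}

static struct device_type example_type = {
	.name =		"example",
	.release =	example_dev_release,
};

/* Cheap, unambiguous type test: compare the device_type pointer. */
static int is_example_device(const struct device *dev)
{
	return dev->type == &example_type;
}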
index 93c3fc20aa591f6f243e67f0691a4cf1811a75bf..44d8d5163a1a357e2e4a41b22544f28df25823ce 100644 (file)
@@ -134,6 +134,7 @@ static inline idescsi_scsi_t *drive_to_idescsi(ide_drive_t *ide_drive)
 static void idescsi_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
                unsigned int bcount)
 {
+       ide_hwif_t *hwif = drive->hwif;
        int count;
        char *buf;
 
@@ -145,14 +146,12 @@ static void idescsi_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
                        local_irq_save(flags);
                        buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
                                        pc->sg->offset;
-                       drive->hwif->atapi_input_bytes(drive,
-                                               buf + pc->b_count, count);
+                       hwif->input_data(drive, NULL, buf + pc->b_count, count);
                        kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
                        local_irq_restore(flags);
                } else {
                        buf = sg_virt(pc->sg);
-                       drive->hwif->atapi_input_bytes(drive,
-                                               buf + pc->b_count, count);
+                       hwif->input_data(drive, NULL, buf + pc->b_count, count);
                }
                bcount -= count; pc->b_count += count;
                if (pc->b_count == pc->sg->length) {
@@ -165,13 +164,14 @@ static void idescsi_input_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
 
        if (bcount) {
                printk (KERN_ERR "ide-scsi: scatter gather table too small, discarding data\n");
-               ide_atapi_discard_data(drive, bcount);
+               ide_pad_transfer(drive, 0, bcount);
        }
 }
 
 static void idescsi_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
                unsigned int bcount)
 {
+       ide_hwif_t *hwif = drive->hwif;
        int count;
        char *buf;
 
@@ -183,14 +183,12 @@ static void idescsi_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
                        local_irq_save(flags);
                        buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
                                                pc->sg->offset;
-                       drive->hwif->atapi_output_bytes(drive,
-                                               buf + pc->b_count, count);
+                       hwif->output_data(drive, NULL, buf + pc->b_count, count);
                        kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
                        local_irq_restore(flags);
                } else {
                        buf = sg_virt(pc->sg);
-                       drive->hwif->atapi_output_bytes(drive,
-                                               buf + pc->b_count, count);
+                       hwif->output_data(drive, NULL, buf + pc->b_count, count);
                }
                bcount -= count; pc->b_count += count;
                if (pc->b_count == pc->sg->length) {
@@ -203,7 +201,7 @@ static void idescsi_output_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
 
        if (bcount) {
                printk (KERN_ERR "ide-scsi: scatter gather table too small, padding with zeros\n");
-               ide_atapi_write_zeros(drive, bcount);
+               ide_pad_transfer(drive, 1, bcount);
        }
 }
 
@@ -258,8 +256,8 @@ idescsi_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err)
 
        if (ide_read_status(drive) & (BUSY_STAT | DRQ_STAT))
                /* force an abort */
-               hwif->OUTB(WIN_IDLEIMMEDIATE,
-                          hwif->io_ports[IDE_COMMAND_OFFSET]);
+               hwif->OUTBSYNC(drive, WIN_IDLEIMMEDIATE,
+                              hwif->io_ports.command_addr);
 
        rq->errors++;
 
@@ -393,7 +391,7 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
                printk ("ide-scsi: %s: DMA complete\n", drive->name);
 #endif /* IDESCSI_DEBUG_LOG */
                pc->xferred = pc->req_xfer;
-               (void) HWIF(drive)->ide_dma_end(drive);
+               (void)hwif->dma_ops->dma_end(drive);
        }
 
        /* Clear the interrupt */
@@ -410,9 +408,9 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
                idescsi_end_request (drive, 1, 0);
                return ide_stopped;
        }
-       bcount = (hwif->INB(hwif->io_ports[IDE_BCOUNTH_OFFSET]) << 8) |
-                 hwif->INB(hwif->io_ports[IDE_BCOUNTL_OFFSET]);
-       ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+       bcount = (hwif->INB(hwif->io_ports.lbah_addr) << 8) |
+                 hwif->INB(hwif->io_ports.lbam_addr);
+       ireason = hwif->INB(hwif->io_ports.nsect_addr);
 
        if (ireason & CD) {
                printk(KERN_ERR "ide-scsi: CoD != 0 in idescsi_pc_intr\n");
@@ -432,14 +430,15 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
                                                idescsi_input_buffers(drive, pc,
                                                                        temp);
                                        else
-                                               drive->hwif->atapi_input_bytes(drive, pc->cur_pos, temp);
+                                               hwif->input_data(drive, NULL,
+                                                       pc->cur_pos, temp);
                                        printk(KERN_ERR "ide-scsi: transferred"
                                                        " %d of %d bytes\n",
                                                        temp, bcount);
                                }
                                pc->xferred += temp;
                                pc->cur_pos += temp;
-                               ide_atapi_discard_data(drive, bcount - temp);
+                               ide_pad_transfer(drive, 0, bcount - temp);
                                ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry);
                                return ide_started;
                        }
@@ -453,15 +452,13 @@ static ide_startstop_t idescsi_pc_intr (ide_drive_t *drive)
                if (pc->sg)
                        idescsi_input_buffers(drive, pc, bcount);
                else
-                       hwif->atapi_input_bytes(drive, pc->cur_pos,
-                                               bcount);
+                       hwif->input_data(drive, NULL, pc->cur_pos, bcount);
        } else {
                pc->flags |= PC_FLAG_WRITING;
                if (pc->sg)
                        idescsi_output_buffers(drive, pc, bcount);
                else
-                       hwif->atapi_output_bytes(drive, pc->cur_pos,
-                                                bcount);
+                       hwif->output_data(drive, NULL, pc->cur_pos, bcount);
        }
        /* Update the current position */
        pc->xferred += bcount;
@@ -485,7 +482,7 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
                        "initiated yet DRQ isn't asserted\n");
                return startstop;
        }
-       ireason = hwif->INB(hwif->io_ports[IDE_IREASON_OFFSET]);
+       ireason = hwif->INB(hwif->io_ports.nsect_addr);
        if ((ireason & CD) == 0 || (ireason & IO)) {
                printk(KERN_ERR "ide-scsi: (IO,CoD) != (0,1) while "
                                "issuing a packet command\n");
@@ -494,11 +491,13 @@ static ide_startstop_t idescsi_transfer_pc(ide_drive_t *drive)
        BUG_ON(HWGROUP(drive)->handler != NULL);
        /* Set the interrupt routine */
        ide_set_handler(drive, &idescsi_pc_intr, get_timeout(pc), idescsi_expiry);
+
        /* Send the actual packet */
-       drive->hwif->atapi_output_bytes(drive, scsi->pc->c, 12);
+       hwif->output_data(drive, NULL, scsi->pc->c, 12);
+
        if (pc->flags & PC_FLAG_DMA_OK) {
                pc->flags |= PC_FLAG_DMA_IN_PROGRESS;
-               hwif->dma_start(drive);
+               hwif->dma_ops->dma_start(drive);
        }
        return ide_started;
 }
@@ -560,7 +559,7 @@ static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
 
        if (drive->using_dma && !idescsi_map_sg(drive, pc)) {
                hwif->sg_mapped = 1;
-               dma = !hwif->dma_setup(drive);
+               dma = !hwif->dma_ops->dma_setup(drive);
                hwif->sg_mapped = 0;
        }
 
@@ -575,7 +574,7 @@ static ide_startstop_t idescsi_issue_pc(ide_drive_t *drive,
                return ide_started;
        } else {
                /* Issue the packet command */
-               hwif->OUTB(WIN_PACKETCMD, hwif->io_ports[IDE_COMMAND_OFFSET]);
+               ide_execute_pkt_cmd(drive);
                return idescsi_transfer_pc(drive);
        }
 }
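All of the ide-scsi conversions above follow the same shape: the per-hwif atapi_input_bytes()/atapi_output_bytes() methods become the unified input_data()/output_data() calls (with a NULL request argument), and ide_atapi_discard_data()/ide_atapi_write_zeros() collapse into ide_pad_transfer(), whose second argument selects the padding direction. A condensed sketch of the read side after the conversion; the helper name is illustrative:

/*
 * Pull 'count' bytes from the device into 'buf' and, if the
 * scatter list came up short, drain the leftover bytes so the
 * drive's transfer completes (0 selects the read direction).
 */
static void example_pull_data(ide_drive_t *drive, char *buf,
			      unsigned int count, unsigned int leftover)
{
	ide_hwif_t *hwif = drive->hwif;

	hwif->input_data(drive, NULL, buf, count);
	if (leftover)
		ide_pad_transfer(drive, 0, leftover);
}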
index 5d231015bb2058abebe009fbfdadae98f85cf8d9..b2d481dd37505f43f25d9c0cc79659e900bb25ca 100644 (file)
@@ -217,11 +217,15 @@ static int __devexit esp_jazz_remove(struct platform_device *dev)
        return 0;
 }
 
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:jazz_esp");
+
 static struct platform_driver esp_jazz_driver = {
        .probe          = esp_jazz_probe,
        .remove         = __devexit_p(esp_jazz_remove),
        .driver = {
                .name   = "jazz_esp",
+               .owner  = THIS_MODULE,
        },
 };
 
index a9fbb3f88659bb7bbcb117d9f268df77fb786f61..960baaf11fb181d8e357cb124f35e98a5b883307 100644 (file)
@@ -182,8 +182,8 @@ lpfc_option_rom_version_show(struct device *dev, struct device_attribute *attr,
        return snprintf(buf, PAGE_SIZE, "%s\n", phba->OptionROMVersion);
 }
 static ssize_t
-lpfc_state_show(struct device *dev, struct device_attribute *attr,
-               char *buf)
+lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
+                    char *buf)
 {
        struct Scsi_Host  *shost = class_to_shost(dev);
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
@@ -936,7 +936,7 @@ static DEVICE_ATTR(programtype, S_IRUGO, lpfc_programtype_show, NULL);
 static DEVICE_ATTR(portnum, S_IRUGO, lpfc_vportnum_show, NULL);
 static DEVICE_ATTR(fwrev, S_IRUGO, lpfc_fwrev_show, NULL);
 static DEVICE_ATTR(hdw, S_IRUGO, lpfc_hdw_show, NULL);
-static DEVICE_ATTR(state, S_IRUGO, lpfc_state_show, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO, lpfc_link_state_show, NULL);
 static DEVICE_ATTR(option_rom_version, S_IRUGO,
                   lpfc_option_rom_version_show, NULL);
 static DEVICE_ATTR(num_discovered_ports, S_IRUGO,
@@ -1666,7 +1666,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_fwrev,
        &dev_attr_hdw,
        &dev_attr_option_rom_version,
-       &dev_attr_state,
+       &dev_attr_link_state,
        &dev_attr_num_discovered_ports,
        &dev_attr_lpfc_drvr_version,
        &dev_attr_lpfc_temp_sensor,
@@ -1714,7 +1714,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
 
 struct device_attribute *lpfc_vport_attrs[] = {
        &dev_attr_info,
-       &dev_attr_state,
+       &dev_attr_link_state,
        &dev_attr_num_discovered_ports,
        &dev_attr_lpfc_drvr_version,
        &dev_attr_lpfc_log_verbose,
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
new file mode 100644 (file)
index 0000000..cd37bd6
--- /dev/null
@@ -0,0 +1,657 @@
+/* mac_esp.c: ESP front-end for Macintosh Quadra systems.
+ *
+ * Adapted from jazz_esp.c and the old mac_esp.c.
+ *
+ * The pseudo DMA algorithm is based on the one used in NetBSD.
+ * See sys/arch/mac68k/obio/esp.c for some background information.
+ *
+ * Copyright (C) 2007-2008 Finn Thain
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/nubus.h>
+
+#include <asm/irq.h>
+#include <asm/dma.h>
+
+#include <asm/macints.h>
+#include <asm/macintosh.h>
+
+#include <scsi/scsi_host.h>
+
+#include "esp_scsi.h"
+
+#define DRV_MODULE_NAME     "mac_esp"
+#define PFX                 DRV_MODULE_NAME ": "
+#define DRV_VERSION         "1.000"
+#define DRV_MODULE_RELDATE  "Sept 15, 2007"
+
+#define MAC_ESP_IO_BASE          0x50F00000
+#define MAC_ESP_REGS_QUADRA      (MAC_ESP_IO_BASE + 0x10000)
+#define MAC_ESP_REGS_QUADRA2     (MAC_ESP_IO_BASE + 0xF000)
+#define MAC_ESP_REGS_QUADRA3     (MAC_ESP_IO_BASE + 0x18000)
+#define MAC_ESP_REGS_SPACING     0x402
+#define MAC_ESP_PDMA_REG         0xF9800024
+#define MAC_ESP_PDMA_REG_SPACING 0x4
+#define MAC_ESP_PDMA_IO_OFFSET   0x100
+
+#define esp_read8(REG)         mac_esp_read8(esp, REG)
+#define esp_write8(VAL, REG)   mac_esp_write8(esp, VAL, REG)
+
+struct mac_esp_priv {
+       struct esp *esp;
+       void __iomem *pdma_regs;
+       void __iomem *pdma_io;
+       int error;
+};
+static struct platform_device *internal_esp, *external_esp;
+
+#define MAC_ESP_GET_PRIV(esp) ((struct mac_esp_priv *) \
+                              platform_get_drvdata((struct platform_device *) \
+                                                   (esp->dev)))
+
+static inline void mac_esp_write8(struct esp *esp, u8 val, unsigned long reg)
+{
+       nubus_writeb(val, esp->regs + reg * 16);
+}
+
+static inline u8 mac_esp_read8(struct esp *esp, unsigned long reg)
+{
+       return nubus_readb(esp->regs + reg * 16);
+}
+
+/* For pseudo DMA and PIO we need the virtual address
+ * so this address mapping is the identity mapping.
+ */
+
+static dma_addr_t mac_esp_map_single(struct esp *esp, void *buf,
+                                    size_t sz, int dir)
+{
+       return (dma_addr_t)buf;
+}
+
+static int mac_esp_map_sg(struct esp *esp, struct scatterlist *sg,
+                         int num_sg, int dir)
+{
+       int i;
+
+       for (i = 0; i < num_sg; i++)
+               sg[i].dma_address = (u32)sg_virt(&sg[i]);
+       return num_sg;
+}
+
+static void mac_esp_unmap_single(struct esp *esp, dma_addr_t addr,
+                                size_t sz, int dir)
+{
+       /* Nothing to do. */
+}
+
+static void mac_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
+                            int num_sg, int dir)
+{
+       /* Nothing to do. */
+}
+
+static void mac_esp_reset_dma(struct esp *esp)
+{
+       /* Nothing to do. */
+}
+
+static void mac_esp_dma_drain(struct esp *esp)
+{
+       /* Nothing to do. */
+}
+
+static void mac_esp_dma_invalidate(struct esp *esp)
+{
+       /* Nothing to do. */
+}
+
+static int mac_esp_dma_error(struct esp *esp)
+{
+       return MAC_ESP_GET_PRIV(esp)->error;
+}
+
+static inline int mac_esp_wait_for_empty_fifo(struct esp *esp)
+{
+       struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+       int i = 500000;
+
+       do {
+               if (!(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES))
+                       return 0;
+
+               if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
+                       return 1;
+
+               udelay(2);
+       } while (--i);
+
+       printk(KERN_ERR PFX "FIFO is not empty (sreg %02x)\n",
+              esp_read8(ESP_STATUS));
+       mep->error = 1;
+       return 1;
+}
+
+static inline int mac_esp_wait_for_dreq(struct esp *esp)
+{
+       struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+       int i = 500000;
+
+       do {
+               if (mep->pdma_regs == NULL) {
+                       if (mac_irq_pending(IRQ_MAC_SCSIDRQ))
+                               return 0;
+               } else {
+                       if (nubus_readl(mep->pdma_regs) & 0x200)
+                               return 0;
+               }
+
+               if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
+                       return 1;
+
+               udelay(2);
+       } while (--i);
+
+       printk(KERN_ERR PFX "PDMA timeout (sreg %02x)\n",
+              esp_read8(ESP_STATUS));
+       mep->error = 1;
+       return 1;
+}
+
+#define MAC_ESP_PDMA_LOOP(operands) \
+       asm volatile ( \
+            "       tstw %2                   \n" \
+            "       jbeq 20f                  \n" \
+            "1:     movew " operands "        \n" \
+            "2:     movew " operands "        \n" \
+            "3:     movew " operands "        \n" \
+            "4:     movew " operands "        \n" \
+            "5:     movew " operands "        \n" \
+            "6:     movew " operands "        \n" \
+            "7:     movew " operands "        \n" \
+            "8:     movew " operands "        \n" \
+            "9:     movew " operands "        \n" \
+            "10:    movew " operands "        \n" \
+            "11:    movew " operands "        \n" \
+            "12:    movew " operands "        \n" \
+            "13:    movew " operands "        \n" \
+            "14:    movew " operands "        \n" \
+            "15:    movew " operands "        \n" \
+            "16:    movew " operands "        \n" \
+            "       subqw #1,%2               \n" \
+            "       jbne 1b                   \n" \
+            "20:    tstw %3                   \n" \
+            "       jbeq 30f                  \n" \
+            "21:    movew " operands "        \n" \
+            "       subqw #1,%3               \n" \
+            "       jbne 21b                  \n" \
+            "30:    tstw %4                   \n" \
+            "       jbeq 40f                  \n" \
+            "31:    moveb " operands "        \n" \
+            "32:    nop                       \n" \
+            "40:                              \n" \
+            "                                 \n" \
+            "       .section __ex_table,\"a\" \n" \
+            "       .align  4                 \n" \
+            "       .long   1b,40b            \n" \
+            "       .long   2b,40b            \n" \
+            "       .long   3b,40b            \n" \
+            "       .long   4b,40b            \n" \
+            "       .long   5b,40b            \n" \
+            "       .long   6b,40b            \n" \
+            "       .long   7b,40b            \n" \
+            "       .long   8b,40b            \n" \
+            "       .long   9b,40b            \n" \
+            "       .long  10b,40b            \n" \
+            "       .long  11b,40b            \n" \
+            "       .long  12b,40b            \n" \
+            "       .long  13b,40b            \n" \
+            "       .long  14b,40b            \n" \
+            "       .long  15b,40b            \n" \
+            "       .long  16b,40b            \n" \
+            "       .long  21b,40b            \n" \
+            "       .long  31b,40b            \n" \
+            "       .long  32b,40b            \n" \
+            "       .previous                 \n" \
+            : "+a" (addr) \
+            : "a" (mep->pdma_io), "r" (count32), "r" (count2), "g" (esp_count))
+
+static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count,
+                                 u32 dma_count, int write, u8 cmd)
+{
+       struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       mep->error = 0;
+
+       if (!write)
+               scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+       esp_write8((esp_count >> 0) & 0xFF, ESP_TCLOW);
+       esp_write8((esp_count >> 8) & 0xFF, ESP_TCMED);
+
+       scsi_esp_cmd(esp, cmd);
+
+       do {
+               unsigned int count32 = esp_count >> 5;
+               unsigned int count2 = (esp_count & 0x1F) >> 1;
+               unsigned int start_addr = addr;
+
+               if (mac_esp_wait_for_dreq(esp))
+                       break;
+
+               if (write) {
+                       MAC_ESP_PDMA_LOOP("%1@,%0@+");
+
+                       esp_count -= addr - start_addr;
+               } else {
+                       unsigned int n;
+
+                       MAC_ESP_PDMA_LOOP("%0@+,%1@");
+
+                       if (mac_esp_wait_for_empty_fifo(esp))
+                               break;
+
+                       n = (esp_read8(ESP_TCMED) << 8) + esp_read8(ESP_TCLOW);
+                       addr = start_addr + esp_count - n;
+                       esp_count = n;
+               }
+       } while (esp_count);
+
+       local_irq_restore(flags);
+}
+
+/*
+ * Programmed IO routines follow.
+ */
+
+static inline int mac_esp_wait_for_fifo(struct esp *esp)
+{
+       int i = 500000;
+
+       do {
+               if (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES)
+                       return 0;
+
+               udelay(2);
+       } while (--i);
+
+       printk(KERN_ERR PFX "FIFO is empty (sreg %02x)\n",
+              esp_read8(ESP_STATUS));
+       return 1;
+}
+
+static inline int mac_esp_wait_for_intr(struct esp *esp)
+{
+       int i = 500000;
+
+       do {
+               esp->sreg = esp_read8(ESP_STATUS);
+               if (esp->sreg & ESP_STAT_INTR)
+                       return 0;
+
+               udelay(2);
+       } while (--i);
+
+       printk(KERN_ERR PFX "IRQ timeout (sreg %02x)\n", esp->sreg);
+       return 1;
+}
+
+#define MAC_ESP_PIO_LOOP(operands, reg1) \
+       asm volatile ( \
+            "1:     moveb " operands " \n" \
+            "       subqw #1,%1        \n" \
+            "       jbne 1b            \n" \
+            : "+a" (addr), "+r" (reg1) \
+            : "a" (fifo))
+
+#define MAC_ESP_PIO_FILL(operands, reg1) \
+       asm volatile ( \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       moveb " operands " \n" \
+            "       subqw #8,%1        \n" \
+            "       subqw #8,%1        \n" \
+            : "+a" (addr), "+r" (reg1) \
+            : "a" (fifo))
+
+#define MAC_ESP_FIFO_SIZE 16
+
+static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
+                                u32 dma_count, int write, u8 cmd)
+{
+       unsigned long flags;
+       struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp);
+       u8 *fifo = esp->regs + ESP_FDATA * 16;
+
+       local_irq_save(flags);
+
+       cmd &= ~ESP_CMD_DMA;
+       mep->error = 0;
+
+       if (write) {
+               scsi_esp_cmd(esp, cmd);
+
+               if (!mac_esp_wait_for_intr(esp)) {
+                       if (mac_esp_wait_for_fifo(esp))
+                               esp_count = 0;
+               } else {
+                       esp_count = 0;
+               }
+       } else {
+               scsi_esp_cmd(esp, ESP_CMD_FLUSH);
+
+               if (esp_count >= MAC_ESP_FIFO_SIZE)
+                       MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
+               else
+                       MAC_ESP_PIO_LOOP("%0@+,%2@", esp_count);
+
+               scsi_esp_cmd(esp, cmd);
+       }
+
+       while (esp_count) {
+               unsigned int n;
+
+               if (mac_esp_wait_for_intr(esp)) {
+                       mep->error = 1;
+                       break;
+               }
+
+               if (esp->sreg & ESP_STAT_SPAM) {
+                       printk(KERN_ERR PFX "gross error\n");
+                       mep->error = 1;
+                       break;
+               }
+
+               n = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
+
+               if (write) {
+                       if (n > esp_count)
+                               n = esp_count;
+                       esp_count -= n;
+
+                       MAC_ESP_PIO_LOOP("%2@,%0@+", n);
+
+                       if ((esp->sreg & ESP_STAT_PMASK) == ESP_STATP)
+                               break;
+
+                       if (esp_count) {
+                               esp->ireg = esp_read8(ESP_INTRPT);
+                               if (esp->ireg & ESP_INTR_DC)
+                                       break;
+
+                               scsi_esp_cmd(esp, ESP_CMD_TI);
+                       }
+               } else {
+                       esp->ireg = esp_read8(ESP_INTRPT);
+                       if (esp->ireg & ESP_INTR_DC)
+                               break;
+
+                       n = MAC_ESP_FIFO_SIZE - n;
+                       if (n > esp_count)
+                               n = esp_count;
+
+                       if (n == MAC_ESP_FIFO_SIZE) {
+                               MAC_ESP_PIO_FILL("%0@+,%2@", esp_count);
+                       } else {
+                               esp_count -= n;
+                               MAC_ESP_PIO_LOOP("%0@+,%2@", n);
+                       }
+
+                       scsi_esp_cmd(esp, ESP_CMD_TI);
+               }
+       }
+
+       local_irq_restore(flags);
+}
+
+static int mac_esp_irq_pending(struct esp *esp)
+{
+       if (esp_read8(ESP_STATUS) & ESP_STAT_INTR)
+               return 1;
+       return 0;
+}
+
+static u32 mac_esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
+{
+       return dma_len > 0xFFFF ? 0xFFFF : dma_len;
+}
+
+static struct esp_driver_ops mac_esp_ops = {
+       .esp_write8       = mac_esp_write8,
+       .esp_read8        = mac_esp_read8,
+       .map_single       = mac_esp_map_single,
+       .map_sg           = mac_esp_map_sg,
+       .unmap_single     = mac_esp_unmap_single,
+       .unmap_sg         = mac_esp_unmap_sg,
+       .irq_pending      = mac_esp_irq_pending,
+       .dma_length_limit = mac_esp_dma_length_limit,
+       .reset_dma        = mac_esp_reset_dma,
+       .dma_drain        = mac_esp_dma_drain,
+       .dma_invalidate   = mac_esp_dma_invalidate,
+       .send_dma_cmd     = mac_esp_send_pdma_cmd,
+       .dma_error        = mac_esp_dma_error,
+};
+
+static int __devinit esp_mac_probe(struct platform_device *dev)
+{
+       struct scsi_host_template *tpnt = &scsi_esp_template;
+       struct Scsi_Host *host;
+       struct esp *esp;
+       int err;
+       int chips_present;
+       struct mac_esp_priv *mep;
+
+       if (!MACH_IS_MAC)
+               return -ENODEV;
+
+       switch (macintosh_config->scsi_type) {
+       case MAC_SCSI_QUADRA:
+       case MAC_SCSI_QUADRA3:
+               chips_present = 1;
+               break;
+       case MAC_SCSI_QUADRA2:
+               if ((macintosh_config->ident == MAC_MODEL_Q900) ||
+                   (macintosh_config->ident == MAC_MODEL_Q950))
+                       chips_present = 2;
+               else
+                       chips_present = 1;
+               break;
+       default:
+               chips_present = 0;
+       }
+
+       if (dev->id + 1 > chips_present)
+               return -ENODEV;
+
+       host = scsi_host_alloc(tpnt, sizeof(struct esp));
+
+       err = -ENOMEM;
+       if (!host)
+               goto fail;
+
+       host->max_id = 8;
+       host->use_clustering = DISABLE_CLUSTERING;
+       esp = shost_priv(host);
+
+       esp->host = host;
+       esp->dev = dev;
+
+       esp->command_block = kzalloc(16, GFP_KERNEL);
+       if (!esp->command_block)
+               goto fail_unlink;
+       esp->command_block_dma = (dma_addr_t)esp->command_block;
+
+       esp->scsi_id = 7;
+       host->this_id = esp->scsi_id;
+       esp->scsi_id_mask = 1 << esp->scsi_id;
+
+       mep = kzalloc(sizeof(struct mac_esp_priv), GFP_KERNEL);
+       if (!mep)
+               goto fail_free_command_block;
+       mep->esp = esp;
+       platform_set_drvdata(dev, mep);
+
+       switch (macintosh_config->scsi_type) {
+       case MAC_SCSI_QUADRA:
+               esp->cfreq     = 16500000;
+               esp->regs      = (void __iomem *)MAC_ESP_REGS_QUADRA;
+               mep->pdma_io   = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
+               mep->pdma_regs = NULL;
+               break;
+       case MAC_SCSI_QUADRA2:
+               esp->cfreq     = 25000000;
+               esp->regs      = (void __iomem *)(MAC_ESP_REGS_QUADRA2 +
+                                dev->id * MAC_ESP_REGS_SPACING);
+               mep->pdma_io   = esp->regs + MAC_ESP_PDMA_IO_OFFSET;
+               mep->pdma_regs = (void __iomem *)(MAC_ESP_PDMA_REG +
+                                dev->id * MAC_ESP_PDMA_REG_SPACING);
+               nubus_writel(0x1d1, mep->pdma_regs);
+               break;
+       case MAC_SCSI_QUADRA3:
+               /* These quadras have a real DMA controller (the PSC) but we
+                * don't know how to drive it so we must use PIO instead.
+                */
+               esp->cfreq     = 25000000;
+               esp->regs      = (void __iomem *)MAC_ESP_REGS_QUADRA3;
+               mep->pdma_io   = NULL;
+               mep->pdma_regs = NULL;
+               break;
+       }
+
+       esp->ops = &mac_esp_ops;
+       if (mep->pdma_io == NULL) {
+               printk(KERN_INFO PFX "using PIO for controller %d\n", dev->id);
+               esp_write8(0, ESP_TCLOW);
+               esp_write8(0, ESP_TCMED);
+               esp->flags = ESP_FLAG_DISABLE_SYNC;
+               mac_esp_ops.send_dma_cmd = mac_esp_send_pio_cmd;
+       } else {
+               printk(KERN_INFO PFX "using PDMA for controller %d\n", dev->id);
+       }
+
+       host->irq = IRQ_MAC_SCSI;
+       err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "Mac ESP",
+                         esp);
+       if (err < 0)
+               goto fail_free_priv;
+
+       err = scsi_esp_register(esp, &dev->dev);
+       if (err)
+               goto fail_free_irq;
+
+       return 0;
+
+fail_free_irq:
+       free_irq(host->irq, esp);
+fail_free_priv:
+       kfree(mep);
+fail_free_command_block:
+       kfree(esp->command_block);
+fail_unlink:
+       scsi_host_put(host);
+fail:
+       return err;
+}
+
+static int __devexit esp_mac_remove(struct platform_device *dev)
+{
+       struct mac_esp_priv *mep = platform_get_drvdata(dev);
+       struct esp *esp = mep->esp;
+       unsigned int irq = esp->host->irq;
+
+       scsi_esp_unregister(esp);
+
+       free_irq(irq, esp);
+
+       kfree(mep);
+
+       kfree(esp->command_block);
+
+       scsi_host_put(esp->host);
+
+       return 0;
+}
+
+static struct platform_driver esp_mac_driver = {
+       .probe    = esp_mac_probe,
+       .remove   = __devexit_p(esp_mac_remove),
+       .driver   = {
+               .name     = DRV_MODULE_NAME,
+       },
+};
+
+static int __init mac_esp_init(void)
+{
+       int err;
+
+       err = platform_driver_register(&esp_mac_driver);
+       if (err)
+               return err;
+
+       internal_esp = platform_device_alloc(DRV_MODULE_NAME, 0);
+       if (internal_esp && platform_device_add(internal_esp)) {
+               platform_device_put(internal_esp);
+               internal_esp = NULL;
+       }
+
+       external_esp = platform_device_alloc(DRV_MODULE_NAME, 1);
+       if (external_esp && platform_device_add(external_esp)) {
+               platform_device_put(external_esp);
+               external_esp = NULL;
+       }
+
+       if (internal_esp || external_esp) {
+               return 0;
+       } else {
+               platform_driver_unregister(&esp_mac_driver);
+               return -ENOMEM;
+       }
+}
+
+static void __exit mac_esp_exit(void)
+{
+       platform_driver_unregister(&esp_mac_driver);
+
+       if (internal_esp) {
+               platform_device_unregister(internal_esp);
+               internal_esp = NULL;
+       }
+       if (external_esp) {
+               platform_device_unregister(external_esp);
+               external_esp = NULL;
+       }
+}
+
+MODULE_DESCRIPTION("Mac ESP SCSI driver");
+MODULE_AUTHOR("Finn Thain <fthain@telegraphics.com.au>");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+
+module_init(mac_esp_init);
+module_exit(mac_esp_exit);
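A minimal sketch, not part of this patch, of the same registration done with platform_device_register_simple(), which bundles the alloc/add/put-on-error steps used in mac_esp_init() above; the "example" names below are hypothetical.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_device *example_pdev;

static int __init example_init(void)
{
        /* id 0, no resources; returns an ERR_PTR() value on failure */
        example_pdev = platform_device_register_simple("example", 0, NULL, 0);
        return IS_ERR(example_pdev) ? PTR_ERR(example_pdev) : 0;
}

static void __exit example_exit(void)
{
        platform_device_unregister(example_pdev);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL v2");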
index b135a1ed4b2cff191c1235106131ba4ca9f44229..18551aaf5e094e07612ddf028df0dc7f2a05d344 100644 (file)
@@ -4996,7 +4996,7 @@ static int __init megaraid_init(void)
                max_mbox_busy_wait = MBOX_BUSY_WAIT;
 
 #ifdef CONFIG_PROC_FS
-       mega_proc_dir_entry = proc_mkdir("megaraid", &proc_root);
+       mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
        if (!mega_proc_dir_entry) {
                printk(KERN_WARNING
                                "megaraid: failed to create megaraid root\n");
@@ -5005,7 +5005,7 @@ static int __init megaraid_init(void)
        error = pci_register_driver(&megaraid_pci_driver);
        if (error) {
 #ifdef CONFIG_PROC_FS
-               remove_proc_entry("megaraid", &proc_root);
+               remove_proc_entry("megaraid", NULL);
 #endif
                return error;
        }
@@ -5035,7 +5035,7 @@ static void __exit megaraid_exit(void)
        pci_unregister_driver(&megaraid_pci_driver);
 
 #ifdef CONFIG_PROC_FS
-       remove_proc_entry("megaraid", &proc_root);
+       remove_proc_entry("megaraid", NULL);
 #endif
 }
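The megaraid hunks above track the removal of the exported proc_root symbol: a NULL parent passed to proc_mkdir()/remove_proc_entry() now means the entry lives directly under /proc. A minimal sketch under that assumption (all names here are made up):

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/proc_fs.h>

static struct proc_dir_entry *example_dir;

static int __init example_proc_init(void)
{
        example_dir = proc_mkdir("example", NULL);      /* creates /proc/example */
        return example_dir ? 0 : -ENOMEM;
}

static void __exit example_proc_exit(void)
{
        remove_proc_entry("example", NULL);             /* removes /proc/example */
}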
 
index e55b9037adb224286b4a2a0a1f0efd77a2b3b967..1dd70d7a4947c9a5f6ec41d5e46b3dc52a311f11 100644 (file)
@@ -2822,7 +2822,9 @@ static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
                dev_printk(KERN_DEBUG, &pdev->dev,
                        "phy[%d] Get Attached Address 0x%llX ,"
                        " SAS Address 0x%llX\n",
-                       i, phy->att_dev_sas_addr, phy->dev_sas_addr);
+                       i,
+                       (unsigned long long)phy->att_dev_sas_addr,
+                       (unsigned long long)phy->dev_sas_addr);
                dev_printk(KERN_DEBUG, &pdev->dev,
                        "Rate = %x , type = %d\n",
                        sas_phy->linkrate, phy->phy_type);
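The casts added in the mvsas hunk above matter because u64 may be typedef'd to unsigned long on 64-bit builds while %llX expects unsigned long long. A small hedged illustration (the helper and value are made up):

#include <linux/kernel.h>
#include <linux/types.h>

static void example_print_sas_addr(u64 sas_addr)
{
        /*
         * The explicit cast keeps the format string portable whether u64
         * is unsigned long or unsigned long long on this architecture.
         */
        printk(KERN_DEBUG "SAS Address 0x%llX\n",
               (unsigned long long)sas_addr);
}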
index d8928940042557b46d1000e005546d99db914daf..c57c94c0ffd237f1d39b0cf6ea164e737ac11643 100644 (file)
@@ -8186,7 +8186,7 @@ static void insert_into_waiting_list(struct ncb *np, struct scsi_cmnd *cmd)
        cmd->next_wcmd = NULL;
        if (!(wcmd = np->waiting_list)) np->waiting_list = cmd;
        else {
-               while ((wcmd->next_wcmd) != 0)
+               while (wcmd->next_wcmd)
                        wcmd = (struct scsi_cmnd *) wcmd->next_wcmd;
                wcmd->next_wcmd = (char *) cmd;
        }
@@ -8222,7 +8222,7 @@ static void process_waiting_list(struct ncb *np, int sts)
 #ifdef DEBUG_WAITING_LIST
        if (waiting_list) printk("%s: waiting_list=%lx processing sts=%d\n", ncr_name(np), (u_long) waiting_list, sts);
 #endif
-       while ((wcmd = waiting_list) != 0) {
+       while ((wcmd = waiting_list) != NULL) {
                waiting_list = (struct scsi_cmnd *) wcmd->next_wcmd;
                wcmd->next_wcmd = NULL;
                if (sts == DID_OK) {
index d61df036910c1f46579807e5a17b9fee1ff73417..287690853caf2594ddc1fa1cd63f58b87b002562 100644 (file)
@@ -609,8 +609,8 @@ qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
 }
 
 static ssize_t
-qla2x00_state_show(struct device *dev, struct device_attribute *attr,
-                  char *buf)
+qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
 {
        scsi_qla_host_t *ha = shost_priv(class_to_shost(dev));
        int len = 0;
@@ -814,7 +814,7 @@ static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
 static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
 static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
 static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
-static DEVICE_ATTR(state, S_IRUGO, qla2x00_state_show, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
 static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
 static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
                   qla2x00_zio_timer_store);
@@ -838,7 +838,7 @@ struct device_attribute *qla2x00_host_attrs[] = {
        &dev_attr_model_name,
        &dev_attr_model_desc,
        &dev_attr_pci_info,
-       &dev_attr_state,
+       &dev_attr_link_state,
        &dev_attr_zio,
        &dev_attr_zio_timer,
        &dev_attr_beacon,
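For context on the rename above: DEVICE_ATTR(name, ...) both declares a dev_attr_<name> variable and uses <name> as the sysfs file name, so switching "state" to "link_state" renames the attribute node under /sys as well as the symbol. A minimal hedged sketch (the show callback is hypothetical):

#include <linux/device.h>
#include <linux/stat.h>

static ssize_t example_link_state_show(struct device *dev,
                                       struct device_attribute *attr,
                                       char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%s\n", "up");
}

/* Declares dev_attr_link_state and names the read-only sysfs file "link_state". */
static DEVICE_ATTR(link_state, S_IRUGO, example_link_state_show, NULL);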
index 9d12d9f2620930015ec2d56c29a11b58660fe4de..cbef785765cf2ead72b64899a260a75bfca849d2 100644 (file)
@@ -38,78 +38,38 @@ qla2xxx_copy_queues(scsi_qla_host_t *ha, void *ptr)
 }
 
 static int
-qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
-    uint32_t cram_size, uint32_t *ext_mem, void **nxt)
+qla24xx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint32_t *ram,
+    uint32_t ram_dwords, void **nxt)
 {
        int rval;
-       uint32_t cnt, stat, timer, risc_address, ext_mem_cnt;
-       uint16_t mb[4];
+       uint32_t cnt, stat, timer, dwords, idx;
+       uint16_t mb0;
        struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+       dma_addr_t dump_dma = ha->gid_list_dma;
+       uint32_t *dump = (uint32_t *)ha->gid_list;
 
        rval = QLA_SUCCESS;
-       risc_address = ext_mem_cnt = 0;
-       memset(mb, 0, sizeof(mb));
+       mb0 = 0;
 
-       /* Code RAM. */
-       risc_address = 0x20000;
-       WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
+       WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
        clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
 
-       for (cnt = 0; cnt < cram_size / 4 && rval == QLA_SUCCESS;
-           cnt++, risc_address++) {
-               WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
-               WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
-               RD_REG_WORD(&reg->mailbox8);
-               WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
-
-               for (timer = 6000000; timer; timer--) {
-                       /* Check for pending interrupts. */
-                       stat = RD_REG_DWORD(&reg->host_status);
-                       if (stat & HSRX_RISC_INT) {
-                               stat &= 0xff;
+       dwords = GID_LIST_SIZE / 4;
+       for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
+           cnt += dwords, addr += dwords) {
+               if (cnt + dwords > ram_dwords)
+                       dwords = ram_dwords - cnt;
 
-                               if (stat == 0x1 || stat == 0x2 ||
-                                   stat == 0x10 || stat == 0x11) {
-                                       set_bit(MBX_INTERRUPT,
-                                           &ha->mbx_cmd_flags);
+               WRT_REG_WORD(&reg->mailbox1, LSW(addr));
+               WRT_REG_WORD(&reg->mailbox8, MSW(addr));
 
-                                       mb[0] = RD_REG_WORD(&reg->mailbox0);
-                                       mb[2] = RD_REG_WORD(&reg->mailbox2);
-                                       mb[3] = RD_REG_WORD(&reg->mailbox3);
+               WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
+               WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
+               WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
+               WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
 
-                                       WRT_REG_DWORD(&reg->hccr,
-                                           HCCRX_CLR_RISC_INT);
-                                       RD_REG_DWORD(&reg->hccr);
-                                       break;
-                               }
-
-                               /* Clear this intr; it wasn't a mailbox intr */
-                               WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
-                               RD_REG_DWORD(&reg->hccr);
-                       }
-                       udelay(5);
-               }
-
-               if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
-                       rval = mb[0] & MBS_MASK;
-                       code_ram[cnt] = htonl((mb[3] << 16) | mb[2]);
-               } else {
-                       rval = QLA_FUNCTION_FAILED;
-               }
-       }
-
-       if (rval == QLA_SUCCESS) {
-               /* External Memory. */
-               risc_address = 0x100000;
-               ext_mem_cnt = ha->fw_memory_size - 0x100000 + 1;
-               WRT_REG_WORD(&reg->mailbox0, MBC_READ_RAM_EXTENDED);
-               clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-       }
-       for (cnt = 0; cnt < ext_mem_cnt && rval == QLA_SUCCESS;
-           cnt++, risc_address++) {
-               WRT_REG_WORD(&reg->mailbox1, LSW(risc_address));
-               WRT_REG_WORD(&reg->mailbox8, MSW(risc_address));
-               RD_REG_WORD(&reg->mailbox8);
+               WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
+               WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
                WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
 
                for (timer = 6000000; timer; timer--) {
@@ -123,9 +83,7 @@ qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
                                        set_bit(MBX_INTERRUPT,
                                            &ha->mbx_cmd_flags);
 
-                                       mb[0] = RD_REG_WORD(&reg->mailbox0);
-                                       mb[2] = RD_REG_WORD(&reg->mailbox2);
-                                       mb[3] = RD_REG_WORD(&reg->mailbox3);
+                                       mb0 = RD_REG_WORD(&reg->mailbox0);
 
                                        WRT_REG_DWORD(&reg->hccr,
                                            HCCRX_CLR_RISC_INT);
@@ -141,17 +99,34 @@ qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
                }
 
                if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
-                       rval = mb[0] & MBS_MASK;
-                       ext_mem[cnt] = htonl((mb[3] << 16) | mb[2]);
+                       rval = mb0 & MBS_MASK;
+                       for (idx = 0; idx < dwords; idx++)
+                               ram[cnt + idx] = swab32(dump[idx]);
                } else {
                        rval = QLA_FUNCTION_FAILED;
                }
        }
 
-       *nxt = rval == QLA_SUCCESS ? &ext_mem[cnt]: NULL;
+       *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
        return rval;
 }
 
+static int
+qla24xx_dump_memory(scsi_qla_host_t *ha, uint32_t *code_ram,
+    uint32_t cram_size, void **nxt)
+{
+       int rval;
+
+       /* Code RAM. */
+       rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
+       if (rval != QLA_SUCCESS)
+               return rval;
+
+       /* External Memory. */
+       return qla24xx_dump_ram(ha, 0x100000, *nxt,
+           ha->fw_memory_size - 0x100000 + 1, nxt);
+}
+
 static uint32_t *
 qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
     uint32_t count, uint32_t *buf)
@@ -239,6 +214,90 @@ qla24xx_soft_reset(scsi_qla_host_t *ha)
        return rval;
 }
 
+static int
+qla2xxx_dump_ram(scsi_qla_host_t *ha, uint32_t addr, uint16_t *ram,
+    uint16_t ram_words, void **nxt)
+{
+       int rval;
+       uint32_t cnt, stat, timer, words, idx;
+       uint16_t mb0;
+       struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+       dma_addr_t dump_dma = ha->gid_list_dma;
+       uint16_t *dump = (uint16_t *)ha->gid_list;
+
+       rval = QLA_SUCCESS;
+       mb0 = 0;
+
+       WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
+       clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+
+       words = GID_LIST_SIZE / 2;
+       for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
+           cnt += words, addr += words) {
+               if (cnt + words > ram_words)
+                       words = ram_words - cnt;
+
+               WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
+               WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
+
+               WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
+               WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
+               WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
+               WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
+
+               WRT_MAILBOX_REG(ha, reg, 4, words);
+               WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
+
+               for (timer = 6000000; timer; timer--) {
+                       /* Check for pending interrupts. */
+                       stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
+                       if (stat & HSR_RISC_INT) {
+                               stat &= 0xff;
+
+                               if (stat == 0x1 || stat == 0x2) {
+                                       set_bit(MBX_INTERRUPT,
+                                           &ha->mbx_cmd_flags);
+
+                                       mb0 = RD_MAILBOX_REG(ha, reg, 0);
+
+                                       /* Release mailbox registers. */
+                                       WRT_REG_WORD(&reg->semaphore, 0);
+                                       WRT_REG_WORD(&reg->hccr,
+                                           HCCR_CLR_RISC_INT);
+                                       RD_REG_WORD(&reg->hccr);
+                                       break;
+                               } else if (stat == 0x10 || stat == 0x11) {
+                                       set_bit(MBX_INTERRUPT,
+                                           &ha->mbx_cmd_flags);
+
+                                       mb0 = RD_MAILBOX_REG(ha, reg, 0);
+
+                                       WRT_REG_WORD(&reg->hccr,
+                                           HCCR_CLR_RISC_INT);
+                                       RD_REG_WORD(&reg->hccr);
+                                       break;
+                               }
+
+                               /* clear this intr; it wasn't a mailbox intr */
+                               WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
+                               RD_REG_WORD(&reg->hccr);
+                       }
+                       udelay(5);
+               }
+
+               if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
+                       rval = mb0 & MBS_MASK;
+                       for (idx = 0; idx < words; idx++)
+                               ram[cnt + idx] = swab16(dump[idx]);
+               } else {
+                       rval = QLA_FUNCTION_FAILED;
+               }
+       }
+
+       *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
+       return rval;
+}
+
 static inline void
 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
     uint16_t *buf)
@@ -258,19 +317,14 @@ void
 qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
 {
        int             rval;
-       uint32_t        cnt, timer;
-       uint32_t        risc_address;
-       uint16_t        mb0, mb2;
+       uint32_t        cnt;
 
-       uint32_t        stat;
        struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
        uint16_t __iomem *dmp_reg;
        unsigned long   flags;
        struct qla2300_fw_dump  *fw;
-       uint32_t        data_ram_cnt;
+       void            *nxt;
 
-       risc_address = data_ram_cnt = 0;
-       mb0 = mb2 = 0;
        flags = 0;
 
        if (!hardware_locked)
@@ -388,185 +442,23 @@ qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
                }
        }
 
-       if (rval == QLA_SUCCESS) {
-               /* Get RISC SRAM. */
-               risc_address = 0x800;
-               WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
-               clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-       }
-       for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
-           cnt++, risc_address++) {
-               WRT_MAILBOX_REG(ha, reg, 1, (uint16_t)risc_address);
-               WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
-
-               for (timer = 6000000; timer; timer--) {
-                       /* Check for pending interrupts. */
-                       stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
-                       if (stat & HSR_RISC_INT) {
-                               stat &= 0xff;
-
-                               if (stat == 0x1 || stat == 0x2) {
-                                       set_bit(MBX_INTERRUPT,
-                                           &ha->mbx_cmd_flags);
-
-                                       mb0 = RD_MAILBOX_REG(ha, reg, 0);
-                                       mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
-                                       /* Release mailbox registers. */
-                                       WRT_REG_WORD(&reg->semaphore, 0);
-                                       WRT_REG_WORD(&reg->hccr,
-                                           HCCR_CLR_RISC_INT);
-                                       RD_REG_WORD(&reg->hccr);
-                                       break;
-                               } else if (stat == 0x10 || stat == 0x11) {
-                                       set_bit(MBX_INTERRUPT,
-                                           &ha->mbx_cmd_flags);
-
-                                       mb0 = RD_MAILBOX_REG(ha, reg, 0);
-                                       mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
-                                       WRT_REG_WORD(&reg->hccr,
-                                           HCCR_CLR_RISC_INT);
-                                       RD_REG_WORD(&reg->hccr);
-                                       break;
-                               }
-
-                               /* clear this intr; it wasn't a mailbox intr */
-                               WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
-                               RD_REG_WORD(&reg->hccr);
-                       }
-                       udelay(5);
-               }
-
-               if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
-                       rval = mb0 & MBS_MASK;
-                       fw->risc_ram[cnt] = htons(mb2);
-               } else {
-                       rval = QLA_FUNCTION_FAILED;
-               }
-       }
-
-       if (rval == QLA_SUCCESS) {
-               /* Get stack SRAM. */
-               risc_address = 0x10000;
-               WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_EXTENDED);
-               clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-       }
-       for (cnt = 0; cnt < sizeof(fw->stack_ram) / 2 && rval == QLA_SUCCESS;
-           cnt++, risc_address++) {
-               WRT_MAILBOX_REG(ha, reg, 1, LSW(risc_address));
-               WRT_MAILBOX_REG(ha, reg, 8, MSW(risc_address));
-               WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
-
-               for (timer = 6000000; timer; timer--) {
-                       /* Check for pending interrupts. */
-                       stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
-                       if (stat & HSR_RISC_INT) {
-                               stat &= 0xff;
-
-                               if (stat == 0x1 || stat == 0x2) {
-                                       set_bit(MBX_INTERRUPT,
-                                           &ha->mbx_cmd_flags);
-
-                                       mb0 = RD_MAILBOX_REG(ha, reg, 0);
-                                       mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
-                                       /* Release mailbox registers. */
-                                       WRT_REG_WORD(&reg->semaphore, 0);
-                                       WRT_REG_WORD(&reg->hccr,
-                                           HCCR_CLR_RISC_INT);
-                                       RD_REG_WORD(&reg->hccr);
-                                       break;
-                               } else if (stat == 0x10 || stat == 0x11) {
-                                       set_bit(MBX_INTERRUPT,
-                                           &ha->mbx_cmd_flags);
-
-                                       mb0 = RD_MAILBOX_REG(ha, reg, 0);
-                                       mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
-                                       WRT_REG_WORD(&reg->hccr,
-                                           HCCR_CLR_RISC_INT);
-                                       RD_REG_WORD(&reg->hccr);
-                                       break;
-                               }
-
-                               /* clear this intr; it wasn't a mailbox intr */
-                               WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
-                               RD_REG_WORD(&reg->hccr);
-                       }
-                       udelay(5);
-               }
-
-               if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
-                       rval = mb0 & MBS_MASK;
-                       fw->stack_ram[cnt] = htons(mb2);
-               } else {
-                       rval = QLA_FUNCTION_FAILED;
-               }
-       }
-
-       if (rval == QLA_SUCCESS) {
-               /* Get data SRAM. */
-               risc_address = 0x11000;
-               data_ram_cnt = ha->fw_memory_size - risc_address + 1;
-               WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_EXTENDED);
-               clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-       }
-       for (cnt = 0; cnt < data_ram_cnt && rval == QLA_SUCCESS;
-           cnt++, risc_address++) {
-               WRT_MAILBOX_REG(ha, reg, 1, LSW(risc_address));
-               WRT_MAILBOX_REG(ha, reg, 8, MSW(risc_address));
-               WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
-
-               for (timer = 6000000; timer; timer--) {
-                       /* Check for pending interrupts. */
-                       stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
-                       if (stat & HSR_RISC_INT) {
-                               stat &= 0xff;
-
-                               if (stat == 0x1 || stat == 0x2) {
-                                       set_bit(MBX_INTERRUPT,
-                                           &ha->mbx_cmd_flags);
-
-                                       mb0 = RD_MAILBOX_REG(ha, reg, 0);
-                                       mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
-                                       /* Release mailbox registers. */
-                                       WRT_REG_WORD(&reg->semaphore, 0);
-                                       WRT_REG_WORD(&reg->hccr,
-                                           HCCR_CLR_RISC_INT);
-                                       RD_REG_WORD(&reg->hccr);
-                                       break;
-                               } else if (stat == 0x10 || stat == 0x11) {
-                                       set_bit(MBX_INTERRUPT,
-                                           &ha->mbx_cmd_flags);
-
-                                       mb0 = RD_MAILBOX_REG(ha, reg, 0);
-                                       mb2 = RD_MAILBOX_REG(ha, reg, 2);
-
-                                       WRT_REG_WORD(&reg->hccr,
-                                           HCCR_CLR_RISC_INT);
-                                       RD_REG_WORD(&reg->hccr);
-                                       break;
-                               }
+       /* Get RISC SRAM. */
+       if (rval == QLA_SUCCESS)
+               rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
+                   sizeof(fw->risc_ram) / 2, &nxt);
 
-                               /* clear this intr; it wasn't a mailbox intr */
-                               WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
-                               RD_REG_WORD(&reg->hccr);
-                       }
-                       udelay(5);
-               }
+       /* Get stack SRAM. */
+       if (rval == QLA_SUCCESS)
+               rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
+                   sizeof(fw->stack_ram) / 2, &nxt);
 
-               if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
-                       rval = mb0 & MBS_MASK;
-                       fw->data_ram[cnt] = htons(mb2);
-               } else {
-                       rval = QLA_FUNCTION_FAILED;
-               }
-       }
+       /* Get data SRAM. */
+       if (rval == QLA_SUCCESS)
+               rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
+                   ha->fw_memory_size - 0x11000 + 1, &nxt);
 
        if (rval == QLA_SUCCESS)
-               qla2xxx_copy_queues(ha, &fw->data_ram[cnt]);
+               qla2xxx_copy_queues(ha, nxt);
 
        if (rval != QLA_SUCCESS) {
                qla_printk(KERN_WARNING, ha,
@@ -1010,7 +902,7 @@ qla24xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
                goto qla24xx_fw_dump_failed_0;
 
        rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
-           fw->ext_mem, &nxt);
+           &nxt);
        if (rval != QLA_SUCCESS)
                goto qla24xx_fw_dump_failed_0;
 
@@ -1318,7 +1210,7 @@ qla25xx_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
                goto qla25xx_fw_dump_failed_0;
 
        rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
-           fw->ext_mem, &nxt);
+           &nxt);
        if (rval != QLA_SUCCESS)
                goto qla25xx_fw_dump_failed_0;
 
index 078f2a15f40bcb3814b53b7cb4942db61d08922e..cf194517400d4d8f76f34dd29319a3045124e670 100644 (file)
@@ -1036,22 +1036,6 @@ struct mid_db_entry_24xx {
        uint8_t reserved_1;
 };
 
- /*
- * Virtual Fabric ID type definition.
- */
-typedef struct vf_id {
-        uint16_t id : 12;
-        uint16_t priority : 4;
-} vf_id_t;
-
-/*
- * Virtual Fabric HopCt type definition.
- */
-typedef struct vf_hopct {
-        uint16_t reserved : 8;
-        uint16_t hopct : 8;
-} vf_hopct_t;
-
 /*
  * Virtual Port Control IOCB
  */
@@ -1082,10 +1066,10 @@ struct vp_ctrl_entry_24xx {
 
        uint8_t vp_idx_map[16];
        uint16_t flags;
-       struct vf_id    id;
+       uint16_t id;
        uint16_t reserved_4;
-       struct vf_hopct  hopct;
-       uint8_t reserved_5[8];
+       uint16_t hopct;
+       uint8_t reserved_5[24];
 };
 
 /*
@@ -1132,9 +1116,9 @@ struct vp_config_entry_24xx {
        uint16_t reserved_vp2;
        uint8_t port_name_idx2[WWN_SIZE];
        uint8_t node_name_idx2[WWN_SIZE];
-       struct vf_id    id;
+       uint16_t id;
        uint16_t reserved_4;
-       struct vf_hopct  hopct;
+       uint16_t hopct;
        uint8_t reserved_5;
 };
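Dropping the vf_id/vf_hopct bitfield structs above follows the usual rule that C bitfield layout is compiler- and endian-dependent, so a firmware-visible 16-bit field is safer as a plain integer unpacked with masks. A hedged illustration using the 12-bit id / 4-bit priority split from the removed definition (macro and helper names are made up):

#include <linux/types.h>

#define EXAMPLE_VF_ID_MASK        0x0fff
#define EXAMPLE_VF_PRIORITY_SHIFT 12

static inline uint16_t example_vf_id(uint16_t raw)
{
        return raw & EXAMPLE_VF_ID_MASK;         /* low 12 bits */
}

static inline uint16_t example_vf_priority(uint16_t raw)
{
        return raw >> EXAMPLE_VF_PRIORITY_SHIFT; /* high 4 bits */
}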
 
index 76eb4fecce656109bcb0ca5464e8c0150d9496e8..f8827068d30fb006165b2daccfd86f7d69d2f134 100644 (file)
@@ -151,10 +151,6 @@ qla2x00_verify_checksum(scsi_qla_host_t *, uint32_t);
 extern int
 qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
 
-extern int
-qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, dma_addr_t, size_t,
-    uint32_t);
-
 extern int
 qla2x00_abort_command(scsi_qla_host_t *, srb_t *);
 
index 750d7ef83aaefb54f3f4c79fb468fdb579c8fc1b..4cb80b476c85c69c65277d5de2709f863f13a958 100644 (file)
@@ -1583,8 +1583,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *ha)
        eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
        eiter->len = __constant_cpu_to_be16(4 + 4);
        max_frame_size = IS_FWI2_CAPABLE(ha) ?
-               (uint32_t) icb24->frame_payload_size:
-               (uint32_t) ha->init_cb->frame_payload_size;
+           le16_to_cpu(icb24->frame_payload_size):
+           le16_to_cpu(ha->init_cb->frame_payload_size);
        eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
        size += 4 + 4;
 
index 01e26087c1dd599036202bb862c608675960d3e6..bbbc5a632a1d22f2334761283bfa1db4254a5ff2 100644 (file)
@@ -3645,7 +3645,7 @@ qla24xx_nvram_config(scsi_qla_host_t *ha)
        if (le16_to_cpu(nv->login_timeout) < 4)
                nv->login_timeout = __constant_cpu_to_le16(4);
        ha->login_timeout = le16_to_cpu(nv->login_timeout);
-       icb->login_timeout = cpu_to_le16(nv->login_timeout);
+       icb->login_timeout = nv->login_timeout;
 
        /* Set minimum RATOV to 100 tenths of a second. */
        ha->r_a_tov = 100;
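The one-line change above removes a double byte-swap: nv->login_timeout is already stored little-endian and icb->login_timeout expects little-endian, so the raw copy is correct, while wrapping it in cpu_to_le16() again would swap the bytes twice on big-endian hosts. A small hedged sketch of the same idea (the helper is hypothetical):

#include <linux/types.h>
#include <asm/byteorder.h>

static void example_copy_le16(__le16 *dst, const __le16 *src)
{
        *dst = *src;                  /* correct: both already little-endian */
        /* *dst = cpu_to_le16(*src);     wrong: re-swaps on big-endian hosts */
}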
index 285479b62d8f02084ad0daebc12ab249f16348c0..5d9a64a7879bce722b4bb6b94ffce8d3facb7750 100644 (file)
@@ -409,6 +409,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
                }
 
                set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
+               set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
 
                ha->flags.management_server_logged_in = 0;
                qla2x00_post_aen_work(ha, FCH_EVT_LIP, mb[1]);
@@ -454,8 +455,6 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
 
                ha->flags.management_server_logged_in = 0;
                ha->link_data_rate = PORT_SPEED_UNKNOWN;
-               if (ql2xfdmienable)
-                       set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
                qla2x00_post_aen_work(ha, FCH_EVT_LINKDOWN, 0);
                break;
 
@@ -511,6 +510,7 @@ qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
                        set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
                }
                set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
+               set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
 
                ha->flags.gpsc_supported = 1;
                ha->flags.management_server_logged_in = 0;
index 7d0a8a4c77196ea302262dcfc2d61f0d6d8931b2..2100604208096f2b3e04d0cb1001614ffbcb7aa8 100644 (file)
@@ -681,7 +681,7 @@ qla2x00_verify_checksum(scsi_qla_host_t *ha, uint32_t risc_addr)
  * Context:
  *     Kernel context.
  */
-int
+static int
 qla2x00_issue_iocb_timeout(scsi_qla_host_t *ha, void *buffer,
     dma_addr_t phys_addr, size_t size, uint32_t tov)
 {
@@ -784,7 +784,6 @@ qla2x00_abort_command(scsi_qla_host_t *ha, srb_t *sp)
                DEBUG2_3_11(printk("qla2x00_abort_command(%ld): failed=%x.\n",
                    ha->host_no, rval));
        } else {
-               sp->flags |= SRB_ABORT_PENDING;
                DEBUG11(printk("qla2x00_abort_command(%ld): done.\n",
                    ha->host_no));
        }
@@ -1469,7 +1468,7 @@ qla24xx_login_fabric(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
        lg->port_id[0] = al_pa;
        lg->port_id[1] = area;
        lg->port_id[2] = domain;
-       lg->vp_index = cpu_to_le16(ha->vp_idx);
+       lg->vp_index = ha->vp_idx;
        rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
        if (rval != QLA_SUCCESS) {
                DEBUG2_3_11(printk("%s(%ld): failed to issue Login IOCB "
@@ -1724,7 +1723,7 @@ qla24xx_fabric_logout(scsi_qla_host_t *ha, uint16_t loop_id, uint8_t domain,
        lg->port_id[0] = al_pa;
        lg->port_id[1] = area;
        lg->port_id[2] = domain;
-       lg->vp_index = cpu_to_le16(ha->vp_idx);
+       lg->vp_index = ha->vp_idx;
        rval = qla2x00_issue_iocb(ha, lg, lg_dma, 0);
        if (rval != QLA_SUCCESS) {
                DEBUG2_3_11(printk("%s(%ld): failed to issue Logout IOCB "
@@ -2210,7 +2209,6 @@ qla24xx_abort_command(scsi_qla_host_t *ha, srb_t *sp)
                rval = QLA_FUNCTION_FAILED;
        } else {
                DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
-               sp->flags |= SRB_ABORT_PENDING;
        }
 
        dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2644,12 +2642,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
        struct vp_rpt_id_entry_24xx *rptid_entry)
 {
        uint8_t vp_idx;
+       uint16_t stat = le16_to_cpu(rptid_entry->vp_idx);
        scsi_qla_host_t *vha;
 
        if (rptid_entry->entry_status != 0)
                return;
-       if (rptid_entry->entry_status != __constant_cpu_to_le16(CS_COMPLETE))
-               return;
 
        if (rptid_entry->format == 0) {
                DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
@@ -2659,17 +2656,17 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *ha,
                        rptid_entry->port_id[2], rptid_entry->port_id[1],
                        rptid_entry->port_id[0]));
        } else if (rptid_entry->format == 1) {
-               vp_idx = LSB(rptid_entry->vp_idx);
+               vp_idx = LSB(stat);
                DEBUG15(printk("%s:format 1: scsi(%ld): VP[%d] enabled "
                    "- status %d - "
                    "with port id %02x%02x%02x\n",__func__,ha->host_no,
-                   vp_idx, MSB(rptid_entry->vp_idx),
+                   vp_idx, MSB(stat),
                    rptid_entry->port_id[2], rptid_entry->port_id[1],
                    rptid_entry->port_id[0]));
                if (vp_idx == 0)
                        return;
 
-               if (MSB(rptid_entry->vp_idx) == 1)
+               if (MSB(stat) == 1)
                        return;
 
                list_for_each_entry(vha, &ha->vp_list, vp_list)
@@ -2982,8 +2979,8 @@ qla84xx_verify_chip(struct scsi_qla_host *ha, uint16_t *status)
        /* We update the firmware with only one data sequence. */
        options |= VCO_END_OF_DATA;
 
-       retry = 0;
        do {
+               retry = 0;
                memset(mn, 0, sizeof(*mn));
                mn->p.req.entry_type = VERIFY_CHIP_IOCB_TYPE;
                mn->p.req.entry_count = 1;
index 8b33b163b1d427717ccea65b5e20f1822438e632..3223fd16bcfe3a72829662fc47998ec3a0e081f5 100644 (file)
@@ -67,7 +67,7 @@ static void qla2x00_free_device(scsi_qla_host_t *);
 
 static void qla2x00_config_dma_addressing(scsi_qla_host_t *ha);
 
-int ql2xfdmienable;
+int ql2xfdmienable = 1;
 module_param(ql2xfdmienable, int, S_IRUGO|S_IRUSR);
 MODULE_PARM_DESC(ql2xfdmienable,
                "Enables FDMI registratons "
@@ -2135,7 +2135,7 @@ qla2x00_mem_free(scsi_qla_host_t *ha)
        kfree(ha->nvram);
 }
 
-struct qla_work_evt *
+static struct qla_work_evt *
 qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
     int locked)
 {
@@ -2152,7 +2152,7 @@ qla2x00_alloc_work(struct scsi_qla_host *ha, enum qla_work_type type,
        return e;
 }
 
-int
+static int
 qla2x00_post_work(struct scsi_qla_host *ha, struct qla_work_evt *e, int locked)
 {
        unsigned long flags;
@@ -2373,7 +2373,7 @@ qla2x00_do_dpc(void *data)
                                        } else {
                                                fcport->login_retry = 0;
                                        }
-                                       if (fcport->login_retry == 0)
+                                       if (fcport->login_retry == 0 && status != QLA_SUCCESS)
                                                fcport->loop_id = FC_NO_LOOP_ID;
                                }
                                if (test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))
@@ -2599,6 +2599,10 @@ qla2x00_timer(scsi_qla_host_t *ha)
                start_dpc++;
        }
 
+       /* Process any deferred work. */
+       if (!list_empty(&ha->work_list))
+               start_dpc++;
+
        /* Schedule the DPC routine if needed */
        if ((test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
            test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags) ||
index f42f17acf2cf21b7b5d9b62e5c0b18e7f4bc8040..afeae2bfe7eb090f337d114bf2ed52e3e2bff9af 100644 (file)
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.02.01-k1"
+#define QLA2XXX_VERSION      "8.02.01-k2"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   2
index 07103c399fe0c2e7ce840fcdcb7d995bd277ca73..f6600bfb5bde71ffc244260ec6aa1aec4cf59469 100644 (file)
@@ -1773,7 +1773,7 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp)
        if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts)
                printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n",
                       sdp->host->host_no, sdp->channel, sdp->id, sdp->lun);
-       set_bit(QUEUE_FLAG_BIDI, &sdp->request_queue->queue_flags);
+       queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue);
        return 0;
 }
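The scsi_debug change above switches to the new block-layer queue-flag helpers: the *_unlocked variants are for callers that do not hold queue_lock, and the locked forms expect it held. A minimal hedged sketch (sdev is assumed to be a valid scsi_device):

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

static void example_toggle_bidi(struct scsi_device *sdev)
{
        struct request_queue *q = sdev->request_queue;

        queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);    /* queue_lock not held */

        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_BIDI, q);           /* queue_lock held */
        spin_unlock_irq(q->queue_lock);
}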
 
index b8de041bc0ae8a2e894aeb723e67b98b9afe09a1..a235802f298150e648eb5bdf3f054dae4b1721c8 100644 (file)
@@ -449,37 +449,40 @@ int scsi_get_device_flags(struct scsi_device *sdev,
 }
 
 #ifdef CONFIG_SCSI_PROC_FS
-/* 
- * proc_scsi_dev_info_read: dump the scsi_dev_info_list via
- * /proc/scsi/device_info
- */
-static int proc_scsi_devinfo_read(char *buffer, char **start,
-                                 off_t offset, int length)
+static int devinfo_seq_show(struct seq_file *m, void *v)
 {
-       struct scsi_dev_info_list *devinfo;
-       int size, len = 0;
-       off_t begin = 0;
-       off_t pos = 0;
+       struct scsi_dev_info_list *devinfo =
+               list_entry(v, struct scsi_dev_info_list, dev_info_list);
 
-       list_for_each_entry(devinfo, &scsi_dev_info_list, dev_info_list) {
-               size = sprintf(buffer + len, "'%.8s' '%.16s' 0x%x\n",
+       seq_printf(m, "'%.8s' '%.16s' 0x%x\n",
                        devinfo->vendor, devinfo->model, devinfo->flags);
-               len += size;
-               pos = begin + len;
-               if (pos < offset) {
-                       len = 0;
-                       begin = pos;
-               }
-               if (pos > offset + length)
-                       goto stop_output;
-       }
+       return 0;
+}
+
+static void *devinfo_seq_start(struct seq_file *m, loff_t *pos)
+{
+       return seq_list_start(&scsi_dev_info_list, *pos);
+}
 
-stop_output:
-       *start = buffer + (offset - begin);     /* Start of wanted data */
-       len -= (offset - begin);        /* Start slop */
-       if (len > length)
-               len = length;   /* Ending slop */
-       return (len);
+static void *devinfo_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       return seq_list_next(v, &scsi_dev_info_list, pos);
+}
+
+static void devinfo_seq_stop(struct seq_file *m, void *v)
+{
+}
+
+static const struct seq_operations scsi_devinfo_seq_ops = {
+       .start  = devinfo_seq_start,
+       .next   = devinfo_seq_next,
+       .stop   = devinfo_seq_stop,
+       .show   = devinfo_seq_show,
+};
+
+static int proc_scsi_devinfo_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &scsi_devinfo_seq_ops);
 }
 
 /* 
@@ -489,11 +492,12 @@ stop_output:
  * integer value of flag to the scsi device info list.
  * To use, echo "vendor:model:flag" > /proc/scsi/device_info
  */
-static int proc_scsi_devinfo_write(struct file *file, const char __user *buf,
-                                  unsigned long length, void *data)
+static ssize_t proc_scsi_devinfo_write(struct file *file,
+                                      const char __user *buf,
+                                      size_t length, loff_t *ppos)
 {
        char *buffer;
-       int err = length;
+       ssize_t err = length;
 
        if (!buf || length>PAGE_SIZE)
                return -EINVAL;
@@ -517,6 +521,15 @@ out:
        free_page((unsigned long)buffer);
        return err;
 }
+
+static const struct file_operations scsi_devinfo_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = proc_scsi_devinfo_open,
+       .read           = seq_read,
+       .write          = proc_scsi_devinfo_write,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
 #endif /* CONFIG_SCSI_PROC_FS */
 
 module_param_string(dev_flags, scsi_dev_flags, sizeof(scsi_dev_flags), 0);
@@ -577,15 +590,13 @@ int __init scsi_init_devinfo(void)
        }
 
 #ifdef CONFIG_SCSI_PROC_FS
-       p = create_proc_entry("scsi/device_info", 0, NULL);
+       p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_fops);
        if (!p) {
                error = -ENOMEM;
                goto out;
        }
 
        p->owner = THIS_MODULE;
-       p->get_info = proc_scsi_devinfo_read;
-       p->write_proc = proc_scsi_devinfo_write;
 #endif /* CONFIG_SCSI_PROC_FS */
 
  out:
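The scsi_devinfo.c conversion above replaces the old create_proc_entry() plus ->get_info/->write_proc hooks with proc_create() and a file_operations table backed by seq_file. Where an entry only needs to print a single blob rather than iterate a list, the same switch can be done with the single_open() helpers; a hedged sketch under that assumption (names are hypothetical):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
        seq_printf(m, "example: %d\n", 42);
        return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_show, NULL);
}

static const struct file_operations example_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = example_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

/* registered with: proc_create("driver/example", 0, NULL, &example_proc_fops); */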
index 221f31e36d26e5c4f7bfe88b7abde0b2996c982e..1eaba6cd80f411735b390a984a3f61837e377432 100644 (file)
@@ -1771,6 +1771,7 @@ scsi_reset_provider(struct scsi_device *dev, int flag)
        unsigned long flags;
        int rtn;
 
+       blk_rq_init(NULL, &req);
        scmd->request = &req;
        memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
 
index 67f412bb4974b91783009fb3404c4a589bb6f6c8..d545ad1cf47a7f94c70369da8c1b6fb4dab0fe9d 100644 (file)
@@ -536,6 +536,9 @@ static void scsi_run_queue(struct request_queue *q)
               !shost->host_blocked && !shost->host_self_blocked &&
                !((shost->can_queue > 0) &&
                  (shost->host_busy >= shost->can_queue))) {
+
+               int flagset;
+
                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
@@ -549,19 +552,20 @@ static void scsi_run_queue(struct request_queue *q)
                sdev = list_entry(shost->starved_list.next,
                                          struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
-               spin_unlock_irqrestore(shost->host_lock, flags);
-
+               spin_unlock(shost->host_lock);
+
+               spin_lock(sdev->request_queue->queue_lock);
+               flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
+                               !test_bit(QUEUE_FLAG_REENTER,
+                                       &sdev->request_queue->queue_flags);
+               if (flagset)
+                       queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
+               __blk_run_queue(sdev->request_queue);
+               if (flagset)
+                       queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
+               spin_unlock(sdev->request_queue->queue_lock);
 
-               if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
-                   !test_and_set_bit(QUEUE_FLAG_REENTER,
-                                     &sdev->request_queue->queue_flags)) {
-                       blk_run_queue(sdev->request_queue);
-                       clear_bit(QUEUE_FLAG_REENTER,
-                                 &sdev->request_queue->queue_flags);
-               } else
-                       blk_run_queue(sdev->request_queue);
-
-               spin_lock_irqsave(shost->host_lock, flags);
+               spin_lock(shost->host_lock);
                if (unlikely(!list_empty(&sdev->starved_entry)))
                        /*
                         * sdev lost a race, and was put back on the
@@ -1585,8 +1589,9 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
 
        blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));
 
+       /* New queue, no concurrency on queue_flags */
        if (!shost->use_clustering)
-               clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
+               queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
 
        /*
         * set a reasonable default alignment on word boundaries: the
index 3f34e9376b0aa148c8dafc58ee0c52c65849b1e1..b33e72516ef83ee685a5186e0894b0585faba4fd 100644 (file)
@@ -121,6 +121,7 @@ extern struct scsi_transport_template blank_transport_template;
 extern void __scsi_remove_device(struct scsi_device *);
 
 extern struct bus_type scsi_bus_type;
+extern struct attribute_group *scsi_sysfs_shost_attr_groups[];
 
 /* scsi_netlink.c */
 #ifdef CONFIG_SCSI_NETLINK
index ed395154a5b189e4043c271da8fc9042178a043d..e4a0d2f9b35752a47c2e84b7e99dafa67fce04f7 100644 (file)
@@ -190,10 +190,14 @@ void scsi_proc_host_rm(struct Scsi_Host *shost)
  */
 static int proc_print_scsidevice(struct device *dev, void *data)
 {
-       struct scsi_device *sdev = to_scsi_device(dev);
+       struct scsi_device *sdev;
        struct seq_file *s = data;
        int i;
 
+       if (!scsi_is_sdev_device(dev))
+               goto out;
+
+       sdev = to_scsi_device(dev);
        seq_printf(s,
                "Host: scsi%d Channel: %02d Id: %02d Lun: %02d\n  Vendor: ",
                sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
@@ -230,6 +234,7 @@ static int proc_print_scsidevice(struct device *dev, void *data)
        else
                seq_printf(s, "\n");
 
+out:
        return 0;
 }
 
@@ -408,6 +413,7 @@ static int proc_scsi_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations proc_scsi_operations = {
+       .owner          = THIS_MODULE,
        .open           = proc_scsi_open,
        .read           = seq_read,
        .write          = proc_scsi_write,
@@ -426,10 +432,9 @@ int __init scsi_init_procfs(void)
        if (!proc_scsi)
                goto err1;
 
-       pde = create_proc_entry("scsi/scsi", 0, NULL);
+       pde = proc_create("scsi/scsi", 0, NULL, &proc_scsi_operations);
        if (!pde)
                goto err2;
-       pde->proc_fops = &proc_scsi_operations;
 
        return 0;
 
index e67c14e31babba5913f0ab1852f7952e191502bb..a00eee6f7be9fc85d85f3eb4f33516aa22d8689b 100644 (file)
@@ -322,6 +322,21 @@ out:
        return NULL;
 }
 
+static void scsi_target_destroy(struct scsi_target *starget)
+{
+       struct device *dev = &starget->dev;
+       struct Scsi_Host *shost = dev_to_shost(dev->parent);
+       unsigned long flags;
+
+       transport_destroy_device(dev);
+       spin_lock_irqsave(shost->host_lock, flags);
+       if (shost->hostt->target_destroy)
+               shost->hostt->target_destroy(starget);
+       list_del_init(&starget->siblings);
+       spin_unlock_irqrestore(shost->host_lock, flags);
+       put_device(dev);
+}
+
 static void scsi_target_dev_release(struct device *dev)
 {
        struct device *parent = dev->parent;
@@ -331,9 +346,14 @@ static void scsi_target_dev_release(struct device *dev)
        put_device(parent);
 }
 
+struct device_type scsi_target_type = {
+       .name =         "scsi_target",
+       .release =      scsi_target_dev_release,
+};
+
 int scsi_is_target_device(const struct device *dev)
 {
-       return dev->release == scsi_target_dev_release;
+       return dev->type == &scsi_target_type;
 }
 EXPORT_SYMBOL(scsi_is_target_device);
 
@@ -391,14 +411,17 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
        device_initialize(dev);
        starget->reap_ref = 1;
        dev->parent = get_device(parent);
-       dev->release = scsi_target_dev_release;
        sprintf(dev->bus_id, "target%d:%d:%d",
                shost->host_no, channel, id);
+#ifndef CONFIG_SYSFS_DEPRECATED
+       dev->bus = &scsi_bus_type;
+#endif
+       dev->type = &scsi_target_type;
        starget->id = id;
        starget->channel = channel;
        INIT_LIST_HEAD(&starget->siblings);
        INIT_LIST_HEAD(&starget->devices);
-       starget->state = STARGET_RUNNING;
+       starget->state = STARGET_CREATED;
        starget->scsi_level = SCSI_2;
  retry:
        spin_lock_irqsave(shost->host_lock, flags);
@@ -411,18 +434,6 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
        spin_unlock_irqrestore(shost->host_lock, flags);
        /* allocate and add */
        transport_setup_device(dev);
-       error = device_add(dev);
-       if (error) {
-               dev_err(dev, "target device_add failed, error %d\n", error);
-               spin_lock_irqsave(shost->host_lock, flags);
-               list_del_init(&starget->siblings);
-               spin_unlock_irqrestore(shost->host_lock, flags);
-               transport_destroy_device(dev);
-               put_device(parent);
-               kfree(starget);
-               return NULL;
-       }
-       transport_add_device(dev);
        if (shost->hostt->target_alloc) {
                error = shost->hostt->target_alloc(starget);
 
@@ -430,9 +441,7 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
                        dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
                        /* don't want scsi_target_reap to do the final
                         * put because it will be under the host lock */
-                       get_device(dev);
-                       scsi_target_reap(starget);
-                       put_device(dev);
+                       scsi_target_destroy(starget);
                        return NULL;
                }
        }
@@ -459,18 +468,10 @@ static void scsi_target_reap_usercontext(struct work_struct *work)
 {
        struct scsi_target *starget =
                container_of(work, struct scsi_target, ew.work);
-       struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
-       unsigned long flags;
 
        transport_remove_device(&starget->dev);
        device_del(&starget->dev);
-       transport_destroy_device(&starget->dev);
-       spin_lock_irqsave(shost->host_lock, flags);
-       if (shost->hostt->target_destroy)
-               shost->hostt->target_destroy(starget);
-       list_del_init(&starget->siblings);
-       spin_unlock_irqrestore(shost->host_lock, flags);
-       put_device(&starget->dev);
+       scsi_target_destroy(starget);
 }
 
 /**
@@ -485,21 +486,25 @@ void scsi_target_reap(struct scsi_target *starget)
 {
        struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
        unsigned long flags;
+       enum scsi_target_state state;
+       int empty;
 
        spin_lock_irqsave(shost->host_lock, flags);
+       state = starget->state;
+       empty = --starget->reap_ref == 0 &&
+               list_empty(&starget->devices) ? 1 : 0;
+       spin_unlock_irqrestore(shost->host_lock, flags);
 
-       if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
-               BUG_ON(starget->state == STARGET_DEL);
-               starget->state = STARGET_DEL;
-               spin_unlock_irqrestore(shost->host_lock, flags);
-               execute_in_process_context(scsi_target_reap_usercontext,
-                                          &starget->ew);
+       if (!empty)
                return;
 
-       }
-       spin_unlock_irqrestore(shost->host_lock, flags);
-
-       return;
+       BUG_ON(state == STARGET_DEL);
+       starget->state = STARGET_DEL;
+       if (state == STARGET_CREATED)
+               scsi_target_destroy(starget);
+       else
+               execute_in_process_context(scsi_target_reap_usercontext,
+                                          &starget->ew);
 }
 
 /**
@@ -1048,8 +1053,9 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
                                        scsi_inq_str(vend, result, 8, 16),
                                        scsi_inq_str(mod, result, 16, 32));
                        });
+
                }
-               
+
                res = SCSI_SCAN_TARGET_PRESENT;
                goto out_free_result;
        }
@@ -1489,7 +1495,6 @@ struct scsi_device *__scsi_add_device(struct Scsi_Host *shost, uint channel,
        if (scsi_host_scan_allowed(shost))
                scsi_probe_and_add_lun(starget, lun, NULL, &sdev, 1, hostdata);
        mutex_unlock(&shost->scan_mutex);
-       transport_configure_device(&starget->dev);
        scsi_target_reap(starget);
        put_device(&starget->dev);
 
@@ -1570,7 +1575,6 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
  out_reap:
        /* now determine if the target has any children at all
         * and if not, nuke it */
-       transport_configure_device(&starget->dev);
        scsi_target_reap(starget);
 
        put_device(&starget->dev);
@@ -1824,7 +1828,7 @@ void scsi_scan_host(struct Scsi_Host *shost)
        }
 
        p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
-       if (unlikely(IS_ERR(p)))
+       if (IS_ERR(p))
                do_scan_async(data);
 }
 EXPORT_SYMBOL(scsi_scan_host);
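Several hunks above (scsi_target_type, the new scsi_is_target_device() test, and the dev->type checks in the bus match/suspend paths) move device identification from comparing release callbacks to tagging each struct device with a device_type. A minimal hedged sketch of that pattern (the widget names are made up):

#include <linux/device.h>

static struct device_type example_widget_type = {
        .name = "example_widget",
};

static int example_is_widget(const struct device *dev)
{
        return dev->type == &example_widget_type;
}

/* The owner sets dev->type = &example_widget_type at allocation time,
 * before device_add(), just as scsi_alloc_target() does above. */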
index 67bb20ed45d23ef431280a2c63a03e14ea252731..049103f1d16ffd320ca51a5544aca1966dc71585 100644 (file)
@@ -21,6 +21,8 @@
 #include "scsi_priv.h"
 #include "scsi_logging.h"
 
+static struct device_type scsi_dev_type;
+
 static const struct {
        enum scsi_device_state  value;
        char                    *name;
@@ -249,18 +251,27 @@ shost_rd_attr(sg_tablesize, "%hu\n");
 shost_rd_attr(unchecked_isa_dma, "%d\n");
 shost_rd_attr2(proc_name, hostt->proc_name, "%s\n");
 
-static struct device_attribute *scsi_sysfs_shost_attrs[] = {
-       &dev_attr_unique_id,
-       &dev_attr_host_busy,
-       &dev_attr_cmd_per_lun,
-       &dev_attr_can_queue,
-       &dev_attr_sg_tablesize,
-       &dev_attr_unchecked_isa_dma,
-       &dev_attr_proc_name,
-       &dev_attr_scan,
-       &dev_attr_hstate,
-       &dev_attr_supported_mode,
-       &dev_attr_active_mode,
+static struct attribute *scsi_sysfs_shost_attrs[] = {
+       &dev_attr_unique_id.attr,
+       &dev_attr_host_busy.attr,
+       &dev_attr_cmd_per_lun.attr,
+       &dev_attr_can_queue.attr,
+       &dev_attr_sg_tablesize.attr,
+       &dev_attr_unchecked_isa_dma.attr,
+       &dev_attr_proc_name.attr,
+       &dev_attr_scan.attr,
+       &dev_attr_hstate.attr,
+       &dev_attr_supported_mode.attr,
+       &dev_attr_active_mode.attr,
+       NULL
+};
+
+struct attribute_group scsi_shost_attr_group = {
+       .attrs =        scsi_sysfs_shost_attrs,
+};
+
+struct attribute_group *scsi_sysfs_shost_attr_groups[] = {
+       &scsi_shost_attr_group,
        NULL
 };
 
@@ -335,7 +346,12 @@ static struct class sdev_class = {
 /* all probing is done in the individual ->probe routines */
 static int scsi_bus_match(struct device *dev, struct device_driver *gendrv)
 {
-       struct scsi_device *sdp = to_scsi_device(dev);
+       struct scsi_device *sdp;
+
+       if (dev->type != &scsi_dev_type)
+               return 0;
+
+       sdp = to_scsi_device(dev);
        if (sdp->no_uld_attach)
                return 0;
        return (sdp->inq_periph_qual == SCSI_INQ_PQ_CON)? 1: 0;
@@ -351,10 +367,16 @@ static int scsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
 
 static int scsi_bus_suspend(struct device * dev, pm_message_t state)
 {
-       struct device_driver *drv = dev->driver;
-       struct scsi_device *sdev = to_scsi_device(dev);
+       struct device_driver *drv;
+       struct scsi_device *sdev;
        int err;
 
+       if (dev->type != &scsi_dev_type)
+               return 0;
+
+       drv = dev->driver;
+       sdev = to_scsi_device(dev);
+
        err = scsi_device_quiesce(sdev);
        if (err)
                return err;
@@ -370,10 +392,16 @@ static int scsi_bus_suspend(struct device * dev, pm_message_t state)
 
 static int scsi_bus_resume(struct device * dev)
 {
-       struct device_driver *drv = dev->driver;
-       struct scsi_device *sdev = to_scsi_device(dev);
+       struct device_driver *drv;
+       struct scsi_device *sdev;
        int err = 0;
 
+       if (dev->type != &scsi_dev_type)
+               return 0;
+
+       drv = dev->driver;
+       sdev = to_scsi_device(dev);
+
        if (drv && drv->resume)
                err = drv->resume(dev);
 
@@ -781,6 +809,27 @@ sdev_store_queue_type_rw(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
+static int scsi_target_add(struct scsi_target *starget)
+{
+       int error;
+
+       if (starget->state != STARGET_CREATED)
+               return 0;
+
+       error = device_add(&starget->dev);
+       if (error) {
+               dev_err(&starget->dev, "target device_add failed, error %d\n", error);
+               get_device(&starget->dev);
+               scsi_target_reap(starget);
+               put_device(&starget->dev);
+               return error;
+       }
+       transport_add_device(&starget->dev);
+       starget->state = STARGET_RUNNING;
+
+       return 0;
+}
+
 static struct device_attribute sdev_attr_queue_type_rw =
        __ATTR(queue_type, S_IRUGO | S_IWUSR, show_queue_type_field,
               sdev_store_queue_type_rw);
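scsi_target_add() registers a target with the driver core the first time it is needed, moving it from STARGET_CREATED to STARGET_RUNNING, and on device_add() failure it brackets scsi_target_reap() with get_device()/put_device() so the target's embedded struct device cannot be released while the error path is still using it. That bracket is a common idiom around teardown helpers; a hedged sketch with a hypothetical teardown function:

/* Sketch: hold a temporary reference across a teardown call. */
static int example_add(struct device *dev)
{
	int error = device_add(dev);

	if (error) {
		get_device(dev);	/* keep dev alive across teardown */
		example_teardown(dev);	/* stands in for scsi_target_reap() */
		put_device(dev);	/* drop our temporary reference */
		return error;
	}
	return 0;
}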
@@ -796,10 +845,16 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
 {
        int error, i;
        struct request_queue *rq = sdev->request_queue;
+       struct scsi_target *starget = sdev->sdev_target;
 
        if ((error = scsi_device_set_state(sdev, SDEV_RUNNING)) != 0)
                return error;
 
+       error = scsi_target_add(starget);
+       if (error)
+               return error;
+
+       transport_configure_device(&starget->dev);
        error = device_add(&sdev->sdev_gendev);
        if (error) {
                put_device(sdev->sdev_gendev.parent);
@@ -834,7 +889,7 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
                goto out;
        }
 
-       error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL);
+       error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
 
        if (error)
                sdev_printk(KERN_INFO, sdev,
@@ -971,44 +1026,6 @@ int scsi_register_interface(struct class_interface *intf)
 }
 EXPORT_SYMBOL(scsi_register_interface);
 
-
-static struct device_attribute *class_attr_overridden(
-               struct device_attribute **attrs,
-               struct device_attribute *attr)
-{
-       int i;
-
-       if (!attrs)
-               return NULL;
-       for (i = 0; attrs[i]; i++)
-               if (!strcmp(attrs[i]->attr.name, attr->attr.name))
-                       return attrs[i];
-       return NULL;
-}
-
-static int class_attr_add(struct device *classdev,
-               struct device_attribute *attr)
-{
-       struct device_attribute *base_attr;
-
-       /*
-        * Spare the caller from having to copy things it's not interested in.
-        */
-       base_attr = class_attr_overridden(scsi_sysfs_shost_attrs, attr);
-       if (base_attr) {
-               /* extend permissions */
-               attr->attr.mode |= base_attr->attr.mode;
-
-               /* override null show/store with default */
-               if (!attr->show)
-                       attr->show = base_attr->show;
-               if (!attr->store)
-                       attr->store = base_attr->store;
-       }
-
-       return device_create_file(classdev, attr);
-}
-
 /**
  * scsi_sysfs_add_host - add scsi host to subsystem
  * @shost:     scsi host struct to add to subsystem
@@ -1018,20 +1035,11 @@ int scsi_sysfs_add_host(struct Scsi_Host *shost)
 {
        int error, i;
 
+       /* add host specific attributes */
        if (shost->hostt->shost_attrs) {
                for (i = 0; shost->hostt->shost_attrs[i]; i++) {
-                       error = class_attr_add(&shost->shost_dev,
-                                       shost->hostt->shost_attrs[i]);
-                       if (error)
-                               return error;
-               }
-       }
-
-       for (i = 0; scsi_sysfs_shost_attrs[i]; i++) {
-               if (!class_attr_overridden(shost->hostt->shost_attrs,
-                                       scsi_sysfs_shost_attrs[i])) {
                        error = device_create_file(&shost->shost_dev,
-                                       scsi_sysfs_shost_attrs[i]);
+                                       shost->hostt->shost_attrs[i]);
                        if (error)
                                return error;
                }
index 6b092a6c295d52d0b63b7d5659b880c63c07ed7a..5fd64e70029dcd30d4c30c8c9dffd5a0f77648a6 100644 (file)
@@ -1961,12 +1961,17 @@ fc_timed_out(struct scsi_cmnd *scmd)
 }
 
 /*
- * Must be called with shost->host_lock held
+ * Called by fc_user_scan to locate an rport on the shost that
+ * matches the channel and target id, and invoke scsi_scan_target()
+ * on the rport.
  */
-static int fc_user_scan(struct Scsi_Host *shost, uint channel,
-               uint id, uint lun)
+static void
+fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, uint lun)
 {
        struct fc_rport *rport;
+       unsigned long flags;
+
+       spin_lock_irqsave(shost->host_lock, flags);
 
        list_for_each_entry(rport, &fc_host_rports(shost), peers) {
                if (rport->scsi_target_id == -1)
@@ -1975,13 +1980,54 @@ static int fc_user_scan(struct Scsi_Host *shost, uint channel,
                if (rport->port_state != FC_PORTSTATE_ONLINE)
                        continue;
 
-               if ((channel == SCAN_WILD_CARD || channel == rport->channel) &&
-                   (id == SCAN_WILD_CARD || id == rport->scsi_target_id)) {
-                       scsi_scan_target(&rport->dev, rport->channel,
-                                        rport->scsi_target_id, lun, 1);
+               if ((channel == rport->channel) &&
+                   (id == rport->scsi_target_id)) {
+                       spin_unlock_irqrestore(shost->host_lock, flags);
+                       scsi_scan_target(&rport->dev, channel, id, lun, 1);
+                       return;
                }
        }
 
+       spin_unlock_irqrestore(shost->host_lock, flags);
+}
+
+/*
+ * Called via sysfs scan routines. Necessary, as the FC transport
+ * wants to place all target objects below the rport object. So this
+ * routine must invoke the scsi_scan_target() routine with the rport
+ * object as the parent.
+ */
+static int
+fc_user_scan(struct Scsi_Host *shost, uint channel, uint id, uint lun)
+{
+       uint chlo, chhi;
+       uint tgtlo, tgthi;
+
+       if (((channel != SCAN_WILD_CARD) && (channel > shost->max_channel)) ||
+           ((id != SCAN_WILD_CARD) && (id >= shost->max_id)) ||
+           ((lun != SCAN_WILD_CARD) && (lun > shost->max_lun)))
+               return -EINVAL;
+
+       if (channel == SCAN_WILD_CARD) {
+               chlo = 0;
+               chhi = shost->max_channel + 1;
+       } else {
+               chlo = channel;
+               chhi = channel + 1;
+       }
+
+       if (id == SCAN_WILD_CARD) {
+               tgtlo = 0;
+               tgthi = shost->max_id;
+       } else {
+               tgtlo = id;
+               tgthi = id + 1;
+       }
+
+       for ( ; chlo < chhi; chlo++)
+               for ( ; tgtlo < tgthi; tgtlo++)
+                       fc_user_scan_tgt(shost, chlo, tgtlo, lun);
+
        return 0;
 }
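fc_user_scan() now validates channel, id and lun against the host limits and expands SCAN_WILD_CARD into explicit ranges before handing each (channel, target) pair to fc_user_scan_tgt(), which takes the host lock itself. Note that tgtlo is initialised once outside both loops, so after the first channel it already equals tgthi and later channels get an empty inner loop. A small standalone sketch of the range expansion with the inner bound reset per channel; the wildcard value and host limits are made up for the demo:

#include <stdio.h>

#define SCAN_WILD_CARD (~0u)	/* illustrative; the kernel has its own define */

static void scan_one(unsigned int chan, unsigned int tgt, unsigned int lun)
{
	printf("scan channel %u target %u lun %u\n", chan, tgt, lun);
}

int main(void)
{
	unsigned int max_channel = 1, max_id = 4;	/* pretend host limits */
	unsigned int channel = SCAN_WILD_CARD, id = SCAN_WILD_CARD, lun = 0;
	unsigned int chlo, chhi, tgtlo, tgthi, c, t;

	chlo  = (channel == SCAN_WILD_CARD) ? 0 : channel;
	chhi  = (channel == SCAN_WILD_CARD) ? max_channel + 1 : channel + 1;
	tgtlo = (id == SCAN_WILD_CARD) ? 0 : id;
	tgthi = (id == SCAN_WILD_CARD) ? max_id : id + 1;

	for (c = chlo; c < chhi; c++)
		for (t = tgtlo; t < tgthi; t++)	/* t restarts for every channel */
			scan_one(c, t, lun);
	return 0;
}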
 
index 27ec625ab7719a2cdf59931ed1ecb6ce77431de5..f4461d35ffb9157d6cc3e634f692ef0937b5a9c5 100644 (file)
@@ -192,6 +192,16 @@ static void sas_non_host_smp_request(struct request_queue *q)
        sas_smp_request(q, rphy_to_shost(rphy), rphy);
 }
 
+static void sas_host_release(struct device *dev)
+{
+       struct Scsi_Host *shost = dev_to_shost(dev);
+       struct sas_host_attrs *sas_host = to_sas_host_attrs(shost);
+       struct request_queue *q = sas_host->q;
+
+       if (q)
+               blk_cleanup_queue(q);
+}
+
 static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
 {
        struct request_queue *q;
@@ -199,6 +209,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
        struct device *dev;
        char namebuf[BUS_ID_SIZE];
        const char *name;
+       void (*release)(struct device *);
 
        if (!to_sas_internal(shost->transportt)->f->smp_handler) {
                printk("%s can't handle SMP requests\n", shost->hostt->name);
@@ -209,17 +220,19 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
                q = blk_init_queue(sas_non_host_smp_request, NULL);
                dev = &rphy->dev;
                name = dev->bus_id;
+               release = NULL;
        } else {
                q = blk_init_queue(sas_host_smp_request, NULL);
                dev = &shost->shost_gendev;
                snprintf(namebuf, sizeof(namebuf),
                         "sas_host%d", shost->host_no);
                name = namebuf;
+               release = sas_host_release;
        }
        if (!q)
                return -ENOMEM;
 
-       error = bsg_register_queue(q, dev, name);
+       error = bsg_register_queue(q, dev, name, release);
        if (error) {
                blk_cleanup_queue(q);
                return -ENOMEM;
@@ -235,8 +248,7 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy)
        else
                q->queuedata = shost;
 
-       set_bit(QUEUE_FLAG_BIDI, &q->queue_flags);
-
+       queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
        return 0;
 }
 
@@ -253,7 +265,6 @@ static void sas_bsg_remove(struct Scsi_Host *shost, struct sas_rphy *rphy)
                return;
 
        bsg_unregister_queue(q);
-       blk_cleanup_queue(q);
 }
 
 /*
@@ -1301,6 +1312,9 @@ static void sas_expander_release(struct device *dev)
        struct sas_rphy *rphy = dev_to_rphy(dev);
        struct sas_expander_device *edev = rphy_to_expander_device(rphy);
 
+       if (rphy->q)
+               blk_cleanup_queue(rphy->q);
+
        put_device(dev->parent);
        kfree(edev);
 }
@@ -1310,6 +1324,9 @@ static void sas_end_device_release(struct device *dev)
        struct sas_rphy *rphy = dev_to_rphy(dev);
        struct sas_end_device *edev = rphy_to_end_device(rphy);
 
+       if (rphy->q)
+               blk_cleanup_queue(rphy->q);
+
        put_device(dev->parent);
        kfree(edev);
 }
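bsg_register_queue() grows a fourth argument, a release callback that bsg invokes when the associated class device is torn down. The SAS transport uses it for the host SMP queue (sas_host_release) and moves the rphy queues' blk_cleanup_queue() calls into the expander and end-device release functions, so queues are no longer destroyed while bsg may still reference them. A kernel-style sketch of registering a queue with such a callback; the lookup and request-handler names are hypothetical:

/* Sketch: tie queue cleanup to the lifetime of the bsg device. */
static void example_bsg_release(struct device *dev)
{
	struct request_queue *q = example_lookup_queue(dev);	/* hypothetical */

	if (q)
		blk_cleanup_queue(q);
}

static int example_bsg_setup(struct device *dev)
{
	struct request_queue *q = blk_init_queue(example_request_fn, NULL);
	int error;

	if (!q)
		return -ENOMEM;

	error = bsg_register_queue(q, dev, dev->bus_id, example_bsg_release);
	if (error) {
		blk_cleanup_queue(q);	/* registration failed: clean up now */
		return error;
	}
	return 0;
}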
index bc12b5d5d676df7ab733fd23ea0ef8e929241363..75a64a6cae8ca873d70b259e61384b4763eed6ef 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/workqueue.h>
 #include <linux/blkdev.h>
 #include <linux/mutex.h>
+#include <linux/sysfs.h>
 #include <scsi/scsi.h>
 #include "scsi_priv.h"
 #include <scsi/scsi_device.h>
@@ -1374,11 +1375,11 @@ static int spi_host_configure(struct transport_container *tc,
  * overloads the return by setting 1<<1 if the attribute should
  * be writeable */
 #define TARGET_ATTRIBUTE_HELPER(name) \
-       (si->f->show_##name ? 1 : 0) + \
-       (si->f->set_##name ? 2 : 0)
+       (si->f->show_##name ? S_IRUGO : 0) | \
+       (si->f->set_##name ? S_IWUSR : 0)
 
-static int target_attribute_is_visible(struct kobject *kobj,
-                                      struct attribute *attr, int i)
+static mode_t target_attribute_is_visible(struct kobject *kobj,
+                                         struct attribute *attr, int i)
 {
        struct device *cdev = container_of(kobj, struct device, kobj);
        struct scsi_target *starget = transport_class_to_starget(cdev);
@@ -1428,7 +1429,7 @@ static int target_attribute_is_visible(struct kobject *kobj,
                 spi_support_ius(starget))
                return TARGET_ATTRIBUTE_HELPER(hold_mcs);
        else if (attr == &dev_attr_revalidate.attr)
-               return 1;
+               return S_IWUSR;
 
        return 0;
 }
@@ -1462,25 +1463,9 @@ static int spi_target_configure(struct transport_container *tc,
                                struct device *cdev)
 {
        struct kobject *kobj = &cdev->kobj;
-       int i;
-       struct attribute *attr;
-       int rc;
-
-       for (i = 0; (attr = target_attributes[i]) != NULL; i++) {
-               int j = target_attribute_group.is_visible(kobj, attr, i);
-
-               /* FIXME: as well as returning -EEXIST, which we'd like
-                * to ignore, sysfs also does a WARN_ON and dumps a trace,
-                * which is bad, so temporarily, skip attributes that are
-                * already visible (the revalidate one) */
-               if (j && attr != &dev_attr_revalidate.attr)
-                       rc = sysfs_add_file_to_group(kobj, attr,
-                                               target_attribute_group.name);
-               /* and make the attribute writeable if we have a set
-                * function */
-               if ((j & 1))
-                       rc = sysfs_chmod_file(kobj, attr, attr->mode | S_IWUSR);
-       }
+
+       /* force an update based on parameters read from the device */
+       sysfs_update_group(kobj, &target_attribute_group);
 
        return 0;
 }
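With is_visible() returning a mode_t, the SPI transport can report visibility and permissions in one place (S_IRUGO when a show handler exists, S_IWUSR when a set handler exists), and spi_target_configure() simply asks sysfs_update_group() to re-evaluate the group once the device parameters have been read, replacing the hand-rolled add/chmod loop. A sketch of an attribute group using the callback; the attribute names and array are placeholders along the lines of the earlier sketch:

/* Sketch: per-attribute visibility and mode via .is_visible. */
static mode_t example_attr_is_visible(struct kobject *kobj,
				      struct attribute *attr, int i)
{
	if (attr == &dev_attr_example_ro.attr)
		return S_IRUGO;			/* read-only */
	if (attr == &dev_attr_example_rw.attr)
		return S_IRUGO | S_IWUSR;	/* read-write */
	return 0;				/* hidden */
}

static struct attribute_group example_group = {
	.is_visible	= example_attr_is_visible,
	.attrs		= example_attrs,	/* NULL-terminated array */
};

/* Later, once device parameters are known:
 *	sysfs_update_group(&cdev->kobj, &example_group);
 */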
index 3cea17dd5dbac5b80619e9ee7a34732401ee586d..01cefbb2d5396718f61ed49e188c05b1e2772925 100644 (file)
@@ -860,7 +860,6 @@ static int sd_sync_cache(struct scsi_disk *sdkp)
 
 static void sd_prepare_flush(struct request_queue *q, struct request *rq)
 {
-       memset(rq->cmd, 0, sizeof(rq->cmd));
        rq->cmd_type = REQ_TYPE_BLOCK_PC;
        rq->timeout = SD_TIMEOUT;
        rq->cmd[0] = SYNCHRONIZE_CACHE;
index 2029422bc04d099ba081df55b73cd94a940390d5..c9d7f721b9e28005fcba109b974ed6216e58c8e8 100644 (file)
@@ -2667,7 +2667,6 @@ sg_proc_init(void)
 {
        int k, mask;
        int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
-       struct proc_dir_entry *pdep;
        struct sg_proc_leaf * leaf;
 
        sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
@@ -2676,13 +2675,10 @@ sg_proc_init(void)
        for (k = 0; k < num_leaves; ++k) {
                leaf = &sg_proc_leaf_arr[k];
                mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
-               pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp);
-               if (pdep) {
-                       leaf->fops->owner = THIS_MODULE,
-                       leaf->fops->read = seq_read,
-                       leaf->fops->llseek = seq_lseek,
-                       pdep->proc_fops = leaf->fops;
-               }
+               leaf->fops->owner = THIS_MODULE;
+               leaf->fops->read = seq_read;
+               leaf->fops->llseek = seq_lseek;
+               proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
        }
        return 0;
 }
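The sg proc setup switches from create_proc_entry() plus a later proc_fops assignment to proc_create(), which takes the file_operations up front and so never exposes an entry without its fops. A minimal sketch of the pattern; the seq_file open helper is hypothetical:

/* Sketch: register a proc entry with its fops in one call. */
static const struct file_operations example_proc_fops = {
	.owner	 = THIS_MODULE,
	.open	 = example_proc_open,	/* hypothetical seq_file open */
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};

static int __init example_proc_init(void)
{
	if (!proc_create("example", S_IRUGO, NULL, &example_proc_fops))
		return -ENOMEM;
	return 0;
}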
index 03e359670506cf1403463616a1c410d72a709fab..31fe6051c799869f76713ffa674d4d795baf73dc 100644 (file)
@@ -313,7 +313,8 @@ static struct platform_driver sgiwd93_driver = {
        .probe  = sgiwd93_probe,
        .remove = __devexit_p(sgiwd93_remove),
        .driver = {
-               .name   = "sgiwd93"
+               .name   = "sgiwd93",
+               .owner  = THIS_MODULE,
        }
 };
 
@@ -333,3 +334,4 @@ module_exit(sgiwd93_module_exit);
 MODULE_DESCRIPTION("SGI WD33C93 driver");
 MODULE_AUTHOR("Ralf Baechle <ralf@linux-mips.org>");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sgiwd93");
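Several platform drivers in this merge gain .owner = THIS_MODULE and a MODULE_ALIAS("platform:<name>") line: the owner field associates the driver with its module in sysfs, and the alias lets udev autoload the module when a platform device with a matching name is registered. The shape of the change, as a sketch with an invented driver name:

/* Sketch: a platform driver wired up for module autoloading. */
static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= __devexit_p(example_remove),
	.driver	= {
		.name	= "example",
		.owner	= THIS_MODULE,
	},
};

/* The alias must be "platform:" plus the driver/device name. */
MODULE_ALIAS("platform:example");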
index 0a6b45b1b003a7a0d64a2dd896355bfb15b9e8a0..2bbef4c45a0d64404c11dfc1ae1fbc0b071d0c21 100644 (file)
@@ -53,6 +53,7 @@
 MODULE_AUTHOR("Thomas Bogendörfer");
 MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:snirm_53c710");
 
 #define SNIRM710_CLOCK 32
 
@@ -136,6 +137,7 @@ static struct platform_driver snirm710_driver = {
        .remove = __devexit_p(snirm710_driver_remove),
        .driver = {
                .name   = "snirm_53c710",
+               .owner  = THIS_MODULE,
        },
 };
 
index a860c3a9ae9902b1b935802400f29c1b53903b5c..e8db66ad0bde787448f32c44980eb89328ddb212 100644 (file)
@@ -4322,7 +4322,7 @@ static void do_remove_sysfs_files(void)
 static ssize_t
 st_defined_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+       struct st_modedef *STm = dev_get_drvdata(dev);
        ssize_t l = 0;
 
        l = snprintf(buf, PAGE_SIZE, "%d\n", STm->defined);
@@ -4334,7 +4334,7 @@ DEVICE_ATTR(defined, S_IRUGO, st_defined_show, NULL);
 static ssize_t
 st_defblk_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+       struct st_modedef *STm = dev_get_drvdata(dev);
        ssize_t l = 0;
 
        l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_blksize);
@@ -4346,7 +4346,7 @@ DEVICE_ATTR(default_blksize, S_IRUGO, st_defblk_show, NULL);
 static ssize_t
 st_defdensity_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+       struct st_modedef *STm = dev_get_drvdata(dev);
        ssize_t l = 0;
        char *fmt;
 
@@ -4361,7 +4361,7 @@ static ssize_t
 st_defcompression_show(struct device *dev, struct device_attribute *attr,
                       char *buf)
 {
-       struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+       struct st_modedef *STm = dev_get_drvdata(dev);
        ssize_t l = 0;
 
        l = snprintf(buf, PAGE_SIZE, "%d\n", STm->default_compression - 1);
@@ -4373,7 +4373,7 @@ DEVICE_ATTR(default_compression, S_IRUGO, st_defcompression_show, NULL);
 static ssize_t
 st_options_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
-       struct st_modedef *STm = (struct st_modedef *)dev_get_drvdata(dev);
+       struct st_modedef *STm = dev_get_drvdata(dev);
        struct scsi_tape *STp;
        int i, j, options;
        ssize_t l = 0;
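The st.c hunks only drop redundant casts: dev_get_drvdata() returns void *, which C converts implicitly to any object pointer type, so the (struct st_modedef *) casts added nothing. In short:

/* No cast needed: void * converts implicitly to an object pointer. */
struct st_modedef *STm = dev_get_drvdata(dev);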
index 06152c7fa689bfd21e6dbd7df465ea060a0dcd85..7514b3a0390e5b19b75689a4493047a7ef3d67be 100644 (file)
@@ -294,6 +294,7 @@ static struct platform_driver esp_sun3x_driver = {
        .remove         = __devexit_p(esp_sun3x_remove),
        .driver = {
                .name   = "sun3x_esp",
+               .owner  = THIS_MODULE,
        },
 };
 
@@ -314,3 +315,4 @@ MODULE_VERSION(DRV_VERSION);
 
 module_init(sun3x_esp_init);
 module_exit(sun3x_esp_exit);
+MODULE_ALIAS("platform:sun3x_esp");
index 35142b5341b5358d045b216dabaf3d2fea618dc7..22a6aae78699ab06762858f7c1c7f6c3bcbb4958 100644 (file)
@@ -1647,7 +1647,7 @@ static void sym_flush_comp_queue(struct sym_hcb *np, int cam_status)
        SYM_QUEHEAD *qp;
        struct sym_ccb *cp;
 
-       while ((qp = sym_remque_head(&np->comp_ccbq)) != 0) {
+       while ((qp = sym_remque_head(&np->comp_ccbq)) != NULL) {
                struct scsi_cmnd *cmd;
                cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
                sym_insque_tail(&cp->link_ccbq, &np->busy_ccbq);
@@ -3168,7 +3168,7 @@ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int
         *  the COMP queue and put back other ones into 
         *  the BUSY queue.
         */
-       while ((qp = sym_remque_head(&qtmp)) != 0) {
+       while ((qp = sym_remque_head(&qtmp)) != NULL) {
                struct scsi_cmnd *cmd;
                cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
                cmd = cp->cmd;
@@ -5729,7 +5729,7 @@ void sym_hcb_free(struct sym_hcb *np)
                sym_mfree_dma(np->dqueue, sizeof(u32)*(MAX_QUEUE*2), "DQUEUE");
 
        if (np->actccbs) {
-               while ((qp = sym_remque_head(&np->free_ccbq)) != 0) {
+               while ((qp = sym_remque_head(&np->free_ccbq)) != NULL) {
                        cp = sym_que_entry(qp, struct sym_ccb, link_ccbq);
                        sym_mfree_dma(cp, sizeof(*cp), "CCB");
                }
index 58d7eee4fe81133779d066b9a02f119273b027d8..640333b1e75c9b32565bfeab8721ef566fb903c4 100644 (file)
@@ -1715,13 +1715,12 @@ static void flush_dev(struct scsi_device *dev, unsigned long cursec, unsigned in
 
 }
 
-static irqreturn_t ihdlr(int irq, unsigned int j) {
+static irqreturn_t ihdlr(unsigned int j)
+{
    struct scsi_cmnd *SCpnt;
    unsigned int i, k, c, status, tstatus, reg, ret;
    struct mscp *spp, *cpp;
-
-   if (sh[j]->irq != irq)
-       panic("%s: ihdlr, irq %d, sh[j]->irq %d.\n", BN(j), irq, sh[j]->irq);
+   int irq = sh[j]->irq;
 
    /* Check if this board need to be serviced */
    if (!((reg = inb(sh[j]->io_port + REG_SYS_INTR)) & IRQ_ASSERTED)) goto none;
@@ -1935,7 +1934,7 @@ static irqreturn_t do_interrupt_handler(int irq, void *shap) {
    if ((j = (unsigned int)((char *)shap - sha)) >= num_boards) return IRQ_NONE;
 
    spin_lock_irqsave(sh[j]->host_lock, spin_flags);
-   ret = ihdlr(irq, j);
+   ret = ihdlr(j);
    spin_unlock_irqrestore(sh[j]->host_lock, spin_flags);
    return ret;
 }
index f385dce8dfbeb16d809a04eaf149edfb25c9cbf6..27aa40f3980e56114ddb53847bda968a9e5d1a66 100644 (file)
@@ -951,7 +951,7 @@ static int ultrastor_abort(struct scsi_cmnd *SCpnt)
        printk("abort: command mismatch, %p != %p\n",
               config.mscp[mscp_index].SCint, SCpnt);
 #endif
-    if (config.mscp[mscp_index].SCint == 0)
+    if (config.mscp[mscp_index].SCint == NULL)
        return FAILED;
 
     if (config.mscp[mscp_index].SCint != SCpnt) panic("Bad abort");
@@ -1101,7 +1101,7 @@ static void ultrastor_interrupt(void *dev_id)
     SCtmp = mscp->SCint;
     mscp->SCint = NULL;
 
-    if (SCtmp == 0)
+    if (!SCtmp)
       {
 #if ULTRASTOR_DEBUG & (UD_ABORT|UD_INTERRUPT)
        printk("MSCP %d (%x): no command\n", mscp_index, (unsigned int) mscp);
index 2b8a410e09595f87bffd264a44ce0a840cde599d..bbf5bc5892c7be21451fa55c6863af71519ffaff 100644 (file)
@@ -200,7 +200,7 @@ static void rs_stop(struct tty_struct *tty)
        local_irq_restore(flags);
 }
 
-static void rs_put_char(char ch)
+static int rs_put_char(char ch)
 {
         int flags, loops = 0;
 
@@ -214,6 +214,7 @@ static void rs_put_char(char ch)
        UTX_TXDATA = ch;
         udelay(5);
         local_irq_restore(flags);
+        return 1;
 }
 
 static void rs_start(struct tty_struct *tty)
@@ -1017,18 +1018,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
                        tty_wait_until_sent(tty, 0);
                        send_break(info, arg ? arg*(100) : 250);
                        return 0;
-               case TIOCGSOFTCAR:
-                       error = put_user(C_CLOCAL(tty) ? 1 : 0,
-                                   (unsigned long *) arg);
-                       if (error)
-                               return error;
-                       return 0;
-               case TIOCSSOFTCAR:
-                       get_user(arg, (unsigned long *) arg);
-                       tty->termios->c_cflag =
-                               ((tty->termios->c_cflag & ~CLOCAL) |
-                                (arg ? CLOCAL : 0));
-                       return 0;
                case TIOCGSERIAL:
                        if (access_ok(VERIFY_WRITE, (void *) arg,
                                                sizeof(struct serial_struct)))
@@ -1061,9 +1050,6 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
 {
        struct m68k_serial *info = (struct m68k_serial *)tty->driver_data;
 
-       if (tty->termios->c_cflag == old_termios->c_cflag)
-               return;
-
        change_speed(info);
 
        if ((old_termios->c_cflag & CRTSCTS) &&
@@ -1140,8 +1126,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
        uart->ustcnt &= ~(USTCNT_RXEN | USTCNT_RX_INTR_MASK);
 
        shutdown(info);
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       rs_flush_buffer(tty);
                
        tty_ldisc_flush(tty);
        tty->closing = 0;
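The 68328serial changes follow the tty layer's move to a put_char that reports how many characters were actually queued (0 or 1) instead of returning void, drop the TIOCGSOFTCAR/TIOCSSOFTCAR cases now handled by the core, no longer short-circuit set_termios when c_cflag is unchanged, and call the driver's own rs_flush_buffer() on close rather than reaching through tty->driver. The updated callback shape, as a sketch with hypothetical hardware helpers:

/* Sketch: put_char now reports whether the character was accepted. */
static int example_put_char(char ch)
{
	if (example_tx_full())		/* hypothetical "transmitter busy" test */
		return 0;		/* nothing queued */

	example_tx_write(ch);		/* hypothetical register write */
	return 1;			/* one character queued */
}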
index 2aa6bfe8fdb3d2a0fdfb8d685a065ff7815a4e6b..d9d4e9552a4d355c2458749621cf9d83898db86d 100644 (file)
@@ -51,6 +51,7 @@ extern int  kgdb_output_string (const char* s, unsigned int count);
 
 /* #ifdef CONFIG_SERIAL_CONSOLE */ /* This seems to be a post 2.0 thing - mles */
 #include <linux/console.h>
+#include <linux/jiffies.h>
 
 /* this defines the index into rs_table for the port to use
  */
@@ -994,10 +995,10 @@ static void rs_360_put_char(struct tty_struct *tty, unsigned char ch)
        volatile QUICC_BD       *bdp;
 
        if (serial_paranoia_check(info, tty->name, "rs_put_char"))
-               return;
+               return 0;
 
        if (!tty)
-               return;
+               return 0;
 
        bdp = info->tx_cur;
        while (bdp->status & BD_SC_READY);
@@ -1015,6 +1016,7 @@ static void rs_360_put_char(struct tty_struct *tty, unsigned char ch)
                bdp++;
 
        info->tx_cur = (QUICC_BD *)bdp;
+       return 1;
 
 }
 
@@ -1245,7 +1247,7 @@ static int rs_360_tiocmget(struct tty_struct *tty, struct file *file)
 #ifdef modem_control
        unsigned char control, status;
 
-       if (serial_paranoia_check(info, tty->name, __FUNCTION__))
+       if (serial_paranoia_check(info, tty->name, __func__))
                return -ENODEV;
 
        if (tty->flags & (1 << TTY_IO_ERROR))
@@ -1276,12 +1278,12 @@ static int rs_360_tiocmset(struct tty_struct *tty, struct file *file,
        ser_info_t *info = (ser_info_t *)tty->driver_data;
        unsigned int arg;
 
-       if (serial_paranoia_check(info, tty->name, __FUNCTION__))
+       if (serial_paranoia_check(info, tty->name, __func__))
                return -ENODEV;
 
        if (tty->flags & (1 << TTY_IO_ERROR))
                return -EIO;
-
+       /* FIXME: locking on info->mcr */
        if (set & TIOCM_RTS)
                info->mcr |= UART_MCR_RTS;
        if (set & TIOCM_DTR)
@@ -1435,18 +1437,6 @@ static int rs_360_ioctl(struct tty_struct *tty, struct file * file,
                                return retval;
                        end_break(info);
                        return 0;
-               case TIOCGSOFTCAR:
-                       /* return put_user(C_CLOCAL(tty) ? 1 : 0, (int *) arg); */
-                       put_user(C_CLOCAL(tty) ? 1 : 0, (int *) arg);
-                       return 0;
-               case TIOCSSOFTCAR:
-                       error = get_user(arg, (unsigned int *) arg); 
-                       if (error)
-                               return error;
-                       tty->termios->c_cflag =
-                               ((tty->termios->c_cflag & ~CLOCAL) |
-                                (arg ? CLOCAL : 0));
-                       return 0;
 #ifdef maybe
                case TIOCSERGETLSR: /* Get line status register */
                        return get_lsr_info(info, (unsigned int *) arg);
@@ -1664,8 +1654,7 @@ static void rs_360_close(struct tty_struct *tty, struct file * filp)
                rs_360_wait_until_sent(tty, info->timeout);
        }
        shutdown(info);
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       rs_360_flush_buffer(tty);
        tty_ldisc_flush(tty);           
        tty->closing = 0;
        info->event = 0;
@@ -1716,6 +1705,7 @@ static void rs_360_wait_until_sent(struct tty_struct *tty, int timeout)
        printk("jiff=%lu...", jiffies);
 #endif
 
+       lock_kernel();
        /* We go through the loop at least once because we can't tell
         * exactly when the last character exits the shifter.  There can
         * be at least two characters waiting to be sent after the buffers
@@ -1729,7 +1719,7 @@ static void rs_360_wait_until_sent(struct tty_struct *tty, int timeout)
                msleep_interruptible(jiffies_to_msecs(char_time));
                if (signal_pending(current))
                        break;
-               if (timeout && ((orig_jiffies + timeout) < jiffies))
+               if (timeout && (time_after(jiffies, orig_jiffies + timeout)))
                        break;
                /* The 'tx_cur' is really the next buffer to send.  We
                 * have to back up to the previous BD and wait for it
@@ -1744,6 +1734,7 @@ static void rs_360_wait_until_sent(struct tty_struct *tty, int timeout)
                        bdp--;
        } while (bdp->status & BD_SC_READY);
        current->state = TASK_RUNNING;
+       unlock_kernel();
 #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
        printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
 #endif
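Comparing jiffies with a plain '<' misbehaves when the counter wraps; time_after() from <linux/jiffies.h> (now included above) compares through a signed difference so wraparound is handled, and the wait loop is also bracketed by lock_kernel()/unlock_kernel(). A standalone demonstration of the wraparound trick using a local macro with the same idea as the kernel's:

#include <stdio.h>

/* Same idea as time_after(): a signed difference survives wraparound. */
#define my_time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long orig = (unsigned long)-6;	/* 6 ticks before the counter wraps */
	unsigned long now = orig + 3;		/* only 3 ticks have elapsed */
	unsigned long timeout = 20;

	/* orig + timeout has wrapped to a small value, fooling the naive test */
	printf("naive   timed out: %d\n", now > orig + timeout);		/* 1, wrong */
	printf("wrapped timed out: %d\n", my_time_after(now, orig + timeout));	/* 0, right */
	return 0;
}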
index 96a585e1cee8226a449d3e0174c98bf9a1b6ff77..a1ca9b7bf2d5862b7466fbdcee4f5099b882ac76 100644 (file)
@@ -1868,6 +1868,7 @@ static int serial8250_startup(struct uart_port *port)
        }
 
        if (is_real_interrupt(up->port.irq)) {
+               unsigned char iir1;
                /*
                 * Test for UARTs that do not reassert THRE when the
                 * transmitter is idle and the interrupt has already
@@ -1881,7 +1882,7 @@ static int serial8250_startup(struct uart_port *port)
                wait_for_xmitr(up, UART_LSR_THRE);
                serial_out_sync(up, UART_IER, UART_IER_THRI);
                udelay(1); /* allow THRE to set */
-               serial_in(up, UART_IIR);
+               iir1 = serial_in(up, UART_IIR);
                serial_out(up, UART_IER, 0);
                serial_out_sync(up, UART_IER, UART_IER_THRI);
                udelay(1); /* allow a working UART time to re-assert THRE */
@@ -1894,7 +1895,7 @@ static int serial8250_startup(struct uart_port *port)
                 * If the interrupt is not reasserted, setup a timer to
                 * kick the UART on a regular basis.
                 */
-               if (iir & UART_IIR_NO_INT) {
+               if (!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) {
                        pr_debug("ttyS%d - using backup timer\n", port->line);
                        up->timer.function = serial8250_backup_timeout;
                        up->timer.data = (unsigned long)up;
@@ -2228,7 +2229,9 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios,
        }
        serial8250_set_mctrl(&up->port, up->port.mctrl);
        spin_unlock_irqrestore(&up->port.lock, flags);
-       tty_termios_encode_baud_rate(termios, baud, baud);
+       /* Don't rewrite B0 */
+       if (tty_termios_baud_rate(termios))
+               tty_termios_encode_baud_rate(termios, baud, baud);
 }
 
 static void
@@ -2268,7 +2271,8 @@ static int serial8250_request_std_resource(struct uart_8250_port *up)
                }
 
                if (up->port.flags & UPF_IOREMAP) {
-                       up->port.membase = ioremap(up->port.mapbase, size);
+                       up->port.membase = ioremap_nocache(up->port.mapbase,
+                                                                       size);
                        if (!up->port.membase) {
                                release_mem_region(up->port.mapbase, size);
                                ret = -ENOMEM;
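In the 8250 core, the startup THRE test now records the IIR value from its first probe (iir1) and only falls back to the polling backup timer when that first probe saw an interrupt but the second did not, set_termios no longer re-encodes a baud rate when the caller asked for B0 (tty_termios_baud_rate() returns 0 for a hangup), and the register windows here and in 8250_pci/8250_early are mapped with ioremap_nocache(), since device registers must not be accessed through a cacheable mapping. A kernel-style fragment of the uncached mapping, with illustrative base and length:

/* Sketch (not standalone): map UART registers uncached. */
void __iomem *membase = ioremap_nocache(mapbase, 64);

if (!membase)
	return -ENOMEM;
/* ... access the registers via readb(membase + offset) etc. ... */
iounmap(membase);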
diff --git a/drivers/serial/8250_au1x00.c b/drivers/serial/8250_au1x00.c
deleted file mode 100644 (file)
index 58015fd..0000000
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Serial Device Initialisation for Au1x00
- *
- * (C) Copyright Embedded Alley Solutions, Inc 2005
- * Author: Pantelis Antoniou <pantelis@embeddedalley.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/module.h>
-#include <linux/serial_core.h>
-#include <linux/signal.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-
-#include <linux/serial_8250.h>
-
-#include <asm/mach-au1x00/au1000.h>
-
-#include "8250.h"
-
-#define PORT(_base, _irq)                              \
-       {                                               \
-               .iobase         = _base,                \
-               .membase        = (void __iomem *)_base,\
-               .mapbase        = CPHYSADDR(_base),     \
-               .irq            = _irq,                 \
-               .uartclk        = 0,    /* filled */    \
-               .regshift       = 2,                    \
-               .iotype         = UPIO_AU,              \
-               .flags          = UPF_SKIP_TEST         \
-       }
-
-static struct plat_serial8250_port au1x00_data[] = {
-#if defined(CONFIG_SOC_AU1000)
-       PORT(UART0_ADDR, AU1000_UART0_INT),
-       PORT(UART1_ADDR, AU1000_UART1_INT),
-       PORT(UART2_ADDR, AU1000_UART2_INT),
-       PORT(UART3_ADDR, AU1000_UART3_INT),
-#elif defined(CONFIG_SOC_AU1500)
-       PORT(UART0_ADDR, AU1500_UART0_INT),
-       PORT(UART3_ADDR, AU1500_UART3_INT),
-#elif defined(CONFIG_SOC_AU1100)
-       PORT(UART0_ADDR, AU1100_UART0_INT),
-       PORT(UART1_ADDR, AU1100_UART1_INT),
-       /* The internal UART2 does not exist on the AU1100 processor. */
-       PORT(UART3_ADDR, AU1100_UART3_INT),
-#elif defined(CONFIG_SOC_AU1550)
-       PORT(UART0_ADDR, AU1550_UART0_INT),
-       PORT(UART1_ADDR, AU1550_UART1_INT),
-       PORT(UART3_ADDR, AU1550_UART3_INT),
-#elif defined(CONFIG_SOC_AU1200)
-       PORT(UART0_ADDR, AU1200_UART0_INT),
-       PORT(UART1_ADDR, AU1200_UART1_INT),
-#endif
-       { },
-};
-
-static struct platform_device au1x00_device = {
-       .name                   = "serial8250",
-       .id                     = PLAT8250_DEV_AU1X00,
-       .dev                    = {
-               .platform_data  = au1x00_data,
-       },
-};
-
-static int __init au1x00_init(void)
-{
-       int i;
-       unsigned int uartclk;
-
-       /* get uart clock */
-       uartclk = get_au1x00_uart_baud_base() * 16;
-
-       /* fill up uartclk */
-       for (i = 0; au1x00_data[i].flags ; i++)
-               au1x00_data[i].uartclk = uartclk;
-
-       return platform_device_register(&au1x00_device);
-}
-
-/* XXX: Yes, I know this doesn't yet work. */
-static void __exit au1x00_exit(void)
-{
-       platform_device_unregister(&au1x00_device);
-}
-
-module_init(au1x00_init);
-module_exit(au1x00_exit);
-
-MODULE_AUTHOR("Pantelis Antoniou <pantelis@embeddedalley.com>");
-MODULE_DESCRIPTION("8250 serial probe module for Au1x000 cards");
-MODULE_LICENSE("GPL");
index 38776e8b064b07f57f8815709efd70defeb9d4ca..f279745e9fefe9f07114923d58ebaec052dda4cf 100644 (file)
@@ -153,10 +153,10 @@ static int __init parse_options(struct early_serial8250_device *device,
                        (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
                port->membase += port->mapbase & ~PAGE_MASK;
 #else
-               port->membase = ioremap(port->mapbase, 64);
+               port->membase = ioremap_nocache(port->mapbase, 64);
                if (!port->membase) {
                        printk(KERN_ERR "%s: Couldn't ioremap 0x%llx\n",
-                               __FUNCTION__,
+                               __func__,
                               (unsigned long long)port->mapbase);
                        return -ENOMEM;
                }
index f97224ce59da709f739dfe45c31631ac4a635adc..53fa19cf2f0636d6dd79d0918266243441bda31f 100644 (file)
@@ -86,7 +86,7 @@ setup_port(struct serial_private *priv, struct uart_port *port,
                len =  pci_resource_len(dev, bar);
 
                if (!priv->remapped_bar[bar])
-                       priv->remapped_bar[bar] = ioremap(base, len);
+                       priv->remapped_bar[bar] = ioremap_nocache(base, len);
                if (!priv->remapped_bar[bar])
                        return -ENOMEM;
 
@@ -270,7 +270,7 @@ static int pci_plx9050_init(struct pci_dev *dev)
        /*
         * enable/disable interrupts
         */
-       p = ioremap(pci_resource_start(dev, 0), 0x80);
+       p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
        if (p == NULL)
                return -ENOMEM;
        writel(irq_config, p + 0x4c);
@@ -294,7 +294,7 @@ static void __devexit pci_plx9050_exit(struct pci_dev *dev)
        /*
         * disable interrupts
         */
-       p = ioremap(pci_resource_start(dev, 0), 0x80);
+       p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
        if (p != NULL) {
                writel(0, p + 0x4c);
 
@@ -341,7 +341,8 @@ static int sbs_init(struct pci_dev *dev)
 {
        u8 __iomem *p;
 
-       p = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0));
+       p = ioremap_nocache(pci_resource_start(dev, 0),
+                                               pci_resource_len(dev, 0));
 
        if (p == NULL)
                return -ENOMEM;
@@ -365,7 +366,8 @@ static void __devexit sbs_exit(struct pci_dev *dev)
 {
        u8 __iomem *p;
 
-       p = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0));
+       p = ioremap_nocache(pci_resource_start(dev, 0),
+                                       pci_resource_len(dev, 0));
        /* FIXME: What if resource_len < OCT_REG_CR_OFF */
        if (p != NULL)
                writeb(0, p + OCT_REG_CR_OFF);
@@ -419,7 +421,7 @@ static int pci_siig10x_init(struct pci_dev *dev)
                break;
        }
 
-       p = ioremap(pci_resource_start(dev, 0), 0x80);
+       p = ioremap_nocache(pci_resource_start(dev, 0), 0x80);
        if (p == NULL)
                return -ENOMEM;
 
@@ -775,7 +777,7 @@ pci_default_setup(struct serial_private *priv, struct pciserial_board *board,
  * This list is ordered alphabetically by vendor then device.
  * Specific entries must come before more generic entries.
  */
-static struct pci_serial_quirk pci_serial_quirks[] = {
+static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
        /*
        * ADDI-DATA GmbH communication cards <info@addi-data.com>
        */
index f7cd9504d81162c1e08c8f3b30ff18b3a40d3920..36acbcca2d4893c4aa3178e0d7e426cff2029f90 100644 (file)
@@ -262,12 +262,12 @@ config SERIAL_8250_ACORN
          cards.  If unsure, say N.
 
 config SERIAL_8250_AU1X00
-       bool "AU1X00 serial port support"
+       bool "Au1x00 serial port support"
        depends on SERIAL_8250 != n && SOC_AU1X00
        help
-         If you have an Au1x00 board and want to use the serial port, say Y
-         to this option.  The driver can handle 1 or 2 serial ports.
-         If unsure, say N.
+         If you have an Au1x00 SOC based board and want to use the serial port,
+         say Y to this option. The driver can handle up to 4 serial ports,
+         depending on the SOC. If unsure, say N.
 
 config SERIAL_8250_RM9K
        bool "Support for MIPS RM9xxx integrated serial port"
@@ -1355,4 +1355,47 @@ config SERIAL_SC26XX_CONSOLE
        help
          Support for Console on SC2681/SC2692 serial ports.
 
+config SERIAL_BFIN_SPORT
+       tristate "Blackfin SPORT emulate UART (EXPERIMENTAL)"
+       depends on BFIN && EXPERIMENTAL
+       select SERIAL_CORE
+       help
+         Enable emulating a UART over the Blackfin SPORT peripheral.
+
+         To compile this driver as a module, choose M here: the
+         module will be called bfin_sport_uart.
+
+choice
+       prompt "Baud rate for Blackfin SPORT UART"
+       depends on SERIAL_BFIN_SPORT
+       default SERIAL_SPORT_BAUD_RATE_57600
+       help
+         Choose a baud rate for the SPORT UART; the other settings are
+         8 data bits, 1 stop bit, no parity, no flow control.
+
+config SERIAL_SPORT_BAUD_RATE_115200
+       bool "115200"
+
+config SERIAL_SPORT_BAUD_RATE_57600
+       bool "57600"
+
+config SERIAL_SPORT_BAUD_RATE_38400
+       bool "38400"
+
+config SERIAL_SPORT_BAUD_RATE_19200
+       bool "19200"
+
+config SERIAL_SPORT_BAUD_RATE_9600
+       bool "9600"
+endchoice
+
+config SPORT_BAUD_RATE
+       int
+       depends on SERIAL_BFIN_SPORT
+       default 115200 if (SERIAL_SPORT_BAUD_RATE_115200)
+       default 57600 if (SERIAL_SPORT_BAUD_RATE_57600)
+       default 38400 if (SERIAL_SPORT_BAUD_RATE_38400)
+       default 19200 if (SERIAL_SPORT_BAUD_RATE_19200)
+       default 9600 if (SERIAL_SPORT_BAUD_RATE_9600)
+
 endmenu
index 3cbea5494724ac978cd6c0221a1ec038b34efed3..0d9c09b1e8367489b98bede96e59d2ee2f6b693e 100644 (file)
@@ -20,7 +20,6 @@ obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o
 obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
 obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
 obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o
-obj-$(CONFIG_SERIAL_8250_AU1X00) += 8250_au1x00.o
 obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o
 obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o
 obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
@@ -28,6 +27,7 @@ obj-$(CONFIG_SERIAL_PXA) += pxa.o
 obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
 obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
 obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o
+obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o
 obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o
 obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o
 obj-$(CONFIG_SERIAL_SUNHV) += sunhv.o
index 55492fa095a2fab26fe7505143f662d665261fbf..c065a704a93ab68cfe167d73f67edbb700a8420c 100644 (file)
@@ -96,7 +96,6 @@
 
  /* PDC registers */
 #define UART_PUT_PTCR(port,v)  __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR)
-#define UART_GET_TCR(port)      __raw_readl((port)->membase + ATMEL_PDC_TCR)
 #define UART_GET_PTSR(port)    __raw_readl((port)->membase + ATMEL_PDC_PTSR)
 
 #define UART_PUT_RPR(port,v)   __raw_writel(v, (port)->membase + ATMEL_PDC_RPR)
index 5f55534a290ba8d00000c694cef8c28496347090..8a2f6a1baa74d5b09b31666dd46be9b7bca746c2 100644 (file)
@@ -762,7 +762,7 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
                break;
        default:
                printk(KERN_ERR "%s: word lengh not supported\n",
-                       __FUNCTION__);
+                       __func__);
        }
 
        if (termios->c_cflag & CSTOPB)
@@ -1029,7 +1029,7 @@ bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud,
 
                *baud = get_sclk() / (16*(dll | dlh << 8));
        }
-       pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __FUNCTION__, *baud, *parity, *bits);
+       pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __func__, *baud, *parity, *bits);
 }
 #endif
 
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
new file mode 100644 (file)
index 0000000..aca1240
--- /dev/null
@@ -0,0 +1,614 @@
+/*
+ * File:       linux/drivers/serial/bfin_sport_uart.c
+ *
+ * Based on:   drivers/serial/bfin_5xx.c by Aubrey Li.
+ * Author:     Roy Huang <roy.huang@analog.com>
+ *
+ * Created:    Nov 22, 2006
+ * Copyright:  (c) 2006-2007 Analog Devices Inc.
+ * Description: this driver enables the Blackfin SPORT peripheral to emulate a UART.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+/*
+ * This driver and the hardware it supports follow ADI application note EE-191:
+ * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf
+ * The note describes how to implement a UART on a SHARC DSP; this driver
+ * implements the same scheme on a Blackfin processor.
+ */
+
+/* After reset there is a prelude of low-level pulses the first time data is
+ * transmitted; no additional pulses appear in subsequent transmissions.
+ * According to the documentation:
+ * The SPORTs are ready to start transmitting or receiving data no later than
+ * three serial clock cycles after they are enabled in the SPORTx_TCR1 or
+ * SPORTx_RCR1 register. No serial clock cycles are lost from this point on.
+ * The first internal frame sync will occur one frame sync delay after the
+ * SPORTs are ready. External frame syncs can occur as soon as the SPORT is
+ * ready.
+ */
+
+/* Thanks to Axel Alatalo <axel@rubico.se> for fixing a sport rx bug where
+ * sport sometimes received data incorrectly. In Axel's words:
+ * As in EE-191, sport rx samples at 3 times the UART baud rate and takes the
+ * middle sample of each group of 3 as the data bit. For an 8-N-1 UART setting,
+ * 30 samples are required per byte. If the transmitter sends a byte that is
+ * 1/3 bit too short due to baud rate drift, then the 30th sample of that byte,
+ * which is also the third sample of the stop bit, lands on the immediately
+ * following start bit, which is then thrown away and missed. Because part of
+ * the start bit is missed the receiver starts to drift, and the effect
+ * accumulates over time until synchronization is lost.
+ * If only 2 samples of the stop bit are required (29 samples in total), a
+ * too-short byte as in the case above is tolerated: the 1/3-early start bit
+ * triggers a frame sync, since the last read completes after only 2/3 of the
+ * stop bit and the frame sync is active during the last 1/3, looking for a
+ * possible early start bit. */
+
+//#define DEBUG
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+
+#include <asm/delay.h>
+#include <asm/portmux.h>
+
+#include "bfin_sport_uart.h"
+
+unsigned short bfin_uart_pin_req_sport0[] =
+       {P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
+        P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0};
+
+unsigned short bfin_uart_pin_req_sport1[] =
+       {P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
+        P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0};
+
+#define DRV_NAME "bfin-sport-uart"
+
+struct sport_uart_port {
+       struct uart_port        port;
+       char                    *name;
+
+       int                     tx_irq;
+       int                     rx_irq;
+       int                     err_irq;
+};
+
+static void sport_uart_tx_chars(struct sport_uart_port *up);
+static void sport_stop_tx(struct uart_port *port);
+
+static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
+{
+       pr_debug("%s value:%x\n", __FUNCTION__, value);
+       /* Place a Start and Stop bit */
+       __asm__ volatile (
+               "R2 = b#01111111100;\n\t"
+               "R3 = b#10000000001;\n\t"
+               "%0 <<= 2;\n\t"
+               "%0 = %0 & R2;\n\t"
+               "%0 = %0 | R3;\n\t"
+               :"=r"(value)
+               :"0"(value)
+               :"R2", "R3");
+       pr_debug("%s value:%x\n", __FUNCTION__, value);
+
+       SPORT_PUT_TX(up, value);
+}
+
+static inline unsigned int rx_one_byte(struct sport_uart_port *up)
+{
+       unsigned int value, extract;
+
+       value = SPORT_GET_RX32(up);
+       pr_debug("%s value:%x\n", __FUNCTION__, value);
+
+       /* Extract the 8 data bits */
+       __asm__ volatile (
+               "R5 = 0;\n\t"
+               "P0 = 8;\n\t"
+               "R1 = 0x1801(Z);\n\t"
+               "R3 = 0x0300(Z);\n\t"
+               "R4 = 0;\n\t"
+               "LSETUP(loop_s, loop_e) LC0 = P0;\nloop_s:\t"
+               "R2 = extract(%1, R1.L)(Z);\n\t"
+               "R2 <<= R4;\n\t"
+               "R5 = R5 | R2;\n\t"
+               "R1 = R1 - R3;\nloop_e:\t"
+               "R4 += 1;\n\t"
+               "%0 = R5;\n\t"
+               :"=r"(extract)
+               :"r"(value)
+               :"P0", "R1", "R2","R3","R4", "R5");
+
+       pr_debug("      extract:%x\n", extract);
+       return extract;
+}
+
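tx_one_byte() and rx_one_byte() do the actual UART framing in Blackfin assembly: the transmit side appears to build an 11-bit word, shifted out LSB-first, with the data byte in bits 2..9, bit 1 held low as the start bit and bits 0 and 10 held high as the surrounding stop/idle level, while the receive side extracts the 8 data bits from the oversampled word. A portable, standalone rendering of the transmit framing for reference (not a drop-in replacement for the asm):

#include <stdio.h>

/* Same bit manipulation as the asm in tx_one_byte():
 * ((value << 2) & 0x3FC) keeps the byte in bits 2..9 and clears bits 0..1,
 * | 0x401 then sets bits 0 and 10; bit 1 stays 0 as the start bit. */
static unsigned int frame_byte(unsigned int value)
{
	return ((value << 2) & 0x3FC) | 0x401;
}

int main(void)
{
	unsigned int ch = 0x55;

	printf("0x%02X framed as 0x%03X\n", ch, frame_byte(ch));
	return 0;
}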
+static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate)
+{
+       int tclkdiv, tfsdiv, rclkdiv;
+
+       /* Set TCR1 and TCR2 */
+       SPORT_PUT_TCR1(up, (LTFS | ITFS | TFSR | TLSBIT | ITCLK));
+       SPORT_PUT_TCR2(up, 10);
+       pr_debug("%s TCR1:%x, TCR2:%x\n", __FUNCTION__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
+
+       /* Set RCR1 and RCR2 */
+       SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK));
+       SPORT_PUT_RCR2(up, 28);
+       pr_debug("%s RCR1:%x, RCR2:%x\n", __FUNCTION__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up));
+
+       tclkdiv = sclk/(2 * baud_rate) - 1;
+       tfsdiv = 12;
+       rclkdiv = sclk/(2 * baud_rate * 3) - 1;
+       SPORT_PUT_TCLKDIV(up, tclkdiv);
+       SPORT_PUT_TFSDIV(up, tfsdiv);
+       SPORT_PUT_RCLKDIV(up, rclkdiv);
+       SSYNC();
+       pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, tfsdiv:%d, rclkdiv:%d\n",
+                       __FUNCTION__, sclk, baud_rate, tclkdiv, tfsdiv, rclkdiv);
+
+       return 0;
+}
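sport_uart_setup() derives the serial clock dividers from the system clock: the transmit clock runs at the baud rate (tclkdiv = sclk/(2*baud) - 1), while the receive clock runs three times faster so each bit can be sampled three times, as described in the comment block near the top of the file. A quick standalone check of the arithmetic with made-up clock and baud values:

#include <stdio.h>

int main(void)
{
	/* Illustrative numbers only: a 120 MHz system clock at 57600 baud. */
	unsigned int sclk = 120000000, baud = 57600;
	int tclkdiv = sclk / (2 * baud) - 1;		/* tx clock ~= baud */
	int rclkdiv = sclk / (2 * baud * 3) - 1;	/* rx clock ~= 3 * baud */

	printf("tclkdiv = %d, rclkdiv = %d\n", tclkdiv, rclkdiv);
	printf("tx clock ~ %u Hz, rx clock ~ %u Hz\n",
	       sclk / (2 * (tclkdiv + 1)), sclk / (2 * (rclkdiv + 1)));
	return 0;
}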
+
+static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id)
+{
+       struct sport_uart_port *up = dev_id;
+       struct tty_struct *tty = up->port.info->tty;
+       unsigned int ch;
+
+       do {
+               ch = rx_one_byte(up);
+               up->port.icount.rx++;
+
+               if (uart_handle_sysrq_char(&up->port, ch))
+                       ;
+               else
+                       tty_insert_flip_char(tty, ch, TTY_NORMAL);
+       } while (SPORT_GET_STAT(up) & RXNE);
+       tty_flip_buffer_push(tty);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id)
+{
+       sport_uart_tx_chars(dev_id);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
+{
+       struct sport_uart_port *up = dev_id;
+       struct tty_struct *tty = up->port.info->tty;
+       unsigned int stat = SPORT_GET_STAT(up);
+
+       /* Overflow in RX FIFO */
+       if (stat & ROVF) {
+               up->port.icount.overrun++;
+               tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+               SPORT_PUT_STAT(up, ROVF); /* Clear ROVF bit */
+       }
+       /* These should not happen */
+       if (stat & (TOVF | TUVF | RUVF)) {
+               printk(KERN_ERR "SPORT Error:%s %s %s\n",
+                               (stat & TOVF)?"TX overflow":"",
+                               (stat & TUVF)?"TX underflow":"",
+                               (stat & RUVF)?"RX underflow":"");
+               SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
+               SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
+       }
+       SSYNC();
+
+       return IRQ_HANDLED;
+}
+
+/* Request IRQs and set up the clock */
+static int sport_startup(struct uart_port *port)
+{
+       struct sport_uart_port *up = (struct sport_uart_port *)port;
+       char buffer[20];
+       int retval;
+
+       pr_debug("%s enter\n", __FUNCTION__);
+       memset(buffer, '\0', sizeof(buffer));
+       snprintf(buffer, 20, "%s rx", up->name);
+       retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up);
+       if (retval) {
+               printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
+               return retval;
+       }
+
+       snprintf(buffer, 20, "%s tx", up->name);
+       retval = request_irq(up->tx_irq, sport_uart_tx_irq, IRQF_SAMPLE_RANDOM, buffer, up);
+       if (retval) {
+               printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
+               goto fail1;
+       }
+
+       snprintf(buffer, 20, "%s err", up->name);
+       retval = request_irq(up->err_irq, sport_uart_err_irq, IRQF_SAMPLE_RANDOM, buffer, up);
+       if (retval) {
+               printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
+               goto fail2;
+       }
+
+       if (port->line) {
+               if (peripheral_request_list(bfin_uart_pin_req_sport1, DRV_NAME))
+                       goto fail3;
+       } else {
+               if (peripheral_request_list(bfin_uart_pin_req_sport0, DRV_NAME))
+                       goto fail3;
+       }
+
+       sport_uart_setup(up, get_sclk(), port->uartclk);
+
+       /* Enable receive interrupt */
+       SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) | RSPEN));
+       SSYNC();
+
+       return 0;
+
+
+fail3:
+       printk(KERN_ERR DRV_NAME
+               ": Requesting Peripherals failed\n");
+
+       free_irq(up->err_irq, up);
+fail2:
+       free_irq(up->tx_irq, up);
+fail1:
+       free_irq(up->rx_irq, up);
+
+       return retval;
+
+}
+
+static void sport_uart_tx_chars(struct sport_uart_port *up)
+{
+       struct circ_buf *xmit = &up->port.info->xmit;
+
+       if (SPORT_GET_STAT(up) & TXF)
+               return;
+
+       if (up->port.x_char) {
+               tx_one_byte(up, up->port.x_char);
+               up->port.icount.tx++;
+               up->port.x_char = 0;
+               return;
+       }
+
+       if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) {
+               sport_stop_tx(&up->port);
+               return;
+       }
+
+       while(!(SPORT_GET_STAT(up) & TXF) && !uart_circ_empty(xmit)) {
+               tx_one_byte(up, xmit->buf[xmit->tail]);
+               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE -1);
+               up->port.icount.tx++;
+       }
+
+       if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+               uart_write_wakeup(&up->port);
+}
+
+static unsigned int sport_tx_empty(struct uart_port *port)
+{
+       struct sport_uart_port *up = (struct sport_uart_port *)port;
+       unsigned int stat;
+
+       stat = SPORT_GET_STAT(up);
+       pr_debug("%s stat:%04x\n", __FUNCTION__, stat);
+       if (stat & TXHRE) {
+               return TIOCSER_TEMT;
+       } else
+               return 0;
+}
+
+static unsigned int sport_get_mctrl(struct uart_port *port)
+{
+       pr_debug("%s enter\n", __FUNCTION__);
+       return (TIOCM_CTS | TIOCM_CD | TIOCM_DSR);
+}
+
+static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+       pr_debug("%s enter\n", __FUNCTION__);
+}
+
+static void sport_stop_tx(struct uart_port *port)
+{
+       struct sport_uart_port *up = (struct sport_uart_port *)port;
+       unsigned int stat;
+
+       pr_debug("%s enter\n", __FUNCTION__);
+
+       stat = SPORT_GET_STAT(up);
+       while(!(stat & TXHRE)) {
+               udelay(1);
+               stat = SPORT_GET_STAT(up);
+       }
+       /* Although the hold register is empty, the last byte may still be in
+        * the shift register and not yet sent out. If the baud rate is lower
+        * than the default, the delay should be longer; empirically, at 9600
+        * baud the delay must be at least 2ms. */
+       udelay(500);
+
+       SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
+       SSYNC();
+
+       return;
+}
+
+static void sport_start_tx(struct uart_port *port)
+{
+       struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+       pr_debug("%s enter\n", __FUNCTION__);
+       /* Write data into the SPORT FIFO before enabling the SPORT transmitter */
+       sport_uart_tx_chars(up);
+
+       /* Enable transmit; an interrupt will then be generated */
+       SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
+       SSYNC();
+       pr_debug("%s exit\n", __FUNCTION__);
+}
+
+static void sport_stop_rx(struct uart_port *port)
+{
+       struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+       pr_debug("%s enter\n", __FUNCTION__);
+       /* Disable sport to stop rx */
+       SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN));
+       SSYNC();
+}
+
+static void sport_enable_ms(struct uart_port *port)
+{
+       pr_debug("%s enter\n", __FUNCTION__);
+}
+
+static void sport_break_ctl(struct uart_port *port, int break_state)
+{
+       pr_debug("%s enter\n", __FUNCTION__);
+}
+
+static void sport_shutdown(struct uart_port *port)
+{
+       struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+       pr_debug("%s enter\n", __FUNCTION__);
+
+       /* Disable sport */
+       SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
+       SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN));
+       SSYNC();
+
+       if (port->line) {
+               peripheral_free_list(bfin_uart_pin_req_sport1);
+       } else {
+               peripheral_free_list(bfin_uart_pin_req_sport0);
+       }
+
+       free_irq(up->rx_irq, up);
+       free_irq(up->tx_irq, up);
+       free_irq(up->err_irq, up);
+}
+
+static void sport_set_termios(struct uart_port *port,
+               struct termios *termios, struct termios *old)
+{
+       pr_debug("%s enter, c_cflag:%08x\n", __FUNCTION__, termios->c_cflag);
+       uart_update_timeout(port, CS8 ,port->uartclk);
+}
+
+static const char *sport_type(struct uart_port *port)
+{
+       struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+       pr_debug("%s enter\n", __FUNCTION__);
+       return up->name;
+}
+
+static void sport_release_port(struct uart_port *port)
+{
+       pr_debug("%s enter\n", __FUNCTION__);
+}
+
+static int sport_request_port(struct uart_port *port)
+{
+       pr_debug("%s enter\n", __FUNCTION__);
+       return 0;
+}
+
+static void sport_config_port(struct uart_port *port, int flags)
+{
+       struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+       pr_debug("%s enter\n", __FUNCTION__);
+       up->port.type = PORT_BFIN_SPORT;
+}
+
+static int sport_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+       pr_debug("%s enter\n", __FUNCTION__);
+       return 0;
+}
+
+struct uart_ops sport_uart_ops = {
+       .tx_empty       = sport_tx_empty,
+       .set_mctrl      = sport_set_mctrl,
+       .get_mctrl      = sport_get_mctrl,
+       .stop_tx        = sport_stop_tx,
+       .start_tx       = sport_start_tx,
+       .stop_rx        = sport_stop_rx,
+       .enable_ms      = sport_enable_ms,
+       .break_ctl      = sport_break_ctl,
+       .startup        = sport_startup,
+       .shutdown       = sport_shutdown,
+       .set_termios    = sport_set_termios,
+       .type           = sport_type,
+       .release_port   = sport_release_port,
+       .request_port   = sport_request_port,
+       .config_port    = sport_config_port,
+       .verify_port    = sport_verify_port,
+};
+
+static struct sport_uart_port sport_uart_ports[] = {
+       { /* SPORT 0 */
+               .name   = "SPORT0",
+               .tx_irq = IRQ_SPORT0_TX,
+               .rx_irq = IRQ_SPORT0_RX,
+               .err_irq = IRQ_SPORT0_ERROR,
+               .port   = {
+                       .type           = PORT_BFIN_SPORT,
+                       .iotype         = UPIO_MEM,
+                       .membase        = (void __iomem *)SPORT0_TCR1,
+                       .mapbase        = SPORT0_TCR1,
+                       .irq            = IRQ_SPORT0_RX,
+                       .uartclk        = CONFIG_SPORT_BAUD_RATE,
+                       .fifosize       = 8,
+                       .ops            = &sport_uart_ops,
+                       .line           = 0,
+               },
+       }, { /* SPORT 1 */
+               .name   = "SPORT1",
+               .tx_irq = IRQ_SPORT1_TX,
+               .rx_irq = IRQ_SPORT1_RX,
+               .err_irq = IRQ_SPORT1_ERROR,
+               .port   = {
+                       .type           = PORT_BFIN_SPORT,
+                       .iotype         = UPIO_MEM,
+                       .membase        = (void __iomem *)SPORT1_TCR1,
+                       .mapbase        = SPORT1_TCR1,
+                       .irq            = IRQ_SPORT1_RX,
+                       .uartclk        = CONFIG_SPORT_BAUD_RATE,
+                       .fifosize       = 8,
+                       .ops            = &sport_uart_ops,
+                       .line           = 1,
+               },
+       }
+};
+
+static struct uart_driver sport_uart_reg = {
+       .owner          = THIS_MODULE,
+       .driver_name    = "SPORT-UART",
+       .dev_name       = "ttySS",
+       .major          = 204,
+       .minor          = 84,
+       .nr             = ARRAY_SIZE(sport_uart_ports),
+       .cons           = NULL,
+};
+
+static int sport_uart_suspend(struct platform_device *dev, pm_message_t state)
+{
+       struct sport_uart_port *sport = platform_get_drvdata(dev);
+
+       pr_debug("%s enter\n", __FUNCTION__);
+       if (sport)
+               uart_suspend_port(&sport_uart_reg, &sport->port);
+
+       return 0;
+}
+
+static int sport_uart_resume(struct platform_device *dev)
+{
+       struct sport_uart_port *sport = platform_get_drvdata(dev);
+
+       pr_debug("%s enter\n", __FUNCTION__);
+       if (sport)
+               uart_resume_port(&sport_uart_reg, &sport->port);
+
+       return 0;
+}
+
+static int sport_uart_probe(struct platform_device *dev)
+{
+       pr_debug("%s enter\n", __FUNCTION__);
+       sport_uart_ports[dev->id].port.dev = &dev->dev;
+       uart_add_one_port(&sport_uart_reg, &sport_uart_ports[dev->id].port);
+       platform_set_drvdata(dev, &sport_uart_ports[dev->id]);
+
+       return 0;
+}
+
+static int sport_uart_remove(struct platform_device *dev)
+{
+       struct sport_uart_port *sport = platform_get_drvdata(dev);
+
+       pr_debug("%s enter\n", __FUNCTION__);
+       platform_set_drvdata(dev, NULL);
+
+       if (sport)
+               uart_remove_one_port(&sport_uart_reg, &sport->port);
+
+       return 0;
+}
+
+static struct platform_driver sport_uart_driver = {
+       .probe          = sport_uart_probe,
+       .remove         = sport_uart_remove,
+       .suspend        = sport_uart_suspend,
+       .resume         = sport_uart_resume,
+       .driver         = {
+               .name   = DRV_NAME,
+       },
+};
+
+static int __init sport_uart_init(void)
+{
+       int ret;
+
+       pr_debug("%s enter\n", __FUNCTION__);
+       ret = uart_register_driver(&sport_uart_reg);
+       if (ret != 0) {
+               printk(KERN_ERR "Failed to register %s:%d\n",
+                               sport_uart_reg.driver_name, ret);
+               return ret;
+       }
+
+       ret = platform_driver_register(&sport_uart_driver);
+       if (ret != 0) {
+               printk(KERN_ERR "Failed to register sport uart driver:%d\n", ret);
+               uart_unregister_driver(&sport_uart_reg);
+       }
+
+       pr_debug("%s exit\n", __FUNCTION__);
+       return ret;
+}
+
+static void __exit sport_uart_exit(void)
+{
+       pr_debug("%s enter\n", __FUNCTION__);
+       platform_driver_unregister(&sport_uart_driver);
+       uart_unregister_driver(&sport_uart_reg);
+}
+
+module_init(sport_uart_init);
+module_exit(sport_uart_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/serial/bfin_sport_uart.h
new file mode 100644 (file)
index 0000000..671d41c
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * File:       linux/drivers/serial/bfin_sport_uart.h
+ *
+ * Based on:   include/asm-blackfin/mach-533/bfin_serial_5xx.h
+ * Author:     Roy Huang <roy.huang@analog.com>
+ *
+ * Created:    Nov 22, 2006
+ * Copyright:  (C) Analog Devices Inc.
+ * Description: this driver enables the Blackfin SPORTs to emulate UARTs.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ */
+
+
+#define OFFSET_TCR1            0x00    /* Transmit Configuration 1 Register */
+#define OFFSET_TCR2            0x04    /* Transmit Configuration 2 Register */
+#define OFFSET_TCLKDIV         0x08    /* Transmit Serial Clock Divider Register */
+#define OFFSET_TFSDIV          0x0C    /* Transmit Frame Sync Divider Register */
+#define OFFSET_TX              0x10    /* Transmit Data Register               */
+#define OFFSET_RX              0x18    /* Receive Data Register                */
+#define OFFSET_RCR1            0x20    /* Receive Configuration 1 Register     */
+#define OFFSET_RCR2            0x24    /* Receive Configuration 2 Register     */
+#define OFFSET_RCLKDIV         0x28    /* Receive Serial Clock Divider Register */
+#define OFFSET_RFSDIV          0x2c    /* Receive Frame Sync Divider Register */
+#define OFFSET_STAT            0x30    /* Status Register                      */
+
+#define SPORT_GET_TCR1(sport)          bfin_read16(((sport)->port.membase + OFFSET_TCR1))
+#define SPORT_GET_TCR2(sport)          bfin_read16(((sport)->port.membase + OFFSET_TCR2))
+#define SPORT_GET_TCLKDIV(sport)       bfin_read16(((sport)->port.membase + OFFSET_TCLKDIV))
+#define SPORT_GET_TFSDIV(sport)                bfin_read16(((sport)->port.membase + OFFSET_TFSDIV))
+#define SPORT_GET_TX(sport)            bfin_read16(((sport)->port.membase + OFFSET_TX))
+#define SPORT_GET_RX(sport)            bfin_read16(((sport)->port.membase + OFFSET_RX))
+#define SPORT_GET_RX32(sport)          bfin_read32(((sport)->port.membase + OFFSET_RX))
+#define SPORT_GET_RCR1(sport)          bfin_read16(((sport)->port.membase + OFFSET_RCR1))
+#define SPORT_GET_RCR2(sport)          bfin_read16(((sport)->port.membase + OFFSET_RCR2))
+#define SPORT_GET_RCLKDIV(sport)       bfin_read16(((sport)->port.membase + OFFSET_RCLKDIV))
+#define SPORT_GET_RFSDIV(sport)                bfin_read16(((sport)->port.membase + OFFSET_RFSDIV))
+#define SPORT_GET_STAT(sport)          bfin_read16(((sport)->port.membase + OFFSET_STAT))
+
+#define SPORT_PUT_TCR1(sport, v)       bfin_write16(((sport)->port.membase + OFFSET_TCR1), v)
+#define SPORT_PUT_TCR2(sport, v)       bfin_write16(((sport)->port.membase + OFFSET_TCR2), v)
+#define SPORT_PUT_TCLKDIV(sport, v)    bfin_write16(((sport)->port.membase + OFFSET_TCLKDIV), v)
+#define SPORT_PUT_TFSDIV(sport, v)     bfin_write16(((sport)->port.membase + OFFSET_TFSDIV), v)
+#define SPORT_PUT_TX(sport, v)         bfin_write16(((sport)->port.membase + OFFSET_TX), v)
+#define SPORT_PUT_RX(sport, v)         bfin_write16(((sport)->port.membase + OFFSET_RX), v)
+#define SPORT_PUT_RCR1(sport, v)       bfin_write16(((sport)->port.membase + OFFSET_RCR1), v)
+#define SPORT_PUT_RCR2(sport, v)       bfin_write16(((sport)->port.membase + OFFSET_RCR2), v)
+#define SPORT_PUT_RCLKDIV(sport, v)    bfin_write16(((sport)->port.membase + OFFSET_RCLKDIV), v)
+#define SPORT_PUT_RFSDIV(sport, v)     bfin_write16(((sport)->port.membase + OFFSET_RFSDIV), v)
+#define SPORT_PUT_STAT(sport, v)       bfin_write16(((sport)->port.membase + OFFSET_STAT), v)
index a638ba0679ac0f5f2342b4867c1cb5457a5e7c23..a19dc7ef88611f46a382c9227c94238d5e8ed25a 100644 (file)
@@ -1117,7 +1117,7 @@ int cpm_uart_drv_get_platform_data(struct platform_device *pdev, int is_con)
 
        line = cpm_uart_id2nr(idx);
        if(line < 0) {
-               printk(KERN_ERR"%s(): port %d is not registered", __FUNCTION__, idx);
+               printk(KERN_ERR"%s(): port %d is not registered", __func__, idx);
                return -EINVAL;
        }
 
index 383c4e660cd5c25da1ab86db8a408dcc7f9badd2..f9fa237aa9496d7cdf88d4cd0589d8b8391ae351 100644 (file)
@@ -1788,7 +1788,7 @@ static unsigned int handle_descr_data(struct e100_serial *info,
 
        if (info->recv_cnt + recvl > 65536) {
                printk(KERN_CRIT
-                      "%s: Too much pending incoming serial data! Dropping %u bytes.\n", __FUNCTION__, recvl);
+                      "%s: Too much pending incoming serial data! Dropping %u bytes.\n", __func__, recvl);
                return 0;
        }
 
@@ -1801,7 +1801,7 @@ static unsigned int handle_descr_data(struct e100_serial *info,
        append_recv_buffer(info, buffer);
 
        if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
-               panic("%s: Failed to allocate memory for receive buffer!\n", __FUNCTION__);
+               panic("%s: Failed to allocate memory for receive buffer!\n", __func__);
 
        descr->buf = virt_to_phys(buffer->buffer);
 
@@ -1925,7 +1925,7 @@ static int start_recv_dma(struct e100_serial *info)
        /* Set up the receiving descriptors */
        for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++) {
                if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE)))
-                       panic("%s: Failed to allocate memory for receive buffer!\n", __FUNCTION__);
+                       panic("%s: Failed to allocate memory for receive buffer!\n", __func__);
 
                descr[i].ctrl = d_int;
                descr[i].buf = virt_to_phys(buffer->buffer);
@@ -3581,6 +3581,9 @@ rs_tiocmset(struct tty_struct *tty, struct file *file,
                unsigned int set, unsigned int clear)
 {
        struct e100_serial *info = (struct e100_serial *)tty->driver_data;
+       unsigned long flags;
+
+       local_irq_save(flags);
 
        if (clear & TIOCM_RTS)
                e100_rts(info, 0);
@@ -3601,6 +3604,8 @@ rs_tiocmset(struct tty_struct *tty, struct file *file,
                e100_ri_out(info, 1);
        if (set & TIOCM_CD)
                e100_cd_out(info, 1);
+
+       local_irq_restore(flags);
        return 0;
 }
 
@@ -3609,6 +3614,9 @@ rs_tiocmget(struct tty_struct *tty, struct file *file)
 {
        struct e100_serial *info = (struct e100_serial *)tty->driver_data;
        unsigned int result;
+       unsigned long flags;
+
+       local_irq_save(flags);
 
        result =
                (!E100_RTS_GET(info) ? TIOCM_RTS : 0)
@@ -3618,6 +3626,8 @@ rs_tiocmget(struct tty_struct *tty, struct file *file)
                | (!E100_CD_GET(info) ? TIOCM_CAR : 0)
                | (!E100_CTS_GET(info) ? TIOCM_CTS : 0);
 
+       local_irq_restore(flags);
+
 #ifdef SERIAL_DEBUG_IO
        printk(KERN_DEBUG "ser%i: modem state: %i 0x%08X\n",
                info->line, result, result);
@@ -3695,10 +3705,6 @@ rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
 {
        struct e100_serial *info = (struct e100_serial *)tty->driver_data;
 
-       if (tty->termios->c_cflag == old_termios->c_cflag &&
-           tty->termios->c_iflag == old_termios->c_iflag)
-               return;
-
        change_speed(info);
 
        /* Handle turning off CRTSCTS */
@@ -3801,10 +3807,8 @@ rs_close(struct tty_struct *tty, struct file * filp)
 #endif
 
        shutdown(info);
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
-       if (tty->ldisc.flush_buffer)
-               tty->ldisc.flush_buffer(tty);
+       rs_flush_buffer(tty);
+       tty_ldisc_flush_buffer(tty);
        tty->closing = 0;
        info->event = 0;
        info->tty = 0;
@@ -3878,6 +3882,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
         * Check R_DMA_CHx_STATUS bit 0-6=number of available bytes in FIFO
         * R_DMA_CHx_HWSW bit 31-16=nbr of bytes left in DMA buffer (0=64k)
         */
+       lock_kernel();
        orig_jiffies = jiffies;
        while (info->xmit.head != info->xmit.tail || /* More in send queue */
               (*info->ostatusadr & 0x007f) ||  /* more in FIFO */
@@ -3894,6 +3899,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
                        curr_time_usec - info->last_tx_active_usec;
        }
        set_current_state(TASK_RUNNING);
+       unlock_kernel();
 }
 
 /*
@@ -4513,7 +4519,7 @@ rs_init(void)
 
        if (request_irq(SERIAL_IRQ_NBR, ser_interrupt,
                        IRQF_SHARED | IRQF_DISABLED, "serial ", driver))
-               panic("%s: Failed to request irq8", __FUNCTION__);
+               panic("%s: Failed to request irq8", __func__);
 
 #endif
 #endif /* CONFIG_SVINTO_SIM */
index 116211fcd36fc196ff12de4aeb36ee8e6194031f..0dddd68b20d27de2dd4e5a8802e4931e954f493b 100644 (file)
@@ -819,7 +819,7 @@ static void dz_console_putchar(struct uart_port *uport, int ch)
                dz_out(dport, DZ_TCR, mask);
                iob();
                udelay(2);
-       } while (loops--);
+       } while (--loops);
 
        if (loops)                              /* Cannot send otherwise. */
                dz_out(dport, DZ_TDR, ch);
index 168073f12cec94b9b39b4684bb6f0e37a7e78c0a..4f1af71e9a1b2e6e0a26e44416fbb996ac4fe597 100644 (file)
@@ -52,7 +52,7 @@ static unsigned int Submodule_slot;
 #define DPRINT_CONFIG(_x...)   ;
 //#define DPRINT_CONFIG(_x...)  printk _x
 #define NOT_PROGRESS() ;
-//#define NOT_PROGRESS()       printk("%s : fails %d\n", __FUNCTION__, __LINE__)
+//#define NOT_PROGRESS()       printk("%s : fails %d\n", __func__, __LINE__)
 
 /* number of characters we want to transmit to the lower level at a time */
 #define MAX_CHARS              256
@@ -445,7 +445,7 @@ static int inline port_init(struct ioc3_port *port)
                sbbr_h = &idd->vma->sbbr_h;
                ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf;
                DPRINT_CONFIG(("%s: ring_pci_addr 0x%p\n",
-                              __FUNCTION__, (void *)ring_pci_addr));
+                              __func__, (void *)ring_pci_addr));
 
                writel((unsigned int)((uint64_t) ring_pci_addr >> 32), sbbr_h);
                writel((unsigned int)ring_pci_addr | BUF_SIZE_BIT, sbbr_l);
@@ -593,7 +593,7 @@ config_port(struct ioc3_port *port,
 
        DPRINT_CONFIG(("%s: line %d baud %d byte_size %d stop %d parenb %d "
                        "parodd %d\n",
-                      __FUNCTION__, ((struct uart_port *)port->ip_port)->line,
+                      __func__, ((struct uart_port *)port->ip_port)->line,
                        baud, byte_size, stop_bits, parenb, parodd));
 
        if (set_baud(port, baud))
@@ -871,14 +871,14 @@ static int ioc3_set_proto(struct ioc3_port *port, int proto)
        default:
        case PROTO_RS232:
                /* Clear the appropriate GIO pin */
-               DPRINT_CONFIG(("%s: rs232\n", __FUNCTION__));
+               DPRINT_CONFIG(("%s: rs232\n", __func__));
                writel(0, (&port->ip_idd->vma->gppr[0]
                                        + hooks->rs422_select_pin));
                break;
 
        case PROTO_RS422:
                /* Set the appropriate GIO pin */
-               DPRINT_CONFIG(("%s: rs422\n", __FUNCTION__));
+               DPRINT_CONFIG(("%s: rs422\n", __func__));
                writel(1, (&port->ip_idd->vma->gppr[0]
                                        + hooks->rs422_select_pin));
                break;
@@ -988,7 +988,7 @@ ioc3_change_speed(struct uart_port *the_port,
        }
        baud = uart_get_baud_rate(the_port, new_termios, old_termios,
                                  MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED);
-       DPRINT_CONFIG(("%s: returned baud %d for line %d\n", __FUNCTION__, baud,
+       DPRINT_CONFIG(("%s: returned baud %d for line %d\n", __func__, baud,
                                the_port->line));
 
        if (!the_port->fifosize)
@@ -1026,7 +1026,7 @@ ioc3_change_speed(struct uart_port *the_port,
        DPRINT_CONFIG(("%s : port 0x%p line %d cflag 0%o "
                       "config_port(baud %d data %d stop %d penable %d "
                        " parity %d), notification 0x%x\n",
-                      __FUNCTION__, (void *)port, the_port->line, cflag, baud,
+                      __func__, (void *)port, the_port->line, cflag, baud,
                       new_data, new_stop, new_parity_enable, new_parity,
                       the_port->ignore_status_mask));
 
@@ -1919,7 +1919,7 @@ static inline int ioc3_serial_core_attach( struct ioc3_submodule *is,
        struct pci_dev *pdev = idd->pdev;
 
        DPRINT_CONFIG(("%s: attach pdev 0x%p - card_ptr 0x%p\n",
-                      __FUNCTION__, pdev, (void *)card_ptr));
+                      __func__, pdev, (void *)card_ptr));
 
        if (!card_ptr)
                return -ENODEV;
@@ -1933,7 +1933,7 @@ static inline int ioc3_serial_core_attach( struct ioc3_submodule *is,
                port->ip_port = the_port;
 
                DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p [%d/%d]\n",
-                       __FUNCTION__, (void *)the_port, (void *)port,
+                       __func__, (void *)the_port, (void *)port,
                                phys_port, ii));
 
                /* membase, iobase and mapbase just need to be non-0 */
@@ -1950,7 +1950,7 @@ static inline int ioc3_serial_core_attach( struct ioc3_submodule *is,
                if (uart_add_one_port(&ioc3_uart, the_port) < 0) {
                        printk(KERN_WARNING
                          "%s: unable to add port %d bus %d\n",
-                              __FUNCTION__, the_port->line, pdev->bus->number);
+                              __func__, the_port->line, pdev->bus->number);
                } else {
                        DPRINT_CONFIG(("IOC3 serial port %d irq %d bus %d\n",
                          the_port->line, the_port->irq, pdev->bus->number));
@@ -2017,7 +2017,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
        struct ioc3_port *ports[PORTS_PER_CARD];
        int phys_port;
 
-       DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __FUNCTION__, is, idd));
+       DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, is, idd));
 
        card_ptr = kzalloc(sizeof(struct ioc3_card), GFP_KERNEL);
        if (!card_ptr) {
@@ -2067,7 +2067,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
 
                        DPRINT_CONFIG(("%s : Port A ip_serial_regs 0x%p "
                                       "ip_uart_regs 0x%p\n",
-                                      __FUNCTION__,
+                                      __func__,
                                       (void *)port->ip_serial_regs,
                                       (void *)port->ip_uart_regs));
 
@@ -2082,7 +2082,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
                        DPRINT_CONFIG(("%s : Port A ip_cpu_ringbuf 0x%p "
                                       "ip_dma_ringbuf 0x%p, ip_inring 0x%p "
                                        "ip_outring 0x%p\n",
-                                      __FUNCTION__,
+                                      __func__,
                                       (void *)port->ip_cpu_ringbuf,
                                       (void *)port->ip_dma_ringbuf,
                                       (void *)port->ip_inring,
@@ -2094,7 +2094,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
 
                        DPRINT_CONFIG(("%s : Port B ip_serial_regs 0x%p "
                                       "ip_uart_regs 0x%p\n",
-                                      __FUNCTION__,
+                                      __func__,
                                       (void *)port->ip_serial_regs,
                                       (void *)port->ip_uart_regs));
 
@@ -2108,7 +2108,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
                        DPRINT_CONFIG(("%s : Port B ip_cpu_ringbuf 0x%p "
                                       "ip_dma_ringbuf 0x%p, ip_inring 0x%p "
                                        "ip_outring 0x%p\n",
-                                      __FUNCTION__,
+                                      __func__,
                                       (void *)port->ip_cpu_ringbuf,
                                       (void *)port->ip_dma_ringbuf,
                                       (void *)port->ip_inring,
@@ -2116,7 +2116,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
                }
 
                DPRINT_CONFIG(("%s : port %d [addr 0x%p] card_ptr 0x%p",
-                              __FUNCTION__,
+                              __func__,
                               phys_port, (void *)port, (void *)card_ptr));
                DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n",
                               (void *)port->ip_serial_regs,
@@ -2127,7 +2127,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd)
 
                DPRINT_CONFIG(("%s: phys_port %d port 0x%p inring 0x%p "
                               "outring 0x%p\n",
-                              __FUNCTION__,
+                              __func__,
                               phys_port, (void *)port,
                               (void *)port->ip_inring,
                               (void *)port->ip_outring));
@@ -2170,7 +2170,7 @@ static int __devinit ioc3uart_init(void)
        if ((ret = uart_register_driver(&ioc3_uart)) < 0) {
                printk(KERN_WARNING
                       "%s: Couldn't register IOC3 uart serial driver\n",
-                      __FUNCTION__);
+                      __func__);
                return ret;
        }
        ret = ioc3_register_submodule(&ioc3uart_submodule);
index 0c179384fb0cae34049bda432e8f349a910d3694..49b8a82b7b9f65956c96493aa6e2fd22f80d0632 100644 (file)
@@ -889,7 +889,7 @@ static int inline port_init(struct ioc4_port *port)
 
                ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf;
                DPRINT_CONFIG(("%s: ring_pci_addr 0x%lx\n",
-                                       __FUNCTION__, ring_pci_addr));
+                                       __func__, ring_pci_addr));
 
                writel((unsigned int)((uint64_t)ring_pci_addr >> 32), sbbr_h);
                writel((unsigned int)ring_pci_addr | IOC4_BUF_SIZE_BIT, sbbr_l);
@@ -1028,7 +1028,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
                spin_lock_irqsave(&soft->is_ir_lock, flag);
                printk ("%s : %d : mem 0x%p sio_ir 0x%x sio_ies 0x%x "
                                "other_ir 0x%x other_ies 0x%x mask 0x%x\n",
-                    __FUNCTION__, __LINE__,
+                    __func__, __LINE__,
                     (void *)mem, readl(&mem->sio_ir.raw),
                     readl(&mem->sio_ies.raw),
                     readl(&mem->other_ir.raw),
@@ -1155,14 +1155,14 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd)
                                (TOTAL_RING_BUF_SIZE - 1)) == 0));
                        DPRINT_CONFIG(("%s : ip_cpu_ringbuf 0x%p "
                                                "ip_dma_ringbuf 0x%p\n",
-                                       __FUNCTION__,
+                                       __func__,
                                        (void *)port->ip_cpu_ringbuf,
                                        (void *)port->ip_dma_ringbuf));
                        port->ip_inring = RING(port, RX_0_OR_2);
                        port->ip_outring = RING(port, TX_0_OR_2);
                }
                DPRINT_CONFIG(("%s : port %d [addr 0x%p] control 0x%p",
-                               __FUNCTION__,
+                               __func__,
                                port_number, (void *)port, (void *)control));
                DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n",
                                (void *)port->ip_serial_regs,
@@ -1173,7 +1173,7 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd)
 
                DPRINT_CONFIG(("%s: port_number %d port 0x%p inring 0x%p "
                                                "outring 0x%p\n",
-                               __FUNCTION__,
+                               __func__,
                                port_number, (void *)port,
                                (void *)port->ip_inring,
                                (void *)port->ip_outring));
@@ -1317,7 +1317,7 @@ config_port(struct ioc4_port *port,
        int spiniter = 0;
 
        DPRINT_CONFIG(("%s: baud %d byte_size %d stop %d parenb %d parodd %d\n",
-               __FUNCTION__, baud, byte_size, stop_bits, parenb, parodd));
+               __func__, baud, byte_size, stop_bits, parenb, parodd));
 
        if (set_baud(port, baud))
                return 1;
@@ -1725,7 +1725,7 @@ ioc4_change_speed(struct uart_port *the_port,
        }
        baud = uart_get_baud_rate(the_port, new_termios, old_termios,
                                MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED);
-       DPRINT_CONFIG(("%s: returned baud %d\n", __FUNCTION__, baud));
+       DPRINT_CONFIG(("%s: returned baud %d\n", __func__, baud));
 
        /* default is 9600 */
        if (!baud)
@@ -1765,7 +1765,7 @@ ioc4_change_speed(struct uart_port *the_port,
        DPRINT_CONFIG(("%s : port 0x%p cflag 0%o "
                "config_port(baud %d data %d stop %d p enable %d parity %d),"
                " notification 0x%x\n",
-            __FUNCTION__, (void *)port, cflag, baud, new_data, new_stop,
+            __func__, (void *)port, cflag, baud, new_data, new_stop,
             new_parity_enable, new_parity, the_port->ignore_status_mask));
 
        if ((config_port(port, baud,            /* baud */
@@ -2715,7 +2715,7 @@ ioc4_serial_core_attach(struct pci_dev *pdev, int port_type)
 
 
        DPRINT_CONFIG(("%s: attach pdev 0x%p - control 0x%p\n",
-                       __FUNCTION__, pdev, (void *)control));
+                       __func__, pdev, (void *)control));
 
        if (!control)
                return -ENODEV;
@@ -2734,7 +2734,7 @@ ioc4_serial_core_attach(struct pci_dev *pdev, int port_type)
                port->ip_all_ports[port_type_idx] = the_port;
 
                DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p : type %s\n",
-                               __FUNCTION__, (void *)the_port,
+                               __func__, (void *)the_port,
                                (void *)port,
                                port_type == PROTO_RS232 ? "rs232" : "rs422"));
 
@@ -2752,7 +2752,7 @@ ioc4_serial_core_attach(struct pci_dev *pdev, int port_type)
                if (uart_add_one_port(u_driver, the_port) < 0) {
                        printk(KERN_WARNING
                           "%s: unable to add port %d bus %d\n",
-                              __FUNCTION__, the_port->line, pdev->bus->number);
+                              __func__, the_port->line, pdev->bus->number);
                } else {
                        DPRINT_CONFIG(
                            ("IOC4 serial port %d irq = %d, bus %d\n",
@@ -2777,7 +2777,7 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd)
        int ret = 0;
 
 
-       DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __FUNCTION__, idd->idd_pdev,
+       DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, idd->idd_pdev,
                                                        idd->idd_pci_id));
 
        /* PCI-RT does not bring out serial connections.
@@ -2806,7 +2806,7 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd)
                goto out2;
        }
        DPRINT_CONFIG(("%s : mem 0x%p, serial 0x%p\n",
-                               __FUNCTION__, (void *)idd->idd_misc_regs,
+                               __func__, (void *)idd->idd_misc_regs,
                                (void *)serial));
 
        /* Get memory for the new card */
@@ -2858,7 +2858,7 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd)
        } else {
                printk(KERN_WARNING
                    "%s : request_irq fails for IRQ 0x%x\n ",
-                       __FUNCTION__, idd->idd_pdev->irq);
+                       __func__, idd->idd_pdev->irq);
        }
        ret = ioc4_attach_local(idd);
        if (ret)
@@ -2911,13 +2911,13 @@ int ioc4_serial_init(void)
        if ((ret = uart_register_driver(&ioc4_uart_rs232)) < 0) {
                printk(KERN_WARNING
                        "%s: Couldn't register rs232 IOC4 serial driver\n",
-                       __FUNCTION__);
+                       __func__);
                return ret;
        }
        if ((ret = uart_register_driver(&ioc4_uart_rs422)) < 0) {
                printk(KERN_WARNING
                        "%s: Couldn't register rs422 IOC4 serial driver\n",
-                       __FUNCTION__);
+                       __func__);
                return ret;
        }
 
index 12c934a1f2742c88a7b80e1b3ed00588d73e521b..8871aaa3dba677350f519280a5d75b8ff915316d 100644 (file)
@@ -373,6 +373,7 @@ struct neo_uart_struct {
 #define PCI_DEVICE_NEO_2DB9PRI_PCI_NAME                "Neo 2 - DB9 Universal PCI - Powered Ring Indicator"
 #define PCI_DEVICE_NEO_2RJ45_PCI_NAME          "Neo 2 - RJ45 Universal PCI"
 #define PCI_DEVICE_NEO_2RJ45PRI_PCI_NAME       "Neo 2 - RJ45 Universal PCI - Powered Ring Indicator"
+#define PCIE_DEVICE_NEO_IBM_PCI_NAME           "Neo 4 - PCI Express - IBM"
 
 /*
  * Our Global Variables.
index 6767ee381cd1b4645056012ba3704c05177389a0..338cf8a08b436db85c1b9d11b10b2f868e93fd24 100644 (file)
@@ -82,7 +82,10 @@ static int jsm_probe_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* store the info for the board we've found */
        brd->boardnum = adapter_count++;
        brd->pci_dev = pdev;
-       brd->maxports = 2;
+       if (pdev->device == PCIE_DEVICE_ID_NEO_4_IBM)
+               brd->maxports = 4;
+       else
+               brd->maxports = 2;
 
        spin_lock_init(&brd->bd_lock);
        spin_lock_init(&brd->bd_intr_lock);
@@ -208,6 +211,7 @@ static struct pci_device_id jsm_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2DB9PRI), 0, 0, 1 },
        { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45), 0, 0, 2 },
        { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCI_DEVICE_ID_NEO_2RJ45PRI), 0, 0, 3 },
+       { PCI_DEVICE(PCI_VENDOR_ID_DIGI, PCIE_DEVICE_ID_NEO_4_IBM), 0, 0, 4 },
        { 0, }
 };
 MODULE_DEVICE_TABLE(pci, jsm_pci_tbl);
index 9cf03327386acce3b382b7d63c579546b5ff0dd4..eadc1ab6bbcead8918a19773a7db2f08d85ec0a3 100644 (file)
@@ -96,12 +96,14 @@ static void cleanup_kgdboc(void)
 
 static int kgdboc_get_char(void)
 {
-       return kgdb_tty_driver->poll_get_char(kgdb_tty_driver, kgdb_tty_line);
+       return kgdb_tty_driver->ops->poll_get_char(kgdb_tty_driver,
+                                               kgdb_tty_line);
 }
 
 static void kgdboc_put_char(u8 chr)
 {
-       kgdb_tty_driver->poll_put_char(kgdb_tty_driver, kgdb_tty_line, chr);
+       kgdb_tty_driver->ops->poll_put_char(kgdb_tty_driver,
+                                       kgdb_tty_line, chr);
 }
 
 static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
index ddd3aa50d4adef5de5e136df8fbd5fa3a8738ad8..43af40d59b8af44717621150e13194afd85efdff 100644 (file)
@@ -1072,18 +1072,6 @@ static int mcfrs_ioctl(struct tty_struct *tty, struct file * file,
                        tty_wait_until_sent(tty, 0);
                        send_break(info, arg ? arg*(HZ/10) : HZ/4);
                        return 0;
-               case TIOCGSOFTCAR:
-                       error = put_user(C_CLOCAL(tty) ? 1 : 0,
-                                   (unsigned long *) arg);
-                       if (error)
-                               return error;
-                       return 0;
-               case TIOCSSOFTCAR:
-                       get_user(arg, (unsigned long *) arg);
-                       tty->termios->c_cflag =
-                               ((tty->termios->c_cflag & ~CLOCAL) |
-                                (arg ? CLOCAL : 0));
-                       return 0;
                case TIOCGSERIAL:
                        if (access_ok(VERIFY_WRITE, (void *) arg,
                                                sizeof(struct serial_struct)))
@@ -1222,8 +1210,7 @@ static void mcfrs_close(struct tty_struct *tty, struct file * filp)
        } else
 #endif
        shutdown(info);
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       mcfrs_flush_buffer(tty);
        tty_ldisc_flush(tty);
        
        tty->closing = 0;
@@ -1276,6 +1263,8 @@ mcfrs_wait_until_sent(struct tty_struct *tty, int timeout)
         * Note: we have to use pretty tight timings here to satisfy
         * the NIST-PCTS.
         */
+       lock_kernel();
+
        fifo_time = (MCF5272_FIFO_SIZE * HZ * 10) / info->baud;
        char_time = fifo_time / 5;
        if (char_time == 0)
@@ -1312,6 +1301,7 @@ mcfrs_wait_until_sent(struct tty_struct *tty, int timeout)
                if (timeout && time_after(jiffies, orig_jiffies + timeout))
                        break;
        }
+       unlock_kernel();
 #else
        /*
         * For the other coldfire models, assume all data has been sent
@@ -1907,7 +1897,7 @@ static struct tty_driver *mcfrs_console_device(struct console *c, int *index)
  *     This is used for console output.
  */
 
-void mcfrs_put_char(char ch)
+int mcfrs_put_char(char ch)
 {
        volatile unsigned char  *uartp;
        unsigned long           flags;
@@ -1931,7 +1921,7 @@ void mcfrs_put_char(char ch)
                mcfrs_init_console(); /* try and get it back */
        local_irq_restore(flags);
 
-       return;
+       return 1;
 }
 
 
index d93b3578c5e22f67fdb48b074478c042b81e31bf..7a3625f52a03b837551697cf2bc23b54bc488d32 100644 (file)
@@ -1221,8 +1221,8 @@ static struct of_device_id mpc52xx_uart_of_match[] = {
 #endif
 #ifdef CONFIG_PPC_MPC512x
        { .compatible = "fsl,mpc5121-psc-uart", .data = &mpc512x_psc_ops, },
-       {},
 #endif
+       {},
 };
 
 static int __devinit
index 3123ffeac8ad682e4ec383cd9d9606bc095270fe..81ac9bb4f39b74a7a9e1b2456ae6484a49a0f560 100644 (file)
@@ -287,6 +287,7 @@ static void netx_set_mctrl(struct uart_port *port, unsigned int mctrl)
 {
        unsigned int val;
 
+       /* FIXME: Locking needed ? */
        if (mctrl & TIOCM_RTS) {
                val = readl(port->membase + UART_RTS_CR);
                writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR);
index 4ffa2585429a4965d979b67cea2ba870357baef5..2b6a013639e6db103656787fb567f168e8e802b3 100644 (file)
@@ -1022,6 +1022,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
        struct uart_port *port = &ourport->port;
        struct s3c2410_uartcfg *cfg;
        struct resource *res;
+       int ret;
 
        dbg("s3c24xx_serial_init_port: port=%p, platdev=%p\n", port, platdev);
 
@@ -1064,9 +1065,11 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
 
        port->mapbase   = res->start;
        port->membase   = S3C24XX_VA_UART + (res->start - S3C24XX_PA_UART);
-       port->irq       = platform_get_irq(platdev, 0);
-       if (port->irq < 0)
+       ret = platform_get_irq(platdev, 0);
+       if (ret < 0)
                port->irq = 0;
+       else
+               port->irq = ret;
 
        ourport->clk    = clk_get(&platdev->dev, "uart");
 
@@ -1093,13 +1096,13 @@ static int s3c24xx_serial_probe(struct platform_device *dev,
        ourport = &s3c24xx_serial_ports[probe_index];
        probe_index++;
 
-       dbg("%s: initialising port %p...\n", __FUNCTION__, ourport);
+       dbg("%s: initialising port %p...\n", __func__, ourport);
 
        ret = s3c24xx_serial_init_port(ourport, info, dev);
        if (ret < 0)
                goto probe_err;
 
-       dbg("%s: adding port\n", __FUNCTION__);
+       dbg("%s: adding port\n", __func__);
        uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
        platform_set_drvdata(dev, &ourport->port);
 
@@ -1584,7 +1587,7 @@ static int s3c2412_serial_resetport(struct uart_port *port,
        unsigned long ucon = rd_regl(port, S3C2410_UCON);
 
        dbg("%s: port=%p (%08lx), cfg=%p\n",
-           __FUNCTION__, port, port->mapbase, cfg);
+           __func__, port, port->mapbase, cfg);
 
        /* ensure we don't change the clock settings... */
 
index 67b2338913c2d08c7fd58a86836053b899cfbf0d..62b38582f5e940588d29e0dd8108c0dcf1c0d34f 100644 (file)
@@ -655,7 +655,7 @@ void __init sa1100_register_uart_fns(struct sa1100_port_fns *fns)
 void __init sa1100_register_uart(int idx, int port)
 {
        if (idx >= NR_PORTS) {
-               printk(KERN_ERR "%s: bad index number %d\n", __FUNCTION__, idx);
+               printk(KERN_ERR "%s: bad index number %d\n", __func__, idx);
                return;
        }
 
@@ -682,7 +682,7 @@ void __init sa1100_register_uart(int idx, int port)
                break;
 
        default:
-               printk(KERN_ERR "%s: bad port number %d\n", __FUNCTION__, port);
+               printk(KERN_ERR "%s: bad port number %d\n", __func__, port);
        }
 }
 
index a9ca03ead3e5dc0337b9b7a1eecdf8bb4ce1569a..1e2b9d826f69a5db2756e8eb477f2249587122e4 100644 (file)
@@ -329,13 +329,15 @@ EXPORT_SYMBOL(uart_update_timeout);
  *     If it's still invalid, we try 9600 baud.
  *
  *     Update the @termios structure to reflect the baud rate
- *     we're actually going to be using.
+ *     we're actually going to be using. Don't do this for the case
+ *     where B0 is requested ("hang up").
  */
 unsigned int
 uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
                   struct ktermios *old, unsigned int min, unsigned int max)
 {
        unsigned int try, baud, altbaud = 38400;
+       int hung_up = 0;
        upf_t flags = port->flags & UPF_SPD_MASK;
 
        if (flags == UPF_SPD_HI)
@@ -360,8 +362,10 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
                /*
                 * Special case: B0 rate.
                 */
-               if (baud == 0)
+               if (baud == 0) {
+                       hung_up = 1;
                        baud = 9600;
+               }
 
                if (baud >= min && baud <= max)
                        return baud;
@@ -373,7 +377,9 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
                termios->c_cflag &= ~CBAUD;
                if (old) {
                        baud = tty_termios_baud_rate(old);
-                       tty_termios_encode_baud_rate(termios, baud, baud);
+                       if (!hung_up)
+                               tty_termios_encode_baud_rate(termios,
+                                                               baud, baud);
                        old = NULL;
                        continue;
                }
@@ -382,7 +388,8 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
                 * As a last resort, if the quotient is zero,
                 * default to 9600 bps
                 */
-               tty_termios_encode_baud_rate(termios, 9600, 9600);
+               if (!hung_up)
+                       tty_termios_encode_baud_rate(termios, 9600, 9600);
        }
 
        return 0;
@@ -415,6 +422,7 @@ uart_get_divisor(struct uart_port *port, unsigned int baud)
 
 EXPORT_SYMBOL(uart_get_divisor);
 
+/* FIXME: Consistent locking policy */
 static void
 uart_change_speed(struct uart_state *state, struct ktermios *old_termios)
 {
@@ -447,27 +455,30 @@ uart_change_speed(struct uart_state *state, struct ktermios *old_termios)
        port->ops->set_termios(port, termios, old_termios);
 }
 
-static inline void
+static inline int
 __uart_put_char(struct uart_port *port, struct circ_buf *circ, unsigned char c)
 {
        unsigned long flags;
+       int ret = 0;
 
        if (!circ->buf)
-               return;
+               return 0;
 
        spin_lock_irqsave(&port->lock, flags);
        if (uart_circ_chars_free(circ) != 0) {
                circ->buf[circ->head] = c;
                circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
+               ret = 1;
        }
        spin_unlock_irqrestore(&port->lock, flags);
+       return ret;
 }
 
-static void uart_put_char(struct tty_struct *tty, unsigned char ch)
+static int uart_put_char(struct tty_struct *tty, unsigned char ch)
 {
        struct uart_state *state = tty->driver_data;
 
-       __uart_put_char(state->port, &state->info->xmit, ch);
+       return __uart_put_char(state->port, &state->info->xmit, ch);
 }
 
 static void uart_flush_chars(struct tty_struct *tty)
@@ -521,15 +532,25 @@ uart_write(struct tty_struct *tty, const unsigned char *buf, int count)
 static int uart_write_room(struct tty_struct *tty)
 {
        struct uart_state *state = tty->driver_data;
+       unsigned long flags;
+       int ret;
 
-       return uart_circ_chars_free(&state->info->xmit);
+       spin_lock_irqsave(&state->port->lock, flags);
+       ret = uart_circ_chars_free(&state->info->xmit);
+       spin_unlock_irqrestore(&state->port->lock, flags);
+       return ret;
 }
 
 static int uart_chars_in_buffer(struct tty_struct *tty)
 {
        struct uart_state *state = tty->driver_data;
+       unsigned long flags;
+       int ret;
 
-       return uart_circ_chars_pending(&state->info->xmit);
+       spin_lock_irqsave(&state->port->lock, flags);
+       ret = uart_circ_chars_pending(&state->info->xmit);
+       spin_unlock_irqrestore(&state->port->lock, flags);
+       return ret;
 }
 
 static void uart_flush_buffer(struct tty_struct *tty)
@@ -611,6 +632,11 @@ static int uart_get_info(struct uart_state *state,
        struct serial_struct tmp;
 
        memset(&tmp, 0, sizeof(tmp));
+
+       /* Ensure the state we copy is consistent and no hardware changes
+          occur as we go */
+       mutex_lock(&state->mutex);
+
        tmp.type            = port->type;
        tmp.line            = port->line;
        tmp.port            = port->iobase;
@@ -630,6 +656,8 @@ static int uart_get_info(struct uart_state *state,
        tmp.iomem_reg_shift = port->regshift;
        tmp.iomem_base      = (void *)(unsigned long)port->mapbase;
 
+       mutex_unlock(&state->mutex);
+
        if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
                return -EFAULT;
        return 0;
@@ -907,8 +935,6 @@ static void uart_break_ctl(struct tty_struct *tty, int break_state)
        struct uart_state *state = tty->driver_data;
        struct uart_port *port = state->port;
 
-       BUG_ON(!kernel_locked());
-
        mutex_lock(&state->mutex);
 
        if (port->type != PORT_UNKNOWN)
@@ -1052,7 +1078,7 @@ static int uart_get_count(struct uart_state *state,
 }
 
 /*
- * Called via sys_ioctl under the BKL.  We can use spin_lock_irq() here.
+ * Called via sys_ioctl.  We can use spin_lock_irq() here.
  */
 static int
 uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd,
@@ -1062,7 +1088,6 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd,
        void __user *uarg = (void __user *)arg;
        int ret = -ENOIOCTLCMD;
 
-       BUG_ON(!kernel_locked());
 
        /*
         * These ioctls don't rely on the hardware to be present.
@@ -1133,9 +1158,9 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd,
                break;
        }
        }
- out_up:
+out_up:
        mutex_unlock(&state->mutex);
- out:
+out:
        return ret;
 }
 
@@ -1146,7 +1171,6 @@ static void uart_set_termios(struct tty_struct *tty,
        unsigned long flags;
        unsigned int cflag = tty->termios->c_cflag;
 
-       BUG_ON(!kernel_locked());
 
        /*
         * These are the bits that are used to setup various
@@ -1158,8 +1182,9 @@ static void uart_set_termios(struct tty_struct *tty,
        if ((cflag ^ old_termios->c_cflag) == 0 &&
            tty->termios->c_ospeed == old_termios->c_ospeed &&
            tty->termios->c_ispeed == old_termios->c_ispeed &&
-           RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0)
+           RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0) {
                return;
+       }
 
        uart_change_speed(state, old_termios);
 
@@ -1193,7 +1218,6 @@ static void uart_set_termios(struct tty_struct *tty,
                }
                spin_unlock_irqrestore(&state->port->lock, flags);
        }
-
 #if 0
        /*
         * No need to wake up processes in open wait, since they
@@ -1309,11 +1333,11 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
        struct uart_port *port = state->port;
        unsigned long char_time, expire;
 
-       BUG_ON(!kernel_locked());
-
        if (port->type == PORT_UNKNOWN || port->fifosize == 0)
                return;
 
+       lock_kernel();
+
        /*
         * Set the check interval to be 1/5 of the estimated time to
         * send a single character, and make it at least 1.  The check
@@ -1359,6 +1383,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
                        break;
        }
        set_current_state(TASK_RUNNING); /* might not be needed */
+       unlock_kernel();
 }
 
 /*
@@ -2072,7 +2097,9 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port)
                int ret;
 
                uart_change_pm(state, 0);
+               spin_lock_irq(&port->lock);
                ops->set_mctrl(port, 0);
+               spin_unlock_irq(&port->lock);
                ret = ops->startup(port);
                if (ret == 0) {
                        uart_change_speed(state, NULL);
index c2ea5d4df44a07c5aab6ae01bb86b9a73cade4d0..969106187718d909e0aacb9478477296d43e04ae 100644 (file)
@@ -855,7 +855,7 @@ static int sci_notifier(struct notifier_block *self,
 
                printk(KERN_INFO "%s: got a postchange notification "
                       "for cpu %d (old %d, new %d)\n",
-                      __FUNCTION__, freqs->cpu, freqs->old, freqs->new);
+                      __func__, freqs->cpu, freqs->old, freqs->new);
        }
 
        return NOTIFY_OK;
index 41fc61264443578e77fc4deb16ef276c79d2d502..019da2e05f0b4825d41ada3cf6e6badc609b8885 100644 (file)
@@ -839,7 +839,7 @@ static int __init sn_sal_module_init(void)
 
        if (uart_add_one_port(&sal_console_uart, &sal_console_port.sc_port) < 0) {
                /* error - not sure what I'd do - so I'll do nothing */
-               printk(KERN_ERR "%s: unable to add port\n", __FUNCTION__);
+               printk(KERN_ERR "%s: unable to add port\n", __func__);
        }
 
        /* when this driver is compiled in, the console initialization
index 3271379a36dbc8b8cf8daede92fad77a5680e4a3..90a20a152ebfe9ce0779b3ddba4682941ce9de4a 100644 (file)
@@ -1231,7 +1231,7 @@ static inline struct console *SUNZILOG_CONSOLE(void)
 #define SUNZILOG_CONSOLE()     (NULL)
 #endif
 
-static void __devinit sunzilog_init_kbdms(struct uart_sunzilog_port *up, int channel)
+static void __devinit sunzilog_init_kbdms(struct uart_sunzilog_port *up)
 {
        int baud, brg;
 
@@ -1305,7 +1305,7 @@ static void __devinit sunzilog_init_hw(struct uart_sunzilog_port *up)
                up->curregs[R7] = 0x7E; /* SDLC Flag    */
                up->curregs[R9] = NV;
                up->curregs[R7p] = 0x00;
-               sunzilog_init_kbdms(up, up->port.line);
+               sunzilog_init_kbdms(up);
                /* Only enable interrupts if an ISR handler available */
                if (up->flags & SUNZILOG_FLAG_ISR_HANDLER)
                        up->curregs[R9] |= MIE;
index b565d5a37499799ac3e9873a757fb1b7dc5d1df5..b51c24245be4e361d05c0bf545760375806d0a3a 100644 (file)
@@ -584,7 +584,7 @@ ulite_of_probe(struct of_device *op, const struct of_device_id *match)
        const unsigned int *id;
        int irq, rc;
 
-       dev_dbg(&op->dev, "%s(%p, %p)\n", __FUNCTION__, op, match);
+       dev_dbg(&op->dev, "%s(%p, %p)\n", __func__, op, match);
 
        rc = of_address_to_resource(op->node, 0, &res);
        if (rc) {
index 5e4310ccd5914ecc26983a8ad51bc3bd5fd61624..01917c433f17939000324e82abe63edeb0a72f15 100644 (file)
@@ -215,7 +215,7 @@ static inline dma_addr_t cpu2qe_addr(void *addr, struct uart_qe_port *qe_port)
                return qe_port->bd_dma_addr + (addr - qe_port->bd_virt);
 
        /* something nasty happened */
-       printk(KERN_ERR "%s: addr=%p\n", __FUNCTION__, addr);
+       printk(KERN_ERR "%s: addr=%p\n", __func__, addr);
        BUG();
        return 0;
 }
@@ -234,7 +234,7 @@ static inline void *qe2cpu_addr(dma_addr_t addr, struct uart_qe_port *qe_port)
                return qe_port->bd_virt + (addr - qe_port->bd_dma_addr);
 
        /* something nasty happened */
-       printk(KERN_ERR "%s: addr=%x\n", __FUNCTION__, addr);
+       printk(KERN_ERR "%s: addr=%x\n", __func__, addr);
        BUG();
        return NULL;
 }
index 98ab649c1ff9fb0ce3ad5023df1864dd4e05da66..bb6ce6bba32f21dbdf591cc433476aa5f1bea9fa 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  Driver for NEC VR4100 series Serial Interface Unit.
  *
- *  Copyright (C) 2004-2007  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *  Copyright (C) 2004-2008  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
  *
  *  Based on drivers/serial/8250.c, by Russell King.
  *
@@ -840,6 +840,19 @@ static int __devinit siu_console_init(void)
 
 console_initcall(siu_console_init);
 
+void __init vr41xx_siu_early_setup(struct uart_port *port)
+{
+       if (port->type == PORT_UNKNOWN)
+               return;
+
+       siu_uart_ports[port->line].line = port->line;
+       siu_uart_ports[port->line].type = port->type;
+       siu_uart_ports[port->line].uartclk = SIU_BAUD_BASE * 16;
+       siu_uart_ports[port->line].mapbase = port->mapbase;
+       siu_uart_ports[port->line].ops = &siu_uart_ops;
+}
+
 #define SERIAL_VR41XX_CONSOLE  &siu_console
 #else
 #define SERIAL_VR41XX_CONSOLE  NULL
index d8107890db152c789a764248a9d9a1078484ee02..fae9e8f3d092ce6e34afcf4e89ed5cfc73f0d39f 100644 (file)
@@ -5,11 +5,9 @@
 # nobody's needed a slave side API yet.  The master-role API is not
 # fully appropriate there, so it'd need some thought to do well.
 #
-menu "SPI support"
-       depends on HAS_IOMEM
-
-config SPI
+menuconfig SPI
        bool "SPI support"
+       depends on HAS_IOMEM
        help
          The "Serial Peripheral Interface" is a low level synchronous
          protocol.  Chips that support SPI can have data transfer rates
@@ -28,9 +26,11 @@ config SPI
          (half duplex), SSP, SSI, and PSP.  This driver framework should
          work with most such devices and controllers.
 
+if SPI
+
 config SPI_DEBUG
        boolean "Debug support for SPI drivers"
-       depends on SPI && DEBUG_KERNEL
+       depends on DEBUG_KERNEL
        help
          Say "yes" to enable debug messaging (like dev_dbg and pr_debug),
          sysfs, and debugfs support in SPI controller and protocol drivers.
@@ -245,5 +245,4 @@ config SPI_TLE62X0
 
 # (slave support would go here)
 
-endmenu # "SPI support"
-
+endif # SPI
index 1749a27be0669f71670ec38f0670e1bf537588f4..e81d59d789108041fee33e62b22b82d14e34fef3 100644 (file)
@@ -497,7 +497,7 @@ static int atmel_spi_setup(struct spi_device *spi)
        struct atmel_spi        *as;
        u32                     scbr, csr;
        unsigned int            bits = spi->bits_per_word;
-       unsigned long           bus_hz, sck_hz;
+       unsigned long           bus_hz;
        unsigned int            npcs_pin;
        int                     ret;
 
@@ -536,14 +536,25 @@ static int atmel_spi_setup(struct spi_device *spi)
                return -EINVAL;
        }
 
-       /* speed zero convention is used by some upper layers */
+       /*
+        * Pre-new_1 chips start out at half the peripheral
+        * bus speed.
+        */
        bus_hz = clk_get_rate(as->clk);
+       if (!as->new_1)
+               bus_hz /= 2;
+
        if (spi->max_speed_hz) {
-               /* assume div32/fdiv/mbz == 0 */
-               if (!as->new_1)
-                       bus_hz /= 2;
-               scbr = ((bus_hz + spi->max_speed_hz - 1)
-                       / spi->max_speed_hz);
+               /*
+                * Calculate the lowest divider that satisfies the
+                * constraint, assuming div32/fdiv/mbz == 0.
+                */
+               scbr = DIV_ROUND_UP(bus_hz, spi->max_speed_hz);
+
+               /*
+                * If the resulting divider doesn't fit into the
+                * register bitfield, we can't satisfy the constraint.
+                */
                if (scbr >= (1 << SPI_SCBR_SIZE)) {
                        dev_dbg(&spi->dev,
                                "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
@@ -551,8 +562,8 @@ static int atmel_spi_setup(struct spi_device *spi)
                        return -EINVAL;
                }
        } else
+               /* speed zero means "as slow as possible" */
                scbr = 0xff;
-       sck_hz = bus_hz / scbr;
 
        csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8);
        if (spi->mode & SPI_CPOL)
@@ -589,7 +600,7 @@ static int atmel_spi_setup(struct spi_device *spi)
 
        dev_dbg(&spi->dev,
                "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n",
-               sck_hz, bits, spi->mode, spi->chip_select, csr);
+               bus_hz / scbr, bits, spi->mode, spi->chip_select, csr);
 
        spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
 
@@ -616,7 +627,7 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
                return -ESHUTDOWN;
 
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-               if (!(xfer->tx_buf || xfer->rx_buf)) {
+               if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
                        dev_dbg(&spi->dev, "missing rx or tx buf\n");
                        return -EINVAL;
                }
index 5f00bd6500efa2f8c51203c892f3d75bc1c9eb0e..d9ae111c27ae4c08955fb8ec0414ce65dbc26196 100644 (file)
@@ -151,7 +151,7 @@ static int wait_uwire_csr_flag(u16 mask, u16 val, int might_not_catch)
                if (time_after(jiffies, max_jiffies)) {
                        printk(KERN_ERR "%s: timeout. reg=%#06x "
                                        "mask=%#06x val=%#06x\n",
-                              __FUNCTION__, w, mask, val);
+                              __func__, w, mask, val);
                        return -1;
                }
                c++;
@@ -437,7 +437,7 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
        }
        omap_uwire_configure_mode(spi->chip_select, flags);
        pr_debug("%s: uwire flags %02x, armxor %lu KHz, SCK %lu KHz\n",
-                       __FUNCTION__, flags,
+                       __func__, flags,
                        clk_get_rate(uwire->ck) / 1000,
                        rate / 1000);
        status = 0;
index 147e26a78d648c5d343454de8fca05f200faccac..654bb58be63099235c711e909e46c13fd4831c62 100644 (file)
@@ -67,8 +67,11 @@ MODULE_ALIAS("platform:pxa2xx-spi");
                                | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
 
 #define DEFINE_SSP_REG(reg, off) \
-static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \
-static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); }
+static inline u32 read_##reg(void const __iomem *p) \
+{ return __raw_readl(p + (off)); } \
+\
+static inline void write_##reg(u32 v, void __iomem *p) \
+{ __raw_writel(v, p + (off)); }
 
 DEFINE_SSP_REG(SSCR0, 0x00)
 DEFINE_SSP_REG(SSCR1, 0x04)
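
DEFINE_SSP_REG generates a read_REG()/write_REG() pair per register by token pasting; the hunk above reformats it and, together with the hunks that follow, tags the pointer arguments __iomem. A userspace analogue of the token-pasting pattern, operating on a plain memory block since __raw_readl/__raw_writel only exist in the kernel:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Generate typed accessors for a 32-bit "register" at a fixed offset. */
    #define DEFINE_REG(reg, off) \
    static inline uint32_t read_##reg(const void *base) \
    { uint32_t v; memcpy(&v, (const char *)base + (off), sizeof(v)); return v; } \
    static inline void write_##reg(uint32_t v, void *base) \
    { memcpy((char *)base + (off), &v, sizeof(v)); }

    DEFINE_REG(CR0, 0x00)
    DEFINE_REG(SR,  0x08)

    int main(void)
    {
        unsigned char regs[16] = { 0 };

        write_CR0(0x87, regs);
        write_SR(0x3, regs);
        printf("CR0=%#x SR=%#x\n", (unsigned)read_CR0(regs), (unsigned)read_SR(regs));
        return 0;
    }
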
@@ -106,7 +109,7 @@ struct driver_data {
        u32 *null_dma_buf;
 
        /* SSP register addresses */
-       void *ioaddr;
+       void __iomem *ioaddr;
        u32 ssdr_physical;
 
        /* SSP masks*/
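
The void * to void __iomem * conversions that make up most of this file's diff are annotations for sparse rather than functional changes. Roughly how the kernel defines the annotation in this era (quoted from memory of include/linux/compiler.h, so treat it as illustrative): a no-op for ordinary compilers and an address-space attribute that only sparse ("make C=1") evaluates, letting it flag code that mixes MMIO pointers with ordinary ones.

    #include <stdio.h>

    #ifdef __CHECKER__
    # define __iomem __attribute__((noderef, address_space(2)))
    #else
    # define __iomem
    #endif

    struct fake_dev {
        void __iomem *ioaddr;   /* MMIO pointer; sparse rejects plain dereferences */
    };

    int main(void)
    {
        struct fake_dev d = { .ioaddr = (void __iomem *)0 };

        /* Under sparse, passing d.ioaddr where a plain void * is expected
         * (as the old pxa2xx code did) reports an address-space mismatch. */
        printf("ioaddr=%p\n", (void *)d.ioaddr);
        return 0;
    }
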
@@ -173,7 +176,7 @@ static int flush(struct driver_data *drv_data)
 {
        unsigned long limit = loops_per_jiffy << 1;
 
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
 
        do {
                while (read_SSSR(reg) & SSSR_RNE) {
@@ -191,7 +194,7 @@ static void null_cs_control(u32 command)
 
 static int null_writer(struct driver_data *drv_data)
 {
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
        u8 n_bytes = drv_data->n_bytes;
 
        if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
@@ -206,7 +209,7 @@ static int null_writer(struct driver_data *drv_data)
 
 static int null_reader(struct driver_data *drv_data)
 {
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
        u8 n_bytes = drv_data->n_bytes;
 
        while ((read_SSSR(reg) & SSSR_RNE)
@@ -220,7 +223,7 @@ static int null_reader(struct driver_data *drv_data)
 
 static int u8_writer(struct driver_data *drv_data)
 {
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
 
        if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
                || (drv_data->tx == drv_data->tx_end))
@@ -234,7 +237,7 @@ static int u8_writer(struct driver_data *drv_data)
 
 static int u8_reader(struct driver_data *drv_data)
 {
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
 
        while ((read_SSSR(reg) & SSSR_RNE)
                && (drv_data->rx < drv_data->rx_end)) {
@@ -247,7 +250,7 @@ static int u8_reader(struct driver_data *drv_data)
 
 static int u16_writer(struct driver_data *drv_data)
 {
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
 
        if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
                || (drv_data->tx == drv_data->tx_end))
@@ -261,7 +264,7 @@ static int u16_writer(struct driver_data *drv_data)
 
 static int u16_reader(struct driver_data *drv_data)
 {
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
 
        while ((read_SSSR(reg) & SSSR_RNE)
                && (drv_data->rx < drv_data->rx_end)) {
@@ -274,7 +277,7 @@ static int u16_reader(struct driver_data *drv_data)
 
 static int u32_writer(struct driver_data *drv_data)
 {
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
 
        if (((read_SSSR(reg) & 0x00000f00) == 0x00000f00)
                || (drv_data->tx == drv_data->tx_end))
@@ -288,7 +291,7 @@ static int u32_writer(struct driver_data *drv_data)
 
 static int u32_reader(struct driver_data *drv_data)
 {
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
 
        while ((read_SSSR(reg) & SSSR_RNE)
                && (drv_data->rx < drv_data->rx_end)) {
@@ -412,7 +415,7 @@ static void giveback(struct driver_data *drv_data)
                msg->complete(msg->context);
 }
 
-static int wait_ssp_rx_stall(void *ioaddr)
+static int wait_ssp_rx_stall(void const __iomem *ioaddr)
 {
        unsigned long limit = loops_per_jiffy << 1;
 
@@ -432,9 +435,9 @@ static int wait_dma_channel_stop(int channel)
        return limit;
 }
 
-void dma_error_stop(struct driver_data *drv_data, const char *msg)
+static void dma_error_stop(struct driver_data *drv_data, const char *msg)
 {
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
 
        /* Stop and reset */
        DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
@@ -456,7 +459,7 @@ void dma_error_stop(struct driver_data *drv_data, const char *msg)
 
 static void dma_transfer_complete(struct driver_data *drv_data)
 {
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
        struct spi_message *msg = drv_data->cur_msg;
 
        /* Clear and disable interrupts on SSP and DMA channels*/
@@ -536,7 +539,7 @@ static void dma_handler(int channel, void *data)
 static irqreturn_t dma_transfer(struct driver_data *drv_data)
 {
        u32 irq_status;
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
 
        irq_status = read_SSSR(reg) & drv_data->mask_sr;
        if (irq_status & SSSR_ROR) {
@@ -570,7 +573,7 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data)
 
 static void int_error_stop(struct driver_data *drv_data, const char* msg)
 {
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
 
        /* Stop and reset SSP */
        write_SSSR(drv_data->clear_sr, reg);
@@ -588,7 +591,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
 
 static void int_transfer_complete(struct driver_data *drv_data)
 {
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
 
        /* Stop SSP */
        write_SSSR(drv_data->clear_sr, reg);
@@ -614,7 +617,7 @@ static void int_transfer_complete(struct driver_data *drv_data)
 
 static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
 {
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
 
        u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
                        drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
@@ -675,7 +678,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
 static irqreturn_t ssp_int(int irq, void *dev_id)
 {
        struct driver_data *drv_data = dev_id;
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
 
        if (!drv_data->cur_msg) {
 
@@ -695,7 +698,8 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
        return drv_data->transfer_handler(drv_data);
 }
 
-int set_dma_burst_and_threshold(struct chip_data *chip, struct spi_device *spi,
+static int set_dma_burst_and_threshold(struct chip_data *chip,
+                               struct spi_device *spi,
                                u8 bits_per_word, u32 *burst_code,
                                u32 *threshold)
 {
@@ -809,7 +813,7 @@ static void pump_transfers(unsigned long data)
        struct spi_transfer *previous = NULL;
        struct chip_data *chip = NULL;
        struct ssp_device *ssp = drv_data->ssp;
-       void *reg = drv_data->ioaddr;
+       void __iomem *reg = drv_data->ioaddr;
        u32 clk_div = 0;
        u8 bits = 0;
        u32 speed = 0;
@@ -1338,7 +1342,7 @@ static int __init pxa2xx_spi_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct pxa2xx_spi_master *platform_info;
        struct spi_master *master;
-       struct driver_data *drv_data = 0;
+       struct driver_data *drv_data = NULL;
        struct ssp_device *ssp;
        int status = 0;
 
index a9ac1fdb30948c5eba8ff864d0ee156d8db7e247..7fea3cf4588a37fa89589c8931862e704a62f156 100644 (file)
@@ -608,6 +608,7 @@ static void pump_transfers(unsigned long data)
        u8 width;
        u16 cr, dma_width, dma_config;
        u32 tranf_success = 1;
+       u8 full_duplex = 0;
 
        /* Get current state information */
        message = drv_data->cur_msg;
@@ -658,6 +659,7 @@ static void pump_transfers(unsigned long data)
        }
 
        if (transfer->rx_buf != NULL) {
+               full_duplex = transfer->tx_buf != NULL;
                drv_data->rx = transfer->rx_buf;
                drv_data->rx_end = drv_data->rx + transfer->len;
                dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n",
@@ -740,7 +742,8 @@ static void pump_transfers(unsigned long data)
         * successful use different way to r/w according to
         * drv_data->cur_chip->enable_dma
         */
-       if (drv_data->cur_chip->enable_dma && drv_data->len > 6) {
+       if (!full_duplex && drv_data->cur_chip->enable_dma
+                               && drv_data->len > 6) {
 
                disable_dma(drv_data->dma_channel);
                clear_dma_irqstat(drv_data->dma_channel);
@@ -828,7 +831,7 @@ static void pump_transfers(unsigned long data)
                /* IO mode write then read */
                dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");
 
-               if (drv_data->tx != NULL && drv_data->rx != NULL) {
+               if (full_duplex) {
                        /* full duplex mode */
                        BUG_ON((drv_data->tx_end - drv_data->tx) !=
                               (drv_data->rx_end - drv_data->rx));
index 71e881419cddaa6ba9c77f588ae6723ff633249a..96cc39ecb6e234f7340fbeb62322ea1831ba9e0d 100644 (file)
@@ -214,7 +214,7 @@ int spi_bitbang_setup(struct spi_device *spi)
                return retval;
 
        dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
-                       __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA),
+                       __func__, spi->mode & (SPI_CPOL | SPI_CPHA),
                        spi->bits_per_word, 2 * cs->nsecs);
 
        /* NOTE we _need_ to call chipselect() early, ideally with adapter
index d4ba640366b6c0039682bf24921e7736b76425d8..c730d05bfeb68b8db4cc2f3943e3c199821d1d97 100644 (file)
@@ -270,19 +270,26 @@ struct chip_data {
 
 static void pump_messages(struct work_struct *work);
 
-static int flush(struct driver_data *drv_data)
+static void flush(struct driver_data *drv_data)
 {
-       unsigned long limit = loops_per_jiffy << 1;
        void __iomem *regs = drv_data->regs;
-       volatile u32 d;
+       u32 control;
 
        dev_dbg(&drv_data->pdev->dev, "flush\n");
+
+       /* Wait for end of transaction */
        do {
-               while (readl(regs + SPI_INT_STATUS) & SPI_STATUS_RR)
-                       d = readl(regs + SPI_RXDATA);
-       } while ((readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH) && limit--);
+               control = readl(regs + SPI_CONTROL);
+       } while (control & SPI_CONTROL_XCH);
+
+       /* Release chip select if requested, transfer delays are
+          handled in pump_transfers */
+       if (drv_data->cs_change)
+               drv_data->cs_control(SPI_CS_DEASSERT);
 
-       return limit;
+       /* Disable SPI to flush FIFOs */
+       writel(control & ~SPI_CONTROL_SPIEN, regs + SPI_CONTROL);
+       writel(control, regs + SPI_CONTROL);
 }
 
 static void restore_state(struct driver_data *drv_data)
@@ -570,6 +577,7 @@ static void giveback(struct spi_message *message, struct driver_data *drv_data)
        writel(0, regs + SPI_INT_STATUS);
        writel(0, regs + SPI_DMA);
 
+       /* Unconditional deselect */
        drv_data->cs_control(SPI_CS_DEASSERT);
 
        message->state = NULL;
@@ -592,13 +600,10 @@ static void dma_err_handler(int channel, void *data, int errcode)
        /* Disable both rx and tx dma channels */
        imx_dma_disable(drv_data->rx_channel);
        imx_dma_disable(drv_data->tx_channel);
-
-       if (flush(drv_data) == 0)
-               dev_err(&drv_data->pdev->dev,
-                               "dma_err_handler - flush failed\n");
-
        unmap_dma_buffers(drv_data);
 
+       flush(drv_data);
+
        msg->state = ERROR_STATE;
        tasklet_schedule(&drv_data->pump_transfers);
 }
@@ -612,8 +617,7 @@ static void dma_tx_handler(int channel, void *data)
        imx_dma_disable(channel);
 
        /* Now waits for TX FIFO empty */
-       writel(readl(drv_data->regs + SPI_INT_STATUS) | SPI_INTEN_TE,
-                       drv_data->regs + SPI_INT_STATUS);
+       writel(SPI_INTEN_TE, drv_data->regs + SPI_INT_STATUS);
 }
 
 static irqreturn_t dma_transfer(struct driver_data *drv_data)
@@ -621,19 +625,18 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data)
        u32 status;
        struct spi_message *msg = drv_data->cur_msg;
        void __iomem *regs = drv_data->regs;
-       unsigned long limit;
 
        status = readl(regs + SPI_INT_STATUS);
 
-       if ((status & SPI_INTEN_RO) && (status & SPI_STATUS_RO)) {
+       if ((status & (SPI_INTEN_RO | SPI_STATUS_RO))
+                       == (SPI_INTEN_RO | SPI_STATUS_RO)) {
                writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
 
+               imx_dma_disable(drv_data->tx_channel);
                imx_dma_disable(drv_data->rx_channel);
                unmap_dma_buffers(drv_data);
 
-               if (flush(drv_data) == 0)
-                       dev_err(&drv_data->pdev->dev,
-                               "dma_transfer - flush failed\n");
+               flush(drv_data);
 
                dev_warn(&drv_data->pdev->dev,
                                "dma_transfer - fifo overun\n");
@@ -649,20 +652,17 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data)
 
                if (drv_data->rx) {
                        /* Wait end of transfer before read trailing data */
-                       limit = loops_per_jiffy << 1;
-                       while ((readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH) &&
-                                       limit--);
-
-                       if (limit == 0)
-                               dev_err(&drv_data->pdev->dev,
-                                       "dma_transfer - end of tx failed\n");
-                       else
-                               dev_dbg(&drv_data->pdev->dev,
-                                       "dma_transfer - end of tx\n");
+                       while (readl(regs + SPI_CONTROL) & SPI_CONTROL_XCH)
+                               cpu_relax();
 
                        imx_dma_disable(drv_data->rx_channel);
                        unmap_dma_buffers(drv_data);
 
+                       /* Release chip select if requested, transfer delays are
+                          handled in pump_transfers() */
+                       if (drv_data->cs_change)
+                               drv_data->cs_control(SPI_CS_DEASSERT);
+
                        /* Calculate number of trailing data and read them */
                        dev_dbg(&drv_data->pdev->dev,
                                "dma_transfer - test = 0x%08X\n",
@@ -676,19 +676,12 @@ static irqreturn_t dma_transfer(struct driver_data *drv_data)
                        /* Write only transfer */
                        unmap_dma_buffers(drv_data);
 
-                       if (flush(drv_data) == 0)
-                               dev_err(&drv_data->pdev->dev,
-                                       "dma_transfer - flush failed\n");
+                       flush(drv_data);
                }
 
                /* End of transfer, update total byte transfered */
                msg->actual_length += drv_data->len;
 
-               /* Release chip select if requested, transfer delays are
-                  handled in pump_transfers() */
-               if (drv_data->cs_change)
-                       drv_data->cs_control(SPI_CS_DEASSERT);
-
                /* Move to next transfer */
                msg->state = next_transfer(drv_data);
 
@@ -711,44 +704,43 @@ static irqreturn_t interrupt_wronly_transfer(struct driver_data *drv_data)
 
        status = readl(regs + SPI_INT_STATUS);
 
-       while (status & SPI_STATUS_TH) {
+       if (status & SPI_INTEN_TE) {
+               /* TXFIFO Empty Interrupt on the last transferred word */
+               writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
                dev_dbg(&drv_data->pdev->dev,
-                       "interrupt_wronly_transfer - status = 0x%08X\n", status);
+                       "interrupt_wronly_transfer - end of tx\n");
 
-               /* Pump data */
-               if (write(drv_data)) {
-                       writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN,
-                               regs + SPI_INT_STATUS);
+               flush(drv_data);
 
-                       dev_dbg(&drv_data->pdev->dev,
-                               "interrupt_wronly_transfer - end of tx\n");
+               /* Update total bytes transferred */
+               msg->actual_length += drv_data->len;
 
-                       if (flush(drv_data) == 0)
-                               dev_err(&drv_data->pdev->dev,
-                                       "interrupt_wronly_transfer - "
-                                       "flush failed\n");
+               /* Move to next transfer */
+               msg->state = next_transfer(drv_data);
 
-                       /* End of transfer, update total byte transfered */
-                       msg->actual_length += drv_data->len;
+               /* Schedule transfer tasklet */
+               tasklet_schedule(&drv_data->pump_transfers);
 
-                       /* Release chip select if requested, transfer delays are
-                          handled in pump_transfers */
-                       if (drv_data->cs_change)
-                               drv_data->cs_control(SPI_CS_DEASSERT);
+               return IRQ_HANDLED;
+       } else {
+               while (status & SPI_STATUS_TH) {
+                       dev_dbg(&drv_data->pdev->dev,
+                               "interrupt_wronly_transfer - status = 0x%08X\n",
+                               status);
 
-                       /* Move to next transfer */
-                       msg->state = next_transfer(drv_data);
+                       /* Pump data */
+                       if (write(drv_data)) {
+                               /* End of TXFIFO writes,
+                                  now wait until TXFIFO is empty */
+                               writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
+                               return IRQ_HANDLED;
+                       }
 
-                       /* Schedule transfer tasklet */
-                       tasklet_schedule(&drv_data->pump_transfers);
+                       status = readl(regs + SPI_INT_STATUS);
 
-                       return IRQ_HANDLED;
+                       /* We did something */
+                       handled = IRQ_HANDLED;
                }
-
-               status = readl(regs + SPI_INT_STATUS);
-
-               /* We did something */
-               handled = IRQ_HANDLED;
        }
 
        return handled;
@@ -758,45 +750,31 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
 {
        struct spi_message *msg = drv_data->cur_msg;
        void __iomem *regs = drv_data->regs;
-       u32 status;
+       u32 status, control;
        irqreturn_t handled = IRQ_NONE;
        unsigned long limit;
 
        status = readl(regs + SPI_INT_STATUS);
 
-       while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) {
+       if (status & SPI_INTEN_TE) {
+               /* TXFIFO Empty Interrupt on the last transferred word */
+               writel(status & ~SPI_INTEN, regs + SPI_INT_STATUS);
                dev_dbg(&drv_data->pdev->dev,
-                       "interrupt_transfer - status = 0x%08X\n", status);
-
-               if (status & SPI_STATUS_RO) {
-                       writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN,
-                               regs + SPI_INT_STATUS);
-
-                       dev_warn(&drv_data->pdev->dev,
-                               "interrupt_transfer - fifo overun\n"
-                               "    data not yet written = %d\n"
-                               "    data not yet read    = %d\n",
-                               data_to_write(drv_data),
-                               data_to_read(drv_data));
-
-                       if (flush(drv_data) == 0)
-                               dev_err(&drv_data->pdev->dev,
-                                       "interrupt_transfer - flush failed\n");
-
-                       msg->state = ERROR_STATE;
-                       tasklet_schedule(&drv_data->pump_transfers);
+                       "interrupt_transfer - end of tx\n");
 
-                       return IRQ_HANDLED;
-               }
-
-               /* Pump data */
-               read(drv_data);
-               if (write(drv_data)) {
-                       writel(readl(regs + SPI_INT_STATUS) & ~SPI_INTEN,
-                               regs + SPI_INT_STATUS);
+               if (msg->state == ERROR_STATE) {
+                       /* RXFIFO overrun was detected and message aborted */
+                       flush(drv_data);
+               } else {
+                       /* Wait for end of transaction */
+                       do {
+                               control = readl(regs + SPI_CONTROL);
+                       } while (control & SPI_CONTROL_XCH);
 
-                       dev_dbg(&drv_data->pdev->dev,
-                               "interrupt_transfer - end of tx\n");
+                       /* Release chip select if requested, transfer delays are
+                          handled in pump_transfers */
+                       if (drv_data->cs_change)
+                               drv_data->cs_control(SPI_CS_DEASSERT);
 
                        /* Read trailing bytes */
                        limit = loops_per_jiffy << 1;
@@ -810,27 +788,54 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
                                dev_dbg(&drv_data->pdev->dev,
                                        "interrupt_transfer - end of rx\n");
 
-                       /* End of transfer, update total byte transfered */
+                       /* Update total bytes transferred */
                        msg->actual_length += drv_data->len;
 
-                       /* Release chip select if requested, transfer delays are
-                          handled in pump_transfers */
-                       if (drv_data->cs_change)
-                               drv_data->cs_control(SPI_CS_DEASSERT);
-
                        /* Move to next transfer */
                        msg->state = next_transfer(drv_data);
+               }
 
-                       /* Schedule transfer tasklet */
-                       tasklet_schedule(&drv_data->pump_transfers);
+               /* Schedule transfer tasklet */
+               tasklet_schedule(&drv_data->pump_transfers);
 
-                       return IRQ_HANDLED;
-               }
+               return IRQ_HANDLED;
+       } else {
+               while (status & (SPI_STATUS_TH | SPI_STATUS_RO)) {
+                       dev_dbg(&drv_data->pdev->dev,
+                               "interrupt_transfer - status = 0x%08X\n",
+                               status);
+
+                       if (status & SPI_STATUS_RO) {
+                               /* RXFIFO overrun, abort message and wait
+                                  until TXFIFO is empty */
+                               writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
+
+                               dev_warn(&drv_data->pdev->dev,
+                                       "interrupt_transfer - fifo overun\n"
+                                       "    data not yet written = %d\n"
+                                       "    data not yet read    = %d\n",
+                                       data_to_write(drv_data),
+                                       data_to_read(drv_data));
+
+                               msg->state = ERROR_STATE;
+
+                               return IRQ_HANDLED;
+                       }
 
-               status = readl(regs + SPI_INT_STATUS);
+                       /* Pump data */
+                       read(drv_data);
+                       if (write(drv_data)) {
+                               /* End of TXFIFO writes,
+                                  now wait until TXFIFO is empty */
+                               writel(SPI_INTEN_TE, regs + SPI_INT_STATUS);
+                               return IRQ_HANDLED;
+                       }
 
-               /* We did something */
-               handled = IRQ_HANDLED;
+                       status = readl(regs + SPI_INT_STATUS);
+
+                       /* We did something */
+                       handled = IRQ_HANDLED;
+               }
        }
 
        return handled;
index be15a6213205c09b9c840b6ea726607b3e052e18..189f706b9e4b98b517d939c8ece9ed6146121659 100644 (file)
@@ -310,7 +310,7 @@ static int mpc83xx_spi_setup(struct spi_device *spi)
                return retval;
 
        dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec\n",
-               __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA),
+               __func__, spi->mode & (SPI_CPOL | SPI_CPHA),
                spi->bits_per_word, 2 * mpc83xx_spi->nsecs);
 
        /* NOTE we _need_ to call chipselect() early, ideally with adapter
index b7476b888197c7872ce24cde39a552f9c74095b4..0885cc357a371552dc2190a6da682f98a652abdb 100644 (file)
@@ -125,10 +125,10 @@ static int s3c24xx_spi_setupxfer(struct spi_device *spi,
        /* is clk = pclk / (2 * (pre+1)), or is it
         *    clk = (pclk * 2) / ( pre + 1) */
 
-       div = (div / 2) - 1;
+       div /= 2;
 
-       if (div < 0)
-               div = 1;
+       if (div > 0)
+               div -= 1;
 
        if (div > 255)
                div = 255;
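
The reworked prescaler logic above avoids the old underflow: with the controller clock being pclk / (2 * (pre + 1)), the register value should be div / 2 - 1 but must never go negative. A worked standalone sketch with illustrative clocks, assuming div was computed as pclk / target_hz just above this hunk:

    #include <stdio.h>

    int main(void)
    {
        unsigned long pclk = 50000000;  /* illustrative peripheral clock */
        unsigned long hz = 1000000;     /* illustrative spi->max_speed_hz */
        int div = pclk / hz;            /* 50 */

        /* Mirror of the fixed prescaler calculation */
        div /= 2;                       /* 25 */
        if (div > 0)
            div -= 1;                   /* 24 */
        if (div > 255)
            div = 255;

        printf("pre = %d, sck = %lu Hz\n", div, pclk / (2 * (div + 1)));
        return 0;
    }
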
@@ -169,7 +169,7 @@ static int s3c24xx_spi_setup(struct spi_device *spi)
        }
 
        dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n",
-               __FUNCTION__, spi->mode, spi->bits_per_word,
+               __func__, spi->mode, spi->bits_per_word,
                spi->max_speed_hz);
 
        return 0;
index cf6aef34fe2595353335875197b671a823d59e3b..113a0468ffcbd3291402c136322596ac75ab28e2 100644 (file)
@@ -151,13 +151,13 @@ static int xilinx_spi_setup_transfer(struct spi_device *spi,
        hz = (t) ? t->speed_hz : spi->max_speed_hz;
        if (bits_per_word != 8) {
                dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n",
-                       __FUNCTION__, bits_per_word);
+                       __func__, bits_per_word);
                return -EINVAL;
        }
 
        if (hz && xspi->speed_hz > hz) {
                dev_err(&spi->dev, "%s, unsupported clock rate %uHz\n",
-                       __FUNCTION__, hz);
+                       __func__, hz);
                return -EINVAL;
        }
 
@@ -181,7 +181,7 @@ static int xilinx_spi_setup(struct spi_device *spi)
 
        if (spi->mode & ~MODEBITS) {
                dev_err(&spi->dev, "%s, unsupported mode bits %x\n",
-                       __FUNCTION__, spi->mode & ~MODEBITS);
+                       __func__, spi->mode & ~MODEBITS);
                return -EINVAL;
        }
 
@@ -190,7 +190,7 @@ static int xilinx_spi_setup(struct spi_device *spi)
                return retval;
 
        dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n",
-               __FUNCTION__, spi->mode & MODEBITS, spi->bits_per_word, 0);
+               __func__, spi->mode & MODEBITS, spi->bits_per_word, 0);
 
        return 0;
 }
index 17e71d56f31ef777af16d6745cfa5394c7c85df1..4b628526df097562a94004a653a9ba16eed9dd18 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 menuconfig THERMAL
-       bool "Generic Thermal sysfs driver"
+       tristate "Generic Thermal sysfs driver"
        help
          Generic Thermal Sysfs driver offers a generic mechanism for
          thermal management. Usually it's made up of one or more thermal
@@ -11,4 +11,4 @@ menuconfig THERMAL
          Each thermal zone contains its own temperature, trip points,
          cooling devices.
          All platforms with ACPI thermal support can use this driver.
-         If you want this support, you should say Y here.
+         If you want this support, you should say Y or M here.
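
Making THERMAL a tristate allows the core to be built as a module; for a modular build the generated config defines CONFIG_THERMAL_MODULE instead of CONFIG_THERMAL, which is why the hwmon bridge added to thermal_sys.c further down also checks the _MODULE spellings. A compile-time sketch of that convention (option names reused purely for illustration; pass -DCONFIG_THERMAL or -DCONFIG_THERMAL_MODULE to emulate what kbuild's autoconf.h would define):

    #include <stdio.h>

    #if defined(CONFIG_THERMAL) || defined(CONFIG_THERMAL_MODULE)
    # define THERMAL_AVAILABLE 1
    #else
    # define THERMAL_AVAILABLE 0
    #endif

    int main(void)
    {
        printf("thermal support built in or modular: %d\n", THERMAL_AVAILABLE);
        return 0;
    }
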
index 8ef1232de37664b2bac7ffa5fc292ea635083969..31108a01c22e21d659aa68609cda0436f123e448 100644 (file)
@@ -2,4 +2,4 @@
 # Makefile for sensor chip drivers.
 #
 
-obj-$(CONFIG_THERMAL)          += thermal.o
+obj-$(CONFIG_THERMAL)          += thermal_sys.o
similarity index 81%
rename from drivers/thermal/thermal.c
rename to drivers/thermal/thermal_sys.c
index 7f79bbf652d7483c1a11f1b793973f0eddf345f4..6098787341f3d6ada67a195b7ee59d307b6a9c34 100644 (file)
@@ -31,7 +31,7 @@
 #include <linux/thermal.h>
 #include <linux/spinlock.h>
 
-MODULE_AUTHOR("Zhang Rui")
+MODULE_AUTHOR("Zhang Rui");
 MODULE_DESCRIPTION("Generic thermal management sysfs support");
 MODULE_LICENSE("GPL");
 
@@ -295,6 +295,164 @@ thermal_cooling_device_trip_point_show(struct device *dev,
 
 /* Device management */
 
+#if defined(CONFIG_HWMON) ||   \
+       (defined(CONFIG_HWMON_MODULE) && defined(CONFIG_THERMAL_MODULE))
+/* hwmon sys I/F */
+#include <linux/hwmon.h>
+static LIST_HEAD(thermal_hwmon_list);
+
+static ssize_t
+name_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct thermal_hwmon_device *hwmon = dev->driver_data;
+       return sprintf(buf, "%s\n", hwmon->type);
+}
+static DEVICE_ATTR(name, 0444, name_show, NULL);
+
+static ssize_t
+temp_input_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct thermal_hwmon_attr *hwmon_attr
+                       = container_of(attr, struct thermal_hwmon_attr, attr);
+       struct thermal_zone_device *tz
+                       = container_of(hwmon_attr, struct thermal_zone_device,
+                                      temp_input);
+
+       return tz->ops->get_temp(tz, buf);
+}
+
+static ssize_t
+temp_crit_show(struct device *dev, struct device_attribute *attr,
+               char *buf)
+{
+       struct thermal_hwmon_attr *hwmon_attr
+                       = container_of(attr, struct thermal_hwmon_attr, attr);
+       struct thermal_zone_device *tz
+                       = container_of(hwmon_attr, struct thermal_zone_device,
+                                      temp_crit);
+
+       return tz->ops->get_trip_temp(tz, 0, buf);
+}
+
+
+static int
+thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+       struct thermal_hwmon_device *hwmon;
+       int new_hwmon_device = 1;
+       int result;
+
+       mutex_lock(&thermal_list_lock);
+       list_for_each_entry(hwmon, &thermal_hwmon_list, node)
+               if (!strcmp(hwmon->type, tz->type)) {
+                       new_hwmon_device = 0;
+                       mutex_unlock(&thermal_list_lock);
+                       goto register_sys_interface;
+               }
+       mutex_unlock(&thermal_list_lock);
+
+       hwmon = kzalloc(sizeof(struct thermal_hwmon_device), GFP_KERNEL);
+       if (!hwmon)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&hwmon->tz_list);
+       strlcpy(hwmon->type, tz->type, THERMAL_NAME_LENGTH);
+       hwmon->device = hwmon_device_register(NULL);
+       if (IS_ERR(hwmon->device)) {
+               result = PTR_ERR(hwmon->device);
+               goto free_mem;
+       }
+       hwmon->device->driver_data = hwmon;
+       result = device_create_file(hwmon->device, &dev_attr_name);
+       if (result)
+               goto unregister_hwmon_device;
+
+ register_sys_interface:
+       tz->hwmon = hwmon;
+       hwmon->count++;
+
+       snprintf(tz->temp_input.name, THERMAL_NAME_LENGTH,
+                "temp%d_input", hwmon->count);
+       tz->temp_input.attr.attr.name = tz->temp_input.name;
+       tz->temp_input.attr.attr.mode = 0444;
+       tz->temp_input.attr.show = temp_input_show;
+       result = device_create_file(hwmon->device, &tz->temp_input.attr);
+       if (result)
+               goto unregister_hwmon_device;
+
+       if (tz->ops->get_crit_temp) {
+               unsigned long temperature;
+               if (!tz->ops->get_crit_temp(tz, &temperature)) {
+                       snprintf(tz->temp_crit.name, THERMAL_NAME_LENGTH,
+                               "temp%d_crit", hwmon->count);
+                       tz->temp_crit.attr.attr.name = tz->temp_crit.name;
+                       tz->temp_crit.attr.attr.mode = 0444;
+                       tz->temp_crit.attr.show = temp_crit_show;
+                       result = device_create_file(hwmon->device,
+                                                   &tz->temp_crit.attr);
+                       if (result)
+                               goto unregister_hwmon_device;
+               }
+       }
+
+       mutex_lock(&thermal_list_lock);
+       if (new_hwmon_device)
+               list_add_tail(&hwmon->node, &thermal_hwmon_list);
+       list_add_tail(&tz->hwmon_node, &hwmon->tz_list);
+       mutex_unlock(&thermal_list_lock);
+
+       return 0;
+
+ unregister_hwmon_device:
+       device_remove_file(hwmon->device, &tz->temp_crit.attr);
+       device_remove_file(hwmon->device, &tz->temp_input.attr);
+       if (new_hwmon_device) {
+               device_remove_file(hwmon->device, &dev_attr_name);
+               hwmon_device_unregister(hwmon->device);
+       }
+ free_mem:
+       if (new_hwmon_device)
+               kfree(hwmon);
+
+       return result;
+}
+
+static void
+thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+       struct thermal_hwmon_device *hwmon = tz->hwmon;
+
+       tz->hwmon = NULL;
+       device_remove_file(hwmon->device, &tz->temp_input.attr);
+       device_remove_file(hwmon->device, &tz->temp_crit.attr);
+
+       mutex_lock(&thermal_list_lock);
+       list_del(&tz->hwmon_node);
+       if (!list_empty(&hwmon->tz_list)) {
+               mutex_unlock(&thermal_list_lock);
+               return;
+       }
+       list_del(&hwmon->node);
+       mutex_unlock(&thermal_list_lock);
+
+       device_remove_file(hwmon->device, &dev_attr_name);
+       hwmon_device_unregister(hwmon->device);
+       kfree(hwmon);
+}
+#else
+static int
+thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+       return 0;
+}
+
+static void
+thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
+{
+}
+#endif
+
+
 /**
  * thermal_zone_bind_cooling_device - bind a cooling device to a thermal zone
  * @tz:                thermal zone device
@@ -642,6 +800,10 @@ struct thermal_zone_device *thermal_zone_device_register(char *type,
                        goto unregister;
        }
 
+       result = thermal_add_hwmon_sysfs(tz);
+       if (result)
+               goto unregister;
+
        mutex_lock(&thermal_list_lock);
        list_add_tail(&tz->node, &thermal_tz_list);
        if (ops->bind)
@@ -700,6 +862,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
        for (count = 0; count < tz->trips; count++)
                TRIP_POINT_ATTR_REMOVE(&tz->device, count);
 
+       thermal_remove_hwmon_sysfs(tz);
        release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
        idr_destroy(&tz->idr);
        mutex_destroy(&tz->lock);
index d470c72b737e5295f875ff1433cdfce8c7f5111f..5ea3093bc40f0ddea25b87252c1ef61a4f7ff878 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/device.h>
 #include <linux/firmware.h>
 #include <linux/mutex.h>
+#include <asm/unaligned.h>
 
 #include "usbatm.h"
 
@@ -573,7 +574,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
                               u32 *data, int size)
 {
        int ret, len;
-       u32 *buf;
+       __le32 *buf;
        int offb, offd;
        const int stride = CMD_PACKET_SIZE / (4 * 2) - 1;
        int buflen =  ((size - 1) / stride + 1 + size * 2) * 4;
@@ -837,7 +838,7 @@ static int cxacru_fw(struct usb_device *usb_dev, enum cxacru_fw_request fw,
                buf[offb++] = l;
                buf[offb++] = code1;
                buf[offb++] = code2;
-               *((u32 *) (buf + offb)) = cpu_to_le32(addr);
+               put_unaligned(cpu_to_le32(addr), (__le32 *)(buf + offb));
                offb += 4;
                addr += l;
                if(l)
@@ -874,8 +875,9 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
        int off;
        struct usbatm_data *usbatm = instance->usbatm;
        struct usb_device *usb_dev = usbatm->usb_dev;
-       u16 signature[] = { usb_dev->descriptor.idVendor, usb_dev->descriptor.idProduct };
-       u32 val;
+       __le16 signature[] = { usb_dev->descriptor.idVendor,
+                              usb_dev->descriptor.idProduct };
+       __le32 val;
 
        dbg("cxacru_upload_firmware");
 
@@ -955,7 +957,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
        /* Load config data (le32), doing one packet at a time */
        if (cf)
                for (off = 0; off < cf->size / 4; ) {
-                       u32 buf[CMD_PACKET_SIZE / 4 - 1];
+                       __le32 buf[CMD_PACKET_SIZE / 4 - 1];
                        int i, len = min_t(int, cf->size / 4 - off, CMD_PACKET_SIZE / 4 / 2 - 1);
                        buf[0] = cpu_to_le32(len);
                        for (i = 0; i < len; i++, off++) {
index abb7d7410e63c7588898300c362307341ba9c116..5f71ff3aee35f01203e36b3344f2d7dec3d1f40a 100644 (file)
@@ -305,8 +305,6 @@ enum {
  */
 
 #define FW_GET_BYTE(p) *((__u8 *) (p))
-#define FW_GET_WORD(p) le16_to_cpu(get_unaligned((__le16 *) (p)))
-#define FW_GET_LONG(p) le32_to_cpu(get_unaligned((__le32 *) (p)))
 
 #define FW_DIR "ueagle-atm/"
 #define NB_MODEM 4
@@ -621,7 +619,7 @@ static void uea_upload_pre_firmware(const struct firmware *fw_entry, void *conte
        if (size < 4)
                goto err_fw_corrupted;
 
-       crc = FW_GET_LONG(pfw);
+       crc = get_unaligned_le32(pfw);
        pfw += 4;
        size -= 4;
        if (crc32_be(0, pfw, size) != crc)
@@ -640,7 +638,7 @@ static void uea_upload_pre_firmware(const struct firmware *fw_entry, void *conte
 
        while (size > 3) {
                u8 len = FW_GET_BYTE(pfw);
-               u16 add = FW_GET_WORD(pfw + 1);
+               u16 add = get_unaligned_le16(pfw + 1);
 
                size -= len + 3;
                if (size < 0)
@@ -738,7 +736,7 @@ static int check_dsp_e1(u8 *dsp, unsigned int len)
 
        for (i = 0; i < pagecount; i++) {
 
-               pageoffset = FW_GET_LONG(dsp + p);
+               pageoffset = get_unaligned_le32(dsp + p);
                p += 4;
 
                if (pageoffset == 0)
@@ -759,7 +757,7 @@ static int check_dsp_e1(u8 *dsp, unsigned int len)
                                return 1;
 
                        pp += 2;        /* skip blockaddr */
-                       blocksize = FW_GET_WORD(dsp + pp);
+                       blocksize = get_unaligned_le16(dsp + pp);
                        pp += 2;
 
                        /* enough space for block data? */
@@ -928,7 +926,7 @@ static void uea_load_page_e1(struct work_struct *work)
                goto bad1;
 
        p += 4 * pageno;
-       pageoffset = FW_GET_LONG(p);
+       pageoffset = get_unaligned_le32(p);
 
        if (pageoffset == 0)
                goto bad1;
@@ -945,10 +943,10 @@ static void uea_load_page_e1(struct work_struct *work)
        bi.wOvlOffset = cpu_to_le16(ovl | 0x8000);
 
        for (i = 0; i < blockcount; i++) {
-               blockaddr = FW_GET_WORD(p);
+               blockaddr = get_unaligned_le16(p);
                p += 2;
 
-               blocksize = FW_GET_WORD(p);
+               blocksize = get_unaligned_le16(p);
                p += 2;
 
                bi.wSize = cpu_to_le16(blocksize);
@@ -996,7 +994,7 @@ static void __uea_load_page_e4(struct uea_softc *sc, u8 pageno, int boot)
                blockoffset = sc->dsp_firm->data + le32_to_cpu(blockidx->PageOffset);
 
                bi.dwSize = cpu_to_be32(blocksize);
-               bi.dwAddress = swab32(blockidx->PageAddress);
+               bi.dwAddress = cpu_to_be32(le32_to_cpu(blockidx->PageAddress));
 
                uea_dbg(INS_TO_USBDEV(sc),
                       "sending block %u for DSP page %u size %u address %x\n",
@@ -1040,7 +1038,7 @@ static void uea_load_page_e4(struct work_struct *work)
                return;
 
        p = (struct l1_code *) sc->dsp_firm->data;
-       if (pageno >= p->page_header[0].PageNumber) {
+       if (pageno >= le16_to_cpu(p->page_header[0].PageNumber)) {
                uea_err(INS_TO_USBDEV(sc), "invalid DSP page %u requested\n", pageno);
                return;
        }
@@ -1065,7 +1063,7 @@ static void uea_load_page_e4(struct work_struct *work)
        bi.bPageNumber = 0xff;
        bi.wReserved = cpu_to_be16(UEA_RESERVED);
        bi.dwSize = cpu_to_be32(E4_PAGE_BYTES(p->page_header[0].PageSize));
-       bi.dwAddress = swab32(p->page_header[0].PageAddress);
+       bi.dwAddress = cpu_to_be32(le32_to_cpu(p->page_header[0].PageAddress));
 
        /* send block info through the IDMA pipe */
        if (uea_idma_write(sc, &bi, E4_BLOCK_INFO_SIZE))
@@ -1152,9 +1150,9 @@ static int uea_cmv_e1(struct uea_softc *sc,
        cmv.bDirection = E1_HOSTTOMODEM;
        cmv.bFunction = function;
        cmv.wIndex = cpu_to_le16(sc->cmv_dsc.e1.idx);
-       put_unaligned(cpu_to_le32(address), &cmv.dwSymbolicAddress);
+       put_unaligned_le32(address, &cmv.dwSymbolicAddress);
        cmv.wOffsetAddress = cpu_to_le16(offset);
-       put_unaligned(cpu_to_le32(data >> 16 | data << 16), &cmv.dwData);
+       put_unaligned_le32(data >> 16 | data << 16, &cmv.dwData);
 
        ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv);
        if (ret < 0)
@@ -1646,7 +1644,7 @@ static int request_cmvs(struct uea_softc *sc,
        if (size < 5)
                goto err_fw_corrupted;
 
-       crc = FW_GET_LONG(data);
+       crc = get_unaligned_le32(data);
        data += 4;
        size -= 4;
        if (crc32_be(0, data, size) != crc)
@@ -1696,9 +1694,9 @@ static int uea_send_cmvs_e1(struct uea_softc *sc)
                        "please update your firmware\n");
 
                for (i = 0; i < len; i++) {
-                       ret = uea_write_cmv_e1(sc, FW_GET_LONG(&cmvs_v1[i].address),
-                                               FW_GET_WORD(&cmvs_v1[i].offset),
-                                               FW_GET_LONG(&cmvs_v1[i].data));
+                       ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v1[i].address),
+                                               get_unaligned_le16(&cmvs_v1[i].offset),
+                                               get_unaligned_le32(&cmvs_v1[i].data));
                        if (ret < 0)
                                goto out;
                }
@@ -1706,9 +1704,9 @@ static int uea_send_cmvs_e1(struct uea_softc *sc)
                struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr;
 
                for (i = 0; i < len; i++) {
-                       ret = uea_write_cmv_e1(sc, FW_GET_LONG(&cmvs_v2[i].address),
-                                               (u16) FW_GET_LONG(&cmvs_v2[i].offset),
-                                               FW_GET_LONG(&cmvs_v2[i].data));
+                       ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v2[i].address),
+                                               (u16) get_unaligned_le32(&cmvs_v2[i].offset),
+                                               get_unaligned_le32(&cmvs_v2[i].data));
                        if (ret < 0)
                                goto out;
                }
@@ -1759,10 +1757,10 @@ static int uea_send_cmvs_e4(struct uea_softc *sc)
 
                for (i = 0; i < len; i++) {
                        ret = uea_write_cmv_e4(sc, 1,
-                                               FW_GET_LONG(&cmvs_v2[i].group),
-                                               FW_GET_LONG(&cmvs_v2[i].address),
-                                               FW_GET_LONG(&cmvs_v2[i].offset),
-                                               FW_GET_LONG(&cmvs_v2[i].data));
+                                               get_unaligned_le32(&cmvs_v2[i].group),
+                                               get_unaligned_le32(&cmvs_v2[i].address),
+                                               get_unaligned_le32(&cmvs_v2[i].offset),
+                                               get_unaligned_le32(&cmvs_v2[i].data));
                        if (ret < 0)
                                goto out;
                }
@@ -1964,7 +1962,7 @@ static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr)
                if (UEA_CHIP_VERSION(sc) == ADI930
                                && cmv->bFunction ==  E1_MAKEFUNCTION(2, 2)) {
                        cmv->wIndex = cpu_to_le16(dsc->idx);
-                       put_unaligned(cpu_to_le32(dsc->address), &cmv->dwSymbolicAddress);
+                       put_unaligned_le32(dsc->address, &cmv->dwSymbolicAddress);
                        cmv->wOffsetAddress = cpu_to_le16(dsc->offset);
                } else
                        goto bad2;
@@ -1978,11 +1976,11 @@ static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr)
 
        /* in case of MEMACCESS */
        if (le16_to_cpu(cmv->wIndex) != dsc->idx ||
-           le32_to_cpu(get_unaligned(&cmv->dwSymbolicAddress)) != dsc->address ||
+           get_unaligned_le32(&cmv->dwSymbolicAddress) != dsc->address ||
            le16_to_cpu(cmv->wOffsetAddress) != dsc->offset)
                goto bad2;
 
-       sc->data = le32_to_cpu(get_unaligned(&cmv->dwData));
+       sc->data = get_unaligned_le32(&cmv->dwData);
        sc->data = sc->data << 16 | sc->data >> 16;
 
        wake_up_cmv_ack(sc);
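
The conversions above replace the driver-private FW_GET_WORD/FW_GET_LONG macros and the open-coded le32_to_cpu(get_unaligned(...)) pairs with the combined get_unaligned_le16/le32 and put_unaligned_le32 helpers, which handle both the potentially unaligned pointer and the fixed little-endian byte order in one call. A userspace analogue of what such a helper does (the kernel versions additionally pick the cheapest access the architecture permits):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Read/write a little-endian 32-bit value at an arbitrary byte offset. */
    static uint32_t get_le32(const void *p)
    {
        const uint8_t *b = p;

        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
    }

    static void put_le32(uint32_t v, void *p)
    {
        uint8_t b[4] = {
            (uint8_t)v, (uint8_t)(v >> 8),
            (uint8_t)(v >> 16), (uint8_t)(v >> 24)
        };

        memcpy(p, b, sizeof(b));
    }

    int main(void)
    {
        unsigned char fw[8] = { 0 };

        put_le32(0x11223344u, fw + 1);          /* deliberately unaligned */
        printf("read back: %#x\n", (unsigned)get_le32(fw + 1));
        return 0;
    }
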
index 7b572e75e73cac3046e2662d86f6521f4885db2a..cefe7f2c6f750d5ac8ab3c0483b0a535cb22c273 100644 (file)
@@ -280,7 +280,7 @@ static void acm_ctrl_irq(struct urb *urb)
 
                case USB_CDC_NOTIFY_SERIAL_STATE:
 
-                       newctrl = le16_to_cpu(get_unaligned((__le16 *) data));
+                       newctrl = get_unaligned_le16(data);
 
                        if (acm->tty && !acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
                                dbg("calling hangup");
index 8607846e3c3f7bf1f2998953b61517756a3a4c14..1d253dd4ea8143040b4175286331d349a95a1e9c 100644 (file)
@@ -773,7 +773,7 @@ int __init usbfs_init(void)
        usb_register_notify(&usbfs_nb);
 
        /* create mount point for usbfs */
-       usbdir = proc_mkdir("usb", proc_bus);
+       usbdir = proc_mkdir("bus/usb", NULL);
 
        return 0;
 }
@@ -783,6 +783,6 @@ void usbfs_cleanup(void)
        usb_unregister_notify(&usbfs_nb);
        unregister_filesystem(&usb_fs_type);
        if (usbdir)
-               remove_proc_entry("usb", proc_bus);
+               remove_proc_entry("bus/usb", NULL);
 }
 
index fc6f3483be4473c25e21620a8be1a84bb5e9ea79..ce337cb5d13706a763b42193adf872efe38b309b 100644 (file)
@@ -328,6 +328,7 @@ udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
        u32                     tmp;
        unsigned long           iflags;
        u8 udc_csr_epix;
+       unsigned                maxpacket;
 
        if (!usbep
                        || usbep->name == ep0_string
@@ -354,9 +355,10 @@ udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
        writel(tmp, &dev->ep[ep->num].regs->ctl);
 
        /* set max packet size */
+       maxpacket = le16_to_cpu(desc->wMaxPacketSize);
        tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
-       tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, UDC_EP_MAX_PKT_SIZE);
-       ep->ep.maxpacket = desc->wMaxPacketSize;
+       tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
+       ep->ep.maxpacket = maxpacket;
        writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);
 
        /* IN ep */
@@ -370,8 +372,8 @@ udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
                /* double buffering: fifo size = 2 x max packet size */
                tmp = AMD_ADDBITS(
                                tmp,
-                               desc->wMaxPacketSize * UDC_EPIN_BUFF_SIZE_MULT
-                                               / UDC_DWORD_BYTES,
+                               maxpacket * UDC_EPIN_BUFF_SIZE_MULT
+                                         / UDC_DWORD_BYTES,
                                UDC_EPIN_BUFF_SIZE);
                writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);
 
@@ -390,7 +392,7 @@ udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
 
                /* set max packet size UDC CSR  */
                tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
-               tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize,
+               tmp = AMD_ADDBITS(tmp, maxpacket,
                                        UDC_CSR_NE_MAX_PKT);
                writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
 
@@ -407,7 +409,7 @@ udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
        /* set ep values */
        tmp = readl(&dev->csr->ne[udc_csr_epix]);
        /* max packet */
-       tmp = AMD_ADDBITS(tmp, desc->wMaxPacketSize, UDC_CSR_NE_MAX_PKT);
+       tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
        /* ep number */
        tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
        /* ep direction */
@@ -2832,7 +2834,7 @@ __acquires(dev->lock)
                /* make usb request for gadget driver */
                memset(&setup_data, 0 , sizeof(union udc_setup_data));
                setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
-               setup_data.request.wValue = dev->cur_config;
+               setup_data.request.wValue = cpu_to_le16(dev->cur_config);
 
                /* programm the NE registers */
                for (i = 0; i < UDC_EP_NUM; i++) {
@@ -2881,8 +2883,8 @@ __acquires(dev->lock)
                memset(&setup_data, 0 , sizeof(union udc_setup_data));
                setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
                setup_data.request.bRequestType = USB_RECIP_INTERFACE;
-               setup_data.request.wValue = dev->cur_alt;
-               setup_data.request.wIndex = dev->cur_intf;
+               setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
+               setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
 
                DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
                                dev->cur_alt, dev->cur_intf);
index 9b913afb2e6dab7beb727a7400516247b4863a59..274c60a970cdcf8a7f3f0409e0d1512ec653477f 100644 (file)
@@ -231,6 +231,7 @@ static int proc_udc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations proc_ops = {
+       .owner          = THIS_MODULE,
        .open           = proc_udc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -239,15 +240,7 @@ static const struct file_operations proc_ops = {
 
 static void create_debug_file(struct at91_udc *udc)
 {
-       struct proc_dir_entry *pde;
-
-       pde = create_proc_entry (debug_filename, 0, NULL);
-       udc->pde = pde;
-       if (pde == NULL)
-               return;
-
-       pde->proc_fops = &proc_ops;
-       pde->data = udc;
+       udc->pde = proc_create_data(debug_filename, 0, NULL, &proc_ops, udc);
 }
 
 static void remove_debug_file(struct at91_udc *udc)
index 66293105d136646d3e739ef330cde96ff112cec3..42036192a03c4cc4abb61078b591dfcac1da1a70 100644 (file)
@@ -1555,8 +1555,7 @@ hub_descriptor (struct usb_hub_descriptor *desc)
        memset (desc, 0, sizeof *desc);
        desc->bDescriptorType = 0x29;
        desc->bDescLength = 9;
-       desc->wHubCharacteristics = (__force __u16)
-                       (__constant_cpu_to_le16 (0x0001));
+       desc->wHubCharacteristics = cpu_to_le16(0x0001);
        desc->bNbrPorts = 1;
        desc->bitmap [0] = 0xff;
        desc->bitmap [1] = 0xff;
index ff3a8513e64dbbcdc8d15f5d944693317acdb459..7f4d4828e3aabaca7db67bd512017f094b419992 100644 (file)
@@ -229,7 +229,7 @@ static const struct usb_ac_header_descriptor_1 ac_header_desc = {
        .bDescriptorType =      USB_DT_CS_INTERFACE,
        .bDescriptorSubtype =   USB_MS_HEADER,
        .bcdADC =               __constant_cpu_to_le16(0x0100),
-       .wTotalLength =         USB_DT_AC_HEADER_SIZE(1),
+       .wTotalLength =         __constant_cpu_to_le16(USB_DT_AC_HEADER_SIZE(1)),
        .bInCollection =        1,
        .baInterfaceNr = {
                [0] =           GMIDI_MS_INTERFACE,
@@ -253,9 +253,9 @@ static const struct usb_ms_header_descriptor ms_header_desc = {
        .bDescriptorType =      USB_DT_CS_INTERFACE,
        .bDescriptorSubtype =   USB_MS_HEADER,
        .bcdMSC =               __constant_cpu_to_le16(0x0100),
-       .wTotalLength =         USB_DT_MS_HEADER_SIZE
+       .wTotalLength =         __constant_cpu_to_le16(USB_DT_MS_HEADER_SIZE
                                + 2*USB_DT_MIDI_IN_SIZE
-                               + 2*USB_DT_MIDI_OUT_SIZE(1),
+                               + 2*USB_DT_MIDI_OUT_SIZE(1)),
 };
 
 #define JACK_IN_EMB    1
index 64a592cbbe7b7f4aa3b3236ebd165ce726cdb6cb..be6613afedbfd317f907a4d252973b4b9c73b787 100644 (file)
@@ -127,7 +127,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
 
        /* enabling the no-toggle interrupt mode would need an api hook */
        mode = 0;
-       max = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize));
+       max = get_unaligned_le16(&desc->wMaxPacketSize);
        switch (max) {
        case 64:        mode++;
        case 32:        mode++;
index be0a4c1f80a2d66fa8c26ed66f9aacc81093e811..f118f00f14662e4b884671cb4f4084be4e56cc48 100644 (file)
@@ -485,7 +485,7 @@ struct m66592 {
        struct m66592_ep        *epaddr2ep[16];
 
        struct usb_request      *ep0_req;       /* for internal request */
-       u16                     ep0_data;       /* for internal request */
+       __le16                  ep0_data;       /* for internal request */
        u16                     old_vbus;
 
        struct timer_list       timer;
index 95f7662376f16e41e463ec3feef0c243842517f9..881d74c3d9646b482130b2afcf651ed974a97f6a 100644 (file)
@@ -2504,6 +2504,7 @@ static int proc_udc_open(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations proc_ops = {
+       .owner          = THIS_MODULE,
        .open           = proc_udc_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
@@ -2512,11 +2513,7 @@ static const struct file_operations proc_ops = {
 
 static void create_proc_file(void)
 {
-       struct proc_dir_entry *pde;
-
-       pde = create_proc_entry (proc_filename, 0, NULL);
-       if (pde)
-               pde->proc_fops = &proc_ops;
+       proc_create(proc_filename, 0, NULL, &proc_ops);
 }
 
 static void remove_proc_file(void)
index bd58dd504f6f4ec144048432b51a8d8737047751..d0677f5d3cd5f3f8b88b5c73573113f7fed672d1 100644 (file)
@@ -183,14 +183,10 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
                DBG("query OID %08x value, len %d:\n", OID, buf_len);
                for (i = 0; i < buf_len; i += 16) {
                        DBG("%03d: %08x %08x %08x %08x\n", i,
-                               le32_to_cpu(get_unaligned((__le32 *)
-                                       &buf[i])),
-                               le32_to_cpu(get_unaligned((__le32 *)
-                                       &buf[i + 4])),
-                               le32_to_cpu(get_unaligned((__le32 *)
-                                       &buf[i + 8])),
-                               le32_to_cpu(get_unaligned((__le32 *)
-                                       &buf[i + 12])));
+                               get_unaligned_le32(&buf[i]),
+                               get_unaligned_le32(&buf[i + 4]),
+                               get_unaligned_le32(&buf[i + 8]),
+                               get_unaligned_le32(&buf[i + 12]));
                }
        }
 
@@ -666,7 +662,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len,
                break;
        case OID_PNP_QUERY_POWER:
                DBG("%s: OID_PNP_QUERY_POWER D%d\n", __func__,
-                               le32_to_cpu(get_unaligned((__le32 *)buf)) - 1);
+                               get_unaligned_le32(buf) - 1);
                /* only suspend is a real power state, and
                 * it can't be entered by OID_PNP_SET_POWER...
                 */
@@ -705,14 +701,10 @@ static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len,
                DBG("set OID %08x value, len %d:\n", OID, buf_len);
                for (i = 0; i < buf_len; i += 16) {
                        DBG("%03d: %08x %08x %08x %08x\n", i,
-                               le32_to_cpu(get_unaligned((__le32 *)
-                                       &buf[i])),
-                               le32_to_cpu(get_unaligned((__le32 *)
-                                       &buf[i + 4])),
-                               le32_to_cpu(get_unaligned((__le32 *)
-                                       &buf[i + 8])),
-                               le32_to_cpu(get_unaligned((__le32 *)
-                                       &buf[i + 12])));
+                               get_unaligned_le32(&buf[i]),
+                               get_unaligned_le32(&buf[i + 4]),
+                               get_unaligned_le32(&buf[i + 8]),
+                               get_unaligned_le32(&buf[i + 12]));
                }
        }
 
@@ -726,8 +718,7 @@ static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len,
                 *      PROMISCUOUS, DIRECTED,
                 *      MULTICAST, ALL_MULTICAST, BROADCAST
                 */
-               *params->filter = (u16) le32_to_cpu(get_unaligned(
-                               (__le32 *)buf));
+               *params->filter = (u16)get_unaligned_le32(buf);
                DBG("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n",
                        __func__, *params->filter);
 
@@ -777,7 +768,7 @@ update_linkstate:
                 * resuming, Windows forces a reset, and then SET_POWER D0.
                 * FIXME ... then things go batty; Windows wedges itself.
                 */
-               i = le32_to_cpu(get_unaligned((__le32 *)buf));
+               i = get_unaligned_le32(buf);
                DBG("%s: OID_PNP_SET_POWER D%d\n", __func__, i - 1);
                switch (i) {
                case NdisDeviceStateD0:
@@ -1064,8 +1055,8 @@ int rndis_msg_parser (u8 configNr, u8 *buf)
                return -ENOMEM;
 
        tmp = (__le32 *) buf;
-       MsgType   = le32_to_cpu(get_unaligned(tmp++));
-       MsgLength = le32_to_cpu(get_unaligned(tmp++));
+       MsgType   = get_unaligned_le32(tmp++);
+       MsgLength = get_unaligned_le32(tmp++);
 
        if (configNr >= RNDIS_MAX_CONFIGS)
                return -ENOTSUPP;
@@ -1296,10 +1287,9 @@ int rndis_rm_hdr(struct sk_buff *skb)
        tmp++;
 
        /* DataOffset, DataLength */
-       if (!skb_pull(skb, le32_to_cpu(get_unaligned(tmp++))
-                       + 8 /* offset of DataOffset */))
+       if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8))
                return -EOVERFLOW;
-       skb_trim(skb, le32_to_cpu(get_unaligned(tmp++)));
+       skb_trim(skb, get_unaligned_le32(tmp++));
 
        return 0;
 }
index 433b3f44f42e2dc36e38e2b7dc85be3e48b3785a..8d158e5640e37782ea0dbed21c08277092a2614e 100644 (file)
@@ -170,7 +170,7 @@ static int gs_open(struct tty_struct *tty, struct file *file);
 static void gs_close(struct tty_struct *tty, struct file *file);
 static int gs_write(struct tty_struct *tty,
        const unsigned char *buf, int count);
-static void gs_put_char(struct tty_struct *tty, unsigned char ch);
+static int gs_put_char(struct tty_struct *tty, unsigned char ch);
 static void gs_flush_chars(struct tty_struct *tty);
 static int gs_write_room(struct tty_struct *tty);
 static int gs_chars_in_buffer(struct tty_struct *tty);
@@ -883,14 +883,15 @@ exit:
 /*
  * gs_put_char
  */
-static void gs_put_char(struct tty_struct *tty, unsigned char ch)
+static int gs_put_char(struct tty_struct *tty, unsigned char ch)
 {
        unsigned long flags;
        struct gs_port *port = tty->driver_data;
+       int ret = 0;
 
        if (port == NULL) {
                pr_err("gs_put_char: NULL port pointer\n");
-               return;
+               return 0;
        }
 
        gs_debug("gs_put_char: (%d,%p) char=0x%x, called from %p\n",
@@ -910,10 +911,11 @@ static void gs_put_char(struct tty_struct *tty, unsigned char ch)
                goto exit;
        }
 
-       gs_buf_put(port->port_write_buf, &ch, 1);
+       ret = gs_buf_put(port->port_write_buf, &ch, 1);
 
 exit:
        spin_unlock_irqrestore(&port->port_lock, flags);
+       return ret;
 }
 
 /*
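
The signature change here tracks a tty-core update from the same release: .put_char now returns how many characters were actually queued (0 or 1), so callers can fall back to write() instead of silently losing data when the driver buffer is full. A minimal sketch of the expected shape; example_port, example_buf and example_buf_put are hypothetical stand-ins for a driver's own port structure and buffer helper:

    #include <linux/tty.h>

    /* hypothetical per-port state and buffer helper, for illustration only */
    struct example_port { struct example_buf *write_buf; };
    extern int example_buf_put(struct example_buf *b,
                               const unsigned char *data, int len);

    /* return 1 if the character was buffered, 0 if it had to be dropped */
    static int example_put_char(struct tty_struct *tty, unsigned char ch)
    {
            struct example_port *port = tty->driver_data;

            if (!port)
                    return 0;
            return example_buf_put(port->write_buf, &ch, 1);
    }
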
index 878e428a0ec12b3fd45c9298b40fa058220f5881..4154be375c7a01a48ac3ab26eeeeb78c7c62b1ab 100644 (file)
@@ -74,7 +74,7 @@ static int utf8_to_utf16le(const char *s, __le16 *cp, unsigned len)
                                goto fail;
                } else
                        uchar = c;
-               put_unaligned (cpu_to_le16 (uchar), cp++);
+               put_unaligned_le16(uchar, cp++);
                count++;
                len--;
        }
index 4af90df8e7dec7e2aa3ea96a88b8de05de1a9f12..0f82fdcaef096b65e384ab07312e7e001d211a56 100644 (file)
@@ -398,7 +398,7 @@ static void qh_lines (
        unsigned                size = *sizep;
        char                    *next = *nextp;
        char                    mark;
-       u32                     list_end = EHCI_LIST_END(ehci);
+       __le32                  list_end = EHCI_LIST_END(ehci);
 
        if (qh->hw_qtd_next == list_end)        /* NEC does this */
                mark = '@';
index efffef64f59daf68ecc80d571921cbb10f8a566e..382587c4457ccae83396be87280da2bf0da190ad 100644 (file)
@@ -530,7 +530,7 @@ ehci_hub_descriptor (
        if (HCS_INDICATOR (ehci->hcs_params))
                temp |= 0x0080;         /* per-port indicators (LEDs) */
 #endif
-       desc->wHubCharacteristics = (__force __u16)cpu_to_le16 (temp);
+       desc->wHubCharacteristics = cpu_to_le16(temp);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -770,7 +770,7 @@ static int ehci_hub_control (
        if (status & ~0xffff)   /* only if wPortChange is interesting */
 #endif
                dbg_port (ehci, "GetStatus", wIndex + 1, temp);
-               put_unaligned(cpu_to_le32 (status), (__le32 *) buf);
+               put_unaligned_le32(status, buf);
                break;
        case SetHubFeature:
                switch (wValue) {
index 5ae689139dd08f8a92bd21e65439a9943cc540f6..b85b54160cdaeade20792ad9878d9751606f3b50 100644 (file)
@@ -285,7 +285,7 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
        int                     stopped;
        unsigned                count = 0;
        u8                      state;
-       u32                     halt = HALT_BIT(ehci);
+       __le32                  halt = HALT_BIT(ehci);
 
        if (unlikely (list_empty (&qh->qtd_list)))
                return count;
@@ -883,7 +883,7 @@ static struct ehci_qh *qh_append_tds (
 )
 {
        struct ehci_qh          *qh = NULL;
-       u32                     qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
+       __hc32                  qh_addr_mask = cpu_to_hc32(ehci, 0x7f);
 
        qh = (struct ehci_qh *) *ptr;
        if (unlikely (qh == NULL)) {
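
The EHCI hunks in this stretch only change variable types (u32 to __le32 or __hc32). These are sparse "bitwise" annotations: the values are already in bus byte order, and the stricter type lets "make C=1" flag any place where a CPU-order integer is mixed in without going through cpu_to_le32()/cpu_to_hc32(). A rough illustration with a hypothetical descriptor field:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct example_qtd {
            __le32 hw_token;        /* kept in bus (little-endian) order */
    };

    static void example_set_halt(struct example_qtd *qtd)
    {
            /* assigning a plain 0x40 here would trigger a sparse warning */
            qtd->hw_token = cpu_to_le32(0x40);
    }
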
index 5be3bb3e6a9d791aa6262af9e429f724f20066f3..17dc2eccda83e7d6dca6bfd725fbd87fdce8b649 100644 (file)
@@ -736,14 +736,14 @@ static int ohci_hub_control (
                break;
        case GetHubStatus:
                temp = roothub_status (ohci) & ~(RH_HS_CRWE | RH_HS_DRWE);
-               put_unaligned(cpu_to_le32 (temp), (__le32 *) buf);
+               put_unaligned_le32(temp, buf);
                break;
        case GetPortStatus:
                if (!wIndex || wIndex > ports)
                        goto error;
                wIndex--;
                temp = roothub_portstatus (ohci, wIndex);
-               put_unaligned(cpu_to_le32 (temp), (__le32 *) buf);
+               put_unaligned_le32(temp, buf);
 
 #ifndef        OHCI_VERBOSE_DEBUG
        if (*(u16*)(buf+2))     /* only if wPortChange is interesting */
index f4fa93dabdde99d77d3e632876b4d22ba5c80a83..16667342b3c324f694300c54d3d3532674c417c5 100644 (file)
@@ -993,7 +993,7 @@ static void prepare_setup_packet(struct r8a66597 *r8a66597,
                                 struct r8a66597_td *td)
 {
        int i;
-       u16 *p = (u16 *)td->urb->setup_packet;
+       __le16 *p = (__le16 *)td->urb->setup_packet;
        unsigned long setup_addr = USBREQ;
 
        r8a66597_write(r8a66597, make_devsel(td->address) | td->maxpacket,
@@ -1001,7 +1001,7 @@ static void prepare_setup_packet(struct r8a66597 *r8a66597,
        r8a66597_write(r8a66597, ~(SIGN | SACK), INTSTS1);
 
        for (i = 0; i < 4; i++) {
-               r8a66597_write(r8a66597, cpu_to_le16(p[i]), setup_addr);
+               r8a66597_write(r8a66597, le16_to_cpu(p[i]), setup_addr);
                setup_addr += 2;
        }
        r8a66597_write(r8a66597, SUREQ, DCPCTR);
@@ -2131,7 +2131,7 @@ static int r8a66597_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
        case GetPortStatus:
                if (wIndex > R8A66597_MAX_ROOT_HUB)
                        goto error;
-               *(u32 *)buf = cpu_to_le32(rh->port);
+               *(__le32 *)buf = cpu_to_le32(rh->port);
                break;
        case SetPortFeature:
                if (wIndex > R8A66597_MAX_ROOT_HUB)
index 274276cf8621664394bd7e9597d5c9c014695b18..426575247b23697265487a995e2551371f88361b 100644 (file)
@@ -1100,7 +1100,7 @@ sl811h_hub_descriptor (
        /* no overcurrent errors detection/handling */
        temp |= 0x0010;
 
-       desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
+       desc->wHubCharacteristics = cpu_to_le16(temp);
 
        /* two bitmaps:  ports removable, and legacy PortPwrCtrlMask */
        desc->bitmap[0] = 0 << 1;
@@ -1506,15 +1506,7 @@ static const char proc_filename[] = "driver/sl811h";
 
 static void create_debug_file(struct sl811 *sl811)
 {
-       struct proc_dir_entry *pde;
-
-       pde = create_proc_entry(proc_filename, 0, NULL);
-       if (pde == NULL)
-               return;
-
-       pde->proc_fops = &proc_ops;
-       pde->data = sl811;
-       sl811->pde = pde;
+       sl811->pde = proc_create_data(proc_filename, 0, NULL, &proc_ops, sl811);
 }
 
 static void remove_debug_file(struct sl811 *sl811)
index a238817762adad0c448903e90f7267233739015c..9b1bb347dc2d7e0b1865c4f36191118b207e5dde 100644 (file)
@@ -209,7 +209,7 @@ static void aircable_send(struct usb_serial_port *port)
        int count, result;
        struct aircable_private *priv = usb_get_serial_port_data(port);
        unsigned char* buf;
-       u16 *dbuf;
+       __le16 *dbuf;
        dbg("%s - port %d", __func__, port->number);
        if (port->write_urb_busy)
                return;
@@ -227,7 +227,7 @@ static void aircable_send(struct usb_serial_port *port)
 
        buf[0] = TX_HEADER_0;
        buf[1] = TX_HEADER_1;
-       dbuf = (u16 *)&buf[2];
+       dbuf = (__le16 *)&buf[2];
        *dbuf = cpu_to_le16((u16)count);
        serial_buf_get(priv->tx_buf,buf + HCI_HEADER_LENGTH, MAX_HCI_FRAMESIZE);
 
index 32121794808dd5f964ca61143503bc92b6b5b016..0230d3c0888af92e816d2d432f60a7171c4f547d 100644 (file)
@@ -541,7 +541,7 @@ static int cypress_earthmate_startup (struct usb_serial *serial)
        /* All Earthmate devices use the separated-count packet
           format!  Idiotic. */
        priv->pkt_fmt = packet_format_1;
-       if (serial->dev->descriptor.idProduct != PRODUCT_ID_EARTHMATEUSB) {
+       if (serial->dev->descriptor.idProduct != cpu_to_le16(PRODUCT_ID_EARTHMATEUSB)) {
                /* The old original USB Earthmate seemed able to
                   handle GET_CONFIG requests; everything they've
                   produced since that time crashes if this command is
index d17d1645714fbb2ab9ee7cf391181b62e869275a..04a56f300ea68059ce54d7f803408737562855ab 100644 (file)
@@ -1421,8 +1421,7 @@ static void digi_close(struct usb_serial_port *port, struct file *filp)
                tty_wait_until_sent(tty, DIGI_CLOSE_TIMEOUT);
 
        /* flush driver and line discipline buffers */
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
+       tty_driver_flush_buffer(tty);
        tty_ldisc_flush(tty);
 
        if (port->serial->dev) {
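
The open-coded "if (tty->driver->flush_buffer) tty->driver->flush_buffer(tty)" sequence is replaced by the tty_driver_flush_buffer() wrapper introduced in this release (the whiteheat hunk below makes the same substitution); the wrapper performs the method check itself. Roughly how a close path uses it, with example_close() as a made-up caller:

    #include <linux/tty.h>
    #include <linux/usb/serial.h>

    static void example_close(struct usb_serial_port *port)
    {
            /* drop whatever is still queued in the driver, then in the ldisc */
            tty_driver_flush_buffer(port->tty);
            tty_ldisc_flush(port->tty);
    }
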
index 23f51a41093ec7e6215aae4228badd3bfa3f7d48..c7329f43d9c9dc885fd65cb21ed80b29b4938162 100644 (file)
@@ -1104,7 +1104,7 @@ static int ftdi_mtxorb_hack_setup(struct usb_serial *serial)
        struct usb_endpoint_descriptor *ep_desc = &ep->desc;
 
        if (ep->enabled && ep_desc->wMaxPacketSize == 0) {
-               ep_desc->wMaxPacketSize = 0x40;
+               ep_desc->wMaxPacketSize = cpu_to_le16(0x40);
                info("Fixing invalid wMaxPacketSize on read pipe");
        }
 
index ce2e487f324042df2bc36904c6a48ffa55c8d4bb..06b52f4098f11e407325ee3b8288002c7cacaef4 100644 (file)
@@ -2993,7 +2993,7 @@ static int edge_startup (struct usb_serial *serial)
                                usb_fill_bulk_urb(edge_serial->read_urb, dev,
                                                  usb_rcvbulkpipe(dev, endpoint->bEndpointAddress),
                                                  edge_serial->bulk_in_buffer,
-                                                 endpoint->wMaxPacketSize,
+                                                 le16_to_cpu(endpoint->wMaxPacketSize),
                                                  edge_bulk_in_callback,
                                                  edge_serial);
                                bulk_in_found = true;
index b395ac7598883a985e94cf485b22d27cf2fac97e..f328948d74e3dfc9cf21439333ef48140eff1af9 100644 (file)
@@ -54,6 +54,7 @@
 #include <linux/tty_flip.h>
 #include <linux/module.h>
 #include <asm/uaccess.h>
+#include <asm/unaligned.h>
 #include <linux/usb.h>
 #include <linux/usb/serial.h>
 #include "kl5kusb105.h"
@@ -235,7 +236,7 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
        if (rc < 0)
                err("Reading line status failed (error = %d)", rc);
        else {
-               status = le16_to_cpu(*(u16 *)status_buf);
+               status = le16_to_cpu(get_unaligned((__le16 *)status_buf));
 
                info("%s - read status %x %x", __func__,
                     status_buf[0], status_buf[1]);
index d92bb6501c8443d2963054c1472dee893c4565db..a9625c180dc3502206d2d967224a1d6ea31d2bf5 100644 (file)
@@ -98,7 +98,7 @@ struct oti6858_buf {
 
 /* format of the control packet */
 struct oti6858_control_pkt {
-       u16     divisor;        /* baud rate = 96000000 / (16 * divisor), LE */
+       __le16  divisor;        /* baud rate = 96000000 / (16 * divisor), LE */
 #define OTI6858_MAX_BAUD_RATE  3000000
        u8      frame_fmt;
 #define FMT_STOP_BITS_MASK     0xc0
@@ -211,7 +211,7 @@ struct oti6858_private {
        struct delayed_work delayed_write_work;
 
        struct {
-               u16 divisor;
+               __le16 divisor;
                u8 frame_fmt;
                u8 control;
        } pending_setup;
@@ -450,7 +450,7 @@ static void oti6858_set_termios(struct usb_serial_port *port,
        unsigned long flags;
        unsigned int cflag;
        u8 frame_fmt, control;
-       u16 divisor;
+       __le16 divisor;
        int br;
 
        dbg("%s(port = %d)", __func__, port->number);
@@ -505,11 +505,12 @@ static void oti6858_set_termios(struct usb_serial_port *port,
                divisor = 0;
        } else {
                int real_br;
+               int new_divisor;
                br = min(br, OTI6858_MAX_BAUD_RATE);
 
-               divisor = (96000000 + 8 * br) / (16 * br);
-               real_br = 96000000 / (16 * divisor);
-               divisor = cpu_to_le16(divisor);
+               new_divisor = (96000000 + 8 * br) / (16 * br);
+               real_br = 96000000 / (16 * new_divisor);
+               divisor = cpu_to_le16(new_divisor);
                tty_encode_baud_rate(port->tty, real_br, real_br);
        }
 
index 2282d620186e2122fadbb0bf1c9bbed1a52ac6f7..55b2570b8b8b568ad0e694ed575d8153ed4333f6 100644 (file)
@@ -310,17 +310,18 @@ static int spcp8x5_startup(struct usb_serial *serial)
        struct spcp8x5_private *priv;
        int i;
        enum spcp8x5_type type = SPCP825_007_TYPE;
+       u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
 
-       if (serial->dev->descriptor.idProduct == 0x0201)
+       if (product == 0x0201)
                type = SPCP825_007_TYPE;
-       else if (serial->dev->descriptor.idProduct == 0x0231)
+       else if (product == 0x0231)
                type = SPCP835_TYPE;
-       else if (serial->dev->descriptor.idProduct == 0x0235)
+       else if (product == 0x0235)
                type = SPCP825_008_TYPE;
-       else if (serial->dev->descriptor.idProduct == 0x0204)
+       else if (product == 0x0204)
                type = SPCP825_INTERMATIC_TYPE;
-       else if (serial->dev->descriptor.idProduct == 0x0471 &&
-                serial->dev->descriptor.idVendor == 0x081e)
+       else if (product == 0x0471 &&
+                serial->dev->descriptor.idVendor == cpu_to_le16(0x081e))
                type = SPCP825_PHILIP_TYPE;
        dev_dbg(&serial->dev->dev, "device type = %d\n", (int)type);
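
idVendor and idProduct in struct usb_device_descriptor are stored little-endian, so comparing them directly against host-order constants breaks on big-endian CPUs. The cypress, ftdi and spcp8x5 hunks fix this either by converting the field once or by converting the constant; both forms are sketched below (PRODUCT_ID_EXAMPLE and VENDOR_ID_EXAMPLE are made-up defines):

    #include <linux/usb.h>

    #define PRODUCT_ID_EXAMPLE  0x0123          /* hypothetical */
    #define VENDOR_ID_EXAMPLE   0x081e          /* hypothetical */

    static int is_example_device(struct usb_device *udev)
    {
            /* convert the field to CPU order once ... */
            u16 product = le16_to_cpu(udev->descriptor.idProduct);

            if (product != PRODUCT_ID_EXAMPLE)
                    return 0;
            /* ... or compare against a constant converted the other way */
            return udev->descriptor.idVendor == cpu_to_le16(VENDOR_ID_EXAMPLE);
    }
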
 
index a9934a3f984591700cfa64e8dce7d31900035c64..0cb0d77dc429b2b5d0f0dbd916feac063db7a4f6 100644 (file)
@@ -296,16 +296,14 @@ static int serial_write (struct tty_struct * tty, const unsigned char *buf, int
        struct usb_serial_port *port = tty->driver_data;
        int retval = -ENODEV;
 
-       if (!port || port->serial->dev->state == USB_STATE_NOTATTACHED)
+       if (port->serial->dev->state == USB_STATE_NOTATTACHED)
                goto exit;
 
        dbg("%s - port %d, %d byte(s)", __func__, port->number, count);
 
-       if (!port->open_count) {
-               retval = -EINVAL;
-               dbg("%s - port not opened", __func__);
-               goto exit;
-       }
+       /* open_count is managed under the mutex lock for the tty, so it
+        * cannot drop to zero until after the last close completes */
+       WARN_ON(!port->open_count);
 
        /* pass on to the driver specific version of this function */
        retval = port->serial->type->write(port, buf, count);
@@ -317,61 +315,28 @@ exit:
 static int serial_write_room (struct tty_struct *tty) 
 {
        struct usb_serial_port *port = tty->driver_data;
-       int retval = -ENODEV;
-
-       if (!port)
-               goto exit;
-
        dbg("%s - port %d", __func__, port->number);
-
-       if (!port->open_count) {
-               dbg("%s - port not open", __func__);
-               goto exit;
-       }
-
+       WARN_ON(!port->open_count);
        /* pass on to the driver specific version of this function */
-       retval = port->serial->type->write_room(port);
-
-exit:
-       return retval;
+       return port->serial->type->write_room(port);
 }
 
 static int serial_chars_in_buffer (struct tty_struct *tty) 
 {
        struct usb_serial_port *port = tty->driver_data;
-       int retval = -ENODEV;
-
-       if (!port)
-               goto exit;
-
        dbg("%s = port %d", __func__, port->number);
 
-       if (!port->open_count) {
-               dbg("%s - port not open", __func__);
-               goto exit;
-       }
-
+       WARN_ON(!port->open_count);
        /* pass on to the driver specific version of this function */
-       retval = port->serial->type->chars_in_buffer(port);
-
-exit:
-       return retval;
+       return port->serial->type->chars_in_buffer(port);
 }
 
 static void serial_throttle (struct tty_struct * tty)
 {
        struct usb_serial_port *port = tty->driver_data;
-
-       if (!port)
-               return;
-
        dbg("%s - port %d", __func__, port->number);
 
-       if (!port->open_count) {
-               dbg ("%s - port not open", __func__);
-               return;
-       }
-
+       WARN_ON(!port->open_count);
        /* pass on to the driver specific version of this function */
        if (port->serial->type->throttle)
                port->serial->type->throttle(port);
@@ -380,17 +345,9 @@ static void serial_throttle (struct tty_struct * tty)
 static void serial_unthrottle (struct tty_struct * tty)
 {
        struct usb_serial_port *port = tty->driver_data;
-
-       if (!port)
-               return;
-
        dbg("%s - port %d", __func__, port->number);
 
-       if (!port->open_count) {
-               dbg("%s - port not open", __func__);
-               return;
-       }
-
+       WARN_ON(!port->open_count);
        /* pass on to the driver specific version of this function */
        if (port->serial->type->unthrottle)
                port->serial->type->unthrottle(port);
@@ -401,42 +358,27 @@ static int serial_ioctl (struct tty_struct *tty, struct file * file, unsigned in
        struct usb_serial_port *port = tty->driver_data;
        int retval = -ENODEV;
 
-       lock_kernel();
-       if (!port)
-               goto exit;
-
        dbg("%s - port %d, cmd 0x%.4x", __func__, port->number, cmd);
 
-       /* Caution - port->open_count is BKL protected */
-       if (!port->open_count) {
-               dbg ("%s - port not open", __func__);
-               goto exit;
-       }
+       WARN_ON(!port->open_count);
 
        /* pass on to the driver specific version of this function if it is available */
-       if (port->serial->type->ioctl)
+       if (port->serial->type->ioctl) {
+               lock_kernel();
                retval = port->serial->type->ioctl(port, file, cmd, arg);
+               unlock_kernel();
+       }
        else
                retval = -ENOIOCTLCMD;
-exit:
-       unlock_kernel();
        return retval;
 }
 
 static void serial_set_termios (struct tty_struct *tty, struct ktermios * old)
 {
        struct usb_serial_port *port = tty->driver_data;
-
-       if (!port)
-               return;
-
        dbg("%s - port %d", __func__, port->number);
 
-       if (!port->open_count) {
-               dbg("%s - port not open", __func__);
-               return;
-       }
-
+       WARN_ON(!port->open_count);
        /* pass on to the driver specific version of this function if it is available */
        if (port->serial->type->set_termios)
                port->serial->type->set_termios(port, old);
@@ -448,24 +390,15 @@ static void serial_break (struct tty_struct *tty, int break_state)
 {
        struct usb_serial_port *port = tty->driver_data;
 
-       lock_kernel();
-       if (!port) {
-               unlock_kernel();
-               return;
-       }
-
        dbg("%s - port %d", __func__, port->number);
 
-       if (!port->open_count) {
-               dbg("%s - port not open", __func__);
-               unlock_kernel();
-               return;
-       }
-
+       WARN_ON(!port->open_count);
        /* pass on to the driver specific version of this function if it is available */
-       if (port->serial->type->break_ctl)
+       if (port->serial->type->break_ctl) {
+               lock_kernel();
                port->serial->type->break_ctl(port, break_state);
-       unlock_kernel();
+               unlock_kernel();
+       }
 }
 
 static int serial_read_proc (char *page, char **start, off_t off, int count, int *eof, void *data)
@@ -519,19 +452,11 @@ static int serial_tiocmget (struct tty_struct *tty, struct file *file)
 {
        struct usb_serial_port *port = tty->driver_data;
 
-       if (!port)
-               return -ENODEV;
-
        dbg("%s - port %d", __func__, port->number);
 
-       if (!port->open_count) {
-               dbg("%s - port not open", __func__);
-               return -ENODEV;
-       }
-
+       WARN_ON(!port->open_count);
        if (port->serial->type->tiocmget)
                return port->serial->type->tiocmget(port, file);
-
        return -EINVAL;
 }
 
@@ -540,19 +465,11 @@ static int serial_tiocmset (struct tty_struct *tty, struct file *file,
 {
        struct usb_serial_port *port = tty->driver_data;
 
-       if (!port)
-               return -ENODEV;
-
        dbg("%s - port %d", __func__, port->number);
 
-       if (!port->open_count) {
-               dbg("%s - port not open", __func__);
-               return -ENODEV;
-       }
-
+       WARN_ON(!port->open_count);
        if (port->serial->type->tiocmset)
                return port->serial->type->tiocmset(port, file, set, clear);
-
        return -EINVAL;
 }
 
index e96bf8663ffcd0eef052e334a0ecca8b430f628a..f07e8a4c1f3ddb3b3cfe12f4a3b22bef7e7c11a9 100644 (file)
@@ -673,15 +673,13 @@ static void whiteheat_close(struct usb_serial_port *port, struct file * filp)
        }
 */
 
-       if (port->tty->driver->flush_buffer)
-               port->tty->driver->flush_buffer(port->tty);
+       tty_driver_flush_buffer(port->tty);
        tty_ldisc_flush(port->tty);
 
        firm_report_tx_done(port);
 
        firm_close(port);
 
-printk(KERN_ERR"Before processing rx_urbs_submitted.\n");
        /* shutdown our bulk reads and writes */
        mutex_lock(&info->deathwarrant);
        spin_lock_irq(&info->lock);
index e3dc8f8d0c3ec082530c846d99cce79d8a6f1892..bb1dadaa4a23a1054cca3fbd56791d3cf50fcb1d 100644 (file)
@@ -139,6 +139,30 @@ config FB_SYS_IMAGEBLIT
          blitting. This is used by drivers that don't provide their own
          (accelerated) version and the framebuffer is in system RAM.
 
+menuconfig FB_FOREIGN_ENDIAN
+       bool "Framebuffer foreign endianness support"
+       depends on FB
+       ---help---
+         This menu lets you enable support for framebuffers with
+         non-native endianness (e.g. a little-endian framebuffer on a
+         big-endian machine). Most probably you don't have such hardware,
+         so it's safe to say "n" here.
+
+choice
+       prompt "Choice endianness support"
+       depends on FB_FOREIGN_ENDIAN
+
+config FB_BOTH_ENDIAN
+       bool "Support for Big- and Little-Endian framebuffers"
+
+config FB_BIG_ENDIAN
+       bool "Support for Big-Endian framebuffers only"
+
+config FB_LITTLE_ENDIAN
+       bool "Support for Little-Endian framebuffers only"
+
+endchoice
+
 config FB_SYS_FOPS
        tristate
        depends on FB
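
The new FB_FOREIGN_ENDIAN choice only selects which framebuffer byte orders the core has to handle; code then tests the resulting CONFIG_ symbols at build time. A compile-time sketch of how the three options map onto capabilities; only the EXAMPLE_* names are invented here:

    /* FB_BOTH_ENDIAN, FB_BIG_ENDIAN and FB_LITTLE_ENDIAN come from the
     * choice added above. */
    #if defined(CONFIG_FB_BOTH_ENDIAN)
    # define EXAMPLE_FB_SUPPORTS_FOREIGN_BE  1
    # define EXAMPLE_FB_SUPPORTS_FOREIGN_LE  1
    #elif defined(CONFIG_FB_BIG_ENDIAN)
    # define EXAMPLE_FB_SUPPORTS_FOREIGN_BE  1
    # define EXAMPLE_FB_SUPPORTS_FOREIGN_LE  0
    #elif defined(CONFIG_FB_LITTLE_ENDIAN)
    # define EXAMPLE_FB_SUPPORTS_FOREIGN_BE  0
    # define EXAMPLE_FB_SUPPORTS_FOREIGN_LE  1
    #else   /* FB_FOREIGN_ENDIAN disabled: native byte order only */
    # define EXAMPLE_FB_SUPPORTS_FOREIGN_BE  0
    # define EXAMPLE_FB_SUPPORTS_FOREIGN_LE  0
    #endif
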
@@ -149,6 +173,16 @@ config FB_DEFERRED_IO
        depends on FB
        default y
 
+config FB_METRONOME
+       tristate
+       depends on FB
+       depends on FB_DEFERRED_IO
+
+config FB_HECUBA
+       tristate
+       depends on FB
+       depends on FB_DEFERRED_IO
+
 config FB_SVGALIB
        tristate
        depends on FB
@@ -546,7 +580,7 @@ config FB_VGA16
 
 config FB_BF54X_LQ043
        tristate "SHARP LQ043 TFT LCD (BF548 EZKIT)"
-       depends on FB && (BF54x)
+       depends on FB && (BF54x) && !BF542
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
        select FB_CFB_IMAGEBLIT
@@ -674,20 +708,18 @@ config FB_IMAC
        help
          This is the frame buffer device driver for the Intel-based Macintosh
 
-config FB_HECUBA
-       tristate "Hecuba board support"
+config FB_N411
+       tristate "N411 Apollo/Hecuba devkit support"
        depends on FB && X86 && MMU
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
        select FB_SYS_IMAGEBLIT
        select FB_SYS_FOPS
        select FB_DEFERRED_IO
+       select FB_HECUBA
        help
-         This enables support for the Hecuba board. This driver was tested
-         with an E-Ink 800x600 display and x86 SBCs through a 16 bit GPIO
-         interface (8 bit data, 4 bit control). If you anticipate using
-         this driver, say Y or M; otherwise say N. You must specify the
-         GPIO IO address to be used for setting control and data.
+         This enables support for the Apollo display controller in its
+         Hecuba form using the n411 devkit.
 
 config FB_HGA
        tristate "Hercules mono graphics support"
@@ -1087,7 +1119,7 @@ config FB_CARILLO_RANCH
          This driver supports the LE80578 (Carillo Ranch) board
 
 config FB_INTEL
-       tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G support (EXPERIMENTAL)"
+       tristate "Intel 830M/845G/852GM/855GM/865G/915G/945G/945GM/965G/965GM support (EXPERIMENTAL)"
        depends on FB && EXPERIMENTAL && PCI && X86
        select AGP
        select AGP_INTEL
@@ -1097,7 +1129,7 @@ config FB_INTEL
        select FB_CFB_IMAGEBLIT
        help
          This driver supports the on-board graphics built in to the Intel
-          830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM chipsets.
+          830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM chipsets.
           Say Y if you have and plan to use such a board.
 
          If you say Y here and want DDC/I2C support you must first say Y to
@@ -1742,6 +1774,11 @@ config FB_PXA
 
          If unsure, say N.
 
+config FB_PXA_SMARTPANEL
+       bool "PXA Smartpanel LCD support"
+       default n
+       depends on FB_PXA
+
 config FB_PXA_PARAMETERS
        bool "PXA LCD command line parameters"
        default n
@@ -1779,6 +1816,16 @@ config FB_MBX_DEBUG
 
          If unsure, say N.
 
+config FB_FSL_DIU
+       tristate "Freescale DIU framebuffer support"
+       depends on FB && FSL_SOC
+       select FB_CFB_FILLRECT
+       select FB_CFB_COPYAREA
+       select FB_CFB_IMAGEBLIT
+       select PPC_LIB_RHEAP
+       ---help---
+         Framebuffer driver for the Freescale SoC DIU
+
 config FB_W100
        tristate "W100 frame buffer support"
        depends on FB && PXA_SHARPSL
@@ -1893,19 +1940,18 @@ config FB_XILINX
          framebuffer. ML300 carries a 640*480 LCD display on the board,
          ML403 uses a standard DB15 VGA connector.
 
-config FB_METRONOME
-       tristate "Metronome display controller support"
+config FB_AM200EPD
+       tristate "AM-200 E-Ink EPD devkit support"
        depends on FB && ARCH_PXA && MMU
        select FB_SYS_FILLRECT
        select FB_SYS_COPYAREA
        select FB_SYS_IMAGEBLIT
        select FB_SYS_FOPS
        select FB_DEFERRED_IO
+       select FB_METRONOME
        help
-         This enables support for the Metronome display controller. Tested
-         with an E-Ink 800x600 display and Gumstix Connex through an AMLCD
-         interface. Please read <file:Documentation/fb/metronomefb.txt>
-         for more information.
+         This enables support for the Metronome display controller used on
+         the E-Ink AM-200 EPD devkit.
 
 config FB_VIRTUAL
        tristate "Virtual Frame Buffer support (ONLY FOR TESTING!)"
index f172b9b73314ebc21845439919f6c18e63fd79cd..04bca35403ff6d0333270e63d44a51d97a072f41 100644 (file)
@@ -29,6 +29,7 @@ obj-$(CONFIG_FB_DEFERRED_IO)   += fb_defio.o
 
 # Hardware specific drivers go first
 obj-$(CONFIG_FB_AMIGA)            += amifb.o c2p.o
+obj-$(CONFIG_FB_AM200EPD)         += am200epd.o
 obj-$(CONFIG_FB_ARC)              += arcfb.o
 obj-$(CONFIG_FB_CLPS711X)         += clps711xfb.o
 obj-$(CONFIG_FB_CYBER2000)        += cyber2000fb.o
@@ -107,6 +108,7 @@ obj-$(CONFIG_FB_METRONOME)        += metronomefb.o
 obj-$(CONFIG_FB_S1D13XXX)        += s1d13xxxfb.o
 obj-$(CONFIG_FB_IMX)              += imxfb.o
 obj-$(CONFIG_FB_S3C2410)         += s3c2410fb.o
+obj-$(CONFIG_FB_FSL_DIU)         += fsl-diu-fb.o
 obj-$(CONFIG_FB_PNX4008_DUM)     += pnx4008/
 obj-$(CONFIG_FB_PNX4008_DUM_RGB)  += pnx4008/
 obj-$(CONFIG_FB_IBM_GXT4500)     += gxt4500.o
diff --git a/drivers/video/am200epd.c b/drivers/video/am200epd.c
new file mode 100644 (file)
index 0000000..51e26c1
--- /dev/null
@@ -0,0 +1,295 @@
+/*
+ * linux/drivers/video/am200epd.c -- Platform device for AM200 EPD kit
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This work was made possible by help and equipment support from E-Ink
+ * Corporation. http://support.eink.com/community
+ *
+ * This driver is written to be used with the Metronome display controller
+ * on the AM200 EPD prototype kit/development kit with an E-Ink 800x600
+ * Vizplex EPD on a Gumstix board using the Lyre interface board.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/irq.h>
+
+#include <video/metronomefb.h>
+
+#include <asm/arch/pxa-regs.h>
+
+/* register offsets for gpio control */
+#define LED_GPIO_PIN 51
+#define STDBY_GPIO_PIN 48
+#define RST_GPIO_PIN 49
+#define RDY_GPIO_PIN 32
+#define ERR_GPIO_PIN 17
+#define PCBPWR_GPIO_PIN 16
+
+#define AF_SEL_GPIO_N 0x3
+#define GAFR0_U_OFFSET(pin) ((pin - 16) * 2)
+#define GAFR1_L_OFFSET(pin) ((pin - 32) * 2)
+#define GAFR1_U_OFFSET(pin) ((pin - 48) * 2)
+#define GPDR1_OFFSET(pin) (pin - 32)
+#define GPCR1_OFFSET(pin) (pin - 32)
+#define GPSR1_OFFSET(pin) (pin - 32)
+#define GPCR0_OFFSET(pin) (pin)
+#define GPSR0_OFFSET(pin) (pin)
+
+static void am200_set_gpio_output(int pin, int val)
+{
+       u8 index;
+
+       index = pin >> 4;
+
+       switch (index) {
+       case 1:
+               if (val)
+                       GPSR0 |= (1 << GPSR0_OFFSET(pin));
+               else
+                       GPCR0 |= (1 << GPCR0_OFFSET(pin));
+               break;
+       case 2:
+               break;
+       case 3:
+               if (val)
+                       GPSR1 |= (1 << GPSR1_OFFSET(pin));
+               else
+                       GPCR1 |= (1 << GPCR1_OFFSET(pin));
+               break;
+       default:
+               printk(KERN_ERR "unimplemented\n");
+       }
+}
+
+static void __devinit am200_init_gpio_pin(int pin, int dir)
+{
+       u8 index;
+       /* dir: 0 is output, 1 is input. Two things are done here:
+        * - set the gpio alternate function to standard gpio
+        * - set the gpio direction to input or output
+        */
+
+       index = pin >> 4;
+       switch (index) {
+       case 1:
+               GAFR0_U &= ~(AF_SEL_GPIO_N << GAFR0_U_OFFSET(pin));
+
+               if (dir)
+                       GPDR0 &= ~(1 << pin);
+               else
+                       GPDR0 |= (1 << pin);
+               break;
+       case 2:
+               GAFR1_L &= ~(AF_SEL_GPIO_N << GAFR1_L_OFFSET(pin));
+
+               if (dir)
+                       GPDR1 &= ~(1 << GPDR1_OFFSET(pin));
+               else
+                       GPDR1 |= (1 << GPDR1_OFFSET(pin));
+               break;
+       case 3:
+               GAFR1_U &= ~(AF_SEL_GPIO_N << GAFR1_U_OFFSET(pin));
+
+               if (dir)
+                       GPDR1 &= ~(1 << GPDR1_OFFSET(pin));
+               else
+                       GPDR1 |= (1 << GPDR1_OFFSET(pin));
+               break;
+       default:
+               printk(KERN_ERR "unimplemented\n");
+       }
+}
+
+static void am200_init_gpio_regs(struct metronomefb_par *par)
+{
+       am200_init_gpio_pin(LED_GPIO_PIN, 0);
+       am200_set_gpio_output(LED_GPIO_PIN, 0);
+
+       am200_init_gpio_pin(STDBY_GPIO_PIN, 0);
+       am200_set_gpio_output(STDBY_GPIO_PIN, 0);
+
+       am200_init_gpio_pin(RST_GPIO_PIN, 0);
+       am200_set_gpio_output(RST_GPIO_PIN, 0);
+
+       am200_init_gpio_pin(RDY_GPIO_PIN, 1);
+
+       am200_init_gpio_pin(ERR_GPIO_PIN, 1);
+
+       am200_init_gpio_pin(PCBPWR_GPIO_PIN, 0);
+       am200_set_gpio_output(PCBPWR_GPIO_PIN, 0);
+}
+
+static void am200_disable_lcd_controller(struct metronomefb_par *par)
+{
+       LCSR = 0xffffffff;      /* Clear LCD Status Register */
+       LCCR0 |= LCCR0_DIS;     /* Disable LCD Controller */
+
+       /* we reset and just wait for things to settle */
+       msleep(200);
+}
+
+static void am200_enable_lcd_controller(struct metronomefb_par *par)
+{
+       LCSR = 0xffffffff;
+       FDADR0 = par->metromem_desc_dma;
+       LCCR0 |= LCCR0_ENB;
+}
+
+static void am200_init_lcdc_regs(struct metronomefb_par *par)
+{
+       /* here we do:
+       - disable the lcd controller
+       - setup lcd control registers
+       - setup dma descriptor
+       - reenable lcd controller
+       */
+
+       /* disable the lcd controller */
+       am200_disable_lcd_controller(par);
+
+       /* setup lcd control registers */
+       LCCR0 = LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM | LCCR0_PAS
+               | LCCR0_QDM | LCCR0_BM | LCCR0_OUM;
+
+       LCCR1 = (par->info->var.xres/2 - 1) /* pixels per line */
+               | (27 << 10) /* hsync pulse width - 1 */
+               | (33 << 16) /* eol pixel count */
+               | (33 << 24); /* bol pixel count */
+
+       LCCR2 = (par->info->var.yres - 1) /* lines per panel */
+               | (24 << 10) /* vsync pulse width - 1 */
+               | (2 << 16) /* eof pixel count */
+               | (0 << 24); /* bof pixel count */
+
+       LCCR3 = 2 /* pixel clock divisor */
+               | (24 << 8) /* AC Bias pin freq */
+               | LCCR3_16BPP /* BPP */
+               | LCCR3_PCP;  /* PCP falling edge */
+
+}
+
+static void am200_post_dma_setup(struct metronomefb_par *par)
+{
+       par->metromem_desc->mFDADR0 = par->metromem_desc_dma;
+       par->metromem_desc->mFSADR0 = par->metromem_dma;
+       par->metromem_desc->mFIDR0 = 0;
+       par->metromem_desc->mLDCMD0 = par->info->var.xres
+                                       * par->info->var.yres;
+       am200_enable_lcd_controller(par);
+}
+
+static void am200_free_irq(struct fb_info *info)
+{
+       free_irq(IRQ_GPIO(RDY_GPIO_PIN), info);
+}
+
+static irqreturn_t am200_handle_irq(int irq, void *dev_id)
+{
+       struct fb_info *info = dev_id;
+       struct metronomefb_par *par = info->par;
+
+       wake_up_interruptible(&par->waitq);
+       return IRQ_HANDLED;
+}
+
+static int am200_setup_irq(struct fb_info *info)
+{
+       int retval;
+
+       retval = request_irq(IRQ_GPIO(RDY_GPIO_PIN), am200_handle_irq,
+                               IRQF_DISABLED, "AM200", info);
+       if (retval) {
+               printk(KERN_ERR "am200epd: request_irq failed: %d\n", retval);
+               return retval;
+       }
+
+       return set_irq_type(IRQ_GPIO(RDY_GPIO_PIN), IRQT_FALLING);
+}
+
+static void am200_set_rst(struct metronomefb_par *par, int state)
+{
+       am200_set_gpio_output(RST_GPIO_PIN, state);
+}
+
+static void am200_set_stdby(struct metronomefb_par *par, int state)
+{
+       am200_set_gpio_output(STDBY_GPIO_PIN, state);
+}
+
+static int am200_wait_event(struct metronomefb_par *par)
+{
+       return wait_event_timeout(par->waitq, (GPLR1 & 0x01), HZ);
+}
+
+static int am200_wait_event_intr(struct metronomefb_par *par)
+{
+       return wait_event_interruptible_timeout(par->waitq, (GPLR1 & 0x01), HZ);
+}
+
+static struct metronome_board am200_board = {
+       .owner                  = THIS_MODULE,
+       .free_irq               = am200_free_irq,
+       .setup_irq              = am200_setup_irq,
+       .init_gpio_regs         = am200_init_gpio_regs,
+       .init_lcdc_regs         = am200_init_lcdc_regs,
+       .post_dma_setup         = am200_post_dma_setup,
+       .set_rst                = am200_set_rst,
+       .set_stdby              = am200_set_stdby,
+       .met_wait_event         = am200_wait_event,
+       .met_wait_event_intr    = am200_wait_event_intr,
+};
+
+static struct platform_device *am200_device;
+
+static int __init am200_init(void)
+{
+       int ret;
+
+       /* request our platform independent driver */
+       request_module("metronomefb");
+
+       am200_device = platform_device_alloc("metronomefb", -1);
+       if (!am200_device)
+               return -ENOMEM;
+
+       platform_device_add_data(am200_device, &am200_board,
+                                       sizeof(am200_board));
+
+       /* this _add binds metronomefb to am200. metronomefb refcounts am200 */
+       ret = platform_device_add(am200_device);
+
+       if (ret)
+               platform_device_put(am200_device);
+
+       return ret;
+}
+
+static void __exit am200_exit(void)
+{
+       platform_device_unregister(am200_device);
+}
+
+module_init(am200_init);
+module_exit(am200_exit);
+
+MODULE_DESCRIPTION("board driver for am200 metronome epd kit");
+MODULE_AUTHOR("Jaya Kumar");
+MODULE_LICENSE("GPL");
index 4c9ec3f58c52e35c194d98cfebcd830075be6c61..e6492c1048bf29dcb367f696a7abaee9c6403055 100644 (file)
@@ -96,7 +96,7 @@
 #endif
 
 #ifdef DEBUG
-#  define DPRINTK(fmt, args...)        printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+#  define DPRINTK(fmt, args...)        printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
 #else
 #  define DPRINTK(fmt, args...)
 #endif
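
__FUNCTION__ is a GCC extension; __func__ is the equivalent C99 predefined identifier, so conversions like this one (repeated in the aty and radeon hunks below) are purely mechanical. A minimal illustration:

    #include <linux/kernel.h>

    #define EXAMPLE_DBG(fmt, args...) \
            printk(KERN_DEBUG "%s: " fmt, __func__, ##args)

    static void example_probe_report(int heads)
    {
            /* "%s" expands to "example_probe_report" */
            EXAMPLE_DBG("found %d display head(s)\n", heads);
    }
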
index 8a1b07c74394d35f12c62ab25129ca762c23e045..5001bd4ef466331ba9c5498c3cd6fc7e9e4a8e21 100644 (file)
@@ -101,7 +101,7 @@ static const struct svga_timing_regs ark_timing_regs     = {
 
 /* Module parameters */
 
-static char *mode = "640x480-8@60";
+static char *mode_option __devinitdata = "640x480-8@60";
 
 #ifdef CONFIG_MTRR
 static int mtrr = 1;
@@ -111,8 +111,10 @@ MODULE_AUTHOR("(c) 2007 Ondrej Zajicek <santiago@crfreenet.org>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("fbdev driver for ARK 2000PV");
 
-module_param(mode, charp, 0444);
-MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
+module_param(mode_option, charp, 0444);
+MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
+module_param_named(mode, mode_option, charp, 0444);
+MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc) (deprecated)");
 
 #ifdef CONFIG_MTRR
 module_param(mtrr, int, 0444);
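
Renaming the module parameter from "mode" to "mode_option" is done without breaking existing command lines: module_param_named() registers the old name as a second, deprecated alias that writes to the same variable. The pattern, trimmed down (examplefb is a made-up module name):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static char *mode_option = "640x480-8@60";

    module_param(mode_option, charp, 0444);
    MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");

    /* keep the old spelling working: examplefb.mode=... still lands here */
    module_param_named(mode, mode_option, charp, 0444);
    MODULE_PARM_DESC(mode, "Default video mode (deprecated)");
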
@@ -941,7 +943,7 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
        }
 
        /* Allocate and fill driver data structure */
-       info = framebuffer_alloc(sizeof(struct arkfb_info), NULL);
+       info = framebuffer_alloc(sizeof(struct arkfb_info), &(dev->dev));
        if (! info) {
                dev_err(&(dev->dev), "cannot allocate memory\n");
                return -ENOMEM;
@@ -956,20 +958,20 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
        /* Prepare PCI device */
        rc = pci_enable_device(dev);
        if (rc < 0) {
-               dev_err(&(dev->dev), "cannot enable PCI device\n");
+               dev_err(info->dev, "cannot enable PCI device\n");
                goto err_enable_device;
        }
 
        rc = pci_request_regions(dev, "arkfb");
        if (rc < 0) {
-               dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
+               dev_err(info->dev, "cannot reserve framebuffer region\n");
                goto err_request_regions;
        }
 
        par->dac = ics5342_init(ark_dac_read_regs, ark_dac_write_regs, info);
        if (! par->dac) {
                rc = -ENOMEM;
-               dev_err(&(dev->dev), "RAMDAC initialization failed\n");
+               dev_err(info->dev, "RAMDAC initialization failed\n");
                goto err_dac;
        }
 
@@ -980,7 +982,7 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
        info->screen_base = pci_iomap(dev, 0, 0);
        if (! info->screen_base) {
                rc = -ENOMEM;
-               dev_err(&(dev->dev), "iomap for framebuffer failed\n");
+               dev_err(info->dev, "iomap for framebuffer failed\n");
                goto err_iomap;
        }
 
@@ -999,22 +1001,22 @@ static int __devinit ark_pci_probe(struct pci_dev *dev, const struct pci_device_
        info->pseudo_palette = (void*) (par->pseudo_palette);
 
        /* Prepare startup mode */
-       rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
+       rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
        if (! ((rc == 1) || (rc == 2))) {
                rc = -EINVAL;
-               dev_err(&(dev->dev), "mode %s not found\n", mode);
+               dev_err(info->dev, "mode %s not found\n", mode_option);
                goto err_find_mode;
        }
 
        rc = fb_alloc_cmap(&info->cmap, 256, 0);
        if (rc < 0) {
-               dev_err(&(dev->dev), "cannot allocate colormap\n");
+               dev_err(info->dev, "cannot allocate colormap\n");
                goto err_alloc_cmap;
        }
 
        rc = register_framebuffer(info);
        if (rc < 0) {
-               dev_err(&(dev->dev), "cannot register framebugger\n");
+               dev_err(info->dev, "cannot register framebugger\n");
                goto err_reg_fb;
        }
 
@@ -1088,7 +1090,7 @@ static int ark_pci_suspend (struct pci_dev* dev, pm_message_t state)
        struct fb_info *info = pci_get_drvdata(dev);
        struct arkfb_info *par = info->par;
 
-       dev_info(&(dev->dev), "suspend\n");
+       dev_info(info->dev, "suspend\n");
 
        acquire_console_sem();
        mutex_lock(&(par->open_lock));
@@ -1119,7 +1121,7 @@ static int ark_pci_resume (struct pci_dev* dev)
        struct fb_info *info = pci_get_drvdata(dev);
        struct arkfb_info *par = info->par;
 
-       dev_info(&(dev->dev), "resume\n");
+       dev_info(info->dev, "resume\n");
 
        acquire_console_sem();
        mutex_lock(&(par->open_lock));
@@ -1190,7 +1192,7 @@ static int __init arkfb_init(void)
                return -ENODEV;
 
        if (option && *option)
-               mode = option;
+               mode_option = option;
 #endif
 
        pr_debug("arkfb: initializing\n");
index 5d4fbaa53a6cefd6fc69a9676987c77de7f5b732..dff35474b854f5b3949d730e8cafc7241792a01f 100644 (file)
@@ -1270,7 +1270,7 @@ again:
 
        gstart = (prescale / 2 + plen * left_margin) / prescale;
        /* gend1 is for hde (gend-gstart multiple of align), shifter's xres */
-       gend1 = gstart + ((xres + align - 1) / align) * align * plen / prescale;
+       gend1 = gstart + roundup(xres, align) * plen / prescale;
        /* gend2 is for hbb, visible xres (rest to gend1 is cut off by hblank) */
        gend2 = gstart + xres * plen / prescale;
        par->HHT = plen * (left_margin + xres + right_margin) /
index fc65c02306ddbd6c32c2be96b80fcd630be0258a..8ffdf35787688084deced4c51cd8704bb92f4c19 100644 (file)
@@ -31,7 +31,8 @@
 #define ATMEL_LCDC_CVAL_DEFAULT                0xc8
 #define ATMEL_LCDC_DMA_BURST_LEN       8
 
-#if defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91CAP9)
+#if defined(CONFIG_ARCH_AT91SAM9263) || defined(CONFIG_ARCH_AT91CAP9) || \
+       defined(CONFIG_ARCH_AT91SAM9RL)
 #define ATMEL_LCDC_FIFO_SIZE           2048
 #else
 #define ATMEL_LCDC_FIFO_SIZE           512
@@ -250,6 +251,8 @@ static int atmel_lcdfb_alloc_video_memory(struct atmel_lcdfb_info *sinfo)
                return -ENOMEM;
        }
 
+       memset(info->screen_base, 0, info->fix.smem_len);
+
        return 0;
 }
 
@@ -336,19 +339,35 @@ static int atmel_lcdfb_check_var(struct fb_var_screeninfo *var,
                break;
        case 15:
        case 16:
-               var->red.offset = 0;
+               if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
+                       /* RGB:565 mode */
+                       var->red.offset = 11;
+                       var->blue.offset = 0;
+                       var->green.length = 6;
+               } else {
+                       /* BGR:555 mode */
+                       var->red.offset = 0;
+                       var->blue.offset = 10;
+                       var->green.length = 5;
+               }
                var->green.offset = 5;
-               var->blue.offset = 10;
-               var->red.length = var->green.length = var->blue.length = 5;
+               var->red.length = var->blue.length = 5;
                break;
        case 32:
                var->transp.offset = 24;
                var->transp.length = 8;
                /* fall through */
        case 24:
-               var->red.offset = 0;
+               if (sinfo->lcd_wiring_mode == ATMEL_LCDC_WIRING_RGB) {
+                       /* RGB:888 mode */
+                       var->red.offset = 16;
+                       var->blue.offset = 0;
+               } else {
+                       /* BGR:888 mode */
+                       var->red.offset = 0;
+                       var->blue.offset = 16;
+               }
                var->green.offset = 8;
-               var->blue.offset = 16;
                var->red.length = var->green.length = var->blue.length = 8;
                break;
        default:
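
The check_var hunk above picks the 16-bpp bitfield layout from the new lcd_wiring_mode: RGB wiring yields the usual 565 layout, the legacy wiring a BGR 555 one. What those offset/length values mean when packing a pixel, as a small worked example (example_pack16() is hypothetical):

    #include <linux/fb.h>

    /* pack an 8-bit-per-channel colour according to var's bitfield layout */
    static u16 example_pack16(const struct fb_var_screeninfo *var,
                              u8 r, u8 g, u8 b)
    {
            return ((u16)(r >> (8 - var->red.length))   << var->red.offset)   |
                   ((u16)(g >> (8 - var->green.length)) << var->green.offset) |
                   ((u16)(b >> (8 - var->blue.length))  << var->blue.offset);
    }

    /* RGB:565 -> red.offset = 11, green.offset = 5, blue.offset = 0
     * BGR:555 -> red.offset = 0,  green.offset = 5, blue.offset = 10 */
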
@@ -634,7 +653,6 @@ static int __init atmel_lcdfb_init_fbinfo(struct atmel_lcdfb_info *sinfo)
        struct fb_info *info = sinfo->info;
        int ret = 0;
 
-       memset_io(info->screen_base, 0, info->fix.smem_len);
        info->var.activate |= FB_ACTIVATE_FORCE | FB_ACTIVATE_NOW;
 
        dev_info(info->device,
@@ -696,6 +714,7 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
                sinfo->atmel_lcdfb_power_control = pdata_sinfo->atmel_lcdfb_power_control;
                sinfo->guard_time = pdata_sinfo->guard_time;
                sinfo->lcdcon_is_backlight = pdata_sinfo->lcdcon_is_backlight;
+               sinfo->lcd_wiring_mode = pdata_sinfo->lcd_wiring_mode;
        } else {
                dev_err(dev, "cannot get default configuration\n");
                goto free_info;
@@ -764,6 +783,11 @@ static int __init atmel_lcdfb_probe(struct platform_device *pdev)
                info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
                if (!info->screen_base)
                        goto release_intmem;
+
+               /*
+                * Don't clear the framebuffer -- someone may have set
+                * up a splash image.
+                */
        } else {
                /* allocate memory buffer */
                ret = atmel_lcdfb_alloc_video_memory(sinfo);
@@ -903,10 +927,42 @@ static int __exit atmel_lcdfb_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM
+
+static int atmel_lcdfb_suspend(struct platform_device *pdev, pm_message_t mesg)
+{
+       struct fb_info *info = platform_get_drvdata(pdev);
+       struct atmel_lcdfb_info *sinfo = info->par;
+
+       sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
+       lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, 0);
+       if (sinfo->atmel_lcdfb_power_control)
+               sinfo->atmel_lcdfb_power_control(0);
+       atmel_lcdfb_stop_clock(sinfo);
+       return 0;
+}
+
+static int atmel_lcdfb_resume(struct platform_device *pdev)
+{
+       struct fb_info *info = platform_get_drvdata(pdev);
+       struct atmel_lcdfb_info *sinfo = info->par;
+
+       atmel_lcdfb_start_clock(sinfo);
+       if (sinfo->atmel_lcdfb_power_control)
+               sinfo->atmel_lcdfb_power_control(1);
+       lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, sinfo->saved_lcdcon);
+       return 0;
+}
+
+#else
+#define atmel_lcdfb_suspend    NULL
+#define atmel_lcdfb_resume     NULL
+#endif
+
 static struct platform_driver atmel_lcdfb_driver = {
        .remove         = __exit_p(atmel_lcdfb_remove),
-
-// FIXME need suspend, resume
+       .suspend        = atmel_lcdfb_suspend,
+       .resume         = atmel_lcdfb_resume,
 
        .driver         = {
                .name   = "atmel_lcdfb",
index cbd3308b66904a0cf6bcaa2dd449692495f60fea..24ee96c4e9e9e5c816254444b7f002863363eda7 100644 (file)
@@ -91,7 +91,7 @@
 #undef DEBUG
 
 #ifdef DEBUG
-#define DBG(fmt, args...)              printk(KERN_DEBUG "aty128fb: %s " fmt, __FUNCTION__, ##args);
+#define DBG(fmt, args...)              printk(KERN_DEBUG "aty128fb: %s " fmt, __func__, ##args);
 #else
 #define DBG(fmt, args...)
 #endif
@@ -1885,7 +1885,7 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i
 
        /* range check to make sure */
        if (ent->driver_data < ARRAY_SIZE(r128_family))
-           strncat(video_card, r128_family[ent->driver_data], sizeof(video_card));
+           strlcat(video_card, r128_family[ent->driver_data], sizeof(video_card));
 
        printk(KERN_INFO "aty128fb: %s [chip rev 0x%x] ", video_card, chip_rev);
 
index 62f9c6e387ccc3de3e65714ccec28c42eb38a7b8..e4bcf5376a9964095d8e18eddbc680b8538acfe7 100644 (file)
@@ -2621,10 +2621,13 @@ static int __devinit aty_init(struct fb_info *info)
 #endif /* CONFIG_FB_ATY_CT */
        info->var = var;
 
-       fb_alloc_cmap(&info->cmap, 256, 0);
+       if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
+               goto aty_init_exit;
 
-       if (register_framebuffer(info) < 0)
+       if (register_framebuffer(info) < 0) {
+               fb_dealloc_cmap(&info->cmap);
                goto aty_init_exit;
+       }
 
        fb_list = info;
 
index cc9e9779b75f753fd9f937d845ca7a5cd7b415a8..c50c7cf26fe9d82b27acbd3f0312841392c19d74 100644 (file)
@@ -197,7 +197,7 @@ static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
        pll->dsp_config = (dsp_precision << 20) | (pll->dsp_loop_latency << 16) | dsp_xclks;
 #ifdef DEBUG
        printk("atyfb(%s): dsp_config 0x%08x, dsp_on_off 0x%08x\n",
-               __FUNCTION__, pll->dsp_config, pll->dsp_on_off);
+               __func__, pll->dsp_config, pll->dsp_on_off);
 #endif
        return 0;
 }
@@ -225,7 +225,7 @@ static int aty_valid_pll_ct(const struct fb_info *info, u32 vclk_per, struct pll
                (par->ref_clk_per * pll->pll_ref_div);
 #ifdef DEBUG
        printk("atyfb(%s): pllvclk=%d MHz, vclk=%d MHz\n",
-               __FUNCTION__, pllvclk, pllvclk / pll->vclk_post_div_real);
+               __func__, pllvclk, pllvclk / pll->vclk_post_div_real);
 #endif
        pll->pll_vclk_cntl = 0x03; /* VCLK = PLL_VCLK/VCLKx_POST */
 
@@ -269,7 +269,7 @@ static u32 aty_pll_to_var_ct(const struct fb_info *info, const union aty_pll *pl
        }
 #endif
 #ifdef DEBUG
-       printk("atyfb(%s): calculated 0x%08X(%i)\n", __FUNCTION__, ret, ret);
+       printk("atyfb(%s): calculated 0x%08X(%i)\n", __func__, ret, ret);
 #endif
        return ret;
 }
@@ -284,11 +284,11 @@ void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll)
 #ifdef DEBUG
        printk("atyfb(%s): about to program:\n"
                "pll_ext_cntl=0x%02x pll_gen_cntl=0x%02x pll_vclk_cntl=0x%02x\n",
-               __FUNCTION__,
+               __func__,
                pll->ct.pll_ext_cntl, pll->ct.pll_gen_cntl, pll->ct.pll_vclk_cntl);
 
        printk("atyfb(%s): setting clock %lu for FeedBackDivider %i, ReferenceDivider %i, PostDivider %i(%i)\n",
-               __FUNCTION__,
+               __func__,
                par->clk_wr_offset, pll->ct.vclk_fb_div,
                pll->ct.pll_ref_div, pll->ct.vclk_post_div, pll->ct.vclk_post_div_real);
 #endif
@@ -428,7 +428,7 @@ static int __devinit aty_init_pll_ct(const struct fb_info *info,
 
 #ifdef DEBUG
        printk("atyfb(%s): mclk_fb_mult=%d, xclk_post_div=%d\n",
-               __FUNCTION__, pll->ct.mclk_fb_mult, pll->ct.xclk_post_div);
+               __func__, pll->ct.mclk_fb_mult, pll->ct.xclk_post_div);
 #endif
 
        memcntl = aty_ld_le32(MEM_CNTL, par);
@@ -540,7 +540,7 @@ static int __devinit aty_init_pll_ct(const struct fb_info *info,
        pllmclk = (1000000 * pll->ct.mclk_fb_mult * pll->ct.mclk_fb_div) /
                        (par->ref_clk_per * pll->ct.pll_ref_div);
        printk("atyfb(%s): pllmclk=%d MHz, xclk=%d MHz\n",
-               __FUNCTION__, pllmclk, pllmclk / pll->ct.xclk_post_div_real);
+               __func__, pllmclk, pllmclk / pll->ct.xclk_post_div_real);
 #endif
 
        if (M64_HAS(SDRAM_MAGIC_PLL) && (par->ram_type >= SDRAM))
@@ -581,7 +581,7 @@ static int __devinit aty_init_pll_ct(const struct fb_info *info,
                pllsclk = (1000000 * 2 * pll->ct.sclk_fb_div) /
                        (par->ref_clk_per * pll->ct.pll_ref_div);
                printk("atyfb(%s): use sclk, pllsclk=%d MHz, sclk=mclk=%d MHz\n",
-                       __FUNCTION__, pllsclk, pllsclk / sclk_post_div_real);
+                       __func__, pllsclk, pllsclk / sclk_post_div_real);
 #endif
        }
 
index 62867cb63fefb2894dfc98096affff66c4fd5d22..72cd0d2f14ec1a955adf9462cd0f8c8742874b77 100644 (file)
 
 #define RADEON_VERSION "0.2.0"
 
+#include "radeonfb.h"
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
+#include <linux/ctype.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/delay.h>
@@ -91,7 +94,6 @@
 
 #include "../edid.h" // MOVE THAT TO include/video
 #include "ati_ids.h"
-#include "radeonfb.h"              
 
 #define MAX_MAPPED_VRAM        (2048*2048*4)
 #define MIN_MAPPED_VRAM        (1024*768*1)
@@ -1488,7 +1490,7 @@ static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs
                freq = rinfo->pll.ppll_max;
        if (freq*12 < rinfo->pll.ppll_min)
                freq = rinfo->pll.ppll_min / 12;
-       RTRACE("freq = %lu, PLL min = %u, PLL max = %u\n",
+       pr_debug("freq = %lu, PLL min = %u, PLL max = %u\n",
               freq, rinfo->pll.ppll_min, rinfo->pll.ppll_max);
 
        for (post_div = &post_divs[0]; post_div->divider; ++post_div) {
@@ -1509,7 +1511,7 @@ static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs
                post_div = &post_divs[post_div->bitvalue];
                pll_output_freq = post_div->divider * freq;
        }
-       RTRACE("ref_div = %d, ref_clk = %d, output_freq = %d\n",
+       pr_debug("ref_div = %d, ref_clk = %d, output_freq = %d\n",
               rinfo->pll.ref_div, rinfo->pll.ref_clk,
               pll_output_freq);
 
@@ -1519,7 +1521,7 @@ static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs
                post_div = &post_divs[post_div->bitvalue];
                pll_output_freq = post_div->divider * freq;
        }
-       RTRACE("ref_div = %d, ref_clk = %d, output_freq = %d\n",
+       pr_debug("ref_div = %d, ref_clk = %d, output_freq = %d\n",
               rinfo->pll.ref_div, rinfo->pll.ref_clk,
               pll_output_freq);
 
@@ -1528,9 +1530,9 @@ static void radeon_calc_pll_regs(struct radeonfb_info *rinfo, struct radeon_regs
        regs->ppll_ref_div = rinfo->pll.ref_div;
        regs->ppll_div_3 = fb_div | (post_div->bitvalue << 16);
 
-       RTRACE("post div = 0x%x\n", post_div->bitvalue);
-       RTRACE("fb_div = 0x%x\n", fb_div);
-       RTRACE("ppll_div_3 = 0x%x\n", regs->ppll_div_3);
+       pr_debug("post div = 0x%x\n", post_div->bitvalue);
+       pr_debug("fb_div = 0x%x\n", fb_div);
+       pr_debug("ppll_div_3 = 0x%x\n", regs->ppll_div_3);
 }
 
 static int radeonfb_set_par(struct fb_info *info)
@@ -1602,9 +1604,9 @@ static int radeonfb_set_par(struct fb_info *info)
        dotClock = 1000000000 / pixClock;
        freq = dotClock / 10; /* x100 */
 
-       RTRACE("hStart = %d, hEnd = %d, hTotal = %d\n",
+       pr_debug("hStart = %d, hEnd = %d, hTotal = %d\n",
                hSyncStart, hSyncEnd, hTotal);
-       RTRACE("vStart = %d, vEnd = %d, vTotal = %d\n",
+       pr_debug("vStart = %d, vEnd = %d, vTotal = %d\n",
                vSyncStart, vSyncEnd, vTotal);
 
        hsync_wid = (hSyncEnd - hSyncStart) / 8;
@@ -1713,16 +1715,16 @@ static int radeonfb_set_par(struct fb_info *info)
                newmode->surf_info[i] = 0;
        }
 
-       RTRACE("h_total_disp = 0x%x\t   hsync_strt_wid = 0x%x\n",
+       pr_debug("h_total_disp = 0x%x\t   hsync_strt_wid = 0x%x\n",
                newmode->crtc_h_total_disp, newmode->crtc_h_sync_strt_wid);
-       RTRACE("v_total_disp = 0x%x\t   vsync_strt_wid = 0x%x\n",
+       pr_debug("v_total_disp = 0x%x\t   vsync_strt_wid = 0x%x\n",
                newmode->crtc_v_total_disp, newmode->crtc_v_sync_strt_wid);
 
        rinfo->bpp = mode->bits_per_pixel;
        rinfo->depth = depth;
 
-       RTRACE("pixclock = %lu\n", (unsigned long)pixClock);
-       RTRACE("freq = %lu\n", (unsigned long)freq);
+       pr_debug("pixclock = %lu\n", (unsigned long)pixClock);
+       pr_debug("freq = %lu\n", (unsigned long)freq);
 
        /* We use PPLL_DIV_3 */
        newmode->clk_cntl_index = 0x300;
@@ -1986,7 +1988,7 @@ static void fixup_memory_mappings(struct radeonfb_info *rinfo)
        if (rinfo->has_CRTC2)
                OUTREG(CRTC2_GEN_CNTL, save_crtc2_gen_cntl);    
 
-       RTRACE("aper_base: %08x MC_FB_LOC to: %08x, MC_AGP_LOC to: %08x\n",
+       pr_debug("aper_base: %08x MC_FB_LOC to: %08x, MC_AGP_LOC to: %08x\n",
                aper_base,
                ((aper_base + aper_size - 1) & 0xffff0000) | (aper_base >> 16),
                0xffff0000 | (agp_base >> 16));
@@ -2083,7 +2085,7 @@ static void radeon_identify_vram(struct radeonfb_info *rinfo)
         * ToDo: identify these cases
         */
 
-       RTRACE("radeonfb (%s): Found %ldk of %s %d bits wide videoram\n",
+       pr_debug("radeonfb (%s): Found %ldk of %s %d bits wide videoram\n",
               pci_name(rinfo->pdev),
               rinfo->video_ram / 1024,
               rinfo->vram_ddr ? "DDR" : "SDRAM",
@@ -2158,8 +2160,9 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
        struct fb_info *info;
        struct radeonfb_info *rinfo;
        int ret;
+       unsigned char c1, c2;
 
-       RTRACE("radeonfb_pci_register BEGIN\n");
+       pr_debug("radeonfb_pci_register BEGIN\n");
        
        /* Enable device in PCI config */
        ret = pci_enable_device(pdev);
@@ -2185,9 +2188,15 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
        rinfo->lvds_timer.function = radeon_lvds_timer_func;
        rinfo->lvds_timer.data = (unsigned long)rinfo;
 
-       strcpy(rinfo->name, "ATI Radeon XX ");
-       rinfo->name[11] = ent->device >> 8;
-       rinfo->name[12] = ent->device & 0xFF;
+       c1 = ent->device >> 8;
+       c2 = ent->device & 0xff;
+       if (isprint(c1) && isprint(c2))
+               snprintf(rinfo->name, sizeof(rinfo->name),
+                        "ATI Radeon %x \"%c%c\"", ent->device & 0xffff, c1, c2);
+       else
+               snprintf(rinfo->name, sizeof(rinfo->name),
+                        "ATI Radeon %x", ent->device & 0xffff);
+
        rinfo->family = ent->driver_data & CHIP_FAMILY_MASK;
        rinfo->chipset = pdev->device;
        rinfo->has_CRTC2 = (ent->driver_data & CHIP_HAS_CRTC2) != 0;
@@ -2278,7 +2287,7 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
                goto err_unmap_rom;
        }
 
-       RTRACE("radeonfb (%s): mapped %ldk videoram\n", pci_name(rinfo->pdev),
+       pr_debug("radeonfb (%s): mapped %ldk videoram\n", pci_name(rinfo->pdev),
               rinfo->mapped_vram/1024);
 
        /*
@@ -2373,7 +2382,7 @@ static int __devinit radeonfb_pci_register (struct pci_dev *pdev,
 
        if (rinfo->bios_seg)
                radeon_unmap_ROM(rinfo, pdev);
-       RTRACE("radeonfb_pci_register END\n");
+       pr_debug("radeonfb_pci_register END\n");
 
        return 0;
 err_unmap_fb:
index 7db9de6817169c40724bc554b580e93b57a6385e..f9e7c29ad9bf8eb516c8d386c460426f7cfcd925 100644 (file)
@@ -1,3 +1,5 @@
+#include "radeonfb.h"
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/delay.h>
@@ -11,7 +13,6 @@
 #include <asm/io.h>
 
 #include <video/radeon.h>
-#include "radeonfb.h"
 #include "../edid.h"
 
 static void radeon_gpio_setscl(void* data, int state)
@@ -77,7 +78,7 @@ static int radeon_setup_i2c_bus(struct radeon_i2c_chan *chan, const char *name)
        chan->algo.setscl               = radeon_gpio_setscl;
        chan->algo.getsda               = radeon_gpio_getsda;
        chan->algo.getscl               = radeon_gpio_getscl;
-       chan->algo.udelay               = 40;
+       chan->algo.udelay               = 10;
        chan->algo.timeout              = 20;
        chan->algo.data                 = chan; 
        
@@ -148,21 +149,21 @@ int radeon_probe_i2c_connector(struct radeonfb_info *rinfo, int conn,
        if (out_edid)
                *out_edid = edid;
        if (!edid) {
-               RTRACE("radeonfb: I2C (port %d) ... not found\n", conn);
+               pr_debug("radeonfb: I2C (port %d) ... not found\n", conn);
                return MT_NONE;
        }
        if (edid[0x14] & 0x80) {
                /* Fix detection using BIOS tables */
                if (rinfo->is_mobility /*&& conn == ddc_dvi*/ &&
                    (INREG(LVDS_GEN_CNTL) & LVDS_ON)) {
-                       RTRACE("radeonfb: I2C (port %d) ... found LVDS panel\n", conn);
+                       pr_debug("radeonfb: I2C (port %d) ... found LVDS panel\n", conn);
                        return MT_LCD;
                } else {
-                       RTRACE("radeonfb: I2C (port %d) ... found TMDS panel\n", conn);
+                       pr_debug("radeonfb: I2C (port %d) ... found TMDS panel\n", conn);
                        return MT_DFP;
                }
        }
-               RTRACE("radeonfb: I2C (port %d) ... found CRT display\n", conn);
+       pr_debug("radeonfb: I2C (port %d) ... found CRT display\n", conn);
        return MT_CRT;
 }
 
index 2030ed813429f802114fff22ff4919aabc858f26..b4d4b88afc093d752f8ffce2ccbc14109e234eea 100644 (file)
@@ -69,11 +69,11 @@ static int __devinit radeon_parse_montype_prop(struct device_node *dp, u8 **out_
        u8 *tmp;
         int i, mt = MT_NONE;  
        
-       RTRACE("analyzing OF properties...\n");
+       pr_debug("analyzing OF properties...\n");
        pmt = of_get_property(dp, "display-type", NULL);
        if (!pmt)
                return MT_NONE;
-       RTRACE("display-type: %s\n", pmt);
+       pr_debug("display-type: %s\n", pmt);
        /* OF says "LCD" for DFP as well, we discriminate from the caller of this
         * function
         */
@@ -117,7 +117,7 @@ static int __devinit radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_
 {
         struct device_node *dp;
 
-       RTRACE("radeon_probe_OF_head\n");
+       pr_debug("radeon_probe_OF_head\n");
 
         dp = rinfo->of_node;
         while (dp == NULL)
@@ -135,7 +135,7 @@ static int __devinit radeon_probe_OF_head(struct radeonfb_info *rinfo, int head_
                        if (!pname)
                                return MT_NONE;
                        len = strlen(pname);
-                       RTRACE("head: %s (letter: %c, head_no: %d)\n",
+                       pr_debug("head: %s (letter: %c, head_no: %d)\n",
                               pname, pname[len-1], head_no);
                        if (pname[len-1] == 'A' && head_no == 0) {
                                int mt = radeon_parse_montype_prop(dp, out_EDID, 0);
@@ -185,7 +185,7 @@ static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo)
                rinfo->panel_info.xres, rinfo->panel_info.yres);
 
        rinfo->panel_info.pwr_delay = BIOS_IN16(tmp + 44);
-       RTRACE("BIOS provided panel power delay: %d\n", rinfo->panel_info.pwr_delay);
+       pr_debug("BIOS provided panel power delay: %d\n", rinfo->panel_info.pwr_delay);
        if (rinfo->panel_info.pwr_delay > 2000 || rinfo->panel_info.pwr_delay <= 0)
                rinfo->panel_info.pwr_delay = 2000;
 
@@ -199,16 +199,16 @@ static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo)
            rinfo->panel_info.fbk_divider > 3) {
                rinfo->panel_info.use_bios_dividers = 1;
                printk(KERN_INFO "radeondb: BIOS provided dividers will be used\n");
-               RTRACE("ref_divider = %x\n", rinfo->panel_info.ref_divider);
-               RTRACE("post_divider = %x\n", rinfo->panel_info.post_divider);
-               RTRACE("fbk_divider = %x\n", rinfo->panel_info.fbk_divider);
+               pr_debug("ref_divider = %x\n", rinfo->panel_info.ref_divider);
+               pr_debug("post_divider = %x\n", rinfo->panel_info.post_divider);
+               pr_debug("fbk_divider = %x\n", rinfo->panel_info.fbk_divider);
        }
-       RTRACE("Scanning BIOS table ...\n");
+       pr_debug("Scanning BIOS table ...\n");
        for(i=0; i<32; i++) {
                tmp0 = BIOS_IN16(tmp+64+i*2);
                if (tmp0 == 0)
                        break;
-               RTRACE(" %d x %d\n", BIOS_IN16(tmp0), BIOS_IN16(tmp0+2));
+               pr_debug(" %d x %d\n", BIOS_IN16(tmp0), BIOS_IN16(tmp0+2));
                if ((BIOS_IN16(tmp0) == rinfo->panel_info.xres) &&
                    (BIOS_IN16(tmp0+2) == rinfo->panel_info.yres)) {
                        rinfo->panel_info.hblank = (BIOS_IN16(tmp0+17) - BIOS_IN16(tmp0+19)) * 8;
@@ -227,19 +227,19 @@ static int __devinit radeon_get_panel_info_BIOS(struct radeonfb_info *rinfo)
                        /* Mark panel infos valid */
                        rinfo->panel_info.valid = 1;
 
-                       RTRACE("Found panel in BIOS table:\n");
-                       RTRACE("  hblank: %d\n", rinfo->panel_info.hblank);
-                       RTRACE("  hOver_plus: %d\n", rinfo->panel_info.hOver_plus);
-                       RTRACE("  hSync_width: %d\n", rinfo->panel_info.hSync_width);
-                       RTRACE("  vblank: %d\n", rinfo->panel_info.vblank);
-                       RTRACE("  vOver_plus: %d\n", rinfo->panel_info.vOver_plus);
-                       RTRACE("  vSync_width: %d\n", rinfo->panel_info.vSync_width);
-                       RTRACE("  clock: %d\n", rinfo->panel_info.clock);
+                       pr_debug("Found panel in BIOS table:\n");
+                       pr_debug("  hblank: %d\n", rinfo->panel_info.hblank);
+                       pr_debug("  hOver_plus: %d\n", rinfo->panel_info.hOver_plus);
+                       pr_debug("  hSync_width: %d\n", rinfo->panel_info.hSync_width);
+                       pr_debug("  vblank: %d\n", rinfo->panel_info.vblank);
+                       pr_debug("  vOver_plus: %d\n", rinfo->panel_info.vOver_plus);
+                       pr_debug("  vSync_width: %d\n", rinfo->panel_info.vSync_width);
+                       pr_debug("  clock: %d\n", rinfo->panel_info.clock);
                                
                        return 1;
                }
        }
-       RTRACE("Didn't find panel in BIOS table !\n");
+       pr_debug("Didn't find panel in BIOS table !\n");
 
        return 0;
 }
@@ -271,18 +271,18 @@ static void __devinit radeon_parse_connector_info(struct radeonfb_info *rinfo)
         * DEBUG is enabled
         */
        chips = BIOS_IN8(offset++) >> 4;
-       RTRACE("%d chips in connector info\n", chips);
+       pr_debug("%d chips in connector info\n", chips);
        for (i = 0; i < chips; i++) {
                tmp = BIOS_IN8(offset++);
                connectors = tmp & 0x0f;
-               RTRACE(" - chip %d has %d connectors\n", tmp >> 4, connectors);
+               pr_debug(" - chip %d has %d connectors\n", tmp >> 4, connectors);
                for (conn = 0; ; conn++) {
                        tmp = BIOS_IN16(offset);
                        if (tmp == 0)
                                break;
                        offset += 2;
                        type = (tmp >> 12) & 0x0f;
-                       RTRACE("  * connector %d of type %d (%s) : %04x\n",
+                       pr_debug("  * connector %d of type %d (%s) : %04x\n",
                               conn, type, __conn_type_table[type], tmp);
                }
        }
@@ -449,7 +449,7 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo,
                 * a layout for each card ?
                 */
 
-               RTRACE("Using specified monitor layout: %s", monitor_layout);
+               pr_debug("Using specified monitor layout: %s", monitor_layout);
 #ifdef CONFIG_FB_RADEON_I2C
                if (!ignore_edid) {
                        if (rinfo->mon1_type != MT_NONE)
@@ -479,9 +479,9 @@ void __devinit radeon_probe_screens(struct radeonfb_info *rinfo,
                 * Auto-detecting display type (well... trying to ...)
                 */
                
-               RTRACE("Starting monitor auto detection...\n");
+               pr_debug("Starting monitor auto detection...\n");
 
-#if DEBUG && defined(CONFIG_FB_RADEON_I2C)
+#if defined(DEBUG) && defined(CONFIG_FB_RADEON_I2C)
                {
                        u8 *EDIDs[4] = { NULL, NULL, NULL, NULL };
                        int mon_types[4] = {MT_NONE, MT_NONE, MT_NONE, MT_NONE};
@@ -756,7 +756,7 @@ void __devinit radeon_check_modes(struct radeonfb_info *rinfo, const char *mode_
        if (!rinfo->panel_info.use_bios_dividers && rinfo->mon1_type != MT_CRT
            && rinfo->mon1_EDID) {
                struct fb_var_screeninfo var;
-               RTRACE("Parsing EDID data for panel info\n");
+               pr_debug("Parsing EDID data for panel info\n");
                if (fb_parse_edid(rinfo->mon1_EDID, &var) == 0) {
                        if (var.xres >= rinfo->panel_info.xres &&
                            var.yres >= rinfo->panel_info.yres)
@@ -776,7 +776,7 @@ void __devinit radeon_check_modes(struct radeonfb_info *rinfo, const char *mode_
        if (rinfo->mon1_type != MT_CRT && rinfo->panel_info.valid) {
                struct fb_var_screeninfo *var = &info->var;
 
-               RTRACE("Setting up default mode based on panel info\n");
+               pr_debug("Setting up default mode based on panel info\n");
                var->xres = rinfo->panel_info.xres;
                var->yres = rinfo->panel_info.yres;
                var->xres_virtual = rinfo->panel_info.xres;
@@ -824,7 +824,7 @@ void __devinit radeon_check_modes(struct radeonfb_info *rinfo, const char *mode_
                int                     dbsize;
                char                    modename[32];
 
-               RTRACE("Guessing panel info...\n");
+               pr_debug("Guessing panel info...\n");
                if (rinfo->panel_info.xres == 0 || rinfo->panel_info.yres == 0) {
                        u32 tmp = INREG(FP_HORZ_STRETCH) & HORZ_PANEL_SIZE;
                        rinfo->panel_info.xres = ((tmp >> HORZ_PANEL_SHIFT) + 1) * 8;
index 5eac1ce52e72001d078248026f501c6efb7082ad..c347e38cd0b084719437e8157477aa220ef9e6f6 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef __RADEONFB_H__
 #define __RADEONFB_H__
 
+#ifdef CONFIG_FB_RADEON_DEBUG
+#define DEBUG          1
+#endif
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -364,22 +368,6 @@ struct radeonfb_info {
 #define PRIMARY_MONITOR(rinfo) (rinfo->mon1_type)
 
 
-/*
- * Debugging stuffs
- */
-#ifdef CONFIG_FB_RADEON_DEBUG
-#define DEBUG          1
-#else
-#define DEBUG          0
-#endif
-
-#if DEBUG
-#define RTRACE         printk
-#else
-#define RTRACE         if(0) printk
-#endif
-
-
 /*
  * IO macros
  */
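
With the RTRACE() wrapper removed, the driver's tracing goes through pr_debug(), which only expands to a real printk when DEBUG is defined before the core kernel headers are pulled in. That is why radeonfb.h now defines DEBUG at its very top when CONFIG_FB_RADEON_DEBUG is set, and why the .c files above were reordered to include "radeonfb.h" ahead of the <linux/*.h> headers. A minimal illustration of the pattern (a sketch only, not part of this commit):

    #ifdef CONFIG_FB_RADEON_DEBUG
    #define DEBUG 1                        /* must come before the #includes */
    #endif
    #include <linux/kernel.h>              /* provides pr_debug() */

    static void probe_trace_example(void)
    {
            pr_debug("compiled in only when DEBUG is defined\n");
    }
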
index eefba3d0e4b9e52291d46de30cb71d223b87d951..49834a67a623351e3f27b5add2e1e2bbf6f114e9 100644 (file)
@@ -336,7 +336,7 @@ static int bfin_bf54x_fb_check_var(struct fb_var_screeninfo *var,
 {
 
        if (var->bits_per_pixel != LCD_BPP) {
-               pr_debug("%s: depth not supported: %u BPP\n", __FUNCTION__,
+               pr_debug("%s: depth not supported: %u BPP\n", __func__,
                         var->bits_per_pixel);
                return -EINVAL;
        }
@@ -345,7 +345,7 @@ static int bfin_bf54x_fb_check_var(struct fb_var_screeninfo *var,
            info->var.xres_virtual != var->xres_virtual ||
            info->var.yres_virtual != var->yres_virtual) {
                pr_debug("%s: Resolution not supported: X%u x Y%u \n",
-                        __FUNCTION__, var->xres, var->yres);
+                        __func__, var->xres, var->yres);
                return -EINVAL;
        }
 
@@ -355,7 +355,7 @@ static int bfin_bf54x_fb_check_var(struct fb_var_screeninfo *var,
 
        if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
                pr_debug("%s: Memory Limit requested yres_virtual = %u\n",
-                        __FUNCTION__, var->yres_virtual);
+                        __func__, var->yres_virtual);
                return -ENOMEM;
        }
 
@@ -652,7 +652,7 @@ static int __init bfin_bf54x_probe(struct platform_device *pdev)
                goto out7;
        }
 
-       if (request_irq(info->irq, (void *)bfin_bf54x_irq_error, IRQF_DISABLED,
+       if (request_irq(info->irq, bfin_bf54x_irq_error, IRQF_DISABLED,
                        "PPI ERROR", info) < 0) {
                printk(KERN_ERR DRIVER_NAME
                       ": unable to request PPI ERROR IRQ\n");
index 833b10c840646e69de4e95954b10a10ddcf5df03..275d9dab0c6108e2f56b73c6bb5d2da1d3fdedc2 100644 (file)
@@ -339,7 +339,7 @@ static int __devinit bw2_probe(struct of_device *op, const struct of_device_id *
 
        dev_set_drvdata(&op->dev, info);
 
-       printk("%s: bwtwo at %lx:%lx\n",
+       printk(KERN_INFO "%s: bwtwo at %lx:%lx\n",
               dp->full_name, par->which_io, par->physbase);
 
        return 0;
@@ -399,10 +399,9 @@ static int __init bw2_init(void)
 
 static void __exit bw2_exit(void)
 {
-       return of_unregister_driver(&bw2_driver);
+       of_unregister_driver(&bw2_driver);
 }
 
-
 module_init(bw2_init);
 module_exit(bw2_exit);
 
index b07e419b12d289f79dfeb2a34f1e87c66b12c971..df03f3776dcc3b41503018c0cc7d8045bbe67d88 100644 (file)
      */
 
 static void
-bitcpy(unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem *src,
-       int src_idx, int bits, unsigned n, u32 bswapmask)
+bitcpy(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+               const unsigned long __iomem *src, int src_idx, int bits,
+               unsigned n, u32 bswapmask)
 {
        unsigned long first, last;
        int const shift = dst_idx-src_idx;
        int left, right;
 
-       first = fb_shifted_pixels_mask_long(dst_idx, bswapmask);
-       last = ~fb_shifted_pixels_mask_long((dst_idx+n) % bits, bswapmask);
+       first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
+       last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
 
        if (!shift) {
                // Same alignment for source and dest
@@ -202,8 +203,9 @@ bitcpy(unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem *src
      */
 
 static void
-bitcpy_rev(unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem *src,
-               int src_idx, int bits, unsigned n, u32 bswapmask)
+bitcpy_rev(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+               const unsigned long __iomem *src, int src_idx, int bits,
+               unsigned n, u32 bswapmask)
 {
        unsigned long first, last;
        int shift;
@@ -221,8 +223,9 @@ bitcpy_rev(unsigned long __iomem *dst, int dst_idx, const unsigned long __iomem
 
        shift = dst_idx-src_idx;
 
-       first = fb_shifted_pixels_mask_long(bits - 1 - dst_idx, bswapmask);
-       last = ~fb_shifted_pixels_mask_long(bits - 1 - ((dst_idx-n) % bits), bswapmask);
+       first = fb_shifted_pixels_mask_long(p, bits - 1 - dst_idx, bswapmask);
+       last = ~fb_shifted_pixels_mask_long(p, bits - 1 - ((dst_idx-n) % bits),
+                                           bswapmask);
 
        if (!shift) {
                // Same alignment for source and dest
@@ -404,7 +407,7 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
                        dst_idx &= (bytes - 1);
                        src += src_idx >> (ffs(bits) - 1);
                        src_idx &= (bytes - 1);
-                       bitcpy_rev(dst, dst_idx, src, src_idx, bits,
+                       bitcpy_rev(p, dst, dst_idx, src, src_idx, bits,
                                width*p->var.bits_per_pixel, bswapmask);
                }
        } else {
@@ -413,7 +416,7 @@ void cfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
                        dst_idx &= (bytes - 1);
                        src += src_idx >> (ffs(bits) - 1);
                        src_idx &= (bytes - 1);
-                       bitcpy(dst, dst_idx, src, src_idx, bits,
+                       bitcpy(p, dst, dst_idx, src, src_idx, bits,
                                width*p->var.bits_per_pixel, bswapmask);
                        dst_idx += bits_per_line;
                        src_idx += bits_per_line;
index 23d70a12e4daa273bbb16280d3c8f26c682901f2..64b35766b2a26bf28ccb1cfb55990c159008ca45 100644 (file)
      */
 
 static void
-bitfill_aligned(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
-               unsigned n, int bits, u32 bswapmask)
+bitfill_aligned(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+               unsigned long pat, unsigned n, int bits, u32 bswapmask)
 {
        unsigned long first, last;
 
        if (!n)
                return;
 
-       first = fb_shifted_pixels_mask_long(dst_idx, bswapmask);
-       last = ~fb_shifted_pixels_mask_long((dst_idx+n) % bits, bswapmask);
+       first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
+       last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
 
        if (dst_idx+n <= bits) {
                // Single word
@@ -93,16 +93,16 @@ bitfill_aligned(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
      */
 
 static void
-bitfill_unaligned(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
-                       int left, int right, unsigned n, int bits)
+bitfill_unaligned(struct fb_info *p, unsigned long __iomem *dst, int dst_idx,
+                 unsigned long pat, int left, int right, unsigned n, int bits)
 {
        unsigned long first, last;
 
        if (!n)
                return;
 
-       first = FB_SHIFT_HIGH(~0UL, dst_idx);
-       last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));
+       first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
+       last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
 
        if (dst_idx+n <= bits) {
                // Single word
@@ -147,8 +147,9 @@ bitfill_unaligned(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
      *  Aligned pattern invert using 32/64-bit memory accesses
      */
 static void
-bitfill_aligned_rev(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
-               unsigned n, int bits, u32 bswapmask)
+bitfill_aligned_rev(struct fb_info *p, unsigned long __iomem *dst,
+                   int dst_idx, unsigned long pat, unsigned n, int bits,
+                   u32 bswapmask)
 {
        unsigned long val = pat, dat;
        unsigned long first, last;
@@ -156,8 +157,8 @@ bitfill_aligned_rev(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
        if (!n)
                return;
 
-       first = fb_shifted_pixels_mask_long(dst_idx, bswapmask);
-       last = ~fb_shifted_pixels_mask_long((dst_idx+n) % bits, bswapmask);
+       first = fb_shifted_pixels_mask_long(p, dst_idx, bswapmask);
+       last = ~fb_shifted_pixels_mask_long(p, (dst_idx+n) % bits, bswapmask);
 
        if (dst_idx+n <= bits) {
                // Single word
@@ -217,16 +218,17 @@ bitfill_aligned_rev(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
      */
 
 static void
-bitfill_unaligned_rev(unsigned long __iomem *dst, int dst_idx, unsigned long pat,
-                       int left, int right, unsigned n, int bits)
+bitfill_unaligned_rev(struct fb_info *p, unsigned long __iomem *dst,
+                     int dst_idx, unsigned long pat, int left, int right,
+                     unsigned n, int bits)
 {
        unsigned long first, last, dat;
 
        if (!n)
                return;
 
-       first = FB_SHIFT_HIGH(~0UL, dst_idx);
-       last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));
+       first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
+       last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
 
        if (dst_idx+n <= bits) {
                // Single word
@@ -306,7 +308,8 @@ void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
                p->fbops->fb_sync(p);
        if (!left) {
                u32 bswapmask = fb_compute_bswapmask(p);
-               void (*fill_op32)(unsigned long __iomem *dst, int dst_idx,
+               void (*fill_op32)(struct fb_info *p,
+                                 unsigned long __iomem *dst, int dst_idx,
                                  unsigned long pat, unsigned n, int bits,
                                  u32 bswapmask) = NULL;
 
@@ -325,16 +328,17 @@ void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
                while (height--) {
                        dst += dst_idx >> (ffs(bits) - 1);
                        dst_idx &= (bits - 1);
-                       fill_op32(dst, dst_idx, pat, width*bpp, bits, bswapmask);
+                       fill_op32(p, dst, dst_idx, pat, width*bpp, bits,
+                                 bswapmask);
                        dst_idx += p->fix.line_length*8;
                }
        } else {
                int right;
                int r;
                int rot = (left-dst_idx) % bpp;
-               void (*fill_op)(unsigned long __iomem *dst, int dst_idx,
-                               unsigned long pat, int left, int right,
-                               unsigned n, int bits) = NULL;
+               void (*fill_op)(struct fb_info *p, unsigned long __iomem *dst,
+                               int dst_idx, unsigned long pat, int left,
+                               int right, unsigned n, int bits) = NULL;
 
                /* rotate pattern to correct start position */
                pat = pat << rot | pat >> (bpp-rot);
@@ -355,7 +359,7 @@ void cfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
                while (height--) {
                        dst += dst_idx >> (ffs(bits) - 1);
                        dst_idx &= (bits - 1);
-                       fill_op(dst, dst_idx, pat, left, right,
+                       fill_op(p, dst, dst_idx, pat, left, right,
                                width*bpp, bits);
                        r = (p->fix.line_length*8) % bpp;
                        pat = pat << (bpp-r) | pat >> r;
index f598907b42adfa71adaa5f237e07e7ab8cd1757d..baed57d3cfff38e53a9ea37fff3b82e943ba77cd 100644 (file)
 #define DEBUG
 
 #ifdef DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt,__FUNCTION__,## args)
+#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt,__func__,## args)
 #else
 #define DPRINTK(fmt, args...)
 #endif
 
-static const u32 cfb_tab8[] = {
-#if defined(__BIG_ENDIAN)
+static const u32 cfb_tab8_be[] = {
     0x00000000,0x000000ff,0x0000ff00,0x0000ffff,
     0x00ff0000,0x00ff00ff,0x00ffff00,0x00ffffff,
     0xff000000,0xff0000ff,0xff00ff00,0xff00ffff,
     0xffff0000,0xffff00ff,0xffffff00,0xffffffff
-#elif defined(__LITTLE_ENDIAN)
+};
+
+static const u32 cfb_tab8_le[] = {
     0x00000000,0xff000000,0x00ff0000,0xffff0000,
     0x0000ff00,0xff00ff00,0x00ffff00,0xffffff00,
     0x000000ff,0xff0000ff,0x00ff00ff,0xffff00ff,
     0x0000ffff,0xff00ffff,0x00ffffff,0xffffffff
-#else
-#error FIXME: No endianness??
-#endif
 };
 
-static const u32 cfb_tab16[] = {
-#if defined(__BIG_ENDIAN)
+static const u32 cfb_tab16_be[] = {
     0x00000000, 0x0000ffff, 0xffff0000, 0xffffffff
-#elif defined(__LITTLE_ENDIAN)
+};
+
+static const u32 cfb_tab16_le[] = {
     0x00000000, 0xffff0000, 0x0000ffff, 0xffffffff
-#else
-#error FIXME: No endianness??
-#endif
 };
 
 static const u32 cfb_tab32[] = {
@@ -98,7 +94,8 @@ static inline void color_imageblit(const struct fb_image *image,
                val = 0;
                
                if (start_index) {
-                       u32 start_mask = ~fb_shifted_pixels_mask_u32(start_index, bswapmask);
+                       u32 start_mask = ~fb_shifted_pixels_mask_u32(p,
+                                               start_index, bswapmask);
                        val = FB_READL(dst) & start_mask;
                        shift = start_index;
                }
@@ -108,20 +105,21 @@ static inline void color_imageblit(const struct fb_image *image,
                                color = palette[*src];
                        else
                                color = *src;
-                       color <<= FB_LEFT_POS(bpp);
-                       val |= FB_SHIFT_HIGH(color, shift ^ bswapmask);
+                       color <<= FB_LEFT_POS(p, bpp);
+                       val |= FB_SHIFT_HIGH(p, color, shift ^ bswapmask);
                        if (shift >= null_bits) {
                                FB_WRITEL(val, dst++);
        
                                val = (shift == null_bits) ? 0 : 
-                                       FB_SHIFT_LOW(color, 32 - shift);
+                                       FB_SHIFT_LOW(p, color, 32 - shift);
                        }
                        shift += bpp;
                        shift &= (32 - 1);
                        src++;
                }
                if (shift) {
-                       u32 end_mask = fb_shifted_pixels_mask_u32(shift, bswapmask);
+                       u32 end_mask = fb_shifted_pixels_mask_u32(p, shift,
+                                               bswapmask);
 
                        FB_WRITEL((FB_READL(dst) & end_mask) | val, dst);
                }
@@ -152,8 +150,8 @@ static inline void slow_imageblit(const struct fb_image *image, struct fb_info *
        u32 bswapmask = fb_compute_bswapmask(p);
 
        dst2 = (u32 __iomem *) dst1;
-       fgcolor <<= FB_LEFT_POS(bpp);
-       bgcolor <<= FB_LEFT_POS(bpp);
+       fgcolor <<= FB_LEFT_POS(p, bpp);
+       bgcolor <<= FB_LEFT_POS(p, bpp);
 
        for (i = image->height; i--; ) {
                shift = val = 0;
@@ -164,7 +162,8 @@ static inline void slow_imageblit(const struct fb_image *image, struct fb_info *
 
                /* write leading bits */
                if (start_index) {
-                       u32 start_mask = ~fb_shifted_pixels_mask_u32(start_index, bswapmask);
+                       u32 start_mask = ~fb_shifted_pixels_mask_u32(p,
+                                               start_index, bswapmask);
                        val = FB_READL(dst) & start_mask;
                        shift = start_index;
                }
@@ -172,13 +171,13 @@ static inline void slow_imageblit(const struct fb_image *image, struct fb_info *
                while (j--) {
                        l--;
                        color = (*s & (1 << l)) ? fgcolor : bgcolor;
-                       val |= FB_SHIFT_HIGH(color, shift ^ bswapmask);
+                       val |= FB_SHIFT_HIGH(p, color, shift ^ bswapmask);
                        
                        /* Did the bitshift spill bits to the next long? */
                        if (shift >= null_bits) {
                                FB_WRITEL(val, dst++);
                                val = (shift == null_bits) ? 0 :
-                                       FB_SHIFT_LOW(color,32 - shift);
+                                       FB_SHIFT_LOW(p, color, 32 - shift);
                        }
                        shift += bpp;
                        shift &= (32 - 1);
@@ -187,7 +186,8 @@ static inline void slow_imageblit(const struct fb_image *image, struct fb_info *
 
                /* write trailing bits */
                if (shift) {
-                       u32 end_mask = fb_shifted_pixels_mask_u32(shift, bswapmask);
+                       u32 end_mask = fb_shifted_pixels_mask_u32(p, shift,
+                                               bswapmask);
 
                        FB_WRITEL((FB_READL(dst) & end_mask) | val, dst);
                }
@@ -223,13 +223,13 @@ static inline void fast_imageblit(const struct fb_image *image, struct fb_info *
        u32 __iomem *dst;
        const u32 *tab = NULL;
        int i, j, k;
-               
+
        switch (bpp) {
        case 8:
-               tab = cfb_tab8;
+               tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
                break;
        case 16:
-               tab = cfb_tab16;
+               tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
                break;
        case 32:
        default:
index fdc9f43ec30a73f5957cc118d91dc9711c870194..0db0fecba93b6718af12b5af3b053333c6783496 100644 (file)
@@ -556,7 +556,7 @@ static int __devinit cg14_probe(struct of_device *op, const struct of_device_id
 
        dev_set_drvdata(&op->dev, info);
 
-       printk("%s: cgfourteen at %lx:%lx, %dMB\n",
+       printk(KERN_INFO "%s: cgfourteen at %lx:%lx, %dMB\n",
               dp->full_name,
               par->iospace, par->physbase,
               par->ramsize >> 20);
@@ -605,7 +605,7 @@ static struct of_platform_driver cg14_driver = {
        .remove         = __devexit_p(cg14_remove),
 };
 
-int __init cg14_init(void)
+static int __init cg14_init(void)
 {
        if (fb_get_options("cg14fb", NULL))
                return -ENODEV;
@@ -613,7 +613,7 @@ int __init cg14_init(void)
        return of_register_driver(&cg14_driver, &of_bus_type);
 }
 
-void __exit cg14_exit(void)
+static void __exit cg14_exit(void)
 {
        of_unregister_driver(&cg14_driver);
 }
index a5c7fb331527c03038d7b42d73830de306fbab4f..010ea53978f822cdc7cafd9dce2f85c7ec9054b5 100644 (file)
@@ -419,7 +419,7 @@ static int __devinit cg3_probe(struct of_device *op,
 
        dev_set_drvdata(&op->dev, info);
 
-       printk("%s: cg3 at %lx:%lx\n",
+       printk(KERN_INFO "%s: cg3 at %lx:%lx\n",
               dp->full_name, par->which_io, par->physbase);
 
        return 0;
index 549891d76ef5dba2728d905ff75217b95cf26f70..fc90db6da65a94267e8e2e8f22865b5843abdabb 100644 (file)
@@ -781,7 +781,7 @@ static int __devinit cg6_probe(struct of_device *op,
 
        dev_set_drvdata(&op->dev, info);
 
-       printk("%s: CGsix [%s] at %lx:%lx\n",
+       printk(KERN_INFO "%s: CGsix [%s] at %lx:%lx\n",
               dp->full_name, info->fix.id,
               par->which_io, par->physbase);
 
index f7e2d5add83172239d2a805c500d2e6fcb353ec0..35ac9d956b3dee0c2359da291997189fc2bd6c8a 100644 (file)
@@ -81,7 +81,7 @@
 /* debug output */
 #ifdef CIRRUSFB_DEBUG
 #define DPRINTK(fmt, args...) \
-       printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+       printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
 #else
 #define DPRINTK(fmt, args...)
 #endif
@@ -91,7 +91,7 @@
 #define assert(expr) \
        if (!(expr)) { \
                printk("Assertion failed! %s,%s,%s,line=%d\n", \
-               #expr, __FILE__, __FUNCTION__, __LINE__); \
+               #expr, __FILE__, __func__, __LINE__); \
        }
 #else
 #define assert(expr)
@@ -3117,7 +3117,7 @@ static void bestclock(long freq, long *best, long *nom,
                                }
                        }
                }
-               d = ((143181 * n) + f - 1) / f;
+               d = DIV_ROUND_UP(143181 * n, f);
                if ((d >= 7) && (d <= 63)) {
                        if (d > 31)
                                d = (d / 2) * 2;
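
The DIV_ROUND_UP() conversions (here and in the fbcon hunk further down) are behaviour-preserving: the macro comes from <linux/kernel.h> and is defined as

    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

so DIV_ROUND_UP(143181 * n, f) computes exactly the old open-coded rounded-up division ((143181 * n) + f - 1) / f.
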
index 17b5267f44d7a2842fffe08e08708fa2e63f441e..9f8a389dc7aedaae23368286f60ce3c973ae5bc9 100644 (file)
@@ -381,7 +381,7 @@ int __init clps711xfb_init(void)
 
        /* Register the /proc entries. */
        clps7111fb_backlight_proc_entry = create_proc_entry("backlight", 0444,
-               &proc_root);
+               NULL);
        if (clps7111fb_backlight_proc_entry == NULL) {
                printk("Couldn't create the /proc entry for the backlight.\n");
                return -EINVAL;
index 022282494d3fecee136f47d831e2dd4e534fe62d..ad31983b43eb6a169a100b2846e96f1554e32b1b 100644 (file)
@@ -92,7 +92,7 @@
 #include "fbcon.h"
 
 #ifdef FBCONDEBUG
-#  define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+#  define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
 #else
 #  define DPRINTK(fmt, args...)
 #endif
@@ -620,8 +620,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
        if (fb_get_color_depth(&info->var, &info->fix) == 1)
                erase &= ~0x400;
        logo_height = fb_prepare_logo(info, ops->rotate);
-       logo_lines = (logo_height + vc->vc_font.height - 1) /
-               vc->vc_font.height;
+       logo_lines = DIV_ROUND_UP(logo_height, vc->vc_font.height);
        q = (unsigned short *) (vc->vc_origin +
                                vc->vc_size_row * rows);
        step = logo_lines * cols;
@@ -1882,7 +1881,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
                        scr_memsetw((unsigned short *) (vc->vc_origin +
                                                        vc->vc_size_row *
                                                        (b - count)),
-                                   vc->vc_video_erase_char,
+                                   vc->vc_scrl_erase_char,
                                    vc->vc_size_row * count);
                        return 1;
                        break;
@@ -1954,7 +1953,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
                        scr_memsetw((unsigned short *) (vc->vc_origin +
                                                        vc->vc_size_row *
                                                        (b - count)),
-                                   vc->vc_video_erase_char,
+                                   vc->vc_scrl_erase_char,
                                    vc->vc_size_row * count);
                        return 1;
                }
@@ -1973,7 +1972,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
                        scr_memsetw((unsigned short *) (vc->vc_origin +
                                                        vc->vc_size_row *
                                                        t),
-                                   vc->vc_video_erase_char,
+                                   vc->vc_scrl_erase_char,
                                    vc->vc_size_row * count);
                        return 1;
                        break;
@@ -2043,7 +2042,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
                        scr_memsetw((unsigned short *) (vc->vc_origin +
                                                        vc->vc_size_row *
                                                        t),
-                                   vc->vc_video_erase_char,
+                                   vc->vc_scrl_erase_char,
                                    vc->vc_size_row * count);
                        return 1;
                }
index 3706307e70ed271ddc84d70f1c02ef66c9ff604e..0135e03954562713a83b8839b3cd4f57fc21de3b 100644 (file)
@@ -104,10 +104,14 @@ struct fbcon_ops {
 #define attr_blink(s) \
        ((s) & 0x8000)
        
-#define mono_col(info)                                                 \
-       (~(0xfff << (max((info)->var.green.length,                      \
-                        max((info)->var.red.length,                    \
-                            (info)->var.blue.length)))) & 0xff)
+
+static inline int mono_col(const struct fb_info *info)
+{
+       __u32 max_len;
+       max_len = max(info->var.green.length, info->var.red.length);
+       max_len = max(info->var.blue.length, max_len);
+       return ~(0xfff << (max_len & 0xff));
+}
 
 static inline int attr_col_ec(int shift, struct vc_data *vc,
                              struct fb_info *info, int is_fg)
index bd8d995fe25de13a1a0fa7444ba35eef58ded727..38a296bbdfc97a9a67ba1c0d74d2a81357698989 100644 (file)
@@ -531,7 +531,7 @@ static void mdacon_cursor(struct vc_data *c, int mode)
 
 static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines)
 {
-       u16 eattr = mda_convert_attr(c->vc_video_erase_char);
+       u16 eattr = mda_convert_attr(c->vc_scrl_erase_char);
 
        if (!lines)
                return 0;
index 67a682d6cc7bb03ba043753d4585928b248f1b6f..a11cc2fdd4cd5eb59eb714652b108c3f825b507c 100644 (file)
@@ -170,12 +170,12 @@ static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count)
     switch (dir) {
     case SM_UP:
        sti_bmove(sti, t + count, 0, t, 0, b - t - count, conp->vc_cols);
-       sti_clear(sti, b - count, 0, count, conp->vc_cols, conp->vc_video_erase_char);
+       sti_clear(sti, b - count, 0, count, conp->vc_cols, conp->vc_scrl_erase_char);
        break;
 
     case SM_DOWN:
        sti_bmove(sti, t, 0, t + count, 0, b - t - count, conp->vc_cols);
-       sti_clear(sti, t, 0, count, conp->vc_cols, conp->vc_video_erase_char);
+       sti_clear(sti, t, 0, count, conp->vc_cols, conp->vc_scrl_erase_char);
        break;
     }
 
index 6df29a62d7202552d63ced7bfc588ef8854d6c49..bd1f57b259d9ce50d470b192e021ad95461b8e72 100644 (file)
@@ -1350,7 +1350,7 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
                } else
                        c->vc_origin += delta;
                scr_memsetw((u16 *) (c->vc_origin + c->vc_screenbuf_size -
-                                    delta), c->vc_video_erase_char,
+                                    delta), c->vc_scrl_erase_char,
                            delta);
        } else {
                if (oldo - delta < vga_vram_base) {
@@ -1363,7 +1363,7 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir,
                } else
                        c->vc_origin -= delta;
                c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
-               scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char,
+               scr_memsetw((u16 *) (c->vc_origin), c->vc_scrl_erase_char,
                            delta);
        }
        c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size;
index a2a0618d86a54aa34ed7a45364e559bcc040636e..1db622192bde2252553eb91ce69123d6fc69512f 100644 (file)
@@ -94,41 +94,44 @@ static inline unsigned long fb_rev_pixels_in_long(unsigned long val,
        return val;
 }
 
-static inline u32 fb_shifted_pixels_mask_u32(u32 index, u32 bswapmask)
+static inline u32 fb_shifted_pixels_mask_u32(struct fb_info *p, u32 index,
+                                            u32 bswapmask)
 {
        u32 mask;
 
        if (!bswapmask) {
-               mask = FB_SHIFT_HIGH(~(u32)0, index);
+               mask = FB_SHIFT_HIGH(p, ~(u32)0, index);
        } else {
-               mask = 0xff << FB_LEFT_POS(8);
-               mask = FB_SHIFT_LOW(mask, index & (bswapmask)) & mask;
-               mask = FB_SHIFT_HIGH(mask, index & ~(bswapmask));
+               mask = 0xff << FB_LEFT_POS(p, 8);
+               mask = FB_SHIFT_LOW(p, mask, index & (bswapmask)) & mask;
+               mask = FB_SHIFT_HIGH(p, mask, index & ~(bswapmask));
 #if defined(__i386__) || defined(__x86_64__)
                /* Shift argument is limited to 0 - 31 on x86 based CPU's */
                if(index + bswapmask < 32)
 #endif
-                       mask |= FB_SHIFT_HIGH(~(u32)0,
+                       mask |= FB_SHIFT_HIGH(p, ~(u32)0,
                                        (index + bswapmask) & ~(bswapmask));
        }
        return mask;
 }
 
-static inline unsigned long fb_shifted_pixels_mask_long(u32 index, u32 bswapmask)
+static inline unsigned long fb_shifted_pixels_mask_long(struct fb_info *p,
+                                                       u32 index,
+                                                       u32 bswapmask)
 {
        unsigned long mask;
 
        if (!bswapmask) {
-               mask = FB_SHIFT_HIGH(~0UL, index);
+               mask = FB_SHIFT_HIGH(p, ~0UL, index);
        } else {
-               mask = 0xff << FB_LEFT_POS(8);
-               mask = FB_SHIFT_LOW(mask, index & (bswapmask)) & mask;
-               mask = FB_SHIFT_HIGH(mask, index & ~(bswapmask));
+               mask = 0xff << FB_LEFT_POS(p, 8);
+               mask = FB_SHIFT_LOW(p, mask, index & (bswapmask)) & mask;
+               mask = FB_SHIFT_HIGH(p, mask, index & ~(bswapmask));
 #if defined(__i386__) || defined(__x86_64__)
                /* Shift argument is limited to 0 - 31 on x86 based CPU's */
                if(index + bswapmask < BITS_PER_LONG)
 #endif
-                       mask |= FB_SHIFT_HIGH(~0UL,
+                       mask |= FB_SHIFT_HIGH(p, ~0UL,
                                        (index + bswapmask) & ~(bswapmask));
        }
        return mask;
@@ -158,8 +161,8 @@ static inline unsigned long fb_rev_pixels_in_long(unsigned long val,
        return val;
 }
 
-#define fb_shifted_pixels_mask_u32(i, b) FB_SHIFT_HIGH(~(u32)0, (i))
-#define fb_shifted_pixels_mask_long(i, b) FB_SHIFT_HIGH(~0UL, (i))
+#define fb_shifted_pixels_mask_u32(p, i, b) FB_SHIFT_HIGH((p), ~(u32)0, (i))
+#define fb_shifted_pixels_mask_long(p, i, b) FB_SHIFT_HIGH((p), ~0UL, (i))
 #define fb_compute_bswapmask(...) 0
 
 #endif  /* CONFIG_FB_CFB_REV_PIXELS_IN_BYTE */
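
Threading struct fb_info through these mask helpers (and through the cfb_copyarea/cfb_fillrect/cfb_imageblit routines earlier in the diff) lets the pixel-shift direction be decided per framebuffer via fb_be_math() rather than at compile time, which the FBINFO_FOREIGN_ENDIAN handling added to fbmem.c below depends on. Roughly, the underlying macros now behave like this sketch (illustrative, not the literal <linux/fb.h> definitions):

    /* big-endian pixel math shifts toward the MSB, little-endian toward the LSB */
    #define FB_SHIFT_HIGH(p, val, bits) \
            (fb_be_math(p) ? (val) >> (bits) : (val) << (bits))
    #define FB_SHIFT_LOW(p, val, bits) \
            (fb_be_math(p) ? (val) << (bits) : (val) >> (bits))
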
index 01072f4b3e8fef2c26d10dea5d64c974e8a695ad..776f7fcd2fbf4f508a0bb72398391aeb8f5a32d0 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/init.h>
 #include <linux/linux_logo.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/console.h>
 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
@@ -632,27 +633,51 @@ int fb_prepare_logo(struct fb_info *info, int rotate) { return 0; }
 int fb_show_logo(struct fb_info *info, int rotate) { return 0; }
 #endif /* CONFIG_LOGO */
 
-static int fbmem_read_proc(char *buf, char **start, off_t offset,
-                          int len, int *eof, void *private)
+static void *fb_seq_start(struct seq_file *m, loff_t *pos)
 {
-       struct fb_info **fi;
-       int clen;
-
-       clen = 0;
-       for (fi = registered_fb; fi < &registered_fb[FB_MAX] && clen < 4000;
-            fi++)
-               if (*fi)
-                       clen += sprintf(buf + clen, "%d %s\n",
-                                       (*fi)->node,
-                                       (*fi)->fix.id);
-       *start = buf + offset;
-       if (clen > offset)
-               clen -= offset;
-       else
-               clen = 0;
-       return clen < len ? clen : len;
+       return (*pos < FB_MAX) ? pos : NULL;
+}
+
+static void *fb_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       (*pos)++;
+       return (*pos < FB_MAX) ? pos : NULL;
+}
+
+static void fb_seq_stop(struct seq_file *m, void *v)
+{
+}
+
+static int fb_seq_show(struct seq_file *m, void *v)
+{
+       int i = *(loff_t *)v;
+       struct fb_info *fi = registered_fb[i];
+
+       if (fi)
+               seq_printf(m, "%d %s\n", fi->node, fi->fix.id);
+       return 0;
+}
+
+static const struct seq_operations proc_fb_seq_ops = {
+       .start  = fb_seq_start,
+       .next   = fb_seq_next,
+       .stop   = fb_seq_stop,
+       .show   = fb_seq_show,
+};
+
+static int proc_fb_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &proc_fb_seq_ops);
 }
 
+static const struct file_operations fb_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = proc_fb_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+
 static ssize_t
 fb_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
@@ -1057,7 +1082,7 @@ fb_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
        case FBIOPUT_CON2FBMAP:
                if (copy_from_user(&con2fb, argp, sizeof(con2fb)))
                        return - EFAULT;
-               if (con2fb.console < 0 || con2fb.console > MAX_NR_CONSOLES)
+               if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
                    return -EINVAL;
                if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
                    return -EINVAL;
@@ -1352,6 +1377,32 @@ static const struct file_operations fb_fops = {
 
 struct class *fb_class;
 EXPORT_SYMBOL(fb_class);
+
+static int fb_check_foreignness(struct fb_info *fi)
+{
+       const bool foreign_endian = fi->flags & FBINFO_FOREIGN_ENDIAN;
+
+       fi->flags &= ~FBINFO_FOREIGN_ENDIAN;
+
+#ifdef __BIG_ENDIAN
+       fi->flags |= foreign_endian ? 0 : FBINFO_BE_MATH;
+#else
+       fi->flags |= foreign_endian ? FBINFO_BE_MATH : 0;
+#endif /* __BIG_ENDIAN */
+
+       if (fi->flags & FBINFO_BE_MATH && !fb_be_math(fi)) {
+               pr_err("%s: enable CONFIG_FB_BIG_ENDIAN to "
+                      "support this framebuffer\n", fi->fix.id);
+               return -ENOSYS;
+       } else if (!(fi->flags & FBINFO_BE_MATH) && fb_be_math(fi)) {
+               pr_err("%s: enable CONFIG_FB_LITTLE_ENDIAN to "
+                      "support this framebuffer\n", fi->fix.id);
+               return -ENOSYS;
+       }
+
+       return 0;
+}
+
 /**
  *     register_framebuffer - registers a frame buffer device
  *     @fb_info: frame buffer info structure
@@ -1371,6 +1422,10 @@ register_framebuffer(struct fb_info *fb_info)
 
        if (num_registered_fb == FB_MAX)
                return -ENXIO;
+
+       if (fb_check_foreignness(fb_info))
+               return -ENOSYS;
+
        num_registered_fb++;
        for (i = 0 ; i < FB_MAX; i++)
                if (!registered_fb[i])
@@ -1503,7 +1558,7 @@ void fb_set_suspend(struct fb_info *info, int state)
 static int __init
 fbmem_init(void)
 {
-       create_proc_read_entry("fb", 0, NULL, fbmem_read_proc, NULL);
+       proc_create("fb", 0, NULL, &fb_proc_fops);
 
        if (register_chrdev(FB_MAJOR,"fb",&fb_fops))
                printk("unable to get major %d for fb devs\n", FB_MAJOR);
index d7e24889650e25e83d3b1ebf82bbb3ccd08bb3ff..93dca3e2aa502d7537aca3fbae7069af80b4f6eb 100644 (file)
@@ -32,7 +32,6 @@
 static int ffb_setcolreg(unsigned, unsigned, unsigned, unsigned,
                         unsigned, struct fb_info *);
 static int ffb_blank(int, struct fb_info *);
-static void ffb_init_fix(struct fb_info *);
 
 static void ffb_imageblit(struct fb_info *, const struct fb_image *);
 static void ffb_fillrect(struct fb_info *, const struct fb_fillrect *);
@@ -1001,7 +1000,7 @@ static int __devinit ffb_probe(struct of_device *op,
 
        dev_set_drvdata(&op->dev, info);
 
-       printk("%s: %s at %016lx, type %d, "
+       printk(KERN_INFO "%s: %s at %016lx, type %d, "
               "DAC pnum[%x] rev[%d] manuf_rev[%d]\n",
               dp->full_name,
               ((par->flags & FFB_FLAG_AFB) ? "AFB" : "FFB"),
@@ -1062,7 +1061,7 @@ static struct of_platform_driver ffb_driver = {
        .remove         = __devexit_p(ffb_remove),
 };
 
-int __init ffb_init(void)
+static int __init ffb_init(void)
 {
        if (fb_get_options("ffb", NULL))
                return -ENODEV;
@@ -1070,7 +1069,7 @@ int __init ffb_init(void)
        return of_register_driver(&ffb_driver, &of_bus_type);
 }
 
-void __exit ffb_exit(void)
+static void __exit ffb_exit(void)
 {
        of_unregister_driver(&ffb_driver);
 }
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
new file mode 100644 (file)
index 0000000..b50bb03
--- /dev/null
@@ -0,0 +1,1721 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ *  Freescale DIU Frame Buffer device driver
+ *
+ *  Authors: Hongjun Chen <hong-jun.chen@freescale.com>
+ *           Paul Widmer <paul.widmer@freescale.com>
+ *           Srikanth Srinivasan <srikanth.srinivasan@freescale.com>
+ *           York Sun <yorksun@freescale.com>
+ *
+ *   Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+
+#include <linux/of_platform.h>
+
+#include <sysdev/fsl_soc.h>
+#include "fsl-diu-fb.h"
+
+/*
+ * These parameters give default parameters
+ * for video output 1024x768,
+ * FIXME - change timing to proper amounts
+ * hsync 31.5kHz, vsync 60Hz
+ */
+static struct fb_videomode __devinitdata fsl_diu_default_mode = {
+       .refresh        = 60,
+       .xres           = 1024,
+       .yres           = 768,
+       .pixclock       = 15385,
+       .left_margin    = 160,
+       .right_margin   = 24,
+       .upper_margin   = 29,
+       .lower_margin   = 3,
+       .hsync_len      = 136,
+       .vsync_len      = 6,
+       .sync           = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+       .vmode          = FB_VMODE_NONINTERLACED
+};
+
+static struct fb_videomode __devinitdata fsl_diu_mode_db[] = {
+       {
+               .name           = "1024x768-60",
+               .refresh        = 60,
+               .xres           = 1024,
+               .yres           = 768,
+               .pixclock       = 15385,
+               .left_margin    = 160,
+               .right_margin   = 24,
+               .upper_margin   = 29,
+               .lower_margin   = 3,
+               .hsync_len      = 136,
+               .vsync_len      = 6,
+               .sync           = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+               .vmode          = FB_VMODE_NONINTERLACED
+       },
+       {
+               .name           = "1024x768-70",
+               .refresh        = 70,
+               .xres           = 1024,
+               .yres           = 768,
+               .pixclock       = 16886,
+               .left_margin    = 3,
+               .right_margin   = 3,
+               .upper_margin   = 2,
+               .lower_margin   = 2,
+               .hsync_len      = 40,
+               .vsync_len      = 18,
+               .sync           = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+               .vmode          = FB_VMODE_NONINTERLACED
+       },
+       {
+               .name           = "1024x768-75",
+               .refresh        = 75,
+               .xres           = 1024,
+               .yres           = 768,
+               .pixclock       = 15009,
+               .left_margin    = 3,
+               .right_margin   = 3,
+               .upper_margin   = 2,
+               .lower_margin   = 2,
+               .hsync_len      = 80,
+               .vsync_len      = 32,
+               .sync           = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+               .vmode          = FB_VMODE_NONINTERLACED
+       },
+       {
+               .name           = "1280x1024-60",
+               .refresh        = 60,
+               .xres           = 1280,
+               .yres           = 1024,
+               .pixclock       = 9375,
+               .left_margin    = 38,
+               .right_margin   = 128,
+               .upper_margin   = 2,
+               .lower_margin   = 7,
+               .hsync_len      = 216,
+               .vsync_len      = 37,
+               .sync           = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+               .vmode          = FB_VMODE_NONINTERLACED
+       },
+       {
+               .name           = "1280x1024-70",
+               .refresh        = 70,
+               .xres           = 1280,
+               .yres           = 1024,
+               .pixclock       = 9380,
+               .left_margin    = 6,
+               .right_margin   = 6,
+               .upper_margin   = 4,
+               .lower_margin   = 4,
+               .hsync_len      = 60,
+               .vsync_len      = 94,
+               .sync           = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+               .vmode          = FB_VMODE_NONINTERLACED
+       },
+       {
+               .name           = "1280x1024-75",
+               .refresh        = 75,
+               .xres           = 1280,
+               .yres           = 1024,
+               .pixclock       = 9380,
+               .left_margin    = 6,
+               .right_margin   = 6,
+               .upper_margin   = 4,
+               .lower_margin   = 4,
+               .hsync_len      = 60,
+               .vsync_len      = 15,
+               .sync           = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+               .vmode          = FB_VMODE_NONINTERLACED
+       },
+       {
+               .name           = "320x240",            /* for AOI only */
+               .refresh        = 60,
+               .xres           = 320,
+               .yres           = 240,
+               .pixclock       = 15385,
+               .left_margin    = 0,
+               .right_margin   = 0,
+               .upper_margin   = 0,
+               .lower_margin   = 0,
+               .hsync_len      = 0,
+               .vsync_len      = 0,
+               .sync           = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+               .vmode          = FB_VMODE_NONINTERLACED
+       },
+       {
+               .name           = "1280x480-60",
+               .refresh        = 60,
+               .xres           = 1280,
+               .yres           = 480,
+               .pixclock       = 18939,
+               .left_margin    = 353,
+               .right_margin   = 47,
+               .upper_margin   = 39,
+               .lower_margin   = 4,
+               .hsync_len      = 8,
+               .vsync_len      = 2,
+               .sync           = FB_SYNC_COMP_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+               .vmode          = FB_VMODE_NONINTERLACED
+       },
+};
+
+static char *fb_mode = "1024x768-32@60";
+static unsigned long default_bpp = 32;
+static int monitor_port;
+
+#if defined(CONFIG_NOT_COHERENT_CACHE)
+static u8 *coherence_data;
+static size_t coherence_data_size;
+static unsigned int d_cache_line_size;
+#endif
+
+static DEFINE_SPINLOCK(diu_lock);
+
+struct fsl_diu_data {
+       struct fb_info *fsl_diu_info[FSL_AOI_NUM - 1];
+                               /* FSL_AOI_NUM has one dummy AOI */
+       struct device_attribute dev_attr;
+       struct diu_ad *dummy_ad;
+       void *dummy_aoi_virt;
+       unsigned int irq;
+       int fb_enabled;
+       int monitor_port;
+};
+
+struct mfb_info {
+       int index;
+       int type;
+       char *id;
+       int registered;
+       int blank;
+       unsigned long pseudo_palette[16];
+       struct diu_ad *ad;
+       int cursor_reset;
+       unsigned char g_alpha;
+       unsigned int count;
+       int x_aoi_d;            /* aoi display x offset to physical screen */
+       int y_aoi_d;            /* aoi display y offset to physical screen */
+       struct fsl_diu_data *parent;
+};
+
+
+static struct mfb_info mfb_template[] = {
+       {               /* AOI 0 for plane 0 */
+       .index = 0,
+       .type = MFB_TYPE_OUTPUT,
+       .id = "Panel0",
+       .registered = 0,
+       .count = 0,
+       .x_aoi_d = 0,
+       .y_aoi_d = 0,
+       },
+       {               /* AOI 0 for plane 1 */
+       .index = 1,
+       .type = MFB_TYPE_OUTPUT,
+       .id = "Panel1 AOI0",
+       .registered = 0,
+       .g_alpha = 0xff,
+       .count = 0,
+       .x_aoi_d = 0,
+       .y_aoi_d = 0,
+       },
+       {               /* AOI 1 for plane 1 */
+       .index = 2,
+       .type = MFB_TYPE_OUTPUT,
+       .id = "Panel1 AOI1",
+       .registered = 0,
+       .g_alpha = 0xff,
+       .count = 0,
+       .x_aoi_d = 0,
+       .y_aoi_d = 480,
+       },
+       {               /* AOI 0 for plane 2 */
+       .index = 3,
+       .type = MFB_TYPE_OUTPUT,
+       .id = "Panel2 AOI0",
+       .registered = 0,
+       .g_alpha = 0xff,
+       .count = 0,
+       .x_aoi_d = 640,
+       .y_aoi_d = 0,
+       },
+       {               /* AOI 1 for plane 2 */
+       .index = 4,
+       .type = MFB_TYPE_OUTPUT,
+       .id = "Panel2 AOI1",
+       .registered = 0,
+       .g_alpha = 0xff,
+       .count = 0,
+       .x_aoi_d = 640,
+       .y_aoi_d = 480,
+       },
+};
+
+static struct diu_hw dr = {
+       .mode = MFB_MODE1,
+       .reg_lock = __SPIN_LOCK_UNLOCKED(diu_hw.reg_lock),
+};
+
+static struct diu_pool pool;
+
+/*     Allocate memory for the framebuffer. First try __get_free_pages(). If
+ *     that fails, fall back to rh_alloc(). The reason is that
+ *     __get_free_pages() cannot allocate very large buffers (more than 4MB).
+ *     We don't want to satisfy all allocations from the rheap, since small
+ *     allocations/deallocations would fragment it and make future large
+ *     allocations fail.
+ */
+
+void *fsl_diu_alloc(unsigned long size, phys_addr_t *phys)
+{
+       void *virt;
+
+       pr_debug("size=%lu\n", size);
+
+       virt = (void *)__get_free_pages(GFP_DMA | __GFP_ZERO, get_order(size));
+       if (virt) {
+               *phys = virt_to_phys(virt);
+               pr_debug("virt %p, phys=%llx\n", virt, (uint64_t) *phys);
+               return virt;
+       }
+       if (!diu_ops.diu_mem) {
+               printk(KERN_INFO "%s: no diu_mem."
+                       " To reserve more memory, put 'diufb=15M' "
+                       "in the command line\n", __func__);
+               return NULL;
+       }
+
+       virt = (void *)rh_alloc(&diu_ops.diu_rh_info, size, "DIU");
+       if (virt) {
+               *phys = virt_to_bus(virt);
+               memset(virt, 0, size);
+       }
+
+       pr_debug("rh virt=%p phys=%llx\n", virt, (uint64_t) *phys);
+
+       return virt;
+}
+
+void fsl_diu_free(void *p, unsigned long size)
+{
+       pr_debug("p=%p size=%lu\n", p, size);
+
+       if (!p)
+               return;
+
+       if ((p >= diu_ops.diu_mem) &&
+           (p < (diu_ops.diu_mem + diu_ops.diu_size))) {
+               pr_debug("rh\n");
+               rh_free(&diu_ops.diu_rh_info, (unsigned long) p);
+       } else {
+               pr_debug("dma\n");
+               free_pages((unsigned long)p, get_order(size));
+       }
+}
+
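+/*
+ * Enable a plane/AOI by hooking its area descriptor into the DIU
+ * descriptor chain for the corresponding plane.
+ */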
+static int fsl_diu_enable_panel(struct fb_info *info)
+{
+       struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
+       struct diu *hw = dr.diu_reg;
+       struct diu_ad *ad = mfbi->ad;
+       struct fsl_diu_data *machine_data = mfbi->parent;
+       int res = 0;
+
+       pr_debug("enable_panel index %d\n", mfbi->index);
+       if (mfbi->type != MFB_TYPE_OFF) {
+               switch (mfbi->index) {
+               case 0:                         /* plane 0 */
+                       if (hw->desc[0] != ad->paddr)
+                               out_be32(&hw->desc[0], ad->paddr);
+                       break;
+               case 1:                         /* plane 1 AOI 0 */
+                       cmfbi = machine_data->fsl_diu_info[2]->par;
+                       if (hw->desc[1] != ad->paddr) { /* AOI0 closed */
+                               if (cmfbi->count > 0)   /* AOI1 open */
+                                       ad->next_ad =
+                                               cpu_to_le32(cmfbi->ad->paddr);
+                               else
+                                       ad->next_ad = 0;
+                               out_be32(&hw->desc[1], ad->paddr);
+                       }
+                       break;
+               case 3:                         /* plane 2 AOI 0 */
+                       cmfbi = machine_data->fsl_diu_info[4]->par;
+                       if (hw->desc[2] != ad->paddr) { /* AOI0 closed */
+                               if (cmfbi->count > 0)   /* AOI1 open */
+                                       ad->next_ad =
+                                               cpu_to_le32(cmfbi->ad->paddr);
+                               else
+                                       ad->next_ad = 0;
+                               out_be32(&hw->desc[2], ad->paddr);
+                       }
+                       break;
+               case 2:                         /* plane 1 AOI 1 */
+                       pmfbi = machine_data->fsl_diu_info[1]->par;
+                       ad->next_ad = 0;
+                       if (hw->desc[1] == machine_data->dummy_ad->paddr)
+                               out_be32(&hw->desc[1], ad->paddr);
+                       else                                    /* AOI0 open */
+                               pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
+                       break;
+               case 4:                         /* plane 2 AOI 1 */
+                       pmfbi = machine_data->fsl_diu_info[3]->par;
+                       ad->next_ad = 0;
+                       if (hw->desc[2] == machine_data->dummy_ad->paddr)
+                               out_be32(&hw->desc[2], ad->paddr);
+                       else                            /* AOI0 was open */
+                               pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
+                       break;
+               default:
+                       res = -EINVAL;
+                       break;
+               }
+       } else
+               res = -EINVAL;
+       return res;
+}
+
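+/*
+ * Disable a plane/AOI by unhooking its area descriptor from the DIU
+ * descriptor chain; the dummy descriptor takes its place when the plane
+ * has no open AOI left.
+ */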
+static int fsl_diu_disable_panel(struct fb_info *info)
+{
+       struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
+       struct diu *hw = dr.diu_reg;
+       struct diu_ad *ad = mfbi->ad;
+       struct fsl_diu_data *machine_data = mfbi->parent;
+       int res = 0;
+
+       switch (mfbi->index) {
+       case 0:                                 /* plane 0 */
+               if (hw->desc[0] != machine_data->dummy_ad->paddr)
+                       out_be32(&hw->desc[0],
+                               machine_data->dummy_ad->paddr);
+               break;
+       case 1:                                 /* plane 1 AOI 0 */
+               cmfbi = machine_data->fsl_diu_info[2]->par;
+               if (cmfbi->count > 0)   /* AOI1 is open */
+                       out_be32(&hw->desc[1], cmfbi->ad->paddr);
+                                       /* move AOI1 to the first */
+               else                    /* AOI1 was closed */
+                       out_be32(&hw->desc[1],
+                               machine_data->dummy_ad->paddr);
+                                       /* close AOI 0 */
+               break;
+       case 3:                                 /* plane 2 AOI 0 */
+               cmfbi = machine_data->fsl_diu_info[4]->par;
+               if (cmfbi->count > 0)   /* AOI1 is open */
+                       out_be32(&hw->desc[2], cmfbi->ad->paddr);
+                                       /* move AOI1 to the first */
+               else                    /* AOI1 was closed */
+                       out_be32(&hw->desc[2],
+                               machine_data->dummy_ad->paddr);
+                                       /* close AOI 0 */
+               break;
+       case 2:                                 /* plane 1 AOI 1 */
+               pmfbi = machine_data->fsl_diu_info[1]->par;
+               if (hw->desc[1] != ad->paddr) {
+                               /* AOI1 is not the first in the chain */
+                       if (pmfbi->count > 0)
+                                       /* AOI0 is open, must be the first */
+                               pmfbi->ad->next_ad = 0;
+               } else                  /* AOI1 is the first in the chain */
+                       out_be32(&hw->desc[1], machine_data->dummy_ad->paddr);
+                                       /* close AOI 1 */
+               break;
+       case 4:                                 /* plane 2 AOI 1 */
+               pmfbi = machine_data->fsl_diu_info[3]->par;
+               if (hw->desc[2] != ad->paddr) {
+                               /* AOI1 is not the first in the chain */
+                       if (pmfbi->count > 0)
+                               /* AOI0 is open, must be the first */
+                               pmfbi->ad->next_ad = 0;
+               } else          /* AOI1 is the first in the chain */
+                       out_be32(&hw->desc[2], machine_data->dummy_ad->paddr);
+                               /* close AOI 1 */
+               break;
+       default:
+               res = -EINVAL;
+               break;
+       }
+
+       return res;
+}
+
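+/* Turn the DIU display controller on if it is not already enabled */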
+static void enable_lcdc(struct fb_info *info)
+{
+       struct diu *hw = dr.diu_reg;
+       struct mfb_info *mfbi = info->par;
+       struct fsl_diu_data *machine_data = mfbi->parent;
+
+       if (!machine_data->fb_enabled) {
+               out_be32(&hw->diu_mode, dr.mode);
+               machine_data->fb_enabled++;
+       }
+}
+
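+/* Turn the DIU display controller off */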
+static void disable_lcdc(struct fb_info *info)
+{
+       struct diu *hw = dr.diu_reg;
+       struct mfb_info *mfbi = info->par;
+       struct fsl_diu_data *machine_data = mfbi->parent;
+
+       if (machine_data->fb_enabled) {
+               out_be32(&hw->diu_mode, 0);
+               machine_data->fb_enabled = 0;
+       }
+}
+
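+/*
+ * Clamp an AOI's size and display offset so that it stays within the base
+ * plane and does not overlap the other AOI on the same plane.
+ */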
+static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
+                               struct fb_info *info)
+{
+       struct mfb_info *lower_aoi_mfbi, *upper_aoi_mfbi, *mfbi = info->par;
+       struct fsl_diu_data *machine_data = mfbi->parent;
+       int available_height, upper_aoi_bottom, index = mfbi->index;
+       int lower_aoi_is_open, upper_aoi_is_open;
+       __u32 base_plane_width, base_plane_height, upper_aoi_height;
+
+       base_plane_width = machine_data->fsl_diu_info[0]->var.xres;
+       base_plane_height = machine_data->fsl_diu_info[0]->var.yres;
+
+       switch (index) {
+       case 0:
+               if (mfbi->x_aoi_d != 0)
+                       mfbi->x_aoi_d = 0;
+               if (mfbi->y_aoi_d != 0)
+                       mfbi->y_aoi_d = 0;
+               break;
+       case 1:                 /* AOI 0 */
+       case 3:
+               lower_aoi_mfbi = machine_data->fsl_diu_info[index+1]->par;
+               lower_aoi_is_open = lower_aoi_mfbi->count > 0 ? 1 : 0;
+               if (var->xres > base_plane_width)
+                       var->xres = base_plane_width;
+               if ((mfbi->x_aoi_d + var->xres) > base_plane_width)
+                       mfbi->x_aoi_d = base_plane_width - var->xres;
+
+               if (lower_aoi_is_open)
+                       available_height = lower_aoi_mfbi->y_aoi_d;
+               else
+                       available_height = base_plane_height;
+               if (var->yres > available_height)
+                       var->yres = available_height;
+               if ((mfbi->y_aoi_d + var->yres) > available_height)
+                       mfbi->y_aoi_d = available_height - var->yres;
+               break;
+       case 2:                 /* AOI 1 */
+       case 4:
+               upper_aoi_mfbi = machine_data->fsl_diu_info[index-1]->par;
+               upper_aoi_height =
+                               machine_data->fsl_diu_info[index-1]->var.yres;
+               upper_aoi_bottom = upper_aoi_mfbi->y_aoi_d + upper_aoi_height;
+               upper_aoi_is_open = upper_aoi_mfbi->count > 0 ? 1 : 0;
+               if (var->xres > base_plane_width)
+                       var->xres = base_plane_width;
+               if ((mfbi->x_aoi_d + var->xres) > base_plane_width)
+                       mfbi->x_aoi_d = base_plane_width - var->xres;
+               if (mfbi->y_aoi_d < 0)
+                       mfbi->y_aoi_d = 0;
+               if (upper_aoi_is_open) {
+                       if (mfbi->y_aoi_d < upper_aoi_bottom)
+                               mfbi->y_aoi_d = upper_aoi_bottom;
+                       available_height = base_plane_height
+                                               - upper_aoi_bottom;
+               } else
+                       available_height = base_plane_height;
+               if (var->yres > available_height)
+                       var->yres = available_height;
+               if ((mfbi->y_aoi_d + var->yres) > base_plane_height)
+                       mfbi->y_aoi_d = base_plane_height - var->yres;
+               break;
+       }
+}
+/*
+ * Check whether the hardware supports the state requested by the var passed
+ * in. This function does not alter the hardware state! If the requested var
+ * is slightly off from what the hardware can support, we adjust the var
+ * passed in to what we can do. If the hardware doesn't support the mode
+ * change, -EINVAL will be returned by the upper layers.
+ */
+static int fsl_diu_check_var(struct fb_var_screeninfo *var,
+                               struct fb_info *info)
+{
+       unsigned long htotal, vtotal;
+
+       pr_debug("check_var xres: %d\n", var->xres);
+       pr_debug("check_var yres: %d\n", var->yres);
+
+       if (var->xres_virtual < var->xres)
+               var->xres_virtual = var->xres;
+       if (var->yres_virtual < var->yres)
+               var->yres_virtual = var->yres;
+
+       if (var->xoffset < 0)
+               var->xoffset = 0;
+
+       if (var->yoffset < 0)
+               var->yoffset = 0;
+
+       if (var->xoffset + info->var.xres > info->var.xres_virtual)
+               var->xoffset = info->var.xres_virtual - info->var.xres;
+
+       if (var->yoffset + info->var.yres > info->var.yres_virtual)
+               var->yoffset = info->var.yres_virtual - info->var.yres;
+
+       if ((var->bits_per_pixel != 32) && (var->bits_per_pixel != 24) &&
+           (var->bits_per_pixel != 16))
+               var->bits_per_pixel = default_bpp;
+
+       switch (var->bits_per_pixel) {
+       case 16:
+               var->red.length = 5;
+               var->red.offset = 11;
+               var->red.msb_right = 0;
+
+               var->green.length = 6;
+               var->green.offset = 5;
+               var->green.msb_right = 0;
+
+               var->blue.length = 5;
+               var->blue.offset = 0;
+               var->blue.msb_right = 0;
+
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               var->transp.msb_right = 0;
+               break;
+       case 24:
+               var->red.length = 8;
+               var->red.offset = 0;
+               var->red.msb_right = 0;
+
+               var->green.length = 8;
+               var->green.offset = 8;
+               var->green.msb_right = 0;
+
+               var->blue.length = 8;
+               var->blue.offset = 16;
+               var->blue.msb_right = 0;
+
+               var->transp.length = 0;
+               var->transp.offset = 0;
+               var->transp.msb_right = 0;
+               break;
+       case 32:
+               var->red.length = 8;
+               var->red.offset = 16;
+               var->red.msb_right = 0;
+
+               var->green.length = 8;
+               var->green.offset = 8;
+               var->green.msb_right = 0;
+
+               var->blue.length = 8;
+               var->blue.offset = 0;
+               var->blue.msb_right = 0;
+
+               var->transp.length = 8;
+               var->transp.offset = 24;
+               var->transp.msb_right = 0;
+
+               break;
+       }
+       /* If the pixclock is outside the supported range, then recalculate it
+        * for a 60Hz refresh rate, since that is supported by most monitors.
+        * Refer to Documentation/fb/ for the calculations.
+        */
+       if ((var->pixclock < MIN_PIX_CLK) || (var->pixclock > MAX_PIX_CLK)) {
+               htotal = var->xres + var->right_margin + var->hsync_len +
+                   var->left_margin;
+               vtotal = var->yres + var->lower_margin + var->vsync_len +
+                   var->upper_margin;
+               var->pixclock = (vtotal * htotal * 6UL) / 100UL;
+               var->pixclock = KHZ2PICOS(var->pixclock);
+               pr_debug("pixclock set for 60Hz refresh = %u ps\n",
+                       var->pixclock);
+       }
+
+       var->height = -1;
+       var->width = -1;
+       var->grayscale = 0;
+
+       /* Copy nonstd field to/from sync for fbset usage */
+       var->sync |= var->nonstd;
+       var->nonstd |= var->sync;
+
+       adjust_aoi_size_position(var, info);
+       return 0;
+}
+
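+/* Fill in the fixed screeninfo: id, line length, pixel format and panning */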
+static void set_fix(struct fb_info *info)
+{
+       struct fb_fix_screeninfo *fix = &info->fix;
+       struct fb_var_screeninfo *var = &info->var;
+       struct mfb_info *mfbi = info->par;
+
+       strncpy(fix->id, mfbi->id, sizeof(fix->id));
+       fix->line_length = var->xres_virtual * var->bits_per_pixel / 8;
+       fix->type = FB_TYPE_PACKED_PIXELS;
+       fix->accel = FB_ACCEL_NONE;
+       fix->visual = FB_VISUAL_TRUECOLOR;
+       fix->xpanstep = 1;
+       fix->ypanstep = 1;
+}
+
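+/*
+ * Reprogram the DIU for the current video mode: gamma and cursor tables,
+ * display size, sync timings and pixel clock, then re-enable the controller.
+ */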
+static void update_lcdc(struct fb_info *info)
+{
+       struct fb_var_screeninfo *var = &info->var;
+       struct mfb_info *mfbi = info->par;
+       struct fsl_diu_data *machine_data = mfbi->parent;
+       struct diu *hw;
+       int i, j;
+       char __iomem *cursor_base, *gamma_table_base;
+
+       u32 temp;
+
+       hw = dr.diu_reg;
+
+       if (mfbi->type == MFB_TYPE_OFF) {
+               fsl_diu_disable_panel(info);
+               return;
+       }
+
+       diu_ops.set_monitor_port(machine_data->monitor_port);
+       gamma_table_base = pool.gamma.vaddr;
+       cursor_base = pool.cursor.vaddr;
+       /* Prep for DIU init  - gamma table, cursor table */
+
+       for (i = 0; i <= 2; i++)
+          for (j = 0; j <= 255; j++)
+             *gamma_table_base++ = j;
+
+       diu_ops.set_gamma_table(machine_data->monitor_port, pool.gamma.vaddr);
+
+       pr_debug("update-lcdc: HW - %p, disabling DIU\n", hw);
+       disable_lcdc(info);
+
+       /* Program DIU registers */
+
+       out_be32(&hw->gamma, pool.gamma.paddr);
+       out_be32(&hw->cursor, pool.cursor.paddr);
+
+       out_be32(&hw->bgnd, 0x007F7F7F);        /* BGND */
+       out_be32(&hw->bgnd_wb, 0);              /* BGND_WB */
+       out_be32(&hw->disp_size, (var->yres << 16 | var->xres));
+                                               /* DISP SIZE */
+       pr_debug("DIU xres: %d\n", var->xres);
+       pr_debug("DIU yres: %d\n", var->yres);
+
+       out_be32(&hw->wb_size, 0); /* WB SIZE */
+       out_be32(&hw->wb_mem_addr, 0); /* WB MEM ADDR */
+
+       /* Horizontal and vertical configuration register */
+       temp = var->left_margin << 22 | /* BP_H */
+              var->hsync_len << 11 |   /* PW_H */
+              var->right_margin;       /* FP_H */
+
+       out_be32(&hw->hsyn_para, temp);
+
+       temp = var->upper_margin << 22 | /* BP_V */
+              var->vsync_len << 11 |    /* PW_V  */
+              var->lower_margin;        /* FP_V  */
+
+       out_be32(&hw->vsyn_para, temp);
+
+       pr_debug("DIU right_margin - %d\n", var->right_margin);
+       pr_debug("DIU left_margin - %d\n", var->left_margin);
+       pr_debug("DIU hsync_len - %d\n", var->hsync_len);
+       pr_debug("DIU upper_margin - %d\n", var->upper_margin);
+       pr_debug("DIU lower_margin - %d\n", var->lower_margin);
+       pr_debug("DIU vsync_len - %d\n", var->vsync_len);
+       pr_debug("DIU HSYNC - 0x%08x\n", hw->hsyn_para);
+       pr_debug("DIU VSYNC - 0x%08x\n", hw->vsyn_para);
+
+       diu_ops.set_pixel_clock(var->pixclock);
+
+       out_be32(&hw->syn_pol, 0);      /* SYNC SIGNALS POLARITY */
+       out_be32(&hw->thresholds, 0x00037800); /* The Thresholds */
+       out_be32(&hw->int_status, 0);   /* INTERRUPT STATUS */
+       out_be32(&hw->plut, 0x01F5F666);
+
+       /* Enable the DIU */
+       enable_lcdc(info);
+}
+
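+/* Allocate the framebuffer memory for one plane/AOI and record it in info */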
+static int map_video_memory(struct fb_info *info)
+{
+       phys_addr_t phys;
+
+       pr_debug("info->var.xres_virtual = %d\n", info->var.xres_virtual);
+       pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual);
+       pr_debug("info->fix.line_length  = %d\n", info->fix.line_length);
+
+       info->fix.smem_len = info->fix.line_length * info->var.yres_virtual;
+       pr_debug("MAP_VIDEO_MEMORY: smem_len = %d\n", info->fix.smem_len);
+       info->screen_base = fsl_diu_alloc(info->fix.smem_len, &phys);
+       if (!info->screen_base) {
+               printk(KERN_ERR "Unable to allocate fb memory\n");
+               return -ENOMEM;
+       }
+       info->fix.smem_start = (unsigned long) phys;
+       info->screen_size = info->fix.smem_len;
+
+       pr_debug("Allocated fb @ paddr=0x%08lx, size=%d.\n",
+                               info->fix.smem_start,
+               info->fix.smem_len);
+       pr_debug("screen base %p\n", info->screen_base);
+
+       return 0;
+}
+
+static void unmap_video_memory(struct fb_info *info)
+{
+       fsl_diu_free(info->screen_base, info->fix.smem_len);
+       info->screen_base = NULL;
+       info->fix.smem_start = 0;
+       info->fix.smem_len = 0;
+}
+
+/*
+ * Using the fb_var_screeninfo in fb_info we set the resolution of this
+ * particular framebuffer. This function alters the fb_fix_screeninfo stored
+ * in fb_info. It does not alter var in fb_info since we are using that
+ * data. This means we depend on the data in var inside fb_info to be
+ * supported by the hardware. fsl_diu_check_var is always called before
+ * fsl_diu_set_par to ensure this.
+ */
+static int fsl_diu_set_par(struct fb_info *info)
+{
+       unsigned long len;
+       struct fb_var_screeninfo *var = &info->var;
+       struct mfb_info *mfbi = info->par;
+       struct fsl_diu_data *machine_data = mfbi->parent;
+       struct diu_ad *ad = mfbi->ad;
+       struct diu *hw;
+
+       hw = dr.diu_reg;
+
+       set_fix(info);
+       mfbi->cursor_reset = 1;
+
+       len = info->var.yres_virtual * info->fix.line_length;
+       /* Alloc & dealloc each time resolution/bpp change */
+       if (len != info->fix.smem_len) {
+               if (info->fix.smem_start)
+                       unmap_video_memory(info);
+               pr_debug("SET PAR: smem_len = %d\n", info->fix.smem_len);
+
+               /* Memory allocation for framebuffer */
+               if (map_video_memory(info)) {
+                       printk(KERN_ERR "Unable to allocate fb memory 1\n");
+                       return -ENOMEM;
+               }
+       }
+
+       ad->pix_fmt =
+               diu_ops.get_pixel_format(var->bits_per_pixel,
+                                        machine_data->monitor_port);
+       ad->addr    = cpu_to_le32(info->fix.smem_start);
+       ad->src_size_g_alpha = cpu_to_le32((var->yres << 12) |
+                               var->xres) | mfbi->g_alpha;
+       /* FIXME: AOI should not be greater than display size */
+       ad->aoi_size    = cpu_to_le32((var->yres << 16) | var->xres);
+       ad->offset_xyi = 0;
+       ad->offset_xyd = cpu_to_le32((mfbi->y_aoi_d << 16) | mfbi->x_aoi_d);
+
+       /* Disable chroma keying function */
+       ad->ckmax_r = 0;
+       ad->ckmax_g = 0;
+       ad->ckmax_b = 0;
+
+       ad->ckmin_r = 255;
+       ad->ckmin_g = 255;
+       ad->ckmin_b = 255;
+
+       if (mfbi->index == 0)
+               update_lcdc(info);
+       return 0;
+}
+
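+/* Scale a 16-bit colour component down to a field of 'width' bits */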
+static inline __u32 CNVT_TOHW(__u32 val, __u32 width)
+{
+       return ((val<<width) + 0x7FFF - val)>>16;
+}
+
+/*
+ * Set a single color register. The values supplied have a 16-bit magnitude
+ * which needs to be scaled in this function for the hardware. Things to take
+ * into consideration are how many color registers, if any, are supported with
+ * the current color visual. With truecolor mode no color palettes are
+ * supported. Instead a pseudo palette is created, and the value is stored in
+ * pseudo_palette in struct fb_info. For pseudocolor mode we have a limited
+ * color palette.
+ */
+static int fsl_diu_setcolreg(unsigned regno, unsigned red, unsigned green,
+                          unsigned blue, unsigned transp, struct fb_info *info)
+{
+       int ret = 1;
+
+       /*
+        * If greyscale is true, then we convert the RGB value
+        * to greyscale no matter what visual we are using.
+        */
+       if (info->var.grayscale)
+               red = green = blue = (19595 * red + 38470 * green +
+                                     7471 * blue) >> 16;
+       switch (info->fix.visual) {
+       case FB_VISUAL_TRUECOLOR:
+               /*
+                * 16-bit True Colour.  We encode the RGB value
+                * according to the RGB bitfield information.
+                */
+               if (regno < 16) {
+                       u32 *pal = info->pseudo_palette;
+                       u32 v;
+
+                       red = CNVT_TOHW(red, info->var.red.length);
+                       green = CNVT_TOHW(green, info->var.green.length);
+                       blue = CNVT_TOHW(blue, info->var.blue.length);
+                       transp = CNVT_TOHW(transp, info->var.transp.length);
+
+                       v = (red << info->var.red.offset) |
+                           (green << info->var.green.offset) |
+                           (blue << info->var.blue.offset) |
+                           (transp << info->var.transp.offset);
+
+                       pal[regno] = v;
+                       ret = 0;
+               }
+               break;
+       case FB_VISUAL_STATIC_PSEUDOCOLOR:
+       case FB_VISUAL_PSEUDOCOLOR:
+               break;
+       }
+
+       return ret;
+}
+
+/*
+ * Pan (or wrap, depending on the `vmode' field) the display using the
+ * 'xoffset' and 'yoffset' fields of the 'var' structure. If the values
+ * don't fit, return -EINVAL.
+ */
+static int fsl_diu_pan_display(struct fb_var_screeninfo *var,
+                            struct fb_info *info)
+{
+       if ((info->var.xoffset == var->xoffset) &&
+           (info->var.yoffset == var->yoffset))
+               return 0;       /* No change, do nothing */
+
+       if (var->xoffset < 0 || var->yoffset < 0
+           || var->xoffset + info->var.xres > info->var.xres_virtual
+           || var->yoffset + info->var.yres > info->var.yres_virtual)
+               return -EINVAL;
+
+       info->var.xoffset = var->xoffset;
+       info->var.yoffset = var->yoffset;
+
+       if (var->vmode & FB_VMODE_YWRAP)
+               info->var.vmode |= FB_VMODE_YWRAP;
+       else
+               info->var.vmode &= ~FB_VMODE_YWRAP;
+
+       return 0;
+}
+
+/*
+ * Blank the screen if blank_mode != 0, else unblank. Return 0 if blanking
+ * succeeded, != 0 if un-/blanking failed.
+ * blank_mode == 2: suspend vsync
+ * blank_mode == 3: suspend hsync
+ * blank_mode == 4: powerdown
+ */
+static int fsl_diu_blank(int blank_mode, struct fb_info *info)
+{
+       struct mfb_info *mfbi = info->par;
+
+       mfbi->blank = blank_mode;
+
+       switch (blank_mode) {
+       case FB_BLANK_VSYNC_SUSPEND:
+       case FB_BLANK_HSYNC_SUSPEND:
+       /* FIXME: fixes to enable_panel and enable lcdc needed */
+       case FB_BLANK_NORMAL:
+       /*      fsl_diu_disable_panel(info);*/
+               break;
+       case FB_BLANK_POWERDOWN:
+       /*      disable_lcdc(info);     */
+               break;
+       case FB_BLANK_UNBLANK:
+       /*      fsl_diu_enable_panel(info);*/
+               break;
+       }
+
+       return 0;
+}
+
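+/*
+ * DIU-specific ioctls: pixel format, AOI display offset, global alpha,
+ * chroma keying and graphic window information.
+ */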
+static int fsl_diu_ioctl(struct fb_info *info, unsigned int cmd,
+                      unsigned long arg)
+{
+       struct mfb_info *mfbi = info->par;
+       struct diu_ad *ad = mfbi->ad;
+       struct mfb_chroma_key ck;
+       unsigned char global_alpha;
+       struct aoi_display_offset aoi_d;
+       __u32 pix_fmt;
+       void __user *buf = (void __user *)arg;
+
+       if (!arg)
+               return -EINVAL;
+       switch (cmd) {
+       case MFB_SET_PIXFMT:
+               if (copy_from_user(&pix_fmt, buf, sizeof(pix_fmt)))
+                       return -EFAULT;
+               ad->pix_fmt = pix_fmt;
+               pr_debug("Set pixel format to 0x%08x\n", ad->pix_fmt);
+               break;
+       case MFB_GET_PIXFMT:
+               pix_fmt = ad->pix_fmt;
+               if (copy_to_user(buf, &pix_fmt, sizeof(pix_fmt)))
+                       return -EFAULT;
+               pr_debug("get pixel format 0x%08x\n", ad->pix_fmt);
+               break;
+       case MFB_SET_AOID:
+               if (copy_from_user(&aoi_d, buf, sizeof(aoi_d)))
+                       return -EFAULT;
+               mfbi->x_aoi_d = aoi_d.x_aoi_d;
+               mfbi->y_aoi_d = aoi_d.y_aoi_d;
+               pr_debug("set AOI display offset of index %d to (%d,%d)\n",
+                                mfbi->index, aoi_d.x_aoi_d, aoi_d.y_aoi_d);
+               fsl_diu_check_var(&info->var, info);
+               fsl_diu_set_par(info);
+               break;
+       case MFB_GET_AOID:
+               aoi_d.x_aoi_d = mfbi->x_aoi_d;
+               aoi_d.y_aoi_d = mfbi->y_aoi_d;
+               if (copy_to_user(buf, &aoi_d, sizeof(aoi_d)))
+                       return -EFAULT;
+               pr_debug("get AOI display offset of index %d (%d,%d)\n",
+                               mfbi->index, aoi_d.x_aoi_d, aoi_d.y_aoi_d);
+               break;
+       case MFB_GET_ALPHA:
+               global_alpha = mfbi->g_alpha;
+               if (copy_to_user(buf, &global_alpha, sizeof(global_alpha)))
+                       return -EFAULT;
+               pr_debug("get global alpha of index %d\n", mfbi->index);
+               break;
+       case MFB_SET_ALPHA:
+               /* set panel information */
+               if (copy_from_user(&global_alpha, buf, sizeof(global_alpha)))
+                       return -EFAULT;
+               ad->src_size_g_alpha = (ad->src_size_g_alpha & (~0xff)) |
+                                                       (global_alpha & 0xff);
+               mfbi->g_alpha = global_alpha;
+               pr_debug("set global alpha for index %d\n", mfbi->index);
+               break;
+       case MFB_SET_CHROMA_KEY:
+               /* set panel information */
+               if (copy_from_user(&ck, buf, sizeof(ck)))
+                       return -EFAULT;
+
+               if (ck.enable &&
+                  (ck.red_max < ck.red_min ||
+                   ck.green_max < ck.green_min ||
+                   ck.blue_max < ck.blue_min))
+                       return -EINVAL;
+
+               if (!ck.enable) {
+                       ad->ckmax_r = 0;
+                       ad->ckmax_g = 0;
+                       ad->ckmax_b = 0;
+                       ad->ckmin_r = 255;
+                       ad->ckmin_g = 255;
+                       ad->ckmin_b = 255;
+               } else {
+                       ad->ckmax_r = ck.red_max;
+                       ad->ckmax_g = ck.green_max;
+                       ad->ckmax_b = ck.blue_max;
+                       ad->ckmin_r = ck.red_min;
+                       ad->ckmin_g = ck.green_min;
+                       ad->ckmin_b = ck.blue_min;
+               }
+               pr_debug("set chroma key\n");
+               break;
+       case FBIOGET_GWINFO:
+               if (mfbi->type == MFB_TYPE_OFF)
+                       return -ENODEV;
+               /* get graphic window information */
+               if (copy_to_user(buf, ad, sizeof(*ad)))
+                       return -EFAULT;
+               break;
+       case FBIOGET_HWCINFO:
+               pr_debug("FBIOGET_HWCINFO:0x%08x\n", FBIOGET_HWCINFO);
+               break;
+       case FBIOPUT_MODEINFO:
+               pr_debug("FBIOPUT_MODEINFO:0x%08x\n", FBIOPUT_MODEINFO);
+               break;
+       case FBIOGET_DISPINFO:
+               pr_debug("FBIOGET_DISPINFO:0x%08x\n", FBIOGET_DISPINFO);
+               break;
+
+       default:
+               printk(KERN_ERR "Unknown ioctl command (0x%08X)\n", cmd);
+               return -ENOIOCTLCMD;
+       }
+
+       return 0;
+}
+
+/* turn on fb if count == 1 */
+static int fsl_diu_open(struct fb_info *info, int user)
+{
+       struct mfb_info *mfbi = info->par;
+       int res = 0;
+
+       spin_lock(&diu_lock);
+       mfbi->count++;
+       if (mfbi->count == 1) {
+               pr_debug("open plane index %d\n", mfbi->index);
+               fsl_diu_check_var(&info->var, info);
+               res = fsl_diu_set_par(info);
+               if (res < 0)
+                       mfbi->count--;
+               else {
+                       res = fsl_diu_enable_panel(info);
+                       if (res < 0)
+                               mfbi->count--;
+               }
+       }
+
+       spin_unlock(&diu_lock);
+       return res;
+}
+
+/* turn off fb if count == 0 */
+static int fsl_diu_release(struct fb_info *info, int user)
+{
+       struct mfb_info *mfbi = info->par;
+       int res = 0;
+
+       spin_lock(&diu_lock);
+       mfbi->count--;
+       if (mfbi->count == 0) {
+               pr_debug("release plane index %d\n", mfbi->index);
+               res = fsl_diu_disable_panel(info);
+               if (res < 0)
+                       mfbi->count++;
+       }
+       spin_unlock(&diu_lock);
+       return res;
+}
+
+static struct fb_ops fsl_diu_ops = {
+       .owner = THIS_MODULE,
+       .fb_check_var = fsl_diu_check_var,
+       .fb_set_par = fsl_diu_set_par,
+       .fb_setcolreg = fsl_diu_setcolreg,
+       .fb_blank = fsl_diu_blank,
+       .fb_pan_display = fsl_diu_pan_display,
+       .fb_fillrect = cfb_fillrect,
+       .fb_copyarea = cfb_copyarea,
+       .fb_imageblit = cfb_imageblit,
+       .fb_ioctl = fsl_diu_ioctl,
+       .fb_open = fsl_diu_open,
+       .fb_release = fsl_diu_release,
+};
+
+static int init_fbinfo(struct fb_info *info)
+{
+       struct mfb_info *mfbi = info->par;
+
+       info->device = NULL;
+       info->var.activate = FB_ACTIVATE_NOW;
+       info->fbops = &fsl_diu_ops;
+       info->flags = FBINFO_FLAG_DEFAULT;
+       info->pseudo_palette = &mfbi->pseudo_palette;
+
+       /* Allocate colormap */
+       fb_alloc_cmap(&info->cmap, 16, 0);
+       return 0;
+}
+
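+/*
+ * Pick a video mode for this plane/AOI, set it up and register the
+ * framebuffer device.
+ */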
+static int install_fb(struct fb_info *info)
+{
+       int rc;
+       struct mfb_info *mfbi = info->par;
+       const char *aoi_mode, *init_aoi_mode = "320x240";
+
+       if (init_fbinfo(info))
+               return -EINVAL;
+
+       if (mfbi->index == 0)   /* plane 0 */
+               aoi_mode = fb_mode;
+       else
+               aoi_mode = init_aoi_mode;
+       pr_debug("mode used = %s\n", aoi_mode);
+       rc = fb_find_mode(&info->var, info, aoi_mode, fsl_diu_mode_db,
+            ARRAY_SIZE(fsl_diu_mode_db), &fsl_diu_default_mode, default_bpp);
+
+       switch (rc) {
+       case 1:
+               pr_debug("using mode specified in @mode\n");
+               break;
+       case 2:
+               pr_debug("using mode specified in @mode "
+                       "with ignored refresh rate\n");
+               break;
+       case 3:
+               pr_debug("using default mode\n");
+               break;
+       case 4:
+               pr_debug("using mode from list\n");
+               break;
+       default:
+               pr_debug("rc = %d\n", rc);
+               pr_debug("failed to find mode\n");
+               return -EINVAL;
+       }
+
+       pr_debug("xres_virtual %d\n", info->var.xres_virtual);
+       pr_debug("bits_per_pixel %d\n", info->var.bits_per_pixel);
+
+       pr_debug("info->var.yres_virtual = %d\n", info->var.yres_virtual);
+       pr_debug("info->fix.line_length = %d\n", info->fix.line_length);
+
+       if (mfbi->type == MFB_TYPE_OFF)
+               mfbi->blank = FB_BLANK_NORMAL;
+       else
+               mfbi->blank = FB_BLANK_UNBLANK;
+
+       if (fsl_diu_check_var(&info->var, info)) {
+               printk(KERN_ERR "fb_check_var failed");
+               fb_dealloc_cmap(&info->cmap);
+               return -EINVAL;
+       }
+
+       if (fsl_diu_set_par(info)) {
+               printk(KERN_ERR "fb_set_par failed");
+               fb_dealloc_cmap(&info->cmap);
+               return -EINVAL;
+       }
+
+       if (register_framebuffer(info) < 0) {
+               printk(KERN_ERR "register_framebuffer failed");
+               unmap_video_memory(info);
+               fb_dealloc_cmap(&info->cmap);
+               return -EINVAL;
+       }
+
+       mfbi->registered = 1;
+       printk(KERN_INFO "fb%d: %s fb device registered successfully.\n",
+                info->node, info->fix.id);
+
+       return 0;
+}
+
+static void __exit uninstall_fb(struct fb_info *info)
+{
+       struct mfb_info *mfbi = info->par;
+
+       if (!mfbi->registered)
+               return;
+
+       unregister_framebuffer(info);
+       unmap_video_memory(info);
+       fb_dealloc_cmap(&info->cmap);
+
+       mfbi->registered = 0;
+}
+
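+/*
+ * DIU interrupt handler: restart the controller after an underrun and, on
+ * non-coherent platforms, perform a data-cache displacement flush on vsync.
+ */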
+static irqreturn_t fsl_diu_isr(int irq, void *dev_id)
+{
+       struct diu *hw = dr.diu_reg;
+       unsigned int status = in_be32(&hw->int_status);
+
+       if (status) {
+               /* This is the workaround for underrun */
+               if (status & INT_UNDRUN) {
+                       out_be32(&hw->diu_mode, 0);
+                       pr_debug("Err: DIU underrun occurred!\n");
+                       udelay(1);
+                       out_be32(&hw->diu_mode, 1);
+               }
+#if defined(CONFIG_NOT_COHERENT_CACHE)
+               else if (status & INT_VSYNC) {
+                       unsigned int i;
+                       for (i = 0; i < coherence_data_size;
+                               i += d_cache_line_size)
+                               __asm__ __volatile__ (
+                                       "dcbz 0, %[input]"
+                               ::[input]"r"(&coherence_data[i]));
+               }
+#endif
+               return IRQ_HANDLED;
+       }
+       return IRQ_NONE;
+}
+
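+/* Request the DIU interrupt line and program the DIU interrupt mask */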
+static int request_irq_local(int irq)
+{
+       unsigned long status, ints;
+       struct diu *hw;
+       int ret;
+
+       hw = dr.diu_reg;
+
+       /* Read to clear the status */
+       status = in_be32(&hw->int_status);
+
+       ret = request_irq(irq, fsl_diu_isr, 0, "diu", NULL);
+       if (ret)
+               pr_info("Request diu IRQ failed.\n");
+       else {
+               ints = INT_PARERR | INT_LS_BF_VS;
+#if !defined(CONFIG_NOT_COHERENT_CACHE)
+               ints |= INT_VSYNC;
+#endif
+               if (dr.mode == MFB_MODE2 || dr.mode == MFB_MODE3)
+                       ints |= INT_VSYNC_WB;
+
+               /* Read to clear the status */
+               status = in_be32(&hw->int_status);
+               out_be32(&hw->int_mask, ints);
+       }
+       return ret;
+}
+
+static void free_irq_local(int irq)
+{
+       struct diu *hw = dr.diu_reg;
+
+       /* Disable all LCDC interrupt */
+       out_be32(&hw->int_mask, 0x1f);
+
+       free_irq(irq, NULL);
+}
+
+#ifdef CONFIG_PM
+/*
+ * Power management hooks. Note that we won't be called from IRQ context,
+ * unlike the blank functions above, so we may sleep.
+ */
+static int fsl_diu_suspend(struct of_device *ofdev, pm_message_t state)
+{
+       struct fsl_diu_data *machine_data;
+
+       machine_data = dev_get_drvdata(&ofdev->dev);
+       disable_lcdc(machine_data->fsl_diu_info[0]);
+
+       return 0;
+}
+
+static int fsl_diu_resume(struct of_device *ofdev)
+{
+       struct fsl_diu_data *machine_data;
+
+       machine_data = dev_get_drvdata(&ofdev->dev);
+       enable_lcdc(machine_data->fsl_diu_info[0]);
+
+       return 0;
+}
+
+#else
+#define fsl_diu_suspend NULL
+#define fsl_diu_resume NULL
+#endif                         /* CONFIG_PM */
+
+/* Allocate a DMA buffer aligned to an 8-byte, 32-byte, etc. boundary */
+static int allocate_buf(struct diu_addr *buf, u32 size, u32 bytes_align)
+{
+       u32 offset, ssize;
+       u32 mask;
+       dma_addr_t paddr = 0;
+
+       ssize = size + bytes_align;
+       buf->vaddr = dma_alloc_coherent(NULL, ssize, &paddr,
+                                       GFP_DMA | __GFP_ZERO);
+       if (!buf->vaddr)
+               return -ENOMEM;
+
+       buf->paddr = (__u32) paddr;
+
+       mask = bytes_align - 1;
+       offset = (u32)buf->paddr & mask;
+       if (offset) {
+               /* advance paddr to the next aligned boundary */
+               buf->offset = bytes_align - offset;
+               buf->paddr = (u32)buf->paddr + buf->offset;
+       } else
+               buf->offset = 0;
+       return 0;
+}
+
+static void free_buf(struct diu_addr *buf, u32 size, u32 bytes_align)
+{
+       dma_free_coherent(NULL, size + bytes_align,
+                               buf->vaddr, (buf->paddr - buf->offset));
+       return;
+}
+
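+/* sysfs "monitor" attribute: select the monitor port at run time */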
+static ssize_t store_monitor(struct device *device,
+       struct device_attribute *attr, const char *buf, size_t count)
+{
+       int old_monitor_port;
+       unsigned long val;
+       struct fsl_diu_data *machine_data =
+               container_of(attr, struct fsl_diu_data, dev_attr);
+
+       if (strict_strtoul(buf, 10, &val))
+               return 0;
+
+       old_monitor_port = machine_data->monitor_port;
+       machine_data->monitor_port = diu_ops.set_sysfs_monitor_port(val);
+
+       if (old_monitor_port != machine_data->monitor_port) {
+               /* All AOIs need to adjust their pixel format.
+                * fsl_diu_set_par only changes the pixel format here,
+                * so it is unlikely to fail. */
+               fsl_diu_set_par(machine_data->fsl_diu_info[0]);
+               fsl_diu_set_par(machine_data->fsl_diu_info[1]);
+               fsl_diu_set_par(machine_data->fsl_diu_info[2]);
+               fsl_diu_set_par(machine_data->fsl_diu_info[3]);
+               fsl_diu_set_par(machine_data->fsl_diu_info[4]);
+       }
+       return count;
+}
+
+static ssize_t show_monitor(struct device *device,
+       struct device_attribute *attr, char *buf)
+{
+       struct fsl_diu_data *machine_data =
+               container_of(attr, struct fsl_diu_data, dev_attr);
+       return diu_ops.show_monitor_port(machine_data->monitor_port, buf);
+}
+
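+/*
+ * Probe: map the DIU registers, allocate the descriptor, gamma and cursor
+ * pools, register one framebuffer per plane/AOI and set up the IRQ and the
+ * sysfs "monitor" attribute.
+ */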
+static int fsl_diu_probe(struct of_device *ofdev,
+       const struct of_device_id *match)
+{
+       struct device_node *np = ofdev->node;
+       struct mfb_info *mfbi;
+       phys_addr_t dummy_ad_addr;
+       int ret, i, error = 0;
+       struct resource res;
+       struct fsl_diu_data *machine_data;
+
+       machine_data = kzalloc(sizeof(struct fsl_diu_data), GFP_KERNEL);
+       if (!machine_data)
+               return -ENOMEM;
+
+       for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++) {
+               machine_data->fsl_diu_info[i] =
+                       framebuffer_alloc(sizeof(struct mfb_info), &ofdev->dev);
+               if (!machine_data->fsl_diu_info[i]) {
+                       dev_err(&ofdev->dev, "cannot allocate memory\n");
+                       ret = -ENOMEM;
+                       goto error2;
+               }
+               mfbi = machine_data->fsl_diu_info[i]->par;
+               memcpy(mfbi, &mfb_template[i], sizeof(struct mfb_info));
+               mfbi->parent = machine_data;
+       }
+
+       ret = of_address_to_resource(np, 0, &res);
+       if (ret) {
+               dev_err(&ofdev->dev, "could not obtain DIU address\n");
+               goto error;
+       }
+       if (!res.start) {
+               dev_err(&ofdev->dev, "invalid DIU address\n");
+               ret = -EINVAL;
+               goto error;
+       }
+       dev_dbg(&ofdev->dev, "%s, res.start: 0x%08x\n", __func__, res.start);
+
+       dr.diu_reg = ioremap(res.start, sizeof(struct diu));
+       if (!dr.diu_reg) {
+               dev_err(&ofdev->dev, "Err: can't map DIU registers!\n");
+               ret = -EFAULT;
+               goto error2;
+       }
+
+       out_be32(&dr.diu_reg->diu_mode, 0);             /* disable DIU anyway*/
+
+       /* Get the IRQ of the DIU */
+       machine_data->irq = irq_of_parse_and_map(np, 0);
+
+       if (!machine_data->irq) {
+               dev_err(&ofdev->dev, "could not get DIU IRQ\n");
+               ret = -EINVAL;
+               goto error;
+       }
+       machine_data->monitor_port = monitor_port;
+
+       /* Area descriptor memory pool aligns to 64-bit boundary */
+       if (allocate_buf(&pool.ad, sizeof(struct diu_ad) * FSL_AOI_NUM, 8)) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       /* Get memory for Gamma Table  - 32-byte aligned memory */
+       if (allocate_buf(&pool.gamma, 768, 32)) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       /* For performance, cursor bitmap buffer aligns to 32-byte boundary */
+       if (allocate_buf(&pool.cursor, MAX_CURS * MAX_CURS * 2, 32)) {
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       i = ARRAY_SIZE(machine_data->fsl_diu_info);
+       machine_data->dummy_ad = (struct diu_ad *)
+                       ((u32)pool.ad.vaddr + pool.ad.offset) + i;
+       machine_data->dummy_ad->paddr = pool.ad.paddr +
+                       i * sizeof(struct diu_ad);
+       machine_data->dummy_aoi_virt = fsl_diu_alloc(64, &dummy_ad_addr);
+       if (!machine_data->dummy_aoi_virt) {
+               ret = -ENOMEM;
+               goto error;
+       }
+       machine_data->dummy_ad->addr = cpu_to_le32(dummy_ad_addr);
+       machine_data->dummy_ad->pix_fmt = 0x88882317;
+       machine_data->dummy_ad->src_size_g_alpha = cpu_to_le32((4 << 12) | 4);
+       machine_data->dummy_ad->aoi_size = cpu_to_le32((4 << 16) |  2);
+       machine_data->dummy_ad->offset_xyi = 0;
+       machine_data->dummy_ad->offset_xyd = 0;
+       machine_data->dummy_ad->next_ad = 0;
+
+       out_be32(&dr.diu_reg->desc[0], machine_data->dummy_ad->paddr);
+       out_be32(&dr.diu_reg->desc[1], machine_data->dummy_ad->paddr);
+       out_be32(&dr.diu_reg->desc[2], machine_data->dummy_ad->paddr);
+
+       for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++) {
+               machine_data->fsl_diu_info[i]->fix.smem_start = 0;
+               mfbi = machine_data->fsl_diu_info[i]->par;
+               mfbi->ad = (struct diu_ad *)((u32)pool.ad.vaddr
+                                       + pool.ad.offset) + i;
+               mfbi->ad->paddr = pool.ad.paddr + i * sizeof(struct diu_ad);
+               ret = install_fb(machine_data->fsl_diu_info[i]);
+               if (ret) {
+                       dev_err(&ofdev->dev,
+                               "Failed to register framebuffer %d\n",
+                               i);
+                       goto error;
+               }
+       }
+
+       ret = request_irq_local(machine_data->irq);
+       if (ret) {
+               dev_err(machine_data->fsl_diu_info[0]->dev,
+                       "could not request irq for diu.");
+               goto error;
+       }
+
+       machine_data->dev_attr.attr.name = "monitor";
+       machine_data->dev_attr.attr.mode = S_IRUGO|S_IWUSR;
+       machine_data->dev_attr.show = show_monitor;
+       machine_data->dev_attr.store = store_monitor;
+       error = device_create_file(machine_data->fsl_diu_info[0]->dev,
+                                 &machine_data->dev_attr);
+       if (error) {
+               dev_err(machine_data->fsl_diu_info[0]->dev,
+                       "could not create sysfs %s file\n",
+                       machine_data->dev_attr.attr.name);
+       }
+
+       dev_set_drvdata(&ofdev->dev, machine_data);
+       return 0;
+
+error:
+       for (i = ARRAY_SIZE(machine_data->fsl_diu_info);
+               i > 0; i--)
+               uninstall_fb(machine_data->fsl_diu_info[i - 1]);
+       if (pool.ad.vaddr)
+               free_buf(&pool.ad, sizeof(struct diu_ad) * FSL_AOI_NUM, 8);
+       if (pool.gamma.vaddr)
+               free_buf(&pool.gamma, 768, 32);
+       if (pool.cursor.vaddr)
+               free_buf(&pool.cursor, MAX_CURS * MAX_CURS * 2, 32);
+       if (machine_data->dummy_aoi_virt)
+               fsl_diu_free(machine_data->dummy_aoi_virt, 64);
+       iounmap(dr.diu_reg);
+
+error2:
+       for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
+               if (machine_data->fsl_diu_info[i])
+                       framebuffer_release(machine_data->fsl_diu_info[i]);
+       kfree(machine_data);
+
+       return ret;
+}
+
+
+static int fsl_diu_remove(struct of_device *ofdev)
+{
+       struct fsl_diu_data *machine_data;
+       int i;
+
+       machine_data = dev_get_drvdata(&ofdev->dev);
+       disable_lcdc(machine_data->fsl_diu_info[0]);
+       free_irq_local(machine_data->irq);
+       for (i = ARRAY_SIZE(machine_data->fsl_diu_info); i > 0; i--)
+               uninstall_fb(machine_data->fsl_diu_info[i - 1]);
+       if (pool.ad.vaddr)
+               free_buf(&pool.ad, sizeof(struct diu_ad) * FSL_AOI_NUM, 8);
+       if (pool.gamma.vaddr)
+               free_buf(&pool.gamma, 768, 32);
+       if (pool.cursor.vaddr)
+               free_buf(&pool.cursor, MAX_CURS * MAX_CURS * 2, 32);
+       if (machine_data->dummy_aoi_virt)
+               fsl_diu_free(machine_data->dummy_aoi_virt, 64);
+       iounmap(dr.diu_reg);
+       for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
+               if (machine_data->fsl_diu_info[i])
+                       framebuffer_release(machine_data->fsl_diu_info[i]);
+       kfree(machine_data);
+
+       return 0;
+}
+
+#ifndef MODULE
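+/* Parse "video=fslfb:<options>" boot options: monitor=, bpp= and the mode */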
+static int __init fsl_diu_setup(char *options)
+{
+       char *opt;
+       unsigned long val;
+
+       if (!options || !*options)
+               return 0;
+
+       while ((opt = strsep(&options, ",")) != NULL) {
+               if (!*opt)
+                       continue;
+               if (!strncmp(opt, "monitor=", 8)) {
+                       if (!strict_strtoul(opt + 8, 10, &val) && (val <= 2))
+                               monitor_port = val;
+               } else if (!strncmp(opt, "bpp=", 4)) {
+                       if (!strict_strtoul(opt + 4, 10, &val))
+                               default_bpp = val;
+               } else
+                       fb_mode = opt;
+       }
+
+       return 0;
+}
+#endif
+
+static struct of_device_id fsl_diu_match[] = {
+       {
+               .compatible = "fsl,diu",
+       },
+       {}
+};
+MODULE_DEVICE_TABLE(of, fsl_diu_match);
+
+static struct of_platform_driver fsl_diu_driver = {
+       .owner          = THIS_MODULE,
+       .name           = "fsl_diu",
+       .match_table    = fsl_diu_match,
+       .probe          = fsl_diu_probe,
+       .remove         = fsl_diu_remove,
+       .suspend        = fsl_diu_suspend,
+       .resume         = fsl_diu_resume,
+};
+
+static int __init fsl_diu_init(void)
+{
+#ifdef CONFIG_NOT_COHERENT_CACHE
+       struct device_node *np;
+       const u32 *prop;
+#endif
+       int ret;
+#ifndef MODULE
+       char *option;
+
+       /*
+        * For kernel boot options (in 'video=xxxfb:<options>' format)
+        */
+       if (fb_get_options("fslfb", &option))
+               return -ENODEV;
+       fsl_diu_setup(option);
+#endif
+       printk(KERN_INFO "Freescale DIU driver\n");
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+       np = of_find_node_by_type(NULL, "cpu");
+       if (!np) {
+               printk(KERN_ERR "Err: can't find device node 'cpu'\n");
+               return -ENODEV;
+       }
+
+       prop = of_get_property(np, "d-cache-size", NULL);
+       if (prop == NULL)
+               return -ENODEV;
+
+       /* Freescale PLRU requires 13/8 times the cache size to do a proper
+        * displacement flush.
+        */
+       coherence_data_size = *prop * 13;
+       coherence_data_size /= 8;
+
+       prop = of_get_property(np, "d-cache-line-size", NULL);
+       if (prop == NULL)
+               return -ENODEV;
+       d_cache_line_size = *prop;
+
+       of_node_put(np);
+       coherence_data = vmalloc(coherence_data_size);
+       if (!coherence_data)
+               return -ENOMEM;
+#endif
+       ret = of_register_platform_driver(&fsl_diu_driver);
+       if (ret) {
+               printk(KERN_ERR
+                       "fsl-diu: failed to register platform driver\n");
+#if defined(CONFIG_NOT_COHERENT_CACHE)
+               vfree(coherence_data);
+#endif
+               iounmap(dr.diu_reg);
+       }
+       return ret;
+}
+
+static void __exit fsl_diu_exit(void)
+{
+       of_unregister_platform_driver(&fsl_diu_driver);
+#if defined(CONFIG_NOT_COHERENT_CACHE)
+       vfree(coherence_data);
+#endif
+}
+
+module_init(fsl_diu_init);
+module_exit(fsl_diu_exit);
+
+MODULE_AUTHOR("York Sun <yorksun@freescale.com>");
+MODULE_DESCRIPTION("Freescale DIU framebuffer driver");
+MODULE_LICENSE("GPL");
+
+module_param_named(mode, fb_mode, charp, 0);
+MODULE_PARM_DESC(mode,
+       "Specify resolution as \"<xres>x<yres>[-<bpp>][@<refresh>]\" ");
+module_param_named(bpp, default_bpp, ulong, 0);
+MODULE_PARM_DESC(bpp, "Specify bits per pixel if not specified by the mode");
+module_param_named(monitor, monitor_port, int, 0);
+MODULE_PARM_DESC(monitor,
+       "Specify the monitor port (0, 1 or 2) if supported by the platform");
+
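The CONFIG_NOT_COHERENT_CACHE branch of fsl_diu_init() above sizes a scratch buffer to 13/8 of the L1 data-cache size so that a displacement flush walks every way of the Freescale PLRU cache. A minimal stand-alone sketch of that integer arithmetic follows; the 32 KiB cache size is an invented example, not a value taken from this patch.

#include <stdio.h>

/* Sketch of the buffer sizing used by fsl_diu_init(); d_cache_size is
 * the "d-cache-size" property in bytes.  Multiply before dividing, as
 * the driver does, so the integer result is not truncated early. */
static unsigned long diu_coherence_buf_size(unsigned long d_cache_size)
{
        return d_cache_size * 13 / 8;
}

int main(void)
{
        unsigned long dcache = 32 * 1024;       /* invented 32 KiB example */

        printf("coherence_data_size = %lu\n", diu_coherence_buf_size(dcache));
        /* prints 53248 */
        return 0;
}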
diff --git a/drivers/video/fsl-diu-fb.h b/drivers/video/fsl-diu-fb.h
new file mode 100644 (file)
index 0000000..fc295d7
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ *  Freescale DIU Frame Buffer device driver
+ *
+ *  Authors: Hongjun Chen <hong-jun.chen@freescale.com>
+ *           Paul Widmer <paul.widmer@freescale.com>
+ *           Srikanth Srinivasan <srikanth.srinivasan@freescale.com>
+ *           York Sun <yorksun@freescale.com>
+ *
+ *   Based on imxfb.c Copyright (C) 2004 S.Hauer, Pengutronix
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __FSL_DIU_FB_H__
+#define __FSL_DIU_FB_H__
+
+/* Arbitrary threshold to determine the allocation method
+ * See mpc8610fb_set_par(), map_video_memory(), and unmap_video_memory()
+ */
+#define MEM_ALLOC_THRESHOLD (1024*768*4+32)
+/* Minimum value that the pixel clock can be set to, in picoseconds.
+ * This is determined by platform clock/3, where the minimum platform
+ * clock is 533 MHz.  This gives 5629 picoseconds.
+ */
+#define MIN_PIX_CLK 5629
+#define MAX_PIX_CLK 96096
+
+#include <linux/types.h>
+
+struct mfb_alpha {
+       int enable;
+       int alpha;
+};
+
+struct mfb_chroma_key {
+       int enable;
+       __u8  red_max;
+       __u8  green_max;
+       __u8  blue_max;
+       __u8  red_min;
+       __u8  green_min;
+       __u8  blue_min;
+};
+
+struct aoi_display_offset {
+       int x_aoi_d;
+       int y_aoi_d;
+};
+
+#define MFB_SET_CHROMA_KEY     _IOW('M', 1, struct mfb_chroma_key)
+#define MFB_WAIT_FOR_VSYNC     _IOW('F', 0x20, u_int32_t)
+#define MFB_SET_BRIGHTNESS     _IOW('M', 3, __u8)
+
+#define MFB_SET_ALPHA          0x80014d00
+#define MFB_GET_ALPHA          0x40014d00
+#define MFB_SET_AOID           0x80084d04
+#define MFB_GET_AOID           0x40084d04
+#define MFB_SET_PIXFMT         0x80014d08
+#define MFB_GET_PIXFMT         0x40014d08
+
+#define FBIOGET_GWINFO         0x46E0
+#define FBIOPUT_GWINFO         0x46E1
+
+#ifdef __KERNEL__
+#include <linux/spinlock.h>
+
+/*
+ * These are the fields of the area descriptor (in DDR memory) for every plane
+ */
+struct diu_ad {
+       /* Word 0(32-bit) in DDR memory */
+/*     __u16 comp; */
+/*     __u16 pixel_s:2; */
+/*     __u16 pallete:1; */
+/*     __u16 red_c:2; */
+/*     __u16 green_c:2; */
+/*     __u16 blue_c:2; */
+/*     __u16 alpha_c:3; */
+/*     __u16 byte_f:1; */
+/*     __u16 res0:3; */
+
+       __be32 pix_fmt; /* hard-coded pixel format */
+
+       /* Word 1(32-bit) in DDR memory */
+       __le32 addr;
+
+       /* Word 2(32-bit) in DDR memory */
+/*     __u32 delta_xs:11; */
+/*     __u32 res1:1; */
+/*     __u32 delta_ys:11; */
+/*     __u32 res2:1; */
+/*     __u32 g_alpha:8; */
+       __le32 src_size_g_alpha;
+
+       /* Word 3(32-bit) in DDR memory */
+/*     __u32 delta_xi:11; */
+/*     __u32 res3:5; */
+/*     __u32 delta_yi:11; */
+/*     __u32 res4:3; */
+/*     __u32 flip:2; */
+       __le32 aoi_size;
+
+       /* Word 4(32-bit) in DDR memory */
+       /*__u32 offset_xi:11;
+       __u32 res5:5;
+       __u32 offset_yi:11;
+       __u32 res6:5;
+       */
+       __le32 offset_xyi;
+
+       /* Word 5(32-bit) in DDR memory */
+       /*__u32 offset_xd:11;
+       __u32 res7:5;
+       __u32 offset_yd:11;
+       __u32 res8:5; */
+       __le32 offset_xyd;
+
+
+       /* Word 6(32-bit) in DDR memory */
+       __u8 ckmax_r;
+       __u8 ckmax_g;
+       __u8 ckmax_b;
+       __u8 res9;
+
+       /* Word 7(32-bit) in DDR memory */
+       __u8 ckmin_r;
+       __u8 ckmin_g;
+       __u8 ckmin_b;
+       __u8 res10;
+/*     __u32 res10:8; */
+
+       /* Word 8(32-bit) in DDR memory */
+       __le32 next_ad;
+
+       /* Word 9 (32-bit) in DDR memory, just for 64-bit alignment */
+       __u32 paddr;
+} __attribute__ ((packed));
+
+/* DIU register map */
+struct diu {
+       __be32 desc[3];
+       __be32 gamma;
+       __be32 pallete;
+       __be32 cursor;
+       __be32 curs_pos;
+       __be32 diu_mode;
+       __be32 bgnd;
+       __be32 bgnd_wb;
+       __be32 disp_size;
+       __be32 wb_size;
+       __be32 wb_mem_addr;
+       __be32 hsyn_para;
+       __be32 vsyn_para;
+       __be32 syn_pol;
+       __be32 thresholds;
+       __be32 int_status;
+       __be32 int_mask;
+       __be32 colorbar[8];
+       __be32 filling;
+       __be32 plut;
+} __attribute__ ((packed));
+
+struct diu_hw {
+       struct diu *diu_reg;
+       spinlock_t reg_lock;
+
+       __u32 mode;             /* DIU operation mode */
+};
+
+struct diu_addr {
+       __u8 __iomem *vaddr;    /* Virtual address */
+       dma_addr_t paddr;       /* Physical address */
+       __u32      offset;
+};
+
+struct diu_pool {
+       struct diu_addr ad;
+       struct diu_addr gamma;
+       struct diu_addr pallete;
+       struct diu_addr cursor;
+};
+
+#define FSL_DIU_BASE_OFFSET    0x2C000 /* Offset of DIU */
+#define INT_LCDC               64      /* DIU interrupt number */
+
+#define FSL_AOI_NUM    6       /* 5 AOIs and one dummy AOI: */
+                               /* 1 for plane 0, 2 each for planes 1 and 2 */
+
+/* Minimum X and Y resolutions */
+#define MIN_XRES       64
+#define MIN_YRES       64
+
+/* HW cursor parameters */
+#define MAX_CURS               32
+
+/* Modes of operation of DIU */
+#define MFB_MODE0      0       /* DIU off */
+#define MFB_MODE1      1       /* All three planes output to display */
+#define MFB_MODE2      2       /* Plane 1 to display, planes 2+3 written back */
+#define MFB_MODE3      3       /* All three planes written back to memory */
+#define MFB_MODE4      4       /* Color bar generation */
+
+/* INT_STATUS/INT_MASK field descriptions */
+#define INT_VSYNC      0x01    /* Vsync interrupt */
+#define INT_VSYNC_WB   0x02    /* Vsync interrupt for write-back operation */
+#define INT_UNDRUN     0x04    /* Underrun exception interrupt */
+#define INT_PARERR     0x08    /* Display parameters error interrupt */
+#define INT_LS_BF_VS   0x10    /* Lines-before-vsync interrupt */
+
+/* Panels' operation modes */
+#define MFB_TYPE_OUTPUT        0       /* Panel output to display */
+#define MFB_TYPE_OFF   1       /* Panel off */
+#define MFB_TYPE_WB    2       /* Panel written back to memory */
+#define MFB_TYPE_TEST  3       /* Panel generates color bars */
+
+#endif /* __KERNEL__ */
+#endif /* __FSL_DIU_FB_H__ */
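The MFB_* ioctls declared above are issued from user space against the framebuffer nodes the driver registers. The fragment below is only a usage sketch: the /dev/fb1 path, the copied header, and the chroma-key values are assumptions for illustration and are not part of this commit.

#include <fcntl.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "fsl-diu-fb.h"         /* copy of the MFB_* definitions above */

int main(void)
{
        /* device node is an assumption; AOI planes typically appear as
         * additional /dev/fbN nodes */
        int fd = open("/dev/fb1", O_RDWR);
        if (fd < 0)
                return 1;

        struct mfb_chroma_key key = {
                .enable    = 1,
                /* make pure green transparent (example values only) */
                .red_min   = 0,   .red_max   = 0,
                .green_min = 255, .green_max = 255,
                .blue_min  = 0,   .blue_max  = 0,
        };
        if (ioctl(fd, MFB_SET_CHROMA_KEY, &key) < 0)
                perror("MFB_SET_CHROMA_KEY");

        uint32_t dummy = 0;
        if (ioctl(fd, MFB_WAIT_FOR_VSYNC, &dummy) < 0)
                perror("MFB_WAIT_FOR_VSYNC");

        close(fd);
        return 0;
}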
index 7608429b39435219c284591df123cbe4032e85ec..c5d8ba4b9fc3dcc37d91a5acf5e5f37be3b0e645 100644 (file)
@@ -38,26 +38,6 @@ config FB_GEODE_GX
 
          If unsure, say N.
 
-config FB_GEODE_GX_SET_FBSIZE
-       bool "Manually specify the Geode GX framebuffer size"
-       depends on FB_GEODE_GX
-       default n
-       ---help---
-         If you want to manually specify the size of your GX framebuffer,
-         say Y here, otherwise say N to dynamically probe it.
-
-         Say N unless you know what you are doing.
-
-config FB_GEODE_GX_FBSIZE
-       hex "Size of the GX framebuffer, in bytes"
-       depends on FB_GEODE_GX_SET_FBSIZE
-       default "0x1600000"
-       ---help---
-         Specify the size of the GX framebuffer.  Normally, you will
-         want this to be MB aligned.  Common values are 0x80000 (8MB)
-         and 0x1600000 (16MB).  Don't change this unless you know what
-         you are doing
-
 config FB_GEODE_GX1
        tristate "AMD Geode GX1 framebuffer support (EXPERIMENTAL)"
        depends on FB && FB_GEODE && EXPERIMENTAL
index 957304b45fbaa39ce1813b70adb230f87646b100..5c98da12688397a8f6c8d9fbb9cf4dcc2c27fd7e 100644 (file)
@@ -5,5 +5,5 @@ obj-$(CONFIG_FB_GEODE_GX)  += gxfb.o
 obj-$(CONFIG_FB_GEODE_LX)  += lxfb.o
 
 gx1fb-objs := gx1fb_core.o display_gx1.o video_cs5530.o
-gxfb-objs  := gxfb_core.o display_gx.o video_gx.o
+gxfb-objs  := gxfb_core.o display_gx.o video_gx.o suspend_gx.o
 lxfb-objs  := lxfb_core.o lxfb_ops.o
index 0f16e4bffc6c86874114fec752fd4a42f05de0b8..e759895bf3d3d19deb02ec6323f7c8ff646b96fc 100644 (file)
 #include <asm/io.h>
 #include <asm/div64.h>
 #include <asm/delay.h>
+#include <asm/geode.h>
 
-#include "geodefb.h"
-#include "display_gx.h"
+#include "gxfb.h"
 
-#ifdef CONFIG_FB_GEODE_GX_SET_FBSIZE
-unsigned int gx_frame_buffer_size(void)
-{
-       return CONFIG_FB_GEODE_GX_FBSIZE;
-}
-#else
 unsigned int gx_frame_buffer_size(void)
 {
        unsigned int val;
 
-       /* FB size is reported by a virtual register */
+       if (!geode_has_vsa2()) {
+               uint32_t hi, lo;
+
+               /* The number of pages is (PMAX - PMIN)+1 */
+               rdmsr(MSR_GLIU_P2D_RO0, lo, hi);
+
+               /* PMAX */
+               val = ((hi & 0xff) << 12) | ((lo & 0xfff00000) >> 20);
+               /* PMIN */
+               val -= (lo & 0x000fffff);
+               val += 1;
+
+               /* The page size is 4k */
+               return (val << 12);
+       }
+
+       /* FB size can be obtained from the VSA II */
        /* Virtual register class = 0x02 */
        /* VG_MEM_SIZE(512Kb units) = 0x00 */
 
-       outw(0xFC53, 0xAC1C);
-       outw(0x0200, 0xAC1C);
+       outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+       outw(VSA_VR_MEM_SIZE, VSA_VRC_INDEX);
 
-       val = (unsigned int)(inw(0xAC1E)) & 0xFFl;
+       val = (unsigned int)(inw(VSA_VRC_DATA)) & 0xFFl;
        return (val << 19);
 }
-#endif
 
 int gx_line_delta(int xres, int bpp)
 {
@@ -49,75 +58,76 @@ int gx_line_delta(int xres, int bpp)
        return (xres * (bpp >> 3) + 7) & ~0x7;
 }
 
-static void gx_set_mode(struct fb_info *info)
+void gx_set_mode(struct fb_info *info)
 {
-       struct geodefb_par *par = info->par;
+       struct gxfb_par *par = info->par;
        u32 gcfg, dcfg;
        int hactive, hblankstart, hsyncstart, hsyncend, hblankend, htotal;
        int vactive, vblankstart, vsyncstart, vsyncend, vblankend, vtotal;
 
        /* Unlock the display controller registers. */
-       readl(par->dc_regs + DC_UNLOCK);
-       writel(DC_UNLOCK_CODE, par->dc_regs + DC_UNLOCK);
+       write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
 
-       gcfg = readl(par->dc_regs + DC_GENERAL_CFG);
-       dcfg = readl(par->dc_regs + DC_DISPLAY_CFG);
+       gcfg = read_dc(par, DC_GENERAL_CFG);
+       dcfg = read_dc(par, DC_DISPLAY_CFG);
 
        /* Disable the timing generator. */
-       dcfg &= ~(DC_DCFG_TGEN);
-       writel(dcfg, par->dc_regs + DC_DISPLAY_CFG);
+       dcfg &= ~DC_DISPLAY_CFG_TGEN;
+       write_dc(par, DC_DISPLAY_CFG, dcfg);
 
        /* Wait for pending memory requests before disabling the FIFO load. */
        udelay(100);
 
        /* Disable FIFO load and compression. */
-       gcfg &= ~(DC_GCFG_DFLE | DC_GCFG_CMPE | DC_GCFG_DECE);
-       writel(gcfg, par->dc_regs + DC_GENERAL_CFG);
+       gcfg &= ~(DC_GENERAL_CFG_DFLE | DC_GENERAL_CFG_CMPE |
+                       DC_GENERAL_CFG_DECE);
+       write_dc(par, DC_GENERAL_CFG, gcfg);
 
        /* Setup DCLK and its divisor. */
-       par->vid_ops->set_dclk(info);
+       gx_set_dclk_frequency(info);
 
        /*
         * Setup new mode.
         */
 
        /* Clear all unused feature bits. */
-       gcfg &= DC_GCFG_YUVM | DC_GCFG_VDSE;
+       gcfg &= DC_GENERAL_CFG_YUVM | DC_GENERAL_CFG_VDSE;
        dcfg = 0;
 
        /* Set FIFO priority (default 6/5) and enable. */
        /* FIXME: increase fifo priority for 1280x1024 and higher modes? */
-       gcfg |= (6 << DC_GCFG_DFHPEL_POS) | (5 << DC_GCFG_DFHPSL_POS) | DC_GCFG_DFLE;
+       gcfg |= (6 << DC_GENERAL_CFG_DFHPEL_SHIFT) |
+               (5 << DC_GENERAL_CFG_DFHPSL_SHIFT) | DC_GENERAL_CFG_DFLE;
 
        /* Framebuffer start offset. */
-       writel(0, par->dc_regs + DC_FB_ST_OFFSET);
+       write_dc(par, DC_FB_ST_OFFSET, 0);
 
        /* Line delta and line buffer length. */
-       writel(info->fix.line_length >> 3, par->dc_regs + DC_GFX_PITCH);
-       writel(((info->var.xres * info->var.bits_per_pixel/8) >> 3) + 2,
-              par->dc_regs + DC_LINE_SIZE);
+       write_dc(par, DC_GFX_PITCH, info->fix.line_length >> 3);
+       write_dc(par, DC_LINE_SIZE,
+               ((info->var.xres * info->var.bits_per_pixel/8) >> 3) + 2);
 
 
        /* Enable graphics and video data and unmask address lines. */
-       dcfg |= DC_DCFG_GDEN | DC_DCFG_VDEN | DC_DCFG_A20M | DC_DCFG_A18M;
+       dcfg |= DC_DISPLAY_CFG_GDEN | DC_DISPLAY_CFG_VDEN |
+               DC_DISPLAY_CFG_A20M | DC_DISPLAY_CFG_A18M;
 
        /* Set pixel format. */
        switch (info->var.bits_per_pixel) {
        case 8:
-               dcfg |= DC_DCFG_DISP_MODE_8BPP;
+               dcfg |= DC_DISPLAY_CFG_DISP_MODE_8BPP;
                break;
        case 16:
-               dcfg |= DC_DCFG_DISP_MODE_16BPP;
-               dcfg |= DC_DCFG_16BPP_MODE_565;
+               dcfg |= DC_DISPLAY_CFG_DISP_MODE_16BPP;
                break;
        case 32:
-               dcfg |= DC_DCFG_DISP_MODE_24BPP;
-               dcfg |= DC_DCFG_PALB;
+               dcfg |= DC_DISPLAY_CFG_DISP_MODE_24BPP;
+               dcfg |= DC_DISPLAY_CFG_PALB;
                break;
        }
 
        /* Enable timing generator. */
-       dcfg |= DC_DCFG_TGEN;
+       dcfg |= DC_DISPLAY_CFG_TGEN;
 
        /* Horizontal and vertical timings. */
        hactive = info->var.xres;
@@ -134,28 +144,34 @@ static void gx_set_mode(struct fb_info *info)
        vblankend = vsyncend + info->var.upper_margin;
        vtotal = vblankend;
 
-       writel((hactive - 1)     | ((htotal - 1) << 16),    par->dc_regs + DC_H_ACTIVE_TIMING);
-       writel((hblankstart - 1) | ((hblankend - 1) << 16), par->dc_regs + DC_H_BLANK_TIMING);
-       writel((hsyncstart - 1)  | ((hsyncend - 1) << 16),  par->dc_regs + DC_H_SYNC_TIMING);
+       write_dc(par, DC_H_ACTIVE_TIMING, (hactive - 1)    |
+                       ((htotal - 1) << 16));
+       write_dc(par, DC_H_BLANK_TIMING, (hblankstart - 1) |
+                       ((hblankend - 1) << 16));
+       write_dc(par, DC_H_SYNC_TIMING, (hsyncstart - 1)   |
+                       ((hsyncend - 1) << 16));
 
-       writel((vactive - 1)     | ((vtotal - 1) << 16),    par->dc_regs + DC_V_ACTIVE_TIMING);
-       writel((vblankstart - 1) | ((vblankend - 1) << 16), par->dc_regs + DC_V_BLANK_TIMING);
-       writel((vsyncstart - 1)  | ((vsyncend - 1) << 16),  par->dc_regs + DC_V_SYNC_TIMING);
+       write_dc(par, DC_V_ACTIVE_TIMING, (vactive - 1)    |
+                       ((vtotal - 1) << 16));
+       write_dc(par, DC_V_BLANK_TIMING, (vblankstart - 1) |
+                       ((vblankend - 1) << 16));
+       write_dc(par, DC_V_SYNC_TIMING, (vsyncstart - 1)   |
+                       ((vsyncend - 1) << 16));
 
        /* Write final register values. */
-       writel(dcfg, par->dc_regs + DC_DISPLAY_CFG);
-       writel(gcfg, par->dc_regs + DC_GENERAL_CFG);
+       write_dc(par, DC_DISPLAY_CFG, dcfg);
+       write_dc(par, DC_GENERAL_CFG, gcfg);
 
-       par->vid_ops->configure_display(info);
+       gx_configure_display(info);
 
        /* Relock display controller registers */
-       writel(0, par->dc_regs + DC_UNLOCK);
+       write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
 }
 
-static void gx_set_hw_palette_reg(struct fb_info *info, unsigned regno,
-                                  unsigned red, unsigned green, unsigned blue)
+void gx_set_hw_palette_reg(struct fb_info *info, unsigned regno,
+               unsigned red, unsigned green, unsigned blue)
 {
-       struct geodefb_par *par = info->par;
+       struct gxfb_par *par = info->par;
        int val;
 
        /* Hardware palette is in RGB 8-8-8 format. */
@@ -163,11 +179,6 @@ static void gx_set_hw_palette_reg(struct fb_info *info, unsigned regno,
        val |= (green)      & 0x00ff00;
        val |= (blue  >> 8) & 0x0000ff;
 
-       writel(regno, par->dc_regs + DC_PAL_ADDRESS);
-       writel(val, par->dc_regs + DC_PAL_DATA);
+       write_dc(par, DC_PAL_ADDRESS, regno);
+       write_dc(par, DC_PAL_DATA, val);
 }
-
-struct geode_dc_ops gx_dc_ops = {
-       .set_mode        = gx_set_mode,
-       .set_palette_reg = gx_set_hw_palette_reg,
-};
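On systems without the VSA II BIOS, the reworked gx_frame_buffer_size() above reads the GLIU P2D range-offset descriptor and computes the framebuffer size as ((PMAX - PMIN) + 1) pages of 4 KiB. A stand-alone sketch of that extraction, using an invented descriptor value purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the PMAX/PMIN arithmetic from gx_frame_buffer_size(); 'hi'
 * and 'lo' stand in for the two halves of MSR_GLIU_P2D_RO0. */
static unsigned int gx_fb_size_from_p2d(uint32_t hi, uint32_t lo)
{
        unsigned int val;

        /* PMAX lives in hi[7:0] and lo[31:20] */
        val = ((hi & 0xff) << 12) | ((lo & 0xfff00000) >> 20);
        /* PMIN lives in lo[19:0]; the page count is inclusive */
        val -= (lo & 0x000fffff);
        val += 1;

        return val << 12;                       /* 4 KiB pages */
}

int main(void)
{
        /* invented example: PMIN = 0xf000, PMAX = 0xffff
         * => 0x1000 pages => 16 MiB */
        printf("fb size = %u bytes\n",
               gx_fb_size_from_p2d(0x0000000f, 0xfff0f000));
        return 0;
}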
diff --git a/drivers/video/geode/display_gx.h b/drivers/video/geode/display_gx.h
deleted file mode 100644 (file)
index 0af33f3..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Geode GX display controller
- *
- * Copyright (C) 2006 Arcom Control Systems Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef __DISPLAY_GX_H__
-#define __DISPLAY_GX_H__
-
-unsigned int gx_frame_buffer_size(void);
-int gx_line_delta(int xres, int bpp);
-
-extern struct geode_dc_ops gx_dc_ops;
-
-/* MSR that tells us if a TFT or CRT is attached */
-#define GLD_MSR_CONFIG   0xC0002001
-#define GLD_MSR_CONFIG_DM_FP 0x40
-
-/* Display controller registers */
-
-#define DC_UNLOCK 0x00
-#  define DC_UNLOCK_CODE 0x00004758
-
-#define DC_GENERAL_CFG 0x04
-#  define DC_GCFG_DFLE       0x00000001
-#  define DC_GCFG_CURE       0x00000002
-#  define DC_GCFG_ICNE       0x00000004
-#  define DC_GCFG_VIDE       0x00000008
-#  define DC_GCFG_CMPE       0x00000020
-#  define DC_GCFG_DECE       0x00000040
-#  define DC_GCFG_VGAE       0x00000080
-#  define DC_GCFG_DFHPSL_MASK 0x00000F00
-#  define DC_GCFG_DFHPSL_POS          8
-#  define DC_GCFG_DFHPEL_MASK 0x0000F000
-#  define DC_GCFG_DFHPEL_POS         12
-#  define DC_GCFG_STFM       0x00010000
-#  define DC_GCFG_FDTY       0x00020000
-#  define DC_GCFG_VGAFT              0x00040000
-#  define DC_GCFG_VDSE       0x00080000
-#  define DC_GCFG_YUVM       0x00100000
-#  define DC_GCFG_VFSL       0x00800000
-#  define DC_GCFG_SIGE       0x01000000
-#  define DC_GCFG_SGRE       0x02000000
-#  define DC_GCFG_SGFR       0x04000000
-#  define DC_GCFG_CRC_MODE    0x08000000
-#  define DC_GCFG_DIAG       0x10000000
-#  define DC_GCFG_CFRW       0x20000000
-
-#define DC_DISPLAY_CFG 0x08
-#  define DC_DCFG_TGEN            0x00000001
-#  define DC_DCFG_GDEN            0x00000008
-#  define DC_DCFG_VDEN            0x00000010
-#  define DC_DCFG_TRUP            0x00000040
-#  define DC_DCFG_DISP_MODE_MASK  0x00000300
-#  define DC_DCFG_DISP_MODE_8BPP  0x00000000
-#  define DC_DCFG_DISP_MODE_16BPP 0x00000100
-#  define DC_DCFG_DISP_MODE_24BPP 0x00000200
-#  define DC_DCFG_16BPP_MODE_MASK 0x00000c00
-#  define DC_DCFG_16BPP_MODE_565  0x00000000
-#  define DC_DCFG_16BPP_MODE_555  0x00000100
-#  define DC_DCFG_16BPP_MODE_444  0x00000200
-#  define DC_DCFG_DCEN            0x00080000
-#  define DC_DCFG_PALB            0x02000000
-#  define DC_DCFG_FRLK            0x04000000
-#  define DC_DCFG_VISL            0x08000000
-#  define DC_DCFG_FRSL            0x20000000
-#  define DC_DCFG_A18M            0x40000000
-#  define DC_DCFG_A20M            0x80000000
-
-#define DC_FB_ST_OFFSET 0x10
-
-#define DC_LINE_SIZE 0x30
-#  define DC_LINE_SIZE_FB_LINE_SIZE_MASK  0x000007ff
-#  define DC_LINE_SIZE_FB_LINE_SIZE_POS            0
-#  define DC_LINE_SIZE_CB_LINE_SIZE_MASK  0x007f0000
-#  define DC_LINE_SIZE_CB_LINE_SIZE_POS           16
-#  define DC_LINE_SIZE_VID_LINE_SIZE_MASK 0xff000000
-#  define DC_LINE_SIZE_VID_LINE_SIZE_POS          24
-
-#define DC_GFX_PITCH 0x34
-#  define DC_GFX_PITCH_FB_PITCH_MASK 0x0000ffff
-#  define DC_GFX_PITCH_FB_PITCH_POS           0
-#  define DC_GFX_PITCH_CB_PITCH_MASK 0xffff0000
-#  define DC_GFX_PITCH_CB_PITCH_POS          16
-
-#define DC_H_ACTIVE_TIMING 0x40
-#define DC_H_BLANK_TIMING  0x44
-#define DC_H_SYNC_TIMING   0x48
-#define DC_V_ACTIVE_TIMING 0x50
-#define DC_V_BLANK_TIMING  0x54
-#define DC_V_SYNC_TIMING   0x58
-
-#define DC_PAL_ADDRESS 0x70
-#define DC_PAL_DATA    0x74
-
-#define DC_GLIU0_MEM_OFFSET 0x84
-#endif /* !__DISPLAY_GX1_H__ */
diff --git a/drivers/video/geode/gxfb.h b/drivers/video/geode/gxfb.h
new file mode 100644 (file)
index 0000000..16a96f8
--- /dev/null
@@ -0,0 +1,358 @@
+/*
+ * Copyright (C) 2008 Andres Salomon <dilinger@debian.org>
+ *
+ * Geode GX2 header information
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _GXFB_H_
+#define _GXFB_H_
+
+#include <linux/io.h>
+
+#define GP_REG_COUNT   (0x50 / 4)
+#define DC_REG_COUNT   (0x90 / 4)
+#define VP_REG_COUNT   (0x138 / 8)
+#define FP_REG_COUNT   (0x68 / 8)
+
+#define DC_PAL_COUNT   0x104
+
+struct gxfb_par {
+       int enable_crt;
+       void __iomem *dc_regs;
+       void __iomem *vid_regs;
+       void __iomem *gp_regs;
+#ifdef CONFIG_PM
+       int powered_down;
+
+       /* register state, for power management functionality */
+       struct {
+               uint64_t padsel;
+               uint64_t dotpll;
+       } msr;
+
+       uint32_t gp[GP_REG_COUNT];
+       uint32_t dc[DC_REG_COUNT];
+       uint64_t vp[VP_REG_COUNT];
+       uint64_t fp[FP_REG_COUNT];
+
+       uint32_t pal[DC_PAL_COUNT];
+#endif
+};
+
+unsigned int gx_frame_buffer_size(void);
+int gx_line_delta(int xres, int bpp);
+void gx_set_mode(struct fb_info *info);
+void gx_set_hw_palette_reg(struct fb_info *info, unsigned regno,
+               unsigned red, unsigned green, unsigned blue);
+
+void gx_set_dclk_frequency(struct fb_info *info);
+void gx_configure_display(struct fb_info *info);
+int gx_blank_display(struct fb_info *info, int blank_mode);
+
+#ifdef CONFIG_PM
+int gx_powerdown(struct fb_info *info);
+int gx_powerup(struct fb_info *info);
+#endif
+
+
+/* Graphics Processor registers (table 6-23 from the data book) */
+enum gp_registers {
+       GP_DST_OFFSET = 0,
+       GP_SRC_OFFSET,
+       GP_STRIDE,
+       GP_WID_HEIGHT,
+
+       GP_SRC_COLOR_FG,
+       GP_SRC_COLOR_BG,
+       GP_PAT_COLOR_0,
+       GP_PAT_COLOR_1,
+
+       GP_PAT_COLOR_2,
+       GP_PAT_COLOR_3,
+       GP_PAT_COLOR_4,
+       GP_PAT_COLOR_5,
+
+       GP_PAT_DATA_0,
+       GP_PAT_DATA_1,
+       GP_RASTER_MODE,
+       GP_VECTOR_MODE,
+
+       GP_BLT_MODE,
+       GP_BLT_STATUS,
+       GP_HST_SRC,
+       GP_BASE_OFFSET, /* 0x4c */
+};
+
+#define GP_BLT_STATUS_BLT_PENDING      (1 << 2)
+#define GP_BLT_STATUS_BLT_BUSY         (1 << 0)
+
+
+/* Display Controller registers (table 6-38 from the data book) */
+enum dc_registers {
+       DC_UNLOCK = 0,
+       DC_GENERAL_CFG,
+       DC_DISPLAY_CFG,
+       DC_RSVD_0,
+
+       DC_FB_ST_OFFSET,
+       DC_CB_ST_OFFSET,
+       DC_CURS_ST_OFFSET,
+       DC_ICON_ST_OFFSET,
+
+       DC_VID_Y_ST_OFFSET,
+       DC_VID_U_ST_OFFSET,
+       DC_VID_V_ST_OFFSET,
+       DC_RSVD_1,
+
+       DC_LINE_SIZE,
+       DC_GFX_PITCH,
+       DC_VID_YUV_PITCH,
+       DC_RSVD_2,
+
+       DC_H_ACTIVE_TIMING,
+       DC_H_BLANK_TIMING,
+       DC_H_SYNC_TIMING,
+       DC_RSVD_3,
+
+       DC_V_ACTIVE_TIMING,
+       DC_V_BLANK_TIMING,
+       DC_V_SYNC_TIMING,
+       DC_RSVD_4,
+
+       DC_CURSOR_X,
+       DC_CURSOR_Y,
+       DC_ICON_X,
+       DC_LINE_CNT,
+
+       DC_PAL_ADDRESS,
+       DC_PAL_DATA,
+       DC_DFIFO_DIAG,
+       DC_CFIFO_DIAG,
+
+       DC_VID_DS_DELTA,
+       DC_GLIU0_MEM_OFFSET,
+       DC_RSVD_5,
+       DC_DV_ACC, /* 0x8c */
+};
+
+#define DC_UNLOCK_LOCK                 0x00000000
+#define DC_UNLOCK_UNLOCK               0x00004758      /* magic value */
+
+#define DC_GENERAL_CFG_YUVM            (1 << 20)
+#define DC_GENERAL_CFG_VDSE            (1 << 19)
+#define DC_GENERAL_CFG_DFHPEL_SHIFT    12
+#define DC_GENERAL_CFG_DFHPSL_SHIFT    8
+#define DC_GENERAL_CFG_DECE            (1 << 6)
+#define DC_GENERAL_CFG_CMPE            (1 << 5)
+#define DC_GENERAL_CFG_VIDE            (1 << 3)
+#define DC_GENERAL_CFG_ICNE            (1 << 2)
+#define DC_GENERAL_CFG_CURE            (1 << 1)
+#define DC_GENERAL_CFG_DFLE            (1 << 0)
+
+#define DC_DISPLAY_CFG_A20M            (1 << 31)
+#define DC_DISPLAY_CFG_A18M            (1 << 30)
+#define DC_DISPLAY_CFG_PALB            (1 << 25)
+#define DC_DISPLAY_CFG_DISP_MODE_24BPP (1 << 9)
+#define DC_DISPLAY_CFG_DISP_MODE_16BPP (1 << 8)
+#define DC_DISPLAY_CFG_DISP_MODE_8BPP  (0)
+#define DC_DISPLAY_CFG_VDEN            (1 << 4)
+#define DC_DISPLAY_CFG_GDEN            (1 << 3)
+#define DC_DISPLAY_CFG_TGEN            (1 << 0)
+
+
+/*
+ * Video Processor registers (table 6-54).
+ * There is space for 64 bit values, but we never use more than the
+ * lower 32 bits.  The actual register save/restore code only bothers
+ * to restore those 32 bits.
+ */
+enum vp_registers {
+       VP_VCFG = 0,
+       VP_DCFG,
+
+       VP_VX,
+       VP_VY,
+
+       VP_VS,
+       VP_VCK,
+
+       VP_VCM,
+       VP_GAR,
+
+       VP_GDR,
+       VP_RSVD_0,
+
+       VP_MISC,
+       VP_CCS,
+
+       VP_RSVD_1,
+       VP_RSVD_2,
+
+       VP_RSVD_3,
+       VP_VDC,
+
+       VP_VCO,
+       VP_CRC,
+
+       VP_CRC32,
+       VP_VDE,
+
+       VP_CCK,
+       VP_CCM,
+
+       VP_CC1,
+       VP_CC2,
+
+       VP_A1X,
+       VP_A1Y,
+
+       VP_A1C,
+       VP_A1T,
+
+       VP_A2X,
+       VP_A2Y,
+
+       VP_A2C,
+       VP_A2T,
+
+       VP_A3X,
+       VP_A3Y,
+
+       VP_A3C,
+       VP_A3T,
+
+       VP_VRR,
+       VP_AWT,
+
+       VP_VTM, /* 0x130 */
+};
+
+#define VP_VCFG_VID_EN                 (1 << 0)
+
+#define VP_DCFG_DAC_VREF               (1 << 26)
+#define VP_DCFG_GV_GAM                 (1 << 21)
+#define VP_DCFG_VG_CK                  (1 << 20)
+#define VP_DCFG_CRT_SYNC_SKW_DEFAULT   (1 << 16)
+#define VP_DCFG_CRT_SYNC_SKW           ((1 << 14) | (1 << 15) | (1 << 16))
+#define VP_DCFG_CRT_VSYNC_POL          (1 << 9)
+#define VP_DCFG_CRT_HSYNC_POL          (1 << 8)
+#define VP_DCFG_FP_DATA_EN             (1 << 7)        /* undocumented */
+#define VP_DCFG_FP_PWR_EN              (1 << 6)        /* undocumented */
+#define VP_DCFG_DAC_BL_EN              (1 << 3)
+#define VP_DCFG_VSYNC_EN               (1 << 2)
+#define VP_DCFG_HSYNC_EN               (1 << 1)
+#define VP_DCFG_CRT_EN                 (1 << 0)
+
+#define VP_MISC_GAM_EN                 (1 << 0)
+#define VP_MISC_DACPWRDN               (1 << 10)
+#define VP_MISC_APWRDN                 (1 << 11)
+
+
+/*
+ * Flat Panel registers (table 6-55).
+ * Also 64 bit registers; see above note about 32-bit handling.
+ */
+
+/* we're actually in the VP register space, starting at address 0x400 */
+#define VP_FP_START            0x400
+
+enum fp_registers {
+       FP_PT1 = 0,
+       FP_PT2,
+
+       FP_PM,
+       FP_DFC,
+
+       FP_BLFSR,
+       FP_RLFSR,
+
+       FP_FMI,
+       FP_FMD,
+
+       FP_RSVD_0,
+       FP_DCA,
+
+       FP_DMD,
+       FP_CRC,
+
+       FP_FBB, /* 0x460 */
+};
+
+#define FP_PT1_VSIZE_SHIFT             16              /* undocumented? */
+#define FP_PT1_VSIZE_MASK              0x7FF0000       /* undocumented? */
+
+#define FP_PT2_HSP                     (1 << 22)
+#define FP_PT2_VSP                     (1 << 23)
+
+#define FP_PM_P                                (1 << 24)       /* panel power on */
+#define FP_PM_PANEL_PWR_UP             (1 << 3)        /* r/o */
+#define FP_PM_PANEL_PWR_DOWN           (1 << 2)        /* r/o */
+#define FP_PM_PANEL_OFF                        (1 << 1)        /* r/o */
+#define FP_PM_PANEL_ON                 (1 << 0)        /* r/o */
+
+#define FP_DFC_NFI                     ((1 << 4) | (1 << 5) | (1 << 6))
+
+
+/* register access functions */
+
+static inline uint32_t read_gp(struct gxfb_par *par, int reg)
+{
+       return readl(par->gp_regs + 4*reg);
+}
+
+static inline void write_gp(struct gxfb_par *par, int reg, uint32_t val)
+{
+       writel(val, par->gp_regs + 4*reg);
+}
+
+static inline uint32_t read_dc(struct gxfb_par *par, int reg)
+{
+       return readl(par->dc_regs + 4*reg);
+}
+
+static inline void write_dc(struct gxfb_par *par, int reg, uint32_t val)
+{
+       writel(val, par->dc_regs + 4*reg);
+}
+
+static inline uint32_t read_vp(struct gxfb_par *par, int reg)
+{
+       return readl(par->vid_regs + 8*reg);
+}
+
+static inline void write_vp(struct gxfb_par *par, int reg, uint32_t val)
+{
+       writel(val, par->vid_regs + 8*reg);
+}
+
+static inline uint32_t read_fp(struct gxfb_par *par, int reg)
+{
+       return readl(par->vid_regs + 8*reg + VP_FP_START);
+}
+
+static inline void write_fp(struct gxfb_par *par, int reg, uint32_t val)
+{
+       writel(val, par->vid_regs + 8*reg + VP_FP_START);
+}
+
+
+/* MSRs are defined in asm/geode.h; their bitfields are here */
+
+#define MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3        (1 << 3)
+#define MSR_GLCP_SYS_RSTPLL_DOTPREMULT2        (1 << 2)
+#define MSR_GLCP_SYS_RSTPLL_DOTPREDIV2 (1 << 1)
+
+#define MSR_GLCP_DOTPLL_LOCK           (1 << 25)       /* r/o */
+#define MSR_GLCP_DOTPLL_BYPASS         (1 << 15)
+#define MSR_GLCP_DOTPLL_DOTRESET       (1 << 0)
+
+#define MSR_GX_MSR_PADSEL_MASK         0x3FFFFFFF      /* undocumented? */
+#define MSR_GX_MSR_PADSEL_TFT          0x1FFFFFFF      /* undocumented? */
+
+#define MSR_GX_GLD_MSR_CONFIG_FP       (1 << 3)
+
+#endif
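The inline accessors at the bottom of gxfb.h hide the register strides: 32-bit GP/DC registers sit on 4-byte boundaries, the 64-bit VP/FP slots on 8-byte boundaries, and the flat-panel block starts at VP_FP_START inside the VP space. Below is a sketch of the unlock/program/relock idiom that gx_set_mode() follows, written against these helpers; gx_tgen_enable is a hypothetical name, not a function added by this patch.

/* Sketch: toggling the timing generator through the gxfb.h accessors.
 * 'par' is assumed to come from info->par after gxfb_map_video_memory(). */
static void gx_tgen_enable(struct gxfb_par *par, int enable)
{
        u32 dcfg;

        /* DC registers only accept writes while unlocked */
        write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);

        dcfg = read_dc(par, DC_DISPLAY_CFG);
        if (enable)
                dcfg |= DC_DISPLAY_CFG_TGEN;
        else
                dcfg &= ~DC_DISPLAY_CFG_TGEN;
        write_dc(par, DC_DISPLAY_CFG, dcfg);

        /* relock to guard against stray writes */
        write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
}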
index cf841efa229ad68cd20bc76d4ccc788f98767e30..de2b8f9876a5398b1534aa77ac9c269c38e67408 100644 (file)
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/fb.h>
+#include <linux/console.h>
+#include <linux/suspend.h>
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <asm/geode.h>
 
-#include "geodefb.h"
-#include "display_gx.h"
-#include "video_gx.h"
+#include "gxfb.h"
 
 static char *mode_option;
+static int vram;
+static int vt_switch;
 
 /* Modes relevant to the GX (taken from modedb.c) */
-static const struct fb_videomode gx_modedb[] __initdata = {
+static struct fb_videomode gx_modedb[] __initdata = {
        /* 640x480-60 VESA */
        { NULL, 60, 640, 480, 39682,  48, 16, 33, 10, 96, 2,
          0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
@@ -105,6 +108,35 @@ static const struct fb_videomode gx_modedb[] __initdata = {
          FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
 };
 
+#ifdef CONFIG_OLPC
+#include <asm/olpc.h>
+
+static struct fb_videomode gx_dcon_modedb[] __initdata = {
+       /* The only mode the DCON has is 1200x900 */
+       { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3,
+         FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+         FB_VMODE_NONINTERLACED, 0 }
+};
+
+static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size)
+{
+       if (olpc_has_dcon()) {
+               *modedb = (struct fb_videomode *) gx_dcon_modedb;
+               *size = ARRAY_SIZE(gx_dcon_modedb);
+       } else {
+               *modedb = (struct fb_videomode *) gx_modedb;
+               *size = ARRAY_SIZE(gx_modedb);
+       }
+}
+
+#else
+static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size)
+{
+       *modedb = (struct fb_videomode *) gx_modedb;
+       *size = ARRAY_SIZE(gx_modedb);
+}
+#endif
+
 static int gxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 {
        if (var->xres > 1600 || var->yres > 1200)
@@ -139,8 +171,6 @@ static int gxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 
 static int gxfb_set_par(struct fb_info *info)
 {
-       struct geodefb_par *par = info->par;
-
        if (info->var.bits_per_pixel > 8) {
                info->fix.visual = FB_VISUAL_TRUECOLOR;
                fb_dealloc_cmap(&info->cmap);
@@ -151,7 +181,7 @@ static int gxfb_set_par(struct fb_info *info)
 
        info->fix.line_length = gx_line_delta(info->var.xres, info->var.bits_per_pixel);
 
-       par->dc_ops->set_mode(info);
+       gx_set_mode(info);
 
        return 0;
 }
@@ -167,8 +197,6 @@ static int gxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
                           unsigned blue, unsigned transp,
                           struct fb_info *info)
 {
-       struct geodefb_par *par = info->par;
-
        if (info->var.grayscale) {
                /* grayscale = 0.30*R + 0.59*G + 0.11*B */
                red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
@@ -191,7 +219,7 @@ static int gxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
                if (regno >= 256)
                        return -EINVAL;
 
-               par->dc_ops->set_palette_reg(info, regno, red, green, blue);
+               gx_set_hw_palette_reg(info, regno, red, green, blue);
        }
 
        return 0;
@@ -199,15 +227,12 @@ static int gxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
 
 static int gxfb_blank(int blank_mode, struct fb_info *info)
 {
-       struct geodefb_par *par = info->par;
-
-       return par->vid_ops->blank_display(info, blank_mode);
+       return gx_blank_display(info, blank_mode);
 }
 
 static int __init gxfb_map_video_memory(struct fb_info *info, struct pci_dev *dev)
 {
-       struct geodefb_par *par = info->par;
-       int fb_len;
+       struct gxfb_par *par = info->par;
        int ret;
 
        ret = pci_enable_device(dev);
@@ -229,24 +254,31 @@ static int __init gxfb_map_video_memory(struct fb_info *info, struct pci_dev *de
        if (!par->dc_regs)
                return -ENOMEM;
 
-       ret = pci_request_region(dev, 0, "gxfb (framebuffer)");
+       ret = pci_request_region(dev, 1, "gxfb (graphics processor)");
        if (ret < 0)
                return ret;
-       if ((fb_len = gx_frame_buffer_size()) < 0)
+       par->gp_regs = ioremap(pci_resource_start(dev, 1),
+                              pci_resource_len(dev, 1));
+
+       if (!par->gp_regs)
                return -ENOMEM;
+
+       ret = pci_request_region(dev, 0, "gxfb (framebuffer)");
+       if (ret < 0)
+               return ret;
+
        info->fix.smem_start = pci_resource_start(dev, 0);
-       info->fix.smem_len = fb_len;
+       info->fix.smem_len = vram ? vram : gx_frame_buffer_size();
        info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
        if (!info->screen_base)
                return -ENOMEM;
 
-       /* Set the 16MB aligned base address of the graphics memory region
+       /* Set the 16MiB aligned base address of the graphics memory region
         * in the display controller */
 
-       writel(info->fix.smem_start & 0xFF000000,
-                       par->dc_regs + DC_GLIU0_MEM_OFFSET);
+       write_dc(par, DC_GLIU0_MEM_OFFSET, info->fix.smem_start & 0xFF000000);
 
-       dev_info(&dev->dev, "%d Kibyte of video memory at 0x%lx\n",
+       dev_info(&dev->dev, "%d KiB of video memory at 0x%lx\n",
                 info->fix.smem_len / 1024, info->fix.smem_start);
 
        return 0;
@@ -266,11 +298,12 @@ static struct fb_ops gxfb_ops = {
 
 static struct fb_info * __init gxfb_init_fbinfo(struct device *dev)
 {
-       struct geodefb_par *par;
+       struct gxfb_par *par;
        struct fb_info *info;
 
        /* Alloc enough space for the pseudo palette. */
-       info = framebuffer_alloc(sizeof(struct geodefb_par) + sizeof(u32) * 16, dev);
+       info = framebuffer_alloc(sizeof(struct gxfb_par) + sizeof(u32) * 16,
+                       dev);
        if (!info)
                return NULL;
 
@@ -296,29 +329,64 @@ static struct fb_info * __init gxfb_init_fbinfo(struct device *dev)
        info->flags             = FBINFO_DEFAULT;
        info->node              = -1;
 
-       info->pseudo_palette    = (void *)par + sizeof(struct geodefb_par);
+       info->pseudo_palette    = (void *)par + sizeof(struct gxfb_par);
 
        info->var.grayscale     = 0;
 
        return info;
 }
 
+#ifdef CONFIG_PM
+static int gxfb_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct fb_info *info = pci_get_drvdata(pdev);
+
+       if (state.event == PM_EVENT_SUSPEND) {
+               acquire_console_sem();
+               gx_powerdown(info);
+               fb_set_suspend(info, 1);
+               release_console_sem();
+       }
+
+       /* there's no point in setting PCI states; we emulate PCI, so
+        * we don't end up getting power savings anyway */
+
+       return 0;
+}
+
+static int gxfb_resume(struct pci_dev *pdev)
+{
+       struct fb_info *info = pci_get_drvdata(pdev);
+       int ret;
+
+       acquire_console_sem();
+       ret = gx_powerup(info);
+       if (ret) {
+               printk(KERN_ERR "gxfb: power up failed!\n");
+               return ret;
+       }
+
+       fb_set_suspend(info, 0);
+       release_console_sem();
+       return 0;
+}
+#endif
+
 static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-       struct geodefb_par *par;
+       struct gxfb_par *par;
        struct fb_info *info;
        int ret;
        unsigned long val;
 
+       struct fb_videomode *modedb_ptr;
+       unsigned int modedb_size;
+
        info = gxfb_init_fbinfo(&pdev->dev);
        if (!info)
                return -ENOMEM;
        par = info->par;
 
-       /* GX display controller and GX video device. */
-       par->dc_ops  = &gx_dc_ops;
-       par->vid_ops = &gx_vid_ops;
-
        if ((ret = gxfb_map_video_memory(info, pdev)) < 0) {
                dev_err(&pdev->dev, "failed to map frame buffer or controller registers\n");
                goto err;
@@ -326,15 +394,16 @@ static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *i
 
        /* Figure out if this is a TFT or CRT part */
 
-       rdmsrl(GLD_MSR_CONFIG, val);
+       rdmsrl(MSR_GX_GLD_MSR_CONFIG, val);
 
-       if ((val & GLD_MSR_CONFIG_DM_FP) == GLD_MSR_CONFIG_DM_FP)
+       if ((val & MSR_GX_GLD_MSR_CONFIG_FP) == MSR_GX_GLD_MSR_CONFIG_FP)
                par->enable_crt = 0;
        else
                par->enable_crt = 1;
 
+       get_modedb(&modedb_ptr, &modedb_size);
        ret = fb_find_mode(&info->var, info, mode_option,
-                          gx_modedb, ARRAY_SIZE(gx_modedb), NULL, 16);
+                          modedb_ptr, modedb_size, NULL, 16);
        if (ret == 0 || ret == 4) {
                dev_err(&pdev->dev, "could not find valid video mode\n");
                ret = -EINVAL;
@@ -348,6 +417,8 @@ static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *i
        gxfb_check_var(&info->var, info);
        gxfb_set_par(info);
 
+       pm_set_vt_switch(vt_switch);
+
        if (register_framebuffer(info) < 0) {
                ret = -EINVAL;
                goto err;
@@ -369,6 +440,10 @@ static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *i
                iounmap(par->dc_regs);
                pci_release_region(pdev, 2);
        }
+       if (par->gp_regs) {
+               iounmap(par->gp_regs);
+               pci_release_region(pdev, 1);
+       }
 
        if (info)
                framebuffer_release(info);
@@ -378,7 +453,7 @@ static int __init gxfb_probe(struct pci_dev *pdev, const struct pci_device_id *i
 static void gxfb_remove(struct pci_dev *pdev)
 {
        struct fb_info *info = pci_get_drvdata(pdev);
-       struct geodefb_par *par = info->par;
+       struct gxfb_par *par = info->par;
 
        unregister_framebuffer(info);
 
@@ -391,15 +466,16 @@ static void gxfb_remove(struct pci_dev *pdev)
        iounmap(par->dc_regs);
        pci_release_region(pdev, 2);
 
+       iounmap(par->gp_regs);
+       pci_release_region(pdev, 1);
+
        pci_set_drvdata(pdev, NULL);
 
        framebuffer_release(info);
 }
 
 static struct pci_device_id gxfb_id_table[] = {
-       { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_GX_VIDEO,
-         PCI_ANY_ID, PCI_ANY_ID, PCI_BASE_CLASS_DISPLAY << 16,
-         0xff0000, 0 },
+       { PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_GX_VIDEO) },
        { 0, }
 };
 
@@ -410,6 +486,10 @@ static struct pci_driver gxfb_driver = {
        .id_table       = gxfb_id_table,
        .probe          = gxfb_probe,
        .remove         = gxfb_remove,
+#ifdef CONFIG_PM
+       .suspend        = gxfb_suspend,
+       .resume         = gxfb_resume,
+#endif
 };
 
 #ifndef MODULE
@@ -456,5 +536,11 @@ module_exit(gxfb_cleanup);
 module_param(mode_option, charp, 0);
 MODULE_PARM_DESC(mode_option, "video mode (<x>x<y>[-<bpp>][@<refr>])");
 
+module_param(vram, int, 0);
+MODULE_PARM_DESC(vram, "video memory size");
+
+module_param(vt_switch, int, 0);
+MODULE_PARM_DESC(vt_switch, "enable VT switch during suspend/resume");
+
 MODULE_DESCRIPTION("Framebuffer driver for the AMD Geode GX");
 MODULE_LICENSE("GPL");
index ca13c48d19b068a58d2b5b9f24519df840725093..3b9416f4ee20fba2143169e97b886633e4d63fa5 100644 (file)
@@ -3,17 +3,46 @@
 
 #include <linux/fb.h>
 
+#define GP_REG_COUNT   (0x7c / 4)
+#define DC_REG_COUNT   (0xf0 / 4)
+#define VP_REG_COUNT   (0x158 / 8)
+#define FP_REG_COUNT   (0x60 / 8)
+
+#define DC_PAL_COUNT   0x104
+#define DC_HFILT_COUNT 0x100
+#define DC_VFILT_COUNT 0x100
+#define VP_COEFF_SIZE  0x1000
+
 #define OUTPUT_CRT   0x01
 #define OUTPUT_PANEL 0x02
 
 struct lxfb_par {
        int output;
-       int panel_width;
-       int panel_height;
 
        void __iomem *gp_regs;
        void __iomem *dc_regs;
-       void __iomem *df_regs;
+       void __iomem *vp_regs;
+#ifdef CONFIG_PM
+       int powered_down;
+
+       /* register state, for power mgmt functionality */
+       struct {
+               uint64_t padsel;
+               uint64_t dotpll;
+               uint64_t dfglcfg;
+               uint64_t dcspare;
+       } msr;
+
+       uint32_t gp[GP_REG_COUNT];
+       uint32_t dc[DC_REG_COUNT];
+       uint64_t vp[VP_REG_COUNT];
+       uint64_t fp[FP_REG_COUNT];
+
+       uint32_t pal[DC_PAL_COUNT];
+       uint32_t hcoeff[DC_HFILT_COUNT * 2];
+       uint32_t vcoeff[DC_VFILT_COUNT];
+       uint32_t vp_coeff[VP_COEFF_SIZE / 4];
+#endif
 };
 
 static inline unsigned int lx_get_pitch(unsigned int xres, int bpp)
@@ -29,171 +58,383 @@ int lx_blank_display(struct fb_info *, int);
 void lx_set_palette_reg(struct fb_info *, unsigned int, unsigned int,
                        unsigned int, unsigned int);
 
-/* MSRS */
+#ifdef CONFIG_PM
+int lx_powerdown(struct fb_info *info);
+int lx_powerup(struct fb_info *info);
+#endif
+
+
+/* Graphics Processor registers (table 6-29 from the data book) */
+enum gp_registers {
+       GP_DST_OFFSET = 0,
+       GP_SRC_OFFSET,
+       GP_STRIDE,
+       GP_WID_HEIGHT,
+
+       GP_SRC_COLOR_FG,
+       GP_SRC_COLOR_BG,
+       GP_PAT_COLOR_0,
+       GP_PAT_COLOR_1,
+
+       GP_PAT_COLOR_2,
+       GP_PAT_COLOR_3,
+       GP_PAT_COLOR_4,
+       GP_PAT_COLOR_5,
+
+       GP_PAT_DATA_0,
+       GP_PAT_DATA_1,
+       GP_RASTER_MODE,
+       GP_VECTOR_MODE,
+
+       GP_BLT_MODE,
+       GP_BLT_STATUS,
+       GP_HST_SRC,
+       GP_BASE_OFFSET,
+
+       GP_CMD_TOP,
+       GP_CMD_BOT,
+       GP_CMD_READ,
+       GP_CMD_WRITE,
+
+       GP_CH3_OFFSET,
+       GP_CH3_MODE_STR,
+       GP_CH3_WIDHI,
+       GP_CH3_HSRC,
+
+       GP_LUT_INDEX,
+       GP_LUT_DATA,
+       GP_INT_CNTRL, /* 0x78 */
+};
+
+#define GP_BLT_STATUS_CE               (1 << 4)        /* cmd buf empty */
+#define GP_BLT_STATUS_PB               (1 << 0)        /* primitive busy */
+
+
+/* Display Controller registers (table 6-47 from the data book) */
+enum dc_registers {
+       DC_UNLOCK = 0,
+       DC_GENERAL_CFG,
+       DC_DISPLAY_CFG,
+       DC_ARB_CFG,
+
+       DC_FB_ST_OFFSET,
+       DC_CB_ST_OFFSET,
+       DC_CURS_ST_OFFSET,
+       DC_RSVD_0,
+
+       DC_VID_Y_ST_OFFSET,
+       DC_VID_U_ST_OFFSET,
+       DC_VID_V_ST_OFFSET,
+       DC_DV_TOP,
+
+       DC_LINE_SIZE,
+       DC_GFX_PITCH,
+       DC_VID_YUV_PITCH,
+       DC_RSVD_1,
+
+       DC_H_ACTIVE_TIMING,
+       DC_H_BLANK_TIMING,
+       DC_H_SYNC_TIMING,
+       DC_RSVD_2,
+
+       DC_V_ACTIVE_TIMING,
+       DC_V_BLANK_TIMING,
+       DC_V_SYNC_TIMING,
+       DC_FB_ACTIVE,
+
+       DC_CURSOR_X,
+       DC_CURSOR_Y,
+       DC_RSVD_3,
+       DC_LINE_CNT,
+
+       DC_PAL_ADDRESS,
+       DC_PAL_DATA,
+       DC_DFIFO_DIAG,
+       DC_CFIFO_DIAG,
+
+       DC_VID_DS_DELTA,
+       DC_GLIU0_MEM_OFFSET,
+       DC_DV_CTL,
+       DC_DV_ACCESS,
+
+       DC_GFX_SCALE,
+       DC_IRQ_FILT_CTL,
+       DC_FILT_COEFF1,
+       DC_FILT_COEFF2,
+
+       DC_VBI_EVEN_CTL,
+       DC_VBI_ODD_CTL,
+       DC_VBI_HOR,
+       DC_VBI_LN_ODD,
+
+       DC_VBI_LN_EVEN,
+       DC_VBI_PITCH,
+       DC_CLR_KEY,
+       DC_CLR_KEY_MASK,
+
+       DC_CLR_KEY_X,
+       DC_CLR_KEY_Y,
+       DC_IRQ,
+       DC_RSVD_4,
+
+       DC_RSVD_5,
+       DC_GENLK_CTL,
+       DC_VID_EVEN_Y_ST_OFFSET,
+       DC_VID_EVEN_U_ST_OFFSET,
+
+       DC_VID_EVEN_V_ST_OFFSET,
+       DC_V_ACTIVE_EVEN_TIMING,
+       DC_V_BLANK_EVEN_TIMING,
+       DC_V_SYNC_EVEN_TIMING,  /* 0xec */
+};
+
+#define DC_UNLOCK_LOCK                 0x00000000
+#define DC_UNLOCK_UNLOCK               0x00004758      /* magic value */
+
+#define DC_GENERAL_CFG_FDTY            (1 << 17)
+#define DC_GENERAL_CFG_DFHPEL_SHIFT    (12)
+#define DC_GENERAL_CFG_DFHPSL_SHIFT    (8)
+#define DC_GENERAL_CFG_VGAE            (1 << 7)
+#define DC_GENERAL_CFG_DECE            (1 << 6)
+#define DC_GENERAL_CFG_CMPE            (1 << 5)
+#define DC_GENERAL_CFG_VIDE            (1 << 3)
+#define DC_GENERAL_CFG_DFLE            (1 << 0)
+
+#define DC_DISPLAY_CFG_VISL            (1 << 27)
+#define DC_DISPLAY_CFG_PALB            (1 << 25)
+#define DC_DISPLAY_CFG_DCEN            (1 << 24)
+#define DC_DISPLAY_CFG_DISP_MODE_24BPP (1 << 9)
+#define DC_DISPLAY_CFG_DISP_MODE_16BPP (1 << 8)
+#define DC_DISPLAY_CFG_DISP_MODE_8BPP  (0)
+#define DC_DISPLAY_CFG_TRUP            (1 << 6)
+#define DC_DISPLAY_CFG_VDEN            (1 << 4)
+#define DC_DISPLAY_CFG_GDEN            (1 << 3)
+#define DC_DISPLAY_CFG_TGEN            (1 << 0)
+
+#define DC_DV_TOP_DV_TOP_EN            (1 << 0)
+
+#define DC_DV_CTL_DV_LINE_SIZE         ((1 << 10) | (1 << 11))
+#define DC_DV_CTL_DV_LINE_SIZE_1K      (0)
+#define DC_DV_CTL_DV_LINE_SIZE_2K      (1 << 10)
+#define DC_DV_CTL_DV_LINE_SIZE_4K      (1 << 11)
+#define DC_DV_CTL_DV_LINE_SIZE_8K      ((1 << 10) | (1 << 11))
+#define DC_DV_CTL_CLEAR_DV_RAM         (1 << 0)
+
+#define DC_IRQ_FILT_CTL_H_FILT_SEL     (1 << 10)
+
+#define DC_CLR_KEY_CLR_KEY_EN          (1 << 24)
+
+#define DC_IRQ_VIP_VSYNC_IRQ_STATUS    (1 << 21)       /* undocumented? */
+#define DC_IRQ_STATUS                  (1 << 20)       /* undocumented? */
+#define DC_IRQ_VIP_VSYNC_LOSS_IRQ_MASK (1 << 1)
+#define DC_IRQ_MASK                    (1 << 0)
 
-#define MSR_LX_GLD_CONFIG    0x48002001
-#define MSR_LX_GLCP_DOTPLL   0x4c000015
-#define MSR_LX_DF_PADSEL     0x48002011
-#define MSR_LX_DC_SPARE      0x80000011
-#define MSR_LX_DF_GLCONFIG   0x48002001
-
-#define MSR_LX_GLIU0_P2D_RO0 0x10000029
-
-#define GLCP_DOTPLL_RESET    (1 << 0)
-#define GLCP_DOTPLL_BYPASS   (1 << 15)
-#define GLCP_DOTPLL_HALFPIX  (1 << 24)
-#define GLCP_DOTPLL_LOCK     (1 << 25)
-
-#define DF_CONFIG_OUTPUT_MASK       0x38
-#define DF_OUTPUT_PANEL             0x08
-#define DF_OUTPUT_CRT               0x00
-#define DF_SIMULTANEOUS_CRT_AND_FP  (1 << 15)
-
-#define DF_DEFAULT_TFT_PAD_SEL_LOW  0xDFFFFFFF
-#define DF_DEFAULT_TFT_PAD_SEL_HIGH 0x0000003F
-
-#define DC_SPARE_DISABLE_CFIFO_HGO         0x00000800
-#define DC_SPARE_VFIFO_ARB_SELECT          0x00000400
-#define DC_SPARE_WM_LPEN_OVRD              0x00000200
-#define DC_SPARE_LOAD_WM_LPEN_MASK         0x00000100
-#define DC_SPARE_DISABLE_INIT_VID_PRI      0x00000080
-#define DC_SPARE_DISABLE_VFIFO_WM          0x00000040
-#define DC_SPARE_DISABLE_CWD_CHECK         0x00000020
-#define DC_SPARE_PIX8_PAN_FIX              0x00000010
-#define DC_SPARE_FIRST_REQ_MASK            0x00000002
-
-/* Registers */
-
-#define DC_UNLOCK         0x00
-#define  DC_UNLOCK_CODE   0x4758
+#define DC_GENLK_CTL_FLICK_SEL_MASK    (0x0F << 28)
+#define DC_GENLK_CTL_ALPHA_FLICK_EN    (1 << 25)
+#define DC_GENLK_CTL_FLICK_EN          (1 << 24)
+#define DC_GENLK_CTL_GENLK_EN          (1 << 18)
 
-#define DC_GENERAL_CFG    0x04
-#define  DC_GCFG_DFLE     (1 << 0)
-#define  DC_GCFG_VIDE     (1 << 3)
-#define  DC_GCFG_VGAE     (1 << 7)
-#define  DC_GCFG_CMPE     (1 << 5)
-#define  DC_GCFG_DECE     (1 << 6)
-#define  DC_GCFG_FDTY     (1 << 17)
 
-#define DC_DISPLAY_CFG    0x08
-#define  DC_DCFG_TGEN     (1 << 0)
-#define  DC_DCFG_GDEN     (1 << 3)
-#define  DC_DCFG_VDEN     (1 << 4)
-#define  DC_DCFG_TRUP     (1 << 6)
-#define  DC_DCFG_DCEN     (1 << 24)
-#define  DC_DCFG_PALB     (1 << 25)
-#define  DC_DCFG_VISL     (1 << 27)
+/*
+ * Video Processor registers (table 6-71).
+ * There is space for 64 bit values, but we never use more than the
+ * lower 32 bits.  The actual register save/restore code only bothers
+ * to restore those 32 bits.
+ */
+enum vp_registers {
+       VP_VCFG = 0,
+       VP_DCFG,
 
-#define  DC_DCFG_16BPP           0x0
+       VP_VX,
+       VP_VY,
 
-#define  DC_DCFG_DISP_MODE_MASK  0x00000300
-#define  DC_DCFG_DISP_MODE_8BPP  0x00000000
-#define  DC_DCFG_DISP_MODE_16BPP 0x00000100
-#define  DC_DCFG_DISP_MODE_24BPP 0x00000200
-#define  DC_DCFG_DISP_MODE_32BPP 0x00000300
+       VP_SCL,
+       VP_VCK,
 
+       VP_VCM,
+       VP_PAR,
 
-#define DC_ARB_CFG        0x0C
+       VP_PDR,
+       VP_SLR,
 
-#define DC_FB_START       0x10
-#define DC_CB_START       0x14
-#define DC_CURSOR_START   0x18
+       VP_MISC,
+       VP_CCS,
 
-#define DC_DV_TOP          0x2C
-#define DC_DV_TOP_ENABLE   (1 << 0)
+       VP_VYS,
+       VP_VXS,
 
-#define DC_LINE_SIZE       0x30
-#define DC_GRAPHICS_PITCH  0x34
-#define DC_H_ACTIVE_TIMING 0x40
-#define DC_H_BLANK_TIMING  0x44
-#define DC_H_SYNC_TIMING   0x48
-#define DC_V_ACTIVE_TIMING 0x50
-#define DC_V_BLANK_TIMING  0x54
-#define DC_V_SYNC_TIMING   0x58
-#define DC_FB_ACTIVE       0x5C
+       VP_RSVD_0,
+       VP_VDC,
+
+       VP_RSVD_1,
+       VP_CRC,
+
+       VP_CRC32,
+       VP_VDE,
+
+       VP_CCK,
+       VP_CCM,
+
+       VP_CC1,
+       VP_CC2,
+
+       VP_A1X,
+       VP_A1Y,
+
+       VP_A1C,
+       VP_A1T,
+
+       VP_A2X,
+       VP_A2Y,
+
+       VP_A2C,
+       VP_A2T,
+
+       VP_A3X,
+       VP_A3Y,
+
+       VP_A3C,
+       VP_A3T,
+
+       VP_VRR,
+       VP_AWT,
+
+       VP_VTM,
+       VP_VYE,
+
+       VP_A1YE,
+       VP_A2YE,
+
+       VP_A3YE,        /* 0x150 */
+
+       VP_VCR = 0x1000, /* 0x1000 - 0x1fff */
+};
 
-#define DC_PAL_ADDRESS     0x70
-#define DC_PAL_DATA        0x74
+#define VP_VCFG_VID_EN                 (1 << 0)
 
-#define DC_PHY_MEM_OFFSET  0x84
+#define VP_DCFG_GV_GAM                 (1 << 21)
+#define VP_DCFG_PWR_SEQ_DELAY          ((1 << 17) | (1 << 18) | (1 << 19))
+#define VP_DCFG_PWR_SEQ_DELAY_DEFAULT  (1 << 19)       /* undocumented */
+#define VP_DCFG_CRT_SYNC_SKW           ((1 << 14) | (1 << 15) | (1 << 16))
+#define VP_DCFG_CRT_SYNC_SKW_DEFAULT   (1 << 16)
+#define VP_DCFG_CRT_VSYNC_POL          (1 << 9)
+#define VP_DCFG_CRT_HSYNC_POL          (1 << 8)
+#define VP_DCFG_DAC_BL_EN              (1 << 3)
+#define VP_DCFG_VSYNC_EN               (1 << 2)
+#define VP_DCFG_HSYNC_EN               (1 << 1)
+#define VP_DCFG_CRT_EN                 (1 << 0)
 
-#define DC_DV_CTL          0x88
-#define DC_DV_LINE_SIZE_MASK               0x00000C00
-#define DC_DV_LINE_SIZE_1024               0x00000000
-#define DC_DV_LINE_SIZE_2048               0x00000400
-#define DC_DV_LINE_SIZE_4096               0x00000800
-#define DC_DV_LINE_SIZE_8192               0x00000C00
+#define VP_MISC_APWRDN                 (1 << 11)
+#define VP_MISC_DACPWRDN               (1 << 10)
+#define VP_MISC_BYP_BOTH               (1 << 0)
 
 
-#define DC_GFX_SCALE       0x90
-#define DC_IRQ_FILT_CTL    0x94
+/*
+ * Flat Panel registers (table 6-71).
+ * Also 64 bit registers; see above note about 32-bit handling.
+ */
 
+/* we're actually in the VP register space, starting at address 0x400 */
+#define VP_FP_START    0x400
 
-#define DC_IRQ               0xC8
-#define  DC_IRQ_MASK         (1 << 0)
-#define  DC_VSYNC_IRQ_MASK   (1 << 1)
-#define  DC_IRQ_STATUS       (1 << 20)
-#define  DC_VSYNC_IRQ_STATUS (1 << 21)
-
-#define DC_GENLCK_CTRL      0xD4
-#define  DC_GENLCK_ENABLE   (1 << 18)
-#define  DC_GC_ALPHA_FLICK_ENABLE  (1 << 25)
-#define  DC_GC_FLICKER_FILTER_ENABLE (1 << 24)
-#define  DC_GC_FLICKER_FILTER_MASK (0x0F << 28)
-
-#define DC_COLOR_KEY       0xB8
-#define DC_CLR_KEY_ENABLE (1 << 24)
-
-
-#define DC3_DV_LINE_SIZE_MASK               0x00000C00
-#define DC3_DV_LINE_SIZE_1024               0x00000000
-#define DC3_DV_LINE_SIZE_2048               0x00000400
-#define DC3_DV_LINE_SIZE_4096               0x00000800
-#define DC3_DV_LINE_SIZE_8192               0x00000C00
-
-#define DF_VIDEO_CFG       0x0
-#define  DF_VCFG_VID_EN    (1 << 0)
-
-#define DF_DISPLAY_CFG     0x08
-
-#define DF_DCFG_CRT_EN     (1 << 0)
-#define DF_DCFG_HSYNC_EN   (1 << 1)
-#define DF_DCFG_VSYNC_EN   (1 << 2)
-#define DF_DCFG_DAC_BL_EN  (1 << 3)
-#define DF_DCFG_CRT_HSYNC_POL  (1 << 8)
-#define DF_DCFG_CRT_VSYNC_POL  (1 << 9)
-#define DF_DCFG_GV_PAL_BYP     (1 << 21)
+enum fp_registers {
+       FP_PT1 = 0,
+       FP_PT2,
 
-#define DF_DCFG_CRT_SYNC_SKW_INIT 0x10000
-#define DF_DCFG_CRT_SYNC_SKW_MASK  0x1c000
+       FP_PM,
+       FP_DFC,
 
-#define DF_DCFG_PWR_SEQ_DLY_INIT     0x80000
-#define DF_DCFG_PWR_SEQ_DLY_MASK     0xe0000
+       FP_RSVD_0,
+       FP_RSVD_1,
 
-#define DF_MISC            0x50
+       FP_RSVD_2,
+       FP_RSVD_3,
+
+       FP_RSVD_4,
+       FP_DCA,
+
+       FP_DMD,
+       FP_CRC, /* 0x458 */
+};
+
+#define FP_PT2_SCRC                    (1 << 27)       /* shfclk free */
+
+#define FP_PM_P                                (1 << 24)       /* panel power ctl */
+#define FP_PM_PANEL_PWR_UP             (1 << 3)        /* r/o */
+#define FP_PM_PANEL_PWR_DOWN           (1 << 2)        /* r/o */
+#define FP_PM_PANEL_OFF                        (1 << 1)        /* r/o */
+#define FP_PM_PANEL_ON                 (1 << 0)        /* r/o */
+
+#define FP_DFC_BC                      ((1 << 4) | (1 << 5) | (1 << 6))
+
+
+/* register access functions */
+
+static inline uint32_t read_gp(struct lxfb_par *par, int reg)
+{
+       return readl(par->gp_regs + 4*reg);
+}
+
+static inline void write_gp(struct lxfb_par *par, int reg, uint32_t val)
+{
+       writel(val, par->gp_regs + 4*reg);
+}
+
+static inline uint32_t read_dc(struct lxfb_par *par, int reg)
+{
+       return readl(par->dc_regs + 4*reg);
+}
+
+static inline void write_dc(struct lxfb_par *par, int reg, uint32_t val)
+{
+       writel(val, par->dc_regs + 4*reg);
+}
+
+static inline uint32_t read_vp(struct lxfb_par *par, int reg)
+{
+       return readl(par->vp_regs + 8*reg);
+}
+
+static inline void write_vp(struct lxfb_par *par, int reg, uint32_t val)
+{
+       writel(val, par->vp_regs + 8*reg);
+}
+
+static inline uint32_t read_fp(struct lxfb_par *par, int reg)
+{
+       return readl(par->vp_regs + 8*reg + VP_FP_START);
+}
+
+static inline void write_fp(struct lxfb_par *par, int reg, uint32_t val)
+{
+       writel(val, par->vp_regs + 8*reg + VP_FP_START);
+}
 
-#define  DF_MISC_GAM_BYPASS (1 << 0)
-#define  DF_MISC_DAC_PWRDN  (1 << 10)
-#define  DF_MISC_A_PWRDN    (1 << 11)
 
-#define DF_PAR             0x38
-#define DF_PDR             0x40
-#define DF_ALPHA_CONTROL_1 0xD8
-#define DF_VIDEO_REQUEST   0x120
+/* MSRs are defined in asm/geode.h; their bitfields are here */
 
-#define DF_PANEL_TIM1      0x400
-#define DF_DEFAULT_TFT_PMTIM1 0x0
+#define MSR_GLCP_DOTPLL_LOCK           (1 << 25)       /* r/o */
+#define MSR_GLCP_DOTPLL_HALFPIX                (1 << 24)
+#define MSR_GLCP_DOTPLL_BYPASS         (1 << 15)
+#define MSR_GLCP_DOTPLL_DOTRESET       (1 << 0)
 
-#define DF_PANEL_TIM2      0x408
-#define DF_DEFAULT_TFT_PMTIM2 0x08000000
+/* note: this is actually the VP's GLD_MSR_CONFIG */
+#define MSR_LX_GLD_MSR_CONFIG_FMT      ((1 << 3) | (1 << 4) | (1 << 5))
+#define MSR_LX_GLD_MSR_CONFIG_FMT_FP   (1 << 3)
+#define MSR_LX_GLD_MSR_CONFIG_FMT_CRT  (0)
+#define MSR_LX_GLD_MSR_CONFIG_FPC      (1 << 15)       /* FP *and* CRT */
 
-#define DF_FP_PM             0x410
-#define  DF_FP_PM_P          (1 << 24)
+#define MSR_LX_MSR_PADSEL_TFT_SEL_LOW  0xDFFFFFFF      /* ??? */
+#define MSR_LX_MSR_PADSEL_TFT_SEL_HIGH 0x0000003F      /* ??? */
 
-#define DF_DITHER_CONTROL    0x418
-#define DF_DEFAULT_TFT_DITHCTL                  0x00000070
-#define GP_BLT_STATUS      0x44
-#define  GP_BS_BLT_BUSY    (1 << 0)
-#define  GP_BS_CB_EMPTY    (1 << 4)
+#define MSR_LX_SPARE_MSR_DIS_CFIFO_HGO (1 << 11)       /* undocumented */
+#define MSR_LX_SPARE_MSR_VFIFO_ARB_SEL (1 << 10)       /* undocumented */
+#define MSR_LX_SPARE_MSR_WM_LPEN_OVRD  (1 << 9)        /* undocumented */
+#define MSR_LX_SPARE_MSR_LOAD_WM_LPEN_M        (1 << 8)        /* undocumented */
+#define MSR_LX_SPARE_MSR_DIS_INIT_V_PRI        (1 << 7)        /* undocumented */
+#define MSR_LX_SPARE_MSR_DIS_VIFO_WM   (1 << 6)
+#define MSR_LX_SPARE_MSR_DIS_CWD_CHECK (1 << 5)        /* undocumented */
+#define MSR_LX_SPARE_MSR_PIX8_PAN_FIX  (1 << 4)        /* undocumented */
+#define MSR_LX_SPARE_MSR_FIRST_REQ_MASK        (1 << 1)        /* undocumented */
 
 #endif
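
The accessor helpers above hide the MMIO stride differences between register blocks: the GP and DC blocks are indexed with a 4-byte stride, the VP block with an 8-byte stride, and the FP registers live inside the VP aperture at offset VP_FP_START. A minimal usage sketch, assuming the lxfb.h declarations above are in scope; the helper name lxfb_toggle_tgen is hypothetical and not part of this commit:

    /* Illustrative only: unlock the DC block, flip one bit, re-lock. */
    static void lxfb_toggle_tgen(struct lxfb_par *par, int enable)
    {
            uint32_t dcfg;

            write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);     /* DC regs are write-protected */

            dcfg = read_dc(par, DC_DISPLAY_CFG);
            if (enable)
                    dcfg |= DC_DISPLAY_CFG_TGEN;            /* timing generator on/off */
            else
                    dcfg &= ~DC_DISPLAY_CFG_TGEN;
            write_dc(par, DC_DISPLAY_CFG, dcfg);            /* stride handled by the helper */

            write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);       /* always re-lock when done */
    }
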
index eb6b881715387bbdbc4226ff801d4f5cc3f193ac..2cd9b74d222545ea771d653cf1d558be43c2173b 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/console.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/suspend.h>
 #include <linux/delay.h>
 #include <linux/fb.h>
 #include <linux/init.h>
 
 static char *mode_option;
 static int noclear, nopanel, nocrt;
-static int fbsize;
+static int vram;
+static int vt_switch;
 
 /* Most of these modes are sorted in ascending order, but
  * since the first entry in this table is the "default" mode,
  * we try to make it something sane - 640x480-60 is sane
  */
 
-static const struct fb_videomode geode_modedb[] __initdata = {
+static struct fb_videomode geode_modedb[] __initdata = {
        /* 640x480-60 */
        { NULL, 60, 640, 480, 39682, 48, 8, 25, 2, 88, 2,
          FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
@@ -215,6 +217,35 @@ static const struct fb_videomode geode_modedb[] __initdata = {
          0, FB_VMODE_NONINTERLACED, 0 },
 };
 
+#ifdef CONFIG_OLPC
+#include <asm/olpc.h>
+
+static struct fb_videomode olpc_dcon_modedb[] __initdata = {
+       /* The only mode the DCON has is 1200x900 */
+       { NULL, 50, 1200, 900, 17460, 24, 8, 4, 5, 8, 3,
+         FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
+         FB_VMODE_NONINTERLACED, 0 }
+};
+
+static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size)
+{
+       if (olpc_has_dcon()) {
+               *modedb = (struct fb_videomode *) olpc_dcon_modedb;
+               *size = ARRAY_SIZE(olpc_dcon_modedb);
+       } else {
+               *modedb = (struct fb_videomode *) geode_modedb;
+               *size = ARRAY_SIZE(geode_modedb);
+       }
+}
+
+#else
+static void __init get_modedb(struct fb_videomode **modedb, unsigned int *size)
+{
+       *modedb = (struct fb_videomode *) geode_modedb;
+       *size = ARRAY_SIZE(geode_modedb);
+}
+#endif
+
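
A quick consistency check of the DCON mode line above (the fb_videomode fields are: name, refresh, xres, yres, pixclock in picoseconds, left/right/upper/lower margins, hsync/vsync lengths):

    dot clock = 10^12 / 17460 ps            ~ 57.27 MHz
    htotal    = 1200 + 24 + 8 + 8           = 1240 pixels
    vtotal    =  900 +  4 + 5 + 3           =  912 lines
    refresh   ~ 57.27 MHz / (1240 * 912)    ~ 50.6 Hz

which agrees with the declared 50 Hz refresh of the 1200x900 panel.
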
 static int lxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 {
        if (var->xres > 1920 || var->yres > 1440)
@@ -333,13 +364,13 @@ static int __init lxfb_map_video_memory(struct fb_info *info,
        if (ret)
                return ret;
 
-       ret = pci_request_region(dev, 3, "lxfb-vip");
+       ret = pci_request_region(dev, 3, "lxfb-vp");
 
        if (ret)
                return ret;
 
        info->fix.smem_start = pci_resource_start(dev, 0);
-       info->fix.smem_len = fbsize ? fbsize : lx_framebuffer_size();
+       info->fix.smem_len = vram ? vram : lx_framebuffer_size();
 
        info->screen_base = ioremap(info->fix.smem_start, info->fix.smem_len);
 
@@ -360,18 +391,15 @@ static int __init lxfb_map_video_memory(struct fb_info *info,
        if (par->dc_regs == NULL)
                return ret;
 
-       par->df_regs = ioremap(pci_resource_start(dev, 3),
+       par->vp_regs = ioremap(pci_resource_start(dev, 3),
                               pci_resource_len(dev, 3));
 
-       if (par->df_regs == NULL)
+       if (par->vp_regs == NULL)
                return ret;
 
-       writel(DC_UNLOCK_CODE, par->dc_regs + DC_UNLOCK);
-
-       writel(info->fix.smem_start & 0xFF000000,
-              par->dc_regs + DC_PHY_MEM_OFFSET);
-
-       writel(0, par->dc_regs + DC_UNLOCK);
+       write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
+       write_dc(par, DC_GLIU0_MEM_OFFSET, info->fix.smem_start & 0xFF000000);
+       write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
 
        dev_info(&dev->dev, "%d KB of video memory at 0x%lx\n",
                 info->fix.smem_len / 1024, info->fix.smem_start);
@@ -431,6 +459,45 @@ static struct fb_info * __init lxfb_init_fbinfo(struct device *dev)
        return info;
 }
 
+#ifdef CONFIG_PM
+static int lxfb_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+       struct fb_info *info = pci_get_drvdata(pdev);
+
+       if (state.event == PM_EVENT_SUSPEND) {
+               acquire_console_sem();
+               lx_powerdown(info);
+               fb_set_suspend(info, 1);
+               release_console_sem();
+       }
+
+       /* there's no point in setting PCI power states; we emulate PCI, so
+        * we don't end up getting power savings anyway */
+
+       return 0;
+}
+
+static int lxfb_resume(struct pci_dev *pdev)
+{
+       struct fb_info *info = pci_get_drvdata(pdev);
+       int ret;
+
+       acquire_console_sem();
+       ret = lx_powerup(info);
+       if (ret) {
+               printk(KERN_ERR "lxfb:  power up failed!\n");
+               return ret;
+       }
+
+       fb_set_suspend(info, 0);
+       release_console_sem();
+       return 0;
+}
+#else
+#define lxfb_suspend NULL
+#define lxfb_resume NULL
+#endif
+
 static int __init lxfb_probe(struct pci_dev *pdev,
                             const struct pci_device_id *id)
 {
@@ -439,7 +506,7 @@ static int __init lxfb_probe(struct pci_dev *pdev,
        int ret;
 
        struct fb_videomode *modedb_ptr;
-       int modedb_size;
+       unsigned int modedb_size;
 
        info = lxfb_init_fbinfo(&pdev->dev);
 
@@ -464,9 +531,7 @@ static int __init lxfb_probe(struct pci_dev *pdev,
 
        /* Set up the mode database */
 
-       modedb_ptr = (struct fb_videomode *) geode_modedb;
-       modedb_size = ARRAY_SIZE(geode_modedb);
-
+       get_modedb(&modedb_ptr, &modedb_size);
        ret = fb_find_mode(&info->var, info, mode_option,
                           modedb_ptr, modedb_size, NULL, 16);
 
@@ -487,6 +552,8 @@ static int __init lxfb_probe(struct pci_dev *pdev,
        lxfb_check_var(&info->var, info);
        lxfb_set_par(info);
 
+       pm_set_vt_switch(vt_switch);
+
        if (register_framebuffer(info) < 0) {
                ret = -EINVAL;
                goto err;
@@ -510,8 +577,8 @@ err:
                iounmap(par->dc_regs);
                pci_release_region(pdev, 2);
        }
-       if (par->df_regs) {
-               iounmap(par->df_regs);
+       if (par->vp_regs) {
+               iounmap(par->vp_regs);
                pci_release_region(pdev, 3);
        }
 
@@ -537,7 +604,7 @@ static void lxfb_remove(struct pci_dev *pdev)
        iounmap(par->dc_regs);
        pci_release_region(pdev, 2);
 
-       iounmap(par->df_regs);
+       iounmap(par->vp_regs);
        pci_release_region(pdev, 3);
 
        pci_set_drvdata(pdev, NULL);
@@ -556,6 +623,8 @@ static struct pci_driver lxfb_driver = {
        .id_table       = lxfb_id_table,
        .probe          = lxfb_probe,
        .remove         = lxfb_remove,
+       .suspend        = lxfb_suspend,
+       .resume         = lxfb_resume,
 };
 
 #ifndef MODULE
@@ -570,9 +639,7 @@ static int __init lxfb_setup(char *options)
                if (!*opt)
                        continue;
 
-               if (!strncmp(opt, "fbsize:", 7))
-                       fbsize = simple_strtoul(opt+7, NULL, 0);
-               else if (!strcmp(opt, "noclear"))
+               if (!strcmp(opt, "noclear"))
                        noclear = 1;
                else if (!strcmp(opt, "nopanel"))
                        nopanel = 1;
@@ -609,8 +676,11 @@ module_exit(lxfb_cleanup);
 module_param(mode_option, charp, 0);
 MODULE_PARM_DESC(mode_option, "video mode (<x>x<y>[-<bpp>][@<refr>])");
 
-module_param(fbsize, int, 0);
-MODULE_PARM_DESC(fbsize, "video memory size");
+module_param(vram, int, 0);
+MODULE_PARM_DESC(vram, "video memory size");
+
+module_param(vt_switch, int, 0);
+MODULE_PARM_DESC(vt_switch, "enable VT switch during suspend/resume");
 
 MODULE_DESCRIPTION("Framebuffer driver for the AMD Geode LX");
 MODULE_LICENSE("GPL");
index 4fbc99be96ef727cdc0d703fb149f60b7b9d363a..cd9d4cc269549932008ac90ee9a37a744312941d 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/fb.h>
 #include <linux/uaccess.h>
 #include <linux/delay.h>
+#include <asm/geode.h>
 
 #include "lxfb.h"
 
@@ -34,35 +35,85 @@ static const struct {
   unsigned int pllval;
   unsigned int freq;
 } pll_table[] = {
-  { 0x000031AC, 24923 },
-  { 0x0000215D, 25175 },
-  { 0x00001087, 27000 },
-  { 0x0000216C, 28322 },
-  { 0x0000218D, 28560 },
-  { 0x000010C9, 31200 },
-  { 0x00003147, 31500 },
-  { 0x000010A7, 33032 },
-  { 0x00002159, 35112 },
-  { 0x00004249, 35500 },
-  { 0x00000057, 36000 },
-  { 0x0000219A, 37889 },
-  { 0x00002158, 39168 },
-  { 0x00000045, 40000 },
-  { 0x00000089, 43163 },
-  { 0x000010E7, 44900 },
-  { 0x00002136, 45720 },
-  { 0x00003207, 49500 },
-  { 0x00002187, 50000 },
-  { 0x00004286, 56250 },
-  { 0x000010E5, 60065 },
-  { 0x00004214, 65000 },
-  { 0x00001105, 68179 },
-  { 0x000031E4, 74250 },
-  { 0x00003183, 75000 },
-  { 0x00004284, 78750 },
-  { 0x00001104, 81600 },
-  { 0x00006363, 94500 },
-  { 0x00005303, 97520 },
+  { 0x000131AC,   6231 },
+  { 0x0001215D,   6294 },
+  { 0x00011087,   6750 },
+  { 0x0001216C,   7081 },
+  { 0x0001218D,   7140 },
+  { 0x000110C9,   7800 },
+  { 0x00013147,   7875 },
+  { 0x000110A7,   8258 },
+  { 0x00012159,   8778 },
+  { 0x00014249,   8875 },
+  { 0x00010057,   9000 },
+  { 0x0001219A,   9472 },
+  { 0x00012158,   9792 },
+  { 0x00010045,  10000 },
+  { 0x00010089,  10791 },
+  { 0x000110E7,  11225 },
+  { 0x00012136,  11430 },
+  { 0x00013207,  12375 },
+  { 0x00012187,  12500 },
+  { 0x00014286,  14063 },
+  { 0x000110E5,  15016 },
+  { 0x00014214,  16250 },
+  { 0x00011105,  17045 },
+  { 0x000131E4,  18563 },
+  { 0x00013183,  18750 },
+  { 0x00014284,  19688 },
+  { 0x00011104,  20400 },
+  { 0x00016363,  23625 },
+  { 0x00015303,  24380 },
+  { 0x000031AC,  24923 },
+  { 0x0000215D,  25175 },
+  { 0x00001087,  27000 },
+  { 0x0000216C,  28322 },
+  { 0x0000218D,  28560 },
+  { 0x00010041,  29913 },
+  { 0x000010C9,  31200 },
+  { 0x00003147,  31500 },
+  { 0x000141A1,  32400 },
+  { 0x000010A7,  33032 },
+  { 0x00012182,  33375 },
+  { 0x000141B1,  33750 },
+  { 0x00002159,  35112 },
+  { 0x00004249,  35500 },
+  { 0x00000057,  36000 },
+  { 0x000141E1,  37125 },
+  { 0x0000219A,  37889 },
+  { 0x00002158,  39168 },
+  { 0x00000045,  40000 },
+  { 0x000131A1,  40500 },
+  { 0x00010061,  42301 },
+  { 0x00000089,  43163 },
+  { 0x00012151,  43875 },
+  { 0x000010E7,  44900 },
+  { 0x00002136,  45720 },
+  { 0x000152E1,  47250 },
+  { 0x00010071,  48000 },
+  { 0x00003207,  49500 },
+  { 0x00002187,  50000 },
+  { 0x00014291,  50625 },
+  { 0x00011101,  51188 },
+  { 0x00017481,  54563 },
+  { 0x00004286,  56250 },
+  { 0x00014170,  57375 },
+  { 0x00016210,  58500 },
+  { 0x000010E5,  60065 },
+  { 0x00013140,  62796 },
+  { 0x00004214,  65000 },
+  { 0x00016250,  65250 },
+  { 0x00001105,  68179 },
+  { 0x000141C0,  69600 },
+  { 0x00015220,  70160 },
+  { 0x00010050,  72000 },
+  { 0x000031E4,  74250 },
+  { 0x00003183,  75000 },
+  { 0x00004284,  78750 },
+  { 0x00012130,  80052 },
+  { 0x00001104,  81600 },
+  { 0x00006363,  94500 },
+  { 0x00005303,  97520 },
   { 0x00002183, 100187 },
   { 0x00002122, 101420 },
   { 0x00001081, 108000 },
@@ -101,16 +152,16 @@ static void lx_set_dotpll(u32 pllval)
        u32 dotpll_lo, dotpll_hi;
        int i;
 
-       rdmsr(MSR_LX_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
+       rdmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
 
-       if ((dotpll_lo & GLCP_DOTPLL_LOCK) && (dotpll_hi == pllval))
+       if ((dotpll_lo & MSR_GLCP_DOTPLL_LOCK) && (dotpll_hi == pllval))
                return;
 
        dotpll_hi = pllval;
-       dotpll_lo &= ~(GLCP_DOTPLL_BYPASS | GLCP_DOTPLL_HALFPIX);
-       dotpll_lo |= GLCP_DOTPLL_RESET;
+       dotpll_lo &= ~(MSR_GLCP_DOTPLL_BYPASS | MSR_GLCP_DOTPLL_HALFPIX);
+       dotpll_lo |= MSR_GLCP_DOTPLL_DOTRESET;
 
-       wrmsr(MSR_LX_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
+       wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
 
        /* Wait 100us for the PLL to lock */
 
@@ -119,15 +170,15 @@ static void lx_set_dotpll(u32 pllval)
        /* Now, loop for the lock bit */
 
        for (i = 0; i < 1000; i++) {
-               rdmsr(MSR_LX_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
-               if (dotpll_lo & GLCP_DOTPLL_LOCK)
+               rdmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
+               if (dotpll_lo & MSR_GLCP_DOTPLL_LOCK)
                        break;
        }
 
        /* Clear the reset bit */
 
-       dotpll_lo &= ~GLCP_DOTPLL_RESET;
-       wrmsr(MSR_LX_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
+       dotpll_lo &= ~MSR_GLCP_DOTPLL_DOTRESET;
+       wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
 }
 
 /* Set the clock based on the frequency specified by the current mode */
@@ -137,7 +188,7 @@ static void lx_set_clock(struct fb_info *info)
        unsigned int diff, min, best = 0;
        unsigned int freq, i;
 
-       freq = (unsigned int) (0x3b9aca00 / info->var.pixclock);
+       freq = (unsigned int) (1000000000 / info->var.pixclock);
 
        min = abs(pll_table[0].freq - freq);
 
@@ -149,7 +200,7 @@ static void lx_set_clock(struct fb_info *info)
                }
        }
 
-       lx_set_dotpll(pll_table[best].pllval & 0x7FFF);
+       lx_set_dotpll(pll_table[best].pllval & 0x00017FFF);
 }
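
For reference, info->var.pixclock is the pixel period in picoseconds, so 10^9 / pixclock yields the dot clock in kHz -- the same unit as the freq column of pll_table -- and lx_set_clock() picks the entry with the smallest absolute difference. A worked example using the 640x480-60 mode from geode_modedb (pixclock = 39682 ps):

    freq = 10^9 / 39682 ~ 25200 kHz
    closest table entry: { 0x0000215D, 25175 }   /* a 25.175 MHz dot clock */

The mask 0x00017FFF keeps only the pllval bits that belong in the high word of the GLCP DOTPLL MSR, which lx_set_dotpll() then programs and polls for lock.
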
 
 static void lx_graphics_disable(struct fb_info *info)
@@ -159,63 +210,62 @@ static void lx_graphics_disable(struct fb_info *info)
 
        /* Note:  This assumes that the video is in a quiet state */
 
-       writel(0, par->df_regs + DF_ALPHA_CONTROL_1);
-       writel(0, par->df_regs + DF_ALPHA_CONTROL_1 + 32);
-       writel(0, par->df_regs + DF_ALPHA_CONTROL_1 + 64);
+       write_vp(par, VP_A1T, 0);
+       write_vp(par, VP_A2T, 0);
+       write_vp(par, VP_A3T, 0);
 
        /* Turn off the VGA and video enable */
-       val = readl (par->dc_regs + DC_GENERAL_CFG) &
-               ~(DC_GCFG_VGAE | DC_GCFG_VIDE);
+       val = read_dc(par, DC_GENERAL_CFG) & ~(DC_GENERAL_CFG_VGAE |
+                       DC_GENERAL_CFG_VIDE);
 
-       writel(val, par->dc_regs + DC_GENERAL_CFG);
+       write_dc(par, DC_GENERAL_CFG, val);
 
-       val = readl(par->df_regs + DF_VIDEO_CFG) & ~DF_VCFG_VID_EN;
-       writel(val, par->df_regs + DF_VIDEO_CFG);
+       val = read_vp(par, VP_VCFG) & ~VP_VCFG_VID_EN;
+       write_vp(par, VP_VCFG, val);
 
-       writel( DC_IRQ_MASK | DC_VSYNC_IRQ_MASK |
-               DC_IRQ_STATUS | DC_VSYNC_IRQ_STATUS,
-               par->dc_regs + DC_IRQ);
+       write_dc(par, DC_IRQ, DC_IRQ_MASK | DC_IRQ_VIP_VSYNC_LOSS_IRQ_MASK |
+                       DC_IRQ_STATUS | DC_IRQ_VIP_VSYNC_IRQ_STATUS);
 
-       val = readl(par->dc_regs + DC_GENLCK_CTRL) & ~DC_GENLCK_ENABLE;
-       writel(val, par->dc_regs + DC_GENLCK_CTRL);
+       val = read_dc(par, DC_GENLK_CTL) & ~DC_GENLK_CTL_GENLK_EN;
+       write_dc(par, DC_GENLK_CTL, val);
 
-       val = readl(par->dc_regs + DC_COLOR_KEY) & ~DC_CLR_KEY_ENABLE;
-       writel(val & ~DC_CLR_KEY_ENABLE, par->dc_regs + DC_COLOR_KEY);
+       val = read_dc(par, DC_CLR_KEY);
+       write_dc(par, DC_CLR_KEY, val & ~DC_CLR_KEY_CLR_KEY_EN);
 
-       /* We don't actually blank the panel, due to the long latency
-          involved with bringing it back */
+       /* turn off the panel */
+       write_fp(par, FP_PM, read_fp(par, FP_PM) & ~FP_PM_P);
 
-       val = readl(par->df_regs + DF_MISC) | DF_MISC_DAC_PWRDN;
-       writel(val, par->df_regs + DF_MISC);
+       val = read_vp(par, VP_MISC) | VP_MISC_DACPWRDN;
+       write_vp(par, VP_MISC, val);
 
        /* Turn off the display */
 
-       val = readl(par->df_regs + DF_DISPLAY_CFG);
-       writel(val & ~(DF_DCFG_CRT_EN | DF_DCFG_HSYNC_EN | DF_DCFG_VSYNC_EN |
-                      DF_DCFG_DAC_BL_EN), par->df_regs + DF_DISPLAY_CFG);
+       val = read_vp(par, VP_DCFG);
+       write_vp(par, VP_DCFG, val & ~(VP_DCFG_CRT_EN | VP_DCFG_HSYNC_EN |
+                       VP_DCFG_VSYNC_EN | VP_DCFG_DAC_BL_EN));
 
-       gcfg = readl(par->dc_regs + DC_GENERAL_CFG);
-       gcfg &= ~(DC_GCFG_CMPE | DC_GCFG_DECE);
-       writel(gcfg, par->dc_regs + DC_GENERAL_CFG);
+       gcfg = read_dc(par, DC_GENERAL_CFG);
+       gcfg &= ~(DC_GENERAL_CFG_CMPE | DC_GENERAL_CFG_DECE);
+       write_dc(par, DC_GENERAL_CFG, gcfg);
 
        /* Turn off the TGEN */
-       val = readl(par->dc_regs + DC_DISPLAY_CFG);
-       val &= ~DC_DCFG_TGEN;
-       writel(val, par->dc_regs + DC_DISPLAY_CFG);
+       val = read_dc(par, DC_DISPLAY_CFG);
+       val &= ~DC_DISPLAY_CFG_TGEN;
+       write_dc(par, DC_DISPLAY_CFG, val);
 
        /* Wait 1000 usecs to ensure that the TGEN is clear */
        udelay(1000);
 
        /* Turn off the FIFO loader */
 
-       gcfg &= ~DC_GCFG_DFLE;
-       writel(gcfg, par->dc_regs + DC_GENERAL_CFG);
+       gcfg &= ~DC_GENERAL_CFG_DFLE;
+       write_dc(par, DC_GENERAL_CFG, gcfg);
 
        /* Lastly, wait for the GP to go idle */
 
        do {
-               val = readl(par->gp_regs + GP_BLT_STATUS);
-       } while ((val & GP_BS_BLT_BUSY) || !(val & GP_BS_CB_EMPTY));
+               val = read_gp(par, GP_BLT_STATUS);
+       } while ((val & GP_BLT_STATUS_PB) || !(val & GP_BLT_STATUS_CE));
 }
 
 static void lx_graphics_enable(struct fb_info *info)
@@ -224,80 +274,85 @@ static void lx_graphics_enable(struct fb_info *info)
        u32 temp, config;
 
        /* Set the video request register */
-       writel(0, par->df_regs + DF_VIDEO_REQUEST);
+       write_vp(par, VP_VRR, 0);
 
        /* Set up the polarities */
 
-       config = readl(par->df_regs + DF_DISPLAY_CFG);
+       config = read_vp(par, VP_DCFG);
 
-       config &= ~(DF_DCFG_CRT_SYNC_SKW_MASK | DF_DCFG_PWR_SEQ_DLY_MASK |
-                 DF_DCFG_CRT_HSYNC_POL     | DF_DCFG_CRT_VSYNC_POL);
+       config &= ~(VP_DCFG_CRT_SYNC_SKW | VP_DCFG_PWR_SEQ_DELAY |
+                       VP_DCFG_CRT_HSYNC_POL | VP_DCFG_CRT_VSYNC_POL);
 
-       config |= (DF_DCFG_CRT_SYNC_SKW_INIT | DF_DCFG_PWR_SEQ_DLY_INIT  |
-                  DF_DCFG_GV_PAL_BYP);
+       config |= (VP_DCFG_CRT_SYNC_SKW_DEFAULT | VP_DCFG_PWR_SEQ_DELAY_DEFAULT
+                       | VP_DCFG_GV_GAM);
 
        if (info->var.sync & FB_SYNC_HOR_HIGH_ACT)
-               config |= DF_DCFG_CRT_HSYNC_POL;
+               config |= VP_DCFG_CRT_HSYNC_POL;
 
        if (info->var.sync & FB_SYNC_VERT_HIGH_ACT)
-               config |= DF_DCFG_CRT_VSYNC_POL;
+               config |= VP_DCFG_CRT_VSYNC_POL;
 
        if (par->output & OUTPUT_PANEL) {
                u32 msrlo, msrhi;
 
-               writel(DF_DEFAULT_TFT_PMTIM1,
-                      par->df_regs + DF_PANEL_TIM1);
-               writel(DF_DEFAULT_TFT_PMTIM2,
-                      par->df_regs + DF_PANEL_TIM2);
-               writel(DF_DEFAULT_TFT_DITHCTL,
-                      par->df_regs + DF_DITHER_CONTROL);
+               write_fp(par, FP_PT1, 0);
+               write_fp(par, FP_PT2, FP_PT2_SCRC);
+               write_fp(par, FP_DFC, FP_DFC_BC);
 
-               msrlo = DF_DEFAULT_TFT_PAD_SEL_LOW;
-               msrhi = DF_DEFAULT_TFT_PAD_SEL_HIGH;
+               msrlo = MSR_LX_MSR_PADSEL_TFT_SEL_LOW;
+               msrhi = MSR_LX_MSR_PADSEL_TFT_SEL_HIGH;
 
-               wrmsr(MSR_LX_DF_PADSEL, msrlo, msrhi);
+               wrmsr(MSR_LX_MSR_PADSEL, msrlo, msrhi);
        }
 
        if (par->output & OUTPUT_CRT) {
-               config |= DF_DCFG_CRT_EN   | DF_DCFG_HSYNC_EN |
-                       DF_DCFG_VSYNC_EN | DF_DCFG_DAC_BL_EN;
+               config |= VP_DCFG_CRT_EN | VP_DCFG_HSYNC_EN |
+                               VP_DCFG_VSYNC_EN | VP_DCFG_DAC_BL_EN;
        }
 
-       writel(config, par->df_regs + DF_DISPLAY_CFG);
+       write_vp(par, VP_DCFG, config);
 
        /* Turn the CRT dacs back on */
 
        if (par->output & OUTPUT_CRT) {
-               temp = readl(par->df_regs + DF_MISC);
-               temp &= ~(DF_MISC_DAC_PWRDN  | DF_MISC_A_PWRDN);
-               writel(temp, par->df_regs + DF_MISC);
+               temp = read_vp(par, VP_MISC);
+               temp &= ~(VP_MISC_DACPWRDN | VP_MISC_APWRDN);
+               write_vp(par, VP_MISC, temp);
        }
 
        /* Turn the panel on (if it isn't already) */
-
-       if (par->output & OUTPUT_PANEL) {
-               temp = readl(par->df_regs + DF_FP_PM);
-
-               if (!(temp & 0x09))
-                       writel(temp | DF_FP_PM_P, par->df_regs + DF_FP_PM);
-       }
-
-       temp = readl(par->df_regs + DF_MISC);
-       temp = readl(par->df_regs + DF_DISPLAY_CFG);
+       if (par->output & OUTPUT_PANEL)
+               write_fp(par, FP_PM, read_fp(par, FP_PM) | FP_PM_P);
 }
 
 unsigned int lx_framebuffer_size(void)
 {
        unsigned int val;
 
+       if (!geode_has_vsa2()) {
+               uint32_t hi, lo;
+
+               /* The number of pages is (PMAX - PMIN)+1 */
+               rdmsr(MSR_GLIU_P2D_RO0, lo, hi);
+
+               /* PMAX */
+               val = ((hi & 0xff) << 12) | ((lo & 0xfff00000) >> 20);
+               /* PMIN */
+               val -= (lo & 0x000fffff);
+               val += 1;
+
+               /* The page size is 4k */
+               return (val << 12);
+       }
+
        /* The frame buffer size is reported by a VSM in VSA II */
        /* Virtual Register Class    = 0x02                     */
        /* VG_MEM_SIZE (1MB units)   = 0x00                     */
 
-       outw(0xFC53, 0xAC1C);
-       outw(0x0200, 0xAC1C);
+       outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+       outw(VSA_VR_MEM_SIZE, VSA_VRC_INDEX);
 
-       val = (unsigned int)(inw(0xAC1E)) & 0xFE;
+       val = (unsigned int)(inw(VSA_VRC_DATA)) & 0xFE;
        return (val << 20);
 }
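
When VSA2 is absent, the size is recovered from the GLIU P2D range descriptor: PMAX and PMIN are page numbers packed across the two MSR halves, and (PMAX - PMIN + 1) 4 KiB pages is the amount of memory stolen for graphics. A worked example with made-up MSR contents (hypothetical values, not from any real board):

    hi = 0x0000007E, lo = 0x7FF7E000
    PMAX  = ((hi & 0xff) << 12) | ((lo & 0xfff00000) >> 20) = 0x7E7FF
    PMIN  = lo & 0x000fffff                                 = 0x7E000
    pages = PMAX - PMIN + 1                                 = 0x800 (2048)
    size  = pages << 12                                     = 8 MiB

On VSA2 systems the same figure comes back from virtual register class 0x02, index 0x00 in 1 MiB units, hence the final val << 20.
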
 
@@ -313,7 +368,7 @@ void lx_set_mode(struct fb_info *info)
        int vactive, vblankstart, vsyncstart, vsyncend, vblankend, vtotal;
 
        /* Unlock the DC registers */
-       writel(DC_UNLOCK_CODE, par->dc_regs + DC_UNLOCK);
+       write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
 
        lx_graphics_disable(info);
 
@@ -321,102 +376,104 @@ void lx_set_mode(struct fb_info *info)
 
        /* Set output mode */
 
-       rdmsrl(MSR_LX_DF_GLCONFIG, msrval);
-       msrval &= ~DF_CONFIG_OUTPUT_MASK;
+       rdmsrl(MSR_LX_GLD_MSR_CONFIG, msrval);
+       msrval &= ~MSR_LX_GLD_MSR_CONFIG_FMT;
 
        if (par->output & OUTPUT_PANEL) {
-               msrval |= DF_OUTPUT_PANEL;
+               msrval |= MSR_LX_GLD_MSR_CONFIG_FMT_FP;
 
                if (par->output & OUTPUT_CRT)
-                       msrval |= DF_SIMULTANEOUS_CRT_AND_FP;
+                       msrval |= MSR_LX_GLD_MSR_CONFIG_FPC;
                else
-                       msrval &= ~DF_SIMULTANEOUS_CRT_AND_FP;
-       } else {
-               msrval |= DF_OUTPUT_CRT;
-       }
+                       msrval &= ~MSR_LX_GLD_MSR_CONFIG_FPC;
+       } else
+               msrval |= MSR_LX_GLD_MSR_CONFIG_FMT_CRT;
 
-       wrmsrl(MSR_LX_DF_GLCONFIG, msrval);
+       wrmsrl(MSR_LX_GLD_MSR_CONFIG, msrval);
 
        /* Clear the various buffers */
        /* FIXME:  Adjust for panning here */
 
-       writel(0, par->dc_regs + DC_FB_START);
-       writel(0, par->dc_regs + DC_CB_START);
-       writel(0, par->dc_regs + DC_CURSOR_START);
+       write_dc(par, DC_FB_ST_OFFSET, 0);
+       write_dc(par, DC_CB_ST_OFFSET, 0);
+       write_dc(par, DC_CURS_ST_OFFSET, 0);
 
        /* FIXME: Add support for interlacing */
        /* FIXME: Add support for scaling */
 
-       val = readl(par->dc_regs + DC_GENLCK_CTRL);
-       val &= ~(DC_GC_ALPHA_FLICK_ENABLE |
-                DC_GC_FLICKER_FILTER_ENABLE | DC_GC_FLICKER_FILTER_MASK);
+       val = read_dc(par, DC_GENLK_CTL);
+       val &= ~(DC_GENLK_CTL_ALPHA_FLICK_EN | DC_GENLK_CTL_FLICK_EN |
+                       DC_GENLK_CTL_FLICK_SEL_MASK);
 
        /* Default scaling params */
 
-       writel((0x4000 << 16) | 0x4000, par->dc_regs + DC_GFX_SCALE);
-       writel(0, par->dc_regs + DC_IRQ_FILT_CTL);
-       writel(val, par->dc_regs + DC_GENLCK_CTRL);
+       write_dc(par, DC_GFX_SCALE, (0x4000 << 16) | 0x4000);
+       write_dc(par, DC_IRQ_FILT_CTL, 0);
+       write_dc(par, DC_GENLK_CTL, val);
 
        /* FIXME:  Support compression */
 
        if (info->fix.line_length > 4096)
-               dv = DC_DV_LINE_SIZE_8192;
+               dv = DC_DV_CTL_DV_LINE_SIZE_8K;
        else if (info->fix.line_length > 2048)
-               dv = DC_DV_LINE_SIZE_4096;
+               dv = DC_DV_CTL_DV_LINE_SIZE_4K;
        else if (info->fix.line_length > 1024)
-               dv = DC_DV_LINE_SIZE_2048;
+               dv = DC_DV_CTL_DV_LINE_SIZE_2K;
        else
-               dv = DC_DV_LINE_SIZE_1024;
+               dv = DC_DV_CTL_DV_LINE_SIZE_1K;
 
        max = info->fix.line_length * info->var.yres;
        max = (max + 0x3FF) & 0xFFFFFC00;
 
-       writel(max | DC_DV_TOP_ENABLE, par->dc_regs + DC_DV_TOP);
+       write_dc(par, DC_DV_TOP, max | DC_DV_TOP_DV_TOP_EN);
 
-       val = readl(par->dc_regs + DC_DV_CTL) & ~DC_DV_LINE_SIZE_MASK;
-       writel(val | dv, par->dc_regs + DC_DV_CTL);
+       val = read_dc(par, DC_DV_CTL) & ~DC_DV_CTL_DV_LINE_SIZE;
+       write_dc(par, DC_DV_CTL, val | dv);
 
        size = info->var.xres * (info->var.bits_per_pixel >> 3);
 
-       writel(info->fix.line_length >> 3, par->dc_regs + DC_GRAPHICS_PITCH);
-       writel((size + 7) >> 3, par->dc_regs + DC_LINE_SIZE);
+       write_dc(par, DC_GFX_PITCH, info->fix.line_length >> 3);
+       write_dc(par, DC_LINE_SIZE, (size + 7) >> 3);
 
        /* Set default watermark values */
 
-       rdmsrl(MSR_LX_DC_SPARE, msrval);
-
-       msrval &= ~(DC_SPARE_DISABLE_CFIFO_HGO | DC_SPARE_VFIFO_ARB_SELECT |
-                   DC_SPARE_LOAD_WM_LPEN_MASK | DC_SPARE_WM_LPEN_OVRD |
-                   DC_SPARE_DISABLE_INIT_VID_PRI | DC_SPARE_DISABLE_VFIFO_WM);
-       msrval |= DC_SPARE_DISABLE_VFIFO_WM | DC_SPARE_DISABLE_INIT_VID_PRI;
-       wrmsrl(MSR_LX_DC_SPARE, msrval);
-
-       gcfg = DC_GCFG_DFLE;   /* Display fifo enable */
-       gcfg |= 0xB600;         /* Set default priority */
-       gcfg |= DC_GCFG_FDTY;  /* Set the frame dirty mode */
-
-       dcfg  = DC_DCFG_VDEN;  /* Enable video data */
-       dcfg |= DC_DCFG_GDEN;  /* Enable graphics */
-       dcfg |= DC_DCFG_TGEN;  /* Turn on the timing generator */
-       dcfg |= DC_DCFG_TRUP;  /* Update timings immediately */
-       dcfg |= DC_DCFG_PALB;  /* Palette bypass in > 8 bpp modes */
-       dcfg |= DC_DCFG_VISL;
-       dcfg |= DC_DCFG_DCEN;  /* Always center the display */
+       rdmsrl(MSR_LX_SPARE_MSR, msrval);
+
+       msrval &= ~(MSR_LX_SPARE_MSR_DIS_CFIFO_HGO
+                       | MSR_LX_SPARE_MSR_VFIFO_ARB_SEL
+                       | MSR_LX_SPARE_MSR_LOAD_WM_LPEN_M
+                       | MSR_LX_SPARE_MSR_WM_LPEN_OVRD);
+       msrval |= MSR_LX_SPARE_MSR_DIS_VIFO_WM |
+                       MSR_LX_SPARE_MSR_DIS_INIT_V_PRI;
+       wrmsrl(MSR_LX_SPARE_MSR, msrval);
+
+       gcfg = DC_GENERAL_CFG_DFLE;   /* Display fifo enable */
+       gcfg |= (0x6 << DC_GENERAL_CFG_DFHPSL_SHIFT) | /* default priority */
+                       (0xb << DC_GENERAL_CFG_DFHPEL_SHIFT);
+       gcfg |= DC_GENERAL_CFG_FDTY;  /* Set the frame dirty mode */
+
+       dcfg  = DC_DISPLAY_CFG_VDEN;  /* Enable video data */
+       dcfg |= DC_DISPLAY_CFG_GDEN;  /* Enable graphics */
+       dcfg |= DC_DISPLAY_CFG_TGEN;  /* Turn on the timing generator */
+       dcfg |= DC_DISPLAY_CFG_TRUP;  /* Update timings immediately */
+       dcfg |= DC_DISPLAY_CFG_PALB;  /* Palette bypass in > 8 bpp modes */
+       dcfg |= DC_DISPLAY_CFG_VISL;
+       dcfg |= DC_DISPLAY_CFG_DCEN;  /* Always center the display */
 
        /* Set the current BPP mode */
 
        switch (info->var.bits_per_pixel) {
        case 8:
-               dcfg |= DC_DCFG_DISP_MODE_8BPP;
+               dcfg |= DC_DISPLAY_CFG_DISP_MODE_8BPP;
                break;
 
        case 16:
-               dcfg |= DC_DCFG_DISP_MODE_16BPP | DC_DCFG_16BPP;
+               dcfg |= DC_DISPLAY_CFG_DISP_MODE_16BPP;
                break;
 
        case 32:
        case 24:
-               dcfg |= DC_DCFG_DISP_MODE_24BPP;
+               dcfg |= DC_DISPLAY_CFG_DISP_MODE_24BPP;
                break;
        }
 
@@ -436,35 +493,31 @@ void lx_set_mode(struct fb_info *info)
        vblankend = vsyncend + info->var.upper_margin;
        vtotal = vblankend;
 
-       writel((hactive - 1) | ((htotal - 1) << 16),
-              par->dc_regs + DC_H_ACTIVE_TIMING);
-       writel((hblankstart - 1) | ((hblankend - 1) << 16),
-              par->dc_regs + DC_H_BLANK_TIMING);
-       writel((hsyncstart - 1) | ((hsyncend - 1) << 16),
-              par->dc_regs + DC_H_SYNC_TIMING);
-
-       writel((vactive - 1) | ((vtotal - 1) << 16),
-              par->dc_regs + DC_V_ACTIVE_TIMING);
+       write_dc(par, DC_H_ACTIVE_TIMING, (hactive - 1) | ((htotal - 1) << 16));
+       write_dc(par, DC_H_BLANK_TIMING,
+                       (hblankstart - 1) | ((hblankend - 1) << 16));
+       write_dc(par, DC_H_SYNC_TIMING,
+                       (hsyncstart - 1) | ((hsyncend - 1) << 16));
 
-       writel((vblankstart - 1) | ((vblankend - 1) << 16),
-              par->dc_regs + DC_V_BLANK_TIMING);
+       write_dc(par, DC_V_ACTIVE_TIMING, (vactive - 1) | ((vtotal - 1) << 16));
+       write_dc(par, DC_V_BLANK_TIMING,
+                       (vblankstart - 1) | ((vblankend - 1) << 16));
+       write_dc(par, DC_V_SYNC_TIMING,
+                       (vsyncstart - 1) | ((vsyncend - 1) << 16));
 
-       writel((vsyncstart - 1)  | ((vsyncend - 1) << 16),
-              par->dc_regs + DC_V_SYNC_TIMING);
-
-       writel( (info->var.xres - 1) << 16 | (info->var.yres - 1),
-               par->dc_regs + DC_FB_ACTIVE);
+       write_dc(par, DC_FB_ACTIVE,
+                       (info->var.xres - 1) << 16 | (info->var.yres - 1));
 
        /* And re-enable the graphics output */
        lx_graphics_enable(info);
 
        /* Write the two main configuration registers */
-       writel(dcfg, par->dc_regs + DC_DISPLAY_CFG);
-       writel(0, par->dc_regs + DC_ARB_CFG);
-       writel(gcfg, par->dc_regs + DC_GENERAL_CFG);
+       write_dc(par, DC_DISPLAY_CFG, dcfg);
+       write_dc(par, DC_ARB_CFG, 0);
+       write_dc(par, DC_GENERAL_CFG, gcfg);
 
        /* Lock the DC registers */
-       writel(0, par->dc_regs + DC_UNLOCK);
+       write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
 }
 
 void lx_set_palette_reg(struct fb_info *info, unsigned regno,
@@ -479,58 +532,310 @@ void lx_set_palette_reg(struct fb_info *info, unsigned regno,
        val |= (green)      & 0x00ff00;
        val |= (blue  >> 8) & 0x0000ff;
 
-       writel(regno, par->dc_regs + DC_PAL_ADDRESS);
-       writel(val, par->dc_regs + DC_PAL_DATA);
+       write_dc(par, DC_PAL_ADDRESS, regno);
+       write_dc(par, DC_PAL_DATA, val);
 }
 
 int lx_blank_display(struct fb_info *info, int blank_mode)
 {
        struct lxfb_par *par = info->par;
        u32 dcfg, fp_pm;
-       int blank, hsync, vsync;
+       int blank, hsync, vsync, crt;
 
        /* CRT power saving modes. */
        switch (blank_mode) {
        case FB_BLANK_UNBLANK:
-               blank = 0; hsync = 1; vsync = 1;
+               blank = 0; hsync = 1; vsync = 1; crt = 1;
                break;
        case FB_BLANK_NORMAL:
-               blank = 1; hsync = 1; vsync = 1;
+               blank = 1; hsync = 1; vsync = 1; crt = 1;
                break;
        case FB_BLANK_VSYNC_SUSPEND:
-               blank = 1; hsync = 1; vsync = 0;
+               blank = 1; hsync = 1; vsync = 0; crt = 1;
                break;
        case FB_BLANK_HSYNC_SUSPEND:
-               blank = 1; hsync = 0; vsync = 1;
+               blank = 1; hsync = 0; vsync = 1; crt = 1;
                break;
        case FB_BLANK_POWERDOWN:
-               blank = 1; hsync = 0; vsync = 0;
+               blank = 1; hsync = 0; vsync = 0; crt = 0;
                break;
        default:
                return -EINVAL;
        }
 
-       dcfg = readl(par->df_regs + DF_DISPLAY_CFG);
-       dcfg &= ~(DF_DCFG_DAC_BL_EN
-                 | DF_DCFG_HSYNC_EN | DF_DCFG_VSYNC_EN);
+       dcfg = read_vp(par, VP_DCFG);
+       dcfg &= ~(VP_DCFG_DAC_BL_EN | VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN |
+                       VP_DCFG_CRT_EN);
        if (!blank)
-               dcfg |= DF_DCFG_DAC_BL_EN;
+               dcfg |= VP_DCFG_DAC_BL_EN;
        if (hsync)
-               dcfg |= DF_DCFG_HSYNC_EN;
+               dcfg |= VP_DCFG_HSYNC_EN;
        if (vsync)
-               dcfg |= DF_DCFG_VSYNC_EN;
-       writel(dcfg, par->df_regs + DF_DISPLAY_CFG);
+               dcfg |= VP_DCFG_VSYNC_EN;
+       if (crt)
+               dcfg |= VP_DCFG_CRT_EN;
+       write_vp(par, VP_DCFG, dcfg);
 
        /* Power on/off flat panel */
 
        if (par->output & OUTPUT_PANEL) {
-               fp_pm = readl(par->df_regs + DF_FP_PM);
+               fp_pm = read_fp(par, FP_PM);
                if (blank_mode == FB_BLANK_POWERDOWN)
-                       fp_pm &= ~DF_FP_PM_P;
+                       fp_pm &= ~FP_PM_P;
                else
-                       fp_pm |= DF_FP_PM_P;
-               writel(fp_pm, par->df_regs + DF_FP_PM);
+                       fp_pm |= FP_PM_P;
+               write_fp(par, FP_PM, fp_pm);
        }
 
        return 0;
 }
+
+#ifdef CONFIG_PM
+
+static void lx_save_regs(struct lxfb_par *par)
+{
+       uint32_t filt;
+       int i;
+
+       /* wait for the BLT engine to stop being busy */
+       do {
+               i = read_gp(par, GP_BLT_STATUS);
+       } while ((i & GP_BLT_STATUS_PB) || !(i & GP_BLT_STATUS_CE));
+
+       /* save MSRs */
+       rdmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel);
+       rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll);
+       rdmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg);
+       rdmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare);
+
+       write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
+
+       /* save registers */
+       memcpy(par->gp, par->gp_regs, sizeof(par->gp));
+       memcpy(par->dc, par->dc_regs, sizeof(par->dc));
+       memcpy(par->vp, par->vp_regs, sizeof(par->vp));
+       memcpy(par->fp, par->vp_regs + VP_FP_START, sizeof(par->fp));
+
+       /* save the palette */
+       write_dc(par, DC_PAL_ADDRESS, 0);
+       for (i = 0; i < ARRAY_SIZE(par->pal); i++)
+               par->pal[i] = read_dc(par, DC_PAL_DATA);
+
+       /* save the horizontal filter coefficients */
+       filt = par->dc[DC_IRQ_FILT_CTL] | DC_IRQ_FILT_CTL_H_FILT_SEL;
+       for (i = 0; i < ARRAY_SIZE(par->hcoeff); i += 2) {
+               write_dc(par, DC_IRQ_FILT_CTL, (filt & 0xffffff00) | i);
+               par->hcoeff[i] = read_dc(par, DC_FILT_COEFF1);
+               par->hcoeff[i + 1] = read_dc(par, DC_FILT_COEFF2);
+       }
+
+       /* save the vertical filter coefficients */
+       filt &= ~DC_IRQ_FILT_CTL_H_FILT_SEL;
+       for (i = 0; i < ARRAY_SIZE(par->vcoeff); i++) {
+               write_dc(par, DC_IRQ_FILT_CTL, (filt & 0xffffff00) | i);
+               par->vcoeff[i] = read_dc(par, DC_FILT_COEFF1);
+       }
+
+       /* save video coeff ram */
+       memcpy(par->vp_coeff, par->vp_regs + VP_VCR, sizeof(par->vp_coeff));
+}
+
+static void lx_restore_gfx_proc(struct lxfb_par *par)
+{
+       int i;
+
+       /* a bunch of registers require GP_RASTER_MODE to be set first */
+       write_gp(par, GP_RASTER_MODE, par->gp[GP_RASTER_MODE]);
+
+       for (i = 0; i < ARRAY_SIZE(par->gp); i++) {
+               switch (i) {
+               case GP_RASTER_MODE:
+               case GP_VECTOR_MODE:
+               case GP_BLT_MODE:
+               case GP_BLT_STATUS:
+               case GP_HST_SRC:
+                       /* FIXME: restore LUT data */
+               case GP_LUT_INDEX:
+               case GP_LUT_DATA:
+                       /* don't restore these registers */
+                       break;
+
+               default:
+                       write_gp(par, i, par->gp[i]);
+               }
+       }
+}
+
+static void lx_restore_display_ctlr(struct lxfb_par *par)
+{
+       uint32_t filt;
+       int i;
+
+       wrmsrl(MSR_LX_SPARE_MSR, par->msr.dcspare);
+
+       for (i = 0; i < ARRAY_SIZE(par->dc); i++) {
+               switch (i) {
+               case DC_UNLOCK:
+                       /* unlock the DC; runs first */
+                       write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
+                       break;
+
+               case DC_GENERAL_CFG:
+               case DC_DISPLAY_CFG:
+                       /* disable all while restoring */
+                       write_dc(par, i, 0);
+                       break;
+
+               case DC_DV_CTL:
+                       /* set all ram to dirty */
+                       write_dc(par, i, par->dc[i] | DC_DV_CTL_CLEAR_DV_RAM);
+
+               case DC_RSVD_1:
+               case DC_RSVD_2:
+               case DC_RSVD_3:
+               case DC_LINE_CNT:
+               case DC_PAL_ADDRESS:
+               case DC_PAL_DATA:
+               case DC_DFIFO_DIAG:
+               case DC_CFIFO_DIAG:
+               case DC_FILT_COEFF1:
+               case DC_FILT_COEFF2:
+               case DC_RSVD_4:
+               case DC_RSVD_5:
+                       /* don't restore these registers */
+                       break;
+
+               default:
+                       write_dc(par, i, par->dc[i]);
+               }
+       }
+
+       /* restore the palette */
+       write_dc(par, DC_PAL_ADDRESS, 0);
+       for (i = 0; i < ARRAY_SIZE(par->pal); i++)
+               write_dc(par, DC_PAL_DATA, par->pal[i]);
+
+       /* restore the horizontal filter coefficients */
+       filt = par->dc[DC_IRQ_FILT_CTL] | DC_IRQ_FILT_CTL_H_FILT_SEL;
+       for (i = 0; i < ARRAY_SIZE(par->hcoeff); i += 2) {
+               write_dc(par, DC_IRQ_FILT_CTL, (filt & 0xffffff00) | i);
+               write_dc(par, DC_FILT_COEFF1, par->hcoeff[i]);
+               write_dc(par, DC_FILT_COEFF2, par->hcoeff[i + 1]);
+       }
+
+       /* restore the vertical filter coefficients */
+       filt &= ~DC_IRQ_FILT_CTL_H_FILT_SEL;
+       for (i = 0; i < ARRAY_SIZE(par->vcoeff); i++) {
+               write_dc(par, DC_IRQ_FILT_CTL, (filt & 0xffffff00) | i);
+               write_dc(par, DC_FILT_COEFF1, par->vcoeff[i]);
+       }
+}
+
+static void lx_restore_video_proc(struct lxfb_par *par)
+{
+       int i;
+
+       wrmsrl(MSR_LX_GLD_MSR_CONFIG, par->msr.dfglcfg);
+       wrmsrl(MSR_LX_MSR_PADSEL, par->msr.padsel);
+
+       for (i = 0; i < ARRAY_SIZE(par->vp); i++) {
+               switch (i) {
+               case VP_VCFG:
+               case VP_DCFG:
+               case VP_PAR:
+               case VP_PDR:
+               case VP_CCS:
+               case VP_RSVD_0:
+               /* case VP_VDC: */ /* why should this not be restored? */
+               case VP_RSVD_1:
+               case VP_CRC32:
+                       /* don't restore these registers */
+                       break;
+
+               default:
+                       write_vp(par, i, par->vp[i]);
+               }
+       }
+
+       /* restore video coeff ram */
+       memcpy(par->vp_regs + VP_VCR, par->vp_coeff, sizeof(par->vp_coeff));
+}
+
+static void lx_restore_regs(struct lxfb_par *par)
+{
+       int i;
+
+       lx_set_dotpll((u32) (par->msr.dotpll >> 32));
+       lx_restore_gfx_proc(par);
+       lx_restore_display_ctlr(par);
+       lx_restore_video_proc(par);
+
+       /* Flat Panel */
+       for (i = 0; i < ARRAY_SIZE(par->fp); i++) {
+               switch (i) {
+               case FP_PM:
+               case FP_RSVD_0:
+               case FP_RSVD_1:
+               case FP_RSVD_2:
+               case FP_RSVD_3:
+               case FP_RSVD_4:
+                       /* don't restore these registers */
+                       break;
+
+               default:
+                       write_fp(par, i, par->fp[i]);
+               }
+       }
+
+       /* control the panel */
+       if (par->fp[FP_PM] & FP_PM_P) {
+               /* power on the panel if not already power{ed,ing} on */
+               if (!(read_fp(par, FP_PM) &
+                               (FP_PM_PANEL_ON|FP_PM_PANEL_PWR_UP)))
+                       write_fp(par, FP_PM, par->fp[FP_PM]);
+       } else {
+               /* power down the panel if not already power{ed,ing} down */
+               if (!(read_fp(par, FP_PM) &
+                               (FP_PM_PANEL_OFF|FP_PM_PANEL_PWR_DOWN)))
+                       write_fp(par, FP_PM, par->fp[FP_PM]);
+       }
+
+       /* turn everything on */
+       write_vp(par, VP_VCFG, par->vp[VP_VCFG]);
+       write_vp(par, VP_DCFG, par->vp[VP_DCFG]);
+       write_dc(par, DC_DISPLAY_CFG, par->dc[DC_DISPLAY_CFG]);
+       /* do this last; it will enable the FIFO load */
+       write_dc(par, DC_GENERAL_CFG, par->dc[DC_GENERAL_CFG]);
+
+       /* lock the door behind us */
+       write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
+}
+
+int lx_powerdown(struct fb_info *info)
+{
+       struct lxfb_par *par = info->par;
+
+       if (par->powered_down)
+               return 0;
+
+       lx_save_regs(par);
+       lx_graphics_disable(info);
+
+       par->powered_down = 1;
+       return 0;
+}
+
+int lx_powerup(struct fb_info *info)
+{
+       struct lxfb_par *par = info->par;
+
+       if (!par->powered_down)
+               return 0;
+
+       lx_restore_regs(par);
+
+       par->powered_down = 0;
+       return 0;
+}
+
+#endif
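
A subtle detail in the restore path: par->msr.dotpll was captured with rdmsrl(), which packs the 64-bit MSR as (hi << 32) | lo, so lx_restore_regs() recovers the hi word -- the pllval that lx_set_dotpll() reprograms -- with a plain shift:

    par->msr.dotpll == ((u64)dotpll_hi << 32) | dotpll_lo;   /* as saved by lx_save_regs() */
    lx_set_dotpll((u32)(par->msr.dotpll >> 32));              /* replays dotpll_hi */
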
diff --git a/drivers/video/geode/suspend_gx.c b/drivers/video/geode/suspend_gx.c
new file mode 100644 (file)
index 0000000..9aff32e
--- /dev/null
@@ -0,0 +1,267 @@
+/*
+ *   Copyright (C) 2007 Advanced Micro Devices, Inc.
+ *   Copyright (C) 2008 Andres Salomon <dilinger@debian.org>
+ *
+ *   This program is free software; you can redistribute it and/or modify it
+ *   under the terms of the GNU General Public License as published by the
+ *   Free Software Foundation; either version 2 of the License, or (at your
+ *   option) any later version.
+ */
+#include <linux/fb.h>
+#include <asm/io.h>
+#include <asm/msr.h>
+#include <asm/geode.h>
+#include <asm/delay.h>
+
+#include "gxfb.h"
+
+#ifdef CONFIG_PM
+
+static void gx_save_regs(struct gxfb_par *par)
+{
+       int i;
+
+       /* wait for the BLT engine to stop being busy */
+       do {
+               i = read_gp(par, GP_BLT_STATUS);
+       } while (i & (GP_BLT_STATUS_BLT_PENDING | GP_BLT_STATUS_BLT_BUSY));
+
+       /* save MSRs */
+       rdmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel);
+       rdmsrl(MSR_GLCP_DOTPLL, par->msr.dotpll);
+
+       write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
+
+       /* save registers */
+       memcpy(par->gp, par->gp_regs, sizeof(par->gp));
+       memcpy(par->dc, par->dc_regs, sizeof(par->dc));
+       memcpy(par->vp, par->vid_regs, sizeof(par->vp));
+       memcpy(par->fp, par->vid_regs + VP_FP_START, sizeof(par->fp));
+
+       /* save the palette */
+       write_dc(par, DC_PAL_ADDRESS, 0);
+       for (i = 0; i < ARRAY_SIZE(par->pal); i++)
+               par->pal[i] = read_dc(par, DC_PAL_DATA);
+}
+
+static void gx_set_dotpll(uint32_t dotpll_hi)
+{
+       uint32_t dotpll_lo;
+       int i;
+
+       rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo);
+       dotpll_lo |= MSR_GLCP_DOTPLL_DOTRESET;
+       dotpll_lo &= ~MSR_GLCP_DOTPLL_BYPASS;
+       wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
+
+       /* wait for the PLL to lock */
+       for (i = 0; i < 200; i++) {
+               rdmsrl(MSR_GLCP_DOTPLL, dotpll_lo);
+               if (dotpll_lo & MSR_GLCP_DOTPLL_LOCK)
+                       break;
+               udelay(1);
+       }
+
+       /* PLL set, unlock */
+       dotpll_lo &= ~MSR_GLCP_DOTPLL_DOTRESET;
+       wrmsr(MSR_GLCP_DOTPLL, dotpll_lo, dotpll_hi);
+}
+
+static void gx_restore_gfx_proc(struct gxfb_par *par)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(par->gp); i++) {
+               switch (i) {
+               case GP_VECTOR_MODE:
+               case GP_BLT_MODE:
+               case GP_BLT_STATUS:
+               case GP_HST_SRC:
+                       /* don't restore these registers */
+                       break;
+               default:
+                       write_gp(par, i, par->gp[i]);
+               }
+       }
+}
+
+static void gx_restore_display_ctlr(struct gxfb_par *par)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(par->dc); i++) {
+               switch (i) {
+               case DC_UNLOCK:
+                       /* unlock the DC; runs first */
+                       write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
+                       break;
+
+               case DC_GENERAL_CFG:
+                       /* write without the enables */
+                       write_dc(par, i, par->dc[i] & ~(DC_GENERAL_CFG_VIDE |
+                                       DC_GENERAL_CFG_ICNE |
+                                       DC_GENERAL_CFG_CURE |
+                                       DC_GENERAL_CFG_DFLE));
+                       break;
+
+               case DC_DISPLAY_CFG:
+                       /* write without the enables */
+                       write_dc(par, i, par->dc[i] & ~(DC_DISPLAY_CFG_VDEN |
+                                       DC_DISPLAY_CFG_GDEN |
+                                       DC_DISPLAY_CFG_TGEN));
+                       break;
+
+               case DC_RSVD_0:
+               case DC_RSVD_1:
+               case DC_RSVD_2:
+               case DC_RSVD_3:
+               case DC_RSVD_4:
+               case DC_LINE_CNT:
+               case DC_PAL_ADDRESS:
+               case DC_PAL_DATA:
+               case DC_DFIFO_DIAG:
+               case DC_CFIFO_DIAG:
+               case DC_RSVD_5:
+                       /* don't restore these registers */
+                       break;
+               default:
+                       write_dc(par, i, par->dc[i]);
+               }
+       }
+
+       /* restore the palette */
+       write_dc(par, DC_PAL_ADDRESS, 0);
+       for (i = 0; i < ARRAY_SIZE(par->pal); i++)
+               write_dc(par, DC_PAL_DATA, par->pal[i]);
+}
+
+static void gx_restore_video_proc(struct gxfb_par *par)
+{
+       int i;
+
+       wrmsrl(MSR_GX_MSR_PADSEL, par->msr.padsel);
+
+       for (i = 0; i < ARRAY_SIZE(par->vp); i++) {
+               switch (i) {
+               case VP_VCFG:
+                       /* don't enable video yet */
+                       write_vp(par, i, par->vp[i] & ~VP_VCFG_VID_EN);
+                       break;
+
+               case VP_DCFG:
+                       /* don't enable CRT yet */
+                       write_vp(par, i, par->vp[i] &
+                                       ~(VP_DCFG_DAC_BL_EN | VP_DCFG_VSYNC_EN |
+                                       VP_DCFG_HSYNC_EN | VP_DCFG_CRT_EN));
+                       break;
+
+               case VP_GAR:
+               case VP_GDR:
+               case VP_RSVD_0:
+               case VP_RSVD_1:
+               case VP_RSVD_2:
+               case VP_RSVD_3:
+               case VP_CRC32:
+               case VP_AWT:
+               case VP_VTM:
+                       /* don't restore these registers */
+                       break;
+               default:
+                       write_vp(par, i, par->vp[i]);
+               }
+       }
+}
+
+static void gx_restore_regs(struct gxfb_par *par)
+{
+       int i;
+
+       gx_set_dotpll((uint32_t) (par->msr.dotpll >> 32));
+       gx_restore_gfx_proc(par);
+       gx_restore_display_ctlr(par);
+       gx_restore_video_proc(par);
+
+       /* Flat Panel */
+       for (i = 0; i < ARRAY_SIZE(par->fp); i++) {
+               if (i != FP_PM && i != FP_RSVD_0)
+                       write_fp(par, i, par->fp[i]);
+       }
+}
+
+static void gx_disable_graphics(struct gxfb_par *par)
+{
+       /* shut down the engine */
+       write_vp(par, VP_VCFG, par->vp[VP_VCFG] & ~VP_VCFG_VID_EN);
+       write_vp(par, VP_DCFG, par->vp[VP_DCFG] & ~(VP_DCFG_DAC_BL_EN |
+                       VP_DCFG_VSYNC_EN | VP_DCFG_HSYNC_EN | VP_DCFG_CRT_EN));
+
+       /* turn off the flat panel */
+       write_fp(par, FP_PM, par->fp[FP_PM] & ~FP_PM_P);
+
+
+       /* turn off display */
+       write_dc(par, DC_UNLOCK, DC_UNLOCK_UNLOCK);
+       write_dc(par, DC_GENERAL_CFG, par->dc[DC_GENERAL_CFG] &
+                       ~(DC_GENERAL_CFG_VIDE | DC_GENERAL_CFG_ICNE |
+                       DC_GENERAL_CFG_CURE | DC_GENERAL_CFG_DFLE));
+       write_dc(par, DC_DISPLAY_CFG, par->dc[DC_DISPLAY_CFG] &
+                       ~(DC_DISPLAY_CFG_VDEN | DC_DISPLAY_CFG_GDEN |
+                       DC_DISPLAY_CFG_TGEN));
+       write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
+}
+
+static void gx_enable_graphics(struct gxfb_par *par)
+{
+       uint32_t fp;
+
+       fp = read_fp(par, FP_PM);
+       if (par->fp[FP_PM] & FP_PM_P) {
+               /* power on the panel if not already power{ed,ing} on */
+               if (!(fp & (FP_PM_PANEL_ON|FP_PM_PANEL_PWR_UP)))
+                       write_fp(par, FP_PM, par->fp[FP_PM]);
+       } else {
+               /* power down the panel if not already power{ed,ing} down */
+               if (!(fp & (FP_PM_PANEL_OFF|FP_PM_PANEL_PWR_DOWN)))
+                       write_fp(par, FP_PM, par->fp[FP_PM]);
+       }
+
+       /* turn everything on */
+       write_vp(par, VP_VCFG, par->vp[VP_VCFG]);
+       write_vp(par, VP_DCFG, par->vp[VP_DCFG]);
+       write_dc(par, DC_DISPLAY_CFG, par->dc[DC_DISPLAY_CFG]);
+       /* do this last; it will enable the FIFO load */
+       write_dc(par, DC_GENERAL_CFG, par->dc[DC_GENERAL_CFG]);
+
+       /* lock the door behind us */
+       write_dc(par, DC_UNLOCK, DC_UNLOCK_LOCK);
+}
+
+int gx_powerdown(struct fb_info *info)
+{
+       struct gxfb_par *par = info->par;
+
+       if (par->powered_down)
+               return 0;
+
+       gx_save_regs(par);
+       gx_disable_graphics(par);
+
+       par->powered_down = 1;
+       return 0;
+}
+
+int gx_powerup(struct fb_info *info)
+{
+       struct gxfb_par *par = info->par;
+
+       if (!par->powered_down)
+               return 0;
+
+       gx_restore_regs(par);
+       gx_enable_graphics(par);
+
+       par->powered_down = 0;
+       return 0;
+}
+
+#endif
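
suspend_gx.c only provides gx_powerdown()/gx_powerup(); the PCI hooks that call them are not part of this hunk. A hypothetical wrapper, mirroring the lxfb_suspend() hook shown earlier in this commit and assuming the usual fb/console/PCI headers plus gxfb.h are included:

    static int gxfb_suspend(struct pci_dev *pdev, pm_message_t state)
    {
            struct fb_info *info = pci_get_drvdata(pdev);

            if (state.event == PM_EVENT_SUSPEND) {
                    acquire_console_sem();          /* serialize against fbcon */
                    gx_powerdown(info);             /* save registers, disable graphics */
                    fb_set_suspend(info, 1);
                    release_console_sem();
            }
            return 0;
    }
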
index febf09c63492fa6bdaba484c9bc603d35b1a4140..b8d52a8360db2436371dc3445a39e3a4fd96c8c2 100644 (file)
@@ -16,9 +16,9 @@
 #include <asm/io.h>
 #include <asm/delay.h>
 #include <asm/msr.h>
+#include <asm/geode.h>
 
-#include "geodefb.h"
-#include "video_gx.h"
+#include "gxfb.h"
 
 
 /*
@@ -117,7 +117,7 @@ static const struct gx_pll_entry gx_pll_table_14MHz[] = {
        {  4357, 0, 0x0000057D },       /* 229.5000 */
 };
 
-static void gx_set_dclk_frequency(struct fb_info *info)
+void gx_set_dclk_frequency(struct fb_info *info)
 {
        const struct gx_pll_entry *pll_table;
        int pll_table_len;
@@ -178,110 +178,116 @@ static void gx_set_dclk_frequency(struct fb_info *info)
 static void
 gx_configure_tft(struct fb_info *info)
 {
-       struct geodefb_par *par = info->par;
+       struct gxfb_par *par = info->par;
        unsigned long val;
        unsigned long fp;
 
        /* Set up the DF pad select MSR */
 
-       rdmsrl(GX_VP_MSR_PAD_SELECT, val);
-       val &= ~GX_VP_PAD_SELECT_MASK;
-       val |= GX_VP_PAD_SELECT_TFT;
-       wrmsrl(GX_VP_MSR_PAD_SELECT, val);
+       rdmsrl(MSR_GX_MSR_PADSEL, val);
+       val &= ~MSR_GX_MSR_PADSEL_MASK;
+       val |= MSR_GX_MSR_PADSEL_TFT;
+       wrmsrl(MSR_GX_MSR_PADSEL, val);
 
        /* Turn off the panel */
 
-       fp = readl(par->vid_regs + GX_FP_PM);
-       fp &= ~GX_FP_PM_P;
-       writel(fp, par->vid_regs + GX_FP_PM);
+       fp = read_fp(par, FP_PM);
+       fp &= ~FP_PM_P;
+       write_fp(par, FP_PM, fp);
 
        /* Set timing 1 */
 
-       fp = readl(par->vid_regs + GX_FP_PT1);
-       fp &= GX_FP_PT1_VSIZE_MASK;
-       fp |= info->var.yres << GX_FP_PT1_VSIZE_SHIFT;
-       writel(fp, par->vid_regs + GX_FP_PT1);
+       fp = read_fp(par, FP_PT1);
+       fp &= FP_PT1_VSIZE_MASK;
+       fp |= info->var.yres << FP_PT1_VSIZE_SHIFT;
+       write_fp(par, FP_PT1, fp);
 
        /* Timing 2 */
        /* Set bits that are always on for TFT */
 
        fp = 0x0F100000;
 
-       /* Add sync polarity */
+       /* Configure sync polarity */
 
        if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT))
-               fp |= GX_FP_PT2_VSP;
+               fp |= FP_PT2_VSP;
 
        if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
-               fp |= GX_FP_PT2_HSP;
+               fp |= FP_PT2_HSP;
 
-       writel(fp, par->vid_regs + GX_FP_PT2);
+       write_fp(par, FP_PT2, fp);
 
        /*  Set the dither control */
-       writel(0x70, par->vid_regs + GX_FP_DFC);
+       write_fp(par, FP_DFC, FP_DFC_NFI);
 
        /* Enable the FP data and power (in case the BIOS didn't) */
 
-       fp = readl(par->vid_regs + GX_DCFG);
-       fp |= GX_DCFG_FP_PWR_EN | GX_DCFG_FP_DATA_EN;
-       writel(fp, par->vid_regs + GX_DCFG);
+       fp = read_vp(par, VP_DCFG);
+       fp |= VP_DCFG_FP_PWR_EN | VP_DCFG_FP_DATA_EN;
+       write_vp(par, VP_DCFG, fp);
 
        /* Unblank the panel */
 
-       fp = readl(par->vid_regs + GX_FP_PM);
-       fp |= GX_FP_PM_P;
-       writel(fp, par->vid_regs + GX_FP_PM);
+       fp = read_fp(par, FP_PM);
+       fp |= FP_PM_P;
+       write_fp(par, FP_PM, fp);
 }
 
-static void gx_configure_display(struct fb_info *info)
+void gx_configure_display(struct fb_info *info)
 {
-       struct geodefb_par *par = info->par;
+       struct gxfb_par *par = info->par;
        u32 dcfg, misc;
 
-       /* Set up the MISC register */
-
-       misc = readl(par->vid_regs + GX_MISC);
-
-       /* Power up the DAC */
-       misc &= ~(GX_MISC_A_PWRDN | GX_MISC_DAC_PWRDN);
-
-       /* Disable gamma correction */
-       misc |= GX_MISC_GAM_EN;
-
-       writel(misc, par->vid_regs + GX_MISC);
-
        /* Write the display configuration */
-       dcfg = readl(par->vid_regs + GX_DCFG);
+       dcfg = read_vp(par, VP_DCFG);
 
        /* Disable hsync and vsync */
-       dcfg &= ~(GX_DCFG_VSYNC_EN | GX_DCFG_HSYNC_EN);
-       writel(dcfg, par->vid_regs + GX_DCFG);
+       dcfg &= ~(VP_DCFG_VSYNC_EN | VP_DCFG_HSYNC_EN);
+       write_vp(par, VP_DCFG, dcfg);
 
        /* Clear bits from existing mode. */
-       dcfg &= ~(GX_DCFG_CRT_SYNC_SKW_MASK
-                 | GX_DCFG_CRT_HSYNC_POL   | GX_DCFG_CRT_VSYNC_POL
-                 | GX_DCFG_VSYNC_EN        | GX_DCFG_HSYNC_EN);
+       dcfg &= ~(VP_DCFG_CRT_SYNC_SKW
+                 | VP_DCFG_CRT_HSYNC_POL   | VP_DCFG_CRT_VSYNC_POL
+                 | VP_DCFG_VSYNC_EN        | VP_DCFG_HSYNC_EN);
 
        /* Set default sync skew.  */
-       dcfg |= GX_DCFG_CRT_SYNC_SKW_DFLT;
+       dcfg |= VP_DCFG_CRT_SYNC_SKW_DEFAULT;
 
        /* Enable hsync and vsync. */
-       dcfg |= GX_DCFG_HSYNC_EN | GX_DCFG_VSYNC_EN;
+       dcfg |= VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN;
 
-       /* Sync polarities. */
-       if (info->var.sync & FB_SYNC_HOR_HIGH_ACT)
-               dcfg |= GX_DCFG_CRT_HSYNC_POL;
-       if (info->var.sync & FB_SYNC_VERT_HIGH_ACT)
-               dcfg |= GX_DCFG_CRT_VSYNC_POL;
+       misc = read_vp(par, VP_MISC);
+
+       /* Disable gamma correction */
+       misc |= VP_MISC_GAM_EN;
+
+       if (par->enable_crt) {
+
+               /* Power up the CRT DACs */
+               misc &= ~(VP_MISC_APWRDN | VP_MISC_DACPWRDN);
+               write_vp(par, VP_MISC, misc);
+
+               /* Only change the sync polarities if we are running
+                * in CRT mode.  The FP polarities will be handled in
+                * gx_configure_tft */
+               if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
+                       dcfg |= VP_DCFG_CRT_HSYNC_POL;
+               if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT))
+                       dcfg |= VP_DCFG_CRT_VSYNC_POL;
+       } else {
+               /* Power down the CRT DACs if in FP mode */
+               misc |= (VP_MISC_APWRDN | VP_MISC_DACPWRDN);
+               write_vp(par, VP_MISC, misc);
+       }
 
        /* Enable the display logic */
        /* Set up the DACS to blank normally */
 
-       dcfg |= GX_DCFG_CRT_EN | GX_DCFG_DAC_BL_EN;
+       dcfg |= VP_DCFG_CRT_EN | VP_DCFG_DAC_BL_EN;
 
        /* Enable the external DAC VREF? */
 
-       writel(dcfg, par->vid_regs + GX_DCFG);
+       write_vp(par, VP_DCFG, dcfg);
 
        /* Set up the flat panel (if it is enabled) */
 
@@ -289,59 +295,55 @@ static void gx_configure_display(struct fb_info *info)
                gx_configure_tft(info);
 }
 
-static int gx_blank_display(struct fb_info *info, int blank_mode)
+int gx_blank_display(struct fb_info *info, int blank_mode)
 {
-       struct geodefb_par *par = info->par;
+       struct gxfb_par *par = info->par;
        u32 dcfg, fp_pm;
-       int blank, hsync, vsync;
+       int blank, hsync, vsync, crt;
 
        /* CRT power saving modes. */
        switch (blank_mode) {
        case FB_BLANK_UNBLANK:
-               blank = 0; hsync = 1; vsync = 1;
+               blank = 0; hsync = 1; vsync = 1; crt = 1;
                break;
        case FB_BLANK_NORMAL:
-               blank = 1; hsync = 1; vsync = 1;
+               blank = 1; hsync = 1; vsync = 1; crt = 1;
                break;
        case FB_BLANK_VSYNC_SUSPEND:
-               blank = 1; hsync = 1; vsync = 0;
+               blank = 1; hsync = 1; vsync = 0; crt = 1;
                break;
        case FB_BLANK_HSYNC_SUSPEND:
-               blank = 1; hsync = 0; vsync = 1;
+               blank = 1; hsync = 0; vsync = 1; crt = 1;
                break;
        case FB_BLANK_POWERDOWN:
-               blank = 1; hsync = 0; vsync = 0;
+               blank = 1; hsync = 0; vsync = 0; crt = 0;
                break;
        default:
                return -EINVAL;
        }
-       dcfg = readl(par->vid_regs + GX_DCFG);
-       dcfg &= ~(GX_DCFG_DAC_BL_EN
-                 | GX_DCFG_HSYNC_EN | GX_DCFG_VSYNC_EN);
+       dcfg = read_vp(par, VP_DCFG);
+       dcfg &= ~(VP_DCFG_DAC_BL_EN | VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN |
+                       VP_DCFG_CRT_EN);
        if (!blank)
-               dcfg |= GX_DCFG_DAC_BL_EN;
+               dcfg |= VP_DCFG_DAC_BL_EN;
        if (hsync)
-               dcfg |= GX_DCFG_HSYNC_EN;
+               dcfg |= VP_DCFG_HSYNC_EN;
        if (vsync)
-               dcfg |= GX_DCFG_VSYNC_EN;
-       writel(dcfg, par->vid_regs + GX_DCFG);
+               dcfg |= VP_DCFG_VSYNC_EN;
+       if (crt)
+               dcfg |= VP_DCFG_CRT_EN;
+       write_vp(par, VP_DCFG, dcfg);
 
        /* Power on/off flat panel. */
 
        if (par->enable_crt == 0) {
-               fp_pm = readl(par->vid_regs + GX_FP_PM);
+               fp_pm = read_fp(par, FP_PM);
                if (blank_mode == FB_BLANK_POWERDOWN)
-                       fp_pm &= ~GX_FP_PM_P;
+                       fp_pm &= ~FP_PM_P;
                else
-                       fp_pm |= GX_FP_PM_P;
-               writel(fp_pm, par->vid_regs + GX_FP_PM);
+                       fp_pm |= FP_PM_P;
+               write_fp(par, FP_PM, fp_pm);
        }
 
        return 0;
 }
-
-struct geode_vid_ops gx_vid_ops = {
-       .set_dclk          = gx_set_dclk_frequency,
-       .configure_display = gx_configure_display,
-       .blank_display     = gx_blank_display,
-};
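
The gxfb hunks above replace raw readl()/writel() accesses on par->vid_regs with read_vp()/write_vp()/read_fp()/write_fp() helpers and the VP_*/FP_* register names. The helpers themselves live in the driver's new gxfb.h header, which is not part of this excerpt; the snippet below is only a rough sketch of the shape such accessors usually take — the gxfb_par field names and the byte-offset register addressing are assumptions here, not the header's actual definition.

/* Hedged sketch only; assumes <linux/io.h> and ioremapped VP/FP register
 * blocks hanging off struct gxfb_par. */
static inline u32 read_vp(struct gxfb_par *par, int reg)
{
        /* treat reg as a byte offset into the video processor block */
        return readl(par->vid_regs + reg);
}

static inline void write_vp(struct gxfb_par *par, int reg, u32 val)
{
        writel(val, par->vid_regs + reg);
}

static inline u32 read_fp(struct gxfb_par *par, int reg)
{
        /* flat panel registers assumed to sit at their own mapped base */
        return readl(par->fp_regs + reg);
}

static inline void write_fp(struct gxfb_par *par, int reg, u32 val)
{
        writel(val, par->fp_regs + reg);
}

Wrapping the MMIO accesses this way lets video_gx.c use symbolic register names while keeping the mapping details in one place.
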
diff --git a/drivers/video/geode/video_gx.h b/drivers/video/geode/video_gx.h
deleted file mode 100644 (file)
index ce28d8f..0000000
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Geode GX video device
- *
- * Copyright (C) 2006 Arcom Control Systems Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-#ifndef __VIDEO_GX_H__
-#define __VIDEO_GX_H__
-
-extern struct geode_vid_ops gx_vid_ops;
-
-/* GX Flatpanel control MSR */
-#define GX_VP_MSR_PAD_SELECT           0xC0002011
-#define GX_VP_PAD_SELECT_MASK          0x3FFFFFFF
-#define GX_VP_PAD_SELECT_TFT           0x1FFFFFFF
-
-/* Geode GX video processor registers */
-
-#define GX_DCFG                0x0008
-#  define GX_DCFG_CRT_EN               0x00000001
-#  define GX_DCFG_HSYNC_EN             0x00000002
-#  define GX_DCFG_VSYNC_EN             0x00000004
-#  define GX_DCFG_DAC_BL_EN            0x00000008
-#  define GX_DCFG_FP_PWR_EN            0x00000040
-#  define GX_DCFG_FP_DATA_EN           0x00000080
-#  define GX_DCFG_CRT_HSYNC_POL                0x00000100
-#  define GX_DCFG_CRT_VSYNC_POL                0x00000200
-#  define GX_DCFG_CRT_SYNC_SKW_MASK    0x0001C000
-#  define GX_DCFG_CRT_SYNC_SKW_DFLT    0x00010000
-#  define GX_DCFG_VG_CK                        0x00100000
-#  define GX_DCFG_GV_GAM               0x00200000
-#  define GX_DCFG_DAC_VREF             0x04000000
-
-/* Geode GX MISC video configuration */
-
-#define GX_MISC 0x50
-#define GX_MISC_GAM_EN     0x00000001
-#define GX_MISC_DAC_PWRDN  0x00000400
-#define GX_MISC_A_PWRDN    0x00000800
-
-/* Geode GX flat panel display control registers */
-
-#define GX_FP_PT1 0x0400
-#define GX_FP_PT1_VSIZE_MASK 0x7FF0000
-#define GX_FP_PT1_VSIZE_SHIFT 16
-
-#define GX_FP_PT2 0x408
-#define GX_FP_PT2_VSP (1 << 23)
-#define GX_FP_PT2_HSP (1 << 22)
-
-#define GX_FP_PM 0x410
-#  define GX_FP_PM_P 0x01000000
-
-#define GX_FP_DFC 0x418
-
-/* Geode GX clock control MSRs */
-
-#define MSR_GLCP_SYS_RSTPLL    0x4c000014
-#  define MSR_GLCP_SYS_RSTPLL_DOTPREDIV2       (0x0000000000000002ull)
-#  define MSR_GLCP_SYS_RSTPLL_DOTPREMULT2      (0x0000000000000004ull)
-#  define MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3      (0x0000000000000008ull)
-
-#define MSR_GLCP_DOTPLL                0x4c000015
-#  define MSR_GLCP_DOTPLL_DOTRESET             (0x0000000000000001ull)
-#  define MSR_GLCP_DOTPLL_BYPASS               (0x0000000000008000ull)
-#  define MSR_GLCP_DOTPLL_LOCK                 (0x0000000002000000ull)
-
-#endif /* !__VIDEO_GX_H__ */
index e92337bef50d00951bc4a7b9689f0d05b31a62ec..564557792bed6e616d590b8c49a1d28f3719f442 100644 (file)
@@ -238,7 +238,7 @@ static int calc_pll(int period_ps, struct gxt4500_par *par)
        for (pdiv1 = 1; pdiv1 <= 8; ++pdiv1) {
                for (pdiv2 = 1; pdiv2 <= pdiv1; ++pdiv2) {
                        postdiv = pdiv1 * pdiv2;
-                       pll_period = (period_ps + postdiv - 1) / postdiv;
+                       pll_period = DIV_ROUND_UP(period_ps, postdiv);
                        /* keep pll in range 350..600 MHz */
                        if (pll_period < 1666 || pll_period > 2857)
                                continue;
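
The gxt4500 hunk above swaps an open-coded round-up division for the DIV_ROUND_UP() macro from <linux/kernel.h>; for positive operands the two forms are identical. A minimal sketch of the equivalence (the helper name is made up for illustration):

#include <linux/kernel.h>

/* For postdiv > 0, DIV_ROUND_UP(period_ps, postdiv) computes the ceiling of
 * period_ps / postdiv, exactly like (period_ps + postdiv - 1) / postdiv. */
static unsigned int pll_period_of(unsigned int period_ps, unsigned int postdiv)
{
        return DIV_ROUND_UP(period_ps, postdiv);
}
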
index 94e0df8a6f60bc9b80d05586b1337fad1006cc47..0b4bffbe67c812ff71fcc385efdbfc8acde82af4 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * linux/drivers/video/hecubafb.c -- FB driver for Hecuba controller
+ * linux/drivers/video/hecubafb.c -- FB driver for Hecuba/Apollo controller
  *
  * Copyright (C) 2006, Jaya Kumar
  * This work was sponsored by CIS(M) Sdn Bhd
  * values. There are other commands that the display is capable of,
  * beyond the 5 used here, but they are more complex.
  *
- * This driver is written to be used with the Hecuba display controller
- * board, and tested with the EInk 800x600 display in 1 bit mode.
- * The interface between Hecuba and the host is TTL based GPIO. The
- * GPIO requirements are 8 writable data lines and 6 lines for control.
- * Only 4 of the controls are actually used here but 6 for future use.
- * The driver requires the IO addresses for data and control GPIO at
- * load time. It is also possible to use this display with a standard
- * PC parallel port.
+ * This driver is written to be used with the Hecuba display architecture.
+ * The actual display chip is called Apollo and the interface electronics
+ * it needs is called Hecuba.
  *
- * General notes:
- * - User must set hecubafb_enable=1 to enable it
- * - User must set dio_addr=0xIOADDR cio_addr=0xIOADDR c2io_addr=0xIOADDR
+ * It is intended to be architecture independent. A board specific driver
+ * must be used to perform all the physical IO interactions. An example
+ * is provided as n411.c
  *
  */
 
 #include <linux/list.h>
 #include <linux/uaccess.h>
 
-/* Apollo controller specific defines */
-#define APOLLO_START_NEW_IMG   0xA0
-#define APOLLO_STOP_IMG_DATA   0xA1
-#define APOLLO_DISPLAY_IMG     0xA2
-#define APOLLO_ERASE_DISPLAY   0xA3
-#define APOLLO_INIT_DISPLAY    0xA4
-
-/* Hecuba interface specific defines */
-/* WUP is inverted, CD is inverted, DS is inverted */
-#define HCB_NWUP_BIT   0x01
-#define HCB_NDS_BIT    0x02
-#define HCB_RW_BIT     0x04
-#define HCB_NCD_BIT    0x08
-#define HCB_ACK_BIT    0x80
+#include <video/hecubafb.h>
 
 /* Display specific information */
 #define DPY_W 600
 #define DPY_H 800
 
-struct hecubafb_par {
-       unsigned long dio_addr;
-       unsigned long cio_addr;
-       unsigned long c2io_addr;
-       unsigned char ctl;
-       struct fb_info *info;
-       unsigned int irq;
-};
-
 static struct fb_fix_screeninfo hecubafb_fix __devinitdata = {
        .id =           "hecubafb",
        .type =         FB_TYPE_PACKED_PIXELS,
@@ -82,6 +55,7 @@ static struct fb_fix_screeninfo hecubafb_fix __devinitdata = {
        .xpanstep =     0,
        .ypanstep =     0,
        .ywrapstep =    0,
+       .line_length =  DPY_W,
        .accel =        FB_ACCEL_NONE,
 };
 
@@ -94,136 +68,51 @@ static struct fb_var_screeninfo hecubafb_var __devinitdata = {
        .nonstd         = 1,
 };
 
-static unsigned long dio_addr;
-static unsigned long cio_addr;
-static unsigned long c2io_addr;
-static unsigned long splashval;
-static unsigned int nosplash;
-static unsigned int hecubafb_enable;
-static unsigned int irq;
-
-static DECLARE_WAIT_QUEUE_HEAD(hecubafb_waitq);
-
-static void hcb_set_ctl(struct hecubafb_par *par)
-{
-       outb(par->ctl, par->cio_addr);
-}
-
-static unsigned char hcb_get_ctl(struct hecubafb_par *par)
-{
-       return inb(par->c2io_addr);
-}
-
-static void hcb_set_data(struct hecubafb_par *par, unsigned char value)
-{
-       outb(value, par->dio_addr);
-}
-
-static int __devinit apollo_init_control(struct hecubafb_par *par)
-{
-       unsigned char ctl;
-       /* for init, we want the following setup to be set:
-       WUP = lo
-       ACK = hi
-       DS = hi
-       RW = hi
-       CD = lo
-       */
-
-       /* write WUP to lo, DS to hi, RW to hi, CD to lo */
-       par->ctl = HCB_NWUP_BIT | HCB_RW_BIT | HCB_NCD_BIT ;
-       par->ctl &= ~HCB_NDS_BIT;
-       hcb_set_ctl(par);
-
-       /* check ACK is not lo */
-       ctl = hcb_get_ctl(par);
-       if ((ctl & HCB_ACK_BIT)) {
-               printk(KERN_ERR "Fail because ACK is already low\n");
-               return -ENXIO;
-       }
-
-       return 0;
-}
-
-static void hcb_wait_for_ack(struct hecubafb_par *par)
-{
-
-       int timeout;
-       unsigned char ctl;
-
-       timeout=500;
-       do {
-               ctl = hcb_get_ctl(par);
-               if ((ctl & HCB_ACK_BIT))
-                       return;
-               udelay(1);
-       } while (timeout--);
-       printk(KERN_ERR "timed out waiting for ack\n");
-}
-
-static void hcb_wait_for_ack_clear(struct hecubafb_par *par)
-{
-
-       int timeout;
-       unsigned char ctl;
-
-       timeout=500;
-       do {
-               ctl = hcb_get_ctl(par);
-               if (!(ctl & HCB_ACK_BIT))
-                       return;
-               udelay(1);
-       } while (timeout--);
-       printk(KERN_ERR "timed out waiting for clear\n");
-}
+/* main hecubafb functions */
 
 static void apollo_send_data(struct hecubafb_par *par, unsigned char data)
 {
        /* set data */
-       hcb_set_data(par, data);
+       par->board->set_data(par, data);
 
        /* set DS low */
-       par->ctl |= HCB_NDS_BIT;
-       hcb_set_ctl(par);
+       par->board->set_ctl(par, HCB_DS_BIT, 0);
 
-       hcb_wait_for_ack(par);
+       /* wait for ack */
+       par->board->wait_for_ack(par, 0);
 
        /* set DS hi */
-       par->ctl &= ~(HCB_NDS_BIT);
-       hcb_set_ctl(par);
+       par->board->set_ctl(par, HCB_DS_BIT, 1);
 
-       hcb_wait_for_ack_clear(par);
+       /* wait for ack to clear */
+       par->board->wait_for_ack(par, 1);
 }
 
 static void apollo_send_command(struct hecubafb_par *par, unsigned char data)
 {
        /* command so set CD to high */
-       par->ctl &= ~(HCB_NCD_BIT);
-       hcb_set_ctl(par);
+       par->board->set_ctl(par, HCB_CD_BIT, 1);
 
        /* actually strobe with command */
        apollo_send_data(par, data);
 
        /* clear CD back to low */
-       par->ctl |= (HCB_NCD_BIT);
-       hcb_set_ctl(par);
+       par->board->set_ctl(par, HCB_CD_BIT, 0);
 }
 
-/* main hecubafb functions */
-
 static void hecubafb_dpy_update(struct hecubafb_par *par)
 {
        int i;
        unsigned char *buf = (unsigned char __force *)par->info->screen_base;
 
-       apollo_send_command(par, 0xA0);
+       apollo_send_command(par, APOLLO_START_NEW_IMG);
 
        for (i=0; i < (DPY_W*DPY_H/8); i++) {
                apollo_send_data(par, *(buf++));
        }
 
-       apollo_send_command(par, 0xA1);
-       apollo_send_command(par, 0xA2);
+       apollo_send_command(par, APOLLO_STOP_IMG_DATA);
+       apollo_send_command(par, APOLLO_DISPLAY_IMG);
 }
 
 /* this is called back from the deferred io workqueue */
@@ -270,41 +159,43 @@ static void hecubafb_imageblit(struct fb_info *info,
 static ssize_t hecubafb_write(struct fb_info *info, const char __user *buf,
                                size_t count, loff_t *ppos)
 {
-       unsigned long p;
-       int err=-EINVAL;
-       struct hecubafb_par *par;
-       unsigned int xres;
-       unsigned int fbmemlength;
+       struct hecubafb_par *par = info->par;
+       unsigned long p = *ppos;
+       void *dst;
+       int err = 0;
+       unsigned long total_size;
 
-       p = *ppos;
-       par = info->par;
-       xres = info->var.xres;
-       fbmemlength = (xres * info->var.yres)/8;
+       if (info->state != FBINFO_STATE_RUNNING)
+               return -EPERM;
 
-       if (p > fbmemlength)
-               return -ENOSPC;
+       total_size = info->fix.smem_len;
 
-       err = 0;
-       if ((count + p) > fbmemlength) {
-               count = fbmemlength - p;
-               err = -ENOSPC;
+       if (p > total_size)
+               return -EFBIG;
+
+       if (count > total_size) {
+               err = -EFBIG;
+               count = total_size;
        }
 
-       if (count) {
-               char *base_addr;
+       if (count + p > total_size) {
+               if (!err)
+                       err = -ENOSPC;
 
-               base_addr = (char __force *)info->screen_base;
-               count -= copy_from_user(base_addr + p, buf, count);
-               *ppos += count;
-               err = -EFAULT;
+               count = total_size - p;
        }
 
-       hecubafb_dpy_update(par);
+       dst = (void __force *) (info->screen_base + p);
+
+       if (copy_from_user(dst, buf, count))
+               err = -EFAULT;
 
-       if (count)
-               return count;
+       if  (!err)
+               *ppos += count;
 
-       return err;
+       hecubafb_dpy_update(par);
+
+       return (err) ? err : count;
 }
 
 static struct fb_ops hecubafb_ops = {
@@ -324,11 +215,21 @@ static struct fb_deferred_io hecubafb_defio = {
 static int __devinit hecubafb_probe(struct platform_device *dev)
 {
        struct fb_info *info;
+       struct hecuba_board *board;
        int retval = -ENOMEM;
        int videomemorysize;
        unsigned char *videomemory;
        struct hecubafb_par *par;
 
+       /* pick up board specific routines */
+       board = dev->dev.platform_data;
+       if (!board)
+               return -EINVAL;
+
+       /* take a reference on the board-specific driver module; bail out if we can't */
+       if (!try_module_get(board->owner))
+               return -ENODEV;
+
        videomemorysize = (DPY_W*DPY_H)/8;
 
        if (!(videomemory = vmalloc(videomemorysize)))
@@ -338,9 +239,9 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
 
        info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
        if (!info)
-               goto err;
+               goto err_fballoc;
 
-       info->screen_base = (char __iomem *) videomemory;
+       info->screen_base = (char __force __iomem *)videomemory;
        info->fbops = &hecubafb_ops;
 
        info->var = hecubafb_var;
@@ -348,14 +249,10 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
        info->fix.smem_len = videomemorysize;
        par = info->par;
        par->info = info;
+       par->board = board;
+       par->send_command = apollo_send_command;
+       par->send_data = apollo_send_data;
 
-       if (!dio_addr || !cio_addr || !c2io_addr) {
-               printk(KERN_WARNING "no IO addresses supplied\n");
-               goto err1;
-       }
-       par->dio_addr = dio_addr;
-       par->cio_addr = cio_addr;
-       par->c2io_addr = c2io_addr;
        info->flags = FBINFO_FLAG_DEFAULT;
 
        info->fbdefio = &hecubafb_defio;
@@ -363,7 +260,7 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
 
        retval = register_framebuffer(info);
        if (retval < 0)
-               goto err1;
+               goto err_fbreg;
        platform_set_drvdata(dev, info);
 
        printk(KERN_INFO
@@ -371,25 +268,16 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
               info->node, videomemorysize >> 10);
 
        /* this inits the dpy */
-       apollo_init_control(par);
-
-       apollo_send_command(par, APOLLO_INIT_DISPLAY);
-       apollo_send_data(par, 0x81);
-
-       /* have to wait while display resets */
-       udelay(1000);
-
-       /* if we were told to splash the screen, we just clear it */
-       if (!nosplash) {
-               apollo_send_command(par, APOLLO_ERASE_DISPLAY);
-               apollo_send_data(par, splashval);
-       }
+       retval = par->board->init(par);
+       if (retval < 0)
+               goto err_fbreg;
 
        return 0;
-err1:
+err_fbreg:
        framebuffer_release(info);
-err:
+err_fballoc:
        vfree(videomemory);
+       module_put(board->owner);
        return retval;
 }
 
@@ -398,9 +286,13 @@ static int __devexit hecubafb_remove(struct platform_device *dev)
        struct fb_info *info = platform_get_drvdata(dev);
 
        if (info) {
+               struct hecubafb_par *par = info->par;
                fb_deferred_io_cleanup(info);
                unregister_framebuffer(info);
                vfree((void __force *)info->screen_base);
+               if (par->board->remove)
+                       par->board->remove(par);
+               module_put(par->board->owner);
                framebuffer_release(info);
        }
        return 0;
@@ -410,62 +302,24 @@ static struct platform_driver hecubafb_driver = {
        .probe  = hecubafb_probe,
        .remove = hecubafb_remove,
        .driver = {
+               .owner  = THIS_MODULE,
                .name   = "hecubafb",
        },
 };
 
-static struct platform_device *hecubafb_device;
-
 static int __init hecubafb_init(void)
 {
-       int ret;
-
-       if (!hecubafb_enable) {
-               printk(KERN_ERR "Use hecubafb_enable to enable the device\n");
-               return -ENXIO;
-       }
-
-       ret = platform_driver_register(&hecubafb_driver);
-       if (!ret) {
-               hecubafb_device = platform_device_alloc("hecubafb", 0);
-               if (hecubafb_device)
-                       ret = platform_device_add(hecubafb_device);
-               else
-                       ret = -ENOMEM;
-
-               if (ret) {
-                       platform_device_put(hecubafb_device);
-                       platform_driver_unregister(&hecubafb_driver);
-               }
-       }
-       return ret;
-
+       return platform_driver_register(&hecubafb_driver);
 }
 
 static void __exit hecubafb_exit(void)
 {
-       platform_device_unregister(hecubafb_device);
        platform_driver_unregister(&hecubafb_driver);
 }
 
-module_param(nosplash, uint, 0);
-MODULE_PARM_DESC(nosplash, "Disable doing the splash screen");
-module_param(hecubafb_enable, uint, 0);
-MODULE_PARM_DESC(hecubafb_enable, "Enable communication with Hecuba board");
-module_param(dio_addr, ulong, 0);
-MODULE_PARM_DESC(dio_addr, "IO address for data, eg: 0x480");
-module_param(cio_addr, ulong, 0);
-MODULE_PARM_DESC(cio_addr, "IO address for control, eg: 0x400");
-module_param(c2io_addr, ulong, 0);
-MODULE_PARM_DESC(c2io_addr, "IO address for secondary control, eg: 0x408");
-module_param(splashval, ulong, 0);
-MODULE_PARM_DESC(splashval, "Splash pattern: 0x00 is black, 0x01 is white");
-module_param(irq, uint, 0);
-MODULE_PARM_DESC(irq, "IRQ for the Hecuba board");
-
 module_init(hecubafb_init);
 module_exit(hecubafb_exit);
 
-MODULE_DESCRIPTION("fbdev driver for Hecuba board");
+MODULE_DESCRIPTION("fbdev driver for Hecuba/Apollo controller");
 MODULE_AUTHOR("Jaya Kumar");
 MODULE_LICENSE("GPL");
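
After this rewrite, hecubafb talks to the Apollo chip only through callbacks supplied by a board driver via platform_data (par->board->set_data, set_ctl, wait_for_ack, init, remove), with n411.c named as the example board driver. The structure behind those callbacks is declared in the new <video/hecubafb.h>, which this diff does not show; the outline below is inferred from the calls made above, so the exact field order and prototypes are assumptions.

/* Inferred sketch -- the authoritative definition is in include/video/hecubafb.h. */
struct hecuba_board {
        struct module *owner;                    /* pinned with try_module_get() in probe */
        void (*remove)(struct hecubafb_par *);   /* optional board teardown */
        void (*set_ctl)(struct hecubafb_par *, unsigned char bit, unsigned char state);
        void (*set_data)(struct hecubafb_par *, unsigned char value);
        void (*wait_for_ack)(struct hecubafb_par *, int clear);
        int (*init)(struct hecubafb_par *);      /* board bring-up, called from probe */
};

A board driver such as n411.c fills one of these in and registers a "hecubafb" platform device with it as platform_data, so hecubafb itself stays free of I/O port and GPIO specifics.
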
index 3ab91bf21576b37d1b31eb7b784030d953d0b587..15d50b9906ced3eb1d76da6e1bc45844d9165f99 100644 (file)
@@ -1151,8 +1151,10 @@ imsttfb_load_cursor_image(struct imstt_par *par, int width, int height, __u8 fgc
                                par->cmap_regs[TVPCRDAT] = 0xff;                eieio();
                        }
                par->cmap_regs[TVPCADRW] = 0x00;        eieio();
-               for (x = 0; x < 12; x++)
-                       par->cmap_regs[TVPCDATA] = fgc; eieio();
+               for (x = 0; x < 12; x++) {
+                       par->cmap_regs[TVPCDATA] = fgc;
+                       eieio();
+               }
        }
        return 1;
 }
@@ -1476,7 +1478,7 @@ imsttfb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        
        dp = pci_device_to_OF_node(pdev);
        if(dp)
-               printk(KERN_INFO "%s: OF name %s\n",__FUNCTION__, dp->name);
+               printk(KERN_INFO "%s: OF name %s\n",__func__, dp->name);
        else
                printk(KERN_ERR "imsttfb: no OF node for pci device\n");
 #endif /* CONFIG_PPC_OF */
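
This hunk, like many of the imxfb and matrox hunks further down, replaces the GCC-specific __FUNCTION__ with the C99 predefined identifier __func__, which expands to the name of the enclosing function. A self-contained example of the behaviour (plain C, outside the kernel):

#include <stdio.h>

static void probe_one(void)
{
        /* __func__ behaves like a static const char [] holding "probe_one" */
        printf("%s: called\n", __func__);
}

int main(void)
{
        probe_one();
        return 0;
}
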
index 11609552a387301bc00515ca75b3af0c37d21b56..94e4d3ac1a05b3a265b9d86aecdf3d94025b96b6 100644 (file)
@@ -415,7 +415,7 @@ static void imxfb_setup_gpio(struct imxfb_info *fbi)
 static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
 {
        struct imxfb_info *fbi = platform_get_drvdata(dev);
-       pr_debug("%s\n",__FUNCTION__);
+       pr_debug("%s\n",__func__);
 
        imxfb_disable_controller(fbi);
        return 0;
@@ -424,7 +424,7 @@ static int imxfb_suspend(struct platform_device *dev, pm_message_t state)
 static int imxfb_resume(struct platform_device *dev)
 {
        struct imxfb_info *fbi = platform_get_drvdata(dev);
-       pr_debug("%s\n",__FUNCTION__);
+       pr_debug("%s\n",__func__);
 
        imxfb_enable_controller(fbi);
        return 0;
@@ -440,7 +440,7 @@ static int __init imxfb_init_fbinfo(struct device *dev)
        struct fb_info *info = dev_get_drvdata(dev);
        struct imxfb_info *fbi = info->par;
 
-       pr_debug("%s\n",__FUNCTION__);
+       pr_debug("%s\n",__func__);
 
        info->pseudo_palette = kmalloc( sizeof(u32) * 16, GFP_KERNEL);
        if (!info->pseudo_palette)
index 8367961779420c275b0b5e2f0a693b2f702fa73c..3325fbd68ab3b235a9ec370e59cd96314b414359 100644 (file)
@@ -12,9 +12,9 @@
 #endif
 
 /*** Version/name ***/
-#define INTELFB_VERSION                        "0.9.4"
+#define INTELFB_VERSION                        "0.9.5"
 #define INTELFB_MODULE_NAME            "intelfb"
-#define SUPPORTED_CHIPSETS             "830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM"
+#define SUPPORTED_CHIPSETS             "830M/845G/852GM/855GM/865G/915G/915GM/945G/945GM/965G/965GM"
 
 
 /*** Debug/feature defines ***/
@@ -58,6 +58,8 @@
 #define PCI_DEVICE_ID_INTEL_915GM      0x2592
 #define PCI_DEVICE_ID_INTEL_945G       0x2772
 #define PCI_DEVICE_ID_INTEL_945GM      0x27A2
+#define PCI_DEVICE_ID_INTEL_965G       0x29A2
+#define PCI_DEVICE_ID_INTEL_965GM      0x2A02
 
 /* Size of MMIO region */
 #define INTEL_REG_SIZE                 0x80000
@@ -158,6 +160,8 @@ enum intel_chips {
        INTEL_915GM,
        INTEL_945G,
        INTEL_945GM,
+       INTEL_965G,
+       INTEL_965GM,
 };
 
 struct intelfb_hwstate {
@@ -358,7 +362,9 @@ struct intelfb_info {
 #define IS_I9XX(dinfo) (((dinfo)->chipset == INTEL_915G) ||    \
                        ((dinfo)->chipset == INTEL_915GM) ||    \
                        ((dinfo)->chipset == INTEL_945G) ||     \
-                       ((dinfo)->chipset==INTEL_945GM))
+                       ((dinfo)->chipset == INTEL_945GM) ||    \
+                       ((dinfo)->chipset == INTEL_965G) ||     \
+                       ((dinfo)->chipset == INTEL_965GM))
 
 #ifndef FBIO_WAITFORVSYNC
 #define FBIO_WAITFORVSYNC      _IOW('F', 0x20, __u32)
index 94c08bb5acf18d146eb62a19b5609d05dc72b906..ca95f09d8b43f7e9f88ddf7681e361f386493c0c 100644 (file)
@@ -169,6 +169,8 @@ void intelfb_create_i2c_busses(struct intelfb_info *dinfo)
                /* has some LVDS + tv-out */
        case INTEL_945G:
        case INTEL_945GM:
+       case INTEL_965G:
+       case INTEL_965GM:
                /* SDVO ports have a single control bus - 2 devices */
                dinfo->output[i].type = INTELFB_OUTPUT_SDVO;
                intelfb_setup_i2c_bus(dinfo, &dinfo->output[i].i2c_bus,
index 481d58f7535d0cc4cb5e09ba258a3a486f3cef33..e44303f9bc524ea64e86bd00030ffbaa3110dd01 100644 (file)
@@ -2,7 +2,7 @@
  * intelfb
  *
  * Linux framebuffer driver for Intel(R) 830M/845G/852GM/855GM/865G/915G/915GM/
- * 945G/945GM integrated graphics chips.
+ * 945G/945GM/965G/965GM integrated graphics chips.
  *
  * Copyright © 2002, 2003 David Dawes <dawes@xfree86.org>
  *                   2004 Sylvain Meyer
@@ -99,6 +99,9 @@
  *              Add vram option to reserve more memory than stolen by BIOS
  *              Fix intelfbhw_pan_display typo
  *              Add __initdata annotations
+ *
+ *    04/2008 - Version 0.9.5
+ *              Add support for 965G/965GM. (Maik Broemme <mbroemme@plusserver.de>)
  */
 
 #include <linux/module.h>
@@ -180,6 +183,8 @@ static struct pci_device_id intelfb_pci_table[] __devinitdata = {
        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_915GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_915GM },
        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945G },
        { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_945GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_945GM },
+       { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_965G, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_965G },
+       { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_965GM, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, INTELFB_CLASS_MASK, INTEL_965GM },
        { 0, }
 };
 
@@ -549,7 +554,10 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
        if ((ent->device == PCI_DEVICE_ID_INTEL_915G) ||
            (ent->device == PCI_DEVICE_ID_INTEL_915GM) ||
            (ent->device == PCI_DEVICE_ID_INTEL_945G)  ||
-           (ent->device == PCI_DEVICE_ID_INTEL_945GM)) {
+           (ent->device == PCI_DEVICE_ID_INTEL_945GM) ||
+           (ent->device == PCI_DEVICE_ID_INTEL_965G) ||
+           (ent->device == PCI_DEVICE_ID_INTEL_965GM)) {
+
                aperture_bar = 2;
                mmio_bar = 0;
        }
index fa1fff5535654e4f6bbce1c294efd9031d16d1a2..8e6d6a4db0ad85a52d8fd8313fcaf7f9f6884d3b 100644 (file)
@@ -143,6 +143,18 @@ int intelfbhw_get_chipset(struct pci_dev *pdev, struct intelfb_info *dinfo)
                dinfo->mobile = 1;
                dinfo->pll_index = PLLS_I9xx;
                return 0;
+       case PCI_DEVICE_ID_INTEL_965G:
+               dinfo->name = "Intel(R) 965G";
+               dinfo->chipset = INTEL_965G;
+               dinfo->mobile = 0;
+               dinfo->pll_index = PLLS_I9xx;
+               return 0;
+       case PCI_DEVICE_ID_INTEL_965GM:
+               dinfo->name = "Intel(R) 965GM";
+               dinfo->chipset = INTEL_965GM;
+               dinfo->mobile = 1;
+               dinfo->pll_index = PLLS_I9xx;
+               return 0;
        default:
                return 1;
        }
@@ -174,7 +186,9 @@ int intelfbhw_get_memory(struct pci_dev *pdev, int *aperture_size,
        case PCI_DEVICE_ID_INTEL_915GM:
        case PCI_DEVICE_ID_INTEL_945G:
        case PCI_DEVICE_ID_INTEL_945GM:
-               /* 915 and 945 chipsets support a 256MB aperture.
+       case PCI_DEVICE_ID_INTEL_965G:
+       case PCI_DEVICE_ID_INTEL_965GM:
+               /* 915, 945 and 965 chipsets support a 256MB aperture.
                   Aperture size is determined by inspecting the
                   base address of the aperture. */
                if (pci_resource_start(pdev, 2) & 0x08000000)
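
The comment in this hunk describes sizing the aperture by inspecting the base address of BAR 2 rather than reading a dedicated size register. As a standalone sketch of that kind of heuristic — the 128MB/256MB split and the size constants are assumptions for illustration, not text taken from this diff:

        /* illustrative only: a BAR placed on a 128MB (but not 256MB)
         * boundary suggests the smaller aperture */
        if (pci_resource_start(pdev, 2) & 0x08000000)
                *aperture_size = 128 * 1024 * 1024;
        else
                *aperture_size = 256 * 1024 * 1024;
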
index 45b9a5d55dec97d7f7ae3303fedb2a71e569afff..f3160fc29795354a680e13d5a576876f70b42af7 100644 (file)
@@ -614,7 +614,7 @@ static int __devinit leo_probe(struct of_device *op, const struct of_device_id *
 
        dev_set_drvdata(&op->dev, info);
 
-       printk("%s: leo at %lx:%lx\n",
+       printk(KERN_INFO "%s: leo at %lx:%lx\n",
               dp->full_name,
               par->which_io, par->physbase);
 
index c4b570b4a4df0e448365acd66f7c6ab8c7284303..0ce3b0a8979805333158d8cd67f44a304ab9f43c 100644 (file)
@@ -37,7 +37,7 @@ static void DAC1064_calcclock(CPMINFO unsigned int freq, unsigned int fmax, unsi
        unsigned int fvco;
        unsigned int p;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
        
        /* only for devices older than G450 */
 
@@ -83,7 +83,7 @@ static const unsigned char MGA1064_DAC[] = {
 static void DAC1064_setpclk(WPMINFO unsigned long fout) {
        unsigned int m, n, p;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        DAC1064_calcclock(PMINFO fout, ACCESS_FBINFO(max_pixel_clock), &m, &n, &p);
        ACCESS_FBINFO(hw).DACclk[0] = m;
@@ -95,7 +95,7 @@ static void DAC1064_setmclk(WPMINFO int oscinfo, unsigned long fmem) {
        u_int32_t mx;
        struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        if (ACCESS_FBINFO(devflags.noinit)) {
                /* read MCLK and give up... */
@@ -338,7 +338,7 @@ void DAC1064_global_restore(WPMINFO2) {
 static int DAC1064_init_1(WPMINFO struct my_timming* m) {
        struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        memcpy(hw->DACreg, MGA1064_DAC, sizeof(MGA1064_DAC_regs));
        switch (ACCESS_FBINFO(fbcon).var.bits_per_pixel) {
@@ -374,7 +374,7 @@ static int DAC1064_init_1(WPMINFO struct my_timming* m) {
 static int DAC1064_init_2(WPMINFO struct my_timming* m) {
        struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        if (ACCESS_FBINFO(fbcon).var.bits_per_pixel > 16) {     /* 256 entries */
                int i;
@@ -418,7 +418,7 @@ static void DAC1064_restore_1(WPMINFO2) {
 
        CRITFLAGS
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        CRITBEGIN
 
@@ -448,7 +448,7 @@ static void DAC1064_restore_2(WPMINFO2) {
        unsigned int i;
 #endif
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
 #ifdef DEBUG
        dprintk(KERN_DEBUG "DAC1064regs ");
@@ -521,7 +521,7 @@ static struct matrox_altout g450out = {
 static int MGA1064_init(WPMINFO struct my_timming* m) {
        struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        if (DAC1064_init_1(PMINFO m)) return 1;
        if (matroxfb_vgaHWinit(PMINFO m)) return 1;
@@ -543,7 +543,7 @@ static int MGA1064_init(WPMINFO struct my_timming* m) {
 static int MGAG100_init(WPMINFO struct my_timming* m) {
        struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        if (DAC1064_init_1(PMINFO m)) return 1;
        hw->MXoptionReg &= ~0x2000;
@@ -565,7 +565,7 @@ static int MGAG100_init(WPMINFO struct my_timming* m) {
 #ifdef CONFIG_FB_MATROX_MYSTIQUE
 static void MGA1064_ramdac_init(WPMINFO2) {
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        /* ACCESS_FBINFO(features.DAC1064.vco_freq_min) = 120000; */
        ACCESS_FBINFO(features.pll.vco_freq_min) = 62000;
@@ -594,7 +594,7 @@ static void MGAG100_progPixClock(CPMINFO int flags, int m, int n, int p) {
        int selClk;
        int clk;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        outDAC1064(PMINFO M1064_XPIXCLKCTRL, inDAC1064(PMINFO M1064_XPIXCLKCTRL) | M1064_XPIXCLKCTRL_DIS |
                   M1064_XPIXCLKCTRL_PLL_UP);
@@ -636,7 +636,7 @@ static void MGAG100_progPixClock(CPMINFO int flags, int m, int n, int p) {
 static void MGAG100_setPixClock(CPMINFO int flags, int freq) {
        unsigned int m, n, p;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        DAC1064_calcclock(PMINFO freq, ACCESS_FBINFO(max_pixel_clock), &m, &n, &p);
        MGAG100_progPixClock(PMINFO flags, m, n, p);
@@ -650,7 +650,7 @@ static int MGA1064_preinit(WPMINFO2) {
                                             2048,    0};
        struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        /* ACCESS_FBINFO(capable.cfb4) = 0; ... preinitialized by 0 */
        ACCESS_FBINFO(capable.text) = 1;
@@ -683,7 +683,7 @@ static int MGA1064_preinit(WPMINFO2) {
 
 static void MGA1064_reset(WPMINFO2) {
 
-       DBG(__FUNCTION__);
+       DBG(__func__);
 
        MGA1064_ramdac_init(PMINFO2);
 }
@@ -819,7 +819,7 @@ static int MGAG100_preinit(WPMINFO2) {
        u_int32_t q;
 #endif
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        /* there are some instabilities if in_div > 19 && vco < 61000 */
        if (ACCESS_FBINFO(devflags.g450dac)) {
@@ -956,7 +956,7 @@ static void MGAG100_reset(WPMINFO2) {
        u_int8_t b;
        struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        {
 #ifdef G100_BROKEN_IBM_82351
@@ -1015,7 +1015,7 @@ static void MGA1064_restore(WPMINFO2) {
 
        CRITFLAGS
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        CRITBEGIN
 
@@ -1041,7 +1041,7 @@ static void MGAG100_restore(WPMINFO2) {
 
        CRITFLAGS
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        CRITBEGIN
 
index 9445cdb759b1f090d21ffa6bb1a30bde8dccfbe5..13524821e242c7ef0b9f99d11ff31501da995543 100644 (file)
@@ -283,7 +283,7 @@ static int Ti3026_calcclock(CPMINFO unsigned int freq, unsigned int fmax, int* i
        unsigned int fvco;
        unsigned int lin, lfeed, lpost;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        fvco = PLL_calcclock(PMINFO freq, fmax, &lin, &lfeed, &lpost);
        fvco >>= (*post = lpost);
@@ -297,7 +297,7 @@ static int Ti3026_setpclk(WPMINFO int clk) {
        unsigned int pixfeed, pixin, pixpost;
        struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        f_pll = Ti3026_calcclock(PMINFO clk, ACCESS_FBINFO(max_pixel_clock), &pixin, &pixfeed, &pixpost);
 
@@ -365,7 +365,7 @@ static int Ti3026_init(WPMINFO struct my_timming* m) {
        u_int8_t muxctrl = isInterleave(MINFO) ? TVP3026_XMUXCTRL_MEMORY_64BIT : TVP3026_XMUXCTRL_MEMORY_32BIT;
        struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        memcpy(hw->DACreg, MGADACbpp32, sizeof(hw->DACreg));
        switch (ACCESS_FBINFO(fbcon).var.bits_per_pixel) {
@@ -440,7 +440,7 @@ static void ti3026_setMCLK(WPMINFO int fout){
        unsigned int rfhcnt, mclk_ctl;
        int tmout;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        f_pll = Ti3026_calcclock(PMINFO fout, ACCESS_FBINFO(max_pixel_clock), &mclk_n, &mclk_m, &mclk_p);
 
@@ -534,7 +534,7 @@ static void ti3026_setMCLK(WPMINFO int fout){
 
 static void ti3026_ramdac_init(WPMINFO2) {
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        ACCESS_FBINFO(features.pll.vco_freq_min) = 110000;
        ACCESS_FBINFO(features.pll.ref_freq)     = 114545;
@@ -554,7 +554,7 @@ static void Ti3026_restore(WPMINFO2) {
        struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
        CRITFLAGS
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
 #ifdef DEBUG
        dprintk(KERN_INFO "EXTVGA regs: ");
@@ -662,7 +662,7 @@ static void Ti3026_restore(WPMINFO2) {
 
 static void Ti3026_reset(WPMINFO2) {
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        ti3026_ramdac_init(PMINFO2);
 }
@@ -680,7 +680,7 @@ static int Ti3026_preinit(WPMINFO2) {
                                          2048, 0};
        struct matrox_hw_state* hw = &ACCESS_FBINFO(hw);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        ACCESS_FBINFO(millenium) = 1;
        ACCESS_FBINFO(milleniumII) = (ACCESS_FBINFO(pcidev)->device != PCI_DEVICE_ID_MATROX_MIL);
index 3660d2673bdc9064f988858830dca640e1c10be1..9c3aeee1cc4f4236ca070c25a82566dbe934915c 100644 (file)
@@ -113,7 +113,7 @@ void matrox_cfbX_init(WPMINFO2) {
        u_int32_t mopmode;
        int accel;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        mpitch = ACCESS_FBINFO(fbcon).var.xres_virtual;
 
@@ -199,7 +199,7 @@ static void matrox_accel_bmove(WPMINFO int vxres, int sy, int sx, int dy, int dx
        int start, end;
        CRITFLAGS
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        CRITBEGIN
 
@@ -235,7 +235,7 @@ static void matrox_accel_bmove_lin(WPMINFO int vxres, int sy, int sx, int dy, in
        int start, end;
        CRITFLAGS
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        CRITBEGIN
 
@@ -287,7 +287,7 @@ static void matroxfb_accel_clear(WPMINFO u_int32_t color, int sy, int sx, int he
                int width) {
        CRITFLAGS
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        CRITBEGIN
 
@@ -315,7 +315,7 @@ static void matroxfb_cfb4_clear(WPMINFO u_int32_t bgx, int sy, int sx, int heigh
        int whattodo;
        CRITFLAGS
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        CRITBEGIN
 
@@ -388,7 +388,7 @@ static void matroxfb_1bpp_imageblit(WPMINFO u_int32_t fgx, u_int32_t bgx,
        int easy;
        CRITFLAGS
 
-       DBG_HEAVY(__FUNCTION__);
+       DBG_HEAVY(__func__);
 
        step = (width + 7) >> 3;
        charcell = height * step;
@@ -469,7 +469,7 @@ static void matroxfb_1bpp_imageblit(WPMINFO u_int32_t fgx, u_int32_t bgx,
 static void matroxfb_imageblit(struct fb_info* info, const struct fb_image* image) {
        MINFO_FROM_INFO(info);
 
-       DBG_HEAVY(__FUNCTION__);
+       DBG_HEAVY(__func__);
 
        if (image->depth == 1) {
                u_int32_t fgx, bgx;
index b25972ac6eeb4c9c1882b611f048c0895afa37cb..54e82f35353dec0e843e56dfed47c163a410b9dd 100644 (file)
@@ -312,7 +312,7 @@ static void matrox_pan_var(WPMINFO struct fb_var_screeninfo *var) {
 
        CRITFLAGS
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        if (ACCESS_FBINFO(dead))
                return;
@@ -392,7 +392,7 @@ static int matroxfb_open(struct fb_info *info, int user)
 {
        MINFO_FROM_INFO(info);
 
-       DBG_LOOP(__FUNCTION__)
+       DBG_LOOP(__func__)
 
        if (ACCESS_FBINFO(dead)) {
                return -ENXIO;
@@ -408,7 +408,7 @@ static int matroxfb_release(struct fb_info *info, int user)
 {
        MINFO_FROM_INFO(info);
 
-       DBG_LOOP(__FUNCTION__)
+       DBG_LOOP(__func__)
 
        if (user) {
                if (0 == --ACCESS_FBINFO(userusecount)) {
@@ -425,7 +425,7 @@ static int matroxfb_pan_display(struct fb_var_screeninfo *var,
                struct fb_info* info) {
        MINFO_FROM_INFO(info);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        matrox_pan_var(PMINFO var);
        return 0;
@@ -434,7 +434,7 @@ static int matroxfb_pan_display(struct fb_var_screeninfo *var,
 static int matroxfb_get_final_bppShift(CPMINFO int bpp) {
        int bppshft2;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        bppshft2 = bpp;
        if (!bppshft2) {
@@ -451,7 +451,7 @@ static int matroxfb_test_and_set_rounding(CPMINFO int xres, int bpp) {
        int over;
        int rounding;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        switch (bpp) {
                case 0:         return xres;
@@ -482,7 +482,7 @@ static int matroxfb_pitch_adjust(CPMINFO int xres, int bpp) {
        const int* width;
        int xres_new;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        if (!bpp) return xres;
 
@@ -504,7 +504,7 @@ static int matroxfb_pitch_adjust(CPMINFO int xres, int bpp) {
 
 static int matroxfb_get_cmap_len(struct fb_var_screeninfo *var) {
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        switch (var->bits_per_pixel) {
                case 4:
@@ -548,7 +548,7 @@ static int matroxfb_decode_var(CPMINFO struct fb_var_screeninfo *var, int *visua
        unsigned int vramlen;
        unsigned int memlen;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        switch (bpp) {
                case 4:  if (!ACCESS_FBINFO(capable.cfb4)) return -EINVAL;
@@ -648,7 +648,7 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
        struct matrox_fb_info* minfo = container_of(fb_info, struct matrox_fb_info, fbcon);
 #endif
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        /*
         *  Set a single color register. The values supplied are
@@ -707,7 +707,7 @@ static int matroxfb_setcolreg(unsigned regno, unsigned red, unsigned green,
 static void matroxfb_init_fix(WPMINFO2)
 {
        struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix;
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        strcpy(fix->id,"MATROX");
 
@@ -722,7 +722,7 @@ static void matroxfb_init_fix(WPMINFO2)
 static void matroxfb_update_fix(WPMINFO2)
 {
        struct fb_fix_screeninfo *fix = &ACCESS_FBINFO(fbcon).fix;
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        fix->smem_start = ACCESS_FBINFO(video.base) + ACCESS_FBINFO(curr.ydstorg.bytes);
        fix->smem_len = ACCESS_FBINFO(video.len_usable) - ACCESS_FBINFO(curr.ydstorg.bytes);
@@ -753,7 +753,7 @@ static int matroxfb_set_par(struct fb_info *info)
        struct fb_var_screeninfo *var;
        MINFO_FROM_INFO(info);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        if (ACCESS_FBINFO(dead)) {
                return -ENXIO;
@@ -876,7 +876,7 @@ static int matroxfb_ioctl(struct fb_info *info,
        void __user *argp = (void __user *)arg;
        MINFO_FROM_INFO(info);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        if (ACCESS_FBINFO(dead)) {
                return -ENXIO;
@@ -1175,7 +1175,7 @@ static int matroxfb_blank(int blank, struct fb_info *info)
        CRITFLAGS
        MINFO_FROM_INFO(info);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        if (ACCESS_FBINFO(dead))
                return 1;
@@ -1287,7 +1287,7 @@ static int matroxfb_getmemory(WPMINFO unsigned int maxSize, unsigned int *realSi
        unsigned char bytes[32];
        unsigned char* tmp;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        vm = ACCESS_FBINFO(video.vbase);
        maxSize &= ~0x1FFFFF;   /* must be X*2MB (really it must be 2 or X*4MB) */
@@ -1593,7 +1593,7 @@ static int initMatrox2(WPMINFO struct board* b){
                { },
        };
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        /* set default values... */
        vesafb_defined.accel_flags = FB_ACCELF_TEXT;
@@ -2006,7 +2006,7 @@ static int matroxfb_probe(struct pci_dev* pdev, const struct pci_device_id* dumm
 #ifndef CONFIG_FB_MATROX_MULTIHEAD
        static int registered = 0;
 #endif
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        svid = pdev->subsystem_vendor;
        sid = pdev->subsystem_device;
@@ -2301,7 +2301,7 @@ static void __exit matrox_done(void) {
 static int __init matroxfb_setup(char *options) {
        char *this_opt;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        if (!options || !*options)
                return 0;
@@ -2444,7 +2444,7 @@ static int __init matroxfb_init(void)
        char *option = NULL;
        int err = 0;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        if (fb_get_options("matroxfb", &option))
                return -ENODEV;
@@ -2556,7 +2556,7 @@ MODULE_PARM_DESC(cmode, "Specify the video depth that should be used (8bit defau
 
 int __init init_module(void){
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        if (disabled)
                return -ENXIO;
index a6ab5b6a58d06df25727a43555d6bbfc2d48dcc8..7ac4c5f6145d0e0746f3d8233f8f64ed08b05cc6 100644 (file)
@@ -420,7 +420,7 @@ static int matroxfb_dh_ioctl(struct fb_info *info,
 #define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
        MINFO_FROM(m2info->primary_dev);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        switch (cmd) {
                case FBIOGET_VBLANK:
index 0cd58f84fb464170ada3264bfca5500e1dc634c9..89da27bd5c498147d5308e3258af4f2a96349c7d 100644 (file)
@@ -220,7 +220,7 @@ static int matroxfb_PLL_mavenclock(const struct matrox_pll_features2* pll,
        unsigned int scrlen;
        unsigned int fmax;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        scrlen = htotal * (vtotal - 1);
        fwant = htotal * vtotal;
index ab7fb50bc1deb32ffef63984ff82bbde56feb764..5b5f072fc1a818f57ec989daaac4c0afef79ddb4 100644 (file)
 #include <linux/matroxfb.h>
 
 void matroxfb_DAC_out(CPMINFO int reg, int val) {
-       DBG_REG(__FUNCTION__)
+       DBG_REG(__func__)
        mga_outb(M_RAMDAC_BASE+M_X_INDEX, reg);
        mga_outb(M_RAMDAC_BASE+M_X_DATAREG, val);
 }
 
 int matroxfb_DAC_in(CPMINFO int reg) {
-       DBG_REG(__FUNCTION__)
+       DBG_REG(__func__)
        mga_outb(M_RAMDAC_BASE+M_X_INDEX, reg);
        return mga_inb(M_RAMDAC_BASE+M_X_DATAREG);
 }
@@ -104,7 +104,7 @@ int matroxfb_DAC_in(CPMINFO int reg) {
 void matroxfb_var2my(struct fb_var_screeninfo* var, struct my_timming* mt) {
        unsigned int pixclock = var->pixclock;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        if (!pixclock) pixclock = 10000;        /* 10ns = 100MHz */
        mt->pixclock = 1000000000 / pixclock;
@@ -131,7 +131,7 @@ int matroxfb_PLL_calcclock(const struct matrox_pll_features* pll, unsigned int f
        unsigned int fwant;
        unsigned int p;
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        fwant = freq;
 
@@ -192,7 +192,7 @@ int matroxfb_vgaHWinit(WPMINFO struct my_timming* m) {
        int i;
        struct matrox_hw_state * const hw = &ACCESS_FBINFO(hw);
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        hw->SEQ[0] = 0x00;
        hw->SEQ[1] = 0x01;      /* or 0x09 */
@@ -336,7 +336,7 @@ void matroxfb_vgaHWrestore(WPMINFO2) {
        struct matrox_hw_state * const hw = &ACCESS_FBINFO(hw);
        CRITFLAGS
 
-       DBG(__FUNCTION__)
+       DBG(__func__)
 
        dprintk(KERN_INFO "MiscOutReg: %02X\n", hw->MiscOutReg);
        dprintk(KERN_INFO "SEQ regs:   ");
@@ -522,8 +522,6 @@ static void parse_bios(unsigned char __iomem* vbios, struct matrox_bios* bd) {
 #endif
 }
 
-#define get_u16(x) (le16_to_cpu(get_unaligned((__u16*)(x))))
-#define get_u32(x) (le32_to_cpu(get_unaligned((__u32*)(x))))
 static int parse_pins1(WPMINFO const struct matrox_bios* bd) {
        unsigned int maxdac;
 
@@ -532,11 +530,12 @@ static int parse_pins1(WPMINFO const struct matrox_bios* bd) {
                case 1:         maxdac = 220000; break;
                default:        maxdac = 240000; break;
        }
-       if (get_u16(bd->pins + 24)) {
-               maxdac = get_u16(bd->pins + 24) * 10;
+       if (get_unaligned_le16(bd->pins + 24)) {
+               maxdac = get_unaligned_le16(bd->pins + 24) * 10;
        }
        MINFO->limits.pixel.vcomax = maxdac;
-       MINFO->values.pll.system = get_u16(bd->pins + 28) ? get_u16(bd->pins + 28) * 10 : 50000;
+       MINFO->values.pll.system = get_unaligned_le16(bd->pins + 28) ?
+               get_unaligned_le16(bd->pins + 28) * 10 : 50000;
        /* ignore 4MB, 8MB, module clocks */
        MINFO->features.pll.ref_freq = 14318;
        MINFO->values.reg.mctlwtst      = 0x00030101;
@@ -575,7 +574,8 @@ static void default_pins2(WPMINFO2) {
 static int parse_pins3(WPMINFO const struct matrox_bios* bd) {
        MINFO->limits.pixel.vcomax      =
        MINFO->limits.system.vcomax     = (bd->pins[36] == 0xFF) ? 230000                       : ((bd->pins[36] + 100) * 1000);
-       MINFO->values.reg.mctlwtst      = get_u32(bd->pins + 48) == 0xFFFFFFFF ? 0x01250A21     : get_u32(bd->pins + 48);
+       MINFO->values.reg.mctlwtst      = get_unaligned_le32(bd->pins + 48) == 0xFFFFFFFF ?
+               0x01250A21 : get_unaligned_le32(bd->pins + 48);
        /* memory config */
        MINFO->values.reg.memrdbk       = ((bd->pins[57] << 21) & 0x1E000000) |
                                          ((bd->pins[57] << 22) & 0x00C00000) |
@@ -601,7 +601,7 @@ static void default_pins3(WPMINFO2) {
 static int parse_pins4(WPMINFO const struct matrox_bios* bd) {
        MINFO->limits.pixel.vcomax      = (bd->pins[ 39] == 0xFF) ? 230000                      : bd->pins[ 39] * 4000;
        MINFO->limits.system.vcomax     = (bd->pins[ 38] == 0xFF) ? MINFO->limits.pixel.vcomax  : bd->pins[ 38] * 4000;
-       MINFO->values.reg.mctlwtst      = get_u32(bd->pins + 71);
+       MINFO->values.reg.mctlwtst      = get_unaligned_le32(bd->pins + 71);
        MINFO->values.reg.memrdbk       = ((bd->pins[87] << 21) & 0x1E000000) |
                                          ((bd->pins[87] << 22) & 0x00C00000) |
                                          ((bd->pins[86] <<  1) & 0x000001E0) |
@@ -609,7 +609,7 @@ static int parse_pins4(WPMINFO const struct matrox_bios* bd) {
        MINFO->values.reg.opt           = ((bd->pins[53] << 15) & 0x00400000) |
                                          ((bd->pins[53] << 22) & 0x10000000) |
                                          ((bd->pins[53] <<  7) & 0x00001C00);
-       MINFO->values.reg.opt3          = get_u32(bd->pins + 67);
+       MINFO->values.reg.opt3          = get_unaligned_le32(bd->pins + 67);
        MINFO->values.pll.system        = (bd->pins[ 65] == 0xFF) ? 200000                      : bd->pins[ 65] * 4000;
        MINFO->features.pll.ref_freq    = (bd->pins[ 92] & 0x01) ? 14318 : 27000;
        return 0;
@@ -640,12 +640,12 @@ static int parse_pins5(WPMINFO const struct matrox_bios* bd) {
        MINFO->limits.video.vcomin      = (bd->pins[122] == 0xFF) ? MINFO->limits.system.vcomin : bd->pins[122] * mult;
        MINFO->values.pll.system        =
        MINFO->values.pll.video         = (bd->pins[ 92] == 0xFF) ? 284000                      : bd->pins[ 92] * 4000;
-       MINFO->values.reg.opt           = get_u32(bd->pins+ 48);
-       MINFO->values.reg.opt2          = get_u32(bd->pins+ 52);
-       MINFO->values.reg.opt3          = get_u32(bd->pins+ 94);
-       MINFO->values.reg.mctlwtst      = get_u32(bd->pins+ 98);
-       MINFO->values.reg.memmisc       = get_u32(bd->pins+102);
-       MINFO->values.reg.memrdbk       = get_u32(bd->pins+106);
+       MINFO->values.reg.opt           = get_unaligned_le32(bd->pins + 48);
+       MINFO->values.reg.opt2          = get_unaligned_le32(bd->pins + 52);
+       MINFO->values.reg.opt3          = get_unaligned_le32(bd->pins + 94);
+       MINFO->values.reg.mctlwtst      = get_unaligned_le32(bd->pins + 98);
+       MINFO->values.reg.memmisc       = get_unaligned_le32(bd->pins + 102);
+       MINFO->values.reg.memrdbk       = get_unaligned_le32(bd->pins + 106);
        MINFO->features.pll.ref_freq    = (bd->pins[110] & 0x01) ? 14318 : 27000;
        MINFO->values.memory.ddr        = (bd->pins[114] & 0x60) == 0x20;
        MINFO->values.memory.dll        = (bd->pins[115] & 0x02) != 0;
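
The matroxfb_misc hunks above (and the metronomefb hunk below) drop the local get_u16()/get_u32() macros and the open-coded le32_to_cpu(get_unaligned(...)) pattern in favour of the kernel's get_unaligned_le16()/get_unaligned_le32() helpers, which read a possibly unaligned little-endian value from memory and return it in CPU byte order. A small sketch of the equivalence, using the BIOS "pins" block as a plain byte buffer (the helper name read_pins_u32 is made up for illustration):

#include <linux/kernel.h>
#include <linux/types.h>
#include <asm/unaligned.h>

/* Illustrative only: both reads return the same host-order value for a
 * little-endian u32 stored at an arbitrary, possibly unaligned offset. */
static u32 read_pins_u32(const u8 *pins, unsigned int off)
{
        u32 old_style = le32_to_cpu(get_unaligned((const __le32 *)(pins + off)));
        u32 new_style = get_unaligned_le32(pins + off);

        WARN_ON(old_style != new_style);        /* defined to agree */
        return new_style;
}
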
index e9a89fd82757c606ea93001f986ca06225b9384f..cc4c038a1b3f3c8c22821aab37a54f564ba41d8b 100644 (file)
  * Corporation. http://support.eink.com/community
  *
  * This driver is written to be used with the Metronome display controller.
- * It was tested with an E-Ink 800x600 Vizplex EPD on a Gumstix Connex board
- * using the Lyre interface board.
+ * It is intended to be architecture independent. A board specific driver
+ * must be used to perform all the physical IO interactions. An example
+ * is provided as am200epd.c
  *
- * General notes:
- * - User must set metronomefb_enable=1 to enable it.
- * - See Documentation/fb/metronomefb.txt for how metronome works.
  */
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/uaccess.h>
 #include <linux/irq.h>
 
-#include <asm/arch/pxa-regs.h>
+#include <video/metronomefb.h>
+
 #include <asm/unaligned.h>
 
+
 #define DEBUG 1
 #ifdef DEBUG
 #define DPRINTK(f, a...) printk(KERN_DEBUG "%s: " f, __func__ , ## a)
 #define DPY_W 832
 #define DPY_H 622
 
-struct metromem_desc {
-       u32 mFDADR0;
-       u32 mFSADR0;
-       u32 mFIDR0;
-       u32 mLDCMD0;
-};
-
-struct metromem_cmd {
-       u16 opcode;
-       u16 args[((64-2)/2)];
-       u16 csum;
-};
-
-struct metronomefb_par {
-       unsigned char *metromem;
-       struct metromem_desc *metromem_desc;
-       struct metromem_cmd *metromem_cmd;
-       unsigned char *metromem_wfm;
-       unsigned char *metromem_img;
-       u16 *metromem_img_csum;
-       u16 *csum_table;
-       int metromemsize;
-       dma_addr_t metromem_dma;
-       dma_addr_t metromem_desc_dma;
-       struct fb_info *info;
-       wait_queue_head_t waitq;
-       u8 frame_count;
-};
-
 /* frame differs from image. frame includes non-visible pixels */
 struct epd_frame {
        int fw; /* frame width */
@@ -120,8 +91,7 @@ static struct fb_var_screeninfo metronomefb_var __devinitdata = {
        .transp =       { 0, 0, 0 },
 };
 
-static unsigned int metronomefb_enable;
-
+/* the waveform structure that comes from userspace firmware */
 struct waveform_hdr {
        u8 stuff[32];
 
@@ -236,8 +206,7 @@ static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t,
        }
 
        /* check waveform mode table address checksum */
-       wmta = le32_to_cpu(get_unaligned((__le32 *) wfm_hdr->wmta));
-       wmta &= 0x00FFFFFF;
+       wmta = get_unaligned_le32(wfm_hdr->wmta) & 0x00FFFFFF;
        cksum_idx = wmta + m*4 + 3;
        if (cksum_idx > size)
                return -EINVAL;
@@ -249,8 +218,7 @@ static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t,
        }
 
        /* check waveform temperature table address checksum */
-       tta = le32_to_cpu(get_unaligned((int *) (mem + wmta + m*4)));
-       tta &= 0x00FFFFFF;
+       tta = get_unaligned_le32(mem + wmta + m * 4) & 0x00FFFFFF;
        cksum_idx = tta + trn*4 + 3;
        if (cksum_idx > size)
                return -EINVAL;
@@ -263,8 +231,7 @@ static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t,
 
        /* here we do the real work of putting the waveform into the
        metromem buffer. this does runlength decoding of the waveform */
-       wfm_idx = le32_to_cpu(get_unaligned((__le32 *) (mem + tta + trn*4)));
-       wfm_idx &= 0x00FFFFFF;
+       wfm_idx = get_unaligned_le32(mem + tta + trn * 4) & 0x00FFFFFF;
        owfm_idx = wfm_idx;
        if (wfm_idx > size)
                return -EINVAL;
@@ -301,165 +268,6 @@ static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t,
        return 0;
 }
 
-/* register offsets for gpio control */
-#define LED_GPIO_PIN 51
-#define STDBY_GPIO_PIN 48
-#define RST_GPIO_PIN 49
-#define RDY_GPIO_PIN 32
-#define ERR_GPIO_PIN 17
-#define PCBPWR_GPIO_PIN 16
-
-#define AF_SEL_GPIO_N 0x3
-#define GAFR0_U_OFFSET(pin) ((pin - 16) * 2)
-#define GAFR1_L_OFFSET(pin) ((pin - 32) * 2)
-#define GAFR1_U_OFFSET(pin) ((pin - 48) * 2)
-#define GPDR1_OFFSET(pin) (pin - 32)
-#define GPCR1_OFFSET(pin) (pin - 32)
-#define GPSR1_OFFSET(pin) (pin - 32)
-#define GPCR0_OFFSET(pin) (pin)
-#define GPSR0_OFFSET(pin) (pin)
-
-static void metronome_set_gpio_output(int pin, int val)
-{
-       u8 index;
-
-       index = pin >> 4;
-
-       switch (index) {
-       case 1:
-               if (val)
-                       GPSR0 |= (1 << GPSR0_OFFSET(pin));
-               else
-                       GPCR0 |= (1 << GPCR0_OFFSET(pin));
-               break;
-       case 2:
-               break;
-       case 3:
-               if (val)
-                       GPSR1 |= (1 << GPSR1_OFFSET(pin));
-               else
-                       GPCR1 |= (1 << GPCR1_OFFSET(pin));
-               break;
-       default:
-               printk(KERN_ERR "unimplemented\n");
-       }
-}
-
-static void __devinit metronome_init_gpio_pin(int pin, int dir)
-{
-       u8 index;
-       /* dir 0 is output, 1 is input
-       - do 2 things here:
-       - set gpio alternate function to standard gpio
-       - set gpio direction to input or output  */
-
-       index = pin >> 4;
-       switch (index) {
-       case 1:
-               GAFR0_U &= ~(AF_SEL_GPIO_N << GAFR0_U_OFFSET(pin));
-
-               if (dir)
-                       GPDR0 &= ~(1 << pin);
-               else
-                       GPDR0 |= (1 << pin);
-               break;
-       case 2:
-               GAFR1_L &= ~(AF_SEL_GPIO_N << GAFR1_L_OFFSET(pin));
-
-               if (dir)
-                       GPDR1 &= ~(1 << GPDR1_OFFSET(pin));
-               else
-                       GPDR1 |= (1 << GPDR1_OFFSET(pin));
-               break;
-       case 3:
-               GAFR1_U &= ~(AF_SEL_GPIO_N << GAFR1_U_OFFSET(pin));
-
-               if (dir)
-                       GPDR1 &= ~(1 << GPDR1_OFFSET(pin));
-               else
-                       GPDR1 |= (1 << GPDR1_OFFSET(pin));
-               break;
-       default:
-               printk(KERN_ERR "unimplemented\n");
-       }
-}
-
-static void __devinit metronome_init_gpio_regs(void)
-{
-       metronome_init_gpio_pin(LED_GPIO_PIN, 0);
-       metronome_set_gpio_output(LED_GPIO_PIN, 0);
-
-       metronome_init_gpio_pin(STDBY_GPIO_PIN, 0);
-       metronome_set_gpio_output(STDBY_GPIO_PIN, 0);
-
-       metronome_init_gpio_pin(RST_GPIO_PIN, 0);
-       metronome_set_gpio_output(RST_GPIO_PIN, 0);
-
-       metronome_init_gpio_pin(RDY_GPIO_PIN, 1);
-
-       metronome_init_gpio_pin(ERR_GPIO_PIN, 1);
-
-       metronome_init_gpio_pin(PCBPWR_GPIO_PIN, 0);
-       metronome_set_gpio_output(PCBPWR_GPIO_PIN, 0);
-}
-
-static void metronome_disable_lcd_controller(struct metronomefb_par *par)
-{
-       LCSR = 0xffffffff;      /* Clear LCD Status Register */
-       LCCR0 |= LCCR0_DIS;     /* Disable LCD Controller */
-
-       /* we reset and just wait for things to settle */
-       msleep(200);
-}
-
-static void metronome_enable_lcd_controller(struct metronomefb_par *par)
-{
-       LCSR = 0xffffffff;
-       FDADR0 = par->metromem_desc_dma;
-       LCCR0 |= LCCR0_ENB;
-}
-
-static void __devinit metronome_init_lcdc_regs(struct metronomefb_par *par)
-{
-       /* here we do:
-       - disable the lcd controller
-       - setup lcd control registers
-       - setup dma descriptor
-       - reenable lcd controller
-       */
-
-       /* disable the lcd controller */
-       metronome_disable_lcd_controller(par);
-
-       /* setup lcd control registers */
-       LCCR0 = LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM | LCCR0_PAS
-               | LCCR0_QDM | LCCR0_BM | LCCR0_OUM;
-
-       LCCR1 = (epd_frame_table[0].fw/2 - 1) /* pixels per line */
-               | (27 << 10) /* hsync pulse width - 1 */
-               | (33 << 16) /* eol pixel count */
-               | (33 << 24); /* bol pixel count */
-
-       LCCR2 = (epd_frame_table[0].fh - 1) /* lines per panel */
-               | (24 << 10) /* vsync pulse width - 1 */
-               | (2 << 16) /* eof pixel count */
-               | (0 << 24); /* bof pixel count */
-
-       LCCR3 = 2 /* pixel clock divisor */
-               | (24 << 8) /* AC Bias pin freq */
-               | LCCR3_16BPP /* BPP */
-               | LCCR3_PCP;  /* PCP falling edge */
-
-       /* setup dma descriptor */
-       par->metromem_desc->mFDADR0 = par->metromem_desc_dma;
-       par->metromem_desc->mFSADR0 = par->metromem_dma;
-       par->metromem_desc->mFIDR0 = 0;
-       par->metromem_desc->mLDCMD0 = epd_frame_table[0].fw
-                                       * epd_frame_table[0].fh;
-       /* reenable lcd controller */
-       metronome_enable_lcd_controller(par);
-}
-
 static int metronome_display_cmd(struct metronomefb_par *par)
 {
        int i;
@@ -493,8 +301,7 @@ static int metronome_display_cmd(struct metronomefb_par *par)
        par->metromem_cmd->csum = cs;
        par->metromem_cmd->opcode = opcode; /* display cmd */
 
-       i = wait_event_interruptible_timeout(par->waitq, (GPLR1 & 0x01), HZ);
-       return i;
+       return par->board->met_wait_event_intr(par);
 }
 
 static int __devinit metronome_powerup_cmd(struct metronomefb_par *par)
@@ -518,13 +325,12 @@ static int __devinit metronome_powerup_cmd(struct metronomefb_par *par)
        par->metromem_cmd->csum = cs;
 
        msleep(1);
-       metronome_set_gpio_output(RST_GPIO_PIN, 1);
+       par->board->set_rst(par, 1);
 
        msleep(1);
-       metronome_set_gpio_output(STDBY_GPIO_PIN, 1);
+       par->board->set_stdby(par, 1);
 
-       i = wait_event_timeout(par->waitq, (GPLR1 & 0x01), HZ);
-       return i;
+       return par->board->met_wait_event(par);
 }
 
 static int __devinit metronome_config_cmd(struct metronomefb_par *par)
@@ -569,8 +375,7 @@ static int __devinit metronome_config_cmd(struct metronomefb_par *par)
        par->metromem_cmd->csum = cs;
        par->metromem_cmd->opcode = 0xCC10; /* config cmd */
 
-       i = wait_event_timeout(par->waitq, (GPLR1 & 0x01), HZ);
-       return i;
+       return par->board->met_wait_event(par);
 }
 
 static int __devinit metronome_init_cmd(struct metronomefb_par *par)
@@ -596,16 +401,19 @@ static int __devinit metronome_init_cmd(struct metronomefb_par *par)
        par->metromem_cmd->csum = cs;
        par->metromem_cmd->opcode = 0xCC20; /* init cmd */
 
-       i = wait_event_timeout(par->waitq, (GPLR1 & 0x01), HZ);
-       return i;
+       return par->board->met_wait_event(par);
 }
 
 static int __devinit metronome_init_regs(struct metronomefb_par *par)
 {
        int res;
 
-       metronome_init_gpio_regs();
-       metronome_init_lcdc_regs(par);
+       par->board->init_gpio_regs(par);
+
+       par->board->init_lcdc_regs(par);
+
+       /* now that the lcd is set up, set up the dma descriptor */
+       par->board->post_dma_setup(par);
 
        res = metronome_powerup_cmd(par);
        if (res)
@@ -616,8 +424,6 @@ static int __devinit metronome_init_regs(struct metronomefb_par *par)
                return res;
 
        res = metronome_init_cmd(par);
-       if (res)
-               return res;
 
        return res;
 }
@@ -632,7 +438,7 @@ static void metronomefb_dpy_update(struct metronomefb_par *par)
 
        cksum = calc_img_cksum((u16 *) par->metromem_img,
                                (epd_frame_table[0].fw * DPY_H)/2);
-       *((u16 *) (par->metromem_img) +
+       *((u16 *)(par->metromem_img) +
                        (epd_frame_table[0].fw * DPY_H)/2) = cksum;
        metronome_display_cmd(par);
 }
@@ -641,8 +447,8 @@ static u16 metronomefb_dpy_update_page(struct metronomefb_par *par, int index)
 {
        int i;
        u16 csum = 0;
-       u16 *buf = (u16 __force *) (par->info->screen_base + index);
-       u16 *img = (u16 *) (par->metromem_img + index);
+       u16 *buf = (u16 __force *)(par->info->screen_base + index);
+       u16 *img = (u16 *)(par->metromem_img + index);
 
        /* swizzle from vm to metromem and recalc cksum at the same time*/
        for (i = 0; i < PAGE_SIZE/2; i++) {
@@ -678,7 +484,7 @@ static void metronomefb_fillrect(struct fb_info *info,
 {
        struct metronomefb_par *par = info->par;
 
-       cfb_fillrect(info, rect);
+       sys_fillrect(info, rect);
        metronomefb_dpy_update(par);
 }
 
@@ -687,7 +493,7 @@ static void metronomefb_copyarea(struct fb_info *info,
 {
        struct metronomefb_par *par = info->par;
 
-       cfb_copyarea(info, area);
+       sys_copyarea(info, area);
        metronomefb_dpy_update(par);
 }
 
@@ -696,7 +502,7 @@ static void metronomefb_imageblit(struct fb_info *info,
 {
        struct metronomefb_par *par = info->par;
 
-       cfb_imageblit(info, image);
+       sys_imageblit(info, image);
        metronomefb_dpy_update(par);
 }
 
@@ -733,7 +539,7 @@ static ssize_t metronomefb_write(struct fb_info *info, const char __user *buf,
                count = total_size - p;
        }
 
-       dst = (void __force *) (info->screen_base + p);
+       dst = (void __force *)(info->screen_base + p);
 
        if (copy_from_user(dst, buf, count))
                err = -EFAULT;
@@ -759,18 +565,10 @@ static struct fb_deferred_io metronomefb_defio = {
        .deferred_io    = metronomefb_dpy_deferred_io,
 };
 
-static irqreturn_t metronome_handle_irq(int irq, void *dev_id)
-{
-       struct fb_info *info = dev_id;
-       struct metronomefb_par *par = info->par;
-
-       wake_up_interruptible(&par->waitq);
-       return IRQ_HANDLED;
-}
-
 static int __devinit metronomefb_probe(struct platform_device *dev)
 {
        struct fb_info *info;
+       struct metronome_board *board;
        int retval = -ENOMEM;
        int videomemorysize;
        unsigned char *videomemory;
@@ -779,17 +577,26 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
        int cmd_size, wfm_size, img_size, padding_size, totalsize;
        int i;
 
+       /* pick up board specific routines */
+       board = dev->dev.platform_data;
+       if (!board)
+               return -EINVAL;
+
+       /* take a reference on the board-specific module; bail out if it is already gone */
+       if (!try_module_get(board->owner))
+               return -ENODEV;
+
        /* we have two blocks of memory.
        info->screen_base which is vm, and is the fb used by apps.
        par->metromem which is physically contiguous memory and
        contains the display controller commands, waveform,
        processed image data and padding. this is the data pulled
-       by the pxa255's LCD controller and pushed to Metronome */
+       by the device's LCD controller and pushed to Metronome */
 
        videomemorysize = (DPY_W*DPY_H);
        videomemory = vmalloc(videomemorysize);
        if (!videomemory)
-               return retval;
+               return -ENOMEM;
 
        memset(videomemory, 0, videomemorysize);
 
@@ -797,7 +604,7 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
        if (!info)
                goto err_vfree;
 
-       info->screen_base = (char __iomem *) videomemory;
+       info->screen_base = (char __force __iomem *)videomemory;
        info->fbops = &metronomefb_ops;
 
        info->var = metronomefb_var;
@@ -805,6 +612,7 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
        info->fix.smem_len = videomemorysize;
        par = info->par;
        par->info = info;
+       par->board = board;
        init_waitqueue_head(&par->waitq);
 
        /* this table caches per page csum values. */
@@ -849,11 +657,10 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
        par->metromem_desc_dma = par->metromem_dma + cmd_size + wfm_size
                                 + img_size + padding_size;
 
-       /* load the waveform in. assume mode 3, temp 31 for now */
-       /*      a) request the waveform file from userspace
+       /* load the waveform in. assume mode 3, temp 31 for now
+               a) request the waveform file from userspace
                b) process waveform and decode into metromem */
-
-       retval = request_firmware(&fw_entry, "waveform.wbf", &dev->dev);
+       retval = request_firmware(&fw_entry, "metronome.wbf", &dev->dev);
        if (retval < 0) {
                printk(KERN_ERR "metronomefb: couldn't get waveform\n");
                goto err_dma_free;
@@ -861,19 +668,14 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
 
        retval = load_waveform((u8 *) fw_entry->data, fw_entry->size,
                                par->metromem_wfm, 3, 31, &par->frame_count);
+       release_firmware(fw_entry);
        if (retval < 0) {
                printk(KERN_ERR "metronomefb: couldn't process waveform\n");
-               goto err_ld_wfm;
+               goto err_dma_free;
        }
-       release_firmware(fw_entry);
 
-       retval = request_irq(IRQ_GPIO(RDY_GPIO_PIN), metronome_handle_irq,
-                               IRQF_DISABLED, "Metronome", info);
-       if (retval) {
-               dev_err(&dev->dev, "request_irq failed: %d\n", retval);
-               goto err_ld_wfm;
-       }
-       set_irq_type(IRQ_GPIO(RDY_GPIO_PIN), IRQT_FALLING);
+       if (board->setup_irq(info))
+               goto err_dma_free;
 
        retval = metronome_init_regs(par);
        if (retval < 0)
@@ -913,9 +715,7 @@ err_cmap:
 err_fb_rel:
        framebuffer_release(info);
 err_free_irq:
-       free_irq(IRQ_GPIO(RDY_GPIO_PIN), info);
-err_ld_wfm:
-       release_firmware(fw_entry);
+       board->free_irq(info);
 err_dma_free:
        dma_free_writecombine(&dev->dev, par->metromemsize, par->metromem,
                                par->metromem_dma);
@@ -923,6 +723,7 @@ err_csum_table:
        vfree(par->csum_table);
 err_vfree:
        vfree(videomemory);
+       module_put(board->owner);
        return retval;
 }
 
@@ -939,7 +740,8 @@ static int __devexit metronomefb_remove(struct platform_device *dev)
                vfree(par->csum_table);
                unregister_framebuffer(info);
                vfree((void __force *)info->screen_base);
-               free_irq(IRQ_GPIO(RDY_GPIO_PIN), info);
+               par->board->free_irq(info);
+               module_put(par->board->owner);
                framebuffer_release(info);
        }
        return 0;
@@ -949,48 +751,21 @@ static struct platform_driver metronomefb_driver = {
        .probe  = metronomefb_probe,
        .remove = metronomefb_remove,
        .driver = {
+               .owner  = THIS_MODULE,
                .name   = "metronomefb",
        },
 };
 
-static struct platform_device *metronomefb_device;
-
 static int __init metronomefb_init(void)
 {
-       int ret;
-
-       if (!metronomefb_enable) {
-               printk(KERN_ERR
-                       "Use metronomefb_enable to enable the device\n");
-               return -ENXIO;
-       }
-
-       ret = platform_driver_register(&metronomefb_driver);
-       if (!ret) {
-               metronomefb_device = platform_device_alloc("metronomefb", 0);
-               if (metronomefb_device)
-                       ret = platform_device_add(metronomefb_device);
-               else
-                       ret = -ENOMEM;
-
-               if (ret) {
-                       platform_device_put(metronomefb_device);
-                       platform_driver_unregister(&metronomefb_driver);
-               }
-       }
-       return ret;
-
+       return platform_driver_register(&metronomefb_driver);
 }
 
 static void __exit metronomefb_exit(void)
 {
-       platform_device_unregister(metronomefb_device);
        platform_driver_unregister(&metronomefb_driver);
 }
 
-module_param(metronomefb_enable, uint, 0);
-MODULE_PARM_DESC(metronomefb_enable, "Enable communication with Metronome");
-
 module_init(metronomefb_init);
 module_exit(metronomefb_exit);
 
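The metronomefb changes above remove every PXA-specific register poke and instead call through a board descriptor passed in as platform_data. A rough sketch of that descriptor, inferred only from the callback names and call sites visible in this diff (the authoritative definition lives in the metronomefb header, which is not part of this excerpt):

struct metronome_board {
	struct module *owner;			/* refcounted in probe/remove */
	void (*set_rst)(struct metronomefb_par *, int);
	void (*set_stdby)(struct metronomefb_par *, int);
	void (*init_gpio_regs)(struct metronomefb_par *);
	void (*init_lcdc_regs)(struct metronomefb_par *);
	void (*post_dma_setup)(struct metronomefb_par *);
	int (*met_wait_event)(struct metronomefb_par *);
	int (*met_wait_event_intr)(struct metronomefb_par *);
	int (*setup_irq)(struct fb_info *);
	void (*free_irq)(struct fb_info *);
};

A board file fills this in, registers a "metronomefb" platform device with the structure as its platform_data, and the probe above pins board->owner with try_module_get() so the board module cannot vanish while the framebuffer is live.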
index 08d07255223343c13487d31162f009a89e95e172..473562191586f2101d169d8fb765b7b21ef4d2e4 100644 (file)
@@ -22,7 +22,7 @@
     ((v).xres == (x) && (v).yres == (y))
 
 #ifdef DEBUG
-#define DPRINTK(fmt, args...)  printk("modedb %s: " fmt, __FUNCTION__ , ## args)
+#define DPRINTK(fmt, args...)  printk("modedb %s: " fmt, __func__ , ## args)
 #else
 #define DPRINTK(fmt, args...)
 #endif
@@ -522,7 +522,7 @@ int fb_find_mode(struct fb_var_screeninfo *var,
        int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
        unsigned int xres = 0, yres = 0, bpp = default_bpp, refresh = 0;
        int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
-       u32 best, diff;
+       u32 best, diff, tdiff;
 
        for (i = namelen-1; i >= 0; i--) {
            switch (name[i]) {
@@ -651,19 +651,27 @@ done:
                return (refresh_specified) ? 2 : 1;
        }
 
-       diff = xres + yres;
+       diff = 2 * (xres + yres);
        best = -1;
        DPRINTK("Trying best-fit modes\n");
        for (i = 0; i < dbsize; i++) {
-           if (xres <= db[i].xres && yres <= db[i].yres) {
                DPRINTK("Trying %ix%i\n", db[i].xres, db[i].yres);
                if (!fb_try_mode(var, info, &db[i], bpp)) {
-                   if (diff > (db[i].xres - xres) + (db[i].yres - yres)) {
-                       diff = (db[i].xres - xres) + (db[i].yres - yres);
-                       best = i;
-                   }
+                       tdiff = abs(db[i].xres - xres) +
+                               abs(db[i].yres - yres);
+
+                       /*
+                        * Penalize modes with resolutions smaller
+                        * than requested.
+                        */
+                       if (xres > db[i].xres || yres > db[i].yres)
+                               tdiff += xres + yres;
+
+                       if (diff > tdiff) {
+                               diff = tdiff;
+                               best = i;
+                       }
                }
-           }
        }
        if (best != -1) {
            fb_try_mode(var, info, &db[best], bpp);
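As a worked example of the new best-fit arithmetic (illustrative numbers, not from the patch): for a requested 800x600 against a mode database containing 640x480 and 1024x768, the undersized mode scores tdiff = |640-800| + |480-600| + (800+600) = 1680, while the larger mode scores |1024-800| + |768-600| = 392 with no penalty, so 1024x768 wins. Under the old code the 640x480 entry was skipped outright; now it is merely deprioritized, which matters when no mode at or above the requested resolution exists.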
diff --git a/drivers/video/n411.c b/drivers/video/n411.c
new file mode 100644 (file)
index 0000000..935830f
--- /dev/null
@@ -0,0 +1,202 @@
+/*
+ * linux/drivers/video/n411.c -- Platform device for N411 EPD kit
+ *
+ * Copyright (C) 2008, Jaya Kumar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
+ *
+ * This driver is written to be used with the Hecuba display controller
+ * board, and tested with the EInk 800x600 display in 1 bit mode.
+ * The interface between Hecuba and the host is TTL based GPIO. The
+ * GPIO requirements are 8 writable data lines and 6 lines for control.
+ * Only 4 of the control lines are actually used here; all 6 are wired for future use.
+ * The driver requires the IO addresses for data and control GPIO at
+ * load time. It is also possible to use this display with a standard
+ * PC parallel port.
+ *
+ * General notes:
+ * - User must set dio_addr=0xIOADDR cio_addr=0xIOADDR c2io_addr=0xIOADDR
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/irq.h>
+
+#include <video/hecubafb.h>
+
+static unsigned long dio_addr;
+static unsigned long cio_addr;
+static unsigned long c2io_addr;
+static unsigned long splashval;
+static unsigned int nosplash;
+static unsigned char ctl;
+
+static void n411_set_ctl(struct hecubafb_par *par, unsigned char bit, unsigned
+                                                       char state)
+{
+       switch (bit) {
+       case HCB_CD_BIT:
+               if (state)
+                       ctl &= ~(HCB_CD_BIT);
+               else
+                       ctl |= HCB_CD_BIT;
+               break;
+       case HCB_DS_BIT:
+               if (state)
+                       ctl &= ~(HCB_DS_BIT);
+               else
+                       ctl |= HCB_DS_BIT;
+               break;
+       }
+       outb(ctl, cio_addr);
+}
+
+static unsigned char n411_get_ctl(struct hecubafb_par *par)
+{
+       return inb(c2io_addr);
+}
+
+static void n411_set_data(struct hecubafb_par *par, unsigned char value)
+{
+       outb(value, dio_addr);
+}
+
+static void n411_wait_for_ack(struct hecubafb_par *par, int clear)
+{
+       int timeout;
+       unsigned char tmp;
+
+       timeout = 500;
+       do {
+               tmp = n411_get_ctl(par);
+               if ((tmp & HCB_ACK_BIT) && (!clear))
+                       return;
+               else if (!(tmp & HCB_ACK_BIT) && (clear))
+                       return;
+               udelay(1);
+       } while (timeout--);
+       printk(KERN_ERR "timed out waiting for ack\n");
+}
+
+static int n411_init_control(struct hecubafb_par *par)
+{
+       unsigned char tmp;
+       /* for init, we want the following setup to be set:
+       WUP = lo
+       ACK = hi
+       DS = hi
+       RW = hi
+       CD = lo
+       */
+
+       /* write WUP to lo, DS to hi, RW to hi, CD to lo */
+       ctl = HCB_WUP_BIT | HCB_RW_BIT | HCB_CD_BIT ;
+       n411_set_ctl(par, HCB_DS_BIT, 1);
+
+       /* check ACK is not lo */
+       tmp = n411_get_ctl(par);
+       if (tmp & HCB_ACK_BIT) {
+               printk(KERN_ERR "Fail because ACK is already low\n");
+               return -ENXIO;
+       }
+
+       return 0;
+}
+
+
+static int n411_init_board(struct hecubafb_par *par)
+{
+       int retval;
+
+       retval = n411_init_control(par);
+       if (retval)
+               return retval;
+
+       par->send_command(par, APOLLO_INIT_DISPLAY);
+       par->send_data(par, 0x81);
+
+       /* have to wait while display resets */
+       udelay(1000);
+
+       /* if we were told to splash the screen, we just clear it */
+       if (!nosplash) {
+               par->send_command(par, APOLLO_ERASE_DISPLAY);
+               par->send_data(par, splashval);
+       }
+
+       return 0;
+}
+
+static struct hecuba_board n411_board = {
+       .owner                  = THIS_MODULE,
+       .init                   = n411_init_board,
+       .set_ctl                = n411_set_ctl,
+       .set_data               = n411_set_data,
+       .wait_for_ack           = n411_wait_for_ack,
+};
+
+static struct platform_device *n411_device;
+static int __init n411_init(void)
+{
+       int ret;
+       if (!dio_addr || !cio_addr || !c2io_addr) {
+               printk(KERN_WARNING "no IO addresses supplied\n");
+               return -EINVAL;
+       }
+
+       /* request our platform independent driver */
+       request_module("hecubafb");
+
+       n411_device = platform_device_alloc("hecubafb", -1);
+       if (!n411_device)
+               return -ENOMEM;
+
+       platform_device_add_data(n411_device, &n411_board, sizeof(n411_board));
+
+       /* this _add binds hecubafb to n411. hecubafb refcounts n411 */
+       ret = platform_device_add(n411_device);
+
+       if (ret)
+               platform_device_put(n411_device);
+
+       return ret;
+
+}
+
+static void __exit n411_exit(void)
+{
+       platform_device_unregister(n411_device);
+}
+
+module_init(n411_init);
+module_exit(n411_exit);
+
+module_param(nosplash, uint, 0);
+MODULE_PARM_DESC(nosplash, "Disable doing the splash screen");
+module_param(dio_addr, ulong, 0);
+MODULE_PARM_DESC(dio_addr, "IO address for data, eg: 0x480");
+module_param(cio_addr, ulong, 0);
+MODULE_PARM_DESC(cio_addr, "IO address for control, eg: 0x400");
+module_param(c2io_addr, ulong, 0);
+MODULE_PARM_DESC(c2io_addr, "IO address for secondary control, eg: 0x408");
+module_param(splashval, ulong, 0);
+MODULE_PARM_DESC(splashval, "Splash pattern: 0x00 is black, 0x01 is white");
+
+MODULE_DESCRIPTION("board driver for n411 hecuba/apollo epd kit");
+MODULE_AUTHOR("Jaya Kumar");
+MODULE_LICENSE("GPL");
+
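n411 is configured entirely through module parameters, so bringing the kit up is something like "modprobe n411 dio_addr=0x480 cio_addr=0x400 c2io_addr=0x408" (an illustrative invocation using the example addresses from the parameter descriptions, not a line taken from the patch); the request_module("hecubafb") call then pulls in the display driver, which binds to the "hecubafb" platform device that n411_init() registers.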
index d1a10549f5434fc16a963d023dca5ad6fea45c70..ed20a9871b33a9b1c1c63a95cc1fcb6948a79cdb 100644 (file)
@@ -129,7 +129,7 @@ typedef struct {
        int nvclk_khz;
        char mem_page_miss;
        char mem_latency;
-       int memory_type;
+       u32 memory_type;
        int memory_width;
        char enable_video;
        char gr_during_vid;
@@ -719,7 +719,7 @@ static void nForceUpdateArbitrationSettings(unsigned VClk,
        memctrl >>= 16;
 
        if ((memctrl == 0x1A9) || (memctrl == 0x1AB) || (memctrl == 0x1ED)) {
-               int dimm[3];
+               u32 dimm[3];
 
                dev = pci_get_bus_and_slot(0, 2);
                pci_read_config_dword(dev, 0x40, &dimm[0]);
index 82579d3a99706470bcf05e26902c4d5e6f2961ea..d9627b57eb4d27e12242116f2fc77c99516ab0dd 100644 (file)
@@ -265,12 +265,12 @@ static void nv10GetConfig(struct nvidia_par *par)
 
        dev = pci_get_bus_and_slot(0, 1);
        if ((par->Chipset & 0xffff) == 0x01a0) {
-               int amt = 0;
+               u32 amt;
 
                pci_read_config_dword(dev, 0x7c, &amt);
                par->RamAmountKBytes = (((amt >> 6) & 31) + 1) * 1024;
        } else if ((par->Chipset & 0xffff) == 0x01f0) {
-               int amt = 0;
+               u32 amt;
 
                pci_read_config_dword(dev, 0x84, &amt);
                par->RamAmountKBytes = (((amt >> 4) & 127) + 1) * 1024;
index 596652d2831ff0c8849672fca177912f247a915c..9dbb5a5a267b4ac5e060172748210981f251b1d3 100644 (file)
 #define NVTRACE          if (0) printk
 #endif
 
-#define NVTRACE_ENTER(...)  NVTRACE("%s START\n", __FUNCTION__)
-#define NVTRACE_LEAVE(...)  NVTRACE("%s END\n", __FUNCTION__)
+#define NVTRACE_ENTER(...)  NVTRACE("%s START\n", __func__)
+#define NVTRACE_LEAVE(...)  NVTRACE("%s END\n", __func__)
 
 #ifdef CONFIG_FB_NVIDIA_DEBUG
 #define assert(expr) \
        if (!(expr)) { \
        printk( "Assertion failed! %s,%s,%s,line=%d\n",\
-       #expr,__FILE__,__FUNCTION__,__LINE__); \
+       #expr,__FILE__,__func__,__LINE__); \
        BUG(); \
        }
 #else
@@ -1559,7 +1559,6 @@ static int __devinit nvidiafb_init(void)
 
 module_init(nvidiafb_init);
 
-#ifdef MODULE
 static void __exit nvidiafb_exit(void)
 {
        pci_unregister_driver(&nvidiafb_driver);
@@ -1615,5 +1614,3 @@ MODULE_PARM_DESC(nomtrr, "Disables MTRR support (0 or 1=disabled) "
 MODULE_AUTHOR("Antonino Daplas");
 MODULE_DESCRIPTION("Framebuffer driver for nVidia graphics chipset");
 MODULE_LICENSE("GPL");
-#endif                         /* MODULE */
-
index 452433d469738cd203115d7fa3779a239f476120..d7b3dcc0dc4388bb18d2614bd17efe4d9c2ea69e 100644 (file)
@@ -248,7 +248,7 @@ static void __iomem *offb_map_reg(struct device_node *np, int index,
 static void __init offb_init_fb(const char *name, const char *full_name,
                                int width, int height, int depth,
                                int pitch, unsigned long address,
-                               struct device_node *dp)
+                               int foreign_endian, struct device_node *dp)
 {
        unsigned long res_size = pitch * height * (depth + 7) / 8;
        struct offb_par *par = &default_par;
@@ -397,7 +397,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
        info->screen_base = ioremap(address, fix->smem_len);
        info->par = par;
        info->pseudo_palette = (void *) (info + 1);
-       info->flags = FBINFO_DEFAULT;
+       info->flags = FBINFO_DEFAULT | foreign_endian;
 
        fb_alloc_cmap(&info->cmap, 256, 0);
 
@@ -424,6 +424,15 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
        u64 rstart, address = OF_BAD_ADDR;
        const u32 *pp, *addrp, *up;
        u64 asize;
+       int foreign_endian = 0;
+
+#ifdef __BIG_ENDIAN
+       if (of_get_property(dp, "little-endian", NULL))
+               foreign_endian = FBINFO_FOREIGN_ENDIAN;
+#else
+       if (of_get_property(dp, "big-endian", NULL))
+               foreign_endian = FBINFO_FOREIGN_ENDIAN;
+#endif
 
        pp = of_get_property(dp, "linux,bootx-depth", &len);
        if (pp == NULL)
@@ -509,7 +518,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
                offb_init_fb(no_real_node ? "bootx" : dp->name,
                             no_real_node ? "display" : dp->full_name,
                             width, height, depth, pitch, address,
-                            no_real_node ? NULL : dp);
+                            foreign_endian, no_real_node ? NULL : dp);
        }
 }
 
index 58496061142de6e90d2a27e79381ac55d1d0b29f..c95874fe9076579de587a7c1f87c9ded194ff7b5 100644 (file)
@@ -310,7 +310,7 @@ static int __devinit p9100_probe(struct of_device *op, const struct of_device_id
 
        dev_set_drvdata(&op->dev, info);
 
-       printk("%s: p9100 at %lx:%lx\n",
+       printk(KERN_INFO "%s: p9100 at %lx:%lx\n",
               dp->full_name,
               par->which_io, par->physbase);
 
index 30181b593829b1e94fe6d5229965d8894c4357cd..3f1ca2adda3d7353fe41529bb9fa59c1085affa3 100644 (file)
@@ -56,7 +56,7 @@
 #undef PM2FB_MASTER_DEBUG
 #ifdef PM2FB_MASTER_DEBUG
 #define DPRINTK(a, b...)       \
-       printk(KERN_DEBUG "pm2fb: %s: " a, __FUNCTION__ , ## b)
+       printk(KERN_DEBUG "pm2fb: %s: " a, __func__ , ## b)
 #else
 #define DPRINTK(a, b...)
 #endif
@@ -67,7 +67,7 @@
  * Driver data
  */
 static int hwcursor = 1;
-static char *mode __devinitdata;
+static char *mode_option __devinitdata;
 
 /*
  * The XFree GLINT driver will (I think to implement hardware cursor
@@ -1680,17 +1680,19 @@ static int __devinit pm2fb_probe(struct pci_dev *pdev,
                info->pixmap.scan_align = 1;
        }
 
-       if (!mode)
-               mode = "640x480@60";
+       if (!mode_option)
+               mode_option = "640x480@60";
 
-       err = fb_find_mode(&info->var, info, mode, NULL, 0, NULL, 8);
+       err = fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 8);
        if (!err || err == 4)
                info->var = pm2fb_var;
 
-       if (fb_alloc_cmap(&info->cmap, 256, 0) < 0)
+       retval = fb_alloc_cmap(&info->cmap, 256, 0);
+       if (retval < 0)
                goto err_exit_both;
 
-       if (register_framebuffer(info) < 0)
+       retval = register_framebuffer(info);
+       if (retval < 0)
                goto err_exit_all;
 
        printk(KERN_INFO "fb%d: %s frame buffer device, memory = %dK.\n",
@@ -1797,7 +1799,7 @@ static int __init pm2fb_setup(char *options)
                else if (!strncmp(this_opt, "noaccel", 7))
                        noaccel = 1;
                else
-                       mode = this_opt;
+                       mode_option = this_opt;
        }
        return 0;
 }
@@ -1833,8 +1835,10 @@ static void __exit pm2fb_exit(void)
 #ifdef MODULE
 module_exit(pm2fb_exit);
 
-module_param(mode, charp, 0);
-MODULE_PARM_DESC(mode, "Preferred video mode e.g. '648x480-8@60'");
+module_param(mode_option, charp, 0);
+MODULE_PARM_DESC(mode_option, "Initial video mode e.g. '648x480-8@60'");
+module_param_named(mode, mode_option, charp, 0);
+MODULE_PARM_DESC(mode, "Initial video mode e.g. '648x480-8@60' (deprecated)");
 module_param(lowhsync, bool, 0);
 MODULE_PARM_DESC(lowhsync, "Force horizontal sync low regardless of mode");
 module_param(lowvsync, bool, 0);
index 5dba8cdd05173709c2365559c11d6b760a052adc..68089d1456c25167c0ece4bc5789536e8b2eef6f 100644 (file)
@@ -45,7 +45,7 @@
 #undef PM3FB_MASTER_DEBUG
 #ifdef PM3FB_MASTER_DEBUG
 #define DPRINTK(a, b...)       \
-       printk(KERN_DEBUG "pm3fb: %s: " a, __FUNCTION__ , ## b)
+       printk(KERN_DEBUG "pm3fb: %s: " a, __func__ , ## b)
 #else
 #define DPRINTK(a, b...)
 #endif
@@ -1571,6 +1571,8 @@ module_exit(pm3fb_exit);
 #endif
 module_init(pm3fb_init);
 
+module_param(mode_option, charp, 0);
+MODULE_PARM_DESC(mode_option, "Initial video mode e.g. '648x480-8@60'");
 module_param(noaccel, bool, 0);
 MODULE_PARM_DESC(noaccel, "Disable acceleration");
 module_param(hwcursor, int, 0644);
index 757651954e6cf81bebba98cda2bf36ce910f824d..3ab6e3d973a1670bef82cc28fa89cf1ba8e46d8f 100644 (file)
@@ -39,6 +39,9 @@
 #include <linux/dma-mapping.h>
 #include <linux/clk.h>
 #include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include <asm/hardware.h>
 #include <asm/io.h>
 #include "pxafb.h"
 
 /* Bits which should not be set in machine configuration structures */
-#define LCCR0_INVALID_CONFIG_MASK (LCCR0_OUM|LCCR0_BM|LCCR0_QDM|LCCR0_DIS|LCCR0_EFM|LCCR0_IUM|LCCR0_SFM|LCCR0_LDM|LCCR0_ENB)
-#define LCCR3_INVALID_CONFIG_MASK (LCCR3_HSP|LCCR3_VSP|LCCR3_PCD|LCCR3_BPP)
+#define LCCR0_INVALID_CONFIG_MASK      (LCCR0_OUM | LCCR0_BM | LCCR0_QDM |\
+                                        LCCR0_DIS | LCCR0_EFM | LCCR0_IUM |\
+                                        LCCR0_SFM | LCCR0_LDM | LCCR0_ENB)
+
+#define LCCR3_INVALID_CONFIG_MASK      (LCCR3_HSP | LCCR3_VSP |\
+                                        LCCR3_PCD | LCCR3_BPP)
 
 static void (*pxafb_backlight_power)(int);
 static void (*pxafb_lcd_power)(int, struct fb_var_screeninfo *);
 
-static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *);
+static int pxafb_activate_var(struct fb_var_screeninfo *var,
+                               struct pxafb_info *);
 static void set_ctrlr_state(struct pxafb_info *fbi, u_int state);
 
-#ifdef CONFIG_FB_PXA_PARAMETERS
-#define PXAFB_OPTIONS_SIZE 256
-static char g_options[PXAFB_OPTIONS_SIZE] __devinitdata = "";
-#endif
+static inline unsigned long
+lcd_readl(struct pxafb_info *fbi, unsigned int off)
+{
+       return __raw_readl(fbi->mmio_base + off);
+}
+
+static inline void
+lcd_writel(struct pxafb_info *fbi, unsigned int off, unsigned long val)
+{
+       __raw_writel(val, fbi->mmio_base + off);
+}
 
 static inline void pxafb_schedule_work(struct pxafb_info *fbi, u_int state)
 {
@@ -79,10 +94,12 @@ static inline void pxafb_schedule_work(struct pxafb_info *fbi, u_int state)
        /*
         * We need to handle two requests being made at the same time.
         * There are two important cases:
-        *  1. When we are changing VT (C_REENABLE) while unblanking (C_ENABLE)
-        *     We must perform the unblanking, which will do our REENABLE for us.
-        *  2. When we are blanking, but immediately unblank before we have
-        *     blanked.  We do the "REENABLE" thing here as well, just to be sure.
+        *  1. When we are changing VT (C_REENABLE) while unblanking
+        *     (C_ENABLE) We must perform the unblanking, which will
+        *     do our REENABLE for us.
+        *  2. When we are blanking, but immediately unblank before
+        *     we have blanked.  We do the "REENABLE" thing here as
+        *     well, just to be sure.
         */
        if (fbi->task_state == C_ENABLE && state == C_REENABLE)
                state = (u_int) -1;
@@ -129,13 +146,13 @@ pxafb_setpalettereg(u_int regno, u_int red, u_int green, u_int blue,
                val  = ((red   << 8) & 0x00f80000);
                val |= ((green >> 0) & 0x0000fc00);
                val |= ((blue  >> 8) & 0x000000f8);
-               ((u32*)(fbi->palette_cpu))[regno] = val;
+               ((u32 *)(fbi->palette_cpu))[regno] = val;
                break;
        case LCCR4_PAL_FOR_2:
                val  = ((red   << 8) & 0x00fc0000);
                val |= ((green >> 0) & 0x0000fc00);
                val |= ((blue  >> 8) & 0x000000fc);
-               ((u32*)(fbi->palette_cpu))[regno] = val;
+               ((u32 *)(fbi->palette_cpu))[regno] = val;
                break;
        }
 
@@ -203,15 +220,15 @@ pxafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
  */
 static int pxafb_bpp_to_lccr3(struct fb_var_screeninfo *var)
 {
-        int ret = 0;
-        switch (var->bits_per_pixel) {
-        case 1:  ret = LCCR3_1BPP; break;
-        case 2:  ret = LCCR3_2BPP; break;
-        case 4:  ret = LCCR3_4BPP; break;
-        case 8:  ret = LCCR3_8BPP; break;
-        case 16: ret = LCCR3_16BPP; break;
-        }
-        return ret;
+       int ret = 0;
+       switch (var->bits_per_pixel) {
+       case 1:  ret = LCCR3_1BPP; break;
+       case 2:  ret = LCCR3_2BPP; break;
+       case 4:  ret = LCCR3_4BPP; break;
+       case 8:  ret = LCCR3_8BPP; break;
+       case 16: ret = LCCR3_16BPP; break;
+       }
+       return ret;
 }
 
 #ifdef CONFIG_CPU_FREQ
@@ -223,31 +240,32 @@ static int pxafb_bpp_to_lccr3(struct fb_var_screeninfo *var)
  */
 static unsigned int pxafb_display_dma_period(struct fb_var_screeninfo *var)
 {
-       /*
-        * Period = pixclock * bits_per_byte * bytes_per_transfer
-        *              / memory_bits_per_pixel;
-        */
-       return var->pixclock * 8 * 16 / var->bits_per_pixel;
+       /*
+        * Period = pixclock * bits_per_byte * bytes_per_transfer
+        *              / memory_bits_per_pixel;
+        */
+       return var->pixclock * 8 * 16 / var->bits_per_pixel;
 }
-
-extern unsigned int get_clk_frequency_khz(int info);
 #endif
 
 /*
  * Select the smallest mode that allows the desired resolution to be
  * displayed. If desired parameters can be rounded up.
  */
-static struct pxafb_mode_info *pxafb_getmode(struct pxafb_mach_info *mach, struct fb_var_screeninfo *var)
+static struct pxafb_mode_info *pxafb_getmode(struct pxafb_mach_info *mach,
+                                            struct fb_var_screeninfo *var)
 {
        struct pxafb_mode_info *mode = NULL;
        struct pxafb_mode_info *modelist = mach->modes;
        unsigned int best_x = 0xffffffff, best_y = 0xffffffff;
        unsigned int i;
 
-       for (i = 0 ; i < mach->num_modes ; i++) {
-               if (modelist[i].xres >= var->xres && modelist[i].yres >= var->yres &&
-                               modelist[i].xres < best_x && modelist[i].yres < best_y &&
-                               modelist[i].bpp >= var->bits_per_pixel ) {
+       for (i = 0; i < mach->num_modes; i++) {
+               if (modelist[i].xres >= var->xres &&
+                   modelist[i].yres >= var->yres &&
+                   modelist[i].xres < best_x &&
+                   modelist[i].yres < best_y &&
+                   modelist[i].bpp >= var->bits_per_pixel) {
                        best_x = modelist[i].xres;
                        best_y = modelist[i].yres;
                        mode = &modelist[i];
@@ -257,7 +275,8 @@ static struct pxafb_mode_info *pxafb_getmode(struct pxafb_mach_info *mach, struc
        return mode;
 }
 
-static void pxafb_setmode(struct fb_var_screeninfo *var, struct pxafb_mode_info *mode)
+static void pxafb_setmode(struct fb_var_screeninfo *var,
+                         struct pxafb_mode_info *mode)
 {
        var->xres               = mode->xres;
        var->yres               = mode->yres;
@@ -315,19 +334,20 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
        var->yres_virtual =
                max(var->yres_virtual, var->yres);
 
-        /*
+       /*
         * Setup the RGB parameters for this display.
         *
         * The pixel packing format is described on page 7-11 of the
         * PXA2XX Developer's Manual.
-         */
+        */
        if (var->bits_per_pixel == 16) {
                var->red.offset   = 11; var->red.length   = 5;
                var->green.offset = 5;  var->green.length = 6;
                var->blue.offset  = 0;  var->blue.length  = 5;
                var->transp.offset = var->transp.length = 0;
        } else {
-               var->red.offset = var->green.offset = var->blue.offset = var->transp.offset = 0;
+               var->red.offset = var->green.offset = 0;
+               var->blue.offset = var->transp.offset = 0;
                var->red.length   = 8;
                var->green.length = 8;
                var->blue.length  = 8;
@@ -345,8 +365,7 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 
 static inline void pxafb_set_truecolor(u_int is_true_color)
 {
-       pr_debug("pxafb: true_color = %d\n", is_true_color);
-       // do your machine-specific setup if needed
+       /* do your machine-specific setup if needed */
 }
 
 /*
@@ -357,9 +376,6 @@ static int pxafb_set_par(struct fb_info *info)
 {
        struct pxafb_info *fbi = (struct pxafb_info *)info;
        struct fb_var_screeninfo *var = &info->var;
-       unsigned long palette_mem_size;
-
-       pr_debug("pxafb: set_par\n");
 
        if (var->bits_per_pixel == 16)
                fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR;
@@ -379,17 +395,10 @@ static int pxafb_set_par(struct fb_info *info)
        if (var->bits_per_pixel == 16)
                fbi->palette_size = 0;
        else
-               fbi->palette_size = var->bits_per_pixel == 1 ? 4 : 1 << var->bits_per_pixel;
-
-       if ((fbi->lccr4 & LCCR4_PAL_FOR_MASK) == LCCR4_PAL_FOR_0)
-               palette_mem_size = fbi->palette_size * sizeof(u16);
-       else
-               palette_mem_size = fbi->palette_size * sizeof(u32);
-
-       pr_debug("pxafb: palette_mem_size = 0x%08lx\n", palette_mem_size);
+               fbi->palette_size = var->bits_per_pixel == 1 ?
+                                       4 : 1 << var->bits_per_pixel;
 
-       fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size);
-       fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size;
+       fbi->palette_cpu = (u16 *)&fbi->dma_buff->palette[0];
 
        /*
         * Set (any) board control register to handle new color depth
@@ -406,36 +415,6 @@ static int pxafb_set_par(struct fb_info *info)
        return 0;
 }
 
-/*
- * Formal definition of the VESA spec:
- *  On
- *     This refers to the state of the display when it is in full operation
- *  Stand-By
- *     This defines an optional operating state of minimal power reduction with
- *     the shortest recovery time
- *  Suspend
- *     This refers to a level of power management in which substantial power
- *     reduction is achieved by the display.  The display can have a longer
- *     recovery time from this state than from the Stand-by state
- *  Off
- *     This indicates that the display is consuming the lowest level of power
- *     and is non-operational. Recovery from this state may optionally require
- *     the user to manually power on the monitor
- *
- *  Now, the fbdev driver adds an additional state, (blank), where they
- *  turn off the video (maybe by colormap tricks), but don't mess with the
- *  video itself: think of it semantically between on and Stand-By.
- *
- *  So here's what we should do in our fbdev blank routine:
- *
- *     VESA_NO_BLANKING (mode 0)       Video on,  front/back light on
- *     VESA_VSYNC_SUSPEND (mode 1)     Video on,  front/back light off
- *     VESA_HSYNC_SUSPEND (mode 2)     Video on,  front/back light off
- *     VESA_POWERDOWN (mode 3)         Video off, front/back light off
- *
- *  This will match the matrox implementation.
- */
-
 /*
  * pxafb_blank():
  *     Blank the display by setting all palette values to zero.  Note, the
@@ -447,8 +426,6 @@ static int pxafb_blank(int blank, struct fb_info *info)
        struct pxafb_info *fbi = (struct pxafb_info *)info;
        int i;
 
-       pr_debug("pxafb: blank=%d\n", blank);
-
        switch (blank) {
        case FB_BLANK_POWERDOWN:
        case FB_BLANK_VSYNC_SUSPEND:
@@ -460,11 +437,11 @@ static int pxafb_blank(int blank, struct fb_info *info)
                                pxafb_setpalettereg(i, 0, 0, 0, 0, info);
 
                pxafb_schedule_work(fbi, C_DISABLE);
-               //TODO if (pxafb_blank_helper) pxafb_blank_helper(blank);
+               /* TODO if (pxafb_blank_helper) pxafb_blank_helper(blank); */
                break;
 
        case FB_BLANK_UNBLANK:
-               //TODO if (pxafb_blank_helper) pxafb_blank_helper(blank);
+               /* TODO if (pxafb_blank_helper) pxafb_blank_helper(blank); */
                if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR ||
                    fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR)
                        fb_set_cmap(&fbi->fb.cmap, info);
@@ -480,7 +457,7 @@ static int pxafb_mmap(struct fb_info *info,
        unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
 
        if (off < info->fix.smem_len) {
-               vma->vm_pgoff += 1;
+               vma->vm_pgoff += fbi->video_offset / PAGE_SIZE;
                return dma_mmap_writecombine(fbi->dev, vma, fbi->map_cpu,
                                             fbi->map_dma, fbi->map_size);
        }
@@ -529,7 +506,8 @@ static struct fb_ops pxafb_ops = {
  *
  * Factoring the 10^4 and 10^-12 out gives 10^-8 == 1 / 100000000 as used below.
  */
-static inline unsigned int get_pcd(struct pxafb_info *fbi, unsigned int pixclock)
+static inline unsigned int get_pcd(struct pxafb_info *fbi,
+                                  unsigned int pixclock)
 {
        unsigned long long pcd;
 
@@ -555,7 +533,7 @@ static inline void set_hsync_time(struct pxafb_info *fbi, unsigned int pcd)
        unsigned long htime;
 
        if ((pcd == 0) || (fbi->fb.var.hsync_len == 0)) {
-               fbi->hsync_time=0;
+               fbi->hsync_time = 0;
                return;
        }
 
@@ -576,71 +554,231 @@ unsigned long pxafb_get_hsync_time(struct device *dev)
 }
 EXPORT_SYMBOL(pxafb_get_hsync_time);
 
-/*
- * pxafb_activate_var():
- *     Configures LCD Controller based on entries in var parameter.  Settings are
- *     only written to the controller if changes were made.
- */
-static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *fbi)
+static int setup_frame_dma(struct pxafb_info *fbi, int dma, int pal,
+               unsigned int offset, size_t size)
 {
-       struct pxafb_lcd_reg new_regs;
-       u_long flags;
-       u_int lines_per_panel, pcd = get_pcd(fbi, var->pixclock);
+       struct pxafb_dma_descriptor *dma_desc, *pal_desc;
+       unsigned int dma_desc_off, pal_desc_off;
 
-       pr_debug("pxafb: Configuring PXA LCD\n");
+       if (dma < 0 || dma >= DMA_MAX)
+               return -EINVAL;
 
-       pr_debug("var: xres=%d hslen=%d lm=%d rm=%d\n",
-                var->xres, var->hsync_len,
-                var->left_margin, var->right_margin);
-       pr_debug("var: yres=%d vslen=%d um=%d bm=%d\n",
-                var->yres, var->vsync_len,
-                var->upper_margin, var->lower_margin);
-       pr_debug("var: pixclock=%d pcd=%d\n", var->pixclock, pcd);
+       dma_desc = &fbi->dma_buff->dma_desc[dma];
+       dma_desc_off = offsetof(struct pxafb_dma_buff, dma_desc[dma]);
 
-#if DEBUG_VAR
-       if (var->xres < 16        || var->xres > 1024)
-               printk(KERN_ERR "%s: invalid xres %d\n",
-                       fbi->fb.fix.id, var->xres);
-       switch(var->bits_per_pixel) {
-       case 1:
-       case 2:
-       case 4:
-       case 8:
-       case 16:
-               break;
-       default:
-               printk(KERN_ERR "%s: invalid bit depth %d\n",
-                      fbi->fb.fix.id, var->bits_per_pixel);
-               break;
+       dma_desc->fsadr = fbi->screen_dma + offset;
+       dma_desc->fidr  = 0;
+       dma_desc->ldcmd = size;
+
+       if (pal < 0 || pal >= PAL_MAX) {
+               dma_desc->fdadr = fbi->dma_buff_phys + dma_desc_off;
+               fbi->fdadr[dma] = fbi->dma_buff_phys + dma_desc_off;
+       } else {
+               pal_desc = &fbi->dma_buff->pal_desc[dma];
+               pal_desc_off = offsetof(struct pxafb_dma_buff, dma_desc[pal]);
+
+               pal_desc->fsadr = fbi->dma_buff_phys + pal * PALETTE_SIZE;
+               pal_desc->fidr  = 0;
+
+               if ((fbi->lccr4 & LCCR4_PAL_FOR_MASK) == LCCR4_PAL_FOR_0)
+                       pal_desc->ldcmd = fbi->palette_size * sizeof(u16);
+               else
+                       pal_desc->ldcmd = fbi->palette_size * sizeof(u32);
+
+               pal_desc->ldcmd |= LDCMD_PAL;
+
+               /* flip back and forth between palette and frame buffer */
+               pal_desc->fdadr = fbi->dma_buff_phys + dma_desc_off;
+               dma_desc->fdadr = fbi->dma_buff_phys + pal_desc_off;
+               fbi->fdadr[dma] = fbi->dma_buff_phys + dma_desc_off;
        }
-       if (var->hsync_len < 1    || var->hsync_len > 64)
-               printk(KERN_ERR "%s: invalid hsync_len %d\n",
-                       fbi->fb.fix.id, var->hsync_len);
-       if (var->left_margin < 1  || var->left_margin > 255)
-               printk(KERN_ERR "%s: invalid left_margin %d\n",
-                       fbi->fb.fix.id, var->left_margin);
-       if (var->right_margin < 1 || var->right_margin > 255)
-               printk(KERN_ERR "%s: invalid right_margin %d\n",
-                       fbi->fb.fix.id, var->right_margin);
-       if (var->yres < 1         || var->yres > 1024)
-               printk(KERN_ERR "%s: invalid yres %d\n",
-                       fbi->fb.fix.id, var->yres);
-       if (var->vsync_len < 1    || var->vsync_len > 64)
-               printk(KERN_ERR "%s: invalid vsync_len %d\n",
-                       fbi->fb.fix.id, var->vsync_len);
-       if (var->upper_margin < 0 || var->upper_margin > 255)
-               printk(KERN_ERR "%s: invalid upper_margin %d\n",
-                       fbi->fb.fix.id, var->upper_margin);
-       if (var->lower_margin < 0 || var->lower_margin > 255)
-               printk(KERN_ERR "%s: invalid lower_margin %d\n",
-                       fbi->fb.fix.id, var->lower_margin);
-#endif
 
-       new_regs.lccr0 = fbi->lccr0 |
-               (LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM |
-                 LCCR0_QDM | LCCR0_BM  | LCCR0_OUM);
+       return 0;
+}
+
+#ifdef CONFIG_FB_PXA_SMARTPANEL
+static int setup_smart_dma(struct pxafb_info *fbi)
+{
+       struct pxafb_dma_descriptor *dma_desc;
+       unsigned long dma_desc_off, cmd_buff_off;
+
+       dma_desc = &fbi->dma_buff->dma_desc[DMA_CMD];
+       dma_desc_off = offsetof(struct pxafb_dma_buff, dma_desc[DMA_CMD]);
+       cmd_buff_off = offsetof(struct pxafb_dma_buff, cmd_buff);
+
+       dma_desc->fdadr = fbi->dma_buff_phys + dma_desc_off;
+       dma_desc->fsadr = fbi->dma_buff_phys + cmd_buff_off;
+       dma_desc->fidr  = 0;
+       dma_desc->ldcmd = fbi->n_smart_cmds * sizeof(uint16_t);
+
+       fbi->fdadr[DMA_CMD] = dma_desc->fdadr;
+       return 0;
+}
+
+int pxafb_smart_flush(struct fb_info *info)
+{
+       struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb);
+       uint32_t prsr;
+       int ret = 0;
+
+       /* disable controller until all registers are set up */
+       lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB);
+
+       /* 1. make it an even number of commands to align on 32-bit boundary
+        * 2. add the interrupt command to the end of the chain so we can
+        *    keep track of the end of the transfer
+        */
+
+       while (fbi->n_smart_cmds & 1)
+               fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_NOOP;
+
+       fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_INTERRUPT;
+       fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_WAIT_FOR_VSYNC;
+       setup_smart_dma(fbi);
+
+       /* continue to execute next command */
+       prsr = lcd_readl(fbi, PRSR) | PRSR_ST_OK | PRSR_CON_NT;
+       lcd_writel(fbi, PRSR, prsr);
+
+       /* stop the processor in case it executed "wait for sync" cmd */
+       lcd_writel(fbi, CMDCR, 0x0001);
+
+       /* don't send interrupts for fifo underruns on channel 6 */
+       lcd_writel(fbi, LCCR5, LCCR5_IUM(6));
+
+       lcd_writel(fbi, LCCR1, fbi->reg_lccr1);
+       lcd_writel(fbi, LCCR2, fbi->reg_lccr2);
+       lcd_writel(fbi, LCCR3, fbi->reg_lccr3);
+       lcd_writel(fbi, FDADR0, fbi->fdadr[0]);
+       lcd_writel(fbi, FDADR6, fbi->fdadr[6]);
+
+       /* begin sending */
+       lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB);
+
+       if (wait_for_completion_timeout(&fbi->command_done, HZ/2) == 0) {
+               pr_warning("%s: timeout waiting for command done\n",
+                               __func__);
+               ret = -ETIMEDOUT;
+       }
+
+       /* quick disable */
+       prsr = lcd_readl(fbi, PRSR) & ~(PRSR_ST_OK | PRSR_CON_NT);
+       lcd_writel(fbi, PRSR, prsr);
+       lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB);
+       lcd_writel(fbi, FDADR6, 0);
+       fbi->n_smart_cmds = 0;
+       return ret;
+}
+
+int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int n_cmds)
+{
+       int i;
+       struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb);
+
+       /* leave 2 commands for INTERRUPT and WAIT_FOR_SYNC */
+       for (i = 0; i < n_cmds; i++) {
+               if (fbi->n_smart_cmds == CMD_BUFF_SIZE - 8)
+                       pxafb_smart_flush(info);
+
+               fbi->smart_cmds[fbi->n_smart_cmds++] = *cmds++;
+       }
+
+       return 0;
+}
+
+static unsigned int __smart_timing(unsigned time_ns, unsigned long lcd_clk)
+{
+       unsigned int t = (time_ns * (lcd_clk / 1000000) / 1000);
+       return (t == 0) ? 1 : t;
+}
+
+static void setup_smart_timing(struct pxafb_info *fbi,
+                               struct fb_var_screeninfo *var)
+{
+       struct pxafb_mach_info *inf = fbi->dev->platform_data;
+       struct pxafb_mode_info *mode = &inf->modes[0];
+       unsigned long lclk = clk_get_rate(fbi->clk);
+       unsigned t1, t2, t3, t4;
+
+       t1 = max(mode->a0csrd_set_hld, mode->a0cswr_set_hld);
+       t2 = max(mode->rd_pulse_width, mode->wr_pulse_width);
+       t3 = mode->op_hold_time;
+       t4 = mode->cmd_inh_time;
+
+       fbi->reg_lccr1 =
+               LCCR1_DisWdth(var->xres) |
+               LCCR1_BegLnDel(__smart_timing(t1, lclk)) |
+               LCCR1_EndLnDel(__smart_timing(t2, lclk)) |
+               LCCR1_HorSnchWdth(__smart_timing(t3, lclk));
+
+       fbi->reg_lccr2 = LCCR2_DisHght(var->yres);
+       fbi->reg_lccr3 = LCCR3_PixClkDiv(__smart_timing(t4, lclk));
+
+       /* FIXME: make this configurable */
+       fbi->reg_cmdcr = 1;
+}
+
+static int pxafb_smart_thread(void *arg)
+{
+       struct pxafb_info *fbi = arg;
+       struct pxafb_mach_info *inf = fbi->dev->platform_data;
+
+       if (!fbi || !inf->smart_update) {
+               pr_err("%s: not properly initialized, thread terminated\n",
+                               __func__);
+               return -EINVAL;
+       }
 
-       new_regs.lccr1 =
+       pr_debug("%s(): task starting\n", __func__);
+
+       set_freezable();
+       while (!kthread_should_stop()) {
+
+               if (try_to_freeze())
+                       continue;
+
+               if (fbi->state == C_ENABLE) {
+                       inf->smart_update(&fbi->fb);
+                       complete(&fbi->refresh_done);
+               }
+
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule_timeout(30 * HZ / 1000);
+       }
+
+       pr_debug("%s(): task ending\n", __func__);
+       return 0;
+}
+
+static int pxafb_smart_init(struct pxafb_info *fbi)
+{
+       fbi->smart_thread = kthread_run(pxafb_smart_thread, fbi,
+                                       "lcd_refresh");
+       if (IS_ERR(fbi->smart_thread)) {
+               printk(KERN_ERR "%s: unable to create kernel thread\n",
+                               __func__);
+               return PTR_ERR(fbi->smart_thread);
+       }
+       return 0;
+}
+#else
+int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int n_cmds)
+{
+       return 0;
+}
+
+int pxafb_smart_flush(struct fb_info *info)
+{
+       return 0;
+}
+#endif /* CONFIG_FB_SMART_PANEL */
+
+static void setup_parallel_timing(struct pxafb_info *fbi,
+                                 struct fb_var_screeninfo *var)
+{
+       unsigned int lines_per_panel, pcd = get_pcd(fbi, var->pixclock);
+
+       fbi->reg_lccr1 =
                LCCR1_DisWdth(var->xres) +
                LCCR1_HorSnchWdth(var->hsync_len) +
                LCCR1_BegLnDel(var->left_margin) +
@@ -654,110 +792,118 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *
        if ((fbi->lccr0 & LCCR0_SDS) == LCCR0_Dual)
                lines_per_panel /= 2;
 
-       new_regs.lccr2 =
+       fbi->reg_lccr2 =
                LCCR2_DisHght(lines_per_panel) +
                LCCR2_VrtSnchWdth(var->vsync_len) +
                LCCR2_BegFrmDel(var->upper_margin) +
                LCCR2_EndFrmDel(var->lower_margin);
 
-       new_regs.lccr3 = fbi->lccr3 |
-               pxafb_bpp_to_lccr3(var) |
-               (var->sync & FB_SYNC_HOR_HIGH_ACT ? LCCR3_HorSnchH : LCCR3_HorSnchL) |
-               (var->sync & FB_SYNC_VERT_HIGH_ACT ? LCCR3_VrtSnchH : LCCR3_VrtSnchL);
+       fbi->reg_lccr3 = fbi->lccr3 |
+               (var->sync & FB_SYNC_HOR_HIGH_ACT ?
+                LCCR3_HorSnchH : LCCR3_HorSnchL) |
+               (var->sync & FB_SYNC_VERT_HIGH_ACT ?
+                LCCR3_VrtSnchH : LCCR3_VrtSnchL);
+
+       if (pcd) {
+               fbi->reg_lccr3 |= LCCR3_PixClkDiv(pcd);
+               set_hsync_time(fbi, pcd);
+       }
+}
 
-       if (pcd)
-               new_regs.lccr3 |= LCCR3_PixClkDiv(pcd);
+/*
+ * pxafb_activate_var():
+ *     Configures LCD Controller based on entries in var parameter.
+ *     Settings are only written to the controller if changes were made.
+ */
+static int pxafb_activate_var(struct fb_var_screeninfo *var,
+                             struct pxafb_info *fbi)
+{
+       u_long flags;
+       size_t nbytes;
 
-       pr_debug("nlccr0 = 0x%08x\n", new_regs.lccr0);
-       pr_debug("nlccr1 = 0x%08x\n", new_regs.lccr1);
-       pr_debug("nlccr2 = 0x%08x\n", new_regs.lccr2);
-       pr_debug("nlccr3 = 0x%08x\n", new_regs.lccr3);
+#if DEBUG_VAR
+       if (!(fbi->lccr0 & LCCR0_LCDT)) {
+               if (var->xres < 16 || var->xres > 1024)
+                       printk(KERN_ERR "%s: invalid xres %d\n",
+                               fbi->fb.fix.id, var->xres);
+               switch (var->bits_per_pixel) {
+               case 1:
+               case 2:
+               case 4:
+               case 8:
+               case 16:
+                       break;
+               default:
+                       printk(KERN_ERR "%s: invalid bit depth %d\n",
+                              fbi->fb.fix.id, var->bits_per_pixel);
+                       break;
+               }
 
+               if (var->hsync_len < 1 || var->hsync_len > 64)
+                       printk(KERN_ERR "%s: invalid hsync_len %d\n",
+                               fbi->fb.fix.id, var->hsync_len);
+               if (var->left_margin < 1 || var->left_margin > 255)
+                       printk(KERN_ERR "%s: invalid left_margin %d\n",
+                               fbi->fb.fix.id, var->left_margin);
+               if (var->right_margin < 1 || var->right_margin > 255)
+                       printk(KERN_ERR "%s: invalid right_margin %d\n",
+                               fbi->fb.fix.id, var->right_margin);
+               if (var->yres < 1 || var->yres > 1024)
+                       printk(KERN_ERR "%s: invalid yres %d\n",
+                               fbi->fb.fix.id, var->yres);
+               if (var->vsync_len < 1 || var->vsync_len > 64)
+                       printk(KERN_ERR "%s: invalid vsync_len %d\n",
+                               fbi->fb.fix.id, var->vsync_len);
+               if (var->upper_margin < 0 || var->upper_margin > 255)
+                       printk(KERN_ERR "%s: invalid upper_margin %d\n",
+                               fbi->fb.fix.id, var->upper_margin);
+               if (var->lower_margin < 0 || var->lower_margin > 255)
+                       printk(KERN_ERR "%s: invalid lower_margin %d\n",
+                               fbi->fb.fix.id, var->lower_margin);
+       }
+#endif
        /* Update shadow copy atomically */
        local_irq_save(flags);
 
-       /* setup dma descriptors */
-       fbi->dmadesc_fblow_cpu = (struct pxafb_dma_descriptor *)((unsigned int)fbi->palette_cpu - 3*16);
-       fbi->dmadesc_fbhigh_cpu = (struct pxafb_dma_descriptor *)((unsigned int)fbi->palette_cpu - 2*16);
-       fbi->dmadesc_palette_cpu = (struct pxafb_dma_descriptor *)((unsigned int)fbi->palette_cpu - 1*16);
-
-       fbi->dmadesc_fblow_dma = fbi->palette_dma - 3*16;
-       fbi->dmadesc_fbhigh_dma = fbi->palette_dma - 2*16;
-       fbi->dmadesc_palette_dma = fbi->palette_dma - 1*16;
-
-#define BYTES_PER_PANEL (lines_per_panel * fbi->fb.fix.line_length)
-
-       /* populate descriptors */
-       fbi->dmadesc_fblow_cpu->fdadr = fbi->dmadesc_fblow_dma;
-       fbi->dmadesc_fblow_cpu->fsadr = fbi->screen_dma + BYTES_PER_PANEL;
-       fbi->dmadesc_fblow_cpu->fidr  = 0;
-       fbi->dmadesc_fblow_cpu->ldcmd = BYTES_PER_PANEL;
+#ifdef CONFIG_FB_PXA_SMARTPANEL
+       if (fbi->lccr0 & LCCR0_LCDT)
+               setup_smart_timing(fbi, var);
+       else
+#endif
+               setup_parallel_timing(fbi, var);
 
-       fbi->fdadr1 = fbi->dmadesc_fblow_dma; /* only used in dual-panel mode */
+       fbi->reg_lccr0 = fbi->lccr0 |
+               (LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM |
+                LCCR0_QDM | LCCR0_BM  | LCCR0_OUM);
 
-       fbi->dmadesc_fbhigh_cpu->fsadr = fbi->screen_dma;
-       fbi->dmadesc_fbhigh_cpu->fidr = 0;
-       fbi->dmadesc_fbhigh_cpu->ldcmd = BYTES_PER_PANEL;
+       fbi->reg_lccr3 |= pxafb_bpp_to_lccr3(var);
 
-       fbi->dmadesc_palette_cpu->fsadr = fbi->palette_dma;
-       fbi->dmadesc_palette_cpu->fidr  = 0;
-       if ((fbi->lccr4 & LCCR4_PAL_FOR_MASK) == LCCR4_PAL_FOR_0)
-               fbi->dmadesc_palette_cpu->ldcmd = fbi->palette_size *
-                                                       sizeof(u16);
-       else
-               fbi->dmadesc_palette_cpu->ldcmd = fbi->palette_size *
-                                                       sizeof(u32);
-       fbi->dmadesc_palette_cpu->ldcmd |= LDCMD_PAL;
+       nbytes = var->yres * fbi->fb.fix.line_length;
 
-       if (var->bits_per_pixel == 16) {
-               /* palette shouldn't be loaded in true-color mode */
-               fbi->dmadesc_fbhigh_cpu->fdadr = fbi->dmadesc_fbhigh_dma;
-               fbi->fdadr0 = fbi->dmadesc_fbhigh_dma; /* no pal just fbhigh */
-               /* init it to something, even though we won't be using it */
-               fbi->dmadesc_palette_cpu->fdadr = fbi->dmadesc_palette_dma;
-       } else {
-               fbi->dmadesc_palette_cpu->fdadr = fbi->dmadesc_fbhigh_dma;
-               fbi->dmadesc_fbhigh_cpu->fdadr = fbi->dmadesc_palette_dma;
-               fbi->fdadr0 = fbi->dmadesc_palette_dma; /* flips back and forth between pal and fbhigh */
+       if ((fbi->lccr0 & LCCR0_SDS) == LCCR0_Dual) {
+               nbytes = nbytes / 2;
+               setup_frame_dma(fbi, DMA_LOWER, PAL_NONE, nbytes, nbytes);
        }
 
-#if 0
-       pr_debug("fbi->dmadesc_fblow_cpu = 0x%p\n", fbi->dmadesc_fblow_cpu);
-       pr_debug("fbi->dmadesc_fbhigh_cpu = 0x%p\n", fbi->dmadesc_fbhigh_cpu);
-       pr_debug("fbi->dmadesc_palette_cpu = 0x%p\n", fbi->dmadesc_palette_cpu);
-       pr_debug("fbi->dmadesc_fblow_dma = 0x%x\n", fbi->dmadesc_fblow_dma);
-       pr_debug("fbi->dmadesc_fbhigh_dma = 0x%x\n", fbi->dmadesc_fbhigh_dma);
-       pr_debug("fbi->dmadesc_palette_dma = 0x%x\n", fbi->dmadesc_palette_dma);
-
-       pr_debug("fbi->dmadesc_fblow_cpu->fdadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fdadr);
-       pr_debug("fbi->dmadesc_fbhigh_cpu->fdadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fdadr);
-       pr_debug("fbi->dmadesc_palette_cpu->fdadr = 0x%x\n", fbi->dmadesc_palette_cpu->fdadr);
-
-       pr_debug("fbi->dmadesc_fblow_cpu->fsadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fsadr);
-       pr_debug("fbi->dmadesc_fbhigh_cpu->fsadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fsadr);
-       pr_debug("fbi->dmadesc_palette_cpu->fsadr = 0x%x\n", fbi->dmadesc_palette_cpu->fsadr);
-
-       pr_debug("fbi->dmadesc_fblow_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fblow_cpu->ldcmd);
-       pr_debug("fbi->dmadesc_fbhigh_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fbhigh_cpu->ldcmd);
-       pr_debug("fbi->dmadesc_palette_cpu->ldcmd = 0x%x\n", fbi->dmadesc_palette_cpu->ldcmd);
-#endif
+       if ((var->bits_per_pixel >= 16) || (fbi->lccr0 & LCCR0_LCDT))
+               setup_frame_dma(fbi, DMA_BASE, PAL_NONE, 0, nbytes);
+       else
+               setup_frame_dma(fbi, DMA_BASE, PAL_BASE, 0, nbytes);
 
-       fbi->reg_lccr0 = new_regs.lccr0;
-       fbi->reg_lccr1 = new_regs.lccr1;
-       fbi->reg_lccr2 = new_regs.lccr2;
-       fbi->reg_lccr3 = new_regs.lccr3;
-       fbi->reg_lccr4 = LCCR4 & (~LCCR4_PAL_FOR_MASK);
+       fbi->reg_lccr4 = lcd_readl(fbi, LCCR4) & ~LCCR4_PAL_FOR_MASK;
        fbi->reg_lccr4 |= (fbi->lccr4 & LCCR4_PAL_FOR_MASK);
-       set_hsync_time(fbi, pcd);
        local_irq_restore(flags);
 
        /*
         * Only update the registers if the controller is enabled
         * and something has changed.
         */
-       if ((LCCR0  != fbi->reg_lccr0) || (LCCR1  != fbi->reg_lccr1) ||
-           (LCCR2  != fbi->reg_lccr2) || (LCCR3  != fbi->reg_lccr3) ||
-           (FDADR0 != fbi->fdadr0)    || (FDADR1 != fbi->fdadr1))
+       if ((lcd_readl(fbi, LCCR0) != fbi->reg_lccr0) ||
+           (lcd_readl(fbi, LCCR1) != fbi->reg_lccr1) ||
+           (lcd_readl(fbi, LCCR2) != fbi->reg_lccr2) ||
+           (lcd_readl(fbi, LCCR3) != fbi->reg_lccr3) ||
+           (lcd_readl(fbi, FDADR0) != fbi->fdadr[0]) ||
+           (lcd_readl(fbi, FDADR1) != fbi->fdadr[1]))
                pxafb_schedule_work(fbi, C_REENABLE);
 
        return 0;
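
pxafb_activate_var() now compares and programs the controller through lcd_readl()/lcd_writel() instead of dereferencing the global LCCR0/FDADR0 register macros. The accessors themselves are defined earlier in the patch, outside this excerpt; they are presumably thin wrappers around the register window ioremap()'d in pxafb_probe(), roughly:

    /* Minimal sketch, assuming plain MMIO access on fbi->mmio_base. */
    static inline unsigned long lcd_readl(struct pxafb_info *fbi, unsigned int off)
    {
            return __raw_readl(fbi->mmio_base + off);
    }

    static inline void lcd_writel(struct pxafb_info *fbi, unsigned int off,
                                  unsigned long val)
    {
            __raw_writel(val, fbi->mmio_base + off);
    }
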
@@ -773,8 +919,8 @@ static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on)
 {
        pr_debug("pxafb: backlight o%s\n", on ? "n" : "ff");
 
-       if (pxafb_backlight_power)
-               pxafb_backlight_power(on);
+       if (pxafb_backlight_power)
+               pxafb_backlight_power(on);
 }
 
 static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on)
@@ -788,11 +934,11 @@ static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on)
 static void pxafb_setup_gpio(struct pxafb_info *fbi)
 {
        int gpio, ldd_bits;
-        unsigned int lccr0 = fbi->lccr0;
+       unsigned int lccr0 = fbi->lccr0;
 
        /*
         * setup is based on type of panel supported
-        */
+        */
 
        /* 4 bit interface */
        if ((lccr0 & LCCR0_CMS) == LCCR0_Mono &&
@@ -801,21 +947,25 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi)
                ldd_bits = 4;
 
        /* 8 bit interface */
-        else if (((lccr0 & LCCR0_CMS) == LCCR0_Mono &&
-                 ((lccr0 & LCCR0_SDS) == LCCR0_Dual || (lccr0 & LCCR0_DPD) == LCCR0_8PixMono)) ||
-                 ((lccr0 & LCCR0_CMS) == LCCR0_Color &&
-                 (lccr0 & LCCR0_PAS) == LCCR0_Pas && (lccr0 & LCCR0_SDS) == LCCR0_Sngl))
+       else if (((lccr0 & LCCR0_CMS) == LCCR0_Mono &&
+                 ((lccr0 & LCCR0_SDS) == LCCR0_Dual ||
+                  (lccr0 & LCCR0_DPD) == LCCR0_8PixMono)) ||
+                ((lccr0 & LCCR0_CMS) == LCCR0_Color &&
+                 (lccr0 & LCCR0_PAS) == LCCR0_Pas &&
+                 (lccr0 & LCCR0_SDS) == LCCR0_Sngl))
                ldd_bits = 8;
 
        /* 16 bit interface */
        else if ((lccr0 & LCCR0_CMS) == LCCR0_Color &&
-                ((lccr0 & LCCR0_SDS) == LCCR0_Dual || (lccr0 & LCCR0_PAS) == LCCR0_Act))
+                ((lccr0 & LCCR0_SDS) == LCCR0_Dual ||
+                 (lccr0 & LCCR0_PAS) == LCCR0_Act))
                ldd_bits = 16;
 
        else {
-               printk(KERN_ERR "pxafb_setup_gpio: unable to determine bits per pixel\n");
+               printk(KERN_ERR "pxafb_setup_gpio: unable to determine "
+                              "bits per pixel\n");
                return;
-        }
+       }
 
        for (gpio = 58; ldd_bits; gpio++, ldd_bits--)
                pxa_gpio_mode(gpio | GPIO_ALT_FN_2_OUT);
@@ -828,8 +978,8 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi)
 static void pxafb_enable_controller(struct pxafb_info *fbi)
 {
        pr_debug("pxafb: Enabling LCD controller\n");
-       pr_debug("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr0);
-       pr_debug("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr1);
+       pr_debug("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr[0]);
+       pr_debug("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr[1]);
        pr_debug("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0);
        pr_debug("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1);
        pr_debug("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2);
@@ -838,40 +988,40 @@ static void pxafb_enable_controller(struct pxafb_info *fbi)
        /* enable LCD controller clock */
        clk_enable(fbi->clk);
 
+       if (fbi->lccr0 & LCCR0_LCDT)
+               return;
+
        /* Sequence from 11.7.10 */
-       LCCR3 = fbi->reg_lccr3;
-       LCCR2 = fbi->reg_lccr2;
-       LCCR1 = fbi->reg_lccr1;
-       LCCR0 = fbi->reg_lccr0 & ~LCCR0_ENB;
-
-       FDADR0 = fbi->fdadr0;
-       FDADR1 = fbi->fdadr1;
-       LCCR0 |= LCCR0_ENB;
-
-       pr_debug("FDADR0 0x%08x\n", (unsigned int) FDADR0);
-       pr_debug("FDADR1 0x%08x\n", (unsigned int) FDADR1);
-       pr_debug("LCCR0 0x%08x\n", (unsigned int) LCCR0);
-       pr_debug("LCCR1 0x%08x\n", (unsigned int) LCCR1);
-       pr_debug("LCCR2 0x%08x\n", (unsigned int) LCCR2);
-       pr_debug("LCCR3 0x%08x\n", (unsigned int) LCCR3);
-       pr_debug("LCCR4 0x%08x\n", (unsigned int) LCCR4);
+       lcd_writel(fbi, LCCR3, fbi->reg_lccr3);
+       lcd_writel(fbi, LCCR2, fbi->reg_lccr2);
+       lcd_writel(fbi, LCCR1, fbi->reg_lccr1);
+       lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB);
+
+       lcd_writel(fbi, FDADR0, fbi->fdadr[0]);
+       lcd_writel(fbi, FDADR1, fbi->fdadr[1]);
+       lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB);
 }
 
 static void pxafb_disable_controller(struct pxafb_info *fbi)
 {
-       DECLARE_WAITQUEUE(wait, current);
+       uint32_t lccr0;
 
-       pr_debug("pxafb: disabling LCD controller\n");
+#ifdef CONFIG_FB_PXA_SMARTPANEL
+       if (fbi->lccr0 & LCCR0_LCDT) {
+               wait_for_completion_timeout(&fbi->refresh_done,
+                               200 * HZ / 1000);
+               return;
+       }
+#endif
 
-       set_current_state(TASK_UNINTERRUPTIBLE);
-       add_wait_queue(&fbi->ctrlr_wait, &wait);
+       /* Clear LCD Status Register */
+       lcd_writel(fbi, LCSR, 0xffffffff);
 
-       LCSR = 0xffffffff;      /* Clear LCD Status Register */
-       LCCR0 &= ~LCCR0_LDM;    /* Enable LCD Disable Done Interrupt */
-       LCCR0 |= LCCR0_DIS;     /* Disable LCD Controller */
+       lccr0 = lcd_readl(fbi, LCCR0) & ~LCCR0_LDM;
+       lcd_writel(fbi, LCCR0, lccr0);
+       lcd_writel(fbi, LCCR0, lccr0 | LCCR0_DIS);
 
-       schedule_timeout(200 * HZ / 1000);
-       remove_wait_queue(&fbi->ctrlr_wait, &wait);
+       wait_for_completion_timeout(&fbi->disable_done, 200 * HZ / 1000);
 
        /* disable LCD controller clock */
        clk_disable(fbi->clk);
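
pxafb_disable_controller() now hands the shutdown handshake to a completion: it clears the LDM mask so the disable-done interrupt can fire, sets LCCR0_DIS, and sleeps in wait_for_completion_timeout(&fbi->disable_done, ...). The matching complete() call sits in pxafb_handle_irq() below, which runs when LCSR_LDD is raised, so the wake-up is recorded in the completion itself rather than in the state of a task parked on ctrlr_wait.
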
@@ -883,14 +1033,20 @@ static void pxafb_disable_controller(struct pxafb_info *fbi)
 static irqreturn_t pxafb_handle_irq(int irq, void *dev_id)
 {
        struct pxafb_info *fbi = dev_id;
-       unsigned int lcsr = LCSR;
+       unsigned int lccr0, lcsr = lcd_readl(fbi, LCSR);
 
        if (lcsr & LCSR_LDD) {
-               LCCR0 |= LCCR0_LDM;
-               wake_up(&fbi->ctrlr_wait);
+               lccr0 = lcd_readl(fbi, LCCR0);
+               lcd_writel(fbi, LCCR0, lccr0 | LCCR0_LDM);
+               complete(&fbi->disable_done);
        }
 
-       LCSR = lcsr;
+#ifdef CONFIG_FB_PXA_SMARTPANEL
+       if (lcsr & LCSR_CMD_INT)
+               complete(&fbi->command_done);
+#endif
+
+       lcd_writel(fbi, LCSR, lcsr);
        return IRQ_HANDLED;
 }
 
@@ -921,7 +1077,7 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state)
                 */
                if (old_state != C_DISABLE && old_state != C_DISABLE_PM) {
                        fbi->state = state;
-                       //TODO __pxafb_lcd_power(fbi, 0);
+                       /* TODO __pxafb_lcd_power(fbi, 0); */
                        pxafb_disable_controller(fbi);
                }
                break;
@@ -948,7 +1104,7 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state)
                if (old_state == C_DISABLE_CLKCHANGE) {
                        fbi->state = C_ENABLE;
                        pxafb_enable_controller(fbi);
-                       //TODO __pxafb_lcd_power(fbi, 1);
+                       /* TODO __pxafb_lcd_power(fbi, 1); */
                }
                break;
 
@@ -1019,7 +1175,7 @@ static int
 pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data)
 {
        struct pxafb_info *fbi = TO_INF(nb, freq_transition);
-       //TODO struct cpufreq_freqs *f = data;
+       /* TODO struct cpufreq_freqs *f = data; */
        u_int pcd;
 
        switch (val) {
@@ -1030,7 +1186,8 @@ pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data)
        case CPUFREQ_POSTCHANGE:
                pcd = get_pcd(fbi, fbi->fb.var.pixclock);
                set_hsync_time(fbi, pcd);
-               fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) | LCCR3_PixClkDiv(pcd);
+               fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) |
+                                 LCCR3_PixClkDiv(pcd);
                set_ctrlr_state(fbi, C_ENABLE_CLKCHANGE);
                break;
        }
@@ -1050,18 +1207,8 @@ pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data)
                pr_debug("min dma period: %d ps, "
                        "new clock %d kHz\n", pxafb_display_dma_period(var),
                        policy->max);
-               // TODO: fill in min/max values
-               break;
-#if 0
-       case CPUFREQ_NOTIFY:
-               printk(KERN_ERR "%s: got CPUFREQ_NOTIFY\n", __FUNCTION__);
-               do {} while(0);
-               /* todo: panic if min/max values aren't fulfilled
-                * [can't really happen unless there's a bug in the
-                * CPU policy verification process *
-                */
+               /* TODO: fill in min/max values */
                break;
-#endif
        }
        return 0;
 }
@@ -1102,21 +1249,21 @@ static int pxafb_resume(struct platform_device *dev)
  */
 static int __init pxafb_map_video_memory(struct pxafb_info *fbi)
 {
-       u_long palette_mem_size;
-
        /*
         * We reserve one page for the palette, plus the size
         * of the framebuffer.
         */
-       fbi->map_size = PAGE_ALIGN(fbi->fb.fix.smem_len + PAGE_SIZE);
+       fbi->video_offset = PAGE_ALIGN(sizeof(struct pxafb_dma_buff));
+       fbi->map_size = PAGE_ALIGN(fbi->fb.fix.smem_len + fbi->video_offset);
        fbi->map_cpu = dma_alloc_writecombine(fbi->dev, fbi->map_size,
                                              &fbi->map_dma, GFP_KERNEL);
 
        if (fbi->map_cpu) {
                /* prevent initial garbage on screen */
                memset(fbi->map_cpu, 0, fbi->map_size);
-               fbi->fb.screen_base = fbi->map_cpu + PAGE_SIZE;
-               fbi->screen_dma = fbi->map_dma + PAGE_SIZE;
+               fbi->fb.screen_base = fbi->map_cpu + fbi->video_offset;
+               fbi->screen_dma = fbi->map_dma + fbi->video_offset;
+
                /*
                 * FIXME: this is actually the wrong thing to place in
                 * smem_start.  But fbdev suffers from the problem that
@@ -1126,27 +1273,86 @@ static int __init pxafb_map_video_memory(struct pxafb_info *fbi)
                fbi->fb.fix.smem_start = fbi->screen_dma;
                fbi->palette_size = fbi->fb.var.bits_per_pixel == 8 ? 256 : 16;
 
-               if ((fbi->lccr4 & LCCR4_PAL_FOR_MASK) == LCCR4_PAL_FOR_0)
-                       palette_mem_size = fbi->palette_size * sizeof(u16);
-               else
-                       palette_mem_size = fbi->palette_size * sizeof(u32);
+               fbi->dma_buff = (void *) fbi->map_cpu;
+               fbi->dma_buff_phys = fbi->map_dma;
+               fbi->palette_cpu = (u16 *) fbi->dma_buff->palette;
 
-               pr_debug("pxafb: palette_mem_size = 0x%08lx\n", palette_mem_size);
-
-               fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size);
-               fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size;
+#ifdef CONFIG_FB_PXA_SMARTPANEL
+               fbi->smart_cmds = (uint16_t *) fbi->dma_buff->cmd_buff;
+               fbi->n_smart_cmds = 0;
+#endif
        }
 
        return fbi->map_cpu ? 0 : -ENOMEM;
 }
 
+static void pxafb_decode_mode_info(struct pxafb_info *fbi,
+                                  struct pxafb_mode_info *modes,
+                                  unsigned int num_modes)
+{
+       unsigned int i, smemlen;
+
+       pxafb_setmode(&fbi->fb.var, &modes[0]);
+
+       for (i = 0; i < num_modes; i++) {
+               smemlen = modes[i].xres * modes[i].yres * modes[i].bpp / 8;
+               if (smemlen > fbi->fb.fix.smem_len)
+                       fbi->fb.fix.smem_len = smemlen;
+       }
+}
+
+static int pxafb_decode_mach_info(struct pxafb_info *fbi,
+                                 struct pxafb_mach_info *inf)
+{
+       unsigned int lcd_conn = inf->lcd_conn;
+
+       fbi->cmap_inverse       = inf->cmap_inverse;
+       fbi->cmap_static        = inf->cmap_static;
+
+       switch (lcd_conn & 0xf) {
+       case LCD_TYPE_MONO_STN:
+               fbi->lccr0 = LCCR0_CMS;
+               break;
+       case LCD_TYPE_MONO_DSTN:
+               fbi->lccr0 = LCCR0_CMS | LCCR0_SDS;
+               break;
+       case LCD_TYPE_COLOR_STN:
+               fbi->lccr0 = 0;
+               break;
+       case LCD_TYPE_COLOR_DSTN:
+               fbi->lccr0 = LCCR0_SDS;
+               break;
+       case LCD_TYPE_COLOR_TFT:
+               fbi->lccr0 = LCCR0_PAS;
+               break;
+       case LCD_TYPE_SMART_PANEL:
+               fbi->lccr0 = LCCR0_LCDT | LCCR0_PAS;
+               break;
+       default:
+               /* fall back to the backward-compatible way */
+               fbi->lccr0 = inf->lccr0;
+               fbi->lccr3 = inf->lccr3;
+               fbi->lccr4 = inf->lccr4;
+               return -EINVAL;
+       }
+
+       if (lcd_conn == LCD_MONO_STN_8BPP)
+               fbi->lccr0 |= LCCR0_DPD;
+
+       fbi->lccr3 = LCCR3_Acb((inf->lcd_conn >> 10) & 0xff);
+       fbi->lccr3 |= (lcd_conn & LCD_BIAS_ACTIVE_LOW) ? LCCR3_OEP : 0;
+       fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL)  ? LCCR3_PCP : 0;
+
+       pxafb_decode_mode_info(fbi, inf->modes, inf->num_modes);
+       return 0;
+}
+
 static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev)
 {
        struct pxafb_info *fbi;
        void *addr;
        struct pxafb_mach_info *inf = dev->platform_data;
        struct pxafb_mode_info *mode = inf->modes;
-       int i, smemlen;
 
        /* Alloc the pxafb_info and pseudo_palette in one step */
        fbi = kmalloc(sizeof(struct pxafb_info) + sizeof(u32) * 16, GFP_KERNEL);
@@ -1186,187 +1392,233 @@ static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev)
        addr = addr + sizeof(struct pxafb_info);
        fbi->fb.pseudo_palette  = addr;
 
-       pxafb_setmode(&fbi->fb.var, mode);
+       fbi->state              = C_STARTUP;
+       fbi->task_state         = (u_char)-1;
 
-       fbi->cmap_inverse               = inf->cmap_inverse;
-       fbi->cmap_static                = inf->cmap_static;
-
-       fbi->lccr0                      = inf->lccr0;
-       fbi->lccr3                      = inf->lccr3;
-       fbi->lccr4                      = inf->lccr4;
-       fbi->state                      = C_STARTUP;
-       fbi->task_state                 = (u_char)-1;
-
-       for (i = 0; i < inf->num_modes; i++) {
-               smemlen = mode[i].xres * mode[i].yres * mode[i].bpp / 8;
-               if (smemlen > fbi->fb.fix.smem_len)
-                       fbi->fb.fix.smem_len = smemlen;
-       }
+       pxafb_decode_mach_info(fbi, inf);
 
        init_waitqueue_head(&fbi->ctrlr_wait);
        INIT_WORK(&fbi->task, pxafb_task);
        init_MUTEX(&fbi->ctrlr_sem);
+       init_completion(&fbi->disable_done);
+#ifdef CONFIG_FB_PXA_SMARTPANEL
+       init_completion(&fbi->command_done);
+       init_completion(&fbi->refresh_done);
+#endif
 
        return fbi;
 }
 
 #ifdef CONFIG_FB_PXA_PARAMETERS
-static int __init pxafb_parse_options(struct device *dev, char *options)
+static int __init parse_opt_mode(struct device *dev, const char *this_opt)
+{
+       struct pxafb_mach_info *inf = dev->platform_data;
+
+       const char *name = this_opt+5;
+       unsigned int namelen = strlen(name);
+       int res_specified = 0, bpp_specified = 0;
+       unsigned int xres = 0, yres = 0, bpp = 0;
+       int yres_specified = 0;
+       int i;
+       for (i = namelen-1; i >= 0; i--) {
+               switch (name[i]) {
+               case '-':
+                       namelen = i;
+                       if (!bpp_specified && !yres_specified) {
+                               bpp = simple_strtoul(&name[i+1], NULL, 0);
+                               bpp_specified = 1;
+                       } else
+                               goto done;
+                       break;
+               case 'x':
+                       if (!yres_specified) {
+                               yres = simple_strtoul(&name[i+1], NULL, 0);
+                               yres_specified = 1;
+                       } else
+                               goto done;
+                       break;
+               case '0' ... '9':
+                       break;
+               default:
+                       goto done;
+               }
+       }
+       if (i < 0 && yres_specified) {
+               xres = simple_strtoul(name, NULL, 0);
+               res_specified = 1;
+       }
+done:
+       if (res_specified) {
+               dev_info(dev, "overriding resolution: %dx%d\n", xres, yres);
+               inf->modes[0].xres = xres; inf->modes[0].yres = yres;
+       }
+       if (bpp_specified)
+               switch (bpp) {
+               case 1:
+               case 2:
+               case 4:
+               case 8:
+               case 16:
+                       inf->modes[0].bpp = bpp;
+                       dev_info(dev, "overriding bit depth: %d\n", bpp);
+                       break;
+               default:
+                       dev_err(dev, "Depth %d is not valid\n", bpp);
+                       return -EINVAL;
+               }
+       return 0;
+}
+
+static int __init parse_opt(struct device *dev, char *this_opt)
 {
        struct pxafb_mach_info *inf = dev->platform_data;
+       struct pxafb_mode_info *mode = &inf->modes[0];
+       char s[64];
+
+       s[0] = '\0';
+
+       if (!strncmp(this_opt, "mode:", 5)) {
+               return parse_opt_mode(dev, this_opt);
+       } else if (!strncmp(this_opt, "pixclock:", 9)) {
+               mode->pixclock = simple_strtoul(this_opt+9, NULL, 0);
+               sprintf(s, "pixclock: %ld\n", mode->pixclock);
+       } else if (!strncmp(this_opt, "left:", 5)) {
+               mode->left_margin = simple_strtoul(this_opt+5, NULL, 0);
+               sprintf(s, "left: %u\n", mode->left_margin);
+       } else if (!strncmp(this_opt, "right:", 6)) {
+               mode->right_margin = simple_strtoul(this_opt+6, NULL, 0);
+               sprintf(s, "right: %u\n", mode->right_margin);
+       } else if (!strncmp(this_opt, "upper:", 6)) {
+               mode->upper_margin = simple_strtoul(this_opt+6, NULL, 0);
+               sprintf(s, "upper: %u\n", mode->upper_margin);
+       } else if (!strncmp(this_opt, "lower:", 6)) {
+               mode->lower_margin = simple_strtoul(this_opt+6, NULL, 0);
+               sprintf(s, "lower: %u\n", mode->lower_margin);
+       } else if (!strncmp(this_opt, "hsynclen:", 9)) {
+               mode->hsync_len = simple_strtoul(this_opt+9, NULL, 0);
+               sprintf(s, "hsynclen: %u\n", mode->hsync_len);
+       } else if (!strncmp(this_opt, "vsynclen:", 9)) {
+               mode->vsync_len = simple_strtoul(this_opt+9, NULL, 0);
+               sprintf(s, "vsynclen: %u\n", mode->vsync_len);
+       } else if (!strncmp(this_opt, "hsync:", 6)) {
+               if (simple_strtoul(this_opt+6, NULL, 0) == 0) {
+                       sprintf(s, "hsync: Active Low\n");
+                       mode->sync &= ~FB_SYNC_HOR_HIGH_ACT;
+               } else {
+                       sprintf(s, "hsync: Active High\n");
+                       mode->sync |= FB_SYNC_HOR_HIGH_ACT;
+               }
+       } else if (!strncmp(this_opt, "vsync:", 6)) {
+               if (simple_strtoul(this_opt+6, NULL, 0) == 0) {
+                       sprintf(s, "vsync: Active Low\n");
+                       mode->sync &= ~FB_SYNC_VERT_HIGH_ACT;
+               } else {
+                       sprintf(s, "vsync: Active High\n");
+                       mode->sync |= FB_SYNC_VERT_HIGH_ACT;
+               }
+       } else if (!strncmp(this_opt, "dpc:", 4)) {
+               if (simple_strtoul(this_opt+4, NULL, 0) == 0) {
+                       sprintf(s, "double pixel clock: false\n");
+                       inf->lccr3 &= ~LCCR3_DPC;
+               } else {
+                       sprintf(s, "double pixel clock: true\n");
+                       inf->lccr3 |= LCCR3_DPC;
+               }
+       } else if (!strncmp(this_opt, "outputen:", 9)) {
+               if (simple_strtoul(this_opt+9, NULL, 0) == 0) {
+                       sprintf(s, "output enable: active low\n");
+                       inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnL;
+               } else {
+                       sprintf(s, "output enable: active high\n");
+                       inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnH;
+               }
+       } else if (!strncmp(this_opt, "pixclockpol:", 12)) {
+               if (simple_strtoul(this_opt+12, NULL, 0) == 0) {
+                       sprintf(s, "pixel clock polarity: falling edge\n");
+                       inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixFlEdg;
+               } else {
+                       sprintf(s, "pixel clock polarity: rising edge\n");
+                       inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixRsEdg;
+               }
+       } else if (!strncmp(this_opt, "color", 5)) {
+               inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Color;
+       } else if (!strncmp(this_opt, "mono", 4)) {
+               inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Mono;
+       } else if (!strncmp(this_opt, "active", 6)) {
+               inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Act;
+       } else if (!strncmp(this_opt, "passive", 7)) {
+               inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Pas;
+       } else if (!strncmp(this_opt, "single", 6)) {
+               inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Sngl;
+       } else if (!strncmp(this_opt, "dual", 4)) {
+               inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Dual;
+       } else if (!strncmp(this_opt, "4pix", 4)) {
+               inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_4PixMono;
+       } else if (!strncmp(this_opt, "8pix", 4)) {
+               inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_8PixMono;
+       } else {
+               dev_err(dev, "unknown option: %s\n", this_opt);
+               return -EINVAL;
+       }
+
+       if (s[0] != '\0')
+               dev_info(dev, "override %s", s);
+
+       return 0;
+}
+
+static int __init pxafb_parse_options(struct device *dev, char *options)
+{
        char *this_opt;
+       int ret;
 
-        if (!options || !*options)
-                return 0;
+       if (!options || !*options)
+               return 0;
 
        dev_dbg(dev, "options are \"%s\"\n", options ? options : "null");
 
        /* could be made table driven or similar?... */
-        while ((this_opt = strsep(&options, ",")) != NULL) {
-                if (!strncmp(this_opt, "mode:", 5)) {
-                       const char *name = this_opt+5;
-                       unsigned int namelen = strlen(name);
-                       int res_specified = 0, bpp_specified = 0;
-                       unsigned int xres = 0, yres = 0, bpp = 0;
-                       int yres_specified = 0;
-                       int i;
-                       for (i = namelen-1; i >= 0; i--) {
-                               switch (name[i]) {
-                               case '-':
-                                       namelen = i;
-                                       if (!bpp_specified && !yres_specified) {
-                                               bpp = simple_strtoul(&name[i+1], NULL, 0);
-                                               bpp_specified = 1;
-                                       } else
-                                               goto done;
-                                       break;
-                               case 'x':
-                                       if (!yres_specified) {
-                                               yres = simple_strtoul(&name[i+1], NULL, 0);
-                                               yres_specified = 1;
-                                       } else
-                                               goto done;
-                                       break;
-                               case '0' ... '9':
-                                       break;
-                               default:
-                                       goto done;
-                               }
-                       }
-                       if (i < 0 && yres_specified) {
-                               xres = simple_strtoul(name, NULL, 0);
-                               res_specified = 1;
-                       }
-               done:
-                       if (res_specified) {
-                               dev_info(dev, "overriding resolution: %dx%d\n", xres, yres);
-                               inf->modes[0].xres = xres; inf->modes[0].yres = yres;
-                       }
-                       if (bpp_specified)
-                               switch (bpp) {
-                               case 1:
-                               case 2:
-                               case 4:
-                               case 8:
-                               case 16:
-                                       inf->modes[0].bpp = bpp;
-                                       dev_info(dev, "overriding bit depth: %d\n", bpp);
-                                       break;
-                               default:
-                                       dev_err(dev, "Depth %d is not valid\n", bpp);
-                               }
-                } else if (!strncmp(this_opt, "pixclock:", 9)) {
-                        inf->modes[0].pixclock = simple_strtoul(this_opt+9, NULL, 0);
-                       dev_info(dev, "override pixclock: %ld\n", inf->modes[0].pixclock);
-                } else if (!strncmp(this_opt, "left:", 5)) {
-                        inf->modes[0].left_margin = simple_strtoul(this_opt+5, NULL, 0);
-                       dev_info(dev, "override left: %u\n", inf->modes[0].left_margin);
-                } else if (!strncmp(this_opt, "right:", 6)) {
-                        inf->modes[0].right_margin = simple_strtoul(this_opt+6, NULL, 0);
-                       dev_info(dev, "override right: %u\n", inf->modes[0].right_margin);
-                } else if (!strncmp(this_opt, "upper:", 6)) {
-                        inf->modes[0].upper_margin = simple_strtoul(this_opt+6, NULL, 0);
-                       dev_info(dev, "override upper: %u\n", inf->modes[0].upper_margin);
-                } else if (!strncmp(this_opt, "lower:", 6)) {
-                        inf->modes[0].lower_margin = simple_strtoul(this_opt+6, NULL, 0);
-                       dev_info(dev, "override lower: %u\n", inf->modes[0].lower_margin);
-                } else if (!strncmp(this_opt, "hsynclen:", 9)) {
-                        inf->modes[0].hsync_len = simple_strtoul(this_opt+9, NULL, 0);
-                       dev_info(dev, "override hsynclen: %u\n", inf->modes[0].hsync_len);
-                } else if (!strncmp(this_opt, "vsynclen:", 9)) {
-                        inf->modes[0].vsync_len = simple_strtoul(this_opt+9, NULL, 0);
-                       dev_info(dev, "override vsynclen: %u\n", inf->modes[0].vsync_len);
-                } else if (!strncmp(this_opt, "hsync:", 6)) {
-                        if (simple_strtoul(this_opt+6, NULL, 0) == 0) {
-                               dev_info(dev, "override hsync: Active Low\n");
-                               inf->modes[0].sync &= ~FB_SYNC_HOR_HIGH_ACT;
-                       } else {
-                               dev_info(dev, "override hsync: Active High\n");
-                               inf->modes[0].sync |= FB_SYNC_HOR_HIGH_ACT;
-                       }
-                } else if (!strncmp(this_opt, "vsync:", 6)) {
-                        if (simple_strtoul(this_opt+6, NULL, 0) == 0) {
-                               dev_info(dev, "override vsync: Active Low\n");
-                               inf->modes[0].sync &= ~FB_SYNC_VERT_HIGH_ACT;
-                       } else {
-                               dev_info(dev, "override vsync: Active High\n");
-                               inf->modes[0].sync |= FB_SYNC_VERT_HIGH_ACT;
-                       }
-                } else if (!strncmp(this_opt, "dpc:", 4)) {
-                        if (simple_strtoul(this_opt+4, NULL, 0) == 0) {
-                               dev_info(dev, "override double pixel clock: false\n");
-                               inf->lccr3 &= ~LCCR3_DPC;
-                       } else {
-                               dev_info(dev, "override double pixel clock: true\n");
-                               inf->lccr3 |= LCCR3_DPC;
-                       }
-                } else if (!strncmp(this_opt, "outputen:", 9)) {
-                        if (simple_strtoul(this_opt+9, NULL, 0) == 0) {
-                               dev_info(dev, "override output enable: active low\n");
-                               inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnL;
-                       } else {
-                               dev_info(dev, "override output enable: active high\n");
-                               inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnH;
-                       }
-                } else if (!strncmp(this_opt, "pixclockpol:", 12)) {
-                        if (simple_strtoul(this_opt+12, NULL, 0) == 0) {
-                               dev_info(dev, "override pixel clock polarity: falling edge\n");
-                               inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixFlEdg;
-                       } else {
-                               dev_info(dev, "override pixel clock polarity: rising edge\n");
-                               inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixRsEdg;
-                       }
-                } else if (!strncmp(this_opt, "color", 5)) {
-                       inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Color;
-                } else if (!strncmp(this_opt, "mono", 4)) {
-                       inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Mono;
-                } else if (!strncmp(this_opt, "active", 6)) {
-                       inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Act;
-                } else if (!strncmp(this_opt, "passive", 7)) {
-                       inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Pas;
-                } else if (!strncmp(this_opt, "single", 6)) {
-                       inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Sngl;
-                } else if (!strncmp(this_opt, "dual", 4)) {
-                       inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Dual;
-                } else if (!strncmp(this_opt, "4pix", 4)) {
-                       inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_4PixMono;
-                } else if (!strncmp(this_opt, "8pix", 4)) {
-                       inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_8PixMono;
-               } else {
-                       dev_err(dev, "unknown option: %s\n", this_opt);
-                       return -EINVAL;
-               }
-        }
-        return 0;
+       while ((this_opt = strsep(&options, ",")) != NULL) {
+               ret = parse_opt(dev, this_opt);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
+static char g_options[256] __devinitdata = "";
 
+#ifndef CONFIG_MODULES
+static int __devinit pxafb_setup_options(void)
+{
+       char *options = NULL;
+
+       if (fb_get_options("pxafb", &options))
+               return -ENODEV;
+
+       if (options)
+               strlcpy(g_options, options, sizeof(g_options));
+
+       return 0;
 }
+#else
+#define pxafb_setup_options()          (0)
+
+module_param_string(options, g_options, sizeof(g_options), 0);
+MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)");
+#endif
+
+#else
+#define pxafb_parse_options(...)       (0)
+#define pxafb_setup_options()          (0)
 #endif
 
 static int __init pxafb_probe(struct platform_device *dev)
 {
        struct pxafb_info *fbi;
        struct pxafb_mach_info *inf;
-       int ret;
+       struct resource *r;
+       int irq, ret;
 
        dev_dbg(&dev->dev, "pxafb_probe\n");
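
The rewritten option parser above accepts the same comma-separated keywords as before (mode:, pixclock:, left:, right:, upper:, lower:, hsynclen:, vsynclen:, hsync:, vsync:, dpc:, outputen:, pixclockpol:, color, mono, active, passive, single, dual, 4pix, 8pix). With the new pxafb_setup_options() split, the string reaches the driver either via fb_get_options() on the kernel command line, e.g. video=pxafb:mode:640x480-16,active (values illustrative), or via the options module parameter, e.g. modprobe pxafb options=mode:640x480-16,active.
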
 
@@ -1376,38 +1628,45 @@ static int __init pxafb_probe(struct platform_device *dev)
        if (!inf)
                goto failed;
 
-#ifdef CONFIG_FB_PXA_PARAMETERS
        ret = pxafb_parse_options(&dev->dev, g_options);
        if (ret < 0)
                goto failed;
-#endif
 
 #ifdef DEBUG_VAR
-        /* Check for various illegal bit-combinations. Currently only
+       /* Check for various illegal bit-combinations. Currently only
         * a warning is given. */
 
-        if (inf->lccr0 & LCCR0_INVALID_CONFIG_MASK)
-                dev_warn(&dev->dev, "machine LCCR0 setting contains illegal bits: %08x\n",
-                        inf->lccr0 & LCCR0_INVALID_CONFIG_MASK);
-        if (inf->lccr3 & LCCR3_INVALID_CONFIG_MASK)
-                dev_warn(&dev->dev, "machine LCCR3 setting contains illegal bits: %08x\n",
-                        inf->lccr3 & LCCR3_INVALID_CONFIG_MASK);
-        if (inf->lccr0 & LCCR0_DPD &&
+       if (inf->lccr0 & LCCR0_INVALID_CONFIG_MASK)
+               dev_warn(&dev->dev, "machine LCCR0 setting contains "
+                               "illegal bits: %08x\n",
+                       inf->lccr0 & LCCR0_INVALID_CONFIG_MASK);
+       if (inf->lccr3 & LCCR3_INVALID_CONFIG_MASK)
+               dev_warn(&dev->dev, "machine LCCR3 setting contains "
+                               "illegal bits: %08x\n",
+                       inf->lccr3 & LCCR3_INVALID_CONFIG_MASK);
+       if (inf->lccr0 & LCCR0_DPD &&
            ((inf->lccr0 & LCCR0_PAS) != LCCR0_Pas ||
             (inf->lccr0 & LCCR0_SDS) != LCCR0_Sngl ||
             (inf->lccr0 & LCCR0_CMS) != LCCR0_Mono))
-                dev_warn(&dev->dev, "Double Pixel Data (DPD) mode is only valid in passive mono"
-                        " single panel mode\n");
-        if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Act &&
+               dev_warn(&dev->dev, "Double Pixel Data (DPD) mode is "
+                               "only valid in passive mono"
+                               " single panel mode\n");
+       if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Act &&
            (inf->lccr0 & LCCR0_SDS) == LCCR0_Dual)
-                dev_warn(&dev->dev, "Dual panel only valid in passive mode\n");
-        if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Pas &&
-             (inf->modes->upper_margin || inf->modes->lower_margin))
-                dev_warn(&dev->dev, "Upper and lower margins must be 0 in passive mode\n");
+               dev_warn(&dev->dev, "Dual panel only valid in passive mode\n");
+       if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Pas &&
+            (inf->modes->upper_margin || inf->modes->lower_margin))
+               dev_warn(&dev->dev, "Upper and lower margins must be 0 in "
+                               "passive mode\n");
 #endif
 
-       dev_dbg(&dev->dev, "got a %dx%dx%d LCD\n",inf->modes->xres, inf->modes->yres, inf->modes->bpp);
-       if (inf->modes->xres == 0 || inf->modes->yres == 0 || inf->modes->bpp == 0) {
+       dev_dbg(&dev->dev, "got a %dx%dx%d LCD\n",
+                       inf->modes->xres,
+                       inf->modes->yres,
+                       inf->modes->bpp);
+       if (inf->modes->xres == 0 ||
+           inf->modes->yres == 0 ||
+           inf->modes->bpp == 0) {
                dev_err(&dev->dev, "Invalid resolution or bit depth\n");
                ret = -EINVAL;
                goto failed;
@@ -1416,26 +1675,62 @@ static int __init pxafb_probe(struct platform_device *dev)
        pxafb_lcd_power = inf->pxafb_lcd_power;
        fbi = pxafb_init_fbinfo(&dev->dev);
        if (!fbi) {
+               /* only reason for pxafb_init_fbinfo to fail is kmalloc */
                dev_err(&dev->dev, "Failed to initialize framebuffer device\n");
-               ret = -ENOMEM; // only reason for pxafb_init_fbinfo to fail is kmalloc
+               ret = -ENOMEM;
                goto failed;
        }
 
+       r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+       if (r == NULL) {
+               dev_err(&dev->dev, "no I/O memory resource defined\n");
+               ret = -ENODEV;
+               goto failed;
+       }
+
+       r = request_mem_region(r->start, r->end - r->start + 1, dev->name);
+       if (r == NULL) {
+               dev_err(&dev->dev, "failed to request I/O memory\n");
+               ret = -EBUSY;
+               goto failed;
+       }
+
+       fbi->mmio_base = ioremap(r->start, r->end - r->start + 1);
+       if (fbi->mmio_base == NULL) {
+               dev_err(&dev->dev, "failed to map I/O memory\n");
+               ret = -EBUSY;
+               goto failed_free_res;
+       }
+
        /* Initialize video memory */
        ret = pxafb_map_video_memory(fbi);
        if (ret) {
                dev_err(&dev->dev, "Failed to allocate video RAM: %d\n", ret);
                ret = -ENOMEM;
-               goto failed;
+               goto failed_free_io;
+       }
+
+       irq = platform_get_irq(dev, 0);
+       if (irq < 0) {
+               dev_err(&dev->dev, "no IRQ defined\n");
+               ret = -ENODEV;
+               goto failed_free_mem;
        }
 
-       ret = request_irq(IRQ_LCD, pxafb_handle_irq, IRQF_DISABLED, "LCD", fbi);
+       ret = request_irq(irq, pxafb_handle_irq, IRQF_DISABLED, "LCD", fbi);
        if (ret) {
                dev_err(&dev->dev, "request_irq failed: %d\n", ret);
                ret = -EBUSY;
-               goto failed;
+               goto failed_free_mem;
        }
 
+#ifdef CONFIG_FB_PXA_SMARTPANEL
+       ret = pxafb_smart_init(fbi);
+       if (ret) {
+               dev_err(&dev->dev, "failed to initialize smartpanel\n");
+               goto failed_free_irq;
+       }
+#endif
        /*
         * This makes sure that our colour bitfield
         * descriptors are correctly initialised.
@@ -1447,19 +1742,18 @@ static int __init pxafb_probe(struct platform_device *dev)
 
        ret = register_framebuffer(&fbi->fb);
        if (ret < 0) {
-               dev_err(&dev->dev, "Failed to register framebuffer device: %d\n", ret);
-               goto failed;
+               dev_err(&dev->dev,
+                       "Failed to register framebuffer device: %d\n", ret);
+               goto failed_free_irq;
        }
 
-#ifdef CONFIG_PM
-       // TODO
-#endif
-
 #ifdef CONFIG_CPU_FREQ
        fbi->freq_transition.notifier_call = pxafb_freq_transition;
        fbi->freq_policy.notifier_call = pxafb_freq_policy;
-       cpufreq_register_notifier(&fbi->freq_transition, CPUFREQ_TRANSITION_NOTIFIER);
-       cpufreq_register_notifier(&fbi->freq_policy, CPUFREQ_POLICY_NOTIFIER);
+       cpufreq_register_notifier(&fbi->freq_transition,
+                               CPUFREQ_TRANSITION_NOTIFIER);
+       cpufreq_register_notifier(&fbi->freq_policy,
+                               CPUFREQ_POLICY_NOTIFIER);
 #endif
 
        /*
@@ -1469,6 +1763,15 @@ static int __init pxafb_probe(struct platform_device *dev)
 
        return 0;
 
+failed_free_irq:
+       free_irq(irq, fbi);
+failed_free_res:
+       release_mem_region(r->start, r->end - r->start + 1);
+failed_free_io:
+       iounmap(fbi->mmio_base);
+failed_free_mem:
+       dma_free_writecombine(&dev->dev, fbi->map_size,
+                       fbi->map_cpu, fbi->map_dma);
 failed:
        platform_set_drvdata(dev, NULL);
        kfree(fbi);
@@ -1477,40 +1780,18 @@ failed:
 
 static struct platform_driver pxafb_driver = {
        .probe          = pxafb_probe,
-#ifdef CONFIG_PM
        .suspend        = pxafb_suspend,
        .resume         = pxafb_resume,
-#endif
        .driver         = {
                .name   = "pxa2xx-fb",
        },
 };
 
-#ifndef MODULE
-static int __devinit pxafb_setup(char *options)
-{
-# ifdef CONFIG_FB_PXA_PARAMETERS
-       if (options)
-               strlcpy(g_options, options, sizeof(g_options));
-# endif
-       return 0;
-}
-#else
-# ifdef CONFIG_FB_PXA_PARAMETERS
-module_param_string(options, g_options, sizeof(g_options), 0);
-MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)");
-# endif
-#endif
-
 static int __devinit pxafb_init(void)
 {
-#ifndef MODULE
-       char *option = NULL;
+       if (pxafb_setup_options())
+               return -EINVAL;
 
-       if (fb_get_options("pxafb", &option))
-               return -ENODEV;
-       pxafb_setup(option);
-#endif
        return platform_driver_register(&pxafb_driver);
 }
 
index d920b8a14c35970c8f261ae41c2fc440cb815e38..8238dc8264297fb42c1b2b40226fb64edfb53c32 100644 (file)
  * for more details.
  */
 
-/* Shadows for LCD controller registers */
-struct pxafb_lcd_reg {
-       unsigned int lccr0;
-       unsigned int lccr1;
-       unsigned int lccr2;
-       unsigned int lccr3;
-};
-
 /* PXA LCD DMA descriptor */
 struct pxafb_dma_descriptor {
        unsigned int fdadr;
@@ -37,11 +29,49 @@ struct pxafb_dma_descriptor {
        unsigned int ldcmd;
 };
 
+enum {
+       PAL_NONE        = -1,
+       PAL_BASE        = 0,
+       PAL_OV1         = 1,
+       PAL_OV2         = 2,
+       PAL_MAX,
+};
+
+enum {
+       DMA_BASE        = 0,
+       DMA_UPPER       = 0,
+       DMA_LOWER       = 1,
+       DMA_OV1         = 1,
+       DMA_OV2_Y       = 2,
+       DMA_OV2_Cb      = 3,
+       DMA_OV2_Cr      = 4,
+       DMA_CURSOR      = 5,
+       DMA_CMD         = 6,
+       DMA_MAX,
+};
+
+/* maximum palette size - 256 entries, each 4 bytes long */
+#define PALETTE_SIZE   (256 * 4)
+#define CMD_BUFF_SIZE  (1024 * 50)
+
+struct pxafb_dma_buff {
+       unsigned char palette[PAL_MAX * PALETTE_SIZE];
+       uint16_t cmd_buff[CMD_BUFF_SIZE];
+       struct pxafb_dma_descriptor pal_desc[PAL_MAX];
+       struct pxafb_dma_descriptor dma_desc[DMA_MAX];
+};
+
 struct pxafb_info {
        struct fb_info          fb;
        struct device           *dev;
        struct clk              *clk;
 
+       void __iomem            *mmio_base;
+
+       struct pxafb_dma_buff   *dma_buff;
+       dma_addr_t              dma_buff_phys;
+       dma_addr_t              fdadr[DMA_MAX];
+
        /*
         * These are the addresses we mapped
         * the framebuffer memory region to.
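
struct pxafb_dma_buff groups the palettes, the smart-panel command buffer and all DMA descriptors into the single writecombined allocation made in pxafb_map_video_memory(), so dma_buff_phys plus a fixed offset yields the bus address of any descriptor. The helper that fills the per-branch descriptors (setup_frame_dma()) is added earlier in the patch; a minimal sketch of the idea, using only the fields declared here:

    /* Sketch only: the real setup_frame_dma() also chains the palette
     * descriptor in front of the frame when PAL_BASE is requested. */
    static void sketch_frame_dma(struct pxafb_info *fbi, int dma,
                                 unsigned long offset, size_t size)
    {
            struct pxafb_dma_descriptor *desc = &fbi->dma_buff->dma_desc[dma];
            dma_addr_t desc_phys = fbi->dma_buff_phys +
                    offsetof(struct pxafb_dma_buff, dma_desc) +
                    dma * sizeof(struct pxafb_dma_descriptor);

            desc->fsadr = fbi->screen_dma + offset;   /* frame source address */
            desc->fidr  = 0;
            desc->ldcmd = size;
            desc->fdadr = desc_phys;    /* self-chained: continuous refresh */
            fbi->fdadr[dma] = desc_phys;
    }
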
@@ -55,19 +85,8 @@ struct pxafb_info {
        u_char *                screen_cpu;     /* virtual address of frame buffer */
        dma_addr_t              screen_dma;     /* physical address of frame buffer */
        u16 *                   palette_cpu;    /* virtual address of palette memory */
-       dma_addr_t              palette_dma;    /* physical address of palette memory */
        u_int                   palette_size;
-
-       /* DMA descriptors */
-       struct pxafb_dma_descriptor *   dmadesc_fblow_cpu;
-       dma_addr_t              dmadesc_fblow_dma;
-       struct pxafb_dma_descriptor *   dmadesc_fbhigh_cpu;
-       dma_addr_t              dmadesc_fbhigh_dma;
-       struct pxafb_dma_descriptor *   dmadesc_palette_cpu;
-       dma_addr_t              dmadesc_palette_dma;
-
-       dma_addr_t              fdadr0;
-       dma_addr_t              fdadr1;
+       ssize_t                 video_offset;
 
        u_int                   lccr0;
        u_int                   lccr3;
@@ -81,6 +100,7 @@ struct pxafb_info {
        u_int                   reg_lccr2;
        u_int                   reg_lccr3;
        u_int                   reg_lccr4;
+       u_int                   reg_cmdcr;
 
        unsigned long   hsync_time;
 
@@ -90,6 +110,16 @@ struct pxafb_info {
        wait_queue_head_t       ctrlr_wait;
        struct work_struct      task;
 
+       struct completion       disable_done;
+
+#ifdef CONFIG_FB_PXA_SMARTPANEL
+       uint16_t                *smart_cmds;
+       size_t                  n_smart_cmds;
+       struct completion       command_done;
+       struct completion       refresh_done;
+       struct task_struct      *smart_thread;
+#endif
+
 #ifdef CONFIG_CPU_FREQ
        struct notifier_block   freq_transition;
        struct notifier_block   freq_policy;
index 5c47968e7f2152bd3004cd8f0dd9c8a93ff18880..d94c57ffbdb1661b4c7b3476ce406275388980ca 100644 (file)
 #include "rivafb.h"
 #include "nvreg.h"
 
-#ifndef CONFIG_PCI             /* sanity check */
-#error This driver requires PCI support.
-#endif
-
 /* version number of this driver */
 #define RIVAFB_VERSION "0.9.5b"
 
 #define NVTRACE          if(0) printk
 #endif
 
-#define NVTRACE_ENTER(...)  NVTRACE("%s START\n", __FUNCTION__)
-#define NVTRACE_LEAVE(...)  NVTRACE("%s END\n", __FUNCTION__)
+#define NVTRACE_ENTER(...)  NVTRACE("%s START\n", __func__)
+#define NVTRACE_LEAVE(...)  NVTRACE("%s END\n", __func__)
 
 #ifdef CONFIG_FB_RIVA_DEBUG
 #define assert(expr) \
        if(!(expr)) { \
        printk( "Assertion failed! %s,%s,%s,line=%d\n",\
-       #expr,__FILE__,__FUNCTION__,__LINE__); \
+       #expr,__FILE__,__func__,__LINE__); \
        BUG(); \
        }
 #else
@@ -2213,14 +2209,12 @@ static int __devinit rivafb_init(void)
 
 module_init(rivafb_init);
 
-#ifdef MODULE
 static void __exit rivafb_exit(void)
 {
        pci_unregister_driver(&rivafb_driver);
 }
 
 module_exit(rivafb_exit);
-#endif /* MODULE */
 
 module_param(noaccel, bool, 0);
 MODULE_PARM_DESC(noaccel, "bool: disable acceleration");
index a11026812d1b6d169c66ba7badd923f01bd9a051..f3694cf17e58097ea72b398de6717e217d9d3adb 100644 (file)
 #include "rivafb.h"
 #include "nvreg.h"
 
-
-#ifndef CONFIG_PCI             /* sanity check */
-#error This driver requires PCI support.
-#endif
-
 #define PFX "rivafb: "
 
 static inline unsigned char MISCin(struct riva_par *par)
@@ -163,7 +158,7 @@ unsigned long riva_get_memlen(struct riva_par *par)
        unsigned long memlen = 0;
        unsigned int chipset = par->Chipset;
        struct pci_dev* dev;
-       int amt;
+       u32 amt;
 
        switch (chip->Architecture) {
        case NV_ARCH_03:
index 13307703a9f04d421792a460b8b595c50ee776f6..78fdbf5178d7824d181f3edcbb6e157e5e887e83 100644 (file)
@@ -231,7 +231,7 @@ typedef struct {
   int nvclk_khz;
   char mem_page_miss;
   char mem_latency;
-  int memory_type;
+  u32 memory_type;
   int memory_width;
   char enable_video;
   char gr_during_vid;
@@ -2107,7 +2107,7 @@ static void nv10GetConfig
 )
 {
     struct pci_dev* dev;
-    int amt;
+    u32 amt;
 
 #ifdef __BIG_ENDIAN
     /* turn on big endian register access */
index 71fa6edb5c477d64cd255b9fd866ec9dbe070d28..13b38cbbe4cf8218e282d159d371a7ca39eb946a 100644 (file)
@@ -430,9 +430,9 @@ static void s3c2410fb_activate_var(struct fb_info *info)
        struct fb_var_screeninfo *var = &info->var;
        int clkdiv = s3c2410fb_calc_pixclk(fbi, var->pixclock) / 2;
 
-       dprintk("%s: var->xres  = %d\n", __FUNCTION__, var->xres);
-       dprintk("%s: var->yres  = %d\n", __FUNCTION__, var->yres);
-       dprintk("%s: var->bpp   = %d\n", __FUNCTION__, var->bits_per_pixel);
+       dprintk("%s: var->xres  = %d\n", __func__, var->xres);
+       dprintk("%s: var->yres  = %d\n", __func__, var->yres);
+       dprintk("%s: var->bpp   = %d\n", __func__, var->bits_per_pixel);
 
        if (type == S3C2410_LCDCON1_TFT) {
                s3c2410fb_calculate_tft_lcd_regs(info, &fbi->regs);
index 7d53bc23b9c75fac01392afcb4d26421d5f36c03..2972f112dbed29876c3652210812c7c5e854eebe 100644 (file)
@@ -132,10 +132,10 @@ static const struct svga_timing_regs s3_timing_regs     = {
 /* Module parameters */
 
 
-static char *mode = "640x480-8@60";
+static char *mode_option __devinitdata = "640x480-8@60";
 
 #ifdef CONFIG_MTRR
-static int mtrr = 1;
+static int mtrr __devinitdata = 1;
 #endif
 
 static int fasttext = 1;
@@ -145,8 +145,10 @@ MODULE_AUTHOR("(c) 2006-2007 Ondrej Zajicek <santiago@crfreenet.org>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("fbdev driver for S3 Trio/Virge");
 
-module_param(mode, charp, 0444);
-MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
+module_param(mode_option, charp, 0444);
+MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
+module_param_named(mode, mode_option, charp, 0444);
+MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc) (deprecated)");
 
 #ifdef CONFIG_MTRR
 module_param(mtrr, int, 0444);
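
Renaming the variable to mode_option while keeping a module_param_named() alias means both spellings keep working: modprobe s3fb mode_option=640x480-8@60 and the deprecated modprobe s3fb mode=640x480-8@60 select the same startup mode, and s3fb_setup() further down now stores unrecognised option strings into mode_option as well.
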
@@ -886,7 +888,7 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
        }
 
        /* Allocate and fill driver data structure */
-       info = framebuffer_alloc(sizeof(struct s3fb_info), NULL);
+       info = framebuffer_alloc(sizeof(struct s3fb_info), &(dev->dev));
        if (!info) {
                dev_err(&(dev->dev), "cannot allocate memory\n");
                return -ENOMEM;
@@ -901,13 +903,13 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
        /* Prepare PCI device */
        rc = pci_enable_device(dev);
        if (rc < 0) {
-               dev_err(&(dev->dev), "cannot enable PCI device\n");
+               dev_err(info->dev, "cannot enable PCI device\n");
                goto err_enable_device;
        }
 
        rc = pci_request_regions(dev, "s3fb");
        if (rc < 0) {
-               dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
+               dev_err(info->dev, "cannot reserve framebuffer region\n");
                goto err_request_regions;
        }
 
@@ -919,7 +921,7 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
        info->screen_base = pci_iomap(dev, 0, 0);
        if (! info->screen_base) {
                rc = -ENOMEM;
-               dev_err(&(dev->dev), "iomap for framebuffer failed\n");
+               dev_err(info->dev, "iomap for framebuffer failed\n");
                goto err_iomap;
        }
 
@@ -960,22 +962,22 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
        info->pseudo_palette = (void*) (par->pseudo_palette);
 
        /* Prepare startup mode */
-       rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
+       rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
        if (! ((rc == 1) || (rc == 2))) {
                rc = -EINVAL;
-               dev_err(&(dev->dev), "mode %s not found\n", mode);
+               dev_err(info->dev, "mode %s not found\n", mode_option);
                goto err_find_mode;
        }
 
        rc = fb_alloc_cmap(&info->cmap, 256, 0);
        if (rc < 0) {
-               dev_err(&(dev->dev), "cannot allocate colormap\n");
+               dev_err(info->dev, "cannot allocate colormap\n");
                goto err_alloc_cmap;
        }
 
        rc = register_framebuffer(info);
        if (rc < 0) {
-               dev_err(&(dev->dev), "cannot register framebuffer\n");
+               dev_err(info->dev, "cannot register framebuffer\n");
                goto err_reg_fb;
        }
 
@@ -1051,7 +1053,7 @@ static int s3_pci_suspend(struct pci_dev* dev, pm_message_t state)
        struct fb_info *info = pci_get_drvdata(dev);
        struct s3fb_info *par = info->par;
 
-       dev_info(&(dev->dev), "suspend\n");
+       dev_info(info->dev, "suspend\n");
 
        acquire_console_sem();
        mutex_lock(&(par->open_lock));
@@ -1083,7 +1085,7 @@ static int s3_pci_resume(struct pci_dev* dev)
        struct s3fb_info *par = info->par;
        int err;
 
-       dev_info(&(dev->dev), "resume\n");
+       dev_info(info->dev, "resume\n");
 
        acquire_console_sem();
        mutex_lock(&(par->open_lock));
@@ -1100,7 +1102,7 @@ static int s3_pci_resume(struct pci_dev* dev)
        if (err) {
                mutex_unlock(&(par->open_lock));
                release_console_sem();
-               dev_err(&(dev->dev), "error %d enabling device for resume\n", err);
+               dev_err(info->dev, "error %d enabling device for resume\n", err);
                return err;
        }
        pci_set_master(dev);
@@ -1168,7 +1170,7 @@ static int  __init s3fb_setup(char *options)
                else if (!strncmp(opt, "fasttext:", 9))
                        fasttext = simple_strtoul(opt + 9, NULL, 0);
                else
-                       mode = opt;
+                       mode_option = opt;
        }
 
        return 0;
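
The s3fb parameter rename above keeps the old name usable by binding both names to one variable. A minimal sketch of that pattern for a module (the demo_* names are illustrative, not the s3fb code):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static char *demo_mode_option = "640x480-8@60";

    /* new, preferred parameter name */
    module_param_named(mode_option, demo_mode_option, charp, 0444);
    MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");

    /* old name kept as a deprecated alias; both write the same variable */
    module_param_named(mode, demo_mode_option, charp, 0444);
    MODULE_PARM_DESC(mode, "Default video mode (deprecated)");

    MODULE_LICENSE("GPL");

With this, either name on the module command line sets the same option, so existing setups keep working while the documentation can point at the new name.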
index 48066ef3af05fa2356171a76932e8252b62b05e5..f465b27ed860e610b353a14d5c2953ab1d1aec21 100644 (file)
@@ -132,7 +132,7 @@ struct sa1100fb_info {
  *  Debug macros 
  */
 #if DEBUG
-#  define DPRINTK(fmt, args...)        printk("%s: " fmt, __FUNCTION__ , ## args)
+#  define DPRINTK(fmt, args...)        printk("%s: " fmt, __func__ , ## args)
 #else
 #  define DPRINTK(fmt, args...)
 #endif
index 35c1ce62b216ac0aeb1ea3fd7cb9d2c1a35b59ca..783d4adffb93d82bda425203912f12c100ef2dbf 100644 (file)
@@ -140,7 +140,7 @@ static int savage_setup_i2c_bus(struct savagefb_i2c_chan *chan,
                chan->adapter.id                = I2C_HW_B_SAVAGE;
                chan->adapter.algo_data         = &chan->algo;
                chan->adapter.dev.parent        = &chan->par->pcidev->dev;
-               chan->algo.udelay               = 40;
+               chan->algo.udelay               = 10;
                chan->algo.timeout              = 20;
                chan->algo.data                 = chan;
 
index 9b05da6268f7e8cfc72e6f07afb4b3460416dcdd..a14e822110375bf7c946d9848014ad891f3ab03b 100644 (file)
@@ -55,7 +55,7 @@
 #undef SISFBDEBUG
 
 #ifdef SISFBDEBUG
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args)
+#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
 #define TWDEBUG(x) printk(KERN_INFO x "\n");
 #else
 #define DPRINTK(fmt, args...)
index 97784f9c184de95980a35c952ac81a363d68b115..5b11a00f49bc3708fbcc9d7b4866424c1539776f 100644 (file)
@@ -1006,7 +1006,7 @@ static int sst_set_pll_att_ti(struct fb_info *info,
                break;
        default:
                dprintk("%s: wrong clock code '%d'\n",
-                       __FUNCTION__, clock);
+                       __func__, clock);
                return 0;
                }
        udelay(300);
@@ -1048,7 +1048,7 @@ static int sst_set_pll_ics(struct fb_info *info,
                break;
        default:
                dprintk("%s: wrong clock code '%d'\n",
-                       __FUNCTION__, clock);
+                       __func__, clock);
                return 0;
                }
        udelay(300);
@@ -1079,7 +1079,7 @@ static void sst_set_vidmod_att_ti(struct fb_info *info, const int bpp)
                sst_dac_write(DACREG_RMR, (cr0 & 0x0f) | DACREG_CR0_16BPP);
                break;
        default:
-               dprintk("%s: bad depth '%u'\n", __FUNCTION__, bpp);
+               dprintk("%s: bad depth '%u'\n", __func__, bpp);
                break;
        }
 }
@@ -1093,7 +1093,7 @@ static void sst_set_vidmod_ics(struct fb_info *info, const int bpp)
                sst_dac_write(DACREG_ICS_CMD, DACREG_ICS_CMD_16BPP);
                break;
        default:
-               dprintk("%s: bad depth '%u'\n", __FUNCTION__, bpp);
+               dprintk("%s: bad depth '%u'\n", __func__, bpp);
                break;
        }
 }
@@ -1133,7 +1133,7 @@ static int __devinit sst_detect_dactype(struct fb_info *info, struct sstfb_par *
        }
        if (!ret)
                return 0;
-       f_dprintk("%s found %s\n", __FUNCTION__, dacs[i].name);
+       f_dprintk("%s found %s\n", __func__, dacs[i].name);
        par->dac_sw = dacs[i];
        return 1;
 }
index f98be301140cdf5be7e06376dd03888ef505a192..598d35eff935ae18e8db7e77e6cfebbab493d462 100644 (file)
@@ -164,11 +164,11 @@ static int __initdata stifb_bpp_pref[MAX_STI_ROMS];
 # define  DEBUG_ON()  debug_on=1
 # define WRITE_BYTE(value,fb,reg)      do { if (debug_on) \
                                                printk(KERN_DEBUG "%30s: WRITE_BYTE(0x%06x) = 0x%02x (old=0x%02x)\n", \
-                                                       __FUNCTION__, reg, value, READ_BYTE(fb,reg));             \
+                                                       __func__, reg, value, READ_BYTE(fb,reg));                 \
                                        gsc_writeb((value),(fb)->info.fix.mmio_start + (reg)); } while (0)
 # define WRITE_WORD(value,fb,reg)      do { if (debug_on) \
                                                printk(KERN_DEBUG "%30s: WRITE_WORD(0x%06x) = 0x%08x (old=0x%08x)\n", \
-                                                       __FUNCTION__, reg, value, READ_WORD(fb,reg));             \
+                                                       __func__, reg, value, READ_WORD(fb,reg));                 \
                                        gsc_writel((value),(fb)->info.fix.mmio_start + (reg)); } while (0)
 #endif /* DEBUG_STIFB_REGS */
 
index 37af10ab8f52b19c570929ba2d580c269d346b52..a352d5f46bbf08be1badbe0b23313fc07d8f4090 100644 (file)
      */
 
 static void
-bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
-       int src_idx, int bits, unsigned n)
+bitcpy(struct fb_info *p, unsigned long *dst, int dst_idx,
+               const unsigned long *src, int src_idx, int bits, unsigned n)
 {
        unsigned long first, last;
        int const shift = dst_idx-src_idx;
        int left, right;
 
-       first = FB_SHIFT_HIGH(~0UL, dst_idx);
-       last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));
+       first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
+       last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
 
        if (!shift) {
                /* Same alignment for source and dest */
@@ -167,8 +167,8 @@ bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
      */
 
 static void
-bitcpy_rev(unsigned long *dst, int dst_idx, const unsigned long *src,
-          int src_idx, int bits, unsigned n)
+bitcpy_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
+               const unsigned long *src, int src_idx, int bits, unsigned n)
 {
        unsigned long first, last;
        int shift;
@@ -186,8 +186,8 @@ bitcpy_rev(unsigned long *dst, int dst_idx, const unsigned long *src,
 
        shift = dst_idx-src_idx;
 
-       first = FB_SHIFT_LOW(~0UL, bits - 1 - dst_idx);
-       last = ~(FB_SHIFT_LOW(~0UL, bits - 1 - ((dst_idx-n) % bits)));
+       first = FB_SHIFT_LOW(p, ~0UL, bits - 1 - dst_idx);
+       last = ~(FB_SHIFT_LOW(p, ~0UL, bits - 1 - ((dst_idx-n) % bits)));
 
        if (!shift) {
                /* Same alignment for source and dest */
@@ -353,7 +353,7 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
                        dst_idx &= (bytes - 1);
                        src += src_idx >> (ffs(bits) - 1);
                        src_idx &= (bytes - 1);
-                       bitcpy_rev(dst, dst_idx, src, src_idx, bits,
+                       bitcpy_rev(p, dst, dst_idx, src, src_idx, bits,
                                width*p->var.bits_per_pixel);
                }
        } else {
@@ -362,7 +362,7 @@ void sys_copyarea(struct fb_info *p, const struct fb_copyarea *area)
                        dst_idx &= (bytes - 1);
                        src += src_idx >> (ffs(bits) - 1);
                        src_idx &= (bytes - 1);
-                       bitcpy(dst, dst_idx, src, src_idx, bits,
+                       bitcpy(p, dst, dst_idx, src, src_idx, bits,
                                width*p->var.bits_per_pixel);
                        dst_idx += bits_per_line;
                        src_idx += bits_per_line;
index a261e9e6a675a8b1542422ad3c9b8d83d03207eb..f94d6b6e29eeb9f57c4082f6a09190ca055d3278 100644 (file)
      */
 
 static void
-bitfill_aligned(unsigned long *dst, int dst_idx, unsigned long pat,
-               unsigned n, int bits)
+bitfill_aligned(struct fb_info *p, unsigned long *dst, int dst_idx,
+               unsigned long pat, unsigned n, int bits)
 {
        unsigned long first, last;
 
        if (!n)
                return;
 
-       first = FB_SHIFT_HIGH(~0UL, dst_idx);
-       last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));
+       first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
+       last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
 
        if (dst_idx+n <= bits) {
                /* Single word */
@@ -78,16 +78,16 @@ bitfill_aligned(unsigned long *dst, int dst_idx, unsigned long pat,
      */
 
 static void
-bitfill_unaligned(unsigned long *dst, int dst_idx, unsigned long pat,
-                 int left, int right, unsigned n, int bits)
+bitfill_unaligned(struct fb_info *p, unsigned long *dst, int dst_idx,
+                 unsigned long pat, int left, int right, unsigned n, int bits)
 {
        unsigned long first, last;
 
        if (!n)
                return;
 
-       first = FB_SHIFT_HIGH(~0UL, dst_idx);
-       last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));
+       first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
+       last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
 
        if (dst_idx+n <= bits) {
                /* Single word */
@@ -132,8 +132,8 @@ bitfill_unaligned(unsigned long *dst, int dst_idx, unsigned long pat,
      *  Aligned pattern invert using 32/64-bit memory accesses
      */
 static void
-bitfill_aligned_rev(unsigned long *dst, int dst_idx, unsigned long pat,
-                   unsigned n, int bits)
+bitfill_aligned_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
+                   unsigned long pat, unsigned n, int bits)
 {
        unsigned long val = pat;
        unsigned long first, last;
@@ -141,8 +141,8 @@ bitfill_aligned_rev(unsigned long *dst, int dst_idx, unsigned long pat,
        if (!n)
                return;
 
-       first = FB_SHIFT_HIGH(~0UL, dst_idx);
-       last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));
+       first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
+       last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
 
        if (dst_idx+n <= bits) {
                /* Single word */
@@ -188,16 +188,17 @@ bitfill_aligned_rev(unsigned long *dst, int dst_idx, unsigned long pat,
      */
 
 static void
-bitfill_unaligned_rev(unsigned long *dst, int dst_idx, unsigned long pat,
-                       int left, int right, unsigned n, int bits)
+bitfill_unaligned_rev(struct fb_info *p, unsigned long *dst, int dst_idx,
+                     unsigned long pat, int left, int right, unsigned n,
+                     int bits)
 {
        unsigned long first, last;
 
        if (!n)
                return;
 
-       first = FB_SHIFT_HIGH(~0UL, dst_idx);
-       last = ~(FB_SHIFT_HIGH(~0UL, (dst_idx+n) % bits));
+       first = FB_SHIFT_HIGH(p, ~0UL, dst_idx);
+       last = ~(FB_SHIFT_HIGH(p, ~0UL, (dst_idx+n) % bits));
 
        if (dst_idx+n <= bits) {
                /* Single word */
@@ -267,9 +268,9 @@ void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
        if (p->fbops->fb_sync)
                p->fbops->fb_sync(p);
        if (!left) {
-               void (*fill_op32)(unsigned long *dst, int dst_idx,
-                                 unsigned long pat, unsigned n, int bits) =
-                       NULL;
+               void (*fill_op32)(struct fb_info *p, unsigned long *dst,
+                                 int dst_idx, unsigned long pat, unsigned n,
+                                 int bits) = NULL;
 
                switch (rect->rop) {
                case ROP_XOR:
@@ -287,16 +288,16 @@ void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
                while (height--) {
                        dst += dst_idx >> (ffs(bits) - 1);
                        dst_idx &= (bits - 1);
-                       fill_op32(dst, dst_idx, pat, width*bpp, bits);
+                       fill_op32(p, dst, dst_idx, pat, width*bpp, bits);
                        dst_idx += p->fix.line_length*8;
                }
        } else {
                int right;
                int r;
                int rot = (left-dst_idx) % bpp;
-               void (*fill_op)(unsigned long *dst, int dst_idx,
-                               unsigned long pat, int left, int right,
-                               unsigned n, int bits) = NULL;
+               void (*fill_op)(struct fb_info *p, unsigned long *dst,
+                               int dst_idx, unsigned long pat, int left,
+                               int right, unsigned n, int bits) = NULL;
 
                /* rotate pattern to correct start position */
                pat = pat << rot | pat >> (bpp-rot);
@@ -318,7 +319,7 @@ void sys_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
                while (height--) {
                        dst += dst_idx >> (ffs(bits) - 1);
                        dst_idx &= (bits - 1);
-                       fill_op(dst, dst_idx, pat, left, right,
+                       fill_op(p, dst, dst_idx, pat, left, right,
                                width*bpp, bits);
                        r = (p->fix.line_length*8) % bpp;
                        pat = pat << (bpp-r) | pat >> r;
index bd7e7e9d155f0e8e2309a00cadb173cfe1a3cc73..186c6f607be2b821ee72c3bd437eeed86a339490 100644 (file)
 #define DEBUG
 
 #ifdef DEBUG
-#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt,__FUNCTION__,## args)
+#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt,__func__,## args)
 #else
 #define DPRINTK(fmt, args...)
 #endif
 
-static const u32 cfb_tab8[] = {
-#if defined(__BIG_ENDIAN)
+static const u32 cfb_tab8_be[] = {
     0x00000000,0x000000ff,0x0000ff00,0x0000ffff,
     0x00ff0000,0x00ff00ff,0x00ffff00,0x00ffffff,
     0xff000000,0xff0000ff,0xff00ff00,0xff00ffff,
     0xffff0000,0xffff00ff,0xffffff00,0xffffffff
-#elif defined(__LITTLE_ENDIAN)
+};
+
+static const u32 cfb_tab8_le[] = {
     0x00000000,0xff000000,0x00ff0000,0xffff0000,
     0x0000ff00,0xff00ff00,0x00ffff00,0xffffff00,
     0x000000ff,0xff0000ff,0x00ff00ff,0xffff00ff,
     0x0000ffff,0xff00ffff,0x00ffffff,0xffffffff
-#else
-#error FIXME: No endianness??
-#endif
 };
 
-static const u32 cfb_tab16[] = {
-#if defined(__BIG_ENDIAN)
+static const u32 cfb_tab16_be[] = {
     0x00000000, 0x0000ffff, 0xffff0000, 0xffffffff
-#elif defined(__LITTLE_ENDIAN)
+};
+
+static const u32 cfb_tab16_le[] = {
     0x00000000, 0xffff0000, 0x0000ffff, 0xffffffff
-#else
-#error FIXME: No endianness??
-#endif
 };
 
 static const u32 cfb_tab32[] = {
@@ -72,7 +68,7 @@ static void color_imageblit(const struct fb_image *image, struct fb_info *p,
                val = 0;
 
                if (start_index) {
-                       u32 start_mask = ~(FB_SHIFT_HIGH(~(u32)0,
+                       u32 start_mask = ~(FB_SHIFT_HIGH(p, ~(u32)0,
                                                         start_index));
                        val = *dst & start_mask;
                        shift = start_index;
@@ -83,20 +79,20 @@ static void color_imageblit(const struct fb_image *image, struct fb_info *p,
                                color = palette[*src];
                        else
                                color = *src;
-                       color <<= FB_LEFT_POS(bpp);
-                       val |= FB_SHIFT_HIGH(color, shift);
+                       color <<= FB_LEFT_POS(p, bpp);
+                       val |= FB_SHIFT_HIGH(p, color, shift);
                        if (shift >= null_bits) {
                                *dst++ = val;
 
                                val = (shift == null_bits) ? 0 :
-                                       FB_SHIFT_LOW(color, 32 - shift);
+                                       FB_SHIFT_LOW(p, color, 32 - shift);
                        }
                        shift += bpp;
                        shift &= (32 - 1);
                        src++;
                }
                if (shift) {
-                       u32 end_mask = FB_SHIFT_HIGH(~(u32)0, shift);
+                       u32 end_mask = FB_SHIFT_HIGH(p, ~(u32)0, shift);
 
                        *dst &= end_mask;
                        *dst |= val;
@@ -125,8 +121,8 @@ static void slow_imageblit(const struct fb_image *image, struct fb_info *p,
        u32 i, j, l;
 
        dst2 = dst1;
-       fgcolor <<= FB_LEFT_POS(bpp);
-       bgcolor <<= FB_LEFT_POS(bpp);
+       fgcolor <<= FB_LEFT_POS(p, bpp);
+       bgcolor <<= FB_LEFT_POS(p, bpp);
 
        for (i = image->height; i--; ) {
                shift = val = 0;
@@ -137,7 +133,8 @@ static void slow_imageblit(const struct fb_image *image, struct fb_info *p,
 
                /* write leading bits */
                if (start_index) {
-                       u32 start_mask = ~(FB_SHIFT_HIGH(~(u32)0,start_index));
+                       u32 start_mask = ~(FB_SHIFT_HIGH(p, ~(u32)0,
+                                                        start_index));
                        val = *dst & start_mask;
                        shift = start_index;
                }
@@ -145,13 +142,13 @@ static void slow_imageblit(const struct fb_image *image, struct fb_info *p,
                while (j--) {
                        l--;
                        color = (*s & (1 << l)) ? fgcolor : bgcolor;
-                       val |= FB_SHIFT_HIGH(color, shift);
+                       val |= FB_SHIFT_HIGH(p, color, shift);
 
                        /* Did the bitshift spill bits to the next long? */
                        if (shift >= null_bits) {
                                *dst++ = val;
                                val = (shift == null_bits) ? 0 :
-                                       FB_SHIFT_LOW(color,32 - shift);
+                                       FB_SHIFT_LOW(p, color, 32 - shift);
                        }
                        shift += bpp;
                        shift &= (32 - 1);
@@ -160,7 +157,7 @@ static void slow_imageblit(const struct fb_image *image, struct fb_info *p,
 
                /* write trailing bits */
                if (shift) {
-                       u32 end_mask = FB_SHIFT_HIGH(~(u32)0, shift);
+                       u32 end_mask = FB_SHIFT_HIGH(p, ~(u32)0, shift);
 
                        *dst &= end_mask;
                        *dst |= val;
@@ -199,10 +196,10 @@ static void fast_imageblit(const struct fb_image *image, struct fb_info *p,
 
        switch (bpp) {
        case 8:
-               tab = cfb_tab8;
+               tab = fb_be_math(p) ? cfb_tab8_be : cfb_tab8_le;
                break;
        case 16:
-               tab = cfb_tab16;
+               tab = fb_be_math(p) ? cfb_tab16_be : cfb_tab16_le;
                break;
        case 32:
        default:
index e5a9ddb3c8be4d9386a63aa758429074fc1ed176..a71774305772bb8d60270f216b53bd6a996906e5 100644 (file)
@@ -419,7 +419,7 @@ static int __devinit tcx_init_one(struct of_device *op)
                par->mmap_map[6].size = SBUS_MMAP_EMPTY;
        }
 
-       par->physbase = 0;
+       par->physbase = op->resource[0].start;
        par->which_io = op->resource[0].flags & IORESOURCE_BITS;
 
        for (i = 0; i < TCX_MMAP_ENTRIES; i++) {
@@ -470,10 +470,10 @@ static int __devinit tcx_init_one(struct of_device *op)
 
        dev_set_drvdata(&op->dev, info);
 
-       printk("%s: TCX at %lx:%lx, %s\n",
+       printk(KERN_INFO "%s: TCX at %lx:%lx, %s\n",
               dp->full_name,
               par->which_io,
-              op->resource[0].start,
+              par->physbase,
               par->lowdepth ? "8-bit only" : "24-bit depth");
 
        return 0;
@@ -527,7 +527,7 @@ static struct of_platform_driver tcx_driver = {
        .remove         = __devexit_p(tcx_remove),
 };
 
-int __init tcx_init(void)
+static int __init tcx_init(void)
 {
        if (fb_get_options("tcxfb", NULL))
                return -ENODEV;
@@ -535,7 +535,7 @@ int __init tcx_init(void)
        return of_register_driver(&tcx_driver, &of_bus_type);
 }
 
-void __exit tcx_exit(void)
+static void __exit tcx_exit(void)
 {
        of_unregister_driver(&tcx_driver);
 }
index 71e179ea5f95db581df36b0e544169b2de947436..ea9f19d2559799155c0eb330d395f8f7f3786f92 100644 (file)
@@ -70,7 +70,7 @@
 
 #include <video/tdfx.h>
 
-#define DPRINTK(a, b...) pr_debug("fb: %s: " a, __FUNCTION__ , ## b)
+#define DPRINTK(a, b...) pr_debug("fb: %s: " a, __func__ , ## b)
 
 #ifdef CONFIG_MTRR
 #include <asm/mtrr.h>
index 0a4e07d43d2d44712e66477294175801b98e38fd..bd54cd0de39af13fc4170665878ed63ac1af4acb 100644 (file)
@@ -58,7 +58,7 @@ static int displaytype;
 /* defaults which are normally overriden by user values */
 
 /* video mode */
-static char *mode = "640x480";
+static char *mode_option __devinitdata = "640x480";
 static int bpp = 8;
 
 static int noaccel;
@@ -73,7 +73,10 @@ static int memsize;
 static int memdiff;
 static int nativex;
 
-module_param(mode, charp, 0);
+module_param(mode_option, charp, 0);
+MODULE_PARM_DESC(mode_option, "Initial video mode e.g. '648x480-8@60'");
+module_param_named(mode, mode_option, charp, 0);
+MODULE_PARM_DESC(mode, "Initial video mode e.g. '648x480-8@60' (deprecated)");
 module_param(bpp, int, 0);
 module_param(center, int, 0);
 module_param(stretch, int, 0);
@@ -1297,7 +1300,8 @@ static int __devinit trident_pci_probe(struct pci_dev * dev,
 #endif
        fb_info.pseudo_palette = pseudo_pal;
 
-       if (!fb_find_mode(&default_var, &fb_info, mode, NULL, 0, NULL, bpp)) {
+       if (!fb_find_mode(&default_var, &fb_info,
+                         mode_option, NULL, 0, NULL, bpp)) {
                err = -EINVAL;
                goto out_unmap2;
        }
@@ -1385,7 +1389,7 @@ static struct pci_driver tridentfb_pci_driver = {
  *     video=trident:800x600,bpp=16,noaccel
  */
 #ifndef MODULE
-static int tridentfb_setup(char *options)
+static int __init tridentfb_setup(char *options)
 {
        char *opt;
        if (!options || !*options)
@@ -1412,7 +1416,7 @@ static int tridentfb_setup(char *options)
                else if (!strncmp(opt, "nativex=", 8))
                        nativex = simple_strtoul(opt + 8, NULL, 0);
                else
-                       mode = opt;
+                       mode_option = opt;
        }
        return 0;
 }
index 93361656316c2e06edfa33fafc067b613a0a52be..cdbb56edb6cbdde9b34231b5e985c70c3e39efb4 100644 (file)
@@ -181,7 +181,8 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
        /* If all slots are taken -- bail out. */
        if (uvfb_tasks[seq]) {
                mutex_unlock(&uvfb_lock);
-               return -EBUSY;
+               err = -EBUSY;
+               goto out;
        }
 
        /* Save a pointer to the kernel part of the task struct. */
@@ -205,7 +206,6 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
                        err = cn_netlink_send(m, 0, gfp_any());
                }
        }
-       kfree(m);
 
        if (!err && !(task->t.flags & TF_EXIT))
                err = !wait_for_completion_timeout(task->done,
@@ -218,7 +218,8 @@ static int uvesafb_exec(struct uvesafb_ktask *task)
        seq++;
        if (seq >= UVESAFB_TASKS_MAX)
                seq = 0;
-
+out:
+       kfree(m);
        return err;
 }
 
@@ -885,7 +886,7 @@ static int __devinit uvesafb_vbe_init_mode(struct fb_info *info)
        }
 
        /* fb_find_mode() failed */
-       if (i == 0 || i >= 3) {
+       if (i == 0) {
                info->var.xres = 640;
                info->var.yres = 480;
                mode = (struct fb_videomode *)
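
The uvesafb error-path change above routes every exit through a single label so the netlink message buffer is freed exactly once, instead of leaking on the early -EBUSY return. A compressed sketch of the idiom (the demo_* struct and helpers are placeholders, not the driver's API):

    static int demo_exec(struct demo_task *task)
    {
            struct demo_msg *m;
            int err = 0;

            m = kzalloc(sizeof(*m), GFP_KERNEL);
            if (!m)
                    return -ENOMEM;

            if (demo_all_slots_taken()) {
                    err = -EBUSY;
                    goto out;       /* previously an early return that leaked m */
            }

            err = demo_send_request(m, task);
    out:
            kfree(m);               /* one cleanup point shared by every path */
            return err;
    }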
index 2aa71eb67c2bb048f5f80c29951cf24648dfb88c..c18f1884b550b923d797e6b1d8b1eb768edb266a 100644 (file)
@@ -112,8 +112,9 @@ static int vmlfb_alloc_vram_area(struct vram_area *va, unsigned max_order,
 
        /*
         * It seems like __get_free_pages only ups the usage count
-        * of the first page. This doesn't work with nopage mapping, so
-        * up the usage count once more.
+        * of the first page. This doesn't work with fault mapping, so
+        * up the usage count once more (XXX: should use split_page or
+        * compound page).
         */
 
        memset((void *)va->logical, 0x00, va->size);
index 4c3a63308df1a9ee32a19fa7863904cb175fee07..536ab11623f01384cb79f0baa98e1e717b8e7e97 100644 (file)
@@ -100,7 +100,7 @@ static struct svga_timing_regs vt8623_timing_regs     = {
 
 /* Module parameters */
 
-static char *mode = "640x480-8@60";
+static char *mode_option = "640x480-8@60";
 
 #ifdef CONFIG_MTRR
 static int mtrr = 1;
@@ -110,8 +110,10 @@ MODULE_AUTHOR("(c) 2006 Ondrej Zajicek <santiago@crfreenet.org>");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("fbdev driver for integrated graphics core in VIA VT8623 [CLE266]");
 
-module_param(mode, charp, 0644);
-MODULE_PARM_DESC(mode, "Default video mode ('640x480-8@60', etc)");
+module_param(mode_option, charp, 0644);
+MODULE_PARM_DESC(mode_option, "Default video mode ('640x480-8@60', etc)");
+module_param_named(mode, mode_option, charp, 0);
+MODULE_PARM_DESC(mode, "Default video mode e.g. '648x480-8@60' (deprecated)");
 
 #ifdef CONFIG_MTRR
 module_param(mtrr, int, 0444);
@@ -434,6 +436,10 @@ static int vt8623fb_set_par(struct fb_info *info)
        svga_wcrt_multi(vt8623_offset_regs, offset_value);
        svga_wseq_multi(vt8623_fetch_count_regs, fetch_value);
 
+       /* Clear H/V Skew */
+       svga_wcrt_mask(0x03, 0x00, 0x60);
+       svga_wcrt_mask(0x05, 0x00, 0x60);
+
        if (info->var.vmode & FB_VMODE_DOUBLE)
                svga_wcrt_mask(0x09, 0x80, 0x80);
        else
@@ -655,7 +661,7 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
        }
 
        /* Allocate and fill driver data structure */
-       info = framebuffer_alloc(sizeof(struct vt8623fb_info), NULL);
+       info = framebuffer_alloc(sizeof(struct vt8623fb_info), &(dev->dev));
        if (! info) {
                dev_err(&(dev->dev), "cannot allocate memory\n");
                return -ENOMEM;
@@ -671,13 +677,13 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
 
        rc = pci_enable_device(dev);
        if (rc < 0) {
-               dev_err(&(dev->dev), "cannot enable PCI device\n");
+               dev_err(info->dev, "cannot enable PCI device\n");
                goto err_enable_device;
        }
 
        rc = pci_request_regions(dev, "vt8623fb");
        if (rc < 0) {
-               dev_err(&(dev->dev), "cannot reserve framebuffer region\n");
+               dev_err(info->dev, "cannot reserve framebuffer region\n");
                goto err_request_regions;
        }
 
@@ -690,14 +696,14 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
        info->screen_base = pci_iomap(dev, 0, 0);
        if (! info->screen_base) {
                rc = -ENOMEM;
-               dev_err(&(dev->dev), "iomap for framebuffer failed\n");
+               dev_err(info->dev, "iomap for framebuffer failed\n");
                goto err_iomap_1;
        }
 
        par->mmio_base = pci_iomap(dev, 1, 0);
        if (! par->mmio_base) {
                rc = -ENOMEM;
-               dev_err(&(dev->dev), "iomap for MMIO failed\n");
+               dev_err(info->dev, "iomap for MMIO failed\n");
                goto err_iomap_2;
        }
 
@@ -708,7 +714,7 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
        if ((16 <= memsize1) && (memsize1 <= 64) && (memsize1 == memsize2))
                info->screen_size = memsize1 << 20;
        else {
-               dev_err(&(dev->dev), "memory size detection failed (%x %x), suppose 16 MB\n", memsize1, memsize2);
+               dev_err(info->dev, "memory size detection failed (%x %x), suppose 16 MB\n", memsize1, memsize2);
                info->screen_size = 16 << 20;
        }
 
@@ -722,22 +728,22 @@ static int __devinit vt8623_pci_probe(struct pci_dev *dev, const struct pci_devi
 
        /* Prepare startup mode */
 
-       rc = fb_find_mode(&(info->var), info, mode, NULL, 0, NULL, 8);
+       rc = fb_find_mode(&(info->var), info, mode_option, NULL, 0, NULL, 8);
        if (! ((rc == 1) || (rc == 2))) {
                rc = -EINVAL;
-               dev_err(&(dev->dev), "mode %s not found\n", mode);
+               dev_err(info->dev, "mode %s not found\n", mode_option);
                goto err_find_mode;
        }
 
        rc = fb_alloc_cmap(&info->cmap, 256, 0);
        if (rc < 0) {
-               dev_err(&(dev->dev), "cannot allocate colormap\n");
+               dev_err(info->dev, "cannot allocate colormap\n");
                goto err_alloc_cmap;
        }
 
        rc = register_framebuffer(info);
        if (rc < 0) {
-               dev_err(&(dev->dev), "cannot register framebugger\n");
+               dev_err(info->dev, "cannot register framebugger\n");
                goto err_reg_fb;
        }
 
@@ -811,7 +817,7 @@ static int vt8623_pci_suspend(struct pci_dev* dev, pm_message_t state)
        struct fb_info *info = pci_get_drvdata(dev);
        struct vt8623fb_info *par = info->par;
 
-       dev_info(&(dev->dev), "suspend\n");
+       dev_info(info->dev, "suspend\n");
 
        acquire_console_sem();
        mutex_lock(&(par->open_lock));
@@ -842,7 +848,7 @@ static int vt8623_pci_resume(struct pci_dev* dev)
        struct fb_info *info = pci_get_drvdata(dev);
        struct vt8623fb_info *par = info->par;
 
-       dev_info(&(dev->dev), "resume\n");
+       dev_info(info->dev, "resume\n");
 
        acquire_console_sem();
        mutex_lock(&(par->open_lock));
@@ -913,7 +919,7 @@ static int __init vt8623fb_init(void)
                return -ENODEV;
 
        if (option && *option)
-               mode = option;
+               mode_option = option;
 #endif
 
        pr_debug("vt8623fb: initializing\n");
index 003c49a490eb9034f21bb8b8f49d8d91b0eddee6..30469bf906e54b51a6688918624fd030ab6f25cf 100644 (file)
@@ -765,8 +765,10 @@ int __init w100fb_probe(struct platform_device *pdev)
        printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
        return 0;
 out:
-       fb_dealloc_cmap(&info->cmap);
-       kfree(info->pseudo_palette);
+       if (info) {
+               fb_dealloc_cmap(&info->cmap);
+               kfree(info->pseudo_palette);
+       }
        if (remapped_fbuf != NULL)
                iounmap(remapped_fbuf);
        if (remapped_regs != NULL)
index fe6bdf43380f1cd95fea955ec583f4aceaa780fa..e6ab7cf08f8885d07cbfa222420d5c621c6039c3 100644 (file)
@@ -30,7 +30,7 @@
 #  define assert(expr) \
         if(unlikely(!(expr))) {                                        \
         printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n",        \
-        #expr,__FILE__,__FUNCTION__,__LINE__);                 \
+       #expr, __FILE__, __func__, __LINE__);                   \
         }
 #endif
 
index 2ce4cebc31d9d47e6465783a0fd11b06082ee7e9..099b6fb5b5cbd364817776edf8cc68129fe482df 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/types.h>
 #include <linux/zorro.h>
 #include <linux/proc_fs.h>
+#include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/smp_lock.h>
 #include <asm/uaccess.h>
@@ -76,36 +77,58 @@ proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *
 }
 
 static const struct file_operations proc_bus_zorro_operations = {
+       .owner          = THIS_MODULE,
        .llseek         = proc_bus_zorro_lseek,
        .read           = proc_bus_zorro_read,
 };
 
-static int
-get_zorro_dev_info(char *buf, char **start, off_t pos, int count)
+static void * zorro_seq_start(struct seq_file *m, loff_t *pos)
 {
-       u_int slot;
-       off_t at = 0;
-       int len, cnt;
-
-       for (slot = cnt = 0; slot < zorro_num_autocon && count > cnt; slot++) {
-               struct zorro_dev *z = &zorro_autocon[slot];
-               len = sprintf(buf, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot,
-                             z->id, (unsigned long)zorro_resource_start(z),
-                             (unsigned long)zorro_resource_len(z),
-                             z->rom.er_Type);
-               at += len;
-               if (at >= pos) {
-                       if (!*start) {
-                               *start = buf + (pos - (at - len));
-                               cnt = at - pos;
-                       } else
-                               cnt += len;
-                       buf += len;
-               }
-       }
-       return (count > cnt) ? cnt : count;
+       return (*pos < zorro_num_autocon) ? pos : NULL;
+}
+
+static void * zorro_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       (*pos)++;
+       return (*pos < zorro_num_autocon) ? pos : NULL;
+}
+
+static void zorro_seq_stop(struct seq_file *m, void *v)
+{
+}
+
+static int zorro_seq_show(struct seq_file *m, void *v)
+{
+       u_int slot = *(loff_t *)v;
+       struct zorro_dev *z = &zorro_autocon[slot];
+
+       seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id,
+                  (unsigned long)zorro_resource_start(z),
+                  (unsigned long)zorro_resource_len(z),
+                  z->rom.er_Type);
+       return 0;
+}
+
+static const struct seq_operations zorro_devices_seq_ops = {
+       .start = zorro_seq_start,
+       .next  = zorro_seq_next,
+       .stop  = zorro_seq_stop,
+       .show  = zorro_seq_show,
+};
+
+static int zorro_devices_proc_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &zorro_devices_seq_ops);
 }
 
+static const struct file_operations zorro_devices_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = zorro_devices_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+
 static struct proc_dir_entry *proc_bus_zorro_dir;
 
 static int __init zorro_proc_attach_device(u_int slot)
@@ -114,11 +137,11 @@ static int __init zorro_proc_attach_device(u_int slot)
        char name[4];
 
        sprintf(name, "%02x", slot);
-       entry = create_proc_entry(name, 0, proc_bus_zorro_dir);
+       entry = proc_create_data(name, 0, proc_bus_zorro_dir,
+                                &proc_bus_zorro_operations,
+                                &zorro_autocon[slot]);
        if (!entry)
                return -ENOMEM;
-       entry->proc_fops = &proc_bus_zorro_operations;
-       entry->data = &zorro_autocon[slot];
        entry->size = sizeof(struct zorro_dev);
        return 0;
 }
@@ -128,9 +151,9 @@ static int __init zorro_proc_init(void)
        u_int slot;
 
        if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) {
-               proc_bus_zorro_dir = proc_mkdir("zorro", proc_bus);
-               create_proc_info_entry("devices", 0, proc_bus_zorro_dir,
-                                      get_zorro_dev_info);
+               proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL);
+               proc_create("devices", 0, proc_bus_zorro_dir,
+                           &zorro_devices_proc_fops);
                for (slot = 0; slot < zorro_num_autocon; slot++)
                        zorro_proc_attach_device(slot);
        }
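
The conversion above is the standard seq_file recipe: an iterator (start/next/stop/show) plus an open routine wired into a file_operations table registered with proc_create(). Boiled down to a standalone sketch (demo_* names and the fixed record count are illustrative):

    #include <linux/seq_file.h>
    #include <linux/proc_fs.h>

    static void *demo_seq_start(struct seq_file *m, loff_t *pos)
    {
            return (*pos < 4) ? pos : NULL;         /* 4 = number of records */
    }

    static void *demo_seq_next(struct seq_file *m, void *v, loff_t *pos)
    {
            (*pos)++;
            return (*pos < 4) ? pos : NULL;
    }

    static void demo_seq_stop(struct seq_file *m, void *v)
    {
    }

    static int demo_seq_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "record %02x\n", (unsigned int)*(loff_t *)v);
            return 0;
    }

    static const struct seq_operations demo_seq_ops = {
            .start = demo_seq_start,
            .next  = demo_seq_next,
            .stop  = demo_seq_stop,
            .show  = demo_seq_show,
    };

    static int demo_proc_open(struct inode *inode, struct file *file)
    {
            return seq_open(file, &demo_seq_ops);
    }

    static const struct file_operations demo_proc_fops = {
            .owner   = THIS_MODULE,
            .open    = demo_proc_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = seq_release,
    };

    /* hooked up once at init time: proc_create("demo", 0, NULL, &demo_proc_fops); */

The seq_file core then handles buffering, partial reads and lseek, which is exactly the bookkeeping the old get_zorro_dev_info() did by hand.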
index 2e43d46f65d6370c40c4a0ac33f36798fad909fb..cf12c403b8c753b4e8a431ab0c56cb89b5ac588a 100644 (file)
@@ -1005,7 +1005,8 @@ config TMPFS_POSIX_ACL
 
 config HUGETLBFS
        bool "HugeTLB file system support"
-       depends on X86 || IA64 || PPC64 || SPARC64 || (SUPERH && MMU) || BROKEN
+       depends on X86 || IA64 || PPC64 || SPARC64 || (SUPERH && MMU) || \
+                  (S390 && 64BIT) || BROKEN
        help
          hugetlbfs is a filesystem backing for HugeTLB pages, based on
          ramfs. For architectures that support it, say Y here and read
index 853845abcca676889bc854da217082de463e637a..55e8ee1900a5e9b2b2a9b4214be83561eafd5ebe 100644 (file)
@@ -41,7 +41,7 @@ config BINFMT_ELF_FDPIC
          It is also possible to run FDPIC ELF binaries on MMU linux also.
 
 config BINFMT_FLAT
-       tristate "Kernel support for flat binaries"
+       bool "Kernel support for flat binaries"
        depends on !MMU
        help
          Support uClinux FLAT format binaries.
index 936f2af39c433e12dfb8966b8716c3a90d7f1d2c..831157502d5ae58a25bcec65c9af58d5eabc871f 100644 (file)
@@ -75,7 +75,7 @@ extern unsigned int adfs_map_free(struct super_block *sb);
 /* Misc */
 void __adfs_error(struct super_block *sb, const char *function,
                  const char *fmt, ...);
-#define adfs_error(sb, fmt...) __adfs_error(sb, __FUNCTION__, fmt)
+#define adfs_error(sb, fmt...) __adfs_error(sb, __func__, fmt)
 
 /* super.c */
 
index b9b2b27b68c30f3dd97a7717d0f557e5f3f9615d..ea7df2146921142a27b4c27b5e4ebd15704f2f02 100644 (file)
@@ -122,9 +122,9 @@ adfs_dir_checkbyte(const struct adfs_dir *dir)
                ptr.ptr8 = bufoff(bh, i);
                end.ptr8 = ptr.ptr8 + last - i;
 
-               do
+               do {
                        dircheck = *ptr.ptr8++ ^ ror13(dircheck);
-               while (ptr.ptr8 < end.ptr8);
+               } while (ptr.ptr8 < end.ptr8);

        }
 
        /*
index 6e0c9399200e84a74d163e25505d087487a2262d..1a4f092f24efdb2ab13efa4bebb85bd38e7d2678 100644 (file)
@@ -325,8 +325,7 @@ affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_resul
        pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block);
 
 
-       if (block > (sector_t)0x7fffffffUL)
-               BUG();
+       BUG_ON(block > (sector_t)0x7fffffffUL);
 
        if (block >= AFFS_I(inode)->i_blkcnt) {
                if (block > AFFS_I(inode)->i_blkcnt || !create)
@@ -493,8 +492,7 @@ affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsign
        u32 tmp;
 
        pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to);
-       if (from > to || to > PAGE_CACHE_SIZE)
-               BUG();
+       BUG_ON(from > to || to > PAGE_CACHE_SIZE);
        kmap(page);
        data = page_address(page);
        bsize = AFFS_SB(sb)->s_data_blksize;
@@ -507,8 +505,7 @@ affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsign
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, to - from);
-               if (from + tmp > to || tmp > bsize)
-                       BUG();
+               BUG_ON(from + tmp > to || tmp > bsize);
                memcpy(data + from, AFFS_DATA(bh) + boff, tmp);
                affs_brelse(bh);
                bidx++;
@@ -540,10 +537,9 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize)
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, newsize - size);
-               if (boff + tmp > bsize || tmp > bsize)
-                       BUG();
+               BUG_ON(boff + tmp > bsize || tmp > bsize);
                memset(AFFS_DATA(bh) + boff, 0, tmp);
-               AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
+               be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                size += tmp;
@@ -560,8 +556,7 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize)
                if (IS_ERR(bh))
                        goto out;
                tmp = min(bsize, newsize - size);
-               if (tmp > bsize)
-                       BUG();
+               BUG_ON(tmp > bsize);
                AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
                AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
                AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
@@ -683,10 +678,9 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
                if (IS_ERR(bh))
                        return PTR_ERR(bh);
                tmp = min(bsize - boff, to - from);
-               if (boff + tmp > bsize || tmp > bsize)
-                       BUG();
+               BUG_ON(boff + tmp > bsize || tmp > bsize);
                memcpy(AFFS_DATA(bh) + boff, data + from, tmp);
-               AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp);
+               be32_add_cpu(&AFFS_DATA_HEAD(bh)->size, tmp);
                affs_fix_checksum(sb, bh);
                mark_buffer_dirty_inode(bh, inode);
                written += tmp;
@@ -732,8 +726,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping,
                if (IS_ERR(bh))
                        goto out;
                tmp = min(bsize, to - from);
-               if (tmp > bsize)
-                       BUG();
+               BUG_ON(tmp > bsize);
                memcpy(AFFS_DATA(bh), data + from, tmp);
                if (buffer_new(bh)) {
                        AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
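
The be32_add_cpu() substitutions above are behaviour-preserving; the helper just hides the endian round trip. Illustratively (x and tmp are stand-in names):

    __be32 x = cpu_to_be32(100);
    u32 tmp = 16;

    /* before: convert to CPU order, add, convert back */
    x = cpu_to_be32(be32_to_cpu(x) + tmp);

    /* after: the helper performs the same read-modify-write in place */
    be32_add_cpu(&x, tmp);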
index d2dc047cb4796b4a879606e60349ec4eb55a79ae..01d25d532541bc3d698f5321923a87cf9900cfe4 100644 (file)
@@ -199,7 +199,6 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
                case Opt_prefix:
                        /* Free any previous prefix */
                        kfree(*prefix);
-                       *prefix = NULL;
                        *prefix = match_strdup(&args[0]);
                        if (!*prefix)
                                return 0;
@@ -233,6 +232,8 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
                        break;
                case Opt_volume: {
                        char *vol = match_strdup(&args[0]);
+                       if (!vol)
+                               return 0;
                        strlcpy(volume, vol, 32);
                        kfree(vol);
                        break;
index 7b4d4fab4c80ed87d9aa687cbb3e9198b1b00e76..255f5dd6040c498d040753f3f88f30f726f61238 100644 (file)
@@ -24,7 +24,8 @@ enum AFS_CM_Operations {
        CBGetXStatsVersion      = 209,  /* get version of extended statistics */
        CBGetXStats             = 210,  /* get contents of extended statistics data */
        CBInitCallBackState3    = 213,  /* initialise callback state, version 3 */
-       CBGetCapabilities       = 65538, /* get client capabilities */
+       CBProbeUuid             = 214,  /* check the client hasn't rebooted */
+       CBTellMeAboutYourself   = 65538, /* get client capabilities */
 };
 
 #define AFS_CAP_ERROR_TRANSLATION      0x1
index 584bb0f9c36a35cdd5ed4234a83ba927e0d04e5e..5e1df14e16b188d9a47501a28c48ce65a4be4c5b 100644 (file)
@@ -20,7 +20,7 @@
 DECLARE_RWSEM(afs_proc_cells_sem);
 LIST_HEAD(afs_proc_cells);
 
-static struct list_head afs_cells = LIST_HEAD_INIT(afs_cells);
+static LIST_HEAD(afs_cells);
 static DEFINE_RWLOCK(afs_cells_lock);
 static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */
 static DECLARE_WAIT_QUEUE_HEAD(afs_cells_freeable_wq);
index 47b71c8947f91221ac9551b75a5dfa91810d2bd8..eb765489164f85c4c51d9ac1595a3b48e5d41cd2 100644 (file)
@@ -26,8 +26,9 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *,
                                                struct sk_buff *, bool);
 static int afs_deliver_cb_probe(struct afs_call *, struct sk_buff *, bool);
 static int afs_deliver_cb_callback(struct afs_call *, struct sk_buff *, bool);
-static int afs_deliver_cb_get_capabilities(struct afs_call *, struct sk_buff *,
-                                          bool);
+static int afs_deliver_cb_probe_uuid(struct afs_call *, struct sk_buff *, bool);
+static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *,
+                                                struct sk_buff *, bool);
 static void afs_cm_destructor(struct afs_call *);
 
 /*
@@ -71,11 +72,21 @@ static const struct afs_call_type afs_SRXCBProbe = {
 };
 
 /*
- * CB.GetCapabilities operation type
+ * CB.ProbeUuid operation type
  */
-static const struct afs_call_type afs_SRXCBGetCapabilites = {
-       .name           = "CB.GetCapabilities",
-       .deliver        = afs_deliver_cb_get_capabilities,
+static const struct afs_call_type afs_SRXCBProbeUuid = {
+       .name           = "CB.ProbeUuid",
+       .deliver        = afs_deliver_cb_probe_uuid,
+       .abort_to_error = afs_abort_to_error,
+       .destructor     = afs_cm_destructor,
+};
+
+/*
+ * CB.TellMeAboutYourself operation type
+ */
+static const struct afs_call_type afs_SRXCBTellMeAboutYourself = {
+       .name           = "CB.TellMeAboutYourself",
+       .deliver        = afs_deliver_cb_tell_me_about_yourself,
        .abort_to_error = afs_abort_to_error,
        .destructor     = afs_cm_destructor,
 };
@@ -103,8 +114,8 @@ bool afs_cm_incoming_call(struct afs_call *call)
        case CBProbe:
                call->type = &afs_SRXCBProbe;
                return true;
-       case CBGetCapabilities:
-               call->type = &afs_SRXCBGetCapabilites;
+       case CBTellMeAboutYourself:
+               call->type = &afs_SRXCBTellMeAboutYourself;
                return true;
        default:
                return false;
@@ -392,10 +403,106 @@ static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb,
        return 0;
 }
 
+/*
+ * allow the fileserver to quickly find out if the fileserver has been rebooted
+ */
+static void SRXAFSCB_ProbeUuid(struct work_struct *work)
+{
+       struct afs_call *call = container_of(work, struct afs_call, work);
+       struct afs_uuid *r = call->request;
+
+       struct {
+               __be32  match;
+       } reply;
+
+       _enter("");
+
+
+       if (memcmp(r, &afs_uuid, sizeof(afs_uuid)) == 0)
+               reply.match = htonl(0);
+       else
+               reply.match = htonl(1);
+
+       afs_send_simple_reply(call, &reply, sizeof(reply));
+       _leave("");
+}
+
+/*
+ * deliver request data to a CB.ProbeUuid call
+ */
+static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb,
+                                    bool last)
+{
+       struct afs_uuid *r;
+       unsigned loop;
+       __be32 *b;
+       int ret;
+
+       _enter("{%u},{%u},%d", call->unmarshall, skb->len, last);
+
+       if (skb->len > 0)
+               return -EBADMSG;
+       if (!last)
+               return 0;
+
+       switch (call->unmarshall) {
+       case 0:
+               call->offset = 0;
+               call->buffer = kmalloc(11 * sizeof(__be32), GFP_KERNEL);
+               if (!call->buffer)
+                       return -ENOMEM;
+               call->unmarshall++;
+
+       case 1:
+               _debug("extract UUID");
+               ret = afs_extract_data(call, skb, last, call->buffer,
+                                      11 * sizeof(__be32));
+               switch (ret) {
+               case 0:         break;
+               case -EAGAIN:   return 0;
+               default:        return ret;
+               }
+
+               _debug("unmarshall UUID");
+               call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL);
+               if (!call->request)
+                       return -ENOMEM;
+
+               b = call->buffer;
+               r = call->request;
+               r->time_low                     = ntohl(b[0]);
+               r->time_mid                     = ntohl(b[1]);
+               r->time_hi_and_version          = ntohl(b[2]);
+               r->clock_seq_hi_and_reserved    = ntohl(b[3]);
+               r->clock_seq_low                = ntohl(b[4]);
+
+               for (loop = 0; loop < 6; loop++)
+                       r->node[loop] = ntohl(b[loop + 5]);
+
+               call->offset = 0;
+               call->unmarshall++;
+
+       case 2:
+               _debug("trailer");
+               if (skb->len != 0)
+                       return -EBADMSG;
+               break;
+       }
+
+       if (!last)
+               return 0;
+
+       call->state = AFS_CALL_REPLYING;
+
+       INIT_WORK(&call->work, SRXAFSCB_ProbeUuid);
+       schedule_work(&call->work);
+       return 0;
+}
+
 /*
  * allow the fileserver to ask about the cache manager's capabilities
  */
-static void SRXAFSCB_GetCapabilities(struct work_struct *work)
+static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work)
 {
        struct afs_interface *ifs;
        struct afs_call *call = container_of(work, struct afs_call, work);
@@ -456,10 +563,10 @@ static void SRXAFSCB_GetCapabilities(struct work_struct *work)
 }
 
 /*
- * deliver request data to a CB.GetCapabilities call
+ * deliver request data to a CB.TellMeAboutYourself call
  */
-static int afs_deliver_cb_get_capabilities(struct afs_call *call,
-                                          struct sk_buff *skb, bool last)
+static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call,
+                                                struct sk_buff *skb, bool last)
 {
        _enter(",{%u},%d", skb->len, last);
 
@@ -471,7 +578,7 @@ static int afs_deliver_cb_get_capabilities(struct afs_call *call,
        /* no unmarshalling required */
        call->state = AFS_CALL_REPLYING;
 
-       INIT_WORK(&call->work, SRXAFSCB_GetCapabilities);
+       INIT_WORK(&call->work, SRXAFSCB_TellMeAboutYourself);
        schedule_work(&call->work);
        return 0;
 }
index b58af8f18bc4d36a849e7f314b9c809834a96bae..dfda03d4397d03862ae9b64d0832d4d9463010c5 100644 (file)
@@ -140,7 +140,7 @@ static inline void afs_dir_check_page(struct inode *dir, struct page *page)
 
        if (page->index == 0 && qty != ntohs(dbuf->blocks[0].pagehdr.npages)) {
                printk("kAFS: %s(%lu): wrong number of dir blocks %d!=%hu\n",
-                      __FUNCTION__, dir->i_ino, qty,
+                      __func__, dir->i_ino, qty,
                       ntohs(dbuf->blocks[0].pagehdr.npages));
                goto error;
        }
@@ -159,7 +159,7 @@ static inline void afs_dir_check_page(struct inode *dir, struct page *page)
        for (tmp = 0; tmp < qty; tmp++) {
                if (dbuf->blocks[tmp].pagehdr.magic != AFS_DIR_MAGIC) {
                        printk("kAFS: %s(%lu): bad magic %d/%d is %04hx\n",
-                              __FUNCTION__, dir->i_ino, tmp, qty,
+                              __func__, dir->i_ino, tmp, qty,
                               ntohs(dbuf->blocks[tmp].pagehdr.magic));
                        goto error;
                }
index eec41c76de72135d831f4bf7a00819b503110999..7102824ba8479c6e2d006816fb3555311d56df61 100644 (file)
@@ -757,8 +757,8 @@ void _dbprintk(const char *fmt, ...)
 {
 }
 
-#define kenter(FMT,...)        dbgprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__)
-#define kleave(FMT,...)        dbgprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__)
+#define kenter(FMT,...)        dbgprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
+#define kleave(FMT,...)        dbgprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
 #define kdebug(FMT,...)        dbgprintk("    "FMT ,##__VA_ARGS__)
 
 
@@ -791,8 +791,8 @@ do {                                                        \
 } while (0)
 
 #else
-#define _enter(FMT,...)        _dbprintk("==> %s("FMT")",__FUNCTION__ ,##__VA_ARGS__)
-#define _leave(FMT,...)        _dbprintk("<== %s()"FMT"",__FUNCTION__ ,##__VA_ARGS__)
+#define _enter(FMT,...)        _dbprintk("==> %s("FMT")",__func__ ,##__VA_ARGS__)
+#define _leave(FMT,...)        _dbprintk("<== %s()"FMT"",__func__ ,##__VA_ARGS__)
 #define _debug(FMT,...)        _dbprintk("    "FMT ,##__VA_ARGS__)
 #endif
 
index 846c7615ac9e516bdb7810bc6498b9b516defbe6..9f7d1ae70269ac9de86404e5b6eb36b6a4d34f29 100644 (file)
@@ -41,6 +41,7 @@ static const struct file_operations afs_proc_cells_fops = {
        .write          = afs_proc_cells_write,
        .llseek         = seq_lseek,
        .release        = seq_release,
+       .owner          = THIS_MODULE,
 };
 
 static int afs_proc_rootcell_open(struct inode *inode, struct file *file);
@@ -56,7 +57,8 @@ static const struct file_operations afs_proc_rootcell_fops = {
        .read           = afs_proc_rootcell_read,
        .write          = afs_proc_rootcell_write,
        .llseek         = no_llseek,
-       .release        = afs_proc_rootcell_release
+       .release        = afs_proc_rootcell_release,
+       .owner          = THIS_MODULE,
 };
 
 static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file);
@@ -80,6 +82,7 @@ static const struct file_operations afs_proc_cell_volumes_fops = {
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = afs_proc_cell_volumes_release,
+       .owner          = THIS_MODULE,
 };
 
 static int afs_proc_cell_vlservers_open(struct inode *inode,
@@ -104,6 +107,7 @@ static const struct file_operations afs_proc_cell_vlservers_fops = {
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = afs_proc_cell_vlservers_release,
+       .owner          = THIS_MODULE,
 };
 
 static int afs_proc_cell_servers_open(struct inode *inode, struct file *file);
@@ -127,6 +131,7 @@ static const struct file_operations afs_proc_cell_servers_fops = {
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = afs_proc_cell_servers_release,
+       .owner          = THIS_MODULE,
 };
 
 /*
@@ -143,17 +148,13 @@ int afs_proc_init(void)
                goto error_dir;
        proc_afs->owner = THIS_MODULE;
 
-       p = create_proc_entry("cells", 0, proc_afs);
+       p = proc_create("cells", 0, proc_afs, &afs_proc_cells_fops);
        if (!p)
                goto error_cells;
-       p->proc_fops = &afs_proc_cells_fops;
-       p->owner = THIS_MODULE;
 
-       p = create_proc_entry("rootcell", 0, proc_afs);
+       p = proc_create("rootcell", 0, proc_afs, &afs_proc_rootcell_fops);
        if (!p)
                goto error_rootcell;
-       p->proc_fops = &afs_proc_rootcell_fops;
-       p->owner = THIS_MODULE;
 
        _leave(" = 0");
        return 0;
@@ -395,26 +396,20 @@ int afs_proc_cell_setup(struct afs_cell *cell)
        if (!cell->proc_dir)
                goto error_dir;
 
-       p = create_proc_entry("servers", 0, cell->proc_dir);
+       p = proc_create_data("servers", 0, cell->proc_dir,
+                            &afs_proc_cell_servers_fops, cell);
        if (!p)
                goto error_servers;
-       p->proc_fops = &afs_proc_cell_servers_fops;
-       p->owner = THIS_MODULE;
-       p->data = cell;
 
-       p = create_proc_entry("vlservers", 0, cell->proc_dir);
+       p = proc_create_data("vlservers", 0, cell->proc_dir,
+                            &afs_proc_cell_vlservers_fops, cell);
        if (!p)
                goto error_vlservers;
-       p->proc_fops = &afs_proc_cell_vlservers_fops;
-       p->owner = THIS_MODULE;
-       p->data = cell;
 
-       p = create_proc_entry("volumes", 0, cell->proc_dir);
+       p = proc_create_data("volumes", 0, cell->proc_dir,
+                            &afs_proc_cell_volumes_fops, cell);
        if (!p)
                goto error_volumes;
-       p->proc_fops = &afs_proc_cell_volumes_fops;
-       p->owner = THIS_MODULE;
-       p->data = cell;
 
        _leave(" = 0");
        return 0;
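For context on the conversion above: proc_create() and proc_create_data() register a /proc entry together with its file_operations (and, for the _data variant, a private pointer) in a single call, instead of creating the entry first and filling in ->proc_fops, ->owner and ->data afterwards, so the entry is never visible without its operations. A minimal sketch of the pattern, with hypothetical example_* names that are not part of the commit:

/*
 * Sketch only: the entry name "example" and the example_* symbols are
 * illustrative, not from the commit.
 */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static char example_msg[] = "hello from /proc/example";

static int example_show(struct seq_file *m, void *v)
{
        /* single_open() stashed the entry's data pointer in ->private */
        seq_printf(m, "%s\n", (char *)m->private);
        return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
        /* PDE(inode)->data is whatever was passed to proc_create_data() */
        return single_open(file, example_show, PDE(inode)->data);
}

static const struct file_operations example_fops = {
        .owner          = THIS_MODULE,
        .open           = example_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init example_init(void)
{
        /* one call covers what create_proc_entry() plus manual assignment
         * of p->proc_fops, p->owner and p->data used to do */
        if (!proc_create_data("example", 0, NULL, &example_fops, example_msg))
                return -ENOMEM;
        return 0;
}

static void __exit example_exit(void)
{
        remove_proc_entry("example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");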
index 228368610dfa64e7af06ec9dd864074cf550851e..b5253e77eb2f137ee5b1346f848514ad991801e1 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -191,6 +191,43 @@ static int aio_setup_ring(struct kioctx *ctx)
        kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
 } while(0)
 
+
+/* __put_ioctx
+ *     Called when the last user of an aio context has gone away,
+ *     and the struct needs to be freed.
+ */
+static void __put_ioctx(struct kioctx *ctx)
+{
+       unsigned nr_events = ctx->max_reqs;
+
+       BUG_ON(ctx->reqs_active);
+
+       cancel_delayed_work(&ctx->wq);
+       cancel_work_sync(&ctx->wq.work);
+       aio_free_ring(ctx);
+       mmdrop(ctx->mm);
+       ctx->mm = NULL;
+       pr_debug("__put_ioctx: freeing %p\n", ctx);
+       kmem_cache_free(kioctx_cachep, ctx);
+
+       if (nr_events) {
+               spin_lock(&aio_nr_lock);
+               BUG_ON(aio_nr - nr_events > aio_nr);
+               aio_nr -= nr_events;
+               spin_unlock(&aio_nr_lock);
+       }
+}
+
+#define get_ioctx(kioctx) do {                                         \
+       BUG_ON(atomic_read(&(kioctx)->users) <= 0);                     \
+       atomic_inc(&(kioctx)->users);                                   \
+} while (0)
+#define put_ioctx(kioctx) do {                                         \
+       BUG_ON(atomic_read(&(kioctx)->users) <= 0);                     \
+       if (unlikely(atomic_dec_and_test(&(kioctx)->users)))            \
+               __put_ioctx(kioctx);                                    \
+} while (0)
+
 /* ioctx_alloc
  *     Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
  */
@@ -240,7 +277,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
        if (ctx->max_reqs == 0)
                goto out_cleanup;
 
-       /* now link into global list.  kludge.  FIXME */
+       /* now link into global list. */
        write_lock(&mm->ioctx_list_lock);
        ctx->next = mm->ioctx_list;
        mm->ioctx_list = ctx;
@@ -361,32 +398,6 @@ void exit_aio(struct mm_struct *mm)
        }
 }
 
-/* __put_ioctx
- *     Called when the last user of an aio context has gone away,
- *     and the struct needs to be freed.
- */
-void __put_ioctx(struct kioctx *ctx)
-{
-       unsigned nr_events = ctx->max_reqs;
-
-       BUG_ON(ctx->reqs_active);
-
-       cancel_delayed_work(&ctx->wq);
-       cancel_work_sync(&ctx->wq.work);
-       aio_free_ring(ctx);
-       mmdrop(ctx->mm);
-       ctx->mm = NULL;
-       pr_debug("__put_ioctx: freeing %p\n", ctx);
-       kmem_cache_free(kioctx_cachep, ctx);
-
-       if (nr_events) {
-               spin_lock(&aio_nr_lock);
-               BUG_ON(aio_nr - nr_events > aio_nr);
-               aio_nr -= nr_events;
-               spin_unlock(&aio_nr_lock);
-       }
-}
-
 /* aio_get_req
  *     Allocate a slot for an aio request.  Increments the users count
  * of the kioctx so that the kioctx stays around until all requests are
@@ -542,10 +553,7 @@ int aio_put_req(struct kiocb *req)
        return ret;
 }
 
-/*     Lookup an ioctx id.  ioctx_list is lockless for reads.
- *     FIXME: this is O(n) and is only suitable for development.
- */
-struct kioctx *lookup_ioctx(unsigned long ctx_id)
+static struct kioctx *lookup_ioctx(unsigned long ctx_id)
 {
        struct kioctx *ioctx;
        struct mm_struct *mm;
@@ -1070,9 +1078,7 @@ static void timeout_func(unsigned long data)
 
 static inline void init_timeout(struct aio_timeout *to)
 {
-       init_timer(&to->timer);
-       to->timer.data = (unsigned long)to;
-       to->timer.function = timeout_func;
+       setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
        to->timed_out = 0;
        to->p = current;
 }
@@ -1166,7 +1172,10 @@ retry:
                                break;
                        if (min_nr <= i)
                                break;
-                       ret = 0;
+                       if (unlikely(ctx->dead)) {
+                               ret = -EINVAL;
+                               break;
+                       }
                        if (to.timed_out)       /* Only check after read evt */
                                break;
                        /* Try to only show up in io wait if there are ops
@@ -1202,6 +1211,7 @@ retry:
        if (timeout)
                clear_timeout(&to);
 out:
+       destroy_timer_on_stack(&to.timer);
        return i ? i : ret;
 }
 
@@ -1231,6 +1241,13 @@ static void io_destroy(struct kioctx *ioctx)
 
        aio_cancel_all(ioctx);
        wait_for_all_aios(ioctx);
+
+       /*
+        * Wake up any waiters.  The setting of ctx->dead must be seen
+        * by other CPUs at this point.  Right now, we rely on the
+        * locking done by the above calls to ensure this consistency.
+        */
+       wake_up(&ioctx->wait);
        put_ioctx(ioctx);       /* once for the lookup */
 }
 
@@ -1542,7 +1559,7 @@ static int aio_wake_function(wait_queue_t *wait, unsigned mode,
        return 1;
 }
 
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
+static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                         struct iocb *iocb)
 {
        struct kiocb *req;
@@ -1583,7 +1600,7 @@ int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
                 * event using the eventfd_signal() function.
                 */
                req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd);
-               if (unlikely(IS_ERR(req->ki_eventfd))) {
+               if (IS_ERR(req->ki_eventfd)) {
                        ret = PTR_ERR(req->ki_eventfd);
                        goto out_put_req;
                }
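The read_events() change above swaps the open-coded init_timer()/data/function assignments for setup_timer_on_stack() and adds a matching destroy_timer_on_stack(), the helpers that let the debugobjects infrastructure track timers living in a stack frame. A rough sketch of the idiom, with an invented callback and a one-second wait:

/*
 * Sketch only (not from the commit): my_expire() and the one-second delay
 * are made up for illustration.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

static void my_expire(unsigned long data)
{
        wake_up_process((struct task_struct *)data);
}

static void sleep_about_a_second(void)
{
        struct timer_list t;

        /* like setup_timer(), but also tells debugobjects the timer lives
         * on the stack so CONFIG_DEBUG_OBJECTS_TIMERS can track it */
        setup_timer_on_stack(&t, my_expire, (unsigned long)current);

        set_current_state(TASK_INTERRUPTIBLE);
        mod_timer(&t, jiffies + HZ);
        schedule();

        del_timer_sync(&t);
        /* required counterpart before the stack frame goes away */
        destroy_timer_on_stack(&t);
}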
index 2d4ae40718d9ecdc35119a82a10d264ce5c178ea..c3d352d7fa9304cb44c6ec4e4542bbc46f8ec2b2 100644 (file)
@@ -35,7 +35,7 @@
 /* #define DEBUG */
 
 #ifdef DEBUG
-#define DPRINTK(fmt,args...) do { printk(KERN_DEBUG "pid %d: %s: " fmt "\n" , current->pid , __FUNCTION__ , ##args); } while(0)
+#define DPRINTK(fmt,args...) do { printk(KERN_DEBUG "pid %d: %s: " fmt "\n" , current->pid , __func__ , ##args); } while(0)
 #else
 #define DPRINTK(fmt,args...) do {} while(0)
 #endif
index d96e5c14a9caaaaa2c498b3e5f833176634e3b14..894fee54d4d83bd1ce22c50817871f1dc8c6f0da 100644 (file)
@@ -73,8 +73,8 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
        status = 0;
 done:
        DPRINTK("returning = %d", status);
-       mntput(mnt);
        dput(dentry);
+       mntput(mnt);
        return status;
 }
 
@@ -333,7 +333,7 @@ static struct dentry *autofs4_expire_indirect(struct super_block *sb,
                        /* Can we expire this guy */
                        if (autofs4_can_expire(dentry, timeout, do_now)) {
                                expired = dentry;
-                               break;
+                               goto found;
                        }
                        goto next;
                }
@@ -352,7 +352,7 @@ static struct dentry *autofs4_expire_indirect(struct super_block *sb,
                                inf->flags |= AUTOFS_INF_EXPIRING;
                                spin_unlock(&sbi->fs_lock);
                                expired = dentry;
-                               break;
+                               goto found;
                        }
                        spin_unlock(&sbi->fs_lock);
                /*
@@ -363,7 +363,7 @@ static struct dentry *autofs4_expire_indirect(struct super_block *sb,
                        expired = autofs4_check_leaves(mnt, dentry, timeout, do_now);
                        if (expired) {
                                dput(dentry);
-                               break;
+                               goto found;
                        }
                }
 next:
@@ -371,18 +371,16 @@ next:
                spin_lock(&dcache_lock);
                next = next->next;
        }
-
-       if (expired) {
-               DPRINTK("returning %p %.*s",
-                       expired, (int)expired->d_name.len, expired->d_name.name);
-               spin_lock(&dcache_lock);
-               list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
-               spin_unlock(&dcache_lock);
-               return expired;
-       }
        spin_unlock(&dcache_lock);
-
        return NULL;
+
+found:
+       DPRINTK("returning %p %.*s",
+               expired, (int)expired->d_name.len, expired->d_name.name);
+       spin_lock(&dcache_lock);
+       list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
+       spin_unlock(&dcache_lock);
+       return expired;
 }
 
 /* Perform an expiry operation */
index a54a946a50ae55bc44f04142565834527f07d524..edf5b6bddb528a43bb85e51eb834ecbdb14a7877 100644 (file)
@@ -146,17 +146,17 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
 
        if (d_mountpoint(dentry)) {
                struct file *fp = NULL;
-               struct vfsmount *fp_mnt = mntget(mnt);
-               struct dentry *fp_dentry = dget(dentry);
+               struct path fp_path = { .dentry = dentry, .mnt = mnt };
 
-               if (!autofs4_follow_mount(&fp_mnt, &fp_dentry)) {
-                       dput(fp_dentry);
-                       mntput(fp_mnt);
+               path_get(&fp_path);
+
+               if (!autofs4_follow_mount(&fp_path.mnt, &fp_path.dentry)) {
+                       path_put(&fp_path);
                        dcache_dir_close(inode, file);
                        goto out;
                }
 
-               fp = dentry_open(fp_dentry, fp_mnt, file->f_flags);
+               fp = dentry_open(fp_path.dentry, fp_path.mnt, file->f_flags);
                status = PTR_ERR(fp);
                if (IS_ERR(fp)) {
                        dcache_dir_close(inode, file);
@@ -242,7 +242,8 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
 {
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
-       int status = 0;
+       struct dentry *new;
+       int status;
 
        /* Block on any pending expiry here; invalidate the dentry
            when expiration is done to trigger mount request with a new
@@ -318,7 +319,28 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
        spin_lock(&dentry->d_lock);
        dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
        spin_unlock(&dentry->d_lock);
-       return status;
+
+       /*
+        * The dentry that is passed in from lookup may not be the one
+        * we end up using, as mkdir can create a new one.  If this
+        * happens, and another process tries the lookup at the same time,
+        * it will set the PENDING flag on this new dentry, but add itself
+        * to our waitq.  Then, if after the lookup succeeds, the first
+        * process that requested the mount performs another lookup of the
+        * same directory, it will show up as still pending!  So, we need
+        * to redo the lookup here and clear pending on that dentry.
+        */
+       if (d_unhashed(dentry)) {
+               new = d_lookup(dentry->d_parent, &dentry->d_name);
+               if (new) {
+                       spin_lock(&new->d_lock);
+                       new->d_flags &= ~DCACHE_AUTOFS_PENDING;
+                       spin_unlock(&new->d_lock);
+                       dput(new);
+               }
+       }
+
+       return 0;
 }
 
 /* For autofs direct mounts the follow link triggers the mount */
@@ -533,9 +555,9 @@ static struct dentry *autofs4_lookup_unhashed(struct autofs_sb_info *sbi, struct
                        goto next;
 
                if (d_unhashed(dentry)) {
-                       struct autofs_info *ino = autofs4_dentry_ino(dentry);
                        struct inode *inode = dentry->d_inode;
 
+                       ino = autofs4_dentry_ino(dentry);
                        list_del_init(&ino->rehash);
                        dget(dentry);
                        /*
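The autofs4_dir_open() hunk above groups the (vfsmount, dentry) pair into a struct path and takes and drops both references with path_get()/path_put(); note that path_put() drops the dentry before the mount, the same ordering the autofs4_mount_busy() fix earlier in this series restores by hand. A minimal sketch of the pattern, with a hypothetical caller:

/*
 * Sketch only: use_path() is hypothetical; the caller is assumed to
 * already hold references on both objects.
 */
#include <linux/path.h>
#include <linux/namei.h>

static void use_path(struct dentry *dentry, struct vfsmount *mnt)
{
        struct path p = { .mnt = mnt, .dentry = dentry };

        path_get(&p);           /* one call instead of mntget() + dget() */

        /* ... work with p.dentry and p.mnt ... */

        path_put(&p);           /* dput() first, then mntput() */
}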
index 1fe28e4754c2c7b44fc3516d4f633bc1c8d4351c..75e5955c3f6d4fe5cf30b3ffcc3d6bc21a9fbffb 100644 (file)
@@ -171,7 +171,7 @@ static int autofs4_getpath(struct autofs_sb_info *sbi,
        for (tmp = dentry ; tmp != root ; tmp = tmp->d_parent)
                len += tmp->d_name.len + 1;
 
-       if (--len > NAME_MAX) {
+       if (!len || --len > NAME_MAX) {
                spin_unlock(&dcache_lock);
                return 0;
        }
index 82123ff3e1dd744db3d766769fafd53d2e133a32..e8717de3bab34f16f551004048fd91a4c6029cb1 100644 (file)
@@ -489,9 +489,9 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
 {
        befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
        if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
-               char *p = nd_get_link(nd);
-               if (!IS_ERR(p))
-                       kfree(p);
+               char *link = nd_get_link(nd);
+               if (!IS_ERR(link))
+                       kfree(link);
        }
 }
 
index 71faf4d2390824ac2f0e001c32bb24c126f7f653..70f5d3a8eede460af9faf2f6e45407cc01f7d086 100644 (file)
@@ -42,7 +42,7 @@ static inline struct bfs_inode_info *BFS_I(struct inode *inode)
 
 
 #define printf(format, args...) \
-       printk(KERN_ERR "BFS-fs: %s(): " format, __FUNCTION__, ## args)
+       printk(KERN_ERR "BFS-fs: %s(): " format, __func__, ## args)
 
 /* inode.c */
 extern struct inode *bfs_iget(struct super_block *sb, unsigned long ino);
index a1bb2244cac772dd31dd8caff79b99fcead5ac3d..ba4cddb92f1ddf02330760e7b7614d7b261a5a64 100644 (file)
@@ -372,21 +372,17 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
                         
                flush_icache_range(text_addr, text_addr+ex.a_text+ex.a_data);
        } else {
-               static unsigned long error_time, error_time2;
                if ((ex.a_text & 0xfff || ex.a_data & 0xfff) &&
-                   (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time2) > 5*HZ)
+                   (N_MAGIC(ex) != NMAGIC) && printk_ratelimit())
                {
                        printk(KERN_NOTICE "executable not page aligned\n");
-                       error_time2 = jiffies;
                }
 
-               if ((fd_offset & ~PAGE_MASK) != 0 &&
-                   (jiffies-error_time) > 5*HZ)
+               if ((fd_offset & ~PAGE_MASK) != 0 && printk_ratelimit())
                {
                        printk(KERN_WARNING 
                               "fd_offset is not page aligned. Please convert program: %s\n",
                               bprm->file->f_path.dentry->d_name.name);
-                       error_time = jiffies;
                }
 
                if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) {
@@ -495,15 +491,13 @@ static int load_aout_library(struct file *file)
        start_addr =  ex.a_entry & 0xfffff000;
 
        if ((N_TXTOFF(ex) & ~PAGE_MASK) != 0) {
-               static unsigned long error_time;
                loff_t pos = N_TXTOFF(ex);
 
-               if ((jiffies-error_time) > 5*HZ)
+               if (printk_ratelimit())
                {
                        printk(KERN_WARNING 
                               "N_TXTOFF is not page aligned. Please convert library: %s\n",
                               file->f_path.dentry->d_name.name);
-                       error_time = jiffies;
                }
                down_write(&current->mm->mmap_sem);
                do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss);
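The a.out loader hunks above replace the per-call-site "error_time" jiffies bookkeeping with printk_ratelimit(), which throttles messages globally and is tunable through /proc/sys/kernel/printk_ratelimit and printk_ratelimit_burst. A minimal sketch of the idiom, with an invented warning:

/*
 * Sketch only: report_bad_input() and its message are invented.
 */
#include <linux/kernel.h>

static void report_bad_input(int value)
{
        /*
         * printk_ratelimit() returns nonzero while messages are still
         * allowed and 0 once the global limit has been hit, so no per-site
         * "last error jiffies" state is needed.
         */
        if (printk_ratelimit())
                printk(KERN_WARNING "bad input value %d\n", value);
}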
index 9924581df6f6ccbc35520237cbdf8c9c079559f8..b25707fee2ccad4598146224a5b5c5abcf3c2f99 100644 (file)
@@ -1255,26 +1255,23 @@ static int writenote(struct memelfnote *men, struct file *file,
 static void fill_elf_header(struct elfhdr *elf, int segs,
                            u16 machine, u32 flags, u8 osabi)
 {
+       memset(elf, 0, sizeof(*elf));
+
        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS] = ELF_CLASS;
        elf->e_ident[EI_DATA] = ELF_DATA;
        elf->e_ident[EI_VERSION] = EV_CURRENT;
        elf->e_ident[EI_OSABI] = ELF_OSABI;
-       memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
 
        elf->e_type = ET_CORE;
        elf->e_machine = machine;
        elf->e_version = EV_CURRENT;
-       elf->e_entry = 0;
        elf->e_phoff = sizeof(struct elfhdr);
-       elf->e_shoff = 0;
        elf->e_flags = flags;
        elf->e_ehsize = sizeof(struct elfhdr);
        elf->e_phentsize = sizeof(struct elf_phdr);
        elf->e_phnum = segs;
-       elf->e_shentsize = 0;
-       elf->e_shnum = 0;
-       elf->e_shstrndx = 0;
+
        return;
 }
 
@@ -1725,26 +1722,25 @@ static int fill_note_info(struct elfhdr *elf, int phdrs,
 
        info->thread_status_size = 0;
        if (signr) {
-               struct elf_thread_status *tmp;
+               struct elf_thread_status *ets;
                rcu_read_lock();
                do_each_thread(g, p)
                        if (current->mm == p->mm && current != p) {
-                               tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
-                               if (!tmp) {
+                               ets = kzalloc(sizeof(*ets), GFP_ATOMIC);
+                               if (!ets) {
                                        rcu_read_unlock();
                                        return 0;
                                }
-                               tmp->thread = p;
-                               list_add(&tmp->list, &info->thread_list);
+                               ets->thread = p;
+                               list_add(&ets->list, &info->thread_list);
                        }
                while_each_thread(g, p);
                rcu_read_unlock();
                list_for_each(t, &info->thread_list) {
-                       struct elf_thread_status *tmp;
                        int sz;
 
-                       tmp = list_entry(t, struct elf_thread_status, list);
-                       sz = elf_dump_thread_status(signr, tmp);
+                       ets = list_entry(t, struct elf_thread_status, list);
+                       sz = elf_dump_thread_status(signr, ets);
                        info->thread_status_size += sz;
                }
        }
@@ -2000,10 +1996,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
 
                for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
                        struct page *page;
-                       struct vm_area_struct *vma;
+                       struct vm_area_struct *tmp_vma;
 
                        if (get_user_pages(current, current->mm, addr, 1, 0, 1,
-                                               &page, &vma) <= 0) {
+                                               &page, &tmp_vma) <= 0) {
                                DUMP_SEEK(PAGE_SIZE);
                        } else {
                                if (page == ZERO_PAGE(0)) {
@@ -2013,7 +2009,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
                                        }
                                } else {
                                        void *kaddr;
-                                       flush_cache_page(vma, addr,
+                                       flush_cache_page(tmp_vma, addr,
                                                         page_to_pfn(page));
                                        kaddr = kmap(page);
                                        if ((size += PAGE_SIZE) > limit ||
index 32649f2a16544502c8affe2d147ac89d1e02ae50..ddd35d87339143207f6074a49752dcc40a4b5003 100644 (file)
@@ -136,8 +136,8 @@ static int elf_fdpic_fetch_phdrs(struct elf_fdpic_params *params,
 
        retval = kernel_read(file, params->hdr.e_phoff,
                             (char *) params->phdrs, size);
-       if (retval < 0)
-               return retval;
+       if (unlikely(retval != size))
+               return retval < 0 ? retval : -ENOEXEC;
 
        /* determine stack size for this binary */
        phdr = params->phdrs;
@@ -218,8 +218,11 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
                                             phdr->p_offset,
                                             interpreter_name,
                                             phdr->p_filesz);
-                       if (retval < 0)
+                       if (unlikely(retval != phdr->p_filesz)) {
+                               if (retval >= 0)
+                                       retval = -ENOEXEC;
                                goto error;
+                       }
 
                        retval = -ENOENT;
                        if (interpreter_name[phdr->p_filesz - 1] != '\0')
@@ -245,8 +248,11 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
 
                        retval = kernel_read(interpreter, 0, bprm->buf,
                                             BINPRM_BUF_SIZE);
-                       if (retval < 0)
+                       if (unlikely(retval != BINPRM_BUF_SIZE)) {
+                               if (retval >= 0)
+                                       retval = -ENOEXEC;
                                goto error;
+                       }
 
                        interp_params.hdr = *((struct elfhdr *) bprm->buf);
                        break;
index f95ae9789c913d81d0df5d17250bced9c5479855..f9c88d0c8cedeabcfdca1a5f81a51e1e293a59ea 100644 (file)
@@ -43,7 +43,7 @@ static int load_em86(struct linux_binprm *bprm,struct pt_regs *regs)
                        return -ENOEXEC;
        }
 
-       bprm->sh_bang++;        /* Well, the bang-shell is implicit... */
+       bprm->sh_bang = 1;      /* Well, the bang-shell is implicit... */
        allow_write_access(bprm->file);
        fput(bprm->file);
        bprm->file = NULL;
index 0498b181dd525cc4855b3284353abec948633e7e..3b40d45a3a16501ca0ea28563b8049c6e3718698 100644 (file)
@@ -531,7 +531,8 @@ static int load_flat_file(struct linux_binprm * bprm,
                DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n");
 
                down_write(&current->mm->mmap_sem);
-               textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, MAP_PRIVATE, 0);
+               textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC,
+                                 MAP_PRIVATE|MAP_EXECUTABLE, 0);
                up_write(&current->mm->mmap_sem);
                if (!textpos  || textpos >= (unsigned long) -4096) {
                        if (!textpos)
@@ -932,14 +933,8 @@ static int __init init_flat_binfmt(void)
        return register_binfmt(&flat_format);
 }
 
-static void __exit exit_flat_binfmt(void)
-{
-       unregister_binfmt(&flat_format);
-}
-
 /****************************************************************************/
 
 core_initcall(init_flat_binfmt);
-module_exit(exit_flat_binfmt);
 
 /****************************************************************************/
index dbf0ac0523de14d49edcc58b43ab7c62a407e406..7191306367c5d11912ceed01c5fce4925b528eba 100644 (file)
@@ -115,6 +115,12 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        if (!enabled)
                goto _ret;
 
+       retval = -ENOEXEC;
+       if (bprm->misc_bang)
+               goto _ret;
+
+       bprm->misc_bang = 1;
+
        /* to keep locking time low, we copy the interpreter string */
        read_lock(&entries_lock);
        fmt = check_file(bprm);
index ab33939b12a7efb8d754fcc80b89bfc88651a33b..9e3963f7ebf19f85a1742ff034bbb518d51a6aff 100644 (file)
@@ -29,7 +29,7 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs)
         * Sorta complicated, but hopefully it will work.  -TYT
         */
 
-       bprm->sh_bang++;
+       bprm->sh_bang = 1;
        allow_write_access(bprm->file);
        fput(bprm->file);
        bprm->file = NULL;
index 6e0b6f66df0315a450f467c1025c8b1042845ced..799f86deff243213256103ee76ff7560098fe965 100644 (file)
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -937,6 +937,95 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
        return ERR_PTR(-EINVAL);
 }
 
+static void bio_copy_kern_endio(struct bio *bio, int err)
+{
+       struct bio_vec *bvec;
+       const int read = bio_data_dir(bio) == READ;
+       char *p = bio->bi_private;
+       int i;
+
+       __bio_for_each_segment(bvec, bio, i, 0) {
+               char *addr = page_address(bvec->bv_page);
+
+               if (read && !err)
+                       memcpy(p, addr, bvec->bv_len);
+
+               __free_page(bvec->bv_page);
+               p += bvec->bv_len;
+       }
+
+       bio_put(bio);
+}
+
+/**
+ *     bio_copy_kern   -       copy kernel address into bio
+ *     @q: the struct request_queue for the bio
+ *     @data: pointer to buffer to copy
+ *     @len: length in bytes
+ *     @gfp_mask: allocation flags for bio and page allocation
+ *
+ *     copy the kernel address into a bio suitable for io to a block
+ *     device. Returns an error pointer in case of error.
+ */
+struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
+                         gfp_t gfp_mask, int reading)
+{
+       unsigned long kaddr = (unsigned long)data;
+       unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       unsigned long start = kaddr >> PAGE_SHIFT;
+       const int nr_pages = end - start;
+       struct bio *bio;
+       struct bio_vec *bvec;
+       int i, ret;
+
+       bio = bio_alloc(gfp_mask, nr_pages);
+       if (!bio)
+               return ERR_PTR(-ENOMEM);
+
+       while (len) {
+               struct page *page;
+               unsigned int bytes = PAGE_SIZE;
+
+               if (bytes > len)
+                       bytes = len;
+
+               page = alloc_page(q->bounce_gfp | gfp_mask);
+               if (!page) {
+                       ret = -ENOMEM;
+                       goto cleanup;
+               }
+
+               if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) {
+                       ret = -EINVAL;
+                       goto cleanup;
+               }
+
+               len -= bytes;
+       }
+
+       if (!reading) {
+               void *p = data;
+
+               bio_for_each_segment(bvec, bio, i) {
+                       char *addr = page_address(bvec->bv_page);
+
+                       memcpy(addr, p, bvec->bv_len);
+                       p += bvec->bv_len;
+               }
+       }
+
+       bio->bi_private = data;
+       bio->bi_end_io = bio_copy_kern_endio;
+       return bio;
+cleanup:
+       bio_for_each_segment(bvec, bio, i)
+               __free_page(bvec->bv_page);
+
+       bio_put(bio);
+
+       return ERR_PTR(ret);
+}
+
 /*
  * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
  * for performing direct-IO in BIOs.
@@ -1273,6 +1362,7 @@ EXPORT_SYMBOL(bio_get_nr_vecs);
 EXPORT_SYMBOL(bio_map_user);
 EXPORT_SYMBOL(bio_unmap_user);
 EXPORT_SYMBOL(bio_map_kern);
+EXPORT_SYMBOL(bio_copy_kern);
 EXPORT_SYMBOL(bio_pair_release);
 EXPORT_SYMBOL(bio_split);
 EXPORT_SYMBOL(bio_split_pool);
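bio_copy_kern(), added and exported above, bounces a kernel buffer through freshly allocated pages so that callers with buffers that cannot be mapped directly can still issue block I/O; for reads, the data is copied back into the buffer by bio_copy_kern_endio() at completion. A hypothetical caller sketch showing the calling convention (map_kernel_buffer() is not from the commit):

/*
 * Sketch only: map_kernel_buffer() is an invented wrapper.
 */
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/gfp.h>

static struct bio *map_kernel_buffer(struct request_queue *q, void *buf,
                                     unsigned int len, int write)
{
        struct bio *bio;

        /*
         * The last argument says whether data will be read from the device;
         * for reads, bio_copy_kern_endio() copies the bounce pages back
         * into buf when the I/O completes.
         */
        bio = bio_copy_kern(q, buf, len, GFP_KERNEL, !write);
        if (IS_ERR(bio))
                return bio;

        if (write)
                bio->bi_rw |= (1 << BIO_RW);
        return bio;
}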
index 39ff14403d137be85dcd11161584abeabd9d7577..a073f3f4f013c5c349afede7edb715672bfd4928 100644 (file)
@@ -360,16 +360,19 @@ void invalidate_bdev(struct block_device *bdev)
  */
 static void free_more_memory(void)
 {
-       struct zone **zones;
-       pg_data_t *pgdat;
+       struct zone *zone;
+       int nid;
 
        wakeup_pdflush(1024);
        yield();
 
-       for_each_online_pgdat(pgdat) {
-               zones = pgdat->node_zonelists[gfp_zone(GFP_NOFS)].zones;
-               if (*zones)
-                       try_to_free_pages(zones, 0, GFP_NOFS);
+       for_each_online_node(nid) {
+               (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
+                                               gfp_zone(GFP_NOFS), NULL,
+                                               &zone);
+               if (zone)
+                       try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
+                                               GFP_NOFS);
        }
 }
 
@@ -1098,7 +1101,7 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
 
                printk(KERN_ERR "%s: requested out-of-range block %llu for "
                        "device %s\n",
-                       __FUNCTION__, (unsigned long long)block,
+                       __func__, (unsigned long long)block,
                        bdevname(bdev, b));
                return -EIO;
        }
@@ -2208,8 +2211,8 @@ out:
        return err;
 }
 
-int cont_expand_zero(struct file *file, struct address_space *mapping,
-                       loff_t pos, loff_t *bytes)
+static int cont_expand_zero(struct file *file, struct address_space *mapping,
+                           loff_t pos, loff_t *bytes)
 {
        struct inode *inode = mapping->host;
        unsigned blocksize = 1 << inode->i_blkbits;
@@ -2243,6 +2246,8 @@ int cont_expand_zero(struct file *file, struct address_space *mapping,
                        goto out;
                BUG_ON(err != len);
                err = 0;
+
+               balance_dirty_pages_ratelimited(mapping);
        }
 
        /* page covers the boundary, find the boundary offset */
@@ -2323,23 +2328,6 @@ int block_commit_write(struct page *page, unsigned from, unsigned to)
        return 0;
 }
 
-int generic_commit_write(struct file *file, struct page *page,
-               unsigned from, unsigned to)
-{
-       struct inode *inode = page->mapping->host;
-       loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-       __block_commit_write(inode,page,from,to);
-       /*
-        * No need to use i_size_read() here, the i_size
-        * cannot change under us because we hold i_mutex.
-        */
-       if (pos > inode->i_size) {
-               i_size_write(inode, pos);
-               mark_inode_dirty(inode);
-       }
-       return 0;
-}
-
 /*
  * block_page_mkwrite() is not allowed to change the file size as it gets
  * called from a page fault handler when a page is first dirtied. Hence we must
@@ -3180,8 +3168,7 @@ static void recalc_bh_state(void)
        
 struct buffer_head *alloc_buffer_head(gfp_t gfp_flags)
 {
-       struct buffer_head *ret = kmem_cache_alloc(bh_cachep,
-                               set_migrateflags(gfp_flags, __GFP_RECLAIMABLE));
+       struct buffer_head *ret = kmem_cache_alloc(bh_cachep, gfp_flags);
        if (ret) {
                INIT_LIST_HEAD(&ret->b_assoc_buffers);
                get_cpu_var(bh_accounting).nr++;
@@ -3311,7 +3298,6 @@ EXPORT_SYMBOL(end_buffer_write_sync);
 EXPORT_SYMBOL(file_fsync);
 EXPORT_SYMBOL(fsync_bdev);
 EXPORT_SYMBOL(generic_block_bmap);
-EXPORT_SYMBOL(generic_commit_write);
 EXPORT_SYMBOL(generic_cont_expand_simple);
 EXPORT_SYMBOL(init_buffer);
 EXPORT_SYMBOL(invalidate_bdev);
index 038674aa88a72c09a88388ee7cf1b54839675052..68e510b88457e85d8831aa0b48a0479c137d3cb3 100644 (file)
@@ -55,7 +55,6 @@ static struct char_device_struct {
        unsigned int baseminor;
        int minorct;
        char name[64];
-       struct file_operations *fops;
        struct cdev *cdev;              /* will die */
 } *chrdevs[CHRDEV_MAJOR_HASH_SIZE];
 
index 0228ed06069e95c88b3b4a032b5b423f474ca190..cc950f69e51ea5b2e5a13135258151e4560c9b2f 100644 (file)
@@ -468,7 +468,7 @@ cifs_proc_init(void)
 {
        struct proc_dir_entry *pde;
 
-       proc_fs_cifs = proc_mkdir("cifs", proc_root_fs);
+       proc_fs_cifs = proc_mkdir("fs/cifs", NULL);
        if (proc_fs_cifs == NULL)
                return;
 
@@ -559,7 +559,7 @@ cifs_proc_clean(void)
        remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
        remove_proc_entry("Experimental", proc_fs_cifs);
        remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
-       remove_proc_entry("cifs", proc_root_fs);
+       remove_proc_entry("fs/cifs", NULL);
 }
 
 static int
index 95a54253c0479989ec20f047d4bd821ecd5adf30..e1c854890f9400b9467419ce9e625084391d9853 100644 (file)
@@ -134,7 +134,7 @@ void coda_iattr_to_vattr(struct iattr *iattr, struct coda_vattr *vattr)
         unsigned int valid;
 
         /* clean out */        
-        vattr->va_mode = (umode_t) -1;
+       vattr->va_mode = -1;
         vattr->va_uid = (vuid_t) -1; 
         vattr->va_gid = (vgid_t) -1;
         vattr->va_size = (off_t) -1;
index f89ff083079ba5b8fd1595f233c3c9b949d472de..3d2580e00a3e272e0cfe04103c98466e2daa15e7 100644 (file)
@@ -345,7 +345,7 @@ static int coda_symlink(struct inode *dir_inode, struct dentry *de,
 }
 
 /* destruction routines: unlink, rmdir */
-int coda_unlink(struct inode *dir, struct dentry *de)
+static int coda_unlink(struct inode *dir, struct dentry *de)
 {
         int error;
        const char *name = de->d_name.name;
@@ -365,7 +365,7 @@ int coda_unlink(struct inode *dir, struct dentry *de)
        return 0;
 }
 
-int coda_rmdir(struct inode *dir, struct dentry *de)
+static int coda_rmdir(struct inode *dir, struct dentry *de)
 {
        const char *name = de->d_name.name;
        int len = de->d_name.len;
@@ -424,7 +424,7 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
 
 
 /* file operations for directories */
-int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir)
+static int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir)
 {
        struct coda_file_info *cfi;
        struct file *host_file;
index 2ce4456aad309eebc3df3c200178561046d87093..139dc93c092d697b10d540a12020bd92e2e84b41 100644 (file)
@@ -1634,7 +1634,7 @@ sticky:
        return ret;
 }
 
-#ifdef TIF_RESTORE_SIGMASK
+#ifdef HAVE_SET_RESTORE_SIGMASK
 asmlinkage long compat_sys_pselect7(int n, compat_ulong_t __user *inp,
        compat_ulong_t __user *outp, compat_ulong_t __user *exp,
        struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
@@ -1720,7 +1720,7 @@ sticky:
                if (sigmask) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                                        sizeof(sigsaved));
-                       set_thread_flag(TIF_RESTORE_SIGMASK);
+                       set_restore_sigmask();
                }
        } else if (sigmask)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);
@@ -1791,7 +1791,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
                if (sigmask) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                                sizeof(sigsaved));
-                       set_thread_flag(TIF_RESTORE_SIGMASK);
+                       set_restore_sigmask();
                }
                ret = -ERESTARTNOHAND;
        } else if (sigmask)
@@ -1825,7 +1825,7 @@ sticky:
 
        return ret;
 }
-#endif /* TIF_RESTORE_SIGMASK */
+#endif /* HAVE_SET_RESTORE_SIGMASK */
 
 #if defined(CONFIG_NFSD) || defined(CONFIG_NFSD_MODULE)
 /* Stuff for NFS server syscalls... */
@@ -2080,7 +2080,7 @@ long asmlinkage compat_sys_nfsservctl(int cmd, void *notused, void *notused2)
 
 #ifdef CONFIG_EPOLL
 
-#ifdef TIF_RESTORE_SIGMASK
+#ifdef HAVE_SET_RESTORE_SIGMASK
 asmlinkage long compat_sys_epoll_pwait(int epfd,
                        struct compat_epoll_event __user *events,
                        int maxevents, int timeout,
@@ -2117,14 +2117,14 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
                if (err == -EINTR) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                               sizeof(sigsaved));
-                       set_thread_flag(TIF_RESTORE_SIGMASK);
+                       set_restore_sigmask();
                } else
                        sigprocmask(SIG_SETMASK, &sigsaved, NULL);
        }
 
        return err;
 }
-#endif /* TIF_RESTORE_SIGMASK */
+#endif /* HAVE_SET_RESTORE_SIGMASK */
 
 #endif /* CONFIG_EPOLL */
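The compat.c hunks above move from setting TIF_RESTORE_SIGMASK directly to the set_restore_sigmask() helper, guarded by HAVE_SET_RESTORE_SIGMASK so architectures can supply their own implementation. A rough sketch of the sigsuspend-style pattern these call sites follow, with an invented wait (wait_with_temp_sigmask() and the fixed one-second timeout are illustrative only):

/*
 * Sketch only: wait_with_temp_sigmask() is invented for illustration.
 */
#include <linux/sched.h>
#include <linux/signal.h>

static long wait_with_temp_sigmask(sigset_t *newmask)
{
        sigset_t saved;
        long ret = 0;

        sigdelsetmask(newmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
        sigprocmask(SIG_SETMASK, newmask, &saved);

        schedule_timeout_interruptible(HZ);
        if (signal_pending(current))
                ret = -EINTR;

        if (ret == -EINTR) {
                /*
                 * Defer restoring the caller's mask until the signal has
                 * actually been delivered; the signal delivery code checks
                 * the flag set by set_restore_sigmask().
                 */
                current->saved_sigmask = saved;
                set_restore_sigmask();
        } else {
                sigprocmask(SIG_SETMASK, &saved, NULL);
        }
        return ret;
}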
 
index c6e72aebd16b43e7b16339cb69869472fccbdd24..97dba0d92348d27692c4f9c10bc6e99c25333cd8 100644 (file)
@@ -1046,14 +1046,14 @@ static int vt_check(struct file *file)
        struct inode *inode = file->f_path.dentry->d_inode;
        struct vc_data *vc;
        
-       if (file->f_op->ioctl != tty_ioctl)
+       if (file->f_op->unlocked_ioctl != tty_ioctl)
                return -EINVAL;
                        
        tty = (struct tty_struct *)file->private_data;
        if (tty_paranoia_check(tty, inode, "tty_ioctl"))
                return -EINVAL;
                                                        
-       if (tty->driver->ioctl != vt_ioctl)
+       if (tty->ops->ioctl != vt_ioctl)
                return -EINVAL;
 
        vc = (struct vc_data *)tty->driver_data;
index 397cb503a180da62a9cbcdc2f0aeb86b81851cc6..2b6cb23dd14e99a4165af595a1f6670e002d8d3b 100644 (file)
@@ -115,7 +115,7 @@ configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *pp
                        goto out;
        }
        pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
-                __FUNCTION__, count, *ppos, buffer->page);
+                __func__, count, *ppos, buffer->page);
        retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
                                         buffer->count);
 out:
index 4c1ebff778eef936479b2869ed34c620694479fb..b9a1d810346d3883c8a908079ae1f4e8be6fe217 100644 (file)
@@ -47,7 +47,7 @@ static const struct address_space_operations configfs_aops = {
 
 static struct backing_dev_info configfs_backing_dev_info = {
        .ra_pages       = 0,    /* No readahead */
-       .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
 
 static const struct inode_operations configfs_inode_operations ={
index de3b31d0a37d88a44185cb6be1efeefed6bc1cab..8421cea7d8c7b1460b0cfdf90e831a3012ddf803 100644 (file)
@@ -92,7 +92,7 @@ static int configfs_fill_super(struct super_block *sb, void *data, int silent)
 
        root = d_alloc_root(inode);
        if (!root) {
-               pr_debug("%s: could not get root dentry!\n",__FUNCTION__);
+               pr_debug("%s: could not get root dentry!\n",__func__);
                iput(inode);
                return -ENOMEM;
        }
index 78929ea84ff2da08688619b2b83d74700ca0dc25..2a731ef5f305c3bdd803937a3d893997a2ca6361 100644 (file)
@@ -210,13 +210,13 @@ static int configfs_get_target_path(struct config_item * item, struct config_ite
        if (size > PATH_MAX)
                return -ENAMETOOLONG;
 
-       pr_debug("%s: depth = %d, size = %d\n", __FUNCTION__, depth, size);
+       pr_debug("%s: depth = %d, size = %d\n", __func__, depth, size);
 
        for (s = path; depth--; s += 3)
                strcpy(s,"../");
 
        fill_item_path(target, path, size);
-       pr_debug("%s: path = '%s'\n", __FUNCTION__, path);
+       pr_debug("%s: path = '%s'\n", __func__, path);
 
        return 0;
 }
index fddffe4851f550cd78315189d6466bc0a24ad126..159a5efd6a8a8bcf43ecf0b637023bb3d11e24d4 100644 (file)
@@ -9,7 +9,7 @@
  *     2 as published by the Free Software Foundation.
  *
  *  debugfs is for people to use instead of /proc or /sys.
- *  See Documentation/DocBook/kernel-api for more details.
+ *  See Documentation/DocBook/filesystems for more details.
  *
  */
 
index f120e1207874715b052788ea9105458da9348145..285b64a8b06e125b1aee42a8317cfd0873d69f20 100644 (file)
@@ -17,6 +17,8 @@
 #include <linux/namei.h>
 #include <linux/mount.h>
 #include <linux/tty.h>
+#include <linux/mutex.h>
+#include <linux/idr.h>
 #include <linux/devpts_fs.h>
 #include <linux/parser.h>
 #include <linux/fsnotify.h>
 
 #define DEVPTS_DEFAULT_MODE 0600
 
+extern int pty_limit;                  /* Config limit on Unix98 ptys */
+static DEFINE_IDR(allocated_ptys);
+static DEFINE_MUTEX(allocated_ptys_lock);
+
 static struct vfsmount *devpts_mnt;
 static struct dentry *devpts_root;
 
@@ -171,9 +177,44 @@ static struct dentry *get_node(int num)
        return lookup_one_len(s, root, sprintf(s, "%d", num));
 }
 
+int devpts_new_index(void)
+{
+       int index;
+       int idr_ret;
+
+retry:
+       if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) {
+               return -ENOMEM;
+       }
+
+       mutex_lock(&allocated_ptys_lock);
+       idr_ret = idr_get_new(&allocated_ptys, NULL, &index);
+       if (idr_ret < 0) {
+               mutex_unlock(&allocated_ptys_lock);
+               if (idr_ret == -EAGAIN)
+                       goto retry;
+               return -EIO;
+       }
+
+       if (index >= pty_limit) {
+               idr_remove(&allocated_ptys, index);
+               mutex_unlock(&allocated_ptys_lock);
+               return -EIO;
+       }
+       mutex_unlock(&allocated_ptys_lock);
+       return index;
+}
+
+void devpts_kill_index(int idx)
+{
+       mutex_lock(&allocated_ptys_lock);
+       idr_remove(&allocated_ptys, idx);
+       mutex_unlock(&allocated_ptys_lock);
+}
+
 int devpts_pty_new(struct tty_struct *tty)
 {
-       int number = tty->index;
+       int number = tty->index; /* tty layer puts index from devpts_new_index() in here */
        struct tty_driver *driver = tty->driver;
        dev_t device = MKDEV(driver->major, driver->minor_start+number);
        struct dentry *dentry;
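The devpts changes above pull pty index management into devpts_new_index()/devpts_kill_index(), built on an idr protected by a mutex. For reference, the generic preload-then-allocate idr idiom those helpers follow, with placeholder my_* names:

/*
 * Sketch only: my_idr, my_lock and my_alloc_id()/my_free_id() are
 * placeholder names.
 */
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static DEFINE_IDR(my_idr);
static DEFINE_MUTEX(my_lock);

static int my_alloc_id(void *object)
{
        int id, err;

again:
        /* preallocate outside the lock; idr_get_new() itself won't sleep */
        if (!idr_pre_get(&my_idr, GFP_KERNEL))
                return -ENOMEM;

        mutex_lock(&my_lock);
        err = idr_get_new(&my_idr, object, &id);
        mutex_unlock(&my_lock);

        if (err == -EAGAIN)
                goto again;     /* someone else used the preallocated node */
        if (err)
                return err;

        return id;
}

static void my_free_id(int id)
{
        mutex_lock(&my_lock);
        idr_remove(&my_idr, id);
        mutex_unlock(&my_lock);
}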
index b64e55e0515d058d6884e9408b9e7fb0580e443d..499e16759e96fea13329dc05d5408194fa96e4e9 100644 (file)
@@ -200,7 +200,7 @@ int __init dlm_lockspace_init(void)
 
        dlm_kset = kset_create_and_add("dlm", NULL, kernel_kobj);
        if (!dlm_kset) {
-               printk(KERN_WARNING "%s: can not create kset\n", __FUNCTION__);
+               printk(KERN_WARNING "%s: can not create kset\n", __func__);
                return -ENOMEM;
        }
        return 0;
index 28d01ed66de0198130407a9d36411f77946cd1c7..eaecc4cfe5402584c6e3b9510628170c3be463bf 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
+#include <linux/file.h>
 
 int dir_notify_enable __read_mostly = 1;
 
@@ -66,6 +67,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
        struct dnotify_struct **prev;
        struct inode *inode;
        fl_owner_t id = current->files;
+       struct file *f;
        int error = 0;
 
        if ((arg & ~DN_MULTISHOT) == 0) {
@@ -92,6 +94,15 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
                prev = &odn->dn_next;
        }
 
+       rcu_read_lock();
+       f = fcheck(fd);
+       rcu_read_unlock();
+       /* we'd lost the race with close(), sod off silently */
+       /* note that inode->i_lock prevents reordering problems
+        * between accesses to descriptor table and ->i_dnotify */
+       if (f != filp)
+               goto out_free;
+
        error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
        if (error)
                goto out_free;
index 41b9dbd68b0e88e4a466685e1150e262e5d03c59..dfba1623cccb27a8d79a5098ace72b729ae45669 100644 (file)
@@ -289,7 +289,15 @@ static void wait_on_dquot(struct dquot *dquot)
        mutex_unlock(&dquot->dq_lock);
 }
 
-#define mark_dquot_dirty(dquot) ((dquot)->dq_sb->dq_op->mark_dirty(dquot))
+static inline int dquot_dirty(struct dquot *dquot)
+{
+       return test_bit(DQ_MOD_B, &dquot->dq_flags);
+}
+
+static inline int mark_dquot_dirty(struct dquot *dquot)
+{
+       return dquot->dq_sb->dq_op->mark_dirty(dquot);
+}
 
 int dquot_mark_dquot_dirty(struct dquot *dquot)
 {
@@ -1441,31 +1449,43 @@ static inline void set_enable_flags(struct quota_info *dqopt, int type)
        switch (type) {
                case USRQUOTA:
                        dqopt->flags |= DQUOT_USR_ENABLED;
+                       dqopt->flags &= ~DQUOT_USR_SUSPENDED;
                        break;
                case GRPQUOTA:
                        dqopt->flags |= DQUOT_GRP_ENABLED;
+                       dqopt->flags &= ~DQUOT_GRP_SUSPENDED;
                        break;
        }
 }
 
-static inline void reset_enable_flags(struct quota_info *dqopt, int type)
+static inline void reset_enable_flags(struct quota_info *dqopt, int type,
+                                     int remount)
 {
        switch (type) {
                case USRQUOTA:
                        dqopt->flags &= ~DQUOT_USR_ENABLED;
+                       if (remount)
+                               dqopt->flags |= DQUOT_USR_SUSPENDED;
+                       else
+                               dqopt->flags &= ~DQUOT_USR_SUSPENDED;
                        break;
                case GRPQUOTA:
                        dqopt->flags &= ~DQUOT_GRP_ENABLED;
+                       if (remount)
+                               dqopt->flags |= DQUOT_GRP_SUSPENDED;
+                       else
+                               dqopt->flags &= ~DQUOT_GRP_SUSPENDED;
                        break;
        }
 }
 
+
 /*
  * Turn quota off on a device. type == -1 ==> quotaoff for all types (umount)
  */
-int vfs_quota_off(struct super_block *sb, int type)
+int vfs_quota_off(struct super_block *sb, int type, int remount)
 {
-       int cnt;
+       int cnt, ret = 0;
        struct quota_info *dqopt = sb_dqopt(sb);
        struct inode *toputinode[MAXQUOTAS];
 
@@ -1475,9 +1495,17 @@ int vfs_quota_off(struct super_block *sb, int type)
                toputinode[cnt] = NULL;
                if (type != -1 && cnt != type)
                        continue;
+               /* If we keep inodes of quota files after remount and quotaoff
+                * is called, drop kept inodes. */
+               if (!remount && sb_has_quota_suspended(sb, cnt)) {
+                       iput(dqopt->files[cnt]);
+                       dqopt->files[cnt] = NULL;
+                       reset_enable_flags(dqopt, cnt, 0);
+                       continue;
+               }
                if (!sb_has_quota_enabled(sb, cnt))
                        continue;
-               reset_enable_flags(dqopt, cnt);
+               reset_enable_flags(dqopt, cnt, remount);
 
                /* Note: these are blocking operations */
                drop_dquot_ref(sb, cnt);
@@ -1493,7 +1521,8 @@ int vfs_quota_off(struct super_block *sb, int type)
                put_quota_format(dqopt->info[cnt].dqi_format);
 
                toputinode[cnt] = dqopt->files[cnt];
-               dqopt->files[cnt] = NULL;
+               if (!remount)
+                       dqopt->files[cnt] = NULL;
                dqopt->info[cnt].dqi_flags = 0;
                dqopt->info[cnt].dqi_igrace = 0;
                dqopt->info[cnt].dqi_bgrace = 0;
@@ -1523,12 +1552,19 @@ int vfs_quota_off(struct super_block *sb, int type)
                                mutex_unlock(&toputinode[cnt]->i_mutex);
                                mark_inode_dirty(toputinode[cnt]);
                        }
-                       iput(toputinode[cnt]);
                        mutex_unlock(&dqopt->dqonoff_mutex);
+                       /* On remount RO, we keep the inode pointer so that we
+                        * can reenable quota on the subsequent remount RW.
+                        * But we have better not keep inode pointer when there
+                        * is pending delete on the quota file... */
+                       if (!remount)
+                               iput(toputinode[cnt]);
+                       else if (!toputinode[cnt]->i_nlink)
+                               ret = -EBUSY;
                }
        if (sb->s_bdev)
                invalidate_bdev(sb->s_bdev);
-       return 0;
+       return ret;
 }
 
 /*
@@ -1566,7 +1602,8 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
        invalidate_bdev(sb->s_bdev);
        mutex_lock(&inode->i_mutex);
        mutex_lock(&dqopt->dqonoff_mutex);
-       if (sb_has_quota_enabled(sb, type)) {
+       if (sb_has_quota_enabled(sb, type) ||
+                       sb_has_quota_suspended(sb, type)) {
                error = -EBUSY;
                goto out_lock;
        }
@@ -1589,6 +1626,7 @@ static int vfs_quota_on_inode(struct inode *inode, int type, int format_id)
 
        dqopt->ops[type] = fmt->qf_ops;
        dqopt->info[type].dqi_format = fmt;
+       dqopt->info[type].dqi_fmt_id = format_id;
        INIT_LIST_HEAD(&dqopt->info[type].dqi_dirty_list);
        mutex_lock(&dqopt->dqio_mutex);
        if ((error = dqopt->ops[type]->read_file_info(sb, type)) < 0) {
@@ -1624,12 +1662,41 @@ out_fmt:
        return error; 
 }
 
+/* Reenable quotas on remount RW */
+static int vfs_quota_on_remount(struct super_block *sb, int type)
+{
+       struct quota_info *dqopt = sb_dqopt(sb);
+       struct inode *inode;
+       int ret;
+
+       mutex_lock(&dqopt->dqonoff_mutex);
+       if (!sb_has_quota_suspended(sb, type)) {
+               mutex_unlock(&dqopt->dqonoff_mutex);
+               return 0;
+       }
+       BUG_ON(sb_has_quota_enabled(sb, type));
+
+       inode = dqopt->files[type];
+       dqopt->files[type] = NULL;
+       reset_enable_flags(dqopt, type, 0);
+       mutex_unlock(&dqopt->dqonoff_mutex);
+
+       ret = vfs_quota_on_inode(inode, type, dqopt->info[type].dqi_fmt_id);
+       iput(inode);
+
+       return ret;
+}
+
 /* Actual function called from quotactl() */
-int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path)
+int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path,
+                int remount)
 {
        struct nameidata nd;
        int error;
 
+       if (remount)
+               return vfs_quota_on_remount(sb, type);
+
        error = path_lookup(path, LOOKUP_FOLLOW, &nd);
        if (error < 0)
                return error;
@@ -1709,10 +1776,19 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
 }
 
 /* Generic routine for setting common part of quota structure */
-static void do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
+static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
 {
        struct mem_dqblk *dm = &dquot->dq_dqb;
        int check_blim = 0, check_ilim = 0;
+       struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_type];
+
+       if ((di->dqb_valid & QIF_BLIMITS &&
+            (di->dqb_bhardlimit > dqi->dqi_maxblimit ||
+             di->dqb_bsoftlimit > dqi->dqi_maxblimit)) ||
+           (di->dqb_valid & QIF_ILIMITS &&
+            (di->dqb_ihardlimit > dqi->dqi_maxilimit ||
+             di->dqb_isoftlimit > dqi->dqi_maxilimit)))
+               return -ERANGE;
 
        spin_lock(&dq_data_lock);
        if (di->dqb_valid & QIF_SPACE) {
@@ -1744,7 +1820,7 @@ static void do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
                        clear_bit(DQ_BLKS_B, &dquot->dq_flags);
                }
                else if (!(di->dqb_valid & QIF_BTIME))  /* Set grace only if user hasn't provided his own... */
-                       dm->dqb_btime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_bgrace;
+                       dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
        }
        if (check_ilim) {
                if (!dm->dqb_isoftlimit || dm->dqb_curinodes < dm->dqb_isoftlimit) {
@@ -1752,7 +1828,7 @@ static void do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
                        clear_bit(DQ_INODES_B, &dquot->dq_flags);
                }
                else if (!(di->dqb_valid & QIF_ITIME))  /* Set grace only if user hasn't provided his own... */
-                       dm->dqb_itime = get_seconds() + sb_dqopt(dquot->dq_sb)->info[dquot->dq_type].dqi_igrace;
+                       dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
        }
        if (dm->dqb_bhardlimit || dm->dqb_bsoftlimit || dm->dqb_ihardlimit || dm->dqb_isoftlimit)
                clear_bit(DQ_FAKE_B, &dquot->dq_flags);
@@ -1760,21 +1836,24 @@ static void do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
                set_bit(DQ_FAKE_B, &dquot->dq_flags);
        spin_unlock(&dq_data_lock);
        mark_dquot_dirty(dquot);
+
+       return 0;
 }
 
 int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *di)
 {
        struct dquot *dquot;
+       int rc;
 
        mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
        if (!(dquot = dqget(sb, id, type))) {
                mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
                return -ESRCH;
        }
-       do_set_dqblk(dquot, di);
+       rc = do_set_dqblk(dquot, di);
        dqput(dquot);
        mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
-       return 0;
+       return rc;
 }
 
 /* Generic routine for getting common part of quota file information */
index 59375efcf39d6769d94a6d77ef33a06054e4f4b9..3e5637fc377962e1aaca5665f3e03d89366673e3 100644 (file)
@@ -14,18 +14,26 @@ int sysctl_drop_caches;
 
 static void drop_pagecache_sb(struct super_block *sb)
 {
-       struct inode *inode;
+       struct inode *inode, *toput_inode = NULL;
 
        spin_lock(&inode_lock);
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                if (inode->i_state & (I_FREEING|I_WILL_FREE))
                        continue;
+               if (inode->i_mapping->nrpages == 0)
+                       continue;
+               __iget(inode);
+               spin_unlock(&inode_lock);
                __invalidate_mapping_pages(inode->i_mapping, 0, -1, true);
+               iput(toput_inode);
+               toput_inode = inode;
+               spin_lock(&inode_lock);
        }
        spin_unlock(&inode_lock);
+       iput(toput_inode);
 }
 
-void drop_pagecache(void)
+static void drop_pagecache(void)
 {
        struct super_block *sb;
 
@@ -45,7 +53,7 @@ restart:
        spin_unlock(&sb_lock);
 }
 
-void drop_slab(void)
+static void drop_slab(void)
 {
        int nr_objects;
 
index 768857015516f54e9109fab114929cb25389cd32..1e34a7fd4884391b5bec9a3de93bc4dfb86d7d2b 100644 (file)
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_ECRYPT_FS) += ecryptfs.o
 
-ecryptfs-objs := dentry.o file.o inode.o main.o super.o mmap.o read_write.o crypto.o keystore.o messaging.o netlink.o debug.o
+ecryptfs-objs := dentry.o file.o inode.o main.o super.o mmap.o read_write.o crypto.o keystore.o messaging.o netlink.o miscdev.o debug.o
index a066e109ad9c05a7cbc0670f92045a3475e9aac1..cd62d75b2cc030d27b275423eaba578b3c93eceb 100644 (file)
@@ -119,21 +119,21 @@ static int ecryptfs_calculate_md5(char *dst,
        if (rc) {
                printk(KERN_ERR
                       "%s: Error initializing crypto hash; rc = [%d]\n",
-                      __FUNCTION__, rc);
+                      __func__, rc);
                goto out;
        }
        rc = crypto_hash_update(&desc, &sg, len);
        if (rc) {
                printk(KERN_ERR
                       "%s: Error updating crypto hash; rc = [%d]\n",
-                      __FUNCTION__, rc);
+                      __func__, rc);
                goto out;
        }
        rc = crypto_hash_final(&desc, dst);
        if (rc) {
                printk(KERN_ERR
                       "%s: Error finalizing crypto hash; rc = [%d]\n",
-                      __FUNCTION__, rc);
+                      __func__, rc);
                goto out;
        }
 out:
@@ -437,7 +437,7 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
        if (rc < 0) {
                printk(KERN_ERR "%s: Error attempting to encrypt page with "
                       "page->index = [%ld], extent_offset = [%ld]; "
-                      "rc = [%d]\n", __FUNCTION__, page->index, extent_offset,
+                      "rc = [%d]\n", __func__, page->index, extent_offset,
                       rc);
                goto out;
        }
@@ -487,7 +487,7 @@ int ecryptfs_encrypt_page(struct page *page)
                                                       0, PAGE_CACHE_SIZE);
                if (rc)
                        printk(KERN_ERR "%s: Error attempting to copy "
-                              "page at index [%ld]\n", __FUNCTION__,
+                              "page at index [%ld]\n", __func__,
                               page->index);
                goto out;
        }
@@ -508,7 +508,7 @@ int ecryptfs_encrypt_page(struct page *page)
                                             extent_offset);
                if (rc) {
                        printk(KERN_ERR "%s: Error encrypting extent; "
-                              "rc = [%d]\n", __FUNCTION__, rc);
+                              "rc = [%d]\n", __func__, rc);
                        goto out;
                }
                ecryptfs_lower_offset_for_extent(
@@ -569,7 +569,7 @@ static int ecryptfs_decrypt_extent(struct page *page,
        if (rc < 0) {
                printk(KERN_ERR "%s: Error attempting to decrypt to page with "
                       "page->index = [%ld], extent_offset = [%ld]; "
-                      "rc = [%d]\n", __FUNCTION__, page->index, extent_offset,
+                      "rc = [%d]\n", __func__, page->index, extent_offset,
                       rc);
                goto out;
        }
@@ -622,7 +622,7 @@ int ecryptfs_decrypt_page(struct page *page)
                                                      ecryptfs_inode);
                if (rc)
                        printk(KERN_ERR "%s: Error attempting to copy "
-                              "page at index [%ld]\n", __FUNCTION__,
+                              "page at index [%ld]\n", __func__,
                               page->index);
                goto out;
        }
@@ -656,7 +656,7 @@ int ecryptfs_decrypt_page(struct page *page)
                                             extent_offset);
                if (rc) {
                        printk(KERN_ERR "%s: Error encrypting extent; "
-                              "rc = [%d]\n", __FUNCTION__, rc);
+                              "rc = [%d]\n", __func__, rc);
                        goto out;
                }
        }
@@ -1215,7 +1215,7 @@ int ecryptfs_read_and_validate_header_region(char *data,
                                 ecryptfs_inode);
        if (rc) {
                printk(KERN_ERR "%s: Error reading header region; rc = [%d]\n",
-                      __FUNCTION__, rc);
+                      __func__, rc);
                goto out;
        }
        if (!contains_ecryptfs_marker(data + ECRYPTFS_FILE_SIZE_BYTES)) {
@@ -1246,7 +1246,6 @@ ecryptfs_write_header_metadata(char *virt,
        (*written) = 6;
 }
 
-struct kmem_cache *ecryptfs_header_cache_0;
 struct kmem_cache *ecryptfs_header_cache_1;
 struct kmem_cache *ecryptfs_header_cache_2;
 
@@ -1320,7 +1319,7 @@ ecryptfs_write_metadata_to_contents(struct ecryptfs_crypt_stat *crypt_stat,
                                  0, crypt_stat->num_header_bytes_at_front);
        if (rc)
                printk(KERN_ERR "%s: Error attempting to write header "
-                      "information to lower file; rc = [%d]\n", __FUNCTION__,
+                      "information to lower file; rc = [%d]\n", __func__,
                       rc);
        return rc;
 }
@@ -1365,14 +1364,14 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
                }
        } else {
                printk(KERN_WARNING "%s: Encrypted flag not set\n",
-                      __FUNCTION__);
+                      __func__);
                rc = -EINVAL;
                goto out;
        }
        /* Released in this function */
        virt = kzalloc(crypt_stat->num_header_bytes_at_front, GFP_KERNEL);
        if (!virt) {
-               printk(KERN_ERR "%s: Out of memory\n", __FUNCTION__);
+               printk(KERN_ERR "%s: Out of memory\n", __func__);
                rc = -ENOMEM;
                goto out;
        }
@@ -1380,7 +1379,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
                                         ecryptfs_dentry);
        if (unlikely(rc)) {
                printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n",
-                      __FUNCTION__, rc);
+                      __func__, rc);
                goto out_free;
        }
        if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR)
@@ -1391,7 +1390,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
                                                         ecryptfs_dentry, virt);
        if (rc) {
                printk(KERN_ERR "%s: Error writing metadata out to lower file; "
-                      "rc = [%d]\n", __FUNCTION__, rc);
+                      "rc = [%d]\n", __func__, rc);
                goto out_free;
        }
 out_free:
@@ -1585,7 +1584,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
        if (!page_virt) {
                rc = -ENOMEM;
                printk(KERN_ERR "%s: Unable to allocate page_virt\n",
-                      __FUNCTION__);
+                      __func__);
                goto out;
        }
        rc = ecryptfs_read_lower(page_virt, 0, crypt_stat->extent_size,
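
The __FUNCTION__ to __func__ conversions above (and in the other eCryptfs files below) are purely mechanical: __func__ is the C99 predefined identifier for the enclosing function's name, while __FUNCTION__ is the older GCC-specific spelling. A minimal, self-contained illustration; all names below are made up:

#include <stdio.h>

static void hypothetical_report_error(int rc)
{
        /* __func__ expands to "hypothetical_report_error" here. */
        if (rc)
                fprintf(stderr, "%s: operation failed; rc = [%d]\n",
                        __func__, rc);
}

int main(void)
{
        hypothetical_report_error(-12);
        return 0;
}
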
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 5007f788da01cec21fb39be75f714840ce6d29e9..951ee33a022df4adfb0427c737208f285a7225ac 100644
@@ -4,7 +4,7 @@
  *
  * Copyright (C) 1997-2003 Erez Zadok
  * Copyright (C) 2001-2003 Stony Brook University
- * Copyright (C) 2004-2007 International Business Machines Corp.
+ * Copyright (C) 2004-2008 International Business Machines Corp.
  *   Author(s): Michael A. Halcrow <mahalcro@us.ibm.com>
  *              Trevor S. Highland <trevor.highland@gmail.com>
  *              Tyler Hicks <tyhicks@ou.edu>
@@ -34,6 +34,7 @@
 #include <linux/namei.h>
 #include <linux/scatterlist.h>
 #include <linux/hash.h>
+#include <linux/nsproxy.h>
 
 /* Version verification for shared data structures w/ userspace */
 #define ECRYPTFS_VERSION_MAJOR 0x00
 #define ECRYPTFS_VERSIONING_POLICY                0x00000008
 #define ECRYPTFS_VERSIONING_XATTR                 0x00000010
 #define ECRYPTFS_VERSIONING_MULTKEY               0x00000020
+#define ECRYPTFS_VERSIONING_DEVMISC               0x00000040
 #define ECRYPTFS_VERSIONING_MASK (ECRYPTFS_VERSIONING_PASSPHRASE \
                                  | ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH \
                                  | ECRYPTFS_VERSIONING_PUBKEY \
                                  | ECRYPTFS_VERSIONING_XATTR \
-                                 | ECRYPTFS_VERSIONING_MULTKEY)
+                                 | ECRYPTFS_VERSIONING_MULTKEY \
+                                 | ECRYPTFS_VERSIONING_DEVMISC)
 #define ECRYPTFS_MAX_PASSWORD_LENGTH 64
 #define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH
 #define ECRYPTFS_SALT_SIZE 8
 #define ECRYPTFS_DEFAULT_MSG_CTX_ELEMS 32
 #define ECRYPTFS_DEFAULT_SEND_TIMEOUT HZ
 #define ECRYPTFS_MAX_MSG_CTX_TTL (HZ*3)
-#define ECRYPTFS_NLMSG_HELO 100
-#define ECRYPTFS_NLMSG_QUIT 101
-#define ECRYPTFS_NLMSG_REQUEST 102
-#define ECRYPTFS_NLMSG_RESPONSE 103
 #define ECRYPTFS_MAX_PKI_NAME_BYTES 16
 #define ECRYPTFS_DEFAULT_NUM_USERS 4
 #define ECRYPTFS_MAX_NUM_USERS 32768
 #define ECRYPTFS_TRANSPORT_NETLINK 0
 #define ECRYPTFS_TRANSPORT_CONNECTOR 1
 #define ECRYPTFS_TRANSPORT_RELAYFS 2
-#define ECRYPTFS_DEFAULT_TRANSPORT ECRYPTFS_TRANSPORT_NETLINK
+#define ECRYPTFS_TRANSPORT_MISCDEV 3
+#define ECRYPTFS_DEFAULT_TRANSPORT ECRYPTFS_TRANSPORT_MISCDEV
 #define ECRYPTFS_XATTR_NAME "user.ecryptfs"
 
 #define RFC2440_CIPHER_DES3_EDE 0x02
@@ -366,32 +366,63 @@ struct ecryptfs_auth_tok_list_item {
 };
 
 struct ecryptfs_message {
+       /* Index into ecryptfs_msg_ctx_arr; inherited from
+        * msg_ctx->index and used to find the parent msg_ctx.
+        * Must be less than ecryptfs_message_buf_len. */
        u32 index;
        u32 data_len;
        u8 data[];
 };
 
 struct ecryptfs_msg_ctx {
-#define ECRYPTFS_MSG_CTX_STATE_FREE      0x0001
-#define ECRYPTFS_MSG_CTX_STATE_PENDING   0x0002
-#define ECRYPTFS_MSG_CTX_STATE_DONE      0x0003
-       u32 state;
-       unsigned int index;
-       unsigned int counter;
+#define ECRYPTFS_MSG_CTX_STATE_FREE     0x01
+#define ECRYPTFS_MSG_CTX_STATE_PENDING  0x02
+#define ECRYPTFS_MSG_CTX_STATE_DONE     0x03
+#define ECRYPTFS_MSG_CTX_STATE_NO_REPLY 0x04
+       u8 state;
+#define ECRYPTFS_MSG_HELO 100
+#define ECRYPTFS_MSG_QUIT 101
+#define ECRYPTFS_MSG_REQUEST 102
+#define ECRYPTFS_MSG_RESPONSE 103
+       u8 type;
+       u32 index;
+       /* The counter serves as a sequence number: each message sent
+        * out for which we expect a response carries an associated
+        * sequence number, and the response must carry the same
+        * sequence number as the counter in the corresponding msg_ctx
+        * for the message to be valid. */
+       u32 counter;
+       size_t msg_size;
        struct ecryptfs_message *msg;
        struct task_struct *task;
        struct list_head node;
+       struct list_head daemon_out_list;
        struct mutex mux;
 };
 
 extern unsigned int ecryptfs_transport;
 
-struct ecryptfs_daemon_id {
-       pid_t pid;
-       uid_t uid;
-       struct hlist_node id_chain;
+struct ecryptfs_daemon;
+
+struct ecryptfs_daemon {
+#define ECRYPTFS_DAEMON_IN_READ      0x00000001
+#define ECRYPTFS_DAEMON_IN_POLL      0x00000002
+#define ECRYPTFS_DAEMON_ZOMBIE       0x00000004
+#define ECRYPTFS_DAEMON_MISCDEV_OPEN 0x00000008
+       u32 flags;
+       u32 num_queued_msg_ctx;
+       struct pid *pid;
+       uid_t euid;
+       struct user_namespace *user_ns;
+       struct task_struct *task;
+       struct mutex mux;
+       struct list_head msg_ctx_out_queue;
+       wait_queue_head_t wait;
+       struct hlist_node euid_chain;
 };
 
+extern struct mutex ecryptfs_daemon_hash_mux;
+
 static inline struct ecryptfs_file_info *
 ecryptfs_file_to_private(struct file *file)
 {
@@ -500,7 +531,7 @@ ecryptfs_set_dentry_lower_mnt(struct dentry *dentry, struct vfsmount *lower_mnt)
 }
 
 #define ecryptfs_printk(type, fmt, arg...) \
-        __ecryptfs_printk(type "%s: " fmt, __FUNCTION__, ## arg);
+        __ecryptfs_printk(type "%s: " fmt, __func__, ## arg);
 void __ecryptfs_printk(const char *fmt, ...);
 
 extern const struct file_operations ecryptfs_main_fops;
@@ -581,10 +612,13 @@ int
 ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
                  size_t size, int flags);
 int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode);
-int ecryptfs_process_helo(unsigned int transport, uid_t uid, pid_t pid);
-int ecryptfs_process_quit(uid_t uid, pid_t pid);
-int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t uid,
-                             pid_t pid, u32 seq);
+int ecryptfs_process_helo(unsigned int transport, uid_t euid,
+                         struct user_namespace *user_ns, struct pid *pid);
+int ecryptfs_process_quit(uid_t euid, struct user_namespace *user_ns,
+                         struct pid *pid);
+int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid,
+                             struct user_namespace *user_ns, struct pid *pid,
+                             u32 seq);
 int ecryptfs_send_message(unsigned int transport, char *data, int data_len,
                          struct ecryptfs_msg_ctx **msg_ctx);
 int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx,
@@ -593,14 +627,14 @@ int ecryptfs_init_messaging(unsigned int transport);
 void ecryptfs_release_messaging(unsigned int transport);
 
 int ecryptfs_send_netlink(char *data, int data_len,
-                         struct ecryptfs_msg_ctx *msg_ctx, u16 msg_type,
-                         u16 msg_flags, pid_t daemon_pid);
+                         struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
+                         u16 msg_flags, struct pid *daemon_pid);
 int ecryptfs_init_netlink(void);
 void ecryptfs_release_netlink(void);
 
 int ecryptfs_send_connector(char *data, int data_len,
-                           struct ecryptfs_msg_ctx *msg_ctx, u16 msg_type,
-                           u16 msg_flags, pid_t daemon_pid);
+                           struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
+                           u16 msg_flags, struct pid *daemon_pid);
 int ecryptfs_init_connector(void);
 void ecryptfs_release_connector(void);
 void
@@ -642,5 +676,21 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
                                     size_t offset_in_page, size_t size,
                                     struct inode *ecryptfs_inode);
 struct page *ecryptfs_get_locked_page(struct file *file, loff_t index);
+int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon);
+int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid,
+                                struct user_namespace *user_ns);
+int ecryptfs_parse_packet_length(unsigned char *data, size_t *size,
+                                size_t *length_size);
+int ecryptfs_write_packet_length(char *dest, size_t size,
+                                size_t *packet_size_length);
+int ecryptfs_init_ecryptfs_miscdev(void);
+void ecryptfs_destroy_ecryptfs_miscdev(void);
+int ecryptfs_send_miscdev(char *data, size_t data_size,
+                         struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
+                         u16 msg_flags, struct ecryptfs_daemon *daemon);
+void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx);
+int
+ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid,
+                     struct user_namespace *user_ns, struct pid *pid);
 
 #endif /* #ifndef ECRYPTFS_KERNEL_H */
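
The new ECRYPTFS_VERSIONING_DEVMISC bit is folded into ECRYPTFS_VERSIONING_MASK so the kernel can advertise the miscdev transport to userspace. A hedged sketch of how a userspace helper might test the advertised bits; how the version word actually reaches userspace is not part of this hunk, so version_flags below is a stand-in and the function name is made up:

#include <stdint.h>
#include <stdio.h>

#define ECRYPTFS_VERSIONING_DEVMISC 0x00000040  /* value copied from the hunk above */

/* version_flags stands in for whatever versioning word the daemon
 * obtains from the kernel. */
static int kernel_advertises_miscdev(uint32_t version_flags)
{
        return (version_flags & ECRYPTFS_VERSIONING_DEVMISC) != 0;
}

int main(void)
{
        uint32_t version_flags = 0x7f;  /* hypothetical value */

        printf("miscdev transport %s\n",
               kernel_advertises_miscdev(version_flags) ? "advertised" : "not advertised");
        return 0;
}
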
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 2b8f5ed4adea534063ae77259d366c426f9d5725..2258b8f654a62ff7f45e6df55cd3a501d1c5dc25 100644
@@ -195,7 +195,9 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
                file, ecryptfs_inode_to_private(inode)->lower_file);
        if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
                ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
+               mutex_lock(&crypt_stat->cs_mutex);
                crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
+               mutex_unlock(&crypt_stat->cs_mutex);
                rc = 0;
                goto out;
        }
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index e2386115210105ecbbb01376a7290d0dd4dbe107..0a1397335a8eb49a245e81b1826607a5158a2685 100644
@@ -111,7 +111,7 @@ ecryptfs_do_create(struct inode *directory_inode,
 
        lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
        lower_dir_dentry = lock_parent(lower_dentry);
-       if (unlikely(IS_ERR(lower_dir_dentry))) {
+       if (IS_ERR(lower_dir_dentry)) {
                ecryptfs_printk(KERN_ERR, "Error locking directory of "
                                "dentry\n");
                rc = PTR_ERR(lower_dir_dentry);
@@ -121,7 +121,7 @@ ecryptfs_do_create(struct inode *directory_inode,
                                             ecryptfs_dentry, mode, nd);
        if (rc) {
                printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
-                      "rc = [%d]\n", __FUNCTION__, rc);
+                      "rc = [%d]\n", __func__, rc);
                goto out_lock;
        }
        rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry,
@@ -908,7 +908,9 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
        if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
                ia->ia_valid &= ~ATTR_MODE;
 
+       mutex_lock(&lower_dentry->d_inode->i_mutex);
        rc = notify_change(lower_dentry, ia);
+       mutex_unlock(&lower_dentry->d_inode->i_mutex);
 out:
        fsstack_copy_attr_all(inode, lower_inode, NULL);
        return rc;
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 682b1b2482c26a062cefeaf924787242f89612d1..e82b457180be78706e4b829c4dec0ea15db82178 100644
@@ -65,7 +65,7 @@ static int process_request_key_err(long err_code)
 }
 
 /**
- * parse_packet_length
+ * ecryptfs_parse_packet_length
  * @data: Pointer to memory containing length at offset
  * @size: This function writes the decoded size to this memory
  *        address; zero on error
@@ -73,8 +73,8 @@ static int process_request_key_err(long err_code)
  *
  * Returns zero on success; non-zero on error
  */
-static int parse_packet_length(unsigned char *data, size_t *size,
-                              size_t *length_size)
+int ecryptfs_parse_packet_length(unsigned char *data, size_t *size,
+                                size_t *length_size)
 {
        int rc = 0;
 
@@ -105,7 +105,7 @@ out:
 }
 
 /**
- * write_packet_length
+ * ecryptfs_write_packet_length
  * @dest: The byte array target into which to write the length. Must
  *        have at least 5 bytes allocated.
  * @size: The length to write.
@@ -114,8 +114,8 @@ out:
  *
  * Returns zero on success; non-zero on error.
  */
-static int write_packet_length(char *dest, size_t size,
-                              size_t *packet_size_length)
+int ecryptfs_write_packet_length(char *dest, size_t size,
+                                size_t *packet_size_length)
 {
        int rc = 0;
 
@@ -162,8 +162,8 @@ write_tag_64_packet(char *signature, struct ecryptfs_session_key *session_key,
                goto out;
        }
        message[i++] = ECRYPTFS_TAG_64_PACKET_TYPE;
-       rc = write_packet_length(&message[i], ECRYPTFS_SIG_SIZE_HEX,
-                                &packet_size_len);
+       rc = ecryptfs_write_packet_length(&message[i], ECRYPTFS_SIG_SIZE_HEX,
+                                         &packet_size_len);
        if (rc) {
                ecryptfs_printk(KERN_ERR, "Error generating tag 64 packet "
                                "header; cannot generate packet length\n");
@@ -172,8 +172,9 @@ write_tag_64_packet(char *signature, struct ecryptfs_session_key *session_key,
        i += packet_size_len;
        memcpy(&message[i], signature, ECRYPTFS_SIG_SIZE_HEX);
        i += ECRYPTFS_SIG_SIZE_HEX;
-       rc = write_packet_length(&message[i], session_key->encrypted_key_size,
-                                &packet_size_len);
+       rc = ecryptfs_write_packet_length(&message[i],
+                                         session_key->encrypted_key_size,
+                                         &packet_size_len);
        if (rc) {
                ecryptfs_printk(KERN_ERR, "Error generating tag 64 packet "
                                "header; cannot generate packet length\n");
@@ -225,7 +226,7 @@ parse_tag_65_packet(struct ecryptfs_session_key *session_key, u8 *cipher_code,
                rc = -EIO;
                goto out;
        }
-       rc = parse_packet_length(&data[i], &m_size, &data_len);
+       rc = ecryptfs_parse_packet_length(&data[i], &m_size, &data_len);
        if (rc) {
                ecryptfs_printk(KERN_WARNING, "Error parsing packet length; "
                                "rc = [%d]\n", rc);
@@ -304,8 +305,8 @@ write_tag_66_packet(char *signature, u8 cipher_code,
                goto out;
        }
        message[i++] = ECRYPTFS_TAG_66_PACKET_TYPE;
-       rc = write_packet_length(&message[i], ECRYPTFS_SIG_SIZE_HEX,
-                                &packet_size_len);
+       rc = ecryptfs_write_packet_length(&message[i], ECRYPTFS_SIG_SIZE_HEX,
+                                         &packet_size_len);
        if (rc) {
                ecryptfs_printk(KERN_ERR, "Error generating tag 66 packet "
                                "header; cannot generate packet length\n");
@@ -315,8 +316,8 @@ write_tag_66_packet(char *signature, u8 cipher_code,
        memcpy(&message[i], signature, ECRYPTFS_SIG_SIZE_HEX);
        i += ECRYPTFS_SIG_SIZE_HEX;
        /* The encrypted key includes 1 byte cipher code and 2 byte checksum */
-       rc = write_packet_length(&message[i], crypt_stat->key_size + 3,
-                                &packet_size_len);
+       rc = ecryptfs_write_packet_length(&message[i], crypt_stat->key_size + 3,
+                                         &packet_size_len);
        if (rc) {
                ecryptfs_printk(KERN_ERR, "Error generating tag 66 packet "
                                "header; cannot generate packet length\n");
@@ -357,20 +358,25 @@ parse_tag_67_packet(struct ecryptfs_key_record *key_rec,
        /* verify that everything through the encrypted FEK size is present */
        if (message_len < 4) {
                rc = -EIO;
+               printk(KERN_ERR "%s: message_len is [%Zd]; minimum acceptable "
+                      "message length is [%d]\n", __func__, message_len, 4);
                goto out;
        }
        if (data[i++] != ECRYPTFS_TAG_67_PACKET_TYPE) {
-               ecryptfs_printk(KERN_ERR, "Type should be ECRYPTFS_TAG_67\n");
                rc = -EIO;
+               printk(KERN_ERR "%s: Type should be ECRYPTFS_TAG_67\n",
+                      __func__);
                goto out;
        }
        if (data[i++]) {
-               ecryptfs_printk(KERN_ERR, "Status indicator has non zero value"
-                               " [%d]\n", data[i-1]);
                rc = -EIO;
+               printk(KERN_ERR "%s: Status indicator has non zero "
+                      "value [%d]\n", __func__, data[i-1]);
+
                goto out;
        }
-       rc = parse_packet_length(&data[i], &key_rec->enc_key_size, &data_len);
+       rc = ecryptfs_parse_packet_length(&data[i], &key_rec->enc_key_size,
+                                         &data_len);
        if (rc) {
                ecryptfs_printk(KERN_WARNING, "Error parsing packet length; "
                                "rc = [%d]\n", rc);
@@ -378,17 +384,17 @@ parse_tag_67_packet(struct ecryptfs_key_record *key_rec,
        }
        i += data_len;
        if (message_len < (i + key_rec->enc_key_size)) {
-               ecryptfs_printk(KERN_ERR, "message_len [%d]; max len is [%d]\n",
-                               message_len, (i + key_rec->enc_key_size));
                rc = -EIO;
+               printk(KERN_ERR "%s: message_len [%Zd]; max len is [%Zd]\n",
+                      __func__, message_len, (i + key_rec->enc_key_size));
                goto out;
        }
        if (key_rec->enc_key_size > ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) {
-               ecryptfs_printk(KERN_ERR, "Encrypted key_size [%d] larger than "
-                               "the maximum key size [%d]\n",
-                               key_rec->enc_key_size,
-                               ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES);
                rc = -EIO;
+               printk(KERN_ERR "%s: Encrypted key_size [%Zd] larger than "
+                      "the maximum key size [%d]\n", __func__,
+                      key_rec->enc_key_size,
+                      ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES);
                goto out;
        }
        memcpy(key_rec->enc_key, &data[i], key_rec->enc_key_size);
@@ -445,7 +451,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok,
        rc = write_tag_64_packet(auth_tok_sig, &(auth_tok->session_key),
                                 &netlink_message, &netlink_message_length);
        if (rc) {
-               ecryptfs_printk(KERN_ERR, "Failed to write tag 64 packet");
+               ecryptfs_printk(KERN_ERR, "Failed to write tag 64 packet\n");
                goto out;
        }
        rc = ecryptfs_send_message(ecryptfs_transport, netlink_message,
@@ -570,8 +576,8 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat,
                goto out;
        }
        (*new_auth_tok) = &auth_tok_list_item->auth_tok;
-       rc = parse_packet_length(&data[(*packet_size)], &body_size,
-                                &length_size);
+       rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size,
+                                         &length_size);
        if (rc) {
                printk(KERN_WARNING "Error parsing packet length; "
                       "rc = [%d]\n", rc);
@@ -704,8 +710,8 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat,
                goto out;
        }
        (*new_auth_tok) = &auth_tok_list_item->auth_tok;
-       rc = parse_packet_length(&data[(*packet_size)], &body_size,
-                                &length_size);
+       rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size,
+                                         &length_size);
        if (rc) {
                printk(KERN_WARNING "Error parsing packet length; rc = [%d]\n",
                       rc);
@@ -852,8 +858,8 @@ parse_tag_11_packet(unsigned char *data, unsigned char *contents,
                rc = -EINVAL;
                goto out;
        }
-       rc = parse_packet_length(&data[(*packet_size)], &body_size,
-                                &length_size);
+       rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size,
+                                         &length_size);
        if (rc) {
                printk(KERN_WARNING "Invalid tag 11 packet format\n");
                goto out;
@@ -1405,8 +1411,8 @@ write_tag_1_packet(char *dest, size_t *remaining_bytes,
                        auth_tok->token.private_key.key_size;
        rc = pki_encrypt_session_key(auth_tok, crypt_stat, key_rec);
        if (rc) {
-               ecryptfs_printk(KERN_ERR, "Failed to encrypt session key "
-                               "via a pki");
+               printk(KERN_ERR "Failed to encrypt session key via a key "
+                      "module; rc = [%d]\n", rc);
                goto out;
        }
        if (ecryptfs_verbosity > 0) {
@@ -1430,8 +1436,9 @@ encrypted_session_key_set:
                goto out;
        }
        dest[(*packet_size)++] = ECRYPTFS_TAG_1_PACKET_TYPE;
-       rc = write_packet_length(&dest[(*packet_size)], (max_packet_size - 4),
-                                &packet_size_length);
+       rc = ecryptfs_write_packet_length(&dest[(*packet_size)],
+                                         (max_packet_size - 4),
+                                         &packet_size_length);
        if (rc) {
                ecryptfs_printk(KERN_ERR, "Error generating tag 1 packet "
                                "header; cannot generate packet length\n");
@@ -1489,8 +1496,9 @@ write_tag_11_packet(char *dest, size_t *remaining_bytes, char *contents,
                goto out;
        }
        dest[(*packet_length)++] = ECRYPTFS_TAG_11_PACKET_TYPE;
-       rc = write_packet_length(&dest[(*packet_length)],
-                                (max_packet_size - 4), &packet_size_length);
+       rc = ecryptfs_write_packet_length(&dest[(*packet_length)],
+                                         (max_packet_size - 4),
+                                         &packet_size_length);
        if (rc) {
                printk(KERN_ERR "Error generating tag 11 packet header; cannot "
                       "generate packet length. rc = [%d]\n", rc);
@@ -1682,8 +1690,9 @@ encrypted_session_key_set:
        dest[(*packet_size)++] = ECRYPTFS_TAG_3_PACKET_TYPE;
        /* Chop off the Tag 3 identifier(1) and Tag 3 packet size(3)
         * to get the number of octets in the actual Tag 3 packet */
-       rc = write_packet_length(&dest[(*packet_size)], (max_packet_size - 4),
-                                &packet_size_length);
+       rc = ecryptfs_write_packet_length(&dest[(*packet_size)],
+                                         (max_packet_size - 4),
+                                         &packet_size_length);
        if (rc) {
                printk(KERN_ERR "Error generating tag 3 packet header; cannot "
                       "generate packet length. rc = [%d]\n", rc);
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index d25ac9500a92bd31282f477461161f7ac6cbfeb5..d603631601ebdd364d619b18efdcbafec8b4ac91 100644
@@ -219,7 +219,7 @@ int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry,
        if (rc) {
                printk(KERN_ERR "%s: Error attempting to initialize the "
                       "persistent file for the dentry with name [%s]; "
-                      "rc = [%d]\n", __FUNCTION__, dentry->d_name.name, rc);
+                      "rc = [%d]\n", __func__, dentry->d_name.name, rc);
                goto out;
        }
 out:
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index 9cc2aec27b0dc5b8de698dd8779b0cb4e70682b8..1b5c20058acbeefddc1031f5700993da2e799c58 100644
@@ -1,7 +1,7 @@
 /**
  * eCryptfs: Linux filesystem encryption layer
  *
- * Copyright (C) 2004-2006 International Business Machines Corp.
+ * Copyright (C) 2004-2008 International Business Machines Corp.
  *   Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com>
  *             Tyler Hicks <tyhicks@ou.edu>
  *
  * 02111-1307, USA.
  */
 #include <linux/sched.h>
+#include <linux/user_namespace.h>
+#include <linux/nsproxy.h>
 #include "ecryptfs_kernel.h"
 
 static LIST_HEAD(ecryptfs_msg_ctx_free_list);
 static LIST_HEAD(ecryptfs_msg_ctx_alloc_list);
 static struct mutex ecryptfs_msg_ctx_lists_mux;
 
-static struct hlist_head *ecryptfs_daemon_id_hash;
-static struct mutex ecryptfs_daemon_id_hash_mux;
+static struct hlist_head *ecryptfs_daemon_hash;
+struct mutex ecryptfs_daemon_hash_mux;
 static int ecryptfs_hash_buckets;
 #define ecryptfs_uid_hash(uid) \
         hash_long((unsigned long)uid, ecryptfs_hash_buckets)
 
-static unsigned int ecryptfs_msg_counter;
+static u32 ecryptfs_msg_counter;
 static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr;
 
 /**
@@ -40,9 +42,10 @@ static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr;
  * @msg_ctx: The context that was acquired from the free list
  *
  * Acquires a context element from the free list and locks the mutex
- * on the context.  Returns zero on success; non-zero on error or upon
- * failure to acquire a free context element.  Be sure to lock the
- * list mutex before calling.
+ * on the context.  Sets the msg_ctx task to current.  Returns zero on
+ * success; non-zero on error or upon failure to acquire a free
+ * context element.  Must be called with ecryptfs_msg_ctx_lists_mux
+ * held.
  */
 static int ecryptfs_acquire_free_msg_ctx(struct ecryptfs_msg_ctx **msg_ctx)
 {
@@ -50,11 +53,11 @@ static int ecryptfs_acquire_free_msg_ctx(struct ecryptfs_msg_ctx **msg_ctx)
        int rc;
 
        if (list_empty(&ecryptfs_msg_ctx_free_list)) {
-               ecryptfs_printk(KERN_WARNING, "The eCryptfs free "
-                               "context list is empty.  It may be helpful to "
-                               "specify the ecryptfs_message_buf_len "
-                               "parameter to be greater than the current "
-                               "value of [%d]\n", ecryptfs_message_buf_len);
+               printk(KERN_WARNING "%s: The eCryptfs free "
+                      "context list is empty.  It may be helpful to "
+                      "specify the ecryptfs_message_buf_len "
+                      "parameter to be greater than the current "
+                      "value of [%d]\n", __func__, ecryptfs_message_buf_len);
                rc = -ENOMEM;
                goto out;
        }
@@ -75,8 +78,7 @@ out:
  * ecryptfs_msg_ctx_free_to_alloc
  * @msg_ctx: The context to move from the free list to the alloc list
  *
- * Be sure to lock the list mutex and the context mutex before
- * calling.
+ * Must be called with ecryptfs_msg_ctx_lists_mux held.
  */
 static void ecryptfs_msg_ctx_free_to_alloc(struct ecryptfs_msg_ctx *msg_ctx)
 {
@@ -89,36 +91,39 @@ static void ecryptfs_msg_ctx_free_to_alloc(struct ecryptfs_msg_ctx *msg_ctx)
  * ecryptfs_msg_ctx_alloc_to_free
  * @msg_ctx: The context to move from the alloc list to the free list
  *
- * Be sure to lock the list mutex and the context mutex before
- * calling.
+ * Must be called with ecryptfs_msg_ctx_lists_mux held.
  */
-static void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx)
+void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx)
 {
        list_move(&(msg_ctx->node), &ecryptfs_msg_ctx_free_list);
        if (msg_ctx->msg)
                kfree(msg_ctx->msg);
+       msg_ctx->msg = NULL;
        msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_FREE;
 }
 
 /**
- * ecryptfs_find_daemon_id
- * @uid: The user id which maps to the desired daemon id
- * @id: If return value is zero, points to the desired daemon id
- *      pointer
+ * ecryptfs_find_daemon_by_euid
+ * @euid: The effective user id which maps to the desired daemon id
+ * @user_ns: The namespace in which @euid applies
+ * @daemon: If return value is zero, points to the desired daemon pointer
  *
- * Search the hash list for the given user id.  Returns zero if the
- * user id exists in the list; non-zero otherwise.  The daemon id hash
- * mutex should be held before calling this function.
+ * Must be called with ecryptfs_daemon_hash_mux held.
+ *
+ * Search the hash list for the given user id.
+ *
+ * Returns zero if the user id exists in the list; non-zero otherwise.
  */
-static int ecryptfs_find_daemon_id(uid_t uid, struct ecryptfs_daemon_id **id)
+int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid,
+                                struct user_namespace *user_ns)
 {
        struct hlist_node *elem;
        int rc;
 
-       hlist_for_each_entry(*id, elem,
-                            &ecryptfs_daemon_id_hash[ecryptfs_uid_hash(uid)],
-                            id_chain) {
-               if ((*id)->uid == uid) {
+       hlist_for_each_entry(*daemon, elem,
+                            &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)],
+                            euid_chain) {
+               if ((*daemon)->euid == euid && (*daemon)->user_ns == user_ns) {
                        rc = 0;
                        goto out;
                }
@@ -128,181 +133,325 @@ out:
        return rc;
 }
 
-static int ecryptfs_send_raw_message(unsigned int transport, u16 msg_type,
-                                    pid_t pid)
+static int
+ecryptfs_send_message_locked(unsigned int transport, char *data, int data_len,
+                            u8 msg_type, struct ecryptfs_msg_ctx **msg_ctx);
+
+/**
+ * ecryptfs_send_raw_message
+ * @transport: Transport type
+ * @msg_type: Message type
+ * @daemon: Daemon struct for recipient of message
+ *
+ * A raw message is one that does not include an ecryptfs_message
+ * struct. It simply has a type.
+ *
+ * Must be called with ecryptfs_daemon_hash_mux held.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int ecryptfs_send_raw_message(unsigned int transport, u8 msg_type,
+                                    struct ecryptfs_daemon *daemon)
 {
+       struct ecryptfs_msg_ctx *msg_ctx;
        int rc;
 
        switch(transport) {
        case ECRYPTFS_TRANSPORT_NETLINK:
-               rc = ecryptfs_send_netlink(NULL, 0, NULL, msg_type, 0, pid);
+               rc = ecryptfs_send_netlink(NULL, 0, NULL, msg_type, 0,
+                                          daemon->pid);
+               break;
+       case ECRYPTFS_TRANSPORT_MISCDEV:
+               rc = ecryptfs_send_message_locked(transport, NULL, 0, msg_type,
+                                                 &msg_ctx);
+               if (rc) {
+                       printk(KERN_ERR "%s: Error whilst attempting to send "
+                              "message via miscdev; rc = [%d]\n", __func__, rc);
+                       goto out;
+               }
+               /* Raw messages are logically context-free (e.g., no
+                * reply is expected), so we set the state of the
+                * ecryptfs_msg_ctx object to indicate that it should
+                * be freed as soon as the transport sends out the message. */
+               mutex_lock(&msg_ctx->mux);
+               msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_NO_REPLY;
+               mutex_unlock(&msg_ctx->mux);
                break;
        case ECRYPTFS_TRANSPORT_CONNECTOR:
        case ECRYPTFS_TRANSPORT_RELAYFS:
        default:
                rc = -ENOSYS;
        }
+out:
+       return rc;
+}
+
+/**
+ * ecryptfs_spawn_daemon - Create and initialize a new daemon struct
+ * @daemon: Pointer to set to newly allocated daemon struct
+ * @euid: Effective user id for the daemon
+ * @user_ns: The namespace in which @euid applies
+ * @pid: Process id for the daemon
+ *
+ * Must be called with ecryptfs_daemon_hash_mux held.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+int
+ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid,
+                     struct user_namespace *user_ns, struct pid *pid)
+{
+       int rc = 0;
+
+       (*daemon) = kzalloc(sizeof(**daemon), GFP_KERNEL);
+       if (!(*daemon)) {
+               rc = -ENOMEM;
+               printk(KERN_ERR "%s: Failed to allocate [%Zd] bytes of "
+                      "GFP_KERNEL memory\n", __func__, sizeof(**daemon));
+               goto out;
+       }
+       (*daemon)->euid = euid;
+       (*daemon)->user_ns = get_user_ns(user_ns);
+       (*daemon)->pid = get_pid(pid);
+       (*daemon)->task = current;
+       mutex_init(&(*daemon)->mux);
+       INIT_LIST_HEAD(&(*daemon)->msg_ctx_out_queue);
+       init_waitqueue_head(&(*daemon)->wait);
+       (*daemon)->num_queued_msg_ctx = 0;
+       hlist_add_head(&(*daemon)->euid_chain,
+                      &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)]);
+out:
        return rc;
 }
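
ecryptfs_spawn_daemon() and ecryptfs_exorcise_daemon() (defined further down) form the registration/teardown pair for entries in ecryptfs_daemon_hash, and both expect ecryptfs_daemon_hash_mux to be held by the caller. A hedged sketch of that pairing, using only functions introduced in this patch; the wrapper name and parameters are illustrative:

/* Registration/teardown sketch; euid, user_ns, and pid come from
 * whatever context performs the registration. */
static int ecryptfs_daemon_lifecycle_sketch(uid_t euid,
                                            struct user_namespace *user_ns,
                                            struct pid *pid)
{
        struct ecryptfs_daemon *daemon;
        int rc;

        mutex_lock(&ecryptfs_daemon_hash_mux);
        rc = ecryptfs_find_daemon_by_euid(&daemon, euid, user_ns);
        if (rc)  /* non-zero: nothing registered yet for this euid/user_ns */
                rc = ecryptfs_spawn_daemon(&daemon, euid, user_ns, pid);
        if (!rc)
                rc = ecryptfs_exorcise_daemon(daemon);  /* unregister again */
        mutex_unlock(&ecryptfs_daemon_hash_mux);
        return rc;
}
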
 
 /**
  * ecryptfs_process_helo
  * @transport: The underlying transport (netlink, etc.)
- * @uid: The user ID owner of the message
+ * @euid: The user ID owner of the message
+ * @user_ns: The namespace in which @euid applies
  * @pid: The process ID for the userspace program that sent the
  *       message
  *
- * Adds the uid and pid values to the daemon id hash.  If a uid
+ * Adds the euid and pid values to the daemon euid hash.  If an euid
  * already has a daemon pid registered, the daemon will be
- * unregistered before the new daemon id is put into the hash list.
- * Returns zero after adding a new daemon id to the hash list;
+ * unregistered before the new daemon is put into the hash list.
+ * Returns zero after adding a new daemon to the hash list;
  * non-zero otherwise.
  */
-int ecryptfs_process_helo(unsigned int transport, uid_t uid, pid_t pid)
+int ecryptfs_process_helo(unsigned int transport, uid_t euid,
+                         struct user_namespace *user_ns, struct pid *pid)
 {
-       struct ecryptfs_daemon_id *new_id;
-       struct ecryptfs_daemon_id *old_id;
+       struct ecryptfs_daemon *new_daemon;
+       struct ecryptfs_daemon *old_daemon;
        int rc;
 
-       mutex_lock(&ecryptfs_daemon_id_hash_mux);
-       new_id = kmalloc(sizeof(*new_id), GFP_KERNEL);
-       if (!new_id) {
-               rc = -ENOMEM;
-               ecryptfs_printk(KERN_ERR, "Failed to allocate memory; unable "
-                               "to register daemon [%d] for user [%d]\n",
-                               pid, uid);
-               goto unlock;
-       }
-       if (!ecryptfs_find_daemon_id(uid, &old_id)) {
+       mutex_lock(&ecryptfs_daemon_hash_mux);
+       rc = ecryptfs_find_daemon_by_euid(&old_daemon, euid, user_ns);
+       if (rc != 0) {
                printk(KERN_WARNING "Received request from user [%d] "
-                      "to register daemon [%d]; unregistering daemon "
-                      "[%d]\n", uid, pid, old_id->pid);
-               hlist_del(&old_id->id_chain);
-               rc = ecryptfs_send_raw_message(transport, ECRYPTFS_NLMSG_QUIT,
-                                              old_id->pid);
+                      "to register daemon [0x%p]; unregistering daemon "
+                      "[0x%p]\n", euid, pid, old_daemon->pid);
+               rc = ecryptfs_send_raw_message(transport, ECRYPTFS_MSG_QUIT,
+                                              old_daemon);
                if (rc)
                        printk(KERN_WARNING "Failed to send QUIT "
-                              "message to daemon [%d]; rc = [%d]\n",
-                              old_id->pid, rc);
-               kfree(old_id);
+                              "message to daemon [0x%p]; rc = [%d]\n",
+                              old_daemon->pid, rc);
+               hlist_del(&old_daemon->euid_chain);
+               kfree(old_daemon);
        }
-       new_id->uid = uid;
-       new_id->pid = pid;
-       hlist_add_head(&new_id->id_chain,
-                      &ecryptfs_daemon_id_hash[ecryptfs_uid_hash(uid)]);
-       rc = 0;
-unlock:
-       mutex_unlock(&ecryptfs_daemon_id_hash_mux);
+       rc = ecryptfs_spawn_daemon(&new_daemon, euid, user_ns, pid);
+       if (rc)
+               printk(KERN_ERR "%s: Error attempting to spawn a new daemon "
+                      "object for euid [%d]; pid [0x%p]; rc = [%d]\n",
+                      __func__, euid, pid, rc);
+       mutex_unlock(&ecryptfs_daemon_hash_mux);
+       return rc;
+}
+
+/**
+ * ecryptfs_exorcise_daemon - Destroy the daemon struct
+ *
+ * Must be called with ecryptfs_daemon_hash_mux held; the daemon's own
+ * mux is taken and released internally.
+ */
+int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon)
+{
+       struct ecryptfs_msg_ctx *msg_ctx, *msg_ctx_tmp;
+       int rc = 0;
+
+       mutex_lock(&daemon->mux);
+       if ((daemon->flags & ECRYPTFS_DAEMON_IN_READ)
+           || (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)) {
+               rc = -EBUSY;
+               printk(KERN_WARNING "%s: Attempt to destroy daemon with pid "
+                      "[0x%p], but it is in the midst of a read or a poll\n",
+                      __func__, daemon->pid);
+               mutex_unlock(&daemon->mux);
+               goto out;
+       }
+       list_for_each_entry_safe(msg_ctx, msg_ctx_tmp,
+                                &daemon->msg_ctx_out_queue, daemon_out_list) {
+               list_del(&msg_ctx->daemon_out_list);
+               daemon->num_queued_msg_ctx--;
+               printk(KERN_WARNING "%s: Warning: dropping message that is in "
+                      "the out queue of a dying daemon\n", __func__);
+               ecryptfs_msg_ctx_alloc_to_free(msg_ctx);
+       }
+       hlist_del(&daemon->euid_chain);
+       if (daemon->task)
+               wake_up_process(daemon->task);
+       if (daemon->pid)
+               put_pid(daemon->pid);
+       if (daemon->user_ns)
+               put_user_ns(daemon->user_ns);
+       mutex_unlock(&daemon->mux);
+       memset(daemon, 0, sizeof(*daemon));
+       kfree(daemon);
+out:
        return rc;
 }
 
 /**
  * ecryptfs_process_quit
- * @uid: The user ID owner of the message
+ * @euid: The user ID owner of the message
+ * @user_ns: The namespace in which @euid applies
  * @pid: The process ID for the userspace program that sent the
  *       message
  *
- * Deletes the corresponding daemon id for the given uid and pid, if
+ * Deletes the corresponding daemon for the given euid and pid, if
  * it is the registered daemon that is requesting the deletion. Returns zero
- * after deleting the desired daemon id; non-zero otherwise.
+ * after deleting the desired daemon; non-zero otherwise.
  */
-int ecryptfs_process_quit(uid_t uid, pid_t pid)
+int ecryptfs_process_quit(uid_t euid, struct user_namespace *user_ns,
+                         struct pid *pid)
 {
-       struct ecryptfs_daemon_id *id;
+       struct ecryptfs_daemon *daemon;
        int rc;
 
-       mutex_lock(&ecryptfs_daemon_id_hash_mux);
-       if (ecryptfs_find_daemon_id(uid, &id)) {
+       mutex_lock(&ecryptfs_daemon_hash_mux);
+       rc = ecryptfs_find_daemon_by_euid(&daemon, euid, user_ns);
+       if (rc || !daemon) {
                rc = -EINVAL;
-               ecryptfs_printk(KERN_ERR, "Received request from user [%d] to "
-                               "unregister unrecognized daemon [%d]\n", uid,
-                               pid);
-               goto unlock;
+               printk(KERN_ERR "Received request from user [%d] to "
+                      "unregister unrecognized daemon [0x%p]\n", euid, pid);
+               goto out_unlock;
        }
-       if (id->pid != pid) {
-               rc = -EINVAL;
-               ecryptfs_printk(KERN_WARNING, "Received request from user [%d] "
-                               "with pid [%d] to unregister daemon [%d]\n",
-                               uid, pid, id->pid);
-               goto unlock;
-       }
-       hlist_del(&id->id_chain);
-       kfree(id);
-       rc = 0;
-unlock:
-       mutex_unlock(&ecryptfs_daemon_id_hash_mux);
+       rc = ecryptfs_exorcise_daemon(daemon);
+out_unlock:
+       mutex_unlock(&ecryptfs_daemon_hash_mux);
        return rc;
 }
 
 /**
  * ecryptfs_process_response
  * @msg: The ecryptfs message received; the caller should sanity check
- *       msg->data_len
+ *       msg->data_len and free the memory
  * @pid: The process ID of the userspace application that sent the
  *       message
- * @seq: The sequence number of the message
+ * @seq: The sequence number of the message; must match the sequence
+ *       number for the existing message context waiting for this
+ *       response
+ *
+ * Processes a response message after sending an operation request to
+ * userspace. Some other process is awaiting this response. Before
+ * sending out its first communications, the other process allocated a
+ * msg_ctx from the ecryptfs_msg_ctx_arr at a particular index. The
+ * response message contains this index so that we can copy over the
+ * response message into the msg_ctx that the process holds a
+ * reference to. The other process is going to wake up, check to see
+ * that msg_ctx->state == ECRYPTFS_MSG_CTX_STATE_DONE, and then
+ * proceed to read off and process the response message. Returns zero
+ * upon delivery to desired context element; non-zero upon delivery
+ * failure or error.
  *
- * Processes a response message after sending a operation request to
- * userspace. Returns zero upon delivery to desired context element;
- * non-zero upon delivery failure or error.
+ * Returns zero on success; non-zero otherwise
  */
-int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t uid,
-                             pid_t pid, u32 seq)
+int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid,
+                             struct user_namespace *user_ns, struct pid *pid,
+                             u32 seq)
 {
-       struct ecryptfs_daemon_id *id;
+       struct ecryptfs_daemon *daemon;
        struct ecryptfs_msg_ctx *msg_ctx;
-       int msg_size;
+       size_t msg_size;
+       struct nsproxy *nsproxy;
+       struct user_namespace *current_user_ns;
        int rc;
 
        if (msg->index >= ecryptfs_message_buf_len) {
                rc = -EINVAL;
-               ecryptfs_printk(KERN_ERR, "Attempt to reference "
-                               "context buffer at index [%d]; maximum "
-                               "allowable is [%d]\n", msg->index,
-                               (ecryptfs_message_buf_len - 1));
+               printk(KERN_ERR "%s: Attempt to reference "
+                      "context buffer at index [%d]; maximum "
+                      "allowable is [%d]\n", __func__, msg->index,
+                      (ecryptfs_message_buf_len - 1));
                goto out;
        }
        msg_ctx = &ecryptfs_msg_ctx_arr[msg->index];
        mutex_lock(&msg_ctx->mux);
-       if (ecryptfs_find_daemon_id(msg_ctx->task->euid, &id)) {
+       mutex_lock(&ecryptfs_daemon_hash_mux);
+       rcu_read_lock();
+       nsproxy = task_nsproxy(msg_ctx->task);
+       if (nsproxy == NULL) {
                rc = -EBADMSG;
-               ecryptfs_printk(KERN_WARNING, "User [%d] received a "
-                               "message response from process [%d] but does "
-                               "not have a registered daemon\n",
-                               msg_ctx->task->euid, pid);
+               printk(KERN_ERR "%s: Receiving process is a zombie. Dropping "
+                      "message.\n", __func__);
+               rcu_read_unlock();
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
                goto wake_up;
        }
-       if (msg_ctx->task->euid != uid) {
+       current_user_ns = nsproxy->user_ns;
+       rc = ecryptfs_find_daemon_by_euid(&daemon, msg_ctx->task->euid,
+                                         current_user_ns);
+       rcu_read_unlock();
+       mutex_unlock(&ecryptfs_daemon_hash_mux);
+       if (rc) {
+               rc = -EBADMSG;
+               printk(KERN_WARNING "%s: User [%d] received a "
+                      "message response from process [0x%p] but does "
+                      "not have a registered daemon\n", __func__,
+                      msg_ctx->task->euid, pid);
+               goto wake_up;
+       }
+       if (msg_ctx->task->euid != euid) {
                rc = -EBADMSG;
-               ecryptfs_printk(KERN_WARNING, "Received message from user "
-                               "[%d]; expected message from user [%d]\n",
-                               uid, msg_ctx->task->euid);
+               printk(KERN_WARNING "%s: Received message from user "
+                      "[%d]; expected message from user [%d]\n", __func__,
+                      euid, msg_ctx->task->euid);
                goto unlock;
        }
-       if (id->pid != pid) {
+       if (current_user_ns != user_ns) {
                rc = -EBADMSG;
-               ecryptfs_printk(KERN_ERR, "User [%d] received a "
-                               "message response from an unrecognized "
-                               "process [%d]\n", msg_ctx->task->euid, pid);
+               printk(KERN_WARNING "%s: Received message from user_ns "
+                      "[0x%p]; expected message from user_ns [0x%p]\n",
+                      __func__, user_ns, nsproxy->user_ns);
+               goto unlock;
+       }
+       if (daemon->pid != pid) {
+               rc = -EBADMSG;
+               printk(KERN_ERR "%s: User [%d] sent a message response "
+                      "from an unrecognized process [0x%p]\n",
+                      __func__, msg_ctx->task->euid, pid);
                goto unlock;
        }
        if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_PENDING) {
                rc = -EINVAL;
-               ecryptfs_printk(KERN_WARNING, "Desired context element is not "
-                               "pending a response\n");
+               printk(KERN_WARNING "%s: Desired context element is not "
+                      "pending a response\n", __func__);
                goto unlock;
        } else if (msg_ctx->counter != seq) {
                rc = -EINVAL;
-               ecryptfs_printk(KERN_WARNING, "Invalid message sequence; "
-                               "expected [%d]; received [%d]\n",
-                               msg_ctx->counter, seq);
+               printk(KERN_WARNING "%s: Invalid message sequence; "
+                      "expected [%d]; received [%d]\n", __func__,
+                      msg_ctx->counter, seq);
                goto unlock;
        }
-       msg_size = sizeof(*msg) + msg->data_len;
+       msg_size = (sizeof(*msg) + msg->data_len);
        msg_ctx->msg = kmalloc(msg_size, GFP_KERNEL);
        if (!msg_ctx->msg) {
                rc = -ENOMEM;
-               ecryptfs_printk(KERN_ERR, "Failed to allocate memory\n");
+               printk(KERN_ERR "%s: Failed to allocate [%Zd] bytes of "
+                      "GFP_KERNEL memory\n", __func__, msg_size);
                goto unlock;
        }
        memcpy(msg_ctx->msg, msg, msg_size);
@@ -317,34 +466,38 @@ out:
 }
 
 /**
- * ecryptfs_send_message
+ * ecryptfs_send_message_locked
  * @transport: The transport over which to send the message (i.e.,
  *             netlink)
  * @data: The data to send
  * @data_len: The length of data
  * @msg_ctx: The message context allocated for the send
+ *
+ * Must be called with ecryptfs_daemon_hash_mux held.
+ *
+ * Returns zero on success; non-zero otherwise
  */
-int ecryptfs_send_message(unsigned int transport, char *data, int data_len,
-                         struct ecryptfs_msg_ctx **msg_ctx)
+static int
+ecryptfs_send_message_locked(unsigned int transport, char *data, int data_len,
+                            u8 msg_type, struct ecryptfs_msg_ctx **msg_ctx)
 {
-       struct ecryptfs_daemon_id *id;
+       struct ecryptfs_daemon *daemon;
        int rc;
 
-       mutex_lock(&ecryptfs_daemon_id_hash_mux);
-       if (ecryptfs_find_daemon_id(current->euid, &id)) {
-               mutex_unlock(&ecryptfs_daemon_id_hash_mux);
+       rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid,
+                                         current->nsproxy->user_ns);
+       if (rc || !daemon) {
                rc = -ENOTCONN;
-               ecryptfs_printk(KERN_ERR, "User [%d] does not have a daemon "
-                               "registered\n", current->euid);
+               printk(KERN_ERR "%s: User [%d] does not have a daemon "
+                      "registered\n", __func__, current->euid);
                goto out;
        }
-       mutex_unlock(&ecryptfs_daemon_id_hash_mux);
        mutex_lock(&ecryptfs_msg_ctx_lists_mux);
        rc = ecryptfs_acquire_free_msg_ctx(msg_ctx);
        if (rc) {
                mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
-               ecryptfs_printk(KERN_WARNING, "Could not claim a free "
-                               "context element\n");
+               printk(KERN_WARNING "%s: Could not claim a free "
+                      "context element\n", __func__);
                goto out;
        }
        ecryptfs_msg_ctx_free_to_alloc(*msg_ctx);
@@ -352,22 +505,49 @@ int ecryptfs_send_message(unsigned int transport, char *data, int data_len,
        mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
        switch (transport) {
        case ECRYPTFS_TRANSPORT_NETLINK:
-               rc = ecryptfs_send_netlink(data, data_len, *msg_ctx,
-                                          ECRYPTFS_NLMSG_REQUEST, 0, id->pid);
+               rc = ecryptfs_send_netlink(data, data_len, *msg_ctx, msg_type,
+                                          0, daemon->pid);
+               break;
+       case ECRYPTFS_TRANSPORT_MISCDEV:
+               rc = ecryptfs_send_miscdev(data, data_len, *msg_ctx, msg_type,
+                                          0, daemon);
                break;
        case ECRYPTFS_TRANSPORT_CONNECTOR:
        case ECRYPTFS_TRANSPORT_RELAYFS:
        default:
                rc = -ENOSYS;
        }
-       if (rc) {
-               printk(KERN_ERR "Error attempting to send message to userspace "
-                      "daemon; rc = [%d]\n", rc);
-       }
+       if (rc)
+               printk(KERN_ERR "%s: Error attempting to send message to "
+                      "userspace daemon; rc = [%d]\n", __func__, rc);
 out:
        return rc;
 }
 
+/**
+ * ecryptfs_send_message
+ * @transport: The transport over which to send the message (i.e.,
+ *             netlink)
+ * @data: The data to send
+ * @data_len: The length of data
+ * @msg_ctx: The message context allocated for the send
+ *
+ * Grabs ecryptfs_daemon_hash_mux.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+int ecryptfs_send_message(unsigned int transport, char *data, int data_len,
+                         struct ecryptfs_msg_ctx **msg_ctx)
+{
+       int rc;
+
+       mutex_lock(&ecryptfs_daemon_hash_mux);
+       rc = ecryptfs_send_message_locked(transport, data, data_len,
+                                         ECRYPTFS_MSG_REQUEST, msg_ctx);
+       mutex_unlock(&ecryptfs_daemon_hash_mux);
+       return rc;
+}
+
 /**
  * ecryptfs_wait_for_response
  * @msg_ctx: The context that was assigned when sending a message
@@ -377,7 +557,7 @@ out:
  * of time exceeds ecryptfs_message_wait_timeout.  If zero is
  * returned, msg will point to a valid message from userspace; a
  * non-zero value is returned upon failure to receive a message or an
- * error occurs.
+ * error occurs. The caller must free @msg on success.
  */
 int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx,
                               struct ecryptfs_message **msg)
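
Taken together, ecryptfs_send_message() and ecryptfs_wait_for_response() give in-kernel callers a blocking request/response cycle: the reply arrives through ecryptfs_process_response(), which matches msg->index plus the sequence counter and marks the context ECRYPTFS_MSG_CTX_STATE_DONE. A caller-side sketch of that cycle, mirroring the call sequence visible in keystore.c; the wrapper name and payload are illustrative:

/* Caller-side sketch of the request/response cycle; the payload and
 * its length are illustrative only. */
static int ecryptfs_request_response_sketch(char *payload, int payload_len)
{
        struct ecryptfs_msg_ctx *msg_ctx;
        struct ecryptfs_message *reply = NULL;
        int rc;

        rc = ecryptfs_send_message(ecryptfs_transport, payload, payload_len,
                                   &msg_ctx);
        if (rc)
                return rc;
        rc = ecryptfs_wait_for_response(msg_ctx, &reply);
        if (rc)
                return rc;
        /* ... interpret reply->data / reply->data_len here ... */
        kfree(reply);   /* the caller owns the reply on success */
        return 0;
}
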
@@ -413,32 +593,32 @@ int ecryptfs_init_messaging(unsigned int transport)
 
        if (ecryptfs_number_of_users > ECRYPTFS_MAX_NUM_USERS) {
                ecryptfs_number_of_users = ECRYPTFS_MAX_NUM_USERS;
-               ecryptfs_printk(KERN_WARNING, "Specified number of users is "
-                               "too large, defaulting to [%d] users\n",
-                               ecryptfs_number_of_users);
+               printk(KERN_WARNING "%s: Specified number of users is "
+                      "too large, defaulting to [%d] users\n", __func__,
+                      ecryptfs_number_of_users);
        }
-       mutex_init(&ecryptfs_daemon_id_hash_mux);
-       mutex_lock(&ecryptfs_daemon_id_hash_mux);
+       mutex_init(&ecryptfs_daemon_hash_mux);
+       mutex_lock(&ecryptfs_daemon_hash_mux);
        ecryptfs_hash_buckets = 1;
        while (ecryptfs_number_of_users >> ecryptfs_hash_buckets)
                ecryptfs_hash_buckets++;
-       ecryptfs_daemon_id_hash = kmalloc(sizeof(struct hlist_head)
-                                         * ecryptfs_hash_buckets, GFP_KERNEL);
-       if (!ecryptfs_daemon_id_hash) {
+       ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head)
+                                       * ecryptfs_hash_buckets), GFP_KERNEL);
+       if (!ecryptfs_daemon_hash) {
                rc = -ENOMEM;
-               ecryptfs_printk(KERN_ERR, "Failed to allocate memory\n");
-               mutex_unlock(&ecryptfs_daemon_id_hash_mux);
+               printk(KERN_ERR "%s: Failed to allocate memory\n", __func__);
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
                goto out;
        }
        for (i = 0; i < ecryptfs_hash_buckets; i++)
-               INIT_HLIST_HEAD(&ecryptfs_daemon_id_hash[i]);
-       mutex_unlock(&ecryptfs_daemon_id_hash_mux);
-
+               INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]);
+       mutex_unlock(&ecryptfs_daemon_hash_mux);
        ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx)
-                                     * ecryptfs_message_buf_len), GFP_KERNEL);
+                                       * ecryptfs_message_buf_len),
+                                      GFP_KERNEL);
        if (!ecryptfs_msg_ctx_arr) {
                rc = -ENOMEM;
-               ecryptfs_printk(KERN_ERR, "Failed to allocate memory\n");
+               printk(KERN_ERR "%s: Failed to allocate memory\n", __func__);
                goto out;
        }
        mutex_init(&ecryptfs_msg_ctx_lists_mux);
@@ -446,6 +626,7 @@ int ecryptfs_init_messaging(unsigned int transport)
        ecryptfs_msg_counter = 0;
        for (i = 0; i < ecryptfs_message_buf_len; i++) {
                INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].node);
+               INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].daemon_out_list);
                mutex_init(&ecryptfs_msg_ctx_arr[i].mux);
                mutex_lock(&ecryptfs_msg_ctx_arr[i].mux);
                ecryptfs_msg_ctx_arr[i].index = i;
@@ -464,6 +645,11 @@ int ecryptfs_init_messaging(unsigned int transport)
                if (rc)
                        ecryptfs_release_messaging(transport);
                break;
+       case ECRYPTFS_TRANSPORT_MISCDEV:
+               rc = ecryptfs_init_ecryptfs_miscdev();
+               if (rc)
+                       ecryptfs_release_messaging(transport);
+               break;
        case ECRYPTFS_TRANSPORT_CONNECTOR:
        case ECRYPTFS_TRANSPORT_RELAYFS:
        default:
@@ -488,27 +674,37 @@ void ecryptfs_release_messaging(unsigned int transport)
                kfree(ecryptfs_msg_ctx_arr);
                mutex_unlock(&ecryptfs_msg_ctx_lists_mux);
        }
-       if (ecryptfs_daemon_id_hash) {
+       if (ecryptfs_daemon_hash) {
                struct hlist_node *elem;
-               struct ecryptfs_daemon_id *id;
+               struct ecryptfs_daemon *daemon;
                int i;
 
-               mutex_lock(&ecryptfs_daemon_id_hash_mux);
+               mutex_lock(&ecryptfs_daemon_hash_mux);
                for (i = 0; i < ecryptfs_hash_buckets; i++) {
-                       hlist_for_each_entry(id, elem,
-                                            &ecryptfs_daemon_id_hash[i],
-                                            id_chain) {
-                               hlist_del(elem);
-                               kfree(id);
+                       int rc;
+
+                       hlist_for_each_entry(daemon, elem,
+                                            &ecryptfs_daemon_hash[i],
+                                            euid_chain) {
+                               rc = ecryptfs_exorcise_daemon(daemon);
+                               if (rc)
+                                       printk(KERN_ERR "%s: Error whilst "
+                                              "attempting to destroy daemon; "
+                                              "rc = [%d]. Dazed and confused, "
+                                              "but trying to continue.\n",
+                                              __func__, rc);
                        }
                }
-               kfree(ecryptfs_daemon_id_hash);
-               mutex_unlock(&ecryptfs_daemon_id_hash_mux);
+               kfree(ecryptfs_daemon_hash);
+               mutex_unlock(&ecryptfs_daemon_hash_mux);
        }
        switch(transport) {
        case ECRYPTFS_TRANSPORT_NETLINK:
                ecryptfs_release_netlink();
                break;
+       case ECRYPTFS_TRANSPORT_MISCDEV:
+               ecryptfs_destroy_ecryptfs_miscdev();
+               break;
        case ECRYPTFS_TRANSPORT_CONNECTOR:
        case ECRYPTFS_TRANSPORT_RELAYFS:
        default:
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
new file mode 100644 (file)
index 0000000..788995e
--- /dev/null
+++ b/fs/ecryptfs/miscdev.c
@@ -0,0 +1,598 @@
+/**
+ * eCryptfs: Linux filesystem encryption layer
+ *
+ * Copyright (C) 2008 International Business Machines Corp.
+ *   Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#include <linux/fs.h>
+#include <linux/hash.h>
+#include <linux/random.h>
+#include <linux/miscdevice.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include "ecryptfs_kernel.h"
+
+static atomic_t ecryptfs_num_miscdev_opens;
+
+/**
+ * ecryptfs_miscdev_poll
+ * @file: dev file (ignored)
+ * @pt: dev poll table (ignored)
+ *
+ * Returns the poll mask
+ */
+static unsigned int
+ecryptfs_miscdev_poll(struct file *file, poll_table *pt)
+{
+       struct ecryptfs_daemon *daemon;
+       unsigned int mask = 0;
+       int rc;
+
+       mutex_lock(&ecryptfs_daemon_hash_mux);
+       /* TODO: Just use file->private_data? */
+       rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid,
+                                         current->nsproxy->user_ns);
+       BUG_ON(rc || !daemon);
+       mutex_lock(&daemon->mux);
+       mutex_unlock(&ecryptfs_daemon_hash_mux);
+       if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
+               printk(KERN_WARNING "%s: Attempt to poll on zombified "
+                      "daemon\n", __func__);
+               goto out_unlock_daemon;
+       }
+       if (daemon->flags & ECRYPTFS_DAEMON_IN_READ)
+               goto out_unlock_daemon;
+       if (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)
+               goto out_unlock_daemon;
+       daemon->flags |= ECRYPTFS_DAEMON_IN_POLL;
+       mutex_unlock(&daemon->mux);
+       poll_wait(file, &daemon->wait, pt);
+       mutex_lock(&daemon->mux);
+       if (!list_empty(&daemon->msg_ctx_out_queue))
+               mask |= POLLIN | POLLRDNORM;
+out_unlock_daemon:
+       daemon->flags &= ~ECRYPTFS_DAEMON_IN_POLL;
+       mutex_unlock(&daemon->mux);
+       return mask;
+}
+
+/**
+ * ecryptfs_miscdev_open
+ * @inode: inode of miscdev handle (ignored)
+ * @file: file for miscdev handle (ignored)
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int
+ecryptfs_miscdev_open(struct inode *inode, struct file *file)
+{
+       struct ecryptfs_daemon *daemon = NULL;
+       int rc;
+
+       mutex_lock(&ecryptfs_daemon_hash_mux);
+       rc = try_module_get(THIS_MODULE);
+       if (rc == 0) {
+               rc = -EIO;
+               printk(KERN_ERR "%s: Error attempting to increment module use "
+                      "count; rc = [%d]\n", __func__, rc);
+               goto out_unlock_daemon_list;
+       }
+       rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid,
+                                         current->nsproxy->user_ns);
+       if (rc || !daemon) {
+               rc = ecryptfs_spawn_daemon(&daemon, current->euid,
+                                          current->nsproxy->user_ns,
+                                          task_pid(current));
+               if (rc) {
+                       printk(KERN_ERR "%s: Error attempting to spawn daemon; "
+                              "rc = [%d]\n", __func__, rc);
+                       goto out_module_put_unlock_daemon_list;
+               }
+       }
+       mutex_lock(&daemon->mux);
+       if (daemon->pid != task_pid(current)) {
+               rc = -EINVAL;
+               printk(KERN_ERR "%s: pid [0x%p] has registered with euid [%d], "
+                      "but pid [0x%p] has attempted to open the handle "
+                      "instead\n", __func__, daemon->pid, daemon->euid,
+                      task_pid(current));
+               goto out_unlock_daemon;
+       }
+       if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) {
+               rc = -EBUSY;
+               printk(KERN_ERR "%s: Miscellaneous device handle may only be "
+                      "opened once per daemon; pid [0x%p] already has this "
+                      "handle open\n", __func__, daemon->pid);
+               goto out_unlock_daemon;
+       }
+       daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN;
+       atomic_inc(&ecryptfs_num_miscdev_opens);
+out_unlock_daemon:
+       mutex_unlock(&daemon->mux);
+out_module_put_unlock_daemon_list:
+       if (rc)
+               module_put(THIS_MODULE);
+out_unlock_daemon_list:
+       mutex_unlock(&ecryptfs_daemon_hash_mux);
+       return rc;
+}
+
+/**
+ * ecryptfs_miscdev_release
+ * @inode: inode of the miscdev handle (ignored)
+ * @file: file for the miscdev handle (ignored)
+ *
+ * Unregisters the daemon associated with the closing process when it
+ * releases its handle on the miscellaneous device.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int
+ecryptfs_miscdev_release(struct inode *inode, struct file *file)
+{
+       struct ecryptfs_daemon *daemon = NULL;
+       int rc;
+
+       mutex_lock(&ecryptfs_daemon_hash_mux);
+       rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid,
+                                         current->nsproxy->user_ns);
+       BUG_ON(rc || !daemon);
+       mutex_lock(&daemon->mux);
+       BUG_ON(daemon->pid != task_pid(current));
+       BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN));
+       daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN;
+       atomic_dec(&ecryptfs_num_miscdev_opens);
+       mutex_unlock(&daemon->mux);
+       rc = ecryptfs_exorcise_daemon(daemon);
+       if (rc) {
+               printk(KERN_CRIT "%s: Fatal error whilst attempting to "
+                      "shut down daemon; rc = [%d]. Please report this "
+                      "bug.\n", __func__, rc);
+               BUG();
+       }
+       module_put(THIS_MODULE);
+       mutex_unlock(&ecryptfs_daemon_hash_mux);
+       return rc;
+}
+
+/**
+ * ecryptfs_send_miscdev
+ * @data: Data to send to daemon; may be NULL
+ * @data_size: Amount of data to send to daemon
+ * @msg_ctx: Message context, which is used to handle the reply. If
+ *           this is NULL, then we do not expect a reply.
+ * @msg_type: Type of message
+ * @msg_flags: Flags for message
+ * @daemon: eCryptfs daemon object
+ *
+ * Add msg_ctx to the daemon's outgoing queue and then wake up any
+ * process blocked on the daemon's wait queue. Must be called with
+ * ecryptfs_daemon_hash_mux held.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+int ecryptfs_send_miscdev(char *data, size_t data_size,
+                         struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
+                         u16 msg_flags, struct ecryptfs_daemon *daemon)
+{
+       int rc = 0;
+
+       mutex_lock(&msg_ctx->mux);
+       if (data) {
+               msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size),
+                                      GFP_KERNEL);
+               if (!msg_ctx->msg) {
+                       rc = -ENOMEM;
+                       printk(KERN_ERR "%s: Out of memory whilst attempting "
+                              "to kmalloc(%Zd, GFP_KERNEL)\n", __func__,
+                              (sizeof(*msg_ctx->msg) + data_size));
+                       goto out_unlock;
+               }
+       } else
+               msg_ctx->msg = NULL;
+       msg_ctx->msg->index = msg_ctx->index;
+       msg_ctx->msg->data_len = data_size;
+       msg_ctx->type = msg_type;
+       if (data) {
+               memcpy(msg_ctx->msg->data, data, data_size);
+               msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size);
+       } else
+               msg_ctx->msg_size = 0;
+       mutex_lock(&daemon->mux);
+       list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue);
+       daemon->num_queued_msg_ctx++;
+       wake_up_interruptible(&daemon->wait);
+       mutex_unlock(&daemon->mux);
+out_unlock:
+       mutex_unlock(&msg_ctx->mux);
+       return rc;
+}
+
+/**
+ * ecryptfs_miscdev_read - format and send message from queue
+ * @file: miscdev handle (ignored)
+ * @buf: User buffer into which to copy the next message on the daemon queue
+ * @count: Amount of space available in @buf
+ * @ppos: Offset in file (ignored)
+ *
+ * Pulls the oldest message from the daemon queue, formats it for
+ * transmission over the miscdev handle, and copies it into @buf
+ *
+ * Returns the number of bytes copied into the user buffer
+ */
+static ssize_t
+ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
+                     loff_t *ppos)
+{
+       struct ecryptfs_daemon *daemon;
+       struct ecryptfs_msg_ctx *msg_ctx;
+       size_t packet_length_size;
+       u32 counter_nbo;
+       char packet_length[3];
+       size_t i;
+       size_t total_length;
+       int rc;
+
+       mutex_lock(&ecryptfs_daemon_hash_mux);
+       /* TODO: Just use file->private_data? */
+       rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid,
+                                         current->nsproxy->user_ns);
+       BUG_ON(rc || !daemon);
+       mutex_lock(&daemon->mux);
+       if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
+               rc = 0;
+               printk(KERN_WARNING "%s: Attempt to read from zombified "
+                      "daemon\n", __func__);
+               goto out_unlock_daemon;
+       }
+       if (daemon->flags & ECRYPTFS_DAEMON_IN_READ) {
+               rc = 0;
+               goto out_unlock_daemon;
+       }
+       /* This daemon will not go away so long as this flag is set */
+       daemon->flags |= ECRYPTFS_DAEMON_IN_READ;
+       mutex_unlock(&ecryptfs_daemon_hash_mux);
+check_list:
+       if (list_empty(&daemon->msg_ctx_out_queue)) {
+               mutex_unlock(&daemon->mux);
+               rc = wait_event_interruptible(
+                       daemon->wait, !list_empty(&daemon->msg_ctx_out_queue));
+               mutex_lock(&daemon->mux);
+               if (rc < 0) {
+                       rc = 0;
+                       goto out_unlock_daemon;
+               }
+       }
+       if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) {
+               rc = 0;
+               goto out_unlock_daemon;
+       }
+       if (list_empty(&daemon->msg_ctx_out_queue)) {
+               /* Something else jumped in since the
+                * wait_event_interruptible() and removed the
+                * message from the queue; try again */
+               goto check_list;
+       }
+       BUG_ON(current->euid != daemon->euid);
+       BUG_ON(current->nsproxy->user_ns != daemon->user_ns);
+       BUG_ON(task_pid(current) != daemon->pid);
+       msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue,
+                                  struct ecryptfs_msg_ctx, daemon_out_list);
+       BUG_ON(!msg_ctx);
+       mutex_lock(&msg_ctx->mux);
+       if (msg_ctx->msg) {
+               rc = ecryptfs_write_packet_length(packet_length,
+                                                 msg_ctx->msg_size,
+                                                 &packet_length_size);
+               if (rc) {
+                       rc = 0;
+                       printk(KERN_WARNING "%s: Error writing packet length; "
+                              "rc = [%d]\n", __func__, rc);
+                       goto out_unlock_msg_ctx;
+               }
+       } else {
+               packet_length_size = 0;
+               msg_ctx->msg_size = 0;
+       }
+       /* miscdevfs packet format:
+        *  Octet 0: Type
+        *  Octets 1-4: network byte order msg_ctx->counter
+        *  Octets 5-N0: Size of struct ecryptfs_message to follow
+        *  Octets N0-N1: struct ecryptfs_message (including data)
+        *
+        *  Octets 5-N1 not written if the packet type does not
+        *  include a message */
+       total_length = (1 + 4 + packet_length_size + msg_ctx->msg_size);
+       if (count < total_length) {
+               rc = 0;
+               printk(KERN_WARNING "%s: Only given user buffer of "
+                      "size [%Zd], but we need [%Zd] to read the "
+                      "pending message\n", __func__, count, total_length);
+               goto out_unlock_msg_ctx;
+       }
+       i = 0;
+       buf[i++] = msg_ctx->type;
+       counter_nbo = cpu_to_be32(msg_ctx->counter);
+       memcpy(&buf[i], (char *)&counter_nbo, 4);
+       i += 4;
+       if (msg_ctx->msg) {
+               memcpy(&buf[i], packet_length, packet_length_size);
+               i += packet_length_size;
+               rc = copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size);
+               if (rc) {
+                       printk(KERN_ERR "%s: copy_to_user returned error "
+                              "[%d]\n", __func__, rc);
+                       goto out_unlock_msg_ctx;
+               }
+               i += msg_ctx->msg_size;
+       }
+       rc = i;
+       list_del(&msg_ctx->daemon_out_list);
+       kfree(msg_ctx->msg);
+       msg_ctx->msg = NULL;
+       /* We do not expect a reply from the userspace daemon for any
+        * message type other than ECRYPTFS_MSG_REQUEST */
+       if (msg_ctx->type != ECRYPTFS_MSG_REQUEST)
+               ecryptfs_msg_ctx_alloc_to_free(msg_ctx);
+out_unlock_msg_ctx:
+       mutex_unlock(&msg_ctx->mux);
+out_unlock_daemon:
+       daemon->flags &= ~ECRYPTFS_DAEMON_IN_READ;
+       mutex_unlock(&daemon->mux);
+       return rc;
+}
+
+/**
+ * ecryptfs_miscdev_helo
+ * @euid: effective user id of the process sending the helo packet
+ * @user_ns: The namespace in which @euid applies
+ * @pid: process id of the process sending the helo packet
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int ecryptfs_miscdev_helo(uid_t euid, struct user_namespace *user_ns,
+                                struct pid *pid)
+{
+       int rc;
+
+       rc = ecryptfs_process_helo(ECRYPTFS_TRANSPORT_MISCDEV, euid, user_ns,
+                                  pid);
+       if (rc)
+               printk(KERN_WARNING "Error processing HELO; rc = [%d]\n", rc);
+       return rc;
+}
+
+/**
+ * ecryptfs_miscdev_quit
+ * @euid: effective user id of the process sending the quit packet
+ * @user_ns: The namespace in which @euid applies
+ * @pid: process id of the process sending the quit packet
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int ecryptfs_miscdev_quit(uid_t euid, struct user_namespace *user_ns,
+                                struct pid *pid)
+{
+       int rc;
+
+       rc = ecryptfs_process_quit(euid, user_ns, pid);
+       if (rc)
+               printk(KERN_WARNING
+                      "Error processing QUIT message; rc = [%d]\n", rc);
+       return rc;
+}
+
+/**
+ * ecryptfs_miscdev_response - process response to message previously sent to daemon
+ * @data: Bytes comprising struct ecryptfs_message
+ * @data_size: sizeof(struct ecryptfs_message) + data len
+ * @euid: Effective user id of the process sending the miscdev response
+ * @user_ns: The namespace in which @euid applies
+ * @pid: Process id of the process sending the miscdev response
+ * @seq: Sequence number for miscdev response packet
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+static int ecryptfs_miscdev_response(char *data, size_t data_size,
+                                    uid_t euid, struct user_namespace *user_ns,
+                                    struct pid *pid, u32 seq)
+{
+       struct ecryptfs_message *msg = (struct ecryptfs_message *)data;
+       int rc;
+
+       if ((sizeof(*msg) + msg->data_len) != data_size) {
+               printk(KERN_WARNING "%s: (sizeof(*msg) + msg->data_len) = "
+                      "[%Zd]; data_size = [%Zd]. Invalid packet.\n", __func__,
+                      (sizeof(*msg) + msg->data_len), data_size);
+               rc = -EINVAL;
+               goto out;
+       }
+       rc = ecryptfs_process_response(msg, euid, user_ns, pid, seq);
+       if (rc)
+               printk(KERN_ERR
+                      "Error processing response message; rc = [%d]\n", rc);
+out:
+       return rc;
+}
+
+/**
+ * ecryptfs_miscdev_write - handle write to daemon miscdev handle
+ * @file: File for misc dev handle (ignored)
+ * @buf: Buffer containing user data
+ * @count: Amount of data in @buf
+ * @ppos: Pointer to offset in file (ignored)
+ *
+ * miscdevfs packet format:
+ *  Octet 0: Type
+ *  Octets 1-4: network byte order msg_ctx->counter (0's for non-response)
+ *  Octets 5-N0: Size of struct ecryptfs_message to follow
+ *  Octets N0-N1: struct ecryptfs_message (including data)
+ *
+ * Returns the number of bytes read from @buf
+ */
+static ssize_t
+ecryptfs_miscdev_write(struct file *file, const char __user *buf,
+                      size_t count, loff_t *ppos)
+{
+       u32 counter_nbo, seq;
+       size_t packet_size, packet_size_length, i;
+       ssize_t sz = 0;
+       char *data;
+       int rc;
+
+       if (count == 0)
+               goto out;
+       data = kmalloc(count, GFP_KERNEL);
+       if (!data) {
+               printk(KERN_ERR "%s: Out of memory whilst attempting to "
+                      "kmalloc([%Zd], GFP_KERNEL)\n", __func__, count);
+               goto out;
+       }
+       rc = copy_from_user(data, buf, count);
+       if (rc) {
+               printk(KERN_ERR "%s: copy_from_user returned error [%d]\n",
+                      __func__, rc);
+               goto out_free;
+       }
+       sz = count;
+       i = 0;
+       switch (data[i++]) {
+       case ECRYPTFS_MSG_RESPONSE:
+               if (count < (1 + 4 + 1 + sizeof(struct ecryptfs_message))) {
+                       printk(KERN_WARNING "%s: Minimum acceptable packet "
+                              "size is [%Zd], but amount of data written is "
+                              "only [%Zd]. Discarding response packet.\n",
+                              __func__,
+                              (1 + 4 + 1 + sizeof(struct ecryptfs_message)),
+                              count);
+                       goto out_free;
+               }
+               memcpy((char *)&counter_nbo, &data[i], 4);
+               seq = be32_to_cpu(counter_nbo);
+               i += 4;
+               rc = ecryptfs_parse_packet_length(&data[i], &packet_size,
+                                                 &packet_size_length);
+               if (rc) {
+                       printk(KERN_WARNING "%s: Error parsing packet length; "
+                              "rc = [%d]\n", __func__, rc);
+                       goto out_free;
+               }
+               i += packet_size_length;
+               if ((1 + 4 + packet_size_length + packet_size) != count) {
+                       printk(KERN_WARNING "%s: (1 + packet_size_length([%Zd])"
+                              " + packet_size([%Zd]))([%Zd]) != "
+                              "count([%Zd]). Invalid packet format.\n",
+                              __func__, packet_size_length, packet_size,
+                              (1 + packet_size_length + packet_size), count);
+                       goto out_free;
+               }
+               rc = ecryptfs_miscdev_response(&data[i], packet_size,
+                                              current->euid,
+                                              current->nsproxy->user_ns,
+                                              task_pid(current), seq);
+               if (rc)
+                       printk(KERN_WARNING "%s: Failed to deliver miscdev "
+                              "response to requesting operation; rc = [%d]\n",
+                              __func__, rc);
+               break;
+       case ECRYPTFS_MSG_HELO:
+               rc = ecryptfs_miscdev_helo(current->euid,
+                                          current->nsproxy->user_ns,
+                                          task_pid(current));
+               if (rc) {
+                       printk(KERN_ERR "%s: Error attempting to process "
+                              "helo from pid [0x%p]; rc = [%d]\n", __func__,
+                              task_pid(current), rc);
+                       goto out_free;
+               }
+               break;
+       case ECRYPTFS_MSG_QUIT:
+               rc = ecryptfs_miscdev_quit(current->euid,
+                                          current->nsproxy->user_ns,
+                                          task_pid(current));
+               if (rc) {
+                       printk(KERN_ERR "%s: Error attempting to process "
+                              "quit from pid [0x%p]; rc = [%d]\n", __func__,
+                              task_pid(current), rc);
+                       goto out_free;
+               }
+               break;
+       default:
+               ecryptfs_printk(KERN_WARNING, "Dropping miscdev "
+                               "message of unrecognized type [%d]\n",
+                               data[0]);
+               break;
+       }
+out_free:
+       kfree(data);
+out:
+       return sz;
+}
+
+
+static const struct file_operations ecryptfs_miscdev_fops = {
+       .open    = ecryptfs_miscdev_open,
+       .poll    = ecryptfs_miscdev_poll,
+       .read    = ecryptfs_miscdev_read,
+       .write   = ecryptfs_miscdev_write,
+       .release = ecryptfs_miscdev_release,
+};
+
+static struct miscdevice ecryptfs_miscdev = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name  = "ecryptfs",
+       .fops  = &ecryptfs_miscdev_fops
+};
+
+/**
+ * ecryptfs_init_ecryptfs_miscdev
+ *
+ * Messages sent to the userspace daemon from the kernel are placed on
+ * a queue associated with the daemon. The next read against the
+ * miscdev handle by that daemon will return the oldest message placed
+ * on the message queue for the daemon.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+int ecryptfs_init_ecryptfs_miscdev(void)
+{
+       int rc;
+
+       atomic_set(&ecryptfs_num_miscdev_opens, 0);
+       mutex_lock(&ecryptfs_daemon_hash_mux);
+       rc = misc_register(&ecryptfs_miscdev);
+       if (rc)
+               printk(KERN_ERR "%s: Failed to register miscellaneous device "
+                      "for communications with userspace daemons; rc = [%d]\n",
+                      __func__, rc);
+       mutex_unlock(&ecryptfs_daemon_hash_mux);
+       return rc;
+}
+
+/**
+ * ecryptfs_destroy_ecryptfs_miscdev
+ *
+ * All of the daemons must be exorcised prior to calling this
+ * function.
+ */
+void ecryptfs_destroy_ecryptfs_miscdev(void)
+{
+       BUG_ON(atomic_read(&ecryptfs_num_miscdev_opens) != 0);
+       misc_deregister(&ecryptfs_miscdev);
+}
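To make the packet layout documented in ecryptfs_miscdev_read() and ecryptfs_miscdev_write() above more concrete, here is a hedged userspace sketch that reads one packet and decodes its fixed header. The /dev/ecryptfs node name is inferred from the miscdevice .name field and assumes a udev-style setup; the buffer size is arbitrary, and the packet-length-encoded size field plus the message body are treated as opaque trailing octets.

#include <arpa/inet.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Read one packet from the eCryptfs miscdev handle and print its
 * fixed header: octet 0 is the type, octets 1-4 are the message
 * counter in network byte order; everything after that is the
 * packet-length-encoded size plus the struct ecryptfs_message body. */
int main(void)
{
        unsigned char buf[4096];
        uint32_t counter_nbo;
        ssize_t n;
        int fd;

        fd = open("/dev/ecryptfs", O_RDWR);     /* assumed device node */
        if (fd < 0)
                return 1;
        n = read(fd, buf, sizeof(buf));         /* blocks until a message is queued */
        if (n < 5) {
                close(fd);
                return 1;
        }
        memcpy(&counter_nbo, &buf[1], 4);
        printf("type=%u counter=%u trailing_octets=%zd\n",
               (unsigned int)buf[0], (unsigned int)ntohl(counter_nbo), n - 5);
        close(fd);
        return 0;
}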
index 6df1debdccce14aabbf0e63193b66ec0c8366d3e..2b6fe1e6e8ba38d85f5e6b16e63bd8d55804a35a 100644 (file)
@@ -153,7 +153,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
                        flush_dcache_page(page);
                        if (rc) {
                                printk(KERN_ERR "%s: Error reading xattr "
-                                      "region; rc = [%d]\n", __FUNCTION__, rc);
+                                      "region; rc = [%d]\n", __func__, rc);
                                goto out;
                        }
                } else {
@@ -169,7 +169,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page,
                        if (rc) {
                                printk(KERN_ERR "%s: Error attempting to read "
                                       "extent at offset [%lld] in the lower "
-                                      "file; rc = [%d]\n", __FUNCTION__,
+                                      "file; rc = [%d]\n", __func__,
                                       lower_offset, rc);
                                goto out;
                        }
@@ -212,7 +212,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page)
                                       "the encrypted content from the lower "
                                       "file whilst inserting the metadata "
                                       "from the xattr into the header; rc = "
-                                      "[%d]\n", __FUNCTION__, rc);
+                                      "[%d]\n", __func__, rc);
                                goto out;
                        }
 
@@ -293,7 +293,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page,
                        if (rc) {
                                printk(KERN_ERR "%s: Error attemping to read "
                                       "lower page segment; rc = [%d]\n",
-                                      __FUNCTION__, rc);
+                                      __func__, rc);
                                ClearPageUptodate(page);
                                goto out;
                        } else
@@ -308,7 +308,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page,
                                               "from the lower file whilst "
                                               "inserting the metadata from "
                                               "the xattr into the header; rc "
-                                              "= [%d]\n", __FUNCTION__, rc);
+                                              "= [%d]\n", __func__, rc);
                                        ClearPageUptodate(page);
                                        goto out;
                                }
@@ -320,7 +320,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page,
                                if (rc) {
                                        printk(KERN_ERR "%s: Error reading "
                                               "page; rc = [%d]\n",
-                                              __FUNCTION__, rc);
+                                              __func__, rc);
                                        ClearPageUptodate(page);
                                        goto out;
                                }
@@ -331,7 +331,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page,
                        if (rc) {
                                printk(KERN_ERR "%s: Error decrypting page "
                                       "at index [%ld]; rc = [%d]\n",
-                                      __FUNCTION__, page->index, rc);
+                                      __func__, page->index, rc);
                                ClearPageUptodate(page);
                                goto out;
                        }
@@ -348,7 +348,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page,
                        if (rc) {
                                printk(KERN_ERR "%s: Error on attempt to "
                                       "truncate to (higher) offset [%lld];"
-                                      " rc = [%d]\n", __FUNCTION__,
+                                      " rc = [%d]\n", __func__,
                                       prev_page_end_size, rc);
                                goto out;
                        }
@@ -389,7 +389,7 @@ static int ecryptfs_write_inode_size_to_header(struct inode *ecryptfs_inode)
        kfree(file_size_virt);
        if (rc)
                printk(KERN_ERR "%s: Error writing file size to header; "
-                      "rc = [%d]\n", __FUNCTION__, rc);
+                      "rc = [%d]\n", __func__, rc);
 out:
        return rc;
 }
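The hunks in this file, and in several files further down, are purely mechanical: the GCC-specific __FUNCTION__ identifier is replaced by the C99-standard __func__. A one-function illustration of the resulting idiom follows; the function name, message, and error code are made up.

#include <linux/errno.h>
#include <linux/kernel.h>

/* __func__ expands to the name of the enclosing function, so the log
 * line below would read "example_op: something failed; rc = [-22]". */
static int example_op(void)
{
        int rc = -EINVAL;

        printk(KERN_ERR "%s: something failed; rc = [%d]\n", __func__, rc);
        return rc;
}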
index f638a698dc525350bc44054f567217d52c0fca71..e0abad62b39535fb8d98368266707af464b050be 100644 (file)
@@ -44,8 +44,8 @@ static struct sock *ecryptfs_nl_sock;
  * upon sending the message; non-zero upon error.
  */
 int ecryptfs_send_netlink(char *data, int data_len,
-                         struct ecryptfs_msg_ctx *msg_ctx, u16 msg_type,
-                         u16 msg_flags, pid_t daemon_pid)
+                         struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type,
+                         u16 msg_flags, struct pid *daemon_pid)
 {
        struct sk_buff *skb;
        struct nlmsghdr *nlh;
@@ -60,7 +60,7 @@ int ecryptfs_send_netlink(char *data, int data_len,
                ecryptfs_printk(KERN_ERR, "Failed to allocate socket buffer\n");
                goto out;
        }
-       nlh = NLMSG_PUT(skb, daemon_pid, msg_ctx ? msg_ctx->counter : 0,
+       nlh = NLMSG_PUT(skb, pid_nr(daemon_pid), msg_ctx ? msg_ctx->counter : 0,
                        msg_type, payload_len);
        nlh->nlmsg_flags = msg_flags;
        if (msg_ctx && payload_len) {
@@ -69,7 +69,7 @@ int ecryptfs_send_netlink(char *data, int data_len,
                msg->data_len = data_len;
                memcpy(msg->data, data, data_len);
        }
-       rc = netlink_unicast(ecryptfs_nl_sock, skb, daemon_pid, 0);
+       rc = netlink_unicast(ecryptfs_nl_sock, skb, pid_nr(daemon_pid), 0);
        if (rc < 0) {
                ecryptfs_printk(KERN_ERR, "Failed to send eCryptfs netlink "
                                "message; rc = [%d]\n", rc);
@@ -99,6 +99,7 @@ static int ecryptfs_process_nl_response(struct sk_buff *skb)
 {
        struct nlmsghdr *nlh = nlmsg_hdr(skb);
        struct ecryptfs_message *msg = NLMSG_DATA(nlh);
+       struct pid *pid;
        int rc;
 
        if (skb->len - NLMSG_HDRLEN - sizeof(*msg) != msg->data_len) {
@@ -107,8 +108,10 @@ static int ecryptfs_process_nl_response(struct sk_buff *skb)
                                "incorrectly specified data length\n");
                goto out;
        }
-       rc = ecryptfs_process_response(msg, NETLINK_CREDS(skb)->uid,
-                                      NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq);
+       pid = find_get_pid(NETLINK_CREDS(skb)->pid);
+       rc = ecryptfs_process_response(msg, NETLINK_CREDS(skb)->uid, NULL,
+                                      pid, nlh->nlmsg_seq);
+       put_pid(pid);
        if (rc)
                printk(KERN_ERR
                       "Error processing response message; rc = [%d]\n", rc);
@@ -126,11 +129,13 @@ out:
  */
 static int ecryptfs_process_nl_helo(struct sk_buff *skb)
 {
+       struct pid *pid;
        int rc;
 
+       pid = find_get_pid(NETLINK_CREDS(skb)->pid);
        rc = ecryptfs_process_helo(ECRYPTFS_TRANSPORT_NETLINK,
-                                  NETLINK_CREDS(skb)->uid,
-                                  NETLINK_CREDS(skb)->pid);
+                                  NETLINK_CREDS(skb)->uid, NULL, pid);
+       put_pid(pid);
        if (rc)
                printk(KERN_WARNING "Error processing HELO; rc = [%d]\n", rc);
        return rc;
@@ -147,10 +152,12 @@ static int ecryptfs_process_nl_helo(struct sk_buff *skb)
  */
 static int ecryptfs_process_nl_quit(struct sk_buff *skb)
 {
+       struct pid *pid;
        int rc;
 
-       rc = ecryptfs_process_quit(NETLINK_CREDS(skb)->uid,
-                                  NETLINK_CREDS(skb)->pid);
+       pid = find_get_pid(NETLINK_CREDS(skb)->pid);
+       rc = ecryptfs_process_quit(NETLINK_CREDS(skb)->uid, NULL, pid);
+       put_pid(pid);
        if (rc)
                printk(KERN_WARNING
                       "Error processing QUIT message; rc = [%d]\n", rc);
@@ -176,20 +183,20 @@ static void ecryptfs_receive_nl_message(struct sk_buff *skb)
                goto free;
        }
        switch (nlh->nlmsg_type) {
-               case ECRYPTFS_NLMSG_RESPONSE:
+               case ECRYPTFS_MSG_RESPONSE:
                        if (ecryptfs_process_nl_response(skb)) {
                                ecryptfs_printk(KERN_WARNING, "Failed to "
                                                "deliver netlink response to "
                                                "requesting operation\n");
                        }
                        break;
-               case ECRYPTFS_NLMSG_HELO:
+               case ECRYPTFS_MSG_HELO:
                        if (ecryptfs_process_nl_helo(skb)) {
                                ecryptfs_printk(KERN_WARNING, "Failed to "
                                                "fulfill HELO request\n");
                        }
                        break;
-               case ECRYPTFS_NLMSG_QUIT:
+               case ECRYPTFS_MSG_QUIT:
                        if (ecryptfs_process_nl_quit(skb)) {
                                ecryptfs_printk(KERN_WARNING, "Failed to "
                                                "fulfill QUIT request\n");
index 0c4928623bbc414f7f0a4861e4dc727bfcdf132f..ebf55150be56cdb26b6fab83990026fadaa71aaf 100644 (file)
@@ -55,7 +55,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
        set_fs(fs_save);
        if (octets_written < 0) {
                printk(KERN_ERR "%s: octets_written = [%td]; "
-                      "expected [%td]\n", __FUNCTION__, octets_written, size);
+                      "expected [%td]\n", __func__, octets_written, size);
                rc = -EINVAL;
        }
        mutex_unlock(&inode_info->lower_file_mutex);
@@ -153,7 +153,7 @@ int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset,
                        rc = PTR_ERR(ecryptfs_page);
                        printk(KERN_ERR "%s: Error getting page at "
                               "index [%ld] from eCryptfs inode "
-                              "mapping; rc = [%d]\n", __FUNCTION__,
+                              "mapping; rc = [%d]\n", __func__,
                               ecryptfs_page_idx, rc);
                        goto out;
                }
@@ -165,7 +165,7 @@ int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset,
                        if (rc) {
                                printk(KERN_ERR "%s: Error decrypting "
                                       "page; rc = [%d]\n",
-                                      __FUNCTION__, rc);
+                                      __func__, rc);
                                ClearPageUptodate(ecryptfs_page);
                                page_cache_release(ecryptfs_page);
                                goto out;
@@ -202,7 +202,7 @@ int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset,
                page_cache_release(ecryptfs_page);
                if (rc) {
                        printk(KERN_ERR "%s: Error encrypting "
-                              "page; rc = [%d]\n", __FUNCTION__, rc);
+                              "page; rc = [%d]\n", __func__, rc);
                        goto out;
                }
                pos += num_bytes;
@@ -254,7 +254,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
        set_fs(fs_save);
        if (octets_read < 0) {
                printk(KERN_ERR "%s: octets_read = [%td]; "
-                      "expected [%td]\n", __FUNCTION__, octets_read, size);
+                      "expected [%td]\n", __func__, octets_read, size);
                rc = -EINVAL;
        }
        mutex_unlock(&inode_info->lower_file_mutex);
@@ -327,7 +327,7 @@ int ecryptfs_read(char *data, loff_t offset, size_t size,
                printk(KERN_ERR "%s: Attempt to read data past the end of the "
                        "file; offset = [%lld]; size = [%td]; "
                       "ecryptfs_file_size = [%lld]\n",
-                      __FUNCTION__, offset, size, ecryptfs_file_size);
+                      __func__, offset, size, ecryptfs_file_size);
                goto out;
        }
        pos = offset;
@@ -345,14 +345,14 @@ int ecryptfs_read(char *data, loff_t offset, size_t size,
                        rc = PTR_ERR(ecryptfs_page);
                        printk(KERN_ERR "%s: Error getting page at "
                               "index [%ld] from eCryptfs inode "
-                              "mapping; rc = [%d]\n", __FUNCTION__,
+                              "mapping; rc = [%d]\n", __func__,
                               ecryptfs_page_idx, rc);
                        goto out;
                }
                rc = ecryptfs_decrypt_page(ecryptfs_page);
                if (rc) {
                        printk(KERN_ERR "%s: Error decrypting "
-                              "page; rc = [%d]\n", __FUNCTION__, rc);
+                              "page; rc = [%d]\n", __func__, rc);
                        ClearPageUptodate(ecryptfs_page);
                        page_cache_release(ecryptfs_page);
                        goto out;
index a415f42d32cf66aba720da4e05b2a0ccfdd55663..221086fef1743545c143a270bd1bc228cfb33561 100644 (file)
@@ -257,25 +257,6 @@ static inline int ep_cmp_ffd(struct epoll_filefd *p1,
                (p1->file < p2->file ? -1 : p1->fd - p2->fd));
 }
 
-/* Special initialization for the RB tree node to detect linkage */
-static inline void ep_rb_initnode(struct rb_node *n)
-{
-       rb_set_parent(n, n);
-}
-
-/* Removes a node from the RB tree and marks it for a fast is-linked check */
-static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r)
-{
-       rb_erase(n, r);
-       rb_set_parent(n, n);
-}
-
-/* Fast check to verify that the item is linked to the main RB tree */
-static inline int ep_rb_linked(struct rb_node *n)
-{
-       return rb_parent(n) != n;
-}
-
 /* Tells us if the item is currently linked */
 static inline int ep_is_linked(struct list_head *p)
 {
@@ -283,13 +264,13 @@ static inline int ep_is_linked(struct list_head *p)
 }
 
 /* Get the "struct epitem" from a wait queue pointer */
-static inline struct epitem * ep_item_from_wait(wait_queue_t *p)
+static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
 {
        return container_of(p, struct eppoll_entry, wait)->base;
 }
 
 /* Get the "struct epitem" from an epoll queue wrapper */
-static inline struct epitem * ep_item_from_epqueue(poll_table *p)
+static inline struct epitem *ep_item_from_epqueue(poll_table *p)
 {
        return container_of(p, struct ep_pqueue, pt)->epi;
 }
@@ -411,8 +392,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
                list_del_init(&epi->fllink);
        spin_unlock(&file->f_ep_lock);
 
-       if (ep_rb_linked(&epi->rbn))
-               ep_rb_erase(&epi->rbn, &ep->rbr);
+       rb_erase(&epi->rbn, &ep->rbr);
 
        spin_lock_irqsave(&ep->lock, flags);
        if (ep_is_linked(&epi->rdllink))
@@ -728,7 +708,6 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
                goto error_return;
 
        /* Item initialization follow here ... */
-       ep_rb_initnode(&epi->rbn);
        INIT_LIST_HEAD(&epi->rdllink);
        INIT_LIST_HEAD(&epi->fllink);
        INIT_LIST_HEAD(&epi->pwqlist);
@@ -1262,7 +1241,7 @@ error_return:
        return error;
 }
 
-#ifdef TIF_RESTORE_SIGMASK
+#ifdef HAVE_SET_RESTORE_SIGMASK
 
 /*
  * Implement the event wait interface for the eventpoll file. It is the kernel
@@ -1300,7 +1279,7 @@ asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
                if (error == -EINTR) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                               sizeof(sigsaved));
-                       set_thread_flag(TIF_RESTORE_SIGMASK);
+                       set_restore_sigmask();
                } else
                        sigprocmask(SIG_SETMASK, &sigsaved, NULL);
        }
@@ -1308,7 +1287,7 @@ asmlinkage long sys_epoll_pwait(int epfd, struct epoll_event __user *events,
        return error;
 }
 
-#endif /* #ifdef TIF_RESTORE_SIGMASK */
+#endif /* HAVE_SET_RESTORE_SIGMASK */
 
 static int __init eventpoll_init(void)
 {
@@ -1330,4 +1309,3 @@ static int __init eventpoll_init(void)
        return 0;
 }
 fs_initcall(eventpoll_init);
-
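For context, the TIF_RESTORE_SIGMASK to set_restore_sigmask() change above is part of a tree-wide conversion; the idiom around it, which sys_epoll_pwait() follows, is sketched below with the actual wait elided. do_the_wait() is a placeholder, and the sigmask handling mirrors the hunk above.

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/string.h>
#include <linux/thread_info.h>

static long do_the_wait(void)           /* placeholder for the real wait */
{
        return -EINTR;
}

/* Temporarily install the caller-supplied signal mask for the wait.
 * If the wait is interrupted, stash the old mask in saved_sigmask and
 * let the signal-delivery path restore it via set_restore_sigmask();
 * otherwise restore it directly. */
static long example_pwait(sigset_t *ksigmask)
{
        sigset_t sigsaved;
        long error;

        if (ksigmask)
                sigprocmask(SIG_SETMASK, ksigmask, &sigsaved);
        error = do_the_wait();
        if (ksigmask) {
                if (error == -EINTR) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                               sizeof(sigsaved));
                        set_restore_sigmask();
                } else
                        sigprocmask(SIG_SETMASK, &sigsaved, NULL);
        }
        return error;
}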
index b152029f18f61e68e260d63bab4a9720f2e31ab6..9f9f931ef949987e29ba89204387c1428e491c66 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -735,6 +735,7 @@ static int exec_mmap(struct mm_struct *mm)
        tsk->active_mm = mm;
        activate_mm(active_mm, mm);
        task_unlock(tsk);
+       mm_update_next_owner(mm);
        arch_pick_mmap_layout(mm);
        if (old_mm) {
                up_read(&old_mm->mmap_sem);
@@ -765,9 +766,7 @@ static int de_thread(struct task_struct *tsk)
 
        /*
         * Kill all other threads in the thread group.
-        * We must hold tasklist_lock to call zap_other_threads.
         */
-       read_lock(&tasklist_lock);
        spin_lock_irq(lock);
        if (signal_group_exit(sig)) {
                /*
@@ -775,21 +774,10 @@ static int de_thread(struct task_struct *tsk)
                 * return so that the signal is processed.
                 */
                spin_unlock_irq(lock);
-               read_unlock(&tasklist_lock);
                return -EAGAIN;
        }
-
-       /*
-        * child_reaper ignores SIGKILL, change it now.
-        * Reparenting needs write_lock on tasklist_lock,
-        * so it is safe to do it under read_lock.
-        */
-       if (unlikely(tsk->group_leader == task_child_reaper(tsk)))
-               task_active_pid_ns(tsk)->child_reaper = tsk;
-
        sig->group_exit_task = tsk;
        zap_other_threads(tsk);
-       read_unlock(&tasklist_lock);
 
        /* Account for the thread group leader hanging around: */
        count = thread_group_leader(tsk) ? 1 : 2;
@@ -810,7 +798,7 @@ static int de_thread(struct task_struct *tsk)
        if (!thread_group_leader(tsk)) {
                leader = tsk->group_leader;
 
-               sig->notify_count = -1;
+               sig->notify_count = -1; /* for exit_notify() */
                for (;;) {
                        write_lock_irq(&tasklist_lock);
                        if (likely(leader->exit_state))
@@ -820,6 +808,8 @@ static int de_thread(struct task_struct *tsk)
                        schedule();
                }
 
+               if (unlikely(task_child_reaper(tsk) == leader))
+                       task_active_pid_ns(tsk)->child_reaper = tsk;
                /*
                 * The only record we have of the real-time age of a
                 * process, regardless of execs it's done, is start_time.
@@ -963,6 +953,8 @@ int flush_old_exec(struct linux_binprm * bprm)
        if (retval)
                goto out;
 
+       set_mm_exe_file(bprm->mm, bprm->file);
+
        /*
         * Release all of the old mmap stuff
         */
@@ -1268,7 +1260,6 @@ int do_execve(char * filename,
 {
        struct linux_binprm *bprm;
        struct file *file;
-       unsigned long env_p;
        struct files_struct *displaced;
        int retval;
 
@@ -1321,11 +1312,9 @@ int do_execve(char * filename,
        if (retval < 0)
                goto out;
 
-       env_p = bprm->p;
        retval = copy_strings(bprm->argc, argv, bprm);
        if (retval < 0)
                goto out;
-       bprm->argv_len = env_p - bprm->p;
 
        retval = search_binary_handler(bprm,regs);
        if (retval >= 0) {
index 109ab5e44eca3f6d5961f721b582d137779c6265..cc91227d3bb88dcdce06b83806baf6dd7f912225 100644 (file)
@@ -150,12 +150,12 @@ reconnect_path(struct vfsmount *mnt, struct dentry *target_dir)
                        if (IS_ERR(ppd)) {
                                err = PTR_ERR(ppd);
                                dprintk("%s: get_parent of %ld failed, err %d\n",
-                                       __FUNCTION__, pd->d_inode->i_ino, err);
+                                       __func__, pd->d_inode->i_ino, err);
                                dput(pd);
                                break;
                        }
 
-                       dprintk("%s: find name of %lu in %lu\n", __FUNCTION__,
+                       dprintk("%s: find name of %lu in %lu\n", __func__,
                                pd->d_inode->i_ino, ppd->d_inode->i_ino);
                        err = exportfs_get_name(mnt, ppd, nbuf, pd);
                        if (err) {
@@ -168,14 +168,14 @@ reconnect_path(struct vfsmount *mnt, struct dentry *target_dir)
                                        continue;
                                break;
                        }
-                       dprintk("%s: found name: %s\n", __FUNCTION__, nbuf);
+                       dprintk("%s: found name: %s\n", __func__, nbuf);
                        mutex_lock(&ppd->d_inode->i_mutex);
                        npd = lookup_one_len(nbuf, ppd, strlen(nbuf));
                        mutex_unlock(&ppd->d_inode->i_mutex);
                        if (IS_ERR(npd)) {
                                err = PTR_ERR(npd);
                                dprintk("%s: lookup failed: %d\n",
-                                       __FUNCTION__, err);
+                                       __func__, err);
                                dput(ppd);
                                dput(pd);
                                break;
@@ -188,7 +188,7 @@ reconnect_path(struct vfsmount *mnt, struct dentry *target_dir)
                        if (npd == pd)
                                noprogress = 0;
                        else
-                               printk("%s: npd != pd\n", __FUNCTION__);
+                               printk("%s: npd != pd\n", __func__);
                        dput(npd);
                        dput(ppd);
                        if (IS_ROOT(pd)) {
index e7b2bafa1dd91cd69f8caf4597654a6355c3a666..10bb02c3f25cfcdee98897b6d7efe96e5f3ae6a1 100644 (file)
@@ -106,7 +106,7 @@ static int ext2_valid_block_bitmap(struct super_block *sb,
                return 1;
 
 err_out:
-       ext2_error(sb, __FUNCTION__,
+       ext2_error(sb, __func__,
                        "Invalid block bitmap - "
                        "block_group = %d, block = %lu",
                        block_group, bitmap_blk);
@@ -132,7 +132,7 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
        bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
-               ext2_error(sb, __FUNCTION__,
+               ext2_error(sb, __func__,
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %u",
                            block_group, le32_to_cpu(desc->bg_block_bitmap));
@@ -143,17 +143,18 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
 
        if (bh_submit_read(bh) < 0) {
                brelse(bh);
-               ext2_error(sb, __FUNCTION__,
+               ext2_error(sb, __func__,
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %u",
                            block_group, le32_to_cpu(desc->bg_block_bitmap));
                return NULL;
        }
-       if (!ext2_valid_block_bitmap(sb, desc, block_group, bh)) {
-               brelse(bh);
-               return NULL;
-       }
 
+       ext2_valid_block_bitmap(sb, desc, block_group, bh);
+       /*
+        * file system mounted not to panic on error, continue with corrupt
+        * bitmap
+        */
        return bh;
 }
 
@@ -245,11 +246,10 @@ restart:
                prev = rsv;
        }
        printk("Window map complete.\n");
-       if (bad)
-               BUG();
+       BUG_ON(bad);
 }
 #define rsv_window_dump(root, verbose) \
-       __rsv_window_dump((root), (verbose), __FUNCTION__)
+       __rsv_window_dump((root), (verbose), __func__)
 #else
 #define rsv_window_dump(root, verbose) do {} while (0)
 #endif
@@ -548,7 +548,7 @@ do_more:
        for (i = 0, group_freed = 0; i < count; i++) {
                if (!ext2_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                                                bit + i, bitmap_bh->b_data)) {
-                       ext2_error(sb, __FUNCTION__,
+                       ext2_error(sb, __func__,
                                "bit already cleared for block %lu", block + i);
                } else {
                        group_freed++;
@@ -1381,7 +1381,12 @@ allocated:
                            "Allocating block in system zone - "
                            "blocks from "E2FSBLK", length %lu",
                            ret_block, num);
-               goto out;
+               /*
+                * ext2_try_to_allocate marked the blocks we allocated as in
+                * use.  So we may want to selectively mark some of the blocks
+                * as free
+                */
+               goto retry_alloc;
        }
 
        performed_allocation = 1;
index 8dededd80fe2fa6747699c70b81d211b77d49ff0..a78c6b4af06012a82c6a1ed3095b9ce4fc2e22fb 100644 (file)
@@ -41,8 +41,8 @@ static inline __le16 ext2_rec_len_to_disk(unsigned len)
 {
        if (len == (1 << 16))
                return cpu_to_le16(EXT2_MAX_REC_LEN);
-       else if (len > (1 << 16))
-               BUG();
+       else
+               BUG_ON(len > (1 << 16));
        return cpu_to_le16(len);
 }
 
@@ -295,11 +295,11 @@ ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
                struct page *page = ext2_get_page(inode, n);
 
                if (IS_ERR(page)) {
-                       ext2_error(sb, __FUNCTION__,
+                       ext2_error(sb, __func__,
                                   "bad page in #%lu",
                                   inode->i_ino);
                        filp->f_pos += PAGE_CACHE_SIZE - offset;
-                       return -EIO;
+                       return PTR_ERR(page);
                }
                kaddr = page_address(page);
                if (unlikely(need_revalidate)) {
@@ -314,7 +314,7 @@ ext2_readdir (struct file * filp, void * dirent, filldir_t filldir)
                limit = kaddr + ext2_last_byte(inode, n) - EXT2_DIR_REC_LEN(1);
                for ( ;(char*)de <= limit; de = ext2_next_entry(de)) {
                        if (de->rec_len == 0) {
-                               ext2_error(sb, __FUNCTION__,
+                               ext2_error(sb, __func__,
                                        "zero-length directory entry");
                                ext2_put_page(page);
                                return -EIO;
@@ -381,7 +381,7 @@ struct ext2_dir_entry_2 * ext2_find_entry (struct inode * dir,
                        kaddr += ext2_last_byte(dir, n) - reclen;
                        while ((char *) de <= kaddr) {
                                if (de->rec_len == 0) {
-                                       ext2_error(dir->i_sb, __FUNCTION__,
+                                       ext2_error(dir->i_sb, __func__,
                                                "zero-length directory entry");
                                        ext2_put_page(page);
                                        goto out;
@@ -396,7 +396,7 @@ struct ext2_dir_entry_2 * ext2_find_entry (struct inode * dir,
                        n = 0;
                /* next page is past the blocks we've got */
                if (unlikely(n > (dir->i_blocks >> (PAGE_CACHE_SHIFT - 9)))) {
-                       ext2_error(dir->i_sb, __FUNCTION__,
+                       ext2_error(dir->i_sb, __func__,
                                "dir %lu size %lld exceeds block count %llu",
                                dir->i_ino, dir->i_size,
                                (unsigned long long)dir->i_blocks);
@@ -506,7 +506,7 @@ int ext2_add_link (struct dentry *dentry, struct inode *inode)
                                goto got_it;
                        }
                        if (de->rec_len == 0) {
-                               ext2_error(dir->i_sb, __FUNCTION__,
+                               ext2_error(dir->i_sb, __func__,
                                        "zero-length directory entry");
                                err = -EIO;
                                goto out_unlock;
@@ -578,7 +578,7 @@ int ext2_delete_entry (struct ext2_dir_entry_2 * dir, struct page * page )
 
        while ((char*)de < (char*)dir) {
                if (de->rec_len == 0) {
-                       ext2_error(inode->i_sb, __FUNCTION__,
+                       ext2_error(inode->i_sb, __func__,
                                "zero-length directory entry");
                        err = -EIO;
                        goto out;
@@ -670,7 +670,7 @@ int ext2_empty_dir (struct inode * inode)
 
                while ((char *)de <= kaddr) {
                        if (de->rec_len == 0) {
-                               ext2_error(inode->i_sb, __FUNCTION__,
+                               ext2_error(inode->i_sb, __func__,
                                        "zero-length directory entry");
                                printk("kaddr=%p, de=%p\n", kaddr, de);
                                goto not_empty;
index 08f647d8188de4651eb5b50be5dfc793e5e4bc44..f597413467605ab8f6c7a1cef2cc0e1d269c7b9d 100644 (file)
@@ -75,11 +75,9 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir)
        }
 
        spin_lock(sb_bgl_lock(EXT2_SB(sb), group));
-       desc->bg_free_inodes_count =
-               cpu_to_le16(le16_to_cpu(desc->bg_free_inodes_count) + 1);
+       le16_add_cpu(&desc->bg_free_inodes_count, 1);
        if (dir)
-               desc->bg_used_dirs_count =
-                       cpu_to_le16(le16_to_cpu(desc->bg_used_dirs_count) - 1);
+               le16_add_cpu(&desc->bg_used_dirs_count, -1);
        spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
        if (dir)
                percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter);
@@ -539,13 +537,11 @@ got:
                percpu_counter_inc(&sbi->s_dirs_counter);
 
        spin_lock(sb_bgl_lock(sbi, group));
-       gdp->bg_free_inodes_count =
-                cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
+       le16_add_cpu(&gdp->bg_free_inodes_count, -1);
        if (S_ISDIR(mode)) {
                if (sbi->s_debts[group] < 255)
                        sbi->s_debts[group]++;
-               gdp->bg_used_dirs_count =
-                       cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
+               le16_add_cpu(&gdp->bg_used_dirs_count, 1);
        } else {
                if (sbi->s_debts[group])
                        sbi->s_debts[group]--;
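
The fs/ext2/ialloc.c hunks replace the open-coded read/convert/modify/write sequences on the on-disk group-descriptor counters with le16_add_cpu(), which performs the same update in place. A hedged userspace model of the helper (the kernel's __le16 type is modelled here as two raw bytes so the example is host-endian independent):

    #include <stdint.h>
    #include <stdio.h>

    /* Model of an on-disk little-endian 16-bit field: byte 0 is the low
     * byte, byte 1 the high byte, regardless of host byte order. */
    typedef struct { uint8_t b[2]; } le16;

    static uint16_t le16_to_cpu(le16 v)
    {
            return (uint16_t)(v.b[0] | (v.b[1] << 8));
    }

    static le16 cpu_to_le16(uint16_t v)
    {
            le16 r = { { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) } };
            return r;
    }

    /* The helper adopted by the patch: add a CPU-order value to a
     * little-endian field in place. */
    static void le16_add_cpu(le16 *var, int16_t val)
    {
            *var = cpu_to_le16((uint16_t)(le16_to_cpu(*var) + val));
    }

    int main(void)
    {
            le16 free_inodes = cpu_to_le16(100);

            /* old style: x = cpu_to_le16(le16_to_cpu(x) + 1);
             * new style, same result: */
            le16_add_cpu(&free_inodes, 1);
            le16_add_cpu(&free_inodes, -1);

            printf("%u\n", le16_to_cpu(free_inodes));   /* 100 */
            return 0;
    }
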
index b8a2990bab8340a9ce1146ecafe7bd97585eb728..384fc0d1dd743b080c0fc1fa7f887663d8ed8d8e 100644 (file)
@@ -254,13 +254,13 @@ no_block:
  *     Caller must make sure that @ind is valid and will stay that way.
  */
 
-static unsigned long ext2_find_near(struct inode *inode, Indirect *ind)
+static ext2_fsblk_t ext2_find_near(struct inode *inode, Indirect *ind)
 {
        struct ext2_inode_info *ei = EXT2_I(inode);
        __le32 *start = ind->bh ? (__le32 *) ind->bh->b_data : ei->i_data;
        __le32 *p;
-       unsigned long bg_start;
-       unsigned long colour;
+       ext2_fsblk_t bg_start;
+       ext2_fsblk_t colour;
 
        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--)
@@ -275,8 +275,7 @@ static unsigned long ext2_find_near(struct inode *inode, Indirect *ind)
         * It is going to be refered from inode itself? OK, just put it into
         * the same cylinder group then.
         */
-       bg_start = (ei->i_block_group * EXT2_BLOCKS_PER_GROUP(inode->i_sb)) +
-               le32_to_cpu(EXT2_SB(inode->i_sb)->s_es->s_first_data_block);
+       bg_start = ext2_group_first_block_no(inode->i_sb, ei->i_block_group);
        colour = (current->pid % 16) *
                        (EXT2_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour;
@@ -291,8 +290,8 @@ static unsigned long ext2_find_near(struct inode *inode, Indirect *ind)
  *     Returns preferred place for a block (the goal).
  */
 
-static inline int ext2_find_goal(struct inode *inode, long block,
-                                Indirect *partial)
+static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block,
+                                         Indirect *partial)
 {
        struct ext2_block_alloc_info *block_i;
 
@@ -796,7 +795,7 @@ const struct address_space_operations ext2_aops = {
 
 const struct address_space_operations ext2_aops_xip = {
        .bmap                   = ext2_bmap,
-       .get_xip_page           = ext2_get_xip_page,
+       .get_xip_mem            = ext2_get_xip_mem,
 };
 
 const struct address_space_operations ext2_nobh_aops = {
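
The fs/ext2/inode.c hunks fold the open-coded "first data block + group * blocks-per-group" expression into ext2_group_first_block_no() and widen the result to ext2_fsblk_t, so goal blocks are no longer truncated on large filesystems. A small userspace sketch of the arithmetic the helper performs (structure and field names are illustrative, not the kernel types):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t ext2_fsblk_t;      /* filesystem-wide block number */

    struct sb_layout {
            uint32_t first_data_block;  /* 1 for 1 KiB blocks, else 0 */
            uint32_t blocks_per_group;
    };

    /* First block belonging to block group 'group', as computed by the
     * expression the diff replaces. */
    static ext2_fsblk_t group_first_block_no(const struct sb_layout *sb,
                                             unsigned long group)
    {
            return (ext2_fsblk_t)group * sb->blocks_per_group +
                   sb->first_data_block;
    }

    int main(void)
    {
            struct sb_layout sb = { .first_data_block = 1,
                                    .blocks_per_group = 8192 };

            /* group 3 of a 1 KiB-block filesystem starts at 3*8192+1 */
            printf("%llu\n",
                   (unsigned long long)group_first_block_no(&sb, 3));
            return 0;
    }
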
index 088b011bb97e2882892a766a94b9bcac2894d984..ef50cbc792db94668b9d7deecc22c50076511fe4 100644 (file)
@@ -51,8 +51,7 @@ void ext2_error (struct super_block * sb, const char * function,
 
        if (!(sb->s_flags & MS_RDONLY)) {
                sbi->s_mount_state |= EXT2_ERROR_FS;
-               es->s_state =
-                       cpu_to_le16(le16_to_cpu(es->s_state) | EXT2_ERROR_FS);
+               es->s_state |= cpu_to_le16(EXT2_ERROR_FS);
                ext2_sync_super(sb, es);
        }
 
@@ -90,7 +89,7 @@ void ext2_update_dynamic_rev(struct super_block *sb)
        if (le32_to_cpu(es->s_rev_level) > EXT2_GOOD_OLD_REV)
                return;
 
-       ext2_warning(sb, __FUNCTION__,
+       ext2_warning(sb, __func__,
                     "updating to rev %d because of new feature flag, "
                     "running e2fsck is recommended",
                     EXT2_DYNAMIC_REV);
@@ -604,7 +603,7 @@ static int ext2_setup_super (struct super_block * sb,
                        "running e2fsck is recommended\n");
        if (!le16_to_cpu(es->s_max_mnt_count))
                es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT);
-       es->s_mnt_count=cpu_to_le16(le16_to_cpu(es->s_mnt_count) + 1);
+       le16_add_cpu(&es->s_mnt_count, 1);
        ext2_write_super(sb);
        if (test_opt (sb, DEBUG))
                printk ("[EXT II FS %s, %s, bs=%lu, fs=%lu, gc=%lu, "
@@ -622,13 +621,13 @@ static int ext2_check_descriptors(struct super_block *sb)
 {
        int i;
        struct ext2_sb_info *sbi = EXT2_SB(sb);
-       unsigned long first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
-       unsigned long last_block;
 
        ext2_debug ("Checking group descriptors");
 
        for (i = 0; i < sbi->s_groups_count; i++) {
                struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL);
+               ext2_fsblk_t first_block = ext2_group_first_block_no(sb, i);
+               ext2_fsblk_t last_block;
 
                if (i == sbi->s_groups_count - 1)
                        last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
@@ -664,7 +663,6 @@ static int ext2_check_descriptors(struct super_block *sb)
                                    i, (unsigned long) le32_to_cpu(gdp->bg_inode_table));
                        return 0;
                }
-               first_block += EXT2_BLOCKS_PER_GROUP(sb);
        }
        return 1;
 }
@@ -721,10 +719,9 @@ static unsigned long descriptor_loc(struct super_block *sb,
                                    int nr)
 {
        struct ext2_sb_info *sbi = EXT2_SB(sb);
-       unsigned long bg, first_data_block, first_meta_bg;
+       unsigned long bg, first_meta_bg;
        int has_super = 0;
        
-       first_data_block = le32_to_cpu(sbi->s_es->s_first_data_block);
        first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
 
        if (!EXT2_HAS_INCOMPAT_FEATURE(sb, EXT2_FEATURE_INCOMPAT_META_BG) ||
@@ -733,7 +730,8 @@ static unsigned long descriptor_loc(struct super_block *sb,
        bg = sbi->s_desc_per_block * nr;
        if (ext2_bg_has_super(sb, bg))
                has_super = 1;
-       return (first_data_block + has_super + (bg * sbi->s_blocks_per_group));
+
+       return ext2_group_first_block_no(sb, bg) + has_super;
 }
 
 static int ext2_fill_super(struct super_block *sb, void *data, int silent)
@@ -1062,7 +1060,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent)
                goto failed_mount3;
        }
        if (EXT2_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_HAS_JOURNAL))
-               ext2_warning(sb, __FUNCTION__,
+               ext2_warning(sb, __func__,
                        "mounting ext3 filesystem as ext2");
        ext2_setup_super (sb, es, sb->s_flags & MS_RDONLY);
        return 0;
@@ -1126,10 +1124,9 @@ void ext2_write_super (struct super_block * sb)
        if (!(sb->s_flags & MS_RDONLY)) {
                es = EXT2_SB(sb)->s_es;
 
-               if (le16_to_cpu(es->s_state) & EXT2_VALID_FS) {
+               if (es->s_state & cpu_to_le16(EXT2_VALID_FS)) {
                        ext2_debug ("setting valid to 0\n");
-                       es->s_state = cpu_to_le16(le16_to_cpu(es->s_state) &
-                                                 ~EXT2_VALID_FS);
+                       es->s_state &= cpu_to_le16(~EXT2_VALID_FS);
                        es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
                        es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
                        es->s_mtime = cpu_to_le32(get_seconds());
@@ -1180,7 +1177,7 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
        if (((sbi->s_mount_opt & EXT2_MOUNT_XIP) !=
            (old_mount_opt & EXT2_MOUNT_XIP)) &&
            invalidate_inodes(sb))
-               ext2_warning(sb, __FUNCTION__, "busy inodes while remounting "\
+               ext2_warning(sb, __func__, "busy inodes while remounting "\
                             "xip remain in cache (no functional problem)");
        if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
                return 0;
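
The fs/ext2/super.c hunks also change how superblock state flags are tested and updated: instead of converting the whole s_state field to CPU order, the constant flag is converted once and the mask is applied directly to the little-endian value. This is valid because bitwise AND does not care about byte order, and converting a compile-time constant costs nothing. A short userspace check of the equivalence (swab16 stands in for the le16/cpu conversion on a big-endian host):

    #include <assert.h>
    #include <stdint.h>

    static uint16_t swab16(uint16_t v)
    {
            return (uint16_t)((v >> 8) | (v << 8));
    }

    int main(void)
    {
            uint16_t s_state  = swab16(0x0003);  /* on-disk little-endian image */
            uint16_t VALID_FS = 0x0001;          /* illustrative flag value */

            /* Old test:  le16_to_cpu(s_state) & VALID_FS
             * New test:  s_state & cpu_to_le16(VALID_FS)
             * Bitwise AND commutes with the byte swap, so both agree. */
            assert((swab16(s_state) & VALID_FS) != 0);
            assert((s_state & swab16(VALID_FS)) != 0);

            /* Clearing a flag, as in: es->s_state &= cpu_to_le16(~EXT2_VALID_FS); */
            s_state &= swab16((uint16_t)~VALID_FS);
            assert((swab16(s_state) & VALID_FS) == 0);
            return 0;
    }
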
index a99d46f3b26eb032e13c5a3d9f26484518997b5c..987a5261cc2e8ef8a2e164c4f491b37856f14be3 100644 (file)
@@ -646,8 +646,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
                                        unlock_buffer(new_bh);
                                        goto cleanup;
                                }
-                               HDR(new_bh)->h_refcount = cpu_to_le32(1 +
-                                       le32_to_cpu(HDR(new_bh)->h_refcount));
+                               le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
                                ea_bdebug(new_bh, "refcount now=%d",
                                        le32_to_cpu(HDR(new_bh)->h_refcount));
                        }
@@ -660,10 +659,8 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
                        ext2_xattr_cache_insert(new_bh);
                } else {
                        /* We need to allocate a new block */
-                       int goal = le32_to_cpu(EXT2_SB(sb)->s_es->
-                                                          s_first_data_block) +
-                                  EXT2_I(inode)->i_block_group *
-                                  EXT2_BLOCKS_PER_GROUP(sb);
+                       ext2_fsblk_t goal = ext2_group_first_block_no(sb,
+                                               EXT2_I(inode)->i_block_group);
                        int block = ext2_new_block(inode, goal, &error);
                        if (error)
                                goto cleanup;
@@ -731,8 +728,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
                        bforget(old_bh);
                } else {
                        /* Decrement the refcount only. */
-                       HDR(old_bh)->h_refcount = cpu_to_le32(
-                               le32_to_cpu(HDR(old_bh)->h_refcount) - 1);
+                       le32_add_cpu(&HDR(old_bh)->h_refcount, -1);
                        if (ce)
                                mb_cache_entry_release(ce);
                        DQUOT_FREE_BLOCK(inode, 1);
@@ -789,8 +785,7 @@ ext2_xattr_delete_inode(struct inode *inode)
                bforget(bh);
                unlock_buffer(bh);
        } else {
-               HDR(bh)->h_refcount = cpu_to_le32(
-                       le32_to_cpu(HDR(bh)->h_refcount) - 1);
+               le32_add_cpu(&HDR(bh)->h_refcount, -1);
                if (ce)
                        mb_cache_entry_release(ce);
                ea_bdebug(bh, "refcount now=%d",
index ca7f00312388efabd9a16b38c52df28e4917aee0..4fb94c20041b5e44c5ee2152a151bf65e486daa9 100644 (file)
 #include "xip.h"
 
 static inline int
-__inode_direct_access(struct inode *inode, sector_t sector,
-                     unsigned long *data)
+__inode_direct_access(struct inode *inode, sector_t block,
+                     void **kaddr, unsigned long *pfn)
 {
-       BUG_ON(!inode->i_sb->s_bdev->bd_disk->fops->direct_access);
-       return inode->i_sb->s_bdev->bd_disk->fops
-               ->direct_access(inode->i_sb->s_bdev,sector,data);
+       struct block_device *bdev = inode->i_sb->s_bdev;
+       struct block_device_operations *ops = bdev->bd_disk->fops;
+       sector_t sector;
+
+       sector = block * (PAGE_SIZE / 512); /* ext2 block to bdev sector */
+
+       BUG_ON(!ops->direct_access);
+       return ops->direct_access(bdev, sector, kaddr, pfn);
 }
 
 static inline int
-__ext2_get_sector(struct inode *inode, sector_t offset, int create,
+__ext2_get_block(struct inode *inode, pgoff_t pgoff, int create,
                   sector_t *result)
 {
        struct buffer_head tmp;
        int rc;
 
        memset(&tmp, 0, sizeof(struct buffer_head));
-       rc = ext2_get_block(inode, offset/ (PAGE_SIZE/512), &tmp,
-                           create);
+       rc = ext2_get_block(inode, pgoff, &tmp, create);
        *result = tmp.b_blocknr;
 
        /* did we get a sparse block (hole in the file)? */
@@ -45,15 +49,15 @@ __ext2_get_sector(struct inode *inode, sector_t offset, int create,
 }
 
 int
-ext2_clear_xip_target(struct inode *inode, int block)
+ext2_clear_xip_target(struct inode *inode, sector_t block)
 {
-       sector_t sector = block * (PAGE_SIZE/512);
-       unsigned long data;
+       void *kaddr;
+       unsigned long pfn;
        int rc;
 
-       rc = __inode_direct_access(inode, sector, &data);
+       rc = __inode_direct_access(inode, block, &kaddr, &pfn);
        if (!rc)
-               clear_page((void*)data);
+               clear_page(kaddr);
        return rc;
 }
 
@@ -64,30 +68,23 @@ void ext2_xip_verify_sb(struct super_block *sb)
        if ((sbi->s_mount_opt & EXT2_MOUNT_XIP) &&
            !sb->s_bdev->bd_disk->fops->direct_access) {
                sbi->s_mount_opt &= (~EXT2_MOUNT_XIP);
-               ext2_warning(sb, __FUNCTION__,
+               ext2_warning(sb, __func__,
                             "ignoring xip option - not supported by bdev");
        }
 }
 
-struct page *
-ext2_get_xip_page(struct address_space *mapping, sector_t offset,
-                  int create)
+int ext2_get_xip_mem(struct address_space *mapping, pgoff_t pgoff, int create,
+                               void **kmem, unsigned long *pfn)
 {
        int rc;
-       unsigned long data;
-       sector_t sector;
+       sector_t block;
 
        /* first, retrieve the sector number */
-       rc = __ext2_get_sector(mapping->host, offset, create, &sector);
+       rc = __ext2_get_block(mapping->host, pgoff, create, &block);
        if (rc)
-               goto error;
+               return rc;
 
        /* retrieve address of the target data */
-       rc = __inode_direct_access
-               (mapping->host, sector * (PAGE_SIZE/512), &data);
-       if (!rc)
-               return virt_to_page(data);
-
- error:
-       return ERR_PTR(rc);
+       rc = __inode_direct_access(mapping->host, block, kmem, pfn);
+       return rc;
 }
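
The fs/ext2/xip.c rewrite above changes the execute-in-place hook from returning a struct page to filling in a kernel virtual address and a page frame number, matching the extended direct_access() block-device method. A rough userspace model of the new calling convention only; the backing store, names, and the pfn computation below are illustrative and do not correspond to any kernel API:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Mock byte-addressable device standing in for a real direct_access()
     * provider. */
    static unsigned char device_memory[16 * PAGE_SIZE];

    /* New-style hook: report the mapped address and frame number of the
     * requested page instead of returning a struct page. */
    static int get_xip_mem(unsigned long pgoff, int create,
                           void **kmem, unsigned long *pfn)
    {
            (void)create;
            if (pgoff >= sizeof(device_memory) / PAGE_SIZE)
                    return -1;                  /* illustrative error code */
            *kmem = device_memory + pgoff * PAGE_SIZE;
            *pfn  = (unsigned long)((uintptr_t)*kmem / PAGE_SIZE);
            return 0;
    }

    int main(void)
    {
            void *kmem;
            unsigned long pfn;

            if (get_xip_mem(2, 0, &kmem, &pfn) == 0)
                    printf("page 2 maps to %p (pfn %lu)\n", kmem, pfn);
            return 0;
    }
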
index aa85331d6c564cb0a6d636576f9ade8cd02d9a56..18b34d2f31b3784fa95565103ea914bab45809b5 100644 (file)
@@ -7,19 +7,20 @@
 
 #ifdef CONFIG_EXT2_FS_XIP
 extern void ext2_xip_verify_sb (struct super_block *);
-extern int ext2_clear_xip_target (struct inode *, int);
+extern int ext2_clear_xip_target (struct inode *, sector_t);
 
 static inline int ext2_use_xip (struct super_block *sb)
 {
        struct ext2_sb_info *sbi = EXT2_SB(sb);
        return (sbi->s_mount_opt & EXT2_MOUNT_XIP);
 }
-struct page* ext2_get_xip_page (struct address_space *, sector_t, int);
-#define mapping_is_xip(map) unlikely(map->a_ops->get_xip_page)
+int ext2_get_xip_mem(struct address_space *, pgoff_t, int,
+                               void **, unsigned long *);
+#define mapping_is_xip(map) unlikely(map->a_ops->get_xip_mem)
 #else
 #define mapping_is_xip(map)                    0
 #define ext2_xip_verify_sb(sb)                 do { } while (0)
 #define ext2_use_xip(sb)                       0
 #define ext2_clear_xip_target(inode, chain)    0
-#define ext2_get_xip_page                      NULL
+#define ext2_get_xip_mem                       NULL
 #endif
index da0cb2c0e4376717292fd2afc1d4ff147b9fc6b8..92fd0338a6ebe3df16739b65feec4e4ef034206f 100644 (file)
@@ -117,7 +117,7 @@ static int ext3_valid_block_bitmap(struct super_block *sb,
                return 1;
 
 err_out:
-       ext3_error(sb, __FUNCTION__,
+       ext3_error(sb, __func__,
                        "Invalid block bitmap - "
                        "block_group = %d, block = %lu",
                        block_group, bitmap_blk);
@@ -147,7 +147,7 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
        bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
-               ext3_error(sb, __FUNCTION__,
+               ext3_error(sb, __func__,
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %u",
                            block_group, le32_to_cpu(desc->bg_block_bitmap));
@@ -158,16 +158,17 @@ read_block_bitmap(struct super_block *sb, unsigned int block_group)
 
        if (bh_submit_read(bh) < 0) {
                brelse(bh);
-               ext3_error(sb, __FUNCTION__,
+               ext3_error(sb, __func__,
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %u",
                            block_group, le32_to_cpu(desc->bg_block_bitmap));
                return NULL;
        }
-       if (!ext3_valid_block_bitmap(sb, desc, block_group, bh)) {
-               brelse(bh);
-               return NULL;
-       }
+       ext3_valid_block_bitmap(sb, desc, block_group, bh);
+       /*
+        * file system mounted not to panic on error, continue with corrupt
+        * bitmap
+        */
        return bh;
 }
 /*
@@ -232,11 +233,10 @@ restart:
                prev = rsv;
        }
        printk("Window map complete.\n");
-       if (bad)
-               BUG();
+       BUG_ON(bad);
 }
 #define rsv_window_dump(root, verbose) \
-       __rsv_window_dump((root), (verbose), __FUNCTION__)
+       __rsv_window_dump((root), (verbose), __func__)
 #else
 #define rsv_window_dump(root, verbose) do {} while (0)
 #endif
@@ -618,7 +618,7 @@ do_more:
                if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                                                bit + i, bitmap_bh->b_data)) {
                        jbd_unlock_bh_state(bitmap_bh);
-                       ext3_error(sb, __FUNCTION__,
+                       ext3_error(sb, __func__,
                                "bit already cleared for block "E3FSBLK,
                                 block + i);
                        jbd_lock_bh_state(bitmap_bh);
@@ -1642,7 +1642,11 @@ allocated:
                            "Allocating block in system zone - "
                            "blocks from "E3FSBLK", length %lu",
                             ret_block, num);
-               goto out;
+               /*
+                * claim_block() marked the blocks we allocated as in use. So we
+                * may want to selectively mark some of the blocks as free.
+                */
+               goto retry_alloc;
        }
 
        performed_allocation = 1;
@@ -1668,7 +1672,7 @@ allocated:
                        if (ext3_test_bit(grp_alloc_blk+i,
                                        bh2jh(bitmap_bh)->b_committed_data)) {
                                printk("%s: block was unexpectedly set in "
-                                       "b_committed_data\n", __FUNCTION__);
+                                       "b_committed_data\n", __func__);
                        }
                }
        }
index e1f91fd26a93aa9aaf6ba7544fd7e2809bad7c96..d401f148d74d2bc87e72a46e53b34c6b4fc3a27a 100644 (file)
@@ -9,7 +9,7 @@ int __ext3_journal_get_undo_access(const char *where, handle_t *handle,
 {
        int err = journal_get_undo_access(handle, bh);
        if (err)
-               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+               ext3_journal_abort_handle(where, __func__, bh, handle,err);
        return err;
 }
 
@@ -18,7 +18,7 @@ int __ext3_journal_get_write_access(const char *where, handle_t *handle,
 {
        int err = journal_get_write_access(handle, bh);
        if (err)
-               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+               ext3_journal_abort_handle(where, __func__, bh, handle,err);
        return err;
 }
 
@@ -27,7 +27,7 @@ int __ext3_journal_forget(const char *where, handle_t *handle,
 {
        int err = journal_forget(handle, bh);
        if (err)
-               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+               ext3_journal_abort_handle(where, __func__, bh, handle,err);
        return err;
 }
 
@@ -36,7 +36,7 @@ int __ext3_journal_revoke(const char *where, handle_t *handle,
 {
        int err = journal_revoke(handle, blocknr, bh);
        if (err)
-               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+               ext3_journal_abort_handle(where, __func__, bh, handle,err);
        return err;
 }
 
@@ -45,7 +45,7 @@ int __ext3_journal_get_create_access(const char *where,
 {
        int err = journal_get_create_access(handle, bh);
        if (err)
-               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+               ext3_journal_abort_handle(where, __func__, bh, handle,err);
        return err;
 }
 
@@ -54,6 +54,6 @@ int __ext3_journal_dirty_metadata(const char *where,
 {
        int err = journal_dirty_metadata(handle, bh);
        if (err)
-               ext3_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+               ext3_journal_abort_handle(where, __func__, bh, handle,err);
        return err;
 }
index a588e23841d4e68323d2604e70ad92ec227923c6..d33634119e17e893eeff553781ef0307389c35f4 100644 (file)
@@ -72,6 +72,9 @@ int ext3_sync_file(struct file * file, struct dentry *dentry, int datasync)
                goto out;
        }
 
+       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+               goto out;
+
        /*
         * The VFS has written the file data.  If the inode is unaltered
         * then we need not start a commit.
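
The fs/ext3/fsync.c hunk adds an early exit for fdatasync(): when none of the inode metadata needed to retrieve the written data is dirty (only timestamps and the like), the expensive journal commit can be skipped. A schematic userspace model of the check; the flag names and values here are invented for illustration and differ from the kernel's I_DIRTY_* bits:

    #include <stdbool.h>
    #include <stdio.h>

    #define DIRTY_SYNC      0x1   /* timestamps etc., irrelevant to fdatasync */
    #define DIRTY_DATASYNC  0x2   /* metadata required to retrieve file data */

    struct mock_inode {
            unsigned int state;
    };

    /* Mirrors the added test: fdatasync() may skip the journal commit when
     * no datasync-relevant metadata is dirty. */
    static bool needs_commit(const struct mock_inode *inode, bool datasync)
    {
            if (datasync && !(inode->state & DIRTY_DATASYNC))
                    return false;
            return true;
    }

    int main(void)
    {
            struct mock_inode atime_only = { .state = DIRTY_SYNC };
            struct mock_inode extended   = { .state = DIRTY_SYNC | DIRTY_DATASYNC };

            printf("atime-only fdatasync commits: %d\n",
                   needs_commit(&atime_only, true));   /* 0: commit skipped */
            printf("appending  fdatasync commits: %d\n",
                   needs_commit(&extended, true));     /* 1: must commit */
            return 0;
    }
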
index 96dd5573e49bbf53b90439821a3dad7ff1e046d4..77126821b2e9de219c0e1f36caea9c263e6f7c2f 100644 (file)
@@ -644,7 +644,7 @@ struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
 
        /* Error cases - e2fsck has already cleaned up for us */
        if (ino > max_ino) {
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "bad orphan ino %lu!  e2fsck was run?", ino);
                goto error;
        }
@@ -653,7 +653,7 @@ struct inode *ext3_orphan_get(struct super_block *sb, unsigned long ino)
        bit = (ino - 1) % EXT3_INODES_PER_GROUP(sb);
        bitmap_bh = read_inode_bitmap(sb, block_group);
        if (!bitmap_bh) {
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "inode bitmap error for orphan %lu", ino);
                goto error;
        }
@@ -678,7 +678,7 @@ iget_failed:
        err = PTR_ERR(inode);
        inode = NULL;
 bad_orphan:
-       ext3_warning(sb, __FUNCTION__,
+       ext3_warning(sb, __func__,
                     "bad orphan inode %lu!  e2fsck was run?", ino);
        printk(KERN_NOTICE "ext3_test_bit(bit=%d, block=%llu) = %d\n",
               bit, (unsigned long long)bitmap_bh->b_blocknr,
index c683609b0e3a39f863377ba937ad22169c306f60..6ae4ecf3ce40b646da679ba6b2a1ecb6149e7e00 100644 (file)
@@ -95,7 +95,7 @@ int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
        BUFFER_TRACE(bh, "call ext3_journal_revoke");
        err = ext3_journal_revoke(handle, blocknr, bh);
        if (err)
-               ext3_abort(inode->i_sb, __FUNCTION__,
+               ext3_abort(inode->i_sb, __func__,
                           "error %d when attempting revoke", err);
        BUFFER_TRACE(bh, "exit");
        return err;
@@ -1190,7 +1190,7 @@ int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
 {
        int err = journal_dirty_data(handle, bh);
        if (err)
-               ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__,
+               ext3_journal_abort_handle(__func__, __func__,
                                                bh, handle, err);
        return err;
 }
@@ -1261,10 +1261,11 @@ static int ext3_ordered_write_end(struct file *file,
                new_i_size = pos + copied;
                if (new_i_size > EXT3_I(inode)->i_disksize)
                        EXT3_I(inode)->i_disksize = new_i_size;
-               copied = ext3_generic_write_end(file, mapping, pos, len, copied,
+               ret2 = ext3_generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
-               if (copied < 0)
-                       ret = copied;
+               copied = ret2;
+               if (ret2 < 0)
+                       ret = ret2;
        }
        ret2 = ext3_journal_stop(handle);
        if (!ret)
@@ -1289,10 +1290,11 @@ static int ext3_writeback_write_end(struct file *file,
        if (new_i_size > EXT3_I(inode)->i_disksize)
                EXT3_I(inode)->i_disksize = new_i_size;
 
-       copied = ext3_generic_write_end(file, mapping, pos, len, copied,
+       ret2 = ext3_generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
-       if (copied < 0)
-               ret = copied;
+       copied = ret2;
+       if (ret2 < 0)
+               ret = ret2;
 
        ret2 = ext3_journal_stop(handle);
        if (!ret)
@@ -2454,11 +2456,10 @@ out_stop:
 static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
                unsigned long ino, struct ext3_iloc *iloc)
 {
-       unsigned long desc, group_desc, block_group;
+       unsigned long block_group;
        unsigned long offset;
        ext3_fsblk_t block;
-       struct buffer_head *bh;
-       struct ext3_group_desc * gdp;
+       struct ext3_group_desc *gdp;
 
        if (!ext3_valid_inum(sb, ino)) {
                /*
@@ -2470,27 +2471,15 @@ static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
        }
 
        block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
-       if (block_group >= EXT3_SB(sb)->s_groups_count) {
-               ext3_error(sb,"ext3_get_inode_block","group >= groups count");
-               return 0;
-       }
-       smp_rmb();
-       group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
-       desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
-       bh = EXT3_SB(sb)->s_group_desc[group_desc];
-       if (!bh) {
-               ext3_error (sb, "ext3_get_inode_block",
-                           "Descriptor not loaded");
+       gdp = ext3_get_group_desc(sb, block_group, NULL);
+       if (!gdp)
                return 0;
-       }
-
-       gdp = (struct ext3_group_desc *)bh->b_data;
        /*
         * Figure out the offset within the block group inode table
         */
        offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
                EXT3_INODE_SIZE(sb);
-       block = le32_to_cpu(gdp[desc].bg_inode_table) +
+       block = le32_to_cpu(gdp->bg_inode_table) +
                (offset >> EXT3_BLOCK_SIZE_BITS(sb));
 
        iloc->block_group = block_group;
@@ -3214,7 +3203,7 @@ void ext3_dirty_inode(struct inode *inode)
                current_handle->h_transaction != handle->h_transaction) {
                /* This task has a transaction open against a different fs */
                printk(KERN_EMERG "%s: transactions do not match!\n",
-                      __FUNCTION__);
+                      __func__);
        } else {
                jbd_debug(5, "marking dirty.  outer handle=%p\n",
                                current_handle);
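
In the fs/ext3/inode.c hunks, ext3_get_inode_block() now looks the group descriptor up through ext3_get_group_desc() instead of indexing the descriptor-block array by hand; only the arithmetic that locates the inode inside its group's inode table remains. A small userspace sketch of that remaining arithmetic (geometry values are examples, structure names illustrative):

    #include <stdint.h>
    #include <stdio.h>

    struct fs_geometry {
            uint32_t inodes_per_group;
            uint32_t inode_size;        /* bytes, e.g. 128 */
            uint32_t block_size_bits;   /* e.g. 10 for 1 KiB blocks */
    };

    /* Byte offset of inode 'ino' within its group's inode table, reduced
     * to the table block that contains it, as in the hunk above. */
    static uint64_t inode_block(const struct fs_geometry *g,
                                uint64_t inode_table_block, unsigned long ino)
    {
            unsigned long offset = ((ino - 1) % g->inodes_per_group) *
                                   g->inode_size;

            return inode_table_block + (offset >> g->block_size_bits);
    }

    int main(void)
    {
            struct fs_geometry g = { .inodes_per_group = 1712,
                                     .inode_size = 128,
                                     .block_size_bits = 10 };

            /* inode 12 sits 11 * 128 = 1408 bytes into the table, i.e. in
             * the second table block: 504 + 1 = 505 */
            printf("%llu\n", (unsigned long long)inode_block(&g, 504, 12));
            return 0;
    }
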
index dec3e0d88ab1536745ccf52c70a23a2a6a04ca57..0b8cf80154f1a5c912a1b2d6cae18ccebea591c1 100644 (file)
@@ -57,10 +57,15 @@ static struct buffer_head *ext3_append(handle_t *handle,
 
        *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
 
-       if ((bh = ext3_bread(handle, inode, *block, 1, err))) {
+       bh = ext3_bread(handle, inode, *block, 1, err);
+       if (bh) {
                inode->i_size += inode->i_sb->s_blocksize;
                EXT3_I(inode)->i_disksize = inode->i_size;
-               ext3_journal_get_write_access(handle,bh);
+               *err = ext3_journal_get_write_access(handle, bh);
+               if (*err) {
+                       brelse(bh);
+                       bh = NULL;
+               }
        }
        return bh;
 }
@@ -356,7 +361,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
        if (root->info.hash_version != DX_HASH_TEA &&
            root->info.hash_version != DX_HASH_HALF_MD4 &&
            root->info.hash_version != DX_HASH_LEGACY) {
-               ext3_warning(dir->i_sb, __FUNCTION__,
+               ext3_warning(dir->i_sb, __func__,
                             "Unrecognised inode hash code %d",
                             root->info.hash_version);
                brelse(bh);
@@ -370,7 +375,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
        hash = hinfo->hash;
 
        if (root->info.unused_flags & 1) {
-               ext3_warning(dir->i_sb, __FUNCTION__,
+               ext3_warning(dir->i_sb, __func__,
                             "Unimplemented inode hash flags: %#06x",
                             root->info.unused_flags);
                brelse(bh);
@@ -379,7 +384,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
        }
 
        if ((indirect = root->info.indirect_levels) > 1) {
-               ext3_warning(dir->i_sb, __FUNCTION__,
+               ext3_warning(dir->i_sb, __func__,
                             "Unimplemented inode hash depth: %#06x",
                             root->info.indirect_levels);
                brelse(bh);
@@ -392,7 +397,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
 
        if (dx_get_limit(entries) != dx_root_limit(dir,
                                                   root->info.info_length)) {
-               ext3_warning(dir->i_sb, __FUNCTION__,
+               ext3_warning(dir->i_sb, __func__,
                             "dx entry: limit != root limit");
                brelse(bh);
                *err = ERR_BAD_DX_DIR;
@@ -404,7 +409,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
        {
                count = dx_get_count(entries);
                if (!count || count > dx_get_limit(entries)) {
-                       ext3_warning(dir->i_sb, __FUNCTION__,
+                       ext3_warning(dir->i_sb, __func__,
                                     "dx entry: no count or count > limit");
                        brelse(bh);
                        *err = ERR_BAD_DX_DIR;
@@ -449,7 +454,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
                        goto fail2;
                at = entries = ((struct dx_node *) bh->b_data)->entries;
                if (dx_get_limit(entries) != dx_node_limit (dir)) {
-                       ext3_warning(dir->i_sb, __FUNCTION__,
+                       ext3_warning(dir->i_sb, __func__,
                                     "dx entry: limit != node limit");
                        brelse(bh);
                        *err = ERR_BAD_DX_DIR;
@@ -465,7 +470,7 @@ fail2:
        }
 fail:
        if (*err == ERR_BAD_DX_DIR)
-               ext3_warning(dir->i_sb, __FUNCTION__,
+               ext3_warning(dir->i_sb, __func__,
                             "Corrupt dir inode %ld, running e2fsck is "
                             "recommended.", dir->i_ino);
        return NULL;
@@ -913,7 +918,7 @@ restart:
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        /* read error, skip block & hope for the best */
-                       ext3_error(sb, __FUNCTION__, "reading directory #%lu "
+                       ext3_error(sb, __func__, "reading directory #%lu "
                                   "offset %lu", dir->i_ino, block);
                        brelse(bh);
                        goto next;
@@ -1005,7 +1010,7 @@ static struct buffer_head * ext3_dx_find_entry(struct dentry *dentry,
                retval = ext3_htree_next_block(dir, hash, frame,
                                               frames, NULL);
                if (retval < 0) {
-                       ext3_warning(sb, __FUNCTION__,
+                       ext3_warning(sb, __func__,
                             "error reading index page in directory #%lu",
                             dir->i_ino);
                        *err = retval;
@@ -1530,7 +1535,7 @@ static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
 
                if (levels && (dx_get_count(frames->entries) ==
                               dx_get_limit(frames->entries))) {
-                       ext3_warning(sb, __FUNCTION__,
+                       ext3_warning(sb, __func__,
                                     "Directory index full!");
                        err = -ENOSPC;
                        goto cleanup;
@@ -1832,11 +1837,11 @@ static int empty_dir (struct inode * inode)
        if (inode->i_size < EXT3_DIR_REC_LEN(1) + EXT3_DIR_REC_LEN(2) ||
            !(bh = ext3_bread (NULL, inode, 0, 0, &err))) {
                if (err)
-                       ext3_error(inode->i_sb, __FUNCTION__,
+                       ext3_error(inode->i_sb, __func__,
                                   "error %d reading directory #%lu offset 0",
                                   err, inode->i_ino);
                else
-                       ext3_warning(inode->i_sb, __FUNCTION__,
+                       ext3_warning(inode->i_sb, __func__,
                                     "bad directory (dir #%lu) - no data block",
                                     inode->i_ino);
                return 1;
@@ -1865,7 +1870,7 @@ static int empty_dir (struct inode * inode)
                                offset >> EXT3_BLOCK_SIZE_BITS(sb), 0, &err);
                        if (!bh) {
                                if (err)
-                                       ext3_error(sb, __FUNCTION__,
+                                       ext3_error(sb, __func__,
                                                   "error %d reading directory"
                                                   " #%lu offset %lu",
                                                   err, inode->i_ino, offset);
@@ -2318,6 +2323,8 @@ static int ext3_rename (struct inode * old_dir, struct dentry *old_dentry,
                                              EXT3_FEATURE_INCOMPAT_FILETYPE))
                        new_de->file_type = old_de->file_type;
                new_dir->i_version++;
+               new_dir->i_ctime = new_dir->i_mtime = CURRENT_TIME_SEC;
+               ext3_mark_inode_dirty(handle, new_dir);
                BUFFER_TRACE(new_bh, "call ext3_journal_dirty_metadata");
                ext3_journal_dirty_metadata(handle, new_bh);
                brelse(new_bh);
index 0e97b6e07cb03556437c55924003d103efd44f17..28cfd0b40527bed916ce4e5ca36e821ece6308ba 100644 (file)
@@ -48,60 +48,60 @@ static int verify_group_input(struct super_block *sb,
                       free_blocks_count, input->reserved_blocks);
 
        if (group != sbi->s_groups_count)
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "Cannot add at group %u (only %lu groups)",
                             input->group, sbi->s_groups_count);
        else if ((start - le32_to_cpu(es->s_first_data_block)) %
                 EXT3_BLOCKS_PER_GROUP(sb))
-               ext3_warning(sb, __FUNCTION__, "Last group not full");
+               ext3_warning(sb, __func__, "Last group not full");
        else if (input->reserved_blocks > input->blocks_count / 5)
-               ext3_warning(sb, __FUNCTION__, "Reserved blocks too high (%u)",
+               ext3_warning(sb, __func__, "Reserved blocks too high (%u)",
                             input->reserved_blocks);
        else if (free_blocks_count < 0)
-               ext3_warning(sb, __FUNCTION__, "Bad blocks count %u",
+               ext3_warning(sb, __func__, "Bad blocks count %u",
                             input->blocks_count);
        else if (!(bh = sb_bread(sb, end - 1)))
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "Cannot read last block ("E3FSBLK")",
                             end - 1);
        else if (outside(input->block_bitmap, start, end))
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "Block bitmap not in group (block %u)",
                             input->block_bitmap);
        else if (outside(input->inode_bitmap, start, end))
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "Inode bitmap not in group (block %u)",
                             input->inode_bitmap);
        else if (outside(input->inode_table, start, end) ||
                 outside(itend - 1, start, end))
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "Inode table not in group (blocks %u-"E3FSBLK")",
                             input->inode_table, itend - 1);
        else if (input->inode_bitmap == input->block_bitmap)
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "Block bitmap same as inode bitmap (%u)",
                             input->block_bitmap);
        else if (inside(input->block_bitmap, input->inode_table, itend))
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "Block bitmap (%u) in inode table (%u-"E3FSBLK")",
                             input->block_bitmap, input->inode_table, itend-1);
        else if (inside(input->inode_bitmap, input->inode_table, itend))
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "Inode bitmap (%u) in inode table (%u-"E3FSBLK")",
                             input->inode_bitmap, input->inode_table, itend-1);
        else if (inside(input->block_bitmap, start, metaend))
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "Block bitmap (%u) in GDT table"
                             " ("E3FSBLK"-"E3FSBLK")",
                             input->block_bitmap, start, metaend - 1);
        else if (inside(input->inode_bitmap, start, metaend))
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "Inode bitmap (%u) in GDT table"
                             " ("E3FSBLK"-"E3FSBLK")",
                             input->inode_bitmap, start, metaend - 1);
        else if (inside(input->inode_table, start, metaend) ||
                 inside(itend - 1, start, metaend))
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "Inode table (%u-"E3FSBLK") overlaps"
                             "GDT table ("E3FSBLK"-"E3FSBLK")",
                             input->inode_table, itend - 1, start, metaend - 1);
@@ -386,7 +386,7 @@ static int verify_reserved_gdb(struct super_block *sb,
 
        while ((grp = ext3_list_backups(sb, &three, &five, &seven)) < end) {
                if (le32_to_cpu(*p++) != grp * EXT3_BLOCKS_PER_GROUP(sb) + blk){
-                       ext3_warning(sb, __FUNCTION__,
+                       ext3_warning(sb, __func__,
                                     "reserved GDT "E3FSBLK
                                     " missing grp %d ("E3FSBLK")",
                                     blk, grp,
@@ -440,7 +440,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
         */
        if (EXT3_SB(sb)->s_sbh->b_blocknr !=
            le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) {
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                        "won't resize using backup superblock at %llu",
                        (unsigned long long)EXT3_SB(sb)->s_sbh->b_blocknr);
                return -EPERM;
@@ -464,7 +464,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 
        data = (__le32 *)dind->b_data;
        if (le32_to_cpu(data[gdb_num % EXT3_ADDR_PER_BLOCK(sb)]) != gdblock) {
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "new group %u GDT block "E3FSBLK" not reserved",
                             input->group, gdblock);
                err = -EINVAL;
@@ -488,7 +488,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
                        GFP_NOFS);
        if (!n_group_desc) {
                err = -ENOMEM;
-               ext3_warning (sb, __FUNCTION__,
+               ext3_warning (sb, __func__,
                              "not enough memory for %lu groups", gdb_num + 1);
                goto exit_inode;
        }
@@ -586,7 +586,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
        /* Get each reserved primary GDT block and verify it holds backups */
        for (res = 0; res < reserved_gdb; res++, blk++) {
                if (le32_to_cpu(*data) != blk) {
-                       ext3_warning(sb, __FUNCTION__,
+                       ext3_warning(sb, __func__,
                                     "reserved block "E3FSBLK
                                     " not at offset %ld",
                                     blk,
@@ -730,7 +730,7 @@ static void update_backups(struct super_block *sb,
         */
 exit_err:
        if (err) {
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "can't update backup for group %d (err %d), "
                             "forcing fsck on next reboot", group, err);
                sbi->s_mount_state &= ~EXT3_VALID_FS;
@@ -770,33 +770,33 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
 
        if (gdb_off == 0 && !EXT3_HAS_RO_COMPAT_FEATURE(sb,
                                        EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "Can't resize non-sparse filesystem further");
                return -EPERM;
        }
 
        if (le32_to_cpu(es->s_blocks_count) + input->blocks_count <
            le32_to_cpu(es->s_blocks_count)) {
-               ext3_warning(sb, __FUNCTION__, "blocks_count overflow\n");
+               ext3_warning(sb, __func__, "blocks_count overflow\n");
                return -EINVAL;
        }
 
        if (le32_to_cpu(es->s_inodes_count) + EXT3_INODES_PER_GROUP(sb) <
            le32_to_cpu(es->s_inodes_count)) {
-               ext3_warning(sb, __FUNCTION__, "inodes_count overflow\n");
+               ext3_warning(sb, __func__, "inodes_count overflow\n");
                return -EINVAL;
        }
 
        if (reserved_gdb || gdb_off == 0) {
                if (!EXT3_HAS_COMPAT_FEATURE(sb,
                                             EXT3_FEATURE_COMPAT_RESIZE_INODE)){
-                       ext3_warning(sb, __FUNCTION__,
+                       ext3_warning(sb, __func__,
                                     "No reserved GDT blocks, can't resize");
                        return -EPERM;
                }
                inode = ext3_iget(sb, EXT3_RESIZE_INO);
                if (IS_ERR(inode)) {
-                       ext3_warning(sb, __FUNCTION__,
+                       ext3_warning(sb, __func__,
                                     "Error opening resize inode");
                        return PTR_ERR(inode);
                }
@@ -825,7 +825,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input)
 
        lock_super(sb);
        if (input->group != sbi->s_groups_count) {
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "multiple resizers run on filesystem!");
                err = -EBUSY;
                goto exit_journal;
@@ -988,13 +988,13 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
                        " too large to resize to %lu blocks safely\n",
                        sb->s_id, n_blocks_count);
                if (sizeof(sector_t) < 8)
-                       ext3_warning(sb, __FUNCTION__,
+                       ext3_warning(sb, __func__,
                        "CONFIG_LBD not enabled\n");
                return -EINVAL;
        }
 
        if (n_blocks_count < o_blocks_count) {
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "can't shrink FS - resize aborted");
                return -EBUSY;
        }
@@ -1004,7 +1004,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
                EXT3_BLOCKS_PER_GROUP(sb);
 
        if (last == 0) {
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "need to use ext2online to resize further");
                return -EPERM;
        }
@@ -1012,7 +1012,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
        add = EXT3_BLOCKS_PER_GROUP(sb) - last;
 
        if (o_blocks_count + add < o_blocks_count) {
-               ext3_warning(sb, __FUNCTION__, "blocks_count overflow");
+               ext3_warning(sb, __func__, "blocks_count overflow");
                return -EINVAL;
        }
 
@@ -1020,7 +1020,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
                add = n_blocks_count - o_blocks_count;
 
        if (o_blocks_count + add < n_blocks_count)
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "will only finish group ("E3FSBLK
                             " blocks, %u new)",
                             o_blocks_count + add, add);
@@ -1028,7 +1028,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
        /* See if the device is actually as big as what was requested */
        bh = sb_bread(sb, o_blocks_count + add -1);
        if (!bh) {
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "can't read last block, resize aborted");
                return -ENOSPC;
        }
@@ -1040,22 +1040,23 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es,
        handle = ext3_journal_start_sb(sb, 3);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
-               ext3_warning(sb, __FUNCTION__, "error %d on journal start",err);
+               ext3_warning(sb, __func__, "error %d on journal start",err);
                goto exit_put;
        }
 
        lock_super(sb);
        if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) {
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "multiple resizers run on filesystem!");
                unlock_super(sb);
+               ext3_journal_stop(handle);
                err = -EBUSY;
                goto exit_put;
        }
 
        if ((err = ext3_journal_get_write_access(handle,
                                                 EXT3_SB(sb)->s_sbh))) {
-               ext3_warning(sb, __FUNCTION__,
+               ext3_warning(sb, __func__,
                             "error %d on journal write access", err);
                unlock_super(sb);
                ext3_journal_stop(handle);
index ad536066408229da9955e41e27a0f5e72756cccd..fe3119a71adacb65172cdc6be3c390f55e234679 100644 (file)
@@ -84,7 +84,7 @@ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks)
         * take the FS itself readonly cleanly. */
        journal = EXT3_SB(sb)->s_journal;
        if (is_journal_aborted(journal)) {
-               ext3_abort(sb, __FUNCTION__,
+               ext3_abort(sb, __func__,
                           "Detected aborted journal");
                return ERR_PTR(-EROFS);
        }
@@ -304,7 +304,7 @@ void ext3_update_dynamic_rev(struct super_block *sb)
        if (le32_to_cpu(es->s_rev_level) > EXT3_GOOD_OLD_REV)
                return;
 
-       ext3_warning(sb, __FUNCTION__,
+       ext3_warning(sb, __func__,
                     "updating to rev %d because of new feature flag, "
                     "running e2fsck is recommended",
                     EXT3_DYNAMIC_REV);
@@ -685,7 +685,8 @@ static int ext3_acquire_dquot(struct dquot *dquot);
 static int ext3_release_dquot(struct dquot *dquot);
 static int ext3_mark_dquot_dirty(struct dquot *dquot);
 static int ext3_write_info(struct super_block *sb, int type);
-static int ext3_quota_on(struct super_block *sb, int type, int format_id, char *path);
+static int ext3_quota_on(struct super_block *sb, int type, int format_id,
+                               char *path, int remount);
 static int ext3_quota_on_mount(struct super_block *sb, int type);
 static ssize_t ext3_quota_read(struct super_block *sb, int type, char *data,
                               size_t len, loff_t off);
@@ -1096,6 +1097,9 @@ clear_qf_name:
                case Opt_quota:
                case Opt_usrquota:
                case Opt_grpquota:
+                       printk(KERN_ERR
+                               "EXT3-fs: quota options not supported.\n");
+                       break;
                case Opt_usrjquota:
                case Opt_grpjquota:
                case Opt_offusrjquota:
@@ -1103,7 +1107,7 @@ clear_qf_name:
                case Opt_jqfmt_vfsold:
                case Opt_jqfmt_vfsv0:
                        printk(KERN_ERR
-                               "EXT3-fs: journalled quota options not "
+                               "EXT3-fs: journaled quota options not "
                                "supported.\n");
                        break;
                case Opt_noquota:
@@ -1218,7 +1222,7 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
                    inconsistencies, to force a fsck at reboot.  But for
                    a plain journaled filesystem we can keep it set as
                    valid forever! :) */
-       es->s_state = cpu_to_le16(le16_to_cpu(es->s_state) & ~EXT3_VALID_FS);
+       es->s_state &= cpu_to_le16(~EXT3_VALID_FS);
 #endif
        if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
                es->s_max_mnt_count = cpu_to_le16(EXT3_DFL_MAX_MNT_COUNT);
@@ -1253,14 +1257,14 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es,
 static int ext3_check_descriptors(struct super_block *sb)
 {
        struct ext3_sb_info *sbi = EXT3_SB(sb);
-       ext3_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
-       ext3_fsblk_t last_block;
        int i;
 
        ext3_debug ("Checking group descriptors");
 
        for (i = 0; i < sbi->s_groups_count; i++) {
                struct ext3_group_desc *gdp = ext3_get_group_desc(sb, i, NULL);
+               ext3_fsblk_t first_block = ext3_group_first_block_no(sb, i);
+               ext3_fsblk_t last_block;
 
                if (i == sbi->s_groups_count - 1)
                        last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
@@ -1299,7 +1303,6 @@ static int ext3_check_descriptors(struct super_block *sb)
                                        le32_to_cpu(gdp->bg_inode_table));
                        return 0;
                }
-               first_block += EXT3_BLOCKS_PER_GROUP(sb);
        }
 
        sbi->s_es->s_free_blocks_count=cpu_to_le32(ext3_count_free_blocks(sb));
@@ -1387,7 +1390,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
                if (inode->i_nlink) {
                        printk(KERN_DEBUG
                                "%s: truncating inode %lu to %Ld bytes\n",
-                               __FUNCTION__, inode->i_ino, inode->i_size);
+                               __func__, inode->i_ino, inode->i_size);
                        jbd_debug(2, "truncating inode %lu to %Ld bytes\n",
                                  inode->i_ino, inode->i_size);
                        ext3_truncate(inode);
@@ -1395,7 +1398,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
                } else {
                        printk(KERN_DEBUG
                                "%s: deleting unreferenced inode %lu\n",
-                               __FUNCTION__, inode->i_ino);
+                               __func__, inode->i_ino);
                        jbd_debug(2, "deleting unreferenced inode %lu\n",
                                  inode->i_ino);
                        nr_orphans++;
@@ -1415,7 +1418,7 @@ static void ext3_orphan_cleanup (struct super_block * sb,
        /* Turn quotas off */
        for (i = 0; i < MAXQUOTAS; i++) {
                if (sb_dqopt(sb)->files[i])
-                       vfs_quota_off(sb, i);
+                       vfs_quota_off(sb, i, 0);
        }
 #endif
        sb->s_flags = s_flags; /* Restore MS_RDONLY status */
@@ -2298,9 +2301,9 @@ static void ext3_clear_journal_err(struct super_block * sb,
                char nbuf[16];
 
                errstr = ext3_decode_error(sb, j_errno, nbuf);
-               ext3_warning(sb, __FUNCTION__, "Filesystem error recorded "
+               ext3_warning(sb, __func__, "Filesystem error recorded "
                             "from previous mount: %s", errstr);
-               ext3_warning(sb, __FUNCTION__, "Marking fs in need of "
+               ext3_warning(sb, __func__, "Marking fs in need of "
                             "filesystem check.");
 
                EXT3_SB(sb)->s_mount_state |= EXT3_ERROR_FS;
@@ -2427,7 +2430,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data)
        }
 
        if (sbi->s_mount_opt & EXT3_MOUNT_ABORT)
-               ext3_abort(sb, __FUNCTION__, "Abort forced by user");
+               ext3_abort(sb, __func__, "Abort forced by user");
 
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                ((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
@@ -2639,8 +2642,14 @@ static int ext3_dquot_drop(struct inode *inode)
 
        /* We may delete quota structure so we need to reserve enough blocks */
        handle = ext3_journal_start(inode, 2*EXT3_QUOTA_DEL_BLOCKS(inode->i_sb));
-       if (IS_ERR(handle))
+       if (IS_ERR(handle)) {
+               /*
+                * We call dquot_drop() anyway to at least release references
+                * to quota structures so that umount does not hang.
+                */
+               dquot_drop(inode);
                return PTR_ERR(handle);
+       }
        ret = dquot_drop(inode);
        err = ext3_journal_stop(handle);
        if (!ret)
@@ -2743,17 +2752,17 @@ static int ext3_quota_on_mount(struct super_block *sb, int type)
  * Standard function to be called on quota_on
  */
 static int ext3_quota_on(struct super_block *sb, int type, int format_id,
-                        char *path)
+                        char *path, int remount)
 {
        int err;
        struct nameidata nd;
 
        if (!test_opt(sb, QUOTA))
                return -EINVAL;
-       /* Not journalling quota? */
-       if (!EXT3_SB(sb)->s_qf_names[USRQUOTA] &&
-           !EXT3_SB(sb)->s_qf_names[GRPQUOTA])
-               return vfs_quota_on(sb, type, format_id, path);
+       /* Not journalling quota or remount? */
+       if ((!EXT3_SB(sb)->s_qf_names[USRQUOTA] &&
+           !EXT3_SB(sb)->s_qf_names[GRPQUOTA]) || remount)
+               return vfs_quota_on(sb, type, format_id, path, remount);
        err = path_lookup(path, LOOKUP_FOLLOW, &nd);
        if (err)
                return err;
@@ -2762,13 +2771,13 @@ static int ext3_quota_on(struct super_block *sb, int type, int format_id,
                path_put(&nd.path);
                return -EXDEV;
        }
-       /* Quotafile not of fs root? */
+       /* Quotafile not in fs root? */
        if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
                printk(KERN_WARNING
                        "EXT3-fs: Quota file not on filesystem root. "
                        "Journalled quota will not work.\n");
        path_put(&nd.path);
-       return vfs_quota_on(sb, type, format_id, path);
+       return vfs_quota_on(sb, type, format_id, path, remount);
 }
 
 /* Read data from quotafile - avoid pagecache and such because we cannot afford
index 42856541e9a5aad9a50fc32a83f6802c2c01a976..d4a4f0e9ff699ae49165ea93ff4b53c162b6a60a 100644 (file)
@@ -99,6 +99,8 @@ static struct buffer_head *ext3_xattr_cache_find(struct inode *,
                                                 struct mb_cache_entry **);
 static void ext3_xattr_rehash(struct ext3_xattr_header *,
                              struct ext3_xattr_entry *);
+static int ext3_xattr_list(struct inode *inode, char *buffer,
+                          size_t buffer_size);
 
 static struct mb_cache *ext3_xattr_cache;
 
@@ -232,7 +234,7 @@ ext3_xattr_block_get(struct inode *inode, int name_index, const char *name,
        ea_bdebug(bh, "b_count=%d, refcount=%d",
                atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
        if (ext3_xattr_check_block(bh)) {
-bad_block:     ext3_error(inode->i_sb, __FUNCTION__,
+bad_block:     ext3_error(inode->i_sb, __func__,
                           "inode %lu: bad block "E3FSBLK, inode->i_ino,
                           EXT3_I(inode)->i_file_acl);
                error = -EIO;
@@ -374,7 +376,7 @@ ext3_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
        ea_bdebug(bh, "b_count=%d, refcount=%d",
                atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
        if (ext3_xattr_check_block(bh)) {
-               ext3_error(inode->i_sb, __FUNCTION__,
+               ext3_error(inode->i_sb, __func__,
                           "inode %lu: bad block "E3FSBLK, inode->i_ino,
                           EXT3_I(inode)->i_file_acl);
                error = -EIO;
@@ -427,7 +429,7 @@ cleanup:
  * Returns a negative error number on failure, or the number of bytes
  * used / required on success.
  */
-int
+static int
 ext3_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
 {
        int i_error, b_error;
@@ -649,7 +651,7 @@ ext3_xattr_block_find(struct inode *inode, struct ext3_xattr_info *i,
                        atomic_read(&(bs->bh->b_count)),
                        le32_to_cpu(BHDR(bs->bh)->h_refcount));
                if (ext3_xattr_check_block(bs->bh)) {
-                       ext3_error(sb, __FUNCTION__,
+                       ext3_error(sb, __func__,
                                "inode %lu: bad block "E3FSBLK, inode->i_ino,
                                EXT3_I(inode)->i_file_acl);
                        error = -EIO;
@@ -797,10 +799,8 @@ inserted:
                        get_bh(new_bh);
                } else {
                        /* We need to allocate a new block */
-                       ext3_fsblk_t goal = le32_to_cpu(
-                                       EXT3_SB(sb)->s_es->s_first_data_block) +
-                               (ext3_fsblk_t)EXT3_I(inode)->i_block_group *
-                               EXT3_BLOCKS_PER_GROUP(sb);
+                       ext3_fsblk_t goal = ext3_group_first_block_no(sb,
+                                               EXT3_I(inode)->i_block_group);
                        ext3_fsblk_t block = ext3_new_block(handle, inode,
                                                        goal, &error);
                        if (error)
@@ -852,7 +852,7 @@ cleanup_dquot:
        goto cleanup;
 
 bad_block:
-       ext3_error(inode->i_sb, __FUNCTION__,
+       ext3_error(inode->i_sb, __func__,
                   "inode %lu: bad block "E3FSBLK, inode->i_ino,
                   EXT3_I(inode)->i_file_acl);
        goto cleanup;
@@ -1081,14 +1081,14 @@ ext3_xattr_delete_inode(handle_t *handle, struct inode *inode)
                goto cleanup;
        bh = sb_bread(inode->i_sb, EXT3_I(inode)->i_file_acl);
        if (!bh) {
-               ext3_error(inode->i_sb, __FUNCTION__,
+               ext3_error(inode->i_sb, __func__,
                        "inode %lu: block "E3FSBLK" read error", inode->i_ino,
                        EXT3_I(inode)->i_file_acl);
                goto cleanup;
        }
        if (BHDR(bh)->h_magic != cpu_to_le32(EXT3_XATTR_MAGIC) ||
            BHDR(bh)->h_blocks != cpu_to_le32(1)) {
-               ext3_error(inode->i_sb, __FUNCTION__,
+               ext3_error(inode->i_sb, __func__,
                        "inode %lu: bad block "E3FSBLK, inode->i_ino,
                        EXT3_I(inode)->i_file_acl);
                goto cleanup;
@@ -1215,7 +1215,7 @@ again:
                }
                bh = sb_bread(inode->i_sb, ce->e_block);
                if (!bh) {
-                       ext3_error(inode->i_sb, __FUNCTION__,
+                       ext3_error(inode->i_sb, __func__,
                                "inode %lu: block %lu read error",
                                inode->i_ino, (unsigned long) ce->e_block);
                } else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
index 6b1ae1c6182c04bf43173a941b422b9f93d29335..148a4dfc82abf220fb45a71c7de5425cddb0025f 100644 (file)
@@ -67,7 +67,6 @@ extern struct xattr_handler ext3_xattr_security_handler;
 extern ssize_t ext3_listxattr(struct dentry *, char *, size_t);
 
 extern int ext3_xattr_get(struct inode *, int, const char *, void *, size_t);
-extern int ext3_xattr_list(struct inode *, char *, size_t);
 extern int ext3_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
 extern int ext3_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
 
@@ -88,12 +87,6 @@ ext3_xattr_get(struct inode *inode, int name_index, const char *name,
        return -EOPNOTSUPP;
 }
 
-static inline int
-ext3_xattr_list(struct inode *inode, void *buffer, size_t size)
-{
-       return -EOPNOTSUPP;
-}
-
 static inline int
 ext3_xattr_set(struct inode *inode, int name_index, const char *name,
               const void *value, size_t size, int flags)
index a8bae8cd1d5de707c9a8772464ca4ecaa93044ed..3c8dab880d91f1bb38247d44d605f2bc3943a90e 100644 (file)
@@ -9,8 +9,8 @@
 #include <linux/slab.h>
 #include <linux/capability.h>
 #include <linux/fs.h>
-#include <linux/ext4_jbd2.h>
-#include <linux/ext4_fs.h>
+#include "ext4_jbd2.h"
+#include "ext4.h"
 #include "xattr.h"
 #include "acl.h"
 
@@ -37,7 +37,7 @@ ext4_acl_from_disk(const void *value, size_t size)
                return ERR_PTR(-EINVAL);
        if (count == 0)
                return NULL;
-       acl = posix_acl_alloc(count, GFP_KERNEL);
+       acl = posix_acl_alloc(count, GFP_NOFS);
        if (!acl)
                return ERR_PTR(-ENOMEM);
        for (n=0; n < count; n++) {
@@ -91,7 +91,7 @@ ext4_acl_to_disk(const struct posix_acl *acl, size_t *size)
 
        *size = ext4_acl_size(acl->a_count);
        ext_acl = kmalloc(sizeof(ext4_acl_header) + acl->a_count *
-                       sizeof(ext4_acl_entry), GFP_KERNEL);
+                       sizeof(ext4_acl_entry), GFP_NOFS);
        if (!ext_acl)
                return ERR_PTR(-ENOMEM);
        ext_acl->a_version = cpu_to_le32(EXT4_ACL_VERSION);
@@ -187,7 +187,7 @@ ext4_get_acl(struct inode *inode, int type)
        }
        retval = ext4_xattr_get(inode, name_index, "", NULL, 0);
        if (retval > 0) {
-               value = kmalloc(retval, GFP_KERNEL);
+               value = kmalloc(retval, GFP_NOFS);
                if (!value)
                        return ERR_PTR(-ENOMEM);
                retval = ext4_xattr_get(inode, name_index, "", value, retval);
@@ -335,7 +335,7 @@ ext4_init_acl(handle_t *handle, struct inode *inode, struct inode *dir)
                        if (error)
                                goto cleanup;
                }
-               clone = posix_acl_clone(acl, GFP_KERNEL);
+               clone = posix_acl_clone(acl, GFP_NOFS);
                error = -ENOMEM;
                if (!clone)
                        goto cleanup;
index 0737e05ba3dd22842429f287356ca1c500265943..da994374ec3b2c6e13c103bf9e83b7dd87353edc 100644 (file)
 #include <linux/capability.h>
 #include <linux/fs.h>
 #include <linux/jbd2.h>
-#include <linux/ext4_fs.h>
-#include <linux/ext4_jbd2.h>
 #include <linux/quotaops.h>
 #include <linux/buffer_head.h>
-
+#include "ext4.h"
+#include "ext4_jbd2.h"
 #include "group.h"
+
 /*
  * balloc.c contains the blocks allocation and deallocation routines
  */
@@ -48,7 +48,6 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
 unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
                 ext4_group_t block_group, struct ext4_group_desc *gdp)
 {
-       unsigned long start;
        int bit, bit_max;
        unsigned free_blocks, group_blocks;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -59,7 +58,7 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
                /* If checksum is bad mark all blocks used to prevent allocation
                 * essentially implementing a per-group read-only flag. */
                if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
-                       ext4_error(sb, __FUNCTION__,
+                       ext4_error(sb, __func__,
                                  "Checksum bad for group %lu\n", block_group);
                        gdp->bg_free_blocks_count = 0;
                        gdp->bg_free_inodes_count = 0;
@@ -106,11 +105,12 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
        free_blocks = group_blocks - bit_max;
 
        if (bh) {
+               ext4_fsblk_t start;
+
                for (bit = 0; bit < bit_max; bit++)
                        ext4_set_bit(bit, bh->b_data);
 
-               start = block_group * EXT4_BLOCKS_PER_GROUP(sb) +
-                       le32_to_cpu(sbi->s_es->s_first_data_block);
+               start = ext4_group_first_block_no(sb, block_group);
 
                /* Set bits for block and inode bitmaps, and inode table */
                ext4_set_bit(ext4_block_bitmap(sb, gdp) - start, bh->b_data);
@@ -235,7 +235,7 @@ static int ext4_valid_block_bitmap(struct super_block *sb,
                return 1;
 
 err_out:
-       ext4_error(sb, __FUNCTION__,
+       ext4_error(sb, __func__,
                        "Invalid block bitmap - "
                        "block_group = %d, block = %llu",
                        block_group, bitmap_blk);
@@ -264,7 +264,7 @@ read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
        bitmap_blk = ext4_block_bitmap(sb, desc);
        bh = sb_getblk(sb, bitmap_blk);
        if (unlikely(!bh)) {
-               ext4_error(sb, __FUNCTION__,
+               ext4_error(sb, __func__,
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %llu",
                            (int)block_group, (unsigned long long)bitmap_blk);
@@ -281,7 +281,7 @@ read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
        }
        if (bh_submit_read(bh) < 0) {
                put_bh(bh);
-               ext4_error(sb, __FUNCTION__,
+               ext4_error(sb, __func__,
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %llu",
                            (int)block_group, (unsigned long long)bitmap_blk);
@@ -360,7 +360,7 @@ restart:
                BUG();
 }
 #define rsv_window_dump(root, verbose) \
-       __rsv_window_dump((root), (verbose), __FUNCTION__)
+       __rsv_window_dump((root), (verbose), __func__)
 #else
 #define rsv_window_dump(root, verbose) do {} while (0)
 #endif
@@ -740,7 +740,7 @@ do_more:
                if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                                                bit + i, bitmap_bh->b_data)) {
                        jbd_unlock_bh_state(bitmap_bh);
-                       ext4_error(sb, __FUNCTION__,
+                       ext4_error(sb, __func__,
                                   "bit already cleared for block %llu",
                                   (ext4_fsblk_t)(block + i));
                        jbd_lock_bh_state(bitmap_bh);
@@ -752,9 +752,7 @@ do_more:
        jbd_unlock_bh_state(bitmap_bh);
 
        spin_lock(sb_bgl_lock(sbi, block_group));
-       desc->bg_free_blocks_count =
-               cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
-                       group_freed);
+       le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
        desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
        spin_unlock(sb_bgl_lock(sbi, block_group));
        percpu_counter_add(&sbi->s_freeblocks_counter, count);
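Several hunks in this commit replace the open-coded cpu_to_le16(le16_to_cpu(x) +/- n) pattern, as in the bg_free_blocks_count update above, with le16_add_cpu(). A small standalone sketch of what that helper boils down to; the conversion helpers here are userspace stand-ins that assume a little-endian host, not the kernel's byteorder implementation:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t __le16;        /* stand-in for the kernel's on-disk type */

static uint16_t le16_to_cpu(__le16 v) { return v; }   /* little-endian host assumed */
static __le16 cpu_to_le16(uint16_t v) { return v; }

/* What le16_add_cpu(&var, val) amounts to. */
static void le16_add_cpu(__le16 *var, int16_t val)
{
        *var = cpu_to_le16(le16_to_cpu(*var) + val);
}

int main(void)
{
        __le16 free_blocks = cpu_to_le16(100);

        /* Old open-coded form ... */
        free_blocks = cpu_to_le16(le16_to_cpu(free_blocks) + 5);
        /* ... and the helper the patch switches to. */
        le16_add_cpu(&free_blocks, -3);

        printf("free_blocks = %u\n", le16_to_cpu(free_blocks));  /* 102 */
        return 0;
}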
@@ -1798,7 +1796,7 @@ allocated:
                        if (ext4_test_bit(grp_alloc_blk+i,
                                        bh2jh(bitmap_bh)->b_committed_data)) {
                                printk("%s: block was unexpectedly set in "
-                                       "b_committed_data\n", __FUNCTION__);
+                                       "b_committed_data\n", __func__);
                        }
                }
        }
@@ -1823,8 +1821,7 @@ allocated:
        spin_lock(sb_bgl_lock(sbi, group_no));
        if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
                gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
-       gdp->bg_free_blocks_count =
-                       cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
+       le16_add_cpu(&gdp->bg_free_blocks_count, -num);
        gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
        spin_unlock(sb_bgl_lock(sbi, group_no));
        percpu_counter_sub(&sbi->s_freeblocks_counter, num);
index 420554f8f79d8f4fb4a99a95bec9539e6c78f8bc..d37ea67504549c8d5c36958194323b50e2dd6ce7 100644 (file)
@@ -9,7 +9,7 @@
 
 #include <linux/buffer_head.h>
 #include <linux/jbd2.h>
-#include <linux/ext4_fs.h>
+#include "ext4.h"
 
 #ifdef EXT4FS_DEBUG
 
index 2c23bade9aa676451972905449e8d8cc0bbcfec3..2bf0331ea1946303f0caedf360dce0cb9dc38445 100644 (file)
 
 #include <linux/fs.h>
 #include <linux/jbd2.h>
-#include <linux/ext4_fs.h>
 #include <linux/buffer_head.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
+#include "ext4.h"
 
 static unsigned char ext4_filetype_table[] = {
        DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
@@ -42,7 +42,7 @@ const struct file_operations ext4_dir_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
        .readdir        = ext4_readdir,         /* we take BKL. needed?*/
-       .ioctl          = ext4_ioctl,           /* BKL held */
+       .unlocked_ioctl = ext4_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
 #endif
similarity index 99%
rename from include/linux/ext4_fs.h
rename to fs/ext4/ext4.h
index 25003254859773c657f510d0e2460c900d47a6c9..8158083f7ac0a04673529125fe2aa58babd4cb8d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  linux/include/linux/ext4_fs.h
+ *  ext4.h
  *
  * Copyright (C) 1992, 1993, 1994, 1995
  * Remy Card (card@masi.ibp.fr)
  *  Copyright (C) 1991, 1992  Linus Torvalds
  */
 
-#ifndef _LINUX_EXT4_FS_H
-#define _LINUX_EXT4_FS_H
+#ifndef _EXT4_H
+#define _EXT4_H
 
 #include <linux/types.h>
 #include <linux/blkdev.h>
 #include <linux/magic.h>
-
-#include <linux/ext4_fs_i.h>
+#include "ext4_i.h"
 
 /*
  * The second extended filesystem constants/structures
@@ -176,8 +175,7 @@ struct ext4_group_desc
 #define EXT4_BG_INODE_ZEROED   0x0004 /* On-disk itable initialized to zero */
 
 #ifdef __KERNEL__
-#include <linux/ext4_fs_i.h>
-#include <linux/ext4_fs_sb.h>
+#include "ext4_sb.h"
 #endif
 /*
  * Macro-instructions used to manage group descriptors
@@ -231,6 +229,7 @@ struct ext4_group_desc
 #define EXT4_TOPDIR_FL                 0x00020000 /* Top of directory hierarchies*/
 #define EXT4_HUGE_FILE_FL               0x00040000 /* Set to each huge file */
 #define EXT4_EXTENTS_FL                        0x00080000 /* Inode uses extents */
+#define EXT4_EXT_MIGRATE               0x00100000 /* Inode is migrating */
 #define EXT4_RESERVED_FL               0x80000000 /* reserved for ext4 lib */
 
 #define EXT4_FL_USER_VISIBLE           0x000BDFFF /* User visible flags */
@@ -1049,8 +1048,7 @@ extern int ext4_block_truncate_page(handle_t *handle, struct page *page,
                struct address_space *mapping, loff_t from);
 
 /* ioctl.c */
-extern int ext4_ioctl (struct inode *, struct file *, unsigned int,
-                      unsigned long);
+extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
 extern long ext4_compat_ioctl (struct file *, unsigned int, unsigned long);
 
 /* migrate.c */
@@ -1204,4 +1202,4 @@ extern int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode,
                        int extend_disksize);
 #endif /* __KERNEL__ */
 
-#endif /* _LINUX_EXT4_FS_H */
+#endif /* _EXT4_H */
similarity index 98%
rename from include/linux/ext4_fs_extents.h
rename to fs/ext4/ext4_extents.h
index 1285c583b2d868421366fc71bb05cc12c507225f..75333b595fab794bda6023a3978b0c61df5e91de 100644 (file)
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-
  */
 
-#ifndef _LINUX_EXT4_EXTENTS
-#define _LINUX_EXT4_EXTENTS
+#ifndef _EXT4_EXTENTS
+#define _EXT4_EXTENTS
 
-#include <linux/ext4_fs.h>
+#include "ext4.h"
 
 /*
  * With AGGRESSIVE_TEST defined, the capacity of index/leaf blocks
@@ -228,5 +228,5 @@ extern int ext4_ext_search_left(struct inode *, struct ext4_ext_path *,
 extern int ext4_ext_search_right(struct inode *, struct ext4_ext_path *,
                                                ext4_lblk_t *, ext4_fsblk_t *);
 extern void ext4_ext_drop_refs(struct ext4_ext_path *);
-#endif /* _LINUX_EXT4_EXTENTS */
+#endif /* _EXT4_EXTENTS */
 
similarity index 97%
rename from include/linux/ext4_fs_i.h
rename to fs/ext4/ext4_i.h
index d5508d3cf29096db958efa7be8719d69fb155ac0..26a4ae255d79da02ad615de4bf6217f1dc5287b0 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  linux/include/linux/ext4_fs_i.h
+ *  ext4_i.h
  *
  * Copyright (C) 1992, 1993, 1994, 1995
  * Remy Card (card@masi.ibp.fr)
@@ -13,8 +13,8 @@
  *  Copyright (C) 1991, 1992  Linus Torvalds
  */
 
-#ifndef _LINUX_EXT4_FS_I
-#define _LINUX_EXT4_FS_I
+#ifndef _EXT4_I
+#define _EXT4_I
 
 #include <linux/rwsem.h>
 #include <linux/rbtree.h>
@@ -164,4 +164,4 @@ struct ext4_inode_info {
        spinlock_t i_prealloc_lock;
 };
 
-#endif /* _LINUX_EXT4_FS_I */
+#endif /* _EXT4_I */
index d6afe4e273403147c091cdc7d3807b90d6f6083e..c75384b34f2c7e1f587818cafc301a5d6aae5b1e 100644 (file)
@@ -2,14 +2,14 @@
  * Interface between ext4 and JBD
  */
 
-#include <linux/ext4_jbd2.h>
+#include "ext4_jbd2.h"
 
 int __ext4_journal_get_undo_access(const char *where, handle_t *handle,
                                struct buffer_head *bh)
 {
        int err = jbd2_journal_get_undo_access(handle, bh);
        if (err)
-               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+               ext4_journal_abort_handle(where, __func__, bh, handle, err);
        return err;
 }
 
@@ -18,7 +18,7 @@ int __ext4_journal_get_write_access(const char *where, handle_t *handle,
 {
        int err = jbd2_journal_get_write_access(handle, bh);
        if (err)
-               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+               ext4_journal_abort_handle(where, __func__, bh, handle, err);
        return err;
 }
 
@@ -27,7 +27,7 @@ int __ext4_journal_forget(const char *where, handle_t *handle,
 {
        int err = jbd2_journal_forget(handle, bh);
        if (err)
-               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+               ext4_journal_abort_handle(where, __func__, bh, handle, err);
        return err;
 }
 
@@ -36,7 +36,7 @@ int __ext4_journal_revoke(const char *where, handle_t *handle,
 {
        int err = jbd2_journal_revoke(handle, blocknr, bh);
        if (err)
-               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+               ext4_journal_abort_handle(where, __func__, bh, handle, err);
        return err;
 }
 
@@ -45,7 +45,7 @@ int __ext4_journal_get_create_access(const char *where,
 {
        int err = jbd2_journal_get_create_access(handle, bh);
        if (err)
-               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+               ext4_journal_abort_handle(where, __func__, bh, handle, err);
        return err;
 }
 
@@ -54,6 +54,6 @@ int __ext4_journal_dirty_metadata(const char *where,
 {
        int err = jbd2_journal_dirty_metadata(handle, bh);
        if (err)
-               ext4_journal_abort_handle(where, __FUNCTION__, bh, handle,err);
+               ext4_journal_abort_handle(where, __func__, bh, handle, err);
        return err;
 }
similarity index 98%
rename from include/linux/ext4_jbd2.h
rename to fs/ext4/ext4_jbd2.h
index 38c71d3c8dbf39efff5278fa9c58272d70c4554b..9255a7d28b245546d16001359129aaad43883113 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * linux/include/linux/ext4_jbd2.h
+ * ext4_jbd2.h
  *
  * Written by Stephen C. Tweedie <sct@redhat.com>, 1999
  *
  * Ext4-specific journaling extensions.
  */
 
-#ifndef _LINUX_EXT4_JBD2_H
-#define _LINUX_EXT4_JBD2_H
+#ifndef _EXT4_JBD2_H
+#define _EXT4_JBD2_H
 
 #include <linux/fs.h>
 #include <linux/jbd2.h>
-#include <linux/ext4_fs.h>
+#include "ext4.h"
 
 #define EXT4_JOURNAL(inode)    (EXT4_SB((inode)->i_sb)->s_journal)
 
@@ -228,4 +228,4 @@ static inline int ext4_should_writeback_data(struct inode *inode)
        return 0;
 }
 
-#endif /* _LINUX_EXT4_JBD2_H */
+#endif /* _EXT4_JBD2_H */
similarity index 97%
rename from include/linux/ext4_fs_sb.h
rename to fs/ext4/ext4_sb.h
index abaae2c8cccf3d7bad74769998816f0b081e7d5d..5802e69f2191d015286924887538264a0b3b2325 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  linux/include/linux/ext4_fs_sb.h
+ *  ext4_sb.h
  *
  * Copyright (C) 1992, 1993, 1994, 1995
  * Remy Card (card@masi.ibp.fr)
@@ -13,8 +13,8 @@
  *  Copyright (C) 1991, 1992  Linus Torvalds
  */
 
-#ifndef _LINUX_EXT4_FS_SB
-#define _LINUX_EXT4_FS_SB
+#ifndef _EXT4_SB
+#define _EXT4_SB
 
 #ifdef __KERNEL__
 #include <linux/timer.h>
@@ -145,4 +145,4 @@ struct ext4_sb_info {
        struct ext4_locality_group *s_locality_groups;
 };
 
-#endif /* _LINUX_EXT4_FS_SB */
+#endif /* _EXT4_SB */
index 9ae6e67090cdfad1bd52a7e7169ba725dea19c82..47929c4e3dae66d104f5bdd6ac39f5436183dff5 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/time.h>
-#include <linux/ext4_jbd2.h>
 #include <linux/jbd2.h>
 #include <linux/highuid.h>
 #include <linux/pagemap.h>
@@ -40,8 +39,9 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/falloc.h>
-#include <linux/ext4_fs_extents.h>
 #include <asm/uaccess.h>
+#include "ext4_jbd2.h"
+#include "ext4_extents.h"
 
 
 /*
@@ -308,7 +308,7 @@ corrupted:
 }
 
 #define ext4_ext_check_header(inode, eh, depth)        \
-       __ext4_ext_check_header(__FUNCTION__, inode, eh, depth)
+       __ext4_ext_check_header(__func__, inode, eh, depth)
 
 #ifdef EXT_DEBUG
 static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
@@ -614,7 +614,7 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
 
        ix->ei_block = cpu_to_le32(logical);
        ext4_idx_store_pblock(ix, ptr);
-       curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);
+       le16_add_cpu(&curp->p_hdr->eh_entries, 1);
 
        BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
                             > le16_to_cpu(curp->p_hdr->eh_max));
@@ -736,7 +736,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
        }
        if (m) {
                memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
-               neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
+               le16_add_cpu(&neh->eh_entries, m);
        }
 
        set_buffer_uptodate(bh);
@@ -753,8 +753,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                err = ext4_ext_get_access(handle, inode, path + depth);
                if (err)
                        goto cleanup;
-               path[depth].p_hdr->eh_entries =
-                    cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
+               le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
                err = ext4_ext_dirty(handle, inode, path + depth);
                if (err)
                        goto cleanup;
@@ -817,8 +816,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                if (m) {
                        memmove(++fidx, path[i].p_idx - m,
                                sizeof(struct ext4_extent_idx) * m);
-                       neh->eh_entries =
-                               cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
+                       le16_add_cpu(&neh->eh_entries, m);
                }
                set_buffer_uptodate(bh);
                unlock_buffer(bh);
@@ -834,7 +832,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
                        err = ext4_ext_get_access(handle, inode, path + i);
                        if (err)
                                goto cleanup;
-                       path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
+                       le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
                        err = ext4_ext_dirty(handle, inode, path + i);
                        if (err)
                                goto cleanup;
@@ -1369,7 +1367,7 @@ int ext4_ext_try_to_merge(struct inode *inode,
                                * sizeof(struct ext4_extent);
                        memmove(ex + 1, ex + 2, len);
                }
-               eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries) - 1);
+               le16_add_cpu(&eh->eh_entries, -1);
                merge_done = 1;
                WARN_ON(eh->eh_entries == 0);
                if (!eh->eh_entries)
@@ -1560,7 +1558,7 @@ has_space:
                path[depth].p_ext = nearex;
        }
 
-       eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
+       le16_add_cpu(&eh->eh_entries, 1);
        nearex = path[depth].p_ext;
        nearex->ee_block = newext->ee_block;
        ext4_ext_store_pblock(nearex, ext_pblock(newext));
@@ -1699,7 +1697,7 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
        err = ext4_ext_get_access(handle, inode, path);
        if (err)
                return err;
-       path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
+       le16_add_cpu(&path->p_hdr->eh_entries, -1);
        err = ext4_ext_dirty(handle, inode, path);
        if (err)
                return err;
@@ -1902,7 +1900,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
                if (num == 0) {
                        /* this extent is removed; mark slot entirely unused */
                        ext4_ext_store_pblock(ex, 0);
-                       eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
+                       le16_add_cpu(&eh->eh_entries, -1);
                }
 
                ex->ee_block = cpu_to_le32(block);
@@ -1979,7 +1977,7 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
         * We start scanning from right side, freeing all the blocks
         * after i_size and walking into the tree depth-wise.
         */
-       path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
+       path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
        if (path == NULL) {
                ext4_journal_stop(handle);
                return -ENOMEM;
@@ -2138,6 +2136,82 @@ void ext4_ext_release(struct super_block *sb)
 #endif
 }
 
+static void bi_complete(struct bio *bio, int error)
+{
+       complete((struct completion *)bio->bi_private);
+}
+
+/* FIXME!! we need to try to merge to left or right after zero-out  */
+static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
+{
+       int ret = -EIO;
+       struct bio *bio;
+       int blkbits, blocksize;
+       sector_t ee_pblock;
+       struct completion event;
+       unsigned int ee_len, len, done, offset;
+
+
+       blkbits   = inode->i_blkbits;
+       blocksize = inode->i_sb->s_blocksize;
+       ee_len    = ext4_ext_get_actual_len(ex);
+       ee_pblock = ext_pblock(ex);
+
+       /* convert ee_pblock to 512 byte sectors */
+       ee_pblock = ee_pblock << (blkbits - 9);
+
+       while (ee_len > 0) {
+
+               if (ee_len > BIO_MAX_PAGES)
+                       len = BIO_MAX_PAGES;
+               else
+                       len = ee_len;
+
+               bio = bio_alloc(GFP_NOIO, len);
+               if (!bio)
+                       return -ENOMEM;
+               bio->bi_sector = ee_pblock;
+               bio->bi_bdev   = inode->i_sb->s_bdev;
+
+               done = 0;
+               offset = 0;
+               while (done < len) {
+                       ret = bio_add_page(bio, ZERO_PAGE(0),
+                                                       blocksize, offset);
+                       if (ret != blocksize) {
+                               /*
+                                * We can't add any more pages because of
+                                * hardware limitations.  Start a new bio.
+                                */
+                               break;
+                       }
+                       done++;
+                       offset += blocksize;
+                       if (offset >= PAGE_CACHE_SIZE)
+                               offset = 0;
+               }
+
+               init_completion(&event);
+               bio->bi_private = &event;
+               bio->bi_end_io = bi_complete;
+               submit_bio(WRITE, bio);
+               wait_for_completion(&event);
+
+               if (test_bit(BIO_UPTODATE, &bio->bi_flags))
+                       ret = 0;
+               else {
+                       ret = -EIO;
+                       break;
+               }
+               bio_put(bio);
+               ee_len    -= done;
+               ee_pblock += done  << (blkbits - 9);
+       }
+       return ret;
+}
+
+#define EXT4_EXT_ZERO_LEN 7
+
 /*
  * This function is called by ext4_ext_get_blocks() if someone tries to write
  * to an uninitialized extent. It may result in splitting the uninitialized
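The new ext4_ext_zeroout() above converts the extent's starting filesystem block into 512-byte sectors before building and submitting bios against ZERO_PAGE. A standalone sketch of just that block-to-sector arithmetic, with an assumed 4 KiB block size and made-up extent values:

#include <stdio.h>

int main(void)
{
        unsigned int blkbits = 12;               /* assumed: 4096-byte blocks */
        unsigned long long ee_pblock = 1000;     /* extent start, in fs blocks */
        unsigned int ee_len = 16;                /* extent length, in fs blocks */

        /* One filesystem block is 2^(blkbits - 9) sectors of 512 bytes. */
        unsigned long long first_sector = ee_pblock << (blkbits - 9);
        unsigned long long nr_sectors = (unsigned long long)ee_len << (blkbits - 9);

        printf("zero out %llu sectors starting at sector %llu\n",
               nr_sectors, first_sector);        /* 128 sectors at 8000 */
        return 0;
}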
@@ -2154,7 +2228,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                                                ext4_lblk_t iblock,
                                                unsigned long max_blocks)
 {
-       struct ext4_extent *ex, newex;
+       struct ext4_extent *ex, newex, orig_ex;
        struct ext4_extent *ex1 = NULL;
        struct ext4_extent *ex2 = NULL;
        struct ext4_extent *ex3 = NULL;
@@ -2173,10 +2247,26 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        allocated = ee_len - (iblock - ee_block);
        newblock = iblock - ee_block + ext_pblock(ex);
        ex2 = ex;
+       orig_ex.ee_block = ex->ee_block;
+       orig_ex.ee_len   = cpu_to_le16(ee_len);
+       ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
 
        err = ext4_ext_get_access(handle, inode, path + depth);
        if (err)
                goto out;
+       /* If the extent has no more than 2*EXT4_EXT_ZERO_LEN blocks, zero out directly */
+       if (ee_len <= 2*EXT4_EXT_ZERO_LEN) {
+               err =  ext4_ext_zeroout(inode, &orig_ex);
+               if (err)
+                       goto fix_extent_len;
+               /* update the extent length and mark as initialized */
+               ex->ee_block = orig_ex.ee_block;
+               ex->ee_len   = orig_ex.ee_len;
+               ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+               ext4_ext_dirty(handle, inode, path + depth);
+               /* zeroed the full extent */
+               return allocated;
+       }
 
        /* ex1: ee_block to iblock - 1 : uninitialized */
        if (iblock > ee_block) {
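EXT4_EXT_ZERO_LEN is defined as 7 in the hunk above, so an uninitialized extent of at most 2 * 7 = 14 blocks is zeroed in place rather than split; presumably writing that many blocks of zeroes is assumed cheaper than growing the extent tree with split pieces. A trivial sketch of the amount of data involved, assuming a 4 KiB block size:

#include <stdio.h>

#define EXT4_EXT_ZERO_LEN 7     /* value taken from the patch */

int main(void)
{
        unsigned int blocksize = 4096;                   /* assumed block size */
        unsigned int max_blocks_zeroed = 2 * EXT4_EXT_ZERO_LEN;

        printf("extents up to %u blocks (%u bytes) are zeroed whole\n",
               max_blocks_zeroed, max_blocks_zeroed * blocksize);  /* 14, 57344 */
        return 0;
}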
@@ -2195,19 +2285,103 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        /* ex3: to ee_block + ee_len : uninitialised */
        if (allocated > max_blocks) {
                unsigned int newdepth;
+               /* If extent has no more than EXT4_EXT_ZERO_LEN blocks, zero out directly */
+               if (allocated <= EXT4_EXT_ZERO_LEN) {
+                       /* Mark first half uninitialized.
+                        * Mark second half initialized and zero out the
+                        * initialized extent
+                        */
+                       ex->ee_block = orig_ex.ee_block;
+                       ex->ee_len   = cpu_to_le16(ee_len - allocated);
+                       ext4_ext_mark_uninitialized(ex);
+                       ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+                       ext4_ext_dirty(handle, inode, path + depth);
+
+                       ex3 = &newex;
+                       ex3->ee_block = cpu_to_le32(iblock);
+                       ext4_ext_store_pblock(ex3, newblock);
+                       ex3->ee_len = cpu_to_le16(allocated);
+                       err = ext4_ext_insert_extent(handle, inode, path, ex3);
+                       if (err == -ENOSPC) {
+                               err =  ext4_ext_zeroout(inode, &orig_ex);
+                               if (err)
+                                       goto fix_extent_len;
+                               ex->ee_block = orig_ex.ee_block;
+                               ex->ee_len   = orig_ex.ee_len;
+                               ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+                               ext4_ext_dirty(handle, inode, path + depth);
+                               /* zeroed the full extent */
+                               return allocated;
+
+                       } else if (err)
+                               goto fix_extent_len;
+
+                       /*
+                        * We need to zero out the second half because
+                        * a fallocate request can update the file size, and
+                        * converting the second half to an initialized extent
+                        * without zeroing it could leak junk data to user
+                        * space.
+                        */
+                       err =  ext4_ext_zeroout(inode, ex3);
+                       if (err) {
+                               /*
+                                * We should actually mark the second half
+                                * as uninitialized and return the error; the
+                                * insert above may have changed the extent tree.
+                                */
+                               depth = ext_depth(inode);
+                               ext4_ext_drop_refs(path);
+                               path = ext4_ext_find_extent(inode,
+                                                               iblock, path);
+                               if (IS_ERR(path)) {
+                                       err = PTR_ERR(path);
+                                       return err;
+                               }
+                               ex = path[depth].p_ext;
+                               err = ext4_ext_get_access(handle, inode,
+                                                               path + depth);
+                               if (err)
+                                       return err;
+                               ext4_ext_mark_uninitialized(ex);
+                               ext4_ext_dirty(handle, inode, path + depth);
+                               return err;
+                       }
+
+                       /* zeroed the second half */
+                       return allocated;
+               }
                ex3 = &newex;
                ex3->ee_block = cpu_to_le32(iblock + max_blocks);
                ext4_ext_store_pblock(ex3, newblock + max_blocks);
                ex3->ee_len = cpu_to_le16(allocated - max_blocks);
                ext4_ext_mark_uninitialized(ex3);
                err = ext4_ext_insert_extent(handle, inode, path, ex3);
-               if (err)
-                       goto out;
+               if (err == -ENOSPC) {
+                       err =  ext4_ext_zeroout(inode, &orig_ex);
+                       if (err)
+                               goto fix_extent_len;
+                       /* update the extent length and mark as initialized */
+                       ex->ee_block = orig_ex.ee_block;
+                       ex->ee_len   = orig_ex.ee_len;
+                       ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+                       ext4_ext_dirty(handle, inode, path + depth);
+                       /* zeroed the full extent */
+                       return allocated;
+
+               } else if (err)
+                       goto fix_extent_len;
                /*
                 * The depth, and hence eh & ex might change
                 * as part of the insert above.
                 */
                newdepth = ext_depth(inode);
+               /*
+                * update the extent length after successful insert of the
+                * split extent
+                */
+               orig_ex.ee_len = cpu_to_le16(ee_len -
+                                               ext4_ext_get_actual_len(ex3));
                if (newdepth != depth) {
                        depth = newdepth;
                        ext4_ext_drop_refs(path);
@@ -2226,6 +2400,24 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
                                goto out;
                }
                allocated = max_blocks;
+
+               /* If the extent has at most EXT4_EXT_ZERO_LEN blocks and we are
+                * inserting an extent in the middle, zero out directly;
+                * otherwise give the extent a chance to merge to the left.
+                */
+               if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
+                                                       iblock != ee_block) {
+                       err =  ext4_ext_zeroout(inode, &orig_ex);
+                       if (err)
+                               goto fix_extent_len;
+                       /* update the extent length and mark as initialized */
+                       ex->ee_block = orig_ex.ee_block;
+                       ex->ee_len   = orig_ex.ee_len;
+                       ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+                       ext4_ext_dirty(handle, inode, path + depth);
+                       /* zero out the first half */
+                       return allocated;
+               }
        }
        /*
         * If there was a change of depth as part of the
@@ -2282,8 +2474,29 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
        goto out;
 insert:
        err = ext4_ext_insert_extent(handle, inode, path, &newex);
+       if (err == -ENOSPC) {
+               err =  ext4_ext_zeroout(inode, &orig_ex);
+               if (err)
+                       goto fix_extent_len;
+               /* update the extent length and mark as initialized */
+               ex->ee_block = orig_ex.ee_block;
+               ex->ee_len   = orig_ex.ee_len;
+               ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+               ext4_ext_dirty(handle, inode, path + depth);
+               /* zero out the first half */
+               return allocated;
+       } else if (err)
+               goto fix_extent_len;
 out:
        return err ? err : allocated;
+
+fix_extent_len:
+       ex->ee_block = orig_ex.ee_block;
+       ex->ee_len   = orig_ex.ee_len;
+       ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
+       ext4_ext_mark_uninitialized(ex);
+       ext4_ext_dirty(handle, inode, path + depth);
+       return err;
 }
 
 /*
@@ -2393,8 +2606,20 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
                        }
                        if (create == EXT4_CREATE_UNINITIALIZED_EXT)
                                goto out;
-                       if (!create)
+                       if (!create) {
+                               /*
+                                * We have blocks reserved already.  We
+                                * return allocated blocks so that delalloc
+                                * won't do block reservation for us.  But
+                                * the buffer head will be unmapped so that
+                                * a read from the block returns 0s.
+                                */
+                               if (allocated > max_blocks)
+                                       allocated = max_blocks;
+                               /* mark the buffer unwritten */
+                               __set_bit(BH_Unwritten, &bh_result->b_state);
                                goto out2;
+                       }
 
                        ret = ext4_ext_convert_to_initialized(handle, inode,
                                                                path, iblock,
@@ -2584,6 +2809,8 @@ out_stop:
                ext4_orphan_del(handle, inode);
 
        up_write(&EXT4_I(inode)->i_data_sem);
+       inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+       ext4_mark_inode_dirty(handle, inode);
        ext4_journal_stop(handle);
 }
 
@@ -2608,6 +2835,28 @@ int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
        return needed;
 }
 
+static void ext4_falloc_update_inode(struct inode *inode,
+                               int mode, loff_t new_size, int update_ctime)
+{
+       struct timespec now;
+
+       if (update_ctime) {
+               now = current_fs_time(inode->i_sb);
+               if (!timespec_equal(&inode->i_ctime, &now))
+                       inode->i_ctime = now;
+       }
+       /*
+        * Update only when preallocation was requested beyond
+        * the file size.
+        */
+       if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+                               new_size > i_size_read(inode)) {
+               i_size_write(inode, new_size);
+               EXT4_I(inode)->i_disksize = new_size;
+       }
+
+}
+
 /*
  * preallocate space for a file. This implements ext4's fallocate inode
  * operation, which gets called from sys_fallocate system call.
@@ -2619,8 +2868,8 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
 {
        handle_t *handle;
        ext4_lblk_t block;
+       loff_t new_size;
        unsigned long max_blocks;
-       ext4_fsblk_t nblocks = 0;
        int ret = 0;
        int ret2 = 0;
        int retries = 0;
@@ -2639,9 +2888,12 @@ long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
                return -ENODEV;
 
        block = offset >> blkbits;
+       /*
+        * We can't just convert len to max_blocks: with, e.g.,
+        * blocksize = 4096, offset = 3072 and len = 2048 the range
+        * still spans two blocks.
+        */
        max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
-                       - block;
-
+                                                       - block;
        /*
         * credits to insert 1 extent into extent tree + buffers to be able to
         * modify 1 super block, 1 block bitmap and 1 group descriptor.
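The max_blocks computation above rounds the end of the requested byte range up to a block boundary before subtracting the starting block number. Working through the comment's own numbers (blocksize 4096, offset 3072, len 2048) in a standalone sketch; BLOCK_ALIGN here is a plain round-up macro standing in for the kernel's EXT4_BLOCK_ALIGN():

#include <stdio.h>

/* Round-up helper standing in for EXT4_BLOCK_ALIGN(size, blkbits). */
#define BLOCK_ALIGN(size, blkbits) \
        ((((size) + (1ULL << (blkbits)) - 1) >> (blkbits)) << (blkbits))

int main(void)
{
        unsigned int blkbits = 12;                  /* blocksize = 4096 */
        unsigned long long offset = 3072, len = 2048;

        unsigned long long block = offset >> blkbits;                      /* 0 */
        unsigned long long max_blocks =
                (BLOCK_ALIGN(offset + len, blkbits) >> blkbits) - block;   /* 2 */

        /* len >> blkbits alone would be 0, yet the range touches two blocks. */
        printf("block=%llu max_blocks=%llu len>>blkbits=%llu\n",
               block, max_blocks, len >> blkbits);
        return 0;
}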
@@ -2657,7 +2909,6 @@ retry:
                        ret = PTR_ERR(handle);
                        break;
                }
-
                ret = ext4_get_blocks_wrap(handle, inode, block,
                                          max_blocks, &map_bh,
                                          EXT4_CREATE_UNINITIALIZED_EXT, 0);
@@ -2673,61 +2924,24 @@ retry:
                        ret2 = ext4_journal_stop(handle);
                        break;
                }
-               if (ret > 0) {
-                       /* check wrap through sign-bit/zero here */
-                       if ((block + ret) < 0 || (block + ret) < block) {
-                               ret = -EIO;
-                               ext4_mark_inode_dirty(handle, inode);
-                               ret2 = ext4_journal_stop(handle);
-                               break;
-                       }
-                       if (buffer_new(&map_bh) && ((block + ret) >
-                           (EXT4_BLOCK_ALIGN(i_size_read(inode), blkbits)
-                           >> blkbits)))
-                                       nblocks = nblocks + ret;
-               }
-
-               /* Update ctime if new blocks get allocated */
-               if (nblocks) {
-                       struct timespec now;
-
-                       now = current_fs_time(inode->i_sb);
-                       if (!timespec_equal(&inode->i_ctime, &now))
-                               inode->i_ctime = now;
-               }
+               if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
+                                               blkbits) >> blkbits))
+                       new_size = offset + len;
+               else
+                       new_size = (block + ret) << blkbits;
 
+               ext4_falloc_update_inode(inode, mode, new_size,
+                                               buffer_new(&map_bh));
                ext4_mark_inode_dirty(handle, inode);
                ret2 = ext4_journal_stop(handle);
                if (ret2)
                        break;
        }
-
-       if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+       if (ret == -ENOSPC &&
+                       ext4_should_retry_alloc(inode->i_sb, &retries)) {
+               ret = 0;
                goto retry;
-
-       /*
-        * Time to update the file size.
-        * Update only when preallocation was requested beyond the file size.
-        */
-       if (!(mode & FALLOC_FL_KEEP_SIZE) &&
-           (offset + len) > i_size_read(inode)) {
-               if (ret > 0) {
-                       /*
-                        * if no error, we assume preallocation succeeded
-                        * completely
-                        */
-                       i_size_write(inode, offset + len);
-                       EXT4_I(inode)->i_disksize = i_size_read(inode);
-               } else if (ret < 0 && nblocks) {
-                       /* Handle partial allocation scenario */
-                       loff_t newsize;
-
-                       newsize  = (nblocks << blkbits) + i_size_read(inode);
-                       i_size_write(inode, EXT4_BLOCK_ALIGN(newsize, blkbits));
-                       EXT4_I(inode)->i_disksize = i_size_read(inode);
-               }
        }
-
        mutex_unlock(&inode->i_mutex);
        return ret > 0 ? ret2 : ret;
 }
index ac35ec58db55c9e9fe4b4d3effde74274b614b25..4159be6366ab8c7fb39493827765544d818b0ecc 100644 (file)
@@ -21,8 +21,8 @@
 #include <linux/time.h>
 #include <linux/fs.h>
 #include <linux/jbd2.h>
-#include <linux/ext4_fs.h>
-#include <linux/ext4_jbd2.h>
+#include "ext4.h"
+#include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
 
@@ -129,7 +129,7 @@ const struct file_operations ext4_file_operations = {
        .write          = do_sync_write,
        .aio_read       = generic_file_aio_read,
        .aio_write      = ext4_file_write,
-       .ioctl          = ext4_ioctl,
+       .unlocked_ioctl = ext4_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = ext4_compat_ioctl,
 #endif
index 8d50879d1c2c68f23284314ff4fc7c5afe3c0a8e..1c8ba48d4f8d6014e5708b44706640e3e29a11e9 100644 (file)
@@ -27,8 +27,8 @@
 #include <linux/sched.h>
 #include <linux/writeback.h>
 #include <linux/jbd2.h>
-#include <linux/ext4_fs.h>
-#include <linux/ext4_jbd2.h>
+#include "ext4.h"
+#include "ext4_jbd2.h"
 
 /*
  * akpm: A new design for ext4_sync_file().
@@ -72,6 +72,9 @@ int ext4_sync_file(struct file * file, struct dentry *dentry, int datasync)
                goto out;
        }
 
+       if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
+               goto out;
+
        /*
         * The VFS has written the file data.  If the inode is unaltered
         * then we need not start a commit.
index 1555024e3b36b26a56f8a7c47f191740171f9934..1d6329dbe3906551929c4c8ea61e1878edfde08d 100644 (file)
@@ -11,8 +11,8 @@
 
 #include <linux/fs.h>
 #include <linux/jbd2.h>
-#include <linux/ext4_fs.h>
 #include <linux/cryptohash.h>
+#include "ext4.h"
 
 #define DELTA 0x9E3779B9
 
index 486e46a3918de44a0534db6553058c01e7cf6ef2..c6efbab0c80187942dfcedc867d514c76badcefc 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/time.h>
 #include <linux/fs.h>
 #include <linux/jbd2.h>
-#include <linux/ext4_fs.h>
-#include <linux/ext4_jbd2.h>
 #include <linux/stat.h>
 #include <linux/string.h>
 #include <linux/quotaops.h>
@@ -25,7 +23,8 @@
 #include <linux/bitops.h>
 #include <linux/blkdev.h>
 #include <asm/byteorder.h>
-
+#include "ext4.h"
+#include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
 #include "group.h"
@@ -75,7 +74,7 @@ unsigned ext4_init_inode_bitmap(struct super_block *sb, struct buffer_head *bh,
        /* If checksum is bad mark all blocks and inodes use to prevent
         * allocation, essentially implementing a per-group read-only flag. */
        if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
-               ext4_error(sb, __FUNCTION__, "Checksum bad for group %lu\n",
+               ext4_error(sb, __func__, "Checksum bad for group %lu\n",
                           block_group);
                gdp->bg_free_blocks_count = 0;
                gdp->bg_free_inodes_count = 0;
@@ -223,11 +222,9 @@ void ext4_free_inode (handle_t *handle, struct inode * inode)
 
                if (gdp) {
                        spin_lock(sb_bgl_lock(sbi, block_group));
-                       gdp->bg_free_inodes_count = cpu_to_le16(
-                               le16_to_cpu(gdp->bg_free_inodes_count) + 1);
+                       le16_add_cpu(&gdp->bg_free_inodes_count, 1);
                        if (is_directory)
-                               gdp->bg_used_dirs_count = cpu_to_le16(
-                                 le16_to_cpu(gdp->bg_used_dirs_count) - 1);
+                               le16_add_cpu(&gdp->bg_used_dirs_count, -1);
                        gdp->bg_checksum = ext4_group_desc_csum(sbi,
                                                        block_group, gdp);
                        spin_unlock(sb_bgl_lock(sbi, block_group));
@@ -588,7 +585,7 @@ got:
        ino++;
        if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
            ino > EXT4_INODES_PER_GROUP(sb)) {
-               ext4_error(sb, __FUNCTION__,
+               ext4_error(sb, __func__,
                           "reserved inode or inode > inodes count - "
                           "block_group = %lu, inode=%lu", group,
                           ino + group * EXT4_INODES_PER_GROUP(sb));
@@ -664,11 +661,9 @@ got:
                                cpu_to_le16(EXT4_INODES_PER_GROUP(sb) - ino);
        }
 
-       gdp->bg_free_inodes_count =
-               cpu_to_le16(le16_to_cpu(gdp->bg_free_inodes_count) - 1);
+       le16_add_cpu(&gdp->bg_free_inodes_count, -1);
        if (S_ISDIR(mode)) {
-               gdp->bg_used_dirs_count =
-                       cpu_to_le16(le16_to_cpu(gdp->bg_used_dirs_count) + 1);
+               le16_add_cpu(&gdp->bg_used_dirs_count, 1);
        }
        gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
        spin_unlock(sb_bgl_lock(sbi, group));
@@ -744,23 +739,24 @@ got:
        if (err)
                goto fail_free_drop;
 
-       err = ext4_mark_inode_dirty(handle, inode);
-       if (err) {
-               ext4_std_error(sb, err);
-               goto fail_free_drop;
-       }
        if (test_opt(sb, EXTENTS)) {
-               /* set extent flag only for directory and file */
-               if (S_ISDIR(mode) || S_ISREG(mode)) {
+               /* set extent flag only for directory, file and normal symlink */
+               if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
                        EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
                        ext4_ext_tree_init(handle, inode);
                        err = ext4_update_incompat_feature(handle, sb,
                                        EXT4_FEATURE_INCOMPAT_EXTENTS);
                        if (err)
-                               goto fail;
+                               goto fail_free_drop;
                }
        }
 
+       err = ext4_mark_inode_dirty(handle, inode);
+       if (err) {
+               ext4_std_error(sb, err);
+               goto fail_free_drop;
+       }
+
        ext4_debug("allocating inode %lu\n", inode->i_ino);
        goto really_out;
 fail:
@@ -796,7 +792,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
 
        /* Error cases - e2fsck has already cleaned up for us */
        if (ino > max_ino) {
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "bad orphan ino %lu!  e2fsck was run?", ino);
                goto error;
        }
@@ -805,7 +801,7 @@ struct inode *ext4_orphan_get(struct super_block *sb, unsigned long ino)
        bit = (ino - 1) % EXT4_INODES_PER_GROUP(sb);
        bitmap_bh = read_inode_bitmap(sb, block_group);
        if (!bitmap_bh) {
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "inode bitmap error for orphan %lu", ino);
                goto error;
        }
@@ -830,7 +826,7 @@ iget_failed:
        err = PTR_ERR(inode);
        inode = NULL;
 bad_orphan:
-       ext4_warning(sb, __FUNCTION__,
+       ext4_warning(sb, __func__,
                     "bad orphan inode %lu!  e2fsck was run?", ino);
        printk(KERN_NOTICE "ext4_test_bit(bit=%d, block=%llu) = %d\n",
               bit, (unsigned long long)bitmap_bh->b_blocknr,
index 8fab233cb05fd6bdc8538c7ee4ad6990c2fded70..8d9707746413d90bc5bf019ea6e2dacad91842c2 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/module.h>
 #include <linux/fs.h>
 #include <linux/time.h>
-#include <linux/ext4_jbd2.h>
 #include <linux/jbd2.h>
 #include <linux/highuid.h>
 #include <linux/pagemap.h>
@@ -36,6 +35,7 @@
 #include <linux/mpage.h>
 #include <linux/uio.h>
 #include <linux/bio.h>
+#include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
 
@@ -93,7 +93,7 @@ int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
        BUFFER_TRACE(bh, "call ext4_journal_revoke");
        err = ext4_journal_revoke(handle, blocknr, bh);
        if (err)
-               ext4_abort(inode->i_sb, __FUNCTION__,
+               ext4_abort(inode->i_sb, __func__,
                           "error %d when attempting revoke", err);
        BUFFER_TRACE(bh, "exit");
        return err;
@@ -985,6 +985,16 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
        } else {
                retval = ext4_get_blocks_handle(handle, inode, block,
                                max_blocks, bh, create, extend_disksize);
+
+               if (retval > 0 && buffer_new(bh)) {
+                       /*
+                        * We allocated new blocks which will result in
+                        * i_data's format changing.  Force the migrate
+                        * to fail by clearing migrate flags
+                        */
+                       EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
+                                                       ~EXT4_EXT_MIGRATE;
+               }
        }
        up_write((&EXT4_I(inode)->i_data_sem));
        return retval;
@@ -1230,7 +1240,7 @@ int ext4_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
 {
        int err = jbd2_journal_dirty_data(handle, bh);
        if (err)
-               ext4_journal_abort_handle(__FUNCTION__, __FUNCTION__,
+               ext4_journal_abort_handle(__func__, __func__,
                                                bh, handle, err);
        return err;
 }
@@ -1301,10 +1311,11 @@ static int ext4_ordered_write_end(struct file *file,
                new_i_size = pos + copied;
                if (new_i_size > EXT4_I(inode)->i_disksize)
                        EXT4_I(inode)->i_disksize = new_i_size;
-               copied = ext4_generic_write_end(file, mapping, pos, len, copied,
+               ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
-               if (copied < 0)
-                       ret = copied;
+               copied = ret2;
+               if (ret2 < 0)
+                       ret = ret2;
        }
        ret2 = ext4_journal_stop(handle);
        if (!ret)
@@ -1329,10 +1340,11 @@ static int ext4_writeback_write_end(struct file *file,
        if (new_i_size > EXT4_I(inode)->i_disksize)
                EXT4_I(inode)->i_disksize = new_i_size;
 
-       copied = ext4_generic_write_end(file, mapping, pos, len, copied,
+       ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
-       if (copied < 0)
-               ret = copied;
+       copied = ret2;
+       if (ret2 < 0)
+               ret = ret2;
 
        ret2 = ext4_journal_stop(handle);
        if (!ret)
@@ -2501,12 +2513,10 @@ out_stop:
 static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
                unsigned long ino, struct ext4_iloc *iloc)
 {
-       unsigned long desc, group_desc;
        ext4_group_t block_group;
        unsigned long offset;
        ext4_fsblk_t block;
-       struct buffer_head *bh;
-       struct ext4_group_desc * gdp;
+       struct ext4_group_desc *gdp;
 
        if (!ext4_valid_inum(sb, ino)) {
                /*
@@ -2518,22 +2528,10 @@ static ext4_fsblk_t ext4_get_inode_block(struct super_block *sb,
        }
 
        block_group = (ino - 1) / EXT4_INODES_PER_GROUP(sb);
-       if (block_group >= EXT4_SB(sb)->s_groups_count) {
-               ext4_error(sb,"ext4_get_inode_block","group >= groups count");
+       gdp = ext4_get_group_desc(sb, block_group, NULL);
+       if (!gdp)
                return 0;
-       }
-       smp_rmb();
-       group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
-       desc = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
-       bh = EXT4_SB(sb)->s_group_desc[group_desc];
-       if (!bh) {
-               ext4_error (sb, "ext4_get_inode_block",
-                           "Descriptor not loaded");
-               return 0;
-       }
 
-       gdp = (struct ext4_group_desc *)((__u8 *)bh->b_data +
-               desc * EXT4_DESC_SIZE(sb));
        /*
         * Figure out the offset within the block group inode table
         */
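
The rewritten ext4_get_inode_block() above keeps only the inode-number arithmetic and delegates descriptor lookup to ext4_get_group_desc(). A stand-alone sketch of that arithmetic, using made-up geometry rather than values read from a real superblock:

#include <stdio.h>

int main(void)
{
        /* illustrative geometry only, not taken from a live ext4 filesystem */
        const unsigned long inodes_per_group = 8192;
        const unsigned long inode_size = 256;        /* bytes per on-disk inode */
        const unsigned long block_size = 4096;
        const unsigned long ino = 20000;             /* inode numbers start at 1 */

        unsigned long block_group = (ino - 1) / inodes_per_group;
        unsigned long index      = (ino - 1) % inodes_per_group;
        unsigned long byte_off   = index * inode_size;

        printf("ino %lu -> group %lu, entry %lu, table block %lu, offset %lu\n",
               ino, block_group, index,
               byte_off / block_size, byte_off % block_size);
        return 0;
}
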
@@ -2976,7 +2974,8 @@ static int ext4_do_update_inode(handle_t *handle,
        if (ext4_inode_blocks_set(handle, raw_inode, ei))
                goto out_brelse;
        raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
-       raw_inode->i_flags = cpu_to_le32(ei->i_flags);
+       /* clear the migrate flag in the raw_inode */
+       raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE);
        if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
            cpu_to_le32(EXT4_OS_HURD))
                raw_inode->i_file_acl_high =
@@ -3374,7 +3373,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
                                EXT4_I(inode)->i_state |= EXT4_STATE_NO_EXPAND;
                                if (mnt_count !=
                                        le16_to_cpu(sbi->s_es->s_mnt_count)) {
-                                       ext4_warning(inode->i_sb, __FUNCTION__,
+                                       ext4_warning(inode->i_sb, __func__,
                                        "Unable to expand inode %lu. Delete"
                                        " some EAs or run e2fsck.",
                                        inode->i_ino);
@@ -3415,7 +3414,7 @@ void ext4_dirty_inode(struct inode *inode)
                current_handle->h_transaction != handle->h_transaction) {
                /* This task has a transaction open against a different fs */
                printk(KERN_EMERG "%s: transactions do not match!\n",
-                      __FUNCTION__);
+                      __func__);
        } else {
                jbd_debug(5, "marking dirty.  outer handle=%p\n",
                                current_handle);
index 25b13ede8086c606a4b1320c2a364889434c7b7c..7a6c2f1faba607e4a41b5d2d10b3c7f39655268f 100644 (file)
 #include <linux/fs.h>
 #include <linux/jbd2.h>
 #include <linux/capability.h>
-#include <linux/ext4_fs.h>
-#include <linux/ext4_jbd2.h>
 #include <linux/time.h>
 #include <linux/compat.h>
 #include <linux/smp_lock.h>
 #include <linux/mount.h>
 #include <asm/uaccess.h>
+#include "ext4_jbd2.h"
+#include "ext4.h"
 
-int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
-               unsigned long arg)
+long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
+       struct inode *inode = filp->f_dentry->d_inode;
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned int flags;
        unsigned short rsv_window_size;
@@ -277,9 +277,6 @@ setversion_out:
 #ifdef CONFIG_COMPAT
 long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
-       struct inode *inode = file->f_path.dentry->d_inode;
-       int ret;
-
        /* These are just misnamed, they actually get/put from/to user an int */
        switch (cmd) {
        case EXT4_IOC32_GETFLAGS:
@@ -319,9 +316,6 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        default:
                return -ENOIOCTLCMD;
        }
-       lock_kernel();
-       ret = ext4_ioctl(inode, file, cmd, (unsigned long) compat_ptr(arg));
-       unlock_kernel();
-       return ret;
+       return ext4_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
 }
 #endif
index ef97f19c2f9d7a51d68ccb281ca982e79a53e823..fbec2ef93797efb9bf6ac16f2a9465fc198dc018 100644 (file)
  * mballoc.c contains the multiblocks allocation routines
  */
 
-#include <linux/time.h>
-#include <linux/fs.h>
-#include <linux/namei.h>
-#include <linux/ext4_jbd2.h>
-#include <linux/ext4_fs.h>
-#include <linux/quotaops.h>
-#include <linux/buffer_head.h>
-#include <linux/module.h>
-#include <linux/swap.h>
-#include <linux/proc_fs.h>
-#include <linux/pagemap.h>
-#include <linux/seq_file.h>
-#include <linux/version.h>
-#include "group.h"
-
+#include "mballoc.h"
 /*
  * MUSTDO:
  *   - test ext4_ext_search_left() and ext4_ext_search_right()
  *
  */
 
-/*
- * with AGGRESSIVE_CHECK allocator runs consistency checks over
- * structures. these checks slow things down a lot
- */
-#define AGGRESSIVE_CHECK__
-
-/*
- * with DOUBLE_CHECK defined mballoc creates persistent in-core
- * bitmaps, maintains and uses them to check for double allocations
- */
-#define DOUBLE_CHECK__
-
-/*
- */
-#define MB_DEBUG__
-#ifdef MB_DEBUG
-#define mb_debug(fmt, a...)    printk(fmt, ##a)
-#else
-#define mb_debug(fmt, a...)
-#endif
-
-/*
- * with EXT4_MB_HISTORY mballoc stores last N allocations in memory
- * and you can monitor it in /proc/fs/ext4/<dev>/mb_history
- */
-#define EXT4_MB_HISTORY
-#define EXT4_MB_HISTORY_ALLOC          1       /* allocation */
-#define EXT4_MB_HISTORY_PREALLOC       2       /* preallocated blocks used */
-#define EXT4_MB_HISTORY_DISCARD                4       /* preallocation discarded */
-#define EXT4_MB_HISTORY_FREE           8       /* free */
-
-#define EXT4_MB_HISTORY_DEFAULT                (EXT4_MB_HISTORY_ALLOC | \
-                                        EXT4_MB_HISTORY_PREALLOC)
-
-/*
- * How long mballoc can look for a best extent (in found extents)
- */
-#define MB_DEFAULT_MAX_TO_SCAN         200
-
-/*
- * How long mballoc must look for a best extent
- */
-#define MB_DEFAULT_MIN_TO_SCAN         10
-
-/*
- * How many groups mballoc will scan looking for the best chunk
- */
-#define MB_DEFAULT_MAX_GROUPS_TO_SCAN  5
-
-/*
- * with 'ext4_mb_stats' allocator will collect stats that will be
- * shown at umount. The collecting costs though!
- */
-#define MB_DEFAULT_STATS               1
-
-/*
- * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
- * by the stream allocator, which purpose is to pack requests
- * as close each to other as possible to produce smooth I/O traffic
- * We use locality group prealloc space for stream request.
- * We can tune the same via /proc/fs/ext4/<parition>/stream_req
- */
-#define MB_DEFAULT_STREAM_THRESHOLD    16      /* 64K */
-
-/*
- * for which requests use 2^N search using buddies
- */
-#define MB_DEFAULT_ORDER2_REQS         2
-
-/*
- * default group prealloc size 512 blocks
- */
-#define MB_DEFAULT_GROUP_PREALLOC      512
-
-static struct kmem_cache *ext4_pspace_cachep;
-static struct kmem_cache *ext4_ac_cachep;
-
-#ifdef EXT4_BB_MAX_BLOCKS
-#undef EXT4_BB_MAX_BLOCKS
-#endif
-#define EXT4_BB_MAX_BLOCKS     30
-
-struct ext4_free_metadata {
-       ext4_group_t group;
-       unsigned short num;
-       ext4_grpblk_t  blocks[EXT4_BB_MAX_BLOCKS];
-       struct list_head list;
-};
-
-struct ext4_group_info {
-       unsigned long   bb_state;
-       unsigned long   bb_tid;
-       struct ext4_free_metadata *bb_md_cur;
-       unsigned short  bb_first_free;
-       unsigned short  bb_free;
-       unsigned short  bb_fragments;
-       struct          list_head bb_prealloc_list;
-#ifdef DOUBLE_CHECK
-       void            *bb_bitmap;
-#endif
-       unsigned short  bb_counters[];
-};
-
-#define EXT4_GROUP_INFO_NEED_INIT_BIT  0
-#define EXT4_GROUP_INFO_LOCKED_BIT     1
-
-#define EXT4_MB_GRP_NEED_INIT(grp)     \
-       (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
-
-
-struct ext4_prealloc_space {
-       struct list_head        pa_inode_list;
-       struct list_head        pa_group_list;
-       union {
-               struct list_head pa_tmp_list;
-               struct rcu_head pa_rcu;
-       } u;
-       spinlock_t              pa_lock;
-       atomic_t                pa_count;
-       unsigned                pa_deleted;
-       ext4_fsblk_t            pa_pstart;      /* phys. block */
-       ext4_lblk_t             pa_lstart;      /* log. block */
-       unsigned short          pa_len;         /* len of preallocated chunk */
-       unsigned short          pa_free;        /* how many blocks are free */
-       unsigned short          pa_linear;      /* consumed in one direction
-                                                * strictly, for grp prealloc */
-       spinlock_t              *pa_obj_lock;
-       struct inode            *pa_inode;      /* hack, for history only */
-};
-
-
-struct ext4_free_extent {
-       ext4_lblk_t fe_logical;
-       ext4_grpblk_t fe_start;
-       ext4_group_t fe_group;
-       int fe_len;
-};
-
-/*
- * Locality group:
- *   we try to group all related changes together
- *   so that writeback can flush/allocate them together as well
- */
-struct ext4_locality_group {
-       /* for allocator */
-       struct mutex            lg_mutex;       /* to serialize allocates */
-       struct list_head        lg_prealloc_list;/* list of preallocations */
-       spinlock_t              lg_prealloc_lock;
-};
-
-struct ext4_allocation_context {
-       struct inode *ac_inode;
-       struct super_block *ac_sb;
-
-       /* original request */
-       struct ext4_free_extent ac_o_ex;
-
-       /* goal request (after normalization) */
-       struct ext4_free_extent ac_g_ex;
-
-       /* the best found extent */
-       struct ext4_free_extent ac_b_ex;
-
-       /* copy of the bext found extent taken before preallocation efforts */
-       struct ext4_free_extent ac_f_ex;
-
-       /* number of iterations done. we have to track to limit searching */
-       unsigned long ac_ex_scanned;
-       __u16 ac_groups_scanned;
-       __u16 ac_found;
-       __u16 ac_tail;
-       __u16 ac_buddy;
-       __u16 ac_flags;         /* allocation hints */
-       __u8 ac_status;
-       __u8 ac_criteria;
-       __u8 ac_repeats;
-       __u8 ac_2order;         /* if request is to allocate 2^N blocks and
-                                * N > 0, the field stores N, otherwise 0 */
-       __u8 ac_op;             /* operation, for history only */
-       struct page *ac_bitmap_page;
-       struct page *ac_buddy_page;
-       struct ext4_prealloc_space *ac_pa;
-       struct ext4_locality_group *ac_lg;
-};
-
-#define AC_STATUS_CONTINUE     1
-#define AC_STATUS_FOUND                2
-#define AC_STATUS_BREAK                3
-
-struct ext4_mb_history {
-       struct ext4_free_extent orig;   /* orig allocation */
-       struct ext4_free_extent goal;   /* goal allocation */
-       struct ext4_free_extent result; /* result allocation */
-       unsigned pid;
-       unsigned ino;
-       __u16 found;    /* how many extents have been found */
-       __u16 groups;   /* how many groups have been scanned */
-       __u16 tail;     /* what tail broke some buddy */
-       __u16 buddy;    /* buddy the tail ^^^ broke */
-       __u16 flags;
-       __u8 cr:3;      /* which phase the result extent was found at */
-       __u8 op:4;
-       __u8 merged:1;
-};
-
-struct ext4_buddy {
-       struct page *bd_buddy_page;
-       void *bd_buddy;
-       struct page *bd_bitmap_page;
-       void *bd_bitmap;
-       struct ext4_group_info *bd_info;
-       struct super_block *bd_sb;
-       __u16 bd_blkbits;
-       ext4_group_t bd_group;
-};
-#define EXT4_MB_BITMAP(e4b)    ((e4b)->bd_bitmap)
-#define EXT4_MB_BUDDY(e4b)     ((e4b)->bd_buddy)
-
-#ifndef EXT4_MB_HISTORY
-static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
-{
-       return;
-}
-#else
-static void ext4_mb_store_history(struct ext4_allocation_context *ac);
-#endif
-
-#define in_range(b, first, len)        ((b) >= (first) && (b) <= (first) + (len) - 1)
-
-static struct proc_dir_entry *proc_root_ext4;
-struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t);
-ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode,
-                       ext4_fsblk_t goal, unsigned long *count, int *errp);
-
-static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
-                                       ext4_group_t group);
-static void ext4_mb_poll_new_transaction(struct super_block *, handle_t *);
-static void ext4_mb_free_committed_blocks(struct super_block *);
-static void ext4_mb_return_to_preallocation(struct inode *inode,
-                                       struct ext4_buddy *e4b, sector_t block,
-                                       int count);
-static void ext4_mb_put_pa(struct ext4_allocation_context *,
-                       struct super_block *, struct ext4_prealloc_space *pa);
-static int ext4_mb_init_per_dev_proc(struct super_block *sb);
-static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
-
-
-static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
-{
-       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-       bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
-}
-
-static inline void ext4_unlock_group(struct super_block *sb,
-                                       ext4_group_t group)
-{
-       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-       bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
-}
-
-static inline int ext4_is_group_locked(struct super_block *sb,
-                                       ext4_group_t group)
-{
-       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
-
-       return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
-                                               &(grinfo->bb_state));
-}
-
-static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
-                                       struct ext4_free_extent *fex)
-{
-       ext4_fsblk_t block;
-
-       block = (ext4_fsblk_t) fex->fe_group * EXT4_BLOCKS_PER_GROUP(sb)
-                       + fex->fe_start
-                       + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
-       return block;
-}
-
 static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
 {
 #if BITS_PER_LONG == 64
@@ -736,7 +440,7 @@ static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
                        blocknr +=
                            le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
 
-                       ext4_error(sb, __FUNCTION__, "double-free of inode"
+                       ext4_error(sb, __func__, "double-free of inode"
                                   " %lu's block %llu(bit %u in group %lu)\n",
                                   inode ? inode->i_ino : 0, blocknr,
                                   first + i, e4b->bd_group);
@@ -898,17 +602,17 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
        list_for_each(cur, &grp->bb_prealloc_list) {
                ext4_group_t groupnr;
                struct ext4_prealloc_space *pa;
-               pa = list_entry(cur, struct ext4_prealloc_space, group_list);
-               ext4_get_group_no_and_offset(sb, pa->pstart, &groupnr, &k);
+               pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
+               ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
                MB_CHECK_ASSERT(groupnr == e4b->bd_group);
-               for (i = 0; i < pa->len; i++)
+               for (i = 0; i < pa->pa_len; i++)
                        MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
        }
        return 0;
 }
 #undef MB_CHECK_ASSERT
 #define mb_check_buddy(e4b) __mb_check_buddy(e4b,      \
-                                       __FILE__, __FUNCTION__, __LINE__)
+                                       __FILE__, __func__, __LINE__)
 #else
 #define mb_check_buddy(e4b)
 #endif
@@ -982,7 +686,7 @@ static void ext4_mb_generate_buddy(struct super_block *sb,
        grp->bb_fragments = fragments;
 
        if (free != grp->bb_free) {
-               ext4_error(sb, __FUNCTION__,
+               ext4_error(sb, __func__,
                        "EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n",
                        group, free, grp->bb_free);
                /*
@@ -1168,8 +872,9 @@ out:
        return err;
 }
 
-static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
-               struct ext4_buddy *e4b)
+static noinline_for_stack int
+ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
+                                       struct ext4_buddy *e4b)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct inode *inode = sbi->s_buddy_cache;
@@ -1367,7 +1072,7 @@ static int mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
                        blocknr +=
                            le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
 
-                       ext4_error(sb, __FUNCTION__, "double-free of inode"
+                       ext4_error(sb, __func__, "double-free of inode"
                                   " %lu's block %llu(bit %u in group %lu)\n",
                                   inode ? inode->i_ino : 0, blocknr, block,
                                   e4b->bd_group);
@@ -1848,7 +1553,7 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
                         * free blocks even though group info says we
                         * have free blocks
                         */
-                       ext4_error(sb, __FUNCTION__, "%d free blocks as per "
+                       ext4_error(sb, __func__, "%d free blocks as per "
                                        "group info. But bitmap says 0\n",
                                        free);
                        break;
@@ -1857,7 +1562,7 @@ static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
                mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
                BUG_ON(ex.fe_len <= 0);
                if (free < ex.fe_len) {
-                       ext4_error(sb, __FUNCTION__, "%d free blocks as per "
+                       ext4_error(sb, __func__, "%d free blocks as per "
                                        "group info. But got %d blocks\n",
                                        free, ex.fe_len);
                        /*
@@ -1965,7 +1670,8 @@ static int ext4_mb_good_group(struct ext4_allocation_context *ac,
        return 0;
 }
 
-static int ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
+static noinline_for_stack int
+ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
 {
        ext4_group_t group;
        ext4_group_t i;
@@ -2449,17 +2155,10 @@ static void ext4_mb_history_init(struct super_block *sb)
        int i;
 
        if (sbi->s_mb_proc != NULL) {
-               struct proc_dir_entry *p;
-               p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc);
-               if (p) {
-                       p->proc_fops = &ext4_mb_seq_history_fops;
-                       p->data = sb;
-               }
-               p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc);
-               if (p) {
-                       p->proc_fops = &ext4_mb_seq_groups_fops;
-                       p->data = sb;
-               }
+               proc_create_data("mb_history", S_IRUGO, sbi->s_mb_proc,
+                                &ext4_mb_seq_history_fops, sb);
+               proc_create_data("mb_groups", S_IRUGO, sbi->s_mb_proc,
+                                &ext4_mb_seq_groups_fops, sb);
        }
 
        sbi->s_mb_history_max = 1000;
@@ -2472,7 +2171,8 @@ static void ext4_mb_history_init(struct super_block *sb)
        /* if we can't allocate history, then we simply won't use it */
 }
 
-static void ext4_mb_store_history(struct ext4_allocation_context *ac)
+static noinline_for_stack void
+ext4_mb_store_history(struct ext4_allocation_context *ac)
 {
        struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
        struct ext4_mb_history h;
@@ -2572,13 +2272,13 @@ static int ext4_mb_init_backend(struct super_block *sb)
                meta_group_info[j] = kzalloc(len, GFP_KERNEL);
                if (meta_group_info[j] == NULL) {
                        printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
-                       i--;
                        goto err_freebuddy;
                }
                desc = ext4_get_group_desc(sb, i, NULL);
                if (desc == NULL) {
                        printk(KERN_ERR
                                "EXT4-fs: can't read descriptor %lu\n", i);
+                       i++;
                        goto err_freebuddy;
                }
                memset(meta_group_info[j], 0, len);
@@ -2618,13 +2318,11 @@ static int ext4_mb_init_backend(struct super_block *sb)
        return 0;
 
 err_freebuddy:
-       while (i >= 0) {
+       while (i-- > 0)
                kfree(ext4_get_group_info(sb, i));
-               i--;
-       }
        i = num_meta_group_infos;
 err_freemeta:
-       while (--i >= 0)
+       while (i-- > 0)
                kfree(sbi->s_group_info[i]);
        iput(sbi->s_buddy_cache);
 err_freesgi:
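
The err_freebuddy/err_freemeta hunk above switches both cleanup loops to the "while (i-- > 0)" form and moves the i adjustments next to the gotos, so the unwind frees exactly the entries that were set up. A small stand-alone illustration of the idiom (hypothetical names, plain malloc/free in place of the kernel allocators):

#include <stdlib.h>

/* Allocate n buffers; on failure, free exactly the ones already set up.
 * "i" always counts the valid entries 0 .. i-1, so the unwind is simply
 * "while (i-- > 0)". */
static void **alloc_all(size_t n, size_t sz)
{
        void **v = calloc(n, sizeof(*v));
        size_t i;

        if (!v)
                return NULL;
        for (i = 0; i < n; i++) {
                v[i] = malloc(sz);
                if (!v[i])
                        goto err;       /* entries 0 .. i-1 are valid */
        }
        return v;

err:
        while (i-- > 0)                 /* frees i-1, i-2, ..., 0 */
                free(v[i]);
        free(v);
        return NULL;
}

int main(void)
{
        size_t i;
        void **v = alloc_all(16, 64);

        if (!v)
                return 1;
        for (i = 0; i < 16; i++)
                free(v[i]);
        free(v);
        return 0;
}
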
@@ -2808,7 +2506,8 @@ int ext4_mb_release(struct super_block *sb)
        return 0;
 }
 
-static void ext4_mb_free_committed_blocks(struct super_block *sb)
+static noinline_for_stack void
+ext4_mb_free_committed_blocks(struct super_block *sb)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        int err;
@@ -2867,7 +2566,6 @@ static void ext4_mb_free_committed_blocks(struct super_block *sb)
        mb_debug("freed %u blocks in %u structures\n", count, count2);
 }
 
-#define EXT4_ROOT                      "ext4"
 #define EXT4_MB_STATS_NAME             "stats"
 #define EXT4_MB_MAX_TO_SCAN_NAME       "max_to_scan"
 #define EXT4_MB_MIN_TO_SCAN_NAME       "min_to_scan"
@@ -3007,9 +2705,9 @@ int __init init_ext4_mballoc(void)
                return -ENOMEM;
        }
 #ifdef CONFIG_PROC_FS
-       proc_root_ext4 = proc_mkdir(EXT4_ROOT, proc_root_fs);
+       proc_root_ext4 = proc_mkdir("fs/ext4", NULL);
        if (proc_root_ext4 == NULL)
-               printk(KERN_ERR "EXT4-fs: Unable to create %s\n", EXT4_ROOT);
+               printk(KERN_ERR "EXT4-fs: Unable to create fs/ext4\n");
 #endif
        return 0;
 }
@@ -3020,7 +2718,7 @@ void exit_ext4_mballoc(void)
        kmem_cache_destroy(ext4_pspace_cachep);
        kmem_cache_destroy(ext4_ac_cachep);
 #ifdef CONFIG_PROC_FS
-       remove_proc_entry(EXT4_ROOT, proc_root_fs);
+       remove_proc_entry("fs/ext4", NULL);
 #endif
 }
 
@@ -3029,7 +2727,8 @@ void exit_ext4_mballoc(void)
  * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps
  * Returns 0 on success or an error code
  */
-static int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
+static noinline_for_stack int
+ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
                                handle_t *handle)
 {
        struct buffer_head *bitmap_bh = NULL;
@@ -3078,7 +2777,7 @@ static int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
                        in_range(block, ext4_inode_table(sb, gdp),
                                EXT4_SB(sb)->s_itb_per_group)) {
 
-               ext4_error(sb, __FUNCTION__,
+               ext4_error(sb, __func__,
                           "Allocating block in system zone - block = %llu",
                           block);
        }
@@ -3102,9 +2801,7 @@ static int ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
                                                ac->ac_b_ex.fe_group,
                                                gdp));
        }
-       gdp->bg_free_blocks_count =
-               cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)
-                               - ac->ac_b_ex.fe_len);
+       le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len);
        gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
        spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
        percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
@@ -3138,7 +2835,7 @@ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
                ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
        else
                ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
-       mb_debug("#%u: goal %lu blocks for locality group\n",
+       mb_debug("#%u: goal %u blocks for locality group\n",
                current->pid, ac->ac_g_ex.fe_len);
 }
 
@@ -3146,15 +2843,16 @@ static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
  * Normalization means making request better in terms of
  * size and alignment
  */
-static void ext4_mb_normalize_request(struct ext4_allocation_context *ac,
+static noinline_for_stack void
+ext4_mb_normalize_request(struct ext4_allocation_context *ac,
                                struct ext4_allocation_request *ar)
 {
        int bsbits, max;
        ext4_lblk_t end;
-       struct list_head *cur;
        loff_t size, orig_size, start_off;
        ext4_lblk_t start, orig_start;
        struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
+       struct ext4_prealloc_space *pa;
 
        /* do normalize only data requests, metadata requests
           do not need preallocation */
@@ -3240,12 +2938,9 @@ static void ext4_mb_normalize_request(struct ext4_allocation_context *ac,
 
        /* check we don't cross already preallocated blocks */
        rcu_read_lock();
-       list_for_each_rcu(cur, &ei->i_prealloc_list) {
-               struct ext4_prealloc_space *pa;
+       list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
                unsigned long pa_end;
 
-               pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
-
                if (pa->pa_deleted)
                        continue;
                spin_lock(&pa->pa_lock);
@@ -3287,10 +2982,8 @@ static void ext4_mb_normalize_request(struct ext4_allocation_context *ac,
 
        /* XXX: extra loop to check we really don't overlap preallocations */
        rcu_read_lock();
-       list_for_each_rcu(cur, &ei->i_prealloc_list) {
-               struct ext4_prealloc_space *pa;
+       list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
                unsigned long pa_end;
-               pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
                spin_lock(&pa->pa_lock);
                if (pa->pa_deleted == 0) {
                        pa_end = pa->pa_lstart + pa->pa_len;
@@ -3382,7 +3075,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
        BUG_ON(pa->pa_free < len);
        pa->pa_free -= len;
 
-       mb_debug("use %llu/%lu from inode pa %p\n", start, len, pa);
+       mb_debug("use %llu/%u from inode pa %p\n", start, len, pa);
 }
 
 /*
@@ -3412,12 +3105,12 @@ static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
 /*
  * search goal blocks in preallocated space
  */
-static int ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
+static noinline_for_stack int
+ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
 {
        struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
        struct ext4_locality_group *lg;
        struct ext4_prealloc_space *pa;
-       struct list_head *cur;
 
        /* only data can be preallocated */
        if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
@@ -3425,8 +3118,7 @@ static int ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
 
        /* first, try per-file preallocation */
        rcu_read_lock();
-       list_for_each_rcu(cur, &ei->i_prealloc_list) {
-               pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
+       list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
 
                /* all fields in this condition don't change,
                 * so we can skip locking for them */
@@ -3458,8 +3150,7 @@ static int ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
                return 0;
 
        rcu_read_lock();
-       list_for_each_rcu(cur, &lg->lg_prealloc_list) {
-               pa = list_entry(cur, struct ext4_prealloc_space, pa_inode_list);
+       list_for_each_entry_rcu(pa, &lg->lg_prealloc_list, pa_inode_list) {
                spin_lock(&pa->pa_lock);
                if (pa->pa_deleted == 0 && pa->pa_free >= ac->ac_o_ex.fe_len) {
                        atomic_inc(&pa->pa_count);
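
Several hunks in this file replace the list_for_each_rcu()/list_entry() pairs with a single list_for_each_entry_rcu() over pa_inode_list, dropping the temporary struct list_head cursor. The mechanism underneath is container_of() pointer arithmetic; a simplified, non-RCU user-space re-derivation follows (the kernel macros additionally use typeof() and RCU-safe pointer loads; everything below is a sketch, not kernel code):

#include <stddef.h>
#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

/* same idea as the kernel's container_of()/list_entry(): step back from
 * the embedded list node to the start of the enclosing structure */
#define list_entry(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* simplified list_for_each_entry(); the type is passed explicitly here
 * instead of being inferred with typeof() */
#define list_for_each_entry(pos, head, type, member)                \
        for ((pos) = list_entry((head)->next, type, member);        \
             &(pos)->member != (head);                              \
             (pos) = list_entry((pos)->member.next, type, member))

struct prealloc {
        int pa_len;
        struct list_head pa_inode_list;
};

int main(void)
{
        struct list_head head = { &head, &head };
        struct prealloc a = { 10, { NULL, NULL } };
        struct prealloc b = { 20, { NULL, NULL } };
        struct prealloc *pa;

        /* hand-rolled circular list: head -> a -> b -> head */
        head.next = &a.pa_inode_list;
        a.pa_inode_list.prev = &head;
        a.pa_inode_list.next = &b.pa_inode_list;
        b.pa_inode_list.prev = &a.pa_inode_list;
        b.pa_inode_list.next = &head;
        head.prev = &b.pa_inode_list;

        list_for_each_entry(pa, &head, struct prealloc, pa_inode_list)
                printf("pa_len = %d\n", pa->pa_len);
        return 0;
}
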
@@ -3579,7 +3270,8 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
 /*
  * creates new preallocated space for given inode
  */
-static int ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
+static noinline_for_stack int
+ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
 {
        struct super_block *sb = ac->ac_sb;
        struct ext4_prealloc_space *pa;
@@ -3666,7 +3358,8 @@ static int ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
 /*
  * creates new preallocated space for locality group inodes belongs to
  */
-static int ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
+static noinline_for_stack int
+ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
 {
        struct super_block *sb = ac->ac_sb;
        struct ext4_locality_group *lg;
@@ -3739,11 +3432,11 @@ static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
  * the caller MUST hold group/inode locks.
  * TODO: optimize the case when there are no in-core structures yet
  */
-static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
-                               struct buffer_head *bitmap_bh,
-                               struct ext4_prealloc_space *pa)
+static noinline_for_stack int
+ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
+                       struct ext4_prealloc_space *pa,
+                       struct ext4_allocation_context *ac)
 {
-       struct ext4_allocation_context *ac;
        struct super_block *sb = e4b->bd_sb;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        unsigned long end;
@@ -3759,8 +3452,6 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
        BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
        end = bit + pa->pa_len;
 
-       ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
-
        if (ac) {
                ac->ac_sb = sb;
                ac->ac_inode = pa->pa_inode;
@@ -3797,7 +3488,7 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
                        pa, (unsigned long) pa->pa_lstart,
                        (unsigned long) pa->pa_pstart,
                        (unsigned long) pa->pa_len);
-               ext4_error(sb, __FUNCTION__, "free %u, pa_free %u\n",
+               ext4_error(sb, __func__, "free %u, pa_free %u\n",
                                                free, pa->pa_free);
                /*
                 * pa is already deleted so we use the value obtained
@@ -3805,22 +3496,19 @@ static int ext4_mb_release_inode_pa(struct ext4_buddy *e4b,
                 */
        }
        atomic_add(free, &sbi->s_mb_discarded);
-       if (ac)
-               kmem_cache_free(ext4_ac_cachep, ac);
 
        return err;
 }
 
-static int ext4_mb_release_group_pa(struct ext4_buddy *e4b,
-                               struct ext4_prealloc_space *pa)
+static noinline_for_stack int
+ext4_mb_release_group_pa(struct ext4_buddy *e4b,
+                               struct ext4_prealloc_space *pa,
+                               struct ext4_allocation_context *ac)
 {
-       struct ext4_allocation_context *ac;
        struct super_block *sb = e4b->bd_sb;
        ext4_group_t group;
        ext4_grpblk_t bit;
 
-       ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
-
        if (ac)
                ac->ac_op = EXT4_MB_HISTORY_DISCARD;
 
@@ -3838,7 +3526,6 @@ static int ext4_mb_release_group_pa(struct ext4_buddy *e4b,
                ac->ac_b_ex.fe_len = pa->pa_len;
                ac->ac_b_ex.fe_logical = 0;
                ext4_mb_store_history(ac);
-               kmem_cache_free(ext4_ac_cachep, ac);
        }
 
        return 0;
@@ -3853,12 +3540,14 @@ static int ext4_mb_release_group_pa(struct ext4_buddy *e4b,
  * - how many do we discard
  *   1) how many requested
  */
-static int ext4_mb_discard_group_preallocations(struct super_block *sb,
+static noinline_for_stack int
+ext4_mb_discard_group_preallocations(struct super_block *sb,
                                        ext4_group_t group, int needed)
 {
        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
        struct buffer_head *bitmap_bh = NULL;
        struct ext4_prealloc_space *pa, *tmp;
+       struct ext4_allocation_context *ac;
        struct list_head list;
        struct ext4_buddy e4b;
        int err;
@@ -3886,6 +3575,7 @@ static int ext4_mb_discard_group_preallocations(struct super_block *sb,
        grp = ext4_get_group_info(sb, group);
        INIT_LIST_HEAD(&list);
 
+       ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
 repeat:
        ext4_lock_group(sb, group);
        list_for_each_entry_safe(pa, tmp,
@@ -3940,9 +3630,9 @@ repeat:
                spin_unlock(pa->pa_obj_lock);
 
                if (pa->pa_linear)
-                       ext4_mb_release_group_pa(&e4b, pa);
+                       ext4_mb_release_group_pa(&e4b, pa, ac);
                else
-                       ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
+                       ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
 
                list_del(&pa->u.pa_tmp_list);
                call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
@@ -3950,6 +3640,8 @@ repeat:
 
 out:
        ext4_unlock_group(sb, group);
+       if (ac)
+               kmem_cache_free(ext4_ac_cachep, ac);
        ext4_mb_release_desc(&e4b);
        put_bh(bitmap_bh);
        return free;
@@ -3970,6 +3662,7 @@ void ext4_mb_discard_inode_preallocations(struct inode *inode)
        struct super_block *sb = inode->i_sb;
        struct buffer_head *bitmap_bh = NULL;
        struct ext4_prealloc_space *pa, *tmp;
+       struct ext4_allocation_context *ac;
        ext4_group_t group = 0;
        struct list_head list;
        struct ext4_buddy e4b;
@@ -3984,6 +3677,7 @@ void ext4_mb_discard_inode_preallocations(struct inode *inode)
 
        INIT_LIST_HEAD(&list);
 
+       ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
 repeat:
        /* first, collect all pa's in the inode */
        spin_lock(&ei->i_prealloc_lock);
@@ -4048,7 +3742,7 @@ repeat:
 
                ext4_lock_group(sb, group);
                list_del(&pa->pa_group_list);
-               ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa);
+               ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
                ext4_unlock_group(sb, group);
 
                ext4_mb_release_desc(&e4b);
@@ -4057,6 +3751,8 @@ repeat:
                list_del(&pa->u.pa_tmp_list);
                call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
        }
+       if (ac)
+               kmem_cache_free(ext4_ac_cachep, ac);
 }
 
 /*
@@ -4116,7 +3812,7 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
                        printk(KERN_ERR "PA:%lu:%d:%u \n", i,
                                                        start, pa->pa_len);
                }
-               ext4_lock_group(sb, i);
+               ext4_unlock_group(sb, i);
 
                if (grp->bb_free == 0)
                        continue;
@@ -4175,7 +3871,8 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
        mutex_lock(&ac->ac_lg->lg_mutex);
 }
 
-static int ext4_mb_initialize_context(struct ext4_allocation_context *ac,
+static noinline_for_stack int
+ext4_mb_initialize_context(struct ext4_allocation_context *ac,
                                struct ext4_allocation_request *ar)
 {
        struct super_block *sb = ar->inode->i_sb;
@@ -4406,7 +4103,8 @@ static void ext4_mb_poll_new_transaction(struct super_block *sb,
        ext4_mb_free_committed_blocks(sb);
 }
 
-static int ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
+static noinline_for_stack int
+ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
                          ext4_group_t group, ext4_grpblk_t block, int count)
 {
        struct ext4_group_info *db = e4b->bd_info;
@@ -4497,7 +4195,7 @@ void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
        if (block < le32_to_cpu(es->s_first_data_block) ||
            block + count < block ||
            block + count > ext4_blocks_count(es)) {
-               ext4_error(sb, __FUNCTION__,
+               ext4_error(sb, __func__,
                            "Freeing blocks not in datazone - "
                            "block = %lu, count = %lu", block, count);
                goto error_return;
@@ -4538,7 +4236,7 @@ do_more:
            in_range(block + count - 1, ext4_inode_table(sb, gdp),
                      EXT4_SB(sb)->s_itb_per_group)) {
 
-               ext4_error(sb, __FUNCTION__,
+               ext4_error(sb, __func__,
                           "Freeing blocks in system zone - "
                           "Block = %lu, count = %lu", block, count);
        }
@@ -4596,8 +4294,7 @@ do_more:
        }
 
        spin_lock(sb_bgl_lock(sbi, block_group));
-       gdp->bg_free_blocks_count =
-               cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) + count);
+       le16_add_cpu(&gdp->bg_free_blocks_count, count);
        gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
        spin_unlock(sb_bgl_lock(sbi, block_group));
        percpu_counter_add(&sbi->s_freeblocks_counter, count);
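
The two counter updates above swap the open-coded cpu_to_le16(le16_to_cpu(x) +/- n) round trip for le16_add_cpu(&x, +/- n), which performs the same read-convert-add-convert-write on the on-disk little-endian field. A user-space approximation of that helper (the my_ prefixed names are local stand-ins, not the kernel definitions):

#include <stdint.h>
#include <stdio.h>

/* simplified little-endian helpers; the kernel's le16_to_cpu/cpu_to_le16
 * are no-ops on little-endian hosts and byte swaps elsewhere */
static uint16_t my_le16_to_cpu(uint16_t v)
{
        const uint8_t *p = (const uint8_t *)&v;
        return (uint16_t)(p[0] | (p[1] << 8));
}

static uint16_t my_cpu_to_le16(uint16_t v)
{
        uint16_t out;
        uint8_t *p = (uint8_t *)&out;
        p[0] = v & 0xff;
        p[1] = v >> 8;
        return out;
}

/* equivalent of le16_add_cpu(&var, val) */
static void my_le16_add_cpu(uint16_t *var, int16_t val)
{
        *var = my_cpu_to_le16((uint16_t)(my_le16_to_cpu(*var) + val));
}

int main(void)
{
        uint16_t free_blocks = my_cpu_to_le16(100);     /* on-disk format */

        /* old open-coded form */
        free_blocks = my_cpu_to_le16((uint16_t)(my_le16_to_cpu(free_blocks) - 8));
        /* new helper form */
        my_le16_add_cpu(&free_blocks, -8);

        printf("free blocks now: %u\n", my_le16_to_cpu(free_blocks));
        return 0;
}
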
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
new file mode 100644 (file)
index 0000000..bfe6add
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+ *  fs/ext4/mballoc.h
+ *
+ *  Written by: Alex Tomas <alex@clusterfs.com>
+ *
+ */
+#ifndef _EXT4_MBALLOC_H
+#define _EXT4_MBALLOC_H
+
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
+#include <linux/module.h>
+#include <linux/swap.h>
+#include <linux/proc_fs.h>
+#include <linux/pagemap.h>
+#include <linux/seq_file.h>
+#include <linux/version.h>
+#include "ext4_jbd2.h"
+#include "ext4.h"
+#include "group.h"
+
+/*
+ * with AGGRESSIVE_CHECK allocator runs consistency checks over
+ * structures. these checks slow things down a lot
+ */
+#define AGGRESSIVE_CHECK__
+
+/*
+ * with DOUBLE_CHECK defined mballoc creates persistent in-core
+ * bitmaps, maintains and uses them to check for double allocations
+ */
+#define DOUBLE_CHECK__
+
+/*
+ */
+#define MB_DEBUG__
+#ifdef MB_DEBUG
+#define mb_debug(fmt, a...)    printk(fmt, ##a)
+#else
+#define mb_debug(fmt, a...)
+#endif
+
+/*
+ * with EXT4_MB_HISTORY mballoc stores last N allocations in memory
+ * and you can monitor it in /proc/fs/ext4/<dev>/mb_history
+ */
+#define EXT4_MB_HISTORY
+#define EXT4_MB_HISTORY_ALLOC          1       /* allocation */
+#define EXT4_MB_HISTORY_PREALLOC       2       /* preallocated blocks used */
+#define EXT4_MB_HISTORY_DISCARD                4       /* preallocation discarded */
+#define EXT4_MB_HISTORY_FREE           8       /* free */
+
+#define EXT4_MB_HISTORY_DEFAULT                (EXT4_MB_HISTORY_ALLOC | \
+                                        EXT4_MB_HISTORY_PREALLOC)
+
+/*
+ * How long mballoc can look for a best extent (in found extents)
+ */
+#define MB_DEFAULT_MAX_TO_SCAN         200
+
+/*
+ * How long mballoc must look for a best extent
+ */
+#define MB_DEFAULT_MIN_TO_SCAN         10
+
+/*
+ * How many groups mballoc will scan looking for the best chunk
+ */
+#define MB_DEFAULT_MAX_GROUPS_TO_SCAN  5
+
+/*
+ * with 'ext4_mb_stats' allocator will collect stats that will be
+ * shown at umount. The collecting costs though!
+ */
+#define MB_DEFAULT_STATS               1
+
+/*
+ * files smaller than MB_DEFAULT_STREAM_THRESHOLD are served
+ * by the stream allocator, whose purpose is to pack requests
+ * as close to each other as possible to produce smooth I/O traffic.
+ * We use locality group prealloc space for stream requests.
+ * We can tune the same via /proc/fs/ext4/<partition>/stream_req
+ */
+#define MB_DEFAULT_STREAM_THRESHOLD    16      /* 64K */
+
+/*
+ * for which requests use 2^N search using buddies
+ */
+#define MB_DEFAULT_ORDER2_REQS         2
+
+/*
+ * default group prealloc size 512 blocks
+ */
+#define MB_DEFAULT_GROUP_PREALLOC      512
+
+static struct kmem_cache *ext4_pspace_cachep;
+static struct kmem_cache *ext4_ac_cachep;
+
+#ifdef EXT4_BB_MAX_BLOCKS
+#undef EXT4_BB_MAX_BLOCKS
+#endif
+#define EXT4_BB_MAX_BLOCKS     30
+
+struct ext4_free_metadata {
+       ext4_group_t group;
+       unsigned short num;
+       ext4_grpblk_t  blocks[EXT4_BB_MAX_BLOCKS];
+       struct list_head list;
+};
+
+struct ext4_group_info {
+       unsigned long   bb_state;
+       unsigned long   bb_tid;
+       struct ext4_free_metadata *bb_md_cur;
+       unsigned short  bb_first_free;
+       unsigned short  bb_free;
+       unsigned short  bb_fragments;
+       struct          list_head bb_prealloc_list;
+#ifdef DOUBLE_CHECK
+       void            *bb_bitmap;
+#endif
+       unsigned short  bb_counters[];
+};
+
+#define EXT4_GROUP_INFO_NEED_INIT_BIT  0
+#define EXT4_GROUP_INFO_LOCKED_BIT     1
+
+#define EXT4_MB_GRP_NEED_INIT(grp)     \
+       (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state)))
+
+
+struct ext4_prealloc_space {
+       struct list_head        pa_inode_list;
+       struct list_head        pa_group_list;
+       union {
+               struct list_head pa_tmp_list;
+               struct rcu_head pa_rcu;
+       } u;
+       spinlock_t              pa_lock;
+       atomic_t                pa_count;
+       unsigned                pa_deleted;
+       ext4_fsblk_t            pa_pstart;      /* phys. block */
+       ext4_lblk_t             pa_lstart;      /* log. block */
+       unsigned short          pa_len;         /* len of preallocated chunk */
+       unsigned short          pa_free;        /* how many blocks are free */
+       unsigned short          pa_linear;      /* consumed in one direction
+                                                * strictly, for grp prealloc */
+       spinlock_t              *pa_obj_lock;
+       struct inode            *pa_inode;      /* hack, for history only */
+};
+
+
+struct ext4_free_extent {
+       ext4_lblk_t fe_logical;
+       ext4_grpblk_t fe_start;
+       ext4_group_t fe_group;
+       int fe_len;
+};
+
+/*
+ * Locality group:
+ *   we try to group all related changes together
+ *   so that writeback can flush/allocate them together as well
+ */
+struct ext4_locality_group {
+       /* for allocator */
+       struct mutex            lg_mutex;       /* to serialize allocates */
+       struct list_head        lg_prealloc_list;/* list of preallocations */
+       spinlock_t              lg_prealloc_lock;
+};
+
+struct ext4_allocation_context {
+       struct inode *ac_inode;
+       struct super_block *ac_sb;
+
+       /* original request */
+       struct ext4_free_extent ac_o_ex;
+
+       /* goal request (after normalization) */
+       struct ext4_free_extent ac_g_ex;
+
+       /* the best found extent */
+       struct ext4_free_extent ac_b_ex;
+
+       /* copy of the best found extent taken before preallocation efforts */
+       struct ext4_free_extent ac_f_ex;
+
+       /* number of iterations done. we have to track to limit searching */
+       unsigned long ac_ex_scanned;
+       __u16 ac_groups_scanned;
+       __u16 ac_found;
+       __u16 ac_tail;
+       __u16 ac_buddy;
+       __u16 ac_flags;         /* allocation hints */
+       __u8 ac_status;
+       __u8 ac_criteria;
+       __u8 ac_repeats;
+       __u8 ac_2order;         /* if request is to allocate 2^N blocks and
+                                * N > 0, the field stores N, otherwise 0 */
+       __u8 ac_op;             /* operation, for history only */
+       struct page *ac_bitmap_page;
+       struct page *ac_buddy_page;
+       struct ext4_prealloc_space *ac_pa;
+       struct ext4_locality_group *ac_lg;
+};
+
+#define AC_STATUS_CONTINUE     1
+#define AC_STATUS_FOUND                2
+#define AC_STATUS_BREAK                3
+
+struct ext4_mb_history {
+       struct ext4_free_extent orig;   /* orig allocation */
+       struct ext4_free_extent goal;   /* goal allocation */
+       struct ext4_free_extent result; /* result allocation */
+       unsigned pid;
+       unsigned ino;
+       __u16 found;    /* how many extents have been found */
+       __u16 groups;   /* how many groups have been scanned */
+       __u16 tail;     /* what tail broke some buddy */
+       __u16 buddy;    /* buddy the tail ^^^ broke */
+       __u16 flags;
+       __u8 cr:3;      /* which phase the result extent was found at */
+       __u8 op:4;
+       __u8 merged:1;
+};
+
+struct ext4_buddy {
+       struct page *bd_buddy_page;
+       void *bd_buddy;
+       struct page *bd_bitmap_page;
+       void *bd_bitmap;
+       struct ext4_group_info *bd_info;
+       struct super_block *bd_sb;
+       __u16 bd_blkbits;
+       ext4_group_t bd_group;
+};
+#define EXT4_MB_BITMAP(e4b)    ((e4b)->bd_bitmap)
+#define EXT4_MB_BUDDY(e4b)     ((e4b)->bd_buddy)
+
+#ifndef EXT4_MB_HISTORY
+static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
+{
+       return;
+}
+#else
+static void ext4_mb_store_history(struct ext4_allocation_context *ac);
+#endif
+
+#define in_range(b, first, len)        ((b) >= (first) && (b) <= (first) + (len) - 1)
+
+static struct proc_dir_entry *proc_root_ext4;
+struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t);
+
+static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
+                                       ext4_group_t group);
+static void ext4_mb_poll_new_transaction(struct super_block *, handle_t *);
+static void ext4_mb_free_committed_blocks(struct super_block *);
+static void ext4_mb_return_to_preallocation(struct inode *inode,
+                                       struct ext4_buddy *e4b, sector_t block,
+                                       int count);
+static void ext4_mb_put_pa(struct ext4_allocation_context *,
+                       struct super_block *, struct ext4_prealloc_space *pa);
+static int ext4_mb_init_per_dev_proc(struct super_block *sb);
+static int ext4_mb_destroy_per_dev_proc(struct super_block *sb);
+
+
+static inline void ext4_lock_group(struct super_block *sb, ext4_group_t group)
+{
+       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
+
+       bit_spin_lock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
+}
+
+static inline void ext4_unlock_group(struct super_block *sb,
+                                       ext4_group_t group)
+{
+       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
+
+       bit_spin_unlock(EXT4_GROUP_INFO_LOCKED_BIT, &(grinfo->bb_state));
+}
+
+static inline int ext4_is_group_locked(struct super_block *sb,
+                                       ext4_group_t group)
+{
+       struct ext4_group_info *grinfo = ext4_get_group_info(sb, group);
+
+       return bit_spin_is_locked(EXT4_GROUP_INFO_LOCKED_BIT,
+                                               &(grinfo->bb_state));
+}
+
+static ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
+                                       struct ext4_free_extent *fex)
+{
+       ext4_fsblk_t block;
+
+       block = (ext4_fsblk_t) fex->fe_group * EXT4_BLOCKS_PER_GROUP(sb)
+                       + fex->fe_start
+                       + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
+       return block;
+}
+#endif
index 5c1e27de7755b46552c3fd85dd11ad66d0ec1490..b9e077ba07e94b0c979ebea4530e1fc9471e6eee 100644 (file)
@@ -13,8 +13,8 @@
  */
 
 #include <linux/module.h>
-#include <linux/ext4_jbd2.h>
-#include <linux/ext4_fs_extents.h>
+#include "ext4_jbd2.h"
+#include "ext4_extents.h"
 
 /*
  * The contiguous blocks details which can be
@@ -327,7 +327,7 @@ static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
 }
 
 static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
-                               struct inode *tmp_inode)
+                                               struct inode *tmp_inode)
 {
        int retval;
        __le32  i_data[3];
@@ -339,7 +339,7 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
         * i_data field of the original inode
         */
        retval = ext4_journal_extend(handle, 1);
-       if (retval != 0) {
+       if (retval) {
                retval = ext4_journal_restart(handle, 1);
                if (retval)
                        goto err_out;
@@ -350,6 +350,18 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
        i_data[2] = ei->i_data[EXT4_TIND_BLOCK];
 
        down_write(&EXT4_I(inode)->i_data_sem);
+       /*
+        * if EXT4_EXT_MIGRATE is cleared, a block allocation
+        * happened after we started the migrate, so we need to
+        * fail the migrate
+        */
+       if (!(EXT4_I(inode)->i_flags & EXT4_EXT_MIGRATE)) {
+               retval = -EAGAIN;
+               up_write(&EXT4_I(inode)->i_data_sem);
+               goto err_out;
+       } else
+               EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
+                                                       ~EXT4_EXT_MIGRATE;
        /*
         * We have the extent map build with the tmp inode.
         * Now copy the i_data across
@@ -508,6 +520,17 @@ int ext4_ext_migrate(struct inode *inode, struct file *filp,
         * switch the inode format to prevent read.
         */
        mutex_lock(&(inode->i_mutex));
+       /*
+        * Even though we take i_mutex, we can still cause block allocation
+        * via mmap writes to holes. If we have allocated new blocks, we fail
+        * the migrate.  New block allocation will clear the EXT4_EXT_MIGRATE flag.
+        * The flag is updated with i_data_sem held to prevent racing with
+        * block allocation.
+        */
+       down_read((&EXT4_I(inode)->i_data_sem));
+       EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags | EXT4_EXT_MIGRATE;
+       up_read((&EXT4_I(inode)->i_data_sem));
+
        handle = ext4_journal_start(inode, 1);
 
        ei = EXT4_I(inode);
@@ -559,9 +582,15 @@ err_out:
                 * tmp_inode
                 */
                free_ext_block(handle, tmp_inode);
-       else
-               retval = ext4_ext_swap_inode_data(handle, inode,
-                                                       tmp_inode);
+       else {
+               retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
+               if (retval)
+                       /*
+                        * if we fail to swap inode data free the extent
+                        * details of the tmp inode
+                        */
+                       free_ext_block(handle, tmp_inode);
+       }
 
        /* We mark the tmp_inode dirty via ext4_ext_tree_init. */
        if (ext4_journal_extend(handle, 1) != 0)
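
Taken together with the ext4_get_blocks_wrap() change earlier in this diff, the migrate.c hunks implement a small invalidation protocol around EXT4_EXT_MIGRATE: migrate sets the flag under i_data_sem, any new block allocation clears it, and the final i_data swap returns -EAGAIN if the flag is gone. A stand-alone sketch of the same protocol, with a pthread rwlock standing in for i_data_sem (hypothetical names; the calls in main run sequentially, so the read lock in migrate_start mirrors the down_read in the diff without creating a real race):

#include <pthread.h>
#include <stdio.h>

#define MIGRATE_FLAG 0x1

static pthread_rwlock_t data_sem = PTHREAD_RWLOCK_INITIALIZER;
static unsigned int flags;

/* allocator side: any new allocation invalidates an in-flight migrate */
static void allocate_block(void)
{
        pthread_rwlock_wrlock(&data_sem);
        flags &= ~MIGRATE_FLAG;
        pthread_rwlock_unlock(&data_sem);
}

/* migrate side, phase 1: announce the migrate */
static void migrate_start(void)
{
        pthread_rwlock_rdlock(&data_sem);
        flags |= MIGRATE_FLAG;
        pthread_rwlock_unlock(&data_sem);
}

/* migrate side, phase 2: commit only if nothing was allocated meanwhile */
static int migrate_commit(void)
{
        int ret = 0;

        pthread_rwlock_wrlock(&data_sem);
        if (!(flags & MIGRATE_FLAG))
                ret = -1;                /* like the -EAGAIN in the diff */
        else
                flags &= ~MIGRATE_FLAG;  /* ...swap i_data here... */
        pthread_rwlock_unlock(&data_sem);
        return ret;
}

int main(void)
{
        migrate_start();
        allocate_block();                /* a racing writer allocates a block */
        printf("commit: %s\n", migrate_commit() ? "failed (-EAGAIN)" : "ok");
        return 0;
}
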
index 28aa2ed4297ecb30aeedb1d04f3c5470410c7e9a..ab16beaa830d3814250146d044f8d2c4a59c7dd4 100644 (file)
 #include <linux/pagemap.h>
 #include <linux/jbd2.h>
 #include <linux/time.h>
-#include <linux/ext4_fs.h>
-#include <linux/ext4_jbd2.h>
 #include <linux/fcntl.h>
 #include <linux/stat.h>
 #include <linux/string.h>
 #include <linux/quotaops.h>
 #include <linux/buffer_head.h>
 #include <linux/bio.h>
+#include "ext4.h"
+#include "ext4_jbd2.h"
 
 #include "namei.h"
 #include "xattr.h"
@@ -57,10 +57,15 @@ static struct buffer_head *ext4_append(handle_t *handle,
 
        *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
 
-       if ((bh = ext4_bread(handle, inode, *block, 1, err))) {
+       bh = ext4_bread(handle, inode, *block, 1, err);
+       if (bh) {
                inode->i_size += inode->i_sb->s_blocksize;
                EXT4_I(inode)->i_disksize = inode->i_size;
-               ext4_journal_get_write_access(handle,bh);
+               *err = ext4_journal_get_write_access(handle, bh);
+               if (*err) {
+                       brelse(bh);
+                       bh = NULL;
+               }
        }
        return bh;
 }
@@ -348,7 +353,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
        if (root->info.hash_version != DX_HASH_TEA &&
            root->info.hash_version != DX_HASH_HALF_MD4 &&
            root->info.hash_version != DX_HASH_LEGACY) {
-               ext4_warning(dir->i_sb, __FUNCTION__,
+               ext4_warning(dir->i_sb, __func__,
                             "Unrecognised inode hash code %d",
                             root->info.hash_version);
                brelse(bh);
@@ -362,7 +367,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
        hash = hinfo->hash;
 
        if (root->info.unused_flags & 1) {
-               ext4_warning(dir->i_sb, __FUNCTION__,
+               ext4_warning(dir->i_sb, __func__,
                             "Unimplemented inode hash flags: %#06x",
                             root->info.unused_flags);
                brelse(bh);
@@ -371,7 +376,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
        }
 
        if ((indirect = root->info.indirect_levels) > 1) {
-               ext4_warning(dir->i_sb, __FUNCTION__,
+               ext4_warning(dir->i_sb, __func__,
                             "Unimplemented inode hash depth: %#06x",
                             root->info.indirect_levels);
                brelse(bh);
@@ -384,7 +389,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
 
        if (dx_get_limit(entries) != dx_root_limit(dir,
                                                   root->info.info_length)) {
-               ext4_warning(dir->i_sb, __FUNCTION__,
+               ext4_warning(dir->i_sb, __func__,
                             "dx entry: limit != root limit");
                brelse(bh);
                *err = ERR_BAD_DX_DIR;
@@ -396,7 +401,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
        {
                count = dx_get_count(entries);
                if (!count || count > dx_get_limit(entries)) {
-                       ext4_warning(dir->i_sb, __FUNCTION__,
+                       ext4_warning(dir->i_sb, __func__,
                                     "dx entry: no count or count > limit");
                        brelse(bh);
                        *err = ERR_BAD_DX_DIR;
@@ -441,7 +446,7 @@ dx_probe(struct dentry *dentry, struct inode *dir,
                        goto fail2;
                at = entries = ((struct dx_node *) bh->b_data)->entries;
                if (dx_get_limit(entries) != dx_node_limit (dir)) {
-                       ext4_warning(dir->i_sb, __FUNCTION__,
+                       ext4_warning(dir->i_sb, __func__,
                                     "dx entry: limit != node limit");
                        brelse(bh);
                        *err = ERR_BAD_DX_DIR;
@@ -457,7 +462,7 @@ fail2:
        }
 fail:
        if (*err == ERR_BAD_DX_DIR)
-               ext4_warning(dir->i_sb, __FUNCTION__,
+               ext4_warning(dir->i_sb, __func__,
                             "Corrupt dir inode %ld, running e2fsck is "
                             "recommended.", dir->i_ino);
        return NULL;
@@ -914,7 +919,7 @@ restart:
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        /* read error, skip block & hope for the best */
-                       ext4_error(sb, __FUNCTION__, "reading directory #%lu "
+                       ext4_error(sb, __func__, "reading directory #%lu "
                                   "offset %lu", dir->i_ino,
                                   (unsigned long)block);
                        brelse(bh);
@@ -1007,7 +1012,7 @@ static struct buffer_head * ext4_dx_find_entry(struct dentry *dentry,
                retval = ext4_htree_next_block(dir, hash, frame,
                                               frames, NULL);
                if (retval < 0) {
-                       ext4_warning(sb, __FUNCTION__,
+                       ext4_warning(sb, __func__,
                             "error reading index page in directory #%lu",
                             dir->i_ino);
                        *err = retval;
@@ -1532,7 +1537,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
 
                if (levels && (dx_get_count(frames->entries) ==
                               dx_get_limit(frames->entries))) {
-                       ext4_warning(sb, __FUNCTION__,
+                       ext4_warning(sb, __func__,
                                     "Directory index full!");
                        err = -ENOSPC;
                        goto cleanup;
@@ -1860,11 +1865,11 @@ static int empty_dir (struct inode * inode)
        if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
            !(bh = ext4_bread (NULL, inode, 0, 0, &err))) {
                if (err)
-                       ext4_error(inode->i_sb, __FUNCTION__,
+                       ext4_error(inode->i_sb, __func__,
                                   "error %d reading directory #%lu offset 0",
                                   err, inode->i_ino);
                else
-                       ext4_warning(inode->i_sb, __FUNCTION__,
+                       ext4_warning(inode->i_sb, __func__,
                                     "bad directory (dir #%lu) - no data block",
                                     inode->i_ino);
                return 1;
@@ -1893,7 +1898,7 @@ static int empty_dir (struct inode * inode)
                                offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err);
                        if (!bh) {
                                if (err)
-                                       ext4_error(sb, __FUNCTION__,
+                                       ext4_error(sb, __func__,
                                                   "error %d reading directory"
                                                   " #%lu offset %lu",
                                                   err, inode->i_ino, offset);
@@ -2217,6 +2222,8 @@ retry:
                        goto out_stop;
                }
        } else {
+               /* clear the extent format for fast symlink */
+               EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL;
                inode->i_op = &ext4_fast_symlink_inode_operations;
                memcpy((char*)&EXT4_I(inode)->i_data,symname,l);
                inode->i_size = l-1;
@@ -2347,6 +2354,9 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
                                              EXT4_FEATURE_INCOMPAT_FILETYPE))
                        new_de->file_type = old_de->file_type;
                new_dir->i_version++;
+               new_dir->i_ctime = new_dir->i_mtime =
+                                       ext4_current_time(new_dir);
+               ext4_mark_inode_dirty(handle, new_dir);
                BUFFER_TRACE(new_bh, "call ext4_journal_dirty_metadata");
                ext4_journal_dirty_metadata(handle, new_bh);
                brelse(new_bh);
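
A note on the recurring __FUNCTION__ -> __func__ substitution in the ext4 hunks above: __func__ is the predefined identifier standardized by C99, while __FUNCTION__ is the older GCC-specific spelling; both expand to the name of the enclosing function. The short userspace sketch below is only illustrative and is not part of the patch:

#include <stdio.h>

static void report(void)
{
	/* prints "report" - standard C99 predefined identifier */
	printf("%s\n", __func__);
#ifdef __GNUC__
	/* same output, but only available as a GCC extension */
	printf("%s\n", __FUNCTION__);
#endif
}

int main(void)
{
	report();
	return 0;
}
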
index e29efa0f9d6298402bf35b588838d9737b08c551..9f086a6a472b1c13359334d6966caaa6a90d4ea1 100644 (file)
 
 #define EXT4FS_DEBUG
 
-#include <linux/ext4_jbd2.h>
-
 #include <linux/errno.h>
 #include <linux/slab.h>
 
+#include "ext4_jbd2.h"
 #include "group.h"
 
 #define outside(b, first, last)        ((b) < (first) || (b) >= (last))
@@ -50,63 +49,63 @@ static int verify_group_input(struct super_block *sb,
 
        ext4_get_group_no_and_offset(sb, start, NULL, &offset);
        if (group != sbi->s_groups_count)
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "Cannot add at group %u (only %lu groups)",
                             input->group, sbi->s_groups_count);
        else if (offset != 0)
-                       ext4_warning(sb, __FUNCTION__, "Last group not full");
+                       ext4_warning(sb, __func__, "Last group not full");
        else if (input->reserved_blocks > input->blocks_count / 5)
-               ext4_warning(sb, __FUNCTION__, "Reserved blocks too high (%u)",
+               ext4_warning(sb, __func__, "Reserved blocks too high (%u)",
                             input->reserved_blocks);
        else if (free_blocks_count < 0)
-               ext4_warning(sb, __FUNCTION__, "Bad blocks count %u",
+               ext4_warning(sb, __func__, "Bad blocks count %u",
                             input->blocks_count);
        else if (!(bh = sb_bread(sb, end - 1)))
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "Cannot read last block (%llu)",
                             end - 1);
        else if (outside(input->block_bitmap, start, end))
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "Block bitmap not in group (block %llu)",
                             (unsigned long long)input->block_bitmap);
        else if (outside(input->inode_bitmap, start, end))
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "Inode bitmap not in group (block %llu)",
                             (unsigned long long)input->inode_bitmap);
        else if (outside(input->inode_table, start, end) ||
                 outside(itend - 1, start, end))
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "Inode table not in group (blocks %llu-%llu)",
                             (unsigned long long)input->inode_table, itend - 1);
        else if (input->inode_bitmap == input->block_bitmap)
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "Block bitmap same as inode bitmap (%llu)",
                             (unsigned long long)input->block_bitmap);
        else if (inside(input->block_bitmap, input->inode_table, itend))
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "Block bitmap (%llu) in inode table (%llu-%llu)",
                             (unsigned long long)input->block_bitmap,
                             (unsigned long long)input->inode_table, itend - 1);
        else if (inside(input->inode_bitmap, input->inode_table, itend))
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "Inode bitmap (%llu) in inode table (%llu-%llu)",
                             (unsigned long long)input->inode_bitmap,
                             (unsigned long long)input->inode_table, itend - 1);
        else if (inside(input->block_bitmap, start, metaend))
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "Block bitmap (%llu) in GDT table"
                             " (%llu-%llu)",
                             (unsigned long long)input->block_bitmap,
                             start, metaend - 1);
        else if (inside(input->inode_bitmap, start, metaend))
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "Inode bitmap (%llu) in GDT table"
                             " (%llu-%llu)",
                             (unsigned long long)input->inode_bitmap,
                             start, metaend - 1);
        else if (inside(input->inode_table, start, metaend) ||
                 inside(itend - 1, start, metaend))
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "Inode table (%llu-%llu) overlaps"
                             "GDT table (%llu-%llu)",
                             (unsigned long long)input->inode_table,
@@ -368,7 +367,7 @@ static int verify_reserved_gdb(struct super_block *sb,
        while ((grp = ext4_list_backups(sb, &three, &five, &seven)) < end) {
                if (le32_to_cpu(*p++) !=
                    grp * EXT4_BLOCKS_PER_GROUP(sb) + blk){
-                       ext4_warning(sb, __FUNCTION__,
+                       ext4_warning(sb, __func__,
                                     "reserved GDT %llu"
                                     " missing grp %d (%llu)",
                                     blk, grp,
@@ -424,7 +423,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
         */
        if (EXT4_SB(sb)->s_sbh->b_blocknr !=
            le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                        "won't resize using backup superblock at %llu",
                        (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
                return -EPERM;
@@ -448,7 +447,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
 
        data = (__le32 *)dind->b_data;
        if (le32_to_cpu(data[gdb_num % EXT4_ADDR_PER_BLOCK(sb)]) != gdblock) {
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "new group %u GDT block %llu not reserved",
                             input->group, gdblock);
                err = -EINVAL;
@@ -469,10 +468,10 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
                goto exit_dindj;
 
        n_group_desc = kmalloc((gdb_num + 1) * sizeof(struct buffer_head *),
-                       GFP_KERNEL);
+                       GFP_NOFS);
        if (!n_group_desc) {
                err = -ENOMEM;
-               ext4_warning (sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                              "not enough memory for %lu groups", gdb_num + 1);
                goto exit_inode;
        }
@@ -502,8 +501,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
        EXT4_SB(sb)->s_gdb_count++;
        kfree(o_group_desc);
 
-       es->s_reserved_gdt_blocks =
-               cpu_to_le16(le16_to_cpu(es->s_reserved_gdt_blocks) - 1);
+       le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
        ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
 
        return 0;
@@ -553,7 +551,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
        int res, i;
        int err;
 
-       primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_KERNEL);
+       primary = kmalloc(reserved_gdb * sizeof(*primary), GFP_NOFS);
        if (!primary)
                return -ENOMEM;
 
@@ -571,7 +569,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
        /* Get each reserved primary GDT block and verify it holds backups */
        for (res = 0; res < reserved_gdb; res++, blk++) {
                if (le32_to_cpu(*data) != blk) {
-                       ext4_warning(sb, __FUNCTION__,
+                       ext4_warning(sb, __func__,
                                     "reserved block %llu"
                                     " not at offset %ld",
                                     blk,
@@ -715,7 +713,7 @@ static void update_backups(struct super_block *sb,
         */
 exit_err:
        if (err) {
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "can't update backup for group %lu (err %d), "
                             "forcing fsck on next reboot", group, err);
                sbi->s_mount_state &= ~EXT4_VALID_FS;
@@ -755,33 +753,33 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
 
        if (gdb_off == 0 && !EXT4_HAS_RO_COMPAT_FEATURE(sb,
                                        EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER)) {
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "Can't resize non-sparse filesystem further");
                return -EPERM;
        }
 
        if (ext4_blocks_count(es) + input->blocks_count <
            ext4_blocks_count(es)) {
-               ext4_warning(sb, __FUNCTION__, "blocks_count overflow\n");
+               ext4_warning(sb, __func__, "blocks_count overflow\n");
                return -EINVAL;
        }
 
        if (le32_to_cpu(es->s_inodes_count) + EXT4_INODES_PER_GROUP(sb) <
            le32_to_cpu(es->s_inodes_count)) {
-               ext4_warning(sb, __FUNCTION__, "inodes_count overflow\n");
+               ext4_warning(sb, __func__, "inodes_count overflow\n");
                return -EINVAL;
        }
 
        if (reserved_gdb || gdb_off == 0) {
                if (!EXT4_HAS_COMPAT_FEATURE(sb,
                                             EXT4_FEATURE_COMPAT_RESIZE_INODE)){
-                       ext4_warning(sb, __FUNCTION__,
+                       ext4_warning(sb, __func__,
                                     "No reserved GDT blocks, can't resize");
                        return -EPERM;
                }
                inode = ext4_iget(sb, EXT4_RESIZE_INO);
                if (IS_ERR(inode)) {
-                       ext4_warning(sb, __FUNCTION__,
+                       ext4_warning(sb, __func__,
                                     "Error opening resize inode");
                        return PTR_ERR(inode);
                }
@@ -810,7 +808,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
 
        lock_super(sb);
        if (input->group != sbi->s_groups_count) {
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "multiple resizers run on filesystem!");
                err = -EBUSY;
                goto exit_journal;
@@ -877,8 +875,7 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
         */
        ext4_blocks_count_set(es, ext4_blocks_count(es) +
                input->blocks_count);
-       es->s_inodes_count = cpu_to_le32(le32_to_cpu(es->s_inodes_count) +
-               EXT4_INODES_PER_GROUP(sb));
+       le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb));
 
        /*
         * We need to protect s_groups_count against other CPUs seeing
@@ -977,13 +974,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
                        " too large to resize to %llu blocks safely\n",
                        sb->s_id, n_blocks_count);
                if (sizeof(sector_t) < 8)
-                       ext4_warning(sb, __FUNCTION__,
+                       ext4_warning(sb, __func__,
                        "CONFIG_LBD not enabled\n");
                return -EINVAL;
        }
 
        if (n_blocks_count < o_blocks_count) {
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "can't shrink FS - resize aborted");
                return -EBUSY;
        }
@@ -992,7 +989,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
        ext4_get_group_no_and_offset(sb, o_blocks_count, NULL, &last);
 
        if (last == 0) {
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "need to use ext2online to resize further");
                return -EPERM;
        }
@@ -1000,7 +997,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
        add = EXT4_BLOCKS_PER_GROUP(sb) - last;
 
        if (o_blocks_count + add < o_blocks_count) {
-               ext4_warning(sb, __FUNCTION__, "blocks_count overflow");
+               ext4_warning(sb, __func__, "blocks_count overflow");
                return -EINVAL;
        }
 
@@ -1008,7 +1005,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
                add = n_blocks_count - o_blocks_count;
 
        if (o_blocks_count + add < n_blocks_count)
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "will only finish group (%llu"
                             " blocks, %u new)",
                             o_blocks_count + add, add);
@@ -1016,7 +1013,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
        /* See if the device is actually as big as what was requested */
        bh = sb_bread(sb, o_blocks_count + add -1);
        if (!bh) {
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "can't read last block, resize aborted");
                return -ENOSPC;
        }
@@ -1028,13 +1025,13 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
        handle = ext4_journal_start_sb(sb, 3);
        if (IS_ERR(handle)) {
                err = PTR_ERR(handle);
-               ext4_warning(sb, __FUNCTION__, "error %d on journal start",err);
+               ext4_warning(sb, __func__, "error %d on journal start", err);
                goto exit_put;
        }
 
        lock_super(sb);
        if (o_blocks_count != ext4_blocks_count(es)) {
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "multiple resizers run on filesystem!");
                unlock_super(sb);
                ext4_journal_stop(handle);
@@ -1044,7 +1041,7 @@ int ext4_group_extend(struct super_block *sb, struct ext4_super_block *es,
 
        if ((err = ext4_journal_get_write_access(handle,
                                                 EXT4_SB(sb)->s_sbh))) {
-               ext4_warning(sb, __FUNCTION__,
+               ext4_warning(sb, __func__,
                             "error %d on journal write access", err);
                unlock_super(sb);
                ext4_journal_stop(handle);
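
The le16_add_cpu()/le32_add_cpu() conversions above (s_reserved_gdt_blocks, s_inodes_count, and later the xattr block refcount) collapse the open-coded read-convert-add-convert-store sequence on little-endian on-disk fields into a single helper call. Below is a minimal userspace model of the pattern, assuming a little-endian host so the conversion stubs are identity functions; the kernel provides the real helpers in its byteorder headers:

#include <stdint.h>
#include <stdio.h>

/* identity on a little-endian host; the kernel versions byte-swap on BE */
static uint16_t cpu_to_le16(uint16_t v) { return v; }
static uint16_t le16_to_cpu(uint16_t v) { return v; }

static void le16_add_cpu(uint16_t *var, int val)
{
	/* the open-coded form the patch removes:
	 *   *var = cpu_to_le16(le16_to_cpu(*var) + val);  */
	*var = cpu_to_le16((uint16_t)(le16_to_cpu(*var) + val));
}

int main(void)
{
	uint16_t reserved_gdt_blocks = cpu_to_le16(5);

	le16_add_cpu(&reserved_gdt_blocks, -1);	/* as add_new_gdb() now does */
	printf("%u\n", le16_to_cpu(reserved_gdt_blocks));	/* prints 4 */
	return 0;
}
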
index 13383ba18f1d72d1440a90e021bf21bffb24d039..52dd0679a4e2f8d77e418cb1f6bd5a842bc10ca1 100644 (file)
@@ -21,8 +21,6 @@
 #include <linux/fs.h>
 #include <linux/time.h>
 #include <linux/jbd2.h>
-#include <linux/ext4_fs.h>
-#include <linux/ext4_jbd2.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
 #include <linux/seq_file.h>
 #include <linux/log2.h>
 #include <linux/crc16.h>
-
 #include <asm/uaccess.h>
 
+#include "ext4.h"
+#include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
 #include "namei.h"
@@ -135,7 +134,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
         * take the FS itself readonly cleanly. */
        journal = EXT4_SB(sb)->s_journal;
        if (is_journal_aborted(journal)) {
-               ext4_abort(sb, __FUNCTION__,
+               ext4_abort(sb, __func__,
                           "Detected aborted journal");
                return ERR_PTR(-EROFS);
        }
@@ -355,7 +354,7 @@ void ext4_update_dynamic_rev(struct super_block *sb)
        if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
                return;
 
-       ext4_warning(sb, __FUNCTION__,
+       ext4_warning(sb, __func__,
                     "updating to rev %d because of new feature flag, "
                     "running e2fsck is recommended",
                     EXT4_DYNAMIC_REV);
@@ -813,7 +812,8 @@ static int ext4_acquire_dquot(struct dquot *dquot);
 static int ext4_release_dquot(struct dquot *dquot);
 static int ext4_mark_dquot_dirty(struct dquot *dquot);
 static int ext4_write_info(struct super_block *sb, int type);
-static int ext4_quota_on(struct super_block *sb, int type, int format_id, char *path);
+static int ext4_quota_on(struct super_block *sb, int type, int format_id,
+                               char *path, int remount);
 static int ext4_quota_on_mount(struct super_block *sb, int type);
 static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
                               size_t len, loff_t off);
@@ -944,8 +944,8 @@ static match_table_t tokens = {
        {Opt_mballoc, "mballoc"},
        {Opt_nomballoc, "nomballoc"},
        {Opt_stripe, "stripe=%u"},
-       {Opt_err, NULL},
        {Opt_resize, "resize"},
+       {Opt_err, NULL},
 };
 
 static ext4_fsblk_t get_sb_block(void **data)
@@ -1387,11 +1387,11 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
                 * a plain journaled filesystem we can keep it set as
                 * valid forever! :)
                 */
-       es->s_state = cpu_to_le16(le16_to_cpu(es->s_state) & ~EXT4_VALID_FS);
+       es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
 #endif
        if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
                es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
-       es->s_mnt_count=cpu_to_le16(le16_to_cpu(es->s_mnt_count) + 1);
+       le16_add_cpu(&es->s_mnt_count, 1);
        es->s_mtime = cpu_to_le32(get_seconds());
        ext4_update_dynamic_rev(sb);
        EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
@@ -1484,36 +1484,33 @@ static int ext4_check_descriptors(struct super_block *sb)
                block_bitmap = ext4_block_bitmap(sb, gdp);
                if (block_bitmap < first_block || block_bitmap > last_block)
                {
-                       ext4_error (sb, "ext4_check_descriptors",
-                                   "Block bitmap for group %lu"
-                                   " not in group (block %llu)!",
-                                   i, block_bitmap);
+                       printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
+                              "Block bitmap for group %lu not in group "
+                              "(block %llu)!", i, block_bitmap);
                        return 0;
                }
                inode_bitmap = ext4_inode_bitmap(sb, gdp);
                if (inode_bitmap < first_block || inode_bitmap > last_block)
                {
-                       ext4_error (sb, "ext4_check_descriptors",
-                                   "Inode bitmap for group %lu"
-                                   " not in group (block %llu)!",
-                                   i, inode_bitmap);
+                       printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
+                              "Inode bitmap for group %lu not in group "
+                              "(block %llu)!", i, inode_bitmap);
                        return 0;
                }
                inode_table = ext4_inode_table(sb, gdp);
                if (inode_table < first_block ||
                    inode_table + sbi->s_itb_per_group - 1 > last_block)
                {
-                       ext4_error (sb, "ext4_check_descriptors",
-                                   "Inode table for group %lu"
-                                   " not in group (block %llu)!",
-                                   i, inode_table);
+                       printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
+                              "Inode table for group %lu not in group "
+                              "(block %llu)!", i, inode_table);
                        return 0;
                }
                if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
-                       ext4_error(sb, __FUNCTION__,
-                                  "Checksum for group %lu failed (%u!=%u)\n",
-                                   i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
-                                   gdp)), le16_to_cpu(gdp->bg_checksum));
+                       printk(KERN_ERR "EXT4-fs: ext4_check_descriptors: "
+                              "Checksum for group %lu failed (%u!=%u)\n",
+                              i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
+                              gdp)), le16_to_cpu(gdp->bg_checksum));
                        return 0;
                }
                if (!flexbg_flag)
@@ -1593,8 +1590,8 @@ static void ext4_orphan_cleanup (struct super_block * sb,
        while (es->s_last_orphan) {
                struct inode *inode;
 
-               if (!(inode =
-                     ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan)))) {
+               inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
+               if (IS_ERR(inode)) {
                        es->s_last_orphan = 0;
                        break;
                }
@@ -1604,7 +1601,7 @@ static void ext4_orphan_cleanup (struct super_block * sb,
                if (inode->i_nlink) {
                        printk(KERN_DEBUG
                                "%s: truncating inode %lu to %Ld bytes\n",
-                               __FUNCTION__, inode->i_ino, inode->i_size);
+                               __func__, inode->i_ino, inode->i_size);
                        jbd_debug(2, "truncating inode %lu to %Ld bytes\n",
                                  inode->i_ino, inode->i_size);
                        ext4_truncate(inode);
@@ -1612,7 +1609,7 @@ static void ext4_orphan_cleanup (struct super_block * sb,
                } else {
                        printk(KERN_DEBUG
                                "%s: deleting unreferenced inode %lu\n",
-                               __FUNCTION__, inode->i_ino);
+                               __func__, inode->i_ino);
                        jbd_debug(2, "deleting unreferenced inode %lu\n",
                                  inode->i_ino);
                        nr_orphans++;
@@ -1632,7 +1629,7 @@ static void ext4_orphan_cleanup (struct super_block * sb,
        /* Turn quotas off */
        for (i = 0; i < MAXQUOTAS; i++) {
                if (sb_dqopt(sb)->files[i])
-                       vfs_quota_off(sb, i);
+                       vfs_quota_off(sb, i, 0);
        }
 #endif
        sb->s_flags = s_flags; /* Restore MS_RDONLY status */
@@ -2698,9 +2695,9 @@ static void ext4_clear_journal_err(struct super_block * sb,
                char nbuf[16];
 
                errstr = ext4_decode_error(sb, j_errno, nbuf);
-               ext4_warning(sb, __FUNCTION__, "Filesystem error recorded "
+               ext4_warning(sb, __func__, "Filesystem error recorded "
                             "from previous mount: %s", errstr);
-               ext4_warning(sb, __FUNCTION__, "Marking fs in need of "
+               ext4_warning(sb, __func__, "Marking fs in need of "
                             "filesystem check.");
 
                EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
@@ -2827,7 +2824,7 @@ static int ext4_remount (struct super_block * sb, int * flags, char * data)
        }
 
        if (sbi->s_mount_opt & EXT4_MOUNT_ABORT)
-               ext4_abort(sb, __FUNCTION__, "Abort forced by user");
+               ext4_abort(sb, __func__, "Abort forced by user");
 
        sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
                ((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
@@ -3039,8 +3036,14 @@ static int ext4_dquot_drop(struct inode *inode)
 
        /* We may delete quota structure so we need to reserve enough blocks */
        handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
-       if (IS_ERR(handle))
+       if (IS_ERR(handle)) {
+               /*
+                * We call dquot_drop() anyway to at least release references
+                * to quota structures so that umount does not hang.
+                */
+               dquot_drop(inode);
                return PTR_ERR(handle);
+       }
        ret = dquot_drop(inode);
        err = ext4_journal_stop(handle);
        if (!ret)
@@ -3143,7 +3146,7 @@ static int ext4_quota_on_mount(struct super_block *sb, int type)
  * Standard function to be called on quota_on
  */
 static int ext4_quota_on(struct super_block *sb, int type, int format_id,
-                        char *path)
+                        char *path, int remount)
 {
        int err;
        struct nameidata nd;
@@ -3151,9 +3154,9 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
        if (!test_opt(sb, QUOTA))
                return -EINVAL;
        /* Not journalling quota? */
-       if (!EXT4_SB(sb)->s_qf_names[USRQUOTA] &&
-           !EXT4_SB(sb)->s_qf_names[GRPQUOTA])
-               return vfs_quota_on(sb, type, format_id, path);
+       if ((!EXT4_SB(sb)->s_qf_names[USRQUOTA] &&
+           !EXT4_SB(sb)->s_qf_names[GRPQUOTA]) || remount)
+               return vfs_quota_on(sb, type, format_id, path, remount);
        err = path_lookup(path, LOOKUP_FOLLOW, &nd);
        if (err)
                return err;
@@ -3168,7 +3171,7 @@ static int ext4_quota_on(struct super_block *sb, int type, int format_id,
                        "EXT4-fs: Quota file not on filesystem root. "
                        "Journalled quota will not work.\n");
        path_put(&nd.path);
-       return vfs_quota_on(sb, type, format_id, path);
+       return vfs_quota_on(sb, type, format_id, path, remount);
 }
 
 /* Read data from quotafile - avoid pagecache and such because we cannot afford
index e6f9da4287c455d5975a87668d90dff7e1dcd766..e9178643dc01132b72a396b8099ad0cd8c3785c6 100644 (file)
@@ -19,8 +19,8 @@
 
 #include <linux/fs.h>
 #include <linux/jbd2.h>
-#include <linux/ext4_fs.h>
 #include <linux/namei.h>
+#include "ext4.h"
 #include "xattr.h"
 
 static void * ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
index e9054c1c7d9366ce36f1d140b125c5db8915cd15..3fbc2c6c3d0e900c5ce8e1a5f8e56270f5984768 100644 (file)
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
-#include <linux/ext4_jbd2.h>
-#include <linux/ext4_fs.h>
 #include <linux/mbcache.h>
 #include <linux/quotaops.h>
 #include <linux/rwsem.h>
+#include "ext4_jbd2.h"
+#include "ext4.h"
 #include "xattr.h"
 #include "acl.h"
 
@@ -92,6 +92,8 @@ static struct buffer_head *ext4_xattr_cache_find(struct inode *,
                                                 struct mb_cache_entry **);
 static void ext4_xattr_rehash(struct ext4_xattr_header *,
                              struct ext4_xattr_entry *);
+static int ext4_xattr_list(struct inode *inode, char *buffer,
+                          size_t buffer_size);
 
 static struct mb_cache *ext4_xattr_cache;
 
@@ -225,7 +227,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
        ea_bdebug(bh, "b_count=%d, refcount=%d",
                atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
        if (ext4_xattr_check_block(bh)) {
-bad_block:     ext4_error(inode->i_sb, __FUNCTION__,
+bad_block:     ext4_error(inode->i_sb, __func__,
                           "inode %lu: bad block %llu", inode->i_ino,
                           EXT4_I(inode)->i_file_acl);
                error = -EIO;
@@ -367,7 +369,7 @@ ext4_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
        ea_bdebug(bh, "b_count=%d, refcount=%d",
                atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
        if (ext4_xattr_check_block(bh)) {
-               ext4_error(inode->i_sb, __FUNCTION__,
+               ext4_error(inode->i_sb, __func__,
                           "inode %lu: bad block %llu", inode->i_ino,
                           EXT4_I(inode)->i_file_acl);
                error = -EIO;
@@ -420,7 +422,7 @@ cleanup:
  * Returns a negative error number on failure, or the number of bytes
  * used / required on success.
  */
-int
+static int
 ext4_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
 {
        int i_error, b_error;
@@ -484,8 +486,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
                get_bh(bh);
                ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
        } else {
-               BHDR(bh)->h_refcount = cpu_to_le32(
-                               le32_to_cpu(BHDR(bh)->h_refcount) - 1);
+               le32_add_cpu(&BHDR(bh)->h_refcount, -1);
                error = ext4_journal_dirty_metadata(handle, bh);
                if (IS_SYNC(inode))
                        handle->h_sync = 1;
@@ -660,7 +661,7 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
                        atomic_read(&(bs->bh->b_count)),
                        le32_to_cpu(BHDR(bs->bh)->h_refcount));
                if (ext4_xattr_check_block(bs->bh)) {
-                       ext4_error(sb, __FUNCTION__,
+                       ext4_error(sb, __func__,
                                "inode %lu: bad block %llu", inode->i_ino,
                                EXT4_I(inode)->i_file_acl);
                        error = -EIO;
@@ -738,7 +739,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                                ce = NULL;
                        }
                        ea_bdebug(bs->bh, "cloning");
-                       s->base = kmalloc(bs->bh->b_size, GFP_KERNEL);
+                       s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
                        error = -ENOMEM;
                        if (s->base == NULL)
                                goto cleanup;
@@ -750,7 +751,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                }
        } else {
                /* Allocate a buffer where we construct the new block. */
-               s->base = kzalloc(sb->s_blocksize, GFP_KERNEL);
+               s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
                /* assert(header == s->base) */
                error = -ENOMEM;
                if (s->base == NULL)
@@ -789,8 +790,7 @@ inserted:
                                if (error)
                                        goto cleanup_dquot;
                                lock_buffer(new_bh);
-                               BHDR(new_bh)->h_refcount = cpu_to_le32(1 +
-                                       le32_to_cpu(BHDR(new_bh)->h_refcount));
+                               le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
                                ea_bdebug(new_bh, "reusing; refcount now=%d",
                                        le32_to_cpu(BHDR(new_bh)->h_refcount));
                                unlock_buffer(new_bh);
@@ -808,10 +808,8 @@ inserted:
                        get_bh(new_bh);
                } else {
                        /* We need to allocate a new block */
-                       ext4_fsblk_t goal = le32_to_cpu(
-                                       EXT4_SB(sb)->s_es->s_first_data_block) +
-                               (ext4_fsblk_t)EXT4_I(inode)->i_block_group *
-                               EXT4_BLOCKS_PER_GROUP(sb);
+                       ext4_fsblk_t goal = ext4_group_first_block_no(sb,
+                                               EXT4_I(inode)->i_block_group);
                        ext4_fsblk_t block = ext4_new_block(handle, inode,
                                                        goal, &error);
                        if (error)
@@ -863,7 +861,7 @@ cleanup_dquot:
        goto cleanup;
 
 bad_block:
-       ext4_error(inode->i_sb, __FUNCTION__,
+       ext4_error(inode->i_sb, __func__,
                   "inode %lu: bad block %llu", inode->i_ino,
                   EXT4_I(inode)->i_file_acl);
        goto cleanup;
@@ -1166,7 +1164,7 @@ retry:
                if (!bh)
                        goto cleanup;
                if (ext4_xattr_check_block(bh)) {
-                       ext4_error(inode->i_sb, __FUNCTION__,
+                       ext4_error(inode->i_sb, __func__,
                                "inode %lu: bad block %llu", inode->i_ino,
                                EXT4_I(inode)->i_file_acl);
                        error = -EIO;
@@ -1341,14 +1339,14 @@ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
                goto cleanup;
        bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
        if (!bh) {
-               ext4_error(inode->i_sb, __FUNCTION__,
+               ext4_error(inode->i_sb, __func__,
                        "inode %lu: block %llu read error", inode->i_ino,
                        EXT4_I(inode)->i_file_acl);
                goto cleanup;
        }
        if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
            BHDR(bh)->h_blocks != cpu_to_le32(1)) {
-               ext4_error(inode->i_sb, __FUNCTION__,
+               ext4_error(inode->i_sb, __func__,
                        "inode %lu: bad block %llu", inode->i_ino,
                        EXT4_I(inode)->i_file_acl);
                goto cleanup;
@@ -1475,7 +1473,7 @@ again:
                }
                bh = sb_bread(inode->i_sb, ce->e_block);
                if (!bh) {
-                       ext4_error(inode->i_sb, __FUNCTION__,
+                       ext4_error(inode->i_sb, __func__,
                                "inode %lu: block %lu read error",
                                inode->i_ino, (unsigned long) ce->e_block);
                } else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
index d7f5d6a126511e7f65c63793512732d6e31813bb..5992fe979bb960e4eda701b4c5f422d1fcb51148 100644 (file)
@@ -74,7 +74,6 @@ extern struct xattr_handler ext4_xattr_security_handler;
 extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
 
 extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
-extern int ext4_xattr_list(struct inode *, char *, size_t);
 extern int ext4_xattr_set(struct inode *, int, const char *, const void *, size_t, int);
 extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);
 
@@ -98,12 +97,6 @@ ext4_xattr_get(struct inode *inode, int name_index, const char *name,
        return -EOPNOTSUPP;
 }
 
-static inline int
-ext4_xattr_list(struct inode *inode, void *buffer, size_t size)
-{
-       return -EOPNOTSUPP;
-}
-
 static inline int
 ext4_xattr_set(struct inode *inode, int name_index, const char *name,
               const void *value, size_t size, int flags)
index f17eaf2321b9da564b303b5abfdccc0e2ff11018..ca5f89fc6caeba31ee41a15a3843452a1c0f56c6 100644 (file)
@@ -6,9 +6,9 @@
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/fs.h>
-#include <linux/ext4_jbd2.h>
-#include <linux/ext4_fs.h>
 #include <linux/security.h>
+#include "ext4_jbd2.h"
+#include "ext4.h"
 #include "xattr.h"
 
 static size_t
index e0f05acdafece0e0826c2985fc0a59c556fa5159..fff33382cadcfc2b93d3e81dab1ad84e57364d6a 100644 (file)
@@ -9,8 +9,8 @@
 #include <linux/string.h>
 #include <linux/capability.h>
 #include <linux/fs.h>
-#include <linux/ext4_jbd2.h>
-#include <linux/ext4_fs.h>
+#include "ext4_jbd2.h"
+#include "ext4.h"
 #include "xattr.h"
 
 #define XATTR_TRUSTED_PREFIX "trusted."
index 7ed3d8ebf09686d2315e3a1ebdc92ccff5ada1ad..67be723fcc4eeb6cfa3cb996d5e029176b900cc7 100644 (file)
@@ -8,8 +8,8 @@
 #include <linux/module.h>
 #include <linux/string.h>
 #include <linux/fs.h>
-#include <linux/ext4_jbd2.h>
-#include <linux/ext4_fs.h>
+#include "ext4_jbd2.h"
+#include "ext4.h"
 #include "xattr.h"
 
 #define XATTR_USER_PREFIX "user."
index 639b3b4f86d15a8a17a331a3fabdbb16d0bc8048..fda25479af26720690969dde01f444d8bc730f07 100644 (file)
@@ -242,7 +242,7 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
                /* prevent the infinite loop of cluster chain */
                if (*fclus > limit) {
                        fat_fs_panic(sb, "%s: detected the cluster chain loop"
-                                    " (i_pos %lld)", __FUNCTION__,
+                                    " (i_pos %lld)", __func__,
                                     MSDOS_I(inode)->i_pos);
                        nr = -EIO;
                        goto out;
@@ -253,7 +253,7 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
                        goto out;
                else if (nr == FAT_ENT_FREE) {
                        fat_fs_panic(sb, "%s: invalid cluster chain"
-                                    " (i_pos %lld)", __FUNCTION__,
+                                    " (i_pos %lld)", __func__,
                                     MSDOS_I(inode)->i_pos);
                        nr = -EIO;
                        goto out;
@@ -286,7 +286,7 @@ static int fat_bmap_cluster(struct inode *inode, int cluster)
                return ret;
        else if (ret == FAT_ENT_EOF) {
                fat_fs_panic(sb, "%s: request beyond EOF (i_pos %lld)",
-                            __FUNCTION__, MSDOS_I(inode)->i_pos);
+                            __func__, MSDOS_I(inode)->i_pos);
                return -EIO;
        }
        return dclus;
index 72cbcd61bd951268fa49eb18faf42d516606b6dd..486725ee99ae68a977bc9ca2d8e8cd47990ed579 100644 (file)
@@ -124,8 +124,8 @@ static inline int fat_get_entry(struct inode *dir, loff_t *pos,
  * but ignore that right now.
  * Ahem... Stack smashing in ring 0 isn't fun. Fixed.
  */
-static int uni16_to_x8(unsigned char *ascii, wchar_t *uni, int uni_xlate,
-                      struct nls_table *nls)
+static int uni16_to_x8(unsigned char *ascii, wchar_t *uni, int len,
+                      int uni_xlate, struct nls_table *nls)
 {
        wchar_t *ip, ec;
        unsigned char *op, nc;
@@ -135,10 +135,11 @@ static int uni16_to_x8(unsigned char *ascii, wchar_t *uni, int uni_xlate,
        ip = uni;
        op = ascii;
 
-       while (*ip) {
+       while (*ip && ((len - NLS_MAX_CHARSET_SIZE) > 0)) {
                ec = *ip++;
                if ( (charlen = nls->uni2char(ec, op, NLS_MAX_CHARSET_SIZE)) > 0) {
                        op += charlen;
+                       len -= charlen;
                } else {
                        if (uni_xlate == 1) {
                                *op = ':';
@@ -149,16 +150,19 @@ static int uni16_to_x8(unsigned char *ascii, wchar_t *uni, int uni_xlate,
                                        ec >>= 4;
                                }
                                op += 5;
+                               len -= 5;
                        } else {
                                *op++ = '?';
+                               len--;
                        }
                }
-               /* We have some slack there, so it's OK */
-               if (op>ascii+256) {
-                       op = ascii + 256;
-                       break;
-               }
        }
+
+       if (unlikely(*ip)) {
+               printk(KERN_WARNING "FAT: filename was truncated while "
+                      "converting.");
+       }
+
        *op = 0;
        return (op - ascii);
 }
@@ -243,7 +247,7 @@ static int fat_parse_long(struct inode *dir, loff_t *pos,
        unsigned char id, slot, slots, alias_checksum;
 
        if (!*unicode) {
-               *unicode = (wchar_t *)__get_free_page(GFP_KERNEL);
+               *unicode = __getname();
                if (!*unicode) {
                        brelse(*bh);
                        return -ENOMEM;
@@ -311,9 +315,11 @@ int fat_search_long(struct inode *inode, const unsigned char *name,
        struct nls_table *nls_io = sbi->nls_io;
        struct nls_table *nls_disk = sbi->nls_disk;
        wchar_t bufuname[14];
-       unsigned char xlate_len, nr_slots;
+       unsigned char nr_slots;
+       int xlate_len;
        wchar_t *unicode = NULL;
-       unsigned char work[MSDOS_NAME], bufname[260];   /* 256 + 4 */
+       unsigned char work[MSDOS_NAME];
+       unsigned char *bufname = NULL;
        int uni_xlate = sbi->options.unicode_xlate;
        int utf8 = sbi->options.utf8;
        int anycase = (sbi->options.name_check != 's');
@@ -321,6 +327,10 @@ int fat_search_long(struct inode *inode, const unsigned char *name,
        loff_t cpos = 0;
        int chl, i, j, last_u, err;
 
+       bufname = __getname();
+       if (!bufname)
+               return -ENOMEM;
+
        err = -ENOENT;
        while(1) {
                if (fat_get_entry(inode, &cpos, &bh, &de) == -1)
@@ -386,8 +396,8 @@ parse_record:
 
                bufuname[last_u] = 0x0000;
                xlate_len = utf8
-                       ?utf8_wcstombs(bufname, bufuname, sizeof(bufname))
-                       :uni16_to_x8(bufname, bufuname, uni_xlate, nls_io);
+                       ?utf8_wcstombs(bufname, bufuname, PATH_MAX)
+                       :uni16_to_x8(bufname, bufuname, PATH_MAX, uni_xlate, nls_io);
                if (xlate_len == name_len)
                        if ((!anycase && !memcmp(name, bufname, xlate_len)) ||
                            (anycase && !nls_strnicmp(nls_io, name, bufname,
@@ -396,8 +406,8 @@ parse_record:
 
                if (nr_slots) {
                        xlate_len = utf8
-                               ?utf8_wcstombs(bufname, unicode, sizeof(bufname))
-                               :uni16_to_x8(bufname, unicode, uni_xlate, nls_io);
+                               ?utf8_wcstombs(bufname, unicode, PATH_MAX)
+                               :uni16_to_x8(bufname, unicode, PATH_MAX, uni_xlate, nls_io);
                        if (xlate_len != name_len)
                                continue;
                        if ((!anycase && !memcmp(name, bufname, xlate_len)) ||
@@ -416,8 +426,10 @@ Found:
        sinfo->i_pos = fat_make_i_pos(sb, sinfo->bh, sinfo->de);
        err = 0;
 EODir:
+       if (bufname)
+               __putname(bufname);
        if (unicode)
-               free_page((unsigned long)unicode);
+               __putname(unicode);
 
        return err;
 }
@@ -598,7 +610,7 @@ parse_record:
        if (isvfat) {
                bufuname[j] = 0x0000;
                i = utf8 ? utf8_wcstombs(bufname, bufuname, sizeof(bufname))
-                        : uni16_to_x8(bufname, bufuname, uni_xlate, nls_io);
+                        : uni16_to_x8(bufname, bufuname, sizeof(bufname), uni_xlate, nls_io);
        }
 
        fill_name = bufname;
@@ -607,10 +619,10 @@ parse_record:
                /* convert the unicode long name. 261 is maximum size
                 * of unicode buffer. (13 * slots + nul) */
                void *longname = unicode + 261;
-               int buf_size = PAGE_SIZE - (261 * sizeof(unicode[0]));
+               int buf_size = PATH_MAX - (261 * sizeof(unicode[0]));
                int long_len = utf8
                        ? utf8_wcstombs(longname, unicode, buf_size)
-                       : uni16_to_x8(longname, unicode, uni_xlate, nls_io);
+                       : uni16_to_x8(longname, unicode, buf_size, uni_xlate, nls_io);
 
                if (!both) {
                        fill_name = longname;
@@ -640,7 +652,7 @@ EODir:
 FillFailed:
        brelse(bh);
        if (unicode)
-               free_page((unsigned long)unicode);
+               __putname(unicode);
 out:
        unlock_kernel();
        return ret;
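
The fs/fat directory-name hunks above replace the fixed bufname[260] stack buffer with a PATH_MAX-sized buffer from __getname() and thread an explicit output length through uni16_to_x8(), so the NLS conversion can no longer run past the end of the buffer. The userspace sketch below models only the new length guard; the byte copy stands in for the real nls->uni2char() call and the limit value is made up:

#include <stdio.h>
#include <wchar.h>

#define MAX_CHARSET_SIZE 6	/* stand-in for NLS_MAX_CHARSET_SIZE */

static int bounded_copy(unsigned char *out, const wchar_t *in, int len)
{
	unsigned char *op = out;

	/* stop while there is still room for a worst-case multi-byte character */
	while (*in && (len - MAX_CHARSET_SIZE) > 0) {
		*op++ = (unsigned char)*in++;	/* placeholder for nls->uni2char() */
		len--;
	}
	if (*in)
		fprintf(stderr, "FAT: filename was truncated while converting.\n");
	*op = 0;
	return op - out;
}

int main(void)
{
	unsigned char buf[16];
	int n = bounded_copy(buf, L"longfilename.txt", sizeof(buf));

	printf("%d %s\n", n, buf);	/* 10 longfilena */
	return 0;
}
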
index 5fb366992b73dcfdbf6a936c510622087b324444..302e95c4af7e0c517704d987932411066c129d86 100644 (file)
@@ -450,7 +450,8 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
        BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));    /* fixed limit */
 
        lock_fat(sbi);
-       if (sbi->free_clusters != -1 && sbi->free_clusters < nr_cluster) {
+       if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
+           sbi->free_clusters < nr_cluster) {
                unlock_fat(sbi);
                return -ENOSPC;
        }
@@ -504,6 +505,7 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
 
        /* Couldn't allocate the free entries */
        sbi->free_clusters = 0;
+       sbi->free_clus_valid = 1;
        sb->s_dirt = 1;
        err = -ENOSPC;
 
@@ -544,7 +546,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
                        goto error;
                } else if (cluster == FAT_ENT_FREE) {
                        fat_fs_panic(sb, "%s: deleting FAT entry beyond EOF",
-                                    __FUNCTION__);
+                                    __func__);
                        err = -EIO;
                        goto error;
                }
@@ -583,8 +585,6 @@ error:
                brelse(bhs[i]);
        unlock_fat(sbi);
 
-       fat_clusters_flush(sb);
-
        return err;
 }
 
@@ -615,7 +615,7 @@ int fat_count_free_clusters(struct super_block *sb)
        int err = 0, free;
 
        lock_fat(sbi);
-       if (sbi->free_clusters != -1)
+       if (sbi->free_clusters != -1 && sbi->free_clus_valid)
                goto out;
 
        reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
@@ -643,6 +643,7 @@ int fat_count_free_clusters(struct super_block *sb)
                } while (fat_ent_next(sbi, &fatent));
        }
        sbi->free_clusters = free;
+       sbi->free_clus_valid = 1;
        sb->s_dirt = 1;
        fatent_brelse(&fatent);
 out:
index 2a3bed96704148c1377537677c4c7d9edd9372a3..27cc1164ec36baa84284836686d1603a36451423 100644 (file)
@@ -157,104 +157,6 @@ out:
        return err;
 }
 
-static int check_mode(const struct msdos_sb_info *sbi, mode_t mode)
-{
-       mode_t req = mode & ~S_IFMT;
-
-       /*
-        * Of the r and x bits, all (subject to umask) must be present. Of the
-        * w bits, either all (subject to umask) or none must be present.
-        */
-
-       if (S_ISREG(mode)) {
-               req &= ~sbi->options.fs_fmask;
-
-               if ((req & (S_IRUGO | S_IXUGO)) !=
-                   ((S_IRUGO | S_IXUGO) & ~sbi->options.fs_fmask))
-                       return -EPERM;
-
-               if ((req & S_IWUGO) != 0 &&
-                   (req & S_IWUGO) != (S_IWUGO & ~sbi->options.fs_fmask))
-                       return -EPERM;
-       } else if (S_ISDIR(mode)) {
-               req &= ~sbi->options.fs_dmask;
-
-               if ((req & (S_IRUGO | S_IXUGO)) !=
-                   ((S_IRUGO | S_IXUGO) & ~sbi->options.fs_dmask))
-                       return -EPERM;
-
-               if ((req & S_IWUGO) != 0 &&
-                   (req & S_IWUGO) != (S_IWUGO & ~sbi->options.fs_dmask))
-                       return -EPERM;
-       } else {
-               return -EPERM;
-       }
-
-       return 0;
-}
-
-int fat_notify_change(struct dentry *dentry, struct iattr *attr)
-{
-       struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
-       struct inode *inode = dentry->d_inode;
-       int mask, error = 0;
-
-       lock_kernel();
-
-       /*
-        * Expand the file. Since inode_setattr() updates ->i_size
-        * before calling the ->truncate(), but FAT needs to fill the
-        * hole before it.
-        */
-       if (attr->ia_valid & ATTR_SIZE) {
-               if (attr->ia_size > inode->i_size) {
-                       error = fat_cont_expand(inode, attr->ia_size);
-                       if (error || attr->ia_valid == ATTR_SIZE)
-                               goto out;
-                       attr->ia_valid &= ~ATTR_SIZE;
-               }
-       }
-
-       error = inode_change_ok(inode, attr);
-       if (error) {
-               if (sbi->options.quiet)
-                       error = 0;
-               goto out;
-       }
-       if (((attr->ia_valid & ATTR_UID) &&
-            (attr->ia_uid != sbi->options.fs_uid)) ||
-           ((attr->ia_valid & ATTR_GID) &&
-            (attr->ia_gid != sbi->options.fs_gid)))
-               error = -EPERM;
-
-       if (error) {
-               if (sbi->options.quiet)
-                       error = 0;
-               goto out;
-       }
-
-       if (attr->ia_valid & ATTR_MODE) {
-               error = check_mode(sbi, attr->ia_mode);
-               if (error != 0 && !sbi->options.quiet)
-                       goto out;
-       }
-
-       error = inode_setattr(inode, attr);
-       if (error)
-               goto out;
-
-       if (S_ISDIR(inode->i_mode))
-               mask = sbi->options.fs_dmask;
-       else
-               mask = sbi->options.fs_fmask;
-       inode->i_mode &= S_IFMT | (S_IRWXUGO & ~mask);
-out:
-       unlock_kernel();
-       return error;
-}
-
-EXPORT_SYMBOL_GPL(fat_notify_change);
-
 /* Free all clusters after the skip'th cluster. */
 static int fat_free(struct inode *inode, int skip)
 {
@@ -306,7 +208,7 @@ static int fat_free(struct inode *inode, int skip)
                } else if (ret == FAT_ENT_FREE) {
                        fat_fs_panic(sb,
                                     "%s: invalid cluster chain (i_pos %lld)",
-                                    __FUNCTION__, MSDOS_I(inode)->i_pos);
+                                    __func__, MSDOS_I(inode)->i_pos);
                        ret = -EIO;
                } else if (ret > 0) {
                        err = fat_ent_write(inode, &fatent, FAT_ENT_EOF, wait);
@@ -355,8 +257,112 @@ int fat_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
 }
 EXPORT_SYMBOL_GPL(fat_getattr);
 
+static int fat_check_mode(const struct msdos_sb_info *sbi, struct inode *inode,
+                         mode_t mode)
+{
+       mode_t mask, req = mode & ~S_IFMT;
+
+       if (S_ISREG(mode))
+               mask = sbi->options.fs_fmask;
+       else
+               mask = sbi->options.fs_dmask;
+
+       /*
+        * Of the r and x bits, all (subject to umask) must be present. Of the
+        * w bits, either all (subject to umask) or none must be present.
+        */
+       req &= ~mask;
+       if ((req & (S_IRUGO | S_IXUGO)) != (inode->i_mode & (S_IRUGO|S_IXUGO)))
+               return -EPERM;
+       if ((req & S_IWUGO) && ((req & S_IWUGO) != (S_IWUGO & ~mask)))
+               return -EPERM;
+
+       return 0;
+}
+
+static int fat_allow_set_time(struct msdos_sb_info *sbi, struct inode *inode)
+{
+       mode_t allow_utime = sbi->options.allow_utime;
+
+       if (current->fsuid != inode->i_uid) {
+               if (in_group_p(inode->i_gid))
+                       allow_utime >>= 3;
+               if (allow_utime & MAY_WRITE)
+                       return 1;
+       }
+
+       /* use a default check */
+       return 0;
+}
+
+int fat_setattr(struct dentry *dentry, struct iattr *attr)
+{
+       struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
+       struct inode *inode = dentry->d_inode;
+       int mask, error = 0;
+       unsigned int ia_valid;
+
+       lock_kernel();
+
+       /*
+        * Expand the file. Since inode_setattr() updates ->i_size
+        * before calling the ->truncate(), but FAT needs to fill the
+        * hole before it.
+        */
+       if (attr->ia_valid & ATTR_SIZE) {
+               if (attr->ia_size > inode->i_size) {
+                       error = fat_cont_expand(inode, attr->ia_size);
+                       if (error || attr->ia_valid == ATTR_SIZE)
+                               goto out;
+                       attr->ia_valid &= ~ATTR_SIZE;
+               }
+       }
+
+       /* Check for setting the inode time. */
+       ia_valid = attr->ia_valid;
+       if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
+               if (fat_allow_set_time(sbi, inode))
+                       attr->ia_valid &= ~(ATTR_MTIME_SET | ATTR_ATIME_SET);
+       }
+
+       error = inode_change_ok(inode, attr);
+       attr->ia_valid = ia_valid;
+       if (error) {
+               if (sbi->options.quiet)
+                       error = 0;
+               goto out;
+       }
+       if (((attr->ia_valid & ATTR_UID) &&
+            (attr->ia_uid != sbi->options.fs_uid)) ||
+           ((attr->ia_valid & ATTR_GID) &&
+            (attr->ia_gid != sbi->options.fs_gid)) ||
+           ((attr->ia_valid & ATTR_MODE) &&
+            fat_check_mode(sbi, inode, attr->ia_mode) < 0))
+               error = -EPERM;
+
+       if (error) {
+               if (sbi->options.quiet)
+                       error = 0;
+               goto out;
+       }
+
+       error = inode_setattr(inode, attr);
+       if (error)
+               goto out;
+
+       if (S_ISDIR(inode->i_mode))
+               mask = sbi->options.fs_dmask;
+       else
+               mask = sbi->options.fs_fmask;
+       inode->i_mode &= S_IFMT | (S_IRWXUGO & ~mask);
+out:
+       unlock_kernel();
+       return error;
+}
+EXPORT_SYMBOL_GPL(fat_setattr);
+
 const struct inode_operations fat_file_inode_operations = {
        .truncate       = fat_truncate,
-       .setattr        = fat_notify_change,
+       .setattr        = fat_setattr,
        .getattr        = fat_getattr,
 };
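
The new allow_utime mount option introduced above lets non-owners update timestamps when the matching group/other write bit is set in the mask; when the option is not given it defaults to ~dmask & 0022. Below is a rough userspace model of the fat_allow_set_time() decision; the helper name and sample values are only illustrative:

#include <stdio.h>
#include <sys/stat.h>

static int allow_set_time(mode_t allow_utime, int is_owner, int in_group)
{
	if (!is_owner) {
		if (in_group)
			allow_utime >>= 3;	/* move the group bits into the "other" position */
		if (allow_utime & S_IWOTH)	/* the kernel checks MAY_WRITE (0002) */
			return 1;		/* timestamp update permitted */
	}
	return 0;	/* fall back to the standard permission check */
}

int main(void)
{
	/* allow_utime = S_IWGRP (0020): group members may set times, others may not */
	printf("%d\n", allow_set_time(S_IWGRP, 0, 1));	/* 1 */
	printf("%d\n", allow_set_time(S_IWGRP, 0, 0));	/* 0 */
	return 0;
}
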
index 53f3cf62b7c18c945bdcc3caaddf9466a91ca391..4e0a3dd9d6779a1d9101704d470b1263b194d7d5 100644 (file)
@@ -433,11 +433,8 @@ EXPORT_SYMBOL_GPL(fat_build_inode);
 static void fat_delete_inode(struct inode *inode)
 {
        truncate_inode_pages(&inode->i_data, 0);
-
-       if (!is_bad_inode(inode)) {
-               inode->i_size = 0;
-               fat_truncate(inode);
-       }
+       inode->i_size = 0;
+       fat_truncate(inode);
        clear_inode(inode);
 }
 
@@ -445,8 +442,6 @@ static void fat_clear_inode(struct inode *inode)
 {
        struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
 
-       if (is_bad_inode(inode))
-               return;
        lock_kernel();
        spin_lock(&sbi->inode_hash_lock);
        fat_cache_inval_inode(inode);
@@ -542,7 +537,7 @@ static int fat_statfs(struct dentry *dentry, struct kstatfs *buf)
        struct msdos_sb_info *sbi = MSDOS_SB(dentry->d_sb);
 
        /* If the count of free cluster is still unknown, counts it here. */
-       if (sbi->free_clusters == -1) {
+       if (sbi->free_clusters == -1 || !sbi->free_clus_valid) {
                int err = fat_count_free_clusters(dentry->d_sb);
                if (err)
                        return err;
@@ -790,6 +785,8 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
                seq_printf(m, ",gid=%u", opts->fs_gid);
        seq_printf(m, ",fmask=%04o", opts->fs_fmask);
        seq_printf(m, ",dmask=%04o", opts->fs_dmask);
+       if (opts->allow_utime)
+               seq_printf(m, ",allow_utime=%04o", opts->allow_utime);
        if (sbi->nls_disk)
                seq_printf(m, ",codepage=%s", sbi->nls_disk->charset);
        if (isvfat) {
@@ -845,9 +842,9 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
 
 enum {
        Opt_check_n, Opt_check_r, Opt_check_s, Opt_uid, Opt_gid,
-       Opt_umask, Opt_dmask, Opt_fmask, Opt_codepage, Opt_usefree, Opt_nocase,
-       Opt_quiet, Opt_showexec, Opt_debug, Opt_immutable,
-       Opt_dots, Opt_nodots,
+       Opt_umask, Opt_dmask, Opt_fmask, Opt_allow_utime, Opt_codepage,
+       Opt_usefree, Opt_nocase, Opt_quiet, Opt_showexec, Opt_debug,
+       Opt_immutable, Opt_dots, Opt_nodots,
        Opt_charset, Opt_shortname_lower, Opt_shortname_win95,
        Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
        Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
@@ -866,6 +863,7 @@ static match_table_t fat_tokens = {
        {Opt_umask, "umask=%o"},
        {Opt_dmask, "dmask=%o"},
        {Opt_fmask, "fmask=%o"},
+       {Opt_allow_utime, "allow_utime=%o"},
        {Opt_codepage, "codepage=%u"},
        {Opt_usefree, "usefree"},
        {Opt_nocase, "nocase"},
@@ -937,6 +935,7 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
        opts->fs_uid = current->uid;
        opts->fs_gid = current->gid;
        opts->fs_fmask = opts->fs_dmask = current->fs->umask;
+       opts->allow_utime = -1;
        opts->codepage = fat_default_codepage;
        opts->iocharset = fat_default_iocharset;
        if (is_vfat)
@@ -1024,6 +1023,11 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
                                return 0;
                        opts->fs_fmask = option;
                        break;
+               case Opt_allow_utime:
+                       if (match_octal(&args[0], &option))
+                               return 0;
+                       opts->allow_utime = option & (S_IWGRP | S_IWOTH);
+                       break;
                case Opt_codepage:
                        if (match_int(&args[0], &option))
                                return 0;
@@ -1106,6 +1110,9 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
                       " for FAT filesystems, filesystem will be case sensitive!\n");
        }
 
+       /* If user doesn't specify allow_utime, it's initialized from dmask. */
+       if (opts->allow_utime == (unsigned short)-1)
+               opts->allow_utime = ~opts->fs_dmask & (S_IWGRP | S_IWOTH);
        if (opts->unicode_xlate)
                opts->utf8 = 0;
 
@@ -1208,18 +1215,17 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
         */
 
        media = b->media;
-       if (!FAT_VALID_MEDIA(media)) {
+       if (!fat_valid_media(media)) {
                if (!silent)
                        printk(KERN_ERR "FAT: invalid media value (0x%02x)\n",
                               media);
                brelse(bh);
                goto out_invalid;
        }
-       logical_sector_size =
-               le16_to_cpu(get_unaligned((__le16 *)&b->sector_size));
+       logical_sector_size = get_unaligned_le16(&b->sector_size);
        if (!is_power_of_2(logical_sector_size)
            || (logical_sector_size < 512)
-           || (PAGE_CACHE_SIZE < logical_sector_size)) {
+           || (logical_sector_size > 4096)) {
                if (!silent)
                        printk(KERN_ERR "FAT: bogus logical sector size %u\n",
                               logical_sector_size);
@@ -1267,6 +1273,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
        sbi->fat_length = le16_to_cpu(b->fat_length);
        sbi->root_cluster = 0;
        sbi->free_clusters = -1;        /* Don't know yet */
+       sbi->free_clus_valid = 0;
        sbi->prev_free = FAT_START_ENT;
 
        if (!sbi->fat_length && b->fat32_length) {
@@ -1302,8 +1309,8 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
                               sbi->fsinfo_sector);
                } else {
                        if (sbi->options.usefree)
-                               sbi->free_clusters =
-                                       le32_to_cpu(fsinfo->free_clusters);
+                               sbi->free_clus_valid = 1;
+                       sbi->free_clusters = le32_to_cpu(fsinfo->free_clusters);
                        sbi->prev_free = le32_to_cpu(fsinfo->next_cluster);
                }
 
@@ -1314,8 +1321,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
        sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1;
 
        sbi->dir_start = sbi->fat_start + sbi->fats * sbi->fat_length;
-       sbi->dir_entries =
-               le16_to_cpu(get_unaligned((__le16 *)&b->dir_entries));
+       sbi->dir_entries = get_unaligned_le16(&b->dir_entries);
        if (sbi->dir_entries & (sbi->dir_per_block - 1)) {
                if (!silent)
                        printk(KERN_ERR "FAT: bogus directory-entries per block"
@@ -1327,7 +1333,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent,
        rootdir_sectors = sbi->dir_entries
                * sizeof(struct msdos_dir_entry) / sb->s_blocksize;
        sbi->data_start = sbi->dir_start + rootdir_sectors;
-       total_sectors = le16_to_cpu(get_unaligned((__le16 *)&b->sectors));
+       total_sectors = get_unaligned_le16(&b->sectors);
        if (total_sectors == 0)
                total_sectors = le32_to_cpu(b->total_sect);
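
Earlier in this file's diff, parse_options() derives a default allow_utime from the directory mask when the option is absent: whichever group/other write bits dmask leaves enabled become the classes allowed to update timestamps. A small standalone illustration of that octal arithmetic (values only, not the kernel parser):

#include <stdio.h>

int main(void)
{
        unsigned dmask = 0022;                 /* group/other write masked out */
        unsigned allow_utime = ~dmask & 0022;  /* same formula as the patch */
        printf("dmask=%04o -> allow_utime=%04o\n", dmask, allow_utime);

        dmask = 0002;                          /* group write left enabled */
        allow_utime = ~dmask & 0022;
        printf("dmask=%04o -> allow_utime=%04o\n", dmask, allow_utime);
        return 0;
}
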
 
index 2b46064f66b2ed1a393ef9729253f59d90115c06..50ab5eecb99b845d5c88b81e83fdfeaa0eafaa2e 100644 (file)
@@ -50,7 +50,11 @@ extern daddr_t                       vxfs_bmap1(struct inode *, long);
 /* vxfs_fshead.c */
 extern int                     vxfs_read_fshead(struct super_block *);
 
+/* vxfs_immed.c */
+extern const struct inode_operations vxfs_immed_symlink_iops;
+
 /* vxfs_inode.c */
+extern const struct address_space_operations vxfs_immed_aops;
 extern struct kmem_cache       *vxfs_inode_cachep;
 extern void                    vxfs_dumpi(struct vxfs_inode_info *, ino_t);
 extern struct inode *          vxfs_get_fake_inode(struct super_block *,
@@ -69,6 +73,7 @@ extern const struct file_operations   vxfs_dir_operations;
 extern int                     vxfs_read_olt(struct super_block *, u_long);
 
 /* vxfs_subr.c */
+extern const struct address_space_operations vxfs_aops;
 extern struct page *           vxfs_get_page(struct address_space *, u_long);
 extern void                    vxfs_put_page(struct page *);
 extern struct buffer_head *    vxfs_bread(struct inode *, int);
index 8a5959a61ba9dbff4b9b713efa1f5d42da003e1f..c36aeaf92e41070cb905d57ffb1b6df8ce2dac4d 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/namei.h>
 
 #include "vxfs.h"
+#include "vxfs_extern.h"
 #include "vxfs_inode.h"
 
 
index ad88d2364bc258bc2d1b07283304d935eeab3369..9f3f2ceb73f00743f0869e086a2e3cb557065748 100644 (file)
 #include "vxfs_extern.h"
 
 
-extern const struct address_space_operations vxfs_aops;
-extern const struct address_space_operations vxfs_immed_aops;
-
-extern const struct inode_operations vxfs_immed_symlink_iops;
-
 struct kmem_cache              *vxfs_inode_cachep;
 
 
index 06557679ca411d6fbce09a33dddd042f938ecbc2..ae45f77765c0ee65a647fc81c899315de2f0862f 100644 (file)
 #include <linux/buffer_head.h>
 #include "internal.h"
 
+
+/**
+ * writeback_acquire - attempt to get exclusive writeback access to a device
+ * @bdi: the device's backing_dev_info structure
+ *
+ * It is a waste of resources to have more than one pdflush thread blocked on
+ * a single request queue.  Exclusion at the request_queue level is obtained
+ * via a flag in the request_queue's backing_dev_info.state.
+ *
+ * Non-request_queue-backed address_spaces will share default_backing_dev_info,
+ * unless they implement their own.  Which is somewhat inefficient, as this
+ * may prevent concurrent writeback against multiple devices.
+ */
+static int writeback_acquire(struct backing_dev_info *bdi)
+{
+       return !test_and_set_bit(BDI_pdflush, &bdi->state);
+}
+
+/**
+ * writeback_in_progress - determine whether there is writeback in progress
+ * @bdi: the device's backing_dev_info structure.
+ *
+ * Determine whether there is writeback in progress against a backing device.
+ */
+int writeback_in_progress(struct backing_dev_info *bdi)
+{
+       return test_bit(BDI_pdflush, &bdi->state);
+}
+
+/**
+ * writeback_release - relinquish exclusive writeback access against a device.
+ * @bdi: the device's backing_dev_info structure
+ */
+static void writeback_release(struct backing_dev_info *bdi)
+{
+       BUG_ON(!writeback_in_progress(bdi));
+       clear_bit(BDI_pdflush, &bdi->state);
+}
+
 /**
  *     __mark_inode_dirty -    internal function
  *     @inode: inode to mark
@@ -747,43 +786,4 @@ int generic_osync_inode(struct inode *inode, struct address_space *mapping, int
 
        return err;
 }
-
 EXPORT_SYMBOL(generic_osync_inode);
-
-/**
- * writeback_acquire - attempt to get exclusive writeback access to a device
- * @bdi: the device's backing_dev_info structure
- *
- * It is a waste of resources to have more than one pdflush thread blocked on
- * a single request queue.  Exclusion at the request_queue level is obtained
- * via a flag in the request_queue's backing_dev_info.state.
- *
- * Non-request_queue-backed address_spaces will share default_backing_dev_info,
- * unless they implement their own.  Which is somewhat inefficient, as this
- * may prevent concurrent writeback against multiple devices.
- */
-int writeback_acquire(struct backing_dev_info *bdi)
-{
-       return !test_and_set_bit(BDI_pdflush, &bdi->state);
-}
-
-/**
- * writeback_in_progress - determine whether there is writeback in progress
- * @bdi: the device's backing_dev_info structure.
- *
- * Determine whether there is writeback in progress against a backing device.
- */
-int writeback_in_progress(struct backing_dev_info *bdi)
-{
-       return test_bit(BDI_pdflush, &bdi->state);
-}
-
-/**
- * writeback_release - relinquish exclusive writeback access against a device.
- * @bdi: the device's backing_dev_info structure
- */
-void writeback_release(struct backing_dev_info *bdi)
-{
-       BUG_ON(!writeback_in_progress(bdi));
-       clear_bit(BDI_pdflush, &bdi->state);
-}
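
The writeback_acquire()/writeback_release() pair being moved above is a non-blocking try-lock built on an atomic test-and-set of a state bit: only the caller that flips the bit gets exclusive access until it clears it again. A userspace sketch of the same pattern, using C11 atomic_flag in place of the kernel's test_and_set_bit()/clear_bit():

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag pdflush_busy = ATOMIC_FLAG_INIT;

static int writeback_acquire(void)
{
        /* true only for the caller that set the flag first */
        return !atomic_flag_test_and_set(&pdflush_busy);
}

static void writeback_release(void)
{
        atomic_flag_clear(&pdflush_busy);
}

int main(void)
{
        printf("first acquire:  %d\n", writeback_acquire());  /* 1 */
        printf("second acquire: %d\n", writeback_acquire());  /* 0: busy */
        writeback_release();
        printf("after release:  %d\n", writeback_acquire());  /* 1 again */
        return 0;
}
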
index 105d4a271e073b0c7d60660057d9716d798f9d1d..4f3cab32141584264789ab3446f962d631ba04ab 100644 (file)
@@ -117,7 +117,7 @@ int fuse_ctl_add_conn(struct fuse_conn *fc)
 
        parent = fuse_control_sb->s_root;
        inc_nlink(parent->d_inode);
-       sprintf(name, "%llu", (unsigned long long) fc->id);
+       sprintf(name, "%u", fc->dev);
        parent = fuse_ctl_add_dentry(parent, fc, name, S_IFDIR | 0500, 2,
                                     &simple_dir_inode_operations,
                                     &simple_dir_operations);
index af639807524e63164397b32f8589bf75131a5112..87250b6a8682468c39ff0e4cb1f35caa09f386f6 100644 (file)
@@ -47,6 +47,14 @@ struct fuse_req *fuse_request_alloc(void)
        return req;
 }
 
+struct fuse_req *fuse_request_alloc_nofs(void)
+{
+       struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
+       if (req)
+               fuse_request_init(req);
+       return req;
+}
+
 void fuse_request_free(struct fuse_req *req)
 {
        kmem_cache_free(fuse_req_cachep, req);
@@ -291,6 +299,7 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 
 static void wait_answer_interruptible(struct fuse_conn *fc,
                                      struct fuse_req *req)
+       __releases(fc->lock) __acquires(fc->lock)
 {
        if (signal_pending(current))
                return;
@@ -307,8 +316,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
        kill_fasync(&fc->fasync, SIGIO, POLL_IN);
 }
 
-/* Called with fc->lock held.  Releases, and then reacquires it. */
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
+       __releases(fc->lock) __acquires(fc->lock)
 {
        if (!fc->no_interrupt) {
                /* Any signal may interrupt this */
@@ -429,6 +438,17 @@ void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
        request_send_nowait(fc, req);
 }
 
+/*
+ * Called under fc->lock
+ *
+ * fc->connected must have been checked previously
+ */
+void request_send_background_locked(struct fuse_conn *fc, struct fuse_req *req)
+{
+       req->isreply = 1;
+       request_send_nowait_locked(fc, req);
+}
+
 /*
  * Lock the request.  Up to the next unlock_request() there mustn't be
  * anything that could cause a page-fault.  If the request was already
@@ -968,6 +988,7 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
  * locked).
  */
 static void end_io_requests(struct fuse_conn *fc)
+       __releases(fc->lock) __acquires(fc->lock)
 {
        while (!list_empty(&fc->io)) {
                struct fuse_req *req =
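
The __releases(fc->lock)/__acquires(fc->lock) annotations added in this file tell sparse that the annotated functions are entered with fc->lock held, drop it internally around a blocking step, and return with it held again. A userspace pthread sketch of that shape (the function name is illustrative and the blocking step is faked with a puts(); this is not fuse code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Caller holds "lock"; it is released and reacquired internally. */
static void wait_for_answer(void)   /* __releases(lock) __acquires(lock) */
{
        pthread_mutex_unlock(&lock);   /* drop the lock before blocking */
        puts("would sleep for the answer here");
        pthread_mutex_lock(&lock);     /* reacquire before returning */
}

int main(void)
{
        pthread_mutex_lock(&lock);
        wait_for_answer();
        pthread_mutex_unlock(&lock);
        return 0;
}
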
index c4807b3fc8a360776bda6f976baebd26b5c9ae92..2060bf06b90625cf920aa08f87da07bd286a193f 100644 (file)
@@ -132,7 +132,7 @@ static void fuse_lookup_init(struct fuse_req *req, struct inode *dir,
        req->out.args[0].value = outarg;
 }
 
-static u64 fuse_get_attr_version(struct fuse_conn *fc)
+u64 fuse_get_attr_version(struct fuse_conn *fc)
 {
        u64 curr_version;
 
@@ -1106,6 +1106,50 @@ static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg)
        }
 }
 
+/*
+ * Prevent concurrent writepages on inode
+ *
+ * This is done by adding a negative bias to the inode write counter
+ * and waiting for all pending writes to finish.
+ */
+void fuse_set_nowrite(struct inode *inode)
+{
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       struct fuse_inode *fi = get_fuse_inode(inode);
+
+       BUG_ON(!mutex_is_locked(&inode->i_mutex));
+
+       spin_lock(&fc->lock);
+       BUG_ON(fi->writectr < 0);
+       fi->writectr += FUSE_NOWRITE;
+       spin_unlock(&fc->lock);
+       wait_event(fi->page_waitq, fi->writectr == FUSE_NOWRITE);
+}
+
+/*
+ * Allow writepages on inode
+ *
+ * Remove the bias from the writecounter and send any queued
+ * writepages.
+ */
+static void __fuse_release_nowrite(struct inode *inode)
+{
+       struct fuse_inode *fi = get_fuse_inode(inode);
+
+       BUG_ON(fi->writectr != FUSE_NOWRITE);
+       fi->writectr = 0;
+       fuse_flush_writepages(inode);
+}
+
+void fuse_release_nowrite(struct inode *inode)
+{
+       struct fuse_conn *fc = get_fuse_conn(inode);
+
+       spin_lock(&fc->lock);
+       __fuse_release_nowrite(inode);
+       spin_unlock(&fc->lock);
+}
+
 /*
  * Set attributes, and at the same time refresh them.
  *
@@ -1122,6 +1166,8 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
        struct fuse_req *req;
        struct fuse_setattr_in inarg;
        struct fuse_attr_out outarg;
+       bool is_truncate = false;
+       loff_t oldsize;
        int err;
 
        if (!fuse_allow_task(fc, current))
@@ -1145,12 +1191,16 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
                        send_sig(SIGXFSZ, current, 0);
                        return -EFBIG;
                }
+               is_truncate = true;
        }
 
        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);
 
+       if (is_truncate)
+               fuse_set_nowrite(inode);
+
        memset(&inarg, 0, sizeof(inarg));
        memset(&outarg, 0, sizeof(outarg));
        iattr_to_fattr(attr, &inarg);
@@ -1181,16 +1231,44 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
        if (err) {
                if (err == -EINTR)
                        fuse_invalidate_attr(inode);
-               return err;
+               goto error;
        }
 
        if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
                make_bad_inode(inode);
-               return -EIO;
+               err = -EIO;
+               goto error;
+       }
+
+       spin_lock(&fc->lock);
+       fuse_change_attributes_common(inode, &outarg.attr,
+                                     attr_timeout(&outarg));
+       oldsize = inode->i_size;
+       i_size_write(inode, outarg.attr.size);
+
+       if (is_truncate) {
+               /* NOTE: this may release/reacquire fc->lock */
+               __fuse_release_nowrite(inode);
+       }
+       spin_unlock(&fc->lock);
+
+       /*
+        * Only call invalidate_inode_pages2() after removing
+        * FUSE_NOWRITE, otherwise fuse_launder_page() would deadlock.
+        */
+       if (S_ISREG(inode->i_mode) && oldsize != outarg.attr.size) {
+               if (outarg.attr.size < oldsize)
+                       fuse_truncate(inode->i_mapping, outarg.attr.size);
+               invalidate_inode_pages2(inode->i_mapping);
        }
 
-       fuse_change_attributes(inode, &outarg.attr, attr_timeout(&outarg), 0);
        return 0;
+
+error:
+       if (is_truncate)
+               fuse_release_nowrite(inode);
+
+       return err;
 }
 
 static int fuse_setattr(struct dentry *entry, struct iattr *attr)
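
fuse_set_nowrite() above blocks new writepages by adding the large negative FUSE_NOWRITE bias to the write counter and then waiting for the counter to fall back to exactly the bias, meaning every in-flight write has completed; fuse_release_nowrite() resets the counter and flushes whatever queued up meanwhile. A userspace sketch of that biased-counter scheme, with a mutex and condition variable standing in for fc->lock and the page waitqueue (names are illustrative, not fuse code):

#include <pthread.h>
#include <limits.h>

#define NOWRITE_BIAS INT_MIN

static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
static int writectr;                      /* >= 0: writes allowed */

static int write_start(void)
{
        int ok;
        pthread_mutex_lock(&lock);
        ok = writectr >= 0;               /* refuse while NOWRITE is set */
        if (ok)
                writectr++;
        pthread_mutex_unlock(&lock);
        return ok;
}

static void write_finished(void)
{
        pthread_mutex_lock(&lock);
        writectr--;
        pthread_cond_broadcast(&waitq);   /* may wake a set_nowrite() waiter */
        pthread_mutex_unlock(&lock);
}

static void set_nowrite(void)
{
        pthread_mutex_lock(&lock);
        writectr += NOWRITE_BIAS;         /* block new writes */
        while (writectr != NOWRITE_BIAS)  /* wait out in-flight writes */
                pthread_cond_wait(&waitq, &lock);
        pthread_mutex_unlock(&lock);
}

static void release_nowrite(void)
{
        pthread_mutex_lock(&lock);
        writectr = 0;                     /* allow writes again */
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        write_start();        /* one write in flight */
        write_finished();     /* ...and it completes */
        set_nowrite();        /* returns at once: nothing left in flight */
        release_nowrite();
        return 0;
}
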
index 676b0bc8a86dba3e3c048a1a35320d3931a65f20..f28cf8b46f806f89c595de8f746731032e625d0d 100644 (file)
@@ -210,6 +210,49 @@ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
        return (u64) v0 + ((u64) v1 << 32);
 }
 
+/*
+ * Check if page is under writeback
+ *
+ * This is currently done by walking the list of writepage requests
+ * for the inode, which can be pretty inefficient.
+ */
+static bool fuse_page_is_writeback(struct inode *inode, pgoff_t index)
+{
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       struct fuse_inode *fi = get_fuse_inode(inode);
+       struct fuse_req *req;
+       bool found = false;
+
+       spin_lock(&fc->lock);
+       list_for_each_entry(req, &fi->writepages, writepages_entry) {
+               pgoff_t curr_index;
+
+               BUG_ON(req->inode != inode);
+               curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT;
+               if (curr_index == index) {
+                       found = true;
+                       break;
+               }
+       }
+       spin_unlock(&fc->lock);
+
+       return found;
+}
+
+/*
+ * Wait for page writeback to be completed.
+ *
+ * Since fuse doesn't rely on the VM writeback tracking, this has to
+ * use some other means.
+ */
+static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index)
+{
+       struct fuse_inode *fi = get_fuse_inode(inode);
+
+       wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index));
+       return 0;
+}
+
 static int fuse_flush(struct file *file, fl_owner_t id)
 {
        struct inode *inode = file->f_path.dentry->d_inode;
@@ -245,6 +288,21 @@ static int fuse_flush(struct file *file, fl_owner_t id)
        return err;
 }
 
+/*
+ * Wait for all pending writepages on the inode to finish.
+ *
+ * This is currently done by blocking further writes with FUSE_NOWRITE
+ * and waiting for all sent writes to complete.
+ *
+ * This must be called under i_mutex, otherwise the FUSE_NOWRITE usage
+ * could conflict with truncation.
+ */
+static void fuse_sync_writes(struct inode *inode)
+{
+       fuse_set_nowrite(inode);
+       fuse_release_nowrite(inode);
+}
+
 int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
                      int isdir)
 {
@@ -261,6 +319,17 @@ int fuse_fsync_common(struct file *file, struct dentry *de, int datasync,
        if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
                return 0;
 
+       /*
+        * Start writeback against all dirty pages of the inode, then
+        * wait for all outstanding writes, before sending the FSYNC
+        * request.
+        */
+       err = write_inode_now(inode, 0);
+       if (err)
+               return err;
+
+       fuse_sync_writes(inode);
+
        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);
@@ -294,7 +363,7 @@ static int fuse_fsync(struct file *file, struct dentry *de, int datasync)
 void fuse_read_fill(struct fuse_req *req, struct file *file,
                    struct inode *inode, loff_t pos, size_t count, int opcode)
 {
-       struct fuse_read_in *inarg = &req->misc.read_in;
+       struct fuse_read_in *inarg = &req->misc.read.in;
        struct fuse_file *ff = file->private_data;
 
        inarg->fh = ff->fh;
@@ -320,7 +389,7 @@ static size_t fuse_send_read(struct fuse_req *req, struct file *file,
 
        fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
        if (owner != NULL) {
-               struct fuse_read_in *inarg = &req->misc.read_in;
+               struct fuse_read_in *inarg = &req->misc.read.in;
 
                inarg->read_flags |= FUSE_READ_LOCKOWNER;
                inarg->lock_owner = fuse_lock_owner_id(fc, owner);
@@ -329,31 +398,66 @@ static size_t fuse_send_read(struct fuse_req *req, struct file *file,
        return req->out.args[0].size;
 }
 
+static void fuse_read_update_size(struct inode *inode, loff_t size,
+                                 u64 attr_ver)
+{
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       struct fuse_inode *fi = get_fuse_inode(inode);
+
+       spin_lock(&fc->lock);
+       if (attr_ver == fi->attr_version && size < inode->i_size) {
+               fi->attr_version = ++fc->attr_version;
+               i_size_write(inode, size);
+       }
+       spin_unlock(&fc->lock);
+}
+
 static int fuse_readpage(struct file *file, struct page *page)
 {
        struct inode *inode = page->mapping->host;
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_req *req;
+       size_t num_read;
+       loff_t pos = page_offset(page);
+       size_t count = PAGE_CACHE_SIZE;
+       u64 attr_ver;
        int err;
 
        err = -EIO;
        if (is_bad_inode(inode))
                goto out;
 
+       /*
+        * Page writeback can extend beyond the lifetime of the
+        * page-cache page, so make sure we read a properly synced
+        * page.
+        */
+       fuse_wait_on_page_writeback(inode, page->index);
+
        req = fuse_get_req(fc);
        err = PTR_ERR(req);
        if (IS_ERR(req))
                goto out;
 
+       attr_ver = fuse_get_attr_version(fc);
+
        req->out.page_zeroing = 1;
        req->num_pages = 1;
        req->pages[0] = page;
-       fuse_send_read(req, file, inode, page_offset(page), PAGE_CACHE_SIZE,
-                      NULL);
+       num_read = fuse_send_read(req, file, inode, pos, count, NULL);
        err = req->out.h.error;
        fuse_put_request(fc, req);
-       if (!err)
+
+       if (!err) {
+               /*
+                * Short read means EOF.  If file size is larger, truncate it
+                */
+               if (num_read < count)
+                       fuse_read_update_size(inode, pos + num_read, attr_ver);
+
                SetPageUptodate(page);
+       }
+
        fuse_invalidate_attr(inode); /* atime changed */
  out:
        unlock_page(page);
@@ -363,8 +467,19 @@ static int fuse_readpage(struct file *file, struct page *page)
 static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
 {
        int i;
+       size_t count = req->misc.read.in.size;
+       size_t num_read = req->out.args[0].size;
+       struct inode *inode = req->pages[0]->mapping->host;
+
+       /*
+        * Short read means EOF.  If file size is larger, truncate it
+        */
+       if (!req->out.h.error && num_read < count) {
+               loff_t pos = page_offset(req->pages[0]) + num_read;
+               fuse_read_update_size(inode, pos, req->misc.read.attr_ver);
+       }
 
-       fuse_invalidate_attr(req->pages[0]->mapping->host); /* atime changed */
+       fuse_invalidate_attr(inode); /* atime changed */
 
        for (i = 0; i < req->num_pages; i++) {
                struct page *page = req->pages[i];
@@ -387,6 +502,7 @@ static void fuse_send_readpages(struct fuse_req *req, struct file *file,
        size_t count = req->num_pages << PAGE_CACHE_SHIFT;
        req->out.page_zeroing = 1;
        fuse_read_fill(req, file, inode, pos, count, FUSE_READ);
+       req->misc.read.attr_ver = fuse_get_attr_version(fc);
        if (fc->async_read) {
                struct fuse_file *ff = file->private_data;
                req->ff = fuse_file_get(ff);
@@ -411,6 +527,8 @@ static int fuse_readpages_fill(void *_data, struct page *page)
        struct inode *inode = data->inode;
        struct fuse_conn *fc = get_fuse_conn(inode);
 
+       fuse_wait_on_page_writeback(inode, page->index);
+
        if (req->num_pages &&
            (req->num_pages == FUSE_MAX_PAGES_PER_REQ ||
             (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read ||
@@ -477,11 +595,10 @@ static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 }
 
 static void fuse_write_fill(struct fuse_req *req, struct file *file,
-                           struct inode *inode, loff_t pos, size_t count,
-                           int writepage)
+                           struct fuse_file *ff, struct inode *inode,
+                           loff_t pos, size_t count, int writepage)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
-       struct fuse_file *ff = file->private_data;
        struct fuse_write_in *inarg = &req->misc.write.in;
        struct fuse_write_out *outarg = &req->misc.write.out;
 
@@ -490,7 +607,7 @@ static void fuse_write_fill(struct fuse_req *req, struct file *file,
        inarg->offset = pos;
        inarg->size = count;
        inarg->write_flags = writepage ? FUSE_WRITE_CACHE : 0;
-       inarg->flags = file->f_flags;
+       inarg->flags = file ? file->f_flags : 0;
        req->in.h.opcode = FUSE_WRITE;
        req->in.h.nodeid = get_node_id(inode);
        req->in.argpages = 1;
@@ -511,7 +628,7 @@ static size_t fuse_send_write(struct fuse_req *req, struct file *file,
                              fl_owner_t owner)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
-       fuse_write_fill(req, file, inode, pos, count, 0);
+       fuse_write_fill(req, file, file->private_data, inode, pos, count, 0);
        if (owner != NULL) {
                struct fuse_write_in *inarg = &req->misc.write.in;
                inarg->write_flags |= FUSE_WRITE_LOCKOWNER;
@@ -533,19 +650,36 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping,
        return 0;
 }
 
+static void fuse_write_update_size(struct inode *inode, loff_t pos)
+{
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       struct fuse_inode *fi = get_fuse_inode(inode);
+
+       spin_lock(&fc->lock);
+       fi->attr_version = ++fc->attr_version;
+       if (pos > inode->i_size)
+               i_size_write(inode, pos);
+       spin_unlock(&fc->lock);
+}
+
 static int fuse_buffered_write(struct file *file, struct inode *inode,
                               loff_t pos, unsigned count, struct page *page)
 {
        int err;
        size_t nres;
        struct fuse_conn *fc = get_fuse_conn(inode);
-       struct fuse_inode *fi = get_fuse_inode(inode);
        unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
        struct fuse_req *req;
 
        if (is_bad_inode(inode))
                return -EIO;
 
+       /*
+        * Make sure writepages on the same page are not mixed up with
+        * plain writes.
+        */
+       fuse_wait_on_page_writeback(inode, page->index);
+
        req = fuse_get_req(fc);
        if (IS_ERR(req))
                return PTR_ERR(req);
@@ -560,12 +694,7 @@ static int fuse_buffered_write(struct file *file, struct inode *inode,
                err = -EIO;
        if (!err) {
                pos += nres;
-               spin_lock(&fc->lock);
-               fi->attr_version = ++fc->attr_version;
-               if (pos > inode->i_size)
-                       i_size_write(inode, pos);
-               spin_unlock(&fc->lock);
-
+               fuse_write_update_size(inode, pos);
                if (count == PAGE_CACHE_SIZE)
                        SetPageUptodate(page);
        }
@@ -588,6 +717,198 @@ static int fuse_write_end(struct file *file, struct address_space *mapping,
        return res;
 }
 
+static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file,
+                                   struct inode *inode, loff_t pos,
+                                   size_t count)
+{
+       size_t res;
+       unsigned offset;
+       unsigned i;
+
+       for (i = 0; i < req->num_pages; i++)
+               fuse_wait_on_page_writeback(inode, req->pages[i]->index);
+
+       res = fuse_send_write(req, file, inode, pos, count, NULL);
+
+       offset = req->page_offset;
+       count = res;
+       for (i = 0; i < req->num_pages; i++) {
+               struct page *page = req->pages[i];
+
+               if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE)
+                       SetPageUptodate(page);
+
+               if (count > PAGE_CACHE_SIZE - offset)
+                       count -= PAGE_CACHE_SIZE - offset;
+               else
+                       count = 0;
+               offset = 0;
+
+               unlock_page(page);
+               page_cache_release(page);
+       }
+
+       return res;
+}
+
+static ssize_t fuse_fill_write_pages(struct fuse_req *req,
+                              struct address_space *mapping,
+                              struct iov_iter *ii, loff_t pos)
+{
+       struct fuse_conn *fc = get_fuse_conn(mapping->host);
+       unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
+       size_t count = 0;
+       int err;
+
+       req->page_offset = offset;
+
+       do {
+               size_t tmp;
+               struct page *page;
+               pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+               size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
+                                    iov_iter_count(ii));
+
+               bytes = min_t(size_t, bytes, fc->max_write - count);
+
+ again:
+               err = -EFAULT;
+               if (iov_iter_fault_in_readable(ii, bytes))
+                       break;
+
+               err = -ENOMEM;
+               page = __grab_cache_page(mapping, index);
+               if (!page)
+                       break;
+
+               pagefault_disable();
+               tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
+               pagefault_enable();
+               flush_dcache_page(page);
+
+               if (!tmp) {
+                       unlock_page(page);
+                       page_cache_release(page);
+                       bytes = min(bytes, iov_iter_single_seg_count(ii));
+                       goto again;
+               }
+
+               err = 0;
+               req->pages[req->num_pages] = page;
+               req->num_pages++;
+
+               iov_iter_advance(ii, tmp);
+               count += tmp;
+               pos += tmp;
+               offset += tmp;
+               if (offset == PAGE_CACHE_SIZE)
+                       offset = 0;
+
+       } while (iov_iter_count(ii) && count < fc->max_write &&
+                req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);
+
+       return count > 0 ? count : err;
+}
+
+static ssize_t fuse_perform_write(struct file *file,
+                                 struct address_space *mapping,
+                                 struct iov_iter *ii, loff_t pos)
+{
+       struct inode *inode = mapping->host;
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       int err = 0;
+       ssize_t res = 0;
+
+       if (is_bad_inode(inode))
+               return -EIO;
+
+       do {
+               struct fuse_req *req;
+               ssize_t count;
+
+               req = fuse_get_req(fc);
+               if (IS_ERR(req)) {
+                       err = PTR_ERR(req);
+                       break;
+               }
+
+               count = fuse_fill_write_pages(req, mapping, ii, pos);
+               if (count <= 0) {
+                       err = count;
+               } else {
+                       size_t num_written;
+
+                       num_written = fuse_send_write_pages(req, file, inode,
+                                                           pos, count);
+                       err = req->out.h.error;
+                       if (!err) {
+                               res += num_written;
+                               pos += num_written;
+
+                               /* break out of the loop on short write */
+                               if (num_written != count)
+                                       err = -EIO;
+                       }
+               }
+               fuse_put_request(fc, req);
+       } while (!err && iov_iter_count(ii));
+
+       if (res > 0)
+               fuse_write_update_size(inode, pos);
+
+       fuse_invalidate_attr(inode);
+
+       return res > 0 ? res : err;
+}
+
+static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+                                  unsigned long nr_segs, loff_t pos)
+{
+       struct file *file = iocb->ki_filp;
+       struct address_space *mapping = file->f_mapping;
+       size_t count = 0;
+       ssize_t written = 0;
+       struct inode *inode = mapping->host;
+       ssize_t err;
+       struct iov_iter i;
+
+       WARN_ON(iocb->ki_pos != pos);
+
+       err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
+       if (err)
+               return err;
+
+       mutex_lock(&inode->i_mutex);
+       vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
+
+       /* We can write back this queue in page reclaim */
+       current->backing_dev_info = mapping->backing_dev_info;
+
+       err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
+       if (err)
+               goto out;
+
+       if (count == 0)
+               goto out;
+
+       err = remove_suid(file->f_path.dentry);
+       if (err)
+               goto out;
+
+       file_update_time(file);
+
+       iov_iter_init(&i, iov, nr_segs, count, 0);
+       written = fuse_perform_write(file, mapping, &i, pos);
+       if (written >= 0)
+               iocb->ki_pos = pos + written;
+
+out:
+       current->backing_dev_info = NULL;
+       mutex_unlock(&inode->i_mutex);
+
+       return written ? written : err;
+}
+
 static void fuse_release_user_pages(struct fuse_req *req, int write)
 {
        unsigned i;
@@ -613,7 +934,7 @@ static int fuse_get_user_pages(struct fuse_req *req, const char __user *buf,
 
        nbytes = min(nbytes, (unsigned) FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT);
        npages = (nbytes + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       npages = min(max(npages, 1), FUSE_MAX_PAGES_PER_REQ);
+       npages = clamp(npages, 1, FUSE_MAX_PAGES_PER_REQ);
        down_read(&current->mm->mmap_sem);
        npages = get_user_pages(current, current->mm, user_addr, npages, write,
                                0, req->pages, NULL);
@@ -645,14 +966,15 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
 
        while (count) {
                size_t nres;
-               size_t nbytes = min(count, nmax);
-               int err = fuse_get_user_pages(req, buf, nbytes, !write);
+               size_t nbytes_limit = min(count, nmax);
+               size_t nbytes;
+               int err = fuse_get_user_pages(req, buf, nbytes_limit, !write);
                if (err) {
                        res = err;
                        break;
                }
                nbytes = (req->num_pages << PAGE_SHIFT) - req->page_offset;
-               nbytes = min(count, nbytes);
+               nbytes = min(nbytes_limit, nbytes);
                if (write)
                        nres = fuse_send_write(req, file, inode, pos, nbytes,
                                               current->files);
@@ -683,12 +1005,8 @@ static ssize_t fuse_direct_io(struct file *file, const char __user *buf,
        }
        fuse_put_request(fc, req);
        if (res > 0) {
-               if (write) {
-                       spin_lock(&fc->lock);
-                       if (pos > inode->i_size)
-                               i_size_write(inode, pos);
-                       spin_unlock(&fc->lock);
-               }
+               if (write)
+                       fuse_write_update_size(inode, pos);
                *ppos = pos;
        }
        fuse_invalidate_attr(inode);
@@ -716,21 +1034,225 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
        return res;
 }
 
-static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
+static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
 {
-       if ((vma->vm_flags & VM_SHARED)) {
-               if ((vma->vm_flags & VM_WRITE))
-                       return -ENODEV;
-               else
-                       vma->vm_flags &= ~VM_MAYWRITE;
+       __free_page(req->pages[0]);
+       fuse_file_put(req->ff);
+       fuse_put_request(fc, req);
+}
+
+static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
+{
+       struct inode *inode = req->inode;
+       struct fuse_inode *fi = get_fuse_inode(inode);
+       struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;
+
+       list_del(&req->writepages_entry);
+       dec_bdi_stat(bdi, BDI_WRITEBACK);
+       dec_zone_page_state(req->pages[0], NR_WRITEBACK_TEMP);
+       bdi_writeout_inc(bdi);
+       wake_up(&fi->page_waitq);
+}
+
+/* Called under fc->lock, may release and reacquire it */
+static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
+{
+       struct fuse_inode *fi = get_fuse_inode(req->inode);
+       loff_t size = i_size_read(req->inode);
+       struct fuse_write_in *inarg = &req->misc.write.in;
+
+       if (!fc->connected)
+               goto out_free;
+
+       if (inarg->offset + PAGE_CACHE_SIZE <= size) {
+               inarg->size = PAGE_CACHE_SIZE;
+       } else if (inarg->offset < size) {
+               inarg->size = size & (PAGE_CACHE_SIZE - 1);
+       } else {
+               /* Got truncated off completely */
+               goto out_free;
+       }
+
+       req->in.args[1].size = inarg->size;
+       fi->writectr++;
+       request_send_background_locked(fc, req);
+       return;
+
+ out_free:
+       fuse_writepage_finish(fc, req);
+       spin_unlock(&fc->lock);
+       fuse_writepage_free(fc, req);
+       spin_lock(&fc->lock);
+}
+
+/*
+ * If fi->writectr is positive (no truncate or fsync going on) send
+ * all queued writepage requests.
+ *
+ * Called with fc->lock
+ */
+void fuse_flush_writepages(struct inode *inode)
+{
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       struct fuse_inode *fi = get_fuse_inode(inode);
+       struct fuse_req *req;
+
+       while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) {
+               req = list_entry(fi->queued_writes.next, struct fuse_req, list);
+               list_del_init(&req->list);
+               fuse_send_writepage(fc, req);
+       }
+}
+
+static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req)
+{
+       struct inode *inode = req->inode;
+       struct fuse_inode *fi = get_fuse_inode(inode);
+
+       mapping_set_error(inode->i_mapping, req->out.h.error);
+       spin_lock(&fc->lock);
+       fi->writectr--;
+       fuse_writepage_finish(fc, req);
+       spin_unlock(&fc->lock);
+       fuse_writepage_free(fc, req);
+}
+
+static int fuse_writepage_locked(struct page *page)
+{
+       struct address_space *mapping = page->mapping;
+       struct inode *inode = mapping->host;
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       struct fuse_inode *fi = get_fuse_inode(inode);
+       struct fuse_req *req;
+       struct fuse_file *ff;
+       struct page *tmp_page;
+
+       set_page_writeback(page);
+
+       req = fuse_request_alloc_nofs();
+       if (!req)
+               goto err;
+
+       tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+       if (!tmp_page)
+               goto err_free;
+
+       spin_lock(&fc->lock);
+       BUG_ON(list_empty(&fi->write_files));
+       ff = list_entry(fi->write_files.next, struct fuse_file, write_entry);
+       req->ff = fuse_file_get(ff);
+       spin_unlock(&fc->lock);
+
+       fuse_write_fill(req, NULL, ff, inode, page_offset(page), 0, 1);
+
+       copy_highpage(tmp_page, page);
+       req->num_pages = 1;
+       req->pages[0] = tmp_page;
+       req->page_offset = 0;
+       req->end = fuse_writepage_end;
+       req->inode = inode;
+
+       inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
+       inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
+       end_page_writeback(page);
+
+       spin_lock(&fc->lock);
+       list_add(&req->writepages_entry, &fi->writepages);
+       list_add_tail(&req->list, &fi->queued_writes);
+       fuse_flush_writepages(inode);
+       spin_unlock(&fc->lock);
+
+       return 0;
+
+err_free:
+       fuse_request_free(req);
+err:
+       end_page_writeback(page);
+       return -ENOMEM;
+}
+
+static int fuse_writepage(struct page *page, struct writeback_control *wbc)
+{
+       int err;
+
+       err = fuse_writepage_locked(page);
+       unlock_page(page);
+
+       return err;
+}
+
+static int fuse_launder_page(struct page *page)
+{
+       int err = 0;
+       if (clear_page_dirty_for_io(page)) {
+               struct inode *inode = page->mapping->host;
+               err = fuse_writepage_locked(page);
+               if (!err)
+                       fuse_wait_on_page_writeback(inode, page->index);
        }
-       return generic_file_mmap(file, vma);
+       return err;
 }
 
-static int fuse_set_page_dirty(struct page *page)
+/*
+ * Write back dirty pages now, because there may not be any suitable
+ * open files later
+ */
+static void fuse_vma_close(struct vm_area_struct *vma)
 {
-       printk("fuse_set_page_dirty: should not happen\n");
-       dump_stack();
+       filemap_write_and_wait(vma->vm_file->f_mapping);
+}
+
+/*
+ * Wait for writeback against this page to complete before allowing it
+ * to be marked dirty again, and hence written back again, possibly
+ * before the previous writepage completed.
+ *
+ * Block here, instead of in ->writepage(), so that the userspace fs
+ * can only block processes actually operating on the filesystem.
+ *
+ * Otherwise unprivileged userspace fs would be able to block
+ * unrelated:
+ *
+ * - page migration
+ * - sync(2)
+ * - try_to_free_pages() with order > PAGE_ALLOC_COSTLY_ORDER
+ */
+static int fuse_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+{
+       /*
+        * Don't use page->mapping as it may become NULL from a
+        * concurrent truncate.
+        */
+       struct inode *inode = vma->vm_file->f_mapping->host;
+
+       fuse_wait_on_page_writeback(inode, page->index);
+       return 0;
+}
+
+static struct vm_operations_struct fuse_file_vm_ops = {
+       .close          = fuse_vma_close,
+       .fault          = filemap_fault,
+       .page_mkwrite   = fuse_page_mkwrite,
+};
+
+static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+       if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
+               struct inode *inode = file->f_dentry->d_inode;
+               struct fuse_conn *fc = get_fuse_conn(inode);
+               struct fuse_inode *fi = get_fuse_inode(inode);
+               struct fuse_file *ff = file->private_data;
+               /*
+                * file may be written through mmap, so chain it onto the
+                * inode's write_files list
+                */
+               spin_lock(&fc->lock);
+               if (list_empty(&ff->write_entry))
+                       list_add(&ff->write_entry, &fi->write_files);
+               spin_unlock(&fc->lock);
+       }
+       file_accessed(file);
+       vma->vm_ops = &fuse_file_vm_ops;
        return 0;
 }
 
@@ -909,12 +1431,37 @@ static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
        return err ? 0 : outarg.block;
 }
 
+static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
+{
+       loff_t retval;
+       struct inode *inode = file->f_path.dentry->d_inode;
+
+       mutex_lock(&inode->i_mutex);
+       switch (origin) {
+       case SEEK_END:
+               offset += i_size_read(inode);
+               break;
+       case SEEK_CUR:
+               offset += file->f_pos;
+       }
+       retval = -EINVAL;
+       if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
+               if (offset != file->f_pos) {
+                       file->f_pos = offset;
+                       file->f_version = 0;
+               }
+               retval = offset;
+       }
+       mutex_unlock(&inode->i_mutex);
+       return retval;
+}
+
 static const struct file_operations fuse_file_operations = {
-       .llseek         = generic_file_llseek,
+       .llseek         = fuse_file_llseek,
        .read           = do_sync_read,
        .aio_read       = fuse_file_aio_read,
        .write          = do_sync_write,
-       .aio_write      = generic_file_aio_write,
+       .aio_write      = fuse_file_aio_write,
        .mmap           = fuse_file_mmap,
        .open           = fuse_open,
        .flush          = fuse_flush,
@@ -926,7 +1473,7 @@ static const struct file_operations fuse_file_operations = {
 };
 
 static const struct file_operations fuse_direct_io_file_operations = {
-       .llseek         = generic_file_llseek,
+       .llseek         = fuse_file_llseek,
        .read           = fuse_direct_read,
        .write          = fuse_direct_write,
        .open           = fuse_open,
@@ -940,10 +1487,12 @@ static const struct file_operations fuse_direct_io_file_operations = {
 
 static const struct address_space_operations fuse_file_aops  = {
        .readpage       = fuse_readpage,
+       .writepage      = fuse_writepage,
+       .launder_page   = fuse_launder_page,
        .write_begin    = fuse_write_begin,
        .write_end      = fuse_write_end,
        .readpages      = fuse_readpages,
-       .set_page_dirty = fuse_set_page_dirty,
+       .set_page_dirty = __set_page_dirty_nobuffers,
        .bmap           = fuse_bmap,
 };
 
index 67aaf6ee38eaaeb1fedd9c313a160f623d58b8c3..dadffa21a206d220307e9b3e1212b09080bf5741 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/mm.h>
 #include <linux/backing-dev.h>
 #include <linux/mutex.h>
+#include <linux/rwsem.h>
 
 /** Max number of pages that can be used in a single read request */
 #define FUSE_MAX_PAGES_PER_REQ 32
@@ -25,6 +26,9 @@
 /** Congestion starts at 75% of maximum */
 #define FUSE_CONGESTION_THRESHOLD (FUSE_MAX_BACKGROUND * 75 / 100)
 
+/** Bias for fi->writectr, meaning new writepages must not be sent */
+#define FUSE_NOWRITE INT_MIN
+
 /** It could be as large as PATH_MAX, but would that have any uses? */
 #define FUSE_NAME_MAX 1024
 
@@ -73,6 +77,19 @@ struct fuse_inode {
 
        /** Files usable in writepage.  Protected by fc->lock */
        struct list_head write_files;
+
+       /** Writepages pending on truncate or fsync */
+       struct list_head queued_writes;
+
+       /** Number of sent writes, a negative bias (FUSE_NOWRITE)
+        * means more writes are blocked */
+       int writectr;
+
+       /** Waitq for writepage completion */
+       wait_queue_head_t page_waitq;
+
+       /** List of writepage requests (pending or sent) */
+       struct list_head writepages;
 };
 
 /** FUSE specific file data */
@@ -222,7 +239,10 @@ struct fuse_req {
                } release;
                struct fuse_init_in init_in;
                struct fuse_init_out init_out;
-               struct fuse_read_in read_in;
+               struct {
+                       struct fuse_read_in in;
+                       u64 attr_ver;
+               } read;
                struct {
                        struct fuse_write_in in;
                        struct fuse_write_out out;
@@ -242,6 +262,12 @@ struct fuse_req {
        /** File used in the request (or NULL) */
        struct fuse_file *ff;
 
+       /** Inode used in the request or NULL */
+       struct inode *inode;
+
+       /** Link on fi->writepages */
+       struct list_head writepages_entry;
+
        /** Request completion callback */
        void (*end)(struct fuse_conn *, struct fuse_req *);
 
@@ -390,8 +416,8 @@ struct fuse_conn {
        /** Entry on the fuse_conn_list */
        struct list_head entry;
 
-       /** Unique ID */
-       u64 id;
+       /** Device ID from super block */
+       dev_t dev;
 
        /** Dentries in the control filesystem */
        struct dentry *ctl_dentry[FUSE_CTL_NUM_DENTRIES];
@@ -438,7 +464,7 @@ extern const struct file_operations fuse_dev_operations;
 /**
  * Get a filled in inode
  */
-struct inode *fuse_iget(struct super_block *sb, unsigned long nodeid,
+struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
                        int generation, struct fuse_attr *attr,
                        u64 attr_valid, u64 attr_version);
 
@@ -446,7 +472,7 @@ struct inode *fuse_iget(struct super_block *sb, unsigned long nodeid,
  * Send FORGET command
  */
 void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
-                     unsigned long nodeid, u64 nlookup);
+                     u64 nodeid, u64 nlookup);
 
 /**
  * Initialize READ or READDIR request
@@ -504,6 +530,11 @@ void fuse_init_symlink(struct inode *inode);
 void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
                            u64 attr_valid, u64 attr_version);
 
+void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
+                                  u64 attr_valid);
+
+void fuse_truncate(struct address_space *mapping, loff_t offset);
+
 /**
  * Initialize the client device
  */
@@ -522,6 +553,8 @@ void fuse_ctl_cleanup(void);
  */
 struct fuse_req *fuse_request_alloc(void);
 
+struct fuse_req *fuse_request_alloc_nofs(void);
+
 /**
  * Free a request
  */
@@ -558,6 +591,8 @@ void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req);
  */
 void request_send_background(struct fuse_conn *fc, struct fuse_req *req);
 
+void request_send_background_locked(struct fuse_conn *fc, struct fuse_req *req);
+
 /* Abort all requests */
 void fuse_abort_conn(struct fuse_conn *fc);
 
@@ -600,3 +635,10 @@ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id);
 
 int fuse_update_attributes(struct inode *inode, struct kstat *stat,
                           struct file *file, bool *refreshed);
+
+void fuse_flush_writepages(struct inode *inode);
+
+void fuse_set_nowrite(struct inode *inode);
+void fuse_release_nowrite(struct inode *inode);
+
+u64 fuse_get_attr_version(struct fuse_conn *fc);
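
The read_in member becomes a small struct above so that each read request also carries the attr_version sampled when it was sent: fuse_read_update_size() in file.c shrinks the cached i_size on a short read only if that version is still current, i.e. nothing else changed the attributes in the meantime. A minimal sketch of that compare-version-then-update idea (the struct and names below are illustrative, not the fuse data types):

#include <stdio.h>

struct sized {
        unsigned long long attr_version;
        long long size;
};

/* Apply a shrink observed under seen_version only if still current. */
static void update_size_if_unchanged(struct sized *s, long long new_size,
                                     unsigned long long seen_version)
{
        if (seen_version == s->attr_version && new_size < s->size) {
                s->attr_version++;      /* the shrink is itself a change */
                s->size = new_size;
        }
}

int main(void)
{
        struct sized s = { .attr_version = 1, .size = 4096 };
        unsigned long long v = s.attr_version;   /* snapshot before the read */

        s.attr_version++;                        /* concurrent attribute change */
        update_size_if_unchanged(&s, 100, v);    /* ignored: version moved on */
        printf("size=%lld\n", s.size);           /* still 4096 */

        v = s.attr_version;
        update_size_if_unchanged(&s, 100, v);    /* short read taken as EOF */
        printf("size=%lld\n", s.size);           /* 100 */
        return 0;
}
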
index 4df34da2284aaf1cf9b96ee995afb03fd408209a..79b61587383832f7a7ff2187642f0fdcc509f047 100644 (file)
@@ -59,7 +59,11 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
        fi->nodeid = 0;
        fi->nlookup = 0;
        fi->attr_version = 0;
+       fi->writectr = 0;
        INIT_LIST_HEAD(&fi->write_files);
+       INIT_LIST_HEAD(&fi->queued_writes);
+       INIT_LIST_HEAD(&fi->writepages);
+       init_waitqueue_head(&fi->page_waitq);
        fi->forget_req = fuse_request_alloc();
        if (!fi->forget_req) {
                kmem_cache_free(fuse_inode_cachep, inode);
@@ -73,13 +77,14 @@ static void fuse_destroy_inode(struct inode *inode)
 {
        struct fuse_inode *fi = get_fuse_inode(inode);
        BUG_ON(!list_empty(&fi->write_files));
+       BUG_ON(!list_empty(&fi->queued_writes));
        if (fi->forget_req)
                fuse_request_free(fi->forget_req);
        kmem_cache_free(fuse_inode_cachep, inode);
 }
 
 void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
-                     unsigned long nodeid, u64 nlookup)
+                     u64 nodeid, u64 nlookup)
 {
        struct fuse_forget_in *inarg = &req->misc.forget_in;
        inarg->nlookup = nlookup;
@@ -109,7 +114,7 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
        return 0;
 }
 
-static void fuse_truncate(struct address_space *mapping, loff_t offset)
+void fuse_truncate(struct address_space *mapping, loff_t offset)
 {
        /* See vmtruncate() */
        unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
@@ -117,19 +122,12 @@ static void fuse_truncate(struct address_space *mapping, loff_t offset)
        unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
 }
 
-
-void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
-                           u64 attr_valid, u64 attr_version)
+void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
+                                  u64 attr_valid)
 {
        struct fuse_conn *fc = get_fuse_conn(inode);
        struct fuse_inode *fi = get_fuse_inode(inode);
-       loff_t oldsize;
 
-       spin_lock(&fc->lock);
-       if (attr_version != 0 && fi->attr_version > attr_version) {
-               spin_unlock(&fc->lock);
-               return;
-       }
        fi->attr_version = ++fc->attr_version;
        fi->i_time = attr_valid;
 
@@ -159,6 +157,22 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
        fi->orig_i_mode = inode->i_mode;
        if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
                inode->i_mode &= ~S_ISVTX;
+}
+
+void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
+                           u64 attr_valid, u64 attr_version)
+{
+       struct fuse_conn *fc = get_fuse_conn(inode);
+       struct fuse_inode *fi = get_fuse_inode(inode);
+       loff_t oldsize;
+
+       spin_lock(&fc->lock);
+       if (attr_version != 0 && fi->attr_version > attr_version) {
+               spin_unlock(&fc->lock);
+               return;
+       }
+
+       fuse_change_attributes_common(inode, attr, attr_valid);
 
        oldsize = inode->i_size;
        i_size_write(inode, attr->size);
@@ -193,7 +207,7 @@ static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
 
 static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
 {
-       unsigned long nodeid = *(unsigned long *) _nodeidp;
+       u64 nodeid = *(u64 *) _nodeidp;
        if (get_node_id(inode) == nodeid)
                return 1;
        else
@@ -202,12 +216,12 @@ static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
 
 static int fuse_inode_set(struct inode *inode, void *_nodeidp)
 {
-       unsigned long nodeid = *(unsigned long *) _nodeidp;
+       u64 nodeid = *(u64 *) _nodeidp;
        get_fuse_inode(inode)->nodeid = nodeid;
        return 0;
 }
 
-struct inode *fuse_iget(struct super_block *sb, unsigned long nodeid,
+struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
                        int generation, struct fuse_attr *attr,
                        u64 attr_valid, u64 attr_version)
 {
@@ -447,7 +461,7 @@ static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
        return 0;
 }
 
-static struct fuse_conn *new_conn(void)
+static struct fuse_conn *new_conn(struct super_block *sb)
 {
        struct fuse_conn *fc;
        int err;
@@ -468,19 +482,41 @@ static struct fuse_conn *new_conn(void)
                atomic_set(&fc->num_waiting, 0);
                fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
                fc->bdi.unplug_io_fn = default_unplug_io_fn;
+               /* fuse does its own writeback accounting */
+               fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
+               fc->dev = sb->s_dev;
                err = bdi_init(&fc->bdi);
-               if (err) {
-                       kfree(fc);
-                       fc = NULL;
-                       goto out;
-               }
+               if (err)
+                       goto error_kfree;
+               err = bdi_register_dev(&fc->bdi, fc->dev);
+               if (err)
+                       goto error_bdi_destroy;
+               /*
+                * For a single fuse filesystem use max 1% of dirty +
+                * writeback threshold.
+                *
+                * This gives about 1M of write buffer for memory maps on a
+                * machine with 1G and 10% dirty_ratio, which should be more
+                * than enough.
+                *
+                * Privileged users can raise it by writing to
+                *
+                *    /sys/class/bdi/<bdi>/max_ratio
+                */
+               bdi_set_max_ratio(&fc->bdi, 1);
                fc->reqctr = 0;
                fc->blocked = 1;
                fc->attr_version = 1;
                get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
        }
-out:
        return fc;
+
+error_bdi_destroy:
+       bdi_destroy(&fc->bdi);
+error_kfree:
+       mutex_destroy(&fc->inst_mutex);
+       kfree(fc);
+       return NULL;
 }
 
 void fuse_conn_put(struct fuse_conn *fc)
@@ -548,6 +584,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
                fc->minor = arg->minor;
                fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
+               fc->max_write = max_t(unsigned, 4096, fc->max_write);
                fc->conn_init = 1;
        }
        fuse_put_request(fc, req);
@@ -578,12 +615,6 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
        request_send_background(fc, req);
 }
 
-static u64 conn_id(void)
-{
-       static u64 ctr = 1;
-       return ctr++;
-}
-
 static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 {
        struct fuse_conn *fc;
@@ -621,14 +652,14 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
        if (file->f_op != &fuse_dev_operations)
                return -EINVAL;
 
-       fc = new_conn();
+       fc = new_conn(sb);
        if (!fc)
                return -ENOMEM;
 
        fc->flags = d.flags;
        fc->user_id = d.user_id;
        fc->group_id = d.group_id;
-       fc->max_read = d.max_read;
+       fc->max_read = max_t(unsigned, 4096, d.max_read);
 
        /* Used by get_root_inode() */
        sb->s_fs_info = fc;
@@ -659,7 +690,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
        if (file->private_data)
                goto err_unlock;
 
-       fc->id = conn_id();
        err = fuse_ctl_add_conn(fc);
        if (err)
                goto err_unlock;
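
Aside (not part of the patch): the reworked fuse_change_attributes() above keeps the "ignore stale replies" check under fc->lock and bumps fi->attr_version on every accepted update, while the new fuse_change_attributes_common() helper carries just the field updates. A minimal userspace sketch of that stale-update check, with hypothetical names and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct cached_attr {
        pthread_mutex_t lock;
        uint64_t version;       /* bumped on every accepted update */
        uint64_t size;
};

/* Apply an update only if it is not older than what we already cached. */
static void update_attr(struct cached_attr *a, uint64_t seen_version,
                        uint64_t new_size)
{
        pthread_mutex_lock(&a->lock);
        if (seen_version != 0 && a->version > seen_version) {
                pthread_mutex_unlock(&a->lock);         /* stale reply, drop it */
                return;
        }
        a->version++;
        a->size = new_size;
        pthread_mutex_unlock(&a->lock);
}

int main(void)
{
        struct cached_attr a = { PTHREAD_MUTEX_INITIALIZER, 5, 100 };

        update_attr(&a, 3, 42);         /* older than version 5: ignored */
        update_attr(&a, 5, 42);         /* current: applied */
        printf("size=%llu version=%llu\n",
               (unsigned long long)a.size, (unsigned long long)a.version);
        return 0;
}

Splitting the common part out lets the version-checked path and any future callers share the field updates without duplicating them.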
index 8479da47049cbe163c8d642c1cbc8fe30532ee39..a4ff271df9ee4432eb52ceb16f59cb65a0af1f87 100644 (file)
@@ -212,7 +212,7 @@ int gdlm_sysfs_init(void)
 {
        gdlm_kset = kset_create_and_add("lock_dlm", NULL, kernel_kobj);
        if (!gdlm_kset) {
-               printk(KERN_WARNING "%s: can not create kset\n", __FUNCTION__);
+               printk(KERN_WARNING "%s: can not create kset\n", __func__);
                return -ENOMEM;
        }
        return 0;
index 90a04a6e3789179ce7737ca5da9227b41167c334..f55394e57cb28facf0e8928053064f8b70ad7f37 100644 (file)
@@ -438,7 +438,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
        int error;
 
        /*
-        * Due to the order of unstuffing files and ->nopage(), we can be
+        * Due to the order of unstuffing files and ->fault(), we can be
         * asked for a zero page in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
index 509c5d60bd803ecc928df4aad578eb8856ae9ffa..7f48576289c9f9a916aee237d6b2d6308f2bb93b 100644 (file)
@@ -41,7 +41,7 @@ int gfs2_assert_withdraw_i(struct gfs2_sbd *sdp, char *assertion,
 
 #define gfs2_assert_withdraw(sdp, assertion) \
 ((likely(assertion)) ? 0 : gfs2_assert_withdraw_i((sdp), #assertion, \
-                                       __FUNCTION__, __FILE__, __LINE__))
+                                       __func__, __FILE__, __LINE__))
 
 
 int gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion,
@@ -49,28 +49,28 @@ int gfs2_assert_warn_i(struct gfs2_sbd *sdp, char *assertion,
 
 #define gfs2_assert_warn(sdp, assertion) \
 ((likely(assertion)) ? 0 : gfs2_assert_warn_i((sdp), #assertion, \
-                                       __FUNCTION__, __FILE__, __LINE__))
+                                       __func__, __FILE__, __LINE__))
 
 
 int gfs2_consist_i(struct gfs2_sbd *sdp, int cluster_wide,
                   const char *function, char *file, unsigned int line);
 
 #define gfs2_consist(sdp) \
-gfs2_consist_i((sdp), 0, __FUNCTION__, __FILE__, __LINE__)
+gfs2_consist_i((sdp), 0, __func__, __FILE__, __LINE__)
 
 
 int gfs2_consist_inode_i(struct gfs2_inode *ip, int cluster_wide,
                         const char *function, char *file, unsigned int line);
 
 #define gfs2_consist_inode(ip) \
-gfs2_consist_inode_i((ip), 0, __FUNCTION__, __FILE__, __LINE__)
+gfs2_consist_inode_i((ip), 0, __func__, __FILE__, __LINE__)
 
 
 int gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd, int cluster_wide,
                         const char *function, char *file, unsigned int line);
 
 #define gfs2_consist_rgrpd(rgd) \
-gfs2_consist_rgrpd_i((rgd), 0, __FUNCTION__, __FILE__, __LINE__)
+gfs2_consist_rgrpd_i((rgd), 0, __func__, __FILE__, __LINE__)
 
 
 int gfs2_meta_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
@@ -91,7 +91,7 @@ static inline int gfs2_meta_check_i(struct gfs2_sbd *sdp,
 }
 
 #define gfs2_meta_check(sdp, bh) \
-gfs2_meta_check_i((sdp), (bh), __FUNCTION__, __FILE__, __LINE__)
+gfs2_meta_check_i((sdp), (bh), __func__, __FILE__, __LINE__)
 
 
 int gfs2_metatype_check_ii(struct gfs2_sbd *sdp, struct buffer_head *bh,
@@ -118,7 +118,7 @@ static inline int gfs2_metatype_check_i(struct gfs2_sbd *sdp,
 }
 
 #define gfs2_metatype_check(sdp, bh, type) \
-gfs2_metatype_check_i((sdp), (bh), (type), __FUNCTION__, __FILE__, __LINE__)
+gfs2_metatype_check_i((sdp), (bh), (type), __func__, __FILE__, __LINE__)
 
 static inline void gfs2_metatype_set(struct buffer_head *bh, u16 type,
                                     u16 format)
@@ -134,14 +134,14 @@ int gfs2_io_error_i(struct gfs2_sbd *sdp, const char *function,
                    char *file, unsigned int line);
 
 #define gfs2_io_error(sdp) \
-gfs2_io_error_i((sdp), __FUNCTION__, __FILE__, __LINE__);
+gfs2_io_error_i((sdp), __func__, __FILE__, __LINE__);
 
 
 int gfs2_io_error_bh_i(struct gfs2_sbd *sdp, struct buffer_head *bh,
                       const char *function, char *file, unsigned int line);
 
 #define gfs2_io_error_bh(sdp, bh) \
-gfs2_io_error_bh_i((sdp), (bh), __FUNCTION__, __FILE__, __LINE__);
+gfs2_io_error_bh_i((sdp), (bh), __func__, __FILE__, __LINE__);
 
 
 extern struct kmem_cache *gfs2_glock_cachep;
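
Aside: this file, like many hunks in this merge, swaps the gcc-specific __FUNCTION__ for the C99 standard __func__ predefined identifier; the output is unchanged. A trivial standalone illustration:

#include <stdio.h>

static void report(void)
{
        printf("in %s at %s:%d\n", __func__, __FILE__, __LINE__);
}

int main(void)
{
        report();
        return 0;
}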
index 24cf6fc4302122366ed444c7f5f3344b7cae7382..f6621a785202b9f3d26f562bd7402cfbedb589a9 100644 (file)
@@ -208,7 +208,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
        struct hfs_bnode *node, *next_node;
        struct page **pagep;
        u32 nidx, idx;
-       u16 off, len;
+       unsigned off;
+       u16 off16;
+       u16 len;
        u8 *data, byte, m;
        int i;
 
@@ -235,7 +237,8 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
        node = hfs_bnode_find(tree, nidx);
        if (IS_ERR(node))
                return node;
-       len = hfs_brec_lenoff(node, 2, &off);
+       len = hfs_brec_lenoff(node, 2, &off16);
+       off = off16;
 
        off += node->page_offset;
        pagep = node->page + (off >> PAGE_CACHE_SHIFT);
@@ -280,7 +283,8 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                        return next_node;
                node = next_node;
 
-               len = hfs_brec_lenoff(node, 0, &off);
+               len = hfs_brec_lenoff(node, 0, &off16);
+               off = off16;
                off += node->page_offset;
                pagep = node->page + (off >> PAGE_CACHE_SHIFT);
                data = kmap(*pagep);
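
Aside: hfs_bmap_alloc() now keeps the 16-bit offset from hfs_brec_lenoff() in off16 and does the "off += node->page_offset" addition in a wider unsigned, so the sum cannot wrap at 65536 on large-page configurations; the hfsplus copy of the function below gets the identical fix. A standalone sketch of the wraparound being avoided (numbers are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t off16 = 60000;                 /* offset as stored in the b-tree */
        unsigned page_offset = 8192;

        uint16_t wrapped = off16 + page_offset; /* truncated back to 16 bits */
        unsigned off = off16;                   /* widened first, as in the fix */

        off += page_offset;
        printf("wrapped=%u correct=%u\n", (unsigned)wrapped, off);
        return 0;
}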
index b4651e128d7fbb8e25a3b1767ef1f967ea08b6f0..36ca2e1a4fa32b82f937fa415f77a92addfdd022 100644 (file)
@@ -215,7 +215,7 @@ int hfs_mdb_get(struct super_block *sb)
                attrib &= cpu_to_be16(~HFS_SB_ATTRIB_UNMNT);
                attrib |= cpu_to_be16(HFS_SB_ATTRIB_INCNSTNT);
                mdb->drAtrb = attrib;
-               mdb->drWrCnt = cpu_to_be32(be32_to_cpu(mdb->drWrCnt) + 1);
+               be32_add_cpu(&mdb->drWrCnt, 1);
                mdb->drLsMod = hfs_mtime();
 
                mark_buffer_dirty(HFS_SB(sb)->mdb_bh);
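
Aside: be32_add_cpu() replaces the open-coded cpu_to_be32(be32_to_cpu(x) + n) round trip with a single helper. A userspace sketch of the equivalent operation, using htonl()/ntohl() as stand-ins for the kernel's byte-order conversions:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t be32;                  /* stand-in for the kernel's __be32 */

static void be32_add_cpu(be32 *var, uint32_t val)
{
        *var = htonl(ntohl(*var) + val);        /* convert, add, convert back */
}

int main(void)
{
        be32 write_count = htonl(41);

        be32_add_cpu(&write_count, 1);
        printf("count = %u\n", ntohl(write_count));     /* prints 42 */
        return 0;
}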
index 32de44ed002196c5b3fd327662dc123f9899f8ac..8cf67974adf60ad79f1b88da04ba4d4fb71f3b68 100644 (file)
@@ -297,7 +297,8 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
                                return 0;
                        }
                        p = match_strdup(&args[0]);
-                       hsb->nls_disk = load_nls(p);
+                       if (p)
+                               hsb->nls_disk = load_nls(p);
                        if (!hsb->nls_disk) {
                                printk(KERN_ERR "hfs: unable to load codepage \"%s\"\n", p);
                                kfree(p);
@@ -311,7 +312,8 @@ static int parse_options(char *options, struct hfs_sb_info *hsb)
                                return 0;
                        }
                        p = match_strdup(&args[0]);
-                       hsb->nls_io = load_nls(p);
+                       if (p)
+                               hsb->nls_io = load_nls(p);
                        if (!hsb->nls_io) {
                                printk(KERN_ERR "hfs: unable to load iocharset \"%s\"\n", p);
                                kfree(p);
index bb5433608a42efdf3c6ec0ef9b809c614a11b686..e49fcee1e293f725786e84ea6126e408e5eda7c8 100644 (file)
@@ -184,7 +184,9 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
        struct hfs_bnode *node, *next_node;
        struct page **pagep;
        u32 nidx, idx;
-       u16 off, len;
+       unsigned off;
+       u16 off16;
+       u16 len;
        u8 *data, byte, m;
        int i;
 
@@ -211,7 +213,8 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
        node = hfs_bnode_find(tree, nidx);
        if (IS_ERR(node))
                return node;
-       len = hfs_brec_lenoff(node, 2, &off);
+       len = hfs_brec_lenoff(node, 2, &off16);
+       off = off16;
 
        off += node->page_offset;
        pagep = node->page + (off >> PAGE_CACHE_SHIFT);
@@ -256,7 +259,8 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
                        return next_node;
                node = next_node;
 
-               len = hfs_brec_lenoff(node, 0, &off);
+               len = hfs_brec_lenoff(node, 0, &off16);
+               off = off16;
                off += node->page_offset;
                pagep = node->page + (off >> PAGE_CACHE_SHIFT);
                data = kmap(*pagep);
index d72d0a8b25aacf39fd819da29c0963a441f9718a..9e59537b43d56ef0ca178acce4ec900b7d50fcff 100644 (file)
@@ -311,6 +311,10 @@ int hfsplus_delete_cat(u32, struct inode *, struct qstr *);
 int hfsplus_rename_cat(u32, struct inode *, struct qstr *,
                       struct inode *, struct qstr *);
 
+/* dir.c */
+extern const struct inode_operations hfsplus_dir_inode_operations;
+extern const struct file_operations hfsplus_dir_operations;
+
 /* extents.c */
 int hfsplus_ext_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *);
 void hfsplus_ext_write_extent(struct inode *);
index 37744cf3706ac4e7487c687974bda361eb0e873a..d53b2af91c25d78e39c17a692733224753e1905f 100644 (file)
@@ -278,9 +278,6 @@ static int hfsplus_file_release(struct inode *inode, struct file *file)
        return 0;
 }
 
-extern const struct inode_operations hfsplus_dir_inode_operations;
-extern struct file_operations hfsplus_dir_operations;
-
 static const struct inode_operations hfsplus_file_inode_operations = {
        .lookup         = hfsplus_file_lookup,
        .truncate       = hfsplus_file_truncate,
index dc64fac008315092a6d878e5df8634735ea3bbb8..9997cbf8beb50ece6abfa28dc18f3223037eab01 100644 (file)
@@ -132,7 +132,8 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi)
                                return 0;
                        }
                        p = match_strdup(&args[0]);
-                       sbi->nls = load_nls(p);
+                       if (p)
+                               sbi->nls = load_nls(p);
                        if (!sbi->nls) {
                                printk(KERN_ERR "hfs: unable to load nls mapping \"%s\"\n", p);
                                kfree(p);
index b0f9ad362d1d45127a2391eb034e1c7e642448df..ce97a54518d81d8b904c7a01c336133516547eb3 100644 (file)
@@ -357,7 +357,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
                printk(KERN_WARNING "hfs: Filesystem is marked locked, mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
        } else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
-               printk(KERN_WARNING "hfs: write access to a jounaled filesystem is not supported, "
+               printk(KERN_WARNING "hfs: write access to a journaled filesystem is not supported, "
                       "use the force option at your own risk, mounting read-only.\n");
                sb->s_flags |= MS_RDONLY;
        }
@@ -423,7 +423,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
         */
        vhdr->last_mount_vers = cpu_to_be32(HFSP_MOUNT_VERSION);
        vhdr->modify_date = hfsp_now2mt();
-       vhdr->write_count = cpu_to_be32(be32_to_cpu(vhdr->write_count) + 1);
+       be32_add_cpu(&vhdr->write_count, 1);
        vhdr->attributes &= cpu_to_be32(~HFSPLUS_VOL_UNMNT);
        vhdr->attributes |= cpu_to_be32(HFSPLUS_VOL_INCNSTNT);
        mark_buffer_dirty(HFSPLUS_SB(sb).s_vhbh);
index 72cab78f05091282843baef84d6e03abdc8263aa..175d08eacc868226d581d46a2101169bd103f096 100644 (file)
@@ -47,7 +47,7 @@ static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd)
                return 0;
        wd->ablk_start = be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ABLKSTART));
 
-       extent = be32_to_cpu(get_unaligned((__be32 *)(bufptr + HFSP_WRAPOFF_EMBEDEXT)));
+       extent = get_unaligned_be32(bufptr + HFSP_WRAPOFF_EMBEDEXT);
        wd->embed_start = (extent >> 16) & 0xFFFF;
        wd->embed_count = extent & 0xFFFF;
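
Aside: get_unaligned_be32() merges the previous get_unaligned() + be32_to_cpu() pair into one helper that reads a big-endian 32-bit value from a possibly unaligned address. An illustrative userspace version (memcpy keeps the access alignment-safe; the split into a start and count mirrors the code above):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_unaligned_be32(const void *p)
{
        uint8_t b[4];

        memcpy(b, p, 4);
        return ((uint32_t)b[0] << 24) | ((uint32_t)b[1] << 16) |
               ((uint32_t)b[2] << 8)  |  (uint32_t)b[3];
}

int main(void)
{
        /* five bytes so the interesting word starts at an odd offset */
        const uint8_t buf[5] = { 0x00, 0x12, 0x34, 0x56, 0x78 };
        uint32_t extent = get_unaligned_be32(buf + 1);

        printf("start=%u count=%u\n", (extent >> 16) & 0xFFFF, extent & 0xFFFF);
        return 0;
}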
 
index 6846785fe9043b48279bc04c44328d0264e48197..aeabf80f81a5d38561768ea95d1b544605e5dcc4 100644 (file)
@@ -45,7 +45,7 @@ static const struct inode_operations hugetlbfs_inode_operations;
 
 static struct backing_dev_info hugetlbfs_backing_dev_info = {
        .ra_pages       = 0,    /* No readahead */
-       .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
 
 int sysctl_hugetlb_shm_group;
@@ -504,7 +504,7 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                INIT_LIST_HEAD(&inode->i_mapping->private_list);
                info = HUGETLBFS_I(inode);
-               mpol_shared_policy_init(&info->policy, MPOL_DEFAULT, NULL);
+               mpol_shared_policy_init(&info->policy, NULL);
                switch (mode & S_IFMT) {
                default:
                        init_special_inode(inode, mode, dev);
index 27ee1af50d02c6537febbaed5cf7bd60debc832d..bf64781304243f62cc1ff52f9f395724651103d7 100644 (file)
@@ -495,8 +495,7 @@ static struct inode * find_inode(struct super_block * sb, struct hlist_head *hea
        struct inode * inode = NULL;
 
 repeat:
-       hlist_for_each (node, head) { 
-               inode = hlist_entry(node, struct inode, i_hash);
+       hlist_for_each_entry(inode, node, head, i_hash) {
                if (inode->i_sb != sb)
                        continue;
                if (!test(inode, data))
@@ -520,8 +519,7 @@ static struct inode * find_inode_fast(struct super_block * sb, struct hlist_head
        struct inode * inode = NULL;
 
 repeat:
-       hlist_for_each (node, head) {
-               inode = hlist_entry(node, struct inode, i_hash);
+       hlist_for_each_entry(inode, node, head, i_hash) {
                if (inode->i_ino != ino)
                        continue;
                if (inode->i_sb != sb)
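
Aside: hlist_for_each_entry() folds the explicit hlist_entry()/container_of() step into the iterator, which is all these two find_inode hunks change. A much-simplified userspace sketch of the idea, with a plain singly linked node standing in for the kernel's hlist (names are hypothetical):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct hnode { struct hnode *next; };

struct fake_inode {
        unsigned long i_ino;
        struct hnode i_hash;
};

/* iterate over embedded nodes, handing back the containing structure */
#define for_each_entry(pos, head, member)                               \
        for (struct hnode *_n = (head); _n &&                           \
             ((pos) = container_of(_n, struct fake_inode, member), 1);  \
             _n = _n->next)

int main(void)
{
        struct fake_inode b = { .i_ino = 2, .i_hash = { NULL } };
        struct fake_inode a = { .i_ino = 1, .i_hash = { &b.i_hash } };
        struct fake_inode *ip;

        for_each_entry(ip, &a.i_hash, i_hash)
                printf("ino %lu\n", ip->i_ino);
        return 0;
}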
index 7b94a1e3c015468b30e6343156053fd5e409afca..6676c06bb7c16935b3a7fbc4615836f1dfb256fb 100644 (file)
@@ -598,7 +598,7 @@ asmlinkage long sys_inotify_init(void)
        }
 
        ih = inotify_init(&inotify_user_ops);
-       if (unlikely(IS_ERR(ih))) {
+       if (IS_ERR(ih)) {
                ret = PTR_ERR(ih);
                goto out_free_dev;
        }
index f32fbde2175e84d6bb3bc59fdeacc9860f5837b3..7db32b3382d3a66452388ad9e9023216a1855ab3 100644 (file)
@@ -28,8 +28,8 @@
  *
  * Returns 0 on success, -errno on error.
  */
-long vfs_ioctl(struct file *filp, unsigned int cmd,
-              unsigned long arg)
+static long vfs_ioctl(struct file *filp, unsigned int cmd,
+                     unsigned long arg)
 {
        int error = -ENOTTY;
 
index 1ba407c64df1fed39edbe1470538dd49da171a61..2f0dc5a1463306adcecc3296a8cf6b01defb7fe3 100644 (file)
@@ -145,6 +145,14 @@ static int do_isofs_readdir(struct inode *inode, struct file *filp,
                        }
                        de = tmpde;
                }
+               /* Basic sanity check, whether name doesn't exceed dir entry */
+               if (de_len < de->name_len[0] +
+                                       sizeof(struct iso_directory_record)) {
+                       printk(KERN_NOTICE "iso9660: Corrupted directory entry"
+                              " in block %lu of inode %lu\n", block,
+                              inode->i_ino);
+                       return -EIO;
+               }
 
                if (first_de) {
                        isofs_normalize_block_and_offset(de,
index d1bdf8adb3510a98ffe2c49518ba6bdd272eaa2d..ccbf72faf27ada523b4abf8d1fa2dc1f42283e1e 100644 (file)
@@ -78,29 +78,29 @@ static inline int isonum_712(char *p)
 }
 static inline unsigned int isonum_721(char *p)
 {
-       return le16_to_cpu(get_unaligned((__le16 *)p));
+       return get_unaligned_le16(p);
 }
 static inline unsigned int isonum_722(char *p)
 {
-       return be16_to_cpu(get_unaligned((__le16 *)p));
+       return get_unaligned_be16(p);
 }
 static inline unsigned int isonum_723(char *p)
 {
        /* Ignore bigendian datum due to broken mastering programs */
-       return le16_to_cpu(get_unaligned((__le16 *)p));
+       return get_unaligned_le16(p);
 }
 static inline unsigned int isonum_731(char *p)
 {
-       return le32_to_cpu(get_unaligned((__le32 *)p));
+       return get_unaligned_le32(p);
 }
 static inline unsigned int isonum_732(char *p)
 {
-       return be32_to_cpu(get_unaligned((__le32 *)p));
+       return get_unaligned_be32(p);
 }
 static inline unsigned int isonum_733(char *p)
 {
        /* Ignore bigendian datum due to broken mastering programs */
-       return le32_to_cpu(get_unaligned((__le32 *)p));
+       return get_unaligned_le32(p);
 }
 extern int iso_date(char *, int);
 
index 344b247bc29ab1f104fcc25af9a831052267baad..8299889a835ea5c9b95c7d71afcabdb4610310c1 100644 (file)
@@ -111,6 +111,13 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
 
                dlen = de->name_len[0];
                dpnt = de->name;
+               /* Basic sanity check, whether name doesn't exceed dir entry */
+               if (de_len < dlen + sizeof(struct iso_directory_record)) {
+                       printk(KERN_NOTICE "iso9660: Corrupted directory entry"
+                              " in block %lu of inode %lu\n", block,
+                              dir->i_ino);
+                       return 0;
+               }
 
                if (sbi->s_rock &&
                    ((i = get_rock_ridge_filename(de, tmpname, dir)))) {
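
Aside: both isofs hunks add the same defensive check: a directory record is rejected when its stated name length cannot fit inside the record length read from disk, instead of letting a corrupted image walk past the entry. A standalone sketch of the check with a hypothetical, simplified record layout:

#include <stdio.h>

struct dir_record {
        unsigned char length;           /* total record length on disk */
        unsigned char name_len;
        /* name bytes follow */
};

static int record_is_sane(const struct dir_record *de, unsigned de_len)
{
        /* the name must fit after the fixed header within de_len bytes */
        return de_len >= de->name_len + sizeof(struct dir_record);
}

int main(void)
{
        struct dir_record ok  = { .length = 34, .name_len = 8 };
        struct dir_record bad = { .length = 10, .name_len = 200 };

        printf("ok:  %s\n", record_is_sane(&ok, ok.length) ? "accept" : "reject");
        printf("bad: %s\n", record_is_sane(&bad, bad.length) ? "accept" : "reject");
        return 0;
}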
index a38c7186c5704e438236994baa18014b4bacb801..cd931ef1f000b2bb9c606b9be7b6c35c4b2c241a 100644 (file)
@@ -406,22 +406,6 @@ void journal_commit_transaction(journal_t *journal)
 
        jbd_debug (3, "JBD: commit phase 2\n");
 
-       /*
-        * First, drop modified flag: all accesses to the buffers
-        * will be tracked for a new trasaction only -bzzz
-        */
-       spin_lock(&journal->j_list_lock);
-       if (commit_transaction->t_buffers) {
-               new_jh = jh = commit_transaction->t_buffers->b_tnext;
-               do {
-                       J_ASSERT_JH(new_jh, new_jh->b_modified == 1 ||
-                                       new_jh->b_modified == 0);
-                       new_jh->b_modified = 0;
-                       new_jh = new_jh->b_tnext;
-               } while (new_jh != jh);
-       }
-       spin_unlock(&journal->j_list_lock);
-
        /*
         * Now start flushing things to disk, in the order they appear
         * on the transaction lists.  Data blocks go first.
@@ -488,6 +472,9 @@ void journal_commit_transaction(journal_t *journal)
         */
        commit_transaction->t_state = T_COMMIT;
 
+       J_ASSERT(commit_transaction->t_nr_buffers <=
+                commit_transaction->t_outstanding_credits);
+
        descriptor = NULL;
        bufs = 0;
        while (commit_transaction->t_buffers) {
index 0e081d5f32e85f8993af7bf809acbacb9d4a82e5..b99c3b3654c49ee33f86bc8883319a55f11a9711 100644 (file)
@@ -534,7 +534,7 @@ int log_wait_commit(journal_t *journal, tid_t tid)
        if (!tid_geq(journal->j_commit_request, tid)) {
                printk(KERN_EMERG
                       "%s: error: j_commit_request=%d, tid=%d\n",
-                      __FUNCTION__, journal->j_commit_request, tid);
+                      __func__, journal->j_commit_request, tid);
        }
        spin_unlock(&journal->j_state_lock);
 #endif
@@ -599,7 +599,7 @@ int journal_bmap(journal_t *journal, unsigned long blocknr,
 
                        printk(KERN_ALERT "%s: journal block not found "
                                        "at offset %lu on %s\n",
-                               __FUNCTION__,
+                               __func__,
                                blocknr,
                                bdevname(journal->j_dev, b));
                        err = -EIO;
@@ -728,7 +728,7 @@ journal_t * journal_init_dev(struct block_device *bdev,
        journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
        if (!journal->j_wbuf) {
                printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
-                       __FUNCTION__);
+                       __func__);
                kfree(journal);
                journal = NULL;
                goto out;
@@ -782,7 +782,7 @@ journal_t * journal_init_inode (struct inode *inode)
        journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
        if (!journal->j_wbuf) {
                printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
-                       __FUNCTION__);
+                       __func__);
                kfree(journal);
                return NULL;
        }
@@ -791,7 +791,7 @@ journal_t * journal_init_inode (struct inode *inode)
        /* If that failed, give up */
        if (err) {
                printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
-                      __FUNCTION__);
+                      __func__);
                kfree(journal);
                return NULL;
        }
@@ -877,7 +877,7 @@ int journal_create(journal_t *journal)
                 */
                printk(KERN_EMERG
                       "%s: creation of journal on external device!\n",
-                      __FUNCTION__);
+                      __func__);
                BUG();
        }
 
@@ -1657,7 +1657,7 @@ static struct journal_head *journal_alloc_journal_head(void)
                jbd_debug(1, "out of memory for journal_head\n");
                if (time_after(jiffies, last_warning + 5*HZ)) {
                        printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
-                              __FUNCTION__);
+                              __func__);
                        last_warning = jiffies;
                }
                while (ret == NULL) {
@@ -1794,13 +1794,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
                        if (jh->b_frozen_data) {
                                printk(KERN_WARNING "%s: freeing "
                                                "b_frozen_data\n",
-                                               __FUNCTION__);
+                                               __func__);
                                jbd_free(jh->b_frozen_data, bh->b_size);
                        }
                        if (jh->b_committed_data) {
                                printk(KERN_WARNING "%s: freeing "
                                                "b_committed_data\n",
-                                               __FUNCTION__);
+                                               __func__);
                                jbd_free(jh->b_committed_data, bh->b_size);
                        }
                        bh->b_private = NULL;
index d5f8eee7c88cfd0287a51cf934ad5270da5ce6c1..1bb43e987f4b4ff4f0e37a2d578d34c1642cf6c0 100644 (file)
@@ -138,7 +138,7 @@ repeat:
 oom:
        if (!journal_oom_retry)
                return -ENOMEM;
-       jbd_debug(1, "ENOMEM in %s, retrying\n", __FUNCTION__);
+       jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
        yield();
        goto repeat;
 }
index 2c9e8f5d13aae3505fce91eb78fe69396efb9aea..67ff2024c23c7d21950407ce8dfea088a090f1c4 100644 (file)
@@ -608,6 +608,12 @@ repeat:
            jh->b_next_transaction == transaction)
                goto done;
 
+       /*
+        * this is the first time this transaction is touching this buffer,
+        * reset the modified flag
+        */
+       jh->b_modified = 0;
+
        /*
         * If there is already a copy-out version of this buffer, then we don't
         * need to make another one
@@ -681,7 +687,7 @@ repeat:
                                if (!frozen_buffer) {
                                        printk(KERN_EMERG
                                               "%s: OOM for frozen_buffer\n",
-                                              __FUNCTION__);
+                                              __func__);
                                        JBUFFER_TRACE(jh, "oom!");
                                        error = -ENOMEM;
                                        jbd_lock_bh_state(bh);
@@ -820,9 +826,16 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
 
        if (jh->b_transaction == NULL) {
                jh->b_transaction = transaction;
+
+               /* first access by this transaction */
+               jh->b_modified = 0;
+
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                __journal_file_buffer(jh, transaction, BJ_Reserved);
        } else if (jh->b_transaction == journal->j_committing_transaction) {
+               /* first access by this transaction */
+               jh->b_modified = 0;
+
                JBUFFER_TRACE(jh, "set next transaction");
                jh->b_next_transaction = transaction;
        }
@@ -891,7 +904,7 @@ repeat:
                committed_data = jbd_alloc(jh2bh(jh)->b_size, GFP_NOFS);
                if (!committed_data) {
                        printk(KERN_EMERG "%s: No memory for committed data\n",
-                               __FUNCTION__);
+                               __func__);
                        err = -ENOMEM;
                        goto out;
                }
@@ -1222,6 +1235,7 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
        struct journal_head *jh;
        int drop_reserve = 0;
        int err = 0;
+       int was_modified = 0;
 
        BUFFER_TRACE(bh, "entry");
 
@@ -1240,6 +1254,9 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
                goto not_jbd;
        }
 
+       /* keep track of whether or not this transaction modified us */
+       was_modified = jh->b_modified;
+
        /*
         * The buffer's going from the transaction, we must drop
         * all references -bzzz
@@ -1257,7 +1274,12 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
 
                JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
 
-               drop_reserve = 1;
+               /*
+                * we only want to drop a reference if this transaction
+                * modified the buffer
+                */
+               if (was_modified)
+                       drop_reserve = 1;
 
                /*
                 * We are no longer going to journal this buffer.
@@ -1297,7 +1319,13 @@ int journal_forget (handle_t *handle, struct buffer_head *bh)
                if (jh->b_next_transaction) {
                        J_ASSERT(jh->b_next_transaction == transaction);
                        jh->b_next_transaction = NULL;
-                       drop_reserve = 1;
+
+                       /*
+                        * only drop a reference if this transaction modified
+                        * the buffer
+                        */
+                       if (was_modified)
+                               drop_reserve = 1;
                }
        }
 
@@ -2069,7 +2097,7 @@ void __journal_refile_buffer(struct journal_head *jh)
        jh->b_transaction = jh->b_next_transaction;
        jh->b_next_transaction = NULL;
        __journal_file_buffer(jh, jh->b_transaction,
-                               was_dirty ? BJ_Metadata : BJ_Reserved);
+                               jh->b_modified ? BJ_Metadata : BJ_Reserved);
        J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
 
        if (was_dirty)
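
Aside: the transaction.c changes above reset b_modified on the first access by a transaction and make journal_forget() return a buffer credit only when that same transaction had actually modified the buffer. A toy, purely illustrative model of that accounting rule (none of these names are kernel APIs):

#include <stdio.h>

struct toy_handle { int credits; };
struct toy_buffer { int modified; };    /* stands in for jh->b_modified */

static void get_write_access(struct toy_handle *h, struct toy_buffer *b)
{
        (void)h;
        b->modified = 0;                /* first touch by this transaction */
}

static void dirty_metadata(struct toy_handle *h, struct toy_buffer *b)
{
        if (!b->modified) {
                b->modified = 1;
                h->credits--;           /* one credit consumed per modified buffer */
        }
}

static void forget(struct toy_handle *h, struct toy_buffer *b)
{
        if (b->modified)                /* only refund what this transaction used */
                h->credits++;
}

int main(void)
{
        struct toy_handle h = { .credits = 10 };
        struct toy_buffer touched = { 0 }, untouched = { 0 };

        get_write_access(&h, &touched);
        dirty_metadata(&h, &touched);
        forget(&h, &touched);           /* refunded: this transaction dirtied it */

        get_write_access(&h, &untouched);
        forget(&h, &untouched);         /* no refund: it was never modified */

        printf("credits = %d\n", h.credits);    /* back to 10 */
        return 0;
}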
index a8173081f83124d316181460f06645514490355a..e0139786f717839ff5c7b5f542449593c1fcab54 100644 (file)
@@ -519,22 +519,6 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 
        jbd_debug (3, "JBD: commit phase 2\n");
 
-       /*
-        * First, drop modified flag: all accesses to the buffers
-        * will be tracked for a new trasaction only -bzzz
-        */
-       spin_lock(&journal->j_list_lock);
-       if (commit_transaction->t_buffers) {
-               new_jh = jh = commit_transaction->t_buffers->b_tnext;
-               do {
-                       J_ASSERT_JH(new_jh, new_jh->b_modified == 1 ||
-                                       new_jh->b_modified == 0);
-                       new_jh->b_modified = 0;
-                       new_jh = new_jh->b_tnext;
-               } while (new_jh != jh);
-       }
-       spin_unlock(&journal->j_list_lock);
-
        /*
         * Now start flushing things to disk, in the order they appear
         * on the transaction lists.  Data blocks go first.
@@ -584,6 +568,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
        stats.u.run.rs_blocks = commit_transaction->t_outstanding_credits;
        stats.u.run.rs_blocks_logged = 0;
 
+       J_ASSERT(commit_transaction->t_nr_buffers <=
+                commit_transaction->t_outstanding_credits);
+
        descriptor = NULL;
        bufs = 0;
        while (commit_transaction->t_buffers) {
index 954cff001df6d1a9bf51a964774bc3d3eef13b79..53632e3e8457e6227a182b28ea0d75a49880c5f6 100644 (file)
@@ -534,7 +534,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
        if (!tid_geq(journal->j_commit_request, tid)) {
                printk(KERN_EMERG
                       "%s: error: j_commit_request=%d, tid=%d\n",
-                      __FUNCTION__, journal->j_commit_request, tid);
+                      __func__, journal->j_commit_request, tid);
        }
        spin_unlock(&journal->j_state_lock);
 #endif
@@ -599,7 +599,7 @@ int jbd2_journal_bmap(journal_t *journal, unsigned long blocknr,
 
                        printk(KERN_ALERT "%s: journal block not found "
                                        "at offset %lu on %s\n",
-                               __FUNCTION__,
+                               __func__,
                                blocknr,
                                bdevname(journal->j_dev, b));
                        err = -EIO;
@@ -904,19 +904,10 @@ static void jbd2_stats_proc_init(journal_t *journal)
        snprintf(name, sizeof(name) - 1, "%s", bdevname(journal->j_dev, name));
        journal->j_proc_entry = proc_mkdir(name, proc_jbd2_stats);
        if (journal->j_proc_entry) {
-               struct proc_dir_entry *p;
-               p = create_proc_entry("history", S_IRUGO,
-                               journal->j_proc_entry);
-               if (p) {
-                       p->proc_fops = &jbd2_seq_history_fops;
-                       p->data = journal;
-                       p = create_proc_entry("info", S_IRUGO,
-                                               journal->j_proc_entry);
-                       if (p) {
-                               p->proc_fops = &jbd2_seq_info_fops;
-                               p->data = journal;
-                       }
-               }
+               proc_create_data("history", S_IRUGO, journal->j_proc_entry,
+                                &jbd2_seq_history_fops, journal);
+               proc_create_data("info", S_IRUGO, journal->j_proc_entry,
+                                &jbd2_seq_info_fops, journal);
        }
 }
 
@@ -1006,13 +997,14 @@ fail:
  */
 
 /**
- *  journal_t * jbd2_journal_init_dev() - creates an initialises a journal structure
+ *  journal_t * jbd2_journal_init_dev() - creates and initialises a journal structure
  *  @bdev: Block device on which to create the journal
  *  @fs_dev: Device which hold journalled filesystem for this journal.
  *  @start: Block nr Start of journal.
  *  @len:  Length of the journal in blocks.
  *  @blocksize: blocksize of journalling device
- *  @returns: a newly created journal_t *
+ *
+ *  Returns: a newly created journal_t *
  *
  *  jbd2_journal_init_dev creates a journal which maps a fixed contiguous
  *  range of blocks on an arbitrary block device.
@@ -1036,7 +1028,7 @@ journal_t * jbd2_journal_init_dev(struct block_device *bdev,
        journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
        if (!journal->j_wbuf) {
                printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
-                       __FUNCTION__);
+                       __func__);
                kfree(journal);
                journal = NULL;
                goto out;
@@ -1092,7 +1084,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
        journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
        if (!journal->j_wbuf) {
                printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
-                       __FUNCTION__);
+                       __func__);
                kfree(journal);
                return NULL;
        }
@@ -1101,7 +1093,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
        /* If that failed, give up */
        if (err) {
                printk(KERN_ERR "%s: Cannnot locate journal superblock\n",
-                      __FUNCTION__);
+                      __func__);
                kfree(journal);
                return NULL;
        }
@@ -1187,7 +1179,7 @@ int jbd2_journal_create(journal_t *journal)
                 */
                printk(KERN_EMERG
                       "%s: creation of journal on external device!\n",
-                      __FUNCTION__);
+                      __func__);
                BUG();
        }
 
@@ -1985,9 +1977,10 @@ static int journal_init_jbd2_journal_head_cache(void)
 
 static void jbd2_journal_destroy_jbd2_journal_head_cache(void)
 {
-       J_ASSERT(jbd2_journal_head_cache != NULL);
-       kmem_cache_destroy(jbd2_journal_head_cache);
-       jbd2_journal_head_cache = NULL;
+       if (jbd2_journal_head_cache) {
+               kmem_cache_destroy(jbd2_journal_head_cache);
+               jbd2_journal_head_cache = NULL;
+       }
 }
 
 /*
@@ -2006,7 +1999,7 @@ static struct journal_head *journal_alloc_journal_head(void)
                jbd_debug(1, "out of memory for journal_head\n");
                if (time_after(jiffies, last_warning + 5*HZ)) {
                        printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
-                              __FUNCTION__);
+                              __func__);
                        last_warning = jiffies;
                }
                while (!ret) {
@@ -2143,13 +2136,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
                        if (jh->b_frozen_data) {
                                printk(KERN_WARNING "%s: freeing "
                                                "b_frozen_data\n",
-                                               __FUNCTION__);
+                                               __func__);
                                jbd2_free(jh->b_frozen_data, bh->b_size);
                        }
                        if (jh->b_committed_data) {
                                printk(KERN_WARNING "%s: freeing "
                                                "b_committed_data\n",
-                                               __FUNCTION__);
+                                               __func__);
                                jbd2_free(jh->b_committed_data, bh->b_size);
                        }
                        bh->b_private = NULL;
@@ -2314,10 +2307,12 @@ static int __init journal_init(void)
        BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024);
 
        ret = journal_init_caches();
-       if (ret != 0)
+       if (ret == 0) {
+               jbd2_create_debugfs_entry();
+               jbd2_create_jbd_stats_proc_entry();
+       } else {
                jbd2_journal_destroy_caches();
-       jbd2_create_debugfs_entry();
-       jbd2_create_jbd_stats_proc_entry();
+       }
        return ret;
 }
 
index 2e1453a5e998d6a8dc606015a0b2ee953d365386..257ff26257655f4573669a5ab6cb7ee817e0371b 100644 (file)
@@ -139,7 +139,7 @@ repeat:
 oom:
        if (!journal_oom_retry)
                return -ENOMEM;
-       jbd_debug(1, "ENOMEM in %s, retrying\n", __FUNCTION__);
+       jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
        yield();
        goto repeat;
 }
@@ -167,138 +167,121 @@ static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
        return NULL;
 }
 
+void jbd2_journal_destroy_revoke_caches(void)
+{
+       if (jbd2_revoke_record_cache) {
+               kmem_cache_destroy(jbd2_revoke_record_cache);
+               jbd2_revoke_record_cache = NULL;
+       }
+       if (jbd2_revoke_table_cache) {
+               kmem_cache_destroy(jbd2_revoke_table_cache);
+               jbd2_revoke_table_cache = NULL;
+       }
+}
+
 int __init jbd2_journal_init_revoke_caches(void)
 {
+       J_ASSERT(!jbd2_revoke_record_cache);
+       J_ASSERT(!jbd2_revoke_table_cache);
+
        jbd2_revoke_record_cache = kmem_cache_create("jbd2_revoke_record",
                                           sizeof(struct jbd2_revoke_record_s),
                                           0,
                                           SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY,
                                           NULL);
        if (!jbd2_revoke_record_cache)
-               return -ENOMEM;
+               goto record_cache_failure;
 
        jbd2_revoke_table_cache = kmem_cache_create("jbd2_revoke_table",
                                           sizeof(struct jbd2_revoke_table_s),
                                           0, SLAB_TEMPORARY, NULL);
-       if (!jbd2_revoke_table_cache) {
-               kmem_cache_destroy(jbd2_revoke_record_cache);
-               jbd2_revoke_record_cache = NULL;
-               return -ENOMEM;
-       }
+       if (!jbd2_revoke_table_cache)
+               goto table_cache_failure;
        return 0;
+table_cache_failure:
+       jbd2_journal_destroy_revoke_caches();
+record_cache_failure:
+               return -ENOMEM;
 }
 
-void jbd2_journal_destroy_revoke_caches(void)
+static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
 {
-       kmem_cache_destroy(jbd2_revoke_record_cache);
-       jbd2_revoke_record_cache = NULL;
-       kmem_cache_destroy(jbd2_revoke_table_cache);
-       jbd2_revoke_table_cache = NULL;
-}
-
-/* Initialise the revoke table for a given journal to a given size. */
-
-int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
-{
-       int shift, tmp;
+       int shift = 0;
+       int tmp = hash_size;
+       struct jbd2_revoke_table_s *table;
 
-       J_ASSERT (journal->j_revoke_table[0] == NULL);
+       table = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
+       if (!table)
+               goto out;
 
-       shift = 0;
-       tmp = hash_size;
        while((tmp >>= 1UL) != 0UL)
                shift++;
 
-       journal->j_revoke_table[0] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
-       if (!journal->j_revoke_table[0])
-               return -ENOMEM;
-       journal->j_revoke = journal->j_revoke_table[0];
-
-       /* Check that the hash_size is a power of two */
-       J_ASSERT(is_power_of_2(hash_size));
-
-       journal->j_revoke->hash_size = hash_size;
-
-       journal->j_revoke->hash_shift = shift;
-
-       journal->j_revoke->hash_table =
+       table->hash_size = hash_size;
+       table->hash_shift = shift;
+       table->hash_table =
                kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
-       if (!journal->j_revoke->hash_table) {
-               kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
-               journal->j_revoke = NULL;
-               return -ENOMEM;
+       if (!table->hash_table) {
+               kmem_cache_free(jbd2_revoke_table_cache, table);
+               table = NULL;
+               goto out;
        }
 
        for (tmp = 0; tmp < hash_size; tmp++)
-               INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
+               INIT_LIST_HEAD(&table->hash_table[tmp]);
 
-       journal->j_revoke_table[1] = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
-       if (!journal->j_revoke_table[1]) {
-               kfree(journal->j_revoke_table[0]->hash_table);
-               kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
-               return -ENOMEM;
+out:
+       return table;
+}
+
+static void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
+{
+       int i;
+       struct list_head *hash_list;
+
+       for (i = 0; i < table->hash_size; i++) {
+               hash_list = &table->hash_table[i];
+               J_ASSERT(list_empty(hash_list));
        }
 
-       journal->j_revoke = journal->j_revoke_table[1];
+       kfree(table->hash_table);
+       kmem_cache_free(jbd2_revoke_table_cache, table);
+}
 
-       /* Check that the hash_size is a power of two */
+/* Initialise the revoke table for a given journal to a given size. */
+int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
+{
+       J_ASSERT(journal->j_revoke_table[0] == NULL);
        J_ASSERT(is_power_of_2(hash_size));
 
-       journal->j_revoke->hash_size = hash_size;
-
-       journal->j_revoke->hash_shift = shift;
+       journal->j_revoke_table[0] = jbd2_journal_init_revoke_table(hash_size);
+       if (!journal->j_revoke_table[0])
+               goto fail0;
 
-       journal->j_revoke->hash_table =
-               kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
-       if (!journal->j_revoke->hash_table) {
-               kfree(journal->j_revoke_table[0]->hash_table);
-               kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[0]);
-               kmem_cache_free(jbd2_revoke_table_cache, journal->j_revoke_table[1]);
-               journal->j_revoke = NULL;
-               return -ENOMEM;
-       }
+       journal->j_revoke_table[1] = jbd2_journal_init_revoke_table(hash_size);
+       if (!journal->j_revoke_table[1])
+               goto fail1;
 
-       for (tmp = 0; tmp < hash_size; tmp++)
-               INIT_LIST_HEAD(&journal->j_revoke->hash_table[tmp]);
+       journal->j_revoke = journal->j_revoke_table[1];
 
        spin_lock_init(&journal->j_revoke_lock);
 
        return 0;
-}
 
-/* Destoy a journal's revoke table.  The table must already be empty! */
+fail1:
+       jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
+fail0:
+       return -ENOMEM;
+}
 
+/* Destroy a journal's revoke table.  The table must already be empty! */
 void jbd2_journal_destroy_revoke(journal_t *journal)
 {
-       struct jbd2_revoke_table_s *table;
-       struct list_head *hash_list;
-       int i;
-
-       table = journal->j_revoke_table[0];
-       if (!table)
-               return;
-
-       for (i=0; i<table->hash_size; i++) {
-               hash_list = &table->hash_table[i];
-               J_ASSERT (list_empty(hash_list));
-       }
-
-       kfree(table->hash_table);
-       kmem_cache_free(jbd2_revoke_table_cache, table);
-       journal->j_revoke = NULL;
-
-       table = journal->j_revoke_table[1];
-       if (!table)
-               return;
-
-       for (i=0; i<table->hash_size; i++) {
-               hash_list = &table->hash_table[i];
-               J_ASSERT (list_empty(hash_list));
-       }
-
-       kfree(table->hash_table);
-       kmem_cache_free(jbd2_revoke_table_cache, table);
        journal->j_revoke = NULL;
+       if (journal->j_revoke_table[0])
+               jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
+       if (journal->j_revoke_table[1])
+               jbd2_journal_destroy_revoke_table(journal->j_revoke_table[1]);
 }
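
Aside: the revoke-table rework factors the duplicated setup into jbd2_journal_init_revoke_table() with a matching destroy helper, so failing to allocate the second table just unwinds the first. A simplified userspace sketch of the same allocate-twice-with-unwind structure (sizes and field names are made up):

#include <stdio.h>
#include <stdlib.h>

struct revoke_table {
        int hash_size;
        void **hash_table;
};

static struct revoke_table *init_table(int hash_size)
{
        struct revoke_table *t = malloc(sizeof(*t));

        if (!t)
                return NULL;
        t->hash_size = hash_size;
        t->hash_table = calloc(hash_size, sizeof(void *));
        if (!t->hash_table) {
                free(t);
                return NULL;
        }
        return t;
}

static void destroy_table(struct revoke_table *t)
{
        free(t->hash_table);
        free(t);
}

int main(void)
{
        struct revoke_table *t0 = init_table(256);
        struct revoke_table *t1;

        if (!t0)
                return 1;
        t1 = init_table(256);
        if (!t1) {                      /* unwind the first table on failure */
                destroy_table(t0);
                return 1;
        }
        printf("both tables ready (%d buckets each)\n", t0->hash_size);
        destroy_table(t0);
        destroy_table(t1);
        return 0;
}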
 
 
index b9b0b6f899b91b2000cc4f34e72c97931670fe53..d6e006e67804d5facfbad041797e87586266751b 100644 (file)
@@ -617,6 +617,12 @@ repeat:
            jh->b_next_transaction == transaction)
                goto done;
 
+       /*
+        * this is the first time this transaction is touching this buffer,
+        * reset the modified flag
+        */
+       jh->b_modified = 0;
+
        /*
         * If there is already a copy-out version of this buffer, then we don't
         * need to make another one
@@ -690,7 +696,7 @@ repeat:
                                if (!frozen_buffer) {
                                        printk(KERN_EMERG
                                               "%s: OOM for frozen_buffer\n",
-                                              __FUNCTION__);
+                                              __func__);
                                        JBUFFER_TRACE(jh, "oom!");
                                        error = -ENOMEM;
                                        jbd_lock_bh_state(bh);
@@ -829,9 +835,16 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
 
        if (jh->b_transaction == NULL) {
                jh->b_transaction = transaction;
+
+               /* first access by this transaction */
+               jh->b_modified = 0;
+
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
        } else if (jh->b_transaction == journal->j_committing_transaction) {
+               /* first access by this transaction */
+               jh->b_modified = 0;
+
                JBUFFER_TRACE(jh, "set next transaction");
                jh->b_next_transaction = transaction;
        }
@@ -901,7 +914,7 @@ repeat:
                committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
                if (!committed_data) {
                        printk(KERN_EMERG "%s: No memory for committed data\n",
-                               __FUNCTION__);
+                               __func__);
                        err = -ENOMEM;
                        goto out;
                }
@@ -1230,6 +1243,7 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
        struct journal_head *jh;
        int drop_reserve = 0;
        int err = 0;
+       int was_modified = 0;
 
        BUFFER_TRACE(bh, "entry");
 
@@ -1248,6 +1262,9 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
                goto not_jbd;
        }
 
+       /* keep track of whether or not this transaction modified us */
+       was_modified = jh->b_modified;
+
        /*
         * The buffer's going from the transaction, we must drop
         * all references -bzzz
@@ -1265,7 +1282,12 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
 
                JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
 
-               drop_reserve = 1;
+               /*
+                * we only want to drop a reference if this transaction
+                * modified the buffer
+                */
+               if (was_modified)
+                       drop_reserve = 1;
 
                /*
                 * We are no longer going to journal this buffer.
@@ -1305,7 +1327,13 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
                if (jh->b_next_transaction) {
                        J_ASSERT(jh->b_next_transaction == transaction);
                        jh->b_next_transaction = NULL;
-                       drop_reserve = 1;
+
+                       /*
+                        * only drop a reference if this transaction modified
+                        * the buffer
+                        */
+                       if (was_modified)
+                               drop_reserve = 1;
                }
        }
 
@@ -1434,7 +1462,8 @@ int jbd2_journal_stop(handle_t *handle)
        return err;
 }
 
-/**int jbd2_journal_force_commit() - force any uncommitted transactions
+/**
+ * int jbd2_journal_force_commit() - force any uncommitted transactions
  * @journal: journal to force
  *
  * For synchronous operations: force any uncommitted transactions
@@ -2077,7 +2106,7 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
        jh->b_transaction = jh->b_next_transaction;
        jh->b_next_transaction = NULL;
        __jbd2_journal_file_buffer(jh, jh->b_transaction,
-                               was_dirty ? BJ_Metadata : BJ_Reserved);
+                               jh->b_modified ? BJ_Metadata : BJ_Reserved);
        J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
 
        if (was_dirty)
index 9645275023e68c5b93f582a10d31d541a4a79884..a113ecc3bafeeb5f74172d8d5eb98e09f6eb6979 100644 (file)
        do {                                                            \
                printk(JFFS2_ERR_MSG_PREFIX                             \
                        " (%d) %s: " fmt, task_pid_nr(current),         \
-                       __FUNCTION__ , ##__VA_ARGS__);                  \
+                       __func__ , ##__VA_ARGS__);                      \
        } while(0)
 
 #define JFFS2_WARNING(fmt, ...)                                                \
        do {                                                            \
                printk(JFFS2_WARN_MSG_PREFIX                            \
                        " (%d) %s: " fmt, task_pid_nr(current),         \
-                       __FUNCTION__ , ##__VA_ARGS__);                  \
+                       __func__ , ##__VA_ARGS__);                      \
        } while(0)
 
 #define JFFS2_NOTICE(fmt, ...)                                         \
        do {                                                            \
                printk(JFFS2_NOTICE_MSG_PREFIX                          \
                        " (%d) %s: " fmt, task_pid_nr(current),         \
-                       __FUNCTION__ , ##__VA_ARGS__);                  \
+                       __func__ , ##__VA_ARGS__);                      \
        } while(0)
 
 #define JFFS2_DEBUG(fmt, ...)                                          \
        do {                                                            \
                printk(JFFS2_DBG_MSG_PREFIX                             \
                        " (%d) %s: " fmt, task_pid_nr(current),         \
-                       __FUNCTION__ , ##__VA_ARGS__);                  \
+                       __func__ , ##__VA_ARGS__);                      \
        } while(0)
 
 /*
index 05531f291bfa6a5320e798f735aab4c7af5cd709..082e844ab2db1fa11ce5a86453f5bdce957df7b2 100644 (file)
@@ -82,7 +82,7 @@ static int is_xattr_datum_unchecked(struct jffs2_sb_info *c, struct jffs2_xattr_
 static void unload_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *xd)
 {
        /* must be called under down_write(xattr_sem) */
-       D1(dbg_xattr("%s: xid=%u, version=%u\n", __FUNCTION__, xd->xid, xd->version));
+       D1(dbg_xattr("%s: xid=%u, version=%u\n", __func__, xd->xid, xd->version));
        if (xd->xname) {
                c->xdatum_mem_usage -= (xd->name_len + 1 + xd->value_len);
                kfree(xd->xname);
@@ -1252,7 +1252,7 @@ int jffs2_garbage_collect_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_
        rc = jffs2_reserve_space_gc(c, totlen, &length, JFFS2_SUMMARY_XREF_SIZE);
        if (rc) {
                JFFS2_WARNING("%s: jffs2_reserve_space_gc() = %d, request = %u\n",
-                             __FUNCTION__, rc, totlen);
+                             __func__, rc, totlen);
                rc = rc ? rc : -EBADFD;
                goto out;
        }
index 887f5759e53643bdba5086363d587d82a789c5d3..bf6ab19b86ee1e2766dad7165800c70897dde984 100644 (file)
@@ -89,7 +89,7 @@ void jfs_proc_init(void)
 {
        int i;
 
-       if (!(base = proc_mkdir("jfs", proc_root_fs)))
+       if (!(base = proc_mkdir("fs/jfs", NULL)))
                return;
        base->owner = THIS_MODULE;
 
@@ -109,7 +109,7 @@ void jfs_proc_clean(void)
        if (base) {
                for (i = 0; i < NPROCENT; i++)
                        remove_proc_entry(Entries[i].name, base);
-               remove_proc_entry("jfs", proc_root_fs);
+               remove_proc_entry("fs/jfs", NULL);
        }
 }
 
index 40b16f23e49af4eb99c8060d5e52aad370e0613c..5df517b81f3f2a7911e1518c33021e9eb557c85e 100644 (file)
@@ -573,7 +573,7 @@ again:
                /* Ensure the resulting lock will get added to granted list */
                fl->fl_flags |= FL_SLEEP;
                if (do_vfs_lock(fl) < 0)
-                       printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__);
+                       printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
                up_read(&host->h_rwsem);
                fl->fl_flags = fl_flags;
                status = 0;
index 4d81553d29485e69dd19e746a1edadf4618c8d91..81aca859bfde909f2574c2c7f9e9e02608a1a165 100644 (file)
@@ -752,7 +752,7 @@ nlmsvc_grant_blocked(struct nlm_block *block)
                return;
        default:
                printk(KERN_WARNING "lockd: unexpected error %d in %s!\n",
-                               -error, __FUNCTION__);
+                               -error, __func__);
                nlmsvc_insert_block(block, 10 * HZ);
                nlmsvc_release_block(block);
                return;
index 30f7d0ae221551b95024ceae92ae9078f30b5484..05ff4f1d7026f4fbed45d09208c4b8cda3a0a72e 100644 (file)
@@ -609,7 +609,7 @@ error_inode:
        if (corrupt < 0) {
                fat_fs_panic(new_dir->i_sb,
                             "%s: Filesystem corrupted (i_pos %lld)",
-                            __FUNCTION__, sinfo.i_pos);
+                            __func__, sinfo.i_pos);
        }
        goto out;
 }
@@ -653,7 +653,7 @@ static const struct inode_operations msdos_dir_inode_operations = {
        .mkdir          = msdos_mkdir,
        .rmdir          = msdos_rmdir,
        .rename         = msdos_rename,
-       .setattr        = fat_notify_change,
+       .setattr        = fat_setattr,
        .getattr        = fat_getattr,
 };
 
index e179f71bfcb058df613f83cb1ff923d47ffc4618..32fd9655485b7fc9b8c8616ad6a61f6ac243b328 100644 (file)
@@ -30,6 +30,7 @@
 #include <linux/capability.h>
 #include <linux/file.h>
 #include <linux/fcntl.h>
+#include <linux/device_cgroup.h>
 #include <asm/namei.h>
 #include <asm/uaccess.h>
 
@@ -281,6 +282,10 @@ int permission(struct inode *inode, int mask, struct nameidata *nd)
        if (retval)
                return retval;
 
+       retval = devcgroup_inode_permission(inode, mask);
+       if (retval)
+               return retval;
+
        return security_inode_permission(inode, mask, nd);
 }
 
@@ -2028,6 +2033,10 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
        if (!dir->i_op || !dir->i_op->mknod)
                return -EPERM;
 
+       error = devcgroup_inode_mknod(mode, dev);
+       if (error)
+               return error;
+
        error = security_inode_mknod(dir, dentry, mode, dev);
        if (error)
                return error;
index f48f98110c30aefaa114ff0f15c0b7cece3f938d..4fc302c2a0e0f886e4026288bcdc50888ddf3551 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/smp_lock.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
-#include <linux/quotaops.h>
 #include <linux/acct.h>
 #include <linux/capability.h>
 #include <linux/cpumask.h>
@@ -1084,7 +1083,6 @@ static int do_umount(struct vfsmount *mnt, int flags)
                down_write(&sb->s_umount);
                if (!(sb->s_flags & MS_RDONLY)) {
                        lock_kernel();
-                       DQUOT_OFF(sb);
                        retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
                        unlock_kernel();
                }
@@ -1178,17 +1176,6 @@ static int mount_is_safe(struct nameidata *nd)
 #endif
 }
 
-static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry)
-{
-       while (1) {
-               if (d == dentry)
-                       return 1;
-               if (d == NULL || d == d->d_parent)
-                       return 0;
-               d = d->d_parent;
-       }
-}
-
 struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
                                        int flag)
 {
@@ -1205,7 +1192,7 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry,
 
        p = mnt;
        list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) {
-               if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry))
+               if (!is_subdir(r->mnt_mountpoint, dentry))
                        continue;
 
                for (s = r; s; s = next_mnt(s, r)) {
@@ -2342,10 +2329,10 @@ void __init mnt_init(void)
        err = sysfs_init();
        if (err)
                printk(KERN_WARNING "%s: sysfs_init error: %d\n",
-                       __FUNCTION__, err);
+                       __func__, err);
        fs_kobj = kobject_create_and_add("fs", NULL);
        if (!fs_kobj)
-               printk(KERN_WARNING "%s: kobj create error\n", __FUNCTION__);
+               printk(KERN_WARNING "%s: kobj create error\n", __func__);
        init_rootfs();
        init_mount_tree();
 }
index fbbb9f7afa1ab2f8a4a020c904f8b2c11304f431..2e5ab1204dece88feb8f1709cf37323ff459658c 100644 (file)
@@ -107,12 +107,6 @@ static const struct super_operations ncp_sops =
        .show_options   = ncp_show_options,
 };
 
-extern struct dentry_operations ncp_root_dentry_operations;
-#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
-extern const struct address_space_operations ncp_symlink_aops;
-extern int ncp_symlink(struct inode*, struct dentry*, const char*);
-#endif
-
 /*
  * Fill in the ncpfs-specific information in the inode.
  */
index ad8f167e54bc5e6069526b7406d26ed72fddb2ec..3a97c95e1ca243111330fb82a69130c6353b7368 100644 (file)
@@ -389,11 +389,11 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp,
                                struct dentry* dentry = inode->i_sb->s_root;
 
                                if (dentry) {
-                                       struct inode* inode = dentry->d_inode;
+                                       struct inode* s_inode = dentry->d_inode;
                                
-                                       if (inode) {
-                                               sr.volNumber = NCP_FINFO(inode)->volNumber;
-                                               sr.dirEntNum = NCP_FINFO(inode)->dirEntNum;
+                                       if (s_inode) {
+                                               sr.volNumber = NCP_FINFO(s_inode)->volNumber;
+                                               sr.dirEntNum = NCP_FINFO(s_inode)->dirEntNum;
                                                sr.namespace = server->name_space[sr.volNumber];
                                        } else
                                                DPRINTK("ncpfs: s_root->d_inode==NULL\n");
@@ -439,12 +439,12 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp,
                        dentry = inode->i_sb->s_root;
                        server->root_setuped = 1;
                        if (dentry) {
-                               struct inode* inode = dentry->d_inode;
+                               struct inode* s_inode = dentry->d_inode;
                                
-                               if (inode) {
+                               if (s_inode) {
-                                       NCP_FINFO(inode)->volNumber = vnum;
-                                       NCP_FINFO(inode)->dirEntNum = de;
-                                       NCP_FINFO(inode)->DosDirNum = dosde;
+                                       NCP_FINFO(s_inode)->volNumber = vnum;
+                                       NCP_FINFO(s_inode)->dirEntNum = de;
+                                       NCP_FINFO(s_inode)->DosDirNum = dosde;
                                } else
                                        DPRINTK("ncpfs: s_root->d_inode==NULL\n");
                        } else
@@ -519,7 +519,6 @@ static int __ncp_ioctl(struct inode *inode, struct file *filp,
                }
                {
                        struct ncp_lock_ioctl    rqdata;
-                       int result;
 
                        if (copy_from_user(&rqdata, argp, sizeof(rqdata)))
                                return -EFAULT;
index df6d60bdfcd3f0087a8bad1aed6707435ff8c2dc..97645f112114e0c2f1aa4304a1f85046ec66ebb6 100644 (file)
@@ -102,48 +102,47 @@ static inline void ncp_init_request_s(struct ncp_server *server, int subfunction
 }
 
 static inline char *
- ncp_reply_data(struct ncp_server *server, int offset)
+ncp_reply_data(struct ncp_server *server, int offset)
 {
        return &(server->packet[sizeof(struct ncp_reply_header) + offset]);
 }
 
-static inline __u8 BVAL(void* data)
+static inline u8 BVAL(void *data)
 {
-       return get_unaligned((__u8*)data);
+       return *(u8 *)data;
 }
 
-static __u8
- ncp_reply_byte(struct ncp_server *server, int offset)
+static u8 ncp_reply_byte(struct ncp_server *server, int offset)
 {
-       return get_unaligned((__u8 *) ncp_reply_data(server, offset));
+       return *(u8 *)ncp_reply_data(server, offset);
 }
 
-static inline __u16 WVAL_LH(void* data)
+static inline u16 WVAL_LH(void *data)
 {
-       return le16_to_cpu(get_unaligned((__le16*)data));
+       return get_unaligned_le16(data);
 }
 
-static __u16
- ncp_reply_le16(struct ncp_server *server, int offset)
+static u16
+ncp_reply_le16(struct ncp_server *server, int offset)
 {
-       return le16_to_cpu(get_unaligned((__le16 *) ncp_reply_data(server, offset)));
+       return get_unaligned_le16(ncp_reply_data(server, offset));
 }
 
-static __u16
- ncp_reply_be16(struct ncp_server *server, int offset)
+static u16
+ncp_reply_be16(struct ncp_server *server, int offset)
 {
-       return be16_to_cpu(get_unaligned((__be16 *) ncp_reply_data(server, offset)));
+       return get_unaligned_be16(ncp_reply_data(server, offset));
 }
 
-static inline __u32 DVAL_LH(void* data)
+static inline u32 DVAL_LH(void *data)
 {
-       return le32_to_cpu(get_unaligned((__le32*)data));
+       return get_unaligned_le32(data);
 }
 
 static __le32
- ncp_reply_dword(struct ncp_server *server, int offset)
+ncp_reply_dword(struct ncp_server *server, int offset)
 {
-       return get_unaligned((__le32 *) ncp_reply_data(server, offset));
+       return get_unaligned((__le32 *)ncp_reply_data(server, offset));
 }
 
 static inline __u32 ncp_reply_dword_lh(struct ncp_server* server, int offset) {
@@ -1006,8 +1005,8 @@ ncp_read_bounce(struct ncp_server *server, const char *file_id,
        result = ncp_request2(server, 72, bounce, bufsize);
        ncp_unlock_server(server);
        if (!result) {
-               int len = be16_to_cpu(get_unaligned((__be16*)((char*)bounce + 
-                         sizeof(struct ncp_reply_header))));
+               int len = get_unaligned_be16((char *)bounce +
+                         sizeof(struct ncp_reply_header));
                result = -EIO;
                if (len <= to_read) {
                        char* source;
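
The ncpfs byte-order helpers above are rewritten on top of the combined accessors get_unaligned_le16/be16/le32, which fold the unaligned load and the endianness conversion into one call. As a rough stand-alone sketch of what the little-endian 16-bit variant computes (not the kernel implementation):

#include <stdint.h>

static inline uint16_t example_get_unaligned_le16(const void *p)
{
        const uint8_t *b = p;

        /* byte 0 is least significant, independent of host endianness
         * and of the alignment of p */
        return (uint16_t)(b[0] | (b[1] << 8));
}
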
index 749a18d33599cb456fe8d1f06360de38e25a1fd6..7c0b5c21e6cf668dc791212327ebe46b8a3007bf 100644 (file)
@@ -55,7 +55,7 @@ static void nwsign(char *r_data1, char *r_data2, char *outdata) {
  unsigned int w0,w1,w2,w3;
  static int rbit[4]={0, 2, 1, 3};
 #ifdef __i386__
- unsigned int *data2=(int *)r_data2;
+ unsigned int *data2=(unsigned int *)r_data2;
 #else
  unsigned int data2[16];
  for (i=0;i<16;i++)
index f2f3b284e6dd203a31c11032a195ecc9f6237f53..89ac5bb0401c243784925cef21199036558a2c0d 100644 (file)
@@ -1321,6 +1321,7 @@ static const struct file_operations nfs_server_list_fops = {
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
+       .owner          = THIS_MODULE,
 };
 
 static int nfs_volume_list_open(struct inode *inode, struct file *file);
@@ -1341,6 +1342,7 @@ static const struct file_operations nfs_volume_list_fops = {
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
+       .owner          = THIS_MODULE,
 };
 
 /*
@@ -1500,33 +1502,29 @@ int __init nfs_fs_proc_init(void)
 {
        struct proc_dir_entry *p;
 
-       proc_fs_nfs = proc_mkdir("nfsfs", proc_root_fs);
+       proc_fs_nfs = proc_mkdir("fs/nfsfs", NULL);
        if (!proc_fs_nfs)
                goto error_0;
 
        proc_fs_nfs->owner = THIS_MODULE;
 
        /* a file of servers with which we're dealing */
-       p = create_proc_entry("servers", S_IFREG|S_IRUGO, proc_fs_nfs);
+       p = proc_create("servers", S_IFREG|S_IRUGO,
+                       proc_fs_nfs, &nfs_server_list_fops);
        if (!p)
                goto error_1;
 
-       p->proc_fops = &nfs_server_list_fops;
-       p->owner = THIS_MODULE;
-
        /* a file of volumes that we have mounted */
-       p = create_proc_entry("volumes", S_IFREG|S_IRUGO, proc_fs_nfs);
+       p = proc_create("volumes", S_IFREG|S_IRUGO,
+                       proc_fs_nfs, &nfs_volume_list_fops);
        if (!p)
                goto error_2;
-
-       p->proc_fops = &nfs_volume_list_fops;
-       p->owner = THIS_MODULE;
        return 0;
 
 error_2:
        remove_proc_entry("servers", proc_fs_nfs);
 error_1:
-       remove_proc_entry("nfsfs", proc_root_fs);
+       remove_proc_entry("fs/nfsfs", NULL);
 error_0:
        return -ENOMEM;
 }
@@ -1538,7 +1536,7 @@ void nfs_fs_proc_exit(void)
 {
        remove_proc_entry("volumes", proc_fs_nfs);
        remove_proc_entry("servers", proc_fs_nfs);
-       remove_proc_entry("nfsfs", proc_root_fs);
+       remove_proc_entry("fs/nfsfs", NULL);
 }
 
 #endif /* CONFIG_PROC_FS */
index fa220dc74609abc1635bbc55b740bd523156ccf0..7226a506f3ca55ea587ece913ec8a64982ff64be 100644 (file)
@@ -1575,6 +1575,11 @@ static int nfs_compare_super(struct super_block *sb, void *data)
        return nfs_compare_mount_options(sb, server, mntflags);
 }
 
+static int nfs_bdi_register(struct nfs_server *server)
+{
+       return bdi_register_dev(&server->backing_dev_info, server->s_dev);
+}
+
 static int nfs_get_sb(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt)
 {
@@ -1617,6 +1622,10 @@ static int nfs_get_sb(struct file_system_type *fs_type,
        if (s->s_fs_info != server) {
                nfs_free_server(server);
                server = NULL;
+       } else {
+               error = nfs_bdi_register(server);
+               if (error)
+                       goto error_splat_super;
        }
 
        if (!s->s_root) {
@@ -1664,6 +1673,7 @@ static void nfs_kill_super(struct super_block *s)
 {
        struct nfs_server *server = NFS_SB(s);
 
+       bdi_unregister(&server->backing_dev_info);
        kill_anon_super(s);
        nfs_free_server(server);
 }
@@ -1708,6 +1718,10 @@ static int nfs_xdev_get_sb(struct file_system_type *fs_type, int flags,
        if (s->s_fs_info != server) {
                nfs_free_server(server);
                server = NULL;
+       } else {
+               error = nfs_bdi_register(server);
+               if (error)
+                       goto error_splat_super;
        }
 
        if (!s->s_root) {
@@ -1984,6 +1998,10 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
        if (s->s_fs_info != server) {
                nfs_free_server(server);
                server = NULL;
+       } else {
+               error = nfs_bdi_register(server);
+               if (error)
+                       goto error_splat_super;
        }
 
        if (!s->s_root) {
@@ -2070,6 +2088,10 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
        if (s->s_fs_info != server) {
                nfs_free_server(server);
                server = NULL;
+       } else {
+               error = nfs_bdi_register(server);
+               if (error)
+                       goto error_splat_super;
        }
 
        if (!s->s_root) {
@@ -2149,6 +2171,10 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
        if (s->s_fs_info != server) {
                nfs_free_server(server);
                server = NULL;
+       } else {
+               error = nfs_bdi_register(server);
+               if (error)
+                       goto error_splat_super;
        }
 
        if (!s->s_root) {
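
The same three-line block is added to every nfs ->get_sb variant: once it is clear that this superblock owns the freshly allocated server (s->s_fs_info == server), its backing_dev_info is registered against the superblock's anonymous device number, and nfs_kill_super() unregisters it before the server is freed. A rough sketch of that ownership-then-register step, using hypothetical example_* stand-ins rather than the real nfs types:

#include <linux/fs.h>
#include <linux/backing-dev.h>

struct example_server {                         /* illustrative stand-in */
        struct backing_dev_info backing_dev_info;
        dev_t s_dev;
};
void example_free_server(struct example_server *server);

static int example_sget_tail(struct super_block *s, struct example_server *server)
{
        if (s->s_fs_info != server) {
                /* sget() handed back an existing superblock; drop the duplicate */
                example_free_server(server);
                return 0;
        }
        /* this superblock owns the server: register its backing device */
        return bdi_register_dev(&server->backing_dev_info, server->s_dev);
}
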
index 562abf3380d032e96342589fa13232233698c130..0b3ffa9840c20e0530ee4f9de9aa423cb813fc52 100644 (file)
@@ -104,7 +104,7 @@ xdr_writemem(__be32 *p, const void *ptr, int nbytes)
 } while (0)
 #define RESERVE_SPACE(nbytes)   do {                            \
        p = xdr_reserve_space(xdr, nbytes);                     \
-       if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __FUNCTION__); \
+       if (!p) dprintk("NFSD: RESERVE_SPACE(%d) failed in function %s\n", (int) (nbytes), __func__); \
        BUG_ON(!p);                                             \
 } while (0)
 
@@ -134,7 +134,7 @@ xdr_error:                                      \
        p = xdr_inline_decode(xdr, nbytes); \
        if (!p) { \
                dprintk("NFSD: %s: reply buffer overflowed in line %d.\n", \
-                       __FUNCTION__, __LINE__); \
+                       __func__, __LINE__); \
                return -EIO; \
        } \
 } while (0)
index 42f3820ee8f543ca022d019b3b1de49e5907648c..5ac00c4fee919c220bee1fb2dc4c964742b079f5 100644 (file)
@@ -169,6 +169,7 @@ static const struct file_operations exports_operations = {
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = seq_release,
+       .owner          = THIS_MODULE,
 };
 
 /*----------------------------------------------------------------------------*/
@@ -801,10 +802,9 @@ static int create_proc_exports_entry(void)
        entry = proc_mkdir("fs/nfs", NULL);
        if (!entry)
                return -ENOMEM;
-       entry = create_proc_entry("fs/nfs/exports", 0, NULL);
+       entry = proc_create("exports", 0, entry, &exports_operations);
        if (!entry)
                return -ENOMEM;
-       entry->proc_fops =  &exports_operations;
        return 0;
 }
 #else /* CONFIG_PROC_FS */
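
As in the nfs client hunks above, the exports file is now registered with proc_create(), which attaches the file operations before the entry becomes visible instead of leaving a window in which the entry exists with no fops. A hedged sketch of the two idioms side by side (example_fops and all other example_* names are hypothetical):

#include <linux/fs.h>
#include <linux/proc_fs.h>

extern const struct file_operations example_fops;      /* assumed to exist elsewhere */

static int example_register(struct proc_dir_entry *parent)
{
        struct proc_dir_entry *p;

        /* old idiom: the entry is briefly live with no file operations */
        p = create_proc_entry("example_old", 0, parent);
        if (!p)
                return -ENOMEM;
        p->proc_fops = &example_fops;

        /* new idiom: the operations are in place before the entry is published */
        p = proc_create("example_new", 0, parent, &example_fops);
        if (!p)
                return -ENOMEM;
        return 0;
}
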
index 8ac37c33d1278ef7d602e5dd3aa57de98e1d2236..5e6724c1afd1d0276d7e839c9bfcbd2832fbd600 100644 (file)
@@ -45,7 +45,7 @@ static void ntfs_debug(const char *f, ...);
 extern void __ntfs_debug (const char *file, int line, const char *function,
        const char *format, ...) __attribute__ ((format (printf, 4, 5)));
 #define ntfs_debug(f, a...)                                            \
-       __ntfs_debug(__FILE__, __LINE__, __FUNCTION__, f, ##a)
+       __ntfs_debug(__FILE__, __LINE__, __func__, f, ##a)
 
 extern void ntfs_debug_dump_runlist(const runlist_element *rl);
 
@@ -58,10 +58,10 @@ extern void ntfs_debug_dump_runlist(const runlist_element *rl);
 
 extern void __ntfs_warning(const char *function, const struct super_block *sb,
                const char *fmt, ...) __attribute__ ((format (printf, 3, 4)));
-#define ntfs_warning(sb, f, a...)      __ntfs_warning(__FUNCTION__, sb, f, ##a)
+#define ntfs_warning(sb, f, a...)      __ntfs_warning(__func__, sb, f, ##a)
 
 extern void __ntfs_error(const char *function, const struct super_block *sb,
                const char *fmt, ...) __attribute__ ((format (printf, 3, 4)));
-#define ntfs_error(sb, f, a...)                __ntfs_error(__FUNCTION__, sb, f, ##a)
+#define ntfs_error(sb, f, a...)                __ntfs_error(__func__, sb, f, ##a)
 
 #endif /* _LINUX_NTFS_DEBUG_H */
index 2ad5c8b104b934c9177e162d2c20c6da211ed0d9..790defb847e71786e8b34358a11ccf5dfe1e0609 100644 (file)
@@ -1191,7 +1191,7 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol,
                if (size) {
                        page = ntfs_map_page(mftbmp_mapping,
                                        ofs >> PAGE_CACHE_SHIFT);
-                       if (unlikely(IS_ERR(page))) {
+                       if (IS_ERR(page)) {
                                ntfs_error(vol->sb, "Failed to read mft "
                                                "bitmap, aborting.");
                                return PTR_ERR(page);
@@ -2118,7 +2118,7 @@ static int ntfs_mft_record_format(const ntfs_volume *vol, const s64 mft_no)
        }
        /* Read, map, and pin the page containing the mft record. */
        page = ntfs_map_page(mft_vi->i_mapping, index);
-       if (unlikely(IS_ERR(page))) {
+       if (IS_ERR(page)) {
                ntfs_error(vol->sb, "Failed to map page containing mft record "
                                "to format 0x%llx.", (long long)mft_no);
                return PTR_ERR(page);
@@ -2519,7 +2519,7 @@ mft_rec_already_initialized:
        ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK;
        /* Read, map, and pin the page containing the mft record. */
        page = ntfs_map_page(vol->mft_ino->i_mapping, index);
-       if (unlikely(IS_ERR(page))) {
+       if (IS_ERR(page)) {
                ntfs_error(vol->sb, "Failed to map page containing allocated "
                                "mft record 0x%llx.", (long long)bit);
                err = PTR_ERR(page);
index 61a000f8524c7aab2785a0c487c1ebe5d5fadf0a..e48aba698b775a895d32c87b254dc54fe5b2006f 100644 (file)
@@ -327,7 +327,7 @@ clear_fields:
 
 static struct backing_dev_info dlmfs_backing_dev_info = {
        .ra_pages       = 0,    /* No readahead */
-       .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
 
 static struct inode *dlmfs_get_root_inode(struct super_block *sb)
index b70e7666bb2c3f725a761b268b2370e5c151ba63..7af1f05d59783cf3198771e9ea3945351deb7d7c 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -837,7 +837,7 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
        if (f->f_flags & O_DIRECT) {
                if (!f->f_mapping->a_ops ||
                    ((!f->f_mapping->a_ops->direct_IO) &&
-                   (!f->f_mapping->a_ops->get_xip_page))) {
+                   (!f->f_mapping->a_ops->get_xip_mem))) {
                        fput(f);
                        f = ERR_PTR(-EINVAL);
                }
index e7dd1d4e34734c6c139dbd1f8486becaa418a166..0fdda2e8a4cc916648b035a1f962eaf8744797fa 100644 (file)
 #ifndef CONFIG_LDM_DEBUG
 #define ldm_debug(...) do {} while (0)
 #else
-#define ldm_debug(f, a...) _ldm_printk (KERN_DEBUG, __FUNCTION__, f, ##a)
+#define ldm_debug(f, a...) _ldm_printk (KERN_DEBUG, __func__, f, ##a)
 #endif
 
-#define ldm_crit(f, a...)  _ldm_printk (KERN_CRIT,  __FUNCTION__, f, ##a)
-#define ldm_error(f, a...) _ldm_printk (KERN_ERR,   __FUNCTION__, f, ##a)
-#define ldm_info(f, a...)  _ldm_printk (KERN_INFO,  __FUNCTION__, f, ##a)
+#define ldm_crit(f, a...)  _ldm_printk (KERN_CRIT,  __func__, f, ##a)
+#define ldm_error(f, a...) _ldm_printk (KERN_ERR,   __func__, f, ##a)
+#define ldm_info(f, a...)  _ldm_printk (KERN_INFO,  __func__, f, ##a)
 
 __attribute__ ((format (printf, 3, 4)))
 static void _ldm_printk (const char *level, const char *function,
index 5567ec0d03a3910e931591e72669842cfc2b4591..796511886f285804060ddd62390012751da14685 100644 (file)
@@ -18,7 +18,7 @@
  *
  *  Re-organised Feb 1998 Russell King
  */
-
+#include <linux/msdos_fs.h>
 
 #include "check.h"
 #include "msdos.h"
@@ -419,6 +419,7 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
        Sector sect;
        unsigned char *data;
        struct partition *p;
+       struct fat_boot_sector *fb;
        int slot;
 
        data = read_dev_sector(bdev, 0, &sect);
@@ -444,8 +445,21 @@ int msdos_partition(struct parsed_partitions *state, struct block_device *bdev)
        p = (struct partition *) (data + 0x1be);
        for (slot = 1; slot <= 4; slot++, p++) {
                if (p->boot_ind != 0 && p->boot_ind != 0x80) {
-                       put_dev_sector(sect);
-                       return 0;
+                       /*
+                        * Even without a valid boot indicator value
+                        * it's still possible this is a valid FAT filesystem
+                        * without a partition table.
+                        */
+                       fb = (struct fat_boot_sector *) data;
+                       if (slot == 1 && fb->reserved && fb->fats
+                               && fat_valid_media(fb->media)) {
+                               printk("\n");
+                               put_dev_sector(sect);
+                               return 1;
+                       } else {
+                               put_dev_sector(sect);
+                               return 0;
+                       }
                }
        }
 
index 07d6c4853fe858da49c51d8c199a8a3ff635602a..c135cbdd9127a550d6da103f66fa7cc47584b72e 100644 (file)
@@ -425,12 +425,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
        cutime = cstime = utime = stime = cputime_zero;
        cgtime = gtime = cputime_zero;
 
-       rcu_read_lock();
        if (lock_task_sighand(task, &flags)) {
                struct signal_struct *sig = task->signal;
 
                if (sig->tty) {
-                       tty_pgrp = pid_nr_ns(sig->tty->pgrp, ns);
+                       struct pid *pgrp = tty_get_pgrp(sig->tty);
+                       tty_pgrp = pid_nr_ns(pgrp, ns);
+                       put_pid(pgrp);
                        tty_nr = new_encode_dev(tty_devnum(sig->tty));
                }
 
@@ -469,7 +470,6 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
                unlock_task_sighand(task, &flags);
        }
-       rcu_read_unlock();
 
        if (!whole || num_threads < 2)
                wchan = get_wchan(task);
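
Rather than dereferencing sig->tty->pgrp under rcu_read_lock(), do_task_stat() now takes a proper reference on the terminal's foreground process group and drops it when done. The same get/use/put pattern, sketched outside do_task_stat() with a hypothetical helper:

#include <linux/kernel.h>
#include <linux/pid.h>
#include <linux/tty.h>

static void example_log_tty_pgrp(struct tty_struct *tty)
{
        struct pid *pgrp = tty_get_pgrp(tty);   /* takes a reference; may be NULL */

        if (pgrp)
                printk(KERN_INFO "foreground pgrp: %d\n", pid_nr(pgrp));
        put_pid(pgrp);                          /* put_pid(NULL) is a no-op */
}
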
index c5e412a00b170e5294964505726b85f6cff93309..fcf02f2deeba398f790a4657574cfcbed9b55bc9 100644 (file)
@@ -195,12 +195,32 @@ static int proc_root_link(struct inode *inode, struct path *path)
        return result;
 }
 
-#define MAY_PTRACE(task) \
-       (task == current || \
-       (task->parent == current && \
-       (task->ptrace & PT_PTRACED) && \
-        (task_is_stopped_or_traced(task)) && \
-        security_ptrace(current,task) == 0))
+/*
+ * Return zero if current may access user memory in @task, -error if not.
+ */
+static int check_mem_permission(struct task_struct *task)
+{
+       /*
+        * A task can always look at itself, in case it chooses
+        * to use system calls instead of load instructions.
+        */
+       if (task == current)
+               return 0;
+
+       /*
+        * If current is actively ptrace'ing, and would also be
+        * permitted to freshly attach with ptrace now, permit it.
+        */
+       if (task->parent == current && (task->ptrace & PT_PTRACED) &&
+           task_is_stopped_or_traced(task) &&
+           ptrace_may_attach(task))
+               return 0;
+
+       /*
+        * No one else is allowed.
+        */
+       return -EPERM;
+}
 
 struct mm_struct *mm_for_maps(struct task_struct *task)
 {
@@ -722,7 +742,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
        if (!task)
                goto out_no_task;
 
-       if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
+       if (check_mem_permission(task))
                goto out;
 
        ret = -ENOMEM;
@@ -748,7 +768,7 @@ static ssize_t mem_read(struct file * file, char __user * buf,
 
                this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
                retval = access_process_vm(task, src, page, this_len, 0);
-               if (!retval || !MAY_PTRACE(task) || !ptrace_may_attach(task)) {
+               if (!retval || check_mem_permission(task)) {
                        if (!ret)
                                ret = -EIO;
                        break;
@@ -792,7 +812,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
        if (!task)
                goto out_no_task;
 
-       if (!MAY_PTRACE(task) || !ptrace_may_attach(task))
+       if (check_mem_permission(task))
                goto out;
 
        copied = -ENOMEM;
@@ -1181,6 +1201,81 @@ static const struct file_operations proc_pid_sched_operations = {
 
 #endif
 
+/*
+ * We added or removed a vma mapping the executable. The vmas are only mapped
+ * during exec and are not mapped with the mmap system call.
+ * Callers must hold down_write() on the mm's mmap_sem for these calls.
+ */
+void added_exe_file_vma(struct mm_struct *mm)
+{
+       mm->num_exe_file_vmas++;
+}
+
+void removed_exe_file_vma(struct mm_struct *mm)
+{
+       mm->num_exe_file_vmas--;
+       if ((mm->num_exe_file_vmas == 0) && mm->exe_file){
+               fput(mm->exe_file);
+               mm->exe_file = NULL;
+       }
+
+}
+
+void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
+{
+       if (new_exe_file)
+               get_file(new_exe_file);
+       if (mm->exe_file)
+               fput(mm->exe_file);
+       mm->exe_file = new_exe_file;
+       mm->num_exe_file_vmas = 0;
+}
+
+struct file *get_mm_exe_file(struct mm_struct *mm)
+{
+       struct file *exe_file;
+
+       /* We need mmap_sem to protect against races with removal of
+        * VM_EXECUTABLE vmas */
+       down_read(&mm->mmap_sem);
+       exe_file = mm->exe_file;
+       if (exe_file)
+               get_file(exe_file);
+       up_read(&mm->mmap_sem);
+       return exe_file;
+}
+
+void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
+{
+       /* It's safe to write the exe_file pointer without exe_file_lock because
+        * this is called during fork when the task is not yet in /proc */
+       newmm->exe_file = get_mm_exe_file(oldmm);
+}
+
+static int proc_exe_link(struct inode *inode, struct path *exe_path)
+{
+       struct task_struct *task;
+       struct mm_struct *mm;
+       struct file *exe_file;
+
+       task = get_proc_task(inode);
+       if (!task)
+               return -ENOENT;
+       mm = get_task_mm(task);
+       put_task_struct(task);
+       if (!mm)
+               return -ENOENT;
+       exe_file = get_mm_exe_file(mm);
+       mmput(mm);
+       if (exe_file) {
+               *exe_path = exe_file->f_path;
+               path_get(&exe_file->f_path);
+               fput(exe_file);
+               return 0;
+       } else
+               return -ENOENT;
+}
+
 static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
 {
        struct inode *inode = dentry->d_inode;
index a36ad3c75cf43bbc0cb9f34d550893d952316ac1..9d53b39a9cf8983b8c2e30b9bc1c661dfe89f1b6 100644 (file)
@@ -69,12 +69,7 @@ proc_file_read(struct file *file, char __user *buf, size_t nbytes,
                count = min_t(size_t, PROC_BLOCK_SIZE, nbytes);
 
                start = NULL;
-               if (dp->get_info) {
-                       /* Handle old net routines */
-                       n = dp->get_info(page, &start, *ppos, count);
-                       if (n < count)
-                               eof = 1;
-               } else if (dp->read_proc) {
+               if (dp->read_proc) {
                        /*
                         * How to be a proc read function
                         * ------------------------------
@@ -277,8 +272,11 @@ static int xlate_proc_name(const char *name,
        int                     len;
        int                     rtn = 0;
 
+       de = *ret;
+       if (!de)
+               de = &proc_root;
+
        spin_lock(&proc_subdir_lock);
-       de = &proc_root;
        while (1) {
                next = strchr(cp, '/');
                if (!next)
@@ -385,20 +383,18 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
 
        lock_kernel();
        spin_lock(&proc_subdir_lock);
-       if (de) {
-               for (de = de->subdir; de ; de = de->next) {
-                       if (de->namelen != dentry->d_name.len)
-                               continue;
-                       if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
-                               unsigned int ino;
+       for (de = de->subdir; de ; de = de->next) {
+               if (de->namelen != dentry->d_name.len)
+                       continue;
+               if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
+                       unsigned int ino;
 
-                               ino = de->low_ino;
-                               de_get(de);
-                               spin_unlock(&proc_subdir_lock);
-                               error = -EINVAL;
-                               inode = proc_get_inode(dir->i_sb, ino, de);
-                               goto out_unlock;
-                       }
+                       ino = de->low_ino;
+                       de_get(de);
+                       spin_unlock(&proc_subdir_lock);
+                       error = -EINVAL;
+                       inode = proc_get_inode(dir->i_sb, ino, de);
+                       goto out_unlock;
                }
        }
        spin_unlock(&proc_subdir_lock);
@@ -410,7 +406,8 @@ out_unlock:
                d_add(dentry, inode);
                return NULL;
        }
-       de_put(de);
+       if (de)
+               de_put(de);
        return ERR_PTR(error);
 }
 
@@ -440,10 +437,6 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
        lock_kernel();
 
        ino = inode->i_ino;
-       if (!de) {
-               ret = -EINVAL;
-               goto out;
-       }
        i = filp->f_pos;
        switch (i) {
                case 0:
@@ -582,7 +575,7 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
        /* make sure name is valid */
        if (!name || !strlen(name)) goto out;
 
-       if (!(*parent) && xlate_proc_name(name, parent, &fn) != 0)
+       if (xlate_proc_name(name, parent, &fn) != 0)
                goto out;
 
        /* At this point there must not be any '/' characters beyond *fn */
@@ -682,9 +675,10 @@ struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
        return ent;
 }
 
-struct proc_dir_entry *proc_create(const char *name, mode_t mode,
-                                  struct proc_dir_entry *parent,
-                                  const struct file_operations *proc_fops)
+struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
+                                       struct proc_dir_entry *parent,
+                                       const struct file_operations *proc_fops,
+                                       void *data)
 {
        struct proc_dir_entry *pde;
        nlink_t nlink;
@@ -705,6 +699,7 @@ struct proc_dir_entry *proc_create(const char *name, mode_t mode,
        if (!pde)
                goto out;
        pde->proc_fops = proc_fops;
+       pde->data = data;
        if (proc_register(parent, pde) < 0)
                goto out_free;
        return pde;
@@ -734,55 +729,58 @@ void free_proc_entry(struct proc_dir_entry *de)
 void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
 {
        struct proc_dir_entry **p;
-       struct proc_dir_entry *de;
+       struct proc_dir_entry *de = NULL;
        const char *fn = name;
        int len;
 
-       if (!parent && xlate_proc_name(name, &parent, &fn) != 0)
-               goto out;
+       if (xlate_proc_name(name, &parent, &fn) != 0)
+               return;
        len = strlen(fn);
 
        spin_lock(&proc_subdir_lock);
        for (p = &parent->subdir; *p; p=&(*p)->next ) {
-               if (!proc_match(len, fn, *p))
-                       continue;
-               de = *p;
-               *p = de->next;
-               de->next = NULL;
-
-               spin_lock(&de->pde_unload_lock);
-               /*
-                * Stop accepting new callers into module. If you're
-                * dynamically allocating ->proc_fops, save a pointer somewhere.
-                */
-               de->proc_fops = NULL;
-               /* Wait until all existing callers into module are done. */
-               if (de->pde_users > 0) {
-                       DECLARE_COMPLETION_ONSTACK(c);
-
-                       if (!de->pde_unload_completion)
-                               de->pde_unload_completion = &c;
-
-                       spin_unlock(&de->pde_unload_lock);
-                       spin_unlock(&proc_subdir_lock);
+               if (proc_match(len, fn, *p)) {
+                       de = *p;
+                       *p = de->next;
+                       de->next = NULL;
+                       break;
+               }
+       }
+       spin_unlock(&proc_subdir_lock);
+       if (!de)
+               return;
 
-                       wait_for_completion(de->pde_unload_completion);
+       spin_lock(&de->pde_unload_lock);
+       /*
+        * Stop accepting new callers into module. If you're
+        * dynamically allocating ->proc_fops, save a pointer somewhere.
+        */
+       de->proc_fops = NULL;
+       /* Wait until all existing callers into module are done. */
+       if (de->pde_users > 0) {
+               DECLARE_COMPLETION_ONSTACK(c);
+
+               if (!de->pde_unload_completion)
+                       de->pde_unload_completion = &c;
 
-                       spin_lock(&proc_subdir_lock);
-                       goto continue_removing;
-               }
                spin_unlock(&de->pde_unload_lock);
 
+               wait_for_completion(de->pde_unload_completion);
+
+               goto continue_removing;
+       }
+       spin_unlock(&de->pde_unload_lock);
+
 continue_removing:
-               if (S_ISDIR(de->mode))
-                       parent->nlink--;
-               de->nlink = 0;
-               WARN_ON(de->subdir);
-               if (atomic_dec_and_test(&de->count))
-                       free_proc_entry(de);
-               break;
+       if (S_ISDIR(de->mode))
+               parent->nlink--;
+       de->nlink = 0;
+       if (de->subdir) {
+               printk(KERN_WARNING "%s: removing non-empty directory "
+                       "'%s/%s', leaking at least '%s'\n", __func__,
+                       de->parent->name, de->name, de->subdir->name);
+               WARN_ON(1);
        }
-       spin_unlock(&proc_subdir_lock);
-out:
-       return;
+       if (atomic_dec_and_test(&de->count))
+               free_proc_entry(de);
 }
index 82b3a1b5a70b8c887a2e4b7cf698843097b84c0e..6f4e8dc97da1f24f6313b4425efa323fe82a644c 100644 (file)
@@ -25,8 +25,7 @@
 
 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
 {
-       if (de)
-               atomic_inc(&de->count);
+       atomic_inc(&de->count);
        return de;
 }
 
@@ -35,18 +34,16 @@ struct proc_dir_entry *de_get(struct proc_dir_entry *de)
  */
 void de_put(struct proc_dir_entry *de)
 {
-       if (de) {       
-               lock_kernel();          
-               if (!atomic_read(&de->count)) {
-                       printk("de_put: entry %s already free!\n", de->name);
-                       unlock_kernel();
-                       return;
-               }
-
-               if (atomic_dec_and_test(&de->count))
-                       free_proc_entry(de);
+       lock_kernel();
+       if (!atomic_read(&de->count)) {
+               printk("de_put: entry %s already free!\n", de->name);
                unlock_kernel();
+               return;
        }
+
+       if (atomic_dec_and_test(&de->count))
+               free_proc_entry(de);
+       unlock_kernel();
 }
 
 /*
@@ -392,7 +389,7 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
 {
        struct inode * inode;
 
-       if (de != NULL && !try_module_get(de->owner))
+       if (!try_module_get(de->owner))
                goto out_mod;
 
        inode = iget_locked(sb, ino);
@@ -402,30 +399,29 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
                inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
                PROC_I(inode)->fd = 0;
                PROC_I(inode)->pde = de;
-               if (de) {
-                       if (de->mode) {
-                               inode->i_mode = de->mode;
-                               inode->i_uid = de->uid;
-                               inode->i_gid = de->gid;
-                       }
-                       if (de->size)
-                               inode->i_size = de->size;
-                       if (de->nlink)
-                               inode->i_nlink = de->nlink;
-                       if (de->proc_iops)
-                               inode->i_op = de->proc_iops;
-                       if (de->proc_fops) {
-                               if (S_ISREG(inode->i_mode)) {
+
+               if (de->mode) {
+                       inode->i_mode = de->mode;
+                       inode->i_uid = de->uid;
+                       inode->i_gid = de->gid;
+               }
+               if (de->size)
+                       inode->i_size = de->size;
+               if (de->nlink)
+                       inode->i_nlink = de->nlink;
+               if (de->proc_iops)
+                       inode->i_op = de->proc_iops;
+               if (de->proc_fops) {
+                       if (S_ISREG(inode->i_mode)) {
 #ifdef CONFIG_COMPAT
-                                       if (!de->proc_fops->compat_ioctl)
-                                               inode->i_fop =
-                                                       &proc_reg_file_ops_no_compat;
-                                       else
+                               if (!de->proc_fops->compat_ioctl)
+                                       inode->i_fop =
+                                               &proc_reg_file_ops_no_compat;
+                               else
 #endif
-                                               inode->i_fop = &proc_reg_file_ops;
-                               } else {
-                                       inode->i_fop = de->proc_fops;
-                               }
+                                       inode->i_fop = &proc_reg_file_ops;
+                       } else {
+                               inode->i_fop = de->proc_fops;
                        }
                }
                unlock_new_inode(inode);
@@ -433,8 +429,7 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
        return inode;
 
 out_ino:
-       if (de != NULL)
-               module_put(de->owner);
+       module_put(de->owner);
 out_mod:
        return NULL;
 }                      
index bc72f5c8c47d34704cb6a76ef450e1bd6dc84f0d..28cbca8059057e5f1b7729b69743aea11cecc27c 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/proc_fs.h>
 
+extern struct proc_dir_entry proc_root;
 #ifdef CONFIG_PROC_SYSCTL
 extern int proc_sys_init(void);
 #else
@@ -46,9 +47,6 @@ extern int nommu_vma_show(struct seq_file *, struct vm_area_struct *);
 
 extern int maps_protect;
 
-extern void create_seq_entry(char *name, mode_t mode,
-                               const struct file_operations *f);
-extern int proc_exe_link(struct inode *, struct path *);
 extern int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns,
                                struct pid *pid, struct task_struct *task);
 extern int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns,
index 941e95114b5a3bb3f248f41e15652da2f06ad0fa..79ecd281d2cbf1e1db1484ffe4b24413bc6f9e63 100644 (file)
@@ -137,7 +137,7 @@ static const struct file_operations proc_nommu_vma_list_operations = {
 
 static int __init proc_nommu_init(void)
 {
-       create_seq_entry("maps", S_IRUGO, &proc_nommu_vma_list_operations);
+       proc_create("maps", S_IRUGO, NULL, &proc_nommu_vma_list_operations);
        return 0;
 }
 
index 2d563979cb025412dce7847bd79979c34a8456eb..74a323d2b850884f06c2206fc4d288e957c8bef6 100644 (file)
@@ -179,6 +179,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
                "PageTables:   %8lu kB\n"
                "NFS_Unstable: %8lu kB\n"
                "Bounce:       %8lu kB\n"
+               "WritebackTmp: %8lu kB\n"
                "CommitLimit:  %8lu kB\n"
                "Committed_AS: %8lu kB\n"
                "VmallocTotal: %8lu kB\n"
@@ -210,6 +211,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
                K(global_page_state(NR_PAGETABLE)),
                K(global_page_state(NR_UNSTABLE_NFS)),
                K(global_page_state(NR_BOUNCE)),
+               K(global_page_state(NR_WRITEBACK_TEMP)),
                K(allowed),
                K(committed),
                (unsigned long)VMALLOC_TOTAL >> 10,
@@ -456,6 +458,20 @@ static const struct file_operations proc_slabstats_operations = {
 #endif
 #endif
 
+#ifdef CONFIG_MMU
+static int vmalloc_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &vmalloc_op);
+}
+
+static const struct file_operations proc_vmalloc_operations = {
+       .open           = vmalloc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+#endif
+
 static int show_stat(struct seq_file *p, void *v)
 {
        int i;
@@ -812,14 +828,6 @@ static struct file_operations proc_kpageflags_operations = {
 
 struct proc_dir_entry *proc_root_kcore;
 
-void create_seq_entry(char *name, mode_t mode, const struct file_operations *f)
-{
-       struct proc_dir_entry *entry;
-       entry = create_proc_entry(name, mode, NULL);
-       if (entry)
-               entry->proc_fops = f;
-}
-
 void __init proc_misc_init(void)
 {
        static struct {
@@ -848,63 +856,52 @@ void __init proc_misc_init(void)
 
        /* And now for trickier ones */
 #ifdef CONFIG_PRINTK
-       {
-               struct proc_dir_entry *entry;
-               entry = create_proc_entry("kmsg", S_IRUSR, &proc_root);
-               if (entry)
-                       entry->proc_fops = &proc_kmsg_operations;
-       }
+       proc_create("kmsg", S_IRUSR, NULL, &proc_kmsg_operations);
 #endif
-       create_seq_entry("locks", 0, &proc_locks_operations);
-       create_seq_entry("devices", 0, &proc_devinfo_operations);
-       create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
+       proc_create("locks", 0, NULL, &proc_locks_operations);
+       proc_create("devices", 0, NULL, &proc_devinfo_operations);
+       proc_create("cpuinfo", 0, NULL, &proc_cpuinfo_operations);
 #ifdef CONFIG_BLOCK
-       create_seq_entry("partitions", 0, &proc_partitions_operations);
+       proc_create("partitions", 0, NULL, &proc_partitions_operations);
 #endif
-       create_seq_entry("stat", 0, &proc_stat_operations);
-       create_seq_entry("interrupts", 0, &proc_interrupts_operations);
+       proc_create("stat", 0, NULL, &proc_stat_operations);
+       proc_create("interrupts", 0, NULL, &proc_interrupts_operations);
 #ifdef CONFIG_SLABINFO
-       create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
+       proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
 #ifdef CONFIG_DEBUG_SLAB_LEAK
-       create_seq_entry("slab_allocators", 0 ,&proc_slabstats_operations);
+       proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
+#endif
 #endif
+#ifdef CONFIG_MMU
+       proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
 #endif
-       create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations);
-       create_seq_entry("pagetypeinfo", S_IRUGO, &pagetypeinfo_file_ops);
-       create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
-       create_seq_entry("zoneinfo",S_IRUGO, &proc_zoneinfo_file_operations);
+       proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
+       proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
+       proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
+       proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
 #ifdef CONFIG_BLOCK
-       create_seq_entry("diskstats", 0, &proc_diskstats_operations);
+       proc_create("diskstats", 0, NULL, &proc_diskstats_operations);
 #endif
 #ifdef CONFIG_MODULES
-       create_seq_entry("modules", 0, &proc_modules_operations);
+       proc_create("modules", 0, NULL, &proc_modules_operations);
 #endif
 #ifdef CONFIG_SCHEDSTATS
-       create_seq_entry("schedstat", 0, &proc_schedstat_operations);
+       proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
 #endif
 #ifdef CONFIG_PROC_KCORE
-       proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL);
-       if (proc_root_kcore) {
-               proc_root_kcore->proc_fops = &proc_kcore_operations;
+       proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &proc_kcore_operations);
+       if (proc_root_kcore)
                proc_root_kcore->size =
                                (size_t)high_memory - PAGE_OFFSET + PAGE_SIZE;
-       }
 #endif
 #ifdef CONFIG_PROC_PAGE_MONITOR
-       create_seq_entry("kpagecount", S_IRUSR, &proc_kpagecount_operations);
-       create_seq_entry("kpageflags", S_IRUSR, &proc_kpageflags_operations);
+       proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
+       proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
 #endif
 #ifdef CONFIG_PROC_VMCORE
-       proc_vmcore = create_proc_entry("vmcore", S_IRUSR, NULL);
-       if (proc_vmcore)
-               proc_vmcore->proc_fops = &proc_vmcore_operations;
+       proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
 #endif
 #ifdef CONFIG_MAGIC_SYSRQ
-       {
-               struct proc_dir_entry *entry;
-               entry = create_proc_entry("sysrq-trigger", S_IWUSR, NULL);
-               if (entry)
-                       entry->proc_fops = &proc_sysrq_trigger_operations;
-       }
+       proc_create("sysrq-trigger", S_IWUSR, NULL, &proc_sysrq_trigger_operations);
 #endif
 }
index 614c34b6d1c22214ffcdeb46a5f9e9eb63d69678..5acc001d49f6c72db83216173136701c2169f950 100644 (file)
@@ -165,8 +165,8 @@ out:
        return err;
 }
 
-static ssize_t proc_sys_read(struct file *filp, char __user *buf,
-                               size_t count, loff_t *ppos)
+static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
+               size_t count, loff_t *ppos, int write)
 {
        struct dentry *dentry = filp->f_dentry;
        struct ctl_table_header *head;
@@ -190,12 +190,12 @@ static ssize_t proc_sys_read(struct file *filp, char __user *buf,
         * and won't be until we finish.
         */
        error = -EPERM;
-       if (sysctl_perm(table, MAY_READ))
+       if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ))
                goto out;
 
        /* careful: calling conventions are nasty here */
        res = count;
-       error = table->proc_handler(table, 0, filp, buf, &res, ppos);
+       error = table->proc_handler(table, write, filp, buf, &res, ppos);
        if (!error)
                error = res;
 out:
@@ -204,44 +204,16 @@ out:
        return error;
 }
 
-static ssize_t proc_sys_write(struct file *filp, const char __user *buf,
+static ssize_t proc_sys_read(struct file *filp, char __user *buf,
                                size_t count, loff_t *ppos)
 {
-       struct dentry *dentry = filp->f_dentry;
-       struct ctl_table_header *head;
-       struct ctl_table *table;
-       ssize_t error;
-       size_t res;
-
-       table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head);
-       /* Has the sysctl entry disappeared on us? */
-       error = -ENOENT;
-       if (!table)
-               goto out;
-
-       /* Has the sysctl entry been replaced by a directory? */
-       error = -EISDIR;
-       if (!table->proc_handler)
-               goto out;
-
-       /*
-        * At this point we know that the sysctl was not unregistered
-        * and won't be until we finish.
-        */
-       error = -EPERM;
-       if (sysctl_perm(table, MAY_WRITE))
-               goto out;
-
-       /* careful: calling conventions are nasty here */
-       res = count;
-       error = table->proc_handler(table, 1, filp, (char __user *)buf,
-                                   &res, ppos);
-       if (!error)
-               error = res;
-out:
-       sysctl_head_finish(head);
+       return proc_sys_call_handler(filp, (void __user *)buf, count, ppos, 0);
+}
 
-       return error;
+static ssize_t proc_sys_write(struct file *filp, const char __user *buf,
+                               size_t count, loff_t *ppos)
+{
+       return proc_sys_call_handler(filp, (void __user *)buf, count, ppos, 1);
 }
 
 
@@ -416,7 +388,7 @@ static int proc_sys_permission(struct inode *inode, int mask, struct nameidata *
                goto out;
 
        /* Use the permissions on the sysctl table entry */
-       error = sysctl_perm(table, mask);
+       error = sysctl_perm(head->root, table, mask);
 out:
        sysctl_head_finish(head);
        return error;
index 49816e00b51a25899f860215166b97acd8584044..21f490f5d65ce1f96eb37095516eddb3c80aa4f8 100644 (file)
@@ -5,7 +5,7 @@
  */
 
 #include <asm/uaccess.h>
-
+#include <linux/module.h>
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/time.h>
@@ -136,39 +136,54 @@ static const struct file_operations proc_tty_drivers_operations = {
        .release        = seq_release,
 };
 
-/*
- * This is the handler for /proc/tty/ldiscs
- */
-static int tty_ldiscs_read_proc(char *page, char **start, off_t off,
-                               int count, int *eof, void *data)
+static void * tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos)
 {
-       int     i;
-       int     len = 0;
-       off_t   begin = 0;
+       return (*pos < NR_LDISCS) ? pos : NULL;
+}
+
+static void * tty_ldiscs_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+       (*pos)++;
+       return (*pos < NR_LDISCS) ? pos : NULL;
+}
+
+static void tty_ldiscs_seq_stop(struct seq_file *m, void *v)
+{
+}
+
+static int tty_ldiscs_seq_show(struct seq_file *m, void *v)
+{
+       int i = *(loff_t *)v;
        struct tty_ldisc *ld;
        
-       for (i=0; i < NR_LDISCS; i++) {
-               ld = tty_ldisc_get(i);
-               if (ld == NULL)
-                       continue;
-               len += sprintf(page+len, "%-10s %2d\n",
-                              ld->name ? ld->name : "???", i);
-               tty_ldisc_put(i);
-               if (len+begin > off+count)
-                       break;
-               if (len+begin < off) {
-                       begin += len;
-                       len = 0;
-               }
-       }
-       if (i >= NR_LDISCS)
-               *eof = 1;
-       if (off >= len+begin)
+       ld = tty_ldisc_get(i);
+       if (ld == NULL)
                return 0;
-       *start = page + (off-begin);
-       return ((count < begin+len-off) ? count : begin+len-off);
+       seq_printf(m, "%-10s %2d\n", ld->name ? ld->name : "???", i);
+       tty_ldisc_put(i);
+       return 0;
+}
+
+static const struct seq_operations tty_ldiscs_seq_ops = {
+       .start  = tty_ldiscs_seq_start,
+       .next   = tty_ldiscs_seq_next,
+       .stop   = tty_ldiscs_seq_stop,
+       .show   = tty_ldiscs_seq_show,
+};
+
+static int proc_tty_ldiscs_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &tty_ldiscs_seq_ops);
 }
 
+static const struct file_operations tty_ldiscs_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = proc_tty_ldiscs_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+
 /*
  * This function is called by tty_register_driver() to handle
  * registering the driver's /proc handler into /proc/tty/driver/<foo>
@@ -177,16 +192,14 @@ void proc_tty_register_driver(struct tty_driver *driver)
 {
        struct proc_dir_entry *ent;
                
-       if ((!driver->read_proc && !driver->write_proc) ||
-           !driver->driver_name ||
+       if (!driver->ops->read_proc || !driver->driver_name ||
            driver->proc_entry)
                return;
 
        ent = create_proc_entry(driver->driver_name, 0, proc_tty_driver);
        if (!ent)
                return;
-       ent->read_proc = driver->read_proc;
-       ent->write_proc = driver->write_proc;
+       ent->read_proc = driver->ops->read_proc;
        ent->owner = driver->owner;
        ent->data = driver;
 
@@ -214,7 +227,6 @@ void proc_tty_unregister_driver(struct tty_driver *driver)
  */
 void __init proc_tty_init(void)
 {
-       struct proc_dir_entry *entry;
        if (!proc_mkdir("tty", NULL))
                return;
        proc_tty_ldisc = proc_mkdir("tty/ldisc", NULL);
@@ -224,10 +236,7 @@ void __init proc_tty_init(void)
         * password lengths and inter-keystroke timings during password
         * entry.
         */
-       proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR | S_IXUSR, NULL);
-
-       create_proc_read_entry("tty/ldiscs", 0, NULL, tty_ldiscs_read_proc, NULL);
-       entry = create_proc_entry("tty/drivers", 0, NULL);
-       if (entry)
-               entry->proc_fops = &proc_tty_drivers_operations;
+       proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR|S_IXUSR, NULL);
+       proc_create("tty/ldiscs", 0, NULL, &tty_ldiscs_proc_fops);
+       proc_create("tty/drivers", 0, NULL, &proc_tty_drivers_operations);
 }
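
The /proc/tty/ldiscs conversion above swaps the old page-buffer read_proc callback for the seq_file iterator contract: start() positions the cursor, next() advances it, show() prints one record, and stop() releases whatever start() acquired. For a table indexed 0..N-1, as here, the loff_t cursor itself can serve as the iterator; a generic hedged sketch over a hypothetical fixed array:

#include <linux/kernel.h>
#include <linux/seq_file.h>

static const char *const example_names[] = { "alpha", "beta", "gamma" };

/* start(): return an iterator for position *pos, or NULL when past the end */
static void *example_seq_start(struct seq_file *m, loff_t *pos)
{
        return (*pos < (loff_t)ARRAY_SIZE(example_names)) ? pos : NULL;
}

/* next(): advance the cursor, returning the new iterator or NULL at the end */
static void *example_seq_next(struct seq_file *m, void *v, loff_t *pos)
{
        (*pos)++;
        return (*pos < (loff_t)ARRAY_SIZE(example_names)) ? pos : NULL;
}

/* stop(): undo whatever start() acquired; nothing to do for a static table */
static void example_seq_stop(struct seq_file *m, void *v)
{
}

/* show(): emit exactly one record for the current iterator position */
static int example_seq_show(struct seq_file *m, void *v)
{
        int i = *(loff_t *)v;

        seq_printf(m, "%-10s %2d\n", example_names[i], i);
        return 0;
}

static const struct seq_operations example_seq_ops = {
        .start  = example_seq_start,
        .next   = example_seq_next,
        .stop   = example_seq_stop,
        .show   = example_seq_show,
};
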
index ef0fb57fc9ef794e6a5b849c27efe26fadb9f4f8..95117538a4f6fd3b4fdc90558127caa282d7e800 100644 (file)
@@ -22,8 +22,6 @@
 
 #include "internal.h"
 
-struct proc_dir_entry *proc_bus, *proc_root_fs, *proc_root_driver;
-
 static int proc_test_super(struct super_block *sb, void *data)
 {
        return sb->s_fs_info == data;
@@ -126,8 +124,8 @@ void __init proc_root_init(void)
 #ifdef CONFIG_SYSVIPC
        proc_mkdir("sysvipc", NULL);
 #endif
-       proc_root_fs = proc_mkdir("fs", NULL);
-       proc_root_driver = proc_mkdir("driver", NULL);
+       proc_mkdir("fs", NULL);
+       proc_mkdir("driver", NULL);
        proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */
 #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE)
        /* just give it a mountpoint */
@@ -137,7 +135,7 @@ void __init proc_root_init(void)
 #ifdef CONFIG_PROC_DEVICETREE
        proc_device_tree_init();
 #endif
-       proc_bus = proc_mkdir("bus", NULL);
+       proc_mkdir("bus", NULL);
        proc_sys_init();
 }
 
@@ -232,9 +230,5 @@ void pid_ns_release_proc(struct pid_namespace *ns)
 EXPORT_SYMBOL(proc_symlink);
 EXPORT_SYMBOL(proc_mkdir);
 EXPORT_SYMBOL(create_proc_entry);
-EXPORT_SYMBOL(proc_create);
+EXPORT_SYMBOL(proc_create_data);
 EXPORT_SYMBOL(remove_proc_entry);
-EXPORT_SYMBOL(proc_root);
-EXPORT_SYMBOL(proc_root_fs);
-EXPORT_SYMBOL(proc_bus);
-EXPORT_SYMBOL(proc_root_driver);
index 9dfb5ff24209be91f2a3bd87048ab130c46ae2dd..e2b8e769f510d676997698b8fd28ef4346478d2c 100644 (file)
@@ -75,40 +75,6 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
        return mm->total_vm;
 }
 
-int proc_exe_link(struct inode *inode, struct path *path)
-{
-       struct vm_area_struct * vma;
-       int result = -ENOENT;
-       struct task_struct *task = get_proc_task(inode);
-       struct mm_struct * mm = NULL;
-
-       if (task) {
-               mm = get_task_mm(task);
-               put_task_struct(task);
-       }
-       if (!mm)
-               goto out;
-       down_read(&mm->mmap_sem);
-
-       vma = mm->mmap;
-       while (vma) {
-               if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
-                       break;
-               vma = vma->vm_next;
-       }
-
-       if (vma) {
-               *path = vma->vm_file->f_path;
-               path_get(&vma->vm_file->f_path);
-               result = 0;
-       }
-
-       up_read(&mm->mmap_sem);
-       mmput(mm);
-out:
-       return result;
-}
-
 static void pad_len_spaces(struct seq_file *m, int len)
 {
        len = 25 + sizeof(void*) * 6 - len;
@@ -338,8 +304,7 @@ const struct file_operations proc_maps_operations = {
 #define PSS_SHIFT 12
 
 #ifdef CONFIG_PROC_PAGE_MONITOR
-struct mem_size_stats
-{
+struct mem_size_stats {
        struct vm_area_struct *vma;
        unsigned long resident;
        unsigned long shared_clean;
@@ -347,6 +312,7 @@ struct mem_size_stats
        unsigned long private_clean;
        unsigned long private_dirty;
        unsigned long referenced;
+       unsigned long swap;
        u64 pss;
 };
 
@@ -363,6 +329,12 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                ptent = *pte;
+
+               if (is_swap_pte(ptent)) {
+                       mss->swap += PAGE_SIZE;
+                       continue;
+               }
+
                if (!pte_present(ptent))
                        continue;
 
@@ -421,7 +393,8 @@ static int show_smap(struct seq_file *m, void *v)
                   "Shared_Dirty:   %8lu kB\n"
                   "Private_Clean:  %8lu kB\n"
                   "Private_Dirty:  %8lu kB\n"
-                  "Referenced:     %8lu kB\n",
+                  "Referenced:     %8lu kB\n"
+                  "Swap:           %8lu kB\n",
                   (vma->vm_end - vma->vm_start) >> 10,
                   mss.resident >> 10,
                   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
@@ -429,7 +402,8 @@ static int show_smap(struct seq_file *m, void *v)
                   mss.shared_dirty  >> 10,
                   mss.private_clean >> 10,
                   mss.private_dirty >> 10,
-                  mss.referenced >> 10);
+                  mss.referenced >> 10,
+                  mss.swap >> 10);
 
        return ret;
 }
@@ -579,7 +553,7 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
        return err;
 }
 
-u64 swap_pte_to_pagemap_entry(pte_t pte)
+static u64 swap_pte_to_pagemap_entry(pte_t pte)
 {
        swp_entry_t e = pte_to_swp_entry(pte);
        return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT);
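
The task_mmu.c hunks above count swapped-out PTEs per VMA and report them in a new "Swap:" line of /proc/<pid>/smaps. A small userspace sketch (not part of this patch) that sums those per-VMA values for the current process:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/self/smaps", "r");
            char line[256];
            unsigned long kb, total = 0;

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    if (sscanf(line, "Swap: %lu kB", &kb) == 1)
                            total += kb;    /* the field added by this patch */
            fclose(f);
            printf("swap used: %lu kB\n", total);
            return 0;
    }
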
index 8011528518bd7e73d022498466e79257bd2695ec..4b733f108455273ce52ce73c4f410d74a9ea2b2e 100644 (file)
@@ -103,40 +103,6 @@ int task_statm(struct mm_struct *mm, int *shared, int *text,
        return size;
 }
 
-int proc_exe_link(struct inode *inode, struct path *path)
-{
-       struct vm_list_struct *vml;
-       struct vm_area_struct *vma;
-       struct task_struct *task = get_proc_task(inode);
-       struct mm_struct *mm = get_task_mm(task);
-       int result = -ENOENT;
-
-       if (!mm)
-               goto out;
-       down_read(&mm->mmap_sem);
-
-       vml = mm->context.vmlist;
-       vma = NULL;
-       while (vml) {
-               if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) {
-                       vma = vml->vma;
-                       break;
-               }
-               vml = vml->next;
-       }
-
-       if (vma) {
-               *path = vma->vm_file->f_path;
-               path_get(&vma->vm_file->f_path);
-               result = 0;
-       }
-
-       up_read(&mm->mmap_sem);
-       mmput(mm);
-out:
-       return result;
-}
-
 /*
  * display mapping lines for a particular process's /proc/pid/maps
  */
index 84f28dd721163eb5ef09166bb3d96569c9724b6a..db1cc9f3c7aa3a24be8439c632f77228e5e266ab 100644 (file)
@@ -69,7 +69,6 @@ static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid
        switch (cmd) {
                case Q_GETFMT:
                case Q_GETINFO:
-               case Q_QUOTAOFF:
                case Q_SETINFO:
                case Q_SETQUOTA:
                case Q_GETQUOTA:
@@ -229,12 +228,12 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id, void
 
                        if (IS_ERR(pathname = getname(addr)))
                                return PTR_ERR(pathname);
-                       ret = sb->s_qcop->quota_on(sb, type, id, pathname);
+                       ret = sb->s_qcop->quota_on(sb, type, id, pathname, 0);
                        putname(pathname);
                        return ret;
                }
                case Q_QUOTAOFF:
-                       return sb->s_qcop->quota_off(sb, type);
+                       return sb->s_qcop->quota_off(sb, type, 0);
 
                case Q_GETFMT: {
                        __u32 fmt;
index f3841f233069659e79a451a1d0f06f005f5ac83c..a6cf9269105c478e7a88db2ca6b074b8bcc0933d 100644 (file)
@@ -139,6 +139,9 @@ static int v1_read_file_info(struct super_block *sb, int type)
                goto out;
        }
        ret = 0;
+       /* limits are stored as unsigned 32-bit data */
+       dqopt->info[type].dqi_maxblimit = 0xffffffff;
+       dqopt->info[type].dqi_maxilimit = 0xffffffff;
        dqopt->info[type].dqi_igrace = dqblk.dqb_itime ? dqblk.dqb_itime : MAX_IQ_TIME;
        dqopt->info[type].dqi_bgrace = dqblk.dqb_btime ? dqblk.dqb_btime : MAX_DQ_TIME;
 out:
index c519a583e681471c86687134c764262df138caf9..234ada90363343ad668eff8213dd0927af7aa6f2 100644 (file)
@@ -59,6 +59,9 @@ static int v2_read_file_info(struct super_block *sb, int type)
                        sb->s_id);
                return -1;
        }
+       /* limits are stored as unsigned 32-bit data */
+       info->dqi_maxblimit = 0xffffffff;
+       info->dqi_maxilimit = 0xffffffff;
        info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
        info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
        info->dqi_flags = le32_to_cpu(dinfo.dqi_flags);
@@ -303,7 +306,7 @@ static uint find_free_dqentry(struct dquot *dquot, int *err)
                        printk(KERN_ERR "VFS: find_free_dqentry(): Can't remove block (%u) from entry free list.\n", blk);
                        goto out_buf;
                }
-       dh->dqdh_entries = cpu_to_le16(le16_to_cpu(dh->dqdh_entries)+1);
+       le16_add_cpu(&dh->dqdh_entries, 1);
        memset(&fakedquot, 0, sizeof(struct v2_disk_dqblk));
        /* Find free structure in block */
        for (i = 0; i < V2_DQSTRINBLK && memcmp(&fakedquot, ddquot+i, sizeof(struct v2_disk_dqblk)); i++);
@@ -445,7 +448,7 @@ static int free_dqentry(struct dquot *dquot, uint blk)
                goto out_buf;
        }
        dh = (struct v2_disk_dqdbheader *)buf;
-       dh->dqdh_entries = cpu_to_le16(le16_to_cpu(dh->dqdh_entries)-1);
+       le16_add_cpu(&dh->dqdh_entries, -1);
        if (!le16_to_cpu(dh->dqdh_entries)) {   /* Block got free? */
                if ((ret = remove_free_dqentry(sb, type, buf, blk)) < 0 ||
                    (ret = put_free_dqblk(sb, type, buf, blk)) < 0) {
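
Both conversions above are behaviour-preserving: le16_add_cpu() adds a CPU-order delta to a little-endian on-disk field in place. Conceptually (illustrative sketch only):

    /* le16_add_cpu(&dh->dqdh_entries, -1) is equivalent to the removed line: */
    dh->dqdh_entries = cpu_to_le16(le16_to_cpu(dh->dqdh_entries) - 1);
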
index b41a514b0976559a0f17f24bd98cdcea354b0c47..9590b90243002f084cc4a51e8b1bd25ca0cef8f4 100644 (file)
@@ -26,6 +26,9 @@
 
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/ramfs.h>
+
+#include "internal.h"
 
 const struct address_space_operations ramfs_aops = {
        .readpage       = simple_readpage,
index 8428d5b2711de8e901b75dd3c209039c3e2d429a..b13123424e49ce169da5c729c0e327fdf2259bf8 100644 (file)
@@ -44,7 +44,7 @@ static const struct inode_operations ramfs_dir_inode_operations;
 
 static struct backing_dev_info ramfs_backing_dev_info = {
        .ra_pages       = 0,    /* No readahead */
-       .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK |
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK |
                          BDI_CAP_MAP_DIRECT | BDI_CAP_MAP_COPY |
                          BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP,
 };
index af7cc074a47621af7d160786515dec5280697f3c..6b330639b51dc7b497d6cc9ac8726ce0c004ec37 100644 (file)
@@ -11,5 +11,4 @@
 
 
 extern const struct address_space_operations ramfs_aops;
-extern const struct file_operations ramfs_file_operations;
 extern const struct inode_operations ramfs_file_inode_operations;
index f491ceb5af02d9a8c089066e8a3ca908c641acaa..4646caa60455c4666789242f40c245fdd9cc3ab0 100644 (file)
@@ -479,7 +479,7 @@ static void __discard_prealloc(struct reiserfs_transaction_handle *th,
        if (ei->i_prealloc_count < 0)
                reiserfs_warning(th->t_super,
                                 "zam-4001:%s: inode has negative prealloc blocks count.",
-                                __FUNCTION__);
+                                __func__);
 #endif
        while (ei->i_prealloc_count > 0) {
                reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
@@ -517,7 +517,7 @@ void reiserfs_discard_all_prealloc(struct reiserfs_transaction_handle *th)
                if (!ei->i_prealloc_count) {
                        reiserfs_warning(th->t_super,
                                         "zam-4001:%s: inode is in prealloc list but has no preallocated blocks.",
-                                        __FUNCTION__);
+                                        __func__);
                }
 #endif
                __discard_prealloc(th, ei);
@@ -632,7 +632,7 @@ int reiserfs_parse_alloc_options(struct super_block *s, char *options)
                }
 
                reiserfs_warning(s, "zam-4001: %s : unknown option - %s",
-                                __FUNCTION__, this_char);
+                                __func__, this_char);
                return 1;
        }
 
@@ -1254,7 +1254,7 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
        bh = sb_bread(sb, block);
        if (bh == NULL)
                reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
-                                "reading failed", __FUNCTION__, block);
+                                "reading failed", __func__, block);
        else {
                if (buffer_locked(bh)) {
                        PROC_INFO_INC(sb, scan_bitmap.wait);
index 7ee4208793b6370321de3068b0f0c848549e8038..2f87f5b14630e2323c9e91c784adc8affb0b1831 100644 (file)
@@ -1464,29 +1464,29 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih,  /* item h
                                } else
                                        /* item falls wholly into S_new[i] */
                                {
-                                       int ret_val;
+                                       int leaf_mi;
                                        struct item_head *pasted;
 
 #ifdef CONFIG_REISERFS_CHECK
-                                       struct item_head *ih =
+                                       struct item_head *ih_check =
                                            B_N_PITEM_HEAD(tbS0, item_pos);
 
-                                       if (!is_direntry_le_ih(ih)
-                                           && (pos_in_item != ih_item_len(ih)
+                                       if (!is_direntry_le_ih(ih_check)
+                                           && (pos_in_item != ih_item_len(ih_check)
                                                || tb->insert_size[0] <= 0))
                                                reiserfs_panic(tb->tb_sb,
                                                               "PAP-12235: balance_leaf: pos_in_item must be equal to ih_item_len");
 #endif                         /* CONFIG_REISERFS_CHECK */
 
-                                       ret_val =
+                                       leaf_mi =
                                            leaf_move_items(LEAF_FROM_S_TO_SNEW,
                                                            tb, snum[i],
                                                            sbytes[i],
                                                            S_new[i]);
 
-                                       RFALSE(ret_val,
+                                       RFALSE(leaf_mi,
                                               "PAP-12240: unexpected value returned by leaf_move_items (%d)",
-                                              ret_val);
+                                              leaf_mi);
 
                                        /* paste into item */
                                        bi.tb = tb;
index 74363a7aacbcb64154c81b1f6bcb832e0fee8727..830332021ed47bd33e5dca5d903f5725db1ba7c5 100644 (file)
@@ -12,8 +12,6 @@
 #include <linux/smp_lock.h>
 #include <linux/compat.h>
 
-static int reiserfs_unpack(struct inode *inode, struct file *filp);
-
 /*
 ** reiserfs_ioctl - handler for ioctl for inode
 ** supported commands:
@@ -159,7 +157,7 @@ int reiserfs_prepare_write(struct file *f, struct page *page,
 ** This function tries to convert a tail from a direct item into an indirect one.
 ** It sets the nopack attribute in REISERFS_I(inode)->nopack.
 */
-static int reiserfs_unpack(struct inode *inode, struct file *filp)
+int reiserfs_unpack(struct inode *inode, struct file *filp)
 {
        int retval = 0;
        int index;
index 060eb3f598e7d58b90d9595a9e9d4dcac33c4046..e396b2fa4743e66b32d3ea4ad740aa403266fa40 100644 (file)
@@ -1187,7 +1187,7 @@ static int flush_commit_list(struct super_block *s,
 
        if (retval)
                reiserfs_abort(s, retval, "Journal write error in %s",
-                              __FUNCTION__);
+                              __func__);
        put_fs_excl();
        return retval;
 }
@@ -1534,7 +1534,7 @@ static int flush_journal_list(struct super_block *s,
                        reiserfs_warning(s,
                                         "clm-2082: Unable to flush buffer %llu in %s",
                                         (unsigned long long)saved_bh->
-                                        b_blocknr, __FUNCTION__);
+                                        b_blocknr, __func__);
                }
              free_cnode:
                last = cn;
@@ -1586,7 +1586,7 @@ static int flush_journal_list(struct super_block *s,
        if (err)
                reiserfs_abort(s, -EIO,
                               "Write error while pushing transaction to disk in %s",
-                              __FUNCTION__);
+                              __func__);
       flush_older_and_return:
 
        /* before we can update the journal header block, we _must_ flush all 
@@ -1616,7 +1616,7 @@ static int flush_journal_list(struct super_block *s,
                if (err)
                        reiserfs_abort(s, -EIO,
                                       "Write error while updating journal header in %s",
-                                      __FUNCTION__);
+                                      __func__);
        }
        remove_all_from_journal_list(s, jl, 0);
        list_del_init(&jl->j_list);
@@ -2574,11 +2574,9 @@ static int release_journal_dev(struct super_block *super,
 
        result = 0;
 
-       if (journal->j_dev_file != NULL) {
-               result = filp_close(journal->j_dev_file, NULL);
-               journal->j_dev_file = NULL;
-               journal->j_dev_bd = NULL;
-       } else if (journal->j_dev_bd != NULL) {
+       if (journal->j_dev_bd != NULL) {
+               if (journal->j_dev_bd->bd_dev != super->s_dev)
+                       bd_release(journal->j_dev_bd);
                result = blkdev_put(journal->j_dev_bd);
                journal->j_dev_bd = NULL;
        }
@@ -2603,7 +2601,6 @@ static int journal_init_dev(struct super_block *super,
        result = 0;
 
        journal->j_dev_bd = NULL;
-       journal->j_dev_file = NULL;
        jdev = SB_ONDISK_JOURNAL_DEVICE(super) ?
            new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev;
 
@@ -2620,35 +2617,34 @@ static int journal_init_dev(struct super_block *super,
                                         "cannot init journal device '%s': %i",
                                         __bdevname(jdev, b), result);
                        return result;
-               } else if (jdev != super->s_dev)
+               } else if (jdev != super->s_dev) {
+                       result = bd_claim(journal->j_dev_bd, journal);
+                       if (result) {
+                               blkdev_put(journal->j_dev_bd);
+                               return result;
+                       }
+
                        set_blocksize(journal->j_dev_bd, super->s_blocksize);
+               }
+
                return 0;
        }
 
-       journal->j_dev_file = filp_open(jdev_name, 0, 0);
-       if (!IS_ERR(journal->j_dev_file)) {
-               struct inode *jdev_inode = journal->j_dev_file->f_mapping->host;
-               if (!S_ISBLK(jdev_inode->i_mode)) {
-                       reiserfs_warning(super, "journal_init_dev: '%s' is "
-                                        "not a block device", jdev_name);
-                       result = -ENOTBLK;
-                       release_journal_dev(super, journal);
-               } else {
-                       /* ok */
-                       journal->j_dev_bd = I_BDEV(jdev_inode);
-                       set_blocksize(journal->j_dev_bd, super->s_blocksize);
-                       reiserfs_info(super,
-                                     "journal_init_dev: journal device: %s\n",
-                                     bdevname(journal->j_dev_bd, b));
-               }
-       } else {
-               result = PTR_ERR(journal->j_dev_file);
-               journal->j_dev_file = NULL;
+       journal->j_dev_bd = open_bdev_excl(jdev_name, 0, journal);
+       if (IS_ERR(journal->j_dev_bd)) {
+               result = PTR_ERR(journal->j_dev_bd);
+               journal->j_dev_bd = NULL;
                reiserfs_warning(super,
                                 "journal_init_dev: Cannot open '%s': %i",
                                 jdev_name, result);
+               return result;
        }
-       return result;
+
+       set_blocksize(journal->j_dev_bd, super->s_blocksize);
+       reiserfs_info(super,
+                     "journal_init_dev: journal device: %s\n",
+                     bdevname(journal->j_dev_bd, b));
+       return 0;
 }
 
 /**
@@ -4316,5 +4312,5 @@ static void __reiserfs_journal_abort_soft(struct super_block *sb, int errno)
 
 void reiserfs_journal_abort(struct super_block *sb, int errno)
 {
-       return __reiserfs_journal_abort_soft(sb, errno);
+       __reiserfs_journal_abort_soft(sb, errno);
 }
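
The journal_init_dev()/release_journal_dev() rework above drops the filp_open() route entirely: an external journal named by path is opened with open_bdev_excl() (which claims the device with the journal as holder), a journal addressed by device number is claimed explicitly with bd_claim(), and release undoes the claim with bd_release() before blkdev_put(). Condensed from the hunks above:

    /* open by name */
    journal->j_dev_bd = open_bdev_excl(jdev_name, 0, journal);
    if (IS_ERR(journal->j_dev_bd)) {
            result = PTR_ERR(journal->j_dev_bd);
            journal->j_dev_bd = NULL;
            return result;
    }
    set_blocksize(journal->j_dev_bd, super->s_blocksize);

    /* release */
    if (journal->j_dev_bd->bd_dev != super->s_dev)
            bd_release(journal->j_dev_bd);
    blkdev_put(journal->j_dev_bd);
    journal->j_dev_bd = NULL;
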
index 8867533cb727bcad8048f8dee072aa2137e8c567..c1add28dd45ec144ec946b3ddba2c06a340af405 100644 (file)
@@ -301,7 +301,7 @@ static int reiserfs_find_entry(struct inode *dir, const char *name, int namelen,
                                        path_to_entry, de);
                if (retval == IO_ERROR) {
                        reiserfs_warning(dir->i_sb, "zam-7001: io error in %s",
-                                        __FUNCTION__);
+                                        __func__);
                        return IO_ERROR;
                }
 
@@ -496,7 +496,7 @@ static int reiserfs_add_entry(struct reiserfs_transaction_handle *th,
                        reiserfs_warning(dir->i_sb,
                                         "zam-7002:%s: \"reiserfs_find_entry\" "
                                         "has returned unexpected value (%d)",
-                                        __FUNCTION__, retval);
+                                        __func__, retval);
                }
 
                return -EEXIST;
@@ -907,7 +907,7 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry)
 
        if (inode->i_nlink != 2 && inode->i_nlink != 1)
                reiserfs_warning(inode->i_sb, "%s: empty directory has nlink "
-                                "!= 2 (%d)", __FUNCTION__, inode->i_nlink);
+                                "!= 2 (%d)", __func__, inode->i_nlink);
 
        clear_nlink(inode);
        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
@@ -984,7 +984,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
 
        if (!inode->i_nlink) {
                reiserfs_warning(inode->i_sb, "%s: deleting nonexistent file "
-                                "(%s:%lu), %d", __FUNCTION__,
+                                "(%s:%lu), %d", __func__,
                                 reiserfs_bdevname(inode->i_sb), inode->i_ino,
                                 inode->i_nlink);
                inode->i_nlink = 1;
index 65feba4deb69a778168cf12ae42133fe8d115c3d..ea0cf8c28a9965a20ce35602fc6dd36919edbd00 100644 (file)
@@ -61,7 +61,7 @@ __u32 reiserfs_get_unused_objectid(struct reiserfs_transaction_handle *th)
        /* comment needed -Hans */
        unused_objectid = le32_to_cpu(map[1]);
        if (unused_objectid == U32_MAX) {
-               reiserfs_warning(s, "%s: no more object ids", __FUNCTION__);
+               reiserfs_warning(s, "%s: no more object ids", __func__);
                reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s));
                return 0;
        }
@@ -114,7 +114,7 @@ void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
                if (objectid_to_release == le32_to_cpu(map[i])) {
                        /* This incrementation unallocates the objectid. */
                        //map[i]++;
-                       map[i] = cpu_to_le32(le32_to_cpu(map[i]) + 1);
+                       le32_add_cpu(&map[i], 1);
 
                        /* Did we unallocate the last member of an odd sequence, and can shrink oids? */
                        if (map[i] == map[i + 1]) {
@@ -138,8 +138,7 @@ void reiserfs_release_objectid(struct reiserfs_transaction_handle *th,
                        /* size of objectid map is not changed */
                        if (objectid_to_release + 1 == le32_to_cpu(map[i + 1])) {
                                //objectid_map[i+1]--;
-                               map[i + 1] =
-                                   cpu_to_le32(le32_to_cpu(map[i + 1]) - 1);
+                               le32_add_cpu(&map[i + 1], -1);
                                return;
                        }
 
index 8f86c52b30d821884c3f576932e3e0d2e96981ba..b9dbeeca7049451e66281a1561da4f36a913703a 100644 (file)
@@ -467,6 +467,7 @@ static const struct file_operations r_file_operations = {
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
+       .owner = THIS_MODULE,
 };
 
 static struct proc_dir_entry *proc_info_root = NULL;
@@ -475,12 +476,8 @@ static const char proc_info_root_name[] = "fs/reiserfs";
 static void add_file(struct super_block *sb, char *name,
                     int (*func) (struct seq_file *, struct super_block *))
 {
-       struct proc_dir_entry *de;
-       de = create_proc_entry(name, 0, REISERFS_SB(sb)->procdir);
-       if (de) {
-               de->data = func;
-               de->proc_fops = &r_file_operations;
-       }
+       proc_create_data(name, 0, REISERFS_SB(sb)->procdir,
+                        &r_file_operations, func);
 }
 
 int reiserfs_proc_info_init(struct super_block *sb)
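
add_file() above now uses proc_create_data(), which attaches the private data pointer (here, the seq_file show callback) at creation time instead of poking de->data after the fact. A minimal sketch of the same pattern with a hypothetical entry name and callback:

    static int example_show(struct seq_file *m, struct super_block *sb)
    {
            seq_printf(m, "example\n");
            return 0;
    }

    /* e.g. from reiserfs_proc_info_init(): register under the per-sb procdir,
     * with ->data carrying the callback for r_file_operations to dispatch */
    proc_create_data("example", 0, REISERFS_SB(sb)->procdir,
                     &r_file_operations, example_show);
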
index d2db2417b2bd68a96ad57ff4e8bf429d5aeee0d1..abbc64dcc8d453cd8bd2281e382c85c37bd4fbf6 100644 (file)
@@ -1419,8 +1419,7 @@ int reiserfs_delete_object(struct reiserfs_transaction_handle *th,
 
                inode_generation =
                    &REISERFS_SB(th->t_super)->s_rs->s_inode_generation;
-               *inode_generation =
-                   cpu_to_le32(le32_to_cpu(*inode_generation) + 1);
+               le32_add_cpu(inode_generation, 1);
        }
 /* USE_INODE_GENERATION_COUNTER */
 #endif
index 393cc22c1717d582dc9e03d401ec514fd8ac4c99..ed424d708e6987777b4a2085f88d81df651c86a8 100644 (file)
@@ -304,7 +304,7 @@ static int finish_unfinished(struct super_block *s)
        /* Turn quotas off */
        for (i = 0; i < MAXQUOTAS; i++) {
                if (sb_dqopt(s)->files[i])
-                       vfs_quota_off_mount(s, i);
+                       vfs_quota_off(s, i, 0);
        }
        if (ms_active_set)
                /* Restore the flag back */
@@ -634,7 +634,7 @@ static int reiserfs_acquire_dquot(struct dquot *);
 static int reiserfs_release_dquot(struct dquot *);
 static int reiserfs_mark_dquot_dirty(struct dquot *);
 static int reiserfs_write_info(struct super_block *, int);
-static int reiserfs_quota_on(struct super_block *, int, int, char *);
+static int reiserfs_quota_on(struct super_block *, int, int, char *, int);
 
 static struct dquot_operations reiserfs_quota_operations = {
        .initialize = reiserfs_dquot_initialize,
@@ -1890,8 +1890,14 @@ static int reiserfs_dquot_drop(struct inode *inode)
        ret =
            journal_begin(&th, inode->i_sb,
                          2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
-       if (ret)
+       if (ret) {
+               /*
+                * We call dquot_drop() anyway to at least release references
+                * to quota structures so that umount does not hang.
+                */
+               dquot_drop(inode);
                goto out;
+       }
        ret = dquot_drop(inode);
        err =
            journal_end(&th, inode->i_sb,
@@ -2015,13 +2021,17 @@ static int reiserfs_quota_on_mount(struct super_block *sb, int type)
  * Standard function to be called on quota_on
  */
 static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
-                            char *path)
+                            char *path, int remount)
 {
        int err;
        struct nameidata nd;
+       struct inode *inode;
 
        if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA)))
                return -EINVAL;
+       /* No more checks needed? Path and format_id are bogus anyway... */
+       if (remount)
+               return vfs_quota_on(sb, type, format_id, path, 1);
        err = path_lookup(path, LOOKUP_FOLLOW, &nd);
        if (err)
                return err;
@@ -2030,18 +2040,24 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
                path_put(&nd.path);
                return -EXDEV;
        }
+       inode = nd.path.dentry->d_inode;
        /* We must not pack tails for quota files on reiserfs for quota IO to work */
-       if (!(REISERFS_I(nd.path.dentry->d_inode)->i_flags & i_nopack_mask)) {
-               reiserfs_warning(sb,
-                                "reiserfs: Quota file must have tail packing disabled.");
-               path_put(&nd.path);
-               return -EINVAL;
+       if (!(REISERFS_I(inode)->i_flags & i_nopack_mask)) {
+               err = reiserfs_unpack(inode, NULL);
+               if (err) {
+                       reiserfs_warning(sb,
+                               "reiserfs: Unpacking tail of quota file failed"
+                               " (%d). Cannot turn on quotas.", err);
+                       path_put(&nd.path);
+                       return -EINVAL;
+               }
+               mark_inode_dirty(inode);
        }
        /* Not journalling quota? No more tests needed... */
        if (!REISERFS_SB(sb)->s_qf_names[USRQUOTA] &&
            !REISERFS_SB(sb)->s_qf_names[GRPQUOTA]) {
                path_put(&nd.path);
-               return vfs_quota_on(sb, type, format_id, path);
+               return vfs_quota_on(sb, type, format_id, path, 0);
        }
        /* Quotafile not of fs root? */
        if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
@@ -2049,7 +2065,7 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
                                 "reiserfs: Quota file not on filesystem root. "
                                 "Journalled quota will not work.");
        path_put(&nd.path);
-       return vfs_quota_on(sb, type, format_id, path);
+       return vfs_quota_on(sb, type, format_id, path, 0);
 }
 
 /* Read data from quotafile - avoid pagecache and such because we cannot afford
index 00f58c5c7e05251ef4350e762481df8b07294212..2c292146e2464836983913d7046b3279167f2ce7 100644 (file)
@@ -425,7 +425,7 @@ sticky:
        return ret;
 }
 
-#ifdef TIF_RESTORE_SIGMASK
+#ifdef HAVE_SET_RESTORE_SIGMASK
 asmlinkage long sys_pselect7(int n, fd_set __user *inp, fd_set __user *outp,
                fd_set __user *exp, struct timespec __user *tsp,
                const sigset_t __user *sigmask, size_t sigsetsize)
@@ -498,7 +498,7 @@ sticky:
                if (sigmask) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                                        sizeof(sigsaved));
-                       set_thread_flag(TIF_RESTORE_SIGMASK);
+                       set_restore_sigmask();
                }
        } else if (sigmask)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);
@@ -528,7 +528,7 @@ asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
 
        return sys_pselect7(n, inp, outp, exp, tsp, up, sigsetsize);
 }
-#endif /* TIF_RESTORE_SIGMASK */
+#endif /* HAVE_SET_RESTORE_SIGMASK */
 
 struct poll_list {
        struct poll_list *next;
@@ -759,7 +759,7 @@ asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
        return ret;
 }
 
-#ifdef TIF_RESTORE_SIGMASK
+#ifdef HAVE_SET_RESTORE_SIGMASK
 asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
        struct timespec __user *tsp, const sigset_t __user *sigmask,
        size_t sigsetsize)
@@ -805,7 +805,7 @@ asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
                if (sigmask) {
                        memcpy(&current->saved_sigmask, &sigsaved,
                                        sizeof(sigsaved));
-                       set_thread_flag(TIF_RESTORE_SIGMASK);
+                       set_restore_sigmask();
                }
                ret = -ERESTARTNOHAND;
        } else if (sigmask)
@@ -839,4 +839,4 @@ asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
 
        return ret;
 }
-#endif /* TIF_RESTORE_SIGMASK */
+#endif /* HAVE_SET_RESTORE_SIGMASK */
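
Both the pselect and ppoll hunks follow the same pattern: the guard macro changes from the per-arch TIF_RESTORE_SIGMASK flag to HAVE_SET_RESTORE_SIGMASK, and the raw thread-flag write becomes the set_restore_sigmask() helper. Condensed from the hunks above:

    if (sigmask) {
            /* stash the caller's mask; it is restored on the way back to
             * userspace rather than right here */
            memcpy(&current->saved_sigmask, &sigsaved, sizeof(sigsaved));
            set_restore_sigmask();  /* was: set_thread_flag(TIF_RESTORE_SIGMASK) */
    }
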
index 734972b92694f03493e9440eb86f8557ba8621f3..fc4b1a5dd75531739bdd0d856e2f85782c3fe356 100644 (file)
  * these are normally enabled.
  */
 #ifdef SMBFS_PARANOIA
-# define PARANOIA(f, a...) printk(KERN_NOTICE "%s: " f, __FUNCTION__ , ## a)
+# define PARANOIA(f, a...) printk(KERN_NOTICE "%s: " f, __func__ , ## a)
 #else
 # define PARANOIA(f, a...) do { ; } while(0)
 #endif
 
 /* lots of debug messages */
 #ifdef SMBFS_DEBUG_VERBOSE
-# define VERBOSE(f, a...) printk(KERN_DEBUG "%s: " f, __FUNCTION__ , ## a)
+# define VERBOSE(f, a...) printk(KERN_DEBUG "%s: " f, __func__ , ## a)
 #else
 # define VERBOSE(f, a...) do { ; } while(0)
 #endif
@@ -28,7 +28,7 @@
  * too common name.
  */
 #ifdef SMBFS_DEBUG
-#define DEBUG1(f, a...) printk(KERN_DEBUG "%s: " f, __FUNCTION__ , ## a)
+#define DEBUG1(f, a...) printk(KERN_DEBUG "%s: " f, __func__ , ## a)
 #else
 #define DEBUG1(f, a...) do { ; } while(0)
 #endif
index eeb1a86a701467fcc90882f803ff7b39585003fc..633f58ebfb72a2ad7b132adb7492f7c268daecca 100644 (file)
@@ -1075,7 +1075,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
 
        ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
        if (ret > 0)
-               *ppos += ret;
+               *ppos = sd.pos;
 
        return ret;
 }
index 4798350b2bc9dc6c294fe47e91a9ed27c52b7dd3..453877c5697be1b927bc8b9654f98f9cad71644b 100644 (file)
@@ -117,7 +117,7 @@ static inline void destroy_super(struct super_block *s)
  * Drop a superblock's refcount.  Returns non-zero if the superblock was
  * destroyed.  The caller must hold sb_lock.
  */
-int __put_super(struct super_block *sb)
+static int __put_super(struct super_block *sb)
 {
        int ret = 0;
 
@@ -179,7 +179,7 @@ void deactivate_super(struct super_block *s)
        if (atomic_dec_and_lock(&s->s_active, &sb_lock)) {
                s->s_count -= S_BIAS-1;
                spin_unlock(&sb_lock);
-               DQUOT_OFF(s);
+               DQUOT_OFF(s, 0);
                down_write(&s->s_umount);
                fs->kill_sb(s);
                put_filesystem(fs);
@@ -608,6 +608,7 @@ retry:
 int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
 {
        int retval;
+       int remount_rw;
        
 #ifdef CONFIG_BLOCK
        if (!(flags & MS_RDONLY) && bdev_read_only(sb->s_bdev))
@@ -625,8 +626,11 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
                        mark_files_ro(sb);
                else if (!fs_may_remount_ro(sb))
                        return -EBUSY;
-               DQUOT_OFF(sb);
+               retval = DQUOT_OFF(sb, 1);
+               if (retval < 0 && retval != -ENOSYS)
+                       return -EBUSY;
        }
+       remount_rw = !(flags & MS_RDONLY) && (sb->s_flags & MS_RDONLY);
 
        if (sb->s_op->remount_fs) {
                lock_super(sb);
@@ -636,6 +640,8 @@ int do_remount_sb(struct super_block *sb, int flags, void *data, int force)
                        return retval;
        }
        sb->s_flags = (sb->s_flags & ~MS_RMT_MASK) | (flags & MS_RMT_MASK);
+       if (remount_rw)
+               DQUOT_ON_REMOUNT(sb);
        return 0;
 }
 
index 7cd005ea76390f385dc85af2c11e8121b6b4dd69..228e17b5e9ee911c2c2f58b4090e33b96954ff23 100644 (file)
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -64,7 +64,7 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
        /* sync the superblock to buffers */
        sb = inode->i_sb;
        lock_super(sb);
-       if (sb->s_op->write_super)
+       if (sb->s_dirt && sb->s_op->write_super)
                sb->s_op->write_super(sb);
        unlock_super(sb);
 
index ade9a7e6a75763cd11304a638113f86e18f09d01..e7735f643cd1b03eab8560479d6f691e387ce218 100644 (file)
@@ -135,7 +135,7 @@ sysfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
                        goto out;
        }
        pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
-                __FUNCTION__, count, *ppos, buffer->page);
+                __func__, count, *ppos, buffer->page);
        retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
                                         buffer->count);
 out:
@@ -477,11 +477,10 @@ const struct file_operations sysfs_file_operations = {
        .poll           = sysfs_poll,
 };
 
-
-int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
-                  int type)
+int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
+                       const struct attribute *attr, int type, mode_t amode)
 {
-       umode_t mode = (attr->mode & S_IALLUGO) | S_IFREG;
+       umode_t mode = (amode & S_IALLUGO) | S_IFREG;
        struct sysfs_addrm_cxt acxt;
        struct sysfs_dirent *sd;
        int rc;
@@ -502,6 +501,13 @@ int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
 }
 
 
+int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
+                  int type)
+{
+       return sysfs_add_file_mode(dir_sd, attr, type, attr->mode);
+}
+
+
 /**
  *     sysfs_create_file - create an attribute file for an object.
  *     @kobj:  object we're creating for. 
index 477904915032e9359893d6128f3515a1d86963c7..eeba38417b1d4c5db2a54e381fb68b8441eb7743 100644 (file)
@@ -23,35 +23,50 @@ static void remove_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
        int i;
 
        for (i = 0, attr = grp->attrs; *attr; i++, attr++)
-               if (!grp->is_visible ||
-                   grp->is_visible(kobj, *attr, i))
-                       sysfs_hash_and_remove(dir_sd, (*attr)->name);
+               sysfs_hash_and_remove(dir_sd, (*attr)->name);
 }
 
 static int create_files(struct sysfs_dirent *dir_sd, struct kobject *kobj,
-                       const struct attribute_group *grp)
+                       const struct attribute_group *grp, int update)
 {
        struct attribute *const* attr;
        int error = 0, i;
 
-       for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++)
-               if (!grp->is_visible ||
-                   grp->is_visible(kobj, *attr, i))
-                       error |=
-                               sysfs_add_file(dir_sd, *attr, SYSFS_KOBJ_ATTR);
+       for (i = 0, attr = grp->attrs; *attr && !error; i++, attr++) {
+               mode_t mode = 0;
+
+               /* in update mode, we're changing the permissions or
+                * visibility.  Do this by first removing then
+                * re-adding (if required) the file */
+               if (update)
+                       sysfs_hash_and_remove(dir_sd, (*attr)->name);
+               if (grp->is_visible) {
+                       mode = grp->is_visible(kobj, *attr, i);
+                       if (!mode)
+                               continue;
+               }
+               error = sysfs_add_file_mode(dir_sd, *attr, SYSFS_KOBJ_ATTR,
+                                           (*attr)->mode | mode);
+               if (unlikely(error))
+                       break;
+       }
        if (error)
                remove_files(dir_sd, kobj, grp);
        return error;
 }
 
 
-int sysfs_create_group(struct kobject * kobj, 
-                      const struct attribute_group * grp)
+static int internal_create_group(struct kobject *kobj, int update,
+                                const struct attribute_group *grp)
 {
        struct sysfs_dirent *sd;
        int error;
 
-       BUG_ON(!kobj || !kobj->sd);
+       BUG_ON(!kobj || (!update && !kobj->sd));
+
+       /* Updates may happen before the object has been instantiated */
+       if (unlikely(update && !kobj->sd))
+               return -EINVAL;
 
        if (grp->name) {
                error = sysfs_create_subdir(kobj, grp->name, &sd);
@@ -60,7 +75,7 @@ int sysfs_create_group(struct kobject * kobj,
        } else
                sd = kobj->sd;
        sysfs_get(sd);
-       error = create_files(sd, kobj, grp);
+       error = create_files(sd, kobj, grp, update);
        if (error) {
                if (grp->name)
                        sysfs_remove_subdir(sd);
@@ -69,6 +84,47 @@ int sysfs_create_group(struct kobject * kobj,
        return error;
 }
 
+/**
+ * sysfs_create_group - given a directory kobject, create an attribute group
+ * @kobj:      The kobject to create the group on
+ * @grp:       The attribute group to create
+ *
+ * This function creates a group for the first time.  It will explicitly
+ * warn and error if any of the attribute files being created already exist.
+ *
+ * Returns 0 on success or error.
+ */
+int sysfs_create_group(struct kobject *kobj,
+                      const struct attribute_group *grp)
+{
+       return internal_create_group(kobj, 0, grp);
+}
+
+/**
+ * sysfs_update_group - given a directory kobject, update an attribute group
+ * @kobj:      The kobject to create the group on
+ * @grp:       The attribute group to create
+ *
+ * This function updates an attribute group.  Unlike
+ * sysfs_create_group(), it will explicitly not warn or error if any
+ * of the attribute files being created already exist.  Furthermore,
+ * if the visibility of the files has changed through the is_visible()
+ * callback, it will update the permissions and add or remove the
+ * relevant files.
+ *
+ * The primary use for this function is to call it after making a change
+ * that affects group visibility.
+ *
+ * Returns 0 on success or error.
+ */
+int sysfs_update_group(struct kobject *kobj,
+                      const struct attribute_group *grp)
+{
+       return internal_create_group(kobj, 1, grp);
+}
+
+
+
 void sysfs_remove_group(struct kobject * kobj, 
                        const struct attribute_group * grp)
 {
@@ -95,4 +151,5 @@ void sysfs_remove_group(struct kobject * kobj,
 
 
 EXPORT_SYMBOL_GPL(sysfs_create_group);
+EXPORT_SYMBOL_GPL(sysfs_update_group);
 EXPORT_SYMBOL_GPL(sysfs_remove_group);
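
sysfs_update_group() shares internal_create_group() with sysfs_create_group(); in update mode each attribute is removed first and, if ->is_visible() returns a non-zero mode, re-added with that mode OR-ed into attr->mode. A sketch of a caller, where example_attrs, example_feature_enabled and kobj are hypothetical placeholders:

    static mode_t example_is_visible(struct kobject *kobj,
                                     struct attribute *attr, int index)
    {
            /* hide every attribute while the feature is off */
            return example_feature_enabled ? attr->mode : 0;
    }

    static const struct attribute_group example_group = {
            .attrs      = example_attrs,        /* hypothetical attribute list */
            .is_visible = example_is_visible,
    };

    /* call after example_feature_enabled changes to refresh visibility */
    sysfs_update_group(kobj, &example_group);
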
index d9262f74f94e50c3c2efacd256f9a9536dc3d850..eb53c632f8564a100d92e4670b8c6764776a3514 100644 (file)
@@ -30,7 +30,7 @@ static const struct address_space_operations sysfs_aops = {
 
 static struct backing_dev_info sysfs_backing_dev_info = {
        .ra_pages       = 0,    /* No readahead */
-       .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
 
 static const struct inode_operations sysfs_inode_operations ={
@@ -59,6 +59,8 @@ int sysfs_setattr(struct dentry * dentry, struct iattr * iattr)
        if (error)
                return error;
 
+       iattr->ia_valid &= ~ATTR_SIZE; /* ignore size changes */
+
        error = inode_setattr(inode, iattr);
        if (error)
                return error;
index 74168266cd59a1abca9e8039131c0ce46b240980..14f0023984d74d6a51dedc0791fdfab53f2d5cc4 100644 (file)
@@ -61,7 +61,7 @@ static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
        /* instantiate and link root dentry */
        root = d_alloc_root(inode);
        if (!root) {
-               pr_debug("%s: could not get root dentry!\n",__FUNCTION__);
+               pr_debug("%s: could not get root dentry!\n",__func__);
                iput(inode);
                return -ENOMEM;
        }
index ff17f8da9b43f5d47ec69a80021e51aa04ffed8b..ce4e15f8aaebce521b4e8b4469c5923307cedad3 100644 (file)
@@ -154,6 +154,8 @@ extern const struct file_operations sysfs_file_operations;
 int sysfs_add_file(struct sysfs_dirent *dir_sd,
                   const struct attribute *attr, int type);
 
+int sysfs_add_file_mode(struct sysfs_dirent *dir_sd,
+                       const struct attribute *attr, int type, mode_t amode);
 /*
  * bin.c
  */
index 42d51d1c05cd418af8eeb0916fac5527ea99c258..38ebe3f85b3d092059ae81f7ad3ad6ce2db78208 100644 (file)
@@ -217,9 +217,9 @@ static inline __fs32 fs32_add(struct sysv_sb_info *sbi, __fs32 *n, int d)
        if (sbi->s_bytesex == BYTESEX_PDP)
                *(__u32*)n = PDP_swab(PDP_swab(*(__u32*)n)+d);
        else if (sbi->s_bytesex == BYTESEX_LE)
-               *(__le32*)n = cpu_to_le32(le32_to_cpu(*(__le32*)n)+d);
+               le32_add_cpu((__le32 *)n, d);
        else
-               *(__be32*)n = cpu_to_be32(be32_to_cpu(*(__be32*)n)+d);
+               be32_add_cpu((__be32 *)n, d);
        return *n;
 }
 
@@ -242,9 +242,9 @@ static inline __fs16 cpu_to_fs16(struct sysv_sb_info *sbi, __u16 n)
 static inline __fs16 fs16_add(struct sysv_sb_info *sbi, __fs16 *n, int d)
 {
        if (sbi->s_bytesex != BYTESEX_BE)
-               *(__le16*)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)+d);
+               le16_add_cpu((__le16 *)n, d);
        else
-               *(__be16*)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)+d);
+               be16_add_cpu((__be16 *)n, d);
        return *n;
 }
 
index 10c80b59ec4bb7e542b8864f369ec10da7dc34f5..5400524e9cb1d2b2e40bf810a1282277e54eec98 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/hrtimer.h>
 #include <linux/anon_inodes.h>
 #include <linux/timerfd.h>
+#include <linux/syscalls.h>
 
 struct timerfd_ctx {
        struct hrtimer tmr;
index ba5537d4bc151fcbe9575d89a30b2b0c860e92ae..2b34c8ca6c83ed2e6aa01f955664ffccf7eb1ec0 100644 (file)
@@ -890,7 +890,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
 
        if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
                kernel_lb_addr eloc;
-               uint32_t elen;
+               uint32_t bsize;
 
                block = udf_new_block(inode->i_sb, inode,
                                iinfo->i_location.partitionReferenceNum,
@@ -903,9 +903,9 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
                eloc.logicalBlockNum = block;
                eloc.partitionReferenceNum =
                                iinfo->i_location.partitionReferenceNum;
-               elen = inode->i_sb->s_blocksize;
-               iinfo->i_lenExtents = elen;
-               udf_add_aext(inode, &epos, eloc, elen, 0);
+               bsize = inode->i_sb->s_blocksize;
+               iinfo->i_lenExtents = bsize;
+               udf_add_aext(inode, &epos, eloc, bsize, 0);
                brelse(epos.bh);
 
                block = udf_get_pblock(inode->i_sb, block,
index b564fc140fe4e90efc24b7b7f231a5676aa979b6..9fb18a340fc1efa95fb0acd15d50c89c90652875 100644 (file)
@@ -240,7 +240,7 @@ static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
        sbi->s_partmaps = kcalloc(count, sizeof(struct udf_part_map),
                                  GFP_KERNEL);
        if (!sbi->s_partmaps) {
-               udf_error(sb, __FUNCTION__,
+               udf_error(sb, __func__,
                          "Unable to allocate space for %d partition maps",
                          count);
                sbi->s_partitions = 0;
@@ -1086,7 +1086,7 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
                bitmap = vmalloc(size); /* TODO: get rid of vmalloc */
 
        if (bitmap == NULL) {
-               udf_error(sb, __FUNCTION__,
+               udf_error(sb, __func__,
                          "Unable to allocate space for bitmap "
                          "and %d buffer_head pointers", nr_groups);
                return NULL;
index 1e7598fb97879d3a686939ec24a60234a7518bbf..0d9ada173739c2c0ef701ff361afda1843cfd543 100644 (file)
@@ -277,7 +277,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
                        if (!page)/* it was truncated */
                                continue;
                        if (IS_ERR(page)) {/* or EIO */
-                               ufs_error(inode->i_sb, __FUNCTION__,
+                               ufs_error(inode->i_sb, __func__,
                                          "read of page %llu failed\n",
                                          (unsigned long long)index);
                                continue;
@@ -308,7 +308,7 @@ static void ufs_change_blocknr(struct inode *inode, sector_t beg,
                                ll_rw_block(READ, 1, &bh);
                                wait_on_buffer(bh);
                                if (!buffer_uptodate(bh)) {
-                                       ufs_error(inode->i_sb, __FUNCTION__,
+                                       ufs_error(inode->i_sb, __func__,
                                                  "read of block failed\n");
                                        break;
                                }
index ef563fc8d72c3b62418d5ae13548ae156548d94a..df0bef18742dc63b6dee218e029b2e90d35b1c37 100644 (file)
@@ -179,7 +179,7 @@ bad_entry:
        goto fail;
 Eend:
        p = (struct ufs_dir_entry *)(kaddr + offs);
-       ufs_error(sb, __FUNCTION__,
+       ufs_error(sb, __func__,
                   "entry in directory #%lu spans the page boundary"
                   "offset=%lu",
                   dir->i_ino, (page->index<<PAGE_CACHE_SHIFT)+offs);
@@ -284,7 +284,7 @@ struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct dentry *dentry,
                        kaddr += ufs_last_byte(dir, n) - reclen;
                        while ((char *) de <= kaddr) {
                                if (de->d_reclen == 0) {
-                                       ufs_error(dir->i_sb, __FUNCTION__,
+                                       ufs_error(dir->i_sb, __func__,
                                                  "zero-length directory entry");
                                        ufs_put_page(page);
                                        goto out;
@@ -356,7 +356,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
                                goto got_it;
                        }
                        if (de->d_reclen == 0) {
-                               ufs_error(dir->i_sb, __FUNCTION__,
+                               ufs_error(dir->i_sb, __func__,
                                          "zero-length directory entry");
                                err = -EIO;
                                goto out_unlock;
@@ -456,7 +456,7 @@ ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                struct page *page = ufs_get_page(inode, n);
 
                if (IS_ERR(page)) {
-                       ufs_error(sb, __FUNCTION__,
+                       ufs_error(sb, __func__,
                                  "bad page in #%lu",
                                  inode->i_ino);
                        filp->f_pos += PAGE_CACHE_SIZE - offset;
@@ -475,7 +475,7 @@ ufs_readdir(struct file *filp, void *dirent, filldir_t filldir)
                limit = kaddr + ufs_last_byte(inode, n) - UFS_DIR_REC_LEN(1);
                for ( ;(char*)de <= limit; de = ufs_next_entry(sb, de)) {
                        if (de->d_reclen == 0) {
-                               ufs_error(sb, __FUNCTION__,
+                               ufs_error(sb, __func__,
                                        "zero-length directory entry");
                                ufs_put_page(page);
                                return -EIO;
@@ -536,7 +536,7 @@ int ufs_delete_entry(struct inode *inode, struct ufs_dir_entry *dir,
 
        while ((char*)de < (char*)dir) {
                if (de->d_reclen == 0) {
-                       ufs_error(inode->i_sb, __FUNCTION__,
+                       ufs_error(inode->i_sb, __func__,
                                  "zero-length directory entry");
                        err = -EIO;
                        goto out;
@@ -633,7 +633,7 @@ int ufs_empty_dir(struct inode * inode)
 
                while ((char *)de <= kaddr) {
                        if (de->d_reclen == 0) {
-                               ufs_error(inode->i_sb, __FUNCTION__,
+                               ufs_error(inode->i_sb, __func__,
                                        "zero-length directory entry: "
                                        "kaddr=%p, de=%p\n", kaddr, de);
                                goto not_empty;
index 5446b888fc8e34448078b22096983b04262b71dc..39f8778985655b1f1a166575e013b940af7965d4 100644 (file)
@@ -929,7 +929,7 @@ void ufs_delete_inode (struct inode * inode)
        old_i_size = inode->i_size;
        inode->i_size = 0;
        if (inode->i_blocks && ufs_truncate(inode, old_i_size))
-               ufs_warning(inode->i_sb, __FUNCTION__, "ufs_truncate failed\n");
+               ufs_warning(inode->i_sb, __func__, "ufs_truncate failed\n");
        ufs_free_inode (inode);
        unlock_kernel();
        return;
index 1683d2bee614249a40d91d79af64db3295857d88..8d974c4fd18b1eea45ddc849803f01904f7b8abb 100644 (file)
@@ -40,25 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
                return (__force __fs64)cpu_to_be64(n);
 }
 
-static __inline u32
-fs64_add(struct super_block *sbp, u32 *n, int d)
-{
-       if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
-               return *n = cpu_to_le64(le64_to_cpu(*n)+d);
-       else
-               return *n = cpu_to_be64(be64_to_cpu(*n)+d);
-}
-
-static __inline u32
-fs64_sub(struct super_block *sbp, u32 *n, int d)
-{
-       if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
-               return *n = cpu_to_le64(le64_to_cpu(*n)-d);
-       else
-               return *n = cpu_to_be64(be64_to_cpu(*n)-d);
-}
-
-static __inline u32
+static inline u32
 fs32_to_cpu(struct super_block *sbp, __fs32 n)
 {
        if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
@@ -80,18 +62,18 @@ static inline void
 fs32_add(struct super_block *sbp, __fs32 *n, int d)
 {
        if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
-               *(__le32 *)n = cpu_to_le32(le32_to_cpu(*(__le32 *)n)+d);
+               le32_add_cpu((__le32 *)n, d);
        else
-               *(__be32 *)n = cpu_to_be32(be32_to_cpu(*(__be32 *)n)+d);
+               be32_add_cpu((__be32 *)n, d);
 }
 
 static inline void
 fs32_sub(struct super_block *sbp, __fs32 *n, int d)
 {
        if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
-               *(__le32 *)n = cpu_to_le32(le32_to_cpu(*(__le32 *)n)-d);
+               le32_add_cpu((__le32 *)n, -d);
        else
-               *(__be32 *)n = cpu_to_be32(be32_to_cpu(*(__be32 *)n)-d);
+               be32_add_cpu((__be32 *)n, -d);
 }
 
 static inline u16
@@ -116,18 +98,18 @@ static inline void
 fs16_add(struct super_block *sbp, __fs16 *n, int d)
 {
        if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
-               *(__le16 *)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)+d);
+               le16_add_cpu((__le16 *)n, d);
        else
-               *(__be16 *)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)+d);
+               be16_add_cpu((__be16 *)n, d);
 }
 
 static inline void
 fs16_sub(struct super_block *sbp, __fs16 *n, int d)
 {
        if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
-               *(__le16 *)n = cpu_to_le16(le16_to_cpu(*(__le16 *)n)-d);
+               le16_add_cpu((__le16 *)n, -d);
        else
-               *(__be16 *)n = cpu_to_be16(be16_to_cpu(*(__be16 *)n)-d);
+               be16_add_cpu((__be16 *)n, -d);
 }
 
 #endif /* _UFS_SWAB_H */
index fcb9231bb9edac22b09593d8fa1d5e80624cde6a..244a1aaa940eea43a82619ff8bde98fa27b4caed 100644 (file)
@@ -66,7 +66,7 @@ struct ufs_inode_info {
 #ifdef CONFIG_UFS_DEBUG
 #      define UFSD(f, a...)    {                                       \
                printk ("UFSD (%s, %d): %s:",                           \
-                       __FILE__, __LINE__, __FUNCTION__);              \
+                       __FILE__, __LINE__, __func__);          \
                printk (f, ## a);                                       \
        }
 #else
index a2bef77dc9c9878c3f93684acf86548f443d989f..af059d5cb485df8289eda123380f4937c0c7b77c 100644 (file)
@@ -40,9 +40,14 @@ asmlinkage long sys_utime(char __user *filename, struct utimbuf __user *times)
 
 #endif
 
+static bool nsec_special(long nsec)
+{
+       return nsec == UTIME_OMIT || nsec == UTIME_NOW;
+}
+
 static bool nsec_valid(long nsec)
 {
-       if (nsec == UTIME_OMIT || nsec == UTIME_NOW)
+       if (nsec_special(nsec))
                return true;
 
        return nsec >= 0 && nsec <= 999999999;
@@ -119,7 +124,15 @@ long do_utimes(int dfd, char __user *filename, struct timespec *times, int flags
                        newattrs.ia_mtime.tv_nsec = times[1].tv_nsec;
                        newattrs.ia_valid |= ATTR_MTIME_SET;
                }
-       } else {
+       }
+
+       /*
+        * If times is NULL or both times are either UTIME_OMIT or
+        * UTIME_NOW, then need to check permissions, because
+        * inode_change_ok() won't do it.
+        */
+       if (!times || (nsec_special(times[0].tv_nsec) &&
+                      nsec_special(times[1].tv_nsec))) {
                error = -EACCES;
                 if (IS_IMMUTABLE(inode))
                        goto mnt_drop_write_and_out;
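
The utimes change above applies the explicit permission checks (immutable flag, write access or ownership) only when times is NULL or both entries are UTIME_NOW/UTIME_OMIT, because inode_change_ok() does not cover that case. From userspace the affected interface is utimensat(); a small example (not from this patch) that updates only the mtime:

    #include <fcntl.h>
    #include <sys/stat.h>

    int touch_mtime(const char *path)
    {
            struct timespec ts[2] = {
                    { .tv_nsec = UTIME_OMIT },  /* leave atime untouched */
                    { .tv_nsec = UTIME_NOW },   /* mtime = current time */
            };
            return utimensat(AT_FDCWD, path, ts, 0);
    }
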
index cd450bea9f1a179220364e9d42e48561a4d860a1..a3522727ea5b090ea4ee02d925e7327cde21fe7b 100644 (file)
@@ -176,15 +176,10 @@ static inline int vfat_is_used_badchars(const wchar_t *s, int len)
        for (i = 0; i < len; i++)
                if (vfat_bad_char(s[i]))
                        return -EINVAL;
-       return 0;
-}
 
-static int vfat_valid_longname(const unsigned char *name, unsigned int len)
-{
-       if (name[len - 1] == ' ')
+       if (s[i - 1] == ' ') /* last character cannot be space */
                return -EINVAL;
-       if (len >= 256)
-               return -ENAMETOOLONG;
+
        return 0;
 }
 
@@ -477,7 +472,7 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
        if (utf8) {
                int name_len = strlen(name);
 
-               *outlen = utf8_mbstowcs((wchar_t *)outname, name, PAGE_SIZE);
+               *outlen = utf8_mbstowcs((wchar_t *)outname, name, PATH_MAX);
 
                /*
                 * We stripped '.'s before and set len appropriately,
@@ -485,11 +480,14 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
                 */
                *outlen -= (name_len - len);
 
+               if (*outlen > 255)
+                       return -ENAMETOOLONG;
+
                op = &outname[*outlen * sizeof(wchar_t)];
        } else {
                if (nls) {
                        for (i = 0, ip = name, op = outname, *outlen = 0;
-                            i < len && *outlen <= 260;
+                            i < len && *outlen <= 255;
                             *outlen += 1)
                        {
                                if (escape && (*ip == ':')) {
@@ -525,18 +523,20 @@ xlate_to_uni(const unsigned char *name, int len, unsigned char *outname,
                                        op += 2;
                                }
                        }
+                       if (i < len)
+                               return -ENAMETOOLONG;
                } else {
                        for (i = 0, ip = name, op = outname, *outlen = 0;
-                            i < len && *outlen <= 260;
+                            i < len && *outlen <= 255;
                             i++, *outlen += 1)
                        {
                                *op++ = *ip++;
                                *op++ = 0;
                        }
+                       if (i < len)
+                               return -ENAMETOOLONG;
                }
        }
-       if (*outlen > 260)
-               return -ENAMETOOLONG;
 
        *longlen = *outlen;
        if (*outlen % 13) {
@@ -565,7 +565,6 @@ static int vfat_build_slots(struct inode *dir, const unsigned char *name,
        struct fat_mount_options *opts = &sbi->options;
        struct msdos_dir_slot *ps;
        struct msdos_dir_entry *de;
-       unsigned long page;
        unsigned char cksum, lcase;
        unsigned char msdos_name[MSDOS_NAME];
        wchar_t *uname;
@@ -574,15 +573,11 @@ static int vfat_build_slots(struct inode *dir, const unsigned char *name,
        loff_t offset;
 
        *nr_slots = 0;
-       err = vfat_valid_longname(name, len);
-       if (err)
-               return err;
 
-       page = __get_free_page(GFP_KERNEL);
-       if (!page)
+       uname = __getname();
+       if (!uname)
                return -ENOMEM;
 
-       uname = (wchar_t *)page;
        err = xlate_to_uni(name, len, (unsigned char *)uname, &ulen, &usize,
                           opts->unicode_xlate, opts->utf8, sbi->nls_io);
        if (err)
@@ -634,7 +629,7 @@ shortname:
        de->starthi = cpu_to_le16(cluster >> 16);
        de->size = 0;
 out_free:
-       free_page(page);
+       __putname(uname);
        return err;
 }
 
@@ -991,7 +986,7 @@ error_inode:
        if (corrupt < 0) {
                fat_fs_panic(new_dir->i_sb,
                             "%s: Filesystem corrupted (i_pos %lld)",
-                            __FUNCTION__, sinfo.i_pos);
+                            __func__, sinfo.i_pos);
        }
        goto out;
 }
@@ -1003,7 +998,7 @@ static const struct inode_operations vfat_dir_inode_operations = {
        .mkdir          = vfat_mkdir,
        .rmdir          = vfat_rmdir,
        .rename         = vfat_rename,
-       .setattr        = fat_notify_change,
+       .setattr        = fat_setattr,
        .getattr        = fat_getattr,
 };
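
With the checks above, a VFAT long name is capped at 255 UTF-16 code units and, as the tail of xlate_to_uni() shows, gets padded out to whole 13-unit directory slots. A small standalone sketch of that arithmetic (the helper name is hypothetical, not something the driver defines):

/* sketch: each VFAT long-name directory slot carries 13 UTF-16 code units */
static unsigned int vfat_slots_for_name(unsigned int outlen)
{
	return (outlen + 13 - 1) / 13;	/* round up to whole slots */
}

/* e.g. 13 units -> 1 slot, 14 units -> 2 slots, the 255-unit maximum -> 20 slots */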
 
index 89a942f07e1be839af7d8cef99e377d85db6fb27..4706a8b1f4959cdf955cc8b625ee5bac3b53ccf7 100644 (file)
@@ -67,7 +67,7 @@ xattr_permission(struct inode *inode, const char *name, int mask)
 }
 
 int
-vfs_setxattr(struct dentry *dentry, char *name, void *value,
+vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
                size_t size, int flags)
 {
        struct inode *inode = dentry->d_inode;
@@ -131,7 +131,7 @@ out_noalloc:
 EXPORT_SYMBOL_GPL(xattr_getsecurity);
 
 ssize_t
-vfs_getxattr(struct dentry *dentry, char *name, void *value, size_t size)
+vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
 {
        struct inode *inode = dentry->d_inode;
        int error;
@@ -187,7 +187,7 @@ vfs_listxattr(struct dentry *d, char *list, size_t size)
 EXPORT_SYMBOL_GPL(vfs_listxattr);
 
 int
-vfs_removexattr(struct dentry *dentry, char *name)
+vfs_removexattr(struct dentry *dentry, const char *name)
 {
        struct inode *inode = dentry->d_inode;
        int error;
@@ -218,7 +218,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
  * Extended attribute SET operations
  */
 static long
-setxattr(struct dentry *d, char __user *name, void __user *value,
+setxattr(struct dentry *d, const char __user *name, const void __user *value,
         size_t size, int flags)
 {
        int error;
@@ -252,8 +252,8 @@ setxattr(struct dentry *d, char __user *name, void __user *value,
 }
 
 asmlinkage long
-sys_setxattr(char __user *path, char __user *name, void __user *value,
-            size_t size, int flags)
+sys_setxattr(const char __user *path, const char __user *name,
+            const void __user *value, size_t size, int flags)
 {
        struct nameidata nd;
        int error;
@@ -271,8 +271,8 @@ sys_setxattr(char __user *path, char __user *name, void __user *value,
 }
 
 asmlinkage long
-sys_lsetxattr(char __user *path, char __user *name, void __user *value,
-             size_t size, int flags)
+sys_lsetxattr(const char __user *path, const char __user *name,
+             const void __user *value, size_t size, int flags)
 {
        struct nameidata nd;
        int error;
@@ -290,7 +290,7 @@ sys_lsetxattr(char __user *path, char __user *name, void __user *value,
 }
 
 asmlinkage long
-sys_fsetxattr(int fd, char __user *name, void __user *value,
+sys_fsetxattr(int fd, const char __user *name, const void __user *value,
              size_t size, int flags)
 {
        struct file *f;
@@ -315,7 +315,8 @@ sys_fsetxattr(int fd, char __user *name, void __user *value,
  * Extended attribute GET operations
  */
 static ssize_t
-getxattr(struct dentry *d, char __user *name, void __user *value, size_t size)
+getxattr(struct dentry *d, const char __user *name, void __user *value,
+        size_t size)
 {
        ssize_t error;
        void *kvalue = NULL;
@@ -349,8 +350,8 @@ getxattr(struct dentry *d, char __user *name, void __user *value, size_t size)
 }
 
 asmlinkage ssize_t
-sys_getxattr(char __user *path, char __user *name, void __user *value,
-            size_t size)
+sys_getxattr(const char __user *path, const char __user *name,
+            void __user *value, size_t size)
 {
        struct nameidata nd;
        ssize_t error;
@@ -364,7 +365,7 @@ sys_getxattr(char __user *path, char __user *name, void __user *value,
 }
 
 asmlinkage ssize_t
-sys_lgetxattr(char __user *path, char __user *name, void __user *value,
+sys_lgetxattr(const char __user *path, const char __user *name, void __user *value,
              size_t size)
 {
        struct nameidata nd;
@@ -379,7 +380,7 @@ sys_lgetxattr(char __user *path, char __user *name, void __user *value,
 }
 
 asmlinkage ssize_t
-sys_fgetxattr(int fd, char __user *name, void __user *value, size_t size)
+sys_fgetxattr(int fd, const char __user *name, void __user *value, size_t size)
 {
        struct file *f;
        ssize_t error = -EBADF;
@@ -424,7 +425,7 @@ listxattr(struct dentry *d, char __user *list, size_t size)
 }
 
 asmlinkage ssize_t
-sys_listxattr(char __user *path, char __user *list, size_t size)
+sys_listxattr(const char __user *path, char __user *list, size_t size)
 {
        struct nameidata nd;
        ssize_t error;
@@ -438,7 +439,7 @@ sys_listxattr(char __user *path, char __user *list, size_t size)
 }
 
 asmlinkage ssize_t
-sys_llistxattr(char __user *path, char __user *list, size_t size)
+sys_llistxattr(const char __user *path, char __user *list, size_t size)
 {
        struct nameidata nd;
        ssize_t error;
@@ -470,7 +471,7 @@ sys_flistxattr(int fd, char __user *list, size_t size)
  * Extended attribute REMOVE operations
  */
 static long
-removexattr(struct dentry *d, char __user *name)
+removexattr(struct dentry *d, const char __user *name)
 {
        int error;
        char kname[XATTR_NAME_MAX + 1];
@@ -485,7 +486,7 @@ removexattr(struct dentry *d, char __user *name)
 }
 
 asmlinkage long
-sys_removexattr(char __user *path, char __user *name)
+sys_removexattr(const char __user *path, const char __user *name)
 {
        struct nameidata nd;
        int error;
@@ -503,7 +504,7 @@ sys_removexattr(char __user *path, char __user *name)
 }
 
 asmlinkage long
-sys_lremovexattr(char __user *path, char __user *name)
+sys_lremovexattr(const char __user *path, const char __user *name)
 {
        struct nameidata nd;
        int error;
@@ -521,7 +522,7 @@ sys_lremovexattr(char __user *path, char __user *name)
 }
 
 asmlinkage long
-sys_fremovexattr(int fd, char __user *name)
+sys_fremovexattr(int fd, const char __user *name)
 {
        struct file *f;
        struct dentry *dentry;
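
The const-ification above lets in-kernel callers pass string literals and read-only buffers straight through; a minimal sketch of a call that now needs no casts (the attribute name, value, and wrapper function are purely illustrative):

/* sketch: string-literal name and const value buffer with the new prototype */
#include <linux/xattr.h>

static int set_example_xattr(struct dentry *dentry)
{
	static const char value[] = "example";

	return vfs_setxattr(dentry, "user.example", value,
			    sizeof(value) - 1, 0);
}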
index 524021ff54367661322862d84272572171777755..3f53dd101f99340f014db68388a49351f318fb24 100644 (file)
@@ -64,3 +64,16 @@ config XFS_RT
          See the xfs man page in section 5 for additional information.
 
          If unsure, say N.
+
+config XFS_DEBUG
+       bool "XFS Debugging support (EXPERIMENTAL)"
+       depends on XFS_FS && EXPERIMENTAL
+       help
+         Say Y here to get an XFS build with many debugging features,
+         including ASSERT checks, function wrappers around macros,
+         and extra sanity-checking functions in various code paths.
+
+         Note that the resulting code will be HUGE and SLOW, and probably
+         not useful unless you are debugging a particular problem.
+
+         Say N unless you are an XFS developer, or you play one on TV.
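
As the dependency line shows, the new option is only offered once both XFS and EXPERIMENTAL are enabled; a .config fragment that would turn it on might look like this (the module build for XFS itself is an arbitrary choice):

CONFIG_EXPERIMENTAL=y
CONFIG_XFS_FS=m
CONFIG_XFS_DEBUG=y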
index c110bb002665af5dce7d73d17cbb47943826d71d..ff6a19873e5cfb6a2ac45dd38514d951a45cd105 100644 (file)
 
 #include <linux/rwsem.h>
 
-enum { MR_NONE, MR_ACCESS, MR_UPDATE };
-
 typedef struct {
        struct rw_semaphore     mr_lock;
+#ifdef DEBUG
        int                     mr_writer;
+#endif
 } mrlock_t;
 
+#ifdef DEBUG
 #define mrinit(mrp, name)      \
        do { (mrp)->mr_writer = 0; init_rwsem(&(mrp)->mr_lock); } while (0)
+#else
+#define mrinit(mrp, name)      \
+       do { init_rwsem(&(mrp)->mr_lock); } while (0)
+#endif
+
 #define mrlock_init(mrp, t,n,s)        mrinit(mrp, n)
 #define mrfree(mrp)            do { } while (0)
 
-static inline void mraccess(mrlock_t *mrp)
-{
-       down_read(&mrp->mr_lock);
-}
-
-static inline void mrupdate(mrlock_t *mrp)
-{
-       down_write(&mrp->mr_lock);
-       mrp->mr_writer = 1;
-}
-
 static inline void mraccess_nested(mrlock_t *mrp, int subclass)
 {
        down_read_nested(&mrp->mr_lock, subclass);
@@ -51,10 +46,11 @@ static inline void mraccess_nested(mrlock_t *mrp, int subclass)
 static inline void mrupdate_nested(mrlock_t *mrp, int subclass)
 {
        down_write_nested(&mrp->mr_lock, subclass);
+#ifdef DEBUG
        mrp->mr_writer = 1;
+#endif
 }
 
-
 static inline int mrtryaccess(mrlock_t *mrp)
 {
        return down_read_trylock(&mrp->mr_lock);
@@ -64,39 +60,31 @@ static inline int mrtryupdate(mrlock_t *mrp)
 {
        if (!down_write_trylock(&mrp->mr_lock))
                return 0;
+#ifdef DEBUG
        mrp->mr_writer = 1;
+#endif
        return 1;
 }
 
-static inline void mrunlock(mrlock_t *mrp)
+static inline void mrunlock_excl(mrlock_t *mrp)
 {
-       if (mrp->mr_writer) {
-               mrp->mr_writer = 0;
-               up_write(&mrp->mr_lock);
-       } else {
-               up_read(&mrp->mr_lock);
-       }
+#ifdef DEBUG
+       mrp->mr_writer = 0;
+#endif
+       up_write(&mrp->mr_lock);
 }
 
-static inline void mrdemote(mrlock_t *mrp)
+static inline void mrunlock_shared(mrlock_t *mrp)
 {
-       mrp->mr_writer = 0;
-       downgrade_write(&mrp->mr_lock);
+       up_read(&mrp->mr_lock);
 }
 
-#ifdef DEBUG
-/*
- * Debug-only routine, without some platform-specific asm code, we can
- * now only answer requests regarding whether we hold the lock for write
- * (reader state is outside our visibility, we only track writer state).
- * Note: means !ismrlocked would give false positives, so don't do that.
- */
-static inline int ismrlocked(mrlock_t *mrp, int type)
+static inline void mrdemote(mrlock_t *mrp)
 {
-       if (mrp && type == MR_UPDATE)
-               return mrp->mr_writer;
-       return 1;
-}
+#ifdef DEBUG
+       mrp->mr_writer = 0;
 #endif
+       downgrade_write(&mrp->mr_lock);
+}
 
 #endif /* __XFS_SUPPORT_MRLOCK_H__ */
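
Since mr_writer is compiled out of non-DEBUG builds, the lock itself no longer remembers how it was taken; callers now have to pair each acquisition with the matching release. A short sketch of the resulting calling convention (the two example functions are hypothetical; the mrlock API is the one declared above):

/* sketch: the caller, not the lock, tracks shared vs. exclusive ownership */
static void example_modify(mrlock_t *mrp)
{
	mrupdate_nested(mrp, 0);	/* acquire for write */
	/* ... update the protected structure ... */
	mrunlock_excl(mrp);		/* release must match the write lock */
}

static void example_inspect(mrlock_t *mrp)
{
	mraccess_nested(mrp, 0);	/* acquire for read */
	/* ... read the protected structure ... */
	mrunlock_shared(mrp);		/* release must match the read lock */
}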
index 52f6846101d528b7726ba39e091c2e70dd81a6ad..5105015a75ad993d8401e0519a81186ab0e722a9 100644 (file)
@@ -886,7 +886,7 @@ int
 xfs_buf_lock_value(
        xfs_buf_t               *bp)
 {
-       return atomic_read(&bp->b_sema.count);
+       return bp->b_sema.count;
 }
 #endif
 
index 265f0168ab76ec9942a352e81421a1db23e71195..c672b3238b14b5d3a5a47b9ae606aeecd10d7fac 100644 (file)
@@ -133,7 +133,7 @@ xfs_nfs_get_inode(
        if (!ip)
                return ERR_PTR(-EIO);
 
-       if (!ip->i_d.di_mode || ip->i_d.di_gen != generation) {
+       if (ip->i_d.di_gen != generation) {
                xfs_iput_new(ip, XFS_ILOCK_SHARED);
                return ERR_PTR(-ENOENT);
        }
index 05905246434dc6e2182d2f5bbbceb2d1c480d828..65e78c13d4ae2d59a7b4eaa0785ad2c9fe95dda5 100644 (file)
@@ -43,9 +43,6 @@
 #include <linux/smp_lock.h>
 
 static struct vm_operations_struct xfs_file_vm_ops;
-#ifdef CONFIG_XFS_DMAPI
-static struct vm_operations_struct xfs_dmapi_file_vm_ops;
-#endif
 
 STATIC_INLINE ssize_t
 __xfs_file_read(
@@ -202,22 +199,6 @@ xfs_file_fsync(
                        (xfs_off_t)0, (xfs_off_t)-1);
 }
 
-#ifdef CONFIG_XFS_DMAPI
-STATIC int
-xfs_vm_fault(
-       struct vm_area_struct   *vma,
-       struct vm_fault *vmf)
-{
-       struct inode    *inode = vma->vm_file->f_path.dentry->d_inode;
-       bhv_vnode_t     *vp = vn_from_inode(inode);
-
-       ASSERT_ALWAYS(vp->v_vfsp->vfs_flag & VFS_DMI);
-       if (XFS_SEND_MMAP(XFS_VFSTOM(vp->v_vfsp), vma, 0))
-               return VM_FAULT_SIGBUS;
-       return filemap_fault(vma, vmf);
-}
-#endif /* CONFIG_XFS_DMAPI */
-
 /*
  * Unfortunately we can't just use the clean and simple readdir implementation
  * below, because nfs might call back into ->lookup from the filldir callback
@@ -386,11 +367,6 @@ xfs_file_mmap(
        vma->vm_ops = &xfs_file_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR;
 
-#ifdef CONFIG_XFS_DMAPI
-       if (XFS_M(filp->f_path.dentry->d_inode->i_sb)->m_flags & XFS_MOUNT_DMAPI)
-               vma->vm_ops = &xfs_dmapi_file_vm_ops;
-#endif /* CONFIG_XFS_DMAPI */
-
        file_accessed(filp);
        return 0;
 }
@@ -437,47 +413,6 @@ xfs_file_ioctl_invis(
        return error;
 }
 
-#ifdef CONFIG_XFS_DMAPI
-#ifdef HAVE_VMOP_MPROTECT
-STATIC int
-xfs_vm_mprotect(
-       struct vm_area_struct *vma,
-       unsigned int    newflags)
-{
-       struct inode    *inode = vma->vm_file->f_path.dentry->d_inode;
-       struct xfs_mount *mp = XFS_M(inode->i_sb);
-       int             error = 0;
-
-       if (mp->m_flags & XFS_MOUNT_DMAPI) {
-               if ((vma->vm_flags & VM_MAYSHARE) &&
-                   (newflags & VM_WRITE) && !(vma->vm_flags & VM_WRITE))
-                       error = XFS_SEND_MMAP(mp, vma, VM_WRITE);
-       }
-       return error;
-}
-#endif /* HAVE_VMOP_MPROTECT */
-#endif /* CONFIG_XFS_DMAPI */
-
-#ifdef HAVE_FOP_OPEN_EXEC
-/* If the user is attempting to execute a file that is offline then
- * we have to trigger a DMAPI READ event before the file is marked as busy
- * otherwise the invisible I/O will not be able to write to the file to bring
- * it back online.
- */
-STATIC int
-xfs_file_open_exec(
-       struct inode    *inode)
-{
-       struct xfs_mount *mp = XFS_M(inode->i_sb);
-       struct xfs_inode *ip = XFS_I(inode);
-
-       if (unlikely(mp->m_flags & XFS_MOUNT_DMAPI) &&
-                    DM_EVENT_ENABLED(ip, DM_EVENT_READ))
-               return -XFS_SEND_DATA(mp, DM_EVENT_READ, ip, 0, 0, 0, NULL);
-       return 0;
-}
-#endif /* HAVE_FOP_OPEN_EXEC */
-
 /*
  * mmap()d file has taken write protection fault and is being made
  * writable. We can set the page state up correctly for a writable
@@ -546,13 +481,3 @@ static struct vm_operations_struct xfs_file_vm_ops = {
        .fault          = filemap_fault,
        .page_mkwrite   = xfs_vm_page_mkwrite,
 };
-
-#ifdef CONFIG_XFS_DMAPI
-static struct vm_operations_struct xfs_dmapi_file_vm_ops = {
-       .fault          = xfs_vm_fault,
-       .page_mkwrite   = xfs_vm_page_mkwrite,
-#ifdef HAVE_VMOP_MPROTECT
-       .mprotect       = xfs_vm_mprotect,
-#endif
-};
-#endif /* CONFIG_XFS_DMAPI */
index 4ddb86b73c6b537034b09d2632c7d2061432a736..a42ba9d71156f84101e1068a33d4bd05e055bcd9 100644 (file)
@@ -238,7 +238,7 @@ xfs_vget_fsop_handlereq(
                return error;
        if (ip == NULL)
                return XFS_ERROR(EIO);
-       if (ip->i_d.di_mode == 0 || ip->i_d.di_gen != igen) {
+       if (ip->i_d.di_gen != igen) {
                xfs_iput_new(ip, XFS_ILOCK_SHARED);
                return XFS_ERROR(ENOENT);
        }
@@ -505,14 +505,14 @@ xfs_attrmulti_attr_get(
 {
        char                    *kbuf;
        int                     error = EFAULT;
-       
+
        if (*len > XATTR_SIZE_MAX)
                return EINVAL;
        kbuf = kmalloc(*len, GFP_KERNEL);
        if (!kbuf)
                return ENOMEM;
 
-       error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags, NULL);
+       error = xfs_attr_get(XFS_I(inode), name, kbuf, (int *)len, flags);
        if (error)
                goto out_kfree;
 
@@ -546,7 +546,7 @@ xfs_attrmulti_attr_set(
 
        if (copy_from_user(kbuf, ubuf, len))
                goto out_kfree;
-                       
+
        error = xfs_attr_set(XFS_I(inode), name, kbuf, len, flags);
 
  out_kfree:
index a1237dad6430b28221be1dc033f6f0a4e599f2cf..2bf287ef54897af2651f24b39792404270179a19 100644 (file)
@@ -511,7 +511,8 @@ xfs_vn_rename(
        xfs_dentry_to_name(&nname, ndentry);
 
        error = xfs_rename(XFS_I(odir), &oname, XFS_I(odentry->d_inode),
-                                                       XFS_I(ndir), &nname);
+                          XFS_I(ndir), &nname, new_inode ?
+                                               XFS_I(new_inode) : NULL);
        if (likely(!error)) {
                if (new_inode)
                        xfs_validate_fields(new_inode);
index e5143323e71f35ba82167013843539e19b34c15e..4edc46915b575a7eb1261f2bda587cfbccfe0462 100644 (file)
@@ -75,6 +75,7 @@
 #include <linux/delay.h>
 #include <linux/log2.h>
 #include <linux/spinlock.h>
+#include <linux/random.h>
 
 #include <asm/page.h>
 #include <asm/div64.h>
 /*
  * Feature macros (disable/enable)
  */
-#define HAVE_SPLICE    /* a splice(2) exists in 2.6, but not in 2.4 */
 #ifdef CONFIG_SMP
 #define HAVE_PERCPU_SB /* per cpu superblock counters are a 2.6 feature */
 #else
index 1ebd8004469c1d3a70a0afc0f8a34f8400ea5011..5e3b57516ec7293fe8bffe42cc995b9025780b8e 100644 (file)
@@ -394,7 +394,7 @@ xfs_zero_last_block(
        int             error = 0;
        xfs_bmbt_irec_t imap;
 
-       ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
        zero_offset = XFS_B_FSB_OFFSET(mp, isize);
        if (zero_offset == 0) {
@@ -425,14 +425,14 @@ xfs_zero_last_block(
         * out sync.  We need to drop the ilock while we do this so we
         * don't deadlock when the buffer cache calls back to us.
         */
-       xfs_iunlock(ip, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
 
        zero_len = mp->m_sb.sb_blocksize - zero_offset;
        if (isize + zero_len > offset)
                zero_len = offset - isize;
        error = xfs_iozero(ip, isize, zero_len);
 
-       xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
        ASSERT(error >= 0);
        return error;
 }
@@ -465,8 +465,7 @@ xfs_zero_eof(
        int             error = 0;
        xfs_bmbt_irec_t imap;
 
-       ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
-       ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
        ASSERT(offset > isize);
 
        /*
@@ -475,8 +474,7 @@ xfs_zero_eof(
         */
        error = xfs_zero_last_block(ip, offset, isize);
        if (error) {
-               ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
-               ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
+               ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
                return error;
        }
 
@@ -507,8 +505,7 @@ xfs_zero_eof(
                error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
                                  0, NULL, 0, &imap, &nimaps, NULL, NULL);
                if (error) {
-                       ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
-                       ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
+                       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
                        return error;
                }
                ASSERT(nimaps > 0);
@@ -532,7 +529,7 @@ xfs_zero_eof(
                 * Drop the inode lock while we're doing the I/O.
                 * We'll still have the iolock to protect us.
                 */
-               xfs_iunlock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+               xfs_iunlock(ip, XFS_ILOCK_EXCL);
 
                zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
                zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);
@@ -548,13 +545,13 @@ xfs_zero_eof(
                start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
 
-               xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+               xfs_ilock(ip, XFS_ILOCK_EXCL);
        }
 
        return 0;
 
 out_lock:
-       xfs_ilock(ip, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
        ASSERT(error >= 0);
        return error;
 }
index e1d498b4ba7a98e1a9bf0049c165d6331290b7a3..e6be37dbd0e9f8865e81e5b442758d2c7d24dcaa 100644 (file)
@@ -50,7 +50,6 @@ struct xfs_iomap;
 #define        XFS_INVAL_CACHED        18
 #define        XFS_DIORD_ENTER         19
 #define        XFS_DIOWR_ENTER         20
-#define        XFS_SENDFILE_ENTER      21
 #define        XFS_WRITEPAGE_ENTER     22
 #define        XFS_RELEASEPAGE_ENTER   23
 #define        XFS_INVALIDPAGE_ENTER   24
index 865eb708aa95ffdca5c528b2a9871c2b78aaacdb..742b2c7852c1bc947ae93e3a11dd499cecab8fdb 100644 (file)
@@ -1181,7 +1181,7 @@ xfs_fs_statfs(
        statp->f_fsid.val[0] = (u32)id;
        statp->f_fsid.val[1] = (u32)(id >> 32);
 
-       xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
+       xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
 
        spin_lock(&mp->m_sb_lock);
        statp->f_bsize = sbp->sb_blocksize;
index 8b4d63ce86945cf00edf197c38b02cbb9d36bb09..9d73cb5c0fc7f664fee65bc2344c41d31535193b 100644 (file)
@@ -25,12 +25,6 @@ struct attrlist_cursor_kern;
 
 typedef struct inode   bhv_vnode_t;
 
-#define VN_ISLNK(vp)   S_ISLNK((vp)->i_mode)
-#define VN_ISREG(vp)   S_ISREG((vp)->i_mode)
-#define VN_ISDIR(vp)   S_ISDIR((vp)->i_mode)
-#define VN_ISCHR(vp)   S_ISCHR((vp)->i_mode)
-#define VN_ISBLK(vp)   S_ISBLK((vp)->i_mode)
-
 /*
  * Vnode to Linux inode mapping.
  */
@@ -151,24 +145,6 @@ typedef struct bhv_vattr {
                XFS_AT_TYPE|XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|\
                XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_GENCOUNT)
 
-/*
- *  Modes.
- */
-#define VSUID  S_ISUID         /* set user id on execution */
-#define VSGID  S_ISGID         /* set group id on execution */
-#define VSVTX  S_ISVTX         /* save swapped text even after use */
-#define VREAD  S_IRUSR         /* read, write, execute permissions */
-#define VWRITE S_IWUSR
-#define VEXEC  S_IXUSR
-
-#define MODEMASK S_IALLUGO     /* mode bits plus permission bits */
-
-/*
- * Check whether mandatory file locking is enabled.
- */
-#define MANDLOCK(vp, mode)     \
-       (VN_ISREG(vp) && ((mode) & (VSGID|(VEXEC>>3))) == VSGID)
-
 extern void    vn_init(void);
 extern int     vn_revalidate(bhv_vnode_t *);
 
index 631ebb31b295df30fed95207e80be2214d5f2f49..85df3288efd5b8530ad2ff42b23f10ce9104cb25 100644 (file)
@@ -933,7 +933,7 @@ xfs_qm_dqget(
               type == XFS_DQ_PROJ ||
               type == XFS_DQ_GROUP);
        if (ip) {
-               ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+               ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
                if (type == XFS_DQ_USER)
                        ASSERT(ip->i_udquot == NULL);
                else
@@ -1088,7 +1088,7 @@ xfs_qm_dqget(
        xfs_qm_mplist_unlock(mp);
        XFS_DQ_HASH_UNLOCK(h);
  dqret:
-       ASSERT((ip == NULL) || XFS_ISLOCKED_INODE_EXCL(ip));
+       ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
        xfs_dqtrace_entry(dqp, "DQGET DONE");
        *O_dqpp = dqp;
        return (0);
index 40ea56409561cd8b98a180f6c1cd639c8f30476e..d31cce1165c59cd03ad1c8590c5bbc487494b95a 100644 (file)
@@ -670,7 +670,7 @@ xfs_qm_dqattach_one(
        xfs_dquot_t     *dqp;
        int             error;
 
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        error = 0;
        /*
         * See if we already have it in the inode itself. IO_idqpp is
@@ -874,7 +874,7 @@ xfs_qm_dqattach(
                return 0;
 
        ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 ||
-              XFS_ISLOCKED_INODE_EXCL(ip));
+              xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
        if (! (flags & XFS_QMOPT_ILOCKED))
                xfs_ilock(ip, XFS_ILOCK_EXCL);
@@ -888,7 +888,8 @@ xfs_qm_dqattach(
                        goto done;
                nquotas++;
        }
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        if (XFS_IS_OQUOTA_ON(mp)) {
                error = XFS_IS_GQUOTA_ON(mp) ?
                        xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
@@ -913,7 +914,7 @@ xfs_qm_dqattach(
         * This WON'T, in general, result in a thrash.
         */
        if (nquotas == 2) {
-               ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+               ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
                ASSERT(ip->i_udquot);
                ASSERT(ip->i_gdquot);
 
@@ -956,7 +957,7 @@ xfs_qm_dqattach(
 
 #ifdef QUOTADEBUG
        else
-               ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+               ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 #endif
        return error;
 }
@@ -1291,7 +1292,7 @@ xfs_qm_dqget_noattach(
        xfs_mount_t     *mp;
        xfs_dquot_t     *udqp, *gdqp;
 
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        mp = ip->i_mount;
        udqp = NULL;
        gdqp = NULL;
@@ -1392,7 +1393,7 @@ xfs_qm_qino_alloc(
         * Keep an extra reference to this quota inode. This inode is
         * locked exclusively and joined to the transaction already.
         */
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(*ip));
+       ASSERT(xfs_isilocked(*ip, XFS_ILOCK_EXCL));
        VN_HOLD(XFS_ITOV((*ip)));
 
        /*
@@ -1737,12 +1738,6 @@ xfs_qm_dqusage_adjust(
                return error;
        }
 
-       if (ip->i_d.di_mode == 0) {
-               xfs_iput_new(ip, XFS_ILOCK_EXCL);
-               *res = BULKSTAT_RV_NOTHING;
-               return XFS_ERROR(ENOENT);
-       }
-
        /*
         * Obtain the locked dquots. In case of an error (eg. allocation
         * fails for ENOSPC), we return the negative of the error number
@@ -2563,7 +2558,7 @@ xfs_qm_vop_chown(
        uint            bfield = XFS_IS_REALTIME_INODE(ip) ?
                                 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
 
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
 
        /* old dquot */
@@ -2607,7 +2602,7 @@ xfs_qm_vop_chown_reserve(
        uint            delblks, blkflags, prjflags = 0;
        xfs_dquot_t     *unresudq, *unresgdq, *delblksudq, *delblksgdq;
 
-       ASSERT(XFS_ISLOCKED_INODE(ip));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        mp = ip->i_mount;
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 
@@ -2717,7 +2712,7 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
        if (!XFS_IS_QUOTA_ON(tp->t_mountp))
                return;
 
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
 
        if (udqp) {
index 8342823dbdc34913548e9bd285de1bb8aa991f7c..768a3b27d2b67c68a5361c853b277e778c8f0c8e 100644 (file)
@@ -1366,12 +1366,6 @@ xfs_qm_internalqcheck_adjust(
                return (error);
        }
 
-       if (ip->i_d.di_mode == 0) {
-               xfs_iput_new(ip, lock_flags);
-               *res = BULKSTAT_RV_NOTHING;
-               return XFS_ERROR(ENOENT);
-       }
-
        /*
         * This inode can have blocks after eof which can get released
         * when we send it to inactive. Since we don't check the dquot
index a8b85e2be9d5cf34d10ecaa332d4b9257a82efd1..5e4a40b1c565c3d9388669145b768496a3e345fd 100644 (file)
 /* Number of dquots that fit in to a dquot block */
 #define XFS_QM_DQPERBLK(mp)    ((mp)->m_quotainfo->qi_dqperchunk)
 
-#define XFS_ISLOCKED_INODE(ip)         (ismrlocked(&(ip)->i_lock, \
-                                           MR_UPDATE | MR_ACCESS) != 0)
-#define XFS_ISLOCKED_INODE_EXCL(ip)    (ismrlocked(&(ip)->i_lock, \
-                                           MR_UPDATE) != 0)
-
 #define XFS_DQ_IS_ADDEDTO_TRX(t, d)    ((d)->q_transp == (t))
 
 #define XFS_QI_MPLRECLAIMS(mp) ((mp)->m_quotainfo->qi_dqreclaims)
index f441f836ca8b29338cbc5c1e5b8a7e4b9fad8677..99611381e74043f9974651d2179c08cbc83f9675 100644 (file)
@@ -834,7 +834,7 @@ xfs_trans_reserve_quota_nblks(
        ASSERT(ip->i_ino != mp->m_sb.sb_uquotino);
        ASSERT(ip->i_ino != mp->m_sb.sb_gquotino);
 
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
        ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
                                XFS_TRANS_DQ_RES_RTBLKS ||
index 855da04086479481a60c5d5a9dd271fb37c57bc7..75845f950814ca8beac0bf2941eea886d8446271 100644 (file)
@@ -49,8 +49,6 @@ extern void assfail(char *expr, char *f, int l);
 
 #else /* DEBUG */
 
-#include <linux/random.h>
-
 #define ASSERT(expr)   \
        (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
 
index 765aaf65e2d3e984c8d872925ab16fab66d1ed56..540e4c98982512a365eeccc4bd7f5d16b1d9db63 100644 (file)
@@ -22,7 +22,7 @@
 #define STATIC
 #define DEBUG 1
 #define XFS_BUF_LOCK_TRACKING 1
-#define QUOTADEBUG 1
+/* #define QUOTADEBUG 1 */
 #endif
 
 #ifdef CONFIG_XFS_TRACE
index 8e130b9720ae8513d24a2307c5aba0a034718dac..ebee3a4f703aa5128756e950631a3bd3f50e7103 100644 (file)
@@ -72,7 +72,7 @@ xfs_acl_vhasacl_default(
 {
        int             error;
 
-       if (!VN_ISDIR(vp))
+       if (!S_ISDIR(vp->i_mode))
                return 0;
        xfs_acl_get_attr(vp, NULL, _ACL_TYPE_DEFAULT, ATTR_KERNOVAL, &error);
        return (error == 0);
@@ -238,15 +238,8 @@ xfs_acl_vget(
                        error = EINVAL;
                        goto out;
                }
-               if (kind == _ACL_TYPE_ACCESS) {
-                       bhv_vattr_t     va;
-
-                       va.va_mask = XFS_AT_MODE;
-                       error = xfs_getattr(xfs_vtoi(vp), &va, 0);
-                       if (error)
-                               goto out;
-                       xfs_acl_sync_mode(va.va_mode, xfs_acl);
-               }
+               if (kind == _ACL_TYPE_ACCESS)
+                       xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, xfs_acl);
                error = -posix_acl_xfs_to_xattr(xfs_acl, ext_acl, size);
        }
 out:
@@ -341,14 +334,15 @@ xfs_acl_iaccess(
 {
        xfs_acl_t       *acl;
        int             rval;
+       struct xfs_name acl_name = {SGI_ACL_FILE, SGI_ACL_FILE_SIZE};
 
        if (!(_ACL_ALLOC(acl)))
                return -1;
 
        /* If the file has no ACL return -1. */
        rval = sizeof(xfs_acl_t);
-       if (xfs_attr_fetch(ip, SGI_ACL_FILE, SGI_ACL_FILE_SIZE,
-                       (char *)acl, &rval, ATTR_ROOT | ATTR_KERNACCESS, cr)) {
+       if (xfs_attr_fetch(ip, &acl_name, (char *)acl, &rval,
+                                       ATTR_ROOT | ATTR_KERNACCESS)) {
                _ACL_FREE(acl);
                return -1;
        }
@@ -373,23 +367,15 @@ xfs_acl_allow_set(
        bhv_vnode_t     *vp,
        int             kind)
 {
-       xfs_inode_t     *ip = xfs_vtoi(vp);
-       bhv_vattr_t     va;
-       int             error;
-
        if (vp->i_flags & (S_IMMUTABLE|S_APPEND))
                return EPERM;
-       if (kind == _ACL_TYPE_DEFAULT && !VN_ISDIR(vp))
+       if (kind == _ACL_TYPE_DEFAULT && !S_ISDIR(vp->i_mode))
                return ENOTDIR;
        if (vp->i_sb->s_flags & MS_RDONLY)
                return EROFS;
-       va.va_mask = XFS_AT_UID;
-       error = xfs_getattr(ip, &va, 0);
-       if (error)
-               return error;
-       if (va.va_uid != current->fsuid && !capable(CAP_FOWNER))
+       if (xfs_vtoi(vp)->i_d.di_uid != current->fsuid && !capable(CAP_FOWNER))
                return EPERM;
-       return error;
+       return 0;
 }
 
 /*
@@ -594,7 +580,7 @@ xfs_acl_get_attr(
        *error = xfs_attr_get(xfs_vtoi(vp),
                                        kind == _ACL_TYPE_ACCESS ?
                                        SGI_ACL_FILE : SGI_ACL_DEFAULT,
-                                       (char *)aclp, &len, flags, sys_cred);
+                                       (char *)aclp, &len, flags);
        if (*error || (flags & ATTR_KERNOVAL))
                return;
        xfs_acl_get_endian(aclp);
@@ -643,7 +629,6 @@ xfs_acl_vtoacl(
        xfs_acl_t       *access_acl,
        xfs_acl_t       *default_acl)
 {
-       bhv_vattr_t     va;
        int             error = 0;
 
        if (access_acl) {
@@ -652,16 +637,10 @@ xfs_acl_vtoacl(
                 * be obtained for some reason, invalidate the access ACL.
                 */
                xfs_acl_get_attr(vp, access_acl, _ACL_TYPE_ACCESS, 0, &error);
-               if (!error) {
-                       /* Got the ACL, need the mode... */
-                       va.va_mask = XFS_AT_MODE;
-                       error = xfs_getattr(xfs_vtoi(vp), &va, 0);
-               }
-
                if (error)
                        access_acl->acl_cnt = XFS_ACL_NOT_PRESENT;
                else /* We have a good ACL and the file mode, synchronize. */
-                       xfs_acl_sync_mode(va.va_mode, access_acl);
+                       xfs_acl_sync_mode(xfs_vtoi(vp)->i_d.di_mode, access_acl);
        }
 
        if (default_acl) {
@@ -719,7 +698,7 @@ xfs_acl_inherit(
         * If the new file is a directory, its default ACL is a copy of
         * the containing directory's default ACL.
         */
-       if (VN_ISDIR(vp))
+       if (S_ISDIR(vp->i_mode))
                xfs_acl_set_attr(vp, pdaclp, _ACL_TYPE_DEFAULT, &error);
        if (!error && !basicperms)
                xfs_acl_set_attr(vp, cacl, _ACL_TYPE_ACCESS, &error);
@@ -744,7 +723,7 @@ xfs_acl_setmode(
        bhv_vattr_t     va;
        xfs_acl_entry_t *ap;
        xfs_acl_entry_t *gap = NULL;
-       int             i, error, nomask = 1;
+       int             i, nomask = 1;
 
        *basicperms = 1;
 
@@ -756,11 +735,7 @@ xfs_acl_setmode(
         * mode.  The m:: bits take precedence over the g:: bits.
         */
        va.va_mask = XFS_AT_MODE;
-       error = xfs_getattr(xfs_vtoi(vp), &va, 0);
-       if (error)
-               return error;
-
-       va.va_mask = XFS_AT_MODE;
+       va.va_mode = xfs_vtoi(vp)->i_d.di_mode;
        va.va_mode &= ~(S_IRWXU|S_IRWXG|S_IRWXO);
        ap = acl->acl_entry;
        for (i = 0; i < acl->acl_cnt; ++i) {
index 36d781ee5fcc6ab6b884bce5b419610bfa7a4843..df151a859186cdf569ae2e3436af8157e0812887 100644 (file)
@@ -101,14 +101,28 @@ STATIC int xfs_attr_rmtval_remove(xfs_da_args_t *args);
 ktrace_t *xfs_attr_trace_buf;
 #endif
 
+STATIC int
+xfs_attr_name_to_xname(
+       struct xfs_name *xname,
+       const char      *aname)
+{
+       if (!aname)
+               return EINVAL;
+       xname->name = aname;
+       xname->len = strlen(aname);
+       if (xname->len >= MAXNAMELEN)
+               return EFAULT;          /* match IRIX behaviour */
+
+       return 0;
+}
 
 /*========================================================================
  * Overall external interface routines.
  *========================================================================*/
 
 int
-xfs_attr_fetch(xfs_inode_t *ip, const char *name, int namelen,
-              char *value, int *valuelenp, int flags, struct cred *cred)
+xfs_attr_fetch(xfs_inode_t *ip, struct xfs_name *name,
+               char *value, int *valuelenp, int flags)
 {
        xfs_da_args_t   args;
        int             error;
@@ -122,8 +136,8 @@ xfs_attr_fetch(xfs_inode_t *ip, const char *name, int namelen,
         * Fill in the arg structure for this request.
         */
        memset((char *)&args, 0, sizeof(args));
-       args.name = name;
-       args.namelen = namelen;
+       args.name = name->name;
+       args.namelen = name->len;
        args.value = value;
        args.valuelen = *valuelenp;
        args.flags = flags;
@@ -162,31 +176,29 @@ xfs_attr_get(
        const char      *name,
        char            *value,
        int             *valuelenp,
-       int             flags,
-       cred_t          *cred)
+       int             flags)
 {
-       int             error, namelen;
+       int             error;
+       struct xfs_name xname;
 
        XFS_STATS_INC(xs_attr_get);
 
-       if (!name)
-               return(EINVAL);
-       namelen = strlen(name);
-       if (namelen >= MAXNAMELEN)
-               return(EFAULT);         /* match IRIX behaviour */
-
        if (XFS_FORCED_SHUTDOWN(ip->i_mount))
                return(EIO);
 
+       error = xfs_attr_name_to_xname(&xname, name);
+       if (error)
+               return error;
+
        xfs_ilock(ip, XFS_ILOCK_SHARED);
-       error = xfs_attr_fetch(ip, name, namelen, value, valuelenp, flags, cred);
+       error = xfs_attr_fetch(ip, &xname, value, valuelenp, flags);
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        return(error);
 }
 
-int
-xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
-                char *value, int valuelen, int flags)
+STATIC int
+xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
+               char *value, int valuelen, int flags)
 {
        xfs_da_args_t   args;
        xfs_fsblock_t   firstblock;
@@ -209,7 +221,7 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
         */
        if (XFS_IFORK_Q(dp) == 0) {
                int sf_size = sizeof(xfs_attr_sf_hdr_t) +
-                             XFS_ATTR_SF_ENTSIZE_BYNAME(namelen, valuelen);
+                             XFS_ATTR_SF_ENTSIZE_BYNAME(name->len, valuelen);
 
                if ((error = xfs_bmap_add_attrfork(dp, sf_size, rsvd)))
                        return(error);
@@ -219,8 +231,8 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
         * Fill in the arg structure for this request.
         */
        memset((char *)&args, 0, sizeof(args));
-       args.name = name;
-       args.namelen = namelen;
+       args.name = name->name;
+       args.namelen = name->len;
        args.value = value;
        args.valuelen = valuelen;
        args.flags = flags;
@@ -236,7 +248,7 @@ xfs_attr_set_int(xfs_inode_t *dp, const char *name, int namelen,
         * Determine space new attribute will use, and if it would be
         * "local" or "remote" (note: local != inline).
         */
-       size = xfs_attr_leaf_newentsize(namelen, valuelen,
+       size = xfs_attr_leaf_newentsize(name->len, valuelen,
                                        mp->m_sb.sb_blocksize, &local);
 
        nblks = XFS_DAENTER_SPACE_RES(mp, XFS_ATTR_FORK);
@@ -429,26 +441,27 @@ xfs_attr_set(
        int             valuelen,
        int             flags)
 {
-       int             namelen;
-
-       namelen = strlen(name);
-       if (namelen >= MAXNAMELEN)
-               return EFAULT;          /* match IRIX behaviour */
+       int             error;
+       struct xfs_name xname;
 
        XFS_STATS_INC(xs_attr_set);
 
        if (XFS_FORCED_SHUTDOWN(dp->i_mount))
                return (EIO);
 
-       return xfs_attr_set_int(dp, name, namelen, value, valuelen, flags);
+       error = xfs_attr_name_to_xname(&xname, name);
+       if (error)
+               return error;
+
+       return xfs_attr_set_int(dp, &xname, value, valuelen, flags);
 }
 
 /*
  * Generic handler routine to remove a name from an attribute list.
  * Transitions attribute list from Btree to shortform as necessary.
  */
-int
-xfs_attr_remove_int(xfs_inode_t *dp, const char *name, int namelen, int flags)
+STATIC int
+xfs_attr_remove_int(xfs_inode_t *dp, struct xfs_name *name, int flags)
 {
        xfs_da_args_t   args;
        xfs_fsblock_t   firstblock;
@@ -460,8 +473,8 @@ xfs_attr_remove_int(xfs_inode_t *dp, const char *name, int namelen, int flags)
         * Fill in the arg structure for this request.
         */
        memset((char *)&args, 0, sizeof(args));
-       args.name = name;
-       args.namelen = namelen;
+       args.name = name->name;
+       args.namelen = name->len;
        args.flags = flags;
        args.hashval = xfs_da_hashname(args.name, args.namelen);
        args.dp = dp;
@@ -575,17 +588,18 @@ xfs_attr_remove(
        const char      *name,
        int             flags)
 {
-       int             namelen;
-
-       namelen = strlen(name);
-       if (namelen >= MAXNAMELEN)
-               return EFAULT;          /* match IRIX behaviour */
+       int             error;
+       struct xfs_name xname;
 
        XFS_STATS_INC(xs_attr_remove);
 
        if (XFS_FORCED_SHUTDOWN(dp->i_mount))
                return (EIO);
 
+       error = xfs_attr_name_to_xname(&xname, name);
+       if (error)
+               return error;
+
        xfs_ilock(dp, XFS_ILOCK_SHARED);
        if (XFS_IFORK_Q(dp) == 0 ||
                   (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
@@ -595,10 +609,10 @@ xfs_attr_remove(
        }
        xfs_iunlock(dp, XFS_ILOCK_SHARED);
 
-       return xfs_attr_remove_int(dp, name, namelen, flags);
+       return xfs_attr_remove_int(dp, &xname, flags);
 }
 
-int                                                            /* error */
+STATIC int
 xfs_attr_list_int(xfs_attr_list_context_t *context)
 {
        int error;
@@ -2522,8 +2536,7 @@ attr_generic_get(
 {
        int     error, asize = size;
 
-       error = xfs_attr_get(xfs_vtoi(vp), name, data,
-                                   &asize, xflags, NULL);
+       error = xfs_attr_get(xfs_vtoi(vp), name, data, &asize, xflags);
        if (!error)
                return asize;
        return -error;
index 786eba3121c4300a0b2a349167c7f29b2a1121e8..6cfc9384fe35c952640edc8baa04ca6d4c1d3b78 100644 (file)
@@ -158,14 +158,10 @@ struct xfs_da_args;
 /*
  * Overall external interface routines.
  */
-int xfs_attr_set_int(struct xfs_inode *, const char *, int, char *, int, int);
-int xfs_attr_remove_int(struct xfs_inode *, const char *, int, int);
-int xfs_attr_list_int(struct xfs_attr_list_context *);
 int xfs_attr_inactive(struct xfs_inode *dp);
 
 int xfs_attr_shortform_getvalue(struct xfs_da_args *);
-int xfs_attr_fetch(struct xfs_inode *, const char *, int,
-                       char *, int *, int, struct cred *);
+int xfs_attr_fetch(struct xfs_inode *, struct xfs_name *, char *, int *, int);
 int xfs_attr_rmtval_get(struct xfs_da_args *args);
 
 #endif /* __XFS_ATTR_H__ */
index eb198c01c35dd08d306c0a53da2109bedc015afa..53c259f5a5af5e7b85fde5f73ee678670e692ad9 100644 (file)
@@ -4074,7 +4074,6 @@ xfs_bmap_add_attrfork(
 error2:
        xfs_bmap_cancel(&flist);
 error1:
-       ASSERT(ismrlocked(&ip->i_lock,MR_UPDATE));
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
 error0:
        xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
index 3f53fad356a30a199e11ad761d47cb6b53aa6252..5f3647cb98851aa13855ed53d26d102bdb0f08c7 100644 (file)
@@ -162,7 +162,7 @@ xfs_swap_extents(
                ips[1] = ip;
        }
 
-       xfs_lock_inodes(ips, 2, 0, lock_flags);
+       xfs_lock_inodes(ips, 2, lock_flags);
        locked = 1;
 
        /* Verify that both files have the same format */
@@ -265,7 +265,7 @@ xfs_swap_extents(
                locked = 0;
                goto error0;
        }
-       xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL);
+       xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
 
        /*
         * Count the number of extended attribute blocks
index d3a0f538d6a67a8bdb7ebdc21f33a02f569a5a6e..381ebda4f7bc606c484d504e665769ab45a08377 100644 (file)
@@ -462,7 +462,7 @@ xfs_fs_counts(
        xfs_mount_t             *mp,
        xfs_fsop_counts_t       *cnt)
 {
-       xfs_icsb_sync_counters_flags(mp, XFS_ICSB_LAZY_COUNT);
+       xfs_icsb_sync_counters(mp, XFS_ICSB_LAZY_COUNT);
        spin_lock(&mp->m_sb_lock);
        cnt->freedata = mp->m_sb.sb_fdblocks - XFS_ALLOC_SET_ASIDE(mp);
        cnt->freertx = mp->m_sb.sb_frextents;
@@ -524,7 +524,7 @@ xfs_reserve_blocks(
         */
 retry:
        spin_lock(&mp->m_sb_lock);
-       xfs_icsb_sync_counters_flags(mp, XFS_ICSB_SB_LOCKED);
+       xfs_icsb_sync_counters_locked(mp, 0);
 
        /*
         * If our previous reservation was larger than the current value,
@@ -552,11 +552,8 @@ retry:
                        mp->m_resblks += free;
                        mp->m_resblks_avail += free;
                        fdblks_delta = -free;
-                       mp->m_sb.sb_fdblocks = XFS_ALLOC_SET_ASIDE(mp);
                } else {
                        fdblks_delta = -delta;
-                       mp->m_sb.sb_fdblocks =
-                               lcounter + XFS_ALLOC_SET_ASIDE(mp);
                        mp->m_resblks = request;
                        mp->m_resblks_avail += delta;
                }
@@ -587,7 +584,6 @@ out:
                if (error == ENOSPC)
                        goto retry;
        }
-
        return 0;
 }
 
index a64dfbd565a563b3886d934ec19d77df4f4ed526..aad8c5da38afadb158518d7ee5dc51a3309726f2 100644 (file)
@@ -147,6 +147,7 @@ xfs_ialloc_ag_alloc(
        int             version;        /* inode version number to use */
        int             isaligned = 0;  /* inode allocation at stripe unit */
                                        /* boundary */
+       unsigned int    gen;
 
        args.tp = tp;
        args.mp = tp->t_mountp;
@@ -290,6 +291,14 @@ xfs_ialloc_ag_alloc(
        else
                version = XFS_DINODE_VERSION_1;
 
+       /*
+        * Seed the new inode cluster with a random generation number. This
+        * prevents short-term reuse of generation numbers if a chunk is
+        * freed and then immediately reallocated. We use random numbers
+        * rather than a linear progression to prevent the next generation
+        * number from being easily guessable.
+        */
+       gen = random32();
        for (j = 0; j < nbufs; j++) {
                /*
                 * Get the block.
@@ -309,6 +318,7 @@ xfs_ialloc_ag_alloc(
                        free = XFS_MAKE_IPTR(args.mp, fbuf, i);
                        free->di_core.di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
                        free->di_core.di_version = version;
+                       free->di_core.di_gen = cpu_to_be32(gen);
                        free->di_next_unlinked = cpu_to_be32(NULLAGINO);
                        xfs_ialloc_log_di(tp, fbuf, i,
                                XFS_DI_CORE_BITS | XFS_DI_NEXT_UNLINKED);
index e657c51284606c8d3e98f9a18dee5a7294834be2..b07604b94d9f32c2f15e7eb08c5e83b458b3fd42 100644 (file)
@@ -593,8 +593,9 @@ xfs_iunlock_map_shared(
  *             XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
  */
 void
-xfs_ilock(xfs_inode_t  *ip,
-         uint          lock_flags)
+xfs_ilock(
+       xfs_inode_t             *ip,
+       uint                    lock_flags)
 {
        /*
         * You can't set both SHARED and EXCL for the same lock,
@@ -607,16 +608,16 @@ xfs_ilock(xfs_inode_t     *ip,
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
 
-       if (lock_flags & XFS_IOLOCK_EXCL) {
+       if (lock_flags & XFS_IOLOCK_EXCL)
                mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-       } else if (lock_flags & XFS_IOLOCK_SHARED) {
+       else if (lock_flags & XFS_IOLOCK_SHARED)
                mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
-       }
-       if (lock_flags & XFS_ILOCK_EXCL) {
+
+       if (lock_flags & XFS_ILOCK_EXCL)
                mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
-       } else if (lock_flags & XFS_ILOCK_SHARED) {
+       else if (lock_flags & XFS_ILOCK_SHARED)
                mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
-       }
+
        xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
 }
 
@@ -631,15 +632,12 @@ xfs_ilock(xfs_inode_t     *ip,
  * lock_flags -- this parameter indicates the inode's locks
  *       to be locked.  See the comment for xfs_ilock() for a list
  *      of valid values.
- *
  */
 int
-xfs_ilock_nowait(xfs_inode_t   *ip,
-                uint           lock_flags)
+xfs_ilock_nowait(
+       xfs_inode_t             *ip,
+       uint                    lock_flags)
 {
-       int     iolocked;
-       int     ilocked;
-
        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
@@ -651,37 +649,30 @@ xfs_ilock_nowait(xfs_inode_t      *ip,
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
 
-       iolocked = 0;
        if (lock_flags & XFS_IOLOCK_EXCL) {
-               iolocked = mrtryupdate(&ip->i_iolock);
-               if (!iolocked) {
-                       return 0;
-               }
+               if (!mrtryupdate(&ip->i_iolock))
+                       goto out;
        } else if (lock_flags & XFS_IOLOCK_SHARED) {
-               iolocked = mrtryaccess(&ip->i_iolock);
-               if (!iolocked) {
-                       return 0;
-               }
+               if (!mrtryaccess(&ip->i_iolock))
+                       goto out;
        }
        if (lock_flags & XFS_ILOCK_EXCL) {
-               ilocked = mrtryupdate(&ip->i_lock);
-               if (!ilocked) {
-                       if (iolocked) {
-                               mrunlock(&ip->i_iolock);
-                       }
-                       return 0;
-               }
+               if (!mrtryupdate(&ip->i_lock))
+                       goto out_undo_iolock;
        } else if (lock_flags & XFS_ILOCK_SHARED) {
-               ilocked = mrtryaccess(&ip->i_lock);
-               if (!ilocked) {
-                       if (iolocked) {
-                               mrunlock(&ip->i_iolock);
-                       }
-                       return 0;
-               }
+               if (!mrtryaccess(&ip->i_lock))
+                       goto out_undo_iolock;
        }
        xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
        return 1;
+
+ out_undo_iolock:
+       if (lock_flags & XFS_IOLOCK_EXCL)
+               mrunlock_excl(&ip->i_iolock);
+       else if (lock_flags & XFS_IOLOCK_SHARED)
+               mrunlock_shared(&ip->i_iolock);
+ out:
+       return 0;
 }
 
 /*
@@ -697,8 +688,9 @@ xfs_ilock_nowait(xfs_inode_t        *ip,
  *
  */
 void
-xfs_iunlock(xfs_inode_t        *ip,
-           uint        lock_flags)
+xfs_iunlock(
+       xfs_inode_t             *ip,
+       uint                    lock_flags)
 {
        /*
         * You can't set both SHARED and EXCL for the same lock,
@@ -713,31 +705,25 @@ xfs_iunlock(xfs_inode_t   *ip,
                        XFS_LOCK_DEP_MASK)) == 0);
        ASSERT(lock_flags != 0);
 
-       if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
-               ASSERT(!(lock_flags & XFS_IOLOCK_SHARED) ||
-                      (ismrlocked(&ip->i_iolock, MR_ACCESS)));
-               ASSERT(!(lock_flags & XFS_IOLOCK_EXCL) ||
-                      (ismrlocked(&ip->i_iolock, MR_UPDATE)));
-               mrunlock(&ip->i_iolock);
-       }
+       if (lock_flags & XFS_IOLOCK_EXCL)
+               mrunlock_excl(&ip->i_iolock);
+       else if (lock_flags & XFS_IOLOCK_SHARED)
+               mrunlock_shared(&ip->i_iolock);
 
-       if (lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) {
-               ASSERT(!(lock_flags & XFS_ILOCK_SHARED) ||
-                      (ismrlocked(&ip->i_lock, MR_ACCESS)));
-               ASSERT(!(lock_flags & XFS_ILOCK_EXCL) ||
-                      (ismrlocked(&ip->i_lock, MR_UPDATE)));
-               mrunlock(&ip->i_lock);
+       if (lock_flags & XFS_ILOCK_EXCL)
+               mrunlock_excl(&ip->i_lock);
+       else if (lock_flags & XFS_ILOCK_SHARED)
+               mrunlock_shared(&ip->i_lock);
 
+       if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
+           !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
                /*
                 * Let the AIL know that this item has been unlocked in case
                 * it is in the AIL and anyone is waiting on it.  Don't do
                 * this if the caller has asked us not to.
                 */
-               if (!(lock_flags & XFS_IUNLOCK_NONOTIFY) &&
-                    ip->i_itemp != NULL) {
-                       xfs_trans_unlocked_item(ip->i_mount,
-                                               (xfs_log_item_t*)(ip->i_itemp));
-               }
+               xfs_trans_unlocked_item(ip->i_mount,
+                                       (xfs_log_item_t*)(ip->i_itemp));
        }
        xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
 }
@@ -747,21 +733,47 @@ xfs_iunlock(xfs_inode_t   *ip,
  * if it is being demoted.
  */
 void
-xfs_ilock_demote(xfs_inode_t   *ip,
-                uint           lock_flags)
+xfs_ilock_demote(
+       xfs_inode_t             *ip,
+       uint                    lock_flags)
 {
        ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
 
-       if (lock_flags & XFS_ILOCK_EXCL) {
-               ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+       if (lock_flags & XFS_ILOCK_EXCL)
                mrdemote(&ip->i_lock);
-       }
-       if (lock_flags & XFS_IOLOCK_EXCL) {
-               ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
+       if (lock_flags & XFS_IOLOCK_EXCL)
                mrdemote(&ip->i_iolock);
+}
+
+#ifdef DEBUG
+/*
+ * Debug-only routine: without additional rw_semaphore APIs, we can
+ * only answer requests regarding whether we hold the lock for write
+ * (reader state is outside our visibility; we only track writer state).
+ *
+ * Note: this means !xfs_isilocked would give false positives, so don't do that.
+ */
+int
+xfs_isilocked(
+       xfs_inode_t             *ip,
+       uint                    lock_flags)
+{
+       if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
+                       XFS_ILOCK_EXCL) {
+               if (!ip->i_lock.mr_writer)
+                       return 0;
        }
+
+       if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
+                       XFS_IOLOCK_EXCL) {
+               if (!ip->i_iolock.mr_writer)
+                       return 0;
+       }
+
+       return 1;
 }
+#endif
 
 /*
  * The following three routines simply manage the i_flock
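
Because only writer state is recorded, xfs_isilocked() can refute a claim only when the caller asks about the exclusive lock alone; passing both the EXCL and SHARED bits merely documents that the ilock is held in some mode. The two assertion patterns, shown here only for contrast and lifted from the hunks that follow, are:

/* strong check: must hold the ilock for write */
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

/* weak check: held for read or write -- always satisfied in the reader
 * case, since reader state is not tracked */
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));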
index ca12acb90394193571f4b09f9febba47ac091ea7..cf0bb9c1d621fb702897acdfd41bed22cabf8e67 100644 (file)
@@ -1291,7 +1291,7 @@ xfs_file_last_byte(
        xfs_fileoff_t   size_last_block;
        int             error;
 
-       ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS));
+       ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED));
 
        mp = ip->i_mount;
        /*
@@ -1402,7 +1402,7 @@ xfs_itruncate_start(
        bhv_vnode_t     *vp;
        int             error = 0;
 
-       ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
+       ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT((new_size == 0) || (new_size <= ip->i_size));
        ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
               (flags == XFS_ITRUNC_MAYBE));
@@ -1528,8 +1528,7 @@ xfs_itruncate_finish(
        xfs_bmap_free_t free_list;
        int             error;
 
-       ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
-       ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
        ASSERT((new_size == 0) || (new_size <= ip->i_size));
        ASSERT(*tp != NULL);
        ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
@@ -1780,8 +1779,7 @@ xfs_igrow_start(
        xfs_fsize_t     new_size,
        cred_t          *credp)
 {
-       ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
-       ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
        ASSERT(new_size > ip->i_size);
 
        /*
@@ -1809,8 +1807,7 @@ xfs_igrow_finish(
        xfs_fsize_t     new_size,
        int             change_flag)
 {
-       ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
-       ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
        ASSERT(ip->i_transp == tp);
        ASSERT(new_size > ip->i_size);
 
@@ -2287,7 +2284,7 @@ xfs_ifree(
        xfs_dinode_t            *dip;
        xfs_buf_t               *ibp;
 
-       ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(ip->i_transp == tp);
        ASSERT(ip->i_d.di_nlink == 0);
        ASSERT(ip->i_d.di_nextents == 0);
@@ -2746,7 +2743,7 @@ void
 xfs_ipin(
        xfs_inode_t     *ip)
 {
-       ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
        atomic_inc(&ip->i_pincount);
 }
@@ -2779,7 +2776,7 @@ __xfs_iunpin_wait(
 {
        xfs_inode_log_item_t    *iip = ip->i_itemp;
 
-       ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        if (atomic_read(&ip->i_pincount) == 0)
                return;
 
@@ -2829,7 +2826,7 @@ xfs_iextents_copy(
        xfs_fsblock_t           start_block;
 
        ifp = XFS_IFORK_PTR(ip, whichfork);
-       ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        ASSERT(ifp->if_bytes > 0);
 
        nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
@@ -3132,7 +3129,7 @@ xfs_iflush(
 
        XFS_STATS_INC(xs_iflush_count);
 
-       ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        ASSERT(issemalocked(&(ip->i_flock)));
        ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
               ip->i_d.di_nextents > ip->i_df.if_ext_max);
@@ -3297,7 +3294,7 @@ xfs_iflush_int(
        int                     first;
 #endif
 
-       ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        ASSERT(issemalocked(&(ip->i_flock)));
        ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
               ip->i_d.di_nextents > ip->i_df.if_ext_max);
index 93c37697a72c6b0cf7f83501c26633e1afd0969c..0a999fee4f03e343e4038e9e818464e3bf06b83d 100644 (file)
@@ -386,20 +386,9 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags)
 #define        XFS_ILOCK_EXCL          (1<<2)
 #define        XFS_ILOCK_SHARED        (1<<3)
 #define        XFS_IUNLOCK_NONOTIFY    (1<<4)
-/*     #define XFS_IOLOCK_NESTED       (1<<5)  */
-#define XFS_EXTENT_TOKEN_RD    (1<<6)
-#define XFS_SIZE_TOKEN_RD      (1<<7)
-#define XFS_EXTSIZE_RD         (XFS_EXTENT_TOKEN_RD|XFS_SIZE_TOKEN_RD)
-#define XFS_WILLLEND           (1<<8)  /* Always acquire tokens for lending */
-#define XFS_EXTENT_TOKEN_WR    (XFS_EXTENT_TOKEN_RD | XFS_WILLLEND)
-#define XFS_SIZE_TOKEN_WR       (XFS_SIZE_TOKEN_RD | XFS_WILLLEND)
-#define XFS_EXTSIZE_WR         (XFS_EXTSIZE_RD | XFS_WILLLEND)
-/* TODO:XFS_SIZE_TOKEN_WANT    (1<<9) */
 
 #define XFS_LOCK_MASK          (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \
-                               | XFS_ILOCK_EXCL | XFS_ILOCK_SHARED \
-                               | XFS_EXTENT_TOKEN_RD | XFS_SIZE_TOKEN_RD \
-                               | XFS_WILLLEND)
+                               | XFS_ILOCK_EXCL | XFS_ILOCK_SHARED)
 
 /*
  * Flags for lockdep annotations.
@@ -483,6 +472,7 @@ void                xfs_ilock(xfs_inode_t *, uint);
 int            xfs_ilock_nowait(xfs_inode_t *, uint);
 void           xfs_iunlock(xfs_inode_t *, uint);
 void           xfs_ilock_demote(xfs_inode_t *, uint);
+int            xfs_isilocked(xfs_inode_t *, uint);
 void           xfs_iflock(xfs_inode_t *);
 int            xfs_iflock_nowait(xfs_inode_t *);
 uint           xfs_ilock_map_shared(xfs_inode_t *);
@@ -534,7 +524,7 @@ int         xfs_iflush(xfs_inode_t *, uint);
 void           xfs_iflush_all(struct xfs_mount *);
 void           xfs_ichgtime(xfs_inode_t *, int);
 xfs_fsize_t    xfs_file_last_byte(xfs_inode_t *);
-void           xfs_lock_inodes(xfs_inode_t **, int, int, uint);
+void           xfs_lock_inodes(xfs_inode_t **, int, uint);
 
 void           xfs_synchronize_atime(xfs_inode_t *);
 void           xfs_mark_inode_dirty_sync(xfs_inode_t *);
index 93b5db453ea296da3cb64ac7e9b49a63a77fc1b0..167b33f15772c395c1f5d3260bf1e2d36d666c39 100644 (file)
@@ -547,7 +547,7 @@ STATIC void
 xfs_inode_item_pin(
        xfs_inode_log_item_t    *iip)
 {
-       ASSERT(ismrlocked(&(iip->ili_inode->i_lock), MR_UPDATE));
+       ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
        xfs_ipin(iip->ili_inode);
 }
 
@@ -664,13 +664,13 @@ xfs_inode_item_unlock(
 
        ASSERT(iip != NULL);
        ASSERT(iip->ili_inode->i_itemp != NULL);
-       ASSERT(ismrlocked(&(iip->ili_inode->i_lock), MR_UPDATE));
+       ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
        ASSERT((!(iip->ili_inode->i_itemp->ili_flags &
                  XFS_ILI_IOLOCKED_EXCL)) ||
-              ismrlocked(&(iip->ili_inode->i_iolock), MR_UPDATE));
+              xfs_isilocked(iip->ili_inode, XFS_IOLOCK_EXCL));
        ASSERT((!(iip->ili_inode->i_itemp->ili_flags &
                  XFS_ILI_IOLOCKED_SHARED)) ||
-              ismrlocked(&(iip->ili_inode->i_iolock), MR_ACCESS));
+              xfs_isilocked(iip->ili_inode, XFS_IOLOCK_SHARED));
        /*
         * Clear the transaction pointer in the inode.
         */
@@ -769,7 +769,7 @@ xfs_inode_item_pushbuf(
 
        ip = iip->ili_inode;
 
-       ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
 
        /*
         * The ili_pushbuf_flag keeps others from
@@ -857,7 +857,7 @@ xfs_inode_item_push(
 
        ip = iip->ili_inode;
 
-       ASSERT(ismrlocked(&(ip->i_lock), MR_ACCESS));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
        ASSERT(issemalocked(&(ip->i_flock)));
        /*
         * Since we were able to lock the inode's flush lock and
index fb3cf11914195ce85dbea2f5bf90ff3dde668bf4..7edcde691d1a2539e340db2e83769ad12188e6e6 100644 (file)
@@ -196,14 +196,14 @@ xfs_iomap(
                break;
        case BMAPI_WRITE:
                xfs_iomap_enter_trace(XFS_IOMAP_WRITE_ENTER, ip, offset, count);
-               lockmode = XFS_ILOCK_EXCL|XFS_EXTSIZE_WR;
+               lockmode = XFS_ILOCK_EXCL;
                if (flags & BMAPI_IGNSTATE)
                        bmapi_flags |= XFS_BMAPI_IGSTATE|XFS_BMAPI_ENTIRE;
                xfs_ilock(ip, lockmode);
                break;
        case BMAPI_ALLOCATE:
                xfs_iomap_enter_trace(XFS_IOMAP_ALLOC_ENTER, ip, offset, count);
-               lockmode = XFS_ILOCK_SHARED|XFS_EXTSIZE_RD;
+               lockmode = XFS_ILOCK_SHARED;
                bmapi_flags = XFS_BMAPI_ENTIRE;
 
                /* Attempt non-blocking lock */
@@ -523,8 +523,7 @@ xfs_iomap_write_direct(
                goto error_out;
        }
 
-       if (unlikely(!imap.br_startblock &&
-                    !(XFS_IS_REALTIME_INODE(ip)))) {
+       if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip))) {
                error = xfs_cmn_err_fsblock_zero(ip, &imap);
                goto error_out;
        }
@@ -624,7 +623,7 @@ xfs_iomap_write_delay(
        int             prealloc, fsynced = 0;
        int             error;
 
-       ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
        /*
         * Make sure that the dquots are there. This doesn't hold
@@ -686,8 +685,7 @@ retry:
                goto retry;
        }
 
-       if (unlikely(!imap[0].br_startblock &&
-                    !(XFS_IS_REALTIME_INODE(ip))))
+       if (!(imap[0].br_startblock || XFS_IS_REALTIME_INODE(ip)))
                return xfs_cmn_err_fsblock_zero(ip, &imap[0]);
 
        *ret_imap = imap[0];
@@ -838,9 +836,9 @@ xfs_iomap_write_allocate(
                 * See if we were able to allocate an extent that
                 * covers at least part of the caller's request
                 */
-               if (unlikely(!imap.br_startblock &&
-                            XFS_IS_REALTIME_INODE(ip)))
+               if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
                        return xfs_cmn_err_fsblock_zero(ip, &imap);
+
                if ((offset_fsb >= imap.br_startoff) &&
                    (offset_fsb < (imap.br_startoff +
                                   imap.br_blockcount))) {
@@ -934,8 +932,7 @@ xfs_iomap_write_unwritten(
                if (error)
                        return XFS_ERROR(error);
 
-               if (unlikely(!imap.br_startblock &&
-                            !(XFS_IS_REALTIME_INODE(ip))))
+               if (!(imap.br_startblock || XFS_IS_REALTIME_INODE(ip)))
                        return xfs_cmn_err_fsblock_zero(ip, &imap);
 
                if ((numblks_fsb = imap.br_blockcount) == 0) {
index eb85bdedad0c7505aa90bdeb09c97403b3979631..419de15aeb43f004f3ae1e67ef37676a3e2f8629 100644 (file)
@@ -71,11 +71,6 @@ xfs_bulkstat_one_iget(
 
        ASSERT(ip != NULL);
        ASSERT(ip->i_blkno != (xfs_daddr_t)0);
-       if (ip->i_d.di_mode == 0) {
-               *stat = BULKSTAT_RV_NOTHING;
-               error = XFS_ERROR(ENOENT);
-               goto out_iput;
-       }
 
        vp = XFS_ITOV(ip);
        dic = &ip->i_d;
@@ -124,7 +119,6 @@ xfs_bulkstat_one_iget(
                break;
        }
 
- out_iput:
        xfs_iput(ip, XFS_ILOCK_SHARED);
        return error;
 }
index 2fec452afbcc8a710ff3db930cf21f0f289d56f4..da3988453b712b0c64d3afc00f6fc159271c34cf 100644 (file)
@@ -54,8 +54,9 @@ STATIC void   xfs_unmountfs_wait(xfs_mount_t *);
 #ifdef HAVE_PERCPU_SB
 STATIC void    xfs_icsb_destroy_counters(xfs_mount_t *);
 STATIC void    xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
-                                               int, int);
-STATIC void    xfs_icsb_sync_counters(xfs_mount_t *);
+                                               int);
+STATIC void    xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
+                                               int);
 STATIC int     xfs_icsb_modify_counters(xfs_mount_t *, xfs_sb_field_t,
                                                int64_t, int);
 STATIC void    xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
@@ -63,8 +64,8 @@ STATIC void   xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
 #else
 
 #define xfs_icsb_destroy_counters(mp)                  do { } while (0)
-#define xfs_icsb_balance_counter(mp, a, b, c)          do { } while (0)
-#define xfs_icsb_sync_counters(mp)                     do { } while (0)
+#define xfs_icsb_balance_counter(mp, a, b)             do { } while (0)
+#define xfs_icsb_balance_counter_locked(mp, a, b)      do { } while (0)
 #define xfs_icsb_modify_counters(mp, a, b, c)          do { } while (0)
 
 #endif
@@ -1400,7 +1401,7 @@ xfs_log_sbcount(
        if (!xfs_fs_writable(mp))
                return 0;
 
-       xfs_icsb_sync_counters(mp);
+       xfs_icsb_sync_counters(mp, 0);
 
        /*
         * we don't need to do this if we are updating the superblock
@@ -2026,9 +2027,9 @@ xfs_icsb_cpu_notify(
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                xfs_icsb_lock(mp);
-               xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
-               xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
-               xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
+               xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
+               xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
+               xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
                xfs_icsb_unlock(mp);
                break;
        case CPU_DEAD:
@@ -2048,12 +2049,9 @@ xfs_icsb_cpu_notify(
 
                memset(cntp, 0, sizeof(xfs_icsb_cnts_t));
 
-               xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT,
-                                        XFS_ICSB_SB_LOCKED, 0);
-               xfs_icsb_balance_counter(mp, XFS_SBS_IFREE,
-                                        XFS_ICSB_SB_LOCKED, 0);
-               xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS,
-                                        XFS_ICSB_SB_LOCKED, 0);
+               xfs_icsb_balance_counter_locked(mp, XFS_SBS_ICOUNT, 0);
+               xfs_icsb_balance_counter_locked(mp, XFS_SBS_IFREE, 0);
+               xfs_icsb_balance_counter_locked(mp, XFS_SBS_FDBLOCKS, 0);
                spin_unlock(&mp->m_sb_lock);
                xfs_icsb_unlock(mp);
                break;
@@ -2105,9 +2103,9 @@ xfs_icsb_reinit_counters(
         * initial balance kicks us off correctly
         */
        mp->m_icsb_counters = -1;
-       xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0, 0);
-       xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0, 0);
-       xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0, 0);
+       xfs_icsb_balance_counter(mp, XFS_SBS_ICOUNT, 0);
+       xfs_icsb_balance_counter(mp, XFS_SBS_IFREE, 0);
+       xfs_icsb_balance_counter(mp, XFS_SBS_FDBLOCKS, 0);
        xfs_icsb_unlock(mp);
 }
 
@@ -2223,7 +2221,7 @@ xfs_icsb_disable_counter(
        if (!test_and_set_bit(field, &mp->m_icsb_counters)) {
                /* drain back to superblock */
 
-               xfs_icsb_count(mp, &cnt, XFS_ICSB_SB_LOCKED|XFS_ICSB_LAZY_COUNT);
+               xfs_icsb_count(mp, &cnt, XFS_ICSB_LAZY_COUNT);
                switch(field) {
                case XFS_SBS_ICOUNT:
                        mp->m_sb.sb_icount = cnt.icsb_icount;
@@ -2278,38 +2276,33 @@ xfs_icsb_enable_counter(
 }
 
 void
-xfs_icsb_sync_counters_flags(
+xfs_icsb_sync_counters_locked(
        xfs_mount_t     *mp,
        int             flags)
 {
        xfs_icsb_cnts_t cnt;
 
-       /* Pass 1: lock all counters */
-       if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-               spin_lock(&mp->m_sb_lock);
-
        xfs_icsb_count(mp, &cnt, flags);
 
-       /* Step 3: update mp->m_sb fields */
        if (!xfs_icsb_counter_disabled(mp, XFS_SBS_ICOUNT))
                mp->m_sb.sb_icount = cnt.icsb_icount;
        if (!xfs_icsb_counter_disabled(mp, XFS_SBS_IFREE))
                mp->m_sb.sb_ifree = cnt.icsb_ifree;
        if (!xfs_icsb_counter_disabled(mp, XFS_SBS_FDBLOCKS))
                mp->m_sb.sb_fdblocks = cnt.icsb_fdblocks;
-
-       if ((flags & XFS_ICSB_SB_LOCKED) == 0)
-               spin_unlock(&mp->m_sb_lock);
 }
 
 /*
  * Accurate update of per-cpu counters to incore superblock
  */
-STATIC void
+void
 xfs_icsb_sync_counters(
-       xfs_mount_t     *mp)
+       xfs_mount_t     *mp,
+       int             flags)
 {
-       xfs_icsb_sync_counters_flags(mp, 0);
+       spin_lock(&mp->m_sb_lock);
+       xfs_icsb_sync_counters_locked(mp, flags);
+       spin_unlock(&mp->m_sb_lock);
 }
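
The hunk above is an instance of the "caller holds the lock" split: the _locked variant assumes m_sb_lock is already held, and the public function is just a lock/call/unlock wrapper around it. A rough, self-contained sketch of the same shape, using a pthread mutex and made-up names (sb_lock, sync_counters, sync_counters_locked) rather than anything from XFS:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sb_lock = PTHREAD_MUTEX_INITIALIZER;
static long sb_counter;

/* Does the work; the caller is expected to hold sb_lock already. */
static void sync_counters_locked(long delta)
{
        sb_counter += delta;
}

/* Public entry point: take the lock around the _locked helper. */
static void sync_counters(long delta)
{
        pthread_mutex_lock(&sb_lock);
        sync_counters_locked(delta);
        pthread_mutex_unlock(&sb_lock);
}

int main(void)
{
        sync_counters(42);
        printf("counter = %ld\n", sb_counter);
        return 0;
}

Callers that already hold m_sb_lock (for example the CPU-hotplug teardown path earlier in this diff, which drops the spinlock itself) call the *_locked variants directly and avoid taking the lock twice.
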
 
 /*
@@ -2332,19 +2325,15 @@ xfs_icsb_sync_counters(
 #define XFS_ICSB_FDBLK_CNTR_REENABLE(mp) \
                (uint64_t)(512 + XFS_ALLOC_SET_ASIDE(mp))
 STATIC void
-xfs_icsb_balance_counter(
+xfs_icsb_balance_counter_locked(
        xfs_mount_t     *mp,
        xfs_sb_field_t  field,
-       int             flags,
        int             min_per_cpu)
 {
        uint64_t        count, resid;
        int             weight = num_online_cpus();
        uint64_t        min = (uint64_t)min_per_cpu;
 
-       if (!(flags & XFS_ICSB_SB_LOCKED))
-               spin_lock(&mp->m_sb_lock);
-
        /* disable counter and sync counter */
        xfs_icsb_disable_counter(mp, field);
 
@@ -2354,19 +2343,19 @@ xfs_icsb_balance_counter(
                count = mp->m_sb.sb_icount;
                resid = do_div(count, weight);
                if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
-                       goto out;
+                       return;
                break;
        case XFS_SBS_IFREE:
                count = mp->m_sb.sb_ifree;
                resid = do_div(count, weight);
                if (count < max(min, XFS_ICSB_INO_CNTR_REENABLE))
-                       goto out;
+                       return;
                break;
        case XFS_SBS_FDBLOCKS:
                count = mp->m_sb.sb_fdblocks;
                resid = do_div(count, weight);
                if (count < max(min, XFS_ICSB_FDBLK_CNTR_REENABLE(mp)))
-                       goto out;
+                       return;
                break;
        default:
                BUG();
@@ -2375,9 +2364,17 @@ xfs_icsb_balance_counter(
        }
 
        xfs_icsb_enable_counter(mp, field, count, resid);
-out:
-       if (!(flags & XFS_ICSB_SB_LOCKED))
-               spin_unlock(&mp->m_sb_lock);
+}
+
+STATIC void
+xfs_icsb_balance_counter(
+       xfs_mount_t     *mp,
+       xfs_sb_field_t  fields,
+       int             min_per_cpu)
+{
+       spin_lock(&mp->m_sb_lock);
+       xfs_icsb_balance_counter_locked(mp, fields, min_per_cpu);
+       spin_unlock(&mp->m_sb_lock);
 }
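
xfs_icsb_balance_counter_locked() redistributes an incore superblock counter across CPUs: do_div() leaves the per-CPU share in count and the remainder in resid, and the counter is only re-enabled when that share clears the re-enable threshold. A tiny userspace sketch of just that arithmetic, with invented values standing in for sb_fdblocks and num_online_cpus():

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t global = 1000003;      /* pretend sb_fdblocks */
        uint64_t ncpus = 4;             /* pretend num_online_cpus() */

        uint64_t per_cpu = global / ncpus;  /* what do_div() leaves in "count" */
        uint64_t resid = global % ncpus;    /* what do_div() returns */

        /* Each per-CPU counter would receive per_cpu; resid stays with the
         * incore superblock so the total still matches exactly. */
        printf("per-cpu share %" PRIu64 ", residual %" PRIu64 "\n",
               per_cpu, resid);
        return 0;
}
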
 
 STATIC int
@@ -2484,7 +2481,7 @@ slow_path:
         * we are done.
         */
        if (ret != ENOSPC)
-               xfs_icsb_balance_counter(mp, field, 0, 0);
+               xfs_icsb_balance_counter(mp, field, 0);
        xfs_icsb_unlock(mp);
        return ret;
 
@@ -2508,7 +2505,7 @@ balance_counter:
         * will either succeed through the fast path or slow path without
         * another balance operation being required.
         */
-       xfs_icsb_balance_counter(mp, field, 0, delta);
+       xfs_icsb_balance_counter(mp, field, delta);
        xfs_icsb_unlock(mp);
        goto again;
 }
index 1ed575110ff0149084cc46fee7014b8294817919..63e0693a358a295e4657eabde52ebc88b8de965e 100644 (file)
@@ -206,17 +206,18 @@ typedef struct xfs_icsb_cnts {
 
 #define XFS_ICSB_FLAG_LOCK     (1 << 0)        /* counter lock bit */
 
-#define XFS_ICSB_SB_LOCKED     (1 << 0)        /* sb already locked */
 #define XFS_ICSB_LAZY_COUNT    (1 << 1)        /* accuracy not needed */
 
 extern int     xfs_icsb_init_counters(struct xfs_mount *);
 extern void    xfs_icsb_reinit_counters(struct xfs_mount *);
-extern void    xfs_icsb_sync_counters_flags(struct xfs_mount *, int);
+extern void    xfs_icsb_sync_counters(struct xfs_mount *, int);
+extern void    xfs_icsb_sync_counters_locked(struct xfs_mount *, int);
 
 #else
 #define xfs_icsb_init_counters(mp)     (0)
 #define xfs_icsb_reinit_counters(mp)   do { } while (0)
-#define xfs_icsb_sync_counters_flags(mp, flags)        do { } while (0)
+#define xfs_icsb_sync_counters(mp, flags)      do { } while (0)
+#define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0)
 #endif
 
 typedef struct xfs_ail {
index ee371890d85db6b20b73627c510baab2563b69fd..d8063e1ad2987e94470a9ff24cff44dece1c5f0d 100644 (file)
@@ -55,85 +55,32 @@ xfs_rename_unlock4(
 
        xfs_iunlock(i_tab[0], lock_mode);
        for (i = 1; i < 4; i++) {
-               if (i_tab[i] == NULL) {
+               if (i_tab[i] == NULL)
                        break;
-               }
+
                /*
                 * Watch out for duplicate entries in the table.
                 */
-               if (i_tab[i] != i_tab[i-1]) {
+               if (i_tab[i] != i_tab[i-1])
                        xfs_iunlock(i_tab[i], lock_mode);
-               }
        }
 }
 
-#ifdef DEBUG
-int xfs_rename_skip, xfs_rename_nskip;
-#endif
-
 /*
- * The following routine will acquire the locks required for a rename
- * operation. The code understands the semantics of renames and will
- * validate that name1 exists under dp1 & that name2 may or may not
- * exist under dp2.
- *
- * We are renaming dp1/name1 to dp2/name2.
- *
- * Return ENOENT if dp1 does not exist, other lookup errors, or 0 for success.
+ * Enter all inodes for a rename transaction into a sorted array.
  */
-STATIC int
-xfs_lock_for_rename(
+STATIC void
+xfs_sort_for_rename(
        xfs_inode_t     *dp1,   /* in: old (source) directory inode */
        xfs_inode_t     *dp2,   /* in: new (target) directory inode */
        xfs_inode_t     *ip1,   /* in: inode of old entry */
-       struct xfs_name *name2, /* in: new entry name */
-       xfs_inode_t     **ipp2, /* out: inode of new entry, if it
+       xfs_inode_t     *ip2,   /* in: inode of new entry, if it
                                   already exists, NULL otherwise. */
        xfs_inode_t     **i_tab,/* out: array of inode returned, sorted */
        int             *num_inodes)  /* out: number of inodes in array */
 {
-       xfs_inode_t             *ip2 = NULL;
        xfs_inode_t             *temp;
-       xfs_ino_t               inum1, inum2;
-       int                     error;
        int                     i, j;
-       uint                    lock_mode;
-       int                     diff_dirs = (dp1 != dp2);
-
-       /*
-        * First, find out the current inums of the entries so that we
-        * can determine the initial locking order.  We'll have to
-        * sanity check stuff after all the locks have been acquired
-        * to see if we still have the right inodes, directories, etc.
-        */
-       lock_mode = xfs_ilock_map_shared(dp1);
-       IHOLD(ip1);
-       xfs_itrace_ref(ip1);
-
-       inum1 = ip1->i_ino;
-
-       /*
-        * Unlock dp1 and lock dp2 if they are different.
-        */
-       if (diff_dirs) {
-               xfs_iunlock_map_shared(dp1, lock_mode);
-               lock_mode = xfs_ilock_map_shared(dp2);
-       }
-
-       error = xfs_dir_lookup_int(dp2, lock_mode, name2, &inum2, &ip2);
-       if (error == ENOENT) {          /* target does not need to exist. */
-               inum2 = 0;
-       } else if (error) {
-               /*
-                * If dp2 and dp1 are the same, the next line unlocks dp1.
-                * Got it?
-                */
-               xfs_iunlock_map_shared(dp2, lock_mode);
-               IRELE (ip1);
-               return error;
-       } else {
-               xfs_itrace_ref(ip2);
-       }
 
        /*
         * i_tab contains a list of pointers to inodes.  We initialize
@@ -145,21 +92,20 @@ xfs_lock_for_rename(
        i_tab[0] = dp1;
        i_tab[1] = dp2;
        i_tab[2] = ip1;
-       if (inum2 == 0) {
-               *num_inodes = 3;
-               i_tab[3] = NULL;
-       } else {
+       if (ip2) {
                *num_inodes = 4;
                i_tab[3] = ip2;
+       } else {
+               *num_inodes = 3;
+               i_tab[3] = NULL;
        }
-       *ipp2 = i_tab[3];
 
        /*
         * Sort the elements via bubble sort.  (Remember, there are at
         * most 4 elements to sort, so this is adequate.)
         */
-       for (i=0; i < *num_inodes; i++) {
-               for (j=1; j < *num_inodes; j++) {
+       for (i = 0; i < *num_inodes; i++) {
+               for (j = 1; j < *num_inodes; j++) {
                        if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
                                temp = i_tab[j];
                                i_tab[j] = i_tab[j-1];
@@ -167,30 +113,6 @@ xfs_lock_for_rename(
                        }
                }
        }
-
-       /*
-        * We have dp2 locked. If it isn't first, unlock it.
-        * If it is first, tell xfs_lock_inodes so it can skip it
-        * when locking. if dp1 == dp2, xfs_lock_inodes will skip both
-        * since they are equal. xfs_lock_inodes needs all these inodes
-        * so that it can unlock and retry if there might be a dead-lock
-        * potential with the log.
-        */
-
-       if (i_tab[0] == dp2 && lock_mode == XFS_ILOCK_SHARED) {
-#ifdef DEBUG
-               xfs_rename_skip++;
-#endif
-               xfs_lock_inodes(i_tab, *num_inodes, 1, XFS_ILOCK_SHARED);
-       } else {
-#ifdef DEBUG
-               xfs_rename_nskip++;
-#endif
-               xfs_iunlock_map_shared(dp2, lock_mode);
-               xfs_lock_inodes(i_tab, *num_inodes, 0, XFS_ILOCK_SHARED);
-       }
-
-       return 0;
 }
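
xfs_sort_for_rename() exists so that xfs_lock_inodes() can later take every ilock in one global order (ascending inode number), which rules out ABBA deadlocks between concurrent renames. A standalone sketch of the same idea, with a made-up fake_inode type in place of xfs_inode_t; duplicates (dp1 == dp2, and so on) simply end up adjacent, so they can be skipped when locking and unlocking, as xfs_rename_unlock4() above does:

#include <stdio.h>

struct fake_inode { unsigned long i_ino; };

/* Bubble sort by inode number -- fine for at most four entries. */
static void sort_for_lock(struct fake_inode **tab, int n)
{
        for (int i = 0; i < n; i++) {
                for (int j = 1; j < n; j++) {
                        if (tab[j]->i_ino < tab[j - 1]->i_ino) {
                                struct fake_inode *tmp = tab[j];
                                tab[j] = tab[j - 1];
                                tab[j - 1] = tmp;
                        }
                }
        }
}

int main(void)
{
        struct fake_inode a = { 42 }, b = { 7 }, c = { 99 }, d = { 7 };
        struct fake_inode *tab[] = { &a, &b, &c, &d };

        sort_for_lock(tab, 4);
        for (int i = 0; i < 4; i++)
                printf("%lu ", tab[i]->i_ino); /* prints: 7 7 42 99 */
        printf("\n");
        return 0;
}
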
 
 /*
@@ -202,10 +124,10 @@ xfs_rename(
        struct xfs_name *src_name,
        xfs_inode_t     *src_ip,
        xfs_inode_t     *target_dp,
-       struct xfs_name *target_name)
+       struct xfs_name *target_name,
+       xfs_inode_t     *target_ip)
 {
-       xfs_trans_t     *tp;
-       xfs_inode_t     *target_ip;
+       xfs_trans_t     *tp = NULL;
        xfs_mount_t     *mp = src_dp->i_mount;
        int             new_parent;             /* moving to a new dir */
        int             src_is_directory;       /* src_name is a directory */
@@ -215,9 +137,7 @@ xfs_rename(
        int             cancel_flags;
        int             committed;
        xfs_inode_t     *inodes[4];
-       int             target_ip_dropped = 0;  /* dropped target_ip link? */
        int             spaceres;
-       int             target_link_zero = 0;
        int             num_inodes;
 
        xfs_itrace_entry(src_dp);
@@ -230,64 +150,27 @@ xfs_rename(
                                        target_dp, DM_RIGHT_NULL,
                                        src_name->name, target_name->name,
                                        0, 0, 0);
-               if (error) {
+               if (error)
                        return error;
-               }
        }
        /* Return through std_return after this point. */
 
-       /*
-        * Lock all the participating inodes. Depending upon whether
-        * the target_name exists in the target directory, and
-        * whether the target directory is the same as the source
-        * directory, we can lock from 2 to 4 inodes.
-        * xfs_lock_for_rename() will return ENOENT if src_name
-        * does not exist in the source directory.
-        */
-       tp = NULL;
-       error = xfs_lock_for_rename(src_dp, target_dp, src_ip, target_name,
-                                       &target_ip, inodes, &num_inodes);
-       if (error) {
-               /*
-                * We have nothing locked, no inode references, and
-                * no transaction, so just get out.
-                */
-               goto std_return;
-       }
-
-       ASSERT(src_ip != NULL);
+       new_parent = (src_dp != target_dp);
+       src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR);
 
-       if ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
+       if (src_is_directory) {
                /*
                 * Check for link count overflow on target_dp
                 */
-               if (target_ip == NULL && (src_dp != target_dp) &&
+               if (target_ip == NULL && new_parent &&
                    target_dp->i_d.di_nlink >= XFS_MAXLINK) {
                        error = XFS_ERROR(EMLINK);
-                       xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);
-                       goto rele_return;
+                       goto std_return;
                }
        }
 
-       /*
-        * If we are using project inheritance, we only allow renames
-        * into our tree when the project IDs are the same; else the
-        * tree quota mechanism would be circumvented.
-        */
-       if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
-                    (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) {
-               error = XFS_ERROR(EXDEV);
-               xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);
-               goto rele_return;
-       }
-
-       new_parent = (src_dp != target_dp);
-       src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR);
-
-       /*
-        * Drop the locks on our inodes so that we can start the transaction.
-        */
-       xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);
+       xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip,
+                               inodes, &num_inodes);
 
        XFS_BMAP_INIT(&free_list, &first_block);
        tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
@@ -302,7 +185,7 @@ xfs_rename(
        }
        if (error) {
                xfs_trans_cancel(tp, 0);
-               goto rele_return;
+               goto std_return;
        }
 
        /*
@@ -310,13 +193,29 @@ xfs_rename(
         */
        if ((error = XFS_QM_DQVOPRENAME(mp, inodes))) {
                xfs_trans_cancel(tp, cancel_flags);
-               goto rele_return;
+               goto std_return;
        }
 
        /*
-        * Reacquire the inode locks we dropped above.
+        * Lock all the participating inodes. Depending upon whether
+        * the target_name exists in the target directory, and
+        * whether the target directory is the same as the source
+        * directory, we can lock from 2 to 4 inodes.
+        */
+       xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
+
+       /*
+        * If we are using project inheritance, we only allow renames
+        * into our tree when the project IDs are the same; else the
+        * tree quota mechanism would be circumvented.
         */
-       xfs_lock_inodes(inodes, num_inodes, 0, XFS_ILOCK_EXCL);
+       if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
+                    (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) {
+               error = XFS_ERROR(EXDEV);
+               xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);
+               xfs_trans_cancel(tp, cancel_flags);
+               goto std_return;
+       }
 
        /*
         * Join all the inodes to the transaction. From this point on,
@@ -328,17 +227,17 @@ xfs_rename(
         */
        IHOLD(src_dp);
        xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
+
        if (new_parent) {
                IHOLD(target_dp);
                xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
        }
-       if ((src_ip != src_dp) && (src_ip != target_dp)) {
-               xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
-       }
-       if ((target_ip != NULL) &&
-           (target_ip != src_ip) &&
-           (target_ip != src_dp) &&
-           (target_ip != target_dp)) {
+
+       IHOLD(src_ip);
+       xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
+
+       if (target_ip) {
+               IHOLD(target_ip);
                xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
        }
 
@@ -412,7 +311,6 @@ xfs_rename(
                error = xfs_droplink(tp, target_ip);
                if (error)
                        goto abort_return;
-               target_ip_dropped = 1;
 
                if (src_is_directory) {
                        /*
@@ -422,10 +320,6 @@ xfs_rename(
                        if (error)
                                goto abort_return;
                }
-
-               /* Do this test while we still hold the locks */
-               target_link_zero = (target_ip)->i_d.di_nlink==0;
-
        } /* target_ip != NULL */
 
        /*
@@ -491,15 +385,6 @@ xfs_rename(
                xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
        }
 
-       /*
-        * If there was a target inode, take an extra reference on
-        * it here so that it doesn't go to xfs_inactive() from
-        * within the commit.
-        */
-       if (target_ip != NULL) {
-               IHOLD(target_ip);
-       }
-
        /*
         * If this is a synchronous mount, make sure that the
         * rename transaction goes to disk before returning to
@@ -509,30 +394,11 @@ xfs_rename(
                xfs_trans_set_sync(tp);
        }
 
-       /*
-        * Take refs. for vop_link_removed calls below.  No need to worry
-        * about directory refs. because the caller holds them.
-        *
-        * Do holds before the xfs_bmap_finish since it might rele them down
-        * to zero.
-        */
-
-       if (target_ip_dropped)
-               IHOLD(target_ip);
-       IHOLD(src_ip);
-
        error = xfs_bmap_finish(&tp, &free_list, &committed);
        if (error) {
                xfs_bmap_cancel(&free_list);
                xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
                                 XFS_TRANS_ABORT));
-               if (target_ip != NULL) {
-                       IRELE(target_ip);
-               }
-               if (target_ip_dropped) {
-                       IRELE(target_ip);
-               }
-               IRELE(src_ip);
                goto std_return;
        }
 
@@ -541,15 +407,6 @@ xfs_rename(
         * the vnode references.
         */
        error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-       if (target_ip != NULL)
-               IRELE(target_ip);
-       /*
-        * Let interposed file systems know about removed links.
-        */
-       if (target_ip_dropped)
-               IRELE(target_ip);
-
-       IRELE(src_ip);
 
        /* Fall through to std_return with error = 0 or errno from
         * xfs_trans_commit      */
@@ -571,11 +428,4 @@ std_return:
        xfs_bmap_cancel(&free_list);
        xfs_trans_cancel(tp, cancel_flags);
        goto std_return;
-
- rele_return:
-       IRELE(src_ip);
-       if (target_ip != NULL) {
-               IRELE(target_ip);
-       }
-       goto std_return;
 }
index b8db1d5cde5a1fac03837ae85f130c7091775925..4c70bf5e9985a8145a65b2a3670e41a5ca42ab8a 100644 (file)
@@ -111,13 +111,13 @@ xfs_trans_iget(
                 */
                ASSERT(ip->i_itemp != NULL);
                ASSERT(lock_flags & XFS_ILOCK_EXCL);
-               ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+               ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
                ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
-                      ismrlocked(&ip->i_iolock, MR_UPDATE));
+                      xfs_isilocked(ip, XFS_IOLOCK_EXCL));
                ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
                       (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_EXCL));
                ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
-                      ismrlocked(&ip->i_iolock, (MR_UPDATE | MR_ACCESS)));
+                      xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED));
                ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
                       (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_ANY));
 
@@ -185,7 +185,7 @@ xfs_trans_ijoin(
        xfs_inode_log_item_t    *iip;
 
        ASSERT(ip->i_transp == NULL);
-       ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(lock_flags & XFS_ILOCK_EXCL);
        if (ip->i_itemp == NULL)
                xfs_inode_item_init(ip, ip->i_mount);
@@ -232,7 +232,7 @@ xfs_trans_ihold(
 {
        ASSERT(ip->i_transp == tp);
        ASSERT(ip->i_itemp != NULL);
-       ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
        ip->i_itemp->ili_flags |= XFS_ILI_HOLD;
 }
@@ -257,7 +257,7 @@ xfs_trans_log_inode(
 
        ASSERT(ip->i_transp == tp);
        ASSERT(ip->i_itemp != NULL);
-       ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
        lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(ip->i_itemp));
        ASSERT(lidp != NULL);
index 2b8dc7e40772efc5829fed46870fa16b0005f675..98e5f110ba5f412be32adcc9a2be9205190da9e5 100644 (file)
 #include "xfs_utils.h"
 
 
-int
-xfs_dir_lookup_int(
-       xfs_inode_t     *dp,
-       uint            lock_mode,
-       struct xfs_name *name,
-       xfs_ino_t       *inum,
-       xfs_inode_t     **ipp)
-{
-       int             error;
-
-       xfs_itrace_entry(dp);
-
-       error = xfs_dir_lookup(NULL, dp, name, inum);
-       if (!error) {
-               /*
-                * Unlock the directory. We do this because we can't
-                * hold the directory lock while doing the vn_get()
-                * in xfs_iget().  Doing so could cause us to hold
-                * a lock while waiting for the inode to finish
-                * being inactive while it's waiting for a log
-                * reservation in the inactive routine.
-                */
-               xfs_iunlock(dp, lock_mode);
-               error = xfs_iget(dp->i_mount, NULL, *inum, 0, 0, ipp, 0);
-               xfs_ilock(dp, lock_mode);
-
-               if (error) {
-                       *ipp = NULL;
-               } else if ((*ipp)->i_d.di_mode == 0) {
-                       /*
-                        * The inode has been freed.  Something is
-                        * wrong so just get out of here.
-                        */
-                       xfs_iunlock(dp, lock_mode);
-                       xfs_iput_new(*ipp, 0);
-                       *ipp = NULL;
-                       xfs_ilock(dp, lock_mode);
-                       error = XFS_ERROR(ENOENT);
-               }
-       }
-       return error;
-}
-
 /*
  * Allocates a new inode from disk and returns a pointer to the
  * incore copy. This routine will internally commit the current
@@ -310,7 +267,7 @@ xfs_bump_ino_vers2(
 {
        xfs_mount_t     *mp;
 
-       ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1);
 
        ip->i_d.di_version = XFS_DINODE_VERSION_2;
index 175b126d2cab76c5d06772bff9dd644b5738b505..f316cb85d8e234eb21b573a0a59ddfbccc21a922 100644 (file)
@@ -21,8 +21,6 @@
 #define IRELE(ip)      VN_RELE(XFS_ITOV(ip))
 #define IHOLD(ip)      VN_HOLD(XFS_ITOV(ip))
 
-extern int xfs_dir_lookup_int(xfs_inode_t *, uint, struct xfs_name *,
-                               xfs_ino_t *, xfs_inode_t **);
 extern int xfs_truncate_file(xfs_mount_t *, xfs_inode_t *);
 extern int xfs_dir_ialloc(xfs_trans_t **, xfs_inode_t *, mode_t, xfs_nlink_t,
                                xfs_dev_t, cred_t *, prid_t, int,
index fc48158fe4797acae6de9c0c9152ce506c2f154e..30bacd8bb0e5b8a86251e3dbe03f9fe2c6f08ace 100644 (file)
@@ -186,6 +186,7 @@ xfs_cleanup(void)
        kmem_zone_destroy(xfs_efi_zone);
        kmem_zone_destroy(xfs_ifork_zone);
        kmem_zone_destroy(xfs_ili_zone);
+       kmem_zone_destroy(xfs_log_ticket_zone);
 }
 
 /*
index 6650601c64f7644f612fa2488bdfd78a0a79720d..70702a60b4bbd908669a6d6e46dfb864db1e8241 100644 (file)
@@ -75,132 +75,6 @@ xfs_open(
        return 0;
 }
 
-/*
- * xfs_getattr
- */
-int
-xfs_getattr(
-       xfs_inode_t     *ip,
-       bhv_vattr_t     *vap,
-       int             flags)
-{
-       bhv_vnode_t     *vp = XFS_ITOV(ip);
-       xfs_mount_t     *mp = ip->i_mount;
-
-       xfs_itrace_entry(ip);
-
-       if (XFS_FORCED_SHUTDOWN(mp))
-               return XFS_ERROR(EIO);
-
-       if (!(flags & ATTR_LAZY))
-               xfs_ilock(ip, XFS_ILOCK_SHARED);
-
-       vap->va_size = XFS_ISIZE(ip);
-       if (vap->va_mask == XFS_AT_SIZE)
-               goto all_done;
-
-       vap->va_nblocks =
-               XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
-       vap->va_nodeid = ip->i_ino;
-#if XFS_BIG_INUMS
-       vap->va_nodeid += mp->m_inoadd;
-#endif
-       vap->va_nlink = ip->i_d.di_nlink;
-
-       /*
-        * Quick exit for non-stat callers
-        */
-       if ((vap->va_mask &
-           ~(XFS_AT_SIZE|XFS_AT_FSID|XFS_AT_NODEID|
-             XFS_AT_NLINK|XFS_AT_BLKSIZE)) == 0)
-               goto all_done;
-
-       /*
-        * Copy from in-core inode.
-        */
-       vap->va_mode = ip->i_d.di_mode;
-       vap->va_uid = ip->i_d.di_uid;
-       vap->va_gid = ip->i_d.di_gid;
-       vap->va_projid = ip->i_d.di_projid;
-
-       /*
-        * Check vnode type block/char vs. everything else.
-        */
-       switch (ip->i_d.di_mode & S_IFMT) {
-       case S_IFBLK:
-       case S_IFCHR:
-               vap->va_rdev = ip->i_df.if_u2.if_rdev;
-               vap->va_blocksize = BLKDEV_IOSIZE;
-               break;
-       default:
-               vap->va_rdev = 0;
-
-               if (!(XFS_IS_REALTIME_INODE(ip))) {
-                       vap->va_blocksize = xfs_preferred_iosize(mp);
-               } else {
-
-                       /*
-                        * If the file blocks are being allocated from a
-                        * realtime partition, then return the inode's
-                        * realtime extent size or the realtime volume's
-                        * extent size.
-                        */
-                       vap->va_blocksize =
-                               xfs_get_extsz_hint(ip) << mp->m_sb.sb_blocklog;
-               }
-               break;
-       }
-
-       vn_atime_to_timespec(vp, &vap->va_atime);
-       vap->va_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
-       vap->va_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
-       vap->va_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
-       vap->va_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
-
-       /*
-        * Exit for stat callers.  See if any of the rest of the fields
-        * to be filled in are needed.
-        */
-       if ((vap->va_mask &
-            (XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|
-             XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0)
-               goto all_done;
-
-       /*
-        * Convert di_flags to xflags.
-        */
-       vap->va_xflags = xfs_ip2xflags(ip);
-
-       /*
-        * Exit for inode revalidate.  See if any of the rest of
-        * the fields to be filled in are needed.
-        */
-       if ((vap->va_mask &
-            (XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|
-             XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0)
-               goto all_done;
-
-       vap->va_extsize = ip->i_d.di_extsize << mp->m_sb.sb_blocklog;
-       vap->va_nextents =
-               (ip->i_df.if_flags & XFS_IFEXTENTS) ?
-                       ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) :
-                       ip->i_d.di_nextents;
-       if (ip->i_afp)
-               vap->va_anextents =
-                       (ip->i_afp->if_flags & XFS_IFEXTENTS) ?
-                               ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) :
-                                ip->i_d.di_anextents;
-       else
-               vap->va_anextents = 0;
-       vap->va_gen = ip->i_d.di_gen;
-
- all_done:
-       if (!(flags & ATTR_LAZY))
-               xfs_iunlock(ip, XFS_ILOCK_SHARED);
-       return 0;
-}
-
-
 /*
  * xfs_setattr
  */
@@ -211,7 +85,6 @@ xfs_setattr(
        int                     flags,
        cred_t                  *credp)
 {
-       bhv_vnode_t             *vp = XFS_ITOV(ip);
        xfs_mount_t             *mp = ip->i_mount;
        xfs_trans_t             *tp;
        int                     mask;
@@ -222,7 +95,6 @@ xfs_setattr(
        gid_t                   gid=0, igid=0;
        int                     timeflags = 0;
        xfs_prid_t              projid=0, iprojid=0;
-       int                     mandlock_before, mandlock_after;
        struct xfs_dquot        *udqp, *gdqp, *olddquot1, *olddquot2;
        int                     file_owner;
        int                     need_iolock = 1;
@@ -383,7 +255,7 @@ xfs_setattr(
                                m |= S_ISGID;
 #if 0
                        /* Linux allows this, Irix doesn't. */
-                       if ((vap->va_mode & S_ISVTX) && !VN_ISDIR(vp))
+                       if ((vap->va_mode & S_ISVTX) && !S_ISDIR(ip->i_d.di_mode))
                                m |= S_ISVTX;
 #endif
                        if (m && !capable(CAP_FSETID))
@@ -461,10 +333,10 @@ xfs_setattr(
                        goto error_return;
                }
 
-               if (VN_ISDIR(vp)) {
+               if (S_ISDIR(ip->i_d.di_mode)) {
                        code = XFS_ERROR(EISDIR);
                        goto error_return;
-               } else if (!VN_ISREG(vp)) {
+               } else if (!S_ISREG(ip->i_d.di_mode)) {
                        code = XFS_ERROR(EINVAL);
                        goto error_return;
                }
@@ -626,9 +498,6 @@ xfs_setattr(
                xfs_trans_ihold(tp, ip);
        }
 
-       /* determine whether mandatory locking mode changes */
-       mandlock_before = MANDLOCK(vp, ip->i_d.di_mode);
-
        /*
         * Truncate file.  Must have write permission and not be a directory.
         */
@@ -858,13 +727,6 @@ xfs_setattr(
                code = xfs_trans_commit(tp, commit_flags);
        }
 
-       /*
-        * If the (regular) file's mandatory locking mode changed, then
-        * notify the vnode.  We do this under the inode lock to prevent
-        * racing calls to vop_vnode_change.
-        */
-       mandlock_after = MANDLOCK(vp, ip->i_d.di_mode);
-
        xfs_iunlock(ip, lock_flags);
 
        /*
@@ -1443,7 +1305,7 @@ xfs_inactive_attrs(
        int             error;
        xfs_mount_t     *mp;
 
-       ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
+       ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        tp = *tpp;
        mp = ip->i_mount;
        ASSERT(ip->i_d.di_forkoff != 0);
@@ -1491,7 +1353,7 @@ xfs_release(
        xfs_mount_t     *mp = ip->i_mount;
        int             error;
 
-       if (!VN_ISREG(vp) || (ip->i_d.di_mode == 0))
+       if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
                return 0;
 
        /* If this is a read-only mount, don't do this (would generate I/O) */
@@ -1774,8 +1636,7 @@ xfs_lookup(
        struct xfs_name         *name,
        xfs_inode_t             **ipp)
 {
-       xfs_inode_t             *ip;
-       xfs_ino_t               e_inum;
+       xfs_ino_t               inum;
        int                     error;
        uint                    lock_mode;
 
@@ -1785,12 +1646,21 @@ xfs_lookup(
                return XFS_ERROR(EIO);
 
        lock_mode = xfs_ilock_map_shared(dp);
-       error = xfs_dir_lookup_int(dp, lock_mode, name, &e_inum, &ip);
-       if (!error) {
-               *ipp = ip;
-               xfs_itrace_ref(ip);
-       }
+       error = xfs_dir_lookup(NULL, dp, name, &inum);
        xfs_iunlock_map_shared(dp, lock_mode);
+
+       if (error)
+               goto out;
+
+       error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0);
+       if (error)
+               goto out;
+
+       xfs_itrace_ref(*ipp);
+       return 0;
+
+ out:
+       *ipp = NULL;
        return error;
 }
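
The rewritten xfs_lookup() drops the directory lock before calling xfs_iget(), for the reason the deleted xfs_dir_lookup_int() comment gave: instantiating the target inode may block (for example while it finishes going inactive), and that wait must not happen with the directory still locked. A small pthread sketch of the same ordering, with invented dir_lookup()/inode_get() helpers standing in for the directory lookup and the inode cache:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;

/* Pretend directory: maps any name to a fixed inode number. */
static unsigned long dir_lookup(const char *name)
{
        (void)name;
        return 1234;
}

/* Pretend inode-cache lookup; in the real code this may block for a while. */
static int inode_get(unsigned long inum)
{
        printf("instantiating inode %lu\n", inum);
        return 0;
}

int main(void)
{
        unsigned long inum;

        pthread_mutex_lock(&dir_lock);
        inum = dir_lookup("foo");        /* name -> number under the lock */
        pthread_mutex_unlock(&dir_lock); /* drop it before the slow step */

        return inode_get(inum);
}
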
 
@@ -1906,7 +1776,7 @@ xfs_create(
         * It is locked (and joined to the transaction).
         */
 
-       ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
        /*
         * Now we join the directory inode to the transaction.  We do not do it
@@ -2112,7 +1982,7 @@ again:
 
                ips[0] = ip;
                ips[1] = dp;
-               xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL);
+               xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
        }
        /* else  e_inum == dp->i_ino */
        /*     This can happen if we're asked to lock /x/..
@@ -2160,7 +2030,6 @@ void
 xfs_lock_inodes(
        xfs_inode_t     **ips,
        int             inodes,
-       int             first_locked,
        uint            lock_mode)
 {
        int             attempts = 0, i, j, try_lock;
@@ -2168,13 +2037,8 @@ xfs_lock_inodes(
 
        ASSERT(ips && (inodes >= 2)); /* we need at least two */
 
-       if (first_locked) {
-               try_lock = 1;
-               i = 1;
-       } else {
-               try_lock = 0;
-               i = 0;
-       }
+       try_lock = 0;
+       i = 0;
 
 again:
        for (; i < inodes; i++) {
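
xfs_lock_inodes() (its loop head is the last context line above) walks the sorted array and normally just blocks for each lock, but once any inode it already holds is pinned in the AIL it switches to trylock for the rest; a failed trylock backs all the locks out and restarts the walk, so the thread never sleeps on a later lock while holding earlier ones against the log. The routine also counts attempts and skips duplicate entries, which this deliberately simplified, self-contained pthread sketch leaves out:

#include <pthread.h>
#include <stdio.h>

#define NLOCKS 3
static pthread_mutex_t locks[NLOCKS] = {
        PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER,
};

static void lock_all(void)
{
again:
        for (int i = 0; i < NLOCKS; i++) {
                if (i == 0) {
                        pthread_mutex_lock(&locks[0]); /* first one may block */
                        continue;
                }
                if (pthread_mutex_trylock(&locks[i]) != 0) {
                        /* Drop everything already held and start over. */
                        while (--i >= 0)
                                pthread_mutex_unlock(&locks[i]);
                        goto again;
                }
        }
}

int main(void)
{
        lock_all();
        for (int i = 0; i < NLOCKS; i++)
                pthread_mutex_unlock(&locks[i]);
        puts("took and released all locks in order");
        return 0;
}
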
@@ -2298,29 +2162,14 @@ xfs_remove(
                        return error;
        }
 
-       /*
-        * We need to get a reference to ip before we get our log
-        * reservation. The reason for this is that we cannot call
-        * xfs_iget for an inode for which we do not have a reference
-        * once we've acquired a log reservation. This is because the
-        * inode we are trying to get might be in xfs_inactive going
-        * for a log reservation. Since we'll have to wait for the
-        * inactive code to complete before returning from xfs_iget,
-        * we need to make sure that we don't have log space reserved
-        * when we call xfs_iget.  Instead we get an unlocked reference
-        * to the inode before getting our log reservation.
-        */
-       IHOLD(ip);
-
        xfs_itrace_entry(ip);
        xfs_itrace_ref(ip);
 
        error = XFS_QM_DQATTACH(mp, dp, 0);
-       if (!error && dp != ip)
+       if (!error)
                error = XFS_QM_DQATTACH(mp, ip, 0);
        if (error) {
                REMOVE_DEBUG_TRACE(__LINE__);
-               IRELE(ip);
                goto std_return;
        }
 
@@ -2347,7 +2196,6 @@ xfs_remove(
                ASSERT(error != ENOSPC);
                REMOVE_DEBUG_TRACE(__LINE__);
                xfs_trans_cancel(tp, 0);
-               IRELE(ip);
                return error;
        }
 
@@ -2355,7 +2203,6 @@ xfs_remove(
        if (error) {
                REMOVE_DEBUG_TRACE(__LINE__);
                xfs_trans_cancel(tp, cancel_flags);
-               IRELE(ip);
                goto std_return;
        }
 
@@ -2363,23 +2210,18 @@ xfs_remove(
         * At this point, we've gotten both the directory and the entry
         * inodes locked.
         */
+       IHOLD(ip);
        xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
-       if (dp != ip) {
-               /*
-                * Increment vnode ref count only in this case since
-                * there's an extra vnode reference in the case where
-                * dp == ip.
-                */
-               IHOLD(dp);
-               xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
-       }
+
+       IHOLD(dp);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 
        /*
         * Entry must exist since we did a lookup in xfs_lock_dir_and_entry.
         */
        XFS_BMAP_INIT(&free_list, &first_block);
        error = xfs_dir_removename(tp, dp, name, ip->i_ino,
-                                       &first_block, &free_list, 0);
+                                       &first_block, &free_list, resblks);
        if (error) {
                ASSERT(error != ENOENT);
                REMOVE_DEBUG_TRACE(__LINE__);
@@ -2401,12 +2243,6 @@ xfs_remove(
         */
        link_zero = (ip)->i_d.di_nlink==0;
 
-       /*
-        * Take an extra ref on the inode so that it doesn't
-        * go to xfs_inactive() from within the commit.
-        */
-       IHOLD(ip);
-
        /*
         * If this is a synchronous mount, make sure that the
         * remove transaction goes to disk before returning to
@@ -2423,10 +2259,8 @@ xfs_remove(
        }
 
        error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-       if (error) {
-               IRELE(ip);
+       if (error)
                goto std_return;
-       }
 
        /*
         * If we are using filestreams, kill the stream association.
@@ -2438,7 +2272,6 @@ xfs_remove(
                xfs_filestream_deassociate(ip);
 
        xfs_itrace_exit(ip);
-       IRELE(ip);
 
 /*     Fall through to std_return with error = 0 */
  std_return:
@@ -2467,8 +2300,6 @@ xfs_remove(
        cancel_flags |= XFS_TRANS_ABORT;
        xfs_trans_cancel(tp, cancel_flags);
 
-       IRELE(ip);
-
        goto std_return;
 }
 
@@ -2536,7 +2367,7 @@ xfs_link(
                ips[1] = sip;
        }
 
-       xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL);
+       xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
 
        /*
         * Increment vnode ref counts since xfs_trans_commit &
@@ -2840,7 +2671,6 @@ xfs_rmdir(
        struct xfs_name         *name,
        xfs_inode_t             *cdp)
 {
-       bhv_vnode_t             *dir_vp = XFS_ITOV(dp);
        xfs_mount_t             *mp = dp->i_mount;
        xfs_trans_t             *tp;
        int                     error;
@@ -2865,28 +2695,13 @@ xfs_rmdir(
                        return XFS_ERROR(error);
        }
 
-       /*
-        * We need to get a reference to cdp before we get our log
-        * reservation.  The reason for this is that we cannot call
-        * xfs_iget for an inode for which we do not have a reference
-        * once we've acquired a log reservation.  This is because the
-        * inode we are trying to get might be in xfs_inactive going
-        * for a log reservation.  Since we'll have to wait for the
-        * inactive code to complete before returning from xfs_iget,
-        * we need to make sure that we don't have log space reserved
-        * when we call xfs_iget.  Instead we get an unlocked reference
-        * to the inode before getting our log reservation.
-        */
-       IHOLD(cdp);
-
        /*
         * Get the dquots for the inodes.
         */
        error = XFS_QM_DQATTACH(mp, dp, 0);
-       if (!error && dp != cdp)
+       if (!error)
                error = XFS_QM_DQATTACH(mp, cdp, 0);
        if (error) {
-               IRELE(cdp);
                REMOVE_DEBUG_TRACE(__LINE__);
                goto std_return;
        }
@@ -2913,7 +2728,6 @@ xfs_rmdir(
        if (error) {
                ASSERT(error != ENOSPC);
                cancel_flags = 0;
-               IRELE(cdp);
                goto error_return;
        }
        XFS_BMAP_INIT(&free_list, &first_block);
@@ -2927,21 +2741,13 @@ xfs_rmdir(
        error = xfs_lock_dir_and_entry(dp, cdp);
        if (error) {
                xfs_trans_cancel(tp, cancel_flags);
-               IRELE(cdp);
                goto std_return;
        }
 
+       IHOLD(dp);
        xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
-       if (dp != cdp) {
-               /*
-                * Only increment the parent directory vnode count if
-                * we didn't bump it in looking up cdp.  The only time
-                * we don't bump it is when we're looking up ".".
-                */
-               VN_HOLD(dir_vp);
-       }
 
-       xfs_itrace_ref(cdp);
+       IHOLD(cdp);
        xfs_trans_ijoin(tp, cdp, XFS_ILOCK_EXCL);
 
        ASSERT(cdp->i_d.di_nlink >= 2);
@@ -2994,12 +2800,6 @@ xfs_rmdir(
        /* Determine these before committing transaction */
        last_cdp_link = (cdp)->i_d.di_nlink==0;
 
-       /*
-        * Take an extra ref on the child vnode so that it
-        * does not go to xfs_inactive() from within the commit.
-        */
-       IHOLD(cdp);
-
        /*
         * If this is a synchronous mount, make sure that the
         * rmdir transaction goes to disk before returning to
@@ -3014,19 +2814,15 @@ xfs_rmdir(
                xfs_bmap_cancel(&free_list);
                xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
                                 XFS_TRANS_ABORT));
-               IRELE(cdp);
                goto std_return;
        }
 
        error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
        if (error) {
-               IRELE(cdp);
                goto std_return;
        }
 
 
-       IRELE(cdp);
-
        /* Fall through to std_return with error = 0 or the errno
         * from xfs_trans_commit. */
  std_return:
index 24c53923dc2c25b17f326bc03f1aae5f6ff2ed3a..8abe8f186e2019fbebcb5debcb26940ebe0ee077 100644 (file)
@@ -15,7 +15,6 @@ struct xfs_iomap;
 
 
 int xfs_open(struct xfs_inode *ip);
-int xfs_getattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags);
 int xfs_setattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags,
                struct cred *credp);
 int xfs_readlink(struct xfs_inode *ip, char *link);
@@ -48,9 +47,9 @@ int xfs_change_file_space(struct xfs_inode *ip, int cmd,
                struct cred *credp, int attr_flags);
 int xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
                struct xfs_inode *src_ip, struct xfs_inode *target_dp,
-               struct xfs_name *target_name);
+               struct xfs_name *target_name, struct xfs_inode *target_ip);
 int xfs_attr_get(struct xfs_inode *ip, const char *name, char *value,
-               int *valuelenp, int flags, cred_t *cred);
+               int *valuelenp, int flags);
 int xfs_attr_set(struct xfs_inode *dp, const char *name, char *value,
                int valuelen, int flags);
 int xfs_attr_remove(struct xfs_inode *dp, const char *name, int flags);
@@ -61,9 +60,6 @@ int xfs_ioctl(struct xfs_inode *ip, struct file *filp,
 ssize_t xfs_read(struct xfs_inode *ip, struct kiocb *iocb,
                const struct iovec *iovp, unsigned int segs,
                loff_t *offset, int ioflags);
-ssize_t xfs_sendfile(struct xfs_inode *ip, struct file *filp,
-               loff_t *offset, int ioflags, size_t count,
-               read_actor_t actor, void *target);
 ssize_t xfs_splice_read(struct xfs_inode *ip, struct file *infilp,
                loff_t *ppos, struct pipe_inode_info *pipe, size_t count,
                int flags, int ioflags);
index 422f29c06c77d10e17a10c434300fda383502768..28fe8bae103703405c08029ae1dafa40ff586613 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -63,7 +63,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20070126
+#define ACPI_CA_VERSION                 0x20080321
 
 /*
  * OS name, used for the _OS object.  The _OS object is essentially obsolete,
 #define ACPI_OBJ_NUM_OPERANDS           8
 #define ACPI_OBJ_MAX_OPERAND            7
 
+/* Number of elements in the Result Stack frame; can be an arbitrary value */
+
+#define ACPI_RESULTS_FRAME_OBJ_NUM      8
+
+/*
+ * Maximum number of elements the Result Stack can contain;
+ * it may be an arbitrary value not exceeding the range of the
+ * result_size and result_count types (currently u8).
+ */
+#define ACPI_RESULTS_OBJ_NUM_MAX        255
+
 /* Names within the namespace are 4 bytes long */
 
 #define ACPI_NAME_SIZE                  4
index d626bb1d297318065cebf546dc184fe1152e1a52..c5a1b50d8d94c27ccb9f6d6dff2682ba3c30d115 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 389d772c7d5b5ee2bdc996d3d0acae60f17c2801..788f887820126ede28352c5824957f4964a7bcc8 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -97,19 +97,23 @@ typedef const struct acpi_dmtable_info {
 #define ACPI_DMT_CHKSUM                 20
 #define ACPI_DMT_SPACEID                21
 #define ACPI_DMT_GAS                    22
-#define ACPI_DMT_DMAR                   23
-#define ACPI_DMT_MADT                   24
-#define ACPI_DMT_SRAT                   25
-#define ACPI_DMT_EXIT                   26
-#define ACPI_DMT_SIG                    27
+#define ACPI_DMT_ASF                    23
+#define ACPI_DMT_DMAR                   24
+#define ACPI_DMT_HEST                   25
+#define ACPI_DMT_HESTNTFY               26
+#define ACPI_DMT_HESTNTYP               27
+#define ACPI_DMT_MADT                   28
+#define ACPI_DMT_SRAT                   29
+#define ACPI_DMT_EXIT                   30
+#define ACPI_DMT_SIG                    31
 
 typedef
-void (*ACPI_TABLE_HANDLER) (struct acpi_table_header * table);
+void (*acpi_dmtable_handler) (struct acpi_table_header * table);
 
 struct acpi_dmtable_data {
        char *signature;
        struct acpi_dmtable_info *table_info;
-       ACPI_TABLE_HANDLER table_handler;
+       acpi_dmtable_handler table_handler;
        char *name;
 };
 
@@ -149,6 +153,7 @@ extern struct acpi_dmtable_info acpi_dm_table_info_asf3[];
 extern struct acpi_dmtable_info acpi_dm_table_info_asf4[];
 extern struct acpi_dmtable_info acpi_dm_table_info_asf_hdr[];
 extern struct acpi_dmtable_info acpi_dm_table_info_boot[];
+extern struct acpi_dmtable_info acpi_dm_table_info_bert[];
 extern struct acpi_dmtable_info acpi_dm_table_info_cpep[];
 extern struct acpi_dmtable_info acpi_dm_table_info_cpep0[];
 extern struct acpi_dmtable_info acpi_dm_table_info_dbgp[];
@@ -158,11 +163,17 @@ extern struct acpi_dmtable_info acpi_dm_table_info_dmar_scope[];
 extern struct acpi_dmtable_info acpi_dm_table_info_dmar0[];
 extern struct acpi_dmtable_info acpi_dm_table_info_dmar1[];
 extern struct acpi_dmtable_info acpi_dm_table_info_ecdt[];
+extern struct acpi_dmtable_info acpi_dm_table_info_einj[];
+extern struct acpi_dmtable_info acpi_dm_table_info_einj0[];
+extern struct acpi_dmtable_info acpi_dm_table_info_erst[];
 extern struct acpi_dmtable_info acpi_dm_table_info_facs[];
 extern struct acpi_dmtable_info acpi_dm_table_info_fadt1[];
 extern struct acpi_dmtable_info acpi_dm_table_info_fadt2[];
 extern struct acpi_dmtable_info acpi_dm_table_info_gas[];
 extern struct acpi_dmtable_info acpi_dm_table_info_header[];
+extern struct acpi_dmtable_info acpi_dm_table_info_hest[];
+extern struct acpi_dmtable_info acpi_dm_table_info_hest9[];
+extern struct acpi_dmtable_info acpi_dm_table_info_hest_notify[];
 extern struct acpi_dmtable_info acpi_dm_table_info_hpet[];
 extern struct acpi_dmtable_info acpi_dm_table_info_madt[];
 extern struct acpi_dmtable_info acpi_dm_table_info_madt0[];
@@ -180,6 +191,7 @@ extern struct acpi_dmtable_info acpi_dm_table_info_mcfg0[];
 extern struct acpi_dmtable_info acpi_dm_table_info_rsdp1[];
 extern struct acpi_dmtable_info acpi_dm_table_info_rsdp2[];
 extern struct acpi_dmtable_info acpi_dm_table_info_sbst[];
+extern struct acpi_dmtable_info acpi_dm_table_info_slic[];
 extern struct acpi_dmtable_info acpi_dm_table_info_slit[];
 extern struct acpi_dmtable_info acpi_dm_table_info_spcr[];
 extern struct acpi_dmtable_info acpi_dm_table_info_spmi[];
@@ -194,7 +206,7 @@ extern struct acpi_dmtable_info acpi_dm_table_info_wdrt[];
  */
 void acpi_dm_dump_data_table(struct acpi_table_header *table);
 
-void
+acpi_status
 acpi_dm_dump_table(u32 table_length,
                   u32 table_offset,
                   void *table,
@@ -213,9 +225,13 @@ void acpi_dm_dump_cpep(struct acpi_table_header *table);
 
 void acpi_dm_dump_dmar(struct acpi_table_header *table);
 
+void acpi_dm_dump_einj(struct acpi_table_header *table);
+
+void acpi_dm_dump_erst(struct acpi_table_header *table);
+
 void acpi_dm_dump_fadt(struct acpi_table_header *table);
 
-void acpi_dm_dump_srat(struct acpi_table_header *table);
+void acpi_dm_dump_hest(struct acpi_table_header *table);
 
 void acpi_dm_dump_mcfg(struct acpi_table_header *table);
 
@@ -227,6 +243,8 @@ void acpi_dm_dump_rsdt(struct acpi_table_header *table);
 
 void acpi_dm_dump_slit(struct acpi_table_header *table);
 
+void acpi_dm_dump_srat(struct acpi_table_header *table);
+
 void acpi_dm_dump_xsdt(struct acpi_table_header *table);
 
 /*
index 7f690bb0f02f1c8e8d2356c8ca2daa26b2e9e2af..910f018d92c74333a8a340e0f27f5a98951f4175 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -53,6 +53,9 @@
 acpi_status
 acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc);
 
+acpi_status
+acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc);
+
 acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *rgn_desc);
 
 acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc);
@@ -67,11 +70,19 @@ acpi_status
 acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
                             union acpi_parse_object *op);
 
+acpi_status
+acpi_ds_eval_table_region_operands(struct acpi_walk_state *walk_state,
+                                  union acpi_parse_object *op);
+
 acpi_status
 acpi_ds_eval_data_object_operands(struct acpi_walk_state *walk_state,
                                  union acpi_parse_object *op,
                                  union acpi_operand_object *obj_desc);
 
+acpi_status
+acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state,
+                                union acpi_parse_object *op);
+
 acpi_status acpi_ds_initialize_region(acpi_handle obj_handle);
 
 /*
@@ -269,6 +280,8 @@ acpi_status acpi_ds_resolve_operands(struct acpi_walk_state *walk_state);
 
 void acpi_ds_clear_operands(struct acpi_walk_state *walk_state);
 
+acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state);
+
 /*
  * dswscope - Scope Stack manipulation
  */
@@ -303,7 +316,7 @@ acpi_ds_init_aml_walk(struct acpi_walk_state *walk_state,
                      u32 aml_length,
                      struct acpi_evaluate_info *info, u8 pass_number);
 
-acpi_status
+void
 acpi_ds_obj_stack_pop_and_delete(u32 pop_count,
                                 struct acpi_walk_state *walk_state);
 
@@ -316,21 +329,11 @@ void
 acpi_ds_push_walk_state(struct acpi_walk_state *walk_state,
                        struct acpi_thread_state *thread);
 
-acpi_status acpi_ds_result_stack_pop(struct acpi_walk_state *walk_state);
-
-acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *walk_state);
-
 acpi_status acpi_ds_result_stack_clear(struct acpi_walk_state *walk_state);
 
 struct acpi_walk_state *acpi_ds_get_current_walk_state(struct acpi_thread_state
                                                       *thread);
 
-#ifdef ACPI_FUTURE_USAGE
-acpi_status
-acpi_ds_result_remove(union acpi_operand_object **object,
-                     u32 index, struct acpi_walk_state *walk_state);
-#endif
-
 acpi_status
 acpi_ds_result_pop(union acpi_operand_object **object,
                   struct acpi_walk_state *walk_state);
@@ -339,8 +342,4 @@ acpi_status
 acpi_ds_result_push(union acpi_operand_object *object,
                    struct acpi_walk_state *walk_state);
 
-acpi_status
-acpi_ds_result_pop_from_bottom(union acpi_operand_object **object,
-                              struct acpi_walk_state *walk_state);
-
 #endif                         /* _ACDISPAT_H_ */
index d23cdf326808f68f971e3d70e9a484f52a2bb6e5..d5d099bf349cb67c140ae1e73e536c942672af8a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b73f18a48785685d46bee2702178f4eaf2eec1ec..1f591171bf317d2e84c4baf7e6e971f37235d978 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 47a1fd8f2d8a2c61629efe6999fcda26613a94f0..74ad971241dbb6cb46a733dc02dc0b894eb59e64 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -170,10 +170,14 @@ ACPI_EXTERN u8 acpi_gbl_integer_nybble_width;
 ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX];
 
 /*
- * Global lock semaphore works in conjunction with the actual HW global lock
+ * Global lock mutex is an actual AML mutex object
+ * Global lock semaphore works in conjunction with the HW global lock
  */
-ACPI_EXTERN acpi_mutex acpi_gbl_global_lock_mutex;
+ACPI_EXTERN union acpi_operand_object *acpi_gbl_global_lock_mutex;
 ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore;
+ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
+ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
+ACPI_EXTERN u8 acpi_gbl_global_lock_present;
 
 /*
  * Spinlocks are used for interfaces that can be possibly called at
@@ -213,27 +217,33 @@ ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_device_notify;
 ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_system_notify;
 ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler;
 ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler;
+ACPI_EXTERN acpi_tbl_handler acpi_gbl_table_handler;
+ACPI_EXTERN void *acpi_gbl_table_handler_context;
 ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk;
 
+/* Owner ID support */
+
+ACPI_EXTERN u32 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS];
+ACPI_EXTERN u8 acpi_gbl_last_owner_id_index;
+ACPI_EXTERN u8 acpi_gbl_next_owner_id_offset;
+
 /* Misc */
 
 ACPI_EXTERN u32 acpi_gbl_original_mode;
 ACPI_EXTERN u32 acpi_gbl_rsdp_original_location;
 ACPI_EXTERN u32 acpi_gbl_ns_lookup_count;
 ACPI_EXTERN u32 acpi_gbl_ps_find_count;
-ACPI_EXTERN u32 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS];
 ACPI_EXTERN u16 acpi_gbl_pm1_enable_register_save;
-ACPI_EXTERN u16 acpi_gbl_global_lock_handle;
-ACPI_EXTERN u8 acpi_gbl_last_owner_id_index;
-ACPI_EXTERN u8 acpi_gbl_next_owner_id_offset;
 ACPI_EXTERN u8 acpi_gbl_debugger_configuration;
-ACPI_EXTERN u8 acpi_gbl_global_lock_acquired;
 ACPI_EXTERN u8 acpi_gbl_step_to_next_call;
 ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
-ACPI_EXTERN u8 acpi_gbl_global_lock_present;
 ACPI_EXTERN u8 acpi_gbl_events_initialized;
 ACPI_EXTERN u8 acpi_gbl_system_awake_and_running;
 
+#ifndef DEFINE_ACPI_GLOBALS
+
+/* Other miscellaneous */
+
 extern u8 acpi_gbl_shutdown;
 extern u32 acpi_gbl_startup_flags;
 extern const char *acpi_gbl_sleep_state_names[ACPI_S_STATE_COUNT];
@@ -241,6 +251,8 @@ extern const char *acpi_gbl_highest_dstate_names[4];
 extern const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES];
 extern const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS];
 
+#endif
+
 /* Exception codes */
 
 extern char const *acpi_gbl_exception_names_env[];
@@ -255,8 +267,6 @@ extern char const *acpi_gbl_exception_names_ctrl[];
  *
  ****************************************************************************/
 
-#define NUM_NS_TYPES                    ACPI_TYPE_INVALID+1
-
 #if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
 #define NUM_PREDEFINED_NAMES            10
 #else
@@ -267,7 +277,7 @@ ACPI_EXTERN struct acpi_namespace_node acpi_gbl_root_node_struct;
 ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_root_node;
 ACPI_EXTERN struct acpi_namespace_node *acpi_gbl_fadt_gpe_device;
 
-extern const u8 acpi_gbl_ns_properties[NUM_NS_TYPES];
+extern const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES];
 extern const struct acpi_predefined_names
     acpi_gbl_pre_defined_names[NUM_PREDEFINED_NAMES];
 
@@ -275,8 +285,8 @@ extern const struct acpi_predefined_names
 ACPI_EXTERN u32 acpi_gbl_current_node_count;
 ACPI_EXTERN u32 acpi_gbl_current_node_size;
 ACPI_EXTERN u32 acpi_gbl_max_concurrent_node_count;
-ACPI_EXTERN acpi_size acpi_gbl_entry_stack_pointer;
-ACPI_EXTERN acpi_size acpi_gbl_lowest_stack_pointer;
+ACPI_EXTERN acpi_size *acpi_gbl_entry_stack_pointer;
+ACPI_EXTERN acpi_size *acpi_gbl_lowest_stack_pointer;
 ACPI_EXTERN u32 acpi_gbl_deepest_nesting;
 #endif
 
index 4053df94345306968f68216bfb05744ecc1cf223..d4fb9bbc903c0a3f417ea195d8650af87757ef8b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index ce7c9d653910598c29cce2baa2dcbb28b8cbb83b..e249ce5d33003be88eee37914286a9c4975346d3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -215,8 +215,6 @@ acpi_ex_create_region(u8 * aml_start,
                      u32 aml_length,
                      u8 region_space, struct acpi_walk_state *walk_state);
 
-acpi_status acpi_ex_create_table_region(struct acpi_walk_state *walk_state);
-
 acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state);
 
 acpi_status acpi_ex_create_alias(struct acpi_walk_state *walk_state);
@@ -247,10 +245,17 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
                      union acpi_operand_object *obj_desc,
                      struct acpi_walk_state *walk_state);
 
+acpi_status
+acpi_ex_acquire_mutex_object(u16 timeout,
+                            union acpi_operand_object *obj_desc,
+                            acpi_thread_id thread_id);
+
 acpi_status
 acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
                      struct acpi_walk_state *walk_state);
 
+acpi_status acpi_ex_release_mutex_object(union acpi_operand_object *obj_desc);
+
 void acpi_ex_release_all_mutexes(struct acpi_thread_state *thread);
 
 void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc);
@@ -455,9 +460,9 @@ void acpi_ex_relinquish_interpreter(void);
 
 void acpi_ex_truncate_for32bit_table(union acpi_operand_object *obj_desc);
 
-u8 acpi_ex_acquire_global_lock(u32 rule);
+void acpi_ex_acquire_global_lock(u32 rule);
 
-void acpi_ex_release_global_lock(u8 locked);
+void acpi_ex_release_global_lock(u32 rule);
 
 void acpi_ex_eisa_id_to_string(u32 numeric_id, char *out_string);
 
index 202cd4242ba19bd135ac64877a954376e16f79c7..c5cdc32ac2f8f623deb3515bd367a4fca2ee5da7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -522,9 +522,8 @@ struct acpi_thread_state {
  * AML arguments
  */
 struct acpi_result_values {
-       ACPI_STATE_COMMON u8 num_results;
-       u8 last_insert;
-       union acpi_operand_object *obj_desc[ACPI_OBJ_NUM_OPERANDS];
+       ACPI_STATE_COMMON
+           union acpi_operand_object *obj_desc[ACPI_RESULTS_FRAME_OBJ_NUM];
 };
 
 typedef
@@ -604,6 +603,7 @@ union acpi_parse_value {
        union acpi_parse_object         *next;          /* Next op */\
        struct acpi_namespace_node      *node;          /* For use by interpreter */\
        union acpi_parse_value          value;          /* Value or args associated with the opcode */\
+       u8                              arg_list_length; /* Number of elements in the arg list */\
        ACPI_DISASM_ONLY_MEMBERS (\
        u8                              disasm_flags;   /* Used during AML disassembly */\
        u8                              disasm_opcode;  /* Subtype used for disassembly */\
@@ -696,6 +696,8 @@ struct acpi_parse_state {
 #define ACPI_PARSEOP_NAMED              0x02
 #define ACPI_PARSEOP_DEFERRED           0x04
 #define ACPI_PARSEOP_BYTELIST           0x08
+#define ACPI_PARSEOP_IN_STACK           0x10
+#define ACPI_PARSEOP_TARGET             0x20
 #define ACPI_PARSEOP_IN_CACHE           0x80
 
 /* Parse object disasm_flags */
index 99d171c87c84897123d7cdde56a8c7d83300c5f5..fb41a3b802fc500e7eafb419cc84063f9d1dd748 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 
 #define ACPI_ARRAY_LENGTH(x)            (sizeof(x) / sizeof((x)[0]))
 
-#ifdef ACPI_NO_INTEGER64_SUPPORT
-/*
- * acpi_integer is 32-bits, no 64-bit support on this platform
- */
-#define ACPI_LODWORD(l)                 ((u32)(l))
-#define ACPI_HIDWORD(l)                 ((u32)(0))
-
-#else
-
-/*
- * Full 64-bit address/integer on both 32-bit and 64-bit platforms
- */
-#define ACPI_LODWORD(l)                 ((u32)(u64)(l))
-#define ACPI_HIDWORD(l)                 ((u32)(((*(struct uint64_struct *)(void *)(&l))).hi))
-#endif
-
-/*
- * printf() format helpers
- */
-
-/* Split 64-bit integer into two 32-bit values. Use with %8.8_x%8.8_x */
-
-#define ACPI_FORMAT_UINT64(i)           ACPI_HIDWORD(i),ACPI_LODWORD(i)
-
 /*
  * Extract data using a pointer.  Any more than a byte and we
  * get into potential alignment issues -- see the STORE macros below.
 #define ACPI_COMPARE_NAME(a,b)          (!ACPI_STRNCMP (ACPI_CAST_PTR (char,(a)), ACPI_CAST_PTR (char,(b)), ACPI_NAME_SIZE))
 #endif
 
+/*
+ * Full 64-bit integer must be available on both 32-bit and 64-bit platforms
+ */
+struct acpi_integer_overlay {
+       u32 lo_dword;
+       u32 hi_dword;
+};
+
+#define ACPI_LODWORD(integer)           (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->lo_dword)
+#define ACPI_HIDWORD(integer)           (ACPI_CAST_PTR (struct acpi_integer_overlay, &integer)->hi_dword)
+
+/*
+ * printf() format helpers
+ */
+
+/* Split 64-bit integer into two 32-bit values. Use with %8.8_x%8.8_x */
+
+#define ACPI_FORMAT_UINT64(i)           ACPI_HIDWORD(i),ACPI_LODWORD(i)
+
+#if ACPI_MACHINE_WIDTH == 64
+#define ACPI_FORMAT_NATIVE_UINT(i)      ACPI_FORMAT_UINT64(i)
+#else
+#define ACPI_FORMAT_NATIVE_UINT(i)      0, (i)
+#endif
+
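The new ACPI_LODWORD/ACPI_HIDWORD pair replaces the old 32/64-bit conditional definitions with a single overlay-struct access, and ACPI_FORMAT_UINT64 still splits a 64-bit value into two halves for a %8.8X%8.8X format. A minimal user-space sketch of the same idea (the typedefs and macro bodies below are local copies for illustration only, and the overlay access is little-endian specific, as in the header):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Local copy of the overlay used by the new ACPI_LODWORD/ACPI_HIDWORD */
struct acpi_integer_overlay {
	u32 lo_dword;
	u32 hi_dword;
};

#define ACPI_CAST_PTR(t, p)     ((t *)(void *)(p))
#define ACPI_LODWORD(integer)   (ACPI_CAST_PTR(struct acpi_integer_overlay, &integer)->lo_dword)
#define ACPI_HIDWORD(integer)   (ACPI_CAST_PTR(struct acpi_integer_overlay, &integer)->hi_dword)

/* Split a 64-bit integer into two 32-bit halves, high half first */
#define ACPI_FORMAT_UINT64(i)   ACPI_HIDWORD(i), ACPI_LODWORD(i)

int main(void)
{
	u64 address = 0x12345678ABCDEF00ULL;

	/* Prints "12345678ABCDEF00" on a little-endian machine */
	printf("%8.8X%8.8X\n", ACPI_FORMAT_UINT64(address));
	return 0;
}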
 /*
  * Macros for moving data around to/from buffers that are possibly unaligned.
  * If the hardware supports the transfer of unaligned data, just do the store.
 
 /* These macros reverse the bytes during the move, converting little-endian to big endian */
 
-        /* Big Endian      <==        Little Endian */
-        /*  Hi...Lo                     Lo...Hi     */
+                         /* Big Endian      <==        Little Endian */
+                         /*  Hi...Lo                     Lo...Hi     */
 /* 16-bit source, 16/32/64 destination */
 
 #define ACPI_MOVE_16_TO_16(d,s)         {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[1];\
-                         ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];}
+                                          ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[0];}
 
 #define ACPI_MOVE_16_TO_32(d,s)         {(*(u32 *)(void *)(d))=0;\
-                                         ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
-                                         ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
+                                                          ((u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
+                                                          ((u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
 
 #define ACPI_MOVE_16_TO_64(d,s)         {(*(u64 *)(void *)(d))=0;\
-                                                          ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
-                                                          ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
+                                                                        ((u8 *)(void *)(d))[6] = ((u8 *)(void *)(s))[1];\
+                                                                        ((u8 *)(void *)(d))[7] = ((u8 *)(void *)(s))[0];}
 
 /* 32-bit source, 16/32/64 destination */
 
 #define ACPI_MOVE_32_TO_16(d,s)         ACPI_MOVE_16_TO_16(d,s)        /* Truncate to 16 */
 
 #define ACPI_MOVE_32_TO_32(d,s)         {((  u8 *)(void *)(d))[0] = ((u8 *)(void *)(s))[3];\
-                                                                         ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\
-                                                                         ((  u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
-                                                                         ((  u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
+                                                                                ((  u8 *)(void *)(d))[1] = ((u8 *)(void *)(s))[2];\
+                                                                                ((  u8 *)(void *)(d))[2] = ((u8 *)(void *)(s))[1];\
+                                                                                ((  u8 *)(void *)(d))[3] = ((u8 *)(void *)(s))[0];}
 
 #define ACPI_MOVE_32_TO_64(d,s)         {(*(u64 *)(void *)(d))=0;\
                                                                                   ((u8 *)(void *)(d))[4] = ((u8 *)(void *)(s))[3];\
index 34bfae8a05f32458b82766356e0269a66454fae4..c1343a9265fad6291b28ba34a5c4bb897a25b56e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5ef38a6c8a6154c3c4dc0a29c347a6e10b98079e..713b30903fe54c2260e8ba07098359b6fadfddd6 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -113,7 +113,8 @@ acpi_ns_parse_table(acpi_native_uint table_index,
 
 acpi_status
 acpi_ns_one_complete_parse(acpi_native_uint pass_number,
-                          acpi_native_uint table_index);
+                          acpi_native_uint table_index,
+                          struct acpi_namespace_node *start_node);
 
 /*
  * nsaccess - Top-level namespace access
index 7e1211a8b8faf5f84710586a7130d8abfb0fa673..e9657dac69b788b6ed391efc4e838c5338f67889 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -155,8 +155,9 @@ struct acpi_object_event {
 struct acpi_object_mutex {
        ACPI_OBJECT_COMMON_HEADER u8 sync_level;        /* 0-15, specified in Mutex() call */
        u16 acquisition_depth;  /* Allow multiple Acquires, same thread */
-       struct acpi_thread_state *owner_thread; /* Current owner of the mutex */
        acpi_mutex os_mutex;    /* Actual OS synchronization object */
+       acpi_thread_id thread_id;       /* Current owner of the mutex */
+       struct acpi_thread_state *owner_thread; /* Current owner of the mutex */
        union acpi_operand_object *prev;        /* Link for list of acquired mutexes */
        union acpi_operand_object *next;        /* Link for list of acquired mutexes */
        struct acpi_namespace_node *node;       /* Containing namespace node */
index e6f76a280a94d62cf2c082ec2425b74d8b1257fd..dfdf63327885c9c5e2e2fb898daf1f5a90f1dd1a 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define ARGI_CREATE_WORD_FIELD_OP       ARGI_LIST3 (ARGI_BUFFER,     ARGI_INTEGER,       ARGI_REFERENCE)
 #define ARGI_DATA_REGION_OP             ARGI_LIST3 (ARGI_STRING,     ARGI_STRING,        ARGI_STRING)
 #define ARGI_DEBUG_OP                   ARG_NONE
-#define ARGI_DECREMENT_OP               ARGI_LIST1 (ARGI_INTEGER_REF)
+#define ARGI_DECREMENT_OP               ARGI_LIST1 (ARGI_TARGETREF)
 #define ARGI_DEREF_OF_OP                ARGI_LIST1 (ARGI_REF_OR_STRING)
 #define ARGI_DEVICE_OP                  ARGI_INVALID_OPCODE
 #define ARGI_DIVIDE_OP                  ARGI_LIST4 (ARGI_INTEGER,    ARGI_INTEGER,       ARGI_TARGETREF,    ARGI_TARGETREF)
 #define ARGI_FIND_SET_RIGHT_BIT_OP      ARGI_LIST2 (ARGI_INTEGER,    ARGI_TARGETREF)
 #define ARGI_FROM_BCD_OP                ARGI_LIST2 (ARGI_INTEGER,    ARGI_FIXED_TARGET)
 #define ARGI_IF_OP                      ARGI_INVALID_OPCODE
-#define ARGI_INCREMENT_OP               ARGI_LIST1 (ARGI_INTEGER_REF)
+#define ARGI_INCREMENT_OP               ARGI_LIST1 (ARGI_TARGETREF)
 #define ARGI_INDEX_FIELD_OP             ARGI_INVALID_OPCODE
 #define ARGI_INDEX_OP                   ARGI_LIST3 (ARGI_COMPLEXOBJ, ARGI_INTEGER,       ARGI_TARGETREF)
 #define ARGI_LAND_OP                    ARGI_LIST2 (ARGI_INTEGER,    ARGI_INTEGER)
index c090a8b0bc99d68e486ab0170bef9a75dc87aeb6..e17873defcec4fbc9ccf2548b95b9631f7e2e508 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 85c358e21014fd594b4c03e6e4c0e117f09c63b0..23ee0fbf561970e92c59d6c9e2f8e8324f17d3c0 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -109,6 +109,8 @@ const struct acpi_opcode_info *acpi_ps_get_opcode_info(u16 opcode);
 
 char *acpi_ps_get_opcode_name(u16 opcode);
 
+u8 acpi_ps_get_argument_count(u32 op_type);
+
 /*
  * psparse - top level parsing routines
  */
index 2e5f00d3ea0d101f5b8621cd2568dae7a03e06e4..c515ef6cc89e2c7f23d2267c3bd665b1db601180 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 4839f2af94c35be435b35f2badcb6ee754fc0437..d4a560d2deb61f05f3fe56d2b4affe515926c931 100644 (file)
@@ -8,7 +8,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d970f7f99549639183b73d8324043c2805020dd5..2c3806e6546f278656ffb0342486fccc28c7643f 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -119,6 +119,11 @@ acpi_status
 acpi_get_table_by_index(acpi_native_uint table_index,
                        struct acpi_table_header **out_table);
 
+acpi_status
+acpi_install_table_handler(acpi_tbl_handler handler, void *context);
+
+acpi_status acpi_remove_table_handler(acpi_tbl_handler handler);
+
 /*
  * Namespace and name interfaces
  */
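The new acpi_install_table_handler()/acpi_remove_table_handler() pair lets a caller watch ACPI_TABLE_EVENT_LOAD/UNLOAD events through the acpi_tbl_handler callback added further below in actypes.h. A rough, untested sketch of how a kernel module might register one (the function names are made up, and treating the table argument as a table header is an assumption):

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/acpi.h>

/* Hypothetical handler: logs dynamic table load/unload events */
static acpi_status my_table_event_handler(u32 event, void *table, void *context)
{
	struct acpi_table_header *header = table;	/* assumed: event reports the table header */

	if (event == ACPI_TABLE_EVENT_LOAD)
		printk(KERN_INFO "ACPI table %4.4s loaded\n", header->signature);
	else if (event == ACPI_TABLE_EVENT_UNLOAD)
		printk(KERN_INFO "ACPI table %4.4s unloaded\n", header->signature);

	return AE_OK;
}

static int __init table_watch_init(void)
{
	acpi_status status;

	status = acpi_install_table_handler(my_table_event_handler, NULL);
	return ACPI_FAILURE(status) ? -ENODEV : 0;
}

static void __exit table_watch_exit(void)
{
	acpi_remove_table_handler(my_table_event_handler);
}

module_init(table_watch_init);
module_exit(table_watch_exit);
MODULE_LICENSE("GPL");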
index 9486ab266a5e24b898e427d90f289b2988595b87..eef5bd7a59faf3fd426e8602c9bc0240fd9a8a99 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -94,6 +94,7 @@ typedef const struct acpi_rsconvert_info {
 #define ACPI_RSC_BITMASK16              18
 #define ACPI_RSC_EXIT_NE                19
 #define ACPI_RSC_EXIT_LE                20
+#define ACPI_RSC_EXIT_EQ                21
 
 /* Resource Conversion sub-opcodes */
 
index 88482655407f04fce85d9a35baa3ad8e1b17b624..a907c67d651e778f535c749a3ae0b92031c9bc4f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -80,12 +80,15 @@ struct acpi_walk_state {
        u16 opcode;             /* Current AML opcode */
        u8 next_op_info;        /* Info about next_op */
        u8 num_operands;        /* Stack pointer for Operands[] array */
+       u8 operand_index;       /* Index into operand stack, to be used by acpi_ds_obj_stack_push */
        acpi_owner_id owner_id; /* Owner of objects created during the walk */
        u8 last_predicate;      /* Result of last predicate */
        u8 current_result;
        u8 return_used;
        u8 scope_depth;
        u8 pass_number;         /* Parse pass during table load */
+       u8 result_size;         /* Total elements for the result stack */
+       u8 result_count;        /* Current number of occupied elements of result stack */
        u32 aml_offset;
        u32 arg_types;
        u32 method_breakpoint;  /* For single stepping */
index 2b9f46f9da4d8f9d1d71b6eec3b4723cab4c68ce..4b36a55b0b3bceed7ba441b9a9121825abd631f3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 955adfb8d64c2358d0c7f449a51adc93ecd964bd..1ebbe883f7865576d022280a8da0f979e6a3278d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a1b1b2ee3e512d0c1d3a338c2cd1aca50dee2372..9af239bd1153fad17bdaee0b0bfbfcbc671bab80 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * it more difficult to inadvertently type in the wrong signature.
  */
 #define ACPI_SIG_ASF            "ASF!" /* Alert Standard Format table */
+#define ACPI_SIG_BERT           "BERT" /* Boot Error Record Table */
 #define ACPI_SIG_BOOT           "BOOT" /* Simple Boot Flag Table */
 #define ACPI_SIG_CPEP           "CPEP" /* Corrected Platform Error Polling table */
 #define ACPI_SIG_DBGP           "DBGP" /* Debug Port table */
 #define ACPI_SIG_DMAR           "DMAR" /* DMA Remapping table */
 #define ACPI_SIG_ECDT           "ECDT" /* Embedded Controller Boot Resources Table */
+#define ACPI_SIG_EINJ           "EINJ" /* Error Injection table */
+#define ACPI_SIG_ERST           "ERST" /* Error Record Serialization Table */
+#define ACPI_SIG_HEST           "HEST" /* Hardware Error Source Table */
 #define ACPI_SIG_HPET           "HPET" /* High Precision Event Timer table */
+#define ACPI_SIG_IBFT           "IBFT" /* i_sCSI Boot Firmware Table */
 #define ACPI_SIG_MADT           "APIC" /* Multiple APIC Description Table */
 #define ACPI_SIG_MCFG           "MCFG" /* PCI Memory Mapped Configuration table */
 #define ACPI_SIG_SBST           "SBST" /* Smart Battery Specification Table */
+#define ACPI_SIG_SLIC           "SLIC" /* Software Licensing Description Table */
 #define ACPI_SIG_SLIT           "SLIT" /* System Locality Distance Information Table */
 #define ACPI_SIG_SPCR           "SPCR" /* Serial Port Console Redirection table */
 #define ACPI_SIG_SPMI           "SPMI" /* Server Platform Management Interface table */
 #define ACPI_SIG_SRAT           "SRAT" /* System Resource Affinity Table */
 #define ACPI_SIG_TCPA           "TCPA" /* Trusted Computing Platform Alliance table */
+#define ACPI_SIG_UEFI           "UEFI" /* Uefi Boot Optimization Table */
+#define ACPI_SIG_WDAT           "WDAT" /* Watchdog Action Table */
 #define ACPI_SIG_WDRT           "WDRT" /* Watchdog Resource Table */
 
 /*
  * portable, so do not use any other bitfield types.
  */
 
-/* Common Sub-table header (used in MADT, SRAT, etc.) */
+/* Common Subtable header (used in MADT, SRAT, etc.) */
 
 struct acpi_subtable_header {
        u8 type;
        u8 length;
 };
 
+/* Common Subtable header for WHEA tables (EINJ, ERST, WDAT) */
+
+struct acpi_whea_header {
+       u8 action;
+       u8 instruction;
+       u8 flags;
+       u8 reserved;
+       struct acpi_generic_address register_region;
+       u64 value;              /* Value used with Read/Write register */
+       u64 mask;               /* Bitmask required for this register instruction */
+};
+
 /*******************************************************************************
  *
  * ASF - Alert Standard Format table (Signature "ASF!")
@@ -202,6 +222,35 @@ struct acpi_asf_address {
        u8 devices;
 };
 
+/*******************************************************************************
+ *
+ * BERT - Boot Error Record Table
+ *
+ ******************************************************************************/
+
+struct acpi_table_bert {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u32 region_length;      /* Length of the boot error region */
+       u64 address;            /* Physical address of the error region */
+};
+
+/* Boot Error Region */
+
+struct acpi_bert_region {
+       u32 block_status;
+       u32 raw_data_offset;
+       u32 raw_data_length;
+       u32 data_length;
+       u32 error_severity;
+};
+
+/* block_status Flags */
+
+#define ACPI_BERT_UNCORRECTABLE             (1)
+#define ACPI_BERT_CORRECTABLE               (2)
+#define ACPI_BERT_MULTIPLE_UNCORRECTABLE    (4)
+#define ACPI_BERT_MULTIPLE_CORRECTABLE      (8)
+
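The block_status bits describe what the boot error region contains. A small stand-alone sketch of decoding them (the flag values are copied from the definitions above; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* block_status flags from the BERT boot error region (as defined above) */
#define ACPI_BERT_UNCORRECTABLE             (1)
#define ACPI_BERT_CORRECTABLE               (2)
#define ACPI_BERT_MULTIPLE_UNCORRECTABLE    (4)
#define ACPI_BERT_MULTIPLE_CORRECTABLE      (8)

/* Report which error classes a boot error region advertises */
static void bert_report_block_status(u32 block_status)
{
	if (block_status & ACPI_BERT_UNCORRECTABLE)
		printf("uncorrectable error present%s\n",
		       (block_status & ACPI_BERT_MULTIPLE_UNCORRECTABLE) ?
		       " (multiple)" : "");
	if (block_status & ACPI_BERT_CORRECTABLE)
		printf("correctable error present%s\n",
		       (block_status & ACPI_BERT_MULTIPLE_CORRECTABLE) ?
		       " (multiple)" : "");
	if (!(block_status & (ACPI_BERT_UNCORRECTABLE | ACPI_BERT_CORRECTABLE)))
		printf("no boot errors recorded\n");
}

int main(void)
{
	bert_report_block_status(ACPI_BERT_UNCORRECTABLE |
				 ACPI_BERT_MULTIPLE_UNCORRECTABLE);
	return 0;
}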
 /*******************************************************************************
  *
  * BOOT - Simple Boot Flag Table
@@ -348,6 +397,352 @@ struct acpi_table_ecdt {
        u8 id[1];               /* Full namepath of the EC in the ACPI namespace */
 };
 
+/*******************************************************************************
+ *
+ * EINJ - Error Injection Table
+ *
+ ******************************************************************************/
+
+struct acpi_table_einj {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u32 header_length;
+       u32 reserved;
+       u32 entries;
+};
+
+/* EINJ Injection Instruction Entries (actions) */
+
+struct acpi_einj_entry {
+       struct acpi_whea_header whea_header;    /* Common header for WHEA tables */
+};
+
+/* Values for Action field above */
+
+enum acpi_einj_actions {
+       ACPI_EINJ_BEGIN_OPERATION = 0,
+       ACPI_EINJ_GET_TRIGGER_TABLE = 1,
+       ACPI_EINJ_SET_ERROR_TYPE = 2,
+       ACPI_EINJ_GET_ERROR_TYPE = 3,
+       ACPI_EINJ_END_OPERATION = 4,
+       ACPI_EINJ_EXECUTE_OPERATION = 5,
+       ACPI_EINJ_CHECK_BUSY_STATUS = 6,
+       ACPI_EINJ_GET_COMMAND_STATUS = 7,
+       ACPI_EINJ_ACTION_RESERVED = 8,  /* 8 and greater are reserved */
+       ACPI_EINJ_TRIGGER_ERROR = 0xFF  /* Except for this value */
+};
+
+/* Values for Instruction field above */
+
+enum acpi_einj_instructions {
+       ACPI_EINJ_READ_REGISTER = 0,
+       ACPI_EINJ_READ_REGISTER_VALUE = 1,
+       ACPI_EINJ_WRITE_REGISTER = 2,
+       ACPI_EINJ_WRITE_REGISTER_VALUE = 3,
+       ACPI_EINJ_NOOP = 4,
+       ACPI_EINJ_INSTRUCTION_RESERVED = 5      /* 5 and greater are reserved */
+};
+
+/* EINJ Trigger Error Action Table */
+
+struct acpi_einj_trigger {
+       u32 header_size;
+       u32 revision;
+       u32 table_size;
+       u32 entry_count;
+};
+
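EINJ reuses the common struct acpi_whea_header for each injection action entry, with the entry count carried in the fixed table. A stand-alone sketch of walking those entries, assuming they are packed immediately after the fixed EINJ header (the structures below are simplified, unpacked stand-ins for illustration only; real code must use the kernel's packed definitions):

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;
typedef uint32_t u32;
typedef uint64_t u64;

/* Simplified, unpacked stand-ins for the structures defined above */
struct acpi_generic_address {
	u8 space_id;
	u8 bit_width;
	u8 bit_offset;
	u8 access_width;
	u64 address;
};

struct acpi_whea_header {
	u8 action;
	u8 instruction;
	u8 flags;
	u8 reserved;
	struct acpi_generic_address register_region;
	u64 value;
	u64 mask;
};

struct acpi_table_header { u8 bytes[36]; };	/* stand-in for the common 36-byte header */

struct acpi_table_einj {
	struct acpi_table_header header;
	u32 header_length;
	u32 reserved;
	u32 entries;
};

/* Walk the injection action entries, assuming they are packed directly
 * after the fixed EINJ header. */
static void einj_list_actions(const struct acpi_table_einj *einj)
{
	const struct acpi_whea_header *entry =
	    (const struct acpi_whea_header *)((const char *)einj + sizeof(*einj));
	u32 i;

	for (i = 0; i < einj->entries; i++, entry++)
		printf("entry %u: action %u, instruction %u\n",
		       i, entry->action, entry->instruction);
}

int main(void)
{
	/* Fabricated table with a single BEGIN_OPERATION/NOOP entry */
	struct {
		struct acpi_table_einj einj;
		struct acpi_whea_header entry;
	} fake = {
		.einj  = { .entries = 1 },
		.entry = { .action = 0 /* ACPI_EINJ_BEGIN_OPERATION */,
			   .instruction = 4 /* ACPI_EINJ_NOOP */ },
	};

	einj_list_actions(&fake.einj);
	return 0;
}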
+/*******************************************************************************
+ *
+ * ERST - Error Record Serialization Table
+ *
+ ******************************************************************************/
+
+struct acpi_table_erst {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u32 header_length;
+       u32 reserved;
+       u32 entries;
+};
+
+/* ERST Serialization Entries (actions) */
+
+struct acpi_erst_entry {
+       struct acpi_whea_header whea_header;    /* Common header for WHEA tables */
+};
+
+/* Values for Action field above */
+
+enum acpi_erst_actions {
+       ACPI_ERST_BEGIN_WRITE_OPERATION = 0,
+       ACPI_ERST_BEGIN_READ_OPERATION = 1,
+       ACPI_ERST_BETGIN_CLEAR_OPERATION = 2,
+       ACPI_ERST_END_OPERATION = 3,
+       ACPI_ERST_SET_RECORD_OFFSET = 4,
+       ACPI_ERST_EXECUTE_OPERATION = 5,
+       ACPI_ERST_CHECK_BUSY_STATUS = 6,
+       ACPI_ERST_GET_COMMAND_STATUS = 7,
+       ACPI_ERST_GET_RECORD_IDENTIFIER = 8,
+       ACPI_ERST_SET_RECORD_IDENTIFIER = 9,
+       ACPI_ERST_GET_RECORD_COUNT = 10,
+       ACPI_ERST_BEGIN_DUMMY_WRIITE = 11,
+       ACPI_ERST_NOT_USED = 12,
+       ACPI_ERST_GET_ERROR_RANGE = 13,
+       ACPI_ERST_GET_ERROR_LENGTH = 14,
+       ACPI_ERST_GET_ERROR_ATTRIBUTES = 15,
+       ACPI_ERST_ACTION_RESERVED = 16  /* 16 and greater are reserved */
+};
+
+/* Values for Instruction field above */
+
+enum acpi_erst_instructions {
+       ACPI_ERST_READ_REGISTER = 0,
+       ACPI_ERST_READ_REGISTER_VALUE = 1,
+       ACPI_ERST_WRITE_REGISTER = 2,
+       ACPI_ERST_WRITE_REGISTER_VALUE = 3,
+       ACPI_ERST_NOOP = 4,
+       ACPI_ERST_LOAD_VAR1 = 5,
+       ACPI_ERST_LOAD_VAR2 = 6,
+       ACPI_ERST_STORE_VAR1 = 7,
+       ACPI_ERST_ADD = 8,
+       ACPI_ERST_SUBTRACT = 9,
+       ACPI_ERST_ADD_VALUE = 10,
+       ACPI_ERST_SUBTRACT_VALUE = 11,
+       ACPI_ERST_STALL = 12,
+       ACPI_ERST_STALL_WHILE_TRUE = 13,
+       ACPI_ERST_SKIP_NEXT_IF_TRUE = 14,
+       ACPI_ERST_GOTO = 15,
+       ACPI_ERST_SET_SRC_ADDRESS_BASE = 16,
+       ACPI_ERST_SET_DST_ADDRESS_BASE = 17,
+       ACPI_ERST_MOVE_DATA = 18,
+       ACPI_ERST_INSTRUCTION_RESERVED = 19     /* 19 and greater are reserved */
+};
+
+/*******************************************************************************
+ *
+ * HEST - Hardware Error Source Table
+ *
+ ******************************************************************************/
+
+struct acpi_table_hest {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u32 error_source_count;
+};
+
+/* HEST subtable header */
+
+struct acpi_hest_header {
+       u16 type;
+};
+
+/* Values for Type field above for subtables */
+
+enum acpi_hest_types {
+       ACPI_HEST_TYPE_XPF_MACHINE_CHECK = 0,
+       ACPI_HEST_TYPE_XPF_CORRECTED_MACHINE_CHECK = 1,
+       ACPI_HEST_TYPE_XPF_UNUSED = 2,
+       ACPI_HEST_TYPE_XPF_NON_MASKABLE_INTERRUPT = 3,
+       ACPI_HEST_TYPE_IPF_CORRECTED_MACHINE_CHECK = 4,
+       ACPI_HEST_TYPE_IPF_CORRECTED_PLATFORM_ERROR = 5,
+       ACPI_HEST_TYPE_AER_ROOT_PORT = 6,
+       ACPI_HEST_TYPE_AER_ENDPOINT = 7,
+       ACPI_HEST_TYPE_AER_BRIDGE = 8,
+       ACPI_HEST_TYPE_GENERIC_HARDWARE_ERROR_SOURCE = 9,
+       ACPI_HEST_TYPE_RESERVED = 10    /* 10 and greater are reserved */
+};
+
+/*
+ * HEST Sub-subtables
+ */
+
+/* XPF Machine Check Error Bank */
+
+struct acpi_hest_xpf_error_bank {
+       u8 bank_number;
+       u8 clear_status_on_init;
+       u8 status_format;
+       u8 config_write_enable;
+       u32 control_register;
+       u64 control_init_data;
+       u32 status_register;
+       u32 address_register;
+       u32 misc_register;
+};
+
+/* Generic Error Status */
+
+struct acpi_hest_generic_status {
+       u32 block_status;
+       u32 raw_data_offset;
+       u32 raw_data_length;
+       u32 data_length;
+       u32 error_severity;
+};
+
+/* Generic Error Data */
+
+struct acpi_hest_generic_data {
+       u8 section_type[16];
+       u32 error_severity;
+       u16 revision;
+       u8 validation_bits;
+       u8 flags;
+       u32 error_data_length;
+       u8 fru_id[16];
+       u8 fru_text[20];
+};
+
+/* Common HEST structure for PCI/AER types below (6,7,8) */
+
+struct acpi_hest_aer_common {
+       u16 source_id;
+       u16 config_write_enable;
+       u8 flags;
+       u8 enabled;
+       u32 records_to_pre_allocate;
+       u32 max_sections_per_record;
+       u32 bus;
+       u16 device;
+       u16 function;
+       u16 device_control;
+       u16 reserved;
+       u32 uncorrectable_error_mask;
+       u32 uncorrectable_error_severity;
+       u32 correctable_error_mask;
+       u32 advanced_error_cababilities;
+};
+
+/* Hardware Error Notification */
+
+struct acpi_hest_notify {
+       u8 type;
+       u8 length;
+       u16 config_write_enable;
+       u32 poll_interval;
+       u32 vector;
+       u32 polling_threshold_value;
+       u32 polling_threshold_window;
+       u32 error_threshold_value;
+       u32 error_threshold_window;
+};
+
+/* Values for Notify Type field above */
+
+enum acpi_hest_notify_types {
+       ACPI_HEST_NOTIFY_POLLED = 0,
+       ACPI_HEST_NOTIFY_EXTERNAL = 1,
+       ACPI_HEST_NOTIFY_LOCAL = 2,
+       ACPI_HEST_NOTIFY_SCI = 3,
+       ACPI_HEST_NOTIFY_NMI = 4,
+       ACPI_HEST_NOTIFY_RESERVED = 5   /* 5 and greater are reserved */
+};
+
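The notification structure and its type codes are shared by several HEST source types. A small user-space sketch that maps the notify type values above to readable names (illustration only):

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

/* Map the hardware error notification types defined above to readable names */
static const char *hest_notify_name(u8 type)
{
	static const char *const names[] = {
		"Polled",		/* ACPI_HEST_NOTIFY_POLLED   (0) */
		"External Interrupt",	/* ACPI_HEST_NOTIFY_EXTERNAL (1) */
		"Local Interrupt",	/* ACPI_HEST_NOTIFY_LOCAL    (2) */
		"SCI",			/* ACPI_HEST_NOTIFY_SCI      (3) */
		"NMI",			/* ACPI_HEST_NOTIFY_NMI      (4) */
	};

	return (type < 5) ? names[type] : "Reserved";
}

int main(void)
{
	printf("notify type 3 = %s\n", hest_notify_name(3));	/* "SCI" */
	return 0;
}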
+/*
+ * HEST subtables
+ *
+ * From WHEA Design Document, 16 May 2007.
+ * Note: There is no subtable type 2 in this version of the document,
+ * and there are two different subtable type 3s.
+ */
+
+ /* 0: XPF Machine Check Exception */
+
+struct acpi_hest_xpf_machine_check {
+       struct acpi_hest_header header;
+       u16 source_id;
+       u16 config_write_enable;
+       u8 flags;
+       u8 reserved1;
+       u32 records_to_pre_allocate;
+       u32 max_sections_per_record;
+       u64 global_capability_data;
+       u64 global_control_data;
+       u8 num_hardware_banks;
+       u8 reserved2[7];
+};
+
+/* 1: XPF Corrected Machine Check */
+
+struct acpi_table_hest_xpf_corrected {
+       struct acpi_hest_header header;
+       u16 source_id;
+       u16 config_write_enable;
+       u8 flags;
+       u8 enabled;
+       u32 records_to_pre_allocate;
+       u32 max_sections_per_record;
+       struct acpi_hest_notify notify;
+       u8 num_hardware_banks;
+       u8 reserved[3];
+};
+
+/* 3: XPF Non-Maskable Interrupt */
+
+struct acpi_hest_xpf_nmi {
+       struct acpi_hest_header header;
+       u16 source_id;
+       u32 reserved;
+       u32 records_to_pre_allocate;
+       u32 max_sections_per_record;
+       u32 max_raw_data_length;
+};
+
+/* 4: IPF Corrected Machine Check */
+
+struct acpi_hest_ipf_corrected {
+       struct acpi_hest_header header;
+       u8 enabled;
+       u8 reserved;
+};
+
+/* 5: IPF Corrected Platform Error */
+
+struct acpi_hest_ipf_corrected_platform {
+       struct acpi_hest_header header;
+       u8 enabled;
+       u8 reserved;
+};
+
+/* 6: PCI Express Root Port AER */
+
+struct acpi_hest_aer_root {
+       struct acpi_hest_header header;
+       struct acpi_hest_aer_common aer;
+       u32 root_error_command;
+};
+
+/* 7: PCI Express AER (AER Endpoint) */
+
+struct acpi_hest_aer {
+       struct acpi_hest_header header;
+       struct acpi_hest_aer_common aer;
+};
+
+/* 8: PCI Express/PCI-X Bridge AER */
+
+struct acpi_hest_aer_bridge {
+       struct acpi_hest_header header;
+       struct acpi_hest_aer_common aer;
+       u32 secondary_uncorrectable_error_mask;
+       u32 secondary_uncorrectable_error_severity;
+       u32 secondary_advanced_capabilities;
+};
+
+/* 9: Generic Hardware Error Source */
+
+struct acpi_hest_generic {
+       struct acpi_hest_header header;
+       u16 source_id;
+       u16 related_source_id;
+       u8 config_write_enable;
+       u8 enabled;
+       u32 records_to_pre_allocate;
+       u32 max_sections_per_record;
+       u32 max_raw_data_length;
+       struct acpi_generic_address error_status_address;
+       struct acpi_hest_notify notify;
+       u32 error_status_block_length;
+};
+
 /*******************************************************************************
  *
  * HPET - High Precision Event Timer table
@@ -371,6 +766,96 @@ struct acpi_table_hpet {
 
 /*! [End] no source code translation !*/
 
+/*******************************************************************************
+ *
+ * IBFT - Boot Firmware Table
+ *
+ ******************************************************************************/
+
+struct acpi_table_ibft {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u8 reserved[12];
+};
+
+/* IBFT common subtable header */
+
+struct acpi_ibft_header {
+       u8 type;
+       u8 version;
+       u16 length;
+       u8 index;
+       u8 flags;
+};
+
+/* Values for Type field above */
+
+enum acpi_ibft_type {
+       ACPI_IBFT_TYPE_NOT_USED = 0,
+       ACPI_IBFT_TYPE_CONTROL = 1,
+       ACPI_IBFT_TYPE_INITIATOR = 2,
+       ACPI_IBFT_TYPE_NIC = 3,
+       ACPI_IBFT_TYPE_TARGET = 4,
+       ACPI_IBFT_TYPE_EXTENSIONS = 5,
+       ACPI_IBFT_TYPE_RESERVED = 6     /* 6 and greater are reserved */
+};
+
+/* IBFT subtables */
+
+struct acpi_ibft_control {
+       struct acpi_ibft_header header;
+       u16 extensions;
+       u16 initiator_offset;
+       u16 nic0_offset;
+       u16 target0_offset;
+       u16 nic1_offset;
+       u16 target1_offset;
+};
+
+struct acpi_ibft_initiator {
+       struct acpi_ibft_header header;
+       u8 sns_server[16];
+       u8 slp_server[16];
+       u8 primary_server[16];
+       u8 secondary_server[16];
+       u16 name_length;
+       u16 name_offset;
+};
+
+struct acpi_ibft_nic {
+       struct acpi_ibft_header header;
+       u8 ip_address[16];
+       u8 subnet_mask_prefix;
+       u8 origin;
+       u8 gateway[16];
+       u8 primary_dns[16];
+       u8 secondary_dns[16];
+       u8 dhcp[16];
+       u16 vlan;
+       u8 mac_address[6];
+       u16 pci_address;
+       u16 name_length;
+       u16 name_offset;
+};
+
+struct acpi_ibft_target {
+       struct acpi_ibft_header header;
+       u8 target_ip_address[16];
+       u16 target_ip_socket;
+       u8 target_boot_lun[8];
+       u8 chap_type;
+       u8 nic_association;
+       u16 target_name_length;
+       u16 target_name_offset;
+       u16 chap_name_length;
+       u16 chap_name_offset;
+       u16 chap_secret_length;
+       u16 chap_secret_offset;
+       u16 reverse_chap_name_length;
+       u16 reverse_chap_name_offset;
+       u16 reverse_chap_secret_length;
+       u16 reverse_chap_secret_offset;
+};
+
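Unlike the HEST subtables, every IBFT subtable starts with a header that carries its own length, so the table can be walked generically. A stand-alone sketch of that walk, assuming the subtables sit back to back after the fixed table header (simplified, unpacked structures for illustration; the Control subtable in main() is fabricated):

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;
typedef uint16_t u16;
typedef uint32_t u32;

/* Simplified stand-in for the subtable header defined above */
struct acpi_ibft_header {
	u8 type;
	u8 version;
	u16 length;
	u8 index;
	u8 flags;
};

#define ACPI_IBFT_FIXED_SIZE	(36 + 12)	/* common 36-byte header + reserved[12] */

/* Walk the IBFT subtables, assuming each header's length covers its whole subtable */
static void ibft_list_subtables(const u8 *table, u32 table_length)
{
	u32 offset = ACPI_IBFT_FIXED_SIZE;

	while (offset + sizeof(struct acpi_ibft_header) <= table_length) {
		const struct acpi_ibft_header *sub =
		    (const struct acpi_ibft_header *)(table + offset);

		printf("subtable type %u, length %u\n", sub->type, sub->length);
		if (sub->length == 0)
			break;		/* guard against a malformed table */
		offset += sub->length;
	}
}

int main(void)
{
	/* Fabricated table containing a single (empty) Control subtable */
	struct {
		u8 fixed[ACPI_IBFT_FIXED_SIZE];
		struct acpi_ibft_header control;
	} fake = {
		.control = { .type = 1 /* ACPI_IBFT_TYPE_CONTROL */,
			     .length = sizeof(struct acpi_ibft_header) },
	};

	ibft_list_subtables((const u8 *)&fake, sizeof(fake));
	return 0;
}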
 /*******************************************************************************
  *
  * MADT - Multiple APIC Description Table
@@ -695,6 +1180,78 @@ struct acpi_table_tcpa {
        u64 log_address;        /* Address of the event log area */
 };
 
+/*******************************************************************************
+ *
+ * UEFI - UEFI Boot optimization Table
+ *
+ ******************************************************************************/
+
+struct acpi_table_uefi {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u8 identifier[16];      /* UUID identifier */
+       u16 data_offset;        /* Offset of remaining data in table */
+       u8 data;
+};
+
+/*******************************************************************************
+ *
+ * WDAT - Watchdog Action Table
+ *
+ ******************************************************************************/
+
+struct acpi_table_wdat {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u32 header_length;      /* Watchdog Header Length */
+       u16 pci_segment;        /* PCI Segment number */
+       u8 pci_bus;             /* PCI Bus number */
+       u8 pci_device;          /* PCI Device number */
+       u8 pci_function;        /* PCI Function number */
+       u8 reserved[3];
+       u32 timer_period;       /* Period of one timer count (msec) */
+       u32 max_count;          /* Maximum counter value supported */
+       u32 min_count;          /* Minimum counter value */
+       u8 flags;
+       u8 reserved2[3];
+       u32 entries;            /* Number of watchdog entries that follow */
+};
+
+/* WDAT Instruction Entries (actions) */
+
+struct acpi_wdat_entry {
+       struct acpi_whea_header whea_header;    /* Common header for WHEA tables */
+};
+
+/* Values for Action field above */
+
+enum acpi_wdat_actions {
+       ACPI_WDAT_RESET = 1,
+       ACPI_WDAT_GET_CURRENT_COUNTDOWN = 4,
+       ACPI_WDAT_GET_COUNTDOWN = 5,
+       ACPI_WDAT_SET_COUNTDOWN = 6,
+       ACPI_WDAT_GET_RUNNING_STATE = 8,
+       ACPI_WDAT_SET_RUNNING_STATE = 9,
+       ACPI_WDAT_GET_STOPPED_STATE = 10,
+       ACPI_WDAT_SET_STOPPED_STATE = 11,
+       ACPI_WDAT_GET_REBOOT = 16,
+       ACPI_WDAT_SET_REBOOT = 17,
+       ACPI_WDAT_GET_SHUTDOWN = 18,
+       ACPI_WDAT_SET_SHUTDOWN = 19,
+       ACPI_WDAT_GET_STATUS = 32,
+       ACPI_WDAT_SET_STATUS = 33,
+       ACPI_WDAT_ACTION_RESERVED = 34  /* 34 and greater are reserved */
+};
+
+/* Values for Instruction field above */
+
+enum acpi_wdat_instructions {
+       ACPI_WDAT_READ_VALUE = 0,
+       ACPI_WDAT_READ_COUNTDOWN = 1,
+       ACPI_WDAT_WRITE_VALUE = 2,
+       ACPI_WDAT_WRITE_COUNTDOWN = 3,
+       ACPI_WDAT_INSTRUCTION_RESERVED = 4,     /* 4 and greater are reserved */
+       ACPI_WDAT_PRESERVE_REGISTER = 0x80      /* Except for this value */
+};
+
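The timer_period, min_count and max_count fields together define the supported watchdog timeout range: one count lasts timer_period milliseconds. A quick illustration of the arithmetic (the numbers in main() are invented; real values come from the firmware's WDAT):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;
typedef uint64_t u64;

/* Timeout range implied by a WDAT header: one count = timer_period ms */
static void wdat_print_range(u32 timer_period, u32 min_count, u32 max_count)
{
	u64 min_ms = (u64)timer_period * min_count;
	u64 max_ms = (u64)timer_period * max_count;

	printf("watchdog timeout range: %llu ms .. %llu ms\n",
	       (unsigned long long)min_ms, (unsigned long long)max_ms);
}

int main(void)
{
	/* Example values only */
	wdat_print_range(1000, 1, 1024);	/* 1 s per count, 1..1024 counts */
	return 0;
}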
 /*******************************************************************************
  *
  * WDRT - Watchdog Resource Table
index e73a38939120f58e3927ab2c00d72615222c43bb..dfea2d440488548a7fccff43e998b3ec0ef346d6 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -323,27 +323,11 @@ struct uint32_struct {
 #define acpi_semaphore                  void *
 
 /*
- * Acpi integer width. In ACPI version 1, integers are
- * 32 bits.  In ACPI version 2, integers are 64 bits.
- * Note that this pertains to the ACPI integer type only, not
- * other integers used in the implementation of the ACPI CA
+ * Acpi integer width. In ACPI version 1, integers are 32 bits.  In ACPI
+ * version 2, integers are 64 bits. Note that this pertains to the ACPI integer
+ * type only, not other integers used in the implementation of the ACPI CA
  * subsystem.
  */
-#ifdef ACPI_NO_INTEGER64_SUPPORT
-
-/* 32-bit integers only, no 64-bit support */
-
-typedef u32 acpi_integer;
-#define ACPI_INTEGER_MAX                ACPI_UINT32_MAX
-#define ACPI_INTEGER_BIT_SIZE           32
-#define ACPI_MAX_DECIMAL_DIGITS         10     /* 2^32 = 4,294,967,296 */
-
-#define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 32-bit divide */
-
-#else
-
-/* 64-bit integers */
-
 typedef unsigned long long acpi_integer;
 #define ACPI_INTEGER_MAX                ACPI_UINT64_MAX
 #define ACPI_INTEGER_BIT_SIZE           64
@@ -352,7 +336,6 @@ typedef unsigned long long acpi_integer;
 #if ACPI_MACHINE_WIDTH == 64
 #define ACPI_USE_NATIVE_DIVIDE /* Use compiler native 64-bit divide */
 #endif
-#endif
 
 #define ACPI_MAX64_DECIMAL_DIGITS       20
 #define ACPI_MAX32_DECIMAL_DIGITS       10
@@ -419,14 +402,20 @@ typedef unsigned long long acpi_integer;
 /*
  * Standard notify values
  */
-#define ACPI_NOTIFY_BUS_CHECK           (u8) 0
-#define ACPI_NOTIFY_DEVICE_CHECK        (u8) 1
-#define ACPI_NOTIFY_DEVICE_WAKE         (u8) 2
-#define ACPI_NOTIFY_EJECT_REQUEST       (u8) 3
-#define ACPI_NOTIFY_DEVICE_CHECK_LIGHT  (u8) 4
-#define ACPI_NOTIFY_FREQUENCY_MISMATCH  (u8) 5
-#define ACPI_NOTIFY_BUS_MODE_MISMATCH   (u8) 6
-#define ACPI_NOTIFY_POWER_FAULT         (u8) 7
+#define ACPI_NOTIFY_BUS_CHECK           (u8) 0x00
+#define ACPI_NOTIFY_DEVICE_CHECK        (u8) 0x01
+#define ACPI_NOTIFY_DEVICE_WAKE         (u8) 0x02
+#define ACPI_NOTIFY_EJECT_REQUEST       (u8) 0x03
+#define ACPI_NOTIFY_DEVICE_CHECK_LIGHT  (u8) 0x04
+#define ACPI_NOTIFY_FREQUENCY_MISMATCH  (u8) 0x05
+#define ACPI_NOTIFY_BUS_MODE_MISMATCH   (u8) 0x06
+#define ACPI_NOTIFY_POWER_FAULT         (u8) 0x07
+#define ACPI_NOTIFY_CAPABILITIES_CHECK  (u8) 0x08
+#define ACPI_NOTIFY_DEVICE_PLD_CHECK    (u8) 0x09
+#define ACPI_NOTIFY_RESERVED            (u8) 0x0A
+#define ACPI_NOTIFY_LOCALITY_UPDATE     (u8) 0x0B
+
+#define ACPI_NOTIFY_MAX                 0x0B
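The standard notify values now run from 0x00 to ACPI_NOTIFY_MAX (0x0B). A rough, untested sketch of a driver-side notify handler keyed off these constants (kernel-module context assumed; the handler name is hypothetical, and registration with acpi_install_notify_handler() is shown only in a comment):

#include <linux/kernel.h>
#include <linux/acpi.h>

/* Hypothetical handler: logs the standard notify values defined above */
static void my_notify_handler(acpi_handle handle, u32 event, void *context)
{
	switch (event) {
	case ACPI_NOTIFY_BUS_CHECK:
		printk(KERN_INFO "notify: bus check\n");
		break;
	case ACPI_NOTIFY_DEVICE_CHECK:
		printk(KERN_INFO "notify: device check\n");
		break;
	case ACPI_NOTIFY_EJECT_REQUEST:
		printk(KERN_INFO "notify: eject request\n");
		break;
	default:
		printk(KERN_INFO "notify: value 0x%02x\n", event);
		break;
	}
}

/*
 * Registration, e.g. from a driver's add() callback:
 *
 *	acpi_install_notify_handler(handle, ACPI_DEVICE_NOTIFY,
 *				    my_notify_handler, NULL);
 */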
 
 /*
  * Types associated with ACPI names and objects.  The first group of
@@ -494,6 +483,8 @@ typedef u32 acpi_object_type;
 #define ACPI_TYPE_INVALID               0x1E
 #define ACPI_TYPE_NOT_FOUND             0xFF
 
+#define ACPI_NUM_NS_TYPES               (ACPI_TYPE_INVALID + 1)
+
 /*
  * All I/O
  */
@@ -599,7 +590,7 @@ typedef u32 acpi_event_status;
 
 #define ACPI_SYSTEM_NOTIFY              0x1
 #define ACPI_DEVICE_NOTIFY              0x2
-#define ACPI_ALL_NOTIFY                 0x3
+#define ACPI_ALL_NOTIFY                 (ACPI_SYSTEM_NOTIFY | ACPI_DEVICE_NOTIFY)
 #define ACPI_MAX_NOTIFY_HANDLER_TYPE    0x3
 
 #define ACPI_MAX_SYS_NOTIFY             0x7f
@@ -654,46 +645,51 @@ typedef u8 acpi_adr_space_type;
 /*
  * External ACPI object definition
  */
+
+/*
+ * Note: Type == ACPI_TYPE_ANY (0) is used to indicate a NULL package element
+ * or an unresolved named reference.
+ */
 union acpi_object {
        acpi_object_type type;  /* See definition of acpi_ns_type for values */
        struct {
-               acpi_object_type type;
+               acpi_object_type type;  /* ACPI_TYPE_INTEGER */
                acpi_integer value;     /* The actual number */
        } integer;
 
        struct {
-               acpi_object_type type;
+               acpi_object_type type;  /* ACPI_TYPE_STRING */
                u32 length;     /* # of bytes in string, excluding trailing null */
                char *pointer;  /* points to the string value */
        } string;
 
        struct {
-               acpi_object_type type;
+               acpi_object_type type;  /* ACPI_TYPE_BUFFER */
                u32 length;     /* # of bytes in buffer */
                u8 *pointer;    /* points to the buffer */
        } buffer;
 
        struct {
-               acpi_object_type type;
-               u32 fill1;
-               acpi_handle handle;     /* object reference */
-       } reference;
-
-       struct {
-               acpi_object_type type;
+               acpi_object_type type;  /* ACPI_TYPE_PACKAGE */
                u32 count;      /* # of elements in package */
                union acpi_object *elements;    /* Pointer to an array of ACPI_OBJECTs */
        } package;
 
        struct {
-               acpi_object_type type;
+               acpi_object_type type;  /* ACPI_TYPE_LOCAL_REFERENCE */
+               acpi_object_type actual_type;   /* Type associated with the Handle */
+               acpi_handle handle;     /* object reference */
+       } reference;
+
+       struct {
+               acpi_object_type type;  /* ACPI_TYPE_PROCESSOR */
                u32 proc_id;
                acpi_io_address pblk_address;
                u32 pblk_length;
        } processor;
 
        struct {
-               acpi_object_type type;
+               acpi_object_type type;  /* ACPI_TYPE_POWER */
                u32 system_level;
                u32 resource_order;
        } power_resource;
@@ -747,6 +743,12 @@ struct acpi_system_info {
        u32 debug_layer;
 };
 
+/* Table Event Types */
+
+#define ACPI_TABLE_EVENT_LOAD           0x0
+#define ACPI_TABLE_EVENT_UNLOAD         0x1
+#define ACPI_NUM_TABLE_EVENTS           2
+
 /*
  * Types specific to the OS service interfaces
  */
@@ -776,6 +778,11 @@ acpi_status(*acpi_exception_handler) (acpi_status aml_status,
                                      u16 opcode,
                                      u32 aml_offset, void *context);
 
+/* Table Event handler (Load, load_table etc) and types */
+
+typedef
+acpi_status(*acpi_tbl_handler) (u32 event, void *table, void *context);
+
 /* Address Spaces (For Operation Regions) */
 
 typedef
@@ -990,6 +997,7 @@ struct acpi_vendor_uuid {
  *  Structures used to describe device resources
  */
 struct acpi_resource_irq {
+       u8 descriptor_length;
        u8 triggering;
        u8 polarity;
        u8 sharable;
@@ -1006,6 +1014,7 @@ struct acpi_resource_dma {
 };
 
 struct acpi_resource_start_dependent {
+       u8 descriptor_length;
        u8 compatibility_priority;
        u8 performance_robustness;
 };
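
The acpi_tbl_handler typedef and ACPI_TABLE_EVENT_* values added above define a callback invoked when a table is loaded or unloaded. A minimal sketch of a handler with that signature follows; the registration path and the work done per event are assumptions, not part of this diff:

/* Sketch only: a callback matching the new acpi_tbl_handler typedef.
 * AE_OK is ACPICA's success status; how such a handler gets registered
 * is not shown by this diff and is assumed to exist elsewhere. */
static acpi_status example_table_handler(u32 event, void *table, void *context)
{
        switch (event) {
        case ACPI_TABLE_EVENT_LOAD:
                /* a definition block was loaded into the namespace */
                break;
        case ACPI_TABLE_EVENT_UNLOAD:
                /* a previously loaded table was removed */
                break;
        default:
                /* events >= ACPI_NUM_TABLE_EVENTS are not defined here */
                break;
        }
        return AE_OK;
}
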
index a2918547c73f662fd1bd5fe9e7fb54e050b58a1d..b42cadf0730256bb68e17f6ff8ad376f7d57dc23 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -116,6 +116,8 @@ void acpi_ut_init_globals(void);
 
 char *acpi_ut_get_mutex_name(u32 mutex_id);
 
+const char *acpi_ut_get_notify_name(u32 notify_value);
+
 #endif
 
 char *acpi_ut_get_type_name(acpi_object_type type);
index da53a4ef287ad3cbdc414d2b02d05bea005746fb..ff851c5df698a465a3c5e3d730b0a00d7d14ea07 100644 (file)
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f7d541239da4f803da1a41dabc8b56916d7ac234..7b070e42b7c5234fd0c11750db5c931384710c0a 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c785485e62a6cd55d476da76092deca582b761df..fcd2572e428c1a24a45e2ac0e19abde178654c8e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3bb50494a38a03a590f43c25ff619b5fe6c8f29d..8996dba90cd91bbe07b084baf84bd52b46d2fde8 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6ed15a0978ebb7b9fd0b308699b37eef280e124b..9af4645986829bbf00ea5b77d3aeae2f818763ed 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2007, R. Byron Moore
+ * Copyright (C) 2000 - 2008, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 9e19a704d4840693a31bd88413060024e3623266..15f3ae25c51137bb8af4a67b5d5e8cbbea328241 100644 (file)
@@ -388,6 +388,11 @@ static inline int fls64(unsigned long x)
 }
 #endif
 
+static inline unsigned long __fls(unsigned long x)
+{
+       return fls64(x) - 1;
+}
+
 static inline int fls(int x)
 {
        return fls64((unsigned int) x);
index 39a3e2a5017dba4dcd89e094a48ccbb94e065294..695a5ee4b5d3da464ed25bab5668bcc861de1479 100644 (file)
@@ -1,14 +1,24 @@
 #ifndef _ALPHA_BUG_H
 #define _ALPHA_BUG_H
 
+#include <linux/linkage.h>
+
 #ifdef CONFIG_BUG
 #include <asm/pal.h>
 
 /* ??? Would be nice to use .gprel32 here, but we can't be sure that the
    function loaded the GP, so this could fail in modules.  */
-#define BUG() \
-  __asm__ __volatile__("call_pal %0  # bugchk\n\t"".long %1\n\t.8byte %2" \
-                      : : "i" (PAL_bugchk), "i"(__LINE__), "i"(__FILE__))
+static inline void ATTRIB_NORET __BUG(const char *file, int line)
+{
+       __asm__ __volatile__(
+               "call_pal %0  # bugchk\n\t"
+               ".long %1\n\t.8byte %2"
+                      : : "i" (PAL_bugchk), "i"(line), "i"(file));
+       for ( ; ; )
+               ;
+}
+
+#define BUG() __BUG(__FILE__, __LINE__)
 
 #define HAVE_ARCH_BUG
 #endif
index 7af2b8d254868ff7102f43255accd9bbd33673f5..58e958fc7f1b27bb58ce01c4a02dc550a6bd2ab0 100644 (file)
@@ -7,7 +7,7 @@
 
 #ifdef __GNUC__
 
-static __inline __attribute_const__ __u32 __arch__swab32(__u32 x)
+static inline __attribute_const__ __u32 __arch__swab32(__u32 x)
 {
        /*
         * Unfortunately, we can't use the 6 instruction sequence
index 99037b0323570c1e4831e1ec2228bc455fcabef0..05ce5fba43e32ef37143836f1961a5300667f30c 100644 (file)
@@ -268,6 +268,7 @@ extern inline int pte_write(pte_t pte)              { return !(pte_val(pte) & _PAGE_FOW); }
 extern inline int pte_dirty(pte_t pte)         { return pte_val(pte) & _PAGE_DIRTY; }
 extern inline int pte_young(pte_t pte)         { return pte_val(pte) & _PAGE_ACCESSED; }
 extern inline int pte_file(pte_t pte)          { return pte_val(pte) & _PAGE_FILE; }
+extern inline int pte_special(pte_t pte)       { return 0; }
 
 extern inline pte_t pte_wrprotect(pte_t pte)   { pte_val(pte) |= _PAGE_FOW; return pte; }
 extern inline pte_t pte_mkclean(pte_t pte)     { pte_val(pte) &= ~(__DIRTY_BITS); return pte; }
@@ -275,6 +276,7 @@ extern inline pte_t pte_mkold(pte_t pte)    { pte_val(pte) &= ~(__ACCESS_BITS); ret
 extern inline pte_t pte_mkwrite(pte_t pte)     { pte_val(pte) &= ~_PAGE_FOW; return pte; }
 extern inline pte_t pte_mkdirty(pte_t pte)     { pte_val(pte) |= __DIRTY_BITS; return pte; }
 extern inline pte_t pte_mkyoung(pte_t pte)     { pte_val(pte) |= __ACCESS_BITS; return pte; }
+extern inline pte_t pte_mkspecial(pte_t pte)   { return pte; }
 
 #define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
 
index a1d72846f61ca250b53d634404703af06149eb73..3787c60aed3fa716f366b7cf9868e4bcbef63eea 100644 (file)
@@ -1,6 +1,11 @@
-#ifndef __ALPHA_UNALIGNED_H
-#define __ALPHA_UNALIGNED_H
+#ifndef _ASM_ALPHA_UNALIGNED_H
+#define _ASM_ALPHA_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-#endif
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
+
+#endif /* _ASM_ALPHA_UNALIGNED_H */
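
Several architectures in this diff (Alpha here, Blackfin and CRIS below) switch from asm-generic/unaligned.h to the new linux/unaligned/ helpers and simply pick the little-endian accessors. The underlying byteshift technique is plain byte-wise assembly of the value; a self-contained user-space approximation of the 32-bit little-endian case (the function names mirror the kernel's, but this is an illustration, not the kernel implementation):

#include <stdint.h>
#include <stdio.h>

/* Read/write a 32-bit little-endian value at any alignment, one byte
 * at a time, which is essentially what the le_byteshift helpers do. */
static uint32_t get_unaligned_le32(const void *p)
{
        const uint8_t *b = p;

        return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
               ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

static void put_unaligned_le32(uint32_t v, void *p)
{
        uint8_t *b = p;

        b[0] = v;
        b[1] = v >> 8;
        b[2] = v >> 16;
        b[3] = v >> 24;
}

int main(void)
{
        uint8_t buf[8] = { 0 };

        put_unaligned_le32(0x12345678, buf + 1);                /* odd address, no fault */
        printf("0x%08x\n", get_unaligned_le32(buf + 1));        /* prints 0x12345678 */
        return 0;
}
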
index 5148ab7ad1f83404b9ae53caeba8582f4ed08941..50f8c880220667cd4523ad1f5f061eb592c6908c 100644 (file)
@@ -20,11 +20,10 @@ static inline void __iomem *
 __arch_ioremap(unsigned long paddr, size_t size, unsigned int mtype)
 {
        void __iomem *retval;
-
-       if (mtype == MT_DEVICE && size && paddr >= ORION5X_REGS_PHYS_BASE &&
-           paddr + size <= ORION5X_REGS_PHYS_BASE + ORION5X_REGS_SIZE) {
-               retval = (void __iomem *)ORION5X_REGS_VIRT_BASE +
-                               (paddr - ORION5X_REGS_PHYS_BASE);
+       unsigned long offs = paddr - ORION5X_REGS_PHYS_BASE;
+       if (mtype == MT_DEVICE && size && offs < ORION5X_REGS_SIZE &&
+           size <= ORION5X_REGS_SIZE && offs + size <= ORION5X_REGS_SIZE) {
+               retval = (void __iomem *)ORION5X_REGS_VIRT_BASE + offs;
        } else {
                retval = __arm_ioremap(paddr, size, mtype);
        }
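
The rewritten __arch_ioremap() test computes the offset into the register window first and bounds offset and size separately against ORION5X_REGS_SIZE, so it behaves correctly when paddr lies below the base or when paddr + size would wrap, which the old base-relative comparison did not guarantee. The pattern in isolation, with made-up names standing in for the Orion constants:

#include <stdbool.h>

/* Sketch of the overflow-safe window test: base/window stand in for
 * ORION5X_REGS_PHYS_BASE/ORION5X_REGS_SIZE. */
static bool range_in_window(unsigned long paddr, unsigned long size,
                            unsigned long base, unsigned long window)
{
        unsigned long offs = paddr - base;  /* wraps to a huge value if paddr < base */

        return size &&
               offs < window &&
               size <= window &&
               offs + size <= window;  /* both terms bounded by window, so no wrap */
}
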
index 50c77eacbd5e435526ff36d1a5027181cfe19672..b6c8fe37768315bcc5081490572e2493e15910cf 100644 (file)
 /* ITE8152 irqs */
 /* add IT8152 IRQs beyond BOARD_END */
 #ifdef CONFIG_PCI_HOST_ITE8152
-#define IT8152_IRQ(x)   (IRQ_GPIO(IRQ_BOARD_END) + 1 + (x))
+#define IT8152_IRQ(x)   (IRQ_BOARD_END + (x))
 
 /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
 #define IT8152_LD_IRQ_COUNT     9
 
 #define IT8152_LAST_IRQ         IT8152_LD_IRQ(IT8152_LD_IRQ_COUNT - 1)
 
+#if NR_IRQS < (IT8152_LAST_IRQ+1)
 #undef NR_IRQS
 #define NR_IRQS (IT8152_LAST_IRQ+1)
 #endif
+
+#endif /* CONFIG_PCI_HOST_ITE8152 */
index b34fd5683e2d20876805e6b7abe25a8708babced..169b374f9921968031b0b7dccb22673e3366256a 100644 (file)
@@ -13,7 +13,6 @@
 #define _MAGICIAN_H_
 
 #include <asm/arch/irqs.h>
-#include <asm/arch/pxa2xx-gpio.h>
 
 /*
  * PXA GPIOs
 #define GPIO119_MAGICIAN_UNKNOWN               119
 #define GPIO120_MAGICIAN_UNKNOWN               120
 
-/*
- * PXA GPIO alternate function mode & direction
- */
-
-#define GPIO0_MAGICIAN_KEY_POWER_MD            (0 | GPIO_IN)
-#define GPIO9_MAGICIAN_UNKNOWN_MD              (9 | GPIO_IN)
-#define GPIO10_MAGICIAN_GSM_IRQ_MD             (10 | GPIO_IN)
-#define GPIO11_MAGICIAN_GSM_OUT1_MD            (11 | GPIO_OUT)
-#define GPIO13_MAGICIAN_CPLD_IRQ_MD            (13 | GPIO_IN)
-#define GPIO18_MAGICIAN_UNKNOWN_MD             (18 | GPIO_OUT)
-#define GPIO22_MAGICIAN_VIBRA_EN_MD            (22 | GPIO_OUT)
-#define GPIO26_MAGICIAN_GSM_POWER_MD           (26 | GPIO_OUT)
-#define GPIO27_MAGICIAN_USBC_PUEN_MD           (27 | GPIO_OUT)
-#define GPIO30_MAGICIAN_nCHARGE_EN_MD          (30 | GPIO_OUT)
-#define GPIO37_MAGICIAN_KEY_HANGUP_MD          (37 | GPIO_OUT)
-#define GPIO38_MAGICIAN_KEY_CONTACTS_MD                (38 | GPIO_OUT)
-#define GPIO40_MAGICIAN_GSM_OUT2_MD            (40 | GPIO_OUT)
-#define GPIO48_MAGICIAN_UNKNOWN_MD             (48 | GPIO_OUT)
-#define GPIO56_MAGICIAN_UNKNOWN_MD             (56 | GPIO_OUT)
-#define GPIO57_MAGICIAN_CAM_RESET_MD           (57 | GPIO_OUT)
-#define GPIO75_MAGICIAN_SAMSUNG_POWER_MD       (75 | GPIO_OUT)
-#define GPIO83_MAGICIAN_nIR_EN_MD              (83 | GPIO_OUT)
-#define GPIO86_MAGICIAN_GSM_RESET_MD           (86 | GPIO_OUT)
-#define GPIO87_MAGICIAN_GSM_SELECT_MD          (87 | GPIO_OUT)
-#define GPIO90_MAGICIAN_KEY_CALENDAR_MD                (90 | GPIO_OUT)
-#define GPIO91_MAGICIAN_KEY_CAMERA_MD          (91 | GPIO_OUT)
-#define GPIO93_MAGICIAN_KEY_UP_MD              (93 | GPIO_IN)
-#define GPIO94_MAGICIAN_KEY_DOWN_MD            (94 | GPIO_IN)
-#define GPIO95_MAGICIAN_KEY_LEFT_MD            (95 | GPIO_IN)
-#define GPIO96_MAGICIAN_KEY_RIGHT_MD           (96 | GPIO_IN)
-#define GPIO97_MAGICIAN_KEY_ENTER_MD           (97 | GPIO_IN)
-#define GPIO98_MAGICIAN_KEY_RECORD_MD          (98 | GPIO_IN)
-#define GPIO99_MAGICIAN_HEADPHONE_IN_MD                (99 | GPIO_IN)
-#define GPIO100_MAGICIAN_KEY_VOL_UP_MD         (100 | GPIO_IN)
-#define GPIO101_MAGICIAN_KEY_VOL_DOWN_MD       (101 | GPIO_IN)
-#define GPIO102_MAGICIAN_KEY_PHONE_MD          (102 | GPIO_IN)
-#define GPIO103_MAGICIAN_LED_KP_MD             (103 | GPIO_OUT)
-#define GPIO104_MAGICIAN_LCD_POWER_1_MD        (104 | GPIO_OUT)
-#define GPIO105_MAGICIAN_LCD_POWER_2_MD                (105 | GPIO_OUT)
-#define GPIO106_MAGICIAN_LCD_POWER_3_MD                (106 | GPIO_OUT)
-#define GPIO107_MAGICIAN_DS1WM_IRQ_MD          (107 | GPIO_IN)
-#define GPIO108_MAGICIAN_GSM_READY_MD          (108 | GPIO_IN)
-#define GPIO114_MAGICIAN_UNKNOWN_MD            (114 | GPIO_OUT)
-#define GPIO115_MAGICIAN_nPEN_IRQ_MD           (115 | GPIO_IN)
-#define GPIO116_MAGICIAN_nCAM_EN_MD            (116 | GPIO_OUT)
-#define GPIO119_MAGICIAN_UNKNOWN_MD            (119 | GPIO_OUT)
-#define GPIO120_MAGICIAN_UNKNOWN_MD            (120 | GPIO_OUT)
-
 /*
  * CPLD IRQs
  */
index a322012f16acf55f66dd28fb3870fcb7bfed5505..4b2ea1e95c5799b2f1db6effcf557760fd899b69 100644 (file)
 #define OSCC_OON       (1 << 1)        /* 32.768kHz OON (write-once only bit) */
 #define OSCC_OOK       (1 << 0)        /* 32.768kHz OOK (read-only bit) */
 
-
-/*
- * LCD
- */
-
-#define LCCR0          __REG(0x44000000)  /* LCD Controller Control Register 0 */
-#define LCCR1          __REG(0x44000004)  /* LCD Controller Control Register 1 */
-#define LCCR2          __REG(0x44000008)  /* LCD Controller Control Register 2 */
-#define LCCR3          __REG(0x4400000C)  /* LCD Controller Control Register 3 */
-#define LCCR4          __REG(0x44000010)  /* LCD Controller Control Register 3 */
-#define DFBR0          __REG(0x44000020)  /* DMA Channel 0 Frame Branch Register */
-#define DFBR1          __REG(0x44000024)  /* DMA Channel 1 Frame Branch Register */
-#define LCSR           __REG(0x44000038)  /* LCD Controller Status Register */
-#define LIIDR          __REG(0x4400003C)  /* LCD Controller Interrupt ID Register */
-#define TMEDRGBR       __REG(0x44000040)  /* TMED RGB Seed Register */
-#define TMEDCR         __REG(0x44000044)  /* TMED Control Register */
-
-#define LCCR3_1BPP (0 << 24)
-#define LCCR3_2BPP (1 << 24)
-#define LCCR3_4BPP (2 << 24)
-#define LCCR3_8BPP (3 << 24)
-#define LCCR3_16BPP (4 << 24)
-
-#define LCCR3_PDFOR_0 (0 << 30)
-#define LCCR3_PDFOR_1 (1 << 30)
-#define LCCR3_PDFOR_2 (2 << 30)
-#define LCCR3_PDFOR_3 (3 << 30)
-
-#define LCCR4_PAL_FOR_0 (0 << 15)
-#define LCCR4_PAL_FOR_1 (1 << 15)
-#define LCCR4_PAL_FOR_2 (2 << 15)
-#define LCCR4_PAL_FOR_MASK (3 << 15)
-
-#define FDADR0         __REG(0x44000200)  /* DMA Channel 0 Frame Descriptor Address Register */
-#define FSADR0         __REG(0x44000204)  /* DMA Channel 0 Frame Source Address Register */
-#define FIDR0          __REG(0x44000208)  /* DMA Channel 0 Frame ID Register */
-#define LDCMD0         __REG(0x4400020C)  /* DMA Channel 0 Command Register */
-#define FDADR1         __REG(0x44000210)  /* DMA Channel 1 Frame Descriptor Address Register */
-#define FSADR1         __REG(0x44000214)  /* DMA Channel 1 Frame Source Address Register */
-#define FIDR1          __REG(0x44000218)  /* DMA Channel 1 Frame ID Register */
-#define LDCMD1         __REG(0x4400021C)  /* DMA Channel 1 Command Register */
-
-#define LCCR0_ENB      (1 << 0)        /* LCD Controller enable */
-#define LCCR0_CMS      (1 << 1)        /* Color/Monochrome Display Select */
-#define LCCR0_Color     (LCCR0_CMS*0)   /*  Color display                  */
-#define LCCR0_Mono      (LCCR0_CMS*1)   /*  Monochrome display             */
-#define LCCR0_SDS      (1 << 2)        /* Single/Dual Panel Display       */
-                                        /* Select                          */
-#define LCCR0_Sngl      (LCCR0_SDS*0)   /*  Single panel display           */
-#define LCCR0_Dual      (LCCR0_SDS*1)   /*  Dual panel display             */
-
-#define LCCR0_LDM      (1 << 3)        /* LCD Disable Done Mask */
-#define LCCR0_SFM      (1 << 4)        /* Start of frame mask */
-#define LCCR0_IUM      (1 << 5)        /* Input FIFO underrun mask */
-#define LCCR0_EFM      (1 << 6)        /* End of Frame mask */
-#define LCCR0_PAS      (1 << 7)        /* Passive/Active display Select   */
-#define LCCR0_Pas       (LCCR0_PAS*0)   /*  Passive display (STN)          */
-#define LCCR0_Act       (LCCR0_PAS*1)   /*  Active display (TFT)           */
-#define LCCR0_DPD      (1 << 9)        /* Double Pixel Data (monochrome   */
-                                        /* display mode)                   */
-#define LCCR0_4PixMono  (LCCR0_DPD*0)   /*  4-Pixel/clock Monochrome       */
-                                        /*  display                        */
-#define LCCR0_8PixMono  (LCCR0_DPD*1)   /*  8-Pixel/clock Monochrome       */
-                                        /*  display                        */
-#define LCCR0_DIS      (1 << 10)       /* LCD Disable */
-#define LCCR0_QDM      (1 << 11)       /* LCD Quick Disable mask */
-#define LCCR0_PDD      (0xff << 12)    /* Palette DMA request delay */
-#define LCCR0_PDD_S    12
-#define LCCR0_BM       (1 << 20)       /* Branch mask */
-#define LCCR0_OUM      (1 << 21)       /* Output FIFO underrun mask */
-#define LCCR0_LCDT      (1 << 22)       /* LCD panel type */
-#define LCCR0_RDSTM     (1 << 23)       /* Read status interrupt mask */
-#define LCCR0_CMDIM     (1 << 24)       /* Command interrupt mask */
-#define LCCR0_OUC       (1 << 25)       /* Overlay Underlay control bit */
-#define LCCR0_LDDALT    (1 << 26)       /* LDD alternate mapping control */
-
-#define LCCR1_PPL       Fld (10, 0)      /* Pixels Per Line - 1 */
-#define LCCR1_DisWdth(Pixel)            /* Display Width [1..800 pix.]  */ \
-                        (((Pixel) - 1) << FShft (LCCR1_PPL))
-
-#define LCCR1_HSW       Fld (6, 10)     /* Horizontal Synchronization     */
-#define LCCR1_HorSnchWdth(Tpix)         /* Horizontal Synchronization     */ \
-                                        /* pulse Width [1..64 Tpix]       */ \
-                        (((Tpix) - 1) << FShft (LCCR1_HSW))
-
-#define LCCR1_ELW       Fld (8, 16)     /* End-of-Line pixel clock Wait    */
-                                        /* count - 1 [Tpix]                */
-#define LCCR1_EndLnDel(Tpix)            /*  End-of-Line Delay              */ \
-                                        /*  [1..256 Tpix]                  */ \
-                        (((Tpix) - 1) << FShft (LCCR1_ELW))
-
-#define LCCR1_BLW       Fld (8, 24)     /* Beginning-of-Line pixel clock   */
-                                        /* Wait count - 1 [Tpix]           */
-#define LCCR1_BegLnDel(Tpix)            /*  Beginning-of-Line Delay        */ \
-                                        /*  [1..256 Tpix]                  */ \
-                        (((Tpix) - 1) << FShft (LCCR1_BLW))
-
-
-#define LCCR2_LPP       Fld (10, 0)     /* Line Per Panel - 1              */
-#define LCCR2_DisHght(Line)             /*  Display Height [1..1024 lines] */ \
-                        (((Line) - 1) << FShft (LCCR2_LPP))
-
-#define LCCR2_VSW       Fld (6, 10)     /* Vertical Synchronization pulse  */
-                                        /* Width - 1 [Tln] (L_FCLK)        */
-#define LCCR2_VrtSnchWdth(Tln)          /*  Vertical Synchronization pulse */ \
-                                        /*  Width [1..64 Tln]              */ \
-                        (((Tln) - 1) << FShft (LCCR2_VSW))
-
-#define LCCR2_EFW       Fld (8, 16)     /* End-of-Frame line clock Wait    */
-                                        /* count [Tln]                     */
-#define LCCR2_EndFrmDel(Tln)            /*  End-of-Frame Delay             */ \
-                                        /*  [0..255 Tln]                   */ \
-                        ((Tln) << FShft (LCCR2_EFW))
-
-#define LCCR2_BFW       Fld (8, 24)     /* Beginning-of-Frame line clock   */
-                                        /* Wait count [Tln]                */
-#define LCCR2_BegFrmDel(Tln)            /*  Beginning-of-Frame Delay       */ \
-                                        /*  [0..255 Tln]                   */ \
-                        ((Tln) << FShft (LCCR2_BFW))
-
-#if 0
-#define LCCR3_PCD      (0xff)          /* Pixel clock divisor */
-#define LCCR3_ACB      (0xff << 8)     /* AC Bias pin frequency */
-#define LCCR3_ACB_S    8
-#endif
-
-#define LCCR3_API      (0xf << 16)     /* AC Bias pin trasitions per interrupt */
-#define LCCR3_API_S    16
-#define LCCR3_VSP      (1 << 20)       /* vertical sync polarity */
-#define LCCR3_HSP      (1 << 21)       /* horizontal sync polarity */
-#define LCCR3_PCP      (1 << 22)       /* Pixel Clock Polarity (L_PCLK)   */
-#define LCCR3_PixRsEdg  (LCCR3_PCP*0)   /*  Pixel clock Rising-Edge        */
-#define LCCR3_PixFlEdg  (LCCR3_PCP*1)   /*  Pixel clock Falling-Edge       */
-
-#define LCCR3_OEP       (1 << 23)       /* Output Enable Polarity (L_BIAS, */
-                                        /* active display mode)            */
-#define LCCR3_OutEnH    (LCCR3_OEP*0)   /*  Output Enable active High      */
-#define LCCR3_OutEnL    (LCCR3_OEP*1)   /*  Output Enable active Low       */
-
-#if 0
-#define LCCR3_BPP      (7 << 24)       /* bits per pixel */
-#define LCCR3_BPP_S    24
-#endif
-#define LCCR3_DPC      (1 << 27)       /* double pixel clock mode */
-
-
-#define LCCR3_PCD       Fld (8, 0)      /* Pixel Clock Divisor */
-#define LCCR3_PixClkDiv(Div)            /* Pixel Clock Divisor */ \
-                        (((Div) << FShft (LCCR3_PCD)))
-
-
-#define LCCR3_BPP       Fld (3, 24)     /* Bit Per Pixel */
-#define LCCR3_Bpp(Bpp)                  /* Bit Per Pixel */ \
-                        (((Bpp) << FShft (LCCR3_BPP)))
-
-#define LCCR3_ACB       Fld (8, 8)      /* AC Bias */
-#define LCCR3_Acb(Acb)                  /* BAC Bias */ \
-                        (((Acb) << FShft (LCCR3_ACB)))
-
-#define LCCR3_HorSnchH  (LCCR3_HSP*0)   /*  Horizontal Synchronization     */
-                                        /*  pulse active High              */
-#define LCCR3_HorSnchL  (LCCR3_HSP*1)   /*  Horizontal Synchronization     */
-
-#define LCCR3_VrtSnchH  (LCCR3_VSP*0)   /*  Vertical Synchronization pulse */
-                                        /*  active High                    */
-#define LCCR3_VrtSnchL  (LCCR3_VSP*1)   /*  Vertical Synchronization pulse */
-                                        /*  active Low                     */
-
-#define LCSR_LDD       (1 << 0)        /* LCD Disable Done */
-#define LCSR_SOF       (1 << 1)        /* Start of frame */
-#define LCSR_BER       (1 << 2)        /* Bus error */
-#define LCSR_ABC       (1 << 3)        /* AC Bias count */
-#define LCSR_IUL       (1 << 4)        /* input FIFO underrun Lower panel */
-#define LCSR_IUU       (1 << 5)        /* input FIFO underrun Upper panel */
-#define LCSR_OU                (1 << 6)        /* output FIFO underrun */
-#define LCSR_QD                (1 << 7)        /* quick disable */
-#define LCSR_EOF       (1 << 8)        /* end of frame */
-#define LCSR_BS                (1 << 9)        /* branch status */
-#define LCSR_SINT      (1 << 10)       /* subsequent interrupt */
-
-#define LDCMD_PAL      (1 << 26)       /* instructs DMA to load palette buffer */
-
-#define LCSR_LDD       (1 << 0)        /* LCD Disable Done */
-#define LCSR_SOF       (1 << 1)        /* Start of frame */
-#define LCSR_BER       (1 << 2)        /* Bus error */
-#define LCSR_ABC       (1 << 3)        /* AC Bias count */
-#define LCSR_IUL       (1 << 4)        /* input FIFO underrun Lower panel */
-#define LCSR_IUU       (1 << 5)        /* input FIFO underrun Upper panel */
-#define LCSR_OU                (1 << 6)        /* output FIFO underrun */
-#define LCSR_QD                (1 << 7)        /* quick disable */
-#define LCSR_EOF       (1 << 8)        /* end of frame */
-#define LCSR_BS                (1 << 9)        /* branch status */
-#define LCSR_SINT      (1 << 10)       /* subsequent interrupt */
-
-#define LDCMD_PAL      (1 << 26)       /* instructs DMA to load palette buffer */
-
 #ifdef CONFIG_PXA27x
 
 /* Camera Interface */
index ea2336aa70e47ea6fab3bce62a6d483424a02fbc..bbd22396841ae15d0eefcd235ffeccd9abdacb8b 100644 (file)
  */
 
 #include <linux/fb.h>
+#include <asm/arch/regs-lcd.h>
+
+/*
+ * Supported LCD connections
+ *
+ * bits 0 - 3: for LCD panel type:
+ *
+ *   STN  - for passive matrix
+ *   DSTN - for dual scan passive matrix
+ *   TFT  - for active matrix
+ *
+ * bits 4 - 9 : for bus width
+ * bits 10-17 : for AC Bias Pin Frequency
+ * bit     18 : for output enable polarity
+ * bit     19 : for pixel clock edge
+ */
+#define LCD_CONN_TYPE(_x)      ((_x) & 0x0f)
+#define LCD_CONN_WIDTH(_x)     (((_x) >> 4) & 0x1f)
+
+#define LCD_TYPE_UNKNOWN       0
+#define LCD_TYPE_MONO_STN      1
+#define LCD_TYPE_MONO_DSTN     2
+#define LCD_TYPE_COLOR_STN     3
+#define LCD_TYPE_COLOR_DSTN    4
+#define LCD_TYPE_COLOR_TFT     5
+#define LCD_TYPE_SMART_PANEL   6
+#define LCD_TYPE_MAX           7
+
+#define LCD_MONO_STN_4BPP      ((4  << 4) | LCD_TYPE_MONO_STN)
+#define LCD_MONO_STN_8BPP      ((8  << 4) | LCD_TYPE_MONO_STN)
+#define LCD_MONO_DSTN_8BPP     ((8  << 4) | LCD_TYPE_MONO_DSTN)
+#define LCD_COLOR_STN_8BPP     ((8  << 4) | LCD_TYPE_COLOR_STN)
+#define LCD_COLOR_DSTN_16BPP   ((16 << 4) | LCD_TYPE_COLOR_DSTN)
+#define LCD_COLOR_TFT_16BPP    ((16 << 4) | LCD_TYPE_COLOR_TFT)
+#define LCD_COLOR_TFT_18BPP    ((18 << 4) | LCD_TYPE_COLOR_TFT)
+#define LCD_SMART_PANEL_8BPP   ((8  << 4) | LCD_TYPE_SMART_PANEL)
+#define LCD_SMART_PANEL_16BPP  ((16 << 4) | LCD_TYPE_SMART_PANEL)
+#define LCD_SMART_PANEL_18BPP  ((18 << 4) | LCD_TYPE_SMART_PANEL)
+
+#define LCD_AC_BIAS_FREQ(x)    (((x) & 0xff) << 10)
+#define LCD_BIAS_ACTIVE_HIGH   (0 << 17)
+#define LCD_BIAS_ACTIVE_LOW    (1 << 17)
+#define LCD_PCLK_EDGE_RISE     (0 << 18)
+#define LCD_PCLK_EDGE_FALL     (1 << 18)
 
 /*
  * This structure describes the machine which we are running on.
@@ -26,6 +70,10 @@ struct pxafb_mode_info {
        u_short         yres;
 
        u_char          bpp;
+       u_int           cmap_greyscale:1,
+                       unused:31;
+
+       /* Parallel Mode Timing */
        u_char          hsync_len;
        u_char          left_margin;
        u_char          right_margin;
@@ -35,14 +83,28 @@ struct pxafb_mode_info {
        u_char          lower_margin;
        u_char          sync;
 
-       u_int           cmap_greyscale:1,
-                       unused:31;
+       /* Smart Panel Mode Timing - see PXA27x DM 7.4.15.0.3 for details
+        * Note:
+        * 1. all parameters in nanosecond (ns)
+        * 2. a0cs{rd,wr}_set_hld are controlled by the same register bits
+        *    in pxa27x and pxa3xx, initialize them to the same value or
+        *    the larger one will be used
+        * 3. same to {rd,wr}_pulse_width
+        */
+       unsigned        a0csrd_set_hld; /* A0 and CS Setup/Hold Time before/after L_FCLK_RD */
+       unsigned        a0cswr_set_hld; /* A0 and CS Setup/Hold Time before/after L_PCLK_WR */
+       unsigned        wr_pulse_width; /* L_PCLK_WR pulse width */
+       unsigned        rd_pulse_width; /* L_FCLK_RD pulse width */
+       unsigned        cmd_inh_time;   /* Command Inhibit time between two writes */
+       unsigned        op_hold_time;   /* Output Hold time from L_FCLK_RD negation */
 };
 
 struct pxafb_mach_info {
        struct pxafb_mode_info *modes;
        unsigned int num_modes;
 
+       unsigned int    lcd_conn;
+
        u_int           fixed_modes:1,
                        cmap_inverse:1,
                        cmap_static:1,
@@ -78,8 +140,11 @@ struct pxafb_mach_info {
        u_int           lccr4;
        void (*pxafb_backlight_power)(int);
        void (*pxafb_lcd_power)(int, struct fb_var_screeninfo *);
-
+       void (*smart_update)(struct fb_info *);
 };
 void set_pxa_fb_info(struct pxafb_mach_info *hard_pxa_fb_info);
 void set_pxa_fb_parent(struct device *parent_dev);
 unsigned long pxafb_get_hsync_time(struct device *dev);
+
+extern int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int);
+extern int pxafb_smart_flush(struct fb_info *info);
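
The new lcd_conn field and LCD_* connection macros above let a board description state what kind of panel is wired up instead of spelling out raw LCCR* values. A hypothetical board entry using them; the board name, timing numbers and mode values are invented for illustration, and only the structure fields and macros come from this header:

static struct pxafb_mode_info example_lcd_mode = {
        .pixclock       = 156000,       /* timing values invented for illustration */
        .xres           = 240,
        .yres           = 320,
        .bpp            = 16,
        .hsync_len      = 4,
        .left_margin    = 20,
        .right_margin   = 10,
        .vsync_len      = 2,
        .upper_margin   = 1,
        .lower_margin   = 3,
};

static struct pxafb_mach_info example_lcd_info = {
        .modes          = &example_lcd_mode,
        .num_modes      = 1,
        .lcd_conn       = LCD_COLOR_TFT_16BPP | LCD_PCLK_EDGE_FALL,
};

/* In the board's init code: set_pxa_fb_info(&example_lcd_info); */
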
diff --git a/include/asm-arm/arch-pxa/regs-lcd.h b/include/asm-arm/arch-pxa/regs-lcd.h
new file mode 100644 (file)
index 0000000..f762493
--- /dev/null
@@ -0,0 +1,171 @@
+#ifndef __ASM_ARCH_REGS_LCD_H
+#define __ASM_ARCH_REGS_LCD_H
+/*
+ * LCD Controller Registers and Bits Definitions
+ */
+#define LCCR0          (0x000) /* LCD Controller Control Register 0 */
+#define LCCR1          (0x004) /* LCD Controller Control Register 1 */
+#define LCCR2          (0x008) /* LCD Controller Control Register 2 */
+#define LCCR3          (0x00C) /* LCD Controller Control Register 3 */
+#define LCCR4          (0x010) /* LCD Controller Control Register 4 */
+#define LCCR5          (0x014) /* LCD Controller Control Register 5 */
+#define DFBR0          (0x020) /* DMA Channel 0 Frame Branch Register */
+#define DFBR1          (0x024) /* DMA Channel 1 Frame Branch Register */
+#define LCSR           (0x038) /* LCD Controller Status Register */
+#define LIIDR          (0x03C) /* LCD Controller Interrupt ID Register */
+#define TMEDRGBR       (0x040) /* TMED RGB Seed Register */
+#define TMEDCR         (0x044) /* TMED Control Register */
+
+#define CMDCR          (0x100) /* Command Control Register */
+#define PRSR           (0x104) /* Panel Read Status Register */
+
+#define LCCR3_1BPP     (0 << 24)
+#define LCCR3_2BPP     (1 << 24)
+#define LCCR3_4BPP     (2 << 24)
+#define LCCR3_8BPP     (3 << 24)
+#define LCCR3_16BPP    (4 << 24)
+
+#define LCCR3_PDFOR_0  (0 << 30)
+#define LCCR3_PDFOR_1  (1 << 30)
+#define LCCR3_PDFOR_2  (2 << 30)
+#define LCCR3_PDFOR_3  (3 << 30)
+
+#define LCCR4_PAL_FOR_0        (0 << 15)
+#define LCCR4_PAL_FOR_1        (1 << 15)
+#define LCCR4_PAL_FOR_2        (2 << 15)
+#define LCCR4_PAL_FOR_MASK     (3 << 15)
+
+#define FDADR0         (0x200) /* DMA Channel 0 Frame Descriptor Address Register */
+#define FSADR0         (0x204) /* DMA Channel 0 Frame Source Address Register */
+#define FIDR0          (0x208) /* DMA Channel 0 Frame ID Register */
+#define LDCMD0         (0x20C) /* DMA Channel 0 Command Register */
+#define FDADR1         (0x210) /* DMA Channel 1 Frame Descriptor Address Register */
+#define FSADR1         (0x214) /* DMA Channel 1 Frame Source Address Register */
+#define FIDR1          (0x218) /* DMA Channel 1 Frame ID Register */
+#define LDCMD1         (0x21C) /* DMA Channel 1 Command Register */
+#define FDADR6         (0x260) /* DMA Channel 6 Frame Descriptor Address Register */
+#define FSADR6         (0x264) /* DMA Channel 6 Frame Source Address Register */
+#define FIDR6          (0x268) /* DMA Channel 6 Frame ID Register */
+
+#define LCCR0_ENB      (1 << 0)        /* LCD Controller enable */
+#define LCCR0_CMS      (1 << 1)        /* Color/Monochrome Display Select */
+#define LCCR0_Color    (LCCR0_CMS*0)   /*  Color display */
+#define LCCR0_Mono     (LCCR0_CMS*1)   /*  Monochrome display */
+#define LCCR0_SDS      (1 << 2)        /* Single/Dual Panel Display Select */
+#define LCCR0_Sngl     (LCCR0_SDS*0)   /*  Single panel display */
+#define LCCR0_Dual     (LCCR0_SDS*1)   /*  Dual panel display */
+
+#define LCCR0_LDM      (1 << 3)        /* LCD Disable Done Mask */
+#define LCCR0_SFM      (1 << 4)        /* Start of frame mask */
+#define LCCR0_IUM      (1 << 5)        /* Input FIFO underrun mask */
+#define LCCR0_EFM      (1 << 6)        /* End of Frame mask */
+#define LCCR0_PAS      (1 << 7)        /* Passive/Active display Select */
+#define LCCR0_Pas      (LCCR0_PAS*0)   /*  Passive display (STN) */
+#define LCCR0_Act      (LCCR0_PAS*1)   /*  Active display (TFT) */
+#define LCCR0_DPD      (1 << 9)        /* Double Pixel Data (monochrome) */
+#define LCCR0_4PixMono (LCCR0_DPD*0)   /*  4-Pixel/clock Monochrome display */
+#define LCCR0_8PixMono (LCCR0_DPD*1)   /*  8-Pixel/clock Monochrome display */
+#define LCCR0_DIS      (1 << 10)       /* LCD Disable */
+#define LCCR0_QDM      (1 << 11)       /* LCD Quick Disable mask */
+#define LCCR0_PDD      (0xff << 12)    /* Palette DMA request delay */
+#define LCCR0_PDD_S    12
+#define LCCR0_BM       (1 << 20)       /* Branch mask */
+#define LCCR0_OUM      (1 << 21)       /* Output FIFO underrun mask */
+#define LCCR0_LCDT     (1 << 22)       /* LCD panel type */
+#define LCCR0_RDSTM    (1 << 23)       /* Read status interrupt mask */
+#define LCCR0_CMDIM    (1 << 24)       /* Command interrupt mask */
+#define LCCR0_OUC      (1 << 25)       /* Overlay Underlay control bit */
+#define LCCR0_LDDALT   (1 << 26)       /* LDD alternate mapping control */
+
+#define LCCR1_PPL      Fld (10, 0)     /* Pixels Per Line - 1 */
+#define LCCR1_DisWdth(Pixel)   (((Pixel) - 1) << FShft (LCCR1_PPL))
+
+#define LCCR1_HSW      Fld (6, 10)     /* Horizontal Synchronization */
+#define LCCR1_HorSnchWdth(Tpix)        (((Tpix) - 1) << FShft (LCCR1_HSW))
+
+#define LCCR1_ELW      Fld (8, 16)     /* End-of-Line pixel clock Wait - 1 */
+#define LCCR1_EndLnDel(Tpix)   (((Tpix) - 1) << FShft (LCCR1_ELW))
+
+#define LCCR1_BLW      Fld (8, 24)     /* Beginning-of-Line pixel clock */
+#define LCCR1_BegLnDel(Tpix)   (((Tpix) - 1) << FShft (LCCR1_BLW))
+
+#define LCCR2_LPP      Fld (10, 0)     /* Line Per Panel - 1 */
+#define LCCR2_DisHght(Line)    (((Line) - 1) << FShft (LCCR2_LPP))
+
+#define LCCR2_VSW      Fld (6, 10)     /* Vertical Synchronization pulse - 1 */
+#define LCCR2_VrtSnchWdth(Tln) (((Tln) - 1) << FShft (LCCR2_VSW))
+
+#define LCCR2_EFW      Fld (8, 16)     /* End-of-Frame line clock Wait */
+#define LCCR2_EndFrmDel(Tln)   ((Tln) << FShft (LCCR2_EFW))
+
+#define LCCR2_BFW      Fld (8, 24)     /* Beginning-of-Frame line clock */
+#define LCCR2_BegFrmDel(Tln)   ((Tln) << FShft (LCCR2_BFW))
+
+#define LCCR3_API      (0xf << 16)     /* AC Bias pin trasitions per interrupt */
+#define LCCR3_API_S    16
+#define LCCR3_VSP      (1 << 20)       /* vertical sync polarity */
+#define LCCR3_HSP      (1 << 21)       /* horizontal sync polarity */
+#define LCCR3_PCP      (1 << 22)       /* Pixel Clock Polarity (L_PCLK) */
+#define LCCR3_PixRsEdg (LCCR3_PCP*0)   /*  Pixel clock Rising-Edge */
+#define LCCR3_PixFlEdg (LCCR3_PCP*1)   /*  Pixel clock Falling-Edge */
+
+#define LCCR3_OEP      (1 << 23)       /* Output Enable Polarity */
+#define LCCR3_OutEnH   (LCCR3_OEP*0)   /*  Output Enable active High */
+#define LCCR3_OutEnL   (LCCR3_OEP*1)   /*  Output Enable active Low */
+
+#define LCCR3_DPC      (1 << 27)       /* double pixel clock mode */
+#define LCCR3_PCD      Fld (8, 0)      /* Pixel Clock Divisor */
+#define LCCR3_PixClkDiv(Div)   (((Div) << FShft (LCCR3_PCD)))
+
+#define LCCR3_BPP      Fld (3, 24)     /* Bit Per Pixel */
+#define LCCR3_Bpp(Bpp) (((Bpp) << FShft (LCCR3_BPP)))
+
+#define LCCR3_ACB      Fld (8, 8)      /* AC Bias */
+#define LCCR3_Acb(Acb) (((Acb) << FShft (LCCR3_ACB)))
+
+#define LCCR3_HorSnchH (LCCR3_HSP*0)   /*  HSP Active High */
+#define LCCR3_HorSnchL (LCCR3_HSP*1)   /*  HSP Active Low */
+
+#define LCCR3_VrtSnchH (LCCR3_VSP*0)   /*  VSP Active High */
+#define LCCR3_VrtSnchL (LCCR3_VSP*1)   /*  VSP Active Low */
+
+#define LCCR5_IUM(x)   (1 << ((x) + 23)) /* input underrun mask */
+#define LCCR5_BSM(x)   (1 << ((x) + 15)) /* branch mask */
+#define LCCR5_EOFM(x)  (1 << ((x) + 7))  /* end of frame mask */
+#define LCCR5_SOFM(x)  (1 << ((x) + 0))  /* start of frame mask */
+
+#define LCSR_LDD       (1 << 0)        /* LCD Disable Done */
+#define LCSR_SOF       (1 << 1)        /* Start of frame */
+#define LCSR_BER       (1 << 2)        /* Bus error */
+#define LCSR_ABC       (1 << 3)        /* AC Bias count */
+#define LCSR_IUL       (1 << 4)        /* input FIFO underrun Lower panel */
+#define LCSR_IUU       (1 << 5)        /* input FIFO underrun Upper panel */
+#define LCSR_OU                (1 << 6)        /* output FIFO underrun */
+#define LCSR_QD                (1 << 7)        /* quick disable */
+#define LCSR_EOF       (1 << 8)        /* end of frame */
+#define LCSR_BS                (1 << 9)        /* branch status */
+#define LCSR_SINT      (1 << 10)       /* subsequent interrupt */
+#define LCSR_RD_ST     (1 << 11)       /* read status */
+#define LCSR_CMD_INT   (1 << 12)       /* command interrupt */
+
+#define LDCMD_PAL      (1 << 26)       /* instructs DMA to load palette buffer */
+
+/* smartpanel related */
+#define PRSR_DATA(x)   ((x) & 0xff)    /* Panel Data */
+#define PRSR_A0                (1 << 8)        /* Read Data Source */
+#define PRSR_ST_OK     (1 << 9)        /* Status OK */
+#define PRSR_CON_NT    (1 << 10)       /* Continue to Next Command */
+
+#define SMART_CMD_A0                    (0x1 << 8)
+#define SMART_CMD_READ_STATUS_REG       (0x0 << 9)
+#define SMART_CMD_READ_FRAME_BUFFER    ((0x0 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WRITE_COMMAND                 (0x1 << 9)
+#define SMART_CMD_WRITE_DATA           ((0x1 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WRITE_FRAME          ((0x2 << 9) | SMART_CMD_A0)
+#define SMART_CMD_WAIT_FOR_VSYNC        (0x3 << 9)
+#define SMART_CMD_NOOP                  (0x4 << 9)
+#define SMART_CMD_INTERRUPT             (0x5 << 9)
+
+#define SMART_CMD(x)   (SMART_CMD_WRITE_COMMAND | ((x) & 0xff))
+#define SMART_DAT(x)   (SMART_CMD_WRITE_DATA | ((x) & 0xff))
+#endif /* __ASM_ARCH_REGS_LCD_H */
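
The SMART_CMD()/SMART_DAT() encodings at the end of this new header pair with the pxafb_smart_queue()/pxafb_smart_flush() hooks declared in pxafb.h earlier in the diff: a smart-panel update queues a command/data sequence and then flushes it to the controller. A sketch of such a sequence; the command byte 0x29 and the surrounding function are made up:

/* Hypothetical update helper for a command-driven (smart) panel. */
static void example_panel_update(struct fb_info *info)
{
        uint16_t cmds[] = {
                SMART_CMD(0x29),        /* made-up controller command byte */
                SMART_DAT(0x00),        /* one data byte for that command */
                SMART_CMD_WRITE_FRAME,  /* then push the frame buffer */
                SMART_CMD_WAIT_FOR_VSYNC,
        };

        pxafb_smart_queue(info, cmds, ARRAY_SIZE(cmds));
        pxafb_smart_flush(info);
}
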
index 1d56a3ef89fd3844df65403921f6a6b6c6ad45ab..a758a719180f64e5f4167eea0e9b6ca8b6ff3a75 100644 (file)
@@ -22,6 +22,8 @@ static inline void arch_idle(void)
 
 static inline void arch_reset(char mode)
 {
+       RCSR = RCSR_HWR | RCSR_WDR | RCSR_SMR | RCSR_GPR;
+
        if (mode == 's') {
                /* Jump into ROM at address 0 */
                cpu_reset(0);
index 98b10bcf9f1b87d44058bed3c1f9753415fd1de8..b14cbda01dc3cce14c715ffef836dde3d1fbabe9 100644 (file)
@@ -37,12 +37,12 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port,
 
        memset(hw, 0, sizeof(*hw));
 
-       for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
-               hw->io_ports[i] = reg;
+       for (i = 0; i <= 7; i++) {
+               hw->io_ports_array[i] = reg;
                reg += regincr;
        }
 
-       hw->io_ports[IDE_CONTROL_OFFSET] = ctrl_port;
+       hw->io_ports.ctl_addr = ctrl_port;
 
        if (irq)
                *irq = 0;
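
The converted loop fills io_ports_array[] by index while the control register is still assigned by name through io_ports.ctl_addr, which only works if the named fields and the array overlay the same storage. A minimal sketch of that aliasing pattern; the field names other than ctl_addr/irq_addr and the element count are assumptions, not taken from this diff:

/* Illustrative only: named port addresses unioned with an indexable
 * array so loops and by-name accesses touch the same memory. */
struct example_io_ports {
        unsigned long data_addr;        /* io_ports_array[0] */
        unsigned long error_addr;
        unsigned long nsect_addr;
        unsigned long lbal_addr;
        unsigned long lbam_addr;
        unsigned long lbah_addr;
        unsigned long device_addr;
        unsigned long status_addr;      /* io_ports_array[7] */
        unsigned long ctl_addr;
        unsigned long irq_addr;
};

union example_hw_ports {
        struct example_io_ports io_ports;
        unsigned long io_ports_array[10];
};
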
index 0b5f881c3d85a252481f30dc713b60a91f5d7c9f..5001390be9582fc1c937aa562671c23cd403f957 100644 (file)
 
 #endif
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-
 #endif
index c86f68ee65118b6c7cdd2fec34ee7509abbaae46..5c22b01121069751e2ed73049ec6d37ae54a18c9 100644 (file)
 # endif
 #endif
 
+#ifdef CONFIG_CPU_COPY_FEROCEON
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER feroceon
+# endif
+#endif
+
 #ifdef CONFIG_CPU_SA1100
 # ifdef _USER
 #  define MULTI_USER 1
index 5e0182485d8c0978872cceff729145291a328596..5571c13c3f3bb0af57ba3ea85d7f22bf78fa68d6 100644 (file)
@@ -260,6 +260,7 @@ extern struct page *empty_zero_page;
 #define pte_write(pte)         (pte_val(pte) & L_PTE_WRITE)
 #define pte_dirty(pte)         (pte_val(pte) & L_PTE_DIRTY)
 #define pte_young(pte)         (pte_val(pte) & L_PTE_YOUNG)
+#define pte_special(pte)       (0)
 
 /*
  * The following only works if pte_present() is not true.
@@ -280,6 +281,8 @@ PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
 PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
 PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
 
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
 /*
  * Mark the prot value as uncacheable and unbufferable.
  */
index 5db03cf3b90525d2164175288bd0f5a28f3efe7a..44593a8949038d18f566a821b6c0d99161de43a5 100644 (file)
@@ -1,171 +1,9 @@
-#ifndef __ASM_ARM_UNALIGNED_H
-#define __ASM_ARM_UNALIGNED_H
+#ifndef _ASM_ARM_UNALIGNED_H
+#define _ASM_ARM_UNALIGNED_H
 
-#include <asm/types.h>
-
-extern int __bug_unaligned_x(const void *ptr);
-
-/*
- * What is the most efficient way of loading/storing an unaligned value?
- *
- * That is the subject of this file.  Efficiency here is defined as
- * minimum code size with minimum register usage for the common cases.
- * It is currently not believed that long longs are common, so we
- * trade efficiency for the chars, shorts and longs against the long
- * longs.
- *
- * Current stats with gcc 2.7.2.2 for these functions:
- *
- *     ptrsize get:    code    regs    put:    code    regs
- *     1               1       1               1       2
- *     2               3       2               3       2
- *     4               7       3               7       3
- *     8               20      6               16      6
- *
- * gcc 2.95.1 seems to code differently:
- *
- *     ptrsize get:    code    regs    put:    code    regs
- *     1               1       1               1       2
- *     2               3       2               3       2
- *     4               7       4               7       4
- *     8               19      8               15      6
- *
- * which may or may not be more efficient (depending upon whether
- * you can afford the extra registers).  Hopefully the gcc 2.95
- * is inteligent enough to decide if it is better to use the
- * extra register, but evidence so far seems to suggest otherwise.
- *
- * Unfortunately, gcc is not able to optimise the high word
- * out of long long >> 32, or the low word from long long << 32
- */
-
-#define __get_unaligned_2_le(__p)                                      \
-       (unsigned int)(__p[0] | __p[1] << 8)
-
-#define __get_unaligned_2_be(__p)                                      \
-       (unsigned int)(__p[0] << 8 | __p[1])
-
-#define __get_unaligned_4_le(__p)                                      \
-       (unsigned int)(__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24)
-
-#define __get_unaligned_4_be(__p)                                      \
-       (unsigned int)(__p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3])
-
-#define __get_unaligned_8_le(__p)                                      \
-       ((unsigned long long)__get_unaligned_4_le((__p+4)) << 32 |      \
-               __get_unaligned_4_le(__p))
-
-#define __get_unaligned_8_be(__p)                                      \
-       ((unsigned long long)__get_unaligned_4_be(__p) << 32 |          \
-               __get_unaligned_4_be((__p+4)))
-
-#define __get_unaligned_le(ptr)                                                \
-       ((__force typeof(*(ptr)))({                                     \
-               const __u8 *__p = (const __u8 *)(ptr);                  \
-               __builtin_choose_expr(sizeof(*(ptr)) == 1, *__p,        \
-                 __builtin_choose_expr(sizeof(*(ptr)) == 2, __get_unaligned_2_le(__p), \
-                 __builtin_choose_expr(sizeof(*(ptr)) == 4, __get_unaligned_4_le(__p), \
-                 __builtin_choose_expr(sizeof(*(ptr)) == 8, __get_unaligned_8_le(__p), \
-                   (void)__bug_unaligned_x(__p)))));                   \
-       }))
-
-#define __get_unaligned_be(ptr)                                                \
-       ((__force typeof(*(ptr)))({                                     \
-               const __u8 *__p = (const __u8 *)(ptr);                  \
-               __builtin_choose_expr(sizeof(*(ptr)) == 1, *__p,        \
-                 __builtin_choose_expr(sizeof(*(ptr)) == 2, __get_unaligned_2_be(__p), \
-                 __builtin_choose_expr(sizeof(*(ptr)) == 4, __get_unaligned_4_be(__p), \
-                 __builtin_choose_expr(sizeof(*(ptr)) == 8, __get_unaligned_8_be(__p), \
-                   (void)__bug_unaligned_x(__p)))));                   \
-       }))
-
-
-static inline void __put_unaligned_2_le(__u32 __v, register __u8 *__p)
-{
-       *__p++ = __v;
-       *__p++ = __v >> 8;
-}
-
-static inline void __put_unaligned_2_be(__u32 __v, register __u8 *__p)
-{
-       *__p++ = __v >> 8;
-       *__p++ = __v;
-}
-
-static inline void __put_unaligned_4_le(__u32 __v, register __u8 *__p)
-{
-       __put_unaligned_2_le(__v >> 16, __p + 2);
-       __put_unaligned_2_le(__v, __p);
-}
-
-static inline void __put_unaligned_4_be(__u32 __v, register __u8 *__p)
-{
-       __put_unaligned_2_be(__v >> 16, __p);
-       __put_unaligned_2_be(__v, __p + 2);
-}
-
-static inline void __put_unaligned_8_le(const unsigned long long __v, register __u8 *__p)
-{
-       /*
-        * tradeoff: 8 bytes of stack for all unaligned puts (2
-        * instructions), or an extra register in the long long
-        * case - go for the extra register.
-        */
-       __put_unaligned_4_le(__v >> 32, __p+4);
-       __put_unaligned_4_le(__v, __p);
-}
-
-static inline void __put_unaligned_8_be(const unsigned long long __v, register __u8 *__p)
-{
-       /*
-        * tradeoff: 8 bytes of stack for all unaligned puts (2
-        * instructions), or an extra register in the long long
-        * case - go for the extra register.
-        */
-       __put_unaligned_4_be(__v >> 32, __p);
-       __put_unaligned_4_be(__v, __p+4);
-}
-
-/*
- * Try to store an unaligned value as efficiently as possible.
- */
-#define __put_unaligned_le(val,ptr)                                    \
-       ({                                                      \
-               (void)sizeof(*(ptr) = (val));                   \
-               switch (sizeof(*(ptr))) {                       \
-               case 1:                                         \
-                       *(ptr) = (val);                         \
-                       break;                                  \
-               case 2: __put_unaligned_2_le((__force u16)(val),(__u8 *)(ptr)); \
-                       break;                                  \
-               case 4: __put_unaligned_4_le((__force u32)(val),(__u8 *)(ptr)); \
-                       break;                                  \
-               case 8: __put_unaligned_8_le((__force u64)(val),(__u8 *)(ptr)); \
-                       break;                                  \
-               default: __bug_unaligned_x(ptr);                \
-                       break;                                  \
-               }                                               \
-               (void) 0;                                       \
-       })
-
-#define __put_unaligned_be(val,ptr)                                    \
-       ({                                                      \
-               (void)sizeof(*(ptr) = (val));                   \
-               switch (sizeof(*(ptr))) {                       \
-               case 1:                                         \
-                       *(ptr) = (val);                         \
-                       break;                                  \
-               case 2: __put_unaligned_2_be((__force u16)(val),(__u8 *)(ptr)); \
-                       break;                                  \
-               case 4: __put_unaligned_4_be((__force u32)(val),(__u8 *)(ptr)); \
-                       break;                                  \
-               case 8: __put_unaligned_8_be((__force u64)(val),(__u8 *)(ptr)); \
-                       break;                                  \
-               default: __bug_unaligned_x(ptr);                \
-                       break;                                  \
-               }                                               \
-               (void) 0;                                       \
-       })
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
 /*
  * Select endianness
@@ -178,4 +16,4 @@ static inline void __put_unaligned_8_be(const unsigned long long __v, register _
 #define put_unaligned  __put_unaligned_be
 #endif
 
-#endif
+#endif /* _ASM_ARM_UNALIGNED_H */
index 3ae7b548fce764bc04dd2e00a93e9b2f330dce9a..c0e5e29417df5454d9b286d32d326010ebcef463 100644 (file)
@@ -212,6 +212,10 @@ static inline int pte_young(pte_t pte)
 {
        return pte_val(pte) & _PAGE_ACCESSED;
 }
+static inline int pte_special(pte_t pte)
+{
+       return 0;
+}
 
 /*
  * The following only work if pte_present() is not true.
@@ -252,6 +256,10 @@ static inline pte_t pte_mkyoung(pte_t pte)
        set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED));
        return pte;
 }
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+       return pte;
+}
 
 #define pmd_none(x)    (!pmd_val(x))
 #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
index 36f5fd430543ec5d088335f52d357d58e7c75ea7..041877290470f18b3867234e9ff3459d97df8d28 100644 (file)
@@ -1,5 +1,5 @@
-#ifndef __ASM_AVR32_UNALIGNED_H
-#define __ASM_AVR32_UNALIGNED_H
+#ifndef _ASM_AVR32_UNALIGNED_H
+#define _ASM_AVR32_UNALIGNED_H
 
 /*
  * AVR32 can handle some unaligned accesses, depending on the
  * optimize word loads in general.
  */
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-#endif /* __ASM_AVR32_UNALIGNED_H */
+#define get_unaligned  __get_unaligned_be
+#define put_unaligned  __put_unaligned_be
+
+#endif /* _ASM_AVR32_UNALIGNED_H */
index 10081dc241ef6d5430a1c5db68518e5cde4b34c6..fd8a1d634945dd916e5b6204c8e266a8c245fbec 100644 (file)
@@ -1,6 +1,11 @@
-#ifndef __BFIN_UNALIGNED_H
-#define __BFIN_UNALIGNED_H
+#ifndef _ASM_BLACKFIN_UNALIGNED_H
+#define _ASM_BLACKFIN_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-#endif                         /* __BFIN_UNALIGNED_H */
+#define get_unaligned  __get_unaligned_le
+#define put_unaligned  __put_unaligned_le
+
+#endif /* _ASM_BLACKFIN_UNALIGNED_H */
index ea34e0d0a3884690a3e66ff7fd1bdeb6a24f596e..5366e62393288e20225811fd764cbfbd04fa367b 100644 (file)
@@ -59,22 +59,19 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, unsigned long data_port, u
        int i;
 
        /* fill in ports for ATA addresses 0 to 7 */
-
-       for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++) {
-               hw->io_ports[i] = data_port |
+       for (i = 0; i <= 7; i++) {
+               hw->io_ports_array[i] = data_port |
                        IO_FIELD(R_ATA_CTRL_DATA, addr, i) |
                        IO_STATE(R_ATA_CTRL_DATA, cs0, active);
        }
 
        /* the IDE control register is at ATA address 6, with CS1 active instead of CS0 */
-
-       hw->io_ports[IDE_CONTROL_OFFSET] = data_port |
+       hw->io_ports.ctl_addr = data_port |
                        IO_FIELD(R_ATA_CTRL_DATA, addr, 6) |
                        IO_STATE(R_ATA_CTRL_DATA, cs1, active);
 
        /* whats this for ? */
-
-       hw->io_ports[IDE_IRQ_OFFSET] = 0;
+       hw->io_ports.irq_addr = 0;
 }
 
 static inline void ide_init_default_hwifs(void)
index a2607575681bde237c1ccc5b58e72ba2238dc506..829e7a7d9fb9b7b75457337e178dc9a6e2591576 100644 (file)
@@ -115,6 +115,7 @@ static inline int pte_write(pte_t pte)          { return pte_val(pte) & _PAGE_WR
 static inline int pte_dirty(pte_t pte)          { return pte_val(pte) & _PAGE_MODIFIED; }
 static inline int pte_young(pte_t pte)          { return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)           { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte)       { return 0; }
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
@@ -162,6 +163,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
         }
         return pte;
 }
+static inline pte_t pte_mkspecial(pte_t pte)   { return pte; }
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
@@ -229,7 +231,7 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
 
 /* to find an entry in a page-table-directory */
-static inline pgd_t * pgd_offset(struct mm_struct * mm, unsigned long address)
+static inline pgd_t * pgd_offset(const struct mm_struct *mm, unsigned long address)
 {
        return mm->pgd + pgd_index(address);
 }
index 7fbbb399f6f18c94ff1b0fc6557665d7ba7a1d83..7b3f3fec567ca4fd3a742f2ea6fef986e2ac3fb9 100644 (file)
@@ -1,16 +1,13 @@
-#ifndef __CRIS_UNALIGNED_H
-#define __CRIS_UNALIGNED_H
+#ifndef _ASM_CRIS_UNALIGNED_H
+#define _ASM_CRIS_UNALIGNED_H
 
 /*
  * CRIS can do unaligned accesses itself. 
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
+#define get_unaligned  __get_unaligned_le
+#define put_unaligned  __put_unaligned_le
 
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
-
-#endif
+#endif /* _ASM_CRIS_UNALIGNED_H */
index 4e219046fe422910b72619c51245f3798ac6ea80..83c51aba534b727a363d22559eaa153dd90c9dda 100644 (file)
@@ -380,6 +380,7 @@ static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address)
 static inline int pte_dirty(pte_t pte)         { return (pte).pte & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)         { return (pte).pte & _PAGE_ACCESSED; }
 static inline int pte_write(pte_t pte)         { return !((pte).pte & _PAGE_WP); }
+static inline int pte_special(pte_t pte)       { return 0; }
 
 static inline pte_t pte_mkclean(pte_t pte)     { (pte).pte &= ~_PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkold(pte_t pte)       { (pte).pte &= ~_PAGE_ACCESSED; return pte; }
@@ -387,6 +388,7 @@ static inline pte_t pte_wrprotect(pte_t pte)        { (pte).pte |= _PAGE_WP; return pte
 static inline pte_t pte_mkdirty(pte_t pte)     { (pte).pte |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte)     { (pte).pte |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte)     { (pte).pte &= ~_PAGE_WP; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte)   { return pte; }
 
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
index dc8e9c9bf6bd9a54402db5ecb90619e89efbcb1d..839a2fbffa0f34c8c428b6a5793a407749e69ac8 100644 (file)
 #ifndef _ASM_UNALIGNED_H
 #define _ASM_UNALIGNED_H
 
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-/*
- * Unaligned accesses on uClinux can't be performed in a fault handler - the
- * CPU detects them as imprecise exceptions making this impossible.
- *
- * With the FR451, however, they are precise, and so we used to fix them up in
- * the memory access fault handler.  However, instruction bundling make this
- * impractical.  So, now we fall back to using memcpy.
- */
-#ifdef CONFIG_MMU
-
-/*
- * The asm statement in the macros below is a way to get GCC to copy a
- * value from one variable to another without having any clue it's
- * actually doing so, so that it won't have any idea that the values
- * in the two variables are related.
- */
-
-#define get_unaligned(ptr) ({                          \
-       typeof((*(ptr))) __x;                           \
-       void *__ptrcopy;                                \
-       asm("" : "=r" (__ptrcopy) : "0" (ptr));         \
-       memcpy(&__x, __ptrcopy, sizeof(*(ptr)));        \
-       __x;                                            \
-})
-
-#define put_unaligned(val, ptr) ({                     \
-       typeof((*(ptr))) __x = (val);                   \
-       void *__ptrcopy;                                \
-       asm("" : "=r" (__ptrcopy) : "0" (ptr));         \
-       memcpy(__ptrcopy, &__x, sizeof(*(ptr)));        \
-})
-
-extern int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0);
-
-#else
-
-#define get_unaligned(ptr)                                                     \
-({                                                                             \
-       typeof(*(ptr)) x;                                                       \
-       const char *__p = (const char *) (ptr);                                 \
-                                                                               \
-       switch (sizeof(x)) {                                                    \
-       case 1:                                                                 \
-               x = *(ptr);                                                     \
-               break;                                                          \
-       case 2:                                                                 \
-       {                                                                       \
-               uint8_t a;                                                      \
-               asm("   ldub%I2         %M2,%0          \n"                     \
-                   "   ldub%I3.p       %M3,%1          \n"                     \
-                   "   slli            %0,#8,%0        \n"                     \
-                   "   or              %0,%1,%0        \n"                     \
-                   : "=&r"(x), "=&r"(a)                                        \
-                   : "m"(__p[0]),  "m"(__p[1])                                 \
-                   );                                                          \
-               break;                                                          \
-       }                                                                       \
-                                                                               \
-       case 4:                                                                 \
-       {                                                                       \
-               uint8_t a;                                                      \
-               asm("   ldub%I2         %M2,%0          \n"                     \
-                   "   ldub%I3.p       %M3,%1          \n"                     \
-                   "   slli            %0,#8,%0        \n"                     \
-                   "   or              %0,%1,%0        \n"                     \
-                   "   ldub%I4.p       %M4,%1          \n"                     \
-                   "   slli            %0,#8,%0        \n"                     \
-                   "   or              %0,%1,%0        \n"                     \
-                   "   ldub%I5.p       %M5,%1          \n"                     \
-                   "   slli            %0,#8,%0        \n"                     \
-                   "   or              %0,%1,%0        \n"                     \
-                   : "=&r"(x), "=&r"(a)                                        \
-                   : "m"(__p[0]),  "m"(__p[1]), "m"(__p[2]), "m"(__p[3])       \
-                   );                                                          \
-               break;                                                          \
-       }                                                                       \
-                                                                               \
-       case 8:                                                                 \
-       {                                                                       \
-               union { uint64_t x; u32 y[2]; } z;                              \
-               uint8_t a;                                                      \
-               asm("   ldub%I3         %M3,%0          \n"                     \
-                   "   ldub%I4.p       %M4,%2          \n"                     \
-                   "   slli            %0,#8,%0        \n"                     \
-                   "   or              %0,%2,%0        \n"                     \
-                   "   ldub%I5.p       %M5,%2          \n"                     \
-                   "   slli            %0,#8,%0        \n"                     \
-                   "   or              %0,%2,%0        \n"                     \
-                   "   ldub%I6.p       %M6,%2          \n"                     \
-                   "   slli            %0,#8,%0        \n"                     \
-                   "   or              %0,%2,%0        \n"                     \
-                   "   ldub%I7         %M7,%1          \n"                     \
-                   "   ldub%I8.p       %M8,%2          \n"                     \
-                   "   slli            %1,#8,%1        \n"                     \
-                   "   or              %1,%2,%1        \n"                     \
-                   "   ldub%I9.p       %M9,%2          \n"                     \
-                   "   slli            %1,#8,%1        \n"                     \
-                   "   or              %1,%2,%1        \n"                     \
-                   "   ldub%I10.p      %M10,%2         \n"                     \
-                   "   slli            %1,#8,%1        \n"                     \
-                   "   or              %1,%2,%1        \n"                     \
-                   : "=&r"(z.y[0]), "=&r"(z.y[1]), "=&r"(a)                    \
-                   : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3]),       \
-                     "m"(__p[4]), "m"(__p[5]), "m"(__p[6]), "m"(__p[7])        \
-                   );                                                          \
-               x = z.x;                                                        \
-               break;                                                          \
-       }                                                                       \
-                                                                               \
-       default:                                                                \
-               x = 0;                                                          \
-               BUG();                                                          \
-               break;                                                          \
-       }                                                                       \
-                                                                               \
-       x;                                                                      \
-})
-
-#define put_unaligned(val, ptr)                                                                \
-do {                                                                                   \
-       char *__p = (char *) (ptr);                                                     \
-       int x;                                                                          \
-                                                                                       \
-       switch (sizeof(*ptr)) {                                                         \
-       case 2:                                                                         \
-       {                                                                               \
-               asm("   stb%I1.p        %0,%M1          \n"                             \
-                   "   srli            %0,#8,%0        \n"                             \
-                   "   stb%I2          %0,%M2          \n"                             \
-                   : "=r"(x), "=m"(__p[1]),  "=m"(__p[0])                              \
-                   : "0"(val)                                                          \
-                   );                                                                  \
-               break;                                                                  \
-       }                                                                               \
-                                                                                       \
-       case 4:                                                                         \
-       {                                                                               \
-               asm("   stb%I1.p        %0,%M1          \n"                             \
-                   "   srli            %0,#8,%0        \n"                             \
-                   "   stb%I2.p        %0,%M2          \n"                             \
-                   "   srli            %0,#8,%0        \n"                             \
-                   "   stb%I3.p        %0,%M3          \n"                             \
-                   "   srli            %0,#8,%0        \n"                             \
-                   "   stb%I4          %0,%M4          \n"                             \
-                   : "=r"(x), "=m"(__p[3]),  "=m"(__p[2]), "=m"(__p[1]), "=m"(__p[0])  \
-                   : "0"(val)                                                          \
-                   );                                                                  \
-               break;                                                                  \
-       }                                                                               \
-                                                                                       \
-       case 8:                                                                         \
-       {                                                                               \
-               uint32_t __high, __low;                                                 \
-               __high = (uint64_t)val >> 32;                                           \
-               __low = val & 0xffffffff;                                               \
-               asm("   stb%I2.p        %0,%M2          \n"                             \
-                   "   srli            %0,#8,%0        \n"                             \
-                   "   stb%I3.p        %0,%M3          \n"                             \
-                   "   srli            %0,#8,%0        \n"                             \
-                   "   stb%I4.p        %0,%M4          \n"                             \
-                   "   srli            %0,#8,%0        \n"                             \
-                   "   stb%I5.p        %0,%M5          \n"                             \
-                   "   srli            %0,#8,%0        \n"                             \
-                   "   stb%I6.p        %1,%M6          \n"                             \
-                   "   srli            %1,#8,%1        \n"                             \
-                   "   stb%I7.p        %1,%M7          \n"                             \
-                   "   srli            %1,#8,%1        \n"                             \
-                   "   stb%I8.p        %1,%M8          \n"                             \
-                   "   srli            %1,#8,%1        \n"                             \
-                   "   stb%I9          %1,%M9          \n"                             \
-                   : "=&r"(__low), "=&r"(__high), "=m"(__p[7]), "=m"(__p[6]),          \
-                     "=m"(__p[5]), "=m"(__p[4]), "=m"(__p[3]), "=m"(__p[2]),           \
-                     "=m"(__p[1]), "=m"(__p[0])                                        \
-                   : "0"(__low), "1"(__high)                                           \
-                   );                                                                  \
-               break;                                                                  \
-       }                                                                               \
-                                                                                       \
-        default:                                                                       \
-               *(ptr) = (val);                                                         \
-               break;                                                                  \
-       }                                                                               \
-} while(0)
-
-#endif
+#define get_unaligned  __get_unaligned_be
+#define put_unaligned  __put_unaligned_be
 
-#endif
+#endif /* _ASM_UNALIGNED_H */
index 15e6f253dda4090622d93c0d688e4350da4411ad..c9f369c4bd7ed3d9fd2e65ff5ee65634d48cf77e 100644 (file)
@@ -17,8 +17,6 @@
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/find.h>
 
-#ifdef __KERNEL__
-
 #ifndef _LINUX_BITOPS_H
 #error only <linux/bitops.h> can be included directly
 #endif
@@ -32,6 +30,4 @@
 #include <asm-generic/bitops/ext2-atomic.h>
 #include <asm-generic/bitops/minix.h>
 
-#endif /* __KERNEL__ */
-
 #endif /* _ASM_GENERIC_BITOPS_H */
diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
new file mode 100644 (file)
index 0000000..be24465
--- /dev/null
@@ -0,0 +1,43 @@
+#ifndef _ASM_GENERIC_BITOPS___FLS_H_
+#define _ASM_GENERIC_BITOPS___FLS_H_
+
+#include <asm/types.h>
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __fls(unsigned long word)
+{
+       int num = BITS_PER_LONG - 1;
+
+#if BITS_PER_LONG == 64
+       if (!(word & (~0ul << 32))) {
+               num -= 32;
+               word <<= 32;
+       }
+#endif
+       if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
+               num -= 16;
+               word <<= 16;
+       }
+       if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
+               num -= 8;
+               word <<= 8;
+       }
+       if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
+               num -= 4;
+               word <<= 4;
+       }
+       if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
+               num -= 2;
+               word <<= 2;
+       }
+       if (!(word & (~0ul << (BITS_PER_LONG-1))))
+               num -= 1;
+       return num;
+}
+
+#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
index 72a51e5a12ef1b3738f25249eeed3417745788c1..1914e9742512db6d9bd5e9be5bc9be30ad510c0f 100644 (file)
@@ -1,11 +1,13 @@
 #ifndef _ASM_GENERIC_BITOPS_FIND_H_
 #define _ASM_GENERIC_BITOPS_FIND_H_
 
+#ifndef CONFIG_GENERIC_FIND_NEXT_BIT
 extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
                size, unsigned long offset);
 
 extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
                long size, unsigned long offset);
+#endif
 
 #define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
 #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
index 1b6b17ce2428091e605c8cbda7103b1d42836780..86d403f8b256815761270e822b84b438cc87f5d2 100644 (file)
@@ -3,6 +3,18 @@
 
 #include <asm/types.h>
 
+/**
+ * fls64 - find last set bit in a 64-bit word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
+ */
+#if BITS_PER_LONG == 32
 static inline int fls64(__u64 x)
 {
        __u32 h = x >> 32;
@@ -10,5 +22,15 @@ static inline int fls64(__u64 x)
                return fls(h) + 32;
        return fls(x);
 }
+#elif BITS_PER_LONG == 64
+static inline int fls64(__u64 x)
+{
+       if (x == 0)
+               return 0;
+       return __fls(x) + 1;
+}
+#else
+#error BITS_PER_LONG not 32 or 64
+#endif
 
 #endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
index a4a49370793c49ac48a52d52b20691863d4c6483..8f4e3193342e589b8c619e8c2673c3d5544e49a9 100644 (file)
        __rem;                                                  \
  })
 
-static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
-{
-       return dividend / divisor;
-}
-
 #elif BITS_PER_LONG == 32
 
 extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
@@ -54,8 +49,6 @@ extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
        __rem;                                          \
  })
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-
 #else /* BITS_PER_LONG == ?? */
 
 # error do_div() does not yet support the C64
index f422df0956a27025deb4c6cdbbc5a47646f74c9e..3c2344f48136d98351f92b6e9d678e29c052658c 100644 (file)
@@ -1,11 +1,9 @@
 #ifndef _ASM_GENERIC_FUTEX_H
 #define _ASM_GENERIC_FUTEX_H
 
-#ifdef __KERNEL__
-
 #include <linux/futex.h>
+#include <linux/uaccess.h>
 #include <asm/errno.h>
-#include <asm/uaccess.h>
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
@@ -56,4 +54,3 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
 }
 
 #endif
-#endif
index f29a502f4a6c3d1a656ae5142f52b98fe6421408..ecf675a59d2197d00f934aa89f4506028a725839 100644 (file)
 #define ARCH_NR_GPIOS          256
 #endif
 
+static inline int gpio_is_valid(int number)
+{
+       /* only some non-negative numbers are valid */
+       return ((unsigned)number) < ARCH_NR_GPIOS;
+}
+
 struct seq_file;
+struct module;
 
 /**
  * struct gpio_chip - abstract a GPIO controller
@@ -48,6 +55,7 @@ struct seq_file;
  */
 struct gpio_chip {
        char                    *label;
+       struct module           *owner;
 
        int                     (*direction_input)(struct gpio_chip *chip,
                                                unsigned offset);
@@ -66,6 +74,7 @@ struct gpio_chip {
 
 extern const char *gpiochip_is_requested(struct gpio_chip *chip,
                        unsigned offset);
+extern int __init __must_check gpiochip_reserve(int start, int ngpio);
 
 /* add/remove chips */
 extern int gpiochip_add(struct gpio_chip *chip);
@@ -97,6 +106,12 @@ extern int __gpio_cansleep(unsigned gpio);
 
 #else
 
+static inline int gpio_is_valid(int number)
+{
+       /* only non-negative numbers are valid */
+       return number >= 0;
+}
+
 /* platforms that don't directly support access to GPIOs through I2C, SPI,
  * or other blocking infrastructure can use these wrappers.
  */
index cd027298beb13bcc5ac043f78b826cfb8dfdace5..864181385579891496f396bee440d8b3b0884117 100644 (file)
  */
 #define _IOC_NRBITS    8
 #define _IOC_TYPEBITS  8
-#define _IOC_SIZEBITS  14
-#define _IOC_DIRBITS   2
+
+/*
+ * Let any architecture override either of the following before
+ * including this file.
+ */
+
+#ifndef _IOC_SIZEBITS
+# define _IOC_SIZEBITS 14
+#endif
+
+#ifndef _IOC_DIRBITS
+# define _IOC_DIRBITS  2
+#endif
 
 #define _IOC_NRMASK    ((1 << _IOC_NRBITS)-1)
 #define _IOC_TYPEMASK  ((1 << _IOC_TYPEBITS)-1)
 #define _IOC_DIRSHIFT  (_IOC_SIZESHIFT+_IOC_SIZEBITS)
 
 /*
- * Direction bits.
+ * Direction bits, which any architecture can choose to override
+ * before including this file.
  */
-#define _IOC_NONE      0U
-#define _IOC_WRITE     1U
-#define _IOC_READ      2U
+
+#ifndef _IOC_NONE
+# define _IOC_NONE     0U
+#endif
+
+#ifndef _IOC_WRITE
+# define _IOC_WRITE    1U
+#endif
+
+#ifndef _IOC_READ
+# define _IOC_READ     2U
+#endif
 
 #define _IOC(dir,type,nr,size) \
        (((dir)  << _IOC_DIRSHIFT) | \
index 52226e14bd7dba34b483a77ba22424e4241f8145..ae060c62aff1af5e5db8a4366b73d305b8f362db 100644 (file)
@@ -1,7 +1,6 @@
 #ifndef __ASM_MEMORY_MODEL_H
 #define __ASM_MEMORY_MODEL_H
 
-#ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
 #if defined(CONFIG_FLATMEM)
@@ -81,6 +80,5 @@ extern unsigned long page_to_pfn(struct page *page);
 #endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
 
 #endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
 
 #endif
index a96b5d986b6e8b1f05fb896adacae85b4243149c..14db733b8e68e7df3cb95bb685eb97cc0125a094 100644 (file)
@@ -1,7 +1,6 @@
 #ifndef _ASM_GENERIC_PAGE_H
 #define _ASM_GENERIC_PAGE_H
 
-#ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
 #include <linux/compiler.h>
@@ -21,6 +20,5 @@ static __inline__ __attribute_const__ int get_order(unsigned long size)
 }
 
 #endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
 
 #endif /* _ASM_GENERIC_PAGE_H */
index dd1bed860e6487a3c325c2dfd66570efe8616066..be4af0029ac0b451cd89c91f239d3eca231a2e62 100644 (file)
@@ -12,8 +12,6 @@
 #ifndef __ASM_RTC_H__
 #define __ASM_RTC_H__
 
-#ifdef __KERNEL__
-
 #include <linux/mc146818rtc.h>
 #include <linux/rtc.h>
 #include <linux/bcd.h>
@@ -213,5 +211,4 @@ static inline int set_rtc_pll(struct rtc_pll_info *pll)
        return -EINVAL;
 }
 
-#endif /* __KERNEL__ */
 #endif /* __ASM_RTC_H__ */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
deleted file mode 100644 (file)
index 2fe1b2e..0000000
+++ /dev/null
@@ -1,124 +0,0 @@
-#ifndef _ASM_GENERIC_UNALIGNED_H_
-#define _ASM_GENERIC_UNALIGNED_H_
-
-/*
- * For the benefit of those who are trying to port Linux to another
- * architecture, here are some C-language equivalents. 
- *
- * This is based almost entirely upon Richard Henderson's
- * asm-alpha/unaligned.h implementation.  Some comments were
- * taken from David Mosberger's asm-ia64/unaligned.h header.
- */
-
-#include <linux/types.h>
-
-/* 
- * The main single-value unaligned transfer routines.
- */
-#define get_unaligned(ptr) \
-       __get_unaligned((ptr), sizeof(*(ptr)))
-#define put_unaligned(x,ptr) \
-       ((void)sizeof(*(ptr)=(x)),\
-       __put_unaligned((__force __u64)(x), (ptr), sizeof(*(ptr))))
-
-/*
- * This function doesn't actually exist.  The idea is that when
- * someone uses the macros below with an unsupported size (datatype),
- * the linker will alert us to the problem via an unresolved reference
- * error.
- */
-extern void bad_unaligned_access_length(void) __attribute__((noreturn));
-
-struct __una_u64 { __u64 x __attribute__((packed)); };
-struct __una_u32 { __u32 x __attribute__((packed)); };
-struct __una_u16 { __u16 x __attribute__((packed)); };
-
-/*
- * Elemental unaligned loads 
- */
-
-static inline __u64 __uldq(const __u64 *addr)
-{
-       const struct __una_u64 *ptr = (const struct __una_u64 *) addr;
-       return ptr->x;
-}
-
-static inline __u32 __uldl(const __u32 *addr)
-{
-       const struct __una_u32 *ptr = (const struct __una_u32 *) addr;
-       return ptr->x;
-}
-
-static inline __u16 __uldw(const __u16 *addr)
-{
-       const struct __una_u16 *ptr = (const struct __una_u16 *) addr;
-       return ptr->x;
-}
-
-/*
- * Elemental unaligned stores 
- */
-
-static inline void __ustq(__u64 val, __u64 *addr)
-{
-       struct __una_u64 *ptr = (struct __una_u64 *) addr;
-       ptr->x = val;
-}
-
-static inline void __ustl(__u32 val, __u32 *addr)
-{
-       struct __una_u32 *ptr = (struct __una_u32 *) addr;
-       ptr->x = val;
-}
-
-static inline void __ustw(__u16 val, __u16 *addr)
-{
-       struct __una_u16 *ptr = (struct __una_u16 *) addr;
-       ptr->x = val;
-}
-
-#define __get_unaligned(ptr, size) ({          \
-       const void *__gu_p = ptr;               \
-       __u64 __val;                            \
-       switch (size) {                         \
-       case 1:                                 \
-               __val = *(const __u8 *)__gu_p;  \
-               break;                          \
-       case 2:                                 \
-               __val = __uldw(__gu_p);         \
-               break;                          \
-       case 4:                                 \
-               __val = __uldl(__gu_p);         \
-               break;                          \
-       case 8:                                 \
-               __val = __uldq(__gu_p);         \
-               break;                          \
-       default:                                \
-               bad_unaligned_access_length();  \
-       };                                      \
-       (__force __typeof__(*(ptr)))__val;      \
-})
-
-#define __put_unaligned(val, ptr, size)                \
-({                                             \
-       void *__gu_p = ptr;                     \
-       switch (size) {                         \
-       case 1:                                 \
-               *(__u8 *)__gu_p = (__force __u8)val;            \
-               break;                          \
-       case 2:                                 \
-               __ustw((__force __u16)val, __gu_p);             \
-               break;                          \
-       case 4:                                 \
-               __ustl((__force __u32)val, __gu_p);             \
-               break;                          \
-       case 8:                                 \
-               __ustq(val, __gu_p);            \
-               break;                          \
-       default:                                \
-               bad_unaligned_access_length();  \
-       };                                      \
-       (void)0;                                \
-})
-
-#endif /* _ASM_GENERIC_UNALIGNED_H */
index ffb67f472070c995d362d7cc6ac2698276c8becd..b8d06c70c2da44b5e78ea629c40467951e6e4c77 100644 (file)
@@ -1,15 +1,11 @@
-#ifndef __H8300_UNALIGNED_H
-#define __H8300_UNALIGNED_H
+#ifndef _ASM_H8300_UNALIGNED_H
+#define _ASM_H8300_UNALIGNED_H
 
+#include <linux/unaligned/be_memmove.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
+#define get_unaligned  __get_unaligned_be
+#define put_unaligned  __put_unaligned_be
 
-#define get_unaligned(ptr) \
-  ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
-
-#define put_unaligned(val, ptr)                                \
-  ({ __typeof__(*(ptr)) __tmp = (val);                 \
-     memmove((ptr), &__tmp, sizeof(*(ptr)));           \
-     (void)0; })
-
-#endif
+#endif /* _ASM_H8300_UNALIGNED_H */
index 953d3df9dd22670f16fec92440a9dc18f5431593..e2ca800373351d26b1042394275ebe10f6c8d23e 100644 (file)
@@ -407,6 +407,22 @@ fls (int t)
        return ia64_popcnt(x);
 }
 
+/*
+ * Find the last (most significant) bit set.  Undefined for x==0.
+ * Bits are numbered from 0..63 (e.g., __fls(9) == 3).
+ */
+static inline unsigned long
+__fls (unsigned long x)
+{
+       x |= x >> 1;
+       x |= x >> 2;
+       x |= x >> 4;
+       x |= x >> 8;
+       x |= x >> 16;
+       x |= x >> 32;
+       return ia64_popcnt(x) - 1;
+}
+
 #include <asm-generic/bitops/fls64.h>
 
 /*
index f1735a22d0ea28cd08ce5be1413c78cd1e397061..9f0df9bd46b7f1a772f5a121782a73b70b753af4 100644 (file)
@@ -23,10 +23,30 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
 {
        dma_free_coherent(dev, size, cpu_addr, dma_handle);
 }
-#define dma_map_single         platform_dma_map_single
-#define dma_map_sg             platform_dma_map_sg
-#define dma_unmap_single       platform_dma_unmap_single
-#define dma_unmap_sg           platform_dma_unmap_sg
+#define dma_map_single_attrs   platform_dma_map_single_attrs
+static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+                                       size_t size, int dir)
+{
+       return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
+}
+#define dma_map_sg_attrs       platform_dma_map_sg_attrs
+static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl,
+                            int nents, int dir)
+{
+       return dma_map_sg_attrs(dev, sgl, nents, dir, NULL);
+}
+#define dma_unmap_single_attrs platform_dma_unmap_single_attrs
+static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr,
+                                   size_t size, int dir)
+{
+       return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL);
+}
+#define dma_unmap_sg_attrs     platform_dma_unmap_sg_attrs
+static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
+                               int nents, int dir)
+{
+       return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL);
+}
 #define dma_sync_single_for_cpu        platform_dma_sync_single_for_cpu
 #define dma_sync_sg_for_cpu    platform_dma_sync_sg_for_cpu
 #define dma_sync_single_for_device platform_dma_sync_single_for_device
index f3efaa229525f287dcc4e27a93071c876c770f16..00eb1b130b63fb91cc6594cfa4b2046f4a8cd7d7 100644 (file)
@@ -3,4 +3,9 @@
 
 #include <asm/io.h>
 
+/* Use normal IO mappings for DMI */
+#define dmi_ioremap ioremap
+#define dmi_iounmap(x,l) iounmap(x)
+#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
+
 #endif
index 8a98a26541391ed17cbcebf97b0b98ef5a1efa31..c7f0f062239cd541112ecbe10cdd34dc54672eec 100644 (file)
@@ -2,9 +2,9 @@
 #define _ASM_FUTEX_H
 
 #include <linux/futex.h>
+#include <linux/uaccess.h>
 #include <asm/errno.h>
 #include <asm/system.h>
-#include <asm/uaccess.h>
 
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
 do {                                                                   \
index de2ed2cbdd845d9404c4036de78cba661f7a6ea5..2fe292c275fe9ab142271e0015b1a87391698f38 100644 (file)
 
 #define ia64_invala_fr(regnum) asm volatile ("invala.e f%0" :: "i"(regnum))
 
+#define ia64_flushrs() asm volatile ("flushrs;;":::"memory")
+
+#define ia64_loadrs() asm volatile ("loadrs;;":::"memory")
+
 extern void ia64_bad_param_for_setreg (void);
 extern void ia64_bad_param_for_getreg (void);
 
@@ -517,6 +521,14 @@ do {                                                                               \
 #define ia64_ptrd(addr, size)                                          \
        asm volatile ("ptr.d %0,%1" :: "r"(addr), "r"(size) : "memory")
 
+#define ia64_ttag(addr)                                                        \
+({                                                                       \
+       __u64 ia64_intri_res;                                              \
+       asm volatile ("ttag %0=%1" : "=r"(ia64_intri_res) : "r" (addr));   \
+       ia64_intri_res;                                                  \
+})
+
+
 /* Values for lfhint in ia64_lfetch and ia64_lfetch_fault */
 
 #define ia64_lfhint_none   0
diff --git a/include/asm-ia64/hugetlb.h b/include/asm-ia64/hugetlb.h
new file mode 100644 (file)
index 0000000..f28a970
--- /dev/null
@@ -0,0 +1,79 @@
+#ifndef _ASM_IA64_HUGETLB_H
+#define _ASM_IA64_HUGETLB_H
+
+#include <asm/page.h>
+
+
+void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+                           unsigned long end, unsigned long floor,
+                           unsigned long ceiling);
+
+int prepare_hugepage_range(unsigned long addr, unsigned long len);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+                                        unsigned long addr,
+                                        unsigned long len)
+{
+       return (REGION_NUMBER(addr) == RGN_HPAGE ||
+               REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE);
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                                  pte_t *ptep, pte_t pte)
+{
+       set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                                           unsigned long addr, pte_t *ptep)
+{
+       return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+       return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+       return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+#endif /* _ASM_IA64_HUGETLB_H */
index 4ebed77aa4726c5263e0ff1a0ff5dfacc60daf11..260a85ac9d6a6bc4a1784520522e94a686283574 100644 (file)
@@ -423,11 +423,6 @@ extern void __iomem * ioremap(unsigned long offset, unsigned long size);
 extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
 extern void iounmap (volatile void __iomem *addr);
 
-/* Use normal IO mappings for DMI */
-#define dmi_ioremap ioremap
-#define dmi_iounmap(x,l) iounmap(x)
-#define dmi_alloc(l) kmalloc(l, GFP_ATOMIC)
-
 /*
  * String version of IO memory access ops:
  */
index 030d29b4b26b249a116cef8d4bf46337346d115d..eb2d3559d089a52c24d7df5cecc1bf0b82cccdab 100644 (file)
@@ -1,6 +1,205 @@
-#ifndef __LINUX_KVM_IA64_H
-#define __LINUX_KVM_IA64_H
+#ifndef __ASM_IA64_KVM_H
+#define __ASM_IA64_KVM_H
 
-/* ia64 does not support KVM */
+/*
+ * asm-ia64/kvm.h: kvm structure definitions  for ia64
+ *
+ * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#include <asm/types.h>
+#include <asm/fpu.h>
+
+#include <linux/ioctl.h>
+
+/* Architectural interrupt line count. */
+#define KVM_NR_INTERRUPTS 256
+
+#define KVM_IOAPIC_NUM_PINS  24
+
+struct kvm_ioapic_state {
+       __u64 base_address;
+       __u32 ioregsel;
+       __u32 id;
+       __u32 irr;
+       __u32 pad;
+       union {
+               __u64 bits;
+               struct {
+                       __u8 vector;
+                       __u8 delivery_mode:3;
+                       __u8 dest_mode:1;
+                       __u8 delivery_status:1;
+                       __u8 polarity:1;
+                       __u8 remote_irr:1;
+                       __u8 trig_mode:1;
+                       __u8 mask:1;
+                       __u8 reserve:7;
+                       __u8 reserved[4];
+                       __u8 dest_id;
+               } fields;
+       } redirtbl[KVM_IOAPIC_NUM_PINS];
+};
+
+#define KVM_IRQCHIP_PIC_MASTER   0
+#define KVM_IRQCHIP_PIC_SLAVE    1
+#define KVM_IRQCHIP_IOAPIC       2
+
+#define KVM_CONTEXT_SIZE       8*1024
+
+union context {
+       /* 8K size */
+       char    dummy[KVM_CONTEXT_SIZE];
+       struct {
+               unsigned long       psr;
+               unsigned long       pr;
+               unsigned long       caller_unat;
+               unsigned long       pad;
+               unsigned long       gr[32];
+               unsigned long       ar[128];
+               unsigned long       br[8];
+               unsigned long       cr[128];
+               unsigned long       rr[8];
+               unsigned long       ibr[8];
+               unsigned long       dbr[8];
+               unsigned long       pkr[8];
+               struct ia64_fpreg   fr[128];
+       };
+};
+
+struct thash_data {
+       union {
+               struct {
+                       unsigned long p    :  1; /* 0 */
+                       unsigned long rv1  :  1; /* 1 */
+                       unsigned long ma   :  3; /* 2-4 */
+                       unsigned long a    :  1; /* 5 */
+                       unsigned long d    :  1; /* 6 */
+                       unsigned long pl   :  2; /* 7-8 */
+                       unsigned long ar   :  3; /* 9-11 */
+                       unsigned long ppn  : 38; /* 12-49 */
+                       unsigned long rv2  :  2; /* 50-51 */
+                       unsigned long ed   :  1; /* 52 */
+                       unsigned long ig1  : 11; /* 53-63 */
+               };
+               struct {
+                       unsigned long __rv1 : 53;     /* 0-52 */
+                       unsigned long contiguous : 1; /*53 */
+                       unsigned long tc : 1;         /* 54 TR or TC */
+                       unsigned long cl : 1;
+                       /* 55 I side or D side cache line */
+                       unsigned long len  :  4;      /* 56-59 */
+                       unsigned long io  : 1;  /* 60 entry is for io or not */
+                       unsigned long nomap : 1;
+                       /* 61 entry cann't be inserted into machine TLB.*/
+                       unsigned long checked : 1;
+                       /* 62 for VTLB/VHPT sanity check */
+                       unsigned long invalid : 1;
+                       /* 63 invalid entry */
+               };
+               unsigned long page_flags;
+       };                  /* same for VHPT and TLB */
+
+       union {
+               struct {
+                       unsigned long rv3  :  2;
+                       unsigned long ps   :  6;
+                       unsigned long key  : 24;
+                       unsigned long rv4  : 32;
+               };
+               unsigned long itir;
+       };
+       union {
+               struct {
+                       unsigned long ig2  :  12;
+                       unsigned long vpn  :  49;
+                       unsigned long vrn  :   3;
+               };
+               unsigned long ifa;
+               unsigned long vadr;
+               struct {
+                       unsigned long tag  :  63;
+                       unsigned long ti   :  1;
+               };
+               unsigned long etag;
+       };
+       union {
+               struct thash_data *next;
+               unsigned long rid;
+               unsigned long gpaddr;
+       };
+};
+
+#define        NITRS   8
+#define NDTRS  8
+
+struct saved_vpd {
+       unsigned long  vhpi;
+       unsigned long  vgr[16];
+       unsigned long  vbgr[16];
+       unsigned long  vnat;
+       unsigned long  vbnat;
+       unsigned long  vcpuid[5];
+       unsigned long  vpsr;
+       unsigned long  vpr;
+       unsigned long  vcr[128];
+};
+
+struct kvm_regs {
+       char *saved_guest;
+       char *saved_stack;
+       struct saved_vpd vpd;
+       /*Arch-regs*/
+       int mp_state;
+       unsigned long vmm_rr;
+       /* TR and TC.  */
+       struct thash_data itrs[NITRS];
+       struct thash_data dtrs[NDTRS];
+       /* Bit is set if there is a tr/tc for the region.  */
+       unsigned char itr_regions;
+       unsigned char dtr_regions;
+       unsigned char tc_regions;
+
+       char irq_check;
+       unsigned long saved_itc;
+       unsigned long itc_check;
+       unsigned long timer_check;
+       unsigned long timer_pending;
+       unsigned long last_itc;
+
+       unsigned long vrr[8];
+       unsigned long ibr[8];
+       unsigned long dbr[8];
+       unsigned long insvc[4];         /* Interrupt in service.  */
+       unsigned long xtp;
+
+       unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
+       unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
+       unsigned long metaphysical_saved_rr0; /* from kvm_arch          */
+       unsigned long metaphysical_saved_rr4; /* from kvm_arch          */
+       unsigned long fp_psr;       /*used for lazy float register */
+       unsigned long saved_gp;
+       /*for phycial  emulation */
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
 
 #endif
diff --git a/include/asm-ia64/kvm_host.h b/include/asm-ia64/kvm_host.h
new file mode 100644 (file)
index 0000000..c082c20
--- /dev/null
@@ -0,0 +1,524 @@
+/*
+ * kvm_host.h: used for kvm module, and hold ia64-specific sections.
+ *
+ * Copyright (C) 2007, Intel Corporation.
+ *
+ * Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+#ifndef __ASM_KVM_HOST_H
+#define __ASM_KVM_HOST_H
+
+
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/kvm.h>
+#include <linux/kvm_para.h>
+#include <linux/kvm_types.h>
+
+#include <asm/pal.h>
+#include <asm/sal.h>
+
+#define KVM_MAX_VCPUS 4
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that does not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+
+/* define exit reasons from vmm to kvm*/
+#define EXIT_REASON_VM_PANIC           0
+#define EXIT_REASON_MMIO_INSTRUCTION   1
+#define EXIT_REASON_PAL_CALL           2
+#define EXIT_REASON_SAL_CALL           3
+#define EXIT_REASON_SWITCH_RR6         4
+#define EXIT_REASON_VM_DESTROY         5
+#define EXIT_REASON_EXTERNAL_INTERRUPT 6
+#define EXIT_REASON_IPI                        7
+#define EXIT_REASON_PTC_G              8
+
+/*Define vmm address space and vm data space.*/
+#define KVM_VMM_SIZE (16UL<<20)
+#define KVM_VMM_SHIFT 24
+#define KVM_VMM_BASE 0xD000000000000000UL
+#define VMM_SIZE (8UL<<20)
+
+/*
+ * Define vm_buffer, used by PAL Services, base address.
+ * Note: vmbuffer is in the VMM-BLOCK, the size must be < 8M
+ */
+#define KVM_VM_BUFFER_BASE (KVM_VMM_BASE + VMM_SIZE)
+#define KVM_VM_BUFFER_SIZE (8UL<<20)
+
+/*Define Virtual machine data layout.*/
+#define KVM_VM_DATA_SHIFT  24
+#define KVM_VM_DATA_SIZE (1UL << KVM_VM_DATA_SHIFT)
+#define KVM_VM_DATA_BASE (KVM_VMM_BASE + KVM_VMM_SIZE)
+
+
+#define KVM_P2M_BASE    KVM_VM_DATA_BASE
+#define KVM_P2M_OFS     0
+#define KVM_P2M_SIZE    (8UL << 20)
+
+#define KVM_VHPT_BASE   (KVM_P2M_BASE + KVM_P2M_SIZE)
+#define KVM_VHPT_OFS    KVM_P2M_SIZE
+#define KVM_VHPT_BLOCK_SIZE   (2UL << 20)
+#define VHPT_SHIFT      18
+#define VHPT_SIZE       (1UL << VHPT_SHIFT)
+#define VHPT_NUM_ENTRIES (1<<(VHPT_SHIFT-5))
+
+#define KVM_VTLB_BASE   (KVM_VHPT_BASE+KVM_VHPT_BLOCK_SIZE)
+#define KVM_VTLB_OFS    (KVM_VHPT_OFS+KVM_VHPT_BLOCK_SIZE)
+#define KVM_VTLB_BLOCK_SIZE   (1UL<<20)
+#define VTLB_SHIFT      17
+#define VTLB_SIZE       (1UL<<VTLB_SHIFT)
+#define VTLB_NUM_ENTRIES (1<<(VTLB_SHIFT-5))
+
+#define KVM_VPD_BASE   (KVM_VTLB_BASE+KVM_VTLB_BLOCK_SIZE)
+#define KVM_VPD_OFS    (KVM_VTLB_OFS+KVM_VTLB_BLOCK_SIZE)
+#define KVM_VPD_BLOCK_SIZE   (2UL<<20)
+#define VPD_SHIFT       16
+#define VPD_SIZE        (1UL<<VPD_SHIFT)
+
+#define KVM_VCPU_BASE   (KVM_VPD_BASE+KVM_VPD_BLOCK_SIZE)
+#define KVM_VCPU_OFS    (KVM_VPD_OFS+KVM_VPD_BLOCK_SIZE)
+#define KVM_VCPU_BLOCK_SIZE   (2UL<<20)
+#define VCPU_SHIFT 18
+#define VCPU_SIZE (1UL<<VCPU_SHIFT)
+#define MAX_VCPU_NUM KVM_VCPU_BLOCK_SIZE/VCPU_SIZE
+
+#define KVM_VM_BASE     (KVM_VCPU_BASE+KVM_VCPU_BLOCK_SIZE)
+#define KVM_VM_OFS      (KVM_VCPU_OFS+KVM_VCPU_BLOCK_SIZE)
+#define KVM_VM_BLOCK_SIZE     (1UL<<19)
+
+#define KVM_MEM_DIRTY_LOG_BASE (KVM_VM_BASE+KVM_VM_BLOCK_SIZE)
+#define KVM_MEM_DIRTY_LOG_OFS  (KVM_VM_OFS+KVM_VM_BLOCK_SIZE)
+#define KVM_MEM_DIRTY_LOG_SIZE (1UL<<19)
+
+/* Get vpd, vhpt, tlb, vcpu, base*/
+#define VPD_ADDR(n) (KVM_VPD_BASE+n*VPD_SIZE)
+#define VHPT_ADDR(n) (KVM_VHPT_BASE+n*VHPT_SIZE)
+#define VTLB_ADDR(n) (KVM_VTLB_BASE+n*VTLB_SIZE)
+#define VCPU_ADDR(n) (KVM_VCPU_BASE+n*VCPU_SIZE)
+
+/*IO section definitions*/
+#define IOREQ_READ      1
+#define IOREQ_WRITE     0
+
+#define STATE_IOREQ_NONE        0
+#define STATE_IOREQ_READY       1
+#define STATE_IOREQ_INPROCESS   2
+#define STATE_IORESP_READY      3
+
+/*Guest Physical address layout.*/
+#define GPFN_MEM        (0UL << 60) /* Guest pfn is normal mem */
+#define GPFN_FRAME_BUFFER   (1UL << 60) /* VGA framebuffer */
+#define GPFN_LOW_MMIO       (2UL << 60) /* Low MMIO range */
+#define GPFN_PIB        (3UL << 60) /* PIB base */
+#define GPFN_IOSAPIC        (4UL << 60) /* IOSAPIC base */
+#define GPFN_LEGACY_IO      (5UL << 60) /* Legacy I/O base */
+#define GPFN_GFW        (6UL << 60) /* Guest Firmware */
+#define GPFN_HIGH_MMIO      (7UL << 60) /* High MMIO range */
+
+#define GPFN_IO_MASK        (7UL << 60) /* Guest pfn is I/O type */
+#define GPFN_INV_MASK       (1UL << 63) /* Guest pfn is invalid */
+#define INVALID_MFN       (~0UL)
+#define MEM_G   (1UL << 30)
+#define MEM_M   (1UL << 20)
+#define MMIO_START       (3 * MEM_G)
+#define MMIO_SIZE        (512 * MEM_M)
+#define VGA_IO_START     0xA0000UL
+#define VGA_IO_SIZE      0x20000
+#define LEGACY_IO_START  (MMIO_START + MMIO_SIZE)
+#define LEGACY_IO_SIZE   (64 * MEM_M)
+#define IO_SAPIC_START   0xfec00000UL
+#define IO_SAPIC_SIZE    0x100000
+#define PIB_START 0xfee00000UL
+#define PIB_SIZE 0x200000
+#define GFW_START        (4 * MEM_G - 16 * MEM_M)
+#define GFW_SIZE         (16 * MEM_M)
+
+/*Deliver mode, defined for ioapic.c*/
+#define dest_Fixed IOSAPIC_FIXED
+#define dest_LowestPrio IOSAPIC_LOWEST_PRIORITY
+
+#define NMI_VECTOR                     2
+#define ExtINT_VECTOR                  0
+#define NULL_VECTOR                    (-1)
+#define IA64_SPURIOUS_INT_VECTOR       0x0f
+
+#define VCPU_LID(v) (((u64)(v)->vcpu_id) << 24)
+
+/*
+ *Delivery mode
+ */
+#define SAPIC_DELIV_SHIFT      8
+#define SAPIC_FIXED            0x0
+#define SAPIC_LOWEST_PRIORITY  0x1
+#define SAPIC_PMI              0x2
+#define SAPIC_NMI              0x4
+#define SAPIC_INIT             0x5
+#define SAPIC_EXTINT           0x7
+
+/*
+ * vcpu->requests bit members for arch
+ */
+#define KVM_REQ_PTC_G          32
+#define KVM_REQ_RESUME         33
+
+#define KVM_PAGES_PER_HPAGE    1
+
+struct kvm;
+struct kvm_vcpu;
+struct kvm_guest_debug{
+};
+
+struct kvm_mmio_req {
+       uint64_t addr;          /*  physical address            */
+       uint64_t size;          /*  size in bytes               */
+       uint64_t data;          /*  data (or paddr of data)     */
+       uint8_t state:4;
+       uint8_t dir:1;          /*  1=read, 0=write             */
+};
+
+/*Pal data struct */
+struct kvm_pal_call{
+       /*In area*/
+       uint64_t gr28;
+       uint64_t gr29;
+       uint64_t gr30;
+       uint64_t gr31;
+       /*Out area*/
+       struct ia64_pal_retval ret;
+};
+
+/* Sal data structure */
+struct kvm_sal_call{
+       /*In area*/
+       uint64_t in0;
+       uint64_t in1;
+       uint64_t in2;
+       uint64_t in3;
+       uint64_t in4;
+       uint64_t in5;
+       uint64_t in6;
+       uint64_t in7;
+       struct sal_ret_values ret;
+};
+
+/*Guest change rr6*/
+struct kvm_switch_rr6 {
+       uint64_t old_rr;
+       uint64_t new_rr;
+};
+
+union ia64_ipi_a{
+       unsigned long val;
+       struct {
+               unsigned long rv  : 3;
+               unsigned long ir  : 1;
+               unsigned long eid : 8;
+               unsigned long id  : 8;
+               unsigned long ib_base : 44;
+       };
+};
+
+union ia64_ipi_d {
+       unsigned long val;
+       struct {
+               unsigned long vector : 8;
+               unsigned long dm  : 3;
+               unsigned long ig  : 53;
+       };
+};
+
+/*ipi check exit data*/
+struct kvm_ipi_data{
+       union ia64_ipi_a addr;
+       union ia64_ipi_d data;
+};
+
+/*global purge data*/
+struct kvm_ptc_g {
+       unsigned long vaddr;
+       unsigned long rr;
+       unsigned long ps;
+       struct kvm_vcpu *vcpu;
+};
+
+/*Exit control data */
+struct exit_ctl_data{
+       uint32_t exit_reason;
+       uint32_t vm_status;
+       union {
+               struct kvm_mmio_req     ioreq;
+               struct kvm_pal_call     pal_data;
+               struct kvm_sal_call     sal_data;
+               struct kvm_switch_rr6   rr_data;
+               struct kvm_ipi_data     ipi_data;
+               struct kvm_ptc_g        ptc_g_data;
+       } u;
+};
+
+union pte_flags {
+       unsigned long val;
+       struct {
+               unsigned long p    :  1; /*0      */
+               unsigned long      :  1; /* 1     */
+               unsigned long ma   :  3; /* 2-4   */
+               unsigned long a    :  1; /* 5     */
+               unsigned long d    :  1; /* 6     */
+               unsigned long pl   :  2; /* 7-8   */
+               unsigned long ar   :  3; /* 9-11  */
+               unsigned long ppn  : 38; /* 12-49 */
+               unsigned long      :  2; /* 50-51 */
+               unsigned long ed   :  1; /* 52    */
+       };
+};
+
+union ia64_pta {
+       unsigned long val;
+       struct {
+               unsigned long ve : 1;
+               unsigned long reserved0 : 1;
+               unsigned long size : 6;
+               unsigned long vf : 1;
+               unsigned long reserved1 : 6;
+               unsigned long base : 49;
+       };
+};
+
+struct thash_cb {
+       /* THASH base information */
+       struct thash_data       *hash; /* hash table pointer */
+       union ia64_pta          pta;
+       int           num;
+};
+
+struct kvm_vcpu_stat {
+};
+
+struct kvm_vcpu_arch {
+       int launched;
+       int last_exit;
+       int last_run_cpu;
+       int vmm_tr_slot;
+       int vm_tr_slot;
+
+#define KVM_MP_STATE_RUNNABLE          0
+#define KVM_MP_STATE_UNINITIALIZED     1
+#define KVM_MP_STATE_INIT_RECEIVED     2
+#define KVM_MP_STATE_HALTED            3
+       int mp_state;
+
+#define MAX_PTC_G_NUM                  3
+       int ptc_g_count;
+       struct kvm_ptc_g ptc_g_data[MAX_PTC_G_NUM];
+
+       /*halt timer to wake up sleepy vcpus*/
+       struct hrtimer hlt_timer;
+       long ht_active;
+
+       struct kvm_lapic *apic;    /* kernel irqchip context */
+       struct vpd *vpd;
+
+       /* Exit data for vmm_transition*/
+       struct exit_ctl_data exit_data;
+
+       cpumask_t cache_coherent_map;
+
+       unsigned long vmm_rr;
+       unsigned long host_rr6;
+       unsigned long psbits[8];
+       unsigned long cr_iipa;
+       unsigned long cr_isr;
+       unsigned long vsa_base;
+       unsigned long dirty_log_lock_pa;
+       unsigned long __gp;
+       /* TR and TC.  */
+       struct thash_data itrs[NITRS];
+       struct thash_data dtrs[NDTRS];
+       /* Bit is set if there is a tr/tc for the region.  */
+       unsigned char itr_regions;
+       unsigned char dtr_regions;
+       unsigned char tc_regions;
+       /* purge all */
+       unsigned long ptce_base;
+       unsigned long ptce_count[2];
+       unsigned long ptce_stride[2];
+       /* itc/itm */
+       unsigned long last_itc;
+       long itc_offset;
+       unsigned long itc_check;
+       unsigned long timer_check;
+       unsigned long timer_pending;
+
+       unsigned long vrr[8];
+       unsigned long ibr[8];
+       unsigned long dbr[8];
+       unsigned long insvc[4];         /* Interrupt in service.  */
+       unsigned long xtp;
+
+       unsigned long metaphysical_rr0; /* from kvm_arch (so is pinned) */
+       unsigned long metaphysical_rr4; /* from kvm_arch (so is pinned) */
+       unsigned long metaphysical_saved_rr0; /* from kvm_arch          */
+       unsigned long metaphysical_saved_rr4; /* from kvm_arch          */
+       unsigned long fp_psr;       /*used for lazy float register */
+       unsigned long saved_gp;
+       /*for phycial  emulation */
+       int mode_flags;
+       struct thash_cb vtlb;
+       struct thash_cb vhpt;
+       char irq_check;
+       char irq_new_pending;
+
+       unsigned long opcode;
+       unsigned long cause;
+       union context host;
+       union context guest;
+};
+
+struct kvm_vm_stat {
+       u64 remote_tlb_flush;
+};
+
+struct kvm_sal_data {
+       unsigned long boot_ip;
+       unsigned long boot_gp;
+};
+
+struct kvm_arch {
+       unsigned long   vm_base;
+       unsigned long   metaphysical_rr0;
+       unsigned long   metaphysical_rr4;
+       unsigned long   vmm_init_rr;
+       unsigned long   vhpt_base;
+       unsigned long   vtlb_base;
+       unsigned long   vpd_base;
+       spinlock_t dirty_log_lock;
+       struct kvm_ioapic *vioapic;
+       struct kvm_vm_stat stat;
+       struct kvm_sal_data rdv_sal_data;
+};
+
+union cpuid3_t {
+       u64 value;
+       struct {
+               u64 number : 8;
+               u64 revision : 8;
+               u64 model : 8;
+               u64 family : 8;
+               u64 archrev : 8;
+               u64 rv : 24;
+       };
+};
+
+struct kvm_pt_regs {
+       /* The following registers are saved by SAVE_MIN: */
+       unsigned long b6;  /* scratch */
+       unsigned long b7;  /* scratch */
+
+       unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
+       unsigned long ar_ssd; /* reserved for future use (scratch) */
+
+       unsigned long r8;  /* scratch (return value register 0) */
+       unsigned long r9;  /* scratch (return value register 1) */
+       unsigned long r10; /* scratch (return value register 2) */
+       unsigned long r11; /* scratch (return value register 3) */
+
+       unsigned long cr_ipsr; /* interrupted task's psr */
+       unsigned long cr_iip;  /* interrupted task's instruction pointer */
+       unsigned long cr_ifs;  /* interrupted task's function state */
+
+       unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
+       unsigned long ar_pfs;  /* prev function state  */
+       unsigned long ar_rsc;  /* RSE configuration */
+       /* The following two are valid only if cr_ipsr.cpl > 0: */
+       unsigned long ar_rnat;  /* RSE NaT */
+       unsigned long ar_bspstore; /* RSE bspstore */
+
+       unsigned long pr;  /* 64 predicate registers (1 bit each) */
+       unsigned long b0;  /* return pointer (bp) */
+       unsigned long loadrs;  /* size of dirty partition << 16 */
+
+       unsigned long r1;  /* the gp pointer */
+       unsigned long r12; /* interrupted task's memory stack pointer */
+       unsigned long r13; /* thread pointer */
+
+       unsigned long ar_fpsr;  /* floating point status (preserved) */
+       unsigned long r15;  /* scratch */
+
+       /* The remaining registers are NOT saved for system calls.  */
+       unsigned long r14;  /* scratch */
+       unsigned long r2;  /* scratch */
+       unsigned long r3;  /* scratch */
+       unsigned long r16;  /* scratch */
+       unsigned long r17;  /* scratch */
+       unsigned long r18;  /* scratch */
+       unsigned long r19;  /* scratch */
+       unsigned long r20;  /* scratch */
+       unsigned long r21;  /* scratch */
+       unsigned long r22;  /* scratch */
+       unsigned long r23;  /* scratch */
+       unsigned long r24;  /* scratch */
+       unsigned long r25;  /* scratch */
+       unsigned long r26;  /* scratch */
+       unsigned long r27;  /* scratch */
+       unsigned long r28;  /* scratch */
+       unsigned long r29;  /* scratch */
+       unsigned long r30;  /* scratch */
+       unsigned long r31;  /* scratch */
+       unsigned long ar_ccv;  /* compare/exchange value (scratch) */
+
+       /*
+        * Floating point registers that the kernel considers scratch:
+        */
+       struct ia64_fpreg f6;  /* scratch */
+       struct ia64_fpreg f7;  /* scratch */
+       struct ia64_fpreg f8;  /* scratch */
+       struct ia64_fpreg f9;  /* scratch */
+       struct ia64_fpreg f10;  /* scratch */
+       struct ia64_fpreg f11;  /* scratch */
+
+       unsigned long r4;  /* preserved */
+       unsigned long r5;  /* preserved */
+       unsigned long r6;  /* preserved */
+       unsigned long r7;  /* preserved */
+       unsigned long eml_unat;    /* used for emulating instruction */
+       unsigned long pad0;     /* alignment pad */
+};
+
+static inline struct kvm_pt_regs *vcpu_regs(struct kvm_vcpu *v)
+{
+       return (struct kvm_pt_regs *) ((unsigned long) v + IA64_STK_OFFSET) - 1;
+}
+
+typedef int kvm_vmm_entry(void);
+typedef void kvm_tramp_entry(union context *host, union context *guest);
+
+struct kvm_vmm_info{
+       struct module   *module;
+       kvm_vmm_entry   *vmm_entry;
+       kvm_tramp_entry *tramp_entry;
+       unsigned long   vmm_ivt;
+};
+
+int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
+int kvm_emulate_halt(struct kvm_vcpu *vcpu);
+int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
+void kvm_sal_emul(struct kvm_vcpu *vcpu);
+
+#endif
diff --git a/include/asm-ia64/kvm_para.h b/include/asm-ia64/kvm_para.h
new file mode 100644 (file)
index 0000000..9f9796b
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef __IA64_KVM_PARA_H
+#define __IA64_KVM_PARA_H
+
+/*
+ * asm-ia64/kvm_para.h
+ *
+ * Copyright (C) 2007 Xiantao Zhang <xiantao.zhang@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
+ * Place - Suite 330, Boston, MA 02111-1307 USA.
+ *
+ */
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+       return 0;
+}
+
+#endif
index c201a2020aa49e1a8a30deff30892b143e3dc157..9f020eb825c58c89224bfd10923a1872d77cb3ad 100644 (file)
@@ -22,6 +22,7 @@ struct pci_bus;
 struct task_struct;
 struct pci_dev;
 struct msi_desc;
+struct dma_attrs;
 
 typedef void ia64_mv_setup_t (char **);
 typedef void ia64_mv_cpu_init_t (void);
@@ -56,6 +57,11 @@ typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist
 typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
 typedef int ia64_mv_dma_supported (struct device *, u64);
 
+typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
+typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *);
+typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *);
+
 /*
  * WARNING: The legacy I/O space is _architected_.  Platforms are
  * expected to follow this architected model (see Section 10.7 in the
@@ -136,10 +142,10 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *);
 #  define platform_dma_init            ia64_mv.dma_init
 #  define platform_dma_alloc_coherent  ia64_mv.dma_alloc_coherent
 #  define platform_dma_free_coherent   ia64_mv.dma_free_coherent
-#  define platform_dma_map_single      ia64_mv.dma_map_single
-#  define platform_dma_unmap_single    ia64_mv.dma_unmap_single
-#  define platform_dma_map_sg          ia64_mv.dma_map_sg
-#  define platform_dma_unmap_sg                ia64_mv.dma_unmap_sg
+#  define platform_dma_map_single_attrs        ia64_mv.dma_map_single_attrs
+#  define platform_dma_unmap_single_attrs      ia64_mv.dma_unmap_single_attrs
+#  define platform_dma_map_sg_attrs    ia64_mv.dma_map_sg_attrs
+#  define platform_dma_unmap_sg_attrs  ia64_mv.dma_unmap_sg_attrs
 #  define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu
 #  define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu
 #  define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device
@@ -190,10 +196,10 @@ struct ia64_machine_vector {
        ia64_mv_dma_init *dma_init;
        ia64_mv_dma_alloc_coherent *dma_alloc_coherent;
        ia64_mv_dma_free_coherent *dma_free_coherent;
-       ia64_mv_dma_map_single *dma_map_single;
-       ia64_mv_dma_unmap_single *dma_unmap_single;
-       ia64_mv_dma_map_sg *dma_map_sg;
-       ia64_mv_dma_unmap_sg *dma_unmap_sg;
+       ia64_mv_dma_map_single_attrs *dma_map_single_attrs;
+       ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs;
+       ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs;
+       ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs;
        ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu;
        ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu;
        ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device;
@@ -240,10 +246,10 @@ struct ia64_machine_vector {
        platform_dma_init,                      \
        platform_dma_alloc_coherent,            \
        platform_dma_free_coherent,             \
-       platform_dma_map_single,                \
-       platform_dma_unmap_single,              \
-       platform_dma_map_sg,                    \
-       platform_dma_unmap_sg,                  \
+       platform_dma_map_single_attrs,          \
+       platform_dma_unmap_single_attrs,        \
+       platform_dma_map_sg_attrs,              \
+       platform_dma_unmap_sg_attrs,            \
        platform_dma_sync_single_for_cpu,       \
        platform_dma_sync_sg_for_cpu,           \
        platform_dma_sync_single_for_device,    \
@@ -292,9 +298,13 @@ extern ia64_mv_dma_init                    swiotlb_init;
 extern ia64_mv_dma_alloc_coherent      swiotlb_alloc_coherent;
 extern ia64_mv_dma_free_coherent       swiotlb_free_coherent;
 extern ia64_mv_dma_map_single          swiotlb_map_single;
+extern ia64_mv_dma_map_single_attrs    swiotlb_map_single_attrs;
 extern ia64_mv_dma_unmap_single                swiotlb_unmap_single;
+extern ia64_mv_dma_unmap_single_attrs  swiotlb_unmap_single_attrs;
 extern ia64_mv_dma_map_sg              swiotlb_map_sg;
+extern ia64_mv_dma_map_sg_attrs                swiotlb_map_sg_attrs;
 extern ia64_mv_dma_unmap_sg            swiotlb_unmap_sg;
+extern ia64_mv_dma_unmap_sg_attrs      swiotlb_unmap_sg_attrs;
 extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu;
 extern ia64_mv_dma_sync_sg_for_cpu     swiotlb_sync_sg_for_cpu;
 extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device;
@@ -340,17 +350,17 @@ extern ia64_mv_dma_supported              swiotlb_dma_supported;
 #ifndef platform_dma_free_coherent
 # define platform_dma_free_coherent    swiotlb_free_coherent
 #endif
-#ifndef platform_dma_map_single
-# define platform_dma_map_single       swiotlb_map_single
+#ifndef platform_dma_map_single_attrs
+# define platform_dma_map_single_attrs swiotlb_map_single_attrs
 #endif
-#ifndef platform_dma_unmap_single
-# define platform_dma_unmap_single     swiotlb_unmap_single
+#ifndef platform_dma_unmap_single_attrs
+# define platform_dma_unmap_single_attrs       swiotlb_unmap_single_attrs
 #endif
-#ifndef platform_dma_map_sg
-# define platform_dma_map_sg           swiotlb_map_sg
+#ifndef platform_dma_map_sg_attrs
+# define platform_dma_map_sg_attrs     swiotlb_map_sg_attrs
 #endif
-#ifndef platform_dma_unmap_sg
-# define platform_dma_unmap_sg         swiotlb_unmap_sg
+#ifndef platform_dma_unmap_sg_attrs
+# define platform_dma_unmap_sg_attrs   swiotlb_unmap_sg_attrs
 #endif
 #ifndef platform_dma_sync_single_for_cpu
 # define platform_dma_sync_single_for_cpu      swiotlb_sync_single_for_cpu
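
Every DMA map/unmap slot in the machine vector now carries a struct dma_attrs pointer, so one hook per operation can serve both the plain calls and the new *_attrs calls; a legacy-style wrapper only needs to pass NULL for the attributes. A hedged sketch of that forwarding, with an illustrative wrapper name rather than the exact ones in asm-ia64/dma-mapping.h:

    /* Sketch: the pre-attrs API is just the attrs API with a NULL argument.
     * platform_dma_map_single_attrs resolves to the swiotlb, sba, hwsw or sn
     * implementation, depending on which machine vector is selected.
     */
    static inline dma_addr_t
    sketch_dma_map_single(struct device *dev, void *cpu_addr, size_t size, int dir)
    {
            return platform_dma_map_single_attrs(dev, cpu_addr, size, dir, NULL);
    }
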
index e90daf9ce340cc0cb2b6433ceb247c314d2ccc65..2f57f5144b9fbc55363734d0dbf0920bac497fa6 100644 (file)
@@ -4,10 +4,10 @@
 extern ia64_mv_setup_t                 dig_setup;
 extern ia64_mv_dma_alloc_coherent      sba_alloc_coherent;
 extern ia64_mv_dma_free_coherent       sba_free_coherent;
-extern ia64_mv_dma_map_single          sba_map_single;
-extern ia64_mv_dma_unmap_single                sba_unmap_single;
-extern ia64_mv_dma_map_sg              sba_map_sg;
-extern ia64_mv_dma_unmap_sg            sba_unmap_sg;
+extern ia64_mv_dma_map_single_attrs    sba_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs  sba_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs                sba_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs      sba_unmap_sg_attrs;
 extern ia64_mv_dma_supported           sba_dma_supported;
 extern ia64_mv_dma_mapping_error       sba_dma_mapping_error;
 
@@ -23,10 +23,10 @@ extern ia64_mv_dma_mapping_error    sba_dma_mapping_error;
 #define platform_dma_init                      machvec_noop
 #define platform_dma_alloc_coherent            sba_alloc_coherent
 #define platform_dma_free_coherent             sba_free_coherent
-#define platform_dma_map_single                        sba_map_single
-#define platform_dma_unmap_single              sba_unmap_single
-#define platform_dma_map_sg                    sba_map_sg
-#define platform_dma_unmap_sg                  sba_unmap_sg
+#define platform_dma_map_single_attrs          sba_map_single_attrs
+#define platform_dma_unmap_single_attrs                sba_unmap_single_attrs
+#define platform_dma_map_sg_attrs              sba_map_sg_attrs
+#define platform_dma_unmap_sg_attrs            sba_unmap_sg_attrs
 #define platform_dma_sync_single_for_cpu       machvec_dma_sync_single
 #define platform_dma_sync_sg_for_cpu           machvec_dma_sync_sg
 #define platform_dma_sync_single_for_device    machvec_dma_sync_single
index f00a34a148ff338b276f98e3ee70d96f79ea749b..a842cdda827bd8203f95d4e367e1a8eed47d8764 100644 (file)
@@ -4,10 +4,10 @@
 extern ia64_mv_setup_t                         dig_setup;
 extern ia64_mv_dma_alloc_coherent              hwsw_alloc_coherent;
 extern ia64_mv_dma_free_coherent               hwsw_free_coherent;
-extern ia64_mv_dma_map_single                  hwsw_map_single;
-extern ia64_mv_dma_unmap_single                        hwsw_unmap_single;
-extern ia64_mv_dma_map_sg                      hwsw_map_sg;
-extern ia64_mv_dma_unmap_sg                    hwsw_unmap_sg;
+extern ia64_mv_dma_map_single_attrs            hwsw_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs          hwsw_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs                        hwsw_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs              hwsw_unmap_sg_attrs;
 extern ia64_mv_dma_supported                   hwsw_dma_supported;
 extern ia64_mv_dma_mapping_error               hwsw_dma_mapping_error;
 extern ia64_mv_dma_sync_single_for_cpu         hwsw_sync_single_for_cpu;
@@ -28,10 +28,10 @@ extern ia64_mv_dma_sync_sg_for_device               hwsw_sync_sg_for_device;
 #define platform_dma_init                      machvec_noop
 #define platform_dma_alloc_coherent            hwsw_alloc_coherent
 #define platform_dma_free_coherent             hwsw_free_coherent
-#define platform_dma_map_single                        hwsw_map_single
-#define platform_dma_unmap_single              hwsw_unmap_single
-#define platform_dma_map_sg                    hwsw_map_sg
-#define platform_dma_unmap_sg                  hwsw_unmap_sg
+#define platform_dma_map_single_attrs          hwsw_map_single_attrs
+#define platform_dma_unmap_single_attrs                hwsw_unmap_single_attrs
+#define platform_dma_map_sg_attrs              hwsw_map_sg_attrs
+#define platform_dma_unmap_sg_attrs            hwsw_unmap_sg_attrs
 #define platform_dma_supported                 hwsw_dma_supported
 #define platform_dma_mapping_error             hwsw_dma_mapping_error
 #define platform_dma_sync_single_for_cpu       hwsw_sync_single_for_cpu
index 61439a7f5b08a76263983695d23ec7562bbea96e..781308ea7b88baad9f1c6f58eeb039c4fc60f048 100644 (file)
@@ -57,10 +57,10 @@ extern ia64_mv_readl_t __sn_readl_relaxed;
 extern ia64_mv_readq_t __sn_readq_relaxed;
 extern ia64_mv_dma_alloc_coherent      sn_dma_alloc_coherent;
 extern ia64_mv_dma_free_coherent       sn_dma_free_coherent;
-extern ia64_mv_dma_map_single          sn_dma_map_single;
-extern ia64_mv_dma_unmap_single                sn_dma_unmap_single;
-extern ia64_mv_dma_map_sg              sn_dma_map_sg;
-extern ia64_mv_dma_unmap_sg            sn_dma_unmap_sg;
+extern ia64_mv_dma_map_single_attrs    sn_dma_map_single_attrs;
+extern ia64_mv_dma_unmap_single_attrs  sn_dma_unmap_single_attrs;
+extern ia64_mv_dma_map_sg_attrs                sn_dma_map_sg_attrs;
+extern ia64_mv_dma_unmap_sg_attrs      sn_dma_unmap_sg_attrs;
 extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu;
 extern ia64_mv_dma_sync_sg_for_cpu     sn_dma_sync_sg_for_cpu;
 extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device;
@@ -113,10 +113,10 @@ extern ia64_mv_pci_fixup_bus_t            sn_pci_fixup_bus;
 #define platform_dma_init              machvec_noop
 #define platform_dma_alloc_coherent    sn_dma_alloc_coherent
 #define platform_dma_free_coherent     sn_dma_free_coherent
-#define platform_dma_map_single                sn_dma_map_single
-#define platform_dma_unmap_single      sn_dma_unmap_single
-#define platform_dma_map_sg            sn_dma_map_sg
-#define platform_dma_unmap_sg          sn_dma_unmap_sg
+#define platform_dma_map_single_attrs  sn_dma_map_single_attrs
+#define platform_dma_unmap_single_attrs        sn_dma_unmap_single_attrs
+#define platform_dma_map_sg_attrs      sn_dma_map_sg_attrs
+#define platform_dma_unmap_sg_attrs    sn_dma_unmap_sg_attrs
 #define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu
 #define platform_dma_sync_sg_for_cpu   sn_dma_sync_sg_for_cpu
 #define platform_dma_sync_single_for_device sn_dma_sync_single_for_device
index 4999a6c63775b87f4432378c6774e74f2ae2bfb0..36f39321b7681a305da85b7be173a4c5c171e0b1 100644 (file)
@@ -54,9 +54,6 @@
 # define HPAGE_MASK            (~(HPAGE_SIZE - 1))
 
 # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-# define ARCH_HAS_HUGEPAGE_ONLY_RANGE
-# define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-# define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
 #endif /* CONFIG_HUGETLB_PAGE */
 
 #ifdef __ASSEMBLY__
@@ -153,9 +150,6 @@ typedef union ia64_va {
 # define htlbpage_to_page(x)   (((unsigned long) REGION_NUMBER(x) << 61)                       \
                                 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
 # define HUGETLB_PAGE_ORDER    (HPAGE_SHIFT - PAGE_SHIFT)
-# define is_hugepage_only_range(mm, addr, len)         \
-        (REGION_NUMBER(addr) == RGN_HPAGE ||   \
-         REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE)
 extern unsigned int hpage_shift;
 #endif
 
index ed70862ea2473ee8e2d2a79a975293a97b2fb550..7a9bff47564f10c3165123e378ce226e90d55f2b 100644 (file)
@@ -302,6 +302,8 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pte_dirty(pte)         ((pte_val(pte) & _PAGE_D) != 0)
 #define pte_young(pte)         ((pte_val(pte) & _PAGE_A) != 0)
 #define pte_file(pte)          ((pte_val(pte) & _PAGE_FILE) != 0)
+#define pte_special(pte)       0
+
 /*
  * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
  * access rights:
@@ -313,6 +315,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define pte_mkclean(pte)       (__pte(pte_val(pte) & ~_PAGE_D))
 #define pte_mkdirty(pte)       (__pte(pte_val(pte) | _PAGE_D))
 #define pte_mkhuge(pte)                (__pte(pte_val(pte)))
+#define pte_mkspecial(pte)     (pte)
 
 /*
  * Because ia64's Icache and Dcache is not coherent (on a cpu), we need to
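
The pte_special()/pte_mkspecial() pair added to this header is a deliberate stub: ia64 reserves no PTE bit for "special" mappings, so pte_special() always reports false and generic code keeps taking the ordinary struct-page path. A hedged sketch of the caller shape this supports, simplified from the generic vm_normal_page() idea rather than copied from mm/:

    /* Sketch: only architectures that really mark PTEs special ever skip the
     * struct page lookup; with the stub above, ia64 never takes the branch.
     */
    struct page *sketch_normal_page(pte_t pte, unsigned long pfn)
    {
            if (pte_special(pte))
                    return NULL;            /* no backing struct page */
            return pfn_to_page(pfn);        /* ordinary mapping */
    }
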
index 741f7ecb986a90fb28ed9c0a8020da5b67654f46..6aff126fc07ea31802ec249613e9237c16472875 100644 (file)
@@ -119,6 +119,69 @@ struct ia64_psr {
        __u64 reserved4 : 19;
 };
 
+union ia64_isr {
+       __u64  val;
+       struct {
+               __u64 code : 16;
+               __u64 vector : 8;
+               __u64 reserved1 : 8;
+               __u64 x : 1;
+               __u64 w : 1;
+               __u64 r : 1;
+               __u64 na : 1;
+               __u64 sp : 1;
+               __u64 rs : 1;
+               __u64 ir : 1;
+               __u64 ni : 1;
+               __u64 so : 1;
+               __u64 ei : 2;
+               __u64 ed : 1;
+               __u64 reserved2 : 20;
+       };
+};
+
+union ia64_lid {
+       __u64 val;
+       struct {
+               __u64  rv  : 16;
+               __u64  eid : 8;
+               __u64  id  : 8;
+               __u64  ig  : 32;
+       };
+};
+
+union ia64_tpr {
+       __u64 val;
+       struct {
+               __u64 ig0 : 4;
+               __u64 mic : 4;
+               __u64 rsv : 8;
+               __u64 mmi : 1;
+               __u64 ig1 : 47;
+       };
+};
+
+union ia64_itir {
+       __u64 val;
+       struct {
+               __u64 rv3  :  2; /* 0-1 */
+               __u64 ps   :  6; /* 2-7 */
+               __u64 key  : 24; /* 8-31 */
+               __u64 rv4  : 32; /* 32-63 */
+       };
+};
+
+union ia64_rr {
+       __u64 val;
+       struct {
+               __u64  ve       :  1;  /* enable hw walker */
+               __u64  reserved0:  1;  /* reserved */
+               __u64  ps       :  6;  /* log page size */
+               __u64  rid      : 24;  /* region id */
+               __u64  reserved1: 32;  /* reserved */
+       };
+};
+
 /*
  * CPU type, hardware bug flags, and per-CPU state.  Frequently used
  * state comes earlier:
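
Each of these unions overlays a named bit-field view on the raw 64-bit register value, so a caller reads the register once into .val and then picks fields out by name instead of shifting and masking by hand. A small hedged example against the ia64_rr layout declared above; the raw value would come from wherever the caller obtained the region register:

    /* Sketch: decode a region register image via union ia64_rr. */
    void sketch_decode_rr(unsigned long raw)
    {
            union ia64_rr rr;

            rr.val = raw;
            printk("rr: ve=%llu log2-pagesize=%llu rid=%llu\n",
                   (unsigned long long)rr.ve,
                   (unsigned long long)rr.ps,
                   (unsigned long long)rr.rid);
    }
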
index dff8128fa58ef5ba75506daa51a16150656b2e46..26e250bfb91211580635ac4665734389d46cd45b 100644 (file)
@@ -146,23 +146,23 @@ do {                                              \
 
 # define local_irq_save(x)                                     \
 do {                                                           \
-       unsigned long psr;                                      \
+       unsigned long __psr;                                    \
                                                                \
-       __local_irq_save(psr);                                  \
-       if (psr & IA64_PSR_I)                                   \
+       __local_irq_save(__psr);                                \
+       if (__psr & IA64_PSR_I)                                 \
                __save_ip();                                    \
-       (x) = psr;                                              \
+       (x) = __psr;                                            \
 } while (0)
 
-# define local_irq_disable()   do { unsigned long x; local_irq_save(x); } while (0)
+# define local_irq_disable()   do { unsigned long __x; local_irq_save(__x); } while (0)
 
 # define local_irq_restore(x)                                  \
 do {                                                           \
-       unsigned long old_psr, psr = (x);                       \
+       unsigned long __old_psr, __psr = (x);                   \
                                                                \
-       local_save_flags(old_psr);                              \
-       __local_irq_restore(psr);                               \
-       if ((old_psr & IA64_PSR_I) && !(psr & IA64_PSR_I))      \
+       local_save_flags(__old_psr);                            \
+       __local_irq_restore(__psr);                             \
+       if ((__old_psr & IA64_PSR_I) && !(__psr & IA64_PSR_I))  \
                __save_ip();                                    \
 } while (0)
 
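
The rename from psr to __psr (and old_psr to __old_psr) fixes a classic statement-macro hazard: if the caller's own variable happens to be called psr, the macro's inner declaration shadows it and the saved value never reaches the caller. A stand-alone illustration of the failure mode, with nothing ia64-specific about it:

    #include <stdio.h>

    /* Buggy: the inner 'flags' shadows the caller's argument of the same name. */
    #define SAVE_BAD(x)  do { unsigned long flags = 123; (x) = flags; } while (0)
    /* Fixed: a reserved-looking name cannot collide with the caller's. */
    #define SAVE_GOOD(x) do { unsigned long __flags = 123; (x) = __flags; } while (0)

    int main(void)
    {
            unsigned long flags = 0;

            SAVE_BAD(flags);                /* assigns the inner flags to itself */
            printf("bad:  %lu\n", flags);   /* caller's flags is still 0 */

            SAVE_GOOD(flags);
            printf("good: %lu\n", flags);   /* prints 123 */
            return 0;
    }
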
index 6da8069a0f77ff6c78074cb45bf36868445f4d9d..f30e05583869e8fe3431ac9f0aef20e90e0edc22 100644 (file)
@@ -101,7 +101,6 @@ extern void tsk_clear_notify_resume(struct task_struct *tsk);
 #define TIF_SYSCALL_TRACE      2       /* syscall trace active */
 #define TIF_SYSCALL_AUDIT      3       /* syscall auditing active */
 #define TIF_SINGLESTEP         4       /* restore singlestep on return to user mode */
-#define TIF_RESTORE_SIGMASK    5       /* restore signal mask in do_signal() */
 #define TIF_NOTIFY_RESUME      6       /* resumption notification requested */
 #define TIF_POLLING_NRFLAG     16      /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE             17
@@ -109,6 +108,7 @@ extern void tsk_clear_notify_resume(struct task_struct *tsk);
 #define TIF_DB_DISABLED                19      /* debug trap disabled for fsyscall */
 #define TIF_FREEZE             20      /* is freezing for suspend */
 #define TIF_RESTORE_RSE                21      /* user RBS is newer than kernel RBS */
+#define TIF_RESTORE_SIGMASK    22      /* restore signal mask in do_signal() */
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
@@ -126,8 +126,7 @@ extern void tsk_clear_notify_resume(struct task_struct *tsk);
 
 /* "work to do on user-return" bits */
 #define TIF_ALLWORK_MASK       (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SYSCALL_AUDIT|\
-                                _TIF_NEED_RESCHED| _TIF_SYSCALL_TRACE|\
-                                _TIF_RESTORE_SIGMASK)
+                                _TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE)
 /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT */
 #define TIF_WORK_MASK          (TIF_ALLWORK_MASK&~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT))
 
index f2f72ef2a8974cc3496dc0fd92435626f708a7e8..32863b3bb1d3e9bdd8cba30a5eb3356b3cbbb467 100644 (file)
@@ -116,6 +116,8 @@ void build_cpu_to_node_map(void);
 #define smt_capable()                          (smp_num_siblings > 1)
 #endif
 
+extern void arch_fix_phys_package_id(int num, u32 slot);
+
 #define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
                                        CPU_MASK_ALL : \
                                        node_to_cpumask(pcibus_to_node(bus)) \
index bb855988810353220b9d7d238e3d40d2eec1985c..7bddc7f5858469630fc27787fedda38edf7636e0 100644 (file)
@@ -1,6 +1,11 @@
 #ifndef _ASM_IA64_UNALIGNED_H
 #define _ASM_IA64_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/le_struct.h>
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/generic.h>
+
+#define get_unaligned  __get_unaligned_le
+#define put_unaligned  __put_unaligned_le
 
 #endif /* _ASM_IA64_UNALIGNED_H */
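
Rebuilding this header from the linux/unaligned helpers keeps the old contract: get_unaligned() and put_unaligned() perform little-endian (native for ia64 Linux) accesses that are safe at any address. A hedged usage sketch with a made-up buffer offset:

    /* Sketch: 32-bit read and write at a deliberately misaligned offset. */
    void sketch_unaligned(unsigned char *buf)
    {
            u32 v;

            v = get_unaligned((u32 *)(buf + 1));    /* offset 1 is not 4-byte aligned */
            put_unaligned(v + 1, (u32 *)(buf + 1));
    }
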
index b82d923b73c1c5b6ad8d1d0599331b72440f0d3f..13d7e65ca3cce3e76bbf2346c37651ad95e01e9c 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2001-2005 Silicon Graphics, Inc.  All rights reserved.
+ * Copyright (C) 2001-2008 Silicon Graphics, Inc.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of version 2 of the GNU General Public License
@@ -8,5 +8,5 @@
  * Prototypes for the uncached page allocator
  */
 
-extern unsigned long uncached_alloc_page(int nid);
-extern void uncached_free_page(unsigned long);
+extern unsigned long uncached_alloc_page(int starting_nid, int n_pages);
+extern void uncached_free_page(unsigned long uc_addr, int n_pages);
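
The uncached allocator now works in batches: the caller names a preferred starting node and a page count, and must hand the same count back when freeing. A hedged usage sketch against the prototypes above, with error handling trimmed and the count chosen arbitrarily:

    /* Sketch: grab four uncached pages near node 0, then release them. */
    void sketch_uncached(void)
    {
            int n_pages = 4;
            unsigned long uc_addr = uncached_alloc_page(0, n_pages);

            if (!uc_addr)
                    return;         /* allocation failed */
            /* ... use the uncached region at uc_addr ... */
            uncached_free_page(uc_addr, n_pages);
    }
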
index 86505387be0872b305cefdaa89796e3f67d04548..e6359c566b504f50ae462cc9c917b2126603052d 100644 (file)
@@ -214,6 +214,11 @@ static inline int pte_file(pte_t pte)
        return pte_val(pte) & _PAGE_FILE;
 }
 
+static inline int pte_special(pte_t pte)
+{
+       return 0;
+}
+
 static inline pte_t pte_mkclean(pte_t pte)
 {
        pte_val(pte) &= ~_PAGE_DIRTY;
@@ -250,6 +255,11 @@ static inline pte_t pte_mkwrite(pte_t pte)
        return pte;
 }
 
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+       return pte;
+}
+
 static inline  int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
 {
        return test_and_clear_bit(_PAGE_BIT_ACCESSED, ptep);
index fccc180c3913ff84a67af1139d0bc43a3eeed3a5..377eb20d1ec6dfdac60920f04bc6dbd2e4175ac0 100644 (file)
@@ -1,19 +1,18 @@
 #ifndef _ASM_M32R_UNALIGNED_H
 #define _ASM_M32R_UNALIGNED_H
 
-/*
- * For the benefit of those who are trying to port Linux to another
- * architecture, here are some C-language equivalents.
- */
-
-#include <asm/string.h>
-
-#define get_unaligned(ptr) \
-  ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
-
-#define put_unaligned(val, ptr)                                \
-  ({ __typeof__(*(ptr)) __tmp = (val);                 \
-     memmove((ptr), &__tmp, sizeof(*(ptr)));           \
-     (void)0; })
+#if defined(__LITTLE_ENDIAN__)
+# include <linux/unaligned/le_memmove.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#else
+# include <linux/unaligned/be_memmove.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#endif
 
 #endif /* _ASM_M32R_UNALIGNED_H */
index 33caad1628d4276619daa5b442319a386f4d17a3..8243c931b5c065f4578989361be6f9613b49eb8e 100644 (file)
@@ -25,5 +25,4 @@
        __rem;                                                  \
 })
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 #endif /* _M68K_DIV64_H */
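
With the div64_64() prototype dropped from this per-arch header, what remains is the long-standing do_div() macro shown in the surrounding context: it divides a 64-bit value by a 32-bit divisor in place and evaluates to the remainder. A hedged usage sketch:

    /* Sketch: do_div() rewrites its first argument and returns the remainder. */
    void sketch_do_div(void)
    {
            u64 n = 1000000123ULL;
            u32 rem;

            rem = do_div(n, 1000000);       /* n becomes 1000, rem becomes 123 */
    }
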
index 13135d4821d8f1a3bfd4f6aecc9070b008db4c7d..8e9a8a754dde0007377d1f59cfc0ed121f56f248 100644 (file)
@@ -168,6 +168,7 @@ static inline int pte_write(pte_t pte)              { return !(pte_val(pte) & _PAGE_RONLY);
 static inline int pte_dirty(pte_t pte)         { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)         { return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)          { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte)       { return 0; }
 
 static inline pte_t pte_wrprotect(pte_t pte)   { pte_val(pte) |= _PAGE_RONLY; return pte; }
 static inline pte_t pte_mkclean(pte_t pte)     { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
@@ -185,6 +186,7 @@ static inline pte_t pte_mkcache(pte_t pte)
        pte_val(pte) = (pte_val(pte) & _CACHEMASK040) | m68k_supervisor_cachemode;
        return pte;
 }
+static inline pte_t pte_mkspecial(pte_t pte)   { return pte; }
 
 #define PAGE_DIR_OFFSET(tsk,address) pgd_offset((tsk),(address))
 
index b766fc261bdedc5ab67947c2264230d88ecbb119..f847ec732d62d290a11ba2c464c2185a8d040f89 100644 (file)
@@ -169,6 +169,7 @@ static inline int pte_write(pte_t pte)              { return pte_val(pte) & SUN3_PAGE_WRITEA
 static inline int pte_dirty(pte_t pte)         { return pte_val(pte) & SUN3_PAGE_MODIFIED; }
 static inline int pte_young(pte_t pte)         { return pte_val(pte) & SUN3_PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)          { return pte_val(pte) & SUN3_PAGE_ACCESSED; }
+static inline int pte_special(pte_t pte)       { return 0; }
 
 static inline pte_t pte_wrprotect(pte_t pte)   { pte_val(pte) &= ~SUN3_PAGE_WRITEABLE; return pte; }
 static inline pte_t pte_mkclean(pte_t pte)     { pte_val(pte) &= ~SUN3_PAGE_MODIFIED; return pte; }
@@ -181,6 +182,7 @@ static inline pte_t pte_mknocache(pte_t pte)        { pte_val(pte) |= SUN3_PAGE_NOCACHE
 //static inline pte_t pte_mkcache(pte_t pte)   { pte_val(pte) &= SUN3_PAGE_NOCACHE; return pte; }
 // until then, use:
 static inline pte_t pte_mkcache(pte_t pte)     { return pte; }
+static inline pte_t pte_mkspecial(pte_t pte)   { return pte; }
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t kernel_pg_dir[PTRS_PER_PGD];
index 804cb3f888fe60d4a32ad314418bd809972d008f..77698f2dc33cca2367653b7b15633982870d783e 100644 (file)
@@ -1,16 +1,13 @@
-#ifndef __M68K_UNALIGNED_H
-#define __M68K_UNALIGNED_H
+#ifndef _ASM_M68K_UNALIGNED_H
+#define _ASM_M68K_UNALIGNED_H
 
 /*
  * The m68k can do unaligned accesses itself.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
+#define get_unaligned  __get_unaligned_be
+#define put_unaligned  __put_unaligned_be
 
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
-
-#endif
+#endif /* _ASM_M68K_UNALIGNED_H */
index 3f20419c633aab478da5b587acbc10de2416aaeb..939a0205621712c1416af8036d02ff2a33c2ff12 100644 (file)
@@ -35,7 +35,8 @@
 /*
  * Set number of channels of DMA on ColdFire for different implementations.
  */
-#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407)
+#if defined(CONFIG_M5249) || defined(CONFIG_M5307) || defined(CONFIG_M5407) || \
+       defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x)
 #define MAX_M68K_DMA_CHANNELS 4
 #elif defined(CONFIG_M5272)
 #define MAX_M68K_DMA_CHANNELS 1
index 96c45101832491ecbb7e0cabdb5b49d49f5fa5d2..6044397adb64f89d8923cec107ddfd5c5d04bdf2 100644 (file)
@@ -1,13 +1,16 @@
 #ifndef _M68KNOMMU_PARAM_H
 #define _M68KNOMMU_PARAM_H
 
-#define HZ CONFIG_HZ
-
 #ifdef __KERNEL__
+#define HZ CONFIG_HZ
 #define        USER_HZ         HZ
 #define        CLOCKS_PER_SEC  (USER_HZ)
 #endif
 
+#ifndef HZ
+#define HZ     100
+#endif
+
 #define EXEC_PAGESIZE  4096
 
 #ifndef NOGROUP
index 869e9dd24f544f276001793aeea97356c6230c0c..eb1ea4cb9a598e20af365400ff51479a41ad262f 100644 (file)
@@ -1,23 +1,25 @@
-#ifndef __M68K_UNALIGNED_H
-#define __M68K_UNALIGNED_H
+#ifndef _ASM_M68KNOMMU_UNALIGNED_H
+#define _ASM_M68KNOMMU_UNALIGNED_H
 
 
 #ifdef CONFIG_COLDFIRE
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
 
-#include <asm-generic/unaligned.h>
+#define get_unaligned  __get_unaligned_be
+#define put_unaligned  __put_unaligned_be
 
 #else
 /*
  * The m68k can do unaligned accesses itself. 
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+#define get_unaligned  __get_unaligned_be
+#define put_unaligned  __put_unaligned_be
 
 #endif
 
-#endif
+#endif /* _ASM_M68KNOMMU_UNALIGNED_H */
index ec75ce4cdb8c3e90307e51350c2d7dc4b950a4d5..c2bd126c3b4eda2613c73e26335bb0f8694abd9d 100644 (file)
@@ -591,6 +591,11 @@ static inline int __ilog2(unsigned long x)
        return 63 - lz;
 }
 
+static inline unsigned long __fls(unsigned long x)
+{
+       return __ilog2(x);
+}
+
 #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
 
 /*
diff --git a/include/asm-mips/cmp.h b/include/asm-mips/cmp.h
new file mode 100644 (file)
index 0000000..89a73fb
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef _ASM_CMP_H
+#define _ASM_CMP_H
+
+/*
+ * Definitions for CMP multitasking on MIPS cores
+ */
+struct task_struct;
+
+extern void cmp_smp_setup(void);
+extern void cmp_smp_finish(void);
+extern void cmp_boot_secondary(int cpu, struct task_struct *t);
+extern void cmp_init_secondary(void);
+extern void cmp_cpus_done(void);
+extern void cmp_prepare_cpus(unsigned int max_cpus);
+
+/* This is platform specific */
+extern void cmp_send_ipi(int cpu, unsigned int action);
+#endif /*  _ASM_CMP_H */
index bf5bbc78a9f7d85ae6a94e929a6b40f27f46e617..1c35cac6f35b1c72ab130d6719f9dde3dfef9f86 100644 (file)
@@ -29,7 +29,7 @@
 #define PRID_COMP_ALCHEMY      0x030000
 #define PRID_COMP_SIBYTE       0x040000
 #define PRID_COMP_SANDCRAFT    0x050000
-#define PRID_COMP_PHILIPS      0x060000
+#define PRID_COMP_NXP          0x060000
 #define PRID_COMP_TOSHIBA      0x070000
 #define PRID_COMP_LSI          0x080000
 #define PRID_COMP_LEXRA                0x0b0000
@@ -89,6 +89,7 @@
 #define PRID_IMP_34K           0x9500
 #define PRID_IMP_24KE          0x9600
 #define PRID_IMP_74K           0x9700
+#define PRID_IMP_1004K         0x9900
 #define PRID_IMP_LOONGSON1      0x4200
 #define PRID_IMP_LOONGSON2      0x6300
 
@@ -194,9 +195,9 @@ enum cpu_type_enum {
        /*
         * MIPS32 class processors
         */
-       CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_74K, CPU_AU1000,
-       CPU_AU1100, CPU_AU1200, CPU_AU1210, CPU_AU1250, CPU_AU1500, CPU_AU1550,
-       CPU_PR4450, CPU_BCM3302, CPU_BCM4710,
+       CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
+       CPU_AU1000, CPU_AU1100, CPU_AU1200, CPU_AU1210, CPU_AU1250, CPU_AU1500,
+       CPU_AU1550, CPU_PR4450, CPU_BCM3302, CPU_BCM4710,
 
        /*
         * MIPS64 class processors
index 486a5b0a1302b1aa9f7ee1214b16aa894259c6ba..98badd6bf22dd6ca386a62d1df19f9d0c8759874 100644 (file)
@@ -33,4 +33,6 @@ static inline u32 ioasic_read(unsigned int reg)
 
 extern void init_ioasic_irqs(int base);
 
+extern void dec_ioasic_clocksource_init(void);
+
 #endif /* __ASM_DEC_IOASIC_H */
index 716371bd098076c7297e5ef90a2400d42387c620..d1d699105c1106b724340104786f95ce8df39188 100644 (file)
@@ -82,7 +82,6 @@
        (n) = __quot; \
        __mod; })
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 #endif /* (_MIPS_SZLONG == 32) */
 
 #if (_MIPS_SZLONG == 64)
@@ -106,11 +105,6 @@ extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
        (n) = __quot; \
        __mod; })
 
-static inline uint64_t div64_64(uint64_t dividend, uint64_t divisor)
-{
-       return dividend / divisor;
-}
-
 #endif /* (_MIPS_SZLONG == 64) */
 
 #endif /* _ASM_DIV64_H */
diff --git a/include/asm-mips/ds1287.h b/include/asm-mips/ds1287.h
new file mode 100644 (file)
index 0000000..ba1702e
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ *  DS1287 timer functions.
+ *
+ *  Copyright (C) 2008  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef __ASM_DS1287_H
+#define __ASM_DS1287_H
+
+extern int ds1287_timer_state(void);
+extern void ds1287_set_base_clock(unsigned int clock);
+extern int ds1287_clockevent_init(int irq);
+
+#endif
index 17f082cfea85c055cda99f1fcd6305331daa726e..b9cce90346cfc334819f44e95c4308f96b8f9533 100644 (file)
@@ -11,9 +11,9 @@
 #ifdef __KERNEL__
 
 #include <linux/futex.h>
+#include <linux/uaccess.h>
 #include <asm/barrier.h>
 #include <asm/errno.h>
-#include <asm/uaccess.h>
 #include <asm/war.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)             \
diff --git a/include/asm-mips/gcmpregs.h b/include/asm-mips/gcmpregs.h
new file mode 100644 (file)
index 0000000..d74a8a4
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000, 07 MIPS Technologies, Inc.
+ *
+ * Multiprocessor Subsystem Register Definitions
+ *
+ */
+#ifndef _ASM_GCMPREGS_H
+#define _ASM_GCMPREGS_H
+
+
+/* Offsets to major blocks within GCMP from GCMP base */
+#define GCMP_GCB_OFS           0x0000 /* Global Control Block */
+#define GCMP_CLCB_OFS          0x2000 /* Core Local Control Block */
+#define GCMP_COCB_OFS          0x4000 /* Core Other Control Block */
+#define GCMP_GDB_OFS           0x8000 /* Global Debug Block */
+
+/* Offsets to individual GCMP registers from GCMP base */
+#define GCMPOFS(block, tag, reg)       (GCMP_##block##_OFS + GCMP_##tag##_##reg##_OFS)
+
+#define GCMPGCBOFS(reg)                GCMPOFS(GCB, GCB, reg)
+#define GCMPCLCBOFS(reg)       GCMPOFS(CLCB, CCB, reg)
+#define GCMPCOCBOFS(reg)       GCMPOFS(COCB, CCB, reg)
+#define GCMPGDBOFS(reg)                GCMPOFS(GDB, GDB, reg)
+
+/* GCMP register access */
+#define GCMPGCB(reg)                   REGP(_gcmp_base, GCMPGCBOFS(reg))
+#define GCMPCLCB(reg)                  REGP(_gcmp_base, GCMPCLCBOFS(reg))
+#define GCMPCOCB(reg)                  REGP(_gcmp_base, GCMPCOCBOFS(reg))
+#define GCMPGDB(reg)                   REGP(_gcmp_base, GCMPGDBOFS(reg))
+
+/* Mask generation */
+#define GCMPMSK(block, reg, bits)      (MSK(bits)<<GCMP_##block##_##reg##_SHF)
+#define GCMPGCBMSK(reg, bits)          GCMPMSK(GCB, reg, bits)
+#define GCMPCCBMSK(reg, bits)          GCMPMSK(CCB, reg, bits)
+#define GCMPGDBMSK(reg, bits)          GCMPMSK(GDB, reg, bits)
+
+/* GCB registers */
+#define GCMP_GCB_GC_OFS                        0x0000  /* Global Config Register */
+#define  GCMP_GCB_GC_NUMIOCU_SHF       8
+#define  GCMP_GCB_GC_NUMIOCU_MSK       GCMPGCBMSK(GC_NUMIOCU, 4)
+#define  GCMP_GCB_GC_NUMCORES_SHF      0
+#define  GCMP_GCB_GC_NUMCORES_MSK      GCMPGCBMSK(GC_NUMCORES, 8)
+#define GCMP_GCB_GCMPB_OFS             0x0008          /* Global GCMP Base */
+#define  GCMP_GCB_GCMPB_GCMPBASE_SHF   15
+#define  GCMP_GCB_GCMPB_GCMPBASE_MSK   GCMPGCBMSK(GCMPB_GCMPBASE, 17)
+#define  GCMP_GCB_GCMPB_CMDEFTGT_SHF   0
+#define  GCMP_GCB_GCMPB_CMDEFTGT_MSK   GCMPGCBMSK(GCMPB_CMDEFTGT, 2)
+#define  GCMP_GCB_GCMPB_CMDEFTGT_MEM   0
+#define  GCMP_GCB_GCMPB_CMDEFTGT_MEM1  1
+#define  GCMP_GCB_GCMPB_CMDEFTGT_IOCU1 2
+#define  GCMP_GCB_GCMPB_CMDEFTGT_IOCU2 3
+#define GCMP_GCB_CCMC_OFS              0x0010  /* Global CM Control */
+#define GCMP_GCB_GCSRAP_OFS            0x0020  /* Global CSR Access Privilege */
+#define  GCMP_GCB_GCSRAP_CMACCESS_SHF  0
+#define  GCMP_GCB_GCSRAP_CMACCESS_MSK  GCMPGCBMSK(GCSRAP_CMACCESS, 8)
+#define GCMP_GCB_GCMPREV_OFS           0x0030  /* GCMP Revision Register */
+#define GCMP_GCB_GCMEM_OFS             0x0040  /* Global CM Error Mask */
+#define GCMP_GCB_GCMEC_OFS             0x0048  /* Global CM Error Cause */
+#define  GCMP_GCB_GMEC_ERROR_TYPE_SHF  27
+#define  GCMP_GCB_GMEC_ERROR_TYPE_MSK  GCMPGCBMSK(GMEC_ERROR_TYPE, 5)
+#define  GCMP_GCB_GMEC_ERROR_INFO_SHF  0
+#define  GCMP_GCB_GMEC_ERROR_INFO_MSK  GCMPGCBMSK(GMEC_ERROR_INFO, 27)
+#define GCMP_GCB_GCMEA_OFS             0x0050  /* Global CM Error Address */
+#define GCMP_GCB_GCMEO_OFS             0x0058  /* Global CM Error Multiple */
+#define  GCMP_GCB_GMEO_ERROR_2ND_SHF   0
+#define  GCMP_GCB_GMEO_ERROR_2ND_MSK   GCMPGCBMSK(GMEO_ERROR_2ND, 5)
+#define GCMP_GCB_GICBA_OFS             0x0080  /* Global Interrupt Controller Base Address */
+#define  GCMP_GCB_GICBA_BASE_SHF       17
+#define  GCMP_GCB_GICBA_BASE_MSK       GCMPGCBMSK(GICBA_BASE, 15)
+#define  GCMP_GCB_GICBA_EN_SHF         0
+#define  GCMP_GCB_GICBA_EN_MSK         GCMPGCBMSK(GICBA_EN, 1)
+
+/* GCB Regions */
+#define GCMP_GCB_CMxBASE_OFS(n)                (0x0090+16*(n))         /* Global Region[0-3] Base Address */
+#define  GCMP_GCB_CMxBASE_BASE_SHF     16
+#define  GCMP_GCB_CMxBASE_BASE_MSK     GCMPGCBMSK(CMxBASE_BASE, 16)
+#define GCMP_GCB_CMxMASK_OFS(n)                (0x0098+16*(n))         /* Global Region[0-3] Address Mask */
+#define  GCMP_GCB_CMxMASK_MASK_SHF     16
+#define  GCMP_GCB_CMxMASK_MASK_MSK     GCMPGCBMSK(CMxMASK_MASK, 16)
+#define  GCMP_GCB_CMxMASK_CMREGTGT_SHF 0
+#define  GCMP_GCB_CMxMASK_CMREGTGT_MSK GCMPGCBMSK(CMxMASK_CMREGTGT, 2)
+#define  GCMP_GCB_CMxMASK_CMREGTGT_MEM  0
+#define  GCMP_GCB_CMxMASK_CMREGTGT_MEM1  1
+#define  GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2
+#define  GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3
+
+
+/* Core local/Core other control block registers */
+#define GCMP_CCB_RESETR_OFS            0x0000                  /* Reset Release */
+#define  GCMP_CCB_RESETR_INRESET_SHF   0
+#define  GCMP_CCB_RESETR_INRESET_MSK   GCMPCCBMSK(RESETR_INRESET, 16)
+#define GCMP_CCB_COHCTL_OFS            0x0008                  /* Coherence Control */
+#define  GCMP_CCB_COHCTL_DOMAIN_SHF    0
+#define  GCMP_CCB_COHCTL_DOMAIN_MSK    GCMPCCBMSK(COHCTL_DOMAIN, 8)
+#define GCMP_CCB_CFG_OFS               0x0010                  /* Config */
+#define  GCMP_CCB_CFG_IOCUTYPE_SHF     10
+#define  GCMP_CCB_CFG_IOCUTYPE_MSK     GCMPCCBMSK(CFG_IOCUTYPE, 2)
+#define   GCMP_CCB_CFG_IOCUTYPE_CPU    0
+#define   GCMP_CCB_CFG_IOCUTYPE_NCIOCU 1
+#define   GCMP_CCB_CFG_IOCUTYPE_CIOCU  2
+#define  GCMP_CCB_CFG_NUMVPE_SHF       0
+#define  GCMP_CCB_CFG_NUMVPE_MSK       GCMPCCBMSK(CFG_NUMVPE, 10)
+#define GCMP_CCB_OTHER_OFS             0x0018          /* Other Address */
+#define  GCMP_CCB_OTHER_CORENUM_SHF    16
+#define  GCMP_CCB_OTHER_CORENUM_MSK    GCMPCCBMSK(OTHER_CORENUM, 16)
+#define GCMP_CCB_RESETBASE_OFS         0x0020          /* Reset Exception Base */
+#define  GCMP_CCB_RESETBASE_BEV_SHF    12
+#define  GCMP_CCB_RESETBASE_BEV_MSK    GCMPCCBMSK(RESETBASE_BEV, 20)
+#define GCMP_CCB_ID_OFS                        0x0028          /* Identification */
+#define GCMP_CCB_DINTGROUP_OFS         0x0030          /* DINT Group Participate */
+#define GCMP_CCB_DBGGROUP_OFS          0x0100          /* DebugBreak Group */
+
+#endif /* _ASM_GCMPREGS_H */
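
The token-pasting macros at the top of this header build every accessor out of a block offset plus a per-register offset, so one expansion written out by hand makes the scheme easier to follow (REG32()/REGP() and MSK() come from the GIC header added further down in this merge). The field extraction below is purely mechanical and attaches no meaning to the value beyond what the #defines state:

    /* Sketch: how GCMPGCB(GC) unfolds.
     *
     *   GCMPGCB(GC)
     *     -> REGP(_gcmp_base, GCMPGCBOFS(GC))
     *     -> REGP(_gcmp_base, GCMP_GCB_OFS + GCMP_GCB_GC_OFS)   i.e. 0x0000 + 0x0000
     *     -> *(volatile unsigned int *)(_gcmp_base + 0x0000)
     *
     * i.e. a 32-bit volatile access to the Global Config register.
     */
    unsigned int sketch_gc_numcores_field(void)
    {
            /* NUMCORES is the low 8-bit field of Global Config, per the masks above. */
            return (GCMPGCB(GC) & GCMP_GCB_GC_NUMCORES_MSK)
                    >> GCMP_GCB_GC_NUMCORES_SHF;
    }
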
diff --git a/include/asm-mips/gic.h b/include/asm-mips/gic.h
new file mode 100644 (file)
index 0000000..01b2f92
--- /dev/null
@@ -0,0 +1,487 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2000, 07 MIPS Technologies, Inc.
+ *
+ * GIC Register Definitions
+ *
+ */
+#ifndef _ASM_GICREGS_H
+#define _ASM_GICREGS_H
+
+#undef GICISBYTELITTLEENDIAN
+#define GICISWORDLITTLEENDIAN
+
+/* Constants */
+#define GIC_POL_POS                    1
+#define GIC_POL_NEG                    0
+#define GIC_TRIG_EDGE                  1
+#define GIC_TRIG_LEVEL                 0
+
+#define GIC_NUM_INTRS                  32
+
+#define MSK(n) ((1 << (n)) - 1)
+#define REG32(addr)            (*(volatile unsigned int *) (addr))
+#define REG(base, offs)                REG32((unsigned int)(base) + offs##_##OFS)
+#define REGP(base, phys)       REG32((unsigned int)(base) + (phys))
+
+/* Accessors */
+#define GIC_REG(segment, offset) \
+       REG32(_gic_base + segment##_##SECTION_OFS + offset##_##OFS)
+#define GIC_REG_ADDR(segment, offset) \
+       REG32(_gic_base + segment##_##SECTION_OFS + offset)
+
+#define GIC_ABS_REG(segment, offset) \
+       (_gic_base + segment##_##SECTION_OFS + offset##_##OFS)
+#define GIC_REG_ABS_ADDR(segment, offset) \
+       (_gic_base + segment##_##SECTION_OFS + offset)
+
+#ifdef GICISBYTELITTLEENDIAN
+#define GICREAD(reg, data)     (data) = (reg), (data) = le32_to_cpu(data)
+#define GICWRITE(reg, data)    (reg) = cpu_to_le32(data)
+#define GICBIS(reg, bits)                      \
+       ({unsigned int data;                    \
+               GICREAD(reg, data);             \
+               data |= bits;                   \
+               GICWRITE(reg, data);            \
+       })
+
+#else
+#define GICREAD(reg, data)     (data) = (reg)
+#define GICWRITE(reg, data)    (reg) = (data)
+#define GICBIS(reg, bits)      (reg) |= (bits)
+#endif
+
+
+/* GIC Address Space */
+#define SHARED_SECTION_OFS             0x0000
+#define SHARED_SECTION_SIZE            0x8000
+#define VPE_LOCAL_SECTION_OFS          0x8000
+#define VPE_LOCAL_SECTION_SIZE         0x4000
+#define VPE_OTHER_SECTION_OFS          0xc000
+#define VPE_OTHER_SECTION_SIZE         0x4000
+#define USM_VISIBLE_SECTION_OFS                0x10000
+#define USM_VISIBLE_SECTION_SIZE       0x10000
+
+/* Register Map for Shared Section */
+#if defined(CONFIG_CPU_LITTLE_ENDIAN) || defined(GICISWORDLITTLEENDIAN)
+
+#define        GIC_SH_CONFIG_OFS               0x0000
+
+/* Shared Global Counter */
+#define GIC_SH_COUNTER_31_00_OFS       0x0010
+#define GIC_SH_COUNTER_63_32_OFS       0x0014
+
+/* Interrupt Polarity */
+#define GIC_SH_POL_31_0_OFS            0x0100
+#define GIC_SH_POL_63_32_OFS           0x0104
+#define GIC_SH_POL_95_64_OFS           0x0108
+#define GIC_SH_POL_127_96_OFS          0x010c
+#define GIC_SH_POL_159_128_OFS         0x0110
+#define GIC_SH_POL_191_160_OFS         0x0114
+#define GIC_SH_POL_223_192_OFS         0x0118
+#define GIC_SH_POL_255_224_OFS         0x011c
+
+/* Edge/Level Triggering */
+#define GIC_SH_TRIG_31_0_OFS           0x0180
+#define GIC_SH_TRIG_63_32_OFS          0x0184
+#define GIC_SH_TRIG_95_64_OFS          0x0188
+#define GIC_SH_TRIG_127_96_OFS         0x018c
+#define GIC_SH_TRIG_159_128_OFS                0x0190
+#define GIC_SH_TRIG_191_160_OFS                0x0194
+#define GIC_SH_TRIG_223_192_OFS                0x0198
+#define GIC_SH_TRIG_255_224_OFS                0x019c
+
+/* Dual Edge Triggering */
+#define GIC_SH_DUAL_31_0_OFS           0x0200
+#define GIC_SH_DUAL_63_32_OFS          0x0204
+#define GIC_SH_DUAL_95_64_OFS          0x0208
+#define GIC_SH_DUAL_127_96_OFS         0x020c
+#define GIC_SH_DUAL_159_128_OFS                0x0210
+#define GIC_SH_DUAL_191_160_OFS                0x0214
+#define GIC_SH_DUAL_223_192_OFS                0x0218
+#define GIC_SH_DUAL_255_224_OFS                0x021c
+
+/* Set/Clear corresponding bit in Edge Detect Register */
+#define GIC_SH_WEDGE_OFS               0x0280
+
+/* Reset Mask - Disables Interrupt */
+#define GIC_SH_RMASK_31_0_OFS          0x0300
+#define GIC_SH_RMASK_63_32_OFS         0x0304
+#define GIC_SH_RMASK_95_64_OFS         0x0308
+#define GIC_SH_RMASK_127_96_OFS                0x030c
+#define GIC_SH_RMASK_159_128_OFS       0x0310
+#define GIC_SH_RMASK_191_160_OFS       0x0314
+#define GIC_SH_RMASK_223_192_OFS       0x0318
+#define GIC_SH_RMASK_255_224_OFS       0x031c
+
+/* Set Mask (WO) - Enables Interrupt */
+#define GIC_SH_SMASK_31_0_OFS          0x0380
+#define GIC_SH_SMASK_63_32_OFS         0x0384
+#define GIC_SH_SMASK_95_64_OFS         0x0388
+#define GIC_SH_SMASK_127_96_OFS                0x038c
+#define GIC_SH_SMASK_159_128_OFS       0x0390
+#define GIC_SH_SMASK_191_160_OFS       0x0394
+#define GIC_SH_SMASK_223_192_OFS       0x0398
+#define GIC_SH_SMASK_255_224_OFS       0x039c
+
+/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */
+#define GIC_SH_MASK_31_0_OFS           0x0400
+#define GIC_SH_MASK_63_32_OFS          0x0404
+#define GIC_SH_MASK_95_64_OFS          0x0408
+#define GIC_SH_MASK_127_96_OFS         0x040c
+#define GIC_SH_MASK_159_128_OFS                0x0410
+#define GIC_SH_MASK_191_160_OFS                0x0414
+#define GIC_SH_MASK_223_192_OFS                0x0418
+#define GIC_SH_MASK_255_224_OFS                0x041c
+
+/* Pending Global Interrupts (RO) */
+#define GIC_SH_PEND_31_0_OFS           0x0480
+#define GIC_SH_PEND_63_32_OFS          0x0484
+#define GIC_SH_PEND_95_64_OFS          0x0488
+#define GIC_SH_PEND_127_96_OFS         0x048c
+#define GIC_SH_PEND_159_128_OFS                0x0490
+#define GIC_SH_PEND_191_160_OFS                0x0494
+#define GIC_SH_PEND_223_192_OFS                0x0498
+#define GIC_SH_PEND_255_224_OFS                0x049c
+
+#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS        0x0500
+
+/* Maps Interrupt X to a Pin */
+#define GIC_SH_MAP_TO_PIN(intr) \
+       (GIC_SH_INTR_MAP_TO_PIN_BASE_OFS + (4 * intr))
+
+#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS        0x2000
+
+/* Maps Interrupt X to a VPE */
+#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
+       (GIC_SH_INTR_MAP_TO_VPE_BASE_OFS + (32 * (intr)) + (((vpe) / 32) * 4))
+#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32))
+
+/* Polarity : Reset Value is always 0 */
+#define GIC_SH_SET_POLARITY_OFS                0x0100
+#define GIC_SET_POLARITY(intr, pol) \
+       GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_POLARITY_OFS + (((intr) / 32) * 4)), (pol) << ((intr) % 32))
+
+/* Triggering : Reset Value is always 0 */
+#define GIC_SH_SET_TRIGGER_OFS         0x0180
+#define GIC_SET_TRIGGER(intr, trig) \
+       GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_TRIGGER_OFS + (((intr) / 32) * 4)), (trig) << ((intr) % 32))
+
+/* Mask manipulation */
+#define GIC_SH_SMASK_OFS               0x0380
+#define GIC_SET_INTR_MASK(intr, val) \
+       GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_SMASK_OFS + (((intr) / 32) * 4)), ((val) << ((intr) % 32)))
+
+#define GIC_SH_RMASK_OFS               0x0300
+#define GIC_CLR_INTR_MASK(intr, val) \
+       GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_RMASK_OFS + (((intr) / 32) * 4)), ((val) << ((intr) % 32)))
+
+/* Register Map for Local Section */
+#define GIC_VPE_CTL_OFS                        0x0000
+#define GIC_VPE_PEND_OFS               0x0004
+#define GIC_VPE_MASK_OFS               0x0008
+#define GIC_VPE_RMASK_OFS              0x000c
+#define GIC_VPE_SMASK_OFS              0x0010
+#define GIC_VPE_WD_MAP_OFS             0x0040
+#define GIC_VPE_COMPARE_MAP_OFS                0x0044
+#define GIC_VPE_TIMER_MAP_OFS          0x0048
+#define GIC_VPE_PERFCTR_MAP_OFS                0x0050
+#define GIC_VPE_SWINT0_MAP_OFS         0x0054
+#define GIC_VPE_SWINT1_MAP_OFS         0x0058
+#define GIC_VPE_OTHER_ADDR_OFS         0x0080
+#define GIC_VPE_WD_CONFIG0_OFS         0x0090
+#define GIC_VPE_WD_COUNT0_OFS          0x0094
+#define GIC_VPE_WD_INITIAL0_OFS                0x0098
+#define GIC_VPE_COMPARE_LO_OFS         0x00a0
+#define GIC_VPE_COMPARE_HI             0x00a4
+
+#define GIC_VPE_EIC_SHADOW_SET_BASE    0x0100
+#define GIC_VPE_EIC_SS(intr) \
+       (GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr))
+
+#define GIC_VPE_EIC_VEC_BASE           0x0800
+#define GIC_VPE_EIC_VEC(intr) \
+       (GIC_VPE_EIC_VEC_BASE + (4 * intr))
+
+#define GIC_VPE_TENABLE_NMI_OFS                0x1000
+#define GIC_VPE_TENABLE_YQ_OFS         0x1004
+#define GIC_VPE_TENABLE_INT_31_0_OFS   0x1080
+#define GIC_VPE_TENABLE_INT_63_32_OFS  0x1084
+
+/* User Mode Visible Section Register Map */
+#define GIC_UMV_SH_COUNTER_31_00_OFS   0x0000
+#define GIC_UMV_SH_COUNTER_63_32_OFS   0x0004
+
+#else /* CONFIG_CPU_BIG_ENDIAN */
+
+#define        GIC_SH_CONFIG_OFS               0x0000
+
+/* Shared Global Counter */
+#define GIC_SH_COUNTER_31_00_OFS       0x0014
+#define GIC_SH_COUNTER_63_32_OFS       0x0010
+
+/* Interrupt Polarity */
+#define GIC_SH_POL_31_0_OFS            0x0104
+#define GIC_SH_POL_63_32_OFS           0x0100
+#define GIC_SH_POL_95_64_OFS           0x010c
+#define GIC_SH_POL_127_96_OFS          0x0108
+#define GIC_SH_POL_159_128_OFS         0x0114
+#define GIC_SH_POL_191_160_OFS         0x0110
+#define GIC_SH_POL_223_192_OFS         0x011c
+#define GIC_SH_POL_255_224_OFS         0x0118
+
+/* Edge/Level Triggering */
+#define GIC_SH_TRIG_31_0_OFS           0x0184
+#define GIC_SH_TRIG_63_32_OFS          0x0180
+#define GIC_SH_TRIG_95_64_OFS          0x018c
+#define GIC_SH_TRIG_127_96_OFS         0x0188
+#define GIC_SH_TRIG_159_128_OFS                0x0194
+#define GIC_SH_TRIG_191_160_OFS                0x0190
+#define GIC_SH_TRIG_223_192_OFS                0x019c
+#define GIC_SH_TRIG_255_224_OFS                0x0198
+
+/* Dual Edge Triggering */
+#define GIC_SH_DUAL_31_0_OFS           0x0204
+#define GIC_SH_DUAL_63_32_OFS          0x0200
+#define GIC_SH_DUAL_95_64_OFS          0x020c
+#define GIC_SH_DUAL_127_96_OFS         0x0208
+#define GIC_SH_DUAL_159_128_OFS                0x0214
+#define GIC_SH_DUAL_191_160_OFS                0x0210
+#define GIC_SH_DUAL_223_192_OFS                0x021c
+#define GIC_SH_DUAL_255_224_OFS                0x0218
+
+/* Set/Clear corresponding bit in Edge Detect Register */
+#define GIC_SH_WEDGE_OFS               0x0280
+
+/* Reset Mask - Disables Interrupt */
+#define GIC_SH_RMASK_31_0_OFS          0x0304
+#define GIC_SH_RMASK_63_32_OFS         0x0300
+#define GIC_SH_RMASK_95_64_OFS         0x030c
+#define GIC_SH_RMASK_127_96_OFS                0x0308
+#define GIC_SH_RMASK_159_128_OFS       0x0314
+#define GIC_SH_RMASK_191_160_OFS       0x0310
+#define GIC_SH_RMASK_223_192_OFS       0x031c
+#define GIC_SH_RMASK_255_224_OFS       0x0318
+
+/* Set Mask (WO) - Enables Interrupt */
+#define GIC_SH_SMASK_31_0_OFS          0x0384
+#define GIC_SH_SMASK_63_32_OFS         0x0380
+#define GIC_SH_SMASK_95_64_OFS         0x038c
+#define GIC_SH_SMASK_127_96_OFS                0x0388
+#define GIC_SH_SMASK_159_128_OFS       0x0394
+#define GIC_SH_SMASK_191_160_OFS       0x0390
+#define GIC_SH_SMASK_223_192_OFS       0x039c
+#define GIC_SH_SMASK_255_224_OFS       0x0398
+
+/* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */
+#define GIC_SH_MASK_31_0_OFS           0x0404
+#define GIC_SH_MASK_63_32_OFS          0x0400
+#define GIC_SH_MASK_95_64_OFS          0x040c
+#define GIC_SH_MASK_127_96_OFS         0x0408
+#define GIC_SH_MASK_159_128_OFS                0x0414
+#define GIC_SH_MASK_191_160_OFS                0x0410
+#define GIC_SH_MASK_223_192_OFS                0x041c
+#define GIC_SH_MASK_255_224_OFS                0x0418
+
+/* Pending Global Interrupts (RO) */
+#define GIC_SH_PEND_31_0_OFS           0x0484
+#define GIC_SH_PEND_63_32_OFS          0x0480
+#define GIC_SH_PEND_95_64_OFS          0x048c
+#define GIC_SH_PEND_127_96_OFS         0x0488
+#define GIC_SH_PEND_159_128_OFS                0x0494
+#define GIC_SH_PEND_191_160_OFS                0x0490
+#define GIC_SH_PEND_223_192_OFS                0x049c
+#define GIC_SH_PEND_255_224_OFS                0x0498
+
+#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS        0x0500
+
+/* Maps Interrupt X to a Pin */
+#define GIC_SH_MAP_TO_PIN(intr) \
+       (GIC_SH_INTR_MAP_TO_PIN_BASE_OFS + (4 * intr))
+
+#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS        0x2004
+
+/*
+ * Maps Interrupt X to a VPE.  This is more complex than the LE case, as
+ * odd and even registers need to be transposed.  It does work - trust me!
+ */
+#define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
+       (GIC_SH_INTR_MAP_TO_VPE_BASE_OFS + (32 * (intr)) + \
+       (((((vpe) / 32) ^ 1) - 1) * 4))
+#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32))
+
+/* Polarity */
+#define GIC_SH_SET_POLARITY_OFS                0x0100
+#define GIC_SET_POLARITY(intr, pol) \
+       GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_POLARITY_OFS + 4 + (((((intr) / 32) ^ 1) - 1) * 4)), (pol) << ((intr) % 32))
+
+/* Triggering */
+#define GIC_SH_SET_TRIGGER_OFS         0x0180
+#define GIC_SET_TRIGGER(intr, trig) \
+       GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_TRIGGER_OFS + 4 + (((((intr) / 32) ^ 1) - 1) * 4)), (trig) << ((intr) % 32))
+
+/* Mask manipulation */
+#define GIC_SH_SMASK_OFS               0x0380
+#define GIC_SET_INTR_MASK(intr, val) \
+       GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_SMASK_OFS + 4 + (((((intr) / 32) ^ 1) - 1) * 4)), ((val) << ((intr) % 32)))
+
+#define GIC_SH_RMASK_OFS               0x0300
+#define GIC_CLR_INTR_MASK(intr, val) \
+       GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_RMASK_OFS + 4 + (((((intr) / 32) ^ 1) - 1) * 4)), ((val) << ((intr) % 32)))
+
+/* Register Map for Local Section */
+#define GIC_VPE_CTL_OFS                        0x0000
+#define GIC_VPE_PEND_OFS               0x0004
+#define GIC_VPE_MASK_OFS               0x0008
+#define GIC_VPE_RMASK_OFS              0x000c
+#define GIC_VPE_SMASK_OFS              0x0010
+#define GIC_VPE_WD_MAP_OFS             0x0040
+#define GIC_VPE_COMPARE_MAP_OFS                0x0044
+#define GIC_VPE_TIMER_MAP_OFS          0x0048
+#define GIC_VPE_PERFCTR_MAP_OFS                0x0050
+#define GIC_VPE_SWINT0_MAP_OFS         0x0054
+#define GIC_VPE_SWINT1_MAP_OFS         0x0058
+#define GIC_VPE_OTHER_ADDR_OFS         0x0080
+#define GIC_VPE_WD_CONFIG0_OFS         0x0090
+#define GIC_VPE_WD_COUNT0_OFS          0x0094
+#define GIC_VPE_WD_INITIAL0_OFS                0x0098
+#define GIC_VPE_COMPARE_LO_OFS         0x00a4
+#define GIC_VPE_COMPARE_HI_OFS         0x00a0
+
+#define GIC_VPE_EIC_SHADOW_SET_BASE    0x0100
+#define GIC_VPE_EIC_SS(intr) \
+       (GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr))
+
+#define GIC_VPE_EIC_VEC_BASE           0x0800
+#define GIC_VPE_EIC_VEC(intr) \
+       (GIC_VPE_EIC_VEC_BASE + (4 * intr))
+
+#define GIC_VPE_TENABLE_NMI_OFS                0x1000
+#define GIC_VPE_TENABLE_YQ_OFS         0x1004
+#define GIC_VPE_TENABLE_INT_31_0_OFS   0x1080
+#define GIC_VPE_TENABLE_INT_63_32_OFS  0x1084
+
+/* User Mode Visible Section Register Map */
+#define GIC_UMV_SH_COUNTER_31_00_OFS   0x0004
+#define GIC_UMV_SH_COUNTER_63_32_OFS   0x0000
+
+#endif /* !LE */
+
+/* Masks */
+#define GIC_SH_CONFIG_COUNTSTOP_SHF    28
+#define GIC_SH_CONFIG_COUNTSTOP_MSK    (MSK(1) << GIC_SH_CONFIG_COUNTSTOP_SHF)
+
+#define GIC_SH_CONFIG_COUNTBITS_SHF    24
+#define GIC_SH_CONFIG_COUNTBITS_MSK    (MSK(4) << GIC_SH_CONFIG_COUNTBITS_SHF)
+
+#define GIC_SH_CONFIG_NUMINTRS_SHF     16
+#define GIC_SH_CONFIG_NUMINTRS_MSK     (MSK(8) << GIC_SH_CONFIG_NUMINTRS_SHF)
+
+#define GIC_SH_CONFIG_NUMVPES_SHF      0
+#define GIC_SH_CONFIG_NUMVPES_MSK      (MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF)
+
+#define GIC_SH_WEDGE_SET(intr)         (intr | (0x1 << 31))
+#define GIC_SH_WEDGE_CLR(intr)         (intr & ~(0x1 << 31))
+
+#define GIC_MAP_TO_PIN_SHF             31
+#define GIC_MAP_TO_PIN_MSK             (MSK(1) << GIC_MAP_TO_PIN_SHF)
+#define GIC_MAP_TO_NMI_SHF             30
+#define GIC_MAP_TO_NMI_MSK             (MSK(1) << GIC_MAP_TO_NMI_SHF)
+#define GIC_MAP_TO_YQ_SHF              29
+#define GIC_MAP_TO_YQ_MSK              (MSK(1) << GIC_MAP_TO_YQ_SHF)
+#define GIC_MAP_SHF                    0
+#define GIC_MAP_MSK                    (MSK(6) << GIC_MAP_SHF)
+
+/* GIC_VPE_CTL Masks */
+#define GIC_VPE_CTL_PERFCNT_RTBL_SHF   2
+#define GIC_VPE_CTL_PERFCNT_RTBL_MSK   (MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF)
+#define GIC_VPE_CTL_TIMER_RTBL_SHF     1
+#define GIC_VPE_CTL_TIMER_RTBL_MSK     (MSK(1) << GIC_VPE_CTL_TIMER_RTBL_SHF)
+#define GIC_VPE_CTL_EIC_MODE_SHF       0
+#define GIC_VPE_CTL_EIC_MODE_MSK       (MSK(1) << GIC_VPE_CTL_EIC_MODE_SHF)
+
+/* GIC_VPE_PEND Masks */
+#define GIC_VPE_PEND_WD_SHF            0
+#define GIC_VPE_PEND_WD_MSK            (MSK(1) << GIC_VPE_PEND_WD_SHF)
+#define GIC_VPE_PEND_CMP_SHF           1
+#define GIC_VPE_PEND_CMP_MSK           (MSK(1) << GIC_VPE_PEND_CMP_SHF)
+#define GIC_VPE_PEND_TIMER_SHF         2
+#define GIC_VPE_PEND_TIMER_MSK         (MSK(1) << GIC_VPE_PEND_TIMER_SHF)
+#define GIC_VPE_PEND_PERFCOUNT_SHF     3
+#define GIC_VPE_PEND_PERFCOUNT_MSK     (MSK(1) << GIC_VPE_PEND_PERFCOUNT_SHF)
+#define GIC_VPE_PEND_SWINT0_SHF                4
+#define GIC_VPE_PEND_SWINT0_MSK                (MSK(1) << GIC_VPE_PEND_SWINT0_SHF)
+#define GIC_VPE_PEND_SWINT1_SHF                5
+#define GIC_VPE_PEND_SWINT1_MSK                (MSK(1) << GIC_VPE_PEND_SWINT1_SHF)
+
+/* GIC_VPE_RMASK Masks */
+#define GIC_VPE_RMASK_WD_SHF           0
+#define GIC_VPE_RMASK_WD_MSK           (MSK(1) << GIC_VPE_RMASK_WD_SHF)
+#define GIC_VPE_RMASK_CMP_SHF          1
+#define GIC_VPE_RMASK_CMP_MSK          (MSK(1) << GIC_VPE_RMASK_CMP_SHF)
+#define GIC_VPE_RMASK_TIMER_SHF                2
+#define GIC_VPE_RMASK_TIMER_MSK                (MSK(1) << GIC_VPE_RMASK_TIMER_SHF)
+#define GIC_VPE_RMASK_PERFCNT_SHF      3
+#define GIC_VPE_RMASK_PERFCNT_MSK      (MSK(1) << GIC_VPE_RMASK_PERFCNT_SHF)
+#define GIC_VPE_RMASK_SWINT0_SHF       4
+#define GIC_VPE_RMASK_SWINT0_MSK       (MSK(1) << GIC_VPE_RMASK_SWINT0_SHF)
+#define GIC_VPE_RMASK_SWINT1_SHF       5
+#define GIC_VPE_RMASK_SWINT1_MSK       (MSK(1) << GIC_VPE_RMASK_SWINT1_SHF)
+
+/* GIC_VPE_SMASK Masks */
+#define GIC_VPE_SMASK_WD_SHF           0
+#define GIC_VPE_SMASK_WD_MSK           (MSK(1) << GIC_VPE_SMASK_WD_SHF)
+#define GIC_VPE_SMASK_CMP_SHF          1
+#define GIC_VPE_SMASK_CMP_MSK          (MSK(1) << GIC_VPE_SMASK_CMP_SHF)
+#define GIC_VPE_SMASK_TIMER_SHF                2
+#define GIC_VPE_SMASK_TIMER_MSK                (MSK(1) << GIC_VPE_SMASK_TIMER_SHF)
+#define GIC_VPE_SMASK_PERFCNT_SHF      3
+#define GIC_VPE_SMASK_PERFCNT_MSK      (MSK(1) << GIC_VPE_SMASK_PERFCNT_SHF)
+#define GIC_VPE_SMASK_SWINT0_SHF       4
+#define GIC_VPE_SMASK_SWINT0_MSK       (MSK(1) << GIC_VPE_SMASK_SWINT0_SHF)
+#define GIC_VPE_SMASK_SWINT1_SHF       5
+#define GIC_VPE_SMASK_SWINT1_MSK       (MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
+
+/*
+ * Set the Mapping of Interrupt X to a VPE.
+ */
+#define GIC_SH_MAP_TO_VPE_SMASK(intr, vpe) \
+       GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe)), \
+                GIC_SH_MAP_TO_VPE_REG_BIT(vpe))
+
+struct gic_pcpu_mask {
+       DECLARE_BITMAP(pcpu_mask, GIC_NUM_INTRS);
+};
+
+struct gic_pending_regs {
+       DECLARE_BITMAP(pending, GIC_NUM_INTRS);
+};
+
+struct gic_intrmask_regs {
+       DECLARE_BITMAP(intrmask, GIC_NUM_INTRS);
+};
+
+/*
+ * Interrupt Meta-data specification. The ipiflag helps
+ * in building ipi_map.
+ */
+struct gic_intr_map {
+       unsigned int intrnum;   /* Ext Intr Num         */
+       unsigned int cpunum;    /* Directed to this CPU */
+       unsigned int pin;       /* Directed to this Pin */
+       unsigned int polarity;  /* Polarity : +/-       */
+       unsigned int trigtype;  /* Trigger  : Edge/Level */
+       unsigned int ipiflag;   /* Used for IPI?        */
+};
+
+extern void gic_init(unsigned long gic_base_addr,
+       unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
+       unsigned int intrmap_size, unsigned int irqbase);
+
+extern unsigned int gic_get_int(void);
+extern void gic_send_ipi(unsigned int intr);
+
+#endif /* _ASM_GICREGS_H */
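
To illustrate how the pieces above fit together, here is a hypothetical board table and init call (not part of the patch). The interrupt numbers, polarity and trigger values are placeholders; GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, GIC_CPU_INT* and MIPS_GIC_IRQ_BASE are the Malta definitions added later in this series.

static struct gic_intr_map board_gic_intr_map[] = {
	/* intrnum, cpunum, pin,          polarity, trigtype, ipiflag */
	{  0,       0,      GIC_CPU_INT0, 1,        0,        0 },
	{  1,       0,      GIC_CPU_INT1, 1,        0,        0 },
};

static void __init board_gic_setup(void)
{
	gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, board_gic_intr_map,
		 ARRAY_SIZE(board_gic_intr_map), MIPS_GIC_IRQ_BASE);
}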
index e62058b0d28c37c48d7a71c40c7271322fed5e63..f18d2816cbecd256a98f4b904614eba0c42ab1d6 100644 (file)
@@ -273,7 +273,7 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
  * memory-like regions on I/O busses.
  */
 #define ioremap_cachable(offset, size)                                 \
-       __ioremap_mode((offset), (size), PAGE_CACHABLE_DEFAULT)
+       __ioremap_mode((offset), (size), _page_cachable_default)
 
 /*
  * These two are MIPS specific ioremap variant.  ioremap_cacheable_cow
index 81602c8047eb582e7b8aa39559c1815c02a5ae42..a162268f17df42ba8794032ce4c0bacc4f534345 100644 (file)
@@ -99,8 +99,8 @@
 #define jmr3927_led_and_set(n/*0-16*/) jmr3927_ioc_reg_out((~(n)) & jmr3927_ioc_reg_in(JMR3927_IOC_LED_ADDR), JMR3927_IOC_LED_ADDR)
 
 /* DIPSW4 macro */
-#define jmr3927_dipsw1()       ((tx3927_pioptr->din & (1 << 11)) == 0)
-#define jmr3927_dipsw2()       ((tx3927_pioptr->din & (1 << 10)) == 0)
+#define jmr3927_dipsw1()       (gpio_get_value(11) == 0)
+#define jmr3927_dipsw2()       (gpio_get_value(10) == 0)
 #define jmr3927_dipsw3()       ((jmr3927_ioc_reg_in(JMR3927_IOC_DIPSW_ADDR) & 2) == 0)
 #define jmr3927_dipsw4()       ((jmr3927_ioc_reg_in(JMR3927_IOC_DIPSW_ADDR) & 1) == 0)
 
index 338f99882a39878f8625d1071c6a634a37450c72..fb580333c102b21499ffc86fac0b370d69ee1813 100644 (file)
@@ -314,6 +314,6 @@ struct tx3927_ccfg_reg {
 #define tx3927_ccfgptr         ((struct tx3927_ccfg_reg *)TX3927_CCFG_REG)
 #define tx3927_tmrptr(ch)      ((struct txx927_tmr_reg *)TX3927_TMR_REG(ch))
 #define tx3927_sioptr(ch)      ((struct txx927_sio_reg *)TX3927_SIO_REG(ch))
-#define tx3927_pioptr          ((struct txx927_pio_reg *)TX3927_PIO_REG)
+#define tx3927_pioptr          ((struct txx9_pio_reg __iomem *)TX3927_PIO_REG)
 
 #endif /* __ASM_TX3927_H */
index 0474fe8dac3fa6a14dd87813babd08562a2bfb37..25dcf2feb0952b5dfa1c7589c894831647f3e99c 100644 (file)
@@ -22,18 +22,6 @@ struct txx927_sio_reg {
        volatile unsigned long rfifo;
 };
 
-struct txx927_pio_reg {
-       volatile unsigned long dout;
-       volatile unsigned long din;
-       volatile unsigned long dir;
-       volatile unsigned long od;
-       volatile unsigned long flag[2];
-       volatile unsigned long pol;
-       volatile unsigned long intc;
-       volatile unsigned long maskcpu;
-       volatile unsigned long maskext;
-};
-
 /*
  * SIO
  */
index 5bb57bf2b9d7f357b72696848af734aa9b49ab30..a05555165d05b14192c8ed7f19411a49382dbfbf 100644 (file)
@@ -3,9 +3,8 @@
  * BRIEF MODULE DESCRIPTION
  *     Include file for Alchemy Semiconductor's Au1k CPU.
  *
- * Copyright 2000,2001 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- *             ppopov@mvista.com or source@mvista.com
+ * Copyright 2000-2001, 2006-2008 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc. <source@mvista.com>
  *
  *  This program is free software; you can redistribute  it and/or modify it
  *  under  the terms of  the GNU General  Public License as published by the
@@ -117,13 +116,6 @@ extern struct au1xxx_irqmap au1xxx_irq_map[];
 
 #endif /* !defined (_LANGUAGE_ASSEMBLY) */
 
-#ifdef CONFIG_PM
-/* no CP0 timer irq */
-#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4)
-#else
-#define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5)
-#endif
-
 /*
  * SDRAM Register Offsets
  */
@@ -1693,20 +1685,6 @@ enum soc_au1200_ints {
 #define IOMEM_RESOURCE_START  0x10000000
 #define IOMEM_RESOURCE_END    0xffffffff
 
-  /*
-   * Borrowed from the PPC arch:
-   * The following macro is used to lookup irqs in a standard table
-   * format for those PPC systems that do not already have PCI
-   * interrupts properly routed.
-   */
-  /* FIXME - double check this from asm-ppc/pci-bridge.h */
-#define PCI_IRQ_TABLE_LOOKUP                            \
-  ({ long _ctl_ = -1;                                 \
-      if (idsel >= min_idsel && idsel <= max_idsel && pin <= irqs_per_slot)    \
-              _ctl_ = pci_irq_table[idsel - min_idsel][pin-1];               \
-                     _ctl_; })
-
-
 #else /* Au1000 and Au1100 and Au1200 */
 
 /* don't allow any legacy ports probing */
index 89655c0cdcd6bb4e4760d344fb1c98872ce881c9..b493a5e46c639598f9066f0591a6d983b0073c57 100644 (file)
@@ -70,7 +70,6 @@ typedef struct
         ide_hwif_t              *hwif;
 #ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
         ide_drive_t             *drive;
-        u8                      white_list, black_list;
         struct dbdma_cmd        *dma_table_cpu;
         dma_addr_t              dma_table_dma;
 #endif
@@ -81,47 +80,6 @@ typedef struct
 #endif
 } _auide_hwif;
 
-#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
-/* HD white list */
-static const struct drive_list_entry dma_white_list [] = {
-/*
- * Hitachi
- */
-        { "HITACHI_DK14FA-20"    ,       NULL            },
-        { "HTS726060M9AT00"      ,       NULL            },
-/*
- * Maxtor
- */
-        { "Maxtor 6E040L0"      ,       NULL            },
-        { "Maxtor 6Y080P0"      ,       NULL            },
-        { "Maxtor 6Y160P0"      ,       NULL            },
-/*
- * Seagate
- */
-        { "ST3120026A"          ,       NULL            },
-        { "ST320014A"           ,       NULL            },
-        { "ST94011A"            ,       NULL            },
-        { "ST340016A"           ,       NULL            },
-/*
- * Western Digital
- */
-        { "WDC WD400UE-00HCT0"  ,       NULL            },
-        { "WDC WD400JB-00JJC0"  ,       NULL            },
-        { NULL                  ,       NULL            }
-};
-
-/* HD black list */
-static const struct drive_list_entry dma_black_list [] = {
-/*
- * Western Digital
- */
-        { "WDC WD100EB-00CGH0"  ,       NULL            },
-        { "WDC WD200BB-00AUA1"  ,       NULL            },
-        { "WDC AC24300L"        ,       NULL            },
-        { NULL                  ,       NULL            }
-};
-#endif
-
 /*******************************************************************************
 * PIO Mode timing calculation :                                                *
 *                                                                              *
index d2e28e64932ed8141d8f4b29df85f8739fb963a2..eedd048a7261d958c3eda305267b2a5c0f74f37c 100644 (file)
@@ -169,15 +169,15 @@ static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
 #define BCSR_INT_SD0INSERT     0x1000
 #define BCSR_INT_SD0EJECT      0x2000
 
-#define AU1XXX_SMC91111_PHYS_ADDR      (0x19000300)
-#define AU1XXX_SMC91111_IRQ                    DB1200_ETH_INT
-
-#define AU1XXX_ATA_PHYS_ADDR           (0x18800000)
-#define AU1XXX_ATA_REG_OFFSET          (5)
-#define AU1XXX_ATA_PHYS_LEN            (16 << AU1XXX_ATA_REG_OFFSET)
-#define AU1XXX_ATA_INT                 DB1200_IDE_INT
-#define AU1XXX_ATA_DDMA_REQ            DSCR_CMD0_DMA_REQ1;
-#define AU1XXX_ATA_RQSIZE              128
+#define SMC91C111_PHYS_ADDR    0x19000300
+#define SMC91C111_INT          DB1200_ETH_INT
+
+#define IDE_PHYS_ADDR          0x18800000
+#define IDE_REG_SHIFT          5
+#define IDE_PHYS_LEN           (16 << IDE_REG_SHIFT)
+#define IDE_INT                DB1200_IDE_INT
+#define IDE_DDMA_REQ           DSCR_CMD0_DMA_REQ1
+#define IDE_RQSIZE             128
 
 #define NAND_PHYS_ADDR   0x20000000
 
index 6eaf5efedf3a6667b508ff81d10d795b2d1e632e..e6b376bd9d06c1c296395f30db8bb397af5f195a 100644 (file)
@@ -1,12 +1,18 @@
 #ifndef __ASM_MACH_GENERIC_GPIO_H
 #define __ASM_MACH_GENERIC_GPIO_H
 
+#ifdef CONFIG_HAVE_GPIO_LIB
+#define gpio_get_value __gpio_get_value
+#define gpio_set_value __gpio_set_value
+#define gpio_cansleep  __gpio_cansleep
+#else
 int gpio_request(unsigned gpio, const char *label);
 void gpio_free(unsigned gpio);
 int gpio_direction_input(unsigned gpio);
 int gpio_direction_output(unsigned gpio, int value);
 int gpio_get_value(unsigned gpio);
 void gpio_set_value(unsigned gpio, int value);
+#endif
 int gpio_to_irq(unsigned gpio);
 int irq_to_gpio(unsigned irq);
 
index 372291f53fb9a2daf77af42141c27c91662cfb8d..7785bec732f267254bae3477b83a885e968bd928 100644 (file)
@@ -54,4 +54,6 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
        .nr_balance_failed      = 0,                    \
 }
 
+#include <asm-generic/topology.h>
+
 #endif /* _ASM_MACH_TOPOLOGY_H */
index edaa489b58f179213bc93c428c75ce6d762f0ad2..e2c6bcac3b4279933ed4b1419f92f35f972f4ddb 100644 (file)
@@ -182,15 +182,15 @@ static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
 #define SET_VCC_VPP(VCC, VPP, SLOT)\
        ((((VCC)<<2) | ((VPP)<<0)) << ((SLOT)*8))
 
-#define AU1XXX_SMC91111_PHYS_ADDR      (0x0D000300)
-#define AU1XXX_SMC91111_IRQ                    PB1200_ETH_INT
-
-#define AU1XXX_ATA_PHYS_ADDR           (0x0C800000)
-#define AU1XXX_ATA_REG_OFFSET          (5)
-#define AU1XXX_ATA_PHYS_LEN            (16 << AU1XXX_ATA_REG_OFFSET)
-#define AU1XXX_ATA_INT                 PB1200_IDE_INT
-#define AU1XXX_ATA_DDMA_REQ            DSCR_CMD0_DMA_REQ1;
-#define AU1XXX_ATA_RQSIZE              128
+#define SMC91C111_PHYS_ADDR    0x0D000300
+#define SMC91C111_INT          PB1200_ETH_INT
+
+#define IDE_PHYS_ADDR          0x0C800000
+#define IDE_REG_SHIFT          5
+#define IDE_PHYS_LEN           (16 << IDE_REG_SHIFT)
+#define IDE_INT                PB1200_IDE_INT
+#define IDE_DDMA_REQ           DSCR_CMD0_DMA_REQ1
+#define IDE_RQSIZE             128
 
 #define NAND_PHYS_ADDR   0x1C000000
 
index 1c39d339521e8220fd6bbb16fc29e26f49717705..33407bee4e73691a8040bc5d3fc3771d0c2b07e1 100644 (file)
@@ -68,6 +68,7 @@
 #define MIPS_REVISION_CORID_CORE_FPGA3     9
 #define MIPS_REVISION_CORID_CORE_24K       10
 #define MIPS_REVISION_CORID_CORE_FPGA4     11
+#define MIPS_REVISION_CORID_CORE_FPGA5     12
 
 /**** Artificial corid defines ****/
 /*
diff --git a/include/asm-mips/mips-boards/launch.h b/include/asm-mips/mips-boards/launch.h
new file mode 100644 (file)
index 0000000..d8ae7f9
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ *
+ */
+
+#ifndef _ASSEMBLER_
+
+struct cpulaunch {
+    unsigned long      pc;
+    unsigned long      gp;
+    unsigned long      sp;
+    unsigned long      a0;
+    unsigned long      _pad[3]; /* pad to cache line size to avoid thrashing */
+    unsigned long      flags;
+};
+
+#else
+
+#define LOG2CPULAUNCH  5
+#define        LAUNCH_PC       0
+#define        LAUNCH_GP       4
+#define        LAUNCH_SP       8
+#define        LAUNCH_A0       12
+#define        LAUNCH_FLAGS    28
+
+#endif
+
+#define LAUNCH_FREADY  1
+#define LAUNCH_FGO     2
+#define LAUNCH_FGONE   4
+
+#define CPULAUNCH      0x00000f00
+#define NCPULAUNCH     8
+
+/* Polling period in count cycles for secondary CPUs */
+#define LAUNCHPERIOD   10000
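
The struct and the LAUNCH_* offsets describe a small per-CPU mailbox at physical address CPULAUNCH that the boot CPU uses to hand a starting pc/sp/gp to a parked secondary. The sketch below shows one plausible master-side sequence; the KSEG0ADDR() conversion, the barrier placement and the exact hand-off protocol are assumptions, not part of this header.

static void launch_secondary_sketch(int cpu, unsigned long pc,
				    unsigned long sp, unsigned long gp)
{
	struct cpulaunch *l = (struct cpulaunch *)KSEG0ADDR(CPULAUNCH) + cpu;

	while (!(l->flags & LAUNCH_FREADY))
		;			/* wait for the slave to park itself */
	l->pc = pc;
	l->sp = sp;
	l->gp = gp;
	smp_wmb();			/* publish registers before the go flag */
	l->flags |= LAUNCH_FGO;
	while (!(l->flags & LAUNCH_FGONE))
		;			/* slave confirms it has jumped away */
}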
index 93bf4e51b8a4a7f28d201854ba723c354db04d1d..c1891578fa65893bf423a2e2e841f5dbf4c1c315 100644 (file)
@@ -51,6 +51,29 @@ static inline unsigned long get_msc_port_base(unsigned long reg)
        return (unsigned long) ioremap(addr, 0x10000);
 }
 
+/*
+ * GCMP Specific definitions
+ */
+#define GCMP_BASE_ADDR                 0x1fbf8000
+#define GCMP_ADDRSPACE_SZ              (256 * 1024)
+
+/*
+ * GIC Specific definitions
+ */
+#define GIC_BASE_ADDR                  0x1bdc0000
+#define GIC_ADDRSPACE_SZ               (128 * 1024)
+
+/*
+ * MSC01 BIU Specific definitions
+ * FIXME: These should be elsewhere?
+ */
+#define MSC01_BIU_REG_BASE             0x1bc80000
+#define MSC01_BIU_ADDRSPACE_SZ         (256 * 1024)
+#define MSC01_SC_CFG_OFS               0x0110
+#define MSC01_SC_CFG_GICPRES_MSK       0x00000004
+#define MSC01_SC_CFG_GICPRES_SHF       2
+#define MSC01_SC_CFG_GICENA_SHF                3
+
 /*
  * Malta RTC-device indirect register access.
  */
index 7461318f1cd10a12c77823bec2beb86ae5d029a3..cea872fc6f5c0d1ae92f00f920f7f991c8b9b806 100644 (file)
@@ -39,7 +39,9 @@
 #define MIPSCPU_INT_I8259A     MIPSCPU_INT_MB0
 #define MIPSCPU_INT_MB1                3
 #define MIPSCPU_INT_SMI                MIPSCPU_INT_MB1
+#define MIPSCPU_INT_IPI0       MIPSCPU_INT_MB1 /* GIC IPI */
 #define MIPSCPU_INT_MB2                4
+#define MIPSCPU_INT_IPI1       MIPSCPU_INT_MB2 /* GIC IPI */
 #define MIPSCPU_INT_MB3                5
 #define MIPSCPU_INT_COREHI     MIPSCPU_INT_MB3
 #define MIPSCPU_INT_MB4                6
 #define MSC01E_INT_PERFCTR     10
 #define MSC01E_INT_CPUCTR      11
 
+/* GIC's Nomenclature for Core Interrupt Pins on the Malta */
+#define GIC_CPU_INT0           0 /* Core Interrupt 2   */
+#define GIC_CPU_INT1           1 /* .                  */
+#define GIC_CPU_INT2           2 /* .                  */
+#define GIC_CPU_INT3           3 /* .                  */
+#define GIC_CPU_INT4           4 /* .                  */
+#define GIC_CPU_INT5           5 /* Core Interrupt 5   */
+
+#define GIC_EXT_INTR(x)                x
+
+/* Dummy data */
+#define X                      0xdead
+
+/* External Interrupts used for IPI */
+#define GIC_IPI_EXT_INTR_RESCHED_VPE0  16
+#define GIC_IPI_EXT_INTR_CALLFNC_VPE0  17
+#define GIC_IPI_EXT_INTR_RESCHED_VPE1  18
+#define GIC_IPI_EXT_INTR_CALLFNC_VPE1  19
+#define GIC_IPI_EXT_INTR_RESCHED_VPE2  20
+#define GIC_IPI_EXT_INTR_CALLFNC_VPE2  21
+#define GIC_IPI_EXT_INTR_RESCHED_VPE3  22
+#define GIC_IPI_EXT_INTR_CALLFNC_VPE3  23
+
+#define MIPS_GIC_IRQ_BASE      (MIPS_CPU_IRQ_BASE + 8)
+
 #ifndef __ASSEMBLY__
 extern void maltaint_init(void);
 #endif
diff --git a/include/asm-mips/mips-boards/maltasmp.h b/include/asm-mips/mips-boards/maltasmp.h
new file mode 100644 (file)
index 0000000..8d7e955
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * There are several SMP models supported.
+ * SMTC is mutually exclusive with the other options (at the moment).
+ */
+#if defined(CONFIG_MIPS_MT_SMTC)
+#define malta_smtc     1
+#define malta_cmp      0
+#define malta_smvp     0
+#else
+#define malta_smtc     0
+#if defined(CONFIG_MIPS_CMP)
+extern int gcmp_present;
+#define malta_cmp      gcmp_present
+#else
+#define malta_cmp      0
+#endif
+/* FIXME: should become CONFIG_MIPS_MT_SMVP */
+#if defined(CONFIG_MIPS_MT_SMP)
+#define malta_smvp     1
+#else
+#define malta_smvp     0
+#endif
+#endif
+
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+
+/* malta_smtc */
+#include <asm/smtc.h>
+#include <asm/smtc_ipi.h>
+
+/* malta_cmp */
+#include <asm/cmp.h>
+
+/* malta_smvp */
+#include <asm/smvp.h>
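
The three flags are intended to be consumed together with the smp_ops declarations this series adds. A hypothetical selection routine (the real logic lives in the Malta platform code) might look like:

void __init example_plat_smp_setup(void)
{
	if (malta_cmp)
		register_smp_ops(&cmp_smp_ops);		/* Coherency Manager present */
	else if (malta_smvp)
		register_smp_ops(&vsmp_smp_ops);	/* one VPE per logical CPU */
	else if (malta_smtc)
		;					/* SMTC uses its own bring-up path */
	else
		register_smp_ops(&up_smp_ops);
}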
index 5a2f8a3a6a1f1b867fc102ceceac1573060cabdb..c9420aa97e3232dc8b96ccf2c960e6d0aa8f1f00 100644 (file)
@@ -197,8 +197,8 @@ static inline void __raw_evpe(void)
        "       .set    pop                                             \n");
 }
 
-/* Enable multiMT if previous suggested it should be.
-   EMT_ENABLE to force */
+/* Enable virtual processor execution if the previous state suggested it
+   should be.  EVPE_ENABLE to force. */
 
 #define EVPE_ENABLE MVPCONTROL_EVP
 
@@ -238,8 +238,8 @@ static inline void __raw_emt(void)
        "       .set    reorder");
 }
 
-/* enable multiVPE if previous suggested it should be.
-   EVPE_ENABLE to force */
+/* enable multi-threaded execution if the previous state suggested it
+   should be.  EMT_ENABLE to force. */
 
 #define EMT_ENABLE VPECONTROL_TE
 
index ceefe027c76104176a985751ea45d76a972db76f..4396e9ffd418960ed8aa15e17bc32e60587efd57 100644 (file)
@@ -107,7 +107,7 @@ static inline void pmd_clear(pmd_t *pmdp)
        pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
 }
 
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 #define pte_page(x)            pfn_to_page(pte_pfn(x))
 #define pte_pfn(x)             ((unsigned long)((x).pte_high >> 6))
 static inline pte_t
@@ -130,7 +130,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 #define pte_pfn(x)             ((unsigned long)((x).pte >> PAGE_SHIFT))
 #define pfn_pte(pfn, prot)     __pte(((unsigned long long)(pfn) << PAGE_SHIFT) | pgprot_val(prot))
 #endif
-#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1) */
+#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
 
 #define __pgd_offset(address)  pgd_index(address)
 #define __pud_offset(address)  (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
index 7494ba91112a77dca294aa9a01a91498c8041125..60e2f9338fcd6a6bd96adc064dad82eb8a763e2f 100644 (file)
  * unpredictable things.  The code (when it is written) to deal with
  * this problem will be in the update_mmu_cache() code for the r4k.
  */
-#if defined(CONFIG_CPU_MIPS32_R1) && defined(CONFIG_64BIT_PHYS_ADDR)
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 
 #define _PAGE_PRESENT               (1<<6)  /* implemented in software */
 #define _PAGE_READ                  (1<<7)  /* implemented in software */
 #define _PAGE_WRITE                 (1<<8)  /* implemented in software */
 #define _PAGE_ACCESSED              (1<<9)  /* implemented in software */
 #define _PAGE_MODIFIED              (1<<10) /* implemented in software */
-#define _PAGE_FILE                  (1<<10)  /* set:pagecache unset:swap */
+#define _PAGE_FILE                  (1<<10) /* set:pagecache unset:swap */
 
 #define _PAGE_R4KBUG                (1<<0)  /* workaround for r4k bug  */
 #define _PAGE_GLOBAL                (1<<0)
 #define _PAGE_SILENT_READ           (1<<1)  /* synonym                 */
 #define _PAGE_DIRTY                 (1<<2)  /* The MIPS dirty bit      */
 #define _PAGE_SILENT_WRITE          (1<<2)
+#define _CACHE_SHIFT                3
 #define _CACHE_MASK                 (7<<3)
 
-/* MIPS32 defines only values 2 and 3. The rest are implementation
- * dependent.
- */
-#define _CACHE_UNCACHED             (2<<3)
-#define _CACHE_CACHABLE_NONCOHERENT (3<<3)
-#define _CACHE_CACHABLE_COW         (3<<3)  /* Au1x                    */
-
 #else
 
 #define _PAGE_PRESENT               (1<<0)  /* implemented in software */
 #define _PAGE_SILENT_WRITE          (1<<10)
 #define _CACHE_UNCACHED             (1<<11)
 #define _CACHE_MASK                 (1<<11)
-#define _CACHE_CACHABLE_NONCOHERENT 0
 
 #else
+
 #define _PAGE_R4KBUG                (1<<5)  /* workaround for r4k bug  */
 #define _PAGE_GLOBAL                (1<<6)
 #define _PAGE_VALID                 (1<<7)
 #define _PAGE_SILENT_READ           (1<<7)  /* synonym                 */
 #define _PAGE_DIRTY                 (1<<8)  /* The MIPS dirty bit      */
 #define _PAGE_SILENT_WRITE          (1<<8)
+#define _CACHE_SHIFT               9
 #define _CACHE_MASK                 (7<<9)
 
-#ifdef CONFIG_CPU_SB1
+#endif
+#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
+
+
+/*
+ * Cache attributes
+ */
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+
+#define _CACHE_CACHABLE_NONCOHERENT 0
+
+#elif defined(CONFIG_CPU_SB1)
 
 /* No penalty for being coherent on the SB1, so just
    use it for "noncoherent" spaces, too.  Shouldn't hurt. */
 
-#define _CACHE_UNCACHED             (2<<9)
-#define _CACHE_CACHABLE_COW         (5<<9)
-#define _CACHE_CACHABLE_NONCOHERENT (5<<9)
-#define _CACHE_UNCACHED_ACCELERATED (7<<9)
+#define _CACHE_UNCACHED             (2<<_CACHE_SHIFT)
+#define _CACHE_CACHABLE_COW         (5<<_CACHE_SHIFT)
+#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)
+#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
 
 #elif defined(CONFIG_CPU_RM9000)
 
-#define _CACHE_WT                      (0 << 9)
-#define _CACHE_WTWA                    (1 << 9)
-#define _CACHE_UC_B                    (2 << 9)
-#define _CACHE_WB                      (3 << 9)
-#define _CACHE_CWBEA                   (4 << 9)
-#define _CACHE_CWB                     (5 << 9)
-#define _CACHE_UCNB                    (6 << 9)
-#define _CACHE_FPC                     (7 << 9)
+#define _CACHE_WT                  (0<<_CACHE_SHIFT)
+#define _CACHE_WTWA                (1<<_CACHE_SHIFT)
+#define _CACHE_UC_B                (2<<_CACHE_SHIFT)
+#define _CACHE_WB                  (3<<_CACHE_SHIFT)
+#define _CACHE_CWBEA               (4<<_CACHE_SHIFT)
+#define _CACHE_CWB                 (5<<_CACHE_SHIFT)
+#define _CACHE_UCNB                (6<<_CACHE_SHIFT)
+#define _CACHE_FPC                 (7<<_CACHE_SHIFT)
 
-#define _CACHE_UNCACHED                        _CACHE_UC_B
-#define _CACHE_CACHABLE_NONCOHERENT    _CACHE_WB
+#define _CACHE_UNCACHED                    _CACHE_UC_B
+#define _CACHE_CACHABLE_NONCOHERENT _CACHE_WB
 
 #else
 
-#define _CACHE_CACHABLE_NO_WA       (0<<9)  /* R4600 only              */
-#define _CACHE_CACHABLE_WA          (1<<9)  /* R4600 only              */
-#define _CACHE_UNCACHED             (2<<9)  /* R4[0246]00              */
-#define _CACHE_CACHABLE_NONCOHERENT (3<<9)  /* R4[0246]00              */
-#define _CACHE_CACHABLE_CE          (4<<9)  /* R4[04]00MC only         */
-#define _CACHE_CACHABLE_COW         (5<<9)  /* R4[04]00MC only         */
-#define _CACHE_CACHABLE_CUW         (6<<9)  /* R4[04]00MC only         */
-#define _CACHE_UNCACHED_ACCELERATED (7<<9)  /* R10000 only             */
+#define _CACHE_CACHABLE_NO_WA      (0<<_CACHE_SHIFT)  /* R4600 only      */
+#define _CACHE_CACHABLE_WA         (1<<_CACHE_SHIFT)  /* R4600 only      */
+#define _CACHE_UNCACHED             (2<<_CACHE_SHIFT)  /* R4[0246]00      */
+#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)  /* R4[0246]00      */
+#define _CACHE_CACHABLE_CE          (4<<_CACHE_SHIFT)  /* R4[04]00MC only */
+#define _CACHE_CACHABLE_COW         (5<<_CACHE_SHIFT)  /* R4[04]00MC only */
+#define _CACHE_CACHABLE_COHERENT    (5<<_CACHE_SHIFT)  /* MIPS32R2 CMP    */
+#define _CACHE_CACHABLE_CUW         (6<<_CACHE_SHIFT)  /* R4[04]00MC only */
+#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)  /* R10000 only     */
 
 #endif
-#endif
-#endif /* defined(CONFIG_CPU_MIPS32_R1) && defined(CONFIG_64BIT_PHYS_ADDR) */
 
 #define __READABLE     (_PAGE_READ | _PAGE_SILENT_READ | _PAGE_ACCESSED)
 #define __WRITEABLE    (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
 
 #define _PAGE_CHG_MASK  (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
 
-#ifdef CONFIG_MIPS_UNCACHED
-#define PAGE_CACHABLE_DEFAULT  _CACHE_UNCACHED
-#elif defined(CONFIG_DMA_NONCOHERENT)
-#define PAGE_CACHABLE_DEFAULT  _CACHE_CACHABLE_NONCOHERENT
-#elif defined(CONFIG_CPU_RM9000)
-#define PAGE_CACHABLE_DEFAULT  _CACHE_CWB
-#else
-#define PAGE_CACHABLE_DEFAULT  _CACHE_CACHABLE_COW
-#endif
-
-#if defined(CONFIG_CPU_MIPS32_R1) && defined(CONFIG_64BIT_PHYS_ADDR)
-#define CONF_CM_DEFAULT                (PAGE_CACHABLE_DEFAULT >> 3)
-#else
-#define CONF_CM_DEFAULT                (PAGE_CACHABLE_DEFAULT >> 9)
-#endif
+#define CONF_CM_DEFAULT                (PAGE_CACHABLE_DEFAULT>>_CACHE_SHIFT)
 
 #endif /* _ASM_PGTABLE_BITS_H */
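
With every cacheability attribute now expressed as a coherency code shifted by _CACHE_SHIFT, converting between a page-table attribute and the CP0 Config coherency field is a single shift in either direction, which is all CONF_CM_DEFAULT does. A small illustration using only the definitions above:

static inline unsigned long cache_attr_to_cm(unsigned long attr)
{
	/* e.g. _CACHE_UNCACHED >> _CACHE_SHIFT == 2 on R4k-style cores */
	return (attr & _CACHE_MASK) >> _CACHE_SHIFT;
}

static inline unsigned long cm_to_cache_attr(unsigned long cm)
{
	return (cm << _CACHE_SHIFT) & _CACHE_MASK;
}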
index 17a7703a296961858502ee636a15a7ccaecbe390..2f597eea44482a269c896c62aef017ca28ca4d3d 100644 (file)
@@ -23,15 +23,15 @@ struct vm_area_struct;
 
 #define PAGE_NONE      __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
 #define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
-                       PAGE_CACHABLE_DEFAULT)
+                                _page_cachable_default)
 #define PAGE_COPY      __pgprot(_PAGE_PRESENT | _PAGE_READ | \
-                       PAGE_CACHABLE_DEFAULT)
+                                _page_cachable_default)
 #define PAGE_READONLY  __pgprot(_PAGE_PRESENT | _PAGE_READ | \
-                       PAGE_CACHABLE_DEFAULT)
+                                _page_cachable_default)
 #define PAGE_KERNEL    __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
-                       _PAGE_GLOBAL | PAGE_CACHABLE_DEFAULT)
+                                _PAGE_GLOBAL | _page_cachable_default)
 #define PAGE_USERIO    __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
-                       PAGE_CACHABLE_DEFAULT)
+                                _page_cachable_default)
 #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
                        __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
 
@@ -40,23 +40,30 @@ struct vm_area_struct;
  * read. Also, write permissions imply read permissions. This is the closest
  * we can get by reasonable means..
  */
-#define __P000 PAGE_NONE
-#define __P001 PAGE_READONLY
-#define __P010 PAGE_COPY
-#define __P011 PAGE_COPY
-#define __P100 PAGE_READONLY
-#define __P101 PAGE_READONLY
-#define __P110 PAGE_COPY
-#define __P111 PAGE_COPY
-
-#define __S000 PAGE_NONE
-#define __S001 PAGE_READONLY
-#define __S010 PAGE_SHARED
-#define __S011 PAGE_SHARED
-#define __S100 PAGE_READONLY
-#define __S101 PAGE_READONLY
-#define __S110 PAGE_SHARED
-#define __S111 PAGE_SHARED
+
+/*
+ * Dummy values to fill the table in mmap.c
+ * The real values will be generated at runtime
+ */
+#define __P000 __pgprot(0)
+#define __P001 __pgprot(0)
+#define __P010 __pgprot(0)
+#define __P011 __pgprot(0)
+#define __P100 __pgprot(0)
+#define __P101 __pgprot(0)
+#define __P110 __pgprot(0)
+#define __P111 __pgprot(0)
+
+#define __S000 __pgprot(0)
+#define __S001 __pgprot(0)
+#define __S010 __pgprot(0)
+#define __S011 __pgprot(0)
+#define __S100 __pgprot(0)
+#define __S101 __pgprot(0)
+#define __S110 __pgprot(0)
+#define __S111 __pgprot(0)
+
+extern unsigned long _page_cachable_default;
 
 /*
  * ZERO_PAGE is a global shared page that is always zero; used
@@ -79,7 +86,7 @@ extern void paging_init(void);
 #define pmd_page(pmd)          (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
 #define pmd_page_vaddr(pmd)    pmd_val(pmd)
 
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 
 #define pte_none(pte)          (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
 #define pte_present(pte)       ((pte).pte_low & _PAGE_PRESENT)
@@ -182,7 +189,7 @@ extern pgd_t swapper_pg_dir[];
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 static inline int pte_write(pte_t pte) { return pte.pte_low & _PAGE_WRITE; }
 static inline int pte_dirty(pte_t pte) { return pte.pte_low & _PAGE_MODIFIED; }
 static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
@@ -285,6 +292,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
        return pte;
 }
 #endif
+static inline int pte_special(pte_t pte)       { return 0; }
+static inline pte_t pte_mkspecial(pte_t pte)   { return pte; }
 
 /*
  * Macro to mark a page protection value as "uncacheable".  Note
@@ -309,7 +318,7 @@ static inline pgprot_t pgprot_noncached(pgprot_t _prot)
  */
 #define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
 
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        pte.pte_low  &= _PAGE_CHG_MASK;
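
The __P/__S entries earlier in this file are now zero-filled placeholders; the intent is that the real protection values are composed at boot, once the preferred cache mode of the running CPU is known, by setting _page_cachable_default and rebuilding protection_map[]. A sketch of what that boot-time fixup implies; the function name and the chosen cache mode are assumptions, and the real code lives in arch/mips/mm.

extern pgprot_t protection_map[16];	/* declared in linux/mm.h */

static void __init setup_protection_map_sketch(void)
{
	_page_cachable_default = _CACHE_CACHABLE_NONCOHERENT;	/* detected mode */

	protection_map[0] = __pgprot(_PAGE_PRESENT | _page_cachable_default);
	protection_map[1] = __pgprot(_PAGE_PRESENT | _PAGE_READ |
				     _page_cachable_default);
	/* ... the remaining __P/__S slots are filled the same way ... */
}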
diff --git a/include/asm-mips/r4k-timer.h b/include/asm-mips/r4k-timer.h
new file mode 100644 (file)
index 0000000..a37d12b
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 by Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_R4K_TYPES_H
+#define __ASM_R4K_TYPES_H
+
+#include <linux/compiler.h>
+
+#ifdef CONFIG_SYNC_R4K
+
+extern void synchronise_count_master(void);
+extern void synchronise_count_slave(void);
+
+#else
+
+static inline void synchronise_count_master(void)
+{
+}
+
+static inline void synchronise_count_slave(void)
+{
+}
+
+#endif
+
+#endif /* __ASM_R4K_TYPES_H */
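
A sketch of where the two hooks are expected to be called during SMP bring-up: the boot processor runs the master side while each secondary runs the slave side, so that CP0 Count ends up aligned across cores when CONFIG_SYNC_R4K is set; with the option disabled both calls collapse to the empty inlines above. The exact call sites are an assumption.

void __init example_smp_cpus_done(unsigned int max_cpus)
{
	synchronise_count_master();	/* boot CPU hands out its Count value */
}

void example_start_secondary(void)
{
	synchronise_count_slave();	/* secondary lines up with the master */
}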
index b17fdfb5d8187551faac0e87d1b850bb86cbe5ae..43c207e72a637042902b43aa871131923ca52fc2 100644 (file)
@@ -51,6 +51,7 @@ static inline void register_smp_ops(struct plat_smp_ops *ops)
 #endif /* !CONFIG_SMP */
 
 extern struct plat_smp_ops up_smp_ops;
+extern struct plat_smp_ops cmp_smp_ops;
 extern struct plat_smp_ops vsmp_smp_ops;
 
 #endif /* __ASM_SMP_OPS_H */
index ff3e8936b493d8b7137ab991fe84fd9e1bdebc19..3639b28f80db84acbdea9672eb705fa8976aca7d 100644 (file)
@@ -44,6 +44,7 @@ extern int mipsmt_build_cpu_map(int startslot);
 extern void mipsmt_prepare_cpus(void);
 extern void smtc_smp_finish(void);
 extern void smtc_boot_secondary(int cpu, struct task_struct *t);
+extern void smtc_cpus_done(void);
 
 /*
  * Sharing the TLB between multiple VPEs means that the
diff --git a/include/asm-mips/smvp.h b/include/asm-mips/smvp.h
new file mode 100644 (file)
index 0000000..0d0e80a
--- /dev/null
@@ -0,0 +1,19 @@
+#ifndef _ASM_SMVP_H
+#define _ASM_SMVP_H
+
+/*
+ * Definitions for SMVP multitasking on MIPS MT cores
+ */
+struct task_struct;
+
+extern void smvp_smp_setup(void);
+extern void smvp_smp_finish(void);
+extern void smvp_boot_secondary(int cpu, struct task_struct *t);
+extern void smvp_init_secondary(void);
+extern void smvp_smp_finish(void);
+extern void smvp_cpus_done(void);
+extern void smvp_prepare_cpus(unsigned int max_cpus);
+
+/* This is platform specific */
+extern void smvp_send_ipi(int cpu, unsigned int action);
+#endif /*  _ASM_SMVP_H */
index d02e019b01272afd26e89ec09b0aba02580d64e8..e5dbde625ec205fbc1eebe46f5e60ca5ff07376a 100644 (file)
@@ -23,5 +23,7 @@ extern int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
 
 extern void (*board_nmi_handler_setup)(void);
 extern void (*board_ejtag_handler_setup)(void);
+extern void (*board_bind_eic_interrupt)(int irq, int regset);
+extern void (*board_watchpoint_handler)(struct pt_regs *regs);
 
 #endif /* _ASM_TRAPS_H */
index b180488dcdc49c628e10ebd9a61fd60d3c3c92d6..dfed7beb533fc392effee17408ebbb0dc1618a9b 100644 (file)
 #define RBTX4938_INTF_MODEM    (1 << RBTX4938_INTB_MODEM)
 #define RBTX4938_INTF_SWINT    (1 << RBTX4938_INTB_SWINT)
 
-#define rbtx4938_fpga_rev_ptr  \
-       ((volatile unsigned char *)RBTX4938_FPGA_REV_ADDR)
-#define rbtx4938_led_ptr       \
-       ((volatile unsigned char *)RBTX4938_LED_ADDR)
-#define rbtx4938_dipsw_ptr     \
-       ((volatile unsigned char *)RBTX4938_DIPSW_ADDR)
-#define rbtx4938_bdipsw_ptr    \
-       ((volatile unsigned char *)RBTX4938_BDIPSW_ADDR)
-#define rbtx4938_imask_ptr     \
-       ((volatile unsigned char *)RBTX4938_IMASK_ADDR)
-#define rbtx4938_imask2_ptr    \
-       ((volatile unsigned char *)RBTX4938_IMASK2_ADDR)
-#define rbtx4938_intpol_ptr    \
-       ((volatile unsigned char *)RBTX4938_INTPOL_ADDR)
-#define rbtx4938_istat_ptr     \
-       ((volatile unsigned char *)RBTX4938_ISTAT_ADDR)
-#define rbtx4938_istat2_ptr    \
-       ((volatile unsigned char *)RBTX4938_ISTAT2_ADDR)
-#define rbtx4938_imstat_ptr    \
-       ((volatile unsigned char *)RBTX4938_IMSTAT_ADDR)
-#define rbtx4938_imstat2_ptr   \
-       ((volatile unsigned char *)RBTX4938_IMSTAT2_ADDR)
-#define rbtx4938_softint_ptr   \
-       ((volatile unsigned char *)RBTX4938_SOFTINT_ADDR)
-#define rbtx4938_piosel_ptr    \
-       ((volatile unsigned char *)RBTX4938_PIOSEL_ADDR)
-#define rbtx4938_spics_ptr     \
-       ((volatile unsigned char *)RBTX4938_SPICS_ADDR)
-#define rbtx4938_sfpwr_ptr     \
-       ((volatile unsigned char *)RBTX4938_SFPWR_ADDR)
-#define rbtx4938_sfvol_ptr     \
-       ((volatile unsigned char *)RBTX4938_SFVOL_ADDR)
-#define rbtx4938_softreset_ptr \
-       ((volatile unsigned char *)RBTX4938_SOFTRESET_ADDR)
-#define rbtx4938_softresetlock_ptr     \
-       ((volatile unsigned char *)RBTX4938_SOFTRESETLOCK_ADDR)
-#define rbtx4938_pcireset_ptr  \
-       ((volatile unsigned char *)RBTX4938_PCIRESET_ADDR)
+#define rbtx4938_fpga_rev_addr ((__u8 __iomem *)RBTX4938_FPGA_REV_ADDR)
+#define rbtx4938_led_addr      ((__u8 __iomem *)RBTX4938_LED_ADDR)
+#define rbtx4938_dipsw_addr    ((__u8 __iomem *)RBTX4938_DIPSW_ADDR)
+#define rbtx4938_bdipsw_addr   ((__u8 __iomem *)RBTX4938_BDIPSW_ADDR)
+#define rbtx4938_imask_addr    ((__u8 __iomem *)RBTX4938_IMASK_ADDR)
+#define rbtx4938_imask2_addr   ((__u8 __iomem *)RBTX4938_IMASK2_ADDR)
+#define rbtx4938_intpol_addr   ((__u8 __iomem *)RBTX4938_INTPOL_ADDR)
+#define rbtx4938_istat_addr    ((__u8 __iomem *)RBTX4938_ISTAT_ADDR)
+#define rbtx4938_istat2_addr   ((__u8 __iomem *)RBTX4938_ISTAT2_ADDR)
+#define rbtx4938_imstat_addr   ((__u8 __iomem *)RBTX4938_IMSTAT_ADDR)
+#define rbtx4938_imstat2_addr  ((__u8 __iomem *)RBTX4938_IMSTAT2_ADDR)
+#define rbtx4938_softint_addr  ((__u8 __iomem *)RBTX4938_SOFTINT_ADDR)
+#define rbtx4938_piosel_addr   ((__u8 __iomem *)RBTX4938_PIOSEL_ADDR)
+#define rbtx4938_spics_addr    ((__u8 __iomem *)RBTX4938_SPICS_ADDR)
+#define rbtx4938_sfpwr_addr    ((__u8 __iomem *)RBTX4938_SFPWR_ADDR)
+#define rbtx4938_sfvol_addr    ((__u8 __iomem *)RBTX4938_SFVOL_ADDR)
+#define rbtx4938_softreset_addr        ((__u8 __iomem *)RBTX4938_SOFTRESET_ADDR)
+#define rbtx4938_softresetlock_addr    \
+                               ((__u8 __iomem *)RBTX4938_SOFTRESETLOCK_ADDR)
+#define rbtx4938_pcireset_addr ((__u8 __iomem *)RBTX4938_PCIRESET_ADDR)
 
 /*
  * IRQ mappings
index f7c448b90578f9dee0c36d07e68c8e97c8543df6..e8807f5c61e9bb2ee012a90f775950208cd64373 100644 (file)
@@ -13,8 +13,6 @@
 #ifndef __ASM_TX_BOARDS_TX4938_H
 #define __ASM_TX_BOARDS_TX4938_H
 
-#include <asm/tx4938/tx4938_mips.h>
-
 #define tx4938_read_nfmc(addr) (*(volatile unsigned int *)(addr))
 #define tx4938_write_nfmc(b, addr) (*(volatile unsigned int *)(addr)) = (b)
 
 #define TX4938_ACLC_REG                (TX4938_REG_BASE + 0xf700)
 #define TX4938_SPI_REG         (TX4938_REG_BASE + 0xf800)
 
-#ifndef _LANGUAGE_ASSEMBLY
-#include <asm/byteorder.h>
-
-#define TX4938_MKA(x) ((u32)( ((u32)(TX4938_REG_BASE)) | ((u32)(x)) ))
-
-#define TX4938_RD08( reg      )   (*(vu08*)(reg))
-#define TX4938_WR08( reg, val )  ((*(vu08*)(reg))=(val))
-
-#define TX4938_RD16( reg      )   (*(vu16*)(reg))
-#define TX4938_WR16( reg, val )  ((*(vu16*)(reg))=(val))
-
-#define TX4938_RD32( reg      )   (*(vu32*)(reg))
-#define TX4938_WR32( reg, val )  ((*(vu32*)(reg))=(val))
-
-#define TX4938_RD64( reg      )   (*(vu64*)(reg))
-#define TX4938_WR64( reg, val )  ((*(vu64*)(reg))=(val))
-
-#define TX4938_RD( reg      ) TX4938_RD32( reg )
-#define TX4938_WR( reg, val ) TX4938_WR32( reg, val )
-
-#endif /* !__ASSEMBLY__ */
-
 #ifdef __ASSEMBLY__
 #define _CONST64(c)    c
 #else
@@ -261,18 +237,6 @@ struct tx4938_sio_reg {
        volatile unsigned long rfifo;
 };
 
-struct tx4938_pio_reg {
-       volatile unsigned long dout;
-       volatile unsigned long din;
-       volatile unsigned long dir;
-       volatile unsigned long od;
-       volatile unsigned long flag[2];
-       volatile unsigned long pol;
-       volatile unsigned long intc;
-       volatile unsigned long maskcpu;
-       volatile unsigned long maskext;
-};
-
 struct tx4938_ndfmc_reg {
        endian_def_l2(unused0, dtr);
        endian_def_l2(unused1, mcr);
@@ -642,7 +606,7 @@ struct tx4938_ccfg_reg {
 #define tx4938_pcic1ptr                ((struct tx4938_pcic_reg *)TX4938_PCIC1_REG)
 #define tx4938_ccfgptr         ((struct tx4938_ccfg_reg *)TX4938_CCFG_REG)
 #define tx4938_sioptr(ch)      ((struct tx4938_sio_reg *)TX4938_SIO_REG(ch))
-#define tx4938_pioptr          ((struct tx4938_pio_reg *)TX4938_PIO_REG)
+#define tx4938_pioptr          ((struct txx9_pio_reg __iomem *)TX4938_PIO_REG)
 #define tx4938_aclcptr         ((struct tx4938_aclc_reg *)TX4938_ACLC_REG)
 #define tx4938_spiptr          ((struct tx4938_spi_reg *)TX4938_SPI_REG)
 #define tx4938_sramcptr                ((struct tx4938_sramc_reg *)TX4938_SRAMC_REG)
diff --git a/include/asm-mips/tx4938/tx4938_mips.h b/include/asm-mips/tx4938/tx4938_mips.h
deleted file mode 100644 (file)
index f346ff5..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * linux/include/asm-mips/tx4938/tx4938_mips.h
- * Generic bitmask definitions
- *
- * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
- * terms of the GNU General Public License version 2. This program is
- * licensed "as is" without any warranty of any kind, whether express
- * or implied.
- *
- * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
- */
-
-#ifndef TX4938_TX4938_MIPS_H
-#define TX4938_TX4938_MIPS_H
-#ifndef __ASSEMBLY__
-
-#define reg_rd08(r)    ((u8 )(*((vu8 *)(r))))
-#define reg_rd16(r)    ((u16)(*((vu16*)(r))))
-#define reg_rd32(r)    ((u32)(*((vu32*)(r))))
-#define reg_rd64(r)    ((u64)(*((vu64*)(r))))
-
-#define reg_wr08(r, v)  ((*((vu8 *)(r)))=((u8 )(v)))
-#define reg_wr16(r, v)  ((*((vu16*)(r)))=((u16)(v)))
-#define reg_wr32(r, v)  ((*((vu32*)(r)))=((u32)(v)))
-#define reg_wr64(r, v)  ((*((vu64*)(r)))=((u64)(v)))
-
-typedef volatile __signed char vs8;
-typedef volatile unsigned char vu8;
-
-typedef volatile __signed short vs16;
-typedef volatile unsigned short vu16;
-
-typedef volatile __signed int vs32;
-typedef volatile unsigned int vu32;
-
-typedef s8 s08;
-typedef vs8 vs08;
-
-typedef u8 u08;
-typedef vu8 vu08;
-
-#if (_MIPS_SZLONG == 64)
-
-typedef volatile __signed__ long vs64;
-typedef volatile unsigned long vu64;
-
-#else
-
-typedef volatile __signed__ long long vs64;
-typedef volatile unsigned long long vu64;
-
-#endif
-#endif
-#endif
diff --git a/include/asm-mips/txx9pio.h b/include/asm-mips/txx9pio.h
new file mode 100644 (file)
index 0000000..3d6fa9f
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * include/asm-mips/txx9pio.h
+ * TX39/TX49 PIO controller definitions.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#ifndef __ASM_TXX9PIO_H
+#define __ASM_TXX9PIO_H
+
+#include <linux/types.h>
+
+struct txx9_pio_reg {
+       __u32 dout;
+       __u32 din;
+       __u32 dir;
+       __u32 od;
+       __u32 flag[2];
+       __u32 pol;
+       __u32 intc;
+       __u32 maskcpu;
+       __u32 maskext;
+};
+
+int txx9_gpio_init(unsigned long baseaddr,
+                  unsigned int base, unsigned int num);
+
+#endif /* __ASM_TXX9PIO_H */
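
The per-SoC volatile register structs removed elsewhere in this series are replaced by this single __u32 layout used through an __iomem pointer, so accesses go through the normal MMIO accessors. A sketch of driving one pin through dir/dout, assuming base points at an ioremap()ed PIO block; real users would normally go through txx9_gpio_init() and gpiolib instead.

static void txx9_pio_set_sketch(struct txx9_pio_reg __iomem *base,
				unsigned int pin, int value)
{
	u32 v;

	v = __raw_readl(&base->dir);
	__raw_writel(v | (1 << pin), &base->dir);	/* make the pin an output */

	v = __raw_readl(&base->dout);
	if (value)
		v |= 1 << pin;
	else
		v &= ~(1 << pin);
	__raw_writel(v, &base->dout);
}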
index 3249049e93aafcc8eeb345b07a6d220f29a7398e..792404948571847f8b9feb6067a7f63bb862e013 100644 (file)
@@ -5,25 +5,24 @@
  *
  * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org)
  */
-#ifndef __ASM_GENERIC_UNALIGNED_H
-#define __ASM_GENERIC_UNALIGNED_H
+#ifndef _ASM_MIPS_UNALIGNED_H
+#define _ASM_MIPS_UNALIGNED_H
 
 #include <linux/compiler.h>
+#if defined(__MIPSEB__)
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#elif defined(__MIPSEL__)
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#else
+#  error "MIPS, but neither __MIPSEB__, nor __MIPSEL__???"
+#endif
 
-#define get_unaligned(ptr)                                     \
-({                                                             \
-       struct __packed {                                       \
-               typeof(*(ptr)) __v;                             \
-       } *__p = (void *) (ptr);                                \
-       __p->__v;                                               \
-})
-
-#define put_unaligned(val, ptr)                                        \
-do {                                                           \
-       struct __packed {                                       \
-               typeof(*(ptr)) __v;                             \
-       } *__p = (void *) (ptr);                                \
-       __p->__v = (val);                                       \
-} while(0)
-
-#endif /* __ASM_GENERIC_UNALIGNED_H */
+#endif /* _ASM_MIPS_UNALIGNED_H */
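
Caller usage does not change with the split linux/unaligned helpers; get_unaligned()/put_unaligned() simply resolve to the endianness-appropriate implementation. A minimal example:

#include <asm/unaligned.h>

static u32 read_unaligned_word(const u8 *buf)
{
	/* buf + 1 need not be 4-byte aligned; this is still safe */
	return get_unaligned((const u32 *)(buf + 1));
}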
index 98cdb409648513dddaa702ef168bba48513e9c91..da9f6e3734099b31198cf31dd77b6070e158fdbb 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *  Include file for NEC VR4100 series Serial Interface Unit.
  *
- *  Copyright (C) 2005  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *  Copyright (C) 2005-2008  Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
@@ -49,4 +49,10 @@ typedef enum {
 
 extern void vr41xx_select_irda_module(irda_module_t module, irda_speed_t speed);
 
+#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
+extern void vr41xx_siu_early_setup(struct uart_port *port);
+#else
+static inline void vr41xx_siu_early_setup(struct uart_port *port) {}
+#endif
+
 #endif /* __NEC_VR41XX_SIU_H */
index 88b492f6ea9c4481e791bcf2c03f6f163d936e6d..22be64971cc69202823ddb79ff83f32500f1a7da 100644 (file)
@@ -7,7 +7,7 @@
  * Copyright (C) 2001, 2002 Paul Mundt
  * Copyright (C) 2002 MontaVista Software, Inc.
  * Copyright (C) 2002 TimeSys Corp.
- * Copyright (C) 2003-2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2003-2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
@@ -143,4 +143,10 @@ extern void vr41xx_disable_csiint(uint16_t mask);
 extern void vr41xx_enable_bcuint(void);
 extern void vr41xx_disable_bcuint(void);
 
+#ifdef CONFIG_SERIAL_VR41XX_CONSOLE
+extern void vr41xx_siu_setup(void);
+#else
+static inline void vr41xx_siu_setup(void) {}
+#endif
+
 #endif /* __NEC_VR41XX_H */
index bf9c515a998c8e0bc613952229f41ebe967fd4c4..3a8329b3e8694493c6b018eea7820873d04110f8 100644 (file)
@@ -97,7 +97,4 @@ signed __muldiv64s(signed val, signed mult, signed div)
        return result;
 }
 
-extern __attribute__((const))
-uint64_t div64_64(uint64_t dividend, uint64_t divisor);
-
 #endif /* _ASM_DIV64 */
index 375c4941dedaef42a8a496d482dd6215f19d1e89..6dc30fc827c458d5d2c6e67f964a79b720a96bdc 100644 (file)
@@ -224,6 +224,7 @@ static inline int pte_read(pte_t pte)       { return pte_val(pte) & __PAGE_PROT_USER;
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_write(pte_t pte) { return pte_val(pte) & __PAGE_PROT_WRITE; }
+static inline int pte_special(pte_t pte){ return 0; }
 
 /*
  * The following only works if pte_present() is not true.
@@ -265,6 +266,8 @@ static inline pte_t pte_mkwrite(pte_t pte)
        return pte;
 }
 
+static inline pte_t pte_mkspecial(pte_t pte)   { return pte; }
+
 #define pte_ERROR(e) \
        printk(KERN_ERR "%s:%d: bad pte %08lx.\n", \
               __FILE__, __LINE__, pte_val(e))
index cad3afbd035f1744d7c3751d252ab74c257347e3..0df671318ae4952d8a395da4c5c01ab61f040f11 100644 (file)
  * as published by the Free Software Foundation; either version
  * 2 of the Licence, or (at your option) any later version.
  */
-#ifndef _ASM_UNALIGNED_H
-#define _ASM_UNALIGNED_H
+#ifndef _ASM_MN10300_UNALIGNED_H
+#define _ASM_MN10300_UNALIGNED_H
 
-#include <asm/types.h>
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#if 0
-extern int __bug_unaligned_x(void *ptr);
+#define get_unaligned  __get_unaligned_le
+#define put_unaligned  __put_unaligned_le
 
-/*
- * What is the most efficient way of loading/storing an unaligned value?
- *
- * That is the subject of this file.  Efficiency here is defined as
- * minimum code size with minimum register usage for the common cases.
- * It is currently not believed that long longs are common, so we
- * trade efficiency for the chars, shorts and longs against the long
- * longs.
- *
- * Current stats with gcc 2.7.2.2 for these functions:
- *
- *     ptrsize get:    code    regs    put:    code    regs
- *     1               1       1               1       2
- *     2               3       2               3       2
- *     4               7       3               7       3
- *     8               20      6               16      6
- *
- * gcc 2.95.1 seems to code differently:
- *
- *     ptrsize get:    code    regs    put:    code    regs
- *     1               1       1               1       2
- *     2               3       2               3       2
- *     4               7       4               7       4
- *     8               19      8               15      6
- *
- * which may or may not be more efficient (depending upon whether
- * you can afford the extra registers).  Hopefully the gcc 2.95
- * is inteligent enough to decide if it is better to use the
- * extra register, but evidence so far seems to suggest otherwise.
- *
- * Unfortunately, gcc is not able to optimise the high word
- * out of long long >> 32, or the low word from long long << 32
- */
-
-#define __get_unaligned_2(__p)                                 \
-       (__p[0] | __p[1] << 8)
-
-#define __get_unaligned_4(__p)                                 \
-       (__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24)
-
-#define get_unaligned(ptr)                                     \
-({                                                             \
-       unsigned int __v1, __v2;                                \
-       __typeof__(*(ptr)) __v;                                 \
-       __u8 *__p = (__u8 *)(ptr);                              \
-                                                               \
-       switch (sizeof(*(ptr))) {                               \
-       case 1: __v = *(ptr);                   break;          \
-       case 2: __v = __get_unaligned_2(__p);   break;          \
-       case 4: __v = __get_unaligned_4(__p);   break;          \
-       case 8:                                                 \
-               __v2 = __get_unaligned_4((__p+4));              \
-               __v1 = __get_unaligned_4(__p);                  \
-               __v = ((unsigned long long)__v2 << 32 | __v1);  \
-               break;                                          \
-       default: __v = __bug_unaligned_x(__p);  break;          \
-       }                                                       \
-       __v;                                                    \
-})
-
-
-static inline void __put_unaligned_2(__u32 __v, register __u8 *__p)
-{
-       *__p++ = __v;
-       *__p++ = __v >> 8;
-}
-
-static inline void __put_unaligned_4(__u32 __v, register __u8 *__p)
-{
-       __put_unaligned_2(__v >> 16, __p + 2);
-       __put_unaligned_2(__v, __p);
-}
-
-static inline void __put_unaligned_8(const unsigned long long __v, __u8 *__p)
-{
-       /*
-        * tradeoff: 8 bytes of stack for all unaligned puts (2
-        * instructions), or an extra register in the long long
-        * case - go for the extra register.
-        */
-       __put_unaligned_4(__v >> 32, __p + 4);
-       __put_unaligned_4(__v, __p);
-}
-
-/*
- * Try to store an unaligned value as efficiently as possible.
- */
-#define put_unaligned(val, ptr)                                                \
-       ({                                                              \
-               switch (sizeof(*(ptr))) {                               \
-               case 1:                                                 \
-                       *(ptr) = (val);                                 \
-                       break;                                          \
-               case 2:                                                 \
-                       __put_unaligned_2((val), (__u8 *)(ptr));        \
-                       break;                                          \
-               case 4:                                                 \
-                       __put_unaligned_4((val), (__u8 *)(ptr));        \
-                       break;                                          \
-               case 8:                                                 \
-                       __put_unaligned_8((val), (__u8 *)(ptr));        \
-                       break;                                          \
-               default:                                                \
-                       __bug_unaligned_x(ptr);                         \
-                       break;                                          \
-               }                                                       \
-               (void) 0;                                               \
-       })
-
-
-#else
-
-#define get_unaligned(ptr) (*(ptr))
-#define put_unaligned(val, ptr) ({ *(ptr) = (val); (void) 0; })
-
-#endif
-
-#endif
+#endif /* _ASM_MN10300_UNALIGNED_H */
index f8eebcbad01f81d57e0a222d4573d9044f114e96..7a6ea10bd231a051217ddd00b0f9318a604b1878 100644 (file)
@@ -210,6 +210,7 @@ static __inline__ int fls(int x)
        return ret;
 }
 
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/hweight.h>
 #include <asm-generic/bitops/lock.h>
index fdc6d055ef7f2af68872f2f195d8b417ebb9558f..0c705c3a55efc01305dbea4e14eecf63c0f3f05e 100644 (file)
@@ -4,8 +4,8 @@
 #ifdef __KERNEL__
 
 #include <linux/futex.h>
+#include <linux/uaccess.h>
 #include <asm/errno.h>
-#include <asm/uaccess.h>
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
index dc86adbec916da26e7825523324c39dc8a642fec..470a4b88124da2fcfb8c9936f523abd2662079a2 100644 (file)
@@ -323,6 +323,7 @@ static inline int pte_dirty(pte_t pte)              { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)         { return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_write(pte_t pte)         { return pte_val(pte) & _PAGE_WRITE; }
 static inline int pte_file(pte_t pte)          { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte)       { return 0; }
 
 static inline pte_t pte_mkclean(pte_t pte)     { pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkold(pte_t pte)       { pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
@@ -330,6 +331,7 @@ static inline pte_t pte_wrprotect(pte_t pte)        { pte_val(pte) &= ~_PAGE_WRITE; ret
 static inline pte_t pte_mkdirty(pte_t pte)     { pte_val(pte) |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte)     { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte)     { pte_val(pte) |= _PAGE_WRITE; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte)   { return pte; }
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
index 53c905838d9330a07598c1310c7567e3ddde8591..dfc5d3321a54bb72bba3b234586f2332142293ea 100644 (file)
@@ -1,7 +1,11 @@
-#ifndef _ASM_PARISC_UNALIGNED_H_
-#define _ASM_PARISC_UNALIGNED_H_
+#ifndef _ASM_PARISC_UNALIGNED_H
+#define _ASM_PARISC_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
+#define get_unaligned  __get_unaligned_be
+#define put_unaligned  __put_unaligned_be
 
 #ifdef __KERNEL__
 struct pt_regs;
@@ -9,4 +13,4 @@ void handle_unaligned(struct pt_regs *regs);
 int check_unaligned(struct pt_regs *regs);
 #endif
 
-#endif /* _ASM_PARISC_UNALIGNED_H_ */
+#endif /* _ASM_PARISC_UNALIGNED_H */
index a99a749294753b3a77688a203998111ef01eff13..897eade3afbeb0109159de4e1eb855f42ddab4e0 100644 (file)
@@ -313,6 +313,11 @@ static __inline__ int fls(unsigned int x)
        return 32 - lz;
 }
 
+static __inline__ unsigned long __fls(unsigned long x)
+{
+       return __ilog2(x);
+}
+
 /*
  * 64-bit can do this using one cntlzd (count leading zeroes doubleword)
  * instruction; for 32-bit we use the generic version, which does two
index 3f3673fd3ff34abef169c139c71302084b2cc874..6d406c5c5de40390853e18cf2f4a7c03899cd381 100644 (file)
@@ -4,9 +4,9 @@
 #ifdef __KERNEL__
 
 #include <linux/futex.h>
+#include <linux/uaccess.h>
 #include <asm/errno.h>
 #include <asm/synch.h>
-#include <asm/uaccess.h>
 #include <asm/asm-compat.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
diff --git a/include/asm-powerpc/hugetlb.h b/include/asm-powerpc/hugetlb.h
new file mode 100644 (file)
index 0000000..649c6c3
--- /dev/null
@@ -0,0 +1,79 @@
+#ifndef _ASM_POWERPC_HUGETLB_H
+#define _ASM_POWERPC_HUGETLB_H
+
+#include <asm/page.h>
+
+
+int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
+                          unsigned long len);
+
+void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+                           unsigned long end, unsigned long floor,
+                           unsigned long ceiling);
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                    pte_t *ptep, pte_t pte);
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                             pte_t *ptep);
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+       return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+       return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+#endif /* _ASM_POWERPC_HUGETLB_H */
index b5c03127a9b9402b57e88de81c79454aabaa833b..5089deb8fec3f0fcf744d1647eea5475202555a1 100644 (file)
@@ -619,8 +619,6 @@ struct pt_regs;
 
 #define __ARCH_HAS_DO_SOFTIRQ
 
-extern void __do_softirq(void);
-
 #ifdef CONFIG_IRQSTACKS
 /*
  * Per-cpu stacks for handling hard and soft interrupts.
index d1b530fbf8dd667b31658b5fcbeb8b39a6198c68..f993e4198d5ca0d3554fc68ee4c64c92c25243ca 100644 (file)
@@ -1,6 +1,55 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
 #ifndef __LINUX_KVM_POWERPC_H
 #define __LINUX_KVM_POWERPC_H
 
-/* powerpc does not support KVM */
+#include <asm/types.h>
+
+struct kvm_regs {
+       __u64 pc;
+       __u64 cr;
+       __u64 ctr;
+       __u64 lr;
+       __u64 xer;
+       __u64 msr;
+       __u64 srr0;
+       __u64 srr1;
+       __u64 pid;
+
+       __u64 sprg0;
+       __u64 sprg1;
+       __u64 sprg2;
+       __u64 sprg3;
+       __u64 sprg4;
+       __u64 sprg5;
+       __u64 sprg6;
+       __u64 sprg7;
+
+       __u64 gpr[32];
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+       __u64 fpr[32];
+};
 
-#endif
+#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/include/asm-powerpc/kvm_asm.h b/include/asm-powerpc/kvm_asm.h
new file mode 100644 (file)
index 0000000..2197764
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_ASM_H__
+#define __POWERPC_KVM_ASM_H__
+
+/* IVPR must be 64KiB-aligned. */
+#define VCPU_SIZE_ORDER 4
+#define VCPU_SIZE_LOG   (VCPU_SIZE_ORDER + 12)
+#define VCPU_TLB_PGSZ   PPC44x_TLB_64K
+#define VCPU_SIZE_BYTES (1<<VCPU_SIZE_LOG)
+
+#define BOOKE_INTERRUPT_CRITICAL 0
+#define BOOKE_INTERRUPT_MACHINE_CHECK 1
+#define BOOKE_INTERRUPT_DATA_STORAGE 2
+#define BOOKE_INTERRUPT_INST_STORAGE 3
+#define BOOKE_INTERRUPT_EXTERNAL 4
+#define BOOKE_INTERRUPT_ALIGNMENT 5
+#define BOOKE_INTERRUPT_PROGRAM 6
+#define BOOKE_INTERRUPT_FP_UNAVAIL 7
+#define BOOKE_INTERRUPT_SYSCALL 8
+#define BOOKE_INTERRUPT_AP_UNAVAIL 9
+#define BOOKE_INTERRUPT_DECREMENTER 10
+#define BOOKE_INTERRUPT_FIT 11
+#define BOOKE_INTERRUPT_WATCHDOG 12
+#define BOOKE_INTERRUPT_DTLB_MISS 13
+#define BOOKE_INTERRUPT_ITLB_MISS 14
+#define BOOKE_INTERRUPT_DEBUG 15
+#define BOOKE_MAX_INTERRUPT 15
+
+#define RESUME_FLAG_NV          (1<<0)  /* Reload guest nonvolatile state? */
+#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_GUEST_NV         RESUME_FLAG_NV
+#define RESUME_HOST             RESUME_FLAG_HOST
+#define RESUME_HOST_NV          (RESUME_FLAG_HOST|RESUME_FLAG_NV)
+
+#endif /* __POWERPC_KVM_ASM_H__ */
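
A quick standalone check (not part of the commit) of the arithmetic behind the "IVPR must be 64KiB-aligned" comment above: VCPU_SIZE_LOG is VCPU_SIZE_ORDER + 12 = 16, so VCPU_SIZE_BYTES is 1 << 16 = 65536 bytes, i.e. an order-4 block of 4KiB pages.

#include <assert.h>

/* Mirrors the defines from kvm_asm.h above, for illustration only. */
#define VCPU_SIZE_ORDER 4
#define VCPU_SIZE_LOG   (VCPU_SIZE_ORDER + 12)
#define VCPU_SIZE_BYTES (1 << VCPU_SIZE_LOG)

int main(void)
{
	/* An order-4 run of 4KiB pages is 64KiB, matching the alignment note. */
	assert(VCPU_SIZE_BYTES == 64 * 1024);
	return 0;
}
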
diff --git a/include/asm-powerpc/kvm_host.h b/include/asm-powerpc/kvm_host.h
new file mode 100644 (file)
index 0000000..04ffbb8
--- /dev/null
@@ -0,0 +1,152 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2007
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_HOST_H__
+#define __POWERPC_KVM_HOST_H__
+
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <asm/kvm_asm.h>
+
+#define KVM_MAX_VCPUS 1
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+/* We don't currently support large pages. */
+#define KVM_PAGES_PER_HPAGE (1<<31)
+
+struct kvm;
+struct kvm_run;
+struct kvm_vcpu;
+
+struct kvm_vm_stat {
+       u32 remote_tlb_flush;
+};
+
+struct kvm_vcpu_stat {
+       u32 sum_exits;
+       u32 mmio_exits;
+       u32 dcr_exits;
+       u32 signal_exits;
+       u32 light_exits;
+       /* Account for special types of light exits: */
+       u32 itlb_real_miss_exits;
+       u32 itlb_virt_miss_exits;
+       u32 dtlb_real_miss_exits;
+       u32 dtlb_virt_miss_exits;
+       u32 syscall_exits;
+       u32 isi_exits;
+       u32 dsi_exits;
+       u32 emulated_inst_exits;
+       u32 dec_exits;
+       u32 ext_intr_exits;
+};
+
+struct tlbe {
+       u32 tid; /* Only the low 8 bits are used. */
+       u32 word0;
+       u32 word1;
+       u32 word2;
+};
+
+struct kvm_arch {
+};
+
+struct kvm_vcpu_arch {
+       /* Unmodified copy of the guest's TLB. */
+       struct tlbe guest_tlb[PPC44x_TLB_SIZE];
+       /* TLB that's actually used when the guest is running. */
+       struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
+       /* Pages which are referenced in the shadow TLB. */
+       struct page *shadow_pages[PPC44x_TLB_SIZE];
+       /* Copy of the host's TLB. */
+       struct tlbe host_tlb[PPC44x_TLB_SIZE];
+
+       u32 host_stack;
+       u32 host_pid;
+
+       u64 fpr[32];
+       u32 gpr[32];
+
+       u32 pc;
+       u32 cr;
+       u32 ctr;
+       u32 lr;
+       u32 xer;
+
+       u32 msr;
+       u32 mmucr;
+       u32 sprg0;
+       u32 sprg1;
+       u32 sprg2;
+       u32 sprg3;
+       u32 sprg4;
+       u32 sprg5;
+       u32 sprg6;
+       u32 sprg7;
+       u32 srr0;
+       u32 srr1;
+       u32 csrr0;
+       u32 csrr1;
+       u32 dsrr0;
+       u32 dsrr1;
+       u32 dear;
+       u32 esr;
+       u32 dec;
+       u32 decar;
+       u32 tbl;
+       u32 tbu;
+       u32 tcr;
+       u32 tsr;
+       u32 ivor[16];
+       u32 ivpr;
+       u32 pir;
+       u32 pid;
+       u32 pvr;
+       u32 ccr0;
+       u32 ccr1;
+       u32 dbcr0;
+       u32 dbcr1;
+
+       u32 last_inst;
+       u32 fault_dear;
+       u32 fault_esr;
+       gpa_t paddr_accessed;
+
+       u8 io_gpr; /* GPR used as IO source/target */
+       u8 mmio_is_bigendian;
+       u8 dcr_needed;
+       u8 dcr_is_write;
+
+       u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
+
+       struct timer_list dec_timer;
+       unsigned long pending_exceptions;
+};
+
+struct kvm_guest_debug {
+       int enabled;
+       unsigned long bp[4];
+       int singlestep;
+};
+
+#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/include/asm-powerpc/kvm_para.h b/include/asm-powerpc/kvm_para.h
new file mode 100644 (file)
index 0000000..2d48f6a
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_PARA_H__
+#define __POWERPC_KVM_PARA_H__
+
+#ifdef __KERNEL__
+
+static inline int kvm_para_available(void)
+{
+       return 0;
+}
+
+static inline unsigned int kvm_arch_para_features(void)
+{
+       return 0;
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/include/asm-powerpc/kvm_ppc.h b/include/asm-powerpc/kvm_ppc.h
new file mode 100644 (file)
index 0000000..7ac8203
--- /dev/null
@@ -0,0 +1,88 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
+ */
+
+#ifndef __POWERPC_KVM_PPC_H__
+#define __POWERPC_KVM_PPC_H__
+
+/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
+ * dependencies. */
+
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/kvm_types.h>
+#include <linux/kvm_host.h>
+
+struct kvm_tlb {
+       struct tlbe guest_tlb[PPC44x_TLB_SIZE];
+       struct tlbe shadow_tlb[PPC44x_TLB_SIZE];
+};
+
+enum emulation_result {
+       EMULATE_DONE,         /* no further processing */
+       EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
+       EMULATE_DO_DCR,       /* kvm_run filled with DCR request */
+       EMULATE_FAIL,         /* can't emulate this instruction */
+};
+
+extern const unsigned char exception_priority[];
+extern const unsigned char priority_exception[];
+
+extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
+extern char kvmppc_handlers_start[];
+extern unsigned long kvmppc_handler_len;
+
+extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                              unsigned int rt, unsigned int bytes,
+                              int is_bigendian);
+extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
+                               u32 val, unsigned int bytes, int is_bigendian);
+
+extern int kvmppc_emulate_instruction(struct kvm_run *run,
+                                      struct kvm_vcpu *vcpu);
+
+extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gfn_t gfn,
+                           u64 asid, u32 flags);
+extern void kvmppc_mmu_invalidate(struct kvm_vcpu *vcpu, u64 eaddr, u64 asid);
+extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
+
+extern void kvmppc_check_and_deliver_interrupts(struct kvm_vcpu *vcpu);
+
+static inline void kvmppc_queue_exception(struct kvm_vcpu *vcpu, int exception)
+{
+       unsigned int priority = exception_priority[exception];
+       set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+static inline void kvmppc_clear_exception(struct kvm_vcpu *vcpu, int exception)
+{
+       unsigned int priority = exception_priority[exception];
+       clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
+{
+       if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR))
+               kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR);
+
+       vcpu->arch.msr = new_msr;
+}
+
+#endif /* __POWERPC_KVM_PPC_H__ */
index c8b02d97f75378c6c42b81c3adadfed7a57ffbd8..a825524c981a3965bda27107e53e83b62e7b4c13 100644 (file)
@@ -53,6 +53,8 @@
 
 #ifndef __ASSEMBLY__
 
+extern unsigned int tlb_44x_hwater;
+
 typedef struct {
        unsigned long id;
        unsigned long vdso_base;
index 67834eae57028966a943175588b912adb744fe72..25af4fc8daf40976d030c66dc553fd6c1802662e 100644 (file)
@@ -128,11 +128,6 @@ extern void slice_init_context(struct mm_struct *mm, unsigned int psize);
 extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
 #define slice_mm_new_context(mm)       ((mm)->context.id == 0)
 
-#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
-extern int is_hugepage_only_range(struct mm_struct *m,
-                                 unsigned long addr,
-                                 unsigned long len);
-
 #endif /* __ASSEMBLY__ */
 #else
 #define slice_init()
@@ -146,8 +141,6 @@ do {                                                \
 
 #ifdef CONFIG_HUGETLB_PAGE
 
-#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 
 #endif /* !CONFIG_HUGETLB_PAGE */
index daea7692d070fc9ac28aecee8f2dfd5ecdab480d..7c97b5a08d082782807d33467c5864ce0a2a6db6 100644 (file)
@@ -504,6 +504,7 @@ static inline int pte_write(pte_t pte)              { return pte_val(pte) & _PAGE_RW; }
 static inline int pte_dirty(pte_t pte)         { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)         { return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)          { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte)       { return 0; }
 
 static inline void pte_uncache(pte_t pte)       { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte)         { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -521,6 +522,8 @@ static inline pte_t pte_mkdirty(pte_t pte) {
        pte_val(pte) |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte) {
        pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) {
+       return pte; }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
index dd4c26dc57d2727d533c19fef3f9ad4224ccf737..27f18695f7d6627f18071b6870281a665a431fef 100644 (file)
@@ -239,6 +239,7 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_RW;}
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY;}
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
 static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
+static inline int pte_special(pte_t pte) { return 0; }
 
 static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -257,6 +258,8 @@ static inline pte_t pte_mkyoung(pte_t pte) {
        pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkhuge(pte_t pte) {
        return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) {
+       return pte; }
 
 /* Atomic PTE updates */
 static inline unsigned long pte_update(struct mm_struct *mm,
index fd98ca998b4fa87cec01af7418af98c207a273eb..cf83f2d7e2a5dea90b550861b3b0502c54853551 100644 (file)
@@ -138,6 +138,8 @@ typedef struct {
 
 struct thread_struct {
        unsigned long   ksp;            /* Kernel stack pointer */
+       unsigned long   ksp_limit;      /* if ksp <= ksp_limit stack overflow */
+
 #ifdef CONFIG_PPC64
        unsigned long   ksp_vsid;
 #endif
@@ -182,11 +184,14 @@ struct thread_struct {
 #define ARCH_MIN_TASKALIGN 16
 
 #define INIT_SP                (sizeof(init_stack) + (unsigned long) &init_stack)
+#define INIT_SP_LIMIT \
+       (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)
 
 
 #ifdef CONFIG_PPC32
 #define INIT_THREAD { \
        .ksp = INIT_SP, \
+       .ksp_limit = INIT_SP_LIMIT, \
        .fs = KERNEL_DS, \
        .pgdir = swapper_pg_dir, \
        .fpexc_mode = MSR_FE0 | MSR_FE1, \
@@ -194,6 +199,7 @@ struct thread_struct {
 #else
 #define INIT_THREAD  { \
        .ksp = INIT_SP, \
+       .ksp_limit = INIT_SP_LIMIT, \
        .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
        .fs = KERNEL_DS, \
        .fpr = {0}, \
index fab1674b31b6beb989478cc8ed65894decdef84d..2b6559a6d113d6e883887ff6766be4989ed54dcb 100644 (file)
@@ -204,7 +204,7 @@ extern int powersave_nap;   /* set if nap mode can be used in idle loop */
  * Changes the memory location '*ptr' to be val and returns
  * the previous value stored there.
  */
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_u32(volatile void *p, unsigned long val)
 {
        unsigned long prev;
@@ -229,7 +229,7 @@ __xchg_u32(volatile void *p, unsigned long val)
  * Changes the memory location '*ptr' to be val and returns
  * the previous value stored there.
  */
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_u32_local(volatile void *p, unsigned long val)
 {
        unsigned long prev;
@@ -247,7 +247,7 @@ __xchg_u32_local(volatile void *p, unsigned long val)
 }
 
 #ifdef CONFIG_PPC64
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_u64(volatile void *p, unsigned long val)
 {
        unsigned long prev;
@@ -266,7 +266,7 @@ __xchg_u64(volatile void *p, unsigned long val)
        return prev;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_u64_local(volatile void *p, unsigned long val)
 {
        unsigned long prev;
@@ -290,7 +290,7 @@ __xchg_u64_local(volatile void *p, unsigned long val)
  */
 extern void __xchg_called_with_bad_pointer(void);
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg(volatile void *ptr, unsigned long x, unsigned int size)
 {
        switch (size) {
@@ -305,7 +305,7 @@ __xchg(volatile void *ptr, unsigned long x, unsigned int size)
        return x;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
 {
        switch (size) {
@@ -338,7 +338,7 @@ __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
  */
 #define __HAVE_ARCH_CMPXCHG    1
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
 {
        unsigned int prev;
@@ -361,7 +361,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
        return prev;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
                        unsigned long new)
 {
@@ -384,7 +384,7 @@ __cmpxchg_u32_local(volatile unsigned int *p, unsigned long old,
 }
 
 #ifdef CONFIG_PPC64
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
 {
        unsigned long prev;
@@ -406,7 +406,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
        return prev;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
                        unsigned long new)
 {
@@ -432,7 +432,7 @@ __cmpxchg_u64_local(volatile unsigned long *p, unsigned long old,
    if something tries to do an invalid cmpxchg().  */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
          unsigned int size)
 {
@@ -448,7 +448,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
        return old;
 }
 
-static __inline__ unsigned long
+static __always_inline unsigned long
 __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
          unsigned int size)
 {
index 6c95dfa2652f332cbdcf73b04565d7c3e546cad0..5f1b1e3c21374d5a1833ed00c9af9d5c06a35123 100644 (file)
@@ -5,15 +5,12 @@
 
 /*
  * The PowerPC can do unaligned accesses itself in big endian mode.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
-
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+#define get_unaligned  __get_unaligned_be
+#define put_unaligned  __put_unaligned_be
 
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_UNALIGNED_H */
index 70435d32129a5dd3ea43335ed46755148ac4fe3e..55f9d38e3bf8702d302054ead258bf588edf199b 100644 (file)
@@ -483,6 +483,7 @@ static inline int pte_write(pte_t pte)              { return pte_val(pte) & _PAGE_RW; }
 static inline int pte_dirty(pte_t pte)         { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte)         { return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)          { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte)       { return 0; }
 
 static inline void pte_uncache(pte_t pte)       { pte_val(pte) |= _PAGE_NO_CACHE; }
 static inline void pte_cache(pte_t pte)         { pte_val(pte) &= ~_PAGE_NO_CACHE; }
@@ -500,6 +501,8 @@ static inline pte_t pte_mkdirty(pte_t pte) {
        pte_val(pte) |= _PAGE_DIRTY; return pte; }
 static inline pte_t pte_mkyoung(pte_t pte) {
        pte_val(pte) |= _PAGE_ACCESSED; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte) {
+       return pte; }
 
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
index e92b429d2be1be16176a4246eb4e10d135dcc736..13c9805349f170deda0308100707ba856390a730 100644 (file)
@@ -7,6 +7,7 @@ header-y += tape390.h
 header-y += ucontext.h
 header-y += vtoc.h
 header-y += zcrypt.h
+header-y += kvm.h
 
 unifdef-y += cmb.h
 unifdef-y += debug.h
index 965394e69452daf2cf856a5a54472cbb7f1ab8e4..b4eb24ab5af97811e5a0ca1845607de238210863 100644 (file)
@@ -769,6 +769,7 @@ static inline int sched_find_first_bit(unsigned long *b)
 }
 
 #include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
 #include <asm-generic/bitops/hweight.h>
index 289053ef5e601628403e57279cc681cdfef36282..a27f68985a791d7c5317a8574c0605ef1c54064c 100644 (file)
@@ -57,10 +57,9 @@ struct ccwgroup_driver {
 
 extern int  ccwgroup_driver_register   (struct ccwgroup_driver *cdriver);
 extern void ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver);
-extern int ccwgroup_create (struct device *root,
-                           unsigned int creator_id,
-                           struct ccw_driver *gdrv,
-                           int argc, char *argv[]);
+int ccwgroup_create_from_string(struct device *root, unsigned int creator_id,
+                               struct ccw_driver *cdrv, int num_devices,
+                               const char *buf);
 
 extern int ccwgroup_probe_ccwdev(struct ccw_device *cdev);
 extern void ccwgroup_remove_ccwdev(struct ccw_device *cdev);
diff --git a/include/asm-s390/hugetlb.h b/include/asm-s390/hugetlb.h
new file mode 100644 (file)
index 0000000..600a776
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ *  IBM System z Huge TLB Page Support for Kernel.
+ *
+ *    Copyright IBM Corp. 2008
+ *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_HUGETLB_H
+#define _ASM_S390_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+
+
+#define is_hugepage_only_range(mm, addr, len)  0
+#define hugetlb_free_pgd_range                 free_pgd_range
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                    pte_t *ptep, pte_t pte);
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
+#define hugetlb_prefault_arch_hook(mm)         do { } while (0)
+
+int arch_prepare_hugepage(struct page *page);
+void arch_release_hugepage(struct page *page);
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+       /*
+        * PROT_NONE needs to be remapped from the pte type to the ste type.
+        * The HW invalid bit is also different for pte and ste. The pte
+        * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
+        * bit, so we don't have to clear it.
+        */
+       if (pte_val(pte) & _PAGE_INVALID) {
+               if (pte_val(pte) & _PAGE_SWT)
+                       pte_val(pte) |= _HPAGE_TYPE_NONE;
+               pte_val(pte) |= _SEGMENT_ENTRY_INV;
+       }
+       /*
+        * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
+        * table entry.
+        */
+       pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
+       /*
+        * Also set the change-override bit because we don't need dirty bit
+        * tracking for hugetlbfs pages.
+        */
+       pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
+       return pte;
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       pte_val(pte) |= _PAGE_RO;
+       return pte;
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+       return (pte_val(pte) & _SEGMENT_ENTRY_INV) &&
+               !(pte_val(pte) & _SEGMENT_ENTRY_RO);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       pte_t pte = *ptep;
+       unsigned long mask;
+
+       if (!MACHINE_HAS_HPAGE) {
+               ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN);
+               if (ptep) {
+                       mask = pte_val(pte) &
+                               (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO);
+                       pte = pte_mkhuge(*ptep);
+                       pte_val(pte) |= mask;
+               }
+       }
+       return pte;
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                                           unsigned long addr, pte_t *ptep)
+{
+       pte_t pte = huge_ptep_get(ptep);
+
+       pmd_clear((pmd_t *) ptep);
+       return pte;
+}
+
+static inline void __pmd_csp(pmd_t *pmdp)
+{
+       register unsigned long reg2 asm("2") = pmd_val(*pmdp);
+       register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
+                                              _SEGMENT_ENTRY_INV;
+       register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;
+
+       asm volatile(
+               "       csp %1,%3"
+               : "=m" (*pmdp)
+               : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
+       pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
+}
+
+static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
+{
+       unsigned long sto = (unsigned long) pmdp -
+                               pmd_index(address) * sizeof(pmd_t);
+
+       if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
+               asm volatile(
+                       "       .insn   rrf,0xb98e0000,%2,%3,0,0"
+                       : "=m" (*pmdp)
+                       : "m" (*pmdp), "a" (sto),
+                         "a" ((address & HPAGE_MASK))
+               );
+       }
+       pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY;
+}
+
+static inline void huge_ptep_invalidate(struct mm_struct *mm,
+                                       unsigned long address, pte_t *ptep)
+{
+       pmd_t *pmdp = (pmd_t *) ptep;
+
+       if (!MACHINE_HAS_IDTE) {
+               __pmd_csp(pmdp);
+               if (mm->context.noexec) {
+                       pmdp = get_shadow_table(pmdp);
+                       __pmd_csp(pmdp);
+               }
+               return;
+       }
+
+       __pmd_idte(address, pmdp);
+       if (mm->context.noexec) {
+               pmdp = get_shadow_table(pmdp);
+               __pmd_idte(address, pmdp);
+       }
+       return;
+}
+
+#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
+({                                                                         \
+       int __changed = !pte_same(huge_ptep_get(__ptep), __entry);          \
+       if (__changed) {                                                    \
+               huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep);       \
+               set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry);   \
+       }                                                                   \
+       __changed;                                                          \
+})
+
+#define huge_ptep_set_wrprotect(__mm, __addr, __ptep)                  \
+({                                                                     \
+       pte_t __pte = huge_ptep_get(__ptep);                            \
+       if (pte_write(__pte)) {                                         \
+               if (atomic_read(&(__mm)->mm_users) > 1 ||               \
+                   (__mm) != current->active_mm)                       \
+                       huge_ptep_invalidate(__mm, __addr, __ptep);     \
+               set_huge_pte_at(__mm, __addr, __ptep,                   \
+                               huge_pte_wrprotect(__pte));             \
+       }                                                               \
+})
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                        unsigned long address, pte_t *ptep)
+{
+       huge_ptep_invalidate(vma->vm_mm, address, ptep);
+}
+
+#endif /* _ASM_S390_HUGETLB_H */
index 573f2a35138662f698c72e0978450db22fba74b6..d74002f9579482f1bb13fc0a223b88802e6c1826 100644 (file)
@@ -1,6 +1,45 @@
 #ifndef __LINUX_KVM_S390_H
 #define __LINUX_KVM_S390_H
 
-/* s390 does not support KVM */
+/*
+ * asm-s390/kvm.h - KVM s390 specific structures and definitions
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ *               Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+#include <asm/types.h>
+
+/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
+struct kvm_pic_state {
+       /* no PIC for s390 */
+};
+
+struct kvm_ioapic_state {
+       /* no IOAPIC for s390 */
+};
+
+/* for KVM_GET_REGS and KVM_SET_REGS */
+struct kvm_regs {
+       /* general purpose regs for s390 */
+       __u64 gprs[16];
+};
+
+/* for KVM_GET_SREGS and KVM_SET_SREGS */
+struct kvm_sregs {
+       __u32 acrs[16];
+       __u64 crs[16];
+};
+
+/* for KVM_GET_FPU and KVM_SET_FPU */
+struct kvm_fpu {
+       __u32 fpc;
+       __u64 fprs[16];
+};
 
 #endif
diff --git a/include/asm-s390/kvm_host.h b/include/asm-s390/kvm_host.h
new file mode 100644 (file)
index 0000000..f8204a4
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * asm-s390/kvm_host.h - definition for kernel virtual machines on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Carsten Otte <cotte@de.ibm.com>
+ */
+
+
+#ifndef ASM_KVM_HOST_H
+#define ASM_KVM_HOST_H
+#include <linux/kvm_host.h>
+#include <asm/debug.h>
+
+#define KVM_MAX_VCPUS 64
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+struct kvm_guest_debug {
+};
+
+struct sca_entry {
+       atomic_t scn;
+       __u64   reserved;
+       __u64   sda;
+       __u64   reserved2[2];
+} __attribute__((packed));
+
+
+struct sca_block {
+       __u64   ipte_control;
+       __u64   reserved[5];
+       __u64   mcn;
+       __u64   reserved2;
+       struct sca_entry cpu[64];
+} __attribute__((packed));
+
+#define KVM_PAGES_PER_HPAGE 256
+
+#define CPUSTAT_HOST       0x80000000
+#define CPUSTAT_WAIT       0x10000000
+#define CPUSTAT_ECALL_PEND 0x08000000
+#define CPUSTAT_STOP_INT   0x04000000
+#define CPUSTAT_IO_INT     0x02000000
+#define CPUSTAT_EXT_INT    0x01000000
+#define CPUSTAT_RUNNING    0x00800000
+#define CPUSTAT_RETAINED   0x00400000
+#define CPUSTAT_TIMING_SUB 0x00020000
+#define CPUSTAT_SIE_SUB    0x00010000
+#define CPUSTAT_RRF        0x00008000
+#define CPUSTAT_SLSV       0x00004000
+#define CPUSTAT_SLSR       0x00002000
+#define CPUSTAT_ZARCH      0x00000800
+#define CPUSTAT_MCDS       0x00000100
+#define CPUSTAT_SM         0x00000080
+#define CPUSTAT_G          0x00000008
+#define CPUSTAT_J          0x00000002
+#define CPUSTAT_P          0x00000001
+
+struct sie_block {
+       atomic_t cpuflags;              /* 0x0000 */
+       __u32   prefix;                 /* 0x0004 */
+       __u8    reserved8[32];          /* 0x0008 */
+       __u64   cputm;                  /* 0x0028 */
+       __u64   ckc;                    /* 0x0030 */
+       __u64   epoch;                  /* 0x0038 */
+       __u8    reserved40[4];          /* 0x0040 */
+#define LCTL_CR0       0x8000
+       __u16   lctl;                   /* 0x0044 */
+       __s16   icpua;                  /* 0x0046 */
+       __u32   ictl;                   /* 0x0048 */
+       __u32   eca;                    /* 0x004c */
+       __u8    icptcode;               /* 0x0050 */
+       __u8    reserved51;             /* 0x0051 */
+       __u16   ihcpu;                  /* 0x0052 */
+       __u8    reserved54[2];          /* 0x0054 */
+       __u16   ipa;                    /* 0x0056 */
+       __u32   ipb;                    /* 0x0058 */
+       __u32   scaoh;                  /* 0x005c */
+       __u8    reserved60;             /* 0x0060 */
+       __u8    ecb;                    /* 0x0061 */
+       __u8    reserved62[2];          /* 0x0062 */
+       __u32   scaol;                  /* 0x0064 */
+       __u8    reserved68[4];          /* 0x0068 */
+       __u32   todpr;                  /* 0x006c */
+       __u8    reserved70[16];         /* 0x0070 */
+       __u64   gmsor;                  /* 0x0080 */
+       __u64   gmslm;                  /* 0x0088 */
+       psw_t   gpsw;                   /* 0x0090 */
+       __u64   gg14;                   /* 0x00a0 */
+       __u64   gg15;                   /* 0x00a8 */
+       __u8    reservedb0[30];         /* 0x00b0 */
+       __u16   iprcc;                  /* 0x00ce */
+       __u8    reservedd0[48];         /* 0x00d0 */
+       __u64   gcr[16];                /* 0x0100 */
+       __u64   gbea;                   /* 0x0180 */
+       __u8    reserved188[120];       /* 0x0188 */
+} __attribute__((packed));
+
+struct kvm_vcpu_stat {
+       u32 exit_userspace;
+       u32 exit_external_request;
+       u32 exit_external_interrupt;
+       u32 exit_stop_request;
+       u32 exit_validity;
+       u32 exit_instruction;
+       u32 instruction_lctl;
+       u32 instruction_lctg;
+       u32 exit_program_interruption;
+       u32 exit_instr_and_program;
+       u32 deliver_emergency_signal;
+       u32 deliver_service_signal;
+       u32 deliver_virtio_interrupt;
+       u32 deliver_stop_signal;
+       u32 deliver_prefix_signal;
+       u32 deliver_restart_signal;
+       u32 deliver_program_int;
+       u32 exit_wait_state;
+       u32 instruction_stidp;
+       u32 instruction_spx;
+       u32 instruction_stpx;
+       u32 instruction_stap;
+       u32 instruction_storage_key;
+       u32 instruction_stsch;
+       u32 instruction_chsc;
+       u32 instruction_stsi;
+       u32 instruction_stfl;
+       u32 instruction_sigp_sense;
+       u32 instruction_sigp_emergency;
+       u32 instruction_sigp_stop;
+       u32 instruction_sigp_arch;
+       u32 instruction_sigp_prefix;
+       u32 instruction_sigp_restart;
+       u32 diagnose_44;
+};
+
+struct io_info {
+       __u16        subchannel_id;            /* 0x0b8 */
+       __u16        subchannel_nr;            /* 0x0ba */
+       __u32        io_int_parm;              /* 0x0bc */
+       __u32        io_int_word;              /* 0x0c0 */
+};
+
+struct ext_info {
+       __u32 ext_params;
+       __u64 ext_params2;
+};
+
+#define PGM_OPERATION            0x01
+#define PGM_PRIVILEGED_OPERATION 0x02
+#define PGM_EXECUTE              0x03
+#define PGM_PROTECTION           0x04
+#define PGM_ADDRESSING           0x05
+#define PGM_SPECIFICATION        0x06
+#define PGM_DATA                 0x07
+
+struct pgm_info {
+       __u16 code;
+};
+
+struct prefix_info {
+       __u32 address;
+};
+
+struct interrupt_info {
+       struct list_head list;
+       u64     type;
+       union {
+               struct io_info io;
+               struct ext_info ext;
+               struct pgm_info pgm;
+               struct prefix_info prefix;
+       };
+};
+
+/* for local_interrupt.action_flags */
+#define ACTION_STORE_ON_STOP 1
+#define ACTION_STOP_ON_STOP  2
+
+struct local_interrupt {
+       spinlock_t lock;
+       struct list_head list;
+       atomic_t active;
+       struct float_interrupt *float_int;
+       int timer_due; /* event indicator for waitqueue below */
+       wait_queue_head_t wq;
+       atomic_t *cpuflags;
+       unsigned int action_bits;
+};
+
+struct float_interrupt {
+       spinlock_t lock;
+       struct list_head list;
+       atomic_t active;
+       int next_rr_cpu;
+       unsigned long idle_mask [(64 + sizeof(long) - 1) / sizeof(long)];
+       struct local_interrupt *local_int[64];
+};
+
+
+struct kvm_vcpu_arch {
+       struct sie_block *sie_block;
+       unsigned long     guest_gprs[16];
+       s390_fp_regs      host_fpregs;
+       unsigned int      host_acrs[NUM_ACRS];
+       s390_fp_regs      guest_fpregs;
+       unsigned int      guest_acrs[NUM_ACRS];
+       struct local_interrupt local_int;
+       struct timer_list ckc_timer;
+       union  {
+               cpuid_t   cpu_id;
+               u64       stidp_data;
+       };
+};
+
+struct kvm_vm_stat {
+       u32 remote_tlb_flush;
+};
+
+struct kvm_arch{
+       unsigned long guest_origin;
+       unsigned long guest_memsize;
+       struct sca_block *sca;
+       debug_info_t *dbf;
+       struct float_interrupt float_int;
+};
+
+extern int sie64a(struct sie_block *, __u64 *);
+#endif
diff --git a/include/asm-s390/kvm_para.h b/include/asm-s390/kvm_para.h
new file mode 100644 (file)
index 0000000..2c50379
--- /dev/null
@@ -0,0 +1,150 @@
+/*
+ * asm-s390/kvm_para.h - definition for paravirtual devices on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#ifndef __S390_KVM_PARA_H
+#define __S390_KVM_PARA_H
+
+/*
+ * Hypercalls for KVM on s390. The calling convention is similar to the
+ * s390 ABI, so we use R2-R6 for parameters 1-5. In addition we use R1
+ * as hypercall number and R7 as parameter 6. The return value is
+ * written to R2. We use the diagnose instruction as hypercall. To avoid
+ * conflicts with existing diagnoses for LPAR and z/VM, we do not use
+ * the instruction encoded number, but specify the number in R1 and
+ * use 0x500 as KVM hypercall
+ *
+ * Copyright IBM Corp. 2007,2008
+ * Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ */
+
+static inline long kvm_hypercall0(unsigned long nr)
+{
+       register unsigned long __nr asm("1") = nr;
+       register long __rc asm("2");
+
+       asm volatile ("diag 2,4,0x500\n"
+                     : "=d" (__rc) : "d" (__nr): "memory", "cc");
+       return __rc;
+}
+
+static inline long kvm_hypercall1(unsigned long nr, unsigned long p1)
+{
+       register unsigned long __nr asm("1") = nr;
+       register unsigned long __p1 asm("2") = p1;
+       register long __rc asm("2");
+
+       asm volatile ("diag 2,4,0x500\n"
+                     : "=d" (__rc) : "d" (__nr), "0" (__p1) : "memory", "cc");
+       return __rc;
+}
+
+static inline long kvm_hypercall2(unsigned long nr, unsigned long p1,
+                              unsigned long p2)
+{
+       register unsigned long __nr asm("1") = nr;
+       register unsigned long __p1 asm("2") = p1;
+       register unsigned long __p2 asm("3") = p2;
+       register long __rc asm("2");
+
+       asm volatile ("diag 2,4,0x500\n"
+                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2)
+                     : "memory", "cc");
+       return __rc;
+}
+
+static inline long kvm_hypercall3(unsigned long nr, unsigned long p1,
+                              unsigned long p2, unsigned long p3)
+{
+       register unsigned long __nr asm("1") = nr;
+       register unsigned long __p1 asm("2") = p1;
+       register unsigned long __p2 asm("3") = p2;
+       register unsigned long __p3 asm("4") = p3;
+       register long __rc asm("2");
+
+       asm volatile ("diag 2,4,0x500\n"
+                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+                       "d" (__p3) : "memory", "cc");
+       return __rc;
+}
+
+
+static inline long kvm_hypercall4(unsigned long nr, unsigned long p1,
+                              unsigned long p2, unsigned long p3,
+                              unsigned long p4)
+{
+       register unsigned long __nr asm("1") = nr;
+       register unsigned long __p1 asm("2") = p1;
+       register unsigned long __p2 asm("3") = p2;
+       register unsigned long __p3 asm("4") = p3;
+       register unsigned long __p4 asm("5") = p4;
+       register long __rc asm("2");
+
+       asm volatile ("diag 2,4,0x500\n"
+                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+                       "d" (__p3), "d" (__p4) : "memory", "cc");
+       return __rc;
+}
+
+static inline long kvm_hypercall5(unsigned long nr, unsigned long p1,
+                              unsigned long p2, unsigned long p3,
+                              unsigned long p4, unsigned long p5)
+{
+       register unsigned long __nr asm("1") = nr;
+       register unsigned long __p1 asm("2") = p1;
+       register unsigned long __p2 asm("3") = p2;
+       register unsigned long __p3 asm("4") = p3;
+       register unsigned long __p4 asm("5") = p4;
+       register unsigned long __p5 asm("6") = p5;
+       register long __rc asm("2");
+
+       asm volatile ("diag 2,4,0x500\n"
+                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+                       "d" (__p3), "d" (__p4), "d" (__p5)  : "memory", "cc");
+       return __rc;
+}
+
+static inline long kvm_hypercall6(unsigned long nr, unsigned long p1,
+                              unsigned long p2, unsigned long p3,
+                              unsigned long p4, unsigned long p5,
+                              unsigned long p6)
+{
+       register unsigned long __nr asm("1") = nr;
+       register unsigned long __p1 asm("2") = p1;
+       register unsigned long __p2 asm("3") = p2;
+       register unsigned long __p3 asm("4") = p3;
+       register unsigned long __p4 asm("5") = p4;
+       register unsigned long __p5 asm("6") = p5;
+       register unsigned long __p6 asm("7") = p6;
+       register long __rc asm("2");
+
+       asm volatile ("diag 2,4,0x500\n"
+                     : "=d" (__rc) : "d" (__nr), "0" (__p1), "d" (__p2),
+                       "d" (__p3), "d" (__p4), "d" (__p5), "d" (__p6)
+                     : "memory", "cc");
+       return __rc;
+}
+
+/* kvm on s390 always has paravirtualization enabled */
+static inline int kvm_para_available(void)
+{
+       return 1;
+}
+
+/* No feature bits are currently assigned for kvm on s390 */
+static inline unsigned int kvm_arch_para_features(void)
+{
+       return 0;
+}
+
+#endif /* __S390_KVM_PARA_H */
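
A hedged guest-side sketch (not part of the commit) of the diag 0x500 convention described in kvm_para.h above: the hypercall number travels in R1 and the first parameter in R2, which is exactly what kvm_hypercall1() encodes. KVM_S390_VIRTIO_NOTIFY comes from kvm_virtio.h below; passing the virtio ring address from kvm_vqconfig as the parameter is an assumption made here purely for illustration, and the snippet only builds in s390 KVM guest kernel context.

/* Illustrative only: notify the host about work on a virtqueue. */
#include <linux/types.h>
#include <asm/kvm_para.h>
#include <asm/kvm_virtio.h>

static long example_notify_virtqueue(struct kvm_vqconfig *config)
{
	/* R1 = KVM_S390_VIRTIO_NOTIFY, R2 = ring address; the result comes back in R2. */
	return kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address);
}
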
diff --git a/include/asm-s390/kvm_virtio.h b/include/asm-s390/kvm_virtio.h
new file mode 100644 (file)
index 0000000..5c871a9
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * kvm_virtio.h - definition for virtio for kvm on s390
+ *
+ * Copyright IBM Corp. 2008
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ *
+ *    Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
+ */
+
+#ifndef __KVM_S390_VIRTIO_H
+#define __KVM_S390_VIRTIO_H
+
+#include <linux/types.h>
+
+struct kvm_device_desc {
+       /* The device type: console, network, disk etc.  Type 0 terminates. */
+       __u8 type;
+       /* The number of virtqueues (first in config array) */
+       __u8 num_vq;
+       /*
+        * The number of bytes of feature bits.  Multiply by 2: one for host
+        * features and one for guest acknowledgements.
+        */
+       __u8 feature_len;
+       /* The number of bytes of the config array after virtqueues. */
+       __u8 config_len;
+       /* A status byte, written by the Guest. */
+       __u8 status;
+       __u8 config[0];
+};
+
+/*
+ * This is how we expect the device configuration field for a virtqueue
+ * to be laid out in config space.
+ */
+struct kvm_vqconfig {
+       /* The token returned with an interrupt. Set by the guest */
+       __u64 token;
+       /* The address of the virtio ring */
+       __u64 address;
+       /* The number of entries in the virtio_ring */
+       __u16 num;
+
+};
+
+#define KVM_S390_VIRTIO_NOTIFY         0
+#define KVM_S390_VIRTIO_RESET          1
+#define KVM_S390_VIRTIO_SET_STATUS     2
+
+#endif
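
The comments in kvm_device_desc above describe the config space layout: num_vq kvm_vqconfig entries come first in config[], followed by 2 * feature_len feature bytes (host features, then guest acknowledgements), followed by config_len bytes of device configuration. A minimal userspace sketch of that offset arithmetic follows, with the structures re-declared locally so it compiles anywhere; whether the real config space carries the compiler's natural padding for kvm_vqconfig is taken as an assumption here, and none of this is part of the commit.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the structures above, for a portable demonstration. */
struct kvm_vqconfig {
	uint64_t token;   /* token returned with an interrupt, set by the guest */
	uint64_t address; /* address of the virtio ring */
	uint16_t num;     /* number of entries in the virtio ring */
};

struct kvm_device_desc {
	uint8_t type;
	uint8_t num_vq;
	uint8_t feature_len;
	uint8_t config_len;
	uint8_t status;
	uint8_t config[];
};

/* config[] = num_vq vqconfigs, then 2 * feature_len feature bytes,
 * then config_len bytes of device configuration. */
static size_t features_offset(const struct kvm_device_desc *d)
{
	return d->num_vq * sizeof(struct kvm_vqconfig);
}

static size_t config_offset(const struct kvm_device_desc *d)
{
	return features_offset(d) + 2 * d->feature_len;
}

int main(void)
{
	struct kvm_device_desc d = { .num_vq = 2, .feature_len = 4, .config_len = 8 };

	printf("feature bits start at config[%zu]\n", features_offset(&d));
	printf("device config starts at config[%zu]\n", config_offset(&d));
	return 0;
}
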
index 5de3efb31445a88f986a1e5480b059088042d520..0bc51d52a899d5b1d07b3bd1dceafa6858a0a3ef 100644 (file)
@@ -381,27 +381,32 @@ struct _lowcore
         /* whether the kernel died with panic() or not */
         __u32        panic_magic;              /* 0xe00 */
 
-       __u8         pad13[0x1200-0xe04];      /* 0xe04 */
+       __u8         pad13[0x11b8-0xe04];      /* 0xe04 */
+
+       /* 64 bit extparam used for pfault, diag 250 etc  */
+       __u64        ext_params2;               /* 0x11B8 */
+
+       __u8         pad14[0x1200-0x11C0];      /* 0x11C0 */
 
         /* System info area */ 
 
        __u64        floating_pt_save_area[16]; /* 0x1200 */
        __u64        gpregs_save_area[16];      /* 0x1280 */
        __u32        st_status_fixed_logout[4]; /* 0x1300 */
-       __u8         pad14[0x1318-0x1310];      /* 0x1310 */
+       __u8         pad15[0x1318-0x1310];      /* 0x1310 */
        __u32        prefixreg_save_area;       /* 0x1318 */
        __u32        fpt_creg_save_area;        /* 0x131c */
-       __u8         pad15[0x1324-0x1320];      /* 0x1320 */
+       __u8         pad16[0x1324-0x1320];      /* 0x1320 */
        __u32        tod_progreg_save_area;     /* 0x1324 */
        __u32        cpu_timer_save_area[2];    /* 0x1328 */
        __u32        clock_comp_save_area[2];   /* 0x1330 */
-       __u8         pad16[0x1340-0x1338];      /* 0x1338 */ 
+       __u8         pad17[0x1340-0x1338];      /* 0x1338 */
        __u32        access_regs_save_area[16]; /* 0x1340 */ 
        __u64        cregs_save_area[16];       /* 0x1380 */
 
        /* align to the top of the prefix area */
 
-       __u8         pad17[0x2000-0x1400];      /* 0x1400 */
+       __u8         pad18[0x2000-0x1400];      /* 0x1400 */
 #endif /* !__s390x__ */
 } __attribute__((packed)); /* End structure*/
 
index 1698e29c5b20fc5b38831bfe0a4b7de80b1487ba..5dd5e7b3476ff1e431fc034f04fe4241a1ea7878 100644 (file)
@@ -7,6 +7,7 @@ typedef struct {
        unsigned long asce_bits;
        unsigned long asce_limit;
        int noexec;
+       int pgstes;
 } mm_context_t;
 
 #endif
index b5a34c6f91a932c0841933c6db88da84ed5a7318..4c2fbf48c9c43fe2fa0a7384bc55c576d6dfc23c 100644 (file)
@@ -20,7 +20,13 @@ static inline int init_new_context(struct task_struct *tsk,
 #ifdef CONFIG_64BIT
        mm->context.asce_bits |= _ASCE_TYPE_REGION3;
 #endif
-       mm->context.noexec = s390_noexec;
+       if (current->mm->context.pgstes) {
+               mm->context.noexec = 0;
+               mm->context.pgstes = 1;
+       } else {
+               mm->context.noexec = s390_noexec;
+               mm->context.pgstes = 0;
+       }
        mm->context.asce_limit = STACK_TOP_MAX;
        crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
        return 0;
index fe7f92b6ae6df2c415688fa52a7d4b1157ac2641..f0f4579eac13144b01f43eaf401428dfb2d8c5db 100644 (file)
 #define PAGE_DEFAULT_ACC       0
 #define PAGE_DEFAULT_KEY       (PAGE_DEFAULT_ACC << 4)
 
+#define HPAGE_SHIFT    20
+#define HPAGE_SIZE     (1UL << HPAGE_SHIFT)
+#define HPAGE_MASK     (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
+
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
+#define ARCH_HAS_HUGE_PTE_TYPE
+#define ARCH_HAS_PREPARE_HUGEPAGE
+#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
+
 #include <asm/setup.h>
 #ifndef __ASSEMBLY__
 
 static inline void clear_page(void *page)
 {
-       register unsigned long reg1 asm ("1") = 0;
-       register void *reg2 asm ("2") = page;
-       register unsigned long reg3 asm ("3") = 4096;
-       asm volatile(
-               "       mvcl    2,0"
-               : "+d" (reg2), "+d" (reg3) : "d" (reg1) : "memory", "cc");
+       if (MACHINE_HAS_PFMF) {
+               asm volatile(
+                       "       .insn   rre,0xb9af0000,%0,%1"
+                       : : "d" (0x10000), "a" (page) : "memory", "cc");
+       } else {
+               register unsigned long reg1 asm ("1") = 0;
+               register void *reg2 asm ("2") = page;
+               register unsigned long reg3 asm ("3") = 4096;
+               asm volatile(
+                       "       mvcl    2,0"
+                       : "+d" (reg2), "+d" (reg3) : "d" (reg1)
+                       : "memory", "cc");
+       }
 }
 
 static inline void copy_page(void *to, void *from)
@@ -108,26 +125,6 @@ page_get_storage_key(unsigned long addr)
        return skey;
 }
 
-extern unsigned long max_pfn;
-
-static inline int pfn_valid(unsigned long pfn)
-{
-       unsigned long dummy;
-       int ccode;
-
-       if (pfn >= max_pfn)
-               return 0;
-
-       asm volatile(
-               "       lra     %0,0(%2)\n"
-               "       ipm     %1\n"
-               "       srl     %1,28\n"
-               : "=d" (dummy), "=d" (ccode)
-               : "a" (pfn << PAGE_SHIFT)
-               : "cc");
-       return !ccode;
-}
-
 #endif /* !__ASSEMBLY__ */
 
 /* to align the pointer to the (next) page boundary */
index 65154dc9a9e538aea84f867c8aab6ac19cdeba3e..c7f4f8e3e297188310955e325c20717510acc590 100644 (file)
@@ -30,6 +30,7 @@
  */
 #ifndef __ASSEMBLY__
 #include <linux/mm_types.h>
+#include <asm/bitops.h>
 #include <asm/bug.h>
 #include <asm/processor.h>
 
@@ -128,7 +129,7 @@ extern char empty_zero_page[PAGE_SIZE];
 #define VMEM_MAX_PAGES ((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
 #define VMEM_MAX_PFN   min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
 #define VMEM_MAX_PHYS  ((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
-#define VMEM_MAP       ((struct page *) VMALLOC_END)
+#define vmemmap                ((struct page *) VMALLOC_END)
 
 /*
  * A 31 bit pagetable entry of S390 has following format:
@@ -219,6 +220,8 @@ extern char empty_zero_page[PAGE_SIZE];
 /* Software bits in the page table entry */
 #define _PAGE_SWT      0x001           /* SW pte type bit t */
 #define _PAGE_SWX      0x002           /* SW pte type bit x */
+#define _PAGE_SPECIAL  0x004           /* SW associated with special page */
+#define __HAVE_ARCH_PTE_SPECIAL
 
 /* Six different types of pages. */
 #define _PAGE_TYPE_EMPTY       0x400
@@ -230,6 +233,15 @@ extern char empty_zero_page[PAGE_SIZE];
 #define _PAGE_TYPE_EX_RO       0x202
 #define _PAGE_TYPE_EX_RW       0x002
 
+/*
+ * Only four types for huge pages, using the invalid bit and protection bit
+ * of a segment table entry.
+ */
+#define _HPAGE_TYPE_EMPTY      0x020   /* _SEGMENT_ENTRY_INV */
+#define _HPAGE_TYPE_NONE       0x220
+#define _HPAGE_TYPE_RO         0x200   /* _SEGMENT_ENTRY_RO  */
+#define _HPAGE_TYPE_RW         0x000
+
 /*
  * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
  * pte_none and pte_file to find out the pte type WITHOUT holding the page
@@ -258,6 +270,13 @@ extern char empty_zero_page[PAGE_SIZE];
  * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
  */
 
+/* Page status table bits for virtualization */
+#define RCP_PCL_BIT    55
+#define RCP_HR_BIT     54
+#define RCP_HC_BIT     53
+#define RCP_GR_BIT     50
+#define RCP_GC_BIT     49
+
 #ifndef __s390x__
 
 /* Bits in the segment table address-space-control-element */
@@ -315,6 +334,9 @@ extern char empty_zero_page[PAGE_SIZE];
 #define _SEGMENT_ENTRY         (0)
 #define _SEGMENT_ENTRY_EMPTY   (_SEGMENT_ENTRY_INV)
 
+#define _SEGMENT_ENTRY_LARGE   0x400   /* STE-format control, large page   */
+#define _SEGMENT_ENTRY_CO      0x100   /* change-recording override   */
+
 #endif /* __s390x__ */
 
 /*
@@ -510,9 +532,56 @@ static inline int pte_file(pte_t pte)
        return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
 }
 
+static inline int pte_special(pte_t pte)
+{
+       return (pte_val(pte) & _PAGE_SPECIAL);
+}
+
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(a,b)  (pte_val(a) == pte_val(b))
 
+static inline void rcp_lock(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+       unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+       preempt_disable();
+       while (test_and_set_bit(RCP_PCL_BIT, pgste))
+               ;
+#endif
+}
+
+static inline void rcp_unlock(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+       unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+       clear_bit(RCP_PCL_BIT, pgste);
+       preempt_enable();
+#endif
+}
+
+/* forward declaration for SetPageUptodate in page-flags.h */
+static inline void page_clear_dirty(struct page *page);
+#include <linux/page-flags.h>
+
+static inline void ptep_rcp_copy(pte_t *ptep)
+{
+#ifdef CONFIG_PGSTE
+       struct page *page = virt_to_page(pte_val(*ptep));
+       unsigned int skey;
+       unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+
+       skey = page_get_storage_key(page_to_phys(page));
+       if (skey & _PAGE_CHANGED)
+               set_bit_simple(RCP_GC_BIT, pgste);
+       if (skey & _PAGE_REFERENCED)
+               set_bit_simple(RCP_GR_BIT, pgste);
+       if (test_and_clear_bit_simple(RCP_HC_BIT, pgste))
+               SetPageDirty(page);
+       if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
+               SetPageReferenced(page);
+#endif
+}
+
 /*
  * query functions pte_write/pte_dirty/pte_young only work if
  * pte_present() is true. Undefined behaviour if not..
@@ -599,6 +668,8 @@ static inline void pmd_clear(pmd_t *pmd)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
+       if (mm->context.pgstes)
+               ptep_rcp_copy(ptep);
        pte_val(*ptep) = _PAGE_TYPE_EMPTY;
        if (mm->context.noexec)
                pte_val(ptep[PTRS_PER_PTE]) = _PAGE_TYPE_EMPTY;
@@ -663,10 +734,34 @@ static inline pte_t pte_mkyoung(pte_t pte)
        return pte;
 }
 
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+       pte_val(pte) |= _PAGE_SPECIAL;
+       return pte;
+}
+
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                            unsigned long addr, pte_t *ptep)
 {
+#ifdef CONFIG_PGSTE
+       unsigned long physpage;
+       int young;
+       unsigned long *pgste;
+
+       if (!vma->vm_mm->context.pgstes)
+               return 0;
+       physpage = pte_val(*ptep) & PAGE_MASK;
+       pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
+
+       young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
+       rcp_lock(ptep);
+       if (young)
+               set_bit_simple(RCP_GR_BIT, pgste);
+       young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
+       rcp_unlock(ptep);
+       return young;
+#endif
        return 0;
 }
 
@@ -674,7 +769,13 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
                                         unsigned long address, pte_t *ptep)
 {
-       /* No need to flush TLB; bits are in storage key */
+       /* No need to flush TLB
+        * On s390 reference bits are in storage key and never in TLB
+        * With virtualization we handle the reference bit; without it
+        * we can simply return */
+#ifdef CONFIG_PGSTE
+       return ptep_test_and_clear_young(vma, address, ptep);
+#endif
        return 0;
 }
 
@@ -693,15 +794,25 @@ static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
                        : "=m" (*ptep) : "m" (*ptep),
                          "a" (pto), "a" (address));
        }
-       pte_val(*ptep) = _PAGE_TYPE_EMPTY;
 }
 
 static inline void ptep_invalidate(struct mm_struct *mm,
                                   unsigned long address, pte_t *ptep)
 {
+       if (mm->context.pgstes) {
+               rcp_lock(ptep);
+               __ptep_ipte(address, ptep);
+               ptep_rcp_copy(ptep);
+               pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+               rcp_unlock(ptep);
+               return;
+       }
        __ptep_ipte(address, ptep);
-       if (mm->context.noexec)
+       pte_val(*ptep) = _PAGE_TYPE_EMPTY;
+       if (mm->context.noexec) {
                __ptep_ipte(address, ptep + PTRS_PER_PTE);
+               pte_val(*(ptep + PTRS_PER_PTE)) = _PAGE_TYPE_EMPTY;
+       }
 }
 
 /*
@@ -964,17 +1075,15 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 
 #define kern_addr_valid(addr)   (1)
 
-extern int add_shared_memory(unsigned long start, unsigned long size);
-extern int remove_shared_memory(unsigned long start, unsigned long size);
+extern int vmem_add_mapping(unsigned long start, unsigned long size);
+extern int vmem_remove_mapping(unsigned long start, unsigned long size);
+extern int s390_enable_sie(void);
 
 /*
  * No page table caches to initialise
  */
 #define pgtable_cache_init()   do { } while (0)
 
-#define __HAVE_ARCH_MEMMAP_INIT
-extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
-
 #include <asm-generic/pgtable.h>
 
 #endif /* _S390_PAGE_H */
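
The pgste handling added above hinges on rcp_lock()/rcp_unlock(): the shadow word behind each pte doubles as a one-bit spinlock (RCP_PCL_BIT), taken by spinning on test_and_set_bit with preemption disabled and released with clear_bit, so that ptep_rcp_copy() and ptep_invalidate() can update the referenced/changed bits kept in the pgste consistently. A minimal userspace sketch of that bit-spinlock pattern, with GCC atomic builtins standing in for the kernel bitops and an invented bit position:

/* Sketch only: GCC builtins replace test_and_set_bit/clear_bit, and
 * the lock bit position is invented for illustration. */
#include <stdio.h>

#define PCL_BIT 7                       /* stand-in for RCP_PCL_BIT */

static unsigned long pgste;             /* stand-in for the pte's shadow word */

static void rcp_lock_sketch(unsigned long *word)
{
        /* spin until the lock bit was observed clear */
        while (__atomic_fetch_or(word, 1UL << PCL_BIT, __ATOMIC_ACQUIRE) &
               (1UL << PCL_BIT))
                ;
}

static void rcp_unlock_sketch(unsigned long *word)
{
        __atomic_fetch_and(word, ~(1UL << PCL_BIT), __ATOMIC_RELEASE);
}

int main(void)
{
        rcp_lock_sketch(&pgste);
        /* ...merge referenced/changed state under the lock here... */
        rcp_unlock_sketch(&pgste);
        printf("pgste after unlock: %#lx\n", pgste);
        return 0;
}
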
index 8eaf343a12a8d72e6df3ebfc5c69dc7067c9d8e6..a00f79dd323b540ae31fe01c0e764f7e012f157c 100644 (file)
@@ -172,16 +172,7 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
  */
 extern void task_show_regs(struct seq_file *m, struct task_struct *task);
 
-extern void show_registers(struct pt_regs *regs);
 extern void show_code(struct pt_regs *regs);
-extern void show_trace(struct task_struct *task, unsigned long *sp);
-#ifdef CONFIG_64BIT
-extern void show_last_breaking_event(struct pt_regs *regs);
-#else
-static inline void show_last_breaking_event(struct pt_regs *regs)
-{
-}
-#endif
 
 unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk) ((struct pt_regs *) \
index 61f6952f2e357e2b76f442994b91aa55c55e596f..441d7c260857845cd6e0e1b557277565c448a2cd 100644 (file)
@@ -463,8 +463,6 @@ struct user_regs_struct
 };
 
 #ifdef __KERNEL__
-#define __ARCH_SYS_PTRACE      1
-
 /*
  * These are defined as per linux/ptrace.h, which see.
  */
index 4b8ff55f680e4d6e26183d2da148c545054116d6..11240342a0f4f54a6e7e66b3e03667400d4a9e35 100644 (file)
@@ -127,6 +127,7 @@ extern int do_QDIO(struct ccw_device*, unsigned int flags,
                   unsigned int qidx,unsigned int count,
                   struct qdio_buffer *buffers);
 
+extern int qdio_get_ssqd_pct(struct ccw_device*);
 extern int qdio_synchronize(struct ccw_device*, unsigned int flags,
                            unsigned int queue_number);
 
index a76a6b8fd887e259a62681053f058c850c3b54d4..ba69674012a7bf3b622b51d9c873bd6149bab131 100644 (file)
@@ -59,22 +59,42 @@ extern unsigned int s390_noexec;
  */
 extern unsigned long machine_flags;
 
-#define MACHINE_IS_VM          (machine_flags & 1)
-#define MACHINE_IS_P390                (machine_flags & 4)
-#define MACHINE_HAS_MVPG       (machine_flags & 16)
-#define MACHINE_HAS_IDTE       (machine_flags & 128)
-#define MACHINE_HAS_DIAG9C     (machine_flags & 256)
+#define MACHINE_FLAG_VM                (1UL << 0)
+#define MACHINE_FLAG_IEEE      (1UL << 1)
+#define MACHINE_FLAG_P390      (1UL << 2)
+#define MACHINE_FLAG_CSP       (1UL << 3)
+#define MACHINE_FLAG_MVPG      (1UL << 4)
+#define MACHINE_FLAG_DIAG44    (1UL << 5)
+#define MACHINE_FLAG_IDTE      (1UL << 6)
+#define MACHINE_FLAG_DIAG9C    (1UL << 7)
+#define MACHINE_FLAG_MVCOS     (1UL << 8)
+#define MACHINE_FLAG_KVM       (1UL << 9)
+#define MACHINE_FLAG_HPAGE     (1UL << 10)
+#define MACHINE_FLAG_PFMF      (1UL << 11)
+
+#define MACHINE_IS_VM          (machine_flags & MACHINE_FLAG_VM)
+#define MACHINE_IS_KVM         (machine_flags & MACHINE_FLAG_KVM)
+#define MACHINE_IS_P390                (machine_flags & MACHINE_FLAG_P390)
+#define MACHINE_HAS_DIAG9C     (machine_flags & MACHINE_FLAG_DIAG9C)
 
 #ifndef __s390x__
-#define MACHINE_HAS_IEEE       (machine_flags & 2)
-#define MACHINE_HAS_CSP                (machine_flags & 8)
+#define MACHINE_HAS_IEEE       (machine_flags & MACHINE_FLAG_IEEE)
+#define MACHINE_HAS_CSP                (machine_flags & MACHINE_FLAG_CSP)
+#define MACHINE_HAS_IDTE       (0)
 #define MACHINE_HAS_DIAG44     (1)
+#define MACHINE_HAS_MVPG       (machine_flags & MACHINE_FLAG_MVPG)
 #define MACHINE_HAS_MVCOS      (0)
+#define MACHINE_HAS_HPAGE      (0)
+#define MACHINE_HAS_PFMF       (0)
 #else /* __s390x__ */
 #define MACHINE_HAS_IEEE       (1)
 #define MACHINE_HAS_CSP                (1)
-#define MACHINE_HAS_DIAG44     (machine_flags & 32)
-#define MACHINE_HAS_MVCOS      (machine_flags & 512)
+#define MACHINE_HAS_IDTE       (machine_flags & MACHINE_FLAG_IDTE)
+#define MACHINE_HAS_DIAG44     (machine_flags & MACHINE_FLAG_DIAG44)
+#define MACHINE_HAS_MVPG       (1)
+#define MACHINE_HAS_MVCOS      (machine_flags & MACHINE_FLAG_MVCOS)
+#define MACHINE_HAS_HPAGE      (machine_flags & MACHINE_FLAG_HPAGE)
+#define MACHINE_HAS_PFMF       (machine_flags & MACHINE_FLAG_PFMF)
 #endif /* __s390x__ */
 
 #define MACHINE_HAS_SCLP       (!MACHINE_IS_P390)
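
The setup.h change replaces the raw bit values previously tested against machine_flags (1, 4, 16, ...) with named MACHINE_FLAG_* constants and rebuilds the MACHINE_IS_*/MACHINE_HAS_* predicates from them; features that are impossible or always present on one build (e.g. IDTE on 31-bit, MVPG on 64-bit) become constant 0 or 1, presumably so the compiler can discard the dead branches. A tiny sketch of the same pattern with invented feature names:

/* Illustration only: feature names and bit positions are made up. */
#include <stdio.h>

#define FLAG_VM    (1UL << 0)
#define FLAG_IDTE  (1UL << 6)

static unsigned long machine_flags;

#define HAS_VM    (machine_flags & FLAG_VM)
#define HAS_IDTE  (machine_flags & FLAG_IDTE)

int main(void)
{
        machine_flags |= FLAG_VM;       /* as if detected at boot */
        printf("VM: %s, IDTE: %s\n", HAS_VM ? "yes" : "no",
               HAS_IDTE ? "yes" : "no");
        return 0;
}
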
index 6f3821a6a902130010779a1c3f168a07fa5fe3d3..ae89cf2478fcabee3a0e4acf1834465286a9224e 100644 (file)
@@ -19,6 +19,7 @@
 #include <asm/lowcore.h>
 #include <asm/sigp.h>
 #include <asm/ptrace.h>
+#include <asm/system.h>
 
 /*
   s390 specific smp.c headers
@@ -53,10 +54,7 @@ extern void machine_power_off_smp(void);
 
 static inline __u16 hard_smp_processor_id(void)
 {
-        __u16 cpu_address;
-       asm volatile("stap %0" : "=m" (cpu_address));
-        return cpu_address;
+       return stap();
 }
 
 /*
@@ -108,5 +106,11 @@ static inline void smp_send_stop(void)
 #define smp_cpu_not_running(cpu)       1
 #endif
 
+#ifdef CONFIG_HOTPLUG_CPU
+extern int smp_rescan_cpus(void);
+#else
+static inline int smp_rescan_cpus(void) { return 0; }
+#endif
+
 extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
 #endif
diff --git a/include/asm-s390/sparsemem.h b/include/asm-s390/sparsemem.h
new file mode 100644 (file)
index 0000000..06dfdab
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef _ASM_S390_SPARSEMEM_H
+#define _ASM_S390_SPARSEMEM_H
+
+#define SECTION_SIZE_BITS      25
+
+#ifdef CONFIG_64BIT
+
+#define MAX_PHYSADDR_BITS      42
+#define MAX_PHYSMEM_BITS       42
+
+#else
+
+#define MAX_PHYSADDR_BITS      31
+#define MAX_PHYSMEM_BITS       31
+
+#endif /* CONFIG_64BIT */
+
+#endif /* _ASM_S390_SPARSEMEM_H */
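
For reference, SECTION_SIZE_BITS of 25 makes each sparsemem section 32 MiB, and MAX_PHYSMEM_BITS of 42 (64-bit) or 31 (31-bit) caps the modelled physical address space at 4 TiB or 2 GiB; the throwaway program below just derives those figures from the new constants.

/* Derive the sizes implied by the new sparsemem constants (64-bit case). */
#include <stdio.h>

int main(void)
{
        unsigned long long section = 1ULL << 25;   /* SECTION_SIZE_BITS */
        unsigned long long physmem = 1ULL << 42;   /* MAX_PHYSMEM_BITS  */

        printf("section size : %llu MiB\n", section >> 20);   /* 32     */
        printf("max physmem  : %llu TiB\n", physmem >> 40);   /* 4      */
        printf("max sections : %llu\n", physmem / section);   /* 131072 */
        return 0;
}
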
index abe10ae15e468d9e8e6733fd26e4a5b0d3bdc7e6..79d01343f8b0681c41b3eff67ccd33f6d1686b06 100644 (file)
@@ -11,6 +11,9 @@
  *              Christian Borntraeger <borntraeger@de.ibm.com>
  */
 
+#ifndef __ASM_S390_SYSINFO_H
+#define __ASM_S390_SYSINFO_H
+
 struct sysinfo_1_1_1 {
        char reserved_0[32];
        char manufacturer[16];
@@ -114,3 +117,5 @@ static inline int stsi(void *sysinfo, int fc, int sel1, int sel2)
                : "cc", "memory");
        return r0;
 }
+
+#endif /* __ASM_S390_SYSINFO_H */
index 92098df4d6e37a2567d33f4c4fca156a72264146..c819ae25a8425ddfd72136dda4d674bf0f8b0ac1 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/ptrace.h>
 #include <asm/setup.h>
 #include <asm/processor.h>
+#include <asm/lowcore.h>
 
 #ifdef __KERNEL__
 
@@ -422,6 +423,23 @@ extern void smp_ctl_clear_bit(int cr, int bit);
 
 #endif /* CONFIG_SMP */
 
+static inline unsigned int stfl(void)
+{
+       asm volatile(
+               "       .insn   s,0xb2b10000,0(0)\n" /* stfl */
+               "0:\n"
+               EX_TABLE(0b,0b));
+       return S390_lowcore.stfl_fac_list;
+}
+
+static inline unsigned short stap(void)
+{
+       unsigned short cpu_address;
+
+       asm volatile("stap %0" : "=m" (cpu_address));
+       return cpu_address;
+}
+
 extern void (*_machine_restart)(char *command);
 extern void (*_machine_halt)(void);
 extern void (*_machine_power_off)(void);
index 0a518915bf90f1087466444caf0809a8b9b3a02b..99bbed99a3b2d288d17a7154c174dd7de514c390 100644 (file)
@@ -89,7 +89,6 @@ static inline struct thread_info *current_thread_info(void)
  * thread information flags bit numbers
  */
 #define TIF_SYSCALL_TRACE      0       /* syscall trace active */
-#define TIF_RESTORE_SIGMASK    1       /* restore signal mask in do_signal() */
 #define TIF_SIGPENDING         2       /* signal pending */
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
 #define TIF_RESTART_SVC                4       /* restart svc with new svc number */
@@ -101,6 +100,7 @@ static inline struct thread_info *current_thread_info(void)
                                           TIF_NEED_RESCHED */
 #define TIF_31BIT              18      /* 32bit process */ 
 #define TIF_MEMDIE             19
+#define TIF_RESTORE_SIGMASK    20      /* restore signal mask in do_signal() */
 
 #define _TIF_SYSCALL_TRACE     (1<<TIF_SYSCALL_TRACE)
 #define _TIF_RESTORE_SIGMASK   (1<<TIF_RESTORE_SIGMASK)
index 9e57a93d7de153dfcfb094bd07d73cc29d31599f..d60394b9745ef60387ad385013f18f23d622980c 100644 (file)
@@ -2,6 +2,7 @@
 #define _S390_TLBFLUSH_H
 
 #include <linux/mm.h>
+#include <linux/sched.h>
 #include <asm/processor.h>
 #include <asm/pgalloc.h>
 
index 8e97b06f298ac5db164872ea50797fdccd5bc0e6..d96c916434586c3ea05abdac4ff451f0caa08465 100644 (file)
@@ -7,6 +7,10 @@
 
 cpumask_t cpu_coregroup_map(unsigned int cpu);
 
+extern cpumask_t cpu_core_map[NR_CPUS];
+
+#define topology_core_siblings(cpu)    (cpu_core_map[cpu])
+
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
 
index 8ee86dbedd1fe91fd87f3d99b9689466b6bd577c..da9627afe5d8578ea951037f7ddbba33fb8e6b08 100644 (file)
@@ -1,24 +1,13 @@
-/*
- *  include/asm-s390/unaligned.h
- *
- *  S390 version
- *
- *  Derived from "include/asm-i386/unaligned.h"
- */
-
-#ifndef __S390_UNALIGNED_H
-#define __S390_UNALIGNED_H
+#ifndef _ASM_S390_UNALIGNED_H
+#define _ASM_S390_UNALIGNED_H
 
 /*
  * The S390 can do unaligned accesses itself. 
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-#define get_unaligned(ptr) (*(ptr))
-
-#define put_unaligned(val, ptr) ((void)( *(ptr) = (val) ))
+#define get_unaligned  __get_unaligned_be
+#define put_unaligned  __put_unaligned_be
 
-#endif
+#endif /* _ASM_S390_UNALIGNED_H */
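
The asm/unaligned.h rewrites in this merge (s390 here, plus sh, sparc, sparc64 and v850 below) all switch to the shared linux/unaligned helpers and pick the byte order and access strategy that suits each CPU; on s390, which handles unaligned loads in hardware, get_unaligned/put_unaligned become the big-endian accessors. Callers keep the same pattern: read or write a multi-byte field at an arbitrary offset in a packed buffer. A hedged userspace stand-in for that call pattern, with memcpy in place of the kernel helpers:

/* Userspace stand-in for get_unaligned() on a packed buffer; memcpy
 * replaces the kernel helper, and the printed value follows host byte
 * order rather than the fixed big-endian order the s390 header picks. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t get_unaligned_u32(const void *p)
{
        uint32_t v;
        memcpy(&v, p, sizeof(v));       /* legal at any alignment */
        return v;
}

int main(void)
{
        unsigned char wire[7] = { 0xaa, 0x12, 0x34, 0x56, 0x78, 0xbb, 0xcc };

        /* the field starts at an odd offset; a plain cast would be UB */
        printf("field = %#x\n", get_unaligned_u32(wire + 1));
        return 0;
}
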
index b6ba5a60dec217b4b7f4421db7cc8e402c2e15d4..d7d382f63ee53a5c6a59a7033169ba6bc366cb8f 100644 (file)
@@ -95,6 +95,7 @@ static inline unsigned long ffz(unsigned long word)
 #include <asm-generic/bitops/ext2-atomic.h>
 #include <asm-generic/bitops/minix.h>
 #include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
 #endif /* __KERNEL__ */
index 74ed3681d33c3298a0b35f5d514fff8251320994..68256ec5fa35b3e2c46ead9e2566810583351dcd 100644 (file)
@@ -4,8 +4,8 @@
 #ifdef __KERNEL__
 
 #include <linux/futex.h>
+#include <linux/uaccess.h>
 #include <asm/errno.h>
-#include <asm/uaccess.h>
 
 /* XXX: UP variants, fix for SH-4A and SMP.. */
 #include <asm/futex-irq.h>
diff --git a/include/asm-sh/hugetlb.h b/include/asm-sh/hugetlb.h
new file mode 100644 (file)
index 0000000..0240230
--- /dev/null
@@ -0,0 +1,91 @@
+#ifndef _ASM_SH_HUGETLB_H
+#define _ASM_SH_HUGETLB_H
+
+#include <asm/page.h>
+
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+                                        unsigned long addr,
+                                        unsigned long len) {
+       return 0;
+}
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+                                         unsigned long addr, unsigned long end,
+                                         unsigned long floor,
+                                         unsigned long ceiling)
+{
+       free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                                  pte_t *ptep, pte_t pte)
+{
+       set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                                           unsigned long addr, pte_t *ptep)
+{
+       return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+       return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+       return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+#endif /* _ASM_SH_HUGETLB_H */
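
Most hooks in the new SH hugetlb.h simply forward to the generic pte helpers; the only real check is prepare_hugepage_range(), which rejects a mapping whose start or length is not a whole multiple of the huge page size. The masking test it performs is sketched below with an assumed 4 MiB huge page and -1 standing in for -EINVAL:

/* The alignment test done by prepare_hugepage_range(); the 4 MiB huge
 * page size is assumed for illustration, -1 stands in for -EINVAL. */
#include <stdio.h>

#define HPAGE_SIZE (4UL << 20)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))

static int prepare_range(unsigned long addr, unsigned long len)
{
        if (len & ~HPAGE_MASK)          /* not a whole number of huge pages */
                return -1;
        if (addr & ~HPAGE_MASK)         /* start not huge-page aligned */
                return -1;
        return 0;
}

int main(void)
{
        printf("%d\n", prepare_range(0x400000, 0x800000));   /* 0  (ok)         */
        printf("%d\n", prepare_range(0x401000, 0x800000));   /* -1 (misaligned) */
        return 0;
}
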
index 3e3557c53c558c9c3f244ef9148313a4cd64dc87..cbc731d35c25a1a96f8cef4bef6daa7f2563ccdb 100644 (file)
@@ -326,6 +326,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 #define pte_dirty(pte)         ((pte).pte_low & _PAGE_DIRTY)
 #define pte_young(pte)         ((pte).pte_low & _PAGE_ACCESSED)
 #define pte_file(pte)          ((pte).pte_low & _PAGE_FILE)
+#define pte_special(pte)       (0)
 
 #ifdef CONFIG_X2TLB
 #define pte_write(pte)         ((pte).pte_high & _PAGE_EXT_USER_WRITE)
@@ -356,6 +357,8 @@ PTE_BIT_FUNC(low, mkdirty, |= _PAGE_DIRTY);
 PTE_BIT_FUNC(low, mkold, &= ~_PAGE_ACCESSED);
 PTE_BIT_FUNC(low, mkyoung, |= _PAGE_ACCESSED);
 
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+
 /*
  * Macro and implementation to make a page protection as uncachable.
  */
index f9dd9d3114412d73190c7ba9ce4dabe89985b586..c78990cda55757f4a175ceb4fb987a91ef4dc588 100644 (file)
@@ -254,10 +254,11 @@ extern void __handle_bad_pmd_kernel(pmd_t * pmd);
 /*
  * The following have defined behavior only work if pte_present() is true.
  */
-static inline int pte_dirty(pte_t pte){ return pte_val(pte) & _PAGE_DIRTY; }
-static inline int pte_young(pte_t pte){ return pte_val(pte) & _PAGE_ACCESSED; }
-static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
-static inline int pte_write(pte_t pte){ return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_dirty(pte_t pte)  { return pte_val(pte) & _PAGE_DIRTY; }
+static inline int pte_young(pte_t pte)  { return pte_val(pte) & _PAGE_ACCESSED; }
+static inline int pte_file(pte_t pte)   { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_write(pte_t pte)  { return pte_val(pte) & _PAGE_WRITE; }
+static inline int pte_special(pte_t pte){ return 0; }
 
 static inline pte_t pte_wrprotect(pte_t pte)   { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
 static inline pte_t pte_mkclean(pte_t pte)     { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
@@ -266,6 +267,7 @@ static inline pte_t pte_mkwrite(pte_t pte)  { set_pte(&pte, __pte(pte_val(pte) |
 static inline pte_t pte_mkdirty(pte_t pte)     { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
 static inline pte_t pte_mkyoung(pte_t pte)     { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
 static inline pte_t pte_mkhuge(pte_t pte)      { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
+static inline pte_t pte_mkspecial(pte_t pte)   { return pte; }
 
 
 /*
index 5250e3063b42d32ad5dfd751121930920918d93a..c1641a01d50ff738d9f4bacd628b59155f400cce 100644 (file)
@@ -1,7 +1,19 @@
-#ifndef __ASM_SH_UNALIGNED_H
-#define __ASM_SH_UNALIGNED_H
+#ifndef _ASM_SH_UNALIGNED_H
+#define _ASM_SH_UNALIGNED_H
 
 /* SH can't handle unaligned accesses. */
-#include <asm-generic/unaligned.h>
+#ifdef __LITTLE_ENDIAN__
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#else
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#endif
 
-#endif /* __ASM_SH_UNALIGNED_H */
+#endif /* _ASM_SH_UNALIGNED_H */
index fcdba511633977559a3c2d3afbc76af406be6e6c..e6532c3e09c96efba0429780072f6224984c40b8 100644 (file)
        nop; \
        nop;
 
+#ifdef CONFIG_KGDB
+#define KGDB_TRAP(num) \
+       b kgdb_trap_low; \
+       rd %psr,%l0; \
+       nop; \
+       nop;
+#else
+#define KGDB_TRAP(num) \
+       BAD_TRAP(num)
+#endif
+
 /* The Get Condition Codes software trap for userland. */
 #define GETCC_TRAP \
         b getcc_trap_handler; mov %psr, %l0; nop; nop;
index d120adfb429fcfaa0483aa8296b69ea47fa2c723..b6ef301d05bf880d904c6d4ffde709bdf8f580d0 100644 (file)
@@ -1,94 +1,38 @@
-/* $Id: kgdb.h,v 1.8 1998/01/07 06:33:44 baccala Exp $
- * kgdb.h: Defines and declarations for serial line source level
- *         remote debugging of the Linux kernel using gdb.
- *
- * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
- */
 #ifndef _SPARC_KGDB_H
 #define _SPARC_KGDB_H
 
-#ifndef __ASSEMBLY__
-/* To init the kgdb engine. */
-extern void set_debug_traps(void);
-
-/* To enter the debugger explicitly. */
-extern void breakpoint(void);
-
-/* For convenience we define the format of a kgdb trap breakpoint
- * frame here also.
- */
-struct kgdb_frame {
-       unsigned long globals[8];
-       unsigned long outs[8];
-       unsigned long locals[8];
-       unsigned long ins[8];
-       unsigned long fpregs[32];
-       unsigned long y;
-       unsigned long psr;
-       unsigned long wim;
-       unsigned long tbr;
-       unsigned long pc;
-       unsigned long npc;
-       unsigned long fpsr;
-       unsigned long cpsr;
+#ifdef CONFIG_SPARC32
+#define BUFMAX                 2048
+#else
+#define BUFMAX                 4096
+#endif
+
+enum regnames {
+       GDB_G0, GDB_G1, GDB_G2, GDB_G3, GDB_G4, GDB_G5, GDB_G6, GDB_G7,
+       GDB_O0, GDB_O1, GDB_O2, GDB_O3, GDB_O4, GDB_O5, GDB_SP, GDB_O7,
+       GDB_L0, GDB_L1, GDB_L2, GDB_L3, GDB_L4, GDB_L5, GDB_L6, GDB_L7,
+       GDB_I0, GDB_I1, GDB_I2, GDB_I3, GDB_I4, GDB_I5, GDB_FP, GDB_I7,
+       GDB_F0,
+       GDB_F31 = GDB_F0 + 31,
+#ifdef CONFIG_SPARC32
+       GDB_Y, GDB_PSR, GDB_WIM, GDB_TBR, GDB_PC, GDB_NPC,
+       GDB_FSR, GDB_CSR,
+#else
+       GDB_F32 = GDB_F0 + 32,
+       GDB_F62 = GDB_F32 + 15,
+       GDB_PC, GDB_NPC, GDB_STATE, GDB_FSR, GDB_FPRS, GDB_Y,
+#endif
 };
-#endif /* !(__ASSEMBLY__) */
-
-/* Macros for assembly usage of the kgdb breakpoint frame. */
-#define KGDB_G0     0x000
-#define KGDB_G1     0x004
-#define KGDB_G2     0x008
-#define KGDB_G4     0x010
-#define KGDB_G6     0x018
-#define KGDB_I0     0x020
-#define KGDB_I2     0x028
-#define KGDB_I4     0x030
-#define KGDB_I6     0x038
-#define KGDB_Y      0x100
-#define KGDB_PSR    0x104
-#define KGDB_WIM    0x108
-#define KGDB_TBR    0x10c
-#define KGDB_PC     0x110
-#define KGDB_NPC    0x114
-
-#define SAVE_KGDB_GLOBALS(reg) \
-        std     %g0, [%reg + STACKFRAME_SZ + KGDB_G0]; \
-        std     %g2, [%reg + STACKFRAME_SZ + KGDB_G2]; \
-        std     %g4, [%reg + STACKFRAME_SZ + KGDB_G4]; \
-        std     %g6, [%reg + STACKFRAME_SZ + KGDB_G6];
-
-#define SAVE_KGDB_INS(reg) \
-        std     %i0, [%reg + STACKFRAME_SZ + KGDB_I0]; \
-        std     %i2, [%reg + STACKFRAME_SZ + KGDB_I2]; \
-        std     %i4, [%reg + STACKFRAME_SZ + KGDB_I4]; \
-        std     %i6, [%reg + STACKFRAME_SZ + KGDB_I6];
-
-#define SAVE_KGDB_SREGS(reg, reg_y, reg_psr, reg_wim, reg_tbr, reg_pc, reg_npc) \
-        st      %reg_y, [%reg + STACKFRAME_SZ + KGDB_Y]; \
-        st      %reg_psr, [%reg + STACKFRAME_SZ + KGDB_PSR]; \
-        st      %reg_wim, [%reg + STACKFRAME_SZ + KGDB_WIM]; \
-        st      %reg_tbr, [%reg + STACKFRAME_SZ + KGDB_TBR]; \
-        st      %reg_pc, [%reg + STACKFRAME_SZ + KGDB_PC]; \
-        st      %reg_npc, [%reg + STACKFRAME_SZ + KGDB_NPC];
 
-#define LOAD_KGDB_GLOBALS(reg) \
-        ld      [%reg + STACKFRAME_SZ + KGDB_G1], %g1; \
-        ldd     [%reg + STACKFRAME_SZ + KGDB_G2], %g2; \
-        ldd     [%reg + STACKFRAME_SZ + KGDB_G4], %g4; \
-        ldd     [%reg + STACKFRAME_SZ + KGDB_G6], %g6;
+#ifdef CONFIG_SPARC32
+#define NUMREGBYTES            ((GDB_CSR + 1) * 4)
+#else
+#define NUMREGBYTES            ((GDB_Y + 1) * 8)
+#endif
 
-#define LOAD_KGDB_INS(reg) \
-        ldd     [%reg + STACKFRAME_SZ + KGDB_I0], %i0; \
-        ldd     [%reg + STACKFRAME_SZ + KGDB_I2], %i2; \
-        ldd     [%reg + STACKFRAME_SZ + KGDB_I4], %i4; \
-        ldd     [%reg + STACKFRAME_SZ + KGDB_I6], %i6;
+extern void arch_kgdb_breakpoint(void);
 
-#define LOAD_KGDB_SREGS(reg, reg_y, reg_psr, reg_wim, reg_tbr, reg_pc, reg_npc) \
-       ld      [%reg + STACKFRAME_SZ + KGDB_Y], %reg_y; \
-       ld      [%reg + STACKFRAME_SZ + KGDB_PSR], %reg_psr; \
-       ld      [%reg + STACKFRAME_SZ + KGDB_WIM], %reg_wim; \
-       ld      [%reg + STACKFRAME_SZ + KGDB_TBR], %reg_tbr; \
-       ld      [%reg + STACKFRAME_SZ + KGDB_PC], %reg_pc; \
-       ld      [%reg + STACKFRAME_SZ + KGDB_NPC], %reg_npc;
+#define BREAK_INSTR_SIZE       4
+#define CACHE_FLUSH_IS_SAFE    1
 
-#endif /* !(_SPARC_KGDB_H) */
+#endif /* _SPARC_KGDB_H */
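
The rewritten sparc kgdb.h drops the old hand-rolled frame layout and instead sizes the GDB register transfer buffer straight from the regnames enum: (GDB_CSR + 1) 4-byte registers on sparc32, (GDB_Y + 1) 8-byte registers on sparc64. Reproducing the sparc32 arithmetic (the counts below are derived from the enum above, not stated in the patch):

/* Reproduce the sparc32 NUMREGBYTES arithmetic from a trimmed copy of
 * the enum; intermediate register names are elided for brevity. */
#include <stdio.h>

enum regnames {
        GDB_G0,                         /* 32 integer regs: %g, %o, %l, %i */
        GDB_I7 = 31,
        GDB_F0,                         /* 32 FP regs */
        GDB_F31 = GDB_F0 + 31,
        GDB_Y, GDB_PSR, GDB_WIM, GDB_TBR, GDB_PC, GDB_NPC,
        GDB_FSR, GDB_CSR,
};

int main(void)
{
        printf("registers   : %d\n", GDB_CSR + 1);        /* 72  */
        printf("NUMREGBYTES : %d\n", (GDB_CSR + 1) * 4);  /* 288 */
        return 0;
}
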
index 2cc235b74d94932de828fae558a48eb17294575d..d84af6d95f5c09f5a5c746b27a0bea8699680848 100644 (file)
@@ -219,6 +219,11 @@ static inline int pte_file(pte_t pte)
        return pte_val(pte) & BTFIXUP_HALF(pte_filei);
 }
 
+static inline int pte_special(pte_t pte)
+{
+       return 0;
+}
+
 /*
  */
 BTFIXUPDEF_HALF(pte_wrprotecti)
@@ -251,6 +256,8 @@ BTFIXUPDEF_CALL_CONST(pte_t, pte_mkyoung, pte_t)
 #define pte_mkdirty(pte) BTFIXUP_CALL(pte_mkdirty)(pte)
 #define pte_mkyoung(pte) BTFIXUP_CALL(pte_mkyoung)(pte)
 
+#define pte_mkspecial(pte)    (pte)
+
 #define pfn_pte(pfn, prot)             mk_pte(pfn_to_page(pfn), prot)
 
 BTFIXUPDEF_CALL(unsigned long,  pte_pfn, pte_t)
index e3006979709b6fe4777e580c726895bc210e30a5..8898efbbbe07c8e57cb66c791a82410ed417bac6 100644 (file)
@@ -1,5 +1,4 @@
-/* $Id: processor.h,v 1.83 2001/10/08 09:32:13 davem Exp $
- * include/asm-sparc/processor.h
+/* include/asm-sparc/processor.h
  *
  * Copyright (C) 1994 David S. Miller (davem@caip.rutgers.edu)
  */
@@ -65,7 +64,6 @@ struct thread_struct {
        struct fpq      fpqueue[16];
        unsigned long flags;
        mm_segment_t current_ds;
-       int new_signal;
 };
 
 #define SPARC_FLAG_KTHREAD      0x1    /* task is a kernel thread */
index b3f492208fd20168d2615a6bf51ae8a9101f63b3..e6d561599726a9223bb2026457de5a0b636acf5f 100644 (file)
@@ -51,13 +51,11 @@ void smp_bogo(struct seq_file *);
 void smp_info(struct seq_file *);
 
 BTFIXUPDEF_CALL(void, smp_cross_call, smpfunc_t, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long)
-BTFIXUPDEF_CALL(void, smp_message_pass, int, int, unsigned long, int)
 BTFIXUPDEF_CALL(int, __hard_smp_processor_id, void)
 BTFIXUPDEF_BLACKBOX(hard_smp_processor_id)
 BTFIXUPDEF_BLACKBOX(load_current)
 
 #define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5)
-#define smp_message_pass(target,msg,data,wait) BTFIXUP_CALL(smp_message_pass)(target,msg,data,wait)
 
 static inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); }
 static inline void xc1(smpfunc_t func, unsigned long arg1)
index 4e08210cd4c2a33b22eceb46c1788071deae9cdc..b4b024445fc984f0d419f5f997690b83a8efd733 100644 (file)
@@ -94,6 +94,8 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
        } while(0)
 #endif
 
+extern void flushw_all(void);
+
 /*
  * Flush windows so that the VM switch which follows
  * would not pull the stack from under us.
index b6f8eddd30af2f59bd6e81b548c664da1bef5501..11d2d5fb59029beede89e4870f6c74776924a9db 100644 (file)
@@ -1,6 +1,10 @@
-#ifndef _ASM_SPARC_UNALIGNED_H_
-#define _ASM_SPARC_UNALIGNED_H_
+#ifndef _ASM_SPARC_UNALIGNED_H
+#define _ASM_SPARC_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
+#define get_unaligned  __get_unaligned_be
+#define put_unaligned  __put_unaligned_be
 
 #endif /* _ASM_SPARC_UNALIGNED_H */
index 982ce8992b91fcae6a2d9e790235b718d7cfa1b1..11f9d8146cdff0823500641bacde73d97c33151f 100644 (file)
@@ -34,6 +34,7 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
 #include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>
 
 #ifdef __KERNEL__
index c47f58d6c15c7aa4b01c47a9ff0610971f864b86..ca19f80a9b7d96bab4494525abb248cd88e3c7a1 100644 (file)
@@ -293,7 +293,6 @@ static int sun_fd_eject(int drive)
 
 #ifdef CONFIG_PCI
 #include <asm/ebus.h>
-#include <asm/isa.h>
 #include <asm/ns87303.h>
 
 static struct ebus_dma_info sun_pci_fd_ebus_dma;
@@ -558,82 +557,6 @@ static int __init ebus_fdthree_p(struct linux_ebus_device *edev)
 }
 #endif
 
-#ifdef CONFIG_PCI
-#undef ISA_FLOPPY_WORKS
-
-#ifdef ISA_FLOPPY_WORKS
-static unsigned long __init isa_floppy_init(void)
-{
-       struct sparc_isa_bridge *isa_br;
-       struct sparc_isa_device *isa_dev = NULL;
-
-       for_each_isa(isa_br) {
-               for_each_isadev(isa_dev, isa_br) {
-                       if (!strcmp(isa_dev->prom_node->name, "dma")) {
-                               struct sparc_isa_device *child =
-                                       isa_dev->child;
-
-                               while (child) {
-                                       if (!strcmp(child->prom_node->name,
-                                                   "floppy")) {
-                                               isa_dev = child;
-                                               goto isa_done;
-                                       }
-                                       child = child->next;
-                               }
-                       }
-               }
-       }
-isa_done:
-       if (!isa_dev)
-               return 0;
-
-       /* We could use DMA on devices behind the ISA bridge, but...
-        *
-        * There is a slight problem.  Normally on x86 kit the x86 processor
-        * delays I/O port instructions when the ISA bus "dma in progress"
-        * signal is active.  Well, sparc64 systems do not monitor this
-        * signal thus we would need to block all I/O port accesses in software
-        * when a dma transfer is active for some device.
-        */
-
-       sun_fdc = (struct sun_flpy_controller *)isa_dev->resource.start;
-       FLOPPY_IRQ = isa_dev->irq;
-
-       sun_fdops.fd_inb = sun_pci_fd_inb;
-       sun_fdops.fd_outb = sun_pci_fd_outb;
-
-       can_use_virtual_dma = use_virtual_dma = 1;
-       sun_fdops.fd_enable_dma = sun_fd_enable_dma;
-       sun_fdops.fd_disable_dma = sun_fd_disable_dma;
-       sun_fdops.fd_set_dma_mode = sun_fd_set_dma_mode;
-       sun_fdops.fd_set_dma_addr = sun_fd_set_dma_addr;
-       sun_fdops.fd_set_dma_count = sun_fd_set_dma_count;
-       sun_fdops.get_dma_residue = sun_get_dma_residue;
-
-       sun_fdops.fd_request_irq = sun_fd_request_irq;
-       sun_fdops.fd_free_irq = sun_fd_free_irq;
-
-       /* Floppy eject is manual.   Actually, could determine this
-        * via presence of 'manual' property in OBP node.
-        */
-       sun_fdops.fd_eject = sun_pci_fd_eject;
-
-        fdc_status = (unsigned long) &sun_fdc->status_82077;
-
-       allowed_drive_mask = 0;
-       sun_floppy_types[0] = 0;
-       sun_floppy_types[1] = 4;
-
-       sun_pci_broken_drive = 1;
-       sun_fdops.fd_outb = sun_pci_fd_broken_outb;
-
-       return sun_floppy_types[0];
-}
-#endif /* ISA_FLOPPY_WORKS */
-
-#endif
-
 static unsigned long __init sun_floppy_init(void)
 {
        char state[128];
@@ -667,13 +590,8 @@ static unsigned long __init sun_floppy_init(void)
                        }
                }
        ebus_done:
-               if (!edev) {
-#ifdef ISA_FLOPPY_WORKS
-                       return isa_floppy_init();
-#else
+               if (!edev)
                        return 0;
-#endif
-               }
 
                state_prop = of_get_property(edev->prom_node, "status", NULL);
                if (state_prop && !strncmp(state_prop, "disabled", 8))
index df1097d6ffbe63258dc566924cedb9e393438d5f..d8378935ae90918149a3099b46b38e11f5aa7d42 100644 (file)
@@ -2,9 +2,9 @@
 #define _SPARC64_FUTEX_H
 
 #include <linux/futex.h>
+#include <linux/uaccess.h>
 #include <asm/errno.h>
 #include <asm/system.h>
-#include <asm/uaccess.h>
 
 #define __futex_cas_op(insn, ret, oldval, uaddr, oparg)        \
        __asm__ __volatile__(                           \
diff --git a/include/asm-sparc64/hugetlb.h b/include/asm-sparc64/hugetlb.h
new file mode 100644 (file)
index 0000000..412af58
--- /dev/null
@@ -0,0 +1,84 @@
+#ifndef _ASM_SPARC64_HUGETLB_H
+#define _ASM_SPARC64_HUGETLB_H
+
+#include <asm/page.h>
+
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                    pte_t *ptep, pte_t pte);
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+                             pte_t *ptep);
+
+void hugetlb_prefault_arch_hook(struct mm_struct *mm);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+                                        unsigned long addr,
+                                        unsigned long len) {
+       return 0;
+}
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+                                         unsigned long addr, unsigned long end,
+                                         unsigned long floor,
+                                         unsigned long ceiling)
+{
+       free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+       return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+       return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+#endif /* _ASM_SPARC64_HUGETLB_H */
diff --git a/include/asm-sparc64/isa.h b/include/asm-sparc64/isa.h
deleted file mode 100644 (file)
index ecd9290..0000000
+++ /dev/null
@@ -1,47 +0,0 @@
-/* $Id: isa.h,v 1.1 2001/05/11 04:31:55 davem Exp $
- * isa.h: Sparc64 layer for PCI to ISA bridge devices.
- *
- * Copyright (C) 2001 David S. Miller (davem@redhat.com)
- */
-
-#ifndef __SPARC64_ISA_H
-#define __SPARC64_ISA_H
-
-#include <asm/oplib.h>
-#include <asm/prom.h>
-#include <asm/of_device.h>
-
-struct sparc_isa_bridge;
-
-struct sparc_isa_device {
-       struct of_device        ofdev;
-       struct sparc_isa_device *next;
-       struct sparc_isa_device *child;
-       struct sparc_isa_bridge *bus;
-       struct device_node      *prom_node;
-       struct resource         resource;
-       unsigned int            irq;
-};
-#define to_isa_device(d) container_of(d, struct sparc_isa_device, ofdev.dev)
-
-struct sparc_isa_bridge {
-       struct of_device        ofdev;
-       struct sparc_isa_bridge *next;
-       struct sparc_isa_device *devices;
-       struct pci_dev          *self;
-       int                     index;
-       struct device_node      *prom_node;
-};
-#define to_isa_bridge(d) container_of(d, struct sparc_isa_bridge, ofdev.dev)
-
-extern struct sparc_isa_bridge *isa_chain;
-
-extern void isa_init(void);
-
-#define for_each_isa(bus)                                              \
-        for((bus) = isa_chain; (bus); (bus) = (bus)->next)
-
-#define for_each_isadev(dev, bus)                                      \
-        for((dev) = (bus)->devices; (dev); (dev) = (dev)->next)
-
-#endif /* !(__SPARC64_ISA_H) */
diff --git a/include/asm-sparc64/kgdb.h b/include/asm-sparc64/kgdb.h
new file mode 100644 (file)
index 0000000..aa6532f
--- /dev/null
@@ -0,0 +1 @@
+#include <asm-sparc/kgdb.h>
index e93a482aa24a0b874c50a2d109bf55f6b9c28fdc..618117def0dc957b584228552aca889cb88e5fdb 100644 (file)
@@ -39,8 +39,6 @@
 #define HPAGE_SIZE             (_AC(1,UL) << HPAGE_SHIFT)
 #define HPAGE_MASK             (~(HPAGE_SIZE - 1UL))
 #define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
-#define ARCH_HAS_HUGETLB_PREFAULT_HOOK
 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 #endif
 
index 549e45266b688ec2c2db083fb5581edad9cfbe3e..0e200e7acec793f4653721a30f1907782b40452a 100644 (file)
@@ -506,6 +506,11 @@ static inline pte_t pte_mkyoung(pte_t pte)
        return __pte(pte_val(pte) | mask);
 }
 
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+       return pte;
+}
+
 static inline unsigned long pte_young(pte_t pte)
 {
        unsigned long mask;
@@ -608,6 +613,11 @@ static inline unsigned long pte_present(pte_t pte)
        return val;
 }
 
+static inline int pte_special(pte_t pte)
+{
+       return 0;
+}
+
 #define pmd_set(pmdp, ptep)    \
        (pmd_val(*(pmdp)) = (__pa((unsigned long) (ptep)) >> 11UL))
 #define pud_set(pudp, pmdp)    \
index 72927749aebf1166291fcdc76edbbb42d2e78aa2..2f5d126f7163b45285c2cd6958ea3095a5b950e7 100644 (file)
 #define PIL_SMP_CTX_NEW_VERSION        4
 #define PIL_DEVICE_IRQ         5
 
-#ifndef __ASSEMBLY__
-#define PIL_RESERVED(PIL)      ((PIL) == PIL_SMP_CALL_FUNC || \
-                                (PIL) == PIL_SMP_RECEIVE_SIGNAL || \
-                                (PIL) == PIL_SMP_CAPTURE || \
-                                (PIL) == PIL_SMP_CTX_NEW_VERSION)
-#endif
-
 #endif /* !(_SPARC64_PIL_H) */
index b4b951d570bbf3915fdf5ddad8624c95f70b453c..714b81956f32a653a4c9863edbbc2aefb16b89a8 100644 (file)
@@ -1,4 +1,3 @@
-/* $Id: ptrace.h,v 1.14 2002/02/09 19:49:32 davem Exp $ */
 #ifndef _SPARC64_PTRACE_H
 #define _SPARC64_PTRACE_H
 
@@ -8,10 +7,15 @@
  * stack during a system call and basically all traps.
  */
 
+/* This magic value must have the low 9 bits clear,
+ * as that is where we encode the %tt value, see below.
+ */
 #define PT_REGS_MAGIC 0x57ac6c00
 
 #ifndef __ASSEMBLY__
 
+#include <linux/types.h>
+
 struct pt_regs {
        unsigned long u_regs[16]; /* globals and ins */
        unsigned long tstate;
@@ -33,6 +37,23 @@ struct pt_regs {
        unsigned int magic;
 };
 
+static inline int pt_regs_trap_type(struct pt_regs *regs)
+{
+       return regs->magic & 0x1ff;
+}
+
+static inline int pt_regs_clear_trap_type(struct pt_regs *regs)
+{
+       return regs->magic &= ~0x1ff;
+}
+
+static inline bool pt_regs_is_syscall(struct pt_regs *regs)
+{
+       int tt = pt_regs_trap_type(regs);
+
+       return (tt == 0x110 || tt == 0x111 || tt == 0x16d);
+}
+
 struct pt_regs32 {
        unsigned int psr;
        unsigned int pc;
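
The sparc64 ptrace.h hunk spells out why PT_REGS_MAGIC must keep its low 9 bits clear: the trap type (%tt) is encoded in those bits of pt_regs->magic, and pt_regs_trap_type()/pt_regs_is_syscall() read it back, the latter matching the three trap types listed in the patch. A small illustration of that packing (the "trap entry stores it" step is paraphrased, not quoted from the patch):

/* Illustrate the magic/%tt packing used by the new pt_regs helpers;
 * 0x110 is one of the trap types pt_regs_is_syscall() tests for. */
#include <stdio.h>

#define PT_REGS_MAGIC 0x57ac6c00        /* low 9 bits are zero */

int main(void)
{
        unsigned int tt = 0x110;
        unsigned int magic = PT_REGS_MAGIC | tt;

        printf("trap type  : %#x\n", magic & 0x1ff);
        printf("is syscall : %d\n", (magic & 0x1ff) == 0x110 ||
                                    (magic & 0x1ff) == 0x111 ||
                                    (magic & 0x1ff) == 0x16d);
        return 0;
}
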
index 53eae091a171920d90dbc083d5fb96d0a8f67b3e..6897ac31be4100baf733c45043294a36d4ed66bb 100644 (file)
@@ -180,12 +180,13 @@ do {      if (test_thread_flag(TIF_PERFCTR)) {                            \
        "ldx    [%%sp + 2047 + 0x70], %%i6\n\t"                         \
        "ldx    [%%sp + 2047 + 0x78], %%i7\n\t"                         \
        "ldx    [%%g6 + %9], %%g4\n\t"                                  \
-       "brz,pt %%o7, 1f\n\t"                                           \
+       "brz,pt %%o7, switch_to_pc\n\t"                                 \
        " mov   %%g7, %0\n\t"                                           \
        "sethi  %%hi(ret_from_syscall), %%g1\n\t"                       \
        "jmpl   %%g1 + %%lo(ret_from_syscall), %%g0\n\t"                \
        " nop\n\t"                                                      \
-       "1:\n\t"                                                        \
+       ".globl switch_to_pc\n\t"                                       \
+       "switch_to_pc:\n\t"                                             \
        : "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \
          "=r" (__local_per_cpu_offset)                                 \
        : "0" (task_thread_info(next)),                                 \
index 98252cd44dd6db88b3f91367b94c36f4e8d4bae1..71e42d1a80d9914c2fb0aaca67ca3fa0265b7533 100644 (file)
@@ -1,5 +1,4 @@
-/* $Id: thread_info.h,v 1.1 2002/02/10 00:00:58 davem Exp $
- * thread_info.h: sparc64 low-level thread information
+/* thread_info.h: sparc64 low-level thread information
  *
  * Copyright (C) 2002  David S. Miller (davem@redhat.com)
  */
@@ -223,7 +222,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
 #define TIF_PERFCTR            4       /* performance counters active */
 #define TIF_UNALIGNED          5       /* allowed to do unaligned accesses */
-#define TIF_NEWSIGNALS         6       /* wants new-style signals */
+/* flag bit 6 is available */
 #define TIF_32BIT              7       /* 32-bit binary */
 /* flag bit 8 is available */
 #define TIF_SECCOMP            9       /* secure computing */
@@ -242,7 +241,6 @@ register struct thread_info *current_thread_info_reg asm("g6");
 #define _TIF_NEED_RESCHED      (1<<TIF_NEED_RESCHED)
 #define _TIF_PERFCTR           (1<<TIF_PERFCTR)
 #define _TIF_UNALIGNED         (1<<TIF_UNALIGNED)
-#define _TIF_NEWSIGNALS                (1<<TIF_NEWSIGNALS)
 #define _TIF_32BIT             (1<<TIF_32BIT)
 #define _TIF_SECCOMP           (1<<TIF_SECCOMP)
 #define _TIF_SYSCALL_AUDIT     (1<<TIF_SYSCALL_AUDIT)
index d3cc4eff39a663984a73054caef84624f31fba36..1b55538b944fa82ce802dbdd358dde60818f7ae8 100644 (file)
 #define KPROBES_TRAP(lvl) TRAP_ARG(bad_trap, lvl)
 #endif
 
+#ifdef CONFIG_KGDB
+#define KGDB_TRAP(lvl) TRAP_IRQ(kgdb_trap, lvl)
+#else
+#define KGDB_TRAP(lvl) TRAP_ARG(bad_trap, lvl)
+#endif
+
 #define SUN4V_ITSB_MISS                                        \
        ldxa    [%g0] ASI_SCRATCHPAD, %g2;              \
        ldx     [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4;    \
index 1ed3ba5377728d97734c2e94f65dbc93b64a210c..edcebb09441e9e1451c0e1cee3c4329ebd7b2128 100644 (file)
@@ -1,6 +1,10 @@
-#ifndef _ASM_SPARC64_UNALIGNED_H_
-#define _ASM_SPARC64_UNALIGNED_H_
+#ifndef _ASM_SPARC64_UNALIGNED_H
+#define _ASM_SPARC64_UNALIGNED_H
 
-#include <asm-generic/unaligned.h>
+#include <linux/unaligned/be_struct.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
+#define get_unaligned  __get_unaligned_be
+#define put_unaligned  __put_unaligned_be
 
 #endif /* _ASM_SPARC64_UNALIGNED_H */
index 7b73b2cd5b340be8212d45657eb44ad9cda71425..1e17f7409cabc072df18f3f516b0df0bc2a58506 100644 (file)
@@ -3,5 +3,4 @@
 
 #include "asm/arch/div64.h"
 
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
 #endif
index 4102b443e9259566346ca3974b881cc8ac465957..02db81b7b86ea48ffc43ed9405bbe1491959036e 100644 (file)
@@ -173,6 +173,11 @@ static inline int pte_newprot(pte_t pte)
        return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
 }
 
+static inline int pte_special(pte_t pte)
+{
+       return 0;
+}
+
 /*
  * =================================
  * Flags setting section.
@@ -241,6 +246,11 @@ static inline pte_t pte_mknewpage(pte_t pte)
        return(pte);
 }
 
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+       return(pte);
+}
+
 static inline void set_pte(pte_t *pteptr, pte_t pteval)
 {
        pte_copy(*pteptr, pteval);
index 1d2497c57274b599be081a5a5e4d794261f9ba29..a47196974e393d9826a53f92d026fe8602e3ab4f 100644 (file)
@@ -1,6 +1,6 @@
-#ifndef __UM_UNALIGNED_H
-#define __UM_UNALIGNED_H
+#ifndef _ASM_UM_UNALIGNED_H
+#define _ASM_UM_UNALIGNED_H
 
 #include "asm/arch/unaligned.h"
 
-#endif
+#endif /* _ASM_UM_UNALIGNED_H */
index e30b18653a94bf8cde19b2573888692d1d46d55a..53122b28491ec7b64a31d897d38866d040214702 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * include/asm-v850/unaligned.h -- Unaligned memory access
- *
  *  Copyright (C) 2001  NEC Corporation
  *  Copyright (C) 2001  Miles Bader <miles@gnu.org>
  *
  * Public License.  See the file COPYING in the main directory of this
  * archive for more details.
  *
- * This file is a copy of the arm version, include/asm-arm/unaligned.h
- *
  * Note that some v850 chips support unaligned access, but it seems too
  * annoying to use.
  */
+#ifndef _ASM_V850_UNALIGNED_H
+#define _ASM_V850_UNALIGNED_H
 
-#ifndef __V850_UNALIGNED_H__
-#define __V850_UNALIGNED_H__
-
-#include <asm/types.h>
-
-extern int __bug_unaligned_x(void *ptr);
-
-/*
- * What is the most efficient way of loading/storing an unaligned value?
- *
- * That is the subject of this file.  Efficiency here is defined as
- * minimum code size with minimum register usage for the common cases.
- * It is currently not believed that long longs are common, so we
- * trade efficiency for the chars, shorts and longs against the long
- * longs.
- *
- * Current stats with gcc 2.7.2.2 for these functions:
- *
- *     ptrsize get:    code    regs    put:    code    regs
- *     1               1       1               1       2
- *     2               3       2               3       2
- *     4               7       3               7       3
- *     8               20      6               16      6
- *
- * gcc 2.95.1 seems to code differently:
- *
- *     ptrsize get:    code    regs    put:    code    regs
- *     1               1       1               1       2
- *     2               3       2               3       2
- *     4               7       4               7       4
- *     8               19      8               15      6
- *
- * which may or may not be more efficient (depending upon whether
- * you can afford the extra registers).  Hopefully the gcc 2.95
- * is inteligent enough to decide if it is better to use the
- * extra register, but evidence so far seems to suggest otherwise.
- *
- * Unfortunately, gcc is not able to optimise the high word
- * out of long long >> 32, or the low word from long long << 32
- */
-
-#define __get_unaligned_2(__p)                                 \
-       (__p[0] | __p[1] << 8)
-
-#define __get_unaligned_4(__p)                                 \
-       (__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24)
-
-#define get_unaligned(ptr)                                     \
-       ({                                                      \
-               __typeof__(*(ptr)) __v;                         \
-               __u8 *__p = (__u8 *)(ptr);                      \
-               switch (sizeof(*(ptr))) {                       \
-               case 1: __v = *(ptr);                   break;  \
-               case 2: __v = __get_unaligned_2(__p);   break;  \
-               case 4: __v = __get_unaligned_4(__p);   break;  \
-               case 8: {                                       \
-                               unsigned int __v1, __v2;        \
-                               __v2 = __get_unaligned_4((__p+4)); \
-                               __v1 = __get_unaligned_4(__p);  \
-                               __v = ((unsigned long long)__v2 << 32 | __v1);  \
-                       }                                       \
-                       break;                                  \
-               default: __v = __bug_unaligned_x(__p);  break;  \
-               }                                               \
-               __v;                                            \
-       })
-
-
-static inline void __put_unaligned_2(__u32 __v, register __u8 *__p)
-{
-       *__p++ = __v;
-       *__p++ = __v >> 8;
-}
-
-static inline void __put_unaligned_4(__u32 __v, register __u8 *__p)
-{
-       __put_unaligned_2(__v >> 16, __p + 2);
-       __put_unaligned_2(__v, __p);
-}
-
-static inline void __put_unaligned_8(const unsigned long long __v, register __u8 *__p)
-{
-       /*
-        * tradeoff: 8 bytes of stack for all unaligned puts (2
-        * instructions), or an extra register in the long long
-        * case - go for the extra register.
-        */
-       __put_unaligned_4(__v >> 32, __p+4);
-       __put_unaligned_4(__v, __p);
-}
-
-/*
- * Try to store an unaligned value as efficiently as possible.
- */
-#define put_unaligned(val,ptr)                                 \
-       ({                                                      \
-               switch (sizeof(*(ptr))) {                       \
-               case 1:                                         \
-                       *(ptr) = (val);                         \
-                       break;                                  \
-               case 2: __put_unaligned_2((val),(__u8 *)(ptr)); \
-                       break;                                  \
-               case 4: __put_unaligned_4((val),(__u8 *)(ptr)); \
-                       break;                                  \
-               case 8: __put_unaligned_8((val),(__u8 *)(ptr)); \
-                       break;                                  \
-               default: __bug_unaligned_x(ptr);                \
-                       break;                                  \
-               }                                               \
-               (void) 0;                                       \
-       })
+#include <linux/unaligned/be_byteshift.h>
+#include <linux/unaligned/le_byteshift.h>
+#include <linux/unaligned/generic.h>
 
+#define get_unaligned  __get_unaligned_le
+#define put_unaligned  __put_unaligned_le
 
-#endif /* __V850_UNALIGNED_H__ */
+#endif /* _ASM_V850_UNALIGNED_H */
index 1ae7b270a1efe8d1cc9abe8c4260d54d714ecc0f..b81a4d4d333787e4c9e8b69cb74fd190f09c4e69 100644 (file)
@@ -62,12 +62,9 @@ static inline void set_bit(int nr, volatile void *addr)
  */
 static inline void __set_bit(int nr, volatile void *addr)
 {
-       asm volatile("bts %1,%0"
-                    : ADDR
-                    : "Ir" (nr) : "memory");
+       asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
 }
 
-
 /**
  * clear_bit - Clears a bit in memory
  * @nr: Bit to clear
@@ -297,19 +294,145 @@ static inline int variable_test_bit(int nr, volatile const void *addr)
 static int test_bit(int nr, const volatile unsigned long *addr);
 #endif
 
-#define test_bit(nr,addr)                      \
-       (__builtin_constant_p(nr) ?             \
-        constant_test_bit((nr),(addr)) :       \
-        variable_test_bit((nr),(addr)))
+#define test_bit(nr, addr)                     \
+       (__builtin_constant_p((nr))             \
+        ? constant_test_bit((nr), (addr))      \
+        : variable_test_bit((nr), (addr)))
+
+/**
+ * __ffs - find first set bit in word
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+       asm("bsf %1,%0"
+               : "=r" (word)
+               : "rm" (word));
+       return word;
+}
+
+/**
+ * ffz - find first zero bit in word
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+       asm("bsf %1,%0"
+               : "=r" (word)
+               : "r" (~word));
+       return word;
+}
+
+/**
+ * __fls - find last set bit in word
+ * @word: The word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __fls(unsigned long word)
+{
+       asm("bsr %1,%0"
+           : "=r" (word)
+           : "rm" (word));
+       return word;
+}
+
+#ifdef __KERNEL__
+/**
+ * ffs - find first set bit in word
+ * @x: the word to search
+ *
+ * This is defined the same way as the libc and compiler builtin ffs
+ * routines, therefore differs in spirit from the other bitops.
+ *
+ * ffs(value) returns 0 if value is 0 or the position of the first
+ * set bit if value is nonzero. The first (least significant) bit
+ * is at position 1.
+ */
+static inline int ffs(int x)
+{
+       int r;
+#ifdef CONFIG_X86_CMOV
+       asm("bsfl %1,%0\n\t"
+           "cmovzl %2,%0"
+           : "=r" (r) : "rm" (x), "r" (-1));
+#else
+       asm("bsfl %1,%0\n\t"
+           "jnz 1f\n\t"
+           "movl $-1,%0\n"
+           "1:" : "=r" (r) : "rm" (x));
+#endif
+       return r + 1;
+}
+
+/**
+ * fls - find last set bit in word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffs, but returns the position of the most significant set bit.
+ *
+ * fls(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 32.
+ */
+static inline int fls(int x)
+{
+       int r;
+#ifdef CONFIG_X86_CMOV
+       asm("bsrl %1,%0\n\t"
+           "cmovzl %2,%0"
+           : "=&r" (r) : "rm" (x), "rm" (-1));
+#else
+       asm("bsrl %1,%0\n\t"
+           "jnz 1f\n\t"
+           "movl $-1,%0\n"
+           "1:" : "=r" (r) : "rm" (x));
+#endif
+       return r + 1;
+}
+#endif /* __KERNEL__ */
 
 #undef BASE_ADDR
 #undef BIT_ADDR
 #undef ADDR
 
-#ifdef CONFIG_X86_32
-# include "bitops_32.h"
-#else
-# include "bitops_64.h"
-#endif
+static inline void set_bit_string(unsigned long *bitmap,
+               unsigned long i, int len)
+{
+       unsigned long end = i + len;
+       while (i < end) {
+               __set_bit(i, bitmap);
+               i++;
+       }
+}
+
+#ifdef __KERNEL__
+
+#include <asm-generic/bitops/sched.h>
+
+#define ARCH_HAS_FAST_MULTIPLIER 1
+
+#include <asm-generic/bitops/hweight.h>
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/bitops/fls64.h>
+
+#ifdef __KERNEL__
+
+#include <asm-generic/bitops/ext2-non-atomic.h>
+
+#define ext2_set_bit_atomic(lock, nr, addr)                    \
+       test_and_set_bit((nr), (unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr)                  \
+       test_and_clear_bit((nr), (unsigned long *)(addr))
+
+#include <asm-generic/bitops/minix.h>
 
+#endif /* __KERNEL__ */
 #endif /* _ASM_X86_BITOPS_H */
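
The merged x86 bitops.h now carries a single copy of the bit-search helpers for 32- and 64-bit builds, and the two families follow different conventions: __ffs/__fls/ffz return a 0-based bit index and are undefined when no suitable bit exists, while ffs/fls are 1-based and return 0 for a zero argument, matching the libc builtins. A quick userspace check of those conventions using compiler builtins with the same semantics (note the kernel ffs/fls operate on 32-bit int; long is used here only to keep one variable):

/* Contrast 0-based __ffs/__fls-style results with 1-based ffs/fls-style
 * results, using GCC builtins with matching semantics. */
#include <stdio.h>

int main(void)
{
        unsigned long x = 0x48;         /* bits 3 and 6 set */

        printf("__ffs-style (0-based lowest set bit) : %d\n",
               __builtin_ctzl(x));                                   /* 3 */
        printf("ffs-style   (1-based, 0 for x == 0)  : %d\n",
               __builtin_ffsl(x));                                   /* 4 */
        printf("fls-style   (1-based highest set bit): %d\n",
               x ? (int)(8 * sizeof(long)) - __builtin_clzl(x) : 0); /* 7 */
        return 0;
}
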
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
deleted file mode 100644 (file)
index 2513a81..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef _I386_BITOPS_H
-#define _I386_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit number of the first zero bit, not the number of the byte
- * containing a bit.
- */
-static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
-{
-       int d0, d1, d2;
-       int res;
-
-       if (!size)
-               return 0;
-       /* This looks at memory.
-        * Mark it volatile to tell gcc not to move it around
-        */
-       asm volatile("movl $-1,%%eax\n\t"
-                    "xorl %%edx,%%edx\n\t"
-                    "repe; scasl\n\t"
-                    "je 1f\n\t"
-                    "xorl -4(%%edi),%%eax\n\t"
-                    "subl $4,%%edi\n\t"
-                    "bsfl %%eax,%%edx\n"
-                    "1:\tsubl %%ebx,%%edi\n\t"
-                    "shll $3,%%edi\n\t"
-                    "addl %%edi,%%edx"
-                    : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
-                    : "1" ((size + 31) >> 5), "2" (addr),
-                      "b" (addr) : "memory");
-       return res;
-}
-
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bit number to start searching at
- * @size: The maximum size to search
- */
-int find_next_zero_bit(const unsigned long *addr, int size, int offset);
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
-       __asm__("bsfl %1,%0"
-               :"=r" (word)
-               :"rm" (word));
-       return word;
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit number of the first set bit, not the number of the byte
- * containing a bit.
- */
-static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
-{
-       unsigned x = 0;
-
-       while (x < size) {
-               unsigned long val = *addr++;
-               if (val)
-                       return __ffs(val) + x;
-               x += sizeof(*addr) << 3;
-       }
-       return x;
-}
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bit number to start searching at
- * @size: The maximum size to search
- */
-int find_next_bit(const unsigned long *addr, int size, int offset);
-
-/**
- * ffz - find first zero in word.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long ffz(unsigned long word)
-{
-       __asm__("bsfl %1,%0"
-               :"=r" (word)
-               :"r" (~word));
-       return word;
-}
-
-#ifdef __KERNEL__
-
-#include <asm-generic/bitops/sched.h>
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz() (man ffs).
- */
-static inline int ffs(int x)
-{
-       int r;
-
-       __asm__("bsfl %1,%0\n\t"
-               "jnz 1f\n\t"
-               "movl $-1,%0\n"
-               "1:" : "=r" (r) : "rm" (x));
-       return r+1;
-}
-
-/**
- * fls - find last bit set
- * @x: the word to search
- *
- * This is defined the same way as ffs().
- */
-static inline int fls(int x)
-{
-       int r;
-
-       __asm__("bsrl %1,%0\n\t"
-               "jnz 1f\n\t"
-               "movl $-1,%0\n"
-               "1:" : "=r" (r) : "rm" (x));
-       return r+1;
-}
-
-#include <asm-generic/bitops/hweight.h>
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/bitops/fls64.h>
-
-#ifdef __KERNEL__
-
-#include <asm-generic/bitops/ext2-non-atomic.h>
-
-#define ext2_set_bit_atomic(lock, nr, addr)                    \
-       test_and_set_bit((nr), (unsigned long *)(addr))
-#define ext2_clear_bit_atomic(lock, nr, addr)                  \
-       test_and_clear_bit((nr), (unsigned long *)(addr))
-
-#include <asm-generic/bitops/minix.h>
-
-#endif /* __KERNEL__ */
-
-#endif /* _I386_BITOPS_H */
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
deleted file mode 100644 (file)
index 365f820..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-#ifndef _X86_64_BITOPS_H
-#define _X86_64_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-extern long find_first_zero_bit(const unsigned long *addr, unsigned long size);
-extern long find_next_zero_bit(const unsigned long *addr, long size, long offset);
-extern long find_first_bit(const unsigned long *addr, unsigned long size);
-extern long find_next_bit(const unsigned long *addr, long size, long offset);
-
-/* return index of first bit set in val or max when no bit is set */
-static inline long __scanbit(unsigned long val, unsigned long max)
-{
-       asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
-       return val;
-}
-
-#define find_next_bit(addr,size,off) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?        \
-  ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
-       find_next_bit(addr,size,off)))
-
-#define find_next_zero_bit(addr,size,off) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ?        \
-  ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
-       find_next_zero_bit(addr,size,off)))
-
-#define find_first_bit(addr, size)                                     \
-       ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG       \
-         ? (__scanbit(*(unsigned long *)(addr), (size)))               \
-         : find_first_bit((addr), (size))))
-
-#define find_first_zero_bit(addr, size)                                        \
-       ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG       \
-         ? (__scanbit(~*(unsigned long *)(addr), (size)))              \
-         : find_first_zero_bit((addr), (size))))
-
-static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
-                                 int len)
-{
-       unsigned long end = i + len;
-       while (i < end) {
-               __set_bit(i, bitmap);
-               i++;
-       }
-}
-
-/**
- * ffz - find first zero in word.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long ffz(unsigned long word)
-{
-       __asm__("bsfq %1,%0"
-               :"=r" (word)
-               :"r" (~word));
-       return word;
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
-       __asm__("bsfq %1,%0"
-               :"=r" (word)
-               :"rm" (word));
-       return word;
-}
-
-/*
- * __fls: find last bit set.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __fls(unsigned long word)
-{
-       __asm__("bsrq %1,%0"
-               :"=r" (word)
-               :"rm" (word));
-       return word;
-}
-
-#ifdef __KERNEL__
-
-#include <asm-generic/bitops/sched.h>
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static inline int ffs(int x)
-{
-       int r;
-
-       __asm__("bsfl %1,%0\n\t"
-               "cmovzl %2,%0" 
-               : "=r" (r) : "rm" (x), "r" (-1));
-       return r+1;
-}
-
-/**
- * fls64 - find last bit set in 64 bit word
- * @x: the word to search
- *
- * This is defined the same way as fls.
- */
-static inline int fls64(__u64 x)
-{
-       if (x == 0)
-               return 0;
-       return __fls(x) + 1;
-}
-
-/**
- * fls - find last bit set
- * @x: the word to search
- *
- * This is defined the same way as ffs.
- */
-static inline int fls(int x)
-{
-       int r;
-
-       __asm__("bsrl %1,%0\n\t"
-               "cmovzl %2,%0"
-               : "=&r" (r) : "rm" (x), "rm" (-1));
-       return r+1;
-}
-
-#define ARCH_HAS_FAST_MULTIPLIER 1
-
-#include <asm-generic/bitops/hweight.h>
-
-#endif /* __KERNEL__ */
-
-#ifdef __KERNEL__
-
-#include <asm-generic/bitops/ext2-non-atomic.h>
-
-#define ext2_set_bit_atomic(lock, nr, addr)                    \
-       test_and_set_bit((nr), (unsigned long *)(addr))
-#define ext2_clear_bit_atomic(lock, nr, addr)                  \
-       test_and_clear_bit((nr), (unsigned long *)(addr))
-
-#include <asm-generic/bitops/minix.h>
-
-#endif /* __KERNEL__ */
-
-#endif /* _X86_64_BITOPS_H */
index 51151356840fcd5e099c84bb4c7e1ab623d47e77..e8659909e5f69a93dc781402fc5cf5db9df95c75 100644 (file)
@@ -9,6 +9,17 @@
 #include <asm/ist.h>
 #include <video/edid.h>
 
+/* setup data types */
+#define SETUP_NONE                     0
+
+/* extensible setup data list node */
+struct setup_data {
+       u64 next;
+       u32 type;
+       u32 len;
+       u8 data[0];
+};
+
 struct setup_header {
        __u8    setup_sects;
        __u16   root_flags;
@@ -46,6 +57,9 @@ struct setup_header {
        __u32   cmdline_size;
        __u32   hardware_subarch;
        __u64   hardware_subarch_data;
+       __u32   payload_offset;
+       __u32   payload_length;
+       __u64   setup_data;
 } __attribute__((packed));
 
 struct sys_desc_table {
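The new setup_data field in setup_header points at a singly linked chain of struct setup_data nodes in physical memory. A rough sketch of how a consumer might walk that chain, assuming the usual boot_params.hdr access path and the early_ioremap()/early_iounmap() helpers declared elsewhere in this merge; illustrative only, not code from this commit:

    u64 pa = boot_params.hdr.setup_data;   /* physical address of first node, 0 if none */

    while (pa) {
            struct setup_data *sd = early_ioremap((unsigned long)pa, sizeof(*sd));
            u64 next = sd->next;

            /* sd->type identifies the payload; sd->len bytes follow in sd->data[] */
            early_iounmap(sd, sizeof(*sd));
            pa = next;
    }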
index 0dbf8bf3ef0a8d06308ea3cf8e72612c28c016c2..9a2d644c08efc0981dbc13b1700e81917fef7c63 100644 (file)
        __mod;                                                  \
 })
 
-/*
- * (long)X = ((long long)divs) / (long)div
- * (long)rem = ((long long)divs) % (long)div
- *
- * Warning, this will raise an exception if X overflows.
- */
-#define div_long_long_rem(a, b, c) div_ll_X_l_rem(a, b, c)
-
-static inline long div_ll_X_l_rem(long long divs, long div, long *rem)
+static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
 {
-       long dum2;
-       asm("divl %2":"=a"(dum2), "=d"(*rem)
-           : "rm"(div), "A"(divs));
-
-       return dum2;
-
+       union {
+               u64 v64;
+               u32 v32[2];
+       } d = { dividend };
+       u32 upper;
+
+       upper = d.v32[1];
+       d.v32[1] = 0;
+       if (upper >= divisor) {
+               d.v32[1] = upper / divisor;
+               upper %= divisor;
+       }
+       asm ("divl %2" : "=a" (d.v32[0]), "=d" (*remainder) :
+               "rm" (divisor), "0" (d.v32[0]), "1" (upper));
+       return d.v64;
 }
-
-extern uint64_t div64_64(uint64_t dividend, uint64_t divisor);
+#define div_u64_rem    div_u64_rem
 
 #else
 # include <asm-generic/div64.h>
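For readers not fluent in the divl constraints above: the new div_u64_rem() divides the high 32 bits first so that the remaining 32-bit divide can never overflow. A plain-C, user-space sketch of the same idea (illustrative, not the kernel implementation):

    #include <stdint.h>

    static uint64_t div_u64_rem_sketch(uint64_t dividend, uint32_t divisor,
                                       uint32_t *remainder)
    {
            uint32_t upper = dividend >> 32;
            uint64_t quot = 0;
            uint64_t rest;

            if (upper >= divisor) {         /* quotient contributed by the high word */
                    quot = (uint64_t)(upper / divisor) << 32;
                    upper %= divisor;
            }
            /* now upper < divisor, so this quotient fits in 32 bits */
            rest = ((uint64_t)upper << 32) | (uint32_t)dividend;
            quot |= rest / divisor;
            *remainder = (uint32_t)(rest % divisor);
            return quot;
    }

    /* e.g. div_u64_rem_sketch(10000000000ULL, 3, &r) == 3333333333, r == 1 */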
index 1241e6ad1935b99c6273554eb8501f05833b28cb..4edf7514a75033d8173428e1ce1640995a87f154 100644 (file)
@@ -27,6 +27,7 @@ static inline void *dmi_alloc(unsigned len)
 
 #endif
 
+/* Use early IO mappings for DMI because it's initialized early */
 #define dmi_ioremap early_ioremap
 #define dmi_iounmap early_iounmap
 
index f478c57eb06033b32d673b13021db1c516e5721e..71c4d685d30d894ad1d1700bdbde2ea468f3e8d3 100644 (file)
@@ -48,7 +48,8 @@ extern struct e820map e820;
 extern void update_e820(void);
 
 extern void reserve_early(unsigned long start, unsigned long end, char *name);
-extern void early_res_to_bootmem(void);
+extern void free_early(unsigned long start, unsigned long end);
+extern void early_res_to_bootmem(unsigned long start, unsigned long end);
 
 #endif/*!__ASSEMBLY__*/
 
index ac0fbf24d722a36f3f0ee20e7060b5aace6bced1..e7a76b37b333cc8b40295d7379f8cbb46b241a29 100644 (file)
@@ -4,12 +4,12 @@
 #ifdef __KERNEL__
 
 #include <linux/futex.h>
+#include <linux/uaccess.h>
 
 #include <asm/asm.h>
 #include <asm/errno.h>
 #include <asm/processor.h>
 #include <asm/system.h>
-#include <asm/uaccess.h>
 
 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)    \
        asm volatile("1:\t" insn "\n"                           \
index 9870cc1f2f8f4d5bb9b7ec296869b5cf2f4f17aa..7154dc4de9511692c5417973ac866f4883d2d7c3 100644 (file)
@@ -30,7 +30,13 @@ extern int geode_get_dev_base(unsigned int dev);
 
 /* MSRS */
 
-#define GX_GLCP_SYS_RSTPLL     0x4C000014
+#define MSR_GLIU_P2D_RO0       0x10000029
+
+#define MSR_LX_GLD_MSR_CONFIG  0x48002001
+#define MSR_LX_MSR_PADSEL      0x48002011      /* NOT 0x48000011; the data
+                                                * sheet has the wrong value */
+#define MSR_GLCP_SYS_RSTPLL    0x4C000014
+#define MSR_GLCP_DOTPLL                0x4C000015
 
 #define MSR_LBAR_SMB           0x5140000B
 #define MSR_LBAR_GPIO          0x5140000C
@@ -45,8 +51,14 @@ extern int geode_get_dev_base(unsigned int dev);
 #define MSR_PIC_ZSEL_LOW       0x51400022
 #define MSR_PIC_ZSEL_HIGH      0x51400023
 
-#define MFGPT_IRQ_MSR          0x51400028
-#define MFGPT_NR_MSR           0x51400029
+#define MSR_MFGPT_IRQ          0x51400028
+#define MSR_MFGPT_NR           0x51400029
+#define MSR_MFGPT_SETUP                0x5140002B
+
+#define MSR_LX_SPARE_MSR       0x80000011      /* DC-specific */
+
+#define MSR_GX_GLD_MSR_CONFIG  0xC0002001
+#define MSR_GX_MSR_PADSEL      0xC0002011
 
 /* Resource Sizes */
 
@@ -93,6 +105,15 @@ extern int geode_get_dev_base(unsigned int dev);
 #define PM_AWKD                        0x50
 #define PM_SSC                 0x54
 
+/* VSA2 magic values */
+
+#define VSA_VRC_INDEX          0xAC1C
+#define VSA_VRC_DATA           0xAC1E
+#define VSA_VR_UNLOCK          0xFC53  /* unlock virtual register */
+#define VSA_VR_SIGNATURE       0x0003
+#define VSA_VR_MEM_SIZE                0x0200
+#define VSA_SIG                        0x4132  /* signature is ascii 'VSA2' */
+
 /* GPIO */
 
 #define GPIO_OUTPUT_VAL                0x00
@@ -164,6 +185,17 @@ static inline int is_geode(void)
        return (is_geode_gx() || is_geode_lx());
 }
 
+/*
+ * The VSA has virtual registers that we can query for a signature.
+ */
+static inline int geode_has_vsa2(void)
+{
+       outw(VSA_VR_UNLOCK, VSA_VRC_INDEX);
+       outw(VSA_VR_SIGNATURE, VSA_VRC_INDEX);
+
+       return (inw(VSA_VRC_DATA) == VSA_SIG);
+}
+
 /* MFGPTs */
 
 #define MFGPT_MAX_TIMERS       8
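geode_has_vsa2() probes the virtual-register interface by writing the unlock and signature-select values and checking for the VSA signature word. A hedged example of how a platform check might use it (purely illustrative, not from this commit):

    if (is_geode() && !geode_has_vsa2())
            printk(KERN_INFO "Geode: no VSA2 firmware, "
                   "PCI must be handled by the kernel\n");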
diff --git a/include/asm-x86/hugetlb.h b/include/asm-x86/hugetlb.h
new file mode 100644 (file)
index 0000000..14171a4
--- /dev/null
@@ -0,0 +1,91 @@
+#ifndef _ASM_X86_HUGETLB_H
+#define _ASM_X86_HUGETLB_H
+
+#include <asm/page.h>
+
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+                                        unsigned long addr,
+                                        unsigned long len) {
+       return 0;
+}
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
+{
+       if (len & ~HPAGE_MASK)
+               return -EINVAL;
+       if (addr & ~HPAGE_MASK)
+               return -EINVAL;
+       return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+                                         unsigned long addr, unsigned long end,
+                                         unsigned long floor,
+                                         unsigned long ceiling)
+{
+       free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+                                  pte_t *ptep, pte_t pte)
+{
+       set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+                                           unsigned long addr, pte_t *ptep)
+{
+       return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+                                        unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+       return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+       return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+                                          unsigned long addr, pte_t *ptep)
+{
+       ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                            unsigned long addr, pte_t *ptep,
+                                            pte_t pte, int dirty)
+{
+       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+       return *ptep;
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+       return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+#endif /* _ASM_X86_HUGETLB_H */
index 6e73467a4fb13783ba21a73108e238e8e4e35fe7..049e81e797a0919e8090135c5b16fe5b99a32c5e 100644 (file)
@@ -133,11 +133,6 @@ extern void *early_ioremap(unsigned long offset, unsigned long size);
 extern void early_iounmap(void *addr, unsigned long size);
 extern void __iomem *fix_ioremap(unsigned idx, unsigned long phys);
 
-/* Use early IO mappings for DMI because it's initialized early */
-#define dmi_ioremap early_ioremap
-#define dmi_iounmap early_iounmap
-#define dmi_alloc alloc_bootmem
-
 /*
  * ISA I/O bus memory addresses are 1:1 with the physical address.
  */
index 7a71120426a354155d377e3f7f952d92c4300862..80eefef2cc76cd3b54ba93856a8fc500f639d736 100644 (file)
@@ -188,4 +188,45 @@ struct kvm_cpuid2 {
        struct kvm_cpuid_entry2 entries[0];
 };
 
+/* for KVM_GET_PIT and KVM_SET_PIT */
+struct kvm_pit_channel_state {
+       __u32 count; /* can be 65536 */
+       __u16 latched_count;
+       __u8 count_latched;
+       __u8 status_latched;
+       __u8 status;
+       __u8 read_state;
+       __u8 write_state;
+       __u8 write_latch;
+       __u8 rw_mode;
+       __u8 mode;
+       __u8 bcd;
+       __u8 gate;
+       __s64 count_load_time;
+};
+
+struct kvm_pit_state {
+       struct kvm_pit_channel_state channels[3];
+};
+
+#define KVM_TRC_INJ_VIRQ         (KVM_TRC_HANDLER + 0x02)
+#define KVM_TRC_REDELIVER_EVT    (KVM_TRC_HANDLER + 0x03)
+#define KVM_TRC_PEND_INTR        (KVM_TRC_HANDLER + 0x04)
+#define KVM_TRC_IO_READ          (KVM_TRC_HANDLER + 0x05)
+#define KVM_TRC_IO_WRITE         (KVM_TRC_HANDLER + 0x06)
+#define KVM_TRC_CR_READ          (KVM_TRC_HANDLER + 0x07)
+#define KVM_TRC_CR_WRITE         (KVM_TRC_HANDLER + 0x08)
+#define KVM_TRC_DR_READ          (KVM_TRC_HANDLER + 0x09)
+#define KVM_TRC_DR_WRITE         (KVM_TRC_HANDLER + 0x0A)
+#define KVM_TRC_MSR_READ         (KVM_TRC_HANDLER + 0x0B)
+#define KVM_TRC_MSR_WRITE        (KVM_TRC_HANDLER + 0x0C)
+#define KVM_TRC_CPUID            (KVM_TRC_HANDLER + 0x0D)
+#define KVM_TRC_INTR             (KVM_TRC_HANDLER + 0x0E)
+#define KVM_TRC_NMI              (KVM_TRC_HANDLER + 0x0F)
+#define KVM_TRC_VMMCALL          (KVM_TRC_HANDLER + 0x10)
+#define KVM_TRC_HLT              (KVM_TRC_HANDLER + 0x11)
+#define KVM_TRC_CLTS             (KVM_TRC_HANDLER + 0x12)
+#define KVM_TRC_LMSW             (KVM_TRC_HANDLER + 0x13)
+#define KVM_TRC_APIC_ACCESS      (KVM_TRC_HANDLER + 0x14)
+
 #endif
index 68ee390b2844024eac9beb1444790870cc628483..9d963cd6533c5d7541b71dac204d3debf524f685 100644 (file)
 
 #include <asm/desc.h>
 
+#define KVM_MAX_VCPUS 16
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+#define KVM_PIO_PAGE_OFFSET 1
+
 #define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
 #define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
 #define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS |   \
 #define INVALID_PAGE (~(hpa_t)0)
 #define UNMAPPED_GVA (~(gpa_t)0)
 
+/* shadow tables are PAE even on non-PAE hosts */
+#define KVM_HPAGE_SHIFT 21
+#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
+#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
+
+#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
+
 #define DE_VECTOR 0
 #define UD_VECTOR 6
 #define NM_VECTOR 7
@@ -48,6 +62,7 @@
 #define SS_VECTOR 12
 #define GP_VECTOR 13
 #define PF_VECTOR 14
+#define MC_VECTOR 18
 
 #define SELECTOR_TI_MASK (1 << 2)
 #define SELECTOR_RPL_MASK 0x03
@@ -58,7 +73,8 @@
 
 #define KVM_PERMILLE_MMU_PAGES 20
 #define KVM_MIN_ALLOC_MMU_PAGES 64
-#define KVM_NUM_MMU_PAGES 1024
+#define KVM_MMU_HASH_SHIFT 10
+#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
 #define KVM_MIN_FREE_MMU_PAGES 5
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 40
@@ -106,6 +122,12 @@ enum {
 
 #define KVM_NR_MEM_OBJS 40
 
+struct kvm_guest_debug {
+       int enabled;
+       unsigned long bp[4];
+       int singlestep;
+};
+
 /*
  * We don't want allocation failures within the mmu code, so we preallocate
  * enough memory for a single page fault in a cache.
@@ -140,6 +162,7 @@ union kvm_mmu_page_role {
                unsigned pad_for_nice_hex_output:6;
                unsigned metaphysical:1;
                unsigned access:3;
+               unsigned invalid:1;
        };
 };
 
@@ -204,11 +227,6 @@ struct kvm_vcpu_arch {
        u64 shadow_efer;
        u64 apic_base;
        struct kvm_lapic *apic;    /* kernel irqchip context */
-#define VCPU_MP_STATE_RUNNABLE          0
-#define VCPU_MP_STATE_UNINITIALIZED     1
-#define VCPU_MP_STATE_INIT_RECEIVED     2
-#define VCPU_MP_STATE_SIPI_RECEIVED     3
-#define VCPU_MP_STATE_HALTED            4
        int mp_state;
        int sipi_vector;
        u64 ia32_misc_enable_msr;
@@ -226,8 +244,9 @@ struct kvm_vcpu_arch {
        u64  *last_pte_updated;
 
        struct {
-               gfn_t gfn;          /* presumed gfn during guest pte update */
-               struct page *page;  /* page corresponding to that gfn */
+               gfn_t gfn;      /* presumed gfn during guest pte update */
+               pfn_t pfn;      /* pfn corresponding to that gfn */
+               int largepage;
        } update_pte;
 
        struct i387_fxsave_struct host_fx_image;
@@ -261,6 +280,11 @@ struct kvm_vcpu_arch {
        /* emulate context */
 
        struct x86_emulate_ctxt emulate_ctxt;
+
+       gpa_t time;
+       struct kvm_vcpu_time_info hv_clock;
+       unsigned int time_offset;
+       struct page *time_page;
 };
 
 struct kvm_mem_alias {
@@ -283,10 +307,13 @@ struct kvm_arch{
        struct list_head active_mmu_pages;
        struct kvm_pic *vpic;
        struct kvm_ioapic *vioapic;
+       struct kvm_pit *vpit;
 
        int round_robin_prev_vcpu;
        unsigned int tss_addr;
        struct page *apic_access_page;
+
+       gpa_t wall_clock;
 };
 
 struct kvm_vm_stat {
@@ -298,6 +325,7 @@ struct kvm_vm_stat {
        u32 mmu_recycled;
        u32 mmu_cache_miss;
        u32 remote_tlb_flush;
+       u32 lpages;
 };
 
 struct kvm_vcpu_stat {
@@ -320,6 +348,7 @@ struct kvm_vcpu_stat {
        u32 fpu_reload;
        u32 insn_emulation;
        u32 insn_emulation_fail;
+       u32 hypercalls;
 };
 
 struct descriptor_table {
@@ -355,6 +384,7 @@ struct kvm_x86_ops {
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
+       int (*get_cpl)(struct kvm_vcpu *vcpu);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
@@ -410,6 +440,15 @@ void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
 
+int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
+
+int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+                         const void *val, int bytes);
+int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
+                 gpa_t addr, unsigned long *ret);
+
+extern bool tdp_enabled;
+
 enum emulation_result {
        EMULATE_DONE,       /* no further processing */
        EMULATE_DO_MMIO,      /* kvm_run filled with mmio request */
@@ -429,6 +468,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
 unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
                     unsigned long *rflags);
+void kvm_enable_efer_bits(u64);
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
 
@@ -448,12 +488,14 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long value);
 
-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
-unsigned long get_cr8(struct kvm_vcpu *vcpu);
-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
+
+void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
+void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
 void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
 
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
@@ -491,6 +533,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
 
+void kvm_enable_tdp(void);
+
 int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
 int complete_pio(struct kvm_vcpu *vcpu);
 
@@ -600,6 +644,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
 #define ASM_VMX_VMWRITE_RSP_RDX   ".byte 0x0f, 0x79, 0xd4"
 #define ASM_VMX_VMXOFF            ".byte 0x0f, 0x01, 0xc4"
 #define ASM_VMX_VMXON_RAX         ".byte 0xf3, 0x0f, 0xc7, 0x30"
+#define ASM_VMX_INVVPID                  ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
 
 #define MSR_IA32_TIME_STAMP_COUNTER            0x010
 
@@ -610,4 +655,30 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
 #define RMODE_TSS_SIZE                                                 \
        (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
 
+enum {
+       TASK_SWITCH_CALL = 0,
+       TASK_SWITCH_IRET = 1,
+       TASK_SWITCH_JMP = 2,
+       TASK_SWITCH_GATE = 3,
+};
+
+#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
+       trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+                                               vcpu, 5, d1, d2, d3, d4, d5)
+#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
+       trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+                                               vcpu, 4, d1, d2, d3, d4, 0)
+#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
+       trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+                                               vcpu, 3, d1, d2, d3, 0, 0)
+#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
+       trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+                                               vcpu, 2, d1, d2, 0, 0, 0)
+#define KVMTRACE_1D(evt, vcpu, d1, name) \
+       trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+                                               vcpu, 1, d1, 0, 0, 0, 0)
+#define KVMTRACE_0D(evt, vcpu, name) \
+       trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+                                               vcpu, 0, 0, 0, 0, 0, 0)
+
 #endif
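The KVMTRACE_nD macros wrap trace_mark() with a fixed-width payload: the event id, the vcpu pointer, a count of valid data words, and up to five 32-bit values. A hypothetical call site (the port/size variables and the trailing marker-name argument are placeholders for this sketch, not taken from this commit):

    /* record a guest port write: event KVM_TRC_IO_WRITE, two data words */
    KVMTRACE_2D(IO_WRITE, vcpu, (u32)port, (u32)size, handler);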
index c6f3fd8d8c5350956b07764551c365f1f3b450b3..5098459420705ea53f41c2d05b0a91b688980334 100644 (file)
  * paravirtualization, the appropriate feature bit should be checked.
  */
 #define KVM_CPUID_FEATURES     0x40000001
+#define KVM_FEATURE_CLOCKSOURCE                0
+#define KVM_FEATURE_NOP_IO_DELAY       1
+#define KVM_FEATURE_MMU_OP             2
+
+#define MSR_KVM_WALL_CLOCK  0x11
+#define MSR_KVM_SYSTEM_TIME 0x12
+
+#define KVM_MAX_MMU_OP_BATCH           32
+
+/* Operations for KVM_HC_MMU_OP */
+#define KVM_MMU_OP_WRITE_PTE            1
+#define KVM_MMU_OP_FLUSH_TLB           2
+#define KVM_MMU_OP_RELEASE_PT          3
+
+/* Payload for KVM_HC_MMU_OP */
+struct kvm_mmu_op_header {
+       __u32 op;
+       __u32 pad;
+};
+
+struct kvm_mmu_op_write_pte {
+       struct kvm_mmu_op_header header;
+       __u64 pte_phys;
+       __u64 pte_val;
+};
+
+struct kvm_mmu_op_flush_tlb {
+       struct kvm_mmu_op_header header;
+};
+
+struct kvm_mmu_op_release_pt {
+       struct kvm_mmu_op_header header;
+       __u64 pt_phys;
+};
 
 #ifdef __KERNEL__
 #include <asm/processor.h>
 
+/* Xen binary-compatible interface. See the Xen headers for details */
+struct kvm_vcpu_time_info {
+       uint32_t version;
+       uint32_t pad0;
+       uint64_t tsc_timestamp;
+       uint64_t system_time;
+       uint32_t tsc_to_system_mul;
+       int8_t   tsc_shift;
+       int8_t   pad[3];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct kvm_wall_clock {
+       uint32_t wc_version;
+       uint32_t wc_sec;
+       uint32_t wc_nsec;
+} __attribute__((__packed__));
+
+
+extern void kvmclock_init(void);
+
+
 /* This instruction is vmcall.  On non-VT architectures, it will generate a
  * trap that we will then rewrite to the appropriate instruction.
  */
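Guest-side registration of the structures above goes through the two new MSRs: the guest writes the physical address of its kvm_vcpu_time_info block, with bit 0 of the system-time MSR acting as an enable flag. A hedged sketch of the pattern (the hv_clock name and wrapper function are local to this example):

    static struct kvm_vcpu_time_info hv_clock __attribute__((aligned(32)));

    static void register_kvmclock_sketch(void)
    {
            u64 pa = __pa(&hv_clock);

            /* low bit set = ask the host to keep updating this page */
            wrmsrl(MSR_KVM_SYSTEM_TIME, pa | 1);
    }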
index 0a6634f62abe2c1c37d91d078846cdeef51f15c3..21003b56ae959e0db51ebd1ac689ccb83ecbda37 100644 (file)
@@ -109,13 +109,8 @@ static inline int cpu_to_logical_apicid(int cpu)
 
 static inline int cpu_present_to_apicid(int mps_cpu)
 {
-#ifdef CONFIG_X86_64
-       if (cpu_present(mps_cpu))
+       if (mps_cpu < NR_CPUS && cpu_present(mps_cpu))
                return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
-#else
-       if (mps_cpu < get_physical_broadcast())
-               return  mps_cpu;
-#endif
        else
                return BAD_APICID;
 }
diff --git a/include/asm-x86/olpc.h b/include/asm-x86/olpc.h
new file mode 100644 (file)
index 0000000..97d4713
--- /dev/null
@@ -0,0 +1,132 @@
+/* OLPC machine specific definitions */
+
+#ifndef ASM_OLPC_H_
+#define ASM_OLPC_H_
+
+#include <asm/geode.h>
+
+struct olpc_platform_t {
+       int flags;
+       uint32_t boardrev;
+       int ecver;
+};
+
+#define OLPC_F_PRESENT         0x01
+#define OLPC_F_DCON            0x02
+#define OLPC_F_VSA             0x04
+
+#ifdef CONFIG_OLPC
+
+extern struct olpc_platform_t olpc_platform_info;
+
+/*
+ * OLPC board IDs contain the major build number within the mask 0x0ff0,
+ * and the minor build number within 0x000f.  Pre-builds have a minor
+ * number less than 8, and normal builds start at 8.  For example, 0x0B10
+ * is a PreB1, and 0x0C18 is a C1.
+ */
+
+static inline uint32_t olpc_board(uint8_t id)
+{
+       return (id << 4) | 0x8;
+}
+
+static inline uint32_t olpc_board_pre(uint8_t id)
+{
+       return id << 4;
+}
+
+static inline int machine_is_olpc(void)
+{
+       return (olpc_platform_info.flags & OLPC_F_PRESENT) ? 1 : 0;
+}
+
+/*
+ * The DCON is OLPC's Display Controller.  It has a number of unique
+ * features that we might want to take advantage of.
+ */
+static inline int olpc_has_dcon(void)
+{
+       return (olpc_platform_info.flags & OLPC_F_DCON) ? 1 : 0;
+}
+
+/*
+ * The VSA is software from AMD that typical Geode BIOSes will include.
+ * It is used to emulate the PCI bus, VGA, etc.  OLPC's Open Firmware does
+ * not include the VSA; instead, PCI is emulated by the kernel.
+ *
+ * The VSA is described further in arch/x86/pci/olpc.c.
+ */
+static inline int olpc_has_vsa(void)
+{
+       return (olpc_platform_info.flags & OLPC_F_VSA) ? 1 : 0;
+}
+
+/*
+ * The "Mass Production" version of OLPC's XO is identified as being model
+ * C2.  During the prototype phase, the following models (in chronological
+ * order) were created: A1, B1, B2, B3, B4, C1.  The A1 through B2 models
+ * were based on Geode GX CPUs, and models after that were based upon
+ * Geode LX CPUs.  There were also some hand-assembled models floating
+ * around, referred to as PreB1, PreB2, etc.
+ */
+static inline int olpc_board_at_least(uint32_t rev)
+{
+       return olpc_platform_info.boardrev >= rev;
+}
+
+#else
+
+static inline int machine_is_olpc(void)
+{
+       return 0;
+}
+
+static inline int olpc_has_dcon(void)
+{
+       return 0;
+}
+
+static inline int olpc_has_vsa(void)
+{
+       return 0;
+}
+
+#endif
+
+/* EC related functions */
+
+extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen,
+               unsigned char *outbuf, size_t outlen);
+
+extern int olpc_ec_mask_set(uint8_t bits);
+extern int olpc_ec_mask_unset(uint8_t bits);
+
+/* EC commands */
+
+#define EC_FIRMWARE_REV                0x08
+
+/* SCI source values */
+
+#define EC_SCI_SRC_EMPTY       0x00
+#define EC_SCI_SRC_GAME                0x01
+#define EC_SCI_SRC_BATTERY     0x02
+#define EC_SCI_SRC_BATSOC      0x04
+#define EC_SCI_SRC_BATERR      0x08
+#define EC_SCI_SRC_EBOOK       0x10
+#define EC_SCI_SRC_WLAN                0x20
+#define EC_SCI_SRC_ACPWR       0x40
+#define EC_SCI_SRC_ALL         0x7F
+
+/* GPIO assignments */
+
+#define OLPC_GPIO_MIC_AC       geode_gpio(1)
+#define OLPC_GPIO_DCON_IRQ     geode_gpio(7)
+#define OLPC_GPIO_THRM_ALRM    geode_gpio(10)
+#define OLPC_GPIO_SMB_CLK      geode_gpio(14)
+#define OLPC_GPIO_SMB_DATA     geode_gpio(15)
+#define OLPC_GPIO_WORKAUX      geode_gpio(24)
+#define OLPC_GPIO_LID          geode_gpio(26)
+#define OLPC_GPIO_ECSCI                geode_gpio(27)
+
+#endif
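Putting the board-revision helpers together: olpc_board() encodes a mass-production build (minor number 8) and olpc_board_pre() a prototype, so revision checks reduce to a simple comparison. A small illustrative snippet (the BOARD_* names and the wrapper are invented for this sketch):

    #define BOARD_PREB1     olpc_board_pre(0xb1)    /* 0x0B10 */
    #define BOARD_C1        olpc_board(0xc1)        /* 0x0C18 */

    static int needs_c1_quirk(void)
    {
            /* true on a C1 (or later) XO, false on earlier prototypes */
            return machine_is_olpc() && olpc_board_at_least(BOARD_C1);
    }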
index ddd8e248fc0a43bde4ec039beebbdfc500457e77..30bbde0cb34bfba7e272ad68be2dffad8ef20d0d 100644 (file)
@@ -19,6 +19,8 @@ struct pci_sysdata {
 };
 
 /* scan a bus after allocating a pci_sysdata for it */
+extern struct pci_bus *pci_scan_bus_on_node(int busno, struct pci_ops *ops,
+                                           int node);
 extern struct pci_bus *pci_scan_bus_with_sysdata(int busno);
 
 static inline int pci_domain_nr(struct pci_bus *bus)
index a496d6335d3baa644f80bd0c9e293ce5b9fc480c..801b31f714524052a2e44652bcf8db0746a45a57 100644 (file)
@@ -195,6 +195,11 @@ static inline int pte_exec(pte_t pte)
        return !(pte_val(pte) & _PAGE_NX);
 }
 
+static inline int pte_special(pte_t pte)
+{
+       return 0;
+}
+
 static inline int pmd_large(pmd_t pte)
 {
        return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
@@ -256,6 +261,11 @@ static inline pte_t pte_clrglobal(pte_t pte)
        return __pte(pte_val(pte) & ~(pteval_t)_PAGE_GLOBAL);
 }
 
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+       return pte;
+}
+
 extern pteval_t __supported_pte_mask;
 
 static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
index 117343b0c271f7f3781d8d67e5e0370671b1e1a7..559105220a477e2be9cfbd87eee62d1cd68767c2 100644 (file)
@@ -3,9 +3,6 @@
 
 #include <asm/processor-flags.h>
 
-/* migration helper, for KVM - will be removed in 2.6.25: */
-#define Xgt_desc_struct        desc_ptr
-
 /* Forward declaration, a strange C thing */
 struct task_struct;
 struct mm_struct;
@@ -722,6 +719,7 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
 
 static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
 {
+       trace_hardirqs_on();
        /* "mwait %eax, %ecx;" */
        asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
                     :: "a" (eax), "c" (ecx));
index 1e17bcce450e0ffcb47dbea2939c7366f769f29f..6c8b41b03f6def5593696d12752c3627028e10b3 100644 (file)
@@ -20,7 +20,11 @@ extern void syscall32_cpu_init(void);
 
 extern void check_efer(void);
 
+#ifdef CONFIG_X86_BIOS_REBOOT
 extern int reboot_force;
+#else
+static const int reboot_force = 0;
+#endif
 
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr);
 
index 6b5233b4f84b945d713cf047c0ea591ea77469d7..e63741f19392e24d587b3383c18e6de2da1dab18 100644 (file)
@@ -15,5 +15,7 @@ struct machine_ops {
 extern struct machine_ops machine_ops;
 
 void machine_real_restart(unsigned char *code, int length);
+void native_machine_crash_shutdown(struct pt_regs *regs);
+void native_machine_shutdown(void);
 
 #endif /* _ASM_REBOOT_H */
index 53185996209664c82588904ca3429844696ab9b2..b6338829d1a8d866745c8b8126ff1d0995383828 100644 (file)
@@ -131,7 +131,6 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SYSCALL_EMU                5       /* syscall emulation active */
 #define TIF_SYSCALL_AUDIT      6       /* syscall auditing active */
 #define TIF_SECCOMP            7       /* secure computing */
-#define TIF_RESTORE_SIGMASK    8       /* restore signal mask in do_signal() */
 #define TIF_HRTICK_RESCHED     9       /* reprogram hrtick timer */
 #define TIF_MEMDIE             16
 #define TIF_DEBUG              17      /* uses debug registers */
@@ -151,7 +150,6 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_SYSCALL_EMU       (1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
-#define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_HRTICK_RESCHED    (1 << TIF_HRTICK_RESCHED)
 #define _TIF_DEBUG             (1 << TIF_DEBUG)
 #define _TIF_IO_BITMAP         (1 << TIF_IO_BITMAP)
@@ -188,9 +186,20 @@ static inline struct thread_info *current_thread_info(void)
                                           this quantum (SMP) */
 #define TS_POLLING             0x0002  /* True if in idle loop
                                           and not sleeping */
+#define TS_RESTORE_SIGMASK     0x0004  /* restore signal mask in do_signal() */
 
 #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
+#ifndef __ASSEMBLY__
+#define HAVE_SET_RESTORE_SIGMASK       1
+static inline void set_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       ti->status |= TS_RESTORE_SIGMASK;
+       set_bit(TIF_SIGPENDING, &ti->flags);
+}
+#endif /* !__ASSEMBLY__ */
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_THREAD_INFO_H */
index ed664e874decb873d83bad65ba2eb5a549dab69d..cb69f70abba107889e2011e12e21ef70cefbe19a 100644 (file)
@@ -109,7 +109,6 @@ static inline struct thread_info *stack_thread_info(void)
 #define TIF_IRET               5       /* force IRET */
 #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_SECCOMP            8       /* secure computing */
-#define TIF_RESTORE_SIGMASK    9       /* restore signal mask in do_signal */
 #define TIF_MCE_NOTIFY         10      /* notify userspace of an MCE */
 #define TIF_HRTICK_RESCHED     11      /* reprogram hrtick timer */
 /* 16 free */
@@ -133,7 +132,6 @@ static inline struct thread_info *stack_thread_info(void)
 #define _TIF_IRET              (1 << TIF_IRET)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
-#define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_MCE_NOTIFY                (1 << TIF_MCE_NOTIFY)
 #define _TIF_HRTICK_RESCHED    (1 << TIF_HRTICK_RESCHED)
 #define _TIF_IA32              (1 << TIF_IA32)
@@ -178,9 +176,20 @@ static inline struct thread_info *stack_thread_info(void)
 #define TS_COMPAT              0x0002  /* 32bit syscall active */
 #define TS_POLLING             0x0004  /* true if in idle loop
                                           and not sleeping */
+#define TS_RESTORE_SIGMASK     0x0008  /* restore signal mask in do_signal() */
 
 #define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
 
+#ifndef __ASSEMBLY__
+#define HAVE_SET_RESTORE_SIGMASK       1
+static inline void set_restore_sigmask(void)
+{
+       struct thread_info *ti = current_thread_info();
+       ti->status |= TS_RESTORE_SIGMASK;
+       set_bit(TIF_SIGPENDING, &ti->flags);
+}
+#endif /* !__ASSEMBLY__ */
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_THREAD_INFO_H */
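Callers that previously set TIF_RESTORE_SIGMASK directly now go through set_restore_sigmask(), which also raises TIF_SIGPENDING so signal delivery notices the saved mask. A hedged, simplified sketch of the caller-side pattern in a ppoll/pselect-style syscall, where ret holds the result of the interruptible wait and sigsaved was filled in when the caller's temporary mask was installed:

    if (ret == -EINTR) {
            /*
             * A signal arrived while the caller-supplied mask was in
             * force; deliver it with that mask still installed and let
             * the signal code restore the saved mask afterwards.
             */
            memcpy(&current->saved_sigmask, &sigsaved, sizeof(sigsaved));
            set_restore_sigmask();
            ret = -ERESTARTNOHAND;
    }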
index 68779b048a3edba6f3371bb459735d8a170aec9f..bce72d7a958c21829aa79c041b4ac2fb06178421 100644 (file)
@@ -1,7 +1,6 @@
 #ifndef _ASMX86_TIME_H
 #define _ASMX86_TIME_H
 
-extern void (*late_time_init)(void);
 extern void hpet_time_init(void);
 
 #include <asm/mc146818rtc.h>
index 22073268b4814ec97a7ddab151e01c1807eabcc8..4f35a0fb4f22ac5ba6d459a45f4eb8971eb50cac 100644 (file)
@@ -193,9 +193,29 @@ extern cpumask_t cpu_coregroup_map(int cpu);
 #define topology_thread_siblings(cpu)          (per_cpu(cpu_sibling_map, cpu))
 #endif
 
+static inline void arch_fix_phys_package_id(int num, u32 slot)
+{
+}
+
+struct pci_bus;
+void set_pci_bus_resources_arch_default(struct pci_bus *b);
+
 #ifdef CONFIG_SMP
 #define mc_capable()                   (boot_cpu_data.x86_max_cores > 1)
 #define smt_capable()                  (smp_num_siblings > 1)
 #endif
 
+#ifdef CONFIG_NUMA
+extern int get_mp_bus_to_node(int busnum);
+extern void set_mp_bus_to_node(int busnum, int node);
+#else
+static inline int get_mp_bus_to_node(int busnum)
+{
+       return 0;
+}
+static inline void set_mp_bus_to_node(int busnum, int node)
+{
+}
+#endif
+
 #endif
index d2d8eb5b55f532365f9277b5f776109626f7ed4b..548873ab5fc1b0deabafc4b3dcbb23ee8cbf05db 100644 (file)
@@ -32,7 +32,7 @@ static inline cycles_t get_cycles(void)
        return ret;
 }
 
-static inline cycles_t vget_cycles(void)
+static __always_inline cycles_t vget_cycles(void)
 {
        /*
         * We only do VDSOs on TSC-capable CPUs, so this shouldn't
index d270ffe727597474be2a85e8fcea665b14a122b3..a7bd416b4763832f0a9851df49e92c938b8c5232 100644 (file)
@@ -3,35 +3,12 @@
 
 /*
  * The x86 can do unaligned accesses itself.
- *
- * The strange macros are there to make sure these can't
- * be misused in a way that makes them not work on other
- * architectures where unaligned accesses aren't as simple.
  */
 
-/**
- * get_unaligned - get value from possibly mis-aligned location
- * @ptr: pointer to value
- *
- * This macro should be used for accessing values larger in size than
- * single bytes at locations that are expected to be improperly aligned,
- * e.g. retrieving a u16 value from a location not u16-aligned.
- *
- * Note that unaligned accesses can be very expensive on some architectures.
- */
-#define get_unaligned(ptr) (*(ptr))
+#include <linux/unaligned/access_ok.h>
+#include <linux/unaligned/generic.h>
 
-/**
- * put_unaligned - put value to a possibly mis-aligned location
- * @val: value to place
- * @ptr: pointer to location
- *
- * This macro should be used for placing values larger in size than
- * single bytes at locations that are expected to be improperly aligned,
- * e.g. writing a u16 value to a location not u16-aligned.
- *
- * Note that unaligned accesses can be very expensive on some architectures.
- */
-#define put_unaligned(val, ptr) ((void)(*(ptr) = (val)))
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
 
 #endif /* _ASM_X86_UNALIGNED_H */
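The get_unaligned()/put_unaligned() API is unchanged for callers; only the implementation now comes from the generic linux/unaligned/ headers. Typical usage, for reference (the buffer and values are invented for the example):

    static void unaligned_example(void)
    {
            u8 buf[8] = { 0 };
            u16 val;

            val = get_unaligned((u16 *)(buf + 1));           /* load from an odd address */
            put_unaligned((u16)(val + 1), (u16 *)(buf + 3)); /* misaligned store */
    }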
index c8b024a48b4d15f2865e76f7a4babf6592191b4d..8014d96b21f1cfde6c7392ff5219cfaa26e1258b 100644 (file)
@@ -210,6 +210,8 @@ static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITABLE; }
 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
 static inline int pte_file(pte_t pte)  { return pte_val(pte) & _PAGE_FILE; }
+static inline int pte_special(pte_t pte) { return 0; }
+
 static inline pte_t pte_wrprotect(pte_t pte)   
        { pte_val(pte) &= ~(_PAGE_WRITABLE | _PAGE_HW_WRITE); return pte; }
 static inline pte_t pte_mkclean(pte_t pte)
@@ -222,6 +224,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
        { pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 static inline pte_t pte_mkwrite(pte_t pte)
        { pte_val(pte) |= _PAGE_WRITABLE; return pte; }
+static inline pte_t pte_mkspecial(pte_t pte)
+       { return pte; }
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
index 28220890d0a6be20e1156fb1baacf654596071b9..8f3424fc5d1823df176ebf62662dec2d03b07d5d 100644 (file)
@@ -1,6 +1,4 @@
 /*
- * include/asm-xtensa/unaligned.h
- *
  * Xtensa doesn't handle unaligned accesses efficiently.
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -9,20 +7,23 @@
  *
  * Copyright (C) 2001 - 2005 Tensilica Inc.
  */
+#ifndef _ASM_XTENSA_UNALIGNED_H
+#define _ASM_XTENSA_UNALIGNED_H
 
-#ifndef _XTENSA_UNALIGNED_H
-#define _XTENSA_UNALIGNED_H
-
-#include <linux/string.h>
-
-/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
-
-#define get_unaligned(ptr) \
-  ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; })
-
-#define put_unaligned(val, ptr)                                \
-  ({ __typeof__(*(ptr)) __tmp = (val);                 \
-     memmove((ptr), &__tmp, sizeof(*(ptr)));           \
-     (void)0; })
+#ifdef __XTENSA_EL__
+# include <linux/unaligned/le_memmove.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#elif defined(__XTENSA_EB__)
+# include <linux/unaligned/be_memmove.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#else
+# error processor byte order undefined!
+#endif
 
-#endif /* _XTENSA_UNALIGNED_H */
+#endif /* _ASM_XTENSA_UNALIGNED_H */
index 224658b8d80689560e6ebf7c7bd94c41a2ea76bf..833d208c25d64194525e8b1ff09d8ddaf261251d 100644 (file)
@@ -57,10 +57,14 @@ static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
                                        struct scatterlist *sg2)
 {
        sg_set_page(&sg1[num - 1], (void *)sg2, 0, 0);
+       sg1[num - 1].page_link &= ~0x02;
 }
 
 static inline struct scatterlist *scatterwalk_sg_next(struct scatterlist *sg)
 {
+       if (sg_is_last(sg))
+               return NULL;
+
        return (++sg)->length ? sg : (void *)sg_page(sg);
 }
 
index bda6f04791d43e421efec9c5ced07bdd407f3eb4..78fade0a1e3528aaae16a306a47f22e91336e0e7 100644 (file)
@@ -20,6 +20,7 @@ header-y += affs_hardblocks.h
 header-y += aio_abi.h
 header-y += arcfb.h
 header-y += atmapi.h
+header-y += atmarp.h
 header-y += atmbr2684.h
 header-y += atmclip.h
 header-y += atm_eni.h
@@ -48,6 +49,7 @@ header-y += coff.h
 header-y += comstats.h
 header-y += const.h
 header-y += cgroupstats.h
+header-y += cramfs_fs.h
 header-y += cycx_cfm.h
 header-y += dlmconstants.h
 header-y += dlm_device.h
@@ -70,10 +72,12 @@ header-y += firewire-constants.h
 header-y += fuse.h
 header-y += genetlink.h
 header-y += gen_stats.h
+header-y += gfs2_ondisk.h
 header-y += gigaset_dev.h
 header-y += hysdn_if.h
 header-y += i2o-dev.h
 header-y += i8k.h
+header-y += if_addrlabel.h
 header-y += if_arcnet.h
 header-y += if_bonding.h
 header-y += if_cablemodem.h
@@ -91,6 +95,7 @@ header-y += if_tunnel.h
 header-y += in6.h
 header-y += in_route.h
 header-y += ioctl.h
+header-y += ip6_tunnel.h
 header-y += ipmi_msgdefs.h
 header-y += ipsec.h
 header-y += ipx.h
@@ -117,7 +122,6 @@ header-y += nfs2.h
 header-y += nfs4_mount.h
 header-y += nfs_mount.h
 header-y += nl80211.h
-header-y += oom.h
 header-y += param.h
 header-y += pci_regs.h
 header-y += pfkeyv2.h
@@ -166,7 +170,6 @@ unifdef-y += adfs_fs.h
 unifdef-y += agpgart.h
 unifdef-y += apm_bios.h
 unifdef-y += atalk.h
-unifdef-y += atmarp.h
 unifdef-y += atmdev.h
 unifdef-y += atm.h
 unifdef-y += atm_tcp.h
@@ -182,7 +185,6 @@ unifdef-y += cm4000_cs.h
 unifdef-y += cn_proc.h
 unifdef-y += coda.h
 unifdef-y += connector.h
-unifdef-y += cramfs_fs.h
 unifdef-y += cuda.h
 unifdef-y += cyclades.h
 unifdef-y += dccp.h
@@ -205,7 +207,6 @@ unifdef-y += futex.h
 unifdef-y += fs.h
 unifdef-y += gameport.h
 unifdef-y += generic_serial.h
-unifdef-y += gfs2_ondisk.h
 unifdef-y += hayesesp.h
 unifdef-y += hdlcdrv.h
 unifdef-y += hdlc.h
@@ -219,7 +220,6 @@ unifdef-y += i2c-dev.h
 unifdef-y += icmp.h
 unifdef-y += icmpv6.h
 unifdef-y += if_addr.h
-unifdef-y += if_addrlabel.h
 unifdef-y += if_arp.h
 unifdef-y += if_bridge.h
 unifdef-y += if_ec.h
@@ -243,7 +243,6 @@ unifdef-y += ipc.h
 unifdef-y += ipmi.h
 unifdef-y += ipv6.h
 unifdef-y += ipv6_route.h
-unifdef-y += ip6_tunnel.h
 unifdef-y += isdn.h
 unifdef-y += isdnif.h
 unifdef-y += isdn_divertif.h
index 2c7e003356ac184f4d7848b44a98468df65e01ec..41f7ce7edd7aea559be60998d65e68569d7b8d45 100644 (file)
@@ -79,6 +79,7 @@ typedef int (*acpi_table_handler) (struct acpi_table_header *table);
 typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);
 
 char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
+int early_acpi_boot_init(void);
 int acpi_boot_init (void);
 int acpi_boot_table_init (void);
 int acpi_numa_init (void);
@@ -235,6 +236,10 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
 
 #else  /* CONFIG_ACPI */
 
+static inline int early_acpi_boot_init(void)
+{
+       return 0;
+}
 static inline int acpi_boot_init(void)
 {
        return 0;
index 03e34547d489132632b6a15b2286c7dc2ac8ffa5..661d90d6cf7cc848158004c431a19f1365e6f15c 100644 (file)
@@ -30,8 +30,6 @@
 #ifndef _AGP_BACKEND_H
 #define _AGP_BACKEND_H 1
 
-#ifdef __KERNEL__
-
 #ifndef TRUE
 #define TRUE 1
 #endif
@@ -111,5 +109,4 @@ extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *);
 extern void agp_backend_release(struct agp_bridge_data *);
 extern void agp_flush_chipset(struct agp_bridge_data *);
 
-#endif                         /* __KERNEL__ */
 #endif                         /* _AGP_BACKEND_H */
index 0d0b7f629bd31f9b57154af39085b452e8174847..b51ddd28444e38763e7066ed36ffa9a3a2b6b355 100644 (file)
@@ -209,27 +209,8 @@ extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb);
 extern int aio_put_req(struct kiocb *iocb);
 extern void kick_iocb(struct kiocb *iocb);
 extern int aio_complete(struct kiocb *iocb, long res, long res2);
-extern void __put_ioctx(struct kioctx *ctx);
 struct mm_struct;
 extern void exit_aio(struct mm_struct *mm);
-extern struct kioctx *lookup_ioctx(unsigned long ctx_id);
-extern int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-                        struct iocb *iocb);
-
-/* semi private, but used by the 32bit emulations: */
-struct kioctx *lookup_ioctx(unsigned long ctx_id);
-int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-                 struct iocb *iocb);
-
-#define get_ioctx(kioctx) do {                                         \
-       BUG_ON(atomic_read(&(kioctx)->users) <= 0);                     \
-       atomic_inc(&(kioctx)->users);                                   \
-} while (0)
-#define put_ioctx(kioctx) do {                                         \
-       BUG_ON(atomic_read(&(kioctx)->users) <= 0);                     \
-       if (unlikely(atomic_dec_and_test(&(kioctx)->users)))            \
-               __put_ioctx(kioctx);                                    \
-} while (0)
 
 #define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait)
 
index 4ccb048cae1de97e0b4c9627f60c1f8cd7894f48..63c3bb98558f6b3e2d07cd9040621273966c1eec 100644 (file)
 /* Rule structure sizes -- if these change, different AUDIT_ADD and
  * AUDIT_LIST commands must be implemented. */
 #define AUDIT_MAX_FIELDS   64
-#define AUDIT_MAX_KEY_LEN  32
+#define AUDIT_MAX_KEY_LEN  256
 #define AUDIT_BITMASK_SIZE 64
 #define AUDIT_WORD(nr) ((__u32)((nr)/32))
 #define AUDIT_BIT(nr)  (1 << ((nr) - AUDIT_WORD(nr)*32))
 #define AUDIT_WATCH    105
 #define AUDIT_PERM     106
 #define AUDIT_DIR      107
+#define AUDIT_FILETYPE 108
 
 #define AUDIT_ARG0      200
 #define AUDIT_ARG1      (AUDIT_ARG0+1)
@@ -549,16 +550,20 @@ extern void                   audit_log_format(struct audit_buffer *ab,
                                             const char *fmt, ...)
                            __attribute__((format(printf,2,3)));
 extern void                audit_log_end(struct audit_buffer *ab);
-extern void                audit_log_hex(struct audit_buffer *ab,
-                                         const unsigned char *buf,
-                                         size_t len);
 extern int                 audit_string_contains_control(const char *string,
                                                          size_t len);
+extern void                audit_log_n_hex(struct audit_buffer *ab,
+                                         const unsigned char *buf,
+                                         size_t len);
+extern void                audit_log_n_string(struct audit_buffer *ab,
+                                              const char *buf,
+                                              size_t n);
+#define audit_log_string(a,b) audit_log_n_string(a, b, strlen(b));
+extern void                audit_log_n_untrustedstring(struct audit_buffer *ab,
+                                                       const char *string,
+                                                       size_t n);
 extern void                audit_log_untrustedstring(struct audit_buffer *ab,
                                                      const char *string);
-extern void                audit_log_n_untrustedstring(struct audit_buffer *ab,
-                                                       size_t n,
-                                                       const char *string);
 extern void                audit_log_d_path(struct audit_buffer *ab,
                                             const char *prefix,
                                             struct path *path);
@@ -569,7 +574,8 @@ extern int              audit_update_lsm_rules(void);
 extern int audit_filter_user(struct netlink_skb_parms *cb, int type);
 extern int audit_filter_type(int type);
 extern int  audit_receive_filter(int type, int pid, int uid, int seq,
-                        void *data, size_t datasz, uid_t loginuid, u32 sid);
+                               void *data, size_t datasz, uid_t loginuid,
+                               u32 sessionid, u32 sid);
 extern int audit_enabled;
 #else
 #define audit_log(c,g,t,f,...) do { ; } while (0)
@@ -577,9 +583,11 @@ extern int audit_enabled;
 #define audit_log_vformat(b,f,a) do { ; } while (0)
 #define audit_log_format(b,f,...) do { ; } while (0)
 #define audit_log_end(b) do { ; } while (0)
-#define audit_log_hex(a,b,l) do { ; } while (0)
-#define audit_log_untrustedstring(a,s) do { ; } while (0)
+#define audit_log_n_hex(a,b,l) do { ; } while (0)
+#define audit_log_n_string(a,c,l) do { ; } while (0)
+#define audit_log_string(a,c) do { ; } while (0)
 #define audit_log_n_untrustedstring(a,n,s) do { ; } while (0)
+#define audit_log_untrustedstring(a,s) do { ; } while (0)
 #define audit_log_d_path(b, p, d) do { ; } while (0)
 #define audit_enabled 0
 #endif
index 48a62baace582a5a841436ba2c8fa1bffdd52c4f..0a24d5550eb3a082725a150856af3055b77f4ae2 100644 (file)
 #include <linux/percpu_counter.h>
 #include <linux/log2.h>
 #include <linux/proportions.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
 #include <asm/atomic.h>
 
 struct page;
+struct device;
+struct dentry;
 
 /*
  * Bits in backing_dev_info.state
@@ -48,11 +52,26 @@ struct backing_dev_info {
 
        struct prop_local_percpu completions;
        int dirty_exceeded;
+
+       unsigned int min_ratio;
+       unsigned int max_ratio, max_prop_frac;
+
+       struct device *dev;
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *debug_dir;
+       struct dentry *debug_stats;
+#endif
 };
 
 int bdi_init(struct backing_dev_info *bdi);
 void bdi_destroy(struct backing_dev_info *bdi);
 
+int bdi_register(struct backing_dev_info *bdi, struct device *parent,
+               const char *fmt, ...);
+int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
+void bdi_unregister(struct backing_dev_info *bdi);
+
 static inline void __add_bdi_stat(struct backing_dev_info *bdi,
                enum bdi_stat_item item, s64 amount)
 {
@@ -116,6 +135,8 @@ static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
        return sum;
 }
 
+extern void bdi_writeout_inc(struct backing_dev_info *bdi);
+
 /*
  * maximal error of a stat counter.
  */
@@ -128,24 +149,48 @@ static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
 #endif
 }
 
+int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
+int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
+
 /*
  * Flags in backing_dev_info::capability
- * - The first two flags control whether dirty pages will contribute to the
- *   VM's accounting and whether writepages() should be called for dirty pages
- *   (something that would not, for example, be appropriate for ramfs)
- * - These flags let !MMU mmap() govern direct device mapping vs immediate
- *   copying more easily for MAP_PRIVATE, especially for ROM filesystems
+ *
+ * The first three flags control whether dirty pages will contribute to the
+ * VM's accounting and whether writepages() should be called for dirty pages
+ * (something that would not, for example, be appropriate for ramfs)
+ *
+ * WARNING: these flags are closely related and should not normally be
+ * used separately.  The BDI_CAP_NO_ACCT_AND_WRITEBACK combines these
+ * three flags into a single convenience macro.
+ *
+ * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
+ * BDI_CAP_NO_WRITEBACK:   Don't write pages back
+ * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
+ *
+ * These flags let !MMU mmap() govern direct device mapping vs immediate
+ * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
+ *
+ * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
+ * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
+ * BDI_CAP_READ_MAP:       Can be mapped for reading
+ * BDI_CAP_WRITE_MAP:      Can be mapped for writing
+ * BDI_CAP_EXEC_MAP:       Can be mapped for execution
  */
-#define BDI_CAP_NO_ACCT_DIRTY  0x00000001      /* Dirty pages shouldn't contribute to accounting */
-#define BDI_CAP_NO_WRITEBACK   0x00000002      /* Don't write pages back */
-#define BDI_CAP_MAP_COPY       0x00000004      /* Copy can be mapped (MAP_PRIVATE) */
-#define BDI_CAP_MAP_DIRECT     0x00000008      /* Can be mapped directly (MAP_SHARED) */
-#define BDI_CAP_READ_MAP       0x00000010      /* Can be mapped for reading */
-#define BDI_CAP_WRITE_MAP      0x00000020      /* Can be mapped for writing */
-#define BDI_CAP_EXEC_MAP       0x00000040      /* Can be mapped for execution */
+#define BDI_CAP_NO_ACCT_DIRTY  0x00000001
+#define BDI_CAP_NO_WRITEBACK   0x00000002
+#define BDI_CAP_MAP_COPY       0x00000004
+#define BDI_CAP_MAP_DIRECT     0x00000008
+#define BDI_CAP_READ_MAP       0x00000010
+#define BDI_CAP_WRITE_MAP      0x00000020
+#define BDI_CAP_EXEC_MAP       0x00000040
+#define BDI_CAP_NO_ACCT_WB     0x00000080
+
 #define BDI_CAP_VMFLAGS \
        (BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)
 
+#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
+       (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)
+
 #if defined(VM_MAYREAD) && \
        (BDI_CAP_READ_MAP != VM_MAYREAD || \
         BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
@@ -156,9 +201,7 @@ static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
 extern struct backing_dev_info default_backing_dev_info;
 void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page);
 
-int writeback_acquire(struct backing_dev_info *bdi);
 int writeback_in_progress(struct backing_dev_info *bdi);
-void writeback_release(struct backing_dev_info *bdi);
 
 static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
 {
@@ -187,17 +230,32 @@ void clear_bdi_congested(struct backing_dev_info *bdi, int rw);
 void set_bdi_congested(struct backing_dev_info *bdi, int rw);
 long congestion_wait(int rw, long timeout);
 
-#define bdi_cap_writeback_dirty(bdi) \
-       (!((bdi)->capabilities & BDI_CAP_NO_WRITEBACK))
 
-#define bdi_cap_account_dirty(bdi) \
-       (!((bdi)->capabilities & BDI_CAP_NO_ACCT_DIRTY))
+static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
+{
+       return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
+}
+
+static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
+{
+       return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
+}
 
-#define mapping_cap_writeback_dirty(mapping) \
-       bdi_cap_writeback_dirty((mapping)->backing_dev_info)
+static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
+{
+       /* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
+       return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
+                                     BDI_CAP_NO_WRITEBACK));
+}
 
-#define mapping_cap_account_dirty(mapping) \
-       bdi_cap_account_dirty((mapping)->backing_dev_info)
+static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
+{
+       return bdi_cap_writeback_dirty(mapping->backing_dev_info);
+}
 
+static inline bool mapping_cap_account_dirty(struct address_space *mapping)
+{
+       return bdi_cap_account_dirty(mapping->backing_dev_info);
+}
 
 #endif         /* _LINUX_BACKING_DEV_H */
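The BDI capability bits above follow a negative-flag convention: each BDI_CAP_NO_* bit disables a behaviour, the new inline predicates test for its absence, and BDI_CAP_NO_ACCT_AND_WRITEBACK bundles the three accounting/writeback bits. A minimal userspace sketch of the same flag arithmetic (the struct, helper and main() are illustrative stand-ins; only the BDI_CAP_* values mirror the header):

    #include <stdbool.h>
    #include <stdio.h>

    #define BDI_CAP_NO_ACCT_DIRTY 0x00000001u
    #define BDI_CAP_NO_WRITEBACK  0x00000002u
    #define BDI_CAP_NO_ACCT_WB    0x00000080u
    #define BDI_CAP_NO_ACCT_AND_WRITEBACK \
        (BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

    struct bdi_like { unsigned int capabilities; };

    /* Mirrors bdi_cap_account_writeback(): either "no writeback" or
     * "no writeback accounting" disables writeback accounting. */
    static bool cap_account_writeback(const struct bdi_like *bdi)
    {
        return !(bdi->capabilities &
                 (BDI_CAP_NO_ACCT_WB | BDI_CAP_NO_WRITEBACK));
    }

    int main(void)
    {
        struct bdi_like ram_like  = { BDI_CAP_NO_ACCT_AND_WRITEBACK };
        struct bdi_like disk_like = { 0 };

        printf("ramfs-style bdi accounts writeback: %d\n",
               cap_account_writeback(&ram_like));      /* prints 0 */
        printf("disk-style bdi accounts writeback: %d\n",
               cap_account_writeback(&disk_like));     /* prints 1 */
        return 0;
    }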
index b7fc55ec8d480d71dcda61a249962ce846f2f22d..b512e48f6d8e9ccbcc811c364cf73a8f85bdd58f 100644 (file)
@@ -34,7 +34,8 @@ struct linux_binprm{
 #endif
        struct mm_struct *mm;
        unsigned long p; /* current top of mem */
-       int sh_bang;
+       unsigned int sh_bang:1,
+                    misc_bang:1;
        struct file * file;
        int e_uid, e_gid;
        kernel_cap_t cap_inheritable, cap_permitted;
@@ -48,7 +49,6 @@ struct linux_binprm{
        unsigned interp_flags;
        unsigned interp_data;
        unsigned long loader, exec;
-       unsigned long argv_len;
 };
 
 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
index d259690863fb0494a5d1ceeb580f30d0ac793eef..61c15eaf3fb3d8f309cd604380f0574f9547749f 100644 (file)
@@ -324,6 +324,8 @@ extern struct bio *bio_map_user_iov(struct request_queue *,
 extern void bio_unmap_user(struct bio *);
 extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
                                gfp_t);
+extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
+                                gfp_t, int);
 extern void bio_set_pages_dirty(struct bio *bio);
 extern void bio_check_pages_dirty(struct bio *bio);
 extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
index 1dbe074f1c645fed22e55484f26cdd90cc325bfb..43b406def35f6bfadcdbf5b621995e7f920bc851 100644 (file)
@@ -46,6 +46,8 @@
  * bitmap_shift_left(dst, src, n, nbits)       *dst = *src << n
  * bitmap_remap(dst, src, old, new, nbits)     *dst = map(old, new)(src)
  * bitmap_bitremap(oldbit, old, new, nbits)    newbit = map(old, new)(oldbit)
+ * bitmap_onto(dst, orig, relmap, nbits)       *dst = orig relative to relmap
+ * bitmap_fold(dst, orig, sz, nbits)           dst bits = orig bits mod sz
  * bitmap_scnprintf(buf, len, src, nbits)      Print bitmap src to buf
  * bitmap_parse(buf, buflen, dst, nbits)       Parse bitmap dst from kernel buf
  * bitmap_parse_user(ubuf, ulen, dst, nbits)   Parse bitmap dst from user buf
@@ -121,6 +123,10 @@ extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
                const unsigned long *old, const unsigned long *new, int bits);
 extern int bitmap_bitremap(int oldbit,
                const unsigned long *old, const unsigned long *new, int bits);
+extern void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+               const unsigned long *relmap, int bits);
+extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+               int sz, int bits);
 extern int bitmap_find_free_region(unsigned long *bitmap, int bits, int order);
 extern void bitmap_release_region(unsigned long *bitmap, int pos, int order);
 extern int bitmap_allocate_region(unsigned long *bitmap, int pos, int order);
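bitmap_fold() is documented above as "dst bits = orig bits mod sz": every set bit i in orig sets bit i % sz in dst. A small userspace sketch of that documented semantic over raw unsigned long words (an illustration of the behaviour only, not the lib/bitmap.c implementation; the helper name is made up):

    #include <stdio.h>
    #include <string.h>

    #define BITS_PER_LONG (8 * (int)sizeof(long))

    /* Set bit (i % sz) in dst for every set bit i < nbits in orig. */
    static void bitmap_fold_sketch(unsigned long *dst, const unsigned long *orig,
                                   int sz, int nbits)
    {
        int i;

        memset(dst, 0,
               ((nbits + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof(long));
        for (i = 0; i < nbits; i++)
            if (orig[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
                dst[(i % sz) / BITS_PER_LONG] |=
                    1UL << ((i % sz) % BITS_PER_LONG);
    }

    int main(void)
    {
        unsigned long orig = (1UL << 1) | (1UL << 9);   /* bits 1 and 9 set */
        unsigned long dst;

        bitmap_fold_sketch(&dst, &orig, 8, BITS_PER_LONG);
        printf("folded: 0x%lx\n", dst);         /* 1 and 9 both fold to bit 1 */
        return 0;
    }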
index 40d54731de7e0b9fb6b44ebf7db983ea58fc1ac2..024f2b027244aa8fd8ce35cb4aaa6ee1c9e57fbb 100644 (file)
@@ -6,8 +6,8 @@
 #define BIT(nr)                        (1UL << (nr))
 #define BIT_MASK(nr)           (1UL << ((nr) % BITS_PER_LONG))
 #define BIT_WORD(nr)           ((nr) / BITS_PER_LONG)
-#define BITS_TO_LONGS(nr)      DIV_ROUND_UP(nr, BITS_PER_LONG)
 #define BITS_PER_BYTE          8
+#define BITS_TO_LONGS(nr)      DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #endif
 
 /*
@@ -112,4 +112,53 @@ static inline unsigned fls_long(unsigned long l)
        return fls64(l);
 }
 
+#ifdef __KERNEL__
+#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first set bit.
+ */
+extern unsigned long find_first_bit(const unsigned long *addr,
+                                   unsigned long size);
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first cleared bit.
+ */
+extern unsigned long find_first_zero_bit(const unsigned long *addr,
+                                        unsigned long size);
+
+#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
+
+#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
+
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bit number to start searching at
+ * @size: The bitmap size in bits
+ */
+extern unsigned long find_next_bit(const unsigned long *addr,
+                                  unsigned long size, unsigned long offset);
+
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bit number to start searching at
+ * @size: The bitmap size in bits
+ */
+
+extern unsigned long find_next_zero_bit(const unsigned long *addr,
+                                       unsigned long size,
+                                       unsigned long offset);
+
+#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
+#endif /* __KERNEL__ */
 #endif
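find_first_bit() above returns the index of the first set bit in an array of unsigned long words, or the size if no bit is set. A self-contained userspace sketch of that contract, using the GCC/Clang __builtin_ctzl() intrinsic for the in-word scan (the function name is a stand-in; this is not the kernel's generic implementation):

    #include <stdio.h>

    #define BITS_PER_LONG (8UL * sizeof(long))

    /* Return the index of the first set bit, or 'size' if none is set. */
    static unsigned long find_first_bit_sketch(const unsigned long *addr,
                                               unsigned long size)
    {
        unsigned long i;

        for (i = 0; i * BITS_PER_LONG < size; i++) {
            unsigned long word = addr[i];

            /* Mask off bits beyond 'size' in the final, partial word. */
            if (size - i * BITS_PER_LONG < BITS_PER_LONG)
                word &= (1UL << (size - i * BITS_PER_LONG)) - 1;
            if (word)
                return i * BITS_PER_LONG +
                       (unsigned long)__builtin_ctzl(word);
        }
        return size;
    }

    int main(void)
    {
        unsigned long map[2] = { 0, 1UL << 5 };

        /* Prints BITS_PER_LONG + 5 (69 on an LP64 machine). */
        printf("first set bit: %lu\n",
               find_first_bit_sketch(map, 2 * BITS_PER_LONG));
        return 0;
    }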
index c5065e3d2ca9eea3e1a4319d117cdf4d5abbd539..d2a1b71e93c3eee9570e86513bdb2786d260699c 100644 (file)
@@ -215,8 +215,9 @@ struct request {
        /*
         * when request is used as a packet command carrier
         */
-       unsigned int cmd_len;
-       unsigned char cmd[BLK_MAX_CDB];
+       unsigned short cmd_len;
+       unsigned char __cmd[BLK_MAX_CDB];
+       unsigned char *cmd;
 
        unsigned int data_len;
        unsigned int extra_len; /* length of alignment and padding */
@@ -407,6 +408,41 @@ struct request_queue
 #define QUEUE_FLAG_PLUGGED     7       /* queue is plugged */
 #define QUEUE_FLAG_ELVSWITCH   8       /* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI                9       /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES    10      /* disable merge attempts */
+
+static inline int queue_is_locked(struct request_queue *q)
+{
+#ifdef CONFIG_SMP
+       spinlock_t *lock = q->queue_lock;
+       return lock && spin_is_locked(lock);
+#else
+       return 1;
+#endif
+}
+
+static inline void queue_flag_set_unlocked(unsigned int flag,
+                                          struct request_queue *q)
+{
+       __set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
+{
+       WARN_ON_ONCE(!queue_is_locked(q));
+       __set_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear_unlocked(unsigned int flag,
+                                            struct request_queue *q)
+{
+       __clear_bit(flag, &q->queue_flags);
+}
+
+static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
+{
+       WARN_ON_ONCE(!queue_is_locked(q));
+       __clear_bit(flag, &q->queue_flags);
+}
 
 enum {
        /*
@@ -451,6 +487,7 @@ enum {
 #define blk_queue_plugged(q)   test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)    test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)   test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_nomerges(q)  test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_flushing(q)  ((q)->ordseq)
 
 #define blk_fs_request(rq)     ((rq)->cmd_type == REQ_TYPE_FS)
@@ -496,17 +533,17 @@ static inline int blk_queue_full(struct request_queue *q, int rw)
 static inline void blk_set_queue_full(struct request_queue *q, int rw)
 {
        if (rw == READ)
-               set_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+               queue_flag_set(QUEUE_FLAG_READFULL, q);
        else
-               set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+               queue_flag_set(QUEUE_FLAG_WRITEFULL, q);
 }
 
 static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 {
        if (rw == READ)
-               clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags);
+               queue_flag_clear(QUEUE_FLAG_READFULL, q);
        else
-               clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags);
+               queue_flag_clear(QUEUE_FLAG_WRITEFULL, q);
 }
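queue_flag_set()/queue_flag_clear() above pair non-atomic __set_bit()/__clear_bit() with a WARN_ON_ONCE() that the queue lock is held, and blk_set_queue_full()/blk_clear_queue_full() now go through them instead of the atomic set_bit()/clear_bit(). A minimal userspace sketch of the locked-flag-helper pattern (a plain bool stands in for spin_is_locked() and assert() for WARN_ON_ONCE(); everything here is illustrative):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define QUEUE_FLAG_READFULL  3
    #define QUEUE_FLAG_WRITEFULL 4

    struct queue_like {
        unsigned long flags;
        bool locked;            /* stand-in for spin_is_locked(q->queue_lock) */
    };

    static void queue_flag_set(unsigned int flag, struct queue_like *q)
    {
        assert(q->locked);      /* the kernel uses WARN_ON_ONCE() instead */
        q->flags |= 1UL << flag;
    }

    static void queue_flag_clear(unsigned int flag, struct queue_like *q)
    {
        assert(q->locked);
        q->flags &= ~(1UL << flag);
    }

    int main(void)
    {
        struct queue_like q = { .flags = 0, .locked = true };

        queue_flag_set(QUEUE_FLAG_WRITEFULL, &q);
        printf("writefull: %lu\n", (q.flags >> QUEUE_FLAG_WRITEFULL) & 1);
        queue_flag_clear(QUEUE_FLAG_WRITEFULL, &q);
        printf("writefull: %lu\n", (q.flags >> QUEUE_FLAG_WRITEFULL) & 1);
        return 0;
    }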
 
 
@@ -583,6 +620,7 @@ extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
+extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(struct request_queue *, struct request *);
 extern void blk_end_sync_rq(struct request *rq, int error);
@@ -626,6 +664,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
+extern void __blk_run_queue(struct request_queue *);
 extern void blk_run_queue(struct request_queue *);
 extern void blk_start_queueing(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
index 4e4e340592fb08b26cb1c3c5cd7c14d7a9ff9bc9..6a5dbdc8a7dcef7fc265f5d68c7dbe91e42e92ad 100644 (file)
@@ -101,6 +101,8 @@ extern void reserve_bootmem_node(pg_data_t *pgdat,
 extern void free_bootmem_node(pg_data_t *pgdat,
                              unsigned long addr,
                              unsigned long size);
+extern void *alloc_bootmem_section(unsigned long size,
+                                  unsigned long section_nr);
 
 #ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
 #define alloc_bootmem_node(pgdat, x) \
index e8406c55c6d31885a3adddfba69a40378c961c19..cf0303a6061195de840a22d97ff24a7b41d4d49e 100644 (file)
@@ -56,19 +56,25 @@ struct sg_io_v4 {
 #if defined(CONFIG_BLK_DEV_BSG)
 struct bsg_class_device {
        struct device *class_dev;
-       struct device *dev;
+       struct device *parent;
        int minor;
        struct request_queue *queue;
+       struct kref ref;
+       void (*release)(struct device *);
 };
 
-extern int bsg_register_queue(struct request_queue *, struct device *, const char *);
+extern int bsg_register_queue(struct request_queue *q,
+                             struct device *parent, const char *name,
+                             void (*release)(struct device *));
 extern void bsg_unregister_queue(struct request_queue *);
 #else
-static inline int bsg_register_queue(struct request_queue * rq, struct device *dev, const char *name)
+static inline int bsg_register_queue(struct request_queue *q,
+                                    struct device *parent, const char *name,
+                                    void (*release)(struct device *))
 {
        return 0;
 }
-static inline void bsg_unregister_queue(struct request_queue *rq)
+static inline void bsg_unregister_queue(struct request_queue *q)
 {
 }
 #endif
index 932eb02a2753544706983630b58006d4aafa761c..82aa36c53ea7a38e413a384a646f241ef1ebaf1c 100644 (file)
@@ -225,7 +225,6 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct page *page,
                                get_block_t get_block);
 void block_sync_page(struct page *);
 sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
-int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
 int block_truncate_page(struct address_space *, loff_t, get_block_t *);
 int file_fsync(struct file *, struct dentry *, int);
 int nobh_write_begin(struct file *, struct address_space *,
index 79bedddfd9ca15c02d9fa91793d24027fa5f6b8c..1133d5f9d8181c4811362c6992b29b157a0dbb4e 100644 (file)
@@ -1,5 +1,3 @@
-header-y += big_endian.h
-header-y += little_endian.h
-
-unifdef-y += generic.h
+unifdef-y += big_endian.h
+unifdef-y += little_endian.h
 unifdef-y += swab.h
index bef87891cb2405941c7f4b4509e4b0f10461b984..961ed4b48d8eba1b7d72628cec0f65be7cacde32 100644 (file)
@@ -101,6 +101,8 @@ static inline __u16 __be16_to_cpup(const __be16 *p)
 #define __cpu_to_be16s(x) do {} while (0)
 #define __be16_to_cpus(x) do {} while (0)
 
+#ifdef __KERNEL__
 #include <linux/byteorder/generic.h>
+#endif
 
 #endif /* _LINUX_BYTEORDER_BIG_ENDIAN_H */
index d3771551fdd9046108596ccc185c431062cb8c01..0846e6b931cefc9ea9baedfe9bfc93300e4c474b 100644 (file)
  *
  */
 
-
-#if defined(__KERNEL__)
-/*
- * inside the kernel, we can use nicknames;
- * outside of it, we must avoid POSIX namespace pollution...
- */
 #define cpu_to_le64 __cpu_to_le64
 #define le64_to_cpu __le64_to_cpu
 #define cpu_to_le32 __cpu_to_le32
@@ -176,6 +170,4 @@ static inline void be64_add_cpu(__be64 *var, u64 val)
        *var = cpu_to_be64(be64_to_cpu(*var) + val);
 }
 
-#endif /* KERNEL */
-
 #endif /* _LINUX_BYTEORDER_GENERIC_H */
index 86e62b7501769fa14e8e6029265edcc694f5a692..05dc7c35b3b21e93956f5c462f24a79305749aed 100644 (file)
@@ -101,6 +101,8 @@ static inline __u16 __be16_to_cpup(const __be16 *p)
 #define __cpu_to_be16s(x) __swab16s((x))
 #define __be16_to_cpus(x) __swab16s((x))
 
+#ifdef __KERNEL__
 #include <linux/byteorder/generic.h>
+#endif
 
 #endif /* _LINUX_BYTEORDER_LITTLE_ENDIAN_H */
index 4552504c02289239953aab738adede76c8da20f3..97e24881c4c6f477496130c982e1962e770df9eb 100644 (file)
@@ -60,4 +60,8 @@
 #endif
 #endif
 
+#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
+#define cache_line_size()      L1_CACHE_BYTES
+#endif
+
 #endif /* __LINUX_CACHE_H */
diff --git a/include/linux/calc64.h b/include/linux/calc64.h
deleted file mode 100644 (file)
index ebf4b8f..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-#ifndef _LINUX_CALC64_H
-#define _LINUX_CALC64_H
-
-#include <linux/types.h>
-#include <asm/div64.h>
-
-/*
- * This is a generic macro which is used when the architecture
- * specific div64.h does not provide a optimized one.
- *
- * The 64bit dividend is divided by the divisor (data type long), the
- * result is returned and the remainder stored in the variable
- * referenced by remainder (data type long *). In contrast to the
- * do_div macro the dividend is kept intact.
- */
-#ifndef div_long_long_rem
-#define div_long_long_rem(dividend, divisor, remainder)        \
-       do_div_llr((dividend), divisor, remainder)
-
-static inline unsigned long do_div_llr(const long long dividend,
-                                      const long divisor, long *remainder)
-{
-       u64 result = dividend;
-
-       *(remainder) = do_div(result, divisor);
-       return (unsigned long) result;
-}
-#endif
-
-/*
- * Sign aware variation of the above. On some architectures a
- * negative dividend leads to an divide overflow exception, which
- * is avoided by the sign check.
- */
-static inline long div_long_long_rem_signed(const long long dividend,
-                                           const long divisor, long *remainder)
-{
-       long res;
-
-       if (unlikely(dividend < 0)) {
-               res = -div_long_long_rem(-dividend, divisor, remainder);
-               *remainder = -(*remainder);
-       } else
-               res = div_long_long_rem(dividend, divisor, remainder);
-
-       return res;
-}
-
-#endif
index 7d50ff6d269fc6eb75d298e36140dfc012a8daf5..f4ea0dd9a618c737228cffd9fba1a52eea2fca64 100644 (file)
@@ -155,6 +155,7 @@ typedef struct kernel_cap_struct {
  *   Add any capability from current's capability bounding set
  *       to the current process' inheritable set
  *   Allow taking bits out of capability bounding set
+ *   Allow modification of the securebits for a process
  */
 
 #define CAP_SETPCAP          8
@@ -364,12 +365,12 @@ typedef struct kernel_cap_struct {
 # error Fix up hand-coded capability macro initializers
 #else /* HAND-CODED capability initializers */
 
-# define CAP_EMPTY_SET    {{ 0, 0 }}
-# define CAP_FULL_SET     {{ ~0, ~0 }}
-# define CAP_INIT_EFF_SET {{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }}
-# define CAP_FS_SET       {{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } }
-# define CAP_NFSD_SET     {{ CAP_FS_MASK_B0|CAP_TO_MASK(CAP_SYS_RESOURCE), \
-                            CAP_FS_MASK_B1 } }
+# define CAP_EMPTY_SET    ((kernel_cap_t){{ 0, 0 }})
+# define CAP_FULL_SET     ((kernel_cap_t){{ ~0, ~0 }})
+# define CAP_INIT_EFF_SET ((kernel_cap_t){{ ~CAP_TO_MASK(CAP_SETPCAP), ~0 }})
+# define CAP_FS_SET       ((kernel_cap_t){{ CAP_FS_MASK_B0, CAP_FS_MASK_B1 } })
+# define CAP_NFSD_SET     ((kernel_cap_t){{ CAP_FS_MASK_B0|CAP_TO_MASK(CAP_SYS_RESOURCE), \
+                                       CAP_FS_MASK_B1 } })
 
 #endif /* _LINUX_CAPABILITY_U32S != 2 */
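The CAP_*_SET changes above turn brace-only initializers into C99 compound literals of type kernel_cap_t, so they work as ordinary rvalues (assignment, argument passing) and not only in initializations. A standalone sketch of the same idiom with a two-word stand-in type (capset_t and its helper are invented for illustration):

    #include <stdio.h>

    typedef struct { unsigned int cap[2]; } capset_t;

    /* Compound literals: usable anywhere an rvalue of type capset_t is allowed. */
    #define CAPSET_EMPTY ((capset_t){ { 0, 0 } })
    #define CAPSET_FULL  ((capset_t){ { ~0u, ~0u } })

    static int capset_isclear(capset_t s)
    {
        return s.cap[0] == 0 && s.cap[1] == 0;
    }

    int main(void)
    {
        capset_t s = CAPSET_FULL;       /* still fine as an initializer */

        s = CAPSET_EMPTY;               /* plain assignment now works too */
        printf("cleared: %d\n", capset_isclear(s));
        return 0;
    }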
 
@@ -490,8 +491,6 @@ extern const kernel_cap_t __cap_init_eff_set;
 int capable(int cap);
 int __capable(struct task_struct *t, int cap);
 
-extern long cap_prctl_drop(unsigned long cap);
-
 #endif /* __KERNEL__ */
 
 #endif /* !_LINUX_CAPABILITY_H */
index 1e29b13d00621ee32fcb50f1e08b4dd93b62afcd..fb4591977b039a80587fa79d899c2f86d632657b 100644 (file)
@@ -1,6 +1,5 @@
 #ifndef _LINUX_CDEV_H
 #define _LINUX_CDEV_H
-#ifdef __KERNEL__
 
 #include <linux/kobject.h>
 #include <linux/kdev_t.h>
@@ -34,4 +33,3 @@ void cd_forget(struct inode *);
 extern struct backing_dev_info directly_mappable_cdev_bdi;
 
 #endif
-#endif
index a6a6035a4e1ec16d1e27a3f2470fc316c476cd6a..e155aa78d8594eb545547f48f47c7b53bf9fc36a 100644 (file)
@@ -88,6 +88,17 @@ static inline void css_put(struct cgroup_subsys_state *css)
                __css_put(css);
 }
 
+/* bits in struct cgroup flags field */
+enum {
+       /* Control Group is dead */
+       CGRP_REMOVED,
+       /* Control Group has previously had a child cgroup or a task,
+        * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */
+       CGRP_RELEASABLE,
+       /* Control Group requires release notifications to userspace */
+       CGRP_NOTIFY_ON_RELEASE,
+};
+
 struct cgroup {
        unsigned long flags;            /* "unsigned long" so bitops work */
 
@@ -139,10 +150,10 @@ struct css_set {
        struct kref ref;
 
        /*
-        * List running through all cgroup groups. Protected by
-        * css_set_lock
+        * List running through all cgroup groups in the same hash
+        * slot. Protected by css_set_lock
         */
-       struct list_head list;
+       struct hlist_node hlist;
 
        /*
         * List running through all tasks using this cgroup
@@ -163,7 +174,16 @@ struct css_set {
         * during subsystem registration (at boot time).
         */
        struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
+};
+
+/*
+ * cgroup_map_cb is an abstract callback API for reporting map-valued
+ * control files
+ */
 
+struct cgroup_map_cb {
+       int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value);
+       void *state;
 };
 
 /* struct cftype:
@@ -190,20 +210,51 @@ struct cftype {
                         struct file *file,
                         char __user *buf, size_t nbytes, loff_t *ppos);
        /*
-        * read_uint() is a shortcut for the common case of returning a
+        * read_u64() is a shortcut for the common case of returning a
         * single integer. Use it in place of read()
         */
-       u64 (*read_uint) (struct cgroup *cgrp, struct cftype *cft);
+       u64 (*read_u64) (struct cgroup *cgrp, struct cftype *cft);
+       /*
+        * read_s64() is a signed version of read_u64()
+        */
+       s64 (*read_s64) (struct cgroup *cgrp, struct cftype *cft);
+       /*
+        * read_map() is used for defining a map of key/value
+        * pairs. It should call cb->fill(cb, key, value) for each
+        * entry. The key/value pairs (and their ordering) should not
+        * change between reboots.
+        */
+       int (*read_map) (struct cgroup *cont, struct cftype *cft,
+                        struct cgroup_map_cb *cb);
+       /*
+        * read_seq_string() is used for outputting a simple sequence
+        * using seqfile.
+        */
+       int (*read_seq_string) (struct cgroup *cont, struct cftype *cft,
+                        struct seq_file *m);
+
        ssize_t (*write) (struct cgroup *cgrp, struct cftype *cft,
                          struct file *file,
                          const char __user *buf, size_t nbytes, loff_t *ppos);
 
        /*
-        * write_uint() is a shortcut for the common case of accepting
+        * write_u64() is a shortcut for the common case of accepting
         * a single integer (as parsed by simple_strtoull) from
         * userspace. Use in place of write(); return 0 or error.
         */
-       int (*write_uint) (struct cgroup *cgrp, struct cftype *cft, u64 val);
+       int (*write_u64) (struct cgroup *cgrp, struct cftype *cft, u64 val);
+       /*
+        * write_s64() is a signed version of write_u64()
+        */
+       int (*write_s64) (struct cgroup *cgrp, struct cftype *cft, s64 val);
+
+       /*
+        * trigger() callback can be used to get some kick from the
+        * userspace, when the actual string written is not important
+        * at all. The private field can be used to determine the
+        * kick type for multiplexing.
+        */
+       int (*trigger)(struct cgroup *cgrp, unsigned int event);
 
        int (*release) (struct inode *inode, struct file *file);
 };
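read_map() above reports map-valued control files through an abstract callback: the controller calls cb->fill(cb, key, value) once per entry and never formats output itself. A minimal userspace sketch of that callback shape (the stats, names and the stdio-backed fill function are invented for illustration):

    #include <stdio.h>

    struct map_cb {
        int (*fill)(struct map_cb *cb, const char *key, unsigned long long value);
        void *state;                    /* e.g. a seq_file in the kernel */
    };

    static int fill_to_stdio(struct map_cb *cb, const char *key,
                             unsigned long long value)
    {
        return fprintf((FILE *)cb->state, "%s %llu\n", key, value) < 0 ? -1 : 0;
    }

    /* A controller's read_map()-style reporter: it knows the keys, not the
     * output format. */
    static int report_stats(struct map_cb *cb)
    {
        int ret = cb->fill(cb, "cache", 4096ULL);

        if (!ret)
            ret = cb->fill(cb, "rss", 8192ULL);
        return ret;
    }

    int main(void)
    {
        struct map_cb cb = { .fill = fill_to_stdio, .state = stdout };

        return report_stats(&cb);
    }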
@@ -254,6 +305,12 @@ struct cgroup_subsys {
                        struct cgroup *cgrp);
        void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp);
        void (*bind)(struct cgroup_subsys *ss, struct cgroup *root);
+       /*
+        * This routine is called with the task_lock of mm->owner held
+        */
+       void (*mm_owner_changed)(struct cgroup_subsys *ss,
+                                       struct cgroup *old,
+                                       struct cgroup *new);
        int subsys_id;
        int active;
        int disabled;
@@ -339,4 +396,13 @@ static inline int cgroupstats_build(struct cgroupstats *stats,
 
 #endif /* !CONFIG_CGROUPS */
 
+#ifdef CONFIG_MM_OWNER
+extern void
+cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new);
+#else /* !CONFIG_MM_OWNER */
+static inline void
+cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
+{
+}
+#endif /* CONFIG_MM_OWNER */
 #endif /* _LINUX_CGROUP_H */
index 1ddebfc52565f3eb4894c422c1fc5dd831ae523c..e2877454ec82df3205c0251f9c9759db9334dedb 100644 (file)
@@ -42,3 +42,9 @@ SUBSYS(mem_cgroup)
 #endif
 
 /* */
+
+#ifdef CONFIG_CGROUP_DEVICE
+SUBSYS(devices)
+#endif
+
+/* */
index 35094479ca557012e043ea0def0fbf9b6df11e65..55e434feec993d9656ccf6bcd31964005daa9667 100644 (file)
@@ -93,6 +93,8 @@ struct clocksource {
 #endif
 };
 
+extern struct clocksource *clock;      /* current clocksource */
+
 /*
  * Clock source flags bits:
  */
index 424fe9cf02c4efd833aaf53847ee985b735aa22e..b3ef0c461578c834c1266dc10b0390c51658aae6 100644 (file)
@@ -8,7 +8,6 @@
 #ifndef _LINUX_CODA_FS_I
 #define _LINUX_CODA_FS_I
 
-#ifdef __KERNEL__
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/coda.h>
@@ -52,4 +51,3 @@ struct inode *coda_fid_to_inode(struct CodaFid *fid, struct super_block *sb);
 void coda_replace_fid(struct inode *, struct CodaFid *, struct CodaFid *);
 
 #endif
-#endif
index 1c47a34aa79442f738bebf25f01756ddaceb586d..31b75311e2ca381bc4d12fef44f577919a7e57a5 100644 (file)
@@ -43,9 +43,6 @@ int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *);
 int coda_setattr(struct dentry *, struct iattr *);
 
 /* this file:  helpers */
-static __inline__ struct CodaFid *coda_i2f(struct inode *);
-static __inline__ char *coda_i2s(struct inode *);
-static __inline__ void coda_flag_inode(struct inode *, int flag);
 char *coda_f2s(struct CodaFid *f);
 int coda_isroot(struct inode *i);
 int coda_iscontrol(const char *name, size_t length);
index 8fa7857e153bc7f7649ea25e088924fb0a6190f9..cf8d11cad5aef24dd99c57c3f7406d9ea4c541d6 100644 (file)
@@ -65,10 +65,11 @@ struct compat_timex {
        compat_long_t calcnt;
        compat_long_t errcnt;
        compat_long_t stbcnt;
+       compat_int_t tai;
 
        compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
        compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
-       compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32;
+       compat_int_t :32; compat_int_t :32; compat_int_t :32;
 };
 
 #define _COMPAT_NSIG_WORDS     (_COMPAT_NSIG / _COMPAT_NSIG_BPW)
index b2fd7547b58deaa5e90b0fa5b384c0581da19d58..5c8351b859f03f2fb62c1de00c5150b3e40b8c92 100644 (file)
   BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0])))
 
 /*
- * Force always-inline if the user requests it so via the .config:
+ * Force always-inline if the user requests it so via the .config,
+ * or if gcc is too old:
  */
 #if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
-    !defined(CONFIG_OPTIMIZE_INLINING) && (__GNUC__ >= 4)
+    !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
 # define inline                inline          __attribute__((always_inline))
 # define __inline__    __inline__      __attribute__((always_inline))
 # define __inline      __inline        __attribute__((always_inline))
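After the fix, the condition reads: force always_inline unless the architecture supports optimized inlining, CONFIG_OPTIMIZE_INLINING is enabled, and gcc is at least version 4. For reference, the attribute the header attaches to 'inline' looks like this in plain GCC C (force_inline is a stand-in name for the redefined keyword):

    #include <stdio.h>

    /* What the header arranges: every 'inline' carries the attribute. */
    #define force_inline inline __attribute__((always_inline))

    static force_inline int add1(int x)
    {
        return x + 1;           /* inlined even at -O0, where plain inline is not */
    }

    int main(void)
    {
        printf("%d\n", add1(41));
        return 0;
    }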
index 27304651d700ef7048294e4e001a96d099ce356a..977acb3d1fb2ee81534b5906b3817f90455454ac 100644 (file)
@@ -8,7 +8,7 @@
 
 #ifndef _LINUX_CONCAP_H
 #define _LINUX_CONCAP_H
-#ifdef __KERNEL__
+
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 
@@ -110,4 +110,3 @@ extern int concap_nop(struct concap_proto *cprot);
  */
 extern int concap_drop_skb(struct concap_proto *cprot, struct sk_buff *skb);
 #endif
-#endif
index 4b287ad9371abe101956dd2a05015ca7cda7459f..3ae65b1bf90fd891b32538390c103f12632560fd 100644 (file)
@@ -35,8 +35,6 @@
 #ifndef _CONFIGFS_H_
 #define _CONFIGFS_H_
 
-#ifdef __KERNEL__
-
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/list.h>
@@ -194,6 +192,4 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys);
 int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
 void configfs_undepend_item(struct configfs_subsystem *subsys, struct config_item *target);
 
-#endif  /* __KERNEL__ */
-
 #endif /* _CONFIGFS_H_ */
index a5f88a6a259d84a92e93a905a7937b03b7501dbf..a4f27fbdf5494ee885caa967a24d1934744386a3 100644 (file)
@@ -91,6 +91,7 @@ void give_up_console(const struct consw *sw);
 #define CON_ENABLED    (4)
 #define CON_BOOT       (8)
 #define CON_ANYTIME    (16) /* Safe to call when cpu is offline */
+#define CON_BRL                (32) /* Used for a braille device */
 
 struct console {
        char    name[16];
@@ -121,6 +122,9 @@ extern struct tty_driver *console_device(int *);
 extern void console_stop(struct console *);
 extern void console_start(struct console *);
 extern int is_console_locked(void);
+extern int braille_register_console(struct console *, int index,
+               char *console_options, char *braille_options);
+extern int braille_unregister_console(struct console *);
 
 extern int console_suspend_enabled;
 
index d71f7c0f931b34b63bfacde2b06d19ffc5001616..b03f80a078bee4f8d822dd90fc73545def4adcce 100644 (file)
@@ -53,6 +53,7 @@ struct vc_data {
        unsigned short  vc_hi_font_mask;        /* [#] Attribute set for upper 256 chars of font or 0 if not supported */
        struct console_font vc_font;            /* Current VC font set */
        unsigned short  vc_video_erase_char;    /* Background erase character */
+       unsigned short  vc_scrl_erase_char;     /* Erase character for scroll */
        /* VT terminal data */
        unsigned int    vc_state;               /* Escape sequence parser state */
        unsigned int    vc_npar,vc_par[NPAR];   /* Parameters of current escape sequence */
index f212fa98283e83425ea9db7ee2ba5e3de55e173b..7464ba3b4333980268d4af8e71318a0b27a37e9d 100644 (file)
@@ -108,7 +108,7 @@ static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
 extern void get_online_cpus(void);
 extern void put_online_cpus(void);
 #define hotcpu_notifier(fn, pri) {                             \
-       static struct notifier_block fn##_nb =                  \
+       static struct notifier_block fn##_nb __cpuinitdata =    \
                { .notifier_call = fn, .priority = pri };       \
        register_cpu_notifier(&fn##_nb);                        \
 }
index ddd8652fc3f38a91d5f471fd70a534e21337ba12..e7e91dbfde0f25927490cf1c9d18c3587b8d7ea3 100644 (file)
@@ -83,7 +83,8 @@ struct cpufreq_real_policy {
 };
 
 struct cpufreq_policy {
-       cpumask_t               cpus;   /* affected CPUs */
+       cpumask_t               cpus;   /* CPUs requiring sw coordination */
+       cpumask_t               related_cpus; /* CPUs with any coordination */
        unsigned int            shared_type; /* ANY or ALL affected CPUs
                                                should set cpufreq */
        unsigned int            cpu;    /* cpu nr of registered CPU */
@@ -307,6 +308,9 @@ extern struct cpufreq_governor cpufreq_gov_performance;
 #endif
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
 #define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_performance)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE)
+extern struct cpufreq_governor cpufreq_gov_powersave;
+#define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_powersave)
 #elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE)
 extern struct cpufreq_governor cpufreq_gov_userspace;
 #define CPUFREQ_DEFAULT_GOVERNOR       (&cpufreq_gov_userspace)
index 259c8051155d9e34c783f9499b606ac780f8dd84..9650806fe2ea266a6c786ec9a95d96adfe8269d3 100644 (file)
@@ -14,6 +14,8 @@
  * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
  * For details of cpu_remap(), see bitmap_bitremap in lib/bitmap.c
  * For details of cpus_remap(), see bitmap_remap in lib/bitmap.c.
+ * For details of cpus_onto(), see bitmap_onto in lib/bitmap.c.
+ * For details of cpus_fold(), see bitmap_fold in lib/bitmap.c.
  *
  * The available cpumask operations are:
  *
@@ -53,7 +55,9 @@
  * int cpulist_scnprintf(buf, len, mask) Format cpumask as list for printing
  * int cpulist_parse(buf, map)         Parse ascii string as cpulist
  * int cpu_remap(oldbit, old, new)     newbit = map(old, new)(oldbit)
- * int cpus_remap(dst, src, old, new)  *dst = map(old, new)(src)
+ * void cpus_remap(dst, src, old, new) *dst = map(old, new)(src)
+ * void cpus_onto(dst, orig, relmap)   *dst = orig relative to relmap
+ * void cpus_fold(dst, orig, sz)       dst bits = orig bits mod sz
  *
  * for_each_cpu_mask(cpu, mask)                for-loop cpu over mask
  *
@@ -330,6 +334,22 @@ static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
        bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
 }
 
+#define cpus_onto(dst, orig, relmap) \
+               __cpus_onto(&(dst), &(orig), &(relmap), NR_CPUS)
+static inline void __cpus_onto(cpumask_t *dstp, const cpumask_t *origp,
+               const cpumask_t *relmapp, int nbits)
+{
+       bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
+}
+
+#define cpus_fold(dst, orig, sz) \
+               __cpus_fold(&(dst), &(orig), sz, NR_CPUS)
+static inline void __cpus_fold(cpumask_t *dstp, const cpumask_t *origp,
+               int sz, int nbits)
+{
+       bitmap_fold(dstp->bits, origp->bits, sz, nbits);
+}
+
 #if NR_CPUS > 1
 #define for_each_cpu_mask(cpu, mask)           \
        for ((cpu) = first_cpu(mask);           \
index 726761e2400365c1e5dc92d627dee8f2e06c837c..038578362b47fba21d555593b635c5d0eabebe15 100644 (file)
@@ -26,7 +26,7 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
 #define cpuset_current_mems_allowed (current->mems_allowed)
 void cpuset_init_current_mems_allowed(void);
 void cpuset_update_task_memory_state(void);
-int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl);
+int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
 
 extern int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask);
 extern int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask);
@@ -103,7 +103,7 @@ static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
 static inline void cpuset_init_current_mems_allowed(void) {}
 static inline void cpuset_update_task_memory_state(void) {}
 
-static inline int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
+static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
 {
        return 1;
 }
index 90037617da8fedf515b269d8ed766074f55244d0..f52696a1ff0daa40414af9b071678280a373655a 100644 (file)
@@ -1,6 +1,5 @@
 #ifndef _LINUX_CRC_CCITT_H
 #define _LINUX_CRC_CCITT_H
-#ifdef __KERNEL__
 
 #include <linux/types.h>
 
@@ -13,5 +12,4 @@ static inline u16 crc_ccitt_byte(u16 crc, const u8 c)
        return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff];
 }
 
-#endif /* __KERNEL__ */
 #endif /* _LINUX_CRC_CCITT_H */
index cfb1627ac51c88866b506953f4a52f41ff4620c4..2a6639407c8037581ce7baa58e95c8c936a26245 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef __LINUX_DCACHE_H
 #define __LINUX_DCACHE_H
 
-#ifdef __KERNEL__
-
 #include <asm/atomic.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
@@ -365,6 +363,4 @@ extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
 
 extern int sysctl_vfs_cache_pressure;
 
-#endif /* __KERNEL__ */
-
 #endif /* __LINUX_DCACHE_H */
diff --git a/include/linux/debugobjects.h b/include/linux/debugobjects.h
new file mode 100644 (file)
index 0000000..8c243aa
--- /dev/null
@@ -0,0 +1,90 @@
+#ifndef _LINUX_DEBUGOBJECTS_H
+#define _LINUX_DEBUGOBJECTS_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+enum debug_obj_state {
+       ODEBUG_STATE_NONE,
+       ODEBUG_STATE_INIT,
+       ODEBUG_STATE_INACTIVE,
+       ODEBUG_STATE_ACTIVE,
+       ODEBUG_STATE_DESTROYED,
+       ODEBUG_STATE_NOTAVAILABLE,
+       ODEBUG_STATE_MAX,
+};
+
+struct debug_obj_descr;
+
+/**
+ * struct debug_obj - representation of a tracked object
+ * @node:      hlist node to link the object into the tracker list
+ * @state:     tracked object state
+ * @object:    pointer to the real object
+ * @descr:     pointer to an object type specific debug description structure
+ */
+struct debug_obj {
+       struct hlist_node       node;
+       enum debug_obj_state    state;
+       void                    *object;
+       struct debug_obj_descr  *descr;
+};
+
+/**
+ * struct debug_obj_descr - object type specific debug description structure
+ * @name:              name of the object type
+ * @fixup_init:                fixup function, which is called when the init check
+ *                     fails
+ * @fixup_activate:    fixup function, which is called when the activate check
+ *                     fails
+ * @fixup_destroy:     fixup function, which is called when the destroy check
+ *                     fails
+ * @fixup_free:                fixup function, which is called when the free check
+ *                     fails
+ */
+struct debug_obj_descr {
+       const char              *name;
+
+       int (*fixup_init)       (void *addr, enum debug_obj_state state);
+       int (*fixup_activate)   (void *addr, enum debug_obj_state state);
+       int (*fixup_destroy)    (void *addr, enum debug_obj_state state);
+       int (*fixup_free)       (void *addr, enum debug_obj_state state);
+};
+
+#ifdef CONFIG_DEBUG_OBJECTS
+extern void debug_object_init      (void *addr, struct debug_obj_descr *descr);
+extern void
+debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr);
+extern void debug_object_activate  (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
+extern void debug_object_destroy   (void *addr, struct debug_obj_descr *descr);
+extern void debug_object_free      (void *addr, struct debug_obj_descr *descr);
+
+extern void debug_objects_early_init(void);
+extern void debug_objects_mem_init(void);
+#else
+static inline void
+debug_object_init      (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_activate  (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_destroy   (void *addr, struct debug_obj_descr *descr) { }
+static inline void
+debug_object_free      (void *addr, struct debug_obj_descr *descr) { }
+
+static inline void debug_objects_early_init(void) { }
+static inline void debug_objects_mem_init(void) { }
+#endif
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+extern void debug_check_no_obj_freed(const void *address, unsigned long size);
+#else
+static inline void
+debug_check_no_obj_freed(const void *address, unsigned long size) { }
+#endif
+
+#endif
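debugobjects tracks a lifecycle state per object and, when an operation is attempted in the wrong state, calls the matching fixup hook from the object type's debug_obj_descr. A compact userspace sketch of that state-plus-fixup shape (the activate rule and the timer example are simplified illustrations, not the kernel's checking logic):

    #include <stdio.h>

    enum obj_state { STATE_NONE, STATE_INIT, STATE_ACTIVE };

    struct obj_descr {
        const char *name;
        /* Called when an activate check fails; returns 1 if it fixed things up. */
        int (*fixup_activate)(void *addr, enum obj_state state);
    };

    struct tracked {
        enum obj_state state;
        void *object;
        const struct obj_descr *descr;
    };

    static int timer_fixup_activate(void *addr, enum obj_state state)
    {
        printf("fixup: object %p activated from state %d, initialising it\n",
               addr, state);
        return 1;                       /* pretend we initialised it */
    }

    static void object_activate(struct tracked *t)
    {
        if (t->state != STATE_INIT) {
            /* Check failed: give the type-specific fixup hook a chance. */
            if (!t->descr->fixup_activate(t->object, t->state))
                return;                 /* could not be fixed up; refuse */
        }
        t->state = STATE_ACTIVE;
    }

    int main(void)
    {
        static const struct obj_descr timer_descr = {
            "timer", timer_fixup_activate
        };
        int payload;
        struct tracked t = { STATE_NONE, &payload, &timer_descr };

        object_activate(&t);            /* wrong state, triggers the fixup */
        printf("state: %d\n", t.state); /* 2 == STATE_ACTIVE */
        return 0;
    }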
index ad3b787479a49ef54daff54e12f505b2833fd731..0d8d419d191ab1022036efee46bd7074968890e3 100644 (file)
@@ -8,8 +8,6 @@
 #ifndef _LINUX_DEVICE_MAPPER_H
 #define _LINUX_DEVICE_MAPPER_H
 
-#ifdef __KERNEL__
-
 #include <linux/bio.h>
 
 struct dm_target;
@@ -344,5 +342,4 @@ static inline unsigned long to_bytes(sector_t n)
        return (n << SECTOR_SHIFT);
 }
 
-#endif /* __KERNEL__ */
 #endif /* _LINUX_DEVICE_MAPPER_H */
index 1a060265aceae6f31b2875d86f660dc9baaee1ab..832fb0eb293368793cef2565db179a333907c09d 100644 (file)
@@ -35,7 +35,6 @@ struct device;
 struct device_driver;
 struct driver_private;
 struct class;
-struct class_device;
 struct bus_type;
 struct bus_type_private;
 
@@ -190,13 +189,10 @@ struct class {
        struct kset             class_dirs;
        struct semaphore        sem; /* locks children, devices, interfaces */
        struct class_attribute          *class_attrs;
-       struct class_device_attribute   *class_dev_attrs;
        struct device_attribute         *dev_attrs;
 
-       int (*uevent)(struct class_device *dev, struct kobj_uevent_env *env);
        int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
 
-       void (*release)(struct class_device *dev);
        void (*class_release)(struct class *class);
        void (*dev_release)(struct device *dev);
 
@@ -210,9 +206,6 @@ extern int class_for_each_device(struct class *class, void *data,
                                 int (*fn)(struct device *dev, void *data));
 extern struct device *class_find_device(struct class *class, void *data,
                                        int (*match)(struct device *, void *));
-extern struct class_device *class_find_child(struct class *class, void *data,
-                                  int (*match)(struct class_device *, void *));
-
 
 struct class_attribute {
        struct attribute attr;
@@ -228,92 +221,10 @@ extern int __must_check class_create_file(struct class *class,
 extern void class_remove_file(struct class *class,
                              const struct class_attribute *attr);
 
-struct class_device_attribute {
-       struct attribute attr;
-       ssize_t (*show)(struct class_device *, char *buf);
-       ssize_t (*store)(struct class_device *, const char *buf, size_t count);
-};
-
-#define CLASS_DEVICE_ATTR(_name, _mode, _show, _store)         \
-struct class_device_attribute class_device_attr_##_name =      \
-       __ATTR(_name, _mode, _show, _store)
-
-extern int __must_check class_device_create_file(struct class_device *,
-                                   const struct class_device_attribute *);
-
-/**
- * struct class_device - class devices
- * @class: pointer to the parent class for this class device.  This is required.
- * @devt: for internal use by the driver core only.
- * @node: for internal use by the driver core only.
- * @kobj: for internal use by the driver core only.
- * @groups: optional additional groups to be created
- * @dev: if set, a symlink to the struct device is created in the sysfs
- * directory for this struct class device.
- * @class_data: pointer to whatever you want to store here for this struct
- * class_device.  Use class_get_devdata() and class_set_devdata() to get and
- * set this pointer.
- * @parent: pointer to a struct class_device that is the parent of this struct
- * class_device.  If NULL, this class_device will show up at the root of the
- * struct class in sysfs (which is probably what you want to have happen.)
- * @release: pointer to a release function for this struct class_device.  If
- * set, this will be called instead of the class specific release function.
- * Only use this if you want to override the default release function, like
- * when you are nesting class_device structures.
- * @uevent: pointer to a uevent function for this struct class_device.  If
- * set, this will be called instead of the class specific uevent function.
- * Only use this if you want to override the default uevent function, like
- * when you are nesting class_device structures.
- */
-struct class_device {
-       struct list_head        node;
-
-       struct kobject          kobj;
-       struct class            *class;
-       dev_t                   devt;
-       struct device           *dev;
-       void                    *class_data;
-       struct class_device     *parent;
-       struct attribute_group  **groups;
-
-       void (*release)(struct class_device *dev);
-       int (*uevent)(struct class_device *dev, struct kobj_uevent_env *env);
-       char class_id[BUS_ID_SIZE];
-};
-
-static inline void *class_get_devdata(struct class_device *dev)
-{
-       return dev->class_data;
-}
-
-static inline void class_set_devdata(struct class_device *dev, void *data)
-{
-       dev->class_data = data;
-}
-
-
-extern int __must_check class_device_register(struct class_device *);
-extern void class_device_unregister(struct class_device *);
-extern void class_device_initialize(struct class_device *);
-extern int __must_check class_device_add(struct class_device *);
-extern void class_device_del(struct class_device *);
-
-extern struct class_device *class_device_get(struct class_device *);
-extern void class_device_put(struct class_device *);
-
-extern void class_device_remove_file(struct class_device *,
-                                    const struct class_device_attribute *);
-extern int __must_check class_device_create_bin_file(struct class_device *,
-                                       struct bin_attribute *);
-extern void class_device_remove_bin_file(struct class_device *,
-                                        struct bin_attribute *);
-
 struct class_interface {
        struct list_head        node;
        struct class            *class;
 
-       int (*add)      (struct class_device *, struct class_interface *);
-       void (*remove)  (struct class_device *, struct class_interface *);
        int (*add_dev)          (struct device *, struct class_interface *);
        void (*remove_dev)      (struct device *, struct class_interface *);
 };
@@ -323,13 +234,6 @@ extern void class_interface_unregister(struct class_interface *);
 
 extern struct class *class_create(struct module *owner, const char *name);
 extern void class_destroy(struct class *cls);
-extern struct class_device *class_device_create(struct class *cls,
-                                               struct class_device *parent,
-                                               dev_t devt,
-                                               struct device *device,
-                                               const char *fmt, ...)
-                                       __attribute__((format(printf, 5, 6)));
-extern void class_device_destroy(struct class *cls, dev_t devt);
 
 /*
  * The type of device, "struct device" is embedded in. A class
@@ -465,7 +369,6 @@ struct device {
        spinlock_t              devres_lock;
        struct list_head        devres_head;
 
-       /* class_device migration path */
        struct list_head        node;
        struct class            *class;
        dev_t                   devt;   /* dev_t, creates the sysfs "dev" */
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
new file mode 100644 (file)
index 0000000..0b0d9c3
--- /dev/null
@@ -0,0 +1,12 @@
+#include <linux/module.h>
+#include <linux/fs.h>
+
+#ifdef CONFIG_CGROUP_DEVICE
+extern int devcgroup_inode_permission(struct inode *inode, int mask);
+extern int devcgroup_inode_mknod(int mode, dev_t dev);
+#else
+static inline int devcgroup_inode_permission(struct inode *inode, int mask)
+{ return 0; }
+static inline int devcgroup_inode_mknod(int mode, dev_t dev)
+{ return 0; }
+#endif
index b672ddc007357bc2cd5395d34e6102f6870315a6..154769cad3f31c7207b3113ec5e00753a4684ee2 100644 (file)
@@ -17,6 +17,8 @@
 
 #ifdef CONFIG_UNIX98_PTYS
 
+int devpts_new_index(void);
+void devpts_kill_index(int idx);
 int devpts_pty_new(struct tty_struct *tty);      /* mknod in devpts */
 struct tty_struct *devpts_get_tty(int number);  /* get tty structure */
 void devpts_pty_kill(int number);               /* unlink */
@@ -24,6 +26,8 @@ void devpts_pty_kill(int number);              /* unlink */
 #else
 
 /* Dummy stubs in the no-pty case */
+static inline int devpts_new_index(void) { return -EINVAL; }
+static inline void devpts_kill_index(int idx) { }
 static inline int devpts_pty_new(struct tty_struct *tty) { return -EINVAL; }
 static inline struct tty_struct *devpts_get_tty(int number) { return NULL; }
 static inline void devpts_pty_kill(int number) { }
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
new file mode 100644 (file)
index 0000000..1677e2b
--- /dev/null
@@ -0,0 +1,74 @@
+#ifndef _DMA_ATTR_H
+#define _DMA_ATTR_H
+
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+
+/**
+ * an enum dma_attr represents an attribute associated with a DMA
+ * mapping. The semantics of each attribute should be defined in
+ * Documentation/DMA-attributes.txt.
+ */
+enum dma_attr {
+       DMA_ATTR_WRITE_BARRIER,
+       DMA_ATTR_MAX,
+};
+
+#define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX)
+
+/**
+ * struct dma_attrs - an opaque container for DMA attributes
+ * @flags - bitmask representing a collection of enum dma_attr
+ */
+struct dma_attrs {
+       unsigned long flags[__DMA_ATTRS_LONGS];
+};
+
+#define DEFINE_DMA_ATTRS(x)                                    \
+       struct dma_attrs x = {                                  \
+               .flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 },   \
+       }
+
+static inline void init_dma_attrs(struct dma_attrs *attrs)
+{
+       bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS);
+}
+
+#ifdef CONFIG_HAVE_DMA_ATTRS
+/**
+ * dma_set_attr - set a specific attribute
+ * @attr: attribute to set
+ * @attrs: struct dma_attrs (may be NULL)
+ */
+static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
+{
+       if (attrs == NULL)
+               return;
+       BUG_ON(attr >= DMA_ATTR_MAX);
+       __set_bit(attr, attrs->flags);
+}
+
+/**
+ * dma_get_attr - check for a specific attribute
+ * @attr: attribute to query
+ * @attrs: struct dma_attrs (may be NULL)
+ */
+static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
+{
+       if (attrs == NULL)
+               return 0;
+       BUG_ON(attr >= DMA_ATTR_MAX);
+       return test_bit(attr, attrs->flags);
+}
+#else /* !CONFIG_HAVE_DMA_ATTRS */
+static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
+{
+}
+
+static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
+{
+       return 0;
+}
+#endif /* CONFIG_HAVE_DMA_ATTRS */
+#endif /* _DMA_ATTR_H */
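struct dma_attrs above is simply a fixed-size bitmap indexed by enum dma_attr, and dma_set_attr()/dma_get_attr() tolerate a NULL attrs pointer meaning "no attributes". A standalone userspace sketch of the same container pattern, with plain C bit operations standing in for __set_bit()/test_bit() (the names mirror the header for readability only):

    #include <stdio.h>
    #include <string.h>

    enum attr { ATTR_WRITE_BARRIER, ATTR_MAX };

    #define BITS_PER_LONG   (8 * (int)sizeof(long))
    #define ATTRS_LONGS     ((ATTR_MAX + BITS_PER_LONG - 1) / BITS_PER_LONG)

    struct attrs { unsigned long flags[ATTRS_LONGS]; };

    static void set_attr(enum attr a, struct attrs *attrs)
    {
        if (!attrs)
            return;                     /* NULL means "no attributes" */
        attrs->flags[a / BITS_PER_LONG] |= 1UL << (a % BITS_PER_LONG);
    }

    static int get_attr(enum attr a, const struct attrs *attrs)
    {
        if (!attrs)
            return 0;
        return (attrs->flags[a / BITS_PER_LONG] >> (a % BITS_PER_LONG)) & 1;
    }

    int main(void)
    {
        struct attrs attrs;

        memset(&attrs, 0, sizeof(attrs));       /* init_dma_attrs() analogue */
        set_attr(ATTR_WRITE_BARRIER, &attrs);
        printf("barrier set:     %d\n", get_attr(ATTR_WRITE_BARRIER, &attrs));
        printf("with NULL attrs: %d\n", get_attr(ATTR_WRITE_BARRIER, NULL));
        return 0;
    }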
index 3320307096237a61495d785d751e897bc54a0102..952e0f857ac9d9545750bbd5f197df6034a99913 100644 (file)
@@ -146,4 +146,21 @@ static inline void dmam_release_declared_memory(struct device *dev)
 }
 #endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
 
+#ifndef CONFIG_HAVE_DMA_ATTRS
+struct dma_attrs;
+
+#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
+       dma_map_single(dev, cpu_addr, size, dir)
+
+#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
+       dma_unmap_single(dev, dma_addr, size, dir)
+
+#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
+       dma_map_sg(dev, sgl, nents, dir)
+
+#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
+       dma_unmap_sg(dev, sgl, nents, dir)
+
+#endif /* CONFIG_HAVE_DMA_ATTRS */
+
 #endif
index 325acdf5c4625c468f2f6108706ecb971efd856e..2a063b64133fcd23307e57b6b396fa935eb7c808 100644 (file)
@@ -90,6 +90,7 @@ static inline int dmi_check_system(const struct dmi_system_id *list) { return 0;
 static inline const char * dmi_get_system_info(int field) { return NULL; }
 static inline const struct dmi_device * dmi_find_device(int type, const char *name,
        const struct dmi_device *from) { return NULL; }
+static inline void dmi_scan_machine(void) { return; }
 static inline int dmi_get_year(int year) { return 0; }
 static inline int dmi_name_in_vendors(const char *s) { return 0; }
 #define dmi_available 0
index eab451e69a91a0892fee9fa556b4d3a652bdd056..7cf92e8a4196c0f6a38257cec095b1aa6d04939f 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Author: Dave Jiang <djiang@mvista.com>
  *
- * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
+ * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under
  * the terms of the GNU General Public License version 2. This program
  * is licensed "as is" without any warranty of any kind, whether express
  * or implied.
@@ -26,4 +26,16 @@ extern atomic_t edac_handlers;
 extern int edac_handler_set(void);
 extern void edac_atomic_assert_error(void);
 
+static inline void opstate_init(void)
+{
+       switch (edac_op_state) {
+       case EDAC_OPSTATE_POLL:
+       case EDAC_OPSTATE_NMI:
+               break;
+       default:
+               edac_op_state = EDAC_OPSTATE_POLL;
+       }
+       return;
+}
+
 #endif
index bad1b16ec49a73f432ed27a0c97131205f726c29..ff9fbed90123faff8fb8e3a7817a831f6bc5e276 100644 (file)
@@ -208,7 +208,7 @@ typedef struct elf32_hdr{
 } Elf32_Ehdr;
 
 typedef struct elf64_hdr {
-  unsigned char        e_ident[16];            /* ELF "magic number" */
+  unsigned char        e_ident[EI_NIDENT];     /* ELF "magic number" */
   Elf64_Half e_type;
   Elf64_Half e_machine;
   Elf64_Word e_version;
index b489fc6d0b6252f9e75134d8e7ecf5e529c623c5..a701399b7fed915e8cbd057be25a57d32bb2de10 100644 (file)
@@ -8,9 +8,6 @@
 #ifndef _LINUX_EVENTFD_H
 #define _LINUX_EVENTFD_H
 
-
-#ifdef __KERNEL__
-
 #ifdef CONFIG_EVENTFD
 
 struct file *eventfd_fget(int fd);
@@ -24,7 +21,5 @@ static inline int eventfd_signal(struct file *file, int n)
 
 #endif /* CONFIG_EVENTFD */
 
-#endif /* __KERNEL__ */
-
 #endif /* _LINUX_EVENTFD_H */
 
index 58c57a33e5dd47d40c20562b86a5f3a53f0ec8b1..72295b099228637fe7a6291eca43790b91260334 100644 (file)
@@ -791,6 +791,17 @@ struct fb_tile_ops {
  */
 #define FBINFO_MISC_ALWAYS_SETPAR   0x40000
 
+/*
+ * Host and GPU endianness differ.
+ */
+#define FBINFO_FOREIGN_ENDIAN  0x100000
+/*
+ * Big endian math. This is the same flag value as above, but with a
+ * different meaning: it is set by the fb subsystem depending on the
+ * FOREIGN_ENDIAN flag and host endianness. Drivers should not use this flag.
+ */
+#define FBINFO_BE_MATH  0x100000
+
 struct fb_info {
        int node;
        int flags;
@@ -899,15 +910,11 @@ struct fb_info {
 
 #endif
 
-#if defined (__BIG_ENDIAN)
-#define FB_LEFT_POS(bpp)          (32 - bpp)
-#define FB_SHIFT_HIGH(val, bits)  ((val) >> (bits))
-#define FB_SHIFT_LOW(val, bits)   ((val) << (bits))
-#else
-#define FB_LEFT_POS(bpp)          (0)
-#define FB_SHIFT_HIGH(val, bits)  ((val) << (bits))
-#define FB_SHIFT_LOW(val, bits)   ((val) >> (bits))
-#endif
+#define FB_LEFT_POS(p, bpp)          (fb_be_math(p) ? (32 - (bpp)) : 0)
+#define FB_SHIFT_HIGH(p, val, bits)  (fb_be_math(p) ? (val) >> (bits) : \
+                                                     (val) << (bits))
+#define FB_SHIFT_LOW(p, val, bits)   (fb_be_math(p) ? (val) << (bits) : \
+                                                     (val) >> (bits))
 
     /*
      *  `Generic' versions of the frame buffer device operations
@@ -970,6 +977,25 @@ extern void fb_deferred_io_cleanup(struct fb_info *info);
 extern int fb_deferred_io_fsync(struct file *file, struct dentry *dentry,
                                int datasync);
 
+static inline bool fb_be_math(struct fb_info *info)
+{
+#ifdef CONFIG_FB_FOREIGN_ENDIAN
+#if defined(CONFIG_FB_BOTH_ENDIAN)
+       return info->flags & FBINFO_BE_MATH;
+#elif defined(CONFIG_FB_BIG_ENDIAN)
+       return true;
+#elif defined(CONFIG_FB_LITTLE_ENDIAN)
+       return false;
+#endif /* CONFIG_FB_BOTH_ENDIAN */
+#else
+#ifdef __BIG_ENDIAN
+       return true;
+#else
+       return false;
+#endif /* __BIG_ENDIAN */
+#endif /* CONFIG_FB_FOREIGN_ENDIAN */
+}
+
 /* drivers/video/fbsysfs.c */
 extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
 extern void framebuffer_release(struct fb_info *info);
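FB_SHIFT_HIGH()/FB_SHIFT_LOW() now take the fb_info so the shift direction is chosen at run time via fb_be_math() rather than at compile time from host endianness. A minimal userspace sketch of the run-time-selected shift (the info structure is reduced to a single boolean; values are arbitrary):

    #include <stdbool.h>
    #include <stdio.h>

    struct fbinfo_like { bool be_math; };

    #define FB_SHIFT_HIGH(p, val, bits) \
        ((p)->be_math ? (val) >> (bits) : (val) << (bits))
    #define FB_SHIFT_LOW(p, val, bits) \
        ((p)->be_math ? (val) << (bits) : (val) >> (bits))

    int main(void)
    {
        struct fbinfo_like le = { .be_math = false };
        struct fbinfo_like be = { .be_math = true };

        /* Same macro, opposite shift direction, selected per framebuffer. */
        printf("LE high shift: 0x%x\n", FB_SHIFT_HIGH(&le, 0x0fu, 4));  /* 0xf0 */
        printf("BE high shift: 0x%x\n", FB_SHIFT_HIGH(&be, 0xf0u, 4));  /* 0x0f */
        return 0;
    }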
index d6d7c52055c69c633a8251181b278037aa7be166..a1ba005d08e7ef80f441d00a7f5d934582fe13e6 100644 (file)
@@ -474,8 +474,8 @@ struct address_space_operations {
        int (*releasepage) (struct page *, gfp_t);
        ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
                        loff_t offset, unsigned long nr_segs);
-       struct page* (*get_xip_page)(struct address_space *, sector_t,
-                       int);
+       int (*get_xip_mem)(struct address_space *, pgoff_t, int,
+                                               void **, unsigned long *);
        /* migrate the contents of a page to the specified target */
        int (*migratepage) (struct address_space *,
                        struct page *, struct page *);
@@ -1178,7 +1178,8 @@ struct block_device_operations {
        int (*ioctl) (struct inode *, struct file *, unsigned, unsigned long);
        long (*unlocked_ioctl) (struct file *, unsigned, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned, unsigned long);
-       int (*direct_access) (struct block_device *, sector_t, unsigned long *);
+       int (*direct_access) (struct block_device *, sector_t,
+                                               void **, unsigned long *);
        int (*media_changed) (struct gendisk *);
        int (*revalidate_disk) (struct gendisk *);
        int (*getgeo)(struct block_device *, struct hd_geometry *);
@@ -1520,7 +1521,6 @@ extern int get_sb_pseudo(struct file_system_type *, char *,
        const struct super_operations *ops, unsigned long,
        struct vfsmount *mnt);
 extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb);
-int __put_super(struct super_block *sb);
 int __put_super_and_need_restart(struct super_block *sb);
 void unnamed_dev_init(void);
 
@@ -1964,7 +1964,6 @@ extern int vfs_stat_fd(int dfd, char __user *, struct kstat *);
 extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *);
 extern int vfs_fstat(unsigned int, struct kstat *);
 
-extern long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
 extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
                    unsigned long arg);
 
index 2cad5c67397e661766aaf10c0e77aacc7a8bac5b..c415a496de3a1a08d02ddc6668cf4f49ba98a979 100644 (file)
@@ -14,7 +14,6 @@
  * option) any later version.
  */
 
-#ifdef __KERNEL__
 #ifndef _FSL_DEVICE_H_
 #define _FSL_DEVICE_H_
 
@@ -127,4 +126,3 @@ struct mpc8xx_pcmcia_ops {
 };
 
 #endif /* _FSL_DEVICE_H_ */
-#endif /* __KERNEL__ */
index d4b7c4ac72e6dd70e9bbda61158b36847e83bb2a..a89513188ce7a85c076f11e512d994b0447b384e 100644 (file)
@@ -11,8 +11,6 @@
  * (C) Copyright 2005 Robert Love
  */
 
-#ifdef __KERNEL__
-
 #include <linux/dnotify.h>
 #include <linux/inotify.h>
 #include <linux/audit.h>
@@ -296,6 +294,4 @@ static inline void fsnotify_oldname_free(const char *old_name)
 
 #endif /* ! CONFIG_INOTIFY */
 
-#endif /* __KERNEL__ */
-
 #endif /* _LINUX_FS_NOTIFY_H */
index 5412da28fa4749daa9822bd4b99f82a61628388a..110833666e37a3dc4de25cf698d1057b7c5e56d9 100644 (file)
@@ -78,7 +78,7 @@ struct gs_port {
 #define GS_DEBUG_WRITE   0x00000040
 
 #ifdef __KERNEL__
-void gs_put_char(struct tty_struct *tty, unsigned char ch);
+int gs_put_char(struct tty_struct *tty, unsigned char ch);
 int  gs_write(struct tty_struct *tty, 
              const unsigned char *buf, int count);
 int  gs_write_room(struct tty_struct *tty);
index 164be9da3c1b5157c32a7d5125eb1b684267abec..b414be387180f1db79c90977921296640e15f5e9 100644 (file)
@@ -40,9 +40,9 @@ struct vm_area_struct;
 #define __GFP_FS       ((__force gfp_t)0x80u)  /* Can call down to low-level FS? */
 #define __GFP_COLD     ((__force gfp_t)0x100u) /* Cache-cold page required */
 #define __GFP_NOWARN   ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */
-#define __GFP_REPEAT   ((__force gfp_t)0x400u) /* Retry the allocation.  Might fail */
-#define __GFP_NOFAIL   ((__force gfp_t)0x800u) /* Retry for ever.  Cannot fail */
-#define __GFP_NORETRY  ((__force gfp_t)0x1000u)/* Do not retry.  Might fail */
+#define __GFP_REPEAT   ((__force gfp_t)0x400u) /* See above */
+#define __GFP_NOFAIL   ((__force gfp_t)0x800u) /* See above */
+#define __GFP_NORETRY  ((__force gfp_t)0x1000u)/* See above */
 #define __GFP_COMP     ((__force gfp_t)0x4000u)/* Add compound page metadata */
 #define __GFP_ZERO     ((__force gfp_t)0x8000u)/* Return zeroed page on success */
 #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */
@@ -119,35 +119,22 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags)
 
 static inline enum zone_type gfp_zone(gfp_t flags)
 {
-       int base = 0;
-
-#ifdef CONFIG_NUMA
-       if (flags & __GFP_THISNODE)
-               base = MAX_NR_ZONES;
-#endif
-
 #ifdef CONFIG_ZONE_DMA
        if (flags & __GFP_DMA)
-               return base + ZONE_DMA;
+               return ZONE_DMA;
 #endif
 #ifdef CONFIG_ZONE_DMA32
        if (flags & __GFP_DMA32)
-               return base + ZONE_DMA32;
+               return ZONE_DMA32;
 #endif
        if ((flags & (__GFP_HIGHMEM | __GFP_MOVABLE)) ==
                        (__GFP_HIGHMEM | __GFP_MOVABLE))
-               return base + ZONE_MOVABLE;
+               return ZONE_MOVABLE;
 #ifdef CONFIG_HIGHMEM
        if (flags & __GFP_HIGHMEM)
-               return base + ZONE_HIGHMEM;
+               return ZONE_HIGHMEM;
 #endif
-       return base + ZONE_NORMAL;
-}
-
-static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
-{
-       BUG_ON((gfp & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
-       return (gfp & ~(GFP_MOVABLE_MASK)) | migrate_flags;
+       return ZONE_NORMAL;
 }
 
 /*
@@ -157,13 +144,27 @@ static inline gfp_t set_migrateflags(gfp_t gfp, gfp_t migrate_flags)
  * virtual kernel addresses to the allocated page(s).
  */
 
+static inline int gfp_zonelist(gfp_t flags)
+{
+       if (NUMA_BUILD && unlikely(flags & __GFP_THISNODE))
+               return 1;
+
+       return 0;
+}
+
 /*
  * We get the zone list from the current node and the gfp_mask.
  * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
+ * There are two zonelists per node, one for all zones with memory and
+ * one containing just zones from the node the zonelist belongs to.
  *
  * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
  * optimized to &contig_page_data at compile-time.
  */
+static inline struct zonelist *node_zonelist(int nid, gfp_t flags)
+{
+       return NODE_DATA(nid)->node_zonelists + gfp_zonelist(flags);
+}
 
 #ifndef HAVE_ARCH_FREE_PAGE
 static inline void arch_free_page(struct page *page, int order) { }
@@ -174,6 +175,10 @@ static inline void arch_alloc_page(struct page *page, int order) { }
 
 extern struct page *__alloc_pages(gfp_t, unsigned int, struct zonelist *);
 
+extern struct page *
+__alloc_pages_nodemask(gfp_t, unsigned int,
+                               struct zonelist *, nodemask_t *nodemask);
+
 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
                                                unsigned int order)
 {
@@ -184,8 +189,7 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
        if (nid < 0)
                nid = numa_node_id();
 
-       return __alloc_pages(gfp_mask, order,
-               NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_mask));
+       return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
 }
 
 #ifdef CONFIG_NUMA
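
Illustrative only, not part of the diff: the gfp.h rework above drops the zone-offset arithmetic and instead keeps two zonelists per node, with gfp_zonelist() picking the node-local one when __GFP_THISNODE is set and node_zonelist() mapping (nid, gfp_mask) to the right list. A minimal sketch of an allocation helper written against the new interface (it mirrors the alloc_pages_node() change shown above):

static struct page *example_alloc_page_on_node(int nid, gfp_t gfp_mask)
{
        if (nid < 0)
                nid = numa_node_id();

        /* zonelist 0 covers all zones; zonelist 1 only this node's zones */
        return __alloc_pages(gfp_mask, 0, node_zonelist(nid, gfp_mask));
}
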
index d951ec4112419e640588923f67b548819ae6c914..4ce3b7a979ba27212ed386312ec497a6a98f189c 100644 (file)
@@ -498,13 +498,13 @@ struct hid_parser {
 
 struct hid_class_descriptor {
        __u8  bDescriptorType;
-       __u16 wDescriptorLength;
+       __le16 wDescriptorLength;
 } __attribute__ ((packed));
 
 struct hid_descriptor {
        __u8  bLength;
        __u8  bDescriptorType;
-       __u16 bcdHID;
+       __le16 bcdHID;
        __u8  bCountryCode;
        __u8  bNumDescriptors;
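
Illustrative only, not part of the diff: with wDescriptorLength and bcdHID now declared __le16, in-kernel users are expected to convert explicitly. A small sketch, assuming the class-descriptor array field (desc[]) that follows the fields shown above:

static u16 example_report_descriptor_len(const struct hid_descriptor *hdesc)
{
        /* fields are little-endian on the wire: convert before use */
        return le16_to_cpu(hdesc->desc[0].wDescriptorLength);
}
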
 
index 56f3236da829e5d26ca2434b7e7467d99f6c1611..31a4d653389f6d8e76405451c75f88c721328c03 100644 (file)
@@ -266,6 +266,21 @@ extern ktime_t ktime_get_real(void);
 extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
                         enum hrtimer_mode mode);
 
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+extern void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t which_clock,
+                                 enum hrtimer_mode mode);
+
+extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
+#else
+static inline void hrtimer_init_on_stack(struct hrtimer *timer,
+                                        clockid_t which_clock,
+                                        enum hrtimer_mode mode)
+{
+       hrtimer_init(timer, which_clock, mode);
+}
+static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
+#endif
+
 /* Basic timer operations: */
 extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
                         const enum hrtimer_mode mode);
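
Illustrative only, not part of the diff: hrtimer_init_on_stack()/destroy_hrtimer_on_stack() only differ from plain hrtimer_init() when CONFIG_DEBUG_OBJECTS_TIMERS is enabled, where they track the on-stack object. A sketch of the intended pairing (callback and waiting logic are invented for the example):

static enum hrtimer_restart example_timeout(struct hrtimer *t)
{
        return HRTIMER_NORESTART;
}

static void example_wait(ktime_t timeout)
{
        struct hrtimer timer;   /* lives on the stack */

        hrtimer_init_on_stack(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        timer.function = example_timeout;
        hrtimer_start(&timer, timeout, HRTIMER_MODE_REL);
        /* ... wait for the timeout or the event of interest ... */
        hrtimer_cancel(&timer);
        destroy_hrtimer_on_stack(&timer);       /* balances the debug-objects state */
}
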
index addca4cd4f11c6d706cccfac2f4a81476ca133bb..a79e80b689d803d161a4ca694370aeec1ba25629 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/mempolicy.h>
 #include <linux/shm.h>
 #include <asm/tlbflush.h>
+#include <asm/hugetlb.h>
 
 struct ctl_table;
 
@@ -51,51 +52,6 @@ int pmd_huge(pmd_t pmd);
 void hugetlb_change_protection(struct vm_area_struct *vma,
                unsigned long address, unsigned long end, pgprot_t newprot);
 
-#ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
-#define is_hugepage_only_range(mm, addr, len)  0
-#endif
-
-#ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
-#define hugetlb_free_pgd_range free_pgd_range
-#else
-void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
-                           unsigned long end, unsigned long floor,
-                           unsigned long ceiling);
-#endif
-
-#ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-/*
- * If the arch doesn't supply something else, assume that hugepage
- * size aligned regions are ok without further preparation.
- */
-static inline int prepare_hugepage_range(unsigned long addr, unsigned long len)
-{
-       if (len & ~HPAGE_MASK)
-               return -EINVAL;
-       if (addr & ~HPAGE_MASK)
-               return -EINVAL;
-       return 0;
-}
-#else
-int prepare_hugepage_range(unsigned long addr, unsigned long len);
-#endif
-
-#ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
-#define set_huge_pte_at(mm, addr, ptep, pte)   set_pte_at(mm, addr, ptep, pte)
-#define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep)
-#else
-void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
-                    pte_t *ptep, pte_t pte);
-pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
-                             pte_t *ptep);
-#endif
-
-#ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK
-#define hugetlb_prefault_arch_hook(mm)         do { } while (0)
-#else
-void hugetlb_prefault_arch_hook(struct mm_struct *mm);
-#endif
-
 #else /* !CONFIG_HUGETLB_PAGE */
 
 static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
index 85d11916e9ea48a878f54eb3aff5045a8ec2b2c7..7244456e7e6573466f0534857bef00b8b1dbdad7 100644 (file)
@@ -11,7 +11,6 @@
 
 #ifndef LINUX_HWRANDOM_H_
 #define LINUX_HWRANDOM_H_
-#ifdef __KERNEL__
 
 #include <linux/types.h>
 #include <linux/list.h>
@@ -46,5 +45,4 @@ extern int hwrng_register(struct hwrng *rng);
 /** Unregister a Hardware Random Number Generator driver. */
 extern void hwrng_unregister(struct hwrng *rng);
 
-#endif /* __KERNEL__ */
 #endif /* LINUX_HWRANDOM_H_ */
index 32eb8bbe48317d4acacfcb9719f4ea44f2287d9f..580acc93903e63795a23c1efee2dddbc23cab7b0 100644 (file)
 #define I2C_DRIVERID_UPD64031A 79      /* upd64031a video processor    */
 #define I2C_DRIVERID_SAA717X   80      /* saa717x video encoder        */
 #define I2C_DRIVERID_DS1672    81      /* Dallas/Maxim DS1672 RTC      */
-#define I2C_DRIVERID_X1205     82      /* Xicor/Intersil X1205 RTC     */
-#define I2C_DRIVERID_PCF8563   83      /* Philips PCF8563 RTC          */
 #define I2C_DRIVERID_BT866     85      /* Conexant bt866 video encoder */
 #define I2C_DRIVERID_KS0127    86      /* Samsung ks0127 video decoder */
 #define I2C_DRIVERID_TLV320AIC23B 87   /* TI TLV320AIC23B audio codec  */
-#define I2C_DRIVERID_ISL1208   88      /* Intersil ISL1208 RTC         */
 #define I2C_DRIVERID_WM8731    89      /* Wolfson WM8731 audio codec */
 #define I2C_DRIVERID_WM8750    90      /* Wolfson WM8750 audio codec */
 #define I2C_DRIVERID_WM8753    91      /* Wolfson WM8753 audio codec */
index 365e0df3646baf525d94aaec7899fa93a8f2a05c..cb63da5c2139f04c331f44da7e681c36ec07ece1 100644 (file)
@@ -126,7 +126,7 @@ struct i2c_driver {
         * With the driver model, device enumeration is NEVER done by drivers;
         * it's done by infrastructure.  (NEW STYLE DRIVERS ONLY)
         */
-       int (*probe)(struct i2c_client *);
+       int (*probe)(struct i2c_client *, const struct i2c_device_id *);
        int (*remove)(struct i2c_client *);
 
        /* driver model interfaces that don't relate to enumeration  */
@@ -140,11 +140,10 @@ struct i2c_driver {
        int (*command)(struct i2c_client *client,unsigned int cmd, void *arg);
 
        struct device_driver driver;
+       const struct i2c_device_id *id_table;
 };
 #define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
 
-#define I2C_NAME_SIZE  20
-
 /**
  * struct i2c_client - represent an I2C slave device
  * @flags: I2C_CLIENT_TEN indicates the device uses a ten bit chip address;
@@ -230,17 +229,17 @@ struct i2c_board_info {
 };
 
 /**
- * I2C_BOARD_INFO - macro used to list an i2c device and its driver
- * @driver: identifies the driver to use with the device
+ * I2C_BOARD_INFO - macro used to list an i2c device and its address
+ * @dev_type: identifies the device type
  * @dev_addr: the device's address on the bus.
  *
  * This macro initializes essential fields of a struct i2c_board_info,
  * declaring what has been provided on a particular board.  Optional
- * fields (such as the chip type, its associated irq, or device-specific
- * platform_data) are provided using conventional syntax.
+ * fields (such as associated irq, or device-specific platform_data)
+ * are provided using conventional syntax.
  */
-#define I2C_BOARD_INFO(driver,dev_addr) \
-       .driver_name = (driver), .addr = (dev_addr)
+#define I2C_BOARD_INFO(dev_type,dev_addr) \
+       .type = (dev_type), .addr = (dev_addr)
 
 
 /* Add-on boards should register/unregister their devices; e.g. a board
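
Illustrative only, not part of the diff: with the new convention, I2C_BOARD_INFO() takes the device type (matched against a driver's id_table) rather than a driver name. Board code would now look roughly like this (device name, address and IRQ are invented for the example):

static struct i2c_board_info __initdata example_i2c_devices[] = {
        {
                I2C_BOARD_INFO("ds1337", 0x68), /* sets .type and .addr */
                .irq = 42,                      /* optional fields as before */
        },
};

/* registered from board setup code, e.g.:
 *   i2c_register_board_info(0, example_i2c_devices,
 *                           ARRAY_SIZE(example_i2c_devices));
 */
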
index e92170dda245b3241ef8ae597ace7b43ad2b7f72..7d51cbca49ab230f15d5e8f1276d8ad5f5537b82 100644 (file)
@@ -18,8 +18,6 @@
 #ifndef _I2O_H
 #define _I2O_H
 
-#ifdef __KERNEL__              /* This file to be included by kernel only */
-
 #include <linux/i2o-dev.h>
 
 /* How many different OSM's are we allowing */
@@ -613,14 +611,9 @@ struct i2o_sys_tbl {
 extern struct list_head i2o_controllers;
 
 /* Message functions */
-static inline struct i2o_message *i2o_msg_get(struct i2o_controller *);
 extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int);
-static inline void i2o_msg_post(struct i2o_controller *, struct i2o_message *);
-static inline int i2o_msg_post_wait(struct i2o_controller *,
-                                   struct i2o_message *, unsigned long);
 extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *,
                                 unsigned long, struct i2o_dma *);
-static inline void i2o_flush_reply(struct i2o_controller *, u32);
 
 /* IOP functions */
 extern int i2o_status_get(struct i2o_controller *);
@@ -1260,5 +1253,4 @@ extern void i2o_dump_message(struct i2o_message *);
 extern void i2o_dump_hrt(struct i2o_controller *c);
 extern void i2o_debug_state(struct i2o_controller *c);
 
-#endif                         /* __KERNEL__ */
 #endif                         /* _I2O_H */
index f20410dd4482b90db602a786776a2575255f6ec8..b0135b0c3a0435777b379d8f47f817d18c73de67 100644 (file)
@@ -47,13 +47,6 @@ typedef unsigned char        byte;   /* used everywhere */
 #define ERROR_RESET    3       /* Reset controller every 4th retry */
 #define ERROR_RECAL    1       /* Recalibrate every 2nd retry */
 
-/*
- * Tune flags
- */
-#define IDE_TUNE_NOAUTO                2
-#define IDE_TUNE_AUTO          1
-#define IDE_TUNE_DEFAULT       0
-
 /*
  * state flags
  */
@@ -68,23 +61,30 @@ typedef unsigned char       byte;   /* used everywhere */
  */
 #define IDE_NR_PORTS           (10)
 
-#define IDE_DATA_OFFSET                (0)
-#define IDE_ERROR_OFFSET       (1)
-#define IDE_NSECTOR_OFFSET     (2)
-#define IDE_SECTOR_OFFSET      (3)
-#define IDE_LCYL_OFFSET                (4)
-#define IDE_HCYL_OFFSET                (5)
-#define IDE_SELECT_OFFSET      (6)
-#define IDE_STATUS_OFFSET      (7)
-#define IDE_CONTROL_OFFSET     (8)
-#define IDE_IRQ_OFFSET         (9)
-
-#define IDE_FEATURE_OFFSET     IDE_ERROR_OFFSET
-#define IDE_COMMAND_OFFSET     IDE_STATUS_OFFSET
-#define IDE_ALTSTATUS_OFFSET   IDE_CONTROL_OFFSET
-#define IDE_IREASON_OFFSET     IDE_NSECTOR_OFFSET
-#define IDE_BCOUNTL_OFFSET     IDE_LCYL_OFFSET
-#define IDE_BCOUNTH_OFFSET     IDE_HCYL_OFFSET
+struct ide_io_ports {
+       unsigned long   data_addr;
+
+       union {
+               unsigned long error_addr;       /*   read:  error */
+               unsigned long feature_addr;     /*  write: feature */
+       };
+
+       unsigned long   nsect_addr;
+       unsigned long   lbal_addr;
+       unsigned long   lbam_addr;
+       unsigned long   lbah_addr;
+
+       unsigned long   device_addr;
+
+       union {
+               unsigned long status_addr;      /*  read: status  */
+               unsigned long command_addr;     /* write: command */
+       };
+
+       unsigned long   ctl_addr;
+
+       unsigned long   irq_addr;
+};
 
 #define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
 #define BAD_R_STAT             (BUSY_STAT   | ERR_STAT)
@@ -163,7 +163,11 @@ typedef u8 hwif_chipset_t;
  * Structure to hold all information about the location of this port
  */
 typedef struct hw_regs_s {
-       unsigned long   io_ports[IDE_NR_PORTS]; /* task file registers */
+       union {
+               struct ide_io_ports     io_ports;
+               unsigned long           io_ports_array[IDE_NR_PORTS];
+       };
+
        int             irq;                    /* our irq number */
        ide_ack_intr_t  *ack_intr;              /* acknowledge interrupt */
        hwif_chipset_t  chipset;
@@ -179,10 +183,10 @@ static inline void ide_std_init_ports(hw_regs_t *hw,
 {
        unsigned int i;
 
-       for (i = IDE_DATA_OFFSET; i <= IDE_STATUS_OFFSET; i++)
-               hw->io_ports[i] = io_addr++;
+       for (i = 0; i <= 7; i++)
+               hw->io_ports_array[i] = io_addr++;
 
-       hw->io_ports[IDE_CONTROL_OFFSET] = ctl_addr;
+       hw->io_ports.ctl_addr = ctl_addr;
 }
 
 #include <asm/ide.h>
@@ -328,7 +332,6 @@ typedef struct ide_drive_s {
        unsigned atapi_overlap  : 1;    /* ATAPI overlap (not supported) */
        unsigned doorlocking    : 1;    /* for removable only: door lock/unlock works */
        unsigned nodma          : 1;    /* disallow DMA */
-       unsigned autotune       : 2;    /* 0=default, 1=autotune, 2=noautotune */
        unsigned remap_0_to_1   : 1;    /* 0=noremap, 1=remap 0->1 (for EZDrive) */
        unsigned blocked        : 1;    /* 1=powermanagment told us not to do anything, so sleep nicely */
        unsigned vdma           : 1;    /* 1=doing PIO over DMA 0=doing normal DMA */
@@ -387,6 +390,45 @@ typedef struct ide_drive_s {
 
 struct ide_port_info;
 
+struct ide_port_ops {
+       /* host specific initialization of devices on a port */
+       void    (*port_init_devs)(struct hwif_s *);
+       /* routine to program host for PIO mode */
+       void    (*set_pio_mode)(ide_drive_t *, const u8);
+       /* routine to program host for DMA mode */
+       void    (*set_dma_mode)(ide_drive_t *, const u8);
+       /* tweaks hardware to select drive */
+       void    (*selectproc)(ide_drive_t *);
+       /* chipset polling based on hba specifics */
+       int     (*reset_poll)(ide_drive_t *);
+       /* chipset specific changes to default for device-hba resets */
+       void    (*pre_reset)(ide_drive_t *);
+       /* routine to reset controller after a disk reset */
+       void    (*resetproc)(ide_drive_t *);
+       /* special host masking for drive selection */
+       void    (*maskproc)(ide_drive_t *, int);
+       /* check host's drive quirk list */
+       void    (*quirkproc)(ide_drive_t *);
+
+       u8      (*mdma_filter)(ide_drive_t *);
+       u8      (*udma_filter)(ide_drive_t *);
+
+       u8      (*cable_detect)(struct hwif_s *);
+};
+
+struct ide_dma_ops {
+       void    (*dma_host_set)(struct ide_drive_s *, int);
+       int     (*dma_setup)(struct ide_drive_s *);
+       void    (*dma_exec_cmd)(struct ide_drive_s *, u8);
+       void    (*dma_start)(struct ide_drive_s *);
+       int     (*dma_end)(struct ide_drive_s *);
+       int     (*dma_test_irq)(struct ide_drive_s *);
+       void    (*dma_lost_irq)(struct ide_drive_s *);
+       void    (*dma_timeout)(struct ide_drive_s *);
+};
+
+struct ide_task_s;
+
 typedef struct hwif_s {
        struct hwif_s *next;            /* for linked-list in ide_hwgroup_t */
        struct hwif_s *mate;            /* other hwif from same PCI chip */
@@ -395,8 +437,8 @@ typedef struct hwif_s {
 
        char name[6];                   /* name of interface, eg. "ide0" */
 
-               /* task file registers for pata and sata */
-       unsigned long   io_ports[IDE_NR_PORTS];
+       struct ide_io_ports     io_ports;
+
        unsigned long   sata_scr[SATA_NR_PORTS];
 
        ide_drive_t     drives[MAX_DRIVES];     /* drive info */
@@ -420,65 +462,25 @@ typedef struct hwif_s {
 
        struct device *dev;
 
-       const struct ide_port_info *cds;        /* chipset device struct */
-
        ide_ack_intr_t *ack_intr;
 
        void (*rw_disk)(ide_drive_t *, struct request *);
 
-#if 0
-       ide_hwif_ops_t  *hwifops;
-#else
-       /* host specific initialization of devices on a port */
-       void    (*port_init_devs)(struct hwif_s *);
-       /* routine to program host for PIO mode */
-       void    (*set_pio_mode)(ide_drive_t *, const u8);
-       /* routine to program host for DMA mode */
-       void    (*set_dma_mode)(ide_drive_t *, const u8);
-       /* tweaks hardware to select drive */
-       void    (*selectproc)(ide_drive_t *);
-       /* chipset polling based on hba specifics */
-       int     (*reset_poll)(ide_drive_t *);
-       /* chipset specific changes to default for device-hba resets */
-       void    (*pre_reset)(ide_drive_t *);
-       /* routine to reset controller after a disk reset */
-       void    (*resetproc)(ide_drive_t *);
-       /* special host masking for drive selection */
-       void    (*maskproc)(ide_drive_t *, int);
-       /* check host's drive quirk list */
-       void    (*quirkproc)(ide_drive_t *);
-#endif
-       u8 (*mdma_filter)(ide_drive_t *);
-       u8 (*udma_filter)(ide_drive_t *);
+       const struct ide_port_ops       *port_ops;
+       const struct ide_dma_ops        *dma_ops;
 
-       u8 (*cable_detect)(struct hwif_s *);
+       void (*tf_load)(ide_drive_t *, struct ide_task_s *);
+       void (*tf_read)(ide_drive_t *, struct ide_task_s *);
 
-       void (*ata_input_data)(ide_drive_t *, void *, u32);
-       void (*ata_output_data)(ide_drive_t *, void *, u32);
+       void (*input_data)(ide_drive_t *, struct request *, void *, unsigned);
+       void (*output_data)(ide_drive_t *, struct request *, void *, unsigned);
 
-       void (*atapi_input_bytes)(ide_drive_t *, void *, u32);
-       void (*atapi_output_bytes)(ide_drive_t *, void *, u32);
-
-       void (*dma_host_set)(ide_drive_t *, int);
-       int (*dma_setup)(ide_drive_t *);
-       void (*dma_exec_cmd)(ide_drive_t *, u8);
-       void (*dma_start)(ide_drive_t *);
-       int (*ide_dma_end)(ide_drive_t *drive);
-       int (*ide_dma_test_irq)(ide_drive_t *drive);
        void (*ide_dma_clear_irq)(ide_drive_t *drive);
-       void (*dma_lost_irq)(ide_drive_t *drive);
-       void (*dma_timeout)(ide_drive_t *drive);
 
        void (*OUTB)(u8 addr, unsigned long port);
        void (*OUTBSYNC)(ide_drive_t *drive, u8 addr, unsigned long port);
-       void (*OUTW)(u16 addr, unsigned long port);
-       void (*OUTSW)(unsigned long port, void *addr, u32 count);
-       void (*OUTSL)(unsigned long port, void *addr, u32 count);
 
        u8  (*INB)(unsigned long port);
-       u16 (*INW)(unsigned long port);
-       void (*INSW)(unsigned long port, void *addr, u32 count);
-       void (*INSL)(unsigned long port, void *addr, u32 count);
 
        /* dma physical region descriptor table (cpu view) */
        unsigned int    *dmatable_cpu;
@@ -503,10 +505,7 @@ typedef struct hwif_s {
 
        unsigned long   dma_base;       /* base addr for dma ports */
        unsigned long   dma_command;    /* dma command register */
-       unsigned long   dma_vendor1;    /* dma vendor 1 register */
        unsigned long   dma_status;     /* dma status register */
-       unsigned long   dma_vendor3;    /* dma vendor 3 register */
-       unsigned long   dma_prdtable;   /* actual prd table address */
 
        unsigned long   config_data;    /* for use by chipset-specific code */
        unsigned long   select_data;    /* for use by chipset-specific code */
@@ -514,11 +513,9 @@ typedef struct hwif_s {
        unsigned long   extra_base;     /* extra addr for dma ports */
        unsigned        extra_ports;    /* number of extra dma ports */
 
-       unsigned        noprobe    : 1; /* don't probe for this interface */
        unsigned        present    : 1; /* this interface exists */
        unsigned        serialized : 1; /* serialized all channel operation */
        unsigned        sharing_irq: 1; /* 1 = sharing irq with another hwif */
-       unsigned        reset      : 1; /* reset after probe */
        unsigned        sg_mapped  : 1; /* sg_table and sg_nents are ready */
        unsigned        mmio       : 1; /* host uses MMIO */
 
@@ -543,7 +540,7 @@ typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
 typedef int (ide_expiry_t)(ide_drive_t *);
 
 /* used by ide-cd, ide-floppy, etc. */
-typedef void (xfer_func_t)(ide_drive_t *, void *, u32);
+typedef void (xfer_func_t)(ide_drive_t *, struct request *rq, void *, unsigned);
 
 typedef struct hwgroup_s {
                /* irq handler, if active */
@@ -701,10 +698,6 @@ void ide_add_generic_settings(ide_drive_t *);
 read_proc_t proc_ide_read_capacity;
 read_proc_t proc_ide_read_geometry;
 
-#ifdef CONFIG_BLK_DEV_IDEPCI
-void ide_pci_create_host_proc(const char *, get_info_t *);
-#endif
-
 /*
  * Standard exit stuff:
  */
@@ -805,8 +798,14 @@ int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsig
 #ifndef _IDE_C
 extern ide_hwif_t      ide_hwifs[];            /* master data repository */
 #endif
+extern int ide_noacpi;
+extern int ide_acpigtf;
+extern int ide_acpionboot;
 extern int noautodma;
 
+extern int ide_vlb_clk;
+extern int ide_pci_clk;
+
 ide_hwif_t *ide_find_port_slot(const struct ide_port_info *);
 
 static inline ide_hwif_t *ide_find_port(void)
@@ -823,6 +822,10 @@ extern void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigne
 void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int,
                         ide_expiry_t *);
 
+void ide_execute_pkt_cmd(ide_drive_t *);
+
+void ide_pad_transfer(ide_drive_t *, int, int);
+
 ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8);
 
 ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, byte stat);
@@ -959,8 +962,7 @@ typedef struct ide_task_s {
        void                    *special;       /* valid_t generally */
 } ide_task_t;
 
-void ide_tf_load(ide_drive_t *, ide_task_t *);
-void ide_tf_read(ide_drive_t *, ide_task_t *);
+void ide_tf_dump(const char *, struct ide_taskfile *);
 
 extern void SELECT_DRIVE(ide_drive_t *);
 extern void SELECT_MASK(ide_drive_t *, int);
@@ -1009,10 +1011,15 @@ void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, u8
 void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
 
 #ifdef CONFIG_BLK_DEV_IDEDMA_PCI
-void ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
+int ide_pci_set_master(struct pci_dev *, const char *);
+unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
+int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
 #else
-static inline void ide_hwif_setup_dma(ide_hwif_t *hwif,
-                                     const struct ide_port_info *d) { }
+static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
+                                    const struct ide_port_info *d)
+{
+       return -EINVAL;
+}
 #endif
 
 extern void default_hwif_iops(ide_hwif_t *);
@@ -1061,8 +1068,8 @@ enum {
        IDE_HFLAG_NO_DMA                = (1 << 14),
        /* check if host is PCI IDE device before allowing DMA */
        IDE_HFLAG_NO_AUTODMA            = (1 << 15),
-       /* don't autotune PIO */
-       IDE_HFLAG_NO_AUTOTUNE           = (1 << 16),
+       /* host uses MMIO */
+       IDE_HFLAG_MMIO                  = (1 << 16),
        /* host is CS5510/CS5520 */
        IDE_HFLAG_CS5520                = IDE_HFLAG_VDMA,
        /* no LBA48 */
@@ -1084,6 +1091,8 @@ enum {
        /* unmask IRQs */
        IDE_HFLAG_UNMASK_IRQS           = (1 << 25),
        IDE_HFLAG_ABUSE_SET_DMA_MODE    = (1 << 26),
+       /* serialize ports if DMA is possible (for sl82c105) */
+       IDE_HFLAG_SERIALIZE_DMA         = (1 << 27),
        /* force host out of "simplex" mode */
        IDE_HFLAG_CLEAR_SIMPLEX         = (1 << 28),
        /* DSC overlap is unsupported */
@@ -1105,10 +1114,14 @@ struct ide_port_info {
        unsigned int            (*init_chipset)(struct pci_dev *, const char *);
        void                    (*init_iops)(ide_hwif_t *);
        void                    (*init_hwif)(ide_hwif_t *);
-       void                    (*init_dma)(ide_hwif_t *, unsigned long);
+       int                     (*init_dma)(ide_hwif_t *,
+                                           const struct ide_port_info *);
+
+       const struct ide_port_ops       *port_ops;
+       const struct ide_dma_ops        *dma_ops;
+
        ide_pci_enablebit_t     enablebits[2];
        hwif_chipset_t          chipset;
-       u8                      extra;
        u32                     host_flags;
        u8                      pio_mask;
        u8                      swdma_mask;
@@ -1155,13 +1168,16 @@ void ide_destroy_dmatable(ide_drive_t *);
 
 #ifdef CONFIG_BLK_DEV_IDEDMA_SFF
 extern int ide_build_dmatable(ide_drive_t *, struct request *);
-extern int ide_release_dma(ide_hwif_t *);
-extern void ide_setup_dma(ide_hwif_t *, unsigned long);
+int ide_allocate_dma_engine(ide_hwif_t *);
+void ide_release_dma_engine(ide_hwif_t *);
+void ide_setup_dma(ide_hwif_t *, unsigned long);
 
 void ide_dma_host_set(ide_drive_t *, int);
 extern int ide_dma_setup(ide_drive_t *);
+void ide_dma_exec_cmd(ide_drive_t *, u8);
 extern void ide_dma_start(ide_drive_t *);
 extern int __ide_dma_end(ide_drive_t *);
+int ide_dma_test_irq(ide_drive_t *);
 extern void ide_dma_lost_irq(ide_drive_t *);
 extern void ide_dma_timeout(ide_drive_t *);
 #endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
@@ -1179,7 +1195,7 @@ static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
 #endif /* CONFIG_BLK_DEV_IDEDMA */
 
 #ifndef CONFIG_BLK_DEV_IDEDMA_SFF
-static inline void ide_release_dma(ide_hwif_t *drive) {;}
+static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
 #endif
 
 #ifdef CONFIG_BLK_DEV_IDEACPI
@@ -1199,17 +1215,18 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
 #endif
 
 void ide_remove_port_from_hwgroup(ide_hwif_t *);
-extern int ide_hwif_request_regions(ide_hwif_t *hwif);
-extern void ide_hwif_release_regions(ide_hwif_t* hwif);
-void ide_unregister(unsigned int);
+void ide_unregister(ide_hwif_t *);
 
 void ide_register_region(struct gendisk *);
 void ide_unregister_region(struct gendisk *);
 
 void ide_undecoded_slave(ide_drive_t *);
 
+void ide_port_apply_params(ide_hwif_t *);
+
 int ide_device_add_all(u8 *idx, const struct ide_port_info *);
 int ide_device_add(u8 idx[4], const struct ide_port_info *);
+int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
 void ide_port_unregister_devices(ide_hwif_t *);
 void ide_port_scan(ide_hwif_t *);
 
@@ -1318,51 +1335,27 @@ static inline void ide_set_irq(ide_drive_t *drive, int on)
 {
        ide_hwif_t *hwif = drive->hwif;
 
-       hwif->OUTB(drive->ctl | (on ? 0 : 2),
-                  hwif->io_ports[IDE_CONTROL_OFFSET]);
+       hwif->OUTB(drive->ctl | (on ? 0 : 2), hwif->io_ports.ctl_addr);
 }
 
 static inline u8 ide_read_status(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
 
-       return hwif->INB(hwif->io_ports[IDE_STATUS_OFFSET]);
+       return hwif->INB(hwif->io_ports.status_addr);
 }
 
 static inline u8 ide_read_altstatus(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
 
-       return hwif->INB(hwif->io_ports[IDE_CONTROL_OFFSET]);
+       return hwif->INB(hwif->io_ports.ctl_addr);
 }
 
 static inline u8 ide_read_error(ide_drive_t *drive)
 {
        ide_hwif_t *hwif = drive->hwif;
 
-       return hwif->INB(hwif->io_ports[IDE_ERROR_OFFSET]);
-}
-
-/*
- * Too bad. The drive wants to send us data which we are not ready to accept.
- * Just throw it away.
- */
-static inline void ide_atapi_discard_data(ide_drive_t *drive, unsigned bcount)
-{
-       ide_hwif_t *hwif = drive->hwif;
-
-       /* FIXME: use ->atapi_input_bytes */
-       while (bcount--)
-               (void)hwif->INB(hwif->io_ports[IDE_DATA_OFFSET]);
+       return hwif->INB(hwif->io_ports.error_addr);
 }
-
-static inline void ide_atapi_write_zeros(ide_drive_t *drive, unsigned bcount)
-{
-       ide_hwif_t *hwif = drive->hwif;
-
-       /* FIXME: use ->atapi_output_bytes */
-       while (bcount--)
-               hwif->OUTB(0, hwif->io_ports[IDE_DATA_OFFSET]);
-}
-
 #endif /* _IDE_H */
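
Illustrative only, not part of the diff: the ide.h rework replaces the IDE_*_OFFSET indexing with a named struct ide_io_ports, unioned with io_ports_array for the remaining loops. A sketch of legacy port setup under the new layout (the classic primary-channel addresses and IRQ are shown purely as an example):

static void example_setup_primary_port(void)
{
        hw_regs_t hw;

        memset(&hw, 0, sizeof(hw));
        ide_std_init_ports(&hw, 0x1f0, 0x3f6);  /* data..status, then control */
        hw.irq = 14;

        /* individual registers are now reached by name, e.g.
         * hw.io_ports.ctl_addr or hw.io_ports.status_addr */
}
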
index 0edda411959cb3126d81a7f87c2542ff4f5a57be..9a2d762124de7684983cdb36a51dc3814b461a8f 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <linux/types.h>
 #include <linux/bitops.h>
+#include <linux/init.h>
 
 #if BITS_PER_LONG == 32
 # define IDR_BITS 5
@@ -115,4 +116,6 @@ void ida_remove(struct ida *ida, int id);
 void ida_destroy(struct ida *ida);
 void ida_init(struct ida *ida);
 
+void __init idr_init_cache(void);
+
 #endif /* __IDR_H__ */
index 0d9d7ea2c1cc55fc7a829d63c5bd0adaf6af1d27..5f200bac37496ea2d92a1b52fb8bf347b9f7f748 100644 (file)
@@ -1,9 +1,6 @@
 #ifndef _LINUX_IF_MACVLAN_H
 #define _LINUX_IF_MACVLAN_H
 
-#ifdef __KERNEL__
-
 extern struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *);
 
-#endif /* __KERNEL__ */
 #endif /* _LINUX_IF_MACVLAN_H */
index 675a7dbe86f876d68cbd7d9573b74e305e7091d8..1354080cf8cfa611d85f4594c2e7a74b2867f4ef 100644 (file)
 #ifndef _LINUX_INET_H
 #define _LINUX_INET_H
 
-#ifdef __KERNEL__
 #include <linux/types.h>
 
 extern __be32 in_aton(const char *str);
 extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
 extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
-#endif
 #endif /* _LINUX_INET_H */
index fb58c0493cf2f4524232ed95fd7b56371602ad28..21d658cdfa27769146ea52dae2cbf61b3de7f302 100644 (file)
@@ -147,6 +147,8 @@ extern unsigned int reset_devices;
 void setup_arch(char **);
 void prepare_namespace(void);
 
+extern void (*late_time_init)(void);
+
 #endif
   
 #ifndef MODULE
index 37a6f5bc4a92ab5262fcb75e161b0de94114b98b..bf6b8a61f8db22913e5b9a09fa915adb6e69c768 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/ipc.h>
 #include <linux/pid_namespace.h>
 #include <linux/user_namespace.h>
+#include <linux/securebits.h>
 #include <net/net_namespace.h>
 
 #define INIT_FDTABLE \
@@ -172,7 +173,7 @@ extern struct group_info init_groups;
        .cap_inheritable = CAP_INIT_INH_SET,                            \
        .cap_permitted  = CAP_FULL_SET,                                 \
        .cap_bset       = CAP_INIT_BSET,                                \
-       .keep_capabilities = 0,                                         \
+       .securebits     = SECUREBITS_DEFAULT,                           \
        .user           = INIT_USER,                                    \
        .comm           = "swapper",                                    \
        .thread         = INIT_THREAD,                                  \
index b5fef13148bdd78692f6cf9aa523ca3662c1b7f4..f1fc7470d26ca540c27673074ac38447d28dcfbd 100644 (file)
@@ -289,6 +289,7 @@ struct softirq_action
 };
 
 asmlinkage void do_softirq(void);
+asmlinkage void __do_softirq(void);
 extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
 extern void softirq_init(void);
 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
index e3b2dda6c8eb5dd97c3ec78befe9c5c0cdb735a0..3a03a3604cce7e61e024c793739d227b5933176f 100644 (file)
@@ -58,9 +58,9 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
 }
 #endif
 
-void __iomem * devm_ioremap(struct device *dev, unsigned long offset,
+void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
                            unsigned long size);
-void __iomem * devm_ioremap_nocache(struct device *dev, unsigned long offset,
+void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
                                    unsigned long size);
 void devm_iounmap(struct device *dev, void __iomem *addr);
 int check_signature(const volatile void __iomem *io_addr,
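
Illustrative only, not part of the diff: taking a resource_size_t offset lets devm_ioremap() accept a struct resource's start directly even where physical addresses are wider than unsigned long. A typical managed-mapping sketch (the probe function and its device are invented for the example):

static int example_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *regs;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        /* resource_size_t offset: no truncation with 64-bit physical addresses */
        regs = devm_ioremap(&pdev->dev, res->start, res->end - res->start + 1);
        if (!regs)
                return -ENOMEM;

        /* mapping is released automatically when the driver detaches */
        return 0;
}
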
index e4451d1da7534205dad7e7adfba595ab9d929064..ea6c18a8b0d442d13dabcdeca9961fb8540ca451 100644 (file)
@@ -4,6 +4,17 @@
 #include <linux/err.h>
 #include <linux/idr.h>
 #include <linux/rwsem.h>
+#include <linux/notifier.h>
+
+/*
+ * ipc namespace events
+ */
+#define IPCNS_MEMCHANGED   0x00000001   /* Notify lowmem size changed */
+#define IPCNS_CREATED  0x00000002   /* Notify new ipc namespace created */
+#define IPCNS_REMOVED  0x00000003   /* Notify ipc namespace removed */
+
+#define IPCNS_CALLBACK_PRI 0
+
 
 struct ipc_ids {
        int in_use;
@@ -30,15 +41,24 @@ struct ipc_namespace {
        size_t          shm_ctlall;
        int             shm_ctlmni;
        int             shm_tot;
+
+       struct notifier_block ipcns_nb;
 };
 
 extern struct ipc_namespace init_ipc_ns;
+extern atomic_t nr_ipc_ns;
 
 #ifdef CONFIG_SYSVIPC
 #define INIT_IPC_NS(ns)                .ns             = &init_ipc_ns,
-#else
+
+extern int register_ipcns_notifier(struct ipc_namespace *);
+extern int cond_register_ipcns_notifier(struct ipc_namespace *);
+extern int unregister_ipcns_notifier(struct ipc_namespace *);
+extern int ipcns_notify(unsigned long);
+
+#else /* CONFIG_SYSVIPC */
 #define INIT_IPC_NS(ns)
-#endif
+#endif /* CONFIG_SYSVIPC */
 
 #if defined(CONFIG_SYSVIPC) && defined(CONFIG_IPC_NS)
 extern void free_ipc_ns(struct kref *kref);
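
Illustrative only, not part of the diff: the IPCNS_* events above are delivered through a standard notifier chain (ipcns_notify() fires it; register_ipcns_notifier()/unregister_ipcns_notifier() manage a namespace's ipcns_nb). A sketch of how such a callback might look; the wiring of ipcns_nb to a callback like this lives in the ipc/ code and is an assumption here:

static int example_ipcns_event(struct notifier_block *nb,
                               unsigned long event, void *data)
{
        switch (event) {
        case IPCNS_CREATED:
        case IPCNS_REMOVED:
        case IPCNS_MEMCHANGED:
                /* e.g. recompute namespace-wide limits here */
                break;
        }
        return NOTIFY_OK;
}
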
index c5bd28b69aec469087da75e93e28e7c789a4ca3f..7ebdb4fb4e54a3fe8943de5190f7f5de0ac59b08 100644 (file)
@@ -64,7 +64,7 @@
  * applications and another for userland applications.  The
  * capabilities are basically the same for both interface, although
  * the interfaces are somewhat different.  The stuff in the
- * #ifdef KERNEL below is the in-kernel interface.  The userland
+ * #ifdef __KERNEL__ below is the in-kernel interface.  The userland
  * interface is defined later in the file.  */
 
 
@@ -75,8 +75,7 @@
  * work for sockets.
  */
 #define IPMI_MAX_ADDR_SIZE 32
-struct ipmi_addr
-{
+struct ipmi_addr {
         /* Try to take these from the "Channel Medium Type" table
            in section 6.5 of the IPMI 1.5 manual. */
        int   addr_type;
@@ -90,8 +89,7 @@ struct ipmi_addr
  * 0), or IPMC_BMC_CHANNEL if communicating directly with the BMC.
  */
 #define IPMI_SYSTEM_INTERFACE_ADDR_TYPE        0x0c
-struct ipmi_system_interface_addr
-{
+struct ipmi_system_interface_addr {
        int           addr_type;
        short         channel;
        unsigned char lun;
@@ -100,10 +98,9 @@ struct ipmi_system_interface_addr
 /* An IPMB Address. */
 #define IPMI_IPMB_ADDR_TYPE            0x01
 /* Used for broadcast get device id as described in section 17.9 of the
-   IPMI 1.5 manual. */ 
+   IPMI 1.5 manual. */
 #define IPMI_IPMB_BROADCAST_ADDR_TYPE  0x41
-struct ipmi_ipmb_addr
-{
+struct ipmi_ipmb_addr {
        int           addr_type;
        short         channel;
        unsigned char slave_addr;
@@ -128,8 +125,7 @@ struct ipmi_ipmb_addr
  * message is a little weird, but this is required.
  */
 #define IPMI_LAN_ADDR_TYPE             0x04
-struct ipmi_lan_addr
-{
+struct ipmi_lan_addr {
        int           addr_type;
        short         channel;
        unsigned char privilege;
@@ -162,16 +158,14 @@ struct ipmi_lan_addr
  * byte of data in the response (as the spec shows the messages laid
  * out).
  */
-struct ipmi_msg
-{
+struct ipmi_msg {
        unsigned char  netfn;
        unsigned char  cmd;
        unsigned short data_len;
        unsigned char  __user *data;
 };
 
-struct kernel_ipmi_msg
-{
+struct kernel_ipmi_msg {
        unsigned char  netfn;
        unsigned char  cmd;
        unsigned short data_len;
@@ -239,12 +233,11 @@ typedef struct ipmi_user *ipmi_user_t;
  * used after the message is delivered, so the upper layer may use the
  * link to build a linked list, if it likes.
  */
-struct ipmi_recv_msg
-{
+struct ipmi_recv_msg {
        struct list_head link;
 
        /* The type of message as defined in the "Receive Types"
-           defines above. */
+          defines above. */
        int              recv_type;
 
        ipmi_user_t      user;
@@ -271,9 +264,8 @@ struct ipmi_recv_msg
 /* Allocate and free the receive message. */
 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg);
 
-struct ipmi_user_hndl
-{
-        /* Routine type to call when a message needs to be routed to
+struct ipmi_user_hndl {
+       /* Routine type to call when a message needs to be routed to
           the upper layer.  This will be called with some locks held,
           the only IPMI routines that can be called are ipmi_request
           and the alloc/free operations.  The handler_data is the
@@ -368,9 +360,8 @@ int ipmi_request_supply_msgs(ipmi_user_t          user,
  * Poll the IPMI interface for the user.  This causes the IPMI code to
  * do an immediate check for information from the driver and handle
  * anything that is immediately pending.  This will not block in any
- * way.  This is useful if you need to implement polling from the user
- * for things like modifying the watchdog timeout when a panic occurs
- * or disabling the watchdog timer on a reboot.
+ * way.  This is useful if you need to spin waiting for something to
+ * happen in the IPMI driver.
  */
 void ipmi_poll_interface(ipmi_user_t user);
 
@@ -421,12 +412,6 @@ int ipmi_unregister_for_cmd(ipmi_user_t   user,
 int ipmi_get_maintenance_mode(ipmi_user_t user);
 int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
 
-/*
- * Allow run-to-completion mode to be set for the interface of
- * a specific user.
- */
-void ipmi_user_set_run_to_completion(ipmi_user_t user, int val);
-
 /*
  * When the user is created, it will not receive IPMI events by
  * default.  The user must set this to TRUE to get incoming events.
@@ -440,8 +425,7 @@ int ipmi_set_gets_events(ipmi_user_t user, int val);
  * every existing interface when a new watcher is registered with
  * ipmi_smi_watcher_register().
  */
-struct ipmi_smi_watcher
-{
+struct ipmi_smi_watcher {
        struct list_head link;
 
        /* You must set the owner to the current module, if you are in
@@ -512,8 +496,7 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len);
 
 
 /* Messages sent to the interface are this format. */
-struct ipmi_req
-{
+struct ipmi_req {
        unsigned char __user *addr; /* Address to send the message to. */
        unsigned int  addr_len;
 
@@ -538,12 +521,11 @@ struct ipmi_req
 
 /* Messages sent to the interface with timing parameters are this
    format. */
-struct ipmi_req_settime
-{
+struct ipmi_req_settime {
        struct ipmi_req req;
 
        /* See ipmi_request_settime() above for details on these
-           values. */
+          values. */
        int          retries;
        unsigned int retry_time_ms;
 };
@@ -560,8 +542,7 @@ struct ipmi_req_settime
                                             struct ipmi_req_settime)
 
 /* Messages received from the interface are this format. */
-struct ipmi_recv
-{
+struct ipmi_recv {
        int     recv_type; /* Is this a command, response or an
                              asyncronous event. */
 
@@ -607,13 +588,12 @@ struct ipmi_recv
                                              struct ipmi_recv)
 
 /* Register to get commands from other entities on this interface. */
-struct ipmi_cmdspec
-{
+struct ipmi_cmdspec {
        unsigned char netfn;
        unsigned char cmd;
 };
 
-/* 
+/*
  * Register to receive a specific command.  error values:
  *   - EFAULT - an address supplied was invalid.
  *   - EBUSY - The netfn/cmd supplied was already in use.
@@ -636,8 +616,7 @@ struct ipmi_cmdspec
  * else.  The chans field is a bitmask, (1 << channel) for each channel.
  * It may be IPMI_CHAN_ALL for all channels.
  */
-struct ipmi_cmdspec_chans
-{
+struct ipmi_cmdspec_chans {
        unsigned int netfn;
        unsigned int cmd;
        unsigned int chans;
@@ -659,7 +638,7 @@ struct ipmi_cmdspec_chans
 #define IPMICTL_UNREGISTER_FOR_CMD_CHANS _IOR(IPMI_IOC_MAGIC, 29,      \
                                             struct ipmi_cmdspec_chans)
 
-/* 
+/*
  * Set whether this interface receives events.  Note that the first
  * user registered for events will get all pending events for the
  * interface.  error values:
@@ -675,15 +654,18 @@ struct ipmi_cmdspec_chans
  * things it takes to determine your address (if not the BMC) and set
  * it for everyone else.  You should probably leave the LUN alone.
  */
-struct ipmi_channel_lun_address_set
-{
+struct ipmi_channel_lun_address_set {
        unsigned short channel;
        unsigned char  value;
 };
-#define IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 24, struct ipmi_channel_lun_address_set)
-#define IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 25, struct ipmi_channel_lun_address_set)
-#define IPMICTL_SET_MY_CHANNEL_LUN_CMD    _IOR(IPMI_IOC_MAGIC, 26, struct ipmi_channel_lun_address_set)
-#define IPMICTL_GET_MY_CHANNEL_LUN_CMD    _IOR(IPMI_IOC_MAGIC, 27, struct ipmi_channel_lun_address_set)
+#define IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD \
+       _IOR(IPMI_IOC_MAGIC, 24, struct ipmi_channel_lun_address_set)
+#define IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD \
+       _IOR(IPMI_IOC_MAGIC, 25, struct ipmi_channel_lun_address_set)
+#define IPMICTL_SET_MY_CHANNEL_LUN_CMD \
+       _IOR(IPMI_IOC_MAGIC, 26, struct ipmi_channel_lun_address_set)
+#define IPMICTL_GET_MY_CHANNEL_LUN_CMD \
+       _IOR(IPMI_IOC_MAGIC, 27, struct ipmi_channel_lun_address_set)
 /* Legacy interfaces, these only set IPMB 0. */
 #define IPMICTL_SET_MY_ADDRESS_CMD     _IOR(IPMI_IOC_MAGIC, 17, unsigned int)
 #define IPMICTL_GET_MY_ADDRESS_CMD     _IOR(IPMI_IOC_MAGIC, 18, unsigned int)
@@ -694,8 +676,7 @@ struct ipmi_channel_lun_address_set
  * Get/set the default timing values for an interface.  You shouldn't
  * generally mess with these.
  */
-struct ipmi_timing_parms
-{
+struct ipmi_timing_parms {
        int          retries;
        unsigned int retry_time_ms;
 };
index 6e8cec50338000c31748bd1a8e5d4b28e26820cf..62b73668b60247c06da84bc8cca6919a9a34f6d6 100644 (file)
@@ -60,8 +60,7 @@ typedef struct ipmi_smi *ipmi_smi_t;
  * asynchronous data and messages and request them from the
  * interface.
  */
-struct ipmi_smi_msg
-{
+struct ipmi_smi_msg {
        struct list_head link;
 
        long    msgid;
@@ -74,12 +73,11 @@ struct ipmi_smi_msg
        unsigned char rsp[IPMI_MAX_MSG_LENGTH];
 
        /* Will be called when the system is done with the message
-           (presumably to free it). */
+          (presumably to free it). */
        void (*done)(struct ipmi_smi_msg *msg);
 };
 
-struct ipmi_smi_handlers
-{
+struct ipmi_smi_handlers {
        struct module *owner;
 
        /* The low-level interface cannot start sending messages to
@@ -231,7 +229,7 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg)
    directory for this interface.  Note that the entry will
    automatically be dstroyed when the interface is destroyed. */
 int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
-                           read_proc_t *read_proc, write_proc_t *write_proc,
+                           read_proc_t *read_proc,
                            void *data, struct module *owner);
 
 #endif /* __LINUX_IPMI_SMI_H */
index 1e8728a9ee8a0bce0e05e1fa41cb47a09876d053..cd5a269fdb5e3a5280002c6dd767ee4efe331a48 100644 (file)
 #include <linux/errno.h>
 #include <linux/pnp.h>
 
-/*
- *  Configuration registers (TODO: change by specification)
- */ 
-
-#define ISAPNP_CFG_ACTIVATE            0x30    /* byte */
-#define ISAPNP_CFG_MEM                 0x40    /* 4 * dword */
-#define ISAPNP_CFG_PORT                        0x60    /* 8 * word */
-#define ISAPNP_CFG_IRQ                 0x70    /* 2 * word */
-#define ISAPNP_CFG_DMA                 0x74    /* 2 * byte */
-
 /*
  *
  */
index 8f4c71759d73fd5ee45b664135650e61572f69c9..bbd42197298f14b95f78ffcc24d6d2db508de56f 100644 (file)
@@ -1,11 +1,6 @@
 #ifndef _LINUX_ISICOM_H
 #define _LINUX_ISICOM_H
 
-/*#define              ISICOM_DEBUG*/
-/*#define              ISICOM_DEBUG_DTR_RTS*/
-
-#ifdef __KERNEL__
-
 #define                YES     1
 #define                NO      0
 
@@ -85,6 +80,4 @@
 
 #define                ISI_TXOK                0x0001
 
-#endif /*      __KERNEL__      */
-
 #endif /*      ISICOM_H        */
index 33ef710dac24481774d14d2333f73e0af34de6a5..abb6ac639e8e19740c8bbd30148db72a1028d031 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _LINUX_JIFFIES_H
 #define _LINUX_JIFFIES_H
 
-#include <linux/calc64.h>
+#include <linux/math64.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/time.h>
diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h
new file mode 100644 (file)
index 0000000..22a7219
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef __LINUX_KBUILD_H
+#define __LINUX_KBUILD_H
+
+#define DEFINE(sym, val) \
+        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+#define OFFSET(sym, str, mem) \
+       DEFINE(sym, offsetof(struct str, mem))
+
+#define COMMENT(x) \
+       asm volatile("\n->#" x)
+
+#endif
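
Illustrative only, not part of the diff: the new <linux/kbuild.h> centralises the helpers that arch asm-offsets.c files use to emit assembler-visible constants. A minimal sketch of such a file (the symbol name is invented; real arch files list many more offsets):

#include <linux/kbuild.h>
#include <linux/sched.h>

int main(void)
{
        /* each line becomes "->NAME value comment" in asm-offsets.s,
           which kbuild turns into a #define usable from assembly */
        OFFSET(EXAMPLE_TASK_STATE, task_struct, state);
        BLANK();
        COMMENT("example only");
        return 0;
}
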
index cd6d02cf854d8e22d566c169326644ed5f708865..4d46e299afb5cb44003e42d894912d23fbe17a13 100644 (file)
@@ -20,6 +20,9 @@
 extern const char linux_banner[];
 extern const char linux_proc_banner[];
 
+#define USHORT_MAX     ((u16)(~0U))
+#define SHORT_MAX      ((s16)(USHORT_MAX>>1))
+#define SHORT_MIN      (-SHORT_MAX - 1)
 #define INT_MAX                ((int)(~0U>>1))
 #define INT_MIN                (-INT_MAX - 1)
 #define UINT_MAX       (~0U)
@@ -188,6 +191,7 @@ extern int log_buf_copy(char *dest, int idx, int len);
 extern int printk_ratelimit_jiffies;
 extern int printk_ratelimit_burst;
 extern int printk_ratelimit(void);
+extern int __ratelimit(int ratelimit_jiffies, int ratelimit_burst);
 extern int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst);
 extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
                                   unsigned int interval_msec);
@@ -255,6 +259,7 @@ extern enum system_states {
 #define TAINT_USER                     (1<<6)
 #define TAINT_DIE                      (1<<7)
 #define TAINT_OVERRIDDEN_ACPI_TABLE    (1<<8)
+#define TAINT_WARN                     (1<<9)
 
 extern void dump_stack(void) __cold;
 
@@ -333,33 +338,90 @@ extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
 #endif /* __LITTLE_ENDIAN */
 
 /*
- * min()/max() macros that also do
+ * min()/max()/clamp() macros that also do
  * strict type-checking.. See the
  * "unnecessary" pointer comparison.
  */
-#define min(x,y) ({ \
-       typeof(x) _x = (x);     \
-       typeof(y) _y = (y);     \
-       (void) (&_x == &_y);            \
-       _x < _y ? _x : _y; })
-
-#define max(x,y) ({ \
-       typeof(x) _x = (x);     \
-       typeof(y) _y = (y);     \
-       (void) (&_x == &_y);            \
-       _x > _y ? _x : _y; })
+#define min(x, y) ({                           \
+       typeof(x) _min1 = (x);                  \
+       typeof(y) _min2 = (y);                  \
+       (void) (&_min1 == &_min2);              \
+       _min1 < _min2 ? _min1 : _min2; })
+
+#define max(x, y) ({                           \
+       typeof(x) _max1 = (x);                  \
+       typeof(y) _max2 = (y);                  \
+       (void) (&_max1 == &_max2);              \
+       _max1 > _max2 ? _max1 : _max2; })
+
+/**
+ * clamp - return a value clamped to a given range with strict typechecking
+ * @val: current value
+ * @min: minimum allowable value
+ * @max: maximum allowable value
+ *
+ * This macro does strict typechecking of min/max to make sure they are of the
+ * same type as val.  See the unnecessary pointer comparisons.
+ */
+#define clamp(val, min, max) ({                        \
+       typeof(val) __val = (val);              \
+       typeof(min) __min = (min);              \
+       typeof(max) __max = (max);              \
+       (void) (&__val == &__min);              \
+       (void) (&__val == &__max);              \
+       __val = __val < __min ? __min: __val;   \
+       __val > __max ? __max: __val; })
 
 /*
  * ..and if you can't take the strict
  * types, you can specify one yourself.
  *
- * Or not use min/max at all, of course.
+ * Or not use min/max/clamp at all, of course.
+ */
+#define min_t(type, x, y) ({                   \
+       type __min1 = (x);                      \
+       type __min2 = (y);                      \
+       __min1 < __min2 ? __min1: __min2; })
+
+#define max_t(type, x, y) ({                   \
+       type __max1 = (x);                      \
+       type __max2 = (y);                      \
+       __max1 > __max2 ? __max1: __max2; })
+
+/**
+ * clamp_t - return a value clamped to a given range using a given type
+ * @type: the type of variable to use
+ * @val: current value
+ * @min: minimum allowable value
+ * @max: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of type
+ * 'type' to make all the comparisons.
  */
-#define min_t(type,x,y) \
-       ({ type __x = (x); type __y = (y); __x < __y ? __x: __y; })
-#define max_t(type,x,y) \
-       ({ type __x = (x); type __y = (y); __x > __y ? __x: __y; })
+#define clamp_t(type, val, min, max) ({                \
+       type __val = (val);                     \
+       type __min = (min);                     \
+       type __max = (max);                     \
+       __val = __val < __min ? __min: __val;   \
+       __val > __max ? __max: __val; })
 
+/**
+ * clamp_val - return a value clamped to a given range using val's type
+ * @val: current value
+ * @min: minimum allowable value
+ * @max: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of whatever
+ * type the input argument 'val' is.  This is useful when val is an unsigned
+ * type and min and max are literals that will otherwise be assigned a signed
+ * integer type.
+ */
+#define clamp_val(val, min, max) ({            \
+       typeof(val) __val = (val);              \
+       typeof(val) __min = (min);              \
+       typeof(val) __max = (max);              \
+       __val = __val < __min ? __min: __val;   \
+       __val > __max ? __max: __val; })
 
 /**
  * container_of - cast a member of a structure out to the containing structure
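
Illustrative only, not part of the diff: the new clamp()/clamp_t()/clamp_val() helpers complement min()/max(). Two small sketches of their intended use (function names and limits are invented):

static u8 example_scale_to_byte(int raw)
{
        /* clamp(): strict typechecking, val/min/max must share a type */
        int cooked = clamp(raw, 0, 255);

        return (u8)cooked;
}

static size_t example_bounded_len(size_t requested)
{
        /* clamp_t(): all comparisons done in the named type */
        return clamp_t(size_t, requested, 1, PAGE_SIZE);
}
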
index a70b8a8f20058c60a5bc6916deeaceda5040ea6e..c45c962d1cc527e82aa37f8c76385d87fc236c4a 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/list.h>
 #include <linux/rbtree.h>
 #include <linux/rcupdate.h>
+#include <linux/sysctl.h>
 #include <asm/atomic.h>
 
 #ifdef __KERNEL__
@@ -67,6 +68,8 @@ struct key;
 #define KEY_OTH_SETATTR        0x00000020
 #define KEY_OTH_ALL    0x0000003f
 
+#define KEY_PERM_UNDEF 0xffffffff
+
 struct seq_file;
 struct user_struct;
 struct signal_struct;
@@ -208,16 +211,19 @@ extern struct key *request_key(struct key_type *type,
 
 extern struct key *request_key_with_auxdata(struct key_type *type,
                                            const char *description,
-                                           const char *callout_info,
+                                           const void *callout_info,
+                                           size_t callout_len,
                                            void *aux);
 
 extern struct key *request_key_async(struct key_type *type,
                                     const char *description,
-                                    const char *callout_info);
+                                    const void *callout_info,
+                                    size_t callout_len);
 
 extern struct key *request_key_async_with_auxdata(struct key_type *type,
                                                  const char *description,
-                                                 const char *callout_info,
+                                                 const void *callout_info,
+                                                 size_t callout_len,
                                                  void *aux);
 
 extern int wait_for_key_construction(struct key *key, bool intr);
@@ -229,6 +235,7 @@ extern key_ref_t key_create_or_update(key_ref_t keyring,
                                      const char *description,
                                      const void *payload,
                                      size_t plen,
+                                     key_perm_t perm,
                                      unsigned long flags);
 
 extern int key_update(key_ref_t key,
@@ -257,14 +264,18 @@ extern int keyring_add_key(struct key *keyring,
 
 extern struct key *key_lookup(key_serial_t id);
 
-#define key_serial(key) ((key) ? (key)->serial : 0)
+static inline key_serial_t key_serial(struct key *key)
+{
+       return key ? key->serial : 0;
+}
+
+#ifdef CONFIG_SYSCTL
+extern ctl_table key_sysctls[];
+#endif
 
 /*
  * the userspace interface
  */
-extern struct key root_user_keyring, root_session_keyring;
-extern int alloc_uid_keyring(struct user_struct *user,
-                            struct task_struct *ctx);
 extern void switch_uid_keyring(struct user_struct *new_user);
 extern int copy_keys(unsigned long clone_flags, struct task_struct *tsk);
 extern int copy_thread_group_keys(struct task_struct *tsk);
@@ -293,7 +304,6 @@ extern void key_init(void);
 #define make_key_ref(k, p)                     ({ NULL; })
 #define key_ref_to_ptr(k)              ({ NULL; })
 #define is_key_possessed(k)            0
-#define alloc_uid_keyring(u,c)         0
 #define switch_uid_keyring(u)          do { } while(0)
 #define __install_session_keyring(t, k)        ({ NULL; })
 #define copy_keys(f,t)                 0
@@ -306,10 +316,6 @@ extern void key_init(void);
 #define key_fsgid_changed(t)           do { } while(0)
 #define key_init()                     do { } while(0)
 
-/* Initial keyrings */
-extern struct key root_user_keyring;
-extern struct key root_session_keyring;
-
 #endif /* CONFIG_KEYS */
 #endif /* __KERNEL__ */
 #endif /* _LINUX_KEY_H */
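
Illustrative only, not part of the diff: request_key_with_auxdata() and the async variants now take an arbitrary callout blob plus an explicit length instead of a NUL-terminated string. A sketch of a caller, assuming the user-defined key type (key_type_user) from <keys/user-type.h>:

static struct key *example_get_key(const char *desc)
{
        static const char callout[] = "example-callout-data";

        /* length is passed explicitly, so the callout data no longer has
           to be a C string; here it spans the string without its NUL */
        return request_key_with_auxdata(&key_type_user, desc,
                                        callout, sizeof(callout) - 1, NULL);
}
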
index 3365945640c9a93d907efedb5c3ca707737797d2..656ee6b77a4a337d2dcdd728e8a1d79af83ba5ad 100644 (file)
@@ -49,5 +49,6 @@
 #define KEYCTL_SET_REQKEY_KEYRING      14      /* set default request-key keyring */
 #define KEYCTL_SET_TIMEOUT             15      /* set key timeout */
 #define KEYCTL_ASSUME_AUTHORITY                16      /* assume request_key() authorisation */
+#define KEYCTL_GET_SECURITY            17      /* get key security label */
 
 #endif /*  _LINUX_KEYCTL_H */
index 404f4464cb1aaf255b58c818a865ae4de2e103c7..29f62e1733ff4e23c61e4f25c75d6e5498e4ab93 100644 (file)
@@ -21,8 +21,6 @@
 #ifndef _LINUX_KFIFO_H
 #define _LINUX_KFIFO_H
 
-#ifdef __KERNEL__
-
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 
@@ -151,7 +149,4 @@ static inline unsigned int kfifo_len(struct kfifo *fifo)
        return ret;
 }
 
-#else
-#warning "don't include kernel headers in userspace"
-#endif /* __KERNEL__ */
 #endif
index 74071254c9d38c348b0bba649d9e8c9ebb4cb049..06c338ef7f1b9ee62cc43fa4e30e9d79a3acc072 100644 (file)
@@ -25,37 +25,47 @@ struct klist {
        void                    (*put)(struct klist_node *);
 };
 
+#define KLIST_INIT(_name, _get, _put)                                  \
+       { .k_lock       = __SPIN_LOCK_UNLOCKED(_name.k_lock),           \
+         .k_list       = LIST_HEAD_INIT(_name.k_list),                 \
+         .get          = _get,                                         \
+         .put          = _put, }
 
-extern void klist_init(struct klist * k, void (*get)(struct klist_node *),
+#define DEFINE_KLIST(_name, _get, _put)                                        \
+       struct klist _name = KLIST_INIT(_name, _get, _put)
+
+extern void klist_init(struct klist *k, void (*get)(struct klist_node *),
                       void (*put)(struct klist_node *));
 
 struct klist_node {
-       struct klist            * n_klist;
+       struct klist            *n_klist;
        struct list_head        n_node;
        struct kref             n_ref;
        struct completion       n_removed;
 };
 
-extern void klist_add_tail(struct klist_node * n, struct klist * k);
-extern void klist_add_head(struct klist_node * n, struct klist * k);
+extern void klist_add_tail(struct klist_node *n, struct klist *k);
+extern void klist_add_head(struct klist_node *n, struct klist *k);
+extern void klist_add_after(struct klist_node *n, struct klist_node *pos);
+extern void klist_add_before(struct klist_node *n, struct klist_node *pos);
 
-extern void klist_del(struct klist_node * n);
-extern void klist_remove(struct klist_node * n);
+extern void klist_del(struct klist_node *n);
+extern void klist_remove(struct klist_node *n);
 
-extern int klist_node_attached(struct klist_node * n);
+extern int klist_node_attached(struct klist_node *n);
 
 
 struct klist_iter {
-       struct klist            * i_klist;
-       struct list_head        * i_head;
-       struct klist_node       * i_cur;
+       struct klist            *i_klist;
+       struct list_head        *i_head;
+       struct klist_node       *i_cur;
 };
 
 
-extern void klist_iter_init(struct klist * k, struct klist_iter * i);
-extern void klist_iter_init_node(struct klist * k, struct klist_iter * i, 
-                                struct klist_node * n);
-extern void klist_iter_exit(struct klist_iter * i);
-extern struct klist_node * klist_next(struct klist_iter * i);
+extern void klist_iter_init(struct klist *k, struct klist_iter *i);
+extern void klist_iter_init_node(struct klist *k, struct klist_iter *i,
+                                struct klist_node *n);
+extern void klist_iter_exit(struct klist_iter *i);
+extern struct klist_node *klist_next(struct klist_iter *i);
 
 #endif
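With KLIST_INIT()/DEFINE_KLIST() a klist can be defined statically, and klist_add_after()/klist_add_before() insert relative to an existing node instead of only at the ends. A small sketch using just the helpers declared above; the node names and the absence of get/put callbacks are illustrative:

#include <linux/klist.h>

static DEFINE_KLIST(my_devices, NULL, NULL);	/* no reference callbacks */
static struct klist_node node_a, node_b;

static void my_build_list(void)
{
	klist_add_tail(&node_a, &my_devices);
	klist_add_after(&node_b, &node_a);	/* place b right behind a */
}

static void my_walk_list(void)
{
	struct klist_iter iter;
	struct klist_node *n;

	klist_iter_init(&my_devices, &iter);
	while ((n = klist_next(&iter))) {
		/* container_of(n, ...) reaches the structure owning the node */
	}
	klist_iter_exit(&iter);
}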
index bafe178a381f19c98c7736370e5b31c5e8792ed2..73717ed9ea7912788cbe2c7f550eefd843d4aa97 100644 (file)
@@ -1,5 +1,3 @@
-#ifdef __KERNEL__
-
 #include <linux/mutex.h>
 
 typedef struct kobject *kobj_probe_t(dev_t, int *, void *);
@@ -10,5 +8,3 @@ int kobj_map(struct kobj_map *, dev_t, unsigned long, struct module *,
 void kobj_unmap(struct kobj_map *, dev_t, unsigned long);
 struct kobject *kobj_lookup(struct kobj_map *, dev_t, int *);
 struct kobj_map *kobj_map_init(kobj_probe_t *, struct mutex *);
-
-#endif
index caa3f411f15d0c48958e40d5369b4e16b9456b15..39e709f88aa001b73d1f9e7c6a9d33edac619c1f 100644 (file)
@@ -16,8 +16,6 @@
 #ifndef _KOBJECT_H_
 #define _KOBJECT_H_
 
-#ifdef __KERNEL__
-
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/sysfs.h>
@@ -224,5 +222,4 @@ static inline int kobject_action_type(const char *buf, size_t count,
 { return -EINVAL; }
 #endif
 
-#endif /* __KERNEL__ */
 #endif /* _KOBJECT_H_ */
index 0f28486f636067677cf9b80c1279c7da88ffc4c9..1036631ff4fac552ffd850e24f7e21b939fe32ad 100644 (file)
@@ -173,6 +173,13 @@ struct kretprobe_blackpoint {
        const char *name;
        void *addr;
 };
+
+struct kprobe_blackpoint {
+       const char *name;
+       unsigned long start_addr;
+       unsigned long range;
+};
+
 extern struct kretprobe_blackpoint kretprobe_blacklist[];
 
 static inline void kretprobe_assert(struct kretprobe_instance *ri,
@@ -227,15 +234,21 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
 
 int register_kprobe(struct kprobe *p);
 void unregister_kprobe(struct kprobe *p);
+int register_kprobes(struct kprobe **kps, int num);
+void unregister_kprobes(struct kprobe **kps, int num);
 int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
 int longjmp_break_handler(struct kprobe *, struct pt_regs *);
 int register_jprobe(struct jprobe *p);
 void unregister_jprobe(struct jprobe *p);
+int register_jprobes(struct jprobe **jps, int num);
+void unregister_jprobes(struct jprobe **jps, int num);
 void jprobe_return(void);
 unsigned long arch_deref_entry_point(void *);
 
 int register_kretprobe(struct kretprobe *rp);
 void unregister_kretprobe(struct kretprobe *rp);
+int register_kretprobes(struct kretprobe **rps, int num);
+void unregister_kretprobes(struct kretprobe **rps, int num);
 
 void kprobe_flush_task(struct task_struct *tk);
 void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
@@ -254,16 +267,30 @@ static inline int register_kprobe(struct kprobe *p)
 {
        return -ENOSYS;
 }
+static inline int register_kprobes(struct kprobe **kps, int num)
+{
+       return -ENOSYS;
+}
 static inline void unregister_kprobe(struct kprobe *p)
 {
 }
+static inline void unregister_kprobes(struct kprobe **kps, int num)
+{
+}
 static inline int register_jprobe(struct jprobe *p)
 {
        return -ENOSYS;
 }
+static inline int register_jprobes(struct jprobe **jps, int num)
+{
+       return -ENOSYS;
+}
 static inline void unregister_jprobe(struct jprobe *p)
 {
 }
+static inline void unregister_jprobes(struct jprobe **jps, int num)
+{
+}
 static inline void jprobe_return(void)
 {
 }
@@ -271,9 +298,16 @@ static inline int register_kretprobe(struct kretprobe *rp)
 {
        return -ENOSYS;
 }
+static inline int register_kretprobes(struct kretprobe **rps, int num)
+{
+       return -ENOSYS;
+}
 static inline void unregister_kretprobe(struct kretprobe *rp)
 {
 }
+static inline void unregister_kretprobes(struct kretprobe **rps, int num)
+{
+}
 static inline void kprobe_flush_task(struct task_struct *tk)
 {
 }
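The new *_kprobes()/*_jprobes()/*_kretprobes() entry points register or unregister an array of probes in one call. A module-style sketch for the kprobe case; the probed symbols and the empty handler are illustrative, and the "registration stops at the first failure" behaviour is an assumption about the implementation rather than something visible in this header:

#include <linux/module.h>
#include <linux/kprobes.h>

static int my_pre(struct kprobe *p, struct pt_regs *regs)
{
	return 0;			/* observe only, let execution continue */
}

static struct kprobe probe_open = {
	.symbol_name	= "do_sys_open",
	.pre_handler	= my_pre,
};
static struct kprobe probe_fork = {
	.symbol_name	= "do_fork",
	.pre_handler	= my_pre,
};
static struct kprobe *my_probes[] = { &probe_open, &probe_fork };

static int __init my_init(void)
{
	return register_kprobes(my_probes, ARRAY_SIZE(my_probes));
}

static void __exit my_exit(void)
{
	unregister_kprobes(my_probes, ARRAY_SIZE(my_probes));
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");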
index 5d185635786e129a35c28f475fdbb879cf78558b..0cef6badd6fb7cc07c445986e88bfaeb5e8903ed 100644 (file)
@@ -15,8 +15,6 @@
 #ifndef _KREF_H_
 #define _KREF_H_
 
-#ifdef __KERNEL__
-
 #include <linux/types.h>
 #include <asm/atomic.h>
 
@@ -29,5 +27,4 @@ void kref_init(struct kref *kref);
 void kref_get(struct kref *kref);
 int kref_put(struct kref *kref, void (*release) (struct kref *kref));
 
-#endif /* __KERNEL__ */
 #endif /* _KREF_H_ */
index c1ec04fd000d42d27d310ce79c92944f09d63c5f..a281afeddfbb0e2a4b9dd8747cc7afdffb271b2e 100644 (file)
@@ -8,11 +8,18 @@
  */
 
 #include <asm/types.h>
+#include <linux/compiler.h>
 #include <linux/ioctl.h>
 #include <asm/kvm.h>
 
 #define KVM_API_VERSION 12
 
+/* for KVM_TRACE_ENABLE */
+struct kvm_user_trace_setup {
+       __u32 buf_size; /* sub_buffer size of each per-cpu */
+       __u32 buf_nr; /* the number of sub_buffers of each per-cpu */
+};
+
 /* for KVM_CREATE_MEMORY_REGION */
 struct kvm_memory_region {
        __u32 slot;
@@ -73,6 +80,9 @@ struct kvm_irqchip {
 #define KVM_EXIT_INTR             10
 #define KVM_EXIT_SET_TPR          11
 #define KVM_EXIT_TPR_ACCESS       12
+#define KVM_EXIT_S390_SIEIC       13
+#define KVM_EXIT_S390_RESET       14
+#define KVM_EXIT_DCR              15
 
 /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */
 struct kvm_run {
@@ -137,6 +147,27 @@ struct kvm_run {
                        __u32 is_write;
                        __u32 pad;
                } tpr_access;
+               /* KVM_EXIT_S390_SIEIC */
+               struct {
+                       __u8 icptcode;
+                       __u64 mask; /* psw upper half */
+                       __u64 addr; /* psw lower half */
+                       __u16 ipa;
+                       __u32 ipb;
+               } s390_sieic;
+               /* KVM_EXIT_S390_RESET */
+#define KVM_S390_RESET_POR       1
+#define KVM_S390_RESET_CLEAR     2
+#define KVM_S390_RESET_SUBSYSTEM 4
+#define KVM_S390_RESET_CPU_INIT  8
+#define KVM_S390_RESET_IPL       16
+               __u64 s390_reset_flags;
+               /* KVM_EXIT_DCR */
+               struct {
+                       __u32 dcrn;
+                       __u32 data;
+                       __u8  is_write;
+               } dcr;
                /* Fix the size of the union. */
                char padding[256];
        };
@@ -204,6 +235,74 @@ struct kvm_vapic_addr {
        __u64 vapic_addr;
 };
 
+/* for KVM_SET_MPSTATE */
+
+#define KVM_MP_STATE_RUNNABLE          0
+#define KVM_MP_STATE_UNINITIALIZED     1
+#define KVM_MP_STATE_INIT_RECEIVED     2
+#define KVM_MP_STATE_HALTED            3
+#define KVM_MP_STATE_SIPI_RECEIVED     4
+
+struct kvm_mp_state {
+       __u32 mp_state;
+};
+
+struct kvm_s390_psw {
+       __u64 mask;
+       __u64 addr;
+};
+
+/* valid values for type in kvm_s390_interrupt */
+#define KVM_S390_SIGP_STOP             0xfffe0000u
+#define KVM_S390_PROGRAM_INT           0xfffe0001u
+#define KVM_S390_SIGP_SET_PREFIX       0xfffe0002u
+#define KVM_S390_RESTART               0xfffe0003u
+#define KVM_S390_INT_VIRTIO            0xffff2603u
+#define KVM_S390_INT_SERVICE           0xffff2401u
+#define KVM_S390_INT_EMERGENCY         0xffff1201u
+
+struct kvm_s390_interrupt {
+       __u32 type;
+       __u32 parm;
+       __u64 parm64;
+};
+
+#define KVM_TRC_SHIFT           16
+/*
+ * kvm trace categories
+ */
+#define KVM_TRC_ENTRYEXIT       (1 << KVM_TRC_SHIFT)
+#define KVM_TRC_HANDLER         (1 << (KVM_TRC_SHIFT + 1)) /* only 12 bits */
+
+/*
+ * kvm trace action
+ */
+#define KVM_TRC_VMENTRY         (KVM_TRC_ENTRYEXIT + 0x01)
+#define KVM_TRC_VMEXIT          (KVM_TRC_ENTRYEXIT + 0x02)
+#define KVM_TRC_PAGE_FAULT      (KVM_TRC_HANDLER + 0x01)
+
+#define KVM_TRC_HEAD_SIZE       12
+#define KVM_TRC_CYCLE_SIZE      8
+#define KVM_TRC_EXTRA_MAX       7
+
+/* This structure represents a single trace buffer record. */
+struct kvm_trace_rec {
+       __u32 event:28;
+       __u32 extra_u32:3;
+       __u32 cycle_in:1;
+       __u32 pid;
+       __u32 vcpu_id;
+       union {
+               struct {
+                       __u32 cycle_lo, cycle_hi;
+                       __u32 extra_u32[KVM_TRC_EXTRA_MAX];
+               } cycle;
+               struct {
+                       __u32 extra_u32[KVM_TRC_EXTRA_MAX];
+               } nocycle;
+       } u;
+};
+
 #define KVMIO 0xAE
 
 /*
@@ -212,6 +311,8 @@ struct kvm_vapic_addr {
 #define KVM_GET_API_VERSION       _IO(KVMIO,   0x00)
 #define KVM_CREATE_VM             _IO(KVMIO,   0x01) /* returns a VM fd */
 #define KVM_GET_MSR_INDEX_LIST    _IOWR(KVMIO, 0x02, struct kvm_msr_list)
+
+#define KVM_S390_ENABLE_SIE       _IO(KVMIO,   0x06)
 /*
  * Check if a kvm extension is available.  Argument is extension number,
  * return is 1 (yes) or 0 (no, sorry).
@@ -222,7 +323,12 @@ struct kvm_vapic_addr {
  */
 #define KVM_GET_VCPU_MMAP_SIZE    _IO(KVMIO,   0x04) /* in bytes */
 #define KVM_GET_SUPPORTED_CPUID   _IOWR(KVMIO, 0x05, struct kvm_cpuid2)
-
+/*
+ * ioctls for kvm trace
+ */
+#define KVM_TRACE_ENABLE          _IOW(KVMIO, 0x06, struct kvm_user_trace_setup)
+#define KVM_TRACE_PAUSE           _IO(KVMIO,  0x07)
+#define KVM_TRACE_DISABLE         _IO(KVMIO,  0x08)
 /*
  * Extension capability list.
  */
@@ -233,6 +339,13 @@ struct kvm_vapic_addr {
 #define KVM_CAP_SET_TSS_ADDR 4
 #define KVM_CAP_VAPIC 6
 #define KVM_CAP_EXT_CPUID 7
+#define KVM_CAP_CLOCKSOURCE 8
+#define KVM_CAP_NR_VCPUS 9       /* returns max vcpus per vm */
+#define KVM_CAP_NR_MEMSLOTS 10   /* returns max memory slots per vm */
+#define KVM_CAP_PIT 11
+#define KVM_CAP_NOP_IO_DELAY 12
+#define KVM_CAP_PV_MMU 13
+#define KVM_CAP_MP_STATE 14
 
 /*
  * ioctls for VM fds
@@ -255,6 +368,9 @@ struct kvm_vapic_addr {
 #define KVM_IRQ_LINE             _IOW(KVMIO, 0x61, struct kvm_irq_level)
 #define KVM_GET_IRQCHIP                  _IOWR(KVMIO, 0x62, struct kvm_irqchip)
 #define KVM_SET_IRQCHIP                  _IOR(KVMIO,  0x63, struct kvm_irqchip)
+#define KVM_CREATE_PIT           _IO(KVMIO,  0x64)
+#define KVM_GET_PIT              _IOWR(KVMIO, 0x65, struct kvm_pit_state)
+#define KVM_SET_PIT              _IOR(KVMIO,  0x66, struct kvm_pit_state)
 
 /*
  * ioctls for vcpu fds
@@ -281,5 +397,17 @@ struct kvm_vapic_addr {
 #define KVM_TPR_ACCESS_REPORTING  _IOWR(KVMIO,  0x92, struct kvm_tpr_access_ctl)
 /* Available with KVM_CAP_VAPIC */
 #define KVM_SET_VAPIC_ADDR        _IOW(KVMIO,  0x93, struct kvm_vapic_addr)
+/* valid for virtual machine (for floating interrupt)_and_ vcpu */
+#define KVM_S390_INTERRUPT        _IOW(KVMIO,  0x94, struct kvm_s390_interrupt)
+/* store status for s390 */
+#define KVM_S390_STORE_STATUS_NOADDR    (-1ul)
+#define KVM_S390_STORE_STATUS_PREFIXED  (-2ul)
+#define KVM_S390_STORE_STATUS    _IOW(KVMIO,  0x95, unsigned long)
+/* initial ipl psw for s390 */
+#define KVM_S390_SET_INITIAL_PSW  _IOW(KVMIO,  0x96, struct kvm_s390_psw)
+/* initial reset for s390 */
+#define KVM_S390_INITIAL_RESET    _IO(KVMIO,  0x97)
+#define KVM_GET_MP_STATE          _IOR(KVMIO,  0x98, struct kvm_mp_state)
+#define KVM_SET_MP_STATE          _IOW(KVMIO,  0x99, struct kvm_mp_state)
 
 #endif
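The new vcpu ioctls expose the multiprocessing state defined above. A userspace sketch querying it on an already-created vcpu fd; checking KVM_CAP_MP_STATE via KVM_CHECK_EXTENSION first would be prudent, and that step plus error handling are omitted:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int print_mp_state(int vcpu_fd)
{
	struct kvm_mp_state mp;

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
		return -1;

	printf("vcpu mp_state = %u (%s)\n", mp.mp_state,
	       mp.mp_state == KVM_MP_STATE_RUNNABLE ? "runnable" : "not runnable");
	return 0;
}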
index 928b0d59e9ba07d64686975f5db6022ad608ef8d..398978972b7a639cb6cf0b14b974971db0addd76 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/preempt.h>
+#include <linux/marker.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
 
 #include <asm/kvm_host.h>
 
-#define KVM_MAX_VCPUS 4
-#define KVM_MEMORY_SLOTS 8
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 4
-
-#define KVM_PIO_PAGE_OFFSET 1
-
 /*
  * vcpu->requests bit members
  */
 #define KVM_REQ_TLB_FLUSH          0
 #define KVM_REQ_MIGRATE_TIMER      1
 #define KVM_REQ_REPORT_TPR_ACCESS  2
+#define KVM_REQ_MMU_RELOAD         3
+#define KVM_REQ_TRIPLE_FAULT       4
 
 struct kvm_vcpu;
 extern struct kmem_cache *kvm_vcpu_cache;
 
-struct kvm_guest_debug {
-       int enabled;
-       unsigned long bp[4];
-       int singlestep;
-};
-
 /*
  * It would be nice to use something smarter than a linear search, TBD...
 * Thankfully we don't expect many devices to register (famous last words :),
 * Thankfully we don't expect many devices to register (famous last words :),
@@ -67,7 +57,9 @@ void kvm_io_bus_register_dev(struct kvm_io_bus *bus,
 
 struct kvm_vcpu {
        struct kvm *kvm;
+#ifdef CONFIG_PREEMPT_NOTIFIERS
        struct preempt_notifier preempt_notifier;
+#endif
        int vcpu_id;
        struct mutex mutex;
        int   cpu;
@@ -100,6 +92,10 @@ struct kvm_memory_slot {
        unsigned long flags;
        unsigned long *rmap;
        unsigned long *dirty_bitmap;
+       struct {
+               unsigned long rmap_pde;
+               int write_count;
+       } *lpage_info;
        unsigned long userspace_addr;
        int user_alloc;
 };
@@ -114,11 +110,11 @@ struct kvm {
                                        KVM_PRIVATE_MEM_SLOTS];
        struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
        struct list_head vm_list;
-       struct file *filp;
        struct kvm_io_bus mmio_bus;
        struct kvm_io_bus pio_bus;
        struct kvm_vm_stat stat;
        struct kvm_arch arch;
+       atomic_t users_count;
 };
 
 /* The guest did something we don't support. */
@@ -145,14 +141,19 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
                  struct module *module);
 void kvm_exit(void);
 
+void kvm_get_kvm(struct kvm *kvm);
+void kvm_put_kvm(struct kvm *kvm);
+
 #define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
 #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
 static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
 
 extern struct page *bad_page;
+extern pfn_t bad_pfn;
 
 int is_error_page(struct page *page);
+int is_error_pfn(pfn_t pfn);
 int kvm_is_error_hva(unsigned long addr);
 int kvm_set_memory_region(struct kvm *kvm,
                          struct kvm_userspace_memory_region *mem,
@@ -166,8 +167,19 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
                                int user_alloc);
 gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
+void kvm_set_page_dirty(struct page *page);
+void kvm_set_page_accessed(struct page *page);
+
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
+void kvm_release_pfn_dirty(pfn_t);
+void kvm_release_pfn_clean(pfn_t pfn);
+void kvm_set_pfn_dirty(pfn_t pfn);
+void kvm_set_pfn_accessed(pfn_t pfn);
+void kvm_get_pfn(pfn_t pfn);
+
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
@@ -188,6 +200,7 @@ void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_flush_remote_tlbs(struct kvm *kvm);
+void kvm_reload_remote_mmus(struct kvm *kvm);
 
 long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg);
@@ -223,6 +236,10 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
 int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs);
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state);
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state);
 int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                    struct kvm_debug_guest *dbg);
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
@@ -255,6 +272,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm);
 
 int kvm_cpu_get_interrupt(struct kvm_vcpu *v);
 int kvm_cpu_has_interrupt(struct kvm_vcpu *v);
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 
 static inline void kvm_guest_enter(void)
@@ -296,5 +314,18 @@ struct kvm_stats_debugfs_item {
        struct dentry *dentry;
 };
 extern struct kvm_stats_debugfs_item debugfs_entries[];
+extern struct dentry *kvm_debugfs_dir;
+
+#ifdef CONFIG_KVM_TRACE
+int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg);
+void kvm_trace_cleanup(void);
+#else
+static inline
+int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
+{
+       return -EINVAL;
+}
+#define kvm_trace_cleanup() ((void)0)
+#endif
 
 #endif
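The page-oriented guest memory accessors gain pfn-based counterparts (gfn_to_pfn(), kvm_release_pfn_clean()/_dirty(), is_error_pfn()). A hedged in-kernel sketch of the lookup/release pattern; the surrounding MMU context, locking and the helper name are assumptions:

#include <linux/kvm_host.h>

static int my_touch_gfn(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return -EFAULT;
	}

	/* ... inspect or map the host page backing this pfn ... */

	kvm_release_pfn_clean(pfn);	/* nothing was written to the page */
	return 0;
}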
index 5497aac0d2f829d4efe3df98f8c34496d947243f..3ddce03766caf5796f84f71bd7462f117c9a064a 100644 (file)
 
 /* Return values for hypercalls */
 #define KVM_ENOSYS             1000
+#define KVM_EFAULT             EFAULT
+#define KVM_E2BIG              E2BIG
 
-#define KVM_HC_VAPIC_POLL_IRQ            1
+#define KVM_HC_VAPIC_POLL_IRQ          1
+#define KVM_HC_MMU_OP                  2
 
 /*
  * hypercalls use architecture specific
 #include <asm/kvm_para.h>
 
 #ifdef __KERNEL__
+#ifdef CONFIG_KVM_GUEST
+void __init kvm_guest_init(void);
+#else
+#define kvm_guest_init() do { } while (0)
+#endif
+
 static inline int kvm_para_has_feature(unsigned int feature)
 {
        if (kvm_arch_para_features() & (1UL << feature))
index 1c4e46decb226658a5d35b36444bf21f6a39fd6d..9b6f395c9625e13237abd15a9cb41150a34f48d0 100644 (file)
@@ -38,6 +38,8 @@ typedef unsigned long  hva_t;
 typedef u64            hpa_t;
 typedef unsigned long  hfn_t;
 
+typedef hfn_t pfn_t;
+
 struct kvm_pio_request {
        unsigned long count;
        int cur_count;
index 395a523d8c30c157985a48789016efea659c1196..d1dfe872ee308f854a4b7dd2113e124c4813d1ab 100644 (file)
@@ -548,11 +548,6 @@ struct ata_device {
        u64                     n_sectors;      /* size of device, if ATA */
        unsigned int            class;          /* ATA_DEV_xxx */
 
-       union {
-               u16             id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
-               u32             gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
-       };
-
        u8                      pio_mode;
        u8                      dma_mode;
        u8                      xfer_mode;
@@ -574,8 +569,13 @@ struct ata_device {
        u16                     sectors;        /* Number of sectors per track */
 
        /* error history */
-       struct ata_ering        ering;
        int                     spdn_cnt;
+       struct ata_ering        ering;
+
+       union {
+               u16             id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
+               u32             gscr[SATA_PMP_GSCR_DWORDS]; /* PMP GSCR block */
+       };
 };
 
 /* Offset into struct ata_device.  Fields above it are maintained
index dac16f99c70115ba4e4f1cee8b18f5f1b0f050c8..08cf4f6518892a4364ce1a84332b407eab2b122f 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _LINUX_LIST_H
 #define _LINUX_LIST_H
 
-#ifdef __KERNEL__
-
 #include <linux/stddef.h>
 #include <linux/poison.h>
 #include <linux/prefetch.h>
@@ -319,7 +317,16 @@ static inline int list_empty_careful(const struct list_head *head)
        return (next == head) && (next == head->prev);
 }
 
-static inline void __list_splice(struct list_head *list,
+/**
+ * list_is_singular - tests whether a list has just one entry.
+ * @head: the list to test.
+ */
+static inline int list_is_singular(const struct list_head *head)
+{
+       return !list_empty(head) && (head->next == head->prev);
+}
+
+static inline void __list_splice(const struct list_head *list,
                                 struct list_head *head)
 {
        struct list_head *first = list->next;
@@ -338,7 +345,8 @@ static inline void __list_splice(struct list_head *list,
  * @list: the new list to add.
  * @head: the place to add it in the first list.
  */
-static inline void list_splice(struct list_head *list, struct list_head *head)
+static inline void list_splice(const struct list_head *list,
+                               struct list_head *head)
 {
        if (!list_empty(list))
                __list_splice(list, head);
@@ -973,7 +981,4 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
                ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
             pos = rcu_dereference(pos->next))
 
-#else
-#warning "don't include kernel headers in userspace"
-#endif /* __KERNEL__ */
 #endif
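list_is_singular() distinguishes a list holding exactly one entry from an empty or longer one, which suits fast paths that only apply to the last remaining element. A small sketch; the request structure and queue are illustrative:

#include <linux/list.h>
#include <linux/kernel.h>

struct my_req {
	struct list_head node;
	/* ... payload ... */
};

static struct my_req *my_dequeue(struct list_head *q)
{
	struct my_req *req;

	if (list_empty(q))
		return NULL;

	if (list_is_singular(q))
		pr_debug("queue drains after this request\n");

	req = list_first_entry(q, struct my_req, node);
	list_del(&req->node);
	return req;
}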
index 271153d27fba12101dce1a33b71607f9596799e5..c46c89505dac3721e4e337d26c283419cc01a68c 100644 (file)
@@ -40,7 +40,8 @@ extern struct lmb lmb;
 
 extern void __init lmb_init(void);
 extern void __init lmb_analyze(void);
-extern long __init lmb_add(u64 base, u64 size);
+extern long lmb_add(u64 base, u64 size);
+extern long lmb_remove(u64 base, u64 size);
 extern long __init lmb_reserve(u64 base, u64 size);
 extern u64 __init lmb_alloc_nid(u64 size, u64 align, int nid,
                                u64 (*nid_range)(u64, u64, int *));
@@ -53,6 +54,7 @@ extern u64 __init lmb_phys_mem_size(void);
 extern u64 __init lmb_end_of_DRAM(void);
 extern void __init lmb_enforce_memory_limit(u64 memory_limit);
 extern int __init lmb_is_reserved(u64 addr);
+extern int lmb_find(struct lmb_property *res);
 
 extern void lmb_dump_all(void);
 
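Dropping the __init annotation from lmb_add() and adding lmb_remove() lets the region lists be updated after boot. A hedged sketch of a hotplug-style update; the caller, the error convention and the debugging call are assumptions:

#include <linux/lmb.h>

static long my_update_region(u64 base, u64 size, bool add)
{
	long ret = add ? lmb_add(base, size) : lmb_remove(base, size);

	lmb_dump_all();		/* debug builds: show the updated region lists */
	return ret;
}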
diff --git a/include/linux/math64.h b/include/linux/math64.h
new file mode 100644 (file)
index 0000000..c1a5f81
--- /dev/null
@@ -0,0 +1,84 @@
+#ifndef _LINUX_MATH64_H
+#define _LINUX_MATH64_H
+
+#include <linux/types.h>
+#include <asm/div64.h>
+
+#if BITS_PER_LONG == 64
+
+/**
+ * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder
+ *
+ * This is commonly provided by 32bit archs to provide an optimized 64bit
+ * divide.
+ */
+static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+{
+       *remainder = dividend % divisor;
+       return dividend / divisor;
+}
+
+/**
+ * div_s64_rem - signed 64bit divide with 32bit divisor with remainder
+ */
+static inline s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+{
+       *remainder = dividend % divisor;
+       return dividend / divisor;
+}
+
+/**
+ * div64_u64 - unsigned 64bit divide with 64bit divisor
+ */
+static inline u64 div64_u64(u64 dividend, u64 divisor)
+{
+       return dividend / divisor;
+}
+
+#elif BITS_PER_LONG == 32
+
+#ifndef div_u64_rem
+static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
+{
+       *remainder = do_div(dividend, divisor);
+       return dividend;
+}
+#endif
+
+#ifndef div_s64_rem
+extern s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder);
+#endif
+
+#ifndef div64_u64
+extern u64 div64_u64(u64 dividend, u64 divisor);
+#endif
+
+#endif /* BITS_PER_LONG */
+
+/**
+ * div_u64 - unsigned 64bit divide with 32bit divisor
+ *
+ * This is the most common 64bit divide and should be used if possible,
+ * as many 32bit archs can optimize this variant better than a full 64bit
+ * divide.
+ */
+#ifndef div_u64
+static inline u64 div_u64(u64 dividend, u32 divisor)
+{
+       u32 remainder;
+       return div_u64_rem(dividend, divisor, &remainder);
+}
+#endif
+
+/**
+ * div_s64 - signed 64bit divide with 32bit divisor
+ */
+#ifndef div_s64
+static inline s64 div_s64(s64 dividend, s32 divisor)
+{
+       s32 remainder;
+       return div_s64_rem(dividend, divisor, &remainder);
+}
+#endif
+
+#endif /* _LINUX_MATH64_H */
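The new header gives a single spelling for 64-bit divides: a plain C division on 64-bit builds and the do_div()-based fallback on 32-bit ones. A short sketch of replacing an open-coded do_div() sequence; the function names are illustrative:

#include <linux/math64.h>

static u64 mean_rate(u64 total_bytes, u32 seconds)
{
	/* preferred replacement for: do_div(total_bytes, seconds); */
	return div_u64(total_bytes, seconds);
}

static u64 mean_rate_rem(u64 total_bytes, u32 seconds, u32 *leftover)
{
	/* variant that also hands back the remainder */
	return div_u64_rem(total_bytes, seconds, leftover);
}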
index f2bb770e530aa82de96092a916e332d2a593a0d6..7a3aea845902966f91399b08ec209a65997ee67d 100644 (file)
@@ -34,7 +34,6 @@
 extern int mca_find_adapter(int id, int start);
 extern int mca_find_unused_adapter(int id, int start);
 
-extern int mca_is_adapter_used(int slot);
 extern int mca_mark_as_used(int slot);
 extern void mca_mark_as_unused(int slot);
 
index 8b1c4295848b77b6808c1f451434e02e599fffb2..e6608776bc96138c4fac9eea0b2a4126997bc7b8 100644 (file)
@@ -27,9 +27,6 @@ struct mm_struct;
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
-extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p);
-extern void mm_free_cgroup(struct mm_struct *mm);
-
 #define page_reset_bad_cgroup(page)    ((page)->page_cgroup = 0)
 
 extern struct page_cgroup *page_get_page_cgroup(struct page *page);
@@ -48,8 +45,10 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
 int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
 
+extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+
 #define mm_match_cgroup(mm, cgroup)    \
-       ((cgroup) == rcu_dereference((mm)->mem_cgroup))
+       ((cgroup) == mem_cgroup_from_task((mm)->owner))
 
 extern int mem_cgroup_prepare_migration(struct page *page);
 extern void mem_cgroup_end_migration(struct page *page);
@@ -73,15 +72,6 @@ extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem,
                                struct zone *zone, int priority);
 
 #else /* CONFIG_CGROUP_MEM_RES_CTLR */
-static inline void mm_init_cgroup(struct mm_struct *mm,
-                                       struct task_struct *p)
-{
-}
-
-static inline void mm_free_cgroup(struct mm_struct *mm)
-{
-}
-
 static inline void page_reset_bad_cgroup(struct page *page)
 {
 }
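With the mm->owner rework, a memory controller resolves an mm's cgroup through its owning task via mem_cgroup_from_task() instead of a pointer cached in mm_struct, which is what the new mm_match_cgroup() expands to. A hedged sketch; the RCU locking shown is an assumption about the caller's context, and CONFIG_MM_OWNER is assumed enabled:

#include <linux/memcontrol.h>
#include <linux/mm_types.h>
#include <linux/rcupdate.h>

static bool mm_in_cgroup(struct mm_struct *mm, struct mem_cgroup *memcg)
{
	bool match;

	rcu_read_lock();
	match = (mem_cgroup_from_task(mm->owner) == memcg);
	rcu_read_unlock();
	return match;
}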
index f80e0e331cb7e0f923feed2c26bb2cc03bd28e22..2f5f8a5ef2a08a5e796294275ee095cae2319fd5 100644 (file)
@@ -53,6 +53,13 @@ struct memory_notify {
 struct notifier_block;
 struct mem_section;
 
+/*
+ * Priorities for the hotplug memory callback routines (stored in decreasing
+ * order in the callback chain)
+ */
+#define SLAB_CALLBACK_PRI       1
+#define IPC_CALLBACK_PRI        10
+
 #ifndef CONFIG_MEMORY_HOTPLUG_SPARSE
 static inline int memory_dev_init(void)
 {
index 8fee7a45736b647aef1f81da38d3aa2ed9f3dd34..73e358612eaffa3aa9ef27026d542a382b3a2afd 100644 (file)
@@ -8,8 +8,18 @@
 struct page;
 struct zone;
 struct pglist_data;
+struct mem_section;
 
 #ifdef CONFIG_MEMORY_HOTPLUG
+
+/*
+ * Magic number for free bootmem.
+ * The normal smallest mapcount is -1. Here is smaller value than it.
+ */
+#define SECTION_INFO           0xfffffffe
+#define MIX_INFO               0xfffffffd
+#define NODE_INFO              0xfffffffc
+
 /*
  * pgdat resizing functions
  */
@@ -64,9 +74,11 @@ extern int offline_pages(unsigned long, unsigned long, unsigned long);
 /* reasonably generic interface to expand the physical pages in a zone  */
 extern int __add_pages(struct zone *zone, unsigned long start_pfn,
        unsigned long nr_pages);
+extern int __remove_pages(struct zone *zone, unsigned long start_pfn,
+       unsigned long nr_pages);
 
 /*
- * Walk thorugh all memory which is registered as resource.
+ * Walk through all memory which is registered as resource.
  * arg is (start_pfn, nr_pages, private_arg_pointer)
  */
 extern int walk_memory_resource(unsigned long start_pfn,
@@ -142,6 +154,18 @@ static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
 #endif /* CONFIG_NUMA */
 #endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+static inline void put_page_bootmem(struct page *page)
+{
+}
+#else
+extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
+extern void put_page_bootmem(struct page *page);
+#endif
+
 #else /* ! CONFIG_MEMORY_HOTPLUG */
 /*
  * Stub functions for when hotplug is off
@@ -169,6 +193,10 @@ static inline int mhp_notimplemented(const char *func)
        return -ENOSYS;
 }
 
+static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+}
+
 #endif /* ! CONFIG_MEMORY_HOTPLUG */
 
 extern int add_memory(int nid, u64 start, u64 size);
@@ -176,5 +204,8 @@ extern int arch_add_memory(int nid, u64 start, u64 size);
 extern int remove_memory(u64 start, u64 size);
 extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
                                                                int nr_pages);
+extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms);
+extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
+                                         unsigned long pnum);
 
 #endif /* __LINUX_MEMORY_HOTPLUG_H */
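__remove_pages() is the tear-down counterpart of __add_pages(). A hedged sketch of an arch-level helper built on it, assuming CONFIG_MEMORY_HOTPLUG; the zone choice (node 0, ZONE_NORMAL) is purely illustrative:

#include <linux/memory_hotplug.h>
#include <linux/mmzone.h>

static int my_arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages  = size >> PAGE_SHIFT;
	struct zone *zone = &NODE_DATA(0)->node_zones[ZONE_NORMAL];

	return __remove_pages(zone, start_pfn, nr_pages);
}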
index 59c4865bc85f13fbe36beefd959ab499f1d5aaaf..3a39570b81b85e525bea4cde10429ac470546bbe 100644 (file)
@@ -8,15 +8,32 @@
  * Copyright 2003,2004 Andi Kleen SuSE Labs
  */
 
+/*
+ * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
+ * passed by the user to either set_mempolicy() or mbind() in an 'int' actual.
+ * The MPOL_MODE_FLAGS macro determines the legal set of optional mode flags.
+ */
+
 /* Policies */
-#define MPOL_DEFAULT   0
-#define MPOL_PREFERRED 1
-#define MPOL_BIND      2
-#define MPOL_INTERLEAVE        3
+enum {
+       MPOL_DEFAULT,
+       MPOL_PREFERRED,
+       MPOL_BIND,
+       MPOL_INTERLEAVE,
+       MPOL_MAX,       /* always last member of enum */
+};
 
-#define MPOL_MAX MPOL_INTERLEAVE
+/* Flags for set_mempolicy */
+#define MPOL_F_STATIC_NODES    (1 << 15)
+#define MPOL_F_RELATIVE_NODES  (1 << 14)
 
-/* Flags for get_mem_policy */
+/*
+ * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed to
+ * either set_mempolicy() or mbind().
+ */
+#define MPOL_MODE_FLAGS        (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
+
+/* Flags for get_mempolicy */
 #define MPOL_F_NODE    (1<<0)  /* return next IL mode instead of node mask */
 #define MPOL_F_ADDR    (1<<1)  /* look up vma using address */
 #define MPOL_F_MEMS_ALLOWED (1<<2) /* return allowed memories */
 #define MPOL_MF_MOVE_ALL (1<<2)        /* Move every page to conform to mapping */
 #define MPOL_MF_INTERNAL (1<<3)        /* Internal flags start here */
 
+/*
+ * Internal flags that share the struct mempolicy flags word with
+ * "mode flags".  These flags are allocated from bit 0 up, as they
+ * are never OR'ed into the mode in mempolicy API arguments.
+ */
+#define MPOL_F_SHARED  (1 << 0)        /* identify shared policies */
+#define MPOL_F_LOCAL   (1 << 1)        /* preferred local allocation */
+
 #ifdef __KERNEL__
 
 #include <linux/mmzone.h>
@@ -35,7 +60,6 @@
 #include <linux/spinlock.h>
 #include <linux/nodemask.h>
 
-struct vm_area_struct;
 struct mm_struct;
 
 #ifdef CONFIG_NUMA
@@ -54,22 +78,27 @@ struct mm_struct;
  * mmap_sem.
  *
  * Freeing policy:
- * When policy is MPOL_BIND v.zonelist is kmalloc'ed and must be kfree'd.
- * All other policies don't have any external state. mpol_free() handles this.
+ * Mempolicy objects are reference counted.  A mempolicy will be freed when
+ * mpol_put() decrements the reference count to zero.
  *
- * Copying policy objects:
- * For MPOL_BIND the zonelist must be always duplicated. mpol_clone() does this.
+ * Duplicating policy objects:
+ * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
+ * to the new storage.  The reference count of the new object is initialized
+ * to 1, representing the caller of mpol_dup().
  */
 struct mempolicy {
        atomic_t refcnt;
-       short policy;   /* See MPOL_* above */
+       unsigned short mode;    /* See MPOL_* above */
+       unsigned short flags;   /* See set_mempolicy() MPOL_F_* above */
        union {
-               struct zonelist  *zonelist;     /* bind */
                short            preferred_node; /* preferred */
-               nodemask_t       nodes;         /* interleave */
+               nodemask_t       nodes;         /* interleave/bind */
                /* undefined for default */
        } v;
-       nodemask_t cpuset_mems_allowed; /* mempolicy relative to these nodes */
+       union {
+               nodemask_t cpuset_mems_allowed; /* relative to these nodes */
+               nodemask_t user_nodemask;       /* nodemask passed by user */
+       } w;
 };
 
 /*
@@ -77,18 +106,43 @@ struct mempolicy {
  * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
  */
 
-extern void __mpol_free(struct mempolicy *pol);
-static inline void mpol_free(struct mempolicy *pol)
+extern void __mpol_put(struct mempolicy *pol);
+static inline void mpol_put(struct mempolicy *pol)
 {
        if (pol)
-               __mpol_free(pol);
+               __mpol_put(pol);
 }
 
-extern struct mempolicy *__mpol_copy(struct mempolicy *pol);
-static inline struct mempolicy *mpol_copy(struct mempolicy *pol)
+/*
+ * Does mempolicy pol need explicit unref after use?
+ * Currently only needed for shared policies.
+ */
+static inline int mpol_needs_cond_ref(struct mempolicy *pol)
+{
+       return (pol && (pol->flags & MPOL_F_SHARED));
+}
+
+static inline void mpol_cond_put(struct mempolicy *pol)
+{
+       if (mpol_needs_cond_ref(pol))
+               __mpol_put(pol);
+}
+
+extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
+                                         struct mempolicy *frompol);
+static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
+                                               struct mempolicy *frompol)
+{
+       if (!frompol)
+               return frompol;
+       return __mpol_cond_copy(tompol, frompol);
+}
+
+extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
+static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
 {
        if (pol)
-               pol = __mpol_copy(pol);
+               pol = __mpol_dup(pol);
        return pol;
 }
 
@@ -108,11 +162,6 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
                return 1;
        return __mpol_equal(a, b);
 }
-#define vma_mpol_equal(a,b) mpol_equal(vma_policy(a), vma_policy(b))
-
-/* Could later add inheritance of the process policy here. */
-
-#define mpol_set_vma_default(vma) ((vma)->vm_policy = NULL)
 
 /*
  * Tree of shared policies for a shared memory region.
@@ -133,8 +182,7 @@ struct shared_policy {
        spinlock_t lock;
 };
 
-void mpol_shared_policy_init(struct shared_policy *info, int policy,
-                               nodemask_t *nodes);
+void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
 int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
@@ -149,9 +197,9 @@ extern void mpol_rebind_task(struct task_struct *tsk,
 extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
 extern void mpol_fix_fork_child_flag(struct task_struct *p);
 
-extern struct mempolicy default_policy;
 extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
-               unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol);
+                               unsigned long addr, gfp_t gfp_flags,
+                               struct mempolicy **mpol, nodemask_t **nodemask);
 extern unsigned slab_node(struct mempolicy *policy);
 
 extern enum zone_type policy_zone;
@@ -165,6 +213,13 @@ static inline void check_highest_zone(enum zone_type k)
 int do_migrate_pages(struct mm_struct *mm,
        const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);
 
+
+#ifdef CONFIG_TMPFS
+extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);
+
+extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
+                       int no_context);
+#endif
 #else
 
 struct mempolicy {};
@@ -173,19 +228,26 @@ static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
 {
        return 1;
 }
-#define vma_mpol_equal(a,b) 1
 
-#define mpol_set_vma_default(vma) do {} while(0)
+static inline void mpol_put(struct mempolicy *p)
+{
+}
+
+static inline void mpol_cond_put(struct mempolicy *pol)
+{
+}
 
-static inline void mpol_free(struct mempolicy *p)
+static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
+                                               struct mempolicy *from)
 {
+       return from;
 }
 
 static inline void mpol_get(struct mempolicy *pol)
 {
 }
 
-static inline struct mempolicy *mpol_copy(struct mempolicy *old)
+static inline struct mempolicy *mpol_dup(struct mempolicy *old)
 {
        return NULL;
 }
@@ -199,8 +261,8 @@ static inline int mpol_set_shared_policy(struct shared_policy *info,
        return -EINVAL;
 }
 
-static inline void mpol_shared_policy_init(struct shared_policy *info,
-                                       int policy, nodemask_t *nodes)
+static inline void mpol_shared_policy_init(struct shared_policy *sp,
+                                               struct mempolicy *mpol)
 {
 }
 
@@ -239,9 +301,12 @@ static inline void mpol_fix_fork_child_flag(struct task_struct *p)
 }
 
 static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
-               unsigned long addr, gfp_t gfp_flags, struct mempolicy **mpol)
+                               unsigned long addr, gfp_t gfp_flags,
+                               struct mempolicy **mpol, nodemask_t **nodemask)
 {
-       return NODE_DATA(0)->node_zonelists + gfp_zone(gfp_flags);
+       *mpol = NULL;
+       *nodemask = NULL;
+       return node_zonelist(0, gfp_flags);
 }
 
 static inline int do_migrate_pages(struct mm_struct *mm,
@@ -254,6 +319,21 @@ static inline int do_migrate_pages(struct mm_struct *mm,
 static inline void check_highest_zone(int k)
 {
 }
+
+#ifdef CONFIG_TMPFS
+static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
+                               int no_context)
+{
+       return 1;       /* error */
+}
+
+static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
+                               int no_context)
+{
+       return 0;
+}
+#endif
+
 #endif /* CONFIG_NUMA */
 #endif /* __KERNEL__ */
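The lifetime helpers are renamed to reference-count terms: mpol_dup()/mpol_put() replace mpol_copy()/mpol_free(), and mpol_cond_put() drops the reference taken by a vma's get_policy() op only when the policy is shared. A hedged sketch of both patterns, assuming CONFIG_NUMA; the callers and the elided allocation step are illustrative:

#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/sched.h>

static void my_use_vma_policy(struct vm_area_struct *vma, unsigned long addr)
{
	struct mempolicy *pol = NULL;

	if (vma->vm_ops && vma->vm_ops->get_policy)
		pol = vma->vm_ops->get_policy(vma, addr);

	/* ... allocate pages according to *pol ... */

	mpol_cond_put(pol);		/* unrefs only if MPOL_F_SHARED */
}

static struct mempolicy *my_copy_task_policy(struct task_struct *tsk)
{
	/* the duplicate starts with one reference; drop it with mpol_put() */
	return mpol_dup(tsk->mempolicy);
}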
 
index ff7df1a2222fea4d5d5056280a0fba76eb84aab9..a744383d16e956b7fbc6cf4be6f2cb179ec2c0f3 100644 (file)
@@ -208,6 +208,38 @@ struct mlx4_mtt {
        int                     page_shift;
 };
 
+enum {
+       MLX4_DB_PER_PAGE = PAGE_SIZE / 4
+};
+
+struct mlx4_db_pgdir {
+       struct list_head        list;
+       DECLARE_BITMAP(order0, MLX4_DB_PER_PAGE);
+       DECLARE_BITMAP(order1, MLX4_DB_PER_PAGE / 2);
+       unsigned long          *bits[2];
+       __be32                 *db_page;
+       dma_addr_t              db_dma;
+};
+
+struct mlx4_ib_user_db_page;
+
+struct mlx4_db {
+       __be32                  *db;
+       union {
+               struct mlx4_db_pgdir            *pgdir;
+               struct mlx4_ib_user_db_page     *user_page;
+       }                       u;
+       dma_addr_t              dma;
+       int                     index;
+       int                     order;
+};
+
+struct mlx4_hwq_resources {
+       struct mlx4_db          db;
+       struct mlx4_mtt         mtt;
+       struct mlx4_buf         buf;
+};
+
 struct mlx4_mr {
        struct mlx4_mtt         mtt;
        u64                     iova;
@@ -341,8 +373,17 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                       struct mlx4_buf *buf);
 
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
+void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
+
+int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
+                      int size, int max_direct);
+void mlx4_free_hwq_res(struct mlx4_dev *mdev, struct mlx4_hwq_resources *wqres,
+                      int size);
+
 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
-                 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq);
+                 struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
+                 int collapsed);
 void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq);
 
 int mlx4_qp_alloc(struct mlx4_dev *dev, int sqpn, struct mlx4_qp *qp);
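mlx4_alloc_hwq_res()/mlx4_free_hwq_res() bundle the buffer, MTT and doorbell allocation that queue setup previously did by hand. A hedged sketch of creating and destroying a ring with them; the sizes, the max_direct choice and the zeroed doorbell record follow common driver practice rather than anything mandated by this header:

#include <linux/mlx4/device.h>

static int my_create_ring(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
			  int nent, int entry_size)
{
	int err = mlx4_alloc_hwq_res(dev, wqres, nent * entry_size,
				     2 * PAGE_SIZE /* max_direct */);
	if (err)
		return err;

	*wqres->db.db = 0;		/* start with a clean doorbell record */
	return 0;
}

static void my_destroy_ring(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
			    int nent, int entry_size)
{
	mlx4_free_hwq_res(dev, wqres, nent * entry_size);
}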
index a5e43febee4fa550a6a4a9d145ed0c893acd76f8..7f128b266faa39044cc85e4f7175aed51664fe16 100644 (file)
@@ -296,6 +296,10 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
                  struct mlx4_qp_context *context);
 
+int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+                    struct mlx4_qp_context *context,
+                    struct mlx4_qp *qp, enum mlx4_qp_state *qp_state);
+
 static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
 {
        return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1));
index b695875d63e308bcc9086e5889c0040438f0a363..c31a9cd2a30e03a2672b8fa299c4d6a24a3fa186 100644 (file)
@@ -107,6 +107,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_ALWAYSDUMP  0x04000000      /* Always include in core dumps */
 
 #define VM_CAN_NONLINEAR 0x08000000    /* Has ->fault & does nonlinear pages */
+#define VM_MIXEDMAP    0x10000000      /* Can contain "struct page" and pure PFN pages */
 
 #ifndef VM_STACK_DEFAULT_FLAGS         /* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
@@ -164,8 +165,6 @@ struct vm_operations_struct {
        void (*open)(struct vm_area_struct * area);
        void (*close)(struct vm_area_struct * area);
        int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
-       struct page *(*nopage)(struct vm_area_struct *area,
-                       unsigned long address, int *type);
        unsigned long (*nopfn)(struct vm_area_struct *area,
                        unsigned long address);
 
@@ -173,7 +172,25 @@ struct vm_operations_struct {
         * writable, if an error is returned it will cause a SIGBUS */
        int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
 #ifdef CONFIG_NUMA
+       /*
+        * set_policy() op must add a reference to any non-NULL @new mempolicy
+        * to hold the policy upon return.  Caller should pass NULL @new to
+        * remove a policy and fall back to surrounding context--i.e. do not
+        * install a MPOL_DEFAULT policy, nor the task or system default
+        * mempolicy.
+        */
        int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+
+       /*
+        * get_policy() op must add reference [mpol_get()] to any policy at
+        * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
+        * in mm/mempolicy.c will do this automatically.
+        * get_policy() must NOT add a ref if the policy at (vma,addr) is not
+        * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
+        * If no [shared/vma] mempolicy exists at the addr, get_policy() op
+        * must return NULL--i.e., do not "fallback" to task or system default
+        * policy.
+        */
        struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
                                        unsigned long addr);
        int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
@@ -397,11 +414,11 @@ static inline void set_compound_order(struct page *page, unsigned long order)
  * we have run out of space and have to fall back to an
  * alternate (slower) way of determining the node.
  *
- *        No sparsemem: |       NODE     | ZONE | ... | FLAGS |
- * with space for node: | SECTION | NODE | ZONE | ... | FLAGS |
- *   no space for node: | SECTION |     ZONE    | ... | FLAGS |
+ * No sparsemem or sparsemem vmemmap: |       NODE     | ZONE | ... | FLAGS |
+ * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
+ * classic sparse no space for node:  | SECTION |     ZONE    | ... | FLAGS |
  */
-#ifdef CONFIG_SPARSEMEM
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 #define SECTIONS_WIDTH         SECTIONS_SHIFT
 #else
 #define SECTIONS_WIDTH         0
@@ -409,9 +426,12 @@ static inline void set_compound_order(struct page *page, unsigned long order)
 
 #define ZONES_WIDTH            ZONES_SHIFT
 
-#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= FLAGS_RESERVED
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
 #define NODES_WIDTH            NODES_SHIFT
 #else
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#error "Vmemmap: No space for nodes field in page flags"
+#endif
 #define NODES_WIDTH            0
 #endif
 
@@ -454,8 +474,8 @@ static inline void set_compound_order(struct page *page, unsigned long order)
 
 #define ZONEID_PGSHIFT         (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
 
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
-#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > FLAGS_RESERVED
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 #endif
 
 #define ZONES_MASK             ((1UL << ZONES_WIDTH) - 1)
@@ -504,10 +524,12 @@ static inline struct zone *page_zone(struct page *page)
        return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
 }
 
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 static inline unsigned long page_to_section(struct page *page)
 {
        return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 }
+#endif
 
 static inline void set_page_zone(struct page *page, enum zone_type zone)
 {
@@ -602,9 +624,12 @@ static inline struct address_space *page_mapping(struct page *page)
        struct address_space *mapping = page->mapping;
 
        VM_BUG_ON(PageSlab(page));
+#ifdef CONFIG_SWAP
        if (unlikely(PageSwapCache(page)))
                mapping = &swapper_space;
-       else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+       else
+#endif
+       if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
                mapping = NULL;
        return mapping;
 }
@@ -648,12 +673,6 @@ static inline int page_mapped(struct page *page)
        return atomic_read(&(page)->_mapcount) >= 0;
 }
 
-/*
- * Error return values for the *_nopage functions
- */
-#define NOPAGE_SIGBUS  (NULL)
-#define NOPAGE_OOM     ((struct page *) (-1))
-
 /*
  * Error return values for the *_nopfn functions
  */
@@ -720,7 +739,9 @@ struct zap_details {
        unsigned long truncate_count;           /* Compare vm_truncate_count */
 };
 
-struct page *vm_normal_page(struct vm_area_struct *, unsigned long, pte_t);
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+               pte_t pte);
+
 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
                unsigned long size, struct zap_details *);
 unsigned long unmap_vmas(struct mmu_gather **tlb,
@@ -1045,6 +1066,19 @@ extern void unlink_file_vma(struct vm_area_struct *);
 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
        unsigned long addr, unsigned long len, pgoff_t pgoff);
 extern void exit_mmap(struct mm_struct *);
+
+#ifdef CONFIG_PROC_FS
+/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
+extern void added_exe_file_vma(struct mm_struct *mm);
+extern void removed_exe_file_vma(struct mm_struct *mm);
+#else
+static inline void added_exe_file_vma(struct mm_struct *mm)
+{}
+
+static inline void removed_exe_file_vma(struct mm_struct *mm)
+{}
+#endif /* CONFIG_PROC_FS */
+
 extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
 extern int install_special_mapping(struct mm_struct *mm,
                                   unsigned long addr, unsigned long len,
@@ -1149,6 +1183,8 @@ int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn);
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn);
 
 struct page *follow_page(struct vm_area_struct *, unsigned long address,
                        unsigned int foll_flags);
@@ -1207,8 +1243,6 @@ int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
                                        void __user *, size_t *, loff_t *);
 unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                        unsigned long lru_pages);
-void drop_pagecache(void);
-void drop_slab(void);
 
 #ifndef CONFIG_MMU
 #define randomize_va_space 0
@@ -1229,6 +1263,7 @@ void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
 int vmemmap_populate_basepages(struct page *start_page,
                                                unsigned long pages, int node);
 int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
+void vmemmap_populate_print_last(void);
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
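With the ->nopage path removed and VM_MIXEDMAP/vm_insert_mixed() added, a driver whose mapping mixes struct-page-backed and raw-PFN memory installs entries from its ->fault handler. A hedged sketch; my_lookup_pfn() is a hypothetical driver helper and the flag and return-code choices follow common driver usage:

#include <linux/mm.h>
#include <linux/fs.h>

static unsigned long my_lookup_pfn(struct vm_area_struct *vma, pgoff_t pgoff);
						/* hypothetical: pgoff -> pfn */

static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long pfn = my_lookup_pfn(vma, vmf->pgoff);

	if (vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;		/* pte installed, no page returned */
}

static struct vm_operations_struct my_vm_ops = {
	.fault	= my_fault,
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_MIXEDMAP;	/* some offsets have no struct page */
	vma->vm_ops = &my_vm_ops;
	return 0;
}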
index af190ceab9719dc68b148e3b22b05b28a54c64d2..eb7c16cc95596fae106848e0a66b3b04f6f6b68a 100644 (file)
@@ -42,7 +42,10 @@ struct page {
                                         * to show when page is mapped
                                         * & limit reverse map searches.
                                         */
-               unsigned int inuse;     /* SLUB: Nr of objects */
+               struct {                /* SLUB */
+                       u16 inuse;
+                       u16 objects;
+               };
        };
        union {
            struct {
@@ -172,6 +175,7 @@ struct mm_struct {
        atomic_t mm_users;                      /* How many users with user space? */
        atomic_t mm_count;                      /* How many references to "struct mm_struct" (users count as 1) */
        int map_count;                          /* number of VMAs */
+       int core_waiters;
        struct rw_semaphore mmap_sem;
        spinlock_t page_table_lock;             /* Protects page tables and some counters */
 
@@ -216,14 +220,20 @@ struct mm_struct {
        unsigned long flags; /* Must use atomic bitops to access the bits */
 
        /* coredumping support */
-       int core_waiters;
        struct completion *core_startup_done, core_done;
 
        /* aio bits */
-       rwlock_t                ioctx_list_lock;
+       rwlock_t                ioctx_list_lock;        /* aio lock */
        struct kioctx           *ioctx_list;
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR
-       struct mem_cgroup *mem_cgroup;
+#ifdef CONFIG_MM_OWNER
+       struct task_struct *owner;      /* The thread group leader that */
+                                       /* owns the mm_struct.          */
+#endif
+
+#ifdef CONFIG_PROC_FS
+       /* store ref to file /proc/<pid>/exe symlink points to */
+       struct file *exe_file;
+       unsigned long num_exe_file_vmas;
 #endif
 };
 
index 9f274a687c7e908959136b14dcc34d583d2f2b55..c463cd8a15a4aa028830ed8590988fdbf56f745c 100644 (file)
@@ -1,8 +1,8 @@
 #ifndef _LINUX_MMZONE_H
 #define _LINUX_MMZONE_H
 
-#ifdef __KERNEL__
 #ifndef __ASSEMBLY__
+#ifndef __GENERATING_BOUNDS_H
 
 #include <linux/spinlock.h>
 #include <linux/list.h>
@@ -15,6 +15,7 @@
 #include <linux/seqlock.h>
 #include <linux/nodemask.h>
 #include <linux/pageblock-flags.h>
+#include <linux/bounds.h>
 #include <asm/atomic.h>
 #include <asm/page.h>
 
@@ -95,6 +96,7 @@ enum zone_stat_item {
        NR_UNSTABLE_NFS,        /* NFS unstable pages */
        NR_BOUNCE,
        NR_VMSCAN_WRITE,
+       NR_WRITEBACK_TEMP,      /* Writeback using temporary buffers */
 #ifdef CONFIG_NUMA
        NUMA_HIT,               /* allocated in intended node */
        NUMA_MISS,              /* allocated in non intended node */
@@ -129,6 +131,8 @@ struct per_cpu_pageset {
 #define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
 #endif
 
+#endif /* !__GENERATING_BOUNDS.H */
+
 enum zone_type {
 #ifdef CONFIG_ZONE_DMA
        /*
@@ -177,9 +181,11 @@ enum zone_type {
        ZONE_HIGHMEM,
 #endif
        ZONE_MOVABLE,
-       MAX_NR_ZONES
+       __MAX_NR_ZONES
 };
 
+#ifndef __GENERATING_BOUNDS_H
+
 /*
  * When a memory allocation must conform to specific limitations (such
  * as being suitable for DMA) the caller will pass in hints to the
@@ -188,28 +194,15 @@ enum zone_type {
  * match the requested limits. See gfp_zone() in include/linux/gfp.h
  */
 
-/*
- * Count the active zones.  Note that the use of defined(X) outside
- * #if and family is not necessarily defined so ensure we cannot use
- * it later.  Use __ZONE_COUNT to work out how many shift bits we need.
- */
-#define __ZONE_COUNT (                 \
-         defined(CONFIG_ZONE_DMA)      \
-       + defined(CONFIG_ZONE_DMA32)    \
-       + 1                             \
-       + defined(CONFIG_HIGHMEM)       \
-       + 1                             \
-)
-#if __ZONE_COUNT < 2
+#if MAX_NR_ZONES < 2
 #define ZONES_SHIFT 0
-#elif __ZONE_COUNT <= 2
+#elif MAX_NR_ZONES <= 2
 #define ZONES_SHIFT 1
-#elif __ZONE_COUNT <= 4
+#elif MAX_NR_ZONES <= 4
 #define ZONES_SHIFT 2
 #else
 #error ZONES_SHIFT -- too many zones configured adjust calculation
 #endif
-#undef __ZONE_COUNT
 
 struct zone {
        /* Fields commonly accessed by the page allocator */
@@ -393,10 +386,10 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 * The NUMA zonelists are doubled because we need zonelists that restrict the
  * allocations to a single node for GFP_THISNODE.
  *
- * [0 .. MAX_NR_ZONES -1]              : Zonelists with fallback
- * [MAZ_NR_ZONES ... MAZ_ZONELISTS -1]  : No fallback (GFP_THISNODE)
+ * [0] : Zonelist with fallback
+ * [1] : No fallback (GFP_THISNODE)
  */
-#define MAX_ZONELISTS (2 * MAX_NR_ZONES)
+#define MAX_ZONELISTS 2
 
 
 /*
@@ -464,10 +457,19 @@ struct zonelist_cache {
        unsigned long last_full_zap;            /* when last zap'd (jiffies) */
 };
 #else
-#define MAX_ZONELISTS MAX_NR_ZONES
+#define MAX_ZONELISTS 1
 struct zonelist_cache;
 #endif
 
+/*
+ * This struct contains information about a zone in a zonelist. It is stored
+ * here to avoid dereferences into large structures and lookups of tables
+ */
+struct zoneref {
+       struct zone *zone;      /* Pointer to actual zone */
+       int zone_idx;           /* zone_idx(zoneref->zone) */
+};
+
 /*
  * One allocation request operates on a zonelist. A zonelist
  * is a list of zones, the first one is the 'goal' of the
@@ -476,34 +478,23 @@ struct zonelist_cache;
  *
  * If zlcache_ptr is not NULL, then it is just the address of zlcache,
  * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
+ *
+ * To speed the reading of the zonelist, the zonerefs contain the zone index
+ * of the entry being read. Helper functions to access information given
+ * a struct zoneref are
+ *
+ * zonelist_zone()     - Return the struct zone * for an entry in _zonerefs
+ * zonelist_zone_idx() - Return the index of the zone for an entry
+ * zonelist_node_idx() - Return the index of the node for an entry
  */
-
 struct zonelist {
        struct zonelist_cache *zlcache_ptr;                  // NULL or &zlcache
-       struct zone *zones[MAX_ZONES_PER_ZONELIST + 1];      // NULL delimited
+       struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
 #ifdef CONFIG_NUMA
        struct zonelist_cache zlcache;                       // optional ...
 #endif
 };
 
-#ifdef CONFIG_NUMA
-/*
- * Only custom zonelists like MPOL_BIND need to be filtered as part of
- * policies. As described in the comment for struct zonelist_cache, these
- * zonelists will not have a zlcache so zlcache_ptr will not be set. Use
- * that to determine if the zonelists needs to be filtered or not.
- */
-static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
-{
-       return !zonelist->zlcache_ptr;
-}
-#else
-static inline int alloc_should_filter_zonelist(struct zonelist *zonelist)
-{
-       return 0;
-}
-#endif /* CONFIG_NUMA */
-
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
 struct node_active_region {
        unsigned long start_pfn;
@@ -637,9 +628,10 @@ static inline int is_normal_idx(enum zone_type idx)
 static inline int is_highmem(struct zone *zone)
 {
 #ifdef CONFIG_HIGHMEM
-       int zone_idx = zone - zone->zone_pgdat->node_zones;
-       return zone_idx == ZONE_HIGHMEM ||
-               (zone_idx == ZONE_MOVABLE && zone_movable_is_highmem());
+       int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
+       return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
+              (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
+               zone_movable_is_highmem());
 #else
        return 0;
 #endif
@@ -730,32 +722,103 @@ extern struct zone *next_zone(struct zone *zone);
             zone;                                      \
             zone = next_zone(zone))
 
-#ifdef CONFIG_SPARSEMEM
-#include <asm/sparsemem.h>
-#endif
+static inline struct zone *zonelist_zone(struct zoneref *zoneref)
+{
+       return zoneref->zone;
+}
 
-#if BITS_PER_LONG == 32
-/*
- * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
- * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes.
+static inline int zonelist_zone_idx(struct zoneref *zoneref)
+{
+       return zoneref->zone_idx;
+}
+
+static inline int zonelist_node_idx(struct zoneref *zoneref)
+{
+#ifdef CONFIG_NUMA
+       /* zone_to_nid not available in this context */
+       return zoneref->zone->node;
+#else
+       return 0;
+#endif /* CONFIG_NUMA */
+}
+
+/**
+ * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
+ * @z - The cursor used as a starting point for the search
+ * @highest_zoneidx - The zone index of the highest zone to return
+ * @nodes - An optional nodemask to filter the zonelist with
+ * @zone - The first suitable zone found is returned via this parameter
+ *
+ * This function returns the next zone at or below a given zone index that is
+ * within the allowed nodemask using a cursor as the starting point for the
+ * search. The zoneref returned is a cursor that is used as the next starting
+ * point for future calls to next_zones_zonelist().
  */
-#define FLAGS_RESERVED         9
+struct zoneref *next_zones_zonelist(struct zoneref *z,
+                                       enum zone_type highest_zoneidx,
+                                       nodemask_t *nodes,
+                                       struct zone **zone);
 
-#elif BITS_PER_LONG == 64
-/*
- * with 64 bit flags field, there's plenty of room.
+/**
+ * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
+ * @zonelist - The zonelist to search for a suitable zone
+ * @highest_zoneidx - The zone index of the highest zone to return
+ * @nodes - An optional nodemask to filter the zonelist with
+ * @zone - The first suitable zone found is returned via this parameter
+ *
+ * This function returns the first zone at or below a given zone index that is
+ * within the allowed nodemask. The zoneref returned is a cursor that can be
+ * used to iterate the zonelist with next_zones_zonelist. The cursor should
+ * not be used by the caller as it does not match the value of the zone
+ * returned.
  */
-#define FLAGS_RESERVED         32
+static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
+                                       enum zone_type highest_zoneidx,
+                                       nodemask_t *nodes,
+                                       struct zone **zone)
+{
+       return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
+                                                               zone);
+}
 
-#else
+/**
+ * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
+ * @zone - The current zone in the iterator
+ * @z - The current pointer within zonelist->zones being iterated
+ * @zlist - The zonelist being iterated
+ * @highidx - The zone index of the highest zone to return
+ * @nodemask - Nodemask allowed by the allocator
+ *
+ * This iterator iterates through all zones at or below a given zone index and
+ * within a given nodemask
+ */
+#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
+       for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone); \
+               zone;                                                   \
+               z = next_zones_zonelist(z, highidx, nodemask, &zone))   \
 
-#error BITS_PER_LONG not defined
+/**
+ * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
+ * @zone - The current zone in the iterator
+ * @z - The current pointer within zonelist->zones being iterated
+ * @zlist - The zonelist being iterated
+ * @highidx - The zone index of the highest zone to return
+ *
+ * This iterator iterates through all zones at or below a given zone index.
+ */
+#define for_each_zone_zonelist(zone, z, zlist, highidx) \
+       for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
 
+#ifdef CONFIG_SPARSEMEM
+#include <asm/sparsemem.h>
 #endif
 
 #if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
        !defined(CONFIG_ARCH_POPULATES_NODE_MAP)
-#define early_pfn_to_nid(nid)  (0UL)
+static inline unsigned long early_pfn_to_nid(unsigned long pfn)
+{
+       return 0;
+}
 #endif
 
 #ifdef CONFIG_FLATMEM
@@ -833,6 +896,7 @@ static inline struct mem_section *__nr_to_section(unsigned long nr)
        return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
 }
 extern int __section_nr(struct mem_section* ms);
+extern unsigned long usemap_size(void);
 
 /*
  * We use the lower bits of the mem_map pointer to store
@@ -938,6 +1002,6 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
 #define pfn_valid_within(pfn) (1)
 #endif
 
+#endif /* !__GENERATING_BOUNDS_H */
 #endif /* !__ASSEMBLY__ */
-#endif /* __KERNEL__ */
 #endif /* _LINUX_MMZONE_H */
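
The switch from a NULL-terminated zones[] array to zoneref cursors changes how callers walk a zonelist. A minimal sketch of the intended usage of the new iterator, not part of the patch itself; the function and variable names are illustrative only:

    #include <linux/mmzone.h>
    #include <linux/nodemask.h>

    /* Illustrative only: count how many zones this request could fall back to. */
    static int count_usable_zones(struct zonelist *zonelist,
                                  enum zone_type high_zoneidx,
                                  nodemask_t *nodemask)
    {
            struct zoneref *z;      /* cursor into zonelist->_zonerefs */
            struct zone *zone;      /* zone the cursor currently refers to */
            int nr = 0;

            /* Visits each zone at or below high_zoneidx allowed by nodemask. */
            for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, nodemask)
                    nr++;

            return nr;
    }

The loop terminates when the cursor yields a NULL zone, which replaces the old NULL sentinel in zonelist->zones.
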
index 139d49d2f078d5573f166475c99873916a041515..d73eceaa7afb481e25f623fbf9a7aec92117f808 100644 (file)
@@ -368,4 +368,15 @@ struct virtio_device_id {
 };
 #define VIRTIO_DEV_ANY_ID      0xffffffff
 
+/* i2c */
+
+#define I2C_NAME_SIZE  20
+#define I2C_MODULE_PREFIX "i2c:"
+
+struct i2c_device_id {
+       char name[I2C_NAME_SIZE];
+       kernel_ulong_t driver_data;     /* Data private to the driver */
+};
+
+
 #endif /* LINUX_MOD_DEVICETABLE_H */
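
Adding struct i2c_device_id to mod_devicetable.h moves i2c drivers onto the standard module device table matching used by other buses. A hedged sketch of how a driver would typically declare such a table; the chip names are made up for illustration:

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>

    /* Hypothetical ids; driver_data can carry a chip-variant value. */
    static const struct i2c_device_id example_ids[] = {
            { "examplechip",  0 },
            { "examplechip2", 1 },
            { }                     /* terminating empty entry */
    };
    MODULE_DEVICE_TABLE(i2c, example_ids);
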
index 819c4e889bf16f0157f45ad4326afa7e83418582..3e03b1acbc94a8ba7fbbd28e394f442c4710f2f9 100644 (file)
@@ -190,7 +190,7 @@ void *__symbol_get_gpl(const char *symbol);
        extern typeof(sym) sym;                                 \
        __CRC_SYMBOL(sym, sec)                                  \
        static const char __kstrtab_##sym[]                     \
-       __attribute__((section("__ksymtab_strings")))           \
+       __attribute__((section("__ksymtab_strings"), aligned(1))) \
        = MODULE_SYMBOL_PREFIX #sym;                            \
        static const struct kernel_symbol __ksymtab_##sym       \
        __used                                                  \
@@ -229,23 +229,6 @@ enum module_state
        MODULE_STATE_GOING,
 };
 
-/* Similar stuff for section attributes. */
-struct module_sect_attr
-{
-       struct module_attribute mattr;
-       char *name;
-       unsigned long address;
-};
-
-struct module_sect_attrs
-{
-       struct attribute_group grp;
-       int nsections;
-       struct module_sect_attr attrs[0];
-};
-
-struct module_param_attrs;
-
 struct module
 {
        enum module_state state;
index b4836d58f428993e6d7f468d742d6a99ed422e8a..4374d1adeb4b755b4227bccf1f3afaa0b214fc35 100644 (file)
@@ -10,7 +10,6 @@
  */
 #ifndef _LINUX_MOUNT_H
 #define _LINUX_MOUNT_H
-#ifdef __KERNEL__
 
 #include <linux/types.h>
 #include <linux/list.h>
@@ -114,5 +113,4 @@ extern void mark_mounts_for_expiry(struct list_head *mounts);
 extern spinlock_t vfsmount_lock;
 extern dev_t name_to_dev_t(char *name);
 
-#endif
 #endif /* _LINUX_MOUNT_H */
index f950921523f55316e2ede3f73c7e4a5d22e98b34..b03b2745741396fff9f59e31f5d75af04f98094e 100644 (file)
 #define MSDOS_DOTDOT   "..         "   /* "..", padded to MSDOS_NAME chars */
 
 /* media of boot sector */
-#define FAT_VALID_MEDIA(x)     ((0xF8 <= (x) && (x) <= 0xFF) || (x) == 0xF0)
+static inline int fat_valid_media(u8 media)
+{
+       return 0xf8 <= media || media == 0xf0;
+}
+
 #define FAT_FIRST_ENT(s, x)    ((MSDOS_SB(s)->fat_bits == 32 ? 0x0FFFFF00 : \
        MSDOS_SB(s)->fat_bits == 16 ? 0xFF00 : 0xF00) | (x))
 
@@ -195,6 +199,7 @@ struct fat_mount_options {
        char *iocharset;          /* Charset used for filename input/display */
        unsigned short shortname; /* flags for shortname display/create rule */
        unsigned char name_check; /* r = relaxed, n = normal, s = strict */
+       unsigned short allow_utime;/* permission for setting the [am]time */
        unsigned quiet:1,         /* set = fake successful chmods and chowns */
                 showexec:1,      /* set = only set x bit for com/exe/bat */
                 sys_immutable:1, /* set = system files are immutable */
@@ -232,6 +237,7 @@ struct msdos_sb_info {
        struct mutex fat_lock;
        unsigned int prev_free;      /* previously allocated cluster number */
        unsigned int free_clusters;  /* -1 if undefined */
+       unsigned int free_clus_valid; /* is free_clusters valid? */
        struct fat_mount_options options;
        struct nls_table *nls_disk;  /* Codepage used on disk */
        struct nls_table *nls_io;    /* Charset used for input and display */
@@ -401,7 +407,7 @@ extern int fat_generic_ioctl(struct inode *inode, struct file *filp,
                             unsigned int cmd, unsigned long arg);
 extern const struct file_operations fat_file_operations;
 extern const struct inode_operations fat_file_inode_operations;
-extern int fat_notify_change(struct dentry * dentry, struct iattr * attr);
+extern int fat_setattr(struct dentry * dentry, struct iattr * attr);
 extern void fat_truncate(struct inode *inode);
 extern int fat_getattr(struct vfsmount *mnt, struct dentry *dentry,
                       struct kstat *stat);
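
Replacing the FAT_VALID_MEDIA() macro with an inline keeps type checking on the media byte (a u8, so the old `<= 0xFF` half of the test was redundant). A minimal sketch of a caller, with the helper name invented for illustration:

    #include <linux/errno.h>
    #include <linux/msdos_fs.h>

    /* Illustrative check while parsing a boot sector. */
    static int example_check_media(u8 media_byte)
    {
            if (!fat_valid_media(media_byte))
                    return -EINVAL; /* only 0xf0 and 0xf8..0xff are valid */
            return 0;
    }
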
index 10a3d5a1abffb30fd9b158148b26379416cd1fe0..6f3b8e79a9912c0aa4a5d06e1d07742387fd1346 100644 (file)
@@ -49,16 +49,26 @@ struct msginfo {
        unsigned short  msgseg; 
 };
 
+/*
+ * Scaling factor to compute msgmni:
+ * the memory dedicated to msg queues (msgmni * msgmnb) should occupy
+ * at most 1/MSG_MEM_SCALE of the lowmem (see the formula in ipc/msg.c):
+ * up to 8MB       : msgmni = 16 (MSGMNI)
+ * 4 GB            : msgmni = 8K
+ * more than 16 GB : msgmni = 32K (IPCMNI)
+ */
+#define MSG_MEM_SCALE 32
+
 #define MSGMNI    16   /* <= IPCMNI */     /* max # of msg queue identifiers */
 #define MSGMAX  8192   /* <= INT_MAX */   /* max size of message (bytes) */
 #define MSGMNB 16384   /* <= INT_MAX */   /* default max size of a message queue */
 
 /* unused */
-#define MSGPOOL (MSGMNI*MSGMNB/1024)  /* size in kilobytes of message pool */
+#define MSGPOOL (MSGMNI * MSGMNB) /* size in bytes of message pool */
 #define MSGTQL  MSGMNB            /* number of system message headers */
 #define MSGMAP  MSGMNB            /* number of entries in message map */
 #define MSGSSZ  16                /* message segment size */
-#define __MSGSEG ((MSGPOOL*1024)/ MSGSSZ) /* max no. of segments */
+#define __MSGSEG (MSGPOOL / MSGSSZ) /* max no. of segments */
 #define MSGSEG (__MSGSEG <= 0xffff ? __MSGSEG : 0xffff)
 
 #ifdef __KERNEL__
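
MSG_MEM_SCALE caps the memory that message queues may pin: msgmni is sized so that msgmni * MSGMNB stays around 1/MSG_MEM_SCALE of lowmem, clamped between MSGMNI and IPCMNI. A rough worked sketch of that arithmetic in plain C (not the ipc/msg.c code; the lowmem figure is an assumed input):

    #include <stdio.h>

    #define EX_MSGMNB        16384ULL   /* default max bytes per queue */
    #define EX_MSG_MEM_SCALE 32ULL
    #define EX_MSGMNI        16ULL      /* lower clamp */
    #define EX_IPCMNI        32768ULL   /* upper clamp */

    int main(void)
    {
            unsigned long long lowmem = 4ULL << 30;   /* assume 4 GB of lowmem */
            unsigned long long msgmni = lowmem / (EX_MSG_MEM_SCALE * EX_MSGMNB);

            if (msgmni < EX_MSGMNI)
                    msgmni = EX_MSGMNI;
            if (msgmni > EX_IPCMNI)
                    msgmni = EX_IPCMNI;

            printf("msgmni ~= %llu\n", msgmni);       /* prints 8192 for 4 GB */
            return 0;
    }

This reproduces the figures in the comment above: 8K identifiers at 4 GB, bounded by MSGMNI below and IPCMNI above.
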
index 94bb46d82efd74fedc12df205e43cc2f9fc8f167..8f293922720735ee164cf4b5183f064d09e9f5a6 100644 (file)
@@ -22,6 +22,7 @@ struct msi_desc {
                __u8    masked  : 1;
                __u8    is_64   : 1;    /* Address size: 0=32bit 1=64bit  */
                __u8    pos;            /* Location of the msi capability */
+               __u32   maskbits_mask;  /* mask bits mask */
                __u16   entry_nr;       /* specific enabled entry         */
                unsigned default_irq;   /* default pre-assigned irq       */
        }msi_attrib;
index 986572081e19053b84061d7ecfc20702fbce2b59..155719dab813e2fac8b52bc5a3ae3afdbcbbc8ce 100644 (file)
@@ -56,9 +56,11 @@ struct nbd_device {
        int magic;
 
        spinlock_t queue_lock;
-       struct list_head queue_head;/* Requests are added here...       */
+       struct list_head queue_head;    /* Requests waiting result */
        struct request *active_req;
        wait_queue_head_t active_wq;
+       struct list_head waiting_queue; /* Requests to be sent */
+       wait_queue_head_t waiting_wq;
 
        struct mutex tx_lock;
        struct gendisk *disk;
@@ -86,11 +88,7 @@ struct nbd_request {
        char handle[8];
        __be64 from;
        __be32 len;
-}
-#ifdef __GNUC__
-       __attribute__ ((packed))
-#endif
-;
+} __attribute__ ((packed));
 
 /*
  * This is the reply packet that nbd-server sends back to the client after
index 88766e43e121a5a3d5e6c627f2d91f6bc9b8b7db..9f2d76347f192458f35c825b5292f1f73f9cfb02 100644 (file)
@@ -204,6 +204,7 @@ void ncp_update_inode2(struct inode *, struct ncp_entry_info *);
 /* linux/fs/ncpfs/dir.c */
 extern const struct inode_operations ncp_dir_inode_operations;
 extern const struct file_operations ncp_dir_operations;
+extern struct dentry_operations ncp_root_dentry_operations;
 int ncp_conn_logged_in(struct super_block *);
 int ncp_date_dos2unix(__le16 time, __le16 date);
 void ncp_date_unix2dos(int unix_date, __le16 * time, __le16 * date);
@@ -223,6 +224,12 @@ int ncp_disconnect(struct ncp_server *server);
 void ncp_lock_server(struct ncp_server *server);
 void ncp_unlock_server(struct ncp_server *server);
 
+/* linux/fs/ncpfs/symlink.c */
+#if defined(CONFIG_NCPFS_EXTRAS) || defined(CONFIG_NCPFS_NFS_NS)
+extern const struct address_space_operations ncp_symlink_aops;
+int ncp_symlink(struct inode*, struct dentry*, const char*);
+#endif
+
 /* linux/fs/ncpfs/file.c */
 extern const struct inode_operations ncp_file_inode_operations;
 extern const struct file_operations ncp_file_operations;
index bdb4c8ae6924e4a0acc0ad123cd43cf8848f2721..4b0bec477846730ed8263c3ad097b3572e839e7d 100644 (file)
@@ -8,8 +8,6 @@
 #ifndef _LINUX_NCP_FS_I
 #define _LINUX_NCP_FS_I
 
-#ifdef __KERNEL__
-
 /*
  * This is the ncpfs part of the inode structure. This must contain
  * all the information we need to work with an inode after creation.
@@ -28,6 +26,4 @@ struct ncp_inode_info {
        struct inode vfs_inode;
 };
 
-#endif /* __KERNEL__ */
-
 #endif /* _LINUX_NCP_FS_I */
index fb0713b6ffaf910d6d5d58462d29f5fe2db03f8f..bec1062a25a1e83146e8478f2bb8443a05fcf288 100644 (file)
@@ -166,6 +166,7 @@ struct netlink_skb_parms
        __u32                   dst_group;
        kernel_cap_t            eff_cap;
        __u32                   loginuid;       /* Login (audit) uid */
+       __u32                   sessionid;      /* Session id (audit) */
        __u32                   sid;            /* SELinux security id */
 };
 
index 905e18f4b4122cdb7de8678766e4dada26e0b807..848025cd708783795c96a35c08a6176d0fe84a9f 100644 (file)
@@ -14,6 +14,8 @@
  * bitmap_scnlistprintf() and bitmap_parselist(), also in bitmap.c.
  * For details of node_remap(), see bitmap_bitremap in lib/bitmap.c.
  * For details of nodes_remap(), see bitmap_remap in lib/bitmap.c.
+ * For details of nodes_onto(), see bitmap_onto in lib/bitmap.c.
+ * For details of nodes_fold(), see bitmap_fold in lib/bitmap.c.
  *
  * The available nodemask operations are:
  *
@@ -55,7 +57,9 @@
  * int nodelist_scnprintf(buf, len, mask) Format nodemask as list for printing
  * int nodelist_parse(buf, map)                Parse ascii string as nodelist
  * int node_remap(oldbit, old, new)    newbit = map(old, new)(oldbit)
- * int nodes_remap(dst, src, old, new) *dst = map(old, new)(dst)
+ * void nodes_remap(dst, src, old, new)        *dst = map(old, new)(src)
+ * void nodes_onto(dst, orig, relmap)  *dst = orig relative to relmap
+ * void nodes_fold(dst, orig, sz)      dst bits = orig bits mod sz
  *
  * for_each_node_mask(node, mask)      for-loop node over mask
  *
@@ -326,6 +330,22 @@ static inline void __nodes_remap(nodemask_t *dstp, const nodemask_t *srcp,
        bitmap_remap(dstp->bits, srcp->bits, oldp->bits, newp->bits, nbits);
 }
 
+#define nodes_onto(dst, orig, relmap) \
+               __nodes_onto(&(dst), &(orig), &(relmap), MAX_NUMNODES)
+static inline void __nodes_onto(nodemask_t *dstp, const nodemask_t *origp,
+               const nodemask_t *relmapp, int nbits)
+{
+       bitmap_onto(dstp->bits, origp->bits, relmapp->bits, nbits);
+}
+
+#define nodes_fold(dst, orig, sz) \
+               __nodes_fold(&(dst), &(orig), sz, MAX_NUMNODES)
+static inline void __nodes_fold(nodemask_t *dstp, const nodemask_t *origp,
+               int sz, int nbits)
+{
+       bitmap_fold(dstp->bits, origp->bits, sz, nbits);
+}
+
 #if MAX_NUMNODES > 1
 #define for_each_node_mask(node, mask)                 \
        for ((node) = first_node(mask);                 \
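
nodes_onto() and nodes_fold() wrap bitmap_onto()/bitmap_fold() for nodemasks and are meant for mempolicy code that translates a policy's nodes when the allowed node set changes. A hedged sketch of what the two mappings do, with node numbers chosen only for illustration:

    #include <linux/nodemask.h>

    /* Illustrative only: remap a policy mask relative to a new allowed set. */
    static void example_node_remap(void)
    {
            nodemask_t orig, relmap, dst, folded;

            nodes_clear(orig);
            node_set(0, orig);
            node_set(2, orig);              /* orig   = { 0, 2 }       */

            nodes_clear(relmap);
            node_set(4, relmap);
            node_set(5, relmap);
            node_set(6, relmap);
            node_set(7, relmap);            /* relmap = { 4, 5, 6, 7 } */

            /* Bit m of orig maps to the m-th set bit of relmap: dst = { 4, 6 }. */
            nodes_onto(dst, orig, relmap);

            /* Bits taken modulo 4: folded = { 0, 2 }. */
            nodes_fold(folded, dst, 4);
    }
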
index f4df40038f0c4cb54d2596cbe051dfd9bd241f0e..0ff6224d172a90c4a9dd8122c6b9358e129f53cf 100644 (file)
@@ -121,6 +121,10 @@ extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
 extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
                struct notifier_block *nb);
 
+extern int blocking_notifier_chain_cond_register(
+               struct blocking_notifier_head *nh,
+               struct notifier_block *nb);
+
 extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
                struct notifier_block *nb);
 extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
@@ -247,6 +251,7 @@ extern struct blocking_notifier_head reboot_notifier_list;
 #define VT_DEALLOCATE          0x0002 /* Console will be deallocated */
 #define VT_WRITE               0x0003 /* A char got output */
 #define VT_UPDATE              0x0004 /* A bigger update occurred */
+#define VT_PREWRITE            0x0005 /* A char is about to be written to the console */
 
 #endif /* __KERNEL__ */
 #endif /* _LINUX_NOTIFIER_H */
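
blocking_notifier_chain_cond_register() behaves like the plain register call but skips a block that is already on the chain, which suits init paths that may run more than once. A hedged sketch of a caller; the chain head and callback are invented for illustration:

    #include <linux/notifier.h>

    static BLOCKING_NOTIFIER_HEAD(example_chain);   /* hypothetical chain */

    static int example_event(struct notifier_block *nb, unsigned long action,
                             void *data)
    {
            return NOTIFY_OK;
    }

    static struct notifier_block example_nb = {
            .notifier_call = example_event,
    };

    static void example_init(void)
    {
            /* Safe to call repeatedly: a duplicate block is not added twice. */
            blocking_notifier_chain_cond_register(&example_chain, &example_nb);
    }
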
index 6dc11959770c23da51f4fa394c9b694f60aa1ff1..afe338217d91cc0b3e0166c0d9511970f4401871 100644 (file)
@@ -1,6 +1,5 @@
 #ifndef _LINUX_OF_DEVICE_H
 #define _LINUX_OF_DEVICE_H
-#ifdef __KERNEL__
 
 #include <linux/device.h>
 #include <linux/of.h>
@@ -25,5 +24,4 @@ static inline void of_device_free(struct of_device *dev)
        of_release_dev(&dev->dev);
 }
 
-#endif /* __KERNEL__ */
 #endif /* _LINUX_OF_DEVICE_H */
index 3852436b652a8a4568c16ea626829cbaa14c6aab..a7979baf1e39b102948698ebf46551db5f2034ca 100644 (file)
@@ -23,8 +23,8 @@ enum oom_constraint {
        CONSTRAINT_MEMORY_POLICY,
 };
 
-extern int try_set_zone_oom(struct zonelist *zonelist);
-extern void clear_zonelist_oom(struct zonelist *zonelist);
+extern int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_flags);
+extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
 
 extern void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order);
 extern int register_oom_notifier(struct notifier_block *nb);
index b5b30f1c1e59e84c2d9877dbe6c03e6b7e01f94c..590cff32415d92528501410ccf98bab5d3a8a216 100644 (file)
@@ -6,7 +6,10 @@
 #define PAGE_FLAGS_H
 
 #include <linux/types.h>
+#ifndef __GENERATING_BOUNDS_H
 #include <linux/mm_types.h>
+#include <linux/bounds.h>
+#endif /* !__GENERATING_BOUNDS_H */
 
 /*
  * Various page->flags bits:
  * extends from the high bits downwards.
  *
  *  | FIELD | ... | FLAGS |
- *  N-1     ^             0
- *          (N-FLAGS_RESERVED)
+ *  N-1           ^       0
+ *               (NR_PAGEFLAGS)
  *
- * The fields area is reserved for fields mapping zone, node and SPARSEMEM
- * section.  The boundry between these two areas is defined by
- * FLAGS_RESERVED which defines the width of the fields section
- * (see linux/mmzone.h).  New flags must _not_ overlap with this area.
+ * The fields area is reserved for fields mapping zone, node (for NUMA) and
+ * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
+ * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
  */
-#define PG_locked               0      /* Page is locked. Don't touch. */
-#define PG_error                1
-#define PG_referenced           2
-#define PG_uptodate             3
+enum pageflags {
+       PG_locked,              /* Page is locked. Don't touch. */
+       PG_error,
+       PG_referenced,
+       PG_uptodate,
+       PG_dirty,
+       PG_lru,
+       PG_active,
+       PG_slab,
+       PG_owner_priv_1,        /* Owner use. If pagecache, fs may use*/
+       PG_arch_1,
+       PG_reserved,
+       PG_private,             /* If pagecache, has fs-private data */
+       PG_writeback,           /* Page is under writeback */
+#ifdef CONFIG_PAGEFLAGS_EXTENDED
+       PG_head,                /* A head page */
+       PG_tail,                /* A tail page */
+#else
+       PG_compound,            /* A compound page */
+#endif
+       PG_swapcache,           /* Swap page: swp_entry_t in private */
+       PG_mappedtodisk,        /* Has blocks allocated on-disk */
+       PG_reclaim,             /* To be reclaimed asap */
+       PG_buddy,               /* Page is free, on buddy lists */
+#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+       PG_uncached,            /* Page has been mapped as uncached */
+#endif
+       __NR_PAGEFLAGS
+};
+
+#ifndef __GENERATING_BOUNDS_H
+
+/*
+ * Macros to create function definitions for page flags
+ */
+#define TESTPAGEFLAG(uname, lname)                                     \
+static inline int Page##uname(struct page *page)                       \
+                       { return test_bit(PG_##lname, &page->flags); }
 
-#define PG_dirty                4
-#define PG_lru                  5
-#define PG_active               6
-#define PG_slab                         7      /* slab debug (Suparna wants this) */
+#define SETPAGEFLAG(uname, lname)                                      \
+static inline void SetPage##uname(struct page *page)                   \
+                       { set_bit(PG_##lname, &page->flags); }
 
-#define PG_owner_priv_1                 8      /* Owner use. If pagecache, fs may use*/
-#define PG_arch_1               9
-#define PG_reserved            10
-#define PG_private             11      /* If pagecache, has fs-private data */
+#define CLEARPAGEFLAG(uname, lname)                                    \
+static inline void ClearPage##uname(struct page *page)                 \
+                       { clear_bit(PG_##lname, &page->flags); }
 
-#define PG_writeback           12      /* Page is under writeback */
-#define PG_compound            14      /* Part of a compound page */
-#define PG_swapcache           15      /* Swap page: swp_entry_t in private */
+#define __SETPAGEFLAG(uname, lname)                                    \
+static inline void __SetPage##uname(struct page *page)                 \
+                       { __set_bit(PG_##lname, &page->flags); }
 
-#define PG_mappedtodisk                16      /* Has blocks allocated on-disk */
-#define PG_reclaim             17      /* To be reclaimed asap */
-#define PG_buddy               19      /* Page is free, on buddy lists */
+#define __CLEARPAGEFLAG(uname, lname)                                  \
+static inline void __ClearPage##uname(struct page *page)               \
+                       { __clear_bit(PG_##lname, &page->flags); }
+
+#define TESTSETFLAG(uname, lname)                                      \
+static inline int TestSetPage##uname(struct page *page)                        \
+               { return test_and_set_bit(PG_##lname, &page->flags); }
+
+#define TESTCLEARFLAG(uname, lname)                                    \
+static inline int TestClearPage##uname(struct page *page)              \
+               { return test_and_clear_bit(PG_##lname, &page->flags); }
 
-/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
-#define PG_readahead           PG_reclaim /* Reminder to do async read-ahead */
 
-/* PG_owner_priv_1 users should have descriptive aliases */
-#define PG_checked             PG_owner_priv_1 /* Used by some filesystems */
-#define PG_pinned              PG_owner_priv_1 /* Xen pinned pagetable */
+#define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname)              \
+       SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)
+
+#define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname)            \
+       __SETPAGEFLAG(uname, lname)  __CLEARPAGEFLAG(uname, lname)
+
+#define PAGEFLAG_FALSE(uname)                                          \
+static inline int Page##uname(struct page *page)                       \
+                       { return 0; }
+
+#define TESTSCFLAG(uname, lname)                                       \
+       TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
+
+struct page;   /* forward declaration */
+
+PAGEFLAG(Locked, locked) TESTSCFLAG(Locked, locked)
+PAGEFLAG(Error, error)
+PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
+PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
+PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
+PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
+__PAGEFLAG(Slab, slab)
+PAGEFLAG(Checked, owner_priv_1)                /* Used by some filesystems */
+PAGEFLAG(Pinned, owner_priv_1) TESTSCFLAG(Pinned, owner_priv_1) /* Xen */
+PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
+PAGEFLAG(Private, private) __CLEARPAGEFLAG(Private, private)
+       __SETPAGEFLAG(Private, private)
 
-#if (BITS_PER_LONG > 32)
 /*
- * 64-bit-only flags build down from bit 31
- *
- * 32 bit  -------------------------------| FIELDS |       FLAGS         |
- * 64 bit  |           FIELDS             | ??????         FLAGS         |
- *         63                            32                              0
+ * Only test-and-set exist for PG_writeback.  The unconditional operators are
+ * risky: they bypass page accounting.
  */
-#define PG_uncached            31      /* Page has been mapped as uncached */
-#endif
+TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
+__PAGEFLAG(Buddy, buddy)
+PAGEFLAG(MappedToDisk, mappedtodisk)
 
+/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
+PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
+PAGEFLAG(Readahead, reclaim)           /* Reminder to do async read-ahead */
+
+#ifdef CONFIG_HIGHMEM
 /*
- * Manipulation of page state flags
+ * Must use a macro here due to header dependency issues. page_zone() is not
+ * available at this point.
  */
-#define PageLocked(page)               \
-               test_bit(PG_locked, &(page)->flags)
-#define SetPageLocked(page)            \
-               set_bit(PG_locked, &(page)->flags)
-#define TestSetPageLocked(page)                \
-               test_and_set_bit(PG_locked, &(page)->flags)
-#define ClearPageLocked(page)          \
-               clear_bit(PG_locked, &(page)->flags)
-#define TestClearPageLocked(page)      \
-               test_and_clear_bit(PG_locked, &(page)->flags)
-
-#define PageError(page)                test_bit(PG_error, &(page)->flags)
-#define SetPageError(page)     set_bit(PG_error, &(page)->flags)
-#define ClearPageError(page)   clear_bit(PG_error, &(page)->flags)
-
-#define PageReferenced(page)   test_bit(PG_referenced, &(page)->flags)
-#define SetPageReferenced(page)        set_bit(PG_referenced, &(page)->flags)
-#define ClearPageReferenced(page)      clear_bit(PG_referenced, &(page)->flags)
-#define TestClearPageReferenced(page) test_and_clear_bit(PG_referenced, &(page)->flags)
+#define PageHighMem(__p) is_highmem(page_zone(__p))
+#else
+PAGEFLAG_FALSE(HighMem)
+#endif
+
+#ifdef CONFIG_SWAP
+PAGEFLAG(SwapCache, swapcache)
+#else
+PAGEFLAG_FALSE(SwapCache)
+#endif
+
+#ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+PAGEFLAG(Uncached, uncached)
+#else
+PAGEFLAG_FALSE(Uncached)
+#endif
 
 static inline int PageUptodate(struct page *page)
 {
@@ -177,97 +241,59 @@ static inline void SetPageUptodate(struct page *page)
 #endif
 }
 
-#define ClearPageUptodate(page)        clear_bit(PG_uptodate, &(page)->flags)
-
-#define PageDirty(page)                test_bit(PG_dirty, &(page)->flags)
-#define SetPageDirty(page)     set_bit(PG_dirty, &(page)->flags)
-#define TestSetPageDirty(page) test_and_set_bit(PG_dirty, &(page)->flags)
-#define ClearPageDirty(page)   clear_bit(PG_dirty, &(page)->flags)
-#define __ClearPageDirty(page) __clear_bit(PG_dirty, &(page)->flags)
-#define TestClearPageDirty(page) test_and_clear_bit(PG_dirty, &(page)->flags)
-
-#define PageLRU(page)          test_bit(PG_lru, &(page)->flags)
-#define SetPageLRU(page)       set_bit(PG_lru, &(page)->flags)
-#define ClearPageLRU(page)     clear_bit(PG_lru, &(page)->flags)
-#define __ClearPageLRU(page)   __clear_bit(PG_lru, &(page)->flags)
-
-#define PageActive(page)       test_bit(PG_active, &(page)->flags)
-#define SetPageActive(page)    set_bit(PG_active, &(page)->flags)
-#define ClearPageActive(page)  clear_bit(PG_active, &(page)->flags)
-#define __ClearPageActive(page)        __clear_bit(PG_active, &(page)->flags)
-
-#define PageSlab(page)         test_bit(PG_slab, &(page)->flags)
-#define __SetPageSlab(page)    __set_bit(PG_slab, &(page)->flags)
-#define __ClearPageSlab(page)  __clear_bit(PG_slab, &(page)->flags)
-
-#ifdef CONFIG_HIGHMEM
-#define PageHighMem(page)      is_highmem(page_zone(page))
-#else
-#define PageHighMem(page)      0 /* needed to optimize away at compile time */
-#endif
+CLEARPAGEFLAG(Uptodate, uptodate)
 
-#define PageChecked(page)      test_bit(PG_checked, &(page)->flags)
-#define SetPageChecked(page)   set_bit(PG_checked, &(page)->flags)
-#define ClearPageChecked(page) clear_bit(PG_checked, &(page)->flags)
-
-#define PagePinned(page)       test_bit(PG_pinned, &(page)->flags)
-#define SetPagePinned(page)    set_bit(PG_pinned, &(page)->flags)
-#define ClearPagePinned(page)  clear_bit(PG_pinned, &(page)->flags)
+extern void cancel_dirty_page(struct page *page, unsigned int account_size);
 
-#define PageReserved(page)     test_bit(PG_reserved, &(page)->flags)
-#define SetPageReserved(page)  set_bit(PG_reserved, &(page)->flags)
-#define ClearPageReserved(page)        clear_bit(PG_reserved, &(page)->flags)
-#define __ClearPageReserved(page)      __clear_bit(PG_reserved, &(page)->flags)
+int test_clear_page_writeback(struct page *page);
+int test_set_page_writeback(struct page *page);
 
-#define SetPagePrivate(page)   set_bit(PG_private, &(page)->flags)
-#define ClearPagePrivate(page) clear_bit(PG_private, &(page)->flags)
-#define PagePrivate(page)      test_bit(PG_private, &(page)->flags)
-#define __SetPagePrivate(page)  __set_bit(PG_private, &(page)->flags)
-#define __ClearPagePrivate(page) __clear_bit(PG_private, &(page)->flags)
+static inline void set_page_writeback(struct page *page)
+{
+       test_set_page_writeback(page);
+}
 
+#ifdef CONFIG_PAGEFLAGS_EXTENDED
 /*
- * Only test-and-set exist for PG_writeback.  The unconditional operators are
- * risky: they bypass page accounting.
+ * System with lots of page flags available. This allows separate
+ * flags for PageHead() and PageTail() checks of compound pages so that bit
+ * tests can be used in performance sensitive paths. PageCompound is
+ * generally not used in hot code paths.
  */
-#define PageWriteback(page)    test_bit(PG_writeback, &(page)->flags)
-#define TestSetPageWriteback(page) test_and_set_bit(PG_writeback,      \
-                                                       &(page)->flags)
-#define TestClearPageWriteback(page) test_and_clear_bit(PG_writeback,  \
-                                                       &(page)->flags)
+__PAGEFLAG(Head, head)
+__PAGEFLAG(Tail, tail)
 
-#define PageBuddy(page)                test_bit(PG_buddy, &(page)->flags)
-#define __SetPageBuddy(page)   __set_bit(PG_buddy, &(page)->flags)
-#define __ClearPageBuddy(page) __clear_bit(PG_buddy, &(page)->flags)
-
-#define PageMappedToDisk(page) test_bit(PG_mappedtodisk, &(page)->flags)
-#define SetPageMappedToDisk(page) set_bit(PG_mappedtodisk, &(page)->flags)
-#define ClearPageMappedToDisk(page) clear_bit(PG_mappedtodisk, &(page)->flags)
-
-#define PageReadahead(page)    test_bit(PG_readahead, &(page)->flags)
-#define SetPageReadahead(page) set_bit(PG_readahead, &(page)->flags)
-#define ClearPageReadahead(page) clear_bit(PG_readahead, &(page)->flags)
-
-#define PageReclaim(page)      test_bit(PG_reclaim, &(page)->flags)
-#define SetPageReclaim(page)   set_bit(PG_reclaim, &(page)->flags)
-#define ClearPageReclaim(page) clear_bit(PG_reclaim, &(page)->flags)
-#define TestClearPageReclaim(page) test_and_clear_bit(PG_reclaim, &(page)->flags)
+static inline int PageCompound(struct page *page)
+{
+       return page->flags & ((1L << PG_head) | (1L << PG_tail));
 
-#define PageCompound(page)     test_bit(PG_compound, &(page)->flags)
-#define __SetPageCompound(page)        __set_bit(PG_compound, &(page)->flags)
-#define __ClearPageCompound(page) __clear_bit(PG_compound, &(page)->flags)
+}
+#else
+/*
+ * Reduce page flag use as much as possible by overlapping
+ * compound page flags with the flags used for page cache pages. Possible
+ * because PageCompound is always set for compound pages and not for
+ * pages on the LRU and/or pagecache.
+ */
+TESTPAGEFLAG(Compound, compound)
+__PAGEFLAG(Head, compound)
 
 /*
  * PG_reclaim is used in combination with PG_compound to mark the
- * head and tail of a compound page
+ * head and tail of a compound page. This saves one page flag
+ * but makes it impossible to use compound pages for the page cache.
+ * The PG_reclaim bit would have to be used for reclaim or readahead
+ * if compound pages enter the page cache.
  *
  * PG_compound & PG_reclaim    => Tail page
  * PG_compound & ~PG_reclaim   => Head page
  */
-
 #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
 
-#define PageTail(page) (((page)->flags & PG_head_tail_mask)    \
-                               == PG_head_tail_mask)
+static inline int PageTail(struct page *page)
+{
+       return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
+}
 
 static inline void __SetPageTail(struct page *page)
 {
@@ -279,33 +305,6 @@ static inline void __ClearPageTail(struct page *page)
        page->flags &= ~PG_head_tail_mask;
 }
 
-#define PageHead(page) (((page)->flags & PG_head_tail_mask)    \
-                               == (1L << PG_compound))
-#define __SetPageHead(page)    __SetPageCompound(page)
-#define __ClearPageHead(page)  __ClearPageCompound(page)
-
-#ifdef CONFIG_SWAP
-#define PageSwapCache(page)    test_bit(PG_swapcache, &(page)->flags)
-#define SetPageSwapCache(page) set_bit(PG_swapcache, &(page)->flags)
-#define ClearPageSwapCache(page) clear_bit(PG_swapcache, &(page)->flags)
-#else
-#define PageSwapCache(page)    0
-#endif
-
-#define PageUncached(page)     test_bit(PG_uncached, &(page)->flags)
-#define SetPageUncached(page)  set_bit(PG_uncached, &(page)->flags)
-#define ClearPageUncached(page)        clear_bit(PG_uncached, &(page)->flags)
-
-struct page;   /* forward declaration */
-
-extern void cancel_dirty_page(struct page *page, unsigned int account_size);
-
-int test_clear_page_writeback(struct page *page);
-int test_set_page_writeback(struct page *page);
-
-static inline void set_page_writeback(struct page *page)
-{
-       test_set_page_writeback(page);
-}
-
+#endif /* !CONFIG_PAGEFLAGS_EXTENDED */
+#endif /* !__GENERATING_BOUNDS_H */
 #endif /* PAGE_FLAGS_H */
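
The PAGEFLAG()/__PAGEFLAG()/TESTSCFLAG() family replaces the long list of hand-written #defines with generated inline accessors. As a rough illustration (an approximate manual expansion, not extra code from the patch), a single line such as PAGEFLAG(Dirty, dirty) stands for:

    /* Approximate expansion of PAGEFLAG(Dirty, dirty): */
    static inline int PageDirty(struct page *page)
    {
            return test_bit(PG_dirty, &page->flags);
    }

    static inline void SetPageDirty(struct page *page)
    {
            set_bit(PG_dirty, &page->flags);
    }

    static inline void ClearPageDirty(struct page *page)
    {
            clear_bit(PG_dirty, &page->flags);
    }

The TESTSCFLAG() and __SETPAGEFLAG()/__CLEARPAGEFLAG() variants add the test-and-set/clear and non-atomic forms in the same way.
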
index 292491324b010f1184b48241bbc066f9377c8800..96acd0dae2419bd42fe102e60df664304c83cfba 100644 (file)
@@ -20,8 +20,6 @@
 /* Include the pci register defines */
 #include <linux/pci_regs.h>
 
-struct pci_vpd;
-
 /*
  * The PCI interface treats multi-function devices as independent
  * devices.  The slot/function address of each device is encoded
@@ -131,6 +129,8 @@ struct pci_cap_saved_state {
 };
 
 struct pcie_link_state;
+struct pci_vpd;
+
 /*
  * The pci_dev structure is used to describe PCI devices.
  */
@@ -254,7 +254,7 @@ static inline void pci_add_saved_cap(struct pci_dev *pci_dev,
 #define PCI_NUM_RESOURCES      11
 
 #ifndef PCI_BUS_NUM_RESOURCES
-#define PCI_BUS_NUM_RESOURCES  8
+#define PCI_BUS_NUM_RESOURCES  16
 #endif
 
 #define PCI_REGION_FLAG_MASK   0x0fU   /* These bits of resource flags tell us the PCI region flags */
@@ -666,6 +666,7 @@ int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
 
 void pci_walk_bus(struct pci_bus *top, void (*cb)(struct pci_dev *, void *),
                  void *userdata);
+int pci_cfg_space_size_ext(struct pci_dev *dev, unsigned check_exp_pcix);
 int pci_cfg_space_size(struct pci_dev *dev);
 unsigned char pci_bus_max_busnr(struct pci_bus *bus);
 
@@ -701,6 +702,8 @@ static inline int pci_enable_msi(struct pci_dev *dev)
        return -1;
 }
 
+static inline void pci_msi_shutdown(struct pci_dev *dev)
+{ }
 static inline void pci_disable_msi(struct pci_dev *dev)
 { }
 
@@ -710,6 +713,8 @@ static inline int pci_enable_msix(struct pci_dev *dev,
        return -1;
 }
 
+static inline void pci_msix_shutdown(struct pci_dev *dev)
+{ }
 static inline void pci_disable_msix(struct pci_dev *dev)
 { }
 
@@ -720,9 +725,11 @@ static inline void pci_restore_msi_state(struct pci_dev *dev)
 { }
 #else
 extern int pci_enable_msi(struct pci_dev *dev);
+extern void pci_msi_shutdown(struct pci_dev *dev);
 extern void pci_disable_msi(struct pci_dev *dev);
 extern int pci_enable_msix(struct pci_dev *dev,
        struct msix_entry *entries, int nvec);
+extern void pci_msix_shutdown(struct pci_dev *dev);
 extern void pci_disable_msix(struct pci_dev *dev);
 extern void msi_remove_pci_irq_vectors(struct pci_dev *dev);
 extern void pci_restore_msi_state(struct pci_dev *dev);
@@ -1053,5 +1060,13 @@ extern unsigned long pci_cardbus_mem_size;
 
 extern int pcibios_add_platform_entries(struct pci_dev *dev);
 
+#ifdef CONFIG_PCI_MMCONFIG
+extern void __init pci_mmcfg_early_init(void);
+extern void __init pci_mmcfg_late_init(void);
+#else
+static inline void pci_mmcfg_early_init(void) { }
+static inline void pci_mmcfg_late_init(void) { }
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* LINUX_PCI_H */
index 70eb3c803d47825118844484c60384b523534f41..cf6dbd759395fbb4a9680e52f0dd960bbbac6605 100644 (file)
 #define PCI_DEVICE_ID_NEO_2DB9PRI       0x00C9
 #define PCI_DEVICE_ID_NEO_2RJ45         0x00CA
 #define PCI_DEVICE_ID_NEO_2RJ45PRI      0x00CB
+#define PCIE_DEVICE_ID_NEO_4_IBM        0x00F4
 
 #define PCI_VENDOR_ID_XIRCOM           0x115d
 #define PCI_DEVICE_ID_XIRCOM_RBM56G    0x0101
 #define PCI_DEVICE_ID_INTEL_82443GX_0  0x71a0
 #define PCI_DEVICE_ID_INTEL_82443GX_2  0x71a2
 #define PCI_DEVICE_ID_INTEL_82372FB_1  0x7601
+#define PCI_DEVICE_ID_INTEL_SCH_LPC    0x8119
+#define PCI_DEVICE_ID_INTEL_SCH_IDE    0x811a
 #define PCI_DEVICE_ID_INTEL_82454GX    0x84c4
 #define PCI_DEVICE_ID_INTEL_82450GX    0x84c5
 #define PCI_DEVICE_ID_INTEL_82451NX    0x84ca
index 1ac969724bb2fff929c397be1cbdcdf007a61953..d746a2abb322590a7e891762638e774ec0efe2b5 100644 (file)
@@ -4,7 +4,6 @@
 #include <linux/preempt.h>
 #include <linux/slab.h> /* For kmalloc() */
 #include <linux/smp.h>
-#include <linux/string.h> /* For memset() */
 #include <linux/cpumask.h>
 
 #include <asm/percpu.h>
index 012cd558189b443e6f3856426313cd783c6a36ce..a84e9ff9b27e9d59cfd1f2d0e8b3403e21b07ca0 100644 (file)
@@ -105,10 +105,6 @@ struct exec_domain {
  */
 #define personality(pers)      (pers & PER_MASK)
 
-/*
- * Personality of the currently running process.
- */
-#define get_personality                (current->personality)
 
 /*
  * Change personality of the currently running process.
index 96f4048a6cc37bd1a8970c30a1037c1f5f27ad04..02268c54c250767b8ec3eda32b4c68edfe768ff5 100644 (file)
@@ -27,14 +27,17 @@ struct phm_regs {
 
 #define PH_IOC_MAGIC           'p'
 #define PHN_GET_REG            _IOWR(PH_IOC_MAGIC, 0, struct phm_reg *)
-#define PHN_SET_REG            _IOW (PH_IOC_MAGIC, 1, struct phm_reg *)
+#define PHN_SET_REG            _IOW(PH_IOC_MAGIC, 1, struct phm_reg *)
 #define PHN_GET_REGS           _IOWR(PH_IOC_MAGIC, 2, struct phm_regs *)
-#define PHN_SET_REGS           _IOW (PH_IOC_MAGIC, 3, struct phm_regs *)
+#define PHN_SET_REGS           _IOW(PH_IOC_MAGIC, 3, struct phm_regs *)
 /* this ioctl tells the driver, that the caller is not OpenHaptics and might
  * use improved registers update (no more phantom switchoffs when using
  * libphantom) */
-#define PHN_NOT_OH             _IO  (PH_IOC_MAGIC, 4)
-#define PH_IOC_MAXNR           4
+#define PHN_NOT_OH             _IO(PH_IOC_MAGIC, 4)
+#define PHN_GETREG             _IOWR(PH_IOC_MAGIC, 5, struct phm_reg)
+#define PHN_SETREG             _IOW(PH_IOC_MAGIC, 6, struct phm_reg)
+#define PHN_GETREGS            _IOWR(PH_IOC_MAGIC, 7, struct phm_regs)
+#define PHN_SETREGS            _IOW(PH_IOC_MAGIC, 8, struct phm_regs)
 
 #define PHN_CONTROL            0x6     /* control byte in iaddr space */
 #define PHN_CTL_AMP            0x1     /*   switch after torques change */
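
The new PHN_GETREG/PHN_SETREG numbers encode the structure itself (struct phm_reg) rather than a pointer type, so the generated ioctl numbers carry the real argument size. A hedged user-space sketch, assuming the exported linux/phantom.h header and a /dev/phantom0 node; the register index is arbitrary:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/phantom.h>

    int main(void)
    {
            struct phm_reg reg = { .reg = 0 };       /* assumed register index */
            int fd = open("/dev/phantom0", O_RDWR);  /* assumed device node */

            if (fd < 0)
                    return 1;
            if (ioctl(fd, PHN_GETREG, &reg) == 0)
                    printf("reg %u = 0x%x\n", reg.reg, reg.value);
            close(fd);
            return 0;
    }
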
index c7980810eb09d96becad505f2f9349e9a9a56685..c21c7e8124a7bb7bde92937fc37cf0810598d091 100644 (file)
@@ -60,7 +60,7 @@ struct pid
        /* lists of tasks that use this pid */
        struct hlist_head tasks[PIDTYPE_MAX];
        struct rcu_head rcu;
-       int level;
+       unsigned int level;
        struct upid numbers[1];
 };
 
@@ -89,9 +89,11 @@ extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
  * attach_pid() and detach_pid() must be called with the tasklist_lock
  * write-held.
  */
-extern int attach_pid(struct task_struct *task, enum pid_type type,
-                     struct pid *pid);
+extern void attach_pid(struct task_struct *task, enum pid_type type,
+                       struct pid *pid);
 extern void detach_pid(struct task_struct *task, enum pid_type);
+extern void change_pid(struct task_struct *task, enum pid_type,
+                       struct pid *pid);
 extern void transfer_pid(struct task_struct *old, struct task_struct *new,
                         enum pid_type);
 
index fcd61fa2c833e415ab823e03532bd4880b16be79..caff5283d15c6e11fe838ffad6b2877077af490e 100644 (file)
@@ -20,7 +20,7 @@ struct pid_namespace {
        int last_pid;
        struct task_struct *child_reaper;
        struct kmem_cache *pid_cachep;
-       int level;
+       unsigned int level;
        struct pid_namespace *parent;
 #ifdef CONFIG_PROC_FS
        struct vfsmount *proc_mnt;
index 1de72cbbe0d1af6a4ea9891adb659aaddfcd478d..39a7ee859b671cfc57f5fcd9e063fc3031e96f84 100644 (file)
@@ -21,8 +21,6 @@
 #ifndef _LINUX_PM_H
 #define _LINUX_PM_H
 
-#ifdef __KERNEL__
-
 #include <linux/list.h>
 #include <asm/atomic.h>
 #include <asm/errno.h>
@@ -225,6 +223,4 @@ extern unsigned int pm_flags;
 #define PM_APM 1
 #define PM_ACPI        2
 
-#endif /* __KERNEL__ */
-
 #endif /* _LINUX_PM_H */
index b2f05c230f4b8db61984950489173980c003178a..63b128d512fb5f413085b8b13d144d97cf41477f 100644 (file)
 #ifndef _LINUX_PNP_H
 #define _LINUX_PNP_H
 
-#ifdef __KERNEL__
-
 #include <linux/device.h>
 #include <linux/list.h>
 #include <linux/errno.h>
 #include <linux/mod_devicetable.h>
 
-#define PNP_MAX_PORT           40
-#define PNP_MAX_MEM            24
-#define PNP_MAX_IRQ            2
-#define PNP_MAX_DMA            2
 #define PNP_NAME_LEN           50
 
 struct pnp_protocol;
 struct pnp_dev;
+struct pnp_resource_table;
 
 /*
  * Resource Management
  */
+struct resource *pnp_get_resource(struct pnp_dev *, unsigned int, unsigned int);
+
+static inline int pnp_resource_valid(struct resource *res)
+{
+       if (res && !(res->flags & IORESOURCE_UNSET))
+               return 1;
+       return 0;
+}
+
+static inline resource_size_t pnp_resource_len(struct resource *res)
+{
+       if (res->start == 0 && res->end == 0)
+               return 0;
+       return res->end - res->start + 1;
+}
+
+
+static inline resource_size_t pnp_port_start(struct pnp_dev *dev,
+                                            unsigned int bar)
+{
+       return pnp_get_resource(dev, IORESOURCE_IO, bar)->start;
+}
+
+static inline resource_size_t pnp_port_end(struct pnp_dev *dev,
+                                          unsigned int bar)
+{
+       return pnp_get_resource(dev, IORESOURCE_IO, bar)->end;
+}
+
+static inline unsigned long pnp_port_flags(struct pnp_dev *dev,
+                                          unsigned int bar)
+{
+       return pnp_get_resource(dev, IORESOURCE_IO, bar)->flags;
+}
+
+static inline int pnp_port_valid(struct pnp_dev *dev, unsigned int bar)
+{
+       return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_IO, bar));
+}
+
+static inline resource_size_t pnp_port_len(struct pnp_dev *dev,
+                                          unsigned int bar)
+{
+       return pnp_resource_len(pnp_get_resource(dev, IORESOURCE_IO, bar));
+}
+
+
+static inline resource_size_t pnp_mem_start(struct pnp_dev *dev,
+                                           unsigned int bar)
+{
+       return pnp_get_resource(dev, IORESOURCE_MEM, bar)->start;
+}
+
+static inline resource_size_t pnp_mem_end(struct pnp_dev *dev,
+                                         unsigned int bar)
+{
+       return pnp_get_resource(dev, IORESOURCE_MEM, bar)->end;
+}
+
+static inline unsigned long pnp_mem_flags(struct pnp_dev *dev, unsigned int bar)
+{
+       return pnp_get_resource(dev, IORESOURCE_MEM, bar)->flags;
+}
+
+static inline int pnp_mem_valid(struct pnp_dev *dev, unsigned int bar)
+{
+       return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_MEM, bar));
+}
+
+static inline resource_size_t pnp_mem_len(struct pnp_dev *dev,
+                                         unsigned int bar)
+{
+       return pnp_resource_len(pnp_get_resource(dev, IORESOURCE_MEM, bar));
+}
+
+
+static inline resource_size_t pnp_irq(struct pnp_dev *dev, unsigned int bar)
+{
+       return pnp_get_resource(dev, IORESOURCE_IRQ, bar)->start;
+}
+
+static inline unsigned long pnp_irq_flags(struct pnp_dev *dev, unsigned int bar)
+{
+       return pnp_get_resource(dev, IORESOURCE_IRQ, bar)->flags;
+}
+
+static inline int pnp_irq_valid(struct pnp_dev *dev, unsigned int bar)
+{
+       return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_IRQ, bar));
+}
+
+
+static inline resource_size_t pnp_dma(struct pnp_dev *dev, unsigned int bar)
+{
+       return pnp_get_resource(dev, IORESOURCE_DMA, bar)->start;
+}
+
+static inline unsigned long pnp_dma_flags(struct pnp_dev *dev, unsigned int bar)
+{
+       return pnp_get_resource(dev, IORESOURCE_DMA, bar)->flags;
+}
+
+static inline int pnp_dma_valid(struct pnp_dev *dev, unsigned int bar)
+{
+       return pnp_resource_valid(pnp_get_resource(dev, IORESOURCE_DMA, bar));
+}
 
-/* Use these instead of directly reading pnp_dev to get resource information */
-#define pnp_port_start(dev,bar)   ((dev)->res.port_resource[(bar)].start)
-#define pnp_port_end(dev,bar)     ((dev)->res.port_resource[(bar)].end)
-#define pnp_port_flags(dev,bar)   ((dev)->res.port_resource[(bar)].flags)
-#define pnp_port_valid(dev,bar) \
-       ((pnp_port_flags((dev),(bar)) & (IORESOURCE_IO | IORESOURCE_UNSET)) \
-               == IORESOURCE_IO)
-#define pnp_port_len(dev,bar) \
-       ((pnp_port_start((dev),(bar)) == 0 &&   \
-         pnp_port_end((dev),(bar)) ==          \
-         pnp_port_start((dev),(bar))) ? 0 :    \
-                                               \
-        (pnp_port_end((dev),(bar)) -           \
-         pnp_port_start((dev),(bar)) + 1))
-
-#define pnp_mem_start(dev,bar)   ((dev)->res.mem_resource[(bar)].start)
-#define pnp_mem_end(dev,bar)     ((dev)->res.mem_resource[(bar)].end)
-#define pnp_mem_flags(dev,bar)   ((dev)->res.mem_resource[(bar)].flags)
-#define pnp_mem_valid(dev,bar) \
-       ((pnp_mem_flags((dev),(bar)) & (IORESOURCE_MEM | IORESOURCE_UNSET)) \
-               == IORESOURCE_MEM)
-#define pnp_mem_len(dev,bar) \
-       ((pnp_mem_start((dev),(bar)) == 0 &&    \
-         pnp_mem_end((dev),(bar)) ==           \
-         pnp_mem_start((dev),(bar))) ? 0 :     \
-                                               \
-        (pnp_mem_end((dev),(bar)) -            \
-         pnp_mem_start((dev),(bar)) + 1))
-
-#define pnp_irq(dev,bar)        ((dev)->res.irq_resource[(bar)].start)
-#define pnp_irq_flags(dev,bar)  ((dev)->res.irq_resource[(bar)].flags)
-#define pnp_irq_valid(dev,bar) \
-       ((pnp_irq_flags((dev),(bar)) & (IORESOURCE_IRQ | IORESOURCE_UNSET)) \
-               == IORESOURCE_IRQ)
-
-#define pnp_dma(dev,bar)        ((dev)->res.dma_resource[(bar)].start)
-#define pnp_dma_flags(dev,bar)  ((dev)->res.dma_resource[(bar)].flags)
-#define pnp_dma_valid(dev,bar) \
-       ((pnp_dma_flags((dev),(bar)) & (IORESOURCE_DMA | IORESOURCE_UNSET)) \
-               == IORESOURCE_DMA)
 
 #define PNP_PORT_FLAG_16BITADDR        (1<<0)
 #define PNP_PORT_FLAG_FIXED    (1<<1)
@@ -118,13 +179,6 @@ struct pnp_option {
        struct pnp_option *next;        /* used to chain dependent resources */
 };
 
-struct pnp_resource_table {
-       struct resource port_resource[PNP_MAX_PORT];
-       struct resource mem_resource[PNP_MAX_MEM];
-       struct resource dma_resource[PNP_MAX_DMA];
-       struct resource irq_resource[PNP_MAX_IRQ];
-};
-
 /*
  * Device Management
  */
@@ -194,10 +248,9 @@ struct pnp_dev {
        int capabilities;
        struct pnp_option *independent;
        struct pnp_option *dependent;
-       struct pnp_resource_table res;
+       struct pnp_resource_table *res;
 
        char name[PNP_NAME_LEN];        /* contains a human-readable name */
-       unsigned short regs;            /* ISAPnP: supported registers */
        int flags;                      /* used by protocols */
        struct proc_dir_entry *procent; /* device entry in /proc/bus/isapnp */
        void *data;
@@ -328,8 +381,8 @@ struct pnp_protocol {
        char *name;
 
        /* resource control functions */
-       int (*get) (struct pnp_dev *dev, struct pnp_resource_table *res);
-       int (*set) (struct pnp_dev *dev, struct pnp_resource_table *res);
+       int (*get) (struct pnp_dev *dev);
+       int (*set) (struct pnp_dev *dev);
        int (*disable) (struct pnp_dev *dev);
 
        /* protocol specific suspend/resume */
@@ -358,20 +411,12 @@ extern struct bus_type pnp_bus_type;
 #if defined(CONFIG_PNP)
 
 /* device management */
-int pnp_register_protocol(struct pnp_protocol *protocol);
-void pnp_unregister_protocol(struct pnp_protocol *protocol);
-int pnp_add_device(struct pnp_dev *dev);
 int pnp_device_attach(struct pnp_dev *pnp_dev);
 void pnp_device_detach(struct pnp_dev *pnp_dev);
 extern struct list_head pnp_global;
 extern int pnp_platform_devices;
 
 /* multidevice card support */
-int pnp_add_card(struct pnp_card *card);
-void pnp_remove_card(struct pnp_card *card);
-int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev);
-void pnp_remove_card_device(struct pnp_dev *dev);
-int pnp_add_card_id(struct pnp_id *id, struct pnp_card *card);
 struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink,
                                        const char *id, struct pnp_dev *from);
 void pnp_release_card_device(struct pnp_dev *dev);
@@ -380,77 +425,42 @@ void pnp_unregister_card_driver(struct pnp_card_driver *drv);
 extern struct list_head pnp_cards;
 
 /* resource management */
-struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev);
-struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev,
-                                                int priority);
-int pnp_register_irq_resource(struct pnp_option *option, struct pnp_irq *data);
-int pnp_register_dma_resource(struct pnp_option *option, struct pnp_dma *data);
-int pnp_register_port_resource(struct pnp_option *option,
-                              struct pnp_port *data);
-int pnp_register_mem_resource(struct pnp_option *option, struct pnp_mem *data);
-void pnp_init_resource_table(struct pnp_resource_table *table);
-int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table *res,
-                         int mode);
 int pnp_auto_config_dev(struct pnp_dev *dev);
-int pnp_validate_config(struct pnp_dev *dev);
 int pnp_start_dev(struct pnp_dev *dev);
 int pnp_stop_dev(struct pnp_dev *dev);
 int pnp_activate_dev(struct pnp_dev *dev);
 int pnp_disable_dev(struct pnp_dev *dev);
-void pnp_resource_change(struct resource *resource, resource_size_t start,
-                        resource_size_t size);
 
 /* protocol helpers */
 int pnp_is_active(struct pnp_dev *dev);
 int compare_pnp_id(struct pnp_id *pos, const char *id);
-int pnp_add_id(struct pnp_id *id, struct pnp_dev *dev);
 int pnp_register_driver(struct pnp_driver *drv);
 void pnp_unregister_driver(struct pnp_driver *drv);
 
 #else
 
 /* device management */
-static inline int pnp_register_protocol(struct pnp_protocol *protocol) { return -ENODEV; }
-static inline void pnp_unregister_protocol(struct pnp_protocol *protocol) { }
-static inline int pnp_init_device(struct pnp_dev *dev) { return -ENODEV; }
-static inline int pnp_add_device(struct pnp_dev *dev) { return -ENODEV; }
 static inline int pnp_device_attach(struct pnp_dev *pnp_dev) { return -ENODEV; }
 static inline void pnp_device_detach(struct pnp_dev *pnp_dev) { }
 
 #define pnp_platform_devices 0
 
 /* multidevice card support */
-static inline int pnp_add_card(struct pnp_card *card) { return -ENODEV; }
-static inline void pnp_remove_card(struct pnp_card *card) { }
-static inline int pnp_add_card_device(struct pnp_card *card, struct pnp_dev *dev) { return -ENODEV; }
-static inline void pnp_remove_card_device(struct pnp_dev *dev) { }
-static inline int pnp_add_card_id(struct pnp_id *id, struct pnp_card *card) { return -ENODEV; }
 static inline struct pnp_dev *pnp_request_card_device(struct pnp_card_link *clink, const char *id, struct pnp_dev *from) { return NULL; }
 static inline void pnp_release_card_device(struct pnp_dev *dev) { }
 static inline int pnp_register_card_driver(struct pnp_card_driver *drv) { return -ENODEV; }
 static inline void pnp_unregister_card_driver(struct pnp_card_driver *drv) { }
 
 /* resource management */
-static inline struct pnp_option *pnp_register_independent_option(struct pnp_dev *dev) { return NULL; }
-static inline struct pnp_option *pnp_register_dependent_option(struct pnp_dev *dev, int priority) { return NULL; }
-static inline int pnp_register_irq_resource(struct pnp_option *option, struct pnp_irq *data) { return -ENODEV; }
-static inline int pnp_register_dma_resource(struct pnp_option *option, struct pnp_dma *data) { return -ENODEV; }
-static inline int pnp_register_port_resource(struct pnp_option *option, struct pnp_port *data) { return -ENODEV; }
-static inline int pnp_register_mem_resource(struct pnp_option *option, struct pnp_mem *data) { return -ENODEV; }
-static inline void pnp_init_resource_table(struct pnp_resource_table *table) { }
-static inline int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table *res, int mode) { return -ENODEV; }
 static inline int pnp_auto_config_dev(struct pnp_dev *dev) { return -ENODEV; }
-static inline int pnp_validate_config(struct pnp_dev *dev) { return -ENODEV; }
 static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
 static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }
 static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; }
 static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; }
-static inline void pnp_resource_change(struct resource *resource, resource_size_t start, resource_size_t size) { }
 
 /* protocol helpers */
 static inline int pnp_is_active(struct pnp_dev *dev) { return 0; }
 static inline int compare_pnp_id(struct pnp_id *pos, const char *id) { return -ENODEV; }
-static inline int pnp_add_id(struct pnp_id *id, struct pnp_dev *dev) { return -ENODEV; }
 static inline int pnp_register_driver(struct pnp_driver *drv) { return -ENODEV; }
 static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
 
@@ -466,6 +476,4 @@ static inline void pnp_unregister_driver(struct pnp_driver *drv) { }
 #define pnp_dbg(format, arg...) do {} while (0)
 #endif
 
-#endif /* __KERNEL__ */
-
 #endif /* _LINUX_PNP_H */
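
After the removals above, include/linux/pnp.h keeps the driver-facing calls (device attach/detach, activation, ID matching and pnp_register_driver()). A hypothetical minimal PNP driver sketch, not part of this diff — the "my_" names and the example ID are illustration-only:

#include <linux/init.h>
#include <linux/pnp.h>

static const struct pnp_device_id my_pnp_ids[] = {
	{ .id = "PNP0501", .driver_data = 0 },	/* illustrative ID */
	{ .id = "" }
};

static int my_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *id)
{
	/* resource accessors such as pnp_port_valid()/pnp_port_start()
	 * remain available to drivers */
	if (!pnp_port_valid(dev, 0))
		return -ENODEV;
	return 0;
}

static struct pnp_driver my_pnp_driver = {
	.name		= "my_pnp",
	.id_table	= my_pnp_ids,
	.probe		= my_pnp_probe,
};

static int __init my_pnp_init(void)
{
	return pnp_register_driver(&my_pnp_driver);
}
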
diff --git a/include/linux/pnpbios.h b/include/linux/pnpbios.h
deleted file mode 100644 (file)
index 329192a..0000000
--- a/include/linux/pnpbios.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Include file for the interface to a PnP BIOS
- *
- * Original BIOS code (C) 1998 Christian Schmidt (chr.schmidt@tu-bs.de)
- * PnP handler parts (c) 1998 Tom Lees <tom@lpsg.demon.co.uk>
- * Minor reorganizations by David Hinds <dahinds@users.sourceforge.net>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2, or (at your option) any
- * later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#ifndef _LINUX_PNPBIOS_H
-#define _LINUX_PNPBIOS_H
-
-#ifdef __KERNEL__
-
-#include <linux/types.h>
-#include <linux/pnp.h>
-
-/*
- * Return codes
- */
-#define PNP_SUCCESS                     0x00
-#define PNP_NOT_SET_STATICALLY          0x7f
-#define PNP_UNKNOWN_FUNCTION            0x81
-#define PNP_FUNCTION_NOT_SUPPORTED      0x82
-#define PNP_INVALID_HANDLE              0x83
-#define PNP_BAD_PARAMETER               0x84
-#define PNP_SET_FAILED                  0x85
-#define PNP_EVENTS_NOT_PENDING          0x86
-#define PNP_SYSTEM_NOT_DOCKED           0x87
-#define PNP_NO_ISA_PNP_CARDS            0x88
-#define PNP_UNABLE_TO_DETERMINE_DOCK_CAPABILITIES 0x89
-#define PNP_CONFIG_CHANGE_FAILED_NO_BATTERY 0x8a
-#define PNP_CONFIG_CHANGE_FAILED_RESOURCE_CONFLICT 0x8b
-#define PNP_BUFFER_TOO_SMALL            0x8c
-#define PNP_USE_ESCD_SUPPORT            0x8d
-#define PNP_MESSAGE_NOT_SUPPORTED       0x8e
-#define PNP_HARDWARE_ERROR              0x8f
-
-#define ESCD_SUCCESS                    0x00
-#define ESCD_IO_ERROR_READING           0x55
-#define ESCD_INVALID                    0x56
-#define ESCD_BUFFER_TOO_SMALL           0x59
-#define ESCD_NVRAM_TOO_SMALL            0x5a
-#define ESCD_FUNCTION_NOT_SUPPORTED     0x81
-
-/*
- * Events that can be received by "get event"
- */
-#define PNPEV_ABOUT_TO_CHANGE_CONFIG   0x0001
-#define PNPEV_DOCK_CHANGED             0x0002
-#define PNPEV_SYSTEM_DEVICE_CHANGED    0x0003
-#define PNPEV_CONFIG_CHANGED_FAILED    0x0004
-#define PNPEV_UNKNOWN_SYSTEM_EVENT     0xffff
-/* 0x8000 through 0xfffe are OEM defined */
-
-/*
- * Messages that should be sent through "send message"
- */
-#define PNPMSG_OK                      0x00
-#define PNPMSG_ABORT                   0x01
-#define PNPMSG_UNDOCK_DEFAULT_ACTION   0x40
-#define PNPMSG_POWER_OFF               0x41
-#define PNPMSG_PNP_OS_ACTIVE           0x42
-#define PNPMSG_PNP_OS_INACTIVE         0x43
-
-/*
- * Plug and Play BIOS flags
- */
-#define PNPBIOS_NO_DISABLE             0x0001
-#define PNPBIOS_NO_CONFIG              0x0002
-#define PNPBIOS_OUTPUT                 0x0004
-#define PNPBIOS_INPUT                  0x0008
-#define PNPBIOS_BOOTABLE               0x0010
-#define PNPBIOS_DOCK                   0x0020
-#define PNPBIOS_REMOVABLE              0x0040
-#define pnpbios_is_static(x) (((x)->flags & 0x0100) == 0x0000)
-#define pnpbios_is_dynamic(x) ((x)->flags & 0x0080)
-
-/*
- * Function Parameters
- */
-#define PNPMODE_STATIC 1
-#define PNPMODE_DYNAMIC 0
-
-/* 0x8000 through 0xffff are OEM defined */
-
-#pragma pack(1)
-struct pnp_dev_node_info {
-       __u16 no_nodes;
-       __u16 max_node_size;
-};
-struct pnp_docking_station_info {
-       __u32 location_id;
-       __u32 serial;
-       __u16 capabilities;
-};
-struct pnp_isa_config_struc {
-       __u8 revision;
-       __u8 no_csns;
-       __u16 isa_rd_data_port;
-       __u16 reserved;
-};
-struct escd_info_struc {
-       __u16 min_escd_write_size;
-       __u16 escd_size;
-       __u32 nv_storage_base;
-};
-struct pnp_bios_node {
-       __u16 size;
-       __u8 handle;
-       __u32 eisa_id;
-       __u8 type_code[3];
-       __u16 flags;
-       __u8 data[0];
-};
-#pragma pack()
-
-#ifdef CONFIG_PNPBIOS
-
-/* non-exported */
-extern struct pnp_dev_node_info node_info;
-
-extern int pnp_bios_dev_node_info(struct pnp_dev_node_info *data);
-extern int pnp_bios_get_dev_node(u8 *nodenum, char config,
-                                struct pnp_bios_node *data);
-extern int pnp_bios_set_dev_node(u8 nodenum, char config,
-                                struct pnp_bios_node *data);
-extern int pnp_bios_get_stat_res(char *info);
-extern int pnp_bios_isapnp_config(struct pnp_isa_config_struc *data);
-extern int pnp_bios_escd_info(struct escd_info_struc *data);
-extern int pnp_bios_read_escd(char *data, u32 nvram_base);
-extern int pnp_bios_dock_station_info(struct pnp_docking_station_info *data);
-
-#endif /* CONFIG_PNPBIOS */
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_PNPBIOS_H */
diff --git a/include/linux/poison.h b/include/linux/poison.h
index a9c31be7052c4bcbd4593963ebf32303b338e04f..9f31683728fd7e2c59f738ae35ab6835066270c1 100644 (file)
 #define LIST_POISON1  ((void *) 0x00100100)
 #define LIST_POISON2  ((void *) 0x00200200)
 
+/********** include/linux/timer.h **********/
+/*
+ * Magic number "tsta" to indicate a static timer initializer
+ * for the object debugging code.
+ */
+#define TIMER_ENTRY_STATIC     ((void *) 0x74737461)
+
 /********** mm/slab.c **********/
 /*
  * Magic nums for obj red zoning.
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 5c80b1939636ec556f775692aa5ed27aba5986b4..5ad79198d6f99809343dd063131b0ba48db8a063 100644 (file)
@@ -16,7 +16,8 @@
 # define PR_UNALIGN_NOPRINT    1       /* silently fix up unaligned user accesses */
 # define PR_UNALIGN_SIGBUS     2       /* generate SIGBUS on unaligned user access */
 
-/* Get/set whether or not to drop capabilities on setuid() away from uid 0 */
+/* Get/set whether or not to drop capabilities on setuid() away from
+ * uid 0 (as per security/commoncap.c) */
 #define PR_GET_KEEPCAPS   7
 #define PR_SET_KEEPCAPS   8
 
@@ -63,7 +64,7 @@
 #define PR_GET_SECCOMP 21
 #define PR_SET_SECCOMP 22
 
-/* Get/set the capability bounding set */
+/* Get/set the capability bounding set (as per security/commoncap.c) */
 #define PR_CAPBSET_READ 23
 #define PR_CAPBSET_DROP 24
 
@@ -73,4 +74,8 @@
 # define PR_TSC_ENABLE         1       /* allow the use of the timestamp counter */
 # define PR_TSC_SIGSEGV                2       /* throw a SIGSEGV instead of reading the TSC */
 
+/* Get/set securebits (as per security/commoncap.c) */
+#define PR_GET_SECUREBITS 27
+#define PR_SET_SECUREBITS 28
+
 #endif /* _LINUX_PRCTL_H */
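
PR_GET_SECUREBITS and PR_SET_SECUREBITS expose the new per-task securebits word (see the securebits.h and sched.h hunks further down). A hypothetical userspace sketch, assuming the commoncap prctl handler takes the full replacement bit mask as arg2; the bit values mirror the securebits.h change below:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_GET_SECUREBITS
# define PR_GET_SECUREBITS 27
# define PR_SET_SECUREBITS 28
#endif

int main(void)
{
	int bits = prctl(PR_GET_SECUREBITS, 0, 0, 0, 0);

	if (bits < 0) {
		perror("PR_GET_SECUREBITS");
		return 1;
	}
	/* bit 4 = SECURE_KEEP_CAPS, bit 5 = its lock (see securebits.h) */
	if (prctl(PR_SET_SECUREBITS, bits | (1 << 4) | (1 << 5), 0, 0, 0) < 0)
		perror("PR_SET_SECUREBITS");
	return 0;
}

Changing the bits is gated on CAP_SETPCAP in the capability code, so an unprivileged run of this sketch is expected to fail with EPERM on the set.
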
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 9b6c935f69cf6d1986677b968bb17c2ca000e4b0..9883bc9422621c86b9417a111f8ad1b9d14403dd 100644 (file)
@@ -9,7 +9,6 @@
 
 struct net;
 struct completion;
-
 /*
  * The proc filesystem constants/structures
  */
@@ -41,7 +40,7 @@ enum {
  * /proc file has a parent, but "subdir" is NULL for all
  * non-directory entries).
  *
- * "get_info" is called at "read", while "owner" is used to protect module
+ * "owner" is used to protect module
  * from unloading while proc_dir_entry is in use
  */
 
@@ -49,7 +48,6 @@ typedef       int (read_proc_t)(char *page, char **start, off_t off,
                          int count, int *eof, void *data);
 typedef        int (write_proc_t)(struct file *file, const char __user *buffer,
                           unsigned long count, void *data);
-typedef int (get_info_t)(char *, char **, off_t, int);
 
 struct proc_dir_entry {
        unsigned int low_ino;
@@ -70,7 +68,6 @@ struct proc_dir_entry {
         * somewhere.
         */
        const struct file_operations *proc_fops;
-       get_info_t *get_info;
        struct module *owner;
        struct proc_dir_entry *next, *parent, *subdir;
        void *data;
@@ -97,10 +94,6 @@ struct vmcore {
 
 #ifdef CONFIG_PROC_FS
 
-extern struct proc_dir_entry proc_root;
-extern struct proc_dir_entry *proc_root_fs;
-extern struct proc_dir_entry *proc_bus;
-extern struct proc_dir_entry *proc_root_driver;
 extern struct proc_dir_entry *proc_root_kcore;
 
 extern spinlock_t proc_subdir_lock;
@@ -123,9 +116,10 @@ void de_put(struct proc_dir_entry *de);
 
 extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
                                                struct proc_dir_entry *parent);
-struct proc_dir_entry *proc_create(const char *name, mode_t mode,
+struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
                                struct proc_dir_entry *parent,
-                               const struct file_operations *proc_fops);
+                               const struct file_operations *proc_fops,
+                               void *data);
 extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent);
 
 extern struct vfsmount *proc_mnt;
@@ -180,6 +174,12 @@ extern struct proc_dir_entry *proc_mkdir(const char *,struct proc_dir_entry *);
 extern struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
                        struct proc_dir_entry *parent);
 
+static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
+       struct proc_dir_entry *parent, const struct file_operations *proc_fops)
+{
+       return proc_create_data(name, mode, parent, proc_fops, NULL);
+}
+
 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
        mode_t mode, struct proc_dir_entry *base, 
        read_proc_t *read_proc, void * data)
@@ -192,24 +192,19 @@ static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
        return res;
 }
  
-static inline struct proc_dir_entry *create_proc_info_entry(const char *name,
-       mode_t mode, struct proc_dir_entry *base, get_info_t *get_info)
-{
-       struct proc_dir_entry *res=create_proc_entry(name,mode,base);
-       if (res) res->get_info=get_info;
-       return res;
-}
-
 extern struct proc_dir_entry *proc_net_fops_create(struct net *net,
        const char *name, mode_t mode, const struct file_operations *fops);
 extern void proc_net_remove(struct net *net, const char *name);
 extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
        struct proc_dir_entry *parent);
 
-#else
+/* While the {get|set|dup}_mm_exe_file functions are for mm_structs, they are
+ * only needed to implement /proc/<pid>|self/exe so we define them here. */
+extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
+extern struct file *get_mm_exe_file(struct mm_struct *mm);
+extern void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm);
 
-#define proc_root_driver NULL
-#define proc_bus NULL
+#else
 
 #define proc_net_fops_create(net, name, mode, fops)  ({ (void)(mode), NULL; })
 static inline void proc_net_remove(struct net *net, const char *name) {}
@@ -226,6 +221,12 @@ static inline struct proc_dir_entry *proc_create(const char *name,
 {
        return NULL;
 }
+static inline struct proc_dir_entry *proc_create_data(const char *name,
+       mode_t mode, struct proc_dir_entry *parent,
+       const struct file_operations *proc_fops, void *data)
+{
+       return NULL;
+}
 #define remove_proc_entry(name, parent) do {} while (0)
 
 static inline struct proc_dir_entry *proc_symlink(const char *name,
@@ -236,16 +237,11 @@ static inline struct proc_dir_entry *proc_mkdir(const char *name,
 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
        mode_t mode, struct proc_dir_entry *base, 
        read_proc_t *read_proc, void * data) { return NULL; }
-static inline struct proc_dir_entry *create_proc_info_entry(const char *name,
-       mode_t mode, struct proc_dir_entry *base, get_info_t *get_info)
-       { return NULL; }
 
 struct tty_driver;
 static inline void proc_tty_register_driver(struct tty_driver *driver) {};
 static inline void proc_tty_unregister_driver(struct tty_driver *driver) {};
 
-extern struct proc_dir_entry proc_root;
-
 static inline int pid_ns_prepare_proc(struct pid_namespace *ns)
 {
        return 0;
@@ -255,6 +251,19 @@ static inline void pid_ns_release_proc(struct pid_namespace *ns)
 {
 }
 
+static inline void set_mm_exe_file(struct mm_struct *mm,
+                                  struct file *new_exe_file)
+{}
+
+static inline struct file *get_mm_exe_file(struct mm_struct *mm)
+{
+       return NULL;
+}
+
+static inline void dup_mm_exe_file(struct mm_struct *oldmm,
+                                  struct mm_struct *newmm)
+{}
+
 #endif /* CONFIG_PROC_FS */
 
 #if !defined(CONFIG_PROC_KCORE)
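
proc_create_data() attaches the caller's private pointer at creation time, and proc_create() becomes a thin wrapper passing NULL, as the inline above shows. A hypothetical driver-side sketch, not part of this diff (the "mydev_" names are illustration-only); it recovers the pointer through PDE(inode)->data in the open handler:

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int mydev_show(struct seq_file *m, void *v)
{
	seq_printf(m, "private data at %p\n", m->private);
	return 0;
}

static int mydev_open(struct inode *inode, struct file *file)
{
	return single_open(file, mydev_show, PDE(inode)->data);
}

static const struct file_operations mydev_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= mydev_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* at probe time, with "dev" being the per-device structure: */
proc_create_data("driver/mydev", 0444, NULL, &mydev_proc_fops, dev);
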
diff --git a/include/linux/profile.h b/include/linux/profile.h
index ff576d1db67de07a090f40bcd93d75c71d879500..05c1cc736937fcc48d5df8781a6ad1b391f73807 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _LINUX_PROFILE_H
 #define _LINUX_PROFILE_H
 
-#ifdef __KERNEL__
-
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/cpumask.h>
@@ -118,6 +116,4 @@ static inline void unregister_timer_hook(int (*hook)(struct pt_regs *))
 
 #endif /* CONFIG_PROFILING */
 
-#endif /* __KERNEL__ */
-
 #endif /* _LINUX_PROFILE_H */
diff --git a/include/linux/proportions.h b/include/linux/proportions.h
index 2c3b3cad92bec050b18605cffe39784f774f09d0..5afc1b23346d1f04536ea9339c1790b1cceeef3a 100644 (file)
@@ -77,6 +77,19 @@ void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
        local_irq_restore(flags);
 }
 
+/*
+ * Limit the time part in order to ensure there are some bits left for the
+ * cycle counter and fraction multiply.
+ */
+#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
+
+#define PROP_FRAC_SHIFT                (BITS_PER_LONG - PROP_MAX_SHIFT - 1)
+#define PROP_FRAC_BASE         (1UL << PROP_FRAC_SHIFT)
+
+void __prop_inc_percpu_max(struct prop_descriptor *pd,
+                          struct prop_local_percpu *pl, long frac);
+
+
 /*
  * ----- SINGLE ------
  */
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index ebe0c17039cf0f8dec6d6207da29052463a610d3..f98501ba557eea676a827aed770d57b0b4b12c56 100644 (file)
@@ -98,6 +98,10 @@ extern void ptrace_untrace(struct task_struct *child);
 extern int ptrace_may_attach(struct task_struct *task);
 extern int __ptrace_may_attach(struct task_struct *task);
 
+static inline int ptrace_reparented(struct task_struct *child)
+{
+       return child->real_parent != child->parent;
+}
 static inline void ptrace_link(struct task_struct *child,
                               struct task_struct *new_parent)
 {
diff --git a/include/linux/quota.h b/include/linux/quota.h
index eb560d031acd16de3209e175dddae4dc4de718e7..dcddfb2009479f42a543363d0c0873c76d4bfcee 100644 (file)
@@ -202,10 +202,14 @@ struct quota_format_type;
 
 struct mem_dqinfo {
        struct quota_format_type *dqi_format;
+       int dqi_fmt_id;         /* Id of the dqi_format - used when turning
+                                * quotas on after remount RW */
        struct list_head dqi_dirty_list;        /* List of dirty dquots */
        unsigned long dqi_flags;
        unsigned int dqi_bgrace;
        unsigned int dqi_igrace;
+       qsize_t dqi_maxblimit;
+       qsize_t dqi_maxilimit;
        union {
                struct v1_mem_dqinfo v1_i;
                struct v2_mem_dqinfo v2_i;
@@ -296,8 +300,8 @@ struct dquot_operations {
 
 /* Operations handling requests from userspace */
 struct quotactl_ops {
-       int (*quota_on)(struct super_block *, int, int, char *);
-       int (*quota_off)(struct super_block *, int);
+       int (*quota_on)(struct super_block *, int, int, char *, int);
+       int (*quota_off)(struct super_block *, int, int);
        int (*quota_sync)(struct super_block *, int);
        int (*get_info)(struct super_block *, int, struct if_dqinfo *);
        int (*set_info)(struct super_block *, int, struct if_dqinfo *);
@@ -318,6 +322,10 @@ struct quota_format_type {
 
 #define DQUOT_USR_ENABLED      0x01            /* User diskquotas enabled */
 #define DQUOT_GRP_ENABLED      0x02            /* Group diskquotas enabled */
+#define DQUOT_USR_SUSPENDED    0x04            /* User diskquotas are off, but
+                                                * we have necessary info in
+                                                * memory to turn them on */
+#define DQUOT_GRP_SUSPENDED    0x08            /* The same for group quotas */
 
 struct quota_info {
        unsigned int flags;                     /* Flags for diskquotas on this device */
@@ -329,17 +337,19 @@ struct quota_info {
        struct quota_format_ops *ops[MAXQUOTAS];        /* Operations for each type */
 };
 
-/* Inline would be better but we need to dereference super_block which is not defined yet */
-int mark_dquot_dirty(struct dquot *dquot);
-
-#define dquot_dirty(dquot) test_bit(DQ_MOD_B, &(dquot)->dq_flags)
-
 #define sb_has_quota_enabled(sb, type) ((type)==USRQUOTA ? \
        (sb_dqopt(sb)->flags & DQUOT_USR_ENABLED) : (sb_dqopt(sb)->flags & DQUOT_GRP_ENABLED))
 
 #define sb_any_quota_enabled(sb) (sb_has_quota_enabled(sb, USRQUOTA) | \
                                  sb_has_quota_enabled(sb, GRPQUOTA))
 
+#define sb_has_quota_suspended(sb, type) \
+       ((type) == USRQUOTA ? (sb_dqopt(sb)->flags & DQUOT_USR_SUSPENDED) : \
+                             (sb_dqopt(sb)->flags & DQUOT_GRP_SUSPENDED))
+
+#define sb_any_quota_suspended(sb) (sb_has_quota_suspended(sb, USRQUOTA) | \
+                                 sb_has_quota_suspended(sb, GRPQUOTA))
+
 int register_quota_format(struct quota_format_type *fmt);
 void unregister_quota_format(struct quota_format_type *fmt);
 
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 5110201a415949e5e10bd8381ff4f38e120320cf..f867020538539738cafa8ac22130379889452833 100644 (file)
@@ -37,11 +37,11 @@ extern int dquot_release(struct dquot *dquot);
 extern int dquot_commit_info(struct super_block *sb, int type);
 extern int dquot_mark_dquot_dirty(struct dquot *dquot);
 
-extern int vfs_quota_on(struct super_block *sb, int type, int format_id, char *path);
+extern int vfs_quota_on(struct super_block *sb, int type, int format_id,
+               char *path, int remount);
 extern int vfs_quota_on_mount(struct super_block *sb, char *qf_name,
                int format_id, int type);
-extern int vfs_quota_off(struct super_block *sb, int type);
-#define vfs_quota_off_mount(sb, type) vfs_quota_off(sb, type)
+extern int vfs_quota_off(struct super_block *sb, int type, int remount);
 extern int vfs_quota_sync(struct super_block *sb, int type);
 extern int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
 extern int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
@@ -59,7 +59,7 @@ extern struct quotactl_ops vfs_quotactl_ops;
 
 /* It is better to call this function outside of any transaction as it might
  * need a lot of space in journal for dquot structure allocation. */
-static __inline__ void DQUOT_INIT(struct inode *inode)
+static inline void DQUOT_INIT(struct inode *inode)
 {
        BUG_ON(!inode->i_sb);
        if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode))
@@ -67,7 +67,7 @@ static __inline__ void DQUOT_INIT(struct inode *inode)
 }
 
 /* The same as with DQUOT_INIT */
-static __inline__ void DQUOT_DROP(struct inode *inode)
+static inline void DQUOT_DROP(struct inode *inode)
 {
        /* Here we can get arbitrary inode from clear_inode() so we have
         * to be careful. OTOH we don't need locking as quota operations
@@ -90,7 +90,7 @@ static __inline__ void DQUOT_DROP(struct inode *inode)
 
 /* The following allocation/freeing/transfer functions *must* be called inside
  * a transaction (deadlocks possible otherwise) */
-static __inline__ int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
+static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
 {
        if (sb_any_quota_enabled(inode->i_sb)) {
                /* Used space is updated in alloc_space() */
@@ -102,7 +102,7 @@ static __inline__ int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t
        return 0;
 }
 
-static __inline__ int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr)
+static inline int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr)
 {
        int ret;
         if (!(ret =  DQUOT_PREALLOC_SPACE_NODIRTY(inode, nr)))
@@ -110,7 +110,7 @@ static __inline__ int DQUOT_PREALLOC_SPACE(struct inode *inode, qsize_t nr)
        return ret;
 }
 
-static __inline__ int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
+static inline int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
 {
        if (sb_any_quota_enabled(inode->i_sb)) {
                /* Used space is updated in alloc_space() */
@@ -122,7 +122,7 @@ static __inline__ int DQUOT_ALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
        return 0;
 }
 
-static __inline__ int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr)
+static inline int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr)
 {
        int ret;
        if (!(ret = DQUOT_ALLOC_SPACE_NODIRTY(inode, nr)))
@@ -130,7 +130,7 @@ static __inline__ int DQUOT_ALLOC_SPACE(struct inode *inode, qsize_t nr)
        return ret;
 }
 
-static __inline__ int DQUOT_ALLOC_INODE(struct inode *inode)
+static inline int DQUOT_ALLOC_INODE(struct inode *inode)
 {
        if (sb_any_quota_enabled(inode->i_sb)) {
                DQUOT_INIT(inode);
@@ -140,7 +140,7 @@ static __inline__ int DQUOT_ALLOC_INODE(struct inode *inode)
        return 0;
 }
 
-static __inline__ void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
+static inline void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
 {
        if (sb_any_quota_enabled(inode->i_sb))
                inode->i_sb->dq_op->free_space(inode, nr);
@@ -148,19 +148,19 @@ static __inline__ void DQUOT_FREE_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
                inode_sub_bytes(inode, nr);
 }
 
-static __inline__ void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr)
+static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr)
 {
        DQUOT_FREE_SPACE_NODIRTY(inode, nr);
        mark_inode_dirty(inode);
 }
 
-static __inline__ void DQUOT_FREE_INODE(struct inode *inode)
+static inline void DQUOT_FREE_INODE(struct inode *inode)
 {
        if (sb_any_quota_enabled(inode->i_sb))
                inode->i_sb->dq_op->free_inode(inode, 1);
 }
 
-static __inline__ int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
+static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
 {
        if (sb_any_quota_enabled(inode->i_sb) && !IS_NOQUOTA(inode)) {
                DQUOT_INIT(inode);
@@ -171,14 +171,32 @@ static __inline__ int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
 }
 
 /* The following two functions cannot be called inside a transaction */
-#define DQUOT_SYNC(sb) sync_dquots(sb, -1)
+static inline void DQUOT_SYNC(struct super_block *sb)
+{
+       sync_dquots(sb, -1);
+}
 
-static __inline__ int DQUOT_OFF(struct super_block *sb)
+static inline int DQUOT_OFF(struct super_block *sb, int remount)
 {
        int ret = -ENOSYS;
 
-       if (sb_any_quota_enabled(sb) && sb->s_qcop && sb->s_qcop->quota_off)
-               ret = sb->s_qcop->quota_off(sb, -1);
+       if (sb->s_qcop && sb->s_qcop->quota_off)
+               ret = sb->s_qcop->quota_off(sb, -1, remount);
+       return ret;
+}
+
+static inline int DQUOT_ON_REMOUNT(struct super_block *sb)
+{
+       int cnt;
+       int ret = 0, err;
+
+       if (!sb->s_qcop || !sb->s_qcop->quota_on)
+               return -ENOSYS;
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               err = sb->s_qcop->quota_on(sb, cnt, 0, NULL, 1);
+               if (err < 0 && !ret)
+                       ret = err;
+       }
        return ret;
 }
 
@@ -189,13 +207,43 @@ static __inline__ int DQUOT_OFF(struct super_block *sb)
  */
 #define sb_dquot_ops                           (NULL)
 #define sb_quotactl_ops                                (NULL)
-#define DQUOT_INIT(inode)                      do { } while(0)
-#define DQUOT_DROP(inode)                      do { } while(0)
-#define DQUOT_ALLOC_INODE(inode)               (0)
-#define DQUOT_FREE_INODE(inode)                        do { } while(0)
-#define DQUOT_SYNC(sb)                         do { } while(0)
-#define DQUOT_OFF(sb)                          do { } while(0)
-#define DQUOT_TRANSFER(inode, iattr)           (0)
+
+static inline void DQUOT_INIT(struct inode *inode)
+{
+}
+
+static inline void DQUOT_DROP(struct inode *inode)
+{
+}
+
+static inline int DQUOT_ALLOC_INODE(struct inode *inode)
+{
+       return 0;
+}
+
+static inline void DQUOT_FREE_INODE(struct inode *inode)
+{
+}
+
+static inline void DQUOT_SYNC(struct super_block *sb)
+{
+}
+
+static inline int DQUOT_OFF(struct super_block *sb, int remount)
+{
+       return 0;
+}
+
+static inline int DQUOT_ON_REMOUNT(struct super_block *sb)
+{
+       return 0;
+}
+
+static inline int DQUOT_TRANSFER(struct inode *inode, struct iattr *iattr)
+{
+       return 0;
+}
+
 static inline int DQUOT_PREALLOC_SPACE_NODIRTY(struct inode *inode, qsize_t nr)
 {
        inode_add_bytes(inode, nr);
@@ -235,11 +283,38 @@ static inline void DQUOT_FREE_SPACE(struct inode *inode, qsize_t nr)
 
 #endif /* CONFIG_QUOTA */
 
-#define DQUOT_PREALLOC_BLOCK_NODIRTY(inode, nr)        DQUOT_PREALLOC_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_PREALLOC_BLOCK(inode, nr)        DQUOT_PREALLOC_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_ALLOC_BLOCK_NODIRTY(inode, nr) DQUOT_ALLOC_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_ALLOC_BLOCK(inode, nr) DQUOT_ALLOC_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_FREE_BLOCK_NODIRTY(inode, nr) DQUOT_FREE_SPACE_NODIRTY(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
-#define DQUOT_FREE_BLOCK(inode, nr) DQUOT_FREE_SPACE(inode, ((qsize_t)(nr)) << (inode)->i_sb->s_blocksize_bits)
+static inline int DQUOT_PREALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr)
+{
+       return DQUOT_PREALLOC_SPACE_NODIRTY(inode,
+                       nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline int DQUOT_PREALLOC_BLOCK(struct inode *inode, qsize_t nr)
+{
+       return DQUOT_PREALLOC_SPACE(inode,
+                       nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline int DQUOT_ALLOC_BLOCK_NODIRTY(struct inode *inode, qsize_t nr)
+{
+       return DQUOT_ALLOC_SPACE_NODIRTY(inode,
+                       nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline int DQUOT_ALLOC_BLOCK(struct inode *inode, qsize_t nr)
+{
+       return DQUOT_ALLOC_SPACE(inode,
+                       nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline void DQUOT_FREE_BLOCK_NODIRTY(struct inode *inode, qsize_t nr)
+{
+       DQUOT_FREE_SPACE_NODIRTY(inode, nr << inode->i_sb->s_blocksize_bits);
+}
+
+static inline void DQUOT_FREE_BLOCK(struct inode *inode, qsize_t nr)
+{
+       DQUOT_FREE_SPACE(inode, nr << inode->i_sb->s_blocksize_bits);
+}
 
 #endif /* _LINUX_QUOTAOPS_ */
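
With the extra remount argument, DQUOT_OFF(sb, 1) suspends quotas instead of dropping them (the new DQUOT_*_SUSPENDED flags and dqi_fmt_id keep enough state in memory), and DQUOT_ON_REMOUNT() re-enables them once the filesystem goes writable again. A hypothetical remount-path sketch, not taken from any particular filesystem ("my_remount_fs" is an illustration-only name; <linux/fs.h> and <linux/quotaops.h> are assumed to be included):

static int my_remount_fs(struct super_block *sb, int *flags, char *data)
{
	if (*flags & MS_RDONLY)
		DQUOT_OFF(sb, 1);	/* suspend; state is kept for resume */
	else if (sb->s_flags & MS_RDONLY)
		DQUOT_ON_REMOUNT(sb);	/* resume previously suspended quotas */

	/* filesystem-specific remount work would follow here */
	return 0;
}
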
diff --git a/include/linux/raid/md.h b/include/linux/raid/md.h
index 8ab630b67fcca8c3d0d58237915f2a8b3f0d936a..81a1a02d4566103b9dffc54771b4bde0d61f3358 100644 (file)
@@ -94,6 +94,7 @@ extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
 extern void md_do_sync(mddev_t *mddev);
 extern void md_new_event(mddev_t *mddev);
 extern void md_allow_write(mddev_t *mddev);
+extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
 
 #endif /* CONFIG_MD */
 #endif 
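
md_wait_for_blocked_rdev() pairs with the Blocked rdev flag added in the md_k.h hunk below: a personality can park a write until an externally managed metadata handler has acknowledged the error and cleared the flag. A hypothetical call-site sketch (assumption: the caller must not hold locks the metadata handler needs in order to clear the flag):

static void my_wait_if_blocked(mddev_t *mddev, mdk_rdev_t *rdev)
{
	if (test_bit(Blocked, &rdev->flags))
		md_wait_for_blocked_rdev(rdev, mddev);
}
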
diff --git a/include/linux/raid/md_k.h b/include/linux/raid/md_k.h
index 7bb6d1abf71e85944bc20e82243cc88df8353e71..812ffa590cff097ce5303266088fa1ec7ffe55a0 100644 (file)
@@ -84,6 +84,10 @@ struct mdk_rdev_s
 #define        AllReserved     6               /* If whole device is reserved for
                                         * one array */
 #define        AutoDetected    7               /* added by auto-detect */
+#define Blocked                8               /* An error occured on an externally
+                                        * managed array, don't allow writes
+                                        * until it is cleared */
+       wait_queue_head_t blocked_wait;
 
        int desc_nr;                    /* descriptor index in the superblock */
        int raid_disk;                  /* role of device in array */
diff --git a/include/linux/raid/raid5.h b/include/linux/raid/raid5.h
index 93678f57ccbe280d3b681aa5c31a2c5049c32a6a..f0827d31ae6fab718158499c10aaa50979527b17 100644 (file)
@@ -252,6 +252,8 @@ struct r6_state {
 #define        STRIPE_EXPANDING        9
 #define        STRIPE_EXPAND_SOURCE    10
 #define        STRIPE_EXPAND_READY     11
+#define        STRIPE_IO_STARTED       12 /* do not count towards 'bypass_count' */
+#define        STRIPE_FULL_WRITE       13 /* all blocks are set to be overwritten */
 /*
  * Operations flags (in issue order)
  */
@@ -316,12 +318,17 @@ struct raid5_private_data {
        int                     previous_raid_disks;
 
        struct list_head        handle_list; /* stripes needing handling */
+       struct list_head        hold_list; /* preread ready stripes */
        struct list_head        delayed_list; /* stripes that have plugged requests */
        struct list_head        bitmap_list; /* stripes delaying awaiting bitmap update */
        struct bio              *retry_read_aligned; /* currently retrying aligned bios   */
        struct bio              *retry_read_aligned_list; /* aligned bios retry list  */
        atomic_t                preread_active_stripes; /* stripes with scheduled io */
        atomic_t                active_aligned_reads;
+       atomic_t                pending_full_writes; /* full write backlog */
+       int                     bypass_count; /* bypassed prereads */
+       int                     bypass_threshold; /* preread nice */
+       struct list_head        *last_hold; /* detect hold_list promotions */
 
        atomic_t                reshape_stripes; /* stripes with pending writes for reshape */
        /* unfortunately we need two cache names as we temporarily have
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index b3dccd68629e1c0481dd41471798ec25884fafd8..b3aa05baab8ac439a569936e0d5bf60135f81fab 100644 (file)
@@ -33,8 +33,6 @@
 #ifndef __LINUX_RCUCLASSIC_H
 #define __LINUX_RCUCLASSIC_H
 
-#ifdef __KERNEL__
-
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
@@ -163,5 +161,4 @@ extern long rcu_batches_completed_bh(void);
 #define rcu_enter_nohz()       do { } while (0)
 #define rcu_exit_nohz()                do { } while (0)
 
-#endif /* __KERNEL__ */
 #endif /* __LINUX_RCUCLASSIC_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 37a642c54871f24166bec8e9b23b73e3369848bb..8082d6587a0f6544a1ec6c28af922dc01a4b90ea 100644 (file)
@@ -33,8 +33,6 @@
 #ifndef __LINUX_RCUPDATE_H
 #define __LINUX_RCUPDATE_H
 
-#ifdef __KERNEL__
-
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
@@ -245,5 +243,4 @@ extern long rcu_batches_completed_bh(void);
 extern void rcu_init(void);
 extern int rcu_needs_cpu(int cpu);
 
-#endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index d038aa6e5ee1b4dd64cba0e8e193439c16f010f7..8a05c7e20bc4e780c7765d34a2f9abda11436658 100644 (file)
@@ -33,8 +33,6 @@
 #ifndef __LINUX_RCUPREEMPT_H
 #define __LINUX_RCUPREEMPT_H
 
-#ifdef __KERNEL__
-
 #include <linux/cache.h>
 #include <linux/spinlock.h>
 #include <linux/threads.h>
@@ -104,5 +102,4 @@ static inline void rcu_exit_nohz(void)
 #define rcu_exit_nohz()                do { } while (0)
 #endif /* CONFIG_NO_HZ */
 
-#endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPREEMPT_H */
diff --git a/include/linux/rcupreempt_trace.h b/include/linux/rcupreempt_trace.h
index 21cd6b2a5c42415628be9780cb4894168abe1c87..b99ae073192a85113969ab0dededd08f5a0ea1cc 100644 (file)
@@ -32,7 +32,6 @@
 #ifndef __LINUX_RCUPREEMPT_TRACE_H
 #define __LINUX_RCUPREEMPT_TRACE_H
 
-#ifdef __KERNEL__
 #include <linux/types.h>
 #include <linux/kernel.h>
 
@@ -95,5 +94,4 @@ extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace);
 extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace);
 extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace);
 
-#endif /* __KERNEL__ */
 #endif /* __LINUX_RCUPREEMPT_TRACE_H */
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
index 8e7eff2cd0ab44d78d0ddc021ad3275459f34f9d..4aacaeecb56f503adf60a680710b46fc485a39d4 100644 (file)
@@ -2176,6 +2176,7 @@ int reiserfs_ioctl(struct inode *inode, struct file *filp,
                   unsigned int cmd, unsigned long arg);
 long reiserfs_compat_ioctl(struct file *filp,
                   unsigned int cmd, unsigned long arg);
+int reiserfs_unpack(struct inode *inode, struct file *filp);
 
 /* ioctl's command */
 #define REISERFS_IOC_UNPACK            _IOW(0xCD,1,long)
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index db5ef9b83c3ff366be21465ffe2a86a5602032f4..336ee43ed7d8897eb9a1b4c7453123bf7ab3ebc8 100644 (file)
@@ -177,7 +177,6 @@ struct reiserfs_journal {
        struct reiserfs_journal_cnode *j_last;  /* newest journal block */
        struct reiserfs_journal_cnode *j_first; /*  oldest journal block.  start here for traverse */
 
-       struct file *j_dev_file;
        struct block_device *j_dev_bd;
        int j_1st_reserved_block;       /* first block on s_dev of reserved area journal */
 
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index 61363ce896d5988857ed440087147eb27bc4e1c5..6d9e1fca098c468b49287791e181f6e4f51e7d54 100644 (file)
@@ -9,6 +9,8 @@
  *
  * Author: Pavel Emelianov <xemul@openvz.org>
  *
+ * See Documentation/controllers/resource_counter.txt for more
+ * info about what this counter is.
  */
 
 #include <linux/cgroup.h>
@@ -24,6 +26,10 @@ struct res_counter {
         * the current resource consumption level
         */
        unsigned long long usage;
+       /*
+        * the maximal value of the usage from the counter creation
+        */
+       unsigned long long max_usage;
        /*
         * the limit that usage cannot exceed
         */
@@ -39,8 +45,9 @@ struct res_counter {
        spinlock_t lock;
 };
 
-/*
+/**
  * Helpers to interact with userspace
+ * res_counter_read_u64() - returns the value of the specified member.
  * res_counter_read/_write - put/get the specified fields from the
  * res_counter struct to/from the user
  *
@@ -51,6 +58,8 @@ struct res_counter {
  * @pos:     and the offset.
  */
 
+u64 res_counter_read_u64(struct res_counter *counter, int member);
+
 ssize_t res_counter_read(struct res_counter *counter, int member,
                const char __user *buf, size_t nbytes, loff_t *pos,
                int (*read_strategy)(unsigned long long val, char *s));
@@ -64,6 +73,7 @@ ssize_t res_counter_write(struct res_counter *counter, int member,
 
 enum {
        RES_USAGE,
+       RES_MAX_USAGE,
        RES_LIMIT,
        RES_FAILCNT,
 };
@@ -124,4 +134,21 @@ static inline bool res_counter_check_under_limit(struct res_counter *cnt)
        return ret;
 }
 
+static inline void res_counter_reset_max(struct res_counter *cnt)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&cnt->lock, flags);
+       cnt->max_usage = cnt->usage;
+       spin_unlock_irqrestore(&cnt->lock, flags);
+}
+
+static inline void res_counter_reset_failcnt(struct res_counter *cnt)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&cnt->lock, flags);
+       cnt->failcnt = 0;
+       spin_unlock_irqrestore(&cnt->lock, flags);
+}
 #endif
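
res_counter_read_u64() with the new RES_MAX_USAGE member reports the high-water mark, and res_counter_reset_max()/res_counter_reset_failcnt() let a controller clear it on request. A hypothetical controller-side sketch ("my_" names are illustration-only; "cnt" is assumed to be the controller's embedded res_counter):

#include <linux/res_counter.h>

/* peak usage seen since creation, or since the last reset */
static u64 my_peak_usage(struct res_counter *cnt)
{
	return res_counter_read_u64(cnt, RES_MAX_USAGE);
}

/* e.g. wired to a write/trigger handler on a reset control file */
static void my_clear_peak_usage(struct res_counter *cnt)
{
	res_counter_reset_max(cnt);	/* max_usage := current usage */
}
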
diff --git a/include/linux/resource.h b/include/linux/resource.h
index ae13db714742a91f95e644f582e3517f5e85651e..aaa423a6f3d9167ed4cd77c23053b271d1874631 100644 (file)
@@ -19,6 +19,7 @@ struct task_struct;
 #define        RUSAGE_SELF     0
 #define        RUSAGE_CHILDREN (-1)
 #define RUSAGE_BOTH    (-2)            /* sys_wait4() uses this */
+#define        RUSAGE_THREAD   1               /* only the calling thread */
 
 struct rusage {
        struct timeval ru_utime;        /* user time used */
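
RUSAGE_THREAD limits the accounting to the calling thread rather than the whole process. A hypothetical userspace sketch, assuming the matching sys_getrusage() handling from the same series is in place:

#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>

#ifndef RUSAGE_THREAD
# define RUSAGE_THREAD 1
#endif

int main(void)
{
	struct rusage ru;

	if (getrusage(RUSAGE_THREAD, &ru) == 0)
		printf("this thread: %ld.%06ld s of user time\n",
		       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec);
	return 0;
}
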
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 68e3f6853fa69a1950f106e6fd60ec37233865d9..c1c99c9643d38f5373e779dcbe5211e9bec31537 100644 (file)
@@ -14,8 +14,6 @@
 #ifndef LINUX_RIO_H
 #define LINUX_RIO_H
 
-#ifdef __KERNEL__
-
 #include <linux/types.h>
 #include <linux/ioport.h>
 #include <linux/list.h>
@@ -23,7 +21,6 @@
 #include <linux/device.h>
 #include <linux/rio_regs.h>
 
-#define RIO_ANY_DESTID         0xff
 #define RIO_NO_HOPCOUNT                -1
 #define RIO_INVALID_DESTID     0xffff
 
                                           entry is invalid (no route
                                           exists for the device ID) */
 
-#ifdef CONFIG_RAPIDIO_8_BIT_TRANSPORT
-#define RIO_MAX_ROUTE_ENTRIES  (1 << 8)
-#else
-#define RIO_MAX_ROUTE_ENTRIES  (1 << 16)
-#endif
+#define RIO_MAX_ROUTE_ENTRIES(size)    (size ? (1 << 16) : (1 << 8))
+#define RIO_ANY_DESTID(size)           (size ? 0xffff : 0xff)
 
 #define RIO_MAX_MBOX           4
 #define RIO_MAX_MSG_SIZE       0x1000
@@ -149,6 +143,11 @@ struct rio_dbell {
        void *dev_id;
 };
 
+enum rio_phy_type {
+       RIO_PHY_PARALLEL,
+       RIO_PHY_SERIAL,
+};
+
 /**
  * struct rio_mport - RIO master port info
  * @dbells: List of doorbell events
@@ -163,6 +162,7 @@ struct rio_dbell {
  * @id: Port ID, unique among all ports
  * @index: Port index, unique among all port interfaces of the same type
  * @name: Port name string
+ * @priv: Master port private data
  */
 struct rio_mport {
        struct list_head dbells;        /* list of doorbell events */
@@ -177,7 +177,13 @@ struct rio_mport {
        unsigned char id;       /* port ID, unique among all ports */
        unsigned char index;    /* port index, unique among all port
                                   interfaces of the same type */
+       unsigned int sys_size;  /* RapidIO common transport system size.
+                                * 0 - Small size. 256 devices.
+                                * 1 - Large size, 65536 devices.
+                                */
+       enum rio_phy_type phy_type;     /* RapidIO phy type */
        unsigned char name[40];
+       void *priv;             /* Master port private data */
 };
 
 /**
@@ -211,7 +217,7 @@ struct rio_switch {
        u16 switchid;
        u16 hopcount;
        u16 destid;
-       u8 route_table[RIO_MAX_ROUTE_ENTRIES];
+       u8 *route_table;
        int (*add_entry) (struct rio_mport * mport, u16 destid, u8 hopcount,
                          u16 table, u16 route_destid, u8 route_port);
        int (*get_entry) (struct rio_mport * mport, u16 destid, u8 hopcount,
@@ -229,13 +235,15 @@ struct rio_switch {
  * @dsend: Callback to send a doorbell message.
  */
 struct rio_ops {
-       int (*lcread) (int index, u32 offset, int len, u32 * data);
-       int (*lcwrite) (int index, u32 offset, int len, u32 data);
-       int (*cread) (int index, u16 destid, u8 hopcount, u32 offset, int len,
-                     u32 * data);
-       int (*cwrite) (int index, u16 destid, u8 hopcount, u32 offset, int len,
-                      u32 data);
-       int (*dsend) (int index, u16 destid, u16 data);
+       int (*lcread) (struct rio_mport *mport, int index, u32 offset, int len,
+                       u32 *data);
+       int (*lcwrite) (struct rio_mport *mport, int index, u32 offset, int len,
+                       u32 data);
+       int (*cread) (struct rio_mport *mport, int index, u16 destid,
+                       u8 hopcount, u32 offset, int len, u32 *data);
+       int (*cwrite) (struct rio_mport *mport, int index, u16 destid,
+                       u8 hopcount, u32 offset, int len, u32 data);
+       int (*dsend) (struct rio_mport *mport, int index, u16 destid, u16 data);
 };
 
 #define RIO_RESOURCE_MEM       0x00000100
@@ -321,5 +329,4 @@ extern void rio_close_inb_mbox(struct rio_mport *, int);
 extern int rio_open_outb_mbox(struct rio_mport *, void *, int, int);
 extern void rio_close_outb_mbox(struct rio_mport *, int);
 
-#endif                         /* __KERNEL__ */
 #endif                         /* LINUX_RIO_H */
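
Passing the struct rio_mport into every rio_ops callback, together with the new ->priv member, lets a controller driver reach its own state from the accessors, and RIO_MAX_ROUTE_ENTRIES(size) sizes the now dynamically allocated route table from mport->sys_size instead of the removed CONFIG_RAPIDIO_8_BIT_TRANSPORT option. A hypothetical mport-driver sketch ("my_" names and "rswitch" are illustration-only; <linux/rio.h> and <linux/slab.h> assumed):

struct my_mport_priv {
	void __iomem *regs;
};

static int my_cread(struct rio_mport *mport, int index, u16 destid,
		    u8 hopcount, u32 offset, int len, u32 *data)
{
	struct my_mport_priv *priv = mport->priv;	/* new ->priv field */

	/* issue the maintenance read through priv->regs here */
	*data = 0;
	return 0;
}

static struct rio_ops my_rio_ops = {
	.cread	= my_cread,
	/* .lcread, .lcwrite, .cwrite and .dsend follow the same pattern */
};

/* during switch enumeration, the route table is sized at run time: */
rswitch->route_table = kzalloc(RIO_MAX_ROUTE_ENTRIES(mport->sys_size),
			       GFP_KERNEL);
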
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index 7adb2a1aac92603449fe1810944b2c7513a6e393..90987b7bcc1be21efcd979f9029dbecb89daf503 100644 (file)
@@ -13,8 +13,6 @@
 #ifndef LINUX_RIO_DRV_H
 #define LINUX_RIO_DRV_H
 
-#ifdef __KERNEL__
-
 #include <linux/types.h>
 #include <linux/ioport.h>
 #include <linux/list.h>
@@ -465,5 +463,4 @@ extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from);
 extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did,
                                   struct rio_dev *from);
 
-#endif                         /* __KERNEL__ */
 #endif                         /* LINUX_RIO_DRV_H */
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 7b524b4109a039a5cf065d60af824fa4d2fb0ff0..efd348fe8ca75f6d9effcf8f05e87bd276e46d97 100644 (file)
@@ -9,8 +9,6 @@
 
 #include <linux/linkage.h>
 
-#ifdef __KERNEL__
-
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <asm/system.h>
@@ -90,5 +88,4 @@ extern void up_read_non_owner(struct rw_semaphore *sem);
 # define up_read_non_owner(sem)                        up_read(sem)
 #endif
 
-#endif /* __KERNEL__ */
 #endif /* _LINUX_RWSEM_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d0bd97044abd67ce71411e9c7a061dc2adc3b386..03c238088aee57a26fbbcd110cf10e901e918fb9 100644 (file)
@@ -68,7 +68,6 @@ struct sched_param {
 #include <linux/smp.h>
 #include <linux/sem.h>
 #include <linux/signal.h>
-#include <linux/securebits.h>
 #include <linux/fs_struct.h>
 #include <linux/compiler.h>
 #include <linux/completion.h>
@@ -555,6 +554,14 @@ struct signal_struct {
 #define SIGNAL_STOP_DEQUEUED   0x00000002 /* stop signal dequeued */
 #define SIGNAL_STOP_CONTINUED  0x00000004 /* SIGCONT since WCONTINUED reap */
 #define SIGNAL_GROUP_EXIT      0x00000008 /* group exit in progress */
+/*
+ * Pending notifications to parent.
+ */
+#define SIGNAL_CLD_STOPPED     0x00000010
+#define SIGNAL_CLD_CONTINUED   0x00000020
+#define SIGNAL_CLD_MASK                (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
+
+#define SIGNAL_UNKILLABLE      0x00000040 /* for init: ignore fatal signals */
 
 /* If true, all threads except ->group_exit_task have pending SIGKILL */
 static inline int signal_group_exit(const struct signal_struct *sig)
@@ -1133,7 +1140,7 @@ struct task_struct {
        gid_t gid,egid,sgid,fsgid;
        struct group_info *group_info;
        kernel_cap_t   cap_effective, cap_inheritable, cap_permitted, cap_bset;
-       unsigned keep_capabilities:1;
+       unsigned securebits;
        struct user_struct *user;
 #ifdef CONFIG_KEYS
        struct key *request_key_auth;   /* assumed request_key authority */
@@ -1168,7 +1175,7 @@ struct task_struct {
        struct sighand_struct *sighand;
 
        sigset_t blocked, real_blocked;
-       sigset_t saved_sigmask;         /* To be restored with TIF_RESTORE_SIGMASK */
+       sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
        struct sigpending pending;
 
        unsigned long sas_ss_sp;
@@ -1670,7 +1677,10 @@ extern struct pid_namespace init_pid_ns;
 extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
                struct pid_namespace *ns);
 
-extern struct task_struct *find_task_by_pid(pid_t nr);
+static inline struct task_struct *__deprecated find_task_by_pid(pid_t nr)
+{
+       return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
+}
 extern struct task_struct *find_task_by_vpid(pid_t nr);
 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
                struct pid_namespace *ns);
@@ -1746,8 +1756,7 @@ extern void zap_other_threads(struct task_struct *p);
 extern int kill_proc(pid_t, int, int);
 extern struct sigqueue *sigqueue_alloc(void);
 extern void sigqueue_free(struct sigqueue *);
-extern int send_sigqueue(int, struct sigqueue *,  struct task_struct *);
-extern int send_group_sigqueue(int, struct sigqueue *,  struct task_struct *);
+extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
 extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
 extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
 
@@ -1798,6 +1807,8 @@ extern void mmput(struct mm_struct *);
 extern struct mm_struct *get_task_mm(struct task_struct *task);
 /* Remove the current tasks stale references to the old mm_struct */
 extern void mm_release(struct task_struct *, struct mm_struct *);
+/* Allocate a new mm structure and copy contents from tsk->mm */
+extern struct mm_struct *dup_mm(struct task_struct *tsk);
 
 extern int  copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
 extern void flush_thread(void);
@@ -2147,6 +2158,19 @@ static inline void migration_init(void)
 #define TASK_SIZE_OF(tsk)      TASK_SIZE
 #endif
 
+#ifdef CONFIG_MM_OWNER
+extern void mm_update_next_owner(struct mm_struct *mm);
+extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
+#else
+static inline void mm_update_next_owner(struct mm_struct *mm)
+{
+}
+
+static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+{
+}
+#endif /* CONFIG_MM_OWNER */
+
 #endif /* __KERNEL__ */
 
 #endif
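
find_task_by_pid() survives only as a __deprecated wrapper that searches init_pid_ns; callers are expected to name the pid namespace they mean, typically their own via find_task_by_vpid(). A hypothetical call-site sketch (the usual rule that the lookup runs under rcu_read_lock() or tasklist_lock is assumed):

static struct task_struct *my_get_task(pid_t pid)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(pid);	/* pid as seen from current's namespace */
	if (task)
		get_task_struct(task);
	rcu_read_unlock();
	return task;
}
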
diff --git a/include/linux/securebits.h b/include/linux/securebits.h
index 5b0617840fa465cd9d92f00a738838e98ca4c08a..c1f19dbceb0566f8a4e1342e4f4c1291d0f29349 100644 (file)
@@ -3,28 +3,39 @@
 
 #define SECUREBITS_DEFAULT 0x00000000
 
-extern unsigned securebits;
-
 /* When set UID 0 has no special privileges. When unset, we support
    inheritance of root-permissions and suid-root executable under
    compatibility mode. We raise the effective and inheritable bitmasks
    *of the executable file* if the effective uid of the new process is
    0. If the real uid is 0, we raise the inheritable bitmask of the
    executable file. */
-#define SECURE_NOROOT            0
+#define SECURE_NOROOT                  0
+#define SECURE_NOROOT_LOCKED           1  /* make bit-0 immutable */
 
 /* When set, setuid to/from uid 0 does not trigger capability-"fixes"
    to be compatible with old programs relying on set*uid to loose
    privileges. When unset, setuid doesn't change privileges. */
-#define SECURE_NO_SETUID_FIXUP   2
+#define SECURE_NO_SETUID_FIXUP         2
+#define SECURE_NO_SETUID_FIXUP_LOCKED  3  /* make bit-2 immutable */
+
+/* When set, a process can retain its capabilities even after
+   transitioning to a non-root user (the set-uid fixup suppressed by
+   bit 2). Bit-4 is cleared when a process calls exec(); setting both
+   bit 4 and 5 will create a barrier through exec that no exec()'d
+   child can use this feature again. */
+#define SECURE_KEEP_CAPS               4
+#define SECURE_KEEP_CAPS_LOCKED                5  /* make bit-4 immutable */
 
 /* Each securesetting is implemented using two bits. One bit specify
    whether the setting is on or off. The other bit specify whether the
    setting is fixed or not. A setting which is fixed cannot be changed
    from user-level. */
+#define issecure_mask(X)       (1 << (X))
+#define issecure(X)            (issecure_mask(X) & current->securebits)
 
-#define issecure(X) ( (1 << (X+1)) & SECUREBITS_DEFAULT ?      \
-                     (1 << (X)) & SECUREBITS_DEFAULT :         \
-                     (1 << (X)) & securebits )
+#define SECURE_ALL_BITS                (issecure_mask(SECURE_NOROOT) | \
+                                issecure_mask(SECURE_NO_SETUID_FIXUP) | \
+                                issecure_mask(SECURE_KEEP_CAPS))
+#define SECURE_ALL_LOCKS       (SECURE_ALL_BITS << 1)
 
 #endif /* !_LINUX_SECUREBITS_H */
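
With the per-process securebits word replacing task_struct's keep_capabilities:1 bit (see the sched.h hunk above), the old PR_SET_KEEPCAPS behaviour maps onto SECURE_KEEP_CAPS, and every setting gains a *_LOCKED companion that freezes it. A simplified, hypothetical kernel-side sketch of that mapping — an illustration of the bit handling, not a copy of the commoncap code:

static int my_set_keepcaps(int keep)
{
	if (issecure(SECURE_KEEP_CAPS_LOCKED))
		return -EPERM;		/* the lock bit made the setting immutable */
	if (keep)
		current->securebits |= issecure_mask(SECURE_KEEP_CAPS);
	else
		current->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
	return 0;
}
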
diff --git a/include/linux/security.h b/include/linux/security.h
index 53a34539382aee1f94e34a677d20838a4aba9d6f..50737c70e78ea41e2a2e4551b6044df86f8a1de3 100644 (file)
@@ -34,8 +34,6 @@
 #include <linux/xfrm.h>
 #include <net/flow.h>
 
-extern unsigned securebits;
-
 /* Maximum number of letters for an LSM name string */
 #define SECURITY_NAME_MAX      10
 
@@ -46,25 +44,28 @@ struct audit_krule;
  * These functions are in security/capability.c and are used
  * as the default capabilities functions
  */
-extern int cap_capable (struct task_struct *tsk, int cap);
-extern int cap_settime (struct timespec *ts, struct timezone *tz);
-extern int cap_ptrace (struct task_struct *parent, struct task_struct *child);
-extern int cap_capget (struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
-extern int cap_capset_check (struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
-extern void cap_capset_set (struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
-extern int cap_bprm_set_security (struct linux_binprm *bprm);
-extern void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe);
+extern int cap_capable(struct task_struct *tsk, int cap);
+extern int cap_settime(struct timespec *ts, struct timezone *tz);
+extern int cap_ptrace(struct task_struct *parent, struct task_struct *child);
+extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
+extern int cap_capset_check(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
+extern void cap_capset_set(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
+extern int cap_bprm_set_security(struct linux_binprm *bprm);
+extern void cap_bprm_apply_creds(struct linux_binprm *bprm, int unsafe);
 extern int cap_bprm_secureexec(struct linux_binprm *bprm);
-extern int cap_inode_setxattr(struct dentry *dentry, char *name, void *value, size_t size, int flags);
-extern int cap_inode_removexattr(struct dentry *dentry, char *name);
+extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
+                             const void *value, size_t size, int flags);
+extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
 extern int cap_inode_need_killpriv(struct dentry *dentry);
 extern int cap_inode_killpriv(struct dentry *dentry);
-extern int cap_task_post_setuid (uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags);
-extern void cap_task_reparent_to_init (struct task_struct *p);
-extern int cap_task_setscheduler (struct task_struct *p, int policy, struct sched_param *lp);
-extern int cap_task_setioprio (struct task_struct *p, int ioprio);
-extern int cap_task_setnice (struct task_struct *p, int nice);
-extern int cap_syslog (int type);
+extern int cap_task_post_setuid(uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags);
+extern void cap_task_reparent_to_init(struct task_struct *p);
+extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+                         unsigned long arg4, unsigned long arg5, long *rc_p);
+extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
+extern int cap_task_setioprio(struct task_struct *p, int ioprio);
+extern int cap_task_setnice(struct task_struct *p, int nice);
+extern int cap_syslog(int type);
 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
 
 struct msghdr;
@@ -128,7 +129,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
 {
        int i;
        if (opts->mnt_opts)
-               for(i = 0; i < opts->num_mnt_opts; i++)
+               for (i = 0; i < opts->num_mnt_opts; i++)
                        kfree(opts->mnt_opts[i]);
        kfree(opts->mnt_opts);
        opts->mnt_opts = NULL;
@@ -190,21 +191,21 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     @bprm contains the linux_binprm structure.
  *     Return 0 if the hook is successful and permission is granted.
  * @bprm_check_security:
- *     This hook mediates the point when a search for a binary handler will
- *     begin.  It allows a check the @bprm->security value which is set in
- *     the preceding set_security call.  The primary difference from
- *     set_security is that the argv list and envp list are reliably
- *     available in @bprm.  This hook may be called multiple times
- *     during a single execve; and in each pass set_security is called
- *     first.
- *     @bprm contains the linux_binprm structure.
+ *     This hook mediates the point when a search for a binary handler will
+ *     begin.  It allows a check the @bprm->security value which is set in
+ *     the preceding set_security call.  The primary difference from
+ *     set_security is that the argv list and envp list are reliably
+ *     available in @bprm.  This hook may be called multiple times
+ *     during a single execve; and in each pass set_security is called
+ *     first.
+ *     @bprm contains the linux_binprm structure.
  *     Return 0 if the hook is successful and permission is granted.
  * @bprm_secureexec:
- *      Return a boolean value (0 or 1) indicating whether a "secure exec" 
- *      is required.  The flag is passed in the auxiliary table
- *      on the initial stack to the ELF interpreter to indicate whether libc 
- *      should enable secure mode.
- *      @bprm contains the linux_binprm structure.
+ *     Return a boolean value (0 or 1) indicating whether a "secure exec"
+ *     is required.  The flag is passed in the auxiliary table
+ *     on the initial stack to the ELF interpreter to indicate whether libc
+ *     should enable secure mode.
+ *     @bprm contains the linux_binprm structure.
  *
  * Security hooks for filesystem operations.
  *
@@ -221,7 +222,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     Check permission before obtaining filesystem statistics for the @mnt
  *     mountpoint.
  *     @dentry is a handle on the superblock for the filesystem.
- *     Return 0 if permission is granted.  
+ *     Return 0 if permission is granted.
  * @sb_mount:
  *     Check permission before an object specified by @dev_name is mounted on
  *     the mount point named by @nd.  For an ordinary mount, @dev_name
@@ -282,12 +283,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  * @sb_pivotroot:
  *     Check permission before pivoting the root filesystem.
  *     @old_path contains the path for the new location of the current root (put_old).
- *      @new_path contains the path for the new root (new_root).
+ *     @new_path contains the path for the new root (new_root).
  *     Return 0 if permission is granted.
  * @sb_post_pivotroot:
  *     Update module state after a successful pivot.
  *     @old_path contains the path for the old root.
- *      @new_path contains the path for the new root.
+ *     @new_path contains the path for the new root.
  * @sb_get_mnt_opts:
  *     Get the security relevant mount options used for a superblock
  *     @sb the superblock to get security mount options from
@@ -316,9 +317,9 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  * @inode_free_security:
  *     @inode contains the inode structure.
  *     Deallocate the inode security structure and set @inode->i_security to
- *     NULL. 
+ *     NULL.
  * @inode_init_security:
- *     Obtain the security attribute name suffix and value to set on a newly
+ *     Obtain the security attribute name suffix and value to set on a newly
  *     created inode and set up the incore security field for the new inode.
  *     This hook is called by the fs code as part of the inode creation
  *     transaction and provides for atomic labeling of the inode, unlike
@@ -349,7 +350,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     @new_dentry contains the dentry structure for the new link.
  *     Return 0 if permission is granted.
  * @inode_unlink:
- *     Check the permission to remove a hard link to a file. 
+ *     Check the permission to remove a hard link to a file.
  *     @dir contains the inode structure of parent directory of the file.
  *     @dentry contains the dentry structure for file to be unlinked.
  *     Return 0 if permission is granted.
@@ -361,7 +362,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     Return 0 if permission is granted.
  * @inode_mkdir:
  *     Check permissions to create a new directory in the existing directory
- *     associated with inode strcture @dir. 
+ *     associated with inode strcture @dir.
  *     @dir containst the inode structure of parent of the directory to be created.
  *     @dentry contains the dentry structure of new directory.
  *     @mode contains the mode of new directory.
@@ -406,7 +407,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     called when the actual read/write operations are performed.
  *     @inode contains the inode structure to check.
  *     @mask contains the permission mask.
- *     @nd contains the nameidata (may be NULL).
+ *     @nd contains the nameidata (may be NULL).
  *     Return 0 if permission is granted.
  * @inode_setattr:
  *     Check permission before setting file attributes.  Note that the kernel
@@ -428,24 +429,24 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     can use this hook to release any persistent label associated with the
  *     inode.
  * @inode_setxattr:
- *     Check permission before setting the extended attributes
- *     @value identified by @name for @dentry.
- *     Return 0 if permission is granted.
+ *     Check permission before setting the extended attributes
+ *     @value identified by @name for @dentry.
+ *     Return 0 if permission is granted.
  * @inode_post_setxattr:
- *     Update inode security field after successful setxattr operation.
- *     @value identified by @name for @dentry.
+ *     Update inode security field after successful setxattr operation.
+ *     @value identified by @name for @dentry.
  * @inode_getxattr:
- *     Check permission before obtaining the extended attributes
- *     identified by @name for @dentry.
- *     Return 0 if permission is granted.
+ *     Check permission before obtaining the extended attributes
+ *     identified by @name for @dentry.
+ *     Return 0 if permission is granted.
  * @inode_listxattr:
- *     Check permission before obtaining the list of extended attribute 
- *     names for @dentry.
- *     Return 0 if permission is granted.
+ *     Check permission before obtaining the list of extended attribute
+ *     names for @dentry.
+ *     Return 0 if permission is granted.
  * @inode_removexattr:
- *     Check permission before removing the extended attribute
- *     identified by @name for @dentry.
- *     Return 0 if permission is granted.
+ *     Check permission before removing the extended attribute
+ *     identified by @name for @dentry.
+ *     Return 0 if permission is granted.
  * @inode_getsecurity:
  *     Retrieve a copy of the extended attribute representation of the
  *     security label associated with @name for @inode via @buffer.  Note that
@@ -457,7 +458,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     Set the security label associated with @name for @inode from the
  *     extended attribute value @value.  @size indicates the size of the
  *     @value in bytes.  @flags may be XATTR_CREATE, XATTR_REPLACE, or 0.
- *     Note that @name is the remainder of the attribute name after the 
+ *     Note that @name is the remainder of the attribute name after the
  *     security. prefix has been removed.
  *     Return 0 on success.
  * @inode_listsecurity:
@@ -564,7 +565,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     struct file, so the file structure (and associated security information)
  *     can always be obtained:
  *             container_of(fown, struct file, f_owner)
- *     @tsk contains the structure of task receiving signal.
+ *     @tsk contains the structure of task receiving signal.
  *     @fown contains the file owner information.
  *     @sig is the signal that will be sent.  When 0, kernel sends SIGIO.
  *     Return 0 if permission is granted.
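
Because @fown is embedded in struct file, the container_of() relation quoted above lets a hook recover the file (and its security blob) with no extra bookkeeping. A hypothetical hook body, for illustration only:

/* Hypothetical file_send_sigiotask hook: recover the struct file from
 * the fown_struct and decide whether @tsk may be sent @sig (the kernel
 * sends SIGIO when @sig is 0). */
static int example_file_send_sigiotask(struct task_struct *tsk,
				       struct fown_struct *fown, int sig)
{
	struct file *file = container_of(fown, struct file, f_owner);

	/* a real module would compare file->f_security with @tsk here */
	(void)file;
	return 0;			/* grant in this sketch */
}
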
@@ -720,14 +721,16 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
 *     @arg3 contains an argument.
 *     @arg4 contains an argument.
 *     @arg5 contains an argument.
- *     Return 0 if permission is granted.
+ *     @rc_p contains a pointer to communicate back the forced return code.
+ *     Return 0 if permission is granted, and non-zero if the security module
+ *     has taken responsibility (setting *rc_p) for the prctl call.
  * @task_reparent_to_init:
- *     Set the security attributes in @p->security for a kernel thread that
- *     is being reparented to the init task.
+ *     Set the security attributes in @p->security for a kernel thread that
+ *     is being reparented to the init task.
  *     @p contains the task_struct for the kernel thread.
  * @task_to_inode:
- *     Set the security attributes for an inode based on an associated task's
- *     security attributes, e.g. for /proc/pid inodes.
+ *     Set the security attributes for an inode based on an associated task's
+ *     security attributes, e.g. for /proc/pid inodes.
  *     @p contains the task_struct for the task.
  *     @inode contains the inode structure for the inode.
  *
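
The new @rc_p argument changes task_prctl from a pure permission check into a hook that may service the option itself. A hedged sketch follows; the option number and behaviour are invented for illustration and are not part of this patch:

/* Illustrative only: claim one hypothetical prctl option, force the
 * value sys_prctl() returns through *rc_p, and leave every other
 * option to the normal handling by returning 0. */
#define EXAMPLE_PR_GET_FLAG	0x4d5a	/* hypothetical option number */

static int example_task_prctl(int option, unsigned long arg2,
			      unsigned long arg3, unsigned long arg4,
			      unsigned long arg5, long *rc_p)
{
	if (option != EXAMPLE_PR_GET_FLAG)
		return 0;		/* not ours: fall through */

	*rc_p = 1;			/* becomes the syscall's return value */
	return 1;			/* non-zero: we took responsibility */
}

The matching signature changes appear in struct security_operations and in security_task_prctl() later in this diff.
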
@@ -737,7 +740,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     Save security information for a netlink message so that permission
  *     checking can be performed when the message is processed.  The security
  *     information can be saved using the eff_cap field of the
- *      netlink_skb_parms structure.  Also may be used to provide fine
+ *     netlink_skb_parms structure.  Also may be used to provide fine
  *     grained control over message transmission.
 *     @sk associated sock of task sending the message.
  *     @skb contains the sk_buff structure for the netlink message.
@@ -805,14 +808,14 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     @sock contains the socket structure.
  *     @address contains the address to bind to.
  *     @addrlen contains the length of address.
- *     Return 0 if permission is granted.  
+ *     Return 0 if permission is granted.
  * @socket_connect:
  *     Check permission before socket protocol layer connect operation
  *     attempts to connect socket @sock to a remote address, @address.
  *     @sock contains the socket structure.
  *     @address contains the address of remote endpoint.
  *     @addrlen contains the length of address.
- *     Return 0 if permission is granted.  
+ *     Return 0 if permission is granted.
  * @socket_listen:
  *     Check permission before socket protocol layer listen operation.
  *     @sock contains the socket structure.
@@ -842,7 +845,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     @msg contains the message structure.
  *     @size contains the size of message structure.
  *     @flags contains the operational flags.
- *     Return 0 if permission is granted.  
+ *     Return 0 if permission is granted.
  * @socket_getsockname:
  *     Check permission before the local address (name) of the socket object
  *     @sock is retrieved.
@@ -866,7 +869,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     @sock contains the socket structure.
  *     @level contains the protocol level to set options for.
  *     @optname contains the name of the option to set.
- *     Return 0 if permission is granted.  
+ *     Return 0 if permission is granted.
  * @socket_shutdown:
  *     Checks permission before all or part of a connection on the socket
  *     @sock is shut down.
@@ -893,19 +896,19 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     Return 0 if all is well, otherwise, typical getsockopt return
  *     values.
  * @socket_getpeersec_dgram:
- *     This hook allows the security module to provide peer socket security
- *     state for udp sockets on a per-packet basis to userspace via
- *     getsockopt SO_GETPEERSEC.  The application must first have indicated
- *     the IP_PASSSEC option via getsockopt.  It can then retrieve the
- *     security state returned by this hook for a packet via the SCM_SECURITY
- *     ancillary message type.
- *     @skb is the skbuff for the packet being queried
- *     @secdata is a pointer to a buffer in which to copy the security data
- *     @seclen is the maximum length for @secdata
- *     Return 0 on success, error on failure.
+ *     This hook allows the security module to provide peer socket security
+ *     state for udp sockets on a per-packet basis to userspace via
+ *     getsockopt SO_GETPEERSEC.  The application must first have indicated
+ *     the IP_PASSSEC option via getsockopt.  It can then retrieve the
+ *     security state returned by this hook for a packet via the SCM_SECURITY
+ *     ancillary message type.
+ *     @skb is the skbuff for the packet being queried
+ *     @secdata is a pointer to a buffer in which to copy the security data
+ *     @seclen is the maximum length for @secdata
+ *     Return 0 on success, error on failure.
  * @sk_alloc_security:
- *      Allocate and attach a security structure to the sk->sk_security field,
- *      which is used to copy security attributes between local stream sockets.
+ *     Allocate and attach a security structure to the sk->sk_security field,
+ *     which is used to copy security attributes between local stream sockets.
  * @sk_free_security:
  *     Deallocate security structure.
  * @sk_clone_security:
@@ -920,7 +923,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  * @inet_csk_clone:
  *     Sets the new child socket's sid to the openreq sid.
  * @inet_conn_established:
- *     Sets the connection's peersid to the secmark on skb.
+ *     Sets the connection's peersid to the secmark on skb.
  * @req_classify_flow:
  *     Sets the flow's sid to the openreq sid.
  *
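
For @socket_getpeersec_dgram, note that the signature in struct security_operations later in this diff takes (sock, skb, *secid) rather than the secdata/seclen pair described above; the comment text predates that change. Purely as a sketch, and assuming CONFIG_NETWORK_SECMARK, a module could report the packet's secmark as the peer security ID:

/* Illustrative sketch only: expose the packet's secmark as the peer
 * security ID.  Requires CONFIG_NETWORK_SECMARK; a real module derives
 * this from its own per-packet state instead. */
static int example_socket_getpeersec_dgram(struct socket *sock,
					   struct sk_buff *skb, u32 *secid)
{
	if (!skb || !skb->secmark)
		return -ENOPROTOOPT;	/* no label to report */
	*secid = skb->secmark;
	return 0;
}
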
@@ -999,13 +1002,24 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     No return value.
  * @key_permission:
  *     See whether a specific operational right is granted to a process on a
- *      key.
+ *     key.
  *     @key_ref refers to the key (key pointer + possession attribute bit).
  *     @context points to the process to provide the context against which to
- *       evaluate the security data on the key.
+ *     evaluate the security data on the key.
  *     @perm describes the combination of permissions required of this key.
 *     Return 1 if permission granted, 0 if permission denied and -ve if the
- *      normal permissions model should be effected.
+ *     normal permissions model should be effected.
+ * @key_getsecurity:
+ *     Get a textual representation of the security context attached to a key
+ *     for the purposes of honouring KEYCTL_GETSECURITY.  This function
+ *     allocates the storage for the NUL-terminated string and the caller
+ *     should free it.
+ *     @key points to the key to be queried.
+ *     @_buffer points to a pointer that should be set to point to the
+ *     resulting string (or left NULL if there is no label or an error occurs).
+ *     Return the length of the string (including the terminating NUL) or a
+ *     negative error code on failure.
+ *     May also return 0 (and a NULL buffer pointer) if there is no label.
  *
  * Security hooks affecting all System V IPC operations.
  *
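
The @key_getsecurity hook added above returns an allocated, NUL-terminated label that the caller (the KEYCTL_GETSECURITY path) frees. A minimal hypothetical implementation, with an invented label format:

#include <linux/key.h>

/* Hypothetical key_getsecurity hook: hand back a kmalloc()'d label and
 * its length including the terminating NUL, as the comment requires. */
static int example_key_getsecurity(struct key *key, char **_buffer)
{
	char *label = kasprintf(GFP_KERNEL, "example:key:%d", key->serial);

	if (!label)
		return -ENOMEM;
	*_buffer = label;
	return strlen(label) + 1;	/* length including the NUL */
}
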
@@ -1056,7 +1070,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     The @msq may be NULL, e.g. for IPC_INFO or MSG_INFO.
  *     @msq contains the message queue to act upon.  May be NULL.
  *     @cmd contains the operation to be performed.
- *     Return 0 if permission is granted.  
+ *     Return 0 if permission is granted.
  * @msg_queue_msgsnd:
  *     Check permission before a message, @msg, is enqueued on the message
  *     queue, @msq.
@@ -1066,8 +1080,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     Return 0 if permission is granted.
  * @msg_queue_msgrcv:
  *     Check permission before a message, @msg, is removed from the message
- *     queue, @msq.  The @target task structure contains a pointer to the 
- *     process that will be receiving the message (not equal to the current 
+ *     queue, @msq.  The @target task structure contains a pointer to the
+ *     process that will be receiving the message (not equal to the current
  *     process when inline receives are being performed).
  *     @msq contains the message queue to retrieve message from.
  *     @msg contains the message destination.
@@ -1132,15 +1146,15 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *     Return 0 if permission is granted.
  * @sem_semctl:
  *     Check permission when a semaphore operation specified by @cmd is to be
- *     performed on the semaphore @sma.  The @sma may be NULL, e.g. for 
+ *     performed on the semaphore @sma.  The @sma may be NULL, e.g. for
  *     IPC_INFO or SEM_INFO.
  *     @sma contains the semaphore structure.  May be NULL.
  *     @cmd contains the operation to be performed.
  *     Return 0 if permission is granted.
  * @sem_semop
  *     Check permissions before performing operations on members of the
- *     semaphore set @sma.  If the @alter flag is nonzero, the semaphore set 
- *      may be modified.
+ *     semaphore set @sma.  If the @alter flag is nonzero, the semaphore set
+ *     may be modified.
  *     @sma contains the semaphore structure.
  *     @sops contains the operations to perform.
  *     @nsops contains the number of operations to perform.
@@ -1211,7 +1225,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  * @syslog:
  *     Check permission before accessing the kernel message ring or changing
  *     logging to the console.
- *     See the syslog(2) manual page for an explanation of the @type values.  
+ *     See the syslog(2) manual page for an explanation of the @type values.
  *     @type contains the type of action.
  *     Return 0 if permission is granted.
  * @settime:
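
Referring back to the @syslog hook above: a sketch that roughly mirrors the default capability behaviour of this era, letting unprivileged callers use only the read-all and buffer-size actions; treat the exact type numbers as illustrative:

#include <linux/capability.h>

/* Illustrative syslog hook: unprivileged callers may only use the
 * read-all (3) and buffer-size (10) actions; anything else needs
 * CAP_SYS_ADMIN.  See syslog(2) for the full list of types. */
static int example_syslog(int type)
{
	if (type != 3 && type != 10 && !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}
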
@@ -1223,22 +1237,22 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  * @vm_enough_memory:
  *     Check permissions for allocating a new virtual mapping.
  *     @mm contains the mm struct it is being added to.
- *      @pages contains the number of pages.
+ *     @pages contains the number of pages.
  *     Return 0 if permission is granted.
  *
  * @register_security:
- *     allow module stacking.
- *     @name contains the name of the security module being stacked.
- *     @ops contains a pointer to the struct security_operations of the module to stack.
- * 
+ *     Allow module stacking.
+ *     @name contains the name of the security module being stacked.
+ *     @ops contains a pointer to the struct security_operations of the module to stack.
+ *
  * @secid_to_secctx:
  *     Convert secid to security context.
  *     @secid contains the security ID.
  *     @secdata contains the pointer that stores the converted security context.
  * @secctx_to_secid:
- *      Convert security context to secid.
- *      @secid contains the pointer to the generated security ID.
- *      @secdata contains the security context.
+ *     Convert security context to secid.
+ *     @secid contains the pointer to the generated security ID.
+ *     @secdata contains the security context.
  *
  * @release_secctx:
  *     Release the security context.
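
The secid/secctx trio above is consumed through the security_secid_to_secctx()/security_release_secctx() wrappers declared later in this header; a caller-side sketch of the intended pairing (the log message is illustrative):

/* Sketch of the usual calling pattern: convert a secid to a textual
 * context, use it, then hand it back so the module can free it. */
static void example_log_secid(u32 secid)
{
	char *ctx;
	u32 len;

	if (security_secid_to_secctx(secid, &ctx, &len))
		return;				/* no context available */
	printk(KERN_INFO "subject context: %.*s\n", (int)len, ctx);
	security_release_secctx(ctx, len);
}
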
@@ -1281,49 +1295,49 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
 struct security_operations {
        char name[SECURITY_NAME_MAX + 1];
 
-       int (*ptrace) (struct task_struct * parent, struct task_struct * child);
-       int (*capget) (struct task_struct * target,
-                      kernel_cap_t * effective,
-                      kernel_cap_t * inheritable, kernel_cap_t * permitted);
-       int (*capset_check) (struct task_struct * target,
-                            kernel_cap_t * effective,
-                            kernel_cap_t * inheritable,
-                            kernel_cap_t * permitted);
-       void (*capset_set) (struct task_struct * target,
-                           kernel_cap_t * effective,
-                           kernel_cap_t * inheritable,
-                           kernel_cap_t * permitted);
-       int (*capable) (struct task_struct * tsk, int cap);
-       int (*acct) (struct file * file);
-       int (*sysctl) (struct ctl_table * table, int op);
-       int (*quotactl) (int cmds, int type, int id, struct super_block * sb);
-       int (*quota_on) (struct dentry * dentry);
+       int (*ptrace) (struct task_struct *parent, struct task_struct *child);
+       int (*capget) (struct task_struct *target,
+                      kernel_cap_t *effective,
+                      kernel_cap_t *inheritable, kernel_cap_t *permitted);
+       int (*capset_check) (struct task_struct *target,
+                            kernel_cap_t *effective,
+                            kernel_cap_t *inheritable,
+                            kernel_cap_t *permitted);
+       void (*capset_set) (struct task_struct *target,
+                           kernel_cap_t *effective,
+                           kernel_cap_t *inheritable,
+                           kernel_cap_t *permitted);
+       int (*capable) (struct task_struct *tsk, int cap);
+       int (*acct) (struct file *file);
+       int (*sysctl) (struct ctl_table *table, int op);
+       int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
+       int (*quota_on) (struct dentry *dentry);
        int (*syslog) (int type);
        int (*settime) (struct timespec *ts, struct timezone *tz);
        int (*vm_enough_memory) (struct mm_struct *mm, long pages);
 
-       int (*bprm_alloc_security) (struct linux_binprm * bprm);
-       void (*bprm_free_security) (struct linux_binprm * bprm);
-       void (*bprm_apply_creds) (struct linux_binprm * bprm, int unsafe);
-       void (*bprm_post_apply_creds) (struct linux_binprm * bprm);
-       int (*bprm_set_security) (struct linux_binprm * bprm);
-       int (*bprm_check_security) (struct linux_binprm * bprm);
-       int (*bprm_secureexec) (struct linux_binprm * bprm);
-
-       int (*sb_alloc_security) (struct super_block * sb);
-       void (*sb_free_security) (struct super_block * sb);
-       int (*sb_copy_data)(char *orig, char *copy);
+       int (*bprm_alloc_security) (struct linux_binprm *bprm);
+       void (*bprm_free_security) (struct linux_binprm *bprm);
+       void (*bprm_apply_creds) (struct linux_binprm *bprm, int unsafe);
+       void (*bprm_post_apply_creds) (struct linux_binprm *bprm);
+       int (*bprm_set_security) (struct linux_binprm *bprm);
+       int (*bprm_check_security) (struct linux_binprm *bprm);
+       int (*bprm_secureexec) (struct linux_binprm *bprm);
+
+       int (*sb_alloc_security) (struct super_block *sb);
+       void (*sb_free_security) (struct super_block *sb);
+       int (*sb_copy_data) (char *orig, char *copy);
        int (*sb_kern_mount) (struct super_block *sb, void *data);
        int (*sb_statfs) (struct dentry *dentry);
        int (*sb_mount) (char *dev_name, struct path *path,
                         char *type, unsigned long flags, void *data);
-       int (*sb_check_sb) (struct vfsmount * mnt, struct path *path);
-       int (*sb_umount) (struct vfsmount * mnt, int flags);
-       void (*sb_umount_close) (struct vfsmount * mnt);
-       void (*sb_umount_busy) (struct vfsmount * mnt);
-       void (*sb_post_remount) (struct vfsmount * mnt,
+       int (*sb_check_sb) (struct vfsmount *mnt, struct path *path);
+       int (*sb_umount) (struct vfsmount *mnt, int flags);
+       void (*sb_umount_close) (struct vfsmount *mnt);
+       void (*sb_umount_busy) (struct vfsmount *mnt);
+       void (*sb_post_remount) (struct vfsmount *mnt,
                                 unsigned long flags, void *data);
-       void (*sb_post_addmount) (struct vfsmount * mnt,
+       void (*sb_post_addmount) (struct vfsmount *mnt,
                                  struct path *mountpoint);
        int (*sb_pivotroot) (struct path *old_path,
                             struct path *new_path);
@@ -1337,177 +1351,177 @@ struct security_operations {
                                   struct super_block *newsb);
        int (*sb_parse_opts_str) (char *options, struct security_mnt_opts *opts);
 
-       int (*inode_alloc_security) (struct inode *inode);      
+       int (*inode_alloc_security) (struct inode *inode);
        void (*inode_free_security) (struct inode *inode);
        int (*inode_init_security) (struct inode *inode, struct inode *dir,
                                    char **name, void **value, size_t *len);
        int (*inode_create) (struct inode *dir,
-                            struct dentry *dentry, int mode);
+                            struct dentry *dentry, int mode);
        int (*inode_link) (struct dentry *old_dentry,
-                          struct inode *dir, struct dentry *new_dentry);
+                          struct inode *dir, struct dentry *new_dentry);
        int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
        int (*inode_symlink) (struct inode *dir,
-                             struct dentry *dentry, const char *old_name);
+                             struct dentry *dentry, const char *old_name);
        int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, int mode);
        int (*inode_rmdir) (struct inode *dir, struct dentry *dentry);
        int (*inode_mknod) (struct inode *dir, struct dentry *dentry,
-                           int mode, dev_t dev);
+                           int mode, dev_t dev);
        int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
-                            struct inode *new_dir, struct dentry *new_dentry);
+                            struct inode *new_dir, struct dentry *new_dentry);
        int (*inode_readlink) (struct dentry *dentry);
        int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
        int (*inode_permission) (struct inode *inode, int mask, struct nameidata *nd);
        int (*inode_setattr)    (struct dentry *dentry, struct iattr *attr);
        int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
-        void (*inode_delete) (struct inode *inode);
-       int (*inode_setxattr) (struct dentry *dentry, char *name, void *value,
-                              size_t size, int flags);
-       void (*inode_post_setxattr) (struct dentry *dentry, char *name, void *value,
-                                    size_t size, int flags);
-       int (*inode_getxattr) (struct dentry *dentry, char *name);
+       void (*inode_delete) (struct inode *inode);
+       int (*inode_setxattr) (struct dentry *dentry, const char *name,
+                              const void *value, size_t size, int flags);
+       void (*inode_post_setxattr) (struct dentry *dentry, const char *name,
+                                    const void *value, size_t size, int flags);
+       int (*inode_getxattr) (struct dentry *dentry, const char *name);
        int (*inode_listxattr) (struct dentry *dentry);
-       int (*inode_removexattr) (struct dentry *dentry, char *name);
+       int (*inode_removexattr) (struct dentry *dentry, const char *name);
        int (*inode_need_killpriv) (struct dentry *dentry);
        int (*inode_killpriv) (struct dentry *dentry);
-       int (*inode_getsecurity)(const struct inode *inode, const char *name, void **buffer, bool alloc);
-       int (*inode_setsecurity)(struct inode *inode, const char *name, const void *value, size_t size, int flags);
-       int (*inode_listsecurity)(struct inode *inode, char *buffer, size_t buffer_size);
-       void (*inode_getsecid)(const struct inode *inode, u32 *secid);
-
-       int (*file_permission) (struct file * file, int mask);
-       int (*file_alloc_security) (struct file * file);
-       void (*file_free_security) (struct file * file);
-       int (*file_ioctl) (struct file * file, unsigned int cmd,
+       int (*inode_getsecurity) (const struct inode *inode, const char *name, void **buffer, bool alloc);
+       int (*inode_setsecurity) (struct inode *inode, const char *name, const void *value, size_t size, int flags);
+       int (*inode_listsecurity) (struct inode *inode, char *buffer, size_t buffer_size);
+       void (*inode_getsecid) (const struct inode *inode, u32 *secid);
+
+       int (*file_permission) (struct file *file, int mask);
+       int (*file_alloc_security) (struct file *file);
+       void (*file_free_security) (struct file *file);
+       int (*file_ioctl) (struct file *file, unsigned int cmd,
                           unsigned long arg);
-       int (*file_mmap) (struct file * file,
+       int (*file_mmap) (struct file *file,
                          unsigned long reqprot, unsigned long prot,
                          unsigned long flags, unsigned long addr,
                          unsigned long addr_only);
-       int (*file_mprotect) (struct vm_area_struct * vma,
+       int (*file_mprotect) (struct vm_area_struct *vma,
                              unsigned long reqprot,
                              unsigned long prot);
-       int (*file_lock) (struct file * file, unsigned int cmd);
-       int (*file_fcntl) (struct file * file, unsigned int cmd,
+       int (*file_lock) (struct file *file, unsigned int cmd);
+       int (*file_fcntl) (struct file *file, unsigned int cmd,
                           unsigned long arg);
-       int (*file_set_fowner) (struct file * file);
-       int (*file_send_sigiotask) (struct task_struct * tsk,
-                                   struct fown_struct * fown, int sig);
-       int (*file_receive) (struct file * file);
-       int (*dentry_open)  (struct file *file);
+       int (*file_set_fowner) (struct file *file);
+       int (*file_send_sigiotask) (struct task_struct *tsk,
+                                   struct fown_struct *fown, int sig);
+       int (*file_receive) (struct file *file);
+       int (*dentry_open) (struct file *file);
 
        int (*task_create) (unsigned long clone_flags);
-       int (*task_alloc_security) (struct task_struct * p);
-       void (*task_free_security) (struct task_struct * p);
+       int (*task_alloc_security) (struct task_struct *p);
+       void (*task_free_security) (struct task_struct *p);
        int (*task_setuid) (uid_t id0, uid_t id1, uid_t id2, int flags);
        int (*task_post_setuid) (uid_t old_ruid /* or fsuid */ ,
                                 uid_t old_euid, uid_t old_suid, int flags);
        int (*task_setgid) (gid_t id0, gid_t id1, gid_t id2, int flags);
-       int (*task_setpgid) (struct task_struct * p, pid_t pgid);
-       int (*task_getpgid) (struct task_struct * p);
-       int (*task_getsid) (struct task_struct * p);
-       void (*task_getsecid) (struct task_struct * p, u32 * secid);
+       int (*task_setpgid) (struct task_struct *p, pid_t pgid);
+       int (*task_getpgid) (struct task_struct *p);
+       int (*task_getsid) (struct task_struct *p);
+       void (*task_getsecid) (struct task_struct *p, u32 *secid);
        int (*task_setgroups) (struct group_info *group_info);
-       int (*task_setnice) (struct task_struct * p, int nice);
-       int (*task_setioprio) (struct task_struct * p, int ioprio);
-       int (*task_getioprio) (struct task_struct * p);
-       int (*task_setrlimit) (unsigned int resource, struct rlimit * new_rlim);
-       int (*task_setscheduler) (struct task_struct * p, int policy,
-                                 struct sched_param * lp);
-       int (*task_getscheduler) (struct task_struct * p);
-       int (*task_movememory) (struct task_struct * p);
-       int (*task_kill) (struct task_struct * p,
-                         struct siginfo * info, int sig, u32 secid);
-       int (*task_wait) (struct task_struct * p);
+       int (*task_setnice) (struct task_struct *p, int nice);
+       int (*task_setioprio) (struct task_struct *p, int ioprio);
+       int (*task_getioprio) (struct task_struct *p);
+       int (*task_setrlimit) (unsigned int resource, struct rlimit *new_rlim);
+       int (*task_setscheduler) (struct task_struct *p, int policy,
+                                 struct sched_param *lp);
+       int (*task_getscheduler) (struct task_struct *p);
+       int (*task_movememory) (struct task_struct *p);
+       int (*task_kill) (struct task_struct *p,
+                         struct siginfo *info, int sig, u32 secid);
+       int (*task_wait) (struct task_struct *p);
        int (*task_prctl) (int option, unsigned long arg2,
                           unsigned long arg3, unsigned long arg4,
-                          unsigned long arg5);
-       void (*task_reparent_to_init) (struct task_struct * p);
-       void (*task_to_inode)(struct task_struct *p, struct inode *inode);
+                          unsigned long arg5, long *rc_p);
+       void (*task_reparent_to_init) (struct task_struct *p);
+       void (*task_to_inode) (struct task_struct *p, struct inode *inode);
 
-       int (*ipc_permission) (struct kern_ipc_perm * ipcp, short flag);
+       int (*ipc_permission) (struct kern_ipc_perm *ipcp, short flag);
        void (*ipc_getsecid) (struct kern_ipc_perm *ipcp, u32 *secid);
 
-       int (*msg_msg_alloc_security) (struct msg_msg * msg);
-       void (*msg_msg_free_security) (struct msg_msg * msg);
-
-       int (*msg_queue_alloc_security) (struct msg_queue * msq);
-       void (*msg_queue_free_security) (struct msg_queue * msq);
-       int (*msg_queue_associate) (struct msg_queue * msq, int msqflg);
-       int (*msg_queue_msgctl) (struct msg_queue * msq, int cmd);
-       int (*msg_queue_msgsnd) (struct msg_queue * msq,
-                                struct msg_msg * msg, int msqflg);
-       int (*msg_queue_msgrcv) (struct msg_queue * msq,
-                                struct msg_msg * msg,
-                                struct task_struct * target,
+       int (*msg_msg_alloc_security) (struct msg_msg *msg);
+       void (*msg_msg_free_security) (struct msg_msg *msg);
+
+       int (*msg_queue_alloc_security) (struct msg_queue *msq);
+       void (*msg_queue_free_security) (struct msg_queue *msq);
+       int (*msg_queue_associate) (struct msg_queue *msq, int msqflg);
+       int (*msg_queue_msgctl) (struct msg_queue *msq, int cmd);
+       int (*msg_queue_msgsnd) (struct msg_queue *msq,
+                                struct msg_msg *msg, int msqflg);
+       int (*msg_queue_msgrcv) (struct msg_queue *msq,
+                                struct msg_msg *msg,
+                                struct task_struct *target,
                                 long type, int mode);
 
-       int (*shm_alloc_security) (struct shmid_kernel * shp);
-       void (*shm_free_security) (struct shmid_kernel * shp);
-       int (*shm_associate) (struct shmid_kernel * shp, int shmflg);
-       int (*shm_shmctl) (struct shmid_kernel * shp, int cmd);
-       int (*shm_shmat) (struct shmid_kernel * shp, 
+       int (*shm_alloc_security) (struct shmid_kernel *shp);
+       void (*shm_free_security) (struct shmid_kernel *shp);
+       int (*shm_associate) (struct shmid_kernel *shp, int shmflg);
+       int (*shm_shmctl) (struct shmid_kernel *shp, int cmd);
+       int (*shm_shmat) (struct shmid_kernel *shp,
                          char __user *shmaddr, int shmflg);
 
-       int (*sem_alloc_security) (struct sem_array * sma);
-       void (*sem_free_security) (struct sem_array * sma);
-       int (*sem_associate) (struct sem_array * sma, int semflg);
-       int (*sem_semctl) (struct sem_array * sma, int cmd);
-       int (*sem_semop) (struct sem_array * sma, 
-                         struct sembuf * sops, unsigned nsops, int alter);
+       int (*sem_alloc_security) (struct sem_array *sma);
+       void (*sem_free_security) (struct sem_array *sma);
+       int (*sem_associate) (struct sem_array *sma, int semflg);
+       int (*sem_semctl) (struct sem_array *sma, int cmd);
+       int (*sem_semop) (struct sem_array *sma,
+                         struct sembuf *sops, unsigned nsops, int alter);
 
-       int (*netlink_send) (struct sock * sk, struct sk_buff * skb);
-       int (*netlink_recv) (struct sk_buff * skb, int cap);
+       int (*netlink_send) (struct sock *sk, struct sk_buff *skb);
+       int (*netlink_recv) (struct sk_buff *skb, int cap);
 
        /* allow module stacking */
        int (*register_security) (const char *name,
-                                 struct security_operations *ops);
+                                 struct security_operations *ops);
 
        void (*d_instantiate) (struct dentry *dentry, struct inode *inode);
 
-       int (*getprocattr)(struct task_struct *p, char *name, char **value);
-       int (*setprocattr)(struct task_struct *p, char *name, void *value, size_t size);
-       int (*secid_to_secctx)(u32 secid, char **secdata, u32 *seclen);
-       int (*secctx_to_secid)(char *secdata, u32 seclen, u32 *secid);
-       void (*release_secctx)(char *secdata, u32 seclen);
+       int (*getprocattr) (struct task_struct *p, char *name, char **value);
+       int (*setprocattr) (struct task_struct *p, char *name, void *value, size_t size);
+       int (*secid_to_secctx) (u32 secid, char **secdata, u32 *seclen);
+       int (*secctx_to_secid) (const char *secdata, u32 seclen, u32 *secid);
+       void (*release_secctx) (char *secdata, u32 seclen);
 
 #ifdef CONFIG_SECURITY_NETWORK
-       int (*unix_stream_connect) (struct socket * sock,
-                                   struct socket * other, struct sock * newsk);
-       int (*unix_may_send) (struct socket * sock, struct socket * other);
+       int (*unix_stream_connect) (struct socket *sock,
+                                   struct socket *other, struct sock *newsk);
+       int (*unix_may_send) (struct socket *sock, struct socket *other);
 
        int (*socket_create) (int family, int type, int protocol, int kern);
-       int (*socket_post_create) (struct socket * sock, int family,
+       int (*socket_post_create) (struct socket *sock, int family,
                                   int type, int protocol, int kern);
-       int (*socket_bind) (struct socket * sock,
-                           struct sockaddr * address, int addrlen);
-       int (*socket_connect) (struct socket * sock,
-                              struct sockaddr * address, int addrlen);
-       int (*socket_listen) (struct socket * sock, int backlog);
-       int (*socket_accept) (struct socket * sock, struct socket * newsock);
-       void (*socket_post_accept) (struct socket * sock,
-                                   struct socket * newsock);
-       int (*socket_sendmsg) (struct socket * sock,
-                              struct msghdr * msg, int size);
-       int (*socket_recvmsg) (struct socket * sock,
-                              struct msghdr * msg, int size, int flags);
-       int (*socket_getsockname) (struct socket * sock);
-       int (*socket_getpeername) (struct socket * sock);
-       int (*socket_getsockopt) (struct socket * sock, int level, int optname);
-       int (*socket_setsockopt) (struct socket * sock, int level, int optname);
-       int (*socket_shutdown) (struct socket * sock, int how);
-       int (*socket_sock_rcv_skb) (struct sock * sk, struct sk_buff * skb);
+       int (*socket_bind) (struct socket *sock,
+                           struct sockaddr *address, int addrlen);
+       int (*socket_connect) (struct socket *sock,
+                              struct sockaddr *address, int addrlen);
+       int (*socket_listen) (struct socket *sock, int backlog);
+       int (*socket_accept) (struct socket *sock, struct socket *newsock);
+       void (*socket_post_accept) (struct socket *sock,
+                                   struct socket *newsock);
+       int (*socket_sendmsg) (struct socket *sock,
+                              struct msghdr *msg, int size);
+       int (*socket_recvmsg) (struct socket *sock,
+                              struct msghdr *msg, int size, int flags);
+       int (*socket_getsockname) (struct socket *sock);
+       int (*socket_getpeername) (struct socket *sock);
+       int (*socket_getsockopt) (struct socket *sock, int level, int optname);
+       int (*socket_setsockopt) (struct socket *sock, int level, int optname);
+       int (*socket_shutdown) (struct socket *sock, int how);
+       int (*socket_sock_rcv_skb) (struct sock *sk, struct sk_buff *skb);
        int (*socket_getpeersec_stream) (struct socket *sock, char __user *optval, int __user *optlen, unsigned len);
        int (*socket_getpeersec_dgram) (struct socket *sock, struct sk_buff *skb, u32 *secid);
        int (*sk_alloc_security) (struct sock *sk, int family, gfp_t priority);
        void (*sk_free_security) (struct sock *sk);
        void (*sk_clone_security) (const struct sock *sk, struct sock *newsk);
        void (*sk_getsecid) (struct sock *sk, u32 *secid);
-       void (*sock_graft)(struct sock* sk, struct socket *parent);
-       int (*inet_conn_request)(struct sock *sk, struct sk_buff *skb,
-                                       struct request_sock *req);
-       void (*inet_csk_clone)(struct sock *newsk, const struct request_sock *req);
-       void (*inet_conn_established)(struct sock *sk, struct sk_buff *skb);
-       void (*req_classify_flow)(const struct request_sock *req, struct flowi *fl);
+       void (*sock_graft) (struct sock *sk, struct socket *parent);
+       int (*inet_conn_request) (struct sock *sk, struct sk_buff *skb,
+                                 struct request_sock *req);
+       void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req);
+       void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb);
+       void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl);
 #endif /* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -1521,57 +1535,57 @@ struct security_operations {
                u32 secid);
        void (*xfrm_state_free_security) (struct xfrm_state *x);
        int (*xfrm_state_delete_security) (struct xfrm_state *x);
-       int (*xfrm_policy_lookup)(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
-       int (*xfrm_state_pol_flow_match)(struct xfrm_state *x,
-                       struct xfrm_policy *xp, struct flowi *fl);
-       int (*xfrm_decode_session)(struct sk_buff *skb, u32 *secid, int ckall);
+       int (*xfrm_policy_lookup) (struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
+       int (*xfrm_state_pol_flow_match) (struct xfrm_state *x,
+                                         struct xfrm_policy *xp,
+                                         struct flowi *fl);
+       int (*xfrm_decode_session) (struct sk_buff *skb, u32 *secid, int ckall);
 #endif /* CONFIG_SECURITY_NETWORK_XFRM */
 
        /* key management security hooks */
 #ifdef CONFIG_KEYS
-       int (*key_alloc)(struct key *key, struct task_struct *tsk, unsigned long flags);
-       void (*key_free)(struct key *key);
-       int (*key_permission)(key_ref_t key_ref,
-                             struct task_struct *context,
-                             key_perm_t perm);
-
+       int (*key_alloc) (struct key *key, struct task_struct *tsk, unsigned long flags);
+       void (*key_free) (struct key *key);
+       int (*key_permission) (key_ref_t key_ref,
+                              struct task_struct *context,
+                              key_perm_t perm);
+       int (*key_getsecurity)(struct key *key, char **_buffer);
 #endif /* CONFIG_KEYS */
 
 #ifdef CONFIG_AUDIT
-       int (*audit_rule_init)(u32 field, u32 op, char *rulestr, void **lsmrule);
-       int (*audit_rule_known)(struct audit_krule *krule);
-       int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule,
-                               struct audit_context *actx);
-       void (*audit_rule_free)(void *lsmrule);
+       int (*audit_rule_init) (u32 field, u32 op, char *rulestr, void **lsmrule);
+       int (*audit_rule_known) (struct audit_krule *krule);
+       int (*audit_rule_match) (u32 secid, u32 field, u32 op, void *lsmrule,
+                                struct audit_context *actx);
+       void (*audit_rule_free) (void *lsmrule);
 #endif /* CONFIG_AUDIT */
 };
 
 /* prototypes */
-extern int security_init       (void);
+extern int security_init(void);
 extern int security_module_enable(struct security_operations *ops);
-extern int register_security   (struct security_operations *ops);
-extern int mod_reg_security    (const char *name, struct security_operations *ops);
+extern int register_security(struct security_operations *ops);
+extern int mod_reg_security(const char *name, struct security_operations *ops);
 extern struct dentry *securityfs_create_file(const char *name, mode_t mode,
                                             struct dentry *parent, void *data,
                                             const struct file_operations *fops);
 extern struct dentry *securityfs_create_dir(const char *name, struct dentry *parent);
 extern void securityfs_remove(struct dentry *dentry);
 
-
 /* Security operations */
 int security_ptrace(struct task_struct *parent, struct task_struct *child);
 int security_capget(struct task_struct *target,
-                    kernel_cap_t *effective,
-                    kernel_cap_t *inheritable,
-                    kernel_cap_t *permitted);
+                   kernel_cap_t *effective,
+                   kernel_cap_t *inheritable,
+                   kernel_cap_t *permitted);
 int security_capset_check(struct task_struct *target,
-                          kernel_cap_t *effective,
-                          kernel_cap_t *inheritable,
-                          kernel_cap_t *permitted);
-void security_capset_set(struct task_struct *target,
                          kernel_cap_t *effective,
                          kernel_cap_t *inheritable,
                          kernel_cap_t *permitted);
+void security_capset_set(struct task_struct *target,
+                        kernel_cap_t *effective,
+                        kernel_cap_t *inheritable,
+                        kernel_cap_t *permitted);
 int security_capable(struct task_struct *tsk, int cap);
 int security_acct(struct file *file);
 int security_sysctl(struct ctl_table *table, int op);
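
security_module_enable() and register_security(), whose prototypes appear in the hunk above, are what an LSM's init code calls. A condensed, hypothetical registration sketch reusing the example hooks from earlier; unset entries are filled in with the kernel's defaults:

static struct security_operations example_security_ops = {
	.name		= "example",
	.syslog		= example_syslog,
	.task_prctl	= example_task_prctl,
};

static int __init example_lsm_init(void)
{
	if (!security_module_enable(&example_security_ops))
		return 0;	/* another LSM was chosen at boot */
	if (register_security(&example_security_ops))
		panic("example: failed to register with the security framework");
	return 0;
}
security_initcall(example_lsm_init);
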
@@ -1594,7 +1608,7 @@ int security_sb_copy_data(char *orig, char *copy);
 int security_sb_kern_mount(struct super_block *sb, void *data);
 int security_sb_statfs(struct dentry *dentry);
 int security_sb_mount(char *dev_name, struct path *path,
-                       char *type, unsigned long flags, void *data);
+                     char *type, unsigned long flags, void *data);
 int security_sb_check_sb(struct vfsmount *mnt, struct path *path);
 int security_sb_umount(struct vfsmount *mnt, int flags);
 void security_sb_umount_close(struct vfsmount *mnt);
@@ -1619,25 +1633,25 @@ int security_inode_link(struct dentry *old_dentry, struct inode *dir,
                         struct dentry *new_dentry);
 int security_inode_unlink(struct inode *dir, struct dentry *dentry);
 int security_inode_symlink(struct inode *dir, struct dentry *dentry,
-                           const char *old_name);
+                          const char *old_name);
 int security_inode_mkdir(struct inode *dir, struct dentry *dentry, int mode);
 int security_inode_rmdir(struct inode *dir, struct dentry *dentry);
 int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev);
 int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
-                          struct inode *new_dir, struct dentry *new_dentry);
+                         struct inode *new_dir, struct dentry *new_dentry);
 int security_inode_readlink(struct dentry *dentry);
 int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
 int security_inode_permission(struct inode *inode, int mask, struct nameidata *nd);
 int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
 int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry);
 void security_inode_delete(struct inode *inode);
-int security_inode_setxattr(struct dentry *dentry, char *name,
-                            void *value, size_t size, int flags);
-void security_inode_post_setxattr(struct dentry *dentry, char *name,
-                                  void *value, size_t size, int flags);
-int security_inode_getxattr(struct dentry *dentry, char *name);
+int security_inode_setxattr(struct dentry *dentry, const char *name,
+                           const void *value, size_t size, int flags);
+void security_inode_post_setxattr(struct dentry *dentry, const char *name,
+                                 const void *value, size_t size, int flags);
+int security_inode_getxattr(struct dentry *dentry, const char *name);
 int security_inode_listxattr(struct dentry *dentry);
-int security_inode_removexattr(struct dentry *dentry, char *name);
+int security_inode_removexattr(struct dentry *dentry, const char *name);
 int security_inode_need_killpriv(struct dentry *dentry);
 int security_inode_killpriv(struct dentry *dentry);
 int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc);
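
With @name and @value now const-qualified, both in the hooks and in the security_inode_*xattr() wrappers above, an implementation can inspect the attribute name without casting. A hypothetical policy sketch guarding the security.* namespace:

#include <linux/xattr.h>	/* XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN */

/* Hypothetical inode_setxattr hook: only CAP_SYS_ADMIN may write
 * security.* attributes; everything else falls back to DAC (return 0). */
static int example_inode_setxattr(struct dentry *dentry, const char *name,
				  const void *value, size_t size, int flags)
{
	if (!strncmp(name, XATTR_SECURITY_PREFIX,
		     XATTR_SECURITY_PREFIX_LEN) &&
	    !capable(CAP_SYS_ADMIN))
		return -EPERM;
	return 0;
}
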
@@ -1652,12 +1666,12 @@ int security_file_mmap(struct file *file, unsigned long reqprot,
                        unsigned long prot, unsigned long flags,
                        unsigned long addr, unsigned long addr_only);
 int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
-                           unsigned long prot);
+                          unsigned long prot);
 int security_file_lock(struct file *file, unsigned int cmd);
 int security_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg);
 int security_file_set_fowner(struct file *file);
 int security_file_send_sigiotask(struct task_struct *tsk,
-                                 struct fown_struct *fown, int sig);
+                                struct fown_struct *fown, int sig);
 int security_file_receive(struct file *file);
 int security_dentry_open(struct file *file);
 int security_task_create(unsigned long clone_flags);
@@ -1665,7 +1679,7 @@ int security_task_alloc(struct task_struct *p);
 void security_task_free(struct task_struct *p);
 int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags);
 int security_task_post_setuid(uid_t old_ruid, uid_t old_euid,
-                              uid_t old_suid, int flags);
+                             uid_t old_suid, int flags);
 int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags);
 int security_task_setpgid(struct task_struct *p, pid_t pgid);
 int security_task_getpgid(struct task_struct *p);
@@ -1684,7 +1698,7 @@ int security_task_kill(struct task_struct *p, struct siginfo *info,
                        int sig, u32 secid);
 int security_task_wait(struct task_struct *p);
 int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
-                        unsigned long arg4, unsigned long arg5);
+                        unsigned long arg4, unsigned long arg5, long *rc_p);
 void security_task_reparent_to_init(struct task_struct *p);
 void security_task_to_inode(struct task_struct *p, struct inode *inode);
 int security_ipc_permission(struct kern_ipc_perm *ipcp, short flag);
@@ -1696,9 +1710,9 @@ void security_msg_queue_free(struct msg_queue *msq);
 int security_msg_queue_associate(struct msg_queue *msq, int msqflg);
 int security_msg_queue_msgctl(struct msg_queue *msq, int cmd);
 int security_msg_queue_msgsnd(struct msg_queue *msq,
-                              struct msg_msg *msg, int msqflg);
+                             struct msg_msg *msg, int msqflg);
 int security_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
-                              struct task_struct *target, long type, int mode);
+                             struct task_struct *target, long type, int mode);
 int security_shm_alloc(struct shmid_kernel *shp);
 void security_shm_free(struct shmid_kernel *shp);
 int security_shm_associate(struct shmid_kernel *shp, int shmflg);
@@ -1710,13 +1724,13 @@ int security_sem_associate(struct sem_array *sma, int semflg);
 int security_sem_semctl(struct sem_array *sma, int cmd);
 int security_sem_semop(struct sem_array *sma, struct sembuf *sops,
                        unsigned nsops, int alter);
-void security_d_instantiate (struct dentry *dentry, struct inode *inode);
+void security_d_instantiate(struct dentry *dentry, struct inode *inode);
 int security_getprocattr(struct task_struct *p, char *name, char **value);
 int security_setprocattr(struct task_struct *p, char *name, void *value, size_t size);
 int security_netlink_send(struct sock *sk, struct sk_buff *skb);
 int security_netlink_recv(struct sk_buff *skb, int cap);
 int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen);
-int security_secctx_to_secid(char *secdata, u32 seclen, u32 *secid);
+int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid);
 void security_release_secctx(char *secdata, u32 seclen);
 
 #else /* CONFIG_SECURITY */
@@ -1741,33 +1755,33 @@ static inline int security_init(void)
        return 0;
 }
 
-static inline int security_ptrace (struct task_struct *parent, struct task_struct * child)
+static inline int security_ptrace(struct task_struct *parent, struct task_struct *child)
 {
-       return cap_ptrace (parent, child);
+       return cap_ptrace(parent, child);
 }
 
-static inline int security_capget (struct task_struct *target,
+static inline int security_capget(struct task_struct *target,
                                   kernel_cap_t *effective,
                                   kernel_cap_t *inheritable,
                                   kernel_cap_t *permitted)
 {
-       return cap_capget (target, effective, inheritable, permitted);
+       return cap_capget(target, effective, inheritable, permitted);
 }
 
-static inline int security_capset_check (struct task_struct *target,
+static inline int security_capset_check(struct task_struct *target,
                                         kernel_cap_t *effective,
                                         kernel_cap_t *inheritable,
                                         kernel_cap_t *permitted)
 {
-       return cap_capset_check (target, effective, inheritable, permitted);
+       return cap_capset_check(target, effective, inheritable, permitted);
 }
 
-static inline void security_capset_set (struct task_struct *target,
+static inline void security_capset_set(struct task_struct *target,
                                        kernel_cap_t *effective,
                                        kernel_cap_t *inheritable,
                                        kernel_cap_t *permitted)
 {
-       cap_capset_set (target, effective, inheritable, permitted);
+       cap_capset_set(target, effective, inheritable, permitted);
 }
 
 static inline int security_capable(struct task_struct *tsk, int cap)
@@ -1775,7 +1789,7 @@ static inline int security_capable(struct task_struct *tsk, int cap)
        return cap_capable(tsk, cap);
 }
 
-static inline int security_acct (struct file *file)
+static inline int security_acct(struct file *file)
 {
        return 0;
 }
@@ -1785,13 +1799,13 @@ static inline int security_sysctl(struct ctl_table *table, int op)
        return 0;
 }
 
-static inline int security_quotactl (int cmds, int type, int id,
-                                    struct super_block * sb)
+static inline int security_quotactl(int cmds, int type, int id,
+                                    struct super_block *sb)
 {
        return 0;
 }
 
-static inline int security_quota_on (struct dentry * dentry)
+static inline int security_quota_on(struct dentry *dentry)
 {
        return 0;
 }
@@ -1816,102 +1830,102 @@ static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
        return cap_vm_enough_memory(mm, pages);
 }
 
-static inline int security_bprm_alloc (struct linux_binprm *bprm)
+static inline int security_bprm_alloc(struct linux_binprm *bprm)
 {
        return 0;
 }
 
-static inline void security_bprm_free (struct linux_binprm *bprm)
+static inline void security_bprm_free(struct linux_binprm *bprm)
 { }
 
-static inline void security_bprm_apply_creds (struct linux_binprm *bprm, int unsafe)
-{ 
-       cap_bprm_apply_creds (bprm, unsafe);
+static inline void security_bprm_apply_creds(struct linux_binprm *bprm, int unsafe)
+{
+       cap_bprm_apply_creds(bprm, unsafe);
 }
 
-static inline void security_bprm_post_apply_creds (struct linux_binprm *bprm)
+static inline void security_bprm_post_apply_creds(struct linux_binprm *bprm)
 {
        return;
 }
 
-static inline int security_bprm_set (struct linux_binprm *bprm)
+static inline int security_bprm_set(struct linux_binprm *bprm)
 {
-       return cap_bprm_set_security (bprm);
+       return cap_bprm_set_security(bprm);
 }
 
-static inline int security_bprm_check (struct linux_binprm *bprm)
+static inline int security_bprm_check(struct linux_binprm *bprm)
 {
        return 0;
 }
 
-static inline int security_bprm_secureexec (struct linux_binprm *bprm)
+static inline int security_bprm_secureexec(struct linux_binprm *bprm)
 {
        return cap_bprm_secureexec(bprm);
 }
 
-static inline int security_sb_alloc (struct super_block *sb)
+static inline int security_sb_alloc(struct super_block *sb)
 {
        return 0;
 }
 
-static inline void security_sb_free (struct super_block *sb)
+static inline void security_sb_free(struct super_block *sb)
 { }
 
-static inline int security_sb_copy_data (char *orig, char *copy)
+static inline int security_sb_copy_data(char *orig, char *copy)
 {
        return 0;
 }
 
-static inline int security_sb_kern_mount (struct super_block *sb, void *data)
+static inline int security_sb_kern_mount(struct super_block *sb, void *data)
 {
        return 0;
 }
 
-static inline int security_sb_statfs (struct dentry *dentry)
+static inline int security_sb_statfs(struct dentry *dentry)
 {
        return 0;
 }
 
-static inline int security_sb_mount (char *dev_name, struct path *path,
+static inline int security_sb_mount(char *dev_name, struct path *path,
                                    char *type, unsigned long flags,
                                    void *data)
 {
        return 0;
 }
 
-static inline int security_sb_check_sb (struct vfsmount *mnt,
-                                       struct path *path)
+static inline int security_sb_check_sb(struct vfsmount *mnt,
+                                      struct path *path)
 {
        return 0;
 }
 
-static inline int security_sb_umount (struct vfsmount *mnt, int flags)
+static inline int security_sb_umount(struct vfsmount *mnt, int flags)
 {
        return 0;
 }
 
-static inline void security_sb_umount_close (struct vfsmount *mnt)
+static inline void security_sb_umount_close(struct vfsmount *mnt)
 { }
 
-static inline void security_sb_umount_busy (struct vfsmount *mnt)
+static inline void security_sb_umount_busy(struct vfsmount *mnt)
 { }
 
-static inline void security_sb_post_remount (struct vfsmount *mnt,
+static inline void security_sb_post_remount(struct vfsmount *mnt,
                                             unsigned long flags, void *data)
 { }
 
-static inline void security_sb_post_addmount (struct vfsmount *mnt,
-                                             struct path *mountpoint)
+static inline void security_sb_post_addmount(struct vfsmount *mnt,
+                                            struct path *mountpoint)
 { }
 
-static inline int security_sb_pivotroot (struct path *old_path,
-                                        struct path *new_path)
+static inline int security_sb_pivotroot(struct path *old_path,
+                                       struct path *new_path)
 {
        return 0;
 }
 
-static inline void security_sb_post_pivotroot (struct path *old_path,
-                                              struct path *new_path)
+static inline void security_sb_post_pivotroot(struct path *old_path,
+                                             struct path *new_path)
 { }
 static inline int security_sb_get_mnt_opts(const struct super_block *sb,
                                           struct security_mnt_opts *opts)
@@ -1935,15 +1949,15 @@ static inline int security_sb_parse_opts_str(char *options, struct security_mnt_
        return 0;
 }
 
-static inline int security_inode_alloc (struct inode *inode)
+static inline int security_inode_alloc(struct inode *inode)
 {
        return 0;
 }
 
-static inline void security_inode_free (struct inode *inode)
+static inline void security_inode_free(struct inode *inode)
 { }
 
-static inline int security_inode_init_security (struct inode *inode,
+static inline int security_inode_init_security(struct inode *inode,
                                                struct inode *dir,
                                                char **name,
                                                void **value,
@@ -1951,55 +1965,55 @@ static inline int security_inode_init_security (struct inode *inode,
 {
        return -EOPNOTSUPP;
 }
-       
-static inline int security_inode_create (struct inode *dir,
+
+static inline int security_inode_create(struct inode *dir,
                                         struct dentry *dentry,
                                         int mode)
 {
        return 0;
 }
 
-static inline int security_inode_link (struct dentry *old_dentry,
+static inline int security_inode_link(struct dentry *old_dentry,
                                       struct inode *dir,
                                       struct dentry *new_dentry)
 {
        return 0;
 }
 
-static inline int security_inode_unlink (struct inode *dir,
+static inline int security_inode_unlink(struct inode *dir,
                                         struct dentry *dentry)
 {
        return 0;
 }
 
-static inline int security_inode_symlink (struct inode *dir,
+static inline int security_inode_symlink(struct inode *dir,
                                          struct dentry *dentry,
                                          const char *old_name)
 {
        return 0;
 }
 
-static inline int security_inode_mkdir (struct inode *dir,
+static inline int security_inode_mkdir(struct inode *dir,
                                        struct dentry *dentry,
                                        int mode)
 {
        return 0;
 }
 
-static inline int security_inode_rmdir (struct inode *dir,
+static inline int security_inode_rmdir(struct inode *dir,
                                        struct dentry *dentry)
 {
        return 0;
 }
 
-static inline int security_inode_mknod (struct inode *dir,
+static inline int security_inode_mknod(struct inode *dir,
                                        struct dentry *dentry,
                                        int mode, dev_t dev)
 {
        return 0;
 }
 
-static inline int security_inode_rename (struct inode *old_dir,
+static inline int security_inode_rename(struct inode *old_dir,
                                         struct dentry *old_dentry,
                                         struct inode *new_dir,
                                         struct dentry *new_dentry)
@@ -2007,59 +2021,61 @@ static inline int security_inode_rename (struct inode *old_dir,
        return 0;
 }
 
-static inline int security_inode_readlink (struct dentry *dentry)
+static inline int security_inode_readlink(struct dentry *dentry)
 {
        return 0;
 }
 
-static inline int security_inode_follow_link (struct dentry *dentry,
+static inline int security_inode_follow_link(struct dentry *dentry,
                                              struct nameidata *nd)
 {
        return 0;
 }
 
-static inline int security_inode_permission (struct inode *inode, int mask,
+static inline int security_inode_permission(struct inode *inode, int mask,
                                             struct nameidata *nd)
 {
        return 0;
 }
 
-static inline int security_inode_setattr (struct dentry *dentry,
+static inline int security_inode_setattr(struct dentry *dentry,
                                          struct iattr *attr)
 {
        return 0;
 }
 
-static inline int security_inode_getattr (struct vfsmount *mnt,
+static inline int security_inode_getattr(struct vfsmount *mnt,
                                          struct dentry *dentry)
 {
        return 0;
 }
 
-static inline void security_inode_delete (struct inode *inode)
+static inline void security_inode_delete(struct inode *inode)
 { }
 
-static inline int security_inode_setxattr (struct dentry *dentry, char *name,
-                                          void *value, size_t size, int flags)
+static inline int security_inode_setxattr(struct dentry *dentry,
+               const char *name, const void *value, size_t size, int flags)
 {
        return cap_inode_setxattr(dentry, name, value, size, flags);
 }
 
-static inline void security_inode_post_setxattr (struct dentry *dentry, char *name,
-                                                void *value, size_t size, int flags)
+static inline void security_inode_post_setxattr(struct dentry *dentry,
+               const char *name, const void *value, size_t size, int flags)
 { }
 
-static inline int security_inode_getxattr (struct dentry *dentry, char *name)
+static inline int security_inode_getxattr(struct dentry *dentry,
+                       const char *name)
 {
        return 0;
 }
 
-static inline int security_inode_listxattr (struct dentry *dentry)
+static inline int security_inode_listxattr(struct dentry *dentry)
 {
        return 0;
 }
 
-static inline int security_inode_removexattr (struct dentry *dentry, char *name)
+static inline int security_inode_removexattr(struct dentry *dentry,
+                       const char *name)
 {
        return cap_inode_removexattr(dentry, name);
 }
@@ -2094,198 +2110,198 @@ static inline void security_inode_getsecid(const struct inode *inode, u32 *secid
        *secid = 0;
 }
 
-static inline int security_file_permission (struct file *file, int mask)
+static inline int security_file_permission(struct file *file, int mask)
 {
        return 0;
 }
 
-static inline int security_file_alloc (struct file *file)
+static inline int security_file_alloc(struct file *file)
 {
        return 0;
 }
 
-static inline void security_file_free (struct file *file)
+static inline void security_file_free(struct file *file)
 { }
 
-static inline int security_file_ioctl (struct file *file, unsigned int cmd,
-                                      unsigned long arg)
+static inline int security_file_ioctl(struct file *file, unsigned int cmd,
+                                     unsigned long arg)
 {
        return 0;
 }
 
-static inline int security_file_mmap (struct file *file, unsigned long reqprot,
-                                     unsigned long prot,
-                                     unsigned long flags,
-                                     unsigned long addr,
-                                     unsigned long addr_only)
+static inline int security_file_mmap(struct file *file, unsigned long reqprot,
+                                    unsigned long prot,
+                                    unsigned long flags,
+                                    unsigned long addr,
+                                    unsigned long addr_only)
 {
        return 0;
 }
 
-static inline int security_file_mprotect (struct vm_area_struct *vma,
-                                         unsigned long reqprot,
-                                         unsigned long prot)
+static inline int security_file_mprotect(struct vm_area_struct *vma,
+                                        unsigned long reqprot,
+                                        unsigned long prot)
 {
        return 0;
 }
 
-static inline int security_file_lock (struct file *file, unsigned int cmd)
+static inline int security_file_lock(struct file *file, unsigned int cmd)
 {
        return 0;
 }
 
-static inline int security_file_fcntl (struct file *file, unsigned int cmd,
-                                      unsigned long arg)
+static inline int security_file_fcntl(struct file *file, unsigned int cmd,
+                                     unsigned long arg)
 {
        return 0;
 }
 
-static inline int security_file_set_fowner (struct file *file)
+static inline int security_file_set_fowner(struct file *file)
 {
        return 0;
 }
 
-static inline int security_file_send_sigiotask (struct task_struct *tsk,
-                                               struct fown_struct *fown,
-                                               int sig)
+static inline int security_file_send_sigiotask(struct task_struct *tsk,
+                                              struct fown_struct *fown,
+                                              int sig)
 {
        return 0;
 }
 
-static inline int security_file_receive (struct file *file)
+static inline int security_file_receive(struct file *file)
 {
        return 0;
 }
 
-static inline int security_dentry_open (struct file *file)
+static inline int security_dentry_open(struct file *file)
 {
        return 0;
 }
 
-static inline int security_task_create (unsigned long clone_flags)
+static inline int security_task_create(unsigned long clone_flags)
 {
        return 0;
 }
 
-static inline int security_task_alloc (struct task_struct *p)
+static inline int security_task_alloc(struct task_struct *p)
 {
        return 0;
 }
 
-static inline void security_task_free (struct task_struct *p)
+static inline void security_task_free(struct task_struct *p)
 { }
 
-static inline int security_task_setuid (uid_t id0, uid_t id1, uid_t id2,
-                                       int flags)
+static inline int security_task_setuid(uid_t id0, uid_t id1, uid_t id2,
+                                      int flags)
 {
        return 0;
 }
 
-static inline int security_task_post_setuid (uid_t old_ruid, uid_t old_euid,
-                                            uid_t old_suid, int flags)
+static inline int security_task_post_setuid(uid_t old_ruid, uid_t old_euid,
+                                           uid_t old_suid, int flags)
 {
-       return cap_task_post_setuid (old_ruid, old_euid, old_suid, flags);
+       return cap_task_post_setuid(old_ruid, old_euid, old_suid, flags);
 }
 
-static inline int security_task_setgid (gid_t id0, gid_t id1, gid_t id2,
-                                       int flags)
+static inline int security_task_setgid(gid_t id0, gid_t id1, gid_t id2,
+                                      int flags)
 {
        return 0;
 }
 
-static inline int security_task_setpgid (struct task_struct *p, pid_t pgid)
+static inline int security_task_setpgid(struct task_struct *p, pid_t pgid)
 {
        return 0;
 }
 
-static inline int security_task_getpgid (struct task_struct *p)
+static inline int security_task_getpgid(struct task_struct *p)
 {
        return 0;
 }
 
-static inline int security_task_getsid (struct task_struct *p)
+static inline int security_task_getsid(struct task_struct *p)
 {
        return 0;
 }
 
-static inline void security_task_getsecid (struct task_struct *p, u32 *secid)
+static inline void security_task_getsecid(struct task_struct *p, u32 *secid)
 {
        *secid = 0;
 }
 
-static inline int security_task_setgroups (struct group_info *group_info)
+static inline int security_task_setgroups(struct group_info *group_info)
 {
        return 0;
 }
 
-static inline int security_task_setnice (struct task_struct *p, int nice)
+static inline int security_task_setnice(struct task_struct *p, int nice)
 {
        return cap_task_setnice(p, nice);
 }
 
-static inline int security_task_setioprio (struct task_struct *p, int ioprio)
+static inline int security_task_setioprio(struct task_struct *p, int ioprio)
 {
        return cap_task_setioprio(p, ioprio);
 }
 
-static inline int security_task_getioprio (struct task_struct *p)
+static inline int security_task_getioprio(struct task_struct *p)
 {
        return 0;
 }
 
-static inline int security_task_setrlimit (unsigned int resource,
-                                          struct rlimit *new_rlim)
+static inline int security_task_setrlimit(unsigned int resource,
+                                         struct rlimit *new_rlim)
 {
        return 0;
 }
 
-static inline int security_task_setscheduler (struct task_struct *p,
-                                             int policy,
-                                             struct sched_param *lp)
+static inline int security_task_setscheduler(struct task_struct *p,
+                                            int policy,
+                                            struct sched_param *lp)
 {
        return cap_task_setscheduler(p, policy, lp);
 }
 
-static inline int security_task_getscheduler (struct task_struct *p)
+static inline int security_task_getscheduler(struct task_struct *p)
 {
        return 0;
 }
 
-static inline int security_task_movememory (struct task_struct *p)
+static inline int security_task_movememory(struct task_struct *p)
 {
        return 0;
 }
 
-static inline int security_task_kill (struct task_struct *p,
-                                     struct siginfo *info, int sig,
-                                     u32 secid)
+static inline int security_task_kill(struct task_struct *p,
+                                    struct siginfo *info, int sig,
+                                    u32 secid)
 {
        return 0;
 }
 
-static inline int security_task_wait (struct task_struct *p)
+static inline int security_task_wait(struct task_struct *p)
 {
        return 0;
 }
 
-static inline int security_task_prctl (int option, unsigned long arg2,
-                                      unsigned long arg3,
-                                      unsigned long arg4,
-                                      unsigned long arg5)
+static inline int security_task_prctl(int option, unsigned long arg2,
+                                     unsigned long arg3,
+                                     unsigned long arg4,
+                                     unsigned long arg5, long *rc_p)
 {
-       return 0;
+       return cap_task_prctl(option, arg2, arg3, arg4, arg5, rc_p);
 }
 
-static inline void security_task_reparent_to_init (struct task_struct *p)
+static inline void security_task_reparent_to_init(struct task_struct *p)
 {
-       cap_task_reparent_to_init (p);
+       cap_task_reparent_to_init(p);
 }
 
 static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
 { }
 
-static inline int security_ipc_permission (struct kern_ipc_perm *ipcp,
-                                          short flag)
+static inline int security_ipc_permission(struct kern_ipc_perm *ipcp,
+                                         short flag)
 {
        return 0;
 }
@@ -2295,98 +2311,98 @@ static inline void security_ipc_getsecid(struct kern_ipc_perm *ipcp, u32 *secid)
        *secid = 0;
 }
 
-static inline int security_msg_msg_alloc (struct msg_msg * msg)
+static inline int security_msg_msg_alloc(struct msg_msg *msg)
 {
        return 0;
 }
 
-static inline void security_msg_msg_free (struct msg_msg * msg)
+static inline void security_msg_msg_free(struct msg_msg *msg)
 { }
 
-static inline int security_msg_queue_alloc (struct msg_queue *msq)
+static inline int security_msg_queue_alloc(struct msg_queue *msq)
 {
        return 0;
 }
 
-static inline void security_msg_queue_free (struct msg_queue *msq)
+static inline void security_msg_queue_free(struct msg_queue *msq)
 { }
 
-static inline int security_msg_queue_associate (struct msg_queue * msq, 
-                                               int msqflg)
+static inline int security_msg_queue_associate(struct msg_queue *msq,
+                                              int msqflg)
 {
        return 0;
 }
 
-static inline int security_msg_queue_msgctl (struct msg_queue * msq, int cmd)
+static inline int security_msg_queue_msgctl(struct msg_queue *msq, int cmd)
 {
        return 0;
 }
 
-static inline int security_msg_queue_msgsnd (struct msg_queue * msq,
-                                            struct msg_msg * msg, int msqflg)
+static inline int security_msg_queue_msgsnd(struct msg_queue *msq,
+                                           struct msg_msg *msg, int msqflg)
 {
        return 0;
 }
 
-static inline int security_msg_queue_msgrcv (struct msg_queue * msq,
-                                            struct msg_msg * msg,
-                                            struct task_struct * target,
-                                            long type, int mode)
+static inline int security_msg_queue_msgrcv(struct msg_queue *msq,
+                                           struct msg_msg *msg,
+                                           struct task_struct *target,
+                                           long type, int mode)
 {
        return 0;
 }
 
-static inline int security_shm_alloc (struct shmid_kernel *shp)
+static inline int security_shm_alloc(struct shmid_kernel *shp)
 {
        return 0;
 }
 
-static inline void security_shm_free (struct shmid_kernel *shp)
+static inline void security_shm_free(struct shmid_kernel *shp)
 { }
 
-static inline int security_shm_associate (struct shmid_kernel * shp, 
-                                         int shmflg)
+static inline int security_shm_associate(struct shmid_kernel *shp,
+                                        int shmflg)
 {
        return 0;
 }
 
-static inline int security_shm_shmctl (struct shmid_kernel * shp, int cmd)
+static inline int security_shm_shmctl(struct shmid_kernel *shp, int cmd)
 {
        return 0;
 }
 
-static inline int security_shm_shmat (struct shmid_kernel * shp, 
-                                     char __user *shmaddr, int shmflg)
+static inline int security_shm_shmat(struct shmid_kernel *shp,
+                                    char __user *shmaddr, int shmflg)
 {
        return 0;
 }
 
-static inline int security_sem_alloc (struct sem_array *sma)
+static inline int security_sem_alloc(struct sem_array *sma)
 {
        return 0;
 }
 
-static inline void security_sem_free (struct sem_array *sma)
+static inline void security_sem_free(struct sem_array *sma)
 { }
 
-static inline int security_sem_associate (struct sem_array * sma, int semflg)
+static inline int security_sem_associate(struct sem_array *sma, int semflg)
 {
        return 0;
 }
 
-static inline int security_sem_semctl (struct sem_array * sma, int cmd)
+static inline int security_sem_semctl(struct sem_array *sma, int cmd)
 {
        return 0;
 }
 
-static inline int security_sem_semop (struct sem_array * sma, 
-                                     struct sembuf * sops, unsigned nsops, 
-                                     int alter)
+static inline int security_sem_semop(struct sem_array *sma,
+                                    struct sembuf *sops, unsigned nsops,
+                                    int alter)
 {
        return 0;
 }
 
-static inline void security_d_instantiate (struct dentry *dentry, struct inode *inode)
+static inline void security_d_instantiate(struct dentry *dentry, struct inode *inode)
 { }
 
 static inline int security_getprocattr(struct task_struct *p, char *name, char **value)
@@ -2399,14 +2415,14 @@ static inline int security_setprocattr(struct task_struct *p, char *name, void *
        return -EINVAL;
 }
 
-static inline int security_netlink_send (struct sock *sk, struct sk_buff *skb)
+static inline int security_netlink_send(struct sock *sk, struct sk_buff *skb)
 {
-       return cap_netlink_send (sk, skb);
+       return cap_netlink_send(sk, skb);
 }
 
-static inline int security_netlink_recv (struct sk_buff *skb, int cap)
+static inline int security_netlink_recv(struct sk_buff *skb, int cap)
 {
-       return cap_netlink_recv (skb, cap);
+       return cap_netlink_recv(skb, cap);
 }
 
 static inline struct dentry *securityfs_create_dir(const char *name,
@@ -2433,7 +2449,7 @@ static inline int security_secid_to_secctx(u32 secid, char **secdata, u32 *secle
        return -EOPNOTSUPP;
 }
 
-static inline int security_secctx_to_secid(char *secdata,
+static inline int security_secctx_to_secid(const char *secdata,
                                           u32 seclen,
                                           u32 *secid)
 {
@@ -2484,26 +2500,26 @@ void security_inet_conn_established(struct sock *sk,
                        struct sk_buff *skb);
 
 #else  /* CONFIG_SECURITY_NETWORK */
-static inline int security_unix_stream_connect(struct socket * sock,
-                                              struct socket * other,
-                                              struct sock * newsk)
+static inline int security_unix_stream_connect(struct socket *sock,
+                                              struct socket *other,
+                                              struct sock *newsk)
 {
        return 0;
 }
 
-static inline int security_unix_may_send(struct socket * sock, 
-                                        struct socket * other)
+static inline int security_unix_may_send(struct socket *sock,
+                                        struct socket *other)
 {
        return 0;
 }
 
-static inline int security_socket_create (int family, int type,
-                                         int protocol, int kern)
+static inline int security_socket_create(int family, int type,
+                                        int protocol, int kern)
 {
        return 0;
 }
 
-static inline int security_socket_post_create(struct socket * sock,
+static inline int security_socket_post_create(struct socket *sock,
                                              int family,
                                              int type,
                                              int protocol, int kern)
@@ -2511,77 +2527,77 @@ static inline int security_socket_post_create(struct socket * sock,
        return 0;
 }
 
-static inline int security_socket_bind(struct socket * sock, 
-                                      struct sockaddr * address, 
+static inline int security_socket_bind(struct socket *sock,
+                                      struct sockaddr *address,
                                       int addrlen)
 {
        return 0;
 }
 
-static inline int security_socket_connect(struct socket * sock, 
-                                         struct sockaddr * address, 
+static inline int security_socket_connect(struct socket *sock,
+                                         struct sockaddr *address,
                                          int addrlen)
 {
        return 0;
 }
 
-static inline int security_socket_listen(struct socket * sock, int backlog)
+static inline int security_socket_listen(struct socket *sock, int backlog)
 {
        return 0;
 }
 
-static inline int security_socket_accept(struct socket * sock, 
-                                        struct socket * newsock)
+static inline int security_socket_accept(struct socket *sock,
+                                        struct socket *newsock)
 {
        return 0;
 }
 
-static inline void security_socket_post_accept(struct socket * sock, 
-                                              struct socket * newsock)
+static inline void security_socket_post_accept(struct socket *sock,
+                                              struct socket *newsock)
 {
 }
 
-static inline int security_socket_sendmsg(struct socket * sock, 
-                                         struct msghdr * msg, int size)
+static inline int security_socket_sendmsg(struct socket *sock,
+                                         struct msghdr *msg, int size)
 {
        return 0;
 }
 
-static inline int security_socket_recvmsg(struct socket * sock, 
-                                         struct msghdr * msg, int size, 
+static inline int security_socket_recvmsg(struct socket *sock,
+                                         struct msghdr *msg, int size,
                                          int flags)
 {
        return 0;
 }
 
-static inline int security_socket_getsockname(struct socket * sock)
+static inline int security_socket_getsockname(struct socket *sock)
 {
        return 0;
 }
 
-static inline int security_socket_getpeername(struct socket * sock)
+static inline int security_socket_getpeername(struct socket *sock)
 {
        return 0;
 }
 
-static inline int security_socket_getsockopt(struct socket * sock, 
+static inline int security_socket_getsockopt(struct socket *sock,
                                             int level, int optname)
 {
        return 0;
 }
 
-static inline int security_socket_setsockopt(struct socket * sock, 
+static inline int security_socket_setsockopt(struct socket *sock,
                                             int level, int optname)
 {
        return 0;
 }
 
-static inline int security_socket_shutdown(struct socket * sock, int how)
+static inline int security_socket_shutdown(struct socket *sock, int how)
 {
        return 0;
 }
-static inline int security_sock_rcv_skb (struct sock * sk, 
-                                        struct sk_buff * skb)
+static inline int security_sock_rcv_skb(struct sock *sk,
+                                       struct sk_buff *skb)
 {
        return 0;
 }
@@ -2618,7 +2634,7 @@ static inline void security_req_classify_flow(const struct request_sock *req, st
 {
 }
 
-static inline void security_sock_graft(struct socksk, struct socket *parent)
+static inline void security_sock_graft(struct sock *sk, struct socket *parent)
 {
 }
 
@@ -2727,6 +2743,7 @@ int security_key_alloc(struct key *key, struct task_struct *tsk, unsigned long f
 void security_key_free(struct key *key);
 int security_key_permission(key_ref_t key_ref,
                            struct task_struct *context, key_perm_t perm);
+int security_key_getsecurity(struct key *key, char **_buffer);
 
 #else
 
@@ -2748,6 +2765,12 @@ static inline int security_key_permission(key_ref_t key_ref,
        return 0;
 }
 
+static inline int security_key_getsecurity(struct key *key, char **_buffer)
+{
+       *_buffer = NULL;
+       return 0;
+}
+
 #endif
 #endif /* CONFIG_KEYS */
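
The newly added security_key_getsecurity() hook asks the LSM for a security context string describing a key; the !CONFIG_SECURITY stub above reports success with a NULL buffer. A hedged caller sketch, not taken from this commit (the function name is hypothetical, and the context buffer is assumed to be allocated by the LSM and freed by the caller, as the keyctl path does):

    #include <linux/kernel.h>
    #include <linux/key.h>
    #include <linux/security.h>
    #include <linux/slab.h>

    static int demo_show_key_context(struct key *key)
    {
            char *context = NULL;
            int ret;

            ret = security_key_getsecurity(key, &context);
            if (ret < 0)
                    return ret;

            /* with the stub above, context is still NULL here */
            if (context)
                    printk(KERN_INFO "key %d: %s\n", key->serial, context);

            kfree(context);                 /* kfree(NULL) is a no-op */
            return 0;
    }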
 
index 5b5369c3c209c66c904f2a505e1bdaf8e0cbac56..a66304a09955b0d36e7b7f67193657a444355e77 100644 (file)
@@ -1,6 +1,5 @@
 #ifndef _LINUX_SEQ_FILE_H
 #define _LINUX_SEQ_FILE_H
-#ifdef __KERNEL__
 
 #include <linux/types.h>
 #include <linux/string.h>
@@ -69,4 +68,3 @@ extern struct list_head *seq_list_next(void *v, struct list_head *head,
                loff_t *ppos);
 
 #endif
-#endif
index 00b65c0a82ca0a1c00d1ea862e098c88f9689684..3d37c94abbc80622732e4a0793ccda55ea6227a7 100644 (file)
@@ -46,6 +46,7 @@ enum {
        PLAT8250_DEV_HUB6,
        PLAT8250_DEV_MCA,
        PLAT8250_DEV_AU1X00,
+       PLAT8250_DEV_SM501,
 };
 
 /*
index 7cb094a82456cbe7a6c3409013bf98d16dd9a827..d32123ae08adbc9a7ed70613c97be128d0bb5c15 100644 (file)
 /* Freescale ColdFire */
 #define PORT_MCF       78
 
-#define PORT_SC26XX    79
-
+/* Blackfin SPORT */
+#define PORT_BFIN_SPORT                79
 
 /* MN10300 on-chip UART numbers */
 #define PORT_MN10300           80
 #define PORT_MN10300_CTS       81
 
+#define PORT_SC26XX    82
+
 #ifdef __KERNEL__
 
 #include <linux/compiler.h>
index 8d5fb36ea04741488c7233c6f9171075117f9a65..f2d12d5a21b891cf53cbe19da6d10ded434aab59 100644 (file)
@@ -34,8 +34,7 @@ struct shmem_sb_info {
        uid_t uid;                  /* Mount uid for root directory */
        gid_t gid;                  /* Mount gid for root directory */
        mode_t mode;                /* Mount mode for root directory */
-       int policy;                 /* Default NUMA memory alloc policy */
-       nodemask_t policy_nodes;    /* nodemask for preferred and bind */
+       struct mempolicy *mpol;     /* default memory policy for mappings */
 };
 
 static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
index 42d2e0a948f4866533c7211c031362c8246b542a..84f997f8aa53cfcc8b04ff86bb4b0263f0e7dc1a 100644 (file)
@@ -362,8 +362,6 @@ int unhandled_signal(struct task_struct *tsk, int sig);
 #define sig_kernel_stop(sig) \
        (((sig) < SIGRTMIN) && siginmask(sig, SIG_KERNEL_STOP_MASK))
 
-#define sig_needs_tasklist(sig)        ((sig) == SIGCONT)
-
 #define sig_user_defined(t, signr) \
        (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) &&  \
         ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
index f62caaad94e00876d7877c8cb738c95f5e61021c..805ed4b92f9a40dfd533330fb2bea5e423ed00e1 100644 (file)
@@ -9,8 +9,6 @@
 #ifndef _LINUX_SLAB_H
 #define        _LINUX_SLAB_H
 
-#ifdef __KERNEL__
-
 #include <linux/gfp.h>
 #include <linux/types.h>
 
 #define SLAB_MEM_SPREAD                0x00100000UL    /* Spread some memory over cpuset */
 #define SLAB_TRACE             0x00200000UL    /* Trace allocations and frees */
 
+/* Flag to prevent checks on free */
+#ifdef CONFIG_DEBUG_OBJECTS
+# define SLAB_DEBUG_OBJECTS    0x00400000UL
+#else
+# define SLAB_DEBUG_OBJECTS    0x00000000UL
+#endif
+
 /* The following flags affect the page allocator grouping pages by mobility */
 #define SLAB_RECLAIM_ACCOUNT   0x00020000UL            /* Objects are reclaimable */
 #define SLAB_TEMPORARY         SLAB_RECLAIM_ACCOUNT    /* Objects are short-lived */
@@ -276,5 +281,4 @@ extern const struct seq_operations slabinfo_op;
 ssize_t slabinfo_write(struct file *, const char __user *, size_t, loff_t *);
 #endif
 
-#endif /* __KERNEL__ */
 #endif /* _LINUX_SLAB_H */
index 79d59c937fac4ca62d0ca98c0b3249d1842d78c1..71e43a12ebbbc62521498155d6bace9b2b0e0585 100644 (file)
@@ -29,6 +29,7 @@ enum stat_item {
        DEACTIVATE_TO_HEAD,     /* Cpu slab was moved to the head of partials */
        DEACTIVATE_TO_TAIL,     /* Cpu slab was moved to the tail of partials */
        DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
+       ORDER_FALLBACK,         /* Number of times fallback was necessary */
        NR_SLUB_STAT_ITEMS };
 
 struct kmem_cache_cpu {
@@ -48,10 +49,20 @@ struct kmem_cache_node {
        struct list_head partial;
 #ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
+       atomic_long_t total_objects;
        struct list_head full;
 #endif
 };
 
+/*
+ * Word size structure that can be atomically updated or read and that
+ * contains both the order and the number of objects that a slab of the
+ * given order would contain.
+ */
+struct kmem_cache_order_objects {
+       unsigned long x;
+};
+
 /*
  * Slab cache management.
  */
@@ -61,7 +72,7 @@ struct kmem_cache {
        int size;               /* The size of an object including meta data */
        int objsize;            /* The size of an object without meta data */
        int offset;             /* Free pointer offset. */
-       int order;              /* Current preferred allocation order */
+       struct kmem_cache_order_objects oo;
 
        /*
         * Avoid an extra cache line for UP, SMP and for the node local to
@@ -70,7 +81,8 @@ struct kmem_cache {
        struct kmem_cache_node local_node;
 
        /* Allocation and freeing of slabs */
-       int objects;            /* Number of objects in slab */
+       struct kmem_cache_order_objects max;
+       struct kmem_cache_order_objects min;
        gfp_t allocflags;       /* gfp flags to use on each alloc */
        int refcount;           /* Refcount for slab cache destroy */
        void (*ctor)(struct kmem_cache *, void *);
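
kmem_cache_order_objects packs the page order and the per-slab object count into one word so both can be read together without locking. The accessors live in mm/slub.c; roughly, the encoding looks like the sketch below (assuming the usual 16-bit split SLUB uses in this series):

    #define OO_SHIFT        16
    #define OO_MASK         ((1 << OO_SHIFT) - 1)

    static inline struct kmem_cache_order_objects oo_make(int order,
                                                          unsigned long size)
    {
            struct kmem_cache_order_objects x = {
                    (order << OO_SHIFT) + (PAGE_SIZE << order) / size
            };

            return x;
    }

    static inline int oo_order(struct kmem_cache_order_objects x)
    {
            return x.x >> OO_SHIFT;
    }

    static inline int oo_objects(struct kmem_cache_order_objects x)
    {
            return x.x & OO_MASK;
    }
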
index f098dff93f6bc26cd732d15b75a9ff69d894d42e..caa43b2370cb04b6b12e7aa47d398e35dbfbedb3 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <linux/types.h>
 #include <linux/magic.h>
+#include <linux/time.h>
 
 enum smb_protocol { 
        SMB_PROTOCOL_NONE, 
index 8516954a51416ef434e46ba09db9ed88e130a647..8ccf4eca2c3d97b08a707736b885cf08c403d40d 100644 (file)
@@ -9,7 +9,6 @@
 #ifndef _LINUX_SMB_FS_I
 #define _LINUX_SMB_FS_I
 
-#ifdef __KERNEL__
 #include <linux/types.h>
 #include <linux/fs.h>
 
@@ -36,4 +35,3 @@ struct smb_inode_info {
 };
 
 #endif
-#endif
index 3aa97aa4277f3905467db75021bda4892033969e..8a060a7040d818dae9a86af3f350ea3d2635d17b 100644 (file)
@@ -9,8 +9,6 @@
 #ifndef _SMB_FS_SB
 #define _SMB_FS_SB
 
-#ifdef __KERNEL__
-
 #include <linux/types.h>
 #include <linux/smb.h>
 
@@ -96,6 +94,4 @@ smb_unlock_server(struct smb_sb_info *server)
        up(&(server->sem));
 }
 
-#endif /* __KERNEL__ */
-
 #endif
index c5d3fcad7b57c87449d7de9085daa64cd80f984f..efdc44593b522054dda0454f105fb0636b8ec0c2 100644 (file)
@@ -109,5 +109,7 @@ extern void *kmemdup(const void *src, size_t len, gfp_t gfp);
 extern char **argv_split(gfp_t gfp, const char *str, int *argcp);
 extern void argv_free(char **argv);
 
+extern bool sysfs_streq(const char *s1, const char *s2);
+
 #endif
 #endif /* _LINUX_STRING_H_ */
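
sysfs_streq() compares two strings but also accepts a trailing newline on either side, which is exactly what a sysfs store method receives from `echo value > attribute`. A hedged sketch of a store handler built on it (device, attribute and helper names are hypothetical):

    #include <linux/device.h>
    #include <linux/string.h>

    static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
                              const char *buf, size_t count)
    {
            if (sysfs_streq(buf, "auto"))           /* matches "auto" and "auto\n" */
                    demo_set_auto(dev);             /* hypothetical helpers */
            else if (sysfs_streq(buf, "manual"))
                    demo_set_manual(dev);
            else
                    return -EINVAL;

            return count;
    }
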
index 1d7d4c5797ee93c37ab2d7f035f26fa114649a3f..a6977423baf737ed67d4847cdce4c89fc24452e0 100644 (file)
 #include <asm/errno.h>
 
 #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
+extern void pm_set_vt_switch(int);
 extern int pm_prepare_console(void);
 extern void pm_restore_console(void);
 #else
-static inline int pm_prepare_console(void) { return 0; }
-static inline void pm_restore_console(void) {}
+static inline void pm_set_vt_switch(int do_switch)
+{
+}
+
+static inline int pm_prepare_console(void)
+{
+       return 0;
+}
+
+static inline void pm_restore_console(void)
+{
+}
 #endif
 
 typedef int __bitwise suspend_state_t;
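
pm_set_vt_switch() lets a driver that restores the display itself skip the switch to a text console around suspend/resume; with CONFIG_VT disabled it compiles away to the empty stub above. Usage is a single call, e.g. (sketch):

    pm_set_vt_switch(0);    /* do not switch to a text VT on suspend */
    /* ... and pm_set_vt_switch(1) restores the default behaviour */
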
index 13ad0b82ac286a37355a321928cbcd7dd5f31944..c59a51a2b0e792c39b5de96fecf228b3bfaca244 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _LINUX_SVGA_H
 #define _LINUX_SVGA_H
 
-#ifdef __KERNEL__
-
 #include <linux/pci.h>
 #include <video/vga.h>
 
@@ -122,6 +120,5 @@ void svga_set_timings(const struct svga_timing_regs *tm, struct fb_var_screeninf
 
 int svga_match_format(const struct svga_fb_format *frm, struct fb_var_screeninfo *var, struct fb_fix_screeninfo *fix);
 
-#endif /* __KERNEL__  */
 #endif /* _LINUX_SVGA_H */
 
index 878459ae0454c3e1c55815e95e07d50343635c5e..0b3377650c859784686109395e8604affed2f701 100644 (file)
@@ -177,11 +177,11 @@ extern void activate_page(struct page *);
 extern void mark_page_accessed(struct page *);
 extern void lru_add_drain(void);
 extern int lru_add_drain_all(void);
-extern int rotate_reclaimable_page(struct page *page);
+extern void rotate_reclaimable_page(struct page *page);
 extern void swap_setup(void);
 
 /* linux/mm/vmscan.c */
-extern unsigned long try_to_free_pages(struct zone **zones, int order,
+extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                        gfp_t gfp_mask);
 extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
                                                        gfp_t gfp_mask);
index 5562fbf72095cf5ba3b3688f8aebbc6cb983f85c..45f6bc82d317237b8f0359e529432695d217d42c 100644 (file)
 #define _SYNCLINK_H_
 #define SYNCLINK_H_VERSION 3.6
 
-#define BOOLEAN int
-#define TRUE 1
-#define FALSE 0
-
 #define BIT0   0x0001
 #define BIT1   0x0002
 #define BIT2   0x0004
index 8df6d1382ac8280b7446df8af3898ea8b0b4d588..0522f368f9d737d7f65376c8cc122cc2ea92c900 100644 (file)
@@ -240,26 +240,28 @@ asmlinkage long sys_truncate64(const char __user *path, loff_t length);
 asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length);
 #endif
 
-asmlinkage long sys_setxattr(char __user *path, char __user *name,
-                               void __user *value, size_t size, int flags);
-asmlinkage long sys_lsetxattr(char __user *path, char __user *name,
-                               void __user *value, size_t size, int flags);
-asmlinkage long sys_fsetxattr(int fd, char __user *name, void __user *value,
-                               size_t size, int flags);
-asmlinkage ssize_t sys_getxattr(char __user *path, char __user *name,
+asmlinkage long sys_setxattr(const char __user *path, const char __user *name,
+                            const void __user *value, size_t size, int flags);
+asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name,
+                             const void __user *value, size_t size, int flags);
+asmlinkage long sys_fsetxattr(int fd, const char __user *name,
+                             const void __user *value, size_t size, int flags);
+asmlinkage ssize_t sys_getxattr(const char __user *path, const char __user *name,
                                void __user *value, size_t size);
-asmlinkage ssize_t sys_lgetxattr(char __user *path, char __user *name,
+asmlinkage ssize_t sys_lgetxattr(const char __user *path, const char __user *name,
                                void __user *value, size_t size);
-asmlinkage ssize_t sys_fgetxattr(int fd, char __user *name,
+asmlinkage ssize_t sys_fgetxattr(int fd, const char __user *name,
                                void __user *value, size_t size);
-asmlinkage ssize_t sys_listxattr(char __user *path, char __user *list,
+asmlinkage ssize_t sys_listxattr(const char __user *path, char __user *list,
                                size_t size);
-asmlinkage ssize_t sys_llistxattr(char __user *path, char __user *list,
+asmlinkage ssize_t sys_llistxattr(const char __user *path, char __user *list,
                                size_t size);
 asmlinkage ssize_t sys_flistxattr(int fd, char __user *list, size_t size);
-asmlinkage long sys_removexattr(char __user *path, char __user *name);
-asmlinkage long sys_lremovexattr(char __user *path, char __user *name);
-asmlinkage long sys_fremovexattr(int fd, char __user *name);
+asmlinkage long sys_removexattr(const char __user *path,
+                               const char __user *name);
+asmlinkage long sys_lremovexattr(const char __user *path,
+                                const char __user *name);
+asmlinkage long sys_fremovexattr(int fd, const char __user *name);
 
 asmlinkage unsigned long sys_brk(unsigned long brk);
 asmlinkage long sys_mprotect(unsigned long start, size_t len,
index 571f01d20a86373454000db25760f7a7aa2572da..24141b4d1a116a4523ab9085e8f830cdd26cb471 100644 (file)
@@ -945,11 +945,14 @@ enum
 /* For the /proc/sys support */
 struct ctl_table;
 struct nsproxy;
+struct ctl_table_root;
+
 extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev);
 extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces,
                                                struct ctl_table_header *prev);
 extern void sysctl_head_finish(struct ctl_table_header *prev);
-extern int sysctl_perm(struct ctl_table *table, int op);
+extern int sysctl_perm(struct ctl_table_root *root,
+               struct ctl_table *table, int op);
 
 typedef struct ctl_table ctl_table;
 
@@ -981,11 +984,6 @@ extern int do_sysctl (int __user *name, int nlen,
                      void __user *oldval, size_t __user *oldlenp,
                      void __user *newval, size_t newlen);
 
-extern int do_sysctl_strategy (struct ctl_table *table,
-                              int __user *name, int nlen,
-                              void __user *oldval, size_t __user *oldlenp,
-                              void __user *newval, size_t newlen);
-
 extern ctl_handler sysctl_data;
 extern ctl_handler sysctl_string;
 extern ctl_handler sysctl_intvec;
@@ -1054,6 +1052,8 @@ struct ctl_table_root {
        struct list_head header_list;
        struct list_head *(*lookup)(struct ctl_table_root *root,
                                           struct nsproxy *namespaces);
+       int (*permissions)(struct ctl_table_root *root,
+                       struct nsproxy *namespaces, struct ctl_table *table);
 };
 
 /* struct ctl_table_header is used to maintain dynamic lists of
@@ -1085,8 +1085,6 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path,
 void unregister_sysctl_table(struct ctl_table_header * table);
 int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table);
 
-#else /* __KERNEL__ */
-
 #endif /* __KERNEL__ */
 
 #endif /* _LINUX_SYSCTL_H */
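
sysctl_perm() now takes the owning ctl_table_root, and a root may provide a permissions() callback that supplies the mode bits to check in place of table->mode (the per-namespace net sysctls are the in-tree user of this hook). A rough sketch under that assumption, with hypothetical names:

    #include <linux/list.h>
    #include <linux/sysctl.h>

    static LIST_HEAD(demo_headers);

    static struct list_head *demo_lookup(struct ctl_table_root *root,
                                         struct nsproxy *namespaces)
    {
            return &demo_headers;
    }

    static int demo_permissions(struct ctl_table_root *root,
                                struct nsproxy *namespaces,
                                struct ctl_table *table)
    {
            /* expose everything under this root read-only */
            return table->mode & ~0222;
    }

    static struct ctl_table_root demo_root = {
            .lookup      = demo_lookup,
            .permissions = demo_permissions,
    };
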
index 03378e3515b3fd073b23bf517057fa4708d5bf43..27bad59dae7946b7a5386cd5e5daeb2fe3ee5c4e 100644 (file)
@@ -32,7 +32,7 @@ struct attribute {
 
 struct attribute_group {
        const char              *name;
-       int                     (*is_visible)(struct kobject *,
+       mode_t                  (*is_visible)(struct kobject *,
                                              struct attribute *, int);
        struct attribute        **attrs;
 };
@@ -105,6 +105,8 @@ void sysfs_remove_link(struct kobject *kobj, const char *name);
 
 int __must_check sysfs_create_group(struct kobject *kobj,
                                    const struct attribute_group *grp);
+int sysfs_update_group(struct kobject *kobj,
+                      const struct attribute_group *grp);
 void sysfs_remove_group(struct kobject *kobj,
                        const struct attribute_group *grp);
 int sysfs_add_file_to_group(struct kobject *kobj,
@@ -188,6 +190,12 @@ static inline int sysfs_create_group(struct kobject *kobj,
        return 0;
 }
 
+static inline int sysfs_update_group(struct kobject *kobj,
+                               const struct attribute_group *grp)
+{
+       return 0;
+}
+
 static inline void sysfs_remove_group(struct kobject *kobj,
                                      const struct attribute_group *grp)
 {
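
With is_visible() now returning a mode_t, a group can report per-attribute permissions (returning 0 hides the attribute entirely), and sysfs_update_group() re-runs that decision for a group that is already registered. A hedged sketch with hypothetical device and attribute names:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static mode_t demo_attr_is_visible(struct kobject *kobj,
                                       struct attribute *attr, int index)
    {
            struct demo_device *d = to_demo_device(kobj);   /* hypothetical */

            if (attr == &dev_attr_extra.attr && !d->has_extra)
                    return 0;                       /* hide this attribute */

            return attr->mode;
    }

    static struct attribute_group demo_group = {
            .attrs      = demo_attrs,
            .is_visible = demo_attr_is_visible,
    };

    /* later, when d->has_extra changes at run time: */
    err = sysfs_update_group(kobj, &demo_group);
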
index e0248631e461a960f0285d0a07ff138e80cb25d6..96411306eec63f66e3dc0afe20a169151ccbc3c6 100644 (file)
@@ -1,11 +1,7 @@
 #ifndef _LINUX_SYSV_FS_H
 #define _LINUX_SYSV_FS_H
 
-#if defined(__GNUC__)
-# define __packed2__   __attribute__((packed, aligned(2)))
-#else
->> I want to scream! <<
-#endif
+#define __packed2__    __attribute__((packed, aligned(2)))
 
 
 #ifndef __KERNEL__
index 004808a6df1d080f8389cf2a57f1d4c85ff7673d..6f371f24160b10ac57176bd2f9b3da36d01d445b 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef __LINUX_TEXTSEARCH_H
 #define __LINUX_TEXTSEARCH_H
 
-#ifdef __KERNEL__
-
 #include <linux/types.h>
 #include <linux/list.h>
 #include <linux/kernel.h>
@@ -177,6 +175,4 @@ static inline void *ts_config_priv(struct ts_config *conf)
        return ((u8 *) conf + TS_PRIV_ALIGN(sizeof(struct ts_config)));
 }
 
-#endif /* __KERNEL__ */
-
 #endif
index 90c1c191ea694707679e067209c24b6616721f54..06d3e6eb9ca84ddb19b85e862ab51bafbaf0ce4b 100644 (file)
@@ -41,6 +41,7 @@ struct thermal_zone_device_ops {
        int (*set_mode) (struct thermal_zone_device *, const char *);
        int (*get_trip_type) (struct thermal_zone_device *, int, char *);
        int (*get_trip_temp) (struct thermal_zone_device *, int, char *);
+       int (*get_crit_temp) (struct thermal_zone_device *, unsigned long *);
 };
 
 struct thermal_cooling_device_ops {
@@ -65,6 +66,23 @@ struct thermal_cooling_device {
                                ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
 #define CELSIUS_TO_KELVIN(t)   ((t)*10+2732)
 
+#if defined(CONFIG_HWMON) ||   \
+       (defined(CONFIG_HWMON_MODULE) && defined(CONFIG_THERMAL_MODULE))
+/* thermal zone devices with the same type share one hwmon device */
+struct thermal_hwmon_device {
+       char type[THERMAL_NAME_LENGTH];
+       struct device *device;
+       int count;
+       struct list_head tz_list;
+       struct list_head node;
+};
+
+struct thermal_hwmon_attr {
+       struct device_attribute attr;
+       char name[16];
+};
+#endif
+
 struct thermal_zone_device {
        int id;
        char type[THERMAL_NAME_LENGTH];
@@ -76,6 +94,13 @@ struct thermal_zone_device {
        struct idr idr;
        struct mutex lock;      /* protect cooling devices list */
        struct list_head node;
+#if defined(CONFIG_HWMON) ||   \
+       (defined(CONFIG_HWMON_MODULE) && defined(CONFIG_THERMAL_MODULE))
+       struct list_head hwmon_node;
+       struct thermal_hwmon_device *hwmon;
+       struct thermal_hwmon_attr temp_input;   /* hwmon sys attr */
+       struct thermal_hwmon_attr temp_crit;    /* hwmon sys attr */
+#endif
 };
 
 struct thermal_zone_device *thermal_zone_device_register(char *, int, void *,
@@ -88,24 +113,10 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
                                     struct thermal_cooling_device *);
 int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
                                       struct thermal_cooling_device *);
-
-#ifdef CONFIG_THERMAL
 struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
                                                               struct
                                                               thermal_cooling_device_ops
                                                               *);
 void thermal_cooling_device_unregister(struct thermal_cooling_device *);
-#else
-static inline struct thermal_cooling_device
-*thermal_cooling_device_register(char *c, void *v,
-                                struct thermal_cooling_device_ops *t)
-{
-       return NULL;
-}
-static inline
-    void thermal_cooling_device_unregister(struct thermal_cooling_device *t)
-{
-};
-#endif
 
 #endif /* __THERMAL_H__ */
index accd7bad35b09152b8ecf2abcd8ab063c53ae5fb..38a56477f27ada34c3cd6e56c912205b066bc413 100644 (file)
@@ -92,6 +92,31 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
 #define set_need_resched()     set_thread_flag(TIF_NEED_RESCHED)
 #define clear_need_resched()   clear_thread_flag(TIF_NEED_RESCHED)
 
-#endif
+#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
+/*
+ * An arch can define its own version of set_restore_sigmask() to get the
+ * job done however works, with or without TIF_RESTORE_SIGMASK.
+ */
+#define HAVE_SET_RESTORE_SIGMASK       1
+
+/**
+ * set_restore_sigmask() - make sure saved_sigmask processing gets done
+ *
+ * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
+ * will run before returning to user mode, to process the flag.  For
+ * all callers, TIF_SIGPENDING is already set or it's no harm to set
+ * it.  TIF_RESTORE_SIGMASK need not be in the set of bits that the
+ * arch code will notice on return to user mode, in case those bits
+ * are scarce.  We set TIF_SIGPENDING here to ensure that the arch
+ * signal code always gets run when TIF_RESTORE_SIGMASK is set.
+ */
+static inline void set_restore_sigmask(void)
+{
+       set_thread_flag(TIF_RESTORE_SIGMASK);
+       set_thread_flag(TIF_SIGPENDING);
+}
+#endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */
+
+#endif /* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
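
set_restore_sigmask() is what the sigsuspend()/pselect() family call after blocking with a temporary signal mask: the arch signal-delivery path then puts current->saved_sigmask back once the signal has been handled. A condensed sketch of the usual pattern (roughly what sys_rt_sigsuspend() does, with setup and error handling trimmed):

    spin_lock_irq(&current->sighand->siglock);
    current->saved_sigmask = current->blocked;
    current->blocked = newset;              /* temporary mask supplied by userspace */
    recalc_sigpending();
    spin_unlock_irq(&current->sighand->siglock);

    current->state = TASK_INTERRUPTIBLE;
    schedule();
    set_restore_sigmask();                  /* arch code restores saved_sigmask on return */
    return -ERESTARTNOHAND;
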
index 979fefdeb86210c15e318cc7eb8986f813181c75..d4ba79248a27fab0f33a9697deebd02bfb9440e3 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/list.h>
 #include <linux/ktime.h>
 #include <linux/stddef.h>
+#include <linux/debugobjects.h>
 
 struct tvec_base;
 
@@ -25,6 +26,7 @@ struct timer_list {
 extern struct tvec_base boot_tvec_bases;
 
 #define TIMER_INITIALIZER(_function, _expires, _data) {                \
+               .entry = { .prev = TIMER_ENTRY_STATIC },        \
                .function = (_function),                        \
                .expires = (_expires),                          \
                .data = (_data),                                \
@@ -38,6 +40,17 @@ extern struct tvec_base boot_tvec_bases;
 void init_timer(struct timer_list *timer);
 void init_timer_deferrable(struct timer_list *timer);
 
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+extern void init_timer_on_stack(struct timer_list *timer);
+extern void destroy_timer_on_stack(struct timer_list *timer);
+#else
+static inline void destroy_timer_on_stack(struct timer_list *timer) { }
+static inline void init_timer_on_stack(struct timer_list *timer)
+{
+       init_timer(timer);
+}
+#endif
+
 static inline void setup_timer(struct timer_list * timer,
                                void (*function)(unsigned long),
                                unsigned long data)
@@ -47,6 +60,15 @@ static inline void setup_timer(struct timer_list * timer,
        init_timer(timer);
 }
 
+static inline void setup_timer_on_stack(struct timer_list *timer,
+                                       void (*function)(unsigned long),
+                                       unsigned long data)
+{
+       timer->function = function;
+       timer->data = data;
+       init_timer_on_stack(timer);
+}
+
 /**
  * timer_pending - is a timer pending?
  * @timer: the timer in question
@@ -164,5 +186,4 @@ unsigned long __round_jiffies_relative(unsigned long j, int cpu);
 unsigned long round_jiffies(unsigned long j);
 unsigned long round_jiffies_relative(unsigned long j);
 
-
 #endif
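
When CONFIG_DEBUG_OBJECTS_TIMERS is enabled, a timer that lives on the stack must be announced with the *_on_stack variants and torn down with destroy_timer_on_stack() before the stack frame goes away, or the debug code will (rightly) complain. A hedged usage sketch with hypothetical names:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/timer.h>

    static void demo_timeout(unsigned long data)
    {
            complete((struct completion *)data);
    }

    static void demo_wait_one_second(void)
    {
            DECLARE_COMPLETION_ONSTACK(done);
            struct timer_list timer;

            setup_timer_on_stack(&timer, demo_timeout, (unsigned long)&done);
            mod_timer(&timer, jiffies + HZ);

            wait_for_completion(&done);

            del_timer_sync(&timer);
            destroy_timer_on_stack(&timer); /* required before the frame is reused */
    }
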
index 8ea3e71ba7fa8ff17d8de5587f482450c349ef36..fc6035d29d568a018e5c43f6575e5b860379fa96 100644 (file)
@@ -58,6 +58,8 @@
 
 #include <asm/param.h>
 
+#define NTP_API                4       /* NTP API version */
+
 /*
  * SHIFT_KG and SHIFT_KF establish the damping of the PLL and are chosen
  * for a slightly underdamped convergence characteristic. SHIFT_KH
 #define MAXTC          10      /* maximum time constant (shift) */
 
 /*
- * The SHIFT_UPDATE define establishes the decimal point of the
- * time_offset variable which represents the current offset with
- * respect to standard time.
- *
  * SHIFT_USEC defines the scaling (shift) of the time_freq and
  * time_tolerance variables, which represent the current frequency
  * offset and maximum frequency tolerance.
  */
-#define SHIFT_UPDATE (SHIFT_HZ + 1) /* time offset scale (shift) */
 #define SHIFT_USEC 16          /* frequency offset scale (shift) */
-#define SHIFT_NSEC 12          /* kernel frequency offset scale */
-
-#define MAXPHASE 512000L        /* max phase error (us) */
-#define MAXFREQ (512L << SHIFT_USEC)  /* max frequency error (ppm) */
-#define MAXFREQ_NSEC (512000L << SHIFT_NSEC) /* max frequency error (ppb) */
+#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
+#define PPM_SCALE_INV_SHIFT 20
+#define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
+                      PPM_SCALE + 1)
+
+#define MAXPHASE 500000000l    /* max phase error (ns) */
+#define MAXFREQ 500000         /* max frequency error (ns/s) */
+#define MAXFREQ_SCALED ((s64)MAXFREQ << NTP_SCALE_SHIFT)
 #define MINSEC 256             /* min interval between updates (s) */
 #define MAXSEC 2048            /* max interval between updates (s) */
-#define        NTP_PHASE_LIMIT (MAXPHASE << 5) /* beyond max. dispersion */
+#define NTP_PHASE_LIMIT ((MAXPHASE / NSEC_PER_USEC) << 5) /* beyond max. dispersion */
 
 /*
  * syscall interface - used (mainly by NTP daemon)
@@ -121,9 +121,11 @@ struct timex {
        long errcnt;            /* calibration errors (ro) */
        long stbcnt;            /* stability limit exceeded (ro) */
 
+       int tai;                /* TAI offset (ro) */
+
        int  :32; int  :32; int  :32; int  :32;
        int  :32; int  :32; int  :32; int  :32;
-       int  :32; int  :32; int  :32; int  :32;
+       int  :32; int  :32; int  :32;
 };
 
 /*
@@ -135,6 +137,9 @@ struct timex {
 #define ADJ_ESTERROR           0x0008  /* estimated time error */
 #define ADJ_STATUS             0x0010  /* clock status */
 #define ADJ_TIMECONST          0x0020  /* pll time constant */
+#define ADJ_TAI                        0x0080  /* set TAI offset */
+#define ADJ_MICRO              0x1000  /* select microsecond resolution */
+#define ADJ_NANO               0x2000  /* select nanosecond resolution */
 #define ADJ_TICK               0x4000  /* tick value */
 #define ADJ_OFFSET_SINGLESHOT  0x8001  /* old-fashioned adjtime */
 #define ADJ_OFFSET_SS_READ     0xa001  /* read-only adjtime */
@@ -146,8 +151,6 @@ struct timex {
 #define MOD_ESTERROR   ADJ_ESTERROR
 #define MOD_STATUS     ADJ_STATUS
 #define MOD_TIMECONST  ADJ_TIMECONST
-#define MOD_CLKB       ADJ_TICK
-#define MOD_CLKA       ADJ_OFFSET_SINGLESHOT /* 0x8000 in original */
 
 
 /*
@@ -169,9 +172,13 @@ struct timex {
 #define STA_PPSERROR   0x0800  /* PPS signal calibration error (ro) */
 
 #define STA_CLOCKERR   0x1000  /* clock hardware fault (ro) */
+#define STA_NANO       0x2000  /* resolution (0 = us, 1 = ns) (ro) */
+#define STA_MODE       0x4000  /* mode (0 = PLL, 1 = FLL) (ro) */
+#define STA_CLK                0x8000  /* clock source (0 = A, 1 = B) (ro) */
 
+/* read-only bits */
 #define STA_RONLY (STA_PPSSIGNAL | STA_PPSJITTER | STA_PPSWANDER | \
-    STA_PPSERROR | STA_CLOCKERR) /* read-only bits */
+       STA_PPSERROR | STA_CLOCKERR | STA_NANO | STA_MODE | STA_CLK)
 
 /*
  * Clock states (time_state)
@@ -203,10 +210,9 @@ extern int time_status;            /* clock synchronization status bits */
 extern long time_maxerror;     /* maximum error */
 extern long time_esterror;     /* estimated error */
 
-extern long time_freq;         /* frequency offset (scaled ppm) */
-
 extern long time_adjust;       /* The amount of adjtime left */
 
+extern void ntp_init(void);
 extern void ntp_clear(void);
 
 /**
@@ -225,7 +231,7 @@ static inline int ntp_synced(void)
        __x < 0 ? -(-__x >> __s) : __x >> __s;  \
 })
 
-#define TICK_LENGTH_SHIFT      32
+#define NTP_SCALE_SHIFT                32
 
 #ifdef CONFIG_NO_HZ
 #define NTP_INTERVAL_FREQ  (2)
@@ -234,8 +240,8 @@ static inline int ntp_synced(void)
 #endif
 #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
 
-/* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */
-extern u64 current_tick_length(void);
+/* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
+extern u64 tick_length;
 
 extern void second_overflow(void);
 extern void update_ntp_one_tick(void);
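
The frequency bookkeeping moves from scaled microseconds to nanoseconds in 2^-NTP_SCALE_SHIFT fixed point, while the adjtimex() ABI keeps reporting frequency in units of 2^-16 ppm. PPM_SCALE is exactly that conversion factor: 1 ppm equals 1000 ns/s, so a user value v in 2^-16 ppm is v * 1000 / 2^16 ns/s, and scaling by 2^32 gives v * 1000 * 2^16 = v * PPM_SCALE (with NSEC_PER_USEC = 1000, PPM_SCALE = 1000 << 16 = 65,536,000). A sketch of the conversion, variable name hypothetical:

    /* txc_freq: frequency from adjtimex(), in units of 2^-16 ppm */
    s64 freq = (s64)txc_freq * PPM_SCALE;   /* ns/s, scaled by 2^NTP_SCALE_SHIFT */
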
index dd8e08fe88551a3d9c9ca2529226bfa55631e69b..7f7121f9c9687a0e5692469b3ded0d082e5f0fb7 100644 (file)
@@ -177,27 +177,33 @@ struct signal_struct;
  * size each time the window is created or resized anyway.
  *                                             - TYT, 9/14/92
  */
+
+struct tty_operations;
+
 struct tty_struct {
        int     magic;
        struct tty_driver *driver;
+       const struct tty_operations *ops;
        int index;
        struct tty_ldisc ldisc;
        struct mutex termios_mutex;
+       spinlock_t ctrl_lock;
+       /* Termios values are protected by the termios mutex */
        struct ktermios *termios, *termios_locked;
        char name[64];
-       struct pid *pgrp;
+       struct pid *pgrp;               /* Protected by ctrl lock */
        struct pid *session;
        unsigned long flags;
        int count;
-       struct winsize winsize;
+       struct winsize winsize;         /* termios mutex */
        unsigned char stopped:1, hw_stopped:1, flow_stopped:1, packet:1;
        unsigned char low_latency:1, warned:1;
-       unsigned char ctrl_status;
+       unsigned char ctrl_status;      /* ctrl_lock */
        unsigned int receive_room;      /* Bytes free for queue */
 
        struct tty_struct *link;
        struct fasync_struct *fasync;
-       struct tty_bufhead buf;
+       struct tty_bufhead buf;         /* Locked internally */
        int alt_speed;          /* For magic substitution of 38400 bps */
        wait_queue_head_t write_wait;
        wait_queue_head_t read_wait;
@@ -211,6 +217,7 @@ struct tty_struct {
        /*
         * The following is data for the N_TTY line discipline.  For
         * historical reasons, this is included in the tty structure.
+        * Mostly locked by the BKL.
         */
        unsigned int column;
        unsigned char lnext:1, erasing:1, raw:1, real_raw:1, icanon:1;
@@ -292,15 +299,21 @@ extern void tty_unregister_device(struct tty_driver *driver, unsigned index);
 extern int tty_read_raw_data(struct tty_struct *tty, unsigned char *bufp,
                             int buflen);
 extern void tty_write_message(struct tty_struct *tty, char *msg);
+extern int tty_put_char(struct tty_struct *tty, unsigned char c);
+extern int tty_chars_in_buffer(struct tty_struct *tty);
+extern int tty_write_room(struct tty_struct *tty);
+extern void tty_driver_flush_buffer(struct tty_struct *tty);
+extern void tty_throttle(struct tty_struct *tty);
+extern void tty_unthrottle(struct tty_struct *tty);
 
 extern int is_current_pgrp_orphaned(void);
+extern struct pid *tty_get_pgrp(struct tty_struct *tty);
 extern int is_ignored(int sig);
 extern int tty_signal(int sig, struct tty_struct *tty);
 extern void tty_hangup(struct tty_struct * tty);
 extern void tty_vhangup(struct tty_struct * tty);
 extern void tty_unhangup(struct file *filp);
 extern int tty_hung_up_p(struct file * filp);
-extern int is_tty(struct file *filp);
 extern void do_SAK(struct tty_struct *tty);
 extern void __do_SAK(struct tty_struct *tty);
 extern void disassociate_ctty(int priv);
@@ -324,8 +337,7 @@ extern void tty_ldisc_put(int);
 extern void tty_wakeup(struct tty_struct *tty);
 extern void tty_ldisc_flush(struct tty_struct *tty);
 
-extern int tty_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
-                    unsigned long arg);
+extern long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
                        unsigned int cmd, unsigned long arg);
 extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg);
@@ -351,8 +363,7 @@ extern void tty_audit_add_data(struct tty_struct *tty, unsigned char *data,
 extern void tty_audit_exit(void);
 extern void tty_audit_fork(struct signal_struct *sig);
 extern void tty_audit_push(struct tty_struct *tty);
-extern void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid);
-extern void tty_audit_opening(void);
+extern void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid);
 #else
 static inline void tty_audit_add_data(struct tty_struct *tty,
                                      unsigned char *data, size_t size)
@@ -367,10 +378,7 @@ static inline void tty_audit_fork(struct signal_struct *sig)
 static inline void tty_audit_push(struct tty_struct *tty)
 {
 }
-static inline void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid)
-{
-}
-static inline void tty_audit_opening(void)
+static inline void tty_audit_push_task(struct task_struct *tsk, uid_t loginuid, u32 sessionid)
 {
 }
 #endif
index 21f69aca45053c2a538d55ce19d685619cbcfc4f..59f1c0bd8f9c6f8734a1320185a3eb28a9b29990 100644 (file)
  *     This routine is called when a particular tty device is opened.
  *     This routine is mandatory; if this routine is not filled in,
  *     the attempted open will fail with ENODEV.
+ *
+ *     Required method.
  *     
  * void (*close)(struct tty_struct * tty, struct file * filp);
  *
  *     This routine is called when a particular tty device is closed.
  *
+ *     Required method.
+ *
  * int (*write)(struct tty_struct * tty,
  *              const unsigned char *buf, int count);
  *
@@ -26,7 +30,9 @@
  *     number of characters actually accepted for writing.  This
  *     routine is mandatory.
  *
- * void (*put_char)(struct tty_struct *tty, unsigned char ch);
+ *     Optional: Required for writable devices.
+ *
+ * int (*put_char)(struct tty_struct *tty, unsigned char ch);
  *
  *     This routine is called by the kernel to write a single
  *     character to the tty device.  If the kernel uses this routine,
  *     done stuffing characters into the driver.  If there is no room
  *     in the queue, the character is ignored.
  *
+ *     Optional: Kernel will use the write method if not provided.
+ *
+ *     Note: Do not call this function directly, call tty_put_char
+ *
  * void (*flush_chars)(struct tty_struct *tty);
  *
  *     This routine is called by the kernel after it has written a
  *     series of characters to the tty device using put_char().  
+ *
+ *     Optional:
+ *
+ *     Note: Do not call this function directly, call tty_driver_flush_chars
  * 
  * int  (*write_room)(struct tty_struct *tty);
  *
  *     will accept for queuing to be written.  This number is subject
  *     to change as output buffers get emptied, or if the output flow
 *     control is asserted.
+ *
+ *     Required if the write method is provided; otherwise not needed.
+ *
+ *     Note: Do not call this function directly, call tty_write_room
  * 
  * int  (*ioctl)(struct tty_struct *tty, struct file * file,
  *         unsigned int cmd, unsigned long arg);
  *     device-specific ioctl's.  If the ioctl number passed in cmd
  *     is not recognized by the driver, it should return ENOIOCTLCMD.
  *
+ *     Optional
+ *
  * long (*compat_ioctl)(struct tty_struct *tty, struct file * file,
  *                     unsigned int cmd, unsigned long arg);
  *
  *     implement ioctl processing for 32 bit process on 64 bit system
+ *
+ *     Optional
  * 
  * void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
  *
  *     This routine allows the tty driver to be notified when
- *     device's termios settings have changed.  Note that a
- *     well-designed tty driver should be prepared to accept the case
- *     where old == NULL, and try to do something rational.
+ *     device's termios settings have changed.
+ *
+ *     Optional: Called under the termios lock
+ *
  *
  * void (*set_ldisc)(struct tty_struct *tty);
  *
  *     This routine allows the tty driver to be notified when the
  *     device's termios settings have changed.
+ *
+ *     Optional: Called under BKL (currently)
  * 
  * void (*throttle)(struct tty_struct * tty);
  *
  *     This routine notifies the tty driver that input buffers for
  *     the line discipline are close to full, and it should somehow
  *     signal that no more characters should be sent to the tty.
+ *
+ *     Optional: Always invoke via tty_throttle();
  * 
  * void (*unthrottle)(struct tty_struct * tty);
  *
  *     that characters can now be sent to the tty without fear of
  *     overrunning the input buffers of the line disciplines.
  * 
+ *     Optional: Always invoke via tty_unthrottle();
+ *
  * void (*stop)(struct tty_struct *tty);
  *
  *     This routine notifies the tty driver that it should stop
  *     outputting characters to the tty device.  
+ *
+ *     Optional:
+ *
+ *     Note: Call stop_tty not this method.
  * 
  * void (*start)(struct tty_struct *tty);
  *
 *     This routine notifies the tty driver that it should resume sending
  *     characters to the tty device.
+ *
+ *     Optional:
+ *
+ *     Note: Call start_tty not this method.
  * 
  * void (*hangup)(struct tty_struct *tty);
  *
  *     This routine notifies the tty driver that it should hangup the
  *     tty device.
  *
+ *     Required:
+ *
 * void (*break_ctl)(struct tty_struct *tty, int state);
  *
  *     This optional routine requests the tty driver to turn on or
  *
  *     If this routine is implemented, the high-level tty driver will
  *     handle the following ioctls: TCSBRK, TCSBRKP, TIOCSBRK,
- *     TIOCCBRK.  Otherwise, these ioctls will be passed down to the
- *     driver to handle.
+ *     TIOCCBRK.
+ *
+ *     Optional: Required for TCSBRK/BRKP/etc handling.
  *
  * void (*wait_until_sent)(struct tty_struct *tty, int timeout);
  * 
  *     This routine waits until the device has written out all of the
  *     characters in its transmitter FIFO.
  *
+ *     Optional: If not provided the device is assumed to have no FIFO
+ *
+ *     Note: Usually correct to call tty_wait_until_sent
+ *
  * void (*send_xchar)(struct tty_struct *tty, char ch);
  *
  *     This routine is used to send a high-priority XON/XOFF
  *     character to the device.
+ *
+ *     Optional: If not provided then the write method is called under
+ *     the atomic write lock to keep it serialized with the ldisc.
  */
 
 #include <linux/fs.h>
@@ -132,7 +179,7 @@ struct tty_operations {
        void (*close)(struct tty_struct * tty, struct file * filp);
        int  (*write)(struct tty_struct * tty,
                      const unsigned char *buf, int count);
-       void (*put_char)(struct tty_struct *tty, unsigned char ch);
+       int  (*put_char)(struct tty_struct *tty, unsigned char ch);
        void (*flush_chars)(struct tty_struct *tty);
        int  (*write_room)(struct tty_struct *tty);
        int  (*chars_in_buffer)(struct tty_struct *tty);
@@ -153,8 +200,6 @@ struct tty_operations {
        void (*send_xchar)(struct tty_struct *tty, char ch);
        int (*read_proc)(char *page, char **start, off_t off,
                          int count, int *eof, void *data);
-       int (*write_proc)(struct file *file, const char __user *buffer,
-                         unsigned long count, void *data);
        int (*tiocmget)(struct tty_struct *tty, struct file *file);
        int (*tiocmset)(struct tty_struct *tty, struct file *file,
                        unsigned int set, unsigned int clear);
@@ -190,48 +235,13 @@ struct tty_driver {
        struct tty_struct **ttys;
        struct ktermios **termios;
        struct ktermios **termios_locked;
-       void *driver_state;     /* only used for the PTY driver */
-       
+       void *driver_state;
+
        /*
-        * Interface routines from the upper tty layer to the tty
-        * driver.      Will be replaced with struct tty_operations.
+        * Driver methods
         */
-       int  (*open)(struct tty_struct * tty, struct file * filp);
-       void (*close)(struct tty_struct * tty, struct file * filp);
-       int  (*write)(struct tty_struct * tty,
-                     const unsigned char *buf, int count);
-       void (*put_char)(struct tty_struct *tty, unsigned char ch);
-       void (*flush_chars)(struct tty_struct *tty);
-       int  (*write_room)(struct tty_struct *tty);
-       int  (*chars_in_buffer)(struct tty_struct *tty);
-       int  (*ioctl)(struct tty_struct *tty, struct file * file,
-                   unsigned int cmd, unsigned long arg);
-       long (*compat_ioctl)(struct tty_struct *tty, struct file * file,
-                            unsigned int cmd, unsigned long arg);
-       void (*set_termios)(struct tty_struct *tty, struct ktermios * old);
-       void (*throttle)(struct tty_struct * tty);
-       void (*unthrottle)(struct tty_struct * tty);
-       void (*stop)(struct tty_struct *tty);
-       void (*start)(struct tty_struct *tty);
-       void (*hangup)(struct tty_struct *tty);
-       void (*break_ctl)(struct tty_struct *tty, int state);
-       void (*flush_buffer)(struct tty_struct *tty);
-       void (*set_ldisc)(struct tty_struct *tty);
-       void (*wait_until_sent)(struct tty_struct *tty, int timeout);
-       void (*send_xchar)(struct tty_struct *tty, char ch);
-       int (*read_proc)(char *page, char **start, off_t off,
-                         int count, int *eof, void *data);
-       int (*write_proc)(struct file *file, const char __user *buffer,
-                         unsigned long count, void *data);
-       int (*tiocmget)(struct tty_struct *tty, struct file *file);
-       int (*tiocmset)(struct tty_struct *tty, struct file *file,
-                       unsigned int set, unsigned int clear);
-#ifdef CONFIG_CONSOLE_POLL
-       int (*poll_init)(struct tty_driver *driver, int line, char *options);
-       int (*poll_get_char)(struct tty_driver *driver, int line);
-       void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
-#endif
 
+       const struct tty_operations *ops;
        struct list_head tty_drivers;
 };
 
diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
new file mode 100644 (file)
index 0000000..99c1b4d
--- /dev/null
@@ -0,0 +1,67 @@
+#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
+#define _LINUX_UNALIGNED_ACCESS_OK_H
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+       return le16_to_cpup((__le16 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+       return le32_to_cpup((__le32 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+       return le64_to_cpup((__le64 *)p);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+       return be16_to_cpup((__be16 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+       return be32_to_cpup((__be32 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+       return be64_to_cpup((__be64 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+       *((__le16 *)p) = cpu_to_le16(val);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+       *((__le32 *)p) = cpu_to_le32(val);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+       *((__le64 *)p) = cpu_to_le64(val);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+       *((__be16 *)p) = cpu_to_be16(val);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+       *((__be32 *)p) = cpu_to_be32(val);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+       *((__be64 *)p) = cpu_to_be64(val);
+}
+
+#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */
diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h
new file mode 100644 (file)
index 0000000..46dd12c
--- /dev/null
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H
+#define _LINUX_UNALIGNED_BE_BYTESHIFT_H
+
+#include <linux/kernel.h>
+
+static inline u16 __get_unaligned_be16(const u8 *p)
+{
+       return p[0] << 8 | p[1];
+}
+
+static inline u32 __get_unaligned_be32(const u8 *p)
+{
+       return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3];
+}
+
+static inline u64 __get_unaligned_be64(const u8 *p)
+{
+       return (u64)__get_unaligned_be32(p) << 32 |
+              __get_unaligned_be32(p + 4);
+}
+
+static inline void __put_unaligned_be16(u16 val, u8 *p)
+{
+       *p++ = val >> 8;
+       *p++ = val;
+}
+
+static inline void __put_unaligned_be32(u32 val, u8 *p)
+{
+       __put_unaligned_be16(val >> 16, p);
+       __put_unaligned_be16(val, p + 2);
+}
+
+static inline void __put_unaligned_be64(u64 val, u8 *p)
+{
+       __put_unaligned_be32(val >> 32, p);
+       __put_unaligned_be32(val, p + 4);
+}
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+       return __get_unaligned_be16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+       return __get_unaligned_be32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+       return __get_unaligned_be64((const u8 *)p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+       __put_unaligned_be16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+       __put_unaligned_be32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+       __put_unaligned_be64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */
diff --git a/include/linux/unaligned/be_memmove.h b/include/linux/unaligned/be_memmove.h
new file mode 100644 (file)
index 0000000..c2a76c5
--- /dev/null
@@ -0,0 +1,36 @@
+#ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H
+#define _LINUX_UNALIGNED_BE_MEMMOVE_H
+
+#include <linux/unaligned/memmove.h>
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+       return __get_unaligned_memmove16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+       return __get_unaligned_memmove32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+       return __get_unaligned_memmove64((const u8 *)p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+       __put_unaligned_memmove16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+       __put_unaligned_memmove32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+       __put_unaligned_memmove64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_BE_MEMMOVE_H */
diff --git a/include/linux/unaligned/be_struct.h b/include/linux/unaligned/be_struct.h
new file mode 100644 (file)
index 0000000..1324158
--- /dev/null
@@ -0,0 +1,36 @@
+#ifndef _LINUX_UNALIGNED_BE_STRUCT_H
+#define _LINUX_UNALIGNED_BE_STRUCT_H
+
+#include <linux/unaligned/packed_struct.h>
+
+static inline u16 get_unaligned_be16(const void *p)
+{
+       return __get_unaligned_cpu16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_be32(const void *p)
+{
+       return __get_unaligned_cpu32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_be64(const void *p)
+{
+       return __get_unaligned_cpu64((const u8 *)p);
+}
+
+static inline void put_unaligned_be16(u16 val, void *p)
+{
+       __put_unaligned_cpu16(val, p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+       __put_unaligned_cpu32(val, p);
+}
+
+static inline void put_unaligned_be64(u64 val, void *p)
+{
+       __put_unaligned_cpu64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_BE_STRUCT_H */
diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h
new file mode 100644 (file)
index 0000000..02d97ff
--- /dev/null
@@ -0,0 +1,68 @@
+#ifndef _LINUX_UNALIGNED_GENERIC_H
+#define _LINUX_UNALIGNED_GENERIC_H
+
+/*
+ * Cause a link-time error if we try an unaligned access other than
+ * 1, 2, 4 or 8 bytes long
+ */
+extern void __bad_unaligned_access_size(void);
+
+#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({                    \
+       __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr),                      \
+       __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)),   \
+       __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)),   \
+       __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)),   \
+       __bad_unaligned_access_size()))));                                      \
+       }))
+
+#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({                    \
+       __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr),                      \
+       __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)),   \
+       __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)),   \
+       __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)),   \
+       __bad_unaligned_access_size()))));                                      \
+       }))
+
+#define __put_unaligned_le(val, ptr) ({                                        \
+       void *__gu_p = (ptr);                                           \
+       switch (sizeof(*(ptr))) {                                       \
+       case 1:                                                         \
+               *(u8 *)__gu_p = (__force u8)(val);                      \
+               break;                                                  \
+       case 2:                                                         \
+               put_unaligned_le16((__force u16)(val), __gu_p);         \
+               break;                                                  \
+       case 4:                                                         \
+               put_unaligned_le32((__force u32)(val), __gu_p);         \
+               break;                                                  \
+       case 8:                                                         \
+               put_unaligned_le64((__force u64)(val), __gu_p);         \
+               break;                                                  \
+       default:                                                        \
+               __bad_unaligned_access_size();                          \
+               break;                                                  \
+       }                                                               \
+       (void)0; })
+
+#define __put_unaligned_be(val, ptr) ({                                        \
+       void *__gu_p = (ptr);                                           \
+       switch (sizeof(*(ptr))) {                                       \
+       case 1:                                                         \
+               *(u8 *)__gu_p = (__force u8)(val);                      \
+               break;                                                  \
+       case 2:                                                         \
+               put_unaligned_be16((__force u16)(val), __gu_p);         \
+               break;                                                  \
+       case 4:                                                         \
+               put_unaligned_be32((__force u32)(val), __gu_p);         \
+               break;                                                  \
+       case 8:                                                         \
+               put_unaligned_be64((__force u64)(val), __gu_p);         \
+               break;                                                  \
+       default:                                                        \
+               __bad_unaligned_access_size();                          \
+               break;                                                  \
+       }                                                               \
+       (void)0; })
+
+#endif /* _LINUX_UNALIGNED_GENERIC_H */
diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h
new file mode 100644 (file)
index 0000000..59777e9
--- /dev/null
@@ -0,0 +1,70 @@
+#ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H
+#define _LINUX_UNALIGNED_LE_BYTESHIFT_H
+
+#include <linux/kernel.h>
+
+static inline u16 __get_unaligned_le16(const u8 *p)
+{
+       return p[0] | p[1] << 8;
+}
+
+static inline u32 __get_unaligned_le32(const u8 *p)
+{
+       return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24;
+}
+
+static inline u64 __get_unaligned_le64(const u8 *p)
+{
+       return (u64)__get_unaligned_le32(p + 4) << 32 |
+              __get_unaligned_le32(p);
+}
+
+static inline void __put_unaligned_le16(u16 val, u8 *p)
+{
+       *p++ = val;
+       *p++ = val >> 8;
+}
+
+static inline void __put_unaligned_le32(u32 val, u8 *p)
+{
+       __put_unaligned_le16(val >> 16, p + 2);
+       __put_unaligned_le16(val, p);
+}
+
+static inline void __put_unaligned_le64(u64 val, u8 *p)
+{
+       __put_unaligned_le32(val >> 32, p + 4);
+       __put_unaligned_le32(val, p);
+}
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+       return __get_unaligned_le16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+       return __get_unaligned_le32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+       return __get_unaligned_le64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+       __put_unaligned_le16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+       __put_unaligned_le32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+       __put_unaligned_le64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */
diff --git a/include/linux/unaligned/le_memmove.h b/include/linux/unaligned/le_memmove.h
new file mode 100644 (file)
index 0000000..269849b
--- /dev/null
@@ -0,0 +1,36 @@
+#ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H
+#define _LINUX_UNALIGNED_LE_MEMMOVE_H
+
+#include <linux/unaligned/memmove.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+       return __get_unaligned_memmove16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+       return __get_unaligned_memmove32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+       return __get_unaligned_memmove64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+       __put_unaligned_memmove16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+       __put_unaligned_memmove32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+       __put_unaligned_memmove64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
diff --git a/include/linux/unaligned/le_struct.h b/include/linux/unaligned/le_struct.h
new file mode 100644 (file)
index 0000000..088c457
--- /dev/null
@@ -0,0 +1,36 @@
+#ifndef _LINUX_UNALIGNED_LE_STRUCT_H
+#define _LINUX_UNALIGNED_LE_STRUCT_H
+
+#include <linux/unaligned/packed_struct.h>
+
+static inline u16 get_unaligned_le16(const void *p)
+{
+       return __get_unaligned_cpu16((const u8 *)p);
+}
+
+static inline u32 get_unaligned_le32(const void *p)
+{
+       return __get_unaligned_cpu32((const u8 *)p);
+}
+
+static inline u64 get_unaligned_le64(const void *p)
+{
+       return __get_unaligned_cpu64((const u8 *)p);
+}
+
+static inline void put_unaligned_le16(u16 val, void *p)
+{
+       __put_unaligned_cpu16(val, p);
+}
+
+static inline void put_unaligned_le32(u32 val, void *p)
+{
+       __put_unaligned_cpu32(val, p);
+}
+
+static inline void put_unaligned_le64(u64 val, void *p)
+{
+       __put_unaligned_cpu64(val, p);
+}
+
+#endif /* _LINUX_UNALIGNED_LE_STRUCT_H */
diff --git a/include/linux/unaligned/memmove.h b/include/linux/unaligned/memmove.h
new file mode 100644 (file)
index 0000000..eeb5a77
--- /dev/null
@@ -0,0 +1,45 @@
+#ifndef _LINUX_UNALIGNED_MEMMOVE_H
+#define _LINUX_UNALIGNED_MEMMOVE_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
+
+static inline u16 __get_unaligned_memmove16(const void *p)
+{
+       u16 tmp;
+       memmove(&tmp, p, 2);
+       return tmp;
+}
+
+static inline u32 __get_unaligned_memmove32(const void *p)
+{
+       u32 tmp;
+       memmove(&tmp, p, 4);
+       return tmp;
+}
+
+static inline u64 __get_unaligned_memmove64(const void *p)
+{
+       u64 tmp;
+       memmove(&tmp, p, 8);
+       return tmp;
+}
+
+static inline void __put_unaligned_memmove16(u16 val, void *p)
+{
+       memmove(p, &val, 2);
+}
+
+static inline void __put_unaligned_memmove32(u32 val, void *p)
+{
+       memmove(p, &val, 4);
+}
+
+static inline void __put_unaligned_memmove64(u64 val, void *p)
+{
+       memmove(p, &val, 8);
+}
+
+#endif /* _LINUX_UNALIGNED_MEMMOVE_H */
diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h
new file mode 100644 (file)
index 0000000..2498bb9
--- /dev/null
@@ -0,0 +1,46 @@
+#ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H
+#define _LINUX_UNALIGNED_PACKED_STRUCT_H
+
+#include <linux/kernel.h>
+
+struct __una_u16 { u16 x __attribute__((packed)); };
+struct __una_u32 { u32 x __attribute__((packed)); };
+struct __una_u64 { u64 x __attribute__((packed)); };
+
+static inline u16 __get_unaligned_cpu16(const void *p)
+{
+       const struct __una_u16 *ptr = (const struct __una_u16 *)p;
+       return ptr->x;
+}
+
+static inline u32 __get_unaligned_cpu32(const void *p)
+{
+       const struct __una_u32 *ptr = (const struct __una_u32 *)p;
+       return ptr->x;
+}
+
+static inline u64 __get_unaligned_cpu64(const void *p)
+{
+       const struct __una_u64 *ptr = (const struct __una_u64 *)p;
+       return ptr->x;
+}
+
+static inline void __put_unaligned_cpu16(u16 val, void *p)
+{
+       struct __una_u16 *ptr = (struct __una_u16 *)p;
+       ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu32(u32 val, void *p)
+{
+       struct __una_u32 *ptr = (struct __una_u32 *)p;
+       ptr->x = val;
+}
+
+static inline void __put_unaligned_cpu64(u64 val, void *p)
+{
+       struct __una_u64 *ptr = (struct __una_u64 *)p;
+       ptr->x = val;
+}
+
+#endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */
index ce8e7da05807b7e69f0967b6c78620a4c202d6b7..364789aae9f335c44fa9e8b71848048134ab5ea2 100644 (file)
@@ -31,6 +31,7 @@ struct vm_struct {
        struct page             **pages;
        unsigned int            nr_pages;
        unsigned long           phys_addr;
+       void                    *caller;
 };
 
 /*
@@ -66,6 +67,8 @@ static inline size_t get_vm_area_size(const struct vm_struct *area)
 }
 
 extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
+extern struct vm_struct *get_vm_area_caller(unsigned long size,
+                                       unsigned long flags, void *caller);
 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                        unsigned long start, unsigned long end);
 extern struct vm_struct *get_vm_area_node(unsigned long size,
@@ -87,4 +90,6 @@ extern void free_vm_area(struct vm_struct *area);
 extern rwlock_t vmlist_lock;
 extern struct vm_struct *vmlist;
 
+extern const struct seq_operations vmalloc_op;
+
 #endif /* _LINUX_VMALLOC_H */
index 9f1b4b46151ebbebe40413a3368c18afa850704c..e83b69346d2342fd2927278d40f98c4fb1afdff3 100644 (file)
@@ -25,6 +25,7 @@
 #define HIGHMEM_ZONE(xx)
 #endif
 
+
 #define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE
 
 enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
@@ -37,6 +38,9 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
                FOR_ALL_ZONES(PGSCAN_DIRECT),
                PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL,
                PAGEOUTRUN, ALLOCSTALL, PGROTATED,
+#ifdef CONFIG_HUGETLB_PAGE
+               HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
+#endif
                NR_VM_EVENT_ITEMS
 };
 
@@ -174,7 +178,7 @@ static inline unsigned long node_page_state(int node,
                zone_page_state(&zones[ZONE_MOVABLE], item);
 }
 
-extern void zone_statistics(struct zonelist *, struct zone *);
+extern void zone_statistics(struct zone *, struct zone *);
 
 #else
 
index b7b3362f7717823768d0ae30f539019048ec9571..f462439cc2886c56f7c1940f4100ba726d4068c4 100644 (file)
@@ -114,6 +114,9 @@ struct file;
 int dirty_writeback_centisecs_handler(struct ctl_table *, int, struct file *,
                                      void __user *, size_t *, loff_t *);
 
+void get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
+                struct backing_dev_info *bdi);
+
 void page_writeback_init(void);
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                                        unsigned long nr_pages_dirtied);
index df6b95d2218e26930ef95d0bee2e05187f487659..d131e352cfe1f1ec9b0157625cc2db98d1b8098a 100644 (file)
@@ -47,10 +47,10 @@ struct xattr_handler {
 };
 
 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
-ssize_t vfs_getxattr(struct dentry *, char *, void *, size_t);
+ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
-int vfs_setxattr(struct dentry *, char *, void *, size_t, int);
-int vfs_removexattr(struct dentry *, char *);
+int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
+int vfs_removexattr(struct dentry *, const char *);
 
 ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size);
 ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
index 0ea0bd85c0368543dae9bc754e7cd97dd50be61d..2a527742701a2232090cc7e35334ebac12601b9d 100644 (file)
@@ -64,6 +64,7 @@ enum {
        /* Conexant MPEG encoder/decoders: reserved range 410-420 */
        V4L2_IDENT_CX23415 = 415,
        V4L2_IDENT_CX23416 = 416,
+       V4L2_IDENT_CX23418 = 418,
 
        /* module vp27smpx: just ident 2700 */
        V4L2_IDENT_VP27SMPX = 2700,
index 316a58453134df7db59ff22d74253b73c2747f83..020d05758bd8b951a352aabc043b0312de827800 100644 (file)
@@ -107,9 +107,11 @@ int v4l2_chip_match_host(u32 id_type, u32 chip_id);
 struct i2c_driver;
 struct i2c_adapter;
 struct i2c_client;
+struct i2c_device_id;
 
 int v4l2_i2c_attach(struct i2c_adapter *adapter, int address, struct i2c_driver *driver,
-               const char *name, int (*probe)(struct i2c_client *));
+               const char *name,
+               int (*probe)(struct i2c_client *, const struct i2c_device_id *));
 
 /* ------------------------------------------------------------------------- */
 
index e7645578fc22dbb87e817a810d3184c8f8a45d96..347b6f8beb23761fa4a7c77fa65ad1dc6a81274b 100644 (file)
@@ -25,7 +25,7 @@ struct v4l2_i2c_driver_data {
        const char * const name;
        int driverid;
        int (*command)(struct i2c_client *client, unsigned int cmd, void *arg);
-       int (*probe)(struct i2c_client *client);
+       int (*probe)(struct i2c_client *client, const struct i2c_device_id *id);
        int (*remove)(struct i2c_client *client);
        int (*suspend)(struct i2c_client *client, pm_message_t state);
        int (*resume)(struct i2c_client *client);
index 9e4bab2769151fd51eed09d77bbbb52b2a76068d..7b6f06be795034d7ae64b385bfa9ef0eac4078c9 100644 (file)
@@ -30,7 +30,7 @@ struct v4l2_i2c_driver_data {
        const char * const name;
        int driverid;
        int (*command)(struct i2c_client *client, unsigned int cmd, void *arg);
-       int (*probe)(struct i2c_client *client);
+       int (*probe)(struct i2c_client *client, const struct i2c_device_id *id);
        int (*remove)(struct i2c_client *client);
        int (*suspend)(struct i2c_client *client, pm_message_t state);
        int (*resume)(struct i2c_client *client);
index 406db242f73a6d8a4fecdaeff456fa08cccad26d..164cb682e220d3f754cee32ab6a23f27acc61656 100644 (file)
@@ -40,4 +40,10 @@ extern int put_cmsg_compat(struct msghdr*, int, int, int, void *);
 
 extern int cmsghdr_from_user_compat_to_kern(struct msghdr *, struct sock *, unsigned char *, int);
 
+extern int compat_mc_setsockopt(struct sock *, int, int, char __user *, int,
+       int (*)(struct sock *, int, int, char __user *, int));
+extern int compat_mc_getsockopt(struct sock *, int, int, char __user *,
+       int __user *, int (*)(struct sock *, int, int, char __user *,
+                               int __user *));
+
 #endif /* NET_COMPAT_H */
index 56f3c94ae620823d09a013ba3878ff2de04510ba..9a51ebad3f1fc21e054cedd03b344d8a851c28bd 100644 (file)
@@ -405,7 +405,8 @@ struct sk_buff;
 struct ip_vs_protocol {
        struct ip_vs_protocol   *next;
        char                    *name;
-       __u16                   protocol;
+       u16                     protocol;
+       u16                     num_states;
        int                     dont_defrag;
        atomic_t                appcnt;         /* counter of proto app incs */
        int                     *timeout_table; /* protocol timeout table */
index 1bb7087833d374eb2429dd55318248ddc1747d50..a6874ba22d546322dc5ef9c2c86271003ec156e4 100644 (file)
@@ -107,16 +107,6 @@ struct nf_conntrack_tuple_mask
        } src;
 };
 
-/* This is optimized opposed to a memset of the whole structure.  Everything we
- * really care about is the  source/destination unions */
-#define NF_CT_TUPLE_U_BLANK(tuple)                                     \
-        do {                                                           \
-                (tuple)->src.u.all = 0;                                \
-                (tuple)->dst.u.all = 0;                                \
-               memset(&(tuple)->src.u3, 0, sizeof((tuple)->src.u3));   \
-               memset(&(tuple)->dst.u3, 0, sizeof((tuple)->dst.u3));   \
-        } while (0)
-
 #ifdef __KERNEL__
 
 static inline void nf_ct_dump_tuple_ip(const struct nf_conntrack_tuple *t)
index 5e53a85b5ca1bc56014a192b50304800298ae386..e4d2d6baa98388e4d673f5f57ddc20405a3fdb69 100644 (file)
@@ -103,6 +103,7 @@ struct cipso_v4_doi;
 struct netlbl_audit {
        u32 secid;
        uid_t loginuid;
+       u32 sessionid;
 };
 
 /*
index c6338802e8f11a4f727b4a45959dcb32725c6760..fc836ff824cc78eb23f4c38462d4db60b3487d08 100644 (file)
@@ -116,7 +116,7 @@ extern int          __ip_route_output_key(struct net *, struct rtable **, const struct f
 extern int             ip_route_output_key(struct net *, struct rtable **, struct flowi *flp);
 extern int             ip_route_output_flow(struct net *, struct rtable **rp, struct flowi *flp, struct sock *sk, int flags);
 extern int             ip_route_input(struct sk_buff*, __be32 dst, __be32 src, u8 tos, struct net_device *devin);
-extern unsigned short  ip_rt_frag_needed(struct net *net, struct iphdr *iph, unsigned short new_mtu);
+extern unsigned short  ip_rt_frag_needed(struct net *net, struct iphdr *iph, unsigned short new_mtu, struct net_device *dev);
 extern void            ip_rt_send_redirect(struct sk_buff *skb);
 
 extern unsigned                inet_addr_type(struct net *net, __be32 addr);
index baa9f372cfd165b7496954bef31510990d4190bd..d1350bcccb03c68efb07bac000b16c0f9cb49ca1 100644 (file)
@@ -597,8 +597,9 @@ struct xfrm_spi_skb_cb {
 /* Audit Information */
 struct xfrm_audit
 {
-       u32     loginuid;
        u32     secid;
+       uid_t   loginuid;
+       u32     sessionid;
 };
 
 #ifdef CONFIG_AUDITSYSCALL
@@ -616,13 +617,13 @@ static inline struct audit_buffer *xfrm_audit_start(const char *op)
        return audit_buf;
 }
 
-static inline void xfrm_audit_helper_usrinfo(u32 auid, u32 secid,
+static inline void xfrm_audit_helper_usrinfo(uid_t auid, u32 ses, u32 secid,
                                             struct audit_buffer *audit_buf)
 {
        char *secctx;
        u32 secctx_len;
 
-       audit_log_format(audit_buf, " auid=%u", auid);
+       audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
        if (secid != 0 &&
            security_secid_to_secctx(secid, &secctx, &secctx_len) == 0) {
                audit_log_format(audit_buf, " subj=%s", secctx);
@@ -632,13 +633,13 @@ static inline void xfrm_audit_helper_usrinfo(u32 auid, u32 secid,
 }
 
 extern void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
-                                 u32 auid, u32 secid);
+                                 u32 auid, u32 ses, u32 secid);
 extern void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
-                                 u32 auid, u32 secid);
+                                 u32 auid, u32 ses, u32 secid);
 extern void xfrm_audit_state_add(struct xfrm_state *x, int result,
-                                u32 auid, u32 secid);
+                                u32 auid, u32 ses, u32 secid);
 extern void xfrm_audit_state_delete(struct xfrm_state *x, int result,
-                                   u32 auid, u32 secid);
+                                   u32 auid, u32 ses, u32 secid);
 extern void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
                                             struct sk_buff *skb);
 extern void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
@@ -647,10 +648,10 @@ extern void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
 extern void xfrm_audit_state_icvfail(struct xfrm_state *x,
                                     struct sk_buff *skb, u8 proto);
 #else
-#define xfrm_audit_policy_add(x, r, a, s)      do { ; } while (0)
-#define xfrm_audit_policy_delete(x, r, a, s)   do { ; } while (0)
-#define xfrm_audit_state_add(x, r, a, s)       do { ; } while (0)
-#define xfrm_audit_state_delete(x, r, a, s)    do { ; } while (0)
+#define xfrm_audit_policy_add(x, r, a, se, s)  do { ; } while (0)
+#define xfrm_audit_policy_delete(x, r, a, se, s)       do { ; } while (0)
+#define xfrm_audit_state_add(x, r, a, se, s)   do { ; } while (0)
+#define xfrm_audit_state_delete(x, r, a, se, s)        do { ; } while (0)
 #define xfrm_audit_state_replay_overflow(x, s) do { ; } while (0)
 #define xfrm_audit_state_notfound_simple(s, f) do { ; } while (0)
 #define xfrm_audit_state_notfound(s, f, sp, sq)        do { ; } while (0)
index 22298423cf0b3d0e597e347de0ab376b8bbcaf24..9ee0d2e51b16e5ea04910b5cfff671bd5563aeb8 100644 (file)
@@ -62,7 +62,7 @@ struct ib_umem_chunk {
 #ifdef CONFIG_INFINIBAND_USER_MEM
 
 struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
-                           size_t size, int access);
+                           size_t size, int access, int dmasync);
 void ib_umem_release(struct ib_umem *umem);
 int ib_umem_page_count(struct ib_umem *umem);
 
@@ -72,7 +72,7 @@ int ib_umem_page_count(struct ib_umem *umem);
 
 static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context,
                                          unsigned long addr, size_t size,
-                                         int access) {
+                                         int access, int dmasync) {
        return ERR_PTR(-EINVAL);
 }
 static inline void ib_umem_release(struct ib_umem *umem) { }
index 2dcbecce3f61201a13710d104bc69f746f265823..911a661b7278c08ef43d91afaceb1098b2670e15 100644 (file)
@@ -1542,6 +1542,24 @@ static inline void ib_dma_unmap_single(struct ib_device *dev,
                dma_unmap_single(dev->dma_device, addr, size, direction);
 }
 
+static inline u64 ib_dma_map_single_attrs(struct ib_device *dev,
+                                         void *cpu_addr, size_t size,
+                                         enum dma_data_direction direction,
+                                         struct dma_attrs *attrs)
+{
+       return dma_map_single_attrs(dev->dma_device, cpu_addr, size,
+                                   direction, attrs);
+}
+
+static inline void ib_dma_unmap_single_attrs(struct ib_device *dev,
+                                            u64 addr, size_t size,
+                                            enum dma_data_direction direction,
+                                            struct dma_attrs *attrs)
+{
+       return dma_unmap_single_attrs(dev->dma_device, addr, size,
+                                     direction, attrs);
+}
+
 /**
  * ib_dma_map_page - Map a physical page to DMA address
  * @dev: The device for which the dma_addr is to be created
@@ -1611,6 +1629,21 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev,
                dma_unmap_sg(dev->dma_device, sg, nents, direction);
 }
 
+static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
+                                     struct scatterlist *sg, int nents,
+                                     enum dma_data_direction direction,
+                                     struct dma_attrs *attrs)
+{
+       return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+}
+
+static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
+                                        struct scatterlist *sg, int nents,
+                                        enum dma_data_direction direction,
+                                        struct dma_attrs *attrs)
+{
+       dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs);
+}
 /**
  * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
  * @dev: The device for which the DMA addresses were created
index 7b90b63fb5c7cabf920d26590b8de56663dd46e6..cd3ca63d4fb1e43aa98d9b3bac64aca04033db18 100644 (file)
@@ -225,6 +225,7 @@ struct iscsi_conn {
 
        /* custom statistics */
        uint32_t                eh_abort_cnt;
+       uint32_t                fmr_unalign_cnt;
 };
 
 struct iscsi_pool {
index b8b19e2f57bbece76e739752961ce91979512f23..f6a9fe0ef09c62cf38b28f6cdd9bfd9bad06121a 100644 (file)
@@ -181,7 +181,8 @@ struct scsi_device {
        sdev_printk(prefix, (scmd)->device, fmt, ##a)
 
 enum scsi_target_state {
-       STARGET_RUNNING = 1,
+       STARGET_CREATED = 1,
+       STARGET_RUNNING,
        STARGET_DEL,
 };
 
index 68b634b75068bfe9e33573d6c1fe2d6f475a8b53..1f1d53f8830b55036dbf241bb38e227d8e121b5f 100644 (file)
@@ -50,6 +50,7 @@
 #define MPU401_INFO_INTEGRATED (1 << 2)        /* integrated h/w port */
 #define MPU401_INFO_MMIO       (1 << 3)        /* MMIO access */
 #define MPU401_INFO_TX_IRQ     (1 << 4)        /* independent TX irq */
+#define MPU401_INFO_NO_ACK     (1 << 6)        /* No ACK cmd needed */
 
 #define MPU401_MODE_BIT_INPUT          0
 #define MPU401_MODE_BIT_OUTPUT         1
index 336c20db87f817f2d6667e02b5a05b1592fb3148..ed64862c4e18c13eaf5060679f42e5ec8f25556f 100644 (file)
 #ifndef __ATMEL_LCDC_H__
 #define __ATMEL_LCDC_H__
 
+
+/* How the LCD wires are connected to the chip:
+ * some Atmel chips use BGR color order (instead of the standard RGB);
+ * swapped wiring on the board can bring them back to RGB mode.
+ */
+#define ATMEL_LCDC_WIRING_BGR  0
+#define ATMEL_LCDC_WIRING_RGB  1
+
+
  /* LCD Controller info data structure, stored in device platform_data */
 struct atmel_lcdfb_info {
        spinlock_t              lock;
@@ -39,8 +48,10 @@ struct atmel_lcdfb_info {
        u8                      bl_power;
 #endif
        bool                    lcdcon_is_backlight;
+       u8                      saved_lcdcon;
 
        u8                      default_bpp;
+       u8                      lcd_wiring_mode;
        unsigned int            default_lcdcon2;
        unsigned int            default_dmacon;
        void (*atmel_lcdfb_power_control)(int on);
diff --git a/include/video/hecubafb.h b/include/video/hecubafb.h
new file mode 100644 (file)
index 0000000..7b99523
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * hecubafb.h - definitions for the hecuba framebuffer driver
+ *
+ * Copyright (C) 2008 by Jaya Kumar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ */
+
+#ifndef _LINUX_HECUBAFB_H_
+#define _LINUX_HECUBAFB_H_
+
+/* Apollo controller specific defines */
+#define APOLLO_START_NEW_IMG   0xA0
+#define APOLLO_STOP_IMG_DATA   0xA1
+#define APOLLO_DISPLAY_IMG     0xA2
+#define APOLLO_ERASE_DISPLAY   0xA3
+#define APOLLO_INIT_DISPLAY    0xA4
+
+/* Hecuba interface specific defines */
+#define HCB_WUP_BIT    0x01
+#define HCB_DS_BIT     0x02
+#define HCB_RW_BIT     0x04
+#define HCB_CD_BIT     0x08
+#define HCB_ACK_BIT    0x80
+
+/* Struct used by hecubafb; board-specific state comes from *board. */
+struct hecubafb_par {
+       struct fb_info *info;
+       struct hecuba_board *board;
+       void (*send_command)(struct hecubafb_par *, unsigned char);
+       void (*send_data)(struct hecubafb_par *, unsigned char);
+};
+
+/* Board-specific routines.
+ * Board drivers can implement wait_for_ack with interrupts if desired: if
+ * wait_for_ack is called with clear=0, sleep and return when ACK goes high;
+ * if it is called with clear=1, return when ACK goes low. */
+struct hecuba_board {
+       struct module *owner;
+       void (*remove)(struct hecubafb_par *);
+       void (*set_ctl)(struct hecubafb_par *, unsigned char, unsigned char);
+       void (*set_data)(struct hecubafb_par *, unsigned char);
+       void (*wait_for_ack)(struct hecubafb_par *, int);
+       int (*init)(struct hecubafb_par *);
+};
+
+
+#endif
diff --git a/include/video/metronomefb.h b/include/video/metronomefb.h
new file mode 100644 (file)
index 0000000..dab04b4
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * metronomefb.h - definitions for the metronome framebuffer driver
+ *
+ * Copyright (C) 2008 by Jaya Kumar
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ */
+
+#ifndef _LINUX_METRONOMEFB_H_
+#define _LINUX_METRONOMEFB_H_
+
+/* address and control descriptors used by metronome controller */
+struct metromem_desc {
+       u32 mFDADR0;
+       u32 mFSADR0;
+       u32 mFIDR0;
+       u32 mLDCMD0;
+};
+
+/* command structure used by metronome controller */
+struct metromem_cmd {
+       u16 opcode;
+       u16 args[((64-2)/2)];
+       u16 csum;
+};
+
+/* Struct used by metronomefb; board-specific state comes from *board. */
+struct metronomefb_par {
+       unsigned char *metromem;
+       struct metromem_desc *metromem_desc;
+       struct metromem_cmd *metromem_cmd;
+       unsigned char *metromem_wfm;
+       unsigned char *metromem_img;
+       u16 *metromem_img_csum;
+       u16 *csum_table;
+       int metromemsize;
+       dma_addr_t metromem_dma;
+       dma_addr_t metromem_desc_dma;
+       struct fb_info *info;
+       struct metronome_board *board;
+       wait_queue_head_t waitq;
+       u8 frame_count;
+};
+
+/* board specific routines */
+struct metronome_board {
+       struct module *owner;
+       void (*free_irq)(struct fb_info *);
+       void (*init_gpio_regs)(struct metronomefb_par *);
+       void (*init_lcdc_regs)(struct metronomefb_par *);
+       void (*post_dma_setup)(struct metronomefb_par *);
+       void (*set_rst)(struct metronomefb_par *, int);
+       void (*set_stdby)(struct metronomefb_par *, int);
+       int (*met_wait_event)(struct metronomefb_par *);
+       int (*met_wait_event_intr)(struct metronomefb_par *);
+       int (*setup_irq)(struct fb_info *);
+};
+
+#endif
index ba3a389fab94dcef61341973de66ae12992892cf..3e7b257fc05fca6fba4d6325a3da1ae95c3f9fde 100644 (file)
@@ -259,17 +259,14 @@ config IKCONFIG_PROC
 config LOG_BUF_SHIFT
        int "Kernel log buffer size (16 => 64KB, 17 => 128KB)"
        range 12 21
-       default 17 if S390 || LOCKDEP
-       default 16 if X86_NUMAQ || IA64
-       default 15 if SMP
-       default 14
+       default 17
        help
          Select kernel log buffer size as a power of 2.
-         Defaults and Examples:
-                    17 => 128 KB for S/390
-                    16 => 64 KB for x86 NUMAQ or IA-64
-                    15 => 32 KB for SMP
-                    14 => 16 KB for uniprocessor
+         Examples:
+                    17 => 128 KB
+                    16 => 64 KB
+                    15 => 32 KB
+                    14 => 16 KB
                     13 =>  8 KB
                     12 =>  4 KB
 
@@ -284,6 +281,7 @@ config CGROUPS
 config CGROUP_DEBUG
        bool "Example debug cgroup subsystem"
        depends on CGROUPS
+       default n
        help
          This option enables a simple cgroup subsystem that
          exports useful debugging information about the cgroups
@@ -300,6 +298,13 @@ config CGROUP_NS
           for instance virtual servers and checkpoint/restart
           jobs.
 
+config CGROUP_DEVICE
+       bool "Device controller for cgroups"
+       depends on CGROUPS && EXPERIMENTAL
+       help
+         Provides a cgroup implementing whitelists for devices which
+         a process in the cgroup can mknod or open.
+
 config CPUSETS
        bool "Cpuset support"
        depends on SMP && CGROUPS
@@ -373,9 +378,13 @@ config RESOURCE_COUNTERS
           infrastructure that works with cgroups
        depends on CGROUPS
 
+config MM_OWNER
+       bool
+
 config CGROUP_MEM_RES_CTLR
        bool "Memory Resource Controller for Control Groups"
        depends on CGROUPS && RESOURCE_COUNTERS
+       select MM_OWNER
        help
          Provides a memory resource controller that manages both page cache and
          RSS memory.
@@ -388,6 +397,9 @@ config CGROUP_MEM_RES_CTLR
          Only enable when you're ok with these trade offs and really
          sure you need the memory resource controller.
 
+         This config option also selects MM_OWNER config option, which
+         could in turn add some fork/exit overhead.
+
 config SYSFS_DEPRECATED
        bool
 
@@ -496,16 +508,12 @@ source "usr/Kconfig"
 endif
 
 config CC_OPTIMIZE_FOR_SIZE
-       bool "Optimize for size (Look out for broken compilers!)"
+       bool "Optimize for size"
        default y
-       depends on ARM || H8300 || SUPERH || EXPERIMENTAL
        help
          Enabling this option will pass "-Os" instead of "-O2" to gcc
          resulting in a smaller kernel.
 
-         WARNING: some versions of gcc may generate incorrect code with this
-         option.  If problems are observed, a gcc upgrade may be needed.
-
          If unsure, say N.
 
 config SYSCTL
@@ -521,7 +529,7 @@ menuconfig EMBEDDED
 
 config UID16
        bool "Enable 16-bit UID system calls" if EMBEDDED
-       depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && SPARC32_COMPAT) || UML || (X86_64 && IA32_EMULATION)
+       depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION)
        default y
        help
          This enables the legacy 16-bit UID syscall wrappers.
@@ -542,6 +550,17 @@ config SYSCTL_SYSCALL
 
          If unsure say Y here.
 
+config SYSCTL_SYSCALL_CHECK
+       bool "Sysctl checks" if EMBEDDED
+       depends on SYSCTL_SYSCALL
+       default y
+       ---help---
+         sys_sysctl uses binary paths that have been found challenging
+         to properly maintain and use. This enables checks that help
+         you to keep things correct.
+
+         If unsure say Y here.
+
 config KALLSYMS
         bool "Load all symbols for debugging/ksymoops" if EMBEDDED
         default y
index 753dc54a664972ff0ae441ab8945c1345d563c2a..7473b0c59d4dd11c438bd4808c0044caba4e7a33 100644 (file)
@@ -133,7 +133,7 @@ static void __init md_setup_drive(void)
                else
                        dev = MKDEV(MD_MAJOR, minor);
                create_dev(name, dev);
-               for (i = 0; i < MD_SB_DISKS && devname != 0; i++) {
+               for (i = 0; i < MD_SB_DISKS && devname != NULL; i++) {
                        char *p;
                        char comp_name[64];
                        u32 rdev;
index 3ac5904d1b124c1584b0308b0c42712d9a86cc20..46dfd64ae8fb027cf74e1252b91b2919f81a1923 100644 (file)
@@ -212,7 +212,7 @@ int __init rd_load_image(char *from)
        }
 
        buf = kmalloc(BLOCK_SIZE, GFP_KERNEL);
-       if (buf == 0) {
+       if (!buf) {
                printk(KERN_ERR "RAMDISK: could not allocate buffer\n");
                goto done;
        }
index d53fee8d8604a5514889bfcd93acee4dd882fae0..8eeeccb328c9f984812941592258ebd72e5d96e6 100644 (file)
@@ -57,7 +57,7 @@ static char __init *find_link(int major, int minor, int ino,
                        continue;
                return (*p)->name;
        }
-       q = (struct hash *)malloc(sizeof(struct hash));
+       q = kmalloc(sizeof(struct hash), GFP_KERNEL);
        if (!q)
                panic("can't allocate link hash entry");
        q->major = major;
@@ -77,7 +77,7 @@ static void __init free_hash(void)
                while (*p) {
                        q = *p;
                        *p = q->next;
-                       free(q);
+                       kfree(q);
                }
        }
 }
@@ -445,10 +445,10 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
 {
        int written;
        dry_run = check_only;
-       header_buf = malloc(110);
-       symlink_buf = malloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1);
-       name_buf = malloc(N_ALIGN(PATH_MAX));
-       window = malloc(WSIZE);
+       header_buf = kmalloc(110, GFP_KERNEL);
+       symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL);
+       name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL);
+       window = kmalloc(WSIZE, GFP_KERNEL);
        if (!window || !header_buf || !symlink_buf || !name_buf)
                panic("can't allocate buffers");
        state = Start;
@@ -484,10 +484,10 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
                buf += inptr;
                len -= inptr;
        }
-       free(window);
-       free(name_buf);
-       free(symlink_buf);
-       free(header_buf);
+       kfree(window);
+       kfree(name_buf);
+       kfree(symlink_buf);
+       kfree(header_buf);
        return message;
 }
 
index 1687b0167c4a4c458c0b7956a106fc678813c431..a87d4ca5c36c19706ee4d8eb2997c45f727f4ff1 100644 (file)
 #include <linux/unwind.h>
 #include <linux/buffer_head.h>
 #include <linux/debug_locks.h>
+#include <linux/debugobjects.h>
 #include <linux/lockdep.h>
 #include <linux/pid_namespace.h>
 #include <linux/device.h>
 #include <linux/kthread.h>
 #include <linux/sched.h>
 #include <linux/signal.h>
+#include <linux/idr.h>
 
 #include <asm/io.h>
 #include <asm/bugs.h>
@@ -458,7 +460,7 @@ static void noinline __init_refok rest_init(void)
        kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND);
        numa_default_policy();
        pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
-       kthreadd_task = find_task_by_pid(pid);
+       kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
        unlock_kernel();
 
        /*
@@ -542,6 +544,7 @@ asmlinkage void __init start_kernel(void)
         */
        unwind_init();
        lockdep_init();
+       debug_objects_early_init();
        cgroup_init_early();
 
        local_irq_disable();
@@ -559,6 +562,7 @@ asmlinkage void __init start_kernel(void)
        printk(KERN_NOTICE);
        printk(linux_banner);
        setup_arch(&command_line);
+       mm_init_owner(&init_mm, &init_task);
        setup_command_line(command_line);
        unwind_setup();
        setup_per_cpu_areas();
@@ -636,6 +640,8 @@ asmlinkage void __init start_kernel(void)
        enable_debug_pagealloc();
        cpu_hotplug_init();
        kmem_cache_init();
+       debug_objects_mem_init();
+       idr_init_cache();
        setup_per_cpu_pageset();
        numa_policy_init();
        if (late_time_init)
@@ -700,10 +706,8 @@ static void __init do_initcalls(void)
                int result;
 
                if (initcall_debug) {
-                       printk("Calling initcall 0x%p", *call);
-                       print_fn_descriptor_symbol(": %s()",
+                       print_fn_descriptor_symbol("calling  %s()\n",
                                        (unsigned long) *call);
-                       printk("\n");
                        t0 = ktime_get();
                }
 
@@ -713,15 +717,10 @@ static void __init do_initcalls(void)
                        t1 = ktime_get();
                        delta = ktime_sub(t1, t0);
 
-                       printk("initcall 0x%p", *call);
-                       print_fn_descriptor_symbol(": %s()",
+                       print_fn_descriptor_symbol("initcall %s()",
                                        (unsigned long) *call);
-                       printk(" returned %d.\n", result);
-
-                       printk("initcall 0x%p ran for %Ld msecs: ",
-                               *call, (unsigned long long)delta.tv64 >> 20);
-                       print_fn_descriptor_symbol("%s()\n",
-                               (unsigned long) *call);
+                       printk(" returned %d after %Ld msecs\n", result,
+                               (unsigned long long) delta.tv64 >> 20);
                }
 
                if (result && result != -ENODEV && initcall_debug) {
@@ -737,10 +736,9 @@ static void __init do_initcalls(void)
                        local_irq_enable();
                }
                if (msg) {
-                       printk(KERN_WARNING "initcall at 0x%p", *call);
-                       print_fn_descriptor_symbol(": %s()",
+                       print_fn_descriptor_symbol(KERN_WARNING "initcall %s()",
                                        (unsigned long) *call);
-                       printk(": returned with %s\n", msg);
+                       printk(" returned with %s\n", msg);
                }
        }
 
@@ -807,6 +805,8 @@ static int noinline init_post(void)
        (void) sys_dup(0);
        (void) sys_dup(0);
 
+       current->signal->flags |= SIGNAL_UNKILLABLE;
+
        if (ramdisk_execute_command) {
                run_init_process(ramdisk_execute_command);
                printk(KERN_WARNING "Failed to execute %s\n",
index 5fc5e33ea047c9f46b18948d06e0133733dba8dc..65c384395801aa2e9f19ec318735b9dc35c44c42 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 obj-$(CONFIG_SYSVIPC_COMPAT) += compat.o
-obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o
+obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o ipcns_notifier.o
 obj-$(CONFIG_SYSVIPC_SYSCTL) += ipc_sysctl.o
 obj_mq-$(CONFIG_COMPAT) += compat_mq.o
 obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o $(obj_mq-y)
index 7f4235bed51bd92323cc6807ca68171c283633e2..d3497465cc0a5aa461dabce18b0f7af4dc537cea 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/sysctl.h>
 #include <linux/uaccess.h>
 #include <linux/ipc_namespace.h>
+#include <linux/msg.h>
+#include "util.h"
 
 static void *get_ipc(ctl_table *table)
 {
@@ -24,6 +26,27 @@ static void *get_ipc(ctl_table *table)
        return which;
 }
 
+/*
+ * Routine that is called when a tunable has successfully been changed by
+ * hand and it has a callback routine registered on the ipc namespace notifier
+ * chain: we don't want such tunables to be recomputed anymore upon memory
+ * add/remove or ipc namespace creation/removal.
+ * They can come back to a recomputable state by being set to a <0 value.
+ */
+static void tunable_set_callback(int val)
+{
+       if (val >= 0)
+               unregister_ipcns_notifier(current->nsproxy->ipc_ns);
+       else {
+               /*
+                * Re-enable automatic recomputing only if not already
+                * enabled.
+                */
+               recompute_msgmni(current->nsproxy->ipc_ns);
+               cond_register_ipcns_notifier(current->nsproxy->ipc_ns);
+       }
+}
+
 #ifdef CONFIG_PROC_FS
 static int proc_ipc_dointvec(ctl_table *table, int write, struct file *filp,
        void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -35,6 +58,24 @@ static int proc_ipc_dointvec(ctl_table *table, int write, struct file *filp,
        return proc_dointvec(&ipc_table, write, filp, buffer, lenp, ppos);
 }
 
+static int proc_ipc_callback_dointvec(ctl_table *table, int write,
+       struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       struct ctl_table ipc_table;
+       size_t lenp_bef = *lenp;
+       int rc;
+
+       memcpy(&ipc_table, table, sizeof(ipc_table));
+       ipc_table.data = get_ipc(table);
+
+       rc = proc_dointvec(&ipc_table, write, filp, buffer, lenp, ppos);
+
+       if (write && !rc && lenp_bef == *lenp)
+               tunable_set_callback(*((int *)(ipc_table.data)));
+
+       return rc;
+}
+
 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
        struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -49,6 +90,7 @@ static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
 #else
 #define proc_ipc_doulongvec_minmax NULL
 #define proc_ipc_dointvec         NULL
+#define proc_ipc_callback_dointvec NULL
 #endif
 
 #ifdef CONFIG_SYSCTL_SYSCALL
@@ -90,8 +132,30 @@ static int sysctl_ipc_data(ctl_table *table, int __user *name, int nlen,
        }
        return 1;
 }
+
+static int sysctl_ipc_registered_data(ctl_table *table, int __user *name,
+               int nlen, void __user *oldval, size_t __user *oldlenp,
+               void __user *newval, size_t newlen)
+{
+       int rc;
+
+       rc = sysctl_ipc_data(table, name, nlen, oldval, oldlenp, newval,
+               newlen);
+
+       if (newval && newlen && rc > 0) {
+               /*
+                * Tunable has successfully been changed from userland
+                */
+               int *data = get_ipc(table);
+
+               tunable_set_callback(*data);
+       }
+
+       return rc;
+}
 #else
 #define sysctl_ipc_data NULL
+#define sysctl_ipc_registered_data NULL
 #endif
 
 static struct ctl_table ipc_kern_table[] = {
@@ -137,8 +201,8 @@ static struct ctl_table ipc_kern_table[] = {
                .data           = &init_ipc_ns.msg_ctlmni,
                .maxlen         = sizeof (init_ipc_ns.msg_ctlmni),
                .mode           = 0644,
-               .proc_handler   = proc_ipc_dointvec,
-               .strategy       = sysctl_ipc_data,
+               .proc_handler   = proc_ipc_callback_dointvec,
+               .strategy       = sysctl_ipc_registered_data,
        },
        {
                .ctl_name       = KERN_MSGMNB,
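With the table entry above switched to proc_ipc_callback_dointvec, the msgmni tunable becomes asymmetric from userland: writing any value >= 0 pins it (tunable_set_callback unregisters the namespace from the ipcns notifier chain), while writing a negative value recomputes msgmni and re-registers the namespace for automatic scaling. A minimal userland sketch, not part of this commit and assuming the usual /proc/sys/kernel/msgmni path:

#include <stdio.h>

int main(void)
{
	/* A value < 0 makes tunable_set_callback() recompute msgmni and
	 * re-register the namespace on the ipcns notifier chain, so the
	 * tunable is scaled automatically again from now on. */
	FILE *f = fopen("/proc/sys/kernel/msgmni", "w");

	if (!f) {
		perror("/proc/sys/kernel/msgmni");
		return 1;
	}
	fprintf(f, "%d\n", -1);
	return fclose(f) ? 1 : 0;
}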
diff --git a/ipc/ipcns_notifier.c b/ipc/ipcns_notifier.c
new file mode 100644 (file)
index 0000000..70ff091
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * linux/ipc/ipcns_notifier.c
+ * Copyright (C) 2007 BULL SA. Nadia Derbey
+ *
+ * Notification mechanism for ipc namespaces:
+ * The callback routine registered in the memory chain invokes the ipcns
+ * notifier chain with the IPCNS_MEMCHANGED event.
+ * Each callback routine registered on the ipcns notifier chain recomputes msgmni
+ * for the owning namespace.
+ */
+
+#include <linux/msg.h>
+#include <linux/rcupdate.h>
+#include <linux/notifier.h>
+#include <linux/nsproxy.h>
+#include <linux/ipc_namespace.h>
+
+#include "util.h"
+
+
+
+static BLOCKING_NOTIFIER_HEAD(ipcns_chain);
+
+
+static int ipcns_callback(struct notifier_block *self,
+                               unsigned long action, void *arg)
+{
+       struct ipc_namespace *ns;
+
+       switch (action) {
+       case IPCNS_MEMCHANGED:   /* amount of lowmem has changed */
+       case IPCNS_CREATED:
+       case IPCNS_REMOVED:
+               /*
+                * It's time to recompute msgmni
+                */
+               ns = container_of(self, struct ipc_namespace, ipcns_nb);
+               /*
+                * No need to get a reference on the ns: the 1st job of
+                * free_ipc_ns() is to unregister the callback routine.
+                * blocking_notifier_chain_unregister takes the wr lock to do
+                * it.
+                * When this callback routine is called the rd lock is held by
+                * blocking_notifier_call_chain.
+                * So the ipc ns cannot be freed while we are here.
+                */
+               recompute_msgmni(ns);
+               break;
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+int register_ipcns_notifier(struct ipc_namespace *ns)
+{
+       memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb));
+       ns->ipcns_nb.notifier_call = ipcns_callback;
+       ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI;
+       return blocking_notifier_chain_register(&ipcns_chain, &ns->ipcns_nb);
+}
+
+int cond_register_ipcns_notifier(struct ipc_namespace *ns)
+{
+       memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb));
+       ns->ipcns_nb.notifier_call = ipcns_callback;
+       ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI;
+       return blocking_notifier_chain_cond_register(&ipcns_chain,
+                                                       &ns->ipcns_nb);
+}
+
+int unregister_ipcns_notifier(struct ipc_namespace *ns)
+{
+       return blocking_notifier_chain_unregister(&ipcns_chain,
+                                               &ns->ipcns_nb);
+}
+
+int ipcns_notify(unsigned long val)
+{
+       return blocking_notifier_call_chain(&ipcns_chain, val, NULL);
+}
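For orientation, a condensed sketch of how the routines defined in this new file are meant to be wired together; the real call sites appear in the ipc/namespace.c and ipc/util.c hunks later in this diff, so this is illustrative only, not additional patch content:

/* Illustrative only -- mirrors the ordering used elsewhere in this patch. */
static void example_ipcns_lifecycle(struct ipc_namespace *ns)
{
	/* creation: compute msgmni, tell the other namespaces, then join */
	atomic_inc(&nr_ipc_ns);
	msg_init_ns(ns);		/* runs recompute_msgmni() for this ns */
	ipcns_notify(IPCNS_CREATED);	/* existing namespaces rescale */
	register_ipcns_notifier(ns);	/* now this ns rescales too */

	/* destruction: leave the chain before tearing the ns down */
	unregister_ipcns_notifier(ns);
	msg_exit_ns(ns);
	atomic_dec(&nr_ipc_ns);
	ipcns_notify(IPCNS_REMOVED);	/* recompute with the decremented count */
}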
index 46585a05473eceb84f23c456e0f7dc96ab0bfbf6..32494e8cc7a5cd5fa72b874da80c149f007e84cb 100644 (file)
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -27,6 +27,7 @@
 #include <linux/msg.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/mm.h>
 #include <linux/proc_fs.h>
 #include <linux/list.h>
 #include <linux/security.h>
@@ -70,7 +71,6 @@ struct msg_sender {
 #define msg_ids(ns)    ((ns)->ids[IPC_MSG_IDS])
 
 #define msg_unlock(msq)                ipc_unlock(&(msq)->q_perm)
-#define msg_buildid(id, seq)   ipc_buildid(id, seq)
 
 static void freeque(struct ipc_namespace *, struct kern_ipc_perm *);
 static int newque(struct ipc_namespace *, struct ipc_params *);
@@ -78,11 +78,49 @@ static int newque(struct ipc_namespace *, struct ipc_params *);
 static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
 #endif
 
+/*
+ * Scale msgmni with the available lowmem size: the memory dedicated to msg
+ * queues should occupy at most 1/MSG_MEM_SCALE of lowmem.
+ * Also take into account the number of ipc namespaces created so far.
+ * This should be done while staying within the (MSGMNI, IPCMNI/nr_ipc_ns) range.
+ */
+void recompute_msgmni(struct ipc_namespace *ns)
+{
+       struct sysinfo i;
+       unsigned long allowed;
+       int nb_ns;
+
+       si_meminfo(&i);
+       allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
+               / MSGMNB;
+       nb_ns = atomic_read(&nr_ipc_ns);
+       allowed /= nb_ns;
+
+       if (allowed < MSGMNI) {
+               ns->msg_ctlmni = MSGMNI;
+               goto out_callback;
+       }
+
+       if (allowed > IPCMNI / nb_ns) {
+               ns->msg_ctlmni = IPCMNI / nb_ns;
+               goto out_callback;
+       }
+
+       ns->msg_ctlmni = allowed;
+
+out_callback:
+
+       printk(KERN_INFO "msgmni has been set to %d for ipc namespace %p\n",
+               ns->msg_ctlmni, ns);
+}
+
 void msg_init_ns(struct ipc_namespace *ns)
 {
        ns->msg_ctlmax = MSGMAX;
        ns->msg_ctlmnb = MSGMNB;
-       ns->msg_ctlmni = MSGMNI;
+
+       recompute_msgmni(ns);
+
        atomic_set(&ns->msg_bytes, 0);
        atomic_set(&ns->msg_hdrs, 0);
        ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
@@ -103,21 +141,6 @@ void __init msg_init(void)
                                IPC_MSG_IDS, sysvipc_msg_proc_show);
 }
 
-/*
- * This routine is called in the paths where the rw_mutex is held to protect
- * access to the idr tree.
- */
-static inline struct msg_queue *msg_lock_check_down(struct ipc_namespace *ns,
-                                               int id)
-{
-       struct kern_ipc_perm *ipcp = ipc_lock_check_down(&msg_ids(ns), id);
-
-       if (IS_ERR(ipcp))
-               return (struct msg_queue *)ipcp;
-
-       return container_of(ipcp, struct msg_queue, q_perm);
-}
-
 /*
  * msg_lock_(check_) routines are called in the paths where the rw_mutex
  * is not held.
@@ -186,7 +209,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
                return id;
        }
 
-       msq->q_perm.id = msg_buildid(id, msq->q_perm.seq);
        msq->q_stime = msq->q_rtime = 0;
        msq->q_ctime = get_seconds();
        msq->q_cbytes = msq->q_qnum = 0;
@@ -324,19 +346,19 @@ copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
                out.msg_rtime           = in->msg_rtime;
                out.msg_ctime           = in->msg_ctime;
 
-               if (in->msg_cbytes > USHRT_MAX)
-                       out.msg_cbytes  = USHRT_MAX;
+               if (in->msg_cbytes > USHORT_MAX)
+                       out.msg_cbytes  = USHORT_MAX;
                else
                        out.msg_cbytes  = in->msg_cbytes;
                out.msg_lcbytes         = in->msg_cbytes;
 
-               if (in->msg_qnum > USHRT_MAX)
-                       out.msg_qnum    = USHRT_MAX;
+               if (in->msg_qnum > USHORT_MAX)
+                       out.msg_qnum    = USHORT_MAX;
                else
                        out.msg_qnum    = in->msg_qnum;
 
-               if (in->msg_qbytes > USHRT_MAX)
-                       out.msg_qbytes  = USHRT_MAX;
+               if (in->msg_qbytes > USHORT_MAX)
+                       out.msg_qbytes  = USHORT_MAX;
                else
                        out.msg_qbytes  = in->msg_qbytes;
                out.msg_lqbytes         = in->msg_qbytes;
@@ -351,31 +373,14 @@ copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
        }
 }
 
-struct msq_setbuf {
-       unsigned long   qbytes;
-       uid_t           uid;
-       gid_t           gid;
-       mode_t          mode;
-};
-
 static inline unsigned long
-copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
+copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
 {
        switch(version) {
        case IPC_64:
-       {
-               struct msqid64_ds tbuf;
-
-               if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
+               if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
-
-               out->qbytes             = tbuf.msg_qbytes;
-               out->uid                = tbuf.msg_perm.uid;
-               out->gid                = tbuf.msg_perm.gid;
-               out->mode               = tbuf.msg_perm.mode;
-
                return 0;
-       }
        case IPC_OLD:
        {
                struct msqid_ds tbuf_old;
@@ -383,14 +388,14 @@ copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;
 
-               out->uid                = tbuf_old.msg_perm.uid;
-               out->gid                = tbuf_old.msg_perm.gid;
-               out->mode               = tbuf_old.msg_perm.mode;
+               out->msg_perm.uid       = tbuf_old.msg_perm.uid;
+               out->msg_perm.gid       = tbuf_old.msg_perm.gid;
+               out->msg_perm.mode      = tbuf_old.msg_perm.mode;
 
                if (tbuf_old.msg_qbytes == 0)
-                       out->qbytes     = tbuf_old.msg_lqbytes;
+                       out->msg_qbytes = tbuf_old.msg_lqbytes;
                else
-                       out->qbytes     = tbuf_old.msg_qbytes;
+                       out->msg_qbytes = tbuf_old.msg_qbytes;
 
                return 0;
        }
@@ -399,10 +404,71 @@ copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
        }
 }
 
-asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
+/*
+ * This function handles some msgctl commands which require the rw_mutex
+ * to be held in write mode.
+ * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ */
+static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
+                      struct msqid_ds __user *buf, int version)
 {
        struct kern_ipc_perm *ipcp;
-       struct msq_setbuf uninitialized_var(setbuf);
+       struct msqid64_ds msqid64;
+       struct msg_queue *msq;
+       int err;
+
+       if (cmd == IPC_SET) {
+               if (copy_msqid_from_user(&msqid64, buf, version))
+                       return -EFAULT;
+       }
+
+       ipcp = ipcctl_pre_down(&msg_ids(ns), msqid, cmd,
+                              &msqid64.msg_perm, msqid64.msg_qbytes);
+       if (IS_ERR(ipcp))
+               return PTR_ERR(ipcp);
+
+       msq = container_of(ipcp, struct msg_queue, q_perm);
+
+       err = security_msg_queue_msgctl(msq, cmd);
+       if (err)
+               goto out_unlock;
+
+       switch (cmd) {
+       case IPC_RMID:
+               freeque(ns, ipcp);
+               goto out_up;
+       case IPC_SET:
+               if (msqid64.msg_qbytes > ns->msg_ctlmnb &&
+                   !capable(CAP_SYS_RESOURCE)) {
+                       err = -EPERM;
+                       goto out_unlock;
+               }
+
+               msq->q_qbytes = msqid64.msg_qbytes;
+
+               ipc_update_perm(&msqid64.msg_perm, ipcp);
+               msq->q_ctime = get_seconds();
+               /* sleeping receivers might be excluded by
+                * stricter permissions.
+                */
+               expunge_all(msq, -EAGAIN);
+               /* sleeping senders might be able to send
+                * due to a larger queue size.
+                */
+               ss_wakeup(&msq->q_senders, 0);
+               break;
+       default:
+               err = -EINVAL;
+       }
+out_unlock:
+       msg_unlock(msq);
+out_up:
+       up_write(&msg_ids(ns).rw_mutex);
+       return err;
+}
+
+asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
+{
        struct msg_queue *msq;
        int err, version;
        struct ipc_namespace *ns;
@@ -498,82 +564,13 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
                return success_return;
        }
        case IPC_SET:
-               if (!buf)
-                       return -EFAULT;
-               if (copy_msqid_from_user(&setbuf, buf, version))
-                       return -EFAULT;
-               break;
        case IPC_RMID:
-               break;
+               err = msgctl_down(ns, msqid, cmd, buf, version);
+               return err;
        default:
                return  -EINVAL;
        }
 
-       down_write(&msg_ids(ns).rw_mutex);
-       msq = msg_lock_check_down(ns, msqid);
-       if (IS_ERR(msq)) {
-               err = PTR_ERR(msq);
-               goto out_up;
-       }
-
-       ipcp = &msq->q_perm;
-
-       err = audit_ipc_obj(ipcp);
-       if (err)
-               goto out_unlock_up;
-       if (cmd == IPC_SET) {
-               err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid,
-                                        setbuf.mode);
-               if (err)
-                       goto out_unlock_up;
-       }
-
-       err = -EPERM;
-       if (current->euid != ipcp->cuid &&
-           current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
-               /* We _could_ check for CAP_CHOWN above, but we don't */
-               goto out_unlock_up;
-
-       err = security_msg_queue_msgctl(msq, cmd);
-       if (err)
-               goto out_unlock_up;
-
-       switch (cmd) {
-       case IPC_SET:
-       {
-               err = -EPERM;
-               if (setbuf.qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
-                       goto out_unlock_up;
-
-               msq->q_qbytes = setbuf.qbytes;
-
-               ipcp->uid = setbuf.uid;
-               ipcp->gid = setbuf.gid;
-               ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
-                            (S_IRWXUGO & setbuf.mode);
-               msq->q_ctime = get_seconds();
-               /* sleeping receivers might be excluded by
-                * stricter permissions.
-                */
-               expunge_all(msq, -EAGAIN);
-               /* sleeping senders might be able to send
-                * due to a larger queue size.
-                */
-               ss_wakeup(&msq->q_senders, 0);
-               msg_unlock(msq);
-               break;
-       }
-       case IPC_RMID:
-               freeque(ns, &msq->q_perm);
-               break;
-       }
-       err = 0;
-out_up:
-       up_write(&msg_ids(ns).rw_mutex);
-       return err;
-out_unlock_up:
-       msg_unlock(msq);
-       goto out_up;
 out_unlock:
        msg_unlock(msq);
        return err;
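To make the scaling in recompute_msgmni() above concrete: the available lowmem is divided by MSG_MEM_SCALE and MSGMNB, shared across the ipc namespaces, and clamped to the (MSGMNI, IPCMNI/nr_ipc_ns) range. A standalone userland sketch of the same arithmetic; the constant values below are illustrative assumptions, not taken from this diff:

#include <stdio.h>

/* Illustrative stand-ins for the kernel constants. */
#define EX_MSG_MEM_SCALE	32
#define EX_MSGMNB		16384
#define EX_MSGMNI		16
#define EX_IPCMNI		32768

/* Same clamp logic as recompute_msgmni(), in userland form. */
static unsigned long example_msgmni(unsigned long lowmem_bytes, int nr_ns)
{
	unsigned long allowed = lowmem_bytes / EX_MSG_MEM_SCALE / EX_MSGMNB / nr_ns;

	if (allowed < EX_MSGMNI)
		return EX_MSGMNI;
	if (allowed > (unsigned long)EX_IPCMNI / nr_ns)
		return EX_IPCMNI / nr_ns;
	return allowed;
}

int main(void)
{
	/* e.g. 1 GiB of lowmem and one ipc namespace -> msgmni = 2048 */
	printf("msgmni = %lu\n", example_msgmni(1UL << 30, 1));
	return 0;
}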
index 1b967655eb350eba0c4308998e376a8d3a719c72..9171d948751e72c6c0c8fa1b8e60688ba4384a0c 100644 (file)
@@ -20,10 +20,20 @@ static struct ipc_namespace *clone_ipc_ns(struct ipc_namespace *old_ns)
        if (ns == NULL)
                return ERR_PTR(-ENOMEM);
 
+       atomic_inc(&nr_ipc_ns);
+
        sem_init_ns(ns);
        msg_init_ns(ns);
        shm_init_ns(ns);
 
+       /*
+        * msgmni has already been computed for the new ipc ns.
+        * Thus, do the ipcns creation notification before registering that
+        * new ipcns in the chain.
+        */
+       ipcns_notify(IPCNS_CREATED);
+       register_ipcns_notifier(ns);
+
        kref_init(&ns->kref);
        return ns;
 }
@@ -79,8 +89,24 @@ void free_ipc_ns(struct kref *kref)
        struct ipc_namespace *ns;
 
        ns = container_of(kref, struct ipc_namespace, kref);
+       /*
+        * Unregistering the ipcns notifier at the beginning guarantees
+        * that the ipc namespace won't be freed while we are inside the
+        * callback routine. Since the blocking_notifier_chain_XXX routines
+        * hold a rw lock on the notifier list, unregister_ipcns_notifier()
+        * won't take the rw lock before blocking_notifier_call_chain() has
+        * released the rd lock.
+        */
+       unregister_ipcns_notifier(ns);
        sem_exit_ns(ns);
        msg_exit_ns(ns);
        shm_exit_ns(ns);
        kfree(ns);
+       atomic_dec(&nr_ipc_ns);
+
+       /*
+        * Do the ipcns removal notification after decrementing nr_ipc_ns in
+        * order to have a correct value when recomputing msgmni.
+        */
+       ipcns_notify(IPCNS_REMOVED);
 }
index 0b45a4d383c6ae662845f5e26c6bc19f25c10a22..e9418df5ff3ecf0c3cb3bb6e6fbac9de5fcac828 100644 (file)
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -91,7 +91,6 @@
 
 #define sem_unlock(sma)                ipc_unlock(&(sma)->sem_perm)
 #define sem_checkid(sma, semid)        ipc_checkid(&sma->sem_perm, semid)
-#define sem_buildid(id, seq)   ipc_buildid(id, seq)
 
 static int newary(struct ipc_namespace *, struct ipc_params *);
 static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
@@ -141,21 +140,6 @@ void __init sem_init (void)
                                IPC_SEM_IDS, sysvipc_sem_proc_show);
 }
 
-/*
- * This routine is called in the paths where the rw_mutex is held to protect
- * access to the idr tree.
- */
-static inline struct sem_array *sem_lock_check_down(struct ipc_namespace *ns,
-                                               int id)
-{
-       struct kern_ipc_perm *ipcp = ipc_lock_check_down(&sem_ids(ns), id);
-
-       if (IS_ERR(ipcp))
-               return (struct sem_array *)ipcp;
-
-       return container_of(ipcp, struct sem_array, sem_perm);
-}
-
 /*
  * sem_lock_(check_) routines are called in the paths where the rw_mutex
  * is not held.
@@ -181,6 +165,25 @@ static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
        return container_of(ipcp, struct sem_array, sem_perm);
 }
 
+static inline void sem_lock_and_putref(struct sem_array *sma)
+{
+       ipc_lock_by_ptr(&sma->sem_perm);
+       ipc_rcu_putref(sma);
+}
+
+static inline void sem_getref_and_unlock(struct sem_array *sma)
+{
+       ipc_rcu_getref(sma);
+       ipc_unlock(&(sma)->sem_perm);
+}
+
+static inline void sem_putref(struct sem_array *sma)
+{
+       ipc_lock_by_ptr(&sma->sem_perm);
+       ipc_rcu_putref(sma);
+       ipc_unlock(&(sma)->sem_perm);
+}
+
 static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
 {
        ipc_rmid(&sem_ids(ns), &s->sem_perm);
@@ -268,7 +271,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
        }
        ns->used_sems += nsems;
 
-       sma->sem_perm.id = sem_buildid(id, sma->sem_perm.seq);
        sma->sem_base = (struct sem *) &sma[1];
        /* sma->sem_pending = NULL; */
        sma->sem_pending_last = &sma->sem_pending;
@@ -700,19 +702,15 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                int i;
 
                if(nsems > SEMMSL_FAST) {
-                       ipc_rcu_getref(sma);
-                       sem_unlock(sma);                        
+                       sem_getref_and_unlock(sma);
 
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if(sem_io == NULL) {
-                               ipc_lock_by_ptr(&sma->sem_perm);
-                               ipc_rcu_putref(sma);
-                               sem_unlock(sma);
+                               sem_putref(sma);
                                return -ENOMEM;
                        }
 
-                       ipc_lock_by_ptr(&sma->sem_perm);
-                       ipc_rcu_putref(sma);
+                       sem_lock_and_putref(sma);
                        if (sma->sem_perm.deleted) {
                                sem_unlock(sma);
                                err = -EIDRM;
@@ -733,38 +731,30 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
                int i;
                struct sem_undo *un;
 
-               ipc_rcu_getref(sma);
-               sem_unlock(sma);
+               sem_getref_and_unlock(sma);
 
                if(nsems > SEMMSL_FAST) {
                        sem_io = ipc_alloc(sizeof(ushort)*nsems);
                        if(sem_io == NULL) {
-                               ipc_lock_by_ptr(&sma->sem_perm);
-                               ipc_rcu_putref(sma);
-                               sem_unlock(sma);
+                               sem_putref(sma);
                                return -ENOMEM;
                        }
                }
 
                if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) {
-                       ipc_lock_by_ptr(&sma->sem_perm);
-                       ipc_rcu_putref(sma);
-                       sem_unlock(sma);
+                       sem_putref(sma);
                        err = -EFAULT;
                        goto out_free;
                }
 
                for (i = 0; i < nsems; i++) {
                        if (sem_io[i] > SEMVMX) {
-                               ipc_lock_by_ptr(&sma->sem_perm);
-                               ipc_rcu_putref(sma);
-                               sem_unlock(sma);
+                               sem_putref(sma);
                                err = -ERANGE;
                                goto out_free;
                        }
                }
-               ipc_lock_by_ptr(&sma->sem_perm);
-               ipc_rcu_putref(sma);
+               sem_lock_and_putref(sma);
                if (sma->sem_perm.deleted) {
                        sem_unlock(sma);
                        err = -EIDRM;
@@ -830,28 +820,14 @@ out_free:
        return err;
 }
 
-struct sem_setbuf {
-       uid_t   uid;
-       gid_t   gid;
-       mode_t  mode;
-};
-
-static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __user *buf, int version)
+static inline unsigned long
+copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
 {
        switch(version) {
        case IPC_64:
-           {
-               struct semid64_ds tbuf;
-
-               if(copy_from_user(&tbuf, buf, sizeof(tbuf)))
+               if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
-
-               out->uid        = tbuf.sem_perm.uid;
-               out->gid        = tbuf.sem_perm.gid;
-               out->mode       = tbuf.sem_perm.mode;
-
                return 0;
-           }
        case IPC_OLD:
            {
                struct semid_ds tbuf_old;
@@ -859,9 +835,9 @@ static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __
                if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;
 
-               out->uid        = tbuf_old.sem_perm.uid;
-               out->gid        = tbuf_old.sem_perm.gid;
-               out->mode       = tbuf_old.sem_perm.mode;
+               out->sem_perm.uid       = tbuf_old.sem_perm.uid;
+               out->sem_perm.gid       = tbuf_old.sem_perm.gid;
+               out->sem_perm.mode      = tbuf_old.sem_perm.mode;
 
                return 0;
            }
@@ -870,38 +846,29 @@ static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __
        }
 }
 
-static int semctl_down(struct ipc_namespace *ns, int semid, int semnum,
-               int cmd, int version, union semun arg)
+/*
+ * This function handles some semctl commands which require the rw_mutex
+ * to be held in write mode.
+ * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ */
+static int semctl_down(struct ipc_namespace *ns, int semid,
+                      int cmd, int version, union semun arg)
 {
        struct sem_array *sma;
        int err;
-       struct sem_setbuf uninitialized_var(setbuf);
+       struct semid64_ds semid64;
        struct kern_ipc_perm *ipcp;
 
        if(cmd == IPC_SET) {
-               if(copy_semid_from_user (&setbuf, arg.buf, version))
+               if (copy_semid_from_user(&semid64, arg.buf, version))
                        return -EFAULT;
        }
-       sma = sem_lock_check_down(ns, semid);
-       if (IS_ERR(sma))
-               return PTR_ERR(sma);
 
-       ipcp = &sma->sem_perm;
-
-       err = audit_ipc_obj(ipcp);
-       if (err)
-               goto out_unlock;
+       ipcp = ipcctl_pre_down(&sem_ids(ns), semid, cmd, &semid64.sem_perm, 0);
+       if (IS_ERR(ipcp))
+               return PTR_ERR(ipcp);
 
-       if (cmd == IPC_SET) {
-               err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
-               if (err)
-                       goto out_unlock;
-       }
-       if (current->euid != ipcp->cuid && 
-           current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) {
-               err=-EPERM;
-               goto out_unlock;
-       }
+       sma = container_of(ipcp, struct sem_array, sem_perm);
 
        err = security_sem_semctl(sma, cmd);
        if (err)
@@ -910,26 +877,19 @@ static int semctl_down(struct ipc_namespace *ns, int semid, int semnum,
        switch(cmd){
        case IPC_RMID:
                freeary(ns, ipcp);
-               err = 0;
-               break;
+               goto out_up;
        case IPC_SET:
-               ipcp->uid = setbuf.uid;
-               ipcp->gid = setbuf.gid;
-               ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
-                               | (setbuf.mode & S_IRWXUGO);
+               ipc_update_perm(&semid64.sem_perm, ipcp);
                sma->sem_ctime = get_seconds();
-               sem_unlock(sma);
-               err = 0;
                break;
        default:
-               sem_unlock(sma);
                err = -EINVAL;
-               break;
        }
-       return err;
 
 out_unlock:
        sem_unlock(sma);
+out_up:
+       up_write(&sem_ids(ns).rw_mutex);
        return err;
 }
 
@@ -963,9 +923,7 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg)
                return err;
        case IPC_RMID:
        case IPC_SET:
-               down_write(&sem_ids(ns).rw_mutex);
-               err = semctl_down(ns,semid,semnum,cmd,version,arg);
-               up_write(&sem_ids(ns).rw_mutex);
+               err = semctl_down(ns, semid, cmd, version, arg);
                return err;
        default:
                return -EINVAL;
@@ -1044,14 +1002,11 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
                return ERR_PTR(PTR_ERR(sma));
 
        nsems = sma->sem_nsems;
-       ipc_rcu_getref(sma);
-       sem_unlock(sma);
+       sem_getref_and_unlock(sma);
 
        new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
        if (!new) {
-               ipc_lock_by_ptr(&sma->sem_perm);
-               ipc_rcu_putref(sma);
-               sem_unlock(sma);
+               sem_putref(sma);
                return ERR_PTR(-ENOMEM);
        }
        new->semadj = (short *) &new[1];
@@ -1062,13 +1017,10 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid)
        if (un) {
                spin_unlock(&ulp->lock);
                kfree(new);
-               ipc_lock_by_ptr(&sma->sem_perm);
-               ipc_rcu_putref(sma);
-               sem_unlock(sma);
+               sem_putref(sma);
                goto out;
        }
-       ipc_lock_by_ptr(&sma->sem_perm);
-       ipc_rcu_putref(sma);
+       sem_lock_and_putref(sma);
        if (sma->sem_perm.deleted) {
                sem_unlock(sma);
                spin_unlock(&ulp->lock);
@@ -1298,6 +1250,7 @@ void exit_sem(struct task_struct *tsk)
        undo_list = tsk->sysvsem.undo_list;
        if (!undo_list)
                return;
+       tsk->sysvsem.undo_list = NULL;
 
        if (!atomic_dec_and_test(&undo_list->refcnt))
                return;
index cc63fae02f064d298689279a46599a8159b01df1..554429ade079ffbcd8b19fbfd657772db2eb36ec 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -60,7 +60,6 @@ static struct vm_operations_struct shm_vm_ops;
 
 #define shm_unlock(shp)                        \
        ipc_unlock(&(shp)->shm_perm)
-#define shm_buildid(id, seq)   ipc_buildid(id, seq)
 
 static int newseg(struct ipc_namespace *, struct ipc_params *);
 static void shm_open(struct vm_area_struct *vma);
@@ -127,18 +126,6 @@ static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns,
        return container_of(ipcp, struct shmid_kernel, shm_perm);
 }
 
-static inline struct shmid_kernel *shm_lock_check_down(
-                                               struct ipc_namespace *ns,
-                                               int id)
-{
-       struct kern_ipc_perm *ipcp = ipc_lock_check_down(&shm_ids(ns), id);
-
-       if (IS_ERR(ipcp))
-               return (struct shmid_kernel *)ipcp;
-
-       return container_of(ipcp, struct shmid_kernel, shm_perm);
-}
-
 /*
  * shm_lock_(check_) routines are called in the paths where the rw_mutex
  * is not held.
@@ -169,12 +156,6 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
 }
 
-static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp)
-{
-       return ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
-}
-
-
 
 /* This is called by fork, once for every shm attach. */
 static void shm_open(struct vm_area_struct *vma)
@@ -271,11 +252,9 @@ static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
 
        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
-       else if (vma->vm_policy) {
+       else if (vma->vm_policy)
                pol = vma->vm_policy;
-               mpol_get(pol);  /* get_vma_policy() expects this */
-       } else
-               pol = current->mempolicy;
+
        return pol;
 }
 #endif
@@ -418,7 +397,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
        if (IS_ERR(file))
                goto no_file;
 
-       id = shm_addid(ns, shp);
+       id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (id < 0) {
                error = id;
                goto no_id;
@@ -430,7 +409,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
        shp->shm_ctim = get_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
-       shp->shm_perm.id = shm_buildid(id, shp->shm_perm.seq);
        shp->shm_file = file;
        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
@@ -521,28 +499,14 @@ static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_
        }
 }
 
-struct shm_setbuf {
-       uid_t   uid;
-       gid_t   gid;
-       mode_t  mode;
-};     
-
-static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
+static inline unsigned long
+copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
 {
        switch(version) {
        case IPC_64:
-           {
-               struct shmid64_ds tbuf;
-
-               if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
+               if (copy_from_user(out, buf, sizeof(*out)))
                        return -EFAULT;
-
-               out->uid        = tbuf.shm_perm.uid;
-               out->gid        = tbuf.shm_perm.gid;
-               out->mode       = tbuf.shm_perm.mode;
-
                return 0;
-           }
        case IPC_OLD:
            {
                struct shmid_ds tbuf_old;
@@ -550,9 +514,9 @@ static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __
                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;
 
-               out->uid        = tbuf_old.shm_perm.uid;
-               out->gid        = tbuf_old.shm_perm.gid;
-               out->mode       = tbuf_old.shm_perm.mode;
+               out->shm_perm.uid       = tbuf_old.shm_perm.uid;
+               out->shm_perm.gid       = tbuf_old.shm_perm.gid;
+               out->shm_perm.mode      = tbuf_old.shm_perm.mode;
 
                return 0;
            }
@@ -626,9 +590,53 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
        }
 }
 
-asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
+/*
+ * This function handles some shmctl commands which require the rw_mutex
+ * to be held in write mode.
+ * NOTE: no locks must be held, the rw_mutex is taken inside this function.
+ */
+static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
+                      struct shmid_ds __user *buf, int version)
+{
+       struct kern_ipc_perm *ipcp;
+       struct shmid64_ds shmid64;
+       struct shmid_kernel *shp;
+       int err;
+
+       if (cmd == IPC_SET) {
+               if (copy_shmid_from_user(&shmid64, buf, version))
+                       return -EFAULT;
+       }
+
+       ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
+       if (IS_ERR(ipcp))
+               return PTR_ERR(ipcp);
+
+       shp = container_of(ipcp, struct shmid_kernel, shm_perm);
+
+       err = security_shm_shmctl(shp, cmd);
+       if (err)
+               goto out_unlock;
+       switch (cmd) {
+       case IPC_RMID:
+               do_shm_rmid(ns, ipcp);
+               goto out_up;
+       case IPC_SET:
+               ipc_update_perm(&shmid64.shm_perm, ipcp);
+               shp->shm_ctim = get_seconds();
+               break;
+       default:
+               err = -EINVAL;
+       }
+out_unlock:
+       shm_unlock(shp);
+out_up:
+       up_write(&shm_ids(ns).rw_mutex);
+       return err;
+}
+
+asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
 {
-       struct shm_setbuf setbuf;
        struct shmid_kernel *shp;
        int err, version;
        struct ipc_namespace *ns;
@@ -785,97 +793,13 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf)
                goto out;
        }
        case IPC_RMID:
-       {
-               /*
-                *      We cannot simply remove the file. The SVID states
-                *      that the block remains until the last person
-                *      detaches from it, then is deleted. A shmat() on
-                *      an RMID segment is legal in older Linux and if 
-                *      we change it apps break...
-                *
-                *      Instead we set a destroyed flag, and then blow
-                *      the name away when the usage hits zero.
-                */
-               down_write(&shm_ids(ns).rw_mutex);
-               shp = shm_lock_check_down(ns, shmid);
-               if (IS_ERR(shp)) {
-                       err = PTR_ERR(shp);
-                       goto out_up;
-               }
-
-               err = audit_ipc_obj(&(shp->shm_perm));
-               if (err)
-                       goto out_unlock_up;
-
-               if (current->euid != shp->shm_perm.uid &&
-                   current->euid != shp->shm_perm.cuid && 
-                   !capable(CAP_SYS_ADMIN)) {
-                       err=-EPERM;
-                       goto out_unlock_up;
-               }
-
-               err = security_shm_shmctl(shp, cmd);
-               if (err)
-                       goto out_unlock_up;
-
-               do_shm_rmid(ns, &shp->shm_perm);
-               up_write(&shm_ids(ns).rw_mutex);
-               goto out;
-       }
-
        case IPC_SET:
-       {
-               if (!buf) {
-                       err = -EFAULT;
-                       goto out;
-               }
-
-               if (copy_shmid_from_user (&setbuf, buf, version)) {
-                       err = -EFAULT;
-                       goto out;
-               }
-               down_write(&shm_ids(ns).rw_mutex);
-               shp = shm_lock_check_down(ns, shmid);
-               if (IS_ERR(shp)) {
-                       err = PTR_ERR(shp);
-                       goto out_up;
-               }
-               err = audit_ipc_obj(&(shp->shm_perm));
-               if (err)
-                       goto out_unlock_up;
-               err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode);
-               if (err)
-                       goto out_unlock_up;
-               err=-EPERM;
-               if (current->euid != shp->shm_perm.uid &&
-                   current->euid != shp->shm_perm.cuid && 
-                   !capable(CAP_SYS_ADMIN)) {
-                       goto out_unlock_up;
-               }
-
-               err = security_shm_shmctl(shp, cmd);
-               if (err)
-                       goto out_unlock_up;
-               
-               shp->shm_perm.uid = setbuf.uid;
-               shp->shm_perm.gid = setbuf.gid;
-               shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO)
-                       | (setbuf.mode & S_IRWXUGO);
-               shp->shm_ctim = get_seconds();
-               break;
-       }
-
+               err = shmctl_down(ns, shmid, cmd, buf, version);
+               return err;
        default:
-               err = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
-       err = 0;
-out_unlock_up:
-       shm_unlock(shp);
-out_up:
-       up_write(&shm_ids(ns).rw_mutex);
-       goto out;
 out_unlock:
        shm_unlock(shp);
 out:
index fd1b50da9db8858793a11c0e6c503fb523280786..3339177b336cae51c0b5a8085ad9a087891bb0b2 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/audit.h>
 #include <linux/nsproxy.h>
 #include <linux/rwsem.h>
+#include <linux/memory.h>
 #include <linux/ipc_namespace.h>
 
 #include <asm/unistd.h>
@@ -52,11 +53,57 @@ struct ipc_namespace init_ipc_ns = {
        },
 };
 
+atomic_t nr_ipc_ns = ATOMIC_INIT(1);
+
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+
+static void ipc_memory_notifier(struct work_struct *work)
+{
+       ipcns_notify(IPCNS_MEMCHANGED);
+}
+
+static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);
+
+
+static int ipc_memory_callback(struct notifier_block *self,
+                               unsigned long action, void *arg)
+{
+       switch (action) {
+       case MEM_ONLINE:    /* memory successfully brought online */
+       case MEM_OFFLINE:   /* or offline: it's time to recompute msgmni */
+               /*
+                * This is done by invoking the ipcns notifier chain with the
+                * IPCNS_MEMCHANGED event.
+                * In order not to keep the lock on the hotplug memory chain
+                * for too long, queue a work item that will, when woken up,
+                * activate the ipcns notification chain.
+                * No need to keep several ipc work items on the queue.
+                */
+               if (!work_pending(&ipc_memory_wq))
+                       schedule_work(&ipc_memory_wq);
+               break;
+       case MEM_GOING_ONLINE:
+       case MEM_GOING_OFFLINE:
+       case MEM_CANCEL_ONLINE:
+       case MEM_CANCEL_OFFLINE:
+       default:
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
 /**
  *     ipc_init        -       initialise IPC subsystem
  *
  *     The various system5 IPC resources (semaphores, messages and shared
  *     memory) are initialised
+ *     A callback routine is registered into the memory hotplug notifier
+ *     chain: since msgmni scales to lowmem this callback routine will be
+ *     called upon successful memory add / remove to recompute msgmni.
  */
  
 static int __init ipc_init(void)
@@ -64,6 +111,8 @@ static int __init ipc_init(void)
        sem_init();
        msg_init();
        shm_init();
+       hotplug_memory_notifier(ipc_memory_callback, IPC_CALLBACK_PRI);
+       register_ipcns_notifier(&init_ipc_ns);
        return 0;
 }
 __initcall(ipc_init);
@@ -84,8 +133,8 @@ void ipc_init_ids(struct ipc_ids *ids)
        ids->seq = 0;
        {
                int seq_limit = INT_MAX/SEQ_MULTIPLIER;
-               if(seq_limit > USHRT_MAX)
-                       ids->seq_max = USHRT_MAX;
+               if (seq_limit > USHORT_MAX)
+                       ids->seq_max = USHORT_MAX;
                 else
                        ids->seq_max = seq_limit;
        }
@@ -116,13 +165,12 @@ void __init ipc_init_proc_interface(const char *path, const char *header,
        iface->ids      = ids;
        iface->show     = show;
 
-       pde = create_proc_entry(path,
-                               S_IRUGO,        /* world readable */
-                               NULL            /* parent dir */);
-       if (pde) {
-               pde->data = iface;
-               pde->proc_fops = &sysvipc_proc_fops;
-       } else {
+       pde = proc_create_data(path,
+                              S_IRUGO,        /* world readable */
+                              NULL,           /* parent dir */
+                              &sysvipc_proc_fops,
+                              iface);
+       if (!pde) {
                kfree(iface);
        }
 }
@@ -231,6 +279,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
        if(ids->seq > ids->seq_max)
                ids->seq = 0;
 
+       new->id = ipc_buildid(id, new->seq);
        spin_lock_init(&new->lock);
        new->deleted = 0;
        rcu_read_lock();
@@ -761,6 +810,70 @@ int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
                return ipcget_public(ns, ids, ops, params);
 }
 
+/**
+ * ipc_update_perm - update the permissions of an IPC.
+ * @in:  the permission given as input.
+ * @out: the permission of the ipc to set.
+ */
+void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
+{
+       out->uid = in->uid;
+       out->gid = in->gid;
+       out->mode = (out->mode & ~S_IRWXUGO)
+               | (in->mode & S_IRWXUGO);
+}
+
+/**
+ * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd
+ * @ids:  the table of ids where to look for the ipc
+ * @id:   the id of the ipc to retrieve
+ * @cmd:  the cmd to check
+ * @perm: the permission to set
+ * @extra_perm: one extra permission parameter used by msq
+ *
+ * This function does some common audit and permissions check for some IPC_XXX
+ * cmd and is called from semctl_down, shmctl_down and msgctl_down.
+ * It must be called without any lock held and
+ *  - retrieves the ipc with the given id in the given table.
+ *  - performs some audit and permission check, depending on the given cmd
+ *  - returns the ipc with both ipc and rw_mutex locks held in case of success
+ *    or an err-code without any lock held otherwise.
+ */
+struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd,
+                                     struct ipc64_perm *perm, int extra_perm)
+{
+       struct kern_ipc_perm *ipcp;
+       int err;
+
+       down_write(&ids->rw_mutex);
+       ipcp = ipc_lock_check_down(ids, id);
+       if (IS_ERR(ipcp)) {
+               err = PTR_ERR(ipcp);
+               goto out_up;
+       }
+
+       err = audit_ipc_obj(ipcp);
+       if (err)
+               goto out_unlock;
+
+       if (cmd == IPC_SET) {
+               err = audit_ipc_set_perm(extra_perm, perm->uid,
+                                        perm->gid, perm->mode);
+               if (err)
+                       goto out_unlock;
+       }
+       if (current->euid == ipcp->cuid ||
+           current->euid == ipcp->uid || capable(CAP_SYS_ADMIN))
+               return ipcp;
+
+       err = -EPERM;
+out_unlock:
+       ipc_unlock(ipcp);
+out_up:
+       up_write(&ids->rw_mutex);
+       return ERR_PTR(err);
+}
+
 #ifdef __ARCH_WANT_IPC_PARSE_VERSION
 
 
index f37d160c98fe7c5c42b44d9611979a4a79d793fc..cdb966aebe0716e8375617431d5734ed45aa639f 100644 (file)
@@ -12,7 +12,6 @@
 
 #include <linux/err.h>
 
-#define USHRT_MAX 0xffff
 #define SEQ_MULTIPLIER (IPCMNI)
 
 void sem_init (void);
@@ -112,6 +111,9 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
 
 void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
 void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);
+void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out);
+struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd,
+                                     struct ipc64_perm *perm, int extra_perm);
 
 #if defined(__ia64__) || defined(__x86_64__) || defined(__hppa__) || defined(__XTENSA__)
   /* On IA-64, we always use the "64-bit version" of the IPC structures.  */ 
@@ -124,6 +126,8 @@ extern void free_msg(struct msg_msg *msg);
 extern struct msg_msg *load_msg(const void __user *src, int len);
 extern int store_msg(void __user *dest, struct msg_msg *msg, int len);
 
+extern void recompute_msgmni(struct ipc_namespace *);
+
 static inline int ipc_buildid(int id, int seq)
 {
        return SEQ_MULTIPLIER * seq + id;
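ipcctl_pre_down(), declared in the util.h hunk above and defined in the ipc/util.c hunk earlier in this diff, hands its callers the object with both the per-object lock and the ids rw_mutex held on success, and nothing held on error. A stripped-down caller skeleton, modelled on the msgctl_down/semctl_down/shmctl_down functions in this diff and shown here for illustration only:

/* Illustrative caller skeleton for ipcctl_pre_down(). */
static int example_ctl_down(struct ipc_ids *ids, int id, int cmd,
			    struct ipc64_perm *perm)
{
	struct kern_ipc_perm *ipcp;
	int err = 0;

	ipcp = ipcctl_pre_down(ids, id, cmd, perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);	/* no locks held on the error path */

	/* ... cmd-specific work here, with both locks held ... */

	ipc_unlock(ipcp);		/* drop the per-object lock first */
	up_write(&ids->rw_mutex);	/* then release the ids rw_mutex */
	return err;
}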
index 6c5f081132a41862f1d8ffe3cee713e44d553685..188c43223f52a33e3b948dba43124a70cd349e30 100644 (file)
@@ -11,7 +11,7 @@ obj-y     = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
            hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
            notifier.o ksysfs.o pm_qos_params.o
 
-obj-$(CONFIG_SYSCTL) += sysctl_check.o
+obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += time/
 obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
index a7b16086d36f7b863a19697eed4877b67226b197..b7d3709cc452afdba06492ecf2c977bab04dd530 100644 (file)
@@ -126,6 +126,8 @@ static int     audit_freelist_count;
 static LIST_HEAD(audit_freelist);
 
 static struct sk_buff_head audit_skb_queue;
+/* queue of skbs to send to auditd when/if it comes back */
+static struct sk_buff_head audit_skb_hold_queue;
 static struct task_struct *kauditd_task;
 static DECLARE_WAIT_QUEUE_HEAD(kauditd_wait);
 static DECLARE_WAIT_QUEUE_HEAD(audit_backlog_wait);
@@ -154,6 +156,11 @@ struct audit_buffer {
        gfp_t                gfp_mask;
 };
 
+struct audit_reply {
+       int pid;
+       struct sk_buff *skb;
+};
+
 static void audit_set_pid(struct audit_buffer *ab, pid_t pid)
 {
        if (ab) {
@@ -252,14 +259,15 @@ void audit_log_lost(const char *message)
 }
 
 static int audit_log_config_change(char *function_name, int new, int old,
-                                  uid_t loginuid, u32 sid, int allow_changes)
+                                  uid_t loginuid, u32 sessionid, u32 sid,
+                                  int allow_changes)
 {
        struct audit_buffer *ab;
        int rc = 0;
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
-       audit_log_format(ab, "%s=%d old=%d by auid=%u", function_name, new,
-                        old, loginuid);
+       audit_log_format(ab, "%s=%d old=%d auid=%u ses=%u", function_name, new,
+                        old, loginuid, sessionid);
        if (sid) {
                char *ctx = NULL;
                u32 len;
@@ -279,7 +287,8 @@ static int audit_log_config_change(char *function_name, int new, int old,
 }
 
 static int audit_do_config_change(char *function_name, int *to_change,
-                                 int new, uid_t loginuid, u32 sid)
+                                 int new, uid_t loginuid, u32 sessionid,
+                                 u32 sid)
 {
        int allow_changes, rc = 0, old = *to_change;
 
@@ -290,8 +299,8 @@ static int audit_do_config_change(char *function_name, int *to_change,
                allow_changes = 1;
 
        if (audit_enabled != AUDIT_OFF) {
-               rc = audit_log_config_change(function_name, new, old,
-                                            loginuid, sid, allow_changes);
+               rc = audit_log_config_change(function_name, new, old, loginuid,
+                                            sessionid, sid, allow_changes);
                if (rc)
                        allow_changes = 0;
        }
@@ -305,26 +314,28 @@ static int audit_do_config_change(char *function_name, int *to_change,
        return rc;
 }
 
-static int audit_set_rate_limit(int limit, uid_t loginuid, u32 sid)
+static int audit_set_rate_limit(int limit, uid_t loginuid, u32 sessionid,
+                               u32 sid)
 {
        return audit_do_config_change("audit_rate_limit", &audit_rate_limit,
-                                     limit, loginuid, sid);
+                                     limit, loginuid, sessionid, sid);
 }
 
-static int audit_set_backlog_limit(int limit, uid_t loginuid, u32 sid)
+static int audit_set_backlog_limit(int limit, uid_t loginuid, u32 sessionid,
+                                  u32 sid)
 {
        return audit_do_config_change("audit_backlog_limit", &audit_backlog_limit,
-                                     limit, loginuid, sid);
+                                     limit, loginuid, sessionid, sid);
 }
 
-static int audit_set_enabled(int state, uid_t loginuid, u32 sid)
+static int audit_set_enabled(int state, uid_t loginuid, u32 sessionid, u32 sid)
 {
        int rc;
        if (state < AUDIT_OFF || state > AUDIT_LOCKED)
                return -EINVAL;
 
        rc =  audit_do_config_change("audit_enabled", &audit_enabled, state,
-                                    loginuid, sid);
+                                    loginuid, sessionid, sid);
 
        if (!rc)
                audit_ever_enabled |= !!state;
@@ -332,7 +343,7 @@ static int audit_set_enabled(int state, uid_t loginuid, u32 sid)
        return rc;
 }
 
-static int audit_set_failure(int state, uid_t loginuid, u32 sid)
+static int audit_set_failure(int state, uid_t loginuid, u32 sessionid, u32 sid)
 {
        if (state != AUDIT_FAIL_SILENT
            && state != AUDIT_FAIL_PRINTK
@@ -340,7 +351,43 @@ static int audit_set_failure(int state, uid_t loginuid, u32 sid)
                return -EINVAL;
 
        return audit_do_config_change("audit_failure", &audit_failure, state,
-                                     loginuid, sid);
+                                     loginuid, sessionid, sid);
+}
+
+/*
+ * Queue skbs to be sent to auditd when/if it comes back.  These skbs should
+ * already have been sent via printk/syslog and so if these messages are dropped
+ * it is not a huge concern since we already passed the audit_log_lost()
+ * notification and stuff.  This is just nice to get audit messages during
+ * boot before auditd is running or messages generated while auditd is stopped.
+ * This only holds messages if audit_default is set, aka booting with audit=1
+ * or building your kernel that way.
+ */
+static void audit_hold_skb(struct sk_buff *skb)
+{
+       if (audit_default &&
+           skb_queue_len(&audit_skb_hold_queue) < audit_backlog_limit)
+               skb_queue_tail(&audit_skb_hold_queue, skb);
+       else
+               kfree_skb(skb);
+}
+
+static void kauditd_send_skb(struct sk_buff *skb)
+{
+       int err;
+       /* take a reference in case we can't send it and we want to hold it */
+       skb_get(skb);
+       err = netlink_unicast(audit_sock, skb, audit_nlk_pid, 0);
+       if (err < 0) {
+               BUG_ON(err != -ECONNREFUSED); /* Shouldn't happen */
+               printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid);
+               audit_log_lost("auditd disappeared\n");
+               audit_pid = 0;
+               /* we might get lucky and get this in the next auditd */
+               audit_hold_skb(skb);
+       } else
+               /* drop the extra reference if sent ok */
+               kfree_skb(skb);
 }
 
 static int kauditd_thread(void *dummy)
@@ -349,24 +396,41 @@ static int kauditd_thread(void *dummy)
 
        set_freezable();
        while (!kthread_should_stop()) {
+               /*
+                * if auditd just started, drain the queue of messages already
+                * sent to syslog/printk.  remember loss here is ok.  we already
+                * called audit_log_lost() if it didn't go out normally.  so the
+                * race between the skb_dequeue and the next check for audit_pid
+                * doesn't matter.
+                *
+                * if you ever find kauditd to be too slow we can get a perf win
+                * by doing our own locking and keeping better track if there
+                * are messages in this queue.  I don't see the need now, but
+                * in 5 years when I want to play with this again I'll see this
+                * note and still have no friggin idea what i'm thinking today.
+                */
+               if (audit_default && audit_pid) {
+                       skb = skb_dequeue(&audit_skb_hold_queue);
+                       if (unlikely(skb)) {
+                               while (skb && audit_pid) {
+                                       kauditd_send_skb(skb);
+                                       skb = skb_dequeue(&audit_skb_hold_queue);
+                               }
+                       }
+               }
+
                skb = skb_dequeue(&audit_skb_queue);
                wake_up(&audit_backlog_wait);
                if (skb) {
-                       if (audit_pid) {
-                               int err = netlink_unicast(audit_sock, skb, audit_nlk_pid, 0);
-                               if (err < 0) {
-                                       BUG_ON(err != -ECONNREFUSED); /* Shoudn't happen */
-                                       printk(KERN_ERR "audit: *NO* daemon at audit_pid=%d\n", audit_pid);
-                                       audit_log_lost("auditd dissapeared\n");
-                                       audit_pid = 0;
-                               }
-                       } else {
+                       if (audit_pid)
+                               kauditd_send_skb(skb);
+                       else {
                                if (printk_ratelimit())
-                                       printk(KERN_NOTICE "%s\n", skb->data +
-                                               NLMSG_SPACE(0));
+                                       printk(KERN_NOTICE "%s\n", skb->data + NLMSG_SPACE(0));
                                else
                                        audit_log_lost("printk limit exceeded\n");
-                               kfree_skb(skb);
+
+                               audit_hold_skb(skb);
                        }
                } else {
                        DECLARE_WAITQUEUE(wait, current);
@@ -385,13 +449,13 @@ static int kauditd_thread(void *dummy)
        return 0;
 }
 
-static int audit_prepare_user_tty(pid_t pid, uid_t loginuid)
+static int audit_prepare_user_tty(pid_t pid, uid_t loginuid, u32 sessionid)
 {
        struct task_struct *tsk;
        int err;
 
        read_lock(&tasklist_lock);
-       tsk = find_task_by_pid(pid);
+       tsk = find_task_by_vpid(pid);
        err = -ESRCH;
        if (!tsk)
                goto out;
@@ -404,7 +468,7 @@ static int audit_prepare_user_tty(pid_t pid, uid_t loginuid)
        if (err)
                goto out;
 
-       tty_audit_push_task(tsk, loginuid);
+       tty_audit_push_task(tsk, loginuid, sessionid);
 out:
        read_unlock(&tasklist_lock);
        return err;
@@ -469,6 +533,19 @@ nlmsg_failure:                     /* Used by NLMSG_PUT */
        return NULL;
 }
 
+static int audit_send_reply_thread(void *arg)
+{
+       struct audit_reply *reply = (struct audit_reply *)arg;
+
+       mutex_lock(&audit_cmd_mutex);
+       mutex_unlock(&audit_cmd_mutex);
+
+       /* Ignore failure. It'll only happen if the sender goes away,
+          because our timeout is set to infinite. */
+       netlink_unicast(audit_sock, reply->skb, reply->pid, 0);
+       kfree(reply);
+       return 0;
+}
 /**
  * audit_send_reply - send an audit reply message via netlink
  * @pid: process id to send reply to
@@ -485,14 +562,26 @@ nlmsg_failure:                    /* Used by NLMSG_PUT */
 void audit_send_reply(int pid, int seq, int type, int done, int multi,
                      void *payload, int size)
 {
-       struct sk_buff  *skb;
+       struct sk_buff *skb;
+       struct task_struct *tsk;
+       struct audit_reply *reply = kmalloc(sizeof(struct audit_reply),
+                                           GFP_KERNEL);
+
+       if (!reply)
+               return;
+
        skb = audit_make_reply(pid, seq, type, done, multi, payload, size);
        if (!skb)
                return;
-       /* Ignore failure. It'll only happen if the sender goes away,
-          because our timeout is set to infinite. */
-       netlink_unicast(audit_sock, skb, pid, 0);
-       return;
+
+       reply->pid = pid;
+       reply->skb = skb;
+
+       tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
+       if (IS_ERR(tsk)) {
+               kfree(reply);
+               kfree_skb(skb);
+       }
 }
 
 /*
@@ -534,7 +623,8 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
 }
 
 static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
-                                    u32 pid, u32 uid, uid_t auid, u32 sid)
+                                    u32 pid, u32 uid, uid_t auid, u32 ses,
+                                    u32 sid)
 {
        int rc = 0;
        char *ctx = NULL;
@@ -546,8 +636,8 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
        }
 
        *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
-       audit_log_format(*ab, "user pid=%d uid=%u auid=%u",
-                        pid, uid, auid);
+       audit_log_format(*ab, "user pid=%d uid=%u auid=%u ses=%u",
+                        pid, uid, auid, ses);
        if (sid) {
                rc = security_secid_to_secctx(sid, &ctx, &len);
                if (rc)
@@ -570,6 +660,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct audit_buffer     *ab;
        u16                     msg_type = nlh->nlmsg_type;
        uid_t                   loginuid; /* loginuid of sender */
+       u32                     sessionid;
        struct audit_sig_info   *sig_data;
        char                    *ctx = NULL;
        u32                     len;
@@ -591,6 +682,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        pid  = NETLINK_CREDS(skb)->pid;
        uid  = NETLINK_CREDS(skb)->uid;
        loginuid = NETLINK_CB(skb).loginuid;
+       sessionid = NETLINK_CB(skb).sessionid;
        sid  = NETLINK_CB(skb).sid;
        seq  = nlh->nlmsg_seq;
        data = NLMSG_DATA(nlh);
@@ -613,12 +705,12 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                status_get   = (struct audit_status *)data;
                if (status_get->mask & AUDIT_STATUS_ENABLED) {
                        err = audit_set_enabled(status_get->enabled,
-                                                       loginuid, sid);
+                                               loginuid, sessionid, sid);
                        if (err < 0) return err;
                }
                if (status_get->mask & AUDIT_STATUS_FAILURE) {
                        err = audit_set_failure(status_get->failure,
-                                                        loginuid, sid);
+                                               loginuid, sessionid, sid);
                        if (err < 0) return err;
                }
                if (status_get->mask & AUDIT_STATUS_PID) {
@@ -627,17 +719,17 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        if (audit_enabled != AUDIT_OFF)
                                audit_log_config_change("audit_pid", new_pid,
                                                        audit_pid, loginuid,
-                                                       sid, 1);
+                                                       sessionid, sid, 1);
 
                        audit_pid = new_pid;
                        audit_nlk_pid = NETLINK_CB(skb).pid;
                }
                if (status_get->mask & AUDIT_STATUS_RATE_LIMIT)
                        err = audit_set_rate_limit(status_get->rate_limit,
-                                                        loginuid, sid);
+                                                  loginuid, sessionid, sid);
                if (status_get->mask & AUDIT_STATUS_BACKLOG_LIMIT)
                        err = audit_set_backlog_limit(status_get->backlog_limit,
-                                                       loginuid, sid);
+                                                     loginuid, sessionid, sid);
                break;
        case AUDIT_USER:
        case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
@@ -649,12 +741,13 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                if (err == 1) {
                        err = 0;
                        if (msg_type == AUDIT_USER_TTY) {
-                               err = audit_prepare_user_tty(pid, loginuid);
+                               err = audit_prepare_user_tty(pid, loginuid,
+                                                            sessionid);
                                if (err)
                                        break;
                        }
                        audit_log_common_recv_msg(&ab, msg_type, pid, uid,
-                                                 loginuid, sid);
+                                                 loginuid, sessionid, sid);
 
                        if (msg_type != AUDIT_USER_TTY)
                                audit_log_format(ab, " msg='%.1024s'",
@@ -664,8 +757,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
                                audit_log_format(ab, " msg=");
                                size = nlmsg_len(nlh);
-                               audit_log_n_untrustedstring(ab, size,
-                                                           data);
+                               audit_log_n_untrustedstring(ab, data, size);
                        }
                        audit_set_pid(ab, pid);
                        audit_log_end(ab);
@@ -677,7 +769,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        return -EINVAL;
                if (audit_enabled == AUDIT_LOCKED) {
                        audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid,
-                                                 uid, loginuid, sid);
+                                                 uid, loginuid, sessionid, sid);
 
                        audit_log_format(ab, " audit_enabled=%d res=0",
                                         audit_enabled);
@@ -688,7 +780,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        case AUDIT_LIST:
                err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
                                           uid, seq, data, nlmsg_len(nlh),
-                                          loginuid, sid);
+                                          loginuid, sessionid, sid);
                break;
        case AUDIT_ADD_RULE:
        case AUDIT_DEL_RULE:
@@ -696,7 +788,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                        return -EINVAL;
                if (audit_enabled == AUDIT_LOCKED) {
                        audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid,
-                                                 uid, loginuid, sid);
+                                                 uid, loginuid, sessionid, sid);
 
                        audit_log_format(ab, " audit_enabled=%d res=0",
                                         audit_enabled);
@@ -707,13 +799,13 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        case AUDIT_LIST_RULES:
                err = audit_receive_filter(nlh->nlmsg_type, NETLINK_CB(skb).pid,
                                           uid, seq, data, nlmsg_len(nlh),
-                                          loginuid, sid);
+                                          loginuid, sessionid, sid);
                break;
        case AUDIT_TRIM:
                audit_trim_trees();
 
                audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid,
-                                         uid, loginuid, sid);
+                                         uid, loginuid, sessionid, sid);
 
                audit_log_format(ab, " op=trim res=1");
                audit_log_end(ab);
@@ -721,21 +813,21 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        case AUDIT_MAKE_EQUIV: {
                void *bufp = data;
                u32 sizes[2];
-               size_t len = nlmsg_len(nlh);
+               size_t msglen = nlmsg_len(nlh);
                char *old, *new;
 
                err = -EINVAL;
-               if (len < 2 * sizeof(u32))
+               if (msglen < 2 * sizeof(u32))
                        break;
                memcpy(sizes, bufp, 2 * sizeof(u32));
                bufp += 2 * sizeof(u32);
-               len -= 2 * sizeof(u32);
-               old = audit_unpack_string(&bufp, &len, sizes[0]);
+               msglen -= 2 * sizeof(u32);
+               old = audit_unpack_string(&bufp, &msglen, sizes[0]);
                if (IS_ERR(old)) {
                        err = PTR_ERR(old);
                        break;
                }
-               new = audit_unpack_string(&bufp, &len, sizes[1]);
+               new = audit_unpack_string(&bufp, &msglen, sizes[1]);
                if (IS_ERR(new)) {
                        err = PTR_ERR(new);
                        kfree(old);
@@ -745,7 +837,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                err = audit_tag_tree(old, new);
 
                audit_log_common_recv_msg(&ab, AUDIT_CONFIG_CHANGE, pid,
-                                         uid, loginuid, sid);
+                                         uid, loginuid, sessionid, sid);
 
                audit_log_format(ab, " op=make_equiv old=");
                audit_log_untrustedstring(ab, old);
@@ -779,7 +871,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                struct task_struct *tsk;
 
                read_lock(&tasklist_lock);
-               tsk = find_task_by_pid(pid);
+               tsk = find_task_by_vpid(pid);
                if (!tsk)
                        err = -ESRCH;
                else {
@@ -802,7 +894,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
                if (s->enabled != 0 && s->enabled != 1)
                        return -EINVAL;
                read_lock(&tasklist_lock);
-               tsk = find_task_by_pid(pid);
+               tsk = find_task_by_vpid(pid);
                if (!tsk)
                        err = -ESRCH;
                else {
@@ -877,6 +969,7 @@ static int __init audit_init(void)
                audit_sock->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
 
        skb_queue_head_init(&audit_skb_queue);
+       skb_queue_head_init(&audit_skb_hold_queue);
        audit_initialized = 1;
        audit_enabled = audit_default;
        audit_ever_enabled |= !!audit_default;
@@ -1199,7 +1292,7 @@ void audit_log_format(struct audit_buffer *ab, const char *fmt, ...)
  * This function will take the passed buf and convert it into a string of
  * ascii hex digits. The new string is placed onto the skb.
  */
-void audit_log_hex(struct audit_buffer *ab, const unsigned char *buf,
+void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf,
                size_t len)
 {
        int i, avail, new_len;
@@ -1235,8 +1328,8 @@ void audit_log_hex(struct audit_buffer *ab, const unsigned char *buf,
  * Format a string of no more than slen characters into the audit buffer,
  * enclosed in quote marks.
  */
-static void audit_log_n_string(struct audit_buffer *ab, size_t slen,
-                              const char *string)
+void audit_log_n_string(struct audit_buffer *ab, const char *string,
+                       size_t slen)
 {
        int avail, new_len;
        unsigned char *ptr;
@@ -1292,13 +1385,13 @@ int audit_string_contains_control(const char *string, size_t len)
  * The caller specifies the number of characters in the string to log, which may
  * or may not be the entire string.
  */
-void audit_log_n_untrustedstring(struct audit_buffer *ab, size_t len,
-                                const char *string)
+void audit_log_n_untrustedstring(struct audit_buffer *ab, const char *string,
+                                size_t len)
 {
        if (audit_string_contains_control(string, len))
-               audit_log_hex(ab, string, len);
+               audit_log_n_hex(ab, string, len);
        else
-               audit_log_n_string(ab, len, string);
+               audit_log_n_string(ab, string, len);
 }
 
 /**
@@ -1311,7 +1404,7 @@ void audit_log_n_untrustedstring(struct audit_buffer *ab, size_t len,
  */
 void audit_log_untrustedstring(struct audit_buffer *ab, const char *string)
 {
-       audit_log_n_untrustedstring(ab, strlen(string), string);
+       audit_log_n_untrustedstring(ab, string, strlen(string));
 }
 
 /* This is a helper-function to print the escaped d_path */
@@ -1355,19 +1448,23 @@ void audit_log_end(struct audit_buffer *ab)
                audit_log_lost("rate limit exceeded");
        } else {
                struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
+               nlh->nlmsg_len = ab->skb->len - NLMSG_SPACE(0);
+
                if (audit_pid) {
-                       nlh->nlmsg_len = ab->skb->len - NLMSG_SPACE(0);
                        skb_queue_tail(&audit_skb_queue, ab->skb);
-                       ab->skb = NULL;
                        wake_up_interruptible(&kauditd_wait);
-               } else if (nlh->nlmsg_type != AUDIT_EOE) {
-                       if (printk_ratelimit()) {
-                               printk(KERN_NOTICE "type=%d %s\n",
-                                       nlh->nlmsg_type,
-                                       ab->skb->data + NLMSG_SPACE(0));
-                       } else
-                               audit_log_lost("printk limit exceeded\n");
+               } else {
+                       if (nlh->nlmsg_type != AUDIT_EOE) {
+                               if (printk_ratelimit()) {
+                                       printk(KERN_NOTICE "type=%d %s\n",
+                                               nlh->nlmsg_type,
+                                               ab->skb->data + NLMSG_SPACE(0));
+                               } else
+                                       audit_log_lost("printk limit exceeded\n");
+                       }
+                       audit_hold_skb(ab->skb);
                }
+               ab->skb = NULL;
        }
        audit_buffer_free(ab);
 }
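
The hunks above change how records reach userspace: audit_log_end() now always
finalizes the netlink header, queues the skb for kauditd when a daemon has
registered, and otherwise falls back to printk and parks the record on the new
bounded hold queue (audit_hold_skb()) so a later auditd can still collect it.
What follows is a minimal userspace model of that policy, not kernel code;
every name in it (HOLD_QUEUE_MAX, log_record(), daemon_started()) is invented
for illustration.

/* Minimal userspace model of the hold-queue policy above: if auditd is
 * registered the record goes to the kauditd queue; otherwise it is printed
 * and, when audit_default is set, parked on a bounded hold queue that a later
 * auditd can drain. */
#include <stdio.h>

#define HOLD_QUEUE_MAX 64               /* stands in for audit_backlog_limit */

static int audit_default = 1;           /* this model "booted" with audit=1 */
static const char *hold_queue[HOLD_QUEUE_MAX];
static int hold_len;

static void hold_record(const char *msg)
{
        if (audit_default && hold_len < HOLD_QUEUE_MAX)
                hold_queue[hold_len++] = msg;   /* keep it for later */
        /* else: dropped, mirroring the kfree_skb() branch */
}

static void log_record(const char *msg, int daemon_present)
{
        if (daemon_present) {
                printf("unicast to auditd: %s\n", msg);  /* kauditd_send_skb() */
        } else {
                printf("printk fallback:  %s\n", msg);
                hold_record(msg);                        /* audit_hold_skb() */
        }
}

static void daemon_started(void)
{
        /* kauditd draining audit_skb_hold_queue once auditd registers */
        for (int i = 0; i < hold_len; i++)
                printf("drained to auditd: %s\n", hold_queue[i]);
        hold_len = 0;
}

int main(void)
{
        log_record("type=SYSCALL ...", 0);      /* before auditd is running */
        log_record("type=CWD ...", 0);
        daemon_started();                       /* auditd registers its pid */
        log_record("type=PATH ...", 1);
        return 0;
}
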
index 3cfc54ee3e1f054d6533bbe6370d272ba2748c25..9d6717412fecd75eb36029b1c65d84228602b723 100644 (file)
@@ -74,6 +74,11 @@ struct audit_entry {
        struct audit_krule      rule;
 };
 
+#ifdef CONFIG_AUDIT
+extern int audit_enabled;
+extern int audit_ever_enabled;
+#endif
+
 extern int audit_pid;
 
 #define AUDIT_INODE_BUCKETS    32
@@ -104,6 +109,9 @@ struct audit_netlink_list {
 int audit_send_list(void *);
 
 struct inotify_watch;
+/* Inotify handle */
+extern struct inotify_handle *audit_ih;
+
 extern void audit_free_parent(struct inotify_watch *);
 extern void audit_handle_ievent(struct inotify_watch *, u32, u32, u32,
                                const char *, struct inode *);
@@ -111,6 +119,7 @@ extern int selinux_audit_rule_update(void);
 
 extern struct mutex audit_filter_mutex;
 extern void audit_free_rule_rcu(struct rcu_head *);
+extern struct list_head audit_filter_list[];
 
 #ifdef CONFIG_AUDIT_TREE
 extern struct audit_chunk *audit_tree_lookup(const struct inode *);
@@ -137,6 +146,10 @@ extern void audit_put_tree(struct audit_tree *);
 
 extern char *audit_unpack_string(void **, size_t *, size_t);
 
+extern pid_t audit_sig_pid;
+extern uid_t audit_sig_uid;
+extern u32 audit_sig_sid;
+
 #ifdef CONFIG_AUDITSYSCALL
 extern int __audit_signal_info(int sig, struct task_struct *t);
 static inline int audit_signal_info(int sig, struct task_struct *t)
index 28fef6bf85348ea0e363a9ecfc1b362df817656e..0e0bd27e65129ec94e087753ba398c42f92acc7e 100644 (file)
@@ -89,14 +89,9 @@ struct list_head audit_filter_list[AUDIT_NR_FILTERS] = {
 
 DEFINE_MUTEX(audit_filter_mutex);
 
-/* Inotify handle */
-extern struct inotify_handle *audit_ih;
-
 /* Inotify events we care about. */
 #define AUDIT_IN_WATCH IN_MOVE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF
 
-extern int audit_enabled;
-
 void audit_free_parent(struct inotify_watch *i_watch)
 {
        struct audit_parent *parent;
@@ -272,7 +267,7 @@ static int audit_to_watch(struct audit_krule *krule, char *path, int len,
                return -EINVAL;
 
        watch = audit_init_watch(path);
-       if (unlikely(IS_ERR(watch)))
+       if (IS_ERR(watch))
                return PTR_ERR(watch);
 
        audit_get_watch(watch);
@@ -422,7 +417,7 @@ exit_err:
 static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
 {
        struct audit_entry *entry;
-       struct audit_field *f;
+       struct audit_field *ino_f;
        int err = 0;
        int i;
 
@@ -483,6 +478,10 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
                        if (f->val & ~15)
                                goto exit_free;
                        break;
+               case AUDIT_FILETYPE:
+                       if ((f->val & ~S_IFMT) > S_IFMT)
+                               goto exit_free;
+                       break;
                case AUDIT_INODE:
                        err = audit_to_inode(&entry->rule, f);
                        if (err)
@@ -504,9 +503,9 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
                }
        }
 
-       f = entry->rule.inode_f;
-       if (f) {
-               switch(f->op) {
+       ino_f = entry->rule.inode_f;
+       if (ino_f) {
+               switch(ino_f->op) {
                case AUDIT_NOT_EQUAL:
                        entry->rule.inode_f = NULL;
                case AUDIT_EQUAL:
@@ -531,7 +530,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
 {
        int err = 0;
        struct audit_entry *entry;
-       struct audit_field *f;
+       struct audit_field *ino_f;
        void *bufp;
        size_t remain = datasz - sizeof(struct audit_rule_data);
        int i;
@@ -654,14 +653,18 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                        if (f->val & ~15)
                                goto exit_free;
                        break;
+               case AUDIT_FILETYPE:
+                       if ((f->val & ~S_IFMT) > S_IFMT)
+                               goto exit_free;
+                       break;
                default:
                        goto exit_free;
                }
        }
 
-       f = entry->rule.inode_f;
-       if (f) {
-               switch(f->op) {
+       ino_f = entry->rule.inode_f;
+       if (ino_f) {
+               switch(ino_f->op) {
                case AUDIT_NOT_EQUAL:
                        entry->rule.inode_f = NULL;
                case AUDIT_EQUAL:
@@ -848,7 +851,7 @@ static struct audit_watch *audit_dupe_watch(struct audit_watch *old)
                return ERR_PTR(-ENOMEM);
 
        new = audit_init_watch(path);
-       if (unlikely(IS_ERR(new))) {
+       if (IS_ERR(new)) {
                kfree(path);
                goto out;
        }
@@ -989,7 +992,7 @@ static void audit_update_watch(struct audit_parent *parent,
                        audit_set_auditable(current->audit_context);
 
                nwatch = audit_dupe_watch(owatch);
-               if (unlikely(IS_ERR(nwatch))) {
+               if (IS_ERR(nwatch)) {
                        mutex_unlock(&audit_filter_mutex);
                        audit_panic("error updating watch, skipping");
                        return;
@@ -1004,7 +1007,7 @@ static void audit_update_watch(struct audit_parent *parent,
                        list_del_rcu(&oentry->list);
 
                        nentry = audit_dupe_rule(&oentry->rule, nwatch);
-                       if (unlikely(IS_ERR(nentry)))
+                       if (IS_ERR(nentry))
                                audit_panic("error updating watch, removing");
                        else {
                                int h = audit_hash_ino((u32)ino);
@@ -1500,8 +1503,9 @@ static void audit_list_rules(int pid, int seq, struct sk_buff_head *q)
 }
 
 /* Log rule additions and removals */
-static void audit_log_rule_change(uid_t loginuid, u32 sid, char *action,
-                                 struct audit_krule *rule, int res)
+static void audit_log_rule_change(uid_t loginuid, u32 sessionid, u32 sid,
+                                 char *action, struct audit_krule *rule,
+                                 int res)
 {
        struct audit_buffer *ab;
 
@@ -1511,7 +1515,7 @@ static void audit_log_rule_change(uid_t loginuid, u32 sid, char *action,
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
        if (!ab)
                return;
-       audit_log_format(ab, "auid=%u", loginuid);
+       audit_log_format(ab, "auid=%u ses=%u", loginuid, sessionid);
        if (sid) {
                char *ctx = NULL;
                u32 len;
@@ -1543,7 +1547,7 @@ static void audit_log_rule_change(uid_t loginuid, u32 sid, char *action,
  * @sid: SE Linux Security ID of sender
  */
 int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
-                        size_t datasz, uid_t loginuid, u32 sid)
+                        size_t datasz, uid_t loginuid, u32 sessionid, u32 sid)
 {
        struct task_struct *tsk;
        struct audit_netlink_list *dest;
@@ -1590,7 +1594,8 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
 
                err = audit_add_rule(entry,
                                     &audit_filter_list[entry->rule.listnr]);
-               audit_log_rule_change(loginuid, sid, "add", &entry->rule, !err);
+               audit_log_rule_change(loginuid, sessionid, sid, "add",
+                                     &entry->rule, !err);
 
                if (err)
                        audit_free_rule(entry);
@@ -1606,8 +1611,8 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
 
                err = audit_del_rule(entry,
                                     &audit_filter_list[entry->rule.listnr]);
-               audit_log_rule_change(loginuid, sid, "remove", &entry->rule,
-                                     !err);
+               audit_log_rule_change(loginuid, sessionid, sid, "remove",
+                                     &entry->rule, !err);
 
                audit_free_rule(entry);
                break;
@@ -1785,7 +1790,7 @@ int audit_update_lsm_rules(void)
                        watch = entry->rule.watch;
                        tree = entry->rule.tree;
                        nentry = audit_dupe_rule(&entry->rule, watch);
-                       if (unlikely(IS_ERR(nentry))) {
+                       if (IS_ERR(nentry)) {
                                /* save the first error encountered for the
                                 * return value */
                                if (!err)
index 56e56ed594a840e9061aac32b7aedfab076d81e9..c10e7aae04d795714fb8cf4ee9326eff4bf6c472 100644 (file)
@@ -68,9 +68,6 @@
 
 #include "audit.h"
 
-extern struct list_head audit_filter_list[];
-extern int audit_ever_enabled;
-
 /* AUDIT_NAMES is the number of slots we reserve in the audit_context
  * for saving names from getname(). */
 #define AUDIT_NAMES    20
@@ -283,6 +280,19 @@ static int audit_match_perm(struct audit_context *ctx, int mask)
        }
 }
 
+static int audit_match_filetype(struct audit_context *ctx, int which)
+{
+       unsigned index = which & ~S_IFMT;
+       mode_t mode = which & S_IFMT;
+       if (index >= ctx->name_count)
+               return 0;
+       if (ctx->names[index].ino == -1)
+               return 0;
+       if ((ctx->names[index].mode ^ mode) & S_IFMT)
+               return 0;
+       return 1;
+}
+
 /*
  * We keep a linked list of fixed-sized (31 pointer) arrays of audit_chunk *;
  * ->first_trees points to its beginning, ->trees - to the current end of data.
@@ -592,6 +602,9 @@ static int audit_filter_rules(struct task_struct *tsk,
                case AUDIT_PERM:
                        result = audit_match_perm(ctx, f->val);
                        break;
+               case AUDIT_FILETYPE:
+                       result = audit_match_filetype(ctx, f->val);
+                       break;
                }
 
                if (!result)
@@ -1095,7 +1108,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
                        audit_log_format(*ab, "[%d]", i);
                audit_log_format(*ab, "=");
                if (has_cntl)
-                       audit_log_hex(*ab, buf, to_send);
+                       audit_log_n_hex(*ab, buf, to_send);
                else
                        audit_log_format(*ab, "\"%s\"", buf);
                audit_log_format(*ab, "\n");
@@ -1296,7 +1309,6 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
                        break; }
 
                case AUDIT_SOCKETCALL: {
-                       int i;
                        struct audit_aux_data_socketcall *axs = (void *)aux;
                        audit_log_format(ab, "nargs=%d", axs->nargs);
                        for (i=0; i<axs->nargs; i++)
@@ -1307,7 +1319,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
                        struct audit_aux_data_sockaddr *axs = (void *)aux;
 
                        audit_log_format(ab, "saddr=");
-                       audit_log_hex(ab, axs->a, axs->len);
+                       audit_log_n_hex(ab, axs->a, axs->len);
                        break; }
 
                case AUDIT_FD_PAIR: {
@@ -1321,7 +1333,6 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
 
        for (aux = context->aux_pids; aux; aux = aux->next) {
                struct audit_aux_data_pids *axs = (void *)aux;
-               int i;
 
                for (i = 0; i < axs->pid_count; i++)
                        if (audit_log_pid_context(context, axs->target_pid[i],
@@ -1371,8 +1382,8 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
                        default:
                                /* log the name's directory component */
                                audit_log_format(ab, " name=");
-                               audit_log_n_untrustedstring(ab, n->name_len,
-                                                           n->name);
+                               audit_log_n_untrustedstring(ab, n->name,
+                                                           n->name_len);
                        }
                } else
                        audit_log_format(ab, " name=(null)");
@@ -1596,7 +1607,7 @@ static inline void handle_one(const struct inode *inode)
        if (likely(put_tree_ref(context, chunk)))
                return;
        if (unlikely(!grow_tree_refs(context))) {
-               printk(KERN_WARNING "out of memory, audit has lost a tree reference");
+               printk(KERN_WARNING "out of memory, audit has lost a tree reference\n");
                audit_set_auditable(context);
                audit_put_chunk(chunk);
                unroll_tree_refs(context, p, count);
@@ -1656,7 +1667,7 @@ retry:
                }
                /* too bad */
                printk(KERN_WARNING
-                       "out of memory, audit has lost a tree reference");
+                       "out of memory, audit has lost a tree reference\n");
                unroll_tree_refs(context, p, count);
                audit_set_auditable(context);
                return;
@@ -1752,13 +1763,13 @@ static int audit_inc_name_count(struct audit_context *context,
        if (context->name_count >= AUDIT_NAMES) {
                if (inode)
                        printk(KERN_DEBUG "name_count maxed, losing inode data: "
-                              "dev=%02x:%02x, inode=%lu",
+                              "dev=%02x:%02x, inode=%lu\n",
                               MAJOR(inode->i_sb->s_dev),
                               MINOR(inode->i_sb->s_dev),
                               inode->i_ino);
 
                else
-                       printk(KERN_DEBUG "name_count maxed, losing inode data");
+                       printk(KERN_DEBUG "name_count maxed, losing inode data\n");
                return 1;
        }
        context->name_count++;
@@ -2361,9 +2372,6 @@ int __audit_signal_info(int sig, struct task_struct *t)
        struct audit_aux_data_pids *axp;
        struct task_struct *tsk = current;
        struct audit_context *ctx = tsk->audit_context;
-       extern pid_t audit_sig_pid;
-       extern uid_t audit_sig_uid;
-       extern u32 audit_sig_sid;
 
        if (audit_pid && t->tgid == audit_pid) {
                if (sig == SIGTERM || sig == SIGHUP || sig == SIGUSR1) {
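
Across auditfilter.c and auditsc.c above, the new AUDIT_FILETYPE field packs
two things into a single rule value: the index of the collected name to test
(low bits) and an S_IFMT file-type code (the mode bits).  audit_rule_to_entry()
rejects values whose index part exceeds S_IFMT, and audit_match_filetype()
compares the recorded mode of that name slot against the requested type.  The
following is a small userspace model of the same packing and matching, slightly
simplified (it skips the ino == -1 check) and built only from <sys/stat.h>
constants.

/* Userspace model of the AUDIT_FILETYPE value layout: low bits select the
 * name slot, the S_IFMT bits carry the file type to match.  Illustrative
 * only; this is not a kernel interface. */
#include <stdio.h>
#include <sys/stat.h>

/* mirrors the range check in audit_rule_to_entry()/audit_data_to_entry() */
static int filetype_value_ok(unsigned int val)
{
        return (val & ~S_IFMT) <= S_IFMT;
}

/* mirrors audit_match_filetype(): does the mode recorded for name slot
 * 'index' carry the requested file type? */
static int match_filetype(const unsigned int *modes, unsigned int name_count,
                          unsigned int which)
{
        unsigned int index = which & ~S_IFMT;
        unsigned int mode  = which & S_IFMT;

        if (index >= name_count)
                return 0;
        return ((modes[index] ^ mode) & S_IFMT) == 0;
}

int main(void)
{
        /* pretend the syscall recorded two names: a directory and a FIFO */
        unsigned int modes[] = { S_IFDIR | 0755, S_IFIFO | 0644 };
        unsigned int rule = 1 | S_IFIFO;        /* "name[1] must be a FIFO" */

        printf("value ok: %d\n", filetype_value_ok(rule));
        printf("matches:  %d\n", match_filetype(modes, 2, rule));
        return 0;
}
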
diff --git a/kernel/bounds.c b/kernel/bounds.c
new file mode 100644 (file)
index 0000000..3c53013
--- /dev/null
@@ -0,0 +1,19 @@
+/*
+ * Generate definitions needed by the preprocessor.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+
+#define __GENERATING_BOUNDS_H
+/* Include headers that define the enum constants of interest */
+#include <linux/page-flags.h>
+#include <linux/mmzone.h>
+#include <linux/kbuild.h>
+
+void foo(void)
+{
+       /* The enum constants to put into include/linux/bounds.h */
+       DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
+       DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
+       /* End of constants */
+}
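
kernel/bounds.c above is never linked into the kernel: Kbuild compiles it to
assembly, the DEFINE() markers embed the enum values in that assembly, and a
sed rule turns them into a generated header (include/linux/bounds.h) whose
values are visible to the preprocessor, which plain enums are not.  The
DEFINE() macro quoted below is the one provided by <linux/kbuild.h> in this
kernel era; the generated-header contents shown afterwards are only an
example, since the actual numbers depend on the configuration.

/* DEFINE() from <linux/kbuild.h>: emits the compile-time value of 'val' into
 * the generated assembly as a "->sym value" marker. */
#define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

/* After the sed post-processing step, include/linux/bounds.h looks roughly
 * like this (values are configuration-dependent): */
#ifndef __LINUX_BOUNDS_H__
#define __LINUX_BOUNDS_H__
/* DO NOT MODIFY -- generated by Kbuild */
#define NR_PAGEFLAGS 22         /* __NR_PAGEFLAGS */
#define MAX_NR_ZONES 4          /* __MAX_NR_ZONES */
#endif
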
index 6d8de051382b0687ab1a449c245ebb2f05cd38f0..fbc6fc8949b4e718be405f500e977bb10841cf64 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/kmod.h>
 #include <linux/delayacct.h>
 #include <linux/cgroupstats.h>
+#include <linux/hash.h>
 
 #include <asm/atomic.h>
 
@@ -118,17 +119,7 @@ static int root_count;
  * be called.
  */
 static int need_forkexit_callback;
-
-/* bits in struct cgroup flags field */
-enum {
-       /* Control Group is dead */
-       CGRP_REMOVED,
-       /* Control Group has previously had a child cgroup or a task,
-        * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */
-       CGRP_RELEASABLE,
-       /* Control Group requires release notifications to userspace */
-       CGRP_NOTIFY_ON_RELEASE,
-};
+static int need_mm_owner_callback __read_mostly;
 
 /* convenient tests for these bits */
 inline int cgroup_is_removed(const struct cgroup *cgrp)
@@ -204,6 +195,27 @@ static struct cg_cgroup_link init_css_set_link;
 static DEFINE_RWLOCK(css_set_lock);
 static int css_set_count;
 
+/* hash table for cgroup groups. This improves the performance of
+ * finding an existing css_set */
+#define CSS_SET_HASH_BITS      7
+#define CSS_SET_TABLE_SIZE     (1 << CSS_SET_HASH_BITS)
+static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE];
+
+static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
+{
+       int i;
+       int index;
+       unsigned long tmp = 0UL;
+
+       for (i = 0; i < CGROUP_SUBSYS_COUNT; i++)
+               tmp += (unsigned long)css[i];
+       tmp = (tmp >> 16) ^ tmp;
+
+       index = hash_long(tmp, CSS_SET_HASH_BITS);
+
+       return &css_set_table[index];
+}
+
 /* We don't maintain the lists running through each css_set to its
  * task until after the first call to cgroup_iter_start(). This
  * reduces the fork()/exit() overhead for people who have cgroups
@@ -230,7 +242,7 @@ static int use_task_css_set_links;
 static void unlink_css_set(struct css_set *cg)
 {
        write_lock(&css_set_lock);
-       list_del(&cg->list);
+       hlist_del(&cg->hlist);
        css_set_count--;
        while (!list_empty(&cg->cg_links)) {
                struct cg_cgroup_link *link;
@@ -295,9 +307,7 @@ static inline void put_css_set_taskexit(struct css_set *cg)
 /*
  * find_existing_css_set() is a helper for
  * find_css_set(), and checks to see whether an existing
- * css_set is suitable. This currently walks a linked-list for
- * simplicity; a later patch will use a hash table for better
- * performance
+ * css_set is suitable.
  *
  * oldcg: the cgroup group that we're using before the cgroup
  * transition
@@ -314,7 +324,9 @@ static struct css_set *find_existing_css_set(
 {
        int i;
        struct cgroupfs_root *root = cgrp->root;
-       struct list_head *l = &init_css_set.list;
+       struct hlist_head *hhead;
+       struct hlist_node *node;
+       struct css_set *cg;
 
        /* Built the set of subsystem state objects that we want to
         * see in the new css_set */
@@ -331,18 +343,13 @@ static struct css_set *find_existing_css_set(
                }
        }
 
-       /* Look through existing cgroup groups to find one to reuse */
-       do {
-               struct css_set *cg =
-                       list_entry(l, struct css_set, list);
-
+       hhead = css_set_hash(template);
+       hlist_for_each_entry(cg, node, hhead, hlist) {
                if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) {
                        /* All subsystems matched */
                        return cg;
                }
-               /* Try the next cgroup group */
-               l = l->next;
-       } while (l != &init_css_set.list);
+       }
 
        /* No existing cgroup group matched */
        return NULL;
@@ -404,6 +411,8 @@ static struct css_set *find_css_set(
        struct list_head tmp_cg_links;
        struct cg_cgroup_link *link;
 
+       struct hlist_head *hhead;
+
        /* First see if we already have a cgroup group that matches
         * the desired set */
        write_lock(&css_set_lock);
@@ -428,6 +437,7 @@ static struct css_set *find_css_set(
        kref_init(&res->ref);
        INIT_LIST_HEAD(&res->cg_links);
        INIT_LIST_HEAD(&res->tasks);
+       INIT_HLIST_NODE(&res->hlist);
 
        /* Copy the set of subsystem state objects generated in
         * find_existing_css_set() */
@@ -467,9 +477,12 @@ static struct css_set *find_css_set(
 
        BUG_ON(!list_empty(&tmp_cg_links));
 
-       /* Link this cgroup group into the list */
-       list_add(&res->list, &init_css_set.list);
        css_set_count++;
+
+       /* Add this cgroup group to the hash table */
+       hhead = css_set_hash(res->subsys);
+       hlist_add_head(&res->hlist, hhead);
+
        write_unlock(&css_set_lock);
 
        return res;
@@ -562,7 +575,7 @@ static struct inode_operations cgroup_dir_inode_operations;
 static struct file_operations proc_cgroupstats_operations;
 
 static struct backing_dev_info cgroup_backing_dev_info = {
-       .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
 
 static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb)
@@ -948,7 +961,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
        int ret = 0;
        struct super_block *sb;
        struct cgroupfs_root *root;
-       struct list_head tmp_cg_links, *l;
+       struct list_head tmp_cg_links;
        INIT_LIST_HEAD(&tmp_cg_links);
 
        /* First find the desired set of subsystems */
@@ -990,6 +1003,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
                /* New superblock */
                struct cgroup *cgrp = &root->top_cgroup;
                struct inode *inode;
+               int i;
 
                BUG_ON(sb->s_root != NULL);
 
@@ -1034,22 +1048,25 @@ static int cgroup_get_sb(struct file_system_type *fs_type,
                /* Link the top cgroup in this hierarchy into all
                 * the css_set objects */
                write_lock(&css_set_lock);
-               l = &init_css_set.list;
-               do {
+               for (i = 0; i < CSS_SET_TABLE_SIZE; i++) {
+                       struct hlist_head *hhead = &css_set_table[i];
+                       struct hlist_node *node;
                        struct css_set *cg;
-                       struct cg_cgroup_link *link;
-                       cg = list_entry(l, struct css_set, list);
-                       BUG_ON(list_empty(&tmp_cg_links));
-                       link = list_entry(tmp_cg_links.next,
-                                         struct cg_cgroup_link,
-                                         cgrp_link_list);
-                       list_del(&link->cgrp_link_list);
-                       link->cg = cg;
-                       list_add(&link->cgrp_link_list,
-                                &root->top_cgroup.css_sets);
-                       list_add(&link->cg_link_list, &cg->cg_links);
-                       l = l->next;
-               } while (l != &init_css_set.list);
+
+                       hlist_for_each_entry(cg, node, hhead, hlist) {
+                               struct cg_cgroup_link *link;
+
+                               BUG_ON(list_empty(&tmp_cg_links));
+                               link = list_entry(tmp_cg_links.next,
+                                                 struct cg_cgroup_link,
+                                                 cgrp_link_list);
+                               list_del(&link->cgrp_link_list);
+                               link->cg = cg;
+                               list_add(&link->cgrp_link_list,
+                                        &root->top_cgroup.css_sets);
+                               list_add(&link->cg_link_list, &cg->cg_links);
+                       }
+               }
                write_unlock(&css_set_lock);
 
                free_cg_links(&tmp_cg_links);
@@ -1307,18 +1324,16 @@ enum cgroup_filetype {
        FILE_DIR,
        FILE_TASKLIST,
        FILE_NOTIFY_ON_RELEASE,
-       FILE_RELEASABLE,
        FILE_RELEASE_AGENT,
 };
 
-static ssize_t cgroup_write_uint(struct cgroup *cgrp, struct cftype *cft,
-                                struct file *file,
-                                const char __user *userbuf,
-                                size_t nbytes, loff_t *unused_ppos)
+static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
+                               struct file *file,
+                               const char __user *userbuf,
+                               size_t nbytes, loff_t *unused_ppos)
 {
        char buffer[64];
        int retval = 0;
-       u64 val;
        char *end;
 
        if (!nbytes)
@@ -1329,16 +1344,18 @@ static ssize_t cgroup_write_uint(struct cgroup *cgrp, struct cftype *cft,
                return -EFAULT;
 
        buffer[nbytes] = 0;     /* nul-terminate */
-
-       /* strip newline if necessary */
-       if (nbytes && (buffer[nbytes-1] == '\n'))
-               buffer[nbytes-1] = 0;
-       val = simple_strtoull(buffer, &end, 0);
-       if (*end)
-               return -EINVAL;
-
-       /* Pass to subsystem */
-       retval = cft->write_uint(cgrp, cft, val);
+       strstrip(buffer);
+       if (cft->write_u64) {
+               u64 val = simple_strtoull(buffer, &end, 0);
+               if (*end)
+                       return -EINVAL;
+               retval = cft->write_u64(cgrp, cft, val);
+       } else {
+               s64 val = simple_strtoll(buffer, &end, 0);
+               if (*end)
+                       return -EINVAL;
+               retval = cft->write_s64(cgrp, cft, val);
+       }
        if (!retval)
                retval = nbytes;
        return retval;
@@ -1419,23 +1436,39 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
                return -ENODEV;
        if (cft->write)
                return cft->write(cgrp, cft, file, buf, nbytes, ppos);
-       if (cft->write_uint)
-               return cgroup_write_uint(cgrp, cft, file, buf, nbytes, ppos);
+       if (cft->write_u64 || cft->write_s64)
+               return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
+       if (cft->trigger) {
+               int ret = cft->trigger(cgrp, (unsigned int)cft->private);
+               return ret ? ret : nbytes;
+       }
        return -EINVAL;
 }
 
-static ssize_t cgroup_read_uint(struct cgroup *cgrp, struct cftype *cft,
-                                  struct file *file,
-                                  char __user *buf, size_t nbytes,
-                                  loff_t *ppos)
+static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
+                              struct file *file,
+                              char __user *buf, size_t nbytes,
+                              loff_t *ppos)
 {
        char tmp[64];
-       u64 val = cft->read_uint(cgrp, cft);
+       u64 val = cft->read_u64(cgrp, cft);
        int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
 
        return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
 }
 
+static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
+                              struct file *file,
+                              char __user *buf, size_t nbytes,
+                              loff_t *ppos)
+{
+       char tmp[64];
+       s64 val = cft->read_s64(cgrp, cft);
+       int len = sprintf(tmp, "%lld\n", (long long) val);
+
+       return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
+}
+
 static ssize_t cgroup_common_file_read(struct cgroup *cgrp,
                                          struct cftype *cft,
                                          struct file *file,
@@ -1490,11 +1523,56 @@ static ssize_t cgroup_file_read(struct file *file, char __user *buf,
 
        if (cft->read)
                return cft->read(cgrp, cft, file, buf, nbytes, ppos);
-       if (cft->read_uint)
-               return cgroup_read_uint(cgrp, cft, file, buf, nbytes, ppos);
+       if (cft->read_u64)
+               return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos);
+       if (cft->read_s64)
+               return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos);
        return -EINVAL;
 }
 
+/*
+ * seqfile ops/methods for returning structured data. Currently just
+ * supports string->u64 maps, but can be extended in future.
+ */
+
+struct cgroup_seqfile_state {
+       struct cftype *cft;
+       struct cgroup *cgroup;
+};
+
+static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value)
+{
+       struct seq_file *sf = cb->state;
+       return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value);
+}
+
+static int cgroup_seqfile_show(struct seq_file *m, void *arg)
+{
+       struct cgroup_seqfile_state *state = m->private;
+       struct cftype *cft = state->cft;
+       if (cft->read_map) {
+               struct cgroup_map_cb cb = {
+                       .fill = cgroup_map_add,
+                       .state = m,
+               };
+               return cft->read_map(state->cgroup, cft, &cb);
+       }
+       return cft->read_seq_string(state->cgroup, cft, m);
+}
+
+int cgroup_seqfile_release(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq = file->private_data;
+       kfree(seq->private);
+       return single_release(inode, file);
+}
+
+static struct file_operations cgroup_seqfile_operations = {
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = cgroup_seqfile_release,
+};
+
 static int cgroup_file_open(struct inode *inode, struct file *file)
 {
        int err;
@@ -1507,7 +1585,18 @@ static int cgroup_file_open(struct inode *inode, struct file *file)
        cft = __d_cft(file->f_dentry);
        if (!cft)
                return -ENODEV;
-       if (cft->open)
+       if (cft->read_map || cft->read_seq_string) {
+               struct cgroup_seqfile_state *state =
+                       kzalloc(sizeof(*state), GFP_USER);
+               if (!state)
+                       return -ENOMEM;
+               state->cft = cft;
+               state->cgroup = __d_cgrp(file->f_dentry->d_parent);
+               file->f_op = &cgroup_seqfile_operations;
+               err = single_open(file, cgroup_seqfile_show, state);
+               if (err < 0)
+                       kfree(state);
+       } else if (cft->open)
                err = cft->open(inode, file);
        else
                err = 0;
@@ -1715,7 +1804,7 @@ static void cgroup_advance_iter(struct cgroup *cgrp,
  * The tasklist_lock is not held here, as do_each_thread() and
  * while_each_thread() are protected by RCU.
  */
-void cgroup_enable_task_cg_lists(void)
+static void cgroup_enable_task_cg_lists(void)
 {
        struct task_struct *p, *g;
        write_lock(&css_set_lock);
@@ -1913,14 +2002,14 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
 
        if (heap->size) {
                for (i = 0; i < heap->size; i++) {
-                       struct task_struct *p = heap->ptrs[i];
+                       struct task_struct *q = heap->ptrs[i];
                        if (i == 0) {
-                               latest_time = p->start_time;
-                               latest_task = p;
+                               latest_time = q->start_time;
+                               latest_task = q;
                        }
                        /* Process the task per the caller's callback */
-                       scan->process_task(p, scan);
-                       put_task_struct(p);
+                       scan->process_task(q, scan);
+                       put_task_struct(q);
                }
                /*
                 * If we had to process any tasks at all, scan again
@@ -2138,11 +2227,6 @@ static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
        return notify_on_release(cgrp);
 }
 
-static u64 cgroup_read_releasable(struct cgroup *cgrp, struct cftype *cft)
-{
-       return test_bit(CGRP_RELEASABLE, &cgrp->flags);
-}
-
 /*
  * for the common functions, 'private' gives the type of file
  */
@@ -2158,16 +2242,10 @@ static struct cftype files[] = {
 
        {
                .name = "notify_on_release",
-               .read_uint = cgroup_read_notify_on_release,
+               .read_u64 = cgroup_read_notify_on_release,
                .write = cgroup_common_file_write,
                .private = FILE_NOTIFY_ON_RELEASE,
        },
-
-       {
-               .name = "releasable",
-               .read_uint = cgroup_read_releasable,
-               .private = FILE_RELEASABLE,
-       }
 };
 
 static struct cftype cft_release_agent = {
@@ -2401,10 +2479,9 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
        return 0;
 }
 
-static void cgroup_init_subsys(struct cgroup_subsys *ss)
+static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
 {
        struct cgroup_subsys_state *css;
-       struct list_head *l;
 
        printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
 
@@ -2415,34 +2492,19 @@ static void cgroup_init_subsys(struct cgroup_subsys *ss)
        BUG_ON(IS_ERR(css));
        init_cgroup_css(css, ss, dummytop);
 
-       /* Update all cgroup groups to contain a subsys
+       /* Update the init_css_set to contain a subsys
         * pointer to this state - since the subsystem is
-        * newly registered, all tasks and hence all cgroup
-        * groups are in the subsystem's top cgroup. */
-       write_lock(&css_set_lock);
-       l = &init_css_set.list;
-       do {
-               struct css_set *cg =
-                       list_entry(l, struct css_set, list);
-               cg->subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
-               l = l->next;
-       } while (l != &init_css_set.list);
-       write_unlock(&css_set_lock);
-
-       /* If this subsystem requested that it be notified with fork
-        * events, we should send it one now for every process in the
-        * system */
-       if (ss->fork) {
-               struct task_struct *g, *p;
-
-               read_lock(&tasklist_lock);
-               do_each_thread(g, p) {
-                       ss->fork(ss, p);
-               } while_each_thread(g, p);
-               read_unlock(&tasklist_lock);
-       }
+        * newly registered, all tasks and hence the
+        * init_css_set is in the subsystem's top cgroup. */
+       init_css_set.subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id];
 
        need_forkexit_callback |= ss->fork || ss->exit;
+       need_mm_owner_callback |= !!ss->mm_owner_changed;
+
+       /* At system boot, before all subsystems have been
+        * registered, no tasks have been forked, so we don't
+        * need to invoke fork callbacks here. */
+       BUG_ON(!list_empty(&init_task.tasks));
 
        ss->active = 1;
 }
@@ -2458,9 +2520,9 @@ int __init cgroup_init_early(void)
        int i;
        kref_init(&init_css_set.ref);
        kref_get(&init_css_set.ref);
-       INIT_LIST_HEAD(&init_css_set.list);
        INIT_LIST_HEAD(&init_css_set.cg_links);
        INIT_LIST_HEAD(&init_css_set.tasks);
+       INIT_HLIST_NODE(&init_css_set.hlist);
        css_set_count = 1;
        init_cgroup_root(&rootnode);
        list_add(&rootnode.root_list, &roots);
@@ -2473,6 +2535,9 @@ int __init cgroup_init_early(void)
        list_add(&init_css_set_link.cg_link_list,
                 &init_css_set.cg_links);
 
+       for (i = 0; i < CSS_SET_TABLE_SIZE; i++)
+               INIT_HLIST_HEAD(&css_set_table[i]);
+
        for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                struct cgroup_subsys *ss = subsys[i];
 
@@ -2502,7 +2567,7 @@ int __init cgroup_init(void)
 {
        int err;
        int i;
-       struct proc_dir_entry *entry;
+       struct hlist_head *hhead;
 
        err = bdi_init(&cgroup_backing_dev_info);
        if (err)
@@ -2514,13 +2579,15 @@ int __init cgroup_init(void)
                        cgroup_init_subsys(ss);
        }
 
+       /* Add init_css_set to the hash table */
+       hhead = css_set_hash(init_css_set.subsys);
+       hlist_add_head(&init_css_set.hlist, hhead);
+
        err = register_filesystem(&cgroup_fs_type);
        if (err < 0)
                goto out;
 
-       entry = create_proc_entry("cgroups", 0, NULL);
-       if (entry)
-               entry->proc_fops = &proc_cgroupstats_operations;
+       proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);
 
 out:
        if (err)
@@ -2683,6 +2750,34 @@ void cgroup_fork_callbacks(struct task_struct *child)
        }
 }
 
+#ifdef CONFIG_MM_OWNER
+/**
+ * cgroup_mm_owner_callbacks - run callbacks when the mm->owner changes
+ * @p: the new owner
+ *
+ * Called on every change to mm->owner. mm_init_owner() does not
+ * invoke this routine, since it assigns the mm->owner the first time
+ * and does not change it.
+ */
+void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
+{
+       struct cgroup *oldcgrp, *newcgrp;
+
+       if (need_mm_owner_callback) {
+               int i;
+               for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
+                       struct cgroup_subsys *ss = subsys[i];
+                       oldcgrp = task_cgroup(old, ss->subsys_id);
+                       newcgrp = task_cgroup(new, ss->subsys_id);
+                       if (oldcgrp == newcgrp)
+                               continue;
+                       if (ss->mm_owner_changed)
+                               ss->mm_owner_changed(ss, oldcgrp, newcgrp);
+               }
+       }
+}
+#endif /* CONFIG_MM_OWNER */
+
 /**
  * cgroup_post_fork - called on a new task after adding it to the task list
  * @child: the task in question
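
The cgroup.c changes above replace the global linked list of css_set objects
with a 128-bucket hash table: css_set_hash() sums the subsystem state
pointers, folds the sum, and feeds it to hash_long() with CSS_SET_HASH_BITS.
Below is a userspace model of the same bucket computation.  hash32() is a
simplified stand-in for the kernel's hash_long() (the multiplier is the 32-bit
golden-ratio prime from <linux/hash.h>), and SUBSYS_COUNT is an arbitrary
illustrative value.

/* Userspace model of css_set_hash(): fold the per-subsystem state pointers
 * into one word, then hash it into one of 1 << CSS_SET_HASH_BITS buckets. */
#include <stdio.h>
#include <stdint.h>

#define CSS_SET_HASH_BITS 7
#define CSS_SET_TABLE_SIZE (1 << CSS_SET_HASH_BITS)
#define SUBSYS_COUNT 4          /* illustrative; the real count is config-dependent */

static unsigned int hash32(uint32_t val, unsigned int bits)
{
        /* multiplicative hash, keeping the top 'bits' bits */
        return (uint32_t)(val * 0x9e370001u) >> (32 - bits);
}

static unsigned int css_set_bucket(void *css[])
{
        unsigned long tmp = 0;

        for (int i = 0; i < SUBSYS_COUNT; i++)
                tmp += (unsigned long)css[i];
        tmp = (tmp >> 16) ^ tmp;        /* same folding step as css_set_hash() */

        return hash32((uint32_t)tmp, CSS_SET_HASH_BITS);
}

int main(void)
{
        int a, b, c, d;                 /* stand-ins for per-subsystem state */
        void *css[SUBSYS_COUNT] = { &a, &b, &c, &d };

        printf("bucket %u of %d\n", css_set_bucket(css), CSS_SET_TABLE_SIZE);
        return 0;
}
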
index 37301e877cb03304ea06983c2766eef696b73892..c3dc3aba4c02a5b243b25b1ed7be971c5ec1ef17 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * kernel/ccontainer_debug.c - Example cgroup subsystem that
+ * kernel/cgroup_debug.c - Example cgroup subsystem that
  * exposes debug info
  *
  * Copyright (C) Google Inc, 2007
@@ -62,25 +62,35 @@ static u64 current_css_set_refcount_read(struct cgroup *cont,
        return count;
 }
 
+static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft)
+{
+       return test_bit(CGRP_RELEASABLE, &cgrp->flags);
+}
+
 static struct cftype files[] =  {
        {
                .name = "cgroup_refcount",
-               .read_uint = cgroup_refcount_read,
+               .read_u64 = cgroup_refcount_read,
        },
        {
                .name = "taskcount",
-               .read_uint = taskcount_read,
+               .read_u64 = taskcount_read,
        },
 
        {
                .name = "current_css_set",
-               .read_uint = current_css_set_read,
+               .read_u64 = current_css_set_read,
        },
 
        {
                .name = "current_css_set_refcount",
-               .read_uint = current_css_set_refcount_read,
+               .read_u64 = current_css_set_refcount_read,
        },
+
+       {
+               .name = "releasable",
+               .read_u64 = releasable_read,
+       }
 };
 
 static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
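
The read_uint -> read_u64 renames above are one side of the reworked cftype
interface: handlers are now split into read_u64/read_s64 and
write_u64/write_s64, with the seqfile-backed read_map/read_seq_string and
trigger callbacks added in cgroup.c.  The sketch below shows what a
subsystem-side file table might look like against that interface; the
example_* names are invented, and only the cftype fields themselves come from
the code above.

/* Sketch only -- a hypothetical subsystem's file table using the reworked
 * cftype callbacks; not compilable outside the kernel tree. */
static u64 example_limit_read(struct cgroup *cgrp, struct cftype *cft)
{
        return 42;                      /* report some per-cgroup counter */
}

static int example_stats_show(struct cgroup *cgrp, struct cftype *cft,
                              struct cgroup_map_cb *cb)
{
        /* key/value pairs rendered through the new seq_file path */
        cb->fill(cb, "hits", 1024);
        cb->fill(cb, "misses", 7);
        return 0;
}

static struct cftype example_files[] = {
        {
                .name = "limit",
                .read_u64 = example_limit_read,         /* plain "%llu\n" output */
        },
        {
                .name = "stats",
                .read_map = example_stats_show,         /* structured output */
        },
};
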
index e1ef04870c2a12122fddfd3a712ed6e6d2a441ea..32c254a8ab9af07ae3ac102972063956592e0b3b 100644 (file)
@@ -898,7 +898,7 @@ asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat
 
        current->state = TASK_INTERRUPTIBLE;
        schedule();
-       set_thread_flag(TIF_RESTORE_SIGMASK);
+       set_restore_sigmask();
        return -ERESTARTNOHAND;
 }
 #endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */
@@ -955,7 +955,8 @@ asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
                        __put_user(txc.jitcnt, &utp->jitcnt) ||
                        __put_user(txc.calcnt, &utp->calcnt) ||
                        __put_user(txc.errcnt, &utp->errcnt) ||
-                       __put_user(txc.stbcnt, &utp->stbcnt))
+                       __put_user(txc.stbcnt, &utp->stbcnt) ||
+                       __put_user(txc.tai, &utp->tai))
                ret = -EFAULT;
 
        return ret;
@@ -1080,4 +1081,3 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
 
        return 0;
 }
-
index e84d3f9c6c7ba87218df59577462fc2523c0b6f9..4c345210ed8cca3ffb1c64a9e4aef92cf97eedf8 100644 (file)
@@ -79,12 +79,11 @@ static int __init ikconfig_init(void)
        struct proc_dir_entry *entry;
 
        /* create the current config file */
-       entry = create_proc_entry("config.gz", S_IFREG | S_IRUGO,
-                                 &proc_root);
+       entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
+                           &ikconfig_file_ops);
        if (!entry)
                return -ENOMEM;
 
-       entry->proc_fops = &ikconfig_file_ops;
        entry->size = kernel_config_data_size;
 
        return 0;
@@ -95,7 +94,7 @@ static int __init ikconfig_init(void)
 
 static void __exit ikconfig_cleanup(void)
 {
-       remove_proc_entry("config.gz", &proc_root);
+       remove_proc_entry("config.gz", NULL);
 }
 
 module_init(ikconfig_init);
index 2011ad8d26973fae39a373aa4121ea2fd3f0b8d1..c77bc3a1c7226b504be0a50f05136662dd464a73 100644 (file)
@@ -33,17 +33,13 @@ static struct {
         * an ongoing cpu hotplug operation.
         */
        int refcount;
-       wait_queue_head_t writer_queue;
 } cpu_hotplug;
 
-#define writer_exists() (cpu_hotplug.active_writer != NULL)
-
 void __init cpu_hotplug_init(void)
 {
        cpu_hotplug.active_writer = NULL;
        mutex_init(&cpu_hotplug.lock);
        cpu_hotplug.refcount = 0;
-       init_waitqueue_head(&cpu_hotplug.writer_queue);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -65,11 +61,8 @@ void put_online_cpus(void)
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
-       cpu_hotplug.refcount--;
-
-       if (unlikely(writer_exists()) && !cpu_hotplug.refcount)
-               wake_up(&cpu_hotplug.writer_queue);
-
+       if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
+               wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
 
 }
@@ -98,8 +91,8 @@ void cpu_maps_update_done(void)
  * Note that during a cpu-hotplug operation, the new readers, if any,
  * will be blocked by the cpu_hotplug.lock
  *
- * Since cpu_maps_update_begin is always called after invoking
- * cpu_maps_update_begin, we can be sure that only one writer is active.
+ * Since cpu_hotplug_begin() is always called after invoking
+ * cpu_maps_update_begin(), we can be sure that only one writer is active.
  *
  * Note that theoretically, there is a possibility of a livelock:
  * - Refcount goes to zero, last reader wakes up the sleeping
@@ -115,19 +108,16 @@ void cpu_maps_update_done(void)
  */
 static void cpu_hotplug_begin(void)
 {
-       DECLARE_WAITQUEUE(wait, current);
-
-       mutex_lock(&cpu_hotplug.lock);
-
        cpu_hotplug.active_writer = current;
-       add_wait_queue_exclusive(&cpu_hotplug.writer_queue, &wait);
-       while (cpu_hotplug.refcount) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
+
+       for (;;) {
+               mutex_lock(&cpu_hotplug.lock);
+               if (likely(!cpu_hotplug.refcount))
+                       break;
+               __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
-               mutex_lock(&cpu_hotplug.lock);
        }
-       remove_wait_queue_locked(&cpu_hotplug.writer_queue, &wait);
 }
 
 static void cpu_hotplug_done(void)
@@ -136,7 +126,7 @@ static void cpu_hotplug_done(void)
        mutex_unlock(&cpu_hotplug.lock);
 }
 /* Need to know about CPUs going up/down? */
-int __cpuinit register_cpu_notifier(struct notifier_block *nb)
+int __ref register_cpu_notifier(struct notifier_block *nb)
 {
        int ret;
        cpu_maps_update_begin();
@@ -149,7 +139,7 @@ int __cpuinit register_cpu_notifier(struct notifier_block *nb)
 
 EXPORT_SYMBOL(register_cpu_notifier);
 
-void unregister_cpu_notifier(struct notifier_block *nb)
+void __ref unregister_cpu_notifier(struct notifier_block *nb)
 {
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
@@ -180,7 +170,7 @@ struct take_cpu_down_param {
 };
 
 /* Take this CPU down. */
-static int take_cpu_down(void *_param)
+static int __ref take_cpu_down(void *_param)
 {
        struct take_cpu_down_param *param = _param;
        int err;
@@ -199,7 +189,7 @@ static int take_cpu_down(void *_param)
 }
 
 /* Requires cpu_add_remove_lock to be held */
-static int _cpu_down(unsigned int cpu, int tasks_frozen)
+static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
        int err, nr_calls = 0;
        struct task_struct *p;
@@ -225,7 +215,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
                __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                          hcpu, nr_calls, NULL);
                printk("%s: attempt to take down CPU %u failed\n",
-                               __FUNCTION__, cpu);
+                               __func__, cpu);
                err = -EINVAL;
                goto out_release;
        }
@@ -274,7 +264,7 @@ out_release:
        return err;
 }
 
-int cpu_down(unsigned int cpu)
+int __ref cpu_down(unsigned int cpu)
 {
        int err = 0;
 
@@ -305,7 +295,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
        if (ret == NOTIFY_BAD) {
                nr_calls--;
                printk("%s: attempt to bring up CPU %u failed\n",
-                               __FUNCTION__, cpu);
+                               __func__, cpu);
                ret = -EINVAL;
                goto out_notify;
        }
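For context, a minimal sketch (not part of this commit; example_walk_online_cpus() is hypothetical) of the reader side that the rewritten writer path above synchronizes against: readers bracket their use of the online map with get_online_cpus()/put_online_cpus(), and with this change the last reader wakes the waiting hotplug writer directly via wake_up_process() instead of through a waitqueue.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>

static void example_walk_online_cpus(void)
{
        int cpu;

        get_online_cpus();              /* bumps cpu_hotplug.refcount */
        for_each_online_cpu(cpu)
                printk(KERN_DEBUG "cpu %d is online\n", cpu);
        put_online_cpus();              /* last reader wakes a sleeping writer */
}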
index 8b35fbd8292f2d5b53b613eab630c67af87ec6f8..8da627d33804db7c9e30936fead573cedd4407e3 100644 (file)
@@ -127,6 +127,7 @@ struct cpuset_hotplug_scanner {
 typedef enum {
        CS_CPU_EXCLUSIVE,
        CS_MEM_EXCLUSIVE,
+       CS_MEM_HARDWALL,
        CS_MEMORY_MIGRATE,
        CS_SCHED_LOAD_BALANCE,
        CS_SPREAD_PAGE,
@@ -144,6 +145,11 @@ static inline int is_mem_exclusive(const struct cpuset *cs)
        return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
 }
 
+static inline int is_mem_hardwall(const struct cpuset *cs)
+{
+       return test_bit(CS_MEM_HARDWALL, &cs->flags);
+}
+
 static inline int is_sched_load_balance(const struct cpuset *cs)
 {
        return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
@@ -735,7 +741,8 @@ static inline int started_after(void *p1, void *p2)
  * Return nonzero if this tasks's cpus_allowed mask should be changed (in other
  * words, if its mask is not equal to its cpuset's mask).
  */
-int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
+static int cpuset_test_cpumask(struct task_struct *tsk,
+                              struct cgroup_scanner *scan)
 {
        return !cpus_equal(tsk->cpus_allowed,
                        (cgroup_cs(scan->cg))->cpus_allowed);
@@ -752,7 +759,8 @@ int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
  * We don't need to re-check for the cgroup/cpuset membership, since we're
  * holding cgroup_lock() at this point.
  */
-void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan)
+static void cpuset_change_cpumask(struct task_struct *tsk,
+                                 struct cgroup_scanner *scan)
 {
        set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed));
 }
@@ -941,7 +949,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
        cs->mems_generation = cpuset_mems_generation++;
        mutex_unlock(&callback_mutex);
 
-       cpuset_being_rebound = cs;              /* causes mpol_copy() rebind */
+       cpuset_being_rebound = cs;              /* causes mpol_dup() rebind */
 
        fudge = 10;                             /* spare mmarray[] slots */
        fudge += cpus_weight(cs->cpus_allowed); /* imagine one fork-bomb/cpu */
@@ -992,7 +1000,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
         * rebind the vma mempolicies of each mm in mmarray[] to their
         * new cpuset, and release that mm.  The mpol_rebind_mm()
         * call takes mmap_sem, which we couldn't take while holding
-        * tasklist_lock.  Forks can happen again now - the mpol_copy()
+        * tasklist_lock.  Forks can happen again now - the mpol_dup()
         * cpuset_being_rebound check will catch such forks, and rebind
         * their vma mempolicies too.  Because we still hold the global
         * cgroup_mutex, we know that no other rebind effort will
@@ -1023,19 +1031,6 @@ int current_cpuset_is_being_rebound(void)
        return task_cs(current) == cpuset_being_rebound;
 }
 
-/*
- * Call with cgroup_mutex held.
- */
-
-static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
-{
-       if (simple_strtoul(buf, NULL, 10) != 0)
-               cpuset_memory_pressure_enabled = 1;
-       else
-               cpuset_memory_pressure_enabled = 0;
-       return 0;
-}
-
 static int update_relax_domain_level(struct cpuset *cs, char *buf)
 {
        int val = simple_strtol(buf, NULL, 10);
@@ -1053,25 +1048,20 @@ static int update_relax_domain_level(struct cpuset *cs, char *buf)
 
 /*
  * update_flag - read a 0 or a 1 in a file and update associated flag
- * bit:        the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
- *                             CS_SCHED_LOAD_BALANCE,
- *                             CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE,
- *                             CS_SPREAD_PAGE, CS_SPREAD_SLAB)
- * cs: the cpuset to update
- * buf:        the buffer where we read the 0 or 1
+ * bit:                the bit to update (see cpuset_flagbits_t)
+ * cs:         the cpuset to update
+ * turning_on:         whether the flag is being set or cleared
  *
  * Call with cgroup_mutex held.
  */
 
-static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
+static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
+                      int turning_on)
 {
-       int turning_on;
        struct cpuset trialcs;
        int err;
        int cpus_nonempty, balance_flag_changed;
 
-       turning_on = (simple_strtoul(buf, NULL, 10) != 0);
-
        trialcs = *cs;
        if (turning_on)
                set_bit(bit, &trialcs.flags);
@@ -1241,6 +1231,7 @@ typedef enum {
        FILE_MEMLIST,
        FILE_CPU_EXCLUSIVE,
        FILE_MEM_EXCLUSIVE,
+       FILE_MEM_HARDWALL,
        FILE_SCHED_LOAD_BALANCE,
        FILE_SCHED_RELAX_DOMAIN_LEVEL,
        FILE_MEMORY_PRESSURE_ENABLED,
@@ -1265,7 +1256,8 @@ static ssize_t cpuset_common_file_write(struct cgroup *cont,
                return -E2BIG;
 
        /* +1 for nul-terminator */
-       if ((buffer = kmalloc(nbytes + 1, GFP_KERNEL)) == 0)
+       buffer = kmalloc(nbytes + 1, GFP_KERNEL);
+       if (!buffer)
                return -ENOMEM;
 
        if (copy_from_user(buffer, userbuf, nbytes)) {
@@ -1288,46 +1280,71 @@ static ssize_t cpuset_common_file_write(struct cgroup *cont,
        case FILE_MEMLIST:
                retval = update_nodemask(cs, buffer);
                break;
+       case FILE_SCHED_RELAX_DOMAIN_LEVEL:
+               retval = update_relax_domain_level(cs, buffer);
+               break;
+       default:
+               retval = -EINVAL;
+               goto out2;
+       }
+
+       if (retval == 0)
+               retval = nbytes;
+out2:
+       cgroup_unlock();
+out1:
+       kfree(buffer);
+       return retval;
+}
+
+static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
+{
+       int retval = 0;
+       struct cpuset *cs = cgroup_cs(cgrp);
+       cpuset_filetype_t type = cft->private;
+
+       cgroup_lock();
+
+       if (cgroup_is_removed(cgrp)) {
+               cgroup_unlock();
+               return -ENODEV;
+       }
+
+       switch (type) {
        case FILE_CPU_EXCLUSIVE:
-               retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer);
+               retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
                break;
        case FILE_MEM_EXCLUSIVE:
-               retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
+               retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
                break;
-       case FILE_SCHED_LOAD_BALANCE:
-               retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer);
+       case FILE_MEM_HARDWALL:
+               retval = update_flag(CS_MEM_HARDWALL, cs, val);
                break;
-       case FILE_SCHED_RELAX_DOMAIN_LEVEL:
-               retval = update_relax_domain_level(cs, buffer);
+       case FILE_SCHED_LOAD_BALANCE:
+               retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
                break;
        case FILE_MEMORY_MIGRATE:
-               retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
+               retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
                break;
        case FILE_MEMORY_PRESSURE_ENABLED:
-               retval = update_memory_pressure_enabled(cs, buffer);
+               cpuset_memory_pressure_enabled = !!val;
                break;
        case FILE_MEMORY_PRESSURE:
                retval = -EACCES;
                break;
        case FILE_SPREAD_PAGE:
-               retval = update_flag(CS_SPREAD_PAGE, cs, buffer);
+               retval = update_flag(CS_SPREAD_PAGE, cs, val);
                cs->mems_generation = cpuset_mems_generation++;
                break;
        case FILE_SPREAD_SLAB:
-               retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
+               retval = update_flag(CS_SPREAD_SLAB, cs, val);
                cs->mems_generation = cpuset_mems_generation++;
                break;
        default:
                retval = -EINVAL;
-               goto out2;
+               break;
        }
-
-       if (retval == 0)
-               retval = nbytes;
-out2:
        cgroup_unlock();
-out1:
-       kfree(buffer);
        return retval;
 }
 
@@ -1389,33 +1406,9 @@ static ssize_t cpuset_common_file_read(struct cgroup *cont,
        case FILE_MEMLIST:
                s += cpuset_sprintf_memlist(s, cs);
                break;
-       case FILE_CPU_EXCLUSIVE:
-               *s++ = is_cpu_exclusive(cs) ? '1' : '0';
-               break;
-       case FILE_MEM_EXCLUSIVE:
-               *s++ = is_mem_exclusive(cs) ? '1' : '0';
-               break;
-       case FILE_SCHED_LOAD_BALANCE:
-               *s++ = is_sched_load_balance(cs) ? '1' : '0';
-               break;
        case FILE_SCHED_RELAX_DOMAIN_LEVEL:
                s += sprintf(s, "%d", cs->relax_domain_level);
                break;
-       case FILE_MEMORY_MIGRATE:
-               *s++ = is_memory_migrate(cs) ? '1' : '0';
-               break;
-       case FILE_MEMORY_PRESSURE_ENABLED:
-               *s++ = cpuset_memory_pressure_enabled ? '1' : '0';
-               break;
-       case FILE_MEMORY_PRESSURE:
-               s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter));
-               break;
-       case FILE_SPREAD_PAGE:
-               *s++ = is_spread_page(cs) ? '1' : '0';
-               break;
-       case FILE_SPREAD_SLAB:
-               *s++ = is_spread_slab(cs) ? '1' : '0';
-               break;
        default:
                retval = -EINVAL;
                goto out;
@@ -1428,121 +1421,137 @@ out:
        return retval;
 }
 
-
-
+static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft)
+{
+       struct cpuset *cs = cgroup_cs(cont);
+       cpuset_filetype_t type = cft->private;
+       switch (type) {
+       case FILE_CPU_EXCLUSIVE:
+               return is_cpu_exclusive(cs);
+       case FILE_MEM_EXCLUSIVE:
+               return is_mem_exclusive(cs);
+       case FILE_MEM_HARDWALL:
+               return is_mem_hardwall(cs);
+       case FILE_SCHED_LOAD_BALANCE:
+               return is_sched_load_balance(cs);
+       case FILE_MEMORY_MIGRATE:
+               return is_memory_migrate(cs);
+       case FILE_MEMORY_PRESSURE_ENABLED:
+               return cpuset_memory_pressure_enabled;
+       case FILE_MEMORY_PRESSURE:
+               return fmeter_getrate(&cs->fmeter);
+       case FILE_SPREAD_PAGE:
+               return is_spread_page(cs);
+       case FILE_SPREAD_SLAB:
+               return is_spread_slab(cs);
+       default:
+               BUG();
+       }
+}
 
 
 /*
  * for the common functions, 'private' gives the type of file
  */
 
-static struct cftype cft_cpus = {
-       .name = "cpus",
-       .read = cpuset_common_file_read,
-       .write = cpuset_common_file_write,
-       .private = FILE_CPULIST,
-};
-
-static struct cftype cft_mems = {
-       .name = "mems",
-       .read = cpuset_common_file_read,
-       .write = cpuset_common_file_write,
-       .private = FILE_MEMLIST,
-};
-
-static struct cftype cft_cpu_exclusive = {
-       .name = "cpu_exclusive",
-       .read = cpuset_common_file_read,
-       .write = cpuset_common_file_write,
-       .private = FILE_CPU_EXCLUSIVE,
-};
-
-static struct cftype cft_mem_exclusive = {
-       .name = "mem_exclusive",
-       .read = cpuset_common_file_read,
-       .write = cpuset_common_file_write,
-       .private = FILE_MEM_EXCLUSIVE,
-};
-
-static struct cftype cft_sched_load_balance = {
-       .name = "sched_load_balance",
-       .read = cpuset_common_file_read,
-       .write = cpuset_common_file_write,
-       .private = FILE_SCHED_LOAD_BALANCE,
-};
-
-static struct cftype cft_sched_relax_domain_level = {
-       .name = "sched_relax_domain_level",
-       .read = cpuset_common_file_read,
-       .write = cpuset_common_file_write,
-       .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
-};
-
-static struct cftype cft_memory_migrate = {
-       .name = "memory_migrate",
-       .read = cpuset_common_file_read,
-       .write = cpuset_common_file_write,
-       .private = FILE_MEMORY_MIGRATE,
+static struct cftype files[] = {
+       {
+               .name = "cpus",
+               .read = cpuset_common_file_read,
+               .write = cpuset_common_file_write,
+               .private = FILE_CPULIST,
+       },
+
+       {
+               .name = "mems",
+               .read = cpuset_common_file_read,
+               .write = cpuset_common_file_write,
+               .private = FILE_MEMLIST,
+       },
+
+       {
+               .name = "cpu_exclusive",
+               .read_u64 = cpuset_read_u64,
+               .write_u64 = cpuset_write_u64,
+               .private = FILE_CPU_EXCLUSIVE,
+       },
+
+       {
+               .name = "mem_exclusive",
+               .read_u64 = cpuset_read_u64,
+               .write_u64 = cpuset_write_u64,
+               .private = FILE_MEM_EXCLUSIVE,
+       },
+
+       {
+               .name = "mem_hardwall",
+               .read_u64 = cpuset_read_u64,
+               .write_u64 = cpuset_write_u64,
+               .private = FILE_MEM_HARDWALL,
+       },
+
+       {
+               .name = "sched_load_balance",
+               .read_u64 = cpuset_read_u64,
+               .write_u64 = cpuset_write_u64,
+               .private = FILE_SCHED_LOAD_BALANCE,
+       },
+
+       {
+               .name = "sched_relax_domain_level",
+               .read_u64 = cpuset_read_u64,
+               .write_u64 = cpuset_write_u64,
+               .private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
+       },
+
+       {
+               .name = "memory_migrate",
+               .read_u64 = cpuset_read_u64,
+               .write_u64 = cpuset_write_u64,
+               .private = FILE_MEMORY_MIGRATE,
+       },
+
+       {
+               .name = "memory_pressure",
+               .read_u64 = cpuset_read_u64,
+               .write_u64 = cpuset_write_u64,
+               .private = FILE_MEMORY_PRESSURE,
+       },
+
+       {
+               .name = "memory_spread_page",
+               .read_u64 = cpuset_read_u64,
+               .write_u64 = cpuset_write_u64,
+               .private = FILE_SPREAD_PAGE,
+       },
+
+       {
+               .name = "memory_spread_slab",
+               .read_u64 = cpuset_read_u64,
+               .write_u64 = cpuset_write_u64,
+               .private = FILE_SPREAD_SLAB,
+       },
 };
 
 static struct cftype cft_memory_pressure_enabled = {
        .name = "memory_pressure_enabled",
-       .read = cpuset_common_file_read,
-       .write = cpuset_common_file_write,
+       .read_u64 = cpuset_read_u64,
+       .write_u64 = cpuset_write_u64,
        .private = FILE_MEMORY_PRESSURE_ENABLED,
 };
 
-static struct cftype cft_memory_pressure = {
-       .name = "memory_pressure",
-       .read = cpuset_common_file_read,
-       .write = cpuset_common_file_write,
-       .private = FILE_MEMORY_PRESSURE,
-};
-
-static struct cftype cft_spread_page = {
-       .name = "memory_spread_page",
-       .read = cpuset_common_file_read,
-       .write = cpuset_common_file_write,
-       .private = FILE_SPREAD_PAGE,
-};
-
-static struct cftype cft_spread_slab = {
-       .name = "memory_spread_slab",
-       .read = cpuset_common_file_read,
-       .write = cpuset_common_file_write,
-       .private = FILE_SPREAD_SLAB,
-};
-
 static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont)
 {
        int err;
 
-       if ((err = cgroup_add_file(cont, ss, &cft_cpus)) < 0)
-               return err;
-       if ((err = cgroup_add_file(cont, ss, &cft_mems)) < 0)
-               return err;
-       if ((err = cgroup_add_file(cont, ss, &cft_cpu_exclusive)) < 0)
-               return err;
-       if ((err = cgroup_add_file(cont, ss, &cft_mem_exclusive)) < 0)
-               return err;
-       if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0)
-               return err;
-       if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0)
-               return err;
-       if ((err = cgroup_add_file(cont, ss,
-                                       &cft_sched_relax_domain_level)) < 0)
-               return err;
-       if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0)
-               return err;
-       if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0)
-               return err;
-       if ((err = cgroup_add_file(cont, ss, &cft_spread_slab)) < 0)
+       err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files));
+       if (err)
                return err;
        /* memory_pressure_enabled is in root cpuset only */
-       if (err == 0 && !cont->parent)
+       if (!cont->parent)
                err = cgroup_add_file(cont, ss,
-                                        &cft_memory_pressure_enabled);
-       return 0;
+                                     &cft_memory_pressure_enabled);
+       return err;
 }
 
 /*
@@ -1642,7 +1651,7 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont)
        cpuset_update_task_memory_state();
 
        if (is_sched_load_balance(cs))
-               update_flag(CS_SCHED_LOAD_BALANCE, cs, "0");
+               update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 
        number_of_cpusets--;
        kfree(cs);
@@ -1707,7 +1716,8 @@ int __init cpuset_init(void)
  * Called by cgroup_scan_tasks() for each task in a cgroup.
  * Return nonzero to stop the walk through the tasks.
  */
-void cpuset_do_move_task(struct task_struct *tsk, struct cgroup_scanner *scan)
+static void cpuset_do_move_task(struct task_struct *tsk,
+                               struct cgroup_scanner *scan)
 {
        struct cpuset_hotplug_scanner *chsp;
 
@@ -1958,33 +1968,25 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
 }
 
 /**
- * cpuset_zonelist_valid_mems_allowed - check zonelist vs. curremt mems_allowed
- * @zl: the zonelist to be checked
+ * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
+ * @nodemask: the nodemask to be checked
  *
- * Are any of the nodes on zonelist zl allowed in current->mems_allowed?
+ * Are any of the nodes in the nodemask allowed in current->mems_allowed?
  */
-int cpuset_zonelist_valid_mems_allowed(struct zonelist *zl)
+int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
 {
-       int i;
-
-       for (i = 0; zl->zones[i]; i++) {
-               int nid = zone_to_nid(zl->zones[i]);
-
-               if (node_isset(nid, current->mems_allowed))
-                       return 1;
-       }
-       return 0;
+       return nodes_intersects(*nodemask, current->mems_allowed);
 }
 
 /*
- * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive
- * ancestor to the specified cpuset.  Call holding callback_mutex.
- * If no ancestor is mem_exclusive (an unusual configuration), then
- * returns the root cpuset.
+ * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
+ * mem_hardwall ancestor to the specified cpuset.  Call holding
+ * callback_mutex.  If no ancestor is mem_exclusive or mem_hardwall
+ * (an unusual configuration), then returns the root cpuset.
  */
-static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
+static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs)
 {
-       while (!is_mem_exclusive(cs) && cs->parent)
+       while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent)
                cs = cs->parent;
        return cs;
 }
@@ -1998,7 +2000,7 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
  * __GFP_THISNODE is set, yes, we can always allocate.  If zone
  * z's node is in our tasks mems_allowed, yes.  If it's not a
  * __GFP_HARDWALL request and this zone's nodes is in the nearest
- * mem_exclusive cpuset ancestor to this tasks cpuset, yes.
+ * hardwalled cpuset ancestor to this tasks cpuset, yes.
  * If the task has been OOM killed and has access to memory reserves
  * as specified by the TIF_MEMDIE flag, yes.
  * Otherwise, no.
@@ -2021,7 +2023,7 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
  * and do not allow allocations outside the current tasks cpuset
  * unless the task has been OOM killed as is marked TIF_MEMDIE.
  * GFP_KERNEL allocations are not so marked, so can escape to the
- * nearest enclosing mem_exclusive ancestor cpuset.
+ * nearest enclosing hardwalled ancestor cpuset.
  *
  * Scanning up parent cpusets requires callback_mutex.  The
  * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
@@ -2044,7 +2046,7 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs)
  *     in_interrupt - any node ok (current task context irrelevant)
  *     GFP_ATOMIC   - any node ok
  *     TIF_MEMDIE   - any node ok
- *     GFP_KERNEL   - any node in enclosing mem_exclusive cpuset ok
+ *     GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
  *     GFP_USER     - only nodes in current tasks mems allowed ok.
  *
  * Rule:
@@ -2081,7 +2083,7 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
        mutex_lock(&callback_mutex);
 
        task_lock(current);
-       cs = nearest_exclusive_ancestor(task_cs(current));
+       cs = nearest_hardwall_ancestor(task_cs(current));
        task_unlock(current);
 
        allowed = node_isset(node, cs->mems_allowed);
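To make the hardwall rules above concrete, a small sketch (not part of this commit; example_alloc() is hypothetical): GFP_USER includes __GFP_HARDWALL and is therefore confined to the current task's cpuset, while GFP_KERNEL may fall back to the nearest mem_exclusive or mem_hardwall ancestor found by nearest_hardwall_ancestor().

#include <linux/gfp.h>
#include <linux/slab.h>

static void example_alloc(void)
{
        /* __GFP_HARDWALL (part of GFP_USER): current cpuset's nodes only */
        void *user_buf = kmalloc(64, GFP_USER);

        /* GFP_KERNEL: may use nodes of the nearest hardwalled ancestor */
        void *kern_buf = kmalloc(64, GFP_KERNEL);

        kfree(user_buf);
        kfree(kern_buf);
}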
index 6a82bb716dace9bbcf0b469e3119fd10a95b9b1a..d2c60a82279085e80d0a46edbc0a3557e9b87f83 100644 (file)
@@ -149,12 +149,7 @@ static const struct file_operations proc_dma_operations = {
 
 static int __init proc_dma_init(void)
 {
-       struct proc_dir_entry *e;
-
-       e = create_proc_entry("dma", 0, NULL);
-       if (e)
-               e->proc_fops = &proc_dma_operations;
-
+       proc_create("dma", 0, NULL, &proc_dma_operations);
        return 0;
 }
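The create_proc_entry() + ->proc_fops conversions in this commit (kernel/cgroup.c, kernel/configs.c and here) all follow the same shape: proc_create() registers the entry and its file_operations atomically, closing the window in which the entry was visible without operations. A minimal self-contained sketch, not part of this commit (the module, file name and example_* symbols are hypothetical):

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
        seq_printf(m, "hello\n");
        return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_show, NULL);
}

static const struct file_operations example_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = example_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static int __init example_init(void)
{
        /* one call replaces create_proc_entry() + the ->proc_fops assignment */
        if (!proc_create("example", 0, NULL, &example_proc_fops))
                return -ENOMEM;
        return 0;
}

static void __exit example_exit(void)
{
        remove_proc_entry("example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");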
 
index 97f609f574b1843556a6a07fc4c6c50180725b31..d3ad54677f9c0257fa0a373f8ce1b4774e947722 100644 (file)
 
 static void exit_mm(struct task_struct * tsk);
 
+static inline int task_detached(struct task_struct *p)
+{
+       return p->exit_signal == -1;
+}
+
 static void __unhash_process(struct task_struct *p)
 {
        nr_threads--;
@@ -160,7 +165,7 @@ repeat:
        zap_leader = 0;
        leader = p->group_leader;
        if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
-               BUG_ON(leader->exit_signal == -1);
+               BUG_ON(task_detached(leader));
                do_notify_parent(leader, leader->exit_signal);
                /*
                 * If we were the last child thread and the leader has
@@ -170,7 +175,7 @@ repeat:
                 * do_notify_parent() will have marked it self-reaping in
                 * that case.
                 */
-               zap_leader = (leader->exit_signal == -1);
+               zap_leader = task_detached(leader);
        }
 
        write_unlock_irq(&tasklist_lock);
@@ -329,13 +334,11 @@ void __set_special_pids(struct pid *pid)
        pid_t nr = pid_nr(pid);
 
        if (task_session(curr) != pid) {
-               detach_pid(curr, PIDTYPE_SID);
-               attach_pid(curr, PIDTYPE_SID, pid);
+               change_pid(curr, PIDTYPE_SID, pid);
                set_task_session(curr, nr);
        }
        if (task_pgrp(curr) != pid) {
-               detach_pid(curr, PIDTYPE_PGID);
-               attach_pid(curr, PIDTYPE_PGID, pid);
+               change_pid(curr, PIDTYPE_PGID, pid);
                set_task_pgrp(curr, nr);
        }
 }
@@ -557,6 +560,88 @@ void exit_fs(struct task_struct *tsk)
 
 EXPORT_SYMBOL_GPL(exit_fs);
 
+#ifdef CONFIG_MM_OWNER
+/*
+ * Task p is exiting and it owned mm; let's find a new owner for it
+ */
+static inline int
+mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
+{
+       /*
+        * If there are other users of the mm and the owner (us) is exiting
+        * we need to find a new owner to take on the responsibility.
+        */
+       if (!mm)
+               return 0;
+       if (atomic_read(&mm->mm_users) <= 1)
+               return 0;
+       if (mm->owner != p)
+               return 0;
+       return 1;
+}
+
+void mm_update_next_owner(struct mm_struct *mm)
+{
+       struct task_struct *c, *g, *p = current;
+
+retry:
+       if (!mm_need_new_owner(mm, p))
+               return;
+
+       read_lock(&tasklist_lock);
+       /*
+        * Search in the children
+        */
+       list_for_each_entry(c, &p->children, sibling) {
+               if (c->mm == mm)
+                       goto assign_new_owner;
+       }
+
+       /*
+        * Search in the siblings
+        */
+       list_for_each_entry(c, &p->parent->children, sibling) {
+               if (c->mm == mm)
+                       goto assign_new_owner;
+       }
+
+       /*
+        * Search through everything else. We should not get
+        * here often
+        */
+       do_each_thread(g, c) {
+               if (c->mm == mm)
+                       goto assign_new_owner;
+       } while_each_thread(g, c);
+
+       read_unlock(&tasklist_lock);
+       return;
+
+assign_new_owner:
+       BUG_ON(c == p);
+       get_task_struct(c);
+       /*
+        * The task_lock protects c->mm from changing.
+        * We always want mm->owner->mm == mm
+        */
+       task_lock(c);
+       /*
+        * Delay read_unlock() till we have the task_lock()
+        * to ensure that c does not slip away underneath us
+        */
+       read_unlock(&tasklist_lock);
+       if (c->mm != mm) {
+               task_unlock(c);
+               put_task_struct(c);
+               goto retry;
+       }
+       cgroup_mm_owner_callbacks(mm->owner, c);
+       mm->owner = c;
+       task_unlock(c);
+       put_task_struct(c);
+}
+#endif /* CONFIG_MM_OWNER */
+
 /*
  * Turn us into a lazy TLB process if we
  * aren't already..
@@ -596,6 +681,7 @@ static void exit_mm(struct task_struct * tsk)
        /* We don't want this task to be frozen prematurely */
        clear_freeze_flag(tsk);
        task_unlock(tsk);
+       mm_update_next_owner(mm);
        mmput(mm);
 }
 
@@ -610,7 +696,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
        if (unlikely(traced)) {
                /* Preserve ptrace links if someone else is tracing this child.  */
                list_del_init(&p->ptrace_list);
-               if (p->parent != p->real_parent)
+               if (ptrace_reparented(p))
                        list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
        } else {
                /* If this child is being traced, then we're the one tracing it
@@ -634,18 +720,18 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
        /* If this is a threaded reparent there is no need to
         * notify anyone anything has happened.
         */
-       if (p->real_parent->group_leader == father->group_leader)
+       if (same_thread_group(p->real_parent, father))
                return;
 
        /* We don't want people slaying init.  */
-       if (p->exit_signal != -1)
+       if (!task_detached(p))
                p->exit_signal = SIGCHLD;
 
        /* If we'd notified the old parent about this child's death,
         * also notify the new parent.
         */
        if (!traced && p->exit_state == EXIT_ZOMBIE &&
-           p->exit_signal != -1 && thread_group_empty(p))
+           !task_detached(p) && thread_group_empty(p))
                do_notify_parent(p, p->exit_signal);
 
        kill_orphaned_pgrp(p, father);
@@ -698,18 +784,18 @@ static void forget_original_parent(struct task_struct *father)
                } else {
                        /* reparent ptraced task to its real parent */
                        __ptrace_unlink (p);
-                       if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
+                       if (p->exit_state == EXIT_ZOMBIE && !task_detached(p) &&
                            thread_group_empty(p))
                                do_notify_parent(p, p->exit_signal);
                }
 
                /*
-                * if the ptraced child is a zombie with exit_signal == -1
-                * we must collect it before we exit, or it will remain
-                * zombie forever since we prevented it from self-reap itself
-                * while it was being traced by us, to be able to see it in wait4.
+                * if the ptraced child is a detached zombie we must collect
+                * it before we exit, or it will remain a zombie forever since
+                * we prevented it from reaping itself while it was being
+                * traced by us, so that it could be seen in wait4.
                 */
-               if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
+               if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && task_detached(p)))
                        list_add(&p->ptrace_list, &ptrace_dead);
        }
 
@@ -766,29 +852,30 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
         * we have changed execution domain as these two values started
         * the same after a fork.
         */
-       if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
+       if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
            (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
-            tsk->self_exec_id != tsk->parent_exec_id)
-           && !capable(CAP_KILL))
+            tsk->self_exec_id != tsk->parent_exec_id) &&
+           !capable(CAP_KILL))
                tsk->exit_signal = SIGCHLD;
 
-
        /* If something other than our normal parent is ptracing us, then
         * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
         * only has special meaning to our real parent.
         */
-       if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
-               int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
+       if (!task_detached(tsk) && thread_group_empty(tsk)) {
+               int signal = ptrace_reparented(tsk) ?
+                               SIGCHLD : tsk->exit_signal;
                do_notify_parent(tsk, signal);
        } else if (tsk->ptrace) {
                do_notify_parent(tsk, SIGCHLD);
        }
 
        state = EXIT_ZOMBIE;
-       if (tsk->exit_signal == -1 && likely(!tsk->ptrace))
+       if (task_detached(tsk) && likely(!tsk->ptrace))
                state = EXIT_DEAD;
        tsk->exit_state = state;
 
+       /* mt-exec, de_thread() is waiting for us */
        if (thread_group_leader(tsk) &&
            tsk->signal->notify_count < 0 &&
            tsk->signal->group_exit_task)
@@ -967,7 +1054,7 @@ NORET_TYPE void do_exit(long code)
        proc_exit_connector(tsk);
        exit_notify(tsk, group_dead);
 #ifdef CONFIG_NUMA
-       mpol_free(tsk->mempolicy);
+       mpol_put(tsk->mempolicy);
        tsk->mempolicy = NULL;
 #endif
 #ifdef CONFIG_FUTEX
@@ -1032,12 +1119,13 @@ asmlinkage long sys_exit(int error_code)
 NORET_TYPE void
 do_group_exit(int exit_code)
 {
+       struct signal_struct *sig = current->signal;
+
        BUG_ON(exit_code & 0x80); /* core dumps don't get here */
 
-       if (current->signal->flags & SIGNAL_GROUP_EXIT)
-               exit_code = current->signal->group_exit_code;
+       if (signal_group_exit(sig))
+               exit_code = sig->group_exit_code;
        else if (!thread_group_empty(current)) {
-               struct signal_struct *const sig = current->signal;
                struct sighand_struct *const sighand = current->sighand;
                spin_lock_irq(&sighand->siglock);
                if (signal_group_exit(sig))
@@ -1089,7 +1177,7 @@ static int eligible_child(enum pid_type type, struct pid *pid, int options,
         * Do not consider detached threads that are
         * not ptraced:
         */
-       if (p->exit_signal == -1 && !p->ptrace)
+       if (task_detached(p) && !p->ptrace)
                return 0;
 
        /* Wait for all children (clone and not) if __WALL is set;
@@ -1179,8 +1267,7 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
                return 0;
        }
 
-       /* traced means p->ptrace, but not vice versa */
-       traced = (p->real_parent != p->parent);
+       traced = ptrace_reparented(p);
 
        if (likely(!traced)) {
                struct signal_struct *psig;
@@ -1281,9 +1368,9 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
                 * If it's still not detached after that, don't release
                 * it now.
                 */
-               if (p->exit_signal != -1) {
+               if (!task_detached(p)) {
                        do_notify_parent(p, p->exit_signal);
-                       if (p->exit_signal != -1) {
+                       if (!task_detached(p)) {
                                p->exit_state = EXIT_ZOMBIE;
                                p = NULL;
                        }
index cb46befdd3a060bd3b6d96fc64acaadbf74c4e39..2bb675af4de30908b2dd1a22e745541e2bcbc3f5 100644 (file)
@@ -279,7 +279,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                if (!tmp)
                        goto fail_nomem;
                *tmp = *mpnt;
-               pol = mpol_copy(vma_policy(mpnt));
+               pol = mpol_dup(vma_policy(mpnt));
                retval = PTR_ERR(pol);
                if (IS_ERR(pol))
                        goto fail_nomem_policy;
@@ -381,14 +381,13 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
        mm->ioctx_list = NULL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        mm->cached_hole_size = ~0UL;
-       mm_init_cgroup(mm, p);
+       mm_init_owner(mm, p);
 
        if (likely(!mm_alloc_pgd(mm))) {
                mm->def_flags = 0;
                return mm;
        }
 
-       mm_free_cgroup(mm);
        free_mm(mm);
        return NULL;
 }
@@ -432,13 +431,13 @@ void mmput(struct mm_struct *mm)
        if (atomic_dec_and_test(&mm->mm_users)) {
                exit_aio(mm);
                exit_mmap(mm);
+               set_mm_exe_file(mm, NULL);
                if (!list_empty(&mm->mmlist)) {
                        spin_lock(&mmlist_lock);
                        list_del(&mm->mmlist);
                        spin_unlock(&mmlist_lock);
                }
                put_swap_token(mm);
-               mm_free_cgroup(mm);
                mmdrop(mm);
        }
 }
@@ -521,7 +520,7 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
  * Allocate a new mm structure and copy contents from the
  * mm structure of the passed in task structure.
  */
-static struct mm_struct *dup_mm(struct task_struct *tsk)
+struct mm_struct *dup_mm(struct task_struct *tsk)
 {
        struct mm_struct *mm, *oldmm = current->mm;
        int err;
@@ -545,6 +544,8 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
        if (init_new_context(tsk, mm))
                goto fail_nocontext;
 
+       dup_mm_exe_file(oldmm, mm);
+
        err = dup_mmap(mm, oldmm);
        if (err)
                goto free_pt;
@@ -891,7 +892,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
        sig->group_exit_code = 0;
        sig->group_exit_task = NULL;
        sig->group_stop_count = 0;
-       sig->curr_target = NULL;
+       sig->curr_target = tsk;
        init_sigpending(&sig->shared_pending);
        INIT_LIST_HEAD(&sig->posix_timers);
 
@@ -982,6 +983,13 @@ static void rt_mutex_init_task(struct task_struct *p)
 #endif
 }
 
+#ifdef CONFIG_MM_OWNER
+void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+{
+       mm->owner = p;
+}
+#endif /* CONFIG_MM_OWNER */
+
 /*
  * This creates a new process as a copy of the old one,
  * but does not actually start it yet.
@@ -1116,7 +1124,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        p->audit_context = NULL;
        cgroup_fork(p);
 #ifdef CONFIG_NUMA
-       p->mempolicy = mpol_copy(p->mempolicy);
+       p->mempolicy = mpol_dup(p->mempolicy);
        if (IS_ERR(p->mempolicy)) {
                retval = PTR_ERR(p->mempolicy);
                p->mempolicy = NULL;
@@ -1374,7 +1382,7 @@ bad_fork_cleanup_security:
        security_task_free(p);
 bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
-       mpol_free(p->mempolicy);
+       mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
 #endif
        cgroup_exit(p, cgroup_callbacks_done);
@@ -1663,18 +1671,6 @@ static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp
        return 0;
 }
 
-/*
- * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not
- * supported yet
- */
-static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp)
-{
-       if (unshare_flags & CLONE_SYSVSEM)
-               return -EINVAL;
-
-       return 0;
-}
-
 /*
  * unshare allows a process to 'unshare' part of the process
  * context which was originally shared using clone.  copy_*
@@ -1690,8 +1686,8 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
        struct sighand_struct *new_sigh = NULL;
        struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL;
        struct files_struct *fd, *new_fd = NULL;
-       struct sem_undo_list *new_ulist = NULL;
        struct nsproxy *new_nsproxy = NULL;
+       int do_sysvsem = 0;
 
        check_unshare_flags(&unshare_flags);
 
@@ -1703,6 +1699,13 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
                                CLONE_NEWNET))
                goto bad_unshare_out;
 
+       /*
+        * CLONE_NEWIPC must also detach from the undolist: after switching
+        * to a new ipc namespace, the semaphore arrays from the old
+        * namespace are unreachable.
+        */
+       if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
+               do_sysvsem = 1;
        if ((err = unshare_thread(unshare_flags)))
                goto bad_unshare_out;
        if ((err = unshare_fs(unshare_flags, &new_fs)))
@@ -1713,13 +1716,17 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
                goto bad_unshare_cleanup_sigh;
        if ((err = unshare_fd(unshare_flags, &new_fd)))
                goto bad_unshare_cleanup_vm;
-       if ((err = unshare_semundo(unshare_flags, &new_ulist)))
-               goto bad_unshare_cleanup_fd;
        if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
                        new_fs)))
-               goto bad_unshare_cleanup_semundo;
+               goto bad_unshare_cleanup_fd;
 
-       if (new_fs ||  new_mm || new_fd || new_ulist || new_nsproxy) {
+       if (new_fs ||  new_mm || new_fd || do_sysvsem || new_nsproxy) {
+               if (do_sysvsem) {
+                       /*
+                        * CLONE_SYSVSEM is equivalent to sys_exit().
+                        */
+                       exit_sem(current);
+               }
 
                if (new_nsproxy) {
                        switch_task_namespaces(current, new_nsproxy);
@@ -1755,7 +1762,6 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
        if (new_nsproxy)
                put_nsproxy(new_nsproxy);
 
-bad_unshare_cleanup_semundo:
 bad_unshare_cleanup_fd:
        if (new_fd)
                put_files_struct(new_fd);
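From userspace, the sys_unshare() change above means CLONE_SYSVSEM no longer fails with -EINVAL; it now detaches the caller's SysV semaphore undo list via exit_sem(), and CLONE_NEWIPC implies the same detach. A small illustrative program (not part of this commit):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
        if (unshare(CLONE_SYSVSEM) == -1) {
                perror("unshare(CLONE_SYSVSEM)");  /* was always EINVAL before */
                return 1;
        }
        puts("SysV semaphore undo list detached");
        return 0;
}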
index e43945e995f54040aecc49678922b9e18186db52..98092c9817f42597c8c9a51b5ca1c4ce90509548 100644 (file)
@@ -1266,11 +1266,13 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
                if (!abs_time)
                        schedule();
                else {
-                       hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+                       hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC,
+                                               HRTIMER_MODE_ABS);
                        hrtimer_init_sleeper(&t, current);
                        t.timer.expires = *abs_time;
 
-                       hrtimer_start(&t.timer, t.timer.expires, HRTIMER_MODE_ABS);
+                       hrtimer_start(&t.timer, t.timer.expires,
+                                               HRTIMER_MODE_ABS);
                        if (!hrtimer_active(&t.timer))
                                t.task = NULL;
 
@@ -1286,6 +1288,8 @@ static int futex_wait(u32 __user *uaddr, struct rw_semaphore *fshared,
 
                        /* Flag if a timeout occurred */
                        rem = (t.task == NULL);
+
+                       destroy_hrtimer_on_stack(&t.timer);
                }
        }
        __set_current_state(TASK_RUNNING);
@@ -1367,7 +1371,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
        if (time) {
                to = &timeout;
-               hrtimer_init(&to->timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+               hrtimer_init_on_stack(&to->timer, CLOCK_REALTIME,
+                                     HRTIMER_MODE_ABS);
                hrtimer_init_sleeper(to, current);
                to->timer.expires = *time;
        }
@@ -1581,6 +1586,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
        unqueue_me_pi(&q);
        futex_unlock_mm(fshared);
 
+       if (to)
+               destroy_hrtimer_on_stack(&to->timer);
        return ret != -EINTR ? ret : -ERESTARTNOINTR;
 
  out_unlock_release_sem:
@@ -1588,6 +1595,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
 
  out_release_sem:
        futex_unlock_mm(fshared);
+       if (to)
+               destroy_hrtimer_on_stack(&to->timer);
        return ret;
 
  uaddr_faulted:
@@ -1615,6 +1624,8 @@ static int futex_lock_pi(u32 __user *uaddr, struct rw_semaphore *fshared,
        if (!ret && (uval != -EFAULT))
                goto retry;
 
+       if (to)
+               destroy_hrtimer_on_stack(&to->timer);
        return ret;
 }
 
index f78777abe769e1e3608b8f751d1c996f05aeea13..9af1d6a8095e40f80e64cdade6c0c3b505184466 100644 (file)
@@ -43,6 +43,7 @@
 #include <linux/tick.h>
 #include <linux/seq_file.h>
 #include <linux/err.h>
+#include <linux/debugobjects.h>
 
 #include <asm/uaccess.h>
 
@@ -342,6 +343,115 @@ ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
        return res;
 }
 
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+
+static struct debug_obj_descr hrtimer_debug_descr;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
+{
+       struct hrtimer *timer = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               hrtimer_cancel(timer);
+               debug_object_init(timer, &hrtimer_debug_descr);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
+{
+       switch (state) {
+
+       case ODEBUG_STATE_NOTAVAILABLE:
+               WARN_ON_ONCE(1);
+               return 0;
+
+       case ODEBUG_STATE_ACTIVE:
+               WARN_ON(1);
+
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
+{
+       struct hrtimer *timer = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               hrtimer_cancel(timer);
+               debug_object_free(timer, &hrtimer_debug_descr);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static struct debug_obj_descr hrtimer_debug_descr = {
+       .name           = "hrtimer",
+       .fixup_init     = hrtimer_fixup_init,
+       .fixup_activate = hrtimer_fixup_activate,
+       .fixup_free     = hrtimer_fixup_free,
+};
+
+static inline void debug_hrtimer_init(struct hrtimer *timer)
+{
+       debug_object_init(timer, &hrtimer_debug_descr);
+}
+
+static inline void debug_hrtimer_activate(struct hrtimer *timer)
+{
+       debug_object_activate(timer, &hrtimer_debug_descr);
+}
+
+static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
+{
+       debug_object_deactivate(timer, &hrtimer_debug_descr);
+}
+
+static inline void debug_hrtimer_free(struct hrtimer *timer)
+{
+       debug_object_free(timer, &hrtimer_debug_descr);
+}
+
+static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+                          enum hrtimer_mode mode);
+
+void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
+                          enum hrtimer_mode mode)
+{
+       debug_object_init_on_stack(timer, &hrtimer_debug_descr);
+       __hrtimer_init(timer, clock_id, mode);
+}
+
+void destroy_hrtimer_on_stack(struct hrtimer *timer)
+{
+       debug_object_free(timer, &hrtimer_debug_descr);
+}
+
+#else
+static inline void debug_hrtimer_init(struct hrtimer *timer) { }
+static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
+static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
+#endif
+
 /*
  * Check, whether the timer is on the callback pending list
  */
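The debug_obj_descr machinery added above is generic: any object type can be tracked the same way, with fixup hooks returning 1 when they repaired the reported state and 0 otherwise. A minimal sketch for a hypothetical object type (struct example_obj and example_deactivate() are invented for illustration; only the debugobjects calls are real):

#include <linux/debugobjects.h>

struct example_obj { int active; };                        /* hypothetical */

static struct debug_obj_descr example_debug_descr;

static void example_deactivate(struct example_obj *obj)    /* hypothetical */
{
        obj->active = 0;
}

/* fixup_free runs when an object that is still active is being freed */
static int example_fixup_free(void *addr, enum debug_obj_state state)
{
        struct example_obj *obj = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                example_deactivate(obj);
                debug_object_free(obj, &example_debug_descr);
                return 1;                                   /* repaired */
        default:
                return 0;                                   /* nothing to do */
        }
}

static struct debug_obj_descr example_debug_descr = {
        .name       = "example_obj",
        .fixup_free = example_fixup_free,
};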
@@ -567,6 +677,7 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                /* Timer is expired, act upon the callback mode */
                switch(timer->cb_mode) {
                case HRTIMER_CB_IRQSAFE_NO_RESTART:
+                       debug_hrtimer_deactivate(timer);
                        /*
                         * We can call the callback from here. No restart
                         * happens, so no danger of recursion
@@ -581,6 +692,7 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                         * the tick timer in the softirq ! The calling site
                         * takes care of this.
                         */
+                       debug_hrtimer_deactivate(timer);
                        return 1;
                case HRTIMER_CB_IRQSAFE:
                case HRTIMER_CB_SOFTIRQ:
@@ -590,7 +702,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                        list_add_tail(&timer->cb_entry,
                                      &base->cpu_base->cb_pending);
                        timer->state = HRTIMER_STATE_PENDING;
-                       raise_softirq(HRTIMER_SOFTIRQ);
                        return 1;
                default:
                        BUG();
@@ -633,6 +744,11 @@ static int hrtimer_switch_to_hres(void)
        return 1;
 }
 
+static inline void hrtimer_raise_softirq(void)
+{
+       raise_softirq(HRTIMER_SOFTIRQ);
+}
+
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -651,6 +767,7 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
 {
        return 0;
 }
+static inline void hrtimer_raise_softirq(void) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -730,6 +847,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
        struct hrtimer *entry;
        int leftmost = 1;
 
+       debug_hrtimer_activate(timer);
+
        /*
         * Find the right place in the rbtree:
         */
@@ -826,6 +945,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
                 * reprogramming happens in the interrupt handler. This is a
                 * rare case and less expensive than a smp call.
                 */
+               debug_hrtimer_deactivate(timer);
                timer_stats_hrtimer_clear_start_info(timer);
                reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
                __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
@@ -850,7 +970,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 {
        struct hrtimer_clock_base *base, *new_base;
        unsigned long flags;
-       int ret;
+       int ret, raise;
 
        base = lock_hrtimer_base(timer, &flags);
 
@@ -873,6 +993,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
                tim = ktime_add_safe(tim, base->resolution);
 #endif
        }
+
        timer->expires = tim;
 
        timer_stats_hrtimer_set_start_info(timer);
@@ -884,8 +1005,18 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
        enqueue_hrtimer(timer, new_base,
                        new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
 
+       /*
+        * The timer may be expired and moved to the cb_pending
+        * list. We can not raise the softirq with base lock held due
+        * to a possible deadlock with runqueue lock.
+        */
+       raise = timer->state == HRTIMER_STATE_PENDING;
+
        unlock_hrtimer_base(timer, &flags);
 
+       if (raise)
+               hrtimer_raise_softirq();
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(hrtimer_start);
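The "raise" flag introduced above follows a common shape worth spelling out: decide under the lock, act only after dropping it, so the softirq is never raised with base->lock held (which could deadlock against the runqueue lock). A generic illustrative sketch, not part of this commit (example_locked_then_raise() is hypothetical):

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static void example_locked_then_raise(spinlock_t *lock, int *pending)
{
        int raise;

        spin_lock(lock);
        raise = *pending;                       /* decide while holding the lock */
        spin_unlock(lock);

        if (raise)
                raise_softirq(HRTIMER_SOFTIRQ); /* act only after unlocking */
}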
@@ -996,14 +1127,8 @@ ktime_t hrtimer_get_next_event(void)
 }
 #endif
 
-/**
- * hrtimer_init - initialize a timer to the given clock
- * @timer:     the timer to be initialized
- * @clock_id:  the clock to be used
- * @mode:      timer mode abs/rel
- */
-void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
-                 enum hrtimer_mode mode)
+static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+                          enum hrtimer_mode mode)
 {
        struct hrtimer_cpu_base *cpu_base;
 
@@ -1024,6 +1149,19 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
        memset(timer->start_comm, 0, TASK_COMM_LEN);
 #endif
 }
+
+/**
+ * hrtimer_init - initialize a timer to the given clock
+ * @timer:     the timer to be initialized
+ * @clock_id:  the clock to be used
+ * @mode:      timer mode abs/rel
+ */
+void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+                 enum hrtimer_mode mode)
+{
+       debug_hrtimer_init(timer);
+       __hrtimer_init(timer, clock_id, mode);
+}
 EXPORT_SYMBOL_GPL(hrtimer_init);
 
 /**
@@ -1057,6 +1195,7 @@ static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
                timer = list_entry(cpu_base->cb_pending.next,
                                   struct hrtimer, cb_entry);
 
+               debug_hrtimer_deactivate(timer);
                timer_stats_account_hrtimer(timer);
 
                fn = timer->function;
@@ -1080,8 +1219,19 @@ static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
                         * If the timer was rearmed on another CPU, reprogram
                         * the event device.
                         */
-                       if (timer->base->first == &timer->node)
-                               hrtimer_reprogram(timer, timer->base);
+                       struct hrtimer_clock_base *base = timer->base;
+
+                       if (base->first == &timer->node &&
+                           hrtimer_reprogram(timer, base)) {
+                               /*
+                                * Timer is expired. Thus move it from tree to
+                                * pending list again.
+                                */
+                               __remove_hrtimer(timer, base,
+                                                HRTIMER_STATE_PENDING, 0);
+                               list_add_tail(&timer->cb_entry,
+                                             &base->cpu_base->cb_pending);
+                       }
                }
        }
        spin_unlock_irq(&cpu_base->lock);
@@ -1094,6 +1244,7 @@ static void __run_hrtimer(struct hrtimer *timer)
        enum hrtimer_restart (*fn)(struct hrtimer *);
        int restart;
 
+       debug_hrtimer_deactivate(timer);
        __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
        timer_stats_account_hrtimer(timer);
 
@@ -1352,22 +1503,27 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
 {
        struct hrtimer_sleeper t;
        struct timespec __user  *rmtp;
+       int ret = 0;
 
-       hrtimer_init(&t.timer, restart->nanosleep.index, HRTIMER_MODE_ABS);
+       hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
+                               HRTIMER_MODE_ABS);
        t.timer.expires.tv64 = restart->nanosleep.expires;
 
        if (do_nanosleep(&t, HRTIMER_MODE_ABS))
-               return 0;
+               goto out;
 
        rmtp = restart->nanosleep.rmtp;
        if (rmtp) {
-               int ret = update_rmtp(&t.timer, rmtp);
+               ret = update_rmtp(&t.timer, rmtp);
                if (ret <= 0)
-                       return ret;
+                       goto out;
        }
 
        /* The other values in restart are already filled in */
-       return -ERESTART_RESTARTBLOCK;
+       ret = -ERESTART_RESTARTBLOCK;
+out:
+       destroy_hrtimer_on_stack(&t.timer);
+       return ret;
 }
 
 long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
@@ -1375,20 +1531,23 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 {
        struct restart_block *restart;
        struct hrtimer_sleeper t;
+       int ret = 0;
 
-       hrtimer_init(&t.timer, clockid, mode);
+       hrtimer_init_on_stack(&t.timer, clockid, mode);
        t.timer.expires = timespec_to_ktime(*rqtp);
        if (do_nanosleep(&t, mode))
-               return 0;
+               goto out;
 
        /* Absolute timers do not update the rmtp value and restart: */
-       if (mode == HRTIMER_MODE_ABS)
-               return -ERESTARTNOHAND;
+       if (mode == HRTIMER_MODE_ABS) {
+               ret = -ERESTARTNOHAND;
+               goto out;
+       }
 
        if (rmtp) {
-               int ret = update_rmtp(&t.timer, rmtp);
+               ret = update_rmtp(&t.timer, rmtp);
                if (ret <= 0)
-                       return ret;
+                       goto out;
        }
 
        restart = &current_thread_info()->restart_block;
@@ -1397,7 +1556,10 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
        restart->nanosleep.rmtp = rmtp;
        restart->nanosleep.expires = t.timer.expires.tv64;
 
-       return -ERESTART_RESTARTBLOCK;
+       ret = -ERESTART_RESTARTBLOCK;
+out:
+       destroy_hrtimer_on_stack(&t.timer);
+       return ret;
 }
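
A minimal sketch of the on-stack pattern the two nanosleep hunks above switch to (demo_timeout(), demo_sleep() and the completion comment are made up for illustration; the *_on_stack helpers are the ones this series introduces): an hrtimer that lives on the stack is announced to the debug-objects code when it is set up and torn down again before the stack frame is reused.

#include <linux/hrtimer.h>

static enum hrtimer_restart demo_timeout(struct hrtimer *t)
{
        return HRTIMER_NORESTART;
}

static void demo_sleep(ktime_t delay)
{
        struct hrtimer t;

        hrtimer_init_on_stack(&t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        t.function = demo_timeout;
        hrtimer_start(&t, delay, HRTIMER_MODE_REL);

        /* ... wait for the callback, e.g. via a completion ... */

        hrtimer_cancel(&t);
        /* Must run before the frame is reused, or the debug-objects
         * tracking would see a timer silently vanish. */
        destroy_hrtimer_on_stack(&t);
}
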
 
 asmlinkage long
@@ -1442,6 +1604,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
        while ((node = rb_first(&old_base->active))) {
                timer = rb_entry(node, struct hrtimer, node);
                BUG_ON(hrtimer_callback_running(timer));
+               debug_hrtimer_deactivate(timer);
                __remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
                timer->base = new_base;
                /*
index 6d9204f3a370cfa30b35c9712e15dd5c8723d49c..38a25b8d8bff6d15448270d4f56983537335fc2d 100644 (file)
@@ -1,6 +1,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>
+#include <linux/gfp.h>
 
 /*
  * Device resource management aware IRQ request/free implementation.
index 438a01464287fc391b8da015fbe7c80a666c4928..46e4ad1723f0545377c342c57a988a5ff99fef9c 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
 
 #include "internals.h"
 
index f091d13def0083b8d25107993affa1065cb8aabc..6fc0040f3e3abacade62c7d63f74d240974d80fe 100644 (file)
@@ -472,11 +472,7 @@ static const struct file_operations kallsyms_operations = {
 
 static int __init kallsyms_init(void)
 {
-       struct proc_dir_entry *entry;
-
-       entry = create_proc_entry("kallsyms", 0444, NULL);
-       if (entry)
-               entry->proc_fops = &kallsyms_operations;
+       proc_create("kallsyms", 0444, NULL, &kallsyms_operations);
        return 0;
 }
 __initcall(kallsyms_init);
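
The hunk above collapses the old create_proc_entry()-then-assign idiom into a single proc_create() call; the latencytop and lockdep_proc hunks further down make the same conversion. A hedged before/after sketch with a hypothetical entry name and fops:

/* Old two-step idiom: the entry is briefly visible before its fops is set. */
struct proc_dir_entry *entry = create_proc_entry("demo", 0444, NULL);
if (entry)
        entry->proc_fops = &demo_fops;

/* New idiom: name, mode, parent and fops handed over in one call. */
proc_create("demo", 0444, NULL, &demo_fops);
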
index 6782dce93d018b15d0a789187c8179d023a991e7..1c5fcacbcf336b8dc82d0b02202a3d95c32fbd13 100644 (file)
@@ -1217,7 +1217,7 @@ static int __init parse_crashkernel_mem(char                      *cmdline,
                }
 
                /* match ? */
-               if (system_ram >= start && system_ram <= end) {
+               if (system_ram >= start && system_ram < end) {
                        *crash_size = size;
                        break;
                }
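
The changed comparison makes each crashkernel range half-open, [start, end), so adjacent ranges no longer both claim their shared boundary. A worked example with made-up numbers, assuming the ranges are tried in the order given and the first match wins (as the break above implies):

/* crashkernel=0M-512M:64M,512M-2G:128M on a machine with exactly 512M RAM:
 *   old test: 512M >= 0M && 512M <= 512M  -> first range matches, 64M reserved
 *   new test: 512M >= 0M && 512M <  512M  -> falls through to 512M-2G, 128M
 */
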
@@ -1405,6 +1405,9 @@ static int __init crash_save_vmcoreinfo_init(void)
        VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
        VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
        VMCOREINFO_NUMBER(NR_FREE_PAGES);
+       VMCOREINFO_NUMBER(PG_lru);
+       VMCOREINFO_NUMBER(PG_private);
+       VMCOREINFO_NUMBER(PG_swapcache);
 
        arch_crash_save_vmcoreinfo();
 
index fcfb580c3afc847e60fc9fa4f682b33c25650963..1e0250cb94860be6931efce5e37138fa488c18db 100644 (file)
@@ -72,6 +72,18 @@ DEFINE_MUTEX(kprobe_mutex);          /* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);       /* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
+/*
+ * Normally, functions that we'd want to prohibit kprobes in are marked
+ * __kprobes. But there are cases where such functions already belong to
+ * a different section (__sched for preempt_schedule).
+ *
+ * For such cases, we now have a blacklist.
+ */
+struct kprobe_blackpoint kprobe_blacklist[] = {
+       {"preempt_schedule",},
+       {NULL}    /* Terminator */
+};
+
 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -417,6 +429,21 @@ static inline void free_rp_inst(struct kretprobe *rp)
        }
 }
 
+static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
+{
+       unsigned long flags;
+       struct kretprobe_instance *ri;
+       struct hlist_node *pos, *next;
+       /* No race here */
+       spin_lock_irqsave(&kretprobe_lock, flags);
+       hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
+               ri->rp = NULL;
+               hlist_del(&ri->uflist);
+       }
+       spin_unlock_irqrestore(&kretprobe_lock, flags);
+       free_rp_inst(rp);
+}
+
 /*
  * Keep all fields in the kprobe consistent
  */
@@ -492,9 +519,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
 
 static int __kprobes in_kprobes_functions(unsigned long addr)
 {
+       struct kprobe_blackpoint *kb;
+
        if (addr >= (unsigned long)__kprobes_text_start &&
            addr < (unsigned long)__kprobes_text_end)
                return -EINVAL;
+       /*
+        * If there exists a kprobe_blacklist, verify and
+        * fail any probe registration in the prohibited area
+        */
+       for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+               if (kb->start_addr) {
+                       if (addr >= kb->start_addr &&
+                           addr < (kb->start_addr + kb->range))
+                               return -EINVAL;
+               }
+       }
        return 0;
 }
 
@@ -555,6 +595,7 @@ static int __kprobes __register_kprobe(struct kprobe *p,
        }
 
        p->nmissed = 0;
+       INIT_LIST_HEAD(&p->list);
        mutex_lock(&kprobe_mutex);
        old_p = get_kprobe(p->addr);
        if (old_p) {
@@ -581,35 +622,28 @@ out:
        return ret;
 }
 
-int __kprobes register_kprobe(struct kprobe *p)
-{
-       return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
-}
-
-void __kprobes unregister_kprobe(struct kprobe *p)
+/*
+ * Unregister a kprobe without a scheduler synchronization.
+ */
+static int __kprobes __unregister_kprobe_top(struct kprobe *p)
 {
-       struct module *mod;
        struct kprobe *old_p, *list_p;
-       int cleanup_p;
 
-       mutex_lock(&kprobe_mutex);
        old_p = get_kprobe(p->addr);
-       if (unlikely(!old_p)) {
-               mutex_unlock(&kprobe_mutex);
-               return;
-       }
+       if (unlikely(!old_p))
+               return -EINVAL;
+
        if (p != old_p) {
                list_for_each_entry_rcu(list_p, &old_p->list, list)
                        if (list_p == p)
                        /* kprobe p is a valid probe */
                                goto valid_p;
-               mutex_unlock(&kprobe_mutex);
-               return;
+               return -EINVAL;
        }
 valid_p:
        if (old_p == p ||
            (old_p->pre_handler == aggr_pre_handler &&
-            p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
+            list_is_singular(&old_p->list))) {
                /*
                 * Only probe on the hash list. Disarm only if kprobes are
                 * enabled - otherwise, the breakpoint would already have
@@ -618,43 +652,97 @@ valid_p:
                if (kprobe_enabled)
                        arch_disarm_kprobe(p);
                hlist_del_rcu(&old_p->hlist);
-               cleanup_p = 1;
        } else {
+               if (p->break_handler)
+                       old_p->break_handler = NULL;
+               if (p->post_handler) {
+                       list_for_each_entry_rcu(list_p, &old_p->list, list) {
+                               if ((list_p != p) && (list_p->post_handler))
+                                       goto noclean;
+                       }
+                       old_p->post_handler = NULL;
+               }
+noclean:
                list_del_rcu(&p->list);
-               cleanup_p = 0;
        }
+       return 0;
+}
 
-       mutex_unlock(&kprobe_mutex);
+static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
+{
+       struct module *mod;
+       struct kprobe *old_p;
 
-       synchronize_sched();
        if (p->mod_refcounted) {
                mod = module_text_address((unsigned long)p->addr);
                if (mod)
                        module_put(mod);
        }
 
-       if (cleanup_p) {
-               if (p != old_p) {
-                       list_del_rcu(&p->list);
+       if (list_empty(&p->list) || list_is_singular(&p->list)) {
+               if (!list_empty(&p->list)) {
+                       /* "p" is the last child of an aggr_kprobe */
+                       old_p = list_entry(p->list.next, struct kprobe, list);
+                       list_del(&p->list);
                        kfree(old_p);
                }
                arch_remove_kprobe(p);
-       } else {
-               mutex_lock(&kprobe_mutex);
-               if (p->break_handler)
-                       old_p->break_handler = NULL;
-               if (p->post_handler){
-                       list_for_each_entry_rcu(list_p, &old_p->list, list){
-                               if (list_p->post_handler){
-                                       cleanup_p = 2;
-                                       break;
-                               }
-                       }
-                       if (cleanup_p == 0)
-                               old_p->post_handler = NULL;
+       }
+}
+
+static int __register_kprobes(struct kprobe **kps, int num,
+       unsigned long called_from)
+{
+       int i, ret = 0;
+
+       if (num <= 0)
+               return -EINVAL;
+       for (i = 0; i < num; i++) {
+               ret = __register_kprobe(kps[i], called_from);
+               if (ret < 0 && i > 0) {
+                       unregister_kprobes(kps, i);
+                       break;
                }
-               mutex_unlock(&kprobe_mutex);
        }
+       return ret;
+}
+
+/*
+ * Registration and unregistration functions for kprobe.
+ */
+int __kprobes register_kprobe(struct kprobe *p)
+{
+       return __register_kprobes(&p, 1,
+                                 (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kprobe(struct kprobe *p)
+{
+       unregister_kprobes(&p, 1);
+}
+
+int __kprobes register_kprobes(struct kprobe **kps, int num)
+{
+       return __register_kprobes(kps, num,
+                                 (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kprobes(struct kprobe **kps, int num)
+{
+       int i;
+
+       if (num <= 0)
+               return;
+       mutex_lock(&kprobe_mutex);
+       for (i = 0; i < num; i++)
+               if (__unregister_kprobe_top(kps[i]) < 0)
+                       kps[i]->addr = NULL;
+       mutex_unlock(&kprobe_mutex);
+
+       synchronize_sched();
+       for (i = 0; i < num; i++)
+               if (kps[i]->addr)
+                       __unregister_kprobe_bottom(kps[i]);
 }
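
A hedged usage sketch of the new batch interface (the probed symbols, handler and module names are made up): a failed registration at slot i unregisters slots 0..i-1, so the set is armed all-or-nothing, and a single synchronize_sched() covers the whole unregister batch.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
        return 0;               /* observe only, let the call proceed */
}

static struct kprobe kp_fork = { .symbol_name = "do_fork", .pre_handler = demo_pre };
static struct kprobe kp_exit = { .symbol_name = "do_exit", .pre_handler = demo_pre };
static struct kprobe *kps[] = { &kp_fork, &kp_exit };

static int __init demo_init(void)
{
        /* Either both probes end up armed or neither does. */
        return register_kprobes(kps, ARRAY_SIZE(kps));
}

static void __exit demo_exit(void)
{
        /* One scheduler grace period for the whole batch. */
        unregister_kprobes(kps, ARRAY_SIZE(kps));
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
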
 
 static struct notifier_block kprobe_exceptions_nb = {
@@ -667,24 +755,69 @@ unsigned long __weak arch_deref_entry_point(void *entry)
        return (unsigned long)entry;
 }
 
-int __kprobes register_jprobe(struct jprobe *jp)
+static int __register_jprobes(struct jprobe **jps, int num,
+       unsigned long called_from)
 {
-       unsigned long addr = arch_deref_entry_point(jp->entry);
+       struct jprobe *jp;
+       int ret = 0, i;
 
-       if (!kernel_text_address(addr))
+       if (num <= 0)
                return -EINVAL;
+       for (i = 0; i < num; i++) {
+               unsigned long addr;
+               jp = jps[i];
+               addr = arch_deref_entry_point(jp->entry);
+
+               if (!kernel_text_address(addr))
+                       ret = -EINVAL;
+               else {
+                       /* Todo: Verify probepoint is a function entry point */
+                       jp->kp.pre_handler = setjmp_pre_handler;
+                       jp->kp.break_handler = longjmp_break_handler;
+                       ret = __register_kprobe(&jp->kp, called_from);
+               }
+               if (ret < 0 && i > 0) {
+                       unregister_jprobes(jps, i);
+                       break;
+               }
+       }
+       return ret;
+}
 
-       /* Todo: Verify probepoint is a function entry point */
-       jp->kp.pre_handler = setjmp_pre_handler;
-       jp->kp.break_handler = longjmp_break_handler;
-
-       return __register_kprobe(&jp->kp,
+int __kprobes register_jprobe(struct jprobe *jp)
+{
+       return __register_jprobes(&jp, 1,
                (unsigned long)__builtin_return_address(0));
 }
 
 void __kprobes unregister_jprobe(struct jprobe *jp)
 {
-       unregister_kprobe(&jp->kp);
+       unregister_jprobes(&jp, 1);
+}
+
+int __kprobes register_jprobes(struct jprobe **jps, int num)
+{
+       return __register_jprobes(jps, num,
+               (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_jprobes(struct jprobe **jps, int num)
+{
+       int i;
+
+       if (num <= 0)
+               return;
+       mutex_lock(&kprobe_mutex);
+       for (i = 0; i < num; i++)
+               if (__unregister_kprobe_top(&jps[i]->kp) < 0)
+                       jps[i]->kp.addr = NULL;
+       mutex_unlock(&kprobe_mutex);
+
+       synchronize_sched();
+       for (i = 0; i < num; i++) {
+               if (jps[i]->kp.addr)
+                       __unregister_kprobe_bottom(&jps[i]->kp);
+       }
 }
 
 #ifdef CONFIG_KRETPROBES
@@ -725,7 +858,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
        return 0;
 }
 
-int __kprobes register_kretprobe(struct kretprobe *rp)
+static int __kprobes __register_kretprobe(struct kretprobe *rp,
+                                         unsigned long called_from)
 {
        int ret = 0;
        struct kretprobe_instance *inst;
@@ -771,46 +905,101 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
 
        rp->nmissed = 0;
        /* Establish function entry probe point */
-       if ((ret = __register_kprobe(&rp->kp,
-               (unsigned long)__builtin_return_address(0))) != 0)
+       ret = __register_kprobe(&rp->kp, called_from);
+       if (ret != 0)
                free_rp_inst(rp);
        return ret;
 }
 
+static int __register_kretprobes(struct kretprobe **rps, int num,
+       unsigned long called_from)
+{
+       int ret = 0, i;
+
+       if (num <= 0)
+               return -EINVAL;
+       for (i = 0; i < num; i++) {
+               ret = __register_kretprobe(rps[i], called_from);
+               if (ret < 0 && i > 0) {
+                       unregister_kretprobes(rps, i);
+                       break;
+               }
+       }
+       return ret;
+}
+
+int __kprobes register_kretprobe(struct kretprobe *rp)
+{
+       return __register_kretprobes(&rp, 1,
+                       (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kretprobe(struct kretprobe *rp)
+{
+       unregister_kretprobes(&rp, 1);
+}
+
+int __kprobes register_kretprobes(struct kretprobe **rps, int num)
+{
+       return __register_kretprobes(rps, num,
+                       (unsigned long)__builtin_return_address(0));
+}
+
+void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
+{
+       int i;
+
+       if (num <= 0)
+               return;
+       mutex_lock(&kprobe_mutex);
+       for (i = 0; i < num; i++)
+               if (__unregister_kprobe_top(&rps[i]->kp) < 0)
+                       rps[i]->kp.addr = NULL;
+       mutex_unlock(&kprobe_mutex);
+
+       synchronize_sched();
+       for (i = 0; i < num; i++) {
+               if (rps[i]->kp.addr) {
+                       __unregister_kprobe_bottom(&rps[i]->kp);
+                       cleanup_rp_inst(rps[i]);
+               }
+       }
+}
+
 #else /* CONFIG_KRETPROBES */
 int __kprobes register_kretprobe(struct kretprobe *rp)
 {
        return -ENOSYS;
 }
 
-static int __kprobes pre_handler_kretprobe(struct kprobe *p,
-                                          struct pt_regs *regs)
+int __kprobes register_kretprobes(struct kretprobe **rps, int num)
 {
-       return 0;
+       return -ENOSYS;
 }
-#endif /* CONFIG_KRETPROBES */
-
 void __kprobes unregister_kretprobe(struct kretprobe *rp)
 {
-       unsigned long flags;
-       struct kretprobe_instance *ri;
-       struct hlist_node *pos, *next;
+}
 
-       unregister_kprobe(&rp->kp);
+void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
+{
+}
 
-       /* No race here */
-       spin_lock_irqsave(&kretprobe_lock, flags);
-       hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
-               ri->rp = NULL;
-               hlist_del(&ri->uflist);
-       }
-       spin_unlock_irqrestore(&kretprobe_lock, flags);
-       free_rp_inst(rp);
+static int __kprobes pre_handler_kretprobe(struct kprobe *p,
+                                          struct pt_regs *regs)
+{
+       return 0;
 }
 
+#endif /* CONFIG_KRETPROBES */
+
 static int __init init_kprobes(void)
 {
        int i, err = 0;
+       unsigned long offset = 0, size = 0;
+       char *modname, namebuf[128];
+       const char *symbol_name;
+       void *addr;
+       struct kprobe_blackpoint *kb;
 
        /* FIXME allocate the probe table, currently defined statically */
        /* initialize all list heads */
@@ -819,6 +1008,28 @@ static int __init init_kprobes(void)
                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
        }
 
+       /*
+        * Lookup and populate the kprobe_blacklist.
+        *
+        * Unlike the kretprobe blacklist, we'll need to determine
+        * the range of addresses that belong to the said functions,
+        * since a kprobe need not necessarily be at the beginning
+        * of a function.
+        */
+       for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
+               kprobe_lookup_name(kb->name, addr);
+               if (!addr)
+                       continue;
+
+               kb->start_addr = (unsigned long)addr;
+               symbol_name = kallsyms_lookup(kb->start_addr,
+                               &size, &offset, &modname, namebuf);
+               if (!symbol_name)
+                       kb->range = 0;
+               else
+                       kb->range = size;
+       }
+
        if (kretprobe_blacklist_size) {
                /* lookup the function address from its name */
                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
@@ -1066,8 +1277,12 @@ module_init(init_kprobes);
 
 EXPORT_SYMBOL_GPL(register_kprobe);
 EXPORT_SYMBOL_GPL(unregister_kprobe);
+EXPORT_SYMBOL_GPL(register_kprobes);
+EXPORT_SYMBOL_GPL(unregister_kprobes);
 EXPORT_SYMBOL_GPL(register_jprobe);
 EXPORT_SYMBOL_GPL(unregister_jprobe);
+EXPORT_SYMBOL_GPL(register_jprobes);
+EXPORT_SYMBOL_GPL(unregister_jprobes);
 #ifdef CONFIG_KPROBES
 EXPORT_SYMBOL_GPL(jprobe_return);
 #endif
@@ -1075,4 +1290,6 @@ EXPORT_SYMBOL_GPL(jprobe_return);
 #ifdef CONFIG_KPROBES
 EXPORT_SYMBOL_GPL(register_kretprobe);
 EXPORT_SYMBOL_GPL(unregister_kretprobe);
+EXPORT_SYMBOL_GPL(register_kretprobes);
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
 #endif
index 92cf6930ab51a74e034ae9db30d72b21e9a84e9d..bd1b9ea024e1238cb230c159c426f3a4f0c8981e 100644 (file)
@@ -98,7 +98,7 @@ static void create_kthread(struct kthread_create_info *create)
                struct sched_param param = { .sched_priority = 0 };
                wait_for_completion(&create->started);
                read_lock(&tasklist_lock);
-               create->result = find_task_by_pid(pid);
+               create->result = find_task_by_pid_ns(pid, &init_pid_ns);
                read_unlock(&tasklist_lock);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
@@ -144,9 +144,9 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 
        spin_lock(&kthread_create_lock);
        list_add_tail(&create.list, &kthread_create_list);
-       wake_up_process(kthreadd_task);
        spin_unlock(&kthread_create_lock);
 
+       wake_up_process(kthreadd_task);
        wait_for_completion(&create.done);
 
        if (!IS_ERR(create.result)) {
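
The reordering in the hunk above follows a common pattern: add the work item while holding the lock, but wake the consumer only after dropping it, so the woken thread is not immediately made to contend for the lock the waker still holds. A stripped-down sketch with hypothetical names:

spin_lock(&queue_lock);
list_add_tail(&req->list, &request_queue);
spin_unlock(&queue_lock);

/* The worker may run the instant it is woken; waking it here keeps it
 * from bouncing straight back into queue_lock. */
wake_up_process(worker_task);
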
index 7c74dab0d21b9fcd2235b507b5e450459e042ef6..5e7b45c569233536ef23dadfafd90e40d20b7f62 100644 (file)
@@ -233,14 +233,7 @@ static struct file_operations lstats_fops = {
 
 static int __init init_lstats_procfs(void)
 {
-       struct proc_dir_entry *pe;
-
-       pe = create_proc_entry("latency_stats", 0644, NULL);
-       if (!pe)
-               return -ENOMEM;
-
-       pe->proc_fops = &lstats_fops;
-
+       proc_create("latency_stats", 0644, NULL, &lstats_fops);
        return 0;
 }
 __initcall(init_lstats_procfs);
index 8a135bd163c2cc719b4635b1e511aa87039d6f46..dc5d29648d852ccadd88ea79a91decc93653fa8b 100644 (file)
@@ -660,20 +660,12 @@ static const struct file_operations proc_lock_stat_operations = {
 
 static int __init lockdep_proc_init(void)
 {
-       struct proc_dir_entry *entry;
-
-       entry = create_proc_entry("lockdep", S_IRUSR, NULL);
-       if (entry)
-               entry->proc_fops = &proc_lockdep_operations;
-
-       entry = create_proc_entry("lockdep_stats", S_IRUSR, NULL);
-       if (entry)
-               entry->proc_fops = &proc_lockdep_stats_operations;
+       proc_create("lockdep", S_IRUSR, NULL, &proc_lockdep_operations);
+       proc_create("lockdep_stats", S_IRUSR, NULL,
+                   &proc_lockdep_stats_operations);
 
 #ifdef CONFIG_LOCK_STAT
-       entry = create_proc_entry("lock_stat", S_IRUSR, NULL);
-       if (entry)
-               entry->proc_fops = &proc_lock_stat_operations;
+       proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations);
 #endif
 
        return 0;
index 005b959545933703bbb76e3f9720c5e42466df99..b5a9fe1d50d5ce1c480ca7ca1777e55e6deb485e 100644 (file)
 #include <linux/rcupdate.h>
 #include <linux/marker.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 
 extern struct marker __start___markers[];
 extern struct marker __stop___markers[];
 
 /* Set to 1 to enable marker debug output */
-const int marker_debug;
+static const int marker_debug;
 
 /*
  * markers_mutex nests inside module_mutex. Markers mutex protects the builtin
index 8d6cccc6c3cf99870f76b00202a2775bd5e2a54f..8674a390a2e8262e10c6fd61a86c40f1ac2079c8 100644 (file)
@@ -164,131 +164,140 @@ static const struct kernel_symbol *lookup_symbol(const char *name,
        return NULL;
 }
 
-static void printk_unused_warning(const char *name)
+static bool always_ok(bool gplok, bool warn, const char *name)
 {
-       printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
-               "however this module is using it.\n", name);
-       printk(KERN_WARNING "This symbol will go away in the future.\n");
-       printk(KERN_WARNING "Please evalute if this is the right api to use, "
-               "and if it really is, submit a report the linux kernel "
-               "mailinglist together with submitting your code for "
-               "inclusion.\n");
+       return true;
 }
 
-/* Find a symbol, return value, crc and module which owns it */
-static unsigned long __find_symbol(const char *name,
-                                  struct module **owner,
-                                  const unsigned long **crc,
-                                  int gplok)
+static bool printk_unused_warning(bool gplok, bool warn, const char *name)
 {
-       struct module *mod;
-       const struct kernel_symbol *ks;
-
-       /* Core kernel first. */
-       *owner = NULL;
-       ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
-       if (ks) {
-               *crc = symversion(__start___kcrctab, (ks - __start___ksymtab));
-               return ks->value;
+       if (warn) {
+               printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
+                      "however this module is using it.\n", name);
+               printk(KERN_WARNING
+                      "This symbol will go away in the future.\n");
+               printk(KERN_WARNING
+                      "Please evaluate if this is the right api to use and if "
+                      "it really is, submit a report to the linux kernel "
+                      "mailing list together with submitting your code for "
+                      "inclusion.\n");
        }
-       if (gplok) {
-               ks = lookup_symbol(name, __start___ksymtab_gpl,
-                                        __stop___ksymtab_gpl);
-               if (ks) {
-                       *crc = symversion(__start___kcrctab_gpl,
-                                         (ks - __start___ksymtab_gpl));
-                       return ks->value;
-               }
-       }
-       ks = lookup_symbol(name, __start___ksymtab_gpl_future,
-                                __stop___ksymtab_gpl_future);
-       if (ks) {
-               if (!gplok) {
-                       printk(KERN_WARNING "Symbol %s is being used "
-                              "by a non-GPL module, which will not "
-                              "be allowed in the future\n", name);
-                       printk(KERN_WARNING "Please see the file "
-                              "Documentation/feature-removal-schedule.txt "
-                              "in the kernel source tree for more "
-                              "details.\n");
-               }
-               *crc = symversion(__start___kcrctab_gpl_future,
-                                 (ks - __start___ksymtab_gpl_future));
-               return ks->value;
+       return true;
+}
+
+static bool gpl_only_unused_warning(bool gplok, bool warn, const char *name)
+{
+       if (!gplok)
+               return false;
+       return printk_unused_warning(gplok, warn, name);
+}
+
+static bool gpl_only(bool gplok, bool warn, const char *name)
+{
+       return gplok;
+}
+
+static bool warn_if_not_gpl(bool gplok, bool warn, const char *name)
+{
+       if (!gplok && warn) {
+               printk(KERN_WARNING "Symbol %s is being used "
+                      "by a non-GPL module, which will not "
+                      "be allowed in the future\n", name);
+               printk(KERN_WARNING "Please see the file "
+                      "Documentation/feature-removal-schedule.txt "
+                      "in the kernel source tree for more details.\n");
        }
+       return true;
+}
 
-       ks = lookup_symbol(name, __start___ksymtab_unused,
-                                __stop___ksymtab_unused);
-       if (ks) {
-               printk_unused_warning(name);
-               *crc = symversion(__start___kcrctab_unused,
-                                 (ks - __start___ksymtab_unused));
-               return ks->value;
+struct symsearch {
+       const struct kernel_symbol *start, *stop;
+       const unsigned long *crcs;
+       bool (*check)(bool gplok, bool warn, const char *name);
+};
+
+/* Look through this array of symbol tables for a symbol match which
+ * passes the check function. */
+static const struct kernel_symbol *search_symarrays(const struct symsearch *arr,
+                                                   unsigned int num,
+                                                   const char *name,
+                                                   bool gplok,
+                                                   bool warn,
+                                                   const unsigned long **crc)
+{
+       unsigned int i;
+       const struct kernel_symbol *ks;
+
+       for (i = 0; i < num; i++) {
+               ks = lookup_symbol(name, arr[i].start, arr[i].stop);
+               if (!ks || !arr[i].check(gplok, warn, name))
+                       continue;
+
+               if (crc)
+                       *crc = symversion(arr[i].crcs, ks - arr[i].start);
+               return ks;
        }
+       return NULL;
+}
+
+/* Find a symbol, return value, (optional) crc and (optional) module
+ * which owns it */
+static unsigned long find_symbol(const char *name,
+                                struct module **owner,
+                                const unsigned long **crc,
+                                bool gplok,
+                                bool warn)
+{
+       struct module *mod;
+       const struct kernel_symbol *ks;
+       const struct symsearch arr[] = {
+               { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
+                 always_ok },
+               { __start___ksymtab_gpl, __stop___ksymtab_gpl,
+                 __start___kcrctab_gpl, gpl_only },
+               { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
+                 __start___kcrctab_gpl_future, warn_if_not_gpl },
+               { __start___ksymtab_unused, __stop___ksymtab_unused,
+                 __start___kcrctab_unused, printk_unused_warning },
+               { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
+                 __start___kcrctab_unused_gpl, gpl_only_unused_warning },
+       };
 
-       if (gplok)
-               ks = lookup_symbol(name, __start___ksymtab_unused_gpl,
-                                __stop___ksymtab_unused_gpl);
+       /* Core kernel first. */
+       ks = search_symarrays(arr, ARRAY_SIZE(arr), name, gplok, warn, crc);
        if (ks) {
-               printk_unused_warning(name);
-               *crc = symversion(__start___kcrctab_unused_gpl,
-                                 (ks - __start___ksymtab_unused_gpl));
+               if (owner)
+                       *owner = NULL;
                return ks->value;
        }
 
        /* Now try modules. */
        list_for_each_entry(mod, &modules, list) {
-               *owner = mod;
-               ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
-               if (ks) {
-                       *crc = symversion(mod->crcs, (ks - mod->syms));
-                       return ks->value;
-               }
-
-               if (gplok) {
-                       ks = lookup_symbol(name, mod->gpl_syms,
-                                          mod->gpl_syms + mod->num_gpl_syms);
-                       if (ks) {
-                               *crc = symversion(mod->gpl_crcs,
-                                                 (ks - mod->gpl_syms));
-                               return ks->value;
-                       }
-               }
-               ks = lookup_symbol(name, mod->unused_syms, mod->unused_syms + mod->num_unused_syms);
+               struct symsearch arr[] = {
+                       { mod->syms, mod->syms + mod->num_syms, mod->crcs,
+                         always_ok },
+                       { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
+                         mod->gpl_crcs, gpl_only },
+                       { mod->gpl_future_syms,
+                         mod->gpl_future_syms + mod->num_gpl_future_syms,
+                         mod->gpl_future_crcs, warn_if_not_gpl },
+                       { mod->unused_syms,
+                         mod->unused_syms + mod->num_unused_syms,
+                         mod->unused_crcs, printk_unused_warning },
+                       { mod->unused_gpl_syms,
+                         mod->unused_gpl_syms + mod->num_unused_gpl_syms,
+                         mod->unused_gpl_crcs, gpl_only_unused_warning },
+               };
+
+               ks = search_symarrays(arr, ARRAY_SIZE(arr),
+                                     name, gplok, warn, crc);
                if (ks) {
-                       printk_unused_warning(name);
-                       *crc = symversion(mod->unused_crcs, (ks - mod->unused_syms));
-                       return ks->value;
-               }
-
-               if (gplok) {
-                       ks = lookup_symbol(name, mod->unused_gpl_syms,
-                                          mod->unused_gpl_syms + mod->num_unused_gpl_syms);
-                       if (ks) {
-                               printk_unused_warning(name);
-                               *crc = symversion(mod->unused_gpl_crcs,
-                                                 (ks - mod->unused_gpl_syms));
-                               return ks->value;
-                       }
-               }
-               ks = lookup_symbol(name, mod->gpl_future_syms,
-                                  (mod->gpl_future_syms +
-                                   mod->num_gpl_future_syms));
-               if (ks) {
-                       if (!gplok) {
-                               printk(KERN_WARNING "Symbol %s is being used "
-                                      "by a non-GPL module, which will not "
-                                      "be allowed in the future\n", name);
-                               printk(KERN_WARNING "Please see the file "
-                                      "Documentation/feature-removal-schedule.txt "
-                                      "in the kernel source tree for more "
-                                      "details.\n");
-                       }
-                       *crc = symversion(mod->gpl_future_crcs,
-                                         (ks - mod->gpl_future_syms));
+                       if (owner)
+                               *owner = mod;
                        return ks->value;
                }
        }
+
        DEBUGP("Failed to find symbol %s\n", name);
        return -ENOENT;
 }
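
The rewrite above folds five nearly identical lookup blocks into a table of {start, stop, crcs, check} entries walked by search_symarrays(), so each export class becomes one array row plus a small policy callback. Purely as an illustration of that extensibility (the "gpl_soon" class and its linker symbols are invented, not part of the kernel), a new class inside kernel/module.c would amount to:

static bool gpl_soon(bool gplok, bool warn, const char *name)
{
        if (!gplok && warn)
                printk(KERN_WARNING "Symbol %s will become GPL-only\n", name);
        return true;
}

/* Hypothetical extra row for the core-kernel table in find_symbol(). */
{ __start___ksymtab_gpl_soon, __stop___ksymtab_gpl_soon,
  __start___kcrctab_gpl_soon, gpl_soon },
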
@@ -736,12 +745,13 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
        if (!forced && module_refcount(mod) != 0)
                wait_for_zero_refcount(mod);
 
+       mutex_unlock(&module_mutex);
        /* Final destruction now noone is using it. */
-       if (mod->exit != NULL) {
-               mutex_unlock(&module_mutex);
+       if (mod->exit != NULL)
                mod->exit();
-               mutex_lock(&module_mutex);
-       }
+       blocking_notifier_call_chain(&module_notify_list,
+                                    MODULE_STATE_GOING, mod);
+       mutex_lock(&module_mutex);
        /* Store the name of the last unloaded module for diagnostic purposes */
        strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
        free_module(mod);
@@ -777,10 +787,9 @@ static void print_unload_info(struct seq_file *m, struct module *mod)
 void __symbol_put(const char *symbol)
 {
        struct module *owner;
-       const unsigned long *crc;
 
        preempt_disable();
-       if (IS_ERR_VALUE(__find_symbol(symbol, &owner, &crc, 1)))
+       if (IS_ERR_VALUE(find_symbol(symbol, &owner, NULL, true, false)))
                BUG();
        module_put(owner);
        preempt_enable();
@@ -924,13 +933,10 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
                                          struct module *mod)
 {
        const unsigned long *crc;
-       struct module *owner;
 
-       if (IS_ERR_VALUE(__find_symbol("struct_module",
-                                               &owner, &crc, 1)))
+       if (IS_ERR_VALUE(find_symbol("struct_module", NULL, &crc, true, false)))
                BUG();
-       return check_version(sechdrs, versindex, "struct_module", mod,
-                            crc);
+       return check_version(sechdrs, versindex, "struct_module", mod, crc);
 }
 
 /* First part is kernel version, which we ignore. */
@@ -974,8 +980,8 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
        unsigned long ret;
        const unsigned long *crc;
 
-       ret = __find_symbol(name, &owner, &crc,
-                       !(mod->taints & TAINT_PROPRIETARY_MODULE));
+       ret = find_symbol(name, &owner, &crc,
+                         !(mod->taints & TAINT_PROPRIETARY_MODULE), true);
        if (!IS_ERR_VALUE(ret)) {
                /* use_module can fail due to OOM,
                   or module initialization or unloading */
@@ -991,6 +997,20 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
  * J. Corbet <corbet@lwn.net>
  */
 #if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
+struct module_sect_attr
+{
+       struct module_attribute mattr;
+       char *name;
+       unsigned long address;
+};
+
+struct module_sect_attrs
+{
+       struct attribute_group grp;
+       unsigned int nsections;
+       struct module_sect_attr attrs[0];
+};
+
 static ssize_t module_sect_show(struct module_attribute *mattr,
                                struct module *mod, char *buf)
 {
@@ -1001,7 +1021,7 @@ static ssize_t module_sect_show(struct module_attribute *mattr,
 
 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
 {
-       int section;
+       unsigned int section;
 
        for (section = 0; section < sect_attrs->nsections; section++)
                kfree(sect_attrs->attrs[section].name);
@@ -1362,10 +1382,9 @@ void *__symbol_get(const char *symbol)
 {
        struct module *owner;
        unsigned long value;
-       const unsigned long *crc;
 
        preempt_disable();
-       value = __find_symbol(symbol, &owner, &crc, 1);
+       value = find_symbol(symbol, &owner, NULL, true, true);
        if (IS_ERR_VALUE(value))
                value = 0;
        else if (strong_try_module_get(owner))
@@ -1382,33 +1401,33 @@ EXPORT_SYMBOL_GPL(__symbol_get);
  */
 static int verify_export_symbols(struct module *mod)
 {
-       const char *name = NULL;
-       unsigned long i, ret = 0;
+       unsigned int i;
        struct module *owner;
-       const unsigned long *crc;
-
-       for (i = 0; i < mod->num_syms; i++)
-               if (!IS_ERR_VALUE(__find_symbol(mod->syms[i].name,
-                                                       &owner, &crc, 1))) {
-                       name = mod->syms[i].name;
-                       ret = -ENOEXEC;
-                       goto dup;
-               }
+       const struct kernel_symbol *s;
+       struct {
+               const struct kernel_symbol *sym;
+               unsigned int num;
+       } arr[] = {
+               { mod->syms, mod->num_syms },
+               { mod->gpl_syms, mod->num_gpl_syms },
+               { mod->gpl_future_syms, mod->num_gpl_future_syms },
+               { mod->unused_syms, mod->num_unused_syms },
+               { mod->unused_gpl_syms, mod->num_unused_gpl_syms },
+       };
 
-       for (i = 0; i < mod->num_gpl_syms; i++)
-               if (!IS_ERR_VALUE(__find_symbol(mod->gpl_syms[i].name,
-                                                       &owner, &crc, 1))) {
-                       name = mod->gpl_syms[i].name;
-                       ret = -ENOEXEC;
-                       goto dup;
+       for (i = 0; i < ARRAY_SIZE(arr); i++) {
+               for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
+                       if (!IS_ERR_VALUE(find_symbol(s->name, &owner,
+                                                     NULL, true, false))) {
+                               printk(KERN_ERR
+                                      "%s: exports duplicate symbol %s"
+                                      " (owned by %s)\n",
+                                      mod->name, s->name, module_name(owner));
+                               return -ENOEXEC;
+                       }
                }
-
-dup:
-       if (ret)
-               printk(KERN_ERR "%s: exports duplicate symbol %s (owned by %s)\n",
-                       mod->name, name, module_name(owner));
-
-       return ret;
+       }
+       return 0;
 }
 
 /* Change all symbols so that st_value encodes the pointer directly. */
@@ -1814,8 +1833,9 @@ static struct module *load_module(void __user *umod,
        unwindex = find_sec(hdr, sechdrs, secstrings, ARCH_UNWIND_SECTION_NAME);
 #endif
 
-       /* Don't keep modinfo section */
+       /* Don't keep modinfo and version sections. */
        sechdrs[infoindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
+       sechdrs[versindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
 #ifdef CONFIG_KALLSYMS
        /* Keep symbol and string tables for decoding later. */
        sechdrs[symindex].sh_flags |= SHF_ALLOC;
@@ -1977,7 +1997,8 @@ static struct module *load_module(void __user *umod,
                mod->unused_crcs = (void *)sechdrs[unusedcrcindex].sh_addr;
        mod->unused_gpl_syms = (void *)sechdrs[unusedgplindex].sh_addr;
        if (unusedgplcrcindex)
-               mod->unused_crcs = (void *)sechdrs[unusedgplcrcindex].sh_addr;
+               mod->unused_gpl_crcs
+                       = (void *)sechdrs[unusedgplcrcindex].sh_addr;
 
 #ifdef CONFIG_MODVERSIONS
        if ((mod->num_syms && !crcindex) ||
@@ -2171,6 +2192,8 @@ sys_init_module(void __user *umod,
                mod->state = MODULE_STATE_GOING;
                synchronize_sched();
                module_put(mod);
+               blocking_notifier_call_chain(&module_notify_list,
+                                            MODULE_STATE_GOING, mod);
                mutex_lock(&module_mutex);
                free_module(mod);
                mutex_unlock(&module_mutex);
index 643360d1bb144fda223301b348059cd5381eb35e..823be11584efef8ef1d344f484cbf8c3d4f9617e 100644 (file)
@@ -31,6 +31,21 @@ static int notifier_chain_register(struct notifier_block **nl,
        return 0;
 }
 
+static int notifier_chain_cond_register(struct notifier_block **nl,
+               struct notifier_block *n)
+{
+       while ((*nl) != NULL) {
+               if ((*nl) == n)
+                       return 0;
+               if (n->priority > (*nl)->priority)
+                       break;
+               nl = &((*nl)->next);
+       }
+       n->next = *nl;
+       rcu_assign_pointer(*nl, n);
+       return 0;
+}
+
 static int notifier_chain_unregister(struct notifier_block **nl,
                struct notifier_block *n)
 {
@@ -204,6 +219,29 @@ int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
 }
 EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
 
+/**
+ *     blocking_notifier_chain_cond_register - Cond add notifier to a blocking notifier chain
+ *     @nh: Pointer to head of the blocking notifier chain
+ *     @n: New entry in notifier chain
+ *
+ *     Adds a notifier to a blocking notifier chain, only if not already
+ *     present in the chain.
+ *     Must be called in process context.
+ *
+ *     Currently always returns zero.
+ */
+int blocking_notifier_chain_cond_register(struct blocking_notifier_head *nh,
+               struct notifier_block *n)
+{
+       int ret;
+
+       down_write(&nh->rwsem);
+       ret = notifier_chain_cond_register(&nh->head, n);
+       up_write(&nh->rwsem);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(blocking_notifier_chain_cond_register);
+
 /**
  *     blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
  *     @nh: Pointer to head of the blocking notifier chain
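
A hedged usage sketch for the new conditional registration (the chain head, callback and attach path are made up): code that may be reached more than once can register unconditionally, because a block already on the chain is simply left where it is.

#include <linux/notifier.h>

static BLOCKING_NOTIFIER_HEAD(demo_chain);

static int demo_event(struct notifier_block *nb, unsigned long action, void *data)
{
        return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
        .notifier_call = demo_event,
};

void demo_attach(void)
{
        /* Safe to call from several init paths; the block is linked in
         * only the first time. */
        blocking_notifier_chain_cond_register(&demo_chain, &demo_nb);
}
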
index aead4d69f62b80f0cf648eb8213bc633caf6d8de..48d7ed6fc3a4d2dcc206d5eb304f55ae7436ac70 100644 (file)
@@ -7,6 +7,8 @@
 #include <linux/module.h>
 #include <linux/cgroup.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/nsproxy.h>
 
 struct ns_cgroup {
        struct cgroup_subsys_state css;
index f5d332cf8c631704b1ee440c35317644e4f6eff9..adc785146a1cb81d6f7b3d676877921f10bb59e7 100644 (file)
@@ -139,6 +139,18 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
                goto out;
        }
 
+       /*
+        * CLONE_NEWIPC must detach from the undolist: after switching
+        * to a new ipc namespace, the semaphore arrays from the old
+        * namespace are unreachable.  In clone parlance, CLONE_SYSVSEM
+        * means share undolist with parent, so we must forbid using
+        * it along with CLONE_NEWIPC.
+        */
+       if ((flags & CLONE_NEWIPC) && (flags & CLONE_SYSVSEM)) {
+               err = -EINVAL;
+               goto out;
+       }
+
        new_ns = create_new_namespaces(flags, tsk, tsk->fs);
        if (IS_ERR(new_ns)) {
                err = PTR_ERR(new_ns);
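
Seen from userspace (illustrative only; run with CAP_SYS_ADMIN, since CLONE_NEWIPC needs it and the call would otherwise fail earlier with EPERM), the forbidden flag combination is now rejected up front instead of leaving the child with semaphore undo entries pointing into an unreachable namespace:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <errno.h>

static char stack[64 * 1024];

static int child(void *arg)
{
        return 0;
}

int main(void)
{
        int pid = clone(child, stack + sizeof(stack),
                        CLONE_NEWIPC | CLONE_SYSVSEM | SIGCHLD, NULL);

        if (pid == -1 && errno == EINVAL)
                printf("rejected, as expected after this change\n");
        return 0;
}
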
index 24af9f8bac99204076ab80b36038d1d5404ae27a..425567f45b9f776148f1767b2f4af2d357a0c2fd 100644 (file)
@@ -153,6 +153,8 @@ EXPORT_SYMBOL(panic);
  *  'M' - System experienced a machine check exception.
  *  'B' - System has hit bad_page.
  *  'U' - Userspace-defined naughtiness.
+ *  'A' - ACPI table overridden.
+ *  'W' - Taint on warning.
  *
  *     The string is overwritten by the next call to print_taint().
  */
@@ -161,7 +163,7 @@ const char *print_tainted(void)
 {
        static char buf[20];
        if (tainted) {
-               snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c%c",
+               snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c%c%c",
                        tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G',
                        tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
                        tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
@@ -170,7 +172,8 @@ const char *print_tainted(void)
                        tainted & TAINT_BAD_PAGE ? 'B' : ' ',
                        tainted & TAINT_USER ? 'U' : ' ',
                        tainted & TAINT_DIE ? 'D' : ' ',
-                       tainted & TAINT_OVERRIDDEN_ACPI_TABLE ? 'A' : ' ');
+                       tainted & TAINT_OVERRIDDEN_ACPI_TABLE ? 'A' : ' ',
+                       tainted & TAINT_WARN ? 'W' : ' ');
        }
        else
                snprintf(buf, sizeof(buf), "Not tainted");
@@ -312,6 +315,7 @@ void warn_on_slowpath(const char *file, int line)
        print_modules();
        dump_stack();
        print_oops_end_marker();
+       add_taint(TAINT_WARN);
 }
 EXPORT_SYMBOL(warn_on_slowpath);
 #endif
index 477691576b338b38caa173c386b3a70ce7351a72..20d59fa2d493c7a20b83ff151745e38a847e0366 100644 (file)
@@ -111,10 +111,11 @@ EXPORT_SYMBOL(is_container_init);
 
 static  __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
 
-static void free_pidmap(struct pid_namespace *pid_ns, int pid)
+static void free_pidmap(struct upid *upid)
 {
-       struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
-       int offset = pid & BITS_PER_PAGE_MASK;
+       int nr = upid->nr;
+       struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
+       int offset = nr & BITS_PER_PAGE_MASK;
 
        clear_bit(offset, map->page);
        atomic_inc(&map->nr_free);
@@ -232,7 +233,7 @@ void free_pid(struct pid *pid)
        spin_unlock_irqrestore(&pidmap_lock, flags);
 
        for (i = 0; i <= pid->level; i++)
-               free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);
+               free_pidmap(pid->numbers + i);
 
        call_rcu(&pid->rcu, delayed_put_pid);
 }
@@ -278,8 +279,8 @@ out:
        return pid;
 
 out_free:
-       for (i++; i <= ns->level; i++)
-               free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);
+       while (++i <= ns->level)
+               free_pidmap(pid->numbers + i);
 
        kmem_cache_free(ns->pid_cachep, pid);
        pid = NULL;
@@ -316,7 +317,7 @@ EXPORT_SYMBOL_GPL(find_pid);
 /*
  * attach_pid() must be called with the tasklist_lock write-held.
  */
-int attach_pid(struct task_struct *task, enum pid_type type,
+void attach_pid(struct task_struct *task, enum pid_type type,
                struct pid *pid)
 {
        struct pid_link *link;
@@ -324,11 +325,10 @@ int attach_pid(struct task_struct *task, enum pid_type type,
        link = &task->pids[type];
        link->pid = pid;
        hlist_add_head_rcu(&link->node, &pid->tasks[type]);
-
-       return 0;
 }
 
-void detach_pid(struct task_struct *task, enum pid_type type)
+static void __change_pid(struct task_struct *task, enum pid_type type,
+                       struct pid *new)
 {
        struct pid_link *link;
        struct pid *pid;
@@ -338,7 +338,7 @@ void detach_pid(struct task_struct *task, enum pid_type type)
        pid = link->pid;
 
        hlist_del_rcu(&link->node);
-       link->pid = NULL;
+       link->pid = new;
 
        for (tmp = PIDTYPE_MAX; --tmp >= 0; )
                if (!hlist_empty(&pid->tasks[tmp]))
@@ -347,13 +347,24 @@ void detach_pid(struct task_struct *task, enum pid_type type)
        free_pid(pid);
 }
 
+void detach_pid(struct task_struct *task, enum pid_type type)
+{
+       __change_pid(task, type, NULL);
+}
+
+void change_pid(struct task_struct *task, enum pid_type type,
+               struct pid *pid)
+{
+       __change_pid(task, type, pid);
+       attach_pid(task, type, pid);
+}
+
 /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
 void transfer_pid(struct task_struct *old, struct task_struct *new,
                           enum pid_type type)
 {
        new->pids[type].pid = old->pids[type].pid;
        hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
-       old->pids[type].pid = NULL;
 }
 
 struct task_struct *pid_task(struct pid *pid, enum pid_type type)
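
change_pid() above is the one-step replacement for a detach_pid()/attach_pid() pair: the link is rewritten in place instead of passing through a NULL intermediate. A hedged caller-side sketch (task and new_pgrp are hypothetical; the locking follows the attach_pid() comment above):

write_lock_irq(&tasklist_lock);
change_pid(task, PIDTYPE_PGID, new_pgrp);
write_unlock_irq(&tasklist_lock);
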
@@ -380,12 +391,6 @@ struct task_struct *find_task_by_pid_type_ns(int type, int nr,
 
 EXPORT_SYMBOL(find_task_by_pid_type_ns);
 
-struct task_struct *find_task_by_pid(pid_t nr)
-{
-       return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
-}
-EXPORT_SYMBOL(find_task_by_pid);
-
 struct task_struct *find_task_by_vpid(pid_t vnr)
 {
        return find_task_by_pid_type_ns(PIDTYPE_PID, vnr,
index 6d792b66d854ff3536e30a1114e35a703be59695..98702b4b8851762969d9ddad76505e5739e8166c 100644 (file)
@@ -66,7 +66,7 @@ err_alloc:
        return NULL;
 }
 
-static struct pid_namespace *create_pid_namespace(int level)
+static struct pid_namespace *create_pid_namespace(unsigned int level)
 {
        struct pid_namespace *ns;
        int i;
@@ -92,7 +92,7 @@ static struct pid_namespace *create_pid_namespace(int level)
        atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);
 
        for (i = 1; i < PIDMAP_ENTRIES; i++) {
-               ns->pidmap[i].page = 0;
+               ns->pidmap[i].page = NULL;
                atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
        }
 
index ae5c6c147c4b37def0b8e3e89f0db9c022d0bc67..f1525ad06cb3ebbb83680b2bc0176854002217ca 100644 (file)
@@ -4,8 +4,9 @@
 
 #include <linux/sched.h>
 #include <linux/posix-timers.h>
-#include <asm/uaccess.h>
 #include <linux/errno.h>
+#include <linux/math64.h>
+#include <asm/uaccess.h>
 
 static int check_clock(const clockid_t which_clock)
 {
@@ -47,12 +48,10 @@ static void sample_to_timespec(const clockid_t which_clock,
                               union cpu_time_count cpu,
                               struct timespec *tp)
 {
-       if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-               tp->tv_sec = div_long_long_rem(cpu.sched,
-                                              NSEC_PER_SEC, &tp->tv_nsec);
-       } else {
+       if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
+               *tp = ns_to_timespec(cpu.sched);
+       else
                cputime_to_timespec(cpu.cpu, tp);
-       }
 }
 
 static inline int cpu_time_before(const clockid_t which_clock,
index 8476956ffd9238a378f6c5671649900776939dd0..dbd8398ddb0b41fa1f391849b82199078e72de70 100644 (file)
@@ -310,8 +310,7 @@ int posix_timer_event(struct k_itimer *timr,int si_private)
 
        if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
                struct task_struct *leader;
-               int ret = send_sigqueue(timr->it_sigev_signo, timr->sigq,
-                                       timr->it_process);
+               int ret = send_sigqueue(timr->sigq, timr->it_process, 0);
 
                if (likely(ret >= 0))
                        return ret;
@@ -322,8 +321,7 @@ int posix_timer_event(struct k_itimer *timr,int si_private)
                timr->it_process = leader;
        }
 
-       return send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
-                                  timr->it_process);
+       return send_sigqueue(timr->sigq, timr->it_process, 1);
 }
 EXPORT_SYMBOL_GPL(posix_timer_event);
 
index 6233f3b4ae6628c0676c73f09a2937f5039fcd5c..b45da40e8d25f6a480fb8366004fcd4de2607ef4 100644 (file)
@@ -19,16 +19,6 @@ config PM
          will issue the hlt instruction if nothing is to be done, thereby
          sending the processor to sleep and saving power.
 
-config PM_LEGACY
-       bool "Legacy Power Management API (DEPRECATED)"
-       depends on PM
-       default n
-       ---help---
-          Support for pm_register() and friends.  This old API is obsoleted
-          by the driver model.
-
-          If unsure, say N.
-
 config PM_DEBUG
        bool "Power Management Debug Support"
        depends on PM
index f7dfff28ecdb7035d290a3f7a9e8649283b5d222..597823b5b7003992714a8f784ed4f278fb9dd0c4 100644 (file)
@@ -4,7 +4,6 @@ EXTRA_CFLAGS    +=      -DDEBUG
 endif
 
 obj-y                          := main.o
-obj-$(CONFIG_PM_LEGACY)                += pm.o
 obj-$(CONFIG_PM_SLEEP)         += process.o console.o
 obj-$(CONFIG_HIBERNATION)      += swsusp.o disk.o snapshot.o swap.o user.o
 
index 89bcf4973ee51cb8cc8028c6a3cf6a2272367b0a..b8628be2a465c4841ccc69135d91618b56b2d6ca 100644 (file)
@@ -7,17 +7,39 @@
 #include <linux/vt_kern.h>
 #include <linux/kbd_kern.h>
 #include <linux/console.h>
+#include <linux/module.h>
 #include "power.h"
 
 #if defined(CONFIG_VT) && defined(CONFIG_VT_CONSOLE)
 #define SUSPEND_CONSOLE        (MAX_NR_CONSOLES-1)
 
 static int orig_fgconsole, orig_kmsg;
+static int disable_vt_switch;
+
+/*
+ * Normally during a suspend, we allocate a new console and switch to it.
+ * When we resume, we switch back to the original console.  This switch
+ * can be slow, so on systems where the framebuffer can handle restoration
+ * of video registers anyway, there's little point in doing the console
+ * switch.  This function allows you to disable it by passing it '0'.
+ */
+void pm_set_vt_switch(int do_switch)
+{
+       acquire_console_sem();
+       disable_vt_switch = !do_switch;
+       release_console_sem();
+}
+EXPORT_SYMBOL(pm_set_vt_switch);
 
 int pm_prepare_console(void)
 {
        acquire_console_sem();
 
+       if (disable_vt_switch) {
+               release_console_sem();
+               return 0;
+       }
+
        orig_fgconsole = fg_console;
 
        if (vc_allocate(SUSPEND_CONSOLE)) {
@@ -50,9 +72,12 @@ int pm_prepare_console(void)
 void pm_restore_console(void)
 {
        acquire_console_sem();
+       if (disable_vt_switch) {
+               release_console_sem();
+               return;
+       }
        set_console(orig_fgconsole);
        release_console_sem();
        kmsg_redirect = orig_kmsg;
-       return;
 }
 #endif
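
pm_set_vt_switch() gives drivers that can restore the display hardware themselves a way to opt out of the slow suspend-time console switch described in the comment above. A hedged sketch of how a framebuffer driver might use it; the driver and function names are invented, and the header holding the prototype is an assumption, not taken from this diff:

        #include <linux/pm.h>                   /* assumed location of the prototype */
        #include <linux/platform_device.h>

        static int examplefb_probe(struct platform_device *pdev)
        {
                /* ... map registers, save enough state to reprogram the
                 * video hardware on resume ... */

                pm_set_vt_switch(0);            /* skip the VT switch on suspend/resume */
                return 0;
        }
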
diff --git a/kernel/power/pm.c b/kernel/power/pm.c
deleted file mode 100644 (file)
index 60c73fa..0000000
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- *  pm.c - Power management interface
- *
- *  Copyright (C) 2000 Andrew Henroid
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/pm.h>
-#include <linux/pm_legacy.h>
-#include <linux/interrupt.h>
-#include <linux/mutex.h>
-
-/*
- *     Locking notes:
- *             pm_devs_lock can be a semaphore providing pm ops are not called
- *     from an interrupt handler (already a bad idea so no change here). Each
- *     change must be protected so that an unlink of an entry doesn't clash
- *     with a pm send - which is permitted to sleep in the current architecture
- *
- *     Module unloads clashing with pm events now work out safely, the module 
- *     unload path will block until the event has been sent. It may well block
- *     until a resume but that will be fine.
- */
-static DEFINE_MUTEX(pm_devs_lock);
-static LIST_HEAD(pm_devs);
-
-/**
- *     pm_register - register a device with power management
- *     @type: device type 
- *     @id: device ID
- *     @callback: callback function
- *
- *     Add a device to the list of devices that wish to be notified about
- *     power management events. A &pm_dev structure is returned on success,
- *     on failure the return is %NULL.
- *
- *      The callback function will be called in process context and
- *      it may sleep.
- */
-struct pm_dev *pm_register(pm_dev_t type,
-                          unsigned long id,
-                          pm_callback callback)
-{
-       struct pm_dev *dev = kzalloc(sizeof(struct pm_dev), GFP_KERNEL);
-       if (dev) {
-               dev->type = type;
-               dev->id = id;
-               dev->callback = callback;
-
-               mutex_lock(&pm_devs_lock);
-               list_add(&dev->entry, &pm_devs);
-               mutex_unlock(&pm_devs_lock);
-       }
-       return dev;
-}
-
-/**
- *     pm_send - send request to a single device
- *     @dev: device to send to
- *     @rqst: power management request
- *     @data: data for the callback
- *
- *     Issue a power management request to a given device. The 
- *     %PM_SUSPEND and %PM_RESUME events are handled specially. The
- *     data field must hold the intended next state. No call is made
- *     if the state matches.
- *
- *     BUGS: what stops two power management requests occurring in parallel
- *     and conflicting.
- *
- *     WARNING: Calling pm_send directly is not generally recommended, in
- *     particular there is no locking against the pm_dev going away. The
- *     caller must maintain all needed locking or have 'inside knowledge'
- *     on the safety. Also remember that this function is not locked against
- *     pm_unregister. This means that you must handle SMP races on callback
- *     execution and unload yourself.
- */
-static int pm_send(struct pm_dev *dev, pm_request_t rqst, void *data)
-{
-       int status = 0;
-       unsigned long prev_state, next_state;
-
-       if (in_interrupt())
-               BUG();
-
-       switch (rqst) {
-       case PM_SUSPEND:
-       case PM_RESUME:
-               prev_state = dev->state;
-               next_state = (unsigned long) data;
-               if (prev_state != next_state) {
-                       if (dev->callback)
-                               status = (*dev->callback)(dev, rqst, data);
-                       if (!status) {
-                               dev->state = next_state;
-                               dev->prev_state = prev_state;
-                       }
-               }
-               else {
-                       dev->prev_state = prev_state;
-               }
-               break;
-       default:
-               if (dev->callback)
-                       status = (*dev->callback)(dev, rqst, data);
-               break;
-       }
-       return status;
-}
-
-/*
- * Undo incomplete request
- */
-static void pm_undo_all(struct pm_dev *last)
-{
-       struct list_head *entry = last->entry.prev;
-       while (entry != &pm_devs) {
-               struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
-               if (dev->state != dev->prev_state) {
-                       /* previous state was zero (running) resume or
-                        * previous state was non-zero (suspended) suspend
-                        */
-                       pm_request_t undo = (dev->prev_state
-                                            ? PM_SUSPEND:PM_RESUME);
-                       pm_send(dev, undo, (void*) dev->prev_state);
-               }
-               entry = entry->prev;
-       }
-}
-
-/**
- *     pm_send_all - send request to all managed devices
- *     @rqst: power management request
- *     @data: data for the callback
- *
- *     Issue a power management request to a all devices. The 
- *     %PM_SUSPEND events are handled specially. Any device is 
- *     permitted to fail a suspend by returning a non zero (error)
- *     value from its callback function. If any device vetoes a 
- *     suspend request then all other devices that have suspended 
- *     during the processing of this request are restored to their
- *     previous state.
- *
- *     WARNING:  This function takes the pm_devs_lock. The lock is not dropped until
- *     the callbacks have completed. This prevents races against pm locking
- *     functions, races against module unload pm_unregister code. It does
- *     mean however that you must not issue pm_ functions within the callback
- *     or you will deadlock and users will hate you.
- *
- *     Zero is returned on success. If a suspend fails then the status
- *     from the device that vetoes the suspend is returned.
- *
- *     BUGS: what stops two power management requests occurring in parallel
- *     and conflicting.
- */
-int pm_send_all(pm_request_t rqst, void *data)
-{
-       struct list_head *entry;
-       
-       mutex_lock(&pm_devs_lock);
-       entry = pm_devs.next;
-       while (entry != &pm_devs) {
-               struct pm_dev *dev = list_entry(entry, struct pm_dev, entry);
-               if (dev->callback) {
-                       int status = pm_send(dev, rqst, data);
-                       if (status) {
-                               /* return devices to previous state on
-                                * failed suspend request
-                                */
-                               if (rqst == PM_SUSPEND)
-                                       pm_undo_all(dev);
-                               mutex_unlock(&pm_devs_lock);
-                               return status;
-                       }
-               }
-               entry = entry->next;
-       }
-       mutex_unlock(&pm_devs_lock);
-       return 0;
-}
-
-EXPORT_SYMBOL(pm_register);
-EXPORT_SYMBOL(pm_send_all);
-
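
With pm.c gone, the last traces of the pm_register()/pm_send_all() interface leave the tree; as the deleted Kconfig text says, the driver model replaces it, with suspend/resume participation attached to the driver rather than a global callback list. A rough sketch of the driver-model equivalent for a platform device of this era; all names here are invented for illustration:

        #include <linux/platform_device.h>

        static int exampledev_suspend(struct platform_device *pdev, pm_message_t state)
        {
                /* quiesce the hardware and save its context */
                return 0;
        }

        static int exampledev_resume(struct platform_device *pdev)
        {
                /* restore the saved context and restart the hardware */
                return 0;
        }

        static struct platform_driver exampledev_driver = {
                .driver  = { .name = "exampledev" },
                .suspend = exampledev_suspend,
                .resume  = exampledev_resume,
        };
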
index bdd4ea8c3f2b67520c1a55e0ba34a0a247d350fc..8fb01c32aa3bb7eaf44f5b9e63e316628dd935c1 100644 (file)
@@ -111,6 +111,9 @@ struct console_cmdline
        char    name[8];                        /* Name of the driver       */
        int     index;                          /* Minor dev. to use        */
        char    *options;                       /* Options for the driver   */
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+       char    *brl_options;                   /* Options for braille driver */
+#endif
 };
 
 #define MAX_CMDLINECONSOLES 8
@@ -808,15 +811,60 @@ static void call_console_drivers(unsigned start, unsigned end)
 
 #endif
 
+static int __add_preferred_console(char *name, int idx, char *options,
+                                  char *brl_options)
+{
+       struct console_cmdline *c;
+       int i;
+
+       /*
+        *      See if this tty is not yet registered, and
+        *      if we have a slot free.
+        */
+       for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
+               if (strcmp(console_cmdline[i].name, name) == 0 &&
+                         console_cmdline[i].index == idx) {
+                               if (!brl_options)
+                                       selected_console = i;
+                               return 0;
+               }
+       if (i == MAX_CMDLINECONSOLES)
+               return -E2BIG;
+       if (!brl_options)
+               selected_console = i;
+       c = &console_cmdline[i];
+       strlcpy(c->name, name, sizeof(c->name));
+       c->options = options;
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+       c->brl_options = brl_options;
+#endif
+       c->index = idx;
+       return 0;
+}
 /*
  * Set up a list of consoles.  Called from init/main.c
  */
 static int __init console_setup(char *str)
 {
        char buf[sizeof(console_cmdline[0].name) + 4]; /* 4 for index */
-       char *s, *options;
+       char *s, *options, *brl_options = NULL;
        int idx;
 
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+       if (!memcmp(str, "brl,", 4)) {
+               brl_options = "";
+               str += 4;
+       } else if (!memcmp(str, "brl=", 4)) {
+               brl_options = str + 4;
+               str = strchr(brl_options, ',');
+               if (!str) {
+                       printk(KERN_ERR "need port name after brl=\n");
+                       return 1;
+               }
+               *(str++) = 0;
+       }
+#endif
+
        /*
         * Decode str into name, index, options.
         */
@@ -841,7 +889,7 @@ static int __init console_setup(char *str)
        idx = simple_strtoul(s, NULL, 10);
        *s = 0;
 
-       add_preferred_console(buf, idx, options);
+       __add_preferred_console(buf, idx, options, brl_options);
        return 1;
 }
 __setup("console=", console_setup);
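
The two memcmp() checks above give the console= parameter two braille-aware forms: a bare "brl," prefix (empty braille options, the rest parsed as the usual console spec) and "brl=<options>," with the braille device options supplied before the console spec. Schematically, with placeholder values rather than anything taken from this commit:

        console=brl,ttyS0,115200n8
        console=brl=<braille-device-options>,ttyS0,115200n8
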
@@ -861,28 +909,7 @@ __setup("console=", console_setup);
  */
 int add_preferred_console(char *name, int idx, char *options)
 {
-       struct console_cmdline *c;
-       int i;
-
-       /*
-        *      See if this tty is not yet registered, and
-        *      if we have a slot free.
-        */
-       for (i = 0; i < MAX_CMDLINECONSOLES && console_cmdline[i].name[0]; i++)
-               if (strcmp(console_cmdline[i].name, name) == 0 &&
-                         console_cmdline[i].index == idx) {
-                               selected_console = i;
-                               return 0;
-               }
-       if (i == MAX_CMDLINECONSOLES)
-               return -E2BIG;
-       selected_console = i;
-       c = &console_cmdline[i];
-       memcpy(c->name, name, sizeof(c->name));
-       c->name[sizeof(c->name) - 1] = 0;
-       c->options = options;
-       c->index = idx;
-       return 0;
+       return __add_preferred_console(name, idx, options, NULL);
 }
 
 int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, char *options)
@@ -894,7 +921,7 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
                if (strcmp(console_cmdline[i].name, name) == 0 &&
                          console_cmdline[i].index == idx) {
                                c = &console_cmdline[i];
-                               memcpy(c->name, name_new, sizeof(c->name));
+                               strlcpy(c->name, name_new, sizeof(c->name));
                                c->name[sizeof(c->name) - 1] = 0;
                                c->options = options;
                                c->index = idx_new;
@@ -1163,6 +1190,16 @@ void register_console(struct console *console)
                        continue;
                if (console->index < 0)
                        console->index = console_cmdline[i].index;
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+               if (console_cmdline[i].brl_options) {
+                       console->flags |= CON_BRL;
+                       braille_register_console(console,
+                                       console_cmdline[i].index,
+                                       console_cmdline[i].options,
+                                       console_cmdline[i].brl_options);
+                       return;
+               }
+#endif
                if (console->setup &&
                    console->setup(console, console_cmdline[i].options) != 0)
                        break;
@@ -1221,6 +1258,11 @@ int unregister_console(struct console *console)
         struct console *a, *b;
        int res = 1;
 
+#ifdef CONFIG_A11Y_BRAILLE_CONSOLE
+       if (console->flags & CON_BRL)
+               return braille_unregister_console(console);
+#endif
+
        acquire_console_sem();
        if (console_drivers == console) {
                console_drivers=console->next;
@@ -1272,8 +1314,8 @@ late_initcall(disable_boot_consoles);
  */
 void tty_write_message(struct tty_struct *tty, char *msg)
 {
-       if (tty && tty->driver->write)
-               tty->driver->write(tty, msg, strlen(msg));
+       if (tty && tty->ops->write)
+               tty->ops->write(tty, msg, strlen(msg));
        return;
 }
 
@@ -1287,31 +1329,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
  */
 int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst)
 {
-       static DEFINE_SPINLOCK(ratelimit_lock);
-       static unsigned toks = 10 * 5 * HZ;
-       static unsigned long last_msg;
-       static int missed;
-       unsigned long flags;
-       unsigned long now = jiffies;
-
-       spin_lock_irqsave(&ratelimit_lock, flags);
-       toks += now - last_msg;
-       last_msg = now;
-       if (toks > (ratelimit_burst * ratelimit_jiffies))
-               toks = ratelimit_burst * ratelimit_jiffies;
-       if (toks >= ratelimit_jiffies) {
-               int lost = missed;
-
-               missed = 0;
-               toks -= ratelimit_jiffies;
-               spin_unlock_irqrestore(&ratelimit_lock, flags);
-               if (lost)
-                       printk(KERN_WARNING "printk: %d messages suppressed.\n", lost);
-               return 1;
-       }
-       missed++;
-       spin_unlock_irqrestore(&ratelimit_lock, flags);
-       return 0;
+       return __ratelimit(ratelimit_jiffies, ratelimit_burst);
 }
 EXPORT_SYMBOL(__printk_ratelimit);
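
The body removed above is a plain token-bucket limiter, and __printk_ratelimit() now simply forwards to the shared __ratelimit() helper. A small user-space model of the same arithmetic, with jiffies replaced by an integer tick passed in by the caller so it can run standalone; the names and test values are illustrative only:

        #include <stdio.h>

        /* Token bucket modelled on the removed code: callers earn one token per
         * elapsed tick (capped at burst * interval), and each message that is
         * let through costs interval tokens. */
        static unsigned int toks = 10 * 5;      /* start with a full-ish bucket */
        static unsigned long last;
        static int missed;

        static int ratelimit(unsigned long now, unsigned int interval, unsigned int burst)
        {
                toks += now - last;
                last = now;
                if (toks > burst * interval)
                        toks = burst * interval;
                if (toks >= interval) {
                        if (missed)
                                printf("%d messages suppressed\n", missed);
                        missed = 0;
                        toks -= interval;
                        return 1;               /* message allowed */
                }
                missed++;
                return 0;                       /* message suppressed */
        }

        int main(void)
        {
                unsigned long t;

                for (t = 0; t < 60; t++)
                        if (ratelimit(t, 5, 10))
                                printf("tick %lu: message emitted\n", t);
                return 0;
        }
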
 
index 606d7387265ccee0c030c0afdf317ede0d943f26..ae7ead82cbc9f7bd25a15ec373c14a1f2ea79bf5 100644 (file)
@@ -587,10 +587,10 @@ static int __init create_proc_profile(void)
                return 0;
        if (create_hash_tables())
                return -1;
-       entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL);
+       entry = proc_create("profile", S_IWUSR | S_IRUGO,
+                           NULL, &proc_profile_operations);
        if (!entry)
                return 0;
-       entry->proc_fops = &proc_profile_operations;
        entry->size = (1+prof_len) * sizeof(atomic_t);
        hotcpu_notifier(profile_cpu_callback, 0);
        return 0;
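
The same conversion recurs elsewhere in this merge (kernel/resource.c and kernel/sched_debug.c below): instead of creating the /proc entry and then poking proc_fops into it, the file operations go to proc_create() up front, so the entry is never visible without its operations. Schematically, for a hypothetical entry "foo" served by foo_fops:

        /* before */
        entry = create_proc_entry("foo", 0444, NULL);
        if (entry)
                entry->proc_fops = &foo_fops;

        /* after */
        proc_create("foo", 0444, NULL, &foo_fops);
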
index 67e392ed5496e97c54100edaa2557e2813d6faa8..6c19e94fd0a5482786e432599a0fb4840a5c66cb 100644 (file)
@@ -73,7 +73,7 @@ void __ptrace_unlink(struct task_struct *child)
        BUG_ON(!child->ptrace);
 
        child->ptrace = 0;
-       if (!list_empty(&child->ptrace_list)) {
+       if (ptrace_reparented(child)) {
                list_del_init(&child->ptrace_list);
                remove_parent(child);
                child->parent = child->real_parent;
@@ -168,8 +168,6 @@ int ptrace_attach(struct task_struct *task)
        audit_ptrace(task);
 
        retval = -EPERM;
-       if (task->pid <= 1)
-               goto out;
        if (same_thread_group(task, current))
                goto out;
 
@@ -208,8 +206,7 @@ repeat:
 
        __ptrace_link(task, current);
 
-       force_sig_specific(SIGSTOP, task);
-
+       send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
 bad:
        write_unlock_irqrestore(&tasklist_lock, flags);
        task_unlock(task);
@@ -522,12 +519,6 @@ struct task_struct *ptrace_get_task_struct(pid_t pid)
 {
        struct task_struct *child;
 
-       /*
-        * Tracing init is not allowed.
-        */
-       if (pid == 1)
-               return ERR_PTR(-EPERM);
-
        read_lock(&tasklist_lock);
        child = find_task_by_vpid(pid);
        if (child)
@@ -543,7 +534,6 @@ struct task_struct *ptrace_get_task_struct(pid_t pid)
 #define arch_ptrace_attach(child)      do { } while (0)
 #endif
 
-#ifndef __ARCH_SYS_PTRACE
 asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
 {
        struct task_struct *child;
@@ -591,7 +581,6 @@ asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
        unlock_kernel();
        return ret;
 }
-#endif /* __ARCH_SYS_PTRACE */
 
 int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
 {
@@ -612,7 +601,7 @@ int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
        return (copied == sizeof(data)) ? 0 : -EIO;
 }
 
-#ifdef CONFIG_COMPAT
+#if defined CONFIG_COMPAT && defined __ARCH_WANT_COMPAT_SYS_PTRACE
 #include <linux/compat.h>
 
 int compat_ptrace_request(struct task_struct *child, compat_long_t request,
@@ -667,7 +656,6 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
        return ret;
 }
 
-#ifdef __ARCH_WANT_COMPAT_SYS_PTRACE
 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
                                  compat_long_t addr, compat_long_t data)
 {
@@ -710,6 +698,4 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
        unlock_kernel();
        return ret;
 }
-#endif /* __ARCH_WANT_COMPAT_SYS_PTRACE */
-
-#endif /* CONFIG_COMPAT */
+#endif /* CONFIG_COMPAT && __ARCH_WANT_COMPAT_SYS_PTRACE */
index 47894f919d4ea2848263a3e1a5ec5218f27237e3..33acc424667e03381597d76771fa12c9ad777124 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/byteorder/swabb.h>
 #include <linux/stat.h>
 #include <linux/srcu.h>
+#include <linux/slab.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
index d6204a4858183c754d5d401ad230c11f60a546c1..7de644cdec43590be390d3037206e5722aa46856 100644 (file)
@@ -65,6 +65,35 @@ static struct vm_operations_struct relay_file_mmap_ops = {
        .close = relay_file_mmap_close,
 };
 
+/*
+ * allocate an array of pointers to struct page
+ */
+static struct page **relay_alloc_page_array(unsigned int n_pages)
+{
+       struct page **array;
+       size_t pa_size = n_pages * sizeof(struct page *);
+
+       if (pa_size > PAGE_SIZE) {
+               array = vmalloc(pa_size);
+               if (array)
+                       memset(array, 0, pa_size);
+       } else {
+               array = kzalloc(pa_size, GFP_KERNEL);
+       }
+       return array;
+}
+
+/*
+ * free an array of pointers to struct page
+ */
+static void relay_free_page_array(struct page **array)
+{
+       if (is_vmalloc_addr(array))
+               vfree(array);
+       else
+               kfree(array);
+}
+
 /**
  *     relay_mmap_buf: - mmap channel buffer to process address space
  *     @buf: relay channel buffer
@@ -109,7 +138,7 @@ static void *relay_alloc_buf(struct rchan_buf *buf, size_t *size)
        *size = PAGE_ALIGN(*size);
        n_pages = *size >> PAGE_SHIFT;
 
-       buf->page_array = kcalloc(n_pages, sizeof(struct page *), GFP_KERNEL);
+       buf->page_array = relay_alloc_page_array(n_pages);
        if (!buf->page_array)
                return NULL;
 
@@ -130,7 +159,7 @@ static void *relay_alloc_buf(struct rchan_buf *buf, size_t *size)
 depopulate:
        for (j = 0; j < i; j++)
                __free_page(buf->page_array[j]);
-       kfree(buf->page_array);
+       relay_free_page_array(buf->page_array);
        return NULL;
 }
 
@@ -189,7 +218,7 @@ static void relay_destroy_buf(struct rchan_buf *buf)
                vunmap(buf->start);
                for (i = 0; i < buf->page_count; i++)
                        __free_page(buf->page_array[i]);
-               kfree(buf->page_array);
+               relay_free_page_array(buf->page_array);
        }
        chan->buf[buf->cpu] = NULL;
        kfree(buf->padding);
@@ -1162,7 +1191,7 @@ static ssize_t relay_file_splice_read(struct file *in,
        ret = 0;
        spliced = 0;
 
-       while (len) {
+       while (len && !spliced) {
                ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret);
                if (ret < 0)
                        break;
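
For scale: assuming 4 KiB pages and 8-byte page pointers, pa_size stays within one page for up to 512 sub-buffer pages, so only per-cpu relay buffers larger than 2 MiB take the new vmalloc path in relay_alloc_page_array(); smaller buffers keep using kzalloc() as before.
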
index efbfc0fc232f33c49b5b918704b4e13ae5d0f2cc..d3c61b4ebef238c110ae3994e0d0fdc078fabfc8 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/types.h>
 #include <linux/parser.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
 #include <linux/res_counter.h>
 #include <linux/uaccess.h>
 
@@ -27,6 +28,8 @@ int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
        }
 
        counter->usage += val;
+       if (counter->usage > counter->max_usage)
+               counter->max_usage = counter->usage;
        return 0;
 }
 
@@ -65,6 +68,8 @@ res_counter_member(struct res_counter *counter, int member)
        switch (member) {
        case RES_USAGE:
                return &counter->usage;
+       case RES_MAX_USAGE:
+               return &counter->max_usage;
        case RES_LIMIT:
                return &counter->limit;
        case RES_FAILCNT:
@@ -92,6 +97,11 @@ ssize_t res_counter_read(struct res_counter *counter, int member,
                        pos, buf, s - buf);
 }
 
+u64 res_counter_read_u64(struct res_counter *counter, int member)
+{
+       return *res_counter_member(counter, member);
+}
+
 ssize_t res_counter_write(struct res_counter *counter, int member,
                const char __user *userbuf, size_t nbytes, loff_t *pos,
                int (*write_strategy)(char *st_buf, unsigned long long *val))
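
The two added lines in res_counter_charge_locked() turn the counter into a high-watermark tracker: usage moves down again when resources are uncharged, but max_usage only ratchets upward, and that is the value the new RES_MAX_USAGE member and res_counter_read_u64() expose. A stand-alone model of the behaviour, without the locking or failure accounting; names are illustrative:

        #include <stdio.h>

        struct counter {
                unsigned long usage;
                unsigned long max_usage;
                unsigned long limit;
        };

        static int charge(struct counter *c, unsigned long val)
        {
                if (c->usage + val > c->limit)
                        return -1;                      /* charge refused */
                c->usage += val;
                if (c->usage > c->max_usage)            /* ratchet the watermark */
                        c->max_usage = c->usage;
                return 0;
        }

        static void uncharge(struct counter *c, unsigned long val)
        {
                c->usage -= val;
        }

        int main(void)
        {
                struct counter c = { .limit = 100 };

                charge(&c, 60);
                charge(&c, 30);
                uncharge(&c, 50);
                printf("usage=%lu max_usage=%lu\n", c.usage, c.max_usage);  /* 40 and 90 */
                return 0;
        }
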
index cee12cc47cab04c270e9193341ba43e8c57fd92b..74af2d7cb5a135ffa1da4c52ce9af026c527044d 100644 (file)
@@ -131,14 +131,8 @@ static const struct file_operations proc_iomem_operations = {
 
 static int __init ioresources_init(void)
 {
-       struct proc_dir_entry *entry;
-
-       entry = create_proc_entry("ioports", 0, NULL);
-       if (entry)
-               entry->proc_fops = &proc_ioports_operations;
-       entry = create_proc_entry("iomem", 0, NULL);
-       if (entry)
-               entry->proc_fops = &proc_iomem_operations;
+       proc_create("ioports", 0, NULL, &proc_ioports_operations);
+       proc_create("iomem", 0, NULL, &proc_iomem_operations);
        return 0;
 }
 __initcall(ioresources_init);
index 740fb409e5bb32854dc9cc9b753cb25d83cdb7d4..34bcc5bc120e5f2da970e180dfaf65d73b80dc7d 100644 (file)
@@ -8025,7 +8025,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 
        se->my_q = cfs_rq;
        se->load.weight = tg->shares;
-       se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
+       se->load.inv_weight = div64_u64(1ULL<<32, se->load.weight);
        se->parent = parent;
 }
 #endif
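
The rename to div64_u64() makes the intent of inv_weight explicit: it caches 2^32 / weight from an unsigned 64-by-64 division so that later scaling by 1/weight can be done with a multiply and a 32-bit shift instead of a division. A quick stand-alone check of that identity; the values are illustrative and this is not the scheduler's exact cost function:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t weight = 1024;                         /* e.g. the nice-0 task weight */
                uint64_t inv_weight = (1ULL << 32) / weight;    /* what div64_u64() computes */
                uint64_t delta = 3000000;

                printf("inv_weight = %llu\n", (unsigned long long)inv_weight);  /* 4194304 */
                printf("delta / weight      = %llu\n", (unsigned long long)(delta / weight));
                printf("(delta * inv) >> 32 = %llu\n", (unsigned long long)((delta * inv_weight) >> 32));
                return 0;
        }
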
@@ -8692,7 +8692,7 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares)
                dequeue_entity(cfs_rq, se, 0);
 
        se->load.weight = shares;
-       se->load.inv_weight = div64_64((1ULL<<32), shares);
+       se->load.inv_weight = div64_u64((1ULL<<32), shares);
 
        if (on_rq)
                enqueue_entity(cfs_rq, se, 0);
@@ -8787,7 +8787,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
        if (runtime == RUNTIME_INF)
                return 1ULL << 16;
 
-       return div64_64(runtime << 16, period);
+       return div64_u64(runtime << 16, period);
 }
 
 #ifdef CONFIG_CGROUP_SCHED
@@ -9057,13 +9057,13 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static int cpu_shares_write_uint(struct cgroup *cgrp, struct cftype *cftype,
+static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
                                u64 shareval)
 {
        return sched_group_set_shares(cgroup_tg(cgrp), shareval);
 }
 
-static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
+static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
 {
        struct task_group *tg = cgroup_tg(cgrp);
 
@@ -9073,48 +9073,14 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft)
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static ssize_t cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
-                               struct file *file,
-                               const char __user *userbuf,
-                               size_t nbytes, loff_t *unused_ppos)
+                               s64 val)
 {
-       char buffer[64];
-       int retval = 0;
-       s64 val;
-       char *end;
-
-       if (!nbytes)
-               return -EINVAL;
-       if (nbytes >= sizeof(buffer))
-               return -E2BIG;
-       if (copy_from_user(buffer, userbuf, nbytes))
-               return -EFAULT;
-
-       buffer[nbytes] = 0;     /* nul-terminate */
-
-       /* strip newline if necessary */
-       if (nbytes && (buffer[nbytes-1] == '\n'))
-               buffer[nbytes-1] = 0;
-       val = simple_strtoll(buffer, &end, 0);
-       if (*end)
-               return -EINVAL;
-
-       /* Pass to subsystem */
-       retval = sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
-       if (!retval)
-               retval = nbytes;
-       return retval;
+       return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
 }
 
-static ssize_t cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft,
-                                  struct file *file,
-                                  char __user *buf, size_t nbytes,
-                                  loff_t *ppos)
+static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
 {
-       char tmp[64];
-       long val = sched_group_rt_runtime(cgroup_tg(cgrp));
-       int len = sprintf(tmp, "%ld\n", val);
-
-       return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
+       return sched_group_rt_runtime(cgroup_tg(cgrp));
 }
 
 static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
@@ -9133,20 +9099,20 @@ static struct cftype cpu_files[] = {
 #ifdef CONFIG_FAIR_GROUP_SCHED
        {
                .name = "shares",
-               .read_uint = cpu_shares_read_uint,
-               .write_uint = cpu_shares_write_uint,
+               .read_u64 = cpu_shares_read_u64,
+               .write_u64 = cpu_shares_write_u64,
        },
 #endif
 #ifdef CONFIG_RT_GROUP_SCHED
        {
                .name = "rt_runtime_us",
-               .read = cpu_rt_runtime_read,
-               .write = cpu_rt_runtime_write,
+               .read_s64 = cpu_rt_runtime_read,
+               .write_s64 = cpu_rt_runtime_write,
        },
        {
                .name = "rt_period_us",
-               .read_uint = cpu_rt_period_read_uint,
-               .write_uint = cpu_rt_period_write_uint,
+               .read_u64 = cpu_rt_period_read_uint,
+               .write_u64 = cpu_rt_period_write_uint,
        },
 #endif
 };
@@ -9277,8 +9243,8 @@ out:
 static struct cftype files[] = {
        {
                .name = "usage",
-               .read_uint = cpuusage_read,
-               .write_uint = cpuusage_write,
+               .read_u64 = cpuusage_read,
+               .write_u64 = cpuusage_write,
        },
 };
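
The control-file changes above follow a wider cgroup interface cleanup in this merge: instead of open-coded read()/write() methods that format and parse a single number through a user buffer, a cftype now supplies typed read_u64/write_u64 (or read_s64/write_s64) callbacks and lets the cgroup core do the string handling. A minimal sketch of such a file for a hypothetical subsystem; every name below is invented:

        static u64 example_quota_read(struct cgroup *cgrp, struct cftype *cft)
        {
                return example_group(cgrp)->quota;              /* hypothetical accessor */
        }

        static int example_quota_write(struct cgroup *cgrp, struct cftype *cft, u64 val)
        {
                return example_group_set_quota(example_group(cgrp), val);
        }

        static struct cftype example_files[] = {
                {
                        .name = "quota",
                        .read_u64 = example_quota_read,
                        .write_u64 = example_quota_write,
                },
        };
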
 
index f3f4af4b8b0fb8ffa4e24da75f5982358c44cc3e..6b4a12558e88b94893afeb326c30a21da6269fbf 100644 (file)
@@ -277,12 +277,9 @@ static int __init init_sched_debug_procfs(void)
 {
        struct proc_dir_entry *pe;
 
-       pe = create_proc_entry("sched_debug", 0644, NULL);
+       pe = proc_create("sched_debug", 0644, NULL, &sched_debug_fops);
        if (!pe)
                return -ENOMEM;
-
-       pe->proc_fops = &sched_debug_fops;
-
        return 0;
 }
 
@@ -360,8 +357,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
                avg_per_cpu = p->se.sum_exec_runtime;
                if (p->se.nr_migrations) {
-                       avg_per_cpu = div64_64(avg_per_cpu,
-                                              p->se.nr_migrations);
+                       avg_per_cpu = div64_u64(avg_per_cpu,
+                                               p->se.nr_migrations);
                } else {
                        avg_per_cpu = -1LL;
                }
index 64ad0ed15992562a4b67da61e4b34746dc9c48c7..72bb4f51f9634c448ad6eb4986d8ed7f35deb1f1 100644 (file)
 
 static struct kmem_cache *sigqueue_cachep;
 
+static int __sig_ignored(struct task_struct *t, int sig)
+{
+       void __user *handler;
+
+       /* Is it explicitly or implicitly ignored? */
+
+       handler = t->sighand->action[sig - 1].sa.sa_handler;
+       return handler == SIG_IGN ||
+               (handler == SIG_DFL && sig_kernel_ignore(sig));
+}
 
 static int sig_ignored(struct task_struct *t, int sig)
 {
-       void __user * handler;
-
        /*
         * Tracers always want to know about signals..
         */
@@ -58,10 +66,7 @@ static int sig_ignored(struct task_struct *t, int sig)
        if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
                return 0;
 
-       /* Is it explicitly or implicitly ignored? */
-       handler = t->sighand->action[sig-1].sa.sa_handler;
-       return   handler == SIG_IGN ||
-               (handler == SIG_DFL && sig_kernel_ignore(sig));
+       return __sig_ignored(t, sig);
 }
 
 /*
@@ -372,7 +377,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
  */
 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 {
-       int signr = 0;
+       int signr;
 
        /* We only dequeue private signals from ourselves, we don't let
         * signalfd steal them
@@ -405,8 +410,12 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
                        }
                }
        }
+
        recalc_sigpending();
-       if (signr && unlikely(sig_kernel_stop(signr))) {
+       if (!signr)
+               return 0;
+
+       if (unlikely(sig_kernel_stop(signr))) {
                /*
                 * Set a marker that we have dequeued a stop signal.  Our
                 * caller might release the siglock and then the pending
@@ -422,9 +431,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
                if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
                        tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
        }
-       if (signr &&
-            ((info->si_code & __SI_MASK) == __SI_TIMER) &&
-            info->si_sys_private) {
+       if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
                /*
                 * Release the siglock to ensure proper locking order
                 * of timer locks outside of siglocks.  Note, we leave
@@ -526,21 +533,34 @@ static int rm_from_queue(unsigned long mask, struct sigpending *s)
 static int check_kill_permission(int sig, struct siginfo *info,
                                 struct task_struct *t)
 {
-       int error = -EINVAL;
+       struct pid *sid;
+       int error;
+
        if (!valid_signal(sig))
-               return error;
+               return -EINVAL;
 
-       if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) {
-               error = audit_signal_info(sig, t); /* Let audit system see the signal */
-               if (error)
-                       return error;
-               error = -EPERM;
-               if (((sig != SIGCONT) ||
-                       (task_session_nr(current) != task_session_nr(t)))
-                   && (current->euid ^ t->suid) && (current->euid ^ t->uid)
-                   && (current->uid ^ t->suid) && (current->uid ^ t->uid)
-                   && !capable(CAP_KILL))
+       if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
+               return 0;
+
+       error = audit_signal_info(sig, t); /* Let audit system see the signal */
+       if (error)
                return error;
+
+       if ((current->euid ^ t->suid) && (current->euid ^ t->uid) &&
+           (current->uid  ^ t->suid) && (current->uid  ^ t->uid) &&
+           !capable(CAP_KILL)) {
+               switch (sig) {
+               case SIGCONT:
+                       sid = task_session(t);
+                       /*
+                        * We don't return the error if sid == NULL. The
+                        * task was unhashed, the caller must notice this.
+                        */
+                       if (!sid || sid == task_session(current))
+                               break;
+               default:
+                       return -EPERM;
+               }
        }
 
        return security_task_kill(t, info, sig, 0);
@@ -550,62 +570,44 @@ static int check_kill_permission(int sig, struct siginfo *info,
 static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
 
 /*
- * Handle magic process-wide effects of stop/continue signals.
- * Unlike the signal actions, these happen immediately at signal-generation
+ * Handle magic process-wide effects of stop/continue signals. Unlike
+ * the signal actions, these happen immediately at signal-generation
  * time regardless of blocking, ignoring, or handling.  This does the
  * actual continuing for SIGCONT, but not the actual stopping for stop
- * signals.  The process stop is done as a signal action for SIG_DFL.
+ * signals. The process stop is done as a signal action for SIG_DFL.
+ *
+ * Returns true if the signal should be actually delivered, otherwise
+ * it should be dropped.
  */
-static void handle_stop_signal(int sig, struct task_struct *p)
+static int prepare_signal(int sig, struct task_struct *p)
 {
+       struct signal_struct *signal = p->signal;
        struct task_struct *t;
 
-       if (p->signal->flags & SIGNAL_GROUP_EXIT)
+       if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
                /*
-                * The process is in the middle of dying already.
+                * The process is in the middle of dying, nothing to do.
                 */
-               return;
-
-       if (sig_kernel_stop(sig)) {
+       } else if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.
                 */
-               rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
+               rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
                t = p;
                do {
                        rm_from_queue(sigmask(SIGCONT), &t->pending);
-                       t = next_thread(t);
-               } while (t != p);
+               } while_each_thread(p, t);
        } else if (sig == SIGCONT) {
+               unsigned int why;
                /*
                 * Remove all stop signals from all queues,
                 * and wake all threads.
                 */
-               if (unlikely(p->signal->group_stop_count > 0)) {
-                       /*
-                        * There was a group stop in progress.  We'll
-                        * pretend it finished before we got here.  We are
-                        * obliged to report it to the parent: if the
-                        * SIGSTOP happened "after" this SIGCONT, then it
-                        * would have cleared this pending SIGCONT.  If it
-                        * happened "before" this SIGCONT, then the parent
-                        * got the SIGCHLD about the stop finishing before
-                        * the continue happened.  We do the notification
-                        * now, and it's as if the stop had finished and
-                        * the SIGCHLD was pending on entry to this kill.
-                        */
-                       p->signal->group_stop_count = 0;
-                       p->signal->flags = SIGNAL_STOP_CONTINUED;
-                       spin_unlock(&p->sighand->siglock);
-                       do_notify_parent_cldstop(p, CLD_STOPPED);
-                       spin_lock(&p->sighand->siglock);
-               }
-               rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
+               rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
                t = p;
                do {
                        unsigned int state;
                        rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
-                       
                        /*
                         * If there is a handler for SIGCONT, we must make
                         * sure that no thread returns to user mode before
@@ -615,7 +617,7 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                         * running the handler.  With the TIF_SIGPENDING
                         * flag set, the thread will pause and acquire the
                         * siglock that we hold now and until we've queued
-                        * the pending signal. 
+                        * the pending signal.
                         *
                         * Wake up the stopped thread _after_ setting
                         * TIF_SIGPENDING
@@ -626,49 +628,163 @@ static void handle_stop_signal(int sig, struct task_struct *p)
                                state |= TASK_INTERRUPTIBLE;
                        }
                        wake_up_state(t, state);
+               } while_each_thread(p, t);
 
-                       t = next_thread(t);
-               } while (t != p);
+               /*
+                * Notify the parent with CLD_CONTINUED if we were stopped.
+                *
+                * If we were in the middle of a group stop, we pretend it
+                * was already finished, and then continued. Since SIGCHLD
+                * doesn't queue we report only CLD_STOPPED, as if the next
+                * CLD_CONTINUED was dropped.
+                */
+               why = 0;
+               if (signal->flags & SIGNAL_STOP_STOPPED)
+                       why |= SIGNAL_CLD_CONTINUED;
+               else if (signal->group_stop_count)
+                       why |= SIGNAL_CLD_STOPPED;
 
-               if (p->signal->flags & SIGNAL_STOP_STOPPED) {
+               if (why) {
                        /*
-                        * We were in fact stopped, and are now continued.
-                        * Notify the parent with CLD_CONTINUED.
+                        * The first thread which returns from finish_stop()
+                        * will take ->siglock, notice SIGNAL_CLD_MASK, and
+                        * notify its parent. See get_signal_to_deliver().
                         */
-                       p->signal->flags = SIGNAL_STOP_CONTINUED;
-                       p->signal->group_exit_code = 0;
-                       spin_unlock(&p->sighand->siglock);
-                       do_notify_parent_cldstop(p, CLD_CONTINUED);
-                       spin_lock(&p->sighand->siglock);
+                       signal->flags = why | SIGNAL_STOP_CONTINUED;
+                       signal->group_stop_count = 0;
+                       signal->group_exit_code = 0;
                } else {
                        /*
                         * We are not stopped, but there could be a stop
                         * signal in the middle of being processed after
                         * being removed from the queue.  Clear that too.
                         */
-                       p->signal->flags = 0;
+                       signal->flags &= ~SIGNAL_STOP_DEQUEUED;
                }
-       } else if (sig == SIGKILL) {
+       }
+
+       return !sig_ignored(p, sig);
+}
+
+/*
+ * Test if P wants to take SIG.  After we've checked all threads with this,
+ * it's equivalent to finding no threads not blocking SIG.  Any threads not
+ * blocking SIG were ruled out because they are not running and already
+ * have pending signals.  Such threads will dequeue from the shared queue
+ * as soon as they're available, so putting the signal on the shared queue
+ * will be equivalent to sending it to one such thread.
+ */
+static inline int wants_signal(int sig, struct task_struct *p)
+{
+       if (sigismember(&p->blocked, sig))
+               return 0;
+       if (p->flags & PF_EXITING)
+               return 0;
+       if (sig == SIGKILL)
+               return 1;
+       if (task_is_stopped_or_traced(p))
+               return 0;
+       return task_curr(p) || !signal_pending(p);
+}
+
+static void complete_signal(int sig, struct task_struct *p, int group)
+{
+       struct signal_struct *signal = p->signal;
+       struct task_struct *t;
+
+       /*
+        * Now find a thread we can wake up to take the signal off the queue.
+        *
+        * If the main thread wants the signal, it gets first crack.
+        * Probably the least surprising to the average bear.
+        */
+       if (wants_signal(sig, p))
+               t = p;
+       else if (!group || thread_group_empty(p))
+               /*
+                * There is just one thread and it does not need to be woken.
+                * It will dequeue unblocked signals before it runs again.
+                */
+               return;
+       else {
                /*
-                * Make sure that any pending stop signal already dequeued
-                * is undone by the wakeup for SIGKILL.
+                * Otherwise try to find a suitable thread.
                 */
-               p->signal->flags = 0;
+               t = signal->curr_target;
+               while (!wants_signal(sig, t)) {
+                       t = next_thread(t);
+                       if (t == signal->curr_target)
+                               /*
+                                * No thread needs to be woken.
+                                * Any eligible threads will see
+                                * the signal in the queue soon.
+                                */
+                               return;
+               }
+               signal->curr_target = t;
        }
+
+       /*
+        * Found a killable thread.  If the signal will be fatal,
+        * then start taking the whole group down immediately.
+        */
+       if (sig_fatal(p, sig) &&
+           !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
+           !sigismember(&t->real_blocked, sig) &&
+           (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
+               /*
+                * This signal will be fatal to the whole group.
+                */
+               if (!sig_kernel_coredump(sig)) {
+                       /*
+                        * Start a group exit and wake everybody up.
+                        * This way we don't have other threads
+                        * running and doing things after a slower
+                        * thread has the fatal signal pending.
+                        */
+                       signal->flags = SIGNAL_GROUP_EXIT;
+                       signal->group_exit_code = sig;
+                       signal->group_stop_count = 0;
+                       t = p;
+                       do {
+                               sigaddset(&t->pending.signal, SIGKILL);
+                               signal_wake_up(t, 1);
+                       } while_each_thread(p, t);
+                       return;
+               }
+       }
+
+       /*
+        * The signal is already in the shared-pending queue.
+        * Tell the chosen thread to wake up and dequeue it.
+        */
+       signal_wake_up(t, sig == SIGKILL);
+       return;
+}
+
+static inline int legacy_queue(struct sigpending *signals, int sig)
+{
+       return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
 }
 
 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
-                       struct sigpending *signals)
+                       int group)
 {
-       struct sigqueue * q = NULL;
-       int ret = 0;
+       struct sigpending *pending;
+       struct sigqueue *q;
+
+       assert_spin_locked(&t->sighand->siglock);
+       if (!prepare_signal(sig, t))
+               return 0;
 
+       pending = group ? &t->signal->shared_pending : &t->pending;
        /*
-        * Deliver the signal to listening signalfds. This must be called
-        * with the sighand lock held.
+        * Short-circuit ignored signals and support queuing
+        * exactly one non-rt signal, so that we can get more
+        * detailed information about the cause of the signal.
         */
-       signalfd_notify(t, sig);
-
+       if (legacy_queue(pending, sig))
+               return 0;
        /*
         * fast-pathed signals for kernel-internal things like SIGSTOP
         * or SIGKILL.
@@ -688,7 +804,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
                                             (is_si_special(info) ||
                                              info->si_code >= 0)));
        if (q) {
-               list_add_tail(&q->list, &signals->list);
+               list_add_tail(&q->list, &pending->list);
                switch ((unsigned long) info) {
                case (unsigned long) SEND_SIG_NOINFO:
                        q->info.si_signo = sig;
@@ -718,13 +834,12 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
        }
 
 out_set:
-       sigaddset(&signals->signal, sig);
-       return ret;
+       signalfd_notify(t, sig);
+       sigaddset(&pending->signal, sig);
+       complete_signal(sig, t, group);
+       return 0;
 }
 
-#define LEGACY_QUEUE(sigptr, sig) \
-       (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
-
 int print_fatal_signals;
 
 static void print_fatal_signal(struct pt_regs *regs, int signr)
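
legacy_queue(), introduced above, implements the long-standing rule that a classic (non-real-time) signal is pending at most once per queue while further instances are dropped, whereas real-time signals genuinely queue. The difference is visible from user space; a small illustrative demo (Linux/glibc assumed):

        #define _POSIX_C_SOURCE 200809L
        #include <signal.h>
        #include <stdio.h>
        #include <unistd.h>

        static volatile sig_atomic_t usr1_count, rt_count;

        static void on_usr1(int sig) { usr1_count++; }
        static void on_rt(int sig)   { rt_count++; }

        int main(void)
        {
                sigset_t block, old;
                int i;

                signal(SIGUSR1, on_usr1);
                signal(SIGRTMIN, on_rt);

                sigemptyset(&block);
                sigaddset(&block, SIGUSR1);
                sigaddset(&block, SIGRTMIN);
                sigprocmask(SIG_BLOCK, &block, &old);

                /* Raise each signal three times while it is blocked. */
                for (i = 0; i < 3; i++) {
                        union sigval v;

                        v.sival_int = i;
                        kill(getpid(), SIGUSR1);                /* classic: coalesced */
                        sigqueue(getpid(), SIGRTMIN, v);        /* real-time: queued  */
                }

                sigprocmask(SIG_SETMASK, &old, NULL);           /* deliver what is pending */

                printf("SIGUSR1 delivered %d time(s), SIGRTMIN %d time(s)\n",
                       (int)usr1_count, (int)rt_count);         /* expect 1 and 3 */
                return 0;
        }
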
@@ -757,29 +872,16 @@ static int __init setup_print_fatal_signals(char *str)
 
 __setup("print-fatal-signals=", setup_print_fatal_signals);
 
+int
+__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+{
+       return send_signal(sig, info, p, 1);
+}
+
 static int
 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
-       int ret = 0;
-
-       BUG_ON(!irqs_disabled());
-       assert_spin_locked(&t->sighand->siglock);
-
-       /* Short-circuit ignored signals.  */
-       if (sig_ignored(t, sig))
-               goto out;
-
-       /* Support queueing exactly one non-rt signal, so that we
-          can get more detailed information about the cause of
-          the signal. */
-       if (LEGACY_QUEUE(&t->pending, sig))
-               goto out;
-
-       ret = send_signal(sig, info, t, &t->pending);
-       if (!ret && !sigismember(&t->blocked, sig))
-               signal_wake_up(t, sig == SIGKILL);
-out:
-       return ret;
+       return send_signal(sig, info, t, 0);
 }
 
 /*
@@ -790,7 +892,8 @@ out:
  * since we do not want to have a signal handler that was blocked
  * be invoked when user space had explicitly blocked it.
  *
- * We don't want to have recursive SIGSEGV's etc, for example.
+ * We don't want to have recursive SIGSEGV's etc, for example,
+ * that is why we also clear SIGNAL_UNKILLABLE.
  */
 int
 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
@@ -810,6 +913,8 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
                        recalc_sigpending_and_wake(t);
                }
        }
+       if (action->sa.sa_handler == SIG_DFL)
+               t->signal->flags &= ~SIGNAL_UNKILLABLE;
        ret = specific_send_sig_info(sig, info, t);
        spin_unlock_irqrestore(&t->sighand->siglock, flags);
 
@@ -822,134 +927,6 @@ force_sig_specific(int sig, struct task_struct *t)
        force_sig_info(sig, SEND_SIG_FORCED, t);
 }
 
-/*
- * Test if P wants to take SIG.  After we've checked all threads with this,
- * it's equivalent to finding no threads not blocking SIG.  Any threads not
- * blocking SIG were ruled out because they are not running and already
- * have pending signals.  Such threads will dequeue from the shared queue
- * as soon as they're available, so putting the signal on the shared queue
- * will be equivalent to sending it to one such thread.
- */
-static inline int wants_signal(int sig, struct task_struct *p)
-{
-       if (sigismember(&p->blocked, sig))
-               return 0;
-       if (p->flags & PF_EXITING)
-               return 0;
-       if (sig == SIGKILL)
-               return 1;
-       if (task_is_stopped_or_traced(p))
-               return 0;
-       return task_curr(p) || !signal_pending(p);
-}
-
-static void
-__group_complete_signal(int sig, struct task_struct *p)
-{
-       struct task_struct *t;
-
-       /*
-        * Now find a thread we can wake up to take the signal off the queue.
-        *
-        * If the main thread wants the signal, it gets first crack.
-        * Probably the least surprising to the average bear.
-        */
-       if (wants_signal(sig, p))
-               t = p;
-       else if (thread_group_empty(p))
-               /*
-                * There is just one thread and it does not need to be woken.
-                * It will dequeue unblocked signals before it runs again.
-                */
-               return;
-       else {
-               /*
-                * Otherwise try to find a suitable thread.
-                */
-               t = p->signal->curr_target;
-               if (t == NULL)
-                       /* restart balancing at this thread */
-                       t = p->signal->curr_target = p;
-
-               while (!wants_signal(sig, t)) {
-                       t = next_thread(t);
-                       if (t == p->signal->curr_target)
-                               /*
-                                * No thread needs to be woken.
-                                * Any eligible threads will see
-                                * the signal in the queue soon.
-                                */
-                               return;
-               }
-               p->signal->curr_target = t;
-       }
-
-       /*
-        * Found a killable thread.  If the signal will be fatal,
-        * then start taking the whole group down immediately.
-        */
-       if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
-           !sigismember(&t->real_blocked, sig) &&
-           (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
-               /*
-                * This signal will be fatal to the whole group.
-                */
-               if (!sig_kernel_coredump(sig)) {
-                       /*
-                        * Start a group exit and wake everybody up.
-                        * This way we don't have other threads
-                        * running and doing things after a slower
-                        * thread has the fatal signal pending.
-                        */
-                       p->signal->flags = SIGNAL_GROUP_EXIT;
-                       p->signal->group_exit_code = sig;
-                       p->signal->group_stop_count = 0;
-                       t = p;
-                       do {
-                               sigaddset(&t->pending.signal, SIGKILL);
-                               signal_wake_up(t, 1);
-                       } while_each_thread(p, t);
-                       return;
-               }
-       }
-
-       /*
-        * The signal is already in the shared-pending queue.
-        * Tell the chosen thread to wake up and dequeue it.
-        */
-       signal_wake_up(t, sig == SIGKILL);
-       return;
-}
-
-int
-__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
-{
-       int ret = 0;
-
-       assert_spin_locked(&p->sighand->siglock);
-       handle_stop_signal(sig, p);
-
-       /* Short-circuit ignored signals.  */
-       if (sig_ignored(p, sig))
-               return ret;
-
-       if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
-               /* This is a non-RT signal and we already have one queued.  */
-               return ret;
-
-       /*
-        * Put this signal on the shared-pending queue, or fail with EAGAIN.
-        * We always use the shared queue for process-wide signals,
-        * to avoid several races.
-        */
-       ret = send_signal(sig, info, p, &p->signal->shared_pending);
-       if (unlikely(ret))
-               return ret;
-
-       __group_complete_signal(sig, p);
-       return 0;
-}
-
 /*
  * Nuke all other threads in the group.
  */
@@ -978,13 +955,11 @@ int __fatal_signal_pending(struct task_struct *tsk)
 }
 EXPORT_SYMBOL(__fatal_signal_pending);
 
-/*
- * Must be called under rcu_read_lock() or with tasklist_lock read-held.
- */
 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
 {
        struct sighand_struct *sighand;
 
+       rcu_read_lock();
        for (;;) {
                sighand = rcu_dereference(tsk->sighand);
                if (unlikely(sighand == NULL))
@@ -995,6 +970,7 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long
                        break;
                spin_unlock_irqrestore(&sighand->siglock, *flags);
        }
+       rcu_read_unlock();
 
        return sighand;
 }
@@ -1043,9 +1019,6 @@ int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
        struct task_struct *p;
 
        rcu_read_lock();
-       if (unlikely(sig_needs_tasklist(sig)))
-               read_lock(&tasklist_lock);
-
 retry:
        p = pid_task(pid, PIDTYPE_PID);
        if (p) {
@@ -1059,10 +1032,8 @@ retry:
                         */
                        goto retry;
        }
-
-       if (unlikely(sig_needs_tasklist(sig)))
-               read_unlock(&tasklist_lock);
        rcu_read_unlock();
+
        return error;
 }
 
@@ -1159,8 +1130,7 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
  */
 
 /*
- * These two are the most common entry points.  They send a signal
- * just to the specific thread.
+ * The caller must ensure the task can't exit.
  */
 int
 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
@@ -1175,17 +1145,9 @@ send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
        if (!valid_signal(sig))
                return -EINVAL;
 
-       /*
-        * We need the tasklist lock even for the specific
-        * thread case (when we don't need to follow the group
-        * lists) in order to avoid races with "p->sighand"
-        * going away or changing from under us.
-        */
-       read_lock(&tasklist_lock);  
        spin_lock_irqsave(&p->sighand->siglock, flags);
        ret = specific_send_sig_info(sig, info, p);
        spin_unlock_irqrestore(&p->sighand->siglock, flags);
-       read_unlock(&tasklist_lock);
        return ret;
 }
 
@@ -1291,28 +1253,24 @@ void sigqueue_free(struct sigqueue *q)
        __sigqueue_free(q);
 }
 
-int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
+int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
 {
+       int sig = q->info.si_signo;
+       struct sigpending *pending;
        unsigned long flags;
-       int ret = 0;
+       int ret;
 
        BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
 
-       /*
-        * The rcu based delayed sighand destroy makes it possible to
-        * run this without tasklist lock held. The task struct itself
-        * cannot go away as create_timer did get_task_struct().
-        *
-        * We return -1, when the task is marked exiting, so
-        * posix_timer_event can redirect it to the group leader
-        */
-       rcu_read_lock();
+       ret = -1;
+       if (!likely(lock_task_sighand(t, &flags)))
+               goto ret;
 
-       if (!likely(lock_task_sighand(p, &flags))) {
-               ret = -1;
-               goto out_err;
-       }
+       ret = 1; /* the signal is ignored */
+       if (!prepare_signal(sig, t))
+               goto out;
 
+       ret = 0;
        if (unlikely(!list_empty(&q->list))) {
                /*
                 * If an SI_TIMER entry is already queue just increment
@@ -1322,77 +1280,15 @@ int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
                q->info.si_overrun++;
                goto out;
        }
-       /* Short-circuit ignored signals.  */
-       if (sig_ignored(p, sig)) {
-               ret = 1;
-               goto out;
-       }
-       /*
-        * Deliver the signal to listening signalfds. This must be called
-        * with the sighand lock held.
-        */
-       signalfd_notify(p, sig);
-
-       list_add_tail(&q->list, &p->pending.list);
-       sigaddset(&p->pending.signal, sig);
-       if (!sigismember(&p->blocked, sig))
-               signal_wake_up(p, sig == SIGKILL);
-
-out:
-       unlock_task_sighand(p, &flags);
-out_err:
-       rcu_read_unlock();
-
-       return ret;
-}
-
-int
-send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
-{
-       unsigned long flags;
-       int ret = 0;
-
-       BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
-
-       read_lock(&tasklist_lock);
-       /* Since it_lock is held, p->sighand cannot be NULL. */
-       spin_lock_irqsave(&p->sighand->siglock, flags);
-       handle_stop_signal(sig, p);
-
-       /* Short-circuit ignored signals.  */
-       if (sig_ignored(p, sig)) {
-               ret = 1;
-               goto out;
-       }
 
-       if (unlikely(!list_empty(&q->list))) {
-               /*
-                * If an SI_TIMER entry is already queue just increment
-                * the overrun count.  Other uses should not try to
-                * send the signal multiple times.
-                */
-               BUG_ON(q->info.si_code != SI_TIMER);
-               q->info.si_overrun++;
-               goto out;
-       } 
-       /*
-        * Deliver the signal to listening signalfds. This must be called
-        * with the sighand lock held.
-        */
-       signalfd_notify(p, sig);
-
-       /*
-        * Put this signal on the shared-pending queue.
-        * We always use the shared queue for process-wide signals,
-        * to avoid several races.
-        */
-       list_add_tail(&q->list, &p->signal->shared_pending.list);
-       sigaddset(&p->signal->shared_pending.signal, sig);
-
-       __group_complete_signal(sig, p);
+       signalfd_notify(t, sig);
+       pending = group ? &t->signal->shared_pending : &t->pending;
+       list_add_tail(&q->list, &pending->list);
+       sigaddset(&pending->signal, sig);
+       complete_signal(sig, t, group);
 out:
-       spin_unlock_irqrestore(&p->sighand->siglock, flags);
-       read_unlock(&tasklist_lock);
+       unlock_task_sighand(t, &flags);
+ret:
        return ret;
 }
 
@@ -1723,8 +1619,9 @@ static int do_signal_stop(int signr)
        } else {
                struct task_struct *t;
 
-               if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
-                   unlikely(sig->group_exit_task))
+               if (unlikely((sig->flags & (SIGNAL_STOP_DEQUEUED | SIGNAL_UNKILLABLE))
+                                        != SIGNAL_STOP_DEQUEUED) ||
+                   unlikely(signal_group_exit(sig)))
                        return 0;
                /*
                 * There is no group stop already in progress.
@@ -1799,8 +1696,9 @@ static int ptrace_signal(int signr, siginfo_t *info,
 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
                          struct pt_regs *regs, void *cookie)
 {
-       sigset_t *mask = &current->blocked;
-       int signr = 0;
+       struct sighand_struct *sighand = current->sighand;
+       struct signal_struct *signal = current->signal;
+       int signr;
 
 relock:
        /*
@@ -1811,16 +1709,32 @@ relock:
         */
        try_to_freeze();
 
-       spin_lock_irq(&current->sighand->siglock);
+       spin_lock_irq(&sighand->siglock);
+       /*
+        * Every stopped thread goes here after wakeup. Check to see if
+        * we should notify the parent, prepare_signal(SIGCONT) encodes
+        * the CLD_ si_code into SIGNAL_CLD_MASK bits.
+        */
+       if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
+               int why = (signal->flags & SIGNAL_STOP_CONTINUED)
+                               ? CLD_CONTINUED : CLD_STOPPED;
+               signal->flags &= ~SIGNAL_CLD_MASK;
+               spin_unlock_irq(&sighand->siglock);
+
+               read_lock(&tasklist_lock);
+               do_notify_parent_cldstop(current->group_leader, why);
+               read_unlock(&tasklist_lock);
+               goto relock;
+       }
+
        for (;;) {
                struct k_sigaction *ka;
 
-               if (unlikely(current->signal->group_stop_count > 0) &&
+               if (unlikely(signal->group_stop_count > 0) &&
                    do_signal_stop(0))
                        goto relock;
 
-               signr = dequeue_signal(current, mask, info);
-
+               signr = dequeue_signal(current, &current->blocked, info);
                if (!signr)
                        break; /* will return 0 */
 
@@ -1830,7 +1744,7 @@ relock:
                                continue;
                }
 
-               ka = &current->sighand->action[signr-1];
+               ka = &sighand->action[signr-1];
                if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
                        continue;
                if (ka->sa.sa_handler != SIG_DFL) {
@@ -1852,7 +1766,8 @@ relock:
                /*
                 * Global init gets no signals it doesn't want.
                 */
-               if (is_global_init(current))
+               if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
+                   !signal_group_exit(signal))
                        continue;
 
                if (sig_kernel_stop(signr)) {
@@ -1867,14 +1782,14 @@ relock:
                         * We need to check for that and bail out if necessary.
                         */
                        if (signr != SIGSTOP) {
-                               spin_unlock_irq(&current->sighand->siglock);
+                               spin_unlock_irq(&sighand->siglock);
 
                                /* signals can be posted during this window */
 
                                if (is_current_pgrp_orphaned())
                                        goto relock;
 
-                               spin_lock_irq(&current->sighand->siglock);
+                               spin_lock_irq(&sighand->siglock);
                        }
 
                        if (likely(do_signal_stop(signr))) {
@@ -1889,15 +1804,16 @@ relock:
                        continue;
                }
 
-               spin_unlock_irq(&current->sighand->siglock);
+               spin_unlock_irq(&sighand->siglock);
 
                /*
                 * Anything else is fatal, maybe with a core dump.
                 */
                current->flags |= PF_SIGNALED;
-               if ((signr != SIGKILL) && print_fatal_signals)
-                       print_fatal_signal(regs, signr);
+
                if (sig_kernel_coredump(signr)) {
+                       if (print_fatal_signals)
+                               print_fatal_signal(regs, signr);
                        /*
                         * If it was able to dump core, this kills all
                         * other threads in the group and synchronizes with
@@ -1915,7 +1831,7 @@ relock:
                do_group_exit(signr);
                /* NOTREACHED */
        }
-       spin_unlock_irq(&current->sighand->siglock);
+       spin_unlock_irq(&sighand->siglock);
        return signr;
 }
 
@@ -2259,6 +2175,7 @@ static int do_tkill(int tgid, int pid, int sig)
        int error;
        struct siginfo info;
        struct task_struct *p;
+       unsigned long flags;
 
        error = -ESRCH;
        info.si_signo = sig;
@@ -2267,22 +2184,24 @@ static int do_tkill(int tgid, int pid, int sig)
        info.si_pid = task_tgid_vnr(current);
        info.si_uid = current->uid;
 
-       read_lock(&tasklist_lock);
+       rcu_read_lock();
        p = find_task_by_vpid(pid);
        if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
                error = check_kill_permission(sig, &info, p);
                /*
                 * The null signal is a permissions and process existence
                 * probe.  No signal is actually delivered.
+                *
+                * If lock_task_sighand() fails we pretend the task dies
+                * after receiving the signal. The window is tiny, and the
+                * signal is private anyway.
                 */
-               if (!error && sig && p->sighand) {
-                       spin_lock_irq(&p->sighand->siglock);
-                       handle_stop_signal(sig, p);
+               if (!error && sig && lock_task_sighand(p, &flags)) {
                        error = specific_send_sig_info(sig, &info, p);
-                       spin_unlock_irq(&p->sighand->siglock);
+                       unlock_task_sighand(p, &flags);
                }
        }
-       read_unlock(&tasklist_lock);
+       rcu_read_unlock();
 
        return error;
 }
@@ -2339,13 +2258,14 @@ sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
 
 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
 {
+       struct task_struct *t = current;
        struct k_sigaction *k;
        sigset_t mask;
 
        if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
                return -EINVAL;
 
-       k = &current->sighand->action[sig-1];
+       k = &t->sighand->action[sig-1];
 
        spin_lock_irq(&current->sighand->siglock);
        if (oact)
@@ -2366,9 +2286,7 @@ int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
                 *   (for example, SIGCHLD), shall cause the pending signal to
                 *   be discarded, whether or not it is blocked"
                 */
-               if (act->sa.sa_handler == SIG_IGN ||
-                  (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
-                       struct task_struct *t = current;
+               if (__sig_ignored(t, sig)) {
                        sigemptyset(&mask);
                        sigaddset(&mask, sig);
                        rm_from_queue_full(&mask, &t->signal->shared_pending);
@@ -2623,7 +2541,7 @@ asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
 
        current->state = TASK_INTERRUPTIBLE;
        schedule();
-       set_thread_flag(TIF_RESTORE_SIGMASK);
+       set_restore_sigmask();
        return -ERESTARTNOHAND;
 }
 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
index 3c44956ee7e2312d30f28bc68a5b9825a7da657a..36e0617400470f398700376c7fa3359d768e7e58 100644
@@ -589,16 +589,20 @@ static void takeover_tasklets(unsigned int cpu)
        local_irq_disable();
 
        /* Find end, append list for that CPU. */
-       *__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).head;
-       __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
-       per_cpu(tasklet_vec, cpu).head = NULL;
-       per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
+       if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
+               *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
+               __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+               per_cpu(tasklet_vec, cpu).head = NULL;
+               per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
+       }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
 
-       *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
-       __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
-       per_cpu(tasklet_hi_vec, cpu).head = NULL;
-       per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
+       if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
+               *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
+               __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+               per_cpu(tasklet_hi_vec, cpu).head = NULL;
+               per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
+       }
        raise_softirq_irqoff(HI_SOFTIRQ);
 
        local_irq_enable();
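Aside (illustrative sketch, not part of the patch): the new head != tail guards above matter because these per-CPU lists keep a tail pointer to the last next-pointer, so an empty list is exactly the state tail == &head; splicing such a list verbatim would leave the destination's tail pointing into the dead CPU's soon-to-be-reset list head. A minimal userspace C analogue of the same splice-with-guard idea, with hypothetical names rather than the kernel's tasklet structures:

#include <stdio.h>
#include <stddef.h>

struct node { int val; struct node *next; };

/* Tail-pointer list: 'tail' always points at the terminating next-pointer,
 * so an empty list is recognised by tail == &head, as in the hunk above. */
struct list { struct node *head; struct node **tail; };

static void list_init(struct list *l)
{
        l->head = NULL;
        l->tail = &l->head;
}

static void list_add(struct list *l, struct node *n)
{
        n->next = NULL;
        *l->tail = n;           /* link after the current last element */
        l->tail = &n->next;     /* advance the tail pointer */
}

/* Move every node of 'src' to the end of 'dst'.  Splicing an empty src
 * would leave dst->tail pointing into src, so that case is skipped. */
static void list_splice_tail(struct list *dst, struct list *src)
{
        if (src->tail == &src->head)    /* src empty: nothing to take over */
                return;
        *dst->tail = src->head;
        dst->tail = src->tail;
        src->head = NULL;
        src->tail = &src->head;
}

int main(void)
{
        struct list a, b;
        struct node n1 = { .val = 1 }, n2 = { .val = 2 };

        list_init(&a);
        list_init(&b);
        list_add(&a, &n1);
        list_splice_tail(&a, &b);       /* b is empty: a must stay intact */
        list_add(&a, &n2);              /* would corrupt a without the guard */

        for (struct node *n = a.head; n; n = n->next)
                printf("%d\n", n->val);
        return 0;
}

Without the guard, splicing the empty list b makes a.tail point at b.head, so the later list_add() lands in b and node 2 silently disappears from a.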
index 6a0cc71ee88d61e1afdd47e960a841515dad8a3c..895d2d4c94931e5136c9d063adf19d3d7999b0a6 100644
@@ -978,8 +978,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
                goto out;
 
        if (task_pgrp(p) != pgrp) {
-               detach_pid(p, PIDTYPE_PGID);
-               attach_pid(p, PIDTYPE_PGID, pgrp);
+               change_pid(p, PIDTYPE_PGID, pgrp);
                set_task_pgrp(p, pid_nr(pgrp));
        }
 
@@ -992,54 +991,67 @@ out:
 
 asmlinkage long sys_getpgid(pid_t pid)
 {
+       struct task_struct *p;
+       struct pid *grp;
+       int retval;
+
+       rcu_read_lock();
        if (!pid)
-               return task_pgrp_vnr(current);
+               grp = task_pgrp(current);
        else {
-               int retval;
-               struct task_struct *p;
-
-               read_lock(&tasklist_lock);
-               p = find_task_by_vpid(pid);
                retval = -ESRCH;
-               if (p) {
-                       retval = security_task_getpgid(p);
-                       if (!retval)
-                               retval = task_pgrp_vnr(p);
-               }
-               read_unlock(&tasklist_lock);
-               return retval;
+               p = find_task_by_vpid(pid);
+               if (!p)
+                       goto out;
+               grp = task_pgrp(p);
+               if (!grp)
+                       goto out;
+
+               retval = security_task_getpgid(p);
+               if (retval)
+                       goto out;
        }
+       retval = pid_vnr(grp);
+out:
+       rcu_read_unlock();
+       return retval;
 }
 
 #ifdef __ARCH_WANT_SYS_GETPGRP
 
 asmlinkage long sys_getpgrp(void)
 {
-       /* SMP - assuming writes are word atomic this is fine */
-       return task_pgrp_vnr(current);
+       return sys_getpgid(0);
 }
 
 #endif
 
 asmlinkage long sys_getsid(pid_t pid)
 {
+       struct task_struct *p;
+       struct pid *sid;
+       int retval;
+
+       rcu_read_lock();
        if (!pid)
-               return task_session_vnr(current);
+               sid = task_session(current);
        else {
-               int retval;
-               struct task_struct *p;
-
-               rcu_read_lock();
-               p = find_task_by_vpid(pid);
                retval = -ESRCH;
-               if (p) {
-                       retval = security_task_getsid(p);
-                       if (!retval)
-                               retval = task_session_vnr(p);
-               }
-               rcu_read_unlock();
-               return retval;
+               p = find_task_by_vpid(pid);
+               if (!p)
+                       goto out;
+               sid = task_session(p);
+               if (!sid)
+                       goto out;
+
+               retval = security_task_getsid(p);
+               if (retval)
+                       goto out;
        }
+       retval = pid_vnr(sid);
+out:
+       rcu_read_unlock();
+       return retval;
 }
 
 asmlinkage long sys_setsid(void)
@@ -1545,6 +1557,19 @@ out:
  *
  */
 
+static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r,
+                                    cputime_t *utimep, cputime_t *stimep)
+{
+       *utimep = cputime_add(*utimep, t->utime);
+       *stimep = cputime_add(*stimep, t->stime);
+       r->ru_nvcsw += t->nvcsw;
+       r->ru_nivcsw += t->nivcsw;
+       r->ru_minflt += t->min_flt;
+       r->ru_majflt += t->maj_flt;
+       r->ru_inblock += task_io_get_inblock(t);
+       r->ru_oublock += task_io_get_oublock(t);
+}
+
 static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
 {
        struct task_struct *t;
@@ -1554,12 +1579,14 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
        memset((char *) r, 0, sizeof *r);
        utime = stime = cputime_zero;
 
-       rcu_read_lock();
-       if (!lock_task_sighand(p, &flags)) {
-               rcu_read_unlock();
-               return;
+       if (who == RUSAGE_THREAD) {
+               accumulate_thread_rusage(p, r, &utime, &stime);
+               goto out;
        }
 
+       if (!lock_task_sighand(p, &flags))
+               return;
+
        switch (who) {
                case RUSAGE_BOTH:
                case RUSAGE_CHILDREN:
@@ -1586,14 +1613,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
                        r->ru_oublock += p->signal->oublock;
                        t = p;
                        do {
-                               utime = cputime_add(utime, t->utime);
-                               stime = cputime_add(stime, t->stime);
-                               r->ru_nvcsw += t->nvcsw;
-                               r->ru_nivcsw += t->nivcsw;
-                               r->ru_minflt += t->min_flt;
-                               r->ru_majflt += t->maj_flt;
-                               r->ru_inblock += task_io_get_inblock(t);
-                               r->ru_oublock += task_io_get_oublock(t);
+                               accumulate_thread_rusage(t, r, &utime, &stime);
                                t = next_thread(t);
                        } while (t != p);
                        break;
@@ -1601,10 +1621,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
                default:
                        BUG();
        }
-
        unlock_task_sighand(p, &flags);
-       rcu_read_unlock();
 
+out:
        cputime_to_timeval(utime, &r->ru_utime);
        cputime_to_timeval(stime, &r->ru_stime);
 }
@@ -1618,7 +1637,8 @@ int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
 
 asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
 {
-       if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
+       if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
+           who != RUSAGE_THREAD)
                return -EINVAL;
        return getrusage(current, who, ru);
 }
@@ -1632,10 +1652,9 @@ asmlinkage long sys_umask(int mask)
 asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
                          unsigned long arg4, unsigned long arg5)
 {
-       long error;
+       long uninitialized_var(error);
 
-       error = security_task_prctl(option, arg2, arg3, arg4, arg5);
-       if (error)
+       if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error))
                return error;
 
        switch (option) {
@@ -1688,17 +1707,6 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
                                error = -EINVAL;
                        break;
 
-               case PR_GET_KEEPCAPS:
-                       if (current->keep_capabilities)
-                               error = 1;
-                       break;
-               case PR_SET_KEEPCAPS:
-                       if (arg2 != 0 && arg2 != 1) {
-                               error = -EINVAL;
-                               break;
-                       }
-                       current->keep_capabilities = arg2;
-                       break;
                case PR_SET_NAME: {
                        struct task_struct *me = current;
                        unsigned char ncomm[sizeof(me->comm)];
@@ -1732,17 +1740,6 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
                case PR_SET_SECCOMP:
                        error = prctl_set_seccomp(arg2);
                        break;
-
-               case PR_CAPBSET_READ:
-                       if (!cap_valid(arg2))
-                               return -EINVAL;
-                       return !!cap_raised(current->cap_bset, arg2);
-               case PR_CAPBSET_DROP:
-#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
-                       return cap_prctl_drop(arg2);
-#else
-                       return -EINVAL;
-#endif
                case PR_GET_TSC:
                        error = GET_TSC_CTL(arg2);
                        break;
index fd3364827ccf0a838c79fe240016c93cedd508e4..d7ffdc59816ad3f822389d508f3c5ff1b030578d 100644
@@ -38,6 +38,7 @@
 #include <linux/writeback.h>
 #include <linux/hugetlb.h>
 #include <linux/initrd.h>
+#include <linux/key.h>
 #include <linux/times.h>
 #include <linux/limits.h>
 #include <linux/dcache.h>
@@ -144,12 +145,6 @@ extern int no_unaligned_warning;
 extern int max_lock_depth;
 #endif
 
-#ifdef CONFIG_SYSCTL_SYSCALL
-static int parse_table(int __user *, int, void __user *, size_t __user *,
-               void __user *, size_t, struct ctl_table *);
-#endif
-
-
 #ifdef CONFIG_PROC_SYSCTL
 static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp,
                  void __user *buffer, size_t *lenp, loff_t *ppos);
@@ -809,6 +804,14 @@ static struct ctl_table kern_table[] = {
                .proc_handler   = &proc_dostring,
                .strategy       = &sysctl_string,
        },
+#ifdef CONFIG_KEYS
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "keys",
+               .mode           = 0555,
+               .child          = key_sysctls,
+       },
+#endif
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
@@ -1430,6 +1433,76 @@ void register_sysctl_root(struct ctl_table_root *root)
 }
 
 #ifdef CONFIG_SYSCTL_SYSCALL
+/* Perform the actual read/write of a sysctl table entry. */
+static int do_sysctl_strategy(struct ctl_table_root *root,
+                       struct ctl_table *table,
+                       int __user *name, int nlen,
+                       void __user *oldval, size_t __user *oldlenp,
+                       void __user *newval, size_t newlen)
+{
+       int op = 0, rc;
+
+       if (oldval)
+               op |= 004;
+       if (newval)
+               op |= 002;
+       if (sysctl_perm(root, table, op))
+               return -EPERM;
+
+       if (table->strategy) {
+               rc = table->strategy(table, name, nlen, oldval, oldlenp,
+                                    newval, newlen);
+               if (rc < 0)
+                       return rc;
+               if (rc > 0)
+                       return 0;
+       }
+
+       /* If there is no strategy routine, or if the strategy returns
+        * zero, proceed with automatic r/w */
+       if (table->data && table->maxlen) {
+               rc = sysctl_data(table, name, nlen, oldval, oldlenp,
+                                newval, newlen);
+               if (rc < 0)
+                       return rc;
+       }
+       return 0;
+}
+
+static int parse_table(int __user *name, int nlen,
+                      void __user *oldval, size_t __user *oldlenp,
+                      void __user *newval, size_t newlen,
+                      struct ctl_table_root *root,
+                      struct ctl_table *table)
+{
+       int n;
+repeat:
+       if (!nlen)
+               return -ENOTDIR;
+       if (get_user(n, name))
+               return -EFAULT;
+       for ( ; table->ctl_name || table->procname; table++) {
+               if (!table->ctl_name)
+                       continue;
+               if (n == table->ctl_name) {
+                       int error;
+                       if (table->child) {
+                               if (sysctl_perm(root, table, 001))
+                                       return -EPERM;
+                               name++;
+                               nlen--;
+                               table = table->child;
+                               goto repeat;
+                       }
+                       error = do_sysctl_strategy(root, table, name, nlen,
+                                                  oldval, oldlenp,
+                                                  newval, newlen);
+                       return error;
+               }
+       }
+       return -ENOTDIR;
+}
+
 int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp,
               void __user *newval, size_t newlen)
 {
@@ -1447,7 +1520,8 @@ int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *ol
        for (head = sysctl_head_next(NULL); head;
                        head = sysctl_head_next(head)) {
                error = parse_table(name, nlen, oldval, oldlenp, 
-                                       newval, newlen, head->ctl_table);
+                                       newval, newlen,
+                                       head->root, head->ctl_table);
                if (error != -ENOTDIR) {
                        sysctl_head_finish(head);
                        break;
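Aside (illustrative sketch, not from the commit): parse_table(), moved above, walks the binary sysctl name, an array of integers read one component at a time, descending into a child table whenever the component matches a directory entry and handing the leaf to do_sysctl_strategy(); do_sysctl() now also passes head->root so sysctl_perm() can consult per-root permissions on the way down. A rough userspace analogue of that descent, with made-up table contents:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for struct ctl_table: an entry either names a
 * child table to descend into or carries a value to return at the leaf. */
struct entry {
        int name;                       /* 0 terminates a table, like ctl_name */
        const struct entry *child;
        int value;
};

static const struct entry vm_table[] = {
        { .name = 1, .value = 60 },     /* imagine "swappiness" */
        { 0 }
};

static const struct entry root_table[] = {
        { .name = 2, .child = vm_table },       /* imagine CTL_VM */
        { 0 }
};

/* Walk the integer path, restarting the scan in each child table,
 * mirroring the repeat/goto loop of parse_table().  Returns 0 on success. */
static int lookup(const struct entry *table, const int *name, int nlen, int *out)
{
repeat:
        if (!nlen)
                return -1;              /* -ENOTDIR in the kernel version */
        for (; table->name; table++) {
                if (table->name != name[0])
                        continue;
                if (table->child) {     /* directory: check perms, descend */
                        name++;
                        nlen--;
                        table = table->child;
                        goto repeat;
                }
                *out = table->value;    /* leaf: do_sysctl_strategy() here */
                return 0;
        }
        return -1;
}

int main(void)
{
        int path[] = { 2, 1 }, val;

        if (!lookup(root_table, path, 2, &val))
                printf("value = %d\n", val);    /* prints 60 */
        return 0;
}

The path {2, 1} first matches the entry whose child is vm_table, then resolves name 1 inside it, just as the kernel function restarts its scan after each descent.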
@@ -1493,84 +1567,22 @@ static int test_perm(int mode, int op)
        return -EACCES;
 }
 
-int sysctl_perm(struct ctl_table *table, int op)
+int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
 {
        int error;
+       int mode;
+
        error = security_sysctl(table, op);
        if (error)
                return error;
-       return test_perm(table->mode, op);
-}
-
-#ifdef CONFIG_SYSCTL_SYSCALL
-static int parse_table(int __user *name, int nlen,
-                      void __user *oldval, size_t __user *oldlenp,
-                      void __user *newval, size_t newlen,
-                      struct ctl_table *table)
-{
-       int n;
-repeat:
-       if (!nlen)
-               return -ENOTDIR;
-       if (get_user(n, name))
-               return -EFAULT;
-       for ( ; table->ctl_name || table->procname; table++) {
-               if (!table->ctl_name)
-                       continue;
-               if (n == table->ctl_name) {
-                       int error;
-                       if (table->child) {
-                               if (sysctl_perm(table, 001))
-                                       return -EPERM;
-                               name++;
-                               nlen--;
-                               table = table->child;
-                               goto repeat;
-                       }
-                       error = do_sysctl_strategy(table, name, nlen,
-                                                  oldval, oldlenp,
-                                                  newval, newlen);
-                       return error;
-               }
-       }
-       return -ENOTDIR;
-}
 
-/* Perform the actual read/write of a sysctl table entry. */
-int do_sysctl_strategy (struct ctl_table *table,
-                       int __user *name, int nlen,
-                       void __user *oldval, size_t __user *oldlenp,
-                       void __user *newval, size_t newlen)
-{
-       int op = 0, rc;
-
-       if (oldval)
-               op |= 004;
-       if (newval) 
-               op |= 002;
-       if (sysctl_perm(table, op))
-               return -EPERM;
+       if (root->permissions)
+               mode = root->permissions(root, current->nsproxy, table);
+       else
+               mode = table->mode;
 
-       if (table->strategy) {
-               rc = table->strategy(table, name, nlen, oldval, oldlenp,
-                                    newval, newlen);
-               if (rc < 0)
-                       return rc;
-               if (rc > 0)
-                       return 0;
-       }
-
-       /* If there is no strategy routine, or if the strategy returns
-        * zero, proceed with automatic r/w */
-       if (table->data && table->maxlen) {
-               rc = sysctl_data(table, name, nlen, oldval, oldlenp,
-                                newval, newlen);
-               if (rc < 0)
-                       return rc;
-       }
-       return 0;
+       return test_perm(mode, op);
 }
-#endif /* CONFIG_SYSCTL_SYSCALL */
 
 static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table)
 {
@@ -1583,9 +1595,13 @@ static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table)
 
 static __init int sysctl_init(void)
 {
-       int err;
        sysctl_set_parent(NULL, root_table);
-       err = sysctl_check_table(current->nsproxy, root_table);
+#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
+       {
+               int err;
+               err = sysctl_check_table(current->nsproxy, root_table);
+       }
+#endif
        return 0;
 }
 
@@ -1712,10 +1728,12 @@ struct ctl_table_header *__register_sysctl_paths(
        header->unregistering = NULL;
        header->root = root;
        sysctl_set_parent(NULL, header->ctl_table);
+#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
        if (sysctl_check_table(namespaces, header->ctl_table)) {
                kfree(header);
                return NULL;
        }
+#endif
        spin_lock(&sysctl_lock);
        header_list = lookup_header_list(root, namespaces);
        list_add_tail(&header->ctl_entry, header_list);
index 07e86a828073bc7ebffdcdb67c3ee726be9a5705..4a23517169a6495cf1aa1bc689137e1c38698c62 100644
@@ -183,7 +183,7 @@ static int fill_pid(pid_t pid, struct task_struct *tsk,
 
        if (!tsk) {
                rcu_read_lock();
-               tsk = find_task_by_pid(pid);
+               tsk = find_task_by_vpid(pid);
                if (tsk)
                        get_task_struct(tsk);
                rcu_read_unlock();
@@ -230,7 +230,7 @@ static int fill_tgid(pid_t tgid, struct task_struct *first,
         */
        rcu_read_lock();
        if (!first)
-               first = find_task_by_pid(tgid);
+               first = find_task_by_vpid(tgid);
 
        if (!first || !lock_task_sighand(first, &flags))
                goto out;
@@ -547,7 +547,7 @@ void taskstats_exit(struct task_struct *tsk, int group_dead)
        if (!stats)
                goto err;
 
-       rc = fill_pid(tsk->pid, tsk, stats);
+       rc = fill_pid(-1, tsk, stats);
        if (rc < 0)
                goto err;
 
index 35d373a98782bbe0f02cb62535a4576a398dd61a..cbe0d5a222ff8727286701b90099528a533cfc73 100644
@@ -35,6 +35,8 @@
 #include <linux/syscalls.h>
 #include <linux/security.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/math64.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -390,13 +392,17 @@ EXPORT_SYMBOL(set_normalized_timespec);
 struct timespec ns_to_timespec(const s64 nsec)
 {
        struct timespec ts;
+       s32 rem;
 
        if (!nsec)
                return (struct timespec) {0, 0};
 
-       ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec);
-       if (unlikely(nsec < 0))
-               set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec);
+       ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
+       if (unlikely(rem < 0)) {
+               ts.tv_sec--;
+               rem += NSEC_PER_SEC;
+       }
+       ts.tv_nsec = rem;
 
        return ts;
 }
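Aside (illustrative sketch, not from the commit): div_s64_rem() truncates toward zero, so for a negative nanosecond count the remainder comes back negative and the hunk above folds it into the seconds field instead of calling set_normalized_timespec(). The same arithmetic in standalone C, using plain / and % in place of the math64 helpers:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

struct ts_like { int64_t tv_sec; long tv_nsec; };

/* Same normalisation as the patched ns_to_timespec(): C division
 * truncates toward zero, so a negative input leaves a negative
 * remainder that has to be borrowed from the seconds field. */
static struct ts_like ns_to_ts(int64_t nsec)
{
        struct ts_like ts;
        int64_t rem;

        ts.tv_sec = nsec / NSEC_PER_SEC;
        rem = nsec % NSEC_PER_SEC;
        if (rem < 0) {
                ts.tv_sec--;
                rem += NSEC_PER_SEC;
        }
        ts.tv_nsec = (long)rem;
        return ts;
}

int main(void)
{
        struct ts_like ts = ns_to_ts(-1500000000LL);

        /* Raw division gives -1 s and -500000000 ns; the fix-up yields
         * the canonical -2 s + 500000000 ns. */
        printf("%lld sec, %ld nsec\n", (long long)ts.tv_sec, ts.tv_nsec);
        return 0;
}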
@@ -526,8 +532,10 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
-       u64 nsec = (u64)jiffies * TICK_NSEC;
-       value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
+       u32 rem;
+       value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+                                   NSEC_PER_SEC, &rem);
+       value->tv_nsec = rem;
 }
 EXPORT_SYMBOL(jiffies_to_timespec);
 
@@ -565,12 +573,11 @@ void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
         * Convert jiffies to nanoseconds and separate with
         * one divide.
         */
-       u64 nsec = (u64)jiffies * TICK_NSEC;
-       long tv_usec;
+       u32 rem;
 
-       value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec);
-       tv_usec /= NSEC_PER_USEC;
-       value->tv_usec = tv_usec;
+       value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+                                   NSEC_PER_SEC, &rem);
+       value->tv_usec = rem / NSEC_PER_USEC;
 }
 EXPORT_SYMBOL(jiffies_to_timeval);
 
@@ -586,9 +593,7 @@ clock_t jiffies_to_clock_t(long x)
        return x / (HZ / USER_HZ);
 # endif
 #else
-       u64 tmp = (u64)x * TICK_NSEC;
-       do_div(tmp, (NSEC_PER_SEC / USER_HZ));
-       return (long)tmp;
+       return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
 #endif
 }
 EXPORT_SYMBOL(jiffies_to_clock_t);
@@ -600,16 +605,12 @@ unsigned long clock_t_to_jiffies(unsigned long x)
                return ~0UL;
        return x * (HZ / USER_HZ);
 #else
-       u64 jif;
-
        /* Don't worry about loss of precision here .. */
        if (x >= ~0UL / HZ * USER_HZ)
                return ~0UL;
 
        /* .. but do try to contain it here */
-       jif = x * (u64) HZ;
-       do_div(jif, USER_HZ);
-       return jif;
+       return div_u64((u64)x * HZ, USER_HZ);
 #endif
 }
 EXPORT_SYMBOL(clock_t_to_jiffies);
@@ -618,10 +619,9 @@ u64 jiffies_64_to_clock_t(u64 x)
 {
 #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
 # if HZ < USER_HZ
-       x *= USER_HZ;
-       do_div(x, HZ);
+       x = div_u64(x * USER_HZ, HZ);
 # elif HZ > USER_HZ
-       do_div(x, HZ / USER_HZ);
+       x = div_u64(x, HZ / USER_HZ);
 # else
        /* Nothing to do */
 # endif
@@ -631,8 +631,7 @@ u64 jiffies_64_to_clock_t(u64 x)
         * but even this doesn't overflow in hundreds of years
         * in 64 bits, so..
         */
-       x *= TICK_NSEC;
-       do_div(x, (NSEC_PER_SEC / USER_HZ));
+       x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
 #endif
        return x;
 }
@@ -641,21 +640,17 @@ EXPORT_SYMBOL(jiffies_64_to_clock_t);
 u64 nsec_to_clock_t(u64 x)
 {
 #if (NSEC_PER_SEC % USER_HZ) == 0
-       do_div(x, (NSEC_PER_SEC / USER_HZ));
+       return div_u64(x, NSEC_PER_SEC / USER_HZ);
 #elif (USER_HZ % 512) == 0
-       x *= USER_HZ/512;
-       do_div(x, (NSEC_PER_SEC / 512));
+       return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
 #else
        /*
          * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
          * overflow after 64.99 years.
          * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
          */
-       x *= 9;
-       do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2)) /
-                                 USER_HZ));
+       return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
 #endif
-       return x;
 }
 
 #if (BITS_PER_LONG < 64)
index 5fd9b946977038cbdc192dc5a11146696ad53947..5125ddd8196ba3cffb2fd683c2fd3abdac0a695b 100644
@@ -15,7 +15,8 @@
 #include <linux/jiffies.h>
 #include <linux/hrtimer.h>
 #include <linux/capability.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
+#include <linux/clocksource.h>
 #include <asm/timex.h>
 
 /*
  */
 unsigned long tick_usec = TICK_USEC;           /* USER_HZ period (usec) */
 unsigned long tick_nsec;                       /* ACTHZ period (nsec) */
-static u64 tick_length, tick_length_base;
+u64 tick_length;
+static u64 tick_length_base;
+
+static struct hrtimer leap_timer;
 
 #define MAX_TICKADJ            500             /* microsecs */
 #define MAX_TICKADJ_SCALED     (((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \
-                                 TICK_LENGTH_SHIFT) / NTP_INTERVAL_FREQ)
+                                 NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
 
 /*
  * phase-lock loop variables
@@ -35,11 +39,12 @@ static u64 tick_length, tick_length_base;
 /* TIME_ERROR prevents overwriting the CMOS clock */
 static int time_state = TIME_OK;       /* clock synchronization status */
 int time_status = STA_UNSYNC;          /* clock status bits            */
-static s64 time_offset;                /* time adjustment (ns)         */
+static long time_tai;                  /* TAI offset (s)               */
+static s64 time_offset;                        /* time adjustment (ns)         */
 static long time_constant = 2;         /* pll time constant            */
 long time_maxerror = NTP_PHASE_LIMIT;  /* maximum error (us)           */
 long time_esterror = NTP_PHASE_LIMIT;  /* estimated error (us)         */
-long time_freq;                                /* frequency offset (scaled ppm)*/
+static s64 time_freq;                  /* frequency offset (scaled ns/s)*/
 static long time_reftime;              /* time at last adjustment (s)  */
 long time_adjust;
 static long ntp_tick_adj;
@@ -47,16 +52,56 @@ static long ntp_tick_adj;
 static void ntp_update_frequency(void)
 {
        u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
-                               << TICK_LENGTH_SHIFT;
-       second_length += (s64)ntp_tick_adj << TICK_LENGTH_SHIFT;
-       second_length += (s64)time_freq << (TICK_LENGTH_SHIFT - SHIFT_NSEC);
+                               << NTP_SCALE_SHIFT;
+       second_length += (s64)ntp_tick_adj << NTP_SCALE_SHIFT;
+       second_length += time_freq;
 
        tick_length_base = second_length;
 
-       do_div(second_length, HZ);
-       tick_nsec = second_length >> TICK_LENGTH_SHIFT;
+       tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
+       tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ);
+}
+
+static void ntp_update_offset(long offset)
+{
+       long mtemp;
+       s64 freq_adj;
+
+       if (!(time_status & STA_PLL))
+               return;
 
-       do_div(tick_length_base, NTP_INTERVAL_FREQ);
+       if (!(time_status & STA_NANO))
+               offset *= NSEC_PER_USEC;
+
+       /*
+        * Scale the phase adjustment and
+        * clamp to the operating range.
+        */
+       offset = min(offset, MAXPHASE);
+       offset = max(offset, -MAXPHASE);
+
+       /*
+        * Select how the frequency is to be controlled
+        * and in which mode (PLL or FLL).
+        */
+       if (time_status & STA_FREQHOLD || time_reftime == 0)
+               time_reftime = xtime.tv_sec;
+       mtemp = xtime.tv_sec - time_reftime;
+       time_reftime = xtime.tv_sec;
+
+       freq_adj = (s64)offset * mtemp;
+       freq_adj <<= NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant);
+       time_status &= ~STA_MODE;
+       if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
+               freq_adj += div_s64((s64)offset << (NTP_SCALE_SHIFT - SHIFT_FLL),
+                                   mtemp);
+               time_status |= STA_MODE;
+       }
+       freq_adj += time_freq;
+       freq_adj = min(freq_adj, MAXFREQ_SCALED);
+       time_freq = max(freq_adj, -MAXFREQ_SCALED);
+
+       time_offset = div_s64((s64)offset << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
 }
 
 /**
@@ -78,62 +123,70 @@ void ntp_clear(void)
 }
 
 /*
- * this routine handles the overflow of the microsecond field
- *
- * The tricky bits of code to handle the accurate clock support
- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
- * They were originally developed for SUN and DEC kernels.
- * All the kudos should go to Dave for this stuff.
+ * Leap second processing. If in leap-insert state at the end of the
+ * day, the system clock is set back one second; if in leap-delete
+ * state, the system clock is set ahead one second.
  */
-void second_overflow(void)
+static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 {
-       long time_adj;
+       enum hrtimer_restart res = HRTIMER_NORESTART;
 
-       /* Bump the maxerror field */
-       time_maxerror += MAXFREQ >> SHIFT_USEC;
-       if (time_maxerror > NTP_PHASE_LIMIT) {
-               time_maxerror = NTP_PHASE_LIMIT;
-               time_status |= STA_UNSYNC;
-       }
+       write_seqlock_irq(&xtime_lock);
 
-       /*
-        * Leap second processing. If in leap-insert state at the end of the
-        * day, the system clock is set back one second; if in leap-delete
-        * state, the system clock is set ahead one second. The microtime()
-        * routine or external clock driver will insure that reported time is
-        * always monotonic. The ugly divides should be replaced.
-        */
        switch (time_state) {
        case TIME_OK:
-               if (time_status & STA_INS)
-                       time_state = TIME_INS;
-               else if (time_status & STA_DEL)
-                       time_state = TIME_DEL;
                break;
        case TIME_INS:
-               if (xtime.tv_sec % 86400 == 0) {
-                       xtime.tv_sec--;
-                       wall_to_monotonic.tv_sec++;
-                       time_state = TIME_OOP;
-                       printk(KERN_NOTICE "Clock: inserting leap second "
-                                       "23:59:60 UTC\n");
-               }
+               xtime.tv_sec--;
+               wall_to_monotonic.tv_sec++;
+               time_state = TIME_OOP;
+               printk(KERN_NOTICE "Clock: "
+                      "inserting leap second 23:59:60 UTC\n");
+               leap_timer.expires = ktime_add_ns(leap_timer.expires,
+                                                 NSEC_PER_SEC);
+               res = HRTIMER_RESTART;
                break;
        case TIME_DEL:
-               if ((xtime.tv_sec + 1) % 86400 == 0) {
-                       xtime.tv_sec++;
-                       wall_to_monotonic.tv_sec--;
-                       time_state = TIME_WAIT;
-                       printk(KERN_NOTICE "Clock: deleting leap second "
-                                       "23:59:59 UTC\n");
-               }
+               xtime.tv_sec++;
+               time_tai--;
+               wall_to_monotonic.tv_sec--;
+               time_state = TIME_WAIT;
+               printk(KERN_NOTICE "Clock: "
+                      "deleting leap second 23:59:59 UTC\n");
                break;
        case TIME_OOP:
+               time_tai++;
                time_state = TIME_WAIT;
-               break;
+               /* fall through */
        case TIME_WAIT:
                if (!(time_status & (STA_INS | STA_DEL)))
-               time_state = TIME_OK;
+                       time_state = TIME_OK;
+               break;
+       }
+       update_vsyscall(&xtime, clock);
+
+       write_sequnlock_irq(&xtime_lock);
+
+       return res;
+}
+
+/*
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ */
+void second_overflow(void)
+{
+       s64 time_adj;
+
+       /* Bump the maxerror field */
+       time_maxerror += MAXFREQ / NSEC_PER_USEC;
+       if (time_maxerror > NTP_PHASE_LIMIT) {
+               time_maxerror = NTP_PHASE_LIMIT;
+               time_status |= STA_UNSYNC;
        }
 
        /*
@@ -143,7 +196,7 @@ void second_overflow(void)
        tick_length = tick_length_base;
        time_adj = shift_right(time_offset, SHIFT_PLL + time_constant);
        time_offset -= time_adj;
-       tick_length += (s64)time_adj << (TICK_LENGTH_SHIFT - SHIFT_UPDATE);
+       tick_length += time_adj;
 
        if (unlikely(time_adjust)) {
                if (time_adjust > MAX_TICKADJ) {
@@ -154,25 +207,12 @@ void second_overflow(void)
                        tick_length -= MAX_TICKADJ_SCALED;
                } else {
                        tick_length += (s64)(time_adjust * NSEC_PER_USEC /
-                                       NTP_INTERVAL_FREQ) << TICK_LENGTH_SHIFT;
+                                       NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT;
                        time_adjust = 0;
                }
        }
 }
 
-/*
- * Return how long ticks are at the moment, that is, how much time
- * update_wall_time_one_tick will add to xtime next time we call it
- * (assuming no calls to do_adjtimex in the meantime).
- * The return value is in fixed-point nanoseconds shifted by the
- * specified number of bits to the right of the binary point.
- * This function has no side-effects.
- */
-u64 current_tick_length(void)
-{
-       return tick_length;
-}
-
 #ifdef CONFIG_GENERIC_CMOS_UPDATE
 
 /* Disable the cmos update - used by virtualization and embedded */
@@ -236,8 +276,8 @@ static inline void notify_cmos_timer(void) { }
  */
 int do_adjtimex(struct timex *txc)
 {
-       long mtemp, save_adjust, rem;
-       s64 freq_adj, temp64;
+       struct timespec ts;
+       long save_adjust, sec;
        int result;
 
        /* In order to modify anything, you gotta be super-user! */
@@ -247,147 +287,132 @@ int do_adjtimex(struct timex *txc)
        /* Now we validate the data before disabling interrupts */
 
        if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
-         /* singleshot must not be used with any other mode bits */
-               if (txc->modes != ADJ_OFFSET_SINGLESHOT &&
-                                       txc->modes != ADJ_OFFSET_SS_READ)
+               /* singleshot must not be used with any other mode bits */
+               if (txc->modes & ~ADJ_OFFSET_SS_READ)
                        return -EINVAL;
        }
 
-       if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET))
-         /* adjustment Offset limited to +- .512 seconds */
-               if (txc->offset <= - MAXPHASE || txc->offset >= MAXPHASE )
-                       return -EINVAL;
-
        /* if the quartz is off by more than 10% something is VERY wrong ! */
        if (txc->modes & ADJ_TICK)
                if (txc->tick <  900000/USER_HZ ||
                    txc->tick > 1100000/USER_HZ)
                        return -EINVAL;
 
+       if (time_state != TIME_OK && txc->modes & ADJ_STATUS)
+               hrtimer_cancel(&leap_timer);
+       getnstimeofday(&ts);
+
        write_seqlock_irq(&xtime_lock);
-       result = time_state;    /* mostly `TIME_OK' */
 
        /* Save for later - semantics of adjtime is to return old value */
        save_adjust = time_adjust;
 
-#if 0  /* STA_CLOCKERR is never set yet */
-       time_status &= ~STA_CLOCKERR;           /* reset STA_CLOCKERR */
-#endif
        /* If there are input parameters, then process them */
-       if (txc->modes)
-       {
-           if (txc->modes & ADJ_STATUS)        /* only set allowed bits */
-               time_status =  (txc->status & ~STA_RONLY) |
-                             (time_status & STA_RONLY);
-
-           if (txc->modes & ADJ_FREQUENCY) {   /* p. 22 */
-               if (txc->freq > MAXFREQ || txc->freq < -MAXFREQ) {
-                   result = -EINVAL;
-                   goto leave;
-               }
-               time_freq = ((s64)txc->freq * NSEC_PER_USEC)
-                               >> (SHIFT_USEC - SHIFT_NSEC);
-           }
-
-           if (txc->modes & ADJ_MAXERROR) {
-               if (txc->maxerror < 0 || txc->maxerror >= NTP_PHASE_LIMIT) {
-                   result = -EINVAL;
-                   goto leave;
+       if (txc->modes) {
+               if (txc->modes & ADJ_STATUS) {
+                       if ((time_status & STA_PLL) &&
+                           !(txc->status & STA_PLL)) {
+                               time_state = TIME_OK;
+                               time_status = STA_UNSYNC;
+                       }
+                       /* only set allowed bits */
+                       time_status &= STA_RONLY;
+                       time_status |= txc->status & ~STA_RONLY;
+
+                       switch (time_state) {
+                       case TIME_OK:
+                       start_timer:
+                               sec = ts.tv_sec;
+                               if (time_status & STA_INS) {
+                                       time_state = TIME_INS;
+                                       sec += 86400 - sec % 86400;
+                                       hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
+                               } else if (time_status & STA_DEL) {
+                                       time_state = TIME_DEL;
+                                       sec += 86400 - (sec + 1) % 86400;
+                                       hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
+                               }
+                               break;
+                       case TIME_INS:
+                       case TIME_DEL:
+                               time_state = TIME_OK;
+                               goto start_timer;
+                               break;
+                       case TIME_WAIT:
+                               if (!(time_status & (STA_INS | STA_DEL)))
+                                       time_state = TIME_OK;
+                               break;
+                       case TIME_OOP:
+                               hrtimer_restart(&leap_timer);
+                               break;
+                       }
                }
-               time_maxerror = txc->maxerror;
-           }
 
-           if (txc->modes & ADJ_ESTERROR) {
-               if (txc->esterror < 0 || txc->esterror >= NTP_PHASE_LIMIT) {
-                   result = -EINVAL;
-                   goto leave;
+               if (txc->modes & ADJ_NANO)
+                       time_status |= STA_NANO;
+               if (txc->modes & ADJ_MICRO)
+                       time_status &= ~STA_NANO;
+
+               if (txc->modes & ADJ_FREQUENCY) {
+                       time_freq = (s64)txc->freq * PPM_SCALE;
+                       time_freq = min(time_freq, MAXFREQ_SCALED);
+                       time_freq = max(time_freq, -MAXFREQ_SCALED);
                }
-               time_esterror = txc->esterror;
-           }
 
-           if (txc->modes & ADJ_TIMECONST) {   /* p. 24 */
-               if (txc->constant < 0) {        /* NTP v4 uses values > 6 */
-                   result = -EINVAL;
-                   goto leave;
+               if (txc->modes & ADJ_MAXERROR)
+                       time_maxerror = txc->maxerror;
+               if (txc->modes & ADJ_ESTERROR)
+                       time_esterror = txc->esterror;
+
+               if (txc->modes & ADJ_TIMECONST) {
+                       time_constant = txc->constant;
+                       if (!(time_status & STA_NANO))
+                               time_constant += 4;
+                       time_constant = min(time_constant, (long)MAXTC);
+                       time_constant = max(time_constant, 0l);
                }
-               time_constant = min(txc->constant + 4, (long)MAXTC);
-           }
 
-           if (txc->modes & ADJ_OFFSET) {      /* values checked earlier */
-               if (txc->modes == ADJ_OFFSET_SINGLESHOT) {
-                   /* adjtime() is independent from ntp_adjtime() */
-                   time_adjust = txc->offset;
+               if (txc->modes & ADJ_TAI && txc->constant > 0)
+                       time_tai = txc->constant;
+
+               if (txc->modes & ADJ_OFFSET) {
+                       if (txc->modes == ADJ_OFFSET_SINGLESHOT)
+                               /* adjtime() is independent from ntp_adjtime() */
+                               time_adjust = txc->offset;
+                       else
+                               ntp_update_offset(txc->offset);
                }
-               else if (time_status & STA_PLL) {
-                   time_offset = txc->offset * NSEC_PER_USEC;
-
-                   /*
-                    * Scale the phase adjustment and
-                    * clamp to the operating range.
-                    */
-                   time_offset = min(time_offset, (s64)MAXPHASE * NSEC_PER_USEC);
-                   time_offset = max(time_offset, (s64)-MAXPHASE * NSEC_PER_USEC);
-
-                   /*
-                    * Select whether the frequency is to be controlled
-                    * and in which mode (PLL or FLL). Clamp to the operating
-                    * range. Ugly multiply/divide should be replaced someday.
-                    */
-
-                   if (time_status & STA_FREQHOLD || time_reftime == 0)
-                       time_reftime = xtime.tv_sec;
-                   mtemp = xtime.tv_sec - time_reftime;
-                   time_reftime = xtime.tv_sec;
-
-                   freq_adj = time_offset * mtemp;
-                   freq_adj = shift_right(freq_adj, time_constant * 2 +
-                                          (SHIFT_PLL + 2) * 2 - SHIFT_NSEC);
-                   if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
-                       u64 utemp64;
-                       temp64 = time_offset << (SHIFT_NSEC - SHIFT_FLL);
-                       if (time_offset < 0) {
-                           utemp64 = -temp64;
-                           do_div(utemp64, mtemp);
-                           freq_adj -= utemp64;
-                       } else {
-                           utemp64 = temp64;
-                           do_div(utemp64, mtemp);
-                           freq_adj += utemp64;
-                       }
-                   }
-                   freq_adj += time_freq;
-                   freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
-                   time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
-                   time_offset = div_long_long_rem_signed(time_offset,
-                                                          NTP_INTERVAL_FREQ,
-                                                          &rem);
-                   time_offset <<= SHIFT_UPDATE;
-               } /* STA_PLL */
-           } /* txc->modes & ADJ_OFFSET */
-           if (txc->modes & ADJ_TICK)
-               tick_usec = txc->tick;
-
-           if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
-                   ntp_update_frequency();
-       } /* txc->modes */
-leave: if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
+               if (txc->modes & ADJ_TICK)
+                       tick_usec = txc->tick;
+
+               if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
+                       ntp_update_frequency();
+       }
+
+       result = time_state;    /* mostly `TIME_OK' */
+       if (time_status & (STA_UNSYNC|STA_CLOCKERR))
                result = TIME_ERROR;
 
        if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
-                       (txc->modes == ADJ_OFFSET_SS_READ))
+           (txc->modes == ADJ_OFFSET_SS_READ))
                txc->offset = save_adjust;
-       else
-               txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
-                               NTP_INTERVAL_FREQ / 1000;
-       txc->freq          = (time_freq / NSEC_PER_USEC) <<
-                               (SHIFT_USEC - SHIFT_NSEC);
+       else {
+               txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+                                         NTP_SCALE_SHIFT);
+               if (!(time_status & STA_NANO))
+                       txc->offset /= NSEC_PER_USEC;
+       }
+       txc->freq          = shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) *
+                                        (s64)PPM_SCALE_INV,
+                                        NTP_SCALE_SHIFT);
        txc->maxerror      = time_maxerror;
        txc->esterror      = time_esterror;
        txc->status        = time_status;
        txc->constant      = time_constant;
        txc->precision     = 1;
-       txc->tolerance     = MAXFREQ;
+       txc->tolerance     = MAXFREQ_SCALED / PPM_SCALE;
        txc->tick          = tick_usec;
+       txc->tai           = time_tai;
 
        /* PPS is not implemented, so these are zero */
        txc->ppsfreq       = 0;
@@ -399,9 +424,15 @@ leave:     if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
        txc->errcnt        = 0;
        txc->stbcnt        = 0;
        write_sequnlock_irq(&xtime_lock);
-       do_gettimeofday(&txc->time);
+
+       txc->time.tv_sec = ts.tv_sec;
+       txc->time.tv_usec = ts.tv_nsec;
+       if (!(time_status & STA_NANO))
+               txc->time.tv_usec /= NSEC_PER_USEC;
+
        notify_cmos_timer();
-       return(result);
+
+       return result;
 }
 
 static int __init ntp_tick_adj_setup(char *str)
@@ -411,3 +442,10 @@ static int __init ntp_tick_adj_setup(char *str)
 }
 
 __setup("ntp_tick_adj=", ntp_tick_adj_setup);
+
+void __init ntp_init(void)
+{
+       ntp_clear();
+       hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+       leap_timer.function = ntp_leap_second;
+}
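Aside (illustrative sketch, not from the commit): the leap-second hrtimer wired up above is aimed with plain day arithmetic on the UTC second count in do_adjtimex(): sec + (86400 - sec % 86400) is the next midnight for an insertion, and sec + (86400 - (sec + 1) % 86400) is the last second of the day, 23:59:59, for a deletion. A small userspace check of those two expressions:

#include <stdio.h>
#include <time.h>

#define SECS_PER_DAY 86400L

/* Next UTC midnight: where do_adjtimex() above arms the leap timer
 * for STA_INS (insert 23:59:60 at the end of the day). */
static long long next_midnight(long long sec)
{
        return sec + (SECS_PER_DAY - sec % SECS_PER_DAY);
}

/* Last second of the current UTC day (23:59:59): the STA_DEL case. */
static long long last_second_of_day(long long sec)
{
        return sec + (SECS_PER_DAY - (sec + 1) % SECS_PER_DAY);
}

int main(void)
{
        long long now = (long long)time(NULL);

        printf("now             : %lld\n", now);
        printf("next midnight   : %lld\n", next_midnight(now));
        printf("23:59:59 today  : %lld\n", last_second_of_day(now));
        return 0;
}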
index 2d6087c7cf9820fb4a16c43fdd75ed9f33d16bca..e91c29f961c900d7739c0dc2f27b81c480cdb55c 100644
@@ -53,7 +53,7 @@ void update_xtime_cache(u64 nsec)
        timespec_add_ns(&xtime_cache, nsec);
 }
 
-static struct clocksource *clock; /* pointer to current clocksource */
+struct clocksource *clock;
 
 
 #ifdef CONFIG_GENERIC_TIME
@@ -246,7 +246,7 @@ void __init timekeeping_init(void)
 
        write_seqlock_irqsave(&xtime_lock, flags);
 
-       ntp_clear();
+       ntp_init();
 
        clock = clocksource_get_next();
        clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -371,7 +371,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
         * here.  This is tuned so that an error of about 1 msec is adjusted
         * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
         */
-       error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
+       error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
        error2 = abs(error2);
        for (look_ahead = 0; error2 > 0; look_ahead++)
                error2 >>= 2;
@@ -380,8 +380,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
         * Now calculate the error in (1 << look_ahead) ticks, but first
         * remove the single look ahead already included in the error.
         */
-       tick_error = current_tick_length() >>
-               (TICK_LENGTH_SHIFT - clock->shift + 1);
+       tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1);
        tick_error -= clock->xtime_interval >> 1;
        error = ((error - tick_error) >> look_ahead) + tick_error;
 
@@ -412,7 +411,7 @@ static void clocksource_adjust(s64 offset)
        s64 error, interval = clock->cycle_interval;
        int adj;
 
-       error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
+       error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1);
        if (error > interval) {
                error >>= 2;
                if (likely(error <= interval))
@@ -434,7 +433,7 @@ static void clocksource_adjust(s64 offset)
        clock->xtime_interval += interval;
        clock->xtime_nsec -= offset;
        clock->error -= (interval - offset) <<
-                       (TICK_LENGTH_SHIFT - clock->shift);
+                       (NTP_SCALE_SHIFT - clock->shift);
 }
 
 /**
@@ -473,8 +472,8 @@ void update_wall_time(void)
                }
 
                /* accumulate error between NTP and clock interval */
-               clock->error += current_tick_length();
-               clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
+               clock->error += tick_length;
+               clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
        }
 
        /* correct the clock when NTP error is too big */
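
The error bookkeeping these hunks re-scale can be pictured with a short userspace loop (illustrative numbers, and clock->shift is assumed to be 0 so the interval is shifted by the full scale constant): the ideal tick length is carried pre-shifted, the clocksource interval is shifted up before the subtraction, and the signed difference accumulates until clocksource_adjust() reacts to it.

#include <stdio.h>
#include <stdint.h>

#define SCALE_SHIFT 32          /* stand-in for NTP_SCALE_SHIFT */

int main(void)
{
        /* ideal NTP tick: 1,000,000 ns (kept pre-shifted); a clocksource
         * interval that falls 1 ns short of it */
        int64_t tick_length = 1000000LL << SCALE_SHIFT;
        int64_t xtime_interval = 999999;
        int64_t error = 0;
        int i;

        for (i = 0; i < 1000; i++)
                error += tick_length - (xtime_interval << SCALE_SHIFT);

        /* after 1000 ticks the clock lags the NTP ideal by 1000 ns */
        printf("error = %lld ns\n", (long long)(error >> SCALE_SHIFT));
        return 0;
}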
index 67fe8fc21fb130fd906ef050be927dc23d80fa02..a40e20fd00010d000a3dfc2406888b2f81146db6 100644 (file)
@@ -278,12 +278,9 @@ static int __init init_timer_list_procfs(void)
 {
        struct proc_dir_entry *pe;
 
-       pe = create_proc_entry("timer_list", 0644, NULL);
+       pe = proc_create("timer_list", 0644, NULL, &timer_list_fops);
        if (!pe)
                return -ENOMEM;
-
-       pe->proc_fops = &timer_list_fops;
-
        return 0;
 }
 __initcall(init_timer_list_procfs);
index 417da8c5bc7248018d915f40d0aedfa579679e8a..c994530d166da4fde2bed268f7a3ecfe661e93a8 100644 (file)
@@ -415,12 +415,9 @@ static int __init init_tstats_procfs(void)
 {
        struct proc_dir_entry *pe;
 
-       pe = create_proc_entry("timer_stats", 0644, NULL);
+       pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
        if (!pe)
                return -ENOMEM;
-
-       pe->proc_fops = &tstats_fops;
-
        return 0;
 }
 __initcall(init_tstats_procfs);
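
Both conversions above follow the same shape; for reference, a hypothetical module doing the same with the consolidated helper (names invented, the file_operations assumed to be defined elsewhere):

/* proc_create() registers the entry together with its file_operations,
 * closing the window in which the entry existed without ->proc_fops. */
static const struct file_operations my_stats_fops;     /* assumed elsewhere */

static int __init my_stats_init(void)
{
        if (!proc_create("my_stats", 0444, NULL, &my_stats_fops))
                return -ENOMEM;
        return 0;
}
__initcall(my_stats_init);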
index f3d35d4ea42eff89e09db3e741022c22e593c3a9..ceacc6626572a7c2e2c345ba136bd456a267ba64 100644 (file)
@@ -320,14 +320,130 @@ static void timer_stats_account_timer(struct timer_list *timer)
 static void timer_stats_account_timer(struct timer_list *timer) {}
 #endif
 
-/**
- * init_timer - initialize a timer.
- * @timer: the timer to be initialized
- *
- * init_timer() must be done to a timer prior calling *any* of the
- * other timer functions.
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+
+static struct debug_obj_descr timer_debug_descr;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
  */
-void init_timer(struct timer_list *timer)
+static int timer_fixup_init(void *addr, enum debug_obj_state state)
+{
+       struct timer_list *timer = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               del_timer_sync(timer);
+               debug_object_init(timer, &timer_debug_descr);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int timer_fixup_activate(void *addr, enum debug_obj_state state)
+{
+       struct timer_list *timer = addr;
+
+       switch (state) {
+
+       case ODEBUG_STATE_NOTAVAILABLE:
+               /*
+                * This is not really a fixup. The timer was
+                * statically initialized. We just make sure that it
+                * is tracked in the object tracker.
+                */
+               if (timer->entry.next == NULL &&
+                   timer->entry.prev == TIMER_ENTRY_STATIC) {
+                       debug_object_init(timer, &timer_debug_descr);
+                       debug_object_activate(timer, &timer_debug_descr);
+                       return 0;
+               } else {
+                       WARN_ON_ONCE(1);
+               }
+               return 0;
+
+       case ODEBUG_STATE_ACTIVE:
+               WARN_ON(1);
+
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int timer_fixup_free(void *addr, enum debug_obj_state state)
+{
+       struct timer_list *timer = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               del_timer_sync(timer);
+               debug_object_free(timer, &timer_debug_descr);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static struct debug_obj_descr timer_debug_descr = {
+       .name           = "timer_list",
+       .fixup_init     = timer_fixup_init,
+       .fixup_activate = timer_fixup_activate,
+       .fixup_free     = timer_fixup_free,
+};
+
+static inline void debug_timer_init(struct timer_list *timer)
+{
+       debug_object_init(timer, &timer_debug_descr);
+}
+
+static inline void debug_timer_activate(struct timer_list *timer)
+{
+       debug_object_activate(timer, &timer_debug_descr);
+}
+
+static inline void debug_timer_deactivate(struct timer_list *timer)
+{
+       debug_object_deactivate(timer, &timer_debug_descr);
+}
+
+static inline void debug_timer_free(struct timer_list *timer)
+{
+       debug_object_free(timer, &timer_debug_descr);
+}
+
+static void __init_timer(struct timer_list *timer);
+
+void init_timer_on_stack(struct timer_list *timer)
+{
+       debug_object_init_on_stack(timer, &timer_debug_descr);
+       __init_timer(timer);
+}
+EXPORT_SYMBOL_GPL(init_timer_on_stack);
+
+void destroy_timer_on_stack(struct timer_list *timer)
+{
+       debug_object_free(timer, &timer_debug_descr);
+}
+EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
+
+#else
+static inline void debug_timer_init(struct timer_list *timer) { }
+static inline void debug_timer_activate(struct timer_list *timer) { }
+static inline void debug_timer_deactivate(struct timer_list *timer) { }
+#endif
+
+static void __init_timer(struct timer_list *timer)
 {
        timer->entry.next = NULL;
        timer->base = __raw_get_cpu_var(tvec_bases);
@@ -337,6 +453,19 @@ void init_timer(struct timer_list *timer)
        memset(timer->start_comm, 0, TASK_COMM_LEN);
 #endif
 }
+
+/**
+ * init_timer - initialize a timer.
+ * @timer: the timer to be initialized
+ *
+ * init_timer() must be done to a timer prior calling *any* of the
+ * other timer functions.
+ */
+void init_timer(struct timer_list *timer)
+{
+       debug_timer_init(timer);
+       __init_timer(timer);
+}
 EXPORT_SYMBOL(init_timer);
 
 void init_timer_deferrable(struct timer_list *timer)
@@ -351,6 +480,8 @@ static inline void detach_timer(struct timer_list *timer,
 {
        struct list_head *entry = &timer->entry;
 
+       debug_timer_deactivate(timer);
+
        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
@@ -405,6 +536,8 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
                ret = 1;
        }
 
+       debug_timer_activate(timer);
+
        new_base = __get_cpu_var(tvec_bases);
 
        if (base != new_base) {
@@ -450,6 +583,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
        timer_set_base(timer, base);
+       debug_timer_activate(timer);
        internal_add_timer(base, timer);
        /*
         * Check whether the other CPU is idle and needs to be
@@ -1086,11 +1220,14 @@ signed long __sched schedule_timeout(signed long timeout)
 
        expire = timeout + jiffies;
 
-       setup_timer(&timer, process_timeout, (unsigned long)current);
+       setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
        __mod_timer(&timer, expire);
        schedule();
        del_singleshot_timer_sync(&timer);
 
+       /* Remove the timer from the object tracker */
+       destroy_timer_on_stack(&timer);
+
        timeout = expire - jiffies;
 
  out:
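
The schedule_timeout() hunk above shows the pattern every on-stack timer user now has to follow; a minimal sketch of a hypothetical caller pairing the new helpers (names invented):

/* A stack timer must be announced to the object tracker and removed
 * from it again before its stack frame disappears. */
static void my_timeout_fn(unsigned long data)
{
        /* ... */
}

static void wait_with_stack_timer(void)
{
        struct timer_list t;

        setup_timer_on_stack(&t, my_timeout_fn, 0);
        mod_timer(&t, jiffies + HZ);
        /* ... block until my_timeout_fn() fired or the wait is abandoned ... */
        del_timer_sync(&t);
        destroy_timer_on_stack(&t);
}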
index debce602bfddd9f117ec5fb98ad0023176eb7f7b..865ecf57a09604cc1307407cc99f90c1cecc5309 100644 (file)
@@ -53,10 +53,6 @@ struct user_struct root_user = {
        .files          = ATOMIC_INIT(0),
        .sigpending     = ATOMIC_INIT(0),
        .locked_shm     = 0,
-#ifdef CONFIG_KEYS
-       .uid_keyring    = &root_user_keyring,
-       .session_keyring = &root_session_keyring,
-#endif
 #ifdef CONFIG_USER_SCHED
        .tg             = &init_task_group,
 #endif
@@ -388,7 +384,7 @@ void free_uid(struct user_struct *up)
                local_irq_restore(flags);
 }
 
-struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
+struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
 {
        struct hlist_head *hashent = uidhashentry(ns, uid);
        struct user_struct *up, *new;
@@ -403,29 +399,15 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
        spin_unlock_irq(&uidhash_lock);
 
        if (!up) {
-               new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
+               new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
                if (!new)
                        goto out_unlock;
 
                new->uid = uid;
                atomic_set(&new->__count, 1);
-               atomic_set(&new->processes, 0);
-               atomic_set(&new->files, 0);
-               atomic_set(&new->sigpending, 0);
-#ifdef CONFIG_INOTIFY_USER
-               atomic_set(&new->inotify_watches, 0);
-               atomic_set(&new->inotify_devs, 0);
-#endif
-#ifdef CONFIG_POSIX_MQUEUE
-               new->mq_bytes = 0;
-#endif
-               new->locked_shm = 0;
-
-               if (alloc_uid_keyring(new, current) < 0)
-                       goto out_free_user;
 
                if (sched_create_user(new) < 0)
-                       goto out_put_keys;
+                       goto out_free_user;
 
                if (uids_user_create(new))
                        goto out_destoy_sched;
@@ -459,9 +441,6 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid)
 
 out_destoy_sched:
        sched_destroy_user(new);
-out_put_keys:
-       key_put(new->uid_keyring);
-       key_put(new->session_keyring);
 out_free_user:
        kmem_cache_free(uid_cachep, new);
 out_unlock:
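
The dropped field initializations rely on kmem_cache_zalloc() returning zeroed memory; in effect the hunk collapses the open-coded form into the zeroing allocator, so only the non-zero fields (uid, __count) still need explicit setup:

        /* before: allocate, then clear each counter by hand */
        new = kmem_cache_alloc(uid_cachep, GFP_KERNEL);
        /* ... atomic_set(&new->processes, 0); and friends ... */

        /* after: one zeroing allocation covers all of that */
        new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);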
index 4c9006275df7fef80ae61944d39cdeea78246bb0..a9ab0596de44afe2bf3daa75c2c4ddca064c7f97 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/version.h>
 #include <linux/nsproxy.h>
+#include <linux/slab.h>
 #include <linux/user_namespace.h>
 
 /*
@@ -73,3 +74,4 @@ void free_user_ns(struct kref *kref)
        release_uids(ns);
        kfree(ns);
 }
+EXPORT_SYMBOL(free_user_ns);
index 816d7b24fa031af51b08d9108366d21626e3d38e..64d398f1244436cbdc4ede981dd927e39ee708b5 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/utsname.h>
 #include <linux/version.h>
 #include <linux/err.h>
+#include <linux/slab.h>
 
 /*
  * Clone a new ns copying an original utsname, setting refcount to 1
index 00ff4d08e370c4ee3e5d23cabb9c86b0e53f7833..29fc39f1029cec92528b0cb66380028670b9eee1 100644 (file)
@@ -158,8 +158,8 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
  *
  * Returns 0 if @work was already on a queue, non-zero otherwise.
  *
- * We queue the work to the CPU it was submitted, but there is no
- * guarantee that it will be processed by that CPU.
+ * We queue the work to the CPU on which it was submitted, but if the CPU dies
+ * it can be processed by another CPU.
  */
 int queue_work(struct workqueue_struct *wq, struct work_struct *work)
 {
@@ -195,7 +195,6 @@ static void delayed_work_timer_fn(unsigned long __data)
 int queue_delayed_work(struct workqueue_struct *wq,
                        struct delayed_work *dwork, unsigned long delay)
 {
-       timer_stats_timer_set_start_info(&dwork->timer);
        if (delay == 0)
                return queue_work(wq, &dwork->work);
 
@@ -219,11 +218,12 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
        struct timer_list *timer = &dwork->timer;
        struct work_struct *work = &dwork->work;
 
-       timer_stats_timer_set_start_info(&dwork->timer);
        if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
                BUG_ON(timer_pending(timer));
                BUG_ON(!list_empty(&work->entry));
 
+               timer_stats_timer_set_start_info(&dwork->timer);
+
                /* This stores cwq for the moment, for the timer_fn */
                set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
                timer->expires = jiffies + delay;
@@ -247,7 +247,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
        if (cwq->run_depth > 3) {
                /* morton gets to eat his hat */
                printk("%s: recursion depth exceeded: %d\n",
-                       __FUNCTION__, cwq->run_depth);
+                       __func__, cwq->run_depth);
                dump_stack();
        }
        while (!list_empty(&cwq->worklist)) {
@@ -564,7 +564,6 @@ EXPORT_SYMBOL(schedule_work);
 int schedule_delayed_work(struct delayed_work *dwork,
                                        unsigned long delay)
 {
-       timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
@@ -581,7 +580,6 @@ EXPORT_SYMBOL(schedule_delayed_work);
 int schedule_delayed_work_on(int cpu,
                        struct delayed_work *dwork, unsigned long delay)
 {
-       timer_stats_timer_set_start_info(&dwork->timer);
        return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);
@@ -772,7 +770,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 }
 EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
-static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
+static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
        /*
         * Our caller is either destroy_workqueue() or CPU_DEAD,
@@ -808,19 +806,16 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 void destroy_workqueue(struct workqueue_struct *wq)
 {
        const cpumask_t *cpu_map = wq_cpu_map(wq);
-       struct cpu_workqueue_struct *cwq;
        int cpu;
 
        get_online_cpus();
        spin_lock(&workqueue_lock);
        list_del(&wq->list);
        spin_unlock(&workqueue_lock);
-       put_online_cpus();
 
-       for_each_cpu_mask(cpu, *cpu_map) {
-               cwq = per_cpu_ptr(wq->cpu_wq, cpu);
-               cleanup_workqueue_thread(cwq, cpu);
-       }
+       for_each_cpu_mask(cpu, *cpu_map)
+               cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
+       put_online_cpus();
 
        free_percpu(wq->cpu_wq);
        kfree(wq);
@@ -838,7 +833,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
        action &= ~CPU_TASKS_FROZEN;
 
        switch (action) {
-
        case CPU_UP_PREPARE:
                cpu_set(cpu, cpu_populated_map);
        }
@@ -861,11 +855,17 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
                case CPU_UP_CANCELED:
                        start_workqueue_thread(cwq, -1);
                case CPU_DEAD:
-                       cleanup_workqueue_thread(cwq, cpu);
+                       cleanup_workqueue_thread(cwq);
                        break;
                }
        }
 
+       switch (action) {
+       case CPU_UP_CANCELED:
+       case CPU_DEAD:
+               cpu_clear(cpu, cpu_populated_map);
+       }
+
        return NOTIFY_OK;
 }
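
For orientation, a hypothetical user of the delayed-work path whose timer-stats accounting moved inside the WORK_STRUCT_PENDING test (all names below are invented):

static void my_poll_fn(struct work_struct *work)
{
        /* ... */
}

static DECLARE_DELAYED_WORK(my_poll_work, my_poll_fn);

static void kick_poll(struct workqueue_struct *wq)
{
        /* Only the submission that actually arms dwork->timer is now
         * accounted in timer_stats; re-queueing pending work is not. */
        queue_delayed_work(wq, &my_poll_work, msecs_to_jiffies(100));
}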
 
index 2d53dc092e8b1670bd26e6e3c6210f01caa54a29..8cc8e8722a3fac4f363125f2009fbf65d50cca58 100644 (file)
@@ -7,6 +7,12 @@ menu "Library routines"
 config BITREVERSE
        tristate
 
+config GENERIC_FIND_FIRST_BIT
+       def_bool n
+
+config GENERIC_FIND_NEXT_BIT
+       def_bool n
+
 config CRC_CCITT
        tristate "CRC-CCITT functions"
        help
index 754cc0027f2a8acc6f8358dd6a1ae0b1346845ba..d2099f41aa1ebe46bc614c7187290f7a32dd941c 100644 (file)
@@ -194,6 +194,37 @@ config TIMER_STATS
          (it defaults to deactivated on bootup and will only be activated
          if some application like powertop activates it explicitly).
 
+config DEBUG_OBJECTS
+       bool "Debug object operations"
+       depends on DEBUG_KERNEL
+       help
+         If you say Y here, additional code will be inserted into the
+         kernel to track the life time of various objects and validate
+         the operations on those objects.
+
+config DEBUG_OBJECTS_SELFTEST
+       bool "Debug objects selftest"
+       depends on DEBUG_OBJECTS
+       help
+         This enables the selftest of the object debug code.
+
+config DEBUG_OBJECTS_FREE
+       bool "Debug objects in freed memory"
+       depends on DEBUG_OBJECTS
+       help
+         This enables checking whether a kfree/vfree operation frees an
+         area which contains an object that has not been deactivated
+         properly. This can make kmalloc/kfree-intensive workloads
+         much slower.
+
+config DEBUG_OBJECTS_TIMERS
+       bool "Debug timer objects"
+       depends on DEBUG_OBJECTS
+       help
+         If you say Y here, additional code will be inserted into the
+         timer routines to track the life time of timer objects and
+         validate the timer operations.
+
 config DEBUG_SLAB
        bool "Debug slab memory allocations"
        depends on DEBUG_KERNEL && SLAB
index bf8000fc7d48e771eb560f10fde894c64bffbb63..74b0cfb1fcc3c0eb009648484a368116d0681f88 100644 (file)
@@ -6,7 +6,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
         rbtree.o radix-tree.o dump_stack.o \
         idr.o int_sqrt.o extable.o prio_tree.o \
         sha1.o irq_regs.o reciprocal_div.o argv_split.o \
-        proportions.o prio_heap.o
+        proportions.o prio_heap.o ratelimit.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -29,12 +29,14 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
+lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
 obj-$(CONFIG_PLIST) += plist.o
 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 obj-$(CONFIG_DEBUG_LIST) += list_debug.o
+obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
 
 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
   lib-y += dec_and_lock.o
index a6939e18d7bb3bc4f8d0caba8004e6b075bd3983..c4cb48f77f0c6ab157ba41ec0411644a44a39ca9 100644 (file)
@@ -714,6 +714,164 @@ int bitmap_bitremap(int oldbit, const unsigned long *old,
 }
 EXPORT_SYMBOL(bitmap_bitremap);
 
+/**
+ * bitmap_onto - translate one bitmap relative to another
+ *     @dst: resulting translated bitmap
+ *     @orig: original untranslated bitmap
+ *     @relmap: bitmap relative to which translated
+ *     @bits: number of bits in each of these bitmaps
+ *
+ * Set the n-th bit of @dst iff there exists some m such that the
+ * n-th bit of @relmap is set, the m-th bit of @orig is set, and
+ * the n-th bit of @relmap is also the m-th _set_ bit of @relmap.
+ * (If you understood the previous sentence the first time you
+ * read it, you're overqualified for your current job.)
+ *
+ * In other words, @orig is mapped onto (surjectively) @dst,
+ * using the map { <n, m> | the n-th bit of @relmap is the
+ * m-th set bit of @relmap }.
+ *
+ * Any set bits in @orig above bit number W, where W is the
+ * weight of (number of set bits in) @relmap, are mapped nowhere.
+ * In particular, if for all bits m set in @orig, m >= W, then
+ * @dst will end up empty.  In situations where the possibility
+ * of such an empty result is not desired, one way to avoid it is
+ * to use the bitmap_fold() operator, below, to first fold the
+ * @orig bitmap over itself so that all its set bits x are in the
+ * range 0 <= x < W.  The bitmap_fold() operator does this by
+ * setting the bit (m % W) in @dst, for each bit (m) set in @orig.
+ *
+ * Example [1] for bitmap_onto():
+ *  Let's say @relmap has bits 30-39 set, and @orig has bits
+ *  1, 3, 5, 7, 9 and 11 set.  Then on return from this routine,
+ *  @dst will have bits 31, 33, 35, 37 and 39 set.
+ *
+ *  When bit 0 is set in @orig, it means turn on the bit in
+ *  @dst corresponding to whatever is the first bit (if any)
+ *  that is turned on in @relmap.  Since bit 0 was off in the
+ *  above example, we leave off that bit (bit 30) in @dst.
+ *
+ *  When bit 1 is set in @orig (as in the above example), it
+ *  means turn on the bit in @dst corresponding to whatever
+ *  is the second bit that is turned on in @relmap.  The second
+ *  bit in @relmap that was turned on in the above example was
+ *  bit 31, so we turned on bit 31 in @dst.
+ *
+ *  Similarly, we turned on bits 33, 35, 37 and 39 in @dst,
+ *  because they were the 4th, 6th, 8th and 10th set bits
+ *  set in @relmap, and the 4th, 6th, 8th and 10th bits of
+ *  @orig (i.e. bits 3, 5, 7 and 9) were also set.
+ *
+ *  When bit 11 is set in @orig, it means turn on the bit in
+ *  @dst corresponding to whatever is the twelfth bit that is
+ *  turned on in @relmap.  In the above example, there were
+ *  only ten bits turned on in @relmap (30..39), so the fact that
+ *  bit 11 was set in @orig had no effect on @dst.
+ *
+ * Example [2] for bitmap_fold() + bitmap_onto():
+ *  Let's say @relmap has these ten bits set:
+ *             40 41 42 43 45 48 53 61 74 95
+ *  (for the curious, that's 40 plus the first ten terms of the
+ *  Fibonacci sequence.)
+ *
+ *  Further, let's say we use the following code, invoking
+ *  bitmap_fold() then bitmap_onto(), as suggested above to
+ *  avoid the possibility of an empty @dst result:
+ *
+ *     unsigned long *tmp;     // a temporary bitmap's bits
+ *
+ *     bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits);
+ *     bitmap_onto(dst, tmp, relmap, bits);
+ *
+ *  Then this table shows what various values of @dst would be, for
+ *  various @orig's.  I list the zero-based positions of each set bit.
+ *  The tmp column shows the intermediate result, as computed by
+ *  using bitmap_fold() to fold the @orig bitmap modulo ten
+ *  (the weight of @relmap).
+ *
+ *      @orig           tmp            @dst
+ *      0                0             40
+ *      1                1             41
+ *      9                9             95
+ *      10               0             40 (*)
+ *      1 3 5 7          1 3 5 7       41 43 48 61
+ *      0 1 2 3 4        0 1 2 3 4     40 41 42 43 45
+ *      0 9 18 27        0 9 8 7       40 61 74 95
+ *      0 10 20 30       0             40
+ *      0 11 22 33       0 1 2 3       40 41 42 43
+ *      0 12 24 36       0 2 4 6       40 42 45 53
+ *      78 102 211       1 2 8         41 42 74 (*)
+ *
+ * (*) For these marked lines, if we hadn't first done bitmap_fold()
+ *     into tmp, then the @dst result would have been empty.
+ *
+ * If either of @orig or @relmap is empty (no set bits), then @dst
+ * will be returned empty.
+ *
+ * If (as explained above) the only set bits in @orig are in positions
+ * m where m >= W, (where W is the weight of @relmap) then @dst will
+ * once again be returned empty.
+ *
+ * All bits in @dst not set by the above rule are cleared.
+ */
+void bitmap_onto(unsigned long *dst, const unsigned long *orig,
+                       const unsigned long *relmap, int bits)
+{
+       int n, m;               /* same meaning as in above comment */
+
+       if (dst == orig)        /* following doesn't handle inplace mappings */
+               return;
+       bitmap_zero(dst, bits);
+
+       /*
+        * The following code is a more efficient, but less
+        * obvious, equivalent to the loop:
+        *      for (m = 0; m < bitmap_weight(relmap, bits); m++) {
+        *              n = bitmap_ord_to_pos(orig, m, bits);
+        *              if (test_bit(m, orig))
+        *                      set_bit(n, dst);
+        *      }
+        */
+
+       m = 0;
+       for (n = find_first_bit(relmap, bits);
+            n < bits;
+            n = find_next_bit(relmap, bits, n + 1)) {
+               /* m == bitmap_pos_to_ord(relmap, n, bits) */
+               if (test_bit(m, orig))
+                       set_bit(n, dst);
+               m++;
+       }
+}
+EXPORT_SYMBOL(bitmap_onto);
+
+/**
+ * bitmap_fold - fold larger bitmap into smaller, modulo specified size
+ *     @dst: resulting smaller bitmap
+ *     @orig: original larger bitmap
+ *     @sz: specified size
+ *     @bits: number of bits in each of these bitmaps
+ *
+ * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst.
+ * Clear all other bits in @dst.  See further the comment and
+ * Example [2] for bitmap_onto() for why and how to use this.
+ */
+void bitmap_fold(unsigned long *dst, const unsigned long *orig,
+                       int sz, int bits)
+{
+       int oldbit;
+
+       if (dst == orig)        /* following doesn't handle inplace mappings */
+               return;
+       bitmap_zero(dst, bits);
+
+       for (oldbit = find_first_bit(orig, bits);
+            oldbit < bits;
+            oldbit = find_next_bit(orig, bits, oldbit + 1))
+               set_bit(oldbit % sz, dst);
+}
+EXPORT_SYMBOL(bitmap_fold);
+
 /*
  * Common code for bitmap_*_region() routines.
  *     bitmap: array of unsigned longs corresponding to the bitmap
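
The kernel-doc above is easiest to verify by hand; the following self-contained userspace rendering of Example [1] uses plain 64-bit words rather than the kernel bitmap API, so it illustrates the documented semantics, not the kernel implementation:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t relmap = 0, orig = 0, dst = 0;
        int n, m = 0;

        for (n = 30; n <= 39; n++)              /* relmap: bits 30..39 */
                relmap |= 1ULL << n;
        for (n = 1; n <= 11; n += 2)            /* orig: 1 3 5 7 9 11 */
                orig |= 1ULL << n;

        /* n walks the set bits of relmap; m is n's ordinal among them */
        for (n = 0; n < 64; n++) {
                if (!(relmap & (1ULL << n)))
                        continue;
                if (orig & (1ULL << m))
                        dst |= 1ULL << n;
                m++;
        }

        for (n = 0; n < 64; n++)                /* prints: 31 33 35 37 39 */
                if (dst & (1ULL << n))
                        printf("%d ", n);
        printf("\n");
        return 0;
}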
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
new file mode 100644 (file)
index 0000000..a76a5e1
--- /dev/null
@@ -0,0 +1,890 @@
+/*
+ * Generic infrastructure for lifetime debugging of objects.
+ *
+ * Started by Thomas Gleixner
+ *
+ * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#include <linux/debugobjects.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/hash.h>
+
+#define ODEBUG_HASH_BITS       14
+#define ODEBUG_HASH_SIZE       (1 << ODEBUG_HASH_BITS)
+
+#define ODEBUG_POOL_SIZE       512
+#define ODEBUG_POOL_MIN_LEVEL  256
+
+#define ODEBUG_CHUNK_SHIFT     PAGE_SHIFT
+#define ODEBUG_CHUNK_SIZE      (1 << ODEBUG_CHUNK_SHIFT)
+#define ODEBUG_CHUNK_MASK      (~(ODEBUG_CHUNK_SIZE - 1))
+
+struct debug_bucket {
+       struct hlist_head       list;
+       spinlock_t              lock;
+};
+
+static struct debug_bucket     obj_hash[ODEBUG_HASH_SIZE];
+
+static struct debug_obj                obj_static_pool[ODEBUG_POOL_SIZE];
+
+static DEFINE_SPINLOCK(pool_lock);
+
+static HLIST_HEAD(obj_pool);
+
+static int                     obj_pool_min_free = ODEBUG_POOL_SIZE;
+static int                     obj_pool_free = ODEBUG_POOL_SIZE;
+static int                     obj_pool_used;
+static int                     obj_pool_max_used;
+static struct kmem_cache       *obj_cache;
+
+static int                     debug_objects_maxchain __read_mostly;
+static int                     debug_objects_fixups __read_mostly;
+static int                     debug_objects_warnings __read_mostly;
+static int                     debug_objects_enabled __read_mostly;
+static struct debug_obj_descr  *descr_test  __read_mostly;
+
+static int __init enable_object_debug(char *str)
+{
+       debug_objects_enabled = 1;
+       return 0;
+}
+early_param("debug_objects", enable_object_debug);
+
+static const char *obj_states[ODEBUG_STATE_MAX] = {
+       [ODEBUG_STATE_NONE]             = "none",
+       [ODEBUG_STATE_INIT]             = "initialized",
+       [ODEBUG_STATE_INACTIVE]         = "inactive",
+       [ODEBUG_STATE_ACTIVE]           = "active",
+       [ODEBUG_STATE_DESTROYED]        = "destroyed",
+       [ODEBUG_STATE_NOTAVAILABLE]     = "not available",
+};
+
+static int fill_pool(void)
+{
+       gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
+       struct debug_obj *new;
+
+       if (likely(obj_pool_free >= ODEBUG_POOL_MIN_LEVEL))
+               return obj_pool_free;
+
+       if (unlikely(!obj_cache))
+               return obj_pool_free;
+
+       while (obj_pool_free < ODEBUG_POOL_MIN_LEVEL) {
+
+               new = kmem_cache_zalloc(obj_cache, gfp);
+               if (!new)
+                       return obj_pool_free;
+
+               spin_lock(&pool_lock);
+               hlist_add_head(&new->node, &obj_pool);
+               obj_pool_free++;
+               spin_unlock(&pool_lock);
+       }
+       return obj_pool_free;
+}
+
+/*
+ * Lookup an object in the hash bucket.
+ */
+static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
+{
+       struct hlist_node *node;
+       struct debug_obj *obj;
+       int cnt = 0;
+
+       hlist_for_each_entry(obj, node, &b->list, node) {
+               cnt++;
+               if (obj->object == addr)
+                       return obj;
+       }
+       if (cnt > debug_objects_maxchain)
+               debug_objects_maxchain = cnt;
+
+       return NULL;
+}
+
+/*
+ * Allocate a new object. If the pool is empty and no refill possible,
+ * switch off the debugger.
+ */
+static struct debug_obj *
+alloc_object(void *addr, struct debug_bucket *b, struct debug_obj_descr *descr)
+{
+       struct debug_obj *obj = NULL;
+       int retry = 0;
+
+repeat:
+       spin_lock(&pool_lock);
+       if (obj_pool.first) {
+               obj         = hlist_entry(obj_pool.first, typeof(*obj), node);
+
+               obj->object = addr;
+               obj->descr  = descr;
+               obj->state  = ODEBUG_STATE_NONE;
+               hlist_del(&obj->node);
+
+               hlist_add_head(&obj->node, &b->list);
+
+               obj_pool_used++;
+               if (obj_pool_used > obj_pool_max_used)
+                       obj_pool_max_used = obj_pool_used;
+
+               obj_pool_free--;
+               if (obj_pool_free < obj_pool_min_free)
+                       obj_pool_min_free = obj_pool_free;
+       }
+       spin_unlock(&pool_lock);
+
+       if (fill_pool() && !obj && !retry++)
+               goto repeat;
+
+       return obj;
+}
+
+/*
+ * Put the object back into the pool or give it back to kmem_cache:
+ */
+static void free_object(struct debug_obj *obj)
+{
+       unsigned long idx = (unsigned long)(obj - obj_static_pool);
+
+       if (obj_pool_free < ODEBUG_POOL_SIZE || idx < ODEBUG_POOL_SIZE) {
+               spin_lock(&pool_lock);
+               hlist_add_head(&obj->node, &obj_pool);
+               obj_pool_free++;
+               obj_pool_used--;
+               spin_unlock(&pool_lock);
+       } else {
+               spin_lock(&pool_lock);
+               obj_pool_used--;
+               spin_unlock(&pool_lock);
+               kmem_cache_free(obj_cache, obj);
+       }
+}
+
+/*
+ * We ran out of memory. That means we probably have tons of objects
+ * allocated.
+ */
+static void debug_objects_oom(void)
+{
+       struct debug_bucket *db = obj_hash;
+       struct hlist_node *node, *tmp;
+       struct debug_obj *obj;
+       unsigned long flags;
+       int i;
+
+       printk(KERN_WARNING "ODEBUG: Out of memory. ODEBUG disabled\n");
+
+       for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
+               spin_lock_irqsave(&db->lock, flags);
+               hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+                       hlist_del(&obj->node);
+                       free_object(obj);
+               }
+               spin_unlock_irqrestore(&db->lock, flags);
+       }
+}
+
+/*
+ * We use the pfn of the address for the hash. That way we can check
+ * for freed objects simply by checking the affected bucket.
+ */
+static struct debug_bucket *get_bucket(unsigned long addr)
+{
+       unsigned long hash;
+
+       hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
+       return &obj_hash[hash];
+}
+
+static void debug_print_object(struct debug_obj *obj, char *msg)
+{
+       static int limit;
+
+       if (limit < 5 && obj->descr != descr_test) {
+               limit++;
+               printk(KERN_ERR "ODEBUG: %s %s object type: %s\n", msg,
+                      obj_states[obj->state], obj->descr->name);
+               WARN_ON(1);
+       }
+       debug_objects_warnings++;
+}
+
+/*
+ * Try to repair the damage, so we have a better chance to get useful
+ * debug output.
+ */
+static void
+debug_object_fixup(int (*fixup)(void *addr, enum debug_obj_state state),
+                  void * addr, enum debug_obj_state state)
+{
+       if (fixup)
+               debug_objects_fixups += fixup(addr, state);
+}
+
+static void debug_object_is_on_stack(void *addr, int onstack)
+{
+       void *stack = current->stack;
+       int is_on_stack;
+       static int limit;
+
+       if (limit > 4)
+               return;
+
+       is_on_stack = (addr >= stack && addr < (stack + THREAD_SIZE));
+
+       if (is_on_stack == onstack)
+               return;
+
+       limit++;
+       if (is_on_stack)
+               printk(KERN_WARNING
+                      "ODEBUG: object is on stack, but not annotated\n");
+       else
+               printk(KERN_WARNING
+                      "ODEBUG: object is not on stack, but annotated\n");
+       WARN_ON(1);
+}
+
+static void
+__debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack)
+{
+       enum debug_obj_state state;
+       struct debug_bucket *db;
+       struct debug_obj *obj;
+       unsigned long flags;
+
+       db = get_bucket((unsigned long) addr);
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       obj = lookup_object(addr, db);
+       if (!obj) {
+               obj = alloc_object(addr, db, descr);
+               if (!obj) {
+                       debug_objects_enabled = 0;
+                       spin_unlock_irqrestore(&db->lock, flags);
+                       debug_objects_oom();
+                       return;
+               }
+               debug_object_is_on_stack(addr, onstack);
+       }
+
+       switch (obj->state) {
+       case ODEBUG_STATE_NONE:
+       case ODEBUG_STATE_INIT:
+       case ODEBUG_STATE_INACTIVE:
+               obj->state = ODEBUG_STATE_INIT;
+               break;
+
+       case ODEBUG_STATE_ACTIVE:
+               debug_print_object(obj, "init");
+               state = obj->state;
+               spin_unlock_irqrestore(&db->lock, flags);
+               debug_object_fixup(descr->fixup_init, addr, state);
+               return;
+
+       case ODEBUG_STATE_DESTROYED:
+               debug_print_object(obj, "init");
+               break;
+       default:
+               break;
+       }
+
+       spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_init - debug checks when an object is initialized
+ * @addr:      address of the object
+ * @descr:     pointer to an object specific debug description structure
+ */
+void debug_object_init(void *addr, struct debug_obj_descr *descr)
+{
+       if (!debug_objects_enabled)
+               return;
+
+       __debug_object_init(addr, descr, 0);
+}
+
+/**
+ * debug_object_init_on_stack - debug checks when an object on stack is
+ *                             initialized
+ * @addr:      address of the object
+ * @descr:     pointer to an object specific debug description structure
+ */
+void debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr)
+{
+       if (!debug_objects_enabled)
+               return;
+
+       __debug_object_init(addr, descr, 1);
+}
+
+/**
+ * debug_object_activate - debug checks when an object is activated
+ * @addr:      address of the object
+ * @descr:     pointer to an object specific debug description structure
+ */
+void debug_object_activate(void *addr, struct debug_obj_descr *descr)
+{
+       enum debug_obj_state state;
+       struct debug_bucket *db;
+       struct debug_obj *obj;
+       unsigned long flags;
+
+       if (!debug_objects_enabled)
+               return;
+
+       db = get_bucket((unsigned long) addr);
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       obj = lookup_object(addr, db);
+       if (obj) {
+               switch (obj->state) {
+               case ODEBUG_STATE_INIT:
+               case ODEBUG_STATE_INACTIVE:
+                       obj->state = ODEBUG_STATE_ACTIVE;
+                       break;
+
+               case ODEBUG_STATE_ACTIVE:
+                       debug_print_object(obj, "activate");
+                       state = obj->state;
+                       spin_unlock_irqrestore(&db->lock, flags);
+                       debug_object_fixup(descr->fixup_activate, addr, state);
+                       return;
+
+               case ODEBUG_STATE_DESTROYED:
+                       debug_print_object(obj, "activate");
+                       break;
+               default:
+                       break;
+               }
+               spin_unlock_irqrestore(&db->lock, flags);
+               return;
+       }
+
+       spin_unlock_irqrestore(&db->lock, flags);
+       /*
+        * This happens when a static object is activated. We
+        * let the type specific code decide whether this is
+        * true or not.
+        */
+       debug_object_fixup(descr->fixup_activate, addr,
+                          ODEBUG_STATE_NOTAVAILABLE);
+}
+
+/**
+ * debug_object_deactivate - debug checks when an object is deactivated
+ * @addr:      address of the object
+ * @descr:     pointer to an object specific debug description structure
+ */
+void debug_object_deactivate(void *addr, struct debug_obj_descr *descr)
+{
+       struct debug_bucket *db;
+       struct debug_obj *obj;
+       unsigned long flags;
+
+       if (!debug_objects_enabled)
+               return;
+
+       db = get_bucket((unsigned long) addr);
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       obj = lookup_object(addr, db);
+       if (obj) {
+               switch (obj->state) {
+               case ODEBUG_STATE_INIT:
+               case ODEBUG_STATE_INACTIVE:
+               case ODEBUG_STATE_ACTIVE:
+                       obj->state = ODEBUG_STATE_INACTIVE;
+                       break;
+
+               case ODEBUG_STATE_DESTROYED:
+                       debug_print_object(obj, "deactivate");
+                       break;
+               default:
+                       break;
+               }
+       } else {
+               struct debug_obj o = { .object = addr,
+                                      .state = ODEBUG_STATE_NOTAVAILABLE,
+                                      .descr = descr };
+
+               debug_print_object(&o, "deactivate");
+       }
+
+       spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_destroy - debug checks when an object is destroyed
+ * @addr:      address of the object
+ * @descr:     pointer to an object specific debug description structure
+ */
+void debug_object_destroy(void *addr, struct debug_obj_descr *descr)
+{
+       enum debug_obj_state state;
+       struct debug_bucket *db;
+       struct debug_obj *obj;
+       unsigned long flags;
+
+       if (!debug_objects_enabled)
+               return;
+
+       db = get_bucket((unsigned long) addr);
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       obj = lookup_object(addr, db);
+       if (!obj)
+               goto out_unlock;
+
+       switch (obj->state) {
+       case ODEBUG_STATE_NONE:
+       case ODEBUG_STATE_INIT:
+       case ODEBUG_STATE_INACTIVE:
+               obj->state = ODEBUG_STATE_DESTROYED;
+               break;
+       case ODEBUG_STATE_ACTIVE:
+               debug_print_object(obj, "destroy");
+               state = obj->state;
+               spin_unlock_irqrestore(&db->lock, flags);
+               debug_object_fixup(descr->fixup_destroy, addr, state);
+               return;
+
+       case ODEBUG_STATE_DESTROYED:
+               debug_print_object(obj, "destroy");
+               break;
+       default:
+               break;
+       }
+out_unlock:
+       spin_unlock_irqrestore(&db->lock, flags);
+}
+
+/**
+ * debug_object_free - debug checks when an object is freed
+ * @addr:      address of the object
+ * @descr:     pointer to an object specific debug description structure
+ */
+void debug_object_free(void *addr, struct debug_obj_descr *descr)
+{
+       enum debug_obj_state state;
+       struct debug_bucket *db;
+       struct debug_obj *obj;
+       unsigned long flags;
+
+       if (!debug_objects_enabled)
+               return;
+
+       db = get_bucket((unsigned long) addr);
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       obj = lookup_object(addr, db);
+       if (!obj)
+               goto out_unlock;
+
+       switch (obj->state) {
+       case ODEBUG_STATE_ACTIVE:
+               debug_print_object(obj, "free");
+               state = obj->state;
+               spin_unlock_irqrestore(&db->lock, flags);
+               debug_object_fixup(descr->fixup_free, addr, state);
+               return;
+       default:
+               hlist_del(&obj->node);
+               free_object(obj);
+               break;
+       }
+out_unlock:
+       spin_unlock_irqrestore(&db->lock, flags);
+}
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+static void __debug_check_no_obj_freed(const void *address, unsigned long size)
+{
+       unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
+       struct hlist_node *node, *tmp;
+       struct debug_obj_descr *descr;
+       enum debug_obj_state state;
+       struct debug_bucket *db;
+       struct debug_obj *obj;
+       int cnt;
+
+       saddr = (unsigned long) address;
+       eaddr = saddr + size;
+       paddr = saddr & ODEBUG_CHUNK_MASK;
+       chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
+       chunks >>= ODEBUG_CHUNK_SHIFT;
+
+       for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
+               db = get_bucket(paddr);
+
+repeat:
+               cnt = 0;
+               spin_lock_irqsave(&db->lock, flags);
+               hlist_for_each_entry_safe(obj, node, tmp, &db->list, node) {
+                       cnt++;
+                       oaddr = (unsigned long) obj->object;
+                       if (oaddr < saddr || oaddr >= eaddr)
+                               continue;
+
+                       switch (obj->state) {
+                       case ODEBUG_STATE_ACTIVE:
+                               debug_print_object(obj, "free");
+                               descr = obj->descr;
+                               state = obj->state;
+                               spin_unlock_irqrestore(&db->lock, flags);
+                               debug_object_fixup(descr->fixup_free,
+                                                  (void *) oaddr, state);
+                               goto repeat;
+                       default:
+                               hlist_del(&obj->node);
+                               free_object(obj);
+                               break;
+                       }
+               }
+               spin_unlock_irqrestore(&db->lock, flags);
+               if (cnt > debug_objects_maxchain)
+                       debug_objects_maxchain = cnt;
+       }
+}
+
+void debug_check_no_obj_freed(const void *address, unsigned long size)
+{
+       if (debug_objects_enabled)
+               __debug_check_no_obj_freed(address, size);
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+static int debug_stats_show(struct seq_file *m, void *v)
+{
+       seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
+       seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
+       seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
+       seq_printf(m, "pool_free     :%d\n", obj_pool_free);
+       seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
+       seq_printf(m, "pool_used     :%d\n", obj_pool_used);
+       seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
+       return 0;
+}
+
+static int debug_stats_open(struct inode *inode, struct file *filp)
+{
+       return single_open(filp, debug_stats_show, NULL);
+}
+
+static const struct file_operations debug_stats_fops = {
+       .open           = debug_stats_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static int __init debug_objects_init_debugfs(void)
+{
+       struct dentry *dbgdir, *dbgstats;
+
+       if (!debug_objects_enabled)
+               return 0;
+
+       dbgdir = debugfs_create_dir("debug_objects", NULL);
+       if (!dbgdir)
+               return -ENOMEM;
+
+       dbgstats = debugfs_create_file("stats", 0444, dbgdir, NULL,
+                                      &debug_stats_fops);
+       if (!dbgstats)
+               goto err;
+
+       return 0;
+
+err:
+       debugfs_remove(dbgdir);
+
+       return -ENOMEM;
+}
+__initcall(debug_objects_init_debugfs);
+
+#else
+static inline void debug_objects_init_debugfs(void) { }
+#endif
+
+#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
+
+/* Random data structure for the self test */
+struct self_test {
+       unsigned long   dummy1[6];
+       int             static_init;
+       unsigned long   dummy2[3];
+};
+
+static __initdata struct debug_obj_descr descr_type_test;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int __init fixup_init(void *addr, enum debug_obj_state state)
+{
+       struct self_test *obj = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               debug_object_deactivate(obj, &descr_type_test);
+               debug_object_init(obj, &descr_type_test);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int __init fixup_activate(void *addr, enum debug_obj_state state)
+{
+       struct self_test *obj = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_NOTAVAILABLE:
+               if (obj->static_init == 1) {
+                       debug_object_init(obj, &descr_type_test);
+                       debug_object_activate(obj, &descr_type_test);
+                       /*
+                        * Real code should return 0 here! This is
+                        * not a fixup of some bad behaviour. We
+                        * merely call the debug_init function to keep
+                        * track of the object.
+                        */
+                       return 1;
+               } else {
+                       /* Real code needs to emit a warning here */
+               }
+               return 0;
+
+       case ODEBUG_STATE_ACTIVE:
+               debug_object_deactivate(obj, &descr_type_test);
+               debug_object_activate(obj, &descr_type_test);
+               return 1;
+
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_destroy is called when:
+ * - an active object is destroyed
+ */
+static int __init fixup_destroy(void *addr, enum debug_obj_state state)
+{
+       struct self_test *obj = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               debug_object_deactivate(obj, &descr_type_test);
+               debug_object_destroy(obj, &descr_type_test);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int __init fixup_free(void *addr, enum debug_obj_state state)
+{
+       struct self_test *obj = addr;
+
+       switch (state) {
+       case ODEBUG_STATE_ACTIVE:
+               debug_object_deactivate(obj, &descr_type_test);
+               debug_object_free(obj, &descr_type_test);
+               return 1;
+       default:
+               return 0;
+       }
+}
+
+static int
+check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
+{
+       struct debug_bucket *db;
+       struct debug_obj *obj;
+       unsigned long flags;
+       int res = -EINVAL;
+
+       db = get_bucket((unsigned long) addr);
+
+       spin_lock_irqsave(&db->lock, flags);
+
+       obj = lookup_object(addr, db);
+       if (!obj && state != ODEBUG_STATE_NONE) {
+               printk(KERN_ERR "ODEBUG: selftest object not found\n");
+               WARN_ON(1);
+               goto out;
+       }
+       if (obj && obj->state != state) {
+               printk(KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
+                      obj->state, state);
+               WARN_ON(1);
+               goto out;
+       }
+       if (fixups != debug_objects_fixups) {
+               printk(KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
+                      fixups, debug_objects_fixups);
+               WARN_ON(1);
+               goto out;
+       }
+       if (warnings != debug_objects_warnings) {
+               printk(KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
+                      warnings, debug_objects_warnings);
+               WARN_ON(1);
+               goto out;
+       }
+       res = 0;
+out:
+       spin_unlock_irqrestore(&db->lock, flags);
+       if (res)
+               debug_objects_enabled = 0;
+       return res;
+}
+
+static __initdata struct debug_obj_descr descr_type_test = {
+       .name                   = "selftest",
+       .fixup_init             = fixup_init,
+       .fixup_activate         = fixup_activate,
+       .fixup_destroy          = fixup_destroy,
+       .fixup_free             = fixup_free,
+};
+
+static __initdata struct self_test obj = { .static_init = 0 };
+
+static void __init debug_objects_selftest(void)
+{
+       int fixups, oldfixups, warnings, oldwarnings;
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       fixups = oldfixups = debug_objects_fixups;
+       warnings = oldwarnings = debug_objects_warnings;
+       descr_test = &descr_type_test;
+
+       debug_object_init(&obj, &descr_type_test);
+       if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
+               goto out;
+       debug_object_activate(&obj, &descr_type_test);
+       if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
+               goto out;
+       debug_object_activate(&obj, &descr_type_test);
+       if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
+               goto out;
+       debug_object_deactivate(&obj, &descr_type_test);
+       if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
+               goto out;
+       debug_object_destroy(&obj, &descr_type_test);
+       if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
+               goto out;
+       debug_object_init(&obj, &descr_type_test);
+       if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+               goto out;
+       debug_object_activate(&obj, &descr_type_test);
+       if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+               goto out;
+       debug_object_deactivate(&obj, &descr_type_test);
+       if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
+               goto out;
+       debug_object_free(&obj, &descr_type_test);
+       if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
+               goto out;
+
+       obj.static_init = 1;
+       debug_object_activate(&obj, &descr_type_test);
+       if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
+               goto out;
+       debug_object_init(&obj, &descr_type_test);
+       if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
+               goto out;
+       debug_object_free(&obj, &descr_type_test);
+       if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
+               goto out;
+
+#ifdef CONFIG_DEBUG_OBJECTS_FREE
+       debug_object_init(&obj, &descr_type_test);
+       if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
+               goto out;
+       debug_object_activate(&obj, &descr_type_test);
+       if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
+               goto out;
+       __debug_check_no_obj_freed(&obj, sizeof(obj));
+       if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
+               goto out;
+#endif
+       printk(KERN_INFO "ODEBUG: selftest passed\n");
+
+out:
+       debug_objects_fixups = oldfixups;
+       debug_objects_warnings = oldwarnings;
+       descr_test = NULL;
+
+       local_irq_restore(flags);
+}
+#else
+static inline void debug_objects_selftest(void) { }
+#endif
+
+/*
+ * Called during early boot to initialize the hash buckets and link
+ * the static object pool objects into the pool list. After this call
+ * the object tracker is fully operational.
+ */
+void __init debug_objects_early_init(void)
+{
+       int i;
+
+       for (i = 0; i < ODEBUG_HASH_SIZE; i++)
+               spin_lock_init(&obj_hash[i].lock);
+
+       for (i = 0; i < ODEBUG_POOL_SIZE; i++)
+               hlist_add_head(&obj_static_pool[i].node, &obj_pool);
+}
+
+/*
+ * Called after the kmem_caches are functional to setup a dedicated
+ * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
+ * prevents the debug code from being called on kmem_cache_free() for the
+ * debug tracker objects, avoiding recursive calls.
+ */
+void __init debug_objects_mem_init(void)
+{
+       if (!debug_objects_enabled)
+               return;
+
+       obj_cache = kmem_cache_create("debug_objects_cache",
+                                     sizeof (struct debug_obj), 0,
+                                     SLAB_DEBUG_OBJECTS, NULL);
+
+       if (!obj_cache)
+               debug_objects_enabled = 0;
+       else
+               debug_objects_selftest();
+}
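
To show how a subsystem is expected to hook into this infrastructure, here is a sketch of a hypothetical client modelled on the timer hooks earlier in this commit (the type and helper names are invented, and the fixup callbacks are omitted for brevity):

#include <linux/debugobjects.h>

struct my_obj {
        int armed;
};

/* Describe the object type once; fixup_* callbacks could be added here. */
static struct debug_obj_descr my_obj_debug_descr = {
        .name = "my_obj",
};

static void my_obj_setup(struct my_obj *o)
{
        debug_object_init(o, &my_obj_debug_descr);
        o->armed = 0;
}

static void my_obj_arm(struct my_obj *o)
{
        debug_object_activate(o, &my_obj_debug_descr);
        o->armed = 1;
}

static void my_obj_disarm(struct my_obj *o)
{
        o->armed = 0;
        debug_object_deactivate(o, &my_obj_debug_descr);
}

static void my_obj_release(struct my_obj *o)
{
        debug_object_free(o, &my_obj_debug_descr);
}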
index edc27a5d1b735aba4855d33e9756ab375d9569ce..26c87c49d776cf8365e9ff7ba2a7319e481f716f 100644 (file)
@@ -20,7 +20,7 @@ static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
  *
  * Managed ioremap().  Map is automatically unmapped on driver detach.
  */
-void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
+void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
                           unsigned long size)
 {
        void __iomem **ptr, *addr;
@@ -49,7 +49,7 @@ EXPORT_SYMBOL(devm_ioremap);
  * Managed ioremap_nocache().  Map is automatically unmapped on driver
  * detach.
  */
-void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
+void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
                                   unsigned long size)
 {
        void __iomem **ptr, *addr;
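
A hypothetical probe() fragment (all names illustrative) shows why the prototype change matters: with a resource_size_t offset the mapping no longer truncates physical addresses wider than unsigned long on 32-bit configurations:

static int my_probe(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        void __iomem *base;

        if (!res)
                return -ENODEV;

        /* res->start is a resource_size_t and is now passed through intact */
        base = devm_ioremap(&pdev->dev, res->start, res->end - res->start + 1);
        if (!base)
                return -ENOMEM;
        /* ... */
        return 0;
}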
index b71cf93c529adaa3364afa4a1b997c54cc9ef363..bb5bd0c0f030a9cf44c57ef07b50fac3e7170ce8 100644 (file)
@@ -16,9 +16,8 @@
  * assembly versions such as arch/ppc/lib/div64.S and arch/sh/lib/div64.S.
  */
 
-#include <linux/types.h>
 #include <linux/module.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
 
 /* Not needed on 64bit architectures */
 #if BITS_PER_LONG == 32
@@ -58,10 +57,31 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
 
 EXPORT_SYMBOL(__div64_32);
 
+#ifndef div_s64_rem
+s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
+{
+       u64 quotient;
+
+       if (dividend < 0) {
+               quotient = div_u64_rem(-dividend, abs(divisor), (u32 *)remainder);
+               *remainder = -*remainder;
+               if (divisor > 0)
+                       quotient = -quotient;
+       } else {
+               quotient = div_u64_rem(dividend, abs(divisor), (u32 *)remainder);
+               if (divisor < 0)
+                       quotient = -quotient;
+       }
+       return quotient;
+}
+EXPORT_SYMBOL(div_s64_rem);
+#endif
+
 /* 64bit divisor, dividend and result. dynamic precision */
-uint64_t div64_64(uint64_t dividend, uint64_t divisor)
+#ifndef div64_u64
+u64 div64_u64(u64 dividend, u64 divisor)
 {
-       uint32_t high, d;
+       u32 high, d;
 
        high = divisor >> 32;
        if (high) {
@@ -72,10 +92,9 @@ uint64_t div64_64(uint64_t dividend, uint64_t divisor)
        } else
                d = divisor;
 
-       do_div(dividend, d);
-
-       return dividend;
+       return div_u64(dividend, d);
 }
-EXPORT_SYMBOL(div64_64);
+EXPORT_SYMBOL(div64_u64);
+#endif
 
 #endif /* BITS_PER_LONG == 32 */
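As a quick illustration of the math64 helpers introduced in the hunk above, here is a minimal, hypothetical caller; only div_s64_rem() and div64_u64() come from this patch, the function name and values are invented:

#include <linux/kernel.h>
#include <linux/math64.h>

/* Split a signed nanosecond delta into whole seconds plus remainder, and
 * compute a byte rate with a full 64/64-bit divide (assumes the product
 * bytes * NSEC_PER_SEC still fits in 64 bits).
 */
static void math64_example(s64 delta_ns, u64 bytes, u64 interval_ns)
{
        s32 rem;
        s64 secs = div_s64_rem(delta_ns, 1000000000, &rem);
        u64 rate = div64_u64(bytes * 1000000000ULL, interval_ns);

        pr_debug("secs=%lld rem=%d rate=%llu bytes/s\n",
                 (long long)secs, rem, (unsigned long long)rate);
}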
index 78ccd73a884188f9e9348df1e4ad6e0e527e24c0..24c59ded47a05d5617e2fe2501077f990de1ec4b 100644 (file)
 
 #define BITOP_WORD(nr)         ((nr) / BITS_PER_LONG)
 
-/**
- * find_next_bit - find the next set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bitnumber to start searching at
- * @size: The maximum size to search
+#ifdef CONFIG_GENERIC_FIND_NEXT_BIT
+/*
+ * Find the next set bit in a memory region.
  */
 unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
-               unsigned long offset)
+                           unsigned long offset)
 {
        const unsigned long *p = addr + BITOP_WORD(offset);
        unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -60,7 +58,6 @@ found_first:
 found_middle:
        return result + __ffs(tmp);
 }
-
 EXPORT_SYMBOL(find_next_bit);
 
 /*
@@ -68,7 +65,7 @@ EXPORT_SYMBOL(find_next_bit);
  * Linus' asm-alpha/bitops.h.
  */
 unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
-               unsigned long offset)
+                                unsigned long offset)
 {
        const unsigned long *p = addr + BITOP_WORD(offset);
        unsigned long result = offset & ~(BITS_PER_LONG-1);
@@ -105,8 +102,62 @@ found_first:
 found_middle:
        return result + ffz(tmp);
 }
-
 EXPORT_SYMBOL(find_next_zero_bit);
+#endif /* CONFIG_GENERIC_FIND_NEXT_BIT */
+
+#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
+/*
+ * Find the first set bit in a memory region.
+ */
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+       const unsigned long *p = addr;
+       unsigned long result = 0;
+       unsigned long tmp;
+
+       while (size & ~(BITS_PER_LONG-1)) {
+               if ((tmp = *(p++)))
+                       goto found;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+
+       tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+       if (tmp == 0UL)         /* Are any bits set? */
+               return result + size;   /* Nope. */
+found:
+       return result + __ffs(tmp);
+}
+EXPORT_SYMBOL(find_first_bit);
+
+/*
+ * Find the first cleared bit in a memory region.
+ */
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+       const unsigned long *p = addr;
+       unsigned long result = 0;
+       unsigned long tmp;
+
+       while (size & ~(BITS_PER_LONG-1)) {
+               if (~(tmp = *(p++)))
+                       goto found;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+
+       tmp = (*p) | (~0UL << size);
+       if (tmp == ~0UL)        /* Are any bits zero? */
+               return result + size;   /* Nope. */
+found:
+       return result + ffz(tmp);
+}
+EXPORT_SYMBOL(find_first_zero_bit);
+#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
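A minimal usage sketch for the generic bit-search helpers above; the bitmap and its size are invented for illustration, and it assumes the architecture either selects CONFIG_GENERIC_FIND_FIRST_BIT or provides equivalent helpers:

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/kernel.h>

#define EXAMPLE_NR_BITS 128

static DECLARE_BITMAP(example_map, EXAMPLE_NR_BITS);

static void example_walk_bits(void)
{
        unsigned long bit;

        set_bit(10, example_map);
        set_bit(75, example_map);

        /* Start at the first set bit, then step through the rest;
         * find_*_bit() return EXAMPLE_NR_BITS when nothing is left. */
        for (bit = find_first_bit(example_map, EXAMPLE_NR_BITS);
             bit < EXAMPLE_NR_BITS;
             bit = find_next_bit(example_map, EXAMPLE_NR_BITS, bit + 1))
                pr_debug("bit %lu is set\n", bit);
}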
 
 #ifdef __BIG_ENDIAN
 
index afbb0b1023d46cd5a3619175730c7ced7bc1e419..7a02e173f02773c2bc5fc3bfa763e269ef1b9f7a 100644 (file)
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -385,8 +385,8 @@ void idr_remove(struct idr *idp, int id)
        while (idp->id_free_cnt >= IDR_FREE_MAX) {
                p = alloc_layer(idp);
                kmem_cache_free(idr_layer_cache, p);
-               return;
        }
+       return;
 }
 EXPORT_SYMBOL(idr_remove);
 
@@ -585,12 +585,11 @@ static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer)
        memset(idr_layer, 0, sizeof(struct idr_layer));
 }
 
-static  int init_id_cache(void)
+void __init idr_init_cache(void)
 {
-       if (!idr_layer_cache)
-               idr_layer_cache = kmem_cache_create("idr_layer_cache",
-                       sizeof(struct idr_layer), 0, 0, idr_cache_ctor);
-       return 0;
+       idr_layer_cache = kmem_cache_create("idr_layer_cache",
+                               sizeof(struct idr_layer), 0, SLAB_PANIC,
+                               idr_cache_ctor);
 }
 
 /**
@@ -602,7 +601,6 @@ static  int init_id_cache(void)
  */
 void idr_init(struct idr *idp)
 {
-       init_id_cache();
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
 }
index 845f91d3ac1226ad6bee1e87e1fc74169ccd36cf..9762294be06257b6d6e6873dbd6b0503f21029c1 100644 (file)
@@ -811,6 +811,9 @@ DEBG("<dyn");
   ll = malloc(sizeof(*ll) * (286+30));  /* literal/length and distance code lengths */
 #endif
 
+  if (ll == NULL)
+    return 1;
+
   /* make local bit buffer */
   b = bb;
   k = bk;
index dd6ca48fe6b0675267e9591267c2af876f3454c8..37a3ea4cac9f1c0a35c3e73430a5b28814f28d82 100644 (file)
@@ -257,7 +257,7 @@ EXPORT_SYMBOL(ioport_unmap);
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
 {
        resource_size_t start = pci_resource_start(dev, bar);
-       unsigned long len = pci_resource_len(dev, bar);
+       resource_size_t len = pci_resource_len(dev, bar);
        unsigned long flags = pci_resource_flags(dev, bar);
 
        if (!len || !start)
index 120bd175aa7855044fdb5a440b6fc189b21344b3..cca37f96faa22b5cbe73de502da86cd94f61997f 100644 (file)
@@ -1,38 +1,37 @@
 /*
- *     klist.c - Routines for manipulating klists.
+ * klist.c - Routines for manipulating klists.
  *
+ * Copyright (C) 2005 Patrick Mochel
  *
- *     This klist interface provides a couple of structures that wrap around 
- *     struct list_head to provide explicit list "head" (struct klist) and 
- *     list "node" (struct klist_node) objects. For struct klist, a spinlock
- *     is included that protects access to the actual list itself. struct 
- *     klist_node provides a pointer to the klist that owns it and a kref
- *     reference count that indicates the number of current users of that node
- *     in the list.
+ * This file is released under the GPL v2.
  *
- *     The entire point is to provide an interface for iterating over a list
- *     that is safe and allows for modification of the list during the
- *     iteration (e.g. insertion and removal), including modification of the
- *     current node on the list.
+ * This klist interface provides a couple of structures that wrap around
+ * struct list_head to provide explicit list "head" (struct klist) and list
+ * "node" (struct klist_node) objects. For struct klist, a spinlock is
+ * included that protects access to the actual list itself. struct
+ * klist_node provides a pointer to the klist that owns it and a kref
+ * reference count that indicates the number of current users of that node
+ * in the list.
  *
- *     It works using a 3rd object type - struct klist_iter - that is declared
- *     and initialized before an iteration. klist_next() is used to acquire the
- *     next element in the list. It returns NULL if there are no more items.
- *     Internally, that routine takes the klist's lock, decrements the reference
- *     count of the previous klist_node and increments the count of the next
- *     klist_node. It then drops the lock and returns.
+ * The entire point is to provide an interface for iterating over a list
+ * that is safe and allows for modification of the list during the
+ * iteration (e.g. insertion and removal), including modification of the
+ * current node on the list.
  *
- *     There are primitives for adding and removing nodes to/from a klist. 
- *     When deleting, klist_del() will simply decrement the reference count. 
- *     Only when the count goes to 0 is the node removed from the list. 
- *     klist_remove() will try to delete the node from the list and block
- *     until it is actually removed. This is useful for objects (like devices)
- *     that have been removed from the system and must be freed (but must wait
- *     until all accessors have finished).
+ * It works using a 3rd object type - struct klist_iter - that is declared
+ * and initialized before an iteration. klist_next() is used to acquire the
+ * next element in the list. It returns NULL if there are no more items.
+ * Internally, that routine takes the klist's lock, decrements the
+ * reference count of the previous klist_node and increments the count of
+ * the next klist_node. It then drops the lock and returns.
  *
- *     Copyright (C) 2005 Patrick Mochel
- *
- *     This file is released under the GPL v2.
+ * There are primitives for adding and removing nodes to/from a klist.
+ * When deleting, klist_del() will simply decrement the reference count.
+ * Only when the count goes to 0 is the node removed from the list.
+ * klist_remove() will try to delete the node from the list and block until
+ * it is actually removed. This is useful for objects (like devices) that
+ * have been removed from the system and must be freed (but must wait until
+ * all accessors have finished).
  */
 
 #include <linux/klist.h>
 
 
 /**
- *     klist_init - Initialize a klist structure. 
- *     @k:     The klist we're initializing.
- *     @get:   The get function for the embedding object (NULL if none)
- *     @put:   The put function for the embedding object (NULL if none)
+ * klist_init - Initialize a klist structure.
+ * @k: The klist we're initializing.
+ * @get: The get function for the embedding object (NULL if none)
+ * @put: The put function for the embedding object (NULL if none)
  *
  * Initialises the klist structure.  If the klist_node structures are
  * going to be embedded in refcounted objects (necessary for safe
@@ -51,8 +50,7 @@
  * functions that take and release references on the embedding
  * objects.
  */
-
-void klist_init(struct klist * k, void (*get)(struct klist_node *),
+void klist_init(struct klist *k, void (*get)(struct klist_node *),
                void (*put)(struct klist_node *))
 {
        INIT_LIST_HEAD(&k->k_list);
@@ -60,26 +58,23 @@ void klist_init(struct klist * k, void (*get)(struct klist_node *),
        k->get = get;
        k->put = put;
 }
-
 EXPORT_SYMBOL_GPL(klist_init);
 
-
-static void add_head(struct klist * k, struct klist_node * n)
+static void add_head(struct klist *k, struct klist_node *n)
 {
        spin_lock(&k->k_lock);
        list_add(&n->n_node, &k->k_list);
        spin_unlock(&k->k_lock);
 }
 
-static void add_tail(struct klist * k, struct klist_node * n)
+static void add_tail(struct klist *k, struct klist_node *n)
 {
        spin_lock(&k->k_lock);
        list_add_tail(&n->n_node, &k->k_list);
        spin_unlock(&k->k_lock);
 }
 
-
-static void klist_node_init(struct klist * k, struct klist_node * n)
+static void klist_node_init(struct klist *k, struct klist_node *n)
 {
        INIT_LIST_HEAD(&n->n_node);
        init_completion(&n->n_removed);
@@ -89,60 +84,83 @@ static void klist_node_init(struct klist * k, struct klist_node * n)
                k->get(n);
 }
 
-
 /**
- *     klist_add_head - Initialize a klist_node and add it to front.
- *     @n:     node we're adding.
- *     @k:     klist it's going on.
+ * klist_add_head - Initialize a klist_node and add it to front.
+ * @n: node we're adding.
+ * @k: klist it's going on.
  */
-
-void klist_add_head(struct klist_node * n, struct klist * k)
+void klist_add_head(struct klist_node *n, struct klist *k)
 {
        klist_node_init(k, n);
        add_head(k, n);
 }
-
 EXPORT_SYMBOL_GPL(klist_add_head);
 
-
 /**
- *     klist_add_tail - Initialize a klist_node and add it to back.
- *     @n:     node we're adding.
- *     @k:     klist it's going on.
+ * klist_add_tail - Initialize a klist_node and add it to back.
+ * @n: node we're adding.
+ * @k: klist it's going on.
  */
-
-void klist_add_tail(struct klist_node * n, struct klist * k)
+void klist_add_tail(struct klist_node *n, struct klist *k)
 {
        klist_node_init(k, n);
        add_tail(k, n);
 }
-
 EXPORT_SYMBOL_GPL(klist_add_tail);
 
+/**
+ * klist_add_after - Init a klist_node and add it after an existing node
+ * @n: node we're adding.
+ * @pos: node to put @n after
+ */
+void klist_add_after(struct klist_node *n, struct klist_node *pos)
+{
+       struct klist *k = pos->n_klist;
+
+       klist_node_init(k, n);
+       spin_lock(&k->k_lock);
+       list_add(&n->n_node, &pos->n_node);
+       spin_unlock(&k->k_lock);
+}
+EXPORT_SYMBOL_GPL(klist_add_after);
+
+/**
+ * klist_add_before - Init a klist_node and add it before an existing node
+ * @n: node we're adding.
+ * @pos: node to put @n before
+ */
+void klist_add_before(struct klist_node *n, struct klist_node *pos)
+{
+       struct klist *k = pos->n_klist;
+
+       klist_node_init(k, n);
+       spin_lock(&k->k_lock);
+       list_add_tail(&n->n_node, &pos->n_node);
+       spin_unlock(&k->k_lock);
+}
+EXPORT_SYMBOL_GPL(klist_add_before);
 
-static void klist_release(struct kref * kref)
+static void klist_release(struct kref *kref)
 {
-       struct klist_node * n = container_of(kref, struct klist_node, n_ref);
+       struct klist_node *n = container_of(kref, struct klist_node, n_ref);
 
        list_del(&n->n_node);
        complete(&n->n_removed);
        n->n_klist = NULL;
 }
 
-static int klist_dec_and_del(struct klist_node * n)
+static int klist_dec_and_del(struct klist_node *n)
 {
        return kref_put(&n->n_ref, klist_release);
 }
 
-
 /**
- *     klist_del - Decrement the reference count of node and try to remove.
- *     @n:     node we're deleting.
+ * klist_del - Decrement the reference count of node and try to remove.
+ * @n: node we're deleting.
  */
-
-void klist_del(struct klist_node * n)
+void klist_del(struct klist_node *n)
 {
-       struct klist * k = n->n_klist;
+       struct klist *k = n->n_klist;
        void (*put)(struct klist_node *) = k->put;
 
        spin_lock(&k->k_lock);
@@ -152,48 +170,40 @@ void klist_del(struct klist_node * n)
        if (put)
                put(n);
 }
-
 EXPORT_SYMBOL_GPL(klist_del);
 
-
 /**
- *     klist_remove - Decrement the refcount of node and wait for it to go away.
- *     @n:     node we're removing.
+ * klist_remove - Decrement the refcount of node and wait for it to go away.
+ * @n: node we're removing.
  */
-
-void klist_remove(struct klist_node * n)
+void klist_remove(struct klist_node *n)
 {
        klist_del(n);
        wait_for_completion(&n->n_removed);
 }
-
 EXPORT_SYMBOL_GPL(klist_remove);
 
-
 /**
- *     klist_node_attached - Say whether a node is bound to a list or not.
- *     @n:     Node that we're testing.
+ * klist_node_attached - Say whether a node is bound to a list or not.
+ * @n: Node that we're testing.
  */
-
-int klist_node_attached(struct klist_node * n)
+int klist_node_attached(struct klist_node *n)
 {
        return (n->n_klist != NULL);
 }
-
 EXPORT_SYMBOL_GPL(klist_node_attached);
 
-
 /**
- *     klist_iter_init_node - Initialize a klist_iter structure.
- *     @k:     klist we're iterating.
- *     @i:     klist_iter we're filling.
- *     @n:     node to start with.
+ * klist_iter_init_node - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter we're filling.
+ * @n: node to start with.
  *
- *     Similar to klist_iter_init(), but starts the action off with @n, 
- *     instead of with the list head.
+ * Similar to klist_iter_init(), but starts the action off with @n,
+ * instead of with the list head.
  */
-
-void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_node * n)
+void klist_iter_init_node(struct klist *k, struct klist_iter *i,
+                         struct klist_node *n)
 {
        i->i_klist = k;
        i->i_head = &k->k_list;
@@ -201,66 +211,56 @@ void klist_iter_init_node(struct klist * k, struct klist_iter * i, struct klist_
        if (n)
                kref_get(&n->n_ref);
 }
-
 EXPORT_SYMBOL_GPL(klist_iter_init_node);
 
-
 /**
- *     klist_iter_init - Iniitalize a klist_iter structure.
- *     @k:     klist we're iterating.
- *     @i:     klist_iter structure we're filling.
+ * klist_iter_init - Initialize a klist_iter structure.
+ * @k: klist we're iterating.
+ * @i: klist_iter structure we're filling.
  *
- *     Similar to klist_iter_init_node(), but start with the list head.
+ * Similar to klist_iter_init_node(), but start with the list head.
  */
-
-void klist_iter_init(struct klist * k, struct klist_iter * i)
+void klist_iter_init(struct klist *k, struct klist_iter *i)
 {
        klist_iter_init_node(k, i, NULL);
 }
-
 EXPORT_SYMBOL_GPL(klist_iter_init);
 
-
 /**
- *     klist_iter_exit - Finish a list iteration.
- *     @i:     Iterator structure.
+ * klist_iter_exit - Finish a list iteration.
+ * @i: Iterator structure.
  *
- *     Must be called when done iterating over list, as it decrements the 
- *     refcount of the current node. Necessary in case iteration exited before
- *     the end of the list was reached, and always good form.
+ * Must be called when done iterating over list, as it decrements the
+ * refcount of the current node. Necessary in case iteration exited before
+ * the end of the list was reached, and always good form.
  */
-
-void klist_iter_exit(struct klist_iter * i)
+void klist_iter_exit(struct klist_iter *i)
 {
        if (i->i_cur) {
                klist_del(i->i_cur);
                i->i_cur = NULL;
        }
 }
-
 EXPORT_SYMBOL_GPL(klist_iter_exit);
 
-
-static struct klist_node * to_klist_node(struct list_head * n)
+static struct klist_node *to_klist_node(struct list_head *n)
 {
        return container_of(n, struct klist_node, n_node);
 }
 
-
 /**
- *     klist_next - Ante up next node in list.
- *     @i:     Iterator structure.
+ * klist_next - Ante up next node in list.
+ * @i: Iterator structure.
  *
- *     First grab list lock. Decrement the reference count of the previous
- *     node, if there was one. Grab the next node, increment its reference 
- *     count, drop the lock, and return that next node.
+ * First grab list lock. Decrement the reference count of the previous
+ * node, if there was one. Grab the next node, increment its reference
+ * count, drop the lock, and return that next node.
  */
-
-struct klist_node * klist_next(struct klist_iter * i)
+struct klist_node *klist_next(struct klist_iter *i)
 {
-       struct list_head * next;
-       struct klist_node * lnode = i->i_cur;
-       struct klist_node * knode = NULL;
+       struct list_head *next;
+       struct klist_node *lnode = i->i_cur;
+       struct klist_node *knode = NULL;
        void (*put)(struct klist_node *) = i->i_klist->put;
 
        spin_lock(&i->i_klist->k_lock);
@@ -281,7 +281,4 @@ struct klist_node * klist_next(struct klist_iter * i)
                put(lnode);
        return knode;
 }
-
 EXPORT_SYMBOL_GPL(klist_next);
-
-
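The new klist_add_after()/klist_add_before() entry points above slot a node relative to an existing one under the klist's lock. A minimal, hypothetical usage sketch (the list and node names are invented):

#include <linux/klist.h>

static struct klist example_list;
static struct klist_node node_a, node_b, node_c;

static void example_klist_insert(void)
{
        klist_init(&example_list, NULL, NULL);  /* no get/put callbacks */

        klist_add_tail(&node_a, &example_list);
        klist_add_after(&node_b, &node_a);      /* list is now: a, b */
        klist_add_before(&node_c, &node_b);     /* list is now: a, c, b */
}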
index 2c6490370922ccddf939db20e6bf0b71fcbf9526..718e5101c263596224f7f35b523cea27e093ecd6 100644 (file)
@@ -90,7 +90,7 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
        }
 
        pr_debug("kobject: '%s' (%p): %s: path = '%s'\n", kobject_name(kobj),
-                kobj, __FUNCTION__, path);
+                kobj, __func__, path);
 }
 
 /**
@@ -181,7 +181,7 @@ static int kobject_add_internal(struct kobject *kobj)
        }
 
        pr_debug("kobject: '%s' (%p): %s: parent: '%s', set: '%s'\n",
-                kobject_name(kobj), kobj, __FUNCTION__,
+                kobject_name(kobj), kobj, __func__,
                 parent ? kobject_name(parent) : "<NULL>",
                 kobj->kset ? kobject_name(&kobj->kset->kobj) : "<NULL>");
 
@@ -196,10 +196,10 @@ static int kobject_add_internal(struct kobject *kobj)
                        printk(KERN_ERR "%s failed for %s with "
                               "-EEXIST, don't try to register things with "
                               "the same name in the same directory.\n",
-                              __FUNCTION__, kobject_name(kobj));
+                              __func__, kobject_name(kobj));
                else
                        printk(KERN_ERR "%s failed for %s (%d)\n",
-                              __FUNCTION__, kobject_name(kobj), error);
+                              __func__, kobject_name(kobj), error);
                dump_stack();
        } else
                kobj->state_in_sysfs = 1;
@@ -216,21 +216,12 @@ static int kobject_add_internal(struct kobject *kobj)
 static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
                                  va_list vargs)
 {
-       va_list aq;
-       char *name;
-
-       va_copy(aq, vargs);
-       name = kvasprintf(GFP_KERNEL, fmt, vargs);
-       va_end(aq);
-
-       if (!name)
-               return -ENOMEM;
-
        /* Free the old name, if necessary. */
        kfree(kobj->name);
 
-       /* Now, set the new name */
-       kobj->name = name;
+       kobj->name = kvasprintf(GFP_KERNEL, fmt, vargs);
+       if (!kobj->name)
+               return -ENOMEM;
 
        return 0;
 }
@@ -246,12 +237,12 @@ static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt,
  */
 int kobject_set_name(struct kobject *kobj, const char *fmt, ...)
 {
-       va_list args;
+       va_list vargs;
        int retval;
 
-       va_start(args, fmt);
-       retval = kobject_set_name_vargs(kobj, fmt, args);
-       va_end(args);
+       va_start(vargs, fmt);
+       retval = kobject_set_name_vargs(kobj, fmt, vargs);
+       va_end(vargs);
 
        return retval;
 }
@@ -301,12 +292,9 @@ EXPORT_SYMBOL(kobject_init);
 static int kobject_add_varg(struct kobject *kobj, struct kobject *parent,
                            const char *fmt, va_list vargs)
 {
-       va_list aq;
        int retval;
 
-       va_copy(aq, vargs);
-       retval = kobject_set_name_vargs(kobj, fmt, aq);
-       va_end(aq);
+       retval = kobject_set_name_vargs(kobj, fmt, vargs);
        if (retval) {
                printk(KERN_ERR "kobject: can not set name properly!\n");
                return retval;
@@ -540,7 +528,7 @@ static void kobject_cleanup(struct kobject *kobj)
        const char *name = kobj->name;
 
        pr_debug("kobject: '%s' (%p): %s\n",
-                kobject_name(kobj), kobj, __FUNCTION__);
+                kobject_name(kobj), kobj, __func__);
 
        if (t && !t->release)
                pr_debug("kobject: '%s' (%p): does not have a release() "
@@ -600,7 +588,7 @@ void kobject_put(struct kobject *kobj)
 
 static void dynamic_kobj_release(struct kobject *kobj)
 {
-       pr_debug("kobject: (%p): %s\n", kobj, __FUNCTION__);
+       pr_debug("kobject: (%p): %s\n", kobj, __func__);
        kfree(kobj);
 }
 
@@ -657,7 +645,7 @@ struct kobject *kobject_create_and_add(const char *name, struct kobject *parent)
        retval = kobject_add(kobj, parent, "%s", name);
        if (retval) {
                printk(KERN_WARNING "%s: kobject_add error: %d\n",
-                      __FUNCTION__, retval);
+                      __func__, retval);
                kobject_put(kobj);
                kobj = NULL;
        }
@@ -765,7 +753,7 @@ static void kset_release(struct kobject *kobj)
 {
        struct kset *kset = container_of(kobj, struct kset, kobj);
        pr_debug("kobject: '%s' (%p): %s\n",
-                kobject_name(kobj), kobj, __FUNCTION__);
+                kobject_name(kobj), kobj, __func__);
        kfree(kset);
 }
 
index 9fb6b86cf6b15970ede185e8233a975dd4049d8a..2fa545a631607dc1d0498a7c2bff592ad7d115bb 100644 (file)
@@ -101,7 +101,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
        int retval = 0;
 
        pr_debug("kobject: '%s' (%p): %s\n",
-                kobject_name(kobj), kobj, __FUNCTION__);
+                kobject_name(kobj), kobj, __func__);
 
        /* search the kset we belong to */
        top_kobj = kobj;
@@ -111,7 +111,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
        if (!top_kobj->kset) {
                pr_debug("kobject: '%s' (%p): %s: attempted to send uevent "
                         "without kset!\n", kobject_name(kobj), kobj,
-                        __FUNCTION__);
+                        __func__);
                return -EINVAL;
        }
 
@@ -123,7 +123,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
                if (!uevent_ops->filter(kset, kobj)) {
                        pr_debug("kobject: '%s' (%p): %s: filter function "
                                 "caused the event to drop!\n",
-                                kobject_name(kobj), kobj, __FUNCTION__);
+                                kobject_name(kobj), kobj, __func__);
                        return 0;
                }
 
@@ -135,7 +135,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
        if (!subsystem) {
                pr_debug("kobject: '%s' (%p): %s: unset subsystem caused the "
                         "event to drop!\n", kobject_name(kobj), kobj,
-                        __FUNCTION__);
+                        __func__);
                return 0;
        }
 
@@ -177,7 +177,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
                if (retval) {
                        pr_debug("kobject: '%s' (%p): %s: uevent() returned "
                                 "%d\n", kobject_name(kobj), kobj,
-                                __FUNCTION__, retval);
+                                __func__, retval);
                        goto exit;
                }
        }
index 207147ab25e46a90cfa0d17bd703f352c56e2526..83287d3869a3d8e52129982f85af166c5b1246fc 100644 (file)
--- a/lib/lmb.c
+++ b/lib/lmb.c
@@ -46,14 +46,13 @@ void lmb_dump_all(void)
 #endif /* DEBUG */
 }
 
-static unsigned long __init lmb_addrs_overlap(u64 base1, u64 size1,
-               u64 base2, u64 size2)
+static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2,
+                                       u64 size2)
 {
        return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
 }
 
-static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
-               u64 base2, u64 size2)
+static long lmb_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2)
 {
        if (base2 == base1 + size1)
                return 1;
@@ -63,7 +62,7 @@ static long __init lmb_addrs_adjacent(u64 base1, u64 size1,
        return 0;
 }
 
-static long __init lmb_regions_adjacent(struct lmb_region *rgn,
+static long lmb_regions_adjacent(struct lmb_region *rgn,
                unsigned long r1, unsigned long r2)
 {
        u64 base1 = rgn->region[r1].base;
@@ -74,7 +73,7 @@ static long __init lmb_regions_adjacent(struct lmb_region *rgn,
        return lmb_addrs_adjacent(base1, size1, base2, size2);
 }
 
-static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
+static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
 {
        unsigned long i;
 
@@ -86,7 +85,7 @@ static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r)
 }
 
 /* Assumption: base addr of region 1 < base addr of region 2 */
-static void __init lmb_coalesce_regions(struct lmb_region *rgn,
+static void lmb_coalesce_regions(struct lmb_region *rgn,
                unsigned long r1, unsigned long r2)
 {
        rgn->region[r1].size += rgn->region[r2].size;
@@ -118,7 +117,7 @@ void __init lmb_analyze(void)
                lmb.memory.size += lmb.memory.region[i].size;
 }
 
-static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
+static long lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
 {
        unsigned long coalesced = 0;
        long adjacent, i;
@@ -182,7 +181,7 @@ static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size)
        return 0;
 }
 
-long __init lmb_add(u64 base, u64 size)
+long lmb_add(u64 base, u64 size)
 {
        struct lmb_region *_rgn = &lmb.memory;
 
@@ -194,6 +193,55 @@ long __init lmb_add(u64 base, u64 size)
 
 }
 
+long lmb_remove(u64 base, u64 size)
+{
+       struct lmb_region *rgn = &(lmb.memory);
+       u64 rgnbegin, rgnend;
+       u64 end = base + size;
+       int i;
+
+       rgnbegin = rgnend = 0; /* suppress gcc warnings */
+
+       /* Find the region where (base, size) belongs to */
+       for (i=0; i < rgn->cnt; i++) {
+               rgnbegin = rgn->region[i].base;
+               rgnend = rgnbegin + rgn->region[i].size;
+
+               if ((rgnbegin <= base) && (end <= rgnend))
+                       break;
+       }
+
+       /* Didn't find the region */
+       if (i == rgn->cnt)
+               return -1;
+
+       /* Check to see if we are removing entire region */
+       if ((rgnbegin == base) && (rgnend == end)) {
+               lmb_remove_region(rgn, i);
+               return 0;
+       }
+
+       /* Check to see if region is matching at the front */
+       if (rgnbegin == base) {
+               rgn->region[i].base = end;
+               rgn->region[i].size -= size;
+               return 0;
+       }
+
+       /* Check to see if the region is matching at the end */
+       if (rgnend == end) {
+               rgn->region[i].size -= size;
+               return 0;
+       }
+
+       /*
+        * We need to split the entry: adjust the current one to the
+        * beginning of the hole and add the region after the hole.
+        */
+       rgn->region[i].size = base - rgn->region[i].base;
+       return lmb_add_region(rgn, end, rgnend - end);
+}
+
 long __init lmb_reserve(u64 base, u64 size)
 {
        struct lmb_region *_rgn = &lmb.reserved;
@@ -426,3 +474,36 @@ int __init lmb_is_reserved(u64 addr)
        }
        return 0;
 }
+
+/*
+ * Given a <base, len>, find which memory regions belong to this range.
+ * Adjust the request and return a contiguous chunk.
+ */
+int lmb_find(struct lmb_property *res)
+{
+       int i;
+       u64 rstart, rend;
+
+       rstart = res->base;
+       rend = rstart + res->size - 1;
+
+       for (i = 0; i < lmb.memory.cnt; i++) {
+               u64 start = lmb.memory.region[i].base;
+               u64 end = start + lmb.memory.region[i].size - 1;
+
+               if (start > rend)
+                       return -1;
+
+               if ((end >= rstart) && (start < rend)) {
+                       /* adjust the request */
+                       if (rstart < start)
+                               rstart = start;
+                       if (rend > end)
+                               rend = end;
+                       res->base = rstart;
+                       res->size = rend - rstart + 1;
+                       return 0;
+               }
+       }
+       return -1;
+}
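A short, hypothetical sketch of how the new lmb_find() above might be called; struct lmb_property is assumed to carry just the base/size pair the function reads and writes:

#include <linux/lmb.h>

/* Clamp a requested physical range to the memory regions LMB knows about. */
static int example_clamp_range(u64 base, u64 size, u64 *out_base, u64 *out_size)
{
        struct lmb_property res;

        res.base = base;
        res.size = size;
        if (lmb_find(&res) < 0)
                return -1;      /* no overlap with any memory region */

        *out_base = res.base;   /* possibly trimmed at the front */
        *out_size = res.size;   /* possibly trimmed at the back */
        return 0;
}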
index 393a0e915c23b252b747c62645beb463e25843ed..119174494cb5c096eaf5b1da239dbd5a4040ebc0 100644 (file)
@@ -102,6 +102,7 @@ void percpu_counter_destroy(struct percpu_counter *fbc)
                return;
 
        free_percpu(fbc->counters);
+       fbc->counters = NULL;
 #ifdef CONFIG_HOTPLUG_CPU
        mutex_lock(&percpu_counters_lock);
        list_del(&fbc->list);
index 9508d9a7af3ef9ba482925a9d1cdc34ef2fddd0e..4f387a643d723c3853bec845ef0a90018a0e00f0 100644 (file)
 #include <linux/proportions.h>
 #include <linux/rcupdate.h>
 
-/*
- * Limit the time part in order to ensure there are some bits left for the
- * cycle counter.
- */
-#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
-
 int prop_descriptor_init(struct prop_descriptor *pd, int shift)
 {
        int err;
@@ -267,6 +261,38 @@ void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
        prop_put_global(pd, pg);
 }
 
+/*
+ * identical to __prop_inc_percpu, except that it limits this pl's fraction to
+ * @frac/PROP_FRAC_BASE by ignoring events when this limit has been exceeded.
+ */
+void __prop_inc_percpu_max(struct prop_descriptor *pd,
+                          struct prop_local_percpu *pl, long frac)
+{
+       struct prop_global *pg = prop_get_global(pd);
+
+       prop_norm_percpu(pg, pl);
+
+       if (unlikely(frac != PROP_FRAC_BASE)) {
+               unsigned long period_2 = 1UL << (pg->shift - 1);
+               unsigned long counter_mask = period_2 - 1;
+               unsigned long global_count;
+               long numerator, denominator;
+
+               numerator = percpu_counter_read_positive(&pl->events);
+               global_count = percpu_counter_read(&pg->events);
+               denominator = period_2 + (global_count & counter_mask);
+
+               if (numerator > ((denominator * frac) >> PROP_FRAC_SHIFT))
+                       goto out_put;
+       }
+
+       percpu_counter_add(&pl->events, 1);
+       percpu_counter_add(&pg->events, 1);
+
+out_put:
+       prop_put_global(pd, pg);
+}
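To make the limiting behaviour concrete, a hypothetical caller (descriptor and helper names invented; the descriptor is assumed to have been set up with prop_descriptor_init() elsewhere) that credits an event only while this CPU-local counter stays below 30% of the global completion rate:

#include <linux/proportions.h>

static struct prop_descriptor example_completions;

static void example_account_completion(struct prop_local_percpu *pl)
{
        /* Events are silently dropped once pl exceeds 30/100 of the
         * global rate, exactly as described in the comment above. */
        __prop_inc_percpu_max(&example_completions, pl,
                              (30 * PROP_FRAC_BASE) / 100);
}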
+
 /*
  * Obtain a fraction of this proportion
  *
index 65f0e758ec3819d57f1799e5563a9f90dc0e41eb..bd521716ab1a0498242e5dabc7021f1db7e0d91a 100644 (file)
@@ -114,8 +114,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
                }
        }
        if (ret == NULL)
-               ret = kmem_cache_alloc(radix_tree_node_cachep,
-                               set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+               ret = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 
        BUG_ON(radix_tree_is_indirect_ptr(ret));
        return ret;
@@ -150,8 +149,7 @@ int radix_tree_preload(gfp_t gfp_mask)
        rtp = &__get_cpu_var(radix_tree_preloads);
        while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
                preempt_enable();
-               node = kmem_cache_alloc(radix_tree_node_cachep,
-                               set_migrateflags(gfp_mask, __GFP_RECLAIMABLE));
+               node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
                if (node == NULL)
                        goto out;
                preempt_disable();
@@ -1098,7 +1096,8 @@ void __init radix_tree_init(void)
 {
        radix_tree_node_cachep = kmem_cache_create("radix_tree_node",
                        sizeof(struct radix_tree_node), 0,
-                       SLAB_PANIC, radix_tree_node_ctor);
+                       SLAB_PANIC | SLAB_RECLAIM_ACCOUNT,
+                       radix_tree_node_ctor);
        radix_tree_init_maxindex();
        hotcpu_notifier(radix_tree_callback, 0);
 }
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
new file mode 100644 (file)
index 0000000..485e304
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * ratelimit.c - Generic rate-limiting helper.
+ *
+ * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
+ *
+ * This file is released under the GPLv2.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+
+/*
+ * __ratelimit - rate limiting
+ * @ratelimit_jiffies: minimum time in jiffies between two callbacks
+ * @ratelimit_burst: number of callbacks we do before ratelimiting
+ *
+ * This enforces a rate limit: not more than @ratelimit_burst callbacks
+ * in every ratelimit_jiffies
+ */
+int __ratelimit(int ratelimit_jiffies, int ratelimit_burst)
+{
+       static DEFINE_SPINLOCK(ratelimit_lock);
+       static unsigned toks = 10 * 5 * HZ;
+       static unsigned long last_msg;
+       static int missed;
+       unsigned long flags;
+       unsigned long now = jiffies;
+
+       spin_lock_irqsave(&ratelimit_lock, flags);
+       toks += now - last_msg;
+       last_msg = now;
+       if (toks > (ratelimit_burst * ratelimit_jiffies))
+               toks = ratelimit_burst * ratelimit_jiffies;
+       if (toks >= ratelimit_jiffies) {
+               int lost = missed;
+
+               missed = 0;
+               toks -= ratelimit_jiffies;
+               spin_unlock_irqrestore(&ratelimit_lock, flags);
+               if (lost)
+                       printk(KERN_WARNING "%s: %d messages suppressed\n",
+                               __func__, lost);
+               return 1;
+       }
+       missed++;
+       spin_unlock_irqrestore(&ratelimit_lock, flags);
+       return 0;
+}
+EXPORT_SYMBOL(__ratelimit);
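A minimal, hypothetical caller of the new __ratelimit() helper, allowing at most 10 messages per 5-second window; the wrapper name is invented, and the prototype is repeated here only for the sketch (in-tree callers would pick it up from the kernel headers):

#include <linux/jiffies.h>
#include <linux/kernel.h>

extern int __ratelimit(int ratelimit_jiffies, int ratelimit_burst);

static void example_warn_ratelimited(const char *what)
{
        /* At most 10 warnings per 5 seconds; suppressed calls are counted
         * and reported by __ratelimit() once the window reopens. */
        if (__ratelimit(5 * HZ, 10))
                printk(KERN_WARNING "example: %s\n", what);
}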
index 5efafed3d6b65df9e56f968e32fb012f4cd13c65..b19b87af65a3553a0650d679cabfac9fecd4eb3b 100644 (file)
@@ -493,6 +493,33 @@ char *strsep(char **s, const char *ct)
 EXPORT_SYMBOL(strsep);
 #endif
 
+/**
+ * sysfs_streq - return true if strings are equal, modulo trailing newline
+ * @s1: one string
+ * @s2: another string
+ *
+ * This routine returns true iff two strings are equal, treating both
+ * NUL and newline-then-NUL as equivalent string terminations.  It's
+ * geared for use with sysfs input strings, which generally terminate
+ * with newlines but are compared against values without newlines.
+ */
+bool sysfs_streq(const char *s1, const char *s2)
+{
+       while (*s1 && *s1 == *s2) {
+               s1++;
+               s2++;
+       }
+
+       if (*s1 == *s2)
+               return true;
+       if (!*s1 && *s2 == '\n' && !s2[1])
+               return true;
+       if (*s1 == '\n' && !s1[1] && !*s2)
+               return true;
+       return false;
+}
+EXPORT_SYMBOL(sysfs_streq);
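A hypothetical sysfs store handler using sysfs_streq() above, so that both "fast" and "fast\n" from userspace are accepted; the attribute, flag, and function names are invented:

#include <linux/device.h>
#include <linux/string.h>

static int example_fast_mode;

static ssize_t example_mode_store(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        if (sysfs_streq(buf, "fast"))
                example_fast_mode = 1;
        else if (sysfs_streq(buf, "slow"))
                example_fast_mode = 0;
        else
                return -EINVAL;

        return count;
}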
+
 #ifndef __HAVE_ARCH_MEMSET
 /**
  * memset - Fill a region of memory with the given value
index 025922807e6e648bb7dc4eda61bfd8fa609a221c..d568894df8ccd335c4acdcb47da4a7b3c4830150 100644 (file)
@@ -31,6 +31,7 @@
 
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/iommu-helper.h>
 
 #define OFFSET(val,align) ((unsigned long)     \
                           ( (val) & ( (align) - 1)))
@@ -282,15 +283,6 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
        return (addr & ~mask) != 0;
 }
 
-static inline unsigned int is_span_boundary(unsigned int index,
-                                           unsigned int nslots,
-                                           unsigned long offset_slots,
-                                           unsigned long max_slots)
-{
-       unsigned long offset = (offset_slots + index) & (max_slots - 1);
-       return offset + nslots > max_slots;
-}
-
 /*
  * Allocates bounce buffer and returns its kernel virtual address.
  */
@@ -331,56 +323,53 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
         * request and allocate a buffer from that IO TLB pool.
         */
        spin_lock_irqsave(&io_tlb_lock, flags);
-       {
-               index = ALIGN(io_tlb_index, stride);
-               if (index >= io_tlb_nslabs)
-                       index = 0;
-               wrap = index;
-
-               do {
-                       while (is_span_boundary(index, nslots, offset_slots,
-                                               max_slots)) {
-                               index += stride;
-                               if (index >= io_tlb_nslabs)
-                                       index = 0;
-                               if (index == wrap)
-                                       goto not_found;
-                       }
-
-                       /*
-                        * If we find a slot that indicates we have 'nslots'
-                        * number of contiguous buffers, we allocate the
-                        * buffers from that slot and mark the entries as '0'
-                        * indicating unavailable.
-                        */
-                       if (io_tlb_list[index] >= nslots) {
-                               int count = 0;
-
-                               for (i = index; i < (int) (index + nslots); i++)
-                                       io_tlb_list[i] = 0;
-                               for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--)
-                                       io_tlb_list[i] = ++count;
-                               dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
-
-                               /*
-                                * Update the indices to avoid searching in
-                                * the next round.
-                                */
-                               io_tlb_index = ((index + nslots) < io_tlb_nslabs
-                                               ? (index + nslots) : 0);
-
-                               goto found;
-                       }
+       index = ALIGN(io_tlb_index, stride);
+       if (index >= io_tlb_nslabs)
+               index = 0;
+       wrap = index;
+
+       do {
+               while (iommu_is_span_boundary(index, nslots, offset_slots,
+                                             max_slots)) {
                        index += stride;
                        if (index >= io_tlb_nslabs)
                                index = 0;
-               } while (index != wrap);
+                       if (index == wrap)
+                               goto not_found;
+               }
 
-  not_found:
-               spin_unlock_irqrestore(&io_tlb_lock, flags);
-               return NULL;
-       }
-  found:
+               /*
+                * If we find a slot that indicates we have 'nslots' number of
+                * contiguous buffers, we allocate the buffers from that slot
+                * and mark the entries as '0' indicating unavailable.
+                */
+               if (io_tlb_list[index] >= nslots) {
+                       int count = 0;
+
+                       for (i = index; i < (int) (index + nslots); i++)
+                               io_tlb_list[i] = 0;
+                       for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--)
+                               io_tlb_list[i] = ++count;
+                       dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
+
+                       /*
+                        * Update the indices to avoid searching in the next
+                        * round.
+                        */
+                       io_tlb_index = ((index + nslots) < io_tlb_nslabs
+                                       ? (index + nslots) : 0);
+
+                       goto found;
+               }
+               index += stride;
+               if (index >= io_tlb_nslabs)
+                       index = 0;
+       } while (index != wrap);
+
+not_found:
+       spin_unlock_irqrestore(&io_tlb_lock, flags);
+       return NULL;
+found:
        spin_unlock_irqrestore(&io_tlb_lock, flags);
 
        /*
@@ -566,7 +555,8 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
  * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
  */
 dma_addr_t
-swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size,
+                        int dir, struct dma_attrs *attrs)
 {
        dma_addr_t dev_addr = virt_to_bus(ptr);
        void *map;
@@ -599,6 +589,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 
        return dev_addr;
 }
+EXPORT_SYMBOL(swiotlb_map_single_attrs);
+
+dma_addr_t
+swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
+{
+       return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL);
+}
 
 /*
  * Unmap a single streaming mode DMA translation.  The dma_addr and size must
@@ -609,8 +606,8 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
  * whatever the device wrote there.
  */
 void
-swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
-                    int dir)
+swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr,
+                          size_t size, int dir, struct dma_attrs *attrs)
 {
        char *dma_addr = bus_to_virt(dev_addr);
 
@@ -620,7 +617,14 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
        else if (dir == DMA_FROM_DEVICE)
                dma_mark_clean(dma_addr, size);
 }
+EXPORT_SYMBOL(swiotlb_unmap_single_attrs);
 
+void
+swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
+                    int dir)
+{
+       return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL);
+}
 /*
  * Make physical memory consistent for a single streaming mode DMA translation
  * after a transfer.
@@ -691,6 +695,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
                                  SYNC_FOR_DEVICE);
 }
 
+void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int,
+                           struct dma_attrs *);
 /*
  * Map a set of buffers described by scatterlist in streaming mode for DMA.
  * This is the scatter-gather version of the above swiotlb_map_single
@@ -708,8 +714,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr,
  * same here.
  */
 int
-swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-              int dir)
+swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems,
+                    int dir, struct dma_attrs *attrs)
 {
        struct scatterlist *sg;
        void *addr;
@@ -727,7 +733,8 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                                /* Don't panic here, we expect map_sg users
                                   to do proper error handling. */
                                swiotlb_full(hwdev, sg->length, dir, 0);
-                               swiotlb_unmap_sg(hwdev, sgl, i, dir);
+                               swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
+                                                      attrs);
                                sgl[0].dma_length = 0;
                                return 0;
                        }
@@ -738,14 +745,22 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
        }
        return nelems;
 }
+EXPORT_SYMBOL(swiotlb_map_sg_attrs);
+
+int
+swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+              int dir)
+{
+       return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
 
 /*
  * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
  * concerning calls here are the same as for swiotlb_unmap_single() above.
  */
 void
-swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
-                int dir)
+swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
+                      int nelems, int dir, struct dma_attrs *attrs)
 {
        struct scatterlist *sg;
        int i;
@@ -760,6 +775,14 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
                        dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
        }
 }
+EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
+
+void
+swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems,
+                int dir)
+{
+       return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL);
+}
 
 /*
  * Make physical memory consistent for a set of streaming mode DMA translations
index 0016ebd4dcbac755450579197d317738727816a8..3aa819d628c112cc185f9545c7ae5e3fb8a5c310 100644 (file)
@@ -143,6 +143,18 @@ config MEMORY_HOTREMOVE
        depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
        depends on MIGRATION
 
+#
+# If we have space for more page flags then we can enable additional
+# optimizations and functionality.
+#
+# Regular Sparsemem takes page flag bits for the section id if it does not
+# use a virtual memmap. Disable extended page flags for 32-bit platforms
+# that require the use of a section id in the page flags.
+#
+config PAGEFLAGS_EXTENDED
+       def_bool y
+       depends on 64BIT || SPARSEMEM_VMEMMAP || !NUMA || !SPARSEMEM
+
 # Heavily threaded applications may benefit from splitting the mm-wide
 # page_table_lock, so that faults on different parts of the user address
 # space can be handled with less contention: split it at this NR_CPUS.
index e8644b1e5527df1069f968d17d1893fa0dc3e5fa..7c4f9e09709547eca3baeac0aaffa86ac9377e04 100644 (file)
 #include <linux/fs.h>
 #include <linux/sched.h>
 #include <linux/module.h>
+#include <linux/writeback.h>
+#include <linux/device.h>
+
+
+static struct class *bdi_class;
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static struct dentry *bdi_debug_root;
+
+static void bdi_debug_init(void)
+{
+       bdi_debug_root = debugfs_create_dir("bdi", NULL);
+}
+
+static int bdi_debug_stats_show(struct seq_file *m, void *v)
+{
+       struct backing_dev_info *bdi = m->private;
+       long background_thresh;
+       long dirty_thresh;
+       long bdi_thresh;
+
+       get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
+
+#define K(x) ((x) << (PAGE_SHIFT - 10))
+       seq_printf(m,
+                  "BdiWriteback:     %8lu kB\n"
+                  "BdiReclaimable:   %8lu kB\n"
+                  "BdiDirtyThresh:   %8lu kB\n"
+                  "DirtyThresh:      %8lu kB\n"
+                  "BackgroundThresh: %8lu kB\n",
+                  (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
+                  (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
+                  K(bdi_thresh),
+                  K(dirty_thresh),
+                  K(background_thresh));
+#undef K
+
+       return 0;
+}
+
+static int bdi_debug_stats_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, bdi_debug_stats_show, inode->i_private);
+}
+
+static const struct file_operations bdi_debug_stats_fops = {
+       .open           = bdi_debug_stats_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
+{
+       bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
+       bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
+                                              bdi, &bdi_debug_stats_fops);
+}
+
+static void bdi_debug_unregister(struct backing_dev_info *bdi)
+{
+       debugfs_remove(bdi->debug_stats);
+       debugfs_remove(bdi->debug_dir);
+}
+#else
+static inline void bdi_debug_init(void)
+{
+}
+static inline void bdi_debug_register(struct backing_dev_info *bdi,
+                                     const char *name)
+{
+}
+static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
+{
+}
+#endif
+
+static ssize_t read_ahead_kb_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t count)
+{
+       struct backing_dev_info *bdi = dev_get_drvdata(dev);
+       char *end;
+       unsigned long read_ahead_kb;
+       ssize_t ret = -EINVAL;
+
+       read_ahead_kb = simple_strtoul(buf, &end, 10);
+       if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
+               bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
+               ret = count;
+       }
+       return ret;
+}
+
+#define K(pages) ((pages) << (PAGE_SHIFT - 10))
+
+#define BDI_SHOW(name, expr)                                           \
+static ssize_t name##_show(struct device *dev,                         \
+                          struct device_attribute *attr, char *page)   \
+{                                                                      \
+       struct backing_dev_info *bdi = dev_get_drvdata(dev);            \
+                                                                       \
+       return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);  \
+}
+
+BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
+
+static ssize_t min_ratio_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct backing_dev_info *bdi = dev_get_drvdata(dev);
+       char *end;
+       unsigned int ratio;
+       ssize_t ret = -EINVAL;
+
+       ratio = simple_strtoul(buf, &end, 10);
+       if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
+               ret = bdi_set_min_ratio(bdi, ratio);
+               if (!ret)
+                       ret = count;
+       }
+       return ret;
+}
+BDI_SHOW(min_ratio, bdi->min_ratio)
+
+static ssize_t max_ratio_store(struct device *dev,
+               struct device_attribute *attr, const char *buf, size_t count)
+{
+       struct backing_dev_info *bdi = dev_get_drvdata(dev);
+       char *end;
+       unsigned int ratio;
+       ssize_t ret = -EINVAL;
+
+       ratio = simple_strtoul(buf, &end, 10);
+       if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
+               ret = bdi_set_max_ratio(bdi, ratio);
+               if (!ret)
+                       ret = count;
+       }
+       return ret;
+}
+BDI_SHOW(max_ratio, bdi->max_ratio)
+
+#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
+
+static struct device_attribute bdi_dev_attrs[] = {
+       __ATTR_RW(read_ahead_kb),
+       __ATTR_RW(min_ratio),
+       __ATTR_RW(max_ratio),
+       __ATTR_NULL,
+};
+
+static __init int bdi_class_init(void)
+{
+       bdi_class = class_create(THIS_MODULE, "bdi");
+       bdi_class->dev_attrs = bdi_dev_attrs;
+       bdi_debug_init();
+       return 0;
+}
+
+postcore_initcall(bdi_class_init);
+
+int bdi_register(struct backing_dev_info *bdi, struct device *parent,
+               const char *fmt, ...)
+{
+       char *name;
+       va_list args;
+       int ret = 0;
+       struct device *dev;
+
+       va_start(args, fmt);
+       name = kvasprintf(GFP_KERNEL, fmt, args);
+       va_end(args);
+
+       if (!name)
+               return -ENOMEM;
+
+       dev = device_create(bdi_class, parent, MKDEV(0, 0), name);
+       if (IS_ERR(dev)) {
+               ret = PTR_ERR(dev);
+               goto exit;
+       }
+
+       bdi->dev = dev;
+       dev_set_drvdata(bdi->dev, bdi);
+       bdi_debug_register(bdi, name);
+
+exit:
+       kfree(name);
+       return ret;
+}
+EXPORT_SYMBOL(bdi_register);
+
+int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
+{
+       return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
+}
+EXPORT_SYMBOL(bdi_register_dev);
+
+void bdi_unregister(struct backing_dev_info *bdi)
+{
+       if (bdi->dev) {
+               bdi_debug_unregister(bdi);
+               device_unregister(bdi->dev);
+               bdi->dev = NULL;
+       }
+}
+EXPORT_SYMBOL(bdi_unregister);
 
 int bdi_init(struct backing_dev_info *bdi)
 {
        int i;
        int err;
 
+       bdi->dev = NULL;
+
+       bdi->min_ratio = 0;
+       bdi->max_ratio = 100;
+       bdi->max_prop_frac = PROP_FRAC_BASE;
+
        for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
                err = percpu_counter_init_irq(&bdi->bdi_stat[i], 0);
                if (err)
@@ -33,6 +250,8 @@ void bdi_destroy(struct backing_dev_info *bdi)
 {
        int i;
 
+       bdi_unregister(bdi);
+
        for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
                percpu_counter_destroy(&bdi->bdi_stat[i]);
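The registration API added above (bdi_register(), bdi_register_dev(), bdi_unregister()) hangs each backing_dev_info off the new "bdi" class so it shows up under /sys/class/bdi/. A rough, hypothetical lifecycle sketch for a driver-owned bdi (names invented, error handling and the usual ra_pages/capabilities setup omitted):

#include <linux/backing-dev.h>
#include <linux/kdev_t.h>

static struct backing_dev_info example_bdi;

static int example_bdi_setup(dev_t devt)
{
        int err;

        err = bdi_init(&example_bdi);
        if (err)
                return err;

        /* Appears as /sys/class/bdi/<major>:<minor>/ */
        return bdi_register_dev(&example_bdi, devt);
}

static void example_bdi_teardown(void)
{
        bdi_destroy(&example_bdi);      /* also unregisters via bdi_unregister() */
}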
 
index 2ccea700968f7ecaaa265023e79e52d540a82871..e8fb927392b9ac02b4ad16878847551d103a1b43 100644 (file)
@@ -111,44 +111,74 @@ static unsigned long __init init_bootmem_core(pg_data_t *pgdat,
  * might be used for boot-time allocations - or it might get added
  * to the free page pool later on.
  */
-static int __init reserve_bootmem_core(bootmem_data_t *bdata,
+static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
                        unsigned long addr, unsigned long size, int flags)
 {
        unsigned long sidx, eidx;
        unsigned long i;
-       int ret;
+
+       BUG_ON(!size);
+
+       /* out of range, does not overlap this node */
+       if (addr + size < bdata->node_boot_start ||
+               PFN_DOWN(addr) > bdata->node_low_pfn)
+               return 0;
 
        /*
-        * round up, partially reserved pages are considered
-        * fully reserved.
+        * Convert the range to bitmap indexes within this node.
         */
+       if (addr > bdata->node_boot_start)
+               sidx = PFN_DOWN(addr - bdata->node_boot_start);
+       else
+               sidx = 0;
+
+       eidx = PFN_UP(addr + size - bdata->node_boot_start);
+       if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
+               eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
+
+       for (i = sidx; i < eidx; i++) {
+               if (test_bit(i, bdata->node_bootmem_map)) {
+                       if (flags & BOOTMEM_EXCLUSIVE)
+                               return -EBUSY;
+               }
+       }
+
+       return 0;
+
+}
+
+static void __init reserve_bootmem_core(bootmem_data_t *bdata,
+                       unsigned long addr, unsigned long size, int flags)
+{
+       unsigned long sidx, eidx;
+       unsigned long i;
+
        BUG_ON(!size);
-       BUG_ON(PFN_DOWN(addr) >= bdata->node_low_pfn);
-       BUG_ON(PFN_UP(addr + size) > bdata->node_low_pfn);
-       BUG_ON(addr < bdata->node_boot_start);
 
-       sidx = PFN_DOWN(addr - bdata->node_boot_start);
+       /* out of range */
+       if (addr + size < bdata->node_boot_start ||
+               PFN_DOWN(addr) > bdata->node_low_pfn)
+               return;
+
+       /*
+        * Round the range to bitmap indexes; a partially covered page counts in full.
+        */
+       if (addr > bdata->node_boot_start)
+               sidx = PFN_DOWN(addr - bdata->node_boot_start);
+       else
+               sidx = 0;
+
        eidx = PFN_UP(addr + size - bdata->node_boot_start);
+       if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
+               eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
 
-       for (i = sidx; i < eidx; i++)
+       for (i = sidx; i < eidx; i++) {
                if (test_and_set_bit(i, bdata->node_bootmem_map)) {
 #ifdef CONFIG_DEBUG_BOOTMEM
                        printk("hm, page %08lx reserved twice.\n", i*PAGE_SIZE);
 #endif
-                       if (flags & BOOTMEM_EXCLUSIVE) {
-                               ret = -EBUSY;
-                               goto err;
-                       }
                }
-
-       return 0;
-
-err:
-       /* unreserve memory we accidentally reserved */
-       for (i--; i >= sidx; i--)
-               clear_bit(i, bdata->node_bootmem_map);
-
-       return ret;
+       }
 }
 
 static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
@@ -206,9 +236,11 @@ void * __init
 __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
              unsigned long align, unsigned long goal, unsigned long limit)
 {
-       unsigned long offset, remaining_size, areasize, preferred;
+       unsigned long areasize, preferred;
        unsigned long i, start = 0, incr, eidx, end_pfn;
        void *ret;
+       unsigned long node_boot_start;
+       void *node_bootmem_map;
 
        if (!size) {
                printk("__alloc_bootmem_core(): zero-sized request\n");
@@ -216,70 +248,83 @@ __alloc_bootmem_core(struct bootmem_data *bdata, unsigned long size,
        }
        BUG_ON(align & (align-1));
 
-       if (limit && bdata->node_boot_start >= limit)
-               return NULL;
-
        /* on nodes without memory - bootmem_map is NULL */
        if (!bdata->node_bootmem_map)
                return NULL;
 
+       /* bdata->node_boot_start may only be (12+6)-bit aligned (e.g. on x86_64), so align it up here */
+       node_boot_start = bdata->node_boot_start;
+       node_bootmem_map = bdata->node_bootmem_map;
+       if (align) {
+               node_boot_start = ALIGN(bdata->node_boot_start, align);
+               if (node_boot_start > bdata->node_boot_start)
+                       node_bootmem_map = (unsigned long *)bdata->node_bootmem_map +
+                           PFN_DOWN(node_boot_start - bdata->node_boot_start)/BITS_PER_LONG;
+       }
+
+       if (limit && node_boot_start >= limit)
+               return NULL;
+
        end_pfn = bdata->node_low_pfn;
        limit = PFN_DOWN(limit);
        if (limit && end_pfn > limit)
                end_pfn = limit;
 
-       eidx = end_pfn - PFN_DOWN(bdata->node_boot_start);
-       offset = 0;
-       if (align && (bdata->node_boot_start & (align - 1UL)) != 0)
-               offset = align - (bdata->node_boot_start & (align - 1UL));
-       offset = PFN_DOWN(offset);
+       eidx = end_pfn - PFN_DOWN(node_boot_start);
 
        /*
         * We try to allocate bootmem pages above 'goal'
         * first, then we try to allocate lower pages.
         */
-       if (goal && goal >= bdata->node_boot_start && PFN_DOWN(goal) < end_pfn) {
-               preferred = goal - bdata->node_boot_start;
+       preferred = 0;
+       if (goal && PFN_DOWN(goal) < end_pfn) {
+               if (goal > node_boot_start)
+                       preferred = goal - node_boot_start;
 
-               if (bdata->last_success >= preferred)
+               if (bdata->last_success > node_boot_start &&
+                       bdata->last_success - node_boot_start >= preferred)
                        if (!limit || (limit && limit > bdata->last_success))
-                               preferred = bdata->last_success;
-       } else
-               preferred = 0;
+                               preferred = bdata->last_success - node_boot_start;
+       }
 
-       preferred = PFN_DOWN(ALIGN(preferred, align)) + offset;
+       preferred = PFN_DOWN(ALIGN(preferred, align));
        areasize = (size + PAGE_SIZE-1) / PAGE_SIZE;
        incr = align >> PAGE_SHIFT ? : 1;
 
 restart_scan:
-       for (i = preferred; i < eidx; i += incr) {
+       for (i = preferred; i < eidx;) {
                unsigned long j;
-               i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
+
+               i = find_next_zero_bit(node_bootmem_map, eidx, i);
                i = ALIGN(i, incr);
                if (i >= eidx)
                        break;
-               if (test_bit(i, bdata->node_bootmem_map))
+               if (test_bit(i, node_bootmem_map)) {
+                       i += incr;
                        continue;
+               }
                for (j = i + 1; j < i + areasize; ++j) {
                        if (j >= eidx)
                                goto fail_block;
-                       if (test_bit(j, bdata->node_bootmem_map))
+                       if (test_bit(j, node_bootmem_map))
                                goto fail_block;
                }
                start = i;
                goto found;
        fail_block:
                i = ALIGN(j, incr);
+               if (i == j)
+                       i += incr;
        }
 
-       if (preferred > offset) {
-               preferred = offset;
+       if (preferred > 0) {
+               preferred = 0;
                goto restart_scan;
        }
        return NULL;
 
 found:
-       bdata->last_success = PFN_PHYS(start);
+       bdata->last_success = PFN_PHYS(start) + node_boot_start;
        BUG_ON(start >= eidx);
 
        /*
@@ -289,6 +334,7 @@ found:
         */
        if (align < PAGE_SIZE &&
            bdata->last_offset && bdata->last_pos+1 == start) {
+               unsigned long offset, remaining_size;
                offset = ALIGN(bdata->last_offset, align);
                BUG_ON(offset > PAGE_SIZE);
                remaining_size = PAGE_SIZE - offset;
@@ -297,14 +343,12 @@ found:
                        /* last_pos unchanged */
                        bdata->last_offset = offset + size;
                        ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
-                                          offset +
-                                          bdata->node_boot_start);
+                                          offset + node_boot_start);
                } else {
                        remaining_size = size - remaining_size;
                        areasize = (remaining_size + PAGE_SIZE-1) / PAGE_SIZE;
                        ret = phys_to_virt(bdata->last_pos * PAGE_SIZE +
-                                          offset +
-                                          bdata->node_boot_start);
+                                          offset + node_boot_start);
                        bdata->last_pos = start + areasize - 1;
                        bdata->last_offset = remaining_size;
                }
@@ -312,14 +356,14 @@ found:
        } else {
                bdata->last_pos = start + areasize - 1;
                bdata->last_offset = size & ~PAGE_MASK;
-               ret = phys_to_virt(start * PAGE_SIZE + bdata->node_boot_start);
+               ret = phys_to_virt(start * PAGE_SIZE + node_boot_start);
        }
 
        /*
         * Reserve the area now:
         */
        for (i = start; i < start + areasize; i++)
-               if (unlikely(test_and_set_bit(i, bdata->node_bootmem_map)))
+               if (unlikely(test_and_set_bit(i, node_bootmem_map)))
                        BUG();
        memset(ret, 0, size);
        return ret;
@@ -401,6 +445,11 @@ unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
 void __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
                                 unsigned long size, int flags)
 {
+       int ret;
+
+       ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
+       if (ret < 0)
+               return;
        reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
 }
 
@@ -412,6 +461,7 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
 
 unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
 {
+       register_page_bootmem_info_node(pgdat);
        return free_all_bootmem_core(pgdat);
 }
 
@@ -426,7 +476,18 @@ unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
 int __init reserve_bootmem(unsigned long addr, unsigned long size,
                            int flags)
 {
-       return reserve_bootmem_core(NODE_DATA(0)->bdata, addr, size, flags);
+       bootmem_data_t *bdata;
+       int ret;
+
+       list_for_each_entry(bdata, &bdata_list, list) {
+               ret = can_reserve_bootmem_core(bdata, addr, size, flags);
+               if (ret < 0)
+                       return ret;
+       }
+       list_for_each_entry(bdata, &bdata_list, list)
+               reserve_bootmem_core(bdata, addr, size, flags);
+
+       return 0;
 }
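
A minimal caller sketch (hypothetical, not from this patch) of the check-then-commit behaviour above: with BOOTMEM_EXCLUSIVE, the whole request is validated on every node before any bit is set, so a failure needs no rollback.

static int __init example_reserve_exclusive(unsigned long base,
                                            unsigned long size)
{
        int ret;

        ret = reserve_bootmem(base, size, BOOTMEM_EXCLUSIVE);
        if (ret < 0)
                return ret;     /* already in use somewhere; nothing was reserved */
        return 0;
}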
 #endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
@@ -484,6 +545,37 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
        return __alloc_bootmem(size, align, goal);
 }
 
+#ifdef CONFIG_SPARSEMEM
+void * __init alloc_bootmem_section(unsigned long size,
+                                   unsigned long section_nr)
+{
+       void *ptr;
+       unsigned long limit, goal, start_nr, end_nr, pfn;
+       struct pglist_data *pgdat;
+
+       pfn = section_nr_to_pfn(section_nr);
+       goal = PFN_PHYS(pfn);
+       limit = PFN_PHYS(section_nr_to_pfn(section_nr + 1)) - 1;
+       pgdat = NODE_DATA(early_pfn_to_nid(pfn));
+       ptr = __alloc_bootmem_core(pgdat->bdata, size, SMP_CACHE_BYTES, goal,
+                                  limit);
+
+       if (!ptr)
+               return NULL;
+
+       start_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr)));
+       end_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr) + size));
+       if (start_nr != section_nr || end_nr != section_nr) {
+               printk(KERN_WARNING "alloc_bootmem failed on section %lu.\n",
+                      section_nr);
+               free_bootmem_core(pgdat->bdata, __pa(ptr), size);
+               ptr = NULL;
+       }
+
+       return ptr;
+}
+#endif
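
A usage sketch, assuming CONFIG_SPARSEMEM; the helper name and the fallback policy are illustrative and not part of this patch. Per-section metadata is placed inside the section it describes, falling back to a plain node allocation when that is not possible.

static void * __init example_alloc_section_data(unsigned long section_nr,
                                                unsigned long bytes)
{
        void *p = alloc_bootmem_section(bytes, section_nr);

        if (!p)         /* could not be kept within the section */
                p = alloc_bootmem_node(NODE_DATA(early_pfn_to_nid(
                                section_nr_to_pfn(section_nr))), bytes);
        return p;
}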
+
 #ifndef ARCH_LOW_ADDRESS_LIMIT
 #define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
 #endif
index 34aaac451a96124d163a99397af386ecb4fc6a4c..b1f0885dda2275b09baa7d82205f6529efd093b1 100644 (file)
 #include <linux/types.h>
 #include <linux/wait.h>
 
+#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
+#define DMAPOOL_DEBUG 1
+#endif
+
 struct dma_pool {              /* the pool */
        struct list_head page_list;
        spinlock_t lock;
@@ -216,7 +220,7 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
        page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
                                         &page->dma, mem_flags);
        if (page->vaddr) {
-#ifdef CONFIG_DEBUG_SLAB
+#ifdef DMAPOOL_DEBUG
                memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
                pool_initialise_page(pool, page);
@@ -239,7 +243,7 @@ static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
 {
        dma_addr_t dma = page->dma;
 
-#ifdef CONFIG_DEBUG_SLAB
+#ifdef DMAPOOL_DEBUG
        memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
 #endif
        dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
@@ -336,7 +340,7 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
        page->offset = *(int *)(page->vaddr + offset);
        retval = offset + page->vaddr;
        *handle = offset + page->dma;
-#ifdef CONFIG_DEBUG_SLAB
+#ifdef DMAPOOL_DEBUG
        memset(retval, POOL_POISON_ALLOCATED, pool->size);
 #endif
  done:
@@ -391,7 +395,7 @@ void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
        }
 
        offset = vaddr - page->vaddr;
-#ifdef CONFIG_DEBUG_SLAB
+#ifdef DMAPOOL_DEBUG
        if ((dma - page->dma) != offset) {
                if (pool->dev)
                        dev_err(pool->dev,
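
A round-trip sketch (hypothetical driver code, not from this patch): with DMAPOOL_DEBUG now tied to either slab debugging option, pool pages are poisoned when created and freed, and blocks are poisoned on allocation, so stale pointers show up as the poison pattern on SLUB kernels too.

static void example_dma_pool_roundtrip(struct device *dev)
{
        struct dma_pool *pool;
        dma_addr_t handle;
        void *buf;

        pool = dma_pool_create("example", dev, 64, 8, 0);
        if (!pool)
                return;

        buf = dma_pool_alloc(pool, GFP_KERNEL, &handle); /* POOL_POISON_ALLOCATED if DMAPOOL_DEBUG */
        if (buf)
                dma_pool_free(pool, buf, handle);        /* page re-poisoned with POOL_POISON_FREED */
        dma_pool_destroy(pool);
}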
index 3c0f1e99f5e40ad90a116f62a39ca1e0ca3f5fa7..343cfdfebd9ea0931b9ac6c4e91ed61ba0262730 100644 (file)
@@ -49,7 +49,7 @@ asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
                goto out;
        }
 
-       if (mapping->a_ops->get_xip_page) {
+       if (mapping->a_ops->get_xip_mem) {
                switch (advice) {
                case POSIX_FADV_NORMAL:
                case POSIX_FADV_RANDOM:
index 07e9d9258b486f804a2af0b98972104f2d63bd89..239d36163bbe53fbb2115595e82ed9aef3180b02 100644 (file)
@@ -576,10 +576,12 @@ EXPORT_SYMBOL(unlock_page);
  */
 void end_page_writeback(struct page *page)
 {
-       if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
-               if (!test_clear_page_writeback(page))
-                       BUG();
-       }
+       if (TestClearPageReclaim(page))
+               rotate_reclaimable_page(page);
+
+       if (!test_clear_page_writeback(page))
+               BUG();
+
        smp_mb__after_clear_bit();
        wake_up_page(page, PG_writeback);
 }
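
For context, a sketch of a typical caller (a hypothetical filesystem completion path, not from this patch): after this change end_page_writeback() always clears PG_writeback itself and only asks rotate_reclaimable_page() to move PG_reclaim pages to the tail of the inactive list.

static void example_write_end_io(struct page *page, int err)
{
        if (err)
                SetPageError(page);
        end_page_writeback(page);       /* clears PG_writeback and wakes waiters */
}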
index 5e598c42afd726be44f9d862a086d52ed8adbd2a..3e744abcce9daa8aada1487bd42e31b769c414bf 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/rmap.h>
 #include <linux/sched.h>
 #include <asm/tlbflush.h>
+#include <asm/io.h>
 
 /*
  * We do use our own empty page to avoid interference with other users
@@ -42,37 +43,41 @@ static struct page *xip_sparse_page(void)
 
 /*
  * This is a file read routine for execute in place files, and uses
- * the mapping->a_ops->get_xip_page() function for the actual low-level
+ * the mapping->a_ops->get_xip_mem() function for the actual low-level
  * stuff.
  *
  * Note the struct file* is not used at all.  It may be NULL.
  */
-static void
+static ssize_t
 do_xip_mapping_read(struct address_space *mapping,
                    struct file_ra_state *_ra,
                    struct file *filp,
-                   loff_t *ppos,
-                   read_descriptor_t *desc,
-                   read_actor_t actor)
+                   char __user *buf,
+                   size_t len,
+                   loff_t *ppos)
 {
        struct inode *inode = mapping->host;
        pgoff_t index, end_index;
        unsigned long offset;
-       loff_t isize;
+       loff_t isize, pos;
+       size_t copied = 0, error = 0;
 
-       BUG_ON(!mapping->a_ops->get_xip_page);
+       BUG_ON(!mapping->a_ops->get_xip_mem);
 
-       index = *ppos >> PAGE_CACHE_SHIFT;
-       offset = *ppos & ~PAGE_CACHE_MASK;
+       pos = *ppos;
+       index = pos >> PAGE_CACHE_SHIFT;
+       offset = pos & ~PAGE_CACHE_MASK;
 
        isize = i_size_read(inode);
        if (!isize)
                goto out;
 
        end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
-       for (;;) {
-               struct page *page;
-               unsigned long nr, ret;
+       do {
+               unsigned long nr, left;
+               void *xip_mem;
+               unsigned long xip_pfn;
+               int zero = 0;
 
                /* nr is the maximum number of bytes to copy from this page */
                nr = PAGE_CACHE_SIZE;
@@ -85,19 +90,17 @@ do_xip_mapping_read(struct address_space *mapping,
                        }
                }
                nr = nr - offset;
+               if (nr > len)
+                       nr = len;
 
-               page = mapping->a_ops->get_xip_page(mapping,
-                       index*(PAGE_SIZE/512), 0);
-               if (!page)
-                       goto no_xip_page;
-               if (unlikely(IS_ERR(page))) {
-                       if (PTR_ERR(page) == -ENODATA) {
+               error = mapping->a_ops->get_xip_mem(mapping, index, 0,
+                                                       &xip_mem, &xip_pfn);
+               if (unlikely(error)) {
+                       if (error == -ENODATA) {
                                /* sparse */
-                               page = ZERO_PAGE(0);
-                       } else {
-                               desc->error = PTR_ERR(page);
+                               zero = 1;
+                       } else
                                goto out;
-                       }
                }
 
                /* If users can be writing to this page using arbitrary
@@ -105,10 +108,10 @@ do_xip_mapping_read(struct address_space *mapping,
                 * before reading the page on the kernel side.
                 */
                if (mapping_writably_mapped(mapping))
-                       flush_dcache_page(page);
+                       /* no struct page here; an address-based cache flush would go here */ ;
 
                /*
-                * Ok, we have the page, so now we can copy it to user space...
+                * Ok, we have the mem, so now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used..
                 * NOTE! This may not be the same as how much of a user buffer
@@ -116,47 +119,38 @@ do_xip_mapping_read(struct address_space *mapping,
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
-               ret = actor(desc, page, offset, nr);
-               offset += ret;
-               index += offset >> PAGE_CACHE_SHIFT;
-               offset &= ~PAGE_CACHE_MASK;
+               if (!zero)
+                       left = __copy_to_user(buf+copied, xip_mem+offset, nr);
+               else
+                       left = __clear_user(buf + copied, nr);
 
-               if (ret == nr && desc->count)
-                       continue;
-               goto out;
+               if (left) {
+                       error = -EFAULT;
+                       goto out;
+               }
 
-no_xip_page:
-               /* Did not get the page. Report it */
-               desc->error = -EIO;
-               goto out;
-       }
+               copied += (nr - left);
+               offset += (nr - left);
+               index += offset >> PAGE_CACHE_SHIFT;
+               offset &= ~PAGE_CACHE_MASK;
+       } while (copied < len);
 
 out:
-       *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
+       *ppos = pos + copied;
        if (filp)
                file_accessed(filp);
+
+       return (copied ? copied : error);
 }
 
 ssize_t
 xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
 {
-       read_descriptor_t desc;
-
        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;
 
-       desc.written = 0;
-       desc.arg.buf = buf;
-       desc.count = len;
-       desc.error = 0;
-
-       do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
-                           ppos, &desc, file_read_actor);
-
-       if (desc.written)
-               return desc.written;
-       else
-               return desc.error;
+       return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
+                           buf, len, ppos);
 }
 EXPORT_SYMBOL_GPL(xip_file_read);
 
@@ -211,13 +205,16 @@ __xip_unmap (struct address_space * mapping,
  *
  * This function is derived from filemap_fault, but used for execute in place
  */
-static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
+static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-       struct file *file = area->vm_file;
+       struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
-       struct page *page;
        pgoff_t size;
+       void *xip_mem;
+       unsigned long xip_pfn;
+       struct page *page;
+       int error;
 
        /* XXX: are VM_FAULT_ codes OK? */
 
@@ -225,35 +222,44 @@ static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
        if (vmf->pgoff >= size)
                return VM_FAULT_SIGBUS;
 
-       page = mapping->a_ops->get_xip_page(mapping,
-                                       vmf->pgoff*(PAGE_SIZE/512), 0);
-       if (!IS_ERR(page))
-               goto out;
-       if (PTR_ERR(page) != -ENODATA)
+       error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
+                                               &xip_mem, &xip_pfn);
+       if (likely(!error))
+               goto found;
+       if (error != -ENODATA)
                return VM_FAULT_OOM;
 
        /* sparse block */
-       if ((area->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
-           (area->vm_flags & (VM_SHARED| VM_MAYSHARE)) &&
+       if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
+           (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
            (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
+               int err;
+
                /* maybe shared writable, allocate new block */
-               page = mapping->a_ops->get_xip_page(mapping,
-                                       vmf->pgoff*(PAGE_SIZE/512), 1);
-               if (IS_ERR(page))
+               error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
+                                                       &xip_mem, &xip_pfn);
+               if (error)
                        return VM_FAULT_SIGBUS;
-               /* unmap page at pgoff from all other vmas */
+               /* unmap sparse mappings at pgoff from all other vmas */
                __xip_unmap(mapping, vmf->pgoff);
+
+found:
+               err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
+                                                       xip_pfn);
+               if (err == -ENOMEM)
+                       return VM_FAULT_OOM;
+               BUG_ON(err);
+               return VM_FAULT_NOPAGE;
        } else {
                /* not shared and writable, use xip_sparse_page() */
                page = xip_sparse_page();
                if (!page)
                        return VM_FAULT_OOM;
-       }
 
-out:
-       page_cache_get(page);
-       vmf->page = page;
-       return 0;
+               page_cache_get(page);
+               vmf->page = page;
+               return 0;
+       }
 }
 
 static struct vm_operations_struct xip_file_vm_ops = {
@@ -262,11 +268,11 @@ static struct vm_operations_struct xip_file_vm_ops = {
 
 int xip_file_mmap(struct file * file, struct vm_area_struct * vma)
 {
-       BUG_ON(!file->f_mapping->a_ops->get_xip_page);
+       BUG_ON(!file->f_mapping->a_ops->get_xip_mem);
 
        file_accessed(file);
        vma->vm_ops = &xip_file_vm_ops;
-       vma->vm_flags |= VM_CAN_NONLINEAR;
+       vma->vm_flags |= VM_CAN_NONLINEAR | VM_MIXEDMAP;
        return 0;
 }
 EXPORT_SYMBOL_GPL(xip_file_mmap);
@@ -279,17 +285,17 @@ __xip_file_write(struct file *filp, const char __user *buf,
        const struct address_space_operations *a_ops = mapping->a_ops;
        struct inode    *inode = mapping->host;
        long            status = 0;
-       struct page     *page;
        size_t          bytes;
        ssize_t         written = 0;
 
-       BUG_ON(!mapping->a_ops->get_xip_page);
+       BUG_ON(!mapping->a_ops->get_xip_mem);
 
        do {
                unsigned long index;
                unsigned long offset;
                size_t copied;
-               char *kaddr;
+               void *xip_mem;
+               unsigned long xip_pfn;
 
                offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */
                index = pos >> PAGE_CACHE_SHIFT;
@@ -297,28 +303,22 @@ __xip_file_write(struct file *filp, const char __user *buf,
                if (bytes > count)
                        bytes = count;
 
-               page = a_ops->get_xip_page(mapping,
-                                          index*(PAGE_SIZE/512), 0);
-               if (IS_ERR(page) && (PTR_ERR(page) == -ENODATA)) {
+               status = a_ops->get_xip_mem(mapping, index, 0,
+                                               &xip_mem, &xip_pfn);
+               if (status == -ENODATA) {
                        /* we allocate a new page unmap it */
-                       page = a_ops->get_xip_page(mapping,
-                                                  index*(PAGE_SIZE/512), 1);
-                       if (!IS_ERR(page))
+                       status = a_ops->get_xip_mem(mapping, index, 1,
+                                                       &xip_mem, &xip_pfn);
+                       if (!status)
                                /* unmap page at pgoff from all other vmas */
                                __xip_unmap(mapping, index);
                }
 
-               if (IS_ERR(page)) {
-                       status = PTR_ERR(page);
+               if (status)
                        break;
-               }
 
-               fault_in_pages_readable(buf, bytes);
-               kaddr = kmap_atomic(page, KM_USER0);
                copied = bytes -
-                       __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
-               kunmap_atomic(kaddr, KM_USER0);
-               flush_dcache_page(page);
+                       __copy_from_user_nocache(xip_mem + offset, buf, bytes);
 
                if (likely(copied > 0)) {
                        status = copied;
@@ -398,7 +398,7 @@ EXPORT_SYMBOL_GPL(xip_file_write);
 
 /*
  * truncate a page used for execute in place
- * functionality is analog to block_truncate_page but does use get_xip_page
+ * functionality is analogous to block_truncate_page but uses get_xip_mem
  * to get the page instead of page cache
  */
 int
@@ -408,9 +408,11 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        unsigned blocksize;
        unsigned length;
-       struct page *page;
+       void *xip_mem;
+       unsigned long xip_pfn;
+       int err;
 
-       BUG_ON(!mapping->a_ops->get_xip_page);
+       BUG_ON(!mapping->a_ops->get_xip_mem);
 
        blocksize = 1 << mapping->host->i_blkbits;
        length = offset & (blocksize - 1);
@@ -421,18 +423,16 @@ xip_truncate_page(struct address_space *mapping, loff_t from)
 
        length = blocksize - length;
 
-       page = mapping->a_ops->get_xip_page(mapping,
-                                           index*(PAGE_SIZE/512), 0);
-       if (!page)
-               return -ENOMEM;
-       if (unlikely(IS_ERR(page))) {
-               if (PTR_ERR(page) == -ENODATA)
+       err = mapping->a_ops->get_xip_mem(mapping, index, 0,
+                                               &xip_mem, &xip_pfn);
+       if (unlikely(err)) {
+               if (err == -ENODATA)
                        /* Hole? No need to truncate */
                        return 0;
                else
-                       return PTR_ERR(page);
+                       return err;
        }
-       zero_user(page, offset, length);
+       memset(xip_mem + offset, 0, length);
        return 0;
 }
 EXPORT_SYMBOL_GPL(xip_truncate_page);
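
For reference, a skeleton of the address_space operation this conversion depends on. The prototype is inferred from the call sites in this file; the body is a placeholder a filesystem implementation would fill in.

static int example_get_xip_mem(struct address_space *mapping, pgoff_t pgoff,
                               int create, void **kmem, unsigned long *pfn)
{
        /*
         * Map pgoff to directly addressable memory: fill *kmem (kernel
         * virtual address) and *pfn and return 0, or return -ENODATA for
         * a hole when create == 0.
         */
        return -ENODATA;
}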
index 51c9e2c0164068681b299b37f48840af59d5c79d..bbf953eeb58bafbad0016a6f73db4844e5514c22 100644 (file)
@@ -95,13 +95,16 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
        int nid;
        struct page *page = NULL;
        struct mempolicy *mpol;
+       nodemask_t *nodemask;
        struct zonelist *zonelist = huge_zonelist(vma, address,
-                                       htlb_alloc_mask, &mpol);
-       struct zone **z;
-
-       for (z = zonelist->zones; *z; z++) {
-               nid = zone_to_nid(*z);
-               if (cpuset_zone_allowed_softwall(*z, htlb_alloc_mask) &&
+                                       htlb_alloc_mask, &mpol, &nodemask);
+       struct zone *zone;
+       struct zoneref *z;
+
+       for_each_zone_zonelist_nodemask(zone, z, zonelist,
+                                               MAX_NR_ZONES - 1, nodemask) {
+               nid = zone_to_nid(zone);
+               if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask) &&
                    !list_empty(&hugepage_freelists[nid])) {
                        page = list_entry(hugepage_freelists[nid].next,
                                          struct page, lru);
@@ -113,7 +116,7 @@ static struct page *dequeue_huge_page_vma(struct vm_area_struct *vma,
                        break;
                }
        }
-       mpol_free(mpol);        /* unref if mpol !NULL */
+       mpol_cond_put(mpol);
        return page;
 }
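
A small sketch (not from this patch) of the zoneref-based iterator used above, which replaces the open-coded walk over zonelist->zones and honours a nodemask:

static void example_walk_zonelist(struct zonelist *zonelist,
                                  nodemask_t *nodemask)
{
        struct zoneref *z;
        struct zone *zone;

        for_each_zone_zonelist_nodemask(zone, z, zonelist,
                                        MAX_NR_ZONES - 1, nodemask)
                printk(KERN_DEBUG "node %d zone %s\n",
                       zone_to_nid(zone), zone->name);
}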
 
@@ -129,6 +132,7 @@ static void update_and_free_page(struct page *page)
        }
        set_compound_page_dtor(page, NULL);
        set_page_refcounted(page);
+       arch_release_hugepage(page);
        __free_pages(page, HUGETLB_PAGE_ORDER);
 }
 
@@ -195,9 +199,14 @@ static struct page *alloc_fresh_huge_page_node(int nid)
        struct page *page;
 
        page = alloc_pages_node(nid,
-               htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN,
+               htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|
+                                               __GFP_REPEAT|__GFP_NOWARN,
                HUGETLB_PAGE_ORDER);
        if (page) {
+               if (arch_prepare_hugepage(page)) {
+                       __free_pages(page, HUGETLB_PAGE_ORDER);
+                       return NULL;
+               }
                set_compound_page_dtor(page, free_huge_page);
                spin_lock(&hugetlb_lock);
                nr_huge_pages++;
@@ -239,6 +248,11 @@ static int alloc_fresh_huge_page(void)
                hugetlb_next_nid = next_nid;
        } while (!page && hugetlb_next_nid != start_nid);
 
+       if (ret)
+               count_vm_event(HTLB_BUDDY_PGALLOC);
+       else
+               count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
+
        return ret;
 }
 
@@ -281,7 +295,8 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
        }
        spin_unlock(&hugetlb_lock);
 
-       page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN,
+       page = alloc_pages(htlb_alloc_mask|__GFP_COMP|
+                                       __GFP_REPEAT|__GFP_NOWARN,
                                        HUGETLB_PAGE_ORDER);
 
        spin_lock(&hugetlb_lock);
@@ -299,9 +314,11 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma,
                 */
                nr_huge_pages_node[nid]++;
                surplus_huge_pages_node[nid]++;
+               __count_vm_event(HTLB_BUDDY_PGALLOC);
        } else {
                nr_huge_pages--;
                surplus_huge_pages--;
+               __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
        }
        spin_unlock(&hugetlb_lock);
 
@@ -369,11 +386,19 @@ retry:
        resv_huge_pages += delta;
        ret = 0;
 free:
+       /* Free the needed pages to the hugetlb pool */
        list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+               if ((--needed) < 0)
+                       break;
                list_del(&page->lru);
-               if ((--needed) >= 0)
-                       enqueue_huge_page(page);
-               else {
+               enqueue_huge_page(page);
+       }
+
+       /* Free unnecessary surplus pages to the buddy allocator */
+       if (!list_empty(&surplus_list)) {
+               spin_unlock(&hugetlb_lock);
+               list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
+                       list_del(&page->lru);
                        /*
                         * The page has a reference count of zero already, so
                         * call free_huge_page directly instead of using
@@ -381,10 +406,9 @@ free:
                         * unlocked which is safe because free_huge_page takes
                         * hugetlb_lock before deciding how to free the page.
                         */
-                       spin_unlock(&hugetlb_lock);
                        free_huge_page(page);
-                       spin_lock(&hugetlb_lock);
                }
+               spin_lock(&hugetlb_lock);
        }
 
        return ret;
@@ -718,7 +742,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
                entry =
                    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        } else {
-               entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
+               entry = huge_pte_wrprotect(mk_pte(page, vma->vm_page_prot));
        }
        entry = pte_mkyoung(entry);
        entry = pte_mkhuge(entry);
@@ -731,8 +755,8 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
 {
        pte_t entry;
 
-       entry = pte_mkwrite(pte_mkdirty(*ptep));
-       if (ptep_set_access_flags(vma, address, ptep, entry, 1)) {
+       entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
+       if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
                update_mmu_cache(vma, address, entry);
        }
 }
@@ -762,10 +786,10 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 
                spin_lock(&dst->page_table_lock);
                spin_lock(&src->page_table_lock);
-               if (!pte_none(*src_pte)) {
+               if (!huge_pte_none(huge_ptep_get(src_pte))) {
                        if (cow)
-                               ptep_set_wrprotect(src, addr, src_pte);
-                       entry = *src_pte;
+                               huge_ptep_set_wrprotect(src, addr, src_pte);
+                       entry = huge_ptep_get(src_pte);
                        ptepage = pte_page(entry);
                        get_page(ptepage);
                        set_huge_pte_at(dst, addr, dst_pte, entry);
@@ -809,7 +833,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                        continue;
 
                pte = huge_ptep_get_and_clear(mm, address, ptep);
-               if (pte_none(pte))
+               if (huge_pte_none(pte))
                        continue;
 
                page = pte_page(pte);
@@ -873,8 +897,9 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
        spin_lock(&mm->page_table_lock);
 
        ptep = huge_pte_offset(mm, address & HPAGE_MASK);
-       if (likely(pte_same(*ptep, pte))) {
+       if (likely(pte_same(huge_ptep_get(ptep), pte))) {
                /* Break COW */
+               huge_ptep_clear_flush(vma, address, ptep);
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
                /* Make the old page be freed below */
@@ -942,7 +967,7 @@ retry:
                goto backout;
 
        ret = 0;
-       if (!pte_none(*ptep))
+       if (!huge_pte_none(huge_ptep_get(ptep)))
                goto backout;
 
        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
@@ -984,8 +1009,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * the same page in the page cache.
         */
        mutex_lock(&hugetlb_instantiation_mutex);
-       entry = *ptep;
-       if (pte_none(entry)) {
+       entry = huge_ptep_get(ptep);
+       if (huge_pte_none(entry)) {
                ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
                mutex_unlock(&hugetlb_instantiation_mutex);
                return ret;
@@ -995,7 +1020,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        spin_lock(&mm->page_table_lock);
        /* Check for a racing update before calling hugetlb_cow */
-       if (likely(pte_same(entry, *ptep)))
+       if (likely(pte_same(entry, huge_ptep_get(ptep))))
                if (write_access && !pte_write(entry))
                        ret = hugetlb_cow(mm, vma, address, ptep, entry);
        spin_unlock(&mm->page_table_lock);
@@ -1025,7 +1050,8 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 */
                pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);
 
-               if (!pte || pte_none(*pte) || (write && !pte_write(*pte))) {
+               if (!pte || huge_pte_none(huge_ptep_get(pte)) ||
+                   (write && !pte_write(huge_ptep_get(pte)))) {
                        int ret;
 
                        spin_unlock(&mm->page_table_lock);
@@ -1041,7 +1067,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
                }
 
                pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
-               page = pte_page(*pte);
+               page = pte_page(huge_ptep_get(pte));
 same_page:
                if (pages) {
                        get_page(page);
@@ -1090,7 +1116,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                        continue;
                if (huge_pmd_unshare(mm, &address, ptep))
                        continue;
-               if (!pte_none(*ptep)) {
+               if (!huge_pte_none(huge_ptep_get(ptep))) {
                        pte = huge_ptep_get_and_clear(mm, address, ptep);
                        pte = pte_mkhuge(pte_modify(pte, newprot));
                        set_huge_pte_at(mm, address, ptep, pte);
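
The conversion above goes through a small accessor layer in <asm/hugetlb.h>. On most architectures those accessors are the trivial wrappers sketched below (s390 supplies non-trivial versions, which is the point of the indirection); shown here as an illustration, not copied from this patch.

static inline pte_t huge_ptep_get(pte_t *ptep)
{
        return *ptep;
}

static inline int huge_pte_none(pte_t pte)
{
        return pte_none(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
        return pte_wrprotect(pte);
}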
index 789727309f4d52ae03ce7f687dce1fac6bcfa383..0034e947e4bc677c9220c267eaaa75d5d89e7760 100644 (file)
@@ -34,8 +34,7 @@ static inline void __put_page(struct page *page)
        atomic_dec(&page->_count);
 }
 
-extern void __init __free_pages_bootmem(struct page *page,
-                                               unsigned int order);
+extern void __free_pages_bootmem(struct page *page, unsigned int order);
 
 /*
  * function for dealing with page's order in buddy system.
index 93ee375b38e7edc7b414b7a2b6891411a45d0b0c..23a0ec3e0ea07bb0f37a001bc1f7e5a683172c22 100644 (file)
@@ -112,7 +112,7 @@ static long madvise_willneed(struct vm_area_struct * vma,
        if (!file)
                return -EBADF;
 
-       if (file->f_mapping->a_ops->get_xip_page) {
+       if (file->f_mapping->a_ops->get_xip_mem) {
                /* no bad return value, but ignore advice */
                return 0;
        }
index 2e0bfc93484b327557e6b88f509476c4f837ac8f..e46451e1d9b793563b08d8ef015927b484b0be1d 100644 (file)
 #include <linux/backing-dev.h>
 #include <linux/bit_spinlock.h>
 #include <linux/rcupdate.h>
+#include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/spinlock.h>
 #include <linux/fs.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 
 #include <asm/uaccess.h>
 
 struct cgroup_subsys mem_cgroup_subsys;
 static const int MEM_CGROUP_RECLAIM_RETRIES = 5;
+static struct kmem_cache *page_cgroup_cache;
 
 /*
  * Statistics for memory cgroup.
@@ -45,6 +48,8 @@ enum mem_cgroup_stat_index {
         */
        MEM_CGROUP_STAT_CACHE,     /* # of pages charged as cache */
        MEM_CGROUP_STAT_RSS,       /* # of pages charged as rss */
+       MEM_CGROUP_STAT_PGPGIN_COUNT,   /* # of pages paged in */
+       MEM_CGROUP_STAT_PGPGOUT_COUNT,  /* # of pages paged out */
 
        MEM_CGROUP_STAT_NSTATS,
 };
@@ -196,6 +201,13 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, int flags,
                __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_CACHE, val);
        else
                __mem_cgroup_stat_add_safe(stat, MEM_CGROUP_STAT_RSS, val);
+
+       if (charge)
+               __mem_cgroup_stat_add_safe(stat,
+                               MEM_CGROUP_STAT_PGPGIN_COUNT, 1);
+       else
+               __mem_cgroup_stat_add_safe(stat,
+                               MEM_CGROUP_STAT_PGPGOUT_COUNT, 1);
 }
 
 static struct mem_cgroup_per_zone *
@@ -236,26 +248,12 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
                                css);
 }
 
-static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
+struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 {
        return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
                                struct mem_cgroup, css);
 }
 
-void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
-{
-       struct mem_cgroup *mem;
-
-       mem = mem_cgroup_from_task(p);
-       css_get(&mem->css);
-       mm->mem_cgroup = mem;
-}
-
-void mm_free_cgroup(struct mm_struct *mm)
-{
-       css_put(&mm->mem_cgroup->css);
-}
-
 static inline int page_cgroup_locked(struct page *page)
 {
        return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
@@ -287,10 +285,10 @@ static void unlock_page_cgroup(struct page *page)
        bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
 }
 
-static void __mem_cgroup_remove_list(struct page_cgroup *pc)
+static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz,
+                       struct page_cgroup *pc)
 {
        int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-       struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
 
        if (from)
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1;
@@ -301,10 +299,10 @@ static void __mem_cgroup_remove_list(struct page_cgroup *pc)
        list_del_init(&pc->lru);
 }
 
-static void __mem_cgroup_add_list(struct page_cgroup *pc)
+static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz,
+                               struct page_cgroup *pc)
 {
        int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE;
-       struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc);
 
        if (!to) {
                MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1;
@@ -476,6 +474,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
        int zid = zone_idx(z);
        struct mem_cgroup_per_zone *mz;
 
+       BUG_ON(!mem_cont);
        mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
        if (active)
                src = &mz->active_list;
@@ -560,7 +559,7 @@ retry:
        }
        unlock_page_cgroup(page);
 
-       pc = kzalloc(sizeof(struct page_cgroup), gfp_mask);
+       pc = kmem_cache_zalloc(page_cgroup_cache, gfp_mask);
        if (pc == NULL)
                goto err;
 
@@ -574,7 +573,7 @@ retry:
                mm = &init_mm;
 
        rcu_read_lock();
-       mem = rcu_dereference(mm->mem_cgroup);
+       mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
        /*
         * For every charge from the cgroup, increment reference count
         */
@@ -602,7 +601,6 @@ retry:
                        mem_cgroup_out_of_memory(mem, gfp_mask);
                        goto out;
                }
-               congestion_wait(WRITE, HZ/10);
        }
 
        pc->ref_cnt = 1;
@@ -610,7 +608,7 @@ retry:
        pc->page = page;
        pc->flags = PAGE_CGROUP_FLAG_ACTIVE;
        if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE)
-               pc->flags |= PAGE_CGROUP_FLAG_CACHE;
+               pc->flags = PAGE_CGROUP_FLAG_CACHE;
 
        lock_page_cgroup(page);
        if (page_get_page_cgroup(page)) {
@@ -622,14 +620,14 @@ retry:
                 */
                res_counter_uncharge(&mem->res, PAGE_SIZE);
                css_put(&mem->css);
-               kfree(pc);
+               kmem_cache_free(page_cgroup_cache, pc);
                goto retry;
        }
        page_assign_page_cgroup(page, pc);
 
        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
-       __mem_cgroup_add_list(pc);
+       __mem_cgroup_add_list(mz, pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
 
        unlock_page_cgroup(page);
@@ -637,7 +635,7 @@ done:
        return 0;
 out:
        css_put(&mem->css);
-       kfree(pc);
+       kmem_cache_free(page_cgroup_cache, pc);
 err:
        return -ENOMEM;
 }
@@ -685,7 +683,7 @@ void mem_cgroup_uncharge_page(struct page *page)
        if (--(pc->ref_cnt) == 0) {
                mz = page_cgroup_zoneinfo(pc);
                spin_lock_irqsave(&mz->lru_lock, flags);
-               __mem_cgroup_remove_list(pc);
+               __mem_cgroup_remove_list(mz, pc);
                spin_unlock_irqrestore(&mz->lru_lock, flags);
 
                page_assign_page_cgroup(page, NULL);
@@ -695,7 +693,7 @@ void mem_cgroup_uncharge_page(struct page *page)
                res_counter_uncharge(&mem->res, PAGE_SIZE);
                css_put(&mem->css);
 
-               kfree(pc);
+               kmem_cache_free(page_cgroup_cache, pc);
                return;
        }
 
@@ -747,7 +745,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 
        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
-       __mem_cgroup_remove_list(pc);
+       __mem_cgroup_remove_list(mz, pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
 
        page_assign_page_cgroup(page, NULL);
@@ -759,7 +757,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage)
 
        mz = page_cgroup_zoneinfo(pc);
        spin_lock_irqsave(&mz->lru_lock, flags);
-       __mem_cgroup_add_list(pc);
+       __mem_cgroup_add_list(mz, pc);
        spin_unlock_irqrestore(&mz->lru_lock, flags);
 
        unlock_page_cgroup(newpage);
@@ -853,13 +851,10 @@ static int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp)
        return 0;
 }
 
-static ssize_t mem_cgroup_read(struct cgroup *cont,
-                       struct cftype *cft, struct file *file,
-                       char __user *userbuf, size_t nbytes, loff_t *ppos)
+static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft)
 {
-       return res_counter_read(&mem_cgroup_from_cont(cont)->res,
-                               cft->private, userbuf, nbytes, ppos,
-                               NULL);
+       return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res,
+                                   cft->private);
 }
 
 static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
@@ -871,27 +866,25 @@ static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft,
                                mem_cgroup_write_strategy);
 }
 
-static ssize_t mem_force_empty_write(struct cgroup *cont,
-                               struct cftype *cft, struct file *file,
-                               const char __user *userbuf,
-                               size_t nbytes, loff_t *ppos)
+static int mem_cgroup_reset(struct cgroup *cont, unsigned int event)
 {
-       struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
-       int ret = mem_cgroup_force_empty(mem);
-       if (!ret)
-               ret = nbytes;
-       return ret;
+       struct mem_cgroup *mem;
+
+       mem = mem_cgroup_from_cont(cont);
+       switch (event) {
+       case RES_MAX_USAGE:
+               res_counter_reset_max(&mem->res);
+               break;
+       case RES_FAILCNT:
+               res_counter_reset_failcnt(&mem->res);
+               break;
+       }
+       return 0;
 }
 
-/*
- * Note: This should be removed if cgroup supports write-only file.
- */
-static ssize_t mem_force_empty_read(struct cgroup *cont,
-                               struct cftype *cft,
-                               struct file *file, char __user *userbuf,
-                               size_t nbytes, loff_t *ppos)
+static int mem_force_empty_write(struct cgroup *cont, unsigned int event)
 {
-       return -EINVAL;
+       return mem_cgroup_force_empty(mem_cgroup_from_cont(cont));
 }
 
 static const struct mem_cgroup_stat_desc {
@@ -900,11 +893,13 @@ static const struct mem_cgroup_stat_desc {
 } mem_cgroup_stat_desc[] = {
        [MEM_CGROUP_STAT_CACHE] = { "cache", PAGE_SIZE, },
        [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, },
+       [MEM_CGROUP_STAT_PGPGIN_COUNT] = {"pgpgin", 1, },
+       [MEM_CGROUP_STAT_PGPGOUT_COUNT] = {"pgpgout", 1, },
 };
 
-static int mem_control_stat_show(struct seq_file *m, void *arg)
+static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
+                                struct cgroup_map_cb *cb)
 {
-       struct cgroup *cont = m->private;
        struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont);
        struct mem_cgroup_stat *stat = &mem_cont->stat;
        int i;
@@ -914,8 +909,7 @@ static int mem_control_stat_show(struct seq_file *m, void *arg)
 
                val = mem_cgroup_read_stat(stat, i);
                val *= mem_cgroup_stat_desc[i].unit;
-               seq_printf(m, "%s %lld\n", mem_cgroup_stat_desc[i].msg,
-                               (long long)val);
+               cb->fill(cb, mem_cgroup_stat_desc[i].msg, val);
        }
        /* showing # of active pages */
        {
@@ -925,52 +919,43 @@ static int mem_control_stat_show(struct seq_file *m, void *arg)
                                                MEM_CGROUP_ZSTAT_INACTIVE);
                active = mem_cgroup_get_all_zonestat(mem_cont,
                                                MEM_CGROUP_ZSTAT_ACTIVE);
-               seq_printf(m, "active %ld\n", (active) * PAGE_SIZE);
-               seq_printf(m, "inactive %ld\n", (inactive) * PAGE_SIZE);
+               cb->fill(cb, "active", (active) * PAGE_SIZE);
+               cb->fill(cb, "inactive", (inactive) * PAGE_SIZE);
        }
        return 0;
 }
 
-static const struct file_operations mem_control_stat_file_operations = {
-       .read = seq_read,
-       .llseek = seq_lseek,
-       .release = single_release,
-};
-
-static int mem_control_stat_open(struct inode *unused, struct file *file)
-{
-       /* XXX __d_cont */
-       struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
-
-       file->f_op = &mem_control_stat_file_operations;
-       return single_open(file, mem_control_stat_show, cont);
-}
-
 static struct cftype mem_cgroup_files[] = {
        {
                .name = "usage_in_bytes",
                .private = RES_USAGE,
-               .read = mem_cgroup_read,
+               .read_u64 = mem_cgroup_read,
+       },
+       {
+               .name = "max_usage_in_bytes",
+               .private = RES_MAX_USAGE,
+               .trigger = mem_cgroup_reset,
+               .read_u64 = mem_cgroup_read,
        },
        {
                .name = "limit_in_bytes",
                .private = RES_LIMIT,
                .write = mem_cgroup_write,
-               .read = mem_cgroup_read,
+               .read_u64 = mem_cgroup_read,
        },
        {
                .name = "failcnt",
                .private = RES_FAILCNT,
-               .read = mem_cgroup_read,
+               .trigger = mem_cgroup_reset,
+               .read_u64 = mem_cgroup_read,
        },
        {
                .name = "force_empty",
-               .write = mem_force_empty_write,
-               .read = mem_force_empty_read,
+               .trigger = mem_force_empty_write,
        },
        {
                .name = "stat",
-               .open = mem_control_stat_open,
+               .read_map = mem_control_stat_show,
        },
 };
 
@@ -1010,6 +995,29 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node)
        kfree(mem->info.nodeinfo[node]);
 }
 
+static struct mem_cgroup *mem_cgroup_alloc(void)
+{
+       struct mem_cgroup *mem;
+
+       if (sizeof(*mem) < PAGE_SIZE)
+               mem = kmalloc(sizeof(*mem), GFP_KERNEL);
+       else
+               mem = vmalloc(sizeof(*mem));
+
+       if (mem)
+               memset(mem, 0, sizeof(*mem));
+       return mem;
+}
+
+static void mem_cgroup_free(struct mem_cgroup *mem)
+{
+       if (sizeof(*mem) < PAGE_SIZE)
+               kfree(mem);
+       else
+               vfree(mem);
+}
+
 static struct cgroup_subsys_state *
 mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 {
@@ -1018,17 +1026,15 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 
        if (unlikely((cont->parent) == NULL)) {
                mem = &init_mem_cgroup;
-               init_mm.mem_cgroup = mem;
-       } else
-               mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
-
-       if (mem == NULL)
-               return ERR_PTR(-ENOMEM);
+               page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC);
+       } else {
+               mem = mem_cgroup_alloc();
+               if (!mem)
+                       return ERR_PTR(-ENOMEM);
+       }
 
        res_counter_init(&mem->res);
 
-       memset(&mem->info, 0, sizeof(mem->info));
-
        for_each_node_state(node, N_POSSIBLE)
                if (alloc_mem_cgroup_per_zone_info(mem, node))
                        goto free_out;
@@ -1038,7 +1044,7 @@ free_out:
        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);
        if (cont->parent != NULL)
-               kfree(mem);
+               mem_cgroup_free(mem);
        return ERR_PTR(-ENOMEM);
 }
 
@@ -1058,7 +1064,7 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
        for_each_node_state(node, N_POSSIBLE)
                free_mem_cgroup_per_zone_info(mem, node);
 
-       kfree(mem_cgroup_from_cont(cont));
+       mem_cgroup_free(mem_cgroup_from_cont(cont));
 }
 
 static int mem_cgroup_populate(struct cgroup_subsys *ss,
@@ -1098,10 +1104,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
        if (!thread_group_leader(p))
                goto out;
 
-       css_get(&mem->css);
-       rcu_assign_pointer(mm->mem_cgroup, mem);
-       css_put(&old_mem->css);
-
 out:
        mmput(mm);
 }
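
A sketch (hypothetical controller, not part of this patch) of the cftype style the memory controller is converted to above: plain integers via .read_u64, write-only "buttons" via .trigger, and keyed output via .read_map with cb->fill().

static u64 example_read(struct cgroup *cont, struct cftype *cft)
{
        return 42;
}

static int example_trigger(struct cgroup *cont, unsigned int event)
{
        return 0;       /* e.g. reset a counter selected by cft->private */
}

static struct cftype example_files[] = {
        {
                .name = "value",
                .read_u64 = example_read,
        },
        {
                .name = "reset",
                .trigger = example_trigger,
        },
};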
index 0d14d1e58a5fa78e6b6a01369cc1e0869c77dfe9..bbab1e37055e22df235af61ddd201fe4b9832283 100644 (file)
@@ -371,57 +371,93 @@ static inline int is_cow_mapping(unsigned int flags)
 }
 
 /*
- * This function gets the "struct page" associated with a pte.
+ * vm_normal_page -- This function gets the "struct page" associated with a pte.
  *
- * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
- * will have each page table entry just pointing to a raw page frame
- * number, and as far as the VM layer is concerned, those do not have
- * pages associated with them - even if the PFN might point to memory
- * that otherwise is perfectly fine and has a "struct page".
+ * "Special" mappings do not wish to be associated with a "struct page" (either
+ * it doesn't exist, or it exists but they don't want to touch it). In this
+ * case, NULL is returned here. "Normal" mappings do have a struct page.
  *
- * The way we recognize those mappings is through the rules set up
- * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
- * and the vm_pgoff will point to the first PFN mapped: thus every
- * page that is a raw mapping will always honor the rule
+ * There are 2 broad cases. Firstly, an architecture may define a pte_special()
+ * pte bit, in which case this function is trivial. Secondly, an architecture
+ * may not have a spare pte bit, which requires a more complicated scheme,
+ * described below.
+ *
+ * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
+ * special mapping (even if there are underlying and valid "struct pages").
+ * COWed pages of a VM_PFNMAP are always normal.
+ *
+ * The way we recognize COWed pages within VM_PFNMAP mappings is through the
+ * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
+ * set, and the vm_pgoff will point to the first PFN mapped: thus every special
+ * mapping will always honor the rule
  *
  *     pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
  *
- * and if that isn't true, the page has been COW'ed (in which case it
- * _does_ have a "struct page" associated with it even if it is in a
- * VM_PFNMAP range).
+ * And for normal mappings this is false.
+ *
+ * This restricts such mappings to be a linear translation from virtual address
+ * to pfn. To get around this restriction, we allow arbitrary mappings so long
+ * as the vma is not a COW mapping; in that case, we know that all ptes are
+ * special (because none can have been COWed).
+ *
+ *
+ * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
+ *
+ * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
+ * page" backing, however the difference is that _all_ pages with a struct
+ * page (that is, those where pfn_valid is true) are refcounted and considered
+ * normal pages by the VM. The disadvantage is that pages are refcounted
+ * (which can be slower and simply not an option for some PFNMAP users). The
+ * advantage is that we don't have to follow the strict linearity rule of
+ * PFNMAP mappings in order to support COWable mappings.
+ *
  */
-struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+#ifdef __HAVE_ARCH_PTE_SPECIAL
+# define HAVE_PTE_SPECIAL 1
+#else
+# define HAVE_PTE_SPECIAL 0
+#endif
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+                               pte_t pte)
 {
-       unsigned long pfn = pte_pfn(pte);
+       unsigned long pfn;
 
-       if (unlikely(vma->vm_flags & VM_PFNMAP)) {
-               unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
-               if (pfn == vma->vm_pgoff + off)
-                       return NULL;
-               if (!is_cow_mapping(vma->vm_flags))
-                       return NULL;
+       if (HAVE_PTE_SPECIAL) {
+               if (likely(!pte_special(pte))) {
+                       VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+                       return pte_page(pte);
+               }
+               VM_BUG_ON(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
+               return NULL;
        }
 
-#ifdef CONFIG_DEBUG_VM
-       /*
-        * Add some anal sanity checks for now. Eventually,
-        * we should just do "return pfn_to_page(pfn)", but
-        * in the meantime we check that we get a valid pfn,
-        * and that the resulting page looks ok.
-        */
-       if (unlikely(!pfn_valid(pfn))) {
-               print_bad_pte(vma, pte, addr);
-               return NULL;
+       /* !HAVE_PTE_SPECIAL case follows: */
+
+       pfn = pte_pfn(pte);
+
+       if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+               if (vma->vm_flags & VM_MIXEDMAP) {
+                       if (!pfn_valid(pfn))
+                               return NULL;
+                       goto out;
+               } else {
+                       unsigned long off;
+                       off = (addr - vma->vm_start) >> PAGE_SHIFT;
+                       if (pfn == vma->vm_pgoff + off)
+                               return NULL;
+                       if (!is_cow_mapping(vma->vm_flags))
+                               return NULL;
+               }
        }
-#endif
+
+       VM_BUG_ON(!pfn_valid(pfn));
 
        /*
-        * NOTE! We still have PageReserved() pages in the page 
-        * tables. 
+        * NOTE! We still have PageReserved() pages in the page tables.
         *
-        * The PAGE_ZERO() pages and various VDSO mappings can
-        * cause them to exist.
+        * eg. VDSO mappings can cause them to exist.
         */
+out:
        return pfn_to_page(pfn);
 }
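
To make the rules above concrete, a minimal sketch of a driver ->mmap handler that establishes such a raw VM_PFNMAP mapping is shown below; example_mmap and example_buf_pfn are illustrative names only and are not part of this patch.

#include <linux/fs.h>
#include <linux/mm.h>

static unsigned long example_buf_pfn;   /* assumed: first pfn of a driver buffer */

/*
 * Hypothetical ->mmap: remap_pfn_range() sets VM_PFNMAP and records the first
 * pfn in vm_pgoff, which is exactly the linearity rule vm_normal_page() checks
 * on !pte_special architectures to tell COWed (normal) pages from raw pfn
 * (special) entries.
 */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        return remap_pfn_range(vma, vma->vm_start, example_buf_pfn,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}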
 
@@ -1057,8 +1093,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                if (pages)
                        foll_flags |= FOLL_GET;
                if (!write && !(vma->vm_flags & VM_LOCKED) &&
-                   (!vma->vm_ops || (!vma->vm_ops->nopage &&
-                                       !vma->vm_ops->fault)))
+                   (!vma->vm_ops || !vma->vm_ops->fault))
                        foll_flags |= FOLL_ANON;
 
                do {
@@ -1141,8 +1176,10 @@ pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
  * old drivers should use this, and they needed to mark their
  * pages reserved for the old functions anyway.
  */
-static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
+static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+                       struct page *page, pgprot_t prot)
 {
+       struct mm_struct *mm = vma->vm_mm;
        int retval;
        pte_t *pte;
        spinlock_t *ptl;
@@ -1202,40 +1239,26 @@ out:
  *
  * The page does not need to be reserved.
  */
-int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+                       struct page *page)
 {
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
        if (!page_count(page))
                return -EINVAL;
        vma->vm_flags |= VM_INSERTPAGE;
-       return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
+       return insert_page(vma, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
 
-/**
- * vm_insert_pfn - insert single pfn into user vma
- * @vma: user vma to map to
- * @addr: target user address of this page
- * @pfn: source kernel pfn
- *
- * Similar to vm_inert_page, this allows drivers to insert individual pages
- * they've allocated into a user vma. Same comments apply.
- *
- * This function should only be called from a vm_ops->fault handler, and
- * in that case the handler should return NULL.
- */
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-               unsigned long pfn)
+static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn, pgprot_t prot)
 {
        struct mm_struct *mm = vma->vm_mm;
        int retval;
        pte_t *pte, entry;
        spinlock_t *ptl;
 
-       BUG_ON(!(vma->vm_flags & VM_PFNMAP));
-       BUG_ON(is_cow_mapping(vma->vm_flags));
-
        retval = -ENOMEM;
        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
@@ -1245,19 +1268,74 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                goto out_unlock;
 
        /* Ok, finally just insert the thing.. */
-       entry = pfn_pte(pfn, vma->vm_page_prot);
+       entry = pte_mkspecial(pfn_pte(pfn, prot));
        set_pte_at(mm, addr, pte, entry);
-       update_mmu_cache(vma, addr, entry);
+       update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
 
        retval = 0;
 out_unlock:
        pte_unmap_unlock(pte, ptl);
-
 out:
        return retval;
 }
+
+/**
+ * vm_insert_pfn - insert single pfn into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ *
+ * Similar to vm_insert_page, this allows drivers to insert individual pages
+ * they've allocated into a user vma. Same comments apply.
+ *
+ * This function should only be called from a vm_ops->fault handler, and
+ * in that case the handler should return NULL.
+ */
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn)
+{
+       /*
+        * Technically, architectures with pte_special can avoid all these
+        * restrictions (same for remap_pfn_range).  However we would like
+        * consistency in testing and feature parity among all, so we should
+        * try to keep these invariants in place for everybody.
+        */
+       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+       BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+                                               (VM_PFNMAP|VM_MIXEDMAP));
+       BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
+       BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
+
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return -EFAULT;
+       return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+}
 EXPORT_SYMBOL(vm_insert_pfn);
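
As a usage sketch (not part of the patch): a fault handler for a pure VM_PFNMAP, non-COW vma might insert the pfn directly and report that no struct page is being installed. All names below are hypothetical, and the vma is assumed to have been flagged VM_PFNMAP at mmap time, per the BUG_ONs above.

static int example_pfn_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        unsigned long pfn = example_buf_pfn + vmf->pgoff;   /* assumed base pfn */

        if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
                return VM_FAULT_SIGBUS;
        return VM_FAULT_NOPAGE; /* pte installed here, no page for the core to map */
}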
 
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn)
+{
+       BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
+
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return -EFAULT;
+
+       /*
+        * If we don't have pte special, then we have to use the pfn_valid()
+        * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
+        * refcount the page if pfn_valid is true (hence insert_page rather
+        * than insert_pfn).
+        */
+       if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
+               struct page *page;
+
+               page = pfn_to_page(pfn);
+               return insert_page(vma, addr, page, vma->vm_page_prot);
+       }
+       return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_mixed);
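
A corresponding sketch for the mixed case (illustrative only; example_mixed_vm_ops is an assumed vm_operations_struct providing a .fault handler): the driver sets VM_MIXEDMAP at mmap time and later inserts either real RAM pages or device pfns through vm_insert_mixed(), letting the pfn_valid() test above decide whether the page is refcounted.

static struct vm_operations_struct example_mixed_vm_ops;  /* assumed: has .fault */

static int example_mixed_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_flags |= VM_MIXEDMAP;
        vma->vm_ops = &example_mixed_vm_ops;
        return 0;
}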
+
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
@@ -1276,7 +1354,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
        arch_enter_lazy_mmu_mode();
        do {
                BUG_ON(!pte_none(*pte));
-               set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
+               set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
@@ -2199,20 +2277,9 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        BUG_ON(vma->vm_flags & VM_PFNMAP);
 
-       if (likely(vma->vm_ops->fault)) {
-               ret = vma->vm_ops->fault(vma, &vmf);
-               if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
-                       return ret;
-       } else {
-               /* Legacy ->nopage path */
-               ret = 0;
-               vmf.page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
-               /* no page was available -- either SIGBUS or OOM */
-               if (unlikely(vmf.page == NOPAGE_SIGBUS))
-                       return VM_FAULT_SIGBUS;
-               else if (unlikely(vmf.page == NOPAGE_OOM))
-                       return VM_FAULT_OOM;
-       }
+       ret = vma->vm_ops->fault(vma, &vmf);
+       if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+               return ret;
 
        /*
         * For consistency in subsequent calls, make the faulted page always
@@ -2377,10 +2444,13 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
        unsigned long pfn;
 
        pte_unmap(page_table);
-       BUG_ON(!(vma->vm_flags & VM_PFNMAP));
-       BUG_ON(is_cow_mapping(vma->vm_flags));
+       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+       BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
 
        pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
+
+       BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
+
        if (unlikely(pfn == NOPFN_OOM))
                return VM_FAULT_OOM;
        else if (unlikely(pfn == NOPFN_SIGBUS))
@@ -2458,7 +2528,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
                        if (vma->vm_ops) {
-                               if (vma->vm_ops->fault || vma->vm_ops->nopage)
+                               if (likely(vma->vm_ops->fault))
                                        return do_linear_fault(mm, vma, address,
                                                pte, pmd, write_access, entry);
                                if (unlikely(vma->vm_ops->nopfn))
index 0fb33027127120460a05ddc2bb827d7b26d31aaa..b17dca7249f8a41e0806e4465291f1dd2eba4913 100644 (file)
@@ -29,6 +29,8 @@
 
 #include <asm/tlbflush.h>
 
+#include "internal.h"
+
 /* add this memory to iomem resource */
 static struct resource *register_memory_resource(u64 start, u64 size)
 {
@@ -58,8 +60,105 @@ static void release_memory_resource(struct resource *res)
        return;
 }
 
-
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
+static void get_page_bootmem(unsigned long info,  struct page *page, int magic)
+{
+       atomic_set(&page->_mapcount, magic);
+       SetPagePrivate(page);
+       set_page_private(page, info);
+       atomic_inc(&page->_count);
+}
+
+void put_page_bootmem(struct page *page)
+{
+       int magic;
+
+       magic = atomic_read(&page->_mapcount);
+       BUG_ON(magic >= -1);
+
+       if (atomic_dec_return(&page->_count) == 1) {
+               ClearPagePrivate(page);
+               set_page_private(page, 0);
+               reset_page_mapcount(page);
+               __free_pages_bootmem(page, 0);
+       }
+
+}
+
+void register_page_bootmem_info_section(unsigned long start_pfn)
+{
+       unsigned long *usemap, mapsize, section_nr, i;
+       struct mem_section *ms;
+       struct page *page, *memmap;
+
+       if (!pfn_valid(start_pfn))
+               return;
+
+       section_nr = pfn_to_section_nr(start_pfn);
+       ms = __nr_to_section(section_nr);
+
+       /* Get section's memmap address */
+       memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+
+       /*
+        * Get page for the memmap's phys address
+        * XXX: need more consideration for sparse_vmemmap...
+        */
+       page = virt_to_page(memmap);
+       mapsize = sizeof(struct page) * PAGES_PER_SECTION;
+       mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
+
+       /* remember memmap's page */
+       for (i = 0; i < mapsize; i++, page++)
+               get_page_bootmem(section_nr, page, SECTION_INFO);
+
+       usemap = __nr_to_section(section_nr)->pageblock_flags;
+       page = virt_to_page(usemap);
+
+       mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
+
+       for (i = 0; i < mapsize; i++, page++)
+               get_page_bootmem(section_nr, page, MIX_INFO);
+
+}
+
+void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+       unsigned long i, pfn, end_pfn, nr_pages;
+       int node = pgdat->node_id;
+       struct page *page;
+       struct zone *zone;
+
+       nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
+       page = virt_to_page(pgdat);
+
+       for (i = 0; i < nr_pages; i++, page++)
+               get_page_bootmem(node, page, NODE_INFO);
+
+       zone = &pgdat->node_zones[0];
+       for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
+               if (zone->wait_table) {
+                       nr_pages = zone->wait_table_hash_nr_entries
+                               * sizeof(wait_queue_head_t);
+                       nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
+                       page = virt_to_page(zone->wait_table);
+
+                       for (i = 0; i < nr_pages; i++, page++)
+                               get_page_bootmem(node, page, NODE_INFO);
+               }
+       }
+
+       pfn = pgdat->node_start_pfn;
+       end_pfn = pfn + pgdat->node_spanned_pages;
+
+       /* register_section info */
+       for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
+               register_page_bootmem_info_section(pfn);
+
+}
+#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
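
For reference, the bookkeeping above stashes the page "type" (SECTION_INFO, MIX_INFO or NODE_INFO) in _mapcount and the section or node number in page->private; a hypothetical helper (not part of the patch) that reads the pair back would be:

static inline unsigned long example_bootmem_info(struct page *page, int *type)
{
        *type = atomic_read(&page->_mapcount);  /* SECTION_INFO / MIX_INFO / NODE_INFO */
        return page_private(page);              /* section or node number */
}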
+
 static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
        struct pglist_data *pgdat = zone->zone_pgdat;
@@ -101,6 +200,36 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
        return register_new_memory(__pfn_to_section(phys_start_pfn));
 }
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static int __remove_section(struct zone *zone, struct mem_section *ms)
+{
+       /*
+        * XXX: Freeing memmap with vmemmap is not implemented yet.
+        *      This should be removed later.
+        */
+       return -EBUSY;
+}
+#else
+static int __remove_section(struct zone *zone, struct mem_section *ms)
+{
+       unsigned long flags;
+       struct pglist_data *pgdat = zone->zone_pgdat;
+       int ret = -EINVAL;
+
+       if (!valid_section(ms))
+               return ret;
+
+       ret = unregister_memory_section(ms);
+       if (ret)
+               return ret;
+
+       pgdat_resize_lock(pgdat, &flags);
+       sparse_remove_one_section(zone, ms);
+       pgdat_resize_unlock(pgdat, &flags);
+       return 0;
+}
+#endif
+
 /*
  * Reasonably generic function for adding memory.  It is
  * expected that archs that support memory hotplug will
@@ -134,6 +263,42 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 }
 EXPORT_SYMBOL_GPL(__add_pages);
 
+/**
+ * __remove_pages() - remove sections of pages from a zone
+ * @zone: zone from which pages need to be removed
+ * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
+ * @nr_pages: number of pages to remove (must be multiple of section size)
+ *
+ * Generic helper function to remove section mappings and sysfs entries
+ * for the section of the memory we are removing. Caller needs to make
+ * sure that pages are marked reserved and zones are adjusted properly by
+ * calling offline_pages().
+ */
+int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
+                unsigned long nr_pages)
+{
+       unsigned long i, ret = 0;
+       int sections_to_remove;
+
+       /*
+        * We can only remove entire sections
+        */
+       BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
+       BUG_ON(nr_pages % PAGES_PER_SECTION);
+
+       release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);
+
+       sections_to_remove = nr_pages / PAGES_PER_SECTION;
+       for (i = 0; i < sections_to_remove; i++) {
+               unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
+               ret = __remove_section(zone, __pfn_to_section(pfn));
+               if (ret)
+                       break;
+       }
+       return ret;
+}
+EXPORT_SYMBOL_GPL(__remove_pages);
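
A sketch of a caller (hypothetical, since the arch side is not part of this hunk): pages are expected to have been offlined already via offline_pages(), and start/size must be section aligned, as the BUG_ONs above enforce.

static int example_arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone = page_zone(pfn_to_page(start_pfn));

        return __remove_pages(zone, start_pfn, nr_pages);
}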
+
 static void grow_zone_span(struct zone *zone,
                unsigned long start_pfn, unsigned long end_pfn)
 {
@@ -164,6 +329,25 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
                                        pgdat->node_start_pfn;
 }
 
+void online_page(struct page *page)
+{
+       totalram_pages++;
+       num_physpages++;
+
+#ifdef CONFIG_HIGHMEM
+       if (PageHighMem(page))
+               totalhigh_pages++;
+#endif
+
+#ifdef CONFIG_FLATMEM
+       max_mapnr = max(page_to_pfn(page), max_mapnr);
+#endif
+
+       ClearPageReserved(page);
+       init_page_count(page);
+       __free_page(page);
+}
+
 static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
                        void *arg)
 {
index 3c3601121509d8a3c1d3ab2a8cf4a35953a9aa4e..a37a5034f63dda824f15b5ffb89156fea43fb4a3 100644 (file)
@@ -63,7 +63,6 @@
    grows down?
    make bind policy root only? It can trigger oom much faster and the
    kernel is not always grateful with that.
-   could replace all the switch()es with a mempolicy_ops structure.
 */
 
 #include <linux/mempolicy.h>
@@ -89,6 +88,7 @@
 #include <linux/rmap.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
+#include <linux/ctype.h>
 
 #include <asm/tlbflush.h>
 #include <asm/uaccess.h>
@@ -105,142 +105,264 @@ static struct kmem_cache *sn_cache;
    policied. */
 enum zone_type policy_zone = 0;
 
+/*
+ * run-time system-wide default policy => local allocation
+ */
 struct mempolicy default_policy = {
        .refcnt = ATOMIC_INIT(1), /* never free it */
-       .policy = MPOL_DEFAULT,
+       .mode = MPOL_PREFERRED,
+       .flags = MPOL_F_LOCAL,
 };
 
-static void mpol_rebind_policy(struct mempolicy *pol,
-                               const nodemask_t *newmask);
+static const struct mempolicy_operations {
+       int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
+       void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes);
+} mpol_ops[MPOL_MAX];
 
-/* Do sanity checking on a policy */
-static int mpol_check_policy(int mode, nodemask_t *nodes)
+/* Check that the nodemask contains at least one populated zone */
+static int is_valid_nodemask(const nodemask_t *nodemask)
 {
-       int was_empty, is_empty;
+       int nd, k;
 
-       if (!nodes)
-               return 0;
+       /* Check that there is something useful in this mask */
+       k = policy_zone;
 
-       /*
-        * "Contextualize" the in-coming nodemast for cpusets:
-        * Remember whether in-coming nodemask was empty,  If not,
-        * restrict the nodes to the allowed nodes in the cpuset.
-        * This is guaranteed to be a subset of nodes with memory.
-        */
-       cpuset_update_task_memory_state();
-       is_empty = was_empty = nodes_empty(*nodes);
-       if (!was_empty) {
-               nodes_and(*nodes, *nodes, cpuset_current_mems_allowed);
-               is_empty = nodes_empty(*nodes); /* after "contextualization" */
-       }
+       for_each_node_mask(nd, *nodemask) {
+               struct zone *z;
 
-       switch (mode) {
-       case MPOL_DEFAULT:
-               /*
-                * require caller to specify an empty nodemask
-                * before "contextualization"
-                */
-               if (!was_empty)
-                       return -EINVAL;
-               break;
-       case MPOL_BIND:
-       case MPOL_INTERLEAVE:
-               /*
-                * require at least 1 valid node after "contextualization"
-                */
-               if (is_empty)
-                       return -EINVAL;
-               break;
-       case MPOL_PREFERRED:
-               /*
-                * Did caller specify invalid nodes?
-                * Don't silently accept this as "local allocation".
-                */
-               if (!was_empty && is_empty)
-                       return -EINVAL;
-               break;
+               for (k = 0; k <= policy_zone; k++) {
+                       z = &NODE_DATA(nd)->node_zones[k];
+                       if (z->present_pages > 0)
+                               return 1;
+               }
        }
+
        return 0;
 }
 
-/* Generate a custom zonelist for the BIND policy. */
-static struct zonelist *bind_zonelist(nodemask_t *nodes)
+static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
 {
-       struct zonelist *zl;
-       int num, max, nd;
-       enum zone_type k;
+       return pol->flags & (MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES);
+}
 
-       max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
-       max++;                  /* space for zlcache_ptr (see mmzone.h) */
-       zl = kmalloc(sizeof(struct zone *) * max, GFP_KERNEL);
-       if (!zl)
-               return ERR_PTR(-ENOMEM);
-       zl->zlcache_ptr = NULL;
-       num = 0;
-       /* First put in the highest zones from all nodes, then all the next 
-          lower zones etc. Avoid empty zones because the memory allocator
-          doesn't like them. If you implement node hot removal you
-          have to fix that. */
-       k = MAX_NR_ZONES - 1;
-       while (1) {
-               for_each_node_mask(nd, *nodes) { 
-                       struct zone *z = &NODE_DATA(nd)->node_zones[k];
-                       if (z->present_pages > 0) 
-                               zl->zones[num++] = z;
-               }
-               if (k == 0)
-                       break;
-               k--;
-       }
-       if (num == 0) {
-               kfree(zl);
-               return ERR_PTR(-EINVAL);
-       }
-       zl->zones[num] = NULL;
-       return zl;
+static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
+                                  const nodemask_t *rel)
+{
+       nodemask_t tmp;
+       nodes_fold(tmp, *orig, nodes_weight(*rel));
+       nodes_onto(*ret, tmp, *rel);
+}
+
+static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
+{
+       if (nodes_empty(*nodes))
+               return -EINVAL;
+       pol->v.nodes = *nodes;
+       return 0;
+}
+
+static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
+{
+       if (!nodes)
+               pol->flags |= MPOL_F_LOCAL;     /* local allocation */
+       else if (nodes_empty(*nodes))
+               return -EINVAL;                 /*  no allowed nodes */
+       else
+               pol->v.preferred_node = first_node(*nodes);
+       return 0;
+}
+
+static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
+{
+       if (!is_valid_nodemask(nodes))
+               return -EINVAL;
+       pol->v.nodes = *nodes;
+       return 0;
 }
 
 /* Create a new policy */
-static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
+static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
+                                 nodemask_t *nodes)
 {
        struct mempolicy *policy;
+       nodemask_t cpuset_context_nmask;
+       int ret;
 
-       pr_debug("setting mode %d nodes[0] %lx\n",
-                mode, nodes ? nodes_addr(*nodes)[0] : -1);
+       pr_debug("setting mode %d flags %d nodes[0] %lx\n",
+                mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
 
-       if (mode == MPOL_DEFAULT)
-               return NULL;
+       if (mode == MPOL_DEFAULT) {
+               if (nodes && !nodes_empty(*nodes))
+                       return ERR_PTR(-EINVAL);
+               return NULL;    /* simply delete any existing policy */
+       }
+       VM_BUG_ON(!nodes);
+
+       /*
+        * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
+        * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
+        * All other modes require a valid pointer to a non-empty nodemask.
+        */
+       if (mode == MPOL_PREFERRED) {
+               if (nodes_empty(*nodes)) {
+                       if (((flags & MPOL_F_STATIC_NODES) ||
+                            (flags & MPOL_F_RELATIVE_NODES)))
+                               return ERR_PTR(-EINVAL);
+                       nodes = NULL;   /* flag local alloc */
+               }
+       } else if (nodes_empty(*nodes))
+               return ERR_PTR(-EINVAL);
        policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
        if (!policy)
                return ERR_PTR(-ENOMEM);
        atomic_set(&policy->refcnt, 1);
-       switch (mode) {
-       case MPOL_INTERLEAVE:
-               policy->v.nodes = *nodes;
-               if (nodes_weight(policy->v.nodes) == 0) {
-                       kmem_cache_free(policy_cache, policy);
-                       return ERR_PTR(-EINVAL);
-               }
-               break;
-       case MPOL_PREFERRED:
-               policy->v.preferred_node = first_node(*nodes);
-               if (policy->v.preferred_node >= MAX_NUMNODES)
-                       policy->v.preferred_node = -1;
-               break;
-       case MPOL_BIND:
-               policy->v.zonelist = bind_zonelist(nodes);
-               if (IS_ERR(policy->v.zonelist)) {
-                       void *error_code = policy->v.zonelist;
-                       kmem_cache_free(policy_cache, policy);
-                       return error_code;
-               }
-               break;
+       policy->mode = mode;
+       policy->flags = flags;
+
+       if (nodes) {
+               /*
+                * cpuset related setup doesn't apply to local allocation
+                */
+               cpuset_update_task_memory_state();
+               if (flags & MPOL_F_RELATIVE_NODES)
+                       mpol_relative_nodemask(&cpuset_context_nmask, nodes,
+                                              &cpuset_current_mems_allowed);
+               else
+                       nodes_and(cpuset_context_nmask, *nodes,
+                                 cpuset_current_mems_allowed);
+               if (mpol_store_user_nodemask(policy))
+                       policy->w.user_nodemask = *nodes;
+               else
+                       policy->w.cpuset_mems_allowed =
+                                               cpuset_mems_allowed(current);
+       }
+
+       ret = mpol_ops[mode].create(policy,
+                               nodes ? &cpuset_context_nmask : NULL);
+       if (ret < 0) {
+               kmem_cache_free(policy_cache, policy);
+               return ERR_PTR(ret);
        }
-       policy->policy = mode;
-       policy->cpuset_mems_allowed = cpuset_mems_allowed(current);
        return policy;
 }
 
+/* Slow path of a mpol destructor. */
+void __mpol_put(struct mempolicy *p)
+{
+       if (!atomic_dec_and_test(&p->refcnt))
+               return;
+       kmem_cache_free(policy_cache, p);
+}
+
+static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes)
+{
+}
+
+static void mpol_rebind_nodemask(struct mempolicy *pol,
+                                const nodemask_t *nodes)
+{
+       nodemask_t tmp;
+
+       if (pol->flags & MPOL_F_STATIC_NODES)
+               nodes_and(tmp, pol->w.user_nodemask, *nodes);
+       else if (pol->flags & MPOL_F_RELATIVE_NODES)
+               mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
+       else {
+               nodes_remap(tmp, pol->v.nodes, pol->w.cpuset_mems_allowed,
+                           *nodes);
+               pol->w.cpuset_mems_allowed = *nodes;
+       }
+
+       pol->v.nodes = tmp;
+       if (!node_isset(current->il_next, tmp)) {
+               current->il_next = next_node(current->il_next, tmp);
+               if (current->il_next >= MAX_NUMNODES)
+                       current->il_next = first_node(tmp);
+               if (current->il_next >= MAX_NUMNODES)
+                       current->il_next = numa_node_id();
+       }
+}
+
+static void mpol_rebind_preferred(struct mempolicy *pol,
+                                 const nodemask_t *nodes)
+{
+       nodemask_t tmp;
+
+       if (pol->flags & MPOL_F_STATIC_NODES) {
+               int node = first_node(pol->w.user_nodemask);
+
+               if (node_isset(node, *nodes)) {
+                       pol->v.preferred_node = node;
+                       pol->flags &= ~MPOL_F_LOCAL;
+               } else
+                       pol->flags |= MPOL_F_LOCAL;
+       } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
+               mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
+               pol->v.preferred_node = first_node(tmp);
+       } else if (!(pol->flags & MPOL_F_LOCAL)) {
+               pol->v.preferred_node = node_remap(pol->v.preferred_node,
+                                                  pol->w.cpuset_mems_allowed,
+                                                  *nodes);
+               pol->w.cpuset_mems_allowed = *nodes;
+       }
+}
+
+/* Migrate a policy to a different set of nodes */
+static void mpol_rebind_policy(struct mempolicy *pol,
+                              const nodemask_t *newmask)
+{
+       if (!pol)
+               return;
+       if (!mpol_store_user_nodemask(pol) &&
+           nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
+               return;
+       mpol_ops[pol->mode].rebind(pol, newmask);
+}
+
+/*
+ * Wrapper for mpol_rebind_policy() that just requires task
+ * pointer, and updates task mempolicy.
+ */
+
+void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
+{
+       mpol_rebind_policy(tsk->mempolicy, new);
+}
+
+/*
+ * Rebind each vma in mm to new nodemask.
+ *
+ * Call holding a reference to mm.  Takes mm->mmap_sem during call.
+ */
+
+void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
+{
+       struct vm_area_struct *vma;
+
+       down_write(&mm->mmap_sem);
+       for (vma = mm->mmap; vma; vma = vma->vm_next)
+               mpol_rebind_policy(vma->vm_policy, new);
+       up_write(&mm->mmap_sem);
+}
+
+static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
+       [MPOL_DEFAULT] = {
+               .rebind = mpol_rebind_default,
+       },
+       [MPOL_INTERLEAVE] = {
+               .create = mpol_new_interleave,
+               .rebind = mpol_rebind_nodemask,
+       },
+       [MPOL_PREFERRED] = {
+               .create = mpol_new_preferred,
+               .rebind = mpol_rebind_preferred,
+       },
+       [MPOL_BIND] = {
+               .create = mpol_new_bind,
+               .rebind = mpol_rebind_nodemask,
+       },
+};
+
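
A worked example of the three rebind behaviours wired up in mpol_ops[] above, written here as a comment block that is not part of the patch (node numbers are hypothetical): suppose a task interleaves over nodes {0,2}, its cpuset initially allows nodes 0-3, and the cpuset is later migrated to nodes 4-7.

/*
 *   no flag                remap by relative position (nodes_remap)  -> {4,6}
 *   MPOL_F_RELATIVE_NODES  fold/map the user mask onto the cpuset    -> {4,6}
 *   MPOL_F_STATIC_NODES    AND the user mask with the new cpuset     -> {}
 *                          (the requested physical nodes are gone and
 *                           are deliberately not translated)
 */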
 static void gather_stats(struct page *, void *, int pte_dirty);
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
                                unsigned long flags);
@@ -421,7 +543,7 @@ static int policy_vma(struct vm_area_struct *vma, struct mempolicy *new)
        if (!err) {
                mpol_get(new);
                vma->vm_policy = new;
-               mpol_free(old);
+               mpol_put(old);
        }
        return err;
 }
@@ -479,46 +601,55 @@ static void mpol_set_task_struct_flag(void)
 }
 
 /* Set the process memory policy */
-static long do_set_mempolicy(int mode, nodemask_t *nodes)
+static long do_set_mempolicy(unsigned short mode, unsigned short flags,
+                            nodemask_t *nodes)
 {
        struct mempolicy *new;
+       struct mm_struct *mm = current->mm;
 
-       if (mpol_check_policy(mode, nodes))
-               return -EINVAL;
-       new = mpol_new(mode, nodes);
+       new = mpol_new(mode, flags, nodes);
        if (IS_ERR(new))
                return PTR_ERR(new);
-       mpol_free(current->mempolicy);
+
+       /*
+        * prevent changing our mempolicy while show_numa_maps()
+        * is using it.
+        * Note:  do_set_mempolicy() can be called at init time
+        * with no 'mm'.
+        */
+       if (mm)
+               down_write(&mm->mmap_sem);
+       mpol_put(current->mempolicy);
        current->mempolicy = new;
        mpol_set_task_struct_flag();
-       if (new && new->policy == MPOL_INTERLEAVE)
+       if (new && new->mode == MPOL_INTERLEAVE &&
+           nodes_weight(new->v.nodes))
                current->il_next = first_node(new->v.nodes);
+       if (mm)
+               up_write(&mm->mmap_sem);
+
        return 0;
 }
 
-/* Fill a zone bitmap for a policy */
-static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
+/*
+ * Return nodemask for policy for get_mempolicy() query
+ */
+static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
 {
-       int i;
-
        nodes_clear(*nodes);
-       switch (p->policy) {
+       if (p == &default_policy)
+               return;
+
+       switch (p->mode) {
        case MPOL_BIND:
-               for (i = 0; p->v.zonelist->zones[i]; i++)
-                       node_set(zone_to_nid(p->v.zonelist->zones[i]),
-                               *nodes);
-               break;
-       case MPOL_DEFAULT:
-               break;
+               /* Fall through */
        case MPOL_INTERLEAVE:
                *nodes = p->v.nodes;
                break;
        case MPOL_PREFERRED:
-               /* or use current node instead of memory_map? */
-               if (p->v.preferred_node < 0)
-                       *nodes = node_states[N_HIGH_MEMORY];
-               else
+               if (!(p->flags & MPOL_F_LOCAL))
                        node_set(p->v.preferred_node, *nodes);
+               /* else return empty node mask for local allocation */
                break;
        default:
                BUG();
@@ -561,6 +692,11 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
        }
 
        if (flags & MPOL_F_ADDR) {
+               /*
+                * Do NOT fall back to task policy if the
+                * vma/shared policy at addr is NULL.  We
+                * want to return MPOL_DEFAULT in this case.
+                */
                down_read(&mm->mmap_sem);
                vma = find_vma_intersection(mm, addr, addr+1);
                if (!vma) {
@@ -575,7 +711,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
                return -EINVAL;
 
        if (!pol)
-               pol = &default_policy;
+               pol = &default_policy;  /* indicates default behavior */
 
        if (flags & MPOL_F_NODE) {
                if (flags & MPOL_F_ADDR) {
@@ -584,14 +720,17 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
                                goto out;
                        *policy = err;
                } else if (pol == current->mempolicy &&
-                               pol->policy == MPOL_INTERLEAVE) {
+                               pol->mode == MPOL_INTERLEAVE) {
                        *policy = current->il_next;
                } else {
                        err = -EINVAL;
                        goto out;
                }
-       } else
-               *policy = pol->policy;
+       } else {
+               *policy = pol == &default_policy ? MPOL_DEFAULT :
+                                               pol->mode;
+               *policy |= pol->flags;
+       }
 
        if (vma) {
                up_read(&current->mm->mmap_sem);
@@ -600,9 +739,10 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 
        err = 0;
        if (nmask)
-               get_zonemask(pol, nmask);
+               get_policy_nodemask(pol, nmask);
 
  out:
+       mpol_cond_put(pol);
        if (vma)
                up_read(&current->mm->mmap_sem);
        return err;
@@ -664,7 +804,7 @@ int do_migrate_pages(struct mm_struct *mm,
        int err = 0;
        nodemask_t tmp;
 
-       down_read(&mm->mmap_sem);
+       down_read(&mm->mmap_sem);
 
        err = migrate_vmas(mm, from_nodes, to_nodes, flags);
        if (err)
@@ -781,8 +921,8 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int *
 #endif
 
 static long do_mbind(unsigned long start, unsigned long len,
-                    unsigned long mode, nodemask_t *nmask,
-                    unsigned long flags)
+                    unsigned short mode, unsigned short mode_flags,
+                    nodemask_t *nmask, unsigned long flags)
 {
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
@@ -791,9 +931,8 @@ static long do_mbind(unsigned long start, unsigned long len,
        int err;
        LIST_HEAD(pagelist);
 
-       if ((flags & ~(unsigned long)(MPOL_MF_STRICT |
-                                     MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
-           || mode > MPOL_MAX)
+       if (flags & ~(unsigned long)(MPOL_MF_STRICT |
+                                    MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
                return -EINVAL;
        if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
                return -EPERM;
@@ -812,10 +951,7 @@ static long do_mbind(unsigned long start, unsigned long len,
        if (end == start)
                return 0;
 
-       if (mpol_check_policy(mode, nmask))
-               return -EINVAL;
-
-       new = mpol_new(mode, nmask);
+       new = mpol_new(mode, mode_flags, nmask);
        if (IS_ERR(new))
                return PTR_ERR(new);
 
@@ -826,8 +962,9 @@ static long do_mbind(unsigned long start, unsigned long len,
        if (!new)
                flags |= MPOL_MF_DISCONTIG_OK;
 
-       pr_debug("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
-                mode, nmask ? nodes_addr(*nmask)[0] : -1);
+       pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
+                start, start + len, mode, mode_flags,
+                nmask ? nodes_addr(*nmask)[0] : -1);
 
        down_write(&mm->mmap_sem);
        vma = check_range(mm, start, end, nmask,
@@ -848,7 +985,7 @@ static long do_mbind(unsigned long start, unsigned long len,
        }
 
        up_write(&mm->mmap_sem);
-       mpol_free(new);
+       mpol_put(new);
        return err;
 }
 
@@ -926,11 +1063,19 @@ asmlinkage long sys_mbind(unsigned long start, unsigned long len,
 {
        nodemask_t nodes;
        int err;
+       unsigned short mode_flags;
 
+       mode_flags = mode & MPOL_MODE_FLAGS;
+       mode &= ~MPOL_MODE_FLAGS;
+       if (mode >= MPOL_MAX)
+               return -EINVAL;
+       if ((mode_flags & MPOL_F_STATIC_NODES) &&
+           (mode_flags & MPOL_F_RELATIVE_NODES))
+               return -EINVAL;
        err = get_nodes(&nodes, nmask, maxnode);
        if (err)
                return err;
-       return do_mbind(start, len, mode, &nodes, flags);
+       return do_mbind(start, len, mode, mode_flags, &nodes, flags);
 }
 
 /* Set the process memory policy */
@@ -939,13 +1084,18 @@ asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
 {
        int err;
        nodemask_t nodes;
+       unsigned short flags;
 
-       if (mode < 0 || mode > MPOL_MAX)
+       flags = mode & MPOL_MODE_FLAGS;
+       mode &= ~MPOL_MODE_FLAGS;
+       if ((unsigned int)mode >= MPOL_MAX)
+               return -EINVAL;
+       if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
                return -EINVAL;
        err = get_nodes(&nodes, nmask, maxnode);
        if (err)
                return err;
-       return do_set_mempolicy(mode, &nodes);
+       return do_set_mempolicy(mode, flags, &nodes);
 }
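
From userspace, the optional flags are simply ORed into the mode argument and split back out by sys_set_mempolicy()/sys_mbind() above. A minimal sketch, assuming headers that already carry the MPOL_F_* definitions introduced by this series (error handling omitted):

#include <stddef.h>
#include <numaif.h>

static void example_apply_policy(void *buf, size_t len,
                                 unsigned long *mask, unsigned long maxnode)
{
        /* whole-task interleave; mask stays relative to the current cpuset */
        set_mempolicy(MPOL_INTERLEAVE | MPOL_F_RELATIVE_NODES, mask, maxnode);

        /* bind just this range to exactly these physical nodes */
        mbind(buf, len, MPOL_BIND | MPOL_F_STATIC_NODES, mask, maxnode,
              MPOL_MF_MOVE);
}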
 
 asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
@@ -1131,59 +1281,75 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
  *
  * Returns effective policy for a VMA at specified address.
  * Falls back to @task or system default policy, as necessary.
- * Returned policy has extra reference count if shared, vma,
- * or some other task's policy [show_numa_maps() can pass
- * @task != current].  It is the caller's responsibility to
- * free the reference in these cases.
+ * Current or other task's task mempolicy and non-shared vma policies
+ * are protected by the task's mmap_sem, which must be held for read by
+ * the caller.
+ * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
+ * count--added by the get_policy() vm_op, as appropriate--to protect against
+ * freeing by another task.  It is the caller's responsibility to free the
+ * extra reference for shared policies.
  */
-static struct mempolicy * get_vma_policy(struct task_struct *task,
+static struct mempolicy *get_vma_policy(struct task_struct *task,
                struct vm_area_struct *vma, unsigned long addr)
 {
        struct mempolicy *pol = task->mempolicy;
-       int shared_pol = 0;
 
        if (vma) {
                if (vma->vm_ops && vma->vm_ops->get_policy) {
-                       pol = vma->vm_ops->get_policy(vma, addr);
-                       shared_pol = 1; /* if pol non-NULL, add ref below */
-               } else if (vma->vm_policy &&
-                               vma->vm_policy->policy != MPOL_DEFAULT)
+                       struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
+                                                                       addr);
+                       if (vpol)
+                               pol = vpol;
+               } else if (vma->vm_policy)
                        pol = vma->vm_policy;
        }
        if (!pol)
                pol = &default_policy;
-       else if (!shared_pol && pol != current->mempolicy)
-               mpol_get(pol);  /* vma or other task's policy */
        return pol;
 }
 
-/* Return a zonelist representing a mempolicy */
-static struct zonelist *zonelist_policy(gfp_t gfp, struct mempolicy *policy)
+/*
+ * Return a nodemask representing a mempolicy for filtering nodes for
+ * page allocation
+ */
+static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
 {
-       int nd;
+       /* Lower zones don't get a nodemask applied for MPOL_BIND */
+       if (unlikely(policy->mode == MPOL_BIND) &&
+                       gfp_zone(gfp) >= policy_zone &&
+                       cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
+               return &policy->v.nodes;
 
-       switch (policy->policy) {
+       return NULL;
+}
+
+/* Return a zonelist indicated by gfp for node representing a mempolicy */
+static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
+{
+       int nd = numa_node_id();
+
+       switch (policy->mode) {
        case MPOL_PREFERRED:
-               nd = policy->v.preferred_node;
-               if (nd < 0)
-                       nd = numa_node_id();
+               if (!(policy->flags & MPOL_F_LOCAL))
+                       nd = policy->v.preferred_node;
                break;
        case MPOL_BIND:
-               /* Lower zones don't get a policy applied */
-               /* Careful: current->mems_allowed might have moved */
-               if (gfp_zone(gfp) >= policy_zone)
-                       if (cpuset_zonelist_valid_mems_allowed(policy->v.zonelist))
-                               return policy->v.zonelist;
-               /*FALL THROUGH*/
+               /*
+                * Normally, MPOL_BIND allocations are node-local within the
+                * allowed nodemask.  However, if __GFP_THISNODE is set and the
+                * current node isn't part of the mask, we use the zonelist for
+                * the first node in the mask instead.
+                */
+               if (unlikely(gfp & __GFP_THISNODE) &&
+                               unlikely(!node_isset(nd, policy->v.nodes)))
+                       nd = first_node(policy->v.nodes);
+               break;
        case MPOL_INTERLEAVE: /* should not happen */
-       case MPOL_DEFAULT:
-               nd = numa_node_id();
                break;
        default:
-               nd = 0;
                BUG();
        }
-       return NODE_DATA(nd)->node_zonelists + gfp_zone(gfp);
+       return node_zonelist(nd, gfp);
 }
 
 /* Do dynamic interleaving for a process */
@@ -1196,36 +1362,51 @@ static unsigned interleave_nodes(struct mempolicy *policy)
        next = next_node(nid, policy->v.nodes);
        if (next >= MAX_NUMNODES)
                next = first_node(policy->v.nodes);
-       me->il_next = next;
+       if (next < MAX_NUMNODES)
+               me->il_next = next;
        return nid;
 }
 
 /*
  * Depending on the memory policy provide a node from which to allocate the
  * next slab entry.
+ * @policy must be protected from freeing by the caller.  If @policy is
+ * the current task's mempolicy, this protection is implicit, as only the
+ * task can change its policy.  The system default policy requires no
+ * such protection.
  */
 unsigned slab_node(struct mempolicy *policy)
 {
-       int pol = policy ? policy->policy : MPOL_DEFAULT;
+       if (!policy || policy->flags & MPOL_F_LOCAL)
+               return numa_node_id();
+
+       switch (policy->mode) {
+       case MPOL_PREFERRED:
+               /*
+                * handled MPOL_F_LOCAL above
+                */
+               return policy->v.preferred_node;
 
-       switch (pol) {
        case MPOL_INTERLEAVE:
                return interleave_nodes(policy);
 
-       case MPOL_BIND:
+       case MPOL_BIND: {
                /*
                 * Follow bind policy behavior and start allocation at the
                 * first node.
                 */
-               return zone_to_nid(policy->v.zonelist->zones[0]);
-
-       case MPOL_PREFERRED:
-               if (policy->v.preferred_node >= 0)
-                       return policy->v.preferred_node;
-               /* Fall through */
+               struct zonelist *zonelist;
+               struct zone *zone;
+               enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
+               zonelist = &NODE_DATA(numa_node_id())->node_zonelists[0];
+               (void)first_zones_zonelist(zonelist, highest_zoneidx,
+                                                       &policy->v.nodes,
+                                                       &zone);
+               return zone->node;
+       }
 
        default:
-               return numa_node_id();
+               BUG();
        }
 }
 
@@ -1234,10 +1415,13 @@ static unsigned offset_il_node(struct mempolicy *pol,
                struct vm_area_struct *vma, unsigned long off)
 {
        unsigned nnodes = nodes_weight(pol->v.nodes);
-       unsigned target = (unsigned)off % nnodes;
+       unsigned target;
        int c;
        int nid = -1;
 
+       if (!nnodes)
+               return numa_node_id();
+       target = (unsigned int)off % nnodes;
        c = 0;
        do {
                nid = next_node(nid, pol->v.nodes);
@@ -1274,40 +1458,30 @@ static inline unsigned interleave_nid(struct mempolicy *pol,
  * @vma = virtual memory area whose policy is sought
  * @addr = address in @vma for shared policy lookup and interleave policy
  * @gfp_flags = for requested zone
- * @mpol = pointer to mempolicy pointer for reference counted 'BIND policy
+ * @mpol = pointer to mempolicy pointer for reference counted mempolicy
+ * @nodemask = pointer to nodemask pointer for MPOL_BIND nodemask
  *
- * Returns a zonelist suitable for a huge page allocation.
- * If the effective policy is 'BIND, returns pointer to policy's zonelist.
- * If it is also a policy for which get_vma_policy() returns an extra
- * reference, we must hold that reference until after allocation.
- * In that case, return policy via @mpol so hugetlb allocation can drop
- * the reference.  For non-'BIND referenced policies, we can/do drop the
- * reference here, so the caller doesn't need to know about the special case
- * for default and current task policy.
+ * Returns a zonelist suitable for a huge page allocation and a pointer
+ * to the struct mempolicy for conditional unref after allocation.
+ * If the effective policy is 'BIND, returns a pointer to the mempolicy's
+ * @nodemask for filtering the zonelist.
  */
 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
-                               gfp_t gfp_flags, struct mempolicy **mpol)
+                               gfp_t gfp_flags, struct mempolicy **mpol,
+                               nodemask_t **nodemask)
 {
-       struct mempolicy *pol = get_vma_policy(current, vma, addr);
        struct zonelist *zl;
 
-       *mpol = NULL;           /* probably no unref needed */
-       if (pol->policy == MPOL_INTERLEAVE) {
-               unsigned nid;
-
-               nid = interleave_nid(pol, vma, addr, HPAGE_SHIFT);
-               if (unlikely(pol != &default_policy &&
-                               pol != current->mempolicy))
-                       __mpol_free(pol);       /* finished with pol */
-               return NODE_DATA(nid)->node_zonelists + gfp_zone(gfp_flags);
-       }
+       *mpol = get_vma_policy(current, vma, addr);
+       *nodemask = NULL;       /* assume !MPOL_BIND */
 
-       zl = zonelist_policy(GFP_HIGHUSER, pol);
-       if (unlikely(pol != &default_policy && pol != current->mempolicy)) {
-               if (pol->policy != MPOL_BIND)
-                       __mpol_free(pol);       /* finished with pol */
-               else
-                       *mpol = pol;    /* unref needed after allocation */
+       if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
+               zl = node_zonelist(interleave_nid(*mpol, vma, addr,
+                                               HPAGE_SHIFT), gfp_flags);
+       } else {
+               zl = policy_zonelist(gfp_flags, *mpol);
+               if ((*mpol)->mode == MPOL_BIND)
+                       *nodemask = &(*mpol)->v.nodes;
        }
        return zl;
 }
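
A sketch of how a consumer (modelled loosely on the huge page allocator, but hypothetical here) uses the returned triple: walk the zonelist, honour the nodemask when it is non-NULL, then drop the conditional policy reference.

static struct page *example_dequeue(struct vm_area_struct *vma,
                                    unsigned long addr, gfp_t gfp_mask)
{
        struct mempolicy *mpol;
        nodemask_t *nodemask;
        struct zonelist *zl = huge_zonelist(vma, addr, gfp_mask,
                                            &mpol, &nodemask);
        struct page *page = NULL;

        /* ... scan zl for a free huge page, filtered by nodemask if set ... */

        mpol_cond_put(mpol);    /* unref only if the policy was marked shared */
        return page;
}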
@@ -1321,9 +1495,9 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
        struct zonelist *zl;
        struct page *page;
 
-       zl = NODE_DATA(nid)->node_zonelists + gfp_zone(gfp);
+       zl = node_zonelist(nid, gfp);
        page = __alloc_pages(gfp, order, zl);
-       if (page && page_zone(page) == zl->zones[0])
+       if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
                inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
        return page;
 }
@@ -1358,28 +1532,27 @@ alloc_page_vma(gfp_t gfp, struct vm_area_struct *vma, unsigned long addr)
 
        cpuset_update_task_memory_state();
 
-       if (unlikely(pol->policy == MPOL_INTERLEAVE)) {
+       if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
                unsigned nid;
 
                nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
-               if (unlikely(pol != &default_policy &&
-                               pol != current->mempolicy))
-                       __mpol_free(pol);       /* finished with pol */
+               mpol_cond_put(pol);
                return alloc_page_interleave(gfp, 0, nid);
        }
-       zl = zonelist_policy(gfp, pol);
-       if (pol != &default_policy && pol != current->mempolicy) {
+       zl = policy_zonelist(gfp, pol);
+       if (unlikely(mpol_needs_cond_ref(pol))) {
                /*
-                * slow path: ref counted policy -- shared or vma
+                * slow path: ref counted shared policy
                 */
-               struct page *page =  __alloc_pages(gfp, 0, zl);
-               __mpol_free(pol);
+               struct page *page =  __alloc_pages_nodemask(gfp, 0,
+                                               zl, policy_nodemask(gfp, pol));
+               __mpol_put(pol);
                return page;
        }
        /*
         * fast path:  default or task policy
         */
-       return __alloc_pages(gfp, 0, zl);
+       return __alloc_pages_nodemask(gfp, 0, zl, policy_nodemask(gfp, pol));
 }
 
 /**
@@ -1409,22 +1582,28 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
                cpuset_update_task_memory_state();
        if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
                pol = &default_policy;
-       if (pol->policy == MPOL_INTERLEAVE)
+
+       /*
+        * No reference counting needed for current->mempolicy
+        * nor system default_policy
+        */
+       if (pol->mode == MPOL_INTERLEAVE)
                return alloc_page_interleave(gfp, order, interleave_nodes(pol));
-       return __alloc_pages(gfp, order, zonelist_policy(gfp, pol));
+       return __alloc_pages_nodemask(gfp, order,
+                       policy_zonelist(gfp, pol), policy_nodemask(gfp, pol));
 }
 EXPORT_SYMBOL(alloc_pages_current);
 
 /*
- * If mpol_copy() sees current->cpuset == cpuset_being_rebound, then it
+ * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
 * rebinds the mempolicy it is copying by calling mpol_rebind_policy()
  * with the mems_allowed returned by cpuset_mems_allowed().  This
  * keeps mempolicies cpuset relative after its cpuset moves.  See
  * further kernel/cpuset.c update_nodemask().
  */
 
-/* Slow path of a mempolicy copy */
-struct mempolicy *__mpol_copy(struct mempolicy *old)
+/* Slow path of a mempolicy duplicate */
+struct mempolicy *__mpol_dup(struct mempolicy *old)
 {
        struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
 
@@ -1436,55 +1615,64 @@ struct mempolicy *__mpol_copy(struct mempolicy *old)
        }
        *new = *old;
        atomic_set(&new->refcnt, 1);
-       if (new->policy == MPOL_BIND) {
-               int sz = ksize(old->v.zonelist);
-               new->v.zonelist = kmemdup(old->v.zonelist, sz, GFP_KERNEL);
-               if (!new->v.zonelist) {
-                       kmem_cache_free(policy_cache, new);
-                       return ERR_PTR(-ENOMEM);
-               }
-       }
        return new;
 }
 
+/*
+ * If *frompol needs [has] an extra ref, copy *frompol to *tompol,
+ * eliminate the MPOL_F_* flags that require conditional ref and
+ * [NOTE!!!] drop the extra ref.  Not safe to reference *frompol directly
+ * after return.  Use the returned value.
+ *
+ * Allows use of a mempolicy for, e.g., multiple allocations with a single
+ * policy lookup, even if the policy needs/has extra ref on lookup.
+ * shmem_readahead needs this.
+ */
+struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
+                                               struct mempolicy *frompol)
+{
+       if (!mpol_needs_cond_ref(frompol))
+               return frompol;
+
+       *tompol = *frompol;
+       tompol->flags &= ~MPOL_F_SHARED;        /* copy doesn't need unref */
+       __mpol_put(frompol);
+       return tompol;
+}
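
The intended calling pattern, as a hypothetical sketch: copy a possibly-shared policy onto the stack once, then issue several allocations against the copy without keeping the extra reference alive across them.

static void example_multi_alloc(struct mempolicy *maybe_shared)
{
        struct mempolicy tmp;
        struct mempolicy *pol = __mpol_cond_copy(&tmp, maybe_shared);

        /* ... perform multiple allocations using "pol" ... */

        /* no mpol_put() here: any conditional ref was dropped by the copy */
}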
+
+static int mpol_match_intent(const struct mempolicy *a,
+                            const struct mempolicy *b)
+{
+       if (a->flags != b->flags)
+               return 0;
+       if (!mpol_store_user_nodemask(a))
+               return 1;
+       return nodes_equal(a->w.user_nodemask, b->w.user_nodemask);
+}
+
 /* Slow path of a mempolicy comparison */
 int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
 {
        if (!a || !b)
                return 0;
-       if (a->policy != b->policy)
+       if (a->mode != b->mode)
                return 0;
-       switch (a->policy) {
-       case MPOL_DEFAULT:
-               return 1;
+       if (a->mode != MPOL_DEFAULT && !mpol_match_intent(a, b))
+               return 0;
+       switch (a->mode) {
+       case MPOL_BIND:
+               /* Fall through */
        case MPOL_INTERLEAVE:
                return nodes_equal(a->v.nodes, b->v.nodes);
        case MPOL_PREFERRED:
-               return a->v.preferred_node == b->v.preferred_node;
-       case MPOL_BIND: {
-               int i;
-               for (i = 0; a->v.zonelist->zones[i]; i++)
-                       if (a->v.zonelist->zones[i] != b->v.zonelist->zones[i])
-                               return 0;
-               return b->v.zonelist->zones[i] == NULL;
-       }
+               return a->v.preferred_node == b->v.preferred_node &&
+                       a->flags == b->flags;
        default:
                BUG();
                return 0;
        }
 }
 
-/* Slow path of a mpol destructor. */
-void __mpol_free(struct mempolicy *p)
-{
-       if (!atomic_dec_and_test(&p->refcnt))
-               return;
-       if (p->policy == MPOL_BIND)
-               kfree(p->v.zonelist);
-       p->policy = MPOL_DEFAULT;
-       kmem_cache_free(policy_cache, p);
-}
-
 /*
  * Shared memory backing store policy support.
  *
@@ -1547,7 +1735,7 @@ static void sp_insert(struct shared_policy *sp, struct sp_node *new)
        rb_link_node(&new->nd, parent, p);
        rb_insert_color(&new->nd, &sp->root);
        pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
-                new->policy ? new->policy->policy : 0);
+                new->policy ? new->policy->mode : 0);
 }
 
 /* Find shared policy intersecting idx */
@@ -1573,7 +1761,7 @@ static void sp_delete(struct shared_policy *sp, struct sp_node *n)
 {
        pr_debug("deleting %lx-l%lx\n", n->start, n->end);
        rb_erase(&n->nd, &sp->root);
-       mpol_free(n->policy);
+       mpol_put(n->policy);
        kmem_cache_free(sn_cache, n);
 }
 
@@ -1587,6 +1775,7 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
        n->start = start;
        n->end = end;
        mpol_get(pol);
+       pol->flags |= MPOL_F_SHARED;    /* for unref */
        n->policy = pol;
        return n;
 }
@@ -1633,33 +1822,41 @@ restart:
                sp_insert(sp, new);
        spin_unlock(&sp->lock);
        if (new2) {
-               mpol_free(new2->policy);
+               mpol_put(new2->policy);
                kmem_cache_free(sn_cache, new2);
        }
        return 0;
 }
 
-void mpol_shared_policy_init(struct shared_policy *info, int policy,
-                               nodemask_t *policy_nodes)
-{
-       info->root = RB_ROOT;
-       spin_lock_init(&info->lock);
-
-       if (policy != MPOL_DEFAULT) {
-               struct mempolicy *newpol;
-
-               /* Falls back to MPOL_DEFAULT on any error */
-               newpol = mpol_new(policy, policy_nodes);
-               if (!IS_ERR(newpol)) {
-                       /* Create pseudo-vma that contains just the policy */
-                       struct vm_area_struct pvma;
-
-                       memset(&pvma, 0, sizeof(struct vm_area_struct));
-                       /* Policy covers entire file */
-                       pvma.vm_end = TASK_SIZE;
-                       mpol_set_shared_policy(info, &pvma, newpol);
-                       mpol_free(newpol);
-               }
+/**
+ * mpol_shared_policy_init - initialize shared policy for inode
+ * @sp: pointer to inode shared policy
+ * @mpol:  struct mempolicy to install
+ *
+ * Install non-NULL @mpol in inode's shared policy rb-tree.
+ * On entry, the current task has a reference on a non-NULL @mpol.
+ * This must be released on exit.
+ */
+void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
+{
+       sp->root = RB_ROOT;             /* empty tree == default mempolicy */
+       spin_lock_init(&sp->lock);
+
+       if (mpol) {
+               struct vm_area_struct pvma;
+               struct mempolicy *new;
+
+               /* contextualize the tmpfs mount point mempolicy */
+               new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
+               mpol_put(mpol); /* drop our ref on sb mpol */
+               if (IS_ERR(new))
+                       return;         /* no valid nodemask intersection */
+
+               /* Create pseudo-vma that contains just the policy */
+               memset(&pvma, 0, sizeof(struct vm_area_struct));
+               pvma.vm_end = TASK_SIZE;        /* policy covers entire file */
+               mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
+               mpol_put(new);                  /* drop initial ref */
        }
 }
 
@@ -1670,9 +1867,10 @@ int mpol_set_shared_policy(struct shared_policy *info,
        struct sp_node *new = NULL;
        unsigned long sz = vma_pages(vma);
 
-       pr_debug("set_shared_policy %lx sz %lu %d %lx\n",
+       pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
                 vma->vm_pgoff,
-                sz, npol? npol->policy : -1,
+                sz, npol ? npol->mode : -1,
+                npol ? npol->flags : -1,
                 npol ? nodes_addr(npol->v.nodes)[0] : -1);
 
        if (npol) {
@@ -1700,7 +1898,7 @@ void mpol_free_shared_policy(struct shared_policy *p)
                n = rb_entry(next, struct sp_node, nd);
                next = rb_next(&n->nd);
                rb_erase(&n->nd, &p->root);
-               mpol_free(n->policy);
+               mpol_put(n->policy);
                kmem_cache_free(sn_cache, n);
        }
        spin_unlock(&p->lock);
@@ -1745,120 +1943,177 @@ void __init numa_policy_init(void)
        if (unlikely(nodes_empty(interleave_nodes)))
                node_set(prefer, interleave_nodes);
 
-       if (do_set_mempolicy(MPOL_INTERLEAVE, &interleave_nodes))
+       if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
                printk("numa_policy_init: interleaving failed\n");
 }
 
 /* Reset policy of current process to default */
 void numa_default_policy(void)
 {
-       do_set_mempolicy(MPOL_DEFAULT, NULL);
+       do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
 }
 
-/* Migrate a policy to a different set of nodes */
-static void mpol_rebind_policy(struct mempolicy *pol,
-                              const nodemask_t *newmask)
-{
-       nodemask_t *mpolmask;
-       nodemask_t tmp;
+/*
+ * Parse and format mempolicy from/to strings
+ */
 
-       if (!pol)
-               return;
-       mpolmask = &pol->cpuset_mems_allowed;
-       if (nodes_equal(*mpolmask, *newmask))
-               return;
+/*
+ * "local" is pseudo-policy:  MPOL_PREFERRED with MPOL_F_LOCAL flag
+ * Used only for mpol_parse_str() and mpol_to_str()
+ */
+#define MPOL_LOCAL (MPOL_INTERLEAVE + 1)
+static const char * const policy_types[] =
+       { "default", "prefer", "bind", "interleave", "local" };
 
-       switch (pol->policy) {
-       case MPOL_DEFAULT:
-               break;
-       case MPOL_INTERLEAVE:
-               nodes_remap(tmp, pol->v.nodes, *mpolmask, *newmask);
-               pol->v.nodes = tmp;
-               *mpolmask = *newmask;
-               current->il_next = node_remap(current->il_next,
-                                               *mpolmask, *newmask);
-               break;
-       case MPOL_PREFERRED:
-               pol->v.preferred_node = node_remap(pol->v.preferred_node,
-                                               *mpolmask, *newmask);
-               *mpolmask = *newmask;
-               break;
-       case MPOL_BIND: {
-               nodemask_t nodes;
-               struct zone **z;
-               struct zonelist *zonelist;
 
+#ifdef CONFIG_TMPFS
+/**
+ * mpol_parse_str - parse string to mempolicy
+ * @str:  string containing mempolicy to parse
+ * @mpol:  pointer to struct mempolicy pointer, returned on success.
+ * @no_context:  flag whether to "contextualize" the mempolicy
+ *
+ * Format of input:
+ *     <mode>[=<flags>][:<nodelist>]
+ *
+ * if @no_context is true, save the input nodemask in w.user_nodemask in
+ * the returned mempolicy.  This will be used to "clone" the mempolicy in
+ * a specific context [cpuset] at a later time.  Used to parse tmpfs mpol
+ * mount option.  Note that if 'static' or 'relative' mode flags were
+ * specified, the input nodemask will already have been saved.  Saving
+ * it again is redundant, but safe.
+ *
+ * On success, returns 0, else 1
+ */
+int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context)
+{
+       struct mempolicy *new = NULL;
+       unsigned short uninitialized_var(mode);
+       unsigned short uninitialized_var(mode_flags);
+       nodemask_t nodes;
+       char *nodelist = strchr(str, ':');
+       char *flags = strchr(str, '=');
+       int i;
+       int err = 1;
+
+       if (nodelist) {
+               /* NUL-terminate mode or flags string */
+               *nodelist++ = '\0';
+               if (nodelist_parse(nodelist, nodes))
+                       goto out;
+               if (!nodes_subset(nodes, node_states[N_HIGH_MEMORY]))
+                       goto out;
+       } else
                nodes_clear(nodes);
-               for (z = pol->v.zonelist->zones; *z; z++)
-                       node_set(zone_to_nid(*z), nodes);
-               nodes_remap(tmp, nodes, *mpolmask, *newmask);
-               nodes = tmp;
 
-               zonelist = bind_zonelist(&nodes);
+       if (flags)
+               *flags++ = '\0';        /* terminate mode string */
 
-               /* If no mem, then zonelist is NULL and we keep old zonelist.
-                * If that old zonelist has no remaining mems_allowed nodes,
-                * then zonelist_policy() will "FALL THROUGH" to MPOL_DEFAULT.
-                */
+       for (i = 0; i <= MPOL_LOCAL; i++) {
+               if (!strcmp(str, policy_types[i])) {
+                       mode = i;
+                       break;
+               }
+       }
+       if (i > MPOL_LOCAL)
+               goto out;
 
-               if (!IS_ERR(zonelist)) {
-                       /* Good - got mem - substitute new zonelist */
-                       kfree(pol->v.zonelist);
-                       pol->v.zonelist = zonelist;
+       switch (mode) {
+       case MPOL_PREFERRED:
+               /*
+                * Insist on a nodelist of one node only
+                */
+               if (nodelist) {
+                       char *rest = nodelist;
+                       while (isdigit(*rest))
+                               rest++;
+                       if (!*rest)
+                               err = 0;
                }
-               *mpolmask = *newmask;
                break;
-       }
-       default:
-               BUG();
+       case MPOL_INTERLEAVE:
+               /*
+                * Default to online nodes with memory if no nodelist
+                */
+               if (!nodelist)
+                       nodes = node_states[N_HIGH_MEMORY];
+               err = 0;
+               break;
+       case MPOL_LOCAL:
+               /*
+                * Don't allow a nodelist;  mpol_new() checks flags
+                */
+               if (nodelist)
+                       goto out;
+               mode = MPOL_PREFERRED;
                break;
-       }
-}
-
-/*
- * Wrapper for mpol_rebind_policy() that just requires task
- * pointer, and updates task mempolicy.
- */
 
-void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
-{
-       mpol_rebind_policy(tsk->mempolicy, new);
-}
+       /*
+        * case MPOL_BIND:    mpol_new() enforces non-empty nodemask.
+        * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags.
+        */
+       }
 
-/*
- * Rebind each vma in mm to new nodemask.
- *
- * Call holding a reference to mm.  Takes mm->mmap_sem during call.
- */
+       mode_flags = 0;
+       if (flags) {
+               /*
+                * Currently, we only support two mutually exclusive
+                * mode flags.
+                */
+               if (!strcmp(flags, "static"))
+                       mode_flags |= MPOL_F_STATIC_NODES;
+               else if (!strcmp(flags, "relative"))
+                       mode_flags |= MPOL_F_RELATIVE_NODES;
+               else
+                       err = 1;
+       }
 
-void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
-{
-       struct vm_area_struct *vma;
+       new = mpol_new(mode, mode_flags, &nodes);
+       if (IS_ERR(new))
+               err = 1;
+       else if (no_context)
+               new->w.user_nodemask = nodes;   /* save for contextualization */
 
-       down_write(&mm->mmap_sem);
-       for (vma = mm->mmap; vma; vma = vma->vm_next)
-               mpol_rebind_policy(vma->vm_policy, new);
-       up_write(&mm->mmap_sem);
+out:
+       /* Restore string for error message */
+       if (nodelist)
+               *--nodelist = ':';
+       if (flags)
+               *--flags = '=';
+       if (!err)
+               *mpol = new;
+       return err;
 }
+#endif /* CONFIG_TMPFS */
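/*
 * Editor's illustration -- not part of the patch.  The kernel-doc above
 * documents the tmpfs mount-option format <mode>[=<flags>][:<nodelist>].
 * The stand-alone user-space sketch below (split_mpol() is a made-up name)
 * splits such a string the same way mpol_parse_str() does, by locating the
 * ':' and '=' separators with strchr() and NUL-terminating each piece.
 */
#include <stdio.h>
#include <string.h>

static void split_mpol(char *str)
{
	char *nodelist = strchr(str, ':');
	char *flags;

	if (nodelist)
		*nodelist++ = '\0';	/* terminate mode[=flags] */
	flags = strchr(str, '=');
	if (flags)
		*flags++ = '\0';	/* terminate mode */

	printf("mode=\"%s\" flags=\"%s\" nodelist=\"%s\"\n",
	       str, flags ? flags : "", nodelist ? nodelist : "");
}

int main(void)
{
	char a[] = "interleave=relative:0-3";
	char b[] = "prefer:2";
	char c[] = "local";

	split_mpol(a);	/* mode="interleave" flags="relative" nodelist="0-3" */
	split_mpol(b);	/* mode="prefer"     flags=""         nodelist="2"   */
	split_mpol(c);	/* mode="local"      flags=""         nodelist=""    */
	return 0;
}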
 
-/*
- * Display pages allocated per node and memory policy via /proc.
- */
-
-static const char * const policy_types[] =
-       { "default", "prefer", "bind", "interleave" };
-
-/*
+/**
+ * mpol_to_str - format a mempolicy structure for printing
+ * @buffer:  to contain formatted mempolicy string
+ * @maxlen:  length of @buffer
+ * @pol:  pointer to mempolicy to be formatted
+ * @no_context:  "context free" mempolicy - use nodemask in w.user_nodemask
+ *
  * Convert a mempolicy into a string.
  * Returns the number of characters in buffer (if positive)
  * or an error (negative)
  */
-static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
+int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context)
 {
        char *p = buffer;
        int l;
        nodemask_t nodes;
-       int mode = pol ? pol->policy : MPOL_DEFAULT;
+       unsigned short mode;
+       unsigned short flags = pol ? pol->flags : 0;
+
+       /*
+        * Sanity check:  room for longest mode, flag and some nodes
+        */
+       VM_BUG_ON(maxlen < strlen("interleave") + strlen("relative") + 16);
+
+       if (!pol || pol == &default_policy)
+               mode = MPOL_DEFAULT;
+       else
+               mode = pol->mode;
 
        switch (mode) {
        case MPOL_DEFAULT:
@@ -1867,33 +2122,50 @@ static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
 
        case MPOL_PREFERRED:
                nodes_clear(nodes);
-               node_set(pol->v.preferred_node, nodes);
+               if (flags & MPOL_F_LOCAL)
+                       mode = MPOL_LOCAL;      /* pseudo-policy */
+               else
+                       node_set(pol->v.preferred_node, nodes);
                break;
 
        case MPOL_BIND:
-               get_zonemask(pol, &nodes);
-               break;
-
+               /* Fall through */
        case MPOL_INTERLEAVE:
-               nodes = pol->v.nodes;
+               if (no_context)
+                       nodes = pol->w.user_nodemask;
+               else
+                       nodes = pol->v.nodes;
                break;
 
        default:
                BUG();
-               return -EFAULT;
        }
 
        l = strlen(policy_types[mode]);
-       if (buffer + maxlen < p + l + 1)
-               return -ENOSPC;
+       if (buffer + maxlen < p + l + 1)
+               return -ENOSPC;
 
        strcpy(p, policy_types[mode]);
        p += l;
 
-       if (!nodes_empty(nodes)) {
+       if (flags & MPOL_MODE_FLAGS) {
                if (buffer + maxlen < p + 2)
                        return -ENOSPC;
                *p++ = '=';
+
+               /*
+                * Currently, the only defined flags are mutually exclusive
+                */
+               if (flags & MPOL_F_STATIC_NODES)
+                       p += snprintf(p, buffer + maxlen - p, "static");
+               else if (flags & MPOL_F_RELATIVE_NODES)
+                       p += snprintf(p, buffer + maxlen - p, "relative");
+       }
+
+       if (!nodes_empty(nodes)) {
+               if (buffer + maxlen < p + 2)
+                       return -ENOSPC;
+               *p++ = ':';
                p += nodelist_scnprintf(p, buffer + maxlen - p, nodes);
        }
        return p - buffer;
@@ -1971,6 +2243,9 @@ static inline void check_huge_range(struct vm_area_struct *vma,
 }
 #endif
 
+/*
+ * Display pages allocated per node and memory policy via /proc.
+ */
 int show_numa_map(struct seq_file *m, void *v)
 {
        struct proc_maps_private *priv = m->private;
@@ -1990,12 +2265,8 @@ int show_numa_map(struct seq_file *m, void *v)
                return 0;
 
        pol = get_vma_policy(priv->task, vma, vma->vm_start);
-       mpol_to_str(buffer, sizeof(buffer), pol);
-       /*
-        * unref shared or other task's mempolicy
-        */
-       if (pol != &default_policy && pol != current->mempolicy)
-               __mpol_free(pol);
+       mpol_to_str(buffer, sizeof(buffer), pol, 0);
+       mpol_cond_put(pol);
 
        seq_printf(m, "%08lx %s", vma->vm_start, buffer);
 
index 4e0eccca5e265ac19bc507a171a2f720d27f8c21..449d77d409f52622bbf825b7fd17d43df7ff5fbf 100644 (file)
@@ -383,7 +383,14 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
 
        if (PageDirty(page)) {
                clear_page_dirty_for_io(page);
-               set_page_dirty(newpage);
+               /*
+                * Want to mark the page and the radix tree as dirty, and
+                * redo the accounting that clear_page_dirty_for_io undid,
+                * but we can't use set_page_dirty because that function
+                * is actually a signal that all of the page has become dirty.
+                * Whereas only part of our page may be dirty.
+                */
+               __set_page_dirty_nobuffers(newpage);
        }
 
 #ifdef CONFIG_SWAP
index 5efe0ded69b1857935d0295e9999210e4999a516..5178800bc1294f6649651f5cb2d7ee9a6e183495 100644 (file)
@@ -33,7 +33,7 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
         * When tmpfs swaps out a page from a file, any process mapping that
         * file will not get a swp_entry_t in its pte, but rather it is like
         * any other file mapping (ie. marked !present and faulted in with
-        * tmpfs's .nopage). So swapped out tmpfs mappings are tested here.
+        * tmpfs's .fault). So swapped out tmpfs mappings are tested here.
         *
         * However when tmpfs moves the page from pagecache and into swapcache,
         * it is still in core, but the find_get_page below won't find it.
index a32d28ce31cda697aff68fdc6c939560096e3a50..fac66337da2a3bcd3a003ba9d33096938c17fb57 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -230,9 +230,12 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
        might_sleep();
        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);
-       if (vma->vm_file)
+       if (vma->vm_file) {
                fput(vma->vm_file);
-       mpol_free(vma_policy(vma));
+               if (vma->vm_flags & VM_EXECUTABLE)
+                       removed_exe_file_vma(vma->vm_mm);
+       }
+       mpol_put(vma_policy(vma));
        kmem_cache_free(vm_area_cachep, vma);
        return next;
 }
@@ -623,10 +626,13 @@ again:                    remove_next = 1 + (end > next->vm_end);
                spin_unlock(&mapping->i_mmap_lock);
 
        if (remove_next) {
-               if (file)
+               if (file) {
                        fput(file);
+                       if (next->vm_flags & VM_EXECUTABLE)
+                               removed_exe_file_vma(mm);
+               }
                mm->map_count--;
-               mpol_free(vma_policy(next));
+               mpol_put(vma_policy(next));
                kmem_cache_free(vm_area_cachep, next);
                /*
                 * In mprotect's case 6 (see comments on vma_merge),
@@ -1068,7 +1074,6 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
                mapping_cap_account_dirty(vma->vm_file->f_mapping);
 }
 
-
 unsigned long mmap_region(struct file *file, unsigned long addr,
                          unsigned long len, unsigned long flags,
                          unsigned int vm_flags, unsigned long pgoff,
@@ -1155,6 +1160,8 @@ munmap_back:
                error = file->f_op->mmap(file, vma);
                if (error)
                        goto unmap_and_free_vma;
+               if (vm_flags & VM_EXECUTABLE)
+                       added_exe_file_vma(mm);
        } else if (vm_flags & VM_SHARED) {
                error = shmem_zero_setup(vma);
                if (error)
@@ -1181,22 +1188,22 @@ munmap_back:
        if (vma_wants_writenotify(vma))
                vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
 
-       if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
+       if (file && vma_merge(mm, prev, addr, vma->vm_end,
                        vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
-               file = vma->vm_file;
-               vma_link(mm, vma, prev, rb_link, rb_parent);
-               if (correct_wcount)
-                       atomic_inc(&inode->i_writecount);
-       } else {
-               if (file) {
-                       if (correct_wcount)
-                               atomic_inc(&inode->i_writecount);
-                       fput(file);
-               }
-               mpol_free(vma_policy(vma));
+               mpol_put(vma_policy(vma));
                kmem_cache_free(vm_area_cachep, vma);
+               fput(file);
+               if (vm_flags & VM_EXECUTABLE)
+                       removed_exe_file_vma(mm);
+       } else {
+               vma_link(mm, vma, prev, rb_link, rb_parent);
+               file = vma->vm_file;
        }
-out:   
+
+       /* Once vma denies write, undo our temporary denial count */
+       if (correct_wcount)
+               atomic_inc(&inode->i_writecount);
+out:
        mm->total_vm += len >> PAGE_SHIFT;
        vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
        if (vm_flags & VM_LOCKED) {
@@ -1813,15 +1820,18 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
                new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
        }
 
-       pol = mpol_copy(vma_policy(vma));
+       pol = mpol_dup(vma_policy(vma));
        if (IS_ERR(pol)) {
                kmem_cache_free(vm_area_cachep, new);
                return PTR_ERR(pol);
        }
        vma_set_policy(new, pol);
 
-       if (new->vm_file)
+       if (new->vm_file) {
                get_file(new->vm_file);
+               if (vma->vm_flags & VM_EXECUTABLE)
+                       added_exe_file_vma(mm);
+       }
 
        if (new->vm_ops && new->vm_ops->open)
                new->vm_ops->open(new);
@@ -2129,7 +2139,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
                if (new_vma) {
                        *new_vma = *vma;
-                       pol = mpol_copy(vma_policy(vma));
+                       pol = mpol_dup(vma_policy(vma));
                        if (IS_ERR(pol)) {
                                kmem_cache_free(vm_area_cachep, new_vma);
                                return NULL;
@@ -2138,8 +2148,11 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
                        new_vma->vm_start = addr;
                        new_vma->vm_end = addr + len;
                        new_vma->vm_pgoff = pgoff;
-                       if (new_vma->vm_file)
+                       if (new_vma->vm_file) {
                                get_file(new_vma->vm_file);
+                               if (vma->vm_flags & VM_EXECUTABLE)
+                                       added_exe_file_vma(mm);
+                       }
                        if (new_vma->vm_ops && new_vma->vm_ops->open)
                                new_vma->vm_ops->open(new_vma);
                        vma_link(mm, new_vma, prev, rb_link, rb_parent);
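/*
 * Editor's illustration -- not part of the patch.  Each mmap.c hunk above
 * keeps the same invariant: a get_file() taken for a VM_EXECUTABLE mapping
 * is paired with added_exe_file_vma(mm), and the matching fput() with
 * removed_exe_file_vma(mm), so the mm can track how many executable
 * file-backed VMAs it still owns.  Below is a simplified user-space model
 * of that pairing only; struct mm and the helpers here are stand-ins, not
 * the kernel implementation.
 */
#include <assert.h>
#include <stdio.h>

struct mm { int num_exe_file_vmas; };

static void added_exe_file_vma(struct mm *mm)
{
	mm->num_exe_file_vmas++;
}

static void removed_exe_file_vma(struct mm *mm)
{
	assert(mm->num_exe_file_vmas > 0);
	mm->num_exe_file_vmas--;
}

int main(void)
{
	struct mm mm = { 0 };

	added_exe_file_vma(&mm);	/* mmap_region(): new executable vma  */
	added_exe_file_vma(&mm);	/* split_vma(): second vma, same file */
	removed_exe_file_vma(&mm);	/* remove_vma(): one mapping removed  */
	removed_exe_file_vma(&mm);	/* the last mapping is torn down      */
	printf("executable file vmas left: %d\n", mm.num_exe_file_vmas); /* 0 */
	return 0;
}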
index eb5838634f181a1d6c58c948c10aac09dd272f26..486ed595ee6fa939f1cef55b7e4349d000d86317 100644 (file)
@@ -42,3 +42,33 @@ struct zone *next_zone(struct zone *zone)
        return zone;
 }
 
+static inline int zref_in_nodemask(struct zoneref *zref, nodemask_t *nodes)
+{
+#ifdef CONFIG_NUMA
+       return node_isset(zonelist_node_idx(zref), *nodes);
+#else
+       return 1;
+#endif /* CONFIG_NUMA */
+}
+
+/* Returns the next zone at or below highest_zoneidx in a zonelist */
+struct zoneref *next_zones_zonelist(struct zoneref *z,
+                                       enum zone_type highest_zoneidx,
+                                       nodemask_t *nodes,
+                                       struct zone **zone)
+{
+       /*
+        * Find the next suitable zone to use for the allocation.
+        * Only filter based on nodemask if it's set
+        */
+       if (likely(nodes == NULL))
+               while (zonelist_zone_idx(z) > highest_zoneidx)
+                       z++;
+       else
+               while (zonelist_zone_idx(z) > highest_zoneidx ||
+                               (z->zone && !zref_in_nodemask(z, nodes)))
+                       z++;
+
+       *zone = zonelist_zone(z++);
+       return z;
+}
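/*
 * Editor's illustration -- not part of the patch.  next_zones_zonelist()
 * above simply skips zoneref entries whose zone index is above the
 * requested limit or, when a nodemask is supplied, whose node is not in
 * that mask.  The self-contained model below (struct zref, next_usable()
 * and the sample table are invented for the example) shows the same
 * skipping rule on a zonelist covering two nodes.
 */
#include <stdio.h>

struct zref { int present; int zone_idx; int nid; };	/* present == 0 ends the list */

static int next_usable(const struct zref *z, int i, int highest_zoneidx,
		       unsigned long nodes)
{
	while (z[i].present &&
	       (z[i].zone_idx > highest_zoneidx ||
		(nodes && !(nodes & (1UL << z[i].nid)))))
		i++;
	return i;
}

int main(void)
{
	/* node 0: HIGHMEM(2), NORMAL(1), DMA(0); then node 1: NORMAL, DMA */
	const struct zref zl[] = {
		{1, 2, 0}, {1, 1, 0}, {1, 0, 0}, {1, 1, 1}, {1, 0, 1}, {0, 0, 0},
	};
	int i;

	i = next_usable(zl, 0, 1, 0);		/* no nodemask: skip HIGHMEM */
	printf("first usable: entry %d (zone_idx %d, nid %d)\n",
	       i, zl[i].zone_idx, zl[i].nid);	/* entry 1 */

	i = next_usable(zl, 0, 1, 1UL << 1);	/* restrict to node 1 */
	printf("node-1 only : entry %d (zone_idx %d, nid %d)\n",
	       i, zl[i].zone_idx, zl[i].nid);	/* entry 3 */
	return 0;
}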
index 5d8ae086f74e541a6e526288e072e87b665484ab..ef8c62cec697a06495b13f2658aff4e30000065c 100644 (file)
@@ -105,7 +105,11 @@ unsigned int kobjsize(const void *objp)
 {
        struct page *page;
 
-       if (!objp || !((page = virt_to_page(objp))))
+       /*
+        * If the object we have should not have ksize performed on it,
+        * return size of 0
+        */
+       if (!objp || (unsigned long)objp >= memory_end || !((page = virt_to_page(objp))))
                return 0;
 
        if (PageSlab(page))
@@ -962,8 +966,13 @@ unsigned long do_mmap_pgoff(struct file *file,
 
        INIT_LIST_HEAD(&vma->anon_vma_node);
        atomic_set(&vma->vm_usage, 1);
-       if (file)
+       if (file) {
                get_file(file);
+               if (vm_flags & VM_EXECUTABLE) {
+                       added_exe_file_vma(current->mm);
+                       vma->vm_mm = current->mm;
+               }
+       }
        vma->vm_file    = file;
        vma->vm_flags   = vm_flags;
        vma->vm_start   = addr;
@@ -1018,8 +1027,11 @@ unsigned long do_mmap_pgoff(struct file *file,
        up_write(&nommu_vma_sem);
        kfree(vml);
        if (vma) {
-               if (vma->vm_file)
+               if (vma->vm_file) {
                        fput(vma->vm_file);
+                       if (vma->vm_flags & VM_EXECUTABLE)
+                               removed_exe_file_vma(vma->vm_mm);
+               }
                kfree(vma);
        }
        return ret;
@@ -1049,7 +1061,7 @@ EXPORT_SYMBOL(do_mmap_pgoff);
 /*
  * handle mapping disposal for uClinux
  */
-static void put_vma(struct vm_area_struct *vma)
+static void put_vma(struct mm_struct *mm, struct vm_area_struct *vma)
 {
        if (vma) {
                down_write(&nommu_vma_sem);
@@ -1071,8 +1083,11 @@ static void put_vma(struct vm_area_struct *vma)
                        realalloc -= kobjsize(vma);
                        askedalloc -= sizeof(*vma);
 
-                       if (vma->vm_file)
+                       if (vma->vm_file) {
                                fput(vma->vm_file);
+                               if (vma->vm_flags & VM_EXECUTABLE)
+                                       removed_exe_file_vma(mm);
+                       }
                        kfree(vma);
                }
 
@@ -1109,7 +1124,7 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
  found:
        vml = *parent;
 
-       put_vma(vml->vma);
+       put_vma(mm, vml->vma);
 
        *parent = vml->next;
        realalloc -= kobjsize(vml);
@@ -1154,7 +1169,7 @@ void exit_mmap(struct mm_struct * mm)
 
                while ((tmp = mm->context.vmlist)) {
                        mm->context.vmlist = tmp->next;
-                       put_vma(tmp->vma);
+                       put_vma(mm, tmp->vma);
 
                        realalloc -= kobjsize(tmp);
                        askedalloc -= sizeof(*tmp);
index beb592fe9389ffa9fe34c8c92ca82ada55abd960..8a5467ee6265deb11a48fe9861348dd182dc9f7f 100644 (file)
@@ -53,8 +53,7 @@ static DEFINE_SPINLOCK(zone_scan_mutex);
  *    of least surprise ... (be careful when you change it)
  */
 
-unsigned long badness(struct task_struct *p, unsigned long uptime,
-                       struct mem_cgroup *mem)
+unsigned long badness(struct task_struct *p, unsigned long uptime)
 {
        unsigned long points, cpu_time, run_time, s;
        struct mm_struct *mm;
@@ -175,12 +174,14 @@ static inline enum oom_constraint constrained_alloc(struct zonelist *zonelist,
                                                    gfp_t gfp_mask)
 {
 #ifdef CONFIG_NUMA
-       struct zone **z;
+       struct zone *zone;
+       struct zoneref *z;
+       enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        nodemask_t nodes = node_states[N_HIGH_MEMORY];
 
-       for (z = zonelist->zones; *z; z++)
-               if (cpuset_zone_allowed_softwall(*z, gfp_mask))
-                       node_clear(zone_to_nid(*z), nodes);
+       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
+               if (cpuset_zone_allowed_softwall(zone, gfp_mask))
+                       node_clear(zone_to_nid(zone), nodes);
                else
                        return CONSTRAINT_CPUSET;
 
@@ -254,7 +255,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints,
                if (p->oomkilladj == OOM_DISABLE)
                        continue;
 
-               points = badness(p, uptime.tv_sec, mem);
+               points = badness(p, uptime.tv_sec);
                if (points > *ppoints || !chosen) {
                        chosen = p;
                        *ppoints = points;
@@ -460,29 +461,29 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
  * if a parallel OOM killing is already taking place that includes a zone in
  * the zonelist.  Otherwise, locks all zones in the zonelist and returns 1.
  */
-int try_set_zone_oom(struct zonelist *zonelist)
+int try_set_zone_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 {
-       struct zone **z;
+       struct zoneref *z;
+       struct zone *zone;
        int ret = 1;
 
-       z = zonelist->zones;
-
        spin_lock(&zone_scan_mutex);
-       do {
-               if (zone_is_oom_locked(*z)) {
+       for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+               if (zone_is_oom_locked(zone)) {
                        ret = 0;
                        goto out;
                }
-       } while (*(++z) != NULL);
+       }
+
+       for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+               /*
+                * Lock each zone in the zonelist under zone_scan_mutex so a
+                * parallel invocation of try_set_zone_oom() doesn't succeed
+                * when it shouldn't.
+                */
+               zone_set_flag(zone, ZONE_OOM_LOCKED);
+       }
 
-       /*
-        * Lock each zone in the zonelist under zone_scan_mutex so a parallel
-        * invocation of try_set_zone_oom() doesn't succeed when it shouldn't.
-        */
-       z = zonelist->zones;
-       do {
-               zone_set_flag(*z, ZONE_OOM_LOCKED);
-       } while (*(++z) != NULL);
 out:
        spin_unlock(&zone_scan_mutex);
        return ret;
@@ -493,16 +494,15 @@ out:
  * allocation attempts with zonelists containing them may now recall the OOM
  * killer, if necessary.
  */
-void clear_zonelist_oom(struct zonelist *zonelist)
+void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask)
 {
-       struct zone **z;
-
-       z = zonelist->zones;
+       struct zoneref *z;
+       struct zone *zone;
 
        spin_lock(&zone_scan_mutex);
-       do {
-               zone_clear_flag(*z, ZONE_OOM_LOCKED);
-       } while (*(++z) != NULL);
+       for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
+               zone_clear_flag(zone, ZONE_OOM_LOCKED);
+       }
        spin_unlock(&zone_scan_mutex);
 }
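/*
 * Editor's illustration -- not part of the patch.  The reworked
 * try_set_zone_oom()/clear_zonelist_oom() follow a "check all, then lock
 * all" pattern under a single mutex: if any zone usable by the allocation
 * is already OOM-locked the caller backs off, otherwise every such zone is
 * marked locked.  A minimal user-space model of that pattern only (the
 * names try_lock_all()/unlock_all() are invented for the example):
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns 1 and marks every entry if none was marked; returns 0 otherwise. */
static int try_lock_all(int *locked, int n)
{
	int i, ret = 1;

	pthread_mutex_lock(&scan_lock);
	for (i = 0; i < n; i++)
		if (locked[i]) {
			ret = 0;
			goto out;
		}
	for (i = 0; i < n; i++)
		locked[i] = 1;
out:
	pthread_mutex_unlock(&scan_lock);
	return ret;
}

static void unlock_all(int *locked, int n)
{
	int i;

	pthread_mutex_lock(&scan_lock);
	for (i = 0; i < n; i++)
		locked[i] = 0;
	pthread_mutex_unlock(&scan_lock);
}

int main(void)
{
	int zones[3] = { 0, 0, 0 };

	printf("first caller : %d\n", try_lock_all(zones, 3));	/* 1 */
	printf("second caller: %d\n", try_lock_all(zones, 3));	/* 0 */
	unlock_all(zones, 3);
	printf("after unlock : %d\n", try_lock_all(zones, 3));	/* 1 */
	return 0;
}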
 
index 5e00f1772c20fc6d9e4e70a087800e93b3ae3df2..789b6adbef37f1f38c3887f10ba130595903408a 100644 (file)
@@ -164,9 +164,20 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
  */
 static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
 {
-       __prop_inc_percpu(&vm_completions, &bdi->completions);
+       __prop_inc_percpu_max(&vm_completions, &bdi->completions,
+                             bdi->max_prop_frac);
 }
 
+void bdi_writeout_inc(struct backing_dev_info *bdi)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __bdi_writeout_inc(bdi);
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(bdi_writeout_inc);
+
 static inline void task_dirty_inc(struct task_struct *tsk)
 {
        prop_inc_single(&vm_dirties, &tsk->dirties);
@@ -200,7 +211,8 @@ clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
        avail_dirty = dirty -
                (global_page_state(NR_FILE_DIRTY) +
                 global_page_state(NR_WRITEBACK) +
-                global_page_state(NR_UNSTABLE_NFS));
+                global_page_state(NR_UNSTABLE_NFS) +
+                global_page_state(NR_WRITEBACK_TEMP));
 
        if (avail_dirty < 0)
                avail_dirty = 0;
@@ -242,6 +254,55 @@ static void task_dirty_limit(struct task_struct *tsk, long *pdirty)
        *pdirty = dirty;
 }
 
+/*
+ *
+ */
+static DEFINE_SPINLOCK(bdi_lock);
+static unsigned int bdi_min_ratio;
+
+int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
+{
+       int ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&bdi_lock, flags);
+       if (min_ratio > bdi->max_ratio) {
+               ret = -EINVAL;
+       } else {
+               min_ratio -= bdi->min_ratio;
+               if (bdi_min_ratio + min_ratio < 100) {
+                       bdi_min_ratio += min_ratio;
+                       bdi->min_ratio += min_ratio;
+               } else {
+                       ret = -EINVAL;
+               }
+       }
+       spin_unlock_irqrestore(&bdi_lock, flags);
+
+       return ret;
+}
+
+int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
+{
+       unsigned long flags;
+       int ret = 0;
+
+       if (max_ratio > 100)
+               return -EINVAL;
+
+       spin_lock_irqsave(&bdi_lock, flags);
+       if (bdi->min_ratio > max_ratio) {
+               ret = -EINVAL;
+       } else {
+               bdi->max_ratio = max_ratio;
+               bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
+       }
+       spin_unlock_irqrestore(&bdi_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(bdi_set_max_ratio);
+
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
@@ -300,7 +361,7 @@ static unsigned long determine_dirtyable_memory(void)
        return x + 1;   /* Ensure that we never return 0 */
 }
 
-static void
+void
 get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
                 struct backing_dev_info *bdi)
 {
@@ -330,7 +391,7 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
        *pdirty = dirty;
 
        if (bdi) {
-               u64 bdi_dirty = dirty;
+               u64 bdi_dirty;
                long numerator, denominator;
 
                /*
@@ -338,8 +399,12 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
                 */
                bdi_writeout_fraction(bdi, &numerator, &denominator);
 
+               bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
                bdi_dirty *= numerator;
                do_div(bdi_dirty, denominator);
+               bdi_dirty += (dirty * bdi->min_ratio) / 100;
+               if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
+                       bdi_dirty = dirty * bdi->max_ratio / 100;
 
                *pbdi_dirty = bdi_dirty;
                clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
@@ -1192,7 +1257,7 @@ int test_clear_page_writeback(struct page *page)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
-                       if (bdi_cap_writeback_dirty(bdi)) {
+                       if (bdi_cap_account_writeback(bdi)) {
                                __dec_bdi_stat(bdi, BDI_WRITEBACK);
                                __bdi_writeout_inc(bdi);
                        }
@@ -1221,7 +1286,7 @@ int test_set_page_writeback(struct page *page)
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
-                       if (bdi_cap_writeback_dirty(bdi))
+                       if (bdi_cap_account_writeback(bdi))
                                __inc_bdi_stat(bdi, BDI_WRITEBACK);
                }
                if (!PageDirty(page))
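/*
 * Editor's note -- worked example, not part of the patch.  With the hunks
 * above, get_dirty_limits() derives a per-BDI dirty limit from the global
 * one.  Using made-up numbers: dirty = 1000 pages, a globally reserved
 * bdi_min_ratio of 10, this device's min_ratio = 5 and max_ratio = 60, and
 * a measured writeout fraction of 30/100:
 *
 *	bdi_dirty  = 1000 * (100 - 10) / 100	=  900	(pool left after the reserved minimum)
 *	bdi_dirty  =  900 * 30 / 100		=  270	(this device's writeout share of it)
 *	bdi_dirty +=  1000 * 5 / 100		=  320	(plus its own guaranteed minimum)
 *	cap: 1000 * 60 / 100 = 600; 320 < 600, so bdi_dirty stays 320
 *
 * so this device may dirty up to 320 of the 1000 allowed pages.
 */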
index 32e796af12a16c756b70051426195e8983227cdc..bdd5c432c426fdf6714ff90e1eebdfccfdccee8c 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
 #include <linux/memcontrol.h>
+#include <linux/debugobjects.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -532,8 +533,11 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        if (reserved)
                return;
 
-       if (!PageHighMem(page))
+       if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
+               debug_check_no_obj_freed(page_address(page),
+                                          PAGE_SIZE << order);
+       }
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);
 
@@ -546,7 +550,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 /*
  * permit the bootmem allocator to evade page validation on high-order frees
  */
-void __init __free_pages_bootmem(struct page *page, unsigned int order)
+void __free_pages_bootmem(struct page *page, unsigned int order)
 {
        if (order == 0) {
                __ClearPageReserved(page);
@@ -632,7 +636,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
        if (PageReserved(page))
                return 1;
 
-       page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead |
+       page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_reclaim |
                        1 << PG_referenced | 1 << PG_arch_1 |
                        1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
        set_page_private(page, 0);
@@ -995,8 +999,10 @@ static void free_hot_cold_page(struct page *page, int cold)
        if (free_pages_check(page))
                return;
 
-       if (!PageHighMem(page))
+       if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
+               debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
+       }
        arch_free_page(page, 0);
        kernel_map_pages(page, 1, 0);
 
@@ -1050,7 +1056,7 @@ void split_page(struct page *page, unsigned int order)
  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
  * or two.
  */
-static struct page *buffered_rmqueue(struct zonelist *zonelist,
+static struct page *buffered_rmqueue(struct zone *preferred_zone,
                        struct zone *zone, int order, gfp_t gfp_flags)
 {
        unsigned long flags;
@@ -1102,7 +1108,7 @@ again:
        }
 
        __count_zone_vm_events(PGALLOC, zone, 1 << order);
-       zone_statistics(zonelist, zone);
+       zone_statistics(preferred_zone, zone);
        local_irq_restore(flags);
        put_cpu();
 
@@ -1284,7 +1290,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
        if (!zlc)
                return NULL;
 
-       if (time_after(jiffies, zlc->last_full_zap + HZ)) {
+       if (time_after(jiffies, zlc->last_full_zap + HZ)) {
                bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
                zlc->last_full_zap = jiffies;
        }
@@ -1317,7 +1323,7 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
  * We are low on memory in the second scan, and should leave no stone
  * unturned looking for a free page.
  */
-static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
+static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
                                                nodemask_t *allowednodes)
 {
        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
@@ -1328,7 +1334,7 @@ static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
        if (!zlc)
                return 1;
 
-       i = z - zonelist->zones;
+       i = z - zonelist->_zonerefs;
        n = zlc->z_to_n[i];
 
        /* This zone is worth trying if it is allowed but not full */
@@ -1340,7 +1346,7 @@ static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
  * zlc->fullzones, so that subsequent attempts to allocate a page
  * from that zone don't waste time re-examining it.
  */
-static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
+static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
 {
        struct zonelist_cache *zlc;     /* cached zonelist speedup info */
        int i;                          /* index of *z in zonelist zones */
@@ -1349,7 +1355,7 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
        if (!zlc)
                return;
 
-       i = z - zonelist->zones;
+       i = z - zonelist->_zonerefs;
 
        set_bit(i, zlc->fullzones);
 }
@@ -1361,13 +1367,13 @@ static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
        return NULL;
 }
 
-static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
+static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
                                nodemask_t *allowednodes)
 {
        return 1;
 }
 
-static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
+static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
 {
 }
 #endif /* CONFIG_NUMA */
@@ -1377,42 +1383,31 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
  * a page.
  */
 static struct page *
-get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
-               struct zonelist *zonelist, int alloc_flags)
+get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
+               struct zonelist *zonelist, int high_zoneidx, int alloc_flags)
 {
-       struct zone **z;
+       struct zoneref *z;
        struct page *page = NULL;
-       int classzone_idx = zone_idx(zonelist->zones[0]);
-       struct zone *zone;
+       int classzone_idx;
+       struct zone *zone, *preferred_zone;
        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
        int zlc_active = 0;             /* set if using zonelist_cache */
        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
-       enum zone_type highest_zoneidx = -1; /* Gets set for policy zonelists */
+
+       (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
+                                                       &preferred_zone);
+       classzone_idx = zone_idx(preferred_zone);
 
 zonelist_scan:
        /*
         * Scan zonelist, looking for a zone with enough free.
         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
         */
-       z = zonelist->zones;
-
-       do {
-               /*
-                * In NUMA, this could be a policy zonelist which contains
-                * zones that may not be allowed by the current gfp_mask.
-                * Check the zone is allowed by the current flags
-                */
-               if (unlikely(alloc_should_filter_zonelist(zonelist))) {
-                       if (highest_zoneidx == -1)
-                               highest_zoneidx = gfp_zone(gfp_mask);
-                       if (zone_idx(*z) > highest_zoneidx)
-                               continue;
-               }
-
+       for_each_zone_zonelist_nodemask(zone, z, zonelist,
+                                               high_zoneidx, nodemask) {
                if (NUMA_BUILD && zlc_active &&
                        !zlc_zone_worth_trying(zonelist, z, allowednodes))
                                continue;
-               zone = *z;
                if ((alloc_flags & ALLOC_CPUSET) &&
                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
                                goto try_next_zone;
@@ -1433,7 +1428,7 @@ zonelist_scan:
                        }
                }
 
-               page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
+               page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
                if (page)
                        break;
 this_zone_full:
@@ -1446,7 +1441,7 @@ try_next_zone:
                        zlc_active = 1;
                        did_zlc_setup = 1;
                }
-       } while (*(++z) != NULL);
+       }
 
        if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
                /* Disable zlc cache for second zonelist scan */
@@ -1459,18 +1454,21 @@ try_next_zone:
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order,
-               struct zonelist *zonelist)
+static struct page *
+__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
+                       struct zonelist *zonelist, nodemask_t *nodemask)
 {
        const gfp_t wait = gfp_mask & __GFP_WAIT;
-       struct zone **z;
+       enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+       struct zoneref *z;
+       struct zone *zone;
        struct page *page;
        struct reclaim_state reclaim_state;
        struct task_struct *p = current;
        int do_retry;
        int alloc_flags;
-       int did_some_progress;
+       unsigned long did_some_progress;
+       unsigned long pages_reclaimed = 0;
 
        might_sleep_if(wait);
 
@@ -1478,9 +1476,9 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
                return NULL;
 
 restart:
-       z = zonelist->zones;  /* the list of zones suitable for gfp_mask */
+       z = zonelist->_zonerefs;  /* the list of zones suitable for gfp_mask */
 
-       if (unlikely(*z == NULL)) {
+       if (unlikely(!z->zone)) {
                /*
                 * Happens if we have an empty zonelist as a result of
                 * GFP_THISNODE being used on a memoryless node
@@ -1488,8 +1486,8 @@ restart:
                return NULL;
        }
 
-       page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
-                               zonelist, ALLOC_WMARK_LOW|ALLOC_CPUSET);
+       page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
+                       zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET);
        if (page)
                goto got_pg;
 
@@ -1504,8 +1502,8 @@ restart:
        if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
                goto nopage;
 
-       for (z = zonelist->zones; *z; z++)
-               wakeup_kswapd(*z, order);
+       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
+               wakeup_kswapd(zone, order);
 
        /*
         * OK, we're below the kswapd watermark and have kicked background
@@ -1533,7 +1531,8 @@ restart:
         * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
         */
-       page = get_page_from_freelist(gfp_mask, order, zonelist, alloc_flags);
+       page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
+                                               high_zoneidx, alloc_flags);
        if (page)
                goto got_pg;
 
@@ -1545,8 +1544,8 @@ rebalance:
                if (!(gfp_mask & __GFP_NOMEMALLOC)) {
 nofail_alloc:
                        /* go through the zonelist yet again, ignoring mins */
-                       page = get_page_from_freelist(gfp_mask, order,
-                               zonelist, ALLOC_NO_WATERMARKS);
+                       page = get_page_from_freelist(gfp_mask, nodemask, order,
+                               zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
                        if (page)
                                goto got_pg;
                        if (gfp_mask & __GFP_NOFAIL) {
@@ -1569,7 +1568,7 @@ nofail_alloc:
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;
 
-       did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
+       did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
 
        p->reclaim_state = NULL;
        p->flags &= ~PF_MEMALLOC;
@@ -1580,12 +1579,12 @@ nofail_alloc:
                drain_all_pages();
 
        if (likely(did_some_progress)) {
-               page = get_page_from_freelist(gfp_mask, order,
-                                               zonelist, alloc_flags);
+               page = get_page_from_freelist(gfp_mask, nodemask, order,
+                                       zonelist, high_zoneidx, alloc_flags);
                if (page)
                        goto got_pg;
        } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
-               if (!try_set_zone_oom(zonelist)) {
+               if (!try_set_zone_oom(zonelist, gfp_mask)) {
                        schedule_timeout_uninterruptible(1);
                        goto restart;
                }
@@ -1596,21 +1595,22 @@ nofail_alloc:
                 * a parallel oom killing, we must fail if we're still
                 * under heavy pressure.
                 */
-               page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, order,
-                               zonelist, ALLOC_WMARK_HIGH|ALLOC_CPUSET);
+               page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
+                       order, zonelist, high_zoneidx,
+                       ALLOC_WMARK_HIGH|ALLOC_CPUSET);
                if (page) {
-                       clear_zonelist_oom(zonelist);
+                       clear_zonelist_oom(zonelist, gfp_mask);
                        goto got_pg;
                }
 
                /* The OOM killer will not help higher order allocs so fail */
                if (order > PAGE_ALLOC_COSTLY_ORDER) {
-                       clear_zonelist_oom(zonelist);
+                       clear_zonelist_oom(zonelist, gfp_mask);
                        goto nopage;
                }
 
                out_of_memory(zonelist, gfp_mask, order);
-               clear_zonelist_oom(zonelist);
+               clear_zonelist_oom(zonelist, gfp_mask);
                goto restart;
        }
 
@@ -1618,14 +1618,26 @@ nofail_alloc:
         * Don't let big-order allocations loop unless the caller explicitly
         * requests that.  Wait for some write requests to complete then retry.
         *
-        * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
-        * <= 3, but that may not be true in other implementations.
+        * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
+        * means __GFP_NOFAIL, but that may not be true in other
+        * implementations.
+        *
+        * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
+        * specified, then we retry until we no longer reclaim any pages
+        * (above), or we've reclaimed an order of pages at least as
+        * large as the allocation's order. In both cases, if the
+        * allocation still fails, we stop retrying.
         */
+       pages_reclaimed += did_some_progress;
        do_retry = 0;
        if (!(gfp_mask & __GFP_NORETRY)) {
-               if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
-                                               (gfp_mask & __GFP_REPEAT))
+               if (order <= PAGE_ALLOC_COSTLY_ORDER) {
                        do_retry = 1;
+               } else {
+                       if (gfp_mask & __GFP_REPEAT &&
+                               pages_reclaimed < (1 << order))
+                                       do_retry = 1;
+               }
                if (gfp_mask & __GFP_NOFAIL)
                        do_retry = 1;
        }
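/*
 * Editor's illustration -- not part of the patch.  The retry rule spelled
 * out in the comment above: low-order allocations keep retrying,
 * costly-order allocations with __GFP_REPEAT retry only while reclaim has
 * freed fewer than 2^order pages, and __GFP_NOFAIL always retries.  A
 * stand-alone sketch of that decision (the flag values and should_retry()
 * are invented for the example):
 */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_NORETRY	0x1
#define FAKE_REPEAT	0x2
#define FAKE_NOFAIL	0x4
#define COSTLY_ORDER	3

static bool should_retry(unsigned int gfp, int order, unsigned long reclaimed)
{
	bool retry = false;

	if (!(gfp & FAKE_NORETRY)) {
		if (order <= COSTLY_ORDER)
			retry = true;
		else if ((gfp & FAKE_REPEAT) && reclaimed < (1UL << order))
			retry = true;
		if (gfp & FAKE_NOFAIL)
			retry = true;
	}
	return retry;
}

int main(void)
{
	printf("%d\n", should_retry(0, 2, 0));			/* 1: low order        */
	printf("%d\n", should_retry(FAKE_REPEAT, 4, 8));	/* 1: < 16 reclaimed   */
	printf("%d\n", should_retry(FAKE_REPEAT, 4, 32));	/* 0: enough reclaimed */
	printf("%d\n", should_retry(FAKE_NORETRY, 0, 0));	/* 0: NORETRY          */
	return 0;
}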
@@ -1646,6 +1658,20 @@ got_pg:
        return page;
 }
 
+struct page *
+__alloc_pages(gfp_t gfp_mask, unsigned int order,
+               struct zonelist *zonelist)
+{
+       return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
+}
+
+struct page *
+__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
+               struct zonelist *zonelist, nodemask_t *nodemask)
+{
+       return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
+}
+
 EXPORT_SYMBOL(__alloc_pages);
 
 /*
@@ -1712,15 +1738,15 @@ EXPORT_SYMBOL(free_pages);
 
 static unsigned int nr_free_zone_pages(int offset)
 {
+       struct zoneref *z;
+       struct zone *zone;
+
        /* Just pick one node, since fallback list is circular */
-       pg_data_t *pgdat = NODE_DATA(numa_node_id());
        unsigned int sum = 0;
 
-       struct zonelist *zonelist = pgdat->node_zonelists + offset;
-       struct zone **zonep = zonelist->zones;
-       struct zone *zone;
+       struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
 
-       for (zone = *zonep++; zone; zone = *zonep++) {
+       for_each_zone_zonelist(zone, z, zonelist, offset) {
                unsigned long size = zone->present_pages;
                unsigned long high = zone->pages_high;
                if (size > high)
@@ -1889,6 +1915,12 @@ void show_free_areas(void)
        show_swap_cache_info();
 }
 
+static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
+{
+       zoneref->zone = zone;
+       zoneref->zone_idx = zone_idx(zone);
+}
+
 /*
  * Builds allocation fallback zone lists.
  *
@@ -1906,7 +1938,8 @@ static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
                zone_type--;
                zone = pgdat->node_zones + zone_type;
                if (populated_zone(zone)) {
-                       zonelist->zones[nr_zones++] = zone;
+                       zoneref_set_zone(zone,
+                               &zonelist->_zonerefs[nr_zones++]);
                        check_highest_zone(zone_type);
                }
 
@@ -2078,17 +2111,16 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
  */
 static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
 {
-       enum zone_type i;
        int j;
        struct zonelist *zonelist;
 
-       for (i = 0; i < MAX_NR_ZONES; i++) {
-               zonelist = pgdat->node_zonelists + i;
-               for (j = 0; zonelist->zones[j] != NULL; j++)
-                       ;
-               j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
-               zonelist->zones[j] = NULL;
-       }
+       zonelist = &pgdat->node_zonelists[0];
+       for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
+               ;
+       j = build_zonelists_node(NODE_DATA(node), zonelist, j,
+                                                       MAX_NR_ZONES - 1);
+       zonelist->_zonerefs[j].zone = NULL;
+       zonelist->_zonerefs[j].zone_idx = 0;
 }
 
 /*
@@ -2096,15 +2128,13 @@ static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
  */
 static void build_thisnode_zonelists(pg_data_t *pgdat)
 {
-       enum zone_type i;
        int j;
        struct zonelist *zonelist;
 
-       for (i = 0; i < MAX_NR_ZONES; i++) {
-               zonelist = pgdat->node_zonelists + MAX_NR_ZONES + i;
-               j = build_zonelists_node(pgdat, zonelist, 0, i);
-               zonelist->zones[j] = NULL;
-       }
+       zonelist = &pgdat->node_zonelists[1];
+       j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
+       zonelist->_zonerefs[j].zone = NULL;
+       zonelist->_zonerefs[j].zone_idx = 0;
 }
 
 /*
@@ -2117,27 +2147,26 @@ static int node_order[MAX_NUMNODES];
 
 static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
 {
-       enum zone_type i;
        int pos, j, node;
        int zone_type;          /* needs to be signed */
        struct zone *z;
        struct zonelist *zonelist;
 
-       for (i = 0; i < MAX_NR_ZONES; i++) {
-               zonelist = pgdat->node_zonelists + i;
-               pos = 0;
-               for (zone_type = i; zone_type >= 0; zone_type--) {
-                       for (j = 0; j < nr_nodes; j++) {
-                               node = node_order[j];
-                               z = &NODE_DATA(node)->node_zones[zone_type];
-                               if (populated_zone(z)) {
-                                       zonelist->zones[pos++] = z;
-                                       check_highest_zone(zone_type);
-                               }
+       zonelist = &pgdat->node_zonelists[0];
+       pos = 0;
+       for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
+               for (j = 0; j < nr_nodes; j++) {
+                       node = node_order[j];
+                       z = &NODE_DATA(node)->node_zones[zone_type];
+                       if (populated_zone(z)) {
+                               zoneref_set_zone(z,
+                                       &zonelist->_zonerefs[pos++]);
+                               check_highest_zone(zone_type);
                        }
                }
-               zonelist->zones[pos] = NULL;
        }
+       zonelist->_zonerefs[pos].zone = NULL;
+       zonelist->_zonerefs[pos].zone_idx = 0;
 }
 
 static int default_zonelist_order(void)
@@ -2214,7 +2243,8 @@ static void build_zonelists(pg_data_t *pgdat)
        /* initialize zonelists */
        for (i = 0; i < MAX_ZONELISTS; i++) {
                zonelist = pgdat->node_zonelists + i;
-               zonelist->zones[0] = NULL;
+               zonelist->_zonerefs[0].zone = NULL;
+               zonelist->_zonerefs[0].zone_idx = 0;
        }
 
        /* NUMA-aware ordering of nodes */
@@ -2264,19 +2294,15 @@ static void build_zonelists(pg_data_t *pgdat)
 /* Construct the zonelist performance cache - see further mmzone.h */
 static void build_zonelist_cache(pg_data_t *pgdat)
 {
-       int i;
-
-       for (i = 0; i < MAX_NR_ZONES; i++) {
-               struct zonelist *zonelist;
-               struct zonelist_cache *zlc;
-               struct zone **z;
+       struct zonelist *zonelist;
+       struct zonelist_cache *zlc;
+       struct zoneref *z;
 
-               zonelist = pgdat->node_zonelists + i;
-               zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
-               bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
-               for (z = zonelist->zones; *z; z++)
-                       zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z);
-       }
+       zonelist = &pgdat->node_zonelists[0];
+       zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
+       bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
+       for (z = zonelist->_zonerefs; z->zone; z++)
+               zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
 }
 
 
@@ -2290,45 +2316,44 @@ static void set_zonelist_order(void)
 static void build_zonelists(pg_data_t *pgdat)
 {
        int node, local_node;
-       enum zone_type i,j;
+       enum zone_type j;
+       struct zonelist *zonelist;
 
        local_node = pgdat->node_id;
-       for (i = 0; i < MAX_NR_ZONES; i++) {
-               struct zonelist *zonelist;
-
-               zonelist = pgdat->node_zonelists + i;
 
-               j = build_zonelists_node(pgdat, zonelist, 0, i);
-               /*
-                * Now we build the zonelist so that it contains the zones
-                * of all the other nodes.
-                * We don't want to pressure a particular node, so when
-                * building the zones for node N, we make sure that the
-                * zones coming right after the local ones are those from
-                * node N+1 (modulo N)
-                */
-               for (node = local_node + 1; node < MAX_NUMNODES; node++) {
-                       if (!node_online(node))
-                               continue;
-                       j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
-               }
-               for (node = 0; node < local_node; node++) {
-                       if (!node_online(node))
-                               continue;
-                       j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
-               }
+       zonelist = &pgdat->node_zonelists[0];
+       j = build_zonelists_node(pgdat, zonelist, 0, MAX_NR_ZONES - 1);
 
-               zonelist->zones[j] = NULL;
+       /*
+        * Now we build the zonelist so that it contains the zones
+        * of all the other nodes.
+        * We don't want to pressure a particular node, so when
+        * building the zones for node N, we make sure that the
+        * zones coming right after the local ones are those from
+        * node N+1 (modulo N)
+        */
+       for (node = local_node + 1; node < MAX_NUMNODES; node++) {
+               if (!node_online(node))
+                       continue;
+               j = build_zonelists_node(NODE_DATA(node), zonelist, j,
+                                                       MAX_NR_ZONES - 1);
        }
+       for (node = 0; node < local_node; node++) {
+               if (!node_online(node))
+                       continue;
+               j = build_zonelists_node(NODE_DATA(node), zonelist, j,
+                                                       MAX_NR_ZONES - 1);
+       }
+
+       zonelist->_zonerefs[j].zone = NULL;
+       zonelist->_zonerefs[j].zone_idx = 0;
 }
 
 /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
 static void build_zonelist_cache(pg_data_t *pgdat)
 {
-       int i;
-
-       for (i = 0; i < MAX_NR_ZONES; i++)
-               pgdat->node_zonelists[i].zlcache_ptr = NULL;
+       pgdat->node_zonelists[0].zlcache_ptr = NULL;
+       pgdat->node_zonelists[1].zlcache_ptr = NULL;
 }
 
 #endif /* CONFIG_NUMA */
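Sketch (illustrative, not part of the patch): each node now carries just two zonelists -- node_zonelists[0] with every node's zones in fallback order and node_zonelists[1] with only the local node's zones. A caller selects between them roughly as below; this is a simplified stand-in for the node_zonelist() helper that the slab and slub hunks further down call:

        static inline struct zonelist *node_zonelist_sketch(int nid, gfp_t flags)
        {
                /* __GFP_THISNODE picks the node-local zonelist at index 1 */
                return NODE_DATA(nid)->node_zonelists +
                                ((flags & __GFP_THISNODE) ? 1 : 0);
        }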
@@ -2518,7 +2543,9 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
        struct page *page;
        unsigned long end_pfn = start_pfn + size;
        unsigned long pfn;
+       struct zone *z;
 
+       z = &NODE_DATA(nid)->node_zones[zone];
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                /*
                 * There can be holes in boot-time mem_map[]s
@@ -2536,7 +2563,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                init_page_count(page);
                reset_page_mapcount(page);
                SetPageReserved(page);
-
                /*
                 * Mark the block movable so that blocks are reserved for
                 * movable at startup. This will force kernel allocations
@@ -2545,8 +2571,15 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                 * kernel allocations are made. Later some blocks near
                 * the start are marked MIGRATE_RESERVE by
                 * setup_zone_migrate_reserve()
+                *
+                * bitmap is created for zone's valid pfn range. but memmap
+                * can be created for invalid pages (for alignment)
+                * check here not to call set_pageblock_migratetype() against
+                * pfn out of zone.
                 */
-               if ((pfn & (pageblock_nr_pages-1)))
+               if ((z->zone_start_pfn <= pfn)
+                   && (pfn < z->zone_start_pfn + z->spanned_pages)
+                   && !(pfn & (pageblock_nr_pages - 1)))
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
                INIT_LIST_HEAD(&page->lru);
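Sketch (illustrative only; the helper name is hypothetical): the added guard simply asks whether the pfn lies inside the zone's spanned range before touching the pageblock bitmap, equivalent to:

        static inline int pfn_in_zone_span(struct zone *z, unsigned long pfn)
        {
                return z->zone_start_pfn <= pfn &&
                       pfn < z->zone_start_pfn + z->spanned_pages;
        }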
@@ -4339,9 +4372,7 @@ void *__init alloc_large_system_hash(const char *tablename,
                else if (hashdist)
                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
                else {
-                       unsigned long order;
-                       for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
-                               ;
+                       unsigned long order = get_order(size);
                        table = (void*) __get_free_pages(GFP_ATOMIC, order);
                        /*
                         * If bucketsize is not a power-of-two, we may free
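Note: get_order() computes exactly what the removed loop did -- the smallest order for which (PAGE_SIZE << order) >= size. With 4 KiB pages, for example:

        unsigned long order = get_order(100 * 1024);    /* == 5, i.e. a 128 KiB allocation */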
@@ -4460,6 +4491,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
        pfn = page_to_pfn(page);
        bitmap = get_pageblock_bitmap(zone, pfn);
        bitidx = pfn_to_bitidx(zone, pfn);
+       VM_BUG_ON(pfn < zone->zone_start_pfn);
+       VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
 
        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
                if (flags & value)
index 1cf1417ef8b77bc111f2fc5b4f4c24c3fbee1f88..0afd2387e507d8f8deed9697f1971fc953f4b4b6 100644 (file)
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -9,11 +9,15 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
        int err = 0;
 
        pte = pte_offset_map(pmd, addr);
-       do {
+       for (;;) {
                err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, private);
                if (err)
                       break;
-       } while (pte++, addr += PAGE_SIZE, addr != end);
+               addr += PAGE_SIZE;
+               if (addr == end)
+                       break;
+               pte++;
+       }
 
        pte_unmap(pte);
        return err;
index 8762e8988972baa01820369ba1fd46d7e727d215..d8723a5f6496d05af23445e6ae862754e9d41214 100644 (file)
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -235,7 +235,13 @@ unsigned long max_sane_readahead(unsigned long nr)
 
 static int __init readahead_init(void)
 {
-       return bdi_init(&default_backing_dev_info);
+       int err;
+
+       err = bdi_init(&default_backing_dev_info);
+       if (!err)
+               bdi_register(&default_backing_dev_info, NULL, "default");
+
+       return err;
 }
 subsys_initcall(readahead_init);
 
index 997f06907b6da787798e71f515f44cd51495b771..bf0a5b7cfb8e7a97369271ed235a714bd11dfd12 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -413,9 +413,6 @@ int page_referenced(struct page *page, int is_locked,
 {
        int referenced = 0;
 
-       if (page_test_and_clear_young(page))
-               referenced++;
-
        if (TestClearPageReferenced(page))
                referenced++;
 
@@ -433,6 +430,10 @@ int page_referenced(struct page *page, int is_locked,
                        unlock_page(page);
                }
        }
+
+       if (page_test_and_clear_young(page))
+               referenced++;
+
        return referenced;
 }
 
@@ -661,7 +662,6 @@ void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
                        printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
                        print_symbol (KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
                        if (vma->vm_ops) {
-                               print_symbol (KERN_EMERG "  vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
                                print_symbol (KERN_EMERG "  vma->vm_ops->fault = %s\n", (unsigned long)vma->vm_ops->fault);
                        }
                        if (vma->vm_file && vma->vm_file->f_op)
index f514dd392cd93570e108ba5a489a28975d85034a..e2a6ae1a44e9a5e8b5e6594d11f76b11a02ea49f 100644 (file)
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -201,7 +201,7 @@ static struct vm_operations_struct shmem_vm_ops;
 
 static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
        .ra_pages       = 0,    /* No readahead */
-       .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
        .unplug_io_fn   = default_unplug_io_fn,
 };
 
@@ -1079,104 +1079,47 @@ redirty:
 
 #ifdef CONFIG_NUMA
 #ifdef CONFIG_TMPFS
-static int shmem_parse_mpol(char *value, int *policy, nodemask_t *policy_nodes)
+static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
 {
-       char *nodelist = strchr(value, ':');
-       int err = 1;
+       char buffer[64];
 
-       if (nodelist) {
-               /* NUL-terminate policy string */
-               *nodelist++ = '\0';
-               if (nodelist_parse(nodelist, *policy_nodes))
-                       goto out;
-               if (!nodes_subset(*policy_nodes, node_states[N_HIGH_MEMORY]))
-                       goto out;
-       }
-       if (!strcmp(value, "default")) {
-               *policy = MPOL_DEFAULT;
-               /* Don't allow a nodelist */
-               if (!nodelist)
-                       err = 0;
-       } else if (!strcmp(value, "prefer")) {
-               *policy = MPOL_PREFERRED;
-               /* Insist on a nodelist of one node only */
-               if (nodelist) {
-                       char *rest = nodelist;
-                       while (isdigit(*rest))
-                               rest++;
-                       if (!*rest)
-                               err = 0;
-               }
-       } else if (!strcmp(value, "bind")) {
-               *policy = MPOL_BIND;
-               /* Insist on a nodelist */
-               if (nodelist)
-                       err = 0;
-       } else if (!strcmp(value, "interleave")) {
-               *policy = MPOL_INTERLEAVE;
-               /*
-                * Default to online nodes with memory if no nodelist
-                */
-               if (!nodelist)
-                       *policy_nodes = node_states[N_HIGH_MEMORY];
-               err = 0;
-       }
-out:
-       /* Restore string for error message */
-       if (nodelist)
-               *--nodelist = ':';
-       return err;
-}
-
-static void shmem_show_mpol(struct seq_file *seq, int policy,
-                           const nodemask_t policy_nodes)
-{
-       char *policy_string;
+       if (!mpol || mpol->mode == MPOL_DEFAULT)
+               return;         /* show nothing */
 
-       switch (policy) {
-       case MPOL_PREFERRED:
-               policy_string = "prefer";
-               break;
-       case MPOL_BIND:
-               policy_string = "bind";
-               break;
-       case MPOL_INTERLEAVE:
-               policy_string = "interleave";
-               break;
-       default:
-               /* MPOL_DEFAULT */
-               return;
-       }
+       mpol_to_str(buffer, sizeof(buffer), mpol, 1);
 
-       seq_printf(seq, ",mpol=%s", policy_string);
-
-       if (policy != MPOL_INTERLEAVE ||
-           !nodes_equal(policy_nodes, node_states[N_HIGH_MEMORY])) {
-               char buffer[64];
-               int len;
+       seq_printf(seq, ",mpol=%s", buffer);
+}
 
-               len = nodelist_scnprintf(buffer, sizeof(buffer), policy_nodes);
-               if (len < sizeof(buffer))
-                       seq_printf(seq, ":%s", buffer);
-               else
-                       seq_printf(seq, ":?");
+static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+{
+       struct mempolicy *mpol = NULL;
+       if (sbinfo->mpol) {
+               spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
+               mpol = sbinfo->mpol;
+               mpol_get(mpol);
+               spin_unlock(&sbinfo->stat_lock);
        }
+       return mpol;
 }
 #endif /* CONFIG_TMPFS */
 
 static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
 {
+       struct mempolicy mpol, *spol;
        struct vm_area_struct pvma;
        struct page *page;
 
+       spol = mpol_cond_copy(&mpol,
+                               mpol_shared_policy_lookup(&info->policy, idx));
+
        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = idx;
        pvma.vm_ops = NULL;
-       pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
+       pvma.vm_policy = spol;
        page = swapin_readahead(entry, gfp, &pvma, 0);
-       mpol_free(pvma.vm_policy);
        return page;
 }
 
@@ -1184,27 +1127,21 @@ static struct page *shmem_alloc_page(gfp_t gfp,
                        struct shmem_inode_info *info, unsigned long idx)
 {
        struct vm_area_struct pvma;
-       struct page *page;
 
        /* Create a pseudo vma that just contains the policy */
        pvma.vm_start = 0;
        pvma.vm_pgoff = idx;
        pvma.vm_ops = NULL;
        pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);
-       page = alloc_page_vma(gfp, &pvma, 0);
-       mpol_free(pvma.vm_policy);
-       return page;
+
+       /*
+        * alloc_page_vma() will drop the shared policy reference
+        */
+       return alloc_page_vma(gfp, &pvma, 0);
 }
 #else /* !CONFIG_NUMA */
 #ifdef CONFIG_TMPFS
-static inline int shmem_parse_mpol(char *value, int *policy,
-                                               nodemask_t *policy_nodes)
-{
-       return 1;
-}
-
-static inline void shmem_show_mpol(struct seq_file *seq, int policy,
-                           const nodemask_t policy_nodes)
+static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
 {
 }
 #endif /* CONFIG_TMPFS */
@@ -1222,6 +1159,13 @@ static inline struct page *shmem_alloc_page(gfp_t gfp,
 }
 #endif /* CONFIG_NUMA */
 
+#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
+static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+{
+       return NULL;
+}
+#endif
+
 /*
  * shmem_getpage - either get the page from swap or allocate a new one
  *
@@ -1576,8 +1520,8 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
                case S_IFREG:
                        inode->i_op = &shmem_inode_operations;
                        inode->i_fop = &shmem_file_operations;
-                       mpol_shared_policy_init(&info->policy, sbinfo->policy,
-                                                       &sbinfo->policy_nodes);
+                       mpol_shared_policy_init(&info->policy,
+                                                shmem_get_sbmpol(sbinfo));
                        break;
                case S_IFDIR:
                        inc_nlink(inode);
@@ -1591,8 +1535,7 @@ shmem_get_inode(struct super_block *sb, int mode, dev_t dev)
                         * Must not load anything in the rbtree,
                         * mpol_free_shared_policy will not be called.
                         */
-                       mpol_shared_policy_init(&info->policy, MPOL_DEFAULT,
-                                               NULL);
+                       mpol_shared_policy_init(&info->policy, NULL);
                        break;
                }
        } else
@@ -2207,8 +2150,7 @@ static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"mpol")) {
-                       if (shmem_parse_mpol(value, &sbinfo->policy,
-                                            &sbinfo->policy_nodes))
+                       if (mpol_parse_str(value, &sbinfo->mpol, 1))
                                goto bad_val;
                } else {
                        printk(KERN_ERR "tmpfs: Bad mount option %s\n",
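Sketch (illustrative only; the option string and function name are hypothetical): tmpfs now round-trips the mpol= mount option through the generic mempolicy helpers used in these hunks -- mpol_parse_str() when parsing options, mpol_to_str() when showing them, mpol_put() to drop the reference:

        static void mpol_option_roundtrip_sketch(void)
        {
                char opt[] = "interleave:0-3";  /* hypothetical mpol= value */
                char buf[64];
                struct mempolicy *mpol = NULL;

                if (!mpol_parse_str(opt, &mpol, 1)) {   /* 0 means success */
                        mpol_to_str(buf, sizeof(buf), mpol, 1);
                        mpol_put(mpol);         /* parsing handed us a reference */
                }
        }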
@@ -2259,8 +2201,9 @@ static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
        sbinfo->free_blocks = config.max_blocks - blocks;
        sbinfo->max_inodes  = config.max_inodes;
        sbinfo->free_inodes = config.max_inodes - inodes;
-       sbinfo->policy      = config.policy;
-       sbinfo->policy_nodes = config.policy_nodes;
+
+       mpol_put(sbinfo->mpol);
+       sbinfo->mpol        = config.mpol;      /* transfers initial ref */
 out:
        spin_unlock(&sbinfo->stat_lock);
        return error;
@@ -2281,7 +2224,7 @@ static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
                seq_printf(seq, ",uid=%u", sbinfo->uid);
        if (sbinfo->gid != 0)
                seq_printf(seq, ",gid=%u", sbinfo->gid);
-       shmem_show_mpol(seq, sbinfo->policy, sbinfo->policy_nodes);
+       shmem_show_mpol(seq, sbinfo->mpol);
        return 0;
 }
 #endif /* CONFIG_TMPFS */
@@ -2311,8 +2254,7 @@ static int shmem_fill_super(struct super_block *sb,
        sbinfo->mode = S_IRWXUGO | S_ISVTX;
        sbinfo->uid = current->fsuid;
        sbinfo->gid = current->fsgid;
-       sbinfo->policy = MPOL_DEFAULT;
-       sbinfo->policy_nodes = node_states[N_HIGH_MEMORY];
+       sbinfo->mpol = NULL;
        sb->s_fs_info = sbinfo;
 
 #ifdef CONFIG_TMPFS
index 03927cb5ec9e119ca06cf337d69de017303d82ba..06236e4ddc1b99686d64683865ca648ed2f79864 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
 #include       <linux/fault-inject.h>
 #include       <linux/rtmutex.h>
 #include       <linux/reciprocal_div.h>
+#include       <linux/debugobjects.h>
 
 #include       <asm/cacheflush.h>
 #include       <asm/tlbflush.h>
 #define        BYTES_PER_WORD          sizeof(void *)
 #define        REDZONE_ALIGN           max(BYTES_PER_WORD, __alignof__(unsigned long long))
 
-#ifndef cache_line_size
-#define cache_line_size()      L1_CACHE_BYTES
-#endif
-
 #ifndef ARCH_KMALLOC_MINALIGN
 /*
  * Enforce a minimum alignment for the kmalloc caches.
                         SLAB_CACHE_DMA | \
                         SLAB_STORE_USER | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
+                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+                        SLAB_DEBUG_OBJECTS)
 #else
 # define CREATE_MASK   (SLAB_HWCACHE_ALIGN | \
                         SLAB_CACHE_DMA | \
                         SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
-                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
+                        SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
+                        SLAB_DEBUG_OBJECTS)
 #endif
 
 /*
@@ -862,7 +861,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
        *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
 }
 
-#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
+#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
 
 static void __slab_error(const char *function, struct kmem_cache *cachep,
                        char *msg)
@@ -2157,7 +2156,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
         */
        if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
            size > KMALLOC_MAX_SIZE) {
-               printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
+               printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
                                name);
                BUG();
        }
@@ -3242,15 +3241,16 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
 {
        struct zonelist *zonelist;
        gfp_t local_flags;
-       struct zone **z;
+       struct zoneref *z;
+       struct zone *zone;
+       enum zone_type high_zoneidx = gfp_zone(flags);
        void *obj = NULL;
        int nid;
 
        if (flags & __GFP_THISNODE)
                return NULL;
 
-       zonelist = &NODE_DATA(slab_node(current->mempolicy))
-                       ->node_zonelists[gfp_zone(flags)];
+       zonelist = node_zonelist(slab_node(current->mempolicy), flags);
        local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
 retry:
@@ -3258,10 +3258,10 @@ retry:
         * Look through allowed nodes for objects available
         * from existing per node queues.
         */
-       for (z = zonelist->zones; *z && !obj; z++) {
-               nid = zone_to_nid(*z);
+       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+               nid = zone_to_nid(zone);
 
-               if (cpuset_zone_allowed_hardwall(*z, flags) &&
+               if (cpuset_zone_allowed_hardwall(zone, flags) &&
                        cache->nodelists[nid] &&
                        cache->nodelists[nid]->free_objects)
                                obj = ____cache_alloc_node(cache,
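Sketch (illustrative): the iteration idiom used above. Instead of walking a zone pointer array that was pre-filtered by zone type, callers walk the single zonelist and filter by the highest zone index the GFP mask allows:

        static void zonelist_walk_sketch(gfp_t flags)
        {
                struct zonelist *zonelist = node_zonelist(numa_node_id(), flags);
                enum zone_type high_zoneidx = gfp_zone(flags);
                struct zoneref *z;
                struct zone *zone;

                for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
                        /* zones arrive in fallback order, restricted to <= high_zoneidx */
                }
        }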
@@ -3763,6 +3763,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 
        local_irq_save(flags);
        debug_check_no_locks_freed(objp, obj_size(cachep));
+       if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
+               debug_check_no_obj_freed(objp, obj_size(cachep));
        __cache_free(cachep, objp);
        local_irq_restore(flags);
 }
@@ -3788,6 +3790,7 @@ void kfree(const void *objp)
        kfree_debugcheck(objp);
        c = virt_to_cache(objp);
        debug_check_no_locks_freed(objp, obj_size(c));
+       debug_check_no_obj_freed(objp, obj_size(c));
        __cache_free(c, (void *)objp);
        local_irq_restore(flags);
 }
index e2c3c0ec546373857ba3c2132edde98cdf5e8bb0..6038cbadf796bb165a2b7bf7c9fbe707297f6f53 100644 (file)
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -533,7 +533,8 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
 {
        struct kmem_cache *c;
 
-       c = slob_alloc(sizeof(struct kmem_cache), flags, 0, -1);
+       c = slob_alloc(sizeof(struct kmem_cache),
+               flags, ARCH_KMALLOC_MINALIGN, -1);
 
        if (c) {
                c->name = name;
index 39592b5ce68adc07493ee8ae077f1dcd98839080..32b62623846af6a7242c11f641774adeff09d45b 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
+#include <linux/debugobjects.h>
 #include <linux/kallsyms.h>
 #include <linux/memory.h>
+#include <linux/math64.h>
 
 /*
  * Lock order:
@@ -149,25 +151,6 @@ static inline void ClearSlabDebug(struct page *page)
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
 
-#if PAGE_SHIFT <= 12
-
-/*
- * Small page size. Make sure that we do not fragment memory
- */
-#define DEFAULT_MAX_ORDER 1
-#define DEFAULT_MIN_OBJECTS 4
-
-#else
-
-/*
- * Large page machines are customarily able to handle larger
- * page orders.
- */
-#define DEFAULT_MAX_ORDER 2
-#define DEFAULT_MIN_OBJECTS 8
-
-#endif
-
 /*
  * Mininum number of partial slabs. These will be left on the partial
  * lists even if they are empty. kmem_cache_shrink may reclaim them.
@@ -204,13 +187,6 @@ static inline void ClearSlabDebug(struct page *page)
 /* Internal SLUB flags */
 #define __OBJECT_POISON                0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED   0x40000000 /* Not yet visible via sysfs */
-#define __KMALLOC_CACHE                0x20000000 /* objects freed using kfree */
-#define __PAGE_ALLOC_FALLBACK  0x10000000 /* Allow fallback to page alloc */
-
-/* Not all arches define cache_line_size */
-#ifndef cache_line_size
-#define cache_line_size()      L1_CACHE_BYTES
-#endif
 
 static int kmem_size = sizeof(struct kmem_cache);
 
@@ -301,7 +277,7 @@ static inline int check_valid_pointer(struct kmem_cache *s,
                return 1;
 
        base = page_address(page);
-       if (object < base || object >= base + s->objects * s->size ||
+       if (object < base || object >= base + page->objects * s->size ||
                (object - base) % s->size) {
                return 0;
        }
@@ -327,8 +303,8 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
 }
 
 /* Loop over all objects in a slab */
-#define for_each_object(__p, __s, __addr) \
-       for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
+#define for_each_object(__p, __s, __addr, __objects) \
+       for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
                        __p += (__s)->size)
 
 /* Scan freelist */
@@ -341,6 +317,26 @@ static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
        return (p - addr) / s->size;
 }
 
+static inline struct kmem_cache_order_objects oo_make(int order,
+                                               unsigned long size)
+{
+       struct kmem_cache_order_objects x = {
+               (order << 16) + (PAGE_SIZE << order) / size
+       };
+
+       return x;
+}
+
+static inline int oo_order(struct kmem_cache_order_objects x)
+{
+       return x.x >> 16;
+}
+
+static inline int oo_objects(struct kmem_cache_order_objects x)
+{
+       return x.x & ((1 << 16) - 1);
+}
+
 #ifdef CONFIG_SLUB_DEBUG
 /*
  * Debug settings:
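Worked example (not part of the patch): kmem_cache_order_objects packs the page order into the upper 16 bits and the object count into the lower 16 bits of one word. With 4 KiB pages:

        struct kmem_cache_order_objects oo = oo_make(3, 256);
        /* oo.x == (3 << 16) + (32768 / 256) == 196736 */
        /* oo_order(oo) == 3, oo_objects(oo) == 128    */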
@@ -451,8 +447,8 @@ static void print_tracking(struct kmem_cache *s, void *object)
 
 static void print_page_info(struct page *page)
 {
-       printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
-               page, page->inuse, page->freelist, page->flags);
+       printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
+               page, page->objects, page->inuse, page->freelist, page->flags);
 
 }
 
@@ -652,6 +648,7 @@ static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
                                p + off, POISON_INUSE, s->size - off);
 }
 
+/* Check the pad bytes at the end of a slab page */
 static int slab_pad_check(struct kmem_cache *s, struct page *page)
 {
        u8 *start;
@@ -664,20 +661,20 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
                return 1;
 
        start = page_address(page);
-       end = start + (PAGE_SIZE << s->order);
-       length = s->objects * s->size;
-       remainder = end - (start + length);
+       length = (PAGE_SIZE << compound_order(page));
+       end = start + length;
+       remainder = length % s->size;
        if (!remainder)
                return 1;
 
-       fault = check_bytes(start + length, POISON_INUSE, remainder);
+       fault = check_bytes(end - remainder, POISON_INUSE, remainder);
        if (!fault)
                return 1;
        while (end > fault && end[-1] == POISON_INUSE)
                end--;
 
        slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
-       print_section("Padding", start, length);
+       print_section("Padding", end - remainder, remainder);
 
        restore_bytes(s, "slab padding", POISON_INUSE, start, end);
        return 0;
@@ -739,15 +736,24 @@ static int check_object(struct kmem_cache *s, struct page *page,
 
 static int check_slab(struct kmem_cache *s, struct page *page)
 {
+       int maxobj;
+
        VM_BUG_ON(!irqs_disabled());
 
        if (!PageSlab(page)) {
                slab_err(s, page, "Not a valid slab page");
                return 0;
        }
-       if (page->inuse > s->objects) {
+
+       maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
+       if (page->objects > maxobj) {
+               slab_err(s, page, "objects %u > max %u",
+                       s->name, page->objects, maxobj);
+               return 0;
+       }
+       if (page->inuse > page->objects) {
                slab_err(s, page, "inuse %u > max %u",
-                       s->name, page->inuse, s->objects);
+                       s->name, page->inuse, page->objects);
                return 0;
        }
        /* Slab_pad_check fixes things up after itself */
@@ -764,8 +770,9 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
        int nr = 0;
        void *fp = page->freelist;
        void *object = NULL;
+       unsigned long max_objects;
 
-       while (fp && nr <= s->objects) {
+       while (fp && nr <= page->objects) {
                if (fp == search)
                        return 1;
                if (!check_valid_pointer(s, page, fp)) {
@@ -777,7 +784,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
                        } else {
                                slab_err(s, page, "Freepointer corrupt");
                                page->freelist = NULL;
-                               page->inuse = s->objects;
+                               page->inuse = page->objects;
                                slab_fix(s, "Freelist cleared");
                                return 0;
                        }
@@ -788,10 +795,20 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
                nr++;
        }
 
-       if (page->inuse != s->objects - nr) {
+       max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
+       if (max_objects > 65535)
+               max_objects = 65535;
+
+       if (page->objects != max_objects) {
+               slab_err(s, page, "Wrong number of objects. Found %d but "
+                       "should be %d", page->objects, max_objects);
+               page->objects = max_objects;
+               slab_fix(s, "Number of objects adjusted.");
+       }
+       if (page->inuse != page->objects - nr) {
                slab_err(s, page, "Wrong object count. Counter is %d but "
-                       "counted were %d", page->inuse, s->objects - nr);
-               page->inuse = s->objects - nr;
+                       "counted were %d", page->inuse, page->objects - nr);
+               page->inuse = page->objects - nr;
                slab_fix(s, "Object count adjusted.");
        }
        return search == NULL;
@@ -845,7 +862,7 @@ static inline unsigned long slabs_node(struct kmem_cache *s, int node)
        return atomic_long_read(&n->nr_slabs);
 }
 
-static inline void inc_slabs_node(struct kmem_cache *s, int node)
+static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
 {
        struct kmem_cache_node *n = get_node(s, node);
 
@@ -855,14 +872,17 @@ static inline void inc_slabs_node(struct kmem_cache *s, int node)
         * dilemma by deferring the increment of the count during
         * bootstrap (see early_kmem_cache_node_alloc).
         */
-       if (!NUMA_BUILD || n)
+       if (!NUMA_BUILD || n) {
                atomic_long_inc(&n->nr_slabs);
+               atomic_long_add(objects, &n->total_objects);
+       }
 }
-static inline void dec_slabs_node(struct kmem_cache *s, int node)
+static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
 {
        struct kmem_cache_node *n = get_node(s, node);
 
        atomic_long_dec(&n->nr_slabs);
+       atomic_long_sub(objects, &n->total_objects);
 }
 
 /* Object debug checks for alloc/free paths */
@@ -910,7 +930,7 @@ bad:
                 * as used avoids touching the remaining objects.
                 */
                slab_fix(s, "Marking all objects used");
-               page->inuse = s->objects;
+               page->inuse = page->objects;
                page->freelist = NULL;
        }
        return 0;
@@ -1060,31 +1080,52 @@ static inline unsigned long kmem_cache_flags(unsigned long objsize,
 
 static inline unsigned long slabs_node(struct kmem_cache *s, int node)
                                                        { return 0; }
-static inline void inc_slabs_node(struct kmem_cache *s, int node) {}
-static inline void dec_slabs_node(struct kmem_cache *s, int node) {}
+static inline void inc_slabs_node(struct kmem_cache *s, int node,
+                                                       int objects) {}
+static inline void dec_slabs_node(struct kmem_cache *s, int node,
+                                                       int objects) {}
 #endif
+
 /*
  * Slab allocation and freeing
  */
+static inline struct page *alloc_slab_page(gfp_t flags, int node,
+                                       struct kmem_cache_order_objects oo)
+{
+       int order = oo_order(oo);
+
+       if (node == -1)
+               return alloc_pages(flags, order);
+       else
+               return alloc_pages_node(node, flags, order);
+}
+
 static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 {
        struct page *page;
-       int pages = 1 << s->order;
+       struct kmem_cache_order_objects oo = s->oo;
 
        flags |= s->allocflags;
 
-       if (node == -1)
-               page = alloc_pages(flags, s->order);
-       else
-               page = alloc_pages_node(node, flags, s->order);
-
-       if (!page)
-               return NULL;
+       page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
+                                                                       oo);
+       if (unlikely(!page)) {
+               oo = s->min;
+               /*
+                * Allocation may have failed due to fragmentation.
+                * Try a lower order alloc if possible
+                */
+               page = alloc_slab_page(flags, node, oo);
+               if (!page)
+                       return NULL;
 
+               stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
+       }
+       page->objects = oo_objects(oo);
        mod_zone_page_state(page_zone(page),
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
-               pages);
+               1 << oo_order(oo));
 
        return page;
 }
@@ -1111,7 +1152,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
        if (!page)
                goto out;
 
-       inc_slabs_node(s, page_to_nid(page));
+       inc_slabs_node(s, page_to_nid(page), page->objects);
        page->slab = s;
        page->flags |= 1 << PG_slab;
        if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
@@ -1121,10 +1162,10 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
        start = page_address(page);
 
        if (unlikely(s->flags & SLAB_POISON))
-               memset(start, POISON_INUSE, PAGE_SIZE << s->order);
+               memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
 
        last = start;
-       for_each_object(p, s, start) {
+       for_each_object(p, s, start, page->objects) {
                setup_object(s, page, last);
                set_freepointer(s, last, p);
                last = p;
@@ -1140,13 +1181,15 @@ out:
 
 static void __free_slab(struct kmem_cache *s, struct page *page)
 {
-       int pages = 1 << s->order;
+       int order = compound_order(page);
+       int pages = 1 << order;
 
        if (unlikely(SlabDebug(page))) {
                void *p;
 
                slab_pad_check(s, page);
-               for_each_object(p, s, page_address(page))
+               for_each_object(p, s, page_address(page),
+                                               page->objects)
                        check_object(s, page, p, 0);
                ClearSlabDebug(page);
        }
@@ -1158,7 +1201,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
 
        __ClearPageSlab(page);
        reset_page_mapcount(page);
-       __free_pages(page, s->order);
+       __free_pages(page, order);
 }
 
 static void rcu_free_slab(struct rcu_head *h)
@@ -1184,7 +1227,7 @@ static void free_slab(struct kmem_cache *s, struct page *page)
 
 static void discard_slab(struct kmem_cache *s, struct page *page)
 {
-       dec_slabs_node(s, page_to_nid(page));
+       dec_slabs_node(s, page_to_nid(page), page->objects);
        free_slab(s, page);
 }
 
@@ -1284,7 +1327,9 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
 {
 #ifdef CONFIG_NUMA
        struct zonelist *zonelist;
-       struct zone **z;
+       struct zoneref *z;
+       struct zone *zone;
+       enum zone_type high_zoneidx = gfp_zone(flags);
        struct page *page;
 
        /*
@@ -1309,14 +1354,13 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
                        get_cycles() % 1024 > s->remote_node_defrag_ratio)
                return NULL;
 
-       zonelist = &NODE_DATA(
-               slab_node(current->mempolicy))->node_zonelists[gfp_zone(flags)];
-       for (z = zonelist->zones; *z; z++) {
+       zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
                struct kmem_cache_node *n;
 
-               n = get_node(s, zone_to_nid(*z));
+               n = get_node(s, zone_to_nid(zone));
 
-               if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
+               if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
                                n->nr_partial > MIN_PARTIAL) {
                        page = get_partial_node(n);
                        if (page)
@@ -1519,7 +1563,7 @@ load_freelist:
                goto debug;
 
        c->freelist = object[c->offset];
-       c->page->inuse = s->objects;
+       c->page->inuse = c->page->objects;
        c->page->freelist = NULL;
        c->node = page_to_nid(c->page);
 unlock_out:
@@ -1556,27 +1600,6 @@ new_slab:
                c->page = new;
                goto load_freelist;
        }
-
-       /*
-        * No memory available.
-        *
-        * If the slab uses higher order allocs but the object is
-        * smaller than a page size then we can fallback in emergencies
-        * to the page allocator via kmalloc_large. The page allocator may
-        * have failed to obtain a higher order page and we can try to
-        * allocate a single page if the object fits into a single page.
-        * That is only possible if certain conditions are met that are being
-        * checked when a slab is created.
-        */
-       if (!(gfpflags & __GFP_NORETRY) &&
-                               (s->flags & __PAGE_ALLOC_FALLBACK)) {
-               if (gfpflags & __GFP_WAIT)
-                       local_irq_enable();
-               object = kmalloc_large(s->objsize, gfpflags);
-               if (gfpflags & __GFP_WAIT)
-                       local_irq_disable();
-               return object;
-       }
        return NULL;
 debug:
        if (!alloc_debug_processing(s, c->page, object, addr))
@@ -1726,6 +1749,8 @@ static __always_inline void slab_free(struct kmem_cache *s,
        local_irq_save(flags);
        c = get_cpu_slab(s, smp_processor_id());
        debug_check_no_locks_freed(object, c->objsize);
+       if (!(s->flags & SLAB_DEBUG_OBJECTS))
+               debug_check_no_obj_freed(object, s->objsize);
        if (likely(page == c->page && c->node >= 0)) {
                object[c->offset] = c->freelist;
                c->freelist = object;
@@ -1777,8 +1802,8 @@ static struct page *get_object_page(const void *x)
  * take the list_lock.
  */
 static int slub_min_order;
-static int slub_max_order = DEFAULT_MAX_ORDER;
-static int slub_min_objects = DEFAULT_MIN_OBJECTS;
+static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
+static int slub_min_objects;
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
@@ -1793,7 +1818,7 @@ static int slub_nomerge;
  * system components. Generally order 0 allocations should be preferred since
  * order 0 does not cause fragmentation in the page allocator. Larger objects
  * be problematic to put into order 0 slabs because there may be too much
- * unused space left. We go to a higher order if more than 1/8th of the slab
+ * unused space left. We go to a higher order if more than 1/16th of the slab
  * would be wasted.
  *
  * In order to reach satisfactory performance we must ensure that a minimum
@@ -1818,6 +1843,9 @@ static inline int slab_order(int size, int min_objects,
        int rem;
        int min_order = slub_min_order;
 
+       if ((PAGE_SIZE << min_order) / size > 65535)
+               return get_order(size * 65535) - 1;
+
        for (order = max(min_order,
                                fls(min_objects * size - 1) - PAGE_SHIFT);
                        order <= max_order; order++) {
@@ -1852,8 +1880,10 @@ static inline int calculate_order(int size)
         * we reduce the minimum objects required in a slab.
         */
        min_objects = slub_min_objects;
+       if (!min_objects)
+               min_objects = 4 * (fls(nr_cpu_ids) + 1);
        while (min_objects > 1) {
-               fraction = 8;
+               fraction = 16;
                while (fraction >= 4) {
                        order = slab_order(size, min_objects,
                                                slub_max_order, fraction);
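Worked arithmetic (not part of the patch): with the new defaults, a 4-CPU machine gets min_objects = 4 * (fls(4) + 1) = 16 and a 64-CPU machine gets 4 * (fls(64) + 1) = 32, and an order is accepted once internal padding drops below 1/16th of the slab instead of 1/8th. The preceding hunk also caps the per-slab object count at 65535 so it still fits the 16-bit objects field packed by oo_make().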
@@ -2095,7 +2125,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
        init_tracking(kmalloc_caches, n);
 #endif
        init_kmem_cache_node(n);
-       inc_slabs_node(kmalloc_caches, node);
+       inc_slabs_node(kmalloc_caches, node, page->objects);
 
        /*
         * lockdep requires consistent irq usage for each lock
@@ -2171,11 +2201,12 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
  * calculate_sizes() determines the order and the distribution of data within
  * a slab object.
  */
-static int calculate_sizes(struct kmem_cache *s)
+static int calculate_sizes(struct kmem_cache *s, int forced_order)
 {
        unsigned long flags = s->flags;
        unsigned long size = s->objsize;
        unsigned long align = s->align;
+       int order;
 
        /*
         * Round up object size to the next word boundary. We can only
@@ -2259,26 +2290,16 @@ static int calculate_sizes(struct kmem_cache *s)
         */
        size = ALIGN(size, align);
        s->size = size;
+       if (forced_order >= 0)
+               order = forced_order;
+       else
+               order = calculate_order(size);
 
-       if ((flags & __KMALLOC_CACHE) &&
-                       PAGE_SIZE / size < slub_min_objects) {
-               /*
-                * Kmalloc cache that would not have enough objects in
-                * an order 0 page. Kmalloc slabs can fallback to
-                * page allocator order 0 allocs so take a reasonably large
-                * order that will allows us a good number of objects.
-                */
-               s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
-               s->flags |= __PAGE_ALLOC_FALLBACK;
-               s->allocflags |= __GFP_NOWARN;
-       } else
-               s->order = calculate_order(size);
-
-       if (s->order < 0)
+       if (order < 0)
                return 0;
 
        s->allocflags = 0;
-       if (s->order)
+       if (order)
                s->allocflags |= __GFP_COMP;
 
        if (s->flags & SLAB_CACHE_DMA)
@@ -2290,9 +2311,12 @@ static int calculate_sizes(struct kmem_cache *s)
        /*
         * Determine the number of objects per slab
         */
-       s->objects = (PAGE_SIZE << s->order) / size;
+       s->oo = oo_make(order, size);
+       s->min = oo_make(get_order(size), size);
+       if (oo_objects(s->oo) > oo_objects(s->max))
+               s->max = s->oo;
 
-       return !!s->objects;
+       return !!oo_objects(s->oo);
 
 }
 
@@ -2308,7 +2332,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
        s->align = align;
        s->flags = kmem_cache_flags(size, flags, name, ctor);
 
-       if (!calculate_sizes(s))
+       if (!calculate_sizes(s, -1))
                goto error;
 
        s->refcount = 1;
@@ -2325,7 +2349,7 @@ error:
        if (flags & SLAB_PANIC)
                panic("Cannot create slab %s size=%lu realsize=%u "
                        "order=%u offset=%u flags=%lx\n",
-                       s->name, (unsigned long)size, s->size, s->order,
+                       s->name, (unsigned long)size, s->size, oo_order(s->oo),
                        s->offset, flags);
        return 0;
 }
@@ -2371,26 +2395,52 @@ const char *kmem_cache_name(struct kmem_cache *s)
 }
 EXPORT_SYMBOL(kmem_cache_name);
 
+static void list_slab_objects(struct kmem_cache *s, struct page *page,
+                                                       const char *text)
+{
+#ifdef CONFIG_SLUB_DEBUG
+       void *addr = page_address(page);
+       void *p;
+       DECLARE_BITMAP(map, page->objects);
+
+       bitmap_zero(map, page->objects);
+       slab_err(s, page, "%s", text);
+       slab_lock(page);
+       for_each_free_object(p, s, page->freelist)
+               set_bit(slab_index(p, s, addr), map);
+
+       for_each_object(p, s, addr, page->objects) {
+
+               if (!test_bit(slab_index(p, s, addr), map)) {
+                       printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
+                                                       p, p - addr);
+                       print_tracking(s, p);
+               }
+       }
+       slab_unlock(page);
+#endif
+}
+
 /*
- * Attempt to free all slabs on a node. Return the number of slabs we
- * were unable to free.
+ * Attempt to free all partial slabs on a node.
  */
-static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
-                       struct list_head *list)
+static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
-       int slabs_inuse = 0;
        unsigned long flags;
        struct page *page, *h;
 
        spin_lock_irqsave(&n->list_lock, flags);
-       list_for_each_entry_safe(page, h, list, lru)
+       list_for_each_entry_safe(page, h, &n->partial, lru) {
                if (!page->inuse) {
                        list_del(&page->lru);
                        discard_slab(s, page);
-               } else
-                       slabs_inuse++;
+                       n->nr_partial--;
+               } else {
+                       list_slab_objects(s, page,
+                               "Objects remaining on kmem_cache_close()");
+               }
+       }
        spin_unlock_irqrestore(&n->list_lock, flags);
-       return slabs_inuse;
 }
 
 /*
@@ -2407,8 +2457,8 @@ static inline int kmem_cache_close(struct kmem_cache *s)
        for_each_node_state(node, N_NORMAL_MEMORY) {
                struct kmem_cache_node *n = get_node(s, node);
 
-               n->nr_partial -= free_list(s, n, &n->partial);
-               if (slabs_node(s, node))
+               free_partial(s, n);
+               if (n->nr_partial || slabs_node(s, node))
                        return 1;
        }
        free_kmem_cache_nodes(s);
@@ -2426,8 +2476,11 @@ void kmem_cache_destroy(struct kmem_cache *s)
        if (!s->refcount) {
                list_del(&s->list);
                up_write(&slub_lock);
-               if (kmem_cache_close(s))
-                       WARN_ON(1);
+               if (kmem_cache_close(s)) {
+                       printk(KERN_ERR "SLUB %s: %s called for cache that "
+                               "still has objects.\n", s->name, __func__);
+                       dump_stack();
+               }
                sysfs_slab_remove(s);
        } else
                up_write(&slub_lock);
@@ -2486,7 +2539,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
        down_write(&slub_lock);
        if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-                       flags | __KMALLOC_CACHE, NULL))
+                                                               flags, NULL))
                goto panic;
 
        list_add(&s->list, &slab_caches);
@@ -2734,8 +2787,9 @@ int kmem_cache_shrink(struct kmem_cache *s)
        struct kmem_cache_node *n;
        struct page *page;
        struct page *t;
+       int objects = oo_objects(s->max);
        struct list_head *slabs_by_inuse =
-               kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
+               kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
        unsigned long flags;
 
        if (!slabs_by_inuse)
@@ -2748,7 +2802,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
                if (!n->nr_partial)
                        continue;
 
-               for (i = 0; i < s->objects; i++)
+               for (i = 0; i < objects; i++)
                        INIT_LIST_HEAD(slabs_by_inuse + i);
 
                spin_lock_irqsave(&n->list_lock, flags);
@@ -2780,7 +2834,7 @@ int kmem_cache_shrink(struct kmem_cache *s)
                 * Rebuild the partial list with the slabs filled up most
                 * first and the least used slabs at the end.
                 */
-               for (i = s->objects - 1; i >= 0; i--)
+               for (i = objects - 1; i >= 0; i--)
                        list_splice(slabs_by_inuse + i, n->partial.prev);
 
                spin_unlock_irqrestore(&n->list_lock, flags);
@@ -2928,7 +2982,7 @@ void __init kmem_cache_init(void)
        kmalloc_caches[0].refcount = -1;
        caches++;
 
-       hotplug_memory_notifier(slab_memory_callback, 1);
+       hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
 #endif
 
        /* Able to allocate the per node structures */
@@ -3001,9 +3055,6 @@ static int slab_unmergeable(struct kmem_cache *s)
        if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
                return 1;
 
-       if ((s->flags & __PAGE_ALLOC_FALLBACK))
-               return 1;
-
        if (s->ctor)
                return 1;
 
@@ -3196,7 +3247,8 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 }
 
 #if (defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)) || defined(CONFIG_SLABINFO)
-static unsigned long count_partial(struct kmem_cache_node *n)
+static unsigned long count_partial(struct kmem_cache_node *n,
+                                       int (*get_count)(struct page *))
 {
        unsigned long flags;
        unsigned long x = 0;
@@ -3204,10 +3256,25 @@ static unsigned long count_partial(struct kmem_cache_node *n)
 
        spin_lock_irqsave(&n->list_lock, flags);
        list_for_each_entry(page, &n->partial, lru)
-               x += page->inuse;
+               x += get_count(page);
        spin_unlock_irqrestore(&n->list_lock, flags);
        return x;
 }
+
+static int count_inuse(struct page *page)
+{
+       return page->inuse;
+}
+
+static int count_total(struct page *page)
+{
+       return page->objects;
+}
+
+static int count_free(struct page *page)
+{
+       return page->objects - page->inuse;
+}
 #endif
 
 #if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
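Sketch (illustrative; assumes a struct kmem_cache_node *n is in hand): passing the counter as a callback lets one partial-list walker serve several statistics, e.g.:

        unsigned long inuse    = count_partial(n, count_inuse);
        unsigned long capacity = count_partial(n, count_total);
        unsigned long free     = count_partial(n, count_free);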
@@ -3222,7 +3289,7 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
                return 0;
 
        /* Now we know that a valid freelist exists */
-       bitmap_zero(map, s->objects);
+       bitmap_zero(map, page->objects);
 
        for_each_free_object(p, s, page->freelist) {
                set_bit(slab_index(p, s, addr), map);
@@ -3230,7 +3297,7 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
                        return 0;
        }
 
-       for_each_object(p, s, addr)
+       for_each_object(p, s, addr, page->objects)
                if (!test_bit(slab_index(p, s, addr), map))
                        if (!check_object(s, page, p, 1))
                                return 0;
@@ -3296,7 +3363,7 @@ static long validate_slab_cache(struct kmem_cache *s)
 {
        int node;
        unsigned long count = 0;
-       unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
+       unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
                                sizeof(unsigned long), GFP_KERNEL);
 
        if (!map)
@@ -3499,14 +3566,14 @@ static void process_slab(struct loc_track *t, struct kmem_cache *s,
                struct page *page, enum track_item alloc)
 {
        void *addr = page_address(page);
-       DECLARE_BITMAP(map, s->objects);
+       DECLARE_BITMAP(map, page->objects);
        void *p;
 
-       bitmap_zero(map, s->objects);
+       bitmap_zero(map, page->objects);
        for_each_free_object(p, s, page->freelist)
                set_bit(slab_index(p, s, addr), map);
 
-       for_each_object(p, s, addr)
+       for_each_object(p, s, addr, page->objects)
                if (!test_bit(slab_index(p, s, addr), map))
                        add_location(t, s, get_track(s, p, alloc));
 }
@@ -3555,12 +3622,10 @@ static int list_locations(struct kmem_cache *s, char *buf,
                        len += sprintf(buf + len, "<not-available>");
 
                if (l->sum_time != l->min_time) {
-                       unsigned long remainder;
-
                        len += sprintf(buf + len, " age=%ld/%ld/%ld",
-                       l->min_time,
-                       div_long_long_rem(l->sum_time, l->count, &remainder),
-                       l->max_time);
+                               l->min_time,
+                               (long)div_u64(l->sum_time, l->count),
+                               l->max_time);
                } else
                        len += sprintf(buf + len, " age=%ld",
                                l->min_time);
@@ -3596,22 +3661,23 @@ static int list_locations(struct kmem_cache *s, char *buf,
 }
 
 enum slab_stat_type {
-       SL_FULL,
-       SL_PARTIAL,
-       SL_CPU,
-       SL_OBJECTS
+       SL_ALL,                 /* All slabs */
+       SL_PARTIAL,             /* Only partially allocated slabs */
+       SL_CPU,                 /* Only slabs used for cpu caches */
+       SL_OBJECTS,             /* Determine allocated objects not slabs */
+       SL_TOTAL                /* Determine object capacity not slabs */
 };
 
-#define SO_FULL                (1 << SL_FULL)
+#define SO_ALL         (1 << SL_ALL)
 #define SO_PARTIAL     (1 << SL_PARTIAL)
 #define SO_CPU         (1 << SL_CPU)
 #define SO_OBJECTS     (1 << SL_OBJECTS)
+#define SO_TOTAL       (1 << SL_TOTAL)
 
 static ssize_t show_slab_objects(struct kmem_cache *s,
                            char *buf, unsigned long flags)
 {
        unsigned long total = 0;
-       int cpu;
        int node;
        int x;
        unsigned long *nodes;
@@ -3622,56 +3688,60 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
                return -ENOMEM;
        per_cpu = nodes + nr_node_ids;
 
-       for_each_possible_cpu(cpu) {
-               struct page *page;
-               struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
+       if (flags & SO_CPU) {
+               int cpu;
 
-               if (!c)
-                       continue;
+               for_each_possible_cpu(cpu) {
+                       struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
 
-               page = c->page;
-               node = c->node;
-               if (node < 0)
-                       continue;
-               if (page) {
-                       if (flags & SO_CPU) {
-                               if (flags & SO_OBJECTS)
-                                       x = page->inuse;
+                       if (!c || c->node < 0)
+                               continue;
+
+                       if (c->page) {
+                                       if (flags & SO_TOTAL)
+                                               x = c->page->objects;
+                               else if (flags & SO_OBJECTS)
+                                       x = c->page->inuse;
                                else
                                        x = 1;
+
                                total += x;
-                               nodes[node] += x;
+                               nodes[c->node] += x;
                        }
-                       per_cpu[node]++;
+                       per_cpu[c->node]++;
                }
        }
 
-       for_each_node_state(node, N_NORMAL_MEMORY) {
-               struct kmem_cache_node *n = get_node(s, node);
+       if (flags & SO_ALL) {
+               for_each_node_state(node, N_NORMAL_MEMORY) {
+                       struct kmem_cache_node *n = get_node(s, node);
+
+               if (flags & SO_TOTAL)
+                       x = atomic_long_read(&n->total_objects);
+               else if (flags & SO_OBJECTS)
+                       x = atomic_long_read(&n->total_objects) -
+                               count_partial(n, count_free);
 
-               if (flags & SO_PARTIAL) {
-                       if (flags & SO_OBJECTS)
-                               x = count_partial(n);
                        else
-                               x = n->nr_partial;
+                               x = atomic_long_read(&n->nr_slabs);
                        total += x;
                        nodes[node] += x;
                }
 
-               if (flags & SO_FULL) {
-                       int full_slabs = atomic_long_read(&n->nr_slabs)
-                                       - per_cpu[node]
-                                       - n->nr_partial;
+       } else if (flags & SO_PARTIAL) {
+               for_each_node_state(node, N_NORMAL_MEMORY) {
+                       struct kmem_cache_node *n = get_node(s, node);
 
-                       if (flags & SO_OBJECTS)
-                               x = full_slabs * s->objects;
+                       if (flags & SO_TOTAL)
+                               x = count_partial(n, count_total);
+                       else if (flags & SO_OBJECTS)
+                               x = count_partial(n, count_inuse);
                        else
-                               x = full_slabs;
+                               x = n->nr_partial;
                        total += x;
                        nodes[node] += x;
                }
        }
-
        x = sprintf(buf, "%lu", total);
 #ifdef CONFIG_NUMA
        for_each_node_state(node, N_NORMAL_MEMORY)
@@ -3686,14 +3756,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s,
 static int any_slab_objects(struct kmem_cache *s)
 {
        int node;
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
-
-               if (c && c->page)
-                       return 1;
-       }
 
        for_each_online_node(node) {
                struct kmem_cache_node *n = get_node(s, node);
@@ -3701,7 +3763,7 @@ static int any_slab_objects(struct kmem_cache *s)
                if (!n)
                        continue;
 
-               if (n->nr_partial || atomic_long_read(&n->nr_slabs))
+               if (atomic_read(&n->total_objects))
                        return 1;
        }
        return 0;
@@ -3743,15 +3805,27 @@ SLAB_ATTR_RO(object_size);
 
 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
 {
-       return sprintf(buf, "%d\n", s->objects);
+       return sprintf(buf, "%d\n", oo_objects(s->oo));
 }
 SLAB_ATTR_RO(objs_per_slab);
 
+static ssize_t order_store(struct kmem_cache *s,
+                               const char *buf, size_t length)
+{
+       int order = simple_strtoul(buf, NULL, 10);
+
+       if (order > slub_max_order || order < slub_min_order)
+               return -EINVAL;
+
+       calculate_sizes(s, order);
+       return length;
+}
+
 static ssize_t order_show(struct kmem_cache *s, char *buf)
 {
-       return sprintf(buf, "%d\n", s->order);
+       return sprintf(buf, "%d\n", oo_order(s->oo));
 }
-SLAB_ATTR_RO(order);
+SLAB_ATTR(order);
 
 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
 {
@@ -3772,7 +3846,7 @@ SLAB_ATTR_RO(aliases);
 
 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
 {
-       return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
+       return show_slab_objects(s, buf, SO_ALL);
 }
 SLAB_ATTR_RO(slabs);
 
@@ -3790,10 +3864,22 @@ SLAB_ATTR_RO(cpu_slabs);
 
 static ssize_t objects_show(struct kmem_cache *s, char *buf)
 {
-       return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
+       return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
 }
 SLAB_ATTR_RO(objects);
 
+static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
+{
+       return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
+}
+SLAB_ATTR_RO(objects_partial);
+
+static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
+{
+       return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
+}
+SLAB_ATTR_RO(total_objects);
+
 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
 {
        return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
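
The new attributes above only differ in the flag mask handed to
show_slab_objects(); the mapping, written out as a comment (taken from the
callers in this patch):

	/*
	 * slabs           -> SO_ALL                   count all slabs
	 * objects         -> SO_ALL | SO_OBJECTS      allocated objects in them
	 * objects_partial -> SO_PARTIAL | SO_OBJECTS  objects on partial slabs
	 * total_objects   -> SO_ALL | SO_TOTAL        total object capacity
	 */
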
@@ -3873,7 +3959,7 @@ static ssize_t red_zone_store(struct kmem_cache *s,
        s->flags &= ~SLAB_RED_ZONE;
        if (buf[0] == '1')
                s->flags |= SLAB_RED_ZONE;
-       calculate_sizes(s);
+       calculate_sizes(s, -1);
        return length;
 }
 SLAB_ATTR(red_zone);
@@ -3892,7 +3978,7 @@ static ssize_t poison_store(struct kmem_cache *s,
        s->flags &= ~SLAB_POISON;
        if (buf[0] == '1')
                s->flags |= SLAB_POISON;
-       calculate_sizes(s);
+       calculate_sizes(s, -1);
        return length;
 }
 SLAB_ATTR(poison);
@@ -3911,7 +3997,7 @@ static ssize_t store_user_store(struct kmem_cache *s,
        s->flags &= ~SLAB_STORE_USER;
        if (buf[0] == '1')
                s->flags |= SLAB_STORE_USER;
-       calculate_sizes(s);
+       calculate_sizes(s, -1);
        return length;
 }
 SLAB_ATTR(store_user);
@@ -4042,7 +4128,7 @@ STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
-
+STAT_ATTR(ORDER_FALLBACK, order_fallback);
 #endif
 
 static struct attribute *slab_attrs[] = {
@@ -4051,6 +4137,8 @@ static struct attribute *slab_attrs[] = {
        &objs_per_slab_attr.attr,
        &order_attr.attr,
        &objects_attr.attr,
+       &objects_partial_attr.attr,
+       &total_objects_attr.attr,
        &slabs_attr.attr,
        &partial_attr.attr,
        &cpu_slabs_attr.attr,
@@ -4093,6 +4181,7 @@ static struct attribute *slab_attrs[] = {
        &deactivate_to_head_attr.attr,
        &deactivate_to_tail_attr.attr,
        &deactivate_remote_frees_attr.attr,
+       &order_fallback_attr.attr,
 #endif
        NULL
 };
@@ -4379,7 +4468,8 @@ static int s_show(struct seq_file *m, void *p)
        unsigned long nr_partials = 0;
        unsigned long nr_slabs = 0;
        unsigned long nr_inuse = 0;
-       unsigned long nr_objs;
+       unsigned long nr_objs = 0;
+       unsigned long nr_free = 0;
        struct kmem_cache *s;
        int node;
 
@@ -4393,14 +4483,15 @@ static int s_show(struct seq_file *m, void *p)
 
                nr_partials += n->nr_partial;
                nr_slabs += atomic_long_read(&n->nr_slabs);
-               nr_inuse += count_partial(n);
+               nr_objs += atomic_long_read(&n->total_objects);
+               nr_free += count_partial(n, count_free);
        }
 
-       nr_objs = nr_slabs * s->objects;
-       nr_inuse += (nr_slabs - nr_partials) * s->objects;
+       nr_inuse = nr_objs - nr_free;
 
        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
-                  nr_objs, s->size, s->objects, (1 << s->order));
+                  nr_objs, s->size, oo_objects(s->oo),
+                  (1 << oo_order(s->oo)));
        seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
        seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
                   0UL);
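
Several of the hunks above replace the old s->objects and s->order fields
with oo_objects()/oo_order() accessors on a single packed value. One way such
a packed order/object-count word can look, sketched for illustration (the
shift width and struct layout are assumptions, not quoted from this patch):

	/* page order in the high bits, objects per slab in the low bits */
	#define OO_SHIFT	16
	#define OO_MASK		((1UL << OO_SHIFT) - 1)

	struct kmem_cache_order_objects {
		unsigned long x;
	};

	static inline unsigned long oo_order(struct kmem_cache_order_objects x)
	{
		return x.x >> OO_SHIFT;
	}

	static inline unsigned long oo_objects(struct kmem_cache_order_objects x)
	{
		return x.x & OO_MASK;
	}
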
index 98d6b39c34722c1734bee587a612ce0f74d7fbfa..36511c7b5e2c797067ccb539646f580aa8ec1012 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/vmalloc.h>
+#include "internal.h"
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
@@ -208,12 +209,12 @@ static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long p
 }
 
 /*
- * We need this if we ever free the mem_maps.  While not implemented yet,
- * this function is included for parity with its sibling.
+ * Decode mem_map from the coded memmap
  */
-static __attribute((unused))
 struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
 {
+       /* mask off the extra low bits of information */
+       coded_mem_map &= SECTION_MAP_MASK;
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
 }
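
The decode above is the inverse of sparse_encode_mem_map(): the stored value
is the mem_map pointer biased by the section's first pfn, with the low bits
reused for section state flags. Roughly (a sketch; the flag layout behind
SECTION_MAP_MASK is not spelled out in this hunk):

	/* encode: bias the pointer by the section's start pfn */
	coded_mem_map = (unsigned long)(mem_map - section_nr_to_pfn(pnum));

	/* decode: strip the flag bits, then add the bias back */
	mem_map = (struct page *)(coded_mem_map & SECTION_MAP_MASK)
			+ section_nr_to_pfn(pnum);
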
 
@@ -232,7 +233,7 @@ static int __meminit sparse_init_one_section(struct mem_section *ms,
        return 1;
 }
 
-static unsigned long usemap_size(void)
+unsigned long usemap_size(void)
 {
        unsigned long size_bytes;
        size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
@@ -260,7 +261,7 @@ static unsigned long *__init sparse_early_usemap_alloc(unsigned long pnum)
        /* Stupid: suppress gcc warning for SPARSEMEM && !NUMA */
        nid = 0;
 
-       printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
+       printk(KERN_WARNING "%s: allocation failed\n", __func__);
        return NULL;
 }
 
@@ -273,8 +274,8 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
        if (map)
                return map;
 
-       map = alloc_bootmem_node(NODE_DATA(nid),
-                       sizeof(struct page) * PAGES_PER_SECTION);
+       map = alloc_bootmem_pages_node(NODE_DATA(nid),
+                      PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION));
        return map;
 }
 #endif /* !CONFIG_SPARSEMEM_VMEMMAP */
@@ -290,11 +291,14 @@ struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
                return map;
 
        printk(KERN_ERR "%s: sparsemem memory map backing failed "
-                       "some memory will not be available.\n", __FUNCTION__);
+                       "some memory will not be available.\n", __func__);
        ms->section_mem_map = 0;
        return NULL;
 }
 
+void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
+{
+}
 /*
  * Allocate the accumulated non-linear sections, allocate a mem_map
  * for each and record the physical to section mapping.
@@ -304,22 +308,50 @@ void __init sparse_init(void)
        unsigned long pnum;
        struct page *map;
        unsigned long *usemap;
+       unsigned long **usemap_map;
+       int size;
+
+       /*
+        * The mem_map for a section is a large allocation (2M with big pages
+        * on x86_64) while the usemap is tiny (about 24 bytes).  Allocating
+        * them alternately pushes each following 2M-aligned mem_map to the
+        * next 2M boundary, so a big system ends up with a lot of holes.
+        * Try to keep the 2M allocations contiguous instead.
+        *
+        * powerpc needs to call sparse_init_one_section() right after each
+        * sparse_early_mem_map_alloc(), so allocate usemap_map first.
+        */
+       size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
+       usemap_map = alloc_bootmem(size);
+       if (!usemap_map)
+               panic("can not allocate usemap_map\n");
 
        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!present_section_nr(pnum))
                        continue;
+               usemap_map[pnum] = sparse_early_usemap_alloc(pnum);
+       }
 
-               map = sparse_early_mem_map_alloc(pnum);
-               if (!map)
+       for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
+               if (!present_section_nr(pnum))
                        continue;
 
-               usemap = sparse_early_usemap_alloc(pnum);
+               usemap = usemap_map[pnum];
                if (!usemap)
                        continue;
 
+               map = sparse_early_mem_map_alloc(pnum);
+               if (!map)
+                       continue;
+
                sparse_init_one_section(__nr_to_section(pnum), pnum, map,
                                                                usemap);
        }
+
+       vmemmap_populate_print_last();
+
+       free_bootmem(__pa(usemap_map), size);
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
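
Condensed, the reworked sparse_init() above does two passes so that the large
mem_map allocations are not interleaved with the tiny usemap allocations
(error handling trimmed in this sketch):

	/* pass 1: allocate every small usemap first */
	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++)
		if (present_section_nr(pnum))
			usemap_map[pnum] = sparse_early_usemap_alloc(pnum);

	/* pass 2: the large mem_map allocations can now stay contiguous */
	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum) || !usemap_map[pnum])
			continue;
		map = sparse_early_mem_map_alloc(pnum);
		if (map)
			sparse_init_one_section(__nr_to_section(pnum), pnum,
						map, usemap_map[pnum]);
	}
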
@@ -334,6 +366,9 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 {
        return; /* XXX: Not implemented yet */
 }
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
+}
 #else
 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
 {
@@ -371,8 +406,69 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
                free_pages((unsigned long)memmap,
                           get_order(sizeof(struct page) * nr_pages));
 }
+
+static void free_map_bootmem(struct page *page, unsigned long nr_pages)
+{
+       unsigned long maps_section_nr, removing_section_nr, i;
+       int magic;
+
+       for (i = 0; i < nr_pages; i++, page++) {
+               magic = atomic_read(&page->_mapcount);
+
+               BUG_ON(magic == NODE_INFO);
+
+               maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
+               removing_section_nr = page->private;
+
+               /*
+                * By the time this function is called, the section being
+                * removed has already been logically offlined, so all of its
+                * pages are isolated from the page allocator. If the memmap
+                * of the section being removed lives on that same section, it
+                * must not be freed here: the page allocator could hand it
+                * out again even though the memory is about to be removed
+                * physically.
+                */
+               if (maps_section_nr != removing_section_nr)
+                       put_page_bootmem(page);
+       }
+}
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
+static void free_section_usemap(struct page *memmap, unsigned long *usemap)
+{
+       struct page *usemap_page;
+       unsigned long nr_pages;
+
+       if (!usemap)
+               return;
+
+       usemap_page = virt_to_page(usemap);
+       /*
+        * Check to see if allocation came from hot-plug-add
+        */
+       if (PageSlab(usemap_page)) {
+               kfree(usemap);
+               if (memmap)
+                       __kfree_section_memmap(memmap, PAGES_PER_SECTION);
+               return;
+       }
+
+       /*
+        * The usemap came from bootmem. It is packed together with other
+        * usemaps on the section holding the pgdat at boot time, so just
+        * keep it as is for now.
+        */
+
+       if (memmap) {
+               struct page *memmap_page;
+               memmap_page = virt_to_page(memmap);
+
+               nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
+                       >> PAGE_SHIFT;
+
+               free_map_bootmem(memmap_page, nr_pages);
+       }
+}
+
 /*
  * returns the number of sections whose mem_maps were properly
  * set.  If this is <=0, then that means that the passed-in
@@ -425,4 +521,20 @@ out:
        }
        return ret;
 }
+
+void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
+{
+       struct page *memmap = NULL;
+       unsigned long *usemap = NULL;
+
+       if (ms->section_mem_map) {
+               usemap = ms->pageblock_flags;
+               memmap = sparse_decode_mem_map(ms->section_mem_map,
+                                               __section_nr(ms));
+               ms->section_mem_map = 0;
+               ms->pageblock_flags = NULL;
+       }
+
+       free_section_usemap(memmap, usemap);
+}
 #endif
index aa1139ccf3a7ef91d9e93a8aaa21bb340f665716..91e194445a5eae53ee3ae97e3f616cdcb7d442d0 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -132,34 +132,21 @@ static void pagevec_move_tail(struct pagevec *pvec)
  * Writeback is about to end against a page which has been marked for immediate
  * reclaim.  If it still appears to be reclaimable, move it to the tail of the
  * inactive list.
- *
- * Returns zero if it cleared PG_writeback.
  */
-int rotate_reclaimable_page(struct page *page)
+void rotate_reclaimable_page(struct page *page)
 {
-       struct pagevec *pvec;
-       unsigned long flags;
-
-       if (PageLocked(page))
-               return 1;
-       if (PageDirty(page))
-               return 1;
-       if (PageActive(page))
-               return 1;
-       if (!PageLRU(page))
-               return 1;
-
-       page_cache_get(page);
-       local_irq_save(flags);
-       pvec = &__get_cpu_var(lru_rotate_pvecs);
-       if (!pagevec_add(pvec, page))
-               pagevec_move_tail(pvec);
-       local_irq_restore(flags);
-
-       if (!test_clear_page_writeback(page))
-               BUG();
+       if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
+           PageLRU(page)) {
+               struct pagevec *pvec;
+               unsigned long flags;
 
-       return 0;
+               page_cache_get(page);
+               local_irq_save(flags);
+               pvec = &__get_cpu_var(lru_rotate_pvecs);
+               if (!pagevec_add(pvec, page))
+                       pagevec_move_tail(pvec);
+               local_irq_restore(flags);
+       }
 }
 
 /*
index 50757ee3f9f3ee243ad23df60ae56a923bb0e64e..d8aadaf2a0baa5e87aff579039323585add00952 100644 (file)
@@ -33,7 +33,7 @@ static const struct address_space_operations swap_aops = {
 };
 
 static struct backing_dev_info swap_backing_dev_info = {
-       .capabilities   = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+       .capabilities   = BDI_CAP_NO_ACCT_AND_WRITEBACK,
        .unplug_io_fn   = swap_unplug_io_fn,
 };
 
index 2da149cfc9accead70439821262dda7d5668815f..bd1bb59203065621e561900b125c017bfe65054d 100644 (file)
@@ -1426,11 +1426,7 @@ static const struct file_operations proc_swaps_operations = {
 
 static int __init procswaps_init(void)
 {
-       struct proc_dir_entry *entry;
-
-       entry = create_proc_entry("swaps", 0, NULL);
-       if (entry)
-               entry->proc_fops = &proc_swaps_operations;
+       proc_create("swaps", 0, NULL, &proc_swaps_operations);
        return 0;
 }
 __initcall(procswaps_init);
@@ -1582,6 +1578,14 @@ asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
                error = -EINVAL;
                goto bad_swap;
        case 2:
+               /* swap partition endianness hack... */
+               if (swab32(swap_header->info.version) == 1) {
+                       swab32s(&swap_header->info.version);
+                       swab32s(&swap_header->info.last_page);
+                       swab32s(&swap_header->info.nr_badpages);
+                       for (i = 0; i < swap_header->info.nr_badpages; i++)
+                               swab32s(&swap_header->info.badpages[i]);
+               }
                /* Check the swap header's sub-version and the size of
                    the swap file and bad block lists */
                if (swap_header->info.version != 1) {
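
For reference, swab32() returns its 32-bit argument with the byte order
reversed and swab32s() does the same in place, which is why the header only
needs the one check above; for example (values purely illustrative):

	u32 v = 0x12345678;

	v = swab32(v);		/* v is now 0x78563412 */
	swab32s(&v);		/* swapped in place, back to 0x12345678 */
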
index 7d20ce41ecf52c2cd4027207558734f835124992..b8961cb6341401e27f9debffc5832154525fadcb 100644 (file)
@@ -391,6 +391,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
        pgoff_t next;
        int i;
        int ret = 0;
+       int ret2 = 0;
        int did_range_unmap = 0;
        int wrapped = 0;
 
@@ -438,9 +439,13 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
                                }
                        }
                        BUG_ON(page_mapped(page));
-                       ret = do_launder_page(mapping, page);
-                       if (ret == 0 && !invalidate_complete_page2(mapping, page))
-                               ret = -EIO;
+                       ret2 = do_launder_page(mapping, page);
+                       if (ret2 == 0) {
+                               if (!invalidate_complete_page2(mapping, page))
+                                       ret2 = -EIO;
+                       }
+                       if (ret2 < 0)
+                               ret = ret2;
                        unlock_page(page);
                }
                pagevec_release(&pvec);
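
The ret/ret2 split above lets the loop keep laundering the remaining pages
while still reporting a failure to the caller; the bookkeeping, reduced to a
sketch with a hypothetical per-page helper:

	int ret = 0, ret2;
	int i;

	for (i = 0; i < nr_pages; i++) {
		ret2 = process_one_page(pages[i]);	/* hypothetical helper */
		if (ret2 < 0)
			ret = ret2;	/* remember the failure, keep going */
	}
	return ret;
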
index ecf91f8034bf0e8cb2c3effb63609fd3c8405cdb..6e45b0f3d1256e0bdc3a3caffa74597cf96a5138 100644 (file)
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
-
+#include <linux/seq_file.h>
+#include <linux/debugobjects.h>
 #include <linux/vmalloc.h>
+#include <linux/kallsyms.h>
 
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
@@ -25,7 +27,7 @@ DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
 static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-                           int node);
+                           int node, void *caller);
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
 {
@@ -204,9 +206,9 @@ unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 }
 EXPORT_SYMBOL(vmalloc_to_pfn);
 
-static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long flags,
-                                           unsigned long start, unsigned long end,
-                                           int node, gfp_t gfp_mask)
+static struct vm_struct *
+__get_vm_area_node(unsigned long size, unsigned long flags, unsigned long start,
+               unsigned long end, int node, gfp_t gfp_mask, void *caller)
 {
        struct vm_struct **p, *tmp, *area;
        unsigned long align = 1;
@@ -269,6 +271,7 @@ found:
        area->pages = NULL;
        area->nr_pages = 0;
        area->phys_addr = 0;
+       area->caller = caller;
        write_unlock(&vmlist_lock);
 
        return area;
@@ -284,7 +287,8 @@ out:
 struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
                                unsigned long start, unsigned long end)
 {
-       return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL);
+       return __get_vm_area_node(size, flags, start, end, -1, GFP_KERNEL,
+                                               __builtin_return_address(0));
 }
 EXPORT_SYMBOL_GPL(__get_vm_area);
 
@@ -299,14 +303,22 @@ EXPORT_SYMBOL_GPL(__get_vm_area);
  */
 struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 {
-       return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
+       return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+                               -1, GFP_KERNEL, __builtin_return_address(0));
+}
+
+struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
+                               void *caller)
+{
+       return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END,
+                                               -1, GFP_KERNEL, caller);
 }
 
 struct vm_struct *get_vm_area_node(unsigned long size, unsigned long flags,
                                   int node, gfp_t gfp_mask)
 {
        return __get_vm_area_node(size, flags, VMALLOC_START, VMALLOC_END, node,
-                                 gfp_mask);
+                                 gfp_mask, __builtin_return_address(0));
 }
 
 /* Caller must hold vmlist_lock */
@@ -383,6 +395,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
        }
 
        debug_check_no_locks_freed(addr, area->size);
+       debug_check_no_obj_freed(addr, area->size);
 
        if (deallocate_pages) {
                int i;
@@ -455,9 +468,11 @@ void *vmap(struct page **pages, unsigned int count,
        if (count > num_physpages)
                return NULL;
 
-       area = get_vm_area((count << PAGE_SHIFT), flags);
+       area = get_vm_area_caller((count << PAGE_SHIFT), flags,
+                                       __builtin_return_address(0));
        if (!area)
                return NULL;
+
        if (map_vm_area(area, prot, &pages)) {
                vunmap(area->addr);
                return NULL;
@@ -468,7 +483,7 @@ void *vmap(struct page **pages, unsigned int count,
 EXPORT_SYMBOL(vmap);
 
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
-                                pgprot_t prot, int node)
+                                pgprot_t prot, int node, void *caller)
 {
        struct page **pages;
        unsigned int nr_pages, array_size, i;
@@ -480,7 +495,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
        /* Please note that the recursion is strictly bounded. */
        if (array_size > PAGE_SIZE) {
                pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
-                                       PAGE_KERNEL, node);
+                               PAGE_KERNEL, node, caller);
                area->flags |= VM_VPAGES;
        } else {
                pages = kmalloc_node(array_size,
@@ -488,6 +503,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                                node);
        }
        area->pages = pages;
+       area->caller = caller;
        if (!area->pages) {
                remove_vm_area(area->addr);
                kfree(area);
@@ -521,7 +537,8 @@ fail:
 
 void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
 {
-       return __vmalloc_area_node(area, gfp_mask, prot, -1);
+       return __vmalloc_area_node(area, gfp_mask, prot, -1,
+                                       __builtin_return_address(0));
 }
 
 /**
@@ -530,13 +547,14 @@ void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot)
  *     @gfp_mask:      flags for the page level allocator
  *     @prot:          protection mask for the allocated pages
  *     @node:          node to use for allocation or -1
+ *     @caller:        caller's return address
  *
  *     Allocate enough pages to cover @size from the page level
  *     allocator with @gfp_mask flags.  Map them into contiguous
  *     kernel virtual space, using a pagetable protection of @prot.
  */
 static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
-                           int node)
+                                               int node, void *caller)
 {
        struct vm_struct *area;
 
@@ -544,16 +562,19 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot,
        if (!size || (size >> PAGE_SHIFT) > num_physpages)
                return NULL;
 
-       area = get_vm_area_node(size, VM_ALLOC, node, gfp_mask);
+       area = __get_vm_area_node(size, VM_ALLOC, VMALLOC_START, VMALLOC_END,
+                                               node, gfp_mask, caller);
+
        if (!area)
                return NULL;
 
-       return __vmalloc_area_node(area, gfp_mask, prot, node);
+       return __vmalloc_area_node(area, gfp_mask, prot, node, caller);
 }
 
 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
 {
-       return __vmalloc_node(size, gfp_mask, prot, -1);
+       return __vmalloc_node(size, gfp_mask, prot, -1,
+                               __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__vmalloc);
 
@@ -568,7 +589,8 @@ EXPORT_SYMBOL(__vmalloc);
  */
 void *vmalloc(unsigned long size)
 {
-       return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+       return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+                                       -1, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc);
 
@@ -608,7 +630,8 @@ EXPORT_SYMBOL(vmalloc_user);
  */
 void *vmalloc_node(unsigned long size, int node)
 {
-       return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, node);
+       return __vmalloc_node(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
+                                       node, __builtin_return_address(0));
 }
 EXPORT_SYMBOL(vmalloc_node);
 
@@ -843,7 +866,8 @@ struct vm_struct *alloc_vm_area(size_t size)
 {
        struct vm_struct *area;
 
-       area = get_vm_area(size, VM_IOREMAP);
+       area = get_vm_area_caller(size, VM_IOREMAP,
+                               __builtin_return_address(0));
        if (area == NULL)
                return NULL;
 
@@ -873,3 +897,85 @@ void free_vm_area(struct vm_struct *area)
        kfree(area);
 }
 EXPORT_SYMBOL_GPL(free_vm_area);
+
+
+#ifdef CONFIG_PROC_FS
+static void *s_start(struct seq_file *m, loff_t *pos)
+{
+       loff_t n = *pos;
+       struct vm_struct *v;
+
+       read_lock(&vmlist_lock);
+       v = vmlist;
+       while (n > 0 && v) {
+               n--;
+               v = v->next;
+       }
+       if (!n)
+               return v;
+
+       return NULL;
+
+}
+
+static void *s_next(struct seq_file *m, void *p, loff_t *pos)
+{
+       struct vm_struct *v = p;
+
+       ++*pos;
+       return v->next;
+}
+
+static void s_stop(struct seq_file *m, void *p)
+{
+       read_unlock(&vmlist_lock);
+}
+
+static int s_show(struct seq_file *m, void *p)
+{
+       struct vm_struct *v = p;
+
+       seq_printf(m, "0x%p-0x%p %7ld",
+               v->addr, v->addr + v->size, v->size);
+
+       if (v->caller) {
+               char buff[2 * KSYM_NAME_LEN];
+
+               seq_putc(m, ' ');
+               sprint_symbol(buff, (unsigned long)v->caller);
+               seq_puts(m, buff);
+       }
+
+       if (v->nr_pages)
+               seq_printf(m, " pages=%d", v->nr_pages);
+
+       if (v->phys_addr)
+               seq_printf(m, " phys=%lx", v->phys_addr);
+
+       if (v->flags & VM_IOREMAP)
+               seq_printf(m, " ioremap");
+
+       if (v->flags & VM_ALLOC)
+               seq_printf(m, " vmalloc");
+
+       if (v->flags & VM_MAP)
+               seq_printf(m, " vmap");
+
+       if (v->flags & VM_USERMAP)
+               seq_printf(m, " user");
+
+       if (v->flags & VM_VPAGES)
+               seq_printf(m, " vpages");
+
+       seq_putc(m, '\n');
+       return 0;
+}
+
+const struct seq_operations vmalloc_op = {
+       .start = s_start,
+       .next = s_next,
+       .stop = s_stop,
+       .show = s_show,
+};
+#endif
+
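
With the caller recorded at allocation time, each vmlist entry can be
attributed to its allocation site; a line emitted by s_show() above looks
roughly like this (address range, size and symbol purely illustrative):

	0xffffc20000000000-0xffffc20000101000 1052672 my_driver_init+0x42/0x90 pages=256 vmalloc
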
index f80a5b7c057ffc387ba87e50b038f8e3756e1084..9a29901ad3b3474858d94f1e281306645f5fc297 100644 (file)
@@ -191,7 +191,7 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                shrinker->nr += delta;
                if (shrinker->nr < 0) {
                        printk(KERN_ERR "%s: nr=%ld\n",
-                                       __FUNCTION__, shrinker->nr);
+                                       __func__, shrinker->nr);
                        shrinker->nr = max_pass;
                }
 
@@ -339,7 +339,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                if (PagePrivate(page)) {
                        if (try_to_free_buffers(page)) {
                                ClearPageDirty(page);
-                               printk("%s: orphaned page\n", __FUNCTION__);
+                               printk("%s: orphaned page\n", __func__);
                                return PAGE_CLEAN;
                        }
                }
@@ -1246,17 +1246,16 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
+       enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
        unsigned long nr_reclaimed = 0;
-       int i;
-
+       struct zoneref *z;
+       struct zone *zone;
 
        sc->all_unreclaimable = 1;
-       for (i = 0; zones[i] != NULL; i++) {
-               struct zone *zone = zones[i];
-
+       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
                if (!populated_zone(zone))
                        continue;
                /*
@@ -1300,9 +1299,12 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
  * hope that some of these pages can be written.  But if the allocating task
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
+ *
+ * Returns 0 if no pages were reclaimed, otherwise the number of pages
+ * reclaimed.
  */
-static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
-                                         struct scan_control *sc)
+static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+                                       struct scan_control *sc)
 {
        int priority;
        int ret = 0;
@@ -1310,7 +1312,9 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
        unsigned long nr_reclaimed = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        unsigned long lru_pages = 0;
-       int i;
+       struct zoneref *z;
+       struct zone *zone;
+       enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 
        if (scan_global_lru(sc))
                count_vm_event(ALLOCSTALL);
@@ -1318,8 +1322,7 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
         * mem_cgroup will not do shrink_slab.
         */
        if (scan_global_lru(sc)) {
-               for (i = 0; zones[i] != NULL; i++) {
-                       struct zone *zone = zones[i];
+               for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 
                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                continue;
@@ -1333,13 +1336,13 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
                sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token();
-               nr_reclaimed += shrink_zones(priority, zones, sc);
+               nr_reclaimed += shrink_zones(priority, zonelist, sc);
                /*
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
                 */
                if (scan_global_lru(sc)) {
-                       shrink_slab(sc->nr_scanned, gfp_mask, lru_pages);
+                       shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
                        if (reclaim_state) {
                                nr_reclaimed += reclaim_state->reclaimed_slab;
                                reclaim_state->reclaimed_slab = 0;
@@ -1347,7 +1350,7 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
                }
                total_scanned += sc->nr_scanned;
                if (nr_reclaimed >= sc->swap_cluster_max) {
-                       ret = 1;
+                       ret = nr_reclaimed;
                        goto out;
                }
 
@@ -1370,7 +1373,7 @@ static unsigned long do_try_to_free_pages(struct zone **zones, gfp_t gfp_mask,
        }
        /* top priority shrink_caches still had more to do? don't OOM, then */
        if (!sc->all_unreclaimable && scan_global_lru(sc))
-               ret = 1;
+               ret = nr_reclaimed;
 out:
        /*
         * Now that we've scanned all the zones at this priority level, note
@@ -1383,8 +1386,7 @@ out:
                priority = 0;
 
        if (scan_global_lru(sc)) {
-               for (i = 0; zones[i] != NULL; i++) {
-                       struct zone *zone = zones[i];
+               for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 
                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                continue;
@@ -1397,7 +1399,8 @@ out:
        return ret;
 }
 
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+                                                               gfp_t gfp_mask)
 {
        struct scan_control sc = {
                .gfp_mask = gfp_mask,
@@ -1410,7 +1413,7 @@ unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
                .isolate_pages = isolate_pages_global,
        };
 
-       return do_try_to_free_pages(zones, gfp_mask, &sc);
+       return do_try_to_free_pages(zonelist, &sc);
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
@@ -1419,7 +1422,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                                                gfp_t gfp_mask)
 {
        struct scan_control sc = {
-               .gfp_mask = gfp_mask,
                .may_writepage = !laptop_mode,
                .may_swap = 1,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
@@ -1428,13 +1430,12 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                .mem_cgroup = mem_cont,
                .isolate_pages = mem_cgroup_isolate_pages,
        };
-       struct zone **zones;
-       int target_zone = gfp_zone(GFP_HIGHUSER_MOVABLE);
+       struct zonelist *zonelist;
 
-       zones = NODE_DATA(numa_node_id())->node_zonelists[target_zone].zones;
-       if (do_try_to_free_pages(zones, sc.gfp_mask, &sc))
-               return 1;
-       return 0;
+       sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+                       (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
+       zonelist = NODE_DATA(numa_node_id())->node_zonelists;
+       return do_try_to_free_pages(zonelist, &sc);
 }
 #endif
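
The conversion above walks the new struct zonelist with
for_each_zone_zonelist() instead of indexing a NULL-terminated zone array;
the iteration pattern used throughout this file is (locals named as in the
hunks above):

	struct zoneref *z;
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);

	/* visit every zone in the zonelist at or below high_zoneidx */
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		if (!populated_zone(zone))
			continue;
		/* ... per-zone reclaim work ... */
	}
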
 
index 7c7286e9506d29479d263ddc5a54b159fef08ddd..1a32130b958c418ec8a018aa676c73bb3d7cde32 100644 (file)
@@ -322,6 +322,7 @@ void refresh_cpu_vm_stats(int cpu)
                                p->expire = 3;
 #endif
                        }
+               cond_resched();
 #ifdef CONFIG_NUMA
                /*
                 * Deal with draining the remote pageset of this
@@ -364,13 +365,13 @@ void refresh_cpu_vm_stats(int cpu)
  *
  * Must be called with interrupts disabled.
  */
-void zone_statistics(struct zonelist *zonelist, struct zone *z)
+void zone_statistics(struct zone *preferred_zone, struct zone *z)
 {
-       if (z->zone_pgdat == zonelist->zones[0]->zone_pgdat) {
+       if (z->zone_pgdat == preferred_zone->zone_pgdat) {
                __inc_zone_state(z, NUMA_HIT);
        } else {
                __inc_zone_state(z, NUMA_MISS);
-               __inc_zone_state(zonelist->zones[0], NUMA_FOREIGN);
+               __inc_zone_state(preferred_zone, NUMA_FOREIGN);
        }
        if (z->node == numa_node_id())
                __inc_zone_state(z, NUMA_LOCAL);
@@ -547,6 +548,10 @@ static int pagetypeinfo_show(struct seq_file *m, void *arg)
 {
        pg_data_t *pgdat = (pg_data_t *)arg;
 
+       /* check memoryless node */
+       if (!node_state(pgdat->node_id, N_HIGH_MEMORY))
+               return 0;
+
        seq_printf(m, "Page block order: %d\n", pageblock_order);
        seq_printf(m, "Pages per block:  %lu\n", pageblock_nr_pages);
        seq_putc(m, '\n');
@@ -607,6 +612,7 @@ static const char * const vmstat_text[] = {
        "nr_unstable",
        "nr_bounce",
        "nr_vmscan_write",
+       "nr_writeback_temp",
 
 #ifdef CONFIG_NUMA
        "numa_hit",
@@ -645,6 +651,10 @@ static const char * const vmstat_text[] = {
        "allocstall",
 
        "pgrotated",
+#ifdef CONFIG_HUGETLB_PAGE
+       "htlb_buddy_alloc_success",
+       "htlb_buddy_alloc_fail",
+#endif
 #endif
 };
 
index a9018287312071c4cd77a442b062b7444c7b72bf..8f3c58e5f7a562d88abdd9df744bf77fb65663c6 100644 (file)
@@ -76,7 +76,6 @@ static void __exit br_deinit(void)
        rcu_assign_pointer(br_stp_sap->rcv_func, NULL);
 
        br_netlink_fini();
-       br_netfilter_fini();
        unregister_netdevice_notifier(&br_device_notifier);
        brioctl_set(NULL);
 
@@ -84,6 +83,7 @@ static void __exit br_deinit(void)
 
        synchronize_net();
 
+       br_netfilter_fini();
        llc_sap_put(br_stp_sap);
        br_fdb_get_hook = NULL;
        br_fdb_put_hook = NULL;
index 298e0f463c56e0aa67d7e876fee6255255b12bc1..77a981a1ee52249d5fbed7966f294afe7730ffb3 100644 (file)
@@ -411,9 +411,12 @@ err2:
        br_fdb_delete_by_port(br, p, 1);
 err1:
        kobject_del(&p->kobj);
-       return err;
+       goto put_back;
 err0:
        kobject_put(&p->kobj);
+
+put_back:
+       dev_put(dev);
        return err;
 }
 
index 201cbfc6b9ece724b3cda9427f27d976534567c7..69877b8e7e9c772be9479bced40b6b8273662322 100644 (file)
@@ -435,15 +435,13 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                        if (!filter)
                                return -ENOMEM;
 
-                       err = copy_from_user(filter, optval, optlen);
-                       if (err) {
+                       if (copy_from_user(filter, optval, optlen)) {
                                kfree(filter);
-                               return err;
+                               return -EFAULT;
                        }
                } else if (count == 1) {
-                       err = copy_from_user(&sfilter, optval, optlen);
-                       if (err)
-                               return err;
+                       if (copy_from_user(&sfilter, optval, optlen))
+                               return -EFAULT;
                }
 
                lock_sock(sk);
@@ -493,9 +491,8 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                if (optlen != sizeof(err_mask))
                        return -EINVAL;
 
-               err = copy_from_user(&err_mask, optval, optlen);
-               if (err)
-                       return err;
+               if (copy_from_user(&err_mask, optval, optlen))
+                       return -EFAULT;
 
                err_mask &= CAN_ERR_MASK;
 
@@ -531,7 +528,8 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                if (optlen != sizeof(ro->loopback))
                        return -EINVAL;
 
-               err = copy_from_user(&ro->loopback, optval, optlen);
+               if (copy_from_user(&ro->loopback, optval, optlen))
+                       return -EFAULT;
 
                break;
 
@@ -539,7 +537,8 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
                if (optlen != sizeof(ro->recv_own_msgs))
                        return -EINVAL;
 
-               err = copy_from_user(&ro->recv_own_msgs, optval, optlen);
+               if (copy_from_user(&ro->recv_own_msgs, optval, optlen))
+                       return -EFAULT;
 
                break;
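
All of the fixes above follow from copy_from_user()'s return convention: it
returns the number of bytes it could not copy, not an errno, so the old code
could hand a positive byte count back to userspace as if it were an error
code. The idiomatic form is simply:

	if (copy_from_user(&value, optval, optlen))
		return -EFAULT;	/* any nonzero remainder means a fault */
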
 
index 80013fb69a61e0a62bb05ee48643ad2466e2cef8..c823f6f290cbf6c6399cfae02f17b940052c2c73 100644 (file)
@@ -24,6 +24,8 @@
 
 #include <net/scm.h>
 #include <net/sock.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
 #include <asm/uaccess.h>
 #include <net/compat.h>
 
@@ -521,6 +523,203 @@ asmlinkage long compat_sys_getsockopt(int fd, int level, int optname,
        }
        return err;
 }
+
+struct compat_group_req {
+       __u32                            gr_interface;
+       struct __kernel_sockaddr_storage gr_group
+               __attribute__ ((aligned(4)));
+} __attribute__ ((packed));
+
+struct compat_group_source_req {
+       __u32                            gsr_interface;
+       struct __kernel_sockaddr_storage gsr_group
+               __attribute__ ((aligned(4)));
+       struct __kernel_sockaddr_storage gsr_source
+               __attribute__ ((aligned(4)));
+} __attribute__ ((packed));
+
+struct compat_group_filter {
+       __u32                            gf_interface;
+       struct __kernel_sockaddr_storage gf_group
+               __attribute__ ((aligned(4)));
+       __u32                            gf_fmode;
+       __u32                            gf_numsrc;
+       struct __kernel_sockaddr_storage gf_slist[1]
+               __attribute__ ((aligned(4)));
+} __attribute__ ((packed));
+
+#define __COMPAT_GF0_SIZE (sizeof(struct compat_group_filter) - \
+                       sizeof(struct __kernel_sockaddr_storage))
+
+
+int compat_mc_setsockopt(struct sock *sock, int level, int optname,
+       char __user *optval, int optlen,
+       int (*setsockopt)(struct sock *,int,int,char __user *,int))
+{
+       char __user     *koptval = optval;
+       int             koptlen = optlen;
+
+       switch (optname) {
+       case MCAST_JOIN_GROUP:
+       case MCAST_LEAVE_GROUP:
+       {
+               struct compat_group_req __user *gr32 = (void *)optval;
+               struct group_req __user *kgr =
+                       compat_alloc_user_space(sizeof(struct group_req));
+               u32 interface;
+
+               if (!access_ok(VERIFY_READ, gr32, sizeof(*gr32)) ||
+                   !access_ok(VERIFY_WRITE, kgr, sizeof(struct group_req)) ||
+                   __get_user(interface, &gr32->gr_interface) ||
+                   __put_user(interface, &kgr->gr_interface) ||
+                   copy_in_user(&kgr->gr_group, &gr32->gr_group,
+                               sizeof(kgr->gr_group)))
+                       return -EFAULT;
+               koptval = (char __user *)kgr;
+               koptlen = sizeof(struct group_req);
+               break;
+       }
+       case MCAST_JOIN_SOURCE_GROUP:
+       case MCAST_LEAVE_SOURCE_GROUP:
+       case MCAST_BLOCK_SOURCE:
+       case MCAST_UNBLOCK_SOURCE:
+       {
+               struct compat_group_source_req __user *gsr32 = (void *)optval;
+               struct group_source_req __user *kgsr = compat_alloc_user_space(
+                       sizeof(struct group_source_req));
+               u32 interface;
+
+               if (!access_ok(VERIFY_READ, gsr32, sizeof(*gsr32)) ||
+                   !access_ok(VERIFY_WRITE, kgsr,
+                       sizeof(struct group_source_req)) ||
+                   __get_user(interface, &gsr32->gsr_interface) ||
+                   __put_user(interface, &kgsr->gsr_interface) ||
+                   copy_in_user(&kgsr->gsr_group, &gsr32->gsr_group,
+                               sizeof(kgsr->gsr_group)) ||
+                   copy_in_user(&kgsr->gsr_source, &gsr32->gsr_source,
+                               sizeof(kgsr->gsr_source)))
+                       return -EFAULT;
+               koptval = (char __user *)kgsr;
+               koptlen = sizeof(struct group_source_req);
+               break;
+       }
+       case MCAST_MSFILTER:
+       {
+               struct compat_group_filter __user *gf32 = (void *)optval;
+               struct group_filter __user *kgf;
+               u32 interface, fmode, numsrc;
+
+               if (!access_ok(VERIFY_READ, gf32, __COMPAT_GF0_SIZE) ||
+                   __get_user(interface, &gf32->gf_interface) ||
+                   __get_user(fmode, &gf32->gf_fmode) ||
+                   __get_user(numsrc, &gf32->gf_numsrc))
+                       return -EFAULT;
+               koptlen = optlen + sizeof(struct group_filter) -
+                               sizeof(struct compat_group_filter);
+               if (koptlen < GROUP_FILTER_SIZE(numsrc))
+                       return -EINVAL;
+               kgf = compat_alloc_user_space(koptlen);
+               if (!access_ok(VERIFY_WRITE, kgf, koptlen) ||
+                   __put_user(interface, &kgf->gf_interface) ||
+                   __put_user(fmode, &kgf->gf_fmode) ||
+                   __put_user(numsrc, &kgf->gf_numsrc) ||
+                   copy_in_user(&kgf->gf_group, &gf32->gf_group,
+                               sizeof(kgf->gf_group)) ||
+                   (numsrc && copy_in_user(kgf->gf_slist, gf32->gf_slist,
+                               numsrc * sizeof(kgf->gf_slist[0]))))
+                       return -EFAULT;
+               koptval = (char __user *)kgf;
+               break;
+       }
+
+       default:
+               break;
+       }
+       return setsockopt(sock, level, optname, koptval, koptlen);
+}
+
+EXPORT_SYMBOL(compat_mc_setsockopt);
+
+int compat_mc_getsockopt(struct sock *sock, int level, int optname,
+       char __user *optval, int __user *optlen,
+       int (*getsockopt)(struct sock *,int,int,char __user *,int __user *))
+{
+       struct compat_group_filter __user *gf32 = (void *)optval;
+       struct group_filter __user *kgf;
+       int __user      *koptlen;
+       u32 interface, fmode, numsrc;
+       int klen, ulen, err;
+
+       if (optname != MCAST_MSFILTER)
+               return getsockopt(sock, level, optname, optval, optlen);
+
+       koptlen = compat_alloc_user_space(sizeof(*koptlen));
+       if (!access_ok(VERIFY_READ, optlen, sizeof(*optlen)) ||
+           __get_user(ulen, optlen))
+               return -EFAULT;
+
+       /* adjust len for pad */
+       klen = ulen + sizeof(*kgf) - sizeof(*gf32);
+
+       if (klen < GROUP_FILTER_SIZE(0))
+               return -EINVAL;
+
+       if (!access_ok(VERIFY_WRITE, koptlen, sizeof(*koptlen)) ||
+           __put_user(klen, koptlen))
+               return -EFAULT;
+
+       /* have to allow space for previous compat_alloc_user_space, too */
+       kgf = compat_alloc_user_space(klen+sizeof(*optlen));
+
+       if (!access_ok(VERIFY_READ, gf32, __COMPAT_GF0_SIZE) ||
+           __get_user(interface, &gf32->gf_interface) ||
+           __get_user(fmode, &gf32->gf_fmode) ||
+           __get_user(numsrc, &gf32->gf_numsrc) ||
+           __put_user(interface, &kgf->gf_interface) ||
+           __put_user(fmode, &kgf->gf_fmode) ||
+           __put_user(numsrc, &kgf->gf_numsrc) ||
+           copy_in_user(&kgf->gf_group,&gf32->gf_group,sizeof(kgf->gf_group)))
+               return -EFAULT;
+
+       err = getsockopt(sock, level, optname, (char __user *)kgf, koptlen);
+       if (err)
+               return err;
+
+       if (!access_ok(VERIFY_READ, koptlen, sizeof(*koptlen)) ||
+           __get_user(klen, koptlen))
+               return -EFAULT;
+
+       ulen = klen - (sizeof(*kgf)-sizeof(*gf32));
+
+       if (!access_ok(VERIFY_WRITE, optlen, sizeof(*optlen)) ||
+           __put_user(ulen, optlen))
+               return -EFAULT;
+
+       if (!access_ok(VERIFY_READ, kgf, klen) ||
+           !access_ok(VERIFY_WRITE, gf32, ulen) ||
+           __get_user(interface, &kgf->gf_interface) ||
+           __get_user(fmode, &kgf->gf_fmode) ||
+           __get_user(numsrc, &kgf->gf_numsrc) ||
+           __put_user(interface, &gf32->gf_interface) ||
+           __put_user(fmode, &gf32->gf_fmode) ||
+           __put_user(numsrc, &gf32->gf_numsrc))
+               return -EFAULT;
+       if (numsrc) {
+               int copylen;
+
+               klen -= GROUP_FILTER_SIZE(0);
+               copylen = numsrc * sizeof(gf32->gf_slist[0]);
+               if (copylen > klen)
+                       copylen = klen;
+               if (copy_in_user(gf32->gf_slist, kgf->gf_slist, copylen))
+                       return -EFAULT;
+       }
+       return err;
+}
+
+EXPORT_SYMBOL(compat_mc_getsockopt);
+
+
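
The compat_group_* structures above are needed because 32-bit userspace
aligns struct __kernel_sockaddr_storage to 4 bytes while the native 64-bit
definitions align it to 8, so the field offsets differ and the option buffers
must be rewritten into kernel-native layout (via compat_alloc_user_space)
before the real handler runs. The protocol code then simply delegates, as in
the ipv4 hunk further below:

	return compat_mc_setsockopt(sk, level, optname, optval, optlen,
				    ip_setsockopt);
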
 /* Argument list sizes for compat_sys_socketcall */
 #define AL(x) ((x) * sizeof(u32))
 static unsigned char nas[18]={AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
index e1df1ab3e04a18d32f9f4aa3fe86e42a9737e3cd..ed49da592051f368cf32b3bafd59bd05494c3c67 100644 (file)
@@ -1524,7 +1524,7 @@ static int dev_gso_segment(struct sk_buff *skb)
        if (!segs)
                return 0;
 
-       if (unlikely(IS_ERR(segs)))
+       if (IS_ERR(segs))
                return PTR_ERR(segs);
 
        skb->next = segs;
index f2b5270efdaaa4e056cff85624662fc853e79775..24eca23c2db3159c91898bd7fefe8bbb501f1233 100644 (file)
@@ -1234,7 +1234,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
                segs = ops->gso_segment(skb, features);
        rcu_read_unlock();
 
-       if (!segs || unlikely(IS_ERR(segs)))
+       if (!segs || IS_ERR(segs))
                goto out;
 
        skb = segs;
index c67d00e8c6003eb49761a44155501124c7c09b9d..87397351ddace424de5f0341482b408cdfaaf363 100644 (file)
@@ -691,7 +691,8 @@ static void icmp_unreach(struct sk_buff *skb)
                                               NIPQUAD(iph->daddr));
                        } else {
                                info = ip_rt_frag_needed(net, iph,
-                                                    ntohs(icmph->un.frag.mtu));
+                                                        ntohs(icmph->un.frag.mtu),
+                                                        skb->dev);
                                if (!info)
                                        goto out;
                        }
index 08349267ceb4c091285c1b44dfdc719a1936447e..e527628f56cf714c8cac834c2fa1499a9f91c82a 100644 (file)
@@ -753,23 +753,15 @@ static inline int ip_ufo_append_data(struct sock *sk,
                skb->ip_summed = CHECKSUM_PARTIAL;
                skb->csum = 0;
                sk->sk_sndmsg_off = 0;
-       }
 
-       err = skb_append_datato_frags(sk,skb, getfrag, from,
-                              (length - transhdrlen));
-       if (!err) {
-               /* specify the length of each IP datagram fragment*/
+               /* specify the length of each IP datagram fragment */
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
                __skb_queue_tail(&sk->sk_write_queue, skb);
-
-               return 0;
        }
-       /* There is not enough support do UFO ,
-        * so follow normal path
-        */
-       kfree_skb(skb);
-       return err;
+
+       return skb_append_datato_frags(sk, skb, getfrag, from,
+                                      (length - transhdrlen));
 }
 
 /*
@@ -863,9 +855,9 @@ int ip_append_data(struct sock *sk,
                csummode = CHECKSUM_PARTIAL;
 
        inet->cork.length += length;
-       if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
-                       (rt->u.dst.dev->features & NETIF_F_UFO)) {
-
+       if (((length > mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
+           (sk->sk_protocol == IPPROTO_UDP) &&
+           (rt->u.dst.dev->features & NETIF_F_UFO)) {
                err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
                                         fragheaderlen, transhdrlen, mtu,
                                         flags);
index d8adfd4972e219684b69c325cd8707681a0ac54d..e0514e82308e1d10c76314ea94ed00b5f0506a3d 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/mroute.h>
 #include <net/route.h>
 #include <net/xfrm.h>
+#include <net/compat.h>
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <net/transp_v6.h>
 #endif
@@ -923,6 +924,10 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname,
        if (level != SOL_IP)
                return -ENOPROTOOPT;
 
+       if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
+               return compat_mc_setsockopt(sk, level, optname, optval, optlen,
+                       ip_setsockopt);
+
        err = do_ip_setsockopt(sk, level, optname, optval, optlen);
 #ifdef CONFIG_NETFILTER
        /* we need to exclude all possible ENOPROTOOPTs except default case */
@@ -1181,7 +1186,14 @@ int ip_getsockopt(struct sock *sk, int level,
 int compat_ip_getsockopt(struct sock *sk, int level, int optname,
                         char __user *optval, int __user *optlen)
 {
-       int err = do_ip_getsockopt(sk, level, optname, optval, optlen);
+       int err;
+
+       if (optname == MCAST_MSFILTER)
+               return compat_mc_getsockopt(sk, level, optname, optval, optlen,
+                       ip_getsockopt);
+
+       err = do_ip_getsockopt(sk, level, optname, optval, optlen);
+
 #ifdef CONFIG_NETFILTER
        /* we need to exclude all possible ENOPROTOOPTs except default case */
        if (err == -ENOPROTOOPT && optname != IP_PKTOPTIONS &&
index 0f42d1c1f6905fdea7b4fa55643cc2576237d380..89dee4346f6065184dce57a0584412fd210cd130 100644 (file)
@@ -412,12 +412,12 @@ static struct packet_type rarp_packet_type __initdata = {
        .func = ic_rarp_recv,
 };
 
-static inline void ic_rarp_init(void)
+static inline void __init ic_rarp_init(void)
 {
        dev_add_pack(&rarp_packet_type);
 }
 
-static inline void ic_rarp_cleanup(void)
+static inline void __init ic_rarp_cleanup(void)
 {
        dev_remove_pack(&rarp_packet_type);
 }
@@ -682,7 +682,7 @@ static void __init ic_bootp_init_ext(u8 *e)
 /*
  *  Initialize the DHCP/BOOTP mechanism.
  */
-static inline void ic_bootp_init(void)
+static inline void __init ic_bootp_init(void)
 {
        int i;
 
@@ -696,7 +696,7 @@ static inline void ic_bootp_init(void)
 /*
  *  DHCP/BOOTP cleanup.
  */
-static inline void ic_bootp_cleanup(void)
+static inline void __init ic_bootp_cleanup(void)
 {
        dev_remove_pack(&bootp_packet_type);
 }
index dde28a250d92f0c0dd1d9776cfa4c366a9793c35..4b1c16cbb16b7c71fdec2814bd3d430f2597ada5 100644 (file)
@@ -148,7 +148,7 @@ const char * ip_vs_state_name(__u16 proto, int state)
        struct ip_vs_protocol *pp = ip_vs_proto_get(proto);
 
        if (pp == NULL || pp->state_name == NULL)
-               return "ERR!";
+               return (IPPROTO_IP == proto) ? "NONE" : "ERR!";
        return pp->state_name(state);
 }
 
index a842676e1c69c24ace971fcaf663054533c2c9f2..4bf835e1d86de7535667ebb1357e1237e0849e81 100644 (file)
@@ -160,6 +160,7 @@ static void ah_exit(struct ip_vs_protocol *pp)
 struct ip_vs_protocol ip_vs_protocol_ah = {
        .name =                 "AH",
        .protocol =             IPPROTO_AH,
+       .num_states =           1,
        .dont_defrag =          1,
        .init =                 ah_init,
        .exit =                 ah_exit,
index aef0d3ee8e44f6859d6dfed03b352c2c7b7aba44..db6a6b7b1a0bf290bf1097357ae5968c5265cc1c 100644 (file)
@@ -159,6 +159,7 @@ static void esp_exit(struct ip_vs_protocol *pp)
 struct ip_vs_protocol ip_vs_protocol_esp = {
        .name =                 "ESP",
        .protocol =             IPPROTO_ESP,
+       .num_states =           1,
        .dont_defrag =          1,
        .init =                 esp_init,
        .exit =                 esp_exit,
index 620e40ff79a903c5007682545d29535c07480f0a..b83dc14b0a4d0bee0b72f60adbe7b32a954ea4e8 100644 (file)
@@ -594,6 +594,7 @@ static void ip_vs_tcp_exit(struct ip_vs_protocol *pp)
 struct ip_vs_protocol ip_vs_protocol_tcp = {
        .name =                 "TCP",
        .protocol =             IPPROTO_TCP,
+       .num_states =           IP_VS_TCP_S_LAST,
        .dont_defrag =          0,
        .appcnt =               ATOMIC_INIT(0),
        .init =                 ip_vs_tcp_init,
index 1caa2908373f28cea2f7c98e3d7191fbaf25d4f3..75771cb3cd6f298b90173a26cb50a34e372cfa1d 100644 (file)
@@ -409,6 +409,7 @@ static void udp_exit(struct ip_vs_protocol *pp)
 struct ip_vs_protocol ip_vs_protocol_udp = {
        .name =                 "UDP",
        .protocol =             IPPROTO_UDP,
+       .num_states =           IP_VS_UDP_S_LAST,
        .dont_defrag =          0,
        .init =                 udp_init,
        .exit =                 udp_exit,
index 69c56663cc9a90645cdcfab47495261f01e70e99..eff54efe0351f5158d5df9b8c8229961b754fe34 100644 (file)
@@ -288,11 +288,16 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
        char *p;
        int i;
 
+       if (buflen < sizeof(struct ip_vs_sync_mesg)) {
+               IP_VS_ERR_RL("sync message header too short\n");
+               return;
+       }
+
        /* Convert size back to host byte order */
        m->size = ntohs(m->size);
 
        if (buflen != m->size) {
-               IP_VS_ERR("bogus message\n");
+               IP_VS_ERR_RL("bogus sync message size\n");
                return;
        }
 
@@ -307,9 +312,48 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
        for (i=0; i<m->nr_conns; i++) {
                unsigned flags, state;
 
-               s = (struct ip_vs_sync_conn *)p;
+               if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
+                       IP_VS_ERR_RL("bogus conn in sync message\n");
+                       return;
+               }
+               s = (struct ip_vs_sync_conn *) p;
                flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC;
+               flags &= ~IP_VS_CONN_F_HASHED;
+               if (flags & IP_VS_CONN_F_SEQ_MASK) {
+                       opt = (struct ip_vs_sync_conn_options *)&s[1];
+                       p += FULL_CONN_SIZE;
+                       if (p > buffer+buflen) {
+                               IP_VS_ERR_RL("bogus conn options in sync message\n");
+                               return;
+                       }
+               } else {
+                       opt = NULL;
+                       p += SIMPLE_CONN_SIZE;
+               }
+
                state = ntohs(s->state);
+               if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
+                       pp = ip_vs_proto_get(s->protocol);
+                       if (!pp) {
+                               IP_VS_ERR_RL("Unsupported protocol %u in sync msg\n",
+                                       s->protocol);
+                               continue;
+                       }
+                       if (state >= pp->num_states) {
+                               IP_VS_DBG(2, "Invalid %s state %u in sync msg\n",
+                                       pp->name, state);
+                               continue;
+                       }
+               } else {
+                       /* protocol in templates is not used for state/timeout */
+                       pp = NULL;
+                       if (state > 0) {
+                               IP_VS_DBG(2, "Invalid template state %u in sync msg\n",
+                                       state);
+                               state = 0;
+                       }
+               }
+
                if (!(flags & IP_VS_CONN_F_TEMPLATE))
                        cp = ip_vs_conn_in_get(s->protocol,
                                               s->caddr, s->cport,
@@ -345,14 +389,9 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
                                IP_VS_ERR("ip_vs_conn_new failed\n");
                                return;
                        }
-                       cp->state = state;
                } else if (!cp->dest) {
                        dest = ip_vs_try_bind_dest(cp);
-                       if (!dest) {
-                               /* it is an unbound entry created by
-                                * synchronization */
-                               cp->flags = flags | IP_VS_CONN_F_HASHED;
-                       } else
+                       if (dest)
                                atomic_dec(&dest->refcnt);
                } else if ((cp->dest) && (cp->protocol == IPPROTO_TCP) &&
                           (cp->state != state)) {
@@ -371,23 +410,22 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
                        }
                }
 
-               if (flags & IP_VS_CONN_F_SEQ_MASK) {
-                       opt = (struct ip_vs_sync_conn_options *)&s[1];
+               if (opt)
                        memcpy(&cp->in_seq, opt, sizeof(*opt));
-                       p += FULL_CONN_SIZE;
-               } else
-                       p += SIMPLE_CONN_SIZE;
-
                atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
                cp->state = state;
-               pp = ip_vs_proto_get(s->protocol);
-               cp->timeout = pp->timeout_table[cp->state];
+               cp->old_state = cp->state;
+               /*
+                * We can not recover the right timeout for templates
+                * in all cases, we can not find the right fwmark
+                * virtual service. If needed, we can do it for
+                * non-fwmark persistent services.
+                */
+               if (!(flags & IP_VS_CONN_F_TEMPLATE) && pp->timeout_table)
+                       cp->timeout = pp->timeout_table[state];
+               else
+                       cp->timeout = (3*60*HZ);
                ip_vs_conn_put(cp);
-
-               if (p > buffer+buflen) {
-                       IP_VS_ERR("bogus message\n");
-                       return;
-               }
        }
 }
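
The sync-message hunk above replaces the old single after-the-fact size check with per-record bounds checks, so each struct ip_vs_sync_conn is validated before it is dereferenced. As a rough user-space sketch of the same defensive pattern (the record layout, field names and sizes below are invented for illustration, not taken from IPVS):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical on-wire record: 2-byte type, 2-byte total length, payload. */
struct rec_hdr {
        uint16_t type;
        uint16_t len;           /* total record length, network byte order */
};

static void process_buffer(const unsigned char *buf, size_t buflen)
{
        const unsigned char *p = buf;
        const unsigned char *end = buf + buflen;

        while (p < end) {
                struct rec_hdr hdr;
                uint16_t len;

                /* Make sure the header itself fits before reading it. */
                if ((size_t)(end - p) < sizeof(hdr)) {
                        fprintf(stderr, "truncated record header\n");
                        return;
                }
                memcpy(&hdr, p, sizeof(hdr));
                len = ntohs(hdr.len);   /* convert once, like m->size above */

                /* Make sure the whole record fits before using the payload. */
                if (len < sizeof(hdr) || (size_t)(end - p) < len) {
                        fprintf(stderr, "bogus record length %u\n", (unsigned)len);
                        return;
                }

                /* payload between p + sizeof(hdr) and p + len is now safe */
                p += len;
        }
}

int main(void)
{
        /* Two valid records followed by a deliberately truncated header. */
        unsigned char buf[] = {
                0x00, 0x01, 0x00, 0x06, 0xAA, 0xBB,   /* type 1, len 6 */
                0x00, 0x02, 0x00, 0x04,               /* type 2, len 4 */
                0x00, 0x03                            /* truncated     */
        };

        process_buffer(buf, sizeof(buf));
        return 0;
}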
 
index 719be29f7506dc308b2743c98b9c17488bea037c..26a37cedcf2e3ab1ecb7620e02dd66be7754d961 100644 (file)
@@ -296,9 +296,8 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
                if (v->data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
-                       nskb = skb_copy_expand(e->skb, 0,
-                                              diff - skb_tailroom(e->skb),
-                                              GFP_ATOMIC);
+                       nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
+                                              diff, GFP_ATOMIC);
                        if (!nskb) {
                                printk(KERN_WARNING "ip_queue: error "
                                      "in mangle, dropping packet\n");
index cacb9cb27dab75d62b9628f58e048b3f2110902e..5a955c440364728d3002a09213790273bb924339 100644 (file)
@@ -303,7 +303,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
        const struct nf_conntrack_tuple_hash *h;
        struct nf_conntrack_tuple tuple;
 
-       NF_CT_TUPLE_U_BLANK(&tuple);
+       memset(&tuple, 0, sizeof(tuple));
        tuple.src.u3.ip = inet->rcv_saddr;
        tuple.src.u.tcp.port = inet->sport;
        tuple.dst.u3.ip = inet->daddr;
index ce25a13f3430b06f27445c8a3fa5e30c6a117baa..5e3685c5c407ac2e6b24c13eaf055319d2d341aa 100644 (file)
@@ -1430,11 +1430,13 @@ static inline unsigned short guess_mtu(unsigned short old_mtu)
 }
 
 unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
-                                unsigned short new_mtu)
+                                unsigned short new_mtu,
+                                struct net_device *dev)
 {
-       int i;
+       int i, k;
        unsigned short old_mtu = ntohs(iph->tot_len);
        struct rtable *rth;
+       int  ikeys[2] = { dev->ifindex, 0 };
        __be32  skeys[2] = { iph->saddr, 0, };
        __be32  daddr = iph->daddr;
        unsigned short est_mtu = 0;
@@ -1442,22 +1444,26 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
        if (ipv4_config.no_pmtu_disc)
                return 0;
 
-       for (i = 0; i < 2; i++) {
-               unsigned hash = rt_hash(daddr, skeys[i], 0);
+       for (k = 0; k < 2; k++) {
+               for (i = 0; i < 2; i++) {
+                       unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]);
 
-               rcu_read_lock();
-               for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
-                    rth = rcu_dereference(rth->u.dst.rt_next)) {
-                       if (rth->fl.fl4_dst == daddr &&
-                           rth->fl.fl4_src == skeys[i] &&
-                           rth->rt_dst  == daddr &&
-                           rth->rt_src  == iph->saddr &&
-                           rth->fl.iif == 0 &&
-                           !(dst_metric_locked(&rth->u.dst, RTAX_MTU)) &&
-                           net_eq(dev_net(rth->u.dst.dev), net) &&
-                           rth->rt_genid == atomic_read(&rt_genid)) {
+                       rcu_read_lock();
+                       for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
+                            rth = rcu_dereference(rth->u.dst.rt_next)) {
                                unsigned short mtu = new_mtu;
 
+                               if (rth->fl.fl4_dst != daddr ||
+                                   rth->fl.fl4_src != skeys[i] ||
+                                   rth->rt_dst != daddr ||
+                                   rth->rt_src != iph->saddr ||
+                                   rth->fl.oif != ikeys[k] ||
+                                   rth->fl.iif != 0 ||
+                                   dst_metric_locked(&rth->u.dst, RTAX_MTU) ||
+                                   !net_eq(dev_net(rth->u.dst.dev), net) ||
+                                   rth->rt_genid != atomic_read(&rt_genid))
+                                       continue;
+
                                if (new_mtu < 68 || new_mtu >= old_mtu) {
 
                                        /* BSD 4.2 compatibility hack :-( */
@@ -1483,8 +1489,8 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
                                        est_mtu = mtu;
                                }
                        }
+                       rcu_read_unlock();
                }
-               rcu_read_unlock();
        }
        return est_mtu ? : new_mtu;
 }
index 3a6be23d222f382c32dcd1e3acc5e55b97594797..6a250828b767aef7f037a3cf10d10b1dd74625cb 100644 (file)
@@ -285,14 +285,12 @@ int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
        if (in_flight >= tp->snd_cwnd)
                return 1;
 
-       if (!sk_can_gso(sk))
-               return 0;
-
        left = tp->snd_cwnd - in_flight;
-       if (sysctl_tcp_tso_win_divisor)
-               return left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd;
-       else
-               return left <= tcp_max_burst(tp);
+       if (sk_can_gso(sk) &&
+           left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
+           left * tp->mss_cache < sk->sk_gso_max_size)
+               return 1;
+       return left <= tcp_max_burst(tp);
 }
 EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
 
index eb5b9854c8c7330791ada69b8c9e8695f7a73f3d..4a1221e5e8ee2ec8b7d1a5157aa063f6a3f8011e 100644 (file)
@@ -15,8 +15,8 @@
 
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/math64.h>
 #include <net/tcp.h>
-#include <asm/div64.h>
 
 #define BICTCP_BETA_SCALE    1024      /* Scale factor beta calculation
                                         * max_cwnd = snd_cwnd * beta
@@ -128,7 +128,7 @@ static u32 cubic_root(u64 a)
         * x    = ( 2 * x  +  a / x  ) / 3
         *  k+1          k         k
         */
-       x = (2 * x + (u32)div64_64(a, (u64)x * (u64)(x - 1)));
+       x = (2 * x + (u32)div64_u64(a, (u64)x * (u64)(x - 1)));
        x = ((x * 341) >> 10);
        return x;
 }
index ac9b8482f702eae07f6f66b935b36f986354d91f..0298f80681f2291b33e1b5d1dab83ee47a428010 100644 (file)
@@ -4925,8 +4925,7 @@ step5:
        tcp_data_snd_check(sk);
        tcp_ack_snd_check(sk);
 
-       if (tcp_defer_accept_check(sk))
-               return -1;
+       tcp_defer_accept_check(sk);
        return 0;
 
 csum_error:
index be24d6ee34bd7be9af38ceaeda87b8ec5744ea9a..0e1a8c91f78eb337c3f0fb1f58dd2497f1f76e2d 100644 (file)
@@ -229,7 +229,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                         */
                        tcp_reno_cong_avoid(sk, ack, in_flight);
                } else {
-                       u32 rtt, target_cwnd, diff;
+                       u32 rtt, diff;
+                       u64 target_cwnd;
 
                        /* We have enough RTT samples, so, using the Vegas
                         * algorithm, we determine if we should increase or
@@ -252,8 +253,9 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                         * We keep it as a fixed point number with
                         * V_PARAM_SHIFT bits to the right of the binary point.
                         */
-                       target_cwnd = ((old_wnd * vegas->baseRTT)
-                                      << V_PARAM_SHIFT) / rtt;
+                       target_cwnd = ((u64)old_wnd * vegas->baseRTT);
+                       target_cwnd <<= V_PARAM_SHIFT;
+                       do_div(target_cwnd, rtt);
 
                        /* Calculate the difference between the window we had,
                         * and the window we would like to have. This quantity
@@ -279,7 +281,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                                 * utilization.
                                 */
                                tp->snd_cwnd = min(tp->snd_cwnd,
-                                                  (target_cwnd >>
+                                                  ((u32)target_cwnd >>
                                                    V_PARAM_SHIFT)+1);
 
                        } else if (tp->snd_cwnd <= tp->snd_ssthresh) {
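
The Vegas hunk above, like the Veno change that follows, computes target_cwnd in 64 bits before the fixed-point shift and the division, since the 32-bit product old_wnd * baseRTT can wrap. A stand-alone sketch with example numbers chosen to trigger the overflow (the values are illustrative, not kernel defaults):

#include <stdint.h>
#include <stdio.h>

#define V_PARAM_SHIFT 1         /* fixed-point shift, 1 here for the example */

int main(void)
{
        uint32_t old_wnd  = 40000;      /* window, in packets  */
        uint32_t base_rtt = 200000;     /* microseconds        */
        uint32_t rtt      = 210000;

        /* 32-bit arithmetic: old_wnd * base_rtt exceeds 2^32 and wraps. */
        uint32_t bad = ((old_wnd * base_rtt) << V_PARAM_SHIFT) / rtt;

        /* Widen first, as the patched code does before do_div(). */
        uint64_t target = (uint64_t)old_wnd * base_rtt;
        target <<= V_PARAM_SHIFT;
        target /= rtt;

        printf("32-bit: %u, 64-bit: %llu\n", bad, (unsigned long long)target);
        return 0;
}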
index d16689e9851620a44944c397989004bc447411ef..2bf618a3b00b6f71ec3bb3c41ed171010acda2ab 100644 (file)
@@ -133,7 +133,8 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
                 */
                tcp_reno_cong_avoid(sk, ack, in_flight);
        } else {
-               u32 rtt, target_cwnd;
+               u64 target_cwnd;
+               u32 rtt;
 
                /* We have enough rtt samples, so, using the Veno
                 * algorithm, we determine the state of the network.
@@ -141,8 +142,9 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 
                rtt = veno->minrtt;
 
-               target_cwnd = ((tp->snd_cwnd * veno->basertt)
-                              << V_PARAM_SHIFT) / rtt;
+               target_cwnd = (tp->snd_cwnd * veno->basertt);
+               target_cwnd <<= V_PARAM_SHIFT;
+               do_div(target_cwnd, rtt);
 
                veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
 
index b2c9becc02e8cad4e6453578587cabc6a7701c52..42814a2ec9d73f0249b53386e997104cde4dee64 100644 (file)
@@ -167,7 +167,7 @@ config IPV6_SIT
          Tunneling means encapsulating data of one protocol type within
          another protocol and sending it over a channel that understands the
          encapsulating protocol. This driver implements encapsulation of IPv6
-         into IPv4 packets. This is useful if you want to connect to IPv6
+         into IPv4 packets. This is useful if you want to connect two IPv6
          networks over an IPv4-only path.
 
          Saying M here will produce a module called sit.ko. If unsure, say Y.
index c8c6e33d116340d8f1a902893c5e183d6ab118e2..2de3c464fe75b4e3bf1a57fd148dcc739d123761 100644 (file)
@@ -358,7 +358,7 @@ static int pim6_rcv(struct sk_buff *skb)
        if (pim->type != ((PIM_VERSION << 4) | PIM_REGISTER) ||
            (pim->flags & PIM_NULL_REGISTER) ||
            (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
-            (u16)csum_fold(skb_checksum(skb, 0, skb->len, 0))))
+            csum_fold(skb_checksum(skb, 0, skb->len, 0))))
                goto drop;
 
        /* check if the inner packet is destined to mcast group */
index 06de9d0e1f6b653384279180612b2b0b69df1f66..56d55fecf8ec49d49a79e804b0129e8570441df5 100644 (file)
@@ -52,6 +52,7 @@
 #include <net/udp.h>
 #include <net/udplite.h>
 #include <net/xfrm.h>
+#include <net/compat.h>
 
 #include <asm/uaccess.h>
 
@@ -779,6 +780,10 @@ int compat_ipv6_setsockopt(struct sock *sk, int level, int optname,
        if (level != SOL_IPV6)
                return -ENOPROTOOPT;
 
+       if (optname >= MCAST_JOIN_GROUP && optname <= MCAST_MSFILTER)
+               return compat_mc_setsockopt(sk, level, optname, optval, optlen,
+                       ipv6_setsockopt);
+
        err = do_ipv6_setsockopt(sk, level, optname, optval, optlen);
 #ifdef CONFIG_NETFILTER
        /* we need to exclude all possible ENOPROTOOPTs except default case */
@@ -1122,6 +1127,10 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
        if (level != SOL_IPV6)
                return -ENOPROTOOPT;
 
+       if (optname == MCAST_MSFILTER)
+               return compat_mc_getsockopt(sk, level, optname, optval, optlen,
+                       ipv6_getsockopt);
+
        err = do_ipv6_getsockopt(sk, level, optname, optval, optlen);
 #ifdef CONFIG_NETFILTER
        /* we need to exclude all possible ENOPROTOOPTs except default case */
index 92a36c9e540281a6f865d489dc2747fa20620008..2eff3ae8977dd3a0fd8b7c0d3858ac076cb27b28 100644 (file)
@@ -298,9 +298,8 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
                if (v->data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
-                       nskb = skb_copy_expand(e->skb, 0,
-                                              diff - skb_tailroom(e->skb),
-                                              GFP_ATOMIC);
+                       nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
+                                              diff, GFP_ATOMIC);
                        if (!nskb) {
                                printk(KERN_WARNING "ip6_queue: OOM "
                                      "in mangle, dropping packet\n");
index d2620410cb0aa1215c2e60682f215618466e069f..76c3057d01790c23ae1e1055e6927cf6c2d2a6a0 100644 (file)
@@ -555,10 +555,8 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
 
        ircomm_tty_shutdown(self);
 
-       if (tty->driver->flush_buffer)
-               tty->driver->flush_buffer(tty);
-       if (tty->ldisc.flush_buffer)
-               tty->ldisc.flush_buffer(tty);
+       tty_driver_flush_buffer(tty);
+       tty_ldisc_flush(tty);
 
        tty->closing = 0;
        self->tty = NULL;
index a4f1439ffdd8b80a4d5847121abe041adeba7627..75497e55927d695a3eee228833c68097edeb86ae 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include "irnet_irda.h"                /* Private header */
+#include <linux/seq_file.h>
 
 /*
  * PPP disconnect work: we need to make sure we're in
@@ -1717,34 +1718,23 @@ irnet_expiry_indication(discinfo_t *    expiry,
  */
 
 #ifdef CONFIG_PROC_FS
-/*------------------------------------------------------------------*/
-/*
- * Function irnet_proc_read (buf, start, offset, len, unused)
- *
- *    Give some info to the /proc file system
- */
 static int
-irnet_proc_read(char * buf,
-               char ** start,
-               off_t   offset,
-               int     len)
+irnet_proc_show(struct seq_file *m, void *v)
 {
   irnet_socket *       self;
   char *               state;
   int                  i = 0;
 
-  len = 0;
-
   /* Get the IrNET server information... */
-  len += sprintf(buf+len, "IrNET server - ");
-  len += sprintf(buf+len, "IrDA state: %s, ",
+  seq_printf(m, "IrNET server - ");
+  seq_printf(m, "IrDA state: %s, ",
                 (irnet_server.running ? "running" : "dead"));
-  len += sprintf(buf+len, "stsap_sel: %02x, ", irnet_server.s.stsap_sel);
-  len += sprintf(buf+len, "dtsap_sel: %02x\n", irnet_server.s.dtsap_sel);
+  seq_printf(m, "stsap_sel: %02x, ", irnet_server.s.stsap_sel);
+  seq_printf(m, "dtsap_sel: %02x\n", irnet_server.s.dtsap_sel);
 
   /* Do we need to continue ? */
   if(!irnet_server.running)
-    return len;
+    return 0;
 
   /* Protect access to the instance list */
   spin_lock_bh(&irnet_server.spinlock);
@@ -1754,23 +1744,23 @@ irnet_proc_read(char *  buf,
   while(self != NULL)
     {
       /* Start printing info about the socket. */
-      len += sprintf(buf+len, "\nIrNET socket %d - ", i++);
+      seq_printf(m, "\nIrNET socket %d - ", i++);
 
       /* First, get the requested configuration */
-      len += sprintf(buf+len, "Requested IrDA name: \"%s\", ", self->rname);
-      len += sprintf(buf+len, "daddr: %08x, ", self->rdaddr);
-      len += sprintf(buf+len, "saddr: %08x\n", self->rsaddr);
+      seq_printf(m, "Requested IrDA name: \"%s\", ", self->rname);
+      seq_printf(m, "daddr: %08x, ", self->rdaddr);
+      seq_printf(m, "saddr: %08x\n", self->rsaddr);
 
       /* Second, get all the PPP info */
-      len += sprintf(buf+len, "        PPP state: %s",
+      seq_printf(m, "  PPP state: %s",
                 (self->ppp_open ? "registered" : "unregistered"));
       if(self->ppp_open)
        {
-         len += sprintf(buf+len, ", unit: ppp%d",
+         seq_printf(m, ", unit: ppp%d",
                         ppp_unit_number(&self->chan));
-         len += sprintf(buf+len, ", channel: %d",
+         seq_printf(m, ", channel: %d",
                         ppp_channel_index(&self->chan));
-         len += sprintf(buf+len, ", mru: %d",
+         seq_printf(m, ", mru: %d",
                         self->mru);
          /* Maybe add self->flags ? Later... */
        }
@@ -1789,10 +1779,10 @@ irnet_proc_read(char *  buf,
              state = "weird";
            else
              state = "idle";
-      len += sprintf(buf+len, "\n      IrDA state: %s, ", state);
-      len += sprintf(buf+len, "daddr: %08x, ", self->daddr);
-      len += sprintf(buf+len, "stsap_sel: %02x, ", self->stsap_sel);
-      len += sprintf(buf+len, "dtsap_sel: %02x\n", self->dtsap_sel);
+      seq_printf(m, "\n        IrDA state: %s, ", state);
+      seq_printf(m, "daddr: %08x, ", self->daddr);
+      seq_printf(m, "stsap_sel: %02x, ", self->stsap_sel);
+      seq_printf(m, "dtsap_sel: %02x\n", self->dtsap_sel);
 
       /* Next socket, please... */
       self = (irnet_socket *) hashbin_get_next(irnet_server.list);
@@ -1801,8 +1791,21 @@ irnet_proc_read(char *   buf,
   /* Spin lock end */
   spin_unlock_bh(&irnet_server.spinlock);
 
-  return len;
+  return 0;
 }
+
+static int irnet_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, irnet_proc_show, NULL);
+}
+
+static const struct file_operations irnet_proc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = irnet_proc_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
 #endif /* PROC_FS */
 
 
@@ -1841,7 +1844,7 @@ irda_irnet_init(void)
 
 #ifdef CONFIG_PROC_FS
   /* Add a /proc file for irnet infos */
-  create_proc_info_entry("irnet", 0, proc_irda, irnet_proc_read);
+  proc_create("irnet", 0, proc_irda, &irnet_proc_fops);
 #endif /* CONFIG_PROC_FS */
 
   /* Setup the IrNET server */
index 0ba92d0d52040e6c142dd7a1184d6cb44e87dde7..3e408952a3f178ce98f36da48b400d0d8ec6a8e6 100644 (file)
@@ -159,14 +159,6 @@ static void
                                DISCOVERY_MODE,
                                void *);
 #endif
-/* -------------------------- PROC ENTRY -------------------------- */
-#ifdef CONFIG_PROC_FS
-static int
-       irnet_proc_read(char *,
-                       char **,
-                       off_t,
-                       int);
-#endif /* CONFIG_PROC_FS */
 
 /**************************** VARIABLES ****************************/
 
index 2403a31fe0f64ea6de774982e2ba3c6b7ea36a35..9e7236ff6bcc029e6886b43c32416bcdaa51d252 100644 (file)
@@ -1498,7 +1498,8 @@ static int pfkey_add(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr,
                err = xfrm_state_update(x);
 
        xfrm_audit_state_add(x, err ? 0 : 1,
-                            audit_get_loginuid(current), 0);
+                            audit_get_loginuid(current),
+                            audit_get_sessionid(current), 0);
 
        if (err < 0) {
                x->km.state = XFRM_STATE_DEAD;
@@ -1552,7 +1553,8 @@ static int pfkey_delete(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
        km_state_notify(x, &c);
 out:
        xfrm_audit_state_delete(x, err ? 0 : 1,
-                              audit_get_loginuid(current), 0);
+                               audit_get_loginuid(current),
+                               audit_get_sessionid(current), 0);
        xfrm_state_put(x);
 
        return err;
@@ -1728,6 +1730,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hd
                return -EINVAL;
 
        audit_info.loginuid = audit_get_loginuid(current);
+       audit_info.sessionid = audit_get_sessionid(current);
        audit_info.secid = 0;
        err = xfrm_state_flush(proto, &audit_info);
        if (err)
@@ -2324,7 +2327,8 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
                                 hdr->sadb_msg_type != SADB_X_SPDUPDATE);
 
        xfrm_audit_policy_add(xp, err ? 0 : 1,
-                            audit_get_loginuid(current), 0);
+                             audit_get_loginuid(current),
+                             audit_get_sessionid(current), 0);
 
        if (err)
                goto out;
@@ -2406,7 +2410,8 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
                return -ENOENT;
 
        xfrm_audit_policy_delete(xp, err ? 0 : 1,
-                               audit_get_loginuid(current), 0);
+                                audit_get_loginuid(current),
+                                audit_get_sessionid(current), 0);
 
        if (err)
                goto out;
@@ -2667,7 +2672,8 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h
 
        if (delete) {
                xfrm_audit_policy_delete(xp, err ? 0 : 1,
-                               audit_get_loginuid(current), 0);
+                               audit_get_loginuid(current),
+                               audit_get_sessionid(current), 0);
 
                if (err)
                        goto out;
@@ -2767,6 +2773,7 @@ static int pfkey_spdflush(struct sock *sk, struct sk_buff *skb, struct sadb_msg
        int err;
 
        audit_info.loginuid = audit_get_loginuid(current);
+       audit_info.sessionid = audit_get_sessionid(current);
        audit_info.secid = 0;
        err = xfrm_policy_flush(XFRM_POLICY_TYPE_MAIN, &audit_info);
        if (err)
index 520a5180a4f6f074a4849addb4a96c2fc698efaf..a24b459dd45aecf9ba8e1214fce68fe2b8330c6c 100644 (file)
@@ -73,7 +73,9 @@ config MAC80211_MESH
 
 config MAC80211_LEDS
        bool "Enable LED triggers"
-       depends on MAC80211 && LEDS_TRIGGERS
+       depends on MAC80211
+       select NEW_LEDS
+       select LEDS_TRIGGERS
        ---help---
          This option enables a few LED triggers for different
          packet receive/transmit events.
index 742003d3a84112b91640b76f255ccb88d303ea44..9ee3affab346a548bc4f046afac9c7fc07134b11 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/jhash.h>
+#include <asm/unaligned.h>
 #include "ieee80211_i.h"
 
 
index 02de8f1522a3492edf8904942980564031f2cacf..3df809222d1cbcfc1c88605d0406303ed1d25098 100644 (file)
@@ -7,7 +7,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <asm/unaligned.h>
 #include "mesh.h"
 
 #define TEST_FRAME_LEN 8192
index 4eac65c74ed05ac18a504957f2dbcb721bb2014d..c4b1799da5d76ac98920527e9fe85d30866ff9c8 100644 (file)
@@ -104,7 +104,7 @@ nf_ct_get_tuple(const struct sk_buff *skb,
                const struct nf_conntrack_l3proto *l3proto,
                const struct nf_conntrack_l4proto *l4proto)
 {
-       NF_CT_TUPLE_U_BLANK(tuple);
+       memset(tuple, 0, sizeof(*tuple));
 
        tuple->src.l3num = l3num;
        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
@@ -151,7 +151,7 @@ nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_l3proto *l3proto,
                   const struct nf_conntrack_l4proto *l4proto)
 {
-       NF_CT_TUPLE_U_BLANK(inverse);
+       memset(inverse, 0, sizeof(*inverse));
 
        inverse->src.l3num = orig->src.l3num;
        if (l3proto->invert_tuple(inverse, orig) == 0)
index bbd26893c0c4f7e46d1c312eeaed743b7834f5be..582ec3efc8a55f32603feeca879a8267a2639d6b 100644 (file)
@@ -214,7 +214,7 @@ int nf_queue(struct sk_buff *skb,
 
        segs = skb_gso_segment(skb, 0);
        kfree_skb(skb);
-       if (unlikely(IS_ERR(segs)))
+       if (IS_ERR(segs))
                return 1;
 
        do {
index 2c9fe5c12894f7f7200e68e35d6be9c39c31ad59..3447025ce068e9216fe5b235bd4f3213573673be 100644 (file)
@@ -454,9 +454,8 @@ nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
                if (data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
-                       nskb = skb_copy_expand(e->skb, 0,
-                                              diff - skb_tailroom(e->skb),
-                                              GFP_ATOMIC);
+                       nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
+                                              diff, GFP_ATOMIC);
                        if (!nskb) {
                                printk(KERN_WARNING "nf_queue: OOM "
                                      "in mangle, dropping packet\n");
index f52f7f810ac4f470521c8c4d95fadc8950ad1548..11b22abc2b70cb475c5a585120ba0dc68094c36c 100644 (file)
@@ -787,7 +787,7 @@ static const struct file_operations xt_table_ops = {
        .open    = xt_table_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
-       .release = seq_release,
+       .release = seq_release_net,
 };
 
 static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
index 3b2aa56833b9072d7e107d55796d62dae53935fe..9685b6fcbc81b60fb01a5cd7d8a3c6ac5e368024 100644 (file)
@@ -90,7 +90,7 @@ tcpoptstrip_tg6(struct sk_buff *skb, const struct net_device *in,
                const struct xt_target *target, const void *targinfo)
 {
        struct ipv6hdr *ipv6h = ipv6_hdr(skb);
-       unsigned int tcphoff;
+       int tcphoff;
        u_int8_t nexthdr;
 
        nexthdr = ipv6h->nexthdr;
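
The tcphoff change switches the offset from unsigned int to int, presumably so that a negative error value returned by the extension-header walk is not converted into a huge positive offset that defeats a later "< 0" check. A tiny illustration of that class of bug, with an invented helper:

#include <stdio.h>

/* Hypothetical helper: returns a header offset, or -1 on error. */
static int find_offset(int fail)
{
        return fail ? -1 : 40;
}

int main(void)
{
        unsigned int off_bad = find_offset(1);  /* -1 wraps to UINT_MAX */
        int off_good = find_offset(1);

        if (off_bad < 0)        /* always false: off_bad is unsigned   */
                puts("unsigned: error caught");
        if (off_good < 0)       /* fires as intended                   */
                puts("signed: error caught");
        printf("off_bad = %u\n", off_bad);
        return 0;
}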
index b15e7e2fa14328612cdf4de25150cc62604e8a58..d7e8983cd37f6303921edf25bc201eca7e99c4a2 100644 (file)
@@ -4,12 +4,11 @@
 #include <linux/module.h>
 #include <linux/bitops.h>
 #include <linux/skbuff.h>
+#include <linux/math64.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/xt_connbytes.h>
 #include <net/netfilter/nf_conntrack.h>
 
-#include <asm/div64.h>
-
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 MODULE_DESCRIPTION("Xtables: Number of packets/bytes per connection matching");
@@ -82,7 +81,7 @@ connbytes_mt(const struct sk_buff *skb, const struct net_device *in,
                        break;
                }
                if (pkts != 0)
-                       what = div64_64(bytes, pkts);
+                       what = div64_u64(bytes, pkts);
                break;
        }
 
index d282ad1570a77482a58b836065f79e3ae2714a61..0099da5b2591e5295429b7578688fa6949db3db1 100644 (file)
@@ -1780,6 +1780,7 @@ int __init netlbl_unlabel_defconf(void)
         * messages so don't worry to much about these values. */
        security_task_getsecid(current, &audit_info.secid);
        audit_info.loginuid = 0;
+       audit_info.sessionid = 0;
 
        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (entry == NULL)
index b17d4203806e2102812bcd4cbe4754340cde9395..68706b4e3bf8a5d800510854a074537c762cf73d 100644 (file)
@@ -107,7 +107,9 @@ struct audit_buffer *netlbl_audit_start_common(int type,
        if (audit_buf == NULL)
                return NULL;
 
-       audit_log_format(audit_buf, "netlabel: auid=%u", audit_info->loginuid);
+       audit_log_format(audit_buf, "netlabel: auid=%u ses=%u",
+                        audit_info->loginuid,
+                        audit_info->sessionid);
 
        if (audit_info->secid != 0 &&
            security_secid_to_secctx(audit_info->secid,
index 6d7f4ab46c2bd7499d726eac6b5190332e87e69c..6caef8b20611c55443fb1d4617f33cebfa42d835 100644 (file)
@@ -51,6 +51,7 @@ static inline void netlbl_netlink_auditinfo(struct sk_buff *skb,
 {
        audit_info->secid = NETLINK_CB(skb).sid;
        audit_info->loginuid = NETLINK_CB(skb).loginuid;
+       audit_info->sessionid = NETLINK_CB(skb).sessionid;
 }
 
 /* NetLabel NETLINK I/O functions */
index 46f3e44bb83a583bbacfc4d84765082aaee7f4e8..9b97f8006c9c6edfd0dea0dde522b6a7e64c171f 100644 (file)
@@ -1248,6 +1248,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
        NETLINK_CB(skb).pid     = nlk->pid;
        NETLINK_CB(skb).dst_group = dst_group;
        NETLINK_CB(skb).loginuid = audit_get_loginuid(current);
+       NETLINK_CB(skb).sessionid = audit_get_sessionid(current);
        security_task_getsecid(current, &(NETLINK_CB(skb).sid));
        memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
 
index a20e2ef7704beb4142dc3ee75d67604bffa10a10..f0463d757a98489f5b12138c0026d1b91742fd9d 100644 (file)
@@ -521,7 +521,8 @@ static void sfq_destroy(struct Qdisc *sch)
        struct sfq_sched_data *q = qdisc_priv(sch);
 
        tcf_destroy_chain(q->filter_list);
-       del_timer(&q->perturb_timer);
+       q->perturb_period = 0;
+       del_timer_sync(&q->perturb_timer);
 }
 
 static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
index 75d748eee0eb81d4339b8b258dccc8dd653d7d0f..e1770f7ba0b3ec8a9e1d861ff8a2b950000fc8bb 100644 (file)
@@ -445,7 +445,7 @@ EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
 /**
  * xprt_wait_for_buffer_space - wait for transport output buffer to clear
  * @task: task to be put to sleep
- *
+ * @action: function pointer to be executed after wait
  */
 void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
 {
index 6ad070d87702b6f09a4d1da40930aca10f862829..ad487e8abcc2f2f0bcada4432db6c25e8dbf0dee 100644 (file)
@@ -70,10 +70,9 @@ static inline void msg_set_bits(struct tipc_msg *m, u32 w,
                                u32 pos, u32 mask, u32 val)
 {
        val = (val & mask) << pos;
-       val = htonl(val);
-       mask = htonl(mask << pos);
-       m->hdr[w] &= ~mask;
-       m->hdr[w] |= val;
+       mask = mask << pos;
+       m->hdr[w] &= ~htonl(mask);
+       m->hdr[w] |= htonl(val);
 }
 
 /*
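
The msg_set_bits() rewrite keeps mask and val in host byte order until the moment they touch the stored header word, byte-swapping each operand exactly once. A minimal user-space restatement of that field-update idiom, using a plain uint32_t in place of TIPC's header array:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Store 'val' into the field described by (pos, mask) of a word that is
 * kept in network byte order, mirroring the patched msg_set_bits(). */
static void set_bits(uint32_t *word_be, uint32_t pos, uint32_t mask, uint32_t val)
{
        val = (val & mask) << pos;
        mask <<= pos;
        *word_be &= ~htonl(mask);       /* clear the field in the stored word */
        *word_be |= htonl(val);         /* then merge in the new value        */
}

int main(void)
{
        uint32_t hdr = htonl(0xAABBCCDD);

        set_bits(&hdr, 8, 0xFF, 0x42);  /* replace byte 0xCC with 0x42 */
        printf("0x%08lX\n", (unsigned long)ntohl(hdr));  /* 0xAABB42DD */
        return 0;
}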
index 8aa6440d689f75bde521070c5d3f81ce5c81199b..ac765dd9c7f5793396dd2dd4bbc27529f67143a9 100644 (file)
@@ -129,8 +129,7 @@ static struct xfrm_algo_desc aead_list[] = {
 
 static struct xfrm_algo_desc aalg_list[] = {
 {
-       .name = "hmac(digest_null)",
-       .compat = "digest_null",
+       .name = "digest_null",
 
        .uinfo = {
                .auth = {
index 2519129c6d213f85bceebc165e2c27744f2fd3b6..09cd9c0c2d805bcac6f742d5a3ef8945502fe223 100644 (file)
@@ -150,7 +150,7 @@ static int xfrm_output_gso(struct sk_buff *skb)
 
        segs = skb_gso_segment(skb, 0);
        kfree_skb(skb);
-       if (unlikely(IS_ERR(segs)))
+       if (IS_ERR(segs))
                return PTR_ERR(segs);
 
        do {
index e0c0390613c0bca03920328ae2ad77052a22ecff..cae9fd81554350eb1893aa1da37ada7f646b5a08 100644 (file)
@@ -762,6 +762,7 @@ xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
                        if (err) {
                                xfrm_audit_policy_delete(pol, 0,
                                                         audit_info->loginuid,
+                                                        audit_info->sessionid,
                                                         audit_info->secid);
                                return err;
                        }
@@ -777,6 +778,7 @@ xfrm_policy_flush_secctx_check(u8 type, struct xfrm_audit *audit_info)
                                if (err) {
                                        xfrm_audit_policy_delete(pol, 0,
                                                        audit_info->loginuid,
+                                                       audit_info->sessionid,
                                                        audit_info->secid);
                                        return err;
                                }
@@ -819,6 +821,7 @@ int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
                        write_unlock_bh(&xfrm_policy_lock);
 
                        xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
+                                                audit_info->sessionid,
                                                 audit_info->secid);
 
                        xfrm_policy_kill(pol);
@@ -841,6 +844,7 @@ int xfrm_policy_flush(u8 type, struct xfrm_audit *audit_info)
 
                                xfrm_audit_policy_delete(pol, 1,
                                                         audit_info->loginuid,
+                                                        audit_info->sessionid,
                                                         audit_info->secid);
                                xfrm_policy_kill(pol);
                                killed++;
@@ -2472,14 +2476,14 @@ static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
 }
 
 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
-                          u32 auid, u32 secid)
+                          uid_t auid, u32 sessionid, u32 secid)
 {
        struct audit_buffer *audit_buf;
 
        audit_buf = xfrm_audit_start("SPD-add");
        if (audit_buf == NULL)
                return;
-       xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
+       xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
        audit_log_format(audit_buf, " res=%u", result);
        xfrm_audit_common_policyinfo(xp, audit_buf);
        audit_log_end(audit_buf);
@@ -2487,14 +2491,14 @@ void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
 EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);
 
 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
-                             u32 auid, u32 secid)
+                             uid_t auid, u32 sessionid, u32 secid)
 {
        struct audit_buffer *audit_buf;
 
        audit_buf = xfrm_audit_start("SPD-delete");
        if (audit_buf == NULL)
                return;
-       xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
+       xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
        audit_log_format(audit_buf, " res=%u", result);
        xfrm_audit_common_policyinfo(xp, audit_buf);
        audit_log_end(audit_buf);
index 5dcc10b93c861df1e675589947137bf0006d392e..72fddafd891a0b3f3be603804e6c5727a56a5b3d 100644 (file)
@@ -496,7 +496,8 @@ expired:
                km_state_expired(x, 1, 0);
 
        xfrm_audit_state_delete(x, err ? 0 : 1,
-                               audit_get_loginuid(current), 0);
+                               audit_get_loginuid(current),
+                               audit_get_sessionid(current), 0);
 
 out:
        spin_unlock(&x->lock);
@@ -603,6 +604,7 @@ xfrm_state_flush_secctx_check(u8 proto, struct xfrm_audit *audit_info)
                           (err = security_xfrm_state_delete(x)) != 0) {
                                xfrm_audit_state_delete(x, 0,
                                                        audit_info->loginuid,
+                                                       audit_info->sessionid,
                                                        audit_info->secid);
                                return err;
                        }
@@ -641,6 +643,7 @@ restart:
                                err = xfrm_state_delete(x);
                                xfrm_audit_state_delete(x, err ? 0 : 1,
                                                        audit_info->loginuid,
+                                                       audit_info->sessionid,
                                                        audit_info->secid);
                                xfrm_state_put(x);
 
@@ -2112,7 +2115,7 @@ static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
                iph6 = ipv6_hdr(skb);
                audit_log_format(audit_buf,
                                 " src=" NIP6_FMT " dst=" NIP6_FMT
-                                " flowlbl=0x%x%x%x",
+                                " flowlbl=0x%x%02x%02x",
                                 NIP6(iph6->saddr),
                                 NIP6(iph6->daddr),
                                 iph6->flow_lbl[0] & 0x0f,
@@ -2123,14 +2126,14 @@ static void xfrm_audit_helper_pktinfo(struct sk_buff *skb, u16 family,
 }
 
 void xfrm_audit_state_add(struct xfrm_state *x, int result,
-                         u32 auid, u32 secid)
+                         uid_t auid, u32 sessionid, u32 secid)
 {
        struct audit_buffer *audit_buf;
 
        audit_buf = xfrm_audit_start("SAD-add");
        if (audit_buf == NULL)
                return;
-       xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
+       xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
        xfrm_audit_helper_sainfo(x, audit_buf);
        audit_log_format(audit_buf, " res=%u", result);
        audit_log_end(audit_buf);
@@ -2138,14 +2141,14 @@ void xfrm_audit_state_add(struct xfrm_state *x, int result,
 EXPORT_SYMBOL_GPL(xfrm_audit_state_add);
 
 void xfrm_audit_state_delete(struct xfrm_state *x, int result,
-                            u32 auid, u32 secid)
+                            uid_t auid, u32 sessionid, u32 secid)
 {
        struct audit_buffer *audit_buf;
 
        audit_buf = xfrm_audit_start("SAD-delete");
        if (audit_buf == NULL)
                return;
-       xfrm_audit_helper_usrinfo(auid, secid, audit_buf);
+       xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
        xfrm_audit_helper_sainfo(x, audit_buf);
        audit_log_format(audit_buf, " res=%u", result);
        audit_log_end(audit_buf);
index 22a30ae582a2738cc8453fcdefe1893caa1b6263..a1b0fbe3ea351f6e7e953078ffd90d29dd29e196 100644 (file)
@@ -407,6 +407,9 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct xfrm_state *x;
        int err;
        struct km_event c;
+       uid_t loginuid = NETLINK_CB(skb).loginuid;
+       u32 sessionid = NETLINK_CB(skb).sessionid;
+       u32 sid = NETLINK_CB(skb).sid;
 
        err = verify_newsa_info(p, attrs);
        if (err)
@@ -422,8 +425,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        else
                err = xfrm_state_update(x);
 
-       xfrm_audit_state_add(x, err ? 0 : 1, NETLINK_CB(skb).loginuid,
-                            NETLINK_CB(skb).sid);
+       xfrm_audit_state_add(x, err ? 0 : 1, loginuid, sessionid, sid);
 
        if (err < 0) {
                x->km.state = XFRM_STATE_DEAD;
@@ -478,6 +480,9 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        int err = -ESRCH;
        struct km_event c;
        struct xfrm_usersa_id *p = nlmsg_data(nlh);
+       uid_t loginuid = NETLINK_CB(skb).loginuid;
+       u32 sessionid = NETLINK_CB(skb).sessionid;
+       u32 sid = NETLINK_CB(skb).sid;
 
        x = xfrm_user_state_lookup(p, attrs, &err);
        if (x == NULL)
@@ -502,8 +507,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        km_state_notify(x, &c);
 
 out:
-       xfrm_audit_state_delete(x, err ? 0 : 1, NETLINK_CB(skb).loginuid,
-                               NETLINK_CB(skb).sid);
+       xfrm_audit_state_delete(x, err ? 0 : 1, loginuid, sessionid, sid);
        xfrm_state_put(x);
        return err;
 }
@@ -1123,6 +1127,9 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
        struct km_event c;
        int err;
        int excl;
+       uid_t loginuid = NETLINK_CB(skb).loginuid;
+       u32 sessionid = NETLINK_CB(skb).sessionid;
+       u32 sid = NETLINK_CB(skb).sid;
 
        err = verify_newpolicy_info(p);
        if (err)
@@ -1141,8 +1148,7 @@ static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
         * a type XFRM_MSG_UPDPOLICY - JHS */
        excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
        err = xfrm_policy_insert(p->dir, xp, excl);
-       xfrm_audit_policy_add(xp, err ? 0 : 1, NETLINK_CB(skb).loginuid,
-                             NETLINK_CB(skb).sid);
+       xfrm_audit_policy_add(xp, err ? 0 : 1, loginuid, sessionid, sid);
 
        if (err) {
                security_xfrm_policy_free(xp->security);
@@ -1371,9 +1377,12 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                                            NETLINK_CB(skb).pid);
                }
        } else {
-               xfrm_audit_policy_delete(xp, err ? 0 : 1,
-                                        NETLINK_CB(skb).loginuid,
-                                        NETLINK_CB(skb).sid);
+               uid_t loginuid = NETLINK_CB(skb).loginuid;
+               u32 sessionid = NETLINK_CB(skb).sessionid;
+               u32 sid = NETLINK_CB(skb).sid;
+
+               xfrm_audit_policy_delete(xp, err ? 0 : 1, loginuid, sessionid,
+                                        sid);
 
                if (err != 0)
                        goto out;
@@ -1399,6 +1408,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
        int err;
 
        audit_info.loginuid = NETLINK_CB(skb).loginuid;
+       audit_info.sessionid = NETLINK_CB(skb).sessionid;
        audit_info.secid = NETLINK_CB(skb).sid;
        err = xfrm_state_flush(p->proto, &audit_info);
        if (err)
@@ -1546,6 +1556,7 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
                return err;
 
        audit_info.loginuid = NETLINK_CB(skb).loginuid;
+       audit_info.sessionid = NETLINK_CB(skb).sessionid;
        audit_info.secid = NETLINK_CB(skb).sid;
        err = xfrm_policy_flush(type, &audit_info);
        if (err)
@@ -1604,9 +1615,11 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
        read_unlock(&xp->lock);
        err = 0;
        if (up->hard) {
+               uid_t loginuid = NETLINK_CB(skb).loginuid;
+               uid_t sessionid = NETLINK_CB(skb).sessionid;
+               u32 sid = NETLINK_CB(skb).sid;
                xfrm_policy_delete(xp, p->dir);
-               xfrm_audit_policy_delete(xp, 1, NETLINK_CB(skb).loginuid,
-                                        NETLINK_CB(skb).sid);
+               xfrm_audit_policy_delete(xp, 1, loginuid, sessionid, sid);
 
        } else {
                // reset the timers here?
@@ -1640,9 +1653,11 @@ static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
        km_state_expired(x, ue->hard, current->pid);
 
        if (ue->hard) {
+               uid_t loginuid = NETLINK_CB(skb).loginuid;
+               uid_t sessionid = NETLINK_CB(skb).sessionid;
+               u32 sid = NETLINK_CB(skb).sid;
                __xfrm_state_delete(x);
-               xfrm_audit_state_delete(x, 1, NETLINK_CB(skb).loginuid,
-                                       NETLINK_CB(skb).sid);
+               xfrm_audit_state_delete(x, 1, loginuid, sessionid, sid);
        }
        err = 0;
 out:
index 05e438f8b4e2d8511e69a4961cf8e35b0dbbeebd..e90dc5d04392dcc0f10d6d480dd6a866cc54df0e 100644 (file)
@@ -33,10 +33,8 @@ static struct file_operations mark_ops = {
 static int example_init(void)
 {
        printk(KERN_ALERT "example init\n");
-       pentry_example = create_proc_entry("marker-example", 0444, NULL);
-       if (pentry_example)
-               pentry_example->proc_fops = &mark_ops;
-       else
+       pentry_example = proc_create("marker-example", 0444, NULL, &mark_ops);
+       if (!pentry_example)
                return -EPERM;
        return 0;
 }
index 9468ec7971db933e2594171ff66c8dd17c9368c8..9c4b3e2b7098a9d2af14c50614189c9a28cd689b 100755 (executable)
@@ -1,2 +1,18 @@
 #!/bin/sh
-indent -npro -kr -i8 -ts8 -sob -l80 -ss -ncs -cp1 "$@"
+PARAM="-npro -kr -i8 -ts8 -sob -l80 -ss -ncs -cp1"
+RES=`indent --version`
+V1=`echo $RES | cut -d' ' -f3 | cut -d'.' -f1`
+V2=`echo $RES | cut -d' ' -f3 | cut -d'.' -f2`
+V3=`echo $RES | cut -d' ' -f3 | cut -d'.' -f3`
+if [ $V1 -gt 2 ]; then
+  PARAM="$PARAM -il0"
+elif [ $V1 -eq 2 ]; then
+  if [ $V2 -gt 2 ]; then
+    PARAM="$PARAM -il0";
+  elif [ $V2 -eq 2 ]; then
+    if [ $V3 -ge 10 ]; then
+      PARAM="$PARAM -il0"
+    fi
+  fi
+fi
+indent $PARAM "$@"
index 24b3c8fe6bca9f7e679bb23045dcf6a24f77b0d3..a098a0454dc8ba50e9fcce958976f9f93aefa1ae 100644 (file)
@@ -76,7 +76,7 @@ modpost = scripts/mod/modpost                    \
  $(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,)       \
  $(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile)   \
  $(if $(KBUILD_EXTMOD),-I $(modulesymfile))      \
- $(if $(iKBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(EXTRA_SYMBOLS))) \
+ $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(EXTRA_SYMBOLS))) \
  $(if $(KBUILD_EXTMOD),-o $(modulesymfile))      \
  $(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S)      \
  $(if $(CONFIG_MARKERS),-K $(kernelmarkersfile)) \
index 64ec4b8a51b502cca0b37638f4443813d65d35ae..b6bbbcdc557eb05d7eeb55f11887a34915f722b1 100755 (executable)
@@ -9,7 +9,7 @@ use strict;
 my $P = $0;
 $P =~ s@.*/@@g;
 
-my $V = '0.16';
+my $V = '0.18';
 
 use Getopt::Long qw(:config no_auto_abbrev);
 
@@ -131,6 +131,17 @@ our $NonptrType;
 our $Type;
 our $Declare;
 
+our $UTF8      = qr {
+       [\x09\x0A\x0D\x20-\x7E]              # ASCII
+       | [\xC2-\xDF][\x80-\xBF]             # non-overlong 2-byte
+       |  \xE0[\xA0-\xBF][\x80-\xBF]        # excluding overlongs
+       | [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2}  # straight 3-byte
+       |  \xED[\x80-\x9F][\x80-\xBF]        # excluding surrogates
+       |  \xF0[\x90-\xBF][\x80-\xBF]{2}     # planes 1-3
+       | [\xF1-\xF3][\x80-\xBF]{3}          # planes 4-15
+       |  \xF4[\x80-\x8F][\x80-\xBF]{2}     # plane 16
+}x;
+
 our @typeList = (
        qr{void},
        qr{char},
@@ -692,7 +703,7 @@ sub annotate_values {
        while (length($cur)) {
                @av_paren_type = ('E') if ($#av_paren_type < 0);
                print " <" . join('', @av_paren_type) .
-                                       "> <$type> " if ($dbg_values > 1);
+                               "> <$type> <$av_pending>" if ($dbg_values > 1);
                if ($cur =~ /^(\s+)/o) {
                        print "WS($1)\n" if ($dbg_values > 1);
                        if ($1 =~ /\n/ && $av_preprocessor) {
@@ -705,9 +716,18 @@ sub annotate_values {
                        $type = 'T';
 
                } elsif ($cur =~ /^(#\s*define\s*$Ident)(\(?)/o) {
-                       print "DEFINE($1)\n" if ($dbg_values > 1);
+                       print "DEFINE($1,$2)\n" if ($dbg_values > 1);
                        $av_preprocessor = 1;
-                       $av_pending = 'N';
+                       push(@av_paren_type, $type);
+                       if ($2 ne '') {
+                               $av_pending = 'N';
+                       }
+                       $type = 'E';
+
+               } elsif ($cur =~ /^(#\s*undef\s*$Ident)/o) {
+                       print "UNDEF($1)\n" if ($dbg_values > 1);
+                       $av_preprocessor = 1;
+                       push(@av_paren_type, $type);
 
                } elsif ($cur =~ /^(#\s*(?:ifdef|ifndef|if))/o) {
                        print "PRE_START($1)\n" if ($dbg_values > 1);
@@ -715,7 +735,7 @@ sub annotate_values {
 
                        push(@av_paren_type, $type);
                        push(@av_paren_type, $type);
-                       $type = 'N';
+                       $type = 'E';
 
                } elsif ($cur =~ /^(#\s*(?:else|elif))/o) {
                        print "PRE_RESTART($1)\n" if ($dbg_values > 1);
@@ -723,7 +743,7 @@ sub annotate_values {
 
                        push(@av_paren_type, $av_paren_type[$#av_paren_type]);
 
-                       $type = 'N';
+                       $type = 'E';
 
                } elsif ($cur =~ /^(#\s*(?:endif))/o) {
                        print "PRE_END($1)\n" if ($dbg_values > 1);
@@ -734,11 +754,16 @@ sub annotate_values {
                        # one does, and continue as if the #endif was not here.
                        pop(@av_paren_type);
                        push(@av_paren_type, $type);
-                       $type = 'N';
+                       $type = 'E';
 
                } elsif ($cur =~ /^(\\\n)/o) {
                        print "PRECONT($1)\n" if ($dbg_values > 1);
 
+               } elsif ($cur =~ /^(__attribute__)\s*\(?/o) {
+                       print "ATTR($1)\n" if ($dbg_values > 1);
+                       $av_pending = $type;
+                       $type = 'N';
+
                } elsif ($cur =~ /^(sizeof)\s*(\()?/o) {
                        print "SIZEOF($1)\n" if ($dbg_values > 1);
                        if (defined $2) {
@@ -930,7 +955,7 @@ sub process {
                        # edge is a close comment then we must be in a comment
                        # at context start.
                        my $edge;
-                       for (my $ln = $linenr; $ln < ($linenr + $realcnt); $ln++) {
+                       for (my $ln = $linenr + 1; $ln < ($linenr + $realcnt); $ln++) {
                                next if ($line =~ /^-/);
                                ($edge) = ($rawlines[$ln - 1] =~ m@(/\*|\*/)@);
                                last if (defined $edge);
@@ -951,9 +976,9 @@ sub process {
                        ##print "COMMENT:$in_comment edge<$edge> $rawline\n";
                        sanitise_line_reset($in_comment);
 
-               } elsif ($realcnt) {
+               } elsif ($realcnt && $rawline =~ /^(?:\+| |$)/) {
                        # Standardise the strings and chars within the input to
-                       # simplify matching.
+                       # simplify matching -- only bother with positive lines.
                        $line = sanitise_line($rawline);
                }
                push(@lines, $line);
@@ -1066,17 +1091,14 @@ sub process {
 
 # UTF-8 regex found at http://www.w3.org/International/questions/qa-forms-utf-8.en.php
                if (($realfile =~ /^$/ || $line =~ /^\+/) &&
-                    !($rawline =~ m/^(
-                               [\x09\x0A\x0D\x20-\x7E]              # ASCII
-                               | [\xC2-\xDF][\x80-\xBF]             # non-overlong 2-byte
-                               |  \xE0[\xA0-\xBF][\x80-\xBF]        # excluding overlongs
-                               | [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2}  # straight 3-byte
-                               |  \xED[\x80-\x9F][\x80-\xBF]        # excluding surrogates
-                               |  \xF0[\x90-\xBF][\x80-\xBF]{2}     # planes 1-3
-                               | [\xF1-\xF3][\x80-\xBF]{3}          # planes 4-15
-                               |  \xF4[\x80-\x8F][\x80-\xBF]{2}     # plane 16
-                               )*$/x )) {
-                       ERROR("Invalid UTF-8, patch and commit message should be encoded in UTF-8\n" . $herecurr);
+                   $rawline !~ m/^$UTF8*$/) {
+                       my ($utf8_prefix) = ($rawline =~ /^($UTF8*)/);
+
+                       my $blank = copy_spacing($rawline);
+                       my $ptr = substr($blank, 0, length($utf8_prefix)) . "^";
+                       my $hereptr = "$hereline$ptr\n";
+
+                       ERROR("Invalid UTF-8, patch and commit message should be encoded in UTF-8\n" . $hereptr);
                }
 
 #ignore lines being removed
@@ -1112,7 +1134,7 @@ sub process {
                if ($rawline =~ /^\+\s* \t\s*\S/ ||
                    $rawline =~ /^\+\s*        \s*/) {
                        my $herevet = "$here\n" . cat_vet($rawline) . "\n";
-                       ERROR("use tabs not spaces\n" . $herevet);
+                       ERROR("code indent should use tabs where possible\n" . $herevet);
                }
 
 # check for RCS/CVS revision markers
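
The reworded error above still fires on space-indented additions; a hypothetical example of the kind of line it points at (the function body below is indented with eight spaces rather than a tab):

void hypo_toggle(int *flag)
{
        *flag = !*flag; /* eight leading spaces, not a tab, so a patch adding
                         * this line would be told to use tabs where possible */
}
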
@@ -1121,35 +1143,40 @@ sub process {
                }
 
 # Check for potential 'bare' types
-               if ($realcnt) {
-                       my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0);
-                       $s =~ s/\n./ /g;
-                       $s =~ s/{.*$//;
+               my ($stat, $cond);
+               if ($realcnt && $line =~ /.\s*\S/) {
+                       ($stat, $cond) = ctx_statement_block($linenr,
+                                                               $realcnt, 0);
+                       $stat =~ s/\n./\n /g;
+                       $cond =~ s/\n./\n /g;
+
+                       my $s = $stat;
+                       $s =~ s/{.*$//s;
 
                        # Ignore goto labels.
-                       if ($s =~ /$Ident:\*$/) {
+                       if ($s =~ /$Ident:\*$/s) {
 
                        # Ignore functions being called
-                       } elsif ($s =~ /^.\s*$Ident\s*\(/) {
+                       } elsif ($s =~ /^.\s*$Ident\s*\(/s) {
 
                        # definitions in global scope can only start with types
-                       } elsif ($s =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b/) {
+                       } elsif ($s =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b/s) {
                                possible($1, $s);
 
                        # declarations always start with types
-                       } elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:const\s+)?($Ident)\b(:?\s+$Sparse)?\s*\**\s*$Ident\s*(?:;|=|,)/) {
+                       } elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:const\s+)?($Ident)\b(:?\s+$Sparse)?\s*\**\s*$Ident\s*(?:;|=|,)/s) {
                                possible($1, $s);
                        }
 
                        # any (foo ... *) is a pointer cast, and foo is a type
-                       while ($s =~ /\(($Ident)(?:\s+$Sparse)*\s*\*+\s*\)/g) {
+                       while ($s =~ /\(($Ident)(?:\s+$Sparse)*\s*\*+\s*\)/sg) {
                                possible($1, $s);
                        }
 
                        # Check for any sort of function declaration.
                        # int foo(something bar, other baz);
                        # void (*store_gdt)(x86_descr_ptr *);
-                       if ($prev_values eq 'E' && $s =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/) {
+                       if ($prev_values eq 'E' && $s =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/s) {
                                my ($name_len) = length($1);
 
                                my $ctx = $s;
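
The rework above feeds the whole statement (and any bracketed condition) from ctx_statement_block() into the type-guessing patterns, with /s added so they can match across line breaks. A declaration split over two lines, as in this hypothetical fragment, is the sort of input that stays matchable:

/* The declaration spans two lines; without the /s modifiers the patterns
 * above would stop at the first newline. */
typedef struct hypo_ops hypo_ops_t;

hypo_ops_t *
hypo_find_ops(unsigned int id);
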
@@ -1282,18 +1309,19 @@ sub process {
                           ($prevline !~ /^ }/) &&
                           ($prevline !~ /^.DECLARE_$Ident\(\Q$name\E\)/) &&
                           ($prevline !~ /^.LIST_HEAD\(\Q$name\E\)/) &&
+                          ($prevline !~ /^.$Type\s*\(\s*\*\s*\Q$name\E\s*\)\s*\(/) &&
                           ($prevline !~ /\b\Q$name\E(?:\s+$Attribute)?\s*(?:;|=|\[)/)) {
                                WARN("EXPORT_SYMBOL(foo); should immediately follow its function/variable\n" . $herecurr);
                        }
                }
 
 # check for external initialisers.
-               if ($line =~ /^.$Type\s*$Ident\s*=\s*(0|NULL);/) {
+               if ($line =~ /^.$Type\s*$Ident\s*=\s*(0|NULL|false)\s*;/) {
                        ERROR("do not initialise externals to 0 or NULL\n" .
                                $herecurr);
                }
 # check for static initialisers.
-               if ($line =~ /\s*static\s.*=\s*(0|NULL);/) {
+               if ($line =~ /\s*static\s.*=\s*(0|NULL|false)\s*;/) {
                        ERROR("do not initialise statics to 0 or NULL\n" .
                                $herecurr);
                }
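
The two hunks above extend the initialiser checks to cover 'false' and to tolerate whitespace before the semicolon. A short, standalone example (hypothetical names) of what is flagged and of the preferred form:

#include <stdbool.h>

static int hypo_count = 0;              /* flagged: statics live in BSS and are already zero */
static bool hypo_enabled = false;       /* flagged: 'false' is now caught as well */
static int hypo_errors;                 /* preferred form */

int hypo_ok(void)
{
        return hypo_enabled && !hypo_count && !hypo_errors;
}
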
@@ -1512,7 +1540,10 @@ sub process {
                                        if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) {
                                                ERROR("space required before that '$op' $at\n" . $hereptr);
                                        }
-                                       if ($ctx =~ /.xW/) {
+                                       if ($op  eq '*' && $cc =~/\s*const\b/) {
+                                               # A unary '*' may be const
+
+                                       } elsif ($ctx =~ /.xW/) {
                                                ERROR("space prohibited after that '$op' $at\n" . $hereptr);
                                        }
 
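
The special case added above keeps the spacing check from reporting "space prohibited after that '*'" when the '*' is followed by 'const'. For example (hypothetical):

const char * const hypo_tag = "hypo";   /* "* const": the space after '*' is no longer reported */
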
@@ -1617,7 +1648,7 @@ sub process {
 
 # Check for illegal assignment in if conditional.
                if ($line =~ /\bif\s*\(/) {
-                       my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0);
+                       my ($s, $c) = ($stat, $cond);
 
                        if ($c =~ /\bif\s*\(.*[^<>!=]=[^=].*/) {
                                ERROR("do not use assignment in if condition\n" . $herecurr);
@@ -1695,7 +1726,7 @@ sub process {
 #warn if <asm/foo.h> is #included and <linux/foo.h> is available (uses RAW line)
                if ($tree && $rawline =~ m{^.\#\s*include\s*\<asm\/(.*)\.h\>}) {
                        my $checkfile = "$root/include/linux/$1.h";
-                       if (-f $checkfile && $1 ne 'irq.h') {
+                       if (-f $checkfile && $1 ne 'irq') {
                                WARN("Use #include <linux/$1.h> instead of <asm/$1.h>\n" .
                                        $herecurr);
                        }
@@ -1910,7 +1941,8 @@ sub process {
                }
 
 # check for spinlock_t definitions without a comment.
-               if ($line =~ /^.\s*(struct\s+mutex|spinlock_t)\s+\S+;/) {
+               if ($line =~ /^.\s*(struct\s+mutex|spinlock_t)\s+\S+;/ ||
+                   $line =~ /^.\s*(DEFINE_MUTEX)\s*\(/) {
                        my $which = $1;
                        if (!ctx_has_comment($first_line, $linenr)) {
                                CHK("$1 definition without comment\n" . $herecurr);
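
The hunk above extends the "definition without comment" check to DEFINE_MUTEX(). A kernel-style sketch (hypothetical lock names, kernel headers assumed): the first definition carries the comment the check looks for, the second is the kind it would now point at.

#include <linux/mutex.h>

/* hypo_lock serialises access to the hypothetical hypo_list. */
static DEFINE_MUTEX(hypo_lock);

static DEFINE_MUTEX(hypo_scan_lock);
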
@@ -1940,7 +1972,26 @@ sub process {
                }
 
 # check for new externs in .c files.
-               if ($line =~ /^.\s*extern\s/ && ($realfile =~ /\.c$/)) {
+               if ($realfile =~ /\.c$/ && defined $stat &&
+                   $stat =~ /^.\s*(?:extern\s+)?$Type\s+$Ident(\s*)\(/s)
+               {
+                       my $paren_space = $1;
+
+                       my $s = $stat;
+                       if (defined $cond) {
+                               substr($s, 0, length($cond), '');
+                       }
+                       if ($s =~ /^\s*;/) {
+                               WARN("externs should be avoided in .c files\n" .  $herecurr);
+                       }
+
+                       if ($paren_space =~ /\n/) {
+                               WARN("arguments for function declarations should follow identifier\n" . $herecurr);
+                       }
+
+               } elsif ($realfile =~ /\.c$/ && defined $stat &&
+                   $stat =~ /^.\s*extern\s+/)
+               {
                        WARN("externs should be avoided in .c files\n" .  $herecurr);
                }
 
@@ -1964,11 +2015,11 @@ sub process {
                }
 
 # check for semaphores used as mutexes
-               if ($line =~ /\b(DECLARE_MUTEX|init_MUTEX)\s*\(/) {
+               if ($line =~ /^.\s*(DECLARE_MUTEX|init_MUTEX)\s*\(/) {
                        WARN("mutexes are preferred for single holder semaphores\n" . $herecurr);
                }
 # check for semaphores used as mutexes
-               if ($line =~ /\binit_MUTEX_LOCKED\s*\(/) {
+               if ($line =~ /^.\s*init_MUTEX_LOCKED\s*\(/) {
                        WARN("consider using a completion\n" . $herecurr);
                }
 # recommend strict_strto* over simple_strto*
@@ -1979,11 +2030,24 @@ sub process {
 # use of NR_CPUS is usually wrong
 # ignore definitions of NR_CPUS and usage to define arrays as likely right
                if ($line =~ /\bNR_CPUS\b/ &&
-                   $line !~ /^.#\s*define\s+NR_CPUS\s+/ &&
-                   $line !~ /^.\s*$Declare\s.*\[[^\]]*NR_CPUS[^\]]*\]/)
+                   $line !~ /^.#\s*if\b.*\bNR_CPUS\b/ &&
+                   $line !~ /^.#\s*define\b.*\bNR_CPUS\b/ &&
+                   $line !~ /^.\s*$Declare\s.*\[[^\]]*NR_CPUS[^\]]*\]/ &&
+                   $line !~ /\[[^\]]*\.\.\.[^\]]*NR_CPUS[^\]]*\]/ &&
+                   $line !~ /\[[^\]]*NR_CPUS[^\]]*\.\.\.[^\]]*\]/)
                {
                        WARN("usage of NR_CPUS is often wrong - consider using cpu_possible(), num_possible_cpus(), for_each_possible_cpu(), etc\n" . $herecurr);
                }
+
+# check for %L{u,d,i} in strings
+               my $string;
+               while ($line =~ /(?:^|")([X\t]*)(?:"|$)/g) {
+                       $string = substr($rawline, $-[1], $+[1] - $-[1]);
+                       if ($string =~ /(?<!%)%L[udi]/) {
+                               WARN("\%Ld/%Lu are non-standard C, use %lld/%llu\n" . $herecurr);
+                               last;
+                       }
+               }
        }
 
        # If we have no input at all, then there is nothing to report on
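
The last hunk above scans string constants on added lines for %Ld/%Lu/%Li. A small standalone example; the non-standard form is left in the comment and the portable %llu spelling is used in the call:

#include <stdio.h>

int main(void)
{
        unsigned long long bytes = 1ULL << 40;

        /* printf("total: %Lu\n", bytes);  <- "%Lu" inside a string constant is
         * what the new check warns about; %llu is the standard length modifier. */
        printf("total: %llu\n", bytes);
        return 0;
}
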
index 684fb9cdc055cc15d1ac3f39cdbd9fa47a245104..5f3415f28736a386d84b77ae4fa9e2f179958221 100644 (file)
@@ -135,7 +135,7 @@ parse() {
                        str="${ftype} ${name} ${str} ${dev} ${maj} ${min}"
                        ;;
                "slink")
-                       local target=`field 11 $(LC_ALL=C ls -l "${location}")`
+                       local target=`readlink "${location}"`
                        str="${ftype} ${name} ${target} ${str}"
                        ;;
                *)
index c912137f80e247d0b8076e32f7386005127889a0..5d20a2e24cd1e18814483327645f25b6c2f73d0e 100644 (file)
@@ -7,12 +7,6 @@
  *
  * Usage: nm -n vmlinux | scripts/kallsyms [--all-symbols] > symbols.S
  *
- * ChangeLog:
- *
- * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com>
- *      Changed the compression method from stem compression to "table lookup"
- *      compression
- *
  *      Table compression uses all the unused char codes on the symbols and
  *  maps these to the most used substrings (tokens). For instance, it might
  *  map char code 0xF7 to represent "write_" and then in every symbol where
index bed0f4e2d2f7e6632518c8577d39afd74462afdc..6a61cee4a32c39ca6931b191e93b6a6313689331 100644 (file)
@@ -5,25 +5,6 @@
 
 /* A lexical scanner generated by flex */
 
-#define yy_create_buffer zconf_create_buffer
-#define yy_delete_buffer zconf_delete_buffer
-#define yy_flex_debug zconf_flex_debug
-#define yy_init_buffer zconf_init_buffer
-#define yy_flush_buffer zconf_flush_buffer
-#define yy_load_buffer_state zconf_load_buffer_state
-#define yy_switch_to_buffer zconf_switch_to_buffer
-#define yyin zconfin
-#define yyleng zconfleng
-#define yylex zconflex
-#define yylineno zconflineno
-#define yyout zconfout
-#define yyrestart zconfrestart
-#define yytext zconftext
-#define yywrap zconfwrap
-#define yyalloc zconfalloc
-#define yyrealloc zconfrealloc
-#define yyfree zconffree
-
 #define FLEX_SCANNER
 #define YY_FLEX_MAJOR_VERSION 2
 #define YY_FLEX_MINOR_VERSION 5
@@ -354,7 +335,7 @@ void zconffree (void *  );
 
 /* Begin user sect3 */
 
-#define zconfwrap(n) 1
+#define zconfwrap() 1
 #define YY_SKIP_YYWRAP
 
 typedef unsigned char YY_CHAR;
@@ -1535,7 +1516,7 @@ static int yy_get_next_buffer (void)
 
                /* Read in more data. */
                YY_INPUT( (&YY_CURRENT_BUFFER_LVALUE->yy_ch_buf[number_to_move]),
-                       (yy_n_chars), num_to_read );
+                       (yy_n_chars), (size_t) num_to_read );
 
                YY_CURRENT_BUFFER_LVALUE->yy_n_chars = (yy_n_chars);
                }
@@ -2007,7 +1988,7 @@ YY_BUFFER_STATE zconf_scan_buffer  (char * base, yy_size_t  size )
 
 /** Setup the input buffer state to scan a string. The next call to zconflex() will
  * scan from a @e copy of @a str.
- * @param str a NUL-terminated string to scan
+ * @param yystr a NUL-terminated string to scan
  * 
  * @return the newly allocated buffer state object.
  * @note If you want to scan bytes that may contain NUL values, then use
index 4d09f6ddefe3daa71bb9d5d8bcbe1b774b0b2821..8e69461313d19de2c133479154c123e479f059e0 100644 (file)
@@ -21,7 +21,7 @@ P(menu_get_help,const char *,(struct menu *menu));
 /* symbol.c */
 P(symbol_hash,struct symbol *,[SYMBOL_HASHSIZE]);
 
-P(sym_lookup,struct symbol *,(const char *name, int isconst));
+P(sym_lookup,struct symbol *,(const char *name, int flags));
 P(sym_find,struct symbol *,(const char *name));
 P(sym_re_search,struct symbol **,(const char *pattern));
 P(sym_type_name,const char *,(enum symbol_type type));
index 4946bd02b46d4324939b8ba7e03db1174359d645..616c60138183c8c9b74c3f2562f7e4a1e5b6d5d9 100644 (file)
@@ -89,7 +89,7 @@ do_resize:
        box_y = y + 2;
        box_x = (width - box_width) / 2;
        draw_box(dialog, y + 1, box_x - 1, 3, box_width + 2,
-                dlg.border.atr, dlg.dialog.atr);
+                dlg.dialog.atr, dlg.border.atr);
 
        print_buttons(dialog, height, width, 0);
 
index 606ceb9e746ec3b9649fd534c50f16e95f73f69f..07ff8d105c9def5885221d8668a4c7859b3bbb29 100644 (file)
@@ -235,18 +235,22 @@ void menu_finalize(struct menu *parent)
        sym = parent->sym;
        if (parent->list) {
                if (sym && sym_is_choice(sym)) {
-                       /* find the first choice value and find out choice type */
-                       for (menu = parent->list; menu; menu = menu->next) {
-                               if (menu->sym) {
-                                       current_entry = parent;
-                                       if (sym->type == S_UNKNOWN)
+                       if (sym->type == S_UNKNOWN) {
+                               /* find the first choice value to find out choice type */
+                               current_entry = parent;
+                               for (menu = parent->list; menu; menu = menu->next) {
+                                       if (menu->sym && menu->sym->type != S_UNKNOWN) {
                                                menu_set_type(menu->sym->type);
-                                       current_entry = menu;
-                                       if (menu->sym->type == S_UNKNOWN)
-                                               menu_set_type(sym->type);
-                                       break;
+                                               break;
+                                       }
                                }
                        }
+                       /* set the type of the remaining choice values */
+                       for (menu = parent->list; menu; menu = menu->next) {
+                               current_entry = menu;
+                               if (menu->sym && menu->sym->type == S_UNKNOWN)
+                                       menu_set_type(sym->type);
+                       }
                        parentdep = expr_alloc_symbol(sym);
                } else if (parent->prompt)
                        parentdep = parent->prompt->visible.expr;
@@ -313,50 +317,36 @@ void menu_finalize(struct menu *parent)
                }
        }
        for (menu = parent->list; menu; menu = menu->next) {
-               if (sym && sym_is_choice(sym) && menu->sym) {
+               if (sym && sym_is_choice(sym) &&
+                   menu->sym && !sym_is_choice_value(menu->sym)) {
+                       current_entry = menu;
                        menu->sym->flags |= SYMBOL_CHOICEVAL;
                        if (!menu->prompt)
                                menu_warn(menu, "choice value must have a prompt");
                        for (prop = menu->sym->prop; prop; prop = prop->next) {
-                               if (prop->type == P_PROMPT && prop->menu != menu) {
-                                       prop_warn(prop, "choice values "
-                                           "currently only support a "
-                                           "single prompt");
-                               }
                                if (prop->type == P_DEFAULT)
                                        prop_warn(prop, "defaults for choice "
-                                           "values not supported");
+                                                 "values not supported");
+                               if (prop->menu == menu)
+                                       continue;
+                               if (prop->type == P_PROMPT &&
+                                   prop->menu->parent->sym != sym)
+                                       prop_warn(prop, "choice value used outside its choice group");
                        }
-                       current_entry = menu;
-                       if (menu->sym->type == S_UNKNOWN)
-                               menu_set_type(sym->type);
                        /* Non-tristate choice values of tristate choices must
                         * depend on the choice being set to Y. The choice
                         * values' dependencies were propagated to their
                         * properties above, so the change here must be re-
-                        * propagated. */
+                        * propagated.
+                        */
                        if (sym->type == S_TRISTATE && menu->sym->type != S_TRISTATE) {
                                basedep = expr_alloc_comp(E_EQUAL, sym, &symbol_yes);
-                               basedep = expr_alloc_and(basedep, menu->dep);
-                               basedep = expr_eliminate_dups(basedep);
-                               menu->dep = basedep;
+                               menu->dep = expr_alloc_and(basedep, menu->dep);
                                for (prop = menu->sym->prop; prop; prop = prop->next) {
                                        if (prop->menu != menu)
                                                continue;
-                                       dep = expr_alloc_and(expr_copy(basedep),
-                                                            prop->visible.expr);
-                                       dep = expr_eliminate_dups(dep);
-                                       dep = expr_trans_bool(dep);
-                                       prop->visible.expr = dep;
-                                       if (prop->type == P_SELECT) {
-                                               struct symbol *es = prop_get_symbol(prop);
-                                               dep2 = expr_alloc_symbol(menu->sym);
-                                               dep = expr_alloc_and(dep2,
-                                                                    expr_copy(dep));
-                                               dep = expr_alloc_or(es->rev_dep.expr, dep);
-                                               dep = expr_eliminate_dups(dep);
-                                               es->rev_dep.expr = dep;
-                                       }
+                                       prop->visible.expr = expr_alloc_and(expr_copy(basedep),
+                                                                           prop->visible.expr);
                                }
                        }
                        menu_add_symbol(P_CHOICE, sym, NULL);
index 4a03191ad17676105395c83564a78f8502dc51e4..18f3e5c33634f7729a9ad757e638757649bb043d 100644 (file)
@@ -40,7 +40,7 @@ void sym_add_default(struct symbol *sym, const char *def)
 {
        struct property *prop = prop_alloc(P_DEFAULT, sym);
 
-       prop->expr = expr_alloc_symbol(sym_lookup(def, 1));
+       prop->expr = expr_alloc_symbol(sym_lookup(def, SYMBOL_CONST));
 }
 
 void sym_init(void)
@@ -350,9 +350,6 @@ void sym_calc_value(struct symbol *sym)
                ;
        }
 
-       if (sym->flags & SYMBOL_AUTO)
-               sym->flags &= ~SYMBOL_WRITE;
-
        sym->curr = newval;
        if (sym_is_choice(sym) && newval.tri == yes)
                sym->curr.val = sym_calc_choice(sym);
@@ -377,6 +374,9 @@ void sym_calc_value(struct symbol *sym)
                                sym_set_changed(choice_sym);
                }
        }
+
+       if (sym->flags & SYMBOL_AUTO)
+               sym->flags &= ~SYMBOL_WRITE;
 }
 
 void sym_clear_all_valid(void)
@@ -651,7 +651,7 @@ bool sym_is_changable(struct symbol *sym)
        return sym->visible > sym->rev_dep.tri;
 }
 
-struct symbol *sym_lookup(const char *name, int isconst)
+struct symbol *sym_lookup(const char *name, int flags)
 {
        struct symbol *symbol;
        const char *ptr;
@@ -671,11 +671,10 @@ struct symbol *sym_lookup(const char *name, int isconst)
                hash &= 0xff;
 
                for (symbol = symbol_hash[hash]; symbol; symbol = symbol->next) {
-                       if (!strcmp(symbol->name, name)) {
-                               if ((isconst && symbol->flags & SYMBOL_CONST) ||
-                                   (!isconst && !(symbol->flags & SYMBOL_CONST)))
-                                       return symbol;
-                       }
+                       if (!strcmp(symbol->name, name) &&
+                           (flags ? symbol->flags & flags
+                                  : !(symbol->flags & (SYMBOL_CONST|SYMBOL_CHOICE))))
+                               return symbol;
                }
                new_name = strdup(name);
        } else {
@@ -687,8 +686,7 @@ struct symbol *sym_lookup(const char *name, int isconst)
        memset(symbol, 0, sizeof(*symbol));
        symbol->name = new_name;
        symbol->type = S_UNKNOWN;
-       if (isconst)
-               symbol->flags |= SYMBOL_CONST;
+       symbol->flags |= flags;
 
        symbol->next = symbol_hash[hash];
        symbol_hash[hash] = symbol;
@@ -762,8 +760,6 @@ struct symbol **sym_re_search(const char *pattern)
 }
 
 
-struct symbol *sym_check_deps(struct symbol *sym);
-
 static struct symbol *sym_check_expr_deps(struct expr *e)
 {
        struct symbol *sym;
@@ -795,40 +791,100 @@ static struct symbol *sym_check_expr_deps(struct expr *e)
 }
 
 /* return NULL when dependencies are OK */
-struct symbol *sym_check_deps(struct symbol *sym)
+static struct symbol *sym_check_sym_deps(struct symbol *sym)
 {
        struct symbol *sym2;
        struct property *prop;
 
-       if (sym->flags & SYMBOL_CHECK) {
-               fprintf(stderr, "%s:%d:error: found recursive dependency: %s",
-                       sym->prop->file->name, sym->prop->lineno, sym->name);
-               return sym;
-       }
-       if (sym->flags & SYMBOL_CHECKED)
-               return NULL;
-
-       sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED);
        sym2 = sym_check_expr_deps(sym->rev_dep.expr);
        if (sym2)
-               goto out;
+               return sym2;
 
        for (prop = sym->prop; prop; prop = prop->next) {
                if (prop->type == P_CHOICE || prop->type == P_SELECT)
                        continue;
                sym2 = sym_check_expr_deps(prop->visible.expr);
                if (sym2)
-                       goto out;
+                       break;
                if (prop->type != P_DEFAULT || sym_is_choice(sym))
                        continue;
                sym2 = sym_check_expr_deps(prop->expr);
                if (sym2)
-                       goto out;
+                       break;
        }
-out:
+
+       return sym2;
+}
+
+static struct symbol *sym_check_choice_deps(struct symbol *choice)
+{
+       struct symbol *sym, *sym2;
+       struct property *prop;
+       struct expr *e;
+
+       prop = sym_get_choice_prop(choice);
+       expr_list_for_each_sym(prop->expr, e, sym)
+               sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED);
+
+       choice->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED);
+       sym2 = sym_check_sym_deps(choice);
+       choice->flags &= ~SYMBOL_CHECK;
        if (sym2)
-               fprintf(stderr, " -> %s%s", sym->name, sym2 == sym? "\n": "");
-       sym->flags &= ~SYMBOL_CHECK;
+               goto out;
+
+       expr_list_for_each_sym(prop->expr, e, sym) {
+               sym2 = sym_check_sym_deps(sym);
+               if (sym2) {
+                       fprintf(stderr, " -> %s", sym->name);
+                       break;
+               }
+       }
+out:
+       expr_list_for_each_sym(prop->expr, e, sym)
+               sym->flags &= ~SYMBOL_CHECK;
+
+       if (sym2 && sym_is_choice_value(sym2) &&
+           prop_get_symbol(sym_get_choice_prop(sym2)) == choice)
+               sym2 = choice;
+
+       return sym2;
+}
+
+struct symbol *sym_check_deps(struct symbol *sym)
+{
+       struct symbol *sym2;
+       struct property *prop;
+
+       if (sym->flags & SYMBOL_CHECK) {
+               fprintf(stderr, "%s:%d:error: found recursive dependency: %s",
+                       sym->prop->file->name, sym->prop->lineno,
+                       sym->name ? sym->name : "<choice>");
+               return sym;
+       }
+       if (sym->flags & SYMBOL_CHECKED)
+               return NULL;
+
+       if (sym_is_choice_value(sym)) {
+               /* for choice groups start the check with main choice symbol */
+               prop = sym_get_choice_prop(sym);
+               sym2 = sym_check_deps(prop_get_symbol(prop));
+       } else if (sym_is_choice(sym)) {
+               sym2 = sym_check_choice_deps(sym);
+       } else {
+               sym->flags |= (SYMBOL_CHECK | SYMBOL_CHECKED);
+               sym2 = sym_check_sym_deps(sym);
+               sym->flags &= ~SYMBOL_CHECK;
+       }
+
+       if (sym2) {
+               fprintf(stderr, " -> %s", sym->name ? sym->name : "<choice>");
+               if (sym2 == sym) {
+                       fprintf(stderr, "\n");
+                       zconfnerrs++;
+                       sym2 = NULL;
+               }
+       }
+
        return sym2;
 }
 
@@ -904,7 +960,7 @@ void prop_add_env(const char *env)
        }
 
        prop = prop_alloc(P_ENV, sym);
-       prop->expr = expr_alloc_symbol(sym_lookup(env, 1));
+       prop->expr = expr_alloc_symbol(sym_lookup(env, SYMBOL_CONST));
 
        sym_env_list = expr_alloc_one(E_LIST, sym_env_list);
        sym_env_list->right.sym = sym;
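
Together with the lkc_proto.h change earlier in this diff, sym_lookup() now takes a flags word instead of the old 'isconst' boolean, with 0 skipping both constant and choice symbols, and sym_check_deps() is split so whole choice groups are checked as a unit. A kconfig-internal sketch of the new calling convention (hypothetical symbol names, not a standalone program):

#include "lkc.h"        /* kconfig's internal header */

static void hypo_lookup_examples(void)
{
        /* Ordinary config symbol: no flags, constants and choices are skipped. */
        struct symbol *opt = sym_lookup("HYPO_OPTION", 0);

        /* Quoted string or environment value: stored as a constant symbol. */
        struct symbol *val = sym_lookup("y", SYMBOL_CONST);

        /* Named choice group, as created by the new "choice <word>" syntax. */
        struct symbol *grp = sym_lookup("HYPO_CHOICE", SYMBOL_CHOICE);

        (void)opt; (void)val; (void)grp;
}
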
index d22d92496f2550662ad9d21b6c60e8838f1c246b..95df833b5a9d172d6a03dd682ded3513ff795017 100644 (file)
@@ -446,16 +446,16 @@ union yyalloc
 /* YYFINAL -- State number of the termination state.  */
 #define YYFINAL  3
 /* YYLAST -- Last index in YYTABLE.  */
-#define YYLAST   258
+#define YYLAST   259
 
 /* YYNTOKENS -- Number of terminals.  */
 #define YYNTOKENS  35
 /* YYNNTS -- Number of nonterminals.  */
-#define YYNNTS  45
+#define YYNNTS  46
 /* YYNRULES -- Number of rules.  */
-#define YYNRULES  108
+#define YYNRULES  110
 /* YYNRULES -- Number of states.  */
-#define YYNSTATES  178
+#define YYNSTATES  180
 
 /* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX.  */
 #define YYUNDEFTOK  2
@@ -507,13 +507,14 @@ static const yytype_uint16 yyprhs[] =
       28,    33,    37,    39,    41,    43,    45,    47,    49,    51,
       53,    55,    57,    59,    61,    63,    67,    70,    74,    77,
       81,    84,    85,    88,    91,    94,    97,   100,   103,   107,
-     112,   117,   122,   128,   132,   133,   137,   138,   141,   144,
-     147,   149,   153,   154,   157,   160,   163,   166,   169,   174,
-     178,   181,   186,   187,   190,   194,   196,   200,   201,   204,
-     207,   210,   214,   217,   219,   223,   224,   227,   230,   233,
-     237,   241,   244,   247,   250,   251,   254,   257,   260,   265,
-     266,   269,   271,   273,   276,   279,   282,   284,   287,   288,
-     291,   293,   297,   301,   305,   308,   312,   316,   318
+     112,   117,   122,   128,   132,   133,   137,   138,   141,   145,
+     148,   150,   154,   155,   158,   161,   164,   167,   170,   175,
+     179,   182,   187,   188,   191,   195,   197,   201,   202,   205,
+     208,   211,   215,   218,   220,   224,   225,   228,   231,   234,
+     238,   242,   245,   248,   251,   252,   255,   258,   261,   266,
+     267,   270,   272,   274,   277,   280,   283,   285,   288,   289,
+     292,   294,   298,   302,   306,   309,   313,   317,   319,   321,
+     322
 };
 
 /* YYRHS -- A `-1'-separated list of the rules' RHS.  */
@@ -533,24 +534,25 @@ static const yytype_int8 yyrhs[] =
       30,    -1,    20,    78,    77,    30,    -1,    21,    25,    77,
       30,    -1,    22,    79,    79,    77,    30,    -1,    23,    48,
       30,    -1,    -1,    48,    25,    49,    -1,    -1,    33,    74,
-      -1,     7,    30,    -1,    50,    54,    -1,    75,    -1,    51,
-      56,    52,    -1,    -1,    54,    55,    -1,    54,    72,    -1,
-      54,    70,    -1,    54,    30,    -1,    54,    40,    -1,    18,
-      74,    77,    30,    -1,    19,    73,    30,    -1,    17,    30,
-      -1,    20,    25,    77,    30,    -1,    -1,    56,    39,    -1,
-      14,    78,    76,    -1,    75,    -1,    57,    60,    58,    -1,
-      -1,    60,    39,    -1,    60,    64,    -1,    60,    53,    -1,
-       4,    74,    30,    -1,    61,    71,    -1,    75,    -1,    62,
-      65,    63,    -1,    -1,    65,    39,    -1,    65,    64,    -1,
-      65,    53,    -1,     6,    74,    30,    -1,     9,    74,    30,
-      -1,    67,    71,    -1,    12,    30,    -1,    69,    13,    -1,
-      -1,    71,    72,    -1,    71,    30,    -1,    71,    40,    -1,
-      16,    24,    78,    30,    -1,    -1,    74,    77,    -1,    25,
-      -1,    26,    -1,     5,    30,    -1,     8,    30,    -1,    15,
-      30,    -1,    30,    -1,    76,    30,    -1,    -1,    14,    78,
-      -1,    79,    -1,    79,    33,    79,    -1,    79,    27,    79,
-      -1,    29,    78,    28,    -1,    34,    78,    -1,    78,    31,
-      78,    -1,    78,    32,    78,    -1,    25,    -1,    26,    -1
+      -1,     7,    80,    30,    -1,    50,    54,    -1,    75,    -1,
+      51,    56,    52,    -1,    -1,    54,    55,    -1,    54,    72,
+      -1,    54,    70,    -1,    54,    30,    -1,    54,    40,    -1,
+      18,    74,    77,    30,    -1,    19,    73,    30,    -1,    17,
+      30,    -1,    20,    25,    77,    30,    -1,    -1,    56,    39,
+      -1,    14,    78,    76,    -1,    75,    -1,    57,    60,    58,
+      -1,    -1,    60,    39,    -1,    60,    64,    -1,    60,    53,
+      -1,     4,    74,    30,    -1,    61,    71,    -1,    75,    -1,
+      62,    65,    63,    -1,    -1,    65,    39,    -1,    65,    64,
+      -1,    65,    53,    -1,     6,    74,    30,    -1,     9,    74,
+      30,    -1,    67,    71,    -1,    12,    30,    -1,    69,    13,
+      -1,    -1,    71,    72,    -1,    71,    30,    -1,    71,    40,
+      -1,    16,    24,    78,    30,    -1,    -1,    74,    77,    -1,
+      25,    -1,    26,    -1,     5,    30,    -1,     8,    30,    -1,
+      15,    30,    -1,    30,    -1,    76,    30,    -1,    -1,    14,
+      78,    -1,    79,    -1,    79,    33,    79,    -1,    79,    27,
+      79,    -1,    29,    78,    28,    -1,    34,    78,    -1,    78,
+      31,    78,    -1,    78,    32,    78,    -1,    25,    -1,    26,
+      -1,    -1,    25,    -1
 };
 
 /* YYRLINE[YYN] -- source line where rule number YYN was defined.  */
@@ -566,7 +568,8 @@ static const yytype_uint16 yyrline[] =
      339,   344,   351,   356,   364,   367,   369,   370,   371,   374,
      382,   389,   396,   402,   409,   411,   412,   413,   416,   424,
      426,   431,   432,   435,   436,   437,   441,   442,   445,   446,
-     449,   450,   451,   452,   453,   454,   455,   458,   459
+     449,   450,   451,   452,   453,   454,   455,   458,   459,   462,
+     463
 };
 #endif
 
@@ -590,7 +593,8 @@ static const char *const yytname[] =
   "if_entry", "if_end", "if_stmt", "if_block", "menu", "menu_entry",
   "menu_end", "menu_stmt", "menu_block", "source_stmt", "comment",
   "comment_stmt", "help_start", "help", "depends_list", "depends",
-  "prompt_stmt_opt", "prompt", "end", "nl", "if_expr", "expr", "symbol", 0
+  "prompt_stmt_opt", "prompt", "end", "nl", "if_expr", "expr", "symbol",
+  "word_opt", 0
 };
 #endif
 
@@ -619,7 +623,8 @@ static const yytype_uint8 yyr1[] =
       60,    61,    62,    63,    64,    65,    65,    65,    65,    66,
       67,    68,    69,    70,    71,    71,    71,    71,    72,    73,
       73,    74,    74,    75,    75,    75,    76,    76,    77,    77,
-      78,    78,    78,    78,    78,    78,    78,    79,    79
+      78,    78,    78,    78,    78,    78,    78,    79,    79,    80,
+      80
 };
 
 /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN.  */
@@ -629,13 +634,14 @@ static const yytype_uint8 yyr2[] =
        4,     3,     1,     1,     1,     1,     1,     1,     1,     1,
        1,     1,     1,     1,     1,     3,     2,     3,     2,     3,
        2,     0,     2,     2,     2,     2,     2,     2,     3,     4,
-       4,     4,     5,     3,     0,     3,     0,     2,     2,     2,
+       4,     4,     5,     3,     0,     3,     0,     2,     3,     2,
        1,     3,     0,     2,     2,     2,     2,     2,     4,     3,
        2,     4,     0,     2,     3,     1,     3,     0,     2,     2,
        2,     3,     2,     1,     3,     0,     2,     2,     2,     3,
        3,     2,     2,     2,     0,     2,     2,     2,     4,     0,
        2,     1,     1,     2,     2,     2,     1,     2,     0,     2,
-       1,     3,     3,     3,     2,     3,     3,     1,     1
+       1,     3,     3,     3,     2,     3,     3,     1,     1,     0,
+       1
 };
 
 /* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
@@ -643,69 +649,69 @@ static const yytype_uint8 yyr2[] =
    means the default is an error.  */
 static const yytype_uint8 yydefact[] =
 {
-       3,     0,     0,     1,     0,     0,     0,     0,     0,     0,
+       3,     0,     0,     1,     0,     0,     0,     0,     0,   109,
        0,     0,     0,     0,     0,     0,    12,    16,    13,    14,
       18,    15,    17,     0,    19,     0,     4,    31,    22,    31,
       23,    52,    62,     5,    67,    20,    84,    75,     6,    24,
       84,    21,     8,    11,    91,    92,     0,     0,    93,     0,
-      48,    94,     0,     0,     0,   107,   108,     0,     0,     0,
-     100,    95,     0,     0,     0,     0,     0,     0,     0,     0,
-       0,     0,    96,     7,    71,    79,    80,    27,    29,     0,
-     104,     0,     0,    64,     0,     0,     9,    10,     0,     0,
-       0,     0,    89,     0,     0,     0,    44,     0,    37,    36,
-      32,    33,     0,    35,    34,     0,     0,    89,     0,    56,
-      57,    53,    55,    54,    63,    51,    50,    68,    70,    66,
-      69,    65,    86,    87,    85,    76,    78,    74,    77,    73,
-      97,   103,   105,   106,   102,   101,    26,    82,     0,    98,
-       0,    98,    98,    98,     0,     0,     0,    83,    60,    98,
-       0,    98,     0,     0,     0,    38,    90,     0,     0,    98,
-      46,    43,    25,     0,    59,     0,    88,    99,    39,    40,
-      41,     0,     0,    45,    58,    61,    42,    47
+     110,     0,    94,     0,     0,     0,   107,   108,     0,     0,
+       0,   100,    95,     0,     0,     0,     0,     0,     0,     0,
+       0,     0,     0,    96,     7,    71,    79,    48,    80,    27,
+      29,     0,   104,     0,     0,    64,     0,     0,     9,    10,
+       0,     0,     0,     0,    89,     0,     0,     0,    44,     0,
+      37,    36,    32,    33,     0,    35,    34,     0,     0,    89,
+       0,    56,    57,    53,    55,    54,    63,    51,    50,    68,
+      70,    66,    69,    65,    86,    87,    85,    76,    78,    74,
+      77,    73,    97,   103,   105,   106,   102,   101,    26,    82,
+       0,    98,     0,    98,    98,    98,     0,     0,     0,    83,
+      60,    98,     0,    98,     0,     0,     0,    38,    90,     0,
+       0,    98,    46,    43,    25,     0,    59,     0,    88,    99,
+      39,    40,    41,     0,     0,    45,    58,    61,    42,    47
 };
 
 /* YYDEFGOTO[NTERM-NUM].  */
 static const yytype_int16 yydefgoto[] =
 {
-      -1,     1,     2,    25,    26,    99,    27,    28,    29,    30,
-      64,   100,   101,   145,   173,    31,    32,   115,    33,    66,
-     111,    67,    34,   119,    35,    68,    36,    37,   127,    38,
-      70,    39,    40,    41,   102,   103,    69,   104,   140,   141,
-      42,    73,   154,    59,    60
+      -1,     1,     2,    25,    26,   101,    27,    28,    29,    30,
+      65,   102,   103,   147,   175,    31,    32,   117,    33,    67,
+     113,    68,    34,   121,    35,    69,    36,    37,   129,    38,
+      71,    39,    40,    41,   104,   105,    70,   106,   142,   143,
+      42,    74,   156,    60,    61,    51
 };
 
 /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
    STATE-NUM.  */
-#define YYPACT_NINF -78
+#define YYPACT_NINF -80
 static const yytype_int16 yypact[] =
 {
-     -78,    33,   130,   -78,   -28,    73,    73,     7,    73,    36,
-      41,    73,    26,    52,    -4,    58,   -78,   -78,   -78,   -78,
-     -78,   -78,   -78,    90,   -78,    94,   -78,   -78,   -78,   -78,
-     -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,
-     -78,   -78,   -78,   -78,   -78,   -78,    74,    85,   -78,    96,
-     -78,   -78,   131,   134,   147,   -78,   -78,    -4,    -4,   193,
-     -10,   -78,   162,   164,    38,   102,    64,   148,     5,   192,
-       5,   165,   -78,   174,   -78,   -78,   -78,   -78,   -78,    65,
-     -78,    -4,    -4,   174,   103,   103,   -78,   -78,   175,   185,
-     197,    73,    73,    -4,   194,   103,   -78,   231,   -78,   -78,
-     -78,   -78,   220,   -78,   -78,   204,    73,    73,   210,   -78,
-     -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,
-     -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,
-     -78,   -78,   205,   -78,   -78,   -78,   -78,   -78,    -4,   222,
-     208,   222,   195,   222,   103,     2,   209,   -78,   -78,   222,
-     211,   222,   199,    -4,   212,   -78,   -78,   213,   214,   222,
-     207,   -78,   -78,   215,   -78,   216,   -78,   111,   -78,   -78,
-     -78,   217,    73,   -78,   -78,   -78,   -78,   -78
+     -80,     2,   132,   -80,   -13,    -1,    -1,    -2,    -1,     9,
+      33,    -1,    27,    40,    -3,    38,   -80,   -80,   -80,   -80,
+     -80,   -80,   -80,    71,   -80,    77,   -80,   -80,   -80,   -80,
+     -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,
+     -80,   -80,   -80,   -80,   -80,   -80,    57,    61,   -80,    63,
+     -80,    76,   -80,    87,   101,   133,   -80,   -80,    -3,    -3,
+     195,    -6,   -80,   136,   149,    39,   104,    65,   150,     5,
+     194,     5,   167,   -80,   176,   -80,   -80,   -80,   -80,   -80,
+     -80,    68,   -80,    -3,    -3,   176,    72,    72,   -80,   -80,
+     177,   187,    78,    -1,    -1,    -3,   196,    72,   -80,   222,
+     -80,   -80,   -80,   -80,   221,   -80,   -80,   205,    -1,    -1,
+     211,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,
+     -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,
+     -80,   -80,   -80,   -80,   206,   -80,   -80,   -80,   -80,   -80,
+      -3,   223,   209,   223,   197,   223,    72,     7,   210,   -80,
+     -80,   223,   212,   223,   201,    -3,   213,   -80,   -80,   214,
+     215,   223,   208,   -80,   -80,   216,   -80,   217,   -80,   113,
+     -80,   -80,   -80,   218,    -1,   -80,   -80,   -80,   -80,   -80
 };
 
 /* YYPGOTO[NTERM-NUM].  */
 static const yytype_int16 yypgoto[] =
 {
-     -78,   -78,   -78,   -78,   121,   -35,   -78,   -78,   -78,   -78,
-     219,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -44,   -78,
-     -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,   -78,    -6,
-     -78,   -78,   -78,   -78,   -78,   183,   218,    21,   143,    -5,
-     146,   196,    69,   -53,   -77
+     -80,   -80,   -80,   -80,   122,   -34,   -80,   -80,   -80,   -80,
+     220,   -80,   -80,   -80,   -80,   -80,   -80,   -80,    59,   -80,
+     -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   -80,   125,
+     -80,   -80,   -80,   -80,   -80,   183,   219,    22,   142,    -5,
+     147,   192,    69,   -54,   -79,   -80
 };
 
 /* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
@@ -715,62 +721,62 @@ static const yytype_int16 yypgoto[] =
 #define YYTABLE_NINF -82
 static const yytype_int16 yytable[] =
 {
-      46,    47,    43,    49,    79,    80,    52,   134,   135,     6,
-       7,     8,     9,    10,    11,    12,    13,    84,   144,    14,
-      15,    55,    56,    85,   118,    57,   126,   160,   132,   133,
-      58,   110,   161,     3,   123,    24,   123,    48,   -28,    88,
-     142,   -28,   -28,   -28,   -28,   -28,   -28,   -28,   -28,   -28,
-      89,    53,   -28,   -28,    90,   -28,    91,    92,    93,    94,
-      95,    96,   120,    97,   128,    88,    50,   159,    98,   -49,
-     -49,    51,   -49,   -49,   -49,   -49,    89,    54,   -49,   -49,
-      90,   105,   106,   107,   108,   152,   139,   113,    61,    97,
-     124,    62,   124,   131,   109,    63,    81,    82,    44,    45,
-     167,   149,   -30,    88,    72,   -30,   -30,   -30,   -30,   -30,
-     -30,   -30,   -30,   -30,    89,    74,   -30,   -30,    90,   -30,
-      91,    92,    93,    94,    95,    96,    75,    97,    55,    56,
-      -2,     4,    98,     5,     6,     7,     8,     9,    10,    11,
-      12,    13,    81,    82,    14,    15,    16,    17,    18,    19,
-      20,    21,    22,     7,     8,    23,    10,    11,    12,    13,
-      24,    76,    14,    15,    77,   -81,    88,   177,   -81,   -81,
-     -81,   -81,   -81,   -81,   -81,   -81,   -81,    78,    24,   -81,
-     -81,    90,   -81,   -81,   -81,   -81,   -81,   -81,   114,   117,
-      97,   125,    86,    88,    87,   122,   -72,   -72,   -72,   -72,
-     -72,   -72,   -72,   -72,   130,   136,   -72,   -72,    90,   153,
-     156,   157,   158,   116,   121,   137,   129,    97,   163,   143,
-     165,   138,   122,    72,    81,    82,    81,    82,   171,   166,
-      81,    82,   146,   147,   148,   151,   153,    82,   155,   162,
-     172,   164,   168,   169,   170,   174,   175,   176,    65,   112,
-     150,     0,     0,     0,     0,    83,     0,     0,    71
+      46,    47,     3,    49,    81,    82,    53,   136,   137,     6,
+       7,     8,     9,    10,    11,    12,    13,    43,   146,    14,
+      15,    86,    56,    57,    44,    45,    58,    87,    48,   134,
+     135,    59,   162,   112,    50,    24,   125,   163,   125,   -28,
+      90,   144,   -28,   -28,   -28,   -28,   -28,   -28,   -28,   -28,
+     -28,    91,    54,   -28,   -28,    92,   -28,    93,    94,    95,
+      96,    97,    98,    52,    99,    55,    90,   161,    62,   100,
+     -49,   -49,    63,   -49,   -49,   -49,   -49,    91,    64,   -49,
+     -49,    92,   107,   108,   109,   110,   154,    73,   141,   115,
+      99,    75,   126,    76,   126,   111,   133,    56,    57,    83,
+      84,   169,   140,   151,   -30,    90,    77,   -30,   -30,   -30,
+     -30,   -30,   -30,   -30,   -30,   -30,    91,    78,   -30,   -30,
+      92,   -30,    93,    94,    95,    96,    97,    98,   120,    99,
+     128,    79,    -2,     4,   100,     5,     6,     7,     8,     9,
+      10,    11,    12,    13,    83,    84,    14,    15,    16,    17,
+      18,    19,    20,    21,    22,     7,     8,    23,    10,    11,
+      12,    13,    24,    80,    14,    15,    88,   -81,    90,   179,
+     -81,   -81,   -81,   -81,   -81,   -81,   -81,   -81,   -81,    89,
+      24,   -81,   -81,    92,   -81,   -81,   -81,   -81,   -81,   -81,
+     116,   119,    99,   127,   122,    90,   130,   124,   -72,   -72,
+     -72,   -72,   -72,   -72,   -72,   -72,   132,   138,   -72,   -72,
+      92,   155,   158,   159,   160,   118,   123,   139,   131,    99,
+     165,   145,   167,   148,   124,    73,    83,    84,    83,    84,
+     173,   168,    83,    84,   149,   150,   153,   155,    84,   157,
+     164,   174,   166,   170,   171,   172,   176,   177,   178,    66,
+     114,   152,    85,     0,     0,     0,     0,     0,     0,    72
 };
 
 static const yytype_int16 yycheck[] =
 {
-       5,     6,    30,     8,    57,    58,    11,    84,    85,     4,
-       5,     6,     7,     8,     9,    10,    11,    27,    95,    14,
-      15,    25,    26,    33,    68,    29,    70,    25,    81,    82,
-      34,    66,    30,     0,    69,    30,    71,    30,     0,     1,
-      93,     3,     4,     5,     6,     7,     8,     9,    10,    11,
-      12,    25,    14,    15,    16,    17,    18,    19,    20,    21,
-      22,    23,    68,    25,    70,     1,    30,   144,    30,     5,
-       6,    30,     8,     9,    10,    11,    12,    25,    14,    15,
-      16,    17,    18,    19,    20,   138,    91,    66,    30,    25,
-      69,     1,    71,    28,    30,     1,    31,    32,    25,    26,
-     153,   106,     0,     1,    30,     3,     4,     5,     6,     7,
-       8,     9,    10,    11,    12,    30,    14,    15,    16,    17,
-      18,    19,    20,    21,    22,    23,    30,    25,    25,    26,
-       0,     1,    30,     3,     4,     5,     6,     7,     8,     9,
-      10,    11,    31,    32,    14,    15,    16,    17,    18,    19,
-      20,    21,    22,     5,     6,    25,     8,     9,    10,    11,
-      30,    30,    14,    15,    30,     0,     1,   172,     3,     4,
-       5,     6,     7,     8,     9,    10,    11,    30,    30,    14,
-      15,    16,    17,    18,    19,    20,    21,    22,    67,    68,
-      25,    70,    30,     1,    30,    30,     4,     5,     6,     7,
-       8,     9,    10,    11,    30,    30,    14,    15,    16,    14,
-     141,   142,   143,    67,    68,    30,    70,    25,   149,    25,
-     151,    24,    30,    30,    31,    32,    31,    32,   159,    30,
-      31,    32,     1,    13,    30,    25,    14,    32,    30,    30,
-      33,    30,    30,    30,    30,    30,    30,    30,    29,    66,
-     107,    -1,    -1,    -1,    -1,    59,    -1,    -1,    40
+       5,     6,     0,     8,    58,    59,    11,    86,    87,     4,
+       5,     6,     7,     8,     9,    10,    11,    30,    97,    14,
+      15,    27,    25,    26,    25,    26,    29,    33,    30,    83,
+      84,    34,    25,    67,    25,    30,    70,    30,    72,     0,
+       1,    95,     3,     4,     5,     6,     7,     8,     9,    10,
+      11,    12,    25,    14,    15,    16,    17,    18,    19,    20,
+      21,    22,    23,    30,    25,    25,     1,   146,    30,    30,
+       5,     6,     1,     8,     9,    10,    11,    12,     1,    14,
+      15,    16,    17,    18,    19,    20,   140,    30,    93,    67,
+      25,    30,    70,    30,    72,    30,    28,    25,    26,    31,
+      32,   155,    24,   108,     0,     1,    30,     3,     4,     5,
+       6,     7,     8,     9,    10,    11,    12,    30,    14,    15,
+      16,    17,    18,    19,    20,    21,    22,    23,    69,    25,
+      71,    30,     0,     1,    30,     3,     4,     5,     6,     7,
+       8,     9,    10,    11,    31,    32,    14,    15,    16,    17,
+      18,    19,    20,    21,    22,     5,     6,    25,     8,     9,
+      10,    11,    30,    30,    14,    15,    30,     0,     1,   174,
+       3,     4,     5,     6,     7,     8,     9,    10,    11,    30,
+      30,    14,    15,    16,    17,    18,    19,    20,    21,    22,
+      68,    69,    25,    71,    69,     1,    71,    30,     4,     5,
+       6,     7,     8,     9,    10,    11,    30,    30,    14,    15,
+      16,    14,   143,   144,   145,    68,    69,    30,    71,    25,
+     151,    25,   153,     1,    30,    30,    31,    32,    31,    32,
+     161,    30,    31,    32,    13,    30,    25,    14,    32,    30,
+      30,    33,    30,    30,    30,    30,    30,    30,    30,    29,
+      67,   109,    60,    -1,    -1,    -1,    -1,    -1,    -1,    40
 };
 
 /* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
@@ -782,19 +788,19 @@ static const yytype_uint8 yystos[] =
       20,    21,    22,    25,    30,    38,    39,    41,    42,    43,
       44,    50,    51,    53,    57,    59,    61,    62,    64,    66,
       67,    68,    75,    30,    25,    26,    74,    74,    30,    74,
-      30,    30,    74,    25,    25,    25,    26,    29,    34,    78,
-      79,    30,     1,     1,    45,    45,    54,    56,    60,    71,
-      65,    71,    30,    76,    30,    30,    30,    30,    30,    78,
-      78,    31,    32,    76,    27,    33,    30,    30,     1,    12,
-      16,    18,    19,    20,    21,    22,    23,    25,    30,    40,
-      46,    47,    69,    70,    72,    17,    18,    19,    20,    30,
-      40,    55,    70,    72,    39,    52,    75,    39,    53,    58,
-      64,    75,    30,    40,    72,    39,    53,    63,    64,    75,
-      30,    28,    78,    78,    79,    79,    30,    30,    24,    74,
-      73,    74,    78,    25,    79,    48,     1,    13,    30,    74,
-      73,    25,    78,    14,    77,    30,    77,    77,    77,    79,
-      25,    30,    30,    77,    30,    77,    30,    78,    30,    30,
-      30,    77,    33,    49,    30,    30,    30,    74
+      25,    80,    30,    74,    25,    25,    25,    26,    29,    34,
+      78,    79,    30,     1,     1,    45,    45,    54,    56,    60,
+      71,    65,    71,    30,    76,    30,    30,    30,    30,    30,
+      30,    78,    78,    31,    32,    76,    27,    33,    30,    30,
+       1,    12,    16,    18,    19,    20,    21,    22,    23,    25,
+      30,    40,    46,    47,    69,    70,    72,    17,    18,    19,
+      20,    30,    40,    55,    70,    72,    39,    52,    75,    39,
+      53,    58,    64,    75,    30,    40,    72,    39,    53,    63,
+      64,    75,    30,    28,    78,    78,    79,    79,    30,    30,
+      24,    74,    73,    74,    78,    25,    79,    48,     1,    13,
+      30,    74,    73,    25,    78,    14,    77,    30,    77,    77,
+      77,    79,    25,    30,    30,    77,    30,    77,    30,    78,
+      30,    30,    30,    77,    33,    49,    30,    30,    30,    74
 };
 
 #define yyerrok                (yyerrstatus = 0)
@@ -1781,8 +1787,8 @@ yyreduce:
   case 48:
 
     {
-       struct symbol *sym = sym_lookup(NULL, 0);
-       sym->flags |= SYMBOL_CHOICE;
+       struct symbol *sym = sym_lookup((yyvsp[(2) - (3)].string), SYMBOL_CHOICE);
+       sym->flags |= SYMBOL_AUTO;
        menu_add_entry(sym);
        menu_add_expr(P_CHOICE, NULL, NULL);
        printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
@@ -2014,7 +2020,12 @@ yyreduce:
 
   case 108:
 
-    { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), 1); free((yyvsp[(1) - (1)].string)); ;}
+    { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), SYMBOL_CONST); free((yyvsp[(1) - (1)].string)); ;}
+    break;
+
+  case 109:
+
+    { (yyval.string) = NULL; ;}
     break;
 
 
index d9b96ba8e38caf6e3cf083b012272bd4ef041827..9710b82466f2b13ecaa576b969dcb5568257b065 100644 (file)
@@ -91,7 +91,7 @@ static struct menu *current_menu, *current_entry;
 %type <id> end
 %type <id> option_name
 %type <menu> if_entry menu_entry choice_entry
-%type <string> symbol_option_arg
+%type <string> symbol_option_arg word_opt
 
 %destructor {
        fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -239,10 +239,10 @@ symbol_option_arg:
 
 /* choice entry */
 
-choice: T_CHOICE T_EOL
+choice: T_CHOICE word_opt T_EOL
 {
-       struct symbol *sym = sym_lookup(NULL, 0);
-       sym->flags |= SYMBOL_CHOICE;
+       struct symbol *sym = sym_lookup($2, SYMBOL_CHOICE);
+       sym->flags |= SYMBOL_AUTO;
        menu_add_entry(sym);
        menu_add_expr(P_CHOICE, NULL, NULL);
        printd(DEBUG_PARSE, "%s:%d:choice\n", zconf_curname(), zconf_lineno());
@@ -456,9 +456,12 @@ expr:        symbol                                { $$ = expr_alloc_symbol($1); }
 ;
 
 symbol:          T_WORD        { $$ = sym_lookup($1, 0); free($1); }
-       | T_WORD_QUOTE  { $$ = sym_lookup($1, 1); free($1); }
+       | T_WORD_QUOTE  { $$ = sym_lookup($1, SYMBOL_CONST); free($1); }
 ;
 
+word_opt: /* empty */                  { $$ = NULL; }
+       | T_WORD
+
 %%
 
 void conf_parse(const char *name)
index 263d04ab2d947398ee443f3b4ed1401700e0c307..83cee18a02e922130fae1bf9d8dff87d4f6169d8 100755 (executable)
@@ -5,7 +5,7 @@ use strict;
 ## Copyright (c) 1998 Michael Zucchi, All Rights Reserved        ##
 ## Copyright (C) 2000, 1  Tim Waugh <twaugh@redhat.com>          ##
 ## Copyright (C) 2001  Simon Huggins                             ##
-## Copyright (C) 2005-2007  Randy Dunlap                         ##
+## Copyright (C) 2005-2008  Randy Dunlap                         ##
 ##                                                              ##
 ## #define enhancements by Armin Kuster <akuster@mvista.com>    ##
 ## Copyright (c) 2000 MontaVista Software, Inc.                         ##
@@ -366,6 +366,7 @@ foreach my $pattern (keys %highlights) {
 # dumps section contents to arrays/hashes intended for that purpose.
 #
 sub dump_section {
+    my $file = shift;
     my $name = shift;
     my $contents = join "\n", @_;
 
@@ -379,6 +380,10 @@ sub dump_section {
        $parameterdescs{$name} = $contents;
     } else {
 #      print STDERR "other section '$name' = '$contents'\n";
+       if (defined($sections{$name}) && ($sections{$name} ne "")) {
+               print STDERR "Error(${file}:$.): duplicate section name '$name'\n";
+               ++$errors;
+       }
        $sections{$name} = $contents;
        push @sectionlist, $name;
     }
@@ -388,6 +393,7 @@ sub dump_section {
 # dump DOC: section after checking that it should go out
 #
 sub dump_doc_section {
+    my $file = shift;
     my $name = shift;
     my $contents = join "\n", @_;
 
@@ -399,7 +405,7 @@ sub dump_doc_section {
        ( $function_only == 1 && defined($function_table{$name})) ||
        ( $function_only == 2 && !defined($function_table{$name})))
     {
-       dump_section $name, $contents;
+       dump_section($file, $name, $contents);
        output_blockhead({'sectionlist' => \@sectionlist,
                          'sections' => \%sections,
                          'module' => $modulename,
@@ -1923,7 +1929,7 @@ sub process_file($) {
                        print STDERR "Warning(${file}:$.): contents before sections\n";
                        ++$warnings;
                    }
-                   dump_section($section, xml_escape($contents));
+                   dump_section($file, $section, xml_escape($contents));
                    $section = $section_default;
                }
 
@@ -1940,10 +1946,15 @@ sub process_file($) {
            } elsif (/$doc_end/) {
 
                if ($contents ne "") {
-                   dump_section($section, xml_escape($contents));
+                   dump_section($file, $section, xml_escape($contents));
                    $section = $section_default;
                    $contents = "";
                }
+               # look for doc_com + <text> + doc_end:
+               if ($_ =~ m'\s*\*\s*[a-zA-Z_0-9:\.]+\*/') {
+                   print STDERR "Warning(${file}:$.): suspicious ending line: $_";
+                   ++$warnings;
+               }
 
                $prototype = "";
                $state = 3;
@@ -1954,7 +1965,7 @@ sub process_file($) {
                # @parameter line to signify start of description
                if ($1 eq "" &&
                        ($section =~ m/^@/ || $section eq $section_context)) {
-                   dump_section($section, xml_escape($contents));
+                   dump_section($file, $section, xml_escape($contents));
                    $section = $section_default;
                    $contents = "";
                } else {
@@ -1974,7 +1985,7 @@ sub process_file($) {
        } elsif ($state == 4) {
                # Documentation block
                if (/$doc_block/) {
-                       dump_doc_section($section, xml_escape($contents));
+                       dump_doc_section($file, $section, xml_escape($contents));
                        $contents = "";
                        $function = "";
                        %constants = ();
@@ -1992,7 +2003,7 @@ sub process_file($) {
                }
                elsif (/$doc_end/)
                {
-                       dump_doc_section($section, xml_escape($contents));
+                       dump_doc_section($file, $section, xml_escape($contents));
                        $contents = "";
                        $function = "";
                        %constants = ();
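With dump_section() and dump_doc_section() now receiving the file name, a repeated section name is reported as an error with file and line, and a text-plus-*/ ending line draws a warning. A hedged sketch of a kernel-doc comment that would now trip the duplicate-section check (the function is hypothetical):

/**
 * example_fn - hedged sketch of a kernel-doc comment (hypothetical function)
 * @arg: an argument
 *
 * Description: first version of this section.
 *
 * Description: repeating a section name now makes dump_section() print
 * "Error(<file>:<line>): duplicate section name 'Description'".
 */
int example_fn(int arg);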
index 769b69db89c13b6f6af68abbfe3167929d81d5e8..e04c4218cb5200d415a8933e84478e68b9cceee6 100644 (file)
@@ -576,6 +576,15 @@ static int do_virtio_entry(const char *filename, struct virtio_device_id *id,
        return 1;
 }
 
+/* Looks like: i2c:S */
+static int do_i2c_entry(const char *filename, struct i2c_device_id *id,
+                       char *alias)
+{
+       sprintf(alias, I2C_MODULE_PREFIX "%s", id->name);
+
+       return 1;
+}
+
 /* Ignore any prefix, eg. v850 prepends _ */
 static inline int sym_is(const char *symbol, const char *name)
 {
@@ -704,6 +713,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
                do_table(symval, sym->st_size,
                         sizeof(struct virtio_device_id), "virtio",
                         do_virtio_entry, mod);
+       else if (sym_is(symname, "__mod_i2c_device_table"))
+               do_table(symval, sym->st_size,
+                        sizeof(struct i2c_device_id), "i2c",
+                        do_i2c_entry, mod);
        free(zeros);
 }
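The do_i2c_entry() handler emits an "i2c:<name>" alias for every entry of a driver's i2c_device_id table. A hedged sketch of the driver side that feeds __mod_i2c_device_table (driver and device names invented, not part of this commit):

/* Hedged sketch: hypothetical i2c driver fragment.  MODULE_DEVICE_TABLE()
 * emits __mod_i2c_device_table, which modpost now walks with do_i2c_entry()
 * to generate MODULE_ALIAS("i2c:example-sensor"). */
#include <linux/mod_devicetable.h>
#include <linux/module.h>

static const struct i2c_device_id example_i2c_ids[] = {
	{ "example-sensor", 0 },	/* becomes alias "i2c:example-sensor" */
	{ }				/* terminating entry */
};
MODULE_DEVICE_TABLE(i2c, example_i2c_ids);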
 
index f8b42ab0724be194a258a00e4e24f814ad3e7475..757294b4f3222d28dfeef6415f9784a497b23c84 100644 (file)
@@ -1552,10 +1552,10 @@ static void read_symbols(char *modname)
        }
 
        license = get_modinfo(info.modinfo, info.modinfo_len, "license");
-       if (!license && !is_vmlinux(modname))
-               fatal("modpost: missing MODULE_LICENSE() in %s\n"
-                     "see include/linux/module.h for "
-                     "more information\n", modname);
+       if (info.modinfo && !license && !is_vmlinux(modname))
+               warn("modpost: missing MODULE_LICENSE() in %s\n"
+                    "see include/linux/module.h for "
+                    "more information\n", modname);
        while (license) {
                if (license_is_gpl_compatible(license))
                        mod->gpl_compatible = 1;
index 9e8b025250144ce739e80583a095b3059d89110c..7ef1107a72875412e3cdc52f48fe609f7dfbc764 100644 (file)
@@ -18,3 +18,4 @@ obj-$(CONFIG_SECURITY_SELINUX)                += selinux/built-in.o
 obj-$(CONFIG_SECURITY_SMACK)           += commoncap.o smack/built-in.o
 obj-$(CONFIG_SECURITY_CAPABILITIES)    += commoncap.o capability.o
 obj-$(CONFIG_SECURITY_ROOTPLUG)                += commoncap.o root_plug.o
+obj-$(CONFIG_CGROUP_DEVICE)            += device_cgroup.o
index 2c6e06d18fabfe7d9f10378f0cf9b353f7299962..38ac54e3aed14aebfe84f0b8e11ce4802dae845c 100644 (file)
@@ -44,6 +44,7 @@ static struct security_operations capability_ops = {
        .task_setioprio =               cap_task_setioprio,
        .task_setnice =                 cap_task_setnice,
        .task_post_setuid =             cap_task_post_setuid,
+       .task_prctl =                   cap_task_prctl,
        .task_reparent_to_init =        cap_task_reparent_to_init,
 
        .syslog =                       cap_syslog,
index 852905789cafbc6879fae3e63edb6b2144f1a8a4..5edabc7542ae00c46e568456285f9bf5f369f2e6 100644 (file)
 #include <linux/hugetlb.h>
 #include <linux/mount.h>
 #include <linux/sched.h>
-
-/* Global security state */
-
-unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
-EXPORT_SYMBOL(securebits);
+#include <linux/prctl.h>
+#include <linux/securebits.h>
 
 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
 {
@@ -368,7 +365,7 @@ void cap_bprm_apply_creds (struct linux_binprm *bprm, int unsafe)
 
        /* AUD: Audit candidate if current->cap_effective is set */
 
-       current->keep_capabilities = 0;
+       current->securebits &= ~issecure_mask(SECURE_KEEP_CAPS);
 }
 
 int cap_bprm_secureexec (struct linux_binprm *bprm)
@@ -386,8 +383,8 @@ int cap_bprm_secureexec (struct linux_binprm *bprm)
                current->egid != current->gid);
 }
 
-int cap_inode_setxattr(struct dentry *dentry, char *name, void *value,
-                      size_t size, int flags)
+int cap_inode_setxattr(struct dentry *dentry, const char *name,
+                      const void *value, size_t size, int flags)
 {
        if (!strcmp(name, XATTR_NAME_CAPS)) {
                if (!capable(CAP_SETFCAP))
@@ -400,7 +397,7 @@ int cap_inode_setxattr(struct dentry *dentry, char *name, void *value,
        return 0;
 }
 
-int cap_inode_removexattr(struct dentry *dentry, char *name)
+int cap_inode_removexattr(struct dentry *dentry, const char *name)
 {
        if (!strcmp(name, XATTR_NAME_CAPS)) {
                if (!capable(CAP_SETFCAP))
@@ -448,7 +445,7 @@ static inline void cap_emulate_setxuid (int old_ruid, int old_euid,
 {
        if ((old_ruid == 0 || old_euid == 0 || old_suid == 0) &&
            (current->uid != 0 && current->euid != 0 && current->suid != 0) &&
-           !current->keep_capabilities) {
+           !issecure(SECURE_KEEP_CAPS)) {
                cap_clear (current->cap_permitted);
                cap_clear (current->cap_effective);
        }
@@ -547,7 +544,7 @@ int cap_task_setnice (struct task_struct *p, int nice)
  * this task could get inconsistent info.  There can be no
  * racing writer bc a task can only change its own caps.
  */
-long cap_prctl_drop(unsigned long cap)
+static long cap_prctl_drop(unsigned long cap)
 {
        if (!capable(CAP_SETPCAP))
                return -EPERM;
@@ -556,6 +553,7 @@ long cap_prctl_drop(unsigned long cap)
        cap_lower(current->cap_bset, cap);
        return 0;
 }
+
 #else
 int cap_task_setscheduler (struct task_struct *p, int policy,
                           struct sched_param *lp)
@@ -572,12 +570,99 @@ int cap_task_setnice (struct task_struct *p, int nice)
 }
 #endif
 
+int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+                  unsigned long arg4, unsigned long arg5, long *rc_p)
+{
+       long error = 0;
+
+       switch (option) {
+       case PR_CAPBSET_READ:
+               if (!cap_valid(arg2))
+                       error = -EINVAL;
+               else
+                       error = !!cap_raised(current->cap_bset, arg2);
+               break;
+#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
+       case PR_CAPBSET_DROP:
+               error = cap_prctl_drop(arg2);
+               break;
+
+       /*
+        * The next four prctl's remain to assist with transitioning a
+        * system from legacy UID=0 based privilege (when filesystem
+        * capabilities are not in use) to a system using filesystem
+        * capabilities only - as the POSIX.1e draft intended.
+        *
+        * Note:
+        *
+        *  PR_SET_SECUREBITS =
+        *      issecure_mask(SECURE_KEEP_CAPS_LOCKED)
+        *    | issecure_mask(SECURE_NOROOT)
+        *    | issecure_mask(SECURE_NOROOT_LOCKED)
+        *    | issecure_mask(SECURE_NO_SETUID_FIXUP)
+        *    | issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED)
+        *
+        * will ensure that the current process and all of its
+        * children will be locked into a pure
+        * capability-based-privilege environment.
+        */
+       case PR_SET_SECUREBITS:
+               if ((((current->securebits & SECURE_ALL_LOCKS) >> 1)
+                    & (current->securebits ^ arg2))                  /*[1]*/
+                   || ((current->securebits & SECURE_ALL_LOCKS
+                        & ~arg2))                                    /*[2]*/
+                   || (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/
+                   || (cap_capable(current, CAP_SETPCAP) != 0)) {    /*[4]*/
+                       /*
+                        * [1] no changing of bits that are locked
+                        * [2] no unlocking of locks
+                        * [3] no setting of unsupported bits
+                        * [4] doing anything requires privilege (go read about
+                        *     the "sendmail capabilities bug")
+                        */
+                       error = -EPERM;  /* cannot change a locked bit */
+               } else {
+                       current->securebits = arg2;
+               }
+               break;
+       case PR_GET_SECUREBITS:
+               error = current->securebits;
+               break;
+
+#endif /* def CONFIG_SECURITY_FILE_CAPABILITIES */
+
+       case PR_GET_KEEPCAPS:
+               if (issecure(SECURE_KEEP_CAPS))
+                       error = 1;
+               break;
+       case PR_SET_KEEPCAPS:
+               if (arg2 > 1) /* Note, we rely on arg2 being unsigned here */
+                       error = -EINVAL;
+               else if (issecure(SECURE_KEEP_CAPS_LOCKED))
+                       error = -EPERM;
+               else if (arg2)
+                       current->securebits |= issecure_mask(SECURE_KEEP_CAPS);
+               else
+                       current->securebits &=
+                               ~issecure_mask(SECURE_KEEP_CAPS);
+               break;
+
+       default:
+               /* No functionality available - continue with default */
+               return 0;
+       }
+
+       /* Functionality provided */
+       *rc_p = error;
+       return 1;
+}
+
 void cap_task_reparent_to_init (struct task_struct *p)
 {
        cap_set_init_eff(p->cap_effective);
        cap_clear(p->cap_inheritable);
        cap_set_full(p->cap_permitted);
-       p->keep_capabilities = 0;
+       p->securebits = SECUREBITS_DEFAULT;
        return;
 }
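cap_task_prctl() above is what now services PR_CAPBSET_READ, PR_CAPBSET_DROP and the securebits prctls. A hedged userspace sketch, assuming kernel headers that already export the new PR_* and SECURE_* constants (the program itself is not part of this commit):

/* Hedged sketch: exercise the prctl options handled by cap_task_prctl().
 * Dropping a bounding-set capability and setting locked securebits both
 * require CAP_SETPCAP and CONFIG_SECURITY_FILE_CAPABILITIES. */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/capability.h>
#include <linux/prctl.h>
#include <linux/securebits.h>

int main(void)
{
	/* Is CAP_SYS_BOOT still in this task's bounding set? */
	printf("CAP_SYS_BOOT in bset: %d\n",
	       prctl(PR_CAPBSET_READ, CAP_SYS_BOOT, 0, 0, 0));

	/* Drop it; the change is inherited by all future children. */
	if (prctl(PR_CAPBSET_DROP, CAP_SYS_BOOT, 0, 0, 0) < 0)
		perror("PR_CAPBSET_DROP");

	/* Lock the task into pure capability-based privilege, using the
	 * bit combination documented in the PR_SET_SECUREBITS comment. */
	unsigned long bits = issecure_mask(SECURE_KEEP_CAPS_LOCKED) |
			     issecure_mask(SECURE_NOROOT) |
			     issecure_mask(SECURE_NOROOT_LOCKED) |
			     issecure_mask(SECURE_NO_SETUID_FIXUP) |
			     issecure_mask(SECURE_NO_SETUID_FIXUP_LOCKED);
	if (prctl(PR_SET_SECUREBITS, bits, 0, 0, 0) < 0)
		perror("PR_SET_SECUREBITS");
	return 0;
}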
 
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
new file mode 100644 (file)
index 0000000..4ea5836
--- /dev/null
@@ -0,0 +1,575 @@
+/*
+ * dev_cgroup.c - device cgroup subsystem
+ *
+ * Copyright 2007 IBM Corp
+ */
+
+#include <linux/device_cgroup.h>
+#include <linux/cgroup.h>
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+
+#define ACC_MKNOD 1
+#define ACC_READ  2
+#define ACC_WRITE 4
+#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)
+
+#define DEV_BLOCK 1
+#define DEV_CHAR  2
+#define DEV_ALL   4  /* this represents all devices */
+
+/*
+ * whitelist locking rules:
+ * cgroup_lock() cannot be taken under dev_cgroup->lock.
+ * dev_cgroup->lock can be taken with or without cgroup_lock().
+ *
+ * modifications always require cgroup_lock
+ * modifications to a list which is visible require the
+ *   dev_cgroup->lock *and* cgroup_lock()
+ * walking the list requires dev_cgroup->lock or cgroup_lock().
+ *
+ * reasoning: dev_whitelist_copy() needs to kmalloc, so needs
+ *   a mutex, which the cgroup_lock() is.  Since modifying
+ *   a visible list requires both locks, either lock can be
+ *   taken for walking the list.
+ */
+
+struct dev_whitelist_item {
+       u32 major, minor;
+       short type;
+       short access;
+       struct list_head list;
+};
+
+struct dev_cgroup {
+       struct cgroup_subsys_state css;
+       struct list_head whitelist;
+       spinlock_t lock;
+};
+
+static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
+{
+       return container_of(cgroup_subsys_state(cgroup, devices_subsys_id),
+                           struct dev_cgroup, css);
+}
+
+struct cgroup_subsys devices_subsys;
+
+static int devcgroup_can_attach(struct cgroup_subsys *ss,
+               struct cgroup *new_cgroup, struct task_struct *task)
+{
+       if (current != task && !capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       return 0;
+}
+
+/*
+ * called under cgroup_lock()
+ */
+static int dev_whitelist_copy(struct list_head *dest, struct list_head *orig)
+{
+       struct dev_whitelist_item *wh, *tmp, *new;
+
+       list_for_each_entry(wh, orig, list) {
+               new = kmalloc(sizeof(*wh), GFP_KERNEL);
+               if (!new)
+                       goto free_and_exit;
+               new->major = wh->major;
+               new->minor = wh->minor;
+               new->type = wh->type;
+               new->access = wh->access;
+               list_add_tail(&new->list, dest);
+       }
+
+       return 0;
+
+free_and_exit:
+       list_for_each_entry_safe(wh, tmp, dest, list) {
+               list_del(&wh->list);
+               kfree(wh);
+       }
+       return -ENOMEM;
+}
+
+/* Stupid prototype - don't bother combining existing entries */
+/*
+ * called under cgroup_lock()
+ * since the list is visible to other tasks, we need the spinlock also
+ */
+static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
+                       struct dev_whitelist_item *wh)
+{
+       struct dev_whitelist_item *whcopy;
+
+       whcopy = kmalloc(sizeof(*whcopy), GFP_KERNEL);
+       if (!whcopy)
+               return -ENOMEM;
+
+       memcpy(whcopy, wh, sizeof(*whcopy));
+       spin_lock(&dev_cgroup->lock);
+       list_add_tail(&whcopy->list, &dev_cgroup->whitelist);
+       spin_unlock(&dev_cgroup->lock);
+       return 0;
+}
+
+/*
+ * called under cgroup_lock()
+ * since the list is visible to other tasks, we need the spinlock also
+ */
+static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup,
+                       struct dev_whitelist_item *wh)
+{
+       struct dev_whitelist_item *walk, *tmp;
+
+       spin_lock(&dev_cgroup->lock);
+       list_for_each_entry_safe(walk, tmp, &dev_cgroup->whitelist, list) {
+               if (walk->type == DEV_ALL)
+                       goto remove;
+               if (walk->type != wh->type)
+                       continue;
+               if (walk->major != ~0 && walk->major != wh->major)
+                       continue;
+               if (walk->minor != ~0 && walk->minor != wh->minor)
+                       continue;
+
+remove:
+               walk->access &= ~wh->access;
+               if (!walk->access) {
+                       list_del(&walk->list);
+                       kfree(walk);
+               }
+       }
+       spin_unlock(&dev_cgroup->lock);
+}
+
+/*
+ * called from kernel/cgroup.c with cgroup_lock() held.
+ */
+static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss,
+                                               struct cgroup *cgroup)
+{
+       struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
+       struct cgroup *parent_cgroup;
+       int ret;
+
+       dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
+       if (!dev_cgroup)
+               return ERR_PTR(-ENOMEM);
+       INIT_LIST_HEAD(&dev_cgroup->whitelist);
+       parent_cgroup = cgroup->parent;
+
+       if (parent_cgroup == NULL) {
+               struct dev_whitelist_item *wh;
+               wh = kmalloc(sizeof(*wh), GFP_KERNEL);
+               if (!wh) {
+                       kfree(dev_cgroup);
+                       return ERR_PTR(-ENOMEM);
+               }
+               wh->minor = wh->major = ~0;
+               wh->type = DEV_ALL;
+               wh->access = ACC_MKNOD | ACC_READ | ACC_WRITE;
+               list_add(&wh->list, &dev_cgroup->whitelist);
+       } else {
+               parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
+               ret = dev_whitelist_copy(&dev_cgroup->whitelist,
+                               &parent_dev_cgroup->whitelist);
+               if (ret) {
+                       kfree(dev_cgroup);
+                       return ERR_PTR(ret);
+               }
+       }
+
+       spin_lock_init(&dev_cgroup->lock);
+       return &dev_cgroup->css;
+}
+
+static void devcgroup_destroy(struct cgroup_subsys *ss,
+                       struct cgroup *cgroup)
+{
+       struct dev_cgroup *dev_cgroup;
+       struct dev_whitelist_item *wh, *tmp;
+
+       dev_cgroup = cgroup_to_devcgroup(cgroup);
+       list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) {
+               list_del(&wh->list);
+               kfree(wh);
+       }
+       kfree(dev_cgroup);
+}
+
+#define DEVCG_ALLOW 1
+#define DEVCG_DENY 2
+#define DEVCG_LIST 3
+
+#define MAJMINLEN 10
+#define ACCLEN 4
+
+static void set_access(char *acc, short access)
+{
+       int idx = 0;
+       memset(acc, 0, ACCLEN);
+       if (access & ACC_READ)
+               acc[idx++] = 'r';
+       if (access & ACC_WRITE)
+               acc[idx++] = 'w';
+       if (access & ACC_MKNOD)
+               acc[idx++] = 'm';
+}
+
+static char type_to_char(short type)
+{
+       if (type == DEV_ALL)
+               return 'a';
+       if (type == DEV_CHAR)
+               return 'c';
+       if (type == DEV_BLOCK)
+               return 'b';
+       return 'X';
+}
+
+static void set_majmin(char *str, unsigned m)
+{
+       memset(str, 0, MAJMINLEN);
+       if (m == ~0)
+               sprintf(str, "*");
+       else
+               snprintf(str, MAJMINLEN, "%d", m);
+}
+
+static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
+                               struct seq_file *m)
+{
+       struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
+       struct dev_whitelist_item *wh;
+       char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];
+
+       spin_lock(&devcgroup->lock);
+       list_for_each_entry(wh, &devcgroup->whitelist, list) {
+               set_access(acc, wh->access);
+               set_majmin(maj, wh->major);
+               set_majmin(min, wh->minor);
+               seq_printf(m, "%c %s:%s %s\n", type_to_char(wh->type),
+                          maj, min, acc);
+       }
+       spin_unlock(&devcgroup->lock);
+
+       return 0;
+}
+
+/*
+ * may_access_whitelist:
+ * does the access granted to dev_cgroup c contain the access
+ * requested in whitelist item refwh.
+ * return 1 if yes, 0 if no.
+ * call with c->lock held
+ */
+static int may_access_whitelist(struct dev_cgroup *c,
+                                      struct dev_whitelist_item *refwh)
+{
+       struct dev_whitelist_item *whitem;
+
+       list_for_each_entry(whitem, &c->whitelist, list) {
+               if (whitem->type & DEV_ALL)
+                       return 1;
+               if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK))
+                       continue;
+               if ((refwh->type & DEV_CHAR) && !(whitem->type & DEV_CHAR))
+                       continue;
+               if (whitem->major != ~0 && whitem->major != refwh->major)
+                       continue;
+               if (whitem->minor != ~0 && whitem->minor != refwh->minor)
+                       continue;
+               if (refwh->access & (~(whitem->access | ACC_MASK)))
+                       continue;
+               return 1;
+       }
+       return 0;
+}
+
+/*
+ * parent_has_perm:
+ * when adding a new allow rule to a device whitelist, the rule
+ * must be allowed in the parent device
+ */
+static int parent_has_perm(struct cgroup *childcg,
+                                 struct dev_whitelist_item *wh)
+{
+       struct cgroup *pcg = childcg->parent;
+       struct dev_cgroup *parent;
+       int ret;
+
+       if (!pcg)
+               return 1;
+       parent = cgroup_to_devcgroup(pcg);
+       spin_lock(&parent->lock);
+       ret = may_access_whitelist(parent, wh);
+       spin_unlock(&parent->lock);
+       return ret;
+}
+
+/*
+ * Modify the whitelist using allow/deny rules.
+ * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
+ * so we can give a container CAP_MKNOD to let it create devices but not
+ * modify the whitelist.
+ * It seems likely we'll want to add a CAP_CONTAINER capability to allow
+ * us to also grant CAP_SYS_ADMIN to containers without giving away the
+ * device whitelist controls, but for now we'll stick with CAP_SYS_ADMIN
+ *
+ * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
+ * new access is only allowed if you're in the top-level cgroup, or your
+ * parent cgroup has the access you're asking for.
+ */
+static ssize_t devcgroup_access_write(struct cgroup *cgroup, struct cftype *cft,
+                               struct file *file, const char __user *userbuf,
+                               size_t nbytes, loff_t *ppos)
+{
+       struct cgroup *cur_cgroup;
+       struct dev_cgroup *devcgroup, *cur_devcgroup;
+       int filetype = cft->private;
+       char *buffer, *b;
+       int retval = 0, count;
+       struct dev_whitelist_item wh;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       devcgroup = cgroup_to_devcgroup(cgroup);
+       cur_cgroup = task_cgroup(current, devices_subsys.subsys_id);
+       cur_devcgroup = cgroup_to_devcgroup(cur_cgroup);
+
+       buffer = kmalloc(nbytes+1, GFP_KERNEL);
+       if (!buffer)
+               return -ENOMEM;
+
+       if (copy_from_user(buffer, userbuf, nbytes)) {
+               retval = -EFAULT;
+               goto out1;
+       }
+       buffer[nbytes] = 0;     /* nul-terminate */
+
+       cgroup_lock();
+       if (cgroup_is_removed(cgroup)) {
+               retval = -ENODEV;
+               goto out2;
+       }
+
+       memset(&wh, 0, sizeof(wh));
+       b = buffer;
+
+       switch (*b) {
+       case 'a':
+               wh.type = DEV_ALL;
+               wh.access = ACC_MASK;
+               goto handle;
+       case 'b':
+               wh.type = DEV_BLOCK;
+               break;
+       case 'c':
+               wh.type = DEV_CHAR;
+               break;
+       default:
+               retval = -EINVAL;
+               goto out2;
+       }
+       b++;
+       if (!isspace(*b)) {
+               retval = -EINVAL;
+               goto out2;
+       }
+       b++;
+       if (*b == '*') {
+               wh.major = ~0;
+               b++;
+       } else if (isdigit(*b)) {
+               wh.major = 0;
+               while (isdigit(*b)) {
+                       wh.major = wh.major*10+(*b-'0');
+                       b++;
+               }
+       } else {
+               retval = -EINVAL;
+               goto out2;
+       }
+       if (*b != ':') {
+               retval = -EINVAL;
+               goto out2;
+       }
+       b++;
+
+       /* read minor */
+       if (*b == '*') {
+               wh.minor = ~0;
+               b++;
+       } else if (isdigit(*b)) {
+               wh.minor = 0;
+               while (isdigit(*b)) {
+                       wh.minor = wh.minor*10+(*b-'0');
+                       b++;
+               }
+       } else {
+               retval = -EINVAL;
+               goto out2;
+       }
+       if (!isspace(*b)) {
+               retval = -EINVAL;
+               goto out2;
+       }
+       for (b++, count = 0; count < 3; count++, b++) {
+               switch (*b) {
+               case 'r':
+                       wh.access |= ACC_READ;
+                       break;
+               case 'w':
+                       wh.access |= ACC_WRITE;
+                       break;
+               case 'm':
+                       wh.access |= ACC_MKNOD;
+                       break;
+               case '\n':
+               case '\0':
+                       count = 3;
+                       break;
+               default:
+                       retval = -EINVAL;
+                       goto out2;
+               }
+       }
+
+handle:
+       retval = 0;
+       switch (filetype) {
+       case DEVCG_ALLOW:
+               if (!parent_has_perm(cgroup, &wh))
+                       retval = -EPERM;
+               else
+                       retval = dev_whitelist_add(devcgroup, &wh);
+               break;
+       case DEVCG_DENY:
+               dev_whitelist_rm(devcgroup, &wh);
+               break;
+       default:
+               retval = -EINVAL;
+               goto out2;
+       }
+
+       if (retval == 0)
+               retval = nbytes;
+
+out2:
+       cgroup_unlock();
+out1:
+       kfree(buffer);
+       return retval;
+}
+
+static struct cftype dev_cgroup_files[] = {
+       {
+               .name = "allow",
+               .write  = devcgroup_access_write,
+               .private = DEVCG_ALLOW,
+       },
+       {
+               .name = "deny",
+               .write = devcgroup_access_write,
+               .private = DEVCG_DENY,
+       },
+       {
+               .name = "list",
+               .read_seq_string = devcgroup_seq_read,
+               .private = DEVCG_LIST,
+       },
+};
+
+static int devcgroup_populate(struct cgroup_subsys *ss,
+                               struct cgroup *cgroup)
+{
+       return cgroup_add_files(cgroup, ss, dev_cgroup_files,
+                                       ARRAY_SIZE(dev_cgroup_files));
+}
+
+struct cgroup_subsys devices_subsys = {
+       .name = "devices",
+       .can_attach = devcgroup_can_attach,
+       .create = devcgroup_create,
+       .destroy  = devcgroup_destroy,
+       .populate = devcgroup_populate,
+       .subsys_id = devices_subsys_id,
+};
+
+int devcgroup_inode_permission(struct inode *inode, int mask)
+{
+       struct cgroup *cgroup;
+       struct dev_cgroup *dev_cgroup;
+       struct dev_whitelist_item *wh;
+
+       dev_t device = inode->i_rdev;
+       if (!device)
+               return 0;
+       if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
+               return 0;
+       cgroup = task_cgroup(current, devices_subsys.subsys_id);
+       dev_cgroup = cgroup_to_devcgroup(cgroup);
+       if (!dev_cgroup)
+               return 0;
+
+       spin_lock(&dev_cgroup->lock);
+       list_for_each_entry(wh, &dev_cgroup->whitelist, list) {
+               if (wh->type & DEV_ALL)
+                       goto acc_check;
+               if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode))
+                       continue;
+               if ((wh->type & DEV_CHAR) && !S_ISCHR(inode->i_mode))
+                       continue;
+               if (wh->major != ~0 && wh->major != imajor(inode))
+                       continue;
+               if (wh->minor != ~0 && wh->minor != iminor(inode))
+                       continue;
+acc_check:
+               if ((mask & MAY_WRITE) && !(wh->access & ACC_WRITE))
+                       continue;
+               if ((mask & MAY_READ) && !(wh->access & ACC_READ))
+                       continue;
+               spin_unlock(&dev_cgroup->lock);
+               return 0;
+       }
+       spin_unlock(&dev_cgroup->lock);
+
+       return -EPERM;
+}
+
+int devcgroup_inode_mknod(int mode, dev_t dev)
+{
+       struct cgroup *cgroup;
+       struct dev_cgroup *dev_cgroup;
+       struct dev_whitelist_item *wh;
+
+       cgroup = task_cgroup(current, devices_subsys.subsys_id);
+       dev_cgroup = cgroup_to_devcgroup(cgroup);
+       if (!dev_cgroup)
+               return 0;
+
+       spin_lock(&dev_cgroup->lock);
+       list_for_each_entry(wh, &dev_cgroup->whitelist, list) {
+               if (wh->type & DEV_ALL)
+                       goto acc_check;
+               if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode))
+                       continue;
+               if ((wh->type & DEV_CHAR) && !S_ISCHR(mode))
+                       continue;
+               if (wh->major != ~0 && wh->major != MAJOR(dev))
+                       continue;
+               if (wh->minor != ~0 && wh->minor != MINOR(dev))
+                       continue;
+acc_check:
+               if (!(wh->access & ACC_MKNOD))
+                       continue;
+               spin_unlock(&dev_cgroup->lock);
+               return 0;
+       }
+       spin_unlock(&dev_cgroup->lock);
+       return -EPERM;
+}
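devcgroup_access_write() parses lines of the form "<type> <major>:<minor> <access>", where type is a, b or c, '*' matches any major or minor, and access is any combination of r, w and m. A hedged userspace sketch of driving the allow/deny files (the /dev/cgroup mount point and the "sandbox" group are assumptions for illustration only):

/* Hedged sketch: assumes the cgroup filesystem with the devices subsystem
 * is mounted at /dev/cgroup and a child group "sandbox" already exists. */
#include <stdio.h>

static int write_str(const char *path, const char *s)
{
	FILE *f = fopen(path, "w");
	if (!f)
		return -1;
	int ok = fputs(s, f) >= 0;
	return (fclose(f) == 0 && ok) ? 0 : -1;
}

int main(void)
{
	/* Drop the inherited catch-all "a *:* rwm" whitelist entry... */
	if (write_str("/dev/cgroup/sandbox/devices.deny", "a") < 0)
		perror("devices.deny");

	/* ...then whitelist read and mknod access to char device 1:3. */
	if (write_str("/dev/cgroup/sandbox/devices.allow", "c 1:3 rm") < 0)
		perror("devices.allow");
	return 0;
}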
index b0232bbf427b26008c5c3e64f3c823350e1c4718..f50c6c3c32c9542eb796ba5c162b2bb08ea52ea9 100644 (file)
@@ -365,8 +365,8 @@ static void dummy_inode_delete (struct inode *ino)
        return;
 }
 
-static int dummy_inode_setxattr (struct dentry *dentry, char *name, void *value,
-                               size_t size, int flags)
+static int dummy_inode_setxattr (struct dentry *dentry, const char *name,
+                                const void *value, size_t size, int flags)
 {
        if (!strncmp(name, XATTR_SECURITY_PREFIX,
                     sizeof(XATTR_SECURITY_PREFIX) - 1) &&
@@ -375,12 +375,13 @@ static int dummy_inode_setxattr (struct dentry *dentry, char *name, void *value,
        return 0;
 }
 
-static void dummy_inode_post_setxattr (struct dentry *dentry, char *name, void *value,
-                                      size_t size, int flags)
+static void dummy_inode_post_setxattr (struct dentry *dentry, const char *name,
+                                      const void *value, size_t size,
+                                      int flags)
 {
 }
 
-static int dummy_inode_getxattr (struct dentry *dentry, char *name)
+static int dummy_inode_getxattr (struct dentry *dentry, const char *name)
 {
        return 0;
 }
@@ -390,7 +391,7 @@ static int dummy_inode_listxattr (struct dentry *dentry)
        return 0;
 }
 
-static int dummy_inode_removexattr (struct dentry *dentry, char *name)
+static int dummy_inode_removexattr (struct dentry *dentry, const char *name)
 {
        if (!strncmp(name, XATTR_SECURITY_PREFIX,
                     sizeof(XATTR_SECURITY_PREFIX) - 1) &&
@@ -604,7 +605,7 @@ static int dummy_task_kill (struct task_struct *p, struct siginfo *info,
 }
 
 static int dummy_task_prctl (int option, unsigned long arg2, unsigned long arg3,
-                            unsigned long arg4, unsigned long arg5)
+                            unsigned long arg4, unsigned long arg5, long *rc_p)
 {
        return 0;
 }
@@ -967,7 +968,7 @@ static int dummy_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
        return -EOPNOTSUPP;
 }
 
-static int dummy_secctx_to_secid(char *secdata, u32 seclen, u32 *secid)
+static int dummy_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
 {
        return -EOPNOTSUPP;
 }
@@ -993,6 +994,13 @@ static inline int dummy_key_permission(key_ref_t key_ref,
 {
        return 0;
 }
+
+static int dummy_key_getsecurity(struct key *key, char **_buffer)
+{
+       *_buffer = NULL;
+       return 0;
+}
+
 #endif /* CONFIG_KEYS */
 
 #ifdef CONFIG_AUDIT
@@ -1209,6 +1217,7 @@ void security_fixup_ops (struct security_operations *ops)
        set_to_dummy_if_null(ops, key_alloc);
        set_to_dummy_if_null(ops, key_free);
        set_to_dummy_if_null(ops, key_permission);
+       set_to_dummy_if_null(ops, key_getsecurity);
 #endif /* CONFIG_KEYS */
 #ifdef CONFIG_AUDIT
        set_to_dummy_if_null(ops, audit_rule_init);
index 5145adfb6a05cabec8864b81ee7c5f692dbf03cd..747a464943aff0ada556e903fa6750fc7b670b60 100644 (file)
@@ -14,3 +14,4 @@ obj-y := \
 
 obj-$(CONFIG_KEYS_COMPAT) += compat.o
 obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_SYSCTL) += sysctl.o
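The new sysctl.o object is what turns the key_quota_* variables (declared in the keys internal header further down) into runtime tunables. A hedged sketch of the general shape such a table takes; the names, limits and registration details of the actual file may differ:

/* Hedged sketch: roughly how a sysctl table can expose the new quota knobs.
 * Assumes it lives in security/keys/ so "internal.h" provides the externs. */
#include <linux/sysctl.h>
#include "internal.h"

static ctl_table example_key_sysctls[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "maxkeys",
		.data		= &key_quota_maxkeys,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "maxbytes",
		.data		= &key_quota_maxbytes,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};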
index e10ec995f2751101d39a7dfbd145cd8ce52cc6ee..c766c68a63bc2078c62bf256730e05e14b36268b 100644 (file)
@@ -79,6 +79,9 @@ asmlinkage long compat_sys_keyctl(u32 option,
        case KEYCTL_ASSUME_AUTHORITY:
                return keyctl_assume_authority(arg2);
 
+       case KEYCTL_GET_SECURITY:
+               return keyctl_get_security(arg2, compat_ptr(arg3), arg4);
+
        default:
                return -EOPNOTSUPP;
        }
index 7d894ef70370bcb2c4906447c22965ba829a1e2a..8c05587f5018b49147bf44ed5f4ad846a173573b 100644 (file)
@@ -57,10 +57,6 @@ struct key_user {
        int                     qnbytes;        /* number of bytes allocated to this user */
 };
 
-#define KEYQUOTA_MAX_KEYS      100
-#define KEYQUOTA_MAX_BYTES     10000
-#define KEYQUOTA_LINK_BYTES    4               /* a link in a keyring is worth 4 bytes */
-
 extern struct rb_root  key_user_tree;
 extern spinlock_t      key_user_lock;
 extern struct key_user root_key_user;
@@ -68,6 +64,16 @@ extern struct key_user       root_key_user;
 extern struct key_user *key_user_lookup(uid_t uid);
 extern void key_user_put(struct key_user *user);
 
+/*
+ * key quota limits
+ * - root has its own separate limits to everyone else
+ */
+extern unsigned key_quota_root_maxkeys;
+extern unsigned key_quota_root_maxbytes;
+extern unsigned key_quota_maxkeys;
+extern unsigned key_quota_maxbytes;
+
+#define KEYQUOTA_LINK_BYTES    4               /* a link in a keyring is worth 4 bytes */
 
 
 extern struct rb_root key_serial_tree;
@@ -77,8 +83,6 @@ extern struct mutex key_construction_mutex;
 extern wait_queue_head_t request_key_conswq;
 
 
-extern void keyring_publish_name(struct key *keyring);
-
 extern int __key_link(struct key *keyring, struct key *key);
 
 extern key_ref_t __keyring_search_one(key_ref_t keyring_ref,
@@ -102,14 +106,15 @@ extern key_ref_t search_process_keyrings(struct key_type *type,
                                         key_match_func_t match,
                                         struct task_struct *tsk);
 
-extern struct key *find_keyring_by_name(const char *name, key_serial_t bound);
+extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
 
 extern int install_thread_keyring(struct task_struct *tsk);
 extern int install_process_keyring(struct task_struct *tsk);
 
 extern struct key *request_key_and_link(struct key_type *type,
                                        const char *description,
-                                       const char *callout_info,
+                                       const void *callout_info,
+                                       size_t callout_len,
                                        void *aux,
                                        struct key *dest_keyring,
                                        unsigned long flags);
@@ -120,13 +125,15 @@ extern struct key *request_key_and_link(struct key_type *type,
 struct request_key_auth {
        struct key              *target_key;
        struct task_struct      *context;
-       char                    *callout_info;
+       void                    *callout_info;
+       size_t                  callout_len;
        pid_t                   pid;
 };
 
 extern struct key_type key_type_request_key_auth;
 extern struct key *request_key_auth_new(struct key *target,
-                                       const char *callout_info);
+                                       const void *callout_info,
+                                       size_t callout_len);
 
 extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
 
@@ -152,7 +159,8 @@ extern long keyctl_negate_key(key_serial_t, unsigned, key_serial_t);
 extern long keyctl_set_reqkey_keyring(int);
 extern long keyctl_set_timeout(key_serial_t, unsigned);
 extern long keyctl_assume_authority(key_serial_t);
-
+extern long keyctl_get_security(key_serial_t keyid, char __user *buffer,
+                               size_t buflen);
 
 /*
  * debugging key validation
index 654d23baf3525ceee6134f75039a4d450fc1edd9..14948cf83ef6acc6ea19811465201ff5ae8660f0 100644 (file)
@@ -1,6 +1,6 @@
 /* Basic authentication token and access key management
  *
- * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -27,6 +27,11 @@ DEFINE_SPINLOCK(key_serial_lock);
 struct rb_root key_user_tree; /* tree of quota records indexed by UID */
 DEFINE_SPINLOCK(key_user_lock);
 
+unsigned int key_quota_root_maxkeys = 200;     /* root's key count quota */
+unsigned int key_quota_root_maxbytes = 20000;  /* root's key space quota */
+unsigned int key_quota_maxkeys = 200;          /* general key count quota */
+unsigned int key_quota_maxbytes = 20000;       /* general key space quota */
+
 static LIST_HEAD(key_types_list);
 static DECLARE_RWSEM(key_types_sem);
 
@@ -137,36 +142,6 @@ void key_user_put(struct key_user *user)
 
 } /* end key_user_put() */
 
-/*****************************************************************************/
-/*
- * insert a key with a fixed serial number
- */
-static void __init __key_insert_serial(struct key *key)
-{
-       struct rb_node *parent, **p;
-       struct key *xkey;
-
-       parent = NULL;
-       p = &key_serial_tree.rb_node;
-
-       while (*p) {
-               parent = *p;
-               xkey = rb_entry(parent, struct key, serial_node);
-
-               if (key->serial < xkey->serial)
-                       p = &(*p)->rb_left;
-               else if (key->serial > xkey->serial)
-                       p = &(*p)->rb_right;
-               else
-                       BUG();
-       }
-
-       /* we've found a suitable hole - arrange for this key to occupy it */
-       rb_link_node(&key->serial_node, parent, p);
-       rb_insert_color(&key->serial_node, &key_serial_tree);
-
-} /* end __key_insert_serial() */
-
 /*****************************************************************************/
 /*
  * assign a key the next unique serial number
@@ -266,11 +241,16 @@ struct key *key_alloc(struct key_type *type, const char *desc,
        /* check that the user's quota permits allocation of another key and
         * its description */
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
+               unsigned maxkeys = (uid == 0) ?
+                       key_quota_root_maxkeys : key_quota_maxkeys;
+               unsigned maxbytes = (uid == 0) ?
+                       key_quota_root_maxbytes : key_quota_maxbytes;
+
                spin_lock(&user->lock);
                if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
-                       if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
-                           user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
-                           )
+                       if (user->qnkeys + 1 >= maxkeys ||
+                           user->qnbytes + quotalen >= maxbytes ||
+                           user->qnbytes + quotalen < user->qnbytes)
                                goto no_quota;
                }
 
@@ -375,11 +355,14 @@ int key_payload_reserve(struct key *key, size_t datalen)
 
        /* contemplate the quota adjustment */
        if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+               unsigned maxbytes = (key->user->uid == 0) ?
+                       key_quota_root_maxbytes : key_quota_maxbytes;
+
                spin_lock(&key->user->lock);
 
                if (delta > 0 &&
-                   key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES
-                   ) {
+                   (key->user->qnbytes + delta >= maxbytes ||
+                    key->user->qnbytes + delta < key->user->qnbytes)) {
                        ret = -EDQUOT;
                }
                else {
@@ -757,11 +740,11 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
                               const char *description,
                               const void *payload,
                               size_t plen,
+                              key_perm_t perm,
                               unsigned long flags)
 {
        struct key_type *ktype;
        struct key *keyring, *key = NULL;
-       key_perm_t perm;
        key_ref_t key_ref;
        int ret;
 
@@ -806,15 +789,17 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
                        goto found_matching_key;
        }
 
-       /* decide on the permissions we want */
-       perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
-       perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;
+       /* if the client doesn't provide, decide on the permissions we want */
+       if (perm == KEY_PERM_UNDEF) {
+               perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
+               perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;
 
-       if (ktype->read)
-               perm |= KEY_POS_READ | KEY_USR_READ;
+               if (ktype->read)
+                       perm |= KEY_POS_READ | KEY_USR_READ;
 
-       if (ktype == &key_type_keyring || ktype->update)
-               perm |= KEY_USR_WRITE;
+               if (ktype == &key_type_keyring || ktype->update)
+                       perm |= KEY_USR_WRITE;
+       }
 
        /* allocate a new key */
        key = key_alloc(ktype, description, current->fsuid, current->fsgid,
@@ -1018,17 +1003,4 @@ void __init key_init(void)
        rb_insert_color(&root_key_user.node,
                        &key_user_tree);
 
-       /* record root's user standard keyrings */
-       key_check(&root_user_keyring);
-       key_check(&root_session_keyring);
-
-       __key_insert_serial(&root_user_keyring);
-       __key_insert_serial(&root_session_keyring);
-
-       keyring_publish_name(&root_user_keyring);
-       keyring_publish_name(&root_session_keyring);
-
-       /* link the two root keyrings together */
-       key_link(&root_session_keyring, &root_user_keyring);
-
 } /* end key_init() */
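key_create_or_update() now takes an explicit permission mask, with KEY_PERM_UNDEF preserving the old type-derived defaults. A hedged in-kernel sketch of a caller pinning the permissions down itself (the function and description string are invented for illustration):

/* Hedged sketch (not from this commit): an in-kernel caller supplying an
 * explicit permission mask.  Callers wanting the old behaviour pass
 * KEY_PERM_UNDEF instead. */
#include <linux/key.h>
#include <linux/err.h>

static int example_add_readonly_key(key_ref_t keyring_ref,
				    const void *payload, size_t plen)
{
	key_ref_t kref;

	kref = key_create_or_update(keyring_ref, "user", "example:desc",
				    payload, plen,
				    KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
				    KEY_ALLOC_IN_QUOTA);
	if (IS_ERR(kref))
		return PTR_ERR(kref);
	key_ref_put(kref);
	return 0;
}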
index d9ca15c109ccfaa167cafc8c39dec87f2a3e305d..acc9c89e40a8de6937cba2971c8805e09a4d0f6f 100644 (file)
@@ -19,6 +19,8 @@
 #include <linux/capability.h>
 #include <linux/string.h>
 #include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/security.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -62,9 +64,10 @@ asmlinkage long sys_add_key(const char __user *_type,
        char type[32], *description;
        void *payload;
        long ret;
+       bool vm;
 
        ret = -EINVAL;
-       if (plen > 32767)
+       if (plen > 1024 * 1024 - 1)
                goto error;
 
        /* draw all the data into kernel space */
@@ -81,11 +84,18 @@ asmlinkage long sys_add_key(const char __user *_type,
        /* pull the payload in if one was supplied */
        payload = NULL;
 
+       vm = false;
        if (_payload) {
                ret = -ENOMEM;
                payload = kmalloc(plen, GFP_KERNEL);
-               if (!payload)
-                       goto error2;
+               if (!payload) {
+                       if (plen <= PAGE_SIZE)
+                               goto error2;
+                       vm = true;
+                       payload = vmalloc(plen);
+                       if (!payload)
+                               goto error2;
+               }
 
                ret = -EFAULT;
                if (copy_from_user(payload, _payload, plen) != 0)
@@ -102,7 +112,8 @@ asmlinkage long sys_add_key(const char __user *_type,
        /* create or update the requested key and add it to the target
         * keyring */
        key_ref = key_create_or_update(keyring_ref, type, description,
-                                      payload, plen, KEY_ALLOC_IN_QUOTA);
+                                      payload, plen, KEY_PERM_UNDEF,
+                                      KEY_ALLOC_IN_QUOTA);
        if (!IS_ERR(key_ref)) {
                ret = key_ref_to_ptr(key_ref)->serial;
                key_ref_put(key_ref);
@@ -113,7 +124,10 @@ asmlinkage long sys_add_key(const char __user *_type,
 
        key_ref_put(keyring_ref);
  error3:
-       kfree(payload);
+       if (!vm)
+               kfree(payload);
+       else
+               vfree(payload);
  error2:
        kfree(description);
  error:
@@ -140,6 +154,7 @@ asmlinkage long sys_request_key(const char __user *_type,
        struct key_type *ktype;
        struct key *key;
        key_ref_t dest_ref;
+       size_t callout_len;
        char type[32], *description, *callout_info;
        long ret;
 
@@ -157,12 +172,14 @@ asmlinkage long sys_request_key(const char __user *_type,
 
        /* pull the callout info into kernel space */
        callout_info = NULL;
+       callout_len = 0;
        if (_callout_info) {
                callout_info = strndup_user(_callout_info, PAGE_SIZE);
                if (IS_ERR(callout_info)) {
                        ret = PTR_ERR(callout_info);
                        goto error2;
                }
+               callout_len = strlen(callout_info);
        }
 
        /* get the destination keyring if specified */
@@ -183,8 +200,8 @@ asmlinkage long sys_request_key(const char __user *_type,
        }
 
        /* do the search */
-       key = request_key_and_link(ktype, description, callout_info, NULL,
-                                  key_ref_to_ptr(dest_ref),
+       key = request_key_and_link(ktype, description, callout_info,
+                                  callout_len, NULL, key_ref_to_ptr(dest_ref),
                                   KEY_ALLOC_IN_QUOTA);
        if (IS_ERR(key)) {
                ret = PTR_ERR(key);
@@ -714,10 +731,16 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
 
                /* transfer the quota burden to the new user */
                if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+                       unsigned maxkeys = (uid == 0) ?
+                               key_quota_root_maxkeys : key_quota_maxkeys;
+                       unsigned maxbytes = (uid == 0) ?
+                               key_quota_root_maxbytes : key_quota_maxbytes;
+
                        spin_lock(&newowner->lock);
-                       if (newowner->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
-                           newowner->qnbytes + key->quotalen >=
-                           KEYQUOTA_MAX_BYTES)
+                       if (newowner->qnkeys + 1 >= maxkeys ||
+                           newowner->qnbytes + key->quotalen >= maxbytes ||
+                           newowner->qnbytes + key->quotalen <
+                           newowner->qnbytes)
                                goto quota_overrun;
 
                        newowner->qnkeys++;
@@ -821,9 +844,10 @@ long keyctl_instantiate_key(key_serial_t id,
        key_ref_t keyring_ref;
        void *payload;
        long ret;
+       bool vm = false;
 
        ret = -EINVAL;
-       if (plen > 32767)
+       if (plen > 1024 * 1024 - 1)
                goto error;
 
        /* the appropriate instantiation authorisation key must have been
@@ -843,8 +867,14 @@ long keyctl_instantiate_key(key_serial_t id,
        if (_payload) {
                ret = -ENOMEM;
                payload = kmalloc(plen, GFP_KERNEL);
-               if (!payload)
-                       goto error;
+               if (!payload) {
+                       if (plen <= PAGE_SIZE)
+                               goto error;
+                       vm = true;
+                       payload = vmalloc(plen);
+                       if (!payload)
+                               goto error;
+               }
 
                ret = -EFAULT;
                if (copy_from_user(payload, _payload, plen) != 0)
@@ -877,7 +907,10 @@ long keyctl_instantiate_key(key_serial_t id,
        }
 
 error2:
-       kfree(payload);
+       if (!vm)
+               kfree(payload);
+       else
+               vfree(payload);
 error:
        return ret;
 
@@ -1055,6 +1088,66 @@ error:
 
 } /* end keyctl_assume_authority() */
 
+/*
+ * get the security label of a key
+ * - the key must grant us view permission
+ * - if there's a buffer, we place up to buflen bytes of data into it
+ * - unless there's an error, we return the amount of information available,
+ *   irrespective of how much we may have copied (including the terminal NUL)
+ * - implements keyctl(KEYCTL_GET_SECURITY)
+ */
+long keyctl_get_security(key_serial_t keyid,
+                        char __user *buffer,
+                        size_t buflen)
+{
+       struct key *key, *instkey;
+       key_ref_t key_ref;
+       char *context;
+       long ret;
+
+       key_ref = lookup_user_key(NULL, keyid, 0, 1, KEY_VIEW);
+       if (IS_ERR(key_ref)) {
+               if (PTR_ERR(key_ref) != -EACCES)
+                       return PTR_ERR(key_ref);
+
+               /* viewing a key under construction is also permitted if we
+                * have the authorisation token handy */
+               instkey = key_get_instantiation_authkey(keyid);
+               if (IS_ERR(instkey))
+                       return PTR_ERR(key_ref);
+               key_put(instkey);
+
+               key_ref = lookup_user_key(NULL, keyid, 0, 1, 0);
+               if (IS_ERR(key_ref))
+                       return PTR_ERR(key_ref);
+       }
+
+       key = key_ref_to_ptr(key_ref);
+       ret = security_key_getsecurity(key, &context);
+       if (ret == 0) {
+               /* if no information was returned, give userspace an empty
+                * string */
+               ret = 1;
+               if (buffer && buflen > 0 &&
+                   copy_to_user(buffer, "", 1) != 0)
+                       ret = -EFAULT;
+       } else if (ret > 0) {
+               /* return as much data as there's room for */
+               if (buffer && buflen > 0) {
+                       if (buflen > ret)
+                               buflen = ret;
+
+                       if (copy_to_user(buffer, context, buflen) != 0)
+                               ret = -EFAULT;
+               }
+
+               kfree(context);
+       }
+
+       key_ref_put(key_ref);
+       return ret;
+}
+
 /*****************************************************************************/
 /*
  * the key control system call
@@ -1135,6 +1228,11 @@ asmlinkage long sys_keyctl(int option, unsigned long arg2, unsigned long arg3,
        case KEYCTL_ASSUME_AUTHORITY:
                return keyctl_assume_authority((key_serial_t) arg2);
 
+       case KEYCTL_GET_SECURITY:
+               return keyctl_get_security((key_serial_t) arg2,
+                                          (char *) arg3,
+                                          (size_t) arg4);
+
        default:
                return -EOPNOTSUPP;
        }
index 88292e3dee966103fff3f4a9e1cba7b5d22fc5c9..a9ab8affc092d99d1f4d55da9f5c6d10964bb4a1 100644 (file)
@@ -1,6 +1,6 @@
-/* keyring.c: keyring handling
+/* Keyring handling
  *
- * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -79,7 +79,7 @@ static DECLARE_RWSEM(keyring_serialise_link_sem);
  * publish the name of a keyring so that it can be found by name (if it has
  * one)
  */
-void keyring_publish_name(struct key *keyring)
+static void keyring_publish_name(struct key *keyring)
 {
        int bucket;
 
@@ -292,7 +292,7 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
 
        struct keyring_list *keylist;
        struct timespec now;
-       unsigned long possessed;
+       unsigned long possessed, kflags;
        struct key *keyring, *key;
        key_ref_t key_ref;
        long err;
@@ -319,6 +319,32 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
        err = -EAGAIN;
        sp = 0;
 
+       /* firstly we should check to see if this top-level keyring is what we
+        * are looking for */
+       key_ref = ERR_PTR(-EAGAIN);
+       kflags = keyring->flags;
+       if (keyring->type == type && match(keyring, description)) {
+               key = keyring;
+
+               /* check it isn't negative and hasn't expired or been
+                * revoked */
+               if (kflags & (1 << KEY_FLAG_REVOKED))
+                       goto error_2;
+               if (key->expiry && now.tv_sec >= key->expiry)
+                       goto error_2;
+               key_ref = ERR_PTR(-ENOKEY);
+               if (kflags & (1 << KEY_FLAG_NEGATIVE))
+                       goto error_2;
+               goto found;
+       }
+
+       /* otherwise, the top keyring must not be revoked, expired, or
+        * negatively instantiated if we are to search it */
+       key_ref = ERR_PTR(-EAGAIN);
+       if (kflags & ((1 << KEY_FLAG_REVOKED) | (1 << KEY_FLAG_NEGATIVE)) ||
+           (keyring->expiry && now.tv_sec >= keyring->expiry))
+               goto error_2;
+
        /* start processing a new keyring */
 descend:
        if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
@@ -331,13 +357,14 @@ descend:
        /* iterate through the keys in this keyring first */
        for (kix = 0; kix < keylist->nkeys; kix++) {
                key = keylist->keys[kix];
+               kflags = key->flags;
 
                /* ignore keys not of this type */
                if (key->type != type)
                        continue;
 
                /* skip revoked keys and expired keys */
-               if (test_bit(KEY_FLAG_REVOKED, &key->flags))
+               if (kflags & (1 << KEY_FLAG_REVOKED))
                        continue;
 
                if (key->expiry && now.tv_sec >= key->expiry)
@@ -352,8 +379,8 @@ descend:
                                        context, KEY_SEARCH) < 0)
                        continue;
 
-               /* we set a different error code if we find a negative key */
-               if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
+               /* we set a different error code if we pass a negative key */
+               if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
                        err = -ENOKEY;
                        continue;
                }
@@ -489,10 +516,9 @@ key_ref_t __keyring_search_one(key_ref_t keyring_ref,
 /*
  * find a keyring with the specified name
  * - all named keyrings are searched
- * - only find keyrings with search permission for the process
- * - only find keyrings with a serial number greater than the one specified
+ * - normally only finds keyrings with search permission for the current process
  */
-struct key *find_keyring_by_name(const char *name, key_serial_t bound)
+struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
 {
        struct key *keyring;
        int bucket;
@@ -518,15 +544,11 @@ struct key *find_keyring_by_name(const char *name, key_serial_t bound)
                        if (strcmp(keyring->description, name) != 0)
                                continue;
 
-                       if (key_permission(make_key_ref(keyring, 0),
+                       if (!skip_perm_check &&
+                           key_permission(make_key_ref(keyring, 0),
                                           KEY_SEARCH) < 0)
                                continue;
 
-                       /* found a potential candidate, but we still need to
-                        * check the serial number */
-                       if (keyring->serial <= bound)
-                               continue;
-
                        /* we've got a match */
                        atomic_inc(&keyring->usage);
                        read_unlock(&keyring_name_lock);
index 694126003ed3bcfaacc980e3210540de38cda107..f619170da760688f4d40c856ab2adb1a999fff26 100644 (file)
@@ -70,19 +70,15 @@ static int __init key_proc_init(void)
        struct proc_dir_entry *p;
 
 #ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
-       p = create_proc_entry("keys", 0, NULL);
+       p = proc_create("keys", 0, NULL, &proc_keys_fops);
        if (!p)
                panic("Cannot create /proc/keys\n");
-
-       p->proc_fops = &proc_keys_fops;
 #endif
 
-       p = create_proc_entry("key-users", 0, NULL);
+       p = proc_create("key-users", 0, NULL, &proc_key_users_fops);
        if (!p)
                panic("Cannot create /proc/key-users\n");
 
-       p->proc_fops = &proc_key_users_fops;
-
        return 0;
 
 } /* end key_proc_init() */
@@ -246,6 +242,10 @@ static int proc_key_users_show(struct seq_file *m, void *v)
 {
        struct rb_node *_p = v;
        struct key_user *user = rb_entry(_p, struct key_user, node);
+       unsigned maxkeys = (user->uid == 0) ?
+               key_quota_root_maxkeys : key_quota_maxkeys;
+       unsigned maxbytes = (user->uid == 0) ?
+               key_quota_root_maxbytes : key_quota_maxbytes;
 
        seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n",
                   user->uid,
@@ -253,10 +253,9 @@ static int proc_key_users_show(struct seq_file *m, void *v)
                   atomic_read(&user->nkeys),
                   atomic_read(&user->nikeys),
                   user->qnkeys,
-                  KEYQUOTA_MAX_KEYS,
+                  maxkeys,
                   user->qnbytes,
-                  KEYQUOTA_MAX_BYTES
-                  );
+                  maxbytes);
 
        return 0;
 
index c886a2bb792ae034950ec71aa20af5fd1d4c17ee..5be6d018759abf43d469bc786d04305223464574 100644 (file)
@@ -1,6 +1,6 @@
-/* process_keys.c: management of a process's keyrings
+/* Management of a process's keyrings
  *
- * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -23,6 +23,9 @@
 /* session keyring create vs join semaphore */
 static DEFINE_MUTEX(key_session_mutex);
 
+/* user keyring creation semaphore */
+static DEFINE_MUTEX(key_user_keyring_mutex);
+
 /* the root user's tracking struct */
 struct key_user root_key_user = {
        .usage          = ATOMIC_INIT(3),
@@ -33,78 +36,84 @@ struct key_user root_key_user = {
        .uid            = 0,
 };
 
-/* the root user's UID keyring */
-struct key root_user_keyring = {
-       .usage          = ATOMIC_INIT(1),
-       .serial         = 2,
-       .type           = &key_type_keyring,
-       .user           = &root_key_user,
-       .sem            = __RWSEM_INITIALIZER(root_user_keyring.sem),
-       .perm           = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
-       .flags          = 1 << KEY_FLAG_INSTANTIATED,
-       .description    = "_uid.0",
-#ifdef KEY_DEBUGGING
-       .magic          = KEY_DEBUG_MAGIC,
-#endif
-};
-
-/* the root user's default session keyring */
-struct key root_session_keyring = {
-       .usage          = ATOMIC_INIT(1),
-       .serial         = 1,
-       .type           = &key_type_keyring,
-       .user           = &root_key_user,
-       .sem            = __RWSEM_INITIALIZER(root_session_keyring.sem),
-       .perm           = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
-       .flags          = 1 << KEY_FLAG_INSTANTIATED,
-       .description    = "_uid_ses.0",
-#ifdef KEY_DEBUGGING
-       .magic          = KEY_DEBUG_MAGIC,
-#endif
-};
-
 /*****************************************************************************/
 /*
- * allocate the keyrings to be associated with a UID
+ * install user and user session keyrings for a particular UID
  */
-int alloc_uid_keyring(struct user_struct *user,
-                     struct task_struct *ctx)
+static int install_user_keyrings(struct task_struct *tsk)
 {
+       struct user_struct *user = tsk->user;
        struct key *uid_keyring, *session_keyring;
        char buf[20];
        int ret;
 
-       /* concoct a default session keyring */
-       sprintf(buf, "_uid_ses.%u", user->uid);
+       kenter("%p{%u}", user, user->uid);
 
-       session_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, ctx,
-                                       KEY_ALLOC_IN_QUOTA, NULL);
-       if (IS_ERR(session_keyring)) {
-               ret = PTR_ERR(session_keyring);
-               goto error;
+       if (user->uid_keyring) {
+               kleave(" = 0 [exist]");
+               return 0;
        }
 
-       /* and a UID specific keyring, pointed to by the default session
-        * keyring */
-       sprintf(buf, "_uid.%u", user->uid);
+       mutex_lock(&key_user_keyring_mutex);
+       ret = 0;
 
-       uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, ctx,
-                                   KEY_ALLOC_IN_QUOTA, session_keyring);
-       if (IS_ERR(uid_keyring)) {
-               key_put(session_keyring);
-               ret = PTR_ERR(uid_keyring);
-               goto error;
+       if (!user->uid_keyring) {
+               /* get the UID-specific keyring
+                * - there may be one in existence already as it may have been
+                *   pinned by a session, but the user_struct pointing to it
+                *   may have been destroyed by setuid */
+               sprintf(buf, "_uid.%u", user->uid);
+
+               uid_keyring = find_keyring_by_name(buf, true);
+               if (IS_ERR(uid_keyring)) {
+                       uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1,
+                                                   tsk, KEY_ALLOC_IN_QUOTA,
+                                                   NULL);
+                       if (IS_ERR(uid_keyring)) {
+                               ret = PTR_ERR(uid_keyring);
+                               goto error;
+                       }
+               }
+
+               /* get a default session keyring (which might also exist
+                * already) */
+               sprintf(buf, "_uid_ses.%u", user->uid);
+
+               session_keyring = find_keyring_by_name(buf, true);
+               if (IS_ERR(session_keyring)) {
+                       session_keyring =
+                               keyring_alloc(buf, user->uid, (gid_t) -1,
+                                             tsk, KEY_ALLOC_IN_QUOTA, NULL);
+                       if (IS_ERR(session_keyring)) {
+                               ret = PTR_ERR(session_keyring);
+                               goto error_release;
+                       }
+
+                       /* we install a link from the user session keyring to
+                        * the user keyring */
+                       ret = key_link(session_keyring, uid_keyring);
+                       if (ret < 0)
+                               goto error_release_both;
+               }
+
+               /* install the keyrings */
+               user->uid_keyring = uid_keyring;
+               user->session_keyring = session_keyring;
        }
 
-       /* install the keyrings */
-       user->uid_keyring = uid_keyring;
-       user->session_keyring = session_keyring;
-       ret = 0;
+       mutex_unlock(&key_user_keyring_mutex);
+       kleave(" = 0");
+       return 0;
 
+error_release_both:
+       key_put(session_keyring);
+error_release:
+       key_put(uid_keyring);
 error:
+       mutex_unlock(&key_user_keyring_mutex);
+       kleave(" = %d", ret);
        return ret;
-
-} /* end alloc_uid_keyring() */
+}
 
 /*****************************************************************************/
 /*
@@ -481,7 +490,7 @@ key_ref_t search_process_keyrings(struct key_type *type,
                }
        }
        /* or search the user-session keyring */
-       else {
+       else if (context->user->session_keyring) {
                key_ref = keyring_search_aux(
                        make_key_ref(context->user->session_keyring, 1),
                        context, type, description, match);
@@ -614,6 +623,9 @@ key_ref_t lookup_user_key(struct task_struct *context, key_serial_t id,
                if (!context->signal->session_keyring) {
                        /* always install a session keyring upon access if one
                         * doesn't exist yet */
+                       ret = install_user_keyrings(context);
+                       if (ret < 0)
+                               goto error;
                        ret = install_session_keyring(
                                context, context->user->session_keyring);
                        if (ret < 0)
@@ -628,12 +640,24 @@ key_ref_t lookup_user_key(struct task_struct *context, key_serial_t id,
                break;
 
        case KEY_SPEC_USER_KEYRING:
+               if (!context->user->uid_keyring) {
+                       ret = install_user_keyrings(context);
+                       if (ret < 0)
+                               goto error;
+               }
+
                key = context->user->uid_keyring;
                atomic_inc(&key->usage);
                key_ref = make_key_ref(key, 1);
                break;
 
        case KEY_SPEC_USER_SESSION_KEYRING:
+               if (!context->user->session_keyring) {
+                       ret = install_user_keyrings(context);
+                       if (ret < 0)
+                               goto error;
+               }
+
                key = context->user->session_keyring;
                atomic_inc(&key->usage);
                key_ref = make_key_ref(key, 1);
@@ -744,7 +768,7 @@ long join_session_keyring(const char *name)
        mutex_lock(&key_session_mutex);
 
        /* look for an existing keyring of this name */
-       keyring = find_keyring_by_name(name, 0);
+       keyring = find_keyring_by_name(name, false);
        if (PTR_ERR(keyring) == -ENOKEY) {
                /* not found - try and create a new one */
                keyring = keyring_alloc(name, tsk->uid, tsk->gid, tsk,
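
The rework above means a user's _uid.<uid> and _uid_ses.<uid> keyrings are no longer allocated with the user_struct but are created on first use by install_user_keyrings(). A small userspace sketch of the on-demand behaviour (illustrative, not part of the patch; assumes libkeyutils, link with -lkeyutils):

    /* the first lookup with create=1 is what ends up triggering
     * install_user_keyrings() on a kernel with this change */
    #include <keyutils.h>
    #include <stdio.h>

    int main(void)
    {
            key_serial_t uid_kr, ses_kr;

            uid_kr = keyctl_get_keyring_ID(KEY_SPEC_USER_KEYRING, 1);
            ses_kr = keyctl_get_keyring_ID(KEY_SPEC_USER_SESSION_KEYRING, 1);
            if (uid_kr < 0 || ses_kr < 0) {
                    perror("keyctl_get_keyring_ID");
                    return 1;
            }
            printf("user keyring %d, user session keyring %d\n",
                   uid_kr, ses_kr);
            return 0;
    }
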
index 5ecc5057fb542c1bb9f914e72f4b95695d05dd70..ba32ca6469bdf2e74a8b0f9cb94d27604b7251af 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/kmod.h>
 #include <linux/err.h>
 #include <linux/keyctl.h>
+#include <linux/slab.h>
 #include "internal.h"
 
 /*
@@ -161,21 +162,22 @@ error_alloc:
  * call out to userspace for key construction
  * - we ignore program failure and go on key status instead
  */
-static int construct_key(struct key *key, const char *callout_info, void *aux)
+static int construct_key(struct key *key, const void *callout_info,
+                        size_t callout_len, void *aux)
 {
        struct key_construction *cons;
        request_key_actor_t actor;
        struct key *authkey;
        int ret;
 
-       kenter("%d,%s,%p", key->serial, callout_info, aux);
+       kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux);
 
        cons = kmalloc(sizeof(*cons), GFP_KERNEL);
        if (!cons)
                return -ENOMEM;
 
        /* allocate an authorisation key */
-       authkey = request_key_auth_new(key, callout_info);
+       authkey = request_key_auth_new(key, callout_info, callout_len);
        if (IS_ERR(authkey)) {
                kfree(cons);
                ret = PTR_ERR(authkey);
@@ -331,6 +333,7 @@ alloc_failed:
 static struct key *construct_key_and_link(struct key_type *type,
                                          const char *description,
                                          const char *callout_info,
+                                         size_t callout_len,
                                          void *aux,
                                          struct key *dest_keyring,
                                          unsigned long flags)
@@ -348,7 +351,7 @@ static struct key *construct_key_and_link(struct key_type *type,
        key_user_put(user);
 
        if (ret == 0) {
-               ret = construct_key(key, callout_info, aux);
+               ret = construct_key(key, callout_info, callout_len, aux);
                if (ret < 0)
                        goto construction_failed;
        }
@@ -370,7 +373,8 @@ construction_failed:
  */
 struct key *request_key_and_link(struct key_type *type,
                                 const char *description,
-                                const char *callout_info,
+                                const void *callout_info,
+                                size_t callout_len,
                                 void *aux,
                                 struct key *dest_keyring,
                                 unsigned long flags)
@@ -378,8 +382,8 @@ struct key *request_key_and_link(struct key_type *type,
        struct key *key;
        key_ref_t key_ref;
 
-       kenter("%s,%s,%s,%p,%p,%lx",
-              type->name, description, callout_info, aux,
+       kenter("%s,%s,%p,%zu,%p,%p,%lx",
+              type->name, description, callout_info, callout_len, aux,
               dest_keyring, flags);
 
        /* search all the process keyrings for a key */
@@ -398,7 +402,8 @@ struct key *request_key_and_link(struct key_type *type,
                        goto error;
 
                key = construct_key_and_link(type, description, callout_info,
-                                            aux, dest_keyring, flags);
+                                            callout_len, aux, dest_keyring,
+                                            flags);
        }
 
 error:
@@ -434,10 +439,13 @@ struct key *request_key(struct key_type *type,
                        const char *callout_info)
 {
        struct key *key;
+       size_t callout_len = 0;
        int ret;
 
-       key = request_key_and_link(type, description, callout_info, NULL,
-                                  NULL, KEY_ALLOC_IN_QUOTA);
+       if (callout_info)
+               callout_len = strlen(callout_info);
+       key = request_key_and_link(type, description, callout_info, callout_len,
+                                  NULL, NULL, KEY_ALLOC_IN_QUOTA);
        if (!IS_ERR(key)) {
                ret = wait_for_key_construction(key, false);
                if (ret < 0) {
@@ -458,14 +466,15 @@ EXPORT_SYMBOL(request_key);
  */
 struct key *request_key_with_auxdata(struct key_type *type,
                                     const char *description,
-                                    const char *callout_info,
+                                    const void *callout_info,
+                                    size_t callout_len,
                                     void *aux)
 {
        struct key *key;
        int ret;
 
-       key = request_key_and_link(type, description, callout_info, aux,
-                                  NULL, KEY_ALLOC_IN_QUOTA);
+       key = request_key_and_link(type, description, callout_info, callout_len,
+                                  aux, NULL, KEY_ALLOC_IN_QUOTA);
        if (!IS_ERR(key)) {
                ret = wait_for_key_construction(key, false);
                if (ret < 0) {
@@ -485,10 +494,12 @@ EXPORT_SYMBOL(request_key_with_auxdata);
  */
 struct key *request_key_async(struct key_type *type,
                              const char *description,
-                             const char *callout_info)
+                             const void *callout_info,
+                             size_t callout_len)
 {
-       return request_key_and_link(type, description, callout_info, NULL,
-                                   NULL, KEY_ALLOC_IN_QUOTA);
+       return request_key_and_link(type, description, callout_info,
+                                   callout_len, NULL, NULL,
+                                   KEY_ALLOC_IN_QUOTA);
 }
 EXPORT_SYMBOL(request_key_async);
 
@@ -500,10 +511,11 @@ EXPORT_SYMBOL(request_key_async);
  */
 struct key *request_key_async_with_auxdata(struct key_type *type,
                                           const char *description,
-                                          const char *callout_info,
+                                          const void *callout_info,
+                                          size_t callout_len,
                                           void *aux)
 {
-       return request_key_and_link(type, description, callout_info, aux,
-                                   NULL, KEY_ALLOC_IN_QUOTA);
+       return request_key_and_link(type, description, callout_info,
+                                   callout_len, aux, NULL, KEY_ALLOC_IN_QUOTA);
 }
 EXPORT_SYMBOL(request_key_async_with_auxdata);
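
With callout_len carried explicitly, callers of request_key_with_auxdata() and request_key_async*() may pass binary callout data rather than a NUL-terminated string. An in-kernel usage sketch (illustrative only; the key type, description and payload are made up, not taken from the patch):

    #include <linux/err.h>
    #include <linux/key.h>
    #include <keys/user-type.h>

    /* binary callout data is possible because its length now travels with
     * the pointer instead of being recovered with strlen() */
    static int example_request(void)
    {
            static const unsigned char callout[] = { 0xde, 0xad, 0xbe, 0xef };
            struct key *key;

            key = request_key_with_auxdata(&key_type_user,
                                           "example:binary-callout",
                                           callout, sizeof(callout), NULL);
            if (IS_ERR(key))
                    return PTR_ERR(key);
            key_put(key);
            return 0;
    }
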
index e42b5252486fe07eb623f413ec61cc7073758da8..bd237b0a6331efffeff58b6ab6b300f823cd76aa 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/err.h>
 #include <linux/seq_file.h>
+#include <linux/slab.h>
 #include <asm/uaccess.h>
 #include "internal.h"
 
@@ -61,7 +62,7 @@ static void request_key_auth_describe(const struct key *key,
 
        seq_puts(m, "key:");
        seq_puts(m, key->description);
-       seq_printf(m, " pid:%d ci:%zu", rka->pid, strlen(rka->callout_info));
+       seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
 
 } /* end request_key_auth_describe() */
 
@@ -77,7 +78,7 @@ static long request_key_auth_read(const struct key *key,
        size_t datalen;
        long ret;
 
-       datalen = strlen(rka->callout_info);
+       datalen = rka->callout_len;
        ret = datalen;
 
        /* we can return the data as is */
@@ -137,7 +138,8 @@ static void request_key_auth_destroy(struct key *key)
  * create an authorisation token for /sbin/request-key or whoever to gain
  * access to the caller's security data
  */
-struct key *request_key_auth_new(struct key *target, const char *callout_info)
+struct key *request_key_auth_new(struct key *target, const void *callout_info,
+                                size_t callout_len)
 {
        struct request_key_auth *rka, *irka;
        struct key *authkey = NULL;
@@ -152,7 +154,7 @@ struct key *request_key_auth_new(struct key *target, const char *callout_info)
                kleave(" = -ENOMEM");
                return ERR_PTR(-ENOMEM);
        }
-       rka->callout_info = kmalloc(strlen(callout_info) + 1, GFP_KERNEL);
+       rka->callout_info = kmalloc(callout_len, GFP_KERNEL);
        if (!rka->callout_info) {
                kleave(" = -ENOMEM");
                kfree(rka);
@@ -186,7 +188,8 @@ struct key *request_key_auth_new(struct key *target, const char *callout_info)
        }
 
        rka->target_key = key_get(target);
-       strcpy(rka->callout_info, callout_info);
+       memcpy(rka->callout_info, callout_info, callout_len);
+       rka->callout_len = callout_len;
 
        /* allocate the auth key */
        sprintf(desc, "%x", target->serial);
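
Since request_key_auth_read() above now returns exactly callout_len bytes, a callout helper can recover binary callout data by reading its authorisation key. A userspace sketch (illustrative, not part of the patch; assumes libkeyutils and a simplified argument layout in which argv[1] is the ID of the key under construction):

    #include <keyutils.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char *argv[])
    {
            key_serial_t target;
            void *callout = NULL;
            long len;

            if (argc < 2)
                    return 1;
            target = (key_serial_t)atol(argv[1]);

            /* assume authority over the key being constructed, then read
             * the callout blob back from the authorisation key */
            if (keyctl_assume_authority(target) < 0) {
                    perror("keyctl_assume_authority");
                    return 1;
            }
            len = keyctl_read_alloc(KEY_SPEC_REQKEY_AUTH_KEY, &callout);
            if (len < 0) {
                    perror("keyctl_read_alloc");
                    return 1;
            }
            fprintf(stderr, "callout blob: %ld bytes\n", len);
            free(callout);
            return 0;
    }
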
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
new file mode 100644 (file)
index 0000000..b611d49
--- /dev/null
@@ -0,0 +1,50 @@
+/* Key management controls
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/key.h>
+#include <linux/sysctl.h>
+#include "internal.h"
+
+ctl_table key_sysctls[] = {
+       {
+               .ctl_name = CTL_UNNUMBERED,
+               .procname = "maxkeys",
+               .data = &key_quota_maxkeys,
+               .maxlen = sizeof(unsigned),
+               .mode = 0644,
+               .proc_handler = &proc_dointvec,
+       },
+       {
+               .ctl_name = CTL_UNNUMBERED,
+               .procname = "maxbytes",
+               .data = &key_quota_maxbytes,
+               .maxlen = sizeof(unsigned),
+               .mode = 0644,
+               .proc_handler = &proc_dointvec,
+       },
+       {
+               .ctl_name = CTL_UNNUMBERED,
+               .procname = "root_maxkeys",
+               .data = &key_quota_root_maxkeys,
+               .maxlen = sizeof(unsigned),
+               .mode = 0644,
+               .proc_handler = &proc_dointvec,
+       },
+       {
+               .ctl_name = CTL_UNNUMBERED,
+               .procname = "root_maxbytes",
+               .data = &key_quota_root_maxbytes,
+               .maxlen = sizeof(unsigned),
+               .mode = 0644,
+               .proc_handler = &proc_dointvec,
+       },
+       { .ctl_name = 0 }
+};
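
These four ctl_table entries back the key quota limits used elsewhere in the series; they are expected to be registered under kernel.keys, i.e. /proc/sys/kernel/keys/{maxkeys,maxbytes,root_maxkeys,root_maxbytes} (the registration is not part of this file, so the path is an assumption here). A minimal userspace sketch for reading one of them:

    #include <stdio.h>

    int main(void)
    {
            unsigned int maxkeys;
            FILE *f = fopen("/proc/sys/kernel/keys/maxkeys", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fscanf(f, "%u", &maxkeys) != 1) {
                    fclose(f);
                    return 1;
            }
            printf("per-user key limit: %u\n", maxkeys);
            fclose(f);
            return 0;
    }
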
index 6112d1404c818d40077a2417679decb9047bf40d..a41cf42a4fa0b5d68921f632d329dbf5cb9d7d76 100644 (file)
@@ -86,6 +86,7 @@ static struct security_operations rootplug_security_ops = {
 
        .task_post_setuid =             cap_task_post_setuid,
        .task_reparent_to_init =        cap_task_reparent_to_init,
+       .task_prctl =                   cap_task_prctl,
 
        .bprm_check_security =          rootplug_bprm_check_security,
 };
index 8a285c7b99629462f75078a94d525dec39cf17a0..59838a99b80e981d27ae892e7d3aa0204bb484c1 100644 (file)
@@ -491,23 +491,23 @@ void security_inode_delete(struct inode *inode)
        security_ops->inode_delete(inode);
 }
 
-int security_inode_setxattr(struct dentry *dentry, char *name,
-                            void *value, size_t size, int flags)
+int security_inode_setxattr(struct dentry *dentry, const char *name,
+                           const void *value, size_t size, int flags)
 {
        if (unlikely(IS_PRIVATE(dentry->d_inode)))
                return 0;
        return security_ops->inode_setxattr(dentry, name, value, size, flags);
 }
 
-void security_inode_post_setxattr(struct dentry *dentry, char *name,
-                                  void *value, size_t size, int flags)
+void security_inode_post_setxattr(struct dentry *dentry, const char *name,
+                                 const void *value, size_t size, int flags)
 {
        if (unlikely(IS_PRIVATE(dentry->d_inode)))
                return;
        security_ops->inode_post_setxattr(dentry, name, value, size, flags);
 }
 
-int security_inode_getxattr(struct dentry *dentry, char *name)
+int security_inode_getxattr(struct dentry *dentry, const char *name)
 {
        if (unlikely(IS_PRIVATE(dentry->d_inode)))
                return 0;
@@ -521,7 +521,7 @@ int security_inode_listxattr(struct dentry *dentry)
        return security_ops->inode_listxattr(dentry);
 }
 
-int security_inode_removexattr(struct dentry *dentry, char *name)
+int security_inode_removexattr(struct dentry *dentry, const char *name)
 {
        if (unlikely(IS_PRIVATE(dentry->d_inode)))
                return 0;
@@ -733,9 +733,9 @@ int security_task_wait(struct task_struct *p)
 }
 
 int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
-                        unsigned long arg4, unsigned long arg5)
+                        unsigned long arg4, unsigned long arg5, long *rc_p)
 {
-       return security_ops->task_prctl(option, arg2, arg3, arg4, arg5);
+       return security_ops->task_prctl(option, arg2, arg3, arg4, arg5, rc_p);
 }
 
 void security_task_reparent_to_init(struct task_struct *p)
@@ -886,7 +886,7 @@ int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
 }
 EXPORT_SYMBOL(security_secid_to_secctx);
 
-int security_secctx_to_secid(char *secdata, u32 seclen, u32 *secid)
+int security_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
 {
        return security_ops->secctx_to_secid(secdata, seclen, secid);
 }
@@ -1156,6 +1156,11 @@ int security_key_permission(key_ref_t key_ref,
        return security_ops->key_permission(key_ref, context, perm);
 }
 
+int security_key_getsecurity(struct key *key, char **_buffer)
+{
+       return security_ops->key_getsecurity(key, _buffer);
+}
+
 #endif /* CONFIG_KEYS */
 
 #ifdef CONFIG_AUDIT
index 95a8ef4a5073fbcde9f2fcec8a5301cf7db27eb2..114b4b4c97b23b9acdf25e92d1b250f78167457d 100644 (file)
@@ -646,7 +646,7 @@ void avc_audit(u32 ssid, u32 tsid,
                                        if (*p)
                                                audit_log_untrustedstring(ab, p);
                                        else
-                                               audit_log_hex(ab, p, len);
+                                               audit_log_n_hex(ab, p, len);
                                        break;
                                }
                        }
index 308e2cf17d75ef363907243359fcf1c64a9d0800..1b50a6ebc55fca85edaa9b7ef6256f31cc07ad4d 100644 (file)
@@ -2619,7 +2619,7 @@ static int selinux_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
        return dentry_has_perm(current, mnt, dentry, FILE__GETATTR);
 }
 
-static int selinux_inode_setotherxattr(struct dentry *dentry, char *name)
+static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name)
 {
        if (!strncmp(name, XATTR_SECURITY_PREFIX,
                     sizeof XATTR_SECURITY_PREFIX - 1)) {
@@ -2638,7 +2638,8 @@ static int selinux_inode_setotherxattr(struct dentry *dentry, char *name)
        return dentry_has_perm(current, NULL, dentry, FILE__SETATTR);
 }
 
-static int selinux_inode_setxattr(struct dentry *dentry, char *name, void *value, size_t size, int flags)
+static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
+                                 const void *value, size_t size, int flags)
 {
        struct task_security_struct *tsec = current->security;
        struct inode *inode = dentry->d_inode;
@@ -2687,8 +2688,9 @@ static int selinux_inode_setxattr(struct dentry *dentry, char *name, void *value
                            &ad);
 }
 
-static void selinux_inode_post_setxattr(struct dentry *dentry, char *name,
-                                       void *value, size_t size, int flags)
+static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
+                                        const void *value, size_t size,
+                                       int flags)
 {
        struct inode *inode = dentry->d_inode;
        struct inode_security_struct *isec = inode->i_security;
@@ -2711,7 +2713,7 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, char *name,
        return;
 }
 
-static int selinux_inode_getxattr(struct dentry *dentry, char *name)
+static int selinux_inode_getxattr(struct dentry *dentry, const char *name)
 {
        return dentry_has_perm(current, NULL, dentry, FILE__GETATTR);
 }
@@ -2721,7 +2723,7 @@ static int selinux_inode_listxattr(struct dentry *dentry)
        return dentry_has_perm(current, NULL, dentry, FILE__GETATTR);
 }
 
-static int selinux_inode_removexattr(struct dentry *dentry, char *name)
+static int selinux_inode_removexattr(struct dentry *dentry, const char *name)
 {
        if (strcmp(name, XATTR_NAME_SELINUX))
                return selinux_inode_setotherxattr(dentry, name);
@@ -3284,9 +3286,6 @@ static int selinux_task_kill(struct task_struct *p, struct siginfo *info,
        if (rc)
                return rc;
 
-       if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
-               return 0;
-
        if (!sig)
                perm = PROCESS__SIGNULL; /* null signal; existence test */
        else
@@ -3303,12 +3302,13 @@ static int selinux_task_prctl(int option,
                              unsigned long arg2,
                              unsigned long arg3,
                              unsigned long arg4,
-                             unsigned long arg5)
+                             unsigned long arg5,
+                             long *rc_p)
 {
        /* The current prctl operations do not appear to require
           any SELinux controls since they merely observe or modify
           the state of the current process. */
-       return 0;
+       return secondary_ops->task_prctl(option, arg2, arg3, arg4, arg5, rc_p);
 }
 
 static int selinux_task_wait(struct task_struct *p)
@@ -5235,7 +5235,7 @@ static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
        return security_sid_to_context(secid, secdata, seclen);
 }
 
-static int selinux_secctx_to_secid(char *secdata, u32 seclen, u32 *secid)
+static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
 {
        return security_context_to_sid(secdata, seclen, secid);
 }
@@ -5297,6 +5297,20 @@ static int selinux_key_permission(key_ref_t key_ref,
                            SECCLASS_KEY, perm, NULL);
 }
 
+static int selinux_key_getsecurity(struct key *key, char **_buffer)
+{
+       struct key_security_struct *ksec = key->security;
+       char *context = NULL;
+       unsigned len;
+       int rc;
+
+       rc = security_sid_to_context(ksec->sid, &context, &len);
+       if (!rc)
+               rc = len;
+       *_buffer = context;
+       return rc;
+}
+
 #endif
 
 static struct security_operations selinux_ops = {
@@ -5485,6 +5499,7 @@ static struct security_operations selinux_ops = {
        .key_alloc =                    selinux_key_alloc,
        .key_free =                     selinux_key_free,
        .key_permission =               selinux_key_permission,
+       .key_getsecurity =              selinux_key_getsecurity,
 #endif
 
 #ifdef CONFIG_AUDIT
@@ -5533,14 +5548,6 @@ static __init int selinux_init(void)
        else
                printk(KERN_DEBUG "SELinux:  Starting in permissive mode\n");
 
-#ifdef CONFIG_KEYS
-       /* Add security information to initial keyrings */
-       selinux_key_alloc(&root_user_keyring, current,
-                         KEY_ALLOC_NOT_IN_QUOTA);
-       selinux_key_alloc(&root_session_keyring, current,
-                         KEY_ALLOC_NOT_IN_QUOTA);
-#endif
-
        return 0;
 }
 
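
selinux_key_getsecurity() renders a key's SID as a context string for the new key_getsecurity hook; the keyctl plumbing that exposes it to userspace lands elsewhere in this merge. A userspace sketch (illustrative, not part of the patch; assumes a libkeyutils new enough to wrap KEYCTL_GET_SECURITY, link with -lkeyutils):

    #include <keyutils.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            char *context = NULL;
            key_serial_t kr;

            kr = keyctl_get_keyring_ID(KEY_SPEC_SESSION_KEYRING, 0);
            if (kr < 0 || keyctl_get_security_alloc(kr, &context) < 0) {
                    perror("keyctl");
                    return 1;
            }
            printf("key %d context: %s\n", kr, context);
            free(context);
            return 0;
    }
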
index ff869e8b6f4a044852234c78b6e412000eac8153..c0d314d9f8e1d2346b017f81e71766990bef1d79 100644 (file)
 
 int avc_ss_reset(u32 seqno);
 
-struct av_perm_to_string
-{
+struct av_perm_to_string {
        u16 tclass;
        u32 value;
        const char *name;
 };
 
-struct av_inherit
-{
+struct av_inherit {
        u16 tclass;
        const char **common_pts;
        u32 common_base;
 };
 
-struct selinux_class_perm
-{
+struct selinux_class_perm {
        const struct av_perm_to_string *av_perm_to_string;
        u32 av_pts_len;
        const char **class_to_string;
index 9a9e7cd9a37984a3c67e7b2d0e31662c0d1ff2a6..487a7d81fe204ae35df5d96d390b46e7652548fc 100644 (file)
@@ -64,7 +64,7 @@ static inline void selinux_netlbl_cache_invalidate(void)
 }
 
 static inline void selinux_netlbl_sk_security_reset(
-                                              struct sk_security_struct *ssec,
+                                              struct sk_security_struct *ssec,
                                               int family)
 {
        return;
index 300b61bad7b3738cf5934b9b3e328d410361e418..032c2357dad1d4cef675bfc05a5e2ebb567cee7e 100644 (file)
@@ -4,16 +4,16 @@
  *  This file contains the SELinux security data structures for kernel objects.
  *
  *  Author(s):  Stephen Smalley, <sds@epoch.ncsc.mil>
- *              Chris Vance, <cvance@nai.com>
- *              Wayne Salamon, <wsalamon@nai.com>
- *              James Morris <jmorris@redhat.com>
+ *             Chris Vance, <cvance@nai.com>
+ *             Wayne Salamon, <wsalamon@nai.com>
+ *             James Morris <jmorris@redhat.com>
  *
  *  Copyright (C) 2001,2002 Networks Associates Technology, Inc.
  *  Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
  *
  *     This program is free software; you can redistribute it and/or modify
  *     it under the terms of the GNU General Public License version 2,
- *      as published by the Free Software Foundation.
+ *     as published by the Free Software Foundation.
  */
 #ifndef _SELINUX_OBJSEC_H_
 #define _SELINUX_OBJSEC_H_
 #include "avc.h"
 
 struct task_security_struct {
-       u32 osid;            /* SID prior to last execve */
-       u32 sid;             /* current SID */
-       u32 exec_sid;        /* exec SID */
-       u32 create_sid;      /* fscreate SID */
-       u32 keycreate_sid;   /* keycreate SID */
-       u32 sockcreate_sid;  /* fscreate SID */
+       u32 osid;               /* SID prior to last execve */
+       u32 sid;                /* current SID */
+       u32 exec_sid;           /* exec SID */
+       u32 create_sid;         /* fscreate SID */
+       u32 keycreate_sid;      /* keycreate SID */
+       u32 sockcreate_sid;     /* fscreate SID */
 };
 
 struct inode_security_struct {
-        struct inode *inode;           /* back pointer to inode object */
-       struct list_head list;         /* list of inode_security_struct */
-       u32 task_sid;        /* SID of creating task */
-       u32 sid;             /* SID of this object */
-       u16 sclass;       /* security class of this object */
-       unsigned char initialized;     /* initialization flag */
+       struct inode *inode;    /* back pointer to inode object */
+       struct list_head list;  /* list of inode_security_struct */
+       u32 task_sid;           /* SID of creating task */
+       u32 sid;                /* SID of this object */
+       u16 sclass;             /* security class of this object */
+       unsigned char initialized;      /* initialization flag */
        struct mutex lock;
-       unsigned char inherit;         /* inherit SID from parent entry */
+       unsigned char inherit;  /* inherit SID from parent entry */
 };
 
 struct file_security_struct {
-       u32 sid;              /* SID of open file description */
-       u32 fown_sid;         /* SID of file owner (for SIGIO) */
-       u32 isid;             /* SID of inode at the time of file open */
-       u32 pseqno;           /* Policy seqno at the time of file open */
+       u32 sid;                /* SID of open file description */
+       u32 fown_sid;           /* SID of file owner (for SIGIO) */
+       u32 isid;               /* SID of inode at the time of file open */
+       u32 pseqno;             /* Policy seqno at the time of file open */
 };
 
 struct superblock_security_struct {
-       struct super_block *sb;         /* back pointer to sb object */
-       struct list_head list;          /* list of superblock_security_struct */
+       struct super_block *sb;         /* back pointer to sb object */
+       struct list_head list;          /* list of superblock_security_struct */
        u32 sid;                        /* SID of file system superblock */
        u32 def_sid;                    /* default SID for labeling */
        u32 mntpoint_sid;               /* SECURITY_FS_USE_MNTPOINT context for files */
-       unsigned int behavior;          /* labeling behavior */
-       unsigned char initialized;      /* initialization flag */
+       unsigned int behavior;          /* labeling behavior */
+       unsigned char initialized;      /* initialization flag */
        unsigned char flags;            /* which mount options were specified */
-       unsigned char proc;             /* proc fs */
+       unsigned char proc;             /* proc fs */
        struct mutex lock;
        struct list_head isec_head;
        spinlock_t isec_lock;
 };
 
 struct msg_security_struct {
-       u32 sid;              /* SID of message */
+       u32 sid;        /* SID of message */
 };
 
 struct ipc_security_struct {
        u16 sclass;     /* security class of this object */
-       u32 sid;              /* SID of IPC resource */
+       u32 sid;        /* SID of IPC resource */
 };
 
 struct bprm_security_struct {
-       u32 sid;                       /* SID for transformed process */
+       u32 sid;                /* SID for transformed process */
        unsigned char set;
 
        /*
@@ -123,7 +123,7 @@ struct sk_security_struct {
 };
 
 struct key_security_struct {
-       u32 sid;         /* SID of key */
+       u32 sid;        /* SID of key */
 };
 
 extern unsigned int selinux_checkreqprot;
index 1904c462a605fc99dd29b9107c6c797d7ed4d62d..ad30ac4273d629b37e9f93ae2cbf143db36fc4f0 100644 (file)
@@ -62,7 +62,7 @@ enum {
 extern int selinux_policycap_netpeer;
 extern int selinux_policycap_openperm;
 
-int security_load_policy(void * data, size_t len);
+int security_load_policy(void *data, size_t len);
 
 int security_policycap_supported(unsigned int req_cap);
 
@@ -93,10 +93,10 @@ int security_change_sid(u32 ssid, u32 tsid,
 int security_sid_to_context(u32 sid, char **scontext,
        u32 *scontext_len);
 
-int security_context_to_sid(char *scontext, u32 scontext_len,
+int security_context_to_sid(const char *scontext, u32 scontext_len,
        u32 *out_sid);
 
-int security_context_to_sid_default(char *scontext, u32 scontext_len,
+int security_context_to_sid_default(const char *scontext, u32 scontext_len,
                                    u32 *out_sid, u32 def_sid, gfp_t gfp_flags);
 
 int security_get_user_sids(u32 callsid, char *username,
@@ -110,7 +110,7 @@ int security_node_sid(u16 domain, void *addr, u32 addrlen,
        u32 *out_sid);
 
 int security_validate_transition(u32 oldsid, u32 newsid, u32 tasksid,
-                                 u16 tclass);
+                                u16 tclass);
 
 int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid);
 
index 2edc4c5e0c619ab532ff63763ef455a210e44fee..b6ccd09379f15456c04e32c89dca4c5119baf128 100644 (file)
 #include <net/ipv6.h>
 #include <asm/bug.h>
 
+#include "netnode.h"
 #include "objsec.h"
 
 #define SEL_NETNODE_HASH_SIZE       256
 #define SEL_NETNODE_HASH_BKT_LIMIT   16
 
+struct sel_netnode_bkt {
+       unsigned int size;
+       struct list_head list;
+};
+
 struct sel_netnode {
        struct netnode_security_struct nsec;
 
@@ -60,7 +66,7 @@ struct sel_netnode {
 
 static LIST_HEAD(sel_netnode_list);
 static DEFINE_SPINLOCK(sel_netnode_lock);
-static struct list_head sel_netnode_hash[SEL_NETNODE_HASH_SIZE];
+static struct sel_netnode_bkt sel_netnode_hash[SEL_NETNODE_HASH_SIZE];
 
 /**
  * sel_netnode_free - Frees a node entry
@@ -87,7 +93,7 @@ static void sel_netnode_free(struct rcu_head *p)
  * the bucket number for the given IP address.
  *
  */
-static u32 sel_netnode_hashfn_ipv4(__be32 addr)
+static unsigned int sel_netnode_hashfn_ipv4(__be32 addr)
 {
        /* at some point we should determine if the mismatch in byte order
         * affects the hash function dramatically */
@@ -103,7 +109,7 @@ static u32 sel_netnode_hashfn_ipv4(__be32 addr)
  * the bucket number for the given IP address.
  *
  */
-static u32 sel_netnode_hashfn_ipv6(const struct in6_addr *addr)
+static unsigned int sel_netnode_hashfn_ipv6(const struct in6_addr *addr)
 {
        /* just hash the least significant 32 bits to keep things fast (they
         * are the most likely to be different anyway), we can revisit this
@@ -123,7 +129,7 @@ static u32 sel_netnode_hashfn_ipv6(const struct in6_addr *addr)
  */
 static struct sel_netnode *sel_netnode_find(const void *addr, u16 family)
 {
-       u32 idx;
+       unsigned int idx;
        struct sel_netnode *node;
 
        switch (family) {
@@ -137,7 +143,7 @@ static struct sel_netnode *sel_netnode_find(const void *addr, u16 family)
                BUG();
        }
 
-       list_for_each_entry_rcu(node, &sel_netnode_hash[idx], list)
+       list_for_each_entry_rcu(node, &sel_netnode_hash[idx].list, list)
                if (node->nsec.family == family)
                        switch (family) {
                        case PF_INET:
@@ -159,15 +165,12 @@ static struct sel_netnode *sel_netnode_find(const void *addr, u16 family)
  * @node: the new node record
  *
  * Description:
- * Add a new node record to the network address hash table.  Returns zero on
- * success, negative values on failure.
+ * Add a new node record to the network address hash table.
  *
  */
-static int sel_netnode_insert(struct sel_netnode *node)
+static void sel_netnode_insert(struct sel_netnode *node)
 {
-       u32 idx;
-       u32 count = 0;
-       struct sel_netnode *iter;
+       unsigned int idx;
 
        switch (node->nsec.family) {
        case PF_INET:
@@ -179,32 +182,21 @@ static int sel_netnode_insert(struct sel_netnode *node)
        default:
                BUG();
        }
-       list_add_rcu(&node->list, &sel_netnode_hash[idx]);
+
+       INIT_RCU_HEAD(&node->rcu);
 
        /* we need to impose a limit on the growth of the hash table so check
         * this bucket to make sure it is within the specified bounds */
-       list_for_each_entry(iter, &sel_netnode_hash[idx], list)
-               if (++count > SEL_NETNODE_HASH_BKT_LIMIT) {
-                       list_del_rcu(&iter->list);
-                       call_rcu(&iter->rcu, sel_netnode_free);
-                       break;
-               }
-
-       return 0;
-}
-
-/**
- * sel_netnode_destroy - Remove a node record from the table
- * @node: the existing node record
- *
- * Description:
- * Remove an existing node record from the network address table.
- *
- */
-static void sel_netnode_destroy(struct sel_netnode *node)
-{
-       list_del_rcu(&node->list);
-       call_rcu(&node->rcu, sel_netnode_free);
+       list_add_rcu(&node->list, &sel_netnode_hash[idx].list);
+       if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
+               struct sel_netnode *tail;
+               tail = list_entry(
+                       rcu_dereference(sel_netnode_hash[idx].list.prev),
+                       struct sel_netnode, list);
+               list_del_rcu(&tail->list);
+               call_rcu(&tail->rcu, sel_netnode_free);
+       } else
+               sel_netnode_hash[idx].size++;
 }
 
 /**
@@ -222,7 +214,7 @@ static void sel_netnode_destroy(struct sel_netnode *node)
  */
 static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
 {
-       int ret;
+       int ret = -ENOMEM;
        struct sel_netnode *node;
        struct sel_netnode *new = NULL;
 
@@ -230,25 +222,21 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
        node = sel_netnode_find(addr, family);
        if (node != NULL) {
                *sid = node->nsec.sid;
-               ret = 0;
-               goto out;
+               spin_unlock_bh(&sel_netnode_lock);
+               return 0;
        }
        new = kzalloc(sizeof(*new), GFP_ATOMIC);
-       if (new == NULL) {
-               ret = -ENOMEM;
+       if (new == NULL)
                goto out;
-       }
        switch (family) {
        case PF_INET:
                ret = security_node_sid(PF_INET,
-                                       addr, sizeof(struct in_addr),
-                                       &new->nsec.sid);
+                                       addr, sizeof(struct in_addr), sid);
                new->nsec.addr.ipv4 = *(__be32 *)addr;
                break;
        case PF_INET6:
                ret = security_node_sid(PF_INET6,
-                                       addr, sizeof(struct in6_addr),
-                                       &new->nsec.sid);
+                                       addr, sizeof(struct in6_addr), sid);
                ipv6_addr_copy(&new->nsec.addr.ipv6, addr);
                break;
        default:
@@ -256,11 +244,10 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
        }
        if (ret != 0)
                goto out;
+
        new->nsec.family = family;
-       ret = sel_netnode_insert(new);
-       if (ret != 0)
-               goto out;
-       *sid = new->nsec.sid;
+       new->nsec.sid = *sid;
+       sel_netnode_insert(new);
 
 out:
        spin_unlock_bh(&sel_netnode_lock);
@@ -312,13 +299,18 @@ int sel_netnode_sid(void *addr, u16 family, u32 *sid)
  */
 static void sel_netnode_flush(void)
 {
-       u32 idx;
-       struct sel_netnode *node;
+       unsigned int idx;
+       struct sel_netnode *node, *node_tmp;
 
        spin_lock_bh(&sel_netnode_lock);
-       for (idx = 0; idx < SEL_NETNODE_HASH_SIZE; idx++)
-               list_for_each_entry(node, &sel_netnode_hash[idx], list)
-                       sel_netnode_destroy(node);
+       for (idx = 0; idx < SEL_NETNODE_HASH_SIZE; idx++) {
+               list_for_each_entry_safe(node, node_tmp,
+                                        &sel_netnode_hash[idx].list, list) {
+                               list_del_rcu(&node->list);
+                               call_rcu(&node->rcu, sel_netnode_free);
+               }
+               sel_netnode_hash[idx].size = 0;
+       }
        spin_unlock_bh(&sel_netnode_lock);
 }
 
@@ -340,8 +332,10 @@ static __init int sel_netnode_init(void)
        if (!selinux_enabled)
                return 0;
 
-       for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++)
-               INIT_LIST_HEAD(&sel_netnode_hash[iter]);
+       for (iter = 0; iter < SEL_NETNODE_HASH_SIZE; iter++) {
+               INIT_LIST_HEAD(&sel_netnode_hash[iter].list);
+               sel_netnode_hash[iter].size = 0;
+       }
 
        ret = avc_add_callback(sel_netnode_avc_callback, AVC_CALLBACK_RESET,
                               SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
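
The insert path above (and the matching sel_netport_insert() below) now keeps each hash bucket bounded: new entries go on the front of the list and, once a bucket already holds SEL_NETNODE_HASH_BKT_LIMIT entries, the oldest entry at the tail is dropped instead of letting the bucket grow. A plain userspace sketch of that policy, without the RCU machinery (illustrative only):

    #include <stdlib.h>

    #define LIMIT 16

    struct node   { struct node *next; unsigned int key; };
    struct bucket { unsigned int size; struct node *head; };

    static void bucket_insert(struct bucket *b, struct node *n)
    {
            n->next = b->head;                      /* newest at the front */
            b->head = n;
            if (b->size == LIMIT) {                 /* full: evict the tail */
                    struct node **pp = &b->head;
                    while ((*pp)->next)
                            pp = &(*pp)->next;
                    free(*pp);
                    *pp = NULL;
            } else {
                    b->size++;
            }
    }

    int main(void)
    {
            struct bucket b = { 0, NULL };
            unsigned int i;

            for (i = 0; i < 40; i++) {
                    struct node *n = malloc(sizeof(*n));

                    if (!n)
                            break;
                    n->key = i;
                    bucket_insert(&b, n);   /* b.size never exceeds LIMIT */
            }
            return 0;
    }
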
index 68ede3c498ab434dd7dcd41e736176c4f7fd0224..90b4cff7c350280e151d8ba6fd65fc09775c92fd 100644 (file)
@@ -114,8 +114,7 @@ static struct sel_netport *sel_netport_find(u8 protocol, u16 pnum)
 
        idx = sel_netport_hashfn(pnum);
        list_for_each_entry_rcu(port, &sel_netport_hash[idx].list, list)
-               if (port->psec.port == pnum &&
-                   port->psec.protocol == protocol)
+               if (port->psec.port == pnum && port->psec.protocol == protocol)
                        return port;
 
        return NULL;
@@ -126,11 +125,10 @@ static struct sel_netport *sel_netport_find(u8 protocol, u16 pnum)
  * @port: the new port record
  *
  * Description:
- * Add a new port record to the network address hash table.  Returns zero on
- * success, negative values on failure.
+ * Add a new port record to the network address hash table.
  *
  */
-static int sel_netport_insert(struct sel_netport *port)
+static void sel_netport_insert(struct sel_netport *port)
 {
        unsigned int idx;
 
@@ -140,13 +138,13 @@ static int sel_netport_insert(struct sel_netport *port)
        list_add_rcu(&port->list, &sel_netport_hash[idx].list);
        if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
                struct sel_netport *tail;
-               tail = list_entry(port->list.prev, struct sel_netport, list);
-               list_del_rcu(port->list.prev);
+               tail = list_entry(
+                       rcu_dereference(sel_netport_hash[idx].list.prev),
+                       struct sel_netport, list);
+               list_del_rcu(&tail->list);
                call_rcu(&tail->rcu, sel_netport_free);
        } else
                sel_netport_hash[idx].size++;
-
-       return 0;
 }
 
 /**
@@ -163,7 +161,7 @@ static int sel_netport_insert(struct sel_netport *port)
  */
 static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
 {
-       int ret;
+       int ret = -ENOMEM;
        struct sel_netport *port;
        struct sel_netport *new = NULL;
 
@@ -171,23 +169,20 @@ static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid)
        port = sel_netport_find(protocol, pnum);
        if (port != NULL) {
                *sid = port->psec.sid;
-               ret = 0;
-               goto out;
+               spin_unlock_bh(&sel_netport_lock);
+               return 0;
        }
        new = kzalloc(sizeof(*new), GFP_ATOMIC);
-       if (new == NULL) {
-               ret = -ENOMEM;
+       if (new == NULL)
                goto out;
-       }
-       ret = security_port_sid(protocol, pnum, &new->psec.sid);
+       ret = security_port_sid(protocol, pnum, sid);
        if (ret != 0)
                goto out;
+
        new->psec.port = pnum;
        new->psec.protocol = protocol;
-       ret = sel_netport_insert(new);
-       if (ret != 0)
-               goto out;
-       *sid = new->psec.sid;
+       new->psec.sid = *sid;
+       sel_netport_insert(new);
 
 out:
        spin_unlock_bh(&sel_netport_lock);
@@ -239,11 +234,12 @@ int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid)
 static void sel_netport_flush(void)
 {
        unsigned int idx;
-       struct sel_netport *port;
+       struct sel_netport *port, *port_tmp;
 
        spin_lock_bh(&sel_netport_lock);
        for (idx = 0; idx < SEL_NETPORT_HASH_SIZE; idx++) {
-               list_for_each_entry(port, &sel_netport_hash[idx].list, list) {
+               list_for_each_entry_safe(port, port_tmp,
+                                        &sel_netport_hash[idx].list, list) {
                        list_del_rcu(&port->list);
                        call_rcu(&port->rcu, sel_netport_free);
                }
index f3a1fc6e5d664952f66c48e8ee869a6f66845a53..65b9f8366e9c8635020c934e66f12a4901bbc1d8 100644 (file)
@@ -59,10 +59,10 @@ struct cond_node {
        struct cond_node *next;
 };
 
-int cond_policydb_init(struct policydb* p);
-void cond_policydb_destroy(struct policydb* p);
+int cond_policydb_init(struct policydb *p);
+void cond_policydb_destroy(struct policydb *p);
 
-int cond_init_bool_indexes(struct policydb* p);
+int cond_init_bool_indexes(struct policydb *p);
 int cond_destroy_bool(void *key, void *datum, void *p);
 
 int cond_index_bool(void *key, void *datum, void *datap);
index 2eee0dab524d831585624f722f40e8f3e86b61a3..b9a6f7fc62fca2b0b251ccb2843f60655426c944 100644 (file)
@@ -84,9 +84,9 @@ static inline int mls_context_cmp(struct context *c1, struct context *c2)
                return 1;
 
        return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
-               ebitmap_cmp(&c1->range.level[0].cat,&c2->range.level[0].cat) &&
+               ebitmap_cmp(&c1->range.level[0].cat, &c2->range.level[0].cat) &&
                (c1->range.level[1].sens == c2->range.level[1].sens) &&
-               ebitmap_cmp(&c1->range.level[1].cat,&c2->range.level[1].cat));
+               ebitmap_cmp(&c1->range.level[1].cat, &c2->range.level[1].cat));
 }
 
 static inline void mls_context_destroy(struct context *c)
index 7e2ff3e3c6d220759c0d123f024c904dfc4b3d69..953872cd84ab0a17bf161154b1dfd38080357ff5 100644 (file)
@@ -40,8 +40,8 @@ struct hashtab_info {
  * the new hash table otherwise.
  */
 struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key),
-                               int (*keycmp)(struct hashtab *h, const void *key1, const void *key2),
-                               u32 size);
+                              int (*keycmp)(struct hashtab *h, const void *key1, const void *key2),
+                              u32 size);
 
 /*
  * Inserts the specified (key, datum) pair into the specified hash table.
@@ -49,7 +49,7 @@ struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *
  * Returns -ENOMEM on memory allocation error,
  * -EEXIST if there is already an entry with the same key,
  * -EINVAL for general errors or
- * 0 otherwise.
+  0 otherwise.
  */
 int hashtab_insert(struct hashtab *h, void *k, void *d);
 
index ab53663d9f5f96691915d568954712260e92f2ae..0fdf6257ef6429928eed044d33d9b025ecda7b49 100644 (file)
@@ -13,7 +13,7 @@
 /*
  * Updated: Hewlett-Packard <paul.moore@hp.com>
  *
- *      Added support to import/export the MLS label from NetLabel
+ *     Added support to import/export the MLS label from NetLabel
  *
  * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
  */
@@ -31,7 +31,7 @@ int mls_range_isvalid(struct policydb *p, struct mls_range *r);
 int mls_level_isvalid(struct policydb *p, struct mls_level *l);
 
 int mls_context_to_sid(char oldc,
-                      char **scontext,
+                      char **scontext,
                       struct context *context,
                       struct sidtab *s,
                       u32 def_sid);
@@ -49,7 +49,7 @@ int mls_compute_sid(struct context *scontext,
                    struct context *newcontext);
 
 int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
-                         struct context *usercon);
+                        struct context *usercon);
 
 #ifdef CONFIG_NETLABEL
 void mls_export_netlbl_lvl(struct context *context,
index 0c692d58d489fe31b4977b0916d0fb6dee401630..b6e943a210610db265022c1b6af9dfddc8f57021 100644 (file)
@@ -31,7 +31,7 @@ static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
                return 1;
 
        return ((l1->sens == l2->sens) &&
-               ebitmap_cmp(&l1->cat, &l2->cat));
+               ebitmap_cmp(&l1->cat, &l2->cat));
 }
 
 static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
@@ -40,7 +40,7 @@ static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
                return 1;
 
        return ((l1->sens >= l2->sens) &&
-               ebitmap_contains(&l1->cat, &l2->cat));
+               ebitmap_contains(&l1->cat, &l2->cat));
 }
 
 #define mls_level_incomp(l1, l2) \
index ba593a3da8775e9d3b18eea4802fc4c8466667a5..4253370fda6a22cf7353b0c5c415316fbaa05f8c 100644 (file)
  *
  * Updated: Frank Mayer <mayerf@tresys.com> and Karl MacMillan <kmacmillan@tresys.com>
  *
- *     Added conditional policy language extensions
+ *     Added conditional policy language extensions
  *
  * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
  * Copyright (C) 2003 - 2004 Tresys Technology, LLC
  *     This program is free software; you can redistribute it and/or modify
- *     it under the terms of the GNU General Public License as published by
+ *     it under the terms of the GNU General Public License as published by
  *     the Free Software Foundation, version 2.
  */
 
@@ -221,7 +221,7 @@ struct policydb {
        /* type enforcement conditional access vectors and transitions */
        struct avtab te_cond_avtab;
        /* linked list indexing te_cond_avtab by conditional */
-       struct cond_node* cond_list;
+       struct cond_node *cond_list;
 
        /* role allows */
        struct role_allow *role_allow;
@@ -230,10 +230,10 @@ struct policydb {
           TCP or UDP port numbers, network interfaces and nodes */
        struct ocontext *ocontexts[OCON_NUM];
 
-        /* security contexts for files in filesystems that cannot support
+       /* security contexts for files in filesystems that cannot support
           a persistent label mapping or use another
           fixed labeling behavior. */
-       struct genfs *genfs;
+       struct genfs *genfs;
 
        /* range transitions */
        struct range_trans *range_tr;
index 2daaddbb301d27e549a372d88f26fb6b809c3900..dcc2e1c4fd83df58438bfd8592da112bfaa4b28d 100644 (file)
@@ -708,7 +708,7 @@ out:
 
 }
 
-static int security_context_to_sid_core(char *scontext, u32 scontext_len,
+static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
                                        u32 *sid, u32 def_sid, gfp_t gfp_flags)
 {
        char *scontext2;
@@ -835,7 +835,7 @@ out:
  * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
  * memory is available, or 0 on success.
  */
-int security_context_to_sid(char *scontext, u32 scontext_len, u32 *sid)
+int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid)
 {
        return security_context_to_sid_core(scontext, scontext_len,
                                            sid, SECSID_NULL, GFP_KERNEL);
@@ -858,8 +858,8 @@ int security_context_to_sid(char *scontext, u32 scontext_len, u32 *sid)
  * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
  * memory is available, or 0 on success.
  */
-int security_context_to_sid_default(char *scontext, u32 scontext_len, u32 *sid,
-                                   u32 def_sid, gfp_t gfp_flags)
+int security_context_to_sid_default(const char *scontext, u32 scontext_len,
+                                   u32 *sid, u32 def_sid, gfp_t gfp_flags)
 {
        return security_context_to_sid_core(scontext, scontext_len,
                                            sid, def_sid, gfp_flags);
index 4215971434e6bb9bc1b0b174428253be9ac1c660..b5c8f9237008618630d95cdf58ba945b06fe94d0 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/pipe_fs_i.h>
 #include <net/netlabel.h>
 #include <net/cipso_ipv4.h>
+#include <linux/audit.h>
 
 #include "smack.h"
 
@@ -574,8 +575,8 @@ static int smack_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
  *
  * Returns 0 if access is permitted, an error code otherwise
  */
-static int smack_inode_setxattr(struct dentry *dentry, char *name,
-                               void *value, size_t size, int flags)
+static int smack_inode_setxattr(struct dentry *dentry, const char *name,
+                               const void *value, size_t size, int flags)
 {
        int rc = 0;
 
@@ -604,8 +605,8 @@ static int smack_inode_setxattr(struct dentry *dentry, char *name,
  * Set the pointer in the inode blob to the entry found
  * in the master label list.
  */
-static void smack_inode_post_setxattr(struct dentry *dentry, char *name,
-                                     void *value, size_t size, int flags)
+static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
+                                     const void *value, size_t size, int flags)
 {
        struct inode_smack *isp;
        char *nsp;
@@ -641,7 +642,7 @@ static void smack_inode_post_setxattr(struct dentry *dentry, char *name,
  *
  * Returns 0 if access is permitted, an error code otherwise
  */
-static int smack_inode_getxattr(struct dentry *dentry, char *name)
+static int smack_inode_getxattr(struct dentry *dentry, const char *name)
 {
        return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ);
 }
@@ -655,7 +656,7 @@ static int smack_inode_getxattr(struct dentry *dentry, char *name)
  *
  * Returns 0 if access is permitted, an error code otherwise
  */
-static int smack_inode_removexattr(struct dentry *dentry, char *name)
+static int smack_inode_removexattr(struct dentry *dentry, const char *name)
 {
        int rc = 0;
 
@@ -752,6 +753,18 @@ static int smack_inode_listsecurity(struct inode *inode, char *buffer,
        return -EINVAL;
 }
 
+/**
+ * smack_inode_getsecid - Extract inode's security id
+ * @inode: inode to extract the info from
+ * @secid: where result will be saved
+ */
+static void smack_inode_getsecid(const struct inode *inode, u32 *secid)
+{
+       struct inode_smack *isp = inode->i_security;
+
+       *secid = smack_to_secid(isp->smk_inode);
+}
+
 /*
  * File Hooks
  */
@@ -1117,15 +1130,6 @@ static int smack_task_movememory(struct task_struct *p)
 static int smack_task_kill(struct task_struct *p, struct siginfo *info,
                           int sig, u32 secid)
 {
-       /*
-        * Special cases where signals really ought to go through
-        * in spite of policy. Stephen Smalley suggests it may
-        * make sense to change the caller so that it doesn't
-        * bother with the LSM hook in these cases.
-        */
-       if (info != SEND_SIG_NOINFO &&
-           (is_si_special(info) || SI_FROMKERNEL(info)))
-               return 0;
        /*
         * Sending a signal requires that the sender
         * can write the receiver.
@@ -1242,7 +1246,7 @@ static void smack_set_catset(char *catset, struct netlbl_lsm_secattr *sap)
        int rc;
        int byte;
 
-       if (catset == 0)
+       if (!catset)
                return;
 
        sap->flags |= NETLBL_SECATTR_MLS_CAT;
@@ -1805,6 +1809,18 @@ static int smack_ipc_permission(struct kern_ipc_perm *ipp, short flag)
        return smk_curacc(isp, may);
 }
 
+/**
+ * smack_ipc_getsecid - Extract smack security id
+ * @ipcp: the object permissions
+ * @secid: where result will be saved
+ */
+static void smack_ipc_getsecid(struct kern_ipc_perm *ipp, u32 *secid)
+{
+       char *smack = ipp->security;
+
+       *secid = smack_to_secid(smack);
+}
+
 /* module stacking operations */
 
 /**
@@ -2381,6 +2397,124 @@ static int smack_key_permission(key_ref_t key_ref,
 }
 #endif /* CONFIG_KEYS */
 
+/*
+ * Smack Audit hooks
+ *
+ * Audit requires a unique representation of each Smack specific
+ * rule. This unique representation is used to distinguish the
+ * object to be audited from remaining kernel objects and also
+ * works as a glue between the audit hooks.
+ *
+ * Since repository entries are added but never deleted, we'll use
+ * the smack_known label address related to the given audit rule as
+ * the needed unique representation. This also better fits the smack
+ * model where nearly everything is a label.
+ */
+#ifdef CONFIG_AUDIT
+
+/**
+ * smack_audit_rule_init - Initialize a smack audit rule
+ * @field: audit rule fields given from user-space (audit.h)
+ * @op: required testing operator (=, !=, >, <, ...)
+ * @rulestr: smack label to be audited
+ * @vrule: pointer to save our own audit rule representation
+ *
+ * Prepare to audit cases where (@field @op @rulestr) is true.
+ * The label to be audited is created if necessary.
+ */
+static int smack_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
+{
+       char **rule = (char **)vrule;
+       *rule = NULL;
+
+       if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER)
+               return -EINVAL;
+
+       if (op != AUDIT_EQUAL && op != AUDIT_NOT_EQUAL)
+               return -EINVAL;
+
+       *rule = smk_import(rulestr, 0);
+
+       return 0;
+}
+
+/**
+ * smack_audit_rule_known - Distinguish Smack audit rules
+ * @krule: rule of interest, in Audit kernel representation format
+ *
+ * This is used to filter Smack rules from remaining Audit ones.
+ * If it's proved that this rule belongs to us, the
+ * audit_rule_match hook will be called to do the final judgement.
+ */
+static int smack_audit_rule_known(struct audit_krule *krule)
+{
+       struct audit_field *f;
+       int i;
+
+       for (i = 0; i < krule->field_count; i++) {
+               f = &krule->fields[i];
+
+               if (f->type == AUDIT_SUBJ_USER || f->type == AUDIT_OBJ_USER)
+                       return 1;
+       }
+
+       return 0;
+}
+
+/**
+ * smack_audit_rule_match - Audit given object ?
+ * @secid: security id for identifying the object to test
+ * @field: audit rule flags given from user-space
+ * @op: required testing operator
+ * @vrule: smack internal rule presentation
+ * @actx: audit context associated with the check
+ *
+ * The core Audit hook. It's used to take the decision of
+ * whether to audit or not to audit a given object.
+ */
+static int smack_audit_rule_match(u32 secid, u32 field, u32 op, void *vrule,
+                                 struct audit_context *actx)
+{
+       char *smack;
+       char *rule = vrule;
+
+       if (!rule) {
+               audit_log(actx, GFP_KERNEL, AUDIT_SELINUX_ERR,
+                         "Smack: missing rule\n");
+               return -ENOENT;
+       }
+
+       if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER)
+               return 0;
+
+       smack = smack_from_secid(secid);
+
+       /*
+        * No need to do string comparisons. If a match occurs,
+        * both pointers will point to the same smack_known
+        * label.
+        */
+       if (op == AUDIT_EQUAL)
+               return (rule == smack);
+       if (op == AUDIT_NOT_EQUAL)
+               return (rule != smack);
+
+       return 0;
+}
+
+/**
+ * smack_audit_rule_free - free smack rule representation
+ * @vrule: rule to be freed.
+ *
+ * No memory was allocated.
+ */
+static void smack_audit_rule_free(void *vrule)
+{
+       /* No-op */
+}
+
+#endif /* CONFIG_AUDIT */
+
 /*
  * smack_secid_to_secctx - return the smack label for a secid
  * @secid: incoming integer
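
Illustrative sketch, not part of the patch: the comment block introducing the audit hooks explains that smk_import() interns every label in a list that is never shrunk, so the interned pointer itself can represent the rule. That is why smack_audit_rule_match() compares pointers instead of calling strcmp(). The label value below is made up.

        static void smack_intern_demo(void)
        {
                /* two imports of the same (hypothetical) label return the
                 * same interned address
                 */
                char *a = smk_import("Pop", 0);
                char *b = smk_import("Pop", 0);

                WARN_ON(a != b);  /* pointer equality == label equality */
        }
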
@@ -2406,7 +2540,7 @@ static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
  *
  * Exists for audit and networking code.
  */
-static int smack_secctx_to_secid(char *secdata, u32 seclen, u32 *secid)
+static int smack_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
 {
        *secid = smack_to_secid(secdata);
        return 0;
@@ -2467,6 +2601,7 @@ struct security_operations smack_ops = {
        .inode_getsecurity =            smack_inode_getsecurity,
        .inode_setsecurity =            smack_inode_setsecurity,
        .inode_listsecurity =           smack_inode_listsecurity,
+       .inode_getsecid =               smack_inode_getsecid,
 
        .file_permission =              smack_file_permission,
        .file_alloc_security =          smack_file_alloc_security,
@@ -2495,8 +2630,10 @@ struct security_operations smack_ops = {
        .task_wait =                    smack_task_wait,
        .task_reparent_to_init =        cap_task_reparent_to_init,
        .task_to_inode =                smack_task_to_inode,
+       .task_prctl =                   cap_task_prctl,
 
        .ipc_permission =               smack_ipc_permission,
+       .ipc_getsecid =                 smack_ipc_getsecid,
 
        .msg_msg_alloc_security =       smack_msg_msg_alloc_security,
        .msg_msg_free_security =        smack_msg_msg_free_security,
@@ -2541,12 +2678,22 @@ struct security_operations smack_ops = {
        .sk_free_security =             smack_sk_free_security,
        .sock_graft =                   smack_sock_graft,
        .inet_conn_request =            smack_inet_conn_request,
+
  /* key management security hooks */
 #ifdef CONFIG_KEYS
        .key_alloc =                    smack_key_alloc,
        .key_free =                     smack_key_free,
        .key_permission =               smack_key_permission,
 #endif /* CONFIG_KEYS */
+
+ /* Audit hooks */
+#ifdef CONFIG_AUDIT
+       .audit_rule_init =              smack_audit_rule_init,
+       .audit_rule_known =             smack_audit_rule_known,
+       .audit_rule_match =             smack_audit_rule_match,
+       .audit_rule_free =              smack_audit_rule_free,
+#endif /* CONFIG_AUDIT */
+
        .secid_to_secctx =              smack_secid_to_secctx,
        .secctx_to_secid =              smack_secctx_to_secid,
        .release_secctx =               smack_release_secctx,
index 6ba283783b70af178d7c66bca1e44eb632674f2b..271a835fbbe3f72f4e5465cbf84b9b271fa7e77a 100644 (file)
@@ -317,13 +317,14 @@ static const struct file_operations smk_load_ops = {
 /**
  * smk_cipso_doi - initialize the CIPSO domain
  */
-void smk_cipso_doi(void)
+static void smk_cipso_doi(void)
 {
        int rc;
        struct cipso_v4_doi *doip;
        struct netlbl_audit audit_info;
 
        audit_info.loginuid = audit_get_loginuid(current);
+       audit_info.sessionid = audit_get_sessionid(current);
        audit_info.secid = smack_to_secid(current->security);
 
        rc = netlbl_cfg_map_del(NULL, &audit_info);
@@ -350,12 +351,13 @@ void smk_cipso_doi(void)
 /**
  * smk_unlbl_ambient - initialize the unlabeled domain
  */
-void smk_unlbl_ambient(char *oldambient)
+static void smk_unlbl_ambient(char *oldambient)
 {
        int rc;
        struct netlbl_audit audit_info;
 
        audit_info.loginuid = audit_get_loginuid(current);
+       audit_info.sessionid = audit_get_sessionid(current);
        audit_info.secid = smack_to_secid(current->security);
 
        if (oldambient != NULL) {
index 9977ec2eace35d547772db031c0db29bed15fe37..cb5ead3e202df5512317aeba7e5c281d993f8fae 100644 (file)
@@ -544,7 +544,7 @@ int __init snd_info_init(void)
 {
        struct proc_dir_entry *p;
 
-       p = snd_create_proc_entry("asound", S_IFDIR | S_IRUGO | S_IXUGO, &proc_root);
+       p = snd_create_proc_entry("asound", S_IFDIR | S_IRUGO | S_IXUGO, NULL);
        if (p == NULL)
                return -ENOMEM;
        snd_proc_root = p;
@@ -594,7 +594,7 @@ int __exit snd_info_done(void)
 #ifdef CONFIG_SND_OSSEMUL
                snd_info_free_entry(snd_oss_root);
 #endif
-               snd_remove_proc_entry(&proc_root, snd_proc_root);
+               snd_remove_proc_entry(NULL, snd_proc_root);
        }
        return 0;
 }
index 920e5780c2284c9c086dd1bfaf02aff5462786d2..23b7bc02728b44ac9ad1086a827ffd3b917418ea 100644 (file)
@@ -629,9 +629,8 @@ static const struct file_operations snd_mem_proc_fops = {
 static int __init snd_mem_init(void)
 {
 #ifdef CONFIG_PROC_FS
-       snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL);
-       if (snd_mem_proc)
-               snd_mem_proc->proc_fops = &snd_mem_proc_fops;
+       snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
+                                  &snd_mem_proc_fops);
 #endif
        return 0;
 }
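
Illustrative sketch, not part of the patch: the hunk above replaces the old two-step idiom with proc_create(). The difference is that create_proc_entry() made the file visible before ->proc_fops was filled in, leaving a small window where an open() could race with initialization; proc_create() attaches the file_operations before publishing the entry. The names below are placeholders.

        static const struct file_operations example_fops;   /* placeholder */

        static void proc_create_demo(void)
        {
                struct proc_dir_entry *entry;

                /* old idiom: entry briefly visible without its fops */
                entry = create_proc_entry("example", 0644, NULL);
                if (entry)
                        entry->proc_fops = &example_fops;

                /* new idiom: fops attached before the entry is published */
                entry = proc_create("example", 0644, NULL, &example_fops);
        }
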
index fe85af1c56934ab58ecbb0fda39d92b4f60753c8..a78a8d045175bb65b4dc576612e289b36957b898 100644 (file)
@@ -8,6 +8,8 @@ config SND_PCSP
        tristate "Internal PC speaker support"
        depends on X86_PC && HIGH_RES_TIMERS
        depends on INPUT
+       depends on SND
+       select SND_PCM
        help
          If you don't have a sound card in your computer, you can include a
          driver for the PC speaker which allows it to act like a primitive
index 18cca2457d44c1164978cc2453531396d8b38b0a..2af09996a3d01a39d4b913a23c1d08855ed54f06 100644 (file)
@@ -243,7 +243,7 @@ static int snd_mpu401_uart_cmd(struct snd_mpu401 * mpu, unsigned char cmd,
 #endif
        }
        mpu->write(mpu, cmd, MPU401C(mpu));
-       if (ack) {
+       if (ack && !(mpu->info_flags & MPU401_INFO_NO_ACK)) {
                ok = 0;
                timeout = 10000;
                while (!ok && timeout-- > 0) {
index cdda64b02f4688c638f09b169bf0aefb1799a156..d9783a4263e0bee2a8ae47e01eb7e063e7dea51b 100644 (file)
@@ -60,6 +60,7 @@ enum {
        ALC880_TCL_S700,
        ALC880_LG,
        ALC880_LG_LW,
+       ALC880_MEDION_RIM,
 #ifdef CONFIG_SND_DEBUG
        ALC880_TEST,
 #endif
@@ -2275,6 +2276,75 @@ static void alc880_lg_lw_unsol_event(struct hda_codec *codec, unsigned int res)
                alc880_lg_lw_automute(codec);
 }
 
+static struct snd_kcontrol_new alc880_medion_rim_mixer[] = {
+       HDA_CODEC_VOLUME("Master Playback Volume", 0x0c, 0x0, HDA_OUTPUT),
+       HDA_BIND_MUTE("Master Playback Switch", 0x0c, 2, HDA_INPUT),
+       HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x0, HDA_INPUT),
+       HDA_CODEC_MUTE("Mic Playback Switch", 0x0b, 0x0, HDA_INPUT),
+       HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x0b, 0x1, HDA_INPUT),
+       HDA_CODEC_MUTE("Internal Playback Switch", 0x0b, 0x1, HDA_INPUT),
+       { } /* end */
+};
+
+static struct hda_input_mux alc880_medion_rim_capture_source = {
+       .num_items = 2,
+       .items = {
+               { "Mic", 0x0 },
+               { "Internal Mic", 0x1 },
+       },
+};
+
+static struct hda_verb alc880_medion_rim_init_verbs[] = {
+       {0x13, AC_VERB_SET_CONNECT_SEL, 0x00}, /* HP */
+
+       {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+       {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+
+       /* Mic1 (rear panel) pin widget for input and vref at 80% */
+       {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
+       {0x18, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+       /* Mic2 (as headphone out) for HP output */
+       {0x19, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP},
+       {0x19, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
+       /* Internal Speaker */
+       {0x1b, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
+       {0x1b, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
+
+       {0x20, AC_VERB_SET_COEF_INDEX, 0x07},
+       {0x20, AC_VERB_SET_PROC_COEF,  0x3060},
+
+       {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC880_HP_EVENT},
+       { }
+};
+
+/* toggle speaker-output according to the hp-jack state */
+static void alc880_medion_rim_automute(struct hda_codec *codec)
+{
+       unsigned int present;
+       unsigned char bits;
+
+       present = snd_hda_codec_read(codec, 0x14, 0,
+                                    AC_VERB_GET_PIN_SENSE, 0)
+               & AC_PINSENSE_PRESENCE;
+       bits = present ? HDA_AMP_MUTE : 0;
+       snd_hda_codec_amp_stereo(codec, 0x1b, HDA_OUTPUT, 0,
+                                HDA_AMP_MUTE, bits);
+       if (present)
+               snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, 0);
+       else
+               snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, 2);
+}
+
+static void alc880_medion_rim_unsol_event(struct hda_codec *codec,
+                                         unsigned int res)
+{
+       /* Looks like the unsol event is incompatible with the standard
+        * definition.  4bit tag is placed at 28 bit!
+        */
+       if ((res >> 28) == ALC880_HP_EVENT)
+               alc880_medion_rim_automute(codec);
+}
+
 #ifdef CONFIG_SND_HDA_POWER_SAVE
 static struct hda_amp_list alc880_loopbacks[] = {
        { 0x0b, HDA_INPUT, 0 },
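
Side note, not part of the patch: the terse comment in alc880_medion_rim_unsol_event() refers, if I recall hda_codec.h of this era correctly, to the spec-defined layout where the unsolicited-response tag is a 6-bit field at bits 31:26 (AC_UNSOL_RES_TAG_SHIFT). This codec instead returns the 4-bit tag programmed via AC_USRSP_EN left-aligned at bits 31:28, hence the (res >> 28) test. For comparison, a handler written against the spec layout would look roughly like this (hypothetical, do not apply):

        static void alc880_hp_event_spec_layout(struct hda_codec *codec,
                                                unsigned int res)
        {
                /* tag in bits 31:26 per the HDA specification */
                if ((res >> 26) == ALC880_HP_EVENT)
                        alc880_medion_rim_automute(codec);
        }
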
@@ -2882,6 +2952,7 @@ static const char *alc880_models[ALC880_MODEL_LAST] = {
        [ALC880_F1734]          = "F1734",
        [ALC880_LG]             = "lg",
        [ALC880_LG_LW]          = "lg-lw",
+       [ALC880_MEDION_RIM]     = "medion",
 #ifdef CONFIG_SND_DEBUG
        [ALC880_TEST]           = "test",
 #endif
@@ -2933,6 +3004,7 @@ static struct snd_pci_quirk alc880_cfg_tbl[] = {
        SND_PCI_QUIRK(0x1584, 0x9070, "Uniwill", ALC880_UNIWILL),
        SND_PCI_QUIRK(0x1584, 0x9077, "Uniwill P53", ALC880_UNIWILL_P53),
        SND_PCI_QUIRK(0x161f, 0x203d, "W810", ALC880_W810),
+       SND_PCI_QUIRK(0x161f, 0x205d, "Medion Rim 2150", ALC880_MEDION_RIM),
        SND_PCI_QUIRK(0x1695, 0x400d, "EPoX", ALC880_5ST_DIG),
        SND_PCI_QUIRK(0x1695, 0x4012, "EPox EP-5LDA", ALC880_5ST_DIG),
        SND_PCI_QUIRK(0x1734, 0x107c, "FSC F1734", ALC880_F1734),
@@ -3227,6 +3299,20 @@ static struct alc_config_preset alc880_presets[] = {
                .unsol_event = alc880_lg_lw_unsol_event,
                .init_hook = alc880_lg_lw_automute,
        },
+       [ALC880_MEDION_RIM] = {
+               .mixers = { alc880_medion_rim_mixer },
+               .init_verbs = { alc880_volume_init_verbs,
+                               alc880_medion_rim_init_verbs,
+                               alc_gpio2_init_verbs },
+               .num_dacs = ARRAY_SIZE(alc880_dac_nids),
+               .dac_nids = alc880_dac_nids,
+               .dig_out_nid = ALC880_DIGOUT_NID,
+               .num_channel_mode = ARRAY_SIZE(alc880_2_jack_modes),
+               .channel_mode = alc880_2_jack_modes,
+               .input_mux = &alc880_medion_rim_capture_source,
+               .unsol_event = alc880_medion_rim_unsol_event,
+               .init_hook = alc880_medion_rim_automute,
+       },
 #ifdef CONFIG_SND_DEBUG
        [ALC880_TEST] = {
                .mixers = { alc880_test_mixer },
index 4490422fb930bc047af5df27cad62266e3a55689..67350901772ce986582ef19e0b8ce9dad738f9cb 100644 (file)
@@ -2429,6 +2429,7 @@ static int __devinit snd_vt1724_probe(struct pci_dev *pci,
                        if ((err = snd_mpu401_uart_new(card, 0, MPU401_HW_ICE1712,
                                                       ICEREG1724(ice, MPU_CTRL),
                                                       (MPU401_INFO_INTEGRATED |
+                                                       MPU401_INFO_NO_ACK |
                                                        MPU401_INFO_TX_IRQ),
                                                       ice->irq, 0,
                                                       &ice->rmidi[0])) < 0) {
@@ -2442,12 +2443,10 @@ static int __devinit snd_vt1724_probe(struct pci_dev *pci,
                        outb(inb(ICEREG1724(ice, IRQMASK)) &
                             ~(VT1724_IRQ_MPU_RX | VT1724_IRQ_MPU_TX),
                             ICEREG1724(ice, IRQMASK));
-#if 0 /* for testing */
                        /* set watermarks */
                        outb(VT1724_MPU_RX_FIFO | 0x1,
                             ICEREG1724(ice, MPU_FIFO_WM));
                        outb(0x1, ICEREG1724(ice, MPU_FIFO_WM));
-#endif
                }
        }
 
index d49417bf78c693dc115390cd14d6c54d84fd8c7f..9ca1133261437f7f7d630e4e2276b9af9d068e1c 100644 (file)
@@ -663,7 +663,7 @@ static int __init aica_init(void)
                return err;
        pd = platform_device_register_simple(SND_AICA_DRIVER, -1,
                                             aica_memory_space, 2);
-       if (unlikely(IS_ERR(pd))) {
+       if (IS_ERR(pd)) {
                platform_driver_unregister(&snd_aica_driver);
                return PTR_ERR(pd);
        }
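
Not part of the patch: dropping unlikely() here is more than style. IS_ERR() already carries the branch hint; paraphrased from include/linux/err.h of this era it reads roughly:

        #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)

        static inline long IS_ERR(const void *ptr)
        {
                return IS_ERR_VALUE((unsigned long)ptr);
        }

so wrapping the call in a second unlikely() added nothing.
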
index a3b51df2bea148293bb5f12b55e47ee42936d45f..18f28ac4bfe82997733df97c1c1eaaf05dc41adb 100644 (file)
@@ -30,6 +30,7 @@ source "sound/soc/s3c24xx/Kconfig"
 source "sound/soc/sh/Kconfig"
 source "sound/soc/fsl/Kconfig"
 source "sound/soc/davinci/Kconfig"
+source "sound/soc/omap/Kconfig"
 
 # Supported codecs
 source "sound/soc/codecs/Kconfig"
index e489dbdde45839664cbf44dd408143afce2374e2..782db2127108fd26c19a0bb6bf2ee378cbd4d0ac 100644 (file)
@@ -1,4 +1,4 @@
 snd-soc-core-objs := soc-core.o soc-dapm.o
 
 obj-$(CONFIG_SND_SOC)  += snd-soc-core.o
-obj-$(CONFIG_SND_SOC)  += codecs/ at91/ pxa/ s3c24xx/ sh/ fsl/ davinci/
+obj-$(CONFIG_SND_SOC)  += codecs/ at91/ pxa/ s3c24xx/ sh/ fsl/ davinci/ omap/
index 76a5c7b05dfbfdde7647d92af0f112816efb17f5..fb41826c4c4cdba27f39486eab75f888ff08cc99 100644 (file)
@@ -150,7 +150,7 @@ static int wm8753_write(struct snd_soc_codec *codec, unsigned int reg,
        data[0] = (reg << 1) | ((value >> 8) & 0x0001);
        data[1] = value & 0x00ff;
 
-       wm8753_write_reg_cache (codec, reg, value);
+       wm8753_write_reg_cache(codec, reg, value);
        if (codec->hw_write(codec->control_data, data, 2) == 2)
                return 0;
        else
@@ -249,7 +249,7 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol,
        struct snd_soc_codec *codec =  snd_kcontrol_chip(kcontrol);
        int mode = wm8753_read_reg_cache(codec, WM8753_IOCTL);
 
-       if (((mode &0xc) >> 2) == ucontrol->value.integer.value[0])
+       if (((mode & 0xc) >> 2) == ucontrol->value.integer.value[0])
                return 0;
 
        mode &= 0xfff3;
@@ -342,7 +342,8 @@ static int wm8753_add_controls(struct snd_soc_codec *codec)
 
        for (i = 0; i < ARRAY_SIZE(wm8753_snd_controls); i++) {
                err = snd_ctl_add(codec->card,
-                               snd_soc_cnew(&wm8753_snd_controls[i],codec, NULL));
+                               snd_soc_cnew(&wm8753_snd_controls[i],
+                                               codec, NULL));
                if (err < 0)
                        return err;
        }
@@ -722,7 +723,7 @@ static void pll_factors(struct _pll_div *pll_div, unsigned int target,
 
        if ((Ndiv < 6) || (Ndiv > 12))
                printk(KERN_WARNING
-                       "WM8753 N value outwith recommended range! N = %d\n",Ndiv);
+                       "wm8753: unsupported N = %d\n", Ndiv);
 
        pll_div->n = Ndiv;
        Nmod = target % source;
@@ -1300,8 +1301,9 @@ static int wm8753_dapm_event(struct snd_soc_codec *codec, int event)
 }
 
 #define WM8753_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
-               SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | \
-               SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
+               SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 |\
+               SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 |\
+               SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
 
 #define WM8753_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
        SNDRV_PCM_FMTBIT_S24_LE)
@@ -1507,9 +1509,9 @@ static int wm8753_suspend(struct platform_device *pdev, pm_message_t state)
        struct snd_soc_codec *codec = socdev->codec;
 
        /* we only need to suspend if we are a valid card */
-       if(!codec->card)
+       if (!codec->card)
                return 0;
-               
+
        wm8753_dapm_event(codec, SNDRV_CTL_POWER_D3cold);
        return 0;
 }
@@ -1523,7 +1525,7 @@ static int wm8753_resume(struct platform_device *pdev)
        u16 *cache = codec->reg_cache;
 
        /* we only need to resume if we are a valid card */
-       if(!codec->card)
+       if (!codec->card)
                return 0;
 
        /* Sync reg_cache with the hardware */
@@ -1613,9 +1615,10 @@ static int wm8753_init(struct snd_soc_device *socdev)
        wm8753_add_widgets(codec);
        ret = snd_soc_register_card(socdev);
        if (ret < 0) {
-       printk(KERN_ERR "wm8753: failed to register card\n");
+               printk(KERN_ERR "wm8753: failed to register card\n");
                goto card_err;
-    }
+       }
+
        return ret;
 
 card_err:
@@ -1630,7 +1633,7 @@ pcm_err:
    around */
 static struct snd_soc_device *wm8753_socdev;
 
-#if defined (CONFIG_I2C) || defined (CONFIG_I2C_MODULE)
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
 
 /*
  * WM8753 2 wire address is determined by GPIO5
@@ -1661,7 +1664,7 @@ static int wm8753_codec_probe(struct i2c_adapter *adap, int addr, int kind)
        client_template.addr = addr;
 
        i2c =  kmemdup(&client_template, sizeof(client_template), GFP_KERNEL);
-       if (i2c == NULL){
+       if (!i2c) {
                kfree(codec);
                return -ENOMEM;
        }
@@ -1749,7 +1752,7 @@ static int wm8753_probe(struct platform_device *pdev)
        wm8753_socdev = socdev;
        INIT_DELAYED_WORK(&codec->delayed_work, wm8753_work);
 
-#if defined (CONFIG_I2C) || defined (CONFIG_I2C_MODULE)
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
        if (setup->i2c_address) {
                normal_i2c[0] = setup->i2c_address;
                codec->hw_write = (hw_write_t)i2c_master_send;
@@ -1793,7 +1796,7 @@ static int wm8753_remove(struct platform_device *pdev)
        run_delayed_work(&codec->delayed_work);
        snd_soc_free_pcms(socdev);
        snd_soc_dapm_free(socdev);
-#if defined (CONFIG_I2C) || defined (CONFIG_I2C_MODULE)
+#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
        i2c_del_driver(&wm8753_i2c_driver);
 #endif
        kfree(codec->private_data);
@@ -1808,7 +1811,6 @@ struct snd_soc_codec_device soc_codec_dev_wm8753 = {
        .suspend =      wm8753_suspend,
        .resume =       wm8753_resume,
 };
-
 EXPORT_SYMBOL_GPL(soc_codec_dev_wm8753);
 
 MODULE_DESCRIPTION("ASoC WM8753 driver");
index d2d79e182a45b1bc44c79e6278043a0ae147a9cf..76c1e2d33e7d0b8d0b5d22a6dd7dc5fbf7fa37f6 100644 (file)
@@ -37,23 +37,23 @@ static int ac97_write(struct snd_soc_codec *codec,
  * WM9712 register cache
  */
 static const u16 wm9712_reg[] = {
-       0x6174, 0x8000, 0x8000, 0x8000, // 6
-       0x0f0f, 0xaaa0, 0xc008, 0x6808, // e
-       0xe808, 0xaaa0, 0xad00, 0x8000, // 16
-       0xe808, 0x3000, 0x8000, 0x0000, // 1e
-       0x0000, 0x0000, 0x0000, 0x000f, // 26
-       0x0405, 0x0410, 0xbb80, 0xbb80, // 2e
-       0x0000, 0xbb80, 0x0000, 0x0000, // 36
-       0x0000, 0x2000, 0x0000, 0x0000, // 3e
-       0x0000, 0x0000, 0x0000, 0x0000, // 46
-       0x0000, 0x0000, 0xf83e, 0xffff, // 4e
-       0x0000, 0x0000, 0x0000, 0xf83e, // 56
-       0x0008, 0x0000, 0x0000, 0x0000, // 5e
-       0xb032, 0x3e00, 0x0000, 0x0000, // 66
-       0x0000, 0x0000, 0x0000, 0x0000, // 6e
-       0x0000, 0x0000, 0x0000, 0x0006, // 76
-       0x0001, 0x0000, 0x574d, 0x4c12, // 7e
-       0x0000, 0x0000 // virtual hp mixers
+       0x6174, 0x8000, 0x8000, 0x8000, /*  6 */
+       0x0f0f, 0xaaa0, 0xc008, 0x6808, /*  e */
+       0xe808, 0xaaa0, 0xad00, 0x8000, /* 16 */
+       0xe808, 0x3000, 0x8000, 0x0000, /* 1e */
+       0x0000, 0x0000, 0x0000, 0x000f, /* 26 */
+       0x0405, 0x0410, 0xbb80, 0xbb80, /* 2e */
+       0x0000, 0xbb80, 0x0000, 0x0000, /* 36 */
+       0x0000, 0x2000, 0x0000, 0x0000, /* 3e */
+       0x0000, 0x0000, 0x0000, 0x0000, /* 46 */
+       0x0000, 0x0000, 0xf83e, 0xffff, /* 4e */
+       0x0000, 0x0000, 0x0000, 0xf83e, /* 56 */
+       0x0008, 0x0000, 0x0000, 0x0000, /* 5e */
+       0xb032, 0x3e00, 0x0000, 0x0000, /* 66 */
+       0x0000, 0x0000, 0x0000, 0x0000, /* 6e */
+       0x0000, 0x0000, 0x0000, 0x0006, /* 76 */
+       0x0001, 0x0000, 0x574d, 0x4c12, /* 7e */
+       0x0000, 0x0000 /* virtual hp mixers */
 };
 
 /* virtual HP mixers regs */
@@ -94,7 +94,7 @@ static const struct snd_kcontrol_new wm9712_snd_ac97_controls[] = {
 SOC_DOUBLE("Speaker Playback Volume", AC97_MASTER, 8, 0, 31, 1),
 SOC_SINGLE("Speaker Playback Switch", AC97_MASTER, 15, 1, 1),
 SOC_DOUBLE("Headphone Playback Volume", AC97_HEADPHONE, 8, 0, 31, 1),
-SOC_SINGLE("Headphone Playback Switch", AC97_HEADPHONE,15, 1, 1),
+SOC_SINGLE("Headphone Playback Switch", AC97_HEADPHONE, 15, 1, 1),
 SOC_DOUBLE("PCM Playback Volume", AC97_PCM, 8, 0, 31, 1),
 
 SOC_SINGLE("Speaker Playback ZC Switch", AC97_MASTER, 7, 1, 0),
@@ -165,7 +165,8 @@ static int wm9712_add_controls(struct snd_soc_codec *codec)
 
        for (i = 0; i < ARRAY_SIZE(wm9712_snd_ac97_controls); i++) {
                err = snd_ctl_add(codec->card,
-                               snd_soc_cnew(&wm9712_snd_ac97_controls[i],codec, NULL));
+                                 snd_soc_cnew(&wm9712_snd_ac97_controls[i],
+                                              codec, NULL));
                if (err < 0)
                        return err;
        }
@@ -363,7 +364,6 @@ static const char *audio_map[][3] = {
        {"Left HP Mixer", "PCM Playback Switch",  "Left DAC"},
        {"Left HP Mixer", "Mic Sidetone Switch",  "Mic PGA"},
        {"Left HP Mixer", NULL,  "ALC Sidetone Mux"},
-       //{"Right HP Mixer", NULL, "HP Mixer"},
 
        /* Right HP mixer */
        {"Right HP Mixer", "PCBeep Bypass Switch", "PCBEEP"},
@@ -454,15 +454,13 @@ static int wm9712_add_widgets(struct snd_soc_codec *codec)
 {
        int i;
 
-       for(i = 0; i < ARRAY_SIZE(wm9712_dapm_widgets); i++) {
+       for (i = 0; i < ARRAY_SIZE(wm9712_dapm_widgets); i++)
                snd_soc_dapm_new_control(codec, &wm9712_dapm_widgets[i]);
-       }
 
-       /* set up audio path audio_mapnects */
-       for(i = 0; audio_map[i][0] != NULL; i++) {
+       /* set up audio path connects */
+       for (i = 0; audio_map[i][0] != NULL; i++)
                snd_soc_dapm_connect_input(codec, audio_map[i][0],
-                       audio_map[i][1], audio_map[i][2]);
-       }
+                                          audio_map[i][1], audio_map[i][2]);
 
        snd_soc_dapm_new_widgets(codec);
        return 0;
@@ -540,7 +538,8 @@ static int ac97_aux_prepare(struct snd_pcm_substream *substream)
 }
 
 #define WM9712_AC97_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
-               SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000)
+               SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 |\
+               SNDRV_PCM_RATE_48000)
 
 struct snd_soc_codec_dai wm9712_dai[] = {
 {
@@ -577,8 +576,6 @@ EXPORT_SYMBOL_GPL(wm9712_dai);
 
 static int wm9712_dapm_event(struct snd_soc_codec *codec, int event)
 {
-       u16 reg;
-
        switch (event) {
        case SNDRV_CTL_POWER_D0: /* full On */
        case SNDRV_CTL_POWER_D1: /* partial On */
@@ -633,7 +630,7 @@ static int wm9712_soc_resume(struct platform_device *pdev)
        u16 *cache = codec->reg_cache;
 
        ret = wm9712_reset(codec, 1);
-       if (ret < 0){
+       if (ret < 0) {
                printk(KERN_ERR "could not reset AC97 codec\n");
                return ret;
        }
@@ -642,9 +639,9 @@ static int wm9712_soc_resume(struct platform_device *pdev)
 
        if (ret == 0) {
                /* Sync reg_cache with the hardware after cold reset */
-               for (i = 2; i < ARRAY_SIZE(wm9712_reg) << 1; i+=2) {
+               for (i = 2; i < ARRAY_SIZE(wm9712_reg) << 1; i += 2) {
                        if (i == AC97_INT_PAGING || i == AC97_POWERDOWN ||
-                               (i > 0x58 && i != 0x5c))
+                           (i > 0x58 && i != 0x5c))
                                continue;
                        soc_ac97_ops.write(codec->ac97, i, cache[i>>1]);
                }
@@ -757,7 +754,6 @@ struct snd_soc_codec_device soc_codec_dev_wm9712 = {
        .suspend =      wm9712_soc_suspend,
        .resume =       wm9712_soc_resume,
 };
-
 EXPORT_SYMBOL_GPL(soc_codec_dev_wm9712);
 
 MODULE_DESCRIPTION("ASoC WM9711/WM9712 driver");
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
new file mode 100644 (file)
index 0000000..0230d83
--- /dev/null
@@ -0,0 +1,19 @@
+menu "SoC Audio for the Texas Instruments OMAP"
+
+config SND_OMAP_SOC
+       tristate "SoC Audio for the Texas Instruments OMAP chips"
+       depends on ARCH_OMAP && SND_SOC
+
+config SND_OMAP_SOC_MCBSP
+       tristate
+       select OMAP_MCBSP
+
+config SND_OMAP_SOC_N810
+       tristate "SoC Audio support for Nokia N810"
+       depends on SND_OMAP_SOC && MACH_NOKIA_N810
+       select SND_OMAP_SOC_MCBSP
+       select SND_SOC_TLV320AIC3X
+       help
+         Say Y if you want to add support for SoC audio on Nokia N810.
+
+endmenu
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
new file mode 100644 (file)
index 0000000..d8d8d58
--- /dev/null
@@ -0,0 +1,11 @@
+# OMAP Platform Support
+snd-soc-omap-objs := omap-pcm.o
+snd-soc-omap-mcbsp-objs := omap-mcbsp.o
+
+obj-$(CONFIG_SND_OMAP_SOC) += snd-soc-omap.o
+obj-$(CONFIG_SND_OMAP_SOC_MCBSP) += snd-soc-omap-mcbsp.o
+
+# OMAP Machine Support
+snd-soc-n810-objs := n810.o
+
+obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
new file mode 100644 (file)
index 0000000..83b1eb4
--- /dev/null
@@ -0,0 +1,336 @@
+/*
+ * n810.c  --  SoC audio for Nokia N810
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Jarkko Nikula <jarkko.nikula@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/soc.h>
+#include <sound/soc-dapm.h>
+
+#include <asm/mach-types.h>
+#include <asm/arch/hardware.h>
+#include <asm/arch/gpio.h>
+#include <asm/arch/mcbsp.h>
+
+#include "omap-mcbsp.h"
+#include "omap-pcm.h"
+#include "../codecs/tlv320aic3x.h"
+
+#define RX44_HEADSET_AMP_GPIO  10
+#define RX44_SPEAKER_AMP_GPIO  101
+
+static struct clk *sys_clkout2;
+static struct clk *sys_clkout2_src;
+static struct clk *func96m_clk;
+
+static int n810_spk_func;
+static int n810_jack_func;
+
+static void n810_ext_control(struct snd_soc_codec *codec)
+{
+       snd_soc_dapm_set_endpoint(codec, "Ext Spk", n810_spk_func);
+       snd_soc_dapm_set_endpoint(codec, "Headphone Jack", n810_jack_func);
+
+       snd_soc_dapm_sync_endpoints(codec);
+}
+
+static int n810_startup(struct snd_pcm_substream *substream)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_codec *codec = rtd->socdev->codec;
+
+       n810_ext_control(codec);
+       return clk_enable(sys_clkout2);
+}
+
+static void n810_shutdown(struct snd_pcm_substream *substream)
+{
+       clk_disable(sys_clkout2);
+}
+
+static int n810_hw_params(struct snd_pcm_substream *substream,
+       struct snd_pcm_hw_params *params)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_codec_dai *codec_dai = rtd->dai->codec_dai;
+       struct snd_soc_cpu_dai *cpu_dai = rtd->dai->cpu_dai;
+       int err;
+
+       /* Set codec DAI configuration */
+       err = codec_dai->dai_ops.set_fmt(codec_dai,
+                                        SND_SOC_DAIFMT_I2S |
+                                        SND_SOC_DAIFMT_NB_NF |
+                                        SND_SOC_DAIFMT_CBM_CFM);
+       if (err < 0)
+               return err;
+
+       /* Set cpu DAI configuration */
+       err = cpu_dai->dai_ops.set_fmt(cpu_dai,
+                                      SND_SOC_DAIFMT_I2S |
+                                      SND_SOC_DAIFMT_NB_NF |
+                                      SND_SOC_DAIFMT_CBM_CFM);
+       if (err < 0)
+               return err;
+
+       /* Set the codec system clock for DAC and ADC */
+       err = codec_dai->dai_ops.set_sysclk(codec_dai, 0, 12000000,
+                                           SND_SOC_CLOCK_IN);
+
+       return err;
+}
+
+static struct snd_soc_ops n810_ops = {
+       .startup = n810_startup,
+       .hw_params = n810_hw_params,
+       .shutdown = n810_shutdown,
+};
+
+static int n810_get_spk(struct snd_kcontrol *kcontrol,
+                       struct snd_ctl_elem_value *ucontrol)
+{
+       ucontrol->value.integer.value[0] = n810_spk_func;
+
+       return 0;
+}
+
+static int n810_set_spk(struct snd_kcontrol *kcontrol,
+                       struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_codec *codec =  snd_kcontrol_chip(kcontrol);
+
+       if (n810_spk_func == ucontrol->value.integer.value[0])
+               return 0;
+
+       n810_spk_func = ucontrol->value.integer.value[0];
+       n810_ext_control(codec);
+
+       return 1;
+}
+
+static int n810_get_jack(struct snd_kcontrol *kcontrol,
+                        struct snd_ctl_elem_value *ucontrol)
+{
+       ucontrol->value.integer.value[0] = n810_jack_func;
+
+       return 0;
+}
+
+static int n810_set_jack(struct snd_kcontrol *kcontrol,
+                        struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_codec *codec =  snd_kcontrol_chip(kcontrol);
+
+       if (n810_jack_func == ucontrol->value.integer.value[0])
+               return 0;
+
+       n810_jack_func = ucontrol->value.integer.value[0];
+       n810_ext_control(codec);
+
+       return 1;
+}
+
+static int n810_spk_event(struct snd_soc_dapm_widget *w,
+                         struct snd_kcontrol *k, int event)
+{
+       if (SND_SOC_DAPM_EVENT_ON(event))
+               omap_set_gpio_dataout(RX44_SPEAKER_AMP_GPIO, 1);
+       else
+               omap_set_gpio_dataout(RX44_SPEAKER_AMP_GPIO, 0);
+
+       return 0;
+}
+
+static int n810_jack_event(struct snd_soc_dapm_widget *w,
+                          struct snd_kcontrol *k, int event)
+{
+       if (SND_SOC_DAPM_EVENT_ON(event))
+               omap_set_gpio_dataout(RX44_HEADSET_AMP_GPIO, 1);
+       else
+               omap_set_gpio_dataout(RX44_HEADSET_AMP_GPIO, 0);
+
+       return 0;
+}
+
+static const struct snd_soc_dapm_widget aic33_dapm_widgets[] = {
+       SND_SOC_DAPM_SPK("Ext Spk", n810_spk_event),
+       SND_SOC_DAPM_HP("Headphone Jack", n810_jack_event),
+};
+
+static const char *audio_map[][3] = {
+       {"Headphone Jack", NULL, "HPLOUT"},
+       {"Headphone Jack", NULL, "HPROUT"},
+
+       {"Ext Spk", NULL, "LLOUT"},
+       {"Ext Spk", NULL, "RLOUT"},
+};
+
+static const char *spk_function[] = {"Off", "On"};
+static const char *jack_function[] = {"Off", "Headphone"};
+static const struct soc_enum n810_enum[] = {
+       SOC_ENUM_SINGLE_EXT(2, spk_function),
+       SOC_ENUM_SINGLE_EXT(3, jack_function),
+};
+
+static const struct snd_kcontrol_new aic33_n810_controls[] = {
+       SOC_ENUM_EXT("Speaker Function", n810_enum[0],
+                    n810_get_spk, n810_set_spk),
+       SOC_ENUM_EXT("Jack Function", n810_enum[1],
+                    n810_get_jack, n810_set_jack),
+};
+
+static int n810_aic33_init(struct snd_soc_codec *codec)
+{
+       int i, err;
+
+       /* Not connected */
+       snd_soc_dapm_set_endpoint(codec, "MONO_LOUT", 0);
+       snd_soc_dapm_set_endpoint(codec, "HPLCOM", 0);
+       snd_soc_dapm_set_endpoint(codec, "HPRCOM", 0);
+
+       /* Add N810 specific controls */
+       for (i = 0; i < ARRAY_SIZE(aic33_n810_controls); i++) {
+               err = snd_ctl_add(codec->card,
+                       snd_soc_cnew(&aic33_n810_controls[i], codec, NULL));
+               if (err < 0)
+                       return err;
+       }
+
+       /* Add N810 specific widgets */
+       for (i = 0; i < ARRAY_SIZE(aic33_dapm_widgets); i++)
+               snd_soc_dapm_new_control(codec, &aic33_dapm_widgets[i]);
+
+       /* Set up N810 specific audio path audio_map */
+       for (i = 0; i < ARRAY_SIZE(audio_map); i++)
+               snd_soc_dapm_connect_input(codec, audio_map[i][0],
+                       audio_map[i][1], audio_map[i][2]);
+
+       snd_soc_dapm_sync_endpoints(codec);
+
+       return 0;
+}
+
+/* Digital audio interface glue - connects codec <--> CPU */
+static struct snd_soc_dai_link n810_dai = {
+       .name = "TLV320AIC33",
+       .stream_name = "AIC33",
+       .cpu_dai = &omap_mcbsp_dai[0],
+       .codec_dai = &aic3x_dai,
+       .init = n810_aic33_init,
+       .ops = &n810_ops,
+};
+
+/* Audio machine driver */
+static struct snd_soc_machine snd_soc_machine_n810 = {
+       .name = "N810",
+       .dai_link = &n810_dai,
+       .num_links = 1,
+};
+
+/* Audio private data */
+static struct aic3x_setup_data n810_aic33_setup = {
+       .i2c_address = 0x18,
+};
+
+/* Audio subsystem */
+static struct snd_soc_device n810_snd_devdata = {
+       .machine = &snd_soc_machine_n810,
+       .platform = &omap_soc_platform,
+       .codec_dev = &soc_codec_dev_aic3x,
+       .codec_data = &n810_aic33_setup,
+};
+
+static struct platform_device *n810_snd_device;
+
+static int __init n810_soc_init(void)
+{
+       int err;
+       struct device *dev;
+
+       if (!machine_is_nokia_n810())
+               return -ENODEV;
+
+       n810_snd_device = platform_device_alloc("soc-audio", -1);
+       if (!n810_snd_device)
+               return -ENOMEM;
+
+       platform_set_drvdata(n810_snd_device, &n810_snd_devdata);
+       n810_snd_devdata.dev = &n810_snd_device->dev;
+       *(unsigned int *)n810_dai.cpu_dai->private_data = 1; /* McBSP2 */
+       err = platform_device_add(n810_snd_device);
+       if (err)
+               goto err1;
+
+       dev = &n810_snd_device->dev;
+
+       sys_clkout2_src = clk_get(dev, "sys_clkout2_src");
+       if (IS_ERR(sys_clkout2_src)) {
+               dev_err(dev, "Could not get sys_clkout2_src clock\n");
+               return -ENODEV;
+       }
+       sys_clkout2 = clk_get(dev, "sys_clkout2");
+       if (IS_ERR(sys_clkout2)) {
+               dev_err(dev, "Could not get sys_clkout2\n");
+               goto err1;
+       }
+       /*
+        * Configure 12 MHz output on SYS_CLKOUT2. Therefore we must use
+        * 96 MHz as its parent in order to get 12 MHz
+        */
+       func96m_clk = clk_get(dev, "func_96m_ck");
+       if (IS_ERR(func96m_clk)) {
+               dev_err(dev, "Could not get func 96M clock\n");
+               goto err2;
+       }
+       clk_set_parent(sys_clkout2_src, func96m_clk);
+       clk_set_rate(sys_clkout2, 12000000);
+
+       if (omap_request_gpio(RX44_HEADSET_AMP_GPIO) < 0)
+               BUG();
+       if (omap_request_gpio(RX44_SPEAKER_AMP_GPIO) < 0)
+               BUG();
+       omap_set_gpio_direction(RX44_HEADSET_AMP_GPIO, 0);
+       omap_set_gpio_direction(RX44_SPEAKER_AMP_GPIO, 0);
+
+       return 0;
+err2:
+       clk_put(sys_clkout2);
+       platform_device_del(n810_snd_device);
+err1:
+       platform_device_put(n810_snd_device);
+
+       return err;
+
+}
+
+static void __exit n810_soc_exit(void)
+{
+       platform_device_unregister(n810_snd_device);
+}
+
+module_init(n810_soc_init);
+module_exit(n810_soc_exit);
+
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@nokia.com>");
+MODULE_DESCRIPTION("ALSA SoC Nokia N810");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
new file mode 100644 (file)
index 0000000..40d87e6
--- /dev/null
@@ -0,0 +1,414 @@
+/*
+ * omap-mcbsp.c  --  OMAP ALSA SoC DAI driver using McBSP port
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Jarkko Nikula <jarkko.nikula@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/initval.h>
+#include <sound/soc.h>
+
+#include <asm/arch/control.h>
+#include <asm/arch/dma.h>
+#include <asm/arch/mcbsp.h>
+#include "omap-mcbsp.h"
+#include "omap-pcm.h"
+
+#define OMAP_MCBSP_RATES       (SNDRV_PCM_RATE_44100 | \
+                                SNDRV_PCM_RATE_48000 | \
+                                SNDRV_PCM_RATE_KNOT)
+
+struct omap_mcbsp_data {
+       unsigned int                    bus_id;
+       struct omap_mcbsp_reg_cfg       regs;
+       /*
+        * Flags indicating is the bus already activated and configured by
+        * another substream
+        */
+       int                             active;
+       int                             configured;
+};
+
+#define to_mcbsp(priv) container_of((priv), struct omap_mcbsp_data, bus_id)
+
+static struct omap_mcbsp_data mcbsp_data[NUM_LINKS];
+
+/*
+ * Stream DMA parameters. DMA request line and port address are set runtime
+ * since they are different between OMAP1 and later OMAPs
+ */
+static struct omap_pcm_dma_data omap_mcbsp_dai_dma_params[NUM_LINKS][2] = {
+{
+       { .name         = "I2S PCM Stereo out", },
+       { .name         = "I2S PCM Stereo in", },
+},
+};
+
+#if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX)
+static const int omap1_dma_reqs[][2] = {
+       { OMAP_DMA_MCBSP1_TX, OMAP_DMA_MCBSP1_RX },
+       { OMAP_DMA_MCBSP2_TX, OMAP_DMA_MCBSP2_RX },
+       { OMAP_DMA_MCBSP3_TX, OMAP_DMA_MCBSP3_RX },
+};
+static const unsigned long omap1_mcbsp_port[][2] = {
+       { OMAP1510_MCBSP1_BASE + OMAP_MCBSP_REG_DXR1,
+         OMAP1510_MCBSP1_BASE + OMAP_MCBSP_REG_DRR1 },
+       { OMAP1510_MCBSP2_BASE + OMAP_MCBSP_REG_DXR1,
+         OMAP1510_MCBSP2_BASE + OMAP_MCBSP_REG_DRR1 },
+       { OMAP1510_MCBSP3_BASE + OMAP_MCBSP_REG_DXR1,
+         OMAP1510_MCBSP3_BASE + OMAP_MCBSP_REG_DRR1 },
+};
+#else
+static const int omap1_dma_reqs[][2] = {};
+static const unsigned long omap1_mcbsp_port[][2] = {};
+#endif
+#if defined(CONFIG_ARCH_OMAP2420)
+static const int omap2420_dma_reqs[][2] = {
+       { OMAP24XX_DMA_MCBSP1_TX, OMAP24XX_DMA_MCBSP1_RX },
+       { OMAP24XX_DMA_MCBSP2_TX, OMAP24XX_DMA_MCBSP2_RX },
+};
+static const unsigned long omap2420_mcbsp_port[][2] = {
+       { OMAP24XX_MCBSP1_BASE + OMAP_MCBSP_REG_DXR1,
+         OMAP24XX_MCBSP1_BASE + OMAP_MCBSP_REG_DRR1 },
+       { OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DXR1,
+         OMAP24XX_MCBSP2_BASE + OMAP_MCBSP_REG_DRR1 },
+};
+#else
+static const int omap2420_dma_reqs[][2] = {};
+static const unsigned long omap2420_mcbsp_port[][2] = {};
+#endif
+
+static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_cpu_dai *cpu_dai = rtd->dai->cpu_dai;
+       struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
+       int err = 0;
+
+       if (!cpu_dai->active)
+               err = omap_mcbsp_request(mcbsp_data->bus_id);
+
+       return err;
+}
+
+static void omap_mcbsp_dai_shutdown(struct snd_pcm_substream *substream)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_cpu_dai *cpu_dai = rtd->dai->cpu_dai;
+       struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
+
+       if (!cpu_dai->active) {
+               omap_mcbsp_free(mcbsp_data->bus_id);
+               mcbsp_data->configured = 0;
+       }
+}
+
+static int omap_mcbsp_dai_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_cpu_dai *cpu_dai = rtd->dai->cpu_dai;
+       struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
+       int err = 0;
+
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+       case SNDRV_PCM_TRIGGER_RESUME:
+       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+               if (!mcbsp_data->active++)
+                       omap_mcbsp_start(mcbsp_data->bus_id);
+               break;
+
+       case SNDRV_PCM_TRIGGER_STOP:
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+               if (!--mcbsp_data->active)
+                       omap_mcbsp_stop(mcbsp_data->bus_id);
+               break;
+       default:
+               err = -EINVAL;
+       }
+
+       return err;
+}
+
+static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
+                                   struct snd_pcm_hw_params *params)
+{
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct snd_soc_cpu_dai *cpu_dai = rtd->dai->cpu_dai;
+       struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
+       struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
+       int dma, bus_id = mcbsp_data->bus_id, id = cpu_dai->id;
+       unsigned long port;
+
+       if (cpu_class_is_omap1()) {
+               dma = omap1_dma_reqs[bus_id][substream->stream];
+               port = omap1_mcbsp_port[bus_id][substream->stream];
+       } else if (cpu_is_omap2420()) {
+               dma = omap2420_dma_reqs[bus_id][substream->stream];
+               port = omap2420_mcbsp_port[bus_id][substream->stream];
+       } else {
+               /*
+                * TODO: Add support for 2430 and 3430
+                */
+               return -ENODEV;
+       }
+       omap_mcbsp_dai_dma_params[id][substream->stream].dma_req = dma;
+       omap_mcbsp_dai_dma_params[id][substream->stream].port_addr = port;
+       cpu_dai->dma_data = &omap_mcbsp_dai_dma_params[id][substream->stream];
+
+       if (mcbsp_data->configured) {
+               /* McBSP already configured by another stream */
+               return 0;
+       }
+
+       switch (params_channels(params)) {
+       case 2:
+               /* Set 1 word per (McBPSP) frame and use dual-phase frames */
+               regs->rcr2      |= RFRLEN2(1 - 1) | RPHASE;
+               regs->rcr1      |= RFRLEN1(1 - 1);
+               regs->xcr2      |= XFRLEN2(1 - 1) | XPHASE;
+               regs->xcr1      |= XFRLEN1(1 - 1);
+               break;
+       default:
+               /* Unsupported number of channels */
+               return -EINVAL;
+       }
+
+       switch (params_format(params)) {
+       case SNDRV_PCM_FORMAT_S16_LE:
+               /* Set word lengths */
+               regs->rcr2      |= RWDLEN2(OMAP_MCBSP_WORD_16);
+               regs->rcr1      |= RWDLEN1(OMAP_MCBSP_WORD_16);
+               regs->xcr2      |= XWDLEN2(OMAP_MCBSP_WORD_16);
+               regs->xcr1      |= XWDLEN1(OMAP_MCBSP_WORD_16);
+               /* Set FS period and length in terms of bit clock periods */
+               regs->srgr2     |= FPER(16 * 2 - 1);
+               regs->srgr1     |= FWID(16 - 1);
+               break;
+       default:
+               /* Unsupported PCM format */
+               return -EINVAL;
+       }
+
+       omap_mcbsp_config(bus_id, &mcbsp_data->regs);
+       mcbsp_data->configured = 1;
+
+       return 0;
+}
+
+/*
+ * This must be called before _set_clkdiv and _set_sysclk since McBSP register
+ * cache is initialized here
+ */
+static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_cpu_dai *cpu_dai,
+                                     unsigned int fmt)
+{
+       struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
+       struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
+
+       if (mcbsp_data->configured)
+               return 0;
+
+       memset(regs, 0, sizeof(*regs));
+       /* Generic McBSP register settings */
+       regs->spcr2     |= XINTM(3) | FREE;
+       regs->spcr1     |= RINTM(3);
+       regs->rcr2      |= RFIG;
+       regs->xcr2      |= XFIG;
+
+       switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
+       case SND_SOC_DAIFMT_I2S:
+               /* 1-bit data delay */
+               regs->rcr2      |= RDATDLY(1);
+               regs->xcr2      |= XDATDLY(1);
+               break;
+       default:
+               /* Unsupported data format */
+               return -EINVAL;
+       }
+
+       switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
+       case SND_SOC_DAIFMT_CBS_CFS:
+               /* McBSP master. Set FS and bit clocks as outputs */
+               regs->pcr0      |= FSXM | FSRM |
+                                  CLKXM | CLKRM;
+               /* Sample rate generator drives the FS */
+               regs->srgr2     |= FSGM;
+               break;
+       case SND_SOC_DAIFMT_CBM_CFM:
+               /* McBSP slave */
+               break;
+       default:
+               /* Unsupported master/slave configuration */
+               return -EINVAL;
+       }
+
+       /* Set bit clock (CLKX/CLKR) and FS polarities */
+       switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
+       case SND_SOC_DAIFMT_NB_NF:
+               /*
+                * Normal BCLK + FS.
+                * FS active low. TX data driven on falling edge of bit clock
+                * and RX data sampled on rising edge of bit clock.
+                */
+               regs->pcr0      |= FSXP | FSRP |
+                                  CLKXP | CLKRP;
+               break;
+       case SND_SOC_DAIFMT_NB_IF:
+               regs->pcr0      |= CLKXP | CLKRP;
+               break;
+       case SND_SOC_DAIFMT_IB_NF:
+               regs->pcr0      |= FSXP | FSRP;
+               break;
+       case SND_SOC_DAIFMT_IB_IF:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int omap_mcbsp_dai_set_clkdiv(struct snd_soc_cpu_dai *cpu_dai,
+                                    int div_id, int div)
+{
+       struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
+       struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
+
+       if (div_id != OMAP_MCBSP_CLKGDV)
+               return -ENODEV;
+
+       regs->srgr1     |= CLKGDV(div - 1);
+
+       return 0;
+}
+
+static int omap_mcbsp_dai_set_clks_src(struct omap_mcbsp_data *mcbsp_data,
+                                      int clk_id)
+{
+       int sel_bit;
+       u16 reg;
+
+       if (cpu_class_is_omap1()) {
+               /* OMAP1's can use only external source clock */
+               if (unlikely(clk_id == OMAP_MCBSP_SYSCLK_CLKS_FCLK))
+                       return -EINVAL;
+               else
+                       return 0;
+       }
+
+       switch (mcbsp_data->bus_id) {
+       case 0:
+               reg = OMAP2_CONTROL_DEVCONF0;
+               sel_bit = 2;
+               break;
+       case 1:
+               reg = OMAP2_CONTROL_DEVCONF0;
+               sel_bit = 6;
+               break;
+       /* TODO: Support for ports 3 - 5 in OMAP2430 and OMAP34xx */
+       default:
+               return -EINVAL;
+       }
+
+       if (cpu_class_is_omap2()) {
+               if (clk_id == OMAP_MCBSP_SYSCLK_CLKS_FCLK) {
+                       omap_ctrl_writel(omap_ctrl_readl(reg) &
+                                        ~(1 << sel_bit), reg);
+               } else {
+                       omap_ctrl_writel(omap_ctrl_readl(reg) |
+                                        (1 << sel_bit), reg);
+               }
+       }
+
+       return 0;
+}
+
+static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_cpu_dai *cpu_dai,
+                                        int clk_id, unsigned int freq,
+                                        int dir)
+{
+       struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
+       struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
+       int err = 0;
+
+       switch (clk_id) {
+       case OMAP_MCBSP_SYSCLK_CLK:
+               regs->srgr2     |= CLKSM;
+               break;
+       case OMAP_MCBSP_SYSCLK_CLKS_FCLK:
+       case OMAP_MCBSP_SYSCLK_CLKS_EXT:
+               err = omap_mcbsp_dai_set_clks_src(mcbsp_data, clk_id);
+               break;
+
+       case OMAP_MCBSP_SYSCLK_CLKX_EXT:
+               regs->srgr2     |= CLKSM;
+       case OMAP_MCBSP_SYSCLK_CLKR_EXT:
+               regs->pcr0      |= SCLKME;
+               break;
+       default:
+               err = -ENODEV;
+       }
+
+       return err;
+}
+
+struct snd_soc_cpu_dai omap_mcbsp_dai[NUM_LINKS] = {
+{
+       .name = "omap-mcbsp-dai",
+       .id = 0,
+       .type = SND_SOC_DAI_I2S,
+       .playback = {
+               .channels_min = 2,
+               .channels_max = 2,
+               .rates = OMAP_MCBSP_RATES,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
+       },
+       .capture = {
+               .channels_min = 2,
+               .channels_max = 2,
+               .rates = OMAP_MCBSP_RATES,
+               .formats = SNDRV_PCM_FMTBIT_S16_LE,
+       },
+       .ops = {
+               .startup = omap_mcbsp_dai_startup,
+               .shutdown = omap_mcbsp_dai_shutdown,
+               .trigger = omap_mcbsp_dai_trigger,
+               .hw_params = omap_mcbsp_dai_hw_params,
+       },
+       .dai_ops = {
+               .set_fmt = omap_mcbsp_dai_set_dai_fmt,
+               .set_clkdiv = omap_mcbsp_dai_set_clkdiv,
+               .set_sysclk = omap_mcbsp_dai_set_dai_sysclk,
+       },
+       .private_data = &mcbsp_data[0].bus_id,
+},
+};
+EXPORT_SYMBOL_GPL(omap_mcbsp_dai);
+
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@nokia.com>");
+MODULE_DESCRIPTION("OMAP I2S SoC Interface");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/omap/omap-mcbsp.h b/sound/soc/omap/omap-mcbsp.h
new file mode 100644 (file)
index 0000000..9965fd4
--- /dev/null
@@ -0,0 +1,49 @@
+/*
+ * omap-mcbsp.h
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Jarkko Nikula <jarkko.nikula@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __OMAP_I2S_H__
+#define __OMAP_I2S_H__
+
+/* Source clocks for McBSP sample rate generator */
+enum omap_mcbsp_clksrg_clk {
+       OMAP_MCBSP_SYSCLK_CLKS_FCLK,    /* Internal FCLK */
+       OMAP_MCBSP_SYSCLK_CLKS_EXT,     /* External CLKS pin */
+       OMAP_MCBSP_SYSCLK_CLK,          /* Internal ICLK */
+       OMAP_MCBSP_SYSCLK_CLKX_EXT,     /* External CLKX pin */
+       OMAP_MCBSP_SYSCLK_CLKR_EXT,     /* External CLKR pin */
+};
+
+/* McBSP dividers */
+enum omap_mcbsp_div {
+       OMAP_MCBSP_CLKGDV,              /* Sample rate generator divider */
+};
+
+/*
+ * REVISIT: Preparation for ASoC v2. Let the number of available links be
+ * the same as the number of McBSP ports found in the OMAP(s) we are
+ * compiling for.
+ */
+#define NUM_LINKS      1
+
+extern struct snd_soc_cpu_dai omap_mcbsp_dai[NUM_LINKS];
+
+#endif
diff --git a/sound/soc/omap/omap-pcm.c b/sound/soc/omap/omap-pcm.c
new file mode 100644 (file)
index 0000000..6237020
--- /dev/null
@@ -0,0 +1,357 @@
+/*
+ * omap-pcm.c  --  ALSA PCM interface for the OMAP SoC
+ *
+ * Copyright (C) 2008 Nokia Corporation
+ *
+ * Contact: Jarkko Nikula <jarkko.nikula@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <sound/core.h>
+#include <sound/pcm.h>
+#include <sound/pcm_params.h>
+#include <sound/soc.h>
+
+#include <asm/arch/dma.h>
+#include "omap-pcm.h"
+
+static const struct snd_pcm_hardware omap_pcm_hardware = {
+       .info                   = SNDRV_PCM_INFO_MMAP |
+                                 SNDRV_PCM_INFO_MMAP_VALID |
+                                 SNDRV_PCM_INFO_INTERLEAVED |
+                                 SNDRV_PCM_INFO_PAUSE |
+                                 SNDRV_PCM_INFO_RESUME,
+       .formats                = SNDRV_PCM_FMTBIT_S16_LE,
+       .period_bytes_min       = 32,
+       .period_bytes_max       = 64 * 1024,
+       .periods_min            = 2,
+       .periods_max            = 255,
+       .buffer_bytes_max       = 128 * 1024,
+};
+
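+/* Per-substream state; period_index (-1 = stopped) is only used on OMAP1510 */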
+struct omap_runtime_data {
+       spinlock_t                      lock;
+       struct omap_pcm_dma_data        *dma_data;
+       int                             dma_ch;
+       int                             period_index;
+};
+
+static void omap_pcm_dma_irq(int ch, u16 stat, void *data)
+{
+       struct snd_pcm_substream *substream = data;
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct omap_runtime_data *prtd = runtime->private_data;
+       unsigned long flags;
+
+       if (cpu_is_omap1510()) {
+               /*
+                * OMAP1510 doesn't support DMA chaining, so we have to restart
+                * the transfer after all periods have been transferred
+                */
+               spin_lock_irqsave(&prtd->lock, flags);
+               if (prtd->period_index >= 0) {
+                       if (++prtd->period_index == runtime->periods) {
+                               prtd->period_index = 0;
+                               omap_start_dma(prtd->dma_ch);
+                       }
+               }
+               spin_unlock_irqrestore(&prtd->lock, flags);
+       }
+
+       snd_pcm_period_elapsed(substream);
+}
+
+/* This may get called several times by OSS emulation */
+static int omap_pcm_hw_params(struct snd_pcm_substream *substream,
+                             struct snd_pcm_hw_params *params)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct snd_soc_pcm_runtime *rtd = substream->private_data;
+       struct omap_runtime_data *prtd = runtime->private_data;
+       struct omap_pcm_dma_data *dma_data = rtd->dai->cpu_dai->dma_data;
+       int err = 0;
+
+       if (!dma_data)
+               return -ENODEV;
+
+       snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
+       runtime->dma_bytes = params_buffer_bytes(params);
+
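+       /* The DMA channel is requested only once; later hw_params calls reuse it */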
+       if (prtd->dma_data)
+               return 0;
+       prtd->dma_data = dma_data;
+       err = omap_request_dma(dma_data->dma_req, dma_data->name,
+                              omap_pcm_dma_irq, substream, &prtd->dma_ch);
+       if (err < 0) {
+               /* Don't keep a stale dma_data pointer if the request failed */
+               prtd->dma_data = NULL;
+               return err;
+       }
+       if (!cpu_is_omap1510()) {
+               /*
+                * Link the channel with itself so the DMA doesn't need any
+                * reprogramming while looping over the buffer
+                */
+               omap_dma_link_lch(prtd->dma_ch, prtd->dma_ch);
+       }
+
+       return 0;
+}
+
+static int omap_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct omap_runtime_data *prtd = runtime->private_data;
+
+       if (prtd->dma_data == NULL)
+               return 0;
+
+       if (!cpu_is_omap1510())
+               omap_dma_unlink_lch(prtd->dma_ch, prtd->dma_ch);
+       omap_free_dma(prtd->dma_ch);
+       prtd->dma_data = NULL;
+
+       snd_pcm_set_runtime_buffer(substream, NULL);
+
+       return 0;
+}
+
+static int omap_pcm_prepare(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct omap_runtime_data *prtd = runtime->private_data;
+       struct omap_pcm_dma_data *dma_data = prtd->dma_data;
+       struct omap_dma_channel_params dma_params;
+
+       memset(&dma_params, 0, sizeof(dma_params));
+       /*
+        * Note: Regardless of the interface data formats supported by the OMAP
+        * McBSP or EAC blocks, the internal representation is always fixed at
+        * 16 bits/sample
+        */
+       dma_params.data_type                    = OMAP_DMA_DATA_TYPE_S16;
+       dma_params.trigger                      = dma_data->dma_req;
+       dma_params.sync_mode                    = OMAP_DMA_SYNC_ELEMENT;
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+               dma_params.src_amode            = OMAP_DMA_AMODE_POST_INC;
+               dma_params.dst_amode            = OMAP_DMA_AMODE_CONSTANT;
+               dma_params.src_or_dst_synch     = OMAP_DMA_DST_SYNC;
+               dma_params.src_start            = runtime->dma_addr;
+               dma_params.dst_start            = dma_data->port_addr;
+       } else {
+               dma_params.src_amode            = OMAP_DMA_AMODE_CONSTANT;
+               dma_params.dst_amode            = OMAP_DMA_AMODE_POST_INC;
+               dma_params.src_or_dst_synch     = OMAP_DMA_SRC_SYNC;
+               dma_params.src_start            = dma_data->port_addr;
+               dma_params.dst_start            = runtime->dma_addr;
+       }
+       /*
+        * Set the DMA transfer frame size equal to the ALSA period size and the
+        * frame count to the number of ALSA periods. Then, with the DMA frame
+        * interrupt enabled, we can transfer the whole ALSA buffer with a single
+        * DMA transfer but still get an interrupt at each period boundary.
+        */
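+       /* elem_count is in 16-bit elements (S16 data type), hence bytes / 2 */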
+       dma_params.elem_count   = snd_pcm_lib_period_bytes(substream) / 2;
+       dma_params.frame_count  = runtime->periods;
+       omap_set_dma_params(prtd->dma_ch, &dma_params);
+
+       omap_enable_dma_irq(prtd->dma_ch, OMAP_DMA_FRAME_IRQ);
+
+       return 0;
+}
+
+static int omap_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct omap_runtime_data *prtd = runtime->private_data;
+       int ret = 0;
+
+       spin_lock_irq(&prtd->lock);
+       switch (cmd) {
+       case SNDRV_PCM_TRIGGER_START:
+       case SNDRV_PCM_TRIGGER_RESUME:
+       case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+               prtd->period_index = 0;
+               omap_start_dma(prtd->dma_ch);
+               break;
+
+       case SNDRV_PCM_TRIGGER_STOP:
+       case SNDRV_PCM_TRIGGER_SUSPEND:
+       case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+               prtd->period_index = -1;
+               omap_stop_dma(prtd->dma_ch);
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       spin_unlock_irq(&prtd->lock);
+
+       return ret;
+}
+
+static snd_pcm_uframes_t omap_pcm_pointer(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct omap_runtime_data *prtd = runtime->private_data;
+       dma_addr_t ptr;
+       snd_pcm_uframes_t offset;
+
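+       /* Read the current DMA position and convert it to a frame offset */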
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               ptr = omap_get_dma_src_pos(prtd->dma_ch);
+       else
+               ptr = omap_get_dma_dst_pos(prtd->dma_ch);
+
+       offset = bytes_to_frames(runtime, ptr - runtime->dma_addr);
+       if (offset >= runtime->buffer_size)
+               offset = 0;
+
+       return offset;
+}
+
+static int omap_pcm_open(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+       struct omap_runtime_data *prtd;
+       int ret;
+
+       snd_soc_set_runtime_hwparams(substream, &omap_pcm_hardware);
+
+       /* Ensure that buffer size is a multiple of period size */
+       ret = snd_pcm_hw_constraint_integer(runtime,
+                                           SNDRV_PCM_HW_PARAM_PERIODS);
+       if (ret < 0)
+               goto out;
+
+       prtd = kzalloc(sizeof(*prtd), GFP_KERNEL);
+       if (prtd == NULL) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       spin_lock_init(&prtd->lock);
+       runtime->private_data = prtd;
+
+out:
+       return ret;
+}
+
+static int omap_pcm_close(struct snd_pcm_substream *substream)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+
+       kfree(runtime->private_data);
+       return 0;
+}
+
+static int omap_pcm_mmap(struct snd_pcm_substream *substream,
+       struct vm_area_struct *vma)
+{
+       struct snd_pcm_runtime *runtime = substream->runtime;
+
+       return dma_mmap_writecombine(substream->pcm->card->dev, vma,
+                                    runtime->dma_area,
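+/* Record size: fixed header + extra u32s (+ cycle stamp when enabled) */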
+                                    runtime->dma_addr,
+                                    runtime->dma_bytes);
+}
+
+struct snd_pcm_ops omap_pcm_ops = {
+       return cycle ? rec_size + KVM_TRC_CYCLE_SIZE : rec_size;
+       .close          = omap_pcm_close,
+       .ioctl          = snd_pcm_lib_ioctl,
+       .hw_params      = omap_pcm_hw_params,
+       .hw_free        = omap_pcm_hw_free,
+       .prepare        = omap_pcm_prepare,
+       .trigger        = omap_pcm_trigger,
+       .pointer        = omap_pcm_pointer,
+       .mmap           = omap_pcm_mmap,
+};
+
+static u64 omap_pcm_dmamask = DMA_BIT_MASK(32);
+
+static int omap_pcm_preallocate_dma_buffer(struct snd_pcm *pcm,
+       int stream)
+{
+       struct snd_pcm_substream *substream = pcm->streams[stream].substream;
+       struct snd_dma_buffer *buf = &substream->dma_buffer;
+       size_t size = omap_pcm_hardware.buffer_bytes_max;
+
+       buf->dev.type = SNDRV_DMA_TYPE_DEV;
+       WARN_ON(extra > KVM_TRC_EXTRA_MAX);
+       buf->private_data = NULL;
+       buf->area = dma_alloc_writecombine(pcm->card->dev, size,
+                                          &buf->addr, GFP_KERNEL);
+       if (!buf->area)
+               return -ENOMEM;
+
+       buf->bytes = size;
+       return 0;
+}
+
+static void omap_pcm_free_dma_buffers(struct snd_pcm *pcm)
+{
+       struct snd_pcm_substream *substream;
+       struct snd_dma_buffer *buf;
+       int stream;
+
+       for (stream = 0; stream < 2; stream++) {
+               substream = pcm->streams[stream].substream;
+               if (!substream)
+                       continue;
+
+               buf = &substream->dma_buffer;
+               if (!buf->area)
+                       continue;
+
+               dma_free_writecombine(pcm->card->dev, buf->bytes,
+                                     buf->area, buf->addr);
+               buf->area = NULL;
+       }
+}
+
+int omap_pcm_new(struct snd_card *card, struct snd_soc_codec_dai *dai,
+                struct snd_pcm *pcm)
+{
+       int ret = 0;
+
+       if (!card->dev->dma_mask)
+               card->dev->dma_mask = &omap_pcm_dmamask;
+       if (!card->dev->coherent_dma_mask)
+               card->dev->coherent_dma_mask = DMA_32BIT_MASK;
+
+       if (dai->playback.channels_min) {
+               ret = omap_pcm_preallocate_dma_buffer(pcm,
+                       SNDRV_PCM_STREAM_PLAYBACK);
+               if (ret)
+                       goto out;
+       }
+
+       if (dai->capture.channels_min) {
+               ret = omap_pcm_preallocate_dma_buffer(pcm,
+                       SNDRV_PCM_STREAM_CAPTURE);
+               if (ret)
+                       goto out;
+       }
+
+out:
+       return ret;
+}
+
+struct snd_soc_platform omap_soc_platform = {
+       .name           = "omap-pcm-audio",
+       .pcm_ops        = &omap_pcm_ops,
+       .pcm_new        = omap_pcm_new,
+       .pcm_free       = omap_pcm_free_dma_buffers,
+};
+EXPORT_SYMBOL_GPL(omap_soc_platform);
+
+MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@nokia.com>");
+MODULE_DESCRIPTION("OMAP PCM DMA module");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/omap/omap-pcm.h b/sound/soc/omap/omap-pcm.h
new file mode 100644 (file)
index 0000000..e4369bd
--- /dev/null
@@ -0,0 +1,35 @@
+       .create_buf_file        = kvm_create_buf_file_callback,
+ * omap-pcm.h
+ *
+ * Copyright (C) 2008 Nokia Corporation
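+/* Set up the lost_records file and relay channel, then register the markers */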
+ *
+ * Contact: Jarkko Nikula <jarkko.nikula@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __OMAP_PCM_H__
+#define __OMAP_PCM_H__
+
+struct omap_pcm_dma_data {
+       char            *name;          /* stream identifier */
+       int             dma_req;        /* DMA request line */
+       unsigned long   port_addr;      /* transmit/receive register */
+};
+
+extern struct snd_soc_platform omap_soc_platform;
+
+#endif
index 9ed8f2e8da10f7820a0621ebfdc42d35aa07b1d5..4eab2c19c454dbda65a62a4e000f05841332c0c5 100644 (file)
@@ -1,10 +1,10 @@
 /*
  * SoC audio for ln2440sbc
- * 
+ *
  * Copyright 2007 KonekTel, a.s.
  * Author: Ivan Kuten
  *         ivan.kuten@promwad.com
- * 
+ *
  * Heavily based on smdk2443_wm9710.c
  * Copyright 2007 Wolfson Microelectronics PLC.
  * Author: Graeme Gregory
index 962cc20b1af5976e0ee109ca7072c05de240903c..0e9d1c5f24840dab0d640d6f733aba13237742a2 100644 (file)
@@ -33,7 +33,7 @@
 #include <asm/arch/regs-gpio.h>
 #include <asm/hardware.h>
 #include <asm/arch/audio.h>
-#include <asm/io.h>
+#include <linux/io.h>
 #include <asm/arch/spi-gpio.h>
 
 #include <asm/plat-s3c24xx/regs-iis.h>
@@ -122,7 +122,7 @@ static int neo1973_hifi_hw_params(struct snd_pcm_substream *substream,
 
        /* set MCLK division for sample rate */
        ret = cpu_dai->dai_ops.set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
-               S3C2410_IISMOD_32FS );
+               S3C2410_IISMOD_32FS);
        if (ret < 0)
                return ret;
 
@@ -133,7 +133,7 @@ static int neo1973_hifi_hw_params(struct snd_pcm_substream *substream,
 
        /* set prescaler division for sample rate */
        ret = cpu_dai->dai_ops.set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
-               S3C24XX_PRESCALE(4,4));
+               S3C24XX_PRESCALE(4, 4));
        if (ret < 0)
                return ret;
 
@@ -222,7 +222,7 @@ static struct snd_soc_ops neo1973_voice_ops = {
        .hw_free = neo1973_voice_hw_free,
 };
 
-static int neo1973_scenario = 0;
+static int neo1973_scenario;
 
 static int neo1973_get_scenario(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
@@ -233,7 +233,7 @@ static int neo1973_get_scenario(struct snd_kcontrol *kcontrol,
 
 static int set_scenario_endpoints(struct snd_soc_codec *codec, int scenario)
 {
-       switch(neo1973_scenario) {
+       switch (neo1973_scenario) {
        case NEO_AUDIO_OFF:
                snd_soc_dapm_set_endpoint(codec, "Audio Out",    0);
                snd_soc_dapm_set_endpoint(codec, "GSM Line Out", 0);
@@ -334,7 +334,7 @@ static void lm4857_write_regs(void)
 static int lm4857_get_reg(struct snd_kcontrol *kcontrol,
        struct snd_ctl_elem_value *ucontrol)
 {
-       int reg=kcontrol->private_value & 0xFF;
+       int reg = kcontrol->private_value & 0xFF;
        int shift = (kcontrol->private_value >> 8) & 0x0F;
        int mask = (kcontrol->private_value >> 16) & 0xFF;
 
@@ -349,11 +349,11 @@ static int lm4857_set_reg(struct snd_kcontrol *kcontrol,
        int shift = (kcontrol->private_value >> 8) & 0x0F;
        int mask = (kcontrol->private_value >> 16) & 0xFF;
 
-       if (((lm4857_regs[reg] >> shift ) & mask) ==
+       if (((lm4857_regs[reg] >> shift) & mask) ==
                ucontrol->value.integer.value[0])
                return 0;
 
-       lm4857_regs[reg] &= ~ (mask << shift);
+       lm4857_regs[reg] &= ~(mask << shift);
        lm4857_regs[reg] |= ucontrol->value.integer.value[0] << shift;
        lm4857_write_regs();
        return 1;
@@ -398,7 +398,7 @@ static const struct snd_soc_dapm_widget wm8753_dapm_widgets[] = {
 
 
 /* example machine audio map connections */
-static const charaudio_map[][3] = {
+static const char *audio_map[][3] = {
 
        /* Connections to the lm4857 amp */
        {"Audio Out", NULL, "LOUT1"},
@@ -450,7 +450,7 @@ static const char *neo_scenarios[] = {
 };
 
 static const struct soc_enum neo_scenario_enum[] = {
-       SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(neo_scenarios),neo_scenarios),
+       SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(neo_scenarios), neo_scenarios),
 };
 
 static const struct snd_kcontrol_new wm8753_neo1973_controls[] = {
@@ -521,8 +521,8 @@ static int neo1973_wm8753_init(struct snd_soc_codec *codec)
 /*
  * BT Codec DAI
  */
-static struct snd_soc_cpu_dai bt_dai =
-{      .name = "Bluetooth",
+static struct snd_soc_cpu_dai bt_dai = {
+       .name = "Bluetooth",
        .id = 0,
        .type = SND_SOC_DAI_PCM,
        .playback = {
@@ -616,6 +616,35 @@ static int lm4857_i2c_attach(struct i2c_adapter *adap)
        return i2c_probe(adap, &addr_data, lm4857_amp_probe);
 }
 
+static u8 lm4857_state;
+
+static int lm4857_suspend(struct i2c_client *dev, pm_message_t state)
+{
+       dev_dbg(&dev->dev, "lm4857_suspend\n");
+       lm4857_state = lm4857_regs[LM4857_CTRL] & 0xf;
+       if (lm4857_state) {
+               lm4857_regs[LM4857_CTRL] &= 0xf0;
+               lm4857_write_regs();
+       }
+       return 0;
+}
+
+static int lm4857_resume(struct i2c_client *dev)
+{
+       if (lm4857_state) {
+               lm4857_regs[LM4857_CTRL] |= (lm4857_state & 0x0f);
+               lm4857_write_regs();
+       }
+       return 0;
+}
+
+static void lm4857_shutdown(struct i2c_client *dev)
+{
+       dev_dbg(&dev->dev, "lm4857_shutdown\n");
+       lm4857_regs[LM4857_CTRL] &= 0xf0;
+       lm4857_write_regs();
+}
+
 /* corgi i2c codec control layer */
 static struct i2c_driver lm4857_i2c_driver = {
        .driver = {
@@ -623,6 +652,9 @@ static struct i2c_driver lm4857_i2c_driver = {
                .owner = THIS_MODULE,
        },
        .id =             I2C_DRIVERID_LM4857,
+       .suspend =        lm4857_suspend,
+       .resume =         lm4857_resume,
+       .shutdown =       lm4857_shutdown,
        .attach_adapter = lm4857_i2c_attach,
        .detach_client =  lm4857_i2c_detach,
        .command =        NULL,
@@ -667,6 +699,6 @@ module_init(neo1973_init);
 module_exit(neo1973_exit);
 
 /* Module information */
-MODULE_AUTHOR("Graeme Gregory, graeme.gregory@wolfsonmicro.com, www.wolfsonmicro.com");
+MODULE_AUTHOR("Graeme Gregory, graeme@openmoko.org, www.openmoko.org");
 MODULE_DESCRIPTION("ALSA SoC WM8753 Neo1973");
 MODULE_LICENSE("GPL");
index 1c1ddbf7f3c01e61605fc5f1ee29856f3a215349..e81d9a6c83dac829f356d12d52c3539418f2aea8 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/wait.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
@@ -30,7 +31,6 @@
 #include <sound/soc.h>
 
 #include <asm/hardware.h>
-#include <asm/io.h>
 #include <asm/plat-s3c/regs-ac97.h>
 #include <asm/arch/regs-gpio.h>
 #include <asm/arch/regs-clock.h>
@@ -47,7 +47,7 @@ struct s3c24xx_ac97_info {
 };
 static struct s3c24xx_ac97_info s3c24xx_ac97;
 
-DECLARE_COMPLETION(ac97_completion);
+static DECLARE_COMPLETION(ac97_completion);
 static u32 codec_ready;
 static DECLARE_MUTEX(ac97_mutex);
 
@@ -290,7 +290,7 @@ static int s3c2443_ac97_trigger(struct snd_pcm_substream *substream, int cmd)
        u32 ac_glbctrl;
 
        ac_glbctrl = readl(s3c24xx_ac97.regs + S3C_AC97_GLBCTRL);
-       switch(cmd) {
+       switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
@@ -333,7 +333,7 @@ static int s3c2443_ac97_mic_trigger(struct snd_pcm_substream *substream,
        u32 ac_glbctrl;
 
        ac_glbctrl = readl(s3c24xx_ac97.regs + S3C_AC97_GLBCTRL);
-       switch(cmd) {
+       switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
        case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
@@ -391,7 +391,6 @@ struct snd_soc_cpu_dai s3c2443_ac97_dai[] = {
                .trigger = s3c2443_ac97_mic_trigger,},
 },
 };
-
 EXPORT_SYMBOL_GPL(s3c2443_ac97_dai);
 EXPORT_SYMBOL_GPL(soc_ac97_ops);
 
index 49580fb481d5d55916a92dc721b0cad96a4987aa..6c70a81c730cdbf8aea253a4240cd7007c7cfd68 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
@@ -30,7 +31,6 @@
 #include <sound/soc.h>
 
 #include <asm/dma.h>
-#include <asm/io.h>
 #include <asm/hardware.h>
 #include <asm/arch/dma.h>
 #include <asm/arch/audio.h>
@@ -93,7 +93,7 @@ static void s3c24xx_pcm_enqueue(struct snd_pcm_substream *substream)
        while (prtd->dma_loaded < prtd->dma_limit) {
                unsigned long len = prtd->dma_period;
 
-               DBG("dma_loaded: %d\n",prtd->dma_loaded);
+               DBG("dma_loaded: %d\n", prtd->dma_loaded);
 
                if ((pos + len) > prtd->dma_end) {
                        len  = prtd->dma_end - pos;
@@ -101,7 +101,7 @@ static void s3c24xx_pcm_enqueue(struct snd_pcm_substream *substream)
                               __func__, len);
                }
 
-               ret = s3c2410_dma_enqueue(prtd->params->channel, 
+               ret = s3c2410_dma_enqueue(prtd->params->channel,
                        substream, pos, len);
 
                if (ret == 0) {
@@ -129,7 +129,7 @@ static void s3c24xx_audio_buffdone(struct s3c2410_dma_chan *channel,
                return;
 
        prtd = substream->runtime->private_data;
-       
+
        if (substream)
                snd_pcm_period_elapsed(substream);
 
@@ -150,7 +150,7 @@ static int s3c24xx_pcm_hw_params(struct snd_pcm_substream *substream,
        struct snd_soc_pcm_runtime *rtd = substream->private_data;
        struct s3c24xx_pcm_dma_params *dma = rtd->dai->cpu_dai->dma_data;
        unsigned long totbytes = params_buffer_bytes(params);
-       int ret=0;
+       int ret = 0;
 
        DBG("Entered %s\n", __func__);
 
@@ -223,7 +223,7 @@ static int s3c24xx_pcm_prepare(struct snd_pcm_substream *substream)
        /* return if this is a bufferless transfer e.g.
         * codec <--> BT codec or GSM modem -- lg FIXME */
        if (!prtd->params)
-               return 0;
+               return 0;
 
        /* channel needs configuring for mem=>device, increment memory addr,
         * sync to pclk, half-word transfers to the IIS-FIFO. */
@@ -293,8 +293,8 @@ static int s3c24xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        return ret;
 }
 
-static snd_pcm_uframes_t 
-       s3c24xx_pcm_pointer(struct snd_pcm_substream *substream)
+static snd_pcm_uframes_t
+s3c24xx_pcm_pointer(struct snd_pcm_substream *substream)
 {
        struct snd_pcm_runtime *runtime = substream->runtime;
        struct s3c24xx_runtime_data *prtd = runtime->private_data;
@@ -313,7 +313,7 @@ static snd_pcm_uframes_t
 
        spin_unlock(&prtd->lock);
 
-       DBG("Pointer %x %x\n",src,dst);
+       DBG("Pointer %x %x\n", src, dst);
 
        /* we seem to be getting the odd error from the pcm library due
         * to out-of-bounds pointers. this is maybe due to the dma engine
@@ -355,11 +355,11 @@ static int s3c24xx_pcm_close(struct snd_pcm_substream *substream)
 
        DBG("Entered %s\n", __func__);
 
-       if (prtd)
-               kfree(prtd);
-       else
+       if (!prtd)
                DBG("s3c24xx_pcm_close called with prtd == NULL\n");
 
+       kfree(prtd);
+
        return 0;
 }
 
@@ -371,9 +371,9 @@ static int s3c24xx_pcm_mmap(struct snd_pcm_substream *substream,
        DBG("Entered %s\n", __func__);
 
        return dma_mmap_writecombine(substream->pcm->card->dev, vma,
-                                     runtime->dma_area,
-                                     runtime->dma_addr,
-                                     runtime->dma_bytes);
+                                    runtime->dma_area,
+                                    runtime->dma_addr,
+                                    runtime->dma_bytes);
 }
 
 static struct snd_pcm_ops s3c24xx_pcm_ops = {
@@ -432,7 +432,7 @@ static void s3c24xx_pcm_free_dma_buffers(struct snd_pcm *pcm)
 
 static u64 s3c24xx_pcm_dmamask = DMA_32BIT_MASK;
 
-static int s3c24xx_pcm_new(struct snd_card *card, 
+static int s3c24xx_pcm_new(struct snd_card *card,
        struct snd_soc_codec_dai *dai, struct snd_pcm *pcm)
 {
        int ret = 0;
@@ -467,7 +467,6 @@ struct snd_soc_platform s3c24xx_soc_platform = {
        .pcm_new        = s3c24xx_pcm_new,
        .pcm_free       = s3c24xx_pcm_free_dma_buffers,
 };
-
 EXPORT_SYMBOL_GPL(s3c24xx_soc_platform);
 
 MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
index b2e12893e3f4d4b0f39d64ff733333171e3a0dfb..c82cf15730a1e85c751483f75944e8905c0baa10 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/kvm_para.h>
 #include <linux/pagemap.h>
 #include <linux/mman.h>
+#include <linux/swap.h>
 
 #include <asm/processor.h>
 #include <asm/io.h>
@@ -59,7 +60,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
 
 static __read_mostly struct preempt_ops kvm_preempt_ops;
 
-static struct dentry *debugfs_dir;
+struct dentry *kvm_debugfs_dir;
 
 static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);
@@ -119,6 +120,29 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
        smp_call_function_mask(cpus, ack_flush, NULL, 1);
 }
 
+void kvm_reload_remote_mmus(struct kvm *kvm)
+{
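+       /* Ask every VCPU to reload its MMU and kick the ones currently running */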
+       int i, cpu;
+       cpumask_t cpus;
+       struct kvm_vcpu *vcpu;
+
+       cpus_clear(cpus);
+       for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+               vcpu = kvm->vcpus[i];
+               if (!vcpu)
+                       continue;
+               if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
+                       continue;
+               cpu = vcpu->cpu;
+               if (cpu != -1 && cpu != raw_smp_processor_id())
+                       cpu_set(cpu, cpus);
+       }
+       if (cpus_empty(cpus))
+               return;
+       smp_call_function_mask(cpus, ack_flush, NULL, 1);
+}
+
+
 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
 {
        struct page *page;
@@ -170,6 +194,7 @@ static struct kvm *kvm_create_vm(void)
        mutex_init(&kvm->lock);
        kvm_io_bus_init(&kvm->mmio_bus);
        init_rwsem(&kvm->slots_lock);
+       atomic_set(&kvm->users_count, 1);
        spin_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        spin_unlock(&kvm_lock);
@@ -189,9 +214,13 @@ static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
        if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
                vfree(free->dirty_bitmap);
 
+       if (!dont || free->lpage_info != dont->lpage_info)
+               vfree(free->lpage_info);
+
        free->npages = 0;
        free->dirty_bitmap = NULL;
        free->rmap = NULL;
+       free->lpage_info = NULL;
 }
 
 void kvm_free_physmem(struct kvm *kvm)
@@ -215,11 +244,25 @@ static void kvm_destroy_vm(struct kvm *kvm)
        mmdrop(mm);
 }
 
+void kvm_get_kvm(struct kvm *kvm)
+{
+       atomic_inc(&kvm->users_count);
+}
+EXPORT_SYMBOL_GPL(kvm_get_kvm);
+
+void kvm_put_kvm(struct kvm *kvm)
+{
+       if (atomic_dec_and_test(&kvm->users_count))
+               kvm_destroy_vm(kvm);
+}
+EXPORT_SYMBOL_GPL(kvm_put_kvm);
+
+
 static int kvm_vm_release(struct inode *inode, struct file *filp)
 {
        struct kvm *kvm = filp->private_data;
 
-       kvm_destroy_vm(kvm);
+       kvm_put_kvm(kvm);
        return 0;
 }
 
@@ -301,6 +344,25 @@ int __kvm_set_memory_region(struct kvm *kvm,
                new.user_alloc = user_alloc;
                new.userspace_addr = mem->userspace_addr;
        }
+       if (npages && !new.lpage_info) {
+               int largepages = npages / KVM_PAGES_PER_HPAGE;
+               if (npages % KVM_PAGES_PER_HPAGE)
+                       largepages++;
+               if (base_gfn % KVM_PAGES_PER_HPAGE)
+                       largepages++;
+
+               new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
+
+               if (!new.lpage_info)
+                       goto out_free;
+
+               memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
+
+               if (base_gfn % KVM_PAGES_PER_HPAGE)
+                       new.lpage_info[0].write_count = 1;
+               if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
+                       new.lpage_info[largepages-1].write_count = 1;
+       }
 
        /* Allocate page dirty bitmap if needed */
        if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
@@ -397,6 +459,12 @@ int is_error_page(struct page *page)
 }
 EXPORT_SYMBOL_GPL(is_error_page);
 
+int is_error_pfn(pfn_t pfn)
+{
+       return pfn == bad_pfn;
+}
+EXPORT_SYMBOL_GPL(is_error_pfn);
+
 static inline unsigned long bad_hva(void)
 {
        return PAGE_OFFSET;
@@ -444,7 +512,7 @@ int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
 
-static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
+unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 {
        struct kvm_memory_slot *slot;
 
@@ -458,7 +526,7 @@ static unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 /*
  * Requires current->mm->mmap_sem to be held
  */
-struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
        struct page *page[1];
        unsigned long addr;
@@ -469,7 +537,7 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr)) {
                get_page(bad_page);
-               return bad_page;
+               return page_to_pfn(bad_page);
        }
 
        npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
@@ -477,27 +545,71 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 
        if (npages != 1) {
                get_page(bad_page);
-               return bad_page;
+               return page_to_pfn(bad_page);
        }
 
-       return page[0];
+       return page_to_pfn(page[0]);
+}
+
+EXPORT_SYMBOL_GPL(gfn_to_pfn);
+
+struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+{
+       return pfn_to_page(gfn_to_pfn(kvm, gfn));
 }
 
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
 void kvm_release_page_clean(struct page *page)
 {
-       put_page(page);
+       kvm_release_pfn_clean(page_to_pfn(page));
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
+void kvm_release_pfn_clean(pfn_t pfn)
+{
+       put_page(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
+
 void kvm_release_page_dirty(struct page *page)
 {
+       kvm_release_pfn_dirty(page_to_pfn(page));
+}
+EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
+
+void kvm_release_pfn_dirty(pfn_t pfn)
+{
+       kvm_set_pfn_dirty(pfn);
+       kvm_release_pfn_clean(pfn);
+}
+EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
+
+void kvm_set_page_dirty(struct page *page)
+{
+       kvm_set_pfn_dirty(page_to_pfn(page));
+}
+EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
+
+void kvm_set_pfn_dirty(pfn_t pfn)
+{
+       struct page *page = pfn_to_page(pfn);
        if (!PageReserved(page))
                SetPageDirty(page);
-       put_page(page);
 }
-EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
+EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
+
+void kvm_set_pfn_accessed(pfn_t pfn)
+{
+       mark_page_accessed(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
+
+void kvm_get_pfn(pfn_t pfn)
+{
+       get_page(pfn_to_page(pfn));
+}
+EXPORT_SYMBOL_GPL(kvm_get_pfn);
 
 static int next_segment(unsigned long len, int offset)
 {
@@ -554,7 +666,9 @@ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
        addr = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(addr))
                return -EFAULT;
+       pagefault_disable();
        r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
+       pagefault_enable();
        if (r)
                return -EFAULT;
        return 0;
@@ -651,6 +765,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
         * We will block until either an interrupt or a signal wakes us up
         */
        while (!kvm_cpu_has_interrupt(vcpu)
+              && !kvm_cpu_has_pending_timer(vcpu)
               && !signal_pending(current)
               && !kvm_arch_vcpu_runnable(vcpu)) {
                set_current_state(TASK_INTERRUPTIBLE);
@@ -678,8 +793,10 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        if (vmf->pgoff == 0)
                page = virt_to_page(vcpu->run);
+#ifdef CONFIG_X86
        else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
                page = virt_to_page(vcpu->arch.pio_data);
+#endif
        else
                return VM_FAULT_SIGBUS;
        get_page(page);
@@ -701,11 +818,11 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
 {
        struct kvm_vcpu *vcpu = filp->private_data;
 
-       fput(vcpu->kvm->filp);
+       kvm_put_kvm(vcpu->kvm);
        return 0;
 }
 
-static struct file_operations kvm_vcpu_fops = {
+static const struct file_operations kvm_vcpu_fops = {
        .release        = kvm_vcpu_release,
        .unlocked_ioctl = kvm_vcpu_ioctl,
        .compat_ioctl   = kvm_vcpu_ioctl,
@@ -723,9 +840,10 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
 
        r = anon_inode_getfd(&fd, &inode, &file,
                             "kvm-vcpu", &kvm_vcpu_fops, vcpu);
-       if (r)
+       if (r) {
+               kvm_put_kvm(vcpu->kvm);
                return r;
-       atomic_inc(&vcpu->kvm->filp->f_count);
+       }
        return fd;
 }
 
@@ -760,6 +878,7 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
        mutex_unlock(&kvm->lock);
 
        /* Now it's all set up, let userspace reach it */
+       kvm_get_kvm(kvm);
        r = create_vcpu_fd(vcpu);
        if (r < 0)
                goto unlink;
@@ -802,28 +921,39 @@ static long kvm_vcpu_ioctl(struct file *filp,
                r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
                break;
        case KVM_GET_REGS: {
-               struct kvm_regs kvm_regs;
+               struct kvm_regs *kvm_regs;
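+               /* kvm_regs is too large for the stack; allocate it instead */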
 
-               memset(&kvm_regs, 0, sizeof kvm_regs);
-               r = kvm_arch_vcpu_ioctl_get_regs(vcpu, &kvm_regs);
-               if (r)
+               r = -ENOMEM;
+               kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
+               if (!kvm_regs)
                        goto out;
+               r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
+               if (r)
+                       goto out_free1;
                r = -EFAULT;
-               if (copy_to_user(argp, &kvm_regs, sizeof kvm_regs))
-                       goto out;
+               if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
+                       goto out_free1;
                r = 0;
+out_free1:
+               kfree(kvm_regs);
                break;
        }
        case KVM_SET_REGS: {
-               struct kvm_regs kvm_regs;
+               struct kvm_regs *kvm_regs;
 
-               r = -EFAULT;
-               if (copy_from_user(&kvm_regs, argp, sizeof kvm_regs))
+               r = -ENOMEM;
+               kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
+               if (!kvm_regs)
                        goto out;
-               r = kvm_arch_vcpu_ioctl_set_regs(vcpu, &kvm_regs);
+               r = -EFAULT;
+               if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
+                       goto out_free2;
+               r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
                if (r)
-                       goto out;
+                       goto out_free2;
                r = 0;
+out_free2:
+               kfree(kvm_regs);
                break;
        }
        case KVM_GET_SREGS: {
@@ -851,6 +981,30 @@ static long kvm_vcpu_ioctl(struct file *filp,
                r = 0;
                break;
        }
+       case KVM_GET_MP_STATE: {
+               struct kvm_mp_state mp_state;
+
+               r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
+               if (r)
+                       goto out;
+               r = -EFAULT;
+               if (copy_to_user(argp, &mp_state, sizeof mp_state))
+                       goto out;
+               r = 0;
+               break;
+       }
+       case KVM_SET_MP_STATE: {
+               struct kvm_mp_state mp_state;
+
+               r = -EFAULT;
+               if (copy_from_user(&mp_state, argp, sizeof mp_state))
+                       goto out;
+               r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
+               if (r)
+                       goto out;
+               r = 0;
+               break;
+       }
        case KVM_TRANSLATE: {
                struct kvm_translation tr;
 
@@ -1005,7 +1159,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
        return 0;
 }
 
-static struct file_operations kvm_vm_fops = {
+static const struct file_operations kvm_vm_fops = {
        .release        = kvm_vm_release,
        .unlocked_ioctl = kvm_vm_ioctl,
        .compat_ioctl   = kvm_vm_ioctl,
@@ -1024,12 +1178,10 @@ static int kvm_dev_ioctl_create_vm(void)
                return PTR_ERR(kvm);
        r = anon_inode_getfd(&fd, &inode, &file, "kvm-vm", &kvm_vm_fops, kvm);
        if (r) {
-               kvm_destroy_vm(kvm);
+               kvm_put_kvm(kvm);
                return r;
        }
 
-       kvm->filp = file;
-
        return fd;
 }
 
@@ -1059,7 +1211,15 @@ static long kvm_dev_ioctl(struct file *filp,
                r = -EINVAL;
                if (arg)
                        goto out;
-               r = 2 * PAGE_SIZE;
+               r = PAGE_SIZE;     /* struct kvm_run */
+#ifdef CONFIG_X86
+               r += PAGE_SIZE;    /* pio data page */
+#endif
+               break;
+       case KVM_TRACE_ENABLE:
+       case KVM_TRACE_PAUSE:
+       case KVM_TRACE_DISABLE:
+               r = kvm_trace_ioctl(ioctl, arg);
                break;
        default:
                return kvm_arch_dev_ioctl(filp, ioctl, arg);
@@ -1232,9 +1392,9 @@ static void kvm_init_debug(void)
 {
        struct kvm_stats_debugfs_item *p;
 
-       debugfs_dir = debugfs_create_dir("kvm", NULL);
+       kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
        for (p = debugfs_entries; p->name; ++p)
-               p->dentry = debugfs_create_file(p->name, 0444, debugfs_dir,
+               p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
                                                (void *)(long)p->offset,
                                                stat_fops[p->kind]);
 }
@@ -1245,7 +1405,7 @@ static void kvm_exit_debug(void)
 
        for (p = debugfs_entries; p->name; ++p)
                debugfs_remove(p->dentry);
-       debugfs_remove(debugfs_dir);
+       debugfs_remove(kvm_debugfs_dir);
 }
 
 static int kvm_suspend(struct sys_device *dev, pm_message_t state)
@@ -1272,6 +1432,7 @@ static struct sys_device kvm_sysdev = {
 };
 
 struct page *bad_page;
+pfn_t bad_pfn;
 
 static inline
 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
@@ -1313,6 +1474,8 @@ int kvm_init(void *opaque, unsigned int vcpu_size,
                goto out;
        }
 
+       bad_pfn = page_to_pfn(bad_page);
+
        r = kvm_arch_hardware_setup();
        if (r < 0)
                goto out_free_0;
@@ -1386,6 +1549,7 @@ EXPORT_SYMBOL_GPL(kvm_init);
 
 void kvm_exit(void)
 {
+       kvm_trace_cleanup();
        misc_deregister(&kvm_dev);
        kmem_cache_destroy(kvm_vcpu_cache);
        sysdev_unregister(&kvm_sysdev);
diff --git a/virt/kvm/kvm_trace.c b/virt/kvm/kvm_trace.c
new file mode 100644 (file)
index 0000000..0e49547
--- /dev/null
@@ -0,0 +1,276 @@
+/*
+ * kvm trace
+ *
+ * It is designed to allow debugging traces of kvm to be generated
+ * on UP / SMP machines.  Each trace entry can be timestamped so that
+ * it's possible to reconstruct a chronological record of trace events.
+ * The implementation refers to blktrace kernel support.
+ *
+ * Copyright (c) 2008 Intel Corporation
+ * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
+ *
+ * Authors: Feng(Eric) Liu, eric.e.liu@intel.com
+ *
+ * Date:    Feb 2008
+ */
+
+#include <linux/module.h>
+#include <linux/relay.h>
+#include <linux/debugfs.h>
+
+#include <linux/kvm_host.h>
+
+#define KVM_TRACE_STATE_RUNNING        (1 << 0)
+#define KVM_TRACE_STATE_PAUSE          (1 << 1)
+#define KVM_TRACE_STATE_CLEARUP        (1 << 2)
+
+struct kvm_trace {
+       int trace_state;
+       struct rchan *rchan;
+       struct dentry *lost_file;
+       atomic_t lost_records;
+};
+static struct kvm_trace *kvm_trace;
+
+struct kvm_trace_probe {
+       const char *name;
+       const char *format;
+       u32 cycle_in;
+       marker_probe_func *probe_func;
+};
+
+static inline int calc_rec_size(int cycle, int extra)
+{
+       int rec_size = KVM_TRC_HEAD_SIZE;
+
+       rec_size += extra;
+       return cycle ? rec_size += KVM_TRC_CYCLE_SIZE : rec_size;
+}
+
+static void kvm_add_trace(void *probe_private, void *call_data,
+                         const char *format, va_list *args)
+{
+       struct kvm_trace_probe *p = probe_private;
+       struct kvm_trace *kt = kvm_trace;
+       struct kvm_trace_rec rec;
+       struct kvm_vcpu *vcpu;
+       int    i, extra, size;
+
+       if (unlikely(kt->trace_state != KVM_TRACE_STATE_RUNNING))
+               return;
+
+       rec.event       = va_arg(*args, u32);
+       vcpu            = va_arg(*args, struct kvm_vcpu *);
+       rec.pid         = current->tgid;
+       rec.vcpu_id     = vcpu->vcpu_id;
+
+       extra           = va_arg(*args, u32);
+       WARN_ON(!(extra <= KVM_TRC_EXTRA_MAX));
+       extra           = min_t(u32, extra, KVM_TRC_EXTRA_MAX);
+       rec.extra_u32   = extra;
+
+       rec.cycle_in    = p->cycle_in;
+
+       if (rec.cycle_in) {
+               u64 cycle = 0;
+
+               cycle = get_cycles();
+               rec.u.cycle.cycle_lo = (u32)cycle;
+               rec.u.cycle.cycle_hi = (u32)(cycle >> 32);
+
+               for (i = 0; i < rec.extra_u32; i++)
+                       rec.u.cycle.extra_u32[i] = va_arg(*args, u32);
+       } else {
+               for (i = 0; i < rec.extra_u32; i++)
+                       rec.u.nocycle.extra_u32[i] = va_arg(*args, u32);
+       }
+
+       size = calc_rec_size(rec.cycle_in, rec.extra_u32 * sizeof(u32));
+       relay_write(kt->rchan, &rec, size);
+}
+
+static struct kvm_trace_probe kvm_trace_probes[] = {
+       { "kvm_trace_entryexit", "%u %p %u %u %u %u %u %u", 1, kvm_add_trace },
+       { "kvm_trace_handler", "%u %p %u %u %u %u %u %u", 0, kvm_add_trace },
+};
+
+static int lost_records_get(void *data, u64 *val)
+{
+       struct kvm_trace *kt = data;
+
+       *val = atomic_read(&kt->lost_records);
+       return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(kvm_trace_lost_ops, lost_records_get, NULL, "%llu\n");
+
+/*
+ *  The relay channel is used in "no-overwrite" mode, it keeps trace of how
+ *  many times we encountered a full subbuffer, to tell user space app the
+ *  lost records there were.
+ */
+static int kvm_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
+                                    void *prev_subbuf, size_t prev_padding)
+{
+       struct kvm_trace *kt;
+
+       if (!relay_buf_full(buf))
+               return 1;
+
+       kt = buf->chan->private_data;
+       atomic_inc(&kt->lost_records);
+
+       return 0;
+}
+
+static struct dentry *kvm_create_buf_file_callack(const char *filename,
+                                                struct dentry *parent,
+                                                int mode,
+                                                struct rchan_buf *buf,
+                                                int *is_global)
+{
+       return debugfs_create_file(filename, mode, parent, buf,
+                                  &relay_file_operations);
+}
+
+static int kvm_remove_buf_file_callback(struct dentry *dentry)
+{
+       debugfs_remove(dentry);
+       return 0;
+}
+
+static struct rchan_callbacks kvm_relay_callbacks = {
+       .subbuf_start           = kvm_subbuf_start_callback,
+       .create_buf_file        = kvm_create_buf_file_callack,
+       .remove_buf_file        = kvm_remove_buf_file_callback,
+};
+
+static int do_kvm_trace_enable(struct kvm_user_trace_setup *kuts)
+{
+       struct kvm_trace *kt;
+       int i, r = -ENOMEM;
+
+       if (!kuts->buf_size || !kuts->buf_nr)
+               return -EINVAL;
+
+       kt = kzalloc(sizeof(*kt), GFP_KERNEL);
+       if (!kt)
+               goto err;
+
+       r = -EIO;
+       atomic_set(&kt->lost_records, 0);
+       kt->lost_file = debugfs_create_file("lost_records", 0444, kvm_debugfs_dir,
+                                           kt, &kvm_trace_lost_ops);
+       if (!kt->lost_file)
+               goto err;
+
+       kt->rchan = relay_open("trace", kvm_debugfs_dir, kuts->buf_size,
+                               kuts->buf_nr, &kvm_relay_callbacks, kt);
+       if (!kt->rchan)
+               goto err;
+
+       kvm_trace = kt;
+
+       for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
+               struct kvm_trace_probe *p = &kvm_trace_probes[i];
+
+               r = marker_probe_register(p->name, p->format, p->probe_func, p);
+               if (r)
+                       printk(KERN_INFO "Unable to register probe %s\n",
+                              p->name);
+       }
+
+       kvm_trace->trace_state = KVM_TRACE_STATE_RUNNING;
+
+       return 0;
+err:
+       if (kt) {
+               if (kt->lost_file)
+                       debugfs_remove(kt->lost_file);
+               if (kt->rchan)
+                       relay_close(kt->rchan);
+               kfree(kt);
+       }
+       return r;
+}
+
+static int kvm_trace_enable(char __user *arg)
+{
+       struct kvm_user_trace_setup kuts;
+       int ret;
+
+       ret = copy_from_user(&kuts, arg, sizeof(kuts));
+       if (ret)
+               return -EFAULT;
+
+       ret = do_kvm_trace_enable(&kuts);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+static int kvm_trace_pause(void)
+{
+       struct kvm_trace *kt = kvm_trace;
+       int r = -EINVAL;
+
+       if (kt == NULL)
+               return r;
+
+       if (kt->trace_state == KVM_TRACE_STATE_RUNNING) {
+               kt->trace_state = KVM_TRACE_STATE_PAUSE;
+               relay_flush(kt->rchan);
+               r = 0;
+       }
+
+       return r;
+}
+
+void kvm_trace_cleanup(void)
+{
+       struct kvm_trace *kt = kvm_trace;
+       int i;
+
+       if (kt == NULL)
+               return;
+
+       if (kt->trace_state == KVM_TRACE_STATE_RUNNING ||
+           kt->trace_state == KVM_TRACE_STATE_PAUSE) {
+
+               kt->trace_state = KVM_TRACE_STATE_CLEARUP;
+
+               for (i = 0; i < ARRAY_SIZE(kvm_trace_probes); i++) {
+                       struct kvm_trace_probe *p = &kvm_trace_probes[i];
+                       marker_probe_unregister(p->name, p->probe_func, p);
+               }
+
+               relay_close(kt->rchan);
+               debugfs_remove(kt->lost_file);
+               kfree(kt);
+       }
+}
+
+int kvm_trace_ioctl(unsigned int ioctl, unsigned long arg)
+{
+       void __user *argp = (void __user *)arg;
+       long r = -EINVAL;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       switch (ioctl) {
+       case KVM_TRACE_ENABLE:
+               r = kvm_trace_enable(argp);
+               break;
+       case KVM_TRACE_PAUSE:
+               r = kvm_trace_pause();
+               break;
+       case KVM_TRACE_DISABLE:
+               r = 0;
+               kvm_trace_cleanup();
+               break;
+       }
+
+       return r;
+}